column                                        dtype           values
hexsha                                        stringlengths   40 .. 40
size                                          int64           2 .. 1.02M
ext                                           stringclasses   10 values
lang                                          stringclasses   1 value
max_stars_repo_path                           stringlengths   4 .. 245
max_stars_repo_name                           stringlengths   6 .. 130
max_stars_repo_head_hexsha                    stringlengths   40 .. 40
max_stars_repo_licenses                       listlengths     1 .. 10
max_stars_count                               int64           1 .. 191k
max_stars_repo_stars_event_min_datetime       stringlengths   24 .. 24
max_stars_repo_stars_event_max_datetime       stringlengths   24 .. 24
max_issues_repo_path                          stringlengths   4 .. 245
max_issues_repo_name                          stringlengths   6 .. 130
max_issues_repo_head_hexsha                   stringlengths   40 .. 40
max_issues_repo_licenses                      listlengths     1 .. 10
max_issues_count                              int64           1 .. 67k
max_issues_repo_issues_event_min_datetime     stringlengths   24 .. 24
max_issues_repo_issues_event_max_datetime     stringlengths   24 .. 24
max_forks_repo_path                           stringlengths   4 .. 245
max_forks_repo_name                           stringlengths   6 .. 130
max_forks_repo_head_hexsha                    stringlengths   40 .. 40
max_forks_repo_licenses                       listlengths     1 .. 10
max_forks_count                               int64           1 .. 105k
max_forks_repo_forks_event_min_datetime       stringlengths   24 .. 24
max_forks_repo_forks_event_max_datetime       stringlengths   24 .. 24
content                                       stringlengths   2 .. 1.02M
avg_line_length                               float64         1 .. 417k
max_line_length                               int64           1 .. 987k
alphanum_fraction                             float64         0 .. 1
content_no_comment                            stringlengths   0 .. 1.01M
is_comment_constant_removed                   bool            1 class
is_sharp_comment_removed                      bool            1 class
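The rows below list each field's value in the schema order above, with the raw file text in content and a comment-stripped copy in content_no_comment. As a minimal, hedged sketch of how such a dump could be loaded and queried (the file name rows.jsonl and the pandas-based loading are assumptions, not something the dump itself specifies):

import pandas as pd

# Hypothetical file name: the dump does not say how the rows are serialized.
df = pd.read_json("rows.jsonl", lines=True)

# The columns mirror the schema above: repo metadata, star/issue/fork stats,
# the raw file text in `content`, and a derived `content_no_comment` copy.
python_rows = df[df["lang"] == "Python"]

# Example query: starred files under 10 kB, ordered by star count.
sample = (
    python_rows[python_rows["size"] < 10_000]
    .sort_values("max_stars_count", ascending=False)
    [["max_stars_repo_name", "max_stars_repo_path", "max_stars_count", "size"]]
)
print(sample.head())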
f70dba349bb470797300f3ecddd576d0367a3975
2,016
py
Python
data_managers/data_manager_vep_cache_downloader/data_manager/data_manager_vep_cache_download.py
brsynth/tools-iuc
26909099e5c61564bd72f67974e30e75f3fad22c
[ "MIT" ]
1
2022-01-25T21:25:21.000Z
2022-01-25T21:25:21.000Z
data_managers/data_manager_vep_cache_downloader/data_manager/data_manager_vep_cache_download.py
Delphine-L/tools-iuc
26909099e5c61564bd72f67974e30e75f3fad22c
[ "MIT" ]
40
2018-01-29T19:18:43.000Z
2019-03-22T20:33:41.000Z
data_managers/data_manager_vep_cache_downloader/data_manager/data_manager_vep_cache_download.py
Delphine-L/tools-iuc
26909099e5c61564bd72f67974e30e75f3fad22c
[ "MIT" ]
3
2018-02-22T15:30:51.000Z
2019-01-29T18:00:08.000Z
#!/usr/bin/env python

import json
import os
import re
import sys
import tarfile
from urllib.request import urlretrieve


def main():
    # Read in given out_file and create target directory for file download
    with open(sys.argv[1]) as fh:
        params = json.load(fh)
    target_directory = params['output_data'][0]['extra_files_path']
    os.mkdir(target_directory)

    # Process parameters for metadata and file download
    url = params['param_dict']['url'].rstrip("/") + "/" + params['param_dict']['file_name'].lstrip("/")
    m = re.search(r"(.*?)(merged|refseq)?_vep_(\d+?)_", params['param_dict']['file_name'])
    version = str(m.group(3))
    cache_type = m.group(2) if m.group(2) else "default"
    species = m.group(1).rstrip("_")
    display_name = f"{species.capitalize().replace('_', ' ')} {params['param_dict']['dbkey']} (V{version}{'' if cache_type == 'default' else ', ' + cache_type.capitalize()})"

    # Download and extract given cache archive, remove archive afterwards
    final_file, headers = urlretrieve(url, os.path.join(target_directory, params['param_dict']['file_name']))
    tar = tarfile.open(final_file, "r:gz")
    tar.extractall(target_directory)
    tar.close()
    os.remove(final_file)

    # Construct metadata for the new data table entry
    data_manager_dict = {
        'data_tables': {
            'vep_versioned_annotation_cache': [
                {
                    'value': params['param_dict']['file_name'].strip(".tar.gz"),
                    'dbkey': params['param_dict']['dbkey'],
                    'version': version,
                    'cachetype': cache_type,
                    'name': display_name,
                    'species': species,
                    'path': './%s' % params['param_dict']['file_name'].strip(".tar.gz")
                }
            ]
        }
    }

    # Save metadata to out_file
    with open(sys.argv[1], 'w') as fh:
        json.dump(data_manager_dict, fh, sort_keys=True)


if __name__ == "__main__":
    main()
35.368421
174
0.598214
import json import os import re import sys import tarfile from urllib.request import urlretrieve def main(): with open(sys.argv[1]) as fh: params = json.load(fh) target_directory = params['output_data'][0]['extra_files_path'] os.mkdir(target_directory) url = params['param_dict']['url'].rstrip("/") + "/" + params['param_dict']['file_name'].lstrip("/") m = re.search(r"(.*?)(merged|refseq)?_vep_(\d+?)_", params['param_dict']['file_name']) version = str(m.group(3)) cache_type = m.group(2) if m.group(2) else "default" species = m.group(1).rstrip("_") display_name = f"{species.capitalize().replace('_', ' ')} {params['param_dict']['dbkey']} (V{version}{'' if cache_type == 'default' else ', ' + cache_type.capitalize()})" final_file, headers = urlretrieve(url, os.path.join(target_directory, params['param_dict']['file_name'])) tar = tarfile.open(final_file, "r:gz") tar.extractall(target_directory) tar.close() os.remove(final_file) data_manager_dict = { 'data_tables': { 'vep_versioned_annotation_cache': [ { 'value': params['param_dict']['file_name'].strip(".tar.gz"), 'dbkey': params['param_dict']['dbkey'], 'version': version, 'cachetype': cache_type, 'name': display_name, 'species': species, 'path': './%s' % params['param_dict']['file_name'].strip(".tar.gz") } ] } } with open(sys.argv[1], 'w') as fh: json.dump(data_manager_dict, fh, sort_keys=True) if __name__ == "__main__": main()
true
true
f70dbab8eefb6b547f63f4c9cc5f87b20b782acc
4,879
py
Python
litex_boards/platforms/aller.py
mhrtmnn/litex-boards
e950a4a588515c69c0eb559f432fa41d35f5eb0c
[ "BSD-2-Clause" ]
null
null
null
litex_boards/platforms/aller.py
mhrtmnn/litex-boards
e950a4a588515c69c0eb559f432fa41d35f5eb0c
[ "BSD-2-Clause" ]
null
null
null
litex_boards/platforms/aller.py
mhrtmnn/litex-boards
e950a4a588515c69c0eb559f432fa41d35f5eb0c
[ "BSD-2-Clause" ]
null
null
null
#
# This file is part of LiteX-Boards.
#
# Copyright (c) 2018-2019 Rohit Singh <rohit@rohitksingh.in>
# Copyright (c) 2019 Florent Kermarrec <florent@enjoy-digital.fr>
# SPDX-License-Identifier: BSD-2-Clause

from litex.build.generic_platform import *
from litex.build.xilinx import XilinxPlatform
from litex.build.openocd import OpenOCD

# IOs ----------------------------------------------------------------------------------------------

_io = [
    # clk / rst
    ("clk100", 0, Pins("W19"), IOStandard("LVCMOS33")),

    # leds (only a single rgb led, aliased here also)
    ("user_led", 0, Pins("AB21"), IOStandard("LVCMOS33")),
    ("user_led", 1, Pins("AB22"), IOStandard("LVCMOS33")),
    ("user_led", 2, Pins("U20"), IOStandard("LVCMOS33")),

    # rgb led, active-low
    ("rgb_led", 0,
        Subsignal("r", Pins("AB21")),
        Subsignal("g", Pins("AB22")),
        Subsignal("b", Pins("U20")),
        IOStandard("LVCMOS33"),
    ),

    # flash
    ("flash", 0,
        Subsignal("cs_n", Pins("T19")),
        Subsignal("mosi", Pins("P22")),
        Subsignal("miso", Pins("R22")),
        Subsignal("hold", Pins("R21")),
        Subsignal("rst_n", Pins("R19")),
        IOStandard("LVCMOS33")
    ),
    ("flash4x", 0,  # clock needs to be accessed through STARTUPE2
        Subsignal("cs_n", Pins("T19")),
        Subsignal("dq", Pins("P22", "R22", "P21", "R21")),
        IOStandard("LVCMOS33")
    ),

    # tpm
    ("tpm", 0,
        Subsignal("clk", Pins("W20")),
        Subsignal("rst_n", Pins("V19")),
        Subsignal("cs_n", Pins("Y18")),
        Subsignal("mosi", Pins("Y19")),
        Subsignal("miso", Pins("V18")),
        IOStandard("LVCMOS33"),
    ),

    # pcie
    ("pcie_x1", 0,
        Subsignal("rst_n", Pins("AB20"), IOStandard("LVCMOS33"), Misc("PULLUP=TRUE")),
        Subsignal("clk_p", Pins("F6")),
        Subsignal("clk_n", Pins("E6")),
        Subsignal("rx_p", Pins("B8")),
        Subsignal("rx_n", Pins("A8")),
        Subsignal("tx_p", Pins("B4")),
        Subsignal("tx_n", Pins("A4"))
    ),
    ("pcie_x4", 0,
        Subsignal("rst_n", Pins("AB20"), IOStandard("LVCMOS33"), Misc("PULLUP=TRUE")),
        Subsignal("clk_p", Pins("F6")),
        Subsignal("clk_n", Pins("E6")),
        Subsignal("rx_p", Pins("B8 D11 B10 D9")),
        Subsignal("rx_n", Pins("A8 C11 A10 C9")),
        Subsignal("tx_p", Pins("B4 D5 B6 D7")),
        Subsignal("tx_n", Pins("A4 C5 A6 C7"))
    ),

    # dram
    ("ddram", 0,
        Subsignal("a", Pins(
            "U6 T5 Y6 T6 V2 T4 Y2 R2",
            "Y1 R4 W5 W1 AA6 U2"),
            IOStandard("SSTL15")),
        Subsignal("ba", Pins("W6 U5 R6"), IOStandard("SSTL15")),
        Subsignal("ras_n", Pins("V5"), IOStandard("SSTL15")),
        Subsignal("cas_n", Pins("T1"), IOStandard("SSTL15")),
        Subsignal("we_n", Pins("R3"), IOStandard("SSTL15")),
        Subsignal("dm", Pins("Y7 AA1"), IOStandard("SSTL15")),
        Subsignal("dq", Pins(
            "Y8 AB6 W9 AA8 AB7 V7 AB8 W7",
            "V4 AB2 AA5 AB3 AB5 W4 AB1 AA4"),
            IOStandard("SSTL15"),
            Misc("IN_TERM=UNTUNED_SPLIT_50")),
        Subsignal("dqs_p", Pins("V9 Y3"), IOStandard("DIFF_SSTL15")),
        Subsignal("dqs_n", Pins("V8 AA3"), IOStandard("DIFF_SSTL15")),
        Subsignal("clk_p", Pins("U3"), IOStandard("DIFF_SSTL15")),
        Subsignal("clk_n", Pins("V3"), IOStandard("DIFF_SSTL15")),
        Subsignal("cke", Pins("U1"), IOStandard("SSTL15")),
        Subsignal("odt", Pins("W2"), IOStandard("SSTL15")),
        Subsignal("reset_n", Pins("U7"), IOStandard("LVCMOS15")),
        Subsignal("cs_n", Pins("T3"), IOStandard("SSTL15")),
        Misc("SLEW=FAST"),
    ),
]

# Platform -----------------------------------------------------------------------------------------

class Platform(XilinxPlatform):
    default_clk_name = "clk100"
    default_clk_period = 1e9/100e6

    def __init__(self):
        XilinxPlatform.__init__(self, "xc7a200t-fbg484-2", _io, toolchain="vivado")
        self.add_platform_command("set_property INTERNAL_VREF 0.750 [get_iobanks 34]")
        self.toolchain.bitstream_commands = [
            "set_property BITSTREAM.CONFIG.SPI_BUSWIDTH 4 [current_design]",
            "set_property BITSTREAM.CONFIG.CONFIGRATE 16 [current_design]",
            "set_property BITSTREAM.GENERAL.COMPRESS TRUE [current_design]"
        ]
        self.toolchain.additional_commands = \
            ["write_cfgmem -force -format bin -interface spix4 -size 16 "
             "-loadbit \"up 0x0 {build_name}.bit\" -file {build_name}.bin"]

    def create_programmer(self):
        return OpenOCD("openocd_xc7_ft232.cfg", "bscan_spi_xc7a200t.bit")

    def do_finalize(self, fragment):
        XilinxPlatform.do_finalize(self, fragment)
        self.add_period_constraint(self.lookup_request("clk100", loose=True), 1e9/100e6)
37.530769
100
0.555852
from litex.build.generic_platform import * from litex.build.xilinx import XilinxPlatform from litex.build.openocd import OpenOCD _io = [ ("clk100", 0, Pins("W19"), IOStandard("LVCMOS33")), ("user_led", 0, Pins("AB21"), IOStandard("LVCMOS33")), ("user_led", 1, Pins("AB22"), IOStandard("LVCMOS33")), ("user_led", 2, Pins("U20"), IOStandard("LVCMOS33")), ("rgb_led", 0, Subsignal("r", Pins("AB21")), Subsignal("g", Pins("AB22")), Subsignal("b", Pins("U20")), IOStandard("LVCMOS33"), ), ("flash", 0, Subsignal("cs_n", Pins("T19")), Subsignal("mosi", Pins("P22")), Subsignal("miso", Pins("R22")), Subsignal("hold", Pins("R21")), Subsignal("rst_n", Pins("R19")), IOStandard("LVCMOS33") ), ("flash4x", 0, Subsignal("cs_n", Pins("T19")), Subsignal("dq", Pins("P22", "R22", "P21", "R21")), IOStandard("LVCMOS33") ), ("tpm", 0, Subsignal("clk", Pins("W20")), Subsignal("rst_n", Pins("V19")), Subsignal("cs_n", Pins("Y18")), Subsignal("mosi", Pins("Y19")), Subsignal("miso", Pins("V18")), IOStandard("LVCMOS33"), ), ("pcie_x1", 0, Subsignal("rst_n", Pins("AB20"), IOStandard("LVCMOS33"), Misc("PULLUP=TRUE")), Subsignal("clk_p", Pins("F6")), Subsignal("clk_n", Pins("E6")), Subsignal("rx_p", Pins("B8")), Subsignal("rx_n", Pins("A8")), Subsignal("tx_p", Pins("B4")), Subsignal("tx_n", Pins("A4")) ), ("pcie_x4", 0, Subsignal("rst_n", Pins("AB20"), IOStandard("LVCMOS33"), Misc("PULLUP=TRUE")), Subsignal("clk_p", Pins("F6")), Subsignal("clk_n", Pins("E6")), Subsignal("rx_p", Pins("B8 D11 B10 D9")), Subsignal("rx_n", Pins("A8 C11 A10 C9")), Subsignal("tx_p", Pins("B4 D5 B6 D7")), Subsignal("tx_n", Pins("A4 C5 A6 C7")) ), ("ddram", 0, Subsignal("a", Pins( "U6 T5 Y6 T6 V2 T4 Y2 R2", "Y1 R4 W5 W1 AA6 U2"), IOStandard("SSTL15")), Subsignal("ba", Pins("W6 U5 R6"), IOStandard("SSTL15")), Subsignal("ras_n", Pins("V5"), IOStandard("SSTL15")), Subsignal("cas_n", Pins("T1"), IOStandard("SSTL15")), Subsignal("we_n", Pins("R3"), IOStandard("SSTL15")), Subsignal("dm", Pins("Y7 AA1"), IOStandard("SSTL15")), Subsignal("dq", Pins( "Y8 AB6 W9 AA8 AB7 V7 AB8 W7", "V4 AB2 AA5 AB3 AB5 W4 AB1 AA4"), IOStandard("SSTL15"), Misc("IN_TERM=UNTUNED_SPLIT_50")), Subsignal("dqs_p", Pins("V9 Y3"), IOStandard("DIFF_SSTL15")), Subsignal("dqs_n", Pins("V8 AA3"), IOStandard("DIFF_SSTL15")), Subsignal("clk_p", Pins("U3"), IOStandard("DIFF_SSTL15")), Subsignal("clk_n", Pins("V3"), IOStandard("DIFF_SSTL15")), Subsignal("cke", Pins("U1"), IOStandard("SSTL15")), Subsignal("odt", Pins("W2"), IOStandard("SSTL15")), Subsignal("reset_n", Pins("U7"), IOStandard("LVCMOS15")), Subsignal("cs_n", Pins("T3"), IOStandard("SSTL15")), Misc("SLEW=FAST"), ), ] class Platform(XilinxPlatform): default_clk_name = "clk100" default_clk_period = 1e9/100e6 def __init__(self): XilinxPlatform.__init__(self, "xc7a200t-fbg484-2", _io, toolchain="vivado") self.add_platform_command("set_property INTERNAL_VREF 0.750 [get_iobanks 34]") self.toolchain.bitstream_commands = [ "set_property BITSTREAM.CONFIG.SPI_BUSWIDTH 4 [current_design]", "set_property BITSTREAM.CONFIG.CONFIGRATE 16 [current_design]", "set_property BITSTREAM.GENERAL.COMPRESS TRUE [current_design]" ] self.toolchain.additional_commands = \ ["write_cfgmem -force -format bin -interface spix4 -size 16 " "-loadbit \"up 0x0 {build_name}.bit\" -file {build_name}.bin"] def create_programmer(self): return OpenOCD("openocd_xc7_ft232.cfg", "bscan_spi_xc7a200t.bit") def do_finalize(self, fragment): XilinxPlatform.do_finalize(self, fragment) self.add_period_constraint(self.lookup_request("clk100", loose=True), 1e9/100e6)
true
true
f70dbbb741de6aa18a06232d6c29cdf228655fed
597
py
Python
examples/example_are_zones_ipv6_simple.py
dargor/python-cloudflare
fd6464e15b91263f1ce395e4336b1c1fac542880
[ "MIT" ]
2
2020-03-04T17:29:05.000Z
2020-08-13T12:22:06.000Z
examples/example_are_zones_ipv6_simple.py
dargor/python-cloudflare
fd6464e15b91263f1ce395e4336b1c1fac542880
[ "MIT" ]
1
2019-12-31T10:04:40.000Z
2019-12-31T10:04:40.000Z
examples/example_are_zones_ipv6_simple.py
dargor/python-cloudflare
fd6464e15b91263f1ce395e4336b1c1fac542880
[ "MIT" ]
1
2021-05-12T05:37:53.000Z
2021-05-12T05:37:53.000Z
#!/usr/bin/env python
"""Cloudflare API code - example"""

from __future__ import print_function

import os
import sys
sys.path.insert(0, os.path.abspath('..'))

import CloudFlare


def main():
    """Cloudflare API code - example"""

    cf = CloudFlare.CloudFlare()

    zones = cf.zones.get(params={'per_page':50})
    for zone in zones:
        zone_name = zone['name']
        zone_id = zone['id']
        settings_ipv6 = cf.zones.settings.ipv6.get(zone_id)
        ipv6_on = settings_ipv6['value']
        print(zone_id, ipv6_on, zone_name)

    exit(0)


if __name__ == '__main__':
    main()
21.321429
59
0.641541
from __future__ import print_function import os import sys sys.path.insert(0, os.path.abspath('..')) import CloudFlare def main(): cf = CloudFlare.CloudFlare() zones = cf.zones.get(params={'per_page':50}) for zone in zones: zone_name = zone['name'] zone_id = zone['id'] settings_ipv6 = cf.zones.settings.ipv6.get(zone_id) ipv6_on = settings_ipv6['value'] print(zone_id, ipv6_on, zone_name) exit(0) if __name__ == '__main__': main()
true
true
f70dbd3cd08351c60f8c869a40e796334f83d0bc
1,056
py
Python
blog/views/post_cls.py
junaidiiith/Disaster-Help-Predictor
79938619752861e5141207cca920900cea229a62
[ "Apache-2.0" ]
null
null
null
blog/views/post_cls.py
junaidiiith/Disaster-Help-Predictor
79938619752861e5141207cca920900cea229a62
[ "Apache-2.0" ]
null
null
null
blog/views/post_cls.py
junaidiiith/Disaster-Help-Predictor
79938619752861e5141207cca920900cea229a62
[ "Apache-2.0" ]
null
null
null
from django.contrib.auth.models import User
from django.core.paginator import Paginator
from django.shortcuts import render
from blog.models.clasDict import classes
from blog.models.post import Post
from django.template.defaulttags import register
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.urls import reverse_lazy
from django.views import generic
from django.views.generic import CreateView, UpdateView, DeleteView
from blog.models.comment import Comment
from blog.models.post import Post
from blog.models.clasDict import classes


@register.filter
def get_item(dictionary, key):
    return dictionary.get(key)


class PostClassView(generic.DetailView):
    model = Post
    template_name = 'blog/post_class.html'

    def get_context_data(self, **kwargs):
        # Call the base implementation first to get a context
        context = super().get_context_data(**kwargs)
        context['posts'] = Post.objects.filter(clss=self.kwargs['pk'])
        context['classes'] = classes
        return context
35.2
78
0.773674
from django.contrib.auth.models import User from django.core.paginator import Paginator from django.shortcuts import render from blog.models.clasDict import classes from blog.models.post import Post from django.template.defaulttags import register from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin from django.urls import reverse_lazy from django.views import generic from django.views.generic import CreateView, UpdateView, DeleteView from blog.models.comment import Comment from blog.models.post import Post from blog.models.clasDict import classes @register.filter def get_item(dictionary, key): return dictionary.get(key) class PostClassView(generic.DetailView): model = Post template_name = 'blog/post_class.html' def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) context['posts'] = Post.objects.filter(clss=self.kwargs['pk']) context['classes'] = classes return context
true
true
f70dbd7d0d336c81ab3e8f1b8917300c587fbab8
16,704
py
Python
archytas.py
javgat/archytas
17bb204d7a605ef802bcd4dab37952a692a8cd8e
[ "Unlicense" ]
1
2021-12-31T20:06:35.000Z
2021-12-31T20:06:35.000Z
archytas.py
javgat/archytas
17bb204d7a605ef802bcd4dab37952a692a8cd8e
[ "Unlicense" ]
null
null
null
archytas.py
javgat/archytas
17bb204d7a605ef802bcd4dab37952a692a8cd8e
[ "Unlicense" ]
null
null
null
#!/usr/bin/env python3 import sys from PySide2 import QtCore, QtWidgets, QtGui from os.path import exists, join, abspath from os import remove import tweepy from auth.auth import AuthData from tweet.tweet import getTweetsKeyword, tweetRandom, OutputerInterface import types class AuthDataInput(QtWidgets.QWidget): def __init__(self, wrapper: 'AuthDataWidget', authdata_path: str): super().__init__() self.authdata_path = authdata_path self.wrapper = wrapper self.layout_consumer_key = QtWidgets.QHBoxLayout() self.label_consumer_key = QtWidgets.QLabel("Consumer Key", alignment=QtCore.Qt.AlignCenter) self.edit_consumer_key = QtWidgets.QLineEdit() self.layout_consumer_key.addWidget(self.label_consumer_key) self.layout_consumer_key.addWidget(self.edit_consumer_key) self.layout_consumer_secret = QtWidgets.QHBoxLayout() self.label_consumer_secret = QtWidgets.QLabel("Consumer Secret", alignment=QtCore.Qt.AlignCenter) self.edit_consumer_secret = QtWidgets.QLineEdit() self.layout_consumer_secret.addWidget(self.label_consumer_secret) self.layout_consumer_secret.addWidget(self.edit_consumer_secret) self.layout_access_token = QtWidgets.QHBoxLayout() self.label_access_token = QtWidgets.QLabel("Access Token", alignment=QtCore.Qt.AlignCenter) self.edit_access_token = QtWidgets.QLineEdit() self.layout_access_token.addWidget(self.label_access_token) self.layout_access_token.addWidget(self.edit_access_token) self.layout_access_token_secret = QtWidgets.QHBoxLayout() self.label_access_token_secret = QtWidgets.QLabel("Access Token Secret", alignment=QtCore.Qt.AlignCenter) self.edit_access_token_secret = QtWidgets.QLineEdit() self.layout_access_token_secret.addWidget(self.label_access_token_secret) self.layout_access_token_secret.addWidget(self.edit_access_token_secret) self.button_save = QtWidgets.QPushButton("Save") self.layout = QtWidgets.QVBoxLayout(self) self.layout.addLayout(self.layout_consumer_key) self.layout.addLayout(self.layout_consumer_secret) self.layout.addLayout(self.layout_access_token) self.layout.addLayout(self.layout_access_token_secret) self.layout.addWidget(self.button_save) self.button_save.clicked.connect(self.save) @QtCore.Slot() def save(self): # Save in self.authdata_path consumer_key = self.edit_consumer_key.text() consumer_secret = self.edit_consumer_secret.text() access_token = self.edit_access_token.text() access_token_secret = self.edit_access_token_secret.text() ad = AuthData(consumer_key, consumer_secret, access_token, access_token_secret) ad.SaveToJson(self.authdata_path) # Notify parent wrapper self.wrapper.update_storage_status() class AuthDataStored(QtWidgets.QWidget): def __init__(self, wrapper: 'AuthDataWidget', authdata_path: str): super().__init__() self.wrapper = wrapper self.authdata_path = authdata_path # Read auth data self.ad = AuthData.CreateFromJson(authdata_path) self.layout_consumer_key = QtWidgets.QHBoxLayout() self.label_consumer_key = QtWidgets.QLabel("Consumer Key", alignment=QtCore.Qt.AlignLeft) self.label_literal_consumer_key = QtWidgets.QLabel(self.ad.consumer_key, alignment=QtCore.Qt.AlignRight) self.layout_consumer_key.addWidget(self.label_consumer_key) self.layout_consumer_key.addWidget(self.label_literal_consumer_key) self.layout_consumer_secret = QtWidgets.QHBoxLayout() self.label_consumer_secret = QtWidgets.QLabel("Consumer Secret", alignment=QtCore.Qt.AlignLeft) self.label_literal_consumer_secret = QtWidgets.QLabel(self.ad.consumer_secret, alignment=QtCore.Qt.AlignRight) self.layout_consumer_secret.addWidget(self.label_consumer_secret) 
self.layout_consumer_secret.addWidget(self.label_literal_consumer_secret) self.layout_access_token = QtWidgets.QHBoxLayout() self.label_access_token = QtWidgets.QLabel("Access Token", alignment=QtCore.Qt.AlignLeft) self.label_literal_access_token = QtWidgets.QLabel(self.ad.access_token, alignment=QtCore.Qt.AlignRight) self.layout_access_token.addWidget(self.label_access_token) self.layout_access_token.addWidget(self.label_literal_access_token) self.layout_access_token_secret = QtWidgets.QHBoxLayout() self.label_access_token_secret = QtWidgets.QLabel("Access Token Secret", alignment=QtCore.Qt.AlignLeft) self.label_literal_access_token_secret = QtWidgets.QLabel(self.ad.access_token_secret, alignment=QtCore.Qt.AlignRight) self.layout_access_token_secret.addWidget(self.label_access_token_secret) self.layout_access_token_secret.addWidget(self.label_literal_access_token_secret) self.text = QtWidgets.QLabel("", alignment=QtCore.Qt.AlignCenter) self.layout_buttons = QtWidgets.QHBoxLayout() self.button_connect = QtWidgets.QPushButton("Connect") self.button_edit = QtWidgets.QPushButton("Edit") self.button_delete = QtWidgets.QPushButton("Delete") self.layout_buttons.addWidget(self.button_connect) self.layout_buttons.addWidget(self.button_edit) self.layout_buttons.addWidget(self.button_delete) self.layout = QtWidgets.QVBoxLayout(self) self.layout.addWidget(self.text) self.layout.addLayout(self.layout_consumer_key) self.layout.addLayout(self.layout_consumer_secret) self.layout.addLayout(self.layout_access_token) self.layout.addLayout(self.layout_access_token_secret) self.layout.addLayout(self.layout_buttons) self.button_connect.clicked.connect(self.authenticate) self.button_delete.clicked.connect(self.delete_auth) @QtCore.Slot() def authenticate(self): # Authenticate to Twitter auth = tweepy.OAuthHandler(self.ad.consumer_key, self.ad.consumer_secret) auth.set_access_token(self.ad.access_token, self.ad.access_token_secret) api = tweepy.API(auth) auth_success = False try: api.verify_credentials() result_text = "Authentication OK" auth_success = True except: result_text = "Error during authentication" self.text.setText(result_text) if (auth_success): self.wrapper.update_api(api) @QtCore.Slot() def delete_auth(self): # Remove auth file if exists(self.authdata_path): remove(self.authdata_path) # Notify parent wrapper self.wrapper.update_storage_status() class AuthDataWidget(QtWidgets.QWidget): def __init__(self, authdata_path, archytas: 'ArchytasWidget'): super().__init__() self.authdata_path = authdata_path self.archytas = archytas self.calculate_authdata() self.title = QtWidgets.QLabel("Authentication", alignment=QtCore.Qt.AlignCenter) self.title.setFont(QtGui.QFont("Default", 16)) self.layout = QtWidgets.QVBoxLayout(self) self.layout.addWidget(self.title) self.layout.addWidget(self.authdata_inner) def calculate_authdata(self): self.isAuthDataStored = exists(self.authdata_path) ad = AuthData.CreateFromJson(self.authdata_path) if (self.isAuthDataStored and ad is not None): self.authdata_inner = AuthDataStored(self, self.authdata_path) else: self.authdata_inner = AuthDataInput(self, self.authdata_path) def update_storage_status(self): aw = self.layout.takeAt(1) aw.widget().deleteLater() self.calculate_authdata() self.layout.addWidget(self.authdata_inner) def update_api(self, api: tweepy.API): self.api = api if (self.api is not None): self.archytas.update_api(api) class RetweetWidget(QtWidgets.QWidget): def __init__(self, archytas: 'ArchytasWidget'): super().__init__() self.archytas = archytas self.title = 
QtWidgets.QLabel("Auto retweeter", alignment=QtCore.Qt.AlignCenter) self.title.setFont(QtGui.QFont("Default", 16)) self.label_err_message = QtWidgets.QLabel("", alignment=QtCore.Qt.AlignCenter) self.layout_number_retweets = QtWidgets.QHBoxLayout() self.label_number_retweets = QtWidgets.QLabel("Number of retweets", alignment=QtCore.Qt.AlignLeft) self.edit_number_retweets = QtWidgets.QLineEdit() self.layout_number_retweets.addWidget(self.label_number_retweets) self.layout_number_retweets.addWidget(self.edit_number_retweets) self.layout_keyword = QtWidgets.QHBoxLayout() self.label_keyword = QtWidgets.QLabel("Keyword", alignment=QtCore.Qt.AlignLeft) self.edit_keyword = QtWidgets.QLineEdit() self.layout_keyword.addWidget(self.label_keyword) self.layout_keyword.addWidget(self.edit_keyword) self.button_retweet = QtWidgets.QPushButton("Retweet") self.layout = QtWidgets.QVBoxLayout(self) self.layout.addWidget(self.title) self.layout.addWidget(self.label_err_message) self.layout.addLayout(self.layout_number_retweets) self.layout.addLayout(self.layout_keyword) self.layout.addWidget(self.button_retweet) self.button_retweet.clicked.connect(self.retweet) self.set_connected(False) def QtRetweetList(self, api: tweepy.API, tweets: list, index: int, secs: int, finishedAction: types.FunctionType): tweet = tweets[index] try: print('\nRetweet Bot found tweet by @' + tweet.user.screen_name + '. ' + 'Attempting to retweet.') tweet.retweet() print('Retweet published successfully.') index = index + 1 if (index < len(tweets)): QtCore.QTimer.singleShot(secs * 1000, lambda: self.QtRetweetList(api, tweets, index, secs, finishedAction)) else: finishedAction() # Some basic error handling. Will print out why retweet failed, into your terminal. except tweepy.TweepyException as error: print('\nError TweepyException. Retweet not successful. Reason: ') print(error) except tweepy.HTTPException as error: print('\nError HTTPException. Retweet not successful. 
Reason: ') print(error) def QtRetweetKeyword(self, api, keyword, rewteetRange, secs, finishedAction: types.FunctionType): tweets = getTweetsKeyword(api, keyword, rewteetRange) self.QtRetweetList(api, tweets, 0, secs, finishedAction) @QtCore.Slot() def retweet(self): self.label_err_message.setText("") if (not self.archytas.connected): self.label_err_message.setText("Error: The app is not connected to Twitter") return api = self.archytas.api # Retweet some tweets with the hashtag try: retweetRange = int(self.edit_number_retweets.text()) except: self.label_err_message.setText("Error: Number of retweets is not a number") return keyword = self.edit_keyword.text() self.button_retweet.setEnabled(False) self.label_err_message.setText("Retweeting...") finishedAction = self.finishedRetweetingActions seconds_between_retweets = 2 self.QtRetweetKeyword(api, keyword, retweetRange, seconds_between_retweets, finishedAction) def finishedRetweetingActions(self): self.button_retweet.setEnabled(True) self.label_err_message.setText("Successfully retweeted") def set_connected(self, connected: bool): self.button_retweet.setEnabled(connected) class OutputerTweetWidget(OutputerInterface): def __init__(self, TweetWidget): super().__init__() self.tweetw = TweetWidget def print(self, message: str) -> None: self.tweetw.update_message(message) class TweetWidget(QtWidgets.QWidget): def __init__(self, archytas: 'ArchytasWidget'): super().__init__() self.archytas = archytas self.csv_path = "<No file loaded>" self.loaded_csv = False self.connected = False self.title = QtWidgets.QLabel("Random tweet", alignment=QtCore.Qt.AlignCenter) self.title.setFont(QtGui.QFont("Default", 16)) self.label_err_message = QtWidgets.QLabel("", alignment=QtCore.Qt.AlignCenter) self.layout_input_csv = QtWidgets.QHBoxLayout() self.label_input_csv = QtWidgets.QLabel("Random tweet source:", alignment=QtCore.Qt.AlignLeft) self.label_location_csv = QtWidgets.QLabel(self.csv_path, alignment=QtCore.Qt.AlignLeft) self.button_load_csv = QtWidgets.QPushButton("Browse...") self.layout_input_csv.addWidget(self.label_input_csv) self.layout_input_csv.addWidget(self.label_location_csv) self.layout_input_csv.addWidget(self.button_load_csv) self.button_tweet = QtWidgets.QPushButton("Tweet") self.layout = QtWidgets.QVBoxLayout(self) self.layout.addWidget(self.title) self.layout.addWidget(self.label_err_message) self.layout.addLayout(self.layout_input_csv) self.layout.addWidget(self.button_tweet) self.button_tweet.clicked.connect(self.tweet) self.button_load_csv.clicked.connect(self.browse_csv) self.try_enabling_tweet_button() @QtCore.Slot() def tweet(self): self.label_err_message.setText("") if (not self.archytas.connected): self.label_err_message.setText("Error: The app is not connected to Twitter") return if (not self.loaded_csv): self.label_err_message.setText("Error: Invalid tweet source file path") return api = self.archytas.api # Tweet randomly selected tweets number_of_tweets = 1 outputer = OutputerTweetWidget(self) tweetRandom(api, self.csv_path, number_of_tweets, outputer) @QtCore.Slot() def browse_csv(self): file_input_csv = QtWidgets.QFileDialog() path_tuple: tuple = file_input_csv.getOpenFileName() path = path_tuple[0] self.update_csv_path(path) def update_csv_path(self, path): self.loaded_csv = True self.csv_path = path self.label_location_csv.setText(self.csv_path) self.try_enabling_tweet_button() def try_enabling_tweet_button(self): if (self.connected and self.loaded_csv): self.button_tweet.setEnabled(True) else: 
self.button_tweet.setEnabled(False) def set_connected(self, connected: bool): self.connected = connected self.try_enabling_tweet_button() def update_message(self, message: str) -> None: self.label_err_message.setText(message) class ArchytasWidget(QtWidgets.QWidget): def __init__(self, authdata_path): super().__init__() self.connected = False self.authdataw = AuthDataWidget(authdata_path, self) self.retweetw = RetweetWidget(self) self.tweetw = TweetWidget(self) self.line1 = QtWidgets.QFrame() self.line1.setFrameShape(QtWidgets.QFrame.HLine) self.line1.setFrameShadow(QtWidgets.QFrame.Sunken) self.line2 = QtWidgets.QFrame() self.line2.setFrameShape(QtWidgets.QFrame.HLine) self.line2.setFrameShadow(QtWidgets.QFrame.Sunken) self.layout = QtWidgets.QVBoxLayout(self) self.layout.addWidget(self.authdataw) self.layout.addWidget(self.line1) self.layout.addWidget(self.retweetw) self.layout.addWidget(self.line2) self.layout.addWidget(self.tweetw) def update_api(self, api: tweepy.API): self.api = api if (self.api is not None): self.connected = True else: self.connected = False self.retweetw.set_connected(self.connected) self.tweetw.set_connected(self.connected) def resource_path(relative_path): if hasattr(sys, '_MEIPASS'): return join(sys._MEIPASS, relative_path) return join(abspath('.'), relative_path) def main(): app = QtWidgets.QApplication([]) widget = ArchytasWidget("auth_data.json") widget.resize(800, 600) widget.show() widget.setWindowTitle("Archytas") widget.setWindowIcon( QtGui.QIcon(resource_path("./assets/icon.png")) ) sys.exit(app.exec_()) if __name__=="__main__": main()
42.503817
126
0.699653
import sys from PySide2 import QtCore, QtWidgets, QtGui from os.path import exists, join, abspath from os import remove import tweepy from auth.auth import AuthData from tweet.tweet import getTweetsKeyword, tweetRandom, OutputerInterface import types class AuthDataInput(QtWidgets.QWidget): def __init__(self, wrapper: 'AuthDataWidget', authdata_path: str): super().__init__() self.authdata_path = authdata_path self.wrapper = wrapper self.layout_consumer_key = QtWidgets.QHBoxLayout() self.label_consumer_key = QtWidgets.QLabel("Consumer Key", alignment=QtCore.Qt.AlignCenter) self.edit_consumer_key = QtWidgets.QLineEdit() self.layout_consumer_key.addWidget(self.label_consumer_key) self.layout_consumer_key.addWidget(self.edit_consumer_key) self.layout_consumer_secret = QtWidgets.QHBoxLayout() self.label_consumer_secret = QtWidgets.QLabel("Consumer Secret", alignment=QtCore.Qt.AlignCenter) self.edit_consumer_secret = QtWidgets.QLineEdit() self.layout_consumer_secret.addWidget(self.label_consumer_secret) self.layout_consumer_secret.addWidget(self.edit_consumer_secret) self.layout_access_token = QtWidgets.QHBoxLayout() self.label_access_token = QtWidgets.QLabel("Access Token", alignment=QtCore.Qt.AlignCenter) self.edit_access_token = QtWidgets.QLineEdit() self.layout_access_token.addWidget(self.label_access_token) self.layout_access_token.addWidget(self.edit_access_token) self.layout_access_token_secret = QtWidgets.QHBoxLayout() self.label_access_token_secret = QtWidgets.QLabel("Access Token Secret", alignment=QtCore.Qt.AlignCenter) self.edit_access_token_secret = QtWidgets.QLineEdit() self.layout_access_token_secret.addWidget(self.label_access_token_secret) self.layout_access_token_secret.addWidget(self.edit_access_token_secret) self.button_save = QtWidgets.QPushButton("Save") self.layout = QtWidgets.QVBoxLayout(self) self.layout.addLayout(self.layout_consumer_key) self.layout.addLayout(self.layout_consumer_secret) self.layout.addLayout(self.layout_access_token) self.layout.addLayout(self.layout_access_token_secret) self.layout.addWidget(self.button_save) self.button_save.clicked.connect(self.save) @QtCore.Slot() def save(self): consumer_key = self.edit_consumer_key.text() consumer_secret = self.edit_consumer_secret.text() access_token = self.edit_access_token.text() access_token_secret = self.edit_access_token_secret.text() ad = AuthData(consumer_key, consumer_secret, access_token, access_token_secret) ad.SaveToJson(self.authdata_path) self.wrapper.update_storage_status() class AuthDataStored(QtWidgets.QWidget): def __init__(self, wrapper: 'AuthDataWidget', authdata_path: str): super().__init__() self.wrapper = wrapper self.authdata_path = authdata_path self.ad = AuthData.CreateFromJson(authdata_path) self.layout_consumer_key = QtWidgets.QHBoxLayout() self.label_consumer_key = QtWidgets.QLabel("Consumer Key", alignment=QtCore.Qt.AlignLeft) self.label_literal_consumer_key = QtWidgets.QLabel(self.ad.consumer_key, alignment=QtCore.Qt.AlignRight) self.layout_consumer_key.addWidget(self.label_consumer_key) self.layout_consumer_key.addWidget(self.label_literal_consumer_key) self.layout_consumer_secret = QtWidgets.QHBoxLayout() self.label_consumer_secret = QtWidgets.QLabel("Consumer Secret", alignment=QtCore.Qt.AlignLeft) self.label_literal_consumer_secret = QtWidgets.QLabel(self.ad.consumer_secret, alignment=QtCore.Qt.AlignRight) self.layout_consumer_secret.addWidget(self.label_consumer_secret) self.layout_consumer_secret.addWidget(self.label_literal_consumer_secret) self.layout_access_token = 
QtWidgets.QHBoxLayout() self.label_access_token = QtWidgets.QLabel("Access Token", alignment=QtCore.Qt.AlignLeft) self.label_literal_access_token = QtWidgets.QLabel(self.ad.access_token, alignment=QtCore.Qt.AlignRight) self.layout_access_token.addWidget(self.label_access_token) self.layout_access_token.addWidget(self.label_literal_access_token) self.layout_access_token_secret = QtWidgets.QHBoxLayout() self.label_access_token_secret = QtWidgets.QLabel("Access Token Secret", alignment=QtCore.Qt.AlignLeft) self.label_literal_access_token_secret = QtWidgets.QLabel(self.ad.access_token_secret, alignment=QtCore.Qt.AlignRight) self.layout_access_token_secret.addWidget(self.label_access_token_secret) self.layout_access_token_secret.addWidget(self.label_literal_access_token_secret) self.text = QtWidgets.QLabel("", alignment=QtCore.Qt.AlignCenter) self.layout_buttons = QtWidgets.QHBoxLayout() self.button_connect = QtWidgets.QPushButton("Connect") self.button_edit = QtWidgets.QPushButton("Edit") self.button_delete = QtWidgets.QPushButton("Delete") self.layout_buttons.addWidget(self.button_connect) self.layout_buttons.addWidget(self.button_edit) self.layout_buttons.addWidget(self.button_delete) self.layout = QtWidgets.QVBoxLayout(self) self.layout.addWidget(self.text) self.layout.addLayout(self.layout_consumer_key) self.layout.addLayout(self.layout_consumer_secret) self.layout.addLayout(self.layout_access_token) self.layout.addLayout(self.layout_access_token_secret) self.layout.addLayout(self.layout_buttons) self.button_connect.clicked.connect(self.authenticate) self.button_delete.clicked.connect(self.delete_auth) @QtCore.Slot() def authenticate(self): auth = tweepy.OAuthHandler(self.ad.consumer_key, self.ad.consumer_secret) auth.set_access_token(self.ad.access_token, self.ad.access_token_secret) api = tweepy.API(auth) auth_success = False try: api.verify_credentials() result_text = "Authentication OK" auth_success = True except: result_text = "Error during authentication" self.text.setText(result_text) if (auth_success): self.wrapper.update_api(api) @QtCore.Slot() def delete_auth(self): if exists(self.authdata_path): remove(self.authdata_path) self.wrapper.update_storage_status() class AuthDataWidget(QtWidgets.QWidget): def __init__(self, authdata_path, archytas: 'ArchytasWidget'): super().__init__() self.authdata_path = authdata_path self.archytas = archytas self.calculate_authdata() self.title = QtWidgets.QLabel("Authentication", alignment=QtCore.Qt.AlignCenter) self.title.setFont(QtGui.QFont("Default", 16)) self.layout = QtWidgets.QVBoxLayout(self) self.layout.addWidget(self.title) self.layout.addWidget(self.authdata_inner) def calculate_authdata(self): self.isAuthDataStored = exists(self.authdata_path) ad = AuthData.CreateFromJson(self.authdata_path) if (self.isAuthDataStored and ad is not None): self.authdata_inner = AuthDataStored(self, self.authdata_path) else: self.authdata_inner = AuthDataInput(self, self.authdata_path) def update_storage_status(self): aw = self.layout.takeAt(1) aw.widget().deleteLater() self.calculate_authdata() self.layout.addWidget(self.authdata_inner) def update_api(self, api: tweepy.API): self.api = api if (self.api is not None): self.archytas.update_api(api) class RetweetWidget(QtWidgets.QWidget): def __init__(self, archytas: 'ArchytasWidget'): super().__init__() self.archytas = archytas self.title = QtWidgets.QLabel("Auto retweeter", alignment=QtCore.Qt.AlignCenter) self.title.setFont(QtGui.QFont("Default", 16)) self.label_err_message = QtWidgets.QLabel("", 
alignment=QtCore.Qt.AlignCenter) self.layout_number_retweets = QtWidgets.QHBoxLayout() self.label_number_retweets = QtWidgets.QLabel("Number of retweets", alignment=QtCore.Qt.AlignLeft) self.edit_number_retweets = QtWidgets.QLineEdit() self.layout_number_retweets.addWidget(self.label_number_retweets) self.layout_number_retweets.addWidget(self.edit_number_retweets) self.layout_keyword = QtWidgets.QHBoxLayout() self.label_keyword = QtWidgets.QLabel("Keyword", alignment=QtCore.Qt.AlignLeft) self.edit_keyword = QtWidgets.QLineEdit() self.layout_keyword.addWidget(self.label_keyword) self.layout_keyword.addWidget(self.edit_keyword) self.button_retweet = QtWidgets.QPushButton("Retweet") self.layout = QtWidgets.QVBoxLayout(self) self.layout.addWidget(self.title) self.layout.addWidget(self.label_err_message) self.layout.addLayout(self.layout_number_retweets) self.layout.addLayout(self.layout_keyword) self.layout.addWidget(self.button_retweet) self.button_retweet.clicked.connect(self.retweet) self.set_connected(False) def QtRetweetList(self, api: tweepy.API, tweets: list, index: int, secs: int, finishedAction: types.FunctionType): tweet = tweets[index] try: print('\nRetweet Bot found tweet by @' + tweet.user.screen_name + '. ' + 'Attempting to retweet.') tweet.retweet() print('Retweet published successfully.') index = index + 1 if (index < len(tweets)): QtCore.QTimer.singleShot(secs * 1000, lambda: self.QtRetweetList(api, tweets, index, secs, finishedAction)) else: finishedAction() except tweepy.TweepyException as error: print('\nError TweepyException. Retweet not successful. Reason: ') print(error) except tweepy.HTTPException as error: print('\nError HTTPException. Retweet not successful. Reason: ') print(error) def QtRetweetKeyword(self, api, keyword, rewteetRange, secs, finishedAction: types.FunctionType): tweets = getTweetsKeyword(api, keyword, rewteetRange) self.QtRetweetList(api, tweets, 0, secs, finishedAction) @QtCore.Slot() def retweet(self): self.label_err_message.setText("") if (not self.archytas.connected): self.label_err_message.setText("Error: The app is not connected to Twitter") return api = self.archytas.api try: retweetRange = int(self.edit_number_retweets.text()) except: self.label_err_message.setText("Error: Number of retweets is not a number") return keyword = self.edit_keyword.text() self.button_retweet.setEnabled(False) self.label_err_message.setText("Retweeting...") finishedAction = self.finishedRetweetingActions seconds_between_retweets = 2 self.QtRetweetKeyword(api, keyword, retweetRange, seconds_between_retweets, finishedAction) def finishedRetweetingActions(self): self.button_retweet.setEnabled(True) self.label_err_message.setText("Successfully retweeted") def set_connected(self, connected: bool): self.button_retweet.setEnabled(connected) class OutputerTweetWidget(OutputerInterface): def __init__(self, TweetWidget): super().__init__() self.tweetw = TweetWidget def print(self, message: str) -> None: self.tweetw.update_message(message) class TweetWidget(QtWidgets.QWidget): def __init__(self, archytas: 'ArchytasWidget'): super().__init__() self.archytas = archytas self.csv_path = "<No file loaded>" self.loaded_csv = False self.connected = False self.title = QtWidgets.QLabel("Random tweet", alignment=QtCore.Qt.AlignCenter) self.title.setFont(QtGui.QFont("Default", 16)) self.label_err_message = QtWidgets.QLabel("", alignment=QtCore.Qt.AlignCenter) self.layout_input_csv = QtWidgets.QHBoxLayout() self.label_input_csv = QtWidgets.QLabel("Random tweet source:", 
alignment=QtCore.Qt.AlignLeft) self.label_location_csv = QtWidgets.QLabel(self.csv_path, alignment=QtCore.Qt.AlignLeft) self.button_load_csv = QtWidgets.QPushButton("Browse...") self.layout_input_csv.addWidget(self.label_input_csv) self.layout_input_csv.addWidget(self.label_location_csv) self.layout_input_csv.addWidget(self.button_load_csv) self.button_tweet = QtWidgets.QPushButton("Tweet") self.layout = QtWidgets.QVBoxLayout(self) self.layout.addWidget(self.title) self.layout.addWidget(self.label_err_message) self.layout.addLayout(self.layout_input_csv) self.layout.addWidget(self.button_tweet) self.button_tweet.clicked.connect(self.tweet) self.button_load_csv.clicked.connect(self.browse_csv) self.try_enabling_tweet_button() @QtCore.Slot() def tweet(self): self.label_err_message.setText("") if (not self.archytas.connected): self.label_err_message.setText("Error: The app is not connected to Twitter") return if (not self.loaded_csv): self.label_err_message.setText("Error: Invalid tweet source file path") return api = self.archytas.api number_of_tweets = 1 outputer = OutputerTweetWidget(self) tweetRandom(api, self.csv_path, number_of_tweets, outputer) @QtCore.Slot() def browse_csv(self): file_input_csv = QtWidgets.QFileDialog() path_tuple: tuple = file_input_csv.getOpenFileName() path = path_tuple[0] self.update_csv_path(path) def update_csv_path(self, path): self.loaded_csv = True self.csv_path = path self.label_location_csv.setText(self.csv_path) self.try_enabling_tweet_button() def try_enabling_tweet_button(self): if (self.connected and self.loaded_csv): self.button_tweet.setEnabled(True) else: self.button_tweet.setEnabled(False) def set_connected(self, connected: bool): self.connected = connected self.try_enabling_tweet_button() def update_message(self, message: str) -> None: self.label_err_message.setText(message) class ArchytasWidget(QtWidgets.QWidget): def __init__(self, authdata_path): super().__init__() self.connected = False self.authdataw = AuthDataWidget(authdata_path, self) self.retweetw = RetweetWidget(self) self.tweetw = TweetWidget(self) self.line1 = QtWidgets.QFrame() self.line1.setFrameShape(QtWidgets.QFrame.HLine) self.line1.setFrameShadow(QtWidgets.QFrame.Sunken) self.line2 = QtWidgets.QFrame() self.line2.setFrameShape(QtWidgets.QFrame.HLine) self.line2.setFrameShadow(QtWidgets.QFrame.Sunken) self.layout = QtWidgets.QVBoxLayout(self) self.layout.addWidget(self.authdataw) self.layout.addWidget(self.line1) self.layout.addWidget(self.retweetw) self.layout.addWidget(self.line2) self.layout.addWidget(self.tweetw) def update_api(self, api: tweepy.API): self.api = api if (self.api is not None): self.connected = True else: self.connected = False self.retweetw.set_connected(self.connected) self.tweetw.set_connected(self.connected) def resource_path(relative_path): if hasattr(sys, '_MEIPASS'): return join(sys._MEIPASS, relative_path) return join(abspath('.'), relative_path) def main(): app = QtWidgets.QApplication([]) widget = ArchytasWidget("auth_data.json") widget.resize(800, 600) widget.show() widget.setWindowTitle("Archytas") widget.setWindowIcon( QtGui.QIcon(resource_path("./assets/icon.png")) ) sys.exit(app.exec_()) if __name__=="__main__": main()
true
true
f70dbdc8b99f161809f8e3871477a8986cf70fdd
958
py
Python
PastYearFeatures/setup.py
Slavkata/Forecast-Report
3cfeac5ab6b60ad32e1b9433b3281b5336373c30
[ "MIT" ]
1
2019-04-11T12:48:44.000Z
2019-04-11T12:48:44.000Z
PastYearFeatures/setup.py
Slavkata/Forecast-Report
3cfeac5ab6b60ad32e1b9433b3281b5336373c30
[ "MIT" ]
null
null
null
PastYearFeatures/setup.py
Slavkata/Forecast-Report
3cfeac5ab6b60ad32e1b9433b3281b5336373c30
[ "MIT" ]
null
null
null
import pandas as p
import numpy as np

from file_setup_helper import FileSetupHelper as fsh
from preprocess_data import PreprocessData as pd
from model_export import ModelExport as me

import sys

from sklearn.linear_model import Ridge


def main():
    #call for file download with given date
    file_name = fsh(sys.argv[1], 1460, 0).download_csv()

    #aquire features and target data filenames
    features_file_name, target_file_name = pd(file_name).preprocess(True, None)

    #init model, and open the features and target file
    #(drop column added by pandas and bypass 29.02 potential error)
    model = Ridge(alpha=1, fit_intercept=True)
    X_train = p.read_csv(features_file_name).drop(['Unnamed: 0', 'Date_29.02'], axis=1)
    y_train = p.read_csv(target_file_name).drop(['Unnamed: 0'], axis=1)

    #export model with the given datasets and model
    me(model, X_train, y_train).fit_and_export(sys.argv[2])


if __name__ == '__main__':
    main()
35.481481
87
0.742171
import pandas as p import numpy as np from file_setup_helper import FileSetupHelper as fsh from preprocess_data import PreprocessData as pd from model_export import ModelExport as me import sys from sklearn.linear_model import Ridge def main(): file_name = fsh(sys.argv[1], 1460, 0).download_csv() features_file_name, target_file_name = pd(file_name).preprocess(True, None) model = Ridge(alpha=1, fit_intercept=True) X_train = p.read_csv(features_file_name).drop(['Unnamed: 0', 'Date_29.02'], axis=1) y_train = p.read_csv(target_file_name).drop(['Unnamed: 0'], axis=1) me(model, X_train, y_train).fit_and_export(sys.argv[2]) if __name__ == '__main__': main()
true
true
f70dbf065339946b0e4e8e12839c75e81cfa780d
971
py
Python
api/inventory/views.py
BuildForSDGCohort2/Team-1053
6b3fc485d9be22ca0d03832e0d658359ca88e1c7
[ "Apache-2.0" ]
null
null
null
api/inventory/views.py
BuildForSDGCohort2/Team-1053
6b3fc485d9be22ca0d03832e0d658359ca88e1c7
[ "Apache-2.0" ]
7
2020-09-05T16:58:46.000Z
2020-09-26T17:02:24.000Z
api/inventory/views.py
BuildForSDGCohort2/Team-1053
6b3fc485d9be22ca0d03832e0d658359ca88e1c7
[ "Apache-2.0" ]
2
2020-08-20T13:38:16.000Z
2020-09-10T10:08:27.000Z
from .models import Stock, Tag, Product
from rest_framework import viewsets
from rest_framework.permissions import AllowAny
from .serializers import (
    StockSerializer, TagSerializer,
    ProductSerializer
)


class StockViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows stock to be viewed or edited.
    """
    queryset = Stock.objects.all().order_by('-date_added')
    serializer_class = StockSerializer
    permission_classes = [AllowAny]


class TagViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows tag to be viewed or edited.
    """
    queryset = Tag.objects.all().order_by('-id')
    serializer_class = TagSerializer
    permission_classes = [AllowAny]


class ProductViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows product to be viewed or edited.
    """
    queryset = Product.objects.all().order_by('-date_created')
    serializer_class = ProductSerializer
    permission_classes = [AllowAny]
26.972222
62
0.723996
from .models import Stock, Tag, Product from rest_framework import viewsets from rest_framework.permissions import AllowAny from .serializers import ( StockSerializer, TagSerializer, ProductSerializer ) class StockViewSet(viewsets.ModelViewSet): queryset = Stock.objects.all().order_by('-date_added') serializer_class = StockSerializer permission_classes = [AllowAny] class TagViewSet(viewsets.ModelViewSet): queryset = Tag.objects.all().order_by('-id') serializer_class = TagSerializer permission_classes = [AllowAny] class ProductViewSet(viewsets.ModelViewSet): queryset = Product.objects.all().order_by('-date_created') serializer_class = ProductSerializer permission_classes = [AllowAny]
true
true
f70dbf18d4e9125b9c469885dcf5b2a766c2f9e4
4,773
py
Python
tensorflow/python/platform/flags.py
AlexChrisF/udacity
b7f85a74058fc63ccb7601c418450ab934ef5953
[ "Apache-2.0" ]
101
2016-12-03T11:40:52.000Z
2017-12-23T02:02:03.000Z
tensorflow/python/platform/flags.py
AlexChrisF/udacity
b7f85a74058fc63ccb7601c418450ab934ef5953
[ "Apache-2.0" ]
10
2017-07-13T00:24:03.000Z
2017-07-17T07:39:03.000Z
tensorflow/python/platform/flags.py
AlexChrisF/udacity
b7f85a74058fc63ccb7601c418450ab934ef5953
[ "Apache-2.0" ]
47
2016-12-04T12:37:24.000Z
2018-01-14T18:13:07.000Z
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of the flags interface."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse as _argparse

from tensorflow.python.util.all_util import remove_undocumented

_global_parser = _argparse.ArgumentParser()


# pylint: disable=invalid-name
class _FlagValues(object):
  """Global container and accessor for flags and their values."""

  def __init__(self):
    self.__dict__['__flags'] = {}
    self.__dict__['__parsed'] = False

  def _parse_flags(self, args=None):
    result, unparsed = _global_parser.parse_known_args(args=args)
    for flag_name, val in vars(result).items():
      self.__dict__['__flags'][flag_name] = val
    self.__dict__['__parsed'] = True
    return unparsed

  def __getattr__(self, name):
    """Retrieves the 'value' attribute of the flag --name."""
    if not self.__dict__['__parsed']:
      self._parse_flags()
    if name not in self.__dict__['__flags']:
      raise AttributeError(name)
    return self.__dict__['__flags'][name]

  def __setattr__(self, name, value):
    """Sets the 'value' attribute of the flag --name."""
    if not self.__dict__['__parsed']:
      self._parse_flags()
    self.__dict__['__flags'][name] = value


def _define_helper(flag_name, default_value, docstring, flagtype):
  """Registers 'flag_name' with 'default_value' and 'docstring'."""
  _global_parser.add_argument('--' + flag_name,
                              default=default_value,
                              help=docstring,
                              type=flagtype)


# Provides the global object that can be used to access flags.
FLAGS = _FlagValues()


def DEFINE_string(flag_name, default_value, docstring):
  """Defines a flag of type 'string'.

  Args:
    flag_name: The name of the flag as a string.
    default_value: The default value the flag should take as a string.
    docstring: A helpful message explaining the use of the flag.
  """
  _define_helper(flag_name, default_value, docstring, str)


def DEFINE_integer(flag_name, default_value, docstring):
  """Defines a flag of type 'int'.

  Args:
    flag_name: The name of the flag as a string.
    default_value: The default value the flag should take as an int.
    docstring: A helpful message explaining the use of the flag.
  """
  _define_helper(flag_name, default_value, docstring, int)


def DEFINE_boolean(flag_name, default_value, docstring):
  """Defines a flag of type 'boolean'.

  Args:
    flag_name: The name of the flag as a string.
    default_value: The default value the flag should take as a boolean.
    docstring: A helpful message explaining the use of the flag.
  """
  # Register a custom function for 'bool' so --flag=True works.
  def str2bool(v):
    return v.lower() in ('true', 't', '1')
  _global_parser.add_argument('--' + flag_name,
                              nargs='?',
                              const=True,
                              help=docstring,
                              default=default_value,
                              type=str2bool)
  # Add negated version, stay consistent with argparse with regard to
  # dashes in flag names.
  _global_parser.add_argument('--no' + flag_name,
                              action='store_false',
                              dest=flag_name.replace('-', '_'))


# The internal google library defines the following alias, so we match
# the API for consistency.
DEFINE_bool = DEFINE_boolean  # pylint: disable=invalid-name


def DEFINE_float(flag_name, default_value, docstring):
  """Defines a flag of type 'float'.

  Args:
    flag_name: The name of the flag as a string.
    default_value: The default value the flag should take as a float.
    docstring: A helpful message explaining the use of the flag.
  """
  _define_helper(flag_name, default_value, docstring, float)

_allowed_symbols = [
    # We rely on gflags documentation.
    'DEFINE_bool',
    'DEFINE_boolean',
    'DEFINE_float',
    'DEFINE_integer',
    'DEFINE_string',
    'FLAGS',
]
remove_undocumented(__name__, _allowed_symbols)
33.145833
80
0.670228
from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse as _argparse from tensorflow.python.util.all_util import remove_undocumented _global_parser = _argparse.ArgumentParser() class _FlagValues(object): def __init__(self): self.__dict__['__flags'] = {} self.__dict__['__parsed'] = False def _parse_flags(self, args=None): result, unparsed = _global_parser.parse_known_args(args=args) for flag_name, val in vars(result).items(): self.__dict__['__flags'][flag_name] = val self.__dict__['__parsed'] = True return unparsed def __getattr__(self, name): if not self.__dict__['__parsed']: self._parse_flags() if name not in self.__dict__['__flags']: raise AttributeError(name) return self.__dict__['__flags'][name] def __setattr__(self, name, value): if not self.__dict__['__parsed']: self._parse_flags() self.__dict__['__flags'][name] = value def _define_helper(flag_name, default_value, docstring, flagtype): _global_parser.add_argument('--' + flag_name, default=default_value, help=docstring, type=flagtype) FLAGS = _FlagValues() def DEFINE_string(flag_name, default_value, docstring): _define_helper(flag_name, default_value, docstring, str) def DEFINE_integer(flag_name, default_value, docstring): _define_helper(flag_name, default_value, docstring, int) def DEFINE_boolean(flag_name, default_value, docstring): def str2bool(v): return v.lower() in ('true', 't', '1') _global_parser.add_argument('--' + flag_name, nargs='?', const=True, help=docstring, default=default_value, type=str2bool) _global_parser.add_argument('--no' + flag_name, action='store_false', dest=flag_name.replace('-', '_')) DEFINE_bool = DEFINE_boolean def DEFINE_float(flag_name, default_value, docstring): _define_helper(flag_name, default_value, docstring, float) _allowed_symbols = [ 'DEFINE_bool', 'DEFINE_boolean', 'DEFINE_float', 'DEFINE_integer', 'DEFINE_string', 'FLAGS', ] remove_undocumented(__name__, _allowed_symbols)
true
true
f70dbf8b781aa944ac738e7c84d37db767acba27
8,634
py
Python
airtech_api/users/views.py
chidioguejiofor/airtech-api
45d77da0cc4230dd3cb7ab4cbb5168a9239850f5
[ "MIT" ]
1
2019-04-04T12:27:55.000Z
2019-04-04T12:27:55.000Z
airtech_api/users/views.py
chidioguejiofor/airtech-api
45d77da0cc4230dd3cb7ab4cbb5168a9239850f5
[ "MIT" ]
34
2019-03-26T11:18:17.000Z
2022-02-10T08:12:36.000Z
airtech_api/users/views.py
chidioguejiofor/airtech-api
45d77da0cc4230dd3cb7ab4cbb5168a9239850f5
[ "MIT" ]
null
null
null
# Create your views here. from rest_framework.views import APIView from .serializers import UserSerializer, LoginSerializer from ..utils.helpers.json_helpers import generate_response, raise_error, add_token_to_response, validate_url, validate_confirmation_request from ..utils import success_messages from ..utils.error_messages import serialization_errors, tokenization_errors from rest_framework.status import HTTP_404_NOT_FOUND, HTTP_201_CREATED, HTTP_401_UNAUTHORIZED, HTTP_202_ACCEPTED from ..utils.validators.token_validator import TokenValidator, VerifiedUserTokenValidator from ..utils.constants import CONFIRM_EMAIL_TYPE, ADMIN_REQUEST_EMAIL_TYPE, ADMIN_REQUEST_SUBJECT, CONFRIM_EMAIL_SUBJECT from django.http import HttpResponseRedirect from rest_framework.decorators import api_view from rest_framework.parsers import MultiPartParser, JSONParser from ..utils.helpers.email_helpers import send_email_with_token from airtech_api.services.cloudinary import upload_profile_picture from airtech_api.users.models import User from datetime import datetime, timedelta from django.core.files.storage import default_storage from django.core.files.base import ContentFile from django.core.files.uploadedfile import UploadedFile import os class SignupView(APIView): @staticmethod def post(request): """ Saves a new user to the app Returns: A Response object containing the JSON response """ request_data = dict(**request.data) callback_url = request_data.get('callbackURL', '') err_dict = {} if not validate_url(callback_url): err_dict = { 'callbackURL': [serialization_errors['invalid_url_field']] } serializer = UserSerializer(data=request_data) if serializer.is_valid() and not err_dict: _ = serializer.save() serialization_data = serializer.data user_email = serialization_data['email'] server_host = os.getenv('HOSTNAME') send_email_with_token( user_email, 'confirm-email.html', subject=CONFRIM_EMAIL_SUBJECT, redirect_url=callback_url, confirm_link=f'{server_host}/api/v1/auth/confirm-email', mail_type=CONFIRM_EMAIL_TYPE, ) return generate_response( success_messages['confirm_mail'].format(user_email), status_code=HTTP_201_CREATED) err_dict.update(serializer.errors) raise_error(serialization_errors['many_invalid_fields'], err_dict=err_dict) class ConfirmView(APIView): @staticmethod def get(request, **kwargs): token = kwargs.get('token', '') user, redirect_url = validate_confirmation_request( token, CONFIRM_EMAIL_TYPE, success_key='verified') if user: user.verified = True user.save() return HttpResponseRedirect(redirect_to=redirect_url) class RequestAdminAccessView(APIView): permission_classes = [VerifiedUserTokenValidator] protected_methods = ['POST'] @staticmethod def post(request): user = request.decoded_user callback_url = request.data.get('callbackURL', '') if not validate_url(callback_url): raise_error(serialization_errors['many_invalid_fields'], err_dict={ 'callbackURL': [serialization_errors['invalid_url_field']] }) if user.admin: raise_error(serialization_errors['regular_user_only'], status_code=403) if not user.image_url: raise_error(serialization_errors['profile_not_updated'], status_code=403) server_host = os.getenv('HOSTNAME') send_email_with_token( os.getenv('OWNER_EMAIL'), 'admin-request-email.html', subject=ADMIN_REQUEST_SUBJECT, redirect_url=callback_url, confirm_link=f'{server_host}/api/v1/auth/request-admin-access', mail_type=ADMIN_REQUEST_EMAIL_TYPE, first_name=user.first_name, last_name=user.last_name, user_email=user.email, profile_picture=user.image_url, ) return 
generate_response(success_messages['admin_request_sent']) class LoginView(APIView): @staticmethod def post(request): """ Saves a new user to the app Returns: A Response object containing the JSON response """ request_data = dict(**request.data) serializer = LoginSerializer(data=request_data) if serializer.is_valid(raise_exception=False): user = serializer.validated_data serialized_user = UserSerializer(user).data serialization_data = add_token_to_response(serialized_user, exp=datetime.now() + timedelta(days=4)) return generate_response( success_messages['auth_successful'].format('Login'), serialization_data) if 'non_field_errors' in serializer.errors.keys(): raise_error(serialization_errors['user_not_found'], status_code=HTTP_404_NOT_FOUND) raise_error(serialization_errors['many_invalid_fields'], err_dict=serializer.errors) @api_view(['GET']) def accept_admin_request(request, **kwargs): token = kwargs.get('token', '') user, redirect_url = validate_confirmation_request( token, ADMIN_REQUEST_EMAIL_TYPE, success_key='admin_approval') if user: user.admin = True user.save() return HttpResponseRedirect(redirect_to=redirect_url) class ResendEmailEndpoint(APIView): permission_classes = [TokenValidator] protected_methods = ['POST'] @staticmethod def post(request): callback_url = request.data.get('callbackURL', '') if not validate_url(callback_url): raise_error(serialization_errors['many_invalid_fields'], err_dict={ 'callbackURL': serialization_errors['invalid_url_field'] }) email = request.data.get('email') user = request.decoded_user if user.verified: raise_error(serialization_errors['user_already_verified']) server_host = os.getenv('HOSTNAME') send_email_with_token( user.email, 'confirm-email.html', subject=CONFRIM_EMAIL_SUBJECT, redirect_url=callback_url, confirm_link=f'{server_host}/api/v1/auth/confirm-email', mail_type=CONFIRM_EMAIL_TYPE, ) return generate_response( success_messages['confirm_mail'].format(email)) class UserProfilePicture(APIView): parser_classes = ( MultiPartParser, JSONParser, ) permission_classes = [TokenValidator] @staticmethod def patch(request): file = request.data.get('picture') if not file: raise_error(serialization_errors['many_invalid_fields'], err_dict={ 'picture': serialization_errors['missing_field'], }) if not isinstance(file, UploadedFile): raise_error(serialization_errors['many_invalid_fields'], err_dict={ 'picture': serialization_errors['value_not_a_file'], }) user = request.decoded_user octet_stream_is_valid = file.content_type == 'application/octet-stream' and os.getenv( 'ENVIRONMENT') == 'test' file_is_image = file.content_type.split('/')[0] == 'image' if not octet_stream_is_valid and not file_is_image: raise_error(serialization_errors['not_an_image']) file_size = file.size if file_size > 2_000_000: raise_error(serialization_errors['image_too_large']) file_name = str(user.id) + datetime.now().strftime('%c') + '.jpg' default_storage.save(file_name, ContentFile(file.read())) upload_profile_picture.delay(user.id, user.image_public_id, file_name) return generate_response('Your request is being processed.', status_code=HTTP_202_ACCEPTED)
36.584746
139
0.633079
from rest_framework.views import APIView from .serializers import UserSerializer, LoginSerializer from ..utils.helpers.json_helpers import generate_response, raise_error, add_token_to_response, validate_url, validate_confirmation_request from ..utils import success_messages from ..utils.error_messages import serialization_errors, tokenization_errors from rest_framework.status import HTTP_404_NOT_FOUND, HTTP_201_CREATED, HTTP_401_UNAUTHORIZED, HTTP_202_ACCEPTED from ..utils.validators.token_validator import TokenValidator, VerifiedUserTokenValidator from ..utils.constants import CONFIRM_EMAIL_TYPE, ADMIN_REQUEST_EMAIL_TYPE, ADMIN_REQUEST_SUBJECT, CONFRIM_EMAIL_SUBJECT from django.http import HttpResponseRedirect from rest_framework.decorators import api_view from rest_framework.parsers import MultiPartParser, JSONParser from ..utils.helpers.email_helpers import send_email_with_token from airtech_api.services.cloudinary import upload_profile_picture from airtech_api.users.models import User from datetime import datetime, timedelta from django.core.files.storage import default_storage from django.core.files.base import ContentFile from django.core.files.uploadedfile import UploadedFile import os class SignupView(APIView): @staticmethod def post(request): request_data = dict(**request.data) callback_url = request_data.get('callbackURL', '') err_dict = {} if not validate_url(callback_url): err_dict = { 'callbackURL': [serialization_errors['invalid_url_field']] } serializer = UserSerializer(data=request_data) if serializer.is_valid() and not err_dict: _ = serializer.save() serialization_data = serializer.data user_email = serialization_data['email'] server_host = os.getenv('HOSTNAME') send_email_with_token( user_email, 'confirm-email.html', subject=CONFRIM_EMAIL_SUBJECT, redirect_url=callback_url, confirm_link=f'{server_host}/api/v1/auth/confirm-email', mail_type=CONFIRM_EMAIL_TYPE, ) return generate_response( success_messages['confirm_mail'].format(user_email), status_code=HTTP_201_CREATED) err_dict.update(serializer.errors) raise_error(serialization_errors['many_invalid_fields'], err_dict=err_dict) class ConfirmView(APIView): @staticmethod def get(request, **kwargs): token = kwargs.get('token', '') user, redirect_url = validate_confirmation_request( token, CONFIRM_EMAIL_TYPE, success_key='verified') if user: user.verified = True user.save() return HttpResponseRedirect(redirect_to=redirect_url) class RequestAdminAccessView(APIView): permission_classes = [VerifiedUserTokenValidator] protected_methods = ['POST'] @staticmethod def post(request): user = request.decoded_user callback_url = request.data.get('callbackURL', '') if not validate_url(callback_url): raise_error(serialization_errors['many_invalid_fields'], err_dict={ 'callbackURL': [serialization_errors['invalid_url_field']] }) if user.admin: raise_error(serialization_errors['regular_user_only'], status_code=403) if not user.image_url: raise_error(serialization_errors['profile_not_updated'], status_code=403) server_host = os.getenv('HOSTNAME') send_email_with_token( os.getenv('OWNER_EMAIL'), 'admin-request-email.html', subject=ADMIN_REQUEST_SUBJECT, redirect_url=callback_url, confirm_link=f'{server_host}/api/v1/auth/request-admin-access', mail_type=ADMIN_REQUEST_EMAIL_TYPE, first_name=user.first_name, last_name=user.last_name, user_email=user.email, profile_picture=user.image_url, ) return generate_response(success_messages['admin_request_sent']) class LoginView(APIView): @staticmethod def post(request): request_data = dict(**request.data) 
serializer = LoginSerializer(data=request_data) if serializer.is_valid(raise_exception=False): user = serializer.validated_data serialized_user = UserSerializer(user).data serialization_data = add_token_to_response(serialized_user, exp=datetime.now() + timedelta(days=4)) return generate_response( success_messages['auth_successful'].format('Login'), serialization_data) if 'non_field_errors' in serializer.errors.keys(): raise_error(serialization_errors['user_not_found'], status_code=HTTP_404_NOT_FOUND) raise_error(serialization_errors['many_invalid_fields'], err_dict=serializer.errors) @api_view(['GET']) def accept_admin_request(request, **kwargs): token = kwargs.get('token', '') user, redirect_url = validate_confirmation_request( token, ADMIN_REQUEST_EMAIL_TYPE, success_key='admin_approval') if user: user.admin = True user.save() return HttpResponseRedirect(redirect_to=redirect_url) class ResendEmailEndpoint(APIView): permission_classes = [TokenValidator] protected_methods = ['POST'] @staticmethod def post(request): callback_url = request.data.get('callbackURL', '') if not validate_url(callback_url): raise_error(serialization_errors['many_invalid_fields'], err_dict={ 'callbackURL': serialization_errors['invalid_url_field'] }) email = request.data.get('email') user = request.decoded_user if user.verified: raise_error(serialization_errors['user_already_verified']) server_host = os.getenv('HOSTNAME') send_email_with_token( user.email, 'confirm-email.html', subject=CONFRIM_EMAIL_SUBJECT, redirect_url=callback_url, confirm_link=f'{server_host}/api/v1/auth/confirm-email', mail_type=CONFIRM_EMAIL_TYPE, ) return generate_response( success_messages['confirm_mail'].format(email)) class UserProfilePicture(APIView): parser_classes = ( MultiPartParser, JSONParser, ) permission_classes = [TokenValidator] @staticmethod def patch(request): file = request.data.get('picture') if not file: raise_error(serialization_errors['many_invalid_fields'], err_dict={ 'picture': serialization_errors['missing_field'], }) if not isinstance(file, UploadedFile): raise_error(serialization_errors['many_invalid_fields'], err_dict={ 'picture': serialization_errors['value_not_a_file'], }) user = request.decoded_user octet_stream_is_valid = file.content_type == 'application/octet-stream' and os.getenv( 'ENVIRONMENT') == 'test' file_is_image = file.content_type.split('/')[0] == 'image' if not octet_stream_is_valid and not file_is_image: raise_error(serialization_errors['not_an_image']) file_size = file.size if file_size > 2_000_000: raise_error(serialization_errors['image_too_large']) file_name = str(user.id) + datetime.now().strftime('%c') + '.jpg' default_storage.save(file_name, ContentFile(file.read())) upload_profile_picture.delay(user.id, user.image_public_id, file_name) return generate_response('Your request is being processed.', status_code=HTTP_202_ACCEPTED)
true
true
f70dc0e1b47b0fa04fb3b4aa6f6643af19261dd4
2,521
py
Python
metricsML/Normalizator.py
Elscha/MetricsML
2ecbc42ad7bd2465f4f75658f44452ea5c552c3b
[ "Apache-2.0" ]
null
null
null
metricsML/Normalizator.py
Elscha/MetricsML
2ecbc42ad7bd2465f4f75658f44452ea5c552c3b
[ "Apache-2.0" ]
null
null
null
metricsML/Normalizator.py
Elscha/MetricsML
2ecbc42ad7bd2465f4f75658f44452ea5c552c3b
[ "Apache-2.0" ]
null
null
null
from metricsML.NormalizationType import NormalizationType
import numpy as np
import math

def normalization(normalization, train_data, test_data, validation_data=None):
    if not isinstance(normalization, NormalizationType):
        print("Unknown normalization specified, use " + str(NormalizationType.PERCENTAGE) + " for normalizing data")
        normalization = NormalizationType.PERCENTAGE;

    if (normalization is NormalizationType.NO_NORMALIZATION):
        print("No normalization selected")
    elif (normalization is NormalizationType.PERCENTAGE):
        __percentageNormalization(train_data, test_data, validation_data)
    elif (normalization is NormalizationType.LOGARITHM):
        __logarithmNormalization(train_data, test_data, validation_data)
    else:
        raise TypeError("Unhandled normalization selected")

def __percentageNormalization(train_data, test_data, validation_data=None):
    nColumns = train_data.shape[1] if len(train_data.shape) == 2 else 0;

    train_max = np.amax(train_data, axis=0)
    test_max = np.amax(test_data, axis=0)
    if (validation_data is not None):
        validation_max = np.amax(validation_data, axis=0)
    else:
        validation_max = np.zeros(nColumns)

    max_vector = np.amax([train_max, test_max, validation_max], axis=0)

    train_data = train_data/max_vector
    test_data = test_data/max_vector
    if (validation_data is not None):
        validation_data = validation_data/max_vector

def __logarithmNormalization(train_data, test_data, validation_data=None):
    nColumns = train_data.shape[1] if len(train_data.shape) == 2 else 0;

    train_max = np.amax(train_data, axis=0)
    test_max = np.amax(test_data, axis=0)
    if (validation_data is not None):
        validation_max = np.amax(validation_data, axis=0)
    else:
        validation_max = np.zeros(nColumns)

    max_vector = np.amax([train_max, test_max, validation_max], axis=0)

    for column in range(0, nColumns):
        max_value = max_vector[column]
        if (max_value > 1):
            train_data[:, column] = [__positiveLogarithm(x) for x in train_data[:, column]]
            test_data[:, column] = [__positiveLogarithm(x) for x in test_data[:, column]]
            if (validation_data is not None):
                validation_data[:, column] = [__positiveLogarithm(x) for x in validation_data[:, column]]

def __positiveLogarithm(number, base):
    if (number > 1):
        return math.log(number, base)
    else:
        return 0
42.016667
116
0.700912
from metricsML.NormalizationType import NormalizationType
import numpy as np
import math

def normalization(normalization, train_data, test_data, validation_data=None):
    if not isinstance(normalization, NormalizationType):
        print("Unknown normalization specified, use " + str(NormalizationType.PERCENTAGE) + " for normalizing data")
        normalization = NormalizationType.PERCENTAGE;

    if (normalization is NormalizationType.NO_NORMALIZATION):
        print("No normalization selected")
    elif (normalization is NormalizationType.PERCENTAGE):
        __percentageNormalization(train_data, test_data, validation_data)
    elif (normalization is NormalizationType.LOGARITHM):
        __logarithmNormalization(train_data, test_data, validation_data)
    else:
        raise TypeError("Unhandled normalization selected")

def __percentageNormalization(train_data, test_data, validation_data=None):
    nColumns = train_data.shape[1] if len(train_data.shape) == 2 else 0;

    train_max = np.amax(train_data, axis=0)
    test_max = np.amax(test_data, axis=0)
    if (validation_data is not None):
        validation_max = np.amax(validation_data, axis=0)
    else:
        validation_max = np.zeros(nColumns)

    max_vector = np.amax([train_max, test_max, validation_max], axis=0)

    train_data = train_data/max_vector
    test_data = test_data/max_vector
    if (validation_data is not None):
        validation_data = validation_data/max_vector

def __logarithmNormalization(train_data, test_data, validation_data=None):
    nColumns = train_data.shape[1] if len(train_data.shape) == 2 else 0;

    train_max = np.amax(train_data, axis=0)
    test_max = np.amax(test_data, axis=0)
    if (validation_data is not None):
        validation_max = np.amax(validation_data, axis=0)
    else:
        validation_max = np.zeros(nColumns)

    max_vector = np.amax([train_max, test_max, validation_max], axis=0)

    for column in range(0, nColumns):
        max_value = max_vector[column]
        if (max_value > 1):
            train_data[:, column] = [__positiveLogarithm(x) for x in train_data[:, column]]
            test_data[:, column] = [__positiveLogarithm(x) for x in test_data[:, column]]
            if (validation_data is not None):
                validation_data[:, column] = [__positiveLogarithm(x) for x in validation_data[:, column]]

def __positiveLogarithm(number, base):
    if (number > 1):
        return math.log(number, base)
    else:
        return 0
true
true
f70dc154bf5a0ec09efd22b5fab261d1b26f93ed
1,324
py
Python
setup.py
bluebottlecoffee/netsuite-1
92f94e10030ff590bb8792772c19e80673b3a722
[ "BSD-2-Clause-FreeBSD" ]
1
2019-07-03T16:16:11.000Z
2019-07-03T16:16:11.000Z
setup.py
bluebottlecoffee/netsuite-1
92f94e10030ff590bb8792772c19e80673b3a722
[ "BSD-2-Clause-FreeBSD" ]
1
2019-03-25T21:16:49.000Z
2019-03-25T21:16:49.000Z
setup.py
bluebottlecoffee/netsuite-1
92f94e10030ff590bb8792772c19e80673b3a722
[ "BSD-2-Clause-FreeBSD" ]
null
null
null
from setuptools import setup

setup_kwargs = dict(
    name='netsuite-bbc-python',
    version='1.0.1',
    description='Wrapper around Netsuite SuiteTalk Web Services',
    packages=['netsuite_bbc'],
    include_package_data=True,
    author='Jacob Magnusson',
    author_email='m@jacobian.se',
    url='https://github.com/bluebottlecoffee/netsuite-bbc-python',
    license='BSD',
    platforms='any',
    install_requires=[
        'lxml',
        'requests-ntlm',
        'zeep',
    ],
    extras_require={
        'cli': [
            'argh',
            'ipython',
        ],
        'test': {
            'coverage>=4.2',
            'flake8>=3.0.4',
            'mypy>=0.560',
            'pytest>=3.0.3',
            'responses>=0.5.1',
        },
    },
    entry_points={
        'console_scripts': [
            'netsuite = netsuite.__main__:main',
        ],
    },
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ],
)

if __name__ == '__main__':
    setup(**setup_kwargs)
25.960784
66
0.535498
from setuptools import setup

setup_kwargs = dict(
    name='netsuite-bbc-python',
    version='1.0.1',
    description='Wrapper around Netsuite SuiteTalk Web Services',
    packages=['netsuite_bbc'],
    include_package_data=True,
    author='Jacob Magnusson',
    author_email='m@jacobian.se',
    url='https://github.com/bluebottlecoffee/netsuite-bbc-python',
    license='BSD',
    platforms='any',
    install_requires=[
        'lxml',
        'requests-ntlm',
        'zeep',
    ],
    extras_require={
        'cli': [
            'argh',
            'ipython',
        ],
        'test': {
            'coverage>=4.2',
            'flake8>=3.0.4',
            'mypy>=0.560',
            'pytest>=3.0.3',
            'responses>=0.5.1',
        },
    },
    entry_points={
        'console_scripts': [
            'netsuite = netsuite.__main__:main',
        ],
    },
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ],
)

if __name__ == '__main__':
    setup(**setup_kwargs)
true
true
f70dc1cc367e3b8c96c568718b8ad0c47232d262
3,774
py
Python
litex_boards/targets/terasic_de1soc.py
trabucayre/litex-boards
94b4789286748f90f1c247d95b51470f3e16d1f5
[ "BSD-2-Clause" ]
2
2021-09-20T11:07:48.000Z
2021-12-21T18:20:54.000Z
litex_boards/targets/terasic_de1soc.py
zeldin/litex-boards
d52859d9ef5d8d210118c01ce89e29404ac8d7c6
[ "BSD-2-Clause" ]
null
null
null
litex_boards/targets/terasic_de1soc.py
zeldin/litex-boards
d52859d9ef5d8d210118c01ce89e29404ac8d7c6
[ "BSD-2-Clause" ]
null
null
null
#!/usr/bin/env python3 # # This file is part of LiteX-Boards. # # Copyright (c) 2019 Antony Pavlov <antonynpavlov@gmail.com> # SPDX-License-Identifier: BSD-2-Clause import os import argparse from migen import * from migen.genlib.resetsync import AsyncResetSynchronizer from litex.build.io import DDROutput from litex_boards.platforms import de1soc from litex.soc.cores.clock import CycloneVPLL from litex.soc.integration.soc_core import * from litex.soc.integration.builder import * from litex.soc.cores.led import LedChaser from litedram.modules import IS42S16320 from litedram.phy import GENSDRPHY # CRG ---------------------------------------------------------------------------------------------- class _CRG(Module): def __init__(self, platform, sys_clk_freq): self.rst = Signal() self.clock_domains.cd_sys = ClockDomain() self.clock_domains.cd_sys_ps = ClockDomain(reset_less=True) # # # # Clk / Rst clk50 = platform.request("clk50") # PLL self.submodules.pll = pll = CycloneVPLL(speedgrade="-C6") self.comb += pll.reset.eq(self.rst) pll.register_clkin(clk50, 50e6) pll.create_clkout(self.cd_sys, sys_clk_freq) pll.create_clkout(self.cd_sys_ps, sys_clk_freq, phase=90) # SDRAM clock self.specials += DDROutput(1, 0, platform.request("sdram_clock"), ClockSignal("sys_ps")) # BaseSoC ------------------------------------------------------------------------------------------ class BaseSoC(SoCCore): def __init__(self, sys_clk_freq=int(50e6), with_led_chaser=True, **kwargs): platform = de1soc.Platform() # SoCCore ---------------------------------------------------------------------------------- SoCCore.__init__(self, platform, sys_clk_freq, ident = "LiteX SoC on DE1-SoC", ident_version = True, **kwargs) # CRG -------------------------------------------------------------------------------------- self.submodules.crg = _CRG(platform, sys_clk_freq) # SDR SDRAM -------------------------------------------------------------------------------- if not self.integrated_main_ram_size: self.submodules.sdrphy = GENSDRPHY(platform.request("sdram"), sys_clk_freq) self.add_sdram("sdram", phy = self.sdrphy, module = IS42S16320(sys_clk_freq, "1:1"), l2_cache_size = kwargs.get("l2_size", 8192) ) # Leds ------------------------------------------------------------------------------------- if with_led_chaser: self.submodules.leds = LedChaser( pads = platform.request_all("user_led"), sys_clk_freq = sys_clk_freq) # Build -------------------------------------------------------------------------------------------- def main(): parser = argparse.ArgumentParser(description="LiteX SoC on DE1-SoC") parser.add_argument("--build", action="store_true", help="Build bitstream") parser.add_argument("--load", action="store_true", help="Load bitstream") parser.add_argument("--sys-clk-freq", default=50e6, help="System clock frequency (default: 50MHz)") builder_args(parser) soc_core_args(parser) args = parser.parse_args() soc = BaseSoC( sys_clk_freq = int(float(args.sys_clk_freq)), **soc_core_argdict(args) ) builder = Builder(soc, **builder_argdict(args)) builder.build(run=args.build) if args.load: prog = soc.platform.create_programmer() prog.load_bitstream(os.path.join(builder.gateware_dir, soc.build_name + ".sof")) if __name__ == "__main__": main()
36.288462
110
0.539216
import os import argparse from migen import * from migen.genlib.resetsync import AsyncResetSynchronizer from litex.build.io import DDROutput from litex_boards.platforms import de1soc from litex.soc.cores.clock import CycloneVPLL from litex.soc.integration.soc_core import * from litex.soc.integration.builder import * from litex.soc.cores.led import LedChaser from litedram.modules import IS42S16320 from litedram.phy import GENSDRPHY class _CRG(Module): def __init__(self, platform, sys_clk_freq): self.rst = Signal() self.clock_domains.cd_sys = ClockDomain() self.clock_domains.cd_sys_ps = ClockDomain(reset_less=True) clk50 = platform.request("clk50") self.submodules.pll = pll = CycloneVPLL(speedgrade="-C6") self.comb += pll.reset.eq(self.rst) pll.register_clkin(clk50, 50e6) pll.create_clkout(self.cd_sys, sys_clk_freq) pll.create_clkout(self.cd_sys_ps, sys_clk_freq, phase=90) self.specials += DDROutput(1, 0, platform.request("sdram_clock"), ClockSignal("sys_ps")) class BaseSoC(SoCCore): def __init__(self, sys_clk_freq=int(50e6), with_led_chaser=True, **kwargs): platform = de1soc.Platform() SoCCore.__init__(self, platform, sys_clk_freq, ident = "LiteX SoC on DE1-SoC", ident_version = True, **kwargs) self.submodules.crg = _CRG(platform, sys_clk_freq) if not self.integrated_main_ram_size: self.submodules.sdrphy = GENSDRPHY(platform.request("sdram"), sys_clk_freq) self.add_sdram("sdram", phy = self.sdrphy, module = IS42S16320(sys_clk_freq, "1:1"), l2_cache_size = kwargs.get("l2_size", 8192) ) if with_led_chaser: self.submodules.leds = LedChaser( pads = platform.request_all("user_led"), sys_clk_freq = sys_clk_freq) def main(): parser = argparse.ArgumentParser(description="LiteX SoC on DE1-SoC") parser.add_argument("--build", action="store_true", help="Build bitstream") parser.add_argument("--load", action="store_true", help="Load bitstream") parser.add_argument("--sys-clk-freq", default=50e6, help="System clock frequency (default: 50MHz)") builder_args(parser) soc_core_args(parser) args = parser.parse_args() soc = BaseSoC( sys_clk_freq = int(float(args.sys_clk_freq)), **soc_core_argdict(args) ) builder = Builder(soc, **builder_argdict(args)) builder.build(run=args.build) if args.load: prog = soc.platform.create_programmer() prog.load_bitstream(os.path.join(builder.gateware_dir, soc.build_name + ".sof")) if __name__ == "__main__": main()
true
true
f70dc2ae8032a89757f7a36b7c0addaadd06f4df
1,651
py
Python
recipes/filters.py
jf248/scrape-the-plate
e51d7e6d234cb5fc4b8c9e18b042b3d602e7661d
[ "MIT" ]
3
2018-08-19T10:58:21.000Z
2019-03-31T17:19:29.000Z
recipes/filters.py
jf248/scrape-the-plate
e51d7e6d234cb5fc4b8c9e18b042b3d602e7661d
[ "MIT" ]
16
2020-02-11T21:59:38.000Z
2022-03-08T22:47:52.000Z
recipes/filters.py
jf248/scrape-the-plate
e51d7e6d234cb5fc4b8c9e18b042b3d602e7661d
[ "MIT" ]
1
2018-08-19T10:58:23.000Z
2018-08-19T10:58:23.000Z
from django_filters import rest_framework as filters from rest_framework.filters import SearchFilter, OrderingFilter from . import models class SearchFilter(SearchFilter): """ A Filter backend """ class DjangoFilterBackend(filters.DjangoFilterBackend): """ A Filter backend """ class OrderingFilter(OrderingFilter): """ A Filter backend """ # class ListFilter(filters.Filter): # def __init__(self, integer=False, **kwargs): # super(ListFilter, self).__init__(**kwargs) # if integer: # self.filter_value_fn = lambda x: int(x) # else: # self.filter_value_fn = lambda x: x # # def sanitize(self, value_list): # return [v for v in value_list if v != ""] # # def filter(self, qs, value): # values = value.split(",") # values = self.sanitize(values) # f = Q() # for v in values: # kwargs = {self.field_name: v} # f = f | Q(**kwargs) # return qs.filter(f) class NumberInFilter(filters.BaseInFilter, filters.NumberFilter): pass class CharInFilter(filters.BaseInFilter, filters.CharFilter): pass class GroceryGroupFilter(filters.FilterSet): id = NumberInFilter(field_name='id') name = CharInFilter(field_name='name') class Meta: model = models.GroceryGroup fields = ['name', 'id'] class RecipeFilter(filters.FilterSet): id = NumberInFilter(field_name='id') user = NumberInFilter(field_name='user') tags = NumberInFilter(field_name='tags') class Meta: model = models.Recipe fields = ['id', 'user', 'tags']
23.253521
65
0.626287
from django_filters import rest_framework as filters from rest_framework.filters import SearchFilter, OrderingFilter from . import models class SearchFilter(SearchFilter): class DjangoFilterBackend(filters.DjangoFilterBackend): class OrderingFilter(OrderingFilter): class NumberInFilter(filters.BaseInFilter, filters.NumberFilter): pass class CharInFilter(filters.BaseInFilter, filters.CharFilter): pass class GroceryGroupFilter(filters.FilterSet): id = NumberInFilter(field_name='id') name = CharInFilter(field_name='name') class Meta: model = models.GroceryGroup fields = ['name', 'id'] class RecipeFilter(filters.FilterSet): id = NumberInFilter(field_name='id') user = NumberInFilter(field_name='user') tags = NumberInFilter(field_name='tags') class Meta: model = models.Recipe fields = ['id', 'user', 'tags']
true
true
f70dc481c8030337d093d5b752decf10198ad691
277
py
Python
erpnext/hr/doctype/appraisal/test_appraisal.py
nagendrarawat/erpnext_custom
1b94ecc3e66eae402347c302cd1663b690fb1ade
[ "MIT" ]
2
2019-10-01T13:07:39.000Z
2019-10-03T03:52:19.000Z
erpnext/hr/doctype/appraisal/test_appraisal.py
nagendrarawat/erpnext_custom
1b94ecc3e66eae402347c302cd1663b690fb1ade
[ "MIT" ]
null
null
null
erpnext/hr/doctype/appraisal/test_appraisal.py
nagendrarawat/erpnext_custom
1b94ecc3e66eae402347c302cd1663b690fb1ade
[ "MIT" ]
3
2019-09-30T19:17:44.000Z
2019-10-23T18:59:12.000Z
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors and Contributors
# See license.txt
from __future__ import unicode_literals

import frappe
import unittest

# test_records = frappe.get_test_records('Appraisal')

class TestAppraisal(unittest.TestCase):
    pass
23.083333
85
0.808664
from __future__ import unicode_literals

import frappe
import unittest

class TestAppraisal(unittest.TestCase):
    pass
true
true
f70dc58a1a9f50310b830e0b246cfde547f6836f
1,445
py
Python
Examples/Tests/divb_cleaning/analysis.py
oshapoval/WarpX
84d687da21ee93db67fdc43efec8a9cc80d0e6f9
[ "BSD-3-Clause-LBNL" ]
131
2018-09-29T08:11:40.000Z
2022-03-28T23:24:22.000Z
Examples/Tests/divb_cleaning/analysis.py
oshapoval/WarpX
84d687da21ee93db67fdc43efec8a9cc80d0e6f9
[ "BSD-3-Clause-LBNL" ]
1,656
2018-10-02T01:49:24.000Z
2022-03-31T21:27:31.000Z
Examples/Tests/divb_cleaning/analysis.py
oshapoval/WarpX
84d687da21ee93db67fdc43efec8a9cc80d0e6f9
[ "BSD-3-Clause-LBNL" ]
100
2018-10-01T20:41:14.000Z
2022-03-10T10:30:42.000Z
#! /usr/bin/env python

# Copyright 2019
#
# This file is part of WarpX.
#
# License: BSD-3-Clause-LBNL

import sys
sys.path.insert(1, '../../../../warpx/Regression/Checksum/')

import numpy as np
import yt
yt.funcs.mylog.setLevel(50)
import re
import checksumAPI
from scipy.constants import c

# Name of the last plotfile
fn = sys.argv[1]

# Load yt data
ds_old = yt.load('divb_cleaning_3d_plt00398')
ds_mid = yt.load('divb_cleaning_3d_plt00399')
ds_new = yt.load(fn) # this is the last plotfile

ad_old = ds_old.covering_grid(level = 0, left_edge = ds_old.domain_left_edge, dims = ds_old.domain_dimensions)
ad_mid = ds_mid.covering_grid(level = 0, left_edge = ds_mid.domain_left_edge, dims = ds_mid.domain_dimensions)
ad_new = ds_new.covering_grid(level = 0, left_edge = ds_new.domain_left_edge, dims = ds_new.domain_dimensions)

G_old = ad_old['boxlib', 'G'].v.squeeze()
G_new = ad_new['boxlib', 'G'].v.squeeze()
divB = ad_mid['boxlib', 'divB'].v.squeeze()

# Check max norm of error on c2 * div(B) = dG/dt
# (the time interval between old and new is 2*dt)
dt = 1.504557189e-15
x = G_new - G_old
y = divB * 2 * dt * c**2

rel_error = np.amax(abs(x - y)) / np.amax(abs(y))
tolerance = 1e-1
assert(rel_error < tolerance)

test_name = fn[:-9] # Could also be os.path.split(os.getcwd())[1]

if re.search('single_precision', fn):
    checksumAPI.evaluate_checksum(test_name, fn, rtol=1.e-3)
else:
    checksumAPI.evaluate_checksum(test_name, fn)
28.333333
110
0.717647
import sys
sys.path.insert(1, '../../../../warpx/Regression/Checksum/')

import numpy as np
import yt
yt.funcs.mylog.setLevel(50)
import re
import checksumAPI
from scipy.constants import c

fn = sys.argv[1]

ds_old = yt.load('divb_cleaning_3d_plt00398')
ds_mid = yt.load('divb_cleaning_3d_plt00399')
ds_new = yt.load(fn)

ad_old = ds_old.covering_grid(level = 0, left_edge = ds_old.domain_left_edge, dims = ds_old.domain_dimensions)
ad_mid = ds_mid.covering_grid(level = 0, left_edge = ds_mid.domain_left_edge, dims = ds_mid.domain_dimensions)
ad_new = ds_new.covering_grid(level = 0, left_edge = ds_new.domain_left_edge, dims = ds_new.domain_dimensions)

G_old = ad_old['boxlib', 'G'].v.squeeze()
G_new = ad_new['boxlib', 'G'].v.squeeze()
divB = ad_mid['boxlib', 'divB'].v.squeeze()

dt = 1.504557189e-15
x = G_new - G_old
y = divB * 2 * dt * c**2

rel_error = np.amax(abs(x - y)) / np.amax(abs(y))
tolerance = 1e-1
assert(rel_error < tolerance)

test_name = fn[:-9]

if re.search('single_precision', fn):
    checksumAPI.evaluate_checksum(test_name, fn, rtol=1.e-3)
else:
    checksumAPI.evaluate_checksum(test_name, fn)
true
true
f70dc61ab5caf97d2323e6f911de2398a0ffcd48
1,185
py
Python
app/forms/user.py
mofilamamra/APP-WILAYA
43aad0e98ad3934d0e1d94b52eece2574cb3c97e
[ "MIT" ]
null
null
null
app/forms/user.py
mofilamamra/APP-WILAYA
43aad0e98ad3934d0e1d94b52eece2574cb3c97e
[ "MIT" ]
null
null
null
app/forms/user.py
mofilamamra/APP-WILAYA
43aad0e98ad3934d0e1d94b52eece2574cb3c97e
[ "MIT" ]
null
null
null
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField, SelectField
from wtforms.validators import DataRequired, Email, Length, EqualTo
from app.models.Model import Category


def get_categories():
    categories_query = Category.query.all()
    categories = []
    for category in categories_query:
        categories.append((category.id,category.title))
    return categories


# Register form
class RegisterForm(FlaskForm):
    email = StringField(label="email", validators=[DataRequired(), Email()])
    username = StringField(label="username", validators=[DataRequired()])
    password = PasswordField(label="password", validators=[DataRequired(), Length(min=6)])
    confirm = PasswordField(label="confirm", validators=[DataRequired(),EqualTo(fieldname='password')])
    category = SelectField('Selectionée une category', validators=[DataRequired()],choices=get_categories())
    submit = SubmitField(label="inscrire")


# login form
class LoginForm(FlaskForm):
    email = StringField('email', validators=[DataRequired(),Email()])
    password = PasswordField('password', validators=[DataRequired()])
    submit = SubmitField('identifier')
39.5
108
0.745992
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField, SelectField
from wtforms.validators import DataRequired, Email, Length, EqualTo
from app.models.Model import Category


def get_categories():
    categories_query = Category.query.all()
    categories = []
    for category in categories_query:
        categories.append((category.id,category.title))
    return categories


class RegisterForm(FlaskForm):
    email = StringField(label="email", validators=[DataRequired(), Email()])
    username = StringField(label="username", validators=[DataRequired()])
    password = PasswordField(label="password", validators=[DataRequired(), Length(min=6)])
    confirm = PasswordField(label="confirm", validators=[DataRequired(),EqualTo(fieldname='password')])
    category = SelectField('Selectionée une category', validators=[DataRequired()],choices=get_categories())
    submit = SubmitField(label="inscrire")


class LoginForm(FlaskForm):
    email = StringField('email', validators=[DataRequired(),Email()])
    password = PasswordField('password', validators=[DataRequired()])
    submit = SubmitField('identifier')
true
true
f70dc6349a1611ac2b4477d44033ddef01b29cff
9,878
py
Python
rtlib/setting.py
tuna2134/rt-bot-sdk
79162688e06c30599e74ae881fff102304124474
[ "BSD-4-Clause" ]
null
null
null
rtlib/setting.py
tuna2134/rt-bot-sdk
79162688e06c30599e74ae881fff102304124474
[ "BSD-4-Clause" ]
null
null
null
rtlib/setting.py
tuna2134/rt-bot-sdk
79162688e06c30599e74ae881fff102304124474
[ "BSD-4-Clause" ]
null
null
null
# RT Lib - Setting from typing import ( TYPE_CHECKING, TypedDict, Optional, Union, Literal, Dict, Tuple, List, overload, get_origin, get_args ) from discord.ext import commands import discord from collections import defaultdict from aiohttp import ClientSession from functools import partial from datetime import datetime from ujson import dumps from time import time from pytz import utc from . import websocket from .slash import Option if TYPE_CHECKING: from .typed import RT class CommandRunData(TypedDict): command: str kwargs: Dict[str, Union[str, int, float, bool]] guild_id: Union[int, Literal[0]] category: str user_id: int ip: str class Setting: @overload def __init__( _, mode: str, name: Optional[str] = None, help_command: Tuple[str, str] = None, **kwargs ): ... def __new__(cls, mode, name=None, help_command=None, **kwargs): return lambda func: func self = super().__new__(cls) self.mode, self.name, self.kwargs = mode, name, kwargs self.help_command = help_command def _decorator(func): func._setting = self return func return _decorator class Context: "ダッシュボードから呼ばれたコマンドで実行されるContextです。" def __init__( self, cog: "SettingManager", data: CommandRunData, command: commands.Command, **kwargs ): # IDを文字列から整数に変換する。 for key, value in list(data.items()): if key.endswith("id"): data[key] = int(value) # 変数を作っていく。 self.data = data self.setting_manager = cog self.bot: "RT" = self.setting_manager.bot self.guild: Optional[discord.Guild] = self.bot.get_guild(data["guild_id"]) self.created_at: datetime = datetime.now(utc) self.edited_at = None self.__setting_context__ = True self.channel: Optional[ Union[discord.abc.GuildChannel, discord.DMChannel] ] = ( self.guild.get_channel(data["kwargs"].pop( "channel_id", data["kwargs"].pop( "channel", data["kwargs"].pop("Channel", 0) ) )) if data["category"].endswith("guild") else self.bot.get_user(data["user_id"]) ) self.author: Union[discord.User, discord.Member] = ( self.guild.get_member(data["user_id"]) if self.guild else self.bot.get_user(data["user_id"]) ) for key in kwargs: setattr(self, key, kwargs.pop(key, None)) self.command = command self.cog = command.cog self.voice_client: Optional[discord.VoiceClient] = \ getattr(self.guild, "voice_client", None) self.prefix = "r2!" if self.bot.test else "rt!" self.me: Union[discord.Member, discord.ClientUser] = \ getattr(self.guild, "me", self.bot.user) self.message = self self.reply = self.send async def trigger_typing(self): ... async def send( self, content: str = None, embed: discord.Embed = None, *args, **kwargs ): "返信をします。" content = self.bot.cogs["Language"].get_text( embed if embed else content, self.author.id ) if isinstance(content, discord.Embed): content = content.to_dict() async with self.setting_manager.session.post( f"{self.bot.get_url()}/api/settings/reply/{self.data['ip']}", json={"data": content} ) as r: self.bot.print( "[SettingManager]", "[Reply]", f"Response: {await r.text()}, Content: {content}" ) @overload async def reply( self, content: str = None, embed: discord.Embed = None, *args, **kwargs ): ... async def delete(self) -> None: ... 
class SettingManager(commands.Cog): SUPPORTED_DISCORD_ANNOTATIONS = ( "Member", "User", "TextChannel", "VoiceChannel", "StageChannel", "Thread", "Role" ) SUPPORTED_ANNOTATIONS = (str, int, float, bool) def __init__(self, bot: "RT"): self.bot = bot self.data: Dict[ str, Tuple[commands.Command, Setting] ] = {} self.before = {} @property def session(self) -> ClientSession: if not hasattr(self, "_session"): self._session = ClientSession( loop=self.bot.loop, json_serialize=partial( dumps, ensure_ascii=False ) ) return self._session def get_parsed_args(self, annotation: object) -> Union[str, List[str]]: "渡されたオブジェクトから設定項目の型の名前を判定し返します。" if isinstance(annotation, Option): annotation = annotation.annotation if annotation in self.SUPPORTED_ANNOTATIONS: return annotation.__name__ elif getattr(annotation, "__name__", "") in self.SUPPORTED_DISCORD_ANNOTATIONS: return annotation.__name__.replace("Text", "").replace("Voice", "") \ .replace("Stage", "").replace("Thread", "Channel").replace("User", "Member") elif (origin := get_origin(annotation)) == Union: return ["Union"] + [self.get_parsed_args(arg) for arg in get_args(annotation)] elif origin == Literal: return ["Literal"] + list(get_args(annotation)) else: return "str" def reset(self): self.data = {} def add_command(self, command: commands.Command) -> None: self.data[command.qualified_name] = (command, command.callback._setting) @commands.Cog.listener() async def on_command_add(self, command: commands.Command): if hasattr(command.callback, "_setting"): self.add_command(command) @commands.Cog.listener("on_update_api") async def update(self): "APIにBotにあるコマンドの設定のJSONデータを送る。" # バックエンド用のデータを作る。 data = defaultdict(dict) for command, setting in self.data.values(): kwargs = { parameter.name: ( ant := self.get_parsed_args(parameter.annotation), "" if parameter.default == parameter.empty else parameter.default, parameter.kind == parameter.KEYWORD_ONLY \ and ant == "str" ) for parameter in command.clean_params.values() } kwargs.update({ key: (self.get_parsed_args(value), "", False) for key, value in setting.kwargs.items() }) data[setting.mode][command.qualified_name] = { "help": ( self.bot.cogs["BotGeneral"].get_help_url(*setting.help_command) if setting.help_command else self.bot.cogs["BotGeneral"].get_command_url(command) ), "kwargs": kwargs, "sub_category": getattr( command.parent, "name", None ), "headding": ( command.extras.get("headding") or command.__original_kwargs__.get("headding") ), "display_name": setting.name or command.name } # データを送る。 async with self.bot.session.post( f"{self.bot.get_url()}/api/settings/commands/update", json=data ) as r: self.bot.print("[SettingManager]", "[Updater]", time(), await r.text()) self.before = data @websocket.websocket("/api/settings/websocket", auto_connect=True, reconnect=True) async def setting_websocket(self, ws: websocket.WebSocket, _): # ユーザーがダッシュボードから設定を更新した際に、すぐに反応できるようにするためのものです。 await ws.send("on_ready") @setting_websocket.event("on_post") async def post(self, ws: websocket.WebSocket, data: CommandRunData): if isinstance(data, dict): self.bot.loop.create_task( self.run_command(self.data[data["command"]][0], data), name=f"UpdateSetting[{data.get('command')}]: {data.get('user_id')}" ) await ws.send("on_posted") @setting_websocket.event("on_posted") async def posted(self, ws: websocket.WebSocket, _): await self.setting_websocket(ws, None) async def run_command(self, command: commands.Command, data: CommandRunData): "コマンドを走らせます。" ctx = None try: # コマンドのメッセージを組み立てる。 content = 
f"{self.bot.command_prefix[0]}{command.qualified_name}" for parameter in command.clean_params.values(): tentative = f' "{data["kwargs"].get(parameter.name, "")}"' if parameter.kind == parameter.KEYWORD_ONLY: tentative = f" {tentative[2:-1]}" content += tentative # 実行できるかチェックをしてからオリジナルContextでコマンドを実行する。 ctx = Context(self, data, command) ctx.content = content ctx._state = self.bot.http parsed_ctx = await self.bot.get_context(ctx) ctx.view = parsed_ctx.view ctx.args, ctx.kwargs = parsed_ctx.args, parsed_ctx.kwargs for name in dir(parsed_ctx): if not name.startswith( ( "__", "send", "reply", "trigger", "typing", "created", "channel", "message", "guild" ) ): setattr(ctx, name, getattr(parsed_ctx, name)) return await self.bot.invoke(ctx.message) except Exception as e: if ctx: self.bot.dispatch("command_error", ctx, e) def cog_unload(self): if hasattr(self, "_session"): self.bot.loop.create_task(self._session.close()) def setup(bot): return bot.add_cog(SettingManager(bot))
34.904594
92
0.5738
from typing import ( TYPE_CHECKING, TypedDict, Optional, Union, Literal, Dict, Tuple, List, overload, get_origin, get_args ) from discord.ext import commands import discord from collections import defaultdict from aiohttp import ClientSession from functools import partial from datetime import datetime from ujson import dumps from time import time from pytz import utc from . import websocket from .slash import Option if TYPE_CHECKING: from .typed import RT class CommandRunData(TypedDict): command: str kwargs: Dict[str, Union[str, int, float, bool]] guild_id: Union[int, Literal[0]] category: str user_id: int ip: str class Setting: @overload def __init__( _, mode: str, name: Optional[str] = None, help_command: Tuple[str, str] = None, **kwargs ): ... def __new__(cls, mode, name=None, help_command=None, **kwargs): return lambda func: func self = super().__new__(cls) self.mode, self.name, self.kwargs = mode, name, kwargs self.help_command = help_command def _decorator(func): func._setting = self return func return _decorator class Context: def __init__( self, cog: "SettingManager", data: CommandRunData, command: commands.Command, **kwargs ): for key, value in list(data.items()): if key.endswith("id"): data[key] = int(value) self.data = data self.setting_manager = cog self.bot: "RT" = self.setting_manager.bot self.guild: Optional[discord.Guild] = self.bot.get_guild(data["guild_id"]) self.created_at: datetime = datetime.now(utc) self.edited_at = None self.__setting_context__ = True self.channel: Optional[ Union[discord.abc.GuildChannel, discord.DMChannel] ] = ( self.guild.get_channel(data["kwargs"].pop( "channel_id", data["kwargs"].pop( "channel", data["kwargs"].pop("Channel", 0) ) )) if data["category"].endswith("guild") else self.bot.get_user(data["user_id"]) ) self.author: Union[discord.User, discord.Member] = ( self.guild.get_member(data["user_id"]) if self.guild else self.bot.get_user(data["user_id"]) ) for key in kwargs: setattr(self, key, kwargs.pop(key, None)) self.command = command self.cog = command.cog self.voice_client: Optional[discord.VoiceClient] = \ getattr(self.guild, "voice_client", None) self.prefix = "r2!" if self.bot.test else "rt!" self.me: Union[discord.Member, discord.ClientUser] = \ getattr(self.guild, "me", self.bot.user) self.message = self self.reply = self.send async def trigger_typing(self): ... async def send( self, content: str = None, embed: discord.Embed = None, *args, **kwargs ): content = self.bot.cogs["Language"].get_text( embed if embed else content, self.author.id ) if isinstance(content, discord.Embed): content = content.to_dict() async with self.setting_manager.session.post( f"{self.bot.get_url()}/api/settings/reply/{self.data['ip']}", json={"data": content} ) as r: self.bot.print( "[SettingManager]", "[Reply]", f"Response: {await r.text()}, Content: {content}" ) @overload async def reply( self, content: str = None, embed: discord.Embed = None, *args, **kwargs ): ... async def delete(self) -> None: ... 
class SettingManager(commands.Cog): SUPPORTED_DISCORD_ANNOTATIONS = ( "Member", "User", "TextChannel", "VoiceChannel", "StageChannel", "Thread", "Role" ) SUPPORTED_ANNOTATIONS = (str, int, float, bool) def __init__(self, bot: "RT"): self.bot = bot self.data: Dict[ str, Tuple[commands.Command, Setting] ] = {} self.before = {} @property def session(self) -> ClientSession: if not hasattr(self, "_session"): self._session = ClientSession( loop=self.bot.loop, json_serialize=partial( dumps, ensure_ascii=False ) ) return self._session def get_parsed_args(self, annotation: object) -> Union[str, List[str]]: if isinstance(annotation, Option): annotation = annotation.annotation if annotation in self.SUPPORTED_ANNOTATIONS: return annotation.__name__ elif getattr(annotation, "__name__", "") in self.SUPPORTED_DISCORD_ANNOTATIONS: return annotation.__name__.replace("Text", "").replace("Voice", "") \ .replace("Stage", "").replace("Thread", "Channel").replace("User", "Member") elif (origin := get_origin(annotation)) == Union: return ["Union"] + [self.get_parsed_args(arg) for arg in get_args(annotation)] elif origin == Literal: return ["Literal"] + list(get_args(annotation)) else: return "str" def reset(self): self.data = {} def add_command(self, command: commands.Command) -> None: self.data[command.qualified_name] = (command, command.callback._setting) @commands.Cog.listener() async def on_command_add(self, command: commands.Command): if hasattr(command.callback, "_setting"): self.add_command(command) @commands.Cog.listener("on_update_api") async def update(self): data = defaultdict(dict) for command, setting in self.data.values(): kwargs = { parameter.name: ( ant := self.get_parsed_args(parameter.annotation), "" if parameter.default == parameter.empty else parameter.default, parameter.kind == parameter.KEYWORD_ONLY \ and ant == "str" ) for parameter in command.clean_params.values() } kwargs.update({ key: (self.get_parsed_args(value), "", False) for key, value in setting.kwargs.items() }) data[setting.mode][command.qualified_name] = { "help": ( self.bot.cogs["BotGeneral"].get_help_url(*setting.help_command) if setting.help_command else self.bot.cogs["BotGeneral"].get_command_url(command) ), "kwargs": kwargs, "sub_category": getattr( command.parent, "name", None ), "headding": ( command.extras.get("headding") or command.__original_kwargs__.get("headding") ), "display_name": setting.name or command.name } async with self.bot.session.post( f"{self.bot.get_url()}/api/settings/commands/update", json=data ) as r: self.bot.print("[SettingManager]", "[Updater]", time(), await r.text()) self.before = data @websocket.websocket("/api/settings/websocket", auto_connect=True, reconnect=True) async def setting_websocket(self, ws: websocket.WebSocket, _): await ws.send("on_ready") @setting_websocket.event("on_post") async def post(self, ws: websocket.WebSocket, data: CommandRunData): if isinstance(data, dict): self.bot.loop.create_task( self.run_command(self.data[data["command"]][0], data), name=f"UpdateSetting[{data.get('command')}]: {data.get('user_id')}" ) await ws.send("on_posted") @setting_websocket.event("on_posted") async def posted(self, ws: websocket.WebSocket, _): await self.setting_websocket(ws, None) async def run_command(self, command: commands.Command, data: CommandRunData): ctx = None try: content = f"{self.bot.command_prefix[0]}{command.qualified_name}" for parameter in command.clean_params.values(): tentative = f' "{data["kwargs"].get(parameter.name, "")}"' if parameter.kind == parameter.KEYWORD_ONLY: 
tentative = f" {tentative[2:-1]}" content += tentative ctx = Context(self, data, command) ctx.content = content ctx._state = self.bot.http parsed_ctx = await self.bot.get_context(ctx) ctx.view = parsed_ctx.view ctx.args, ctx.kwargs = parsed_ctx.args, parsed_ctx.kwargs for name in dir(parsed_ctx): if not name.startswith( ( "__", "send", "reply", "trigger", "typing", "created", "channel", "message", "guild" ) ): setattr(ctx, name, getattr(parsed_ctx, name)) return await self.bot.invoke(ctx.message) except Exception as e: if ctx: self.bot.dispatch("command_error", ctx, e) def cog_unload(self): if hasattr(self, "_session"): self.bot.loop.create_task(self._session.close()) def setup(bot): return bot.add_cog(SettingManager(bot))
true
true
f70dc692a5a50fd9649f6865d30bbedcfd90fc66
11,387
py
Python
mltk/utils/jlink_stream/data_stream.py
SiliconLabs/mltk
56b19518187e9d1c8a0d275de137fc9058984a1f
[ "Zlib" ]
null
null
null
mltk/utils/jlink_stream/data_stream.py
SiliconLabs/mltk
56b19518187e9d1c8a0d275de137fc9058984a1f
[ "Zlib" ]
1
2021-11-19T20:10:09.000Z
2021-11-19T20:10:09.000Z
mltk/utils/jlink_stream/data_stream.py
sldriedler/mltk
d82a60359cf875f542a2257f1bc7d8eb4bdaa204
[ "Zlib" ]
null
null
null
import time from threading import Event, RLock from mltk.utils import hexdump from .device_interface import DeviceInterface, MAX_BUFFER_SIZE WAIT_FOREVER = 4294967.0 class JLinkDataStream(object): """JLink data stream""" def __init__( self, name:str, mode:str, ifc: DeviceInterface, stream_context: dict ): self._name = name self._mode = mode self._ifc = ifc self._context = stream_context self._is_opened = Event() self._buffer = bytearray() self._buffer_lock = RLock() self._buffer_event = Event() self._notify_event = None self._max_read_size = -1 self._timeout = -1 self._end_time = -1 self._requires_processing = False self._id_mask = (1 << stream_context['id']) self._is_opened.set() @property def name(self) -> str: """The name of the opened stream""" return self._name @property def mode(self) -> str: """The mode the for which the stream was opened, r or w""" return self._mode @property def is_opened(self) -> bool: """If the stream is opened to the device""" return self._is_opened.is_set() @property def max_read_size(self) -> int: """The maximum amount of data to read Set to -1 to disable limit After each read, this value will decrement by the amount of data read. One this value reaches zero, it must be reset otherwise subsequent reads will always return zero. """ return self._max_read_size @max_read_size.setter def max_read_size(self, val:int): if val is None: val = -1 self._max_read_size = val @property def timeout(self) -> float: """The maximum about of time in seconds to read or write data. This is only used if the 'timeout' argument to the read() or write() APIs is None Set to -1 to never timeout """ return self._timeout @timeout.setter def timeout(self, val: float): if val is None: val = -1 self._timeout = val @property def end_time(self) -> float: """The absolute time in seconds to timeout reading or writing Set to None to disable. If end_time > time.time(), then return from the read() or write() API """ return self._end_time @end_time.setter def end_time(self, val:float): if val is None: val = -1 self._end_time = val @property def buffer_used(self) -> int: """The amount of the device data buffer used If the stream was opened for reading then this is the amount of data that was previous received from the device and is waiting to be read by the python script. If the stream was opened for writing, then this is the amount of data that was previously written and is pending to be sent to the device. """ with self._buffer_lock: retval = len(self._buffer) return retval @property def buffer_unused(self) -> int: """The amount of the device data buffer that is available""" with self._buffer_lock: retval = MAX_BUFFER_SIZE - len(self._buffer) return retval @property def read_data_available(self) -> int: """The amount of data that is ready to be read by the python script""" return self.buffer_used @property def write_data_available(self) -> int: """The amount of data that can immediately be written""" return self.buffer_unused @property def buffer_hexdump(self, length=64) -> str: """Return a hexdump string""" length = min(length, self.buffer_used) return hexdump.hexdump(self._buffer[:length], result='return') def close(self): """Close the data stream with the device""" if self._is_opened.is_set(): self._is_opened.clear() self._buffer_event.set() self._ifc.close(self._name) def read(self, max_size:int = None, timeout:float=None) -> bytes: """Read data from data stream opened for reading NOTE: The only returns the data that is immediately available. 
The amount of data returned may be less than max_size. """ if self.mode != 'r': raise Exception(f'Stream: {self.name} not opened for reading') timeout = self._get_timeout(timeout) max_size = self._get_max_size(max_size) start_time = time.time() while True: self._buffer_event.clear() if not self.is_opened: raise Exception(f'Stream: {self.name} closed') if max_size == 0: return None bufsize = min(self.read_data_available, max_size) if bufsize > 0: retval = self._consume_buffer(bufsize) self._notify_event.set() return bytes(retval) elapsed = (time.time() - start_time) if elapsed >= timeout: return None if self._end_time > 0: time_remaining = self._end_time - time.time() if time_remaining <= 0: return None else: time_remaining = WAIT_FOREVER self._buffer_event.wait(min(min(timeout - elapsed, time_remaining), 0.100)) def read_all(self, amount:int, timeout:float=None, initial_timeout:float=None, throw_exception=True) -> bytes: """The the specified amount of data""" if initial_timeout is None: initial_timeout = timeout retval = bytearray() remaining = amount while remaining > 0: chunk_timeout = initial_timeout if len(retval) == 0 else timeout chunk = self.read(max_size=remaining, timeout=chunk_timeout) if chunk is None: break remaining -= len(chunk) retval.extend(chunk) if len(retval) != amount and throw_exception: raise Exception('Failed to read all data') return bytes(retval) def write(self, data:bytes, timeout:float=None, flush=False) -> int: """Write data to a data stream opened for writing""" if self.mode != 'w': raise Exception(f'Stream: {self.name} not opened for writing') timeout = self._get_timeout(timeout) total_write_len = 0 start_time = time.time() while len(data) > 0: self._buffer_event.clear() if not self.is_opened: raise Exception(f'Stream: {self.name} closed') bufsize = min(self.write_data_available, len(data)) if bufsize > 0: self._populate_buffer(data[:bufsize]) data = data[bufsize:] total_write_len += bufsize self._requires_processing = True self._notify_event.set() if len(data) == 0: break elapsed = (time.time() - start_time) if elapsed >= timeout: break if self._end_time > 0: time_remaining = self._end_time - time.time() if time_remaining <= 0: break else: time_remaining = WAIT_FOREVER self._buffer_event.wait(min(min(timeout - elapsed, time_remaining), 0.100)) if flush: self.flush(timeout=timeout) return total_write_len def flush(self, timeout:float=None): """Wait while any pending data is transferred to/from the device""" timeout = self._get_timeout(timeout) start_time = time.time() while self.buffer_used > 0: self._buffer_event.clear() if not self.is_opened: raise Exception(f'Stream: {self.name} closed') elapsed = (time.time() - start_time) if elapsed >= timeout: raise Exception('Time-out waiting for buffer to flush') if self._end_time > 0: time_remaining = self._end_time - time.time() if time_remaining <= 0: break else: time_remaining = WAIT_FOREVER self._buffer_event.wait(min(min(timeout - elapsed, time_remaining), 0.100)) def _set_notify_event(self, event): self._notify_event = event def _process(self, buffer_status_mask): if not self._requires_processing and (buffer_status_mask & self._id_mask) == 0: return self._requires_processing = False if self.mode == 'r': max_read_len = self.buffer_unused if max_read_len > 0: data = self._ifc.read(self._context, max_read_len) if data: self._populate_buffer(data) else: self._requires_processing = True elif self.mode == 'w': write_len = self._ifc.write(self._context, self._buffer) if write_len: self._consume_buffer(write_len) if 
self.buffer_used > 0: self._requires_processing = True def _consume_buffer(self, size) -> bytes: with self._buffer_lock: retval = self._buffer[:size] self._buffer = self._buffer[size:] if self._max_read_size != -1: if size <= self._max_read_size: self._max_read_size -= size else: self._max_read_size = 0 if self.mode == 'w': self._buffer_event.set() return retval def _populate_buffer(self, data): with self._buffer_lock: if isinstance(data, str): data = data.encode() self._buffer.extend(data) if self.mode == 'r': self._buffer_event.set() def _get_timeout(self, timeout:float) -> float: if timeout is None: timeout = self._timeout if timeout == -1: timeout = WAIT_FOREVER return timeout def _get_max_size(self, max_size:int) -> int: if max_size is None: max_size = self._max_read_size if max_size == -1: max_size = MAX_BUFFER_SIZE return max_size def __iter__(self): return self def __next__(self): retval = self.read() if retval is None: raise StopIteration # Done iterating. return retval def __enter__(self): return self def __exit__(self, dtype, value, traceback): self.close()
29.423773
114
0.543954
import time from threading import Event, RLock from mltk.utils import hexdump from .device_interface import DeviceInterface, MAX_BUFFER_SIZE WAIT_FOREVER = 4294967.0 class JLinkDataStream(object): def __init__( self, name:str, mode:str, ifc: DeviceInterface, stream_context: dict ): self._name = name self._mode = mode self._ifc = ifc self._context = stream_context self._is_opened = Event() self._buffer = bytearray() self._buffer_lock = RLock() self._buffer_event = Event() self._notify_event = None self._max_read_size = -1 self._timeout = -1 self._end_time = -1 self._requires_processing = False self._id_mask = (1 << stream_context['id']) self._is_opened.set() @property def name(self) -> str: return self._name @property def mode(self) -> str: return self._mode @property def is_opened(self) -> bool: return self._is_opened.is_set() @property def max_read_size(self) -> int: return self._max_read_size @max_read_size.setter def max_read_size(self, val:int): if val is None: val = -1 self._max_read_size = val @property def timeout(self) -> float: return self._timeout @timeout.setter def timeout(self, val: float): if val is None: val = -1 self._timeout = val @property def end_time(self) -> float: return self._end_time @end_time.setter def end_time(self, val:float): if val is None: val = -1 self._end_time = val @property def buffer_used(self) -> int: with self._buffer_lock: retval = len(self._buffer) return retval @property def buffer_unused(self) -> int: with self._buffer_lock: retval = MAX_BUFFER_SIZE - len(self._buffer) return retval @property def read_data_available(self) -> int: return self.buffer_used @property def write_data_available(self) -> int: return self.buffer_unused @property def buffer_hexdump(self, length=64) -> str: length = min(length, self.buffer_used) return hexdump.hexdump(self._buffer[:length], result='return') def close(self): if self._is_opened.is_set(): self._is_opened.clear() self._buffer_event.set() self._ifc.close(self._name) def read(self, max_size:int = None, timeout:float=None) -> bytes: if self.mode != 'r': raise Exception(f'Stream: {self.name} not opened for reading') timeout = self._get_timeout(timeout) max_size = self._get_max_size(max_size) start_time = time.time() while True: self._buffer_event.clear() if not self.is_opened: raise Exception(f'Stream: {self.name} closed') if max_size == 0: return None bufsize = min(self.read_data_available, max_size) if bufsize > 0: retval = self._consume_buffer(bufsize) self._notify_event.set() return bytes(retval) elapsed = (time.time() - start_time) if elapsed >= timeout: return None if self._end_time > 0: time_remaining = self._end_time - time.time() if time_remaining <= 0: return None else: time_remaining = WAIT_FOREVER self._buffer_event.wait(min(min(timeout - elapsed, time_remaining), 0.100)) def read_all(self, amount:int, timeout:float=None, initial_timeout:float=None, throw_exception=True) -> bytes: if initial_timeout is None: initial_timeout = timeout retval = bytearray() remaining = amount while remaining > 0: chunk_timeout = initial_timeout if len(retval) == 0 else timeout chunk = self.read(max_size=remaining, timeout=chunk_timeout) if chunk is None: break remaining -= len(chunk) retval.extend(chunk) if len(retval) != amount and throw_exception: raise Exception('Failed to read all data') return bytes(retval) def write(self, data:bytes, timeout:float=None, flush=False) -> int: if self.mode != 'w': raise Exception(f'Stream: {self.name} not opened for writing') timeout = self._get_timeout(timeout) total_write_len = 0 
start_time = time.time() while len(data) > 0: self._buffer_event.clear() if not self.is_opened: raise Exception(f'Stream: {self.name} closed') bufsize = min(self.write_data_available, len(data)) if bufsize > 0: self._populate_buffer(data[:bufsize]) data = data[bufsize:] total_write_len += bufsize self._requires_processing = True self._notify_event.set() if len(data) == 0: break elapsed = (time.time() - start_time) if elapsed >= timeout: break if self._end_time > 0: time_remaining = self._end_time - time.time() if time_remaining <= 0: break else: time_remaining = WAIT_FOREVER self._buffer_event.wait(min(min(timeout - elapsed, time_remaining), 0.100)) if flush: self.flush(timeout=timeout) return total_write_len def flush(self, timeout:float=None): timeout = self._get_timeout(timeout) start_time = time.time() while self.buffer_used > 0: self._buffer_event.clear() if not self.is_opened: raise Exception(f'Stream: {self.name} closed') elapsed = (time.time() - start_time) if elapsed >= timeout: raise Exception('Time-out waiting for buffer to flush') if self._end_time > 0: time_remaining = self._end_time - time.time() if time_remaining <= 0: break else: time_remaining = WAIT_FOREVER self._buffer_event.wait(min(min(timeout - elapsed, time_remaining), 0.100)) def _set_notify_event(self, event): self._notify_event = event def _process(self, buffer_status_mask): if not self._requires_processing and (buffer_status_mask & self._id_mask) == 0: return self._requires_processing = False if self.mode == 'r': max_read_len = self.buffer_unused if max_read_len > 0: data = self._ifc.read(self._context, max_read_len) if data: self._populate_buffer(data) else: self._requires_processing = True elif self.mode == 'w': write_len = self._ifc.write(self._context, self._buffer) if write_len: self._consume_buffer(write_len) if self.buffer_used > 0: self._requires_processing = True def _consume_buffer(self, size) -> bytes: with self._buffer_lock: retval = self._buffer[:size] self._buffer = self._buffer[size:] if self._max_read_size != -1: if size <= self._max_read_size: self._max_read_size -= size else: self._max_read_size = 0 if self.mode == 'w': self._buffer_event.set() return retval def _populate_buffer(self, data): with self._buffer_lock: if isinstance(data, str): data = data.encode() self._buffer.extend(data) if self.mode == 'r': self._buffer_event.set() def _get_timeout(self, timeout:float) -> float: if timeout is None: timeout = self._timeout if timeout == -1: timeout = WAIT_FOREVER return timeout def _get_max_size(self, max_size:int) -> int: if max_size is None: max_size = self._max_read_size if max_size == -1: max_size = MAX_BUFFER_SIZE return max_size def __iter__(self): return self def __next__(self): retval = self.read() if retval is None: raise StopIteration return retval def __enter__(self): return self def __exit__(self, dtype, value, traceback): self.close()
true
true
f70dc6a5b028fb99479752dedde54048af19c596
2,416
py
Python
main.py
im1-pro/GUI-Based-RSM
a164af708eea814c8c51f8a2178a271e5505b29e
[ "MIT" ]
null
null
null
main.py
im1-pro/GUI-Based-RSM
a164af708eea814c8c51f8a2178a271e5505b29e
[ "MIT" ]
null
null
null
main.py
im1-pro/GUI-Based-RSM
a164af708eea814c8c51f8a2178a271e5505b29e
[ "MIT" ]
null
null
null
import rstools
from tkinter import *
from functools import partial


class mainWindow:
    def __init__(self,master) -> None:
        self.master = master
        self.constraint = IntVar()
        self.constring = []
        # Main window: title, a slider for the number of constraints and a submit button
        Label(self.master , text="Revised Simplex Method", font=("Arial",25)).pack()
        Label(self.master,text="select number of constraint").pack()
        Scale(self.master,variable=self.constraint,from_=2,to=4,orient=HORIZONTAL).pack()
        Button(self.master,text="Submit",command=self.next).pack()

    def next(self):
        # Second window: one entry row for the objective (Z) plus one row per constraint
        level1 = Toplevel()
        level1.geometry("400x300")
        a = self.constraint.get()
        yy1 = 5
        for i in range(a+1):
            if i==0:
                l1 = Label(level1,text="Z")
                l1.place(x=120,y=yy1)
            else:
                l2 = Label(level1,text="Constraint"+str(i))
                l2.place(x=70,y=yy1)
            yy1+=20
        yy = 5
        for i in range(a+1):
            va = StringVar()
            e = Entry(level1,textvariable=va)
            e.place(x=135,y=yy)
            self.constring.append(va)
            yy+=20
        finalanswer = partial(self.finalanswer,level1)
        Button(level1,text="calculate",command=finalanswer).place(x=225,y=20*(a+2))
        level1.mainloop()

    def finalanswer(self,level1):
        # Parse the entered strings and solve the LP with the revised simplex helpers in rstools
        Decodedstring = []
        for i in self.constring:
            Decodedstring.append(i.get())
        a = len(Decodedstring)
        cj = rstools.optimizationFunction(Decodedstring[0])
        A = []
        b = []
        for i in range(1,a):
            A.append(rstools.constraintsFunction(Decodedstring[i])[:-1])
            b.append([rstools.constraintsFunction(Decodedstring[i])[-1]])
        cb = [[0]*(a-1)]
        cb = rstools.transpose(cb)
        B = rstools.B(a-1)
        print(A,B,b,cb,cj)
        fans = rstools.answer(A,B,b,cb,cj)
        fans0 = fans[0]
        fans1 = fans[1]
        yy = 150
        a = rstools.variables(Decodedstring[0])
        # Display each basic variable and its value in the result window
        for i in range(len(fans0)):
            Label(level1,text=a[fans1[i]]+" ="+str(fans0[i][0])).place(x=200,y=yy)
            yy+=20


if __name__ == "__main__":
    app = Tk()
    app.title("Linear Programming Problem")
    app.geometry("500x400")
    win = mainWindow(app)
    app.mainloop()
32.213333
90
0.527732
import rstools
from tkinter import *
from functools import partial


class mainWindow:
    def __init__(self,master) -> None:
        self.master = master
        self.constraint = IntVar()
        self.constring = []
        Label(self.master , text="Revised Simplex Method", font=("Arial",25)).pack()
        Label(self.master,text="select number of constraint").pack()
        Scale(self.master,variable=self.constraint,from_=2,to=4,orient=HORIZONTAL).pack()
        Button(self.master,text="Submit",command=self.next).pack()

    def next(self):
        level1 = Toplevel()
        level1.geometry("400x300")
        a = self.constraint.get()
        yy1 = 5
        for i in range(a+1):
            if i==0:
                l1 = Label(level1,text="Z")
                l1.place(x=120,y=yy1)
            else:
                l2 = Label(level1,text="Constraint"+str(i))
                l2.place(x=70,y=yy1)
            yy1+=20
        yy = 5
        for i in range(a+1):
            va = StringVar()
            e = Entry(level1,textvariable=va)
            e.place(x=135,y=yy)
            self.constring.append(va)
            yy+=20
        finalanswer = partial(self.finalanswer,level1)
        Button(level1,text="calculate",command=finalanswer).place(x=225,y=20*(a+2))
        level1.mainloop()

    def finalanswer(self,level1):
        Decodedstring = []
        for i in self.constring:
            Decodedstring.append(i.get())
        a = len(Decodedstring)
        cj = rstools.optimizationFunction(Decodedstring[0])
        A = []
        b = []
        for i in range(1,a):
            A.append(rstools.constraintsFunction(Decodedstring[i])[:-1])
            b.append([rstools.constraintsFunction(Decodedstring[i])[-1]])
        cb = [[0]*(a-1)]
        cb = rstools.transpose(cb)
        B = rstools.B(a-1)
        print(A,B,b,cb,cj)
        fans = rstools.answer(A,B,b,cb,cj)
        fans0 = fans[0]
        fans1 = fans[1]
        yy = 150
        a = rstools.variables(Decodedstring[0])
        for i in range(len(fans0)):
            Label(level1,text=a[fans1[i]]+" ="+str(fans0[i][0])).place(x=200,y=yy)
            yy+=20


if __name__ == "__main__":
    app = Tk()
    app.title("Linear Programming Problem")
    app.geometry("500x400")
    win = mainWindow(app)
    app.mainloop()
true
true
f70dc89229880140b19e82a38e9b341a4c22fd94
2,897
py
Python
hsdecomp/parse/info.py
popjy0312/hsdecomp
f89a8d6c98c864fa45ee80b92221a973d81bac31
[ "MIT" ]
99
2016-01-05T00:43:33.000Z
2021-08-06T15:23:34.000Z
hsdecomp/parse/info.py
popjy0312/hsdecomp
f89a8d6c98c864fa45ee80b92221a973d81bac31
[ "MIT" ]
5
2017-12-29T09:02:56.000Z
2020-12-07T01:54:19.000Z
hsdecomp/parse/info.py
popjy0312/hsdecomp
f89a8d6c98c864fa45ee80b92221a973d81bac31
[ "MIT" ]
21
2016-01-27T21:24:41.000Z
2020-11-28T09:11:18.000Z
import struct

from hsdecomp import ptrutil

def read_arg_pattern(settings, address):
    num_args = read_num_args(settings, address)
    func_type = read_function_type(settings, address)
    assert num_args >= len(func_type)
    return func_type + 'v' * (num_args - len(func_type))

def read_num_args(settings, address):
    return ptrutil.read_half_word(settings, settings.text_offset + address - settings.rt.halfword.size*5)

def read_function_type(settings, address):
    type_table = {
        3: '',
        4: 'n',
        5: 'p',
        12: 'nn',
        13: 'np',
        14: 'pn',
        15: 'pp',
        16: 'nnn',
        17: 'nnp',
        18: 'npn',
        19: 'npp',
        20: 'pnn',
        21: 'pnp',
        22: 'ppn',
        23: 'ppp',
        24: 'pppp',
        25: 'ppppp',
        26: 'pppppp',
        27: 'ppppppp',
        28: 'pppppppp'
    }
    type = ptrutil.read_half_word(settings, settings.text_offset + address - settings.rt.halfword.size*6)
    if type >= 12 and settings.version < (7, 8, 0):
        # Introduction of vector arguments
        type += 3
    if type in type_table:
        return type_table[type]
    elif type == 0:
        bitmap = ptrutil.read_word(settings, settings.text_offset + address - settings.rt.word.size*5)
        size = bitmap & (settings.word.size - 1)
        bits = bitmap >> settings.word.lg_size
        ret = ''
        for i in range(size):
            if bits % 2 == 0:
                ret += 'p'
            else:
                ret += 'n'
            bits //= 2
        return ret
    else:
        # TODO: Read large bitmaps
        assert False, "unknown function type"

def read_closure_type(settings, address):
    type_table = {
        1: 'constructor',
        2: 'constructor (1 ptr, 0 nonptr)',
        3: 'constructor (0 ptr, 1 nonptr)',
        4: 'constructor (2 ptr, 0 nonptr)',
        5: 'constructor (1 ptr, 1 nonptr)',
        6: 'constructor (0 ptr, 2 nonptr)',
        7: 'constructor (static)',
        8: 'constructor (no CAF, static)',
        9: 'function',
        10: 'function (1 ptr, 0 nonptr)',
        11: 'function (0 ptr, 1 nonptr)',
        12: 'function (2 ptr, 0 nonptr)',
        13: 'function (1 ptr, 1 nonptr)',
        14: 'function (0 ptr, 2 nonptr)',
        15: 'function (static)',
        16: 'thunk',
        17: 'thunk (1 ptr, 0 nonptr)',
        18: 'thunk (0 ptr, 1 nonptr)',
        19: 'thunk (2 ptr, 0 nonptr)',
        20: 'thunk (1 ptr, 1 nonptr)',
        21: 'thunk (0 ptr, 2 nonptr)',
        22: 'thunk (static)',
        23: 'selector',
        28: 'indirection',
        29: 'indirection (permanent)',
        30: 'indirection (static)'
    }
    type = ptrutil.read_half_word(settings, settings.text_offset + address - settings.rt.halfword.size*2)
    if type in type_table:
        return type_table[type]
    else:
        return 'unknown: ' + str(type)
31.150538
105
0.539524
import struct

from hsdecomp import ptrutil

def read_arg_pattern(settings, address):
    num_args = read_num_args(settings, address)
    func_type = read_function_type(settings, address)
    assert num_args >= len(func_type)
    return func_type + 'v' * (num_args - len(func_type))

def read_num_args(settings, address):
    return ptrutil.read_half_word(settings, settings.text_offset + address - settings.rt.halfword.size*5)

def read_function_type(settings, address):
    type_table = {
        3: '',
        4: 'n',
        5: 'p',
        12: 'nn',
        13: 'np',
        14: 'pn',
        15: 'pp',
        16: 'nnn',
        17: 'nnp',
        18: 'npn',
        19: 'npp',
        20: 'pnn',
        21: 'pnp',
        22: 'ppn',
        23: 'ppp',
        24: 'pppp',
        25: 'ppppp',
        26: 'pppppp',
        27: 'ppppppp',
        28: 'pppppppp'
    }
    type = ptrutil.read_half_word(settings, settings.text_offset + address - settings.rt.halfword.size*6)
    if type >= 12 and settings.version < (7, 8, 0):
        type += 3
    if type in type_table:
        return type_table[type]
    elif type == 0:
        bitmap = ptrutil.read_word(settings, settings.text_offset + address - settings.rt.word.size*5)
        size = bitmap & (settings.word.size - 1)
        bits = bitmap >> settings.word.lg_size
        ret = ''
        for i in range(size):
            if bits % 2 == 0:
                ret += 'p'
            else:
                ret += 'n'
            bits //= 2
        return ret
    else:
        assert False, "unknown function type"

def read_closure_type(settings, address):
    type_table = {
        1: 'constructor',
        2: 'constructor (1 ptr, 0 nonptr)',
        3: 'constructor (0 ptr, 1 nonptr)',
        4: 'constructor (2 ptr, 0 nonptr)',
        5: 'constructor (1 ptr, 1 nonptr)',
        6: 'constructor (0 ptr, 2 nonptr)',
        7: 'constructor (static)',
        8: 'constructor (no CAF, static)',
        9: 'function',
        10: 'function (1 ptr, 0 nonptr)',
        11: 'function (0 ptr, 1 nonptr)',
        12: 'function (2 ptr, 0 nonptr)',
        13: 'function (1 ptr, 1 nonptr)',
        14: 'function (0 ptr, 2 nonptr)',
        15: 'function (static)',
        16: 'thunk',
        17: 'thunk (1 ptr, 0 nonptr)',
        18: 'thunk (0 ptr, 1 nonptr)',
        19: 'thunk (2 ptr, 0 nonptr)',
        20: 'thunk (1 ptr, 1 nonptr)',
        21: 'thunk (0 ptr, 2 nonptr)',
        22: 'thunk (static)',
        23: 'selector',
        28: 'indirection',
        29: 'indirection (permanent)',
        30: 'indirection (static)'
    }
    type = ptrutil.read_half_word(settings, settings.text_offset + address - settings.rt.halfword.size*2)
    if type in type_table:
        return type_table[type]
    else:
        return 'unknown: ' + str(type)
true
true
f70dc8adfb1822a30031714d95fe2f269af4ee84
7,196
py
Python
one/tests/util.py
int-brain-lab/ONE
8766cd27308ddc2c247acb56685be3b2ce204390
[ "MIT" ]
5
2021-08-05T07:48:18.000Z
2022-01-04T15:14:04.000Z
one/tests/util.py
int-brain-lab/ONE
8766cd27308ddc2c247acb56685be3b2ce204390
[ "MIT" ]
7
2021-07-01T15:44:33.000Z
2021-08-31T14:12:32.000Z
one/tests/util.py
int-brain-lab/ONE
8766cd27308ddc2c247acb56685be3b2ce204390
[ "MIT" ]
2
2021-08-11T11:55:55.000Z
2021-12-05T14:50:57.000Z
"""Utilities functions for setting up test fixtures.""" import tempfile from pathlib import Path import shutil import json from uuid import uuid4 import pandas as pd import numpy as np from iblutil.io.parquet import uuid2np, np2str import one.params def set_up_env(use_temp_cache=True) -> tempfile.TemporaryDirectory: """ Create a temporary directory and copy cache fixtures over. Parameters ---------- use_temp_cache : bool If True, copies REST cache fixtures to the temporary directory, otherwise they are copied to the directory returned by one.params.get_params_dir Returns ------- tempfile.TemporaryDirectory The temporary directory containing the test ONE caches """ fixture = Path(__file__).parent.joinpath('fixtures') tempdir = tempfile.TemporaryDirectory() # Copy cache files to temporary directory for cache_file in ('sessions', 'datasets'): filename = shutil.copy(fixture / f'{cache_file}.pqt', tempdir.name) assert Path(filename).exists() # Copy cached rest responses rest_cache_location = Path(tempdir.name) / '.one' if use_temp_cache else None setup_rest_cache(rest_cache_location) return tempdir def setup_rest_cache(param_dir=None): """Copy REST cache fixtures to the .one parameter directory. Parameters ---------- param_dir : str, pathlib.Path The location of the ONE params directory (e.g. ~/.one) """ fixture = Path(__file__).parent.joinpath('fixtures') path_parts = ('.rest', 'test.alyx.internationalbrainlab.org', 'https') rest_cache_dir = Path(param_dir or one.params.get_params_dir()).joinpath(*path_parts) # Ensure empty shutil.rmtree(rest_cache_dir, ignore_errors=True) rest_cache_dir.mkdir(parents=True, exist_ok=True) # Copy over fixtures for file in fixture.joinpath('rest_responses').glob('*'): filename = shutil.copy(file, rest_cache_dir) assert Path(filename).exists() def create_file_tree(one): """Touch all the files in the datasets table. Parameters ---------- one : one.api.One An instance of One containing cache tables to use. """ # Create dset files from cache for session_path, rel_path in one._cache.datasets[['session_path', 'rel_path']].values: filepath = Path(one.cache_dir).joinpath(session_path, rel_path) filepath.parent.mkdir(exist_ok=True, parents=True) filepath.touch() def setup_test_params(token=False, cache_dir=None): """ Copies cache parameter fixture to .one directory. Parameters ---------- token : bool If true, save a token file so that client doesn't hit auth endpoint cache_dir : str, pathlib.Path The cache_dir to save """ params_dir = Path(one.params.get_params_dir()) fixture = Path(__file__).parent.joinpath('fixtures') test_pars = '.test.alyx.internationalbrainlab.org' if not list(params_dir.glob(test_pars)): filename = shutil.copy(fixture / 'params' / test_pars, params_dir) assert Path(filename).exists() # Add to cache map map_file = params_dir / '.caches' if not map_file.exists(): shutil.copy(fixture / 'params' / '.caches', map_file) assert Path(filename).exists() with open(map_file, 'r+') as f: data = json.load(f) data['CLIENT_MAP'][test_pars[1:]] = str(cache_dir or '') f.seek(0) json.dump(data, f) f.truncate() # Add token to file so db not hit if token: pars = one.params.get(client=test_pars[1:]) if not getattr(pars, 'TOKEN', False): one.params.save(pars.set('TOKEN', {'token': 'T0k3N'}), test_pars[1:]) def revisions_datasets_table(collections=('', 'alf/probe00', 'alf/probe01'), revisions=('', '2020-01-08', '2021-07-06'), object='spikes', attributes=('times', 'waveforems')): """Returns a datasets cache DataFrame containing datasets with revision folders. 
As there are no revised datasets on the test databases, this function acts as a fixture for testing the filtering of datasets by a revision. Parameters ---------- collections : tuple A list of collections revisions : tuple A list of revisions object : str An ALF object attributes : tuple A list of ALF attributes Returns ------- pd.DataFrame A datasets cache table containing datasets made from the input names """ rel_path = [] for attr in attributes: for collec in collections: for rev in (f'#{x}#' if x else '' for x in revisions): rel_path.append('/'.join(x for x in (collec, rev, f'{object}.{attr}.npy') if x)) ids = uuid2np([uuid4() for _ in range(len(rel_path))]) eid_0, eid_1 = uuid2np([uuid4()])[0] return pd.DataFrame(data={ 'rel_path': rel_path, 'session_path': 'subject/1900-01-01/001', 'file_size': None, 'hash': None, 'eid_0': eid_0, 'eid_1': eid_1 }, index=[ids[:, 0], ids[:, 1]]) def create_schema_cache(param_dir=None): """Save REST cache file for docs/ endpoint. Ensures the database isn't hit when the rest_schemas property is accessed. Parameters ---------- param_dir : str, pathlib.Path The location of the parameter directory. If None, the default one is used. """ actions = dict.fromkeys(['list', 'read', 'create', 'update', 'partial_update', 'delete']) endpoints = ['cache', 'dataset-types', 'datasets', 'downloads', 'insertions', 'sessions'] path_parts = ('.rest', 'test.alyx.internationalbrainlab.org', 'https') rest_cache_dir = Path(param_dir or one.params.get_params_dir()).joinpath(*path_parts) with open(rest_cache_dir / '1baff95c2d0e31059720a3716ad5b5a34b61a207', 'r') as f: json.dump({k: actions for k in endpoints}, f) def get_file(root: str, str_id: str) -> str: """ A stub function for iblutil.io.params.getfile. Allows the injection of a different param dir. Parameters ---------- root : str, pathlib.Path The root directory of the new parameters str_id : str The parameter string identifier Returns ------- str The parameter file path """ parts = ['.' + p if not p.startswith('.') else p for p in Path(str_id).parts] pfile = Path(root, *parts).as_posix() return pfile def caches_int2str(caches): """Convert int ids to str ids for cache tables. Parameters ---------- caches : Bunch A bunch of cache tables (from One._cache) """ for table in ('sessions', 'datasets'): # Set integer uuids to NaN cache = caches[table].reset_index() int_cols = cache.filter(regex=r'_\d{1}$').columns for i in range(0, len(int_cols), 2): name = int_cols.values[i].rsplit('_', 1)[0] cache[name] = np2str(cache[int_cols[i:i + 2]]) cache[int_cols] = np.nan caches[table] = cache.set_index('id')
32.125
98
0.63174
import tempfile from pathlib import Path import shutil import json from uuid import uuid4 import pandas as pd import numpy as np from iblutil.io.parquet import uuid2np, np2str import one.params def set_up_env(use_temp_cache=True) -> tempfile.TemporaryDirectory: fixture = Path(__file__).parent.joinpath('fixtures') tempdir = tempfile.TemporaryDirectory() for cache_file in ('sessions', 'datasets'): filename = shutil.copy(fixture / f'{cache_file}.pqt', tempdir.name) assert Path(filename).exists() rest_cache_location = Path(tempdir.name) / '.one' if use_temp_cache else None setup_rest_cache(rest_cache_location) return tempdir def setup_rest_cache(param_dir=None): fixture = Path(__file__).parent.joinpath('fixtures') path_parts = ('.rest', 'test.alyx.internationalbrainlab.org', 'https') rest_cache_dir = Path(param_dir or one.params.get_params_dir()).joinpath(*path_parts) shutil.rmtree(rest_cache_dir, ignore_errors=True) rest_cache_dir.mkdir(parents=True, exist_ok=True) for file in fixture.joinpath('rest_responses').glob('*'): filename = shutil.copy(file, rest_cache_dir) assert Path(filename).exists() def create_file_tree(one): for session_path, rel_path in one._cache.datasets[['session_path', 'rel_path']].values: filepath = Path(one.cache_dir).joinpath(session_path, rel_path) filepath.parent.mkdir(exist_ok=True, parents=True) filepath.touch() def setup_test_params(token=False, cache_dir=None): params_dir = Path(one.params.get_params_dir()) fixture = Path(__file__).parent.joinpath('fixtures') test_pars = '.test.alyx.internationalbrainlab.org' if not list(params_dir.glob(test_pars)): filename = shutil.copy(fixture / 'params' / test_pars, params_dir) assert Path(filename).exists() map_file = params_dir / '.caches' if not map_file.exists(): shutil.copy(fixture / 'params' / '.caches', map_file) assert Path(filename).exists() with open(map_file, 'r+') as f: data = json.load(f) data['CLIENT_MAP'][test_pars[1:]] = str(cache_dir or '') f.seek(0) json.dump(data, f) f.truncate() if token: pars = one.params.get(client=test_pars[1:]) if not getattr(pars, 'TOKEN', False): one.params.save(pars.set('TOKEN', {'token': 'T0k3N'}), test_pars[1:]) def revisions_datasets_table(collections=('', 'alf/probe00', 'alf/probe01'), revisions=('', '2020-01-08', '2021-07-06'), object='spikes', attributes=('times', 'waveforems')): rel_path = [] for attr in attributes: for collec in collections: for rev in (f'#{x}#' if x else '' for x in revisions): rel_path.append('/'.join(x for x in (collec, rev, f'{object}.{attr}.npy') if x)) ids = uuid2np([uuid4() for _ in range(len(rel_path))]) eid_0, eid_1 = uuid2np([uuid4()])[0] return pd.DataFrame(data={ 'rel_path': rel_path, 'session_path': 'subject/1900-01-01/001', 'file_size': None, 'hash': None, 'eid_0': eid_0, 'eid_1': eid_1 }, index=[ids[:, 0], ids[:, 1]]) def create_schema_cache(param_dir=None): actions = dict.fromkeys(['list', 'read', 'create', 'update', 'partial_update', 'delete']) endpoints = ['cache', 'dataset-types', 'datasets', 'downloads', 'insertions', 'sessions'] path_parts = ('.rest', 'test.alyx.internationalbrainlab.org', 'https') rest_cache_dir = Path(param_dir or one.params.get_params_dir()).joinpath(*path_parts) with open(rest_cache_dir / '1baff95c2d0e31059720a3716ad5b5a34b61a207', 'r') as f: json.dump({k: actions for k in endpoints}, f) def get_file(root: str, str_id: str) -> str: parts = ['.' 
+ p if not p.startswith('.') else p for p in Path(str_id).parts] pfile = Path(root, *parts).as_posix() return pfile def caches_int2str(caches): for table in ('sessions', 'datasets'): cache = caches[table].reset_index() int_cols = cache.filter(regex=r'_\d{1}$').columns for i in range(0, len(int_cols), 2): name = int_cols.values[i].rsplit('_', 1)[0] cache[name] = np2str(cache[int_cols[i:i + 2]]) cache[int_cols] = np.nan caches[table] = cache.set_index('id')
true
true
f70dc9c588d7235403908c4d6bfb12e5f2a8c107
2,813
py
Python
datalabeling/export_data.py
thesugar/python-docs-samples
1a59ca688f1d7602d52cd4088fa7b6e3afe0afd0
[ "Apache-2.0" ]
1
2020-06-04T16:50:49.000Z
2020-06-04T16:50:49.000Z
datalabeling/export_data.py
thesugar/python-docs-samples
1a59ca688f1d7602d52cd4088fa7b6e3afe0afd0
[ "Apache-2.0" ]
1
2020-07-23T10:47:32.000Z
2020-07-23T10:47:32.000Z
datalabeling/export_data.py
thesugar/python-docs-samples
1a59ca688f1d7602d52cd4088fa7b6e3afe0afd0
[ "Apache-2.0" ]
1
2020-05-29T20:33:18.000Z
2020-05-29T20:33:18.000Z
#!/usr/bin/env python

# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os

from google.api_core.client_options import ClientOptions


# [START datalabeling_export_data_beta]
def export_data(dataset_resource_name, annotated_dataset_resource_name,
                export_gcs_uri):
    """Exports a dataset from the given Google Cloud project."""
    from google.cloud import datalabeling_v1beta1 as datalabeling
    client = datalabeling.DataLabelingServiceClient()
    # [END datalabeling_export_data_beta]
    # If provided, use a provided test endpoint - this will prevent tests on
    # this snippet from triggering any action by a real human
    if 'DATALABELING_ENDPOINT' in os.environ:
        opts = ClientOptions(api_endpoint=os.getenv('DATALABELING_ENDPOINT'))
        client = datalabeling.DataLabelingServiceClient(client_options=opts)
    # [START datalabeling_export_data_beta]

    gcs_destination = datalabeling.types.GcsDestination(
        output_uri=export_gcs_uri, mime_type='text/csv')

    output_config = datalabeling.types.OutputConfig(
        gcs_destination=gcs_destination)

    response = client.export_data(
        dataset_resource_name,
        annotated_dataset_resource_name,
        output_config
    )

    print('Dataset ID: {}\n'.format(response.result().dataset))
    print('Output config:')
    print('\tGcs destination:')
    print('\t\tOutput URI: {}\n'.format(
        response.result().output_config.gcs_destination.output_uri))
# [END datalabeling_export_data_beta]


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter
    )

    parser.add_argument(
        '--dataset-resource-name',
        help='Dataset resource name. Required.',
        required=True
    )

    parser.add_argument(
        '--annotated-dataset-resource-name',
        help='Annotated Dataset resource name. Required.',
        required=True
    )

    parser.add_argument(
        '--export-gcs-uri',
        help='The export GCS URI. Required.',
        required=True
    )

    args = parser.parse_args()

    export_data(
        args.dataset_resource_name,
        args.annotated_dataset_resource_name,
        args.export_gcs_uri
    )
31.965909
77
0.713829
import argparse
import os

from google.api_core.client_options import ClientOptions


def export_data(dataset_resource_name, annotated_dataset_resource_name,
                export_gcs_uri):
    from google.cloud import datalabeling_v1beta1 as datalabeling
    client = datalabeling.DataLabelingServiceClient()
    if 'DATALABELING_ENDPOINT' in os.environ:
        opts = ClientOptions(api_endpoint=os.getenv('DATALABELING_ENDPOINT'))
        client = datalabeling.DataLabelingServiceClient(client_options=opts)

    gcs_destination = datalabeling.types.GcsDestination(
        output_uri=export_gcs_uri, mime_type='text/csv')

    output_config = datalabeling.types.OutputConfig(
        gcs_destination=gcs_destination)

    response = client.export_data(
        dataset_resource_name,
        annotated_dataset_resource_name,
        output_config
    )

    print('Dataset ID: {}\n'.format(response.result().dataset))
    print('Output config:')
    print('\tGcs destination:')
    print('\t\tOutput URI: {}\n'.format(
        response.result().output_config.gcs_destination.output_uri))


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter
    )

    parser.add_argument(
        '--dataset-resource-name',
        help='Dataset resource name. Required.',
        required=True
    )

    parser.add_argument(
        '--annotated-dataset-resource-name',
        help='Annotated Dataset resource name. Required.',
        required=True
    )

    parser.add_argument(
        '--export-gcs-uri',
        help='The export GCS URI. Required.',
        required=True
    )

    args = parser.parse_args()

    export_data(
        args.dataset_resource_name,
        args.annotated_dataset_resource_name,
        args.export_gcs_uri
    )
true
true
f70dca2728d6f341e94b533639987379d7d8a49d
635
py
Python
mixpanel_django_graphos/urls.py
sayonetech/mixpanel-django-graphos
cf74dd73ecb62a505d507bc8d122db397a2c6f34
[ "MIT" ]
null
null
null
mixpanel_django_graphos/urls.py
sayonetech/mixpanel-django-graphos
cf74dd73ecb62a505d507bc8d122db397a2c6f34
[ "MIT" ]
null
null
null
mixpanel_django_graphos/urls.py
sayonetech/mixpanel-django-graphos
cf74dd73ecb62a505d507bc8d122db397a2c6f34
[ "MIT" ]
null
null
null
from django.conf.urls import url
from django.contrib import admin

from mixpanel_django_graphos.views import ReportActivityView

admin.site.index_template = 'admin/index.html'
admin.autodiscover()


def get_admin_urls(urls):
    """
    Extend admin to include additional urls
    """
    def get_urls():
        my_urls = [url(r'^activity-report/$', admin.site.admin_view(
            ReportActivityView.as_view()), name='activity-report')]
        return my_urls + urls
    return get_urls


admin_urls = get_admin_urls(admin.site.get_urls())
admin.site.get_urls = admin_urls

urlpatterns = [
    url(r'^admin/', admin.site.urls),
]
23.518519
68
0.708661
from django.conf.urls import url
from django.contrib import admin

from mixpanel_django_graphos.views import ReportActivityView

admin.site.index_template = 'admin/index.html'
admin.autodiscover()


def get_admin_urls(urls):
    def get_urls():
        my_urls = [url(r'^activity-report/$', admin.site.admin_view(
            ReportActivityView.as_view()), name='activity-report')]
        return my_urls + urls
    return get_urls


admin_urls = get_admin_urls(admin.site.get_urls())
admin.site.get_urls = admin_urls

urlpatterns = [
    url(r'^admin/', admin.site.urls),
]
true
true
f70dca3c878f2c608ee4decf93cecab1952362b2
27,393
py
Python
tests/keras/layers/recurrent_test.py
mduranmustafa/keras
d4a14ee54728ac8ea6c5ffbf41f559662dcfba46
[ "MIT" ]
75
2018-08-03T01:10:36.000Z
2022-02-25T05:08:39.000Z
tests/keras/layers/recurrent_test.py
coderclear/ConvGRU
c458024d5c379ef990f72b6f6b738301e1895cff
[ "MIT" ]
9
2018-08-14T14:33:58.000Z
2021-09-06T07:04:14.000Z
tests/keras/layers/recurrent_test.py
coderclear/ConvGRU
c458024d5c379ef990f72b6f6b738301e1895cff
[ "MIT" ]
19
2018-08-11T20:44:42.000Z
2021-12-01T00:41:52.000Z
import pytest import numpy as np from numpy.testing import assert_allclose import keras from keras.utils.test_utils import layer_test from keras.utils.test_utils import keras_test from keras.layers import recurrent from keras.layers import embeddings from keras.models import Sequential from keras.models import Model from keras.engine.topology import Input from keras.layers.core import Masking from keras import regularizers from keras import backend as K num_samples, timesteps, embedding_dim, units = 2, 5, 4, 3 embedding_num = 12 @keras_test def rnn_test(f): """ All the recurrent layers share the same interface, so we can run through them with a single function. """ f = keras_test(f) return pytest.mark.parametrize('layer_class', [ recurrent.SimpleRNN, recurrent.GRU, recurrent.LSTM ])(f) @rnn_test def test_return_sequences(layer_class): layer_test(layer_class, kwargs={'units': units, 'return_sequences': True}, input_shape=(num_samples, timesteps, embedding_dim)) @rnn_test def test_dynamic_behavior(layer_class): layer = layer_class(units, input_shape=(None, embedding_dim)) model = Sequential() model.add(layer) model.compile('sgd', 'mse') x = np.random.random((num_samples, timesteps, embedding_dim)) y = np.random.random((num_samples, units)) model.train_on_batch(x, y) @rnn_test def test_stateful_invalid_use(layer_class): layer = layer_class(units, stateful=True, batch_input_shape=(num_samples, timesteps, embedding_dim)) model = Sequential() model.add(layer) model.compile('sgd', 'mse') x = np.random.random((num_samples * 2, timesteps, embedding_dim)) y = np.random.random((num_samples * 2, units)) with pytest.raises(ValueError): model.fit(x, y) with pytest.raises(ValueError): model.predict(x, batch_size=num_samples + 1) @rnn_test @pytest.mark.skipif((K.backend() == 'cntk'), reason='Not yet supported.') def test_dropout(layer_class): for unroll in [True, False]: layer_test(layer_class, kwargs={'units': units, 'dropout': 0.1, 'recurrent_dropout': 0.1, 'unroll': unroll}, input_shape=(num_samples, timesteps, embedding_dim)) # Test that dropout is applied during training x = K.ones((num_samples, timesteps, embedding_dim)) layer = layer_class(units, dropout=0.5, recurrent_dropout=0.5, input_shape=(timesteps, embedding_dim)) y = layer(x) assert y._uses_learning_phase y = layer(x, training=True) assert not getattr(y, '_uses_learning_phase') # Test that dropout is not applied during testing x = np.random.random((num_samples, timesteps, embedding_dim)) layer = layer_class(units, dropout=0.5, recurrent_dropout=0.5, unroll=unroll, input_shape=(timesteps, embedding_dim)) model = Sequential([layer]) assert model.uses_learning_phase y1 = model.predict(x) y2 = model.predict(x) assert_allclose(y1, y2) @rnn_test def test_statefulness(layer_class): model = Sequential() model.add(embeddings.Embedding(embedding_num, embedding_dim, mask_zero=True, input_length=timesteps, batch_input_shape=(num_samples, timesteps))) layer = layer_class(units, return_sequences=False, stateful=True, weights=None) model.add(layer) model.compile(optimizer='sgd', loss='mse') out1 = model.predict(np.ones((num_samples, timesteps))) assert(out1.shape == (num_samples, units)) # train once so that the states change model.train_on_batch(np.ones((num_samples, timesteps)), np.ones((num_samples, units))) out2 = model.predict(np.ones((num_samples, timesteps))) # if the state is not reset, output should be different assert(out1.max() != out2.max()) # check that output changes after states are reset # (even though the model itself didn't change) 
layer.reset_states() out3 = model.predict(np.ones((num_samples, timesteps))) assert(out2.max() != out3.max()) # check that container-level reset_states() works model.reset_states() out4 = model.predict(np.ones((num_samples, timesteps))) assert_allclose(out3, out4, atol=1e-5) # check that the call to `predict` updated the states out5 = model.predict(np.ones((num_samples, timesteps))) assert(out4.max() != out5.max()) @rnn_test def test_masking_correctness(layer_class): # Check masking: output with left padding and right padding # should be the same. model = Sequential() model.add(embeddings.Embedding(embedding_num, embedding_dim, mask_zero=True, input_length=timesteps, batch_input_shape=(num_samples, timesteps))) layer = layer_class(units, return_sequences=False) model.add(layer) model.compile(optimizer='sgd', loss='mse') left_padded_input = np.ones((num_samples, timesteps)) left_padded_input[0, :1] = 0 left_padded_input[1, :2] = 0 out6 = model.predict(left_padded_input) right_padded_input = np.ones((num_samples, timesteps)) right_padded_input[0, -1:] = 0 right_padded_input[1, -2:] = 0 out7 = model.predict(right_padded_input) assert_allclose(out7, out6, atol=1e-5) @rnn_test def test_implementation_mode(layer_class): for mode in [1, 2]: # Without dropout layer_test(layer_class, kwargs={'units': units, 'implementation': mode}, input_shape=(num_samples, timesteps, embedding_dim)) # With dropout layer_test(layer_class, kwargs={'units': units, 'implementation': mode, 'dropout': 0.1, 'recurrent_dropout': 0.1}, input_shape=(num_samples, timesteps, embedding_dim)) # Without bias layer_test(layer_class, kwargs={'units': units, 'implementation': mode, 'use_bias': False}, input_shape=(num_samples, timesteps, embedding_dim)) @rnn_test def test_regularizer(layer_class): layer = layer_class(units, return_sequences=False, weights=None, input_shape=(timesteps, embedding_dim), kernel_regularizer=regularizers.l1(0.01), recurrent_regularizer=regularizers.l1(0.01), bias_regularizer='l2') layer.build((None, None, embedding_dim)) assert len(layer.losses) == 3 assert len(layer.cell.losses) == 3 layer = layer_class(units, return_sequences=False, weights=None, input_shape=(timesteps, embedding_dim), activity_regularizer='l2') assert layer.activity_regularizer x = K.variable(np.ones((num_samples, timesteps, embedding_dim))) layer(x) assert len(layer.cell.get_losses_for(x)) == 0 assert len(layer.get_losses_for(x)) == 1 @rnn_test def test_trainability(layer_class): layer = layer_class(units) layer.build((None, None, embedding_dim)) assert len(layer.weights) == 3 assert len(layer.trainable_weights) == 3 assert len(layer.non_trainable_weights) == 0 layer.trainable = False assert len(layer.weights) == 3 assert len(layer.trainable_weights) == 0 assert len(layer.non_trainable_weights) == 3 layer.trainable = True assert len(layer.weights) == 3 assert len(layer.trainable_weights) == 3 assert len(layer.non_trainable_weights) == 0 @keras_test def test_masking_layer(): ''' This test based on a previously failing issue here: https://github.com/fchollet/keras/issues/1567 ''' inputs = np.random.random((6, 3, 4)) targets = np.abs(np.random.random((6, 3, 5))) targets /= targets.sum(axis=-1, keepdims=True) model = Sequential() model.add(Masking(input_shape=(3, 4))) model.add(recurrent.SimpleRNN(units=5, return_sequences=True, unroll=False)) model.compile(loss='categorical_crossentropy', optimizer='adam') model.fit(inputs, targets, epochs=1, batch_size=100, verbose=1) model = Sequential() model.add(Masking(input_shape=(3, 4))) 
model.add(recurrent.SimpleRNN(units=5, return_sequences=True, unroll=True)) model.compile(loss='categorical_crossentropy', optimizer='adam') model.fit(inputs, targets, epochs=1, batch_size=100, verbose=1) @rnn_test def test_from_config(layer_class): stateful_flags = (False, True) for stateful in stateful_flags: l1 = layer_class(units=1, stateful=stateful) l2 = layer_class.from_config(l1.get_config()) assert l1.get_config() == l2.get_config() @rnn_test def test_specify_initial_state_keras_tensor(layer_class): num_states = 2 if layer_class is recurrent.LSTM else 1 # Test with Keras tensor inputs = Input((timesteps, embedding_dim)) initial_state = [Input((units,)) for _ in range(num_states)] layer = layer_class(units) if len(initial_state) == 1: output = layer(inputs, initial_state=initial_state[0]) else: output = layer(inputs, initial_state=initial_state) assert initial_state[0] in layer.inbound_nodes[0].input_tensors model = Model([inputs] + initial_state, output) model.compile(loss='categorical_crossentropy', optimizer='adam') inputs = np.random.random((num_samples, timesteps, embedding_dim)) initial_state = [np.random.random((num_samples, units)) for _ in range(num_states)] targets = np.random.random((num_samples, units)) model.fit([inputs] + initial_state, targets) @rnn_test def test_specify_initial_state_non_keras_tensor(layer_class): num_states = 2 if layer_class is recurrent.LSTM else 1 # Test with non-Keras tensor inputs = Input((timesteps, embedding_dim)) initial_state = [K.random_normal_variable((num_samples, units), 0, 1) for _ in range(num_states)] layer = layer_class(units) output = layer(inputs, initial_state=initial_state) model = Model(inputs, output) model.compile(loss='categorical_crossentropy', optimizer='adam') inputs = np.random.random((num_samples, timesteps, embedding_dim)) targets = np.random.random((num_samples, units)) model.fit(inputs, targets) @rnn_test def test_reset_states_with_values(layer_class): num_states = 2 if layer_class is recurrent.LSTM else 1 layer = layer_class(units, stateful=True) layer.build((num_samples, timesteps, embedding_dim)) layer.reset_states() assert len(layer.states) == num_states assert layer.states[0] is not None np.testing.assert_allclose(K.eval(layer.states[0]), np.zeros(K.int_shape(layer.states[0])), atol=1e-4) state_shapes = [K.int_shape(state) for state in layer.states] values = [np.ones(shape) for shape in state_shapes] if len(values) == 1: values = values[0] layer.reset_states(values) np.testing.assert_allclose(K.eval(layer.states[0]), np.ones(K.int_shape(layer.states[0])), atol=1e-4) # Test fit with invalid data with pytest.raises(ValueError): layer.reset_states([1] * (len(layer.states) + 1)) @rnn_test def test_initial_states_as_other_inputs(layer_class): num_states = 2 if layer_class is recurrent.LSTM else 1 # Test with Keras tensor main_inputs = Input((timesteps, embedding_dim)) initial_state = [Input((units,)) for _ in range(num_states)] inputs = [main_inputs] + initial_state layer = layer_class(units) output = layer(inputs) assert initial_state[0] in layer.inbound_nodes[0].input_tensors model = Model(inputs, output) model.compile(loss='categorical_crossentropy', optimizer='adam') main_inputs = np.random.random((num_samples, timesteps, embedding_dim)) initial_state = [np.random.random((num_samples, units)) for _ in range(num_states)] targets = np.random.random((num_samples, units)) model.train_on_batch([main_inputs] + initial_state, targets) @rnn_test def test_specify_state_with_masking(layer_class): ''' This test based on a 
previously failing issue here: https://github.com/fchollet/keras/issues/1567 ''' num_states = 2 if layer_class is recurrent.LSTM else 1 inputs = Input((timesteps, embedding_dim)) _ = Masking()(inputs) initial_state = [Input((units,)) for _ in range(num_states)] output = layer_class(units)(inputs, initial_state=initial_state) model = Model([inputs] + initial_state, output) model.compile(loss='categorical_crossentropy', optimizer='adam') inputs = np.random.random((num_samples, timesteps, embedding_dim)) initial_state = [np.random.random((num_samples, units)) for _ in range(num_states)] targets = np.random.random((num_samples, units)) model.fit([inputs] + initial_state, targets) @rnn_test def test_return_state(layer_class): num_states = 2 if layer_class is recurrent.LSTM else 1 inputs = Input(batch_shape=(num_samples, timesteps, embedding_dim)) layer = layer_class(units, return_state=True, stateful=True) outputs = layer(inputs) output, state = outputs[0], outputs[1:] assert len(state) == num_states model = Model(inputs, state[0]) inputs = np.random.random((num_samples, timesteps, embedding_dim)) state = model.predict(inputs) np.testing.assert_allclose(K.eval(layer.states[0]), state, atol=1e-4) @rnn_test def test_state_reuse(layer_class): inputs = Input(batch_shape=(num_samples, timesteps, embedding_dim)) layer = layer_class(units, return_state=True, return_sequences=True) outputs = layer(inputs) output, state = outputs[0], outputs[1:] output = layer_class(units)(output, initial_state=state) model = Model(inputs, output) inputs = np.random.random((num_samples, timesteps, embedding_dim)) outputs = model.predict(inputs) @keras_test def test_minimal_rnn_cell_non_layer(): class MinimalRNNCell(object): def __init__(self, units, input_dim): self.units = units self.state_size = units self.kernel = keras.backend.variable( np.random.random((input_dim, units))) def call(self, inputs, states): prev_output = states[0] output = keras.backend.dot(inputs, self.kernel) + prev_output return output, [output] # Basic test case. cell = MinimalRNNCell(32, 5) x = keras.Input((None, 5)) layer = recurrent.RNN(cell) y = layer(x) model = keras.models.Model(x, y) model.compile(optimizer='rmsprop', loss='mse') model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32))) # Test stacking. cells = [MinimalRNNCell(8, 5), MinimalRNNCell(32, 8), MinimalRNNCell(32, 32)] layer = recurrent.RNN(cells) y = layer(x) model = keras.models.Model(x, y) model.compile(optimizer='rmsprop', loss='mse') model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32))) @keras_test def test_minimal_rnn_cell_non_layer_multiple_states(): class MinimalRNNCell(object): def __init__(self, units, input_dim): self.units = units self.state_size = (units, units) self.kernel = keras.backend.variable( np.random.random((input_dim, units))) def call(self, inputs, states): prev_output_1 = states[0] prev_output_2 = states[1] output = keras.backend.dot(inputs, self.kernel) output += prev_output_1 output -= prev_output_2 return output, [output * 2, output * 3] # Basic test case. cell = MinimalRNNCell(32, 5) x = keras.Input((None, 5)) layer = recurrent.RNN(cell) y = layer(x) model = keras.models.Model(x, y) model.compile(optimizer='rmsprop', loss='mse') model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32))) # Test stacking. 
cells = [MinimalRNNCell(8, 5), MinimalRNNCell(16, 8), MinimalRNNCell(32, 16)] layer = recurrent.RNN(cells) assert layer.cell.state_size == (32, 32, 16, 16, 8, 8) y = layer(x) model = keras.models.Model(x, y) model.compile(optimizer='rmsprop', loss='mse') model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32))) @keras_test def test_minimal_rnn_cell_layer(): class MinimalRNNCell(keras.layers.Layer): def __init__(self, units, **kwargs): self.units = units self.state_size = units super(MinimalRNNCell, self).__init__(**kwargs) def build(self, input_shape): self.kernel = self.add_weight(shape=(input_shape[-1], self.units), initializer='uniform', name='kernel') self.recurrent_kernel = self.add_weight( shape=(self.units, self.units), initializer='uniform', name='recurrent_kernel') self.built = True def call(self, inputs, states): prev_output = states[0] h = keras.backend.dot(inputs, self.kernel) output = h + keras.backend.dot(prev_output, self.recurrent_kernel) return output, [output] def get_config(self): config = {'units': self.units} base_config = super(MinimalRNNCell, self).get_config() return dict(list(base_config.items()) + list(config.items())) # Test basic case. x = keras.Input((None, 5)) cell = MinimalRNNCell(32) layer = recurrent.RNN(cell) y = layer(x) model = keras.models.Model(x, y) model.compile(optimizer='rmsprop', loss='mse') model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32))) # Test basic case serialization. x_np = np.random.random((6, 5, 5)) y_np = model.predict(x_np) weights = model.get_weights() config = layer.get_config() with keras.utils.CustomObjectScope({'MinimalRNNCell': MinimalRNNCell}): layer = recurrent.RNN.from_config(config) y = layer(x) model = keras.models.Model(x, y) model.set_weights(weights) y_np_2 = model.predict(x_np) assert_allclose(y_np, y_np_2, atol=1e-4) # Test stacking. cells = [MinimalRNNCell(8), MinimalRNNCell(12), MinimalRNNCell(32)] layer = recurrent.RNN(cells) y = layer(x) model = keras.models.Model(x, y) model.compile(optimizer='rmsprop', loss='mse') model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32))) # Test stacked RNN serialization. 
x_np = np.random.random((6, 5, 5)) y_np = model.predict(x_np) weights = model.get_weights() config = layer.get_config() with keras.utils.CustomObjectScope({'MinimalRNNCell': MinimalRNNCell}): layer = recurrent.RNN.from_config(config) y = layer(x) model = keras.models.Model(x, y) model.set_weights(weights) y_np_2 = model.predict(x_np) assert_allclose(y_np, y_np_2, atol=1e-4) @keras_test def test_stacked_rnn_attributes(): cells = [recurrent.LSTMCell(3), recurrent.LSTMCell(3, kernel_regularizer='l2')] layer = recurrent.RNN(cells) layer.build((None, None, 5)) # Test regularization losses assert len(layer.losses) == 1 # Test weights assert len(layer.trainable_weights) == 6 cells[0].trainable = False assert len(layer.trainable_weights) == 3 assert len(layer.non_trainable_weights) == 3 # Test `get_losses_for` x = keras.Input((None, 5)) y = K.sum(x) cells[0].add_loss(y, inputs=x) assert layer.get_losses_for(x) == [y] @rnn_test def test_batch_size_equal_one(layer_class): inputs = Input(batch_shape=(1, timesteps, embedding_dim)) layer = layer_class(units) outputs = layer(inputs) model = Model(inputs, outputs) model.compile('sgd', 'mse') x = np.random.random((1, timesteps, embedding_dim)) y = np.random.random((1, units)) model.train_on_batch(x, y) def test_rnn_cell_with_constants_layer(): class RNNCellWithConstants(keras.layers.Layer): def __init__(self, units, **kwargs): self.units = units self.state_size = units super(RNNCellWithConstants, self).__init__(**kwargs) def build(self, input_shape): if not isinstance(input_shape, list): raise TypeError('expects constants shape') [input_shape, constant_shape] = input_shape # will (and should) raise if more than one constant passed self.input_kernel = self.add_weight( shape=(input_shape[-1], self.units), initializer='uniform', name='kernel') self.recurrent_kernel = self.add_weight( shape=(self.units, self.units), initializer='uniform', name='recurrent_kernel') self.constant_kernel = self.add_weight( shape=(constant_shape[-1], self.units), initializer='uniform', name='constant_kernel') self.built = True def call(self, inputs, states, constants): [prev_output] = states [constant] = constants h_input = keras.backend.dot(inputs, self.input_kernel) h_state = keras.backend.dot(prev_output, self.recurrent_kernel) h_const = keras.backend.dot(constant, self.constant_kernel) output = h_input + h_state + h_const return output, [output] def get_config(self): config = {'units': self.units} base_config = super(RNNCellWithConstants, self).get_config() return dict(list(base_config.items()) + list(config.items())) # Test basic case. x = keras.Input((None, 5)) c = keras.Input((3,)) cell = RNNCellWithConstants(32) layer = recurrent.RNN(cell) y = layer(x, constants=c) model = keras.models.Model([x, c], y) model.compile(optimizer='rmsprop', loss='mse') model.train_on_batch( [np.zeros((6, 5, 5)), np.zeros((6, 3))], np.zeros((6, 32)) ) # Test basic case serialization. 
x_np = np.random.random((6, 5, 5)) c_np = np.random.random((6, 3)) y_np = model.predict([x_np, c_np]) weights = model.get_weights() config = layer.get_config() custom_objects = {'RNNCellWithConstants': RNNCellWithConstants} with keras.utils.CustomObjectScope(custom_objects): layer = recurrent.RNN.from_config(config.copy()) y = layer(x, constants=c) model = keras.models.Model([x, c], y) model.set_weights(weights) y_np_2 = model.predict([x_np, c_np]) assert_allclose(y_np, y_np_2, atol=1e-4) # test flat list inputs with keras.utils.CustomObjectScope(custom_objects): layer = recurrent.RNN.from_config(config.copy()) y = layer([x, c]) model = keras.models.Model([x, c], y) model.set_weights(weights) y_np_3 = model.predict([x_np, c_np]) assert_allclose(y_np, y_np_3, atol=1e-4) def test_rnn_cell_with_constants_layer_passing_initial_state(): class RNNCellWithConstants(keras.layers.Layer): def __init__(self, units, **kwargs): self.units = units self.state_size = units super(RNNCellWithConstants, self).__init__(**kwargs) def build(self, input_shape): if not isinstance(input_shape, list): raise TypeError('expects constants shape') [input_shape, constant_shape] = input_shape # will (and should) raise if more than one constant passed self.input_kernel = self.add_weight( shape=(input_shape[-1], self.units), initializer='uniform', name='kernel') self.recurrent_kernel = self.add_weight( shape=(self.units, self.units), initializer='uniform', name='recurrent_kernel') self.constant_kernel = self.add_weight( shape=(constant_shape[-1], self.units), initializer='uniform', name='constant_kernel') self.built = True def call(self, inputs, states, constants): [prev_output] = states [constant] = constants h_input = keras.backend.dot(inputs, self.input_kernel) h_state = keras.backend.dot(prev_output, self.recurrent_kernel) h_const = keras.backend.dot(constant, self.constant_kernel) output = h_input + h_state + h_const return output, [output] def get_config(self): config = {'units': self.units} base_config = super(RNNCellWithConstants, self).get_config() return dict(list(base_config.items()) + list(config.items())) # Test basic case. x = keras.Input((None, 5)) c = keras.Input((3,)) s = keras.Input((32,)) cell = RNNCellWithConstants(32) layer = recurrent.RNN(cell) y = layer(x, initial_state=s, constants=c) model = keras.models.Model([x, s, c], y) model.compile(optimizer='rmsprop', loss='mse') model.train_on_batch( [np.zeros((6, 5, 5)), np.zeros((6, 32)), np.zeros((6, 3))], np.zeros((6, 32)) ) # Test basic case serialization. 
x_np = np.random.random((6, 5, 5)) s_np = np.random.random((6, 32)) c_np = np.random.random((6, 3)) y_np = model.predict([x_np, s_np, c_np]) weights = model.get_weights() config = layer.get_config() custom_objects = {'RNNCellWithConstants': RNNCellWithConstants} with keras.utils.CustomObjectScope(custom_objects): layer = recurrent.RNN.from_config(config.copy()) y = layer(x, initial_state=s, constants=c) model = keras.models.Model([x, s, c], y) model.set_weights(weights) y_np_2 = model.predict([x_np, s_np, c_np]) assert_allclose(y_np, y_np_2, atol=1e-4) # verify that state is used y_np_2_different_s = model.predict([x_np, s_np + 10., c_np]) with pytest.raises(AssertionError): assert_allclose(y_np, y_np_2_different_s, atol=1e-4) # test flat list inputs with keras.utils.CustomObjectScope(custom_objects): layer = recurrent.RNN.from_config(config.copy()) y = layer([x, s, c]) model = keras.models.Model([x, s, c], y) model.set_weights(weights) y_np_3 = model.predict([x_np, s_np, c_np]) assert_allclose(y_np, y_np_3, atol=1e-4) if __name__ == '__main__': pytest.main([__file__])
35.807843
80
0.631694
import pytest import numpy as np from numpy.testing import assert_allclose import keras from keras.utils.test_utils import layer_test from keras.utils.test_utils import keras_test from keras.layers import recurrent from keras.layers import embeddings from keras.models import Sequential from keras.models import Model from keras.engine.topology import Input from keras.layers.core import Masking from keras import regularizers from keras import backend as K num_samples, timesteps, embedding_dim, units = 2, 5, 4, 3 embedding_num = 12 @keras_test def rnn_test(f): f = keras_test(f) return pytest.mark.parametrize('layer_class', [ recurrent.SimpleRNN, recurrent.GRU, recurrent.LSTM ])(f) @rnn_test def test_return_sequences(layer_class): layer_test(layer_class, kwargs={'units': units, 'return_sequences': True}, input_shape=(num_samples, timesteps, embedding_dim)) @rnn_test def test_dynamic_behavior(layer_class): layer = layer_class(units, input_shape=(None, embedding_dim)) model = Sequential() model.add(layer) model.compile('sgd', 'mse') x = np.random.random((num_samples, timesteps, embedding_dim)) y = np.random.random((num_samples, units)) model.train_on_batch(x, y) @rnn_test def test_stateful_invalid_use(layer_class): layer = layer_class(units, stateful=True, batch_input_shape=(num_samples, timesteps, embedding_dim)) model = Sequential() model.add(layer) model.compile('sgd', 'mse') x = np.random.random((num_samples * 2, timesteps, embedding_dim)) y = np.random.random((num_samples * 2, units)) with pytest.raises(ValueError): model.fit(x, y) with pytest.raises(ValueError): model.predict(x, batch_size=num_samples + 1) @rnn_test @pytest.mark.skipif((K.backend() == 'cntk'), reason='Not yet supported.') def test_dropout(layer_class): for unroll in [True, False]: layer_test(layer_class, kwargs={'units': units, 'dropout': 0.1, 'recurrent_dropout': 0.1, 'unroll': unroll}, input_shape=(num_samples, timesteps, embedding_dim)) x = K.ones((num_samples, timesteps, embedding_dim)) layer = layer_class(units, dropout=0.5, recurrent_dropout=0.5, input_shape=(timesteps, embedding_dim)) y = layer(x) assert y._uses_learning_phase y = layer(x, training=True) assert not getattr(y, '_uses_learning_phase') x = np.random.random((num_samples, timesteps, embedding_dim)) layer = layer_class(units, dropout=0.5, recurrent_dropout=0.5, unroll=unroll, input_shape=(timesteps, embedding_dim)) model = Sequential([layer]) assert model.uses_learning_phase y1 = model.predict(x) y2 = model.predict(x) assert_allclose(y1, y2) @rnn_test def test_statefulness(layer_class): model = Sequential() model.add(embeddings.Embedding(embedding_num, embedding_dim, mask_zero=True, input_length=timesteps, batch_input_shape=(num_samples, timesteps))) layer = layer_class(units, return_sequences=False, stateful=True, weights=None) model.add(layer) model.compile(optimizer='sgd', loss='mse') out1 = model.predict(np.ones((num_samples, timesteps))) assert(out1.shape == (num_samples, units)) model.train_on_batch(np.ones((num_samples, timesteps)), np.ones((num_samples, units))) out2 = model.predict(np.ones((num_samples, timesteps))) assert(out1.max() != out2.max()) layer.reset_states() out3 = model.predict(np.ones((num_samples, timesteps))) assert(out2.max() != out3.max()) # check that container-level reset_states() works model.reset_states() out4 = model.predict(np.ones((num_samples, timesteps))) assert_allclose(out3, out4, atol=1e-5) # check that the call to `predict` updated the states out5 = model.predict(np.ones((num_samples, timesteps))) assert(out4.max() 
!= out5.max()) @rnn_test def test_masking_correctness(layer_class): # Check masking: output with left padding and right padding # should be the same. model = Sequential() model.add(embeddings.Embedding(embedding_num, embedding_dim, mask_zero=True, input_length=timesteps, batch_input_shape=(num_samples, timesteps))) layer = layer_class(units, return_sequences=False) model.add(layer) model.compile(optimizer='sgd', loss='mse') left_padded_input = np.ones((num_samples, timesteps)) left_padded_input[0, :1] = 0 left_padded_input[1, :2] = 0 out6 = model.predict(left_padded_input) right_padded_input = np.ones((num_samples, timesteps)) right_padded_input[0, -1:] = 0 right_padded_input[1, -2:] = 0 out7 = model.predict(right_padded_input) assert_allclose(out7, out6, atol=1e-5) @rnn_test def test_implementation_mode(layer_class): for mode in [1, 2]: # Without dropout layer_test(layer_class, kwargs={'units': units, 'implementation': mode}, input_shape=(num_samples, timesteps, embedding_dim)) # With dropout layer_test(layer_class, kwargs={'units': units, 'implementation': mode, 'dropout': 0.1, 'recurrent_dropout': 0.1}, input_shape=(num_samples, timesteps, embedding_dim)) # Without bias layer_test(layer_class, kwargs={'units': units, 'implementation': mode, 'use_bias': False}, input_shape=(num_samples, timesteps, embedding_dim)) @rnn_test def test_regularizer(layer_class): layer = layer_class(units, return_sequences=False, weights=None, input_shape=(timesteps, embedding_dim), kernel_regularizer=regularizers.l1(0.01), recurrent_regularizer=regularizers.l1(0.01), bias_regularizer='l2') layer.build((None, None, embedding_dim)) assert len(layer.losses) == 3 assert len(layer.cell.losses) == 3 layer = layer_class(units, return_sequences=False, weights=None, input_shape=(timesteps, embedding_dim), activity_regularizer='l2') assert layer.activity_regularizer x = K.variable(np.ones((num_samples, timesteps, embedding_dim))) layer(x) assert len(layer.cell.get_losses_for(x)) == 0 assert len(layer.get_losses_for(x)) == 1 @rnn_test def test_trainability(layer_class): layer = layer_class(units) layer.build((None, None, embedding_dim)) assert len(layer.weights) == 3 assert len(layer.trainable_weights) == 3 assert len(layer.non_trainable_weights) == 0 layer.trainable = False assert len(layer.weights) == 3 assert len(layer.trainable_weights) == 0 assert len(layer.non_trainable_weights) == 3 layer.trainable = True assert len(layer.weights) == 3 assert len(layer.trainable_weights) == 3 assert len(layer.non_trainable_weights) == 0 @keras_test def test_masking_layer(): inputs = np.random.random((6, 3, 4)) targets = np.abs(np.random.random((6, 3, 5))) targets /= targets.sum(axis=-1, keepdims=True) model = Sequential() model.add(Masking(input_shape=(3, 4))) model.add(recurrent.SimpleRNN(units=5, return_sequences=True, unroll=False)) model.compile(loss='categorical_crossentropy', optimizer='adam') model.fit(inputs, targets, epochs=1, batch_size=100, verbose=1) model = Sequential() model.add(Masking(input_shape=(3, 4))) model.add(recurrent.SimpleRNN(units=5, return_sequences=True, unroll=True)) model.compile(loss='categorical_crossentropy', optimizer='adam') model.fit(inputs, targets, epochs=1, batch_size=100, verbose=1) @rnn_test def test_from_config(layer_class): stateful_flags = (False, True) for stateful in stateful_flags: l1 = layer_class(units=1, stateful=stateful) l2 = layer_class.from_config(l1.get_config()) assert l1.get_config() == l2.get_config() @rnn_test def test_specify_initial_state_keras_tensor(layer_class): 
num_states = 2 if layer_class is recurrent.LSTM else 1 # Test with Keras tensor inputs = Input((timesteps, embedding_dim)) initial_state = [Input((units,)) for _ in range(num_states)] layer = layer_class(units) if len(initial_state) == 1: output = layer(inputs, initial_state=initial_state[0]) else: output = layer(inputs, initial_state=initial_state) assert initial_state[0] in layer.inbound_nodes[0].input_tensors model = Model([inputs] + initial_state, output) model.compile(loss='categorical_crossentropy', optimizer='adam') inputs = np.random.random((num_samples, timesteps, embedding_dim)) initial_state = [np.random.random((num_samples, units)) for _ in range(num_states)] targets = np.random.random((num_samples, units)) model.fit([inputs] + initial_state, targets) @rnn_test def test_specify_initial_state_non_keras_tensor(layer_class): num_states = 2 if layer_class is recurrent.LSTM else 1 # Test with non-Keras tensor inputs = Input((timesteps, embedding_dim)) initial_state = [K.random_normal_variable((num_samples, units), 0, 1) for _ in range(num_states)] layer = layer_class(units) output = layer(inputs, initial_state=initial_state) model = Model(inputs, output) model.compile(loss='categorical_crossentropy', optimizer='adam') inputs = np.random.random((num_samples, timesteps, embedding_dim)) targets = np.random.random((num_samples, units)) model.fit(inputs, targets) @rnn_test def test_reset_states_with_values(layer_class): num_states = 2 if layer_class is recurrent.LSTM else 1 layer = layer_class(units, stateful=True) layer.build((num_samples, timesteps, embedding_dim)) layer.reset_states() assert len(layer.states) == num_states assert layer.states[0] is not None np.testing.assert_allclose(K.eval(layer.states[0]), np.zeros(K.int_shape(layer.states[0])), atol=1e-4) state_shapes = [K.int_shape(state) for state in layer.states] values = [np.ones(shape) for shape in state_shapes] if len(values) == 1: values = values[0] layer.reset_states(values) np.testing.assert_allclose(K.eval(layer.states[0]), np.ones(K.int_shape(layer.states[0])), atol=1e-4) # Test fit with invalid data with pytest.raises(ValueError): layer.reset_states([1] * (len(layer.states) + 1)) @rnn_test def test_initial_states_as_other_inputs(layer_class): num_states = 2 if layer_class is recurrent.LSTM else 1 # Test with Keras tensor main_inputs = Input((timesteps, embedding_dim)) initial_state = [Input((units,)) for _ in range(num_states)] inputs = [main_inputs] + initial_state layer = layer_class(units) output = layer(inputs) assert initial_state[0] in layer.inbound_nodes[0].input_tensors model = Model(inputs, output) model.compile(loss='categorical_crossentropy', optimizer='adam') main_inputs = np.random.random((num_samples, timesteps, embedding_dim)) initial_state = [np.random.random((num_samples, units)) for _ in range(num_states)] targets = np.random.random((num_samples, units)) model.train_on_batch([main_inputs] + initial_state, targets) @rnn_test def test_specify_state_with_masking(layer_class): num_states = 2 if layer_class is recurrent.LSTM else 1 inputs = Input((timesteps, embedding_dim)) _ = Masking()(inputs) initial_state = [Input((units,)) for _ in range(num_states)] output = layer_class(units)(inputs, initial_state=initial_state) model = Model([inputs] + initial_state, output) model.compile(loss='categorical_crossentropy', optimizer='adam') inputs = np.random.random((num_samples, timesteps, embedding_dim)) initial_state = [np.random.random((num_samples, units)) for _ in range(num_states)] targets = 
np.random.random((num_samples, units)) model.fit([inputs] + initial_state, targets) @rnn_test def test_return_state(layer_class): num_states = 2 if layer_class is recurrent.LSTM else 1 inputs = Input(batch_shape=(num_samples, timesteps, embedding_dim)) layer = layer_class(units, return_state=True, stateful=True) outputs = layer(inputs) output, state = outputs[0], outputs[1:] assert len(state) == num_states model = Model(inputs, state[0]) inputs = np.random.random((num_samples, timesteps, embedding_dim)) state = model.predict(inputs) np.testing.assert_allclose(K.eval(layer.states[0]), state, atol=1e-4) @rnn_test def test_state_reuse(layer_class): inputs = Input(batch_shape=(num_samples, timesteps, embedding_dim)) layer = layer_class(units, return_state=True, return_sequences=True) outputs = layer(inputs) output, state = outputs[0], outputs[1:] output = layer_class(units)(output, initial_state=state) model = Model(inputs, output) inputs = np.random.random((num_samples, timesteps, embedding_dim)) outputs = model.predict(inputs) @keras_test def test_minimal_rnn_cell_non_layer(): class MinimalRNNCell(object): def __init__(self, units, input_dim): self.units = units self.state_size = units self.kernel = keras.backend.variable( np.random.random((input_dim, units))) def call(self, inputs, states): prev_output = states[0] output = keras.backend.dot(inputs, self.kernel) + prev_output return output, [output] # Basic test case. cell = MinimalRNNCell(32, 5) x = keras.Input((None, 5)) layer = recurrent.RNN(cell) y = layer(x) model = keras.models.Model(x, y) model.compile(optimizer='rmsprop', loss='mse') model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32))) # Test stacking. cells = [MinimalRNNCell(8, 5), MinimalRNNCell(32, 8), MinimalRNNCell(32, 32)] layer = recurrent.RNN(cells) y = layer(x) model = keras.models.Model(x, y) model.compile(optimizer='rmsprop', loss='mse') model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32))) @keras_test def test_minimal_rnn_cell_non_layer_multiple_states(): class MinimalRNNCell(object): def __init__(self, units, input_dim): self.units = units self.state_size = (units, units) self.kernel = keras.backend.variable( np.random.random((input_dim, units))) def call(self, inputs, states): prev_output_1 = states[0] prev_output_2 = states[1] output = keras.backend.dot(inputs, self.kernel) output += prev_output_1 output -= prev_output_2 return output, [output * 2, output * 3] # Basic test case. cell = MinimalRNNCell(32, 5) x = keras.Input((None, 5)) layer = recurrent.RNN(cell) y = layer(x) model = keras.models.Model(x, y) model.compile(optimizer='rmsprop', loss='mse') model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32))) # Test stacking. 
cells = [MinimalRNNCell(8, 5), MinimalRNNCell(16, 8), MinimalRNNCell(32, 16)] layer = recurrent.RNN(cells) assert layer.cell.state_size == (32, 32, 16, 16, 8, 8) y = layer(x) model = keras.models.Model(x, y) model.compile(optimizer='rmsprop', loss='mse') model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32))) @keras_test def test_minimal_rnn_cell_layer(): class MinimalRNNCell(keras.layers.Layer): def __init__(self, units, **kwargs): self.units = units self.state_size = units super(MinimalRNNCell, self).__init__(**kwargs) def build(self, input_shape): self.kernel = self.add_weight(shape=(input_shape[-1], self.units), initializer='uniform', name='kernel') self.recurrent_kernel = self.add_weight( shape=(self.units, self.units), initializer='uniform', name='recurrent_kernel') self.built = True def call(self, inputs, states): prev_output = states[0] h = keras.backend.dot(inputs, self.kernel) output = h + keras.backend.dot(prev_output, self.recurrent_kernel) return output, [output] def get_config(self): config = {'units': self.units} base_config = super(MinimalRNNCell, self).get_config() return dict(list(base_config.items()) + list(config.items())) # Test basic case. x = keras.Input((None, 5)) cell = MinimalRNNCell(32) layer = recurrent.RNN(cell) y = layer(x) model = keras.models.Model(x, y) model.compile(optimizer='rmsprop', loss='mse') model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32))) # Test basic case serialization. x_np = np.random.random((6, 5, 5)) y_np = model.predict(x_np) weights = model.get_weights() config = layer.get_config() with keras.utils.CustomObjectScope({'MinimalRNNCell': MinimalRNNCell}): layer = recurrent.RNN.from_config(config) y = layer(x) model = keras.models.Model(x, y) model.set_weights(weights) y_np_2 = model.predict(x_np) assert_allclose(y_np, y_np_2, atol=1e-4) # Test stacking. cells = [MinimalRNNCell(8), MinimalRNNCell(12), MinimalRNNCell(32)] layer = recurrent.RNN(cells) y = layer(x) model = keras.models.Model(x, y) model.compile(optimizer='rmsprop', loss='mse') model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32))) # Test stacked RNN serialization. 
x_np = np.random.random((6, 5, 5)) y_np = model.predict(x_np) weights = model.get_weights() config = layer.get_config() with keras.utils.CustomObjectScope({'MinimalRNNCell': MinimalRNNCell}): layer = recurrent.RNN.from_config(config) y = layer(x) model = keras.models.Model(x, y) model.set_weights(weights) y_np_2 = model.predict(x_np) assert_allclose(y_np, y_np_2, atol=1e-4) @keras_test def test_stacked_rnn_attributes(): cells = [recurrent.LSTMCell(3), recurrent.LSTMCell(3, kernel_regularizer='l2')] layer = recurrent.RNN(cells) layer.build((None, None, 5)) # Test regularization losses assert len(layer.losses) == 1 # Test weights assert len(layer.trainable_weights) == 6 cells[0].trainable = False assert len(layer.trainable_weights) == 3 assert len(layer.non_trainable_weights) == 3 # Test `get_losses_for` x = keras.Input((None, 5)) y = K.sum(x) cells[0].add_loss(y, inputs=x) assert layer.get_losses_for(x) == [y] @rnn_test def test_batch_size_equal_one(layer_class): inputs = Input(batch_shape=(1, timesteps, embedding_dim)) layer = layer_class(units) outputs = layer(inputs) model = Model(inputs, outputs) model.compile('sgd', 'mse') x = np.random.random((1, timesteps, embedding_dim)) y = np.random.random((1, units)) model.train_on_batch(x, y) def test_rnn_cell_with_constants_layer(): class RNNCellWithConstants(keras.layers.Layer): def __init__(self, units, **kwargs): self.units = units self.state_size = units super(RNNCellWithConstants, self).__init__(**kwargs) def build(self, input_shape): if not isinstance(input_shape, list): raise TypeError('expects constants shape') [input_shape, constant_shape] = input_shape # will (and should) raise if more than one constant passed self.input_kernel = self.add_weight( shape=(input_shape[-1], self.units), initializer='uniform', name='kernel') self.recurrent_kernel = self.add_weight( shape=(self.units, self.units), initializer='uniform', name='recurrent_kernel') self.constant_kernel = self.add_weight( shape=(constant_shape[-1], self.units), initializer='uniform', name='constant_kernel') self.built = True def call(self, inputs, states, constants): [prev_output] = states [constant] = constants h_input = keras.backend.dot(inputs, self.input_kernel) h_state = keras.backend.dot(prev_output, self.recurrent_kernel) h_const = keras.backend.dot(constant, self.constant_kernel) output = h_input + h_state + h_const return output, [output] def get_config(self): config = {'units': self.units} base_config = super(RNNCellWithConstants, self).get_config() return dict(list(base_config.items()) + list(config.items())) # Test basic case. x = keras.Input((None, 5)) c = keras.Input((3,)) cell = RNNCellWithConstants(32) layer = recurrent.RNN(cell) y = layer(x, constants=c) model = keras.models.Model([x, c], y) model.compile(optimizer='rmsprop', loss='mse') model.train_on_batch( [np.zeros((6, 5, 5)), np.zeros((6, 3))], np.zeros((6, 32)) ) # Test basic case serialization. 
x_np = np.random.random((6, 5, 5)) c_np = np.random.random((6, 3)) y_np = model.predict([x_np, c_np]) weights = model.get_weights() config = layer.get_config() custom_objects = {'RNNCellWithConstants': RNNCellWithConstants} with keras.utils.CustomObjectScope(custom_objects): layer = recurrent.RNN.from_config(config.copy()) y = layer(x, constants=c) model = keras.models.Model([x, c], y) model.set_weights(weights) y_np_2 = model.predict([x_np, c_np]) assert_allclose(y_np, y_np_2, atol=1e-4) # test flat list inputs with keras.utils.CustomObjectScope(custom_objects): layer = recurrent.RNN.from_config(config.copy()) y = layer([x, c]) model = keras.models.Model([x, c], y) model.set_weights(weights) y_np_3 = model.predict([x_np, c_np]) assert_allclose(y_np, y_np_3, atol=1e-4) def test_rnn_cell_with_constants_layer_passing_initial_state(): class RNNCellWithConstants(keras.layers.Layer): def __init__(self, units, **kwargs): self.units = units self.state_size = units super(RNNCellWithConstants, self).__init__(**kwargs) def build(self, input_shape): if not isinstance(input_shape, list): raise TypeError('expects constants shape') [input_shape, constant_shape] = input_shape # will (and should) raise if more than one constant passed self.input_kernel = self.add_weight( shape=(input_shape[-1], self.units), initializer='uniform', name='kernel') self.recurrent_kernel = self.add_weight( shape=(self.units, self.units), initializer='uniform', name='recurrent_kernel') self.constant_kernel = self.add_weight( shape=(constant_shape[-1], self.units), initializer='uniform', name='constant_kernel') self.built = True def call(self, inputs, states, constants): [prev_output] = states [constant] = constants h_input = keras.backend.dot(inputs, self.input_kernel) h_state = keras.backend.dot(prev_output, self.recurrent_kernel) h_const = keras.backend.dot(constant, self.constant_kernel) output = h_input + h_state + h_const return output, [output] def get_config(self): config = {'units': self.units} base_config = super(RNNCellWithConstants, self).get_config() return dict(list(base_config.items()) + list(config.items())) # Test basic case. x = keras.Input((None, 5)) c = keras.Input((3,)) s = keras.Input((32,)) cell = RNNCellWithConstants(32) layer = recurrent.RNN(cell) y = layer(x, initial_state=s, constants=c) model = keras.models.Model([x, s, c], y) model.compile(optimizer='rmsprop', loss='mse') model.train_on_batch( [np.zeros((6, 5, 5)), np.zeros((6, 32)), np.zeros((6, 3))], np.zeros((6, 32)) ) # Test basic case serialization. 
x_np = np.random.random((6, 5, 5)) s_np = np.random.random((6, 32)) c_np = np.random.random((6, 3)) y_np = model.predict([x_np, s_np, c_np]) weights = model.get_weights() config = layer.get_config() custom_objects = {'RNNCellWithConstants': RNNCellWithConstants} with keras.utils.CustomObjectScope(custom_objects): layer = recurrent.RNN.from_config(config.copy()) y = layer(x, initial_state=s, constants=c) model = keras.models.Model([x, s, c], y) model.set_weights(weights) y_np_2 = model.predict([x_np, s_np, c_np]) assert_allclose(y_np, y_np_2, atol=1e-4) # verify that state is used y_np_2_different_s = model.predict([x_np, s_np + 10., c_np]) with pytest.raises(AssertionError): assert_allclose(y_np, y_np_2_different_s, atol=1e-4) # test flat list inputs with keras.utils.CustomObjectScope(custom_objects): layer = recurrent.RNN.from_config(config.copy()) y = layer([x, s, c]) model = keras.models.Model([x, s, c], y) model.set_weights(weights) y_np_3 = model.predict([x_np, s_np, c_np]) assert_allclose(y_np, y_np_3, atol=1e-4) if __name__ == '__main__': pytest.main([__file__])
true
true
f70dca56405821f455674a34af08519dd64b3467
1,005
py
Python
mallenom/workcal/forms.py
crowmurk/mallenom
c85bba2e657965879e6fac18bb762ca739ff4d98
[ "MIT" ]
null
null
null
mallenom/workcal/forms.py
crowmurk/mallenom
c85bba2e657965879e6fac18bb762ca739ff4d98
[ "MIT" ]
9
2020-02-12T01:04:30.000Z
2022-02-10T09:22:09.000Z
mallenom/workcal/forms.py
crowmurk/mallenom
c85bba2e657965879e6fac18bb762ca739ff4d98
[ "MIT" ]
null
null
null
from django import forms
from django.core.validators import MinValueValidator
from django.utils.translation import gettext_lazy as _

from .models import DayType, Day


class DayTypeForm(forms.ModelForm):
    class Meta:
        model = DayType
        fields = '__all__'


class DayForm(forms.ModelForm):
    class Meta:
        model = Day
        fields = '__all__'


class CalendarUploadForm(forms.Form):
    year = forms.IntegerField(
        required=True,
        validators=[
            MinValueValidator(1999),
        ],
        label=_('Year'),
        help_text=_('Pick a year to import'),
    )
    file = forms.FileField(
        required=True,
        label=_('File'),
        help_text=_('Pick a CSV file containing work calendar'),
    )

    def clean_file(self):
        if any(self.errors):
            return None

        file = self.cleaned_data['file']
        if not file.name.endswith('.csv'):
            raise forms.ValidationError(_('CSV file required'))

        return file
23.372093
64
0.61592
from django import forms
from django.core.validators import MinValueValidator
from django.utils.translation import gettext_lazy as _

from .models import DayType, Day


class DayTypeForm(forms.ModelForm):
    class Meta:
        model = DayType
        fields = '__all__'


class DayForm(forms.ModelForm):
    class Meta:
        model = Day
        fields = '__all__'


class CalendarUploadForm(forms.Form):
    year = forms.IntegerField(
        required=True,
        validators=[
            MinValueValidator(1999),
        ],
        label=_('Year'),
        help_text=_('Pick a year to import'),
    )
    file = forms.FileField(
        required=True,
        label=_('File'),
        help_text=_('Pick a CSV file containing work calendar'),
    )

    def clean_file(self):
        if any(self.errors):
            return None

        file = self.cleaned_data['file']
        if not file.name.endswith('.csv'):
            raise forms.ValidationError(_('CSV file required'))

        return file
true
true
f70dcd890ab8bce5843942a3818fdc0c14bb0bc9
4,723
py
Python
data/forms/__init__.py
pombredanne/vulncode-db
bffd1467df54d98e5271ec977330365d5879b60d
[ "Apache-2.0" ]
592
2019-03-05T13:39:57.000Z
2022-03-31T14:52:58.000Z
data/forms/__init__.py
pombredanne/vulncode-db
bffd1467df54d98e5271ec977330365d5879b60d
[ "Apache-2.0" ]
91
2019-04-05T20:45:26.000Z
2021-12-24T02:10:50.000Z
data/forms/__init__.py
pombredanne/vulncode-db
bffd1467df54d98e5271ec977330365d5879b60d
[ "Apache-2.0" ]
84
2019-03-31T03:55:56.000Z
2022-01-03T13:33:44.000Z
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask_wtf import FlaskForm  # type: ignore
from wtforms import (  # type: ignore
    StringField,
    TextAreaField,
    SubmitField,
    FieldList,
    FormField,
    IntegerField,
    HiddenField,
    BooleanField,
)
from wtforms import validators

from data.models import VulnerabilityGitCommits, VulnerabilityResources
from data.models.base import db


class BaseForm(FlaskForm):
    @property
    def non_hidden_fields(self):
        for field in self:
            if isinstance(field, HiddenField):
                continue
            yield field


class ModelFieldList(FieldList):
    def __init__(self, *args, **kwargs):
        self.model = kwargs.pop("model", None)
        super().__init__(*args, **kwargs)
        if not self.model:
            raise ValueError("ModelFieldList requires model to be set")

    def populate_obj(self, obj, name):
        if not hasattr(obj, name):
            setattr(obj, name, [])
        while len(getattr(obj, name)) < len(self.entries):
            new_model = self.model()
            db.session.add(new_model)
            getattr(obj, name).append(new_model)
        while len(getattr(obj, name)) > len(self.entries):
            db.session.delete(getattr(obj, name).pop())
        super().populate_obj(obj, name)


class CommitLinksForm(FlaskForm):
    repo_url = StringField(
        "Git Repo URL", validators=[validators.Optional(), validators.URL()]
    )
    commit_hash = StringField("Commit Hash", validators=[])
    # Commit data is optional -> otherwise use: validators.DataRequired(),
    commit_link = StringField(
        "Main commit link", validators=[validators.Optional(), validators.URL()]
    )
    repo_name = StringField("Repository Name", validators=[])

    class Meta:
        csrf = False


class VulnerabilityResourcesForm(FlaskForm):
    link = StringField("Link", validators=[validators.DataRequired(), validators.URL()])

    class Meta:
        csrf = False


class VulnerabilityDetailsForm(FlaskForm):
    commits = ModelFieldList(
        FormField(CommitLinksForm),
        model=VulnerabilityGitCommits,
        min_entries=1,
        default=[VulnerabilityGitCommits],
    )
    # Changing the CVE ID is disabled for now.
    # The filters argument is used to have Null fields instead of empty strings.
    # This is important since the cve_id is supposed to be unique OR Null.
    # cve_id = StringField(
    #     "CVE-ID",
    #     filters=[lambda x: x and str(x).upper().strip(), lambda x: x or None],
    #     validators=[
    #         validators.Optional(),
    #         validators.Regexp(r"^CVE-\d{4}-\d+$")
    #     ],
    # )
    comment = TextAreaField(
        "High-Level Bug Overview", validators=[validators.DataRequired()]
    )
    resources = ModelFieldList(
        FormField(VulnerabilityResourcesForm), model=VulnerabilityResources
    )
    submit = SubmitField("Propose change")


class VulnerabilityProposalReject(FlaskForm):
    review_feedback = TextAreaField(
        "Feedback what should be changed", validators=[validators.DataRequired()]
    )
    submit_reject = SubmitField("Ask for improvements")


class VulnerabilityProposalApprove(FlaskForm):
    submit_approve = SubmitField("Approve proposal")


class VulnerabilityProposalAssign(FlaskForm):
    submit_assign = SubmitField("Take review")


class VulnerabilityProposalUnassign(FlaskForm):
    submit_unassign = SubmitField("Unassign from this review")


class VulnerabilityProposalPublish(FlaskForm):
    submit_publish = SubmitField("Publish entry")


class VulnerabilityDeleteForm(FlaskForm):
    delete_entry = IntegerField("Delete entry", [validators.required()])
    submit = SubmitField()


class UserProfileForm(BaseForm):
    full_name = StringField(
        "Name",
        description=(
            '<small class="form-text text-muted">'
            "What should be shown next to your contributions.</small>"
        ),
    )
    hide_name = BooleanField("Hide Name")
    profile_picture = StringField(
        "Profile Picture URL", validators=[validators.Optional(), validators.URL()]
    )
    hide_picture = BooleanField("Hide Profile Picture")
30.869281
88
0.677747
from flask_wtf import FlaskForm
from wtforms import (
    StringField,
    TextAreaField,
    SubmitField,
    FieldList,
    FormField,
    IntegerField,
    HiddenField,
    BooleanField,
)
from wtforms import validators

from data.models import VulnerabilityGitCommits, VulnerabilityResources
from data.models.base import db


class BaseForm(FlaskForm):
    @property
    def non_hidden_fields(self):
        for field in self:
            if isinstance(field, HiddenField):
                continue
            yield field


class ModelFieldList(FieldList):
    def __init__(self, *args, **kwargs):
        self.model = kwargs.pop("model", None)
        super().__init__(*args, **kwargs)
        if not self.model:
            raise ValueError("ModelFieldList requires model to be set")

    def populate_obj(self, obj, name):
        if not hasattr(obj, name):
            setattr(obj, name, [])
        while len(getattr(obj, name)) < len(self.entries):
            new_model = self.model()
            db.session.add(new_model)
            getattr(obj, name).append(new_model)
        while len(getattr(obj, name)) > len(self.entries):
            db.session.delete(getattr(obj, name).pop())
        super().populate_obj(obj, name)


class CommitLinksForm(FlaskForm):
    repo_url = StringField(
        "Git Repo URL", validators=[validators.Optional(), validators.URL()]
    )
    commit_hash = StringField("Commit Hash", validators=[])
    commit_link = StringField(
        "Main commit link", validators=[validators.Optional(), validators.URL()]
    )
    repo_name = StringField("Repository Name", validators=[])

    class Meta:
        csrf = False


class VulnerabilityResourcesForm(FlaskForm):
    link = StringField("Link", validators=[validators.DataRequired(), validators.URL()])

    class Meta:
        csrf = False


class VulnerabilityDetailsForm(FlaskForm):
    commits = ModelFieldList(
        FormField(CommitLinksForm),
        model=VulnerabilityGitCommits,
        min_entries=1,
        default=[VulnerabilityGitCommits],
    )
    comment = TextAreaField(
        "High-Level Bug Overview", validators=[validators.DataRequired()]
    )
    resources = ModelFieldList(
        FormField(VulnerabilityResourcesForm), model=VulnerabilityResources
    )
    submit = SubmitField("Propose change")


class VulnerabilityProposalReject(FlaskForm):
    review_feedback = TextAreaField(
        "Feedback what should be changed", validators=[validators.DataRequired()]
    )
    submit_reject = SubmitField("Ask for improvements")


class VulnerabilityProposalApprove(FlaskForm):
    submit_approve = SubmitField("Approve proposal")


class VulnerabilityProposalAssign(FlaskForm):
    submit_assign = SubmitField("Take review")


class VulnerabilityProposalUnassign(FlaskForm):
    submit_unassign = SubmitField("Unassign from this review")


class VulnerabilityProposalPublish(FlaskForm):
    submit_publish = SubmitField("Publish entry")


class VulnerabilityDeleteForm(FlaskForm):
    delete_entry = IntegerField("Delete entry", [validators.required()])
    submit = SubmitField()


class UserProfileForm(BaseForm):
    full_name = StringField(
        "Name",
        description=(
            '<small class="form-text text-muted">'
            "What should be shown next to your contributions.</small>"
        ),
    )
    hide_name = BooleanField("Hide Name")
    profile_picture = StringField(
        "Profile Picture URL", validators=[validators.Optional(), validators.URL()]
    )
    hide_picture = BooleanField("Hide Profile Picture")
true
true
f70dcdbf09dad9fb3d510148238311b3fe78a376
49,747
py
Python
frappe/__init__.py
Havenir/gppert-frappe
d302388ad15b36754a48c5d047d7515dad257b89
[ "MIT" ]
null
null
null
frappe/__init__.py
Havenir/gppert-frappe
d302388ad15b36754a48c5d047d7515dad257b89
[ "MIT" ]
null
null
null
frappe/__init__.py
Havenir/gppert-frappe
d302388ad15b36754a48c5d047d7515dad257b89
[ "MIT" ]
null
null
null
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors # MIT License. See license.txt """ globals attached to frappe module + some utility functions that should probably be moved """ from __future__ import unicode_literals, print_function from six import iteritems, binary_type, text_type, string_types from werkzeug.local import Local, release_local import os, sys, importlib, inspect, json from past.builtins import cmp from faker import Faker # public from .exceptions import * from .utils.jinja import (get_jenv, get_template, render_template, get_email_from_template, get_jloader) # Hamless for Python 3 # For Python 2 set default encoding to utf-8 if sys.version[0] == '2': reload(sys) sys.setdefaultencoding("utf-8") __version__ = '11.1.65' __title__ = "Frappe Framework" local = Local() class _dict(dict): """dict like object that exposes keys as attributes""" def __getattr__(self, key): ret = self.get(key) if not ret and key.startswith("__"): raise AttributeError() return ret def __setattr__(self, key, value): self[key] = value def __getstate__(self): return self def __setstate__(self, d): self.update(d) def update(self, d): """update and return self -- the missing dict feature in python""" super(_dict, self).update(d) return self def copy(self): return _dict(dict(self).copy()) def _(msg, lang=None): """Returns translated string in current lang, if exists.""" from frappe.translate import get_full_dict from frappe.utils import strip_html_tags, is_html if not hasattr(local, 'lang'): local.lang = lang or 'en' if not lang: lang = local.lang non_translated_msg = msg if is_html(msg): msg = strip_html_tags(msg) # msg should always be unicode msg = as_unicode(msg).strip() # return lang_full_dict according to lang passed parameter return get_full_dict(lang).get(msg) or non_translated_msg def as_unicode(text, encoding='utf-8'): '''Convert to unicode if required''' if isinstance(text, text_type): return text elif text==None: return '' elif isinstance(text, binary_type): return text_type(text, encoding) else: return text_type(text) def get_lang_dict(fortype, name=None): """Returns the translated language dict for the given type and name. :param fortype: must be one of `doctype`, `page`, `report`, `include`, `jsfile`, `boot` :param name: name of the document for which assets are to be returned.""" from frappe.translate import get_dict return get_dict(fortype, name) def set_user_lang(user, user_language=None): """Guess and set user language for the session. `frappe.local.lang`""" from frappe.translate import get_user_lang local.lang = get_user_lang(user) # local-globals db = local("db") conf = local("conf") form = form_dict = local("form_dict") request = local("request") response = local("response") session = local("session") user = local("user") flags = local("flags") error_log = local("error_log") debug_log = local("debug_log") message_log = local("message_log") lang = local("lang") def init(site, sites_path=None, new_site=False): """Initialize frappe for the current site. Reset thread locals `frappe.local`""" if getattr(local, "initialised", None): return if not sites_path: sites_path = '.' 
local.error_log = [] local.message_log = [] local.debug_log = [] local.realtime_log = [] local.flags = _dict({ "ran_schedulers": [], "currently_saving": [], "redirect_location": "", "in_install_db": False, "in_install_app": False, "in_import": False, "in_test": False, "mute_messages": False, "ignore_links": False, "mute_emails": False, "has_dataurl": False, "new_site": new_site }) local.rollback_observers = [] local.test_objects = {} local.site = site local.sites_path = sites_path local.site_path = os.path.join(sites_path, site) local.request_ip = None local.response = _dict({"docs":[]}) local.task_id = None local.conf = _dict(get_site_config()) local.lang = local.conf.lang or "en" local.lang_full_dict = None local.module_app = None local.app_modules = None local.system_settings = _dict() local.user = None local.user_perms = None local.session = None local.role_permissions = {} local.valid_columns = {} local.new_doc_templates = {} local.link_count = {} local.jenv = None local.jloader =None local.cache = {} local.document_cache = {} local.meta_cache = {} local.form_dict = _dict() local.session = _dict() setup_module_map() local.initialised = True def connect(site=None, db_name=None): """Connect to site database instance. :param site: If site is given, calls `frappe.init`. :param db_name: Optional. Will use from `site_config.json`.""" from frappe.database import Database if site: init(site) local.db = Database(user=db_name or local.conf.db_name) set_user("Administrator") def connect_replica(): from frappe.database import Database user = local.conf.db_name password = local.conf.db_password if local.conf.different_credentials_for_replica: user = local.conf.replica_db_name password = local.conf.replica_db_password local.replica_db = Database(host=local.conf.replica_host, user=user, password=password) # swap db connections local.primary_db = local.db local.db = local.replica_db def get_site_config(sites_path=None, site_path=None): """Returns `site_config.json` combined with `sites/common_site_config.json`. 
`site_config` is a set of site wide settings like database name, password, email etc.""" config = {} sites_path = sites_path or getattr(local, "sites_path", None) site_path = site_path or getattr(local, "site_path", None) if sites_path: common_site_config = os.path.join(sites_path, "common_site_config.json") if os.path.exists(common_site_config): config.update(get_file_json(common_site_config)) if site_path: site_config = os.path.join(site_path, "site_config.json") if os.path.exists(site_config): config.update(get_file_json(site_config)) elif local.site and not local.flags.new_site: print("{0} does not exist".format(local.site)) sys.exit(1) #raise IncorrectSitePath, "{0} does not exist".format(site_config) return _dict(config) def get_conf(site=None): if hasattr(local, 'conf'): return local.conf else: # if no site, get from common_site_config.json with init_site(site): return local.conf class init_site: def __init__(self, site=None): '''If site==None, initialize it for empty site ('') to load common_site_config.json''' self.site = site or '' def __enter__(self): init(self.site) return local def __exit__(self, type, value, traceback): destroy() def destroy(): """Closes connection and releases werkzeug local.""" if db: db.close() release_local(local) # memcache redis_server = None def cache(): """Returns memcache connection.""" global redis_server if not redis_server: from frappe.utils.redis_wrapper import RedisWrapper redis_server = RedisWrapper.from_url(conf.get('redis_cache') or "redis://localhost:11311") return redis_server def get_traceback(): """Returns error traceback.""" from frappe.utils import get_traceback return get_traceback() def errprint(msg): """Log error. This is sent back as `exc` in response. :param msg: Message.""" msg = as_unicode(msg) if not request or (not "cmd" in local.form_dict) or conf.developer_mode: print(msg.encode('utf-8')) error_log.append({"exc": msg}) def log(msg): """Add to `debug_log`. :param msg: Message.""" if not request: if conf.get("logging") or False: print(repr(msg)) debug_log.append(as_unicode(msg)) def msgprint(msg, title=None, raise_exception=0, as_table=False, indicator=None, alert=False): """Print a message to the user (via HTTP response). Messages are sent in the `__server_messages` property in the response JSON and shown in a pop-up / modal. :param msg: Message. :param title: [optional] Message title. :param raise_exception: [optional] Raise given exception and show message. :param as_table: [optional] If `msg` is a list of lists, render as HTML table. 
""" from frappe.utils import encode msg = safe_decode(msg) out = _dict(message=msg) def _raise_exception(): if raise_exception: if flags.rollback_on_exception: db.rollback() import inspect if inspect.isclass(raise_exception) and issubclass(raise_exception, Exception): raise raise_exception(msg) else: raise ValidationError(msg) if flags.mute_messages: _raise_exception() return if as_table and type(msg) in (list, tuple): out.msg = '<table border="1px" style="border-collapse: collapse" cellpadding="2px">' + ''.join(['<tr>'+''.join(['<td>%s</td>' % c for c in r])+'</tr>' for r in msg]) + '</table>' if flags.print_messages and out.msg: print("Message: " + repr(out.msg).encode("utf-8")) if title: out.title = title if not indicator and raise_exception: indicator = 'red' if indicator: out.indicator = indicator if alert: out.alert = 1 message_log.append(json.dumps(out)) if raise_exception and hasattr(raise_exception, '__name__'): local.response['exc_type'] = raise_exception.__name__ _raise_exception() def clear_messages(): local.message_log = [] def clear_last_message(): if len(local.message_log) > 0: local.message_log = local.message_log[:-1] def throw(msg, exc=ValidationError, title=None): """Throw execption and show message (`msgprint`). :param msg: Message. :param exc: Exception class. Default `frappe.ValidationError`""" msgprint(msg, raise_exception=exc, title=title, indicator='red') def emit_js(js, user=False, **kwargs): from frappe.realtime import publish_realtime if user == False: user = session.user publish_realtime('eval_js', js, user=user, **kwargs) def create_folder(path, with_init=False): """Create a folder in the given path and add an `__init__.py` file (optional). :param path: Folder path. :param with_init: Create `__init__.py` in the new folder.""" from frappe.utils import touch_file if not os.path.exists(path): os.makedirs(path) if with_init: touch_file(os.path.join(path, "__init__.py")) def set_user(username): """Set current user. :param username: **User** name to set as current user.""" local.session.user = username local.session.sid = username local.cache = {} local.form_dict = _dict() local.jenv = None local.session.data = _dict() local.role_permissions = {} local.new_doc_templates = {} local.user_perms = None def get_user(): from frappe.utils.user import UserPermissions if not local.user_perms: local.user_perms = UserPermissions(local.session.user) return local.user_perms def get_roles(username=None): """Returns roles of current user.""" if not local.session: return ["Guest"] if username: import frappe.permissions return frappe.permissions.get_roles(username) else: return get_user().get_roles() def get_request_header(key, default=None): """Return HTTP request header. :param key: HTTP header key. :param default: Default value.""" return request.headers.get(key, default) def sendmail(recipients=[], sender="", subject="No Subject", message="No Message", as_markdown=False, delayed=True, reference_doctype=None, reference_name=None, unsubscribe_method=None, unsubscribe_params=None, unsubscribe_message=None, attachments=None, content=None, doctype=None, name=None, reply_to=None, cc=[], bcc=[], message_id=None, in_reply_to=None, send_after=None, expose_recipients=None, send_priority=1, communication=None, retry=1, now=None, read_receipt=None, is_notification=False, inline_images=None, template=None, args=None, header=None, print_letterhead=False): """Send email using user's default **Email Account** or global default **Email Account**. :param recipients: List of recipients. 
:param sender: Email sender. Default is current user. :param subject: Email Subject. :param message: (or `content`) Email Content. :param as_markdown: Convert content markdown to HTML. :param delayed: Send via scheduled email sender **Email Queue**. Don't send immediately. Default is true :param send_priority: Priority for Email Queue, default 1. :param reference_doctype: (or `doctype`) Append as communication to this DocType. :param reference_name: (or `name`) Append as communication to this document name. :param unsubscribe_method: Unsubscribe url with options email, doctype, name. e.g. `/api/method/unsubscribe` :param unsubscribe_params: Unsubscribe paramaters to be loaded on the unsubscribe_method [optional] (dict). :param attachments: List of attachments. :param reply_to: Reply-To Email Address. :param message_id: Used for threading. If a reply is received to this email, Message-Id is sent back as In-Reply-To in received email. :param in_reply_to: Used to send the Message-Id of a received email back as In-Reply-To. :param send_after: Send after the given datetime. :param expose_recipients: Display all recipients in the footer message - "This email was sent to" :param communication: Communication link to be set in Email Queue record :param inline_images: List of inline images as {"filename", "filecontent"}. All src properties will be replaced with random Content-Id :param template: Name of html template from templates/emails folder :param args: Arguments for rendering the template :param header: Append header in email """ text_content = None if template: message, text_content = get_email_from_template(template, args) message = content or message if as_markdown: message = frappe.utils.md_to_html(message) if not delayed: now = True from frappe.email import queue queue.send(recipients=recipients, sender=sender, subject=subject, message=message, text_content=text_content, reference_doctype = doctype or reference_doctype, reference_name = name or reference_name, unsubscribe_method=unsubscribe_method, unsubscribe_params=unsubscribe_params, unsubscribe_message=unsubscribe_message, attachments=attachments, reply_to=reply_to, cc=cc, bcc=bcc, message_id=message_id, in_reply_to=in_reply_to, send_after=send_after, expose_recipients=expose_recipients, send_priority=send_priority, communication=communication, now=now, read_receipt=read_receipt, is_notification=is_notification, inline_images=inline_images, header=header, print_letterhead=print_letterhead) whitelisted = [] guest_methods = [] xss_safe_methods = [] def whitelist(allow_guest=False, xss_safe=False): """ Decorator for whitelisting a function and making it accessible via HTTP. Standard request will be `/api/method/[path.to.method]` :param allow_guest: Allow non logged-in user to access this method. Use as: @frappe.whitelist() def myfunc(param1, param2): pass """ def innerfn(fn): global whitelisted, guest_methods, xss_safe_methods whitelisted.append(fn) if allow_guest: guest_methods.append(fn) if xss_safe: xss_safe_methods.append(fn) return fn return innerfn def read_only(): def innfn(fn): def wrapper_fn(*args, **kwargs): if conf.read_from_replica: connect_replica() try: retval = fn(*args, **get_newargs(fn, kwargs)) except: raise finally: if local and hasattr(local, 'primary_db'): local.db.close() local.db = local.primary_db return retval return wrapper_fn return innfn def only_for(roles): """Raise `frappe.PermissionError` if the user does not have any of the given **Roles**. 
:param roles: List of roles to check.""" if local.flags.in_test: return if not isinstance(roles, (tuple, list)): roles = (roles,) roles = set(roles) myroles = set(get_roles()) if not roles.intersection(myroles): raise PermissionError def get_domain_data(module): try: domain_data = get_hooks('domains') if module in domain_data: return _dict(get_attr(get_hooks('domains')[module][0] + '.data')) else: return _dict() except ImportError: if local.flags.in_test: return _dict() else: raise def clear_cache(user=None, doctype=None): """Clear **User**, **DocType** or global cache. :param user: If user is given, only user cache is cleared. :param doctype: If doctype is given, only DocType cache is cleared.""" import frappe.cache_manager if doctype: frappe.cache_manager.clear_doctype_cache(doctype) reset_metadata_version() elif user: frappe.cache_manager.clear_user_cache(user) else: # everything from frappe import translate frappe.cache_manager.clear_user_cache() translate.clear_cache() reset_metadata_version() local.cache = {} local.new_doc_templates = {} for fn in get_hooks("clear_cache"): get_attr(fn)() local.role_permissions = {} def has_permission(doctype=None, ptype="read", doc=None, user=None, verbose=False, throw=False): """Raises `frappe.PermissionError` if not permitted. :param doctype: DocType for which permission is to be check. :param ptype: Permission type (`read`, `write`, `create`, `submit`, `cancel`, `amend`). Default: `read`. :param doc: [optional] Checks User permissions for given doc. :param user: [optional] Check for given user. Default: current user.""" if not doctype and doc: doctype = doc.doctype import frappe.permissions out = frappe.permissions.has_permission(doctype, ptype, doc=doc, verbose=verbose, user=user) if throw and not out: if doc: frappe.throw(_("No permission for {0}").format(doc.doctype + " " + doc.name)) else: frappe.throw(_("No permission for {0}").format(doctype)) return out def has_website_permission(doc=None, ptype='read', user=None, verbose=False, doctype=None): """Raises `frappe.PermissionError` if not permitted. :param doctype: DocType for which permission is to be check. :param ptype: Permission type (`read`, `write`, `create`, `submit`, `cancel`, `amend`). Default: `read`. :param doc: Checks User permissions for given doc. :param user: [optional] Check for given user. 
Default: current user.""" if not user: user = session.user if doc: if isinstance(doc, string_types): doc = get_doc(doctype, doc) doctype = doc.doctype if doc.flags.ignore_permissions: return True # check permission in controller if hasattr(doc, 'has_website_permission'): return doc.has_website_permission(ptype, user, verbose=verbose) hooks = (get_hooks("has_website_permission") or {}).get(doctype, []) if hooks: for method in hooks: result = call(method, doc=doc, ptype=ptype, user=user, verbose=verbose) # if even a single permission check is Falsy if not result: return False # else it is Truthy return True else: return False def is_table(doctype): """Returns True if `istable` property (indicating child Table) is set for given DocType.""" def get_tables(): return db.sql_list("select name from tabDocType where istable=1") tables = cache().get_value("is_table", get_tables) return doctype in tables def get_precision(doctype, fieldname, currency=None, doc=None): """Get precision for a given field""" from frappe.model.meta import get_field_precision return get_field_precision(get_meta(doctype).get_field(fieldname), doc, currency) def generate_hash(txt=None, length=None): """Generates random hash for given text + current timestamp + random string.""" import hashlib, time from .utils import random_string digest = hashlib.sha224(((txt or "") + repr(time.time()) + repr(random_string(8))).encode()).hexdigest() if length: digest = digest[:length] return digest def reset_metadata_version(): """Reset `metadata_version` (Client (Javascript) build ID) hash.""" v = generate_hash() cache().set_value("metadata_version", v) return v def new_doc(doctype, parent_doc=None, parentfield=None, as_dict=False): """Returns a new document of the given DocType with defaults set. :param doctype: DocType of the new document. :param parent_doc: [optional] add to parent document. :param parentfield: [optional] add against this `parentfield`.""" from frappe.model.create_new import get_new_doc return get_new_doc(doctype, parent_doc, parentfield, as_dict=as_dict) def set_value(doctype, docname, fieldname, value=None): """Set document value. Calls `frappe.client.set_value`""" import frappe.client return frappe.client.set_value(doctype, docname, fieldname, value) def get_cached_doc(*args, **kwargs): if args and len(args) > 1 and isinstance(args[1], text_type): key = get_document_cache_key(args[0], args[1]) # local cache doc = local.document_cache.get(key) if doc: return doc # redis cache doc = cache().hget('document_cache', key) if doc: doc = get_doc(doc) local.document_cache[key] = doc return doc # database doc = get_doc(*args, **kwargs) return doc def get_document_cache_key(doctype, name): return '{0}::{1}'.format(doctype, name) def clear_document_cache(doctype, name): cache().hdel("last_modified", doctype) key = get_document_cache_key(doctype, name) if key in local.document_cache: del local.document_cache[key] cache().hdel('document_cache', key) def get_cached_value(doctype, name, fieldname, as_dict=False): doc = get_cached_doc(doctype, name) if isinstance(fieldname, string_types): if as_dict: throw('Cannot make dict for single fieldname') return doc.get(fieldname) values = [doc.get(f) for f in fieldname] if as_dict: return _dict(zip(fieldname, values)) return values def get_doc(*args, **kwargs): """Return a `frappe.model.document.Document` object of the given type and name. :param arg1: DocType name as string **or** document JSON. :param arg2: [optional] Document name as string. 
Examples: # insert a new document todo = frappe.get_doc({"doctype":"ToDo", "description": "test"}) tood.insert() # open an existing document todo = frappe.get_doc("ToDo", "TD0001") """ import frappe.model.document doc = frappe.model.document.get_doc(*args, **kwargs) # set in cache if args and len(args) > 1: key = get_document_cache_key(args[0], args[1]) local.document_cache[key] = doc cache().hset('document_cache', key, doc.as_dict()) return doc def get_last_doc(doctype): """Get last created document of this type.""" d = get_all(doctype, ["name"], order_by="creation desc", limit_page_length=1) if d: return get_doc(doctype, d[0].name) else: raise DoesNotExistError def get_single(doctype): """Return a `frappe.model.document.Document` object of the given Single doctype.""" return get_doc(doctype, doctype) def get_meta(doctype, cached=True): """Get `frappe.model.meta.Meta` instance of given doctype name.""" import frappe.model.meta return frappe.model.meta.get_meta(doctype, cached=cached) def get_meta_module(doctype): import frappe.modules return frappe.modules.load_doctype_module(doctype) def delete_doc(doctype=None, name=None, force=0, ignore_doctypes=None, for_reload=False, ignore_permissions=False, flags=None, ignore_on_trash=False, ignore_missing=True): """Delete a document. Calls `frappe.model.delete_doc.delete_doc`. :param doctype: DocType of document to be delete. :param name: Name of document to be delete. :param force: Allow even if document is linked. Warning: This may lead to data integrity errors. :param ignore_doctypes: Ignore if child table is one of these. :param for_reload: Call `before_reload` trigger before deleting. :param ignore_permissions: Ignore user permissions.""" import frappe.model.delete_doc frappe.model.delete_doc.delete_doc(doctype, name, force, ignore_doctypes, for_reload, ignore_permissions, flags, ignore_on_trash, ignore_missing) def delete_doc_if_exists(doctype, name, force=0): """Delete document if exists.""" if db.exists(doctype, name): delete_doc(doctype, name, force=force) def reload_doctype(doctype, force=False, reset_permissions=False): """Reload DocType from model (`[module]/[doctype]/[name]/[name].json`) files.""" reload_doc(scrub(db.get_value("DocType", doctype, "module")), "doctype", scrub(doctype), force=force, reset_permissions=reset_permissions) def reload_doc(module, dt=None, dn=None, force=False, reset_permissions=False): """Reload Document from model (`[module]/[doctype]/[name]/[name].json`) files. :param module: Module name. :param dt: DocType name. :param dn: Document name. :param force: Reload even if `modified` timestamp matches. """ import frappe.modules return frappe.modules.reload_doc(module, dt, dn, force=force, reset_permissions=reset_permissions) def rename_doc(*args, **kwargs): """Rename a document. Calls `frappe.model.rename_doc.rename_doc`""" from frappe.model.rename_doc import rename_doc return rename_doc(*args, **kwargs) def get_module(modulename): """Returns a module object for given Python module name using `importlib.import_module`.""" return importlib.import_module(modulename) def scrub(txt): """Returns sluggified string. e.g. `Sales Order` becomes `sales_order`.""" return txt.replace(' ','_').replace('-', '_').lower() def unscrub(txt): """Returns titlified string. e.g. `sales_order` becomes `Sales Order`.""" return txt.replace('_',' ').replace('-', ' ').title() def get_module_path(module, *joins): """Get the path of the given module name. :param module: Module name. 
:param *joins: Join additional path elements using `os.path.join`.""" module = scrub(module) return get_pymodule_path(local.module_app[module] + "." + module, *joins) def get_app_path(app_name, *joins): """Return path of given app. :param app: App name. :param *joins: Join additional path elements using `os.path.join`.""" return get_pymodule_path(app_name, *joins) def get_site_path(*joins): """Return path of current site. :param *joins: Join additional path elements using `os.path.join`.""" return os.path.join(local.site_path, *joins) def get_pymodule_path(modulename, *joins): """Return path of given Python module name. :param modulename: Python module name. :param *joins: Join additional path elements using `os.path.join`.""" if not "public" in joins: joins = [scrub(part) for part in joins] return os.path.join(os.path.dirname(get_module(scrub(modulename)).__file__), *joins) def get_module_list(app_name): """Get list of modules for given all via `app/modules.txt`.""" return get_file_items(os.path.join(os.path.dirname(get_module(app_name).__file__), "modules.txt")) def get_all_apps(with_internal_apps=True, sites_path=None): """Get list of all apps via `sites/apps.txt`.""" if not sites_path: sites_path = local.sites_path apps = get_file_items(os.path.join(sites_path, "apps.txt"), raise_not_found=True) if with_internal_apps: for app in get_file_items(os.path.join(local.site_path, "apps.txt")): if app not in apps: apps.append(app) if "frappe" in apps: apps.remove("frappe") apps.insert(0, 'frappe') return apps def get_installed_apps(sort=False, frappe_last=False): """Get list of installed apps in current site.""" if getattr(flags, "in_install_db", True): return [] if not db: connect() installed = json.loads(db.get_global("installed_apps") or "[]") if sort: installed = [app for app in get_all_apps(True) if app in installed] if frappe_last: if 'frappe' in installed: installed.remove('frappe') installed.append('frappe') return installed def get_doc_hooks(): '''Returns hooked methods for given doc. It will expand the dict tuple if required.''' if not hasattr(local, 'doc_events_hooks'): hooks = get_hooks('doc_events', {}) out = {} for key, value in iteritems(hooks): if isinstance(key, tuple): for doctype in key: append_hook(out, doctype, value) else: append_hook(out, key, value) local.doc_events_hooks = out return local.doc_events_hooks def get_hooks(hook=None, default=None, app_name=None): """Get hooks via `app/hooks.py` :param hook: Name of the hook. Will gather all hooks for this name and return as a list. :param default: Default if no hook found. :param app_name: Filter by app.""" def load_app_hooks(app_name=None): hooks = {} for app in [app_name] if app_name else get_installed_apps(sort=True): app = "frappe" if app=="webnotes" else app try: app_hooks = get_module(app + ".hooks") except ImportError: if local.flags.in_install_app: # if app is not installed while restoring # ignore it pass print('Could not find app "{0}"'.format(app_name)) if not request: sys.exit(1) raise for key in dir(app_hooks): if not key.startswith("_"): append_hook(hooks, key, getattr(app_hooks, key)) return hooks no_cache = conf.developer_mode or False if app_name: hooks = _dict(load_app_hooks(app_name)) else: if no_cache: hooks = _dict(load_app_hooks()) else: hooks = _dict(cache().get_value("app_hooks", load_app_hooks)) if hook: return hooks.get(hook) or (default if default is not None else []) else: return hooks def append_hook(target, key, value): '''appends a hook to the the target dict. 
If the hook key, exists, it will make it a key. If the hook value is a dict, like doc_events, it will listify the values against the key. ''' if isinstance(value, dict): # dict? make a list of values against each key target.setdefault(key, {}) for inkey in value: append_hook(target[key], inkey, value[inkey]) else: # make a list target.setdefault(key, []) if not isinstance(value, list): value = [value] target[key].extend(value) def setup_module_map(): """Rebuild map of all modules (internal).""" _cache = cache() if conf.db_name: local.app_modules = _cache.get_value("app_modules") local.module_app = _cache.get_value("module_app") if not (local.app_modules and local.module_app): local.module_app, local.app_modules = {}, {} for app in get_all_apps(True): if app=="webnotes": app="frappe" local.app_modules.setdefault(app, []) for module in get_module_list(app): module = scrub(module) local.module_app[module] = app local.app_modules[app].append(module) if conf.db_name: _cache.set_value("app_modules", local.app_modules) _cache.set_value("module_app", local.module_app) def get_file_items(path, raise_not_found=False, ignore_empty_lines=True): """Returns items from text file as a list. Ignores empty lines.""" import frappe.utils content = read_file(path, raise_not_found=raise_not_found) if content: content = frappe.utils.strip(content) return [p.strip() for p in content.splitlines() if (not ignore_empty_lines) or (p.strip() and not p.startswith("#"))] else: return [] def get_file_json(path): """Read a file and return parsed JSON object.""" with open(path, 'r') as f: return json.load(f) def read_file(path, raise_not_found=False): """Open a file and return its content as Unicode.""" if isinstance(path, text_type): path = path.encode("utf-8") if os.path.exists(path): with open(path, "r") as f: return as_unicode(f.read()) elif raise_not_found: raise IOError("{} Not Found".format(path)) else: return None def get_attr(method_string): """Get python method object from its name.""" app_name = method_string.split(".")[0] if not local.flags.in_install and app_name not in get_installed_apps(): throw(_("App {0} is not installed").format(app_name), AppNotInstalledError) modulename = '.'.join(method_string.split('.')[:-1]) methodname = method_string.split('.')[-1] return getattr(get_module(modulename), methodname) def call(fn, *args, **kwargs): """Call a function and match arguments.""" if isinstance(fn, string_types): fn = get_attr(fn) newargs = get_newargs(fn, kwargs) return fn(*args, **newargs) def get_newargs(fn, kwargs): if hasattr(fn, 'fnargs'): fnargs = fn.fnargs else: fnargs, varargs, varkw, defaults = inspect.getargspec(fn) newargs = {} for a in kwargs: if (a in fnargs) or varkw: newargs[a] = kwargs.get(a) if "flags" in newargs: del newargs["flags"] return newargs def make_property_setter(args, ignore_validate=False, validate_fields_for_doctype=True): """Create a new **Property Setter** (for overriding DocType and DocField properties). 
If doctype is not specified, it will create a property setter for all fields with the given fieldname""" args = _dict(args) if not args.doctype_or_field: args.doctype_or_field = 'DocField' if not args.property_type: args.property_type = db.get_value('DocField', {'parent': 'DocField', 'fieldname': args.property}, 'fieldtype') or 'Data' if not args.doctype: doctype_list = db.sql_list('select distinct parent from tabDocField where fieldname=%s', args.fieldname) else: doctype_list = [args.doctype] for doctype in doctype_list: if not args.property_type: args.property_type = db.get_value('DocField', {'parent': doctype, 'fieldname': args.fieldname}, 'fieldtype') or 'Data' ps = get_doc({ 'doctype': "Property Setter", 'doctype_or_field': args.doctype_or_field, 'doc_type': doctype, 'field_name': args.fieldname, 'property': args.property, 'value': args.value, 'property_type': args.property_type or "Data", '__islocal': 1 }) ps.flags.ignore_validate = ignore_validate ps.flags.validate_fields_for_doctype = validate_fields_for_doctype ps.validate_fieldtype_change() ps.insert() def import_doc(path, ignore_links=False, ignore_insert=False, insert=False): """Import a file using Data Import.""" from frappe.core.doctype.data_import import data_import data_import.import_doc(path, ignore_links=ignore_links, ignore_insert=ignore_insert, insert=insert) def copy_doc(doc, ignore_no_copy=True): """ No_copy fields also get copied.""" import copy def remove_no_copy_fields(d): for df in d.meta.get("fields", {"no_copy": 1}): if hasattr(d, df.fieldname): d.set(df.fieldname, None) fields_to_clear = ['name', 'owner', 'creation', 'modified', 'modified_by'] if not local.flags.in_test: fields_to_clear.append("docstatus") if not isinstance(doc, dict): d = doc.as_dict() else: d = doc newdoc = get_doc(copy.deepcopy(d)) newdoc.set("__islocal", 1) for fieldname in (fields_to_clear + ['amended_from', 'amendment_date']): newdoc.set(fieldname, None) if not ignore_no_copy: remove_no_copy_fields(newdoc) for i, d in enumerate(newdoc.get_all_children()): d.set("__islocal", 1) for fieldname in fields_to_clear: d.set(fieldname, None) if not ignore_no_copy: remove_no_copy_fields(d) return newdoc def compare(val1, condition, val2): """Compare two values using `frappe.utils.compare` `condition` could be: - "^" - "in" - "not in" - "=" - "!=" - ">" - "<" - ">=" - "<=" - "not None" - "None" """ import frappe.utils return frappe.utils.compare(val1, condition, val2) def respond_as_web_page(title, html, success=None, http_status_code=None, context=None, indicator_color=None, primary_action='/', primary_label = None, fullpage=False, width=None, template='message'): """Send response as a web page with a message rather than JSON. Used to show permission errors etc. :param title: Page title and heading. :param message: Message to be shown. :param success: Alert message. 
:param http_status_code: HTTP status code :param context: web template context :param indicator_color: color of indicator in title :param primary_action: route on primary button (default is `/`) :param primary_label: label on primary button (default is "Home") :param fullpage: hide header / footer :param width: Width of message in pixels :param template: Optionally pass view template """ local.message_title = title local.message = html local.response['type'] = 'page' local.response['route'] = template local.no_cache = 1 if http_status_code: local.response['http_status_code'] = http_status_code if not context: context = {} if not indicator_color: if success: indicator_color = 'green' elif http_status_code and http_status_code > 300: indicator_color = 'red' else: indicator_color = 'blue' context['indicator_color'] = indicator_color context['primary_label'] = primary_label context['primary_action'] = primary_action context['error_code'] = http_status_code context['fullpage'] = fullpage if width: context['card_width'] = width local.response['context'] = context def redirect_to_message(title, html, http_status_code=None, context=None, indicator_color=None): """Redirects to /message?id=random Similar to respond_as_web_page, but used to 'redirect' and show message pages like success, failure, etc. with a detailed message :param title: Page title and heading. :param message: Message to be shown. :param http_status_code: HTTP status code. Example Usage: frappe.redirect_to_message(_('Thank you'), "<div><p>You will receive an email at test@example.com</p></div>") """ message_id = generate_hash(length=8) message = { 'context': context or {}, 'http_status_code': http_status_code or 200 } message['context'].update({ 'header': title, 'title': title, 'message': html }) if indicator_color: message['context'].update({ "indicator_color": indicator_color }) cache().set_value("message_id:{0}".format(message_id), message, expires_in_sec=60) location = '/message?id={0}'.format(message_id) if not getattr(local, 'is_ajax', False): local.response["type"] = "redirect" local.response["location"] = location else: return location def build_match_conditions(doctype, as_condition=True): """Return match (User permissions) for given doctype as list or SQL.""" import frappe.desk.reportview return frappe.desk.reportview.build_match_conditions(doctype, as_condition=as_condition) def get_list(doctype, *args, **kwargs): """List database query via `frappe.model.db_query`. Will also check for permissions. :param doctype: DocType on which query is to be made. :param fields: List of fields or `*`. :param filters: List of filters (see example). :param order_by: Order By e.g. `modified desc`. :param limit_page_start: Start results at record #. Default 0. :param limit_page_length: No of records in the page. Default 20. Example usage: # simple dict filter frappe.get_list("ToDo", fields=["name", "description"], filters = {"owner":"test@example.com"}) # filter as a list of lists frappe.get_list("ToDo", fields="*", filters = [["modified", ">", "2014-01-01"]]) # filter as a list of dicts frappe.get_list("ToDo", fields="*", filters = {"description": ("like", "test%")}) """ import frappe.model.db_query return frappe.model.db_query.DatabaseQuery(doctype).execute(None, *args, **kwargs) def get_all(doctype, *args, **kwargs): """List database query via `frappe.model.db_query`. Will **not** check for permissions. Parameters are same as `frappe.get_list` :param doctype: DocType on which query is to be made. :param fields: List of fields or `*`. 
Default is: `["name"]`. :param filters: List of filters (see example). :param order_by: Order By e.g. `modified desc`. :param limit_page_start: Start results at record #. Default 0. :param limit_page_length: No of records in the page. Default 20. Example usage: # simple dict filter frappe.get_all("ToDo", fields=["name", "description"], filters = {"owner":"test@example.com"}) # filter as a list of lists frappe.get_all("ToDo", fields=["*"], filters = [["modified", ">", "2014-01-01"]]) # filter as a list of dicts frappe.get_all("ToDo", fields=["*"], filters = {"description": ("like", "test%")}) """ kwargs["ignore_permissions"] = True if not "limit_page_length" in kwargs: kwargs["limit_page_length"] = 0 return get_list(doctype, *args, **kwargs) def get_value(*args, **kwargs): """Returns a document property or list of properties. Alias for `frappe.db.get_value` :param doctype: DocType name. :param filters: Filters like `{"x":"y"}` or name of the document. `None` if Single DocType. :param fieldname: Column name. :param ignore: Don't raise exception if table, column is missing. :param as_dict: Return values as dict. :param debug: Print query in error log. """ return db.get_value(*args, **kwargs) def as_json(obj, indent=1): from frappe.utils.response import json_handler return json.dumps(obj, indent=indent, sort_keys=True, default=json_handler) def are_emails_muted(): from frappe.utils import cint return flags.mute_emails or cint(conf.get("mute_emails") or 0) or False def get_test_records(doctype): """Returns list of objects from `test_records.json` in the given doctype's folder.""" from frappe.modules import get_doctype_module, get_module_path path = os.path.join(get_module_path(get_doctype_module(doctype)), "doctype", scrub(doctype), "test_records.json") if os.path.exists(path): with open(path, "r") as f: return json.loads(f.read()) else: return [] def format_value(*args, **kwargs): """Format value with given field properties. :param value: Value to be formatted. :param df: (Optional) DocField object with properties `fieldtype`, `options` etc.""" import frappe.utils.formatters return frappe.utils.formatters.format_value(*args, **kwargs) def format(*args, **kwargs): """Format value with given field properties. :param value: Value to be formatted. :param df: (Optional) DocField object with properties `fieldtype`, `options` etc.""" import frappe.utils.formatters return frappe.utils.formatters.format_value(*args, **kwargs) def get_print(doctype=None, name=None, print_format=None, style=None, html=None, as_pdf=False, doc=None, output = None, no_letterhead = 0): """Get Print Format for given document. :param doctype: DocType of document. :param name: Name of document. :param print_format: Print Format name. Default 'Standard', :param style: Print Format style. :param as_pdf: Return as PDF. 
Default False.""" from frappe.website.render import build_page from frappe.utils.pdf import get_pdf local.form_dict.doctype = doctype local.form_dict.name = name local.form_dict.format = print_format local.form_dict.style = style local.form_dict.doc = doc local.form_dict.no_letterhead = no_letterhead if not html: html = build_page("printview") if as_pdf: return get_pdf(html, output = output) else: return html def attach_print(doctype, name, file_name=None, print_format=None, style=None, html=None, doc=None, lang=None, print_letterhead=True): from frappe.utils import scrub_urls if not file_name: file_name = name file_name = file_name.replace(' ','').replace('/','-') print_settings = db.get_singles_dict("Print Settings") _lang = local.lang #set lang as specified in print format attachment if lang: local.lang = lang local.flags.ignore_print_permissions = True no_letterhead = not print_letterhead if int(print_settings.send_print_as_pdf or 0): out = { "fname": file_name + ".pdf", "fcontent": get_print(doctype, name, print_format=print_format, style=style, html=html, as_pdf=True, doc=doc, no_letterhead=no_letterhead) } else: out = { "fname": file_name + ".html", "fcontent": scrub_urls(get_print(doctype, name, print_format=print_format, style=style, html=html, doc=doc, no_letterhead=no_letterhead)).encode("utf-8") } local.flags.ignore_print_permissions = False #reset lang to original local lang local.lang = _lang return out def publish_progress(*args, **kwargs): """Show the user progress for a long request :param percent: Percent progress :param title: Title :param doctype: Optional, for document type :param docname: Optional, for document name :param description: Optional description """ import frappe.realtime return frappe.realtime.publish_progress(*args, **kwargs) def publish_realtime(*args, **kwargs): """Publish real-time updates :param event: Event name, like `task_progress` etc. :param message: JSON message object. 
For async must contain `task_id` :param room: Room in which to publish update (default entire site) :param user: Transmit to user :param doctype: Transmit to doctype, docname :param docname: Transmit to doctype, docname :param after_commit: (default False) will emit after current transaction is committed """ import frappe.realtime return frappe.realtime.publish_realtime(*args, **kwargs) def local_cache(namespace, key, generator, regenerate_if_none=False): """A key value store for caching within a request :param namespace: frappe.local.cache[namespace] :param key: frappe.local.cache[namespace][key] used to retrieve value :param generator: method to generate a value if not found in store """ if namespace not in local.cache: local.cache[namespace] = {} if key not in local.cache[namespace]: local.cache[namespace][key] = generator() elif local.cache[namespace][key]==None and regenerate_if_none: # if key exists but the previous result was None local.cache[namespace][key] = generator() return local.cache[namespace][key] def enqueue(*args, **kwargs): ''' Enqueue method to be executed using a background worker :param method: method string or method object :param queue: (optional) should be either long, default or short :param timeout: (optional) should be set according to the functions :param event: this is passed to enable clearing of jobs from queues :param is_async: (optional) if is_async=False, the method is executed immediately, else via a worker :param job_name: (optional) can be used to name an enqueue call, which can be used to prevent duplicate calls :param kwargs: keyword arguments to be passed to the method ''' import frappe.utils.background_jobs return frappe.utils.background_jobs.enqueue(*args, **kwargs) def enqueue_doc(*args, **kwargs): ''' Enqueue method to be executed using a background worker :param doctype: DocType of the document on which you want to run the event :param name: Name of the document on which you want to run the event :param method: method string or method object :param queue: (optional) should be either long, default or short :param timeout: (optional) should be set according to the functions :param kwargs: keyword arguments to be passed to the method ''' import frappe.utils.background_jobs return frappe.utils.background_jobs.enqueue_doc(*args, **kwargs) def get_doctype_app(doctype): def _get_doctype_app(): doctype_module = local.db.get_value("DocType", doctype, "module") return local.module_app[scrub(doctype_module)] return local_cache("doctype_app", doctype, generator=_get_doctype_app) loggers = {} log_level = None def logger(module=None, with_more_info=True): '''Returns a python logger that uses StreamHandler''' from frappe.utils.logger import get_logger return get_logger(module or 'default', with_more_info=with_more_info) def log_error(message=None, title=None): '''Log error to Error Log''' return get_doc(dict(doctype='Error Log', error=as_unicode(message or get_traceback()), method=title)).insert(ignore_permissions=True) def get_desk_link(doctype, name): return '<a href="#Form/{0}/{1}" style="font-weight: bold;">{2} {1}</a>'.format(doctype, name, _(doctype)) def bold(text): return '<b>{0}</b>'.format(text) def safe_eval(code, eval_globals=None, eval_locals=None): '''A safer `eval`''' whitelisted_globals = { "int": int, "float": float, "long": int, "round": round } if '__' in code: throw('Illegal rule {0}. 
Cannot use "__"'.format(bold(code))) if not eval_globals: eval_globals = {} eval_globals['__builtins__'] = {} eval_globals.update(whitelisted_globals) return eval(code, eval_globals, eval_locals) def get_system_settings(key): if key not in local.system_settings: local.system_settings.update({key: db.get_single_value('System Settings', key)}) return local.system_settings.get(key) def get_active_domains(): from frappe.core.doctype.domain_settings.domain_settings import get_active_domains return get_active_domains() def get_version(doctype, name, limit = None, head = False, raise_err = True): ''' Returns a list of version information of a given DocType (Applicable only if DocType has changes tracked). Example >>> frappe.get_version('User', 'foobar@gmail.com') >>> [ { "version": [version.data], # Refer Version DocType get_diff method and data attribute "user": "admin@gmail.com" # User that created this version "creation": <datetime.datetime> # Creation timestamp of that object. } ] ''' meta = get_meta(doctype) if meta.track_changes: names = db.sql(""" SELECT name from tabVersion WHERE ref_doctype = '{doctype}' AND docname = '{name}' {order_by} {limit} """.format( doctype = doctype, name = name, order_by = 'ORDER BY creation' if head else '', limit = 'LIMIT {limit}'.format(limit = limit) if limit else '' )) from frappe.chat.util import squashify, dictify, safe_json_loads versions = [ ] for name in names: name = squashify(name) doc = get_doc('Version', name) data = doc.data data = safe_json_loads(data) data = dictify(dict( version = data, user = doc.owner, creation = doc.creation )) versions.append(data) return versions else: if raise_err: raise ValueError('{doctype} has no versions tracked.'.format( doctype = doctype )) @whitelist(allow_guest = True) def ping(): return "pong" def safe_encode(param, encoding = 'utf-8'): try: param = param.encode(encoding) except Exception: pass return param def safe_decode(param, encoding = 'utf-8'): try: param = param.decode(encoding) except Exception: pass return param def parse_json(val): from frappe.utils import parse_json return parse_json(val) def mock(type, size = 1, locale = 'en'): results = [ ] faker = Faker(locale) if not type in dir(faker): raise ValueError('Not a valid mock type.') else: for i in range(size): data = getattr(faker, type)() results.append(data) from frappe.chat.util import squashify results = squashify(results) return results
30.613538
180
0.729552
from __future__ import unicode_literals, print_function from six import iteritems, binary_type, text_type, string_types from werkzeug.local import Local, release_local import os, sys, importlib, inspect, json from past.builtins import cmp from faker import Faker from .exceptions import * from .utils.jinja import (get_jenv, get_template, render_template, get_email_from_template, get_jloader) if sys.version[0] == '2': reload(sys) sys.setdefaultencoding("utf-8") __version__ = '11.1.65' __title__ = "Frappe Framework" local = Local() class _dict(dict): def __getattr__(self, key): ret = self.get(key) if not ret and key.startswith("__"): raise AttributeError() return ret def __setattr__(self, key, value): self[key] = value def __getstate__(self): return self def __setstate__(self, d): self.update(d) def update(self, d): super(_dict, self).update(d) return self def copy(self): return _dict(dict(self).copy()) def _(msg, lang=None): from frappe.translate import get_full_dict from frappe.utils import strip_html_tags, is_html if not hasattr(local, 'lang'): local.lang = lang or 'en' if not lang: lang = local.lang non_translated_msg = msg if is_html(msg): msg = strip_html_tags(msg) msg = as_unicode(msg).strip() return get_full_dict(lang).get(msg) or non_translated_msg def as_unicode(text, encoding='utf-8'): if isinstance(text, text_type): return text elif text==None: return '' elif isinstance(text, binary_type): return text_type(text, encoding) else: return text_type(text) def get_lang_dict(fortype, name=None): from frappe.translate import get_dict return get_dict(fortype, name) def set_user_lang(user, user_language=None): from frappe.translate import get_user_lang local.lang = get_user_lang(user) db = local("db") conf = local("conf") form = form_dict = local("form_dict") request = local("request") response = local("response") session = local("session") user = local("user") flags = local("flags") error_log = local("error_log") debug_log = local("debug_log") message_log = local("message_log") lang = local("lang") def init(site, sites_path=None, new_site=False): if getattr(local, "initialised", None): return if not sites_path: sites_path = '.' 
local.error_log = [] local.message_log = [] local.debug_log = [] local.realtime_log = [] local.flags = _dict({ "ran_schedulers": [], "currently_saving": [], "redirect_location": "", "in_install_db": False, "in_install_app": False, "in_import": False, "in_test": False, "mute_messages": False, "ignore_links": False, "mute_emails": False, "has_dataurl": False, "new_site": new_site }) local.rollback_observers = [] local.test_objects = {} local.site = site local.sites_path = sites_path local.site_path = os.path.join(sites_path, site) local.request_ip = None local.response = _dict({"docs":[]}) local.task_id = None local.conf = _dict(get_site_config()) local.lang = local.conf.lang or "en" local.lang_full_dict = None local.module_app = None local.app_modules = None local.system_settings = _dict() local.user = None local.user_perms = None local.session = None local.role_permissions = {} local.valid_columns = {} local.new_doc_templates = {} local.link_count = {} local.jenv = None local.jloader =None local.cache = {} local.document_cache = {} local.meta_cache = {} local.form_dict = _dict() local.session = _dict() setup_module_map() local.initialised = True def connect(site=None, db_name=None): from frappe.database import Database if site: init(site) local.db = Database(user=db_name or local.conf.db_name) set_user("Administrator") def connect_replica(): from frappe.database import Database user = local.conf.db_name password = local.conf.db_password if local.conf.different_credentials_for_replica: user = local.conf.replica_db_name password = local.conf.replica_db_password local.replica_db = Database(host=local.conf.replica_host, user=user, password=password) local.primary_db = local.db local.db = local.replica_db def get_site_config(sites_path=None, site_path=None): config = {} sites_path = sites_path or getattr(local, "sites_path", None) site_path = site_path or getattr(local, "site_path", None) if sites_path: common_site_config = os.path.join(sites_path, "common_site_config.json") if os.path.exists(common_site_config): config.update(get_file_json(common_site_config)) if site_path: site_config = os.path.join(site_path, "site_config.json") if os.path.exists(site_config): config.update(get_file_json(site_config)) elif local.site and not local.flags.new_site: print("{0} does not exist".format(local.site)) sys.exit(1) return _dict(config) def get_conf(site=None): if hasattr(local, 'conf'): return local.conf else: with init_site(site): return local.conf class init_site: def __init__(self, site=None): self.site = site or '' def __enter__(self): init(self.site) return local def __exit__(self, type, value, traceback): destroy() def destroy(): if db: db.close() release_local(local) redis_server = None def cache(): global redis_server if not redis_server: from frappe.utils.redis_wrapper import RedisWrapper redis_server = RedisWrapper.from_url(conf.get('redis_cache') or "redis://localhost:11311") return redis_server def get_traceback(): from frappe.utils import get_traceback return get_traceback() def errprint(msg): msg = as_unicode(msg) if not request or (not "cmd" in local.form_dict) or conf.developer_mode: print(msg.encode('utf-8')) error_log.append({"exc": msg}) def log(msg): if not request: if conf.get("logging") or False: print(repr(msg)) debug_log.append(as_unicode(msg)) def msgprint(msg, title=None, raise_exception=0, as_table=False, indicator=None, alert=False): from frappe.utils import encode msg = safe_decode(msg) out = _dict(message=msg) def _raise_exception(): if raise_exception: if 
flags.rollback_on_exception: db.rollback() import inspect if inspect.isclass(raise_exception) and issubclass(raise_exception, Exception): raise raise_exception(msg) else: raise ValidationError(msg) if flags.mute_messages: _raise_exception() return if as_table and type(msg) in (list, tuple): out.msg = '<table border="1px" style="border-collapse: collapse" cellpadding="2px">' + ''.join(['<tr>'+''.join(['<td>%s</td>' % c for c in r])+'</tr>' for r in msg]) + '</table>' if flags.print_messages and out.msg: print("Message: " + repr(out.msg).encode("utf-8")) if title: out.title = title if not indicator and raise_exception: indicator = 'red' if indicator: out.indicator = indicator if alert: out.alert = 1 message_log.append(json.dumps(out)) if raise_exception and hasattr(raise_exception, '__name__'): local.response['exc_type'] = raise_exception.__name__ _raise_exception() def clear_messages(): local.message_log = [] def clear_last_message(): if len(local.message_log) > 0: local.message_log = local.message_log[:-1] def throw(msg, exc=ValidationError, title=None): msgprint(msg, raise_exception=exc, title=title, indicator='red') def emit_js(js, user=False, **kwargs): from frappe.realtime import publish_realtime if user == False: user = session.user publish_realtime('eval_js', js, user=user, **kwargs) def create_folder(path, with_init=False): from frappe.utils import touch_file if not os.path.exists(path): os.makedirs(path) if with_init: touch_file(os.path.join(path, "__init__.py")) def set_user(username): local.session.user = username local.session.sid = username local.cache = {} local.form_dict = _dict() local.jenv = None local.session.data = _dict() local.role_permissions = {} local.new_doc_templates = {} local.user_perms = None def get_user(): from frappe.utils.user import UserPermissions if not local.user_perms: local.user_perms = UserPermissions(local.session.user) return local.user_perms def get_roles(username=None): if not local.session: return ["Guest"] if username: import frappe.permissions return frappe.permissions.get_roles(username) else: return get_user().get_roles() def get_request_header(key, default=None): return request.headers.get(key, default) def sendmail(recipients=[], sender="", subject="No Subject", message="No Message", as_markdown=False, delayed=True, reference_doctype=None, reference_name=None, unsubscribe_method=None, unsubscribe_params=None, unsubscribe_message=None, attachments=None, content=None, doctype=None, name=None, reply_to=None, cc=[], bcc=[], message_id=None, in_reply_to=None, send_after=None, expose_recipients=None, send_priority=1, communication=None, retry=1, now=None, read_receipt=None, is_notification=False, inline_images=None, template=None, args=None, header=None, print_letterhead=False): text_content = None if template: message, text_content = get_email_from_template(template, args) message = content or message if as_markdown: message = frappe.utils.md_to_html(message) if not delayed: now = True from frappe.email import queue queue.send(recipients=recipients, sender=sender, subject=subject, message=message, text_content=text_content, reference_doctype = doctype or reference_doctype, reference_name = name or reference_name, unsubscribe_method=unsubscribe_method, unsubscribe_params=unsubscribe_params, unsubscribe_message=unsubscribe_message, attachments=attachments, reply_to=reply_to, cc=cc, bcc=bcc, message_id=message_id, in_reply_to=in_reply_to, send_after=send_after, expose_recipients=expose_recipients, send_priority=send_priority, 
communication=communication, now=now, read_receipt=read_receipt, is_notification=is_notification, inline_images=inline_images, header=header, print_letterhead=print_letterhead) whitelisted = [] guest_methods = [] xss_safe_methods = [] def whitelist(allow_guest=False, xss_safe=False): def innerfn(fn): global whitelisted, guest_methods, xss_safe_methods whitelisted.append(fn) if allow_guest: guest_methods.append(fn) if xss_safe: xss_safe_methods.append(fn) return fn return innerfn def read_only(): def innfn(fn): def wrapper_fn(*args, **kwargs): if conf.read_from_replica: connect_replica() try: retval = fn(*args, **get_newargs(fn, kwargs)) except: raise finally: if local and hasattr(local, 'primary_db'): local.db.close() local.db = local.primary_db return retval return wrapper_fn return innfn def only_for(roles): if local.flags.in_test: return if not isinstance(roles, (tuple, list)): roles = (roles,) roles = set(roles) myroles = set(get_roles()) if not roles.intersection(myroles): raise PermissionError def get_domain_data(module): try: domain_data = get_hooks('domains') if module in domain_data: return _dict(get_attr(get_hooks('domains')[module][0] + '.data')) else: return _dict() except ImportError: if local.flags.in_test: return _dict() else: raise def clear_cache(user=None, doctype=None): import frappe.cache_manager if doctype: frappe.cache_manager.clear_doctype_cache(doctype) reset_metadata_version() elif user: frappe.cache_manager.clear_user_cache(user) else: from frappe import translate frappe.cache_manager.clear_user_cache() translate.clear_cache() reset_metadata_version() local.cache = {} local.new_doc_templates = {} for fn in get_hooks("clear_cache"): get_attr(fn)() local.role_permissions = {} def has_permission(doctype=None, ptype="read", doc=None, user=None, verbose=False, throw=False): if not doctype and doc: doctype = doc.doctype import frappe.permissions out = frappe.permissions.has_permission(doctype, ptype, doc=doc, verbose=verbose, user=user) if throw and not out: if doc: frappe.throw(_("No permission for {0}").format(doc.doctype + " " + doc.name)) else: frappe.throw(_("No permission for {0}").format(doctype)) return out def has_website_permission(doc=None, ptype='read', user=None, verbose=False, doctype=None): if not user: user = session.user if doc: if isinstance(doc, string_types): doc = get_doc(doctype, doc) doctype = doc.doctype if doc.flags.ignore_permissions: return True if hasattr(doc, 'has_website_permission'): return doc.has_website_permission(ptype, user, verbose=verbose) hooks = (get_hooks("has_website_permission") or {}).get(doctype, []) if hooks: for method in hooks: result = call(method, doc=doc, ptype=ptype, user=user, verbose=verbose) if not result: return False return True else: return False def is_table(doctype): def get_tables(): return db.sql_list("select name from tabDocType where istable=1") tables = cache().get_value("is_table", get_tables) return doctype in tables def get_precision(doctype, fieldname, currency=None, doc=None): from frappe.model.meta import get_field_precision return get_field_precision(get_meta(doctype).get_field(fieldname), doc, currency) def generate_hash(txt=None, length=None): import hashlib, time from .utils import random_string digest = hashlib.sha224(((txt or "") + repr(time.time()) + repr(random_string(8))).encode()).hexdigest() if length: digest = digest[:length] return digest def reset_metadata_version(): v = generate_hash() cache().set_value("metadata_version", v) return v def new_doc(doctype, parent_doc=None, 
parentfield=None, as_dict=False): from frappe.model.create_new import get_new_doc return get_new_doc(doctype, parent_doc, parentfield, as_dict=as_dict) def set_value(doctype, docname, fieldname, value=None): import frappe.client return frappe.client.set_value(doctype, docname, fieldname, value) def get_cached_doc(*args, **kwargs): if args and len(args) > 1 and isinstance(args[1], text_type): key = get_document_cache_key(args[0], args[1]) doc = local.document_cache.get(key) if doc: return doc doc = cache().hget('document_cache', key) if doc: doc = get_doc(doc) local.document_cache[key] = doc return doc doc = get_doc(*args, **kwargs) return doc def get_document_cache_key(doctype, name): return '{0}::{1}'.format(doctype, name) def clear_document_cache(doctype, name): cache().hdel("last_modified", doctype) key = get_document_cache_key(doctype, name) if key in local.document_cache: del local.document_cache[key] cache().hdel('document_cache', key) def get_cached_value(doctype, name, fieldname, as_dict=False): doc = get_cached_doc(doctype, name) if isinstance(fieldname, string_types): if as_dict: throw('Cannot make dict for single fieldname') return doc.get(fieldname) values = [doc.get(f) for f in fieldname] if as_dict: return _dict(zip(fieldname, values)) return values def get_doc(*args, **kwargs): import frappe.model.document doc = frappe.model.document.get_doc(*args, **kwargs) if args and len(args) > 1: key = get_document_cache_key(args[0], args[1]) local.document_cache[key] = doc cache().hset('document_cache', key, doc.as_dict()) return doc def get_last_doc(doctype): d = get_all(doctype, ["name"], order_by="creation desc", limit_page_length=1) if d: return get_doc(doctype, d[0].name) else: raise DoesNotExistError def get_single(doctype): return get_doc(doctype, doctype) def get_meta(doctype, cached=True): import frappe.model.meta return frappe.model.meta.get_meta(doctype, cached=cached) def get_meta_module(doctype): import frappe.modules return frappe.modules.load_doctype_module(doctype) def delete_doc(doctype=None, name=None, force=0, ignore_doctypes=None, for_reload=False, ignore_permissions=False, flags=None, ignore_on_trash=False, ignore_missing=True): import frappe.model.delete_doc frappe.model.delete_doc.delete_doc(doctype, name, force, ignore_doctypes, for_reload, ignore_permissions, flags, ignore_on_trash, ignore_missing) def delete_doc_if_exists(doctype, name, force=0): if db.exists(doctype, name): delete_doc(doctype, name, force=force) def reload_doctype(doctype, force=False, reset_permissions=False): reload_doc(scrub(db.get_value("DocType", doctype, "module")), "doctype", scrub(doctype), force=force, reset_permissions=reset_permissions) def reload_doc(module, dt=None, dn=None, force=False, reset_permissions=False): import frappe.modules return frappe.modules.reload_doc(module, dt, dn, force=force, reset_permissions=reset_permissions) def rename_doc(*args, **kwargs): from frappe.model.rename_doc import rename_doc return rename_doc(*args, **kwargs) def get_module(modulename): return importlib.import_module(modulename) def scrub(txt): return txt.replace(' ','_').replace('-', '_').lower() def unscrub(txt): return txt.replace('_',' ').replace('-', ' ').title() def get_module_path(module, *joins): module = scrub(module) return get_pymodule_path(local.module_app[module] + "." 
+ module, *joins) def get_app_path(app_name, *joins): return get_pymodule_path(app_name, *joins) def get_site_path(*joins): return os.path.join(local.site_path, *joins) def get_pymodule_path(modulename, *joins): if not "public" in joins: joins = [scrub(part) for part in joins] return os.path.join(os.path.dirname(get_module(scrub(modulename)).__file__), *joins) def get_module_list(app_name): return get_file_items(os.path.join(os.path.dirname(get_module(app_name).__file__), "modules.txt")) def get_all_apps(with_internal_apps=True, sites_path=None): if not sites_path: sites_path = local.sites_path apps = get_file_items(os.path.join(sites_path, "apps.txt"), raise_not_found=True) if with_internal_apps: for app in get_file_items(os.path.join(local.site_path, "apps.txt")): if app not in apps: apps.append(app) if "frappe" in apps: apps.remove("frappe") apps.insert(0, 'frappe') return apps def get_installed_apps(sort=False, frappe_last=False): if getattr(flags, "in_install_db", True): return [] if not db: connect() installed = json.loads(db.get_global("installed_apps") or "[]") if sort: installed = [app for app in get_all_apps(True) if app in installed] if frappe_last: if 'frappe' in installed: installed.remove('frappe') installed.append('frappe') return installed def get_doc_hooks(): if not hasattr(local, 'doc_events_hooks'): hooks = get_hooks('doc_events', {}) out = {} for key, value in iteritems(hooks): if isinstance(key, tuple): for doctype in key: append_hook(out, doctype, value) else: append_hook(out, key, value) local.doc_events_hooks = out return local.doc_events_hooks def get_hooks(hook=None, default=None, app_name=None): def load_app_hooks(app_name=None): hooks = {} for app in [app_name] if app_name else get_installed_apps(sort=True): app = "frappe" if app=="webnotes" else app try: app_hooks = get_module(app + ".hooks") except ImportError: if local.flags.in_install_app: pass print('Could not find app "{0}"'.format(app_name)) if not request: sys.exit(1) raise for key in dir(app_hooks): if not key.startswith("_"): append_hook(hooks, key, getattr(app_hooks, key)) return hooks no_cache = conf.developer_mode or False if app_name: hooks = _dict(load_app_hooks(app_name)) else: if no_cache: hooks = _dict(load_app_hooks()) else: hooks = _dict(cache().get_value("app_hooks", load_app_hooks)) if hook: return hooks.get(hook) or (default if default is not None else []) else: return hooks def append_hook(target, key, value): if isinstance(value, dict): target.setdefault(key, {}) for inkey in value: append_hook(target[key], inkey, value[inkey]) else: target.setdefault(key, []) if not isinstance(value, list): value = [value] target[key].extend(value) def setup_module_map(): _cache = cache() if conf.db_name: local.app_modules = _cache.get_value("app_modules") local.module_app = _cache.get_value("module_app") if not (local.app_modules and local.module_app): local.module_app, local.app_modules = {}, {} for app in get_all_apps(True): if app=="webnotes": app="frappe" local.app_modules.setdefault(app, []) for module in get_module_list(app): module = scrub(module) local.module_app[module] = app local.app_modules[app].append(module) if conf.db_name: _cache.set_value("app_modules", local.app_modules) _cache.set_value("module_app", local.module_app) def get_file_items(path, raise_not_found=False, ignore_empty_lines=True): import frappe.utils content = read_file(path, raise_not_found=raise_not_found) if content: content = frappe.utils.strip(content) return [p.strip() for p in content.splitlines() if (not 
ignore_empty_lines) or (p.strip() and not p.startswith("#"))] else: return [] def get_file_json(path): with open(path, 'r') as f: return json.load(f) def read_file(path, raise_not_found=False): if isinstance(path, text_type): path = path.encode("utf-8") if os.path.exists(path): with open(path, "r") as f: return as_unicode(f.read()) elif raise_not_found: raise IOError("{} Not Found".format(path)) else: return None def get_attr(method_string): app_name = method_string.split(".")[0] if not local.flags.in_install and app_name not in get_installed_apps(): throw(_("App {0} is not installed").format(app_name), AppNotInstalledError) modulename = '.'.join(method_string.split('.')[:-1]) methodname = method_string.split('.')[-1] return getattr(get_module(modulename), methodname) def call(fn, *args, **kwargs): if isinstance(fn, string_types): fn = get_attr(fn) newargs = get_newargs(fn, kwargs) return fn(*args, **newargs) def get_newargs(fn, kwargs): if hasattr(fn, 'fnargs'): fnargs = fn.fnargs else: fnargs, varargs, varkw, defaults = inspect.getargspec(fn) newargs = {} for a in kwargs: if (a in fnargs) or varkw: newargs[a] = kwargs.get(a) if "flags" in newargs: del newargs["flags"] return newargs def make_property_setter(args, ignore_validate=False, validate_fields_for_doctype=True): args = _dict(args) if not args.doctype_or_field: args.doctype_or_field = 'DocField' if not args.property_type: args.property_type = db.get_value('DocField', {'parent': 'DocField', 'fieldname': args.property}, 'fieldtype') or 'Data' if not args.doctype: doctype_list = db.sql_list('select distinct parent from tabDocField where fieldname=%s', args.fieldname) else: doctype_list = [args.doctype] for doctype in doctype_list: if not args.property_type: args.property_type = db.get_value('DocField', {'parent': doctype, 'fieldname': args.fieldname}, 'fieldtype') or 'Data' ps = get_doc({ 'doctype': "Property Setter", 'doctype_or_field': args.doctype_or_field, 'doc_type': doctype, 'field_name': args.fieldname, 'property': args.property, 'value': args.value, 'property_type': args.property_type or "Data", '__islocal': 1 }) ps.flags.ignore_validate = ignore_validate ps.flags.validate_fields_for_doctype = validate_fields_for_doctype ps.validate_fieldtype_change() ps.insert() def import_doc(path, ignore_links=False, ignore_insert=False, insert=False): from frappe.core.doctype.data_import import data_import data_import.import_doc(path, ignore_links=ignore_links, ignore_insert=ignore_insert, insert=insert) def copy_doc(doc, ignore_no_copy=True): import copy def remove_no_copy_fields(d): for df in d.meta.get("fields", {"no_copy": 1}): if hasattr(d, df.fieldname): d.set(df.fieldname, None) fields_to_clear = ['name', 'owner', 'creation', 'modified', 'modified_by'] if not local.flags.in_test: fields_to_clear.append("docstatus") if not isinstance(doc, dict): d = doc.as_dict() else: d = doc newdoc = get_doc(copy.deepcopy(d)) newdoc.set("__islocal", 1) for fieldname in (fields_to_clear + ['amended_from', 'amendment_date']): newdoc.set(fieldname, None) if not ignore_no_copy: remove_no_copy_fields(newdoc) for i, d in enumerate(newdoc.get_all_children()): d.set("__islocal", 1) for fieldname in fields_to_clear: d.set(fieldname, None) if not ignore_no_copy: remove_no_copy_fields(d) return newdoc def compare(val1, condition, val2): import frappe.utils return frappe.utils.compare(val1, condition, val2) def respond_as_web_page(title, html, success=None, http_status_code=None, context=None, indicator_color=None, primary_action='/', primary_label = None, 
fullpage=False, width=None, template='message'): local.message_title = title local.message = html local.response['type'] = 'page' local.response['route'] = template local.no_cache = 1 if http_status_code: local.response['http_status_code'] = http_status_code if not context: context = {} if not indicator_color: if success: indicator_color = 'green' elif http_status_code and http_status_code > 300: indicator_color = 'red' else: indicator_color = 'blue' context['indicator_color'] = indicator_color context['primary_label'] = primary_label context['primary_action'] = primary_action context['error_code'] = http_status_code context['fullpage'] = fullpage if width: context['card_width'] = width local.response['context'] = context def redirect_to_message(title, html, http_status_code=None, context=None, indicator_color=None): message_id = generate_hash(length=8) message = { 'context': context or {}, 'http_status_code': http_status_code or 200 } message['context'].update({ 'header': title, 'title': title, 'message': html }) if indicator_color: message['context'].update({ "indicator_color": indicator_color }) cache().set_value("message_id:{0}".format(message_id), message, expires_in_sec=60) location = '/message?id={0}'.format(message_id) if not getattr(local, 'is_ajax', False): local.response["type"] = "redirect" local.response["location"] = location else: return location def build_match_conditions(doctype, as_condition=True): import frappe.desk.reportview return frappe.desk.reportview.build_match_conditions(doctype, as_condition=as_condition) def get_list(doctype, *args, **kwargs): import frappe.model.db_query return frappe.model.db_query.DatabaseQuery(doctype).execute(None, *args, **kwargs) def get_all(doctype, *args, **kwargs): kwargs["ignore_permissions"] = True if not "limit_page_length" in kwargs: kwargs["limit_page_length"] = 0 return get_list(doctype, *args, **kwargs) def get_value(*args, **kwargs): return db.get_value(*args, **kwargs) def as_json(obj, indent=1): from frappe.utils.response import json_handler return json.dumps(obj, indent=indent, sort_keys=True, default=json_handler) def are_emails_muted(): from frappe.utils import cint return flags.mute_emails or cint(conf.get("mute_emails") or 0) or False def get_test_records(doctype): from frappe.modules import get_doctype_module, get_module_path path = os.path.join(get_module_path(get_doctype_module(doctype)), "doctype", scrub(doctype), "test_records.json") if os.path.exists(path): with open(path, "r") as f: return json.loads(f.read()) else: return [] def format_value(*args, **kwargs): import frappe.utils.formatters return frappe.utils.formatters.format_value(*args, **kwargs) def format(*args, **kwargs): import frappe.utils.formatters return frappe.utils.formatters.format_value(*args, **kwargs) def get_print(doctype=None, name=None, print_format=None, style=None, html=None, as_pdf=False, doc=None, output = None, no_letterhead = 0): from frappe.website.render import build_page from frappe.utils.pdf import get_pdf local.form_dict.doctype = doctype local.form_dict.name = name local.form_dict.format = print_format local.form_dict.style = style local.form_dict.doc = doc local.form_dict.no_letterhead = no_letterhead if not html: html = build_page("printview") if as_pdf: return get_pdf(html, output = output) else: return html def attach_print(doctype, name, file_name=None, print_format=None, style=None, html=None, doc=None, lang=None, print_letterhead=True): from frappe.utils import scrub_urls if not file_name: file_name = name file_name = 
file_name.replace(' ','').replace('/','-') print_settings = db.get_singles_dict("Print Settings") _lang = local.lang if lang: local.lang = lang local.flags.ignore_print_permissions = True no_letterhead = not print_letterhead if int(print_settings.send_print_as_pdf or 0): out = { "fname": file_name + ".pdf", "fcontent": get_print(doctype, name, print_format=print_format, style=style, html=html, as_pdf=True, doc=doc, no_letterhead=no_letterhead) } else: out = { "fname": file_name + ".html", "fcontent": scrub_urls(get_print(doctype, name, print_format=print_format, style=style, html=html, doc=doc, no_letterhead=no_letterhead)).encode("utf-8") } local.flags.ignore_print_permissions = False local.lang = _lang return out def publish_progress(*args, **kwargs): import frappe.realtime return frappe.realtime.publish_progress(*args, **kwargs) def publish_realtime(*args, **kwargs): import frappe.realtime return frappe.realtime.publish_realtime(*args, **kwargs) def local_cache(namespace, key, generator, regenerate_if_none=False): if namespace not in local.cache: local.cache[namespace] = {} if key not in local.cache[namespace]: local.cache[namespace][key] = generator() elif local.cache[namespace][key]==None and regenerate_if_none: local.cache[namespace][key] = generator() return local.cache[namespace][key] def enqueue(*args, **kwargs): import frappe.utils.background_jobs return frappe.utils.background_jobs.enqueue(*args, **kwargs) def enqueue_doc(*args, **kwargs): import frappe.utils.background_jobs return frappe.utils.background_jobs.enqueue_doc(*args, **kwargs) def get_doctype_app(doctype): def _get_doctype_app(): doctype_module = local.db.get_value("DocType", doctype, "module") return local.module_app[scrub(doctype_module)] return local_cache("doctype_app", doctype, generator=_get_doctype_app) loggers = {} log_level = None def logger(module=None, with_more_info=True): from frappe.utils.logger import get_logger return get_logger(module or 'default', with_more_info=with_more_info) def log_error(message=None, title=None): return get_doc(dict(doctype='Error Log', error=as_unicode(message or get_traceback()), method=title)).insert(ignore_permissions=True) def get_desk_link(doctype, name): return '<a href="#Form/{0}/{1}" style="font-weight: bold;">{2} {1}</a>'.format(doctype, name, _(doctype)) def bold(text): return '<b>{0}</b>'.format(text) def safe_eval(code, eval_globals=None, eval_locals=None): whitelisted_globals = { "int": int, "float": float, "long": int, "round": round } if '__' in code: throw('Illegal rule {0}. 
Cannot use "__"'.format(bold(code))) if not eval_globals: eval_globals = {} eval_globals['__builtins__'] = {} eval_globals.update(whitelisted_globals) return eval(code, eval_globals, eval_locals) def get_system_settings(key): if key not in local.system_settings: local.system_settings.update({key: db.get_single_value('System Settings', key)}) return local.system_settings.get(key) def get_active_domains(): from frappe.core.doctype.domain_settings.domain_settings import get_active_domains return get_active_domains() def get_version(doctype, name, limit = None, head = False, raise_err = True): meta = get_meta(doctype) if meta.track_changes: names = db.sql(""" SELECT name from tabVersion WHERE ref_doctype = '{doctype}' AND docname = '{name}' {order_by} {limit} """.format( doctype = doctype, name = name, order_by = 'ORDER BY creation' if head else '', limit = 'LIMIT {limit}'.format(limit = limit) if limit else '' )) from frappe.chat.util import squashify, dictify, safe_json_loads versions = [ ] for name in names: name = squashify(name) doc = get_doc('Version', name) data = doc.data data = safe_json_loads(data) data = dictify(dict( version = data, user = doc.owner, creation = doc.creation )) versions.append(data) return versions else: if raise_err: raise ValueError('{doctype} has no versions tracked.'.format( doctype = doctype )) @whitelist(allow_guest = True) def ping(): return "pong" def safe_encode(param, encoding = 'utf-8'): try: param = param.encode(encoding) except Exception: pass return param def safe_decode(param, encoding = 'utf-8'): try: param = param.decode(encoding) except Exception: pass return param def parse_json(val): from frappe.utils import parse_json return parse_json(val) def mock(type, size = 1, locale = 'en'): results = [ ] faker = Faker(locale) if not type in dir(faker): raise ValueError('Not a valid mock type.') else: for i in range(size): data = getattr(faker, type)() results.append(data) from frappe.chat.util import squashify results = squashify(results) return results
true
true
f70dcfad1d0ecdb79e189a8b31f0250d95bb0200
18,777
py
Python
docs/mxdoc.py
paulk-asert/incubator-mxnet
6acf7e6a051e75d9f1cca0ec3c198c38c0f6a3fe
[ "Apache-2.0" ]
null
null
null
docs/mxdoc.py
paulk-asert/incubator-mxnet
6acf7e6a051e75d9f1cca0ec3c198c38c0f6a3fe
[ "Apache-2.0" ]
null
null
null
docs/mxdoc.py
paulk-asert/incubator-mxnet
6acf7e6a051e75d9f1cca0ec3c198c38c0f6a3fe
[ "Apache-2.0" ]
1
2020-04-09T09:26:31.000Z
2020-04-09T09:26:31.000Z
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """A sphnix-doc plugin to build mxnet docs""" from __future__ import print_function import subprocess import re import os import json import sys from recommonmark import transform import pypandoc import contextlib # Use six for Python 2 / 3 compatibility from six import StringIO from six.moves import configparser _BUILD_VER = os.getenv('BUILD_VER', 'default') print("Building version {}".format(_BUILD_VER)) _DOC_SET = 'document_sets_' + _BUILD_VER parser = configparser.SafeConfigParser() parser.read('settings.ini') if _DOC_SET not in parser.sections(): _DOC_SET = 'document_sets_default' for section in [ _DOC_SET ]: print("Document sets to generate:") for candidate in [ 'scala_docs', 'java_docs', 'clojure_docs', 'doxygen_docs', 'julia_docs', 'r_docs' ]: print('%-12s : %s' % (candidate, parser.get(section, candidate))) _MXNET_DOCS_BUILD_MXNET = parser.getboolean('mxnet', 'build_mxnet') _SCALA_DOCS = parser.getboolean(_DOC_SET, 'scala_docs') _JAVA_DOCS = parser.getboolean(_DOC_SET, 'java_docs') _CLOJURE_DOCS = parser.getboolean(_DOC_SET, 'clojure_docs') _DOXYGEN_DOCS = parser.getboolean(_DOC_SET, 'doxygen_docs') _R_DOCS = parser.getboolean(_DOC_SET, 'r_docs') _JULIA_DOCS = parser.getboolean(_DOC_SET, 'julia_docs') _ARTIFACTS = parser.getboolean(_DOC_SET, 'artifacts') # white list to evaluate the code block output, such as ['tutorials/gluon'] _EVAL_WHILTELIST = [] # start or end of a code block _CODE_MARK = re.compile('^([ ]*)```([\w]*)') # language names and the according file extensions and comment symbol _LANGS = {'python' : ('py', '#'), 'r' : ('R','#'), 'scala' : ('scala', '//'), 'java' : ('java', '//'), 'julia' : ('jl', '#'), 'perl' : ('pl', '#'), 'cpp' : ('cc', '//'), 'bash' : ('sh', '#')} _LANG_SELECTION_MARK = 'INSERT SELECTION BUTTONS' _SRC_DOWNLOAD_MARK = 'INSERT SOURCE DOWNLOAD BUTTONS' def _run_cmd(cmds): """Run commands, raise exception if failed""" if not isinstance(cmds, str): cmds = "".join(cmds) print("Execute \"%s\"" % cmds) try: subprocess.check_call(cmds, shell=True) except subprocess.CalledProcessError as err: print(err) raise err def generate_doxygen(app): """Run the doxygen make commands""" _run_cmd("cd %s/.. && make doxygen" % app.builder.srcdir) _run_cmd("cp -rf doxygen/html %s/doxygen" % app.builder.outdir) def build_mxnet(app): """Build mxnet .so lib""" if not os.path.exists(os.path.join(app.builder.srcdir, '..', 'config.mk')): _run_cmd("cd %s/.. && cp make/config.mk config.mk && make -j$(nproc) USE_MKLDNN=0 USE_CPP_PACKAGE=1 " % app.builder.srcdir) else: _run_cmd("cd %s/.. && make -j$(nproc) USE_MKLDNN=0 USE_CPP_PACKAGE=1 " % app.builder.srcdir) def build_julia_docs(app): """build Julia docs""" dest_path = app.builder.outdir + '/api/julia/site' _run_cmd('cd {}/.. 
&& make -C julia/docs'.format(app.builder.srcdir)) _run_cmd('mkdir -p {}'.format(dest_path)) _run_cmd('cd {}/.. && cp -a julia/docs/site/* {}'.format(app.builder.srcdir, dest_path)) def build_r_docs(app): """build r pdf""" r_root = app.builder.srcdir + '/../R-package' pdf_path = app.builder.srcdir + '/api/r/mxnet-r-reference-manual.pdf' _run_cmd('cd ' + r_root + '; R -e "roxygen2::roxygenize()"; R CMD Rd2pdf . --no-preview -o ' + pdf_path) dest_path = app.builder.outdir + '/api/r/' _run_cmd('mkdir -p ' + dest_path + '; mv ' + pdf_path + ' ' + dest_path) def build_scala(app): """build scala for scala docs, java docs, and clojure docs to use""" if any(v in _BUILD_VER for v in ['1.2.', '1.3.', '1.4.']): _run_cmd("cd %s/.. && make scalapkg" % app.builder.srcdir) _run_cmd("cd %s/.. && make scalainstall" % app.builder.srcdir) else: _run_cmd("cd %s/../scala-package && mvn -B install -DskipTests" % app.builder.srcdir) def build_scala_docs(app): """build scala doc and then move the outdir""" scala_path = app.builder.srcdir + '/../scala-package' scala_doc_sources = 'find . -type f -name "*.scala" | egrep \"\.\/core|\.\/infer\" | egrep -v \"\/javaapi\" | egrep -v \"Suite\"' scala_doc_classpath = ':'.join([ '`find native -name "*.jar" | grep "target/lib/" | tr "\\n" ":" `', '`find macros -name "*.jar" | tr "\\n" ":" `', '`find core -name "*.jar" | tr "\\n" ":" `', '`find infer -name "*.jar" | tr "\\n" ":" `' ]) # There are unresolvable errors on mxnet 1.2.x. We are ignoring those errors while aborting the ci on newer versions scala_ignore_errors = '; exit 0' if any(v in _BUILD_VER for v in ['1.2.', '1.3.']) else '' _run_cmd('cd {}; scaladoc `{}` -classpath {} -feature -deprecation {}' .format(scala_path, scala_doc_sources, scala_doc_classpath, scala_ignore_errors)) dest_path = app.builder.outdir + '/api/scala/docs' _run_cmd('rm -rf ' + dest_path) _run_cmd('mkdir -p ' + dest_path) # 'index' and 'package.html' do not exist in later versions of scala; delete these after upgrading scala>2.12.x scaladocs = ['index', 'index.html', 'org', 'lib', 'index.js', 'package.html'] for doc_file in scaladocs: _run_cmd('cd ' + scala_path + ' && mv -f ' + doc_file + ' ' + dest_path + '; exit 0') def build_java_docs(app): """build java docs and then move the outdir""" java_path = app.builder.srcdir + '/../scala-package' java_doc_sources = 'find . 
-type f -name "*.scala" | egrep \"\.\/core|\.\/infer\" | egrep \"\/javaapi\" | egrep -v \"Suite\"' java_doc_classpath = ':'.join([ '`find native -name "*.jar" | grep "target/lib/" | tr "\\n" ":" `', '`find macros -name "*.jar" | tr "\\n" ":" `', '`find core -name "*.jar" | tr "\\n" ":" `', '`find infer -name "*.jar" | tr "\\n" ":" `' ]) _run_cmd('cd {}; scaladoc `{}` -classpath {} -feature -deprecation' .format(java_path, java_doc_sources, java_doc_classpath)) dest_path = app.builder.outdir + '/api/java/docs' _run_cmd('rm -rf ' + dest_path) _run_cmd('mkdir -p ' + dest_path) javadocs = ['index', 'index.html', 'org', 'lib', 'index.js', 'package.html'] for doc_file in javadocs: _run_cmd('cd ' + java_path + ' && mv -f ' + doc_file + ' ' + dest_path + '; exit 0') def build_clojure_docs(app): """build clojure doc and then move the outdir""" clojure_path = app.builder.srcdir + '/../contrib/clojure-package' _run_cmd('cd ' + clojure_path + '; lein codox') dest_path = app.builder.outdir + '/api/clojure/docs' _run_cmd('rm -rf ' + dest_path) _run_cmd('mkdir -p ' + dest_path) clojure_doc_path = app.builder.srcdir + '/../contrib/clojure-package/target/doc' _run_cmd('cd ' + clojure_doc_path + ' && cp -r * ' + dest_path + '; exit 0') def _convert_md_table_to_rst(table): """Convert a markdown table to rst format""" if len(table) < 3: return '' out = '```eval_rst\n.. list-table::\n :header-rows: 1\n\n' for i,l in enumerate(table): cols = l.split('|')[1:-1] if i == 0: ncol = len(cols) else: if len(cols) != ncol: return '' if i == 1: for c in cols: if len(c) is not 0 and '---' not in c: return '' else: for j,c in enumerate(cols): out += ' * - ' if j == 0 else ' - ' out += pypandoc.convert_text( c, 'rst', format='md').replace('\n', ' ').replace('\r', '') + '\n' out += '```\n' return out def convert_table(app, docname, source): """Find tables in a markdown and then convert them into the rst format""" num_tables = 0 for i,j in enumerate(source): table = [] output = '' in_table = False for l in j.split('\n'): r = l.strip() if r.startswith('|'): table.append(r) in_table = True else: if in_table is True: converted = _convert_md_table_to_rst(table) if converted is '': print("Failed to convert the markdown table") print(table) else: num_tables += 1 output += converted in_table = False table = [] output += l + '\n' source[i] = output if num_tables > 0: print('Converted %d tables in %s' % (num_tables, docname)) def _parse_code_lines(lines): """A iterator that returns if a line is within a code block Returns ------- iterator of (str, bool, str, int) - line: the line - in_code: if this line is in a code block - lang: the code block langunage - indent: the code indent """ in_code = False lang = None indent = None for l in lines: m = _CODE_MARK.match(l) if m is not None: if not in_code: if m.groups()[1].lower() in _LANGS: lang = m.groups()[1].lower() indent = len(m.groups()[0]) in_code = True yield (l, in_code, lang, indent) else: yield (l, in_code, lang, indent) lang = None indent = None in_code = False else: yield (l, in_code, lang, indent) def _get_lang_selection_btn(langs): active = True btngroup = '<div class="text-center">\n<div class="btn-group opt-group" role="group">' for l in langs: btngroup += '<button type="button" class="btn btn-default opt %s">%s</button>\n' % ( 'active' if active else '', l[0].upper()+l[1:].lower()) active = False btngroup += '</div>\n</div> <script type="text/javascript" src="../../_static/js/options.js"></script>' return btngroup def _get_blocks(lines): """split lines into code and 
non-code blocks Returns ------- iterator of (bool, str, list of str) - if it is a code block - source language - lines of source """ cur_block = [] pre_lang = None pre_in_code = None for (l, in_code, cur_lang, _) in _parse_code_lines(lines): if in_code != pre_in_code: if pre_in_code and len(cur_block) >= 2: cur_block = cur_block[1:-1] # remove ``` # remove empty lines at head while len(cur_block) > 0: if len(cur_block[0]) == 0: cur_block.pop(0) else: break # remove empty lines at tail while len(cur_block) > 0: if len(cur_block[-1]) == 0: cur_block.pop() else: break if len(cur_block): yield (pre_in_code, pre_lang, cur_block) cur_block = [] cur_block.append(l) pre_lang = cur_lang pre_in_code = in_code if len(cur_block): yield (pre_in_code, pre_lang, cur_block) def _get_mk_code_block(src, lang): """Return a markdown code block E.g. ```python import mxnet ```` """ if lang is None: lang = '' return '```'+lang+'\n'+src.rstrip()+'\n'+'```\n' @contextlib.contextmanager def _string_io(): oldout = sys.stdout olderr = sys.stderr strio = StringIO.StringIO() sys.stdout = strio sys.stderr = strio yield strio sys.stdout = oldout sys.stderr = olderr def _get_python_block_output(src, global_dict, local_dict): """Evaluate python source codes Returns (bool, str): - True if success - output """ src = '\n'.join([l for l in src.split('\n') if not l.startswith('%') and not 'plt.show()' in l]) ret_status = True err = '' with _string_io() as s: try: exec(src, global_dict, global_dict) except Exception as e: err = str(e) ret_status = False return (ret_status, s.getvalue()+err) def _get_jupyter_notebook(lang, all_lines): cells = [] # Exclude lines containing <!--notebook-skip-line--> filtered_lines = [line for line in all_lines if "<!--notebook-skip-line-->" not in line] for in_code, blk_lang, lines in _get_blocks(filtered_lines): if blk_lang != lang: in_code = False src = '\n'.join(lines) cell = { "cell_type": "code" if in_code else "markdown", "metadata": {}, "source": src } if in_code: cell.update({ "outputs": [], "execution_count": None, }) cells.append(cell) ipynb = {"nbformat" : 4, "nbformat_minor" : 2, "metadata" : {"language":lang, "display_name":'', "name":''}, "cells" : cells} return ipynb def _get_source(lang, lines): cmt = _LANGS[lang][1] + ' ' out = [] for in_code, lines in _get_blocks(lang, lines): if in_code: out.append('') for l in lines: if in_code: if '%matplotlib' not in l: out.append(l) else: if ('<div>' in l or '</div>' in l or '<script>' in l or '</script>' in l or '<!--' in l or '-->' in l or '%matplotlib' in l ): continue out.append(cmt+l) if in_code: out.append('') return out def _get_src_download_btn(out_prefix, langs, lines): btn = '<div class="btn-group" role="group">\n' for lang in langs: ipynb = out_prefix if lang == 'python': ipynb += '.ipynb' else: ipynb += '_' + lang + '.ipynb' with open(ipynb, 'w') as f: json.dump(_get_jupyter_notebook(lang, lines), f) f = ipynb.split('/')[-1] btn += '<div class="download-btn"><a href="%s" download="%s">' \ '<span class="glyphicon glyphicon-download-alt"></span> %s</a></div>' % (f, f, f) btn += '</div>\n' return btn def add_buttons(app, docname, source): out_prefix = app.builder.outdir + '/' + docname dirname = os.path.dirname(out_prefix) if not os.path.exists(dirname): os.makedirs(dirname) for i,j in enumerate(source): local_dict = {} global_dict = {} lines = j.split('\n') langs = set([l for (_, _, l, _) in _parse_code_lines(lines) if l is not None and l in _LANGS]) # first convert for k,l in enumerate(lines): if _SRC_DOWNLOAD_MARK in l: lines[k] = 
_get_src_download_btn( out_prefix, langs, lines) # # then add lang buttons # for k,l in enumerate(lines): # if _LANG_SELECTION_MARK in l: # lines[k] = _get_lang_selection_btn(langs) output = '' for in_code, lang, lines in _get_blocks(lines): src = '\n'.join(lines)+'\n' if in_code: output += _get_mk_code_block(src, lang) if lang == 'python' and any([w in docname for w in _EVAL_WHILTELIST]): status, blk_out = _get_python_block_output(src, global_dict, local_dict) if len(blk_out): output += '<div class=\"cell-results-header\">Output:</div>\n\n' output += _get_mk_code_block(blk_out, 'results') else: output += src source[i] = output # source[i] = '\n'.join(lines) def copy_artifacts(app): """Copies artifacts needed for website presentation""" dest_path = app.builder.outdir + '/error' source_path = app.builder.srcdir + '/build_version_doc/artifacts' _run_cmd('cd ' + app.builder.srcdir) _run_cmd('rm -rf ' + dest_path) _run_cmd('mkdir -p ' + dest_path) _run_cmd('cp ' + source_path + '/404.html ' + dest_path) _run_cmd('cp ' + source_path + '/api.html ' + dest_path) dest_path = app.builder.outdir + '/_static' _run_cmd('rm -rf ' + dest_path) _run_cmd('mkdir -p ' + dest_path) _run_cmd('cp ' + app.builder.srcdir + '/_static/mxnet.css ' + dest_path) def setup(app): # If MXNET_DOCS_BUILD_MXNET is set something different than 1 # Skip the build step if os.getenv('MXNET_DOCS_BUILD_MXNET') == '1'or _MXNET_DOCS_BUILD_MXNET: print("Building MXNet!") app.connect("builder-inited", build_mxnet) if _DOXYGEN_DOCS: print("Building Doxygen!") app.connect("builder-inited", generate_doxygen) if _SCALA_DOCS or _CLOJURE_DOCS: print("Building Scala!") app.connect("builder-inited", build_scala) if _SCALA_DOCS: print("Building Scala Docs!") app.connect("builder-inited", build_scala_docs) if _JAVA_DOCS: print("Building Java Docs!") app.connect("builder-inited", build_java_docs) if _CLOJURE_DOCS: print("Building Clojure Docs!") app.connect("builder-inited", build_clojure_docs) if _JULIA_DOCS: print("Building Julia Docs!") app.connect("builder-inited", build_julia_docs) if _R_DOCS: print("Building R Docs!") app.connect("builder-inited", build_r_docs) if _ARTIFACTS: print("Copying Artifacts!") app.connect("builder-inited", copy_artifacts) app.connect('source-read', convert_table) app.connect('source-read', add_buttons) app.add_config_value('recommonmark_config', { 'url_resolver': lambda url: 'http://mxnet.io/' + url, 'enable_eval_rst': True, }, True) app.add_transform(transform.AutoStructify)
37.33002
134
0.57171
from __future__ import print_function import subprocess import re import os import json import sys from recommonmark import transform import pypandoc import contextlib from six import StringIO from six.moves import configparser _BUILD_VER = os.getenv('BUILD_VER', 'default') print("Building version {}".format(_BUILD_VER)) _DOC_SET = 'document_sets_' + _BUILD_VER parser = configparser.SafeConfigParser() parser.read('settings.ini') if _DOC_SET not in parser.sections(): _DOC_SET = 'document_sets_default' for section in [ _DOC_SET ]: print("Document sets to generate:") for candidate in [ 'scala_docs', 'java_docs', 'clojure_docs', 'doxygen_docs', 'julia_docs', 'r_docs' ]: print('%-12s : %s' % (candidate, parser.get(section, candidate))) _MXNET_DOCS_BUILD_MXNET = parser.getboolean('mxnet', 'build_mxnet') _SCALA_DOCS = parser.getboolean(_DOC_SET, 'scala_docs') _JAVA_DOCS = parser.getboolean(_DOC_SET, 'java_docs') _CLOJURE_DOCS = parser.getboolean(_DOC_SET, 'clojure_docs') _DOXYGEN_DOCS = parser.getboolean(_DOC_SET, 'doxygen_docs') _R_DOCS = parser.getboolean(_DOC_SET, 'r_docs') _JULIA_DOCS = parser.getboolean(_DOC_SET, 'julia_docs') _ARTIFACTS = parser.getboolean(_DOC_SET, 'artifacts') _EVAL_WHILTELIST = [] _CODE_MARK = re.compile('^([ ]*)```([\w]*)') _LANGS = {'python' : ('py', '#'), 'r' : ('R','#'), 'scala' : ('scala', '//'), 'java' : ('java', '//'), 'julia' : ('jl', '#'), 'perl' : ('pl', '#'), 'cpp' : ('cc', '//'), 'bash' : ('sh', '#')} _LANG_SELECTION_MARK = 'INSERT SELECTION BUTTONS' _SRC_DOWNLOAD_MARK = 'INSERT SOURCE DOWNLOAD BUTTONS' def _run_cmd(cmds): if not isinstance(cmds, str): cmds = "".join(cmds) print("Execute \"%s\"" % cmds) try: subprocess.check_call(cmds, shell=True) except subprocess.CalledProcessError as err: print(err) raise err def generate_doxygen(app): _run_cmd("cd %s/.. && make doxygen" % app.builder.srcdir) _run_cmd("cp -rf doxygen/html %s/doxygen" % app.builder.outdir) def build_mxnet(app): if not os.path.exists(os.path.join(app.builder.srcdir, '..', 'config.mk')): _run_cmd("cd %s/.. && cp make/config.mk config.mk && make -j$(nproc) USE_MKLDNN=0 USE_CPP_PACKAGE=1 " % app.builder.srcdir) else: _run_cmd("cd %s/.. && make -j$(nproc) USE_MKLDNN=0 USE_CPP_PACKAGE=1 " % app.builder.srcdir) def build_julia_docs(app): dest_path = app.builder.outdir + '/api/julia/site' _run_cmd('cd {}/.. && make -C julia/docs'.format(app.builder.srcdir)) _run_cmd('mkdir -p {}'.format(dest_path)) _run_cmd('cd {}/.. && cp -a julia/docs/site/* {}'.format(app.builder.srcdir, dest_path)) def build_r_docs(app): r_root = app.builder.srcdir + '/../R-package' pdf_path = app.builder.srcdir + '/api/r/mxnet-r-reference-manual.pdf' _run_cmd('cd ' + r_root + '; R -e "roxygen2::roxygenize()"; R CMD Rd2pdf . --no-preview -o ' + pdf_path) dest_path = app.builder.outdir + '/api/r/' _run_cmd('mkdir -p ' + dest_path + '; mv ' + pdf_path + ' ' + dest_path) def build_scala(app): if any(v in _BUILD_VER for v in ['1.2.', '1.3.', '1.4.']): _run_cmd("cd %s/.. && make scalapkg" % app.builder.srcdir) _run_cmd("cd %s/.. && make scalainstall" % app.builder.srcdir) else: _run_cmd("cd %s/../scala-package && mvn -B install -DskipTests" % app.builder.srcdir) def build_scala_docs(app): scala_path = app.builder.srcdir + '/../scala-package' scala_doc_sources = 'find . 
-type f -name "*.scala" | egrep \"\.\/core|\.\/infer\" | egrep -v \"\/javaapi\" | egrep -v \"Suite\"' scala_doc_classpath = ':'.join([ '`find native -name "*.jar" | grep "target/lib/" | tr "\\n" ":" `', '`find macros -name "*.jar" | tr "\\n" ":" `', '`find core -name "*.jar" | tr "\\n" ":" `', '`find infer -name "*.jar" | tr "\\n" ":" `' ]) scala_ignore_errors = '; exit 0' if any(v in _BUILD_VER for v in ['1.2.', '1.3.']) else '' _run_cmd('cd {}; scaladoc `{}` -classpath {} -feature -deprecation {}' .format(scala_path, scala_doc_sources, scala_doc_classpath, scala_ignore_errors)) dest_path = app.builder.outdir + '/api/scala/docs' _run_cmd('rm -rf ' + dest_path) _run_cmd('mkdir -p ' + dest_path) scaladocs = ['index', 'index.html', 'org', 'lib', 'index.js', 'package.html'] for doc_file in scaladocs: _run_cmd('cd ' + scala_path + ' && mv -f ' + doc_file + ' ' + dest_path + '; exit 0') def build_java_docs(app): java_path = app.builder.srcdir + '/../scala-package' java_doc_sources = 'find . -type f -name "*.scala" | egrep \"\.\/core|\.\/infer\" | egrep \"\/javaapi\" | egrep -v \"Suite\"' java_doc_classpath = ':'.join([ '`find native -name "*.jar" | grep "target/lib/" | tr "\\n" ":" `', '`find macros -name "*.jar" | tr "\\n" ":" `', '`find core -name "*.jar" | tr "\\n" ":" `', '`find infer -name "*.jar" | tr "\\n" ":" `' ]) _run_cmd('cd {}; scaladoc `{}` -classpath {} -feature -deprecation' .format(java_path, java_doc_sources, java_doc_classpath)) dest_path = app.builder.outdir + '/api/java/docs' _run_cmd('rm -rf ' + dest_path) _run_cmd('mkdir -p ' + dest_path) javadocs = ['index', 'index.html', 'org', 'lib', 'index.js', 'package.html'] for doc_file in javadocs: _run_cmd('cd ' + java_path + ' && mv -f ' + doc_file + ' ' + dest_path + '; exit 0') def build_clojure_docs(app): clojure_path = app.builder.srcdir + '/../contrib/clojure-package' _run_cmd('cd ' + clojure_path + '; lein codox') dest_path = app.builder.outdir + '/api/clojure/docs' _run_cmd('rm -rf ' + dest_path) _run_cmd('mkdir -p ' + dest_path) clojure_doc_path = app.builder.srcdir + '/../contrib/clojure-package/target/doc' _run_cmd('cd ' + clojure_doc_path + ' && cp -r * ' + dest_path + '; exit 0') def _convert_md_table_to_rst(table): if len(table) < 3: return '' out = '```eval_rst\n.. 
list-table::\n :header-rows: 1\n\n' for i,l in enumerate(table): cols = l.split('|')[1:-1] if i == 0: ncol = len(cols) else: if len(cols) != ncol: return '' if i == 1: for c in cols: if len(c) is not 0 and '---' not in c: return '' else: for j,c in enumerate(cols): out += ' * - ' if j == 0 else ' - ' out += pypandoc.convert_text( c, 'rst', format='md').replace('\n', ' ').replace('\r', '') + '\n' out += '```\n' return out def convert_table(app, docname, source): num_tables = 0 for i,j in enumerate(source): table = [] output = '' in_table = False for l in j.split('\n'): r = l.strip() if r.startswith('|'): table.append(r) in_table = True else: if in_table is True: converted = _convert_md_table_to_rst(table) if converted is '': print("Failed to convert the markdown table") print(table) else: num_tables += 1 output += converted in_table = False table = [] output += l + '\n' source[i] = output if num_tables > 0: print('Converted %d tables in %s' % (num_tables, docname)) def _parse_code_lines(lines): in_code = False lang = None indent = None for l in lines: m = _CODE_MARK.match(l) if m is not None: if not in_code: if m.groups()[1].lower() in _LANGS: lang = m.groups()[1].lower() indent = len(m.groups()[0]) in_code = True yield (l, in_code, lang, indent) else: yield (l, in_code, lang, indent) lang = None indent = None in_code = False else: yield (l, in_code, lang, indent) def _get_lang_selection_btn(langs): active = True btngroup = '<div class="text-center">\n<div class="btn-group opt-group" role="group">' for l in langs: btngroup += '<button type="button" class="btn btn-default opt %s">%s</button>\n' % ( 'active' if active else '', l[0].upper()+l[1:].lower()) active = False btngroup += '</div>\n</div> <script type="text/javascript" src="../../_static/js/options.js"></script>' return btngroup def _get_blocks(lines): cur_block = [] pre_lang = None pre_in_code = None for (l, in_code, cur_lang, _) in _parse_code_lines(lines): if in_code != pre_in_code: if pre_in_code and len(cur_block) >= 2: cur_block = cur_block[1:-1] while len(cur_block) > 0: if len(cur_block[0]) == 0: cur_block.pop(0) else: break while len(cur_block) > 0: if len(cur_block[-1]) == 0: cur_block.pop() else: break if len(cur_block): yield (pre_in_code, pre_lang, cur_block) cur_block = [] cur_block.append(l) pre_lang = cur_lang pre_in_code = in_code if len(cur_block): yield (pre_in_code, pre_lang, cur_block) def _get_mk_code_block(src, lang): if lang is None: lang = '' return '```'+lang+'\n'+src.rstrip()+'\n'+'```\n' @contextlib.contextmanager def _string_io(): oldout = sys.stdout olderr = sys.stderr strio = StringIO.StringIO() sys.stdout = strio sys.stderr = strio yield strio sys.stdout = oldout sys.stderr = olderr def _get_python_block_output(src, global_dict, local_dict): src = '\n'.join([l for l in src.split('\n') if not l.startswith('%') and not 'plt.show()' in l]) ret_status = True err = '' with _string_io() as s: try: exec(src, global_dict, global_dict) except Exception as e: err = str(e) ret_status = False return (ret_status, s.getvalue()+err) def _get_jupyter_notebook(lang, all_lines): cells = [] filtered_lines = [line for line in all_lines if "<!--notebook-skip-line-->" not in line] for in_code, blk_lang, lines in _get_blocks(filtered_lines): if blk_lang != lang: in_code = False src = '\n'.join(lines) cell = { "cell_type": "code" if in_code else "markdown", "metadata": {}, "source": src } if in_code: cell.update({ "outputs": [], "execution_count": None, }) cells.append(cell) ipynb = {"nbformat" : 4, "nbformat_minor" : 2, 
"metadata" : {"language":lang, "display_name":'', "name":''}, "cells" : cells} return ipynb def _get_source(lang, lines): cmt = _LANGS[lang][1] + ' ' out = [] for in_code, lines in _get_blocks(lang, lines): if in_code: out.append('') for l in lines: if in_code: if '%matplotlib' not in l: out.append(l) else: if ('<div>' in l or '</div>' in l or '<script>' in l or '</script>' in l or '<!--' in l or '-->' in l or '%matplotlib' in l ): continue out.append(cmt+l) if in_code: out.append('') return out def _get_src_download_btn(out_prefix, langs, lines): btn = '<div class="btn-group" role="group">\n' for lang in langs: ipynb = out_prefix if lang == 'python': ipynb += '.ipynb' else: ipynb += '_' + lang + '.ipynb' with open(ipynb, 'w') as f: json.dump(_get_jupyter_notebook(lang, lines), f) f = ipynb.split('/')[-1] btn += '<div class="download-btn"><a href="%s" download="%s">' \ '<span class="glyphicon glyphicon-download-alt"></span> %s</a></div>' % (f, f, f) btn += '</div>\n' return btn def add_buttons(app, docname, source): out_prefix = app.builder.outdir + '/' + docname dirname = os.path.dirname(out_prefix) if not os.path.exists(dirname): os.makedirs(dirname) for i,j in enumerate(source): local_dict = {} global_dict = {} lines = j.split('\n') langs = set([l for (_, _, l, _) in _parse_code_lines(lines) if l is not None and l in _LANGS]) for k,l in enumerate(lines): if _SRC_DOWNLOAD_MARK in l: lines[k] = _get_src_download_btn( out_prefix, langs, lines) output = '' for in_code, lang, lines in _get_blocks(lines): src = '\n'.join(lines)+'\n' if in_code: output += _get_mk_code_block(src, lang) if lang == 'python' and any([w in docname for w in _EVAL_WHILTELIST]): status, blk_out = _get_python_block_output(src, global_dict, local_dict) if len(blk_out): output += '<div class=\"cell-results-header\">Output:</div>\n\n' output += _get_mk_code_block(blk_out, 'results') else: output += src source[i] = output def copy_artifacts(app): dest_path = app.builder.outdir + '/error' source_path = app.builder.srcdir + '/build_version_doc/artifacts' _run_cmd('cd ' + app.builder.srcdir) _run_cmd('rm -rf ' + dest_path) _run_cmd('mkdir -p ' + dest_path) _run_cmd('cp ' + source_path + '/404.html ' + dest_path) _run_cmd('cp ' + source_path + '/api.html ' + dest_path) dest_path = app.builder.outdir + '/_static' _run_cmd('rm -rf ' + dest_path) _run_cmd('mkdir -p ' + dest_path) _run_cmd('cp ' + app.builder.srcdir + '/_static/mxnet.css ' + dest_path) def setup(app): if os.getenv('MXNET_DOCS_BUILD_MXNET') == '1'or _MXNET_DOCS_BUILD_MXNET: print("Building MXNet!") app.connect("builder-inited", build_mxnet) if _DOXYGEN_DOCS: print("Building Doxygen!") app.connect("builder-inited", generate_doxygen) if _SCALA_DOCS or _CLOJURE_DOCS: print("Building Scala!") app.connect("builder-inited", build_scala) if _SCALA_DOCS: print("Building Scala Docs!") app.connect("builder-inited", build_scala_docs) if _JAVA_DOCS: print("Building Java Docs!") app.connect("builder-inited", build_java_docs) if _CLOJURE_DOCS: print("Building Clojure Docs!") app.connect("builder-inited", build_clojure_docs) if _JULIA_DOCS: print("Building Julia Docs!") app.connect("builder-inited", build_julia_docs) if _R_DOCS: print("Building R Docs!") app.connect("builder-inited", build_r_docs) if _ARTIFACTS: print("Copying Artifacts!") app.connect("builder-inited", copy_artifacts) app.connect('source-read', convert_table) app.connect('source-read', add_buttons) app.add_config_value('recommonmark_config', { 'url_resolver': lambda url: 'http://mxnet.io/' + url, 
'enable_eval_rst': True, }, True) app.add_transform(transform.AutoStructify)
true
true
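The documentation build script captured in the record above centers on a line scanner (_parse_code_lines) that walks markdown sources and tracks whether each line sits inside a fenced code block and which language the fence declared; the notebook export, download buttons, and block evaluation are all layered on that scan. Below is a minimal, self-contained restatement of that scanner for illustration only; it drops the indent tracking and hard-codes the language set, so treat it as a sketch rather than the script's exact code.

```python
# Simplified sketch of the fenced-code scanner used by the build script above.
# Assumption: indent tracking is omitted and the language set is hard-coded.
import re

CODE_MARK = re.compile(r'^([ ]*)```([\w]*)')
LANGS = {'python', 'r', 'scala', 'java', 'julia', 'perl', 'cpp', 'bash'}

def parse_code_lines(lines):
    # Yields (line, in_code, lang) for every input line.
    in_code, lang = False, None
    for line in lines:
        m = CODE_MARK.match(line)
        if m is not None:
            if not in_code:
                # An opening fence only starts a code block if its language
                # is one the pipeline knows how to handle.
                if m.group(2).lower() in LANGS:
                    lang = m.group(2).lower()
                    in_code = True
                yield line, in_code, lang
            else:
                # A closing fence is still reported as part of the block,
                # then the state resets.
                yield line, in_code, lang
                in_code, lang = False, None
        else:
            yield line, in_code, lang

sample = ["Intro text", "```python", "print('hello')", "```", "Outro text"]
for line, in_code, lang in parse_code_lines(sample):
    print(in_code, lang, repr(line))
```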
f70dcff2432a436d9a38ca72b6250e635f385d8f
913
py
Python
ooobuild/dyn/sheet/spreadsheet_view_panes_enumeration.py
Amourspirit/ooo_uno_tmpl
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
[ "Apache-2.0" ]
null
null
null
ooobuild/dyn/sheet/spreadsheet_view_panes_enumeration.py
Amourspirit/ooo_uno_tmpl
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
[ "Apache-2.0" ]
null
null
null
ooobuild/dyn/sheet/spreadsheet_view_panes_enumeration.py
Amourspirit/ooo_uno_tmpl
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
[ "Apache-2.0" ]
null
null
null
# coding: utf-8 # # Copyright 2022 :Barry-Thomas-Paul: Moss # # Licensed under the Apache License, Version 2.0 (the "License") # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http: // www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Service Class # this is a auto generated file generated by Cheetah # Libre Office Version: 7.3 # Namespace: com.sun.star.sheet from ...lo.sheet.spreadsheet_view_panes_enumeration import SpreadsheetViewPanesEnumeration as SpreadsheetViewPanesEnumeration __all__ = ['SpreadsheetViewPanesEnumeration']
35.115385
125
0.776561
from ...lo.sheet.spreadsheet_view_panes_enumeration import SpreadsheetViewPanesEnumeration as SpreadsheetViewPanesEnumeration __all__ = ['SpreadsheetViewPanesEnumeration']
true
true
f70dd031a18d68a0c0719d6b1e479e6b260cc10d
3,076
py
Python
ndn_python_repo/storage/sqlite.py
susmit85/ndn-python-repo
45a76bd3e78b0157e364413d62d3aaa51cd20353
[ "Apache-2.0" ]
null
null
null
ndn_python_repo/storage/sqlite.py
susmit85/ndn-python-repo
45a76bd3e78b0157e364413d62d3aaa51cd20353
[ "Apache-2.0" ]
null
null
null
ndn_python_repo/storage/sqlite.py
susmit85/ndn-python-repo
45a76bd3e78b0157e364413d62d3aaa51cd20353
[ "Apache-2.0" ]
null
null
null
import os import sqlite3 from typing import List, Optional from .storage_base import Storage class SqliteStorage(Storage): def __init__(self, db_path): """ Init table "data" with the attribute "key" being the primary key :param db_path: str. Path to database file """ super().__init__() db_path = os.path.expanduser(db_path) if len(os.path.dirname(db_path)) > 0 and not os.path.exists(os.path.dirname(db_path)): try: os.makedirs(os.path.dirname(db_path)) except PermissionError: raise PermissionError(f'Could not create database directory: {db_path}') from None self.conn = sqlite3.connect(os.path.expanduser(db_path)) c = self.conn.cursor() c.execute(""" CREATE TABLE IF NOT EXISTS data ( key BLOB PRIMARY KEY, value BLOB, expire_time_ms INTEGER ) """) self.conn.commit() def _put(self, key: bytes, value: bytes, expire_time_ms=None): """ Insert value and its expiration time into sqlite3, overwrite if already exists. :param key: bytes. :param value: bytes. :param expire_time_ms: Optional[int]. Value is not fresh if expire_time_ms is not specified. """ c = self.conn.cursor() c.execute('INSERT OR REPLACE INTO data (key, value, expire_time_ms) VALUES (?, ?, ?)', (key, value, expire_time_ms)) self.conn.commit() def _put_batch(self, keys: List[bytes], values: List[bytes], expire_time_mss:List[Optional[int]]): """ Batch insert. :param key: List[bytes]. :param value: List[bytes]. :param expire_time_ms: List[Optional[int]]. """ c = self.conn.cursor() c.executemany('INSERT OR REPLACE INTO data (key, value, expire_time_ms) VALUES (?, ?, ?)', zip(keys, values, expire_time_mss)) self.conn.commit() def _get(self, key: bytes, can_be_prefix=False, must_be_fresh=False) -> Optional[bytes]: """ Get value from sqlite3. :param key: bytes. :param can_be_prefix: bool. :param must_be_fresh: bool. :return: bytes. """ c = self.conn.cursor() query = 'SELECT value FROM data WHERE ' if must_be_fresh: query += f'(expire_time_ms > {time_ms()}) AND ' if can_be_prefix: query += 'hex(key) LIKE ?' c.execute(query, (key.hex() + '%', )) else: query += 'key = ?' c.execute(query, (key, )) ret = c.fetchone() return ret[0] if ret else None def _remove(self, key: bytes) -> bool: """ Remove value from sqlite. Return whether removal is successful. :param key: bytes. :return: bool. """ c = self.conn.cursor() n_removed = c.execute('DELETE FROM data WHERE key = ?', (key, )).rowcount self.conn.commit() return n_removed > 0
35.767442
102
0.56762
import os import sqlite3 from typing import List, Optional from .storage_base import Storage class SqliteStorage(Storage): def __init__(self, db_path): super().__init__() db_path = os.path.expanduser(db_path) if len(os.path.dirname(db_path)) > 0 and not os.path.exists(os.path.dirname(db_path)): try: os.makedirs(os.path.dirname(db_path)) except PermissionError: raise PermissionError(f'Could not create database directory: {db_path}') from None self.conn = sqlite3.connect(os.path.expanduser(db_path)) c = self.conn.cursor() c.execute(""" CREATE TABLE IF NOT EXISTS data ( key BLOB PRIMARY KEY, value BLOB, expire_time_ms INTEGER ) """) self.conn.commit() def _put(self, key: bytes, value: bytes, expire_time_ms=None): c = self.conn.cursor() c.execute('INSERT OR REPLACE INTO data (key, value, expire_time_ms) VALUES (?, ?, ?)', (key, value, expire_time_ms)) self.conn.commit() def _put_batch(self, keys: List[bytes], values: List[bytes], expire_time_mss:List[Optional[int]]): c = self.conn.cursor() c.executemany('INSERT OR REPLACE INTO data (key, value, expire_time_ms) VALUES (?, ?, ?)', zip(keys, values, expire_time_mss)) self.conn.commit() def _get(self, key: bytes, can_be_prefix=False, must_be_fresh=False) -> Optional[bytes]: c = self.conn.cursor() query = 'SELECT value FROM data WHERE ' if must_be_fresh: query += f'(expire_time_ms > {time_ms()}) AND ' if can_be_prefix: query += 'hex(key) LIKE ?' c.execute(query, (key.hex() + '%', )) else: query += 'key = ?' c.execute(query, (key, )) ret = c.fetchone() return ret[0] if ret else None def _remove(self, key: bytes) -> bool: c = self.conn.cursor() n_removed = c.execute('DELETE FROM data WHERE key = ?', (key, )).rowcount self.conn.commit() return n_removed > 0
true
true
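The SqliteStorage record above defines a small key-value API (insert/replace, exact or prefix lookup, delete). The sketch below shows hypothetical usage of those hooks; the import path is assumed from the file path in the record, the calls go straight to the internal _put/_get/_remove methods shown, and must_be_fresh is deliberately not exercised because that branch references a time_ms() helper that is not imported in the excerpt.

```python
# Hypothetical usage sketch for the SqliteStorage shown above.
# Assumptions: the module import path matches the repository layout, and the
# internal _put/_get/_remove hooks are called directly (the Storage base
# class presumably wraps them, but its interface is not shown here).
from ndn_python_repo.storage.sqlite import SqliteStorage

store = SqliteStorage('~/.ndn/demo-repo.db')      # creates the file and parent dir

key = b'/demo/data/seg0'
store._put(key, b'payload', expire_time_ms=None)  # insert or overwrite one row
assert store._get(key) == b'payload'              # exact-key lookup

# Prefix lookup: matches any stored key whose hex form starts with the
# prefix's hex form (SQLite LIKE is case-insensitive for ASCII, so the
# lowercase pattern from bytes.hex() still matches SQLite's uppercase hex()).
assert store._get(b'/demo/data', can_be_prefix=True) == b'payload'

assert store._remove(key)                         # True when a row was deleted
assert store._get(key) is None
```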
f70dd03916809216c5fd10697b0165fef0ccd9f7
3,141
py
Python
suzieq/utilities/sq_simnode_rest.py
LucaNicosia/suzieq
c281807ea2c4f44a9d6cd6c80fd5b71277b3cdcd
[ "Apache-2.0" ]
null
null
null
suzieq/utilities/sq_simnode_rest.py
LucaNicosia/suzieq
c281807ea2c4f44a9d6cd6c80fd5b71277b3cdcd
[ "Apache-2.0" ]
null
null
null
suzieq/utilities/sq_simnode_rest.py
LucaNicosia/suzieq
c281807ea2c4f44a9d6cd6c80fd5b71277b3cdcd
[ "Apache-2.0" ]
null
null
null
''' Serve up a fake REST server acting as device REST API ''' import sys import argparse from pathlib import Path import ssl import logging from aiohttp import web import xmltodict def get_filename_from_cmd(cmd_dict: dict) -> str: """Build filename from an xml command""" keys = [] def recursive_items(cmd_dict: dict): for key, value in cmd_dict.items(): if isinstance(value, dict): # do not use list entry for filename if key != "entry": keys.append(key) recursive_items(value) elif value: # do not use property names for filename if not key.startswith("@"): keys.append(key) keys.append(value) else: keys.append(key) recursive_items(cmd_dict) return "_".join(keys).replace("-", "_") def run_server(port=443, input_dir: str = None): """Run sim rest server for the given nos, version and hostname""" api_key = "xxXYHqhFIAWAlWTulizbXtfVfvV5ETfNynHxAlV3ZEUTtrUNKZBDY3aKmCFC" auth_response = f"""<response status="success"> <result> <key>{api_key}</key> </result> </response>""" username = password = "vagrant" routes = web.RouteTableDef() @routes.get('/api/') async def panos_cmd(request): req_type = request.query.get("type", "") req_auth_key = request.headers.get("X-PAN-KEY", "") or \ request.query.get("key", "") # authentication with user and pass to get api key if req_type == "keygen": user = request.query.get("user", "") passwd = request.query.get("password", "") if user == username and passwd == password: return web.Response(text=auth_response) raise web.HTTPForbidden() # cmd queries if req_type == "op" and req_auth_key == api_key: xml_cmd = request.query.get("cmd", "") if xml_cmd == "": raise web.HTTPBadRequest() cmd_dict = xmltodict.parse(xml_cmd) cmd = get_filename_from_cmd(cmd_dict) return web.FileResponse(f"{input_dir}/{cmd}.xml") ssl_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH) ssl_context.load_cert_chain( 'suzieq/config/etc/cert.pem', 'suzieq/config/etc/key.pem') app = web.Application() app.add_routes(routes) logging.basicConfig(level=logging.DEBUG) web.run_app(app, ssl_context=ssl_context, port=port) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument( "-p", "--listening-port", type=int, default=10000, help="Listening port of the ssh server (default: 10000)") parser.add_argument( "-d", "--input-dir", type=str, default=None, help="Input dir to search for host files") args = parser.parse_args() if not Path(args.input_dir).exists(): print(f'ERROR: Path {args.input_dir} does not exist, aborting') sys.exit(1) run_server(port=args.listening_port, input_dir=args.input_dir)
29.914286
76
0.606176
import sys import argparse from pathlib import Path import ssl import logging from aiohttp import web import xmltodict def get_filename_from_cmd(cmd_dict: dict) -> str: keys = [] def recursive_items(cmd_dict: dict): for key, value in cmd_dict.items(): if isinstance(value, dict): if key != "entry": keys.append(key) recursive_items(value) elif value: if not key.startswith("@"): keys.append(key) keys.append(value) else: keys.append(key) recursive_items(cmd_dict) return "_".join(keys).replace("-", "_") def run_server(port=443, input_dir: str = None): api_key = "xxXYHqhFIAWAlWTulizbXtfVfvV5ETfNynHxAlV3ZEUTtrUNKZBDY3aKmCFC" auth_response = f"""<response status="success"> <result> <key>{api_key}</key> </result> </response>""" username = password = "vagrant" routes = web.RouteTableDef() @routes.get('/api/') async def panos_cmd(request): req_type = request.query.get("type", "") req_auth_key = request.headers.get("X-PAN-KEY", "") or \ request.query.get("key", "") if req_type == "keygen": user = request.query.get("user", "") passwd = request.query.get("password", "") if user == username and passwd == password: return web.Response(text=auth_response) raise web.HTTPForbidden() if req_type == "op" and req_auth_key == api_key: xml_cmd = request.query.get("cmd", "") if xml_cmd == "": raise web.HTTPBadRequest() cmd_dict = xmltodict.parse(xml_cmd) cmd = get_filename_from_cmd(cmd_dict) return web.FileResponse(f"{input_dir}/{cmd}.xml") ssl_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH) ssl_context.load_cert_chain( 'suzieq/config/etc/cert.pem', 'suzieq/config/etc/key.pem') app = web.Application() app.add_routes(routes) logging.basicConfig(level=logging.DEBUG) web.run_app(app, ssl_context=ssl_context, port=port) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument( "-p", "--listening-port", type=int, default=10000, help="Listening port of the ssh server (default: 10000)") parser.add_argument( "-d", "--input-dir", type=str, default=None, help="Input dir to search for host files") args = parser.parse_args() if not Path(args.input_dir).exists(): print(f'ERROR: Path {args.input_dir} does not exist, aborting') sys.exit(1) run_server(port=args.listening_port, input_dir=args.input_dir)
true
true
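The simulator above answers PAN-OS-style REST calls in two steps: a keygen request that trades the hard-coded vagrant credentials for the API key, and op requests whose XML cmd is flattened by get_filename_from_cmd into the name of a capture file under the input directory. The sketch below illustrates that flow; requests is an assumed client library, localhost:10000 is the default listening port from the argument parser, and show_system_info.xml is a hypothetical capture file that would have to exist in the directory passed with -d.

```python
# Hedged client sketch for the simulated REST endpoint above.
# Assumptions: the 'requests' package is available, the simulator runs on the
# default port 10000 with its self-signed certificate (hence verify=False),
# and the input directory contains show_system_info.xml.
import requests
import xmltodict

base = "https://localhost:10000/api/"
cmd = "<show><system><info></info></system></show>"
# get_filename_from_cmd(xmltodict.parse(cmd)) yields "show_system_info",
# so the server would reply with <input-dir>/show_system_info.xml.

# Step 1: exchange the hard-coded vagrant credentials for the API key.
keygen = requests.get(base, params={"type": "keygen", "user": "vagrant",
                                    "password": "vagrant"}, verify=False)
api_key = xmltodict.parse(keygen.text)["response"]["result"]["key"]

# Step 2: issue an operational command with the key in the X-PAN-KEY header.
resp = requests.get(base, params={"type": "op", "cmd": cmd},
                    headers={"X-PAN-KEY": api_key}, verify=False)
print(resp.text)   # contents of show_system_info.xml served by the simulator
```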
f70dd0823804a8aef036ace7e761585472afb4e7
343
py
Python
docs.py
Pure-Peace/New-ot-socketio
c3124aed6941ecf885f8c16522bcc64a3b4b488c
[ "MIT" ]
1
2022-02-15T08:01:10.000Z
2022-02-15T08:01:10.000Z
docs.py
Pure-Peace/New-ot-socketio
c3124aed6941ecf885f8c16522bcc64a3b4b488c
[ "MIT" ]
null
null
null
docs.py
Pure-Peace/New-ot-socketio
c3124aed6941ecf885f8c16522bcc64a3b4b488c
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- ''' otsu.fun - SSWA Resources Docs String @version: 0.1 @author: PurePeace @time: 2020-01-07 @describe: docs string for api resources!!! ''' demo = \ ''' Demo endpoint Parameters: { foo } --- - Nothing here ``` null ``` ''' # run? not. if __name__ == '__main__': print('only docs, so it does not work')
12.703704
44
0.548105
demo = \ ''' Demo endpoint Parameters: { foo } --- - Nothing here ``` null ``` ''' if __name__ == '__main__': print('only docs, so it does not work')
true
true
f70dd0c7750c5520d89862092e2faadca474195e
564
py
Python
recommender/recommender/framework/tf2/__init__.py
ericdoug-qi/RecommendationsInAction
ab1b2b8cd94285b3798df92febc8a73b36db17cd
[ "MIT" ]
null
null
null
recommender/recommender/framework/tf2/__init__.py
ericdoug-qi/RecommendationsInAction
ab1b2b8cd94285b3798df92febc8a73b36db17cd
[ "MIT" ]
null
null
null
recommender/recommender/framework/tf2/__init__.py
ericdoug-qi/RecommendationsInAction
ab1b2b8cd94285b3798df92febc8a73b36db17cd
[ "MIT" ]
null
null
null
# _*_ coding: utf-8 _*_ """ ------------------------------------------------- File Name: __init__.py.py Description : Author : ericdoug date:2021/3/7 ------------------------------------------------- Change Activity: 2021/3/7: created ------------------------------------------------- """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from __future__ import absolute_import # sys packages import os # third packages # my packages
20.142857
49
0.542553
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from __future__ import absolute_import import os
true
true
f70dd0fd149c9b9c8dd2612b9d8bdda428b55863
6,909
py
Python
scripts/pixel_classification_zarr.py
pwalczysko/ilastik
e4fa2c3c1ba1f83d3dcc392ccdd29e4391b8dbcf
[ "BSD-2-Clause" ]
3
2020-08-04T22:09:54.000Z
2021-07-15T11:15:36.000Z
scripts/pixel_classification_zarr.py
pwalczysko/ilastik
e4fa2c3c1ba1f83d3dcc392ccdd29e4391b8dbcf
[ "BSD-2-Clause" ]
29
2019-12-18T11:53:31.000Z
2021-12-10T11:36:20.000Z
scripts/pixel_classification_zarr.py
pwalczysko/ilastik
e4fa2c3c1ba1f83d3dcc392ccdd29e4391b8dbcf
[ "BSD-2-Clause" ]
5
2020-01-24T17:14:57.000Z
2021-04-29T08:51:06.000Z
#!/usr/bin/env python # -*- coding: utf-8 -*- # # # Copyright (c) 2020 University of Dundee. # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES # OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED # OF THE POSSIBILITY OF SUCH DAMAGE. # # Version: 1.0 # import time import tempfile import tarfile import numpy import os import zarr import dask.array as da import omero.clients from omero.gateway import BlitzGateway from getpass import getpass from ilastik import app from ilastik.applets.dataSelection.opDataSelection import PreloadedArrayDatasetInfo # noqa import vigra # Connect to the server def connect(hostname, username, password): conn = BlitzGateway(username, password, host=hostname, secure=True) conn.connect() return conn # Load-images def load_images(conn, dataset_id): return conn.getObjects('Image', opts={'dataset': dataset_id}) # Create-dataset def create_dataset(conn, dataset_id): dataset = omero.model.DatasetI() v = "ilastik_probabilities_from_dataset_%s" % dataset_id dataset.setName(omero.rtypes.rstring(v)) v = "ilatisk results probabilities from Dataset:%s" % dataset_id dataset.setDescription(omero.rtypes.rstring(v)) return conn.getUpdateService().saveAndReturnObject(dataset) # Load-data def load_numpy_array(image, path, extension=".tar", resolution=0): # load annotation linked to the image. 
Download in a tmp dir for ann in image.listAnnotations(): if isinstance(ann, omero.gateway.FileAnnotationWrapper): name = ann.getFile().getName() ns = ann.getNs() if (name.endswith(".zip") or name.endswith(".tar")) and ns is None: file_path = os.path.join(path, name) f_path = os.path.join(path, name.strip(extension)) with open(str(file_path), 'wb') as f: for chunk in ann.getFileInChunks(): f.write(chunk) # extract the file if extension == ".tar": tf = tarfile.open(file_path) tf.extractall(path) tf.close() data = zarr.open(f_path) values = data[resolution][:] # from tczyx to tzyxc values = values.swapaxes(1, 2).swapaxes(2, 3) values = values.swapaxes(3, 4) return values else: data = zarr.open(file_path) return data[:] return None def load_from_s3(image, resolution='0'): id = image.getId() endpoint_url = 'https://minio-dev.openmicroscopy.org/' root = 'idr/outreach/%s.zarr/' % id # data.shape is (t, c, z, y, x) by convention data = da.from_zarr(endpoint_url + root) values = data[:] values = values.swapaxes(1, 2).swapaxes(2, 3).swapaxes(3, 4) return numpy.asarray(values) # Analyze-data def analyze(conn, images, model, new_dataset, extension=".tar", resolution=0): # Prepare ilastik # temporary directory where to download files path = tempfile.mkdtemp() if not os.path.exists(path): os.makedirs(path) os.environ["LAZYFLOW_THREADS"] = "2" os.environ["LAZYFLOW_TOTAL_RAM_MB"] = "2000" args = app.parse_args([]) args.headless = True args.project = model args.readonly = True shell = app.main(args) start = time.time() for image in images: input_data = load_from_s3(image, path) # run ilastik headless print('running ilastik using %s and %s' % (model, image.getName())) data = [ {"Raw Data": PreloadedArrayDatasetInfo(preloaded_array=input_data, axistags=vigra.defaultAxistags("tzyxc"))}] # noqa shell.workflow.batchProcessingApplet.run_export(data, export_to_array=True) # noqa elapsed = time.time() - start print(elapsed) # Save-results def save_results(conn, image, data, dataset, path): filename, file_extension = os.path.splitext(image.getName()) # Save the probabilities file as an image print("Saving Probabilities as zarr file attached to the original Image") name = filename + "_Probabilities_zarr.zip" desc = "ilastik probabilities from Image:%s" % image.getId() # Re-organise array from tzyxc to zctyx order expected by OMERO # data = data.swapaxes(0, 1).swapaxes(3, 4).swapaxes(2, 3).swapaxes(1, 2) namespace = "ilastik.zarr.demo" fp = os.path.join(path, name) with zarr.ZipStore(fp, mode='w') as store: zarr.array(data, store=store, dtype='int16', compressor=zarr.Blosc(cname='zstd')) ann = conn.createFileAnnfromLocalFile(fp, mimetype="application/zip", ns=namespace, desc=desc) image.linkAnnotation(ann) # Disconnect def disconnect(conn): conn.close() # main def main(): # Collect user credentials try: host = input("Host [wss://outreach.openmicroscopy.org/omero-ws]: ") or 'wss://outreach.openmicroscopy.org/omero-ws' # noqa username = input("Username [trainer-1]: ") or 'trainer-1' password = getpass("Password: ") dataset_id = input("Dataset ID [6161]: ") or '6161' # Connect to the server conn = connect(host, username, password) conn.c.enableKeepAlive(60) # path to the ilastik project ilastik_project = "../notebooks/pipelines/pixel-class-133.ilp" # Load the images in the dataset images = load_images(conn, dataset_id) new_dataset = create_dataset(conn, dataset_id) analyze(conn, images, ilastik_project, new_dataset) finally: disconnect(conn) print("done") if __name__ == "__main__": main()
36.363158
134
0.658561
import time import tempfile import tarfile import numpy import os import zarr import dask.array as da import omero.clients from omero.gateway import BlitzGateway from getpass import getpass from ilastik import app from ilastik.applets.dataSelection.opDataSelection import PreloadedArrayDatasetInfo import vigra def connect(hostname, username, password): conn = BlitzGateway(username, password, host=hostname, secure=True) conn.connect() return conn def load_images(conn, dataset_id): return conn.getObjects('Image', opts={'dataset': dataset_id}) def create_dataset(conn, dataset_id): dataset = omero.model.DatasetI() v = "ilastik_probabilities_from_dataset_%s" % dataset_id dataset.setName(omero.rtypes.rstring(v)) v = "ilatisk results probabilities from Dataset:%s" % dataset_id dataset.setDescription(omero.rtypes.rstring(v)) return conn.getUpdateService().saveAndReturnObject(dataset) def load_numpy_array(image, path, extension=".tar", resolution=0): for ann in image.listAnnotations(): if isinstance(ann, omero.gateway.FileAnnotationWrapper): name = ann.getFile().getName() ns = ann.getNs() if (name.endswith(".zip") or name.endswith(".tar")) and ns is None: file_path = os.path.join(path, name) f_path = os.path.join(path, name.strip(extension)) with open(str(file_path), 'wb') as f: for chunk in ann.getFileInChunks(): f.write(chunk) if extension == ".tar": tf = tarfile.open(file_path) tf.extractall(path) tf.close() data = zarr.open(f_path) values = data[resolution][:] values = values.swapaxes(1, 2).swapaxes(2, 3) values = values.swapaxes(3, 4) return values else: data = zarr.open(file_path) return data[:] return None def load_from_s3(image, resolution='0'): id = image.getId() endpoint_url = 'https://minio-dev.openmicroscopy.org/' root = 'idr/outreach/%s.zarr/' % id data = da.from_zarr(endpoint_url + root) values = data[:] values = values.swapaxes(1, 2).swapaxes(2, 3).swapaxes(3, 4) return numpy.asarray(values) def analyze(conn, images, model, new_dataset, extension=".tar", resolution=0): path = tempfile.mkdtemp() if not os.path.exists(path): os.makedirs(path) os.environ["LAZYFLOW_THREADS"] = "2" os.environ["LAZYFLOW_TOTAL_RAM_MB"] = "2000" args = app.parse_args([]) args.headless = True args.project = model args.readonly = True shell = app.main(args) start = time.time() for image in images: input_data = load_from_s3(image, path) print('running ilastik using %s and %s' % (model, image.getName())) data = [ {"Raw Data": PreloadedArrayDatasetInfo(preloaded_array=input_data, axistags=vigra.defaultAxistags("tzyxc"))}] shell.workflow.batchProcessingApplet.run_export(data, export_to_array=True) elapsed = time.time() - start print(elapsed) def save_results(conn, image, data, dataset, path): filename, file_extension = os.path.splitext(image.getName()) print("Saving Probabilities as zarr file attached to the original Image") name = filename + "_Probabilities_zarr.zip" desc = "ilastik probabilities from Image:%s" % image.getId() namespace = "ilastik.zarr.demo" fp = os.path.join(path, name) with zarr.ZipStore(fp, mode='w') as store: zarr.array(data, store=store, dtype='int16', compressor=zarr.Blosc(cname='zstd')) ann = conn.createFileAnnfromLocalFile(fp, mimetype="application/zip", ns=namespace, desc=desc) image.linkAnnotation(ann) def disconnect(conn): conn.close() def main(): try: host = input("Host [wss://outreach.openmicroscopy.org/omero-ws]: ") or 'wss://outreach.openmicroscopy.org/omero-ws' username = input("Username [trainer-1]: ") or 'trainer-1' password = getpass("Password: ") dataset_id = input("Dataset ID 
[6161]: ") or '6161' conn = connect(host, username, password) conn.c.enableKeepAlive(60) ilastik_project = "../notebooks/pipelines/pixel-class-133.ilp" images = load_images(conn, dataset_id) new_dataset = create_dataset(conn, dataset_id) analyze(conn, images, ilastik_project, new_dataset) finally: disconnect(conn) print("done") if __name__ == "__main__": main()
true
true
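The ilastik script above reads OME-Zarr data laid out as (t, c, z, y, x) and hands it to PreloadedArrayDatasetInfo with axistags "tzyxc", so the chained swapaxes calls must move the channel axis to the end. A quick self-check of that reordering, using arbitrary axis sizes, is shown below.

```python
# Sanity check of the axis shuffle used in load_numpy_array/load_from_s3 above:
# (t, c, z, y, x) -> (t, z, y, x, c), matching vigra.defaultAxistags("tzyxc").
import numpy as np

t, c, z, y, x = 2, 3, 5, 7, 11      # arbitrary, easy-to-tell-apart sizes
data = np.zeros((t, c, z, y, x))
reordered = data.swapaxes(1, 2).swapaxes(2, 3).swapaxes(3, 4)
assert reordered.shape == (t, z, y, x, c)
```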
f70dd2d9076b8943f6493248c806248898cfa18e
49,133
py
Python
sdk/python/pulumi_aws/codedeploy/_inputs.py
alexbowers/pulumi-aws
7dbdb03b1e4f7c0d51d5b5d17233ff4465c3eff5
[ "ECL-2.0", "Apache-2.0" ]
260
2018-06-18T14:57:00.000Z
2022-03-29T11:41:03.000Z
sdk/python/pulumi_aws/codedeploy/_inputs.py
alexbowers/pulumi-aws
7dbdb03b1e4f7c0d51d5b5d17233ff4465c3eff5
[ "ECL-2.0", "Apache-2.0" ]
1,154
2018-06-19T20:38:20.000Z
2022-03-31T19:48:16.000Z
sdk/python/pulumi_aws/codedeploy/_inputs.py
alexbowers/pulumi-aws
7dbdb03b1e4f7c0d51d5b5d17233ff4465c3eff5
[ "ECL-2.0", "Apache-2.0" ]
115
2018-06-28T03:20:27.000Z
2022-03-29T11:41:06.000Z
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from .. import _utilities __all__ = [ 'DeploymentConfigMinimumHealthyHostsArgs', 'DeploymentConfigTrafficRoutingConfigArgs', 'DeploymentConfigTrafficRoutingConfigTimeBasedCanaryArgs', 'DeploymentConfigTrafficRoutingConfigTimeBasedLinearArgs', 'DeploymentGroupAlarmConfigurationArgs', 'DeploymentGroupAutoRollbackConfigurationArgs', 'DeploymentGroupBlueGreenDeploymentConfigArgs', 'DeploymentGroupBlueGreenDeploymentConfigDeploymentReadyOptionArgs', 'DeploymentGroupBlueGreenDeploymentConfigGreenFleetProvisioningOptionArgs', 'DeploymentGroupBlueGreenDeploymentConfigTerminateBlueInstancesOnDeploymentSuccessArgs', 'DeploymentGroupDeploymentStyleArgs', 'DeploymentGroupEc2TagFilterArgs', 'DeploymentGroupEc2TagSetArgs', 'DeploymentGroupEc2TagSetEc2TagFilterArgs', 'DeploymentGroupEcsServiceArgs', 'DeploymentGroupLoadBalancerInfoArgs', 'DeploymentGroupLoadBalancerInfoElbInfoArgs', 'DeploymentGroupLoadBalancerInfoTargetGroupInfoArgs', 'DeploymentGroupLoadBalancerInfoTargetGroupPairInfoArgs', 'DeploymentGroupLoadBalancerInfoTargetGroupPairInfoProdTrafficRouteArgs', 'DeploymentGroupLoadBalancerInfoTargetGroupPairInfoTargetGroupArgs', 'DeploymentGroupLoadBalancerInfoTargetGroupPairInfoTestTrafficRouteArgs', 'DeploymentGroupOnPremisesInstanceTagFilterArgs', 'DeploymentGroupTriggerConfigurationArgs', ] @pulumi.input_type class DeploymentConfigMinimumHealthyHostsArgs: def __init__(__self__, *, type: Optional[pulumi.Input[str]] = None, value: Optional[pulumi.Input[int]] = None): """ :param pulumi.Input[str] type: The type can either be `FLEET_PERCENT` or `HOST_COUNT`. :param pulumi.Input[int] value: The value when the type is `FLEET_PERCENT` represents the minimum number of healthy instances as a percentage of the total number of instances in the deployment. If you specify FLEET_PERCENT, at the start of the deployment, AWS CodeDeploy converts the percentage to the equivalent number of instance and rounds up fractional instances. When the type is `HOST_COUNT`, the value represents the minimum number of healthy instances as an absolute value. """ if type is not None: pulumi.set(__self__, "type", type) if value is not None: pulumi.set(__self__, "value", value) @property @pulumi.getter def type(self) -> Optional[pulumi.Input[str]]: """ The type can either be `FLEET_PERCENT` or `HOST_COUNT`. """ return pulumi.get(self, "type") @type.setter def type(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "type", value) @property @pulumi.getter def value(self) -> Optional[pulumi.Input[int]]: """ The value when the type is `FLEET_PERCENT` represents the minimum number of healthy instances as a percentage of the total number of instances in the deployment. If you specify FLEET_PERCENT, at the start of the deployment, AWS CodeDeploy converts the percentage to the equivalent number of instance and rounds up fractional instances. When the type is `HOST_COUNT`, the value represents the minimum number of healthy instances as an absolute value. 
""" return pulumi.get(self, "value") @value.setter def value(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "value", value) @pulumi.input_type class DeploymentConfigTrafficRoutingConfigArgs: def __init__(__self__, *, time_based_canary: Optional[pulumi.Input['DeploymentConfigTrafficRoutingConfigTimeBasedCanaryArgs']] = None, time_based_linear: Optional[pulumi.Input['DeploymentConfigTrafficRoutingConfigTimeBasedLinearArgs']] = None, type: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input['DeploymentConfigTrafficRoutingConfigTimeBasedCanaryArgs'] time_based_canary: The time based canary configuration information. If `type` is `TimeBasedLinear`, use `time_based_linear` instead. :param pulumi.Input['DeploymentConfigTrafficRoutingConfigTimeBasedLinearArgs'] time_based_linear: The time based linear configuration information. If `type` is `TimeBasedCanary`, use `time_based_canary` instead. :param pulumi.Input[str] type: Type of traffic routing config. One of `TimeBasedCanary`, `TimeBasedLinear`, `AllAtOnce`. """ if time_based_canary is not None: pulumi.set(__self__, "time_based_canary", time_based_canary) if time_based_linear is not None: pulumi.set(__self__, "time_based_linear", time_based_linear) if type is not None: pulumi.set(__self__, "type", type) @property @pulumi.getter(name="timeBasedCanary") def time_based_canary(self) -> Optional[pulumi.Input['DeploymentConfigTrafficRoutingConfigTimeBasedCanaryArgs']]: """ The time based canary configuration information. If `type` is `TimeBasedLinear`, use `time_based_linear` instead. """ return pulumi.get(self, "time_based_canary") @time_based_canary.setter def time_based_canary(self, value: Optional[pulumi.Input['DeploymentConfigTrafficRoutingConfigTimeBasedCanaryArgs']]): pulumi.set(self, "time_based_canary", value) @property @pulumi.getter(name="timeBasedLinear") def time_based_linear(self) -> Optional[pulumi.Input['DeploymentConfigTrafficRoutingConfigTimeBasedLinearArgs']]: """ The time based linear configuration information. If `type` is `TimeBasedCanary`, use `time_based_canary` instead. """ return pulumi.get(self, "time_based_linear") @time_based_linear.setter def time_based_linear(self, value: Optional[pulumi.Input['DeploymentConfigTrafficRoutingConfigTimeBasedLinearArgs']]): pulumi.set(self, "time_based_linear", value) @property @pulumi.getter def type(self) -> Optional[pulumi.Input[str]]: """ Type of traffic routing config. One of `TimeBasedCanary`, `TimeBasedLinear`, `AllAtOnce`. """ return pulumi.get(self, "type") @type.setter def type(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "type", value) @pulumi.input_type class DeploymentConfigTrafficRoutingConfigTimeBasedCanaryArgs: def __init__(__self__, *, interval: Optional[pulumi.Input[int]] = None, percentage: Optional[pulumi.Input[int]] = None): """ :param pulumi.Input[int] interval: The number of minutes between the first and second traffic shifts of a `TimeBasedCanary` deployment. :param pulumi.Input[int] percentage: The percentage of traffic to shift in the first increment of a `TimeBasedCanary` deployment. """ if interval is not None: pulumi.set(__self__, "interval", interval) if percentage is not None: pulumi.set(__self__, "percentage", percentage) @property @pulumi.getter def interval(self) -> Optional[pulumi.Input[int]]: """ The number of minutes between the first and second traffic shifts of a `TimeBasedCanary` deployment. 
""" return pulumi.get(self, "interval") @interval.setter def interval(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "interval", value) @property @pulumi.getter def percentage(self) -> Optional[pulumi.Input[int]]: """ The percentage of traffic to shift in the first increment of a `TimeBasedCanary` deployment. """ return pulumi.get(self, "percentage") @percentage.setter def percentage(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "percentage", value) @pulumi.input_type class DeploymentConfigTrafficRoutingConfigTimeBasedLinearArgs: def __init__(__self__, *, interval: Optional[pulumi.Input[int]] = None, percentage: Optional[pulumi.Input[int]] = None): """ :param pulumi.Input[int] interval: The number of minutes between each incremental traffic shift of a `TimeBasedLinear` deployment. :param pulumi.Input[int] percentage: The percentage of traffic that is shifted at the start of each increment of a `TimeBasedLinear` deployment. """ if interval is not None: pulumi.set(__self__, "interval", interval) if percentage is not None: pulumi.set(__self__, "percentage", percentage) @property @pulumi.getter def interval(self) -> Optional[pulumi.Input[int]]: """ The number of minutes between each incremental traffic shift of a `TimeBasedLinear` deployment. """ return pulumi.get(self, "interval") @interval.setter def interval(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "interval", value) @property @pulumi.getter def percentage(self) -> Optional[pulumi.Input[int]]: """ The percentage of traffic that is shifted at the start of each increment of a `TimeBasedLinear` deployment. """ return pulumi.get(self, "percentage") @percentage.setter def percentage(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "percentage", value) @pulumi.input_type class DeploymentGroupAlarmConfigurationArgs: def __init__(__self__, *, alarms: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, enabled: Optional[pulumi.Input[bool]] = None, ignore_poll_alarm_failure: Optional[pulumi.Input[bool]] = None): """ :param pulumi.Input[Sequence[pulumi.Input[str]]] alarms: A list of alarms configured for the deployment group. _A maximum of 10 alarms can be added to a deployment group_. :param pulumi.Input[bool] enabled: Indicates whether the alarm configuration is enabled. This option is useful when you want to temporarily deactivate alarm monitoring for a deployment group without having to add the same alarms again later. :param pulumi.Input[bool] ignore_poll_alarm_failure: Indicates whether a deployment should continue if information about the current state of alarms cannot be retrieved from CloudWatch. The default value is `false`. * `true`: The deployment will proceed even if alarm status information can't be retrieved. * `false`: The deployment will stop if alarm status information can't be retrieved. """ if alarms is not None: pulumi.set(__self__, "alarms", alarms) if enabled is not None: pulumi.set(__self__, "enabled", enabled) if ignore_poll_alarm_failure is not None: pulumi.set(__self__, "ignore_poll_alarm_failure", ignore_poll_alarm_failure) @property @pulumi.getter def alarms(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ A list of alarms configured for the deployment group. _A maximum of 10 alarms can be added to a deployment group_. 
""" return pulumi.get(self, "alarms") @alarms.setter def alarms(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "alarms", value) @property @pulumi.getter def enabled(self) -> Optional[pulumi.Input[bool]]: """ Indicates whether the alarm configuration is enabled. This option is useful when you want to temporarily deactivate alarm monitoring for a deployment group without having to add the same alarms again later. """ return pulumi.get(self, "enabled") @enabled.setter def enabled(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enabled", value) @property @pulumi.getter(name="ignorePollAlarmFailure") def ignore_poll_alarm_failure(self) -> Optional[pulumi.Input[bool]]: """ Indicates whether a deployment should continue if information about the current state of alarms cannot be retrieved from CloudWatch. The default value is `false`. * `true`: The deployment will proceed even if alarm status information can't be retrieved. * `false`: The deployment will stop if alarm status information can't be retrieved. """ return pulumi.get(self, "ignore_poll_alarm_failure") @ignore_poll_alarm_failure.setter def ignore_poll_alarm_failure(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "ignore_poll_alarm_failure", value) @pulumi.input_type class DeploymentGroupAutoRollbackConfigurationArgs: def __init__(__self__, *, enabled: Optional[pulumi.Input[bool]] = None, events: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): """ :param pulumi.Input[bool] enabled: Indicates whether a defined automatic rollback configuration is currently enabled for this Deployment Group. If you enable automatic rollback, you must specify at least one event type. :param pulumi.Input[Sequence[pulumi.Input[str]]] events: The event type or types that trigger a rollback. Supported types are `DEPLOYMENT_FAILURE` and `DEPLOYMENT_STOP_ON_ALARM`. """ if enabled is not None: pulumi.set(__self__, "enabled", enabled) if events is not None: pulumi.set(__self__, "events", events) @property @pulumi.getter def enabled(self) -> Optional[pulumi.Input[bool]]: """ Indicates whether a defined automatic rollback configuration is currently enabled for this Deployment Group. If you enable automatic rollback, you must specify at least one event type. """ return pulumi.get(self, "enabled") @enabled.setter def enabled(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enabled", value) @property @pulumi.getter def events(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ The event type or types that trigger a rollback. Supported types are `DEPLOYMENT_FAILURE` and `DEPLOYMENT_STOP_ON_ALARM`. 
""" return pulumi.get(self, "events") @events.setter def events(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "events", value) @pulumi.input_type class DeploymentGroupBlueGreenDeploymentConfigArgs: def __init__(__self__, *, deployment_ready_option: Optional[pulumi.Input['DeploymentGroupBlueGreenDeploymentConfigDeploymentReadyOptionArgs']] = None, green_fleet_provisioning_option: Optional[pulumi.Input['DeploymentGroupBlueGreenDeploymentConfigGreenFleetProvisioningOptionArgs']] = None, terminate_blue_instances_on_deployment_success: Optional[pulumi.Input['DeploymentGroupBlueGreenDeploymentConfigTerminateBlueInstancesOnDeploymentSuccessArgs']] = None): """ :param pulumi.Input['DeploymentGroupBlueGreenDeploymentConfigDeploymentReadyOptionArgs'] deployment_ready_option: Information about the action to take when newly provisioned instances are ready to receive traffic in a blue/green deployment (documented below). :param pulumi.Input['DeploymentGroupBlueGreenDeploymentConfigGreenFleetProvisioningOptionArgs'] green_fleet_provisioning_option: Information about how instances are provisioned for a replacement environment in a blue/green deployment (documented below). :param pulumi.Input['DeploymentGroupBlueGreenDeploymentConfigTerminateBlueInstancesOnDeploymentSuccessArgs'] terminate_blue_instances_on_deployment_success: Information about whether to terminate instances in the original fleet during a blue/green deployment (documented below). """ if deployment_ready_option is not None: pulumi.set(__self__, "deployment_ready_option", deployment_ready_option) if green_fleet_provisioning_option is not None: pulumi.set(__self__, "green_fleet_provisioning_option", green_fleet_provisioning_option) if terminate_blue_instances_on_deployment_success is not None: pulumi.set(__self__, "terminate_blue_instances_on_deployment_success", terminate_blue_instances_on_deployment_success) @property @pulumi.getter(name="deploymentReadyOption") def deployment_ready_option(self) -> Optional[pulumi.Input['DeploymentGroupBlueGreenDeploymentConfigDeploymentReadyOptionArgs']]: """ Information about the action to take when newly provisioned instances are ready to receive traffic in a blue/green deployment (documented below). """ return pulumi.get(self, "deployment_ready_option") @deployment_ready_option.setter def deployment_ready_option(self, value: Optional[pulumi.Input['DeploymentGroupBlueGreenDeploymentConfigDeploymentReadyOptionArgs']]): pulumi.set(self, "deployment_ready_option", value) @property @pulumi.getter(name="greenFleetProvisioningOption") def green_fleet_provisioning_option(self) -> Optional[pulumi.Input['DeploymentGroupBlueGreenDeploymentConfigGreenFleetProvisioningOptionArgs']]: """ Information about how instances are provisioned for a replacement environment in a blue/green deployment (documented below). 
""" return pulumi.get(self, "green_fleet_provisioning_option") @green_fleet_provisioning_option.setter def green_fleet_provisioning_option(self, value: Optional[pulumi.Input['DeploymentGroupBlueGreenDeploymentConfigGreenFleetProvisioningOptionArgs']]): pulumi.set(self, "green_fleet_provisioning_option", value) @property @pulumi.getter(name="terminateBlueInstancesOnDeploymentSuccess") def terminate_blue_instances_on_deployment_success(self) -> Optional[pulumi.Input['DeploymentGroupBlueGreenDeploymentConfigTerminateBlueInstancesOnDeploymentSuccessArgs']]: """ Information about whether to terminate instances in the original fleet during a blue/green deployment (documented below). """ return pulumi.get(self, "terminate_blue_instances_on_deployment_success") @terminate_blue_instances_on_deployment_success.setter def terminate_blue_instances_on_deployment_success(self, value: Optional[pulumi.Input['DeploymentGroupBlueGreenDeploymentConfigTerminateBlueInstancesOnDeploymentSuccessArgs']]): pulumi.set(self, "terminate_blue_instances_on_deployment_success", value) @pulumi.input_type class DeploymentGroupBlueGreenDeploymentConfigDeploymentReadyOptionArgs: def __init__(__self__, *, action_on_timeout: Optional[pulumi.Input[str]] = None, wait_time_in_minutes: Optional[pulumi.Input[int]] = None): """ :param pulumi.Input[str] action_on_timeout: When to reroute traffic from an original environment to a replacement environment in a blue/green deployment. * `CONTINUE_DEPLOYMENT`: Register new instances with the load balancer immediately after the new application revision is installed on the instances in the replacement environment. * `STOP_DEPLOYMENT`: Do not register new instances with load balancer unless traffic is rerouted manually. If traffic is not rerouted manually before the end of the specified wait period, the deployment status is changed to Stopped. :param pulumi.Input[int] wait_time_in_minutes: The number of minutes to wait before the status of a blue/green deployment changed to Stopped if rerouting is not started manually. Applies only to the `STOP_DEPLOYMENT` option for `action_on_timeout`. """ if action_on_timeout is not None: pulumi.set(__self__, "action_on_timeout", action_on_timeout) if wait_time_in_minutes is not None: pulumi.set(__self__, "wait_time_in_minutes", wait_time_in_minutes) @property @pulumi.getter(name="actionOnTimeout") def action_on_timeout(self) -> Optional[pulumi.Input[str]]: """ When to reroute traffic from an original environment to a replacement environment in a blue/green deployment. * `CONTINUE_DEPLOYMENT`: Register new instances with the load balancer immediately after the new application revision is installed on the instances in the replacement environment. * `STOP_DEPLOYMENT`: Do not register new instances with load balancer unless traffic is rerouted manually. If traffic is not rerouted manually before the end of the specified wait period, the deployment status is changed to Stopped. """ return pulumi.get(self, "action_on_timeout") @action_on_timeout.setter def action_on_timeout(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "action_on_timeout", value) @property @pulumi.getter(name="waitTimeInMinutes") def wait_time_in_minutes(self) -> Optional[pulumi.Input[int]]: """ The number of minutes to wait before the status of a blue/green deployment changed to Stopped if rerouting is not started manually. Applies only to the `STOP_DEPLOYMENT` option for `action_on_timeout`. 
""" return pulumi.get(self, "wait_time_in_minutes") @wait_time_in_minutes.setter def wait_time_in_minutes(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "wait_time_in_minutes", value) @pulumi.input_type class DeploymentGroupBlueGreenDeploymentConfigGreenFleetProvisioningOptionArgs: def __init__(__self__, *, action: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[str] action: The method used to add instances to a replacement environment. * `DISCOVER_EXISTING`: Use instances that already exist or will be created manually. * `COPY_AUTO_SCALING_GROUP`: Use settings from a specified **Auto Scaling** group to define and create instances in a new Auto Scaling group. _Exactly one Auto Scaling group must be specified_ when selecting `COPY_AUTO_SCALING_GROUP`. Use `autoscaling_groups` to specify the Auto Scaling group. """ if action is not None: pulumi.set(__self__, "action", action) @property @pulumi.getter def action(self) -> Optional[pulumi.Input[str]]: """ The method used to add instances to a replacement environment. * `DISCOVER_EXISTING`: Use instances that already exist or will be created manually. * `COPY_AUTO_SCALING_GROUP`: Use settings from a specified **Auto Scaling** group to define and create instances in a new Auto Scaling group. _Exactly one Auto Scaling group must be specified_ when selecting `COPY_AUTO_SCALING_GROUP`. Use `autoscaling_groups` to specify the Auto Scaling group. """ return pulumi.get(self, "action") @action.setter def action(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "action", value) @pulumi.input_type class DeploymentGroupBlueGreenDeploymentConfigTerminateBlueInstancesOnDeploymentSuccessArgs: def __init__(__self__, *, action: Optional[pulumi.Input[str]] = None, termination_wait_time_in_minutes: Optional[pulumi.Input[int]] = None): """ :param pulumi.Input[str] action: The action to take on instances in the original environment after a successful blue/green deployment. * `TERMINATE`: Instances are terminated after a specified wait time. * `KEEP_ALIVE`: Instances are left running after they are deregistered from the load balancer and removed from the deployment group. :param pulumi.Input[int] termination_wait_time_in_minutes: The number of minutes to wait after a successful blue/green deployment before terminating instances from the original environment. """ if action is not None: pulumi.set(__self__, "action", action) if termination_wait_time_in_minutes is not None: pulumi.set(__self__, "termination_wait_time_in_minutes", termination_wait_time_in_minutes) @property @pulumi.getter def action(self) -> Optional[pulumi.Input[str]]: """ The action to take on instances in the original environment after a successful blue/green deployment. * `TERMINATE`: Instances are terminated after a specified wait time. * `KEEP_ALIVE`: Instances are left running after they are deregistered from the load balancer and removed from the deployment group. """ return pulumi.get(self, "action") @action.setter def action(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "action", value) @property @pulumi.getter(name="terminationWaitTimeInMinutes") def termination_wait_time_in_minutes(self) -> Optional[pulumi.Input[int]]: """ The number of minutes to wait after a successful blue/green deployment before terminating instances from the original environment. 
""" return pulumi.get(self, "termination_wait_time_in_minutes") @termination_wait_time_in_minutes.setter def termination_wait_time_in_minutes(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "termination_wait_time_in_minutes", value) @pulumi.input_type class DeploymentGroupDeploymentStyleArgs: def __init__(__self__, *, deployment_option: Optional[pulumi.Input[str]] = None, deployment_type: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[str] deployment_option: Indicates whether to route deployment traffic behind a load balancer. Valid Values are `WITH_TRAFFIC_CONTROL` or `WITHOUT_TRAFFIC_CONTROL`. Default is `WITHOUT_TRAFFIC_CONTROL`. :param pulumi.Input[str] deployment_type: Indicates whether to run an in-place deployment or a blue/green deployment. Valid Values are `IN_PLACE` or `BLUE_GREEN`. Default is `IN_PLACE`. """ if deployment_option is not None: pulumi.set(__self__, "deployment_option", deployment_option) if deployment_type is not None: pulumi.set(__self__, "deployment_type", deployment_type) @property @pulumi.getter(name="deploymentOption") def deployment_option(self) -> Optional[pulumi.Input[str]]: """ Indicates whether to route deployment traffic behind a load balancer. Valid Values are `WITH_TRAFFIC_CONTROL` or `WITHOUT_TRAFFIC_CONTROL`. Default is `WITHOUT_TRAFFIC_CONTROL`. """ return pulumi.get(self, "deployment_option") @deployment_option.setter def deployment_option(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "deployment_option", value) @property @pulumi.getter(name="deploymentType") def deployment_type(self) -> Optional[pulumi.Input[str]]: """ Indicates whether to run an in-place deployment or a blue/green deployment. Valid Values are `IN_PLACE` or `BLUE_GREEN`. Default is `IN_PLACE`. """ return pulumi.get(self, "deployment_type") @deployment_type.setter def deployment_type(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "deployment_type", value) @pulumi.input_type class DeploymentGroupEc2TagFilterArgs: def __init__(__self__, *, key: Optional[pulumi.Input[str]] = None, type: Optional[pulumi.Input[str]] = None, value: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[str] key: The key of the tag filter. :param pulumi.Input[str] type: The type of the tag filter, either `KEY_ONLY`, `VALUE_ONLY`, or `KEY_AND_VALUE`. :param pulumi.Input[str] value: The value of the tag filter. """ if key is not None: pulumi.set(__self__, "key", key) if type is not None: pulumi.set(__self__, "type", type) if value is not None: pulumi.set(__self__, "value", value) @property @pulumi.getter def key(self) -> Optional[pulumi.Input[str]]: """ The key of the tag filter. """ return pulumi.get(self, "key") @key.setter def key(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "key", value) @property @pulumi.getter def type(self) -> Optional[pulumi.Input[str]]: """ The type of the tag filter, either `KEY_ONLY`, `VALUE_ONLY`, or `KEY_AND_VALUE`. """ return pulumi.get(self, "type") @type.setter def type(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "type", value) @property @pulumi.getter def value(self) -> Optional[pulumi.Input[str]]: """ The value of the tag filter. 
""" return pulumi.get(self, "value") @value.setter def value(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "value", value) @pulumi.input_type class DeploymentGroupEc2TagSetArgs: def __init__(__self__, *, ec2_tag_filters: Optional[pulumi.Input[Sequence[pulumi.Input['DeploymentGroupEc2TagSetEc2TagFilterArgs']]]] = None): """ :param pulumi.Input[Sequence[pulumi.Input['DeploymentGroupEc2TagSetEc2TagFilterArgs']]] ec2_tag_filters: Tag filters associated with the deployment group. See the AWS docs for details. """ if ec2_tag_filters is not None: pulumi.set(__self__, "ec2_tag_filters", ec2_tag_filters) @property @pulumi.getter(name="ec2TagFilters") def ec2_tag_filters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DeploymentGroupEc2TagSetEc2TagFilterArgs']]]]: """ Tag filters associated with the deployment group. See the AWS docs for details. """ return pulumi.get(self, "ec2_tag_filters") @ec2_tag_filters.setter def ec2_tag_filters(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['DeploymentGroupEc2TagSetEc2TagFilterArgs']]]]): pulumi.set(self, "ec2_tag_filters", value) @pulumi.input_type class DeploymentGroupEc2TagSetEc2TagFilterArgs: def __init__(__self__, *, key: Optional[pulumi.Input[str]] = None, type: Optional[pulumi.Input[str]] = None, value: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[str] key: The key of the tag filter. :param pulumi.Input[str] type: The type of the tag filter, either `KEY_ONLY`, `VALUE_ONLY`, or `KEY_AND_VALUE`. :param pulumi.Input[str] value: The value of the tag filter. """ if key is not None: pulumi.set(__self__, "key", key) if type is not None: pulumi.set(__self__, "type", type) if value is not None: pulumi.set(__self__, "value", value) @property @pulumi.getter def key(self) -> Optional[pulumi.Input[str]]: """ The key of the tag filter. """ return pulumi.get(self, "key") @key.setter def key(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "key", value) @property @pulumi.getter def type(self) -> Optional[pulumi.Input[str]]: """ The type of the tag filter, either `KEY_ONLY`, `VALUE_ONLY`, or `KEY_AND_VALUE`. """ return pulumi.get(self, "type") @type.setter def type(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "type", value) @property @pulumi.getter def value(self) -> Optional[pulumi.Input[str]]: """ The value of the tag filter. """ return pulumi.get(self, "value") @value.setter def value(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "value", value) @pulumi.input_type class DeploymentGroupEcsServiceArgs: def __init__(__self__, *, cluster_name: pulumi.Input[str], service_name: pulumi.Input[str]): """ :param pulumi.Input[str] cluster_name: The name of the ECS cluster. :param pulumi.Input[str] service_name: The name of the ECS service. """ pulumi.set(__self__, "cluster_name", cluster_name) pulumi.set(__self__, "service_name", service_name) @property @pulumi.getter(name="clusterName") def cluster_name(self) -> pulumi.Input[str]: """ The name of the ECS cluster. """ return pulumi.get(self, "cluster_name") @cluster_name.setter def cluster_name(self, value: pulumi.Input[str]): pulumi.set(self, "cluster_name", value) @property @pulumi.getter(name="serviceName") def service_name(self) -> pulumi.Input[str]: """ The name of the ECS service. 
""" return pulumi.get(self, "service_name") @service_name.setter def service_name(self, value: pulumi.Input[str]): pulumi.set(self, "service_name", value) @pulumi.input_type class DeploymentGroupLoadBalancerInfoArgs: def __init__(__self__, *, elb_infos: Optional[pulumi.Input[Sequence[pulumi.Input['DeploymentGroupLoadBalancerInfoElbInfoArgs']]]] = None, target_group_infos: Optional[pulumi.Input[Sequence[pulumi.Input['DeploymentGroupLoadBalancerInfoTargetGroupInfoArgs']]]] = None, target_group_pair_info: Optional[pulumi.Input['DeploymentGroupLoadBalancerInfoTargetGroupPairInfoArgs']] = None): """ :param pulumi.Input[Sequence[pulumi.Input['DeploymentGroupLoadBalancerInfoElbInfoArgs']]] elb_infos: The Classic Elastic Load Balancer to use in a deployment. Conflicts with `target_group_info` and `target_group_pair_info`. :param pulumi.Input[Sequence[pulumi.Input['DeploymentGroupLoadBalancerInfoTargetGroupInfoArgs']]] target_group_infos: The (Application/Network Load Balancer) target group to use in a deployment. Conflicts with `elb_info` and `target_group_pair_info`. :param pulumi.Input['DeploymentGroupLoadBalancerInfoTargetGroupPairInfoArgs'] target_group_pair_info: The (Application/Network Load Balancer) target group pair to use in a deployment. Conflicts with `elb_info` and `target_group_info`. """ if elb_infos is not None: pulumi.set(__self__, "elb_infos", elb_infos) if target_group_infos is not None: pulumi.set(__self__, "target_group_infos", target_group_infos) if target_group_pair_info is not None: pulumi.set(__self__, "target_group_pair_info", target_group_pair_info) @property @pulumi.getter(name="elbInfos") def elb_infos(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DeploymentGroupLoadBalancerInfoElbInfoArgs']]]]: """ The Classic Elastic Load Balancer to use in a deployment. Conflicts with `target_group_info` and `target_group_pair_info`. """ return pulumi.get(self, "elb_infos") @elb_infos.setter def elb_infos(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['DeploymentGroupLoadBalancerInfoElbInfoArgs']]]]): pulumi.set(self, "elb_infos", value) @property @pulumi.getter(name="targetGroupInfos") def target_group_infos(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DeploymentGroupLoadBalancerInfoTargetGroupInfoArgs']]]]: """ The (Application/Network Load Balancer) target group to use in a deployment. Conflicts with `elb_info` and `target_group_pair_info`. """ return pulumi.get(self, "target_group_infos") @target_group_infos.setter def target_group_infos(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['DeploymentGroupLoadBalancerInfoTargetGroupInfoArgs']]]]): pulumi.set(self, "target_group_infos", value) @property @pulumi.getter(name="targetGroupPairInfo") def target_group_pair_info(self) -> Optional[pulumi.Input['DeploymentGroupLoadBalancerInfoTargetGroupPairInfoArgs']]: """ The (Application/Network Load Balancer) target group pair to use in a deployment. Conflicts with `elb_info` and `target_group_info`. 
""" return pulumi.get(self, "target_group_pair_info") @target_group_pair_info.setter def target_group_pair_info(self, value: Optional[pulumi.Input['DeploymentGroupLoadBalancerInfoTargetGroupPairInfoArgs']]): pulumi.set(self, "target_group_pair_info", value) @pulumi.input_type class DeploymentGroupLoadBalancerInfoElbInfoArgs: def __init__(__self__, *, name: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[str] name: The name of the load balancer that will be used to route traffic from original instances to replacement instances in a blue/green deployment. For in-place deployments, the name of the load balancer that instances are deregistered from so they are not serving traffic during a deployment, and then re-registered with after the deployment completes. """ if name is not None: pulumi.set(__self__, "name", name) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ The name of the load balancer that will be used to route traffic from original instances to replacement instances in a blue/green deployment. For in-place deployments, the name of the load balancer that instances are deregistered from so they are not serving traffic during a deployment, and then re-registered with after the deployment completes. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @pulumi.input_type class DeploymentGroupLoadBalancerInfoTargetGroupInfoArgs: def __init__(__self__, *, name: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[str] name: The name of the target group that instances in the original environment are deregistered from, and instances in the replacement environment registered with. For in-place deployments, the name of the target group that instances are deregistered from, so they are not serving traffic during a deployment, and then re-registered with after the deployment completes. """ if name is not None: pulumi.set(__self__, "name", name) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ The name of the target group that instances in the original environment are deregistered from, and instances in the replacement environment registered with. For in-place deployments, the name of the target group that instances are deregistered from, so they are not serving traffic during a deployment, and then re-registered with after the deployment completes. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @pulumi.input_type class DeploymentGroupLoadBalancerInfoTargetGroupPairInfoArgs: def __init__(__self__, *, prod_traffic_route: pulumi.Input['DeploymentGroupLoadBalancerInfoTargetGroupPairInfoProdTrafficRouteArgs'], target_groups: pulumi.Input[Sequence[pulumi.Input['DeploymentGroupLoadBalancerInfoTargetGroupPairInfoTargetGroupArgs']]], test_traffic_route: Optional[pulumi.Input['DeploymentGroupLoadBalancerInfoTargetGroupPairInfoTestTrafficRouteArgs']] = None): """ :param pulumi.Input['DeploymentGroupLoadBalancerInfoTargetGroupPairInfoProdTrafficRouteArgs'] prod_traffic_route: Configuration block for the production traffic route (documented below). :param pulumi.Input[Sequence[pulumi.Input['DeploymentGroupLoadBalancerInfoTargetGroupPairInfoTargetGroupArgs']]] target_groups: Configuration blocks for a target group within a target group pair (documented below). 
:param pulumi.Input['DeploymentGroupLoadBalancerInfoTargetGroupPairInfoTestTrafficRouteArgs'] test_traffic_route: Configuration block for the test traffic route (documented below). """ pulumi.set(__self__, "prod_traffic_route", prod_traffic_route) pulumi.set(__self__, "target_groups", target_groups) if test_traffic_route is not None: pulumi.set(__self__, "test_traffic_route", test_traffic_route) @property @pulumi.getter(name="prodTrafficRoute") def prod_traffic_route(self) -> pulumi.Input['DeploymentGroupLoadBalancerInfoTargetGroupPairInfoProdTrafficRouteArgs']: """ Configuration block for the production traffic route (documented below). """ return pulumi.get(self, "prod_traffic_route") @prod_traffic_route.setter def prod_traffic_route(self, value: pulumi.Input['DeploymentGroupLoadBalancerInfoTargetGroupPairInfoProdTrafficRouteArgs']): pulumi.set(self, "prod_traffic_route", value) @property @pulumi.getter(name="targetGroups") def target_groups(self) -> pulumi.Input[Sequence[pulumi.Input['DeploymentGroupLoadBalancerInfoTargetGroupPairInfoTargetGroupArgs']]]: """ Configuration blocks for a target group within a target group pair (documented below). """ return pulumi.get(self, "target_groups") @target_groups.setter def target_groups(self, value: pulumi.Input[Sequence[pulumi.Input['DeploymentGroupLoadBalancerInfoTargetGroupPairInfoTargetGroupArgs']]]): pulumi.set(self, "target_groups", value) @property @pulumi.getter(name="testTrafficRoute") def test_traffic_route(self) -> Optional[pulumi.Input['DeploymentGroupLoadBalancerInfoTargetGroupPairInfoTestTrafficRouteArgs']]: """ Configuration block for the test traffic route (documented below). """ return pulumi.get(self, "test_traffic_route") @test_traffic_route.setter def test_traffic_route(self, value: Optional[pulumi.Input['DeploymentGroupLoadBalancerInfoTargetGroupPairInfoTestTrafficRouteArgs']]): pulumi.set(self, "test_traffic_route", value) @pulumi.input_type class DeploymentGroupLoadBalancerInfoTargetGroupPairInfoProdTrafficRouteArgs: def __init__(__self__, *, listener_arns: pulumi.Input[Sequence[pulumi.Input[str]]]): """ :param pulumi.Input[Sequence[pulumi.Input[str]]] listener_arns: List of Amazon Resource Names (ARNs) of the load balancer listeners. """ pulumi.set(__self__, "listener_arns", listener_arns) @property @pulumi.getter(name="listenerArns") def listener_arns(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]: """ List of Amazon Resource Names (ARNs) of the load balancer listeners. """ return pulumi.get(self, "listener_arns") @listener_arns.setter def listener_arns(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]): pulumi.set(self, "listener_arns", value) @pulumi.input_type class DeploymentGroupLoadBalancerInfoTargetGroupPairInfoTargetGroupArgs: def __init__(__self__, *, name: pulumi.Input[str]): """ :param pulumi.Input[str] name: Name of the target group. """ pulumi.set(__self__, "name", name) @property @pulumi.getter def name(self) -> pulumi.Input[str]: """ Name of the target group. """ return pulumi.get(self, "name") @name.setter def name(self, value: pulumi.Input[str]): pulumi.set(self, "name", value) @pulumi.input_type class DeploymentGroupLoadBalancerInfoTargetGroupPairInfoTestTrafficRouteArgs: def __init__(__self__, *, listener_arns: pulumi.Input[Sequence[pulumi.Input[str]]]): """ :param pulumi.Input[Sequence[pulumi.Input[str]]] listener_arns: List of Amazon Resource Names (ARNs) of the load balancer listeners. 
""" pulumi.set(__self__, "listener_arns", listener_arns) @property @pulumi.getter(name="listenerArns") def listener_arns(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]: """ List of Amazon Resource Names (ARNs) of the load balancer listeners. """ return pulumi.get(self, "listener_arns") @listener_arns.setter def listener_arns(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]): pulumi.set(self, "listener_arns", value) @pulumi.input_type class DeploymentGroupOnPremisesInstanceTagFilterArgs: def __init__(__self__, *, key: Optional[pulumi.Input[str]] = None, type: Optional[pulumi.Input[str]] = None, value: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[str] key: The key of the tag filter. :param pulumi.Input[str] type: The type of the tag filter, either `KEY_ONLY`, `VALUE_ONLY`, or `KEY_AND_VALUE`. :param pulumi.Input[str] value: The value of the tag filter. """ if key is not None: pulumi.set(__self__, "key", key) if type is not None: pulumi.set(__self__, "type", type) if value is not None: pulumi.set(__self__, "value", value) @property @pulumi.getter def key(self) -> Optional[pulumi.Input[str]]: """ The key of the tag filter. """ return pulumi.get(self, "key") @key.setter def key(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "key", value) @property @pulumi.getter def type(self) -> Optional[pulumi.Input[str]]: """ The type of the tag filter, either `KEY_ONLY`, `VALUE_ONLY`, or `KEY_AND_VALUE`. """ return pulumi.get(self, "type") @type.setter def type(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "type", value) @property @pulumi.getter def value(self) -> Optional[pulumi.Input[str]]: """ The value of the tag filter. """ return pulumi.get(self, "value") @value.setter def value(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "value", value) @pulumi.input_type class DeploymentGroupTriggerConfigurationArgs: def __init__(__self__, *, trigger_events: pulumi.Input[Sequence[pulumi.Input[str]]], trigger_name: pulumi.Input[str], trigger_target_arn: pulumi.Input[str]): """ :param pulumi.Input[Sequence[pulumi.Input[str]]] trigger_events: The event type or types for which notifications are triggered. Some values that are supported: `DeploymentStart`, `DeploymentSuccess`, `DeploymentFailure`, `DeploymentStop`, `DeploymentRollback`, `InstanceStart`, `InstanceSuccess`, `InstanceFailure`. See [the CodeDeploy documentation](http://docs.aws.amazon.com/codedeploy/latest/userguide/monitoring-sns-event-notifications-create-trigger.html) for all possible values. :param pulumi.Input[str] trigger_name: The name of the notification trigger. :param pulumi.Input[str] trigger_target_arn: The ARN of the SNS topic through which notifications are sent. """ pulumi.set(__self__, "trigger_events", trigger_events) pulumi.set(__self__, "trigger_name", trigger_name) pulumi.set(__self__, "trigger_target_arn", trigger_target_arn) @property @pulumi.getter(name="triggerEvents") def trigger_events(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]: """ The event type or types for which notifications are triggered. Some values that are supported: `DeploymentStart`, `DeploymentSuccess`, `DeploymentFailure`, `DeploymentStop`, `DeploymentRollback`, `InstanceStart`, `InstanceSuccess`, `InstanceFailure`. See [the CodeDeploy documentation](http://docs.aws.amazon.com/codedeploy/latest/userguide/monitoring-sns-event-notifications-create-trigger.html) for all possible values. 
""" return pulumi.get(self, "trigger_events") @trigger_events.setter def trigger_events(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]): pulumi.set(self, "trigger_events", value) @property @pulumi.getter(name="triggerName") def trigger_name(self) -> pulumi.Input[str]: """ The name of the notification trigger. """ return pulumi.get(self, "trigger_name") @trigger_name.setter def trigger_name(self, value: pulumi.Input[str]): pulumi.set(self, "trigger_name", value) @property @pulumi.getter(name="triggerTargetArn") def trigger_target_arn(self) -> pulumi.Input[str]: """ The ARN of the SNS topic through which notifications are sent. """ return pulumi.get(self, "trigger_target_arn") @trigger_target_arn.setter def trigger_target_arn(self, value: pulumi.Input[str]): pulumi.set(self, "trigger_target_arn", value)
48.264244
495
0.701708
import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from .. import _utilities __all__ = [ 'DeploymentConfigMinimumHealthyHostsArgs', 'DeploymentConfigTrafficRoutingConfigArgs', 'DeploymentConfigTrafficRoutingConfigTimeBasedCanaryArgs', 'DeploymentConfigTrafficRoutingConfigTimeBasedLinearArgs', 'DeploymentGroupAlarmConfigurationArgs', 'DeploymentGroupAutoRollbackConfigurationArgs', 'DeploymentGroupBlueGreenDeploymentConfigArgs', 'DeploymentGroupBlueGreenDeploymentConfigDeploymentReadyOptionArgs', 'DeploymentGroupBlueGreenDeploymentConfigGreenFleetProvisioningOptionArgs', 'DeploymentGroupBlueGreenDeploymentConfigTerminateBlueInstancesOnDeploymentSuccessArgs', 'DeploymentGroupDeploymentStyleArgs', 'DeploymentGroupEc2TagFilterArgs', 'DeploymentGroupEc2TagSetArgs', 'DeploymentGroupEc2TagSetEc2TagFilterArgs', 'DeploymentGroupEcsServiceArgs', 'DeploymentGroupLoadBalancerInfoArgs', 'DeploymentGroupLoadBalancerInfoElbInfoArgs', 'DeploymentGroupLoadBalancerInfoTargetGroupInfoArgs', 'DeploymentGroupLoadBalancerInfoTargetGroupPairInfoArgs', 'DeploymentGroupLoadBalancerInfoTargetGroupPairInfoProdTrafficRouteArgs', 'DeploymentGroupLoadBalancerInfoTargetGroupPairInfoTargetGroupArgs', 'DeploymentGroupLoadBalancerInfoTargetGroupPairInfoTestTrafficRouteArgs', 'DeploymentGroupOnPremisesInstanceTagFilterArgs', 'DeploymentGroupTriggerConfigurationArgs', ] @pulumi.input_type class DeploymentConfigMinimumHealthyHostsArgs: def __init__(__self__, *, type: Optional[pulumi.Input[str]] = None, value: Optional[pulumi.Input[int]] = None): if type is not None: pulumi.set(__self__, "type", type) if value is not None: pulumi.set(__self__, "value", value) @property @pulumi.getter def type(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "type") @type.setter def type(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "type", value) @property @pulumi.getter def value(self) -> Optional[pulumi.Input[int]]: return pulumi.get(self, "value") @value.setter def value(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "value", value) @pulumi.input_type class DeploymentConfigTrafficRoutingConfigArgs: def __init__(__self__, *, time_based_canary: Optional[pulumi.Input['DeploymentConfigTrafficRoutingConfigTimeBasedCanaryArgs']] = None, time_based_linear: Optional[pulumi.Input['DeploymentConfigTrafficRoutingConfigTimeBasedLinearArgs']] = None, type: Optional[pulumi.Input[str]] = None): if time_based_canary is not None: pulumi.set(__self__, "time_based_canary", time_based_canary) if time_based_linear is not None: pulumi.set(__self__, "time_based_linear", time_based_linear) if type is not None: pulumi.set(__self__, "type", type) @property @pulumi.getter(name="timeBasedCanary") def time_based_canary(self) -> Optional[pulumi.Input['DeploymentConfigTrafficRoutingConfigTimeBasedCanaryArgs']]: return pulumi.get(self, "time_based_canary") @time_based_canary.setter def time_based_canary(self, value: Optional[pulumi.Input['DeploymentConfigTrafficRoutingConfigTimeBasedCanaryArgs']]): pulumi.set(self, "time_based_canary", value) @property @pulumi.getter(name="timeBasedLinear") def time_based_linear(self) -> Optional[pulumi.Input['DeploymentConfigTrafficRoutingConfigTimeBasedLinearArgs']]: return pulumi.get(self, "time_based_linear") @time_based_linear.setter def time_based_linear(self, value: Optional[pulumi.Input['DeploymentConfigTrafficRoutingConfigTimeBasedLinearArgs']]): pulumi.set(self, "time_based_linear", value) @property @pulumi.getter def 
type(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "type") @type.setter def type(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "type", value) @pulumi.input_type class DeploymentConfigTrafficRoutingConfigTimeBasedCanaryArgs: def __init__(__self__, *, interval: Optional[pulumi.Input[int]] = None, percentage: Optional[pulumi.Input[int]] = None): if interval is not None: pulumi.set(__self__, "interval", interval) if percentage is not None: pulumi.set(__self__, "percentage", percentage) @property @pulumi.getter def interval(self) -> Optional[pulumi.Input[int]]: return pulumi.get(self, "interval") @interval.setter def interval(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "interval", value) @property @pulumi.getter def percentage(self) -> Optional[pulumi.Input[int]]: return pulumi.get(self, "percentage") @percentage.setter def percentage(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "percentage", value) @pulumi.input_type class DeploymentConfigTrafficRoutingConfigTimeBasedLinearArgs: def __init__(__self__, *, interval: Optional[pulumi.Input[int]] = None, percentage: Optional[pulumi.Input[int]] = None): if interval is not None: pulumi.set(__self__, "interval", interval) if percentage is not None: pulumi.set(__self__, "percentage", percentage) @property @pulumi.getter def interval(self) -> Optional[pulumi.Input[int]]: return pulumi.get(self, "interval") @interval.setter def interval(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "interval", value) @property @pulumi.getter def percentage(self) -> Optional[pulumi.Input[int]]: return pulumi.get(self, "percentage") @percentage.setter def percentage(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "percentage", value) @pulumi.input_type class DeploymentGroupAlarmConfigurationArgs: def __init__(__self__, *, alarms: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, enabled: Optional[pulumi.Input[bool]] = None, ignore_poll_alarm_failure: Optional[pulumi.Input[bool]] = None): if alarms is not None: pulumi.set(__self__, "alarms", alarms) if enabled is not None: pulumi.set(__self__, "enabled", enabled) if ignore_poll_alarm_failure is not None: pulumi.set(__self__, "ignore_poll_alarm_failure", ignore_poll_alarm_failure) @property @pulumi.getter def alarms(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: return pulumi.get(self, "alarms") @alarms.setter def alarms(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "alarms", value) @property @pulumi.getter def enabled(self) -> Optional[pulumi.Input[bool]]: return pulumi.get(self, "enabled") @enabled.setter def enabled(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enabled", value) @property @pulumi.getter(name="ignorePollAlarmFailure") def ignore_poll_alarm_failure(self) -> Optional[pulumi.Input[bool]]: return pulumi.get(self, "ignore_poll_alarm_failure") @ignore_poll_alarm_failure.setter def ignore_poll_alarm_failure(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "ignore_poll_alarm_failure", value) @pulumi.input_type class DeploymentGroupAutoRollbackConfigurationArgs: def __init__(__self__, *, enabled: Optional[pulumi.Input[bool]] = None, events: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): if enabled is not None: pulumi.set(__self__, "enabled", enabled) if events is not None: pulumi.set(__self__, "events", events) @property @pulumi.getter def enabled(self) -> Optional[pulumi.Input[bool]]: return pulumi.get(self, "enabled") 
@enabled.setter def enabled(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enabled", value) @property @pulumi.getter def events(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: return pulumi.get(self, "events") @events.setter def events(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "events", value) @pulumi.input_type class DeploymentGroupBlueGreenDeploymentConfigArgs: def __init__(__self__, *, deployment_ready_option: Optional[pulumi.Input['DeploymentGroupBlueGreenDeploymentConfigDeploymentReadyOptionArgs']] = None, green_fleet_provisioning_option: Optional[pulumi.Input['DeploymentGroupBlueGreenDeploymentConfigGreenFleetProvisioningOptionArgs']] = None, terminate_blue_instances_on_deployment_success: Optional[pulumi.Input['DeploymentGroupBlueGreenDeploymentConfigTerminateBlueInstancesOnDeploymentSuccessArgs']] = None): if deployment_ready_option is not None: pulumi.set(__self__, "deployment_ready_option", deployment_ready_option) if green_fleet_provisioning_option is not None: pulumi.set(__self__, "green_fleet_provisioning_option", green_fleet_provisioning_option) if terminate_blue_instances_on_deployment_success is not None: pulumi.set(__self__, "terminate_blue_instances_on_deployment_success", terminate_blue_instances_on_deployment_success) @property @pulumi.getter(name="deploymentReadyOption") def deployment_ready_option(self) -> Optional[pulumi.Input['DeploymentGroupBlueGreenDeploymentConfigDeploymentReadyOptionArgs']]: return pulumi.get(self, "deployment_ready_option") @deployment_ready_option.setter def deployment_ready_option(self, value: Optional[pulumi.Input['DeploymentGroupBlueGreenDeploymentConfigDeploymentReadyOptionArgs']]): pulumi.set(self, "deployment_ready_option", value) @property @pulumi.getter(name="greenFleetProvisioningOption") def green_fleet_provisioning_option(self) -> Optional[pulumi.Input['DeploymentGroupBlueGreenDeploymentConfigGreenFleetProvisioningOptionArgs']]: return pulumi.get(self, "green_fleet_provisioning_option") @green_fleet_provisioning_option.setter def green_fleet_provisioning_option(self, value: Optional[pulumi.Input['DeploymentGroupBlueGreenDeploymentConfigGreenFleetProvisioningOptionArgs']]): pulumi.set(self, "green_fleet_provisioning_option", value) @property @pulumi.getter(name="terminateBlueInstancesOnDeploymentSuccess") def terminate_blue_instances_on_deployment_success(self) -> Optional[pulumi.Input['DeploymentGroupBlueGreenDeploymentConfigTerminateBlueInstancesOnDeploymentSuccessArgs']]: return pulumi.get(self, "terminate_blue_instances_on_deployment_success") @terminate_blue_instances_on_deployment_success.setter def terminate_blue_instances_on_deployment_success(self, value: Optional[pulumi.Input['DeploymentGroupBlueGreenDeploymentConfigTerminateBlueInstancesOnDeploymentSuccessArgs']]): pulumi.set(self, "terminate_blue_instances_on_deployment_success", value) @pulumi.input_type class DeploymentGroupBlueGreenDeploymentConfigDeploymentReadyOptionArgs: def __init__(__self__, *, action_on_timeout: Optional[pulumi.Input[str]] = None, wait_time_in_minutes: Optional[pulumi.Input[int]] = None): if action_on_timeout is not None: pulumi.set(__self__, "action_on_timeout", action_on_timeout) if wait_time_in_minutes is not None: pulumi.set(__self__, "wait_time_in_minutes", wait_time_in_minutes) @property @pulumi.getter(name="actionOnTimeout") def action_on_timeout(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "action_on_timeout") @action_on_timeout.setter def 
action_on_timeout(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "action_on_timeout", value) @property @pulumi.getter(name="waitTimeInMinutes") def wait_time_in_minutes(self) -> Optional[pulumi.Input[int]]: return pulumi.get(self, "wait_time_in_minutes") @wait_time_in_minutes.setter def wait_time_in_minutes(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "wait_time_in_minutes", value) @pulumi.input_type class DeploymentGroupBlueGreenDeploymentConfigGreenFleetProvisioningOptionArgs: def __init__(__self__, *, action: Optional[pulumi.Input[str]] = None): if action is not None: pulumi.set(__self__, "action", action) @property @pulumi.getter def action(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "action") @action.setter def action(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "action", value) @pulumi.input_type class DeploymentGroupBlueGreenDeploymentConfigTerminateBlueInstancesOnDeploymentSuccessArgs: def __init__(__self__, *, action: Optional[pulumi.Input[str]] = None, termination_wait_time_in_minutes: Optional[pulumi.Input[int]] = None): if action is not None: pulumi.set(__self__, "action", action) if termination_wait_time_in_minutes is not None: pulumi.set(__self__, "termination_wait_time_in_minutes", termination_wait_time_in_minutes) @property @pulumi.getter def action(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "action") @action.setter def action(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "action", value) @property @pulumi.getter(name="terminationWaitTimeInMinutes") def termination_wait_time_in_minutes(self) -> Optional[pulumi.Input[int]]: return pulumi.get(self, "termination_wait_time_in_minutes") @termination_wait_time_in_minutes.setter def termination_wait_time_in_minutes(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "termination_wait_time_in_minutes", value) @pulumi.input_type class DeploymentGroupDeploymentStyleArgs: def __init__(__self__, *, deployment_option: Optional[pulumi.Input[str]] = None, deployment_type: Optional[pulumi.Input[str]] = None): if deployment_option is not None: pulumi.set(__self__, "deployment_option", deployment_option) if deployment_type is not None: pulumi.set(__self__, "deployment_type", deployment_type) @property @pulumi.getter(name="deploymentOption") def deployment_option(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "deployment_option") @deployment_option.setter def deployment_option(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "deployment_option", value) @property @pulumi.getter(name="deploymentType") def deployment_type(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "deployment_type") @deployment_type.setter def deployment_type(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "deployment_type", value) @pulumi.input_type class DeploymentGroupEc2TagFilterArgs: def __init__(__self__, *, key: Optional[pulumi.Input[str]] = None, type: Optional[pulumi.Input[str]] = None, value: Optional[pulumi.Input[str]] = None): if key is not None: pulumi.set(__self__, "key", key) if type is not None: pulumi.set(__self__, "type", type) if value is not None: pulumi.set(__self__, "value", value) @property @pulumi.getter def key(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "key") @key.setter def key(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "key", value) @property @pulumi.getter def type(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "type") @type.setter def 
type(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "type", value) @property @pulumi.getter def value(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "value") @value.setter def value(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "value", value) @pulumi.input_type class DeploymentGroupEc2TagSetArgs: def __init__(__self__, *, ec2_tag_filters: Optional[pulumi.Input[Sequence[pulumi.Input['DeploymentGroupEc2TagSetEc2TagFilterArgs']]]] = None): if ec2_tag_filters is not None: pulumi.set(__self__, "ec2_tag_filters", ec2_tag_filters) @property @pulumi.getter(name="ec2TagFilters") def ec2_tag_filters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DeploymentGroupEc2TagSetEc2TagFilterArgs']]]]: return pulumi.get(self, "ec2_tag_filters") @ec2_tag_filters.setter def ec2_tag_filters(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['DeploymentGroupEc2TagSetEc2TagFilterArgs']]]]): pulumi.set(self, "ec2_tag_filters", value) @pulumi.input_type class DeploymentGroupEc2TagSetEc2TagFilterArgs: def __init__(__self__, *, key: Optional[pulumi.Input[str]] = None, type: Optional[pulumi.Input[str]] = None, value: Optional[pulumi.Input[str]] = None): if key is not None: pulumi.set(__self__, "key", key) if type is not None: pulumi.set(__self__, "type", type) if value is not None: pulumi.set(__self__, "value", value) @property @pulumi.getter def key(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "key") @key.setter def key(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "key", value) @property @pulumi.getter def type(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "type") @type.setter def type(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "type", value) @property @pulumi.getter def value(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "value") @value.setter def value(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "value", value) @pulumi.input_type class DeploymentGroupEcsServiceArgs: def __init__(__self__, *, cluster_name: pulumi.Input[str], service_name: pulumi.Input[str]): pulumi.set(__self__, "cluster_name", cluster_name) pulumi.set(__self__, "service_name", service_name) @property @pulumi.getter(name="clusterName") def cluster_name(self) -> pulumi.Input[str]: return pulumi.get(self, "cluster_name") @cluster_name.setter def cluster_name(self, value: pulumi.Input[str]): pulumi.set(self, "cluster_name", value) @property @pulumi.getter(name="serviceName") def service_name(self) -> pulumi.Input[str]: return pulumi.get(self, "service_name") @service_name.setter def service_name(self, value: pulumi.Input[str]): pulumi.set(self, "service_name", value) @pulumi.input_type class DeploymentGroupLoadBalancerInfoArgs: def __init__(__self__, *, elb_infos: Optional[pulumi.Input[Sequence[pulumi.Input['DeploymentGroupLoadBalancerInfoElbInfoArgs']]]] = None, target_group_infos: Optional[pulumi.Input[Sequence[pulumi.Input['DeploymentGroupLoadBalancerInfoTargetGroupInfoArgs']]]] = None, target_group_pair_info: Optional[pulumi.Input['DeploymentGroupLoadBalancerInfoTargetGroupPairInfoArgs']] = None): if elb_infos is not None: pulumi.set(__self__, "elb_infos", elb_infos) if target_group_infos is not None: pulumi.set(__self__, "target_group_infos", target_group_infos) if target_group_pair_info is not None: pulumi.set(__self__, "target_group_pair_info", target_group_pair_info) @property @pulumi.getter(name="elbInfos") def elb_infos(self) -> 
Optional[pulumi.Input[Sequence[pulumi.Input['DeploymentGroupLoadBalancerInfoElbInfoArgs']]]]: return pulumi.get(self, "elb_infos") @elb_infos.setter def elb_infos(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['DeploymentGroupLoadBalancerInfoElbInfoArgs']]]]): pulumi.set(self, "elb_infos", value) @property @pulumi.getter(name="targetGroupInfos") def target_group_infos(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DeploymentGroupLoadBalancerInfoTargetGroupInfoArgs']]]]: return pulumi.get(self, "target_group_infos") @target_group_infos.setter def target_group_infos(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['DeploymentGroupLoadBalancerInfoTargetGroupInfoArgs']]]]): pulumi.set(self, "target_group_infos", value) @property @pulumi.getter(name="targetGroupPairInfo") def target_group_pair_info(self) -> Optional[pulumi.Input['DeploymentGroupLoadBalancerInfoTargetGroupPairInfoArgs']]: return pulumi.get(self, "target_group_pair_info") @target_group_pair_info.setter def target_group_pair_info(self, value: Optional[pulumi.Input['DeploymentGroupLoadBalancerInfoTargetGroupPairInfoArgs']]): pulumi.set(self, "target_group_pair_info", value) @pulumi.input_type class DeploymentGroupLoadBalancerInfoElbInfoArgs: def __init__(__self__, *, name: Optional[pulumi.Input[str]] = None): if name is not None: pulumi.set(__self__, "name", name) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @pulumi.input_type class DeploymentGroupLoadBalancerInfoTargetGroupInfoArgs: def __init__(__self__, *, name: Optional[pulumi.Input[str]] = None): if name is not None: pulumi.set(__self__, "name", name) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @pulumi.input_type class DeploymentGroupLoadBalancerInfoTargetGroupPairInfoArgs: def __init__(__self__, *, prod_traffic_route: pulumi.Input['DeploymentGroupLoadBalancerInfoTargetGroupPairInfoProdTrafficRouteArgs'], target_groups: pulumi.Input[Sequence[pulumi.Input['DeploymentGroupLoadBalancerInfoTargetGroupPairInfoTargetGroupArgs']]], test_traffic_route: Optional[pulumi.Input['DeploymentGroupLoadBalancerInfoTargetGroupPairInfoTestTrafficRouteArgs']] = None): pulumi.set(__self__, "prod_traffic_route", prod_traffic_route) pulumi.set(__self__, "target_groups", target_groups) if test_traffic_route is not None: pulumi.set(__self__, "test_traffic_route", test_traffic_route) @property @pulumi.getter(name="prodTrafficRoute") def prod_traffic_route(self) -> pulumi.Input['DeploymentGroupLoadBalancerInfoTargetGroupPairInfoProdTrafficRouteArgs']: return pulumi.get(self, "prod_traffic_route") @prod_traffic_route.setter def prod_traffic_route(self, value: pulumi.Input['DeploymentGroupLoadBalancerInfoTargetGroupPairInfoProdTrafficRouteArgs']): pulumi.set(self, "prod_traffic_route", value) @property @pulumi.getter(name="targetGroups") def target_groups(self) -> pulumi.Input[Sequence[pulumi.Input['DeploymentGroupLoadBalancerInfoTargetGroupPairInfoTargetGroupArgs']]]: return pulumi.get(self, "target_groups") @target_groups.setter def target_groups(self, value: pulumi.Input[Sequence[pulumi.Input['DeploymentGroupLoadBalancerInfoTargetGroupPairInfoTargetGroupArgs']]]): pulumi.set(self, "target_groups", value) @property @pulumi.getter(name="testTrafficRoute") def 
test_traffic_route(self) -> Optional[pulumi.Input['DeploymentGroupLoadBalancerInfoTargetGroupPairInfoTestTrafficRouteArgs']]: return pulumi.get(self, "test_traffic_route") @test_traffic_route.setter def test_traffic_route(self, value: Optional[pulumi.Input['DeploymentGroupLoadBalancerInfoTargetGroupPairInfoTestTrafficRouteArgs']]): pulumi.set(self, "test_traffic_route", value) @pulumi.input_type class DeploymentGroupLoadBalancerInfoTargetGroupPairInfoProdTrafficRouteArgs: def __init__(__self__, *, listener_arns: pulumi.Input[Sequence[pulumi.Input[str]]]): pulumi.set(__self__, "listener_arns", listener_arns) @property @pulumi.getter(name="listenerArns") def listener_arns(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]: return pulumi.get(self, "listener_arns") @listener_arns.setter def listener_arns(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]): pulumi.set(self, "listener_arns", value) @pulumi.input_type class DeploymentGroupLoadBalancerInfoTargetGroupPairInfoTargetGroupArgs: def __init__(__self__, *, name: pulumi.Input[str]): pulumi.set(__self__, "name", name) @property @pulumi.getter def name(self) -> pulumi.Input[str]: return pulumi.get(self, "name") @name.setter def name(self, value: pulumi.Input[str]): pulumi.set(self, "name", value) @pulumi.input_type class DeploymentGroupLoadBalancerInfoTargetGroupPairInfoTestTrafficRouteArgs: def __init__(__self__, *, listener_arns: pulumi.Input[Sequence[pulumi.Input[str]]]): pulumi.set(__self__, "listener_arns", listener_arns) @property @pulumi.getter(name="listenerArns") def listener_arns(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]: return pulumi.get(self, "listener_arns") @listener_arns.setter def listener_arns(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]): pulumi.set(self, "listener_arns", value) @pulumi.input_type class DeploymentGroupOnPremisesInstanceTagFilterArgs: def __init__(__self__, *, key: Optional[pulumi.Input[str]] = None, type: Optional[pulumi.Input[str]] = None, value: Optional[pulumi.Input[str]] = None): if key is not None: pulumi.set(__self__, "key", key) if type is not None: pulumi.set(__self__, "type", type) if value is not None: pulumi.set(__self__, "value", value) @property @pulumi.getter def key(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "key") @key.setter def key(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "key", value) @property @pulumi.getter def type(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "type") @type.setter def type(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "type", value) @property @pulumi.getter def value(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "value") @value.setter def value(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "value", value) @pulumi.input_type class DeploymentGroupTriggerConfigurationArgs: def __init__(__self__, *, trigger_events: pulumi.Input[Sequence[pulumi.Input[str]]], trigger_name: pulumi.Input[str], trigger_target_arn: pulumi.Input[str]): pulumi.set(__self__, "trigger_events", trigger_events) pulumi.set(__self__, "trigger_name", trigger_name) pulumi.set(__self__, "trigger_target_arn", trigger_target_arn) @property @pulumi.getter(name="triggerEvents") def trigger_events(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]: return pulumi.get(self, "trigger_events") @trigger_events.setter def trigger_events(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]): pulumi.set(self, "trigger_events", value) @property @pulumi.getter(name="triggerName") def 
trigger_name(self) -> pulumi.Input[str]: return pulumi.get(self, "trigger_name") @trigger_name.setter def trigger_name(self, value: pulumi.Input[str]): pulumi.set(self, "trigger_name", value) @property @pulumi.getter(name="triggerTargetArn") def trigger_target_arn(self) -> pulumi.Input[str]: return pulumi.get(self, "trigger_target_arn") @trigger_target_arn.setter def trigger_target_arn(self, value: pulumi.Input[str]): pulumi.set(self, "trigger_target_arn", value)
true
true
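
The record above is the Pulumi-generated input-type module for AWS CodeDeploy deployment groups. As a rough illustration of how these argument classes are meant to be composed, the sketch below builds a blue/green configuration; the module path pulumi_aws.codedeploy, the literal option values, and the surrounding program are assumptions for illustration only and are not part of the record.

# Hypothetical usage sketch (not part of the dataset record above).
# Assumes the generated classes are re-exported from pulumi_aws.codedeploy,
# as Pulumi providers normally do for their input types.
import pulumi_aws as aws

blue_green_config = aws.codedeploy.DeploymentGroupBlueGreenDeploymentConfigArgs(
    deployment_ready_option=aws.codedeploy.DeploymentGroupBlueGreenDeploymentConfigDeploymentReadyOptionArgs(
        action_on_timeout="STOP_DEPLOYMENT",  # wait for manual traffic rerouting
        wait_time_in_minutes=30,
    ),
    terminate_blue_instances_on_deployment_success=aws.codedeploy.DeploymentGroupBlueGreenDeploymentConfigTerminateBlueInstancesOnDeploymentSuccessArgs(
        action="TERMINATE",
        termination_wait_time_in_minutes=15,
    ),
)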
f70dd315e27de4268e1123bfe38e0b2334754b73
696
py
Python
securityheaders/checkers/csp/roonlychecker.py
th3cyb3rc0p/securityheaders
941264be581dc01afe28f6416f2d7bed79aecfb3
[ "Apache-2.0" ]
151
2018-07-29T22:34:43.000Z
2022-03-22T05:08:27.000Z
securityheaders/checkers/csp/roonlychecker.py
th3cyb3rc0p/securityheaders
941264be581dc01afe28f6416f2d7bed79aecfb3
[ "Apache-2.0" ]
5
2019-04-24T07:31:36.000Z
2021-04-15T14:31:23.000Z
securityheaders/checkers/csp/roonlychecker.py
th3cyb3rc0p/securityheaders
941264be581dc01afe28f6416f2d7bed79aecfb3
[ "Apache-2.0" ]
42
2018-07-31T08:18:59.000Z
2022-03-28T08:18:32.000Z
from .checker import CSPChecker
from .checkerro import CSPReportOnlyChecker

from securityheaders.checkers import Finding,FindingType,FindingSeverity


class CSPReportOnlyNoCSPChecker(CSPReportOnlyChecker, CSPChecker):

    def check(self, headers, opt_options=dict()):
        rocsp = CSPReportOnlyChecker.getcsp(self, headers)
        csp = CSPChecker.getcsp(self, headers)
        if not csp and rocsp:
            description = "The CSP is not enforced as only the content-security-policy-report-only header is present. Can you set the content-security-policy?"
            return [Finding(rocsp.headerkey, FindingType.REPORT_ONLY, description, FindingSeverity.INFO, None, None)]
        return []
49.714286
159
0.75431
from .checker import CSPChecker
from .checkerro import CSPReportOnlyChecker

from securityheaders.checkers import Finding,FindingType,FindingSeverity


class CSPReportOnlyNoCSPChecker(CSPReportOnlyChecker, CSPChecker):

    def check(self, headers, opt_options=dict()):
        rocsp = CSPReportOnlyChecker.getcsp(self, headers)
        csp = CSPChecker.getcsp(self, headers)
        if not csp and rocsp:
            description = "The CSP is not enforced as only the content-security-policy-report-only header is present. Can you set the content-security-policy?"
            return [Finding(rocsp.headerkey, FindingType.REPORT_ONLY, description, FindingSeverity.INFO, None, None)]
        return []
true
true
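
A minimal, hypothetical driver for the checker in the record above: the plain-dict header format and the no-argument constructor are assumptions, since the rest of the securityheaders API is not shown here.

# Hypothetical usage sketch (not part of the dataset record above).
checker = CSPReportOnlyNoCSPChecker()
headers = {
    # Report-only policy present, enforced policy absent: one INFO finding expected.
    'content-security-policy-report-only': "default-src 'self'",
}
findings = checker.check(headers)
for finding in findings:
    print(finding)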
f70dd473c6c9cff52add68fbc53b82b52967f71d
1,955
py
Python
corehq/apps/linked_domain/applications.py
rochakchauhan/commcare-hq
aa7ab3c2d0c51fe10f2b51b08101bb4b5a376236
[ "BSD-3-Clause" ]
null
null
null
corehq/apps/linked_domain/applications.py
rochakchauhan/commcare-hq
aa7ab3c2d0c51fe10f2b51b08101bb4b5a376236
[ "BSD-3-Clause" ]
null
null
null
corehq/apps/linked_domain/applications.py
rochakchauhan/commcare-hq
aa7ab3c2d0c51fe10f2b51b08101bb4b5a376236
[ "BSD-3-Clause" ]
null
null
null
from corehq.apps.app_manager.dbaccessors import (
    get_brief_apps_in_domain,
    get_latest_released_app,
    get_latest_released_app_versions_by_app_id,
)
from corehq.apps.linked_domain.models import DomainLink
from corehq.apps.linked_domain.remote_accessors import (
    get_brief_apps,
    get_latest_released_versions_by_app_id,
    get_released_app,
)


def get_master_app_briefs(domain_link, family_id):
    if domain_link.is_remote:
        apps = get_brief_apps(domain_link)
    else:
        apps = get_brief_apps_in_domain(domain_link.master_domain, include_remote=False)

    # Ignore deleted, linked and remote apps
    return [app for app in apps if family_id in [app._id, app.family_id] and app.doc_type == 'Application']


def get_latest_master_app_release(domain_link, app_id):
    master_domain = domain_link.master_domain
    linked_domain = domain_link.linked_domain
    if domain_link.is_remote:
        return get_released_app(master_domain, app_id, linked_domain, domain_link.remote_details)
    else:
        return get_latest_released_app(master_domain, app_id)


def get_latest_master_releases_versions(domain_link):
    if domain_link.is_remote:
        return get_latest_released_versions_by_app_id(domain_link)
    else:
        return get_latest_released_app_versions_by_app_id(domain_link.master_domain)


def create_linked_app(master_domain, master_id, target_domain, target_name, remote_details=None):
    from corehq.apps.app_manager.models import LinkedApplication
    linked_app = LinkedApplication(
        name=target_name,
        domain=target_domain,
    )
    return link_app(linked_app, master_domain, master_id, remote_details)


def link_app(linked_app, master_domain, master_id, remote_details=None):
    DomainLink.link_domains(linked_app.domain, master_domain, remote_details)
    linked_app.family_id = master_id
    linked_app.doc_type = 'LinkedApplication'
    linked_app.save()
    return linked_app
34.910714
107
0.783632
from corehq.apps.app_manager.dbaccessors import (
    get_brief_apps_in_domain,
    get_latest_released_app,
    get_latest_released_app_versions_by_app_id,
)
from corehq.apps.linked_domain.models import DomainLink
from corehq.apps.linked_domain.remote_accessors import (
    get_brief_apps,
    get_latest_released_versions_by_app_id,
    get_released_app,
)


def get_master_app_briefs(domain_link, family_id):
    if domain_link.is_remote:
        apps = get_brief_apps(domain_link)
    else:
        apps = get_brief_apps_in_domain(domain_link.master_domain, include_remote=False)

    return [app for app in apps if family_id in [app._id, app.family_id] and app.doc_type == 'Application']


def get_latest_master_app_release(domain_link, app_id):
    master_domain = domain_link.master_domain
    linked_domain = domain_link.linked_domain
    if domain_link.is_remote:
        return get_released_app(master_domain, app_id, linked_domain, domain_link.remote_details)
    else:
        return get_latest_released_app(master_domain, app_id)


def get_latest_master_releases_versions(domain_link):
    if domain_link.is_remote:
        return get_latest_released_versions_by_app_id(domain_link)
    else:
        return get_latest_released_app_versions_by_app_id(domain_link.master_domain)


def create_linked_app(master_domain, master_id, target_domain, target_name, remote_details=None):
    from corehq.apps.app_manager.models import LinkedApplication
    linked_app = LinkedApplication(
        name=target_name,
        domain=target_domain,
    )
    return link_app(linked_app, master_domain, master_id, remote_details)


def link_app(linked_app, master_domain, master_id, remote_details=None):
    DomainLink.link_domains(linked_app.domain, master_domain, remote_details)
    linked_app.family_id = master_id
    linked_app.doc_type = 'LinkedApplication'
    linked_app.save()
    return linked_app
true
true
f70dd6369cf11d959d98254bff5293d783855f79
4,442
py
Python
examples/example_shortTermExtreme_2.py
ryancoe/WDRT
039d53b13b8d6ee98bbbab69d6433af4f709e6c0
[ "Apache-2.0" ]
null
null
null
examples/example_shortTermExtreme_2.py
ryancoe/WDRT
039d53b13b8d6ee98bbbab69d6433af4f709e6c0
[ "Apache-2.0" ]
null
null
null
examples/example_shortTermExtreme_2.py
ryancoe/WDRT
039d53b13b8d6ee98bbbab69d6433af4f709e6c0
[ "Apache-2.0" ]
null
null
null
import numpy as np
import matplotlib.pyplot as plt
import WDRT.shortTermExtreme as ecm
import WDRT.fatigue as fatigue

method = 1
# 1 - All peaks Weibull
# 2 - Weibull tail fit
# 3 - Peaks over threshold
# 4 - Block maxima GEV
# 5 - Block maxima Gumbel

# load global peaks
t_peaks = np.loadtxt(r'C:\full\filepath\to\WDRT\examples\data\t.dat')
peaks = np.loadtxt(r'C:\full\filepath\to\WDRT_py3\WDRT\examples\data\peaks.dat')/1000.

# get the 1-hour extreme distribution using the method selected above
x_e = np.linspace(0, 2 * np.max(peaks), 10000)
t_x = (t_peaks[-1]-t_peaks[0]) + ((t_peaks[-1]-t_peaks[0])/(1.*len(peaks)))
t_st = 1. * 60. * 60.
if method==1:
    stextreme_dist, peaks_dist, _ = ecm.extremeDistribution_Weibull(x=peaks, x_e=x_e, t_x=t_x, t_st=t_st)
elif method==2:
    stextreme_dist, peaks_dist, _, _, _ = ecm.extremeDistribution_WeibullTailFit(x=peaks, x_e=x_e, t_x=t_x, t_st=t_st)
elif method==3:
    thresh = np.mean(peaks) + 1.4*np.std(peaks)
    thresh_x = np.min(x_e[x_e>thresh])
    stextreme_dist, peaks_dist, pot_dist, _ = ecm.extremeDistribution_peaksOverThreshold(x=peaks, x_e=x_e, t_x=t_x, t_st=t_st, u=thresh)
elif method==4:
    stextreme_dist,_,bm = ecm.extremeDistribution_blockMaximaGEV(x=peaks, t=t_peaks, t_st=t_st)
elif method == 5:
    stextreme_dist,_,bm = ecm.extremeDistribution_blockMaximaGumb(x=peaks, t=t_peaks, t_st=t_st)

# goodness of fit plots
if method==1 or method==2:
    bm = ecm.blockMaxima(x=peaks, t=t_peaks, t_st=t_st)
    _ = ecm.goodnessOfFitPlots(data=peaks, prob_func=peaks_dist, np_return=1000001, x_pdf=x_e, bins_pdf=20, response_name='PTO Force', response_name_2='Peaks',response_units='kN')
if not method==3:
    fig_gof = ecm.goodnessOfFitPlots(data=bm, prob_func=stextreme_dist, np_return=10001, x_pdf=x_e, bins_pdf=20, response_name='PTO Force', response_name_2='1-hr Extreme',response_units='kN')
if method==3:
    bm = ecm.blockMaxima(x=peaks, t=t_peaks, t_st=t_st)
    _ = ecm.goodnessOfFitPlots(data=peaks[peaks>thresh_x], prob_func=peaks_dist, np_return=100001, x_pdf=x_e[x_e>thresh_x], bins_pdf=20,m_prob=1.*len(peaks[peaks<thresh_x]), response_name='PTO Force', response_name_2='Peaks',response_units='kN')
    _ = ecm.goodnessOfFitPlots(data=peaks[peaks>thresh]-thresh, prob_func=pot_dist, np_return=100001, x_pdf=x_e[x_e>thresh]-thresh, bins_pdf=20, response_name='PTO Force', response_name_2='Peaks Over Threshold',response_units='kN')
    fig_gof = ecm.goodnessOfFitPlots(data=bm, prob_func=stextreme_dist, np_return=10001, x_pdf=x_e[x_e>thresh_x], bins_pdf=20, response_name='PTO Force', response_name_2='1-hr Extreme',response_units='kN')

# plot
plt.figure()
if method==3:
    plt.plot(t_peaks[peaks<thresh], peaks[peaks<thresh], 'ko', alpha=0.2)
    plt.plot(t_peaks[peaks>thresh], peaks[peaks>thresh], 'go')
    plt.plot([0, t_peaks[-1]], [thresh, thresh], 'r--')
else:
    plt.plot(t_peaks, peaks, 'go')
    plt.plot([0, t_peaks[-1]], [0, 0], 'k--')
plt.xlabel('Time, $t$ [s]')
plt.ylabel('Response, $x$')
plt.xlim([0,3600*2])
plt.grid(True)
plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))

plt.figure()
ax = plt.subplot(2, 1, 1)
if method==1 or method==2:
    plt.plot(x_e, peaks_dist.pdf(x_e), 'g-', label='Peak distribution')
if not method==3:
    plt.plot(x_e, stextreme_dist.pdf(x_e), 'r-', label='Extreme distribution')
if method==3:
    plt.plot(x_e[x_e>thresh_x], peaks_dist.pdf(x_e[x_e>thresh_x]), 'g-', label='Peak distribution')
    plt.plot(x_e[x_e>thresh_x], stextreme_dist.pdf(x_e[x_e>thresh_x]), 'r-', label='Extreme distribution')
xlim = ax.get_xlim()
ylim = ax.get_ylim()
if method==3:
    plt.plot([thresh, thresh], [0, ylim[1]], 'k--')
plt.ylim([0,ylim[1]])
plt.xlim([0,xlim[1]])
plt.xlabel('Response, $x$')
plt.ylabel('$PDF(x)$')
plt.grid(True)
plt.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
plt.legend()

ax = plt.subplot(2, 1, 2)
if method==1 or method==2:
    plt.plot(x_e, peaks_dist.cdf(x_e), 'g-')
if not method==3:
    plt.plot(x_e, stextreme_dist.cdf(x_e), 'r-')
if method==3:
    plt.plot(x_e[x_e>thresh_x], peaks_dist.cdf(x_e[x_e>thresh_x]), 'g-')
    plt.plot(x_e[x_e>thresh_x], stextreme_dist.cdf(x_e[x_e>thresh_x]), 'r-')
xlim = ax.get_xlim()
ylim = ax.get_ylim()
if method==3:
    plt.plot([thresh, thresh], [0, ylim[1]], 'k--')
plt.ylim([0,ylim[1]])
plt.xlim([0,xlim[1]])
plt.xlabel('Response, $x$')
plt.ylabel('$CDF(x)$')
plt.grid(True)
plt.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))

plt.show()
42.711538
242
0.716794
import numpy as np
import matplotlib.pyplot as plt
import WDRT.shortTermExtreme as ecm
import WDRT.fatigue as fatigue

method = 1

t_peaks = np.loadtxt(r'C:\full\filepath\to\WDRT\examples\data\t.dat')
peaks = np.loadtxt(r'C:\full\filepath\to\WDRT_py3\WDRT\examples\data\peaks.dat')/1000.

x_e = np.linspace(0, 2 * np.max(peaks), 10000)
t_x = (t_peaks[-1]-t_peaks[0]) + ((t_peaks[-1]-t_peaks[0])/(1.*len(peaks)))
t_st = 1. * 60. * 60.
if method==1:
    stextreme_dist, peaks_dist, _ = ecm.extremeDistribution_Weibull(x=peaks, x_e=x_e, t_x=t_x, t_st=t_st)
elif method==2:
    stextreme_dist, peaks_dist, _, _, _ = ecm.extremeDistribution_WeibullTailFit(x=peaks, x_e=x_e, t_x=t_x, t_st=t_st)
elif method==3:
    thresh = np.mean(peaks) + 1.4*np.std(peaks)
    thresh_x = np.min(x_e[x_e>thresh])
    stextreme_dist, peaks_dist, pot_dist, _ = ecm.extremeDistribution_peaksOverThreshold(x=peaks, x_e=x_e, t_x=t_x, t_st=t_st, u=thresh)
elif method==4:
    stextreme_dist,_,bm = ecm.extremeDistribution_blockMaximaGEV(x=peaks, t=t_peaks, t_st=t_st)
elif method == 5:
    stextreme_dist,_,bm = ecm.extremeDistribution_blockMaximaGumb(x=peaks, t=t_peaks, t_st=t_st)

if method==1 or method==2:
    bm = ecm.blockMaxima(x=peaks, t=t_peaks, t_st=t_st)
    _ = ecm.goodnessOfFitPlots(data=peaks, prob_func=peaks_dist, np_return=1000001, x_pdf=x_e, bins_pdf=20, response_name='PTO Force', response_name_2='Peaks',response_units='kN')
if not method==3:
    fig_gof = ecm.goodnessOfFitPlots(data=bm, prob_func=stextreme_dist, np_return=10001, x_pdf=x_e, bins_pdf=20, response_name='PTO Force', response_name_2='1-hr Extreme',response_units='kN')
if method==3:
    bm = ecm.blockMaxima(x=peaks, t=t_peaks, t_st=t_st)
    _ = ecm.goodnessOfFitPlots(data=peaks[peaks>thresh_x], prob_func=peaks_dist, np_return=100001, x_pdf=x_e[x_e>thresh_x], bins_pdf=20,m_prob=1.*len(peaks[peaks<thresh_x]), response_name='PTO Force', response_name_2='Peaks',response_units='kN')
    _ = ecm.goodnessOfFitPlots(data=peaks[peaks>thresh]-thresh, prob_func=pot_dist, np_return=100001, x_pdf=x_e[x_e>thresh]-thresh, bins_pdf=20, response_name='PTO Force', response_name_2='Peaks Over Threshold',response_units='kN')
    fig_gof = ecm.goodnessOfFitPlots(data=bm, prob_func=stextreme_dist, np_return=10001, x_pdf=x_e[x_e>thresh_x], bins_pdf=20, response_name='PTO Force', response_name_2='1-hr Extreme',response_units='kN')

plt.figure()
if method==3:
    plt.plot(t_peaks[peaks<thresh], peaks[peaks<thresh], 'ko', alpha=0.2)
    plt.plot(t_peaks[peaks>thresh], peaks[peaks>thresh], 'go')
    plt.plot([0, t_peaks[-1]], [thresh, thresh], 'r--')
else:
    plt.plot(t_peaks, peaks, 'go')
    plt.plot([0, t_peaks[-1]], [0, 0], 'k--')
plt.xlabel('Time, $t$ [s]')
plt.ylabel('Response, $x$')
plt.xlim([0,3600*2])
plt.grid(True)
plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))

plt.figure()
ax = plt.subplot(2, 1, 1)
if method==1 or method==2:
    plt.plot(x_e, peaks_dist.pdf(x_e), 'g-', label='Peak distribution')
if not method==3:
    plt.plot(x_e, stextreme_dist.pdf(x_e), 'r-', label='Extreme distribution')
if method==3:
    plt.plot(x_e[x_e>thresh_x], peaks_dist.pdf(x_e[x_e>thresh_x]), 'g-', label='Peak distribution')
    plt.plot(x_e[x_e>thresh_x], stextreme_dist.pdf(x_e[x_e>thresh_x]), 'r-', label='Extreme distribution')
xlim = ax.get_xlim()
ylim = ax.get_ylim()
if method==3:
    plt.plot([thresh, thresh], [0, ylim[1]], 'k--')
plt.ylim([0,ylim[1]])
plt.xlim([0,xlim[1]])
plt.xlabel('Response, $x$')
plt.ylabel('$PDF(x)$')
plt.grid(True)
plt.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
plt.legend()

ax = plt.subplot(2, 1, 2)
if method==1 or method==2:
    plt.plot(x_e, peaks_dist.cdf(x_e), 'g-')
if not method==3:
    plt.plot(x_e, stextreme_dist.cdf(x_e), 'r-')
if method==3:
    plt.plot(x_e[x_e>thresh_x], peaks_dist.cdf(x_e[x_e>thresh_x]), 'g-')
    plt.plot(x_e[x_e>thresh_x], stextreme_dist.cdf(x_e[x_e>thresh_x]), 'r-')
xlim = ax.get_xlim()
ylim = ax.get_ylim()
if method==3:
    plt.plot([thresh, thresh], [0, ylim[1]], 'k--')
plt.ylim([0,ylim[1]])
plt.xlim([0,xlim[1]])
plt.xlabel('Response, $x$')
plt.ylabel('$CDF(x)$')
plt.grid(True)
plt.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
plt.show()
true
true
f70dd65eb99390da27c92eb399edadf5cb48eec9
481
py
Python
lazyops/lazyconfig/__init__.py
trisongz/lazyops
e8021e34172087ea36ac690b692fd80d06688fa1
[ "Apache-2.0" ]
3
2021-07-12T20:43:31.000Z
2021-08-06T09:05:18.000Z
lazyops/lazyconfig/__init__.py
trisongz/lazyops
e8021e34172087ea36ac690b692fd80d06688fa1
[ "Apache-2.0" ]
null
null
null
lazyops/lazyconfig/__init__.py
trisongz/lazyops
e8021e34172087ea36ac690b692fd80d06688fa1
[ "Apache-2.0" ]
null
null
null
from .nginx import Nginx, NginxConfig
from . import tfserving_pb2 as tfserving_config
from . import tfserving_api
from ._base import TFSModelVersion, TFSModelConfig
from .tfserving_pb2 import TFSConfig
from .tfserving_api import TFSModelEndpoint, TFServeModel

__all__ = [
    'Nginx',
    'NginxConfig',
    'tfserving_config',
    'tfserving_pb2',
    'tfserving_api',
    'TFSModelVersion',
    'TFSModelConfig',
    'TFSConfig',
    'TFSModelEndpoint',
    'TFServeModel'
]
22.904762
57
0.733888
from .nginx import Nginx, NginxConfig
from . import tfserving_pb2 as tfserving_config
from . import tfserving_api
from ._base import TFSModelVersion, TFSModelConfig
from .tfserving_pb2 import TFSConfig
from .tfserving_api import TFSModelEndpoint, TFServeModel

__all__ = [
    'Nginx',
    'NginxConfig',
    'tfserving_config',
    'tfserving_pb2',
    'tfserving_api',
    'TFSModelVersion',
    'TFSModelConfig',
    'TFSConfig',
    'TFSModelEndpoint',
    'TFServeModel'
]
true
true
f70dd7f719724433fbd2b992f920c229ba32ff63
45
py
Python
Num/__init__.py
Ashokkommi0001/patterns
daa1a1d8f3bc6e021e02a0e34458e2c178fc71d2
[ "MIT" ]
2
2021-03-17T12:08:22.000Z
2021-03-17T12:11:10.000Z
Num/__init__.py
Ashokkommi0001/patterns
daa1a1d8f3bc6e021e02a0e34458e2c178fc71d2
[ "MIT" ]
null
null
null
Num/__init__.py
Ashokkommi0001/patterns
daa1a1d8f3bc6e021e02a0e34458e2c178fc71d2
[ "MIT" ]
1
2021-03-17T11:49:39.000Z
2021-03-17T11:49:39.000Z
print("We Created Sub_Package for Numbers")
22.5
44
0.777778
print("We Created Sub_Package for Numbers")
true
true
f70dd94d9dda9ed5dd7b8a20066b99fb23a08f96
1,188
py
Python
maayanlab_bioinformatics/plotting/upset.py
MaayanLab/maayanlab-bioinformatics
f84bda02a8841a65d4c72e491129cdc339fb73b3
[ "Apache-2.0" ]
4
2020-07-16T11:49:59.000Z
2021-08-03T00:54:16.000Z
maayanlab_bioinformatics/plotting/upset.py
MaayanLab/maayanlab-bioinformatics
f84bda02a8841a65d4c72e491129cdc339fb73b3
[ "Apache-2.0" ]
2
2020-05-21T17:04:30.000Z
2022-02-14T21:29:54.000Z
maayanlab_bioinformatics/plotting/upset.py
MaayanLab/maayanlab-bioinformatics
f84bda02a8841a65d4c72e491129cdc339fb73b3
[ "Apache-2.0" ]
null
null
null
import itertools
import pandas as pd
from typing import Dict, Set, Hashable

def upset_from_dict_of_sets(inputs: Dict[Hashable, Set[Hashable]]):
  ''' Given a dictionary of sets, produce input ready for `upsetplot` python package

  We produce this input by computing set intersections of all relevant combinations
  of sets interacting with one another.

  Example:
  ```python
  import upsetplot
  from maayanlab_bioinformatics.plotting import upset_from_dict_of_sets
  upsetplot.plot(upset_from_dict_of_sets({
    'A': {'a', 'b', 'c'},
    'B': {'b', 'c', 'd'},
    'C': {'d', 'e', 'f'},
  }))
  ```

  :param inputs: (Dict[Hashable, Set[Hashable]]) Several named sets
  :return: (pd.DataFrame) in a form ready for `upsetplot.plot`
  '''
  sets = []
  for n in range(1, len(inputs)+1):
    if n == 1:
      it = [[k] for k in inputs.keys()]
    else:
      it = map(list, itertools.combinations(inputs.keys(), n))
    for V in it:
      size = len(inputs[V[0]] if n == 1 else set.intersection(*[inputs[v] for v in V]))
      if size > 0:
        sets.append(dict({vv: vv in V for vv in inputs.keys()}, size=size))
  return pd.DataFrame(sets).groupby(list(inputs.keys()))['size'].sum()
33.942857
87
0.651515
import itertools
import pandas as pd
from typing import Dict, Set, Hashable

def upset_from_dict_of_sets(inputs: Dict[Hashable, Set[Hashable]]):
  sets = []
  for n in range(1, len(inputs)+1):
    if n == 1:
      it = [[k] for k in inputs.keys()]
    else:
      it = map(list, itertools.combinations(inputs.keys(), n))
    for V in it:
      size = len(inputs[V[0]] if n == 1 else set.intersection(*[inputs[v] for v in V]))
      if size > 0:
        sets.append(dict({vv: vv in V for vv in inputs.keys()}, size=size))
  return pd.DataFrame(sets).groupby(list(inputs.keys()))['size'].sum()
true
true
f70dd9e9a70a580acef12353c814e75039a923c7
1,111
py
Python
python3_exercicios_feitos/Desafio100.py
LouiMaxine/python3-exercicios-cursoemvideo
782f983829a594496262ec30b87d545e928f7322
[ "MIT" ]
null
null
null
python3_exercicios_feitos/Desafio100.py
LouiMaxine/python3-exercicios-cursoemvideo
782f983829a594496262ec30b87d545e928f7322
[ "MIT" ]
null
null
null
python3_exercicios_feitos/Desafio100.py
LouiMaxine/python3-exercicios-cursoemvideo
782f983829a594496262ec30b87d545e928f7322
[ "MIT" ]
null
null
null
#FUPQ tenha uma lista chamada números e duas funções chamadas sorteio() e somaPar(). A primeira função vai sortear 5 números e vai colocá-las dentro da lista e a segunda função vai mostrar a soma entre todos os valores PARES sorteados pela função anterior.
#def sorteio(numeros):
#    for i in range(0,5):
#        numeros.append(0,100)
#    print(numeros)
#def somaPar(numeros):
#    s = 0
#    while len(numeros)!=0:
#        if numeros[0]%2==0:
#            s += numeros[0]
#        numeros.remove()
#print(f'A soma entre os número pares é {s}')
#numeros =[4,2,3]
#somaPar(numeros)

from random import randint
from time import sleep

def sorteia(lista):
    print('Sorteando 5 valores da lista ', end='')
    for cont in range(0,5):
        n = randint(1,10)
        lista.append(n)
        print(f'{n} ',end='', flush=True)
        sleep(1)
    print('PRONTO!')

def somaPar(lista):
    soma = 0
    for valor in lista:
        if valor%2 ==0:
            soma+=valor
    print(f'Somando os valores pares de {lista}, temos {soma}')

numeros = []
sorteia(numeros)
somaPar(numeros)
20.962264
256
0.615662
from random import randint
from time import sleep

def sorteia(lista):
    print('Sorteando 5 valores da lista ', end='')
    for cont in range(0,5):
        n = randint(1,10)
        lista.append(n)
        print(f'{n} ',end='', flush=True)
        sleep(1)
    print('PRONTO!')

def somaPar(lista):
    soma = 0
    for valor in lista:
        if valor%2 ==0:
            soma+=valor
    print(f'Somando os valores pares de {lista}, temos {soma}')

numeros = []
sorteia(numeros)
somaPar(numeros)
true
true
f70dda2416705ecdb81d8911d84da03b93d8f992
5,167
py
Python
src/OCR_CNN_Trainning.py
ludwigjer/visualsudoku
a5ed257edfda45123ef3779b8181d5f27412ea50
[ "MIT" ]
null
null
null
src/OCR_CNN_Trainning.py
ludwigjer/visualsudoku
a5ed257edfda45123ef3779b8181d5f27412ea50
[ "MIT" ]
null
null
null
src/OCR_CNN_Trainning.py
ludwigjer/visualsudoku
a5ed257edfda45123ef3779b8181d5f27412ea50
[ "MIT" ]
null
null
null
import numpy as np import cv2 import os from sklearn.model_selection import train_test_split import matplotlib.pyplot as plt from keras.preprocessing.image import ImageDataGenerator from keras.utils.np_utils import to_categorical from keras.models import Sequential from keras.layers import Dense from keras.optimizers import Adam from keras.layers import Dropout,Flatten from keras.layers.convolutional import Conv2D,MaxPooling2D import pickle from tensorflow.keras.layers import Activation, Dense, Conv2D, Dropout, Flatten, MaxPooling2D # PARAMETERS path = 'numbers/train2' testRatio = 0.2 valRatio = 0.2 imageDimensions= (28,28,3) batchSizeVal= 6 epochsVal = 10 stepsPerEpochVal = 2000 # IMPORTING DATA/IMAGES FROM FOLDERS count = 0 images = [] # LIST CONTAINING ALL THE IMAGES classNo = [] # LIST CONTAINING ALL THE CORRESPONDING CLASS ID OF IMAGES myList = os.listdir(path) print("Total Classes Detected:",len(myList)) noOfClasses = len(myList) print("Importing Classes .......") for x in range (0,noOfClasses): myPicList = os.listdir(path+"/"+str(x)) for y in myPicList: curImg = cv2.imread(path+"/"+str(x)+"/"+y) curImg = cv2.resize(curImg,(28,28)) images.append(curImg) classNo.append(x) print(x,end= " ") print(" ") print("Total Images in Images List = ",len(images)) print("Total IDS in classNo List= ",len(classNo)) # CONVERT TO NUMPY ARRAY images = np.array(images) classNo = np.array(classNo) print(images.shape) print(classNo.shape) # SPLITTING THE DATA X_train,X_test,y_train,y_test = train_test_split(images,classNo,test_size=testRatio) X_train,X_validation,y_train,y_validation = train_test_split(X_train,y_train,test_size=valRatio) print(X_train.shape) print(X_test.shape) print(X_validation.shape) # PLOT BAR CHART FOR DISTRIBUTION OF IMAGES numOfSamples= [] for x in range(0,noOfClasses): #print(len(np.where(y_train==x)[0])) numOfSamples.append(len(np.where(y_train==x)[0])) print(numOfSamples) plt.figure(figsize=(10,5)) plt.bar(range(0,noOfClasses),numOfSamples) plt.title("No of Images for each Class") plt.xlabel("Class ID") plt.ylabel("Number of Images") plt.show() # PREPOSSESSING FUNCTION FOR IMAGES FOR TRAINING def preProcessing(img): img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) img = cv2.equalizeHist(img) img = img/255 return img #img = preProcessing(X_train[30]) #img = cv2.resize(img,(300,300)) #cv2.imshow("PreProcesssed",img) #cv2.waitKey(0) X_train= np.array(list(map(preProcessing,X_train))) X_test= np.array(list(map(preProcessing,X_test))) X_validation= np.array(list(map(preProcessing,X_validation))) # RESHAPE IMAGES X_train = X_train.reshape(X_train.shape[0],X_train.shape[1],X_train.shape[2],1) X_test = X_test.reshape(X_test.shape[0],X_test.shape[1],X_test.shape[2],1) X_validation = X_validation.reshape(X_validation.shape[0],X_validation.shape[1],X_validation.shape[2],1) # IMAGE AUGMENTATION dataGen = ImageDataGenerator(width_shift_range=0.1, height_shift_range=0.1, zoom_range=0.2, shear_range=0.1, rotation_range=10) dataGen.fit(X_train) # ONE HOT ENCODING OF MATRICES y_train = to_categorical(y_train,noOfClasses) y_test = to_categorical(y_test,noOfClasses) y_validation = to_categorical(y_validation,noOfClasses) # CREATING THE MODEL def myModel(): model = Sequential() model.add(Conv2D(64, kernel_size=(3,3), input_shape= (28, 28, 1))) model.add(Activation("relu")) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Conv2D(64, kernel_size=(3,3))) model.add(Activation("relu")) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Conv2D(64, kernel_size=(3,3))) model.add(Activation("relu")) 
model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Flatten()) model.add(Dense(64)) model.add(Activation("relu")) model.add(Dense(32)) model.add(Activation("relu")) model.add(Dense(10)) model.add(Activation("softmax")) model.compile(Adam(lr=0.001),loss='categorical_crossentropy',metrics=['accuracy']) return model model = myModel() print(model.summary()) #### STARTING THE TRAINING PROCESS history = model.fit_generator(dataGen.flow(X_train,y_train, batch_size=batchSizeVal), steps_per_epoch=stepsPerEpochVal, epochs=epochsVal, validation_data=(X_validation,y_validation), shuffle=1) # PLOT THE RESULTS plt.figure(1) plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.legend(['training','validation']) plt.title('Loss') plt.xlabel('epoch') plt.figure(2) plt.plot(history.history['accuracy']) plt.plot(history.history['val_accuracy']) plt.legend(['training','validation']) plt.title('Accuracy') plt.xlabel('epoch') plt.show() # EVALUATE USING TEST IMAGES score = model.evaluate(X_test,y_test,verbose=0) print('Test Score = ',score[0]) print('Test Accuracy =', score[1]) # SAVE THE TRAINED MODEL model.save('model11.h5')
30.216374
104
0.698471
import numpy as np import cv2 import os from sklearn.model_selection import train_test_split import matplotlib.pyplot as plt from keras.preprocessing.image import ImageDataGenerator from keras.utils.np_utils import to_categorical from keras.models import Sequential from keras.layers import Dense from keras.optimizers import Adam from keras.layers import Dropout,Flatten from keras.layers.convolutional import Conv2D,MaxPooling2D import pickle from tensorflow.keras.layers import Activation, Dense, Conv2D, Dropout, Flatten, MaxPooling2D path = 'numbers/train2' testRatio = 0.2 valRatio = 0.2 imageDimensions= (28,28,3) batchSizeVal= 6 epochsVal = 10 stepsPerEpochVal = 2000 count = 0 images = [] classNo = [] myList = os.listdir(path) print("Total Classes Detected:",len(myList)) noOfClasses = len(myList) print("Importing Classes .......") for x in range (0,noOfClasses): myPicList = os.listdir(path+"/"+str(x)) for y in myPicList: curImg = cv2.imread(path+"/"+str(x)+"/"+y) curImg = cv2.resize(curImg,(28,28)) images.append(curImg) classNo.append(x) print(x,end= " ") print(" ") print("Total Images in Images List = ",len(images)) print("Total IDS in classNo List= ",len(classNo)) images = np.array(images) classNo = np.array(classNo) print(images.shape) print(classNo.shape) X_train,X_test,y_train,y_test = train_test_split(images,classNo,test_size=testRatio) X_train,X_validation,y_train,y_validation = train_test_split(X_train,y_train,test_size=valRatio) print(X_train.shape) print(X_test.shape) print(X_validation.shape) numOfSamples= [] for x in range(0,noOfClasses): numOfSamples.append(len(np.where(y_train==x)[0])) print(numOfSamples) plt.figure(figsize=(10,5)) plt.bar(range(0,noOfClasses),numOfSamples) plt.title("No of Images for each Class") plt.xlabel("Class ID") plt.ylabel("Number of Images") plt.show() def preProcessing(img): img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) img = cv2.equalizeHist(img) img = img/255 return img X_train= np.array(list(map(preProcessing,X_train))) X_test= np.array(list(map(preProcessing,X_test))) X_validation= np.array(list(map(preProcessing,X_validation))) X_train = X_train.reshape(X_train.shape[0],X_train.shape[1],X_train.shape[2],1) X_test = X_test.reshape(X_test.shape[0],X_test.shape[1],X_test.shape[2],1) X_validation = X_validation.reshape(X_validation.shape[0],X_validation.shape[1],X_validation.shape[2],1) dataGen = ImageDataGenerator(width_shift_range=0.1, height_shift_range=0.1, zoom_range=0.2, shear_range=0.1, rotation_range=10) dataGen.fit(X_train) y_train = to_categorical(y_train,noOfClasses) y_test = to_categorical(y_test,noOfClasses) y_validation = to_categorical(y_validation,noOfClasses) def myModel(): model = Sequential() model.add(Conv2D(64, kernel_size=(3,3), input_shape= (28, 28, 1))) model.add(Activation("relu")) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Conv2D(64, kernel_size=(3,3))) model.add(Activation("relu")) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Conv2D(64, kernel_size=(3,3))) model.add(Activation("relu")) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Flatten()) model.add(Dense(64)) model.add(Activation("relu")) model.add(Dense(32)) model.add(Activation("relu")) model.add(Dense(10)) model.add(Activation("softmax")) model.compile(Adam(lr=0.001),loss='categorical_crossentropy',metrics=['accuracy']) return model model = myModel() print(model.summary()) history = model.fit_generator(dataGen.flow(X_train,y_train, batch_size=batchSizeVal), steps_per_epoch=stepsPerEpochVal, epochs=epochsVal, 
validation_data=(X_validation,y_validation), shuffle=1) plt.figure(1) plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.legend(['training','validation']) plt.title('Loss') plt.xlabel('epoch') plt.figure(2) plt.plot(history.history['accuracy']) plt.plot(history.history['val_accuracy']) plt.legend(['training','validation']) plt.title('Accuracy') plt.xlabel('epoch') plt.show() score = model.evaluate(X_test,y_test,verbose=0) print('Test Score = ',score[0]) print('Test Accuracy =', score[1]) model.save('model11.h5')
true
true
f70ddab857849f6693382263da31f34ac565729b
13,374
py
Python
cryptoapis/model/inline_response40382.py
Crypto-APIs/Crypto_APIs_2.0_SDK_Python
c59ebd914850622b2c6500c4c30af31fb9cecf0e
[ "MIT" ]
5
2021-05-17T04:45:03.000Z
2022-03-23T12:51:46.000Z
cryptoapis/model/inline_response40382.py
Crypto-APIs/Crypto_APIs_2.0_SDK_Python
c59ebd914850622b2c6500c4c30af31fb9cecf0e
[ "MIT" ]
null
null
null
cryptoapis/model/inline_response40382.py
Crypto-APIs/Crypto_APIs_2.0_SDK_Python
c59ebd914850622b2c6500c4c30af31fb9cecf0e
[ "MIT" ]
2
2021-06-02T07:32:26.000Z
2022-02-12T02:36:23.000Z
""" CryptoAPIs Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501 The version of the OpenAPI document: 2.0.0 Contact: developers@cryptoapis.io Generated by: https://openapi-generator.tech """ import re # noqa: F401 import sys # noqa: F401 from cryptoapis.model_utils import ( # noqa: F401 ApiTypeError, ModelComposed, ModelNormal, ModelSimple, cached_property, change_keys_js_to_python, convert_js_args_to_python_args, date, datetime, file_type, none_type, validate_get_composed_info, OpenApiModel ) from cryptoapis.exceptions import ApiAttributeError def lazy_import(): from cryptoapis.model.delete_automatic_tokens_forwarding_e403 import DeleteAutomaticTokensForwardingE403 globals()['DeleteAutomaticTokensForwardingE403'] = DeleteAutomaticTokensForwardingE403 class InlineResponse40382(ModelNormal): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. Attributes: allowed_values (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict with a capitalized key describing the allowed value and an allowed value. These dicts store the allowed enum values. attribute_map (dict): The key is attribute name and the value is json key in definition. discriminator_value_class_map (dict): A dict to go from the discriminator variable value to the discriminator class name. validations (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict that stores validations for max_length, min_length, max_items, min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, inclusive_minimum, and regex. additional_properties_type (tuple): A tuple of classes accepted as additional properties values. """ allowed_values = { } validations = { } @cached_property def additional_properties_type(): """ This must be a method because a model may have properties that are of type self, this must run after the class is loaded """ lazy_import() return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501 _nullable = False @cached_property def openapi_types(): """ This must be a method because a model may have properties that are of type self, this must run after the class is loaded Returns openapi_types (dict): The key is attribute name and the value is attribute type. """ lazy_import() return { 'api_version': (str,), # noqa: E501 'request_id': (str,), # noqa: E501 'error': (DeleteAutomaticTokensForwardingE403,), # noqa: E501 'context': (str,), # noqa: E501 } @cached_property def discriminator(): return None attribute_map = { 'api_version': 'apiVersion', # noqa: E501 'request_id': 'requestId', # noqa: E501 'error': 'error', # noqa: E501 'context': 'context', # noqa: E501 } read_only_vars = { } _composed_schemas = {} @classmethod @convert_js_args_to_python_args def _from_openapi_data(cls, api_version, request_id, error, *args, **kwargs): # noqa: E501 """InlineResponse40382 - a model defined in OpenAPI Args: api_version (str): Specifies the version of the API that incorporates this endpoint. 
request_id (str): Defines the ID of the request. The `requestId` is generated by Crypto APIs and it's unique for every request. error (DeleteAutomaticTokensForwardingE403): Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. snake case (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. _visited_composed_classes (tuple): This stores a tuple of classes that we have traveled through so that if we see that class again we will not use its discriminator again. When traveling through a discriminator, the composed schema that is is traveled through is added to this set. For example if Animal has a discriminator petType and we pass in "Dog", and the class Dog allOf includes Animal, we move through Animal once using the discriminator, and pick Dog. Then in Dog, we will make an instance of the Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) context (str): In batch situations the user can use the context to correlate responses with requests. This property is present regardless of whether the response was successful or returned as an error. `context` is specified by the user.. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) self = super(OpenApiModel, cls).__new__(cls) if args: raise ApiTypeError( "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % ( args, self.__class__.__name__, ), path_to_item=_path_to_item, valid_classes=(self.__class__,), ) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) self.api_version = api_version self.request_id = request_id self.error = error for var_name, var_value in kwargs.items(): if var_name not in self.attribute_map and \ self._configuration is not None and \ self._configuration.discard_unknown_keys and \ self.additional_properties_type is None: # discard variable. continue setattr(self, var_name, var_value) return self required_properties = set([ '_data_store', '_check_type', '_spec_property_naming', '_path_to_item', '_configuration', '_visited_composed_classes', ]) @convert_js_args_to_python_args def __init__(self, api_version, request_id, error, *args, **kwargs): # noqa: E501 """InlineResponse40382 - a model defined in OpenAPI Args: api_version (str): Specifies the version of the API that incorporates this endpoint. request_id (str): Defines the ID of the request. The `requestId` is generated by Crypto APIs and it's unique for every request. 
error (DeleteAutomaticTokensForwardingE403): Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. snake case (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. _visited_composed_classes (tuple): This stores a tuple of classes that we have traveled through so that if we see that class again we will not use its discriminator again. When traveling through a discriminator, the composed schema that is is traveled through is added to this set. For example if Animal has a discriminator petType and we pass in "Dog", and the class Dog allOf includes Animal, we move through Animal once using the discriminator, and pick Dog. Then in Dog, we will make an instance of the Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) context (str): In batch situations the user can use the context to correlate responses with requests. This property is present regardless of whether the response was successful or returned as an error. `context` is specified by the user.. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) if args: raise ApiTypeError( "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % ( args, self.__class__.__name__, ), path_to_item=_path_to_item, valid_classes=(self.__class__,), ) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) self.api_version = api_version self.request_id = request_id self.error = error for var_name, var_value in kwargs.items(): if var_name not in self.attribute_map and \ self._configuration is not None and \ self._configuration.discard_unknown_keys and \ self.additional_properties_type is None: # discard variable. continue setattr(self, var_name, var_value) if var_name in self.read_only_vars: raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate " f"class with read only attributes.")
46.926316
484
0.597054
import re import sys from cryptoapis.model_utils import ( ApiTypeError, ModelComposed, ModelNormal, ModelSimple, cached_property, change_keys_js_to_python, convert_js_args_to_python_args, date, datetime, file_type, none_type, validate_get_composed_info, OpenApiModel ) from cryptoapis.exceptions import ApiAttributeError def lazy_import(): from cryptoapis.model.delete_automatic_tokens_forwarding_e403 import DeleteAutomaticTokensForwardingE403 globals()['DeleteAutomaticTokensForwardingE403'] = DeleteAutomaticTokensForwardingE403 class InlineResponse40382(ModelNormal): allowed_values = { } validations = { } @cached_property def additional_properties_type(): lazy_import() return (bool, date, datetime, dict, float, int, list, str, none_type,) _nullable = False @cached_property def openapi_types(): lazy_import() return { 'api_version': (str,), 'request_id': (str,), 'error': (DeleteAutomaticTokensForwardingE403,), 'context': (str,), } @cached_property def discriminator(): return None attribute_map = { 'api_version': 'apiVersion', 'request_id': 'requestId', 'error': 'error', 'context': 'context', } read_only_vars = { } _composed_schemas = {} @classmethod @convert_js_args_to_python_args def _from_openapi_data(cls, api_version, request_id, error, *args, **kwargs): _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) self = super(OpenApiModel, cls).__new__(cls) if args: raise ApiTypeError( "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % ( args, self.__class__.__name__, ), path_to_item=_path_to_item, valid_classes=(self.__class__,), ) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) self.api_version = api_version self.request_id = request_id self.error = error for var_name, var_value in kwargs.items(): if var_name not in self.attribute_map and \ self._configuration is not None and \ self._configuration.discard_unknown_keys and \ self.additional_properties_type is None: continue setattr(self, var_name, var_value) return self required_properties = set([ '_data_store', '_check_type', '_spec_property_naming', '_path_to_item', '_configuration', '_visited_composed_classes', ]) @convert_js_args_to_python_args def __init__(self, api_version, request_id, error, *args, **kwargs): _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) if args: raise ApiTypeError( "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." 
% ( args, self.__class__.__name__, ), path_to_item=_path_to_item, valid_classes=(self.__class__,), ) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) self.api_version = api_version self.request_id = request_id self.error = error for var_name, var_value in kwargs.items(): if var_name not in self.attribute_map and \ self._configuration is not None and \ self._configuration.discard_unknown_keys and \ self.additional_properties_type is None: continue setattr(self, var_name, var_value) if var_name in self.read_only_vars: raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate " f"class with read only attributes.")
true
true
f70ddadd8c534ce341100c123ca2bae91c5488da
3,605
py
Python
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/deploymentscripts/v2019_10_01_preview/aio/_configuration.py
vincenttran-msft/azure-sdk-for-python
348b56f9f03eeb3f7b502eed51daf494ffff874d
[ "MIT" ]
1
2022-02-01T18:50:12.000Z
2022-02-01T18:50:12.000Z
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/deploymentscripts/v2019_10_01_preview/aio/_configuration.py
vincenttran-msft/azure-sdk-for-python
348b56f9f03eeb3f7b502eed51daf494ffff874d
[ "MIT" ]
null
null
null
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/deploymentscripts/v2019_10_01_preview/aio/_configuration.py
vincenttran-msft/azure-sdk-for-python
348b56f9f03eeb3f7b502eed51daf494ffff874d
[ "MIT" ]
null
null
null
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------

from typing import Any, TYPE_CHECKING

from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy, AsyncARMChallengeAuthenticationPolicy

from .._version import VERSION

if TYPE_CHECKING:
    # pylint: disable=unused-import,ungrouped-imports
    from azure.core.credentials_async import AsyncTokenCredential


class DeploymentScriptsClientConfiguration(Configuration):  # pylint: disable=too-many-instance-attributes
    """Configuration for DeploymentScriptsClient.

    Note that all parameters used to create this instance are saved as instance
    attributes.

    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param subscription_id: Subscription Id which forms part of the URI for every service call.
    :type subscription_id: str
    :keyword api_version: Api Version. Default value is "2019-10-01-preview". Note that overriding
     this default value may result in unsupported behavior.
    :paramtype api_version: str
    """

    def __init__(
        self,
        credential: "AsyncTokenCredential",
        subscription_id: str,
        **kwargs: Any
    ) -> None:
        super(DeploymentScriptsClientConfiguration, self).__init__(**kwargs)
        api_version = kwargs.pop('api_version', "2019-10-01-preview")  # type: str

        if credential is None:
            raise ValueError("Parameter 'credential' must not be None.")
        if subscription_id is None:
            raise ValueError("Parameter 'subscription_id' must not be None.")

        self.credential = credential
        self.subscription_id = subscription_id
        self.api_version = api_version
        self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
        kwargs.setdefault('sdk_moniker', 'mgmt-resource/{}'.format(VERSION))
        self._configure(**kwargs)

    def _configure(
        self,
        **kwargs: Any
    ) -> None:
        self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
        self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
        self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
        self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
        self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
        self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
        self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
        self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
        self.authentication_policy = kwargs.get('authentication_policy')
        if self.credential and not self.authentication_policy:
            self.authentication_policy = AsyncARMChallengeAuthenticationPolicy(self.credential, *self.credential_scopes, **kwargs)
49.383562
130
0.705687
from typing import Any, TYPE_CHECKING

from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy, AsyncARMChallengeAuthenticationPolicy

from .._version import VERSION

if TYPE_CHECKING:
    from azure.core.credentials_async import AsyncTokenCredential


class DeploymentScriptsClientConfiguration(Configuration):

    def __init__(
        self,
        credential: "AsyncTokenCredential",
        subscription_id: str,
        **kwargs: Any
    ) -> None:
        super(DeploymentScriptsClientConfiguration, self).__init__(**kwargs)
        api_version = kwargs.pop('api_version', "2019-10-01-preview")

        if credential is None:
            raise ValueError("Parameter 'credential' must not be None.")
        if subscription_id is None:
            raise ValueError("Parameter 'subscription_id' must not be None.")

        self.credential = credential
        self.subscription_id = subscription_id
        self.api_version = api_version
        self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
        kwargs.setdefault('sdk_moniker', 'mgmt-resource/{}'.format(VERSION))
        self._configure(**kwargs)

    def _configure(
        self,
        **kwargs: Any
    ) -> None:
        self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
        self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
        self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
        self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
        self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
        self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
        self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
        self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
        self.authentication_policy = kwargs.get('authentication_policy')
        if self.credential and not self.authentication_policy:
            self.authentication_policy = AsyncARMChallengeAuthenticationPolicy(self.credential, *self.credential_scopes, **kwargs)
true
true
f70ddb2ee46cc780846ac894cd6ace61da1a5f0e
600
py
Python
drink_robot/controllers/__init__.py
cactode/drink_robot
03c9e5605ac3f632d94fe108fb463381e889ed18
[ "MIT" ]
null
null
null
drink_robot/controllers/__init__.py
cactode/drink_robot
03c9e5605ac3f632d94fe108fb463381e889ed18
[ "MIT" ]
null
null
null
drink_robot/controllers/__init__.py
cactode/drink_robot
03c9e5605ac3f632d94fe108fb463381e889ed18
[ "MIT" ]
null
null
null
from drink_robot.controllers.index import bp as index
from drink_robot.controllers.recipe import bp as recipe
from drink_robot.controllers.bottle import bp as bottle
from drink_robot.controllers.pour import bp as pour

import pigpio


def init_pins(app):
    if app.config['DEBUG']:
        return
    gpio = pigpio.pi()
    for pin in app.config['PINS'].values():
        gpio.set_mode(pin, pigpio.OUTPUT)
        gpio.write(pin, 1)


def init_all_blueprints(app):
    app.register_blueprint(index)
    app.register_blueprint(recipe)
    app.register_blueprint(bottle)
    app.register_blueprint(pour)
30
55
0.74
from drink_robot.controllers.index import bp as index
from drink_robot.controllers.recipe import bp as recipe
from drink_robot.controllers.bottle import bp as bottle
from drink_robot.controllers.pour import bp as pour

import pigpio


def init_pins(app):
    if app.config['DEBUG']:
        return
    gpio = pigpio.pi()
    for pin in app.config['PINS'].values():
        gpio.set_mode(pin, pigpio.OUTPUT)
        gpio.write(pin, 1)


def init_all_blueprints(app):
    app.register_blueprint(index)
    app.register_blueprint(recipe)
    app.register_blueprint(bottle)
    app.register_blueprint(pour)
true
true
f70ddb48a8fccefc2404b635a3c5b13345afc3d7
1,116
py
Python
setup.py
Superbil/django-spgateway
b8fa05d32a929a68bbd5336c38613da6bc35e3b7
[ "MIT" ]
null
null
null
setup.py
Superbil/django-spgateway
b8fa05d32a929a68bbd5336c38613da6bc35e3b7
[ "MIT" ]
null
null
null
setup.py
Superbil/django-spgateway
b8fa05d32a929a68bbd5336c38613da6bc35e3b7
[ "MIT" ]
1
2021-05-05T13:38:50.000Z
2021-05-05T13:38:50.000Z
from setuptools import setup

__version__ = "1.0.0"

with open('README.rst') as f:
    long_description = f.read()

setup(
    name = "django-spgateway",
    version = __version__,
    description = 'Django support for Spgateway',
    keywords = "django, spgateway",
    url = "https://github.com/superbil/django-spgateway",
    license = "MIT",
    packages = ["spgateway"],
    include_package_data = True,
    install_requires = ["django>=1.10", "pycrypto>=2.6.1"],
    classifiers = [
        "Development Status :: 4 - Beta",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3.7",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Framework :: Django",
        "Framework :: Django :: 2.2",
        "License :: OSI Approved :: MIT License",
        "Natural Language :: English",
        "Environment :: Web Environment",
    ],
    long_description = long_description,
    projects_urls = {
        'Bug Reports': 'https://github.com/Superbil/django-spgateway/issues',
        'Source': 'https://github.com/superbil/django-spgateway',
    }
)
31.885714
77
0.615591
from setuptools import setup

__version__ = "1.0.0"

with open('README.rst') as f:
    long_description = f.read()

setup(
    name = "django-spgateway",
    version = __version__,
    description = 'Django support for Spgateway',
    keywords = "django, spgateway",
    url = "https://github.com/superbil/django-spgateway",
    license = "MIT",
    packages = ["spgateway"],
    include_package_data = True,
    install_requires = ["django>=1.10", "pycrypto>=2.6.1"],
    classifiers = [
        "Development Status :: 4 - Beta",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3.7",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Framework :: Django",
        "Framework :: Django :: 2.2",
        "License :: OSI Approved :: MIT License",
        "Natural Language :: English",
        "Environment :: Web Environment",
    ],
    long_description = long_description,
    projects_urls = {
        'Bug Reports': 'https://github.com/Superbil/django-spgateway/issues',
        'Source': 'https://github.com/superbil/django-spgateway',
    }
)
true
true
f70ddbc685903894099e80ce95cb99f6054b1268
1,934
py
Python
setup.py
Anagraph/titiler
287201a554523a1cb4258ff41ec52ca2bdc0ac13
[ "MIT" ]
null
null
null
setup.py
Anagraph/titiler
287201a554523a1cb4258ff41ec52ca2bdc0ac13
[ "MIT" ]
null
null
null
setup.py
Anagraph/titiler
287201a554523a1cb4258ff41ec52ca2bdc0ac13
[ "MIT" ]
null
null
null
"""Setup titiler.""" from setuptools import find_packages, setup with open("README.md") as f: long_description = f.read() inst_reqs = [ "brotli-asgi>=1.0.0", "cogeo-mosaic>=3.0.0rc2,<3.1", "fastapi==0.63.0", "geojson-pydantic", "jinja2>=2.11.2,<3.0.0", "morecantile", "numpy", "pydantic", "python-dotenv", "rasterio", "rio-cogeo>=2.1,<2.2", "rio-tiler>=2.0.5,<2.1", "uvicorn[standard]>=0.12.0,<0.14.0", # Additional requirements for python 3.6 "dataclasses;python_version<'3.7'", "async_exit_stack>=1.0.1,<2.0.0;python_version<'3.7'", "async_generator>=1.10,<2.0.0;python_version<'3.7'", ] extra_reqs = { "dev": ["pytest", "pytest-cov", "pytest-asyncio", "pre-commit", "requests"], "test": ["pytest", "pytest-cov", "pytest-asyncio", "requests"], "docs": ["nbconvert", "mkdocs", "mkdocs-material", "mkdocs-jupyter", "pygments"], } setup( name="titiler", version="0.2.0", description=u"A modern dynamic tile server built on top of FastAPI and Rasterio/GDAL.", long_description=long_description, long_description_content_type="text/markdown", python_requires=">=3.6", classifiers=[ "Intended Audience :: Information Technology", "Intended Audience :: Science/Research", "License :: OSI Approved :: BSD License", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", ], keywords="COG STAC MosaicJSON FastAPI", author=u"Vincent Sarago", author_email="vincent@developmentseed.org", url="https://github.com/developmentseed/titiler", license="MIT", packages=find_packages(exclude=["tests*"]), package_data={"titiler": ["templates/*.html", "templates/*.xml"]}, include_package_data=True, zip_safe=False, install_requires=inst_reqs, extras_require=extra_reqs, )
31.704918
91
0.632885
from setuptools import find_packages, setup

with open("README.md") as f:
    long_description = f.read()

inst_reqs = [
    "brotli-asgi>=1.0.0",
    "cogeo-mosaic>=3.0.0rc2,<3.1",
    "fastapi==0.63.0",
    "geojson-pydantic",
    "jinja2>=2.11.2,<3.0.0",
    "morecantile",
    "numpy",
    "pydantic",
    "python-dotenv",
    "rasterio",
    "rio-cogeo>=2.1,<2.2",
    "rio-tiler>=2.0.5,<2.1",
    "uvicorn[standard]>=0.12.0,<0.14.0",
    "dataclasses;python_version<'3.7'",
    "async_exit_stack>=1.0.1,<2.0.0;python_version<'3.7'",
    "async_generator>=1.10,<2.0.0;python_version<'3.7'",
]
extra_reqs = {
    "dev": ["pytest", "pytest-cov", "pytest-asyncio", "pre-commit", "requests"],
    "test": ["pytest", "pytest-cov", "pytest-asyncio", "requests"],
    "docs": ["nbconvert", "mkdocs", "mkdocs-material", "mkdocs-jupyter", "pygments"],
}

setup(
    name="titiler",
    version="0.2.0",
    description=u"A modern dynamic tile server built on top of FastAPI and Rasterio/GDAL.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    python_requires=">=3.6",
    classifiers=[
        "Intended Audience :: Information Technology",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: BSD License",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
    ],
    keywords="COG STAC MosaicJSON FastAPI",
    author=u"Vincent Sarago",
    author_email="vincent@developmentseed.org",
    url="https://github.com/developmentseed/titiler",
    license="MIT",
    packages=find_packages(exclude=["tests*"]),
    package_data={"titiler": ["templates/*.html", "templates/*.xml"]},
    include_package_data=True,
    zip_safe=False,
    install_requires=inst_reqs,
    extras_require=extra_reqs,
)
true
true
f70ddbf4298f15f9dd01d4b6682cbe474e824eb3
53,943
py
Python
tools/Vitis-AI-Quantizer/vai_q_pytorch/nndct_shared/nndct_graph/operator_definition.py
hito0512/Vitis-AI
996459fb96cb077ed2f7e789d515893b1cccbc95
[ "Apache-2.0" ]
1
2022-02-17T22:13:23.000Z
2022-02-17T22:13:23.000Z
tools/Vitis-AI-Quantizer/vai_q_pytorch/nndct_shared/nndct_graph/operator_definition.py
hito0512/Vitis-AI
996459fb96cb077ed2f7e789d515893b1cccbc95
[ "Apache-2.0" ]
null
null
null
tools/Vitis-AI-Quantizer/vai_q_pytorch/nndct_shared/nndct_graph/operator_definition.py
hito0512/Vitis-AI
996459fb96cb077ed2f7e789d515893b1cccbc95
[ "Apache-2.0" ]
null
null
null
# # Copyright 2019 Xilinx Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from enum import auto, unique, Enum from typing import Any from nndct_shared.base import NNDCT_OP from nndct_shared.nndct_graph.base_operator import (AutoName, NndctIrAttr, OccurenceType, Operation) from nndct_shared.nndct_graph.base_tensor import Tensor import numpy as np class Conv1d(Operation): @unique class AttrName(AutoName): KERNEL = auto() STRIDE = auto() DILATION = auto() PAD_MODE = auto() PAD = auto() GROUP = auto() BIAS_TERM = auto() IN_DIM = auto() OUT_DIM = auto() @unique class ParamName(AutoName): WEIGHTS = auto() BIAS = auto() def __init__(self, *args, **kwargs) -> None: super(Conv1d, self).__init__(*args, **kwargs) # allocate memory for attr value self._attr_value_mem = { self.AttrName.KERNEL: [None], self.AttrName.STRIDE: [None], self.AttrName.DILATION: [None], self.AttrName.PAD_MODE: [None], self.AttrName.PAD: [None, None], self.AttrName.GROUP: [None], self.AttrName.BIAS_TERM: [None], self.AttrName.IN_DIM: [None], self.AttrName.OUT_DIM: [None], } self._attrs[self.AttrName.KERNEL] = NndctIrAttr( name=self.AttrName.KERNEL, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.KERNEL], occurence_type=OccurenceType.REQUIRED, annotation=r"""kernel size, [kernel_w, kernel_h]""") self._attrs[self.AttrName.STRIDE] = NndctIrAttr( name=self.AttrName.STRIDE, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.STRIDE], occurence_type=OccurenceType.REQUIRED, annotation=r"""stride [stride_w, stride_h]""") self._attrs[self.AttrName.DILATION] = NndctIrAttr( name=self.AttrName.DILATION, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.DILATION], occurence_type=OccurenceType.OPTIONAL, default_value=[1], annotation=r"""dilation, [dilation_w, dilation_h]""") self._attrs[self.AttrName.PAD_MODE] = NndctIrAttr( name=self.AttrName.PAD_MODE, value_type=str, size=1, value_mem=self._attr_value_mem[self.AttrName.PAD_MODE], occurence_type=OccurenceType.REQUIRED, annotation=r"""padding mode, 0-PADDING, 1-SAME, 2-VALID, 3-CEIL for the FUTURE. use attr pad. SAME, make output with same width and height as input. 
VALID, no padding""") self._attrs[self.AttrName.PAD] = NndctIrAttr( name=self.AttrName.PAD, value_type=int, size=2, value_mem=self._attr_value_mem[self.AttrName.PAD], occurence_type=OccurenceType.OPTIONAL, default_value=[0,0], annotation=r"""padding size, only effective when pad mode is PADDING, [" "left, right, top, bottom],""") self._attrs[self.AttrName.GROUP] = NndctIrAttr( name=self.AttrName.GROUP, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.GROUP], occurence_type=OccurenceType.OPTIONAL, default_value=1, annotation=r"""group""") self._attrs[self.AttrName.BIAS_TERM] = NndctIrAttr( name=self.AttrName.BIAS_TERM, value_type=bool, size=1, value_mem=self._attr_value_mem[self.AttrName.BIAS_TERM], occurence_type=OccurenceType.REQUIRED, annotation=r"""whether bias exist""") self._attrs[self.AttrName.IN_DIM] = NndctIrAttr( name=self.AttrName.IN_DIM, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.IN_DIM], occurence_type=OccurenceType.REQUIRED, annotation=r"""in_channels""") self._attrs[self.AttrName.OUT_DIM] = NndctIrAttr( name=self.AttrName.OUT_DIM, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.OUT_DIM], occurence_type=OccurenceType.REQUIRED, annotation=r"""out_channels""") class Conv2d(Operation): @unique class AttrName(AutoName): KERNEL = auto() STRIDE = auto() DILATION = auto() PAD_MODE = auto() PAD = auto() GROUP = auto() BIAS_TERM = auto() IN_DIM = auto() OUT_DIM = auto() @unique class ParamName(AutoName): WEIGHTS = auto() BIAS = auto() def __init__(self, *args, **kwargs) -> None: super(Conv2d, self).__init__(*args, **kwargs) # allocate memory for attr value self._attr_value_mem = { self.AttrName.KERNEL: [None, None], self.AttrName.STRIDE: [None, None], self.AttrName.DILATION: [None, None], self.AttrName.PAD_MODE: [None], self.AttrName.PAD: [None, None, None, None], self.AttrName.GROUP: [None], self.AttrName.BIAS_TERM: [None], self.AttrName.IN_DIM: [None], self.AttrName.OUT_DIM: [None], } self._attrs[self.AttrName.KERNEL] = NndctIrAttr( name=self.AttrName.KERNEL, value_type=int, size=2, value_mem=self._attr_value_mem[self.AttrName.KERNEL], occurence_type=OccurenceType.REQUIRED, annotation=r"""kernel size, [kernel_w, kernel_h]""") self._attrs[self.AttrName.STRIDE] = NndctIrAttr( name=self.AttrName.STRIDE, value_type=int, size=2, value_mem=self._attr_value_mem[self.AttrName.STRIDE], occurence_type=OccurenceType.REQUIRED, annotation=r"""stride [stride_w, stride_h]""") self._attrs[self.AttrName.DILATION] = NndctIrAttr( name=self.AttrName.DILATION, value_type=int, size=2, value_mem=self._attr_value_mem[self.AttrName.DILATION], occurence_type=OccurenceType.OPTIONAL, default_value=[1, 1], annotation=r"""dilation, [dilation_w, dilation_h]""") self._attrs[self.AttrName.PAD_MODE] = NndctIrAttr( name=self.AttrName.PAD_MODE, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.PAD_MODE], occurence_type=OccurenceType.REQUIRED, annotation=r"""padding mode, 0-PADDING, 1-SAME, 2-VALID, 3-CEIL for the FUTURE. use attr pad. SAME, make output with same width and height as input. 
VALID, no padding""") self._attrs[self.AttrName.PAD] = NndctIrAttr( name=self.AttrName.PAD, value_type=int, size=4, value_mem=self._attr_value_mem[self.AttrName.PAD], occurence_type=OccurenceType.OPTIONAL, default_value=[0, 0, 0, 0], annotation=r"""padding size, only effective when pad mode is PADDING, [" "left, right, top, bottom],""") self._attrs[self.AttrName.GROUP] = NndctIrAttr( name=self.AttrName.GROUP, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.GROUP], occurence_type=OccurenceType.OPTIONAL, default_value=1, annotation=r"""group""") self._attrs[self.AttrName.BIAS_TERM] = NndctIrAttr( name=self.AttrName.BIAS_TERM, value_type=bool, size=1, value_mem=self._attr_value_mem[self.AttrName.BIAS_TERM], occurence_type=OccurenceType.REQUIRED, annotation=r"""whether bias exist""") self._attrs[self.AttrName.IN_DIM] = NndctIrAttr( name=self.AttrName.IN_DIM, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.IN_DIM], occurence_type=OccurenceType.REQUIRED, annotation=r"""in_channels""") self._attrs[self.AttrName.OUT_DIM] = NndctIrAttr( name=self.AttrName.OUT_DIM, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.OUT_DIM], occurence_type=OccurenceType.REQUIRED, annotation=r"""out_channels""") class Conv3d(Operation): @unique class AttrName(AutoName): KERNEL = auto() STRIDE = auto() DILATION = auto() GROUP = auto() PAD_MODE = auto() PAD = auto() BIAS_TERM = auto() IN_DIM = auto() OUT_DIM = auto() OUTPUT_PAD = auto() @unique class ParamName(AutoName): WEIGHTS = auto() BIAS = auto() def __init__(self, *args, **kwargs) -> None: super().__init__(*args, **kwargs) # allocate memory for attr value self._attr_value_mem = { self.AttrName.KERNEL: [None, None, None], self.AttrName.STRIDE: [None, None, None], self.AttrName.DILATION: [None, None, None], self.AttrName.GROUP: [None], self.AttrName.PAD_MODE: [None], self.AttrName.PAD: [None, None, None, None, None, None], self.AttrName.OUTPUT_PAD: [None, None, None, None, None, None], self.AttrName.BIAS_TERM: [None], self.AttrName.IN_DIM: [None], self.AttrName.OUT_DIM: [None], } self._attrs[self.AttrName.KERNEL] = NndctIrAttr( name=self.AttrName.KERNEL, value_type=int, size=3, value_mem=self._attr_value_mem[self.AttrName.KERNEL], occurence_type=OccurenceType.REQUIRED, annotation=r"""kernel size, [kernel_w, kernel_h, kernel_d]""") self._attrs[self.AttrName.STRIDE] = NndctIrAttr( name=self.AttrName.STRIDE, value_type=int, size=None, value_mem=self._attr_value_mem[self.AttrName.STRIDE], occurence_type=OccurenceType.REQUIRED, annotation=r"""stride [stride_w, stride_h, stride_d]""") self._attrs[self.AttrName.DILATION] = NndctIrAttr( name=self.AttrName.DILATION, value_type=int, size=None, value_mem=self._attr_value_mem[self.AttrName.DILATION], occurence_type=OccurenceType.OPTIONAL, default_value=[1, 1, 1], annotation=r"""dilation, [dilation_w, dilation_h, dilation_d]""") self._attrs[self.AttrName.GROUP] = NndctIrAttr( name=self.AttrName.GROUP, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.GROUP], occurence_type=OccurenceType.OPTIONAL, default_value=1, annotation=r"""group""") self._attrs[self.AttrName.PAD_MODE] = NndctIrAttr( name=self.AttrName.PAD_MODE, value_type=str, size=1, value_mem=self._attr_value_mem[self.AttrName.PAD_MODE], occurence_type=OccurenceType.REQUIRED, annotation=r"""We support 4 padding mode: `FLOOR, CEIL, SAME, VALID`. 
" "For example, when you parsing models from other frameworks, " "`caffe, pytorch->\"FLOOR\", tensorflow->\"SAME\" or \"VALID\"`""") self._attrs[self.AttrName.PAD] = NndctIrAttr( name=self.AttrName.PAD, value_type=int, size=6, value_mem=self._attr_value_mem[self.AttrName.PAD], occurence_type=OccurenceType.OPTIONAL, default_value=[0, 0, 0, 0, 0, 0], annotation=r"""padding size, only effective when pad mode is PADDING, [" "left, right, top, bottom, near, far],""") self._attrs[self.AttrName.OUTPUT_PAD] = NndctIrAttr( name=self.AttrName.OUTPUT_PAD, value_type=int, size=6, value_mem=self._attr_value_mem[self.AttrName.OUTPUT_PAD], occurence_type=OccurenceType.OPTIONAL, default_value=[0, 0, 0, 0, 0, 0], annotation=r"""additional size added to one side of each dimension in the output, [" "left, right, top, bottom, near, far],""") self._attrs[self.AttrName.BIAS_TERM] = NndctIrAttr( name=self.AttrName.BIAS_TERM, value_type=bool, size=None, value_mem=self._attr_value_mem[self.AttrName.BIAS_TERM], occurence_type=OccurenceType.REQUIRED, annotation=r"""whether bias exist""") self._attrs[self.AttrName.IN_DIM] = NndctIrAttr( name=self.AttrName.IN_DIM, value_type=int, size=None, value_mem=self._attr_value_mem[self.AttrName.IN_DIM], occurence_type=OccurenceType.REQUIRED, annotation=r"""in_channels""") self._attrs[self.AttrName.OUT_DIM] = NndctIrAttr( name=self.AttrName.OUT_DIM, value_type=int, size=None, value_mem=self._attr_value_mem[self.AttrName.OUT_DIM], occurence_type=OccurenceType.REQUIRED, annotation=r"""out_channels""") class BatchNorm(Operation): @unique class AttrName(AutoName): EPSILON = auto() SCALE = auto() CENTER = auto() OUT_DIM = auto() AXIS = auto() @unique class ParamName(AutoName): GAMMA = auto() BETA = auto() MOVING_MEAN = auto() MOVING_VAR = auto() def __init__(self, *args, **kwargs) -> None: super(BatchNorm, self).__init__(*args, **kwargs) # allocate memory for attr value self._attr_value_mem = { self.AttrName.EPSILON: [None], self.AttrName.SCALE: [None], self.AttrName.CENTER: [None], self.AttrName.OUT_DIM: [None], self.AttrName.AXIS: [None] } self._attrs[self.AttrName.EPSILON] = NndctIrAttr( name=self.AttrName.EPSILON, value_type=float, size=1, value_mem=self._attr_value_mem[self.AttrName.EPSILON], occurence_type=OccurenceType.REQUIRED, annotation=r"""epsilon""") self._attrs[self.AttrName.SCALE] = NndctIrAttr( name=self.AttrName.SCALE, value_type=bool, size=1, value_mem=self._attr_value_mem[self.AttrName.SCALE], occurence_type=OccurenceType.REQUIRED, annotation=r"""scale""") self._attrs[self.AttrName.CENTER] = NndctIrAttr( name=self.AttrName.CENTER, value_type=bool, size=1, value_mem=self._attr_value_mem[self.AttrName.CENTER], occurence_type=OccurenceType.REQUIRED, annotation=r"""center""") self._attrs[self.AttrName.OUT_DIM] = NndctIrAttr( name=self.AttrName.OUT_DIM, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.OUT_DIM], occurence_type=OccurenceType.REQUIRED, annotation=r"""num features""") self._attrs[self.AttrName.AXIS] = NndctIrAttr( name=self.AttrName.AXIS, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.AXIS], occurence_type=OccurenceType.REQUIRED, annotation=r"""the axis of the input to implement batchnorm""") class Dense(Operation): @unique class AttrName(AutoName): BIAS_TERM = auto() IN_DIM = auto() OUT_DIM = auto() @unique class ParamName(AutoName): WEIGHTS = auto() BIAS = auto() def __init__(self, *args, **kwargs) -> None: super(Dense, self).__init__(*args, **kwargs) # allocate memory for attr value self._attr_value_mem = { 
self.AttrName.BIAS_TERM: [None], self.AttrName.IN_DIM: [None], self.AttrName.OUT_DIM: [None], } self._attrs[self.AttrName.BIAS_TERM] = NndctIrAttr( name=self.AttrName.BIAS_TERM, value_type=bool, size=1, value_mem=self._attr_value_mem[self.AttrName.BIAS_TERM], occurence_type=OccurenceType.REQUIRED, annotation=r"""whether bias exist""") self._attrs[self.AttrName.IN_DIM] = NndctIrAttr( name=self.AttrName.IN_DIM, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.IN_DIM], occurence_type=OccurenceType.REQUIRED, annotation=r"""in_channels""") self._attrs[self.AttrName.OUT_DIM] = NndctIrAttr( name=self.AttrName.OUT_DIM, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.OUT_DIM], occurence_type=OccurenceType.REQUIRED, annotation=r"""out_channels""") class Concat(Operation): @unique class AttrName(AutoName): AXIS = auto() def __init__(self, *args, **kwargs) -> None: super(Concat, self).__init__(*args, **kwargs) # allocate memory for attr value self._attr_value_mem = { self.AttrName.AXIS: [None], } self._attrs[self.AttrName.AXIS] = NndctIrAttr( name=self.AttrName.AXIS, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.AXIS], occurence_type=OccurenceType.REQUIRED, annotation=r"""specified axis""") class Shape(Operation): @unique class AttrName(AutoName): AXIS = auto() def __init__(self, *args, **kwargs) -> None: super(Shape, self).__init__(*args, **kwargs) # allocate memory for attr value self._attr_value_mem = { self.AttrName.AXIS: [None], } self._attrs[self.AttrName.AXIS] = NndctIrAttr( name=self.AttrName.AXIS, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.AXIS], occurence_type=OccurenceType.REQUIRED, annotation=r"""specified axis""") class Reshape(Operation): @unique class AttrName(AutoName): SHAPE = auto() def __init__(self, *args, **kwargs) -> None: super(Reshape, self).__init__(*args, **kwargs) # allocate memory for attr value self._attr_value_mem = { self.AttrName.SHAPE: [], # possible any length } self._attrs[self.AttrName.SHAPE] = NndctIrAttr( name=self.AttrName.SHAPE, value_type=(int, Tensor), size=None, value_mem=self._attr_value_mem[self.AttrName.SHAPE], occurence_type=OccurenceType.REQUIRED, annotation=r"""the target shape""") class MaxPool(Operation): @unique class AttrName(AutoName): KERNEL = auto() STRIDE = auto() DILATION = auto() PAD_MODE = auto() PAD = auto() GLOBAL = auto() def __init__(self, *args, **kwargs) -> None: super(MaxPool, self).__init__(*args, **kwargs) # allocate memory for attr value self._attr_value_mem = { self.AttrName.KERNEL: [None, None], self.AttrName.STRIDE: [None, None], self.AttrName.PAD_MODE: [None], self.AttrName.PAD: [None, None, None, None], self.AttrName.GLOBAL: [None], } self._attrs[self.AttrName.KERNEL] = NndctIrAttr( name=self.AttrName.KERNEL, value_type=int, size=2, value_mem=self._attr_value_mem[self.AttrName.KERNEL], occurence_type=OccurenceType.REQUIRED, annotation=r"""kernel size, [kernel_w, kernel_h]""") self._attrs[self.AttrName.STRIDE] = NndctIrAttr( name=self.AttrName.STRIDE, value_type=int, size=2, value_mem=self._attr_value_mem[self.AttrName.STRIDE], occurence_type=OccurenceType.REQUIRED, annotation=r"""stride [stride_w, stride_h]""") self._attrs[self.AttrName.PAD_MODE] = NndctIrAttr( name=self.AttrName.PAD_MODE, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.PAD_MODE], occurence_type=OccurenceType.REQUIRED, annotation=r"""padding mode, 0-PADDING, 1-SAME, 2-VALID, 3-CEIL for the FUTURE. use attr pad. 
SAME, make output with same width and height as input. VALID, no padding""") self._attrs[self.AttrName.PAD] = NndctIrAttr( name=self.AttrName.PAD, value_type=int, size=4, value_mem=self._attr_value_mem[self.AttrName.PAD], occurence_type=OccurenceType.OPTIONAL, default_value=[0, 0, 0, 0], annotation=r"""padding size, only effective when pad mode is PADDING, [" "left, right, top, bottom],""") self._attrs[self.AttrName.GLOBAL] = NndctIrAttr( name=self.AttrName.GLOBAL, value_type=bool, size=1, value_mem=self._attr_value_mem[self.AttrName.GLOBAL], occurence_type=OccurenceType.OPTIONAL, default_value=False, annotation=r"""global""") class MaxPool1d(Operation): @unique class AttrName(AutoName): KERNEL = auto() STRIDE = auto() DILATION = auto() PAD_MODE = auto() PAD = auto() GLOBAL = auto() COUNT_INCLUDE_PAD = auto() def __init__(self, *args, **kwargs) -> None: super(MaxPool1d, self).__init__(*args, **kwargs) # allocate memory for attr value self._attr_value_mem = { self.AttrName.KERNEL: [None], self.AttrName.STRIDE: [None], self.AttrName.PAD_MODE: [None], self.AttrName.PAD: [None, None], self.AttrName.GLOBAL: [None], self.AttrName.COUNT_INCLUDE_PAD: [None] } self._attrs[self.AttrName.KERNEL] = NndctIrAttr( name=self.AttrName.KERNEL, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.KERNEL], occurence_type=OccurenceType.REQUIRED, annotation=r"""kernel size, [kernel_w, kernel_h]""") self._attrs[self.AttrName.STRIDE] = NndctIrAttr( name=self.AttrName.STRIDE, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.STRIDE], occurence_type=OccurenceType.REQUIRED, annotation=r"""stride [stride_w, stride_h]""") self._attrs[self.AttrName.PAD_MODE] = NndctIrAttr( name=self.AttrName.PAD_MODE, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.PAD_MODE], occurence_type=OccurenceType.REQUIRED, annotation=r"""padding mode, 0-PADDING, 1-SAME, 2-VALID, 3-CEIL for the FUTURE. use attr pad. SAME, make output with same width and height as input. 
VALID, no padding""") self._attrs[self.AttrName.PAD] = NndctIrAttr( name=self.AttrName.PAD, value_type=int, size=2, value_mem=self._attr_value_mem[self.AttrName.PAD], occurence_type=OccurenceType.OPTIONAL, default_value=[0, 0], annotation=r"""padding size, only effective when pad mode is PADDING, [" "left, right, top, bottom],""") self._attrs[self.AttrName.GLOBAL] = NndctIrAttr( name=self.AttrName.GLOBAL, value_type=bool, size=1, value_mem=self._attr_value_mem[self.AttrName.GLOBAL], occurence_type=OccurenceType.OPTIONAL, default_value=False, annotation=r"""global""") self._attrs[self.AttrName.COUNT_INCLUDE_PAD] = NndctIrAttr( name=self.AttrName.COUNT_INCLUDE_PAD, value_type=bool, size=1, value_mem=self._attr_value_mem[self.AttrName.COUNT_INCLUDE_PAD], occurence_type=OccurenceType.OPTIONAL, default_value=[True], annotation=r"""when True, will include the zero-padding in the averaging calculation""") class AvgPool(Operation): @unique class AttrName(AutoName): KERNEL = auto() STRIDE = auto() PAD_MODE = auto() PAD = auto() GLOBAL = auto() COUNT_INCLUDE_PAD = auto() def __init__(self, *args, **kwargs) -> None: super(AvgPool, self).__init__(*args, **kwargs) # allocate memory for attr value self._attr_value_mem = { self.AttrName.KERNEL: [None, None], self.AttrName.STRIDE: [None, None], self.AttrName.PAD_MODE: [None], self.AttrName.PAD: [None, None, None, None], self.AttrName.GLOBAL: [None], self.AttrName.COUNT_INCLUDE_PAD: [None] } self._attrs[self.AttrName.KERNEL] = NndctIrAttr( name=self.AttrName.KERNEL, value_type=int, size=2, value_mem=self._attr_value_mem[self.AttrName.KERNEL], occurence_type=OccurenceType.REQUIRED, annotation=r"""kernel size, [kernel_w, kernel_h]""") self._attrs[self.AttrName.STRIDE] = NndctIrAttr( name=self.AttrName.STRIDE, value_type=int, size=2, value_mem=self._attr_value_mem[self.AttrName.STRIDE], occurence_type=OccurenceType.REQUIRED, annotation=r"""stride [stride_w, stride_h]""") self._attrs[self.AttrName.PAD_MODE] = NndctIrAttr( name=self.AttrName.PAD_MODE, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.PAD_MODE], occurence_type=OccurenceType.REQUIRED, annotation=r"""padding mode, 0-PADDING, 1-SAME, 2-VALID, 3-CEIL for the FUTURE. use attr pad. SAME, make output with same width and height as input. 
VALID, no padding""") self._attrs[self.AttrName.PAD] = NndctIrAttr( name=self.AttrName.PAD, value_type=int, size=4, value_mem=self._attr_value_mem[self.AttrName.PAD], occurence_type=OccurenceType.OPTIONAL, default_value=[0, 0, 0, 0], annotation=r"""padding size, only effective when pad mode is PADDING, [" "left, right, top, bottom],""") self._attrs[self.AttrName.GLOBAL] = NndctIrAttr( name=self.AttrName.GLOBAL, value_type=bool, size=1, value_mem=self._attr_value_mem[self.AttrName.GLOBAL], occurence_type=OccurenceType.OPTIONAL, default_value=False, annotation=r"""global""") self._attrs[self.AttrName.COUNT_INCLUDE_PAD] = NndctIrAttr( name=self.AttrName.COUNT_INCLUDE_PAD, value_type=bool, size=1, value_mem=self._attr_value_mem[self.AttrName.COUNT_INCLUDE_PAD], occurence_type=OccurenceType.OPTIONAL, default_value=[True], annotation=r"""when True, will include the zero-padding in the averaging calculation""") class Flatten(Operation): @unique class AttrName(AutoName): START_DIM = "start_axis" END_DIM = "end_axis" def __init__(self, *args, **kwargs) -> None: super(Flatten, self).__init__(*args, **kwargs) # allocate memory for attr value self._attr_value_mem = { self.AttrName.START_DIM: [None], self.AttrName.END_DIM: [None], } self._attrs[self.AttrName.START_DIM] = NndctIrAttr( name=self.AttrName.START_DIM, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.START_DIM], occurence_type=OccurenceType.REQUIRED, annotation=r"""the first dim to flatten""") self._attrs[self.AttrName.END_DIM] = NndctIrAttr( name=self.AttrName.END_DIM, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.END_DIM], occurence_type=OccurenceType.REQUIRED, annotation=r"""the last dim to flatten""") # including mean, max, min etc. class PermuteInvariantOp(Operation): @unique class AttrName(AutoName): DIMS = "axis" KEEP_DIMS = auto() def __init__(self, *args, **kwargs) -> None: super().__init__(*args, **kwargs) # allocate memory for attr value self._attr_value_mem = { self.AttrName.DIMS: [], self.AttrName.KEEP_DIMS: [None], } self._attrs[self.AttrName.DIMS] = NndctIrAttr( name=self.AttrName.DIMS, value_type=int, size=None, value_mem=self._attr_value_mem[self.AttrName.DIMS], occurence_type=OccurenceType.REQUIRED, annotation=r"""The dimensions to reduce. List of integers""") self._attrs[self.AttrName.KEEP_DIMS] = NndctIrAttr( name=self.AttrName.KEEP_DIMS, value_type=bool, size=1, value_mem=self._attr_value_mem[self.AttrName.KEEP_DIMS], occurence_type=OccurenceType.REQUIRED, annotation=r"""specify whether the reduced dimension is kept or not.""") class Permute(Operation): @unique class AttrName(AutoName): ORDER = auto() def __init__(self, op_type, *args, **kwargs) -> None: super(Permute, self).__init__(op_type) # allocate memory for attr value self._attr_value_mem = { self.AttrName.ORDER: [], } self._attrs[self.AttrName.ORDER] = NndctIrAttr( name=self.AttrName.ORDER, value_type=(int, Tensor), size=None, value_mem=self._attr_value_mem[self.AttrName.ORDER], occurence_type=OccurenceType.REQUIRED, annotation=r"""The dimensions to reduce. 
List of integers""") class Softmax(Operation): @unique class AttrName(AutoName): AXIS = auto() def __init__(self) -> None: super(Softmax, self).__init__(NNDCT_OP.SOFTMAX) # allocate memory for attr value self._attr_value_mem = { self.AttrName.AXIS: [None], } self._attrs[self.AttrName.AXIS] = NndctIrAttr( name=self.AttrName.AXIS, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.AXIS], occurence_type=OccurenceType.REQUIRED, annotation=r"""the dimension softmax would be performed on. default is the last dimension.""") class Lstm(Operation): @unique class AttrName(AutoName): INPUT_SIZE = auto() HIDDEN_SIZE = auto() BIDIRECTIONAL = auto() NUM_LAYERS = auto() BATCH_FIRST = auto() @unique class ParamName(AutoName): WEIGHT_IH = auto() WEIGHT_HH = auto() WEIGHT_IH_REVERSE = auto() WEIGHT_HH_REVERSE = auto() BIAS = auto() BIAS_REVERSE = auto() def __init__(self, *args, **kwargs) -> None: super(Lstm, self).__init__(*args, **kwargs) # allocate memory for attr value self._attr_value_mem = { self.AttrName.INPUT_SIZE: [None], self.AttrName.HIDDEN_SIZE: [None], self.AttrName.BIDIRECTIONAL: [None], self.AttrName.NUM_LAYERS: [None], self.AttrName.BATCH_FIRST: [None], } self._attrs[self.AttrName.INPUT_SIZE] = NndctIrAttr( name=self.AttrName.INPUT_SIZE, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.INPUT_SIZE], occurence_type=OccurenceType.REQUIRED, annotation=r"""input size of LSTM.""") self._attrs[self.AttrName.HIDDEN_SIZE] = NndctIrAttr( name=self.AttrName.HIDDEN_SIZE, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.HIDDEN_SIZE], occurence_type=OccurenceType.REQUIRED, annotation=r"""hidden size of LSTM.""") self._attrs[self.AttrName.BIDIRECTIONAL] = NndctIrAttr( name=self.AttrName.BIDIRECTIONAL, value_type=bool, size=1, value_mem=self._attr_value_mem[self.AttrName.BIDIRECTIONAL], occurence_type=OccurenceType.REQUIRED, annotation=r""" If True, means a bidirectional LSTM.""") self._attrs[self.AttrName.NUM_LAYERS] = NndctIrAttr( name=self.AttrName.NUM_LAYERS, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.NUM_LAYERS], occurence_type=OccurenceType.REQUIRED, annotation=r"""Number of recurrent layers""") self._attrs[self.AttrName.BATCH_FIRST] = NndctIrAttr( name=self.AttrName.BATCH_FIRST, value_type=bool, size=1, value_mem=self._attr_value_mem[self.AttrName.BATCH_FIRST], occurence_type=OccurenceType.REQUIRED, annotation=r""" If True, then the input and output tensors are provided as (batch, seq, feature)""" ) class Gru(Operation): @unique class AttrName(AutoName): INPUT_SIZE = auto() HIDDEN_SIZE = auto() BIDIRECTIONAL = auto() NUM_LAYERS = auto() BATCH_FIRST = auto() @unique class ParamName(AutoName): WEIGHT_IH = auto() WEIGHT_HH = auto() WEIGHT_IH_REVERSE = auto() WEIGHT_HH_REVERSE = auto() BIAS_IH = auto() BIAS_HH = auto() BIAS_IH_REVERSE = auto() BIAS_HH_REVERSE = auto() def __init__(self, *args, **kwargs) -> None: super(Gru, self).__init__(*args, **kwargs) # allocate memory for attr value self._attr_value_mem = { self.AttrName.INPUT_SIZE: [None], self.AttrName.HIDDEN_SIZE: [None], self.AttrName.BIDIRECTIONAL: [None], self.AttrName.NUM_LAYERS: [None], self.AttrName.BATCH_FIRST: [None], } self._attrs[self.AttrName.INPUT_SIZE] = NndctIrAttr( name=self.AttrName.INPUT_SIZE, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.INPUT_SIZE], occurence_type=OccurenceType.REQUIRED, annotation=r"""input size of GRU.""") self._attrs[self.AttrName.HIDDEN_SIZE] = NndctIrAttr( name=self.AttrName.HIDDEN_SIZE, 
value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.HIDDEN_SIZE], occurence_type=OccurenceType.REQUIRED, annotation=r"""hidden size of GRU.""") self._attrs[self.AttrName.BIDIRECTIONAL] = NndctIrAttr( name=self.AttrName.BIDIRECTIONAL, value_type=bool, size=1, value_mem=self._attr_value_mem[self.AttrName.BIDIRECTIONAL], occurence_type=OccurenceType.REQUIRED, annotation=r""" If True, means a bidirectional GRU.""") self._attrs[self.AttrName.NUM_LAYERS] = NndctIrAttr( name=self.AttrName.NUM_LAYERS, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.NUM_LAYERS], occurence_type=OccurenceType.REQUIRED, annotation=r"""Number of recurrent layers""") self._attrs[self.AttrName.BATCH_FIRST] = NndctIrAttr( name=self.AttrName.BATCH_FIRST, value_type=bool, size=1, value_mem=self._attr_value_mem[self.AttrName.BATCH_FIRST], occurence_type=OccurenceType.REQUIRED, annotation=r""" If True, then the input and output tensors are provided as (batch, seq, feature)""" ) class StridedSlice(Operation): @unique class AttrName(AutoName): BEGIN = auto() END = auto() STRIDES = auto() BEGIN_MASK = auto() END_MASK = auto() ELLIPSIS_MASK = auto() NEW_AXIS_MASK = auto() SHRINK_AXIS_MASK = auto() def __init__(self) -> None: super(StridedSlice, self).__init__(NNDCT_OP.STRIDED_SLICE) # allocate memory for attr value self._attr_value_mem = { self.AttrName.BEGIN: [], self.AttrName.END: [], self.AttrName.STRIDES: [], self.AttrName.BEGIN_MASK: [None], self.AttrName.END_MASK: [None], self.AttrName.ELLIPSIS_MASK: [None], self.AttrName.NEW_AXIS_MASK: [None], self.AttrName.SHRINK_AXIS_MASK: [None] } self._attrs[self.AttrName.BEGIN] = NndctIrAttr( name=self.AttrName.BEGIN, value_type=int, size=None, value_mem=self._attr_value_mem[self.AttrName.BEGIN], occurence_type=OccurenceType.REQUIRED, annotation=r"""start location of slicing (included)""") self._attrs[self.AttrName.END] = NndctIrAttr( name=self.AttrName.END, value_type=int, size=None, value_mem=self._attr_value_mem[self.AttrName.END], occurence_type=OccurenceType.REQUIRED, annotation=r"""end location of slicing (excluded)""") self._attrs[self.AttrName.STRIDES] = NndctIrAttr( name=self.AttrName.STRIDES, value_type=int, size=None, value_mem=self._attr_value_mem[self.AttrName.STRIDES], occurence_type=OccurenceType.REQUIRED, annotation=r"""strides of slicing""") self._attrs[self.AttrName.BEGIN_MASK] = NndctIrAttr( name=self.AttrName.BEGIN_MASK, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.BEGIN_MASK], default_value=0, occurence_type=OccurenceType.OPTIONAL, annotation=r"""If the ith bit of begin_mask is set, begin[i] is ignored and the fullest possible range in that dimension is used instead.""") self._attrs[self.AttrName.END_MASK] = NndctIrAttr( name=self.AttrName.END_MASK, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.END_MASK], default_value=0, occurence_type=OccurenceType.OPTIONAL, annotation=r"""If the ith bit of end_mask is set, end[i] is ignored and the fullest possible range in that dimension is used instead, except with the end range.""") self._attrs[self.AttrName.ELLIPSIS_MASK] = NndctIrAttr( name=self.AttrName.ELLIPSIS_MASK, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.ELLIPSIS_MASK], default_value=0, occurence_type=OccurenceType.OPTIONAL, annotation=r"""If the ith bit of ellipsis_mask is set, as many unspecified dimensions as needed will be inserted between other dimensions. 
Only one non-zero bit is allowed in ellipsis_mask.""") self._attrs[self.AttrName.NEW_AXIS_MASK] = NndctIrAttr( name=self.AttrName.NEW_AXIS_MASK, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.NEW_AXIS_MASK], default_value=0, occurence_type=OccurenceType.OPTIONAL, annotation=r"""If the ith bit of new_axis_mask is set, then begin, end, and stride are ignored and a new length 1 dimension is added at this point in the output tensor.""") self._attrs[self.AttrName.SHRINK_AXIS_MASK] = NndctIrAttr( name=self.AttrName.SHRINK_AXIS_MASK, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.SHRINK_AXIS_MASK], default_value=0, occurence_type=OccurenceType.OPTIONAL, annotation=r"""If the ith bit of shrink_axis_mask is set, it implies that taking on the value at index begin[i]. end[i] and strides[i] are ignored in this case.""") class BinaryOp(Operation): @unique class AttrName(AutoName): INPUT = auto() OTHER = auto() def __init__(self, *args, **kwargs) -> None: super().__init__(*args, **kwargs) # allocate memory for attr value self._attr_value_mem = { self.AttrName.INPUT: [None], self.AttrName.OTHER: [None], } self._attrs[self.AttrName.INPUT] = NndctIrAttr( name=self.AttrName.INPUT, value_type=(int, float, bool, Tensor, np.ndarray), size=1, value_mem=self._attr_value_mem[self.AttrName.INPUT], occurence_type=OccurenceType.REQUIRED, map_to_xir=False, annotation=r"""the first input tensor.""") self._attrs[self.AttrName.OTHER] = NndctIrAttr( name=self.AttrName.OTHER, value_type=(int, float, Tensor, np.ndarray), size=1, value_mem=self._attr_value_mem[self.AttrName.OTHER], occurence_type=OccurenceType.REQUIRED, map_to_xir=False, annotation=r"""the second input tensor.""") class Sub(Operation): @unique class AttrName(AutoName): INPUT = auto() OTHER = auto() def __init__(self, *args, **kwargs) -> None: super().__init__(*args, **kwargs) # allocate memory for attr value self._attr_value_mem = { self.AttrName.INPUT: [None], self.AttrName.OTHER: [None], } self._attrs[self.AttrName.INPUT] = NndctIrAttr( name=self.AttrName.INPUT, value_type=(int, float, Tensor, np.ndarray), size=1, value_mem=self._attr_value_mem[self.AttrName.INPUT], occurence_type=OccurenceType.REQUIRED, annotation=r"""the first input tensor.""") self._attrs[self.AttrName.OTHER] = NndctIrAttr( name=self.AttrName.OTHER, value_type=(int, float, Tensor, np.ndarray), size=1, value_mem=self._attr_value_mem[self.AttrName.OTHER], occurence_type=OccurenceType.REQUIRED, annotation=r"""the second input tensor.""") class Pad(Operation): @unique class AttrName(AutoName): PAD_WITH = "paddings" MODE = auto() CONSTANT_VALUES = auto() def __init__(self) -> None: super().__init__(NNDCT_OP.PAD) # allocate memory for attr value self._attr_value_mem = { self.AttrName.PAD_WITH: [None, None, None, None, None, None, None, None], self.AttrName.MODE: [None], self.AttrName.CONSTANT_VALUES: [None, None, None, None, None, None, None, None] } self._attrs[self.AttrName.PAD_WITH] = NndctIrAttr( name=self.AttrName.PAD_WITH, value_type=int, size=8, value_mem=self._attr_value_mem[self.AttrName.PAD_WITH], occurence_type=OccurenceType.REQUIRED, annotation=r"""0 , 0 , left, right, top, bottom, 0, 0""") self._attrs[self.AttrName.MODE] = NndctIrAttr( name=self.AttrName.MODE, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.MODE], occurence_type=OccurenceType.REQUIRED, annotation=r"""The padding mode. 
0:'CONSTANT', 1:'REFLECT', 2:'SYMMETRIC'].""" ) self._attrs[self.AttrName.CONSTANT_VALUES] = NndctIrAttr( name=self.AttrName.CONSTANT_VALUES, value_type=float, size=8, value_mem=self._attr_value_mem[self.AttrName.CONSTANT_VALUES], occurence_type=OccurenceType.REQUIRED, annotation=r"""the value set into the padded locations""") class LeakyReLU(Operation): @unique class AttrName(AutoName): ALPHA = auto() def __init__(self) -> None: super().__init__(NNDCT_OP.LEAKY_RELU) # allocate memory for attr value self._attr_value_mem = { self.AttrName.ALPHA: [None], } self._attrs[self.AttrName.ALPHA] = NndctIrAttr( name=self.AttrName.ALPHA, value_type=float, size=1, value_mem=self._attr_value_mem[self.AttrName.ALPHA], occurence_type=OccurenceType.REQUIRED, annotation=r"""negative slope""") class Resize(Operation): @unique class AttrName(AutoName): SIZE = auto() SCALE = auto() ALIGN_CORNERS = auto() HALF_PIXEL_CENTERS = auto() MODE = auto() def __init__(self) -> None: super().__init__(NNDCT_OP.RESIZE) # allocate memory for attr value self._attr_value_mem = { self.AttrName.SIZE: [None, None], self.AttrName.SCALE: [None, None], self.AttrName.ALIGN_CORNERS: [None], self.AttrName.HALF_PIXEL_CENTERS: [None], self.AttrName.MODE: [None], } self._attrs[self.AttrName.SIZE] = NndctIrAttr( name=self.AttrName.SIZE, value_type=(int, Tensor), size=2, value_mem=self._attr_value_mem[self.AttrName.SIZE], default_value=[0, 0], occurence_type=OccurenceType.OPTIONAL, annotation=r"""output spatial size, [size_w, size_h]""") self._attrs[self.AttrName.SCALE] = NndctIrAttr( name=self.AttrName.SCALE, value_type=float, size=2, value_mem=self._attr_value_mem[self.AttrName.SCALE], default_value=[1.0, 1.0], occurence_type=OccurenceType.OPTIONAL, annotation=r"""New size = Origin size * scale. {scale_w, scale_h}.""") self._attrs[self.AttrName.ALIGN_CORNERS] = NndctIrAttr( name=self.AttrName.ALIGN_CORNERS, value_type=bool, size=1, value_mem=self._attr_value_mem[self.AttrName.ALIGN_CORNERS], default_value=False, occurence_type=OccurenceType.OPTIONAL, annotation=r"""It must be set When mode is 3.If true, the centers of the 4 corner pixels of the input and output tensors are aligned, preserving the values at the corner pixels. Defaults to false.""") self._attrs[self.AttrName.HALF_PIXEL_CENTERS] = NndctIrAttr( name=self.AttrName.HALF_PIXEL_CENTERS, value_type=bool, size=1, value_mem=self._attr_value_mem[self.AttrName.HALF_PIXEL_CENTERS], default_value=False, occurence_type=OccurenceType.OPTIONAL, annotation=r"""half_pixel_centers is false by default in, tf.resize_bilinear() and tf.resize_nearest_neighbor(). 
is true by default in tf.upsampling2d(), but the version of tf should be > r1.13""") self._attrs[self.AttrName.MODE] = NndctIrAttr( name=self.AttrName.MODE, value_type=str, size=1, value_mem=self._attr_value_mem[self.AttrName.MODE], occurence_type=OccurenceType.REQUIRED, annotation=r"""OPENCV-NEAREST -> 0, OPENCV-BILINEAR -> 1, Tensorflow-NEAREST -> 2, Tensorflow-BILINEAR -> 3, To be improved!""") class Resize3d(Operation): @unique class AttrName(AutoName): SIZE = auto() SCALE = auto() ALIGN_CORNERS = auto() HALF_PIXEL_CENTERS = auto() MODE = auto() def __init__(self) -> None: super().__init__(NNDCT_OP.RESIZE_3D) # allocate memory for attr value self._attr_value_mem = { self.AttrName.SIZE: [None, None, None], self.AttrName.SCALE: [None, None, None], self.AttrName.ALIGN_CORNERS: [None], self.AttrName.HALF_PIXEL_CENTERS: [None], self.AttrName.MODE: [None], } self._attrs[self.AttrName.SIZE] = NndctIrAttr( name=self.AttrName.SIZE, value_type=(int, Tensor), size=3, value_mem=self._attr_value_mem[self.AttrName.SIZE], default_value=[0, 0, 0], occurence_type=OccurenceType.OPTIONAL, annotation=r"""output spatial size, [size_h, size_w, size_d]""") self._attrs[self.AttrName.SCALE] = NndctIrAttr( name=self.AttrName.SCALE, value_type=float, size=3, value_mem=self._attr_value_mem[self.AttrName.SCALE], default_value=[1.0, 1.0, 1.0], occurence_type=OccurenceType.OPTIONAL, annotation=r"""New size = Origin size * scale. {scale_h, scale_w, scale_d}.""") self._attrs[self.AttrName.ALIGN_CORNERS] = NndctIrAttr( name=self.AttrName.ALIGN_CORNERS, value_type=bool, size=1, value_mem=self._attr_value_mem[self.AttrName.ALIGN_CORNERS], default_value=False, occurence_type=OccurenceType.OPTIONAL, annotation=r"""It must be set When mode is 3.If true, the centers of the 4 corner pixels of the input and output tensors are aligned, preserving the values at the corner pixels. Defaults to false.""") self._attrs[self.AttrName.HALF_PIXEL_CENTERS] = NndctIrAttr( name=self.AttrName.HALF_PIXEL_CENTERS, value_type=bool, size=1, value_mem=self._attr_value_mem[self.AttrName.HALF_PIXEL_CENTERS], default_value=False, occurence_type=OccurenceType.OPTIONAL, annotation=r"""half_pixel_centers is false by default in, tf.resize_bilinear() and tf.resize_nearest_neighbor(). is true by default in tf.upsampling2d(), but the version of tf should be > r1.13""") self._attrs[self.AttrName.MODE] = NndctIrAttr( name=self.AttrName.MODE, value_type=str, size=1, value_mem=self._attr_value_mem[self.AttrName.MODE], occurence_type=OccurenceType.REQUIRED, annotation=r"""Trilinear""") class Constant(Operation): @unique class AttrName(AutoName): DATA = auto() def __init__(self, nndct_op_type) -> None: super().__init__(nndct_op_type) # allocate memory for attr value self._attr_value_mem = { self.AttrName.DATA: [], } self._attrs[self.AttrName.DATA] = NndctIrAttr( name=self.AttrName.DATA, value_type=(int, float, list, Tensor), size=None, value_mem=self._attr_value_mem[self.AttrName.DATA], occurence_type=OccurenceType.REQUIRED, annotation=r"""Constant Parameter""") class Squeeze(Operation): @unique class AttrName(AutoName): DIMS = "axis" def __init__(self) -> None: super().__init__(NNDCT_OP.SQUEEZE) # allocate memory for attr value self._attr_value_mem = { self.AttrName.DIMS: [], } self._attrs[self.AttrName.DIMS] = NndctIrAttr( name=self.AttrName.DIMS, value_type=int, size=None, value_mem=self._attr_value_mem[self.AttrName.DIMS], occurence_type=OccurenceType.REQUIRED, annotation=r"""The dimensions to be squeezed. 
The dimension index " // "starts at 0.""") class EmbeddingBag(Operation): @unique class ParamName(AutoName): WEIGHT = auto() class LayerNorm(Operation): @unique class ParamName(AutoName): GAMMA = auto() BETA = auto() # e.g. ones, zeros class ConstFromShape(Operation): @unique class AttrName(AutoName): SHAPE = auto() def __init__(self, *args, **kwargs) -> None: super().__init__(*args, **kwargs) # allocate memory for attr value self._attr_value_mem = { self.AttrName.SHAPE: [], } self._attrs[self.AttrName.SHAPE] = NndctIrAttr( name=self.AttrName.SHAPE, value_type=(int, Tensor), size=None, value_mem=self._attr_value_mem[self.AttrName.SHAPE], occurence_type=OccurenceType.REQUIRED, annotation=r"""the target shape""") class UnaryOp(Operation): @unique class AttrName(AutoName): INPUT = auto() def __init__(self, *args, **kwargs) -> None: super().__init__(*args, **kwargs) # allocate memory for attr value self._attr_value_mem = { self.AttrName.INPUT: [None], } self._attrs[self.AttrName.INPUT] = NndctIrAttr( name=self.AttrName.INPUT, value_type=(int, str, float, bool, Tensor, np.ndarray), size=1, value_mem=self._attr_value_mem[self.AttrName.INPUT], occurence_type=OccurenceType.REQUIRED, map_to_xir=False, annotation=r"""the first input tensor.""") class Reorg(Operation): @unique class AttrName(AutoName): SCALE = auto() REVERSE = auto() def __init__(self, nndct_op_type) -> None: super().__init__(nndct_op_type) # allocate memory for attr value self._attr_value_mem = { self.AttrName.SCALE: [None], self.AttrName.REVERSE: [None], } self._attrs[self.AttrName.SCALE] = NndctIrAttr( name=self.AttrName.SCALE, value_type=(int, Tensor), size=1, value_mem=self._attr_value_mem[self.AttrName.SCALE], occurence_type=OccurenceType.REQUIRED, annotation=r"""scale for reorg""") self._attrs[self.AttrName.REVERSE] = NndctIrAttr( name=self.AttrName.REVERSE, value_type=bool, size=1, value_mem=self._attr_value_mem[self.AttrName.REVERSE], occurence_type=OccurenceType.REQUIRED, annotation=r"""reverse""") class Gstiling(Operation): @unique class AttrName(AutoName): STRIDE = auto() REVERSE = auto() def __init__(self, nndct_op_type) -> None: super().__init__(nndct_op_type) # allocate memory for attr value self._attr_value_mem = { self.AttrName.STRIDE: [None], self.AttrName.REVERSE: [None], } self._attrs[self.AttrName.STRIDE] = NndctIrAttr( name=self.AttrName.STRIDE, value_type=(int, Tensor), size=1, value_mem=self._attr_value_mem[self.AttrName.STRIDE], occurence_type=OccurenceType.REQUIRED, annotation=r"""stride for feature maps""") self._attrs[self.AttrName.REVERSE] = NndctIrAttr( name=self.AttrName.REVERSE, value_type=bool, size=1, value_mem=self._attr_value_mem[self.AttrName.REVERSE], occurence_type=OccurenceType.REQUIRED, annotation=r"""reverse""") class PixelShuffle(Operation): @unique class AttrName(AutoName): SCALE = auto() UPSCALE = auto() def __init__(self, nndct_op_type) -> None: super().__init__(nndct_op_type) # allocate memory for attr value self._attr_value_mem = { self.AttrName.SCALE: [None], self.AttrName.UPSCALE: [None], } self._attrs[self.AttrName.SCALE] = NndctIrAttr( name=self.AttrName.SCALE, value_type=(int, Tensor), size=1, value_mem=self._attr_value_mem[self.AttrName.SCALE], occurence_type=OccurenceType.REQUIRED, annotation=r"""scale for feature maps""") self._attrs[self.AttrName.UPSCALE] = NndctIrAttr( name=self.AttrName.UPSCALE, value_type=bool, size=1, value_mem=self._attr_value_mem[self.AttrName.UPSCALE], occurence_type=OccurenceType.REQUIRED, annotation=r"""upscale or downscale PixelShuffle.""") class 
Embedding(Operation): @unique class ParamName(AutoName): WEIGHT = auto() class CustomOp(Operation): AttrName = Enum("AttrName", '') def __init__(self, nndct_op_type) -> None: super().__init__(nndct_op_type) self._attr_value_mem = {} self.is_custom_op = True def get_attr_name_from_str(self, attr_name): attr_names = [(name, val.value) for name, val in self.AttrName.__members__.items()] if(not attr_names) or (attr_names and all([attr_name != attr[1] for attr in attr_names])): attr_names += [(attr_name.upper(), attr_name.lower())] self.AttrName = Enum("AttrName", attr_names) return getattr(self.AttrName, attr_name.upper()) def _register_attr_by_name(self, attr_name): if attr_name in self.AttrName.__members__: return attr_name = self.get_attr_name_from_str(attr_name) self._attr_value_mem[attr_name] = [None] self._attrs[attr_name] = NndctIrAttr( name=attr_name, value_type=Any, size=None, occurence_type=OccurenceType.REQUIRED, value_mem=self._attr_value_mem[attr_name]) def set_attr_by_name(self, attr_name, value): if attr_name not in self.AttrName.__members__: self._register_attr_by_name(attr_name) attr_name = self.get_attr_name_from_str(attr_name) self.set_attr(attr_name, value)
32.692727
107
0.648054
from enum import auto, unique, Enum from typing import Any from nndct_shared.base import NNDCT_OP from nndct_shared.nndct_graph.base_operator import (AutoName, NndctIrAttr, OccurenceType, Operation) from nndct_shared.nndct_graph.base_tensor import Tensor import numpy as np class Conv1d(Operation): @unique class AttrName(AutoName): KERNEL = auto() STRIDE = auto() DILATION = auto() PAD_MODE = auto() PAD = auto() GROUP = auto() BIAS_TERM = auto() IN_DIM = auto() OUT_DIM = auto() @unique class ParamName(AutoName): WEIGHTS = auto() BIAS = auto() def __init__(self, *args, **kwargs) -> None: super(Conv1d, self).__init__(*args, **kwargs) self._attr_value_mem = { self.AttrName.KERNEL: [None], self.AttrName.STRIDE: [None], self.AttrName.DILATION: [None], self.AttrName.PAD_MODE: [None], self.AttrName.PAD: [None, None], self.AttrName.GROUP: [None], self.AttrName.BIAS_TERM: [None], self.AttrName.IN_DIM: [None], self.AttrName.OUT_DIM: [None], } self._attrs[self.AttrName.KERNEL] = NndctIrAttr( name=self.AttrName.KERNEL, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.KERNEL], occurence_type=OccurenceType.REQUIRED, annotation=r"""kernel size, [kernel_w, kernel_h]""") self._attrs[self.AttrName.STRIDE] = NndctIrAttr( name=self.AttrName.STRIDE, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.STRIDE], occurence_type=OccurenceType.REQUIRED, annotation=r"""stride [stride_w, stride_h]""") self._attrs[self.AttrName.DILATION] = NndctIrAttr( name=self.AttrName.DILATION, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.DILATION], occurence_type=OccurenceType.OPTIONAL, default_value=[1], annotation=r"""dilation, [dilation_w, dilation_h]""") self._attrs[self.AttrName.PAD_MODE] = NndctIrAttr( name=self.AttrName.PAD_MODE, value_type=str, size=1, value_mem=self._attr_value_mem[self.AttrName.PAD_MODE], occurence_type=OccurenceType.REQUIRED, annotation=r"""padding mode, 0-PADDING, 1-SAME, 2-VALID, 3-CEIL for the FUTURE. use attr pad. SAME, make output with same width and height as input. 
VALID, no padding""") self._attrs[self.AttrName.PAD] = NndctIrAttr( name=self.AttrName.PAD, value_type=int, size=2, value_mem=self._attr_value_mem[self.AttrName.PAD], occurence_type=OccurenceType.OPTIONAL, default_value=[0,0], annotation=r"""padding size, only effective when pad mode is PADDING, [" "left, right, top, bottom],""") self._attrs[self.AttrName.GROUP] = NndctIrAttr( name=self.AttrName.GROUP, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.GROUP], occurence_type=OccurenceType.OPTIONAL, default_value=1, annotation=r"""group""") self._attrs[self.AttrName.BIAS_TERM] = NndctIrAttr( name=self.AttrName.BIAS_TERM, value_type=bool, size=1, value_mem=self._attr_value_mem[self.AttrName.BIAS_TERM], occurence_type=OccurenceType.REQUIRED, annotation=r"""whether bias exist""") self._attrs[self.AttrName.IN_DIM] = NndctIrAttr( name=self.AttrName.IN_DIM, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.IN_DIM], occurence_type=OccurenceType.REQUIRED, annotation=r"""in_channels""") self._attrs[self.AttrName.OUT_DIM] = NndctIrAttr( name=self.AttrName.OUT_DIM, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.OUT_DIM], occurence_type=OccurenceType.REQUIRED, annotation=r"""out_channels""") class Conv2d(Operation): @unique class AttrName(AutoName): KERNEL = auto() STRIDE = auto() DILATION = auto() PAD_MODE = auto() PAD = auto() GROUP = auto() BIAS_TERM = auto() IN_DIM = auto() OUT_DIM = auto() @unique class ParamName(AutoName): WEIGHTS = auto() BIAS = auto() def __init__(self, *args, **kwargs) -> None: super(Conv2d, self).__init__(*args, **kwargs) self._attr_value_mem = { self.AttrName.KERNEL: [None, None], self.AttrName.STRIDE: [None, None], self.AttrName.DILATION: [None, None], self.AttrName.PAD_MODE: [None], self.AttrName.PAD: [None, None, None, None], self.AttrName.GROUP: [None], self.AttrName.BIAS_TERM: [None], self.AttrName.IN_DIM: [None], self.AttrName.OUT_DIM: [None], } self._attrs[self.AttrName.KERNEL] = NndctIrAttr( name=self.AttrName.KERNEL, value_type=int, size=2, value_mem=self._attr_value_mem[self.AttrName.KERNEL], occurence_type=OccurenceType.REQUIRED, annotation=r"""kernel size, [kernel_w, kernel_h]""") self._attrs[self.AttrName.STRIDE] = NndctIrAttr( name=self.AttrName.STRIDE, value_type=int, size=2, value_mem=self._attr_value_mem[self.AttrName.STRIDE], occurence_type=OccurenceType.REQUIRED, annotation=r"""stride [stride_w, stride_h]""") self._attrs[self.AttrName.DILATION] = NndctIrAttr( name=self.AttrName.DILATION, value_type=int, size=2, value_mem=self._attr_value_mem[self.AttrName.DILATION], occurence_type=OccurenceType.OPTIONAL, default_value=[1, 1], annotation=r"""dilation, [dilation_w, dilation_h]""") self._attrs[self.AttrName.PAD_MODE] = NndctIrAttr( name=self.AttrName.PAD_MODE, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.PAD_MODE], occurence_type=OccurenceType.REQUIRED, annotation=r"""padding mode, 0-PADDING, 1-SAME, 2-VALID, 3-CEIL for the FUTURE. use attr pad. SAME, make output with same width and height as input. 
VALID, no padding""") self._attrs[self.AttrName.PAD] = NndctIrAttr( name=self.AttrName.PAD, value_type=int, size=4, value_mem=self._attr_value_mem[self.AttrName.PAD], occurence_type=OccurenceType.OPTIONAL, default_value=[0, 0, 0, 0], annotation=r"""padding size, only effective when pad mode is PADDING, [" "left, right, top, bottom],""") self._attrs[self.AttrName.GROUP] = NndctIrAttr( name=self.AttrName.GROUP, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.GROUP], occurence_type=OccurenceType.OPTIONAL, default_value=1, annotation=r"""group""") self._attrs[self.AttrName.BIAS_TERM] = NndctIrAttr( name=self.AttrName.BIAS_TERM, value_type=bool, size=1, value_mem=self._attr_value_mem[self.AttrName.BIAS_TERM], occurence_type=OccurenceType.REQUIRED, annotation=r"""whether bias exist""") self._attrs[self.AttrName.IN_DIM] = NndctIrAttr( name=self.AttrName.IN_DIM, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.IN_DIM], occurence_type=OccurenceType.REQUIRED, annotation=r"""in_channels""") self._attrs[self.AttrName.OUT_DIM] = NndctIrAttr( name=self.AttrName.OUT_DIM, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.OUT_DIM], occurence_type=OccurenceType.REQUIRED, annotation=r"""out_channels""") class Conv3d(Operation): @unique class AttrName(AutoName): KERNEL = auto() STRIDE = auto() DILATION = auto() GROUP = auto() PAD_MODE = auto() PAD = auto() BIAS_TERM = auto() IN_DIM = auto() OUT_DIM = auto() OUTPUT_PAD = auto() @unique class ParamName(AutoName): WEIGHTS = auto() BIAS = auto() def __init__(self, *args, **kwargs) -> None: super().__init__(*args, **kwargs) self._attr_value_mem = { self.AttrName.KERNEL: [None, None, None], self.AttrName.STRIDE: [None, None, None], self.AttrName.DILATION: [None, None, None], self.AttrName.GROUP: [None], self.AttrName.PAD_MODE: [None], self.AttrName.PAD: [None, None, None, None, None, None], self.AttrName.OUTPUT_PAD: [None, None, None, None, None, None], self.AttrName.BIAS_TERM: [None], self.AttrName.IN_DIM: [None], self.AttrName.OUT_DIM: [None], } self._attrs[self.AttrName.KERNEL] = NndctIrAttr( name=self.AttrName.KERNEL, value_type=int, size=3, value_mem=self._attr_value_mem[self.AttrName.KERNEL], occurence_type=OccurenceType.REQUIRED, annotation=r"""kernel size, [kernel_w, kernel_h, kernel_d]""") self._attrs[self.AttrName.STRIDE] = NndctIrAttr( name=self.AttrName.STRIDE, value_type=int, size=None, value_mem=self._attr_value_mem[self.AttrName.STRIDE], occurence_type=OccurenceType.REQUIRED, annotation=r"""stride [stride_w, stride_h, stride_d]""") self._attrs[self.AttrName.DILATION] = NndctIrAttr( name=self.AttrName.DILATION, value_type=int, size=None, value_mem=self._attr_value_mem[self.AttrName.DILATION], occurence_type=OccurenceType.OPTIONAL, default_value=[1, 1, 1], annotation=r"""dilation, [dilation_w, dilation_h, dilation_d]""") self._attrs[self.AttrName.GROUP] = NndctIrAttr( name=self.AttrName.GROUP, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.GROUP], occurence_type=OccurenceType.OPTIONAL, default_value=1, annotation=r"""group""") self._attrs[self.AttrName.PAD_MODE] = NndctIrAttr( name=self.AttrName.PAD_MODE, value_type=str, size=1, value_mem=self._attr_value_mem[self.AttrName.PAD_MODE], occurence_type=OccurenceType.REQUIRED, annotation=r"""We support 4 padding mode: `FLOOR, CEIL, SAME, VALID`. 
" "For example, when you parsing models from other frameworks, " "`caffe, pytorch->\"FLOOR\", tensorflow->\"SAME\" or \"VALID\"`""") self._attrs[self.AttrName.PAD] = NndctIrAttr( name=self.AttrName.PAD, value_type=int, size=6, value_mem=self._attr_value_mem[self.AttrName.PAD], occurence_type=OccurenceType.OPTIONAL, default_value=[0, 0, 0, 0, 0, 0], annotation=r"""padding size, only effective when pad mode is PADDING, [" "left, right, top, bottom, near, far],""") self._attrs[self.AttrName.OUTPUT_PAD] = NndctIrAttr( name=self.AttrName.OUTPUT_PAD, value_type=int, size=6, value_mem=self._attr_value_mem[self.AttrName.OUTPUT_PAD], occurence_type=OccurenceType.OPTIONAL, default_value=[0, 0, 0, 0, 0, 0], annotation=r"""additional size added to one side of each dimension in the output, [" "left, right, top, bottom, near, far],""") self._attrs[self.AttrName.BIAS_TERM] = NndctIrAttr( name=self.AttrName.BIAS_TERM, value_type=bool, size=None, value_mem=self._attr_value_mem[self.AttrName.BIAS_TERM], occurence_type=OccurenceType.REQUIRED, annotation=r"""whether bias exist""") self._attrs[self.AttrName.IN_DIM] = NndctIrAttr( name=self.AttrName.IN_DIM, value_type=int, size=None, value_mem=self._attr_value_mem[self.AttrName.IN_DIM], occurence_type=OccurenceType.REQUIRED, annotation=r"""in_channels""") self._attrs[self.AttrName.OUT_DIM] = NndctIrAttr( name=self.AttrName.OUT_DIM, value_type=int, size=None, value_mem=self._attr_value_mem[self.AttrName.OUT_DIM], occurence_type=OccurenceType.REQUIRED, annotation=r"""out_channels""") class BatchNorm(Operation): @unique class AttrName(AutoName): EPSILON = auto() SCALE = auto() CENTER = auto() OUT_DIM = auto() AXIS = auto() @unique class ParamName(AutoName): GAMMA = auto() BETA = auto() MOVING_MEAN = auto() MOVING_VAR = auto() def __init__(self, *args, **kwargs) -> None: super(BatchNorm, self).__init__(*args, **kwargs) self._attr_value_mem = { self.AttrName.EPSILON: [None], self.AttrName.SCALE: [None], self.AttrName.CENTER: [None], self.AttrName.OUT_DIM: [None], self.AttrName.AXIS: [None] } self._attrs[self.AttrName.EPSILON] = NndctIrAttr( name=self.AttrName.EPSILON, value_type=float, size=1, value_mem=self._attr_value_mem[self.AttrName.EPSILON], occurence_type=OccurenceType.REQUIRED, annotation=r"""epsilon""") self._attrs[self.AttrName.SCALE] = NndctIrAttr( name=self.AttrName.SCALE, value_type=bool, size=1, value_mem=self._attr_value_mem[self.AttrName.SCALE], occurence_type=OccurenceType.REQUIRED, annotation=r"""scale""") self._attrs[self.AttrName.CENTER] = NndctIrAttr( name=self.AttrName.CENTER, value_type=bool, size=1, value_mem=self._attr_value_mem[self.AttrName.CENTER], occurence_type=OccurenceType.REQUIRED, annotation=r"""center""") self._attrs[self.AttrName.OUT_DIM] = NndctIrAttr( name=self.AttrName.OUT_DIM, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.OUT_DIM], occurence_type=OccurenceType.REQUIRED, annotation=r"""num features""") self._attrs[self.AttrName.AXIS] = NndctIrAttr( name=self.AttrName.AXIS, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.AXIS], occurence_type=OccurenceType.REQUIRED, annotation=r"""the axis of the input to implement batchnorm""") class Dense(Operation): @unique class AttrName(AutoName): BIAS_TERM = auto() IN_DIM = auto() OUT_DIM = auto() @unique class ParamName(AutoName): WEIGHTS = auto() BIAS = auto() def __init__(self, *args, **kwargs) -> None: super(Dense, self).__init__(*args, **kwargs) self._attr_value_mem = { self.AttrName.BIAS_TERM: [None], self.AttrName.IN_DIM: [None], 
self.AttrName.OUT_DIM: [None], } self._attrs[self.AttrName.BIAS_TERM] = NndctIrAttr( name=self.AttrName.BIAS_TERM, value_type=bool, size=1, value_mem=self._attr_value_mem[self.AttrName.BIAS_TERM], occurence_type=OccurenceType.REQUIRED, annotation=r"""whether bias exist""") self._attrs[self.AttrName.IN_DIM] = NndctIrAttr( name=self.AttrName.IN_DIM, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.IN_DIM], occurence_type=OccurenceType.REQUIRED, annotation=r"""in_channels""") self._attrs[self.AttrName.OUT_DIM] = NndctIrAttr( name=self.AttrName.OUT_DIM, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.OUT_DIM], occurence_type=OccurenceType.REQUIRED, annotation=r"""out_channels""") class Concat(Operation): @unique class AttrName(AutoName): AXIS = auto() def __init__(self, *args, **kwargs) -> None: super(Concat, self).__init__(*args, **kwargs) self._attr_value_mem = { self.AttrName.AXIS: [None], } self._attrs[self.AttrName.AXIS] = NndctIrAttr( name=self.AttrName.AXIS, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.AXIS], occurence_type=OccurenceType.REQUIRED, annotation=r"""specified axis""") class Shape(Operation): @unique class AttrName(AutoName): AXIS = auto() def __init__(self, *args, **kwargs) -> None: super(Shape, self).__init__(*args, **kwargs) self._attr_value_mem = { self.AttrName.AXIS: [None], } self._attrs[self.AttrName.AXIS] = NndctIrAttr( name=self.AttrName.AXIS, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.AXIS], occurence_type=OccurenceType.REQUIRED, annotation=r"""specified axis""") class Reshape(Operation): @unique class AttrName(AutoName): SHAPE = auto() def __init__(self, *args, **kwargs) -> None: super(Reshape, self).__init__(*args, **kwargs) self._attr_value_mem = { self.AttrName.SHAPE: [], } self._attrs[self.AttrName.SHAPE] = NndctIrAttr( name=self.AttrName.SHAPE, value_type=(int, Tensor), size=None, value_mem=self._attr_value_mem[self.AttrName.SHAPE], occurence_type=OccurenceType.REQUIRED, annotation=r"""the target shape""") class MaxPool(Operation): @unique class AttrName(AutoName): KERNEL = auto() STRIDE = auto() DILATION = auto() PAD_MODE = auto() PAD = auto() GLOBAL = auto() def __init__(self, *args, **kwargs) -> None: super(MaxPool, self).__init__(*args, **kwargs) self._attr_value_mem = { self.AttrName.KERNEL: [None, None], self.AttrName.STRIDE: [None, None], self.AttrName.PAD_MODE: [None], self.AttrName.PAD: [None, None, None, None], self.AttrName.GLOBAL: [None], } self._attrs[self.AttrName.KERNEL] = NndctIrAttr( name=self.AttrName.KERNEL, value_type=int, size=2, value_mem=self._attr_value_mem[self.AttrName.KERNEL], occurence_type=OccurenceType.REQUIRED, annotation=r"""kernel size, [kernel_w, kernel_h]""") self._attrs[self.AttrName.STRIDE] = NndctIrAttr( name=self.AttrName.STRIDE, value_type=int, size=2, value_mem=self._attr_value_mem[self.AttrName.STRIDE], occurence_type=OccurenceType.REQUIRED, annotation=r"""stride [stride_w, stride_h]""") self._attrs[self.AttrName.PAD_MODE] = NndctIrAttr( name=self.AttrName.PAD_MODE, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.PAD_MODE], occurence_type=OccurenceType.REQUIRED, annotation=r"""padding mode, 0-PADDING, 1-SAME, 2-VALID, 3-CEIL for the FUTURE. use attr pad. SAME, make output with same width and height as input. 
VALID, no padding""") self._attrs[self.AttrName.PAD] = NndctIrAttr( name=self.AttrName.PAD, value_type=int, size=4, value_mem=self._attr_value_mem[self.AttrName.PAD], occurence_type=OccurenceType.OPTIONAL, default_value=[0, 0, 0, 0], annotation=r"""padding size, only effective when pad mode is PADDING, [" "left, right, top, bottom],""") self._attrs[self.AttrName.GLOBAL] = NndctIrAttr( name=self.AttrName.GLOBAL, value_type=bool, size=1, value_mem=self._attr_value_mem[self.AttrName.GLOBAL], occurence_type=OccurenceType.OPTIONAL, default_value=False, annotation=r"""global""") class MaxPool1d(Operation): @unique class AttrName(AutoName): KERNEL = auto() STRIDE = auto() DILATION = auto() PAD_MODE = auto() PAD = auto() GLOBAL = auto() COUNT_INCLUDE_PAD = auto() def __init__(self, *args, **kwargs) -> None: super(MaxPool1d, self).__init__(*args, **kwargs) self._attr_value_mem = { self.AttrName.KERNEL: [None], self.AttrName.STRIDE: [None], self.AttrName.PAD_MODE: [None], self.AttrName.PAD: [None, None], self.AttrName.GLOBAL: [None], self.AttrName.COUNT_INCLUDE_PAD: [None] } self._attrs[self.AttrName.KERNEL] = NndctIrAttr( name=self.AttrName.KERNEL, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.KERNEL], occurence_type=OccurenceType.REQUIRED, annotation=r"""kernel size, [kernel_w, kernel_h]""") self._attrs[self.AttrName.STRIDE] = NndctIrAttr( name=self.AttrName.STRIDE, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.STRIDE], occurence_type=OccurenceType.REQUIRED, annotation=r"""stride [stride_w, stride_h]""") self._attrs[self.AttrName.PAD_MODE] = NndctIrAttr( name=self.AttrName.PAD_MODE, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.PAD_MODE], occurence_type=OccurenceType.REQUIRED, annotation=r"""padding mode, 0-PADDING, 1-SAME, 2-VALID, 3-CEIL for the FUTURE. use attr pad. SAME, make output with same width and height as input. 
VALID, no padding""") self._attrs[self.AttrName.PAD] = NndctIrAttr( name=self.AttrName.PAD, value_type=int, size=2, value_mem=self._attr_value_mem[self.AttrName.PAD], occurence_type=OccurenceType.OPTIONAL, default_value=[0, 0], annotation=r"""padding size, only effective when pad mode is PADDING, [" "left, right, top, bottom],""") self._attrs[self.AttrName.GLOBAL] = NndctIrAttr( name=self.AttrName.GLOBAL, value_type=bool, size=1, value_mem=self._attr_value_mem[self.AttrName.GLOBAL], occurence_type=OccurenceType.OPTIONAL, default_value=False, annotation=r"""global""") self._attrs[self.AttrName.COUNT_INCLUDE_PAD] = NndctIrAttr( name=self.AttrName.COUNT_INCLUDE_PAD, value_type=bool, size=1, value_mem=self._attr_value_mem[self.AttrName.COUNT_INCLUDE_PAD], occurence_type=OccurenceType.OPTIONAL, default_value=[True], annotation=r"""when True, will include the zero-padding in the averaging calculation""") class AvgPool(Operation): @unique class AttrName(AutoName): KERNEL = auto() STRIDE = auto() PAD_MODE = auto() PAD = auto() GLOBAL = auto() COUNT_INCLUDE_PAD = auto() def __init__(self, *args, **kwargs) -> None: super(AvgPool, self).__init__(*args, **kwargs) self._attr_value_mem = { self.AttrName.KERNEL: [None, None], self.AttrName.STRIDE: [None, None], self.AttrName.PAD_MODE: [None], self.AttrName.PAD: [None, None, None, None], self.AttrName.GLOBAL: [None], self.AttrName.COUNT_INCLUDE_PAD: [None] } self._attrs[self.AttrName.KERNEL] = NndctIrAttr( name=self.AttrName.KERNEL, value_type=int, size=2, value_mem=self._attr_value_mem[self.AttrName.KERNEL], occurence_type=OccurenceType.REQUIRED, annotation=r"""kernel size, [kernel_w, kernel_h]""") self._attrs[self.AttrName.STRIDE] = NndctIrAttr( name=self.AttrName.STRIDE, value_type=int, size=2, value_mem=self._attr_value_mem[self.AttrName.STRIDE], occurence_type=OccurenceType.REQUIRED, annotation=r"""stride [stride_w, stride_h]""") self._attrs[self.AttrName.PAD_MODE] = NndctIrAttr( name=self.AttrName.PAD_MODE, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.PAD_MODE], occurence_type=OccurenceType.REQUIRED, annotation=r"""padding mode, 0-PADDING, 1-SAME, 2-VALID, 3-CEIL for the FUTURE. use attr pad. SAME, make output with same width and height as input. 
VALID, no padding""") self._attrs[self.AttrName.PAD] = NndctIrAttr( name=self.AttrName.PAD, value_type=int, size=4, value_mem=self._attr_value_mem[self.AttrName.PAD], occurence_type=OccurenceType.OPTIONAL, default_value=[0, 0, 0, 0], annotation=r"""padding size, only effective when pad mode is PADDING, [" "left, right, top, bottom],""") self._attrs[self.AttrName.GLOBAL] = NndctIrAttr( name=self.AttrName.GLOBAL, value_type=bool, size=1, value_mem=self._attr_value_mem[self.AttrName.GLOBAL], occurence_type=OccurenceType.OPTIONAL, default_value=False, annotation=r"""global""") self._attrs[self.AttrName.COUNT_INCLUDE_PAD] = NndctIrAttr( name=self.AttrName.COUNT_INCLUDE_PAD, value_type=bool, size=1, value_mem=self._attr_value_mem[self.AttrName.COUNT_INCLUDE_PAD], occurence_type=OccurenceType.OPTIONAL, default_value=[True], annotation=r"""when True, will include the zero-padding in the averaging calculation""") class Flatten(Operation): @unique class AttrName(AutoName): START_DIM = "start_axis" END_DIM = "end_axis" def __init__(self, *args, **kwargs) -> None: super(Flatten, self).__init__(*args, **kwargs) self._attr_value_mem = { self.AttrName.START_DIM: [None], self.AttrName.END_DIM: [None], } self._attrs[self.AttrName.START_DIM] = NndctIrAttr( name=self.AttrName.START_DIM, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.START_DIM], occurence_type=OccurenceType.REQUIRED, annotation=r"""the first dim to flatten""") self._attrs[self.AttrName.END_DIM] = NndctIrAttr( name=self.AttrName.END_DIM, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.END_DIM], occurence_type=OccurenceType.REQUIRED, annotation=r"""the last dim to flatten""") class PermuteInvariantOp(Operation): @unique class AttrName(AutoName): DIMS = "axis" KEEP_DIMS = auto() def __init__(self, *args, **kwargs) -> None: super().__init__(*args, **kwargs) self._attr_value_mem = { self.AttrName.DIMS: [], self.AttrName.KEEP_DIMS: [None], } self._attrs[self.AttrName.DIMS] = NndctIrAttr( name=self.AttrName.DIMS, value_type=int, size=None, value_mem=self._attr_value_mem[self.AttrName.DIMS], occurence_type=OccurenceType.REQUIRED, annotation=r"""The dimensions to reduce. List of integers""") self._attrs[self.AttrName.KEEP_DIMS] = NndctIrAttr( name=self.AttrName.KEEP_DIMS, value_type=bool, size=1, value_mem=self._attr_value_mem[self.AttrName.KEEP_DIMS], occurence_type=OccurenceType.REQUIRED, annotation=r"""specify whether the reduced dimension is kept or not.""") class Permute(Operation): @unique class AttrName(AutoName): ORDER = auto() def __init__(self, op_type, *args, **kwargs) -> None: super(Permute, self).__init__(op_type) self._attr_value_mem = { self.AttrName.ORDER: [], } self._attrs[self.AttrName.ORDER] = NndctIrAttr( name=self.AttrName.ORDER, value_type=(int, Tensor), size=None, value_mem=self._attr_value_mem[self.AttrName.ORDER], occurence_type=OccurenceType.REQUIRED, annotation=r"""The dimensions to reduce. List of integers""") class Softmax(Operation): @unique class AttrName(AutoName): AXIS = auto() def __init__(self) -> None: super(Softmax, self).__init__(NNDCT_OP.SOFTMAX) self._attr_value_mem = { self.AttrName.AXIS: [None], } self._attrs[self.AttrName.AXIS] = NndctIrAttr( name=self.AttrName.AXIS, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.AXIS], occurence_type=OccurenceType.REQUIRED, annotation=r"""the dimension softmax would be performed on. 
default is the last dimension.""") class Lstm(Operation): @unique class AttrName(AutoName): INPUT_SIZE = auto() HIDDEN_SIZE = auto() BIDIRECTIONAL = auto() NUM_LAYERS = auto() BATCH_FIRST = auto() @unique class ParamName(AutoName): WEIGHT_IH = auto() WEIGHT_HH = auto() WEIGHT_IH_REVERSE = auto() WEIGHT_HH_REVERSE = auto() BIAS = auto() BIAS_REVERSE = auto() def __init__(self, *args, **kwargs) -> None: super(Lstm, self).__init__(*args, **kwargs) self._attr_value_mem = { self.AttrName.INPUT_SIZE: [None], self.AttrName.HIDDEN_SIZE: [None], self.AttrName.BIDIRECTIONAL: [None], self.AttrName.NUM_LAYERS: [None], self.AttrName.BATCH_FIRST: [None], } self._attrs[self.AttrName.INPUT_SIZE] = NndctIrAttr( name=self.AttrName.INPUT_SIZE, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.INPUT_SIZE], occurence_type=OccurenceType.REQUIRED, annotation=r"""input size of LSTM.""") self._attrs[self.AttrName.HIDDEN_SIZE] = NndctIrAttr( name=self.AttrName.HIDDEN_SIZE, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.HIDDEN_SIZE], occurence_type=OccurenceType.REQUIRED, annotation=r"""hidden size of LSTM.""") self._attrs[self.AttrName.BIDIRECTIONAL] = NndctIrAttr( name=self.AttrName.BIDIRECTIONAL, value_type=bool, size=1, value_mem=self._attr_value_mem[self.AttrName.BIDIRECTIONAL], occurence_type=OccurenceType.REQUIRED, annotation=r""" If True, means a bidirectional LSTM.""") self._attrs[self.AttrName.NUM_LAYERS] = NndctIrAttr( name=self.AttrName.NUM_LAYERS, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.NUM_LAYERS], occurence_type=OccurenceType.REQUIRED, annotation=r"""Number of recurrent layers""") self._attrs[self.AttrName.BATCH_FIRST] = NndctIrAttr( name=self.AttrName.BATCH_FIRST, value_type=bool, size=1, value_mem=self._attr_value_mem[self.AttrName.BATCH_FIRST], occurence_type=OccurenceType.REQUIRED, annotation=r""" If True, then the input and output tensors are provided as (batch, seq, feature)""" ) class Gru(Operation): @unique class AttrName(AutoName): INPUT_SIZE = auto() HIDDEN_SIZE = auto() BIDIRECTIONAL = auto() NUM_LAYERS = auto() BATCH_FIRST = auto() @unique class ParamName(AutoName): WEIGHT_IH = auto() WEIGHT_HH = auto() WEIGHT_IH_REVERSE = auto() WEIGHT_HH_REVERSE = auto() BIAS_IH = auto() BIAS_HH = auto() BIAS_IH_REVERSE = auto() BIAS_HH_REVERSE = auto() def __init__(self, *args, **kwargs) -> None: super(Gru, self).__init__(*args, **kwargs) self._attr_value_mem = { self.AttrName.INPUT_SIZE: [None], self.AttrName.HIDDEN_SIZE: [None], self.AttrName.BIDIRECTIONAL: [None], self.AttrName.NUM_LAYERS: [None], self.AttrName.BATCH_FIRST: [None], } self._attrs[self.AttrName.INPUT_SIZE] = NndctIrAttr( name=self.AttrName.INPUT_SIZE, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.INPUT_SIZE], occurence_type=OccurenceType.REQUIRED, annotation=r"""input size of GRU.""") self._attrs[self.AttrName.HIDDEN_SIZE] = NndctIrAttr( name=self.AttrName.HIDDEN_SIZE, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.HIDDEN_SIZE], occurence_type=OccurenceType.REQUIRED, annotation=r"""hidden size of GRU.""") self._attrs[self.AttrName.BIDIRECTIONAL] = NndctIrAttr( name=self.AttrName.BIDIRECTIONAL, value_type=bool, size=1, value_mem=self._attr_value_mem[self.AttrName.BIDIRECTIONAL], occurence_type=OccurenceType.REQUIRED, annotation=r""" If True, means a bidirectional GRU.""") self._attrs[self.AttrName.NUM_LAYERS] = NndctIrAttr( name=self.AttrName.NUM_LAYERS, value_type=int, size=1, 
value_mem=self._attr_value_mem[self.AttrName.NUM_LAYERS], occurence_type=OccurenceType.REQUIRED, annotation=r"""Number of recurrent layers""") self._attrs[self.AttrName.BATCH_FIRST] = NndctIrAttr( name=self.AttrName.BATCH_FIRST, value_type=bool, size=1, value_mem=self._attr_value_mem[self.AttrName.BATCH_FIRST], occurence_type=OccurenceType.REQUIRED, annotation=r""" If True, then the input and output tensors are provided as (batch, seq, feature)""" ) class StridedSlice(Operation): @unique class AttrName(AutoName): BEGIN = auto() END = auto() STRIDES = auto() BEGIN_MASK = auto() END_MASK = auto() ELLIPSIS_MASK = auto() NEW_AXIS_MASK = auto() SHRINK_AXIS_MASK = auto() def __init__(self) -> None: super(StridedSlice, self).__init__(NNDCT_OP.STRIDED_SLICE) self._attr_value_mem = { self.AttrName.BEGIN: [], self.AttrName.END: [], self.AttrName.STRIDES: [], self.AttrName.BEGIN_MASK: [None], self.AttrName.END_MASK: [None], self.AttrName.ELLIPSIS_MASK: [None], self.AttrName.NEW_AXIS_MASK: [None], self.AttrName.SHRINK_AXIS_MASK: [None] } self._attrs[self.AttrName.BEGIN] = NndctIrAttr( name=self.AttrName.BEGIN, value_type=int, size=None, value_mem=self._attr_value_mem[self.AttrName.BEGIN], occurence_type=OccurenceType.REQUIRED, annotation=r"""start location of slicing (included)""") self._attrs[self.AttrName.END] = NndctIrAttr( name=self.AttrName.END, value_type=int, size=None, value_mem=self._attr_value_mem[self.AttrName.END], occurence_type=OccurenceType.REQUIRED, annotation=r"""end location of slicing (excluded)""") self._attrs[self.AttrName.STRIDES] = NndctIrAttr( name=self.AttrName.STRIDES, value_type=int, size=None, value_mem=self._attr_value_mem[self.AttrName.STRIDES], occurence_type=OccurenceType.REQUIRED, annotation=r"""strides of slicing""") self._attrs[self.AttrName.BEGIN_MASK] = NndctIrAttr( name=self.AttrName.BEGIN_MASK, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.BEGIN_MASK], default_value=0, occurence_type=OccurenceType.OPTIONAL, annotation=r"""If the ith bit of begin_mask is set, begin[i] is ignored and the fullest possible range in that dimension is used instead.""") self._attrs[self.AttrName.END_MASK] = NndctIrAttr( name=self.AttrName.END_MASK, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.END_MASK], default_value=0, occurence_type=OccurenceType.OPTIONAL, annotation=r"""If the ith bit of end_mask is set, end[i] is ignored and the fullest possible range in that dimension is used instead, except with the end range.""") self._attrs[self.AttrName.ELLIPSIS_MASK] = NndctIrAttr( name=self.AttrName.ELLIPSIS_MASK, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.ELLIPSIS_MASK], default_value=0, occurence_type=OccurenceType.OPTIONAL, annotation=r"""If the ith bit of ellipsis_mask is set, as many unspecified dimensions as needed will be inserted between other dimensions. 
Only one non-zero bit is allowed in ellipsis_mask.""") self._attrs[self.AttrName.NEW_AXIS_MASK] = NndctIrAttr( name=self.AttrName.NEW_AXIS_MASK, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.NEW_AXIS_MASK], default_value=0, occurence_type=OccurenceType.OPTIONAL, annotation=r"""If the ith bit of new_axis_mask is set, then begin, end, and stride are ignored and a new length 1 dimension is added at this point in the output tensor.""") self._attrs[self.AttrName.SHRINK_AXIS_MASK] = NndctIrAttr( name=self.AttrName.SHRINK_AXIS_MASK, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.SHRINK_AXIS_MASK], default_value=0, occurence_type=OccurenceType.OPTIONAL, annotation=r"""If the ith bit of shrink_axis_mask is set, it implies that taking on the value at index begin[i]. end[i] and strides[i] are ignored in this case.""") class BinaryOp(Operation): @unique class AttrName(AutoName): INPUT = auto() OTHER = auto() def __init__(self, *args, **kwargs) -> None: super().__init__(*args, **kwargs) self._attr_value_mem = { self.AttrName.INPUT: [None], self.AttrName.OTHER: [None], } self._attrs[self.AttrName.INPUT] = NndctIrAttr( name=self.AttrName.INPUT, value_type=(int, float, bool, Tensor, np.ndarray), size=1, value_mem=self._attr_value_mem[self.AttrName.INPUT], occurence_type=OccurenceType.REQUIRED, map_to_xir=False, annotation=r"""the first input tensor.""") self._attrs[self.AttrName.OTHER] = NndctIrAttr( name=self.AttrName.OTHER, value_type=(int, float, Tensor, np.ndarray), size=1, value_mem=self._attr_value_mem[self.AttrName.OTHER], occurence_type=OccurenceType.REQUIRED, map_to_xir=False, annotation=r"""the second input tensor.""") class Sub(Operation): @unique class AttrName(AutoName): INPUT = auto() OTHER = auto() def __init__(self, *args, **kwargs) -> None: super().__init__(*args, **kwargs) self._attr_value_mem = { self.AttrName.INPUT: [None], self.AttrName.OTHER: [None], } self._attrs[self.AttrName.INPUT] = NndctIrAttr( name=self.AttrName.INPUT, value_type=(int, float, Tensor, np.ndarray), size=1, value_mem=self._attr_value_mem[self.AttrName.INPUT], occurence_type=OccurenceType.REQUIRED, annotation=r"""the first input tensor.""") self._attrs[self.AttrName.OTHER] = NndctIrAttr( name=self.AttrName.OTHER, value_type=(int, float, Tensor, np.ndarray), size=1, value_mem=self._attr_value_mem[self.AttrName.OTHER], occurence_type=OccurenceType.REQUIRED, annotation=r"""the second input tensor.""") class Pad(Operation): @unique class AttrName(AutoName): PAD_WITH = "paddings" MODE = auto() CONSTANT_VALUES = auto() def __init__(self) -> None: super().__init__(NNDCT_OP.PAD) self._attr_value_mem = { self.AttrName.PAD_WITH: [None, None, None, None, None, None, None, None], self.AttrName.MODE: [None], self.AttrName.CONSTANT_VALUES: [None, None, None, None, None, None, None, None] } self._attrs[self.AttrName.PAD_WITH] = NndctIrAttr( name=self.AttrName.PAD_WITH, value_type=int, size=8, value_mem=self._attr_value_mem[self.AttrName.PAD_WITH], occurence_type=OccurenceType.REQUIRED, annotation=r"""0 , 0 , left, right, top, bottom, 0, 0""") self._attrs[self.AttrName.MODE] = NndctIrAttr( name=self.AttrName.MODE, value_type=int, size=1, value_mem=self._attr_value_mem[self.AttrName.MODE], occurence_type=OccurenceType.REQUIRED, annotation=r"""The padding mode. 
0:'CONSTANT', 1:'REFLECT', 2:'SYMMETRIC'].""" ) self._attrs[self.AttrName.CONSTANT_VALUES] = NndctIrAttr( name=self.AttrName.CONSTANT_VALUES, value_type=float, size=8, value_mem=self._attr_value_mem[self.AttrName.CONSTANT_VALUES], occurence_type=OccurenceType.REQUIRED, annotation=r"""the value set into the padded locations""") class LeakyReLU(Operation): @unique class AttrName(AutoName): ALPHA = auto() def __init__(self) -> None: super().__init__(NNDCT_OP.LEAKY_RELU) self._attr_value_mem = { self.AttrName.ALPHA: [None], } self._attrs[self.AttrName.ALPHA] = NndctIrAttr( name=self.AttrName.ALPHA, value_type=float, size=1, value_mem=self._attr_value_mem[self.AttrName.ALPHA], occurence_type=OccurenceType.REQUIRED, annotation=r"""negative slope""") class Resize(Operation): @unique class AttrName(AutoName): SIZE = auto() SCALE = auto() ALIGN_CORNERS = auto() HALF_PIXEL_CENTERS = auto() MODE = auto() def __init__(self) -> None: super().__init__(NNDCT_OP.RESIZE) self._attr_value_mem = { self.AttrName.SIZE: [None, None], self.AttrName.SCALE: [None, None], self.AttrName.ALIGN_CORNERS: [None], self.AttrName.HALF_PIXEL_CENTERS: [None], self.AttrName.MODE: [None], } self._attrs[self.AttrName.SIZE] = NndctIrAttr( name=self.AttrName.SIZE, value_type=(int, Tensor), size=2, value_mem=self._attr_value_mem[self.AttrName.SIZE], default_value=[0, 0], occurence_type=OccurenceType.OPTIONAL, annotation=r"""output spatial size, [size_w, size_h]""") self._attrs[self.AttrName.SCALE] = NndctIrAttr( name=self.AttrName.SCALE, value_type=float, size=2, value_mem=self._attr_value_mem[self.AttrName.SCALE], default_value=[1.0, 1.0], occurence_type=OccurenceType.OPTIONAL, annotation=r"""New size = Origin size * scale. {scale_w, scale_h}.""") self._attrs[self.AttrName.ALIGN_CORNERS] = NndctIrAttr( name=self.AttrName.ALIGN_CORNERS, value_type=bool, size=1, value_mem=self._attr_value_mem[self.AttrName.ALIGN_CORNERS], default_value=False, occurence_type=OccurenceType.OPTIONAL, annotation=r"""It must be set When mode is 3.If true, the centers of the 4 corner pixels of the input and output tensors are aligned, preserving the values at the corner pixels. Defaults to false.""") self._attrs[self.AttrName.HALF_PIXEL_CENTERS] = NndctIrAttr( name=self.AttrName.HALF_PIXEL_CENTERS, value_type=bool, size=1, value_mem=self._attr_value_mem[self.AttrName.HALF_PIXEL_CENTERS], default_value=False, occurence_type=OccurenceType.OPTIONAL, annotation=r"""half_pixel_centers is false by default in, tf.resize_bilinear() and tf.resize_nearest_neighbor(). 
is true by default in tf.upsampling2d(), but the version of tf should be > r1.13""") self._attrs[self.AttrName.MODE] = NndctIrAttr( name=self.AttrName.MODE, value_type=str, size=1, value_mem=self._attr_value_mem[self.AttrName.MODE], occurence_type=OccurenceType.REQUIRED, annotation=r"""OPENCV-NEAREST -> 0, OPENCV-BILINEAR -> 1, Tensorflow-NEAREST -> 2, Tensorflow-BILINEAR -> 3, To be improved!""") class Resize3d(Operation): @unique class AttrName(AutoName): SIZE = auto() SCALE = auto() ALIGN_CORNERS = auto() HALF_PIXEL_CENTERS = auto() MODE = auto() def __init__(self) -> None: super().__init__(NNDCT_OP.RESIZE_3D) self._attr_value_mem = { self.AttrName.SIZE: [None, None, None], self.AttrName.SCALE: [None, None, None], self.AttrName.ALIGN_CORNERS: [None], self.AttrName.HALF_PIXEL_CENTERS: [None], self.AttrName.MODE: [None], } self._attrs[self.AttrName.SIZE] = NndctIrAttr( name=self.AttrName.SIZE, value_type=(int, Tensor), size=3, value_mem=self._attr_value_mem[self.AttrName.SIZE], default_value=[0, 0, 0], occurence_type=OccurenceType.OPTIONAL, annotation=r"""output spatial size, [size_h, size_w, size_d]""") self._attrs[self.AttrName.SCALE] = NndctIrAttr( name=self.AttrName.SCALE, value_type=float, size=3, value_mem=self._attr_value_mem[self.AttrName.SCALE], default_value=[1.0, 1.0, 1.0], occurence_type=OccurenceType.OPTIONAL, annotation=r"""New size = Origin size * scale. {scale_h, scale_w, scale_d}.""") self._attrs[self.AttrName.ALIGN_CORNERS] = NndctIrAttr( name=self.AttrName.ALIGN_CORNERS, value_type=bool, size=1, value_mem=self._attr_value_mem[self.AttrName.ALIGN_CORNERS], default_value=False, occurence_type=OccurenceType.OPTIONAL, annotation=r"""It must be set When mode is 3.If true, the centers of the 4 corner pixels of the input and output tensors are aligned, preserving the values at the corner pixels. Defaults to false.""") self._attrs[self.AttrName.HALF_PIXEL_CENTERS] = NndctIrAttr( name=self.AttrName.HALF_PIXEL_CENTERS, value_type=bool, size=1, value_mem=self._attr_value_mem[self.AttrName.HALF_PIXEL_CENTERS], default_value=False, occurence_type=OccurenceType.OPTIONAL, annotation=r"""half_pixel_centers is false by default in, tf.resize_bilinear() and tf.resize_nearest_neighbor(). is true by default in tf.upsampling2d(), but the version of tf should be > r1.13""") self._attrs[self.AttrName.MODE] = NndctIrAttr( name=self.AttrName.MODE, value_type=str, size=1, value_mem=self._attr_value_mem[self.AttrName.MODE], occurence_type=OccurenceType.REQUIRED, annotation=r"""Trilinear""") class Constant(Operation): @unique class AttrName(AutoName): DATA = auto() def __init__(self, nndct_op_type) -> None: super().__init__(nndct_op_type) self._attr_value_mem = { self.AttrName.DATA: [], } self._attrs[self.AttrName.DATA] = NndctIrAttr( name=self.AttrName.DATA, value_type=(int, float, list, Tensor), size=None, value_mem=self._attr_value_mem[self.AttrName.DATA], occurence_type=OccurenceType.REQUIRED, annotation=r"""Constant Parameter""") class Squeeze(Operation): @unique class AttrName(AutoName): DIMS = "axis" def __init__(self) -> None: super().__init__(NNDCT_OP.SQUEEZE) self._attr_value_mem = { self.AttrName.DIMS: [], } self._attrs[self.AttrName.DIMS] = NndctIrAttr( name=self.AttrName.DIMS, value_type=int, size=None, value_mem=self._attr_value_mem[self.AttrName.DIMS], occurence_type=OccurenceType.REQUIRED, annotation=r"""The dimensions to be squeezed. 
The dimension index " // "starts at 0.""") class EmbeddingBag(Operation): @unique class ParamName(AutoName): WEIGHT = auto() class LayerNorm(Operation): @unique class ParamName(AutoName): GAMMA = auto() BETA = auto() class ConstFromShape(Operation): @unique class AttrName(AutoName): SHAPE = auto() def __init__(self, *args, **kwargs) -> None: super().__init__(*args, **kwargs) self._attr_value_mem = { self.AttrName.SHAPE: [], } self._attrs[self.AttrName.SHAPE] = NndctIrAttr( name=self.AttrName.SHAPE, value_type=(int, Tensor), size=None, value_mem=self._attr_value_mem[self.AttrName.SHAPE], occurence_type=OccurenceType.REQUIRED, annotation=r"""the target shape""") class UnaryOp(Operation): @unique class AttrName(AutoName): INPUT = auto() def __init__(self, *args, **kwargs) -> None: super().__init__(*args, **kwargs) self._attr_value_mem = { self.AttrName.INPUT: [None], } self._attrs[self.AttrName.INPUT] = NndctIrAttr( name=self.AttrName.INPUT, value_type=(int, str, float, bool, Tensor, np.ndarray), size=1, value_mem=self._attr_value_mem[self.AttrName.INPUT], occurence_type=OccurenceType.REQUIRED, map_to_xir=False, annotation=r"""the first input tensor.""") class Reorg(Operation): @unique class AttrName(AutoName): SCALE = auto() REVERSE = auto() def __init__(self, nndct_op_type) -> None: super().__init__(nndct_op_type) self._attr_value_mem = { self.AttrName.SCALE: [None], self.AttrName.REVERSE: [None], } self._attrs[self.AttrName.SCALE] = NndctIrAttr( name=self.AttrName.SCALE, value_type=(int, Tensor), size=1, value_mem=self._attr_value_mem[self.AttrName.SCALE], occurence_type=OccurenceType.REQUIRED, annotation=r"""scale for reorg""") self._attrs[self.AttrName.REVERSE] = NndctIrAttr( name=self.AttrName.REVERSE, value_type=bool, size=1, value_mem=self._attr_value_mem[self.AttrName.REVERSE], occurence_type=OccurenceType.REQUIRED, annotation=r"""reverse""") class Gstiling(Operation): @unique class AttrName(AutoName): STRIDE = auto() REVERSE = auto() def __init__(self, nndct_op_type) -> None: super().__init__(nndct_op_type) self._attr_value_mem = { self.AttrName.STRIDE: [None], self.AttrName.REVERSE: [None], } self._attrs[self.AttrName.STRIDE] = NndctIrAttr( name=self.AttrName.STRIDE, value_type=(int, Tensor), size=1, value_mem=self._attr_value_mem[self.AttrName.STRIDE], occurence_type=OccurenceType.REQUIRED, annotation=r"""stride for feature maps""") self._attrs[self.AttrName.REVERSE] = NndctIrAttr( name=self.AttrName.REVERSE, value_type=bool, size=1, value_mem=self._attr_value_mem[self.AttrName.REVERSE], occurence_type=OccurenceType.REQUIRED, annotation=r"""reverse""") class PixelShuffle(Operation): @unique class AttrName(AutoName): SCALE = auto() UPSCALE = auto() def __init__(self, nndct_op_type) -> None: super().__init__(nndct_op_type) self._attr_value_mem = { self.AttrName.SCALE: [None], self.AttrName.UPSCALE: [None], } self._attrs[self.AttrName.SCALE] = NndctIrAttr( name=self.AttrName.SCALE, value_type=(int, Tensor), size=1, value_mem=self._attr_value_mem[self.AttrName.SCALE], occurence_type=OccurenceType.REQUIRED, annotation=r"""scale for feature maps""") self._attrs[self.AttrName.UPSCALE] = NndctIrAttr( name=self.AttrName.UPSCALE, value_type=bool, size=1, value_mem=self._attr_value_mem[self.AttrName.UPSCALE], occurence_type=OccurenceType.REQUIRED, annotation=r"""upscale or downscale PixelShuffle.""") class Embedding(Operation): @unique class ParamName(AutoName): WEIGHT = auto() class CustomOp(Operation): AttrName = Enum("AttrName", '') def __init__(self, nndct_op_type) -> None: 
super().__init__(nndct_op_type) self._attr_value_mem = {} self.is_custom_op = True def get_attr_name_from_str(self, attr_name): attr_names = [(name, val.value) for name, val in self.AttrName.__members__.items()] if(not attr_names) or (attr_names and all([attr_name != attr[1] for attr in attr_names])): attr_names += [(attr_name.upper(), attr_name.lower())] self.AttrName = Enum("AttrName", attr_names) return getattr(self.AttrName, attr_name.upper()) def _register_attr_by_name(self, attr_name): if attr_name in self.AttrName.__members__: return attr_name = self.get_attr_name_from_str(attr_name) self._attr_value_mem[attr_name] = [None] self._attrs[attr_name] = NndctIrAttr( name=attr_name, value_type=Any, size=None, occurence_type=OccurenceType.REQUIRED, value_mem=self._attr_value_mem[attr_name]) def set_attr_by_name(self, attr_name, value): if attr_name not in self.AttrName.__members__: self._register_attr_by_name(attr_name) attr_name = self.get_attr_name_from_str(attr_name) self.set_attr(attr_name, value)
true
true
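The operation classes in the file above all follow one registration pattern: a nested AttrName enum, a per-attribute value_mem list, and an NndctIrAttr object that keeps a reference to that list, so the op and its attribute share the same storage. The snippet below is a minimal, self-contained sketch of that pattern only; it is not the real nndct API, and SimpleAttr and SimpleDense are hypothetical stand-ins for NndctIrAttr and Dense, written to show why the shared value_mem lists are mutated in place.

from enum import Enum, auto, unique


class SimpleAttr:
    """Hypothetical stand-in for NndctIrAttr: validates values and writes them into a shared list."""

    def __init__(self, name, value_type, size, value_mem):
        self.name = name
        self.value_type = value_type
        self.size = size            # None would mean "any number of values"
        self.value_mem = value_mem  # list object shared with the owning op

    def set(self, value):
        values = list(value) if isinstance(value, (list, tuple)) else [value]
        if self.size is not None and len(values) != self.size:
            raise ValueError(f"{self.name}: expected {self.size} value(s), got {len(values)}")
        if not all(isinstance(v, self.value_type) for v in values):
            raise TypeError(f"{self.name}: expected values of type {self.value_type}")
        self.value_mem[:] = values  # mutate in place so the op sees the same data

    def get(self):
        return self.value_mem[0] if self.size == 1 else list(self.value_mem)


class SimpleDense:
    """Hypothetical stand-in for the Dense op above, with the same three attributes."""

    @unique
    class AttrName(Enum):
        BIAS_TERM = auto()
        IN_DIM = auto()
        OUT_DIM = auto()

    def __init__(self):
        self._attr_value_mem = {name: [None] for name in self.AttrName}
        self._attrs = {
            self.AttrName.BIAS_TERM: SimpleAttr("bias_term", bool, 1,
                                                self._attr_value_mem[self.AttrName.BIAS_TERM]),
            self.AttrName.IN_DIM: SimpleAttr("in_dim", int, 1,
                                             self._attr_value_mem[self.AttrName.IN_DIM]),
            self.AttrName.OUT_DIM: SimpleAttr("out_dim", int, 1,
                                              self._attr_value_mem[self.AttrName.OUT_DIM]),
        }

    def set_attr(self, name, value):
        self._attrs[name].set(value)

    def get_attr(self, name):
        return self._attrs[name].get()


if __name__ == "__main__":
    op = SimpleDense()
    op.set_attr(SimpleDense.AttrName.IN_DIM, 128)
    op.set_attr(SimpleDense.AttrName.OUT_DIM, 10)
    op.set_attr(SimpleDense.AttrName.BIAS_TERM, True)
    print(op.get_attr(SimpleDense.AttrName.IN_DIM),
          op.get_attr(SimpleDense.AttrName.OUT_DIM),
          op.get_attr(SimpleDense.AttrName.BIAS_TERM))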
f70ddc4c4d19b9cebd3863fd65c6a3c4653e8b84
1,276
py
Python
Gathered CTF writeups/ctf-7867/2020/pbctf/queensarah2/graphic.py
mihaid-b/CyberSakura
f60e6b6bfd6898c69b84424b080090ae98f8076c
[ "MIT" ]
1
2022-03-27T06:00:41.000Z
2022-03-27T06:00:41.000Z
Gathered CTF writeups/ctf-7867/2020/pbctf/queensarah2/graphic.py
mihaid-b/CyberSakura
f60e6b6bfd6898c69b84424b080090ae98f8076c
[ "MIT" ]
null
null
null
Gathered CTF writeups/ctf-7867/2020/pbctf/queensarah2/graphic.py
mihaid-b/CyberSakura
f60e6b6bfd6898c69b84424b080090ae98f8076c
[ "MIT" ]
1
2022-03-27T06:01:42.000Z
2022-03-27T06:01:42.000Z
from string import ascii_lowercase
from itertools import product

import gizeh
import numpy as np
import random

random.seed(1234)

alphabet = ascii_lowercase + "_"
bigrams = [''.join(bigram) for bigram in product(alphabet, repeat=2)]
random.shuffle(bigrams)

scale = 2
width = 512 * scale
height = 512 * scale


def draw(bs, name, theta_offset=0):
    surface = gizeh.Surface(width=width, height=height)

    r = width / 2 * 3/4
    offset = [width / 2, height / 2]
    theta_step = (2 * np.pi) / (len(bs))

    i = 0
    for theta in np.linspace(0, 2 * np.pi, len(bs) + 1)[:-1]:
        t = theta + (theta_offset * theta_step / 2)
        xy = [r * np.sin(t) + offset[0], r * np.cos(t) + offset[1]]

        text = gizeh.text(
            bs[i],
            fontfamily="monospace",
            fontsize=20 * scale,
            fill=(0, 0, 0),
            xy=xy,
            angle=0
        )
        text.draw(surface)
        i += 1

    surface.write_to_png("gen/" + name + ".png")


even = bigrams[:16]
even0 = [x for i, x in enumerate(even) if i % 2 == 0]
even1 = [x for i, x in enumerate(even) if i % 2 == 1]
bigrams = bigrams[16:]

draw(even, "even")
draw(even0, "even0")
draw(even1, "even1", theta_offset=1)

odd = bigrams[:15]
bigrams = bigrams[15:]

draw(odd, "odd")
22.785714
69
0.579937
from string import ascii_lowercase
from itertools import product

import gizeh
import numpy as np
import random

random.seed(1234)

alphabet = ascii_lowercase + "_"
bigrams = [''.join(bigram) for bigram in product(alphabet, repeat=2)]
random.shuffle(bigrams)

scale = 2
width = 512 * scale
height = 512 * scale


def draw(bs, name, theta_offset=0):
    surface = gizeh.Surface(width=width, height=height)

    r = width / 2 * 3/4
    offset = [width / 2, height / 2]
    theta_step = (2 * np.pi) / (len(bs))

    i = 0
    for theta in np.linspace(0, 2 * np.pi, len(bs) + 1)[:-1]:
        t = theta + (theta_offset * theta_step / 2)
        xy = [r * np.sin(t) + offset[0], r * np.cos(t) + offset[1]]

        text = gizeh.text(
            bs[i],
            fontfamily="monospace",
            fontsize=20 * scale,
            fill=(0, 0, 0),
            xy=xy,
            angle=0
        )
        text.draw(surface)
        i += 1

    surface.write_to_png("gen/" + name + ".png")


even = bigrams[:16]
even0 = [x for i, x in enumerate(even) if i % 2 == 0]
even1 = [x for i, x in enumerate(even) if i % 2 == 1]
bigrams = bigrams[16:]

draw(even, "even")
draw(even0, "even0")
draw(even1, "even1", theta_offset=1)

odd = bigrams[:15]
bigrams = bigrams[15:]

draw(odd, "odd")
true
true
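A quick way to sanity-check the geometry in draw() above without rendering anything is to recompute the label positions directly. The helper below is a hypothetical companion to the script (ring_positions is not part of it); it reproduces the same sin/cos placement so the half-slot shift produced by theta_offset=1 can be printed and compared.

import numpy as np


def ring_positions(n_labels, width=1024, height=1024, theta_offset=0):
    """Reproduce the xy placement used by draw(): labels on a ring of radius (3/8) * width."""
    r = width / 2 * 3 / 4
    offset = [width / 2, height / 2]
    theta_step = (2 * np.pi) / n_labels
    positions = []
    for theta in np.linspace(0, 2 * np.pi, n_labels + 1)[:-1]:
        t = theta + (theta_offset * theta_step / 2)
        positions.append((r * np.sin(t) + offset[0], r * np.cos(t) + offset[1]))
    return positions


if __name__ == "__main__":
    # theta_offset=1 rotates every label by half a slot, as used for the "even1" ring above.
    for base, shifted in zip(ring_positions(8), ring_positions(8, theta_offset=1)):
        print(tuple(round(v, 1) for v in base), "->", tuple(round(v, 1) for v in shifted))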
f70ddcf562c1108c3fc8bc142051e25a3d8ed3e5
5,366
py
Python
src/python/tests/metrics_output_parser_test.py
guoyr/genny
f1164927916163824885e019c2498b1ee2042069
[ "Apache-2.0" ]
null
null
null
src/python/tests/metrics_output_parser_test.py
guoyr/genny
f1164927916163824885e019c2498b1ee2042069
[ "Apache-2.0" ]
null
null
null
src/python/tests/metrics_output_parser_test.py
guoyr/genny
f1164927916163824885e019c2498b1ee2042069
[ "Apache-2.0" ]
null
null
null
# Copyright 2019-present MongoDB Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for metrics_output_parser""" import unittest import genny.metrics_output_parser as parser import tests.parser_test_lib as test_lib class GennyOutputParserTest(unittest.TestCase): def raises_parse_error(self, input_str): with self.assertRaises(parser.ParseError): test_lib.parse_string(input_str) def test_no_clocks(self): self.raises_parse_error(""" Timers 1234,A.0.o,345 """) def test_missing_clocks(self): self.raises_parse_error(""" Clocks Timers 1234,A.0.o,345 """) def test_timers_before_clocks(self): self.raises_parse_error(""" Timers 1234,A.0.o,345 Clocks SystemTime,23439048 MetricsTime,303947 """) def test_csv_no_sections_have_data(self): self.assertEqual( test_lib.parse_string(""" Clocks Gauges Counters Timers """).timers(), {}) def test_empty_input(self): self.assertEqual(test_lib.parse_string("").timers(), {}) def test_fixture1(self): actual = test_lib.parse_fixture('csvoutput1').timers() self.assertEqual( actual, { 'InsertTest.output': { 'mean': 1252307.75, 'n': 4, 'threads': {'id-0', 'id-1'}, 'started': 1537814141061109, 'ended': 1537814143687260 }, 'HelloTest.output': { 'mean': 55527.25, 'n': 4, 'threads': {'0', '1'}, 'started': 1537814141061476, 'ended': 1537814143457943 } }) def test_fixture2(self): actual = test_lib.parse_fixture('csvoutput2').timers() self.assertEqual( actual, { 'InsertRemoveTest.remove': { 'n': 823, 'threads': set([ str(x) for x in [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99 ] ]), 'started': 1540233103870294, 'ended': 1540233383199723, 'mean': 4297048.190765492 }, 'InsertRemoveTest.insert': { 'n': 823, 'threads': set([ str(x) for x in [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99 ] ]), 'started': 1540233073074953, 'ended': 1540233380763649, 'mean': 8656706.69744836 }, 'Genny.Setup': { 'n': 1, 'threads': {'0'}, 'started': 1540233035593684, 'ended': 1540233044288445, 'mean': 8694761.0 } }) def test_fixture3(self): actual = test_lib.parse_fixture('csvoutput3').timers() self.assertEqual( actual, { 'InsertRemoveTest.remove.op-time': { 'n': 6, 'threads': {'id-99'}, 'started': 1547004939911848888, 'ended': 1547004939932315379, 'mean': 1814961.8333333333 } })
34.844156
99
0.432725
import unittest import genny.metrics_output_parser as parser import tests.parser_test_lib as test_lib class GennyOutputParserTest(unittest.TestCase): def raises_parse_error(self, input_str): with self.assertRaises(parser.ParseError): test_lib.parse_string(input_str) def test_no_clocks(self): self.raises_parse_error(""" Timers 1234,A.0.o,345 """) def test_missing_clocks(self): self.raises_parse_error(""" Clocks Timers 1234,A.0.o,345 """) def test_timers_before_clocks(self): self.raises_parse_error(""" Timers 1234,A.0.o,345 Clocks SystemTime,23439048 MetricsTime,303947 """) def test_csv_no_sections_have_data(self): self.assertEqual( test_lib.parse_string(""" Clocks Gauges Counters Timers """).timers(), {}) def test_empty_input(self): self.assertEqual(test_lib.parse_string("").timers(), {}) def test_fixture1(self): actual = test_lib.parse_fixture('csvoutput1').timers() self.assertEqual( actual, { 'InsertTest.output': { 'mean': 1252307.75, 'n': 4, 'threads': {'id-0', 'id-1'}, 'started': 1537814141061109, 'ended': 1537814143687260 }, 'HelloTest.output': { 'mean': 55527.25, 'n': 4, 'threads': {'0', '1'}, 'started': 1537814141061476, 'ended': 1537814143457943 } }) def test_fixture2(self): actual = test_lib.parse_fixture('csvoutput2').timers() self.assertEqual( actual, { 'InsertRemoveTest.remove': { 'n': 823, 'threads': set([ str(x) for x in [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99 ] ]), 'started': 1540233103870294, 'ended': 1540233383199723, 'mean': 4297048.190765492 }, 'InsertRemoveTest.insert': { 'n': 823, 'threads': set([ str(x) for x in [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99 ] ]), 'started': 1540233073074953, 'ended': 1540233380763649, 'mean': 8656706.69744836 }, 'Genny.Setup': { 'n': 1, 'threads': {'0'}, 'started': 1540233035593684, 'ended': 1540233044288445, 'mean': 8694761.0 } }) def test_fixture3(self): actual = test_lib.parse_fixture('csvoutput3').timers() self.assertEqual( actual, { 'InsertRemoveTest.remove.op-time': { 'n': 6, 'threads': {'id-99'}, 'started': 1547004939911848888, 'ended': 1547004939932315379, 'mean': 1814961.8333333333 } })
true
true
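The expected dictionaries in the fixtures above all share one shape: a bucket per "Actor.operation" holding n, mean, the set of thread ids, and started/ended timestamps. The snippet below is a hypothetical, simplified aggregator written only to illustrate that shape; it is not genny's metrics_output_parser, and it assumes timer rows of the form "<timestamp>,<Actor>.<thread>.<operation>,<value>" as in the "1234,A.0.o,345" example used by the error-handling tests.

from collections import defaultdict


def aggregate_timer_rows(rows):
    """Group raw timer rows into per-"Actor.operation" summaries (simplified illustration only)."""
    buckets = defaultdict(lambda: {"n": 0, "total": 0, "threads": set(),
                                   "started": None, "ended": None})
    for row in rows:
        timestamp, name, value = row.split(",")
        actor, thread, operation = name.split(".")
        timestamp, value = int(timestamp), int(value)
        bucket = buckets[f"{actor}.{operation}"]
        bucket["n"] += 1
        bucket["total"] += value
        bucket["threads"].add(thread)
        bucket["started"] = timestamp if bucket["started"] is None else min(bucket["started"], timestamp)
        bucket["ended"] = timestamp if bucket["ended"] is None else max(bucket["ended"], timestamp)
    return {key: {"n": b["n"], "mean": b["total"] / b["n"], "threads": b["threads"],
                  "started": b["started"], "ended": b["ended"]}
            for key, b in buckets.items()}


if __name__ == "__main__":
    print(aggregate_timer_rows(["1234,A.0.o,345", "1250,A.1.o,355"]))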
f70dddfd53fd99825c38c174683fd5bfb96c6f8f
15,270
py
Python
nets/block.py
tarepan/mutated_DVC
7fbbf4754285944387ec5d5108ed5f3d473d4f81
[ "MIT" ]
null
null
null
nets/block.py
tarepan/mutated_DVC
7fbbf4754285944387ec5d5108ed5f3d473d4f81
[ "MIT" ]
null
null
null
nets/block.py
tarepan/mutated_DVC
7fbbf4754285944387ec5d5108ed5f3d473d4f81
[ "MIT" ]
1
2019-06-05T16:03:32.000Z
2019-06-05T16:03:32.000Z
import math import chainer import chainer.functions as F import chainer.links as L import numpy as np from .sn_convolution_2d import SNConvolution2D, SNDeconvolution2D from .sn_linear import SNLinear def _upsample(x): h, w = x.shape[2:] return F.unpooling_2d(x, 2, outsize=(h * 2, w * 2)) def _downsample(x): return F.average_pooling_2d(x, 2) def upsample_conv(x, conv): return conv(_upsample(x)) def _upsample_frq(x): h, w = x.shape[2:] return F.unpooling_2d(x, (1,2), outsize=(h, w * 2)) def _downsample_frq(x): return F.average_pooling_2d(x, (1,2)) def upsample_conv_frq(x, conv): return conv(_upsample_frq(x)) class ResBlock(chainer.Chain): def __init__(self, in_channels, out_channels, ksize=3, pad=1, activation=F.leaky_relu, mode='none', bn=False, dr=None): super(ResBlock, self).__init__() initializer = chainer.initializers.GlorotUniform() initializer_sc = chainer.initializers.GlorotUniform() self.activation = activation self.mode = _downsample if mode == 'down' else _upsample if mode == 'up' else None self.learnable_sc = in_channels != out_channels self.dr = dr self.bn = bn with self.init_scope(): self.c1 = L.Convolution2D(in_channels, out_channels, ksize=ksize, pad=pad, initialW=initializer, nobias=bn) self.c2 = L.Convolution2D(out_channels, out_channels, ksize=ksize, pad=pad, initialW=initializer, nobias=bn) if bn: self.b1 = L.BatchNormalization(out_channels) self.b2 = L.BatchNormalization(out_channels) if self.learnable_sc: self.c_sc = L.Convolution2D(in_channels, out_channels, ksize=1, pad=0, initialW=initializer_sc) def residual(self, x): h = x h = self.c1(h) if self.bn: h = self.b1(h) if self.activation: h = self.activation(h) if self.mode: h = self.mode(h) if self.dr: with chainer.using_config('train', True): h = F.dropout(h, self.dr) h = self.c2(h) if self.bn: h = self.b2(h) if self.activation: h = self.activation(h) return h def shortcut(self, x): if self.mode: x = self.mode(x) if self.learnable_sc: x = self.c_sc(x) return x def __call__(self, x): return self.residual(x) + self.shortcut(x) class ConvBlock(chainer.Chain): def __init__(self, in_channels, out_channels, mode='none', activation=F.leaky_relu, bn=False, dr=None): super(ConvBlock, self).__init__() # initializer = chainer.initializers.GlorotUniform() initializer = chainer.initializers.HeUniform() self.activation = activation self.bn = bn self.dr = dr with self.init_scope(): if mode == 'none': self.c = L.Convolution2D(in_channels, out_channels, ksize=3, stride=1, pad=1, initialW=initializer, nobias=bn) elif mode == 'none-7': self.c = L.Convolution2D(in_channels, out_channels, ksize=(7,7), stride=1, pad=(3,3), initialW=initializer, nobias=bn) elif mode == 'down': self.c = L.Convolution2D(in_channels, out_channels, ksize=4, stride=2, pad=1, initialW=initializer, nobias=bn) elif mode == 'up': self.c = L.Deconvolution2D(in_channels, out_channels, ksize=4, stride=2, pad=1, initialW=initializer, nobias=bn) elif mode == 'full-down': self.c = L.Convolution2D(in_channels, out_channels, ksize=4, stride=1, pad=0, initialW=initializer, nobias=bn) elif mode == 'frq': self.c = L.Convolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn) elif mode == 'frq-down': self.c = L.Convolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn) self.activation = lambda x: activation(_downsample(x)) elif mode == 'frq-up': self.c = L.Convolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn) self.activation = 
lambda x: activation(_upsample(x)) elif mode == 'pad': self.c = L.Convolution2D(in_channels, out_channels, ksize=3, stride=1, pad=2, initialW=initializer, nobias=bn) elif mode == 'trim': self.c = L.Convolution2D(in_channels, out_channels, ksize=3, stride=1, pad=0, initialW=initializer, nobias=bn) else: raise Exception('mode is missing') if bn: self.b = L.BatchNormalization(out_channels) def __call__(self, h): if self.dr: with chainer.using_config('train', True): h = F.dropout(h, self.dr) h = self.c(h) if self.bn: h = self.b(h) if self.activation: h = self.activation(h) return h class CoPSBlock(chainer.Chain): def __init__(self, in_channels, out_channels, activation=F.leaky_relu, bn=True): super(CoPSBlock, self).__init__() initializer = chainer.initializers.GlorotUniform() self.activation = activation self.bn = bn with self.init_scope(): self.ps = L.Convolution2D(in_channels, in_channels*4, ksize=1, stride=1, initialW=initializer) self.c = L.Convolution2D(in_channels, out_channels, ksize=3, stride=1, pad=1, initialW=initializer) if bn: self.b = L.BatchNormalization(out_channels) def pixel_shuffle(self, x): out = self.ps(x) b = out.shape[0] c = out.shape[1] h = out.shape[2] w = out.shape[3] out = F.reshape(out, (b, 2, 2, c//4, h, w)) out = F.transpose(out, (0, 3, 4, 1, 5, 2)) out = F.reshape(out, (b, c//4, h*2, w*2)) return out def __call__(self, h): h = self.pixel_shuffle(h) h = self.c(h) if self.bn: h = self.b(h) if self.activation: h = self.activation(h) return h class SNResBlock(chainer.Chain): def __init__(self, in_channels, out_channels, activation=F.leaky_relu, sample='none', dr=None): super(SNResBlock, self).__init__() initializer = chainer.initializers.GlorotUniform() initializer_sc = chainer.initializers.GlorotUniform() self.activation = activation self.dr = dr self.sample = _downsample if sample == 'down' else _upsample if sample == 'up' else None self.learnable_sc = in_channels != out_channels or sample == 'down' or sample == 'up' with self.init_scope(): self.c1 = SNConvolution2D(in_channels, out_channels, ksize=3, pad=1, initialW=initializer) self.c2 = SNConvolution2D(out_channels, out_channels, ksize=3, pad=1, initialW=initializer) if self.learnable_sc: self.c_sc = SNConvolution2D(in_channels, out_channels, ksize=1, pad=0, initialW=initializer_sc) def residual(self, x): h = x h = self.activation(h) h = self.c1(h) if self.sample: h = self.sample(h) if self.dr: with chainer.using_config('train', True): h = F.dropout(h, self.dr) h = self.activation(h) h = self.c2(h) return h def shortcut(self, x): if self.learnable_sc: x = self.c_sc(x) if self.sample: return self.sample(x) else: return x else: return x def __call__(self, x): return self.residual(x) + self.shortcut(x) class SNConvBlock(chainer.Chain): def __init__(self, in_channels, out_channels, mode='none', activation=F.leaky_relu, bn=False, dr=None): super(SNConvBlock, self).__init__() # initializer = chainer.initializers.GlorotUniform() initializer = chainer.initializers.HeUniform() self.activation = activation self.bn = bn self.dr = dr with self.init_scope(): if mode == 'none': self.c = SNConvolution2D(in_channels, out_channels, ksize=3, stride=1, pad=1, initialW=initializer, nobias=bn) elif mode == 'none-7': self.c = SNConvolution2D(in_channels, out_channels, ksize=(7,7), stride=1, pad=(3,3), initialW=initializer, nobias=bn) elif mode == 'down': self.c = SNConvolution2D(in_channels, out_channels, ksize=4, stride=2, pad=1, initialW=initializer, nobias=bn) elif mode == 'up': self.c = SNDeconvolution2D(in_channels, out_channels, 
ksize=4, stride=2, pad=1, initialW=initializer, nobias=bn) elif mode == 'full-down': self.c = SNConvolution2D(in_channels, out_channels, ksize=4, stride=1, pad=0, initialW=initializer, nobias=bn) elif mode == 'frq': self.c = SNConvolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn) elif mode == 'frq-down': self.c = SNConvolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn) self.activation = lambda x: activation(_downsample(x)) elif mode == 'frq-up': self.c = SNConvolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn) self.activation = lambda x: activation(_upsample(x)) else: raise Exception('mode is missing') if bn: self.b = L.BatchNormalization(out_channels) def __call__(self, h): if self.dr: with chainer.using_config('train', True): h = F.dropout(h, self.dr) h = self.c(h) if self.bn: h = self.b(h) if self.activation: h = self.activation(h) return h class SNLinearBlock(chainer.Chain): def __init__(self, in_channels, out_channels, activation=F.leaky_relu, dr=None): super(SNLinearBlock, self).__init__() initializer = chainer.initializers.GlorotUniform() self.activation = activation self.dr = dr if type(out_channels) is tuple: self.out_shape = (-1,)+out_channels else: self.out_shape = None with self.init_scope(): self.l = SNLinear(in_channels, np.prod(out_channels), initialW=initializer) def __call__(self, x): if self.dr: x = F.dropout(x, self.dr) x = self.l(x) x = self.activation(x) if self.out_shape: x = F.reshape(x, self.out_shape) return x class SNMDBlock(chainer.Chain): def __init__(self, in_channels, in_size=4, B=100, C=5, gap=True, dr=None): super(SNMDBlock, self).__init__() # initializer = chainer.initializers.GlorotUniform() initializer = chainer.initializers.HeUniform() self.B = B self.C = C self.dr = dr self.gap = gap if gap: in_size = 1 if type(in_size) is int: in_size = (in_size, in_size) with self.init_scope(): self.l = SNLinear(in_size[0] * in_size[1] * in_channels + B, 1, initialW=initializer) self.md = SNLinear(in_size[0] * in_size[1] * in_channels, B * C, initialW=initializer) def __call__(self, x): if self.dr: with chainer.using_config('train', True): x = F.dropout(x, self.dr) if self.gap: x = F.sum(x, axis=(2,3)) N = x.shape[0] #Below code copyed from https://github.com/pfnet-research/chainer-gan-lib/blob/master/minibatch_discrimination/net.py feature = F.reshape(F.leaky_relu(x), (N, -1)) m = F.reshape(self.md(feature), (N, self.B * self.C, 1)) m0 = F.broadcast_to(m, (N, self.B * self.C, N)) m1 = F.transpose(m0, (2, 1, 0)) d = F.absolute(F.reshape(m0 - m1, (N, self.B, self.C, N))) d = F.sum(F.exp(-F.sum(d, axis=2)), axis=2) - 1 h = F.concat([feature, d]) h = self.l(h) return h class SNL1DBlock(chainer.Chain): def __init__(self, in_ch, out_ch, width, activation=F.leaky_relu, dr=None): super(SNL1DBlock, self).__init__() initializer = chainer.initializers.GlorotUniform() self.activation = activation self.dr = dr self.out_ch = out_ch with self.init_scope(): self.l = SNLinear(in_ch*width, out_ch*width, initialW=initializer) def __call__(self, x): if self.dr: x = F.dropout(x, self.dr) x = F.transpose(x, (0, 2, 1, 3)) out_shape = list(x.shape) x = F.reshape(x, (-1, x.shape[2]*x.shape[3])) x = self.l(x) x = self.activation(x) out_shape[2] = self.out_ch x = F.reshape(x, out_shape) x = F.transpose(x, (0, 2, 1, 3)) return x class L1DBlock(chainer.Chain): def __init__(self, in_ch, out_ch, width, activation=F.leaky_relu, dr=None): 
super(L1DBlock, self).__init__() # initializer = chainer.initializers.GlorotUniform() initializer = chainer.initializers.HeUniform() self.activation = activation self.dr = dr self.out_ch = out_ch with self.init_scope(): self.l = L.Linear(in_ch*width, out_ch*width, initialW=initializer) def __call__(self, x): if self.dr: x = F.dropout(x, self.dr) x = F.transpose(x, (0, 2, 1, 3)) out_shape = list(x.shape) x = F.reshape(x, (-1, x.shape[2]*x.shape[3])) x = self.l(x) x = self.activation(x) out_shape[2] = self.out_ch x = F.reshape(x, out_shape) x = F.transpose(x, (0, 2, 1, 3)) return x class CLBlock(chainer.Chain): def __init__(self, in_ch, out_ch, width, activation=F.leaky_relu, liner_out_ch=1, dr=None): super(CLBlock, self).__init__() self.dr = dr if out_ch - liner_out_ch <= 0: raise Exception('out_ch <= liner_out_ch!') with self.init_scope(): self.c = ConvBlock(in_ch, out_ch-liner_out_ch, activation=activation) self.l = L1DBlock(in_ch, liner_out_ch, width, activation) def __call__(self, x): h = x if self.dr: h = F.dropout(h, self.dr) h1 = self.c(h) h2 = self.l(h) h = F.concat([h1,h2]) return h class SNCLBlock(chainer.Chain): def __init__(self, in_ch, out_ch, width, activation=F.leaky_relu, dr=None): super(SNCLBlock, self).__init__() self.dr = dr with self.init_scope(): self.c = SNConvBlock(in_ch, out_ch-1, activation=activation) self.l = SNL1DBlock(in_ch, 1, width, activation) def __call__(self, x): h = x if self.dr: h = F.dropout(h, self.dr) h1 = self.c(h) h2 = self.l(h) h = F.concat([h1,h2]) return h
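A minimal usage sketch for the blocks defined above, assuming chainer and numpy are installed and the module is importable as nets.block (the path given in this record). The shape comments state what the 'down' and 'up' modes are expected to produce under those assumptions, not verified output.

import numpy as np
import chainer

from nets.block import ConvBlock, ResBlock


def demo():
    # A 2-sample NCHW batch of 3-channel 32x32 inputs.
    x = np.random.randn(2, 3, 32, 32).astype(np.float32)
    down = ConvBlock(3, 16, mode='down')   # 4x4 conv, stride 2: 32x32 -> 16x16
    up = ResBlock(16, 32, mode='up')       # residual block whose branches upsample back to 32x32
    with chainer.using_config('train', False):
        h = down(x)
        y = up(h)
    print(h.shape, y.shape)  # expected: (2, 16, 16, 16) (2, 32, 32, 32)


if __name__ == "__main__":
    demo()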
40.07874
134
0.583628
import math
import chainer
import chainer.functions as F
import chainer.links as L
import numpy as np

from .sn_convolution_2d import SNConvolution2D, SNDeconvolution2D
from .sn_linear import SNLinear


def _upsample(x):
    h, w = x.shape[2:]
    return F.unpooling_2d(x, 2, outsize=(h * 2, w * 2))


def _downsample(x):
    return F.average_pooling_2d(x, 2)


def upsample_conv(x, conv):
    return conv(_upsample(x))


def _upsample_frq(x):
    h, w = x.shape[2:]
    return F.unpooling_2d(x, (1,2), outsize=(h, w * 2))


def _downsample_frq(x):
    return F.average_pooling_2d(x, (1,2))


def upsample_conv_frq(x, conv):
    return conv(_upsample_frq(x))


class ResBlock(chainer.Chain):
    def __init__(self, in_channels, out_channels, ksize=3, pad=1, activation=F.leaky_relu, mode='none', bn=False, dr=None):
        super(ResBlock, self).__init__()
        initializer = chainer.initializers.GlorotUniform()
        initializer_sc = chainer.initializers.GlorotUniform()
        self.activation = activation
        self.mode = _downsample if mode == 'down' else _upsample if mode == 'up' else None
        self.learnable_sc = in_channels != out_channels
        self.dr = dr
        self.bn = bn
        with self.init_scope():
            self.c1 = L.Convolution2D(in_channels, out_channels, ksize=ksize, pad=pad, initialW=initializer, nobias=bn)
            self.c2 = L.Convolution2D(out_channels, out_channels, ksize=ksize, pad=pad, initialW=initializer, nobias=bn)
            if bn:
                self.b1 = L.BatchNormalization(out_channels)
                self.b2 = L.BatchNormalization(out_channels)
            if self.learnable_sc:
                self.c_sc = L.Convolution2D(in_channels, out_channels, ksize=1, pad=0, initialW=initializer_sc)

    def residual(self, x):
        h = x
        h = self.c1(h)
        if self.bn:
            h = self.b1(h)
        if self.activation:
            h = self.activation(h)
        if self.mode:
            h = self.mode(h)
        if self.dr:
            with chainer.using_config('train', True):
                h = F.dropout(h, self.dr)
        h = self.c2(h)
        if self.bn:
            h = self.b2(h)
        if self.activation:
            h = self.activation(h)
        return h

    def shortcut(self, x):
        if self.mode:
            x = self.mode(x)
        if self.learnable_sc:
            x = self.c_sc(x)
        return x

    def __call__(self, x):
        return self.residual(x) + self.shortcut(x)


class ConvBlock(chainer.Chain):
    def __init__(self, in_channels, out_channels, mode='none', activation=F.leaky_relu, bn=False, dr=None):
        super(ConvBlock, self).__init__()
        initializer = chainer.initializers.HeUniform()
        self.activation = activation
        self.bn = bn
        self.dr = dr
        with self.init_scope():
            if mode == 'none':
                self.c = L.Convolution2D(in_channels, out_channels, ksize=3, stride=1, pad=1, initialW=initializer, nobias=bn)
            elif mode == 'none-7':
                self.c = L.Convolution2D(in_channels, out_channels, ksize=(7,7), stride=1, pad=(3,3), initialW=initializer, nobias=bn)
            elif mode == 'down':
                self.c = L.Convolution2D(in_channels, out_channels, ksize=4, stride=2, pad=1, initialW=initializer, nobias=bn)
            elif mode == 'up':
                self.c = L.Deconvolution2D(in_channels, out_channels, ksize=4, stride=2, pad=1, initialW=initializer, nobias=bn)
            elif mode == 'full-down':
                self.c = L.Convolution2D(in_channels, out_channels, ksize=4, stride=1, pad=0, initialW=initializer, nobias=bn)
            elif mode == 'frq':
                self.c = L.Convolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn)
            elif mode == 'frq-down':
                self.c = L.Convolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn)
                self.activation = lambda x: activation(_downsample(x))
            elif mode == 'frq-up':
                self.c = L.Convolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn)
                self.activation = lambda x: activation(_upsample(x))
            elif mode == 'pad':
                self.c = L.Convolution2D(in_channels, out_channels, ksize=3, stride=1, pad=2, initialW=initializer, nobias=bn)
            elif mode == 'trim':
                self.c = L.Convolution2D(in_channels, out_channels, ksize=3, stride=1, pad=0, initialW=initializer, nobias=bn)
            else:
                raise Exception('mode is missing')
            if bn:
                self.b = L.BatchNormalization(out_channels)

    def __call__(self, h):
        if self.dr:
            with chainer.using_config('train', True):
                h = F.dropout(h, self.dr)
        h = self.c(h)
        if self.bn:
            h = self.b(h)
        if self.activation:
            h = self.activation(h)
        return h


class CoPSBlock(chainer.Chain):
    def __init__(self, in_channels, out_channels, activation=F.leaky_relu, bn=True):
        super(CoPSBlock, self).__init__()
        initializer = chainer.initializers.GlorotUniform()
        self.activation = activation
        self.bn = bn
        with self.init_scope():
            self.ps = L.Convolution2D(in_channels, in_channels*4, ksize=1, stride=1, initialW=initializer)
            self.c = L.Convolution2D(in_channels, out_channels, ksize=3, stride=1, pad=1, initialW=initializer)
            if bn:
                self.b = L.BatchNormalization(out_channels)

    def pixel_shuffle(self, x):
        out = self.ps(x)
        b = out.shape[0]
        c = out.shape[1]
        h = out.shape[2]
        w = out.shape[3]
        out = F.reshape(out, (b, 2, 2, c//4, h, w))
        out = F.transpose(out, (0, 3, 4, 1, 5, 2))
        out = F.reshape(out, (b, c//4, h*2, w*2))
        return out

    def __call__(self, h):
        h = self.pixel_shuffle(h)
        h = self.c(h)
        if self.bn:
            h = self.b(h)
        if self.activation:
            h = self.activation(h)
        return h


class SNResBlock(chainer.Chain):
    def __init__(self, in_channels, out_channels, activation=F.leaky_relu, sample='none', dr=None):
        super(SNResBlock, self).__init__()
        initializer = chainer.initializers.GlorotUniform()
        initializer_sc = chainer.initializers.GlorotUniform()
        self.activation = activation
        self.dr = dr
        self.sample = _downsample if sample == 'down' else _upsample if sample == 'up' else None
        self.learnable_sc = in_channels != out_channels or sample == 'down' or sample == 'up'
        with self.init_scope():
            self.c1 = SNConvolution2D(in_channels, out_channels, ksize=3, pad=1, initialW=initializer)
            self.c2 = SNConvolution2D(out_channels, out_channels, ksize=3, pad=1, initialW=initializer)
            if self.learnable_sc:
                self.c_sc = SNConvolution2D(in_channels, out_channels, ksize=1, pad=0, initialW=initializer_sc)

    def residual(self, x):
        h = x
        h = self.activation(h)
        h = self.c1(h)
        if self.sample:
            h = self.sample(h)
        if self.dr:
            with chainer.using_config('train', True):
                h = F.dropout(h, self.dr)
        h = self.activation(h)
        h = self.c2(h)
        return h

    def shortcut(self, x):
        if self.learnable_sc:
            x = self.c_sc(x)
            if self.sample:
                return self.sample(x)
            else:
                return x
        else:
            return x

    def __call__(self, x):
        return self.residual(x) + self.shortcut(x)


class SNConvBlock(chainer.Chain):
    def __init__(self, in_channels, out_channels, mode='none', activation=F.leaky_relu, bn=False, dr=None):
        super(SNConvBlock, self).__init__()
        initializer = chainer.initializers.HeUniform()
        self.activation = activation
        self.bn = bn
        self.dr = dr
        with self.init_scope():
            if mode == 'none':
                self.c = SNConvolution2D(in_channels, out_channels, ksize=3, stride=1, pad=1, initialW=initializer, nobias=bn)
            elif mode == 'none-7':
                self.c = SNConvolution2D(in_channels, out_channels, ksize=(7,7), stride=1, pad=(3,3), initialW=initializer, nobias=bn)
            elif mode == 'down':
                self.c = SNConvolution2D(in_channels, out_channels, ksize=4, stride=2, pad=1, initialW=initializer, nobias=bn)
            elif mode == 'up':
                self.c = SNDeconvolution2D(in_channels, out_channels, ksize=4, stride=2, pad=1, initialW=initializer, nobias=bn)
            elif mode == 'full-down':
                self.c = SNConvolution2D(in_channels, out_channels, ksize=4, stride=1, pad=0, initialW=initializer, nobias=bn)
            elif mode == 'frq':
                self.c = SNConvolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn)
            elif mode == 'frq-down':
                self.c = SNConvolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn)
                self.activation = lambda x: activation(_downsample(x))
            elif mode == 'frq-up':
                self.c = SNConvolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn)
                self.activation = lambda x: activation(_upsample(x))
            else:
                raise Exception('mode is missing')
            if bn:
                self.b = L.BatchNormalization(out_channels)

    def __call__(self, h):
        if self.dr:
            with chainer.using_config('train', True):
                h = F.dropout(h, self.dr)
        h = self.c(h)
        if self.bn:
            h = self.b(h)
        if self.activation:
            h = self.activation(h)
        return h


class SNLinearBlock(chainer.Chain):
    def __init__(self, in_channels, out_channels, activation=F.leaky_relu, dr=None):
        super(SNLinearBlock, self).__init__()
        initializer = chainer.initializers.GlorotUniform()
        self.activation = activation
        self.dr = dr
        if type(out_channels) is tuple:
            self.out_shape = (-1,)+out_channels
        else:
            self.out_shape = None
        with self.init_scope():
            self.l = SNLinear(in_channels, np.prod(out_channels), initialW=initializer)

    def __call__(self, x):
        if self.dr:
            x = F.dropout(x, self.dr)
        x = self.l(x)
        x = self.activation(x)
        if self.out_shape:
            x = F.reshape(x, self.out_shape)
        return x


class SNMDBlock(chainer.Chain):
    def __init__(self, in_channels, in_size=4, B=100, C=5, gap=True, dr=None):
        super(SNMDBlock, self).__init__()
        initializer = chainer.initializers.HeUniform()
        self.B = B
        self.C = C
        self.dr = dr
        self.gap = gap
        if gap:
            in_size = 1
        if type(in_size) is int:
            in_size = (in_size, in_size)
        with self.init_scope():
            self.l = SNLinear(in_size[0] * in_size[1] * in_channels + B, 1, initialW=initializer)
            self.md = SNLinear(in_size[0] * in_size[1] * in_channels, B * C, initialW=initializer)

    def __call__(self, x):
        if self.dr:
            with chainer.using_config('train', True):
                x = F.dropout(x, self.dr)
        if self.gap:
            x = F.sum(x, axis=(2,3))
        N = x.shape[0]
        feature = F.reshape(F.leaky_relu(x), (N, -1))
        m = F.reshape(self.md(feature), (N, self.B * self.C, 1))
        m0 = F.broadcast_to(m, (N, self.B * self.C, N))
        m1 = F.transpose(m0, (2, 1, 0))
        d = F.absolute(F.reshape(m0 - m1, (N, self.B, self.C, N)))
        d = F.sum(F.exp(-F.sum(d, axis=2)), axis=2) - 1
        h = F.concat([feature, d])
        h = self.l(h)
        return h


class SNL1DBlock(chainer.Chain):
    def __init__(self, in_ch, out_ch, width, activation=F.leaky_relu, dr=None):
        super(SNL1DBlock, self).__init__()
        initializer = chainer.initializers.GlorotUniform()
        self.activation = activation
        self.dr = dr
        self.out_ch = out_ch
        with self.init_scope():
            self.l = SNLinear(in_ch*width, out_ch*width, initialW=initializer)

    def __call__(self, x):
        if self.dr:
            x = F.dropout(x, self.dr)
        x = F.transpose(x, (0, 2, 1, 3))
        out_shape = list(x.shape)
        x = F.reshape(x, (-1, x.shape[2]*x.shape[3]))
        x = self.l(x)
        x = self.activation(x)
        out_shape[2] = self.out_ch
        x = F.reshape(x, out_shape)
        x = F.transpose(x, (0, 2, 1, 3))
        return x


class L1DBlock(chainer.Chain):
    def __init__(self, in_ch, out_ch, width, activation=F.leaky_relu, dr=None):
        super(L1DBlock, self).__init__()
        initializer = chainer.initializers.HeUniform()
        self.activation = activation
        self.dr = dr
        self.out_ch = out_ch
        with self.init_scope():
            self.l = L.Linear(in_ch*width, out_ch*width, initialW=initializer)

    def __call__(self, x):
        if self.dr:
            x = F.dropout(x, self.dr)
        x = F.transpose(x, (0, 2, 1, 3))
        out_shape = list(x.shape)
        x = F.reshape(x, (-1, x.shape[2]*x.shape[3]))
        x = self.l(x)
        x = self.activation(x)
        out_shape[2] = self.out_ch
        x = F.reshape(x, out_shape)
        x = F.transpose(x, (0, 2, 1, 3))
        return x


class CLBlock(chainer.Chain):
    def __init__(self, in_ch, out_ch, width, activation=F.leaky_relu, liner_out_ch=1, dr=None):
        super(CLBlock, self).__init__()
        self.dr = dr
        if out_ch - liner_out_ch <= 0:
            raise Exception('out_ch <= liner_out_ch!')
        with self.init_scope():
            self.c = ConvBlock(in_ch, out_ch-liner_out_ch, activation=activation)
            self.l = L1DBlock(in_ch, liner_out_ch, width, activation)

    def __call__(self, x):
        h = x
        if self.dr:
            h = F.dropout(h, self.dr)
        h1 = self.c(h)
        h2 = self.l(h)
        h = F.concat([h1,h2])
        return h


class SNCLBlock(chainer.Chain):
    def __init__(self, in_ch, out_ch, width, activation=F.leaky_relu, dr=None):
        super(SNCLBlock, self).__init__()
        self.dr = dr
        with self.init_scope():
            self.c = SNConvBlock(in_ch, out_ch-1, activation=activation)
            self.l = SNL1DBlock(in_ch, 1, width, activation)

    def __call__(self, x):
        h = x
        if self.dr:
            h = F.dropout(h, self.dr)
        h1 = self.c(h)
        h2 = self.l(h)
        h = F.concat([h1,h2])
        return h
true
true
f70de03bca9d19e38a77151c1eec1f1ef25fdc85
4,610
py
Python
test/functional/combine_logs.py
swyft-project/swyft-core
d4575beda0936b0f0a5780610ba6e09cd993add3
[ "MIT" ]
3
2019-09-29T10:50:37.000Z
2020-07-01T23:08:33.000Z
test/functional/combine_logs.py
swyft-project/swyft-core
d4575beda0936b0f0a5780610ba6e09cd993add3
[ "MIT" ]
12
2019-09-09T16:19:47.000Z
2019-09-26T13:18:10.000Z
test/functional/combine_logs.py
swyft-project/swyft-core
d4575beda0936b0f0a5780610ba6e09cd993add3
[ "MIT" ]
1
2019-10-13T10:30:17.000Z
2019-10-13T10:30:17.000Z
#!/usr/bin/env python3
"""Combine logs from multiple swyft nodes as well as the test_framework log.

This streams the combined log output to stdout. Use combine_logs.py > outputfile
to write to an outputfile."""

import argparse
from collections import defaultdict, namedtuple
import heapq
import itertools
import os
import re
import sys

# Matches on the date format at the start of the log event
TIMESTAMP_PATTERN = re.compile(r"^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{6}Z")

LogEvent = namedtuple('LogEvent', ['timestamp', 'source', 'event'])


def main():
    """Main function. Parses args, reads the log files and renders them as text or html."""

    parser = argparse.ArgumentParser(usage='%(prog)s [options] <test temporary directory>', description=__doc__)
    parser.add_argument('-c', '--color', dest='color', action='store_true', help='outputs the combined log with events colored by source (requires posix terminal colors. Use less -r for viewing)')
    parser.add_argument('--html', dest='html', action='store_true', help='outputs the combined log as html. Requires jinja2. pip install jinja2')
    args, unknown_args = parser.parse_known_args()

    if args.color and os.name != 'posix':
        print("Color output requires posix terminal colors.")
        sys.exit(1)

    if args.html and args.color:
        print("Only one out of --color or --html should be specified")
        sys.exit(1)

    # There should only be one unknown argument - the path of the temporary test directory
    if len(unknown_args) != 1:
        print("Unexpected arguments" + str(unknown_args))
        sys.exit(1)

    log_events = read_logs(unknown_args[0])

    print_logs(log_events, color=args.color, html=args.html)


def read_logs(tmp_dir):
    """Reads log files.

    Delegates to generator function get_log_events() to provide individual log events
    for each of the input log files."""

    files = [("test", "%s/test_framework.log" % tmp_dir)]
    for i in itertools.count():
        logfile = "{}/node{}/regtest/debug.log".format(tmp_dir, i)
        if not os.path.isfile(logfile):
            break
        files.append(("node%d" % i, logfile))

    return heapq.merge(*[get_log_events(source, f) for source, f in files])


def get_log_events(source, logfile):
    """Generator function that returns individual log events.

    Log events may be split over multiple lines. We use the timestamp
    regex match as the marker for a new log event."""
    try:
        with open(logfile, 'r') as infile:
            event = ''
            timestamp = ''
            for line in infile:
                # skip blank lines
                if line == '\n':
                    continue
                # if this line has a timestamp, it's the start of a new log event.
                time_match = TIMESTAMP_PATTERN.match(line)
                if time_match:
                    if event:
                        yield LogEvent(timestamp=timestamp, source=source, event=event.rstrip())
                    event = line
                    timestamp = time_match.group()
                # if it doesn't have a timestamp, it's a continuation line of the previous log.
                else:
                    event += "\n" + line
            # Flush the final event
            yield LogEvent(timestamp=timestamp, source=source, event=event.rstrip())
    except FileNotFoundError:
        print("File %s could not be opened. Continuing without it." % logfile, file=sys.stderr)


def print_logs(log_events, color=False, html=False):
    """Renders the iterator of log events into text or html."""
    if not html:
        colors = defaultdict(lambda: '')
        if color:
            colors["test"] = "\033[0;36m"  # CYAN
            colors["node0"] = "\033[0;34m"  # BLUE
            colors["node1"] = "\033[0;32m"  # GREEN
            colors["node2"] = "\033[0;31m"  # RED
            colors["node3"] = "\033[0;33m"  # YELLOW
            colors["reset"] = "\033[0m"  # Reset font color

        for event in log_events:
            print("{0} {1: <5} {2} {3}".format(colors[event.source.rstrip()], event.source, event.event, colors["reset"]))

    else:
        try:
            import jinja2
        except ImportError:
            print("jinja2 not found. Try `pip install jinja2`")
            sys.exit(1)
        print(jinja2.Environment(loader=jinja2.FileSystemLoader('./'))
                    .get_template('combined_log_template.html')
                    .render(title="Combined Logs from testcase", log_events=[event._asdict() for event in log_events]))


if __name__ == '__main__':
    main()
40.086957
196
0.618221
import argparse
from collections import defaultdict, namedtuple
import heapq
import itertools
import os
import re
import sys

TIMESTAMP_PATTERN = re.compile(r"^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{6}Z")

LogEvent = namedtuple('LogEvent', ['timestamp', 'source', 'event'])


def main():
    parser = argparse.ArgumentParser(usage='%(prog)s [options] <test temporary directory>', description=__doc__)
    parser.add_argument('-c', '--color', dest='color', action='store_true', help='outputs the combined log with events colored by source (requires posix terminal colors. Use less -r for viewing)')
    parser.add_argument('--html', dest='html', action='store_true', help='outputs the combined log as html. Requires jinja2. pip install jinja2')
    args, unknown_args = parser.parse_known_args()

    if args.color and os.name != 'posix':
        print("Color output requires posix terminal colors.")
        sys.exit(1)

    if args.html and args.color:
        print("Only one out of --color or --html should be specified")
        sys.exit(1)

    if len(unknown_args) != 1:
        print("Unexpected arguments" + str(unknown_args))
        sys.exit(1)

    log_events = read_logs(unknown_args[0])

    print_logs(log_events, color=args.color, html=args.html)


def read_logs(tmp_dir):
    files = [("test", "%s/test_framework.log" % tmp_dir)]
    for i in itertools.count():
        logfile = "{}/node{}/regtest/debug.log".format(tmp_dir, i)
        if not os.path.isfile(logfile):
            break
        files.append(("node%d" % i, logfile))

    return heapq.merge(*[get_log_events(source, f) for source, f in files])


def get_log_events(source, logfile):
    try:
        with open(logfile, 'r') as infile:
            event = ''
            timestamp = ''
            for line in infile:
                if line == '\n':
                    continue
                time_match = TIMESTAMP_PATTERN.match(line)
                if time_match:
                    if event:
                        yield LogEvent(timestamp=timestamp, source=source, event=event.rstrip())
                    event = line
                    timestamp = time_match.group()
                # if it doesn't have a timestamp, it's a continuation line of the previous log.
                else:
                    event += "\n" + line
            # Flush the final event
            yield LogEvent(timestamp=timestamp, source=source, event=event.rstrip())
    except FileNotFoundError:
        print("File %s could not be opened. Continuing without it." % logfile, file=sys.stderr)


def print_logs(log_events, color=False, html=False):
    if not html:
        colors = defaultdict(lambda: '')
        if color:
            colors["test"] = "\033[0;36m"  # CYAN
            colors["node0"] = "\033[0;34m"  # BLUE
            colors["node1"] = "\033[0;32m"  # GREEN
            colors["node2"] = "\033[0;31m"  # RED
            colors["node3"] = "\033[0;33m"  # YELLOW
            colors["reset"] = "\033[0m"  # Reset font color

        for event in log_events:
            print("{0} {1: <5} {2} {3}".format(colors[event.source.rstrip()], event.source, event.event, colors["reset"]))

    else:
        try:
            import jinja2
        except ImportError:
            print("jinja2 not found. Try `pip install jinja2`")
            sys.exit(1)
        print(jinja2.Environment(loader=jinja2.FileSystemLoader('./'))
                    .get_template('combined_log_template.html')
                    .render(title="Combined Logs from testcase", log_events=[event._asdict() for event in log_events]))


if __name__ == '__main__':
    main()
true
true
f70de0d0901635161785158d2542852de107f55a
7,179
py
Python
kubernetes/client/models/v1beta1_pod_security_policy_list.py
henrywu2019/python
fb7214144395c05349e70a58ea129576f6b11fc4
[ "Apache-2.0" ]
4,417
2018-01-13T04:30:48.000Z
2022-03-31T15:33:59.000Z
kubernetes/client/models/v1beta1_pod_security_policy_list.py
henrywu2019/python
fb7214144395c05349e70a58ea129576f6b11fc4
[ "Apache-2.0" ]
1,414
2018-01-12T19:31:56.000Z
2022-03-31T22:01:02.000Z
kubernetes/client/models/v1beta1_pod_security_policy_list.py
henrywu2019/python
fb7214144395c05349e70a58ea129576f6b11fc4
[ "Apache-2.0" ]
2,854
2018-01-14T08:57:33.000Z
2022-03-31T01:41:56.000Z
# coding: utf-8

"""
    Kubernetes

    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)  # noqa: E501

    The version of the OpenAPI document: release-1.21
    Generated by: https://openapi-generator.tech
"""


import pprint
import re  # noqa: F401

import six

from kubernetes.client.configuration import Configuration


class V1beta1PodSecurityPolicyList(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'api_version': 'str',
        'items': 'list[V1beta1PodSecurityPolicy]',
        'kind': 'str',
        'metadata': 'V1ListMeta'
    }

    attribute_map = {
        'api_version': 'apiVersion',
        'items': 'items',
        'kind': 'kind',
        'metadata': 'metadata'
    }

    def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None):  # noqa: E501
        """V1beta1PodSecurityPolicyList - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._api_version = None
        self._items = None
        self._kind = None
        self._metadata = None
        self.discriminator = None

        if api_version is not None:
            self.api_version = api_version
        self.items = items
        if kind is not None:
            self.kind = kind
        if metadata is not None:
            self.metadata = metadata

    @property
    def api_version(self):
        """Gets the api_version of this V1beta1PodSecurityPolicyList.  # noqa: E501

        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501

        :return: The api_version of this V1beta1PodSecurityPolicyList.  # noqa: E501
        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Sets the api_version of this V1beta1PodSecurityPolicyList.

        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501

        :param api_version: The api_version of this V1beta1PodSecurityPolicyList.  # noqa: E501
        :type: str
        """

        self._api_version = api_version

    @property
    def items(self):
        """Gets the items of this V1beta1PodSecurityPolicyList.  # noqa: E501

        items is a list of schema objects.  # noqa: E501

        :return: The items of this V1beta1PodSecurityPolicyList.  # noqa: E501
        :rtype: list[V1beta1PodSecurityPolicy]
        """
        return self._items

    @items.setter
    def items(self, items):
        """Sets the items of this V1beta1PodSecurityPolicyList.

        items is a list of schema objects.  # noqa: E501

        :param items: The items of this V1beta1PodSecurityPolicyList.  # noqa: E501
        :type: list[V1beta1PodSecurityPolicy]
        """
        if self.local_vars_configuration.client_side_validation and items is None:  # noqa: E501
            raise ValueError("Invalid value for `items`, must not be `None`")  # noqa: E501

        self._items = items

    @property
    def kind(self):
        """Gets the kind of this V1beta1PodSecurityPolicyList.  # noqa: E501

        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501

        :return: The kind of this V1beta1PodSecurityPolicyList.  # noqa: E501
        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Sets the kind of this V1beta1PodSecurityPolicyList.

        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501

        :param kind: The kind of this V1beta1PodSecurityPolicyList.  # noqa: E501
        :type: str
        """

        self._kind = kind

    @property
    def metadata(self):
        """Gets the metadata of this V1beta1PodSecurityPolicyList.  # noqa: E501


        :return: The metadata of this V1beta1PodSecurityPolicyList.  # noqa: E501
        :rtype: V1ListMeta
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """Sets the metadata of this V1beta1PodSecurityPolicyList.


        :param metadata: The metadata of this V1beta1PodSecurityPolicyList.  # noqa: E501
        :type: V1ListMeta
        """

        self._metadata = metadata

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1beta1PodSecurityPolicyList):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1beta1PodSecurityPolicyList):
            return True

        return self.to_dict() != other.to_dict()
34.849515
312
0.63839
import pprint
import re

import six

from kubernetes.client.configuration import Configuration


class V1beta1PodSecurityPolicyList(object):
    openapi_types = {
        'api_version': 'str',
        'items': 'list[V1beta1PodSecurityPolicy]',
        'kind': 'str',
        'metadata': 'V1ListMeta'
    }

    attribute_map = {
        'api_version': 'apiVersion',
        'items': 'items',
        'kind': 'kind',
        'metadata': 'metadata'
    }

    def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None):
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._api_version = None
        self._items = None
        self._kind = None
        self._metadata = None
        self.discriminator = None

        if api_version is not None:
            self.api_version = api_version
        self.items = items
        if kind is not None:
            self.kind = kind
        if metadata is not None:
            self.metadata = metadata

    @property
    def api_version(self):
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        self._api_version = api_version

    @property
    def items(self):
        return self._items

    @items.setter
    def items(self, items):
        if self.local_vars_configuration.client_side_validation and items is None:
            raise ValueError("Invalid value for `items`, must not be `None`")

        self._items = items

    @property
    def kind(self):
        return self._kind

    @kind.setter
    def kind(self, kind):
        self._kind = kind

    @property
    def metadata(self):
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        self._metadata = metadata

    def to_dict(self):
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        return self.to_str()

    def __eq__(self, other):
        if not isinstance(other, V1beta1PodSecurityPolicyList):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        if not isinstance(other, V1beta1PodSecurityPolicyList):
            return True

        return self.to_dict() != other.to_dict()
true
true
f70de19e3f87e9d3723781f6cf1e2ae3fb38361b
13,640
py
Python
code/main.py
autonomousvision/handheld_svbrdf_geometry
41218b0546e7386229b87c94d528cd193127acff
[ "MIT" ]
53
2020-03-29T04:15:44.000Z
2022-03-19T02:49:12.000Z
code/main.py
autonomousvision/handheld_svbrdf_geometry
41218b0546e7386229b87c94d528cd193127acff
[ "MIT" ]
3
2020-07-13T12:54:58.000Z
2020-11-18T15:50:04.000Z
code/main.py
autonomousvision/handheld_svbrdf_geometry
41218b0546e7386229b87c94d528cd193127acff
[ "MIT" ]
7
2020-06-16T14:42:48.000Z
2021-04-13T04:34:52.000Z
""" Copyright (c) 2020 Autonomous Vision Group (AVG), Max Planck Institute for Intelligent Systems, Tuebingen, Germany Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import json import os from tqdm import tqdm import cv2 import torch import itertools from data import DataAdapterFactory from experiment_state import ExperimentState from experiment_settings import ExperimentSettings, recursive_dict_update from optimization import optimize from higo import higo_baseline import general_settings from evaluation import evaluate_state from utils.logging import error settings_dict_base = { 'data_settings': { 'data_type': "XIMEA", 'center_view': None, 'nr_neighbours': 40, 'base_input_path': "<input_data_base_folder>/", 'object_name': None, 'base_output_path': "<output_base_folder>/", 'gt_scan_folder': "<gt_scan_folder>/", 'calibration_path_geometric': "<calibration_base_folder>/geometric/", 'vignetting_file': '<calibration_base_folder>/photometric/vignetting.npz', 'depth_folder': 'tsdf-fusion-depth_oldCV_40_views', 'center_stride': 2, 'depth_scale': 1e-3, 'light_scale': 1e0, 'lazy_image_loading': False, 'input_reup_sample': 1., 'input_down_sample': 1., 'manual_center_view_crop': None, }, 'parametrization_settings': { 'locations': 'depth map', 'normals': 'per point', 'materials': 'base specular materials', 'brdf': 'cook torrance F1', 'observation_poses': 'quaternion', 'lights': 'point light', }, 'initialization_settings': { 'normals': 'from_closed_form', 'diffuse': 'from_closed_form', 'specular': 'hardcoded', 'lights': 'precalibrated', 'light_calibration_files': { "positions": "<calibration_base_folder>/photometric/lights_array.pkl", "intensities": "<calibration_base_folder>/photometric/LED_light_intensities.npy", "attenuations": "<calibration_base_folder>/photometric/LED_angular_dependency.npy", } }, 'default_optimization_settings': { 'parameters': [ 'locations', 'poses', 'normals', 'vignetting', 'diffuse_materials', 'specular_weights', 'specular_materials', 'light_positions', 'light_intensities', 'light_attenuations', ], 'losses': { "photoconsistency L1": 1e-4, "geometric consistency": 1e1, "depth compatibility": 1e10, "normal smoothness": 1e-1, "material sparsity": 1e-1, "material smoothness": 1e0, }, "iterations": 1000, 'visualize_initial': False, 'visualize_results': True, 'target_set': "training", }, 'optimization_steps': [], } settings_dict_higo = recursive_dict_update( settings_dict_base, { 'data_settings': { 'output_path_suffix': 'higo', }, 'higo_baseline': { 'step_size': 0.001, #1 mm 'step_radius': 25, 'eta': 10, 'lambda_n': 7.5, 
'lambda_s': 3.0, 'lambda_1': 0.1, 'lambda_2': 0.5, }, } ) disjoint_iteration = [ { 'parameters': ['specular_weights'], "iterations": 40, }, { 'parameters': ['diffuse_materials', 'specular_materials'], "iterations": 40, }, { 'parameters': ['normals'], "iterations": 60, }, { 'parameters': ['locations', 'observation_poses'], "iterations": 60, }, ] settings_dict_disjoint = recursive_dict_update( settings_dict_base, { 'data_settings': { 'output_path_suffix': 'disjoint', }, 'default_optimization_settings': { }, 'optimization_steps': [ *(disjoint_iteration * 5), # { # 'iterations': 1000, # 'parameters': [ # 'observation_poses', # ], # 'visualize_initial': True, # 'target_set': "testing", # }, ] } ) settings_dict_proposed = recursive_dict_update( settings_dict_base, { 'data_settings': { 'output_path_suffix': 'proposed', }, 'optimization_steps': [ { 'parameters': [ 'diffuse_materials', 'specular_weights', 'specular_materials', 'normals', 'observation_poses', 'locations', ], 'visualize_initial': True, }, # { # 'parameters': [ # 'observation_poses', # ], # 'visualize_initial': True, # 'target_set': "testing", # }, ] } ) def run_experiment(settings_dict): experiment_settings = ExperimentSettings(settings_dict) # localize to the current computer as required experiment_settings.localize() # create an empty experiment object, with the correct parametrizations experiment_settings.check_stored("parametrization_settings") experiment_state = ExperimentState.create(experiment_settings.get('parametrization_settings')) experiment_settings.save("parametrization_settings") # create the data adapter experiment_settings.check_stored("data_settings") data_adapter = DataAdapterFactory( experiment_settings.get('data_settings')['data_type'] )( experiment_settings.get('local_data_settings') ) if not experiment_settings.get('data_settings')['lazy_image_loading']: device = torch.device(general_settings.device_name) image_tensors = [ observation.get_image() for observation in tqdm(data_adapter.images, desc="Preloading training images") ] # now compact all observations into a few big tensors and remove the old tensors # this makes for much faster access/operations data_adapter.compound_image_tensors = {} data_adapter.compound_image_tensor_sizes = {} training_indices_batches, _ = data_adapter.get_training_info() testing_indices_batches, _ = data_adapter.get_testing_info() for batch in itertools.chain(training_indices_batches, testing_indices_batches): compound_H = max([image_tensors[i].shape[-2] for i in batch]) compound_W = max([image_tensors[i].shape[-1] for i in batch]) C = len(batch) compound_images = torch.zeros(C, 3, compound_H, compound_W, dtype=torch.float, device=device) compound_sizes = torch.zeros(C, 2, dtype=torch.long, device=device) for idx, image_idx in enumerate(batch): src_tensor = image_tensors[image_idx] compound_images[idx,:,:src_tensor.shape[-2], :src_tensor.shape[-1]] = src_tensor compound_sizes[idx,0] = src_tensor.shape[-1] compound_sizes[idx,1] = src_tensor.shape[-2] del data_adapter.images[image_idx]._image data_adapter.compound_image_tensors[batch] = compound_images data_adapter.compound_image_tensor_sizes[batch] = compound_sizes del image_tensors experiment_settings.save("data_settings") # initialize the parametrizations with the requested values, if the initialization is not available on disk initialization_state_folder = experiment_settings.get_state_folder("initialization") if experiment_settings.check_stored("initialization_settings"): experiment_state.load(initialization_state_folder) else: 
experiment_state.initialize(data_adapter, experiment_settings.get('local_initialization_settings')) experiment_state.save(initialization_state_folder) experiment_settings.save("initialization_settings") # evaluate_state("initialization", # experiment_settings.get('data_settings')['object_name'], # experiment_settings.get('local_data_settings')['gt_scan_folder'], # experiment_state) experiment_state.visualize_statics( experiment_settings.get_output_path(), data_adapter ) if experiment_settings.get("higo_baseline") is not None: higo_state_folder = experiment_settings.get_state_folder("higo") if not experiment_settings.check_stored("higo_baseline"): higo_experiment_state = higo_baseline( experiment_state, data_adapter, higo_state_folder, experiment_settings.get('higo_baseline') ) higo_experiment_state.visualize( experiment_settings.get_output_path(), "higo_baseline", data_adapter, losses = [], shadows_occlusions=False ) higo_experiment_state.save(higo_state_folder) experiment_settings.save("higo_baseline") else: higo_experiment_state = ExperimentState.copy(experiment_state) higo_experiment_state.load(higo_state_folder) evaluate_state("higo baseline", experiment_settings.get('data_settings')['object_name'], experiment_settings.get('local_data_settings')['gt_scan_folder'], higo_experiment_state) optimization_step_settings = experiment_settings.get('default_optimization_settings') experiment_settings.check_stored("default_optimization_settings") experiment_settings.save("default_optimization_settings") for step_index in range(len(experiment_settings.get('optimization_steps'))): step_state_folder = experiment_settings.get_state_folder("optimization_steps", step_index) optimization_settings = experiment_settings.get("optimization_steps", step_index) shorthand = experiment_settings.get_shorthand("optimization_steps", step_index) set_name = "%02d_%s" % (step_index, shorthand) if optimization_settings['visualize_initial']: experiment_state.visualize( experiment_settings.get_output_path(), "%02d__initial" % step_index, data_adapter, optimization_settings['losses'] ) if experiment_settings.check_stored("optimization_steps", step_index): experiment_state.load(step_state_folder) else: optimize( experiment_state, data_adapter, optimization_settings, output_path_structure=os.path.join( experiment_settings.get_output_path(), "evolution_%%s_%s.png" % set_name ) ) experiment_state.save(step_state_folder) experiment_settings.save("optimization_steps", step_index) if optimization_settings['visualize_results']: experiment_state.visualize( experiment_settings.get_output_path(), set_name, data_adapter, optimization_settings['losses'] ) evaluate_state(experiment_settings.get('data_settings').get('output_path_suffix', 'proposed'), experiment_settings.get('data_settings')['object_name'], experiment_settings.get('local_data_settings')['gt_scan_folder'], experiment_state) if __name__ == "__main__": for object_name, center_view in [ # ("peter", 1171), # ("peter", 566), # ("teapot", 451), # ("teapot", 999), # ("gnome", 308), # ("gnome", 488), # ("girl", 882), # ("girl", 1059), ("duck", 826), # ("duck", 49), # ("fire_hydrant", 582), # ("fire_hydrant", 704), # ("pineapple", 401), # ("pineapple", 536), # ("bunny", 670), # ("bunny", 180), ]: for settings_dict in [settings_dict_proposed, settings_dict_higo, settings_dict_disjoint]: settings_dict['data_settings']['object_name'] = object_name settings_dict['data_settings']['center_view'] = center_view run_experiment(settings_dict)
37.888889
114
0.615396
import json import os from tqdm import tqdm import cv2 import torch import itertools from data import DataAdapterFactory from experiment_state import ExperimentState from experiment_settings import ExperimentSettings, recursive_dict_update from optimization import optimize from higo import higo_baseline import general_settings from evaluation import evaluate_state from utils.logging import error settings_dict_base = { 'data_settings': { 'data_type': "XIMEA", 'center_view': None, 'nr_neighbours': 40, 'base_input_path': "<input_data_base_folder>/", 'object_name': None, 'base_output_path': "<output_base_folder>/", 'gt_scan_folder': "<gt_scan_folder>/", 'calibration_path_geometric': "<calibration_base_folder>/geometric/", 'vignetting_file': '<calibration_base_folder>/photometric/vignetting.npz', 'depth_folder': 'tsdf-fusion-depth_oldCV_40_views', 'center_stride': 2, 'depth_scale': 1e-3, 'light_scale': 1e0, 'lazy_image_loading': False, 'input_reup_sample': 1., 'input_down_sample': 1., 'manual_center_view_crop': None, }, 'parametrization_settings': { 'locations': 'depth map', 'normals': 'per point', 'materials': 'base specular materials', 'brdf': 'cook torrance F1', 'observation_poses': 'quaternion', 'lights': 'point light', }, 'initialization_settings': { 'normals': 'from_closed_form', 'diffuse': 'from_closed_form', 'specular': 'hardcoded', 'lights': 'precalibrated', 'light_calibration_files': { "positions": "<calibration_base_folder>/photometric/lights_array.pkl", "intensities": "<calibration_base_folder>/photometric/LED_light_intensities.npy", "attenuations": "<calibration_base_folder>/photometric/LED_angular_dependency.npy", } }, 'default_optimization_settings': { 'parameters': [ 'locations', 'poses', 'normals', 'vignetting', 'diffuse_materials', 'specular_weights', 'specular_materials', 'light_positions', 'light_intensities', 'light_attenuations', ], 'losses': { "photoconsistency L1": 1e-4, "geometric consistency": 1e1, "depth compatibility": 1e10, "normal smoothness": 1e-1, "material sparsity": 1e-1, "material smoothness": 1e0, }, "iterations": 1000, 'visualize_initial': False, 'visualize_results': True, 'target_set': "training", }, 'optimization_steps': [], } settings_dict_higo = recursive_dict_update( settings_dict_base, { 'data_settings': { 'output_path_suffix': 'higo', }, 'higo_baseline': { 'step_size': 0.001, 'step_radius': 25, 'eta': 10, 'lambda_n': 7.5, 'lambda_s': 3.0, 'lambda_1': 0.1, 'lambda_2': 0.5, }, } ) disjoint_iteration = [ { 'parameters': ['specular_weights'], "iterations": 40, }, { 'parameters': ['diffuse_materials', 'specular_materials'], "iterations": 40, }, { 'parameters': ['normals'], "iterations": 60, }, { 'parameters': ['locations', 'observation_poses'], "iterations": 60, }, ] settings_dict_disjoint = recursive_dict_update( settings_dict_base, { 'data_settings': { 'output_path_suffix': 'disjoint', }, 'default_optimization_settings': { }, 'optimization_steps': [ *(disjoint_iteration * 5), ] } ) settings_dict_proposed = recursive_dict_update( settings_dict_base, { 'data_settings': { 'output_path_suffix': 'proposed', }, 'optimization_steps': [ { 'parameters': [ 'diffuse_materials', 'specular_weights', 'specular_materials', 'normals', 'observation_poses', 'locations', ], 'visualize_initial': True, }, ] } ) def run_experiment(settings_dict): experiment_settings = ExperimentSettings(settings_dict) experiment_settings.localize() experiment_settings.check_stored("parametrization_settings") experiment_state = 
ExperimentState.create(experiment_settings.get('parametrization_settings')) experiment_settings.save("parametrization_settings") experiment_settings.check_stored("data_settings") data_adapter = DataAdapterFactory( experiment_settings.get('data_settings')['data_type'] )( experiment_settings.get('local_data_settings') ) if not experiment_settings.get('data_settings')['lazy_image_loading']: device = torch.device(general_settings.device_name) image_tensors = [ observation.get_image() for observation in tqdm(data_adapter.images, desc="Preloading training images") ] data_adapter.compound_image_tensors = {} data_adapter.compound_image_tensor_sizes = {} training_indices_batches, _ = data_adapter.get_training_info() testing_indices_batches, _ = data_adapter.get_testing_info() for batch in itertools.chain(training_indices_batches, testing_indices_batches): compound_H = max([image_tensors[i].shape[-2] for i in batch]) compound_W = max([image_tensors[i].shape[-1] for i in batch]) C = len(batch) compound_images = torch.zeros(C, 3, compound_H, compound_W, dtype=torch.float, device=device) compound_sizes = torch.zeros(C, 2, dtype=torch.long, device=device) for idx, image_idx in enumerate(batch): src_tensor = image_tensors[image_idx] compound_images[idx,:,:src_tensor.shape[-2], :src_tensor.shape[-1]] = src_tensor compound_sizes[idx,0] = src_tensor.shape[-1] compound_sizes[idx,1] = src_tensor.shape[-2] del data_adapter.images[image_idx]._image data_adapter.compound_image_tensors[batch] = compound_images data_adapter.compound_image_tensor_sizes[batch] = compound_sizes del image_tensors experiment_settings.save("data_settings") initialization_state_folder = experiment_settings.get_state_folder("initialization") if experiment_settings.check_stored("initialization_settings"): experiment_state.load(initialization_state_folder) else: experiment_state.initialize(data_adapter, experiment_settings.get('local_initialization_settings')) experiment_state.save(initialization_state_folder) experiment_settings.save("initialization_settings") experiment_state.visualize_statics( experiment_settings.get_output_path(), data_adapter ) if experiment_settings.get("higo_baseline") is not None: higo_state_folder = experiment_settings.get_state_folder("higo") if not experiment_settings.check_stored("higo_baseline"): higo_experiment_state = higo_baseline( experiment_state, data_adapter, higo_state_folder, experiment_settings.get('higo_baseline') ) higo_experiment_state.visualize( experiment_settings.get_output_path(), "higo_baseline", data_adapter, losses = [], shadows_occlusions=False ) higo_experiment_state.save(higo_state_folder) experiment_settings.save("higo_baseline") else: higo_experiment_state = ExperimentState.copy(experiment_state) higo_experiment_state.load(higo_state_folder) evaluate_state("higo baseline", experiment_settings.get('data_settings')['object_name'], experiment_settings.get('local_data_settings')['gt_scan_folder'], higo_experiment_state) optimization_step_settings = experiment_settings.get('default_optimization_settings') experiment_settings.check_stored("default_optimization_settings") experiment_settings.save("default_optimization_settings") for step_index in range(len(experiment_settings.get('optimization_steps'))): step_state_folder = experiment_settings.get_state_folder("optimization_steps", step_index) optimization_settings = experiment_settings.get("optimization_steps", step_index) shorthand = experiment_settings.get_shorthand("optimization_steps", step_index) set_name = "%02d_%s" % (step_index, 
shorthand) if optimization_settings['visualize_initial']: experiment_state.visualize( experiment_settings.get_output_path(), "%02d__initial" % step_index, data_adapter, optimization_settings['losses'] ) if experiment_settings.check_stored("optimization_steps", step_index): experiment_state.load(step_state_folder) else: optimize( experiment_state, data_adapter, optimization_settings, output_path_structure=os.path.join( experiment_settings.get_output_path(), "evolution_%%s_%s.png" % set_name ) ) experiment_state.save(step_state_folder) experiment_settings.save("optimization_steps", step_index) if optimization_settings['visualize_results']: experiment_state.visualize( experiment_settings.get_output_path(), set_name, data_adapter, optimization_settings['losses'] ) evaluate_state(experiment_settings.get('data_settings').get('output_path_suffix', 'proposed'), experiment_settings.get('data_settings')['object_name'], experiment_settings.get('local_data_settings')['gt_scan_folder'], experiment_state) if __name__ == "__main__": for object_name, center_view in [ ("duck", 826), ]: for settings_dict in [settings_dict_proposed, settings_dict_higo, settings_dict_disjoint]: settings_dict['data_settings']['object_name'] = object_name settings_dict['data_settings']['center_view'] = center_view run_experiment(settings_dict)
true
true
f70de1a155e0edcd66535bceb99ec7baf0f1e9b8
6,705
py
Python
src/datasets/decorators.py
KMarkert/RHEAS
453f24ef635ca5a6338d3e2b19f215835dd1f10d
[ "MIT" ]
81
2015-12-10T01:46:23.000Z
2022-01-28T18:55:34.000Z
src/datasets/decorators.py
kandread/RHEAS
453f24ef635ca5a6338d3e2b19f215835dd1f10d
[ "MIT" ]
111
2015-12-19T22:29:04.000Z
2019-06-13T17:08:25.000Z
src/datasets/decorators.py
nasa/RHEAS
453f24ef635ca5a6338d3e2b19f215835dd1f10d
[ "MIT" ]
58
2015-12-10T07:23:06.000Z
2021-03-03T17:57:08.000Z
""" Definition for RHEAS Datasets decorators. .. module:: datasets.decorators :synopsis: Definition of the Datasets decorators .. moduleauthor:: Kostas Andreadis <kandread@jpl.nasa.gov> """ from functools import wraps import tempfile import shutil import urllib from datetime import datetime from ftplib import FTP import re from pydap.client import open_url import netCDF4 as netcdf4 import numpy as np from osgeo import gdal import datasets def resetDatetime(dt): """Set time to 00:00 to align with daily data.""" return datetime(dt.year, dt.month, dt.day, 0, 0) def path(fetch): """Decorator for getting files from local path.""" @wraps(fetch) def wrapper(*args, **kwargs): url, bbox, dt = fetch(*args, **kwargs) outpath = tempfile.mkdtemp() filename = url.format(dt.year, dt.month, dt.day) try: shutil.copy(filename, outpath) lfilename = filename.split("/")[-1] except: lfilename = None return outpath, lfilename, bbox, dt return wrapper def http(fetch): """Decorator for downloading files from HTTP sites.""" @wraps(fetch) def wrapper(*args, **kwargs): url, bbox, dt = fetch(*args, **kwargs) outpath = tempfile.mkdtemp() filename = url.format(dt.year, dt.month, dt.day) try: lfilename = filename.split("/")[-1] urllib.urlcleanup() urllib.urlretrieve(filename, "{0}/{1}".format(outpath, lfilename)) except: lfilename = None return outpath, lfilename, bbox, dt return wrapper def ftp(fetch): """Decorator for downloading files from FTP sites.""" @wraps(fetch) def wrapper(*args, **kwargs): url, bbox, dt = fetch(*args, **kwargs) ftpurl = url.split("/")[2] outpath = tempfile.mkdtemp() try: conn = FTP(ftpurl) conn.login() conn.cwd("/".join(url.split("/")[3:-1]).format(dt.year, dt.month, dt.day)) name = url.split("/")[-1].format(dt.year, dt.month, dt.day) filenames = [f for f in conn.nlst() if re.match(r".*{0}.*".format(name), f) is not None] if len(filenames) > 0: filename = filenames[0] with open("{0}/{1}".format(outpath, filename), 'wb') as f: conn.retrbinary("RETR {0}".format(filename), f.write) filenames.append("{0}/{1}".format(outpath, filename)) else: filename = None except: filename = None return outpath, filename, bbox, dt return wrapper def opendap(fetch): """Decorator for fetching data from Opendap servers.""" @wraps(fetch) def wrapper(*args, **kwargs): url, varname, bbox, dt = fetch(*args, **kwargs) ds = open_url(url) for var in ds.keys(): if var.lower().startswith("lon") or var.lower() == "x": lonvar = var if var.lower().startswith("lat") or var.lower() == "y": latvar = var if var.lower().startswith("time") or var.lower() == "t": timevar = var lat = ds[latvar][:].data lon = ds[lonvar][:].data lon[lon > 180] -= 360 res = abs(lat[0]-lat[1]) # assume rectangular grid i1, i2, j1, j2 = datasets.spatialSubset(np.sort(lat)[::-1], np.sort(lon), res, bbox) t = ds[timevar] tt = netcdf4.num2date(t[:].data, units=t.units) ti = [tj for tj in range(len(tt)) if resetDatetime(tt[tj]) >= dt[0] and resetDatetime(tt[tj]) <= dt[1]] if len(ti) > 0: lati = np.argsort(lat)[::-1][i1:i2] loni = np.argsort(lon)[j1:j2] if len(ds[varname].data[0].shape) > 3: data = ds[varname].data[0][ti[0]:ti[-1]+1, 0, lati[0]:lati[-1]+1, loni[0]:loni[-1]+1] else: data = ds[varname].data[0][ti[0]:ti[-1]+1, 0, lati[0]:lati[-1]+1, loni[0]:loni[-1]+1] dt = tt[ti] else: data = None dt = None lat = np.sort(lat)[::-1][i1:i2] lon = np.sort(lon)[j1:j2] return data, lat, lon, dt return wrapper def netcdf(fetch): """Decorator for fetching NetCDF files (local or from Opendap servers).""" @wraps(fetch) def wrapper(*args, **kwargs): url, varname, bbox, dt = 
fetch(*args, **kwargs) ds = netcdf4.Dataset(url) for var in ds.variables: if var.lower().startswith("lon") or var.lower() == "x": lonvar = var if var.lower().startswith("lat") or var.lower() == "y": latvar = var if var.lower().startswith("time") or var.lower() == "t": timevar = var lat = ds.variables[latvar][:] lon = ds.variables[lonvar][:] lon[lon > 180] -= 360 res = abs(lat[0]-lat[1]) # assume rectangular grid i1, i2, j1, j2 = datasets.spatialSubset(np.sort(lat)[::-1], np.sort(lon), res, bbox) t = ds.variables[timevar] tt = netcdf4.num2date(t[:], units=t.units) ti = [tj for tj in range(len(tt)) if resetDatetime(tt[tj]) >= dt[0] and resetDatetime(tt[tj]) <= dt[1]] if len(ti) > 0: lati = np.argsort(lat)[::-1][i1:i2] loni = np.argsort(lon)[j1:j2] if len(ds.variables[varname].shape) > 3: data = ds.variables[varname][ti, 0, lati, loni] else: data = ds.variables[varname][ti, lati, loni] dt = tt[ti] else: data = None dt = None lat = np.sort(lat)[::-1][i1:i2] lon = np.sort(lon)[j1:j2] return data, lat, lon, dt return wrapper def geotiff(fetch): """Decorator for reading data from raster files.""" @wraps(fetch) def wrapper(*args, **kwargs): outpath, filename, bbox, dt = fetch(*args, **kwargs) if filename is not None: lfilename = datasets.uncompress(filename, outpath) f = gdal.Open("{0}/{1}".format(outpath, lfilename)) xul, xres, _, yul, _, yres = f.GetGeoTransform() data = f.ReadAsArray() nr, nc = data.shape lat = np.arange(yul + yres/2.0, yul + yres * nr, yres) lon = np.arange(xul + xres/2.0, xul + xres * nc, xres) i1, i2, j1, j2 = datasets.spatialSubset(lat, lon, xres, bbox) data = data[i1:i2, j1:j2] lat = lat[i1:i2] lon = lon[j1:j2] shutil.rmtree(outpath) else: data = lat = lon = None return data, lat, lon, dt return wrapper
35.855615
111
0.538702
from functools import wraps import tempfile import shutil import urllib from datetime import datetime from ftplib import FTP import re from pydap.client import open_url import netCDF4 as netcdf4 import numpy as np from osgeo import gdal import datasets def resetDatetime(dt): return datetime(dt.year, dt.month, dt.day, 0, 0) def path(fetch): @wraps(fetch) def wrapper(*args, **kwargs): url, bbox, dt = fetch(*args, **kwargs) outpath = tempfile.mkdtemp() filename = url.format(dt.year, dt.month, dt.day) try: shutil.copy(filename, outpath) lfilename = filename.split("/")[-1] except: lfilename = None return outpath, lfilename, bbox, dt return wrapper def http(fetch): @wraps(fetch) def wrapper(*args, **kwargs): url, bbox, dt = fetch(*args, **kwargs) outpath = tempfile.mkdtemp() filename = url.format(dt.year, dt.month, dt.day) try: lfilename = filename.split("/")[-1] urllib.urlcleanup() urllib.urlretrieve(filename, "{0}/{1}".format(outpath, lfilename)) except: lfilename = None return outpath, lfilename, bbox, dt return wrapper def ftp(fetch): @wraps(fetch) def wrapper(*args, **kwargs): url, bbox, dt = fetch(*args, **kwargs) ftpurl = url.split("/")[2] outpath = tempfile.mkdtemp() try: conn = FTP(ftpurl) conn.login() conn.cwd("/".join(url.split("/")[3:-1]).format(dt.year, dt.month, dt.day)) name = url.split("/")[-1].format(dt.year, dt.month, dt.day) filenames = [f for f in conn.nlst() if re.match(r".*{0}.*".format(name), f) is not None] if len(filenames) > 0: filename = filenames[0] with open("{0}/{1}".format(outpath, filename), 'wb') as f: conn.retrbinary("RETR {0}".format(filename), f.write) filenames.append("{0}/{1}".format(outpath, filename)) else: filename = None except: filename = None return outpath, filename, bbox, dt return wrapper def opendap(fetch): @wraps(fetch) def wrapper(*args, **kwargs): url, varname, bbox, dt = fetch(*args, **kwargs) ds = open_url(url) for var in ds.keys(): if var.lower().startswith("lon") or var.lower() == "x": lonvar = var if var.lower().startswith("lat") or var.lower() == "y": latvar = var if var.lower().startswith("time") or var.lower() == "t": timevar = var lat = ds[latvar][:].data lon = ds[lonvar][:].data lon[lon > 180] -= 360 res = abs(lat[0]-lat[1]) i1, i2, j1, j2 = datasets.spatialSubset(np.sort(lat)[::-1], np.sort(lon), res, bbox) t = ds[timevar] tt = netcdf4.num2date(t[:].data, units=t.units) ti = [tj for tj in range(len(tt)) if resetDatetime(tt[tj]) >= dt[0] and resetDatetime(tt[tj]) <= dt[1]] if len(ti) > 0: lati = np.argsort(lat)[::-1][i1:i2] loni = np.argsort(lon)[j1:j2] if len(ds[varname].data[0].shape) > 3: data = ds[varname].data[0][ti[0]:ti[-1]+1, 0, lati[0]:lati[-1]+1, loni[0]:loni[-1]+1] else: data = ds[varname].data[0][ti[0]:ti[-1]+1, 0, lati[0]:lati[-1]+1, loni[0]:loni[-1]+1] dt = tt[ti] else: data = None dt = None lat = np.sort(lat)[::-1][i1:i2] lon = np.sort(lon)[j1:j2] return data, lat, lon, dt return wrapper def netcdf(fetch): @wraps(fetch) def wrapper(*args, **kwargs): url, varname, bbox, dt = fetch(*args, **kwargs) ds = netcdf4.Dataset(url) for var in ds.variables: if var.lower().startswith("lon") or var.lower() == "x": lonvar = var if var.lower().startswith("lat") or var.lower() == "y": latvar = var if var.lower().startswith("time") or var.lower() == "t": timevar = var lat = ds.variables[latvar][:] lon = ds.variables[lonvar][:] lon[lon > 180] -= 360 res = abs(lat[0]-lat[1]) i1, i2, j1, j2 = datasets.spatialSubset(np.sort(lat)[::-1], np.sort(lon), res, bbox) t = ds.variables[timevar] tt = netcdf4.num2date(t[:], units=t.units) ti = [tj for 
tj in range(len(tt)) if resetDatetime(tt[tj]) >= dt[0] and resetDatetime(tt[tj]) <= dt[1]] if len(ti) > 0: lati = np.argsort(lat)[::-1][i1:i2] loni = np.argsort(lon)[j1:j2] if len(ds.variables[varname].shape) > 3: data = ds.variables[varname][ti, 0, lati, loni] else: data = ds.variables[varname][ti, lati, loni] dt = tt[ti] else: data = None dt = None lat = np.sort(lat)[::-1][i1:i2] lon = np.sort(lon)[j1:j2] return data, lat, lon, dt return wrapper def geotiff(fetch): @wraps(fetch) def wrapper(*args, **kwargs): outpath, filename, bbox, dt = fetch(*args, **kwargs) if filename is not None: lfilename = datasets.uncompress(filename, outpath) f = gdal.Open("{0}/{1}".format(outpath, lfilename)) xul, xres, _, yul, _, yres = f.GetGeoTransform() data = f.ReadAsArray() nr, nc = data.shape lat = np.arange(yul + yres/2.0, yul + yres * nr, yres) lon = np.arange(xul + xres/2.0, xul + xres * nc, xres) i1, i2, j1, j2 = datasets.spatialSubset(lat, lon, xres, bbox) data = data[i1:i2, j1:j2] lat = lat[i1:i2] lon = lon[j1:j2] shutil.rmtree(outpath) else: data = lat = lon = None return data, lat, lon, dt return wrapper
true
true
f70de2889f7d7dccb5ca34957060cc783c397d36
8,570
py
Python
openmdao/solvers/tests/test_solver_debug_print.py
LeeElvis/OpenMDAO
e9e002054c0ecad7467da2a7bbb8fdf68fccfb8c
[ "Apache-2.0" ]
null
null
null
openmdao/solvers/tests/test_solver_debug_print.py
LeeElvis/OpenMDAO
e9e002054c0ecad7467da2a7bbb8fdf68fccfb8c
[ "Apache-2.0" ]
null
null
null
openmdao/solvers/tests/test_solver_debug_print.py
LeeElvis/OpenMDAO
e9e002054c0ecad7467da2a7bbb8fdf68fccfb8c
[ "Apache-2.0" ]
null
null
null
"""Tests the `debug_print` option for Nonlinear solvers.""" import os import re import sys import shutil import tempfile import unittest from distutils.version import LooseVersion from io import StringIO import numpy as np import openmdao.api as om from openmdao.test_suite.scripts.circuit_analysis import Circuit from openmdao.utils.general_utils import run_model from openmdao.utils.general_utils import printoptions try: from parameterized import parameterized except ImportError: from openmdao.utils.assert_utils import SkipParameterized as parameterized nonlinear_solvers = [ om.NonlinearBlockGS, om.NonlinearBlockJac, om.NewtonSolver, om.BroydenSolver ] class TestNonlinearSolvers(unittest.TestCase): def setUp(self): import re import os from tempfile import mkdtemp # perform test in temporary directory self.startdir = os.getcwd() self.tempdir = mkdtemp(prefix='test_solver') os.chdir(self.tempdir) # iteration coordinate, file name and variable data are common for all tests coord = 'rank0:root._solve_nonlinear|0|NLRunOnce|0|circuit._solve_nonlinear|0' self.filename = 'solver_errors.0.out' self.expected_data = '\n'.join([ "", "# Inputs and outputs at start of iteration '%s':" % coord, "", "# nonlinear inputs", "{'circuit.D1.V_in': array([ 1.]),", " 'circuit.D1.V_out': array([ 0.]),", " 'circuit.R1.V_in': array([ 1.]),", " 'circuit.R1.V_out': array([ 0.]),", " 'circuit.R2.V_in': array([ 1.]),", " 'circuit.R2.V_out': array([ 1.]),", " 'circuit.n1.I_in:0': array([ 0.1]),", " 'circuit.n1.I_out:0': array([ 1.]),", " 'circuit.n1.I_out:1': array([ 1.]),", " 'circuit.n2.I_in:0': array([ 1.]),", " 'circuit.n2.I_out:0': array([ 1.])}", "", "# nonlinear outputs", "{'circuit.D1.I': array([ 1.]),", " 'circuit.R1.I': array([ 1.]),", " 'circuit.R2.I': array([ 1.]),", " 'circuit.n1.V': array([ 10.]),", " 'circuit.n2.V': array([ 0.001])}", "" ]) def tearDown(self): import os from shutil import rmtree # clean up the temporary directory os.chdir(self.startdir) try: rmtree(self.tempdir) except OSError: pass @parameterized.expand([ [solver.__name__, solver] for solver in nonlinear_solvers ]) def test_solver_debug_print(self, name, solver): p = om.Problem() model = p.model model.add_subsystem('ground', om.IndepVarComp('V', 0., units='V')) model.add_subsystem('source', om.IndepVarComp('I', 0.1, units='A')) model.add_subsystem('circuit', Circuit()) model.connect('source.I', 'circuit.I_in') model.connect('ground.V', 'circuit.Vg') p.setup() nl = model.circuit.nonlinear_solver = solver() nl.options['debug_print'] = True nl.options['err_on_non_converge'] = True if name == 'NonlinearBlockGS': nl.options['use_apply_nonlinear'] = True if name == 'NewtonSolver': nl.options['solve_subsystems'] = True # suppress solver output for test nl.options['iprint'] = model.circuit.linear_solver.options['iprint'] = -1 # For Broydensolver, don't calc Jacobian try: nl.options['compute_jacobian'] = False except KeyError: pass # set some poor initial guesses so that we don't converge p['circuit.n1.V'] = 10. p['circuit.n2.V'] = 1e-3 opts = {} # formatting has changed in numpy 1.14 and beyond. 
if LooseVersion(np.__version__) >= LooseVersion("1.14"): opts["legacy"] = '1.13' with printoptions(**opts): # run the model and check for expected output file output = run_model(p, ignore_exception=True) expected_output = '\n'.join([ self.expected_data, "Inputs and outputs at start of iteration " "have been saved to '%s'.\n" % self.filename ]) self.assertEqual(output, expected_output) with open(self.filename, 'r') as f: self.assertEqual(f.read(), self.expected_data) # setup & run again to make sure there is no error due to existing file p.setup() with printoptions(**opts): run_model(p, ignore_exception=False) def test_solver_debug_print_feature(self): from distutils.version import LooseVersion import numpy as np import openmdao.api as om from openmdao.test_suite.scripts.circuit_analysis import Circuit from openmdao.utils.general_utils import printoptions p = om.Problem() model = p.model model.add_subsystem('circuit', Circuit()) p.setup() nl = model.circuit.nonlinear_solver = om.NewtonSolver(solve_subsystems=False) nl.options['iprint'] = 2 nl.options['debug_print'] = True nl.options['err_on_non_converge'] = True # set some poor initial guesses so that we don't converge p.set_val('circuit.I_in', 0.1, units='A') p.set_val('circuit.Vg', 0.0, units='V') p.set_val('circuit.n1.V', 10.) p.set_val('circuit.n2.V', 1e-3) opts = {} # formatting has changed in numpy 1.14 and beyond. if LooseVersion(np.__version__) >= LooseVersion("1.14"): opts["legacy"] = '1.13' with printoptions(**opts): # run the model try: p.run_model() except om.AnalysisError: pass with open(self.filename, 'r') as f: self.assertEqual(f.read(), self.expected_data) class TestNonlinearSolversIsolated(unittest.TestCase): """ This test needs to run isolated to preclude interactions in the underlying `warnings` module that is used to raise the singular entry error. """ ISOLATED = True def setUp(self): # perform test in temporary directory self.startdir = os.getcwd() self.tempdir = tempfile.mkdtemp(prefix='test_solver') os.chdir(self.tempdir) def tearDown(self): # clean up the temporary directory os.chdir(self.startdir) try: shutil.rmtree(self.tempdir) except OSError: pass def test_debug_after_raised_error(self): prob = om.Problem() model = prob.model comp = om.IndepVarComp() comp.add_output('dXdt:TAS', val=1.0) comp.add_output('accel_target', val=2.0) model.add_subsystem('des_vars', comp, promotes=['*']) teg = model.add_subsystem('thrust_equilibrium_group', subsys=om.Group()) teg.add_subsystem('dynamics', om.ExecComp('z = 2.0*thrust'), promotes=['*']) thrust_bal = om.BalanceComp() thrust_bal.add_balance(name='thrust', val=1207.1, lhs_name='dXdt:TAS', rhs_name='accel_target', eq_units='m/s**2', lower=-10.0, upper=10000.0) teg.add_subsystem(name='thrust_bal', subsys=thrust_bal, promotes_inputs=['dXdt:TAS', 'accel_target'], promotes_outputs=['thrust']) teg.linear_solver = om.DirectSolver() teg.nonlinear_solver = om.NewtonSolver() teg.nonlinear_solver.options['solve_subsystems'] = True teg.nonlinear_solver.options['max_sub_solves'] = 1 teg.nonlinear_solver.options['atol'] = 1e-4 teg.nonlinear_solver.options['debug_print'] = True prob.setup() prob.set_solver_print(level=0) stdout = sys.stdout strout = StringIO() sys.stdout = strout with self.assertRaises(RuntimeError) as cm: prob.run_model() sys.stdout = stdout output = strout.getvalue() target = "'thrust_equilibrium_group.thrust_bal.thrust'" self.assertTrue(target in output, msg=target + "NOT FOUND IN" + output) # Make sure exception is unchanged. 
expected_msg = "Singular entry found in Group (thrust_equilibrium_group) for row associated with state/residual 'thrust' ('thrust_equilibrium_group.thrust_bal.thrust') index 0." self.assertEqual(expected_msg, str(cm.exception)) if __name__ == "__main__": unittest.main()
32.339623
185
0.597316
import os import re import sys import shutil import tempfile import unittest from distutils.version import LooseVersion from io import StringIO import numpy as np import openmdao.api as om from openmdao.test_suite.scripts.circuit_analysis import Circuit from openmdao.utils.general_utils import run_model from openmdao.utils.general_utils import printoptions try: from parameterized import parameterized except ImportError: from openmdao.utils.assert_utils import SkipParameterized as parameterized nonlinear_solvers = [ om.NonlinearBlockGS, om.NonlinearBlockJac, om.NewtonSolver, om.BroydenSolver ] class TestNonlinearSolvers(unittest.TestCase): def setUp(self): import re import os from tempfile import mkdtemp self.startdir = os.getcwd() self.tempdir = mkdtemp(prefix='test_solver') os.chdir(self.tempdir) coord = 'rank0:root._solve_nonlinear|0|NLRunOnce|0|circuit._solve_nonlinear|0' self.filename = 'solver_errors.0.out' self.expected_data = '\n'.join([ "", "# Inputs and outputs at start of iteration '%s':" % coord, "", "# nonlinear inputs", "{'circuit.D1.V_in': array([ 1.]),", " 'circuit.D1.V_out': array([ 0.]),", " 'circuit.R1.V_in': array([ 1.]),", " 'circuit.R1.V_out': array([ 0.]),", " 'circuit.R2.V_in': array([ 1.]),", " 'circuit.R2.V_out': array([ 1.]),", " 'circuit.n1.I_in:0': array([ 0.1]),", " 'circuit.n1.I_out:0': array([ 1.]),", " 'circuit.n1.I_out:1': array([ 1.]),", " 'circuit.n2.I_in:0': array([ 1.]),", " 'circuit.n2.I_out:0': array([ 1.])}", "", "# nonlinear outputs", "{'circuit.D1.I': array([ 1.]),", " 'circuit.R1.I': array([ 1.]),", " 'circuit.R2.I': array([ 1.]),", " 'circuit.n1.V': array([ 10.]),", " 'circuit.n2.V': array([ 0.001])}", "" ]) def tearDown(self): import os from shutil import rmtree os.chdir(self.startdir) try: rmtree(self.tempdir) except OSError: pass @parameterized.expand([ [solver.__name__, solver] for solver in nonlinear_solvers ]) def test_solver_debug_print(self, name, solver): p = om.Problem() model = p.model model.add_subsystem('ground', om.IndepVarComp('V', 0., units='V')) model.add_subsystem('source', om.IndepVarComp('I', 0.1, units='A')) model.add_subsystem('circuit', Circuit()) model.connect('source.I', 'circuit.I_in') model.connect('ground.V', 'circuit.Vg') p.setup() nl = model.circuit.nonlinear_solver = solver() nl.options['debug_print'] = True nl.options['err_on_non_converge'] = True if name == 'NonlinearBlockGS': nl.options['use_apply_nonlinear'] = True if name == 'NewtonSolver': nl.options['solve_subsystems'] = True nl.options['iprint'] = model.circuit.linear_solver.options['iprint'] = -1 try: nl.options['compute_jacobian'] = False except KeyError: pass # set some poor initial guesses so that we don't converge p['circuit.n1.V'] = 10. 
p['circuit.n2.V'] = 1e-3 opts = {} if LooseVersion(np.__version__) >= LooseVersion("1.14"): opts["legacy"] = '1.13' with printoptions(**opts): output = run_model(p, ignore_exception=True) expected_output = '\n'.join([ self.expected_data, "Inputs and outputs at start of iteration " "have been saved to '%s'.\n" % self.filename ]) self.assertEqual(output, expected_output) with open(self.filename, 'r') as f: self.assertEqual(f.read(), self.expected_data) p.setup() with printoptions(**opts): run_model(p, ignore_exception=False) def test_solver_debug_print_feature(self): from distutils.version import LooseVersion import numpy as np import openmdao.api as om from openmdao.test_suite.scripts.circuit_analysis import Circuit from openmdao.utils.general_utils import printoptions p = om.Problem() model = p.model model.add_subsystem('circuit', Circuit()) p.setup() nl = model.circuit.nonlinear_solver = om.NewtonSolver(solve_subsystems=False) nl.options['iprint'] = 2 nl.options['debug_print'] = True nl.options['err_on_non_converge'] = True p.set_val('circuit.I_in', 0.1, units='A') p.set_val('circuit.Vg', 0.0, units='V') p.set_val('circuit.n1.V', 10.) p.set_val('circuit.n2.V', 1e-3) opts = {} # formatting has changed in numpy 1.14 and beyond. if LooseVersion(np.__version__) >= LooseVersion("1.14"): opts["legacy"] = '1.13' with printoptions(**opts): # run the model try: p.run_model() except om.AnalysisError: pass with open(self.filename, 'r') as f: self.assertEqual(f.read(), self.expected_data) class TestNonlinearSolversIsolated(unittest.TestCase): ISOLATED = True def setUp(self): # perform test in temporary directory self.startdir = os.getcwd() self.tempdir = tempfile.mkdtemp(prefix='test_solver') os.chdir(self.tempdir) def tearDown(self): # clean up the temporary directory os.chdir(self.startdir) try: shutil.rmtree(self.tempdir) except OSError: pass def test_debug_after_raised_error(self): prob = om.Problem() model = prob.model comp = om.IndepVarComp() comp.add_output('dXdt:TAS', val=1.0) comp.add_output('accel_target', val=2.0) model.add_subsystem('des_vars', comp, promotes=['*']) teg = model.add_subsystem('thrust_equilibrium_group', subsys=om.Group()) teg.add_subsystem('dynamics', om.ExecComp('z = 2.0*thrust'), promotes=['*']) thrust_bal = om.BalanceComp() thrust_bal.add_balance(name='thrust', val=1207.1, lhs_name='dXdt:TAS', rhs_name='accel_target', eq_units='m/s**2', lower=-10.0, upper=10000.0) teg.add_subsystem(name='thrust_bal', subsys=thrust_bal, promotes_inputs=['dXdt:TAS', 'accel_target'], promotes_outputs=['thrust']) teg.linear_solver = om.DirectSolver() teg.nonlinear_solver = om.NewtonSolver() teg.nonlinear_solver.options['solve_subsystems'] = True teg.nonlinear_solver.options['max_sub_solves'] = 1 teg.nonlinear_solver.options['atol'] = 1e-4 teg.nonlinear_solver.options['debug_print'] = True prob.setup() prob.set_solver_print(level=0) stdout = sys.stdout strout = StringIO() sys.stdout = strout with self.assertRaises(RuntimeError) as cm: prob.run_model() sys.stdout = stdout output = strout.getvalue() target = "'thrust_equilibrium_group.thrust_bal.thrust'" self.assertTrue(target in output, msg=target + "NOT FOUND IN" + output) # Make sure exception is unchanged. expected_msg = "Singular entry found in Group (thrust_equilibrium_group) for row associated with state/residual 'thrust' ('thrust_equilibrium_group.thrust_bal.thrust') index 0." self.assertEqual(expected_msg, str(cm.exception)) if __name__ == "__main__": unittest.main()
true
true
f70de46ec15d5e1242f26c7edce5421e4886e55d
11,741
py
Python
tests/training/trainer_test.py
vidurj/allennlp
0aec18ed8e076357dcfe0e89bb4078485e1e4e19
[ "Apache-2.0" ]
1
2018-06-14T10:11:20.000Z
2018-06-14T10:11:20.000Z
tests/training/trainer_test.py
Uppaal/allennlp
5b513d4f7c7365ac33b3cbc557506b46a9b50450
[ "Apache-2.0" ]
null
null
null
tests/training/trainer_test.py
Uppaal/allennlp
5b513d4f7c7365ac33b3cbc557506b46a9b50450
[ "Apache-2.0" ]
1
2022-03-27T19:45:13.000Z
2022-03-27T19:45:13.000Z
# pylint: disable=invalid-name import glob import os import re import time import torch import pytest from allennlp.common.testing import AllenNlpTestCase from allennlp.training.trainer import Trainer, sparse_clip_norm, is_sparse from allennlp.data import Vocabulary from allennlp.common.params import Params from allennlp.common.checks import ConfigurationError from allennlp.models.simple_tagger import SimpleTagger from allennlp.data.iterators import BasicIterator from allennlp.data.dataset_readers import SequenceTaggingDatasetReader class TestTrainer(AllenNlpTestCase): def setUp(self): super(TestTrainer, self).setUp() self.instances = SequenceTaggingDatasetReader().read('tests/fixtures/data/sequence_tagging.tsv') vocab = Vocabulary.from_instances(self.instances) self.vocab = vocab self.model_params = Params({ "text_field_embedder": { "tokens": { "type": "embedding", "embedding_dim": 5 } }, "encoder": { "type": "lstm", "input_size": 5, "hidden_size": 7, "num_layers": 2 } }) self.model = SimpleTagger.from_params(self.vocab, self.model_params) self.optimizer = torch.optim.SGD(self.model.parameters(), 0.01) self.iterator = BasicIterator(batch_size=2) self.iterator.index_with(vocab) def test_trainer_can_run(self): trainer = Trainer(model=self.model, optimizer=self.optimizer, iterator=self.iterator, train_dataset=self.instances, validation_dataset=self.instances, num_epochs=2) metrics = trainer.train() assert 'best_validation_loss' in metrics assert isinstance(metrics['best_validation_loss'], float) assert 'best_epoch' in metrics assert isinstance(metrics['best_epoch'], int) # Making sure that both increasing and decreasing validation metrics work. trainer = Trainer(model=self.model, optimizer=self.optimizer, iterator=self.iterator, train_dataset=self.instances, validation_dataset=self.instances, validation_metric='+loss', num_epochs=2) metrics = trainer.train() assert 'best_validation_loss' in metrics assert isinstance(metrics['best_validation_loss'], float) assert 'best_epoch' in metrics assert isinstance(metrics['best_epoch'], int) @pytest.mark.skipif(not torch.cuda.is_available(), reason="No CUDA device registered.") def test_trainer_can_run_cuda(self): trainer = Trainer(self.model, self.optimizer, self.iterator, self.instances, num_epochs=2, cuda_device=0) trainer.train() @pytest.mark.skipif(torch.cuda.device_count() < 2, reason="Need multiple GPUs.") def test_trainer_can_run_multiple_gpu(self): multigpu_iterator = BasicIterator(batch_size=4) multigpu_iterator.index_with(self.vocab) trainer = Trainer(self.model, self.optimizer, multigpu_iterator, self.instances, num_epochs=2, cuda_device=[0, 1]) trainer.train() def test_trainer_can_resume_training(self): trainer = Trainer(self.model, self.optimizer, self.iterator, self.instances, validation_dataset=self.instances, num_epochs=1, serialization_dir=self.TEST_DIR) trainer.train() new_trainer = Trainer(self.model, self.optimizer, self.iterator, self.instances, validation_dataset=self.instances, num_epochs=3, serialization_dir=self.TEST_DIR) epoch, val_metrics_per_epoch = new_trainer._restore_checkpoint() # pylint: disable=protected-access assert epoch == 1 assert len(val_metrics_per_epoch) == 1 assert isinstance(val_metrics_per_epoch[0], float) assert val_metrics_per_epoch[0] != 0. 
new_trainer.train() def test_should_stop_early_with_increasing_metric(self): new_trainer = Trainer(self.model, self.optimizer, self.iterator, self.instances, validation_dataset=self.instances, num_epochs=3, serialization_dir=self.TEST_DIR, patience=5, validation_metric="+test") assert new_trainer._should_stop_early([.5, .3, .2, .1, .4, .4]) # pylint: disable=protected-access assert not new_trainer._should_stop_early([.3, .3, .3, .2, .5, .1]) # pylint: disable=protected-access def test_should_stop_early_with_decreasing_metric(self): new_trainer = Trainer(self.model, self.optimizer, self.iterator, self.instances, validation_dataset=self.instances, num_epochs=3, serialization_dir=self.TEST_DIR, patience=5, validation_metric="-test") assert new_trainer._should_stop_early([.02, .3, .2, .1, .4, .4]) # pylint: disable=protected-access assert not new_trainer._should_stop_early([.3, .3, .2, .1, .4, .5]) # pylint: disable=protected-access def test_train_driver_raises_on_model_with_no_loss_key(self): class FakeModel(torch.nn.Module): def forward(self, **kwargs): # pylint: disable=arguments-differ,unused-argument return {} with pytest.raises(ConfigurationError): trainer = Trainer(FakeModel(), self.optimizer, self.iterator, self.instances, num_epochs=2, serialization_dir=self.TEST_DIR) trainer.train() def test_trainer_can_log_histograms(self): # enable activation logging for module in self.model.modules(): module.should_log_activations = True trainer = Trainer(self.model, self.optimizer, self.iterator, self.instances, num_epochs=3, serialization_dir=self.TEST_DIR, histogram_interval=2) trainer.train() def test_trainer_respects_num_serialized_models_to_keep(self): trainer = Trainer(self.model, self.optimizer, self.iterator, self.instances, num_epochs=5, serialization_dir=self.TEST_DIR, num_serialized_models_to_keep=3) trainer.train() # Now check the serialized files for prefix in ['model_state_epoch_*', 'training_state_epoch_*']: file_names = glob.glob(os.path.join(self.TEST_DIR, prefix)) epochs = [int(re.search(r"_([0-9])\.th", fname).group(1)) for fname in file_names] assert sorted(epochs) == [2, 3, 4] def test_trainer_respects_keep_serialized_model_every_num_seconds(self): # To test: # Create an iterator that sleeps for 0.5 second per epoch, so the total training # time for one epoch is slightly greater then 0.5 seconds. # Run for 6 epochs, keeping the last 2 models, models also kept every 1 second. # Check the resulting checkpoints. Should then have models at epochs # 2, 4, plus the last two at 5 and 6. 
class WaitingIterator(BasicIterator): # pylint: disable=arguments-differ def _create_batches(self, *args, **kwargs): time.sleep(0.5) return super(WaitingIterator, self)._create_batches(*args, **kwargs) iterator = WaitingIterator(batch_size=2) iterator.index_with(self.vocab) trainer = Trainer(self.model, self.optimizer, iterator, self.instances, num_epochs=6, serialization_dir=self.TEST_DIR, num_serialized_models_to_keep=2, keep_serialized_model_every_num_seconds=1) trainer.train() # Now check the serialized files for prefix in ['model_state_epoch_*', 'training_state_epoch_*']: file_names = glob.glob(os.path.join(self.TEST_DIR, prefix)) epochs = [int(re.search(r"_([0-9])\.th", fname).group(1)) for fname in file_names] # epoch N has N-1 in file name assert sorted(epochs) == [1, 3, 4, 5] def test_trainer_saves_models_at_specified_interval(self): iterator = BasicIterator(batch_size=4) iterator.index_with(self.vocab) trainer = Trainer(self.model, self.optimizer, iterator, self.instances, num_epochs=2, serialization_dir=self.TEST_DIR, model_save_interval=0.0001) trainer.train() # Now check the serialized files for models saved during the epoch. prefix = 'model_state_epoch_*' file_names = sorted(glob.glob(os.path.join(self.TEST_DIR, prefix))) epochs = [re.search(r"_([0-9\.\-]+)\.th", fname).group(1) for fname in file_names] # We should have checkpoints at the end of each epoch and during each, e.g. # [0.timestamp, 0, 1.timestamp, 1] assert len(epochs) == 4 assert epochs[3] == '1' assert '.' in epochs[0] # Now make certain we can restore from timestamped checkpoint. # To do so, remove the checkpoint from the end of epoch 1&2, so # that we are forced to restore from the timestamped checkpoints. for k in range(2): os.remove(os.path.join(self.TEST_DIR, 'model_state_epoch_{}.th'.format(k))) os.remove(os.path.join(self.TEST_DIR, 'training_state_epoch_{}.th'.format(k))) os.remove(os.path.join(self.TEST_DIR, 'best.th')) restore_trainer = Trainer(self.model, self.optimizer, self.iterator, self.instances, num_epochs=2, serialization_dir=self.TEST_DIR, model_save_interval=0.0001) epoch, _ = restore_trainer._restore_checkpoint() # pylint: disable=protected-access assert epoch == 2 # One batch per epoch. assert restore_trainer._batch_num_total == 2 # pylint: disable=protected-access class TestSparseClipGrad(AllenNlpTestCase): def test_sparse_clip_grad(self): # create a sparse embedding layer, then take gradient embedding = torch.nn.Embedding(100, 16, sparse=True) embedding.zero_grad() ids = torch.autograd.Variable((torch.rand(17) * 100).long()) # Set some of the ids to the same value so that the sparse gradient # has repeated indices. This tests some additional logic. ids[:5] = 5 loss = embedding(ids).sum() loss.backward() assert is_sparse(embedding.weight.grad) # Now try to clip the gradients. _ = sparse_clip_norm([embedding.weight], 1.5) # Final norm should be 1.5 grad = embedding.weight.grad.data.coalesce() self.assertAlmostEqual(grad._values().norm(2.0), 1.5, places=5) # pylint: disable=protected-access
46.964
111
0.597905
import glob import os import re import time import torch import pytest from allennlp.common.testing import AllenNlpTestCase from allennlp.training.trainer import Trainer, sparse_clip_norm, is_sparse from allennlp.data import Vocabulary from allennlp.common.params import Params from allennlp.common.checks import ConfigurationError from allennlp.models.simple_tagger import SimpleTagger from allennlp.data.iterators import BasicIterator from allennlp.data.dataset_readers import SequenceTaggingDatasetReader class TestTrainer(AllenNlpTestCase): def setUp(self): super(TestTrainer, self).setUp() self.instances = SequenceTaggingDatasetReader().read('tests/fixtures/data/sequence_tagging.tsv') vocab = Vocabulary.from_instances(self.instances) self.vocab = vocab self.model_params = Params({ "text_field_embedder": { "tokens": { "type": "embedding", "embedding_dim": 5 } }, "encoder": { "type": "lstm", "input_size": 5, "hidden_size": 7, "num_layers": 2 } }) self.model = SimpleTagger.from_params(self.vocab, self.model_params) self.optimizer = torch.optim.SGD(self.model.parameters(), 0.01) self.iterator = BasicIterator(batch_size=2) self.iterator.index_with(vocab) def test_trainer_can_run(self): trainer = Trainer(model=self.model, optimizer=self.optimizer, iterator=self.iterator, train_dataset=self.instances, validation_dataset=self.instances, num_epochs=2) metrics = trainer.train() assert 'best_validation_loss' in metrics assert isinstance(metrics['best_validation_loss'], float) assert 'best_epoch' in metrics assert isinstance(metrics['best_epoch'], int) trainer = Trainer(model=self.model, optimizer=self.optimizer, iterator=self.iterator, train_dataset=self.instances, validation_dataset=self.instances, validation_metric='+loss', num_epochs=2) metrics = trainer.train() assert 'best_validation_loss' in metrics assert isinstance(metrics['best_validation_loss'], float) assert 'best_epoch' in metrics assert isinstance(metrics['best_epoch'], int) @pytest.mark.skipif(not torch.cuda.is_available(), reason="No CUDA device registered.") def test_trainer_can_run_cuda(self): trainer = Trainer(self.model, self.optimizer, self.iterator, self.instances, num_epochs=2, cuda_device=0) trainer.train() @pytest.mark.skipif(torch.cuda.device_count() < 2, reason="Need multiple GPUs.") def test_trainer_can_run_multiple_gpu(self): multigpu_iterator = BasicIterator(batch_size=4) multigpu_iterator.index_with(self.vocab) trainer = Trainer(self.model, self.optimizer, multigpu_iterator, self.instances, num_epochs=2, cuda_device=[0, 1]) trainer.train() def test_trainer_can_resume_training(self): trainer = Trainer(self.model, self.optimizer, self.iterator, self.instances, validation_dataset=self.instances, num_epochs=1, serialization_dir=self.TEST_DIR) trainer.train() new_trainer = Trainer(self.model, self.optimizer, self.iterator, self.instances, validation_dataset=self.instances, num_epochs=3, serialization_dir=self.TEST_DIR) epoch, val_metrics_per_epoch = new_trainer._restore_checkpoint() assert epoch == 1 assert len(val_metrics_per_epoch) == 1 assert isinstance(val_metrics_per_epoch[0], float) assert val_metrics_per_epoch[0] != 0. 
new_trainer.train() def test_should_stop_early_with_increasing_metric(self): new_trainer = Trainer(self.model, self.optimizer, self.iterator, self.instances, validation_dataset=self.instances, num_epochs=3, serialization_dir=self.TEST_DIR, patience=5, validation_metric="+test") assert new_trainer._should_stop_early([.5, .3, .2, .1, .4, .4]) assert not new_trainer._should_stop_early([.3, .3, .3, .2, .5, .1]) def test_should_stop_early_with_decreasing_metric(self): new_trainer = Trainer(self.model, self.optimizer, self.iterator, self.instances, validation_dataset=self.instances, num_epochs=3, serialization_dir=self.TEST_DIR, patience=5, validation_metric="-test") assert new_trainer._should_stop_early([.02, .3, .2, .1, .4, .4]) assert not new_trainer._should_stop_early([.3, .3, .2, .1, .4, .5]) def test_train_driver_raises_on_model_with_no_loss_key(self): class FakeModel(torch.nn.Module): def forward(self, **kwargs): return {} with pytest.raises(ConfigurationError): trainer = Trainer(FakeModel(), self.optimizer, self.iterator, self.instances, num_epochs=2, serialization_dir=self.TEST_DIR) trainer.train() def test_trainer_can_log_histograms(self): for module in self.model.modules(): module.should_log_activations = True trainer = Trainer(self.model, self.optimizer, self.iterator, self.instances, num_epochs=3, serialization_dir=self.TEST_DIR, histogram_interval=2) trainer.train() def test_trainer_respects_num_serialized_models_to_keep(self): trainer = Trainer(self.model, self.optimizer, self.iterator, self.instances, num_epochs=5, serialization_dir=self.TEST_DIR, num_serialized_models_to_keep=3) trainer.train() for prefix in ['model_state_epoch_*', 'training_state_epoch_*']: file_names = glob.glob(os.path.join(self.TEST_DIR, prefix)) epochs = [int(re.search(r"_([0-9])\.th", fname).group(1)) for fname in file_names] assert sorted(epochs) == [2, 3, 4] def test_trainer_respects_keep_serialized_model_every_num_seconds(self): class WaitingIterator(BasicIterator): def _create_batches(self, *args, **kwargs): time.sleep(0.5) return super(WaitingIterator, self)._create_batches(*args, **kwargs) iterator = WaitingIterator(batch_size=2) iterator.index_with(self.vocab) trainer = Trainer(self.model, self.optimizer, iterator, self.instances, num_epochs=6, serialization_dir=self.TEST_DIR, num_serialized_models_to_keep=2, keep_serialized_model_every_num_seconds=1) trainer.train() for prefix in ['model_state_epoch_*', 'training_state_epoch_*']: file_names = glob.glob(os.path.join(self.TEST_DIR, prefix)) epochs = [int(re.search(r"_([0-9])\.th", fname).group(1)) for fname in file_names] assert sorted(epochs) == [1, 3, 4, 5] def test_trainer_saves_models_at_specified_interval(self): iterator = BasicIterator(batch_size=4) iterator.index_with(self.vocab) trainer = Trainer(self.model, self.optimizer, iterator, self.instances, num_epochs=2, serialization_dir=self.TEST_DIR, model_save_interval=0.0001) trainer.train() prefix = 'model_state_epoch_*' file_names = sorted(glob.glob(os.path.join(self.TEST_DIR, prefix))) epochs = [re.search(r"_([0-9\.\-]+)\.th", fname).group(1) for fname in file_names] assert len(epochs) == 4 assert epochs[3] == '1' assert '.' 
in epochs[0] for k in range(2): os.remove(os.path.join(self.TEST_DIR, 'model_state_epoch_{}.th'.format(k))) os.remove(os.path.join(self.TEST_DIR, 'training_state_epoch_{}.th'.format(k))) os.remove(os.path.join(self.TEST_DIR, 'best.th')) restore_trainer = Trainer(self.model, self.optimizer, self.iterator, self.instances, num_epochs=2, serialization_dir=self.TEST_DIR, model_save_interval=0.0001) epoch, _ = restore_trainer._restore_checkpoint() assert epoch == 2 assert restore_trainer._batch_num_total == 2 class TestSparseClipGrad(AllenNlpTestCase): def test_sparse_clip_grad(self): embedding = torch.nn.Embedding(100, 16, sparse=True) embedding.zero_grad() ids = torch.autograd.Variable((torch.rand(17) * 100).long()) ids[:5] = 5 loss = embedding(ids).sum() loss.backward() assert is_sparse(embedding.weight.grad) _ = sparse_clip_norm([embedding.weight], 1.5) grad = embedding.weight.grad.data.coalesce() self.assertAlmostEqual(grad._values().norm(2.0), 1.5, places=5)
true
true
f70de471db03751013d05958de5d7e298c464dd4
9,919
py
Python
tests/store/artifact/test_ftp_artifact_repo.py
iPieter/kiwi
76b66872fce68873809a0dea112e2ed552ae5b63
[ "Apache-2.0" ]
null
null
null
tests/store/artifact/test_ftp_artifact_repo.py
iPieter/kiwi
76b66872fce68873809a0dea112e2ed552ae5b63
[ "Apache-2.0" ]
1
2021-01-24T13:34:51.000Z
2021-01-24T13:34:51.000Z
tests/store/artifact/test_ftp_artifact_repo.py
iPieter/kiwi
76b66872fce68873809a0dea112e2ed552ae5b63
[ "Apache-2.0" ]
null
null
null
# pylint: disable=redefined-outer-name from mock import MagicMock import pytest import posixpath import ftplib from ftplib import FTP from kiwi.store.artifact.artifact_repository_registry import get_artifact_repository from kiwi.store.artifact.ftp_artifact_repo import FTPArtifactRepository @pytest.fixture def ftp_mock(): return MagicMock(autospec=FTP) def test_artifact_uri_factory(): repo = get_artifact_repository("ftp://user:pass@test_ftp:123/some/path") assert isinstance(repo, FTPArtifactRepository) def test_list_artifacts_empty(ftp_mock): repo = FTPArtifactRepository("ftp://test_ftp/some/path") repo.get_ftp_client = MagicMock() call_mock = MagicMock(return_value=ftp_mock) repo.get_ftp_client.return_value = MagicMock(__enter__=call_mock) ftp_mock.nlst = MagicMock(return_value=[]) assert repo.list_artifacts() == [] ftp_mock.nlst.assert_called_once_with("/some/path") def test_list_artifacts(ftp_mock): artifact_root_path = "/experiment_id/run_id/" repo = FTPArtifactRepository("ftp://test_ftp"+artifact_root_path) repo.get_ftp_client = MagicMock() call_mock = MagicMock(return_value=ftp_mock) repo.get_ftp_client.return_value = MagicMock(__enter__=call_mock) # mocked file structure # |- file # |- model # |- model.pb file_path = "file" file_size = 678 dir_path = "model" ftp_mock.cwd = MagicMock(side_effect=[None, ftplib.error_perm, None]) ftp_mock.nlst = MagicMock(return_value=[file_path, dir_path]) ftp_mock.size = MagicMock(return_value=file_size) artifacts = repo.list_artifacts(path=None) ftp_mock.nlst.assert_called_once_with(artifact_root_path) ftp_mock.size.assert_called_once_with(artifact_root_path + file_path) assert len(artifacts) == 2 assert artifacts[0].path == file_path assert artifacts[0].is_dir is False assert artifacts[0].file_size == file_size assert artifacts[1].path == dir_path assert artifacts[1].is_dir is True assert artifacts[1].file_size is None def test_list_artifacts_with_subdir(ftp_mock): artifact_root_path = "/experiment_id/run_id/" repo = FTPArtifactRepository("sftp://test_sftp"+artifact_root_path) repo.get_ftp_client = MagicMock() call_mock = MagicMock(return_value=ftp_mock) repo.get_ftp_client.return_value = MagicMock(__enter__=call_mock) # mocked file structure # |- model # |- model.pb # |- variables dir_name = 'model' # list artifacts at sub directory level file_path = 'model.pb' file_size = 345 subdir_name = 'variables' ftp_mock.nlst = MagicMock(return_value=[file_path, subdir_name]) ftp_mock.cwd = MagicMock(side_effect=[None, ftplib.error_perm, None]) ftp_mock.size = MagicMock(return_value=file_size) artifacts = repo.list_artifacts(path=dir_name) ftp_mock.nlst.assert_called_once_with(artifact_root_path + dir_name) ftp_mock.size.assert_called_once_with(artifact_root_path + dir_name + '/' + file_path) assert len(artifacts) == 2 assert artifacts[0].path == dir_name + '/' + file_path assert artifacts[0].is_dir is False assert artifacts[0].file_size == file_size assert artifacts[1].path == dir_name + '/' + subdir_name assert artifacts[1].is_dir is True assert artifacts[1].file_size is None def test_log_artifact(ftp_mock, tmpdir): repo = FTPArtifactRepository("ftp://test_ftp/some/path") repo.get_ftp_client = MagicMock() call_mock = MagicMock(return_value=ftp_mock) repo.get_ftp_client.return_value = MagicMock(__enter__=call_mock) d = tmpdir.mkdir("data") f = d.join("test.txt") f.write("hello world!") fpath = d + '/test.txt' fpath = fpath.strpath ftp_mock.cwd = MagicMock(side_effect=[ftplib.error_perm, None]) repo.log_artifact(fpath) 
ftp_mock.mkd.assert_called_once_with('/some/path') ftp_mock.cwd.assert_called_with('/some/path') ftp_mock.storbinary.assert_called_once() assert ftp_mock.storbinary.call_args_list[0][0][0] == 'STOR test.txt' def test_log_artifact_multiple_calls(ftp_mock, tmpdir): repo = FTPArtifactRepository("ftp://test_ftp/some/path") repo.get_ftp_client = MagicMock() call_mock = MagicMock(return_value=ftp_mock) repo.get_ftp_client.return_value = MagicMock(__enter__=call_mock) d = tmpdir.mkdir("data") file1 = d.join("test1.txt") file1.write("hello world!") fpath1 = d + '/test1.txt' fpath1 = fpath1.strpath file2 = d.join("test2.txt") file2.write("hello world!") fpath2 = d + '/test2.txt' fpath2 = fpath2.strpath ftp_mock.cwd = MagicMock(side_effect=[ ftplib.error_perm, None, ftplib.error_perm, None, None, None ]) repo.log_artifact(fpath1) ftp_mock.mkd.assert_called_once_with('/some/path') ftp_mock.cwd.assert_called_with('/some/path') ftp_mock.storbinary.assert_called() assert ftp_mock.storbinary.call_args_list[0][0][0] == 'STOR test1.txt' ftp_mock.reset_mock() repo.log_artifact(fpath1, "subdir") ftp_mock.mkd.assert_called_once_with('/some/path/subdir') ftp_mock.cwd.assert_called_with('/some/path/subdir') ftp_mock.storbinary.assert_called() assert ftp_mock.storbinary.call_args_list[0][0][0] == 'STOR test1.txt' ftp_mock.reset_mock() repo.log_artifact(fpath2) ftp_mock.mkd.assert_not_called() ftp_mock.cwd.assert_called_with('/some/path') ftp_mock.storbinary.assert_called() assert ftp_mock.storbinary.call_args_list[0][0][0] == 'STOR test2.txt' def test_log_artifacts(ftp_mock, tmpdir): repo = FTPArtifactRepository("ftp://test_ftp/some/path") repo.get_ftp_client = MagicMock() call_mock = MagicMock(return_value=ftp_mock) repo.get_ftp_client.return_value = MagicMock(__enter__=call_mock) subd = tmpdir.mkdir("data").mkdir("subdir") subd.join("a.txt").write("A") subd.join("b.txt").write("B") subd.join("c.txt").write("C") ftp_mock.cwd = MagicMock(side_effect=[ftplib.error_perm, None, None, None, None, None]) repo.log_artifacts(subd.strpath) ftp_mock.mkd.assert_any_call('/some/path/subdir') ftp_mock.cwd.assert_any_call('/some/path/subdir') assert ftp_mock.storbinary.call_count == 3 storbinary_call_args = sorted([ftp_mock.storbinary.call_args_list[i][0][0] for i in range(3)]) assert storbinary_call_args == ['STOR a.txt', 'STOR b.txt', 'STOR c.txt'] def test_download_artifacts_single(ftp_mock): repo = FTPArtifactRepository("ftp://test_ftp/some/path") repo.get_ftp_client = MagicMock() call_mock = MagicMock(return_value=ftp_mock) repo.get_ftp_client.return_value = MagicMock(__enter__=call_mock) ftp_mock.cwd = MagicMock(side_effect=ftplib.error_perm) repo.download_artifacts("test.txt") ftp_mock.retrbinary.assert_called_once() assert ftp_mock.retrbinary.call_args_list[0][0][0] == 'RETR /some/path/test.txt' def test_download_artifacts(ftp_mock): artifact_root_path = "/some/path" repo = FTPArtifactRepository("ftp://test_ftp" + artifact_root_path) repo.get_ftp_client = MagicMock() call_mock = MagicMock(return_value=ftp_mock) repo.get_ftp_client.return_value = MagicMock(__enter__=call_mock) # mocked file structure # |- model # |- model.pb # |- empty_dir # |- variables # |- test.txt dir_path = posixpath.join(artifact_root_path, 'model') # list artifacts at sub directory level model_file_path_sub = 'model.pb' model_file_path_full = posixpath.join(dir_path, model_file_path_sub) empty_dir_name = "empty_dir" empty_dir_path = posixpath.join(dir_path, empty_dir_name) subdir_name = 'variables' subdir_path_full = posixpath.join(dir_path, 
subdir_name) subfile_name = 'test.txt' subfile_path_full = posixpath.join(artifact_root_path, subdir_path_full, subfile_name) is_dir_mapping = { dir_path: True, empty_dir_path: True, model_file_path_full: False, subdir_path_full: True, subfile_path_full: False, } is_dir_call_args = [ dir_path, model_file_path_full, empty_dir_path, subdir_path_full, model_file_path_full, subdir_path_full, subfile_path_full, subfile_path_full ] def cwd_side_effect(call_arg): if not is_dir_mapping[call_arg]: raise ftplib.error_perm ftp_mock.cwd = MagicMock(side_effect=cwd_side_effect) def nlst_side_effect(call_arg): if call_arg == dir_path: return [model_file_path_sub, subdir_name, empty_dir_name] elif call_arg == subdir_path_full: return [subfile_name] elif call_arg == empty_dir_path: return [] else: raise Exception("should never call nlst for non-directories {}".format(call_arg)) ftp_mock.nlst = MagicMock(side_effect=nlst_side_effect) repo.download_artifacts("model") cwd_call_args = [arg_entry[0][0] for arg_entry in ftp_mock.cwd.call_args_list] assert set(cwd_call_args) == set(is_dir_call_args) assert ftp_mock.nlst.call_count == 3 assert ftp_mock.retrbinary.call_args_list[0][0][0] == 'RETR ' + model_file_path_full assert ftp_mock.retrbinary.call_args_list[1][0][0] == 'RETR ' + subfile_path_full def test_log_artifact_reuse_ftp_client(ftp_mock, tmpdir): repo = FTPArtifactRepository("ftp://test_ftp/some/path") repo.get_ftp_client = MagicMock() call_mock = MagicMock(return_value=ftp_mock) repo.get_ftp_client.return_value = MagicMock(__enter__=call_mock) d = tmpdir.mkdir("data") file = d.join("test.txt") file.write("hello world!") fpath = file.strpath repo.log_artifact(fpath) repo.log_artifact(fpath, "subdir1/subdir2") repo.log_artifact(fpath, "subdir3") assert repo.get_ftp_client.call_count == 3
32.953488
98
0.713177
from mock import MagicMock import pytest import posixpath import ftplib from ftplib import FTP from kiwi.store.artifact.artifact_repository_registry import get_artifact_repository from kiwi.store.artifact.ftp_artifact_repo import FTPArtifactRepository @pytest.fixture def ftp_mock(): return MagicMock(autospec=FTP) def test_artifact_uri_factory(): repo = get_artifact_repository("ftp://user:pass@test_ftp:123/some/path") assert isinstance(repo, FTPArtifactRepository) def test_list_artifacts_empty(ftp_mock): repo = FTPArtifactRepository("ftp://test_ftp/some/path") repo.get_ftp_client = MagicMock() call_mock = MagicMock(return_value=ftp_mock) repo.get_ftp_client.return_value = MagicMock(__enter__=call_mock) ftp_mock.nlst = MagicMock(return_value=[]) assert repo.list_artifacts() == [] ftp_mock.nlst.assert_called_once_with("/some/path") def test_list_artifacts(ftp_mock): artifact_root_path = "/experiment_id/run_id/" repo = FTPArtifactRepository("ftp://test_ftp"+artifact_root_path) repo.get_ftp_client = MagicMock() call_mock = MagicMock(return_value=ftp_mock) repo.get_ftp_client.return_value = MagicMock(__enter__=call_mock) file_path = "file" file_size = 678 dir_path = "model" ftp_mock.cwd = MagicMock(side_effect=[None, ftplib.error_perm, None]) ftp_mock.nlst = MagicMock(return_value=[file_path, dir_path]) ftp_mock.size = MagicMock(return_value=file_size) artifacts = repo.list_artifacts(path=None) ftp_mock.nlst.assert_called_once_with(artifact_root_path) ftp_mock.size.assert_called_once_with(artifact_root_path + file_path) assert len(artifacts) == 2 assert artifacts[0].path == file_path assert artifacts[0].is_dir is False assert artifacts[0].file_size == file_size assert artifacts[1].path == dir_path assert artifacts[1].is_dir is True assert artifacts[1].file_size is None def test_list_artifacts_with_subdir(ftp_mock): artifact_root_path = "/experiment_id/run_id/" repo = FTPArtifactRepository("sftp://test_sftp"+artifact_root_path) repo.get_ftp_client = MagicMock() call_mock = MagicMock(return_value=ftp_mock) repo.get_ftp_client.return_value = MagicMock(__enter__=call_mock) dir_name = 'model' file_path = 'model.pb' file_size = 345 subdir_name = 'variables' ftp_mock.nlst = MagicMock(return_value=[file_path, subdir_name]) ftp_mock.cwd = MagicMock(side_effect=[None, ftplib.error_perm, None]) ftp_mock.size = MagicMock(return_value=file_size) artifacts = repo.list_artifacts(path=dir_name) ftp_mock.nlst.assert_called_once_with(artifact_root_path + dir_name) ftp_mock.size.assert_called_once_with(artifact_root_path + dir_name + '/' + file_path) assert len(artifacts) == 2 assert artifacts[0].path == dir_name + '/' + file_path assert artifacts[0].is_dir is False assert artifacts[0].file_size == file_size assert artifacts[1].path == dir_name + '/' + subdir_name assert artifacts[1].is_dir is True assert artifacts[1].file_size is None def test_log_artifact(ftp_mock, tmpdir): repo = FTPArtifactRepository("ftp://test_ftp/some/path") repo.get_ftp_client = MagicMock() call_mock = MagicMock(return_value=ftp_mock) repo.get_ftp_client.return_value = MagicMock(__enter__=call_mock) d = tmpdir.mkdir("data") f = d.join("test.txt") f.write("hello world!") fpath = d + '/test.txt' fpath = fpath.strpath ftp_mock.cwd = MagicMock(side_effect=[ftplib.error_perm, None]) repo.log_artifact(fpath) ftp_mock.mkd.assert_called_once_with('/some/path') ftp_mock.cwd.assert_called_with('/some/path') ftp_mock.storbinary.assert_called_once() assert ftp_mock.storbinary.call_args_list[0][0][0] == 'STOR test.txt' def 
test_log_artifact_multiple_calls(ftp_mock, tmpdir): repo = FTPArtifactRepository("ftp://test_ftp/some/path") repo.get_ftp_client = MagicMock() call_mock = MagicMock(return_value=ftp_mock) repo.get_ftp_client.return_value = MagicMock(__enter__=call_mock) d = tmpdir.mkdir("data") file1 = d.join("test1.txt") file1.write("hello world!") fpath1 = d + '/test1.txt' fpath1 = fpath1.strpath file2 = d.join("test2.txt") file2.write("hello world!") fpath2 = d + '/test2.txt' fpath2 = fpath2.strpath ftp_mock.cwd = MagicMock(side_effect=[ ftplib.error_perm, None, ftplib.error_perm, None, None, None ]) repo.log_artifact(fpath1) ftp_mock.mkd.assert_called_once_with('/some/path') ftp_mock.cwd.assert_called_with('/some/path') ftp_mock.storbinary.assert_called() assert ftp_mock.storbinary.call_args_list[0][0][0] == 'STOR test1.txt' ftp_mock.reset_mock() repo.log_artifact(fpath1, "subdir") ftp_mock.mkd.assert_called_once_with('/some/path/subdir') ftp_mock.cwd.assert_called_with('/some/path/subdir') ftp_mock.storbinary.assert_called() assert ftp_mock.storbinary.call_args_list[0][0][0] == 'STOR test1.txt' ftp_mock.reset_mock() repo.log_artifact(fpath2) ftp_mock.mkd.assert_not_called() ftp_mock.cwd.assert_called_with('/some/path') ftp_mock.storbinary.assert_called() assert ftp_mock.storbinary.call_args_list[0][0][0] == 'STOR test2.txt' def test_log_artifacts(ftp_mock, tmpdir): repo = FTPArtifactRepository("ftp://test_ftp/some/path") repo.get_ftp_client = MagicMock() call_mock = MagicMock(return_value=ftp_mock) repo.get_ftp_client.return_value = MagicMock(__enter__=call_mock) subd = tmpdir.mkdir("data").mkdir("subdir") subd.join("a.txt").write("A") subd.join("b.txt").write("B") subd.join("c.txt").write("C") ftp_mock.cwd = MagicMock(side_effect=[ftplib.error_perm, None, None, None, None, None]) repo.log_artifacts(subd.strpath) ftp_mock.mkd.assert_any_call('/some/path/subdir') ftp_mock.cwd.assert_any_call('/some/path/subdir') assert ftp_mock.storbinary.call_count == 3 storbinary_call_args = sorted([ftp_mock.storbinary.call_args_list[i][0][0] for i in range(3)]) assert storbinary_call_args == ['STOR a.txt', 'STOR b.txt', 'STOR c.txt'] def test_download_artifacts_single(ftp_mock): repo = FTPArtifactRepository("ftp://test_ftp/some/path") repo.get_ftp_client = MagicMock() call_mock = MagicMock(return_value=ftp_mock) repo.get_ftp_client.return_value = MagicMock(__enter__=call_mock) ftp_mock.cwd = MagicMock(side_effect=ftplib.error_perm) repo.download_artifacts("test.txt") ftp_mock.retrbinary.assert_called_once() assert ftp_mock.retrbinary.call_args_list[0][0][0] == 'RETR /some/path/test.txt' def test_download_artifacts(ftp_mock): artifact_root_path = "/some/path" repo = FTPArtifactRepository("ftp://test_ftp" + artifact_root_path) repo.get_ftp_client = MagicMock() call_mock = MagicMock(return_value=ftp_mock) repo.get_ftp_client.return_value = MagicMock(__enter__=call_mock) dir_path = posixpath.join(artifact_root_path, 'model') model_file_path_sub = 'model.pb' model_file_path_full = posixpath.join(dir_path, model_file_path_sub) empty_dir_name = "empty_dir" empty_dir_path = posixpath.join(dir_path, empty_dir_name) subdir_name = 'variables' subdir_path_full = posixpath.join(dir_path, subdir_name) subfile_name = 'test.txt' subfile_path_full = posixpath.join(artifact_root_path, subdir_path_full, subfile_name) is_dir_mapping = { dir_path: True, empty_dir_path: True, model_file_path_full: False, subdir_path_full: True, subfile_path_full: False, } is_dir_call_args = [ dir_path, model_file_path_full, empty_dir_path, 
subdir_path_full, model_file_path_full, subdir_path_full, subfile_path_full, subfile_path_full ] def cwd_side_effect(call_arg): if not is_dir_mapping[call_arg]: raise ftplib.error_perm ftp_mock.cwd = MagicMock(side_effect=cwd_side_effect) def nlst_side_effect(call_arg): if call_arg == dir_path: return [model_file_path_sub, subdir_name, empty_dir_name] elif call_arg == subdir_path_full: return [subfile_name] elif call_arg == empty_dir_path: return [] else: raise Exception("should never call nlst for non-directories {}".format(call_arg)) ftp_mock.nlst = MagicMock(side_effect=nlst_side_effect) repo.download_artifacts("model") cwd_call_args = [arg_entry[0][0] for arg_entry in ftp_mock.cwd.call_args_list] assert set(cwd_call_args) == set(is_dir_call_args) assert ftp_mock.nlst.call_count == 3 assert ftp_mock.retrbinary.call_args_list[0][0][0] == 'RETR ' + model_file_path_full assert ftp_mock.retrbinary.call_args_list[1][0][0] == 'RETR ' + subfile_path_full def test_log_artifact_reuse_ftp_client(ftp_mock, tmpdir): repo = FTPArtifactRepository("ftp://test_ftp/some/path") repo.get_ftp_client = MagicMock() call_mock = MagicMock(return_value=ftp_mock) repo.get_ftp_client.return_value = MagicMock(__enter__=call_mock) d = tmpdir.mkdir("data") file = d.join("test.txt") file.write("hello world!") fpath = file.strpath repo.log_artifact(fpath) repo.log_artifact(fpath, "subdir1/subdir2") repo.log_artifact(fpath, "subdir3") assert repo.get_ftp_client.call_count == 3
true
true
f70de6df8415c16650561737d8ed1470b7a3654c
2,082
py
Python
inference.py
speakupai/ml_deployment
f80735049de8111b2415608046bb2b0af57fcdd3
[ "MIT" ]
1
2021-03-15T19:09:50.000Z
2021-03-15T19:09:50.000Z
inference.py
speakupai/ml_deployment
f80735049de8111b2415608046bb2b0af57fcdd3
[ "MIT" ]
null
null
null
inference.py
speakupai/ml_deployment
f80735049de8111b2415608046bb2b0af57fcdd3
[ "MIT" ]
1
2022-03-19T15:50:55.000Z
2022-03-19T15:50:55.000Z
import os import librosa import numpy as np import soundfile as sf import torch from tqdm import tqdm from utils import data, spectrogram, spectrogram_clean from models.hifi_gan import Generator from models.wavenet import WaveNet from utils.hparams import hparams as hp def inference(audio_clip): original_file = audio_clip save_dir = './uploads' checkpoint_path = './saved_model/latest_checkpoint.pt' #default_inf_device = 'cpu', # Load checkpoint checkpoint = torch.load(checkpoint_path, map_location=torch.device('cpu')) # Initializing model, optimizer, criterion and scaler model = Generator(wavenet=WaveNet()) model.to('cpu') model.load_state_dict(checkpoint['generator_state_dict']) model.eval() inference_files = [original_file] with torch.no_grad(): for file in inference_files: filename = os.path.splitext(os.path.split(file)[1])[0] x, _ = librosa.load(file, sr=16000, mono=True) target_length = len(x) x = torch.tensor(x).to('cpu') x = data.preprocess_inference_data(x, hp.inference.batched, hp.inference.batch_size, hp.inference.sequence_length, 16000) # run inference and create spectrograms y = [] for x_batch in tqdm(x): spect_orig = np.array(x_batch[0]) spectrogram.create_spectrogram(spect_orig) clean_temp = model.inference(x_batch) y.append(clean_temp) spectrogram_clean.create_spectrogram(np.array(clean_temp)) y = data.postprocess_inference_data(y, hp.inference.batched, 16000) y = y[:target_length].detach().cpu().numpy() sf.write(os.path.join(save_dir, f'{filename}_denoised.wav'), y.astype(np.float32), samplerate=hp.dsp.sample_rate)
36.526316
78
0.590778
import os import librosa import numpy as np import soundfile as sf import torch from tqdm import tqdm from utils import data, spectrogram, spectrogram_clean from models.hifi_gan import Generator from models.wavenet import WaveNet from utils.hparams import hparams as hp def inference(audio_clip): original_file = audio_clip save_dir = './uploads' checkpoint_path = './saved_model/latest_checkpoint.pt' checkpoint = torch.load(checkpoint_path, map_location=torch.device('cpu')) model = Generator(wavenet=WaveNet()) model.to('cpu') model.load_state_dict(checkpoint['generator_state_dict']) model.eval() inference_files = [original_file] with torch.no_grad(): for file in inference_files: filename = os.path.splitext(os.path.split(file)[1])[0] x, _ = librosa.load(file, sr=16000, mono=True) target_length = len(x) x = torch.tensor(x).to('cpu') x = data.preprocess_inference_data(x, hp.inference.batched, hp.inference.batch_size, hp.inference.sequence_length, 16000) y = [] for x_batch in tqdm(x): spect_orig = np.array(x_batch[0]) spectrogram.create_spectrogram(spect_orig) clean_temp = model.inference(x_batch) y.append(clean_temp) spectrogram_clean.create_spectrogram(np.array(clean_temp)) y = data.postprocess_inference_data(y, hp.inference.batched, 16000) y = y[:target_length].detach().cpu().numpy() sf.write(os.path.join(save_dir, f'{filename}_denoised.wav'), y.astype(np.float32), samplerate=hp.dsp.sample_rate)
true
true
f70de75d5798c0cd75fe044ca0665dbad0c8a799
1,796
py
Python
dbaasp_api_helper_libraries/python/request/SearchRequest.py
reymond-group/MLpeptide
675b4777bcaaf09eca21173c15c12450ff515470
[ "MIT" ]
14
2021-03-19T11:52:56.000Z
2022-02-03T13:11:02.000Z
dbaasp_api_helper_libraries/python/request/SearchRequest.py
reymond-group/MLpeptide
675b4777bcaaf09eca21173c15c12450ff515470
[ "MIT" ]
null
null
null
dbaasp_api_helper_libraries/python/request/SearchRequest.py
reymond-group/MLpeptide
675b4777bcaaf09eca21173c15c12450ff515470
[ "MIT" ]
6
2021-03-21T01:28:24.000Z
2021-12-15T09:41:42.000Z
from AbstractRequest import AbstractRequest class SearchRequest(AbstractRequest): name = "" sequence = "" sequence_option = "" sequence_length = "" n_terminus_id = "" c_terminus_id = "" target_group_id = "" target_object_id = "" synthesis_type = "" kingdom_id = "" bond_id = "" unusual_amino_acid_id = "" author_id = "" journal_id = "" article_year = "" article_title = "" complexity = "monomer" target_species_id = "" non_standart_experimental_conditions = "false" hemolytic_and_cytotoxic_activities = "false" def query_type(self): return "search" def get_parameters(self): dict = {} dict["complexity"] = self.complexity dict["name"] = self.name dict["sequence"] = self.sequence dict["sequence_option"] = self.sequence_option dict["sequence_length"] = self.sequence_length dict["n_terminus_id"] = self.n_terminus_id dict["c_terminus_id"] = self.c_terminus_id dict["target_group_id"] = self.target_group_id dict["target_object_id"] = self.target_object_id dict["synthesis_type"] = self.synthesis_type dict["kingdom_id"] = self.kingdom_id dict["bond_id"] = self.bond_id dict["unusual_amino_acid_id"] = self.unusual_amino_acid_id dict["author_id"] = self.author_id dict["journal_id"] = self.journal_id dict["article_year"] = self.article_year dict["article_title"] = self.article_title dict["target_species_id"] = self.target_species_id dict["non_standart_experimental_conditions"] = self.non_standart_experimental_conditions dict["hemolytic_and_cytotoxic_activities"] = self.hemolytic_and_cytotoxic_activities return dict
35.215686
96
0.662584
from AbstractRequest import AbstractRequest class SearchRequest(AbstractRequest): name = "" sequence = "" sequence_option = "" sequence_length = "" n_terminus_id = "" c_terminus_id = "" target_group_id = "" target_object_id = "" synthesis_type = "" kingdom_id = "" bond_id = "" unusual_amino_acid_id = "" author_id = "" journal_id = "" article_year = "" article_title = "" complexity = "monomer" target_species_id = "" non_standart_experimental_conditions = "false" hemolytic_and_cytotoxic_activities = "false" def query_type(self): return "search" def get_parameters(self): dict = {} dict["complexity"] = self.complexity dict["name"] = self.name dict["sequence"] = self.sequence dict["sequence_option"] = self.sequence_option dict["sequence_length"] = self.sequence_length dict["n_terminus_id"] = self.n_terminus_id dict["c_terminus_id"] = self.c_terminus_id dict["target_group_id"] = self.target_group_id dict["target_object_id"] = self.target_object_id dict["synthesis_type"] = self.synthesis_type dict["kingdom_id"] = self.kingdom_id dict["bond_id"] = self.bond_id dict["unusual_amino_acid_id"] = self.unusual_amino_acid_id dict["author_id"] = self.author_id dict["journal_id"] = self.journal_id dict["article_year"] = self.article_year dict["article_title"] = self.article_title dict["target_species_id"] = self.target_species_id dict["non_standart_experimental_conditions"] = self.non_standart_experimental_conditions dict["hemolytic_and_cytotoxic_activities"] = self.hemolytic_and_cytotoxic_activities return dict
true
true
f70dea4e11c5c3e043f6e1d93223b8c0819cd935
508
py
Python
examples/reporting/plotly_reporting.py
noklam/trains
70536544ed5e2b9aac8576ef2eaaef31c99ca670
[ "Apache-2.0" ]
1
2020-11-19T14:00:40.000Z
2020-11-19T14:00:40.000Z
examples/reporting/plotly_reporting.py
aliceUnhinged613/trains
8ec6bba4d91104a2bdd2e537bec21078529540e0
[ "Apache-2.0" ]
2
2020-07-05T08:28:40.000Z
2020-08-11T13:32:49.000Z
examples/reporting/plotly_reporting.py
aliceUnhinged613/trains
8ec6bba4d91104a2bdd2e537bec21078529540e0
[ "Apache-2.0" ]
null
null
null
# TRAINS - Example of Plotly integration and reporting # from trains import Task import plotly.express as px task = Task.init('examples', 'plotly reporting') print('reporting plotly figures') # Iris dataset df = px.data.iris() # create complex plotly figure fig = px.scatter(df, x="sepal_width", y="sepal_length", color="species", marginal_y="rug", marginal_x="histogram") # report the plotly figure task.get_logger().report_plotly(title="iris", series="sepal", iteration=0, figure=fig) print('done')
24.190476
114
0.740157
from trains import Task import plotly.express as px task = Task.init('examples', 'plotly reporting') print('reporting plotly figures') df = px.data.iris() fig = px.scatter(df, x="sepal_width", y="sepal_length", color="species", marginal_y="rug", marginal_x="histogram") task.get_logger().report_plotly(title="iris", series="sepal", iteration=0, figure=fig) print('done')
true
true
f70deba878d97e20aa8d572289554d7a64e18ff4
1,543
py
Python
xlsxwriter/test/comparison/test_optimize10.py
eddiechapman/XlsxWriter
c636117ab30e64e4b7b824c9105595c42887c2c9
[ "BSD-2-Clause-FreeBSD" ]
1
2021-03-27T11:14:47.000Z
2021-03-27T11:14:47.000Z
xlsxwriter/test/comparison/test_optimize10.py
xiaolanmeng86/XlsxWriter
6c3ea23a410e8216eab8f5751e5544ffb444b3da
[ "BSD-2-Clause-FreeBSD" ]
null
null
null
xlsxwriter/test/comparison/test_optimize10.py
xiaolanmeng86/XlsxWriter
6c3ea23a410e8216eab8f5751e5544ffb444b3da
[ "BSD-2-Clause-FreeBSD" ]
null
null
null
############################################################################### # # Tests for XlsxWriter. # # Copyright (c), 2013-2021, John McNamara, jmcnamara@cpan.org # from ..excel_comparison_test import ExcelComparisonTest import codecs from ...workbook import Workbook class TestCompareXLSXFiles(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename('optimize10.xlsx') self.set_text_file('unicode_polish_utf8.txt') def test_create_file(self): """Test example file converting Unicode text.""" # Open the input file with the correct encoding. textfile = codecs.open(self.txt_filename, 'r', 'utf-8') # Create an new Excel file and convert the text data. workbook = Workbook(self.got_filename, {'constant_memory': True, 'in_memory': False}) worksheet = workbook.add_worksheet() # Widen the first column to make the text clearer. worksheet.set_column('A:A', 50) # Start from the first cell. row = 0 col = 0 # Read the text file and write it to the worksheet. for line in textfile: # Ignore the comments in the sample file. if line.startswith('#'): continue # Write any other lines to the worksheet. worksheet.write(row, col, line.rstrip("\n")) row += 1 workbook.close() textfile.close() self.assertExcelEqual()
28.054545
93
0.594945
from ..excel_comparison_test import ExcelComparisonTest import codecs from ...workbook import Workbook class TestCompareXLSXFiles(ExcelComparisonTest): def setUp(self): self.set_filename('optimize10.xlsx') self.set_text_file('unicode_polish_utf8.txt') def test_create_file(self): textfile = codecs.open(self.txt_filename, 'r', 'utf-8') workbook = Workbook(self.got_filename, {'constant_memory': True, 'in_memory': False}) worksheet = workbook.add_worksheet() worksheet.set_column('A:A', 50) row = 0 col = 0 for line in textfile: if line.startswith('#'): continue worksheet.write(row, col, line.rstrip("\n")) row += 1 workbook.close() textfile.close() self.assertExcelEqual()
true
true
f70dec16ef67a8c588c37a51e5fa25ca0a3c4db9
4,614
py
Python
tebless/widgets/menu.py
Akhail/Tebless
87faff5547f168d0cf2d5caaf313c1efe1c19950
[ "MIT" ]
5
2017-09-20T02:12:25.000Z
2019-10-22T14:12:07.000Z
tebless/widgets/menu.py
mdbetancourt/Tebless
87faff5547f168d0cf2d5caaf313c1efe1c19950
[ "MIT" ]
3
2021-06-14T14:20:53.000Z
2021-11-15T17:47:37.000Z
tebless/widgets/menu.py
Akhail/Tebless
87faff5547f168d0cf2d5caaf313c1efe1c19950
[ "MIT" ]
1
2021-04-13T14:03:53.000Z
2021-04-13T14:03:53.000Z
# Copyright (c) 2017 Michel Betancourt # # This software is released under the MIT License. # https://opensource.org/licenses/MIT """ menu.py Created by Michel Betancourt on 2017. Copyright (c) 2017 MIT. All rights reserved. """ from math import floor, ceil from tebless.utils.styles import red from tebless.devs import Widget, echo from tebless.utils.keyboard import KEY_DOWN, KEY_UP __all__ = ['Menu'] class Menu(Widget): """Widget show a list of elements. :param cordx: Position on axis X :param cordy: Position on axis Y :param items: Element to show :param is_menu: Is a menu or only show items :param limit: Max items to show :param header: Text header of table :param footer: Text footer of table :param selector: A function that return text to show on select :param width: Width of table :param empty: Whats show if table is empty :param key: A function return text of object in list """ def __init__(self, items=None, *args, **kwargs): super().__init__(items=items, on_key_arrow=self._on_key_arrow, *args, **kwargs) self._items = items or [] self._len_items = len(self._items) self._empty = kwargs.get('empty', ['Sin elementos']) self._is_menu = kwargs.get('is_menu', True) self._limit = round(kwargs.get('limit', 4)) if 'width' not in kwargs: self._width = self.term.width self._header = kwargs.get('header', '') self._footer = kwargs.get('footer', '') def selector(text, **kwargs): return red('| ') + text if self.term.length(text) > 0 else text self._selector = kwargs.get('selector', selector) self._key = kwargs.get('key', lambda x: x) self._formater = kwargs.get( 'formater', lambda text, **kw: ' ' + text[:self._width]) self._page = 1 self._index = 0 self._height = 0 def _on_key_arrow(self, key): if key.code == KEY_DOWN: self.index = (self.index + 1) % self._len_items elif key.code == KEY_UP: self.index = (self.index - 1) % self._len_items def paint(self): self._page = ceil((self._index + 1) / self._limit) echo(self.term.move(self.y, self.x)) header_height, footer_height = 0, 0 if self._header != '': header_height = len(self._header.split('\n')) if self._footer != '': footer_height = len(self._footer.split('\n')) items = self.items if self.items else self._empty first = floor(self._index / self._limit) * self._limit max_page = ceil(len(items) / self._limit) items = items[first:self._limit + first] vars_op = { 'page': self._page, 'last': max_page, 'count': self._len_items } # Print header if self._header != '': echo(self._header.format(**vars_op) + '\n') self._height = header_height # Print elements for idx, item in enumerate(items): array_text = self._key(item) if isinstance(array_text, str): array_text = [array_text] for index, text in enumerate(array_text): echo(self.term.move_x(self.x)) tmp = self._formater(**{ 'text': text, 'index': index, 'lenght': len(array_text) }) pos = self._index % self._limit == idx if pos and self._is_menu and text != '': tmp = self._selector(**{ 'text': text[:self.width], 'index': pos, 'lenght': len(array_text) }) tmp += '\n' self._height += tmp.count('\n') echo(tmp) # Print footer if self._footer != '': echo(self.term.move_x(self.x)) echo(self._footer.format(**vars_op)) self._height += footer_height @property def value(self): return self.items[self._index] @property def index(self): return self._index @index.setter def index(self, value): self._index = value self.on_change() @property def items(self): return list(self._items) @items.setter def items(self, value): self._index = 0 self._items = list(value) self._len_items = len(self._items) self.on_change()
29.767742
75
0.552016
from math import floor, ceil from tebless.utils.styles import red from tebless.devs import Widget, echo from tebless.utils.keyboard import KEY_DOWN, KEY_UP __all__ = ['Menu'] class Menu(Widget): def __init__(self, items=None, *args, **kwargs): super().__init__(items=items, on_key_arrow=self._on_key_arrow, *args, **kwargs) self._items = items or [] self._len_items = len(self._items) self._empty = kwargs.get('empty', ['Sin elementos']) self._is_menu = kwargs.get('is_menu', True) self._limit = round(kwargs.get('limit', 4)) if 'width' not in kwargs: self._width = self.term.width self._header = kwargs.get('header', '') self._footer = kwargs.get('footer', '') def selector(text, **kwargs): return red('| ') + text if self.term.length(text) > 0 else text self._selector = kwargs.get('selector', selector) self._key = kwargs.get('key', lambda x: x) self._formater = kwargs.get( 'formater', lambda text, **kw: ' ' + text[:self._width]) self._page = 1 self._index = 0 self._height = 0 def _on_key_arrow(self, key): if key.code == KEY_DOWN: self.index = (self.index + 1) % self._len_items elif key.code == KEY_UP: self.index = (self.index - 1) % self._len_items def paint(self): self._page = ceil((self._index + 1) / self._limit) echo(self.term.move(self.y, self.x)) header_height, footer_height = 0, 0 if self._header != '': header_height = len(self._header.split('\n')) if self._footer != '': footer_height = len(self._footer.split('\n')) items = self.items if self.items else self._empty first = floor(self._index / self._limit) * self._limit max_page = ceil(len(items) / self._limit) items = items[first:self._limit + first] vars_op = { 'page': self._page, 'last': max_page, 'count': self._len_items } if self._header != '': echo(self._header.format(**vars_op) + '\n') self._height = header_height for idx, item in enumerate(items): array_text = self._key(item) if isinstance(array_text, str): array_text = [array_text] for index, text in enumerate(array_text): echo(self.term.move_x(self.x)) tmp = self._formater(**{ 'text': text, 'index': index, 'lenght': len(array_text) }) pos = self._index % self._limit == idx if pos and self._is_menu and text != '': tmp = self._selector(**{ 'text': text[:self.width], 'index': pos, 'lenght': len(array_text) }) tmp += '\n' self._height += tmp.count('\n') echo(tmp) if self._footer != '': echo(self.term.move_x(self.x)) echo(self._footer.format(**vars_op)) self._height += footer_height @property def value(self): return self.items[self._index] @property def index(self): return self._index @index.setter def index(self, value): self._index = value self.on_change() @property def items(self): return list(self._items) @items.setter def items(self, value): self._index = 0 self._items = list(value) self._len_items = len(self._items) self.on_change()
true
true
f70decdb295edbb7dae9d6cc7d9c4abef0653a3b
3,479
py
Python
trackers/tracking/utils/io.py
SvipRepetitionCounting/AlphaPose
0cc38e4c1d6f08ea9c34c720ae188506d3de6eb6
[ "Apache-2.0" ]
6,306
2018-02-04T11:14:11.000Z
2022-03-31T13:36:53.000Z
trackers/tracking/utils/io.py
SvipRepetitionCounting/AlphaPose
0cc38e4c1d6f08ea9c34c720ae188506d3de6eb6
[ "Apache-2.0" ]
982
2018-02-05T03:06:49.000Z
2022-03-31T16:58:57.000Z
trackers/tracking/utils/io.py
SvipRepetitionCounting/AlphaPose
0cc38e4c1d6f08ea9c34c720ae188506d3de6eb6
[ "Apache-2.0" ]
1,855
2018-02-04T11:27:12.000Z
2022-03-31T17:25:53.000Z
import os
from typing import Dict

import numpy as np

from utils.log import logger


def write_results(filename, results_dict: Dict, data_type: str):
    if not filename:
        return
    path = os.path.dirname(filename)
    if not os.path.exists(path):
        os.makedirs(path)

    if data_type in ('mot', 'mcmot', 'lab'):
        save_format = '{frame},{id},{x1},{y1},{w},{h},1,-1,-1,-1\n'
    elif data_type == 'kitti':
        save_format = '{frame} {id} pedestrian -1 -1 -10 {x1} {y1} {x2} {y2} -1 -1 -1 -1000 -1000 -1000 -10 {score}\n'
    else:
        raise ValueError(data_type)

    with open(filename, 'w') as f:
        for frame_id, frame_data in results_dict.items():
            if data_type == 'kitti':
                frame_id -= 1
            for tlwh, track_id in frame_data:
                if track_id < 0:
                    continue
                x1, y1, w, h = tlwh
                x2, y2 = x1 + w, y1 + h
                line = save_format.format(frame=frame_id, id=track_id, x1=x1, y1=y1, x2=x2, y2=y2, w=w, h=h, score=1.0)
                f.write(line)
    logger.info('Save results to {}'.format(filename))


def read_results(filename, data_type: str, is_gt=False, is_ignore=False):
    if data_type in ('mot', 'lab'):
        read_fun = read_mot_results
    else:
        raise ValueError('Unknown data type: {}'.format(data_type))

    return read_fun(filename, is_gt, is_ignore)


"""
labels={'ped', ...          % 1
'person_on_vhcl', ...       % 2
'car', ...                  % 3
'bicycle', ...              % 4
'mbike', ...                % 5
'non_mot_vhcl', ...         % 6
'static_person', ...        % 7
'distractor', ...           % 8
'occluder', ...             % 9
'occluder_on_grnd', ...     %10
'occluder_full', ...        % 11
'reflection', ...           % 12
'crowd' ...                 % 13
};
"""


def read_mot_results(filename, is_gt, is_ignore):
    valid_labels = {1}
    ignore_labels = {2, 7, 8, 12}
    results_dict = dict()
    if os.path.isfile(filename):
        with open(filename, 'r') as f:
            for line in f.readlines():
                linelist = line.split(',')
                if len(linelist) < 7:
                    continue
                fid = int(linelist[0])
                if fid < 1:
                    continue
                results_dict.setdefault(fid, list())

                if is_gt:
                    if 'MOT16-' in filename or 'MOT17-' in filename:
                        label = int(float(linelist[7]))
                        mark = int(float(linelist[6]))
                        if mark == 0 or label not in valid_labels:
                            continue
                    score = 1
                elif is_ignore:
                    if 'MOT16-' in filename or 'MOT17-' in filename:
                        label = int(float(linelist[7]))
                        vis_ratio = float(linelist[8])
                        if label not in ignore_labels and vis_ratio >= 0:
                            continue
                    else:
                        continue
                    score = 1
                else:
                    score = float(linelist[6])

                tlwh = tuple(map(float, linelist[2:6]))
                target_id = int(linelist[1])

                results_dict[fid].append((tlwh, target_id, score))

    return results_dict


def unzip_objs(objs):
    if len(objs) > 0:
        tlwhs, ids, scores = zip(*objs)
    else:
        tlwhs, ids, scores = [], [], []
    tlwhs = np.asarray(tlwhs, dtype=float).reshape(-1, 4)

    return tlwhs, ids, scores
31.0625
119
0.500431
import os from typing import Dict import numpy as np from utils.log import logger def write_results(filename, results_dict: Dict, data_type: str): if not filename: return path = os.path.dirname(filename) if not os.path.exists(path): os.makedirs(path) if data_type in ('mot', 'mcmot', 'lab'): save_format = '{frame},{id},{x1},{y1},{w},{h},1,-1,-1,-1\n' elif data_type == 'kitti': save_format = '{frame} {id} pedestrian -1 -1 -10 {x1} {y1} {x2} {y2} -1 -1 -1 -1000 -1000 -1000 -10 {score}\n' else: raise ValueError(data_type) with open(filename, 'w') as f: for frame_id, frame_data in results_dict.items(): if data_type == 'kitti': frame_id -= 1 for tlwh, track_id in frame_data: if track_id < 0: continue x1, y1, w, h = tlwh x2, y2 = x1 + w, y1 + h line = save_format.format(frame=frame_id, id=track_id, x1=x1, y1=y1, x2=x2, y2=y2, w=w, h=h, score=1.0) f.write(line) logger.info('Save results to {}'.format(filename)) def read_results(filename, data_type: str, is_gt=False, is_ignore=False): if data_type in ('mot', 'lab'): read_fun = read_mot_results else: raise ValueError('Unknown data type: {}'.format(data_type)) return read_fun(filename, is_gt, is_ignore) def read_mot_results(filename, is_gt, is_ignore): valid_labels = {1} ignore_labels = {2, 7, 8, 12} results_dict = dict() if os.path.isfile(filename): with open(filename, 'r') as f: for line in f.readlines(): linelist = line.split(',') if len(linelist) < 7: continue fid = int(linelist[0]) if fid < 1: continue results_dict.setdefault(fid, list()) if is_gt: if 'MOT16-' in filename or 'MOT17-' in filename: label = int(float(linelist[7])) mark = int(float(linelist[6])) if mark == 0 or label not in valid_labels: continue score = 1 elif is_ignore: if 'MOT16-' in filename or 'MOT17-' in filename: label = int(float(linelist[7])) vis_ratio = float(linelist[8]) if label not in ignore_labels and vis_ratio >= 0: continue else: continue score = 1 else: score = float(linelist[6]) tlwh = tuple(map(float, linelist[2:6])) target_id = int(linelist[1]) results_dict[fid].append((tlwh, target_id, score)) return results_dict def unzip_objs(objs): if len(objs) > 0: tlwhs, ids, scores = zip(*objs) else: tlwhs, ids, scores = [], [], [] tlwhs = np.asarray(tlwhs, dtype=float).reshape(-1, 4) return tlwhs, ids, scores
true
true
f70ded31226ba5c1563012f47f50290515bf741e
1,738
py
Python
src/day_2/day_2_02/00_relax_homebrew.py
BlockResearchGroup/AAG2018
f4d1188798593b4aec019aa8bfe091305330b374
[ "MIT" ]
1
2018-10-01T16:16:21.000Z
2018-10-01T16:16:21.000Z
src/day_2/day_2_02/00_relax_homebrew.py
compas-Workshops/AAG2018
f4d1188798593b4aec019aa8bfe091305330b374
[ "MIT" ]
null
null
null
src/day_2/day_2_02/00_relax_homebrew.py
compas-Workshops/AAG2018
f4d1188798593b4aec019aa8bfe091305330b374
[ "MIT" ]
2
2018-09-22T22:12:11.000Z
2018-11-30T10:31:00.000Z
import rhinoscriptsyntax as rs

from compas.datastructures import Mesh

from compas.geometry import add_vectors
from compas.geometry import centroid_points
from compas.geometry import subtract_vectors

from compas.geometry import mesh_smooth_area
from compas.geometry import mesh_smooth_centroid

from compas_rhino.helpers import mesh_from_guid
from compas_rhino.artists.meshartist import MeshArtist


if __name__ == '__main__':

    # select rhino mesh
    guid = rs.GetObject("Select mesh", 32)

    # create compas mesh object from rhino mesh
    mesh = mesh_from_guid(Mesh,guid)

    # set vertices on boundary as fixed
    fixed = set(mesh.vertices_on_boundary())

    # number of iterations
    kmax = 20

    for k in range(kmax):
        updated = {}

        # loop over all vertices
        for key in mesh.vertices():
            pt = mesh.vertex_coordinates(key)

            if key in fixed:
                # don't alter pt coordinates if fixed
                updated[key] = pt
            else:
                # get neighboring keys
                nbrs = mesh.vertex_neighbors(key)
                # get neighboring points
                pts_nbrs = [mesh.vertex_coordinates(nbr) for nbr in nbrs]
                # compute barycenter for neighboring points
                cent = centroid_points(pts_nbrs)
                # store new coordinates
                updated[key] = cent

        # update coordinates of all mesh vertices
        for key, attr in mesh.vertices(True):
            x, y, z = updated[key]
            attr['x'] = x
            attr['y'] = y
            attr['z'] = z

    # draw mesh
    artist = MeshArtist(mesh, layer='relaxed_mesh_laplacian')
    artist.draw()
31.035714
73
0.613349
import rhinoscriptsyntax as rs

from compas.datastructures import Mesh

from compas.geometry import add_vectors
from compas.geometry import centroid_points
from compas.geometry import subtract_vectors

from compas.geometry import mesh_smooth_area
from compas.geometry import mesh_smooth_centroid

from compas_rhino.helpers import mesh_from_guid
from compas_rhino.artists.meshartist import MeshArtist


if __name__ == '__main__':

    guid = rs.GetObject("Select mesh", 32)

    mesh = mesh_from_guid(Mesh,guid)

    fixed = set(mesh.vertices_on_boundary())

    kmax = 20

    for k in range(kmax):
        updated = {}

        for key in mesh.vertices():
            pt = mesh.vertex_coordinates(key)

            if key in fixed:
                updated[key] = pt
            else:
                # get neighboring keys
                nbrs = mesh.vertex_neighbors(key)
                # get neighboring points
                pts_nbrs = [mesh.vertex_coordinates(nbr) for nbr in nbrs]
                # compute barycenter for neighboring points
                cent = centroid_points(pts_nbrs)
                # store new coordinates
                updated[key] = cent

        # update coordinates of all mesh vertices
        for key, attr in mesh.vertices(True):
            x, y, z = updated[key]
            attr['x'] = x
            attr['y'] = y
            attr['z'] = z

    # draw mesh
    artist = MeshArtist(mesh, layer='relaxed_mesh_laplacian')
    artist.draw()
true
true
f70deec4af033ae1ded15fd2a71343bb676b5a82
1,641
py
Python
supercheckers/journals.py
mikegoodspeed/supercheckers-python
220c271913cedfd5a816b8a2d220e92591c3d936
[ "MIT" ]
null
null
null
supercheckers/journals.py
mikegoodspeed/supercheckers-python
220c271913cedfd5a816b8a2d220e92591c3d936
[ "MIT" ]
null
null
null
supercheckers/journals.py
mikegoodspeed/supercheckers-python
220c271913cedfd5a816b8a2d220e92591c3d936
[ "MIT" ]
null
null
null
import copy
from typing import List, Optional, Tuple

from . import boards, enums, moves

JournalEntry = Tuple[Optional[moves.Move], boards.Board]


class Journal:
    """A journal of all previous Move and Board states."""

    def __init__(self, board: boards.Board):
        """
        Create a journal.

        :param board: a Board to use as the initial state
        """
        self._log: List[JournalEntry] = [(None, board.copy())]

    @property
    def current_turn_number(self) -> int:
        """
        Return the current turn number.

        The first move returns 1.

        :return: the current turn number
        """
        return len(self._log)

    @property
    def current_team(self) -> enums.Team:
        """
        Return the current team based on the current turn number.

        :return: the current Team
        """
        return enums.Team.ONE if self.current_turn_number % 2 != 0 else enums.Team.TWO

    @property
    def current_board(self) -> boards.Board:
        """
        Return a copy of the current board.

        :return: the current board
        """
        return self._log[-1][1].copy()

    def apply(self, move: moves.Move) -> None:
        """
        Apply a Move to the current_board and save it to the journal.

        This method assumes that the move has been validated.

        :param move: a Move
        """
        board = self.current_board
        board.apply(move)
        self._log.append((move, board))

    def copy(self) -> "Journal":
        """
        Return a deep copy of this journal.

        :return: a Journal
        """
        return copy.deepcopy(self)
24.132353
86
0.585009
import copy
from typing import List, Optional, Tuple

from . import boards, enums, moves

JournalEntry = Tuple[Optional[moves.Move], boards.Board]


class Journal:

    def __init__(self, board: boards.Board):
        self._log: List[JournalEntry] = [(None, board.copy())]

    @property
    def current_turn_number(self) -> int:
        return len(self._log)

    @property
    def current_team(self) -> enums.Team:
        return enums.Team.ONE if self.current_turn_number % 2 != 0 else enums.Team.TWO

    @property
    def current_board(self) -> boards.Board:
        return self._log[-1][1].copy()

    def apply(self, move: moves.Move) -> None:
        board = self.current_board
        board.apply(move)
        self._log.append((move, board))

    def copy(self) -> "Journal":
        return copy.deepcopy(self)
true
true
f70deeeb8ce7aa5229c9aa79aa8d7741be5bb81b
3,286
py
Python
orchestra/contrib/orchestration/migrations/0009_rename_route_async_run_async.py
RubenPX/django-orchestra
5ab4779e1ae12ec99569d682601b7810587ed381
[ "Unlicense" ]
null
null
null
orchestra/contrib/orchestration/migrations/0009_rename_route_async_run_async.py
RubenPX/django-orchestra
5ab4779e1ae12ec99569d682601b7810587ed381
[ "Unlicense" ]
4
2021-01-30T14:26:46.000Z
2022-03-18T16:28:39.000Z
orchestra/contrib/orchestration/migrations/0009_rename_route_async_run_async.py
RubenPX/django-orchestra
5ab4779e1ae12ec99569d682601b7810587ed381
[ "Unlicense" ]
3
2022-02-06T04:35:59.000Z
2022-03-17T00:40:17.000Z
# -*- coding: utf-8 -*- # Generated by Django 1.10.5 on 2021-03-30 10:49 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('orchestration', '0008_auto_20190805_1134'), ] operations = [ migrations.RenameField( model_name='route', old_name='async', new_name='run_async', ), migrations.AlterField( model_name='route', name='backend', field=models.CharField(choices=[('Apache2Traffic', '[M] Apache 2 Traffic'), ('ApacheTrafficByName', '[M] ApacheTrafficByName'), ('DokuWikiMuTraffic', '[M] DokuWiki MU Traffic'), ('DovecotMaildirDisk', '[M] Dovecot Maildir size'), ('Exim4Traffic', '[M] Exim4 traffic'), ('MailmanSubscribers', '[M] Mailman subscribers'), ('MailmanTraffic', '[M] Mailman traffic'), ('MysqlDisk', '[M] MySQL disk'), ('PostfixMailscannerTraffic', '[M] Postfix-Mailscanner traffic'), ('ProxmoxOpenVZTraffic', '[M] ProxmoxOpenVZTraffic'), ('UNIXUserDisk', '[M] UNIX user disk'), ('VsFTPdTraffic', '[M] VsFTPd traffic'), ('WordpressMuTraffic', '[M] Wordpress MU Traffic'), ('NextCloudDiskQuota', '[M] nextCloud SaaS Disk Quota'), ('NextcloudTraffic', '[M] nextCloud SaaS Traffic'), ('OwnCloudDiskQuota', '[M] ownCloud SaaS Disk Quota'), ('OwncloudTraffic', '[M] ownCloud SaaS Traffic'), ('PhpListTraffic', '[M] phpList SaaS Traffic'), ('Apache2Controller', '[S] Apache 2'), ('BSCWController', '[S] BSCW SaaS'), ('Bind9MasterDomainController', '[S] Bind9 master domain'), ('Bind9SlaveDomainController', '[S] Bind9 slave domain'), ('DokuWikiMuController', '[S] DokuWiki multisite'), ('DrupalMuController', '[S] Drupal multisite'), ('GitLabSaaSController', '[S] GitLab SaaS'), ('LetsEncryptController', "[S] Let's encrypt!"), ('LxcController', '[S] LxcController'), ('AutoresponseController', '[S] Mail autoresponse'), ('MailmanController', '[S] Mailman'), ('MailmanVirtualDomainController', '[S] Mailman virtdomain-only'), ('MoodleController', '[S] Moodle'), ('MoodleWWWRootController', '[S] Moodle WWWRoot (required)'), ('MoodleMuController', '[S] Moodle multisite'), ('MySQLController', '[S] MySQL database'), ('MySQLUserController', '[S] MySQL user'), ('PHPController', '[S] PHP FPM/FCGID'), ('PostfixAddressController', '[S] Postfix address'), ('PostfixAddressVirtualDomainController', '[S] Postfix address virtdomain-only'), ('ProxmoxOVZ', '[S] ProxmoxOVZ'), ('uWSGIPythonController', '[S] Python uWSGI'), ('RoundcubeIdentityController', '[S] Roundcube Identity Controller'), ('StaticController', '[S] Static'), ('SymbolicLinkController', '[S] Symbolic link webapp'), ('UNIXUserMaildirController', '[S] UNIX maildir user'), ('UNIXUserController', '[S] UNIX user'), ('WebalizerAppController', '[S] Webalizer App'), ('WebalizerController', '[S] Webalizer Content'), ('WordPressForceSSLController', '[S] WordPress Force SSL'), ('WordPressURLController', '[S] WordPress URL'), ('WordPressController', '[S] Wordpress'), ('WordpressMuController', '[S] Wordpress multisite'), ('NextCloudController', '[S] nextCloud SaaS'), ('OwnCloudController', '[S] ownCloud SaaS'), ('PhpListSaaSController', '[S] phpList SaaS')], max_length=256, verbose_name='backend'), ), ]
126.384615
2,740
0.686549
from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('orchestration', '0008_auto_20190805_1134'), ] operations = [ migrations.RenameField( model_name='route', old_name='async', new_name='run_async', ), migrations.AlterField( model_name='route', name='backend', field=models.CharField(choices=[('Apache2Traffic', '[M] Apache 2 Traffic'), ('ApacheTrafficByName', '[M] ApacheTrafficByName'), ('DokuWikiMuTraffic', '[M] DokuWiki MU Traffic'), ('DovecotMaildirDisk', '[M] Dovecot Maildir size'), ('Exim4Traffic', '[M] Exim4 traffic'), ('MailmanSubscribers', '[M] Mailman subscribers'), ('MailmanTraffic', '[M] Mailman traffic'), ('MysqlDisk', '[M] MySQL disk'), ('PostfixMailscannerTraffic', '[M] Postfix-Mailscanner traffic'), ('ProxmoxOpenVZTraffic', '[M] ProxmoxOpenVZTraffic'), ('UNIXUserDisk', '[M] UNIX user disk'), ('VsFTPdTraffic', '[M] VsFTPd traffic'), ('WordpressMuTraffic', '[M] Wordpress MU Traffic'), ('NextCloudDiskQuota', '[M] nextCloud SaaS Disk Quota'), ('NextcloudTraffic', '[M] nextCloud SaaS Traffic'), ('OwnCloudDiskQuota', '[M] ownCloud SaaS Disk Quota'), ('OwncloudTraffic', '[M] ownCloud SaaS Traffic'), ('PhpListTraffic', '[M] phpList SaaS Traffic'), ('Apache2Controller', '[S] Apache 2'), ('BSCWController', '[S] BSCW SaaS'), ('Bind9MasterDomainController', '[S] Bind9 master domain'), ('Bind9SlaveDomainController', '[S] Bind9 slave domain'), ('DokuWikiMuController', '[S] DokuWiki multisite'), ('DrupalMuController', '[S] Drupal multisite'), ('GitLabSaaSController', '[S] GitLab SaaS'), ('LetsEncryptController', "[S] Let's encrypt!"), ('LxcController', '[S] LxcController'), ('AutoresponseController', '[S] Mail autoresponse'), ('MailmanController', '[S] Mailman'), ('MailmanVirtualDomainController', '[S] Mailman virtdomain-only'), ('MoodleController', '[S] Moodle'), ('MoodleWWWRootController', '[S] Moodle WWWRoot (required)'), ('MoodleMuController', '[S] Moodle multisite'), ('MySQLController', '[S] MySQL database'), ('MySQLUserController', '[S] MySQL user'), ('PHPController', '[S] PHP FPM/FCGID'), ('PostfixAddressController', '[S] Postfix address'), ('PostfixAddressVirtualDomainController', '[S] Postfix address virtdomain-only'), ('ProxmoxOVZ', '[S] ProxmoxOVZ'), ('uWSGIPythonController', '[S] Python uWSGI'), ('RoundcubeIdentityController', '[S] Roundcube Identity Controller'), ('StaticController', '[S] Static'), ('SymbolicLinkController', '[S] Symbolic link webapp'), ('UNIXUserMaildirController', '[S] UNIX maildir user'), ('UNIXUserController', '[S] UNIX user'), ('WebalizerAppController', '[S] Webalizer App'), ('WebalizerController', '[S] Webalizer Content'), ('WordPressForceSSLController', '[S] WordPress Force SSL'), ('WordPressURLController', '[S] WordPress URL'), ('WordPressController', '[S] Wordpress'), ('WordpressMuController', '[S] Wordpress multisite'), ('NextCloudController', '[S] nextCloud SaaS'), ('OwnCloudController', '[S] ownCloud SaaS'), ('PhpListSaaSController', '[S] phpList SaaS')], max_length=256, verbose_name='backend'), ), ]
true
true
f70df10cc910b834bf139d78539754aab65713f2
10,670
py
Python
test/test_bregman.py
rturrisige/POT
c5039bcafde999114283f7e59fb03e176027d740
[ "MIT" ]
null
null
null
test/test_bregman.py
rturrisige/POT
c5039bcafde999114283f7e59fb03e176027d740
[ "MIT" ]
null
null
null
test/test_bregman.py
rturrisige/POT
c5039bcafde999114283f7e59fb03e176027d740
[ "MIT" ]
null
null
null
"""Tests for module bregman on OT with bregman projections """ # Author: Remi Flamary <remi.flamary@unice.fr> # Kilian Fatras <kilian.fatras@irisa.fr> # # License: MIT License import numpy as np import ot import pytest def test_sinkhorn(): # test sinkhorn n = 100 rng = np.random.RandomState(0) x = rng.randn(n, 2) u = ot.utils.unif(n) M = ot.dist(x, x) G = ot.sinkhorn(u, u, M, 1, stopThr=1e-10) # check constratints np.testing.assert_allclose( u, G.sum(1), atol=1e-05) # cf convergence sinkhorn np.testing.assert_allclose( u, G.sum(0), atol=1e-05) # cf convergence sinkhorn def test_sinkhorn_empty(): # test sinkhorn n = 100 rng = np.random.RandomState(0) x = rng.randn(n, 2) u = ot.utils.unif(n) M = ot.dist(x, x) G, log = ot.sinkhorn([], [], M, 1, stopThr=1e-10, verbose=True, log=True) # check constratints np.testing.assert_allclose(u, G.sum(1), atol=1e-05) np.testing.assert_allclose(u, G.sum(0), atol=1e-05) G, log = ot.sinkhorn([], [], M, 1, stopThr=1e-10, method='sinkhorn_stabilized', verbose=True, log=True) # check constratints np.testing.assert_allclose(u, G.sum(1), atol=1e-05) np.testing.assert_allclose(u, G.sum(0), atol=1e-05) G, log = ot.sinkhorn( [], [], M, 1, stopThr=1e-10, method='sinkhorn_epsilon_scaling', verbose=True, log=True) # check constratints np.testing.assert_allclose(u, G.sum(1), atol=1e-05) np.testing.assert_allclose(u, G.sum(0), atol=1e-05) def test_sinkhorn_variants(): # test sinkhorn n = 100 rng = np.random.RandomState(0) x = rng.randn(n, 2) u = ot.utils.unif(n) M = ot.dist(x, x) G0 = ot.sinkhorn(u, u, M, 1, method='sinkhorn', stopThr=1e-10) Gs = ot.sinkhorn(u, u, M, 1, method='sinkhorn_stabilized', stopThr=1e-10) Ges = ot.sinkhorn( u, u, M, 1, method='sinkhorn_epsilon_scaling', stopThr=1e-10) G_green = ot.sinkhorn(u, u, M, 1, method='greenkhorn', stopThr=1e-10) # check values np.testing.assert_allclose(G0, Gs, atol=1e-05) np.testing.assert_allclose(G0, Ges, atol=1e-05) np.testing.assert_allclose(G0, G_green, atol=1e-5) print(G0, G_green) def test_sinkhorn_variants_log(): # test sinkhorn n = 100 rng = np.random.RandomState(0) x = rng.randn(n, 2) u = ot.utils.unif(n) M = ot.dist(x, x) G0, log0 = ot.sinkhorn(u, u, M, 1, method='sinkhorn', stopThr=1e-10, log=True) Gs, logs = ot.sinkhorn(u, u, M, 1, method='sinkhorn_stabilized', stopThr=1e-10, log=True) Ges, loges = ot.sinkhorn( u, u, M, 1, method='sinkhorn_epsilon_scaling', stopThr=1e-10, log=True) G_green, loggreen = ot.sinkhorn(u, u, M, 1, method='greenkhorn', stopThr=1e-10, log=True) # check values np.testing.assert_allclose(G0, Gs, atol=1e-05) np.testing.assert_allclose(G0, Ges, atol=1e-05) np.testing.assert_allclose(G0, G_green, atol=1e-5) print(G0, G_green) @pytest.mark.parametrize("method", ["sinkhorn", "sinkhorn_stabilized"]) def test_barycenter(method): n_bins = 100 # nb bins # Gaussian distributions a1 = ot.datasets.make_1D_gauss(n_bins, m=30, s=10) # m= mean, s= std a2 = ot.datasets.make_1D_gauss(n_bins, m=40, s=10) # creating matrix A containing all distributions A = np.vstack((a1, a2)).T # loss matrix + normalization M = ot.utils.dist0(n_bins) M /= M.max() alpha = 0.5 # 0<=alpha<=1 weights = np.array([1 - alpha, alpha]) # wasserstein reg = 1e-2 bary_wass = ot.bregman.barycenter(A, M, reg, weights, method=method) np.testing.assert_allclose(1, np.sum(bary_wass)) ot.bregman.barycenter(A, M, reg, log=True, verbose=True) def test_barycenter_stabilization(): n_bins = 100 # nb bins # Gaussian distributions a1 = ot.datasets.make_1D_gauss(n_bins, m=30, s=10) # m= mean, s= std a2 = ot.datasets.make_1D_gauss(n_bins, m=40, 
s=10) # creating matrix A containing all distributions A = np.vstack((a1, a2)).T # loss matrix + normalization M = ot.utils.dist0(n_bins) M /= M.max() alpha = 0.5 # 0<=alpha<=1 weights = np.array([1 - alpha, alpha]) # wasserstein reg = 1e-2 bar_stable = ot.bregman.barycenter(A, M, reg, weights, method="sinkhorn_stabilized", stopThr=1e-8) bar = ot.bregman.barycenter(A, M, reg, weights, method="sinkhorn", stopThr=1e-8) np.testing.assert_allclose(bar, bar_stable) def test_wasserstein_bary_2d(): size = 100 # size of a square image a1 = np.random.randn(size, size) a1 += a1.min() a1 = a1 / np.sum(a1) a2 = np.random.randn(size, size) a2 += a2.min() a2 = a2 / np.sum(a2) # creating matrix A containing all distributions A = np.zeros((2, size, size)) A[0, :, :] = a1 A[1, :, :] = a2 # wasserstein reg = 1e-2 bary_wass = ot.bregman.convolutional_barycenter2d(A, reg) np.testing.assert_allclose(1, np.sum(bary_wass)) # help in checking if log and verbose do not bug the function ot.bregman.convolutional_barycenter2d(A, reg, log=True, verbose=True) def test_unmix(): n_bins = 50 # nb bins # Gaussian distributions a1 = ot.datasets.make_1D_gauss(n_bins, m=20, s=10) # m= mean, s= std a2 = ot.datasets.make_1D_gauss(n_bins, m=40, s=10) a = ot.datasets.make_1D_gauss(n_bins, m=30, s=10) # creating matrix A containing all distributions D = np.vstack((a1, a2)).T # loss matrix + normalization M = ot.utils.dist0(n_bins) M /= M.max() M0 = ot.utils.dist0(2) M0 /= M0.max() h0 = ot.unif(2) # wasserstein reg = 1e-3 um = ot.bregman.unmix(a, D, M, M0, h0, reg, 1, alpha=0.01,) np.testing.assert_allclose(1, np.sum(um), rtol=1e-03, atol=1e-03) np.testing.assert_allclose([0.5, 0.5], um, rtol=1e-03, atol=1e-03) ot.bregman.unmix(a, D, M, M0, h0, reg, 1, alpha=0.01, log=True, verbose=True) def test_empirical_sinkhorn(): # test sinkhorn n = 100 a = ot.unif(n) b = ot.unif(n) X_s = np.reshape(np.arange(n), (n, 1)) X_t = np.reshape(np.arange(0, n), (n, 1)) M = ot.dist(X_s, X_t) M_m = ot.dist(X_s, X_t, metric='minkowski') G_sqe = ot.bregman.empirical_sinkhorn(X_s, X_t, 1) sinkhorn_sqe = ot.sinkhorn(a, b, M, 1) G_log, log_es = ot.bregman.empirical_sinkhorn(X_s, X_t, 0.1, log=True) sinkhorn_log, log_s = ot.sinkhorn(a, b, M, 0.1, log=True) G_m = ot.bregman.empirical_sinkhorn(X_s, X_t, 1, metric='minkowski') sinkhorn_m = ot.sinkhorn(a, b, M_m, 1) loss_emp_sinkhorn = ot.bregman.empirical_sinkhorn2(X_s, X_t, 1) loss_sinkhorn = ot.sinkhorn2(a, b, M, 1) # check constratints np.testing.assert_allclose( sinkhorn_sqe.sum(1), G_sqe.sum(1), atol=1e-05) # metric sqeuclidian np.testing.assert_allclose( sinkhorn_sqe.sum(0), G_sqe.sum(0), atol=1e-05) # metric sqeuclidian np.testing.assert_allclose( sinkhorn_log.sum(1), G_log.sum(1), atol=1e-05) # log np.testing.assert_allclose( sinkhorn_log.sum(0), G_log.sum(0), atol=1e-05) # log np.testing.assert_allclose( sinkhorn_m.sum(1), G_m.sum(1), atol=1e-05) # metric euclidian np.testing.assert_allclose( sinkhorn_m.sum(0), G_m.sum(0), atol=1e-05) # metric euclidian np.testing.assert_allclose(loss_emp_sinkhorn, loss_sinkhorn, atol=1e-05) def test_empirical_sinkhorn_divergence(): #Test sinkhorn divergence n = 10 a = ot.unif(n) b = ot.unif(n) X_s = np.reshape(np.arange(n), (n, 1)) X_t = np.reshape(np.arange(0, n * 2, 2), (n, 1)) M = ot.dist(X_s, X_t) M_s = ot.dist(X_s, X_s) M_t = ot.dist(X_t, X_t) emp_sinkhorn_div = ot.bregman.empirical_sinkhorn_divergence(X_s, X_t, 1) sinkhorn_div = (ot.sinkhorn2(a, b, M, 1) - 1 / 2 * ot.sinkhorn2(a, a, M_s, 1) - 1 / 2 * ot.sinkhorn2(b, b, M_t, 1)) emp_sinkhorn_div_log, log_es = 
ot.bregman.empirical_sinkhorn_divergence(X_s, X_t, 1, log=True) sink_div_log_ab, log_s_ab = ot.sinkhorn2(a, b, M, 1, log=True) sink_div_log_a, log_s_a = ot.sinkhorn2(a, a, M_s, 1, log=True) sink_div_log_b, log_s_b = ot.sinkhorn2(b, b, M_t, 1, log=True) sink_div_log = sink_div_log_ab - 1 / 2 * (sink_div_log_a + sink_div_log_b) # check constratints np.testing.assert_allclose( emp_sinkhorn_div, sinkhorn_div, atol=1e-05) # cf conv emp sinkhorn np.testing.assert_allclose( emp_sinkhorn_div_log, sink_div_log, atol=1e-05) # cf conv emp sinkhorn def test_stabilized_vs_sinkhorn_multidim(): # test if stable version matches sinkhorn # for multidimensional inputs n = 100 # Gaussian distributions a = ot.datasets.make_1D_gauss(n, m=20, s=5) # m= mean, s= std b1 = ot.datasets.make_1D_gauss(n, m=60, s=8) b2 = ot.datasets.make_1D_gauss(n, m=30, s=4) # creating matrix A containing all distributions b = np.vstack((b1, b2)).T M = ot.utils.dist0(n) M /= np.median(M) epsilon = 0.1 G, log = ot.bregman.sinkhorn(a, b, M, reg=epsilon, method="sinkhorn_stabilized", log=True) G2, log2 = ot.bregman.sinkhorn(a, b, M, epsilon, method="sinkhorn", log=True) np.testing.assert_allclose(G, G2) def test_implemented_methods(): IMPLEMENTED_METHODS = ['sinkhorn', 'sinkhorn_stabilized'] ONLY_1D_methods = ['greenkhorn', 'sinkhorn_epsilon_scaling'] NOT_VALID_TOKENS = ['foo'] # test generalized sinkhorn for unbalanced OT barycenter n = 3 rng = np.random.RandomState(42) x = rng.randn(n, 2) a = ot.utils.unif(n) # make dists unbalanced b = ot.utils.unif(n) A = rng.rand(n, 2) M = ot.dist(x, x) epsilon = 1. for method in IMPLEMENTED_METHODS: ot.bregman.sinkhorn(a, b, M, epsilon, method=method) ot.bregman.sinkhorn2(a, b, M, epsilon, method=method) ot.bregman.barycenter(A, M, reg=epsilon, method=method) with pytest.raises(ValueError): for method in set(NOT_VALID_TOKENS): ot.bregman.sinkhorn(a, b, M, epsilon, method=method) ot.bregman.sinkhorn2(a, b, M, epsilon, method=method) ot.bregman.barycenter(A, M, reg=epsilon, method=method) for method in ONLY_1D_methods: ot.bregman.sinkhorn(a, b, M, epsilon, method=method) with pytest.raises(ValueError): ot.bregman.sinkhorn2(a, b, M, epsilon, method=method)
31.382353
119
0.625773
import numpy as np import ot import pytest def test_sinkhorn(): n = 100 rng = np.random.RandomState(0) x = rng.randn(n, 2) u = ot.utils.unif(n) M = ot.dist(x, x) G = ot.sinkhorn(u, u, M, 1, stopThr=1e-10) np.testing.assert_allclose( u, G.sum(1), atol=1e-05) np.testing.assert_allclose( u, G.sum(0), atol=1e-05) def test_sinkhorn_empty(): n = 100 rng = np.random.RandomState(0) x = rng.randn(n, 2) u = ot.utils.unif(n) M = ot.dist(x, x) G, log = ot.sinkhorn([], [], M, 1, stopThr=1e-10, verbose=True, log=True) np.testing.assert_allclose(u, G.sum(1), atol=1e-05) np.testing.assert_allclose(u, G.sum(0), atol=1e-05) G, log = ot.sinkhorn([], [], M, 1, stopThr=1e-10, method='sinkhorn_stabilized', verbose=True, log=True) np.testing.assert_allclose(u, G.sum(1), atol=1e-05) np.testing.assert_allclose(u, G.sum(0), atol=1e-05) G, log = ot.sinkhorn( [], [], M, 1, stopThr=1e-10, method='sinkhorn_epsilon_scaling', verbose=True, log=True) np.testing.assert_allclose(u, G.sum(1), atol=1e-05) np.testing.assert_allclose(u, G.sum(0), atol=1e-05) def test_sinkhorn_variants(): n = 100 rng = np.random.RandomState(0) x = rng.randn(n, 2) u = ot.utils.unif(n) M = ot.dist(x, x) G0 = ot.sinkhorn(u, u, M, 1, method='sinkhorn', stopThr=1e-10) Gs = ot.sinkhorn(u, u, M, 1, method='sinkhorn_stabilized', stopThr=1e-10) Ges = ot.sinkhorn( u, u, M, 1, method='sinkhorn_epsilon_scaling', stopThr=1e-10) G_green = ot.sinkhorn(u, u, M, 1, method='greenkhorn', stopThr=1e-10) np.testing.assert_allclose(G0, Gs, atol=1e-05) np.testing.assert_allclose(G0, Ges, atol=1e-05) np.testing.assert_allclose(G0, G_green, atol=1e-5) print(G0, G_green) def test_sinkhorn_variants_log(): n = 100 rng = np.random.RandomState(0) x = rng.randn(n, 2) u = ot.utils.unif(n) M = ot.dist(x, x) G0, log0 = ot.sinkhorn(u, u, M, 1, method='sinkhorn', stopThr=1e-10, log=True) Gs, logs = ot.sinkhorn(u, u, M, 1, method='sinkhorn_stabilized', stopThr=1e-10, log=True) Ges, loges = ot.sinkhorn( u, u, M, 1, method='sinkhorn_epsilon_scaling', stopThr=1e-10, log=True) G_green, loggreen = ot.sinkhorn(u, u, M, 1, method='greenkhorn', stopThr=1e-10, log=True) np.testing.assert_allclose(G0, Gs, atol=1e-05) np.testing.assert_allclose(G0, Ges, atol=1e-05) np.testing.assert_allclose(G0, G_green, atol=1e-5) print(G0, G_green) @pytest.mark.parametrize("method", ["sinkhorn", "sinkhorn_stabilized"]) def test_barycenter(method): n_bins = 100 a1 = ot.datasets.make_1D_gauss(n_bins, m=30, s=10) a2 = ot.datasets.make_1D_gauss(n_bins, m=40, s=10) A = np.vstack((a1, a2)).T M = ot.utils.dist0(n_bins) M /= M.max() alpha = 0.5 weights = np.array([1 - alpha, alpha]) reg = 1e-2 bary_wass = ot.bregman.barycenter(A, M, reg, weights, method=method) np.testing.assert_allclose(1, np.sum(bary_wass)) ot.bregman.barycenter(A, M, reg, log=True, verbose=True) def test_barycenter_stabilization(): n_bins = 100 a1 = ot.datasets.make_1D_gauss(n_bins, m=30, s=10) a2 = ot.datasets.make_1D_gauss(n_bins, m=40, s=10) A = np.vstack((a1, a2)).T M = ot.utils.dist0(n_bins) M /= M.max() alpha = 0.5 weights = np.array([1 - alpha, alpha]) reg = 1e-2 bar_stable = ot.bregman.barycenter(A, M, reg, weights, method="sinkhorn_stabilized", stopThr=1e-8) bar = ot.bregman.barycenter(A, M, reg, weights, method="sinkhorn", stopThr=1e-8) np.testing.assert_allclose(bar, bar_stable) def test_wasserstein_bary_2d(): size = 100 a1 = np.random.randn(size, size) a1 += a1.min() a1 = a1 / np.sum(a1) a2 = np.random.randn(size, size) a2 += a2.min() a2 = a2 / np.sum(a2) A = np.zeros((2, size, size)) A[0, :, :] = a1 A[1, :, :] = a2 reg = 1e-2 
bary_wass = ot.bregman.convolutional_barycenter2d(A, reg) np.testing.assert_allclose(1, np.sum(bary_wass)) ot.bregman.convolutional_barycenter2d(A, reg, log=True, verbose=True) def test_unmix(): n_bins = 50 a1 = ot.datasets.make_1D_gauss(n_bins, m=20, s=10) a2 = ot.datasets.make_1D_gauss(n_bins, m=40, s=10) a = ot.datasets.make_1D_gauss(n_bins, m=30, s=10) D = np.vstack((a1, a2)).T M = ot.utils.dist0(n_bins) M /= M.max() M0 = ot.utils.dist0(2) M0 /= M0.max() h0 = ot.unif(2) reg = 1e-3 um = ot.bregman.unmix(a, D, M, M0, h0, reg, 1, alpha=0.01,) np.testing.assert_allclose(1, np.sum(um), rtol=1e-03, atol=1e-03) np.testing.assert_allclose([0.5, 0.5], um, rtol=1e-03, atol=1e-03) ot.bregman.unmix(a, D, M, M0, h0, reg, 1, alpha=0.01, log=True, verbose=True) def test_empirical_sinkhorn(): n = 100 a = ot.unif(n) b = ot.unif(n) X_s = np.reshape(np.arange(n), (n, 1)) X_t = np.reshape(np.arange(0, n), (n, 1)) M = ot.dist(X_s, X_t) M_m = ot.dist(X_s, X_t, metric='minkowski') G_sqe = ot.bregman.empirical_sinkhorn(X_s, X_t, 1) sinkhorn_sqe = ot.sinkhorn(a, b, M, 1) G_log, log_es = ot.bregman.empirical_sinkhorn(X_s, X_t, 0.1, log=True) sinkhorn_log, log_s = ot.sinkhorn(a, b, M, 0.1, log=True) G_m = ot.bregman.empirical_sinkhorn(X_s, X_t, 1, metric='minkowski') sinkhorn_m = ot.sinkhorn(a, b, M_m, 1) loss_emp_sinkhorn = ot.bregman.empirical_sinkhorn2(X_s, X_t, 1) loss_sinkhorn = ot.sinkhorn2(a, b, M, 1) np.testing.assert_allclose( sinkhorn_sqe.sum(1), G_sqe.sum(1), atol=1e-05) np.testing.assert_allclose( sinkhorn_sqe.sum(0), G_sqe.sum(0), atol=1e-05) np.testing.assert_allclose( sinkhorn_log.sum(1), G_log.sum(1), atol=1e-05) np.testing.assert_allclose( sinkhorn_log.sum(0), G_log.sum(0), atol=1e-05) np.testing.assert_allclose( sinkhorn_m.sum(1), G_m.sum(1), atol=1e-05) np.testing.assert_allclose( sinkhorn_m.sum(0), G_m.sum(0), atol=1e-05) np.testing.assert_allclose(loss_emp_sinkhorn, loss_sinkhorn, atol=1e-05) def test_empirical_sinkhorn_divergence(): n = 10 a = ot.unif(n) b = ot.unif(n) X_s = np.reshape(np.arange(n), (n, 1)) X_t = np.reshape(np.arange(0, n * 2, 2), (n, 1)) M = ot.dist(X_s, X_t) M_s = ot.dist(X_s, X_s) M_t = ot.dist(X_t, X_t) emp_sinkhorn_div = ot.bregman.empirical_sinkhorn_divergence(X_s, X_t, 1) sinkhorn_div = (ot.sinkhorn2(a, b, M, 1) - 1 / 2 * ot.sinkhorn2(a, a, M_s, 1) - 1 / 2 * ot.sinkhorn2(b, b, M_t, 1)) emp_sinkhorn_div_log, log_es = ot.bregman.empirical_sinkhorn_divergence(X_s, X_t, 1, log=True) sink_div_log_ab, log_s_ab = ot.sinkhorn2(a, b, M, 1, log=True) sink_div_log_a, log_s_a = ot.sinkhorn2(a, a, M_s, 1, log=True) sink_div_log_b, log_s_b = ot.sinkhorn2(b, b, M_t, 1, log=True) sink_div_log = sink_div_log_ab - 1 / 2 * (sink_div_log_a + sink_div_log_b) np.testing.assert_allclose( emp_sinkhorn_div, sinkhorn_div, atol=1e-05) np.testing.assert_allclose( emp_sinkhorn_div_log, sink_div_log, atol=1e-05) def test_stabilized_vs_sinkhorn_multidim(): n = 100 a = ot.datasets.make_1D_gauss(n, m=20, s=5) b1 = ot.datasets.make_1D_gauss(n, m=60, s=8) b2 = ot.datasets.make_1D_gauss(n, m=30, s=4) b = np.vstack((b1, b2)).T M = ot.utils.dist0(n) M /= np.median(M) epsilon = 0.1 G, log = ot.bregman.sinkhorn(a, b, M, reg=epsilon, method="sinkhorn_stabilized", log=True) G2, log2 = ot.bregman.sinkhorn(a, b, M, epsilon, method="sinkhorn", log=True) np.testing.assert_allclose(G, G2) def test_implemented_methods(): IMPLEMENTED_METHODS = ['sinkhorn', 'sinkhorn_stabilized'] ONLY_1D_methods = ['greenkhorn', 'sinkhorn_epsilon_scaling'] NOT_VALID_TOKENS = ['foo'] n = 3 rng = np.random.RandomState(42) x = 
rng.randn(n, 2) a = ot.utils.unif(n) b = ot.utils.unif(n) A = rng.rand(n, 2) M = ot.dist(x, x) epsilon = 1. for method in IMPLEMENTED_METHODS: ot.bregman.sinkhorn(a, b, M, epsilon, method=method) ot.bregman.sinkhorn2(a, b, M, epsilon, method=method) ot.bregman.barycenter(A, M, reg=epsilon, method=method) with pytest.raises(ValueError): for method in set(NOT_VALID_TOKENS): ot.bregman.sinkhorn(a, b, M, epsilon, method=method) ot.bregman.sinkhorn2(a, b, M, epsilon, method=method) ot.bregman.barycenter(A, M, reg=epsilon, method=method) for method in ONLY_1D_methods: ot.bregman.sinkhorn(a, b, M, epsilon, method=method) with pytest.raises(ValueError): ot.bregman.sinkhorn2(a, b, M, epsilon, method=method)
true
true
f70df122acd2a8ee313bdd36f2a52d143735fdcf
7,347
py
Python
blog/views.py
maretaatmadja/django_patronus
fe34e1a7c40b535165ef4e752adb61e4c41c32ae
[ "MIT" ]
null
null
null
blog/views.py
maretaatmadja/django_patronus
fe34e1a7c40b535165ef4e752adb61e4c41c32ae
[ "MIT" ]
8
2021-03-30T13:46:40.000Z
2022-03-12T00:35:09.000Z
blog/views.py
maretaatmadja/django_patronus
fe34e1a7c40b535165ef4e752adb61e4c41c32ae
[ "MIT" ]
null
null
null
from django.shortcuts import render, get_object_or_404 from .models import Post, Comment from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger from .forms import EmailPostForm, CommentForm, SearchForm from django.core.mail import send_mail from taggit.models import Tag from django.db.models import Count from django.contrib.postgres.search import (SearchVector, SearchQuery, SearchRank) from django.contrib.auth.decorators import login_required from django.contrib.auth.mixins import LoginRequiredMixin from django.views.generic import ListView, FormView, CreateView, UpdateView, DeleteView from django.urls import reverse_lazy from django.utils.text import slugify @login_required() def post_list(request, tag_slug=None): #posts = Post.published.all() object_list = Post.published.all() tag = None if tag_slug: tag = get_object_or_404(Tag, slug=tag_slug) object_list = object_list.filter(tags__in=[tag]) paginator = Paginator(object_list, 2) page = request.GET.get('page') try: posts = paginator.page(page) #tolong paginator kirimin page dengan halaman [page] except PageNotAnInteger: posts = paginator.page(1) #kl pagenya gaada, kirim page hal 1 except EmptyPage: posts = paginator.page(paginator.num_pages) return render(request, 'blog/post/list.html', {'posts': posts, 'page': page, 'tag': tag}) #return http response class PostListView(LoginRequiredMixin, ListView): queryset = Post.published.all() context_object_name = 'posts' paginate_by = 2 template_name = 'blog/post/list.html' def get_queryset(self): qs = super().get_queryset() tag_slug = self.kwargs.get('tag_slug') if tag_slug: tag = get_object_or_404(Tag, slug=tag_slug) qs = qs.filter(tags__in=[tag]) self.tag = tag else: self.tag = None return qs def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) if self.tag: context['tag'] = self.tag return context @login_required() def post_detail(request, year, month, day, post): post = get_object_or_404(Post, slug=post, status='published', publish__year=year, publish__month=month, publish__day=day) comments = post.comments.filter(active=True) new_comment = None if request.method == 'POST': comment_form = CommentForm(data=request.POST) if comment_form.is_valid(): new_comment = comment_form.save(commit=False) new_comment.post = post new_comment.save() else: comment_form = CommentForm() post_tags_ids = post.tags.values_list('id', flat=True) #biar dapet tupples id yg flat (1,2,3) similar_posts = Post.published.filter(tags__in=post_tags_ids).exclude(id=post.id) similar_posts = similar_posts.annotate(same_tags=Count('tags')).order_by('-same_tags', '-publish')[:4] return render(request, 'blog/post/detail.html', {'post': post, 'comments': comments, 'new_comment': new_comment, 'comment_form': comment_form, 'similar_posts': similar_posts}) class PostDetailView(LoginRequiredMixin, FormView): form_class = CommentForm template_name = 'blog/post/detail.html' def get_initial(self): pk = self.kwargs.get('pk') slug = self.kwargs.get('slug') self.post = get_object_or_404(Post, pk=pk, slug=slug) self.comments = self.post.comments.filter(active=True) self.new_comment = None post_tags_ids = self.post.tags.values_list('id', flat=True) #biar dapet tupples id yg flat (1,2,3) similar_posts = Post.published.filter(tags__in=post_tags_ids).exclude(id=self.post.id) self.similar_posts = similar_posts.annotate(same_tags=Count('tags')).order_by('-same_tags', '-publish')[:4] return super().get_initial() def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) 
context['post'] = self.post context['comments'] = self.comments context['similar_posts'] = self.similar_posts return context def form_valid(self, form): new_comment = form.save(commit=False) new_comment.post = self.post new_comment.save() context = self.get_context_data() context['new_comment'] = new_comment return render(self.request, self.template_name, context=context) @login_required() def post_share(request, post_id): post = get_object_or_404(Post, id=post_id, status='published') sent = False if request.method == 'POST': form = EmailPostForm(request.POST) #form terisi if form.is_valid(): cd = form.cleaned_data post_url = request.build_absolute_uri(post.get_absolute_url()) subject = f"{cd['name']} recommends you read {post.title}" #f itu untuk format bisa terima variable name, title message = (f"Read {post.title} at {post_url}\n\n" f"{cd['name']} comments: {cd['comments']}") send_mail(subject, message, 'django.patronus@gmail.com', [cd['to'],]) sent = True else: form = EmailPostForm() #form baru return render(request, 'blog/post/share.html', { 'post' : post, 'form' : form, 'sent' : sent }) @login_required() def post_search(request): if 'query' in request.GET: form = SearchForm(request.GET) if form.is_valid(): query = form.cleaned_data['query'] #results = Post.published.annotate(search = SearchVector('title', 'body')).filter(search=query) #search vector: bisa search from multiple fields search_vector = SearchVector('title', 'body') search_query = SearchQuery(query) results = Post.published.annotate(search=search_vector, rank=SearchRank(search_vector, search_query)).filter(search=search_query).order_by('-rank') else: form = SearchForm() query = None results = [] return render(request, 'blog/post/search.html', {'form': form, 'query': query, 'results': results}) class PostCreateView(LoginRequiredMixin, CreateView): model = Post fields = ['title', 'body', 'tags'] template_name = 'blog/post/post_form.html' def form_valid(self, form): form.instance.author = self.request.user form.instance.status = 'published' form.instance.slug = slugify(form.instance.title, allow_unicode=True) return super().form_valid(form) class PostUpdateView(LoginRequiredMixin, UpdateView): model = Post fields = ['title', 'body', 'tags'] template_name = 'blog/post/post_form.html' query_pk_and_slug = True def get_queryset(self): qs = super().get_queryset() return qs.filter(author = self.request.user) def form_valid(self, form): form.instance.slug = slugify(form.instance.title, allow_unicode=True) return super().form_valid(form) class PostDeleteView(LoginRequiredMixin, DeleteView): model = Post template_name = 'blog/post/post_confirm_delete.html' success_url = reverse_lazy('blog:post_list') query_pk_and_slug = True def get_queryset(self): qs = super().get_queryset() return qs.filter(author = self.request.user)
37.871134
179
0.670206
from django.shortcuts import render, get_object_or_404 from .models import Post, Comment from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger from .forms import EmailPostForm, CommentForm, SearchForm from django.core.mail import send_mail from taggit.models import Tag from django.db.models import Count from django.contrib.postgres.search import (SearchVector, SearchQuery, SearchRank) from django.contrib.auth.decorators import login_required from django.contrib.auth.mixins import LoginRequiredMixin from django.views.generic import ListView, FormView, CreateView, UpdateView, DeleteView from django.urls import reverse_lazy from django.utils.text import slugify @login_required() def post_list(request, tag_slug=None): object_list = Post.published.all() tag = None if tag_slug: tag = get_object_or_404(Tag, slug=tag_slug) object_list = object_list.filter(tags__in=[tag]) paginator = Paginator(object_list, 2) page = request.GET.get('page') try: posts = paginator.page(page) except PageNotAnInteger: posts = paginator.page(1) except EmptyPage: posts = paginator.page(paginator.num_pages) return render(request, 'blog/post/list.html', {'posts': posts, 'page': page, 'tag': tag}) class PostListView(LoginRequiredMixin, ListView): queryset = Post.published.all() context_object_name = 'posts' paginate_by = 2 template_name = 'blog/post/list.html' def get_queryset(self): qs = super().get_queryset() tag_slug = self.kwargs.get('tag_slug') if tag_slug: tag = get_object_or_404(Tag, slug=tag_slug) qs = qs.filter(tags__in=[tag]) self.tag = tag else: self.tag = None return qs def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) if self.tag: context['tag'] = self.tag return context @login_required() def post_detail(request, year, month, day, post): post = get_object_or_404(Post, slug=post, status='published', publish__year=year, publish__month=month, publish__day=day) comments = post.comments.filter(active=True) new_comment = None if request.method == 'POST': comment_form = CommentForm(data=request.POST) if comment_form.is_valid(): new_comment = comment_form.save(commit=False) new_comment.post = post new_comment.save() else: comment_form = CommentForm() post_tags_ids = post.tags.values_list('id', flat=True) similar_posts = Post.published.filter(tags__in=post_tags_ids).exclude(id=post.id) similar_posts = similar_posts.annotate(same_tags=Count('tags')).order_by('-same_tags', '-publish')[:4] return render(request, 'blog/post/detail.html', {'post': post, 'comments': comments, 'new_comment': new_comment, 'comment_form': comment_form, 'similar_posts': similar_posts}) class PostDetailView(LoginRequiredMixin, FormView): form_class = CommentForm template_name = 'blog/post/detail.html' def get_initial(self): pk = self.kwargs.get('pk') slug = self.kwargs.get('slug') self.post = get_object_or_404(Post, pk=pk, slug=slug) self.comments = self.post.comments.filter(active=True) self.new_comment = None post_tags_ids = self.post.tags.values_list('id', flat=True) similar_posts = Post.published.filter(tags__in=post_tags_ids).exclude(id=self.post.id) self.similar_posts = similar_posts.annotate(same_tags=Count('tags')).order_by('-same_tags', '-publish')[:4] return super().get_initial() def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) context['post'] = self.post context['comments'] = self.comments context['similar_posts'] = self.similar_posts return context def form_valid(self, form): new_comment = form.save(commit=False) new_comment.post = self.post 
new_comment.save() context = self.get_context_data() context['new_comment'] = new_comment return render(self.request, self.template_name, context=context) @login_required() def post_share(request, post_id): post = get_object_or_404(Post, id=post_id, status='published') sent = False if request.method == 'POST': form = EmailPostForm(request.POST) if form.is_valid(): cd = form.cleaned_data post_url = request.build_absolute_uri(post.get_absolute_url()) subject = f"{cd['name']} recommends you read {post.title}" message = (f"Read {post.title} at {post_url}\n\n" f"{cd['name']} comments: {cd['comments']}") send_mail(subject, message, 'django.patronus@gmail.com', [cd['to'],]) sent = True else: form = EmailPostForm() return render(request, 'blog/post/share.html', { 'post' : post, 'form' : form, 'sent' : sent }) @login_required() def post_search(request): if 'query' in request.GET: form = SearchForm(request.GET) if form.is_valid(): query = form.cleaned_data['query'] search_vector = SearchVector('title', 'body') search_query = SearchQuery(query) results = Post.published.annotate(search=search_vector, rank=SearchRank(search_vector, search_query)).filter(search=search_query).order_by('-rank') else: form = SearchForm() query = None results = [] return render(request, 'blog/post/search.html', {'form': form, 'query': query, 'results': results}) class PostCreateView(LoginRequiredMixin, CreateView): model = Post fields = ['title', 'body', 'tags'] template_name = 'blog/post/post_form.html' def form_valid(self, form): form.instance.author = self.request.user form.instance.status = 'published' form.instance.slug = slugify(form.instance.title, allow_unicode=True) return super().form_valid(form) class PostUpdateView(LoginRequiredMixin, UpdateView): model = Post fields = ['title', 'body', 'tags'] template_name = 'blog/post/post_form.html' query_pk_and_slug = True def get_queryset(self): qs = super().get_queryset() return qs.filter(author = self.request.user) def form_valid(self, form): form.instance.slug = slugify(form.instance.title, allow_unicode=True) return super().form_valid(form) class PostDeleteView(LoginRequiredMixin, DeleteView): model = Post template_name = 'blog/post/post_confirm_delete.html' success_url = reverse_lazy('blog:post_list') query_pk_and_slug = True def get_queryset(self): qs = super().get_queryset() return qs.filter(author = self.request.user)
true
true
f70df1259d7f6ab6e9e3d00d46b7fa3520b97d6f
5,681
py
Python
asSeq2Int.py
chriskhng/asSeq2Int
d8b6b6c58561ec4e3f91fc35dc56e4d84cc1d0fc
[ "MIT" ]
null
null
null
asSeq2Int.py
chriskhng/asSeq2Int
d8b6b6c58561ec4e3f91fc35dc56e4d84cc1d0fc
[ "MIT" ]
null
null
null
asSeq2Int.py
chriskhng/asSeq2Int
d8b6b6c58561ec4e3f91fc35dc56e4d84cc1d0fc
[ "MIT" ]
null
null
null
## - ## - asSeq2Int ## - ## - Created by Chris Ng 20201214 ## - input the user SoI here: - ## ## - post-CIBR version will prompt the user in the terminal for an SoI user_input_SoI = "mxxt" ## - The following can be used to test the code ## SxSxSSXXSXSS ## s.sXs ## S.s ## m.xT ## - #### Initialize empty lists lst_rawFASTA = [] proteinSeqOnly = [] lst_FASTAentry = [] lst_masterFASTA = [] lst_results = [['Protein Name', 'Sequence Hit', 'Position of Hit']] ## - #### Part 1 ## - Part 1 reformat the FASTA file into list of list for Part 2 ## - It first opens the file with open('dummy1.FASTA') as FASTA_file: #Actual code will ask for the name of the .FASTA FASTA_data = FASTA_file.read() ## - File can close now ## - Splits the file into a list using "*/n>" as the delimiter ## - lst_rawFASTA is a list of each protein entry, containing protein name, sequence, etc, in a single string lst_rawFASTA = FASTA_data.split('*\n>') ## - relevant info will be pulled out from each protein entry ## - We are interested in the protein name and protein sequence for protein_entry in lst_rawFASTA: ## - The protein name is pulled from each FASTA entry and saved into FASTAproteinNames ## - This is done by indexing from 0 to the first comma (which delimits name from the next entry) FASTAproteinNames = protein_entry[0: protein_entry.find(',')] ## - Next, the protein sequence is pulled from protein_entry. Reformatting is required. ## - The protein sequence in protein_entry is seprated by "\n" ## - First, '\n' is replaced with ", " FASTAprotein_comma = protein_entry.replace("\n", ", ") ## - Next, the beginning of the protein sequence (and end of the previous item) is marked by '*$' for_protein_seqjoin = FASTAprotein_comma.replace('\",', '*$') ## - Then, only the string with sequence info is saved into proteinSeqOnly ## - This is done by indexing from where ("*$" is located + 3) till the end of the string proteinSeqOnly = for_protein_seqjoin[for_protein_seqjoin.find("*$")+3:] ## - Finally, the protein sequence is joined together by removing ', ' ## - proteinSeqFinal is the protein sequence of each protein entry proteinSeqFinal = proteinSeqOnly.replace(', ', '') ## - Now, we put protein name and protein sequence together into a list for each entry ## - First, the protein names are separated into genomic name, common name, and ID name lst_FASTAentry = FASTAproteinNames.split(' ') ## - Next, the protein sequence is appended to end of the names. lst_FASTAentry.append (proteinSeqFinal) ## - Finally, the list for the entry is appended to the growing list of protein entries from the FASTA. lst_masterFASTA.append (lst_FASTAentry) ## - At this point, lst_masterFASTA contains artifacts from the reformatting above. ## - Namely, ">" and "*" remains on the first of first and last of last indices respectively. ## - These artifacts are removed in the following 2 lines. (lst_masterFASTA[0])[0] = (lst_masterFASTA[0])[0].replace('>', '') (lst_masterFASTA[-1])[-1] = (lst_masterFASTA[-1])[-1].replace('*', '') # print(lst_masterFASTA) ## - This list of sublists of protein entries will be used in Part 2. ## - This list will be searched for the user-input SoI to output the name of proteins containing the SoI ## - #### Part 2 ## - Part 2 will search the lst_masterFASTA from Part 1 for the user-input SoI ## - Regular Expression (re) is imported and will be used for the search. 
import re ## - the user input SoI will be .upper, in case user input in lowercase upper_SoI = user_input_SoI.upper() ## - Then, if the user used x to denote "any", it will be replaced by . regex_SoI = upper_SoI.replace("X", ".") ## - the reformated user-input is then placed into RegEx pattern match syntax pattern = '^' + regex_SoI + "$" ## - The protein sequence in each protein entry in lst_masterFASTA will be iterated thru searching for SoI for protein_entry in lst_masterFASTA: ## protein_entry looks like: #protein_entry = ['YAL001C', 'TFC3', 'SGDID:S000000001', 'MVLTIYPDELVQIVSDKI..." protein_seq = protein_entry[3] ## - The following cuts each protein sequence into blocks with the length of the input SoI for i in range(len(protein_seq)-(len(user_input_SoI)-1)): protein_sequence_block =protein_seq[i:(i + len(user_input_SoI))] # The following commented code checks to see if I cut the blocks correctly # print(protein_seq[i:(i+len(pattern)-2)]) result = re.match(pattern, protein_sequence_block) ## - If a protein sequence block matches the user-input sequence, it will print: if result: print(upper_SoI + " found in:") print(protein_entry[1]) # the protein name and ... print("as " + protein_seq[i:(i + len(pattern) - 2)] + ' which starts at position: ' + str(i+1) + "\n") # The following appends to a list that will be used to populate the output .txt file lst_results.append([protein_entry[1], protein_seq[i:(i + len(pattern) - 2)], str(i+1)]) ## - the actual sequence it matched, and which amino acid position the match started in the protein sequence else: continue ## - lst_results is then written into temp_results.csv with csv module import csv with open('temp_results.csv', 'w', newline='') as temp_results_file: writer = csv.writer(temp_results_file) writer.writerows(lst_results) ## - #### The .py code is over for the purposes of the CIBR final project ## - #### Next, temp_results.csv will be copied and renamed to the date-time.csv ## - #### Finally, this renamed csv will be rsync'ed ## - #### (currently set to a temp_dir in the current dir, but can be changed to a local dir).
47.341667
114
0.701637
user_input_SoI = "mxxt" lst_rawFASTA = [] proteinSeqOnly = [] lst_FASTAentry = [] lst_masterFASTA = [] lst_results = [['Protein Name', 'Sequence Hit', 'Position of Hit']] with open('dummy1.FASTA') as FASTA_file: FASTA_data = FASTA_file.read() lst_rawFASTA = FASTA_data.split('*\n>') for protein_entry in lst_rawFASTA: FASTAproteinNames = protein_entry[0: protein_entry.find(',')] FASTAprotein_comma = protein_entry.replace("\n", ", ") for_protein_seqjoin = FASTAprotein_comma.replace('\",', '*$') ## - Then, only the string with sequence info is saved into proteinSeqOnly ## - This is done by indexing from where ("*$" is located + 3) till the end of the string proteinSeqOnly = for_protein_seqjoin[for_protein_seqjoin.find("*$")+3:] ## - Finally, the protein sequence is joined together by removing ', ' ## - proteinSeqFinal is the protein sequence of each protein entry proteinSeqFinal = proteinSeqOnly.replace(', ', '') ## - Now, we put protein name and protein sequence together into a list for each entry ## - First, the protein names are separated into genomic name, common name, and ID name lst_FASTAentry = FASTAproteinNames.split(' ') ## - Next, the protein sequence is appended to end of the names. lst_FASTAentry.append (proteinSeqFinal) ## - Finally, the list for the entry is appended to the growing list of protein entries from the FASTA. lst_masterFASTA.append (lst_FASTAentry) ## - At this point, lst_masterFASTA contains artifacts from the reformatting above. ## - Namely, ">" and "*" remains on the first of first and last of last indices respectively. ## - These artifacts are removed in the following 2 lines. (lst_masterFASTA[0])[0] = (lst_masterFASTA[0])[0].replace('>', '') (lst_masterFASTA[-1])[-1] = (lst_masterFASTA[-1])[-1].replace('*', '') # print(lst_masterFASTA) ## - This list of sublists of protein entries will be used in Part 2. ## - This list will be searched for the user-input SoI to output the name of proteins containing the SoI ## - #### Part 2 ## - Part 2 will search the lst_masterFASTA from Part 1 for the user-input SoI ## - Regular Expression (re) is imported and will be used for the search. import re ## - the user input SoI will be .upper, in case user input in lowercase upper_SoI = user_input_SoI.upper() ## - Then, if the user used x to denote "any", it will be replaced by . regex_SoI = upper_SoI.replace("X", ".") ## - the reformated user-input is then placed into RegEx pattern match syntax pattern = '^' + regex_SoI + "$" ## - The protein sequence in each protein entry in lst_masterFASTA will be iterated thru searching for SoI for protein_entry in lst_masterFASTA: ## protein_entry looks like: #protein_entry = ['YAL001C', 'TFC3', 'SGDID:S000000001', 'MVLTIYPDELVQIVSDKI..." protein_seq = protein_entry[3] ## - The following cuts each protein sequence into blocks with the length of the input SoI for i in range(len(protein_seq)-(len(user_input_SoI)-1)): protein_sequence_block =protein_seq[i:(i + len(user_input_SoI))] # The following commented code checks to see if I cut the blocks correctly # print(protein_seq[i:(i+len(pattern)-2)]) result = re.match(pattern, protein_sequence_block) ## - If a protein sequence block matches the user-input sequence, it will print: if result: print(upper_SoI + " found in:") print(protein_entry[1]) # the protein name and ... 
print("as " + protein_seq[i:(i + len(pattern) - 2)] + ' which starts at position: ' + str(i+1) + "\n") # The following appends to a list that will be used to populate the output .txt file lst_results.append([protein_entry[1], protein_seq[i:(i + len(pattern) - 2)], str(i+1)]) ## - the actual sequence it matched, and which amino acid position the match started in the protein sequence else: continue ## - lst_results is then written into temp_results.csv with csv module import csv with open('temp_results.csv', 'w', newline='') as temp_results_file: writer = csv.writer(temp_results_file) writer.writerows(lst_results) ## - #### The .py code is over for the purposes of the CIBR final project ## - #### Next, temp_results.csv will be copied and renamed to the date-time.csv ## - #### Finally, this renamed csv will be rsync'ed
true
true
f70df2368816798a2248ee008d35ad3b0736fda8
4,715
py
Python
src/tensorforce/tensorforce/core/optimizers/optimizer.py
linus87/drl_shape_optimization
39e6b66bd5b70dfce07e145aafe815071bc1b6fe
[ "MIT" ]
17
2020-12-28T16:25:47.000Z
2022-03-27T18:28:44.000Z
src/tensorforce/tensorforce/core/optimizers/optimizer.py
linus87/drl_shape_optimization
39e6b66bd5b70dfce07e145aafe815071bc1b6fe
[ "MIT" ]
2
2021-04-18T03:40:02.000Z
2022-01-24T08:40:10.000Z
src/tensorforce/tensorforce/core/optimizers/optimizer.py
linus87/drl_shape_optimization
39e6b66bd5b70dfce07e145aafe815071bc1b6fe
[ "MIT" ]
8
2020-12-23T05:59:52.000Z
2022-03-28T12:06:35.000Z
# Copyright 2018 Tensorforce Team. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== import tensorflow as tf from tensorforce import TensorforceError, util from tensorforce.core import Module class Optimizer(Module): """ Base class for optimizers which minimize a not yet further specified expression, usually some kind of loss function. More generally, an optimizer can be considered as some method of updating a set of variables. """ def __init__(self, name, summary_labels=None): super().__init__(name=name, l2_regularization=0.0, summary_labels=summary_labels) def tf_step(self, variables, **kwargs): """ Creates the TensorFlow operations for performing an optimization step on the given variables, including actually changing the values of the variables. Args: variables: List of variables to optimize. **kwargs: Additional arguments depending on the specific optimizer implementation. For instance, often includes `fn_loss` if a loss function is optimized. Returns: List of delta tensors corresponding to the updates for each optimized variable. """ raise NotImplementedError def tf_apply_step(self, variables, deltas): """ Applies the given (and already calculated) step deltas to the variable values. Args: variables: List of variables. deltas: List of deltas of same length. Returns: The step-applied operation. A tf.group of tf.assign_add ops. """ if len(variables) != len(deltas): raise TensorforceError("Invalid variables and deltas lists.") assignments = list() for variable, delta in zip(variables, deltas): assignments.append(tf.assign_add(ref=variable, value=delta)) with tf.control_dependencies(control_inputs=assignments): return util.no_operation() def tf_minimize(self, variables, **kwargs): """ Performs an optimization step. Args: variables: List of variables to optimize. **kwargs: Additional optimizer-specific arguments. The following arguments are used by some optimizers: - arguments: Dict of arguments for callables, like fn_loss. - fn_loss: A callable returning the loss of the current model. - fn_reference: A callable returning the reference values, in case of a comparative loss. - fn_kl_divergence: A callable returning the KL-divergence relative to the current model. - sampled_loss: A sampled loss (integer). - return_estimated_improvement: Returns the estimated improvement resulting from the natural gradient calculation if true. - source_variables: List of source variables to synchronize with. - global_variables: List of global variables to apply the proposed optimization step to. Returns: The optimization operation. 
""" deltas = self.step(variables=variables, **kwargs) for n in range(len(variables)): name = variables[n].name if name[-2:] != ':0': raise TensorforceError.unexpected() deltas[n] = self.add_summary( label=('updates', 'updates-full'), name=(name[:-2] + '-update'), tensor=deltas[n], mean_variance=True ) deltas[n] = self.add_summary( label='updates-full', name=(name[:-2] + '-update'), tensor=deltas[n] ) with tf.control_dependencies(control_inputs=deltas): return util.no_operation() def add_variable(self, name, dtype, shape, is_trainable=False, initializer='zeros'): if is_trainable: raise TensorforceError("Invalid trainable variable.") return super().add_variable( name=name, dtype=dtype, shape=shape, is_trainable=is_trainable, initializer=initializer )
40.299145
99
0.639449
import tensorflow as tf from tensorforce import TensorforceError, util from tensorforce.core import Module class Optimizer(Module): def __init__(self, name, summary_labels=None): super().__init__(name=name, l2_regularization=0.0, summary_labels=summary_labels) def tf_step(self, variables, **kwargs): raise NotImplementedError def tf_apply_step(self, variables, deltas): if len(variables) != len(deltas): raise TensorforceError("Invalid variables and deltas lists.") assignments = list() for variable, delta in zip(variables, deltas): assignments.append(tf.assign_add(ref=variable, value=delta)) with tf.control_dependencies(control_inputs=assignments): return util.no_operation() def tf_minimize(self, variables, **kwargs): deltas = self.step(variables=variables, **kwargs) for n in range(len(variables)): name = variables[n].name if name[-2:] != ':0': raise TensorforceError.unexpected() deltas[n] = self.add_summary( label=('updates', 'updates-full'), name=(name[:-2] + '-update'), tensor=deltas[n], mean_variance=True ) deltas[n] = self.add_summary( label='updates-full', name=(name[:-2] + '-update'), tensor=deltas[n] ) with tf.control_dependencies(control_inputs=deltas): return util.no_operation() def add_variable(self, name, dtype, shape, is_trainable=False, initializer='zeros'): if is_trainable: raise TensorforceError("Invalid trainable variable.") return super().add_variable( name=name, dtype=dtype, shape=shape, is_trainable=is_trainable, initializer=initializer )
true
true
f70df3e61f4e512407213856daa0ae3a575ea648
415
py
Python
sjfxjc/foundations-for-analytics-with-python-master/csv/pandas_value_meets_condition.py
SaronZhou/python
40d73b49b9b17542c73a3c09d28e479d2fefcde3
[ "MIT" ]
null
null
null
sjfxjc/foundations-for-analytics-with-python-master/csv/pandas_value_meets_condition.py
SaronZhou/python
40d73b49b9b17542c73a3c09d28e479d2fefcde3
[ "MIT" ]
null
null
null
sjfxjc/foundations-for-analytics-with-python-master/csv/pandas_value_meets_condition.py
SaronZhou/python
40d73b49b9b17542c73a3c09d28e479d2fefcde3
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 import pandas as pd import sys input_file = sys.argv[1] output_file = sys.argv[2] data_frame = pd.read_csv(input_file) data_frame['Cost'] = data_frame['Cost'].str.strip('$').astype(float) data_frame_value_meets_condition = data_frame.loc[(data_frame['Supplier Name']\ .str.contains('Z')) | (data_frame['Cost'] > 600.0), :] data_frame_value_meets_condition.to_csv(output_file, index=False)
29.642857
79
0.749398
import pandas as pd import sys input_file = sys.argv[1] output_file = sys.argv[2] data_frame = pd.read_csv(input_file) data_frame['Cost'] = data_frame['Cost'].str.strip('$').astype(float) data_frame_value_meets_condition = data_frame.loc[(data_frame['Supplier Name']\ .str.contains('Z')) | (data_frame['Cost'] > 600.0), :] data_frame_value_meets_condition.to_csv(output_file, index=False)
true
true
f70df46d75e27c5da8419f866a2137f10d639440
1,708
py
Python
superglue_data_splitting.py
xuqiantong/pet
2287b6e1cea7f3cc1c8a3ba3f07c470a708f10f3
[ "Apache-2.0" ]
null
null
null
superglue_data_splitting.py
xuqiantong/pet
2287b6e1cea7f3cc1c8a3ba3f07c470a708f10f3
[ "Apache-2.0" ]
null
null
null
superglue_data_splitting.py
xuqiantong/pet
2287b6e1cea7f3cc1c8a3ba3f07c470a708f10f3
[ "Apache-2.0" ]
null
null
null
import os import random import shutil # TASK_DEV_SIZES = {"boolq": 500, "cb": 50, "copa": 50, "multirc": 50, "record": 7500, "rte": 250, "wic": 100, "wsc": 50} TASK_DEV_SIZES = {"BoolQ": 500, "CB": 50, "COPA": 50, "MultiRC": 50, "ReCoRD": 7500, "RTE": 250, "WiC": 100, "WSC": 50} def file_len(fname): count = 0 with open(fname) as file: for line in file: if not line: break else: count += 1 return count if __name__ == "__main__": for task_name, size in TASK_DEV_SIZES.items(): try: os.makedirs(os.path.join("split_data", task_name.lower())) except FileExistsError: pass train_file_path = os.path.join("data", task_name, "train.jsonl") test_file_path = os.path.join("data", task_name, "val.jsonl") new_train_file_path = os.path.join("split_data", task_name.lower(), "train.jsonl") dev_file_path = os.path.join("split_data", task_name.lower(), "val.jsonl") new_test_file_path = os.path.join("split_data", task_name.lower(), "test.jsonl") total_lines = file_len(train_file_path) print(f"{task_name}: {size} out of {total_lines}") indexes = list(range(total_lines)) dev_indices = random.sample(indexes, size) with open(train_file_path, encoding="utf8") as f, open(new_train_file_path, 'w', encoding="utf8") as g, open( dev_file_path, 'w', encoding="utf8") as h: for i, line in enumerate(f): if i in dev_indices: h.write(line) else: g.write(line) shutil.copy(test_file_path, new_test_file_path)
39.72093
121
0.584309
import os import random import shutil TASK_DEV_SIZES = {"BoolQ": 500, "CB": 50, "COPA": 50, "MultiRC": 50, "ReCoRD": 7500, "RTE": 250, "WiC": 100, "WSC": 50} def file_len(fname): count = 0 with open(fname) as file: for line in file: if not line: break else: count += 1 return count if __name__ == "__main__": for task_name, size in TASK_DEV_SIZES.items(): try: os.makedirs(os.path.join("split_data", task_name.lower())) except FileExistsError: pass train_file_path = os.path.join("data", task_name, "train.jsonl") test_file_path = os.path.join("data", task_name, "val.jsonl") new_train_file_path = os.path.join("split_data", task_name.lower(), "train.jsonl") dev_file_path = os.path.join("split_data", task_name.lower(), "val.jsonl") new_test_file_path = os.path.join("split_data", task_name.lower(), "test.jsonl") total_lines = file_len(train_file_path) print(f"{task_name}: {size} out of {total_lines}") indexes = list(range(total_lines)) dev_indices = random.sample(indexes, size) with open(train_file_path, encoding="utf8") as f, open(new_train_file_path, 'w', encoding="utf8") as g, open( dev_file_path, 'w', encoding="utf8") as h: for i, line in enumerate(f): if i in dev_indices: h.write(line) else: g.write(line) shutil.copy(test_file_path, new_test_file_path)
true
true
f70df4d4be9ecd46e198b7ef771b1a26644c0a2a
9,417
py
Python
src/ui/main_window.py
xiqiuyimeng/Dubbo-test-client
4c7b76767fc5163d086fc5a4a7fc842704e10978
[ "Apache-2.0" ]
null
null
null
src/ui/main_window.py
xiqiuyimeng/Dubbo-test-client
4c7b76767fc5163d086fc5a4a7fc842704e10978
[ "Apache-2.0" ]
null
null
null
src/ui/main_window.py
xiqiuyimeng/Dubbo-test-client
4c7b76767fc5163d086fc5a4a7fc842704e10978
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- from PyQt5 import QtWidgets, QtCore from PyQt5.QtCore import Qt from PyQt5.QtGui import QCursor from PyQt5.QtWidgets import QVBoxLayout, QToolBar, QSplitter from src.constant.conn_dialog_constant import ADD_CONN_MENU, EDIT_CONN_MENU from src.constant.main_constant import LOCATION_BUTTON, TAB_ID_SEPARATOR, TREE_TOP_TEXT from src.function.db.conn_sqlite import Connection from src.ui.async_func.async_reopen_item import AsyncReopenManager from src.ui.button.label_button import LabelButton from src.ui.dialog.conn.conn_dialog import ConnDialog from src.ui.func.common import keep_center, close_sqlite from src.ui.func.menu_bar import fill_menu_bar from src.ui.func.tool_bar import fill_tool_bar from src.ui.func.tree import tree_node_factory, Context from src.ui.tab.tab_bar import MyTabBar from src.ui.tab.tab_widget import MyTabWidget from src.ui.tree.my_tree import MyTreeWidget _author_ = 'luwt' _date_ = '2021/10/31 17:39' class MainWindow(QtWidgets.QMainWindow): def __init__(self, screen_rect): super().__init__() # 当前屏幕的分辨率大小 self.desktop_screen_rect = screen_rect # 创建主控件,用以包含所有内容 self.main_widget = QtWidgets.QWidget() # 主控件中的布局 self.main_layout = QVBoxLayout() # 主部件 self.central_widget = QtWidgets.QWidget() self.central_widget.setObjectName("central_widget") # 主部件布局为水平布局 self.horizontalLayout = QtWidgets.QHBoxLayout(self.central_widget) self.horizontalLayout.setObjectName("horizontalLayout") self.main_splitter = QSplitter() self.main_splitter.setOrientation(Qt.Horizontal) self.main_splitter.setObjectName("main_splitter") self.horizontalLayout.addWidget(self.main_splitter) self.horizontalLayout.setSpacing(0) # 左边树结构frame self.tree_frame = QtWidgets.QFrame(self.main_splitter) self.tree_frame.setObjectName("tree_frame") self.tree_layout = QtWidgets.QVBoxLayout(self.tree_frame) self.tree_layout.setObjectName("tree_layout") self.tree_layout.setSpacing(0) self.tree_layout.setContentsMargins(0, 0, 0, 0) # 树顶部的工具栏 self.tree_header_widget = QtWidgets.QWidget(self.tree_frame) self.tree_layout.addWidget(self.tree_header_widget) self.tree_header_layout = QtWidgets.QGridLayout(self.tree_header_widget) # 左、右、顶部边距设为0 self.tree_header_layout.setContentsMargins(0, 0, 0, self.tree_header_layout.contentsMargins().bottom()) # 树标题 self.tree_tool_header = QtWidgets.QLabel(self.tree_header_widget) self.tree_tool_header.setText(TREE_TOP_TEXT) self.tree_header_layout.addWidget(self.tree_tool_header, 0, 0, 1, 8) # 定位按钮 self.tree_tool_location_button = LabelButton(self.tree_header_widget) self.tree_tool_location_button.setObjectName("tree_tool_location_button") # 默认不可用 self.tree_tool_location_button.setEnabled(False) self.tree_header_layout.addWidget(self.tree_tool_location_button, 0, 8, 1, 1) # 左边树结构 self.tree_widget = MyTreeWidget(self.tree_frame, self) self.tree_widget.setObjectName("tree_widget") self.tree_widget.headerItem().setHidden(True) self.tree_layout.addWidget(self.tree_widget) # 右边tab区frame self.tab_frame = QtWidgets.QFrame(self.main_splitter) self.tab_frame.setObjectName("tab_frame") self.tab_layout = QtWidgets.QVBoxLayout(self.tab_frame) self.tab_layout.setObjectName("tab_layout") # 右边tab区 self.tab_widget = MyTabWidget(self.tab_frame, main_window=self) self.tab_widget.setObjectName("tab_widget") self.tab_bar = MyTabBar(self.tab_widget) self.tab_bar.setObjectName("tab_bar") self.tab_widget.setTabBar(self.tab_bar) self.tab_layout.addWidget(self.tab_widget) self.tab_layout.setSpacing(0) self.tab_layout.setContentsMargins(0, 0, 0, 0) # 菜单栏 self.menubar = QtWidgets.QMenuBar(self) 
self.menubar.setObjectName("menubar") self.setMenuBar(self.menubar) # 工具栏 self.toolBar = QToolBar(self) self.toolBar.setObjectName("toolBar") self.addToolBar(Qt.TopToolBarArea, self.toolBar) # 状态栏 self.statusbar = QtWidgets.QStatusBar(self) self.statusbar.setObjectName("statusbar") self.setStatusBar(self.statusbar) self._translate = QtCore.QCoreApplication.translate self.reopen_manager = ... self.setup_ui() self.translate_ui() self.bind_action() def setup_ui(self): self.setObjectName("MainWindow") self.setWindowFlags(Qt.WindowTitleHint) # 按当前分辨率计算窗口大小 self.resize(self.desktop_screen_rect.width() * 0.65, self.desktop_screen_rect.height() * 0.7) # 窗体居中 keep_center(self, self.desktop_screen_rect) # 设置所有控件间距为0 self.main_layout.setSpacing(0) self.main_layout.setContentsMargins(0, 0, 0, 0) self.main_widget.setLayout(self.main_layout) self.setCentralWidget(self.main_widget) self.main_splitter.setStretchFactor(0, 2) self.main_splitter.setStretchFactor(1, 9) # 填充菜单栏 fill_menu_bar(self) # 填充工具栏 fill_tool_bar(self) # 设置名称显示在图标下面(默认本来是只显示图标) self.toolBar.setToolButtonStyle(Qt.ToolButtonTextUnderIcon) self.main_layout.addWidget(self.central_widget) def bind_action(self): # 异步重新打开上次退出时的工作状态 self.reopen_manager = AsyncReopenManager(self, self.tree_widget, self.tab_widget) self.reopen_manager.start() # 双击树节点事件 self.tree_widget.doubleClicked.connect(self.get_tree_list) # 右击事件 self.tree_widget.setContextMenuPolicy(Qt.CustomContextMenu) self.tree_widget.customContextMenuRequested.connect(self.right_click_menu) # tab页清除或打开tab信号 self.tab_widget.opened_tab_signal.connect(lambda: self.tree_tool_location_button.setEnabled(True)) self.tab_widget.clear_tabs_signal.connect(lambda: self.tree_tool_location_button.setEnabled(False)) # 定位 self.tree_tool_location_button.clicked.connect(self.location_method) def connect_rest_signal(self): # 点击、展开、收起节点,都需要让列根据内容自适应,从而可以保证水平滚动条 self.tree_widget.doubleClicked.connect(self.handle_expanded_changed) self.tree_widget.expanded.connect(self.handle_expanded_changed) self.tree_widget.collapsed.connect(self.handle_expanded_changed) def translate_ui(self): self.setWindowTitle(self._translate("MainWindow", "Dubbo-test-client")) self.tree_tool_location_button.setText(LOCATION_BUTTON) def add_conn(self): """打开添加连接窗口""" conn_info = Connection(*((None,) * len(Connection._fields))) conn_dialog = ConnDialog(conn_info, ADD_CONN_MENU, self.geometry(), self.tree_widget) conn_dialog.exec() def edit_conn(self, conn_info, tree_item): """打开编辑连接窗口""" conn_dialog = ConnDialog(conn_info, EDIT_CONN_MENU, self.geometry(), tree_item=tree_item) conn_dialog.exec() def get_tree_list(self): """获取树的子节点,双击触发,连接 -> service -> method,按顺序读取出来""" item = self.tree_widget.currentItem() node = tree_node_factory(item) Context(node).open_item(item, self) def handle_expanded_changed(self, index): # 根据当前内容决定列宽度 self.tree_widget.tree_column_resize() item = self.tree_widget.itemFromIndex(index) # method节点没有expanded属性,没有必要进行处理,监听conn和service节点就可以 if item.parent() is None or item.parent().parent() is None: expanded = self.tree_widget.itemFromIndex(index).isExpanded() self.tree_widget.update_expanded(item.text(1), expanded, item) def right_click_menu(self, pos): """ 右键菜单功能,实现右键弹出菜单功能 :param pos:右键的坐标位置 """ # 获取当前元素,只有在元素上才显示菜单 item = self.tree_widget.itemAt(pos) if item: # 生成右键菜单 menu = QtWidgets.QMenu() node = tree_node_factory(item) menu_names = Context(node).get_menu_names(item, self) [menu.addAction(QtWidgets.QAction(option, menu)) for option in menu_names] # 右键菜单点击事件 menu.triggered.connect(lambda action: 
Context(node).handle_menu_func(item, action.text(), self)) # 右键菜单弹出位置跟随焦点位置 menu.exec_(QCursor.pos()) def del_history(self): item = self.tree_widget.currentItem() node = tree_node_factory(item) Context(node).del_history(item, self) def location_method(self): tab_id = self.tab_widget.currentWidget().property("tab_id") # 根据tab_id构造特点,逐级确定位置,根据分隔符拆分,拆分完,应该是连接id,接口名,方法名 self.tree_widget.tab_id_splits = tab_id.split(TAB_ID_SEPARATOR) conn_id = self.tree_widget.tab_id_splits[0] # 遍历根节点 for conn_idx in range(self.tree_widget.topLevelItemCount()): conn_item = self.tree_widget.topLevelItem(conn_idx) # 找到对应的连接节点,遍历 if int(conn_id) == eval(conn_item.text(2)).get("id"): method_item = self.tree_widget.recursive_search_item(conn_item) self.tree_widget.set_selected_focus(method_item) def close(self): close_sqlite() self.tab_widget.close() super().close()
42.040179
111
0.693533
from PyQt5 import QtWidgets, QtCore from PyQt5.QtCore import Qt from PyQt5.QtGui import QCursor from PyQt5.QtWidgets import QVBoxLayout, QToolBar, QSplitter from src.constant.conn_dialog_constant import ADD_CONN_MENU, EDIT_CONN_MENU from src.constant.main_constant import LOCATION_BUTTON, TAB_ID_SEPARATOR, TREE_TOP_TEXT from src.function.db.conn_sqlite import Connection from src.ui.async_func.async_reopen_item import AsyncReopenManager from src.ui.button.label_button import LabelButton from src.ui.dialog.conn.conn_dialog import ConnDialog from src.ui.func.common import keep_center, close_sqlite from src.ui.func.menu_bar import fill_menu_bar from src.ui.func.tool_bar import fill_tool_bar from src.ui.func.tree import tree_node_factory, Context from src.ui.tab.tab_bar import MyTabBar from src.ui.tab.tab_widget import MyTabWidget from src.ui.tree.my_tree import MyTreeWidget _author_ = 'luwt' _date_ = '2021/10/31 17:39' class MainWindow(QtWidgets.QMainWindow): def __init__(self, screen_rect): super().__init__() self.desktop_screen_rect = screen_rect self.main_widget = QtWidgets.QWidget() self.main_layout = QVBoxLayout() self.central_widget = QtWidgets.QWidget() self.central_widget.setObjectName("central_widget") self.horizontalLayout = QtWidgets.QHBoxLayout(self.central_widget) self.horizontalLayout.setObjectName("horizontalLayout") self.main_splitter = QSplitter() self.main_splitter.setOrientation(Qt.Horizontal) self.main_splitter.setObjectName("main_splitter") self.horizontalLayout.addWidget(self.main_splitter) self.horizontalLayout.setSpacing(0) self.tree_frame = QtWidgets.QFrame(self.main_splitter) self.tree_frame.setObjectName("tree_frame") self.tree_layout = QtWidgets.QVBoxLayout(self.tree_frame) self.tree_layout.setObjectName("tree_layout") self.tree_layout.setSpacing(0) self.tree_layout.setContentsMargins(0, 0, 0, 0) self.tree_header_widget = QtWidgets.QWidget(self.tree_frame) self.tree_layout.addWidget(self.tree_header_widget) self.tree_header_layout = QtWidgets.QGridLayout(self.tree_header_widget) self.tree_header_layout.setContentsMargins(0, 0, 0, self.tree_header_layout.contentsMargins().bottom()) self.tree_tool_header = QtWidgets.QLabel(self.tree_header_widget) self.tree_tool_header.setText(TREE_TOP_TEXT) self.tree_header_layout.addWidget(self.tree_tool_header, 0, 0, 1, 8) self.tree_tool_location_button = LabelButton(self.tree_header_widget) self.tree_tool_location_button.setObjectName("tree_tool_location_button") self.tree_tool_location_button.setEnabled(False) self.tree_header_layout.addWidget(self.tree_tool_location_button, 0, 8, 1, 1) self.tree_widget = MyTreeWidget(self.tree_frame, self) self.tree_widget.setObjectName("tree_widget") self.tree_widget.headerItem().setHidden(True) self.tree_layout.addWidget(self.tree_widget) self.tab_frame = QtWidgets.QFrame(self.main_splitter) self.tab_frame.setObjectName("tab_frame") self.tab_layout = QtWidgets.QVBoxLayout(self.tab_frame) self.tab_layout.setObjectName("tab_layout") self.tab_widget = MyTabWidget(self.tab_frame, main_window=self) self.tab_widget.setObjectName("tab_widget") self.tab_bar = MyTabBar(self.tab_widget) self.tab_bar.setObjectName("tab_bar") self.tab_widget.setTabBar(self.tab_bar) self.tab_layout.addWidget(self.tab_widget) self.tab_layout.setSpacing(0) self.tab_layout.setContentsMargins(0, 0, 0, 0) self.menubar = QtWidgets.QMenuBar(self) self.menubar.setObjectName("menubar") self.setMenuBar(self.menubar) self.toolBar = QToolBar(self) self.toolBar.setObjectName("toolBar") self.addToolBar(Qt.TopToolBarArea, self.toolBar) 
self.statusbar = QtWidgets.QStatusBar(self) self.statusbar.setObjectName("statusbar") self.setStatusBar(self.statusbar) self._translate = QtCore.QCoreApplication.translate self.reopen_manager = ... self.setup_ui() self.translate_ui() self.bind_action() def setup_ui(self): self.setObjectName("MainWindow") self.setWindowFlags(Qt.WindowTitleHint) self.resize(self.desktop_screen_rect.width() * 0.65, self.desktop_screen_rect.height() * 0.7) keep_center(self, self.desktop_screen_rect) self.main_layout.setSpacing(0) self.main_layout.setContentsMargins(0, 0, 0, 0) self.main_widget.setLayout(self.main_layout) self.setCentralWidget(self.main_widget) self.main_splitter.setStretchFactor(0, 2) self.main_splitter.setStretchFactor(1, 9) fill_menu_bar(self) fill_tool_bar(self) self.toolBar.setToolButtonStyle(Qt.ToolButtonTextUnderIcon) self.main_layout.addWidget(self.central_widget) def bind_action(self): self.reopen_manager = AsyncReopenManager(self, self.tree_widget, self.tab_widget) self.reopen_manager.start() self.tree_widget.doubleClicked.connect(self.get_tree_list) self.tree_widget.setContextMenuPolicy(Qt.CustomContextMenu) self.tree_widget.customContextMenuRequested.connect(self.right_click_menu) self.tab_widget.opened_tab_signal.connect(lambda: self.tree_tool_location_button.setEnabled(True)) self.tab_widget.clear_tabs_signal.connect(lambda: self.tree_tool_location_button.setEnabled(False)) self.tree_tool_location_button.clicked.connect(self.location_method) def connect_rest_signal(self): self.tree_widget.doubleClicked.connect(self.handle_expanded_changed) self.tree_widget.expanded.connect(self.handle_expanded_changed) self.tree_widget.collapsed.connect(self.handle_expanded_changed) def translate_ui(self): self.setWindowTitle(self._translate("MainWindow", "Dubbo-test-client")) self.tree_tool_location_button.setText(LOCATION_BUTTON) def add_conn(self): conn_info = Connection(*((None,) * len(Connection._fields))) conn_dialog = ConnDialog(conn_info, ADD_CONN_MENU, self.geometry(), self.tree_widget) conn_dialog.exec() def edit_conn(self, conn_info, tree_item): conn_dialog = ConnDialog(conn_info, EDIT_CONN_MENU, self.geometry(), tree_item=tree_item) conn_dialog.exec() def get_tree_list(self): item = self.tree_widget.currentItem() node = tree_node_factory(item) Context(node).open_item(item, self) def handle_expanded_changed(self, index): self.tree_widget.tree_column_resize() item = self.tree_widget.itemFromIndex(index) if item.parent() is None or item.parent().parent() is None: expanded = self.tree_widget.itemFromIndex(index).isExpanded() self.tree_widget.update_expanded(item.text(1), expanded, item) def right_click_menu(self, pos): item = self.tree_widget.itemAt(pos) if item: menu = QtWidgets.QMenu() node = tree_node_factory(item) menu_names = Context(node).get_menu_names(item, self) [menu.addAction(QtWidgets.QAction(option, menu)) for option in menu_names] menu.triggered.connect(lambda action: Context(node).handle_menu_func(item, action.text(), self)) menu.exec_(QCursor.pos()) def del_history(self): item = self.tree_widget.currentItem() node = tree_node_factory(item) Context(node).del_history(item, self) def location_method(self): tab_id = self.tab_widget.currentWidget().property("tab_id") self.tree_widget.tab_id_splits = tab_id.split(TAB_ID_SEPARATOR) conn_id = self.tree_widget.tab_id_splits[0] for conn_idx in range(self.tree_widget.topLevelItemCount()): conn_item = self.tree_widget.topLevelItem(conn_idx) if int(conn_id) == eval(conn_item.text(2)).get("id"): method_item = 
self.tree_widget.recursive_search_item(conn_item) self.tree_widget.set_selected_focus(method_item) def close(self): close_sqlite() self.tab_widget.close() super().close()
true
true
f70df560d3b7759360d9cd45c31fd4d6debd635b
2,441
py
Python
simshop/builders/IcarusVerilog.py
inneralien/SimShop
95d0d4fa66a4d20cc2552fa1444915f36ad73926
[ "BSD-2-Clause-FreeBSD" ]
1
2020-07-27T05:05:21.000Z
2020-07-27T05:05:21.000Z
simshop/builders/IcarusVerilog.py
inneralien/SimShop
95d0d4fa66a4d20cc2552fa1444915f36ad73926
[ "BSD-2-Clause-FreeBSD" ]
null
null
null
simshop/builders/IcarusVerilog.py
inneralien/SimShop
95d0d4fa66a4d20cc2552fa1444915f36ad73926
[ "BSD-2-Clause-FreeBSD" ]
null
null
null
# Copyright 2010-2011, RTLCores. All rights reserved. # http://rtlcores.com # See LICENSE.txt from VerilogSim import VerilogSim class IcarusVerilog(VerilogSim): """ Icarus Verilog class to build a compile command and a simulation command. Inherits VerilogSim Defaults: TIMESCALE : 1ns / 10ps OUTFILE : sim DUMPFILE : dump.vcd WARN : all """ def __init__(self, cfg): """ """ VerilogSim.__init__(self, cfg) self.cfg = cfg ## Default flags specific to Icarus Verilog ## and any required list comprehension commands # self['compcmd'] = ['iverilog'] # self['simcmd'] = ['vvp'] # -n = non-interactive mode self['builddir'] = ['run'] self['warn'] = ['all'] self['warn'].cmd = lambda x: self._prepend('-W', x) self['outfile'] = [self.cfg.outfile] self['outfile'].cmd = lambda x: self._prepend('-o', x) ## Run the populate method to do the cfg conversion ## Populate will overwrite any flags defined locally if they exist ## in the config file self.populate() # The test_files need to come before the rtl_files on the command # line or it exposes a strange thing where Icarus will detect an # rtl file that doesn't exist, but not return a non-zero exit code. # This causes the sim executable to be generated and run but there # is nothing to run, so it looks like it completes successfully. def buildCompCmd(self): self.comp_cmd = self['compcmd'] + \ self['warn'].conv() + \ self['outfile'].conv() + \ self['defines'].conv() + \ self['rtl_inc_dirs'].conv() + \ self['test_inc_dirs'].conv() + \ self['test_files'].conv() + \ self['rtl_files'].conv() + \ [self.cfg.auto_test] # [self.cfg['auto_test']] self.cmds.append(self.comp_cmd) def buildSimCmd(self): log = '-l' + self.cfg.build_path + '/' + self.cfg['logfile'] self.sim_cmd = self['simcmd'] + \ ['-n'] + \ [log] + \ self['outfile'] + \ self['plusargs'].conv() self.cmds.append(self.sim_cmd)
36.432836
75
0.526424
from VerilogSim import VerilogSim class IcarusVerilog(VerilogSim): def __init__(self, cfg): VerilogSim.__init__(self, cfg) self.cfg = cfg self['builddir'] = ['run'] self['warn'] = ['all'] self['warn'].cmd = lambda x: self._prepend('-W', x) self['outfile'] = [self.cfg.outfile] self['outfile'].cmd = lambda x: self._prepend('-o', x) self.populate() # This causes the sim executable to be generated and run but there # is nothing to run, so it looks like it completes successfully. def buildCompCmd(self): self.comp_cmd = self['compcmd'] + \ self['warn'].conv() + \ self['outfile'].conv() + \ self['defines'].conv() + \ self['rtl_inc_dirs'].conv() + \ self['test_inc_dirs'].conv() + \ self['test_files'].conv() + \ self['rtl_files'].conv() + \ [self.cfg.auto_test] # [self.cfg['auto_test']] self.cmds.append(self.comp_cmd) def buildSimCmd(self): log = '-l' + self.cfg.build_path + '/' + self.cfg['logfile'] self.sim_cmd = self['simcmd'] + \ ['-n'] + \ [log] + \ self['outfile'] + \ self['plusargs'].conv() self.cmds.append(self.sim_cmd)
true
true
f70df5d1b2bf6b206c068257215e8e2edb3034f7
154
py
Python
music_controller/spotify/credentials.py
jinlee487/Music-Controller-Web-App-Tutorial
07216d0bdcb86e0537ffd0ca655dfedcef713fee
[ "Apache-2.0" ]
null
null
null
music_controller/spotify/credentials.py
jinlee487/Music-Controller-Web-App-Tutorial
07216d0bdcb86e0537ffd0ca655dfedcef713fee
[ "Apache-2.0" ]
null
null
null
music_controller/spotify/credentials.py
jinlee487/Music-Controller-Web-App-Tutorial
07216d0bdcb86e0537ffd0ca655dfedcef713fee
[ "Apache-2.0" ]
null
null
null
CLIENT_ID = "a126d5791c694dac84956d88bdeab74f" CLIENT_SECRET = "18f7ba3185ae43df90092e87aedf0b31" REDIRECT_URI = "http://127.0.01:8000/spotify/redirect"
30.8
54
0.824675
CLIENT_ID = "a126d5791c694dac84956d88bdeab74f" CLIENT_SECRET = "18f7ba3185ae43df90092e87aedf0b31" REDIRECT_URI = "http://127.0.01:8000/spotify/redirect"
true
true
f70df74781bebfd64b66722de04dbed6a5678a18
4,168
py
Python
spacy/tests/pipeline/test_spancat.py
yohasebe/spaCy
3dcb747980303457c662d668d5a0735c9efc9b72
[ "BSD-3-Clause", "MIT" ]
null
null
null
spacy/tests/pipeline/test_spancat.py
yohasebe/spaCy
3dcb747980303457c662d668d5a0735c9efc9b72
[ "BSD-3-Clause", "MIT" ]
null
null
null
spacy/tests/pipeline/test_spancat.py
yohasebe/spaCy
3dcb747980303457c662d668d5a0735c9efc9b72
[ "BSD-3-Clause", "MIT" ]
null
null
null
from numpy.testing import assert_equal from spacy.language import Language from spacy.training import Example from spacy.util import fix_random_seed, registry SPAN_KEY = "labeled_spans" TRAIN_DATA = [ ("Who is Shaka Khan?", {"spans": {SPAN_KEY: [(7, 17, "PERSON")]}}), ( "I like London and Berlin.", {"spans": {SPAN_KEY: [(7, 13, "LOC"), (18, 24, "LOC")]}}, ), ] def make_get_examples(nlp): train_examples = [] for t in TRAIN_DATA: eg = Example.from_dict(nlp.make_doc(t[0]), t[1]) train_examples.append(eg) def get_examples(): return train_examples return get_examples def test_simple_train(): fix_random_seed(0) nlp = Language() spancat = nlp.add_pipe("spancat", config={"spans_key": SPAN_KEY}) get_examples = make_get_examples(nlp) nlp.initialize(get_examples) sgd = nlp.create_optimizer() assert len(spancat.labels) != 0 for i in range(40): losses = {} nlp.update(list(get_examples()), losses=losses, drop=0.1, sgd=sgd) doc = nlp("I like London and Berlin.") assert doc.spans[spancat.key] == doc.spans[SPAN_KEY] assert len(doc.spans[spancat.key]) == 2 assert doc.spans[spancat.key][0].text == "London" scores = nlp.evaluate(get_examples()) assert f"spans_{SPAN_KEY}_f" in scores assert scores[f"spans_{SPAN_KEY}_f"] == 1.0 def test_ngram_suggester(en_tokenizer): # test different n-gram lengths for size in [1, 2, 3]: ngram_suggester = registry.misc.get("ngram_suggester.v1")(sizes=[size]) docs = [ en_tokenizer(text) for text in [ "a", "a b", "a b c", "a b c d", "a b c d e", "a " * 100, ] ] ngrams = ngram_suggester(docs) # span sizes are correct for s in ngrams.data: assert s[1] - s[0] == size # spans are within docs offset = 0 for i, doc in enumerate(docs): spans = ngrams.dataXd[offset : offset + ngrams.lengths[i]] spans_set = set() for span in spans: assert 0 <= span[0] < len(doc) assert 0 < span[1] <= len(doc) spans_set.add((span[0], span[1])) # spans are unique assert spans.shape[0] == len(spans_set) offset += ngrams.lengths[i] # the number of spans is correct assert_equal(ngrams.lengths, [max(0, len(doc) - (size - 1)) for doc in docs]) # test 1-3-gram suggestions ngram_suggester = registry.misc.get("ngram_suggester.v1")(sizes=[1, 2, 3]) docs = [ en_tokenizer(text) for text in ["a", "a b", "a b c", "a b c d", "a b c d e"] ] ngrams = ngram_suggester(docs) assert_equal(ngrams.lengths, [1, 3, 6, 9, 12]) assert_equal( ngrams.data, [ # doc 0 [0, 1], # doc 1 [0, 1], [1, 2], [0, 2], # doc 2 [0, 1], [1, 2], [2, 3], [0, 2], [1, 3], [0, 3], # doc 3 [0, 1], [1, 2], [2, 3], [3, 4], [0, 2], [1, 3], [2, 4], [0, 3], [1, 4], # doc 4 [0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [0, 2], [1, 3], [2, 4], [3, 5], [0, 3], [1, 4], [2, 5], ], ) # test some empty docs ngram_suggester = registry.misc.get("ngram_suggester.v1")(sizes=[1]) docs = [en_tokenizer(text) for text in ["", "a", ""]] ngrams = ngram_suggester(docs) assert_equal(ngrams.lengths, [len(doc) for doc in docs]) # test all empty docs ngram_suggester = registry.misc.get("ngram_suggester.v1")(sizes=[1]) docs = [en_tokenizer(text) for text in ["", "", ""]] ngrams = ngram_suggester(docs) assert_equal(ngrams.lengths, [len(doc) for doc in docs])
28.944444
85
0.497601
from numpy.testing import assert_equal from spacy.language import Language from spacy.training import Example from spacy.util import fix_random_seed, registry SPAN_KEY = "labeled_spans" TRAIN_DATA = [ ("Who is Shaka Khan?", {"spans": {SPAN_KEY: [(7, 17, "PERSON")]}}), ( "I like London and Berlin.", {"spans": {SPAN_KEY: [(7, 13, "LOC"), (18, 24, "LOC")]}}, ), ] def make_get_examples(nlp): train_examples = [] for t in TRAIN_DATA: eg = Example.from_dict(nlp.make_doc(t[0]), t[1]) train_examples.append(eg) def get_examples(): return train_examples return get_examples def test_simple_train(): fix_random_seed(0) nlp = Language() spancat = nlp.add_pipe("spancat", config={"spans_key": SPAN_KEY}) get_examples = make_get_examples(nlp) nlp.initialize(get_examples) sgd = nlp.create_optimizer() assert len(spancat.labels) != 0 for i in range(40): losses = {} nlp.update(list(get_examples()), losses=losses, drop=0.1, sgd=sgd) doc = nlp("I like London and Berlin.") assert doc.spans[spancat.key] == doc.spans[SPAN_KEY] assert len(doc.spans[spancat.key]) == 2 assert doc.spans[spancat.key][0].text == "London" scores = nlp.evaluate(get_examples()) assert f"spans_{SPAN_KEY}_f" in scores assert scores[f"spans_{SPAN_KEY}_f"] == 1.0 def test_ngram_suggester(en_tokenizer): for size in [1, 2, 3]: ngram_suggester = registry.misc.get("ngram_suggester.v1")(sizes=[size]) docs = [ en_tokenizer(text) for text in [ "a", "a b", "a b c", "a b c d", "a b c d e", "a " * 100, ] ] ngrams = ngram_suggester(docs) for s in ngrams.data: assert s[1] - s[0] == size offset = 0 for i, doc in enumerate(docs): spans = ngrams.dataXd[offset : offset + ngrams.lengths[i]] spans_set = set() for span in spans: assert 0 <= span[0] < len(doc) assert 0 < span[1] <= len(doc) spans_set.add((span[0], span[1])) assert spans.shape[0] == len(spans_set) offset += ngrams.lengths[i] assert_equal(ngrams.lengths, [max(0, len(doc) - (size - 1)) for doc in docs]) ngram_suggester = registry.misc.get("ngram_suggester.v1")(sizes=[1, 2, 3]) docs = [ en_tokenizer(text) for text in ["a", "a b", "a b c", "a b c d", "a b c d e"] ] ngrams = ngram_suggester(docs) assert_equal(ngrams.lengths, [1, 3, 6, 9, 12]) assert_equal( ngrams.data, [ [0, 1], [0, 1], [1, 2], [0, 2], [0, 1], [1, 2], [2, 3], [0, 2], [1, 3], [0, 3], [0, 1], [1, 2], [2, 3], [3, 4], [0, 2], [1, 3], [2, 4], [0, 3], [1, 4], [0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [0, 2], [1, 3], [2, 4], [3, 5], [0, 3], [1, 4], [2, 5], ], ) ngram_suggester = registry.misc.get("ngram_suggester.v1")(sizes=[1]) docs = [en_tokenizer(text) for text in ["", "a", ""]] ngrams = ngram_suggester(docs) assert_equal(ngrams.lengths, [len(doc) for doc in docs]) ngram_suggester = registry.misc.get("ngram_suggester.v1")(sizes=[1]) docs = [en_tokenizer(text) for text in ["", "", ""]] ngrams = ngram_suggester(docs) assert_equal(ngrams.lengths, [len(doc) for doc in docs])
true
true
f70df7b09fa8426339edb7e899978fac26c78843
1,039
py
Python
officetimer/timer_controller.py
adityakamble49/office-timer
eff7928c7c0b084d89aad33d9ce31f866dcec295
[ "Apache-2.0" ]
1
2020-12-30T14:42:13.000Z
2020-12-30T14:42:13.000Z
officetimer/timer_controller.py
adityakamble49/office-timer
eff7928c7c0b084d89aad33d9ce31f866dcec295
[ "Apache-2.0" ]
null
null
null
officetimer/timer_controller.py
adityakamble49/office-timer
eff7928c7c0b084d89aad33d9ce31f866dcec295
[ "Apache-2.0" ]
null
null
null
import time from optparse import OptionParser def build_option_parser(): parser = OptionParser() parser.add_option("-t", "--time", dest="given_time", type="string", help="Use HH:MM format for timer") return parser.parse_args() def countdown_timer(given_time_seconds): while given_time_seconds: minutes, seconds = divmod(given_time_seconds, 60) hours, minutes = divmod(minutes, 60) time_format = '{:02d}:{:02d}:{:02d}'.format(hours, minutes, seconds) print(time_format, end='\r') time.sleep(1) given_time_seconds -= 1 def main(): (options, args) = build_option_parser() given_time = options.given_time if given_time: hours = int(given_time.split(':')[0]) minutes = int(given_time.split(':')[1]) given_time_seconds = (hours * 3600) + (minutes * 60) countdown_timer(given_time_seconds) else: print("Use -h option to view help\n Developer: Aditya Kamble (adityakamble49.com)") if __name__ == '__main__': main()
29.685714
106
0.651588
import time from optparse import OptionParser def build_option_parser(): parser = OptionParser() parser.add_option("-t", "--time", dest="given_time", type="string", help="Use HH:MM format for timer") return parser.parse_args() def countdown_timer(given_time_seconds): while given_time_seconds: minutes, seconds = divmod(given_time_seconds, 60) hours, minutes = divmod(minutes, 60) time_format = '{:02d}:{:02d}:{:02d}'.format(hours, minutes, seconds) print(time_format, end='\r') time.sleep(1) given_time_seconds -= 1 def main(): (options, args) = build_option_parser() given_time = options.given_time if given_time: hours = int(given_time.split(':')[0]) minutes = int(given_time.split(':')[1]) given_time_seconds = (hours * 3600) + (minutes * 60) countdown_timer(given_time_seconds) else: print("Use -h option to view help\n Developer: Aditya Kamble (adityakamble49.com)") if __name__ == '__main__': main()
true
true
f70df9eae0b23ad5c6e4e7748075f9c9e72b18c2
2,394
py
Python
JPHacks2019-final/JPHacks2019-demofinal/VMforBeginner/VMforBeginnerApp/forms.py
jphacks/FK_1905
37ac7d2373c372d537e5ee4d7ffa441fe10c7770
[ "MIT" ]
null
null
null
JPHacks2019-final/JPHacks2019-demofinal/VMforBeginner/VMforBeginnerApp/forms.py
jphacks/FK_1905
37ac7d2373c372d537e5ee4d7ffa441fe10c7770
[ "MIT" ]
null
null
null
JPHacks2019-final/JPHacks2019-demofinal/VMforBeginner/VMforBeginnerApp/forms.py
jphacks/FK_1905
37ac7d2373c372d537e5ee4d7ffa441fe10c7770
[ "MIT" ]
null
null
null
from django import forms from .utils import PYTHON_PATH class EditorForm(forms.Form): code = forms.CharField( widget=forms.Textarea, required=False, ) file_name = forms.CharField( required=False, ) dir_name = forms.CharField( required=False, ) select_python = forms.ChoiceField( choices= PYTHON_PATH, ) pip_name = forms.CharField( required=False, ) def __init__(self, request, *args, **kwargs): super().__init__(*args, **kwargs) open_file_path = request.GET.get('open_file_path') if open_file_path: try: self.fields['code'].initial = open(open_file_path, 'rb').read().decode('utf-8') # 現在開いているファイルを削除した except FileNotFoundError: self.fields['code'].initial = '' else: self.fields['code'].initial = '' venv_path = request.GET.get('venv_path') if venv_path: choices = [(venv_path, 'venv')] + PYTHON_PATH self.fields['select_python'] = forms.ChoiceField(choices=choices) class ChkForm(forms.Form): labels = ['Machine Learning','Visualize', 'other',] CHOICE = [ ('keras','Keras'), ('tensorflow','Tensorflow'), ('chainer','Chainer')] CHOICE1 = [ ('matplotlib','Matplotlib'), ('seaborn','Seaborn'), ] CHOICE2 = [ ('numpy','numpy'), ('pandas','pandas'), ('opencv-python','opencv'), ] one = forms.MultipleChoiceField( label=labels[0], required=False, disabled=False, initial=[], choices=CHOICE, widget=forms.CheckboxSelectMultiple(attrs={ 'id': 'one','class': 'form-check-input'})) two = forms.MultipleChoiceField( label=labels[1], required=False, disabled=False, initial=[], choices=CHOICE1, widget=forms.CheckboxSelectMultiple(attrs={ 'id': 'two','class': 'form-check-input'})) three = forms.MultipleChoiceField( label=labels[2], required=False, disabled=False, initial=[], choices=CHOICE2, widget=forms.CheckboxSelectMultiple(attrs={ 'id': 'three','class': 'form-check-input'}))
26.898876
95
0.541353
from django import forms from .utils import PYTHON_PATH class EditorForm(forms.Form): code = forms.CharField( widget=forms.Textarea, required=False, ) file_name = forms.CharField( required=False, ) dir_name = forms.CharField( required=False, ) select_python = forms.ChoiceField( choices= PYTHON_PATH, ) pip_name = forms.CharField( required=False, ) def __init__(self, request, *args, **kwargs): super().__init__(*args, **kwargs) open_file_path = request.GET.get('open_file_path') if open_file_path: try: self.fields['code'].initial = open(open_file_path, 'rb').read().decode('utf-8') except FileNotFoundError: self.fields['code'].initial = '' else: self.fields['code'].initial = '' venv_path = request.GET.get('venv_path') if venv_path: choices = [(venv_path, 'venv')] + PYTHON_PATH self.fields['select_python'] = forms.ChoiceField(choices=choices) class ChkForm(forms.Form): labels = ['Machine Learning','Visualize', 'other',] CHOICE = [ ('keras','Keras'), ('tensorflow','Tensorflow'), ('chainer','Chainer')] CHOICE1 = [ ('matplotlib','Matplotlib'), ('seaborn','Seaborn'), ] CHOICE2 = [ ('numpy','numpy'), ('pandas','pandas'), ('opencv-python','opencv'), ] one = forms.MultipleChoiceField( label=labels[0], required=False, disabled=False, initial=[], choices=CHOICE, widget=forms.CheckboxSelectMultiple(attrs={ 'id': 'one','class': 'form-check-input'})) two = forms.MultipleChoiceField( label=labels[1], required=False, disabled=False, initial=[], choices=CHOICE1, widget=forms.CheckboxSelectMultiple(attrs={ 'id': 'two','class': 'form-check-input'})) three = forms.MultipleChoiceField( label=labels[2], required=False, disabled=False, initial=[], choices=CHOICE2, widget=forms.CheckboxSelectMultiple(attrs={ 'id': 'three','class': 'form-check-input'}))
true
true
f70dfb1861ab923f051015034244517f67647433
2,009
py
Python
pyatv/protocols/mrp/protobuf/RemoteTextInputMessage_pb2.py
crxporter/pyatv
e694a210b3810c64044116bf40e7b75420b5fe75
[ "MIT" ]
null
null
null
pyatv/protocols/mrp/protobuf/RemoteTextInputMessage_pb2.py
crxporter/pyatv
e694a210b3810c64044116bf40e7b75420b5fe75
[ "MIT" ]
null
null
null
pyatv/protocols/mrp/protobuf/RemoteTextInputMessage_pb2.py
crxporter/pyatv
e694a210b3810c64044116bf40e7b75420b5fe75
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: pyatv/protocols/mrp/protobuf/RemoteTextInputMessage.proto """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from pyatv.protocols.mrp.protobuf import ProtocolMessage_pb2 as pyatv_dot_protocols_dot_mrp_dot_protobuf_dot_ProtocolMessage__pb2 DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n9pyatv/protocols/mrp/protobuf/RemoteTextInputMessage.proto\x1a\x32pyatv/protocols/mrp/protobuf/ProtocolMessage.proto\"J\n\x16RemoteTextInputMessage\x12\x11\n\ttimestamp\x18\x01 \x01(\x01\x12\x0f\n\x07version\x18\x02 \x01(\x04\x12\x0c\n\x04\x64\x61ta\x18\x03 \x01(\x0c:I\n\x16remoteTextInputMessage\x12\x10.ProtocolMessage\x18G \x01(\x0b\x32\x17.RemoteTextInputMessage') REMOTETEXTINPUTMESSAGE_FIELD_NUMBER = 71 remoteTextInputMessage = DESCRIPTOR.extensions_by_name['remoteTextInputMessage'] _REMOTETEXTINPUTMESSAGE = DESCRIPTOR.message_types_by_name['RemoteTextInputMessage'] RemoteTextInputMessage = _reflection.GeneratedProtocolMessageType('RemoteTextInputMessage', (_message.Message,), { 'DESCRIPTOR' : _REMOTETEXTINPUTMESSAGE, '__module__' : 'pyatv.protocols.mrp.protobuf.RemoteTextInputMessage_pb2' # @@protoc_insertion_point(class_scope:RemoteTextInputMessage) }) _sym_db.RegisterMessage(RemoteTextInputMessage) if _descriptor._USE_C_DESCRIPTORS == False: pyatv_dot_protocols_dot_mrp_dot_protobuf_dot_ProtocolMessage__pb2.ProtocolMessage.RegisterExtension(remoteTextInputMessage) DESCRIPTOR._options = None _REMOTETEXTINPUTMESSAGE._serialized_start=113 _REMOTETEXTINPUTMESSAGE._serialized_end=187 # @@protoc_insertion_point(module_scope)
51.512821
431
0.844699
from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database _sym_db = _symbol_database.Default() from pyatv.protocols.mrp.protobuf import ProtocolMessage_pb2 as pyatv_dot_protocols_dot_mrp_dot_protobuf_dot_ProtocolMessage__pb2 DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n9pyatv/protocols/mrp/protobuf/RemoteTextInputMessage.proto\x1a\x32pyatv/protocols/mrp/protobuf/ProtocolMessage.proto\"J\n\x16RemoteTextInputMessage\x12\x11\n\ttimestamp\x18\x01 \x01(\x01\x12\x0f\n\x07version\x18\x02 \x01(\x04\x12\x0c\n\x04\x64\x61ta\x18\x03 \x01(\x0c:I\n\x16remoteTextInputMessage\x12\x10.ProtocolMessage\x18G \x01(\x0b\x32\x17.RemoteTextInputMessage') REMOTETEXTINPUTMESSAGE_FIELD_NUMBER = 71 remoteTextInputMessage = DESCRIPTOR.extensions_by_name['remoteTextInputMessage'] _REMOTETEXTINPUTMESSAGE = DESCRIPTOR.message_types_by_name['RemoteTextInputMessage'] RemoteTextInputMessage = _reflection.GeneratedProtocolMessageType('RemoteTextInputMessage', (_message.Message,), { 'DESCRIPTOR' : _REMOTETEXTINPUTMESSAGE, '__module__' : 'pyatv.protocols.mrp.protobuf.RemoteTextInputMessage_pb2' # @@protoc_insertion_point(class_scope:RemoteTextInputMessage) }) _sym_db.RegisterMessage(RemoteTextInputMessage) if _descriptor._USE_C_DESCRIPTORS == False: pyatv_dot_protocols_dot_mrp_dot_protobuf_dot_ProtocolMessage__pb2.ProtocolMessage.RegisterExtension(remoteTextInputMessage) DESCRIPTOR._options = None _REMOTETEXTINPUTMESSAGE._serialized_start=113 _REMOTETEXTINPUTMESSAGE._serialized_end=187 # @@protoc_insertion_point(module_scope)
true
true
f70dfb3337113af622cde89f6c3b6714e7082b52
1,610
py
Python
mldictionary_api/routes/api.py
PabloEmidio/MLDictionaryAPI
501f203802c1f6aafe213c0fb7a5808f9a9ad3ab
[ "MIT" ]
2
2022-01-17T16:17:22.000Z
2022-01-27T13:21:37.000Z
mldictionary_api/routes/api.py
PabloEmidio/MLDictionaryAPI
501f203802c1f6aafe213c0fb7a5808f9a9ad3ab
[ "MIT" ]
null
null
null
mldictionary_api/routes/api.py
PabloEmidio/MLDictionaryAPI
501f203802c1f6aafe213c0fb7a5808f9a9ad3ab
[ "MIT" ]
null
null
null
from flask import Blueprint from werkzeug.exceptions import NotFound, InternalServerError, TooManyRequests from mldictionary_api.const import API_PREFIX from mldictionary_api.resources.response import ResponseAPI from mldictionary_api.resources.const import ( ENGLISH_REPR, ENGLISH_TO_PORTUGUESE_REPR, PORTUGUESE_REPR, PORTUGUESE_TO_ENGLISH, SPANISH_REPR, ) api = Blueprint('mldictionary_api', __name__, url_prefix=API_PREFIX) @api.route('/dictionary/en/<word>/') def english(word: str): return ResponseAPI().get_meanings(ENGLISH_REPR, word) @api.route('/dictionary/pt/<word>/') def portuguese(word: str): return ResponseAPI().get_meanings(PORTUGUESE_REPR, word) @api.route('/dictionary/es/<word>/') def spanish(word: str): return ResponseAPI().get_meanings(SPANISH_REPR, word) @api.route('/translator/en-pt/<word>/') def english_to_portuguese(word: str): return ResponseAPI().get_meanings(ENGLISH_TO_PORTUGUESE_REPR, word) @api.route('/translator/pt-en/<word>/') def portuguese_to_english(word: str): return ResponseAPI().get_meanings(PORTUGUESE_TO_ENGLISH, word) @api.app_errorhandler(NotFound) def not_found(err): return ResponseAPI().handle_error(err) @api.app_errorhandler(TooManyRequests) def too_many_requests(err): return ResponseAPI().handle_error(err) @api.app_errorhandler(InternalServerError) def internal_error(err): return ResponseAPI().handle_error(err) @api.app_errorhandler(Exception) def general_exception(err): err.description = 'Don\'t recognize erro' err.code = 500 return ResponseAPI().handle_error(err)
25.967742
78
0.769565
from flask import Blueprint from werkzeug.exceptions import NotFound, InternalServerError, TooManyRequests from mldictionary_api.const import API_PREFIX from mldictionary_api.resources.response import ResponseAPI from mldictionary_api.resources.const import ( ENGLISH_REPR, ENGLISH_TO_PORTUGUESE_REPR, PORTUGUESE_REPR, PORTUGUESE_TO_ENGLISH, SPANISH_REPR, ) api = Blueprint('mldictionary_api', __name__, url_prefix=API_PREFIX) @api.route('/dictionary/en/<word>/') def english(word: str): return ResponseAPI().get_meanings(ENGLISH_REPR, word) @api.route('/dictionary/pt/<word>/') def portuguese(word: str): return ResponseAPI().get_meanings(PORTUGUESE_REPR, word) @api.route('/dictionary/es/<word>/') def spanish(word: str): return ResponseAPI().get_meanings(SPANISH_REPR, word) @api.route('/translator/en-pt/<word>/') def english_to_portuguese(word: str): return ResponseAPI().get_meanings(ENGLISH_TO_PORTUGUESE_REPR, word) @api.route('/translator/pt-en/<word>/') def portuguese_to_english(word: str): return ResponseAPI().get_meanings(PORTUGUESE_TO_ENGLISH, word) @api.app_errorhandler(NotFound) def not_found(err): return ResponseAPI().handle_error(err) @api.app_errorhandler(TooManyRequests) def too_many_requests(err): return ResponseAPI().handle_error(err) @api.app_errorhandler(InternalServerError) def internal_error(err): return ResponseAPI().handle_error(err) @api.app_errorhandler(Exception) def general_exception(err): err.description = 'Don\'t recognize erro' err.code = 500 return ResponseAPI().handle_error(err)
true
true
f70dfbb1563c8580cbfa60c64c23d188248e7d98
3,344
py
Python
scripts/insert2db/reputation/plugins/dshield_medium.py
macdaliot/exist
65244f79c602c5a00c3ea6a7eef512ce9c21e60a
[ "MIT" ]
159
2019-03-15T10:46:19.000Z
2022-03-12T09:19:31.000Z
scripts/insert2db/reputation/plugins/dshield_medium.py
macdaliot/exist
65244f79c602c5a00c3ea6a7eef512ce9c21e60a
[ "MIT" ]
6
2019-03-16T12:51:24.000Z
2020-07-09T02:25:42.000Z
scripts/insert2db/reputation/plugins/dshield_medium.py
macdaliot/exist
65244f79c602c5a00c3ea6a7eef512ce9c21e60a
[ "MIT" ]
36
2019-03-16T10:37:14.000Z
2021-11-14T21:04:18.000Z
import sys import os import configparser import requests import pandas as pd import hashlib from io import StringIO from datetime import datetime, timezone ## Django Setup import django import pymysql pymysql.install_as_MySQLdb() conffile = os.path.join(os.path.dirname(__file__), "../../conf/insert2db.conf") conf = configparser.SafeConfigParser() conf.read(conffile) sys.path.append(conf.get('exist', 'syspath')) os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'intelligence.settings') django.setup() from apps.reputation.models import blacklist import django.utils.timezone as tzone from django.db import IntegrityError ## Logger Setup from logging import getLogger, DEBUG, NullHandler logger = getLogger(__name__) logger.addHandler(NullHandler()) logger.setLevel(DEBUG) logger.propagate = True DataDir = os.path.join(os.path.dirname(__file__), '../data/') class Tracker(): def __init__(self): self.name = 'Dshield_Medium' self.ID = 222 self.URL = 'https://www.dshield.org/feeds/suspiciousdomains_Medium.txt' self.DataFilePath = DataDir + 'dshield/suspiciousdomains_Medium.txt' self.header = [ 'domain', ] def cmpFiles(self, oldfile, newtext): diffline = '' if not os.path.exists(oldfile): f = open(oldfile, 'w') f.close() oldsets = set(open(oldfile).readlines()) newsets = set(newtext.replace('\r\n','\n').splitlines(True)) results = newsets.difference(oldsets) for result in results: diffline += result return diffline[:-1] def delComment(self, s): result = '' for line in s.splitlines(True): if not line.startswith('#') \ and line != "Site\n": result += line return result def makeDataframe(self): df = pd.DataFrame() newline = '' try: res = requests.get(self.URL) if res.status_code != 200: return df newline = self.cmpFiles(self.DataFilePath, res.text) newline = self.delComment(newline) except Exception as e: logger.error(e) if not newline == '': open(self.DataFilePath, 'w').write(res.text) df = pd.read_csv(StringIO(newline), names=self.header) return df def parse(self): logger.info("start parsing: %s", self.name) df = self.makeDataframe() queries = [] if not df.empty: for i, v in df.iterrows(): line = str(self.ID) + "," line += str(v.values) md5 = hashlib.md5(line.encode('utf-8')).hexdigest() try: query = blacklist( id = md5, domain = v.domain, datetime = tzone.now(), source = self.ID, referrer = 'https://www.dshield.org/feeds/suspiciousdomains_Medium.txt', ) except Exception as e: logger.error("%s: %s", e, line) queries.append(query) else: logger.info("no update") logger.info("done parsing: %s, %s queries were parsed", self.name, len(queries)) return queries
31.54717
96
0.574163
import sys import os import configparser import requests import pandas as pd import hashlib from io import StringIO from datetime import datetime, timezone import django import pymysql pymysql.install_as_MySQLdb() conffile = os.path.join(os.path.dirname(__file__), "../../conf/insert2db.conf") conf = configparser.SafeConfigParser() conf.read(conffile) sys.path.append(conf.get('exist', 'syspath')) os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'intelligence.settings') django.setup() from apps.reputation.models import blacklist import django.utils.timezone as tzone from django.db import IntegrityError from logging import getLogger, DEBUG, NullHandler logger = getLogger(__name__) logger.addHandler(NullHandler()) logger.setLevel(DEBUG) logger.propagate = True DataDir = os.path.join(os.path.dirname(__file__), '../data/') class Tracker(): def __init__(self): self.name = 'Dshield_Medium' self.ID = 222 self.URL = 'https://www.dshield.org/feeds/suspiciousdomains_Medium.txt' self.DataFilePath = DataDir + 'dshield/suspiciousdomains_Medium.txt' self.header = [ 'domain', ] def cmpFiles(self, oldfile, newtext): diffline = '' if not os.path.exists(oldfile): f = open(oldfile, 'w') f.close() oldsets = set(open(oldfile).readlines()) newsets = set(newtext.replace('\r\n','\n').splitlines(True)) results = newsets.difference(oldsets) for result in results: diffline += result return diffline[:-1] def delComment(self, s): result = '' for line in s.splitlines(True): if not line.startswith('#') \ and line != "Site\n": result += line return result def makeDataframe(self): df = pd.DataFrame() newline = '' try: res = requests.get(self.URL) if res.status_code != 200: return df newline = self.cmpFiles(self.DataFilePath, res.text) newline = self.delComment(newline) except Exception as e: logger.error(e) if not newline == '': open(self.DataFilePath, 'w').write(res.text) df = pd.read_csv(StringIO(newline), names=self.header) return df def parse(self): logger.info("start parsing: %s", self.name) df = self.makeDataframe() queries = [] if not df.empty: for i, v in df.iterrows(): line = str(self.ID) + "," line += str(v.values) md5 = hashlib.md5(line.encode('utf-8')).hexdigest() try: query = blacklist( id = md5, domain = v.domain, datetime = tzone.now(), source = self.ID, referrer = 'https://www.dshield.org/feeds/suspiciousdomains_Medium.txt', ) except Exception as e: logger.error("%s: %s", e, line) queries.append(query) else: logger.info("no update") logger.info("done parsing: %s, %s queries were parsed", self.name, len(queries)) return queries
true
true
f70dfbd382b9b6eecad2903bf2393b0b5e8a7f8d
974
py
Python
reminders/reminders/celery.py
bamboo2panda/reminders
f767f8dca900f16a13e40ae2a7e5645412c54844
[ "MIT" ]
null
null
null
reminders/reminders/celery.py
bamboo2panda/reminders
f767f8dca900f16a13e40ae2a7e5645412c54844
[ "MIT" ]
null
null
null
reminders/reminders/celery.py
bamboo2panda/reminders
f767f8dca900f16a13e40ae2a7e5645412c54844
[ "MIT" ]
null
null
null
import os

from celery import Celery
from celery.schedules import crontab

# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'reminders.settings')

app = Celery('reminders')

# Using a string here means the worker doesn't have to serialize
# the configuration object to child processes.
# - namespace='CELERY' means all celery-related configuration keys
# should have a `CELERY_` prefix.
app.config_from_object('django.conf:settings', namespace='CELERY')

app.conf.beat_schedule = {
    'add-every-60-seconds': {
        'task': 'schedule.tasks.remind_by_mail',
        'schedule': 10.0
    },
    'test_task_every_10_seconds': {
        'task': 'schedule.tasks.print_test',
        'schedule': 10.0
    },
}

app.conf.timezone = 'UTC'

# Load task modules from all registered Django app configs.
app.autodiscover_tasks()


@app.task(bind=True)
def debug_task(self):
    print(f'Request: {self.request!r}')
26.324324
69
0.715606
import os

from celery import Celery
from celery.schedules import crontab

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'reminders.settings')

app = Celery('reminders')

# the configuration object to child processes.
# - namespace='CELERY' means all celery-related configuration keys
# should have a `CELERY_` prefix.
app.config_from_object('django.conf:settings', namespace='CELERY')

app.conf.beat_schedule = {
    'add-every-60-seconds': {
        'task': 'schedule.tasks.remind_by_mail',
        'schedule': 10.0
    },
    'test_task_every_10_seconds': {
        'task': 'schedule.tasks.print_test',
        'schedule': 10.0
    },
}

app.conf.timezone = 'UTC'

# Load task modules from all registered Django app configs.
app.autodiscover_tasks()


@app.task(bind=True)
def debug_task(self):
    print(f'Request: {self.request!r}')
true
true
f70dfc229f304b61a9709e930e2e9b4fbec020ab
673
py
Python
pj_358.py
luisalvaradoar/pj_euler
03e4ee9d0cc64ae9650b801554bf053d2db0b684
[ "Apache-2.0" ]
null
null
null
pj_358.py
luisalvaradoar/pj_euler
03e4ee9d0cc64ae9650b801554bf053d2db0b684
[ "Apache-2.0" ]
null
null
null
pj_358.py
luisalvaradoar/pj_euler
03e4ee9d0cc64ae9650b801554bf053d2db0b684
[ "Apache-2.0" ]
null
null
null
def rotcir(ns):
    lista = [ns]
    for i in range(len(ns) - 1):
        a = ns[0]
        ns = ns[1:len(ns)+1]
        ns += a
        lista.append(ns)
    return(lista)

def cyclic_number(ns):
    rotaciones = rotcir(ns)
    for n in range(1, len(ns)):
        Ns = str(n*int(ns))
        while len(Ns) != len(ns):
            Ns = '0' + Ns
        if Ns not in rotaciones:
            return False
    return True

import itertools

def per_n(n):
    lista1 = list(itertools.product('0123456789',repeat=n))
    lista2 = []
    for i in lista1:
        n = ''
        for j in range(len(i)):
            n += i[j]
        lista2.append(n)
    return(lista2)

i = 8
agregado = per_n(i)
for j in agregado:
    ns = '00000000137' + j + '56789'
    if cyclic_number(ns):
        print(ns)
        break
14.955556
56
0.606241
def rotcir(ns):
    lista = [ns]
    for i in range(len(ns) - 1):
        a = ns[0]
        ns = ns[1:len(ns)+1]
        ns += a
        lista.append(ns)
    return(lista)

def cyclic_number(ns):
    rotaciones = rotcir(ns)
    for n in range(1, len(ns)):
        Ns = str(n*int(ns))
        while len(Ns) != len(ns):
            Ns = '0' + Ns
        if Ns not in rotaciones:
            return False
    return True

import itertools

def per_n(n):
    lista1 = list(itertools.product('0123456789',repeat=n))
    lista2 = []
    for i in lista1:
        n = ''
        for j in range(len(i)):
            n += i[j]
        lista2.append(n)
    return(lista2)

i = 8
agregado = per_n(i)
for j in agregado:
    ns = '00000000137' + j + '56789'
    if cyclic_number(ns):
        print(ns)
        break
true
true
f70dfdbde3e077aab6bf814a03c39fed976f228e
463
py
Python
examples/aditi/aniket/sister.py
FlaskAio/navycut
40f378f1710a26645df8d726c4d1caf33097da50
[ "MIT" ]
4
2021-09-22T09:23:04.000Z
2022-03-05T05:58:46.000Z
examples/aditi/aniket/sister.py
FlaskAio/navycut
40f378f1710a26645df8d726c4d1caf33097da50
[ "MIT" ]
21
2021-09-27T03:19:21.000Z
2022-03-31T03:20:59.000Z
examples/aditi/aniket/sister.py
FlaskAio/navycut
40f378f1710a26645df8d726c4d1caf33097da50
[ "MIT" ]
null
null
null
""" Do not change anything if you dont have enough knowledge how to handle it, otherwise it may mess the server. """ from navycut.core import AppSister from navycut.utils import path __basedir__ = path.abspath(__file__).parent class AniketSister(AppSister): name = "aniket" template_folder = __basedir__ / "templates" static_folder = __basedir__ / "static" static_url_path = "/static" url_prefix = "/aniket" import_app_feature = True
24.368421
57
0.732181
from navycut.core import AppSister
from navycut.utils import path

__basedir__ = path.abspath(__file__).parent


class AniketSister(AppSister):
    name = "aniket"
    template_folder = __basedir__ / "templates"
    static_folder = __basedir__ / "static"
    static_url_path = "/static"
    url_prefix = "/aniket"
    import_app_feature = True
true
true
f70dfe64c9a27850e1221bf79984fca6b651700b
4,895
py
Python
sphinx_toolbox/wikipedia.py
arisp99/sphinx-toolbox
2987080e2d65c0dd2d392dcf7f1f5a904a9231f5
[ "MIT" ]
30
2021-03-01T00:15:55.000Z
2022-03-01T13:23:59.000Z
sphinx_toolbox/wikipedia.py
arisp99/sphinx-toolbox
2987080e2d65c0dd2d392dcf7f1f5a904a9231f5
[ "MIT" ]
56
2020-12-17T12:39:04.000Z
2022-03-21T19:00:55.000Z
sphinx_toolbox/wikipedia.py
arisp99/sphinx-toolbox
2987080e2d65c0dd2d392dcf7f1f5a904a9231f5
[ "MIT" ]
4
2021-07-04T16:57:52.000Z
2022-03-21T19:35:31.000Z
#!/usr/bin/env python3 # # wikipedia.py """ Sphinx extension to create links to Wikipedia articles. .. versionadded:: 0.2.0 .. extensions:: sphinx_toolbox.wikipedia Configuration -------------- .. latex:vspace:: -5px .. confval:: wikipedia_lang :type: :class:`str` :required: False :default: ``'en'`` The Wikipedia language to use for :rst:role:`wikipedia` roles. .. versionadded:: 0.2.0 Usage ------ .. latex:vspace:: -5px .. rst:role:: wikipedia Role which shows a link to the given article on Wikipedia. The title and language can be customised. :bold-title:`Example` .. rest-example:: :wikipedia:`Sphinx` :wikipedia:`mythical creature <Sphinx>` :wikipedia:`Answer to the Ultimate Question of Life, the Universe, and Everything <:de:42 (Antwort)>` .. only:: html .. rest-example:: :wikipedia:`:zh:斯芬克斯` API Reference ---------------- """ # # Copyright © 2020-2021 Dominic Davis-Foster <dominic@davis-foster.co.uk> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, # DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE # OR OTHER DEALINGS IN THE SOFTWARE. # # Based on https://github.com/quiver/sphinx-ext-wikipedia # BSD Licensed # # Parts of the docstrings based on https://docutils.sourceforge.io/docs/howto/rst-roles.html # # stdlib import re from typing import Dict, List, Tuple from urllib.parse import quote # 3rd party from apeye.url import URL from docutils import nodes from docutils.nodes import system_message from docutils.parsers.rst.states import Inliner from sphinx.application import Sphinx from sphinx.util.nodes import split_explicit_title # this package from sphinx_toolbox.utils import SphinxExtMetadata, metadata_add_version __all__ = ["make_wikipedia_link", "setup"] base_url = "https://%s.wikipedia.org/wiki" _wiki_lang_re = re.compile(":(.*?):(.*)") def _get_wikipedia_lang(inliner: Inliner): # pragma: no cover return inliner.document.settings.env.config.wikipedia_lang def make_wikipedia_link( name: str, rawtext: str, text: str, lineno: int, inliner: Inliner, options: Dict = {}, content: List[str] = [] ) -> Tuple[List[nodes.reference], List[system_message]]: """ Adds a link to the given article on :wikipedia:`Wikipedia`. :param name: The local name of the interpreted role, the role name actually used in the document. :param rawtext: A string containing the entire interpreted text input, including the role and markup. :param text: The interpreted text content. :param lineno: The line number where the interpreted text begins. :param inliner: The :class:`docutils.parsers.rst.states.Inliner` object that called :func:`~.source_role`. 
It contains the several attributes useful for error reporting and document tree access. :param options: A dictionary of directive options for customization (from the ``role`` directive), to be interpreted by the function. Used for additional attributes for the generated elements and other functionality. :param content: A list of strings, the directive content for customization (from the ``role`` directive). To be interpreted by the function. :return: A list containing the created node, and a list containing any messages generated during the function. """ text = nodes.unescape(text) has_explicit, title, target = split_explicit_title(text) m = _wiki_lang_re.match(target) if m: lang, target = m.groups() if not has_explicit: title = target else: lang = _get_wikipedia_lang(inliner) ref = URL(base_url % lang) / quote(target.replace(' ', '_'), safe='') node = nodes.reference(rawtext, title, refuri=str(ref), **options) return [node], [] @metadata_add_version def setup(app: Sphinx) -> SphinxExtMetadata: """ Setup :mod:`sphinx_toolbox.wikipedia`. .. versionadded:: 1.0.0 :param app: The Sphinx application. """ app.add_role("wikipedia", make_wikipedia_link) app.add_config_value("wikipedia_lang", "en", "env", [str]) return {"parallel_read_safe": True}
28.794118
111
0.735649
import re from typing import Dict, List, Tuple from urllib.parse import quote from apeye.url import URL from docutils import nodes from docutils.nodes import system_message from docutils.parsers.rst.states import Inliner from sphinx.application import Sphinx from sphinx.util.nodes import split_explicit_title from sphinx_toolbox.utils import SphinxExtMetadata, metadata_add_version __all__ = ["make_wikipedia_link", "setup"] base_url = "https://%s.wikipedia.org/wiki" _wiki_lang_re = re.compile(":(.*?):(.*)") def _get_wikipedia_lang(inliner: Inliner): return inliner.document.settings.env.config.wikipedia_lang def make_wikipedia_link( name: str, rawtext: str, text: str, lineno: int, inliner: Inliner, options: Dict = {}, content: List[str] = [] ) -> Tuple[List[nodes.reference], List[system_message]]: text = nodes.unescape(text) has_explicit, title, target = split_explicit_title(text) m = _wiki_lang_re.match(target) if m: lang, target = m.groups() if not has_explicit: title = target else: lang = _get_wikipedia_lang(inliner) ref = URL(base_url % lang) / quote(target.replace(' ', '_'), safe='') node = nodes.reference(rawtext, title, refuri=str(ref), **options) return [node], [] @metadata_add_version def setup(app: Sphinx) -> SphinxExtMetadata: app.add_role("wikipedia", make_wikipedia_link) app.add_config_value("wikipedia_lang", "en", "env", [str]) return {"parallel_read_safe": True}
true
true
f70dffb054dee576c07829185b1bb6e2aa0e8342
22,040
py
Python
sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_05_01/aio/operations/_flow_logs_operations.py
rsdoherty/azure-sdk-for-python
6bba5326677468e6660845a703686327178bb7b1
[ "MIT" ]
3
2020-06-23T02:25:27.000Z
2021-09-07T18:48:11.000Z
sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_05_01/aio/operations/_flow_logs_operations.py
rsdoherty/azure-sdk-for-python
6bba5326677468e6660845a703686327178bb7b1
[ "MIT" ]
510
2019-07-17T16:11:19.000Z
2021-08-02T08:38:32.000Z
sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_05_01/aio/operations/_flow_logs_operations.py
rsdoherty/azure-sdk-for-python
6bba5326677468e6660845a703686327178bb7b1
[ "MIT" ]
5
2019-09-04T12:51:37.000Z
2020-09-16T07:28:40.000Z
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union import warnings from azure.core.async_paging import AsyncItemPaged, AsyncList from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod from azure.mgmt.core.exceptions import ARMErrorFormat from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling from ... import models as _models T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] class FlowLogsOperations: """FlowLogsOperations async operations. You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. :type models: ~azure.mgmt.network.v2020_05_01.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. 
""" models = _models def __init__(self, client, config, serializer, deserializer) -> None: self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config async def _create_or_update_initial( self, resource_group_name: str, network_watcher_name: str, flow_log_name: str, parameters: "_models.FlowLog", **kwargs ) -> "_models.FlowLog": cls = kwargs.pop('cls', None) # type: ClsType["_models.FlowLog"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2020-05-01" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" # Construct URL url = self._create_or_update_initial.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'), 'flowLogName': self._serialize.url("flow_log_name", flow_log_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') body_content_kwargs = {} # type: Dict[str, Any] body_content = self._serialize.body(parameters, 'FlowLog') body_content_kwargs['content'] = body_content request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if response.status_code == 200: deserialized = self._deserialize('FlowLog', pipeline_response) if response.status_code == 201: deserialized = self._deserialize('FlowLog', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/flowLogs/{flowLogName}'} # type: ignore async def begin_create_or_update( self, resource_group_name: str, network_watcher_name: str, flow_log_name: str, parameters: "_models.FlowLog", **kwargs ) -> AsyncLROPoller["_models.FlowLog"]: """Create or update a flow log for the specified network security group. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param network_watcher_name: The name of the network watcher. :type network_watcher_name: str :param flow_log_name: The name of the flow log. :type flow_log_name: str :param parameters: Parameters that define the create or update flow log resource. 
:type parameters: ~azure.mgmt.network.v2020_05_01.models.FlowLog :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method, False for no polling, or your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either FlowLog or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_05_01.models.FlowLog] :raises ~azure.core.exceptions.HttpResponseError: """ polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod] cls = kwargs.pop('cls', None) # type: ClsType["_models.FlowLog"] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = await self._create_or_update_initial( resource_group_name=resource_group_name, network_watcher_name=network_watcher_name, flow_log_name=flow_log_name, parameters=parameters, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): deserialized = self._deserialize('FlowLog', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'), 'flowLogName': self._serialize.url("flow_log_name", flow_log_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/flowLogs/{flowLogName}'} # type: ignore async def get( self, resource_group_name: str, network_watcher_name: str, flow_log_name: str, **kwargs ) -> "_models.FlowLog": """Gets a flow log resource by name. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param network_watcher_name: The name of the network watcher. :type network_watcher_name: str :param flow_log_name: The name of the flow log resource. 
:type flow_log_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: FlowLog, or the result of cls(response) :rtype: ~azure.mgmt.network.v2020_05_01.models.FlowLog :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.FlowLog"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2020-05-01" accept = "application/json" # Construct URL url = self.get.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'), 'flowLogName': self._serialize.url("flow_log_name", flow_log_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.get(url, query_parameters, header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('FlowLog', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/flowLogs/{flowLogName}'} # type: ignore async def _delete_initial( self, resource_group_name: str, network_watcher_name: str, flow_log_name: str, **kwargs ) -> None: cls = kwargs.pop('cls', None) # type: ClsType[None] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2020-05-01" accept = "application/json" # Construct URL url = self._delete_initial.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'), 'flowLogName': self._serialize.url("flow_log_name", flow_log_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.delete(url, query_parameters, header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, 
**kwargs) response = pipeline_response.http_response if response.status_code not in [202, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/flowLogs/{flowLogName}'} # type: ignore async def begin_delete( self, resource_group_name: str, network_watcher_name: str, flow_log_name: str, **kwargs ) -> AsyncLROPoller[None]: """Deletes the specified flow log resource. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param network_watcher_name: The name of the network watcher. :type network_watcher_name: str :param flow_log_name: The name of the flow log resource. :type flow_log_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method, False for no polling, or your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[None] :raises ~azure.core.exceptions.HttpResponseError: """ polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod] cls = kwargs.pop('cls', None) # type: ClsType[None] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = await self._delete_initial( resource_group_name=resource_group_name, network_watcher_name=network_watcher_name, flow_log_name=flow_log_name, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'), 'flowLogName': self._serialize.url("flow_log_name", flow_log_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_delete.metadata = {'url': 
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/flowLogs/{flowLogName}'} # type: ignore def list( self, resource_group_name: str, network_watcher_name: str, **kwargs ) -> AsyncIterable["_models.FlowLogListResult"]: """Lists all flow log resources for the specified Network Watcher. :param resource_group_name: The name of the resource group containing Network Watcher. :type resource_group_name: str :param network_watcher_name: The name of the Network Watcher resource. :type network_watcher_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either FlowLogListResult or the result of cls(response) :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_05_01.models.FlowLogListResult] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.FlowLogListResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2020-05-01" accept = "application/json" def prepare_request(next_link=None): # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') if not next_link: # Construct URL url = self.list.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} # type: Dict[str, Any] request = self._client.get(url, query_parameters, header_parameters) return request async def extract_data(pipeline_response): deserialized = self._deserialize('FlowLogListResult', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, AsyncList(list_of_elem) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response) map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged( get_next, extract_data ) list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/flowLogs'} # type: ignore
50.666667
222
0.672641
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union import warnings from azure.core.async_paging import AsyncItemPaged, AsyncList from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod from azure.mgmt.core.exceptions import ARMErrorFormat from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling from ... import models as _models T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] class FlowLogsOperations: models = _models def __init__(self, client, config, serializer, deserializer) -> None: self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config async def _create_or_update_initial( self, resource_group_name: str, network_watcher_name: str, flow_log_name: str, parameters: "_models.FlowLog", **kwargs ) -> "_models.FlowLog": cls = kwargs.pop('cls', None) error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2020-05-01" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" url = self._create_or_update_initial.metadata['url'] path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'), 'flowLogName': self._serialize.url("flow_log_name", flow_log_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) query_parameters = {} query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') header_parameters = {} header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') body_content_kwargs = {} body_content = self._serialize.body(parameters, 'FlowLog') body_content_kwargs['content'] = body_content request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if response.status_code == 200: deserialized = self._deserialize('FlowLog', pipeline_response) if response.status_code == 201: deserialized = self._deserialize('FlowLog', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/flowLogs/{flowLogName}'} async def begin_create_or_update( self, resource_group_name: str, network_watcher_name: str, flow_log_name: str, parameters: 
"_models.FlowLog", **kwargs ) -> AsyncLROPoller["_models.FlowLog"]: polling = kwargs.pop('polling', True) cls = kwargs.pop('cls', None) lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) if cont_token is None: raw_result = await self._create_or_update_initial( resource_group_name=resource_group_name, network_watcher_name=network_watcher_name, flow_log_name=flow_log_name, parameters=parameters, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): deserialized = self._deserialize('FlowLog', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'), 'flowLogName': self._serialize.url("flow_log_name", flow_log_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/flowLogs/{flowLogName}'} async def get( self, resource_group_name: str, network_watcher_name: str, flow_log_name: str, **kwargs ) -> "_models.FlowLog": cls = kwargs.pop('cls', None) error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2020-05-01" accept = "application/json" url = self.get.metadata['url'] path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'), 'flowLogName': self._serialize.url("flow_log_name", flow_log_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) query_parameters = {} query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') header_parameters = {} header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.get(url, query_parameters, header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('FlowLog', pipeline_response) if cls: return 
cls(pipeline_response, deserialized, {}) return deserialized get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/flowLogs/{flowLogName}'} async def _delete_initial( self, resource_group_name: str, network_watcher_name: str, flow_log_name: str, **kwargs ) -> None: cls = kwargs.pop('cls', None) error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2020-05-01" accept = "application/json" url = self._delete_initial.metadata['url'] path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'), 'flowLogName': self._serialize.url("flow_log_name", flow_log_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) query_parameters = {} query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') header_parameters = {} header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.delete(url, query_parameters, header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [202, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/flowLogs/{flowLogName}'} async def begin_delete( self, resource_group_name: str, network_watcher_name: str, flow_log_name: str, **kwargs ) -> AsyncLROPoller[None]: polling = kwargs.pop('polling', True) cls = kwargs.pop('cls', None) lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) if cont_token is None: raw_result = await self._delete_initial( resource_group_name=resource_group_name, network_watcher_name=network_watcher_name, flow_log_name=flow_log_name, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'), 'flowLogName': self._serialize.url("flow_log_name", flow_log_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, 
deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/flowLogs/{flowLogName}'} def list( self, resource_group_name: str, network_watcher_name: str, **kwargs ) -> AsyncIterable["_models.FlowLogListResult"]: cls = kwargs.pop('cls', None) error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2020-05-01" accept = "application/json" def prepare_request(next_link=None): header_parameters = {} header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') if not next_link: url = self.list.metadata['url'] path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) query_parameters = {} query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} request = self._client.get(url, query_parameters, header_parameters) return request async def extract_data(pipeline_response): deserialized = self._deserialize('FlowLogListResult', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, AsyncList(list_of_elem) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response) map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged( get_next, extract_data ) list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/flowLogs'}
true
true
f70dffe0df962a8eeadf76f5e2e402a0010a002f
33,459
py
Python
djangae/tests/test_migrations.py
farridav/djangae
afdef831481d99f3259ca1a10adcd0c35d3424c1
[ "BSD-3-Clause" ]
null
null
null
djangae/tests/test_migrations.py
farridav/djangae
afdef831481d99f3259ca1a10adcd0c35d3424c1
[ "BSD-3-Clause" ]
null
null
null
djangae/tests/test_migrations.py
farridav/djangae
afdef831481d99f3259ca1a10adcd0c35d3424c1
[ "BSD-3-Clause" ]
null
null
null
# encoding: utf-8 # STANDARD LIB from unittest import skipIf # THIRD PARTY from django.apps.registry import apps # Apps from django.conf import settings from django.db import connection, models from django.db.migrations.state import ProjectState from django.test import override_settings from google.appengine.api import datastore from google.appengine.runtime import DeadlineExceededError # DJANGAE from djangae.contrib import sleuth from djangae.db.migrations import operations from djangae.db.migrations.mapper_library import ( _get_range, _mid_key, _mid_string, _next_string, shard_query, ShardedTaskMarker, start_mapping, ) from djangae.test import TestCase # Workaround for https://code.djangoproject.com/ticket/28188 def return_a_string(): return "squirrel" class TestModel(models.Model): name = models.CharField(max_length=100) class Meta: app_label = "djangae" class OtherModel(models.Model): name = models.CharField(max_length=100) class Meta: app_label = "djangae" class OtherAppModel(models.Model): name = models.CharField(max_length=100) class Meta: app_label = "testapp" class UniqueException(Exception): """ An exception which we can explicity throw and catch. """ pass def tickle_entity(entity): entity['is_tickled'] = True datastore.Put(entity) def tickle_entity_volitle(entity): """ Like `tickle_entity`, but raises DeadlineExceededError every 3rd call. """ call_count = getattr(tickle_entity_volitle, "call_count", 1) tickle_entity_volitle.call_count = call_count + 1 if call_count % 3 == 0: raise DeadlineExceededError() else: tickle_entity(entity) def flush_task_markers(): """ Delete all ShardedTaskMarker objects from the DB. Useful to call in setUp(), as Django doesn't wipe this kind because there's no model for it. """ namespaces = set() namespaces.add(settings.DATABASES['default'].get('NAMESPACE', '')) namespaces.add(settings.DATABASES.get('ns1', {}).get('NAMESPACE', '')) for namespace in namespaces: query = datastore.Query( ShardedTaskMarker.KIND, namespace=namespace, keys_only=True ).Run() datastore.Delete([x for x in query]) class MigrationOperationTests(TestCase): multi_db = True def setUp(self): # We need to clean out the migration task markers from the Datastore between each test, as # the standard flush only cleans out models super(MigrationOperationTests, self).setUp() flush_task_markers() def start_operation(self, operation, detonate=True): # Make a from_state and a to_state to pass to the operation, these can just be the # current state of the models from_state = ProjectState.from_apps(apps) to_state = from_state.clone() schema_editor = connection.schema_editor() app_label = TestModel._meta.app_label # If we just start the operation then it will hang forever waiting for its mapper task to # complete, so we won't even be able to call process_task_queues(). So to avoid that we # detonate the _wait_until_task_finished method. Then tasks can be processed after that. 
if detonate: with sleuth.detonate( "djangae.tests.test_migrations.operations.%s._wait_until_task_finished" % operation.__class__.__name__, UniqueException ): try: operation.database_forwards(app_label, schema_editor, from_state, to_state) except UniqueException: pass else: operation.database_forwards(app_label, schema_editor, from_state, to_state) def get_entities(self, model=TestModel, namespace=None): namespace = namespace or settings.DATABASES['default'].get('NAMESPACE', '') query = datastore.Query( model._meta.db_table, namespace=namespace, ) return [x for x in query.Run()] def test_run_operation_creates_and_updates_task_marker(self): """ If we run one of our custom operations, then it should create the task marker in the DB and defer a task, then set the marker to 'is_finished' when done. """ TestModel.objects.create() operation = operations.AddFieldData( "testmodel", "new_field", models.CharField(max_length=100, default="squirrel") ) self.start_operation(operation) # Now check that the task marker has been created. # Usefully, calling database_forwards() on the operation will have caused it to set the # `identifier` attribute on itself, meaning we can now just call _get_task_marker() task_marker = datastore.Get( [ShardedTaskMarker.get_key(operation.identifier, operation.namespace)] )[0] if task_marker is None: self.fail("Migration operation did not create its task marker") self.assertFalse(task_marker.get("is_finished")) self.assertNumTasksEquals(1) self.process_task_queues() # Now check that the task marker has been marked as finished task_marker = datastore.Get( [ShardedTaskMarker.get_key(operation.identifier, operation.namespace)] )[0] self.assertTrue(task_marker["is_finished"]) self.assertNumTasksEquals(0) def test_starting_operation_twice_does_not_trigger_task_twice(self): """ If we run an operation, and then try to run it again before the task has finished processing, then it should not trigger a second task. """ TestModel.objects.create() operation = operations.AddFieldData( "testmodel", "new_field", models.CharField(max_length=100, default="squirrel") ) self.start_operation(operation) task_marker = datastore.Get( ShardedTaskMarker.get_key(operation.identifier, operation.namespace) ) self.assertFalse(task_marker["is_finished"]) # We expect there to be a task queued for processing the operation self.assertNumTasksEquals(1) # Now try to run it again self.start_operation(operation) # We expect there to still be the same number of tasks self.assertNumTasksEquals(1) def test_running_finished_operation_does_not_trigger_new_task(self): """ If we re-trigger an operation which has already been run and finished, it should simply return without starting a new task or updating the task marker. """ TestModel.objects.create() operation = operations.AddFieldData( "testmodel", "new_field", models.CharField(max_length=100, default="squirrel") ) # Run the operation and check that it finishes with sleuth.watch("djangae.db.migrations.operations.AddFieldData._start_task") as start: self.start_operation(operation) self.assertTrue(start.called) task_marker = datastore.Get( ShardedTaskMarker.get_key(operation.identifier, operation.namespace) ) self.assertFalse(task_marker["is_finished"]) self.assertNumTasksEquals(1) self.process_task_queues() task_marker = datastore.Get( ShardedTaskMarker.get_key(operation.identifier, operation.namespace) ) self.assertTrue(task_marker["is_finished"]) # Run the operation again. It should see that's it's finished and just return immediately. 
self.assertNumTasksEquals(0) with sleuth.watch("djangae.db.migrations.operations.AddFieldData._start_task") as start: self.start_operation(operation, detonate=False) self.assertFalse(start.called) self.assertNumTasksEquals(0) task_marker = datastore.Get( ShardedTaskMarker.get_key(operation.identifier, operation.namespace) ) self.assertTrue(task_marker["is_finished"]) def test_queue_option(self): """ The `queue` kwarg should determine the task queue that the operation runs on. """ for x in xrange(3): TestModel.objects.create() operation = operations.AddFieldData( "testmodel", "new_field", models.CharField(max_length=100, default=return_a_string), queue="another", # Ensure that we trigger a re-defer, so that we test that the correct queue is used for # subsequent tasks, not just the first one entities_per_task=1, shard_count=1 ) self.start_operation(operation) # The task(s) should not be in the default queue, but in the "another" queue instead self.assertEqual(self.get_task_count("default"), 0) self.assertTrue(self.get_task_count("another") > 0) # And if we only run the tasks on the "another" queue, the whole operation should complete. self.process_task_queues("another") # And the entities should be updated entities = self.get_entities() self.assertTrue(all(entity['new_field'] == 'squirrel' for entity in entities)) def test_default_queue_setting(self): """ If no `queue` kwarg is passed then the DJANGAE_MIGRATION_DEFAULT_QUEUE setting should be used to determine the task queue. """ for x in xrange(2): TestModel.objects.create() operation = operations.AddFieldData( "testmodel", "new_field", models.CharField(max_length=100, default="squirrel"), ) # Check that starting the operation with a different setting correctly affects the queue. # Note that here we don't check that *all* tasks go on the correct queue, just the first # one. We test that more thoroughly in `test_queue_option` above. with override_settings(DJANGAE_MIGRATION_DEFAULT_QUEUE="another"): self.start_operation(operation) self.assertEqual(self.get_task_count("default"), 0) self.assertTrue(self.get_task_count("another") > 0) self.flush_task_queues() flush_task_markers() # santity checks: assert getattr(settings, "DJANGAE_MIGRATION_DEFAULT_QUEUE", None) is None assert self.get_task_count() == 0 # Trigger the operation without that setting. The task(s) should go on the default queue. self.start_operation(operation) self.assertTrue(self.get_task_count("default") > 0) def test_uid_allows_separate_identical_operations_to_be_run(self): """ By passing the 'uid' kwarg to an operation, we should allow it to be run, even if an otherwise idential operation has already been run. 
""" operation1 = operations.AddFieldData( "testmodel", "new_field", models.BooleanField(default=True) ) operation2 = operations.AddFieldData( "testmodel", "new_field", models.BooleanField(default=True) ) operation3 = operations.AddFieldData( "testmodel", "new_field", models.BooleanField(default=True), uid="x" ) # Create a model instance and run the first operation on it instance = TestModel.objects.create() self.start_operation(operation1) self.process_task_queues() # Check that the migration ran successfully entity = self.get_entities()[0] self.assertTrue(entity["new_field"]) # Now create another entity and make sure that the second migration (which is idential) # does NOT run on it instance.delete() instance = TestModel.objects.create() self.start_operation(operation2) self.process_task_queues() entity = self.get_entities()[0] self.assertIsNone(entity.get("new_field")) # Now run the third operation, which is identical but has a uid, so SHOULD be run self.start_operation(operation3) self.process_task_queues() entity = self.get_entities()[0] self.assertTrue(entity["new_field"]) def test_addfielddata(self): """ Test the AddFieldData operation. """ for x in xrange(2): TestModel.objects.create() # Just for sanity, check that none of the entities have the new field value yet entities = self.get_entities() self.assertFalse(any(entity.get("new_field") for entity in entities)) operation = operations.AddFieldData( "testmodel", "new_field", models.CharField(max_length=100, default="squirrel") ) self.start_operation(operation) self.process_task_queues() # The entities should now all have the 'new_field' actually mapped over entities = self.get_entities() self.assertTrue(all(entity['new_field'] == 'squirrel' for entity in entities)) def test_removefielddata(self): """ Test the RemoveFieldData operation. """ for x in xrange(2): TestModel.objects.create(name="name_%s" % x) # Just for sanity, check that all of the entities have `name` value entities = self.get_entities() self.assertTrue(all(entity["name"] for entity in entities)) operation = operations.RemoveFieldData( "testmodel", "name", models.CharField(max_length=100) ) self.start_operation(operation) self.process_task_queues() # The entities should now all have the 'name' value removed entities = self.get_entities() self.assertFalse(any(entity.get("name") for entity in entities)) def test_copyfielddata(self): """ Test the CopyFieldData operation. """ for x in xrange(2): TestModel.objects.create(name="name_%s" % x) # Just for sanity, check that none of the entities have the new "new_field" value entities = self.get_entities() self.assertFalse(any(entity.get("new_field") for entity in entities)) operation = operations.CopyFieldData( "testmodel", "name", "new_field" ) self.start_operation(operation) self.process_task_queues() # The entities should now all have the "new_field" value entities = self.get_entities() self.assertTrue(all(entity["new_field"] == entity["name"] for entity in entities)) def test_deletemodeldata(self): """ Test the DeleteModelData operation. """ for x in xrange(2): TestModel.objects.create() # Just for sanity, check that the entities exist! entities = self.get_entities() self.assertEqual(len(entities), 2) operation = operations.DeleteModelData("testmodel") self.start_operation(operation) self.process_task_queues() # The entities should now all be gone entities = self.get_entities() self.assertEqual(len(entities), 0) def test_copymodeldata_overwrite(self): """ Test the CopyModelData operation with overwrite_existing=True. 
""" # Create the TestModel instances, with OtherModel instances with matching PKs for x in xrange(2): instance = TestModel.objects.create(name="name_which_will_be_copied") OtherModel.objects.create(name="original_name", id=instance.pk) # Just for sanity, check that the entities exist testmodel_entities = self.get_entities() othermodel_entities = self.get_entities(model=OtherModel) self.assertEqual(len(testmodel_entities), 2) self.assertEqual(len(othermodel_entities), 2) operation = operations.CopyModelData( "testmodel", "djangae", "othermodel", overwrite_existing=True ) self.start_operation(operation) self.process_task_queues() # The OtherModel entities should now all have a name lof "name_which_will_be_copied" othermodel_entities = self.get_entities(model=OtherModel) self.assertTrue(all( entity["name"] == "name_which_will_be_copied" for entity in othermodel_entities )) def test_copymodeldata_no_overwrite(self): """ Test the CopyModelData operation with overwrite_existing=False. """ # Create the TestModel instances, with OtherModel instances with matching PKs only for # odd PKs for x in xrange(1, 5): TestModel.objects.create(id=x, name="name_which_will_be_copied") if x % 2: OtherModel.objects.create(id=x, name="original_name") # Just for sanity, check that the entities exist testmodel_entities = self.get_entities() othermodel_entities = self.get_entities(model=OtherModel) self.assertEqual(len(testmodel_entities), 4) self.assertEqual(len(othermodel_entities), 2) operation = operations.CopyModelData( "testmodel", "djangae", "othermodel", overwrite_existing=False ) self.start_operation(operation) self.process_task_queues() # We now expect there to be 4 OtherModel entities, but only the ones which didn't exist # already (i.e. the ones with even PKs) should have the name copied from the TestModel othermodel_entities = self.get_entities(model=OtherModel) self.assertEqual(len(othermodel_entities), 4) for entity in othermodel_entities: if entity.key().id() % 2: self.assertEqual(entity["name"], "original_name") else: self.assertEqual(entity["name"], "name_which_will_be_copied") @skipIf("ns1" not in settings.DATABASES, "This test is designed for the Djangae testapp settings") def test_copymodeldatatonamespace_overwrite(self): """ Test the CopyModelDataToNamespace operation with overwrite_existing=True. """ ns1 = settings.DATABASES["ns1"]["NAMESPACE"] # Create instances, with copies in the other namespace with matching IDs for x in xrange(2): instance = TestModel.objects.create(name="name_which_will_be_copied") instance.save(using="ns1") # Just for sanity, check that the entities exist entities = self.get_entities() ns1_entities = self.get_entities(namespace=ns1) self.assertEqual(len(entities), 2) self.assertEqual(len(ns1_entities), 2) operation = operations.CopyModelDataToNamespace( "testmodel", ns1, overwrite_existing=True ) self.start_operation(operation) self.process_task_queues() # The entities in ns1 should now all have a name lof "name_which_will_be_copied" ns1_entities = self.get_entities(namespace=ns1) self.assertTrue(all( entity["name"] == "name_which_will_be_copied" for entity in ns1_entities )) @skipIf("ns1" not in settings.DATABASES, "This test is designed for the Djangae testapp settings") def test_copymodeldatatonamespace_no_overwrite(self): """ Test the CopyModelDataToNamespace operation with overwrite_existing=False. 
""" ns1 = settings.DATABASES["ns1"]["NAMESPACE"] # Create the TestModel instances, with OtherModel instances with matching PKs only for # odd PKs for x in xrange(1, 5): TestModel.objects.create(id=x, name="name_which_will_be_copied") if x % 2: ns1_instance = TestModel(id=x, name="original_name") ns1_instance.save(using="ns1") # Just for sanity, check that the entities exist entities = self.get_entities() ns1_entities = self.get_entities(namespace=ns1) self.assertEqual(len(entities), 4) self.assertEqual(len(ns1_entities), 2) operation = operations.CopyModelDataToNamespace( "testmodel", ns1, overwrite_existing=False ) self.start_operation(operation) self.process_task_queues() # We now expect there to be 4 entities in the new namespace, but only the ones which didn't # exist already (i.e. the ones with even PKs) should have their `name` updated ns1_entities = self.get_entities(namespace=ns1) self.assertEqual(len(ns1_entities), 4) for entity in ns1_entities: if entity.key().id() % 2: self.assertEqual(entity["name"], "original_name") else: self.assertEqual(entity["name"], "name_which_will_be_copied") @skipIf( "ns1" not in settings.DATABASES or "testapp" not in settings.INSTALLED_APPS, "This test is designed for the Djangae testapp settings" ) def test_copymodeldatatonamespace_new_app_label(self): """ Test the CopyModelDataToNamespace operation with new data being saved to a new model in a new app as well as in a new namespace. """ ns1 = settings.DATABASES["ns1"]["NAMESPACE"] for x in xrange(2): TestModel.objects.create(name="name_which_will_be_copied") # Just for sanity, check that the entities exist entities = self.get_entities() new_entities = self.get_entities(model=OtherAppModel, namespace=ns1) self.assertEqual(len(entities), 2) self.assertEqual(len(new_entities), 0) operation = operations.CopyModelDataToNamespace( "testmodel", ns1, to_app_label="testapp", to_model_name="otherappmodel" ) self.start_operation(operation) self.process_task_queues() # The entities in ns1 should now all have a name lof "name_which_will_be_copied" new_entities = self.get_entities(model=OtherAppModel, namespace=ns1) self.assertEqual(len(new_entities), 2) self.assertTrue(all( entity["name"] == "name_which_will_be_copied" for entity in new_entities )) def test_mapfunctiononentities(self): """ Test the MapFunctionOnEntities operation. """ for x in xrange(2): TestModel.objects.create() # Test that our entities have not had our function called on them entities = self.get_entities() self.assertFalse(any(entity.get("is_tickled") for entity in entities)) operation = operations.MapFunctionOnEntities("testmodel", tickle_entity) self.start_operation(operation) self.process_task_queues() entities = self.get_entities() self.assertEqual(len(entities), 2) self.assertTrue(all(entity.get("is_tickled") for entity in entities)) class MidStringTestCase(TestCase): """ Tests for the _mid_string function in the mapper_library. """ def test_handles_args_in_either_order(self): """ It shouldn't matter whether we pass the "higher" string as the first or second param. """ low = "aaaaa" high = "zzzzz" mid1 = _mid_string(low, high) mid2 = _mid_string(low, high) self.assertEqual(mid1, mid2) self.assertTrue(low < mid1 < high) def test_basic_behaviour(self): """ Test finding the midpoint between two string in an obvious case. 
""" start = "a" end = "c" self.assertEqual(_mid_string(start, end), "b") def test_slightly_less_basic_behaviour(self): start = "aaaaaaaaaaaa" end = "z" mid_low_apprx = "l" mid_high_apprx = "n" result = _mid_string(start, end) self.assertTrue(mid_low_apprx < result < mid_high_apprx) def test_handles_strings_of_different_lengths(self): """ Strings of different lengths should return another of a length mid way between """ start = "aaa" end = "zzzzzzzzzzzzz" mid = _mid_string(start, end) self.assertTrue(start < mid < end) def test_handles_unicode(self): """ It should be able to do comparisions on non-ascii strings. """ start = u"aaa£¢$›😇" end = u"zzz🤡" mid = _mid_string(start, end) self.assertTrue(start < mid < end) def test_does_not_return_string_starting_with_double_underscore(self): """ A string that starts with a double underscore is not a valid Datastore key and so should not be returned. """ # The true mid point between this start and end combination is a double underscore start = "^^" end = "``" result = _mid_string(start, end) self.assertNotEqual(result, "__") class MidKeyTestCase(TestCase): """ Tests for the `_mid_key` function. """ def test_mixed_integers_and_strings_not_allowed(self): """ Finding the mid point between keys of different types is not currently supported and should therefore raise an error. """ key1 = datastore.Key.from_path("my_kind", 1) key2 = datastore.Key.from_path("my_kind", "1") self.assertRaises(NotImplementedError, _mid_key, key1, key2) def test_mid_integer_key(self): """ Given 2 keys with integer `id_or_name` values, the returned key should have an `id_or_name` which is an integer somewhere between the two. """ key1 = datastore.Key.from_path("my_kind", 1) key2 = datastore.Key.from_path("my_kind", 100) result = _mid_key(key1, key2) self.assertEqual(result.kind(), key1.kind()) self.assertEqual(result.namespace(), key1.namespace()) self.assertTrue(1 < result.id_or_name() < 100) def test_mid_string_key(self): """ Given 2 keys with string `id_or_name` values, the returned key should have an `id_or_name` which is a string somewhere between the two. """ key1 = datastore.Key.from_path("my_kind", "1") key2 = datastore.Key.from_path("my_kind", "100") result = _mid_key(key1, key2) self.assertEqual(result.kind(), key1.kind()) self.assertEqual(result.namespace(), key1.namespace()) self.assertTrue("1" < result.id_or_name() < "100") class NextStringTestCase(TestCase): """ Tests for the _next_string function in the mapper_library. """ def test_basic_behaviour(self): try: unichr(65536) # Python wide-unicode build (Linux) UTF-32 highest_unicode_char = unichr(0x10ffff) except ValueError: # Python narrow build (OSX) # Python 2 using 16 bit unicode, so the highest possible character is (2**16) - 1 highest_unicode_char = unichr(2 ** 16 - 1) checks = ( # Pairs of (input, expected_output) ("a", "b"), ("aaaa", "aaab"), # unichr((2 ** 32) - 1) is the last possible unicode character (highest_unicode_char, highest_unicode_char + unichr(1)), (u"aaa" + highest_unicode_char, u"aaa" + highest_unicode_char + unichr(1)), ) for input_text, expected_output in checks: self.assertEqual(_next_string(input_text), expected_output) class GetKeyRangeTestCase(TestCase): """ Tests for the `_get_range` function. """ def test_integer_range(self): """ Given 2 integer-based keys, it should return the range that the IDs span. 
""" key1 = datastore.Key.from_path("my_kind", 4012809128) key2 = datastore.Key.from_path("my_kind", 9524773032) self.assertEqual(_get_range(key1, key2), 9524773032 - 4012809128) def test_string_range(self): """ Given 2 string-based keys, it should return a representation of the range that the two keys span. """ key1 = datastore.Key.from_path("my_kind", "a") key2 = datastore.Key.from_path("my_kind", "b") # The difference between "a" and "b" is 1 character self.assertEqual(_get_range(key1, key2), unichr(1)) def test_mixed_keys_cause_exception(self): """ Trying to get a range between 2 keys when one is an integer and the other is a string should cause an explosion. """ key1 = datastore.Key.from_path("my_kind", "a") key2 = datastore.Key.from_path("my_kind", 12345) self.assertRaises(Exception, _get_range, key1, key2) class ShardQueryTestCase(TestCase): """ Tests for the `shard_query` function. """ def test_query_sharding(self): ns1 = settings.DATABASES["default"]["NAMESPACE"] for x in xrange(1, 21): TestModel.objects.create(pk=x) qry = datastore.Query(TestModel._meta.db_table, namespace=ns1) shards = shard_query(qry, 1) self.assertEqual(1, len(shards)) shards = shard_query(qry, 20) self.assertEqual(20, len(shards)) shards = shard_query(qry, 50) # We can't create 50 shards if there are only 20 objects self.assertEqual(20, len(shards)) class MapperLibraryTestCase(TestCase): """ Tests which check the behaviour of the mapper library directly. """ def setUp(self): # We need to clean out the migration task markers from the Datastore between each test, as # the standard flush only cleans out models super(MapperLibraryTestCase, self).setUp() flush_task_markers() def _get_testmodel_query(self, db="default"): namespace = settings.DATABASES[db].get('NAMESPACE', '') return datastore.Query( TestModel._meta.db_table, namespace=namespace ) def _get_taskmarker_query(self, namespace=""): return datastore.Query("ShardedTaskMarker", namespace=namespace) def test_basic_processing(self): """ Test that calling `start_mapping` with some sensible parameters will do the right processing. """ objs = [] for x in xrange(2): objs.append(TestModel(name="Test-%s" % x)) TestModel.objects.bulk_create(objs) start_mapping("my_lovely_mapper", self._get_testmodel_query(), tickle_entity) self.process_task_queues() # And check that every entity has been tickled self.assertTrue(all(e['is_tickled'] for e in self._get_testmodel_query().Run())) def test_cannot_start_same_mapping_twice(self): """ Calling `start_mapping` with the same parameters twice then it should NOT create 2 mappers. """ objs = [] for x in xrange(2): objs.append(TestModel(name="Test-%s" % x)) TestModel.objects.bulk_create(objs) assert self._get_taskmarker_query().Count() == 0 # Sanity marker = start_mapping("my_test_mapper", self._get_testmodel_query(), tickle_entity) task_count = self.get_task_count() assert marker # Sanity assert task_count # Sanity # Now try to defer the same mapper again marker = start_mapping("my_test_mapper", self._get_testmodel_query(), tickle_entity) # That shouldn't have worked, so the number of tasks should remain unchanged self.assertEqual(self.get_task_count(), task_count) # And it should not have returned a marker self.assertIsNone(marker) def test_can_start_same_mapping_in_2_different_namespaces(self): """ Calling `start_mapping` with the same parameters but with different namespaces on the query should work and correctly defer 2 processing tasks. 
""" dbs = ("default", "ns1") # Create some objects in 2 different namespaces for db in dbs: objs = [] for x in xrange(2): objs.append(TestModel(name="Test-%s" % x)) TestModel.objects.using(db).bulk_create(objs) # Start the same mapper twice but in 2 different namespaces, and check that they both work current_task_count = self.get_task_count() markers = set() for db in dbs: marker = start_mapping("my_test_mapper", self._get_testmodel_query(db), tickle_entity) self.assertIsNotNone(marker) self.assertFalse(marker in markers) markers.add(marker) new_task_count = self.get_task_count() self.assertTrue(new_task_count > current_task_count) current_task_count = new_task_count def test_mapper_will_continue_after_deadline_exceeded_error(self): """ If DeadlineExceededError is encountered when processing one of the entities, the mapper should redefer and continue. """ objs = [] for x in xrange(8): objs.append(TestModel(name="Test-%s" % x)) TestModel.objects.bulk_create(objs) identifier = "my_test_mapper" query = self._get_testmodel_query() # Reset the call_count on tickle_entity_volitle. We can't use sleuth.watch because a # wrapped function can't be pickled tickle_entity_volitle.call_count = 0 # Run the mapper and run all the tasks start_mapping( identifier, query, tickle_entity_volitle, shard_count=1, ) self.process_task_queues() # Check that the tickle_entity_volitle function was called more times than there are # entities (because some calls should have failed and been retried) # self.assertTrue(tickle_entity_volitle.call_count > TestModel.objects.count()) # And check that every entity has been tickled self.assertTrue(all(e['is_tickled'] for e in self._get_testmodel_query().Run()))
41.154982
119
0.657013
from unittest import skipIf from django.apps.registry import apps from django.conf import settings from django.db import connection, models from django.db.migrations.state import ProjectState from django.test import override_settings from google.appengine.api import datastore from google.appengine.runtime import DeadlineExceededError from djangae.contrib import sleuth from djangae.db.migrations import operations from djangae.db.migrations.mapper_library import ( _get_range, _mid_key, _mid_string, _next_string, shard_query, ShardedTaskMarker, start_mapping, ) from djangae.test import TestCase def return_a_string(): return "squirrel" class TestModel(models.Model): name = models.CharField(max_length=100) class Meta: app_label = "djangae" class OtherModel(models.Model): name = models.CharField(max_length=100) class Meta: app_label = "djangae" class OtherAppModel(models.Model): name = models.CharField(max_length=100) class Meta: app_label = "testapp" class UniqueException(Exception): pass def tickle_entity(entity): entity['is_tickled'] = True datastore.Put(entity) def tickle_entity_volitle(entity): call_count = getattr(tickle_entity_volitle, "call_count", 1) tickle_entity_volitle.call_count = call_count + 1 if call_count % 3 == 0: raise DeadlineExceededError() else: tickle_entity(entity) def flush_task_markers(): namespaces = set() namespaces.add(settings.DATABASES['default'].get('NAMESPACE', '')) namespaces.add(settings.DATABASES.get('ns1', {}).get('NAMESPACE', '')) for namespace in namespaces: query = datastore.Query( ShardedTaskMarker.KIND, namespace=namespace, keys_only=True ).Run() datastore.Delete([x for x in query]) class MigrationOperationTests(TestCase): multi_db = True def setUp(self): super(MigrationOperationTests, self).setUp() flush_task_markers() def start_operation(self, operation, detonate=True): from_state = ProjectState.from_apps(apps) to_state = from_state.clone() schema_editor = connection.schema_editor() app_label = TestModel._meta.app_label # detonate the _wait_until_task_finished method. Then tasks can be processed after that. if detonate: with sleuth.detonate( "djangae.tests.test_migrations.operations.%s._wait_until_task_finished" % operation.__class__.__name__, UniqueException ): try: operation.database_forwards(app_label, schema_editor, from_state, to_state) except UniqueException: pass else: operation.database_forwards(app_label, schema_editor, from_state, to_state) def get_entities(self, model=TestModel, namespace=None): namespace = namespace or settings.DATABASES['default'].get('NAMESPACE', '') query = datastore.Query( model._meta.db_table, namespace=namespace, ) return [x for x in query.Run()] def test_run_operation_creates_and_updates_task_marker(self): TestModel.objects.create() operation = operations.AddFieldData( "testmodel", "new_field", models.CharField(max_length=100, default="squirrel") ) self.start_operation(operation) # Now check that the task marker has been created. 
# Usefully, calling database_forwards() on the operation will have caused it to set the # `identifier` attribute on itself, meaning we can now just call _get_task_marker() task_marker = datastore.Get( [ShardedTaskMarker.get_key(operation.identifier, operation.namespace)] )[0] if task_marker is None: self.fail("Migration operation did not create its task marker") self.assertFalse(task_marker.get("is_finished")) self.assertNumTasksEquals(1) self.process_task_queues() # Now check that the task marker has been marked as finished task_marker = datastore.Get( [ShardedTaskMarker.get_key(operation.identifier, operation.namespace)] )[0] self.assertTrue(task_marker["is_finished"]) self.assertNumTasksEquals(0) def test_starting_operation_twice_does_not_trigger_task_twice(self): TestModel.objects.create() operation = operations.AddFieldData( "testmodel", "new_field", models.CharField(max_length=100, default="squirrel") ) self.start_operation(operation) task_marker = datastore.Get( ShardedTaskMarker.get_key(operation.identifier, operation.namespace) ) self.assertFalse(task_marker["is_finished"]) # We expect there to be a task queued for processing the operation self.assertNumTasksEquals(1) # Now try to run it again self.start_operation(operation) # We expect there to still be the same number of tasks self.assertNumTasksEquals(1) def test_running_finished_operation_does_not_trigger_new_task(self): TestModel.objects.create() operation = operations.AddFieldData( "testmodel", "new_field", models.CharField(max_length=100, default="squirrel") ) # Run the operation and check that it finishes with sleuth.watch("djangae.db.migrations.operations.AddFieldData._start_task") as start: self.start_operation(operation) self.assertTrue(start.called) task_marker = datastore.Get( ShardedTaskMarker.get_key(operation.identifier, operation.namespace) ) self.assertFalse(task_marker["is_finished"]) self.assertNumTasksEquals(1) self.process_task_queues() task_marker = datastore.Get( ShardedTaskMarker.get_key(operation.identifier, operation.namespace) ) self.assertTrue(task_marker["is_finished"]) # Run the operation again. It should see that's it's finished and just return immediately. self.assertNumTasksEquals(0) with sleuth.watch("djangae.db.migrations.operations.AddFieldData._start_task") as start: self.start_operation(operation, detonate=False) self.assertFalse(start.called) self.assertNumTasksEquals(0) task_marker = datastore.Get( ShardedTaskMarker.get_key(operation.identifier, operation.namespace) ) self.assertTrue(task_marker["is_finished"]) def test_queue_option(self): for x in xrange(3): TestModel.objects.create() operation = operations.AddFieldData( "testmodel", "new_field", models.CharField(max_length=100, default=return_a_string), queue="another", # Ensure that we trigger a re-defer, so that we test that the correct queue is used for # subsequent tasks, not just the first one entities_per_task=1, shard_count=1 ) self.start_operation(operation) # The task(s) should not be in the default queue, but in the "another" queue instead self.assertEqual(self.get_task_count("default"), 0) self.assertTrue(self.get_task_count("another") > 0) # And if we only run the tasks on the "another" queue, the whole operation should complete. 
self.process_task_queues("another") # And the entities should be updated entities = self.get_entities() self.assertTrue(all(entity['new_field'] == 'squirrel' for entity in entities)) def test_default_queue_setting(self): for x in xrange(2): TestModel.objects.create() operation = operations.AddFieldData( "testmodel", "new_field", models.CharField(max_length=100, default="squirrel"), ) # Check that starting the operation with a different setting correctly affects the queue. # Note that here we don't check that *all* tasks go on the correct queue, just the first with override_settings(DJANGAE_MIGRATION_DEFAULT_QUEUE="another"): self.start_operation(operation) self.assertEqual(self.get_task_count("default"), 0) self.assertTrue(self.get_task_count("another") > 0) self.flush_task_queues() flush_task_markers() assert getattr(settings, "DJANGAE_MIGRATION_DEFAULT_QUEUE", None) is None assert self.get_task_count() == 0 self.start_operation(operation) self.assertTrue(self.get_task_count("default") > 0) def test_uid_allows_separate_identical_operations_to_be_run(self): operation1 = operations.AddFieldData( "testmodel", "new_field", models.BooleanField(default=True) ) operation2 = operations.AddFieldData( "testmodel", "new_field", models.BooleanField(default=True) ) operation3 = operations.AddFieldData( "testmodel", "new_field", models.BooleanField(default=True), uid="x" ) instance = TestModel.objects.create() self.start_operation(operation1) self.process_task_queues() entity = self.get_entities()[0] self.assertTrue(entity["new_field"]) instance.delete() instance = TestModel.objects.create() self.start_operation(operation2) self.process_task_queues() entity = self.get_entities()[0] self.assertIsNone(entity.get("new_field")) self.start_operation(operation3) self.process_task_queues() entity = self.get_entities()[0] self.assertTrue(entity["new_field"]) def test_addfielddata(self): for x in xrange(2): TestModel.objects.create() entities = self.get_entities() self.assertFalse(any(entity.get("new_field") for entity in entities)) operation = operations.AddFieldData( "testmodel", "new_field", models.CharField(max_length=100, default="squirrel") ) self.start_operation(operation) self.process_task_queues() entities = self.get_entities() self.assertTrue(all(entity['new_field'] == 'squirrel' for entity in entities)) def test_removefielddata(self): for x in xrange(2): TestModel.objects.create(name="name_%s" % x) entities = self.get_entities() self.assertTrue(all(entity["name"] for entity in entities)) operation = operations.RemoveFieldData( "testmodel", "name", models.CharField(max_length=100) ) self.start_operation(operation) self.process_task_queues() entities = self.get_entities() self.assertFalse(any(entity.get("name") for entity in entities)) def test_copyfielddata(self): for x in xrange(2): TestModel.objects.create(name="name_%s" % x) entities = self.get_entities() self.assertFalse(any(entity.get("new_field") for entity in entities)) operation = operations.CopyFieldData( "testmodel", "name", "new_field" ) self.start_operation(operation) self.process_task_queues() entities = self.get_entities() self.assertTrue(all(entity["new_field"] == entity["name"] for entity in entities)) def test_deletemodeldata(self): for x in xrange(2): TestModel.objects.create() entities = self.get_entities() self.assertEqual(len(entities), 2) operation = operations.DeleteModelData("testmodel") self.start_operation(operation) self.process_task_queues() entities = self.get_entities() self.assertEqual(len(entities), 0) def 
test_copymodeldata_overwrite(self): for x in xrange(2): instance = TestModel.objects.create(name="name_which_will_be_copied") OtherModel.objects.create(name="original_name", id=instance.pk) testmodel_entities = self.get_entities() othermodel_entities = self.get_entities(model=OtherModel) self.assertEqual(len(testmodel_entities), 2) self.assertEqual(len(othermodel_entities), 2) operation = operations.CopyModelData( "testmodel", "djangae", "othermodel", overwrite_existing=True ) self.start_operation(operation) self.process_task_queues() othermodel_entities = self.get_entities(model=OtherModel) self.assertTrue(all( entity["name"] == "name_which_will_be_copied" for entity in othermodel_entities )) def test_copymodeldata_no_overwrite(self): for x in xrange(1, 5): TestModel.objects.create(id=x, name="name_which_will_be_copied") if x % 2: OtherModel.objects.create(id=x, name="original_name") testmodel_entities = self.get_entities() othermodel_entities = self.get_entities(model=OtherModel) self.assertEqual(len(testmodel_entities), 4) self.assertEqual(len(othermodel_entities), 2) operation = operations.CopyModelData( "testmodel", "djangae", "othermodel", overwrite_existing=False ) self.start_operation(operation) self.process_task_queues() # already (i.e. the ones with even PKs) should have the name copied from the TestModel othermodel_entities = self.get_entities(model=OtherModel) self.assertEqual(len(othermodel_entities), 4) for entity in othermodel_entities: if entity.key().id() % 2: self.assertEqual(entity["name"], "original_name") else: self.assertEqual(entity["name"], "name_which_will_be_copied") @skipIf("ns1" not in settings.DATABASES, "This test is designed for the Djangae testapp settings") def test_copymodeldatatonamespace_overwrite(self): ns1 = settings.DATABASES["ns1"]["NAMESPACE"] # Create instances, with copies in the other namespace with matching IDs for x in xrange(2): instance = TestModel.objects.create(name="name_which_will_be_copied") instance.save(using="ns1") # Just for sanity, check that the entities exist entities = self.get_entities() ns1_entities = self.get_entities(namespace=ns1) self.assertEqual(len(entities), 2) self.assertEqual(len(ns1_entities), 2) operation = operations.CopyModelDataToNamespace( "testmodel", ns1, overwrite_existing=True ) self.start_operation(operation) self.process_task_queues() # The entities in ns1 should now all have a name lof "name_which_will_be_copied" ns1_entities = self.get_entities(namespace=ns1) self.assertTrue(all( entity["name"] == "name_which_will_be_copied" for entity in ns1_entities )) @skipIf("ns1" not in settings.DATABASES, "This test is designed for the Djangae testapp settings") def test_copymodeldatatonamespace_no_overwrite(self): ns1 = settings.DATABASES["ns1"]["NAMESPACE"] # Create the TestModel instances, with OtherModel instances with matching PKs only for # odd PKs for x in xrange(1, 5): TestModel.objects.create(id=x, name="name_which_will_be_copied") if x % 2: ns1_instance = TestModel(id=x, name="original_name") ns1_instance.save(using="ns1") # Just for sanity, check that the entities exist entities = self.get_entities() ns1_entities = self.get_entities(namespace=ns1) self.assertEqual(len(entities), 4) self.assertEqual(len(ns1_entities), 2) operation = operations.CopyModelDataToNamespace( "testmodel", ns1, overwrite_existing=False ) self.start_operation(operation) self.process_task_queues() # We now expect there to be 4 entities in the new namespace, but only the ones which didn't ns1_entities = 
self.get_entities(namespace=ns1) self.assertEqual(len(ns1_entities), 4) for entity in ns1_entities: if entity.key().id() % 2: self.assertEqual(entity["name"], "original_name") else: self.assertEqual(entity["name"], "name_which_will_be_copied") @skipIf( "ns1" not in settings.DATABASES or "testapp" not in settings.INSTALLED_APPS, "This test is designed for the Djangae testapp settings" ) def test_copymodeldatatonamespace_new_app_label(self): ns1 = settings.DATABASES["ns1"]["NAMESPACE"] for x in xrange(2): TestModel.objects.create(name="name_which_will_be_copied") entities = self.get_entities() new_entities = self.get_entities(model=OtherAppModel, namespace=ns1) self.assertEqual(len(entities), 2) self.assertEqual(len(new_entities), 0) operation = operations.CopyModelDataToNamespace( "testmodel", ns1, to_app_label="testapp", to_model_name="otherappmodel" ) self.start_operation(operation) self.process_task_queues() new_entities = self.get_entities(model=OtherAppModel, namespace=ns1) self.assertEqual(len(new_entities), 2) self.assertTrue(all( entity["name"] == "name_which_will_be_copied" for entity in new_entities )) def test_mapfunctiononentities(self): for x in xrange(2): TestModel.objects.create() entities = self.get_entities() self.assertFalse(any(entity.get("is_tickled") for entity in entities)) operation = operations.MapFunctionOnEntities("testmodel", tickle_entity) self.start_operation(operation) self.process_task_queues() entities = self.get_entities() self.assertEqual(len(entities), 2) self.assertTrue(all(entity.get("is_tickled") for entity in entities)) class MidStringTestCase(TestCase): def test_handles_args_in_either_order(self): low = "aaaaa" high = "zzzzz" mid1 = _mid_string(low, high) mid2 = _mid_string(low, high) self.assertEqual(mid1, mid2) self.assertTrue(low < mid1 < high) def test_basic_behaviour(self): start = "a" end = "c" self.assertEqual(_mid_string(start, end), "b") def test_slightly_less_basic_behaviour(self): start = "aaaaaaaaaaaa" end = "z" mid_low_apprx = "l" mid_high_apprx = "n" result = _mid_string(start, end) self.assertTrue(mid_low_apprx < result < mid_high_apprx) def test_handles_strings_of_different_lengths(self): start = "aaa" end = "zzzzzzzzzzzzz" mid = _mid_string(start, end) self.assertTrue(start < mid < end) def test_handles_unicode(self): start = u"aaa£¢$›😇" end = u"zzz🤡" mid = _mid_string(start, end) self.assertTrue(start < mid < end) def test_does_not_return_string_starting_with_double_underscore(self): start = "^^" end = "``" result = _mid_string(start, end) self.assertNotEqual(result, "__") class MidKeyTestCase(TestCase): def test_mixed_integers_and_strings_not_allowed(self): key1 = datastore.Key.from_path("my_kind", 1) key2 = datastore.Key.from_path("my_kind", "1") self.assertRaises(NotImplementedError, _mid_key, key1, key2) def test_mid_integer_key(self): key1 = datastore.Key.from_path("my_kind", 1) key2 = datastore.Key.from_path("my_kind", 100) result = _mid_key(key1, key2) self.assertEqual(result.kind(), key1.kind()) self.assertEqual(result.namespace(), key1.namespace()) self.assertTrue(1 < result.id_or_name() < 100) def test_mid_string_key(self): key1 = datastore.Key.from_path("my_kind", "1") key2 = datastore.Key.from_path("my_kind", "100") result = _mid_key(key1, key2) self.assertEqual(result.kind(), key1.kind()) self.assertEqual(result.namespace(), key1.namespace()) self.assertTrue("1" < result.id_or_name() < "100") class NextStringTestCase(TestCase): def test_basic_behaviour(self): try: unichr(65536) highest_unicode_char = unichr(0x10ffff) 
except ValueError: highest_unicode_char = unichr(2 ** 16 - 1) checks = ( ("a", "b"), ("aaaa", "aaab"), (highest_unicode_char, highest_unicode_char + unichr(1)), (u"aaa" + highest_unicode_char, u"aaa" + highest_unicode_char + unichr(1)), ) for input_text, expected_output in checks: self.assertEqual(_next_string(input_text), expected_output) class GetKeyRangeTestCase(TestCase): def test_integer_range(self): key1 = datastore.Key.from_path("my_kind", 4012809128) key2 = datastore.Key.from_path("my_kind", 9524773032) self.assertEqual(_get_range(key1, key2), 9524773032 - 4012809128) def test_string_range(self): key1 = datastore.Key.from_path("my_kind", "a") key2 = datastore.Key.from_path("my_kind", "b") self.assertEqual(_get_range(key1, key2), unichr(1)) def test_mixed_keys_cause_exception(self): key1 = datastore.Key.from_path("my_kind", "a") key2 = datastore.Key.from_path("my_kind", 12345) self.assertRaises(Exception, _get_range, key1, key2) class ShardQueryTestCase(TestCase): def test_query_sharding(self): ns1 = settings.DATABASES["default"]["NAMESPACE"] for x in xrange(1, 21): TestModel.objects.create(pk=x) qry = datastore.Query(TestModel._meta.db_table, namespace=ns1) shards = shard_query(qry, 1) self.assertEqual(1, len(shards)) shards = shard_query(qry, 20) self.assertEqual(20, len(shards)) shards = shard_query(qry, 50) self.assertEqual(20, len(shards)) class MapperLibraryTestCase(TestCase): def setUp(self): # We need to clean out the migration task markers from the Datastore between each test, as # the standard flush only cleans out models super(MapperLibraryTestCase, self).setUp() flush_task_markers() def _get_testmodel_query(self, db="default"): namespace = settings.DATABASES[db].get('NAMESPACE', '') return datastore.Query( TestModel._meta.db_table, namespace=namespace ) def _get_taskmarker_query(self, namespace=""): return datastore.Query("ShardedTaskMarker", namespace=namespace) def test_basic_processing(self): objs = [] for x in xrange(2): objs.append(TestModel(name="Test-%s" % x)) TestModel.objects.bulk_create(objs) start_mapping("my_lovely_mapper", self._get_testmodel_query(), tickle_entity) self.process_task_queues() # And check that every entity has been tickled self.assertTrue(all(e['is_tickled'] for e in self._get_testmodel_query().Run())) def test_cannot_start_same_mapping_twice(self): objs = [] for x in xrange(2): objs.append(TestModel(name="Test-%s" % x)) TestModel.objects.bulk_create(objs) assert self._get_taskmarker_query().Count() == 0 # Sanity marker = start_mapping("my_test_mapper", self._get_testmodel_query(), tickle_entity) task_count = self.get_task_count() assert marker # Sanity assert task_count # Sanity # Now try to defer the same mapper again marker = start_mapping("my_test_mapper", self._get_testmodel_query(), tickle_entity) # That shouldn't have worked, so the number of tasks should remain unchanged self.assertEqual(self.get_task_count(), task_count) self.assertIsNone(marker) def test_can_start_same_mapping_in_2_different_namespaces(self): dbs = ("default", "ns1") for db in dbs: objs = [] for x in xrange(2): objs.append(TestModel(name="Test-%s" % x)) TestModel.objects.using(db).bulk_create(objs) current_task_count = self.get_task_count() markers = set() for db in dbs: marker = start_mapping("my_test_mapper", self._get_testmodel_query(db), tickle_entity) self.assertIsNotNone(marker) self.assertFalse(marker in markers) markers.add(marker) new_task_count = self.get_task_count() self.assertTrue(new_task_count > current_task_count) current_task_count = new_task_count 
def test_mapper_will_continue_after_deadline_exceeded_error(self): objs = [] for x in xrange(8): objs.append(TestModel(name="Test-%s" % x)) TestModel.objects.bulk_create(objs) identifier = "my_test_mapper" query = self._get_testmodel_query() # wrapped function can't be pickled tickle_entity_volitle.call_count = 0 start_mapping( identifier, query, tickle_entity_volitle, shard_count=1, ) self.process_task_queues() self.assertTrue(all(e['is_tickled'] for e in self._get_testmodel_query().Run()))
true
true
f70e000ee4d5e5439ae086cbb2c848253ec1032f
198
py
Python
CSV2JSON/CSVreader.py
anthonylzh555/CV_project
96517cd654c5e7f467fad306936e4ad01be64ea2
[ "Unlicense" ]
null
null
null
CSV2JSON/CSVreader.py
anthonylzh555/CV_project
96517cd654c5e7f467fad306936e4ad01be64ea2
[ "Unlicense" ]
null
null
null
CSV2JSON/CSVreader.py
anthonylzh555/CV_project
96517cd654c5e7f467fad306936e4ad01be64ea2
[ "Unlicense" ]
null
null
null
import csv
import json

csvfile = open('20180308.csv', 'r')
jsonfile = open('20180308.json', 'w')

reader = csv.DictReader(csvfile)

out = json.dumps( [ row for row in reader ] )
jsonfile.write(out)
19.8
45
0.69697
import csv
import json

csvfile = open('20180308.csv', 'r')
jsonfile = open('20180308.json', 'w')

reader = csv.DictReader(csvfile)

out = json.dumps( [ row for row in reader ] )
jsonfile.write(out)
true
true
f70e0019e1b6f9da8bae5be63c8d6102f298888f
1,344
py
Python
tests/utils.py
fyntex/lib-cl-sii-python
b6ffb72be1f173a1d2e44b17ae5c08caf96ebf34
[ "MIT" ]
8
2020-03-07T19:58:40.000Z
2021-12-15T13:47:40.000Z
tests/utils.py
fyntex/lib-cl-sii-python
b6ffb72be1f173a1d2e44b17ae5c08caf96ebf34
[ "MIT" ]
141
2020-01-17T22:47:35.000Z
2022-03-31T18:29:47.000Z
tests/utils.py
fyndata/lib-cl-sii-python
d618247fe7c73e11b391015ae8a00e7b282c2606
[ "MIT" ]
4
2019-05-18T15:43:35.000Z
2019-11-06T21:55:09.000Z
import json
import os

from typing import Mapping


_TESTS_DIR_PATH = os.path.dirname(__file__)


def get_test_file_path(path: str) -> str:
    filepath = os.path.join(
        _TESTS_DIR_PATH,
        path,
    )
    return filepath


def read_test_file_bytes(path: str) -> bytes:
    filepath = os.path.join(
        _TESTS_DIR_PATH,
        path,
    )
    with open(filepath, mode='rb') as file:
        content = file.read()

    return content


def read_test_file_str_ascii(path: str) -> str:
    filepath = os.path.join(
        _TESTS_DIR_PATH,
        path,
    )
    with open(filepath, mode='rt', encoding='ascii') as file:
        content = file.read()

    return content


def read_test_file_str_utf8(path: str) -> str:
    filepath = os.path.join(
        _TESTS_DIR_PATH,
        path,
    )
    with open(filepath, mode='rt', encoding='utf8') as file:
        content = file.read()

    return content


def read_test_file_json_dict(path: str) -> Mapping[str, object]:
    filepath = os.path.join(
        _TESTS_DIR_PATH,
        path,
    )
    with open(filepath, mode='rb') as file:
        content = json.load(file)

    if isinstance(content, Mapping):
        return content
    else:
        raise TypeError(
            f"Expected JSON file content to be a 'Mapping', not a '{content.__class__.__name__}'.",
        )
21
99
0.614583
import json
import os

from typing import Mapping


_TESTS_DIR_PATH = os.path.dirname(__file__)


def get_test_file_path(path: str) -> str:
    filepath = os.path.join(
        _TESTS_DIR_PATH,
        path,
    )
    return filepath


def read_test_file_bytes(path: str) -> bytes:
    filepath = os.path.join(
        _TESTS_DIR_PATH,
        path,
    )
    with open(filepath, mode='rb') as file:
        content = file.read()

    return content


def read_test_file_str_ascii(path: str) -> str:
    filepath = os.path.join(
        _TESTS_DIR_PATH,
        path,
    )
    with open(filepath, mode='rt', encoding='ascii') as file:
        content = file.read()

    return content


def read_test_file_str_utf8(path: str) -> str:
    filepath = os.path.join(
        _TESTS_DIR_PATH,
        path,
    )
    with open(filepath, mode='rt', encoding='utf8') as file:
        content = file.read()

    return content


def read_test_file_json_dict(path: str) -> Mapping[str, object]:
    filepath = os.path.join(
        _TESTS_DIR_PATH,
        path,
    )
    with open(filepath, mode='rb') as file:
        content = json.load(file)

    if isinstance(content, Mapping):
        return content
    else:
        raise TypeError(
            f"Expected JSON file content to be a 'Mapping', not a '{content.__class__.__name__}'.",
        )
true
true
f70e025c9c6583b9c7c6f7774f638cb33684700d
6,572
py
Python
cc_container_worker/application_container/__main__.py
curious-containers/cc-container-worker
89a226a366fe798ba3eda04e0ebc3632b2450bc8
[ "Apache-2.0" ]
null
null
null
cc_container_worker/application_container/__main__.py
curious-containers/cc-container-worker
89a226a366fe798ba3eda04e0ebc3632b2450bc8
[ "Apache-2.0" ]
null
null
null
cc_container_worker/application_container/__main__.py
curious-containers/cc-container-worker
89a226a366fe798ba3eda04e0ebc3632b2450bc8
[ "Apache-2.0" ]
null
null
null
import os import sys import json import jsonschema from subprocess import Popen, PIPE from threading import Thread from traceback import format_exc from cc_container_worker.application_container.telemetry import Telemetry from cc_container_worker.commons.data import ac_download, ac_upload, tracing_upload from cc_container_worker.commons.callbacks import CallbackHandler from cc_container_worker.commons.schemas import application_config_schema CONFIG_FILE_PATH = os.path.join(os.path.expanduser('~'), '.config', 'cc-container-worker', 'config.json') LOCAL_TRACING_FILE = { 'dir': '/var/tmp/cc-tracing', 'name': 'data.csv', 'optional': True } def main(): settings = json.loads(sys.argv[1]) callback_handler = CallbackHandler(settings, container_type='application') config = None try: with open(CONFIG_FILE_PATH) as f: config = json.load(f) jsonschema.validate(config, application_config_schema) except: description = 'Could not load JSON config file from path {}'.format(CONFIG_FILE_PATH) callback_handler.send_callback( callback_type='started', state='failed', description=description, exception=format_exc() ) exit(3) for key, val in config['local_result_files'].items(): try: if not os.path.exists(val['dir']): os.makedirs(val['dir']) except: pass description = 'Container started.' additional_settings = callback_handler.send_callback( callback_type='started', state='success', description=description ) meta_data = { 'application_container_id': settings['container_id'], 'task_id': additional_settings['task_id'] } input_files = additional_settings['input_files'] result_files = additional_settings['result_files'] if len(input_files) != len(config['local_input_files']): description = 'Number of local_input_files in config does not match input_files.' callback_handler.send_callback(callback_type='files_retrieved', state='failed', description=description) exit(5) try: ac_download(input_files, config['local_input_files']) except: description = 'Could not retrieve input files.' callback_handler.send_callback( callback_type='files_retrieved', state='failed', description=description, exception=format_exc() ) exit(6) description = 'Input files retrieved.' 
callback_handler.send_callback(callback_type='files_retrieved', state='success', description=description) telemetry_data = None application_command = config['application_command'] try: if additional_settings.get('parameters'): if isinstance(additional_settings['parameters'], dict): application_command = '{} \'{}\''.format( application_command, json.dumps(additional_settings['parameters']) ) elif isinstance(additional_settings['parameters'], list): application_command += ''.join([' {}'.format(val) for val in additional_settings['parameters']]) else: raise Exception('Type of parameters not valid: {}'.format(type(additional_settings['parameters']))) preexec_fn = None if additional_settings.get('sandbox'): from cc_container_worker.application_container.sandbox import Sandbox sandbox = Sandbox(config=additional_settings.get('sandbox')) preexec_fn = sandbox.enter if additional_settings.get('tracing'): from cc_container_worker.application_container.tracing import Tracing if not os.path.exists(LOCAL_TRACING_FILE['dir']): os.makedirs(LOCAL_TRACING_FILE['dir']) local_tracing_file_path = os.path.join(LOCAL_TRACING_FILE['dir'], LOCAL_TRACING_FILE['name']) sp = Popen(application_command, stdout=PIPE, stderr=PIPE, shell=True, preexec_fn=preexec_fn) tracing = Tracing(sp.pid, config=additional_settings.get('tracing'), outfile=local_tracing_file_path) tracing.start() telemetry = Telemetry(sp, config=config) t = Thread(target=telemetry.monitor) t.start() std_out, std_err = sp.communicate() tracing.finish() else: sp = Popen(application_command, stdout=PIPE, stderr=PIPE, shell=True, preexec_fn=preexec_fn) telemetry = Telemetry(sp, config=config) t = Thread(target=telemetry.monitor) t.start() std_out, std_err = sp.communicate() return_code = sp.returncode # Collect telemetry data telemetry_data = telemetry.result() if std_out: telemetry_data['std_out'] = str(std_out) if std_err: telemetry_data['std_err'] = str(std_err) telemetry_data['return_code'] = return_code except: callback_handler.send_callback( callback_type='processed', state='failed', description='Processing failed.', exception=format_exc() ) exit(8) description = 'Processing succeeded.' state = 'success' exception = None if return_code != 0: description = 'Processing failed.' state = 'failed' try: if additional_settings.get('tracing'): tracing_file = additional_settings['tracing'].get('tracing_file') if tracing_file: tracing_upload(tracing_file, LOCAL_TRACING_FILE, meta_data) except: if return_code != 0: description = 'Processing failed and tracing file upload failed.' else: description = 'Tracing file upload failed.' state = 'failed' exception = format_exc() callback_handler.send_callback( callback_type='processed', state=state, description=description, exception=exception, telemetry=telemetry_data, ) if return_code != 0: exit(9) try: ac_upload(result_files, config['local_result_files'], meta_data) except: description = 'Could not send result files.' callback_handler.send_callback( callback_type='results_sent', state='failed', description=description, exception=format_exc() ) exit(10) callback_handler.send_callback( callback_type='results_sent', state='success', description='Result files sent.' ) if __name__ == '__main__': main()
35.524324
115
0.654139
import os import sys import json import jsonschema from subprocess import Popen, PIPE from threading import Thread from traceback import format_exc from cc_container_worker.application_container.telemetry import Telemetry from cc_container_worker.commons.data import ac_download, ac_upload, tracing_upload from cc_container_worker.commons.callbacks import CallbackHandler from cc_container_worker.commons.schemas import application_config_schema CONFIG_FILE_PATH = os.path.join(os.path.expanduser('~'), '.config', 'cc-container-worker', 'config.json') LOCAL_TRACING_FILE = { 'dir': '/var/tmp/cc-tracing', 'name': 'data.csv', 'optional': True } def main(): settings = json.loads(sys.argv[1]) callback_handler = CallbackHandler(settings, container_type='application') config = None try: with open(CONFIG_FILE_PATH) as f: config = json.load(f) jsonschema.validate(config, application_config_schema) except: description = 'Could not load JSON config file from path {}'.format(CONFIG_FILE_PATH) callback_handler.send_callback( callback_type='started', state='failed', description=description, exception=format_exc() ) exit(3) for key, val in config['local_result_files'].items(): try: if not os.path.exists(val['dir']): os.makedirs(val['dir']) except: pass description = 'Container started.' additional_settings = callback_handler.send_callback( callback_type='started', state='success', description=description ) meta_data = { 'application_container_id': settings['container_id'], 'task_id': additional_settings['task_id'] } input_files = additional_settings['input_files'] result_files = additional_settings['result_files'] if len(input_files) != len(config['local_input_files']): description = 'Number of local_input_files in config does not match input_files.' callback_handler.send_callback(callback_type='files_retrieved', state='failed', description=description) exit(5) try: ac_download(input_files, config['local_input_files']) except: description = 'Could not retrieve input files.' callback_handler.send_callback( callback_type='files_retrieved', state='failed', description=description, exception=format_exc() ) exit(6) description = 'Input files retrieved.' 
callback_handler.send_callback(callback_type='files_retrieved', state='success', description=description) telemetry_data = None application_command = config['application_command'] try: if additional_settings.get('parameters'): if isinstance(additional_settings['parameters'], dict): application_command = '{} \'{}\''.format( application_command, json.dumps(additional_settings['parameters']) ) elif isinstance(additional_settings['parameters'], list): application_command += ''.join([' {}'.format(val) for val in additional_settings['parameters']]) else: raise Exception('Type of parameters not valid: {}'.format(type(additional_settings['parameters']))) preexec_fn = None if additional_settings.get('sandbox'): from cc_container_worker.application_container.sandbox import Sandbox sandbox = Sandbox(config=additional_settings.get('sandbox')) preexec_fn = sandbox.enter if additional_settings.get('tracing'): from cc_container_worker.application_container.tracing import Tracing if not os.path.exists(LOCAL_TRACING_FILE['dir']): os.makedirs(LOCAL_TRACING_FILE['dir']) local_tracing_file_path = os.path.join(LOCAL_TRACING_FILE['dir'], LOCAL_TRACING_FILE['name']) sp = Popen(application_command, stdout=PIPE, stderr=PIPE, shell=True, preexec_fn=preexec_fn) tracing = Tracing(sp.pid, config=additional_settings.get('tracing'), outfile=local_tracing_file_path) tracing.start() telemetry = Telemetry(sp, config=config) t = Thread(target=telemetry.monitor) t.start() std_out, std_err = sp.communicate() tracing.finish() else: sp = Popen(application_command, stdout=PIPE, stderr=PIPE, shell=True, preexec_fn=preexec_fn) telemetry = Telemetry(sp, config=config) t = Thread(target=telemetry.monitor) t.start() std_out, std_err = sp.communicate() return_code = sp.returncode telemetry_data = telemetry.result() if std_out: telemetry_data['std_out'] = str(std_out) if std_err: telemetry_data['std_err'] = str(std_err) telemetry_data['return_code'] = return_code except: callback_handler.send_callback( callback_type='processed', state='failed', description='Processing failed.', exception=format_exc() ) exit(8) description = 'Processing succeeded.' state = 'success' exception = None if return_code != 0: description = 'Processing failed.' state = 'failed' try: if additional_settings.get('tracing'): tracing_file = additional_settings['tracing'].get('tracing_file') if tracing_file: tracing_upload(tracing_file, LOCAL_TRACING_FILE, meta_data) except: if return_code != 0: description = 'Processing failed and tracing file upload failed.' else: description = 'Tracing file upload failed.' state = 'failed' exception = format_exc() callback_handler.send_callback( callback_type='processed', state=state, description=description, exception=exception, telemetry=telemetry_data, ) if return_code != 0: exit(9) try: ac_upload(result_files, config['local_result_files'], meta_data) except: description = 'Could not send result files.' callback_handler.send_callback( callback_type='results_sent', state='failed', description=description, exception=format_exc() ) exit(10) callback_handler.send_callback( callback_type='results_sent', state='success', description='Result files sent.' ) if __name__ == '__main__': main()
true
true
f70e02bea1eb8fd1dcde3853686bc29b064869be
12,417
py
Python
mgt2001/hyp/non.py
derekdylu/mgt2001
b228d5e75e75a2f3f170e35db1bea999b765bec8
[ "MIT" ]
5
2021-03-01T18:31:41.000Z
2022-01-08T12:10:22.000Z
mgt2001/hyp/non.py
derekdylu/mgt2001
b228d5e75e75a2f3f170e35db1bea999b765bec8
[ "MIT" ]
1
2021-06-18T09:30:27.000Z
2021-06-18T09:30:27.000Z
mgt2001/hyp/non.py
derekdylu/mgt2001
b228d5e75e75a2f3f170e35db1bea999b765bec8
[ "MIT" ]
2
2021-06-11T07:58:41.000Z
2021-10-03T13:49:24.000Z
from matplotlib import pyplot as plt import pandas as pd import numpy as np import math import scipy.stats as stats def inter_p_value(p_value): # interpretation if p_value >= 0 and p_value < 0.01: inter_p = 'Overwhelming Evidence' elif p_value >= 0.01 and p_value < 0.05: inter_p = 'Strong Evidence' elif p_value >= 0.05 and p_value < 0.1: inter_p = 'Weak Evidence' elif p_value >= .1: inter_p = 'No Evidence' return inter_p def grank(data): if type(data) == np.ndarray or type(data) == list: alldata = data.copy() data = data.copy() else: alldata = data.values.copy() data = data.values.copy() alldata.sort() tmp_df = pd.DataFrame({'value': alldata}) tmp_df['rank'] = tmp_df.index + 1 value_to_rank = tmp_df.groupby('value').mean().reset_index() samp = pd.DataFrame({'value': data}) samp = pd.merge(samp, value_to_rank, how='left') return samp['rank'] def ranksum_z_test(df=None, to_compute='', alternative=None, precision=4, alpha=0.05): """ df can only have two columns and df.shape[0] > 10 alternative has three options: 'two-sided', 'less', 'greater' """ # sort all data points by values tmp_values = df.values.reshape(-1) tmp_values = tmp_values[~np.isnan(tmp_values)] tmp_values.sort() # assign ranks updated_df = pd.DataFrame({'value': tmp_values}) updated_df['rank'] = updated_df.index + 1 # average rank for identical value updated_df = updated_df.groupby('value').mean().reset_index() # display(updated_df) # Compute Sum of Ranks samp1 = pd.DataFrame({'value': df[to_compute].dropna().values}) samp1 = pd.merge(samp1, updated_df) T = samp1['rank'].sum() # compute mean and standard deviation n1 = df.iloc[:, 0].dropna().shape[0] n2 = df.iloc[:, 1].dropna().shape[0] E_T = n1*(n1+n2+1)/2 sigmaT = (n1*n2*(n1+n2+1)/12) ** 0.5 z = (T-E_T)/sigmaT # compute p-value # right (greater) p_value = 1 - stats.norm.cdf(z) if alternative == 'greater': pass elif alternative == 'less': p_value = stats.norm.cdf(z) elif alternative == 'two-sided': # two-tail if p_value > 0.5: p_value = stats.norm.cdf(z) p_value *= 2 flag = False if p_value < alpha: flag = True result = f'''======= z-test ======= T (sum of ranks) = {T} (n1, n2) = ({n1}, {n2}) mu_t = {E_T} sigma_t = {sigmaT} z statistic value (observed) = {z:.{precision}f} p-value = {p_value:.{precision}f} ({inter_p_value(p_value)}) Reject H_0 ({alternative}) → {flag} ''' print(result) result_dict = {'T': T, 'ET': E_T, 'sigmaT': sigmaT, 'z': z, 'p-value': p_value} return updated_df, result_dict def sign_binom_test(diff=None, sign='+', alternative=None, precision=4, alpha=0.05): n = diff.size - np.sum(diff == 0) if sign == '+': sign_count = np.sum(diff > 0) else: sign_count = np.sum(diff < 0) if alternative == 'greater' or alternative == 'less': # 如果超過一半就要切換 if sign_count > n / 2: p_value = 1 - stats.binom.cdf(sign_count - 1, n=n, p=0.5) else: p_value = stats.binom.cdf(sign_count, n=n, p=0.5) elif alternative == 'two-sided': p_value = stats.binom.cdf(sign_count, n=n, p=0.5) if p_value > 0.5: p_value = 1 - stats.binom.cdf(sign_count - 1, n=n, p=0.5) p_value *= 2 flag = False if p_value < alpha: flag = True result = f'''======= Sign Test - Binomial Distribution ======= (For small sample size (<= 10)) Targeted Sign: {sign} n = {n} Sign counts = {sign_count} p-value = {p_value:.{precision}f} ({inter_p_value(p_value)}) Reject H_0 ({alternative}) → {flag} ''' print(result) return sign_count, p_value def sign_z_test(diff=None, sign='+', alternative=None, precision=4, alpha=0.05): diff = diff[~(diff == 0)] n = len(diff) if sign == '+': T = np.sum(diff > 0) else: T = np.sum(diff < 0) 
z_stat = (T - 0.5 * n) / (.5 * (n ** 0.5)) # right tail if alternative == 'greater': p_value = 1 - stats.norm.cdf(z_stat) elif alternative == 'less': p_value = stats.norm.cdf(z_stat) elif alternative == 'two-sided': p_value = 1 - stats.norm.cdf(z_stat) if p_value > 0.5: p_value = stats.norm.cdf(z_stat) p_value *= 2 flag = False if p_value < alpha: flag = True result = f'''======= Sign Test - z Statistic ======= (For large sample size (> 10)) Targeted Sign: {sign} n = {n} Sign counts = {T} z statistic = {z_stat:.{precision}f} p-value = {p_value:.{precision}f} ({inter_p_value(p_value)}) Reject H_0 ({alternative}) → {flag} ''' print(result) return T, p_value def wilcoxon_signed_ranksum_z_test(diff=None, sign='+', alternative=None, precision=4, alpha=0.05): diff = diff[~(diff == 0)] n = len(diff) diff_abs = np.sort(np.abs(diff).to_numpy()) updated_diff = pd.DataFrame({'diff_abs': diff_abs}) updated_diff['rank'] = updated_diff.index + 1 updated_diff = updated_diff.groupby('diff_abs').mean().reset_index() new_df = pd.DataFrame({'diff': diff, 'diff_abs': np.abs(diff)}) new_df = pd.merge(new_df, updated_diff) if sign == '+': T = np.sum(new_df['rank'][new_df['diff'] > 0]) else: T = np.sum(new_df['rank'][new_df['diff'] < 0]) E_T = n * (n + 1) / 4 sigma_T = (n * (n + 1) * (2 * n + 1) / 24) ** 0.5 z_stat = (T - E_T) / sigma_T if alternative == 'greater': # right tail test p_value = 1 - stats.norm.cdf(z_stat) elif alternative == 'less': # left tail test p_value = stats.norm.cdf(z_stat) elif alternative == 'two-sided': # two-tailed test p_value = 1 - stats.norm.cdf(z_stat) if p_value > 0.5: p_value = stats.norm.cdf(z_stat) p_value *= 2 flag = False if p_value < alpha: flag = True result = f'''======= Wilcoxon Signed Rank Sum Test - z Statistic ======= (For large sample size (> 30)) Targeted Sign: {sign} n = {n} Sum of rank (T statistic) = {T} mu_t = {E_T} sigma_t = {sigma_T} z statistic value (observed) = {z_stat:.{precision}f} p-value = {p_value:.{precision}f} ({inter_p_value(p_value)}) Reject H_0 ({alternative}) → {flag} ''' print(result) result_dict = {'n': n, 'T': T, 'E_T': E_T, 'sigma_T': sigma_T, 'z_stat': z_stat, 'p_value': p_value} return new_df, result_dict def kruskal_chi2_test(data=None, alpha=0.05, precision=4): """ col = 要比較的 target row = data for each target """ if type(data) == pd.DataFrame: data = data.copy().to_numpy() alldata = np.concatenate(data.copy()) else: alldata = np.concatenate(data.copy()) k = data.shape[1] alldata.sort() tmp_df = pd.DataFrame(({'value': alldata})) tmp_df['rank'] = tmp_df.index + 1 # rank value_to_rank = tmp_df.groupby('value').mean().reset_index() T = [] sample_rank_df = [] for i in range(k): samp = pd.DataFrame( {'value': data[:, i][~np.isnan(data[:, i])]}) samp = pd.merge(samp, value_to_rank) sample_rank_df.append(samp) T.append(samp['rank'].sum()) n = [len(data[:, i][~np.isnan(data[:, i])]) for i in range(k)] # print(T) # print(n) rule_of_five_str = "" if (np.sum(np.array(n) < 5) > 0): rule_of_five_str += "!(At least one sample size is less than 5)" else: rule_of_five_str += "(All sample size >= 5)" N = np.sum(n) t_over_n = 0 for i in range(k): t_over_n += T[i] ** 2 / n[i] H = 12 / N / (N + 1) * t_over_n - 3 * (N + 1) p_value = 1 - stats.chi2.cdf(H, k - 1) chi2_stat = stats.chi2.ppf(1 - alpha, k - 1) result_dict = {'H': H, 'p-value': p_value, 'T': T, 'sample_rank_df': sample_rank_df} flag = p_value < alpha result = f'''======= Kruskal-Wallis Test with Chi-squared Test ======= {rule_of_five_str} H statistic value (observed) = {H:.{precision}f} chi2 critical 
value = {chi2_stat:.{precision}f} p-value = {p_value:.{precision}f} ({inter_p_value(p_value)}) Reject H_0 (Not all {k} population locations are the same) → {flag} ''' print(result) return result_dict def friedman_chi2_test(data=None, alpha=0.05, precision=4): """ col = 要比較的 target row = blocked data for each target """ if type(data) == np.ndarray: data = pd.DataFrame(data) new_df = data.apply(grank, axis=1) b, k = new_df.shape rule_of_five_str = "" if (b < 5 and k < 5): rule_of_five_str += f"!(Number of blocks = {b} < 5 and number of populations = {k} < 5)" else: rule_of_five_str += f"(Number of blocks = {b} >= 5 or number of populations {k} >= 5)" T = new_df.sum().to_numpy() F_r = 12 / b / k / (k + 1) * np.sum(T ** 2) - 3 * b * (k + 1) p_value = 1 - stats.chi2.cdf(F_r, k - 1) chi2_stat = stats.chi2.ppf(1 - alpha, k - 1) result_dict = {'F_r': F_r, 'p-value': p_value, 'T': T, 'sample_ranked_df': new_df} flag = p_value < alpha result = f'''======= Friedman Test with Chi-squared Test ======= {rule_of_five_str} F_r statistic value (observed) = {F_r:.{precision}f} chi2 critical value = {chi2_stat:.{precision}f} p-value = {p_value:.{precision}f} ({inter_p_value(p_value)}) Reject H_0 (Not all {k} population locations are the same) → {flag} ''' print(result) return result_dict def pearson_test(data=None, a=None, b=None, alpha=0.05, precision=4): """ a, b 還不能傳入東西 Make sure that data is in the form of [a, b] """ cov_mat = np.cov(data.values, rowvar=False) cor_mat = np.corrcoef(data.values, rowvar=False) cov = cov_mat[0][1] cor = cor_mat[0][1] n = data.shape[0] d_of_f = n - 2 t_c = stats.t.ppf(1 - alpha / 2, df=d_of_f) t_stat = cor * (((n - 2) / (1 - cor ** 2)) ** 0.5) flag = abs(t_stat) > t_c result_dict = {'cov': cov, 't_stat': t_stat, 'cor': cor, 't_c': t_c} results = f"""======= Pearson Correlation Coefficient ======= Covariance: {cov:.{precision}f} Coefficient of Correlation: {cor:.{precision}f} t (Critical Value) = {t_c:.{precision}f} t (Observed Value) = {t_stat:.{precision}f} Reject H_0 (There are linear relationship between two variables) → {flag} """ print(results) return result_dict def spearman_test(a=None, b=None, alpha=0.05, precision=4): spearman_restult_cor, spearman_restult_p_value = stats.spearmanr(a, b) # print(f'Correlation = {cor:.4f}, p-value={p_value:.4f}') n = len(a) rule_of_30_str = '' results = f"""======= Spearman Rank Correlation Coefficient ======= [scipy.stats.spearmanr] Coefficient of Correlation: {spearman_restult_cor:.{precision}f} p-value={spearman_restult_p_value:.{precision}f} ({inter_p_value(spearman_restult_p_value)}) """ if (n < 30): rule_of_30_str += f"!(n = {n} < 30)" flag = spearman_restult_p_value < alpha results += f""" Reject H_0 (There are relationship between two variables) → {flag} """ result_dict = {'spearman_result': [ spearman_restult_cor, spearman_restult_p_value]} else: rule_of_30_str += f"(n = {n} >= 30)" flag = spearman_restult_p_value < alpha results += f""" Reject H_0 (There are relationship between two variables) → {flag} """ z_stat = spearman_restult_cor * ((n - 1) ** 0.5) z_cv = stats.norm.ppf(1 - alpha/2) p_value = stats.norm.sf(z_stat) * 2 if p_value > 1: p_value = stats.norm.cdf(z_stat) * 2 flag = p_value < alpha results += f""" [z test statistic] {rule_of_30_str} r_s: {spearman_restult_cor:.{precision}f} (using spearmanr's result) z stat (observed value) = {z_stat:.{precision}f} z (critical value) = {z_cv:.{precision}f} p-value = {p_value:.{precision}f} ({inter_p_value(p_value)}) Reject H_0 (There are relationship between two variables) 
→ {flag} """ result_dict = {'spearman_result': [ spearman_restult_cor, spearman_restult_p_value], 'z_stat': z_stat, 'z_cv': z_cv, 'p-value': p_value} print(results) return result_dict
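The sign_binom_test and sign_z_test routines in the content above build the sign-test statistic by hand: count the '+' differences, then use either the exact Binomial(n, 0.5) tail or the large-sample z = (T - n/2) / (0.5 * sqrt(n)). Below is a minimal sketch cross-checking that arithmetic with scipy on a small made-up paired-difference vector; the data values and the right-tail ('greater') choice are illustrative assumptions, not taken from the record.

import numpy as np
from scipy import stats

# Hypothetical paired differences (made up for illustration only).
diff = np.array([1.2, -0.4, 2.1, 0.9, 3.3, -1.1, 0.5, 0.8, 1.7, -0.2, 0.6, 1.4])

n = np.count_nonzero(diff)          # zero differences are dropped, as in the module
T = np.sum(diff > 0)                # number of '+' signs

# Large-sample z statistic, mirroring sign_z_test: (T - n/2) / (0.5 * sqrt(n))
z = (T - 0.5 * n) / (0.5 * n ** 0.5)
p_normal = 1 - stats.norm.cdf(z)    # right-tail p-value from the normal approximation

# Exact right-tail probability from Binomial(n, 0.5), mirroring sign_binom_test.
p_exact = 1 - stats.binom.cdf(T - 1, n=n, p=0.5)

print(f"n = {n}, plus signs T = {T}")
print(f"normal approximation: z = {z:.3f}, p = {p_normal:.4f}")
print(f"exact binomial tail:  p = {p_exact:.4f}")

For n near the module's stated cutoff of 10 the two tails differ noticeably, which is exactly why the source keeps both a binomial and a z-statistic branch.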
29.079625
112
0.588065
from matplotlib import pyplot as plt import pandas as pd import numpy as np import math import scipy.stats as stats def inter_p_value(p_value): if p_value >= 0 and p_value < 0.01: inter_p = 'Overwhelming Evidence' elif p_value >= 0.01 and p_value < 0.05: inter_p = 'Strong Evidence' elif p_value >= 0.05 and p_value < 0.1: inter_p = 'Weak Evidence' elif p_value >= .1: inter_p = 'No Evidence' return inter_p def grank(data): if type(data) == np.ndarray or type(data) == list: alldata = data.copy() data = data.copy() else: alldata = data.values.copy() data = data.values.copy() alldata.sort() tmp_df = pd.DataFrame({'value': alldata}) tmp_df['rank'] = tmp_df.index + 1 value_to_rank = tmp_df.groupby('value').mean().reset_index() samp = pd.DataFrame({'value': data}) samp = pd.merge(samp, value_to_rank, how='left') return samp['rank'] def ranksum_z_test(df=None, to_compute='', alternative=None, precision=4, alpha=0.05): tmp_values = df.values.reshape(-1) tmp_values = tmp_values[~np.isnan(tmp_values)] tmp_values.sort() updated_df = pd.DataFrame({'value': tmp_values}) updated_df['rank'] = updated_df.index + 1 updated_df = updated_df.groupby('value').mean().reset_index() samp1 = pd.DataFrame({'value': df[to_compute].dropna().values}) samp1 = pd.merge(samp1, updated_df) T = samp1['rank'].sum() n1 = df.iloc[:, 0].dropna().shape[0] n2 = df.iloc[:, 1].dropna().shape[0] E_T = n1*(n1+n2+1)/2 sigmaT = (n1*n2*(n1+n2+1)/12) ** 0.5 z = (T-E_T)/sigmaT p_value = 1 - stats.norm.cdf(z) if alternative == 'greater': pass elif alternative == 'less': p_value = stats.norm.cdf(z) elif alternative == 'two-sided': if p_value > 0.5: p_value = stats.norm.cdf(z) p_value *= 2 flag = False if p_value < alpha: flag = True result = f'''======= z-test ======= T (sum of ranks) = {T} (n1, n2) = ({n1}, {n2}) mu_t = {E_T} sigma_t = {sigmaT} z statistic value (observed) = {z:.{precision}f} p-value = {p_value:.{precision}f} ({inter_p_value(p_value)}) Reject H_0 ({alternative}) → {flag} ''' print(result) result_dict = {'T': T, 'ET': E_T, 'sigmaT': sigmaT, 'z': z, 'p-value': p_value} return updated_df, result_dict def sign_binom_test(diff=None, sign='+', alternative=None, precision=4, alpha=0.05): n = diff.size - np.sum(diff == 0) if sign == '+': sign_count = np.sum(diff > 0) else: sign_count = np.sum(diff < 0) if alternative == 'greater' or alternative == 'less': if sign_count > n / 2: p_value = 1 - stats.binom.cdf(sign_count - 1, n=n, p=0.5) else: p_value = stats.binom.cdf(sign_count, n=n, p=0.5) elif alternative == 'two-sided': p_value = stats.binom.cdf(sign_count, n=n, p=0.5) if p_value > 0.5: p_value = 1 - stats.binom.cdf(sign_count - 1, n=n, p=0.5) p_value *= 2 flag = False if p_value < alpha: flag = True result = f'''======= Sign Test - Binomial Distribution ======= (For small sample size (<= 10)) Targeted Sign: {sign} n = {n} Sign counts = {sign_count} p-value = {p_value:.{precision}f} ({inter_p_value(p_value)}) Reject H_0 ({alternative}) → {flag} ''' print(result) return sign_count, p_value def sign_z_test(diff=None, sign='+', alternative=None, precision=4, alpha=0.05): diff = diff[~(diff == 0)] n = len(diff) if sign == '+': T = np.sum(diff > 0) else: T = np.sum(diff < 0) z_stat = (T - 0.5 * n) / (.5 * (n ** 0.5)) if alternative == 'greater': p_value = 1 - stats.norm.cdf(z_stat) elif alternative == 'less': p_value = stats.norm.cdf(z_stat) elif alternative == 'two-sided': p_value = 1 - stats.norm.cdf(z_stat) if p_value > 0.5: p_value = stats.norm.cdf(z_stat) p_value *= 2 flag = False if p_value < alpha: flag = True result = 
f'''======= Sign Test - z Statistic ======= (For large sample size (> 10)) Targeted Sign: {sign} n = {n} Sign counts = {T} z statistic = {z_stat:.{precision}f} p-value = {p_value:.{precision}f} ({inter_p_value(p_value)}) Reject H_0 ({alternative}) → {flag} ''' print(result) return T, p_value def wilcoxon_signed_ranksum_z_test(diff=None, sign='+', alternative=None, precision=4, alpha=0.05): diff = diff[~(diff == 0)] n = len(diff) diff_abs = np.sort(np.abs(diff).to_numpy()) updated_diff = pd.DataFrame({'diff_abs': diff_abs}) updated_diff['rank'] = updated_diff.index + 1 updated_diff = updated_diff.groupby('diff_abs').mean().reset_index() new_df = pd.DataFrame({'diff': diff, 'diff_abs': np.abs(diff)}) new_df = pd.merge(new_df, updated_diff) if sign == '+': T = np.sum(new_df['rank'][new_df['diff'] > 0]) else: T = np.sum(new_df['rank'][new_df['diff'] < 0]) E_T = n * (n + 1) / 4 sigma_T = (n * (n + 1) * (2 * n + 1) / 24) ** 0.5 z_stat = (T - E_T) / sigma_T if alternative == 'greater': p_value = 1 - stats.norm.cdf(z_stat) elif alternative == 'less': p_value = stats.norm.cdf(z_stat) elif alternative == 'two-sided': p_value = 1 - stats.norm.cdf(z_stat) if p_value > 0.5: p_value = stats.norm.cdf(z_stat) p_value *= 2 flag = False if p_value < alpha: flag = True result = f'''======= Wilcoxon Signed Rank Sum Test - z Statistic ======= (For large sample size (> 30)) Targeted Sign: {sign} n = {n} Sum of rank (T statistic) = {T} mu_t = {E_T} sigma_t = {sigma_T} z statistic value (observed) = {z_stat:.{precision}f} p-value = {p_value:.{precision}f} ({inter_p_value(p_value)}) Reject H_0 ({alternative}) → {flag} ''' print(result) result_dict = {'n': n, 'T': T, 'E_T': E_T, 'sigma_T': sigma_T, 'z_stat': z_stat, 'p_value': p_value} return new_df, result_dict def kruskal_chi2_test(data=None, alpha=0.05, precision=4): if type(data) == pd.DataFrame: data = data.copy().to_numpy() alldata = np.concatenate(data.copy()) else: alldata = np.concatenate(data.copy()) k = data.shape[1] alldata.sort() tmp_df = pd.DataFrame(({'value': alldata})) tmp_df['rank'] = tmp_df.index + 1 value_to_rank = tmp_df.groupby('value').mean().reset_index() T = [] sample_rank_df = [] for i in range(k): samp = pd.DataFrame( {'value': data[:, i][~np.isnan(data[:, i])]}) samp = pd.merge(samp, value_to_rank) sample_rank_df.append(samp) T.append(samp['rank'].sum()) n = [len(data[:, i][~np.isnan(data[:, i])]) for i in range(k)] rule_of_five_str = "" if (np.sum(np.array(n) < 5) > 0): rule_of_five_str += "!(At least one sample size is less than 5)" else: rule_of_five_str += "(All sample size >= 5)" N = np.sum(n) t_over_n = 0 for i in range(k): t_over_n += T[i] ** 2 / n[i] H = 12 / N / (N + 1) * t_over_n - 3 * (N + 1) p_value = 1 - stats.chi2.cdf(H, k - 1) chi2_stat = stats.chi2.ppf(1 - alpha, k - 1) result_dict = {'H': H, 'p-value': p_value, 'T': T, 'sample_rank_df': sample_rank_df} flag = p_value < alpha result = f'''======= Kruskal-Wallis Test with Chi-squared Test ======= {rule_of_five_str} H statistic value (observed) = {H:.{precision}f} chi2 critical value = {chi2_stat:.{precision}f} p-value = {p_value:.{precision}f} ({inter_p_value(p_value)}) Reject H_0 (Not all {k} population locations are the same) → {flag} ''' print(result) return result_dict def friedman_chi2_test(data=None, alpha=0.05, precision=4): if type(data) == np.ndarray: data = pd.DataFrame(data) new_df = data.apply(grank, axis=1) b, k = new_df.shape rule_of_five_str = "" if (b < 5 and k < 5): rule_of_five_str += f"!(Number of blocks = {b} < 5 and number of populations = {k} < 5)" 
else: rule_of_five_str += f"(Number of blocks = {b} >= 5 or number of populations {k} >= 5)" T = new_df.sum().to_numpy() F_r = 12 / b / k / (k + 1) * np.sum(T ** 2) - 3 * b * (k + 1) p_value = 1 - stats.chi2.cdf(F_r, k - 1) chi2_stat = stats.chi2.ppf(1 - alpha, k - 1) result_dict = {'F_r': F_r, 'p-value': p_value, 'T': T, 'sample_ranked_df': new_df} flag = p_value < alpha result = f'''======= Friedman Test with Chi-squared Test ======= {rule_of_five_str} F_r statistic value (observed) = {F_r:.{precision}f} chi2 critical value = {chi2_stat:.{precision}f} p-value = {p_value:.{precision}f} ({inter_p_value(p_value)}) Reject H_0 (Not all {k} population locations are the same) → {flag} ''' print(result) return result_dict def pearson_test(data=None, a=None, b=None, alpha=0.05, precision=4): cov_mat = np.cov(data.values, rowvar=False) cor_mat = np.corrcoef(data.values, rowvar=False) cov = cov_mat[0][1] cor = cor_mat[0][1] n = data.shape[0] d_of_f = n - 2 t_c = stats.t.ppf(1 - alpha / 2, df=d_of_f) t_stat = cor * (((n - 2) / (1 - cor ** 2)) ** 0.5) flag = abs(t_stat) > t_c result_dict = {'cov': cov, 't_stat': t_stat, 'cor': cor, 't_c': t_c} results = f"""======= Pearson Correlation Coefficient ======= Covariance: {cov:.{precision}f} Coefficient of Correlation: {cor:.{precision}f} t (Critical Value) = {t_c:.{precision}f} t (Observed Value) = {t_stat:.{precision}f} Reject H_0 (There are linear relationship between two variables) → {flag} """ print(results) return result_dict def spearman_test(a=None, b=None, alpha=0.05, precision=4): spearman_restult_cor, spearman_restult_p_value = stats.spearmanr(a, b) n = len(a) rule_of_30_str = '' results = f"""======= Spearman Rank Correlation Coefficient ======= [scipy.stats.spearmanr] Coefficient of Correlation: {spearman_restult_cor:.{precision}f} p-value={spearman_restult_p_value:.{precision}f} ({inter_p_value(spearman_restult_p_value)}) """ if (n < 30): rule_of_30_str += f"!(n = {n} < 30)" flag = spearman_restult_p_value < alpha results += f""" Reject H_0 (There are relationship between two variables) → {flag} """ result_dict = {'spearman_result': [ spearman_restult_cor, spearman_restult_p_value]} else: rule_of_30_str += f"(n = {n} >= 30)" flag = spearman_restult_p_value < alpha results += f""" Reject H_0 (There are relationship between two variables) → {flag} """ z_stat = spearman_restult_cor * ((n - 1) ** 0.5) z_cv = stats.norm.ppf(1 - alpha/2) p_value = stats.norm.sf(z_stat) * 2 if p_value > 1: p_value = stats.norm.cdf(z_stat) * 2 flag = p_value < alpha results += f""" [z test statistic] {rule_of_30_str} r_s: {spearman_restult_cor:.{precision}f} (using spearmanr's result) z stat (observed value) = {z_stat:.{precision}f} z (critical value) = {z_cv:.{precision}f} p-value = {p_value:.{precision}f} ({inter_p_value(p_value)}) Reject H_0 (There are relationship between two variables) → {flag} """ result_dict = {'spearman_result': [ spearman_restult_cor, spearman_restult_p_value], 'z_stat': z_stat, 'z_cv': z_cv, 'p-value': p_value} print(results) return result_dict
true
true
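The kruskal_chi2_test function in the record above ranks the pooled samples and forms H = 12 / (N (N + 1)) * sum(T_j^2 / n_j) - 3 (N + 1) with a chi-squared reference. A minimal sketch of the same computation next to scipy.stats.kruskal for comparison; the three samples are made-up values with no ties, so no tie correction is involved.

import numpy as np
from scipy import stats

# Three small made-up samples (all values distinct, so ranking has no ties).
a = np.array([27.0, 31.0, 25.0, 29.5, 33.0])
b = np.array([24.0, 22.5, 26.0, 28.0, 21.0])
c = np.array([35.0, 30.0, 32.5, 34.0, 36.0])

samples = [a, b, c]
pooled = np.concatenate(samples)
ranks = stats.rankdata(pooled)                               # ranks of the pooled data
splits = np.split(ranks, np.cumsum([len(s) for s in samples])[:-1])

N = len(pooled)
# Same H statistic that kruskal_chi2_test assembles from the per-sample rank sums.
H = 12 / (N * (N + 1)) * sum(r.sum() ** 2 / len(r) for r in splits) - 3 * (N + 1)
p = 1 - stats.chi2.cdf(H, df=len(samples) - 1)

H_scipy, p_scipy = stats.kruskal(a, b, c)                    # library implementation
print(f"hand-rolled: H = {H:.4f}, p = {p:.4f}")
print(f"scipy:       H = {H_scipy:.4f}, p = {p_scipy:.4f}")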
f70e039d9a538ccea5c7dfb4044a5badc285fa16
825
py
Python
modules/post-exploitation/gpp-decrypt.py
TheRealJoeyCo/ptf
dd1985696ba8d33be46645efd9d086ac48bd42cf
[ "FTL" ]
1
2021-01-12T23:32:22.000Z
2021-01-12T23:32:22.000Z
modules/post-exploitation/gpp-decrypt.py
TheRealJoeyCo/ptf
dd1985696ba8d33be46645efd9d086ac48bd42cf
[ "FTL" ]
null
null
null
modules/post-exploitation/gpp-decrypt.py
TheRealJoeyCo/ptf
dd1985696ba8d33be46645efd9d086ac48bd42cf
[ "FTL" ]
2
2018-02-07T23:39:48.000Z
2018-05-16T22:31:28.000Z
#!/usr/bin/env python ##################################### # Installation module for gpp-decrypt ##################################### # AUTHOR OF MODULE NAME AUTHOR="Larry Spohn (Spoonman)" # DESCRIPTION OF THE MODULE DESCRIPTION="This module will install/upgrade gpp-decrypt - a tool for decrypting passwords found in Group Policy Preferences (GPP)" # INSTALL TYPE GIT, SVN, FILE DOWNLOAD # OPTIONS = GIT, SVN, FILE INSTALL_TYPE="GIT" # LOCATION OF THE FILE OR GIT/SVN REPOSITORY REPOSITORY_LOCATION="https://github.com/SecurityToolsArchive/gpp-decrypt" # WHERE DO YOU WANT TO INSTALL IT INSTALL_LOCATION="gpp-decrypt" # DEPENDS FOR DEBIAN INSTALLS DEBIAN="git" # DEPENDS FOR FEDORA INSTALLS FEDORA="git" # COMMANDS TO RUN AFTER AFTER_COMMANDS="" # THIS WILL CREATE AN AUTOMATIC LAUNCHER FOR THE TOOL LAUNCHER=""
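This module is pure configuration: the framework that installs it only needs the uppercase globals (AUTHOR, INSTALL_TYPE, REPOSITORY_LOCATION, and so on). A minimal sketch of reading those values out of such a file with the standard library; the path is the one recorded above but is otherwise illustrative, and this is not the framework's own loader.

import runpy

# Execute the assignment-only module file and collect its globals.
meta = runpy.run_path("modules/post-exploitation/gpp-decrypt.py")

print(meta["DESCRIPTION"])
print("install type:", meta["INSTALL_TYPE"])
print("clone from:  ", meta["REPOSITORY_LOCATION"])
print("install into:", meta["INSTALL_LOCATION"])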
25
132
0.699394
AUTHOR="Larry Spohn (Spoonman)" DESCRIPTION="This module will install/upgrade gpp-decrypt - a tool for decrypting passwords found in Group Policy Preferences (GPP)" INSTALL_TYPE="GIT" REPOSITORY_LOCATION="https://github.com/SecurityToolsArchive/gpp-decrypt" INSTALL_LOCATION="gpp-decrypt" DEBIAN="git" FEDORA="git" AFTER_COMMANDS="" LAUNCHER=""
true
true
f70e03f7c7e6c6fb27ff349ab1be2511be8a66bf
8655
py
Python
test/option-k.py
EmanueleCannizzaro/scons
6baa4e65cdf4df6951473545b69435711864e509
[ "MIT" ]
1
2019-09-18T06:37:02.000Z
2019-09-18T06:37:02.000Z
test/option-k.py
EmanueleCannizzaro/scons
6baa4e65cdf4df6951473545b69435711864e509
[ "MIT" ]
null
null
null
test/option-k.py
EmanueleCannizzaro/scons
6baa4e65cdf4df6951473545b69435711864e509
[ "MIT" ]
null
null
null
#!/usr/bin/env python # # Copyright (c) 2001 - 2016 The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __revision__ = "test/option-k.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog" import os.path import TestSCons _python_ = TestSCons._python_ test = TestSCons.TestSCons() test.subdir('work1', 'work2', 'work3') test.write('succeed.py', r""" import sys file = open(sys.argv[1], 'wb') file.write("succeed.py: %s\n" % sys.argv[1]) file.close() sys.exit(0) """) test.write('fail.py', r""" import sys sys.exit(1) """) # # Test: work1 # test.write(['work1', 'SConstruct'], """\ Succeed = Builder(action = r'%(_python_)s ../succeed.py $TARGETS') Fail = Builder(action = r'%(_python_)s ../fail.py $TARGETS') env = Environment(BUILDERS = { 'Succeed' : Succeed, 'Fail' : Fail }) env.Fail(target = 'aaa.1', source = 'aaa.in') env.Succeed(target = 'aaa.out', source = 'aaa.1') env.Succeed(target = 'bbb.out', source = 'bbb.in') """ % locals()) test.write(['work1', 'aaa.in'], "aaa.in\n") test.write(['work1', 'bbb.in'], "bbb.in\n") test.run(chdir = 'work1', arguments = 'aaa.out bbb.out', stderr = 'scons: *** [aaa.1] Error 1\n', status = 2) test.must_not_exist(test.workpath('work1', 'aaa.1')) test.must_not_exist(test.workpath('work1', 'aaa.out')) test.must_not_exist(test.workpath('work1', 'bbb.out')) test.run(chdir = 'work1', arguments = '-k aaa.out bbb.out', stderr = 'scons: *** [aaa.1] Error 1\n', status = 2) test.must_not_exist(test.workpath('work1', 'aaa.1')) test.must_not_exist(test.workpath('work1', 'aaa.out')) test.must_match(['work1', 'bbb.out'], "succeed.py: bbb.out\n") test.unlink(['work1', 'bbb.out']) test.run(chdir = 'work1', arguments = '--keep-going aaa.out bbb.out', stderr = 'scons: *** [aaa.1] Error 1\n', status = 2) test.must_not_exist(test.workpath('work1', 'aaa.1')) test.must_not_exist(test.workpath('work1', 'aaa.out')) test.must_match(['work1', 'bbb.out'], "succeed.py: bbb.out\n") expect = """\ scons: Reading SConscript files ... scons: done reading SConscript files. scons: Cleaning targets ... Removed bbb.out scons: done cleaning targets. 
""" test.run(chdir = 'work1', arguments = '--clean --keep-going aaa.out bbb.out', stdout = expect) test.must_not_exist(test.workpath('work1', 'aaa.1')) test.must_not_exist(test.workpath('work1', 'aaa.out')) test.must_not_exist(test.workpath('work1', 'bbb.out')) # # Test: work2 # test.write(['work2', 'SConstruct'], """\ Succeed = Builder(action = r'%(_python_)s ../succeed.py $TARGETS') Fail = Builder(action = r'%(_python_)s ../fail.py $TARGETS') env = Environment(BUILDERS = { 'Succeed' : Succeed, 'Fail' : Fail }) env.Fail('aaa.out', 'aaa.in') env.Succeed('bbb.out', 'aaa.out') env.Succeed('ccc.out', 'ccc.in') env.Succeed('ddd.out', 'ccc.in') """ % locals()) test.write(['work2', 'aaa.in'], "aaa.in\n") test.write(['work2', 'ccc.in'], "ccc.in\n") test.run(chdir = 'work2', arguments = '-k .', status = 2, stderr = None, stdout = """\ scons: Reading SConscript files ... scons: done reading SConscript files. scons: Building targets ... %(_python_)s ../fail.py aaa.out %(_python_)s ../succeed.py ccc.out %(_python_)s ../succeed.py ddd.out scons: done building targets (errors occurred during build). """ % locals()) test.must_not_exist(['work2', 'aaa.out']) test.must_not_exist(['work2', 'bbb.out']) test.must_match(['work2', 'ccc.out'], "succeed.py: ccc.out\n") test.must_match(['work2', 'ddd.out'], "succeed.py: ddd.out\n") # # Test: work3 # # Check that the -k (keep-going) switch works correctly when the Nodes # forms a DAG. The test case is the following # # all # | # +-----+-----+-------------+ # | | | # a1 a2 a3 # | | | # + +---+---+ +---+---+ # \ | / | | # \ bbb.out / a4 ccc.out # \ / / # \ / / # \ / / # aaa.out (fails) # test.write(['work3', 'SConstruct'], """\ Succeed = Builder(action = r'%(_python_)s ../succeed.py $TARGETS') Fail = Builder(action = r'%(_python_)s ../fail.py $TARGETS') env = Environment(BUILDERS = { 'Succeed' : Succeed, 'Fail' : Fail }) a = env.Fail('aaa.out', 'aaa.in') b = env.Succeed('bbb.out', 'bbb.in') c = env.Succeed('ccc.out', 'ccc.in') a1 = Alias( 'a1', a ) a2 = Alias( 'a2', a+b) a4 = Alias( 'a4', c) a3 = Alias( 'a3', a4+c) Alias('all', a1+a2+a3) """ % locals()) test.write(['work3', 'aaa.in'], "aaa.in\n") test.write(['work3', 'bbb.in'], "bbb.in\n") test.write(['work3', 'ccc.in'], "ccc.in\n") # Test tegular build (i.e. without -k) test.run(chdir = 'work3', arguments = '.', status = 2, stderr = None, stdout = """\ scons: Reading SConscript files ... scons: done reading SConscript files. scons: Building targets ... %(_python_)s ../fail.py aaa.out scons: building terminated because of errors. """ % locals()) test.must_not_exist(['work3', 'aaa.out']) test.must_not_exist(['work3', 'bbb.out']) test.must_not_exist(['work3', 'ccc.out']) test.run(chdir = 'work3', arguments = '-c .') test.must_not_exist(['work3', 'aaa.out']) test.must_not_exist(['work3', 'bbb.out']) test.must_not_exist(['work3', 'ccc.out']) # Current directory test.run(chdir = 'work3', arguments = '-k .', status = 2, stderr = None, stdout = """\ scons: Reading SConscript files ... scons: done reading SConscript files. scons: Building targets ... %(_python_)s ../fail.py aaa.out %(_python_)s ../succeed.py bbb.out %(_python_)s ../succeed.py ccc.out scons: done building targets (errors occurred during build). 
""" % locals()) test.must_not_exist(['work3', 'aaa.out']) test.must_exist(['work3', 'bbb.out']) test.must_exist(['work3', 'ccc.out']) test.run(chdir = 'work3', arguments = '-c .') test.must_not_exist(['work3', 'aaa.out']) test.must_not_exist(['work3', 'bbb.out']) test.must_not_exist(['work3', 'ccc.out']) # Single target test.run(chdir = 'work3', arguments = '--keep-going all', status = 2, stderr = None, stdout = """\ scons: Reading SConscript files ... scons: done reading SConscript files. scons: Building targets ... %(_python_)s ../fail.py aaa.out %(_python_)s ../succeed.py bbb.out %(_python_)s ../succeed.py ccc.out scons: done building targets (errors occurred during build). """ % locals()) test.must_not_exist(['work3', 'aaa.out']) test.must_exist(['work3', 'bbb.out']) test.must_exist(['work3', 'ccc.out']) test.run(chdir = 'work3', arguments = '-c .') test.must_not_exist(['work3', 'aaa.out']) test.must_not_exist(['work3', 'bbb.out']) test.must_not_exist(['work3', 'ccc.out']) # Separate top-level targets test.run(chdir = 'work3', arguments = '-k a1 a2 a3', status = 2, stderr = None, stdout = """\ scons: Reading SConscript files ... scons: done reading SConscript files. scons: Building targets ... %(_python_)s ../fail.py aaa.out %(_python_)s ../succeed.py bbb.out %(_python_)s ../succeed.py ccc.out scons: done building targets (errors occurred during build). """ % locals()) test.must_not_exist(['work3', 'aaa.out']) test.must_exist(['work3', 'bbb.out']) test.must_exist(['work3', 'ccc.out']) test.pass_test() # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:
28.65894
90
0.619295
__revision__ = "test/option-k.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog" import os.path import TestSCons _python_ = TestSCons._python_ test = TestSCons.TestSCons() test.subdir('work1', 'work2', 'work3') test.write('succeed.py', r""" import sys file = open(sys.argv[1], 'wb') file.write("succeed.py: %s\n" % sys.argv[1]) file.close() sys.exit(0) """) test.write('fail.py', r""" import sys sys.exit(1) """) test.write(['work1', 'SConstruct'], """\ Succeed = Builder(action = r'%(_python_)s ../succeed.py $TARGETS') Fail = Builder(action = r'%(_python_)s ../fail.py $TARGETS') env = Environment(BUILDERS = { 'Succeed' : Succeed, 'Fail' : Fail }) env.Fail(target = 'aaa.1', source = 'aaa.in') env.Succeed(target = 'aaa.out', source = 'aaa.1') env.Succeed(target = 'bbb.out', source = 'bbb.in') """ % locals()) test.write(['work1', 'aaa.in'], "aaa.in\n") test.write(['work1', 'bbb.in'], "bbb.in\n") test.run(chdir = 'work1', arguments = 'aaa.out bbb.out', stderr = 'scons: *** [aaa.1] Error 1\n', status = 2) test.must_not_exist(test.workpath('work1', 'aaa.1')) test.must_not_exist(test.workpath('work1', 'aaa.out')) test.must_not_exist(test.workpath('work1', 'bbb.out')) test.run(chdir = 'work1', arguments = '-k aaa.out bbb.out', stderr = 'scons: *** [aaa.1] Error 1\n', status = 2) test.must_not_exist(test.workpath('work1', 'aaa.1')) test.must_not_exist(test.workpath('work1', 'aaa.out')) test.must_match(['work1', 'bbb.out'], "succeed.py: bbb.out\n") test.unlink(['work1', 'bbb.out']) test.run(chdir = 'work1', arguments = '--keep-going aaa.out bbb.out', stderr = 'scons: *** [aaa.1] Error 1\n', status = 2) test.must_not_exist(test.workpath('work1', 'aaa.1')) test.must_not_exist(test.workpath('work1', 'aaa.out')) test.must_match(['work1', 'bbb.out'], "succeed.py: bbb.out\n") expect = """\ scons: Reading SConscript files ... scons: done reading SConscript files. scons: Cleaning targets ... Removed bbb.out scons: done cleaning targets. """ test.run(chdir = 'work1', arguments = '--clean --keep-going aaa.out bbb.out', stdout = expect) test.must_not_exist(test.workpath('work1', 'aaa.1')) test.must_not_exist(test.workpath('work1', 'aaa.out')) test.must_not_exist(test.workpath('work1', 'bbb.out')) test.write(['work2', 'SConstruct'], """\ Succeed = Builder(action = r'%(_python_)s ../succeed.py $TARGETS') Fail = Builder(action = r'%(_python_)s ../fail.py $TARGETS') env = Environment(BUILDERS = { 'Succeed' : Succeed, 'Fail' : Fail }) env.Fail('aaa.out', 'aaa.in') env.Succeed('bbb.out', 'aaa.out') env.Succeed('ccc.out', 'ccc.in') env.Succeed('ddd.out', 'ccc.in') """ % locals()) test.write(['work2', 'aaa.in'], "aaa.in\n") test.write(['work2', 'ccc.in'], "ccc.in\n") test.run(chdir = 'work2', arguments = '-k .', status = 2, stderr = None, stdout = """\ scons: Reading SConscript files ... scons: done reading SConscript files. scons: Building targets ... %(_python_)s ../fail.py aaa.out %(_python_)s ../succeed.py ccc.out %(_python_)s ../succeed.py ddd.out scons: done building targets (errors occurred during build). 
""" % locals()) test.must_not_exist(['work2', 'aaa.out']) test.must_not_exist(['work2', 'bbb.out']) test.must_match(['work2', 'ccc.out'], "succeed.py: ccc.out\n") test.must_match(['work2', 'ddd.out'], "succeed.py: ddd.out\n") test.write(['work3', 'SConstruct'], """\ Succeed = Builder(action = r'%(_python_)s ../succeed.py $TARGETS') Fail = Builder(action = r'%(_python_)s ../fail.py $TARGETS') env = Environment(BUILDERS = { 'Succeed' : Succeed, 'Fail' : Fail }) a = env.Fail('aaa.out', 'aaa.in') b = env.Succeed('bbb.out', 'bbb.in') c = env.Succeed('ccc.out', 'ccc.in') a1 = Alias( 'a1', a ) a2 = Alias( 'a2', a+b) a4 = Alias( 'a4', c) a3 = Alias( 'a3', a4+c) Alias('all', a1+a2+a3) """ % locals()) test.write(['work3', 'aaa.in'], "aaa.in\n") test.write(['work3', 'bbb.in'], "bbb.in\n") test.write(['work3', 'ccc.in'], "ccc.in\n") test.run(chdir = 'work3', arguments = '.', status = 2, stderr = None, stdout = """\ scons: Reading SConscript files ... scons: done reading SConscript files. scons: Building targets ... %(_python_)s ../fail.py aaa.out scons: building terminated because of errors. """ % locals()) test.must_not_exist(['work3', 'aaa.out']) test.must_not_exist(['work3', 'bbb.out']) test.must_not_exist(['work3', 'ccc.out']) test.run(chdir = 'work3', arguments = '-c .') test.must_not_exist(['work3', 'aaa.out']) test.must_not_exist(['work3', 'bbb.out']) test.must_not_exist(['work3', 'ccc.out']) test.run(chdir = 'work3', arguments = '-k .', status = 2, stderr = None, stdout = """\ scons: Reading SConscript files ... scons: done reading SConscript files. scons: Building targets ... %(_python_)s ../fail.py aaa.out %(_python_)s ../succeed.py bbb.out %(_python_)s ../succeed.py ccc.out scons: done building targets (errors occurred during build). """ % locals()) test.must_not_exist(['work3', 'aaa.out']) test.must_exist(['work3', 'bbb.out']) test.must_exist(['work3', 'ccc.out']) test.run(chdir = 'work3', arguments = '-c .') test.must_not_exist(['work3', 'aaa.out']) test.must_not_exist(['work3', 'bbb.out']) test.must_not_exist(['work3', 'ccc.out']) test.run(chdir = 'work3', arguments = '--keep-going all', status = 2, stderr = None, stdout = """\ scons: Reading SConscript files ... scons: done reading SConscript files. scons: Building targets ... %(_python_)s ../fail.py aaa.out %(_python_)s ../succeed.py bbb.out %(_python_)s ../succeed.py ccc.out scons: done building targets (errors occurred during build). """ % locals()) test.must_not_exist(['work3', 'aaa.out']) test.must_exist(['work3', 'bbb.out']) test.must_exist(['work3', 'ccc.out']) test.run(chdir = 'work3', arguments = '-c .') test.must_not_exist(['work3', 'aaa.out']) test.must_not_exist(['work3', 'bbb.out']) test.must_not_exist(['work3', 'ccc.out']) test.run(chdir = 'work3', arguments = '-k a1 a2 a3', status = 2, stderr = None, stdout = """\ scons: Reading SConscript files ... scons: done reading SConscript files. scons: Building targets ... %(_python_)s ../fail.py aaa.out %(_python_)s ../succeed.py bbb.out %(_python_)s ../succeed.py ccc.out scons: done building targets (errors occurred during build). """ % locals()) test.must_not_exist(['work3', 'aaa.out']) test.must_exist(['work3', 'bbb.out']) test.must_exist(['work3', 'ccc.out']) test.pass_test()
true
true
f70e062a03a17b15487aef3c92d81d8e650ad003
2883
py
Python
conans/server/rest/bottle_plugins/authorization_header.py
laundry-96/conan
fd938f7220ca042d94c42ec5eb607ee69c6785a3
[ "MIT" ]
1
2021-06-14T01:39:27.000Z
2021-06-14T01:39:27.000Z
conans/server/rest/bottle_plugins/authorization_header.py
laundry-96/conan
fd938f7220ca042d94c42ec5eb607ee69c6785a3
[ "MIT" ]
1
2020-04-18T10:13:37.000Z
2020-04-18T10:16:37.000Z
conans/server/rest/bottle_plugins/authorization_header.py
laundry-96/conan
fd938f7220ca042d94c42ec5eb607ee69c6785a3
[ "MIT" ]
1
2021-06-03T23:08:43.000Z
2021-06-03T23:08:43.000Z
import inspect from abc import ABCMeta, abstractmethod from bottle import PluginError, request from conans.util.log import logger class AuthorizationHeader(object): """ Generic plugin to handle Authorization header. Must be extended and implement some abstract methods in subclasses """ __metaclass__ = ABCMeta name = 'authorizationheader' api = 2 def __init__(self, keyword): # Required self.keyword = keyword def setup(self, app): """ Make sure that other installed plugins don't affect the same keyword argument. """ for other in app.plugins: if not isinstance(other, self.__class__): continue if other.keyword == self.keyword: raise PluginError("Found another AuthorizationHeaderBottlePlugin plugin with " "conflicting settings (non-unique keyword).") def apply(self, callback, context): """ Test if the original callback accepts a 'self.keyword' keyword. """ args = inspect.getargspec(context.callback)[0] logger.debug("Call: %s" % str(callback)) if self.keyword not in args: return callback def wrapper(*args, **kwargs): """ Check for user credentials in http header """ # Get Authorization header_value = self.get_authorization_header_value() new_kwargs = self.parse_authorization_value(header_value) if not new_kwargs: raise self.get_invalid_header_response() kwargs.update(new_kwargs) return callback(*args, **kwargs) # kwargs has :xxx variables from url # Replace the route callback with the wrapped one. return wrapper def get_authorization_header_value(self): """ Get from the request the header of http basic auth: http://en.wikipedia.org/wiki/Basic_access_authentication """ auth_type = self.get_authorization_type() if request.headers.get("Authorization", None) is not None: auth_line = request.headers.get("Authorization", None) if not auth_line.startswith("%s " % auth_type): raise self.get_invalid_header_response() return auth_line[len(auth_type) + 1:] else: return None @abstractmethod def get_authorization_type(self): """Abstract. Example: Basic (for http basic auth) or Beagle for JWT""" raise NotImplementedError() @abstractmethod def parse_authorization_value(self, header_value): """Abstract. Parse header_value and return kwargs to apply bottle method parameters""" raise NotImplementedError() @abstractmethod def get_invalid_header_response(self): """A response from a malformed header""" raise NotImplementedError()
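AuthorizationHeader leaves three hooks abstract: which scheme the header must use, how to turn the header value into keyword arguments for the route, and what to raise when the header is malformed. A minimal sketch of a Basic-auth style subclass; the class name, the users/password handling and the 401 HTTPError response are assumptions for illustration, not Conan's actual plugin.

import base64

from bottle import HTTPError

from conans.server.rest.bottle_plugins.authorization_header import AuthorizationHeader


class BasicAuthHeader(AuthorizationHeader):
    """Illustrative subclass: decodes 'Authorization: Basic <base64 user:pass>'."""

    def get_authorization_type(self):
        return "Basic"  # the header line must start with "Basic "

    def parse_authorization_value(self, header_value):
        if header_value is None:
            return None
        try:
            user, password = base64.b64decode(header_value).decode().split(":", 1)
        except ValueError:
            return None  # triggers get_invalid_header_response() in the wrapper
        # The returned dict is merged into the route's keyword arguments by apply().
        return {self.keyword: (user, password)}

    def get_invalid_header_response(self):
        return HTTPError(401, "Invalid or missing Basic authorization header")


# app.install(BasicAuthHeader(keyword="auth_user"))  # routes with an `auth_user` parameter then receive it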
36.961538
94
0.644121
import inspect from abc import ABCMeta, abstractmethod from bottle import PluginError, request from conans.util.log import logger class AuthorizationHeader(object): __metaclass__ = ABCMeta name = 'authorizationheader' api = 2 def __init__(self, keyword): self.keyword = keyword def setup(self, app): for other in app.plugins: if not isinstance(other, self.__class__): continue if other.keyword == self.keyword: raise PluginError("Found another AuthorizationHeaderBottlePlugin plugin with " "conflicting settings (non-unique keyword).") def apply(self, callback, context): args = inspect.getargspec(context.callback)[0] logger.debug("Call: %s" % str(callback)) if self.keyword not in args: return callback def wrapper(*args, **kwargs): header_value = self.get_authorization_header_value() new_kwargs = self.parse_authorization_value(header_value) if not new_kwargs: raise self.get_invalid_header_response() kwargs.update(new_kwargs) return callback(*args, **kwargs) return wrapper def get_authorization_header_value(self): auth_type = self.get_authorization_type() if request.headers.get("Authorization", None) is not None: auth_line = request.headers.get("Authorization", None) if not auth_line.startswith("%s " % auth_type): raise self.get_invalid_header_response() return auth_line[len(auth_type) + 1:] else: return None @abstractmethod def get_authorization_type(self): raise NotImplementedError() @abstractmethod def parse_authorization_value(self, header_value): raise NotImplementedError() @abstractmethod def get_invalid_header_response(self): raise NotImplementedError()
true
true
f70e06d56adcd25862d3ef5d4056a66389c16fa6
25584
py
Python
.venv/lib/python2.7/site-packages/_pytest/main.py
aruneli/rancher-tests
f0ff5539420ac354fc951ed239b002cecde52505
[ "Apache-2.0" ]
null
null
null
.venv/lib/python2.7/site-packages/_pytest/main.py
aruneli/rancher-tests
f0ff5539420ac354fc951ed239b002cecde52505
[ "Apache-2.0" ]
null
null
null
.venv/lib/python2.7/site-packages/_pytest/main.py
aruneli/rancher-tests
f0ff5539420ac354fc951ed239b002cecde52505
[ "Apache-2.0" ]
null
null
null
""" core implementation of testing process: init, session, runtest loop. """ import re import py import pytest, _pytest import os, sys, imp try: from collections import MutableMapping as MappingMixin except ImportError: from UserDict import DictMixin as MappingMixin from _pytest.runner import collect_one_node tracebackcutdir = py.path.local(_pytest.__file__).dirpath() # exitcodes for the command line EXIT_OK = 0 EXIT_TESTSFAILED = 1 EXIT_INTERRUPTED = 2 EXIT_INTERNALERROR = 3 EXIT_USAGEERROR = 4 name_re = re.compile("^[a-zA-Z_]\w*$") def pytest_addoption(parser): parser.addini("norecursedirs", "directory patterns to avoid for recursion", type="args", default=['.*', 'CVS', '_darcs', '{arch}', '*.egg']) #parser.addini("dirpatterns", # "patterns specifying possible locations of test files", # type="linelist", default=["**/test_*.txt", # "**/test_*.py", "**/*_test.py"] #) group = parser.getgroup("general", "running and selection options") group._addoption('-x', '--exitfirst', action="store_true", default=False, dest="exitfirst", help="exit instantly on first error or failed test."), group._addoption('--maxfail', metavar="num", action="store", type=int, dest="maxfail", default=0, help="exit after first num failures or errors.") group._addoption('--strict', action="store_true", help="run pytest in strict mode, warnings become errors.") group._addoption("-c", metavar="file", type=str, dest="inifilename", help="load configuration from `file` instead of trying to locate one of the implicit configuration files.") group = parser.getgroup("collect", "collection") group.addoption('--collectonly', '--collect-only', action="store_true", help="only collect tests, don't execute them."), group.addoption('--pyargs', action="store_true", help="try to interpret all arguments as python packages.") group.addoption("--ignore", action="append", metavar="path", help="ignore path during collection (multi-allowed).") # when changing this to --conf-cut-dir, config.py Conftest.setinitial # needs upgrading as well group.addoption('--confcutdir', dest="confcutdir", default=None, metavar="dir", help="only load conftest.py's relative to specified dir.") group = parser.getgroup("debugconfig", "test session debugging and configuration") group.addoption('--basetemp', dest="basetemp", default=None, metavar="dir", help="base temporary directory for this test run.") def pytest_namespace(): collect = dict(Item=Item, Collector=Collector, File=File, Session=Session) return dict(collect=collect) def pytest_configure(config): pytest.config = config # compatibiltiy if config.option.exitfirst: config.option.maxfail = 1 def wrap_session(config, doit): """Skeleton command line program""" session = Session(config) session.exitstatus = EXIT_OK initstate = 0 try: try: config.do_configure() initstate = 1 config.hook.pytest_sessionstart(session=session) initstate = 2 doit(config, session) except pytest.UsageError: args = sys.exc_info()[1].args for msg in args: sys.stderr.write("ERROR: %s\n" %(msg,)) session.exitstatus = EXIT_USAGEERROR except KeyboardInterrupt: excinfo = py.code.ExceptionInfo() config.hook.pytest_keyboard_interrupt(excinfo=excinfo) session.exitstatus = EXIT_INTERRUPTED except: excinfo = py.code.ExceptionInfo() config.notify_exception(excinfo, config.option) session.exitstatus = EXIT_INTERNALERROR if excinfo.errisinstance(SystemExit): sys.stderr.write("mainloop: caught Spurious SystemExit!\n") else: if session._testsfailed: session.exitstatus = EXIT_TESTSFAILED finally: excinfo = None # Explicitly break reference cycle. 
session.startdir.chdir() if initstate >= 2: config.hook.pytest_sessionfinish( session=session, exitstatus=session.exitstatus) if initstate >= 1: config.do_unconfigure() config.pluginmanager.ensure_shutdown() return session.exitstatus def pytest_cmdline_main(config): return wrap_session(config, _main) def _main(config, session): """ default command line protocol for initialization, session, running tests and reporting. """ config.hook.pytest_collection(session=session) config.hook.pytest_runtestloop(session=session) def pytest_collection(session): return session.perform_collect() def pytest_runtestloop(session): if session.config.option.collectonly: return True def getnextitem(i): # this is a function to avoid python2 # keeping sys.exc_info set when calling into a test # python2 keeps sys.exc_info till the frame is left try: return session.items[i+1] except IndexError: return None for i, item in enumerate(session.items): nextitem = getnextitem(i) item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem) if session.shouldstop: raise session.Interrupted(session.shouldstop) return True def pytest_ignore_collect(path, config): p = path.dirpath() ignore_paths = config._getconftest_pathlist("collect_ignore", path=p) ignore_paths = ignore_paths or [] excludeopt = config.getoption("ignore") if excludeopt: ignore_paths.extend([py.path.local(x) for x in excludeopt]) return path in ignore_paths class FSHookProxy(object): def __init__(self, fspath, config): self.fspath = fspath self.config = config def __getattr__(self, name): plugins = self.config._getmatchingplugins(self.fspath) x = self.config.hook._getcaller(name, plugins) self.__dict__[name] = x return x def compatproperty(name): def fget(self): # deprecated - use pytest.name return getattr(pytest, name) return property(fget) class NodeKeywords(MappingMixin): def __init__(self, node): self.node = node self.parent = node.parent self._markers = {node.name: True} def __getitem__(self, key): try: return self._markers[key] except KeyError: if self.parent is None: raise return self.parent.keywords[key] def __setitem__(self, key, value): self._markers[key] = value def __delitem__(self, key): raise ValueError("cannot delete key in keywords dict") def __iter__(self): seen = set(self._markers) if self.parent is not None: seen.update(self.parent.keywords) return iter(seen) def __len__(self): return len(self.__iter__()) def keys(self): return list(self) def __repr__(self): return "<NodeKeywords for node %s>" % (self.node, ) class Node(object): """ base class for Collector and Item the test collection tree. Collector subclasses have children, Items are terminal nodes.""" def __init__(self, name, parent=None, config=None, session=None): #: a unique name within the scope of the parent node self.name = name #: the parent collector node. 
self.parent = parent #: the pytest config object self.config = config or parent.config #: the session this node is part of self.session = session or parent.session #: filesystem path where this node was collected from (can be None) self.fspath = getattr(parent, 'fspath', None) #: keywords/markers collected from all scopes self.keywords = NodeKeywords(self) #: allow adding of extra keywords to use for matching self.extra_keyword_matches = set() # used for storing artificial fixturedefs for direct parametrization self._name2pseudofixturedef = {} #self.extrainit() @property def ihook(self): """ fspath sensitive hook proxy used to call pytest hooks""" return self.session.gethookproxy(self.fspath) #def extrainit(self): # """"extra initialization after Node is initialized. Implemented # by some subclasses. """ Module = compatproperty("Module") Class = compatproperty("Class") Instance = compatproperty("Instance") Function = compatproperty("Function") File = compatproperty("File") Item = compatproperty("Item") def _getcustomclass(self, name): cls = getattr(self, name) if cls != getattr(pytest, name): py.log._apiwarn("2.0", "use of node.%s is deprecated, " "use pytest_pycollect_makeitem(...) to create custom " "collection nodes" % name) return cls def __repr__(self): return "<%s %r>" %(self.__class__.__name__, getattr(self, 'name', None)) def warn(self, code, message): """ generate a warning with the given code and message for this item. """ assert isinstance(code, str) fslocation = getattr(self, "location", None) if fslocation is None: fslocation = getattr(self, "fspath", None) else: fslocation = "%s:%s" % fslocation[:2] self.ihook.pytest_logwarning(code=code, message=message, nodeid=self.nodeid, fslocation=fslocation) # methods for ordering nodes @property def nodeid(self): """ a ::-separated string denoting its collection tree address. """ try: return self._nodeid except AttributeError: self._nodeid = x = self._makeid() return x def _makeid(self): return self.parent.nodeid + "::" + self.name def __hash__(self): return hash(self.nodeid) def setup(self): pass def teardown(self): pass def _memoizedcall(self, attrname, function): exattrname = "_ex_" + attrname failure = getattr(self, exattrname, None) if failure is not None: py.builtin._reraise(failure[0], failure[1], failure[2]) if hasattr(self, attrname): return getattr(self, attrname) try: res = function() except py.builtin._sysex: raise except: failure = sys.exc_info() setattr(self, exattrname, failure) raise setattr(self, attrname, res) return res def listchain(self): """ return list of all parent collectors up to self, starting from root of collection tree. """ chain = [] item = self while item is not None: chain.append(item) item = item.parent chain.reverse() return chain def add_marker(self, marker): """ dynamically add a marker object to the node. ``marker`` can be a string or pytest.mark.* instance. """ from _pytest.mark import MarkDecorator if isinstance(marker, py.builtin._basestring): marker = MarkDecorator(marker) elif not isinstance(marker, MarkDecorator): raise ValueError("is not a string or pytest.mark.* Marker") self.keywords[marker.name] = marker def get_marker(self, name): """ get a marker object from this node or None if the node doesn't have a marker with that name. 
""" val = self.keywords.get(name, None) if val is not None: from _pytest.mark import MarkInfo, MarkDecorator if isinstance(val, (MarkDecorator, MarkInfo)): return val def listextrakeywords(self): """ Return a set of all extra keywords in self and any parents.""" extra_keywords = set() item = self for item in self.listchain(): extra_keywords.update(item.extra_keyword_matches) return extra_keywords def listnames(self): return [x.name for x in self.listchain()] def getplugins(self): return self.config._getmatchingplugins(self.fspath) def addfinalizer(self, fin): """ register a function to be called when this node is finalized. This method can only be called when this node is active in a setup chain, for example during self.setup(). """ self.session._setupstate.addfinalizer(fin, self) def getparent(self, cls): """ get the next parent node (including ourself) which is an instance of the given class""" current = self while current and not isinstance(current, cls): current = current.parent return current def _prunetraceback(self, excinfo): pass def _repr_failure_py(self, excinfo, style=None): fm = self.session._fixturemanager if excinfo.errisinstance(fm.FixtureLookupError): return excinfo.value.formatrepr() tbfilter = True if self.config.option.fulltrace: style="long" else: self._prunetraceback(excinfo) tbfilter = False # prunetraceback already does it if style == "auto": style = "long" # XXX should excinfo.getrepr record all data and toterminal() process it? if style is None: if self.config.option.tbstyle == "short": style = "short" else: style = "long" return excinfo.getrepr(funcargs=True, showlocals=self.config.option.showlocals, style=style, tbfilter=tbfilter) repr_failure = _repr_failure_py class Collector(Node): """ Collector instances create children through collect() and thus iteratively build a tree. """ class CollectError(Exception): """ an error during collection, contains a custom message. """ def collect(self): """ returns a list of children (items and collectors) for this collection node. """ raise NotImplementedError("abstract") def repr_failure(self, excinfo): """ represent a collection failure. """ if excinfo.errisinstance(self.CollectError): exc = excinfo.value return str(exc.args[0]) return self._repr_failure_py(excinfo, style="short") def _memocollect(self): """ internal helper method to cache results of calling collect(). """ return self._memoizedcall('_collected', lambda: list(self.collect())) def _prunetraceback(self, excinfo): if hasattr(self, 'fspath'): traceback = excinfo.traceback ntraceback = traceback.cut(path=self.fspath) if ntraceback == traceback: ntraceback = ntraceback.cut(excludepath=tracebackcutdir) excinfo.traceback = ntraceback.filter() class FSCollector(Collector): def __init__(self, fspath, parent=None, config=None, session=None): fspath = py.path.local(fspath) # xxx only for test_resultlog.py? name = fspath.basename if parent is not None: rel = fspath.relto(parent.fspath) if rel: name = rel name = name.replace(os.sep, "/") super(FSCollector, self).__init__(name, parent, config, session) self.fspath = fspath def _makeid(self): relpath = self.fspath.relto(self.config.rootdir) if os.sep != "/": relpath = relpath.replace(os.sep, "/") return relpath class File(FSCollector): """ base class for collecting tests from a file. """ class Item(Node): """ a basic test invocation item. Note that for a single function there might be multiple test invocation items. 
""" nextitem = None def __init__(self, name, parent=None, config=None, session=None): super(Item, self).__init__(name, parent, config, session) self._report_sections = [] def add_report_section(self, when, key, content): if content: self._report_sections.append((when, key, content)) def reportinfo(self): return self.fspath, None, "" @property def location(self): try: return self._location except AttributeError: location = self.reportinfo() # bestrelpath is a quite slow function cache = self.config.__dict__.setdefault("_bestrelpathcache", {}) try: fspath = cache[location[0]] except KeyError: fspath = self.session.fspath.bestrelpath(location[0]) cache[location[0]] = fspath location = (fspath, location[1], str(location[2])) self._location = location return location class NoMatch(Exception): """ raised if matching cannot locate a matching names. """ class Session(FSCollector): class Interrupted(KeyboardInterrupt): """ signals an interrupted test run. """ __module__ = 'builtins' # for py3 def __init__(self, config): FSCollector.__init__(self, config.rootdir, parent=None, config=config, session=self) self.config.pluginmanager.register(self, name="session", prepend=True) self._testsfailed = 0 self.shouldstop = False self.trace = config.trace.root.get("collection") self._norecursepatterns = config.getini("norecursedirs") self.startdir = py.path.local() self._fs2hookproxy = {} def _makeid(self): return "" def pytest_collectstart(self): if self.shouldstop: raise self.Interrupted(self.shouldstop) def pytest_runtest_logreport(self, report): if report.failed and not hasattr(report, 'wasxfail'): self._testsfailed += 1 maxfail = self.config.getvalue("maxfail") if maxfail and self._testsfailed >= maxfail: self.shouldstop = "stopping after %d failures" % ( self._testsfailed) pytest_collectreport = pytest_runtest_logreport def isinitpath(self, path): return path in self._initialpaths def gethookproxy(self, fspath): try: return self._fs2hookproxy[fspath] except KeyError: self._fs2hookproxy[fspath] = x = FSHookProxy(fspath, self.config) return x def perform_collect(self, args=None, genitems=True): hook = self.config.hook try: items = self._perform_collect(args, genitems) hook.pytest_collection_modifyitems(session=self, config=self.config, items=items) finally: hook.pytest_collection_finish(session=self) return items def _perform_collect(self, args, genitems): if args is None: args = self.config.args self.trace("perform_collect", self, args) self.trace.root.indent += 1 self._notfound = [] self._initialpaths = set() self._initialparts = [] self.items = items = [] for arg in args: parts = self._parsearg(arg) self._initialparts.append(parts) self._initialpaths.add(parts[0]) rep = collect_one_node(self) self.ihook.pytest_collectreport(report=rep) self.trace.root.indent -= 1 if self._notfound: errors = [] for arg, exc in self._notfound: line = "(no name %r in any of %r)" % (arg, exc.args[0]) errors.append("not found: %s\n%s" % (arg, line)) #XXX: test this raise pytest.UsageError(*errors) if not genitems: return rep.result else: if rep.passed: for node in rep.result: self.items.extend(self.genitems(node)) return items def collect(self): for parts in self._initialparts: arg = "::".join(map(str, parts)) self.trace("processing argument", arg) self.trace.root.indent += 1 try: for x in self._collect(arg): yield x except NoMatch: # we are inside a make_report hook so # we cannot directly pass through the exception self._notfound.append((arg, sys.exc_info()[1])) self.trace.root.indent -= 1 def _collect(self, arg): names = 
self._parsearg(arg) path = names.pop(0) if path.check(dir=1): assert not names, "invalid arg %r" %(arg,) for path in path.visit(fil=lambda x: x.check(file=1), rec=self._recurse, bf=True, sort=True): for x in self._collectfile(path): yield x else: assert path.check(file=1) for x in self.matchnodes(self._collectfile(path), names): yield x def _collectfile(self, path): ihook = self.gethookproxy(path) if not self.isinitpath(path): if ihook.pytest_ignore_collect(path=path, config=self.config): return () return ihook.pytest_collect_file(path=path, parent=self) def _recurse(self, path): ihook = self.gethookproxy(path.dirpath()) if ihook.pytest_ignore_collect(path=path, config=self.config): return for pat in self._norecursepatterns: if path.check(fnmatch=pat): return False ihook = self.gethookproxy(path) ihook.pytest_collect_directory(path=path, parent=self) return True def _tryconvertpyarg(self, x): mod = None path = [os.path.abspath('.')] + sys.path for name in x.split('.'): # ignore anything that's not a proper name here # else something like --pyargs will mess up '.' # since imp.find_module will actually sometimes work for it # but it's supposed to be considered a filesystem path # not a package if name_re.match(name) is None: return x try: fd, mod, type_ = imp.find_module(name, path) except ImportError: return x else: if fd is not None: fd.close() if type_[2] != imp.PKG_DIRECTORY: path = [os.path.dirname(mod)] else: path = [mod] return mod def _parsearg(self, arg): """ return (fspath, names) tuple after checking the file exists. """ arg = str(arg) if self.config.option.pyargs: arg = self._tryconvertpyarg(arg) parts = str(arg).split("::") relpath = parts[0].replace("/", os.sep) path = self.config.invocation_dir.join(relpath, abs=True) if not path.check(): if self.config.option.pyargs: msg = "file or package not found: " else: msg = "file not found: " raise pytest.UsageError(msg + arg) parts[0] = path return parts def matchnodes(self, matching, names): self.trace("matchnodes", matching, names) self.trace.root.indent += 1 nodes = self._matchnodes(matching, names) num = len(nodes) self.trace("matchnodes finished -> ", num, "nodes") self.trace.root.indent -= 1 if num == 0: raise NoMatch(matching, names[:1]) return nodes def _matchnodes(self, matching, names): if not matching or not names: return matching name = names[0] assert name nextnames = names[1:] resultnodes = [] for node in matching: if isinstance(node, pytest.Item): if not names: resultnodes.append(node) continue assert isinstance(node, pytest.Collector) rep = collect_one_node(node) if rep.passed: has_matched = False for x in rep.result: if x.name == name: resultnodes.extend(self.matchnodes([x], nextnames)) has_matched = True # XXX accept IDs that don't have "()" for class instances if not has_matched and len(rep.result) == 1 and x.name == "()": nextnames.insert(0, name) resultnodes.extend(self.matchnodes([x], nextnames)) node.ihook.pytest_collectreport(report=rep) return resultnodes def genitems(self, node): self.trace("genitems", node) if isinstance(node, pytest.Item): node.ihook.pytest_itemcollected(item=node) yield node else: assert isinstance(node, pytest.Collector) rep = collect_one_node(node) if rep.passed: for subnode in rep.result: for x in self.genitems(subnode): yield x node.ihook.pytest_collectreport(report=rep)
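Session.perform_collect above gives plugins one shot at the final item list through the pytest_collection_modifyitems hook before pytest_runtestloop starts calling pytest_runtest_protocol. A minimal conftest.py sketch using that hook to push items carrying a marker to the end of the run; the 'slow' marker name is an assumption for illustration.

# conftest.py -- reorder collected items before the run-test loop consumes them.
def pytest_collection_modifyitems(session, config, items):
    # `items` is the list built by Session.perform_collect(); sorting it in place
    # changes the order in which pytest_runtest_protocol is invoked. The sort is
    # stable, so unmarked items keep their original relative order.
    items.sort(key=lambda item: 1 if item.get_marker("slow") else 0)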
35.046575
122
0.597209
import re import py import pytest, _pytest import os, sys, imp try: from collections import MutableMapping as MappingMixin except ImportError: from UserDict import DictMixin as MappingMixin from _pytest.runner import collect_one_node tracebackcutdir = py.path.local(_pytest.__file__).dirpath() EXIT_OK = 0 EXIT_TESTSFAILED = 1 EXIT_INTERRUPTED = 2 EXIT_INTERNALERROR = 3 EXIT_USAGEERROR = 4 name_re = re.compile("^[a-zA-Z_]\w*$") def pytest_addoption(parser): parser.addini("norecursedirs", "directory patterns to avoid for recursion", type="args", default=['.*', 'CVS', '_darcs', '{arch}', '*.egg']) group = parser.getgroup("general", "running and selection options") group._addoption('-x', '--exitfirst', action="store_true", default=False, dest="exitfirst", help="exit instantly on first error or failed test."), group._addoption('--maxfail', metavar="num", action="store", type=int, dest="maxfail", default=0, help="exit after first num failures or errors.") group._addoption('--strict', action="store_true", help="run pytest in strict mode, warnings become errors.") group._addoption("-c", metavar="file", type=str, dest="inifilename", help="load configuration from `file` instead of trying to locate one of the implicit configuration files.") group = parser.getgroup("collect", "collection") group.addoption('--collectonly', '--collect-only', action="store_true", help="only collect tests, don't execute them."), group.addoption('--pyargs', action="store_true", help="try to interpret all arguments as python packages.") group.addoption("--ignore", action="append", metavar="path", help="ignore path during collection (multi-allowed).") # when changing this to --conf-cut-dir, config.py Conftest.setinitial # needs upgrading as well group.addoption('--confcutdir', dest="confcutdir", default=None, metavar="dir", help="only load conftest.py's relative to specified dir.") group = parser.getgroup("debugconfig", "test session debugging and configuration") group.addoption('--basetemp', dest="basetemp", default=None, metavar="dir", help="base temporary directory for this test run.") def pytest_namespace(): collect = dict(Item=Item, Collector=Collector, File=File, Session=Session) return dict(collect=collect) def pytest_configure(config): pytest.config = config if config.option.exitfirst: config.option.maxfail = 1 def wrap_session(config, doit): session = Session(config) session.exitstatus = EXIT_OK initstate = 0 try: try: config.do_configure() initstate = 1 config.hook.pytest_sessionstart(session=session) initstate = 2 doit(config, session) except pytest.UsageError: args = sys.exc_info()[1].args for msg in args: sys.stderr.write("ERROR: %s\n" %(msg,)) session.exitstatus = EXIT_USAGEERROR except KeyboardInterrupt: excinfo = py.code.ExceptionInfo() config.hook.pytest_keyboard_interrupt(excinfo=excinfo) session.exitstatus = EXIT_INTERRUPTED except: excinfo = py.code.ExceptionInfo() config.notify_exception(excinfo, config.option) session.exitstatus = EXIT_INTERNALERROR if excinfo.errisinstance(SystemExit): sys.stderr.write("mainloop: caught Spurious SystemExit!\n") else: if session._testsfailed: session.exitstatus = EXIT_TESTSFAILED finally: excinfo = None session.startdir.chdir() if initstate >= 2: config.hook.pytest_sessionfinish( session=session, exitstatus=session.exitstatus) if initstate >= 1: config.do_unconfigure() config.pluginmanager.ensure_shutdown() return session.exitstatus def pytest_cmdline_main(config): return wrap_session(config, _main) def _main(config, session): 
config.hook.pytest_collection(session=session) config.hook.pytest_runtestloop(session=session) def pytest_collection(session): return session.perform_collect() def pytest_runtestloop(session): if session.config.option.collectonly: return True def getnextitem(i): try: return session.items[i+1] except IndexError: return None for i, item in enumerate(session.items): nextitem = getnextitem(i) item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem) if session.shouldstop: raise session.Interrupted(session.shouldstop) return True def pytest_ignore_collect(path, config): p = path.dirpath() ignore_paths = config._getconftest_pathlist("collect_ignore", path=p) ignore_paths = ignore_paths or [] excludeopt = config.getoption("ignore") if excludeopt: ignore_paths.extend([py.path.local(x) for x in excludeopt]) return path in ignore_paths class FSHookProxy(object): def __init__(self, fspath, config): self.fspath = fspath self.config = config def __getattr__(self, name): plugins = self.config._getmatchingplugins(self.fspath) x = self.config.hook._getcaller(name, plugins) self.__dict__[name] = x return x def compatproperty(name): def fget(self): return getattr(pytest, name) return property(fget) class NodeKeywords(MappingMixin): def __init__(self, node): self.node = node self.parent = node.parent self._markers = {node.name: True} def __getitem__(self, key): try: return self._markers[key] except KeyError: if self.parent is None: raise return self.parent.keywords[key] def __setitem__(self, key, value): self._markers[key] = value def __delitem__(self, key): raise ValueError("cannot delete key in keywords dict") def __iter__(self): seen = set(self._markers) if self.parent is not None: seen.update(self.parent.keywords) return iter(seen) def __len__(self): return len(self.__iter__()) def keys(self): return list(self) def __repr__(self): return "<NodeKeywords for node %s>" % (self.node, ) class Node(object): def __init__(self, name, parent=None, config=None, session=None): self.name = name self.parent = parent self.config = config or parent.config self.session = session or parent.session self.fspath = getattr(parent, 'fspath', None) self.keywords = NodeKeywords(self) self.extra_keyword_matches = set() self._name2pseudofixturedef = {} @property def ihook(self): return self.session.gethookproxy(self.fspath) # by some subclasses. """ Module = compatproperty("Module") Class = compatproperty("Class") Instance = compatproperty("Instance") Function = compatproperty("Function") File = compatproperty("File") Item = compatproperty("Item") def _getcustomclass(self, name): cls = getattr(self, name) if cls != getattr(pytest, name): py.log._apiwarn("2.0", "use of node.%s is deprecated, " "use pytest_pycollect_makeitem(...) 
to create custom " "collection nodes" % name) return cls def __repr__(self): return "<%s %r>" %(self.__class__.__name__, getattr(self, 'name', None)) def warn(self, code, message): assert isinstance(code, str) fslocation = getattr(self, "location", None) if fslocation is None: fslocation = getattr(self, "fspath", None) else: fslocation = "%s:%s" % fslocation[:2] self.ihook.pytest_logwarning(code=code, message=message, nodeid=self.nodeid, fslocation=fslocation) # methods for ordering nodes @property def nodeid(self): try: return self._nodeid except AttributeError: self._nodeid = x = self._makeid() return x def _makeid(self): return self.parent.nodeid + "::" + self.name def __hash__(self): return hash(self.nodeid) def setup(self): pass def teardown(self): pass def _memoizedcall(self, attrname, function): exattrname = "_ex_" + attrname failure = getattr(self, exattrname, None) if failure is not None: py.builtin._reraise(failure[0], failure[1], failure[2]) if hasattr(self, attrname): return getattr(self, attrname) try: res = function() except py.builtin._sysex: raise except: failure = sys.exc_info() setattr(self, exattrname, failure) raise setattr(self, attrname, res) return res def listchain(self): chain = [] item = self while item is not None: chain.append(item) item = item.parent chain.reverse() return chain def add_marker(self, marker): from _pytest.mark import MarkDecorator if isinstance(marker, py.builtin._basestring): marker = MarkDecorator(marker) elif not isinstance(marker, MarkDecorator): raise ValueError("is not a string or pytest.mark.* Marker") self.keywords[marker.name] = marker def get_marker(self, name): val = self.keywords.get(name, None) if val is not None: from _pytest.mark import MarkInfo, MarkDecorator if isinstance(val, (MarkDecorator, MarkInfo)): return val def listextrakeywords(self): extra_keywords = set() item = self for item in self.listchain(): extra_keywords.update(item.extra_keyword_matches) return extra_keywords def listnames(self): return [x.name for x in self.listchain()] def getplugins(self): return self.config._getmatchingplugins(self.fspath) def addfinalizer(self, fin): self.session._setupstate.addfinalizer(fin, self) def getparent(self, cls): current = self while current and not isinstance(current, cls): current = current.parent return current def _prunetraceback(self, excinfo): pass def _repr_failure_py(self, excinfo, style=None): fm = self.session._fixturemanager if excinfo.errisinstance(fm.FixtureLookupError): return excinfo.value.formatrepr() tbfilter = True if self.config.option.fulltrace: style="long" else: self._prunetraceback(excinfo) tbfilter = False # prunetraceback already does it if style == "auto": style = "long" # XXX should excinfo.getrepr record all data and toterminal() process it? 
if style is None: if self.config.option.tbstyle == "short": style = "short" else: style = "long" return excinfo.getrepr(funcargs=True, showlocals=self.config.option.showlocals, style=style, tbfilter=tbfilter) repr_failure = _repr_failure_py class Collector(Node): class CollectError(Exception): def collect(self): raise NotImplementedError("abstract") def repr_failure(self, excinfo): if excinfo.errisinstance(self.CollectError): exc = excinfo.value return str(exc.args[0]) return self._repr_failure_py(excinfo, style="short") def _memocollect(self): return self._memoizedcall('_collected', lambda: list(self.collect())) def _prunetraceback(self, excinfo): if hasattr(self, 'fspath'): traceback = excinfo.traceback ntraceback = traceback.cut(path=self.fspath) if ntraceback == traceback: ntraceback = ntraceback.cut(excludepath=tracebackcutdir) excinfo.traceback = ntraceback.filter() class FSCollector(Collector): def __init__(self, fspath, parent=None, config=None, session=None): fspath = py.path.local(fspath) # xxx only for test_resultlog.py? name = fspath.basename if parent is not None: rel = fspath.relto(parent.fspath) if rel: name = rel name = name.replace(os.sep, "/") super(FSCollector, self).__init__(name, parent, config, session) self.fspath = fspath def _makeid(self): relpath = self.fspath.relto(self.config.rootdir) if os.sep != "/": relpath = relpath.replace(os.sep, "/") return relpath class File(FSCollector): class Item(Node): nextitem = None def __init__(self, name, parent=None, config=None, session=None): super(Item, self).__init__(name, parent, config, session) self._report_sections = [] def add_report_section(self, when, key, content): if content: self._report_sections.append((when, key, content)) def reportinfo(self): return self.fspath, None, "" @property def location(self): try: return self._location except AttributeError: location = self.reportinfo() # bestrelpath is a quite slow function cache = self.config.__dict__.setdefault("_bestrelpathcache", {}) try: fspath = cache[location[0]] except KeyError: fspath = self.session.fspath.bestrelpath(location[0]) cache[location[0]] = fspath location = (fspath, location[1], str(location[2])) self._location = location return location class NoMatch(Exception): class Session(FSCollector): class Interrupted(KeyboardInterrupt): __module__ = 'builtins' # for py3 def __init__(self, config): FSCollector.__init__(self, config.rootdir, parent=None, config=config, session=self) self.config.pluginmanager.register(self, name="session", prepend=True) self._testsfailed = 0 self.shouldstop = False self.trace = config.trace.root.get("collection") self._norecursepatterns = config.getini("norecursedirs") self.startdir = py.path.local() self._fs2hookproxy = {} def _makeid(self): return "" def pytest_collectstart(self): if self.shouldstop: raise self.Interrupted(self.shouldstop) def pytest_runtest_logreport(self, report): if report.failed and not hasattr(report, 'wasxfail'): self._testsfailed += 1 maxfail = self.config.getvalue("maxfail") if maxfail and self._testsfailed >= maxfail: self.shouldstop = "stopping after %d failures" % ( self._testsfailed) pytest_collectreport = pytest_runtest_logreport def isinitpath(self, path): return path in self._initialpaths def gethookproxy(self, fspath): try: return self._fs2hookproxy[fspath] except KeyError: self._fs2hookproxy[fspath] = x = FSHookProxy(fspath, self.config) return x def perform_collect(self, args=None, genitems=True): hook = self.config.hook try: items = self._perform_collect(args, genitems) 
hook.pytest_collection_modifyitems(session=self, config=self.config, items=items) finally: hook.pytest_collection_finish(session=self) return items def _perform_collect(self, args, genitems): if args is None: args = self.config.args self.trace("perform_collect", self, args) self.trace.root.indent += 1 self._notfound = [] self._initialpaths = set() self._initialparts = [] self.items = items = [] for arg in args: parts = self._parsearg(arg) self._initialparts.append(parts) self._initialpaths.add(parts[0]) rep = collect_one_node(self) self.ihook.pytest_collectreport(report=rep) self.trace.root.indent -= 1 if self._notfound: errors = [] for arg, exc in self._notfound: line = "(no name %r in any of %r)" % (arg, exc.args[0]) errors.append("not found: %s\n%s" % (arg, line)) #XXX: test this raise pytest.UsageError(*errors) if not genitems: return rep.result else: if rep.passed: for node in rep.result: self.items.extend(self.genitems(node)) return items def collect(self): for parts in self._initialparts: arg = "::".join(map(str, parts)) self.trace("processing argument", arg) self.trace.root.indent += 1 try: for x in self._collect(arg): yield x except NoMatch: # we are inside a make_report hook so # we cannot directly pass through the exception self._notfound.append((arg, sys.exc_info()[1])) self.trace.root.indent -= 1 def _collect(self, arg): names = self._parsearg(arg) path = names.pop(0) if path.check(dir=1): assert not names, "invalid arg %r" %(arg,) for path in path.visit(fil=lambda x: x.check(file=1), rec=self._recurse, bf=True, sort=True): for x in self._collectfile(path): yield x else: assert path.check(file=1) for x in self.matchnodes(self._collectfile(path), names): yield x def _collectfile(self, path): ihook = self.gethookproxy(path) if not self.isinitpath(path): if ihook.pytest_ignore_collect(path=path, config=self.config): return () return ihook.pytest_collect_file(path=path, parent=self) def _recurse(self, path): ihook = self.gethookproxy(path.dirpath()) if ihook.pytest_ignore_collect(path=path, config=self.config): return for pat in self._norecursepatterns: if path.check(fnmatch=pat): return False ihook = self.gethookproxy(path) ihook.pytest_collect_directory(path=path, parent=self) return True def _tryconvertpyarg(self, x): mod = None path = [os.path.abspath('.')] + sys.path for name in x.split('.'): # ignore anything that's not a proper name here # else something like --pyargs will mess up '.' 
# since imp.find_module will actually sometimes work for it # but it's supposed to be considered a filesystem path # not a package if name_re.match(name) is None: return x try: fd, mod, type_ = imp.find_module(name, path) except ImportError: return x else: if fd is not None: fd.close() if type_[2] != imp.PKG_DIRECTORY: path = [os.path.dirname(mod)] else: path = [mod] return mod def _parsearg(self, arg): arg = str(arg) if self.config.option.pyargs: arg = self._tryconvertpyarg(arg) parts = str(arg).split("::") relpath = parts[0].replace("/", os.sep) path = self.config.invocation_dir.join(relpath, abs=True) if not path.check(): if self.config.option.pyargs: msg = "file or package not found: " else: msg = "file not found: " raise pytest.UsageError(msg + arg) parts[0] = path return parts def matchnodes(self, matching, names): self.trace("matchnodes", matching, names) self.trace.root.indent += 1 nodes = self._matchnodes(matching, names) num = len(nodes) self.trace("matchnodes finished -> ", num, "nodes") self.trace.root.indent -= 1 if num == 0: raise NoMatch(matching, names[:1]) return nodes def _matchnodes(self, matching, names): if not matching or not names: return matching name = names[0] assert name nextnames = names[1:] resultnodes = [] for node in matching: if isinstance(node, pytest.Item): if not names: resultnodes.append(node) continue assert isinstance(node, pytest.Collector) rep = collect_one_node(node) if rep.passed: has_matched = False for x in rep.result: if x.name == name: resultnodes.extend(self.matchnodes([x], nextnames)) has_matched = True # XXX accept IDs that don't have "()" for class instances if not has_matched and len(rep.result) == 1 and x.name == "()": nextnames.insert(0, name) resultnodes.extend(self.matchnodes([x], nextnames)) node.ihook.pytest_collectreport(report=rep) return resultnodes def genitems(self, node): self.trace("genitems", node) if isinstance(node, pytest.Item): node.ihook.pytest_itemcollected(item=node) yield node else: assert isinstance(node, pytest.Collector) rep = collect_one_node(node) if rep.passed: for subnode in rep.result: for x in self.genitems(subnode): yield x node.ihook.pytest_collectreport(report=rep)
true
true
f70e06e5e95179c1e68d413f9bfa9545ed8a7309
1,319
py
Python
qt_client/components/plot_data_model.py
cosmoscope/qt-client
c6cb59267c8be9149a95fb853a4f181d9092c86b
[ "BSD-3-Clause" ]
null
null
null
qt_client/components/plot_data_model.py
cosmoscope/qt-client
c6cb59267c8be9149a95fb853a4f181d9092c86b
[ "BSD-3-Clause" ]
1
2020-10-29T19:55:04.000Z
2020-10-29T19:55:04.000Z
qt_client/components/plot_data_model.py
cosmoscope/qt-client
c6cb59267c8be9149a95fb853a4f181d9092c86b
[ "BSD-3-Clause" ]
null
null
null
import numpy as np
from PyQt5.QtCore import (QAbstractTableModel, QModelIndex, QObject, Qt,
                          QVariant, pyqtProperty, pyqtSignal, pyqtSlot)

from ..hub import Hub, Message


class PlotDataModel(QAbstractTableModel):
    # DataRole = Qt.UserRole + 1

    def __init__(self, *args, **kwargs):
        super(PlotDataModel, self).__init__(*args, **kwargs)

        self._data = list(zip(np.arange(100), np.random.sample(100)))

        # The data model needs to listen for add data events
        self._hub = Hub()
        # self._hub.subscribe(AddDataMessage, self.add_data, self)
        # self._hub.subscribe(AddPlotDataMessage, self.add_data, self)

    # def roleNames(self):
    #     return {
    #         self.DataRole: b'data'
    #     }

    def rowCount(self, parent=None, *args, **kwargs):
        return len(self._data)

    def columnCount(self, parent=None, *args, **kwargs):
        return 2

    def data(self, index, role=None):
        return self._data[index.row()][index.column()]

        # if role == self.DataRole:
        #     return self._data[index.row()]

        if role == Qt.DisplayRole:
            return self._data[index.row()][index.column()]
        elif role == Qt.EditRole:
            return self._data[index.row()][index.column()]

        return QVariant()
31.404762
72
0.611069
import numpy as np
from PyQt5.QtCore import (QAbstractTableModel, QModelIndex, QObject, Qt,
                          QVariant, pyqtProperty, pyqtSignal, pyqtSlot)

from ..hub import Hub, Message


class PlotDataModel(QAbstractTableModel):

    def __init__(self, *args, **kwargs):
        super(PlotDataModel, self).__init__(*args, **kwargs)

        self._data = list(zip(np.arange(100), np.random.sample(100)))

        self._hub = Hub()

    def rowCount(self, parent=None, *args, **kwargs):
        return len(self._data)

    def columnCount(self, parent=None, *args, **kwargs):
        return 2

    def data(self, index, role=None):
        return self._data[index.row()][index.column()]

        if role == Qt.DisplayRole:
            return self._data[index.row()][index.column()]
        elif role == Qt.EditRole:
            return self._data[index.row()][index.column()]

        return QVariant()
true
true
f70e0713db5be0d8102c99abc4e445c892d286d5
19,283
py
Python
Pre_Production/Midi_Pre_Processor.py
EricCacciavillani/LyreBird
858657faef39d1adcba19ff0213210ba490b4afa
[ "MIT" ]
1
2019-05-04T02:34:20.000Z
2019-05-04T02:34:20.000Z
Pre_Production/Midi_Pre_Processor.py
EricCacciavillani/LyreBird
858657faef39d1adcba19ff0213210ba490b4afa
[ "MIT" ]
null
null
null
Pre_Production/Midi_Pre_Processor.py
EricCacciavillani/LyreBird
858657faef39d1adcba19ff0213210ba490b4afa
[ "MIT" ]
1
2019-04-04T19:14:09.000Z
2019-04-04T19:14:09.000Z
import pretty_midi import glob import os import copy from collections import Counter from multiprocessing.dummy import Pool as ThreadPool from tqdm import tqdm # Import shared files import sys sys.path.append('..') from Shared_Files.Global_Util import * from Shared_Files.Constants import * import warnings warnings.filterwarnings("ignore") class MidiPreProcessor: """ Reads across multiple Notes sets stores meta Notes on each set and associated files for Notes analysis and model training. """ def __init__(self, path_to_full_data_set, genre_sub_sample_set=sys.maxsize, generate_validation=False): """ :param path_to_full_data_set: Pass in a string to the path of directory holding all dataset(s) :param genre_sub_sample_set: Parses each genre into a subset based on the passed integer value. :param generate_validation: Boolean to mark files to be used as validation """ # Progress-bar for threading-pool self.__pbar = None # --- self.__all_possible_instr_note_pairs = set() self.__all_possible_instr_note_pairs_counter = Counter() self.__instr_note_pairs_dict = dict() self.__all_instruments = set() # Files to ignore for when splicing Notes into train/test self.__blacklisted_files_validation = set() # Stores all genres to another dict that stores # the corresponding file note size self.__genre_file_dict = dict() self.__genre_instr_note_counters = dict() # Stores all corrupted files found self.__corrupted_files_paths = [] # Store files that are to small (Determined by the input sequence) self.__small_files_paths = [] # Init encoders and decoders self.__master_instr_note_encoder = dict() self.__master_instr_note_decoder = dict() self.__master_instr_encoder = dict() self.__master_instr_decoder = dict() self.__master_genre_encoder = dict() self.__master_genre_decoder = dict() # --------------------------------- # Numeric counts self.__total_file_count = 0 self.__total_intr_note_pair_size = 0 # Thread pool out reading multiple files of each dataset thread_pool_results = self.__thread_pool_datasets_reader( self.__genre_dataset_init, path_to_full_data_set, genre_sub_sample_set) # Init all Notes based on thread pool results for genre_count, genre_dataset_result in enumerate(thread_pool_results): # Add to set of all instr/note pairs self.__all_possible_instr_note_pairs |= genre_dataset_result["genre_instr_note_pairs"] # Add to set of all instruments self.__all_instruments |= genre_dataset_result["genre_instruments"] # Numeric value of non-unique total instr/note pairs self.__total_intr_note_pair_size += genre_dataset_result[ "genre_size"] # Store files based on the genre of songs self.__genre_file_dict = {**self.__genre_file_dict, **genre_dataset_result["genre_file_meta_data"]} # Store counter object based on genre self.__genre_instr_note_counters[genre_dataset_result[ "genre_name"]] = genre_dataset_result["genre_instr_note_pairs_counter"] # Counter object of all possible instr/note self.__all_possible_instr_note_pairs_counter += genre_dataset_result["genre_instr_note_pairs_counter"] # --- self.__corrupted_files_paths += genre_dataset_result[ "corrupted_files"] self.__small_files_paths += genre_dataset_result["small_files"] # Sort all Notes before encoding for my own sanity self.__all_possible_instr_note_pairs = sorted( self.__all_possible_instr_note_pairs) self.__all_instruments = sorted(self.__all_instruments) self.__instr_note_pairs_dict = {instr:[instr_note_pair for instr_note_pair in self.__all_possible_instr_note_pairs if instr_note_pair.find(instr) != -1] for instr in self.__all_instruments} # Begin 
creating label encoders and decoders # ----- for label, (genre, _) in enumerate( self.__genre_instr_note_counters.items()): self.__master_genre_encoder[genre] = label + 1 self.__master_genre_decoder = {v: k for k, v in self.__master_genre_encoder.items()} # ----- for label, instr_note_pair in enumerate( self.__all_possible_instr_note_pairs): self.__master_instr_note_encoder[instr_note_pair] = label + 1 self.__master_instr_note_decoder = {v: k for k, v in self.__master_instr_note_encoder.items()} # ----- for label, instr in enumerate( self.__all_instruments): self.__master_instr_encoder[instr] = label + 1 self.__master_instr_decoder = {v: k for k, v in self.__master_instr_encoder.items()} # ------------------------------------- # Corrupted files were found. if self.__corrupted_files_paths: print("The Pre Processor found {0} corrupted files".format(len(self.__corrupted_files_paths))) print("Displaying all corrupted songs:\n") for song in self.__corrupted_files_paths: print("\t", song.split("/", 6)[-1]) print() display_options_menu(menu_intro="Corrupted files found!\n" "\tIt's fine if you don't delete" " them.Just know the pre-processor" " will not use them at all.", menu_options={1: "Delete all corrupted files", 2: "Ignore"}) user_input = input("\nInput: ") # Remove corrupted files if user_input == "1": self.delete_corrupted_files() else: pass # --------------------------------------------- # Small files were found. if self.__small_files_paths: print("The Pre Processor found {0} files that" " are smaller or equal to than {1} Classical_Notes".format( len(self.__small_files_paths), MIDI_CONSTANTS.SMALL_FILE_CHECK)) print("Displaying all small songs:\n") for song in self.__small_files_paths: print("\t", song.split("/", 6)[-1]) print() display_options_menu(menu_intro="Small files found!\n" "\tIt's fine if you don't delete" " them.Just know the pre-processor" " will not use them at all.", menu_options={1: "Delete all small files", 2: "Ignore"}) user_input = input("\nInput: ") # Remove small files if user_input == "1": self.delete_small_files() else: pass # --------------------------------------------- if generate_validation: # Marks files to be selected for validation self.__generate_validation_files() def __thread_pool_datasets_reader(self, func, path_to_full_data_set, genre_sub_sample_set): """ Thread pools out the dataset by genre """ # Get all folder paths for each genre all_train_datasets_paths = [x[0] for x in os.walk( path_to_full_data_set)] all_train_datasets_paths.pop(0) all_files_by_genre = [] for dataset_pth in all_train_datasets_paths: dataset_files = [dataset_pth + "/" + file for file in glob.glob1(dataset_pth, "*.mid")][:genre_sub_sample_set] # Ensures files were actually extracted if len(dataset_files): self.__total_file_count += len(dataset_files) all_files_by_genre.append(dataset_files) # Init progress bar self.__pbar = tqdm(total=self.__total_file_count) # Begin threaded pool pool = ThreadPool(HARDWARE_RELATED_CONSTANTS.THREAD_POOL_AMOUNT) all_results = pool.imap_unordered(func, all_files_by_genre) # End threaded pool pool.close() pool.join() self.__pbar.close() self.__pbar = None return all_results def __genre_dataset_init(self, genre_train_files): """ Init full dataset attributes on MidiPreProcessor init """ # Store meta Notes on file and genre specific Notes genre_instr_note_pairs = set() genre_instr_note_pairs_counter = Counter() genre_instruments = set() genre_file_meta_data = dict() genre_size = 0 # Store invalid file paths corrupted_files = [] small_files = [] genre_name 
= genre_train_files[0].split("/")[-2].replace('_Midi', '') for _, file in enumerate(genre_train_files): # Update thread pool progress bar self.__pbar.update(1) self.__pbar.set_postfix_str(s=file.split("/", -1)[-1][:20], refresh=True) # Meta Notes on the file midi_file_attr = self.read_midi_file(file) # Check if flags were raised if midi_file_attr["corrupted"]: corrupted_files.append(file) elif midi_file_attr["small_file_check"]: small_files.append(file) # File passed requirements; store meta Notes on genre and file else: genre_instruments |= midi_file_attr["instruments"] genre_instr_note_pairs |= set( midi_file_attr["flat_instr_note_seq"]) genre_size += midi_file_attr["flat_instr_note_seq_len"] genre_file_meta_data[file] = {"flat_instr_note_seq": midi_file_attr[ "flat_instr_note_seq"], "flat_instr_note_seq_len": midi_file_attr[ "flat_instr_note_seq_len"], "instruments": midi_file_attr[ "instruments"],} genre_instr_note_pairs_counter += Counter(midi_file_attr["flat_instr_note_seq"]) return {"genre_name": genre_name, "genre_size": genre_size, "genre_instruments": genre_instruments, "genre_instr_note_pairs": genre_instr_note_pairs, "genre_instr_note_pairs_counter": genre_instr_note_pairs_counter, "genre_file_meta_data": {genre_name: genre_file_meta_data}, "corrupted_files": corrupted_files, "small_files": small_files,} def __generate_validation_files(self): """ Mark files for the validation set """ self.__blacklisted_files_validation = set() # Find files for best fit the for the validation set per genre for genre_name, instr_note_counter in self.__genre_instr_note_counters.items(): genre_note_count = sum(instr_note_counter.values()) needed_validation_note_count = int( (genre_note_count / self.__total_intr_note_pair_size) \ * genre_note_count) note_count_file_dict = {file_meta_data["flat_instr_note_seq_len"]: file_name for file_name, file_meta_data in self.__genre_file_dict[ genre_name].items()} note_count_file_list = list(note_count_file_dict.keys()) ''' The validation count is decreasing per file note count; When it reaches this arbitrary threshold the validation set for this particular genre has been reached ''' while True and needed_validation_note_count > 25: closest_file_note_count = find_nearest( numbers=note_count_file_list, target=needed_validation_note_count) needed_validation_note_count -= closest_file_note_count self.__blacklisted_files_validation.add( note_count_file_dict[closest_file_note_count]) note_count_file_list.remove(closest_file_note_count) def read_midi_file(self, file): """ Extract out the instruments/Classical_Notes of the midi file. 
""" # Attempt to parse midi file try: midi_data = pretty_midi.PrettyMIDI(file) # Midi file couldn't be opened; Raise flag; return dummy dict except: return {"flat_instr_note_seq": [], "flat_instr_note_seq_len": 0, "instruments": {}, "small_file_check": False, "corrupted": True} # Stores instrument note pair flat_instr_note_seq = [] file_instruments = set() # Move through midi file; store Notes on instrument/note relationship in # string for instr in midi_data.instruments: for note_obj in instr.notes: program_instr_str = "Program" + PARAMETER_VAL_SPLITTER.STR + str(instr.program)\ + INSTRUMENT_NOTE_SPLITTER.STR\ + "Is_Drum" + PARAMETER_VAL_SPLITTER.STR + str(instr.is_drum) file_instruments.add(program_instr_str) flat_instr_note_seq.append( (program_instr_str + INSTRUMENT_NOTE_SPLITTER.STR + "Note" + PARAMETER_VAL_SPLITTER.STR + pretty_midi.note_number_to_name(note_obj.pitch), note_obj)) # --- flat_instr_note_seq_len = len(flat_instr_note_seq) # File is to small for our neural networks to take; Raise flag; if flat_instr_note_seq_len <= MIDI_CONSTANTS.SMALL_FILE_CHECK: return {"flat_instr_note_seq": flat_instr_note_seq, "flat_instr_note_seq_len": flat_instr_note_seq_len, "instruments": file_instruments, "small_file_check": True, "corrupted": False} # Sort Classical_Notes in proper sequence based on their starting and end points flat_instr_note_seq.sort(key=lambda tup: (tup[1].start, tup[1].end)) flat_instr_note_seq = [instr_note[0] for instr_note in flat_instr_note_seq] # Return dict for more explict multi return type return {"flat_instr_note_seq": flat_instr_note_seq, "flat_instr_note_seq_len": flat_instr_note_seq_len, "instruments": file_instruments, "small_file_check": False, "corrupted": False} # Delete the unused files from personal directory def delete_corrupted_files(self): for song in self.__corrupted_files_paths: os.remove(song) self.__corrupted_files_paths = [] def delete_small_files(self): for song in self.__small_files_paths: os.remove(song) self.__small_files_paths = [] # --------------- Setters --------------- def re_init_validation(self, new_file_list): self.__blacklisted_files_validation = new_file_list # --------------- Getters --------------- def return_all_possible_instr_note_pairs(self): return copy.deepcopy(self.__all_possible_instr_note_pairs) def return_genre_instr_note_counters(self): return copy.deepcopy(self.__genre_instr_note_counters) def return_all_possible_instr_note_pairs_counter(self): return copy.deepcopy(self.__all_possible_instr_note_pairs_counter) # ---- def return_all_instruments(self): return copy.deepcopy(self.__all_instruments) def return_instr_note_pairs_dict(self): return copy.deepcopy(self.__instr_note_pairs_dict) # ---- def return_blacklisted_files_validation(self): return copy.deepcopy(self.__blacklisted_files_validation) def return_genre_file_dict(self): return copy.deepcopy(self.__genre_file_dict) # ---- def return_corrupted_files_paths(self): return copy.deepcopy(self.__corrupted_files_paths) def return_small_files_paths(self): return copy.deepcopy(self.__small_files_paths) # ---- def return_master_instr_note_encoder(self): return copy.deepcopy(self.__master_instr_note_encoder) def return_master_instr_note_decoder(self): return copy.deepcopy(self.__master_instr_note_decoder) # ---- def return_master_instr_encoder(self): return copy.deepcopy(self.__master_instr_encoder) def return_master_instr_decoder(self): return copy.deepcopy(self.__master_instr_decoder) # ---- def return_master_genre_encoder(self): return 
copy.deepcopy(self.__master_genre_encoder) def return_master_genre_decoder(self): return copy.deepcopy(self.__master_genre_decoder) # --------------- Basic Functionality --------------- def encode_instr_note(self, instr_note_str): return self.__master_instr_note_encoder[instr_note_str] def encode_instr_note_seq(self, instr_note_seq): return [self.__master_instr_note_encoder[instr_note_pair] for instr_note_pair in instr_note_seq] # ---- def decode_instr_note(self, instr_note_num): return self.__master_instr_note_decoder[instr_note_num] def decode_instr_note_seq(self, instr_note_seq): return [self.__master_instr_note_decoder[instr_note_pair] for instr_note_pair in instr_note_seq] # ---- def encode_instr(self, instr_str): return self.__master_instr_encoder[instr_str] def decode_instr(self, instr_num): return self.__master_instr_decoder[instr_num] # ---- def encode_genre(self, genre_str): return self.__master_genre_encoder[genre_str] def decode_genre(self, genre_num): return self.__master_genre_decoder[genre_num]
38.798793
114
0.58995
import pretty_midi import glob import os import copy from collections import Counter from multiprocessing.dummy import Pool as ThreadPool from tqdm import tqdm import sys sys.path.append('..') from Shared_Files.Global_Util import * from Shared_Files.Constants import * import warnings warnings.filterwarnings("ignore") class MidiPreProcessor: def __init__(self, path_to_full_data_set, genre_sub_sample_set=sys.maxsize, generate_validation=False): self.__pbar = None self.__all_possible_instr_note_pairs = set() self.__all_possible_instr_note_pairs_counter = Counter() self.__instr_note_pairs_dict = dict() self.__all_instruments = set() self.__blacklisted_files_validation = set() self.__genre_file_dict = dict() self.__genre_instr_note_counters = dict() self.__corrupted_files_paths = [] self.__small_files_paths = [] self.__master_instr_note_encoder = dict() self.__master_instr_note_decoder = dict() self.__master_instr_encoder = dict() self.__master_instr_decoder = dict() self.__master_genre_encoder = dict() self.__master_genre_decoder = dict() self.__total_file_count = 0 self.__total_intr_note_pair_size = 0 thread_pool_results = self.__thread_pool_datasets_reader( self.__genre_dataset_init, path_to_full_data_set, genre_sub_sample_set) for genre_count, genre_dataset_result in enumerate(thread_pool_results): self.__all_possible_instr_note_pairs |= genre_dataset_result["genre_instr_note_pairs"] self.__all_instruments |= genre_dataset_result["genre_instruments"] self.__total_intr_note_pair_size += genre_dataset_result[ "genre_size"] self.__genre_file_dict = {**self.__genre_file_dict, **genre_dataset_result["genre_file_meta_data"]} self.__genre_instr_note_counters[genre_dataset_result[ "genre_name"]] = genre_dataset_result["genre_instr_note_pairs_counter"] self.__all_possible_instr_note_pairs_counter += genre_dataset_result["genre_instr_note_pairs_counter"] self.__corrupted_files_paths += genre_dataset_result[ "corrupted_files"] self.__small_files_paths += genre_dataset_result["small_files"] self.__all_possible_instr_note_pairs = sorted( self.__all_possible_instr_note_pairs) self.__all_instruments = sorted(self.__all_instruments) self.__instr_note_pairs_dict = {instr:[instr_note_pair for instr_note_pair in self.__all_possible_instr_note_pairs if instr_note_pair.find(instr) != -1] for instr in self.__all_instruments} for label, (genre, _) in enumerate( self.__genre_instr_note_counters.items()): self.__master_genre_encoder[genre] = label + 1 self.__master_genre_decoder = {v: k for k, v in self.__master_genre_encoder.items()} for label, instr_note_pair in enumerate( self.__all_possible_instr_note_pairs): self.__master_instr_note_encoder[instr_note_pair] = label + 1 self.__master_instr_note_decoder = {v: k for k, v in self.__master_instr_note_encoder.items()} for label, instr in enumerate( self.__all_instruments): self.__master_instr_encoder[instr] = label + 1 self.__master_instr_decoder = {v: k for k, v in self.__master_instr_encoder.items()} if self.__corrupted_files_paths: print("The Pre Processor found {0} corrupted files".format(len(self.__corrupted_files_paths))) print("Displaying all corrupted songs:\n") for song in self.__corrupted_files_paths: print("\t", song.split("/", 6)[-1]) print() display_options_menu(menu_intro="Corrupted files found!\n" "\tIt's fine if you don't delete" " them.Just know the pre-processor" " will not use them at all.", menu_options={1: "Delete all corrupted files", 2: "Ignore"}) user_input = input("\nInput: ") if user_input == "1": self.delete_corrupted_files() else: pass if 
self.__small_files_paths: print("The Pre Processor found {0} files that" " are smaller or equal to than {1} Classical_Notes".format( len(self.__small_files_paths), MIDI_CONSTANTS.SMALL_FILE_CHECK)) print("Displaying all small songs:\n") for song in self.__small_files_paths: print("\t", song.split("/", 6)[-1]) print() display_options_menu(menu_intro="Small files found!\n" "\tIt's fine if you don't delete" " them.Just know the pre-processor" " will not use them at all.", menu_options={1: "Delete all small files", 2: "Ignore"}) user_input = input("\nInput: ") if user_input == "1": self.delete_small_files() else: pass if generate_validation: self.__generate_validation_files() def __thread_pool_datasets_reader(self, func, path_to_full_data_set, genre_sub_sample_set): all_train_datasets_paths = [x[0] for x in os.walk( path_to_full_data_set)] all_train_datasets_paths.pop(0) all_files_by_genre = [] for dataset_pth in all_train_datasets_paths: dataset_files = [dataset_pth + "/" + file for file in glob.glob1(dataset_pth, "*.mid")][:genre_sub_sample_set] if len(dataset_files): self.__total_file_count += len(dataset_files) all_files_by_genre.append(dataset_files) self.__pbar = tqdm(total=self.__total_file_count) pool = ThreadPool(HARDWARE_RELATED_CONSTANTS.THREAD_POOL_AMOUNT) all_results = pool.imap_unordered(func, all_files_by_genre) pool.close() pool.join() self.__pbar.close() self.__pbar = None return all_results def __genre_dataset_init(self, genre_train_files): genre_instr_note_pairs = set() genre_instr_note_pairs_counter = Counter() genre_instruments = set() genre_file_meta_data = dict() genre_size = 0 corrupted_files = [] small_files = [] genre_name = genre_train_files[0].split("/")[-2].replace('_Midi', '') for _, file in enumerate(genre_train_files): self.__pbar.update(1) self.__pbar.set_postfix_str(s=file.split("/", -1)[-1][:20], refresh=True) midi_file_attr = self.read_midi_file(file) if midi_file_attr["corrupted"]: corrupted_files.append(file) elif midi_file_attr["small_file_check"]: small_files.append(file) else: genre_instruments |= midi_file_attr["instruments"] genre_instr_note_pairs |= set( midi_file_attr["flat_instr_note_seq"]) genre_size += midi_file_attr["flat_instr_note_seq_len"] genre_file_meta_data[file] = {"flat_instr_note_seq": midi_file_attr[ "flat_instr_note_seq"], "flat_instr_note_seq_len": midi_file_attr[ "flat_instr_note_seq_len"], "instruments": midi_file_attr[ "instruments"],} genre_instr_note_pairs_counter += Counter(midi_file_attr["flat_instr_note_seq"]) return {"genre_name": genre_name, "genre_size": genre_size, "genre_instruments": genre_instruments, "genre_instr_note_pairs": genre_instr_note_pairs, "genre_instr_note_pairs_counter": genre_instr_note_pairs_counter, "genre_file_meta_data": {genre_name: genre_file_meta_data}, "corrupted_files": corrupted_files, "small_files": small_files,} def __generate_validation_files(self): self.__blacklisted_files_validation = set() for genre_name, instr_note_counter in self.__genre_instr_note_counters.items(): genre_note_count = sum(instr_note_counter.values()) needed_validation_note_count = int( (genre_note_count / self.__total_intr_note_pair_size) \ * genre_note_count) note_count_file_dict = {file_meta_data["flat_instr_note_seq_len"]: file_name for file_name, file_meta_data in self.__genre_file_dict[ genre_name].items()} note_count_file_list = list(note_count_file_dict.keys()) while True and needed_validation_note_count > 25: closest_file_note_count = find_nearest( numbers=note_count_file_list, 
target=needed_validation_note_count) needed_validation_note_count -= closest_file_note_count self.__blacklisted_files_validation.add( note_count_file_dict[closest_file_note_count]) note_count_file_list.remove(closest_file_note_count) def read_midi_file(self, file): try: midi_data = pretty_midi.PrettyMIDI(file) except: return {"flat_instr_note_seq": [], "flat_instr_note_seq_len": 0, "instruments": {}, "small_file_check": False, "corrupted": True} # Stores instrument note pair flat_instr_note_seq = [] file_instruments = set() # Move through midi file; store Notes on instrument/note relationship in # string for instr in midi_data.instruments: for note_obj in instr.notes: program_instr_str = "Program" + PARAMETER_VAL_SPLITTER.STR + str(instr.program)\ + INSTRUMENT_NOTE_SPLITTER.STR\ + "Is_Drum" + PARAMETER_VAL_SPLITTER.STR + str(instr.is_drum) file_instruments.add(program_instr_str) flat_instr_note_seq.append( (program_instr_str + INSTRUMENT_NOTE_SPLITTER.STR + "Note" + PARAMETER_VAL_SPLITTER.STR + pretty_midi.note_number_to_name(note_obj.pitch), note_obj)) # --- flat_instr_note_seq_len = len(flat_instr_note_seq) # File is to small for our neural networks to take; Raise flag; if flat_instr_note_seq_len <= MIDI_CONSTANTS.SMALL_FILE_CHECK: return {"flat_instr_note_seq": flat_instr_note_seq, "flat_instr_note_seq_len": flat_instr_note_seq_len, "instruments": file_instruments, "small_file_check": True, "corrupted": False} # Sort Classical_Notes in proper sequence based on their starting and end points flat_instr_note_seq.sort(key=lambda tup: (tup[1].start, tup[1].end)) flat_instr_note_seq = [instr_note[0] for instr_note in flat_instr_note_seq] # Return dict for more explict multi return type return {"flat_instr_note_seq": flat_instr_note_seq, "flat_instr_note_seq_len": flat_instr_note_seq_len, "instruments": file_instruments, "small_file_check": False, "corrupted": False} # Delete the unused files from personal directory def delete_corrupted_files(self): for song in self.__corrupted_files_paths: os.remove(song) self.__corrupted_files_paths = [] def delete_small_files(self): for song in self.__small_files_paths: os.remove(song) self.__small_files_paths = [] # --------------- Setters --------------- def re_init_validation(self, new_file_list): self.__blacklisted_files_validation = new_file_list # --------------- Getters --------------- def return_all_possible_instr_note_pairs(self): return copy.deepcopy(self.__all_possible_instr_note_pairs) def return_genre_instr_note_counters(self): return copy.deepcopy(self.__genre_instr_note_counters) def return_all_possible_instr_note_pairs_counter(self): return copy.deepcopy(self.__all_possible_instr_note_pairs_counter) # ---- def return_all_instruments(self): return copy.deepcopy(self.__all_instruments) def return_instr_note_pairs_dict(self): return copy.deepcopy(self.__instr_note_pairs_dict) # ---- def return_blacklisted_files_validation(self): return copy.deepcopy(self.__blacklisted_files_validation) def return_genre_file_dict(self): return copy.deepcopy(self.__genre_file_dict) # ---- def return_corrupted_files_paths(self): return copy.deepcopy(self.__corrupted_files_paths) def return_small_files_paths(self): return copy.deepcopy(self.__small_files_paths) # ---- def return_master_instr_note_encoder(self): return copy.deepcopy(self.__master_instr_note_encoder) def return_master_instr_note_decoder(self): return copy.deepcopy(self.__master_instr_note_decoder) # ---- def return_master_instr_encoder(self): return copy.deepcopy(self.__master_instr_encoder) def 
return_master_instr_decoder(self): return copy.deepcopy(self.__master_instr_decoder) # ---- def return_master_genre_encoder(self): return copy.deepcopy(self.__master_genre_encoder) def return_master_genre_decoder(self): return copy.deepcopy(self.__master_genre_decoder) # --------------- Basic Functionality --------------- def encode_instr_note(self, instr_note_str): return self.__master_instr_note_encoder[instr_note_str] def encode_instr_note_seq(self, instr_note_seq): return [self.__master_instr_note_encoder[instr_note_pair] for instr_note_pair in instr_note_seq] # ---- def decode_instr_note(self, instr_note_num): return self.__master_instr_note_decoder[instr_note_num] def decode_instr_note_seq(self, instr_note_seq): return [self.__master_instr_note_decoder[instr_note_pair] for instr_note_pair in instr_note_seq] # ---- def encode_instr(self, instr_str): return self.__master_instr_encoder[instr_str] def decode_instr(self, instr_num): return self.__master_instr_decoder[instr_num] # ---- def encode_genre(self, genre_str): return self.__master_genre_encoder[genre_str] def decode_genre(self, genre_num): return self.__master_genre_decoder[genre_num]
true
true
f70e07c2c6875cd51efabd172d924854be736523
4,145
py
Python
openstack-cyborg-2.0.0/cyborg/tests/unit/objects/test_device.py
scottwedge/OpenStack-Stein
7077d1f602031dace92916f14e36b124f474de15
[ "Apache-2.0" ]
null
null
null
openstack-cyborg-2.0.0/cyborg/tests/unit/objects/test_device.py
scottwedge/OpenStack-Stein
7077d1f602031dace92916f14e36b124f474de15
[ "Apache-2.0" ]
5
2019-08-14T06:46:03.000Z
2021-12-13T20:01:25.000Z
openstack-cyborg-2.0.0/cyborg/tests/unit/objects/test_device.py
scottwedge/OpenStack-Stein
7077d1f602031dace92916f14e36b124f474de15
[ "Apache-2.0" ]
2
2020-03-15T01:24:15.000Z
2020-07-22T20:34:26.000Z
# Copyright 2019 Intel, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock

from cyborg import objects
from cyborg.tests.unit.db import base
from cyborg.tests.unit.db import utils


class TestDeviceObject(base.DbTestCase):

    def setUp(self):
        super(TestDeviceObject, self).setUp()
        self.fake_device = utils.get_test_device()

    def test_get(self):
        uuid = self.fake_device['uuid']
        with mock.patch.object(self.dbapi, 'device_get',
                               autospec=True) as mock_device_get:
            mock_device_get.return_value = self.fake_device
            device = objects.Device.get(self.context, uuid)
            mock_device_get.assert_called_once_with(self.context, uuid)
            self.assertEqual(self.context, device._context)

    def test_list(self):
        with mock.patch.object(self.dbapi, 'device_list',
                               autospec=True) as mock_device_list:
            mock_device_list.return_value = [self.fake_device]
            devices = objects.Device.list(self.context)
            self.assertEqual(1, mock_device_list.call_count)
            self.assertEqual(1, len(devices))
            self.assertIsInstance(devices[0], objects.Device)
            self.assertEqual(self.context, devices[0]._context)

    def test_create(self):
        with mock.patch.object(self.dbapi, 'device_create',
                               autospec=True) as mock_device_create:
            mock_device_create.return_value = self.fake_device
            device = objects.Device(self.context, **self.fake_device)
            device.create(self.context)
            mock_device_create.assert_called_once_with(
                self.context, self.fake_device)
            self.assertEqual(self.context, device._context)

    def test_destroy(self):
        uuid = self.fake_device['uuid']
        with mock.patch.object(self.dbapi, 'device_get',
                               autospec=True) as mock_device_get:
            mock_device_get.return_value = self.fake_device
            with mock.patch.object(self.dbapi, 'device_delete',
                                   autospec=True) as mock_device_delete:
                device = objects.Device.get(self.context, uuid)
                device.destroy(self.context)
                mock_device_delete.assert_called_once_with(self.context, uuid)
                self.assertEqual(self.context, device._context)

    def test_update(self):
        uuid = self.fake_device['uuid']
        with mock.patch.object(self.dbapi, 'device_get',
                               autospec=True) as mock_device_get:
            mock_device_get.return_value = self.fake_device
            with mock.patch.object(self.dbapi, 'device_update',
                                   autospec=True) as mock_device_update:
                fake = self.fake_device
                fake["vendor_board_info"] = "new_vendor_board_info"
                mock_device_update.return_value = fake
                device = objects.Device.get(self.context, uuid)
                device.vendor_board_info = 'new_vendor_board_info'
                device.save(self.context)
                mock_device_get.assert_called_once_with(self.context, uuid)
                mock_device_update.assert_called_once_with(
                    self.context, uuid,
                    {'vendor_board_info': 'new_vendor_board_info'})
                self.assertEqual(self.context, device._context)
45.549451
78
0.61158
import mock

from cyborg import objects
from cyborg.tests.unit.db import base
from cyborg.tests.unit.db import utils


class TestDeviceObject(base.DbTestCase):

    def setUp(self):
        super(TestDeviceObject, self).setUp()
        self.fake_device = utils.get_test_device()

    def test_get(self):
        uuid = self.fake_device['uuid']
        with mock.patch.object(self.dbapi, 'device_get',
                               autospec=True) as mock_device_get:
            mock_device_get.return_value = self.fake_device
            device = objects.Device.get(self.context, uuid)
            mock_device_get.assert_called_once_with(self.context, uuid)
            self.assertEqual(self.context, device._context)

    def test_list(self):
        with mock.patch.object(self.dbapi, 'device_list',
                               autospec=True) as mock_device_list:
            mock_device_list.return_value = [self.fake_device]
            devices = objects.Device.list(self.context)
            self.assertEqual(1, mock_device_list.call_count)
            self.assertEqual(1, len(devices))
            self.assertIsInstance(devices[0], objects.Device)
            self.assertEqual(self.context, devices[0]._context)

    def test_create(self):
        with mock.patch.object(self.dbapi, 'device_create',
                               autospec=True) as mock_device_create:
            mock_device_create.return_value = self.fake_device
            device = objects.Device(self.context, **self.fake_device)
            device.create(self.context)
            mock_device_create.assert_called_once_with(
                self.context, self.fake_device)
            self.assertEqual(self.context, device._context)

    def test_destroy(self):
        uuid = self.fake_device['uuid']
        with mock.patch.object(self.dbapi, 'device_get',
                               autospec=True) as mock_device_get:
            mock_device_get.return_value = self.fake_device
            with mock.patch.object(self.dbapi, 'device_delete',
                                   autospec=True) as mock_device_delete:
                device = objects.Device.get(self.context, uuid)
                device.destroy(self.context)
                mock_device_delete.assert_called_once_with(self.context, uuid)
                self.assertEqual(self.context, device._context)

    def test_update(self):
        uuid = self.fake_device['uuid']
        with mock.patch.object(self.dbapi, 'device_get',
                               autospec=True) as mock_device_get:
            mock_device_get.return_value = self.fake_device
            with mock.patch.object(self.dbapi, 'device_update',
                                   autospec=True) as mock_device_update:
                fake = self.fake_device
                fake["vendor_board_info"] = "new_vendor_board_info"
                mock_device_update.return_value = fake
                device = objects.Device.get(self.context, uuid)
                device.vendor_board_info = 'new_vendor_board_info'
                device.save(self.context)
                mock_device_get.assert_called_once_with(self.context, uuid)
                mock_device_update.assert_called_once_with(
                    self.context, uuid,
                    {'vendor_board_info': 'new_vendor_board_info'})
                self.assertEqual(self.context, device._context)
true
true