Dataset schema (one record per source file; ⌀ marks nullable columns):
hexsha: string (len 40) | size: int64 (4 to 996k) | ext: string (8 classes) | lang: string (1 class)
max_stars_repo_path: string (len 4-245) | max_stars_repo_name: string (len 6-130) | max_stars_repo_head_hexsha: string (len 40) | max_stars_repo_licenses: list (len 1-10) | max_stars_count: int64 (1-191k) ⌀ | max_stars_repo_stars_event_min_datetime: string (len 24) ⌀ | max_stars_repo_stars_event_max_datetime: string (len 24) ⌀
max_issues_repo_path: string (len 4-245) | max_issues_repo_name: string (len 6-130) | max_issues_repo_head_hexsha: string (len 40) | max_issues_repo_licenses: list (len 1-10) | max_issues_count: int64 (1-67k) ⌀ | max_issues_repo_issues_event_min_datetime: string (len 24) ⌀ | max_issues_repo_issues_event_max_datetime: string (len 24) ⌀
max_forks_repo_path: string (len 4-245) | max_forks_repo_name: string (len 6-130) | max_forks_repo_head_hexsha: string (len 40) | max_forks_repo_licenses: list (len 1-10) | max_forks_count: int64 (1-105k) ⌀ | max_forks_repo_forks_event_min_datetime: string (len 24) ⌀ | max_forks_repo_forks_event_max_datetime: string (len 24) ⌀
content: string (len 4-996k) | avg_line_length: float64 (1.33-58.2k) | max_line_length: int64 (2-323k) | alphanum_fraction: float64 (0-0.97)
content_no_comment: string (len 0-946k) | is_comment_constant_removed: bool (2 classes) | is_sharp_comment_removed: bool (1 class)
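A hypothetical loading sketch with the `datasets` library follows; the dataset identifier below is a placeholder, not the real dataset name.

from datasets import load_dataset

ds = load_dataset("org/python-source-files", split="train")  # placeholder identifier
for rec in ds.select(range(3)):
    # column names follow the schema above
    print(rec["max_stars_repo_path"], rec["size"], rec["alphanum_fraction"])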
f71c3d9f2d35818503ab834cb34b0e0f458291f8 | 1,541 | py | Python | signal_ocean/geo/models.py | ktsitsikas-signal/SignalSDK | 1b125ae963ee2b53a2a3ec5a7ae6bf9511608355 | ["Apache-2.0"] | stars: null | issues: null | forks: null
# noqa: D100
from dataclasses import dataclass
from decimal import Decimal
from typing import Optional
@dataclass(frozen=True)
class Country:
"""Represents a country.
Attributes:
id: The ID of the country.
name: The name of the country.
country_code: Alpha-2 codes used by the ISO 3166
standard.
country_code_numeric: UN codes used by the ISO 3166
standard.
country_code_iso3: Alpha-3 codes used by the ISO 3166
standard.
"""
id: int
name: str
country_code: str
country_code_numeric: str
country_code_iso3: str
@dataclass(frozen=True)
class Port:
"""A maritime facility where vessels can dock.
Attributes:
id: ID of the port.
country_id: ID of the country the port is in.
area_id: ID of the area the port is in.
name: Name of the port.
latitude: Latitude of the port.
longitude: Longitude of the port.
source: The source of information about the port.
"""
id: int
country_id: int
area_id: int
name: str
latitude: Decimal
longitude: Decimal
source: str
@dataclass(frozen=True)
class Area:
"""A geographical area.
Attributes:
id: ID of the area.
name: Name of the area.
area_type_id: ID of the area type.
parent_area_id: ID of this area's parent area. None if the area has no
parent.
"""
id: int
name: str
area_type_id: int
parent_area_id: Optional[int]
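# --- Usage sketch (illustration; not part of signal_ocean/geo/models.py) ---
# Coordinate and ID values below are made up for the example.
from decimal import Decimal
from signal_ocean.geo.models import Port

port = Port(id=1, country_id=30, area_id=5, name="Piraeus",
            latitude=Decimal("37.94"), longitude=Decimal("23.64"), source="example")
print(port.name, port.latitude)
# port.name = "other"  # would raise FrozenInstanceError because frozen=True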
f71c3e06df78623471caeb98f86f952dc33b019f | 671 | py | Python | oocran/django/ns/admin.py | howls90/oocran | 9951f3ff752f9f6517a4d016476c1d1e2bb44a4d | ["Apache-2.0", "BSD-3-Clause"] | stars: 3 (2018-12-12 to 2022-02-07) | issues: 1 (2017-01-11) | forks: 6 (2017-05-29 to 2022-02-07)
from django.contrib import admin
from .models import Ns, Nvf
class NsModelAdmin(admin.ModelAdmin):
list_display = ["name", "update", "timestamp"]
list_display_links = ["update"]
list_filter = ["update", "timestamp"]
list_editable = ["name"]
search_fields = ["name"]
    class Meta:  # note: ModelAdmin does not read an inner Meta; the admin is bound to Ns via admin.site.register(...) below
        model = Ns
admin.site.register(Ns, NsModelAdmin)
class NvfModelAdmin(admin.ModelAdmin):
list_display = ["name", "update", "timestamp"]
list_display_links = ["update"]
list_filter = ["update", "timestamp"]
list_editable = ["name"]
search_fields = ["name"]
class Meta:
model = Nvf
admin.site.register(Nvf, NvfModelAdmin)
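# --- Alternative registration sketch (illustration; not part of oocran/django/ns/admin.py) ---
# Django also offers a decorator form equivalent to admin.site.register above;
# it would replace, not supplement, the explicit registration (registering the
# same model twice raises admin.sites.AlreadyRegistered).
from django.contrib import admin
from .models import Ns

@admin.register(Ns)
class NsModelAdmin(admin.ModelAdmin):
    list_display = ["name", "update", "timestamp"]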
f71c3fd4413d254a1be32c01d461ea5d95ef19f5 | 560 | py | Python | Video_Generation_with_Detections.py | ludvikalkhoury/Sperm-Heads-Segmentation-and-Localization-using-Urbano-s-Method | 30580d02d0981d59376fbec6b59f5146eaffef14 | ["MIT"] | stars: null | issues: null | forks: null
import cv2
import numpy as np  # imported by the original script but not used below

# range of pre-rendered segmentation frames to stitch into a video
Initial_Frame = 900
Final_Frame = 1190
video_name = 'Sample2 with detections.avi'
# read the first frame to obtain the output video dimensions
frame = cv2.imread("image/Seg_frame%d.jpg" % Initial_Frame)
height, width, layers = frame.shape
fps = 15
video = cv2.VideoWriter(video_name, 0, fps, (width, height))
for x in range(Initial_Frame, Final_Frame + 1, 1):
    frame = cv2.imread("image/Seg_frame%d.jpg" % x)
    video.write(frame)
    # progress indicator in percent
    print(round(((x - Initial_Frame) / (Final_Frame - Initial_Frame)) * 100, 2), '%')
cv2.destroyAllWindows()
video.release()
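# --- Variation sketch (illustration; not part of the original script) ---
# Passing 0 as the fourcc above requests an uncompressed AVI stream; a FourCC
# code selects a codec instead. Width/height are placeholders for the values
# the script reads from frame.shape.
import cv2

width, height = 640, 480  # placeholder dimensions
fourcc = cv2.VideoWriter_fourcc(*"MJPG")  # Motion-JPEG codec
video = cv2.VideoWriter("Sample2 with detections.avi", fourcc, 15, (width, height))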
f71c41f8731671ad3e1d3a2df155f7aa93679845 | 5,182 | py | Python | instances/optimization/20210422-1717/pas1/inst-20210422-1717-c30-pas1.py | LHcau/scheduling-shared-passenger-and-freight-transport-on-a-fixed-infrastructure | bba1e6af5bc8d9deaa2dc3b83f6fe9ddf15d2a11 | ["BSD-3-Clause"] | stars: null | issues: null | forks: null
"""
PERIODS
"""
numPeriods = 180
"""
STOPS
"""
numStations = 13
station_names = (
"Hamburg Hbf", # 0
"Landwehr", # 1
"Hasselbrook", # 2
"Wansbeker Chaussee*", # 3
"Friedrichsberg*", # 4
"Barmbek*", # 5
"Alte Woehr (Stadtpark)", # 6
"Ruebenkamp (City Nord)", # 7
"Ohlsdorf*", # 8
"Kornweg", # 9
"Hoheneichen", # 10
"Wellingsbuettel", # 11
"Poppenbuettel*", # 12
)
numStops = 26
stops_position = (
(0, 0), # Stop 0
(2, 0), # Stop 1
(3, 0), # Stop 2
(4, 0), # Stop 3
(5, 0), # Stop 4
(6, 0), # Stop 5
(7, 0), # Stop 6
(8, 0), # Stop 7
(9, 0), # Stop 8
(11, 0), # Stop 9
(13, 0), # Stop 10
(14, 0), # Stop 11
(15, 0), # Stop 12
(15, 1), # Stop 13
(15, 1), # Stop 14
(13, 1), # Stop 15
(12, 1), # Stop 16
(11, 1), # Stop 17
(10, 1), # Stop 18
(9, 1), # Stop 19
(8, 1), # Stop 20
(7, 1), # Stop 21
(6, 1), # Stop 22
(4, 1), # Stop 23
(2, 1), # Stop 24
(1, 1), # Stop 25
)
stops_distance = (
(0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), # Stop 0
(0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), # Stop 1
(0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), # Stop 2
(0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), # Stop 3
(0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), # Stop 4
(0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), # Stop 5
(0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), # Stop 6
(0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), # Stop 7
(0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), # Stop 8
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), # Stop 9
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), # Stop 10
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), # Stop 11
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), # Stop 12
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), # Stop 13
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), # Stop 14
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0), # Stop 15
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0), # Stop 16
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0), # Stop 17
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0), # Stop 18
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0), # Stop 19
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0), # Stop 20
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0), # Stop 21
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0), # Stop 22
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0), # Stop 23
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2), # Stop 24
(1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), # Stop 25
)
station_start = 0
"""
TRAMS
"""
numTrams = 18
tram_capacity = 514
tram_capacity_cargo = 304
tram_capacity_min_passenger = 208
tram_capacity_min_cargo = 0
tram_speed = 1
tram_headway = 1
tram_min_service = 1
tram_max_service = 10
min_time_next_tram = 0.333
tram_travel_deviation = 0.167
"""
PASSENGERS
"""
passenger_set = "pas-20210422-1717-int1"
passenger_service_time_board = 0.0145
passenger_service_time_alight = 0.0145
"""
CARGO
"""
numCargo = 30
cargo_size = 4
cargo_station_destination = (
5, # 0
5, # 1
5, # 2
8, # 3
5, # 4
12, # 5
8, # 6
4, # 7
12, # 8
3, # 9
12, # 10
12, # 11
4, # 12
4, # 13
12, # 14
3, # 15
5, # 16
4, # 17
3, # 18
12, # 19
5, # 20
8, # 21
12, # 22
8, # 23
3, # 24
4, # 25
12, # 26
12, # 27
3, # 28
4, # 29
)
cargo_release = (
5, # 0
8, # 1
16, # 2
22, # 3
24, # 4
25, # 5
26, # 6
27, # 7
32, # 8
33, # 9
34, # 10
35, # 11
37, # 12
37, # 13
38, # 14
41, # 15
44, # 16
45, # 17
46, # 18
47, # 19
48, # 20
49, # 21
57, # 22
61, # 23
62, # 24
67, # 25
70, # 26
70, # 27
71, # 28
72, # 29
)
cargo_station_deadline = (
176, # 0
171, # 1
155, # 2
123, # 3
126, # 4
91, # 5
36, # 6
87, # 7
141, # 8
163, # 9
108, # 10
144, # 11
76, # 12
47, # 13
97, # 14
114, # 15
142, # 16
55, # 17
56, # 18
57, # 19
118, # 20
160, # 21
139, # 22
71, # 23
82, # 24
77, # 25
80, # 26
169, # 27
129, # 28
99, # 29
)
cargo_max_delay = 3
cargo_service_time_load = 0.3333333333333333
cargo_service_time_unload = 0.25
"""
parameters for reproducibility. More information: https://numpy.org/doc/stable/reference/random/parallel.html
"""
#initial entropy
entropy = 8991598675325360468762009371570610170
#index for seed sequence child
child_seed_index = (
0, # 0
)
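# --- Illustration (assumed usage; not part of this instance file) ---
# The linked numpy "parallel random" recipe suggests how `entropy` and
# `child_seed_index` above are meant to seed independent generators:
import numpy as np

root = np.random.SeedSequence(entropy)
children = root.spawn(max(child_seed_index) + 1)  # one child SeedSequence per index
rngs = [np.random.default_rng(children[i]) for i in child_seed_index]
print(rngs[0].random())  # deterministic for the fixed entropy above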
f71c4289fb96b9b13572b2e0265ec5d04f90d215 | 3,523 | py | Python | nn_interpretability/model/model_repository.py | miquelmn/nn_interpretability | 2b5d2b4102016189743e09f1f3a56f2ecddfde98 | ["MIT"] | stars: 41 (2020-10-13 to 2022-02-21) | issues: 4 (2021-07-11 to 2022-03-08) | forks: 7 (2020-10-21 to 2022-03-07)
import os
import torch
from pathlib import Path
from nn_interpretability.model.definition.am_mnist_classifier import AMCNN
from nn_interpretability.model.definition.mc_dropout_cnn import CNN_Dropout
from nn_interpretability.model.definition.general_mnist_cnn import GeneralCNN
from nn_interpretability.model.definition.mnist_generator import MNISTGenerator
from nn_interpretability.model.definition.mnist_discriminator import MNISTDiscriminator
from nn_interpretability.model.definition.cam_mnist_classifier import CAMMNISTClassifier
from nn_interpretability.model.definition.pretrained_dc_generator import PretrainedDCGANGenerator
from nn_interpretability.model.definition.cam_mnist_classifier_2 import CAMMNISTExtendedClassifier
class ModelRepository:
MODELS_PATH = str(Path(__file__).parent.parent.parent.joinpath('models')) + "/"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
@staticmethod
def get_general_mnist_cnn(path: str = None):
model = GeneralCNN()
if path is not None:
if os.path.exists(ModelRepository.MODELS_PATH + path):
model = ModelRepository._load(model, path)
return model.to(ModelRepository.device)
@staticmethod
def get_cnn_dropout(path: str = None):
model = CNN_Dropout()
if path is not None:
if os.path.exists(ModelRepository.MODELS_PATH + path):
model = ModelRepository._load(model, path)
return model.to(ModelRepository.device)
@staticmethod
def get_cam_classifier(path: str = None):
model = CAMMNISTClassifier()
if path is not None:
model = ModelRepository._load(model, path)
return model.to(ModelRepository.device)
@staticmethod
def get_cam_extended_classifier(path: str = None):
model = CAMMNISTExtendedClassifier()
if path is not None:
model = ModelRepository._load(model, path)
return model.to(ModelRepository.device)
@staticmethod
def get_am_classifier(path: str = None):
model = AMCNN()
if path is not None:
model = ModelRepository._load(model, path)
return model.to(ModelRepository.device)
@staticmethod
def get_pretrained_dcgan_generator():
"""
Source of the pretrained model is:
https://github.com/csinva/gan-vae-pretrained-pytorch
        :return: the pretrained DCGAN generator, moved to the active device
"""
path = 'pretrained_dcgan_generator.pth'
model = PretrainedDCGANGenerator()
model = ModelRepository._load(model, path)
return model.to(ModelRepository.device)
@staticmethod
def get_mnist_generator(latent_dim: int = 128, path: str = None):
model = MNISTGenerator(latent_dim=latent_dim)
if path is not None:
model = ModelRepository._load(model, path)
return model.to(ModelRepository.device)
@staticmethod
def get_mnist_discriminator(path: str = None):
model = MNISTDiscriminator()
if path is not None:
model = ModelRepository._load(model, path)
return model.to(ModelRepository.device)
@staticmethod
def save(model, model_name):
torch.save(model.state_dict(), ModelRepository.MODELS_PATH + model_name)
return model
@staticmethod
def _load(model, model_name):
model.load_state_dict(torch.load(ModelRepository.MODELS_PATH + model_name, map_location=ModelRepository.device))
return model.to(ModelRepository.device)
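# --- Usage sketch (illustration; not part of model_repository.py) ---
# The checkpoint filename below is hypothetical; it would have to exist under
# the repository's models/ directory resolved by MODELS_PATH.
from nn_interpretability.model.model_repository import ModelRepository

model = ModelRepository.get_general_mnist_cnn(path="general_cnn.pth")  # hypothetical file name
model.eval()  # the repository already moves models to CUDA when available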
f71c4337eebf63c1b6c456cfeb478e85e64e7d70 | 4,660 | py | Python | cherrytree/github_utils.py | preset-io/cherrytree | 647b7acfb8f95c6a874386183860fdf17cace49b | ["Apache-2.0"] | stars: 1 (2021-04-06) | issues: null | forks: null
import os
import re
from collections import OrderedDict
from typing import Generator, List, Optional, Reversible
import delegator
from git import Commit
from git.exc import InvalidGitRepositoryError
from git.repo import Repo
from github import Github
from github.Label import Label
from github.Issue import Issue
from github.GithubException import UnknownObjectException
from github.Repository import Repository
from cherrytree.classes import CherryTreeExecutionException
# PRs are either of form "Merge pull request #nnn from..." or "...(#nnn)"
PR_REGEX = re.compile(r"(^Merge pull request #(\d+) from|\(#(\d+)\)$)")
def get_github_instance() -> Github:
token = os.environ.get("GITHUB_TOKEN")
if not token:
raise Exception("Env var 'GITHUB_TOKEN' is missing")
return Github(token)
def get_repo(repo: str) -> Repository:
g = get_github_instance()
return g.get_repo(repo)
def get_issues_from_labels(repo: str, label: str, prs_only: bool = False) -> List[Issue]:
label_objects: List[Label] = []
gh_repo = get_repo(repo)
try:
label_objects.append(gh_repo.get_label(label))
except UnknownObjectException:
# unknown label
return []
issues = gh_repo.get_issues(labels=label_objects, state="all")
if prs_only:
return [o for o in issues if o.pull_request]
return [o for o in issues]
def get_issue(repo: str, id_: int) -> Optional[Issue]:
gh_repo = get_repo(repo)
try:
return gh_repo.get_issue(id_)
except UnknownObjectException:
# unknown id
return None
def get_commits(repo: str, branch: str, since=None):
"""Get commit objects from a branch, over a limited period"""
gh_repo = get_repo(repo)
branch_object = gh_repo.get_branch(branch)
sha = branch_object.commit.sha
if since:
commits = gh_repo.get_commits(sha=sha, since=since)
else:
commits = gh_repo.get_commits(sha=sha)
return commits
def commit_pr_number(commit: Commit) -> Optional[int]:
"""Given a commit object, returns the PR number"""
res = PR_REGEX.search(commit.summary)
if res:
groups = res.groups()
return int(groups[1] or groups[2])
return None
def get_commit_pr_map(commits: Reversible[Commit]):
"""Given a list of commits and prs, returns a map of pr_number to commit"""
d = OrderedDict()
for commit in reversed(commits):
pr_number = commit_pr_number(commit)
if pr_number:
d[pr_number] = commit
return d
def truncate_str(value: str, width: int = 90) -> str:
cont_str = "..."
trunc_value = value[: width - len(cont_str)].strip()
if len(trunc_value) < len(value.strip()):
trunc_value = f"{trunc_value}{cont_str}"
return f"{trunc_value:<{width}}"
def git_get_current_head() -> str:
output = os_system("git status | head -1")
match = re.match("(?:HEAD detached at|On branch) (.*)", output)
if not match:
return ""
return match.group(1)
def os_system(cmd, raise_on_error=True) -> str:
p = delegator.run(cmd)
if raise_on_error and p.return_code != 0:
raise CherryTreeExecutionException(p.err)
return p.out
def check_if_branch_exists(branch: str) -> bool:
current_head = git_get_current_head()
try:
os_system(f"git checkout {branch}")
except CherryTreeExecutionException:
return False
os_system(f"git checkout {current_head}")
return True
def deduplicate_prs(prs: List[Issue]) -> List[Issue]:
pr_set = set()
ret: List[Issue] = []
for pr in prs:
if pr.number not in pr_set:
ret.append(pr)
pr_set.add(pr.number)
return ret
def get_git_repo() -> Repo:
"""
Find the path containing the git repo. Start by checking the current working
directory, and proceed up the directory tree if a git repo can't be found.
    returns: Path to the closest git repo
raises FileNotFoundError: if no git repo is found in the current path
"""
def _traverse_dirs(path: str) -> Generator[str, None, None]:
# first yield the current directory
yield path
# then start yielding parents until we reach the root
while True:
parent = os.path.dirname(path)
if path != parent:
yield parent
path = parent
else:
break
cwd = os.getcwd()
for dir_ in _traverse_dirs(cwd):
try:
repo = Repo(dir_)
return repo
except InvalidGitRepositoryError:
pass
raise FileNotFoundError("No git repo found in path: {}". format(cwd))
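# --- Usage sketch (illustration; not part of github_utils.py) ---
# Assumes a GITHUB_TOKEN environment variable and network access; the
# repository and label names are illustrative only.
from cherrytree.github_utils import get_issues_from_labels, truncate_str

prs = get_issues_from_labels("apache/superset", "v1.0", prs_only=True)
for pr in prs:
    print(pr.number, truncate_str(pr.title, 60))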
f71c439040784630188bbe6360a6d737525bd96e | 7,063 | py | Python | NiaPy/algorithms/basic/gso.py | tuahk/NiaPy | c863d801fda8e1949a3ca716a4de7c7ca3d0ea16 | ["MIT"] | stars: null | issues: null | forks: null
# encoding=utf8
# pylint: disable=mixed-indentation, trailing-whitespace, line-too-long, multiple-statements, attribute-defined-outside-init, logging-not-lazy, no-self-use, redefined-builtin, singleton-comparison, unused-argument, arguments-differ, no-else-return
import logging
from scipy.spatial.distance import euclidean
from numpy import full, apply_along_axis, argmin, copy, sum, inf, fmax, pi, where
from NiaPy.algorithms.algorithm import Algorithm
logging.basicConfig()
logger = logging.getLogger('NiaPy.algorithms.basic')
logger.setLevel('INFO')
__all__ = ['GlowwormSwarmOptimization', 'GlowwormSwarmOptimizationV1', 'GlowwormSwarmOptimizationV2', 'GlowwormSwarmOptimizationV3']
class GlowwormSwarmOptimization(Algorithm):
r"""Implementation of glowwarm swarm optimization.
**Algorithm:** Glowwarm Swarm Optimization Algorithm
**Date:** 2018
**Authors:** Klemen Berkovič
**License:** MIT
**Reference URL:** https://www.springer.com/gp/book/9783319515946
**Reference paper:** Kaipa, Krishnanand N., and Debasish Ghose. Glowworm swarm optimization: theory, algorithms, and applications. Vol. 698. Springer, 2017.
"""
def __init__(self, **kwargs):
if kwargs.get('name', None) == None: Algorithm.__init__(self, name='GlowwormSwarmOptimization', sName='GSO', **kwargs)
else: Algorithm.__init__(self, **kwargs)
def setParameters(self, n=25, l0=5, nt=5, rho=0.4, gamma=0.6, beta=0.08, s=0.03, **ukwargs):
r"""Set the arguments of an algorithm.
**Arguments:**
n {integer} -- number of glowworms in population
l0 {real} -- initial luciferin quantity for each glowworm
		nt {real} -- desired number of neighbors
		rs {real} -- maximum sensing range
		rho {real} -- luciferin decay constant
		gamma {real} -- luciferin enhancement constant
		beta {real} -- rate of change of the neighborhood range
		s {real} -- step size of the glowworm movement
"""
self.n, self.l0, self.nt, self.rho, self.gamma, self.beta, self.s = n, l0, nt, rho, gamma, beta, s
if ukwargs: logger.info('Unused arguments: %s' % (ukwargs))
def randMove(self, i):
j = i
while i == j: j = self.randint(self.n)
return j
def getNeighbors(self, i, r, GS, L):
N = full(self.n, 0)
for j, gw in enumerate(GS): N[j] = 1 if i != j and euclidean(GS[i], gw) <= r and L[i] >= L[j] else 0
return N
def probabilityes(self, i, N, L):
d, P = sum(L[where(N == 1)] - L[i]), full(self.n, .0)
		for j in range(self.n): P[j] = ((L[j] - L[i]) / d) if N[j] == 1 else 0  # original assigned P[i], overwriting a single entry; P[j] matches the GSO move-probability rule
return P
def moveSelect(self, pb, i):
r, b_l, b_u = self.rand(), 0, 0
for j in range(self.n):
			b_l, b_u = b_u, b_u + pb[j]  # original accumulated pb[i]; roulette-wheel selection accumulates pb[j]
if b_l < r < b_u: return j
return self.randint(self.n)
def calcLuciferin(self, L, GS_f): return (1 - self.rho) * L + self.gamma * GS_f
def rangeUpdate(self, R, N, rs): return R + self.beta * (self.nt - sum(N))
def getBest(self, GS, GS_f, xb, xb_f):
ib = argmin(GS_f)
if GS_f[ib] < xb_f: return GS[ib], GS_f[ib]
else: return xb, xb_f
def runTask(self, task):
rs = euclidean(full(task.D, 0), task.bRange)
GS, GS_f, L, R = self.uniform(task.Lower, task.Upper, [self.n, task.D]), full(self.n, inf), full(self.n, self.l0), full(self.n, rs)
xb, xb_f = None, inf
while not task.stopCondI():
GSo, Ro, GS_f = copy(GS), copy(R), apply_along_axis(task.eval, 1, GS)
xb, xb_f = self.getBest(GS, GS_f, xb, xb_f)
L = self.calcLuciferin(L, GS_f)
N = [self.getNeighbors(i, Ro[i], GSo, L) for i in range(self.n)]
P = [self.probabilityes(i, N[i], L) for i in range(self.n)]
j = [self.moveSelect(P[i], i) for i in range(self.n)]
for i in range(self.n): GS[i] = task.repair(GSo[i] + self.s * ((GSo[j[i]] - GSo[i]) / (euclidean(GSo[j[i]], GSo[i]) + 1e-31)))
for i in range(self.n): R[i] = max(0, min(rs, self.rangeUpdate(Ro[i], N[i], rs)))
return xb, xb_f
class GlowwormSwarmOptimizationV1(GlowwormSwarmOptimization):
r"""Implementation of glowwarm swarm optimization.
**Algorithm:** Glowwarm Swarm Optimization Algorithm
**Date:** 2018
**Authors:** Klemen Berkovič
**License:** MIT
**Reference URL:** https://www.springer.com/gp/book/9783319515946
**Reference paper:** Kaipa, Krishnanand N., and Debasish Ghose. Glowworm swarm optimization: theory, algorithms, and applications. Vol. 698. Springer, 2017.
"""
def __init__(self, **kwargs): GlowwormSwarmOptimization.__init__(self, name='GlowwormSwarmOptimizationV1', sName='GSOv1', **kwargs)
def setParameters(self, **kwargs):
self.__setParams(**kwargs)
GlowwormSwarmOptimization.setParameters(self, **kwargs)
def __setParams(self, alpha=0.2, **ukwargs):
r"""Set the arguments of an algorithm.
**Arguments:**
		alpha {real} -- scaling coefficient (stored but not used by this variant's update rules)
"""
self.alpha = alpha
if ukwargs: logger.info('Unused arguments: %s' % (ukwargs))
def calcLuciferin(self, L, GS_f): return fmax(0, (1 - self.rho) * L + self.gamma * GS_f)
def rangeUpdate(self, R, N, rs): return rs / (1 + self.beta * (sum(N) / (pi * rs ** 2)))
class GlowwormSwarmOptimizationV2(GlowwormSwarmOptimization):
r"""Implementation of glowwarm swarm optimization.
**Algorithm:** Glowwarm Swarm Optimization Algorithm
**Date:** 2018
**Authors:** Klemen Berkovič
**License:** MIT
**Reference URL:** https://www.springer.com/gp/book/9783319515946
**Reference paper:** Kaipa, Krishnanand N., and Debasish Ghose. Glowworm swarm optimization: theory, algorithms, and applications. Vol. 698. Springer, 2017.
"""
def __init__(self, **kwargs): GlowwormSwarmOptimization.__init__(self, name='GlowwormSwarmOptimizationV2', sName='GSOv2', **kwargs)
def setParameters(self, **kwargs):
self.__setParams(alpha=kwargs.pop('alpha', 0.2), **kwargs)
GlowwormSwarmOptimization.setParameters(self, **kwargs)
def __setParams(self, alpha=0.2, **ukwargs):
r"""Set the arguments of an algorithm.
**Arguments:**
		alpha {real} -- lower bound of the neighborhood range
"""
self.alpha = alpha
if ukwargs: logger.info('Unused arguments: %s' % (ukwargs))
def rangeUpdate(self, P, N, rs): return self.alpha + (rs - self.alpha) / (1 + self.beta * sum(N))
class GlowwormSwarmOptimizationV3(GlowwormSwarmOptimization):
r"""Implementation of glowwarm swarm optimization.
**Algorithm:** Glowwarm Swarm Optimization Algorithm
**Date:** 2018
**Authors:** Klemen Berkovič
**License:** MIT
**Reference URL:** https://www.springer.com/gp/book/9783319515946
**Reference paper:** Kaipa, Krishnanand N., and Debasish Ghose. Glowworm swarm optimization: theory, algorithms, and applications. Vol. 698. Springer, 2017.
"""
	def __init__(self, **kwargs): GlowwormSwarmOptimization.__init__(self, name='GlowwormSwarmOptimizationV3', sName='GSOv3', **kwargs)  # the original passed the V2 name/sName here, an apparent copy-paste slip
def setParameters(self, **kwargs):
self.__setParams(beta1=kwargs.pop('beta1', 0.2), **kwargs)
GlowwormSwarmOptimization.setParameters(self, **kwargs)
def __setParams(self, beta1=0.2, **ukwargs):
r"""Set the arguments of an algorithm.
**Arguments:**
		beta1 {real} -- range-reduction coefficient applied once the neighbor count reaches nt
"""
self.beta1 = beta1
if ukwargs: logger.info('Unused arguments: %s' % (ukwargs))
def rangeUpdate(self, R, N, rs): return R + (self.beta * sum(N)) if sum(N) < self.nt else (-self.beta1 * sum(N))
# vim: tabstop=3 noexpandtab shiftwidth=3 softtabstop=3
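# --- Illustration (standalone numpy sketch, not NiaPy code) ---
# The two core GSO update rules implemented by calcLuciferin and probabilityes
# above, with made-up fitness values; rho/gamma are the class defaults.
import numpy as np

rho, gamma = 0.4, 0.6
L = np.full(5, 5.0)                              # initial luciferin l0 for 5 glowworms
fitness = np.array([3.0, 1.0, 4.0, 1.5, 5.0])    # made-up objective values
L = (1 - rho) * L + gamma * fitness              # luciferin update rule

i, neighbors = 0, np.array([2, 4])               # brighter glowworms within range of worm 0
p = (L[neighbors] - L[i]) / np.sum(L[neighbors] - L[i])  # move probabilities
print(L, p)  # worm 0 favors the brightest neighbor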
f71c43a114c10204ad2b5ff2693265ff01dc5894 | 64 | py | Python | dr/__init__.py | jigangkim/domain_randomization | 07a309a9e824b5332219871abe8f0f657694b292 | ["MIT"] | stars: 7 (2019-06-09 to 2022-02-19) | issues: 1 (2019-07-12 to 2021-11-21) | forks: 3 (2020-05-01 to 2021-03-30)
import dr.dist
import dr.experiment
import dr.gym
import dr.ppo
f71c441128b684fbb0542eeb3decdebe626fc0b9 | 377 | py | Python | plugin_info.py | Attolab/pymodaq_plugins_smaract | 109808255d784bd98b875fb4886c90a1f0e9ff7b | ["CECILL-B"] | stars: null | issues: null | forks: null
SHORT_PLUGIN_NAME = 'smaract'
package_url = 'https://github.com/CEMES-CNRS/pymodaq_plugins_samarct'
description = 'Set of PyMoDAQ plugins for linear actuators from Smaract' \
'(SLC positioners). MCS and MCS2 controllers are supported.'
author = 'David Bresteau'
author_email = 'david.bresteau@cea.fr'
# packages required for your plugin:
packages_required = []
f71c454e2944e16b15e7400432d9858fbd2966f8 | 4,678 | py | Python | cuesdk/capi.py | CorsairOfficial/cue-sdk-python | 8385369725be852eac4bd8e4323ea6661c8603e4 | ["MIT"] | stars: 34 (2020-03-25 to 2022-03-26) | issues: 12 (2020-03-25 to 2022-02-18) | forks: 17 (2020-07-24 to 2022-02-02)
import os
import platform
import sys
from ctypes import (CDLL, CFUNCTYPE, POINTER, sizeof, c_bool, c_char, c_int32,
c_void_p)
from .enums import (CorsairAccessMode, CorsairError, CorsairLedId,
CorsairEventId, CorsairDevicePropertyId)
from .structs import (CorsairProtocolDetails, CorsairDeviceInfo,
CorsairLedPosition, CorsairLedPositions, CorsairLedColor,
CorsairEvent)
__all__ = ['CorsairNativeApi']
def get_library_path_windows():
suffix = '.x64' if sizeof(c_void_p) == 8 else ''
lib_name = 'CUESDK' + suffix + '_2017.dll'
return os.path.join(os.path.dirname(__file__), 'bin', lib_name)
def get_library_path_mac():
lib_name = 'libCUESDK.dylib'
return os.path.join(os.path.dirname(__file__), 'bin', lib_name)
def load_library(library_path):
try:
return CDLL(library_path)
except OSError:
print("Unable to load the library %s" % library_path)
sys.exit()
class CorsairNativeApi():
def __init__(self, libpath):
if libpath is None:
system = platform.system()
if system == "Windows":
libpath = get_library_path_windows()
elif system == "Darwin":
libpath = get_library_path_mac()
lib = load_library(libpath)
def create_func(fn, restype, argtypes):
f = lib.__getattr__(fn)
f.restype = restype
f.argtypes = argtypes
return f
self.CorsairSetLedsColorsBufferByDeviceIndex = create_func(
'CorsairSetLedsColorsBufferByDeviceIndex', c_bool,
[c_int32, c_int32, POINTER(CorsairLedColor)])
self.CorsairSetLedsColorsFlushBuffer = create_func(
'CorsairSetLedsColorsFlushBuffer', c_bool, None)
self.CallbackFunc = CFUNCTYPE(c_void_p, c_bool, CorsairError)
self.CorsairSetLedsColorsFlushBufferAsync = create_func(
'CorsairSetLedsColorsFlushBufferAsync', c_bool,
[self.CallbackFunc, c_void_p])
self.CorsairGetLedsColors = create_func(
'CorsairGetLedsColors', c_bool,
[c_int32, POINTER(CorsairLedColor)])
self.CorsairGetLedsColorsByDeviceIndex = create_func(
'CorsairGetLedsColorsByDeviceIndex', c_bool,
[c_int32, c_int32, POINTER(CorsairLedColor)])
self.CorsairGetDeviceCount = create_func('CorsairGetDeviceCount',
c_int32, None)
self.CorsairGetDeviceInfo = create_func('CorsairGetDeviceInfo',
POINTER(CorsairDeviceInfo),
[c_int32])
self.CorsairGetLedPositions = create_func('CorsairGetLedPositions',
POINTER(CorsairLedPositions),
None)
self.CorsairGetLedPositionsByDeviceIndex = create_func(
'CorsairGetLedPositionsByDeviceIndex',
POINTER(CorsairLedPositions), [c_int32])
self.CorsairGetLedIdForKeyName = create_func(
'CorsairGetLedIdForKeyName', CorsairLedId, [c_char])
self.CorsairRequestControl = create_func('CorsairRequestControl',
c_bool, [CorsairAccessMode])
self.CorsairPerformProtocolHandshake = create_func(
'CorsairPerformProtocolHandshake', CorsairProtocolDetails, None)
self.CorsairGetLastError = create_func('CorsairGetLastError',
CorsairError, None)
self.CorsairReleaseControl = create_func('CorsairReleaseControl',
c_bool, [CorsairAccessMode])
self.CorsairSetLayerPriority = create_func('CorsairSetLayerPriority',
c_bool, [c_int32])
c_bool_p = POINTER(c_bool)
self.CorsairGetBoolPropertyValue = create_func(
'CorsairGetBoolPropertyValue', c_bool,
[c_int32, CorsairDevicePropertyId, c_bool_p])
c_int32_p = POINTER(c_int32)
self.CorsairGetInt32PropertyValue = create_func(
'CorsairGetInt32PropertyValue', c_bool,
[c_int32, CorsairDevicePropertyId, c_int32_p])
self.EventHandler = CFUNCTYPE(None, c_void_p, POINTER(CorsairEvent))
self.CorsairSubscribeForEvents = create_func(
'CorsairSubscribeForEvents', c_bool, [self.EventHandler, c_void_p])
self.CorsairUnsubscribeFromEvents = create_func(
'CorsairUnsubscribeFromEvents', c_bool, None)
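# --- Usage sketch (illustration; not part of capi.py) ---
# Based only on the wrappers defined above; assumes iCUE/CUE is running
# with the SDK option enabled.
from cuesdk.capi import CorsairNativeApi

api = CorsairNativeApi(None)  # None selects the bundled library for this platform
details = api.CorsairPerformProtocolHandshake()
print(api.CorsairGetDeviceCount(), "device(s) detected")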
f71c45e85feabb8c601d3ee30aa42ca3ae609193 | 20,007 | py | Python | gt_kai.py | d-ks/gym_torcs_kai | b9e1659a18ea8a788d0c6aeb7b1111c0284b23ac | ["MIT"] | stars: null | issues: null | forks: null
# Gym-TORCS-Kai Environment for Reinforcement Learning in TORCS
# original author : Naoto Yoshida
# (https://github.com/ugo-nama-kun/gym_torcs)
# modified version author : Daiko Kishikawa
#
# This environment is under modification. (2019.12)
#
import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
import sys
sys.path.append("./gym_torcs_kai")
import snakeoil3_gym as snakeoil3
import os
import time
class TorcsKaiEnv(gym.Env):
    # the minimum-speed termination check starts once the step count exceeds this
terminal_judge_start = 500
# episode terminates when the car is running slower than this limit
termination_limit_progress = 5
# whether to initialize when resetting the environment
initial_reset = True
def __init__(self, throttle=False, gear_change=False):
print("=== Hello, this is Gym-TORCS-Kai. ===")
############################ PARAMETERS OF DRIVING ############################
""" throttle (bool) : usage of the throttle control in TORCS. """
""" gear_change (bool) : usage of the gear control in TORCS. """
""" obsdim (int) : the number of observation (state input) dimensions."""
# Currently, three types of dimensions are supported: "2", "31", "79".
# "2" : the minimum number of dimensions required for driving.
# "31" : the number of dimensions required for a single agent to drive normally.
# "79" : the number of dimensions using all available inputs.
""" maximum_distance (float) : the maximum distance when finish driving. """
""" default_speed (float) : the target speed for acceleration/deceleration. """
self.throttle = throttle
self.gear_change = gear_change
self.obsdim = 31
self.maximum_distance = 1908.32
self.default_speed = 100
##################################################################################
print("--> throttle : ", self.throttle)
print("--> gear : ", self.gear_change)
print("--> dim. of observ. : ", self.obsdim)
print("--> max. dist. : ", self.maximum_distance, " m")
print("--> targ. speed : ", self.default_speed, "km/h")
# Initialization of the driving in TORCS.
self.initial_run = True
# variable for calculating Y-axis acceleration
self.speedY = 0
self.time = 0
# variable for recording the current number of steps
self.time_step = 0
# the range of reward function
self.reward_range = (-10, 10)
self.testmode = False
# lists for recording vehicle status
self.Yaclist = []
self.poshis = []
self.anglehis = []
self.sphis = []
# launch TORCS system
os.system("pkill torcs")
time.sleep(0.5)
if self.obsdim == 79:
os.system("torcs &")
else:
os.system("torcs -nofuel -nodamage -nolaptime &")
time.sleep(0.5)
os.system("sh ./gym_torcs_kai/autostart.sh")
time.sleep(0.5)
"""
# Modify here if you use multiple tracks in the environment
self.client = snakeoil3.Client(p=3101, vision=False) # Open new UDP in vtorcs
self.client.MAX_STEPS = np.inf
client = self.client
client.get_servers_input() # Get the initial input from torcs
obs = client.S.d # Get the current full-observation from torcs
"""
# definitions of action space ranges
if throttle is False:
self.action_space = spaces.Box(low=-1.0, high=1.0, shape=(1,))
else:
self.action_space = spaces.Box(low=-1.0, high=1.0, shape=(2,))
# definitions of observation space ranges
if self.obsdim == 79:
high = np.array([np.pi, # angle
np.inf, # curLapTime
np.inf, # damage
np.inf, # distFromStart
np.inf, # distRaced
# focus (5 dim.)
200, 200, 200, 200, 200,
np.inf, # fuel
6, # gear
np.inf, # lastLapTime
# opponents (36 dim.)
200, 200, 200, 200, 200, 200,
200, 200, 200, 200, 200, 200,
200, 200, 200, 200, 200, 200,
200, 200, 200, 200, 200, 200,
200, 200, 200, 200, 200, 200,
200, 200, 200, 200, 200, 200,
np.inf, # racePos
np.inf, # rpm
np.inf, # speedX
np.inf, # speedY
np.inf, # speedZ
# track (19 dim.)
200, 200, 200, 200, 200,
200, 200, 200, 200, 200,
200, 200, 200, 200, 200,
200, 200, 200, 200,
np.inf, # trackPos
# wheelSpinVel (4 dim.)
np.inf, np.inf, np.inf, np.inf,
np.inf, # z
])
low = np.array([-np.pi, # angle
0, # curLapTime
0, # damage
0, # distFromStart
0, # distRaced
# focus (5 dim.)
0, 0, 0, 0, 0,
0, # fuel
-1, # gear
0, # lastLapTime
# opponents (36 dim.)
0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
1, # racePos
0, # rpm
-np.inf, # speedX
-np.inf, # speedY
-np.inf, # speedZ
# track (19 dim.)
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0,
-np.inf, # trackPos
# wheelSpinVel (4 dim.)
0, 0, 0, 0,
-np.inf, # z
])
elif self.obsdim == 2:
high = np.array([np.pi, # angle
np.inf]) # trackPos
low = np.array([-np.pi, # angle
-np.inf]) # trackPos
elif self.obsdim == 31:
high = np.array([np.pi, # angle
6, # gear
np.inf, # rpm
np.inf, # speedX
np.inf, # speedY
np.inf, # speedZ
# track (19 dim.)
200, 200, 200, 200, 200,
200, 200, 200, 200, 200,
200, 200, 200, 200, 200,
200, 200, 200, 200,
np.inf, # trackPos
# wheelSpinVel (4 dim.)
np.inf, np.inf, np.inf, np.inf,
np.inf, # z
])
low = np.array([-np.pi, # angle
-1, # gear
0, # rpm
-np.inf, # speedX
-np.inf, # speedY
-np.inf, # speedZ
# track (19 dim.)
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0,
-np.inf, # trackPos
# wheelSpinVel (4 dim.)
0, 0, 0, 0,
-np.inf, # z
])
else:
low = None
high = None
self.observation_space = spaces.Box(low=low, high=high)
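# NOTE: only obsdim values of 2, 31 and 79 are handled above; any other value
# leaves low/high as None, and spaces.Box(low=None, high=None) fails at construction.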
# For evaluation episodes, set to “test mode” to not display logs.
def testset(self, test):
self.testmode = test
# Set learning parameter
def set_params(self, throttle, gear, dim, max_dist, targ_speed):
#params: [throttle, gear, dim, max_dist, targ_speed]
self.throttle = throttle
self.gear_change = gear
self.obsdim = dim
self.maximum_distance = max_dist
self.default_speed = targ_speed
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
# "step" function
def step(self, u):
# convert thisAction to the actual torcs actionstr
client = self.client
this_action = self.agent_to_torcs(u)
# apply actions in TORCS
action_torcs = client.R.d
# steering control from the agent
action_torcs["steer"] = this_action["steer"] # in [-1, 1]
# simple automatic throttle control by Snakeoil
if self.throttle is False:
target_speed = self.default_speed
if client.S.d["speedX"] < target_speed - (client.R.d["steer"] * 50):
if client.R.d["accel"] + 0.1 <= 1:
client.R.d["accel"] += 0.1
else:
if client.R.d["accel"] - 0.1 >= 0:
client.R.d["accel"] -= 0.1
if client.S.d["speedX"] < 10:
if (client.S.d["speedX"] + 0.1) != 0:
client.R.d["accel"] += 1 / (client.S.d["speedX"] + 0.1)
# traction control system
if (client.S.d["wheelSpinVel"][2] + client.S.d["wheelSpinVel"][3]) - (
client.S.d["wheelSpinVel"][0] + client.S.d["wheelSpinVel"][1]
) > 5:
action_torcs["accel"] -= 0.2
else:
action_torcs["accel"] = this_action["accel"]
# gear control from agent
if self.gear_change is True:
action_torcs["gear"] = this_action["gear"]
else:
# automatic gear control
action_torcs["gear"] = 1
if client.S.d["speedX"] > 50:
action_torcs["gear"] = 2
if client.S.d["speedX"] > 80:
action_torcs["gear"] = 3
if client.S.d["speedX"] > 110:
action_torcs["gear"] = 4
if client.S.d["speedX"] > 140:
action_torcs["gear"] = 5
if client.S.d["speedX"] > 170:
action_torcs["gear"] = 6
# one-step dynamics update #################################
# apply actions into TORCS
client.respond_to_server()
# get the response from TORCS
client.get_servers_input()
# get the current full-observation from TORCS
obs = client.S.d
# make an observation from a raw observation vector from TORCS
self.observation = self.make_observaton(obs)
# calculation of progress
progress = np.array(obs["speedX"]) * np.cos(obs["angle"])
# Designed Reward Function #######################################
# This reward function enables agents to learn stable high-speed driving
# with low Y-axis acceleration.
# This reward function was designed through trial and error.
if (obs["curLapTime"] - self.time) > 0:
Yac = (obs["speedY"] - self.speedY) / (obs["curLapTime"] - self.time)
else:
Yac = 0
self.speedY = obs["speedY"]
self.time = obs["curLapTime"]
self.Yaclist.append(Yac)
self.poshis.append(obs["trackPos"])
self.anglehis.append(obs["angle"])
self.sphis.append(obs["speedX"])
# reward for the low Y-axis acceleration
eta_Yac = 1
r_Yac = 1 / ((Yac / eta_Yac) ** 2 + 1)
# reward for the small angle : 0 ~ 1
eta_angle = 0.01
r_angle = 1 / ((obs["angle"] / eta_angle) ** 2 + 1)
# reward for the small position from center : 0 ~ 1
eta_pos = 0.01
r_trackPos = 1 / ((obs["trackPos"] / eta_pos) ** 2 + 1)
# reward for the high X-axis speed : 0 ~ 1
maxspeed = 100
if obs["speedX"] >= 0:
r_speed = min(obs["speedX"] / maxspeed, 1)
else:
r_speed = 0
# reward function: -1 ~ 1
reward = 0.2 * r_angle + 0.2 * r_trackPos + 0.3 * r_speed + 0.3 * r_Yac
Yac_threshold = 3.530394 # 0.1G
if np.abs(Yac) > Yac_threshold:
reward = -min(np.abs(Yac) / 250, 1)
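# Worked example (illustrative): angle=0.01 rad, trackPos=0.01, speedX=50, Yac=1
# give r_angle=0.5, r_trackPos=0.5, r_speed=0.5, r_Yac=0.5, so reward=0.5;
# the |Yac| > 3.530394 (0.1 G) override is not triggered.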
# Termination judgement #########################
track = np.array(obs["track"])
# episode terminates when the car is out of track
if track.min() < 0:
reward = -10
client.R.d["meta"] = True
# episode terminates if the agent's progress is too small
if self.terminal_judge_start < self.time_step:
if progress < self.termination_limit_progress:
reward = -10
client.R.d["meta"] = True
# episode terminates if the agent runs backward
if np.cos(obs["angle"]) < 0 or obs["distRaced"] < 0:
reward = -10
client.R.d["meta"] = True
# episode terminates when the agent reaches the maximum distance
if obs["distRaced"] >= self.maximum_distance:
reward = 10
client.R.d["meta"] = True
if client.R.d["meta"] is True: # send a reset signal
poshis = np.array(self.poshis)
anglehis = np.array(self.anglehis)
sphis = np.array(self.sphis)
Yachis = np.array(self.Yaclist)
# For training episodes, display statistics about the vehicle for the finished episode
if self.testmode is False:
print("---------------------------------------------------------")
print("---> raced: ", obs["distRaced"], " m <---")
print("--- maxYac: ", np.max(Yachis), " km/h/s ---")
print("--- minYac: ", np.min(Yachis), " km/h/s ---")
if abs(np.max(Yachis)) >= abs(np.min(Yachis)):
absmaxYac = abs(np.max(Yachis))
else:
absmaxYac = abs(np.min(Yachis))
print("--- absmaxYac: ", absmaxYac, " km/h/s ---")
print("--- meanYac: ", np.mean(Yachis), " km/h/s +- ", np.std(Yachis), "---")
print("--- medianYac: ", np.median(Yachis), " km/h/s ---")
print("--- trackPos_mean: ", np.mean(poshis), " +- ", np.std(poshis), " ---")
print("--- angle_mean : ", np.mean(anglehis), " rad +- ", np.std(anglehis), " ---")
print("--- speedX_mean: ", np.mean(sphis), " km/h +- ", np.std(sphis), " ---")
print("---------------------------------------------------------")
self.initial_run = False
client.respond_to_server()
self.time_step += 1
return self.get_obs(), reward, client.R.d["meta"], {}
def reset(self, relaunch=False):
self.time_step = 0
# Unless this is the initial reset, send a reset signal to TORCS when reset is called
if self.initial_reset is not True:
self.client.R.d["meta"] = True
self.client.respond_to_server()
## TENTATIVE. Restarting TORCS for every episode will cause the memory leak bug!
if relaunch is True:
self.reset_torcs()
# Modify here if you use multiple tracks in the environment
# Open new UDP in vtorcs
self.client = snakeoil3.Client(p=3101, vision=False)
self.client.MAX_STEPS = np.inf
client = self.client
# get the initial input from TORCS
client.get_servers_input()
# get the current full observation from TORCS
obs = client.S.d
self.observation = self.make_observaton(obs)
# reset variables and lists
self.speedY = obs["speedY"]
self.time = obs["curLapTime"]
self.Yaclist = []
self.poshis = []
self.anglehis = []
self.sphis = []
self.initial_reset = False
return self.get_obs()
def close(self):
os.system("pkill torcs")
def render(self, mode="human"):
# TORCS provides its own driving monitor, so this method is omitted.
pass
####################################### making observation ############################################
def get_obs(self):
return self.observation
def reset_torcs(self):
os.system("pkill torcs")
time.sleep(0.5)
if self.obsdim == 79:
os.system("torcs &")
elif self.obsdim == 2:
os.system("torcs -nofuel -nodamage -nolaptime &")
else:
os.system("torcs -nofuel -nodamage -nolaptime &")
time.sleep(0.5)
os.system("sh ./gym_torcs_kai/autostart.sh")
time.sleep(0.5)
def agent_to_torcs(self, u):
torcs_action = {"steer": u[0]}
if self.throttle is True: # throttle action is enabled
torcs_action.update({"accel": u[1]})
if self.gear_change is True: # gear change action is enabled
torcs_action.update({"gear": u[2]})
return torcs_action
def make_observaton(self, raw_obs):
if self.obsdim == 79:
obs1 = np.array(
[
raw_obs["angle"],
raw_obs["curLapTime"],
raw_obs["damage"],
raw_obs["distFromStart"],
raw_obs["distRaced"],
]
)
focus = raw_obs["focus"]
obs2 = np.array([raw_obs["fuel"], raw_obs["gear"], raw_obs["lastLapTime"]])
opponents = raw_obs["opponents"]
obs3 = np.array(
[
raw_obs["racePos"],
raw_obs["rpm"],
raw_obs["speedX"],
raw_obs["speedY"],
raw_obs["speedZ"],
]
)
track = raw_obs["track"]
trackPos = np.array([raw_obs["trackPos"]])
wheelSpinVel = raw_obs["wheelSpinVel"]
z = np.array(raw_obs["z"])
observ = np.hstack(
[obs1, focus, obs2, opponents, obs3, track, trackPos, wheelSpinVel, z]
)
return observ
elif self.obsdim == 2:
return np.array([raw_obs["angle"], raw_obs["trackPos"]])
elif self.obsdim == 31:
obs1 = np.array(
[
raw_obs["angle"],
raw_obs["gear"],
raw_obs["rpm"],
raw_obs["speedX"],
raw_obs["speedY"],
raw_obs["speedZ"],
]
)
trackPos = np.array([raw_obs["trackPos"]])
z = np.array(raw_obs["z"])
observ = np.hstack(
[obs1, raw_obs["track"], trackPos, raw_obs["wheelSpinVel"], z]
)
return observ
else:
return None
| 35.410619 | 107 | 0.450992 |
import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
import sys
sys.path.append("./gym_torcs_kai")
import snakeoil3_gym as snakeoil3
import os
import time
class TorcsKaiEnv(gym.Env):
terminal_judge_start = 500
termination_limit_progress = 5
initial_reset = True
def __init__(self, throttle=False, gear_change=False):
print("=== Hello, this is Gym-TORCS-Kai. ===")
0, 0, 0, 0,
-np.inf,
])
else:
low = None
high = None
self.observation_space = spaces.Box(low=low, high=high)
def testset(self, test):
self.testmode = test
def set_params(self, throttle, gear, dim, max_dist, targ_speed):
self.throttle = throttle
self.gear_change = gear
self.obsdim = dim
self.maximum_distance = max_dist
self.default_speed = targ_speed
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, u):
client = self.client
this_action = self.agent_to_torcs(u)
action_torcs = client.R.d
action_torcs["steer"] = this_action["steer"]
if self.throttle is False:
target_speed = self.default_speed
if client.S.d["speedX"] < target_speed - (client.R.d["steer"] * 50):
if client.R.d["accel"] + 0.1 <= 1:
client.R.d["accel"] += 0.1
else:
if client.R.d["accel"] - 0.1 >= 0:
client.R.d["accel"] -= 0.1
if client.S.d["speedX"] < 10:
if (client.S.d["speedX"] + 0.1) != 0:
client.R.d["accel"] += 1 / (client.S.d["speedX"] + 0.1)
if (client.S.d["wheelSpinVel"][2] + client.S.d["wheelSpinVel"][3]) - (
client.S.d["wheelSpinVel"][0] + client.S.d["wheelSpinVel"][1]
) > 5:
action_torcs["accel"] -= 0.2
else:
action_torcs["accel"] = this_action["accel"]
if self.gear_change is True:
action_torcs["gear"] = this_action["gear"]
else:
action_torcs["gear"] = 1
if client.S.d["speedX"] > 50:
action_torcs["gear"] = 2
if client.S.d["speedX"] > 80:
action_torcs["gear"] = 3
if client.S.d["speedX"] > 110:
action_torcs["gear"] = 4
if client.S.d["speedX"] > 140:
action_torcs["gear"] = 5
if client.S.d["speedX"] > 170:
action_torcs["gear"] = 6
reward = -min(np.abs(Yac) / 250, 1)
client.R.d["meta"] = True
if np.cos(obs["angle"]) < 0 or obs["distRaced"] < 0:
reward = -10
client.R.d["meta"] = True
if obs["distRaced"] >= self.maximum_distance:
reward = 10
client.R.d["meta"] = True
if client.R.d["meta"] is True:
poshis = np.array(self.poshis)
anglehis = np.array(self.anglehis)
sphis = np.array(self.sphis)
Yachis = np.array(self.Yaclist)
if self.testmode is False:
print("---------------------------------------------------------")
print("---> raced: ", obs["distRaced"], " m <---")
print("--- maxYac: ", np.max(Yachis), " km/h/s ---")
print("--- minYac: ", np.min(Yachis), " km/h/s ---")
if abs(np.max(Yachis)) >= abs(np.min(Yachis)):
absmaxYac = abs(np.max(Yachis))
else:
absmaxYac = abs(np.min(Yachis))
print("--- absmaxYac: ", absmaxYac, " km/h/s ---")
print("--- meanYac: ", np.mean(Yachis), " km/h/s +- ", np.std(Yachis), "---")
print("--- medianYac: ", np.median(Yachis), " km/h/s ---")
print("--- trackPos_mean: ", np.mean(poshis), " +- ", np.std(poshis), " ---")
print("--- angle_mean : ", np.mean(anglehis), " rad +- ", np.std(anglehis), " ---")
print("--- speedX_mean: ", np.mean(sphis), " km/h +- ", np.std(sphis), " ---")
print("---------------------------------------------------------")
self.initial_run = False
client.respond_to_server()
self.time_step += 1
return self.get_obs(), reward, client.R.d["meta"], {}
def reset(self, relaunch=False):
self.time_step = 0
if self.initial_reset is not True:
self.client.R.d["meta"] = True
self.client.respond_to_server()
self.client = snakeoil3.Client(p=3101, vision=False)
self.client.MAX_STEPS = np.inf
client = self.client
client.get_servers_input()
obs = client.S.d
self.observation = self.make_observaton(obs)
self.speedY = obs["speedY"]
self.time = obs["curLapTime"]
self.Yaclist = []
self.poshis = []
self.anglehis = []
self.sphis = []
self.initial_reset = False
return self.get_obs()
def close(self):
os.system("pkill torcs")
def render(self, mode="human"):
pass
| true | true |
f71c466cbf91fb905a3d1819ad2004dbe30abd3a | 775 | py | Python | app/mixin/assets.py | swelanauguste/refactored-sniffle | 1c0ea2f4d07a74d694ae3409b8b2ea3d57b9db4f | [
"MIT"
] | null | null | null | app/mixin/assets.py | swelanauguste/refactored-sniffle | 1c0ea2f4d07a74d694ae3409b8b2ea3d57b9db4f | [
"MIT"
] | null | null | null | app/mixin/assets.py | swelanauguste/refactored-sniffle | 1c0ea2f4d07a74d694ae3409b8b2ea3d57b9db4f | [
"MIT"
] | null | null | null | from django.conf import settings
from django.db import models
from django.views.generic import TemplateView
User = settings.AUTH_USER_MODEL
class TimeStampMixin(models.Model):
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
created_by = models.ForeignKey(
User,
blank=True,
null=True,
related_name="%(class)s_created_by",
on_delete=models.SET_DEFAULT, default=1
)
updated_by = models.ForeignKey(
User,
blank=True,
null=True,
related_name="%(class)s_updated_by",
on_delete=models.SET_DEFAULT, default=1,
)
class Meta:
abstract = True
class IndexView(TemplateView):
template_name = "index.html"
| 22.794118 | 56 | 0.676129 | from django.conf import settings
from django.db import models
from django.views.generic import TemplateView
User = settings.AUTH_USER_MODEL
class TimeStampMixin(models.Model):
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
created_by = models.ForeignKey(
User,
blank=True,
null=True,
related_name="%(class)s_created_by",
on_delete=models.SET_DEFAULT, default=1
)
updated_by = models.ForeignKey(
User,
blank=True,
null=True,
related_name="%(class)s_updated_by",
on_delete=models.SET_DEFAULT, default=1,
)
class Meta:
abstract = True
class IndexView(TemplateView):
template_name = "index.html"
| true | true |
f71c4861f8de557647d8e90f974fff337027d1d9 | 14,233 | py | Python | mnist128.py | samgregoost/self_supervised_large | 9c0c33cf374a1d5112519939012a64bca98c5f8d | [
"MIT"
] | 1 | 2021-08-25T08:32:19.000Z | 2021-08-25T08:32:19.000Z | mnist128.py | samgregoost/self_supervised_large | 9c0c33cf374a1d5112519939012a64bca98c5f8d | [
"MIT"
] | null | null | null | mnist128.py | samgregoost/self_supervised_large | 9c0c33cf374a1d5112519939012a64bca98c5f8d | [
"MIT"
] | null | null | null | from __future__ import print_function
import tensorflow as tf
import numpy as np
import random
import TensorflowUtils as utils
import read_MITSceneParsingDataParis as scene_parsing
import datetime
import BatchDatsetReader as dataset
from six.moves import xrange
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_integer("batch_size", "50", "batch size for training")
tf.flags.DEFINE_string("logs_dir", "/scratch1/ram095/nips20/logs_mnist128/", "path to logs directory")
tf.flags.DEFINE_string("data_dir", "/scratch1/ram095/nips20/paris_street", "path to dataset")
tf.flags.DEFINE_float("learning_rate", "1e-4", "Learning rate for Adam Optimizer")
tf.flags.DEFINE_string("model_dir", "Model_zoo/", "Path to vgg model mat")
tf.flags.DEFINE_bool('debug', "False", "Debug mode: True/ False")
tf.flags.DEFINE_string('mode', "train", "Mode train/ test/ visualize")
MODEL_URL = 'http://www.vlfeat.org/matconvnet/models/beta16/imagenet-vgg-verydeep-19.mat'
MAX_ITERATION = int(1e5 + 1)
NUM_OF_CLASSESS = 3
IMAGE_SIZE = 128
def vgg_net(weights, image):
layers = (
'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',
'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',
'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3',
'relu3_3', 'conv3_4', 'relu3_4', 'pool3',
'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3',
'relu4_3', 'conv4_4', 'relu4_4', 'pool4',
'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3',
'relu5_3', 'conv5_4', 'relu5_4'
)
net = {}
current = image
for i, name in enumerate(layers):
kind = name[:4]
if kind == 'conv':
kernels, bias = weights[i][0][0][0][0]
# matconvnet: weights are [width, height, in_channels, out_channels]
# tensorflow: weights are [height, width, in_channels, out_channels]
kernels = utils.get_variable(np.transpose(kernels, (1, 0, 2, 3)), name=name + "_w")
bias = utils.get_variable(bias.reshape(-1), name=name + "_b")
current = utils.conv2d_basic(current, kernels, bias)
elif kind == 'relu':
current = tf.nn.relu(current, name=name)
if FLAGS.debug:
utils.add_activation_summary(current)
elif kind == 'pool':
current = utils.avg_pool_2x2(current)
net[name] = current
return net
'''
def decoder(image):
model_data = utils.get_model_data(FLAGS.model_dir, MODEL_URL)
mean = model_data['normalization'][0][0][0]
mean_pixel = np.mean(mean, axis=(0, 1))
weights = np.squeeze(model_data['layers'])
processed_image = utils.process_image(image, mean_pixel)
with tf.variable_scope("decoder"):
image_net = vgg_net(weights, processed_image)
conv_final_layer = image_net["conv5_3"]
pool5 = utils.max_pool_2x2(conv_final_layer)
return pool5
'''
def inference(image, keep_prob,z):
"""
Semantic segmentation network definition
:param image: input image. Should have values in range 0-255
:param keep_prob:
:return:
"""
print("setting up vgg initialized conv layers ...")
model_data = utils.get_model_data(FLAGS.model_dir, MODEL_URL)
mean = model_data['normalization'][0][0][0]
mean_pixel = np.mean(mean, axis=(0, 1))
weights = np.squeeze(model_data['layers'])
processed_image = utils.process_image(image, mean_pixel)
with tf.variable_scope("inference"):
image_net = vgg_net(weights, processed_image)
conv_final_layer = image_net["conv5_3"]
pool5 = utils.max_pool_2x2(conv_final_layer)
W6 = utils.weight_variable([7, 7, 512, 4096], name="W6")
b6 = utils.bias_variable([4096], name="b6")
conv6 = utils.conv2d_basic(pool5, W6, b6)
relu6 = tf.nn.relu(conv6, name="relu6")
if FLAGS.debug:
utils.add_activation_summary(relu6)
relu_dropout6 = tf.nn.dropout(relu6, keep_prob=keep_prob)
W7 = utils.weight_variable([1, 1, 4096, 4096], name="W7")
b7 = utils.bias_variable([4096], name="b7")
conv7 = utils.conv2d_basic(relu_dropout6, W7, b7)
relu7 = tf.nn.relu(conv7, name="relu7")
if FLAGS.debug:
utils.add_activation_summary(relu7)
relu_dropout7 = tf.nn.dropout(relu7, keep_prob=keep_prob)
W8 = utils.weight_variable([1, 1, 4096, 150], name="W8")
b8 = utils.bias_variable([150], name="b8")
# W_h = utils.weight_variable([1, 7, 7, 4], name="Wh")
conv8 = tf.reshape(utils.conv2d_basic(relu_dropout7, W8, b8),[-1,4*4*150])
fc1 = tf.reshape(tf.layers.dense(conv8,4*4*150,activation = tf.nn.relu),[-1,4,4,150])
concat1 = tf.concat([fc1, z],axis = 3)
# annotation_pred1 = tf.argmax(conv8, dimension=3, name="prediction1")
print("###########################################################")
print(fc1)
# now to upscale to actual image size
deconv_shape1 = image_net["pool4"].get_shape()
W_t1 = utils.weight_variable([4, 4, deconv_shape1[3].value, 278], name="W_t1")
b_t1 = utils.bias_variable([deconv_shape1[3].value], name="b_t1")
conv_t1 = utils.conv2d_transpose_strided(concat1, W_t1, b_t1, output_shape=tf.shape(image_net["pool4"]))
fuse_1 = tf.add(conv_t1, image_net["pool4"], name="fuse_1")
deconv_shape2 = image_net["pool3"].get_shape()
W_t2 = utils.weight_variable([4, 4, deconv_shape2[3].value, deconv_shape1[3].value], name="W_t2")
b_t2 = utils.bias_variable([deconv_shape2[3].value], name="b_t2")
conv_t2 = utils.conv2d_transpose_strided(fuse_1, W_t2, b_t2, output_shape=tf.shape(image_net["pool3"]))
fuse_2 = tf.add(conv_t2, image_net["pool3"], name="fuse_2")
shape = tf.shape(image)
deconv_shape3 = tf.stack([shape[0], shape[1], shape[2], 3])
W_t3 = utils.weight_variable([16, 16, 3, deconv_shape2[3].value], name="W_t3")
b_t3 = utils.bias_variable([3], name="b_t3")
conv_t3 = tf.nn.relu(utils.conv2d_transpose_strided(fuse_2, W_t3, b_t3, output_shape=deconv_shape3, stride=8))
annotation_pred = tf.argmax(conv_t3, dimension=3, name="prediction")
return tf.expand_dims(annotation_pred, dim=3), conv_t3
def train(loss_val, var_list):
optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
grads = optimizer.compute_gradients(loss_val, var_list=var_list)
if FLAGS.debug:
# print(len(var_list))
for grad, var in grads:
utils.add_gradient_summary(grad, var)
return optimizer.apply_gradients(grads)
def train_z(loss,Z):
return tf.gradients(ys = loss, xs = Z)
def main(argv=None):
keep_probability = tf.placeholder(tf.float32, name="keep_probabilty")
image = tf.placeholder(tf.float32, shape=[None, IMAGE_SIZE, IMAGE_SIZE, 3], name="input_image")
annotation = tf.placeholder(tf.float32, shape=[None, IMAGE_SIZE, IMAGE_SIZE, 3], name="annotation")
z = tf.placeholder(tf.float32, shape=[None, 4, 4, 128], name="z")
# pred_annotation, logits = inference(image, keep_probability,z)
# tf.summary.image("input_image", image, max_outputs=2)
# tf.summary.image("ground_truth", tf.cast(annotation, tf.uint8), max_outputs=2)
# tf.summary.image("pred_annotation", tf.cast(pred_annotation, tf.uint8), max_outputs=2)
# loss = tf.reduce_mean((tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,
# labels=tf.squeeze(annotation, squeeze_dims=[3]),
# name="entropy")))
mask_ = tf.ones([FLAGS.batch_size,64,64,3])
mask = tf.pad(mask_, [[0,0],[32,32],[32,32],[0,0]])
mask2__ = tf.ones([FLAGS.batch_size,78,78,3])
mask2_ = tf.pad(mask2__, [[0,0],[25,25],[25,25],[0,0]])
mask2 = mask2_ - mask
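# NOTE: mask is a centered 64x64 block of ones in the 128x128 frame (the region to
# inpaint); mask2 is the 7-pixel ring around it (78x78 pad minus mask), intended for
# the commented-out border-consistency loss below.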
pred_annotation, logits = inference((1-mask)*image + mask*255, keep_probability,z)
tf.summary.image("input_image", image, max_outputs=2)
tf.summary.image("ground_truth", tf.cast(annotation, tf.uint8), max_outputs=2)
tf.summary.image("pred_annotation", tf.cast(pred_annotation, tf.uint8), max_outputs=2)
# loss0 = tf.reduce_mean(tf.abs(z))
loss = tf.reduce_mean(tf.sqrt(tf.reduce_sum(tf.square((image - logits)),[1,2,3])))
# loss2 = tf.reduce_mean(tf.square((image - logits)*mask2))
# loss = loss1 + loss2 + loss0
# loss = tf.reduce_mean(tf.squared_difference(logits ,annotation ))
loss_summary = tf.summary.scalar("entropy", loss)
grads = train_z(loss,z)
trainable_var = tf.trainable_variables()
if FLAGS.debug:
for var in trainable_var:
utils.add_to_regularization_and_summary(var)
train_op = train(loss, trainable_var)
print("Setting up summary op...")
summary_op = tf.summary.merge_all()
print("Setting up image reader...")
train_records, valid_records = scene_parsing.read_dataset(FLAGS.data_dir)
print(len(train_records))
print(len(valid_records))
print("Setting up dataset reader")
image_options = {'resize': True, 'resize_size': IMAGE_SIZE}
if FLAGS.mode == 'train':
train_dataset_reader = dataset.BatchDatset(train_records, image_options)
validation_dataset_reader = dataset.BatchDatset(valid_records, image_options)
sess = tf.Session()
print("Setting up Saver...")
saver = tf.train.Saver()
# create two summary writers to show training loss and validation loss in the same graph
# need to create two folders 'train' and 'validation' inside FLAGS.logs_dir
train_writer = tf.summary.FileWriter(FLAGS.logs_dir + '/train', sess.graph)
validation_writer = tf.summary.FileWriter(FLAGS.logs_dir + '/validation')
sess.run(tf.global_variables_initializer())
ckpt = tf.train.get_checkpoint_state(FLAGS.logs_dir)
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
print("Model restored...")
if FLAGS.mode == "train":
for itr in xrange(MAX_ITERATION):
train_images, train_annotations = train_dataset_reader.next_batch(FLAGS.batch_size)
z_ = np.random.uniform(low=-1.0, high=1.0, size=(FLAGS.batch_size,4,4,128))
# print(train_images)
feed_dict = {image: train_images, annotation: train_annotations, keep_probability: 0.85, z: z_}
#train_images[:,50:100,50:100,:] =0
v = 0
for p in range(10):
z_ol = np.copy(z_)
# print("666666666666666666666666666666666666666")
z_loss, summ = sess.run([loss,loss_summary], feed_dict=feed_dict)
print("Step: %d, z_step: %d, Train_loss:%g" % (itr,p,z_loss))
# print(z_)
g = sess.run([grads],feed_dict=feed_dict)
v_prev = np.copy(v)
# print(g[0][0].shape)
v = 0.001*v - 0.1*g[0][0]
z_ += 0.001 * v_prev + (1+0.001)*v
# z_ = np.clip(z_, -1.0, 1.0)
# print(v.shape)
# print(z_.shape)
feed_dict = {image: train_images, annotation: train_annotations, keep_probability: 0.85, z: z_}
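# NOTE: the inner loop above runs 10 steps of momentum-style gradient descent on the
# latent z (v = 0.001*v - 0.1*g; z += 0.001*v_prev + 1.001*v) before the network
# weights are updated on the same batch.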
sess.run(train_op, feed_dict=feed_dict)
if itr % 10 == 0:
train_loss, summary_str = sess.run([loss, loss_summary], feed_dict=feed_dict)
print("Step: %d, Train_loss:%g" % (itr, train_loss))
train_writer.add_summary(summary_str, itr)
if itr % 500 == 0:
valid_images, valid_annotations = validation_dataset_reader.next_batch(FLAGS.batch_size)
valid_loss, summary_sva = sess.run([loss, loss_summary], feed_dict={image: valid_images, annotation: valid_annotations,
keep_probability: 1.0, z: z_})
print("%s ---> Validation_loss: %g" % (datetime.datetime.now(), valid_loss))
# add validation loss to TensorBoard
validation_writer.add_summary(summary_sva, itr)
saver.save(sess, FLAGS.logs_dir + "model_z_center.ckpt", 500)
elif FLAGS.mode == "visualize":
valid_images, valid_annotations = validation_dataset_reader.get_random_batch(50)
z_ = np.random.uniform(low=-1.0, high=1.0, size=(FLAGS.batch_size,4,4,128))
feed_dict = {image: valid_images, annotation: valid_annotations, keep_probability: 0.85, z: z_}
v= 0
for p in range(50):
z_ol = np.copy(z_)
# print("666666666666666666666666666666666666666")
z_loss, summ = sess.run([loss,loss_summary], feed_dict=feed_dict)
print("z_step: %d, Train_loss:%g" % (p,z_loss))
# print(z_)
g = sess.run([grads],feed_dict=feed_dict)
v_prev = np.copy(v)
# print(g[0][0].shape)
v = 0.001*v - 0.1*g[0][0]
z_ += 0.001 * v_prev + (1+0.001)*v
# z_ = np.clip(z_, -1.0, 1.0)
pred = sess.run(logits, feed_dict={image: valid_images, annotation: valid_annotations,z:z_,
keep_probability: 1.0})
valid_images_masked = (1-sess.run(mask))*valid_images
predicted_patch = sess.run(mask) * pred
pred = valid_images_masked + predicted_patch
# valid_annotations = np.squeeze(valid_annotations, axis=3)
# pred = np.squeeze(pred, axis=3)
print(valid_images.shape)
print(valid_annotations.shape)
print(pred.shape)
for itr in range(FLAGS.batch_size):
utils.save_image(valid_images_masked[itr].astype(np.uint8), FLAGS.logs_dir, name="inp_" + str(5+itr))
utils.save_image(valid_annotations[itr].astype(np.uint8), FLAGS.logs_dir, name="gt_" + str(5+itr))
utils.save_image(pred[itr].astype(np.uint8), FLAGS.logs_dir, name="predz_" + str(5+itr))
print("Saved image: %d" % itr)
if __name__ == "__main__":
tf.app.run()
| 42.486567 | 135 | 0.622848 | from __future__ import print_function
import tensorflow as tf
import numpy as np
import random
import TensorflowUtils as utils
import read_MITSceneParsingDataParis as scene_parsing
import datetime
import BatchDatsetReader as dataset
from six.moves import xrange
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_integer("batch_size", "50", "batch size for training")
tf.flags.DEFINE_string("logs_dir", "/scratch1/ram095/nips20/logs_mnist128/", "path to logs directory")
tf.flags.DEFINE_string("data_dir", "/scratch1/ram095/nips20/paris_street", "path to dataset")
tf.flags.DEFINE_float("learning_rate", "1e-4", "Learning rate for Adam Optimizer")
tf.flags.DEFINE_string("model_dir", "Model_zoo/", "Path to vgg model mat")
tf.flags.DEFINE_bool('debug', "False", "Debug mode: True/ False")
tf.flags.DEFINE_string('mode', "train", "Mode train/ test/ visualize")
MODEL_URL = 'http://www.vlfeat.org/matconvnet/models/beta16/imagenet-vgg-verydeep-19.mat'
MAX_ITERATION = int(1e5 + 1)
NUM_OF_CLASSESS = 3
IMAGE_SIZE = 128
def vgg_net(weights, image):
layers = (
'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',
'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',
'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3',
'relu3_3', 'conv3_4', 'relu3_4', 'pool3',
'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3',
'relu4_3', 'conv4_4', 'relu4_4', 'pool4',
'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3',
'relu5_3', 'conv5_4', 'relu5_4'
)
net = {}
current = image
for i, name in enumerate(layers):
kind = name[:4]
if kind == 'conv':
kernels, bias = weights[i][0][0][0][0]
kernels = utils.get_variable(np.transpose(kernels, (1, 0, 2, 3)), name=name + "_w")
bias = utils.get_variable(bias.reshape(-1), name=name + "_b")
current = utils.conv2d_basic(current, kernels, bias)
elif kind == 'relu':
current = tf.nn.relu(current, name=name)
if FLAGS.debug:
utils.add_activation_summary(current)
elif kind == 'pool':
current = utils.avg_pool_2x2(current)
net[name] = current
return net
def inference(image, keep_prob,z):
print("setting up vgg initialized conv layers ...")
model_data = utils.get_model_data(FLAGS.model_dir, MODEL_URL)
mean = model_data['normalization'][0][0][0]
mean_pixel = np.mean(mean, axis=(0, 1))
weights = np.squeeze(model_data['layers'])
processed_image = utils.process_image(image, mean_pixel)
with tf.variable_scope("inference"):
image_net = vgg_net(weights, processed_image)
conv_final_layer = image_net["conv5_3"]
pool5 = utils.max_pool_2x2(conv_final_layer)
W6 = utils.weight_variable([7, 7, 512, 4096], name="W6")
b6 = utils.bias_variable([4096], name="b6")
conv6 = utils.conv2d_basic(pool5, W6, b6)
relu6 = tf.nn.relu(conv6, name="relu6")
if FLAGS.debug:
utils.add_activation_summary(relu6)
relu_dropout6 = tf.nn.dropout(relu6, keep_prob=keep_prob)
W7 = utils.weight_variable([1, 1, 4096, 4096], name="W7")
b7 = utils.bias_variable([4096], name="b7")
conv7 = utils.conv2d_basic(relu_dropout6, W7, b7)
relu7 = tf.nn.relu(conv7, name="relu7")
if FLAGS.debug:
utils.add_activation_summary(relu7)
relu_dropout7 = tf.nn.dropout(relu7, keep_prob=keep_prob)
W8 = utils.weight_variable([1, 1, 4096, 150], name="W8")
b8 = utils.bias_variable([150], name="b8")
conv8 = tf.reshape(utils.conv2d_basic(relu_dropout7, W8, b8),[-1,4*4*150])
fc1 = tf.reshape(tf.layers.dense(conv8,4*4*150,activation = tf.nn.relu),[-1,4,4,150])
concat1 = tf.concat([fc1, z],axis = 3)
print("###########################################################")
print(fc1)
deconv_shape1 = image_net["pool4"].get_shape()
W_t1 = utils.weight_variable([4, 4, deconv_shape1[3].value, 278], name="W_t1")
b_t1 = utils.bias_variable([deconv_shape1[3].value], name="b_t1")
conv_t1 = utils.conv2d_transpose_strided(concat1, W_t1, b_t1, output_shape=tf.shape(image_net["pool4"]))
fuse_1 = tf.add(conv_t1, image_net["pool4"], name="fuse_1")
deconv_shape2 = image_net["pool3"].get_shape()
W_t2 = utils.weight_variable([4, 4, deconv_shape2[3].value, deconv_shape1[3].value], name="W_t2")
b_t2 = utils.bias_variable([deconv_shape2[3].value], name="b_t2")
conv_t2 = utils.conv2d_transpose_strided(fuse_1, W_t2, b_t2, output_shape=tf.shape(image_net["pool3"]))
fuse_2 = tf.add(conv_t2, image_net["pool3"], name="fuse_2")
shape = tf.shape(image)
deconv_shape3 = tf.stack([shape[0], shape[1], shape[2], 3])
W_t3 = utils.weight_variable([16, 16, 3, deconv_shape2[3].value], name="W_t3")
b_t3 = utils.bias_variable([3], name="b_t3")
conv_t3 = tf.nn.relu(utils.conv2d_transpose_strided(fuse_2, W_t3, b_t3, output_shape=deconv_shape3, stride=8))
annotation_pred = tf.argmax(conv_t3, dimension=3, name="prediction")
return tf.expand_dims(annotation_pred, dim=3), conv_t3
def train(loss_val, var_list):
optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
grads = optimizer.compute_gradients(loss_val, var_list=var_list)
if FLAGS.debug:
for grad, var in grads:
utils.add_gradient_summary(grad, var)
return optimizer.apply_gradients(grads)
def train_z(loss,Z):
return tf.gradients(ys = loss, xs = Z)
def main(argv=None):
keep_probability = tf.placeholder(tf.float32, name="keep_probabilty")
image = tf.placeholder(tf.float32, shape=[None, IMAGE_SIZE, IMAGE_SIZE, 3], name="input_image")
annotation = tf.placeholder(tf.float32, shape=[None, IMAGE_SIZE, IMAGE_SIZE, 3], name="annotation")
z = tf.placeholder(tf.float32, shape=[None, 4, 4, 128], name="z")
mask_ = tf.ones([FLAGS.batch_size,64,64,3])
mask = tf.pad(mask_, [[0,0],[32,32],[32,32],[0,0]])
mask2__ = tf.ones([FLAGS.batch_size,78,78,3])
mask2_ = tf.pad(mask2__, [[0,0],[25,25],[25,25],[0,0]])
mask2 = mask2_ - mask
pred_annotation, logits = inference((1-mask)*image + mask*255, keep_probability,z)
tf.summary.image("input_image", image, max_outputs=2)
tf.summary.image("ground_truth", tf.cast(annotation, tf.uint8), max_outputs=2)
tf.summary.image("pred_annotation", tf.cast(pred_annotation, tf.uint8), max_outputs=2)
loss = tf.reduce_mean(tf.sqrt(tf.reduce_sum(tf.square((image - logits)),[1,2,3])))
loss_summary = tf.summary.scalar("entropy", loss)
grads = train_z(loss,z)
trainable_var = tf.trainable_variables()
if FLAGS.debug:
for var in trainable_var:
utils.add_to_regularization_and_summary(var)
train_op = train(loss, trainable_var)
print("Setting up summary op...")
summary_op = tf.summary.merge_all()
print("Setting up image reader...")
train_records, valid_records = scene_parsing.read_dataset(FLAGS.data_dir)
print(len(train_records))
print(len(valid_records))
print("Setting up dataset reader")
image_options = {'resize': True, 'resize_size': IMAGE_SIZE}
if FLAGS.mode == 'train':
train_dataset_reader = dataset.BatchDatset(train_records, image_options)
validation_dataset_reader = dataset.BatchDatset(valid_records, image_options)
sess = tf.Session()
print("Setting up Saver...")
saver = tf.train.Saver()
train_writer = tf.summary.FileWriter(FLAGS.logs_dir + '/train', sess.graph)
validation_writer = tf.summary.FileWriter(FLAGS.logs_dir + '/validation')
sess.run(tf.global_variables_initializer())
ckpt = tf.train.get_checkpoint_state(FLAGS.logs_dir)
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
print("Model restored...")
if FLAGS.mode == "train":
for itr in xrange(MAX_ITERATION):
train_images, train_annotations = train_dataset_reader.next_batch(FLAGS.batch_size)
z_ = np.random.uniform(low=-1.0, high=1.0, size=(FLAGS.batch_size,4,4,128))
feed_dict = {image: train_images, annotation: train_annotations, keep_probability: 0.85, z: z_}
v = 0
for p in range(10):
z_ol = np.copy(z_)
z_loss, summ = sess.run([loss,loss_summary], feed_dict=feed_dict)
print("Step: %d, z_step: %d, Train_loss:%g" % (itr,p,z_loss))
g = sess.run([grads],feed_dict=feed_dict)
v_prev = np.copy(v)
v = 0.001*v - 0.1*g[0][0]
z_ += 0.001 * v_prev + (1+0.001)*v
feed_dict = {image: train_images, annotation: train_annotations, keep_probability: 0.85, z: z_}
sess.run(train_op, feed_dict=feed_dict)
if itr % 10 == 0:
train_loss, summary_str = sess.run([loss, loss_summary], feed_dict=feed_dict)
print("Step: %d, Train_loss:%g" % (itr, train_loss))
train_writer.add_summary(summary_str, itr)
if itr % 500 == 0:
valid_images, valid_annotations = validation_dataset_reader.next_batch(FLAGS.batch_size)
valid_loss, summary_sva = sess.run([loss, loss_summary], feed_dict={image: valid_images, annotation: valid_annotations,
keep_probability: 1.0, z: z_})
print("%s ---> Validation_loss: %g" % (datetime.datetime.now(), valid_loss))
validation_writer.add_summary(summary_sva, itr)
saver.save(sess, FLAGS.logs_dir + "model_z_center.ckpt", 500)
elif FLAGS.mode == "visualize":
valid_images, valid_annotations = validation_dataset_reader.get_random_batch(50)
z_ = np.random.uniform(low=-1.0, high=1.0, size=(FLAGS.batch_size,4,4,128))
feed_dict = {image: valid_images, annotation: valid_annotations, keep_probability: 0.85, z: z_}
v= 0
for p in range(50):
z_ol = np.copy(z_)
z_loss, summ = sess.run([loss,loss_summary], feed_dict=feed_dict)
print("z_step: %d, Train_loss:%g" % (p,z_loss))
g = sess.run([grads],feed_dict=feed_dict)
v_prev = np.copy(v)
v = 0.001*v - 0.1*g[0][0]
z_ += 0.001 * v_prev + (1+0.001)*v
pred = sess.run(logits, feed_dict={image: valid_images, annotation: valid_annotations,z:z_,
keep_probability: 1.0})
valid_images_masked = (1-sess.run(mask))*valid_images
predicted_patch = sess.run(mask) * pred
pred = valid_images_masked + predicted_patch
print(valid_images.shape)
print(valid_annotations.shape)
print(pred.shape)
for itr in range(FLAGS.batch_size):
utils.save_image(valid_images_masked[itr].astype(np.uint8), FLAGS.logs_dir, name="inp_" + str(5+itr))
utils.save_image(valid_annotations[itr].astype(np.uint8), FLAGS.logs_dir, name="gt_" + str(5+itr))
utils.save_image(pred[itr].astype(np.uint8), FLAGS.logs_dir, name="predz_" + str(5+itr))
print("Saved image: %d" % itr)
if __name__ == "__main__":
tf.app.run()
| true | true |
f71c490d7b16466aa2d3b6d909be2ecacb018f6b | 70 | py | Python | hashlib/hashlib/__init__.py | badgeteam/micropython-lib | fca0235c166ebbada489d88c42fc549267832797 | [
"PSF-2.0"
] | null | null | null | hashlib/hashlib/__init__.py | badgeteam/micropython-lib | fca0235c166ebbada489d88c42fc549267832797 | [
"PSF-2.0"
] | null | null | null | hashlib/hashlib/__init__.py | badgeteam/micropython-lib | fca0235c166ebbada489d88c42fc549267832797 | [
"PSF-2.0"
] | 2 | 2017-11-21T16:53:03.000Z | 2021-07-29T08:47:14.000Z | from .sha256 import sha224, sha256
from .sha512 import sha384, sha512
| 23.333333 | 34 | 0.8 | from .sha256 import sha224, sha256
from .sha512 import sha384, sha512
| true | true |
f71c4b2b7c7dee5107676ddd03075e0b5134b81f | 2,488 | py | Python | HDF5Saver.py | sizhky/carla-dataset-runner | a670d981d29de78460cd90b1d4949ee4b71d0ade | [
"MIT"
] | 48 | 2019-12-28T11:08:27.000Z | 2022-03-24T09:22:51.000Z | HDF5Saver.py | sizhky/carla-dataset-runner | a670d981d29de78460cd90b1d4949ee4b71d0ade | [
"MIT"
] | 6 | 2020-02-01T21:47:21.000Z | 2021-12-10T13:19:41.000Z | HDF5Saver.py | sizhky/carla-dataset-runner | a670d981d29de78460cd90b1d4949ee4b71d0ade | [
"MIT"
] | 19 | 2020-01-08T08:19:08.000Z | 2022-03-24T08:39:53.000Z | import h5py
import numpy as np
class HDF5Saver:
def __init__(self, sensor_width, sensor_height, file_path_to_save="data/carla_dataset.hdf5"):
self.sensor_width = sensor_width
self.sensor_height = sensor_height
self.file = h5py.File(file_path_to_save, "w")
# Creating groups to store each type of data
self.rgb_group = self.file.create_group("rgb")
self.depth_group = self.file.create_group("depth")
self.ego_speed_group = self.file.create_group("ego_speed")
self.bounding_box_group = self.file.create_group("bounding_box")
self.bb_vehicles_group = self.bounding_box_group.create_group("vehicles")
self.bb_walkers_group = self.bounding_box_group.create_group("walkers")
self.timestamp_group = self.file.create_group("timestamps")
# Storing metadata
self.file.attrs['sensor_width'] = sensor_width
self.file.attrs['sensor_height'] = sensor_height
self.file.attrs['simulation_synchronization_type'] = "syncd"
self.rgb_group.attrs['channels'] = 'R,G,B'
self.ego_speed_group.attrs['x,y,z_velocity'] = 'in m/s'
self.bounding_box_group.attrs['data_description'] = 'Every 4 consecutive entries in a row describe one individual actor in the scene.'
self.bounding_box_group.attrs['bbox_format'] = '[xmin, ymin, xmax, ymax] (top left coords; right bottom coords); ' \
'the vector has been flattened, therefore the data must ' \
'be captured in blocks of 4 elements'
self.timestamp_group.attrs['time_format'] = "current time in MILLISECONDS since the unix epoch " \
"(time.time()*1000 in python3)"
def record_data(self, rgb_array, depth_array, bounding_box, ego_speed, timestamp):
timestamp = str(timestamp)
self.rgb_group.create_dataset(timestamp, data=rgb_array)
self.depth_group.create_dataset(timestamp, data=depth_array)
self.ego_speed_group.create_dataset(timestamp, data=ego_speed)
self.bb_vehicles_group.create_dataset(timestamp, data=bounding_box[0])
self.bb_walkers_group.create_dataset(timestamp, data=bounding_box[1])
def record_all_timestamps(self, timestamps_list):
self.timestamp_group.create_dataset("timestamps", data=np.array(timestamps_list))
def close_HDF5(self):
self.file.close()
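# Illustrative usage (hypothetical arrays/values; bounding_box is a (vehicles, walkers) pair):
# saver = HDF5Saver(1024, 768)
# saver.record_data(rgb, depth, (bb_vehicles, bb_walkers), ego_speed, timestamp)
# saver.record_all_timestamps(all_timestamps)
# saver.close_HDF5()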
| 54.086957 | 135 | 0.671624 | import h5py
import numpy as np
class HDF5Saver:
def __init__(self, sensor_width, sensor_height, file_path_to_save="data/carla_dataset.hdf5"):
self.sensor_width = sensor_width
self.sensor_height = sensor_height
self.file = h5py.File(file_path_to_save, "w")
self.rgb_group = self.file.create_group("rgb")
self.depth_group = self.file.create_group("depth")
self.ego_speed_group = self.file.create_group("ego_speed")
self.bounding_box_group = self.file.create_group("bounding_box")
self.bb_vehicles_group = self.bounding_box_group.create_group("vehicles")
self.bb_walkers_group = self.bounding_box_group.create_group("walkers")
self.timestamp_group = self.file.create_group("timestamps")
self.file.attrs['sensor_width'] = sensor_width
self.file.attrs['sensor_height'] = sensor_height
self.file.attrs['simulation_synchronization_type'] = "syncd"
self.rgb_group.attrs['channels'] = 'R,G,B'
self.ego_speed_group.attrs['x,y,z_velocity'] = 'in m/s'
self.bounding_box_group.attrs['data_description'] = 'Every 4 consecutive entries in a row describe one individual actor in the scene.'
self.bounding_box_group.attrs['bbox_format'] = '[xmin, ymin, xmax, ymax] (top left coords; right bottom coords); ' \
'the vector has been flattened, therefore the data must ' \
'be captured in blocks of 4 elements'
self.timestamp_group.attrs['time_format'] = "current time in MILLISECONDS since the unix epoch " \
"(time.time()*1000 in python3)"
def record_data(self, rgb_array, depth_array, bounding_box, ego_speed, timestamp):
timestamp = str(timestamp)
self.rgb_group.create_dataset(timestamp, data=rgb_array)
self.depth_group.create_dataset(timestamp, data=depth_array)
self.ego_speed_group.create_dataset(timestamp, data=ego_speed)
self.bb_vehicles_group.create_dataset(timestamp, data=bounding_box[0])
self.bb_walkers_group.create_dataset(timestamp, data=bounding_box[1])
def record_all_timestamps(self, timestamps_list):
self.timestamp_group.create_dataset("timestamps", data=np.array(timestamps_list))
def close_HDF5(self):
self.file.close()
| true | true |
f71c4b4a5eb25c7ff024fb30f9b4ce405c736e0b | 544 | py | Python | manage.py | almazkun/PythonDjangoMozilaTut | 39e7c4d1ab9fbfe85abe90d94585fc7315617d1d | [
"MIT"
] | 1 | 2019-01-16T05:39:42.000Z | 2019-01-16T05:39:42.000Z | manage.py | almazkun/PythonDjangoMozilaTut | 39e7c4d1ab9fbfe85abe90d94585fc7315617d1d | [
"MIT"
] | null | null | null | manage.py | almazkun/PythonDjangoMozilaTut | 39e7c4d1ab9fbfe85abe90d94585fc7315617d1d | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'visitkoreakz.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| 34 | 76 | 0.689338 |
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'visitkoreakz.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| true | true |
f71c4b6de69a49fef47431af49540a00c27168f7 | 622 | py | Python | src/libs/components/customsmarttile.py | loghinalexandru/blackboard-greenboard | 80332bf7709e602a4d5ada31b3cf95801c06190f | [
"MIT"
] | null | null | null | src/libs/components/customsmarttile.py | loghinalexandru/blackboard-greenboard | 80332bf7709e602a4d5ada31b3cf95801c06190f | [
"MIT"
] | null | null | null | src/libs/components/customsmarttile.py | loghinalexandru/blackboard-greenboard | 80332bf7709e602a4d5ada31b3cf95801c06190f | [
"MIT"
] | null | null | null | import kivy
from functools import partial
kivy.require('2.0.0')
from kivymd.uix.imagelist import SmartTile
from constants import Screen
class CustomSmartTile(SmartTile):
def __init__(self, **kwargs):
super(CustomSmartTile, self).__init__(**kwargs)
self.height = '240dp'
self.size_hint_y = None
self.box_color = [0, 0, 0, 0]
self.on_press = partial(self._maximize, self.source)
def _maximize(self, file):
self.parent.parent.parent.manager.get_screen(Screen.ImageView.value).file_name = file
self.parent.parent.parent.manager.current = Screen.ImageView.value | 36.588235 | 93 | 0.709003 | import kivy
from functools import partial
kivy.require('2.0.0')
from kivymd.uix.imagelist import SmartTile
from constants import Screen
class CustomSmartTile(SmartTile):
def __init__(self, **kwargs):
super(CustomSmartTile, self).__init__(**kwargs)
self.height = '240dp'
self.size_hint_y = None
self.box_color = [0, 0, 0, 0]
self.on_press = partial(self._maximize, self.source)
def _maximize(self, file):
self.parent.parent.parent.manager.get_screen(Screen.ImageView.value).file_name = file
self.parent.parent.parent.manager.current = Screen.ImageView.value | true | true |
f71c4cb2928d71b7feb87e22f1a85505c3468626 | 1,416 | py | Python | processing/data_collection/gazette/spiders/es_associacao_municipios.py | marlesson/diario-oficial | 6c2b3e41d1d08a1fd47517ed55ac22ae888c88b3 | [
"MIT"
] | 3 | 2018-06-05T02:33:03.000Z | 2018-06-05T14:20:02.000Z | processing/data_collection/gazette/spiders/es_associacao_municipios.py | marlesson/diario-oficial | 6c2b3e41d1d08a1fd47517ed55ac22ae888c88b3 | [
"MIT"
] | 4 | 2018-06-07T14:55:53.000Z | 2018-06-29T12:37:12.000Z | processing/data_collection/gazette/spiders/es_associacao_municipios.py | marlesson/diario-oficial | 6c2b3e41d1d08a1fd47517ed55ac22ae888c88b3 | [
"MIT"
] | 1 | 2018-08-24T22:32:27.000Z | 2018-08-24T22:32:27.000Z | from dateparser import parse
import datetime as dt
import scrapy
from gazette.items import Gazette
from gazette.spiders.base import BaseGazetteSpider
class EsAssociacaoMunicipiosSpider(BaseGazetteSpider):
TERRITORY_ID = '3200000'
name = 'es_associacao_municipios'
allowed_domains = ['diariomunicipales.org.br']
start_urls = ['https://diariomunicipales.org.br/?r=site/edicoes&Edicao_page=1']
def parse(self, response):
"""
@url https://diariomunicipales.org.br/?r=site/edicoes&Edicao_page=1
@returns items 15 15
@returns requests 1 1
@scrapes date file_urls is_extra_edition territory_id power scraped_at
"""
for gazette_node in response.css('.items tbody tr'):
url = gazette_node.css('[download]::attr(href)').extract_first()
date = gazette_node.css('td::text')[1].extract()
date = parse(date, languages=['pt']).date()
yield Gazette(
date=date,
file_urls=[url],
is_extra_edition=False,
territory_id=self.TERRITORY_ID,
power='executive',
scraped_at=dt.datetime.utcnow(),
)
css_path = '.pagination .next:not(.disabled) a::attr(href)'
next_page_url = response.css(css_path).extract_first()
if next_page_url:
yield response.follow(next_page_url)
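# Illustrative invocation (assuming a standard Scrapy project layout):
# scrapy crawl es_associacao_municipios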
| 35.4 | 83 | 0.633475 | from dateparser import parse
import datetime as dt
import scrapy
from gazette.items import Gazette
from gazette.spiders.base import BaseGazetteSpider
class EsAssociacaoMunicipiosSpider(BaseGazetteSpider):
TERRITORY_ID = '3200000'
name = 'es_associacao_municipios'
allowed_domains = ['diariomunicipales.org.br']
start_urls = ['https://diariomunicipales.org.br/?r=site/edicoes&Edicao_page=1']
def parse(self, response):
for gazette_node in response.css('.items tbody tr'):
url = gazette_node.css('[download]::attr(href)').extract_first()
date = gazette_node.css('td::text')[1].extract()
date = parse(date, languages=['pt']).date()
yield Gazette(
date=date,
file_urls=[url],
is_extra_edition=False,
territory_id=self.TERRITORY_ID,
power='executive',
scraped_at=dt.datetime.utcnow(),
)
css_path = '.pagination .next:not(.disabled) a::attr(href)'
next_page_url = response.css(css_path).extract_first()
if next_page_url:
yield response.follow(next_page_url)
| true | true |
f71c4f6668cb080ef2ce8616c2f028e0b74e850d | 6,404 | py | Python | test/functional/mempool_persist.py | likloadm/arielcoin | bd26479189fbdbea6e6f783c9d898054ae8740b0 | [
"MIT"
] | 3 | 2022-02-24T01:44:26.000Z | 2022-03-04T12:13:08.000Z | test/functional/mempool_persist.py | likloadm/arielcoin | bd26479189fbdbea6e6f783c9d898054ae8740b0 | [
"MIT"
] | 2 | 2022-03-21T05:41:51.000Z | 2022-03-21T17:12:13.000Z | test/functional/mempool_persist.py | likloadm/arielcoin | bd26479189fbdbea6e6f783c9d898054ae8740b0 | [
"MIT"
] | 6 | 2022-02-23T10:54:43.000Z | 2022-03-24T09:05:45.000Z | #!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mempool persistence.
By default, bitcoind will dump mempool on shutdown and
then reload it on startup. This can be overridden with
the -persistmempool=0 command line option.
Test is as follows:
- start node0, node1 and node2. node1 has -persistmempool=0
- create 5 transactions on node2 to its own address. Note that these
are not sent to node0 or node1 addresses because we don't want
them to be saved in the wallet.
- check that node0 and node1 have 5 transactions in their mempools
- shutdown all nodes.
- startup node0. Verify that it still has 5 transactions
in its mempool. Shutdown node0. This tests that by default the
mempool is persistent.
- startup node1. Verify that its mempool is empty. Shutdown node1.
This tests that with -persistmempool=0, the mempool is not
dumped to disk when the node is shut down.
- Restart node0 with -persistmempool=0. Verify that its mempool is
empty. Shutdown node0. This tests that with -persistmempool=0,
the mempool is not loaded from disk on start up.
- Restart node0 with -persistmempool. Verify that it has 5
transactions in its mempool. This tests that -persistmempool=0
does not overwrite a previously valid mempool stored on disk.
- Remove node0 mempool.dat and verify savemempool RPC recreates it
and verify that node1 can load it and has 5 transactions in its
mempool.
- Verify that savemempool throws when the RPC is called if
node1 can't write to disk.
"""
from decimal import Decimal
import os
import time
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error, wait_until
class MempoolPersistTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 3
self.extra_args = [[], ["-persistmempool=0"], []]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
chain_height = self.nodes[0].getblockcount()
assert_equal(chain_height, 200)
self.log.debug("Mine a single block to get out of IBD")
self.nodes[0].generate(1)
self.sync_all()
self.log.debug("Send 5 transactions from node2 (to its own address)")
for i in range(5):
last_txid = self.nodes[2].sendtoaddress(self.nodes[2].getnewaddress(), Decimal("10"))
node2_balance = self.nodes[2].getbalance()
self.sync_all()
self.log.debug("Verify that node0 and node1 have 5 transactions in their mempools")
assert_equal(len(self.nodes[0].getrawmempool()), 5)
assert_equal(len(self.nodes[1].getrawmempool()), 5)
self.log.debug("Prioritize a transaction on node0")
fees = self.nodes[0].getmempoolentry(txid=last_txid)['fees']
assert_equal(fees['base'], fees['modified'])
self.nodes[0].prioritisetransaction(txid=last_txid, fee_delta=1000)
fees = self.nodes[0].getmempoolentry(txid=last_txid)['fees']
assert_equal(fees['base'] + Decimal('0.00001000'), fees['modified'])
self.log.debug("Stop-start the nodes. Verify that node0 has the transactions in its mempool and node1 does not. Verify that node2 calculates its balance correctly after loading wallet transactions.")
self.stop_nodes()
# Give this node a head-start, so we can be "extra-sure" that it didn't load anything later
# Also don't store the mempool, to keep the datadir clean
self.start_node(1, extra_args=["-persistmempool=0"])
self.start_node(0)
self.start_node(2)
# Give bitcoind a second to reload the mempool
wait_until(lambda: len(self.nodes[0].getrawmempool()) == 5, timeout=1)
wait_until(lambda: len(self.nodes[2].getrawmempool()) == 5, timeout=1)
# The others have loaded their mempool. If node_1 loaded anything, we'd probably notice by now:
assert_equal(len(self.nodes[1].getrawmempool()), 0)
self.log.debug('Verify prioritization is loaded correctly')
fees = self.nodes[0].getmempoolentry(txid=last_txid)['fees']
assert_equal(fees['base'] + Decimal('0.00001000'), fees['modified'])
# Verify accounting of mempool transactions after restart is correct
self.nodes[2].syncwithvalidationinterfacequeue() # Flush mempool to wallet
assert_equal(node2_balance, self.nodes[2].getbalance())
self.log.debug("Stop-start node0 with -persistmempool=0. Verify that it doesn't load its mempool.dat file.")
self.stop_nodes()
self.start_node(0, extra_args=["-persistmempool=0"])
# Give bitcoind a second to reload the mempool
time.sleep(1)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.log.debug("Stop-start node0. Verify that it has the transactions in its mempool.")
self.stop_nodes()
self.start_node(0)
wait_until(lambda: len(self.nodes[0].getrawmempool()) == 5)
mempooldat0 = os.path.join(self.nodes[0].datadir, 'regtest', 'mempool.dat')
mempooldat1 = os.path.join(self.nodes[1].datadir, 'regtest', 'mempool.dat')
self.log.debug("Remove the mempool.dat file. Verify that savemempool to disk via RPC re-creates it")
os.remove(mempooldat0)
self.nodes[0].savemempool()
assert os.path.isfile(mempooldat0)
self.log.debug("Stop nodes, make node1 use mempool.dat from node0. Verify it has 5 transactions")
os.rename(mempooldat0, mempooldat1)
self.stop_nodes()
self.start_node(1, extra_args=[])
wait_until(lambda: len(self.nodes[1].getrawmempool()) == 5)
self.log.debug("Prevent arielcoind from writing mempool.dat to disk. Verify that `savemempool` fails")
# to test the exception we are creating a tmp folder called mempool.dat.new
# which is an implementation detail that could change and break this test
mempooldotnew1 = mempooldat1 + '.new'
os.mkdir(mempooldotnew1)
assert_raises_rpc_error(-1, "Unable to dump mempool to disk", self.nodes[1].savemempool)
os.rmdir(mempooldotnew1)
if __name__ == '__main__':
MempoolPersistTest().main()
f71c4fd86e03f9140408817a88d200d10a2703a4 | 345 | py | Python | day07/day7_part1.py | briannamcdonald/advent-of-code-2021 | c67e3c02a84db1a0cceeefef34a8cddba311484e | ["MIT"] | max_stars_count: 1 | max_issues_count: null | max_forks_count: null
def main():
data = open("day07/input.txt", "r")
lines = [line for line in data]
crabs = [int(fish.strip()) for fish in lines[0].split(",")]
median_val = sorted(crabs)[len(crabs) // 2]
fuel_sum = 0
for crab in crabs:
fuel_sum += abs(crab - median_val)
print(fuel_sum)
if __name__ == "__main__":
main()
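# --- Editor's note (not part of the original file) -------------------------
# The median is used because it minimizes the sum of absolute distances
# (the L1 cost). A self-contained check on the published puzzle example:
def _fuel(crabs, pos):
    return sum(abs(c - pos) for c in crabs)

if __name__ == "__main__":
    sample = [16, 1, 2, 0, 4, 2, 7, 1, 2, 14]    # AoC 2021 day 7 example
    median = sorted(sample)[len(sample) // 2]    # -> 2
    assert _fuel(sample, median) == min(_fuel(sample, p) for p in range(max(sample) + 1))
    print(_fuel(sample, median))                 # -> 37, matching the example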
f71c503591cb93ca4d82cc8039bd0f901a19b722 | 578 | py | Python | utils/utility.py | qiyuangong/Relational_Transaction_Anon | 954cca4b073b4d0532814ac7cf77ab1ae8068c9c | ["MIT"] | max_stars_count: 1 | max_issues_count: null | max_forks_count: 2
"""
shared functions
"""
#!/usr/bin/env python
#coding=utf-8
def cmp_str(element1, element2):
"""
compare number in str format correctley
"""
try:
return cmp(float(element1), float(element2))
except ValueError:
return cmp(element1, element2)
def list_to_str(value_list, cmpfun=cmp, sep=';'):
"""covert sorted str list (sorted by cmpfun) to str
value (splited by sep). This fuction is value safe, which means
value_list will not be changed.
"""
temp = value_list[:]
temp.sort(cmp=cmpfun)
return sep.join(temp)
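# --- Editor's sketch (not part of the original file) ----------------------
# The module above targets Python 2 (built-in cmp(), list.sort(cmp=...)).
# Under Python 3 the same behaviour can be had via functools.cmp_to_key;
# a minimal, hedged equivalent of list_to_str:
import functools

def _cmp_py3(a, b):
    # Python 3 dropped the built-in cmp(); reproduce it.
    return (a > b) - (a < b)

def _list_to_str_py3(value_list, cmpfun=_cmp_py3, sep=';'):
    temp = sorted(value_list, key=functools.cmp_to_key(cmpfun))
    return sep.join(temp)

# _list_to_str_py3(['10', '2', '1']) -> '1;10;2' (lexicographic); pairing it
# with a numeric comparator such as cmp_str above would yield '1;2;10'.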
f71c5062942eba1d7faec207ed2e124ae3fb0e61 | 603 | py | Python | app.py | zachmerrill/pyrdle | 573035cecbe3ee5cae36562a2e3b53ea4f2950a0 | ["MIT"] | max_stars_count: null | max_issues_count: null | max_forks_count: null
from game import Game
if __name__ == '__main__':
# Initialize the game
game = Game()
# Game loop
while True:
# Get the user's guess
guess = input('Guess a word: ').lower()
# Check the guess
game.check_guess(guess)
# Print the board
game.print_board()
# Check if the game is won
if game.is_won():
print('You won!\n')
game.print_share()
break
# Check if the game is lost
if game.is_lost():
print('You lost!\n')
game.print_share()
break
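# --- Editor's note (not part of the original file) -------------------------
# The loop above assumes the game module exposes a Game class shaped like
# the stub below. This interface is inferred from the call sites, not taken
# from the actual pyrdle source:
class _GameInterfaceSketch:
    def check_guess(self, guess: str) -> None: ...   # score one guess
    def print_board(self) -> None: ...               # render the current board
    def is_won(self) -> bool: ...
    def is_lost(self) -> bool: ...
    def print_share(self) -> None: ...               # emoji share grid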
f71c51f3a090afb75de8ec1e60a2bd9eb1fb35d1 | 935 | py | Python | nnsvs/logger.py | nicolalandro/nnsvs | 45da00218dd0a445c8483f11ac891c6ef00d3925 | ["MIT"] | max_stars_count: 72 | max_issues_count: 1 | max_forks_count: 3
# coding: utf-8
from __future__ import absolute_import, print_function, with_statement
import logging
import os
from os.path import dirname
format = "%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s"
def getLogger(verbose=0, filename=None, name="nnsvs"):
logger = logging.getLogger(name)
if verbose >= 100:
logger.setLevel(logging.DEBUG)
elif verbose > 0:
logger.setLevel(logging.INFO)
else:
logger.setLevel(logging.WARN)
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(logging.Formatter(format))
# logger.addHandler(stream_handler)
if filename is not None:
os.makedirs(dirname(filename), exist_ok=True)
file_handler = logging.FileHandler(filename=filename)
file_handler.setLevel(logging.INFO)
file_handler.setFormatter(logging.Formatter(format))
logger.addHandler(file_handler)
return logger
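# --- Editor's sketch (not part of the original file) ----------------------
# Minimal usage, assuming write access to ./logs. Note that the stream
# handler above is built but never attached, so output goes to the file
# handler only (always at INFO level):
if __name__ == "__main__":
    log = getLogger(verbose=1, filename="logs/train.log")
    log.info("training started")               # written to logs/train.log
    log.debug("dropped: verbose=1 means INFO")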
f71c525adccaf762f27c3b40fe128ecd416463bd | 6,309 | py | Python | dico/base/model.py | dico-api/dico | 0bb80e2bb8dd66bb5078e52c9e37c180b7c80319 | ["MIT"] | max_stars_count: 37 | max_issues_count: 14 | max_forks_count: 9 (forks repo: eunwoo1104/dico)
import copy
import typing
from ..model.snowflake import Snowflake
if typing.TYPE_CHECKING:
from ..api import APIClient
class CopyableObject:
def copy(self):
return copy.deepcopy(self)
class EventBase:
def __init__(self, client: "APIClient", resp: dict):
self.raw: dict = resp
self.client: "APIClient" = client
self._dont_dispatch: bool = False
@classmethod
def create(cls, client, resp: dict):
return cls(client, resp)
class DiscordObjectBase(CopyableObject):
TYPING = typing.Union[
int, str, Snowflake, "DiscordObjectBase", typing.Type["DiscordObjectBase"]
]
RESPONSE = typing.Union["DiscordObjectBase", typing.Awaitable["DiscordObjectBase"]]
RESPONSE_AS_LIST = typing.Union[
typing.List["DiscordObjectBase"],
typing.Awaitable[typing.List["DiscordObjectBase"]],
]
_cache_type = None
def __init__(self, client: "APIClient", resp: dict, **kwargs: typing.Any):
resp.update(kwargs)
# self._cache_type = None
self.raw: dict = resp.copy()
self.id: Snowflake = Snowflake(resp["id"])
self.client: "APIClient" = client
def __int__(self) -> int:
return int(self.id)
def __eq__(self, other):
return int(self.id) == int(other)
def __ne__(self, other):
return int(self.id) != int(other)
def __hash__(self):
return hash(self.id)
def update(self, new_resp: dict, **kwargs: typing.Any):
orig = self.raw
for k, v in new_resp.items():
if orig.get(k) != v:
orig[k] = v
self.__init__(self.client, orig, **kwargs)
@classmethod
def create(cls, client: "APIClient", resp: dict, **kwargs: typing.Any):
ensure_cache_type = kwargs.pop("ensure_cache_type", cls._cache_type)
prevent_caching = kwargs.pop("prevent_caching", False)
maybe_exist = client.has_cache and client.cache.get(
resp["id"], ensure_cache_type
)
if maybe_exist:
if prevent_caching:
maybe_exist = maybe_exist.copy()
maybe_exist.update(resp, **kwargs)
"""
orig = maybe_exist.raw
for k, v in resp.items():
if orig.get(k) != v:
orig[k] = v
maybe_exist.__init__(client, orig, **kwargs)
"""
return maybe_exist
else:
ret = cls(client, resp, **kwargs)
if client.has_cache and not prevent_caching:
client.cache.add(ret.id, ret._cache_type, ret)
if hasattr(ret, "guild_id") and ret.guild_id:
client.cache.get_guild_container(ret.guild_id).add(
ret.id, ret._cache_type, ret
)
return ret
class AbstractObject(dict):
RESPONSE = typing.Union["AbstractObject", typing.Awaitable["AbstractObject"]]
RESPONSE_AS_LIST = typing.Union[
typing.List["AbstractObject"], typing.Awaitable[typing.List["AbstractObject"]]
]
def __init__(self, resp: dict):
super().__init__(**resp)
def __getattr__(self, item):
return self.get(item)
def __setattr__(self, key, value):
self[key] = value
class FlagBase:
def __init__(self, *args: str, **kwargs: bool):
self.values: typing.Dict[str, int] = {
x: getattr(self, x) for x in dir(self) if isinstance(getattr(self, x), int)
}
self.value: int = 0
for x in args:
if x.upper() not in self.values:
raise AttributeError(f"invalid name: `{x}`")
self.value |= self.values[x.upper()]
for k, v in kwargs.items():
if k.upper() not in self.values:
raise AttributeError(f"invalid name: `{k}`")
if v:
self.value |= self.values[k.upper()]
def __int__(self) -> int:
return self.value
def __getattr__(self, item):
if item.startswith("__"):
return self.__getattribute__(item)
return self.has(item)
def __iter__(self):
for k, v in self.values.items():
if self.has(k):
yield v
def has(self, name: str) -> bool:
if name.upper() not in self.values:
raise AttributeError(f"invalid name: `{name}`")
return (self.value & self.values[name.upper()]) == self.values[name.upper()]
def __setattr__(self, key, value):
orig = key
key = key.upper()
if orig in ["value", "values"] or key not in self.values.keys():
return super().__setattr__(orig, value)
if not isinstance(value, bool):
raise TypeError(f"only type `bool` is supported.")
has_value = self.has(key)
if value and not has_value:
self.value |= self.values[key]
elif not value and has_value:
self.value &= ~self.values[key]
def add(self, value: str):
return self.__setattr__(value, True)
def remove(self, value: str):
return self.__setattr__(value, False)
@classmethod
def from_value(cls, value: int):
ret = cls()
ret.value = value
return ret
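# --- Editor's sketch (not part of the original file) ----------------------
# FlagBase discovers its bits from integer class attributes. The subclass
# below is hypothetical, invented for illustration, not a dico flag class:
class _DemoFlags(FlagBase):
    READ = 1 << 0
    WRITE = 1 << 1
    ADMIN = 1 << 2

def _flagbase_demo():
    f = _DemoFlags("read", write=True)   # positional names and keyword bools mix
    assert int(f) == 0b011 and not f.admin
    f.admin = True                       # __setattr__ flips the ADMIN bit
    assert int(f) == 0b111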
class TypeBase:
def __init__(self, value):
self.values: typing.Dict[int, str] = {
getattr(self, x): x for x in dir(self) if isinstance(getattr(self, x), int)
}
self.value: int = value
if self.value not in self.values:
raise AttributeError(f"invalid value: {value}")
def __str__(self) -> str:
return self.values[self.value]
def __int__(self) -> int:
return self.value
def __getattr__(self, item):
if item.startswith("__"):
return self.__getattribute__(item)
return self.is_type(item)
def is_type(self, name: str) -> bool:
values = {y: x for x, y in self.values.items()}
if name.upper() not in values:
raise AttributeError(f"invalid name: `{name}`")
return self.value == values[name.upper()]
@classmethod
def to_string(cls, value: int) -> str:
values = {
getattr(cls, x): x for x in dir(cls) if isinstance(getattr(cls, x), int)
}
return values.get(value)
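# --- Editor's sketch (not part of the original file) ----------------------
# TypeBase maps one integer value to a name. Hypothetical subclass, for
# illustration only:
class _DemoChannelType(TypeBase):
    TEXT = 0
    VOICE = 2

def _typebase_demo():
    t = _DemoChannelType(2)
    assert str(t) == "VOICE" and t.voice and not t.text
    assert _DemoChannelType.to_string(0) == "TEXT"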
f71c526a7dbf0c3e5e4a74e675dd8614cf5c1f83 | 4,367 | py | Python | src/python/gh_api_repo_metrics.py | pamelarussell/github-bioinformatics | 0e7184cae57426c25cfa0e838637d34adf0a59e7 | ["MIT"] | max_stars_count: 32 | max_issues_count: null | max_forks_count: 6
import argparse
from bigquery import get_client
from gh_api import curr_commit_master
from gh_api import repo
from util import create_bq_table, push_bq_records
from util import get_repo_names, curr_time_utc
from util import unique_vals
parser = argparse.ArgumentParser()
parser.add_argument('--proj', action = 'store', dest = 'proj', required = True,
help = 'BigQuery project name')
parser.add_argument('--json_key', action = 'store', dest = 'json_key', required = True,
help = 'JSON key file for BigQuery dataset')
parser.add_argument('--ds', action = 'store', dest = 'ds', required = True,
help = 'BigQuery dataset to write table to')
parser.add_argument('--table', action = 'store', dest = 'table', required = True,
help = 'BigQuery table to write to')
parser.add_argument('--sheet', action = 'store', dest = 'sheet', required = True,
help = 'Google Sheet with use_repo as a column')
parser.add_argument('--gh_user', action = 'store', dest = 'gh_username', required = True,
help = 'GitHub username for API')
parser.add_argument('--gh_oauth_key', action = 'store', dest = 'gh_oauth_key', required = True,
help = '(String) GitHub oauth key')
args = parser.parse_args()
proj = args.proj
json_key = args.json_key
dataset = args.ds
table = args.table
sheet = args.sheet
gh_username = args.gh_username
gh_oauth_key = args.gh_oauth_key
# Get repo names
print("\nGetting repo names from spreadsheet")
repos = get_repo_names(sheet, json_key)
print("There are %s repos with use_repo = 1.\n" % len(repos))
# Using BigQuery-Python https://github.com/tylertreat/BigQuery-Python
print('\nGetting BigQuery client\n')
client = get_client(json_key_file=json_key, readonly=False, swallow_results=True)
# Check which repos are already in the table
existing_repos = unique_vals(client, proj, dataset, table, "repo_name")
if len(existing_repos) > 0:
repos = [repo for repo in repos if repo not in existing_repos]
print("Only getting data for %s repos not yet analyzed" %len(repos))
# Create the output table if necessary
schema = [
{'name': 'repo_name', 'type': 'STRING', 'mode': 'NULLABLE'},
{'name': 'api_url', 'type': 'STRING', 'mode': 'NULLABLE'},
{'name': 'html_url', 'type': 'STRING', 'mode': 'NULLABLE'},
{'name': 'description', 'type': 'STRING', 'mode': 'NULLABLE'},
{'name': 'is_fork', 'type': 'BOOLEAN', 'mode': 'NULLABLE'},
{'name': 'stargazers_count', 'type': 'INTEGER', 'mode': 'NULLABLE'},
{'name': 'watchers_count', 'type': 'INTEGER', 'mode': 'NULLABLE'},
{'name': 'forks_count', 'type': 'INTEGER', 'mode': 'NULLABLE'},
{'name': 'open_issues_count', 'type': 'INTEGER', 'mode': 'NULLABLE'},
{'name': 'subscribers_count', 'type': 'INTEGER', 'mode': 'NULLABLE'},
{'name': 'curr_commit_master', 'type': 'STRING', 'mode': 'NULLABLE'},
{'name': 'time_accessed', 'type': 'STRING', 'mode': 'NULLABLE'}
]
if not client.check_table(dataset, table):
create_bq_table(client, dataset, table, schema)
def get_record(repo_name):
r = repo.Repo(repo_name, gh_username, gh_oauth_key)
curr_time = curr_time_utc()
curr_commit = curr_commit_master(repo_name, gh_username, gh_oauth_key)
return {'repo_name': r.get_repo_name(),
'api_url': r.get_gh_api_url(),
'html_url': r.get_html_url(),
'description': r.get_description(),
'is_fork': r.is_fork(),
'stargazers_count': r.get_stargazers_count(),
'watchers_count': r.get_watchers_count(),
'forks_count': r.get_forks_count(),
'open_issues_count': r.get_open_issues_count(),
'subscribers_count': r.get_subscribers_count(),
'curr_commit_master': curr_commit,
'time_accessed': curr_time}
print("Getting repo info from GitHub API")
records = []
num_done = 0
for repo_name in repos:
try:
records.append(get_record(repo_name))
except UnicodeEncodeError:
print("Skipping repo %s" % repo_name)
num_done = num_done + 1
if num_done % 100 == 0:
print("Finished %s repos. Pushing records." % num_done)
push_bq_records(client, dataset, table, records)
records.clear()
push_bq_records(client, dataset, table, records) # Last batch
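# --- Editor's sketch (not part of the original file) ----------------------
# The push-every-100-then-flush pattern above can be factored into a
# generic batching helper (library-free, names invented for the demo):
def _batched(iterable, batch_size=100):
    batch = []
    for item in iterable:
        batch.append(item)
        if len(batch) == batch_size:
            yield batch
            batch = []
    if batch:            # the final partial batch, like the last push above
        yield batch

# for chunk in _batched(map(get_record, repos), 100):
#     push_bq_records(client, dataset, table, chunk)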
f71c529b8ce1931c25f8a957d2a88be00a3047b0 | 551 | py | Python | list/list10.py | liyuanyuan11/Python | d94cc7ab39e56c6e24bfc741a30da77590d1d220 | [
"MIT"
] | null | null | null | list/list10.py | liyuanyuan11/Python | d94cc7ab39e56c6e24bfc741a30da77590d1d220 | [
"MIT"
] | null | null | null | list/list10.py | liyuanyuan11/Python | d94cc7ab39e56c6e24bfc741a30da77590d1d220 | [
"MIT"
] | null | null | null | roadSign=[]
roadSign.append("Johnson's house")
roadSign.append("Fox streetlamp")
roadSign.append("Guang Hualu kindergarten")
roadSign.append("Dog rescue center")
roadSign.append("Samll street park")
roadSign.append("Ri Tan School")
print(roadSign)
nextRoadSign=roadSign.pop()
print(nextRoadSign)
nextRoadSign=roadSign.pop()
print(nextRoadSign)
nextRoadSign=roadSign.pop()
print(nextRoadSign)
nextRoadSign=roadSign.pop()
print(nextRoadSign)
nextRoadSign=roadSign.pop()
print(nextRoadSign)
nextRoadSign=roadSign.pop()
print(nextRoadSign)
print(roadSign)
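# --- Editor's sketch (not part of the original file) ----------------------
# The six pop/print pairs above walk the list as a LIFO stack. The same
# traversal, written as a loop over a fresh copy of the data:
demo_signs = ["Johnson's house", "Fox streetlamp", "Guang Hualu kindergarten",
              "Dog rescue center", "Small street park", "Ri Tan School"]
while demo_signs:
    print(demo_signs.pop())   # the last sign appended is reached first
print(demo_signs)             # -> []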
f71c52fff7f80a35b0dd2c37b0c08e7e9f367495 | 2,489 | py | Python | src/pretalx/orga/management/commands/import_schedule.py | lili668668/pretalx | 5ba2185ffd7c5f95254aafe25ad3de340a86eadb | [
"Apache-2.0"
] | null | null | null | src/pretalx/orga/management/commands/import_schedule.py | lili668668/pretalx | 5ba2185ffd7c5f95254aafe25ad3de340a86eadb | [
"Apache-2.0"
] | null | null | null | src/pretalx/orga/management/commands/import_schedule.py | lili668668/pretalx | 5ba2185ffd7c5f95254aafe25ad3de340a86eadb | [
"Apache-2.0"
] | null | null | null | import datetime as dt
from xml.etree import ElementTree as ET
from django.core.management.base import BaseCommand
from django.db import transaction
from django_scopes import scopes_disabled
from pretalx.event.models import Event, Organiser, Team
from pretalx.person.models import User
class Command(BaseCommand):
help = "Imports a frab xml export"
def add_arguments(self, parser):
parser.add_argument("path", type=str)
@transaction.atomic
def handle(self, *args, **options):
from pretalx.schedule.utils import process_frab
path = options.get("path")
tree = ET.parse(path)
root = tree.getroot()
event_data = root.find("conference")
event = Event.objects.filter(
slug__iexact=event_data.find("acronym").text
).first()
with scopes_disabled():
if not event:
event = self.create_event(event_data)
team = event.organiser.teams.filter(
can_change_teams=True,
can_change_organiser_settings=True,
can_change_event_settings=True,
can_change_submissions=True,
).first() or self.create_team(
str(event.name) + " Organisers", event.organiser
)
for user in User.objects.filter(is_administrator=True):
team.members.add(user)
team.save()
self.stdout.write(self.style.SUCCESS(process_frab(root, event)))
def create_event(self, event_data):
name = event_data.find("title").text
organiser = Organiser.objects.create(
name=name, slug=event_data.find("acronym").text
)
event = Event(
name=name,
organiser=organiser,
slug=event_data.find("acronym").text,
date_from=dt.datetime.strptime(
event_data.find("start").text, "%Y-%m-%d"
).date(),
date_to=dt.datetime.strptime(
event_data.find("end").text, "%Y-%m-%d"
).date(),
)
event.save()
self.create_team(name + " Organisers", organiser)
return event
def create_team(self, name, organiser):
return Team.objects.create(
name=name,
organiser=organiser,
can_change_teams=True,
can_change_organiser_settings=True,
can_change_event_settings=True,
can_change_submissions=True,
)
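# --- Editor's sketch (not part of the original file) ----------------------
# The importer reads a frab-style XML export. A minimal document carrying
# just the fields this command touches (acronym, title, start, end);
# illustrative only, since real frab exports contain rooms, days and events:
_FRAB_MINIMAL = """\
<schedule>
  <conference>
    <acronym>demo20</acronym>
    <title>Demo Conference</title>
    <start>2020-01-01</start>
    <end>2020-01-02</end>
  </conference>
</schedule>
"""
# ET.fromstring(_FRAB_MINIMAL).find("conference").find("acronym").text == "demo20"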
f71c54f151171f4326ad7dc1ec48ae4b139e8083 | 1,412 | py | Python | python/com/nfs/select/selectMa20Stock.py | nfsli926/stock | aea312bbee2a48043800d45af8d7e14c99f566c5 | ["Apache-2.0"] | max_stars_count: 1 | max_issues_count: null | max_forks_count: null
# coding=utf-8
import pandas as pd
import MySQLdb
import python.com.nfs.util.MailUtil as mailutil
import sys
# ========== 从原始csv文件中导入股票数据,以浦发银行sh600000为例
# 导入数据 - 注意:这里请填写数据文件在您电脑中的路径
'''
1、循环股票代码
2、最新的一条数据20均线在最高与最低之间
2、取出最新的5条数据
3、最后一条的均线数据大于倒数第二条
4、写入数据库,发送 或者写入df,形成xls文件
5、发送
'''
try:
reload(sys)
sys.setdefaultencoding('utf-8')
conn = MySQLdb.connect(host='localhost',user='root',passwd='123456',db='stock',charset="utf8")
cursor = conn.cursor()
sql ="select * from stock_basic"
cursor.execute(sql)
ma20List = []
for row in cursor.fetchall():
stockno = str(row[0])
stockname = row[1]
print stockname
stock_data = pd.read_sql("select * from stock_day_data where code='"+stockno+"' order by code, date desc limit 5 ",conn)
if stock_data.iloc[2,10]>=stock_data.iloc[1,10] and stock_data.iloc[1,10]< stock_data.iloc[0,10]:
print "sssssssssss" +stockno
ma20List.append([stockno,stockname])
print ma20List
content = ""
for ma in ma20List:
print ma[0]+ma[1]
content = content +ma[0]+","+ma[1].encode('utf8')
#print content
#content = content+"</table></body></html>"
print content
mailutil.send_mail("20日均线数据", content, None)
cursor.close()
conn.close()
#stock_data.to_csv('selectma20stock.csv', index=False)
except Exception,e:
print e.message
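# --- Editor's sketch (not part of the original file) ----------------------
# The screening condition above, restated on a small pandas frame. The
# column name is invented for the demo; in the query result, column index 10
# is assumed to hold the 20-day moving average and rows are newest-first.
def _ma20_condition_demo():
    import pandas as _pd
    df = _pd.DataFrame({"ma20": [10.5, 10.2, 10.3]})   # rows: today, -1 day, -2 days
    turning_up = df.iloc[2, 0] >= df.iloc[1, 0] and df.iloc[1, 0] < df.iloc[0, 0]
    assert turning_up   # the MA dipped yesterday and is rising again today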
f71c54fd629fe7b2eed4f7bb9d796bb0a3a708f2 | 17,077 | py | Python | ignite/contrib/engines/common.py | VinhLoiIT/ignite | 3b2b9655ea9f80ce49b8a9f1c2d72f80e2a95f56 | ["BSD-3-Clause"] | max_stars_count: 1 | max_issues_count: null | max_forks_count: null
from functools import partial
import warnings
import numbers
from collections.abc import Sequence, Mapping
import torch
import torch.distributed as dist
from ignite.engine import Engine, Events
from ignite.metrics import RunningAverage
from ignite.handlers import TerminateOnNan, ModelCheckpoint, EarlyStopping
from ignite.contrib.metrics import GpuInfo
from ignite.contrib.handlers import ProgressBar
from ignite.contrib.handlers import VisdomLogger
from ignite.contrib.handlers import TensorboardLogger, global_step_from_engine
import ignite.contrib.handlers.tensorboard_logger as tb_logger_module
import ignite.contrib.handlers.visdom_logger as visdom_logger_module
from ignite.contrib.handlers import MLflowLogger
import ignite.contrib.handlers.mlflow_logger as mlflow_logger_module
from ignite.contrib.handlers import PolyaxonLogger
import ignite.contrib.handlers.polyaxon_logger as polyaxon_logger_module
def setup_common_training_handlers(
trainer,
train_sampler=None,
to_save=None,
save_every_iters=1000,
output_path=None,
lr_scheduler=None,
with_gpu_stats=False,
output_names=None,
with_pbars=True,
with_pbar_on_iters=True,
log_every_iters=100,
device="cuda",
):
"""Helper method to setup trainer with common handlers (it also supports distributed configuration):
- :class:`~ignite.handlers.TerminateOnNan`
- handler to setup learning rate scheduling
- :class:`~ignite.handlers.ModelCheckpoint`
- :class:`~ignite.metrics.RunningAverage` on `update_function` output
- Two progress bars on epochs and optionally on iterations
Args:
trainer (Engine): trainer engine. Output of trainer's `update_function` should be a dictionary
or sequence or a single tensor.
train_sampler (torch.utils.data.DistributedSampler, optional): Optional distributed sampler used to call
`set_epoch` method on epoch started event.
to_save (dict, optional): dictionary with objects to save in the checkpoint. This is used with
:class:`~ignite.handlers.ModelCheckpoint`.
save_every_iters (int, optional): saving interval. By default, `to_save` objects are stored
each 1000 iterations.
output_path (str, optional): output path to indicate where `to_save` objects are stored.
lr_scheduler (ParamScheduler or subclass of `torch.optim.lr_scheduler._LRScheduler`): learning rate scheduler
as native torch LRScheduler or ignite's parameter scheduler.
with_gpu_stats (bool, optional): if True, :class:`~ignite.contrib.metrics.handlers.GpuInfo` is attached to the
trainer. This requires `pynvml` package to be installed.
output_names (list/tuple): list of names associated with `update_function` output dictionary.
with_pbars (bool, optional): if True, two progress bars on epochs and optionally on iterations are attached
with_pbar_on_iters (bool, optional): if True, a progress bar on iterations is attached to the trainer.
log_every_iters (int, optional): logging interval for :class:`~ignite.contrib.metrics.handlers.GpuInfo` and for
epoch-wise progress bar.
        device (str or torch.device, optional): Optional device specification in case of distributed computation usage.
"""
kwargs = dict(
to_save=to_save,
save_every_iters=save_every_iters,
output_path=output_path,
lr_scheduler=lr_scheduler,
with_gpu_stats=with_gpu_stats,
output_names=output_names,
with_pbars=with_pbars,
with_pbar_on_iters=with_pbar_on_iters,
log_every_iters=log_every_iters,
device=device,
)
if dist.is_available() and dist.is_initialized():
_setup_common_distrib_training_handlers(trainer, train_sampler=train_sampler, **kwargs)
else:
if train_sampler is not None:
warnings.warn(
"Argument train_sampler distributed sampler used to call `set_epoch` method on epoch "
"started event, but no distributed setting detected",
UserWarning,
)
_setup_common_training_handlers(trainer, **kwargs)
setup_common_distrib_training_handlers = setup_common_training_handlers
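# --- Editor's sketch (not part of the original file) ----------------------
# Minimal end-to-end usage of the helper above with a toy model and update
# function. All names below are placeholders invented for the demo, not
# part of the ignite API; call _common_handlers_demo() to run it.
def _common_handlers_demo():
    import tempfile
    model = torch.nn.Linear(2, 1)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

    def _update(engine, batch):
        optimizer.zero_grad()
        loss = model(batch).pow(2).mean()
        loss.backward()
        optimizer.step()
        return {"loss": loss.item()}      # matches output_names below

    trainer = Engine(_update)
    setup_common_training_handlers(
        trainer,
        to_save={"model": model, "optimizer": optimizer},
        output_path=tempfile.mkdtemp(),
        output_names=["loss"],            # exposes a running-average "loss" metric
        with_pbars=False,                 # avoids the tqdm dependency in this demo
        device="cpu",
    )
    trainer.run([torch.randn(4, 2)] * 8, max_epochs=2)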
def _setup_common_training_handlers(
trainer,
to_save=None,
save_every_iters=1000,
output_path=None,
lr_scheduler=None,
with_gpu_stats=True,
output_names=None,
with_pbars=True,
with_pbar_on_iters=True,
log_every_iters=100,
device="cuda",
):
trainer.add_event_handler(Events.ITERATION_COMPLETED, TerminateOnNan())
if lr_scheduler is not None:
if isinstance(lr_scheduler, torch.optim.lr_scheduler._LRScheduler):
trainer.add_event_handler(Events.ITERATION_COMPLETED, lambda engine: lr_scheduler.step())
else:
trainer.add_event_handler(Events.ITERATION_STARTED, lr_scheduler)
trainer.add_event_handler(Events.EPOCH_COMPLETED, empty_cuda_cache)
if to_save is not None:
if output_path is None:
raise ValueError("If to_save argument is provided then output_path argument should be also defined")
checkpoint_handler = ModelCheckpoint(dirname=output_path, filename_prefix="training")
trainer.add_event_handler(Events.ITERATION_COMPLETED(every=save_every_iters), checkpoint_handler, to_save)
if with_gpu_stats:
GpuInfo().attach(trainer, name="gpu", event_name=Events.ITERATION_COMPLETED(every=log_every_iters))
if output_names is not None:
def output_transform(x, index, name):
if isinstance(x, Mapping):
return x[name]
elif isinstance(x, Sequence):
return x[index]
elif isinstance(x, (torch.Tensor, numbers.Number)):
return x
else:
                raise ValueError(
                    "Unhandled type of update_function's output. "
                    "It should be either a mapping or a sequence, but {} was given".format(type(x))
                )
for i, n in enumerate(output_names):
RunningAverage(
output_transform=partial(output_transform, index=i, name=n), epoch_bound=False, device=device
).attach(trainer, n)
if with_pbars:
if with_pbar_on_iters:
ProgressBar(persist=False).attach(
trainer, metric_names="all", event_name=Events.ITERATION_COMPLETED(every=log_every_iters)
)
ProgressBar(persist=True, bar_format="").attach(
trainer, event_name=Events.EPOCH_STARTED, closing_event_name=Events.COMPLETED
)
def _setup_common_distrib_training_handlers(
trainer,
train_sampler=None,
to_save=None,
save_every_iters=1000,
output_path=None,
lr_scheduler=None,
with_gpu_stats=True,
output_names=None,
with_pbars=True,
with_pbar_on_iters=True,
log_every_iters=100,
device="cuda",
):
if not (dist.is_available() and dist.is_initialized()):
raise RuntimeError("Distributed setting is not initialized, please call `dist.init_process_group` before.")
_setup_common_training_handlers(
trainer,
to_save=None,
lr_scheduler=lr_scheduler,
with_gpu_stats=with_gpu_stats,
output_names=output_names,
with_pbars=(dist.get_rank() == 0) and with_pbars,
with_pbar_on_iters=with_pbar_on_iters,
log_every_iters=log_every_iters,
device=device,
)
if train_sampler is not None:
if not callable(getattr(train_sampler, "set_epoch", None)):
raise TypeError("Train sampler should have `set_epoch` method")
@trainer.on(Events.EPOCH_STARTED)
def distrib_set_epoch(engine):
train_sampler.set_epoch(engine.state.epoch - 1)
if dist.get_rank() == 0:
if to_save is not None:
if output_path is None:
raise ValueError("If to_save argument is provided then output_path argument should be also defined")
checkpoint_handler = ModelCheckpoint(dirname=output_path, filename_prefix="training")
trainer.add_event_handler(Events.ITERATION_COMPLETED(every=save_every_iters), checkpoint_handler, to_save)
def empty_cuda_cache(_):
torch.cuda.empty_cache()
import gc
gc.collect()
def setup_any_logging(logger, logger_module, trainer, optimizers, evaluators, log_every_iters):
if optimizers is not None:
from torch.optim.optimizer import Optimizer
if not isinstance(optimizers, (Optimizer, Mapping)):
raise TypeError("Argument optimizers should be either a single optimizer or a dictionary or optimizers")
if evaluators is not None:
if not isinstance(evaluators, (Engine, Mapping)):
raise TypeError("Argument optimizers should be either a single optimizer or a dictionary or optimizers")
if log_every_iters is None:
log_every_iters = 1
logger.attach(
trainer,
log_handler=logger_module.OutputHandler(tag="training", metric_names="all"),
event_name=Events.ITERATION_COMPLETED(every=log_every_iters),
)
if optimizers is not None:
# Log optimizer parameters
if isinstance(optimizers, Optimizer):
optimizers = {None: optimizers}
for k, optimizer in optimizers.items():
logger.attach(
trainer,
log_handler=logger_module.OptimizerParamsHandler(optimizer, param_name="lr", tag=k),
event_name=Events.ITERATION_STARTED(every=log_every_iters),
)
if evaluators is not None:
# Log evaluation metrics
if isinstance(evaluators, Engine):
evaluators = {"validation": evaluators}
for k, evaluator in evaluators.items():
gst = global_step_from_engine(trainer)
logger.attach(
evaluator,
log_handler=logger_module.OutputHandler(tag=k, metric_names="all", global_step_transform=gst),
event_name=Events.COMPLETED,
)
def setup_tb_logging(output_path, trainer, optimizers=None, evaluators=None, log_every_iters=100):
"""Method to setup TensorBoard logging on trainer and a list of evaluators. Logged metrics are:
- Training metrics, e.g. running average loss values
- Learning rate(s)
- Evaluation metrics
Args:
output_path (str): logging directory path
trainer (Engine): trainer engine
optimizers (torch.optim.Optimizer or dict of torch.optim.Optimizer, optional): single or dictionary of
torch optimizers. If a dictionary, keys are used as tags arguments for logging.
evaluators (Engine or dict of Engine, optional): single or dictionary of evaluators. If a dictionary,
keys are used as tags arguments for logging.
log_every_iters (int, optional): interval for loggers attached to iteration events. To log every iteration,
value can be set to 1 or None.
Returns:
TensorboardLogger
"""
tb_logger = TensorboardLogger(log_dir=output_path)
setup_any_logging(tb_logger, tb_logger_module, trainer, optimizers, evaluators, log_every_iters=log_every_iters)
return tb_logger
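# --- Editor's sketch (not part of the original file) ----------------------
# Wiring pattern for the TensorBoard helper above; dictionary keys become
# logging tags (placeholder names, illustration only):
def _tb_logging_demo(trainer, evaluator, optimizer):
    tb_logger = setup_tb_logging(
        "/tmp/tb_logs",
        trainer,
        optimizers={"main": optimizer},
        evaluators={"validation": evaluator},
        log_every_iters=50,
    )
    return tb_logger   # remember to close() it when training finishes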
def setup_visdom_logging(trainer, optimizers=None, evaluators=None, log_every_iters=100, **kwargs):
"""Method to setup Visdom logging on trainer and a list of evaluators. Logged metrics are:
- Training metrics, e.g. running average loss values
- Learning rate(s)
- Evaluation metrics
Args:
trainer (Engine): trainer engine
optimizers (torch.optim.Optimizer or dict of torch.optim.Optimizer, optional): single or dictionary of
torch optimizers. If a dictionary, keys are used as tags arguments for logging.
evaluators (Engine or dict of Engine, optional): single or dictionary of evaluators. If a dictionary,
keys are used as tags arguments for logging.
log_every_iters (int, optional): interval for loggers attached to iteration events. To log every iteration,
value can be set to 1 or None.
**kwargs: kwargs to pass into VisdomLogger
Returns:
VisdomLogger
"""
vis_logger = VisdomLogger(**kwargs)
setup_any_logging(
vis_logger, visdom_logger_module, trainer, optimizers, evaluators, log_every_iters=log_every_iters
)
return vis_logger
def setup_mlflow_logging(trainer, optimizers=None, evaluators=None, log_every_iters=100):
"""Method to setup MLflow logging on trainer and a list of evaluators. Logged metrics are:
- Training metrics, e.g. running average loss values
- Learning rate(s)
- Evaluation metrics
Args:
trainer (Engine): trainer engine
optimizers (torch.optim.Optimizer or dict of torch.optim.Optimizer, optional): single or dictionary of
torch optimizers. If a dictionary, keys are used as tags arguments for logging.
evaluators (Engine or dict of Engine, optional): single or dictionary of evaluators. If a dictionary,
keys are used as tags arguments for logging.
log_every_iters (int, optional): interval for loggers attached to iteration events. To log every iteration,
value can be set to 1 or None.
Returns:
MLflowLogger
"""
mlflow_logger = MLflowLogger()
setup_any_logging(
mlflow_logger, mlflow_logger_module, trainer, optimizers, evaluators, log_every_iters=log_every_iters
)
return mlflow_logger
def setup_plx_logging(trainer, optimizers=None, evaluators=None, log_every_iters=100):
"""Method to setup MLflow logging on trainer and a list of evaluators. Logged metrics are:
- Training metrics, e.g. running average loss values
- Learning rate(s)
- Evaluation metrics
Args:
trainer (Engine): trainer engine
optimizers (torch.optim.Optimizer or dict of torch.optim.Optimizer, optional): single or dictionary of
torch optimizers. If a dictionary, keys are used as tags arguments for logging.
evaluators (Engine or dict of Engine, optional): single or dictionary of evaluators. If a dictionary,
keys are used as tags arguments for logging.
log_every_iters (int, optional): interval for loggers attached to iteration events. To log every iteration,
value can be set to 1 or None.
Returns:
PolyaxonLogger
"""
plx_logger = PolyaxonLogger()
setup_any_logging(
plx_logger, polyaxon_logger_module, trainer, optimizers, evaluators, log_every_iters=log_every_iters
)
return plx_logger
def get_default_score_fn(metric_name):
def wrapper(engine):
score = engine.state.metrics[metric_name]
return score
return wrapper
def save_best_model_by_val_score(output_path, evaluator, model, metric_name, n_saved=3, trainer=None, tag="val"):
"""Method adds a handler to `evaluator` to save best models based on the score (named by `metric_name`)
provided by `evaluator`.
Args:
output_path (str): output path to indicate where to save best models
evaluator (Engine): evaluation engine used to provide the score
model (nn.Module): model to store
metric_name (str): metric name to use for score evaluation. This metric should be present in
`evaluator.state.metrics`.
n_saved (int, optional): number of best models to store
trainer (Engine, optional): trainer engine to fetch the epoch when saving the best model.
tag (str, optional): score name prefix: `{tag}_{metric_name}`. By default, tag is "val".
"""
global_step_transform = None
if trainer is not None:
global_step_transform = global_step_from_engine(trainer)
best_model_handler = ModelCheckpoint(
dirname=output_path,
filename_prefix="best",
n_saved=n_saved,
global_step_transform=global_step_transform,
score_name="{}_{}".format(tag, metric_name.lower()),
score_function=get_default_score_fn(metric_name),
)
evaluator.add_event_handler(Events.COMPLETED, best_model_handler, {"model": model,})
def add_early_stopping_by_val_score(patience, evaluator, trainer, metric_name):
"""Method setups early stopping handler based on the score (named by `metric_name`) provided by `evaluator`.
Args:
patience (int): number of events to wait if no improvement and then stop the training.
evaluator (Engine): evaluation engine used to provide the score
trainer (Engine): trainer engine to stop the run if no improvement.
metric_name (str): metric name to use for score evaluation. This metric should be present in
`evaluator.state.metrics`.
"""
es_handler = EarlyStopping(patience=patience, score_function=get_default_score_fn(metric_name), trainer=trainer)
evaluator.add_event_handler(Events.COMPLETED, es_handler)
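# --- Editor's sketch (not part of the original file) ----------------------
# The two helpers above are typically attached together once the evaluator
# computes the metric of interest (placeholder names, illustration only):
def _val_score_handlers_demo(trainer, evaluator, model, output_path):
    save_best_model_by_val_score(
        output_path, evaluator, model,
        metric_name="accuracy", n_saved=2, trainer=trainer,
    )
    add_early_stopping_by_val_score(
        patience=5, evaluator=evaluator, trainer=trainer, metric_name="accuracy",
    )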
| 42.061576 | 119 | 0.701997 | from functools import partial
import warnings
import numbers
from collections.abc import Sequence, Mapping
import torch
import torch.distributed as dist
from ignite.engine import Engine, Events
from ignite.metrics import RunningAverage
from ignite.handlers import TerminateOnNan, ModelCheckpoint, EarlyStopping
from ignite.contrib.metrics import GpuInfo
from ignite.contrib.handlers import ProgressBar
from ignite.contrib.handlers import VisdomLogger
from ignite.contrib.handlers import TensorboardLogger, global_step_from_engine
import ignite.contrib.handlers.tensorboard_logger as tb_logger_module
import ignite.contrib.handlers.visdom_logger as visdom_logger_module
from ignite.contrib.handlers import MLflowLogger
import ignite.contrib.handlers.mlflow_logger as mlflow_logger_module
from ignite.contrib.handlers import PolyaxonLogger
import ignite.contrib.handlers.polyaxon_logger as polyaxon_logger_module
def setup_common_training_handlers(
trainer,
train_sampler=None,
to_save=None,
save_every_iters=1000,
output_path=None,
lr_scheduler=None,
with_gpu_stats=False,
output_names=None,
with_pbars=True,
with_pbar_on_iters=True,
log_every_iters=100,
device="cuda",
):
kwargs = dict(
to_save=to_save,
save_every_iters=save_every_iters,
output_path=output_path,
lr_scheduler=lr_scheduler,
with_gpu_stats=with_gpu_stats,
output_names=output_names,
with_pbars=with_pbars,
with_pbar_on_iters=with_pbar_on_iters,
log_every_iters=log_every_iters,
device=device,
)
if dist.is_available() and dist.is_initialized():
_setup_common_distrib_training_handlers(trainer, train_sampler=train_sampler, **kwargs)
else:
if train_sampler is not None:
warnings.warn(
"Argument train_sampler distributed sampler used to call `set_epoch` method on epoch "
"started event, but no distributed setting detected",
UserWarning,
)
_setup_common_training_handlers(trainer, **kwargs)
setup_common_distrib_training_handlers = setup_common_training_handlers
def _setup_common_training_handlers(
trainer,
to_save=None,
save_every_iters=1000,
output_path=None,
lr_scheduler=None,
with_gpu_stats=True,
output_names=None,
with_pbars=True,
with_pbar_on_iters=True,
log_every_iters=100,
device="cuda",
):
trainer.add_event_handler(Events.ITERATION_COMPLETED, TerminateOnNan())
if lr_scheduler is not None:
if isinstance(lr_scheduler, torch.optim.lr_scheduler._LRScheduler):
trainer.add_event_handler(Events.ITERATION_COMPLETED, lambda engine: lr_scheduler.step())
else:
trainer.add_event_handler(Events.ITERATION_STARTED, lr_scheduler)
trainer.add_event_handler(Events.EPOCH_COMPLETED, empty_cuda_cache)
if to_save is not None:
if output_path is None:
raise ValueError("If to_save argument is provided then output_path argument should be also defined")
checkpoint_handler = ModelCheckpoint(dirname=output_path, filename_prefix="training")
trainer.add_event_handler(Events.ITERATION_COMPLETED(every=save_every_iters), checkpoint_handler, to_save)
if with_gpu_stats:
GpuInfo().attach(trainer, name="gpu", event_name=Events.ITERATION_COMPLETED(every=log_every_iters))
if output_names is not None:
def output_transform(x, index, name):
if isinstance(x, Mapping):
return x[name]
elif isinstance(x, Sequence):
return x[index]
elif isinstance(x, (torch.Tensor, numbers.Number)):
return x
else:
raise ValueError(
"Unhandled type of update_function's output. "
"It should either mapping or sequence, but given {}".format(type(x))
)
for i, n in enumerate(output_names):
RunningAverage(
output_transform=partial(output_transform, index=i, name=n), epoch_bound=False, device=device
).attach(trainer, n)
if with_pbars:
if with_pbar_on_iters:
ProgressBar(persist=False).attach(
trainer, metric_names="all", event_name=Events.ITERATION_COMPLETED(every=log_every_iters)
)
ProgressBar(persist=True, bar_format="").attach(
trainer, event_name=Events.EPOCH_STARTED, closing_event_name=Events.COMPLETED
)
def _setup_common_distrib_training_handlers(
trainer,
train_sampler=None,
to_save=None,
save_every_iters=1000,
output_path=None,
lr_scheduler=None,
with_gpu_stats=True,
output_names=None,
with_pbars=True,
with_pbar_on_iters=True,
log_every_iters=100,
device="cuda",
):
if not (dist.is_available() and dist.is_initialized()):
raise RuntimeError("Distributed setting is not initialized, please call `dist.init_process_group` before.")
_setup_common_training_handlers(
trainer,
to_save=None,
lr_scheduler=lr_scheduler,
with_gpu_stats=with_gpu_stats,
output_names=output_names,
with_pbars=(dist.get_rank() == 0) and with_pbars,
with_pbar_on_iters=with_pbar_on_iters,
log_every_iters=log_every_iters,
device=device,
)
if train_sampler is not None:
if not callable(getattr(train_sampler, "set_epoch", None)):
raise TypeError("Train sampler should have `set_epoch` method")
@trainer.on(Events.EPOCH_STARTED)
def distrib_set_epoch(engine):
train_sampler.set_epoch(engine.state.epoch - 1)
if dist.get_rank() == 0:
if to_save is not None:
if output_path is None:
raise ValueError("If to_save argument is provided then output_path argument should be also defined")
checkpoint_handler = ModelCheckpoint(dirname=output_path, filename_prefix="training")
trainer.add_event_handler(Events.ITERATION_COMPLETED(every=save_every_iters), checkpoint_handler, to_save)
def empty_cuda_cache(_):
torch.cuda.empty_cache()
import gc
gc.collect()
def setup_any_logging(logger, logger_module, trainer, optimizers, evaluators, log_every_iters):
if optimizers is not None:
from torch.optim.optimizer import Optimizer
if not isinstance(optimizers, (Optimizer, Mapping)):
raise TypeError("Argument optimizers should be either a single optimizer or a dictionary or optimizers")
if evaluators is not None:
if not isinstance(evaluators, (Engine, Mapping)):
raise TypeError("Argument optimizers should be either a single optimizer or a dictionary or optimizers")
if log_every_iters is None:
log_every_iters = 1
logger.attach(
trainer,
log_handler=logger_module.OutputHandler(tag="training", metric_names="all"),
event_name=Events.ITERATION_COMPLETED(every=log_every_iters),
)
if optimizers is not None:
# Log optimizer parameters
if isinstance(optimizers, Optimizer):
optimizers = {None: optimizers}
for k, optimizer in optimizers.items():
logger.attach(
trainer,
log_handler=logger_module.OptimizerParamsHandler(optimizer, param_name="lr", tag=k),
event_name=Events.ITERATION_STARTED(every=log_every_iters),
)
if evaluators is not None:
# Log evaluation metrics
if isinstance(evaluators, Engine):
evaluators = {"validation": evaluators}
for k, evaluator in evaluators.items():
gst = global_step_from_engine(trainer)
logger.attach(
evaluator,
log_handler=logger_module.OutputHandler(tag=k, metric_names="all", global_step_transform=gst),
event_name=Events.COMPLETED,
)
def setup_tb_logging(output_path, trainer, optimizers=None, evaluators=None, log_every_iters=100):
tb_logger = TensorboardLogger(log_dir=output_path)
setup_any_logging(tb_logger, tb_logger_module, trainer, optimizers, evaluators, log_every_iters=log_every_iters)
return tb_logger
def setup_visdom_logging(trainer, optimizers=None, evaluators=None, log_every_iters=100, **kwargs):
vis_logger = VisdomLogger(**kwargs)
setup_any_logging(
vis_logger, visdom_logger_module, trainer, optimizers, evaluators, log_every_iters=log_every_iters
)
return vis_logger
def setup_mlflow_logging(trainer, optimizers=None, evaluators=None, log_every_iters=100):
mlflow_logger = MLflowLogger()
setup_any_logging(
mlflow_logger, mlflow_logger_module, trainer, optimizers, evaluators, log_every_iters=log_every_iters
)
return mlflow_logger
def setup_plx_logging(trainer, optimizers=None, evaluators=None, log_every_iters=100):
plx_logger = PolyaxonLogger()
setup_any_logging(
plx_logger, polyaxon_logger_module, trainer, optimizers, evaluators, log_every_iters=log_every_iters
)
return plx_logger
def get_default_score_fn(metric_name):
def wrapper(engine):
score = engine.state.metrics[metric_name]
return score
return wrapper
def save_best_model_by_val_score(output_path, evaluator, model, metric_name, n_saved=3, trainer=None, tag="val"):
global_step_transform = None
if trainer is not None:
global_step_transform = global_step_from_engine(trainer)
best_model_handler = ModelCheckpoint(
dirname=output_path,
filename_prefix="best",
n_saved=n_saved,
global_step_transform=global_step_transform,
score_name="{}_{}".format(tag, metric_name.lower()),
score_function=get_default_score_fn(metric_name),
)
evaluator.add_event_handler(Events.COMPLETED, best_model_handler, {"model": model,})
def add_early_stopping_by_val_score(patience, evaluator, trainer, metric_name):
es_handler = EarlyStopping(patience=patience, score_function=get_default_score_fn(metric_name), trainer=trainer)
evaluator.add_event_handler(Events.COMPLETED, es_handler)
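# A minimal end-to-end sketch of how these helpers compose (illustrative only;
# `trainer`, `evaluator`, `model` and `optimizer` are assumed to already exist):
#     tb_logger = setup_tb_logging("/tmp/output", trainer, optimizers=optimizer,
#                                  evaluators=evaluator, log_every_iters=100)
#     save_best_model_by_val_score("/tmp/output", evaluator, model,
#                                  metric_name="accuracy", n_saved=3, trainer=trainer)
#     add_early_stopping_by_val_score(patience=5, evaluator=evaluator,
#                                     trainer=trainer, metric_name="accuracy")
#     tb_logger.close()  # close the writer once training is done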
| true | true |
f71c550e53e9a2417f70c36c35d7b793ff32d391 | 30,900 | py | Python | mindspore/python/mindspore/common/parameter.py | zhz44/mindspore | 6044d34074c8505dd4b02c0a05419cbc32a43f86 | [
"Apache-2.0"
] | null | null | null | mindspore/python/mindspore/common/parameter.py | zhz44/mindspore | 6044d34074c8505dd4b02c0a05419cbc32a43f86 | [
"Apache-2.0"
] | null | null | null | mindspore/python/mindspore/common/parameter.py | zhz44/mindspore | 6044d34074c8505dd4b02c0a05419cbc32a43f86 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020-2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Parameter for cell."""
from copy import copy
import numbers
import numpy as np
from mindspore import log as logger
from .._c_expression import ParamInfo
from . import dtype as mstype
from .. import context
from ..parallel._utils import _get_parallel_mode
from .initializer import initializer
from .tensor import Tensor
from .._checkparam import Validator
from .._c_expression import Tensor as Tensor_
from ..parallel._tensor import _get_slice_index
from ..parallel._auto_parallel_context import auto_parallel_context
from ..parallel._ps_context import _is_role_worker, _is_role_pserver, _is_role_sched, _clone_hash_table, _is_fl_mode
from ..parallel._ps_context import _reinsert_hash_table_size
from ..parallel._ps_context import _insert_weight_init_info, _insert_accumu_init_info
from .seed import _get_global_and_op_seed
__all__ = ['Parameter', 'ParameterTuple']
PARAMETER_NAME_DEFAULT = "Parameter"
PARAMETER_NAME_PREFIX_MAX_LEN = 1024
def _is_in_parallel_mode():
"""Get parallel mode."""
return auto_parallel_context().get_parallel_mode() in ["semi_auto_parallel", "auto_parallel"]
def init_to_value(init):
"""
Get value of initializer.
Returns:
Value of the initializer.
Raises:
ValueError: The value of the argument 'init' is not correct.
"""
if isinstance(init, str):
if init == 'zeros':
return 0.0
if init == 'ones':
return 1.0
raise ValueError("The argument 'init' should be one of values in ['zeros', 'ones'].")
if isinstance(init, numbers.Number):
return float(init)
raise ValueError("The argument 'init' should be number or string, but got {}.".format(type(init)))
class Parameter(Tensor_):
"""
`Parameter` is a `Tensor` subclass, when they are assigned as Cell attributes they are automatically added to
the list of its parameters, and will appear e.g. in `cell.get_parameters()` iterator.
Note:
In auto_parallel mode of "semi_auto_parallel" and "auto_parallel", if init `Parameter` by
a `Tensor`, the type of Parameter will be `Tensor`. `Tensor`
will save the shape and type info of a tensor with no memory usage. The shape can be changed while
compiling for auto-parallel. Call `init_data` will return a Tensor Parameter with initialized data.
If there is an operator in the network that requires part of the inputs to be Parameter,
then the Parameters as this part of the inputs are not allowed to be cast.
Give each `Parameter` a unique name to facilitate subsequent operations and updates.
        If there are two or more `Parameter` objects with the same name in a network,
        you will be prompted to set a unique name when defining them.
Args:
default_input (Union[Tensor, int, float, numpy.ndarray, list]): Parameter data,
to initialize the parameter data.
name (str): Name of the parameter. Default: None.
1) If the parameter is not given a name, the default name is its variable name. For example, the name of
param_a below is name_a, and the name of param_b is the variable name param_b.
.. code-block::
self.param_a = Parameter(Tensor([1], ms.float32), name="name_a")
self.param_b = Parameter(Tensor([2], ms.float32))
            2) If a parameter in a list or tuple is not given a name, a unique name will be assigned to it. For example, the names of
parameters below are **Parameter$1** and **Parameter$2**.
.. code-block::
self.param_list = [Parameter(Tensor([3], ms.float32)),
Parameter(Tensor([4], ms.float32))]
3) If the parameter is given a name, and the same name exists between different parameters, an exception
will be thrown. For example, "its name 'name_a' already exists." will be thrown.
.. code-block::
self.param_a = Parameter(Tensor([1], ms.float32), name="name_a")
self.param_tuple = (Parameter(Tensor([5], ms.float32), name="name_a"),
Parameter(Tensor([6], ms.float32)))
            4) If a parameter appears multiple times in a list or tuple, the name of the object is checked only once. For
example, the following example will not throw an exception.
.. code-block::
self.param_a = Parameter(Tensor([1], ms.float32), name="name_a")
self.param_tuple = (self.param_a, self.param_a)
requires_grad (bool): True if the parameter requires gradient. Default: True.
layerwise_parallel (bool): When layerwise_parallel is true in data/hybrid parallel mode,
broadcast and gradients communication would not be applied to parameters. Default: False.
parallel_optimizer (bool): It is used to filter the weight shard operation in semi auto or auto parallel
            mode. It works only when the parallel optimizer is enabled in `mindspore.context.set_auto_parallel_context()`.
Default: True.
Examples:
>>> import numpy as np
>>> from mindspore import Parameter, Tensor
>>> import mindspore.ops as ops
>>> import mindspore.nn as nn
>>> import mindspore
>>>
>>> class Net(nn.Cell):
... def __init__(self):
... super(Net, self).__init__()
... self.matmul = ops.MatMul()
... self.weight = Parameter(Tensor(np.ones((1, 2)), mindspore.float32), name="w", requires_grad=True)
...
... def construct(self, x):
... out = self.matmul(self.weight, x)
... return out
>>> net = Net()
>>> x = Tensor(np.ones((2, 1)), mindspore.float32)
>>> print(net(x))
[[2.]]
>>> net.weight.set_data(Tensor(np.zeros((1, 2)), mindspore.float32))
>>> print(net(x))
[[0.]]
"""
__base_type__ = {}
def __new__(cls, default_input, *args, **kwargs):
init_data_flag = bool(isinstance(default_input, Tensor) and default_input.has_init)
input_class, *class_init_args = Parameter._get_parameter_new_args(default_input)
new_type = Parameter._get_base_class(input_class)
obj = input_class.__new__(new_type)
input_class.__init__(obj, *class_init_args)
# it's better to make the Initializer a kind of tensor.
obj.init_mode = None
obj.is_default_input_init = init_data_flag
if obj.has_init:
obj.init_mode = default_input
return obj
def __reduce_ex__(self, _):
data = self
if self.init_mode is not None:
data = self.init_mode
else:
# cast to break deep infinite loop while deepcopy
data = Tensor(self)
return (
Parameter, (data, self.name, self.requires_grad, self.layerwise_parallel))
def __init__(self, default_input, name=None, requires_grad=True, layerwise_parallel=False, parallel_optimizer=True):
self.param_info = ParamInfo()
self.init_in_server = False
self.cache_enable = False
self.name = name
self.requires_grad = requires_grad
self.layerwise_parallel = layerwise_parallel
self.parallel_optimizer = parallel_optimizer
# this flag for tensor copy data.
self.init_flag = False
# this flag is for ge variable copy data.
self.is_init = False
self._inited_param = None
self._sliced = False
self.is_param_ps = False
self.push_weight_to_server = False
self.pull_weight_from_server = False
self.requires_aggr = True
self._cast_type = None
self._unique = False
self.is_in_parallel = _is_in_parallel_mode()
self._pipeline_stage_list = []
if isinstance(default_input, (Tensor_, Tensor)):
Tensor_.__init__(self, default_input.dtype, default_input.shape)
elif isinstance(default_input, int):
Tensor_.__init__(self, mstype.int64, ())
elif isinstance(default_input, float):
Tensor_.__init__(self, mstype.float32, ())
elif isinstance(default_input, (np.ndarray, list)):
Tensor_.__init__(self, default_input)
else:
raise TypeError(f"The type of the argument 'default_input' must be in ['Tensor', 'int', 'float',"
f" 'numpy.ndarray', 'list']. But got type {type(default_input)}.")
self.param_info.parameter_shape = self.shape
def __deepcopy__(self, memodict):
new_obj = Parameter(self)
new_obj.name = self.name
new_obj._inited_param = self._inited_param
return new_obj
@staticmethod
def _get_base_class(input_class):
input_class_name = Parameter.__name__
if input_class_name in Parameter.__base_type__:
new_type = Parameter.__base_type__[input_class_name]
else:
new_type = type(input_class_name, (Parameter, input_class), {})
Parameter.__base_type__[input_class_name] = new_type
return new_type
@staticmethod
def _get_parameter_new_args(data):
"""Set `set_data` of current `Parameter`."""
if isinstance(data, bool):
raise ValueError('Parameter data can not be `bool`')
if isinstance(data, Tensor) and data.has_init:
if not _is_fl_mode():
if _is_in_parallel_mode() or _is_role_worker() or _is_role_sched() or _is_role_pserver():
# do not init data while in auto parallel.
return (Tensor, None, data.dtype, data.shape, data.init)
data = data.init_data().asnumpy()
elif isinstance(data, Tensor):
# make a copy of Tensor to init the parameter
return (Tensor, data.asnumpy(),)
if isinstance(data, int):
return (Tensor, data, mstype.int32)
if isinstance(data, float):
return (Tensor, data, mstype.float32)
return (Tensor, data)
def __str__(self):
return f'Parameter (name={self.name}, shape={self.shape}, dtype={self.dtype}, ' \
f'requires_grad={self.requires_grad})'
def __repr__(self):
return self.__str__()
def __parameter__(self):
"""For parse check."""
def set_param_ps(self, init_in_server=False):
"""
Set whether the trainable parameter is updated by parameter server and whether the
trainable parameter is initialized on server.
Note:
It only works when a running task is in the parameter server mode.
Args:
init_in_server (bool): Whether trainable parameter updated by parameter server is
initialized on server. Default: False.
"""
if not(_is_role_worker() or _is_role_pserver() or _is_role_sched()):
raise RuntimeError("Must complete following two steps before calling set_param_ps: \n"
"1. context.set_ps_context(enable_ps=True) \n"
"2. export MS_ROLE environment variable \n"
"Please refer to the official website for detailed usage.")
self.is_param_ps = True
self.init_in_server = init_in_server
self.param_info.init_in_server = init_in_server
def set_param_fl(self, push_to_server=False, pull_from_server=False, requires_aggr=True):
"""
Set the way of parameter and server interaction.
Args:
push_to_server (bool): Whether the parameter should be pushed to server. Default: False.
pull_from_server (bool): Whether the parameter should be pulled from server. Default: False.
requires_aggr (bool): Whether the parameter should be aggregated in the server. Default: True.
"""
if push_to_server:
self.push_weight_to_server = True
if pull_from_server:
self.pull_weight_from_server = True
if not requires_aggr:
self.requires_aggr = False
self.param_info.requires_aggr = False
@property
def inited_param(self):
"""
        Get the new parameter created after calling init_data.
        Default is None. If `self` is a Parameter without data, the initialized
        Parameter holding the data will be recorded here after `init_data` is called.
"""
return self._inited_param
@property
def name(self):
"""Get the name of the parameter."""
return self.param_info.name
@name.setter
def name(self, name_):
"""
Define a name for the parameter.
Args:
name_ (`str` or `None`): The name of the parameter. When the parameter is None or an empty string,
the default value `PARAMETER_NAME_DEFAULT` is used.
"""
if name_ is None:
name_ = PARAMETER_NAME_DEFAULT
elif isinstance(name_, str):
name_ = name_.strip()
if name_ == '':
name_ = PARAMETER_NAME_DEFAULT
if len(name_) > PARAMETER_NAME_PREFIX_MAX_LEN:
raise ValueError("The length of the '{}' name should be less than {}.".
format(name_, PARAMETER_NAME_PREFIX_MAX_LEN))
else:
raise ValueError("The type of the Parameter's name should be 'string' or 'None', "
"but got {}.".format(type(name_)))
if _is_role_worker() and self.cache_enable:
if len(self.shape) != 2:
raise RuntimeError("The dims of parameter '{}' must be 2, but got {}."
.format(self.name, len(self.shape)))
_reinsert_hash_table_size(name_, self.param_info.name, self.shape[0], self.shape[1])
self.param_info.name = name_
@property
def sliced(self):
"""Get slice status of the parameter."""
return self._sliced
@sliced.setter
def sliced(self, sliced_):
self._sliced = sliced_
@property
def comm_fusion(self):
"""
Get the fusion type (int) for communication operators corresponding to this parameter.
In `AUTO_PARALLEL` and `SEMI_AUTO_PARALLEL` mode, some communication operators used for parameters or
gradients aggregation are inserted automatically. The value of fusion must be greater than or equal to 0.
When the value of fusion is 0, operators will not be fused together.
"""
return self.param_info.comm_fusion
@comm_fusion.setter
def comm_fusion(self, comm_fusion_):
if context.get_context("mode") == context.PYNATIVE_MODE and "auto_parallel" in _get_parallel_mode():
raise RuntimeError(
"`comm_fusion` does not support PYNATIVE_MODE in AUTO_PARALLEL and SEMI_AUTO_PARALLEL mode.")
Validator.check_non_negative_int(comm_fusion_)
self.param_info.comm_fusion = comm_fusion_
@property
def parallel_optimizer_comm_recompute(self):
"""
Get the communication recompute status(bool) of optimizer parallel for the parameter.
In `AUTO_PARALLEL` and `SEMI_AUTO_PARALLEL` mode, when applying parallel optimizer, some AllGather operators
used for parameters gathering are inserted automatically. It is used to control the recompute attr for those
AllGather operators.
Note:
- Only `Graph` mode is supported.
- It is recommended to use cell.recompute(parallel_optimizer_comm_recompute=True/False) to configure
the AllGather operators introducing by parallel optimizer rather than using this interface directly.
"""
return self.param_info.parallel_optimizer_comm_recompute
@parallel_optimizer_comm_recompute.setter
def parallel_optimizer_comm_recompute(self, parallel_optimizer_comm_recompute_):
Validator.check_bool(parallel_optimizer_comm_recompute_)
self.param_info.parallel_optimizer_comm_recompute = parallel_optimizer_comm_recompute_
@property
def unique(self):
"""Whether the parameter is already unique or not."""
return self._unique
@unique.setter
def unique(self, unique_):
self._unique = unique_
def clone(self, init='same'):
"""
Clone the parameter.
Args:
init (Union[Tensor, str, numbers.Number]): Initialize the shape and dtype of the parameter.
If `init` is a `Tensor` or `numbers.Number`, clone a new parameter with the same shape
and dtype, and the data of the new parameter will be set according to `init`. If `init`
is a `str`, the `init` should be the alias of the class inheriting from `Initializer`.
For example, if `init` is 'same', clone a new parameter with the same data, shape, and
dtype. Default: 'same'.
Returns:
Parameter, a new parameter.
"""
x = copy(self)
x.param_info = self.param_info.clone()
x.is_init = False
x.init = self.init
x.is_param_ps = self.is_param_ps
x.init_in_server = self.init_in_server
x.cache_enable = self.cache_enable
x.requires_aggr = self.requires_aggr
if self.cache_shape:
x.cache_shape = self.cache_shape
if init != 'same':
shape = self.shape
dtype = self.dtype
x.set_data(initializer(init, shape=shape, dtype=dtype))
return x
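    # A minimal usage sketch (assuming `w` is an existing float32 Parameter):
    #     w_same = w.clone()              # 'same': keeps the original values
    #     w_zero = w.clone(init='zeros')  # same shape/dtype, re-initialized to zeros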
@property
def layerwise_parallel(self):
"""
Get the layerwise parallel status(bool) of the parameter.
When layerwise_parallel is true in `DATA_PARALLEL` and `HYBRID_PARALLEL` parallel mode, broadcast and gradients
communication would not be applied to parameters.
"""
return self.param_info.layerwise_parallel
@layerwise_parallel.setter
def layerwise_parallel(self, value=True):
if not isinstance(value, bool):
raise TypeError("The argument `layerwise_parallel` must be bool type.")
self.param_info.layerwise_parallel = value
@property
def parallel_optimizer(self):
"""
Get the optimizer parallel status(bool) of the parameter.
It is used to filter the weight shard operation in `AUTO_PARALLEL` and `SEMI_AUTO_PARALLEL` mode. It works only
        when the parallel optimizer is enabled in `mindspore.context.set_auto_parallel_context()`.
"""
return self.param_info.parallel_optimizer
@parallel_optimizer.setter
def parallel_optimizer(self, value=True):
if not isinstance(value, bool):
raise TypeError("The argument `parallel_optimizer` must be bool type.")
self.param_info.parallel_optimizer = value
@property
def cache_enable(self):
"""Return whether the parameter is cache enable."""
return self.param_info.cache_enable
@cache_enable.setter
def cache_enable(self, value=True):
if not isinstance(value, bool):
raise TypeError("The argument `cache_enable` must be bool type.")
self.param_info.cache_enable = value
@property
def cache_shape(self):
"""Return the cache shape corresponding to the parameter if use cache."""
return self.param_info.cache_shape
@cache_shape.setter
def cache_shape(self, value):
if not isinstance(value, (tuple, list)):
raise TypeError("The argument `cache_shape` must be tuple or list type.")
self.param_info.cache_shape = value
@property
def requires_grad(self):
"""
Return whether the parameter requires gradient.
The main function of requires_grad is to tell auto grad to start recording operations on a Tensor.
        If a Tensor has requires_grad=False, auto grad will not record operations
        on the tensor and no gradient will be computed for it.
"""
return self.param_info.requires_grad
@requires_grad.setter
def requires_grad(self, value=True):
if not isinstance(value, bool):
raise TypeError("The argument `requires_grad` must be bool type")
self.param_info.requires_grad = value
@property
def data(self):
"""Return the parameter object."""
return self
def _update_tensor_data(self, data):
"""Update the parameter by a Tensor."""
if isinstance(self, Tensor):
self.init_flag = False
self.init = None
return self.assign_value(data)
new_param = Parameter(data, self.name, self.requires_grad)
new_param.param_info = self.param_info
return new_param
def add_pipeline_stage(self, stage):
logger.warning(f"This interface may be deleted in the future.")
if not isinstance(stage, int) or stage < 0:
raise TypeError("`stage` must be a positive number of int type")
self._pipeline_stage_list.append(stage)
def _raise_type_error(self, incoming):
raise TypeError(f"Incoming Parameter dtype can not be converted to current dtype implicitly. "
f"Current dtype is {self.dtype}, and incoming is {incoming}. "
f"Use .set_dtype(xxx) to change the dtype.")
@staticmethod
def _set_data_check_input_valid(current_shape, data_shape, current_tensor_is_init,
incoming_tensor_is_init, slice_shape=False):
if incoming_tensor_is_init and not current_tensor_is_init:
raise TypeError("The original tensor data is initialized, but the argument 'data' is not initialized."
"Please initialize 'data' before call this method.")
if tuple(current_shape) != tuple(data_shape):
# If Slice create Parameter shape can be change.
if not slice_shape:
raise ValueError(f"Can not change the shape of Parameter which has been initialized."
f" Current shape is {current_shape}, and incoming is {data_shape}.")
def set_data(self, data, slice_shape=False):
"""
Set Parameter's data.
Args:
data (Union[Tensor, int, float]): New data.
slice_shape (bool): If slice the parameter is set to true, the shape is not checked for consistency.
Default: False.
Returns:
Parameter, the parameter after set data.
"""
if not isinstance(data, (Tensor, int, float)):
raise TypeError(f"Parameter data must be [`Tensor`, `int`, `float`] or a kind of `Tensor` "
f"(like `Tensor`). But with type {type(data)}.")
if isinstance(data, (int, float)):
if self.dtype in mstype.int_type and isinstance(data, float):
self._raise_type_error(mstype.float_)
data = Tensor(data, self.dtype)
# both not init.
incoming_tensor_is_init = isinstance(data, Tensor) and not data.has_init
current_tensor_is_init = isinstance(self, Tensor) and not self.has_init
Parameter._set_data_check_input_valid(self.shape, data.shape, current_tensor_is_init, incoming_tensor_is_init,
slice_shape)
if self.dtype != data.dtype:
if mstype.implicit_conversion_seq[self.dtype] < mstype.implicit_conversion_seq[data.dtype]:
self._raise_type_error(data.dtype)
else:
from mindspore.ops import functional as F
if isinstance(data, Tensor) and data.init is not None:
data.init_data()
data = F.cast(data, self.dtype)
if isinstance(data, Tensor) and data.has_init:
# The parameter has been initialized, directly update by the data
if current_tensor_is_init:
self._update_tensor_data(data.init_data())
else:
# also update the related inited parameter data
if self.inited_param is not None:
self.inited_param.set_data(data)
self.init_mode = data
elif incoming_tensor_is_init or current_tensor_is_init:
self._update_tensor_data(data)
self.sliced = slice_shape
return self
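    # A minimal usage sketch (shapes must match unless slice_shape=True):
    #     p = Parameter(Tensor(np.zeros((1, 2)), mstype.float32), name='p')
    #     p.set_data(Tensor(np.ones((1, 2)), mstype.float32))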
def _get_init_data_args(self, layout=None):
init_data_args = ()
if layout:
if not isinstance(layout, tuple):
raise TypeError("The argument 'layout' should be tuple, but got {}.".format(type(layout)))
if len(layout) < 6:
raise ValueError("The length of 'layout' must be larger than 5, but got {}.".format(len(layout)))
slice_index = int(_get_slice_index(layout[0], layout[1]))
init_data_args += (slice_index, layout[2], layout[5])
return init_data_args
def init_data(self, layout=None, set_sliced=False):
"""
Initialize the parameter's data.
Args:
layout (Union[None, tuple]): The parameter's layout info.
                layout [dev_mat, tensor_map, slice_shape, field_size, uniform_split, opt_shard_group]. Default: None.
It's not None only in 'SEMI_AUTO_PARALLEL' or 'AUTO_PARALLEL' mode.
- dev_mat (list(int)): The parameter's device matrix.
- tensor_map (list(int)): The parameter's tensor map.
- slice_shape (list(int)): The parameter's slice shape.
                - field_size (int): The parameter's field size.
- uniform_split (bool): Whether the parameter is split evenly.
- opt_shard_group (str): The group of the parameter while running optimizer parallel.
set_sliced (bool): True if the parameter is set sliced after initializing the data.
Default: False.
Raises:
RuntimeError: If it is from Initializer, and parallel mode has changed after the Initializer created.
ValueError: If the length of the layout is less than 6.
TypeError: If `layout` is not tuple.
Returns:
Parameter, the `Parameter` after initializing data. If current `Parameter` was already initialized before,
returns the same initialized `Parameter`.
"""
if self.is_default_input_init and self.is_in_parallel != _is_in_parallel_mode():
raise RuntimeError("Must set or change parallel mode before any Tensor created.")
if self.init_mode is None:
return self
if self.inited_param is not None:
return self.inited_param
if _is_role_worker() and self.cache_enable:
global_seed, op_seed = _get_global_and_op_seed()
_insert_weight_init_info(self.name, global_seed, op_seed)
init_data_args = self._get_init_data_args(layout)
if _is_role_pserver():
return self
if self.init_in_server and self.is_param_ps and isinstance(self.init_mode, Tensor) and \
self.init_mode.init is not None and (_is_role_worker() or _is_role_sched()):
if self.cache_enable:
data = self.init_mode.init_data(*init_data_args)
else:
data = self.init_mode.init_data(0, [1])
else:
data = self.init_mode.init_data(*init_data_args)
obj = self._update_tensor_data(data)
if id(obj) != id(self):
self._inited_param = obj
obj.init_mode = None
obj.sliced = set_sliced
return obj
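    # A minimal usage sketch: a Parameter built from an Initializer holds no
    # concrete values until init_data() is called.
    #     p = Parameter(initializer('normal', [2, 3]), name='w')
    #     p = p.init_data()   # returns the Parameter holding materialized data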
class ParameterTuple(tuple):
"""
    Inherited from tuple, ParameterTuple is used to store multiple parameters.
Note:
It is used to store the parameters of the network into the parameter tuple collection.
"""
def __new__(cls, iterable):
"""Create instance object of ParameterTuple."""
data = tuple(iterable)
ids = set()
names = set()
for x in data:
if not isinstance(x, Parameter):
raise TypeError(f"ParameterTuple input should be `Parameter` collection."
f"But got a {type(iterable)}, {iterable}")
if id(x) not in ids:
if x.name in names:
raise ValueError("The value {} , its name '{}' already exists. "
"Please set a unique name for the parameter.".format(x, x.name))
names.add(x.name)
ids.add(id(x))
return tuple.__new__(ParameterTuple, tuple(data))
def clone(self, prefix, init='same'):
"""
Clone the parameters in ParameterTuple element-wisely to generate a new ParameterTuple.
Args:
            prefix (str): Namespace of the parameter; the prefix string will be added to the names of parameters
                in the ParameterTuple.
init (Union[Tensor, str, numbers.Number]): Clone the shape and dtype of Parameters in ParameterTuple and
set data according to `init`. Default: 'same'.
If `init` is a `Tensor` , set the new Parameter data to the input Tensor.
If `init` is `numbers.Number` , set the new Parameter data to the input number.
                If `init` is a `str`, data will be set according to the initialization method of the same name in
                the `Initializer`.
If `init` is 'same', the new Parameter has the same value with the original Parameter.
Returns:
Tuple, the new Parameter tuple.
"""
Validator.check_str_by_regular(prefix)
new = []
for x in self:
x1 = x.clone(init)
x1.name = prefix + "." + x1.name
new.append(x1)
if not x1.cache_enable:
continue
if _is_role_worker():
_clone_hash_table(x.name, x1.name)
_insert_accumu_init_info(x1.name, init_to_value(init))
return ParameterTuple(new)
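    # A typical usage sketch (illustrative; `net` is assumed to be a Cell):
    #     params = ParameterTuple(net.trainable_params())
    #     moments = params.clone(prefix='moments', init='zeros')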
def __parameter_tuple__(self):
"""For parse check."""
| 43.036212 | 120 | 0.631489 |
from copy import copy
import numbers
import numpy as np
from mindspore import log as logger
from .._c_expression import ParamInfo
from . import dtype as mstype
from .. import context
from ..parallel._utils import _get_parallel_mode
from .initializer import initializer
from .tensor import Tensor
from .._checkparam import Validator
from .._c_expression import Tensor as Tensor_
from ..parallel._tensor import _get_slice_index
from ..parallel._auto_parallel_context import auto_parallel_context
from ..parallel._ps_context import _is_role_worker, _is_role_pserver, _is_role_sched, _clone_hash_table, _is_fl_mode
from ..parallel._ps_context import _reinsert_hash_table_size
from ..parallel._ps_context import _insert_weight_init_info, _insert_accumu_init_info
from .seed import _get_global_and_op_seed
__all__ = ['Parameter', 'ParameterTuple']
PARAMETER_NAME_DEFAULT = "Parameter"
PARAMETER_NAME_PREFIX_MAX_LEN = 1024
def _is_in_parallel_mode():
return auto_parallel_context().get_parallel_mode() in ["semi_auto_parallel", "auto_parallel"]
def init_to_value(init):
if isinstance(init, str):
if init == 'zeros':
return 0.0
if init == 'ones':
return 1.0
raise ValueError("The argument 'init' should be one of values in ['zeros', 'ones'].")
if isinstance(init, numbers.Number):
return float(init)
raise ValueError("The argument 'init' should be number or string, but got {}.".format(type(init)))
class Parameter(Tensor_):
__base_type__ = {}
def __new__(cls, default_input, *args, **kwargs):
init_data_flag = bool(isinstance(default_input, Tensor) and default_input.has_init)
input_class, *class_init_args = Parameter._get_parameter_new_args(default_input)
new_type = Parameter._get_base_class(input_class)
obj = input_class.__new__(new_type)
input_class.__init__(obj, *class_init_args)
obj.init_mode = None
obj.is_default_input_init = init_data_flag
if obj.has_init:
obj.init_mode = default_input
return obj
def __reduce_ex__(self, _):
data = self
if self.init_mode is not None:
data = self.init_mode
else:
# cast to break deep infinite loop while deepcopy
data = Tensor(self)
return (
Parameter, (data, self.name, self.requires_grad, self.layerwise_parallel))
def __init__(self, default_input, name=None, requires_grad=True, layerwise_parallel=False, parallel_optimizer=True):
self.param_info = ParamInfo()
self.init_in_server = False
self.cache_enable = False
self.name = name
self.requires_grad = requires_grad
self.layerwise_parallel = layerwise_parallel
self.parallel_optimizer = parallel_optimizer
# this flag for tensor copy data.
self.init_flag = False
# this flag is for ge variable copy data.
self.is_init = False
self._inited_param = None
self._sliced = False
self.is_param_ps = False
self.push_weight_to_server = False
self.pull_weight_from_server = False
self.requires_aggr = True
self._cast_type = None
self._unique = False
self.is_in_parallel = _is_in_parallel_mode()
self._pipeline_stage_list = []
if isinstance(default_input, (Tensor_, Tensor)):
Tensor_.__init__(self, default_input.dtype, default_input.shape)
elif isinstance(default_input, int):
Tensor_.__init__(self, mstype.int64, ())
elif isinstance(default_input, float):
Tensor_.__init__(self, mstype.float32, ())
elif isinstance(default_input, (np.ndarray, list)):
Tensor_.__init__(self, default_input)
else:
raise TypeError(f"The type of the argument 'default_input' must be in ['Tensor', 'int', 'float',"
f" 'numpy.ndarray', 'list']. But got type {type(default_input)}.")
self.param_info.parameter_shape = self.shape
def __deepcopy__(self, memodict):
new_obj = Parameter(self)
new_obj.name = self.name
new_obj._inited_param = self._inited_param
return new_obj
@staticmethod
def _get_base_class(input_class):
input_class_name = Parameter.__name__
if input_class_name in Parameter.__base_type__:
new_type = Parameter.__base_type__[input_class_name]
else:
new_type = type(input_class_name, (Parameter, input_class), {})
Parameter.__base_type__[input_class_name] = new_type
return new_type
@staticmethod
def _get_parameter_new_args(data):
if isinstance(data, bool):
raise ValueError('Parameter data can not be `bool`')
if isinstance(data, Tensor) and data.has_init:
if not _is_fl_mode():
if _is_in_parallel_mode() or _is_role_worker() or _is_role_sched() or _is_role_pserver():
# do not init data while in auto parallel.
return (Tensor, None, data.dtype, data.shape, data.init)
data = data.init_data().asnumpy()
elif isinstance(data, Tensor):
# make a copy of Tensor to init the parameter
return (Tensor, data.asnumpy(),)
if isinstance(data, int):
return (Tensor, data, mstype.int32)
if isinstance(data, float):
return (Tensor, data, mstype.float32)
return (Tensor, data)
def __str__(self):
return f'Parameter (name={self.name}, shape={self.shape}, dtype={self.dtype}, ' \
f'requires_grad={self.requires_grad})'
def __repr__(self):
return self.__str__()
def __parameter__(self):
def set_param_ps(self, init_in_server=False):
if not(_is_role_worker() or _is_role_pserver() or _is_role_sched()):
raise RuntimeError("Must complete following two steps before calling set_param_ps: \n"
"1. context.set_ps_context(enable_ps=True) \n"
"2. export MS_ROLE environment variable \n"
"Please refer to the official website for detailed usage.")
self.is_param_ps = True
self.init_in_server = init_in_server
self.param_info.init_in_server = init_in_server
def set_param_fl(self, push_to_server=False, pull_from_server=False, requires_aggr=True):
if push_to_server:
self.push_weight_to_server = True
if pull_from_server:
self.pull_weight_from_server = True
if not requires_aggr:
self.requires_aggr = False
self.param_info.requires_aggr = False
@property
def inited_param(self):
return self._inited_param
@property
def name(self):
return self.param_info.name
@name.setter
def name(self, name_):
if name_ is None:
name_ = PARAMETER_NAME_DEFAULT
elif isinstance(name_, str):
name_ = name_.strip()
if name_ == '':
name_ = PARAMETER_NAME_DEFAULT
if len(name_) > PARAMETER_NAME_PREFIX_MAX_LEN:
raise ValueError("The length of the '{}' name should be less than {}.".
format(name_, PARAMETER_NAME_PREFIX_MAX_LEN))
else:
raise ValueError("The type of the Parameter's name should be 'string' or 'None', "
"but got {}.".format(type(name_)))
if _is_role_worker() and self.cache_enable:
if len(self.shape) != 2:
raise RuntimeError("The dims of parameter '{}' must be 2, but got {}."
.format(self.name, len(self.shape)))
_reinsert_hash_table_size(name_, self.param_info.name, self.shape[0], self.shape[1])
self.param_info.name = name_
@property
def sliced(self):
return self._sliced
@sliced.setter
def sliced(self, sliced_):
self._sliced = sliced_
@property
def comm_fusion(self):
return self.param_info.comm_fusion
@comm_fusion.setter
def comm_fusion(self, comm_fusion_):
if context.get_context("mode") == context.PYNATIVE_MODE and "auto_parallel" in _get_parallel_mode():
raise RuntimeError(
"`comm_fusion` does not support PYNATIVE_MODE in AUTO_PARALLEL and SEMI_AUTO_PARALLEL mode.")
Validator.check_non_negative_int(comm_fusion_)
self.param_info.comm_fusion = comm_fusion_
@property
def parallel_optimizer_comm_recompute(self):
return self.param_info.parallel_optimizer_comm_recompute
@parallel_optimizer_comm_recompute.setter
def parallel_optimizer_comm_recompute(self, parallel_optimizer_comm_recompute_):
Validator.check_bool(parallel_optimizer_comm_recompute_)
self.param_info.parallel_optimizer_comm_recompute = parallel_optimizer_comm_recompute_
@property
def unique(self):
return self._unique
@unique.setter
def unique(self, unique_):
self._unique = unique_
def clone(self, init='same'):
x = copy(self)
x.param_info = self.param_info.clone()
x.is_init = False
x.init = self.init
x.is_param_ps = self.is_param_ps
x.init_in_server = self.init_in_server
x.cache_enable = self.cache_enable
x.requires_aggr = self.requires_aggr
if self.cache_shape:
x.cache_shape = self.cache_shape
if init != 'same':
shape = self.shape
dtype = self.dtype
x.set_data(initializer(init, shape=shape, dtype=dtype))
return x
@property
def layerwise_parallel(self):
return self.param_info.layerwise_parallel
@layerwise_parallel.setter
def layerwise_parallel(self, value=True):
if not isinstance(value, bool):
raise TypeError("The argument `layerwise_parallel` must be bool type.")
self.param_info.layerwise_parallel = value
@property
def parallel_optimizer(self):
return self.param_info.parallel_optimizer
@parallel_optimizer.setter
def parallel_optimizer(self, value=True):
if not isinstance(value, bool):
raise TypeError("The argument `parallel_optimizer` must be bool type.")
self.param_info.parallel_optimizer = value
@property
def cache_enable(self):
return self.param_info.cache_enable
@cache_enable.setter
def cache_enable(self, value=True):
if not isinstance(value, bool):
raise TypeError("The argument `cache_enable` must be bool type.")
self.param_info.cache_enable = value
@property
def cache_shape(self):
return self.param_info.cache_shape
@cache_shape.setter
def cache_shape(self, value):
if not isinstance(value, (tuple, list)):
raise TypeError("The argument `cache_shape` must be tuple or list type.")
self.param_info.cache_shape = value
@property
def requires_grad(self):
return self.param_info.requires_grad
@requires_grad.setter
def requires_grad(self, value=True):
if not isinstance(value, bool):
raise TypeError("The argument `requires_grad` must be bool type")
self.param_info.requires_grad = value
@property
def data(self):
return self
def _update_tensor_data(self, data):
if isinstance(self, Tensor):
self.init_flag = False
self.init = None
return self.assign_value(data)
new_param = Parameter(data, self.name, self.requires_grad)
new_param.param_info = self.param_info
return new_param
def add_pipeline_stage(self, stage):
logger.warning(f"This interface may be deleted in the future.")
if not isinstance(stage, int) or stage < 0:
raise TypeError("`stage` must be a positive number of int type")
self._pipeline_stage_list.append(stage)
def _raise_type_error(self, incoming):
raise TypeError(f"Incoming Parameter dtype can not be converted to current dtype implicitly. "
f"Current dtype is {self.dtype}, and incoming is {incoming}. "
f"Use .set_dtype(xxx) to change the dtype.")
@staticmethod
def _set_data_check_input_valid(current_shape, data_shape, current_tensor_is_init,
incoming_tensor_is_init, slice_shape=False):
if incoming_tensor_is_init and not current_tensor_is_init:
raise TypeError("The original tensor data is initialized, but the argument 'data' is not initialized."
"Please initialize 'data' before call this method.")
if tuple(current_shape) != tuple(data_shape):
if not slice_shape:
raise ValueError(f"Can not change the shape of Parameter which has been initialized."
f" Current shape is {current_shape}, and incoming is {data_shape}.")
def set_data(self, data, slice_shape=False):
if not isinstance(data, (Tensor, int, float)):
raise TypeError(f"Parameter data must be [`Tensor`, `int`, `float`] or a kind of `Tensor` "
f"(like `Tensor`). But with type {type(data)}.")
if isinstance(data, (int, float)):
if self.dtype in mstype.int_type and isinstance(data, float):
self._raise_type_error(mstype.float_)
data = Tensor(data, self.dtype)
incoming_tensor_is_init = isinstance(data, Tensor) and not data.has_init
current_tensor_is_init = isinstance(self, Tensor) and not self.has_init
Parameter._set_data_check_input_valid(self.shape, data.shape, current_tensor_is_init, incoming_tensor_is_init,
slice_shape)
if self.dtype != data.dtype:
if mstype.implicit_conversion_seq[self.dtype] < mstype.implicit_conversion_seq[data.dtype]:
self._raise_type_error(data.dtype)
else:
from mindspore.ops import functional as F
if isinstance(data, Tensor) and data.init is not None:
data.init_data()
data = F.cast(data, self.dtype)
if isinstance(data, Tensor) and data.has_init:
if current_tensor_is_init:
self._update_tensor_data(data.init_data())
else:
if self.inited_param is not None:
self.inited_param.set_data(data)
self.init_mode = data
elif incoming_tensor_is_init or current_tensor_is_init:
self._update_tensor_data(data)
self.sliced = slice_shape
return self
def _get_init_data_args(self, layout=None):
init_data_args = ()
if layout:
if not isinstance(layout, tuple):
raise TypeError("The argument 'layout' should be tuple, but got {}.".format(type(layout)))
if len(layout) < 6:
raise ValueError("The length of 'layout' must be larger than 5, but got {}.".format(len(layout)))
slice_index = int(_get_slice_index(layout[0], layout[1]))
init_data_args += (slice_index, layout[2], layout[5])
return init_data_args
def init_data(self, layout=None, set_sliced=False):
if self.is_default_input_init and self.is_in_parallel != _is_in_parallel_mode():
raise RuntimeError("Must set or change parallel mode before any Tensor created.")
if self.init_mode is None:
return self
if self.inited_param is not None:
return self.inited_param
if _is_role_worker() and self.cache_enable:
global_seed, op_seed = _get_global_and_op_seed()
_insert_weight_init_info(self.name, global_seed, op_seed)
init_data_args = self._get_init_data_args(layout)
if _is_role_pserver():
return self
if self.init_in_server and self.is_param_ps and isinstance(self.init_mode, Tensor) and \
self.init_mode.init is not None and (_is_role_worker() or _is_role_sched()):
if self.cache_enable:
data = self.init_mode.init_data(*init_data_args)
else:
data = self.init_mode.init_data(0, [1])
else:
data = self.init_mode.init_data(*init_data_args)
obj = self._update_tensor_data(data)
if id(obj) != id(self):
self._inited_param = obj
obj.init_mode = None
obj.sliced = set_sliced
return obj
class ParameterTuple(tuple):
def __new__(cls, iterable):
data = tuple(iterable)
ids = set()
names = set()
for x in data:
if not isinstance(x, Parameter):
raise TypeError(f"ParameterTuple input should be `Parameter` collection."
f"But got a {type(iterable)}, {iterable}")
if id(x) not in ids:
if x.name in names:
raise ValueError("The value {} , its name '{}' already exists. "
"Please set a unique name for the parameter.".format(x, x.name))
names.add(x.name)
ids.add(id(x))
return tuple.__new__(ParameterTuple, tuple(data))
def clone(self, prefix, init='same'):
Validator.check_str_by_regular(prefix)
new = []
for x in self:
x1 = x.clone(init)
x1.name = prefix + "." + x1.name
new.append(x1)
if not x1.cache_enable:
continue
if _is_role_worker():
_clone_hash_table(x.name, x1.name)
_insert_accumu_init_info(x1.name, init_to_value(init))
return ParameterTuple(new)
def __parameter_tuple__(self):
| true | true |
f71c553ec28422c6f3e48889698abd8e2489bc38 | 6,512 | py | Python | rootpy/logger/__init__.py | masonproffitt/rootpy | 3926935e1f2100d8ba68070c2ab44055d4800f73 | [
"BSD-3-Clause"
] | 146 | 2015-01-04T15:16:44.000Z | 2022-01-27T11:29:31.000Z | rootpy/logger/__init__.py | masonproffitt/rootpy | 3926935e1f2100d8ba68070c2ab44055d4800f73 | [
"BSD-3-Clause"
] | 143 | 2015-01-07T00:20:42.000Z | 2021-11-04T07:48:26.000Z | rootpy/logger/__init__.py | masonproffitt/rootpy | 3926935e1f2100d8ba68070c2ab44055d4800f73 | [
"BSD-3-Clause"
] | 56 | 2015-01-30T11:11:07.000Z | 2022-03-28T09:42:06.000Z | """
:py:mod:`rootpy` overrides the default logging class, inserting a check that
there exists a default logging handler. If there is not, it adds one.
In addition, this can be used to intercept ROOT's log messages and redirect them
through python's logging subsystem.
Example use:
.. sourcecode:: python
# Disable colored logging (not needed if writing into a file,
# this is automatic).
# Must be done before :py:mod:`rootpy` logs any messages.
import logging; logging.basicConfig(level=logging.DEBUG)
from rootpy import log; log = log["/myapp"]
log.debug("Hello") # Results in "DEBUG:myapp] Hello"
# Suppress all myapp debug and info messages
log.setLevel(log.WARNING)
log.debug("Hello") # No effect
mymod = log["mymod"]
mymod.warning("Hello") # Results in "WARNING:myapp.mymod] Hello"
# Suppress all rootpy debug and info messages
log["/rootpy"].setLevel(log.WARNING)
# Suppress messages coming from TCanvas like
# INFO:ROOT.TCanvas.Print] png file /path/to/file.png has been created
log["/ROOT.TCanvas.Print"].setLevel(log.WARNING)
# Suppress warning messages coming the ``TClass`` constructor:
log["/ROOT.TClass.TClass"].setLevel(log.ERROR)
# Precisely remove messages containing the text "no dictionary for class"
# (doesn't work when attached to parent logger)
import logging
class NoDictMessagesFilter(logging.Filter):
def filter(self, record):
return "no dictionary for class" not in record.msg
log["/ROOT.TClass.TClass"].addFilter(NoDictMessagesFilter())
# Turn ROOT errors into exceptions
from rootpy.logger.magic import DANGER
DANGER.enable = True
import ROOT
ROOT.Error("test", "Test fatal")
# Result:
# ERROR:ROOT.test] Test fatal
# Traceback (most recent call last):
# File "test.py", line 36, in <module>
# ROOT.Fatal("test", "Test fatal")
# File "test.py", line 36, in <module>
# ROOT.Fatal("test", "Test fatal")
# File "rootpy/logger/roothandler.py", line 40, in python_logging_error_handler
# raise ROOTError(level, location, msg)
# rootpy.ROOTError: level=6000, loc='test', msg='Test fatal'
# Primitive function tracing:
@log.trace()
def salut():
return
@log.trace()
def hello(what):
salut()
return "42"
hello("world")
# Result:
# DEBUG:myapp.trace.hello] > ('world',) {}
# DEBUG:myapp.trace.salut] > () {}
# DEBUG:myapp.trace.salut] < return None [0.00 sec]
# DEBUG:myapp.trace.hello] < return 42 [0.00 sec]
"""
from __future__ import absolute_import
import logging
import os
import re
import sys
import threading
from functools import wraps
from time import time
from .utils import check_tty
from .extended_logger import ExtendedLogger
logging.setLoggerClass(ExtendedLogger)
log = logging.getLogger("rootpy")
if not os.environ.get("DEBUG", False):
log.setLevel(log.INFO)
from .formatter import CustomFormatter, CustomColoredFormatter
def check_tty_handler(handler):
if not hasattr(handler, "stream"):
return False
return check_tty(handler.stream)
log_root = logging.getLogger()
if not log_root.handlers:
# Add a handler to the top-level logger if it doesn't already have one
handler = logging.StreamHandler()
if check_tty_handler(handler):
handler.setFormatter(CustomColoredFormatter())
else:
handler.setFormatter(CustomFormatter())
log_root.addHandler(handler)
# Make the top-level logger as verbose as possible.
# Log messages that make it to the screen are controlled by the handler
log_root.setLevel(logging.DEBUG)
l = logging.getLogger("rootpy.logger")
l.debug("Adding rootpy's default logging handler to the root logger")
from .magic import set_error_handler
from .roothandler import python_logging_error_handler
__all__ = [
'log_trace',
'set_error_handler',
'python_logging_error_handler',
'LogFilter',
'LiteralFilter',
]
class TraceDepth(threading.local):
value = -1
trace_depth = TraceDepth()
def log_trace(logger, level=logging.DEBUG, show_enter=True, show_exit=True):
"""
log a statement on function entry and exit
"""
def wrap(function):
l = logger.getChild(function.__name__).log
@wraps(function)
def thunk(*args, **kwargs):
global trace_depth
trace_depth.value += 1
try:
start = time()
if show_enter:
l(level, "{0}> {1} {2}".format(" "*trace_depth.value,
args, kwargs))
try:
result = function(*args, **kwargs)
except:
_, result, _ = sys.exc_info()
raise
finally:
if show_exit:
l(level, "{0}< return {1} [{2:.2f} sec]".format(
" "*trace_depth.value, result, time() - start))
finally:
trace_depth.value -= 1
return result
return thunk
return wrap
class LogFilter(logging.Filter):
def __init__(self, logger, message_regex):
logging.Filter.__init__(self)
self.logger = logger
self.message_regex = re.compile(message_regex)
def __enter__(self):
self.logger.addFilter(self)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.logger.removeFilter(self)
def filter(self, record):
return not self.message_regex.match(record.getMessage())
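# A usage sketch: LogFilter is a context manager, so a noisy pattern can be
# silenced for just one block of code (the regex below is illustrative):
#     with LogFilter(log["/ROOT.TClass.TClass"], "no dictionary for class.*"):
#         ...  # ROOT calls that would otherwise emit the warning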
class LiteralFilter(logging.Filter):
def __init__(self, literals):
logging.Filter.__init__(self)
self.literals = literals
def filter(self, record):
return record.getMessage() not in self.literals
# filter superfluous ROOT warnings
for histtype in 'CSIFD':
for dimen in '123':
log["/ROOT.TH{0}{1}.Add".format(dimen, histtype)].addFilter(
LiteralFilter([
"Attempt to add histograms with different axis limits",]))
log["/ROOT.TH{0}{1}.Divide".format(dimen, histtype)].addFilter(
LiteralFilter([
"Attempt to divide histograms with different axis limits",]))
log["/ROOT.TH{0}{1}.Multiply".format(dimen, histtype)].addFilter(
LiteralFilter([
"Attempt to multiply histograms with different axis limits",]))
| 31.157895 | 85 | 0.640971 | from __future__ import absolute_import
import logging
import os
import re
import sys
import threading
from functools import wraps
from time import time
from .utils import check_tty
from .extended_logger import ExtendedLogger
logging.setLoggerClass(ExtendedLogger)
log = logging.getLogger("rootpy")
if not os.environ.get("DEBUG", False):
log.setLevel(log.INFO)
from .formatter import CustomFormatter, CustomColoredFormatter
def check_tty_handler(handler):
if not hasattr(handler, "stream"):
return False
return check_tty(handler.stream)
log_root = logging.getLogger()
if not log_root.handlers:
handler = logging.StreamHandler()
if check_tty_handler(handler):
handler.setFormatter(CustomColoredFormatter())
else:
handler.setFormatter(CustomFormatter())
log_root.addHandler(handler)
# Make the top-level logger as verbose as possible.
# Log messages that make it to the screen are controlled by the handler
log_root.setLevel(logging.DEBUG)
l = logging.getLogger("rootpy.logger")
l.debug("Adding rootpy's default logging handler to the root logger")
from .magic import set_error_handler
from .roothandler import python_logging_error_handler
__all__ = [
'log_trace',
'set_error_handler',
'python_logging_error_handler',
'LogFilter',
'LiteralFilter',
]
class TraceDepth(threading.local):
value = -1
trace_depth = TraceDepth()
def log_trace(logger, level=logging.DEBUG, show_enter=True, show_exit=True):
def wrap(function):
l = logger.getChild(function.__name__).log
@wraps(function)
def thunk(*args, **kwargs):
global trace_depth
trace_depth.value += 1
try:
start = time()
if show_enter:
l(level, "{0}> {1} {2}".format(" "*trace_depth.value,
args, kwargs))
try:
result = function(*args, **kwargs)
except:
_, result, _ = sys.exc_info()
raise
finally:
if show_exit:
l(level, "{0}< return {1} [{2:.2f} sec]".format(
" "*trace_depth.value, result, time() - start))
finally:
trace_depth.value -= 1
return result
return thunk
return wrap
class LogFilter(logging.Filter):
def __init__(self, logger, message_regex):
logging.Filter.__init__(self)
self.logger = logger
self.message_regex = re.compile(message_regex)
def __enter__(self):
self.logger.addFilter(self)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.logger.removeFilter(self)
def filter(self, record):
return not self.message_regex.match(record.getMessage())
class LiteralFilter(logging.Filter):
def __init__(self, literals):
logging.Filter.__init__(self)
self.literals = literals
def filter(self, record):
return record.getMessage() not in self.literals
for histtype in 'CSIFD':
for dimen in '123':
log["/ROOT.TH{0}{1}.Add".format(dimen, histtype)].addFilter(
LiteralFilter([
"Attempt to add histograms with different axis limits",]))
log["/ROOT.TH{0}{1}.Divide".format(dimen, histtype)].addFilter(
LiteralFilter([
"Attempt to divide histograms with different axis limits",]))
log["/ROOT.TH{0}{1}.Multiply".format(dimen, histtype)].addFilter(
LiteralFilter([
"Attempt to multiply histograms with different axis limits",]))
| true | true |
f71c5653c53151aeb46ae97e28196d989957f8df | 586 | py | Python | data/scripts/templates/object/mobile/shared_swirl_prong_hue.py | obi-two/GameServer | 7d37024e2291a97d49522610cd8f1dbe5666afc2 | [
"MIT"
] | 20 | 2015-02-23T15:11:56.000Z | 2022-03-18T20:56:48.000Z | data/scripts/templates/object/mobile/shared_swirl_prong_hue.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | null | null | null | data/scripts/templates/object/mobile/shared_swirl_prong_hue.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | 20 | 2015-04-04T16:35:59.000Z | 2022-03-24T14:54:37.000Z | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_swirl_prong_hue.iff"
result.attribute_template_id = 9
result.stfName("monster_name","swirl_prong")
#### BEGIN MODIFICATIONS ####
result.setStringAttribute("radial_filename", "radials/player_pet.py")
result.options_mask = 0x100
result.pvp_status = PVPSTATUS.PvPStatus_None
#### END MODIFICATIONS ####
return result | 29.3 | 70 | 0.750853 | true | true | |
f71c568fa66d427f4b77e6e61c5dc1c0fa4fdaf1 | 815 | py | Python | src/lists/tests/test_home_page.py | dmitricus/django-docker | 46e99dc4f3d902e7fda56f85260358f80f505297 | [
"MIT"
] | null | null | null | src/lists/tests/test_home_page.py | dmitricus/django-docker | 46e99dc4f3d902e7fda56f85260358f80f505297 | [
"MIT"
] | null | null | null | src/lists/tests/test_home_page.py | dmitricus/django-docker | 46e99dc4f3d902e7fda56f85260358f80f505297 | [
"MIT"
] | null | null | null | from django.urls import resolve
from django.test import TestCase
from django.http import HttpRequest
from lists.views import home_page
class HomePageTest(TestCase):
"""Тест домашней страницы"""
def test_root_url_resolve_to_home_page_view(self):
"""Тест: корневой url преобразуется в представление домашней страницы"""
found = resolve('/lists/')
self.assertEqual(found.func, home_page)
def test_home_page_returns_correct_html(self):
"""Тест: домашняя страница возвращает правильный html"""
request = HttpRequest()
response = home_page(request)
html = response.content.decode('utf-8')
self.assertTrue(html.startswith('<html>'))
self.assertIn('<title>To-Do lists</title>', html)
self.assertTrue(html.endswith('</html>'))
| 33.958333 | 80 | 0.696933 | from django.urls import resolve
from django.test import TestCase
from django.http import HttpRequest
from lists.views import home_page
class HomePageTest(TestCase):
def test_root_url_resolve_to_home_page_view(self):
found = resolve('/lists/')
self.assertEqual(found.func, home_page)
def test_home_page_returns_correct_html(self):
request = HttpRequest()
response = home_page(request)
html = response.content.decode('utf-8')
self.assertTrue(html.startswith('<html>'))
self.assertIn('<title>To-Do lists</title>', html)
self.assertTrue(html.endswith('</html>'))
| true | true |
f71c57de636f2f24120b2ff23112503d33da311e | 2,056 | py | Python | auth/oauth.py | fiam/wapi | e3f06505c18d5842bc186a5578c0fc3cc9b3a920 | [
"MIT"
] | 2 | 2016-05-09T02:32:38.000Z | 2019-04-21T12:07:51.000Z | auth/oauth.py | ingenieroariel/wapi | f083ae93f498262c634db20e6c27c3c4705cc1ec | [
"MIT"
] | null | null | null | auth/oauth.py | ingenieroariel/wapi | f083ae93f498262c634db20e6c27c3c4705cc1ec | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2008 Alberto García Hierro <fiam@rm-fr.net>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from django.http import HttpResponse
from django.utils.translation import ugettext as _
from wapi.auth.base import ApiAuth
from oauthsp.request import OAuthRequest
from oauthsp.exceptions import OAuthError
class ApiAuthOAuth(ApiAuth):
def login(self, request):
oauth_request = OAuthRequest(request)
if oauth_request.is_oauth():
try:
oauth_request.validate_access()
            except OAuthError as e:
return e.get_response()
request.oauth = oauth_request
request.user = oauth_request.token.user
return None
def login_required(self, request):
"""Returns a response indicating the user needs to log in"""
response = HttpResponse(_('Authorization Required'))
response['WWW-Authenticate'] = 'OAuth realm="%s"' % self.__class__.realm
response.status_code = 401
return response
| 40.313725 | 80 | 0.726167 |
| false | true |
f71c5890e794a661a56497593ddf0dfcf0ad6fc7 | 9,686 | py | Python | main.py | trueleo/python-teletootbot | d033a79b05a13d10bd2d4f0bd68d4ecaa47f3cb3 | ["MIT"] | 4 | 2019-04-19T12:58:51.000Z | 2021-07-27T01:12:41.000Z | main.py | trueleo/python-teletootbot | d033a79b05a13d10bd2d4f0bd68d4ecaa47f3cb3 | ["MIT"] | null | null | null | main.py | trueleo/python-teletootbot | d033a79b05a13d10bd2d4f0bd68d4ecaa47f3cb3 | ["MIT"] | null | null | null | from telegram.ext import MessageHandler, Filters, CommandHandler, Updater
from mastodon import MastodonIllegalArgumentError, MastodonUnauthorizedError
import DataHandler
import threading
import os
import sys
import logging
import certifi
import urllib3
import re
bot_token = '<your bot token here>'
# secretfile = open('secretbot', 'r')
# secret = secretfile.readline().rstrip('\n')
# bot_token = secret
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
test_visibility = 'public'
group_media_queue = {}
lookup_dict = {}
tootObject = DataHandler.mastodonapi.TootObject
def geturl(url_string):
man = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where() ,num_pools=1)
response = man.urlopen('GET', url_string)
rurl = response.geturl()
return re.search(r'([://a-z.0-9]+/)', rurl, re.I).group(0)
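# e.g. geturl('https://mastodon.social/about') follows any redirects and
# keeps only scheme + host, returning 'https://mastodon.social/'.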
def load_account(chat_id, force_reload=False):
try:
if force_reload:
raise KeyError
return lookup_dict[chat_id]
except KeyError:
account = DataHandler.account_object(chat_id)
lookup_dict[chat_id] = account
return account
def download(file_id, telegram_file_object):
file_url = telegram_file_object.file_path
file_ext = re.search(r'\.[0-9a-z]+$', file_url).group()
media_name = 'media-' + str(file_id) + file_ext
telegram_file_object.download(media_name)
return media_name
def process_group_media(chat_id, key):
files = group_media_queue.pop(key)
toot_object = tootObject()
for file_tuple in files:
file_id = file_tuple[0]
telegram_file_object = file_tuple[1]
caption = file_tuple[2]
media_name = download(file_id, telegram_file_object)
toot_object.append(text=caption, media=media_name)
tooting(chat_id, toot_object, test_visibility)
for media in toot_object.medias:
os.remove(media)
def add_to_group_media_queue(chat_id, group_id, file_id, telegram_file_object, caption):
key = str(chat_id) + str(group_id)
try:
media_container = group_media_queue[key]
except KeyError:
threading.Timer(20, process_group_media, [chat_id, key]).start()
media_container = []
group_media_queue[key] = media_container
finally:
media_container.append( (file_id, telegram_file_object, caption) )
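# Telegram delivers a media group (album) as separate photo updates sharing
# one media_group_id: the first update arms a 20-second Timer, later ones
# just append to the same queue, and when the timer fires
# process_group_media() toots the collected photos as a single post.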
def tooting(chat_id, tootobject, visibility):
load_account(chat_id).toot(tootobject, visibility)
def reply(context, chat_id, text):
context.bot.send_message(chat_id=chat_id, text=text, parse_mode='markdown')
def start(update, context):
context.bot.send_message(chat_id=update.message.chat_id,
text="Toot to Mastodon using this bot. See /help")
def add(update, context):
chat_id = update.message.chat_id
try:
assert len(context.args) == 3
except AssertionError:
reply(context, chat_id, 'usage:`\n/add <user_email> <password> <full_instance_url>`\nexample: `/add john@doe.com cyberpunk277 https://mastodon.social/`')
return
else:
username = context.args[0]
instance = geturl(context.args[2])
password = context.args[1]
try:
new_account = DataHandler.insert_account( chat_id,
username,
instance,
password)
reply(context, chat_id, 'Account added successfully')
except MastodonIllegalArgumentError:
reply(context, chat_id, 'Authentication failed')
            reply(context, chat_id, 'usage:`\n/add <user_email> <password> <full_instance_url>`\nexample: `/add john@doe.com cyberpunk277 https://mastodon.social/`')
except MastodonUnauthorizedError:
reply(context, chat_id, 'Authentication failed')
except DataHandler.InsertError:
reply(context, chat_id, 'Account already registered')
except:
reply(context, chat_id, 'Oops!, Something gone wrong. Check and try again')
else:
if isinstance(new_account, DataHandler.mastodonapi.MastodonAccount) and (DataHandler.number_of_accounts(chat_id) == 1):
lookup_dict[chat_id] = new_account
DataHandler.upsert_user(chat_id, 1)
reply(context, chat_id, 'Great!, You can use /listall to list your currently registered accounts')
def setdefault(update, context):
chat_id = update.message.chat_id
number_of_accounts = DataHandler.number_of_accounts(chat_id)
if number_of_accounts == 0:
reply(context, chat_id, 'You have not registered any mastodon account yet')
return
if number_of_accounts == 1:
acc = DataHandler.account_info(chat_id)
reply(context, chat_id, "Your only registered account is `{}` at `{}`".format(acc[0], acc[1]))
return
try:
newDefault = int(context.args[0])
if newDefault <= number_of_accounts:
DataHandler.upsert_user(chat_id, newDefault)
accountObj = load_account(chat_id, force_reload=True)
reply(context, chat_id,
"Now you can toot to your account `{}` at `{}`".format(
accountObj.user,
accountObj.instance))
else:
reply(context, chat_id,
"You need to specify right account number as given in /listall")
except:
reply(context, chat_id, "`/setdefault` <number>")
def delete(update, context):
chat_id = update.message.chat_id
number_of_accounts = DataHandler.number_of_accounts(chat_id)
if number_of_accounts == 0:
reply(context, chat_id,
'You don\'t have any registered account(s) to delete')
elif number_of_accounts == 1:
DataHandler.delete_user(chat_id)
lookup_dict.pop(chat_id)
else:
try:
acc_num = int(context.args[0])
if acc_num > number_of_accounts:
reply(context, chat_id, "You need to specify right account number as given in /listall")
return
current_default = DataHandler.get_default_acc(chat_id)
id_to_delete = DataHandler.account_id(chat_id, acc_num)
DataHandler.delete_account(id_to_delete)
if id_to_delete == current_default:
DataHandler.upsert_user(chat_id, 1)
load_account(chat_id, force_reload=True)
account_info_tuple = DataHandler.account_info(chat_id)
reply(context, chat_id, 'Your current default account is now set to {username} @ {instance}'.format(
username=account_info_tuple[0],
instance=account_info_tuple[1]))
except:
reply(context, chat_id, '`usage:`\n`/delete <number>`')
def deleteall(update, context):
chat_id = update.message.chat_id
try:
assert (context.args[0] == 'yes')
except:
reply(context, chat_id, '`NOTE: delete all registered accounts \nusage:\n/deleteall yes`')
else:
DataHandler.delete_user(chat_id)
try:
lookup_dict.pop(chat_id)
except KeyError:
pass
def listall(update, context):
chat_id = update.message.chat_id
text = DataHandler.all_accounts(chat_id)
reply(context, chat_id, "currenly registered accounts\n" + text)
def media(update, context):
chat_id = update.message.chat_id
file_id = update.message.photo[-1].file_id
new_file = context.bot.get_file(file_id)
if update.message.media_group_id:
add_to_group_media_queue(chat_id, update.message.media_group_id,
file_id, new_file, update.message.caption)
else:
try:
media_name = download(file_id, new_file)
tooting(chat_id, tootObject(update.message.caption, media_name), test_visibility)
except DataHandler.NoDataError:
reply(context, chat_id, 'Please add an account first using /add')
def text(update, context):
chat_id = update.message.chat_id
try:
tooting(chat_id, tootObject(update.message.text), test_visibility)
except DataHandler.NoDataError:
reply(context, chat_id, 'Please add an account first using /add')
def helpcommand(update, context):
chat_id = update.message.chat_id
reply(context, chat_id, "With TeleToot Bot you can to post on any Mastodon account's public timeline. Currently you can only post on one account at a time although you can authenticate various accounts and switch between them\n`availible commands:\n`/add\n/listall\n/setdefault\n/delete\n/deleteall")
reply(context, chat_id, "To start tooting using your mastodon account send `/add <registered email> <password> <instance_url>`. See /add for more detail")
updater = Updater(bot_token, use_context=True)
dispatcher = updater.dispatcher
list_of_commands = [start, add, listall, setdefault, delete, deleteall]
def load_commands(commands):
for command in commands:
dispatcher.add_handler(CommandHandler(command.__name__, command))
load_commands(list_of_commands)
# Filters.photo | (Filters.text & Filters.photo) reduces to plain
# Filters.photo, and pass_job_queue is not needed with use_context=True.
media_handler = MessageHandler(Filters.photo, media)
text_handler = MessageHandler(Filters.text, text)
dispatcher.add_handler(media_handler)
dispatcher.add_handler(text_handler)
dispatcher.add_handler(CommandHandler('help', helpcommand))
updater.start_polling(poll_interval=1.0, timeout=60)
updater.idle()
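# Assumed workflow for running the bot: paste a token from @BotFather into
# bot_token above, install python-telegram-bot and Mastodon.py, then run
# `python main.py`; polling starts as soon as the module is executed.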
| 40.024793 | 305 | 0.661986 |
| true | true |
f71c58960b4d96b75911c82859c3b22907774f53 | 864 | py | Python | texteditor.py | p10rahulm/python-basics | d8f6172d42c465382d672a6813dccfbe6dff45a7 | ["MIT"] | null | null | null | texteditor.py | p10rahulm/python-basics | d8f6172d42c465382d672a6813dccfbe6dff45a7 | ["MIT"] | null | null | null | texteditor.py | p10rahulm/python-basics | d8f6172d42c465382d672a6813dccfbe6dff45a7 | ["MIT"] | null | null | null |
from tkinter import *
import tkinter.filedialog as tkFileDialog
root = Tk("Text Editor")
text = Text(root)
text.grid()
def saveas():
global text
t = text.get("1.0", "end-1c")
savelocation= tkFileDialog.asksaveasfilename()
file1=open(savelocation, "w+")
file1.write(t)
file1.close()
button=Button(root, text="Save", command=saveas)
button.grid()
def FontHelvetica():
global text
text.config(font="Helvetica")
def FontCourier():
global text
text.config(font="Courier")
font=Menubutton(root, text="Font")
font.grid()
font.menu=Menu(font, tearoff=0)
font["menu"]=font.menu
Helvetica=IntVar()
arial=IntVar()
times=IntVar()
Courier=IntVar()
font.menu.add_checkbutton(label="Courier", variable=Courier,command=FontCourier)
font.menu.add_checkbutton(label="Helvetica", variable=Helvetica,command=FontHelvetica)
root.mainloop()
| 23.351351 | 86 | 0.725694 |
| true | true |
f71c58990649e0b8588522179070c5aec9ae9d99 | 93 | py | Python | moopy/comments/admin.py | qrizan/moopy | 10459351727710c77279f24f224786622abc91b8 | ["MIT"] | 1 | 2017-01-15T21:58:06.000Z | 2017-01-15T21:58:06.000Z | moopy/comments/admin.py | qrizan/moopy | 10459351727710c77279f24f224786622abc91b8 | ["MIT"] | null | null | null | moopy/comments/admin.py | qrizan/moopy | 10459351727710c77279f24f224786622abc91b8 | ["MIT"] | null | null | null | from django.contrib import admin
from .models import Comment
admin.site.register(Comment)
| 13.285714 | 32 | 0.806452 |
| true | true |
f71c58e4c78d5b8915f7be9b2f64999c3533da13 | 3,659 | py | Python | Python Data Structure/A1Q1_Letian Xu.py | XULetian/Python_Fundamental | b901014a3257649cf1b3205ee6c17ba35184cad7 | ["MIT"] | null | null | null | Python Data Structure/A1Q1_Letian Xu.py | XULetian/Python_Fundamental | b901014a3257649cf1b3205ee6c17ba35184cad7 | ["MIT"] | null | null | null | Python Data Structure/A1Q1_Letian Xu.py | XULetian/Python_Fundamental | b901014a3257649cf1b3205ee6c17ba35184cad7 | ["MIT"] | null | null | null | # Letian Xu
# 01/10/2019
# I have not given or received any unauthorized assistance on this assignment.
def overlap(s1,s2):
'''
    Arguments: s1 and s2 are lists, each describing a square as [x, y, side].
    Return: the area of the overlap of the two squares; 0 if they do not overlap.
'''
    # Use range() to list the integer cells the square covers horizontally,
    # then take the set intersection of the two lists: its size is the
    # length of the squares' overlap along the x-axis.
x1 = list(range(s1[0],s1[0]+s1[2]))
x2 = list(range(s2[0],s2[0]+s2[2]))
x = len(set(x1) & set(x2))
    # and this is the length of the two squares' overlap along the y-axis
y1 = list(range(s1[1],s1[1]+s1[2]))
y2 = list(range(s2[1],s2[1]+s2[2]))
y = len(set(y1) & set(y2))
    # If either length is 0, the squares do not overlap and 0 is returned.
return x*y
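# Worked example (matches Test 1 below): for S1 = [1, 5, 3] and S6 = [3, 2, 5]
# x-ranges [1, 2, 3] vs [3, 4, 5, 6, 7] -> intersection {3},    so x = 1
# y-ranges [5, 6, 7] vs [2, 3, 4, 5, 6] -> intersection {5, 6}, so y = 2
# overlap area = 1 * 2 = 2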
totalScore = 0
S1 = [1,5,3]
S2 = [5,6,2]
S3 = [2,1,2]
S4 = [9,6,2]
S5 = [7,2,3]
S6 = [3,2,5]
S7 = [5,3,1]
#---------- ---------- ---------- ---------- ----------
print( "Test 1: " + str(S1) + str(S6) )
print( "Correct Answer: 2" )
r1 = overlap(S1,S6)
r2 = overlap(S6,S1)
print( "Result 1: " + str(r1) )
print( "Result 2: " + str(r2) )
s1 = 0
if r1 == 2:
s1 = s1 + 1
if r2 == 2:
s1 = s1 + 1
print( "Score: " + str(s1) )
print()
totalScore = totalScore + s1
#---------- ---------- ---------- ---------- ----------
print( "Test 2: " + str(S2) + str(S6) )
print( "Correct Answer: 2" )
r1 = overlap(S2,S6)
r2 = overlap(S6,S2)
print( "Result 1: " + str(r1) )
print( "Result 2: " + str(r2) )
s1 = 0
if r1 == 2:
s1 = s1 + 1
if r2 == 2:
s1 = s1 + 1
print( "Score: " + str(s1) )
print()
totalScore = totalScore + s1
#---------- ---------- ---------- ---------- ----------
print( "Test 3: " + str(S3) + str(S6) )
print( "Correct Answer: 1" )
r1 = overlap(S3,S6)
r2 = overlap(S6,S3)
print( "Result 1: " + str(r1) )
print( "Result 2: " + str(r2) )
s1 = 0
if r1 == 1:
s1 = s1 + 1
if r2 == 1:
s1 = s1 + 1
print( "Score: " + str(s1) )
print()
totalScore = totalScore + s1
#---------- ---------- ---------- ---------- ----------
print( "Test 4: " + str(S4) + str(S6) )
print( "Correct Answer: 0" )
r1 = overlap(S4,S6)
r2 = overlap(S6,S4)
print( "Result 1: " + str(r1) )
print( "Result 2: " + str(r2) )
s1 = 0
if r1 == 0:
s1 = s1 + 1
if r2 == 0:
s1 = s1 + 1
print( "Score: " + str(s1) )
print()
totalScore = totalScore + s1
#---------- ---------- ---------- ---------- ----------
print( "Test 5: " + str(S5) + str(S6) )
print( "Correct Answer: 3" )
r1 = overlap(S5,S6)
r2 = overlap(S6,S5)
print( "Result 1: " + str(r1) )
print( "Result 2: " + str(r2) )
s1 = 0
if r1 == 3:
s1 = s1 + 1
if r2 == 3:
s1 = s1 + 1
print( "Score: " + str(s1) )
print()
totalScore = totalScore + s1
#---------- ---------- ---------- ---------- ----------
print( "Test 6: " + str(S6) + str(S6) )
print( "Correct Answer: 25" )
r1 = overlap(S6,S6)
r2 = overlap(S6,S6)
print( "Result 1: " + str(r1) )
print( "Result 2: " + str(r2) )
s1 = 0
if r1 == 25:
s1 = s1 + 1
if r2 == 25:
s1 = s1 + 1
print( "Score: " + str(s1) )
print()
totalScore = totalScore + s1
#---------- ---------- ---------- ---------- ----------
print( "Test 7: " + str(S7) + str(S6) )
print( "Correct Answer: 1" )
r1 = overlap(S7,S6)
r2 = overlap(S6,S7)
print( "Result 1: " + str(r1) )
print( "Result 2: " + str(r2) )
s1 = 0
if r1 == 1:
s1 = s1 + 1
if r2 == 1:
s1 = s1 + 1
print( "Score: " + str(s1) )
print()
totalScore = totalScore + s1
#---------- ---------- ---------- ---------- ----------
print ( "Total Score: " + str(totalScore) )
print ( "Percentage: " + str(100*totalScore/14) )
| 22.447853 | 96 | 0.496037 |
| true | true |
f71c5a07ef97232bc9ba249f594bbcbea596ebe9 | 747 | py | Python | pywhatkit/wwd.py | apoorvthedude/PyWhatKit | edc60ad44c11d4700b531be1bb86639ff3fce270 | ["MIT"] | 2 | 2022-01-15T00:53:30.000Z | 2022-01-16T22:56:23.000Z | pywhatkit/wwd.py | apoorvthedude/PyWhatKit | edc60ad44c11d4700b531be1bb86639ff3fce270 | ["MIT"] | null | null | null | pywhatkit/wwd.py | apoorvthedude/PyWhatKit | edc60ad44c11d4700b531be1bb86639ff3fce270 | ["MIT"] | null | null | null | import webbrowser as web
def tutorial_hindi() -> None:
"""Watch tutorial on how to use this library on YouTube in Hindi"""
web.open("https://youtu.be/o6WV9zFJg1o")
def tutorial_english() -> None:
"""Watch tutorial on how to use this library on YouTube in English"""
web.open("https://youtu.be/vpfrwpO_HKY")
def developer_contact() -> None:
"""Contact information of developer for feedbacks"""
link = "https://github.com/Ankit404butfound/PyWhatKit"
print(f"You can reach out to us on GitHub {link} for help regarding any issues related to the module.")
def join_discord() -> None:
"""Opens the invite link for the discord server"""
web.open("https://discord.gg/62Yf5mushu")
| 27.666667 | 108 | 0.670683 |
| true | true |
f71c5aa86d59a64428303d427cd50c450b530ad6 | 6,559 | py | Python | tilde/parsers/__init__.py | fossabot/tilde-1 | 143810a711f00dc1c64a6eb10573986dddadfcef | ["MIT"] | null | null | null | tilde/parsers/__init__.py | fossabot/tilde-1 | 143810a711f00dc1c64a6eb10573986dddadfcef | ["MIT"] | null | null | null | tilde/parsers/__init__.py | fossabot/tilde-1 | 143810a711f00dc1c64a6eb10573986dddadfcef | ["MIT"] | null | null | null |
# Generic parser schema
# with the default values
# Author: Evgeny Blokhin
import os, sys
import re
import time
import math
import random
import hashlib
import base64
from ase.data import chemical_symbols
class Output:
def __init__(self, filename='', calcset=False):
self._filename = filename # for quick and cheap checksums (NB never generate checksum from the entire calc file, which may be huge)
self.data = '' # file contents holder; may be empty for some parsers!
self._checksum = None # NB do not use directly
self._calcset = calcset
self._nested_depth = 0
self.download_size = 0
self.related_files = []
if self._calcset:
self.info = {}
return
self._starttime = time.time()
self.structures = [] # list of ASE objects with additional properties
self.convergence = [] # zero-point energy convergence (I)
self.tresholds = [] # optimization convergence, list of 5 lists (II)
self.ncycles = [] # number of cycles at each optimisation step
self.electrons = {
#'rgkmax': None,
'basis_set': None, # format depends on ansatz:
# LCAO Gaussians: {'bs': {}, 'ps': {}}
# PWs and LAPW: [atom1, ...]
'eigvals': {}, # raw eigenvalues {k:{alpha:[], beta:[]},}
'projected': [], # raw eigenvalues [..., ...] for total DOS smearing
'dos': {}, # in advance pre-computed DOS
'bands': {} # in advance pre-computed band structure
}
# NB own properties for CRYSTAL: impacts, proj_eigv_impacts, e_proj_eigvals (TODO)
self.phonons = {
'modes': {},
'irreps': {},
'ir_active': {},
'raman_active': {},
'ph_eigvecs': {},
'ph_k_degeneracy': {},
'dfp_disps': [],
'dfp_magnitude': None,
'dielectric_tensor':False,
'zpe': None,
'td': None
}
self.elastic = {}
# modules output object
self.apps = {}
# classification and technical info object
# NB API call *classify* extends it with the new items
self.info = {
'warns': [],
'framework': 0x0, # code name
'prog': 'unknown version', # code version
'perf': None, # benchmarking
'location': filename,
'finished': 0x0,
'duration': None,
'input': None,
'energy': None, # in eV
'standard': '',
'formula': '',
'dims': False, # cell volume
'periodicity':0x0,
'natom': 0,
'elements': [],
'contents': [],
'lack': False,
'expanded': False,
'tags': [],
'etype': 0x0,
'bandgap': None, # in eV
'bandgaptype':0x0,
'optgeom': False,
'calctypes': [],
'H': None,
'H_types': [],
'tol': None,
'k': None,
'kshift': None,
'smear': None, # in a.u.
'smeartype': None,
'spin': 0x0,
'lockstate': None,
'ansatz': 0x0,
'techs': [],
'dtype': 0x0
}
@classmethod
def iparse(cls, filename):
return [cls(filename)]
def __getitem__(self, key):
return getattr(self, key)
def __setitem__(self, key, value):
return setattr(self, key, value)
def __repr__(self):
out = ''
for repr in dir(self):
if not hasattr(getattr(self, repr), '__call__') and repr != '__doc__':
if repr == 'structures' and len(getattr(self, repr)):
if len(getattr(self, repr)) > 1:
out += repr + " ->\nINITIAL:\n" + str( getattr(self, repr)[0] ) + "\nFINAL:\n" + str( getattr(self, repr)[-1] ) + "\n\n"
else:
out += repr + " -> " + str( getattr(self, repr)[-1] ) + "\n\n"
else:
str_repr = str( getattr(self, repr) )
if len(str_repr) < 2000:
out += repr + ' -> ' + str_repr + "\n\n"
else:
out += repr + ' -> ' + str_repr[:1000] + '...\n\n'
return out
def warning(self, msg):
self.info['warns'].append(msg)
def get_checksum(self):
'''
Retrieve unique hash in a cross-platform manner:
this is how calculation identity is determined
'''
if self._checksum:
return self._checksum
if not self._filename:
raise RuntimeError('Source calc file is required in order to properly save the data!')
calc_checksum = hashlib.sha224()
struc_repr = ""
for ase_obj in self.structures:
struc_repr += "%3.6f %3.6f %3.6f %3.6f %3.6f %3.6f %3.6f %3.6f %3.6f " % tuple(map(abs, [ase_obj.cell[0][0], ase_obj.cell[0][1], ase_obj.cell[0][2], ase_obj.cell[1][0], ase_obj.cell[1][1], ase_obj.cell[1][2], ase_obj.cell[2][0], ase_obj.cell[2][1], ase_obj.cell[2][2]])) # NB beware of length & minus zeros
for atom in ase_obj:
struc_repr += "%s %3.6f %3.6f %3.6f " % tuple(map(abs, [chemical_symbols.index(atom.symbol), atom.x, atom.y, atom.z])) # NB beware of length & minus zeros
if self.info["energy"] is None:
energy = str(None)
else:
energy = str(round(self.info['energy'], 11 - int(math.log10(math.fabs(self.info['energy'])))))
calc_checksum.update((
struc_repr + "\n" +
energy + "\n" +
self.info['prog'] + "\n" +
str(self.info['input']) + "\n" +
str(sum([2**x for x in self.info['calctypes']]))
).encode('ascii')) # NB this is fixed and should not be changed
result = base64.b32encode(calc_checksum.digest()).decode('ascii')
result = result[:result.index('=')] + 'CI'
return result
def benchmark(self):
self.info['perf'] = "%1.2f" % (time.time() - self._starttime)
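# Minimal smoke-test sketch (not part of the module): real parsers subclass
# Output, fill self.structures and self.info inside iparse(), and only then
# does get_checksum() hash meaningful data.
if __name__ == '__main__':
    stub = Output(filename='example.out')  # hypothetical path
    stub.warning('no real parser ran')
    stub.benchmark()
    print(stub.info['warns'], stub.info['perf'])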
| 35.646739 | 318 | 0.479646 |
| true | true |
f71c5c0fa48035e54718849c2b4a1ba58fa91295 | 4,789 | py | Python | Models/opt_torch.py | tarkantemizoz/Cost-Sensitive-Learning | 083f8dfd2950b7e3874df34bf61c2ca1e4a91fbb | ["Apache-2.0"] | 2 | 2021-03-05T08:06:17.000Z | 2021-04-13T21:03:12.000Z | Models/opt_torch.py | tarkantemizoz/Cost-Sensitive-Learning | 083f8dfd2950b7e3874df34bf61c2ca1e4a91fbb | ["Apache-2.0"] | null | null | null | Models/opt_torch.py | tarkantemizoz/Cost-Sensitive-Learning | 083f8dfd2950b7e3874df34bf61c2ca1e4a91fbb | ["Apache-2.0"] | 1 | 2021-03-10T18:10:30.000Z | 2021-03-10T18:10:30.000Z | # coding: utf-8
# Copyright 2020 Tarkan Temizoz
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import torch
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
from Models.linearnet import LinearNet
class Optimization:
""" A helper class to train, test and diagnose Cost-sensitive Logistic Regression
Attributes:
model: CSLR model.
optimizer: Optimizer of the network.
train_return: List of train returns.
val_return: List of validation returns.
validation: Whether there is validation data.
batch_size: Batch-size of the network.
n_epochs: Total number of epochs.
n_steps: Number of epochs to evaluate the results
"""
def __init__(self, model, optimizer, config):
"""Initialises CLSR.
Args:
model: CSLR model.
optimizer: Optimizer of the network.
config: Configuration of the network.
"""
self.model = model
self.optimizer = optimizer
self.train_return = []
self.val_return = []
self.validation = False
self.batch_size = config.get("batch_size",32)
self.n_epochs = config.get("n_epochs", 1000)
self.n_steps = config.get("n_steps", self.n_epochs)
@staticmethod
def batch(iterable, n):
"""Creates batches."""
l = len(iterable)
for ndx in range(0, l, n):
yield iterable[ndx:min(ndx + n, l)]
def train(self, x_train, r_train, x_val=None, r_val=None):
"""Applies simple feed-forward network to an input.
Args:
x_train: train features
r_train: train returns
x_val: validation features
r_val: validation returns
"""
if x_val is not None or r_val is not None:
self.validation = True
start_time = time.time()
for epoch in range(self.n_epochs):
x_shuff, r_shuff = shuffle(x_train, r_train)
self.model.train()
for j in self.batch(range(0, len(x_shuff)),self.batch_size):
if len(j) < 2:
break
x_batch = x_shuff[j]
r_batch = r_shuff[j]
self.optimizer.zero_grad()
outputs, _, _ = self.model(x_batch)
loss = -torch.mul(outputs, r_batch).sum()
loss.backward()
self.optimizer.step()
returns_train, _, _ = self.evaluate(x_train, r_train)
self.train_return.append(returns_train)
if self.validation is True:
returns_val, _, _ = self.evaluate(x_val, r_val)
self.val_return.append(returns_val)
if ((epoch+1) % self.n_steps == 0):
elapsed = time.time() - start_time
print(
("Epoch %d Train Return: %.3f.") % (epoch + 1, self.train_return[-1]),
((" Validation Return: %.3f. Elapsed time: %.3fs.")
% (self.val_return[-1], elapsed)
if self.validation is True else
" Elapsed time: %.3fs."
% elapsed)
)
start_time = time.time()
def evaluate(self, x_test, r_test):
"""Evaluates simple feed-forward network to an input.
Args:
x_test: features of the evaluated data
r_test: returns of the evaluated data
Returns:
Triple of Tensors for: (Total returns, decision variables, probabilities)
"""
with torch.no_grad():
outputs, probs, _ = self.model(x_test)
returns = torch.mul(outputs, r_test).sum()
return returns, outputs, probs
def plot_return(self):
"""Draws a plot, Trains Returns vs Test Returns"""
plt.plot(self.train_return, label="Train Return")
plt.plot(self.val_return, label="Test Return")
plt.legend()
plt.title("Returns")
| 35.738806 | 91 | 0.554187 |
| true | true |
f71c5d5f8d7d8019ce24e7f8738fe2a3db585bd9 | 12,026 | py | Python | grabcut.py | nong-fu/grabcut | 19a43eed7597ffae456349e4f0568da2f8f1f25c | ["Apache-2.0"] | null | null | null | grabcut.py | nong-fu/grabcut | 19a43eed7597ffae456349e4f0568da2f8f1f25c | ["Apache-2.0"] | null | null | null | grabcut.py | nong-fu/grabcut | 19a43eed7597ffae456349e4f0568da2f8f1f25c | ["Apache-2.0"] | null | null | null | # coding=utf-8
import sys
from pathlib import Path
import webbrowser
import numpy as np
import cv2
from PIL import Image
from PyQt5.QtCore import QDir, Qt, pyqtSlot, pyqtSignal
from PyQt5.QtGui import QImage, QPixmap, QColor
from PyQt5.QtWidgets import (
QApplication, QMainWindow, QWidget,
QMessageBox, QFileDialog, QLabel, QSpinBox, QPushButton,
QActionGroup, QAction, QSizePolicy, QHBoxLayout,
)
from ui_grabcut import Ui_MainWindow
class Canvas(QLabel):
"""Canvas for drawing mask layer on Image.
"""
mousePressed = pyqtSignal()
mouseMoved = pyqtSignal(int, int, int, int)
def __init__(self, parent):
super().__init__(parent)
self.parent = parent
# self.setStyleSheet("border: 1px solid red;")
self.last_x, self.last_y = None, None
def mousePressEvent(self, e):
self.mousePressed.emit()
def mouseMoveEvent(self, e):
x, y = e.x(), e.y()
if self.last_x is None:
self.last_x, self.last_y = x, y
return
self.mouseMoved.emit(self.last_x, self.last_y, x, y)
self.last_x, self.last_y = x, y
def mouseReleaseEvent(self, e):
self.last_x, self.last_y = None, None
class MainWindow(QMainWindow):
def __init__(self, *args, **kwargs):
super(MainWindow, self).__init__(*args, **kwargs)
# orign image data
self.img = None
# mask layer for grabcut
self.mask = None
# history masks for undo
self.masks = []
# grabcut algorithm param iterCount
self.iterCount = 5
# canvas image cache
self.imgWithMask = None
# mask mode to color, don't use dict, too slow!
self.mode2color = (
# cv2.GC_BGD == 0
np.array([0, 0, 255], dtype=np.uint8),
# cv2.GC_FGD == 1
np.array([0, 255, 0], dtype=np.uint8),
# cv2.GC_PR_BGD == 2
np.array([0, 0, 120], dtype=np.uint8),
# cv2.GC_PR_FGD == 3
np.array([0, 120, 0], dtype=np.uint8),
)
# NONE mean none of (BGD/FGD/PR_BGD/PR_FGD)
self.GC_NONE = 255
# mask layer alpha
self.alpha = 0.3
self.imgPath = Path.cwd()
self.penSize = 40
# init ui order matter
self.initUI()
def grabCut(self, iterCount):
if self.img is None:
self.showMessage("No image")
return
# avoid grabCut crash
if not np.any((self.mask == cv2.GC_FGD) | (self.mask == cv2.GC_PR_FGD)):
self.showMessage("no GC_FGD or GC_PR_FGD")
return
# before grabcut, save mask to stack
self.pushMask()
bgdModel = np.zeros((1, 65), np.float64)
fgdModel = np.zeros((1, 65), np.float64)
_ = cv2.grabCut(self.img, self.mask, None, bgdModel,
fgdModel, iterCount, cv2.GC_INIT_WITH_MASK)
self.drawPartialImgWithMask(self.masks[-1], self.mask)
# display result
self.ui.displayResultAction.setChecked(True)
self.repaint()
def drawingMask(self, x1, y1, x2, y2):
"""drawing an small partial of the mask layer,
which is a small line segment.
"""
if self.img is None:
return
# when hidden mask or display result, don't draw mask
if self.ui.hiddenMaskAction.isChecked() or \
self.ui.displayResultAction.isChecked():
return
if self.ui.prFgdAction.isChecked():
mode = cv2.GC_PR_FGD
elif self.ui.prBgdAction.isChecked():
mode = cv2.GC_PR_BGD
elif self.ui.fgdAction.isChecked():
mode = cv2.GC_FGD
else: # bgdAction
mode = cv2.GC_BGD
cv2.line(self.mask, (x1, y1), (x2, y2), mode, self.penSize)
partialMask = np.zeros(self.mask.shape, np.uint8)
# GC_BGD is 0, can't use 0 as default
partialMask.fill(self.GC_NONE)
cv2.line(partialMask, (x1, y1), (x2, y2), mode, self.penSize)
indices = np.where(partialMask != self.GC_NONE)
if indices[0].size == 0:
# nothing new in partialMask
return
self.imgWithMask[indices] = (1 - self.alpha)*self.img[indices] + \
self.alpha*self.mode2color[mode]
self.repaint()
def pushMask(self):
"""push a mask to history list masks for undo.
"""
# if mask hasn't changed
if len(self.masks) > 0 and np.array_equal(self.masks[-1], self.mask):
return
self.masks.append(self.mask.copy())
def drawPartialImgWithMask(self, curMask, newMask):
"""draw partial imgWithMask.
mask changed from curMask to newMask, only draw the changed part.
"""
# redraw partial imgWithMask
indices = np.where(curMask != newMask)
if indices[0].size == 0:
# two masks are equal
return
self.imgWithMask[indices] = (1-self.alpha)*self.img[indices] + \
self.alpha*np.array([self.mode2color[m] for m in newMask[indices]])
def getResult(self):
"""use mask cuf off forground area as final result.
"""
result_mask = np.where((self.mask == 2) | (
self.mask == 0), 0, 1).astype('uint8')
return self.img*result_mask[:, :, np.newaxis]
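    # GrabCut mask encoding: 0 = GC_BGD, 1 = GC_FGD, 2 = GC_PR_BGD,
    # 3 = GC_PR_FGD; getResult() therefore zeroes sure/probable background
    # (values 0 and 2) and keeps sure/probable foreground pixels.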
@pyqtSlot(name="on_displayResultAction_triggered")
@pyqtSlot(name="on_hiddenMaskAction_triggered")
def repaint(self):
"""repaint cavans.
"""
if self.img is None:
self.showMessage("No image")
return
if self.ui.displayResultAction.isChecked():
img = self.getResult()
elif self.ui.hiddenMaskAction.isChecked():
img = self.img
else:
img = self.imgWithMask
# convert opencv image to qt image
height, width, _ = img.shape
bytesOfLine = 3*width
image = QImage(img.tobytes(), width, height,
bytesOfLine, QImage.Format_RGB888).rgbSwapped()
self.canvas.setPixmap(QPixmap.fromImage(image))
def initUI(self):
# merge designer ui
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
# right box on toolbar
rightBox = QWidget(self.ui.toolBar)
boxLayout = QHBoxLayout()
# grabcut iterCount spinbox
boxLayout.addWidget(QLabel("iterCount"))
self.iterCountSpinBox = QSpinBox(self)
self.iterCountSpinBox.setRange(1, 100)
self.iterCountSpinBox.setValue(5)
boxLayout.addWidget(self.iterCountSpinBox)
boxLayout.addStretch(1)
# pen size spinbox
boxLayout.addWidget(QLabel("pen"))
self.penSizeSpinBox = QSpinBox(self)
self.penSizeSpinBox.setRange(1, 500)
self.penSizeSpinBox.setSingleStep(5)
self.penSizeSpinBox.setValue(40)
boxLayout.addWidget(self.penSizeSpinBox)
rightBox.setLayout(boxLayout)
self.ui.toolBar.addWidget(rightBox)
self.canvas = Canvas(self)
self.ui.scrollArea.setWidget(self.canvas)
# canvas align center in scroll area
self.ui.scrollArea.setAlignment(Qt.AlignCenter)
# fixed canvas that make it easier to select mask layer
self.canvas.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
# 4 types of mask layer flags
actionGroup = QActionGroup(self)
actionGroup.addAction(self.ui.fgdAction)
actionGroup.addAction(self.ui.bgdAction)
actionGroup.addAction(self.ui.prFgdAction)
actionGroup.addAction(self.ui.prBgdAction)
# handle events
self.ui.exitAction.triggered.connect(self.close)
self.penSizeSpinBox.valueChanged.connect(self.setPenSize)
self.iterCountSpinBox.valueChanged.connect(self.setIterCount)
self.ui.opencvAction.triggered.connect(lambda: webbrowser.open(
'https://opencv-python-tutroals.readthedocs.io/en/'
'latest/py_tutorials/py_imgproc/py_grabcut/py_grabcut.html'
))
self.canvas.mousePressed.connect(self.pushMask)
self.canvas.mouseMoved.connect(self.drawingMask)
self.resetUiToDrawMaskMode()
def resetUiToDrawMaskMode(self):
"""reset ui to draw mask mode.
"""
self.ui.prFgdAction.setChecked(True)
self.ui.displayResultAction.setChecked(False)
self.ui.hiddenMaskAction.setChecked(False)
def setPenSize(self, v):
self.penSize = v
def setIterCount(self, v):
self.iterCount = v
def showMessage(self, msg):
self.ui.statusbar.showMessage(msg)
@pyqtSlot(name="on_openAction_triggered")
def openImage(self):
fileName, _ = QFileDialog.getOpenFileName(
self, "Open File", str(self.imgPath))
if not fileName:
return
imgFile = Path(fileName)
self.imgPath = imgFile.parent
        # cv2.imread can't read images whose path contains non-ASCII
        # (e.g. Chinese) characters, so this is a workaround.
        # self.img = cv2.imread(fileName)
data = np.fromfile(fileName, dtype=np.uint8)
self.img = cv2.imdecode(data, cv2.IMREAD_UNCHANGED)
# discarding alpha channel
self.img = self.img[:,:,:3]
self.reset()
@pyqtSlot(name="on_saveAction_triggered")
def saveResult(self):
if self.img is None:
self.showMessage("no result to save")
return
fileName, _ = QFileDialog.getSaveFileName(
self, "Save File", str(self.imgPath))
if not fileName:
return
imgFile = Path(fileName)
self.imgPath = imgFile.parent
# default save as png
if not imgFile.suffix:
imgFile = imgFile.with_suffix('.png')
result = self.getResult()
        # cv2.imwrite can't write images whose path contains non-ASCII
        # characters; convert OpenCV's BGR to RGB so PIL saves correct colors.
        im = Image.fromarray(cv2.cvtColor(result, cv2.COLOR_BGR2RGB))
im.save(imgFile.as_posix())
@pyqtSlot(name="on_exportMaskAction_triggered")
def exportMask(self):
if self.mask is None or not self.mask.any():
self.showMessage("no mask")
return
fileName, _ = QFileDialog.getSaveFileName(
self, "Save Mask", str(self.imgPath))
if not fileName:
return
imgFile = Path(fileName)
self.imgPath = imgFile.parent
# default save as png
if not imgFile.suffix:
imgFile = imgFile.with_suffix('.png')
im = Image.fromarray(self.mask)
im.save(imgFile.as_posix())
@pyqtSlot(name="on_undoAction_triggered")
def undo(self):
if len(self.masks) == 0:
self.showMessage("undo stack is empty")
return
prevMask = self.masks.pop()
self.drawPartialImgWithMask(self.mask, prevMask)
self.mask = prevMask
# after undo, uncheck display result and hidden mask
self.resetUiToDrawMaskMode()
self.repaint()
@pyqtSlot(name="on_resetAction_triggered")
def reset(self):
if self.img is None:
self.showMessage("No image")
return
self.mask = np.zeros(self.img.shape[:2], np.uint8)
self.mask.fill(cv2.GC_PR_BGD)
self.masks = []
# re-create imgWidthMask
self.imgWithMask = np.zeros(self.img.shape, np.uint8)
self.imgWithMask[...] = (1-self.alpha)*self.img + \
self.alpha*self.mode2color[cv2.GC_PR_BGD]
self.resetUiToDrawMaskMode()
self.repaint()
@pyqtSlot(name="on_grabCutAction_triggered")
def runGrabCut(self):
self.grabCut(self.iterCount)
@pyqtSlot(name="on_singleStepAction_triggered")
def runGrabCutSingleStep(self):
self.grabCut(1)
def closeEvent(self, evt):
# maybe popup a dialog to ask user accept or ignore
evt.accept()
if __name__ == '__main__':
app = QApplication(sys.argv)
window = MainWindow()
window.show()
app.exec_()
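# Assumed workflow for running the tool: install PyQt5, OpenCV and Pillow,
# generate ui_grabcut.py from the Qt Designer .ui file, then run
# `python grabcut.py`; paint FGD/BGD/PR_FGD/PR_BGD strokes and run grabCut.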
| 31.564304 | 80 | 0.606187 |
import sys
from pathlib import Path
import webbrowser
import numpy as np
import cv2
from PIL import Image
from PyQt5.QtCore import QDir, Qt, pyqtSlot, pyqtSignal
from PyQt5.QtGui import QImage, QPixmap, QColor
from PyQt5.QtWidgets import (
QApplication, QMainWindow, QWidget,
QMessageBox, QFileDialog, QLabel, QSpinBox, QPushButton,
QActionGroup, QAction, QSizePolicy, QHBoxLayout,
)
from ui_grabcut import Ui_MainWindow
class Canvas(QLabel):
mousePressed = pyqtSignal()
mouseMoved = pyqtSignal(int, int, int, int)
def __init__(self, parent):
super().__init__(parent)
self.parent = parent
self.last_x, self.last_y = None, None
def mousePressEvent(self, e):
self.mousePressed.emit()
def mouseMoveEvent(self, e):
x, y = e.x(), e.y()
if self.last_x is None:
self.last_x, self.last_y = x, y
return
self.mouseMoved.emit(self.last_x, self.last_y, x, y)
self.last_x, self.last_y = x, y
def mouseReleaseEvent(self, e):
self.last_x, self.last_y = None, None
class MainWindow(QMainWindow):
def __init__(self, *args, **kwargs):
super(MainWindow, self).__init__(*args, **kwargs)
self.img = None
self.mask = None
self.masks = []
self.iterCount = 5
self.imgWithMask = None
self.mode2color = (
# cv2.GC_BGD == 0
np.array([0, 0, 255], dtype=np.uint8),
# cv2.GC_FGD == 1
np.array([0, 255, 0], dtype=np.uint8),
# cv2.GC_PR_BGD == 2
np.array([0, 0, 120], dtype=np.uint8),
# cv2.GC_PR_FGD == 3
np.array([0, 120, 0], dtype=np.uint8),
)
# NONE mean none of (BGD/FGD/PR_BGD/PR_FGD)
self.GC_NONE = 255
# mask layer alpha
self.alpha = 0.3
self.imgPath = Path.cwd()
self.penSize = 40
# init ui order matter
self.initUI()
def grabCut(self, iterCount):
if self.img is None:
self.showMessage("No image")
return
# avoid grabCut crash
if not np.any((self.mask == cv2.GC_FGD) | (self.mask == cv2.GC_PR_FGD)):
self.showMessage("no GC_FGD or GC_PR_FGD")
return
# before grabcut, save mask to stack
self.pushMask()
bgdModel = np.zeros((1, 65), np.float64)
fgdModel = np.zeros((1, 65), np.float64)
_ = cv2.grabCut(self.img, self.mask, None, bgdModel,
fgdModel, iterCount, cv2.GC_INIT_WITH_MASK)
self.drawPartialImgWithMask(self.masks[-1], self.mask)
# display result
self.ui.displayResultAction.setChecked(True)
self.repaint()
def drawingMask(self, x1, y1, x2, y2):
if self.img is None:
return
# when hidden mask or display result, don't draw mask
if self.ui.hiddenMaskAction.isChecked() or \
self.ui.displayResultAction.isChecked():
return
if self.ui.prFgdAction.isChecked():
mode = cv2.GC_PR_FGD
elif self.ui.prBgdAction.isChecked():
mode = cv2.GC_PR_BGD
elif self.ui.fgdAction.isChecked():
mode = cv2.GC_FGD
else:
mode = cv2.GC_BGD
cv2.line(self.mask, (x1, y1), (x2, y2), mode, self.penSize)
partialMask = np.zeros(self.mask.shape, np.uint8)
partialMask.fill(self.GC_NONE)
cv2.line(partialMask, (x1, y1), (x2, y2), mode, self.penSize)
indices = np.where(partialMask != self.GC_NONE)
if indices[0].size == 0:
# nothing new in partialMask
return
self.imgWithMask[indices] = (1 - self.alpha)*self.img[indices] + \
self.alpha*self.mode2color[mode]
self.repaint()
def pushMask(self):
# if mask hasn't changed
if len(self.masks) > 0 and np.array_equal(self.masks[-1], self.mask):
return
self.masks.append(self.mask.copy())
def drawPartialImgWithMask(self, curMask, newMask):
indices = np.where(curMask != newMask)
if indices[0].size == 0:
return
self.imgWithMask[indices] = (1-self.alpha)*self.img[indices] + \
self.alpha*np.array([self.mode2color[m] for m in newMask[indices]])
def getResult(self):
result_mask = np.where((self.mask == 2) | (
self.mask == 0), 0, 1).astype('uint8')
return self.img*result_mask[:, :, np.newaxis]
@pyqtSlot(name="on_displayResultAction_triggered")
@pyqtSlot(name="on_hiddenMaskAction_triggered")
def repaint(self):
if self.img is None:
self.showMessage("No image")
return
if self.ui.displayResultAction.isChecked():
img = self.getResult()
elif self.ui.hiddenMaskAction.isChecked():
img = self.img
else:
img = self.imgWithMask
height, width, _ = img.shape
bytesOfLine = 3*width
image = QImage(img.tobytes(), width, height,
bytesOfLine, QImage.Format_RGB888).rgbSwapped()
self.canvas.setPixmap(QPixmap.fromImage(image))
def initUI(self):
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
rightBox = QWidget(self.ui.toolBar)
boxLayout = QHBoxLayout()
boxLayout.addWidget(QLabel("iterCount"))
self.iterCountSpinBox = QSpinBox(self)
self.iterCountSpinBox.setRange(1, 100)
self.iterCountSpinBox.setValue(5)
boxLayout.addWidget(self.iterCountSpinBox)
boxLayout.addStretch(1)
boxLayout.addWidget(QLabel("pen"))
self.penSizeSpinBox = QSpinBox(self)
self.penSizeSpinBox.setRange(1, 500)
self.penSizeSpinBox.setSingleStep(5)
self.penSizeSpinBox.setValue(40)
boxLayout.addWidget(self.penSizeSpinBox)
rightBox.setLayout(boxLayout)
self.ui.toolBar.addWidget(rightBox)
self.canvas = Canvas(self)
self.ui.scrollArea.setWidget(self.canvas)
self.ui.scrollArea.setAlignment(Qt.AlignCenter)
self.canvas.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
actionGroup = QActionGroup(self)
actionGroup.addAction(self.ui.fgdAction)
actionGroup.addAction(self.ui.bgdAction)
actionGroup.addAction(self.ui.prFgdAction)
actionGroup.addAction(self.ui.prBgdAction)
self.ui.exitAction.triggered.connect(self.close)
self.penSizeSpinBox.valueChanged.connect(self.setPenSize)
self.iterCountSpinBox.valueChanged.connect(self.setIterCount)
self.ui.opencvAction.triggered.connect(lambda: webbrowser.open(
'https://opencv-python-tutroals.readthedocs.io/en/'
'latest/py_tutorials/py_imgproc/py_grabcut/py_grabcut.html'
))
self.canvas.mousePressed.connect(self.pushMask)
self.canvas.mouseMoved.connect(self.drawingMask)
self.resetUiToDrawMaskMode()
def resetUiToDrawMaskMode(self):
self.ui.prFgdAction.setChecked(True)
self.ui.displayResultAction.setChecked(False)
self.ui.hiddenMaskAction.setChecked(False)
def setPenSize(self, v):
self.penSize = v
def setIterCount(self, v):
self.iterCount = v
def showMessage(self, msg):
self.ui.statusbar.showMessage(msg)
@pyqtSlot(name="on_openAction_triggered")
def openImage(self):
fileName, _ = QFileDialog.getOpenFileName(
self, "Open File", str(self.imgPath))
if not fileName:
return
imgFile = Path(fileName)
self.imgPath = imgFile.parent
        # cv2.imread can't read paths containing non-ASCII (e.g. Chinese)
        # characters, so this is a workaround.
        # self.img = cv2.imread(fileName)
data = np.fromfile(fileName, dtype=np.uint8)
self.img = cv2.imdecode(data, cv2.IMREAD_UNCHANGED)
        # normalize to 3 channels: expand grayscale, discard any alpha channel
        if self.img.ndim == 2:
            self.img = cv2.cvtColor(self.img, cv2.COLOR_GRAY2BGR)
        else:
            self.img = self.img[:, :, :3]
self.reset()
@pyqtSlot(name="on_saveAction_triggered")
def saveResult(self):
if self.img is None:
self.showMessage("no result to save")
return
fileName, _ = QFileDialog.getSaveFileName(
self, "Save File", str(self.imgPath))
if not fileName:
return
imgFile = Path(fileName)
self.imgPath = imgFile.parent
# default save as png
if not imgFile.suffix:
imgFile = imgFile.with_suffix('.png')
result = self.getResult()
        # cv2.imwrite can't write to paths containing non-ASCII (e.g. Chinese)
        # characters, so save through PIL instead; convert the BGR array
        # (OpenCV order) to RGB (PIL order) so the saved colors are correct.
        im = Image.fromarray(result[:, :, ::-1])
        im.save(imgFile.as_posix())
@pyqtSlot(name="on_exportMaskAction_triggered")
def exportMask(self):
if self.mask is None or not self.mask.any():
self.showMessage("no mask")
return
fileName, _ = QFileDialog.getSaveFileName(
self, "Save Mask", str(self.imgPath))
if not fileName:
return
imgFile = Path(fileName)
self.imgPath = imgFile.parent
if not imgFile.suffix:
imgFile = imgFile.with_suffix('.png')
im = Image.fromarray(self.mask)
im.save(imgFile.as_posix())
@pyqtSlot(name="on_undoAction_triggered")
def undo(self):
if len(self.masks) == 0:
self.showMessage("undo stack is empty")
return
prevMask = self.masks.pop()
self.drawPartialImgWithMask(self.mask, prevMask)
self.mask = prevMask
self.resetUiToDrawMaskMode()
self.repaint()
@pyqtSlot(name="on_resetAction_triggered")
def reset(self):
if self.img is None:
self.showMessage("No image")
return
self.mask = np.zeros(self.img.shape[:2], np.uint8)
self.mask.fill(cv2.GC_PR_BGD)
self.masks = []
self.imgWithMask = np.zeros(self.img.shape, np.uint8)
self.imgWithMask[...] = (1-self.alpha)*self.img + \
self.alpha*self.mode2color[cv2.GC_PR_BGD]
self.resetUiToDrawMaskMode()
self.repaint()
@pyqtSlot(name="on_grabCutAction_triggered")
def runGrabCut(self):
self.grabCut(self.iterCount)
@pyqtSlot(name="on_singleStepAction_triggered")
def runGrabCutSingleStep(self):
self.grabCut(1)
def closeEvent(self, evt):
evt.accept()
if __name__ == '__main__':
app = QApplication(sys.argv)
window = MainWindow()
window.show()
app.exec_()
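
Below, a minimal standalone sketch of the mask-initialized cv2.grabCut call that the GUI above drives; the input path, stroke rectangle, and iteration count are illustrative placeholders, not part of the original file.

import cv2
import numpy as np

img = cv2.imread("photo.jpg")  # any BGR image (placeholder path)
mask = np.full(img.shape[:2], cv2.GC_PR_BGD, np.uint8)  # start all "probable background"
mask[100:300, 150:350] = cv2.GC_PR_FGD                  # stand-in for user strokes
bgdModel = np.zeros((1, 65), np.float64)                # GMM state, as in grabCut() above
fgdModel = np.zeros((1, 65), np.float64)
cv2.grabCut(img, mask, None, bgdModel, fgdModel, 5, cv2.GC_INIT_WITH_MASK)
keep = ((mask == cv2.GC_FGD) | (mask == cv2.GC_PR_FGD)).astype(np.uint8)
result = img * keep[:, :, np.newaxis]                   # same masking as getResult()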
| true | true |
f71c5d93e6fff0721c0bfffe7881cdff8bdc9c08 | 952 | py | Python | scripts/runAll.py | acatwithacomputer/proteus | 80dfad95da6ab4d18a88a035f55c26b03540a864 | ["MIT"] | null | null | null | scripts/runAll.py | acatwithacomputer/proteus | 80dfad95da6ab4d18a88a035f55c26b03540a864 | ["MIT"] | 13 | 2018-02-08T23:22:59.000Z | 2020-12-06T19:40:32.000Z | scripts/runAll.py | acatwithacomputer/proteus | 80dfad95da6ab4d18a88a035f55c26b03540a864 | ["MIT"] | 1 | 2020-02-17T03:25:34.000Z | 2020-02-17T03:25:34.000Z |
#! /usr/bin/env python
from __future__ import print_function
import sys
import os
import glob
pFiles = glob.glob('*_p.py')
caseDict = {}
for pf in pFiles:
caseDict[pf] = set(glob.glob(pf[:-5]+'*_n.py'))
# fix cases where one problem name extends another problem name: the shorter
# name's glob also matches the longer name's _n files, so remove them.
# (str.find returns -1 -- truthy -- when NOT found, so the original
# `if pf2.find(pf1[:-4]):` test fired in exactly the wrong cases.)
for pf1 in pFiles:
    for pf2 in pFiles:
        if pf1 != pf2 and pf1.startswith(pf2[:-5]):
            nf1Set = set(glob.glob(pf1[:-5]+'*_n.py'))
            caseDict[pf2] -= nf1Set
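# e.g. with "la_p.py" and "la_gauss_p.py" both present (hypothetical names),
# glob("la*_n.py") also matches "la_gauss_*_n.py"; the loop above strips
# those from "la_p.py"'s set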
for pf in pFiles:
print(pf)
print(caseDict[pf])
for p,nList in caseDict.items():
if len(nList) == 0:
sys.stdout.write("\n----------------Skipping "+p+". No n file----------------------\n")
sys.stdout.flush()
else:
for n in nList:
args = ('proteusRun.py',p,n,'-l 4','-b','runAllBatch.py')
sys.stdout.write("\n----------------Running "+p+"---"+n+"\n")
sys.stdout.flush()
os.spawnvpe(os.P_WAIT,'proteusRun.py',args,os.environ)
f71c5df90825c721c8d73769c1b51879fc9c7df2 | 2,706 | py | Python | runloop/adminx.py | luqin/firefly | 2e5ab17f2d20deb3c68c927f6208ea89db7c639d | ["MIT"] | null | null | null | runloop/adminx.py | luqin/firefly | 2e5ab17f2d20deb3c68c927f6208ea89db7c639d | ["MIT"] | 9 | 2020-03-24T16:45:25.000Z | 2022-03-11T23:40:51.000Z | runloop/adminx.py | luqin/firefly | 2e5ab17f2d20deb3c68c927f6208ea89db7c639d | ["MIT"] | null | null | null |
from __future__ import absolute_import
from django.forms import ModelMultipleChoiceField
from django.utils.translation import ugettext as _
import xadmin
from .xadmin_action import RunloopAction
from .models import RunLoopGroup, Orders
ACTION_NAME = {
'add': _('Can add %s'),
'change': _('Can change %s'),
'edit': _('Can edit %s'),
'delete': _('Can delete %s'),
'view': _('Can view %s'),
}
def get_stock_name(p):
action = p.codename.split('_')[0]
if action in ACTION_NAME:
return ACTION_NAME[action] % str(p.content_type)
    else:
        # fall back to the permission's display name ("p.co_name" in the
        # original looks like a typo; Permission objects have no co_name)
        return p.name
class StockModelMultipleChoiceField(ModelMultipleChoiceField):
def label_from_instance(self, p):
return get_stock_name(p)
@xadmin.sites.register(RunLoopGroup)
class RunLoopGroupAdmin(object):
list_display = ("name", "start", "end", "status", "description", 'link',)
list_display_links = ("name",)
    # readonly_fields = ("status", )
exclude = ['status']
list_quick_filter = [{"field": "name", "limit": 10}]
search_fields = ["name"]
reversion_enable = True
style_fields = {"factor_buys": "checkbox-inline", "factor_sells": "checkbox-inline", "positions": "radio-inline",
"stocks": "m2m_transfer"}
# def get_field_attrs(self, db_field, **kwargs):
# print("db_field", db_field)
# attrs = super(RunLoopGroupAdmin, self).get_field_attrs(db_field, **kwargs)
# if db_field.name == 'stocks':
# attrs['form_class'] = StockModelMultipleChoiceField
# return attrs
actions = [RunloopAction]
    def link(self, instance):
        if instance.status == 'done':
            # link texts: '买卖点' = "buy/sell points", '收益' = "returns"
            return "<a href='%s/k' target='_blank'>%s</a>" % (
                instance.id, '买卖点') + " <a href='%s/returns' target='_blank'>%s</a>" % (instance.id, '收益')
        else:
            return ""
    link.short_description = '<div style="width: 100px;">报表</div>'  # '报表' = "reports"
    link.allow_tags = True
    link.is_column = False
@xadmin.sites.register(Orders)
class OrdersAdmin(object):
list_display = (
"run_loop_group", "stock", "profit", "profit_cg_hunder", "buy_date", "buy_price", "buy_cnt", "buy_factor",
"sell_date", "sell_price", "sell_type_extra", "sell_type")
list_display_links = ("stock",)
    # readonly_fields = ("status", )
# exclude = ['status']
list_quick_filter = [{"field": "stock", "limit": 10}]
search_fields = ["stock"]
reversion_enable = True
# xadmin.sites.site.register(HostGroup, HostGroupAdmin)
# xadmin.sites.site.register(MaintainLog, MaintainLogAdmin)
# xadmin.sites.site.register(IDC, IDCAdmin)
# xadmin.sites.site.register(AccessRecord, AccessRecordAdmin)
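
A sketch, following the commented-out get_field_attrs override above, of how the otherwise-unused StockModelMultipleChoiceField could be wired in so the 'stocks' transfer widget labels choices via get_stock_name(); the subclass name is hypothetical and the hook signature is taken on trust from the commented code.

class RunLoopGroupAdminWithStockLabels(RunLoopGroupAdmin):
    def get_field_attrs(self, db_field, **kwargs):
        attrs = super(RunLoopGroupAdminWithStockLabels, self).get_field_attrs(
            db_field, **kwargs)
        if db_field.name == 'stocks':
            # labels then render through StockModelMultipleChoiceField.label_from_instance
            attrs['form_class'] = StockModelMultipleChoiceField
        return attrs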
f71c5ef0b1d632892fa3fd528d707dc54828ea6e | 16,431 | py | Python | pychron/mv/focus/autofocus_manager.py | ael-noblegas/pychron | 6ebbbb1f66a614972b62b7a9be4c784ae61b5d62 | ["Apache-2.0"] | 1 | 2019-02-27T21:57:44.000Z | 2019-02-27T21:57:44.000Z | pychron/mv/focus/autofocus_manager.py | ael-noblegas/pychron | 6ebbbb1f66a614972b62b7a9be4c784ae61b5d62 | ["Apache-2.0"] | 80 | 2018-07-17T20:10:20.000Z | 2021-08-17T15:38:24.000Z | pychron/mv/focus/autofocus_manager.py | AGESLDEO/pychron | 1a81e05d9fba43b797f335ceff6837c016633bcf | ["Apache-2.0"] | null | null | null |
# ===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# =============enthought library imports=======================
from __future__ import absolute_import
from __future__ import print_function
import six.moves.cPickle as pickle
from traits.api import Bool, Any, Instance, Button, Property, Event, on_trait_change
from traitsui.api import View, Item, Handler, HGroup
# ============= standard library imports ========================
# from threading import Thread
from threading import Event as TEvent
from numpy import linspace, argmin, argmax, random, asarray
import time
import os
# ============= local library imports ==========================
from pychron.core.time_series.time_series import smooth
from pychron.image.cv_wrapper import grayspace, crop, get_focus_measure
# from pychron.image.cvwrapper import grayspace, get_focus_measure, crop, resize
from scipy.ndimage.measurements import variance
from scipy.ndimage.filters import generic_gradient_magnitude, sobel
from scipy.ndimage import sum as ndsum
from pychron.paths import paths
from pychron.managers.manager import Manager
from pychron.image.image import Image
# from pychron.machine_vision.focus_parameters import FocusParameters
# from pychron.image.image_editor import ImageEditor
from pychron.graph.graph import Graph
from pychron.mv.focus.focus_parameters import FocusParameters
from pychron.core.ui.image_editor import ImageEditor
from pychron.core.ui.gui import invoke_in_main_thread
from pychron.core.ui.thread import Thread
class ConfigureHandler(Handler):
def closed(self, info, isok):
if isok:
info.object.dump_parameters()
class AutoFocusManager(Manager):
"""
currently uses passive focus techniques
see
http://en.wikipedia.org/wiki/Autofocus
"""
video = Any
laser_manager = Any
stage_controller = Any
canvas = Any
parameters = Instance(FocusParameters)
configure_button = Button('configure')
autofocus_button = Event
autofocus_label = Property(depends_on='autofocusing')
autofocusing = Bool
# threading event for cancel signal
_evt_autofocusing = None
image = Instance(Image, ())
graph = None
def dump_parameters(self):
p = os.path.join(paths.hidden_dir, 'autofocus_configure')
self.info('dumping parameters to {}'.format(p))
with open(p, 'wb') as f:
pickle.dump(self.parameters, f)
def load_parameter(self):
p = os.path.join(paths.hidden_dir, 'autofocus_configure')
if os.path.isfile(p):
with open(p, 'rb') as f:
try:
params = pickle.load(f)
self.info('loading parameters from {}'.format(p))
if not isinstance(params, FocusParameters):
self.info('out of date parameters file. using default')
params = FocusParameters()
return params
except Exception as e:
print('autofocus load parameter', e)
return FocusParameters()
else:
return FocusParameters()
def passive_focus(self, block=False, **kw):
self._evt_autofocusing = TEvent()
self._evt_autofocusing.clear()
# manager = self.laser_manager
oper = self.parameters.operator
self.info('passive focus. operator = {}'.format(oper))
g = self.graph
if not g:
g = Graph(plotcontainer_dict=dict(padding=10),
window_x=0.70,
window_y=20,
window_width=325,
window_height=325,
window_title='Autofocus'
)
self.graph = g
g.clear()
g.new_plot(padding=[40, 10, 10, 40],
xtitle='Z (mm)',
ytitle='Focus Measure ({})'.format(oper)
)
g.new_series()
g.new_series()
invoke_in_main_thread(self._open_graph)
target = self._passive_focus
self._passive_focus_thread = Thread(name='autofocus', target=target,
args=(self._evt_autofocusing,
),
kwargs=kw
)
self._passive_focus_thread.start()
if block:
# while 1:
# if not self._passive_focus_thread.isRunning():
# break
# time.sleep(0.25)
self._passive_focus_thread.join()
def _open_graph(self):
ui = self.graph.edit_traits()
self.add_window(ui)
def stop_focus(self):
if self.stage_controller:
self.stage_controller.stop()
self.info('autofocusing stopped by user')
def _passive_focus(self, stop_signal, set_zoom=True):
'''
            sweep z looking for the maximum focus measure
            FMgrad = Roberts or Sobel gradient magnitude (Sobel suppresses noise)
            FMvar  = intensity variance
'''
self.autofocusing = True
manager = self.laser_manager
fstart = self.parameters.fstart
fend = self.parameters.fend
step_scalar = self.parameters.step_scalar
zoom = self.parameters.zoom
operator = self.parameters.operator
steps = step_scalar * (max(fend, fstart) - min(fend, fstart)) + 1
prev_zoom = None
if set_zoom and \
manager is not None and \
zoom:
motor = manager.get_motor('zoom')
if motor:
prev_zoom = motor.data_position
self.info('setting zoom: {}'.format(zoom))
manager.set_motor('zoom', zoom, block=True)
time.sleep(1.5)
args = self._do_focusing(fstart, fend, steps, operator)
if manager is not None:
if prev_zoom is not None:
self.info('returning to previous zoom: {}'.format(prev_zoom))
manager.set_motor('zoom', prev_zoom, block=True)
if args:
mi, fmi, ma, fma = args
self.info('''passive focus results:Operator={}
ImageGradmin={} (z={})
ImageGradmax={}, (z={})'''.format(operator, mi, fmi, ma, fma))
focus_pos = fma
self.graph.add_vertical_rule(focus_pos)
self.graph.redraw()
# self.graph.add_vertical_rule(fma)
self.info('calculated focus z= {}'.format(focus_pos))
# if set_z:
controller = self.stage_controller
if controller is not None:
if not stop_signal.isSet():
controller.single_axis_move('z', focus_pos, block=True)
controller._z_position = focus_pos
controller.z_progress = focus_pos
self.autofocusing = False
def _cancel_sweep(self, vo):
if self._evt_autofocusing.isSet():
# return to original velocity
self.autofocusing = False
self._reset_velocity(vo)
return True
def _reset_velocity(self, vo):
if self.stage_controller:
pdict = dict(velocity=vo, key='z')
self.stage_controller.set_single_axis_motion_parameters(pdict=pdict)
def _do_focusing(self, start, end, steps, operator):
screen_roi = self._get_roi()
self._add_focus_area_rect(*screen_roi)
src = self._load_source()
src = asarray(src)
h, w, _d = src.shape
cx = w / 2.
cy = h / 2.
cw = self.parameters.crop_width
ch = self.parameters.crop_height
roi = cx, cy, cw, ch
'''
start the z in motion and take pictures as you go
query stage_controller to get current z
'''
self.info('focus sweep start={} end={}'.format(start, end))
# move to start position
controller = self.stage_controller
if controller:
vo = controller.axes['z'].velocity
if self._cancel_sweep(vo):
return
self.graph.set_x_limits(min(start, end), max(start, end), pad=2)
            # first (coarse) sweep, using velocity_scalar1
self._do_sweep(start, end, velocity=self.parameters.velocity_scalar1)
fms, focussteps = self._collect_focus_measures(operator, roi)
if not (fms and focussteps):
return
# reached end of sweep
# calculate a nominal focal point
args = self._calculate_nominal_focal_point(fms, focussteps)
if not args:
return
nfocal = args[3]
nwin = self.parameters.negative_window
pwin = self.parameters.positive_window
if self._cancel_sweep(vo):
return
nstart, nend = max(0, nfocal - nwin), nfocal + pwin
# mi = min(min(nstart, nend), min(start, end))
# ma = max(max(nstart, nend), max(start, end))
# self.graph.set_x_limits(mi, ma, pad=2)
time.sleep(1)
# do a slow tight sweep around the nominal focal point
self._do_sweep(nstart, nend, velocity=self.parameters.velocity_scalar2)
fms, focussteps = self._collect_focus_measures(operator, roi, series=1)
self._reset_velocity(vo)
else:
focussteps = linspace(0, 10, 11)
fms = -(focussteps - 5) ** 2 + 10 + random.random(11)
self.info('frames analyzed {}'.format(len(fms)))
# self.canvas.markupcontainer.pop('croprect')
return self._calculate_nominal_focal_point(fms, focussteps)
def _do_sweep(self, start, end, velocity=None):
controller = self.stage_controller
controller.single_axis_move('z', start, block=True)
# time.sleep(0.1)
# explicitly check for motion
# controller.block(axis='z')
if velocity:
vo = controller.axes['z'].velocity
controller.set_single_axis_motion_parameters(pdict=dict(velocity=vo * velocity,
key='z'))
self.info('starting sweep from {}'.format(controller.z_progress))
# pause before moving to end
time.sleep(0.25)
controller.single_axis_move('z', end, update=100, immediate=True)
def _collect_focus_measures(self, operator, roi, series=0):
controller = self.stage_controller
focussteps = []
fms = []
if controller.timer:
p = controller.timer.get_interval()
self.debug('controller timer period {}'.format(p))
pz = controller.z_progress
while 1:
src = self._load_source()
x = controller.z_progress
if x != pz:
y = self._calculate_focus_measure(src, operator, roi)
self.graph.add_datum((x, y), series=series)
focussteps.append(x)
fms.append(y)
pz = x
if not (controller.timer.isActive() and \
not self._evt_autofocusing.isSet()):
break
time.sleep(p)
self.debug('sweep finished')
return fms, focussteps
def _calculate_nominal_focal_point(self, fms, focussteps):
if fms:
sfms = smooth(fms)
if sfms is not None:
self.graph.new_series(focussteps, sfms)
self.graph.redraw()
fmi = focussteps[argmin(sfms)]
fma = focussteps[argmax(sfms)]
mi = min(sfms)
ma = max(sfms)
return mi, fmi, ma, fma
def _calculate_focus_measure(self, src, operator, roi):
'''
see
IMPLEMENTATION OF A PASSIVE AUTOMATIC FOCUSING ALGORITHM
FOR DIGITAL STILL CAMERA
DOI 10.1109/30.468047
and
http://cybertron.cg.tu-berlin.de/pdci10/frankencam/#autofocus
'''
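        # Illustrative sanity check (assumed synthetic data, not run here):
        #   v_sharp = (random.rand(64, 64) * 255).astype('uint8')
        #   v_blur = a smoothed/defocused copy of v_sharp
        #   variance(v_sharp) > variance(v_blur)   # sharper frame scores higher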
        # need to resize to 640x480 -- this is the space the roi is in
        # s = resize(grayspace(src), 640, 480)
src = grayspace(src)
v = crop(src, *roi)
di = dict(var=lambda x:variance(x),
laplace=lambda x: get_focus_measure(x, 'laplace'),
sobel=lambda x: ndsum(generic_gradient_magnitude(x, sobel, mode='nearest'))
)
func = di[operator]
return func(v)
def image_view(self):
v = View(Item('image', show_label=False, editor=ImageEditor(),
width=640,
height=480,
style='custom'))
return v
def traits_view(self):
v = View(
HGroup(self._button_factory('autofocus_button', 'autofocus_label'),
Item('configure_button', show_label=False),
show_border=True,
label='Autofocus'
)
)
return v
def configure_view(self):
v = View(Item('parameters', style='custom', show_label=False),
handler=ConfigureHandler,
buttons=['OK', 'Cancel'],
kind='livemodal',
title='Configure Autofocus',
x=0.80,
y=0.05
)
return v
def _load_source(self):
src = self.video.get_frame()
return src
        # if src:
        #     return Image.new_frame(src)
        # self.image.load(src)
        # return self.image.source_frame
def _get_roi(self):
w = self.parameters.crop_width
h = self.parameters.crop_height
cx, cy = self.canvas.get_center_rect_position(w, h)
# cw, ch = self.canvas.outer_bounds
# print w, h, cw, ch
# cx = cw / 2. - w / 2.
# cy = ch / 2. - h / 2.
# cx = (cw - w) / 2.
# cy = (ch - h) / 2.
# cx = (640 * self.canvas.scaling - w) / 2
# cy = (480 * self.canvas.scaling - h) / 2
roi = cx, cy, w, h
return roi
def _add_focus_area_rect(self, cx, cy, w, h):
# pl = self.canvas.padding_left
# pb = self.canvas.padding_bottom
self.canvas.remove_item('croprect')
self.canvas.add_markup_rect(cx, cy, w, h, identifier='croprect')
def _autofocus_button_fired(self):
if not self.autofocusing:
self.autofocusing = True
self.passive_focus()
else:
self.autofocusing = False
self._evt_autofocusing.set()
self.stop_focus()
def _configure_button_fired(self):
self._crop_rect_update()
self.edit_traits(view='configure_view', kind='livemodal')
self.canvas.remove_item('croprect')
# try:
# self.canvas.markupcontainer.pop('croprect')
# except KeyError:
# pass
@on_trait_change('parameters:[_crop_width,_crop_height]')
def _crop_rect_update(self):
roi = self._get_roi()
self._add_focus_area_rect(*roi)
def _get_autofocus_label(self):
return 'Autofocus' if not self.autofocusing else 'Stop'
def _parameters_default(self):
return self.load_parameter()
def _autofocusing_changed(self, new):
if not new:
self.canvas.remove_item('croprect')
# ===============================================================================
# Deprecated
# ===============================================================================
# ============= EOF =====================================
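
A compact standalone sketch of the coarse-then-fine passive sweep implemented above; `read_frame(z)` is a hypothetical stand-in for moving the stage to z and grabbing a grayscale frame, and the ranges are illustrative.

import numpy as np

def focus_measure(gray):
    # intensity variance -- the 'var' operator used above
    return float(((gray - gray.mean()) ** 2).mean())

def best_focus(read_frame, zs):
    scores = [focus_measure(read_frame(z)) for z in zs]
    return zs[int(np.argmax(scores))]

# coarse pass over the whole travel, then a tight pass around the winner
coarse = np.linspace(0.0, 10.0, 21)
z0 = best_focus(read_frame, coarse)            # read_frame is assumed to exist
fine = np.linspace(max(0.0, z0 - 1.0), z0 + 1.0, 41)
z_focus = best_focus(read_frame, fine)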
f71c5f4fe3f324377d2b741bc0a1bb84b3949735 | 602 | py | Python | gdrive_sync/migrations/0002_drivefile_video.py | mitodl/ocw-studio | 949f96ec0647064f8d495ebdd22d66eea7d024a5 | ["BSD-3-Clause"] | 2 | 2020-08-07T15:55:41.000Z | 2021-08-16T18:28:09.000Z | gdrive_sync/migrations/0002_drivefile_video.py | mitodl/ocw-studio | 949f96ec0647064f8d495ebdd22d66eea7d024a5 | ["BSD-3-Clause"] | 924 | 2020-08-10T17:54:19.000Z | 2022-03-31T21:15:17.000Z | gdrive_sync/migrations/0002_drivefile_video.py | mitodl/ocw-studio | 949f96ec0647064f8d495ebdd22d66eea7d024a5 | ["BSD-3-Clause"] | null | null | null |
# Generated by Django 3.1.12 on 2021-08-09 17:27
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("videos", "0001_initial"),
("gdrive_sync", "0001_initial"),
]
operations = [
migrations.AddField(
model_name="drivefile",
name="video",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="videos.video",
),
),
]
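
For reference, a sketch of the model-side declaration this migration corresponds to; the field options mirror the AddField operation above, while the surrounding model body is assumed.

from django.db import models

class DriveFile(models.Model):
    # ... fields from 0001_initial ...
    video = models.ForeignKey(
        "videos.Video",
        blank=True,
        null=True,
        on_delete=models.SET_NULL,  # deleting a Video keeps the DriveFile, nulling the link
    )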
f71c5f5935f04be92603bb1df6e9e04ca6fbc926 | 9,829 | py | Python | main.py | GautamGunecha/KBC-Quiz | 9e78435aaf2a88c6dc2bd38324d9e0b807ac909d | ["MIT"] | null | null | null | main.py | GautamGunecha/KBC-Quiz | 9e78435aaf2a88c6dc2bd38324d9e0b807ac909d | ["MIT"] | null | null | null | main.py | GautamGunecha/KBC-Quiz | 9e78435aaf2a88c6dc2bd38324d9e0b807ac909d | ["MIT"] | null | null | null |
# Python Quiz Game
import time
from pygame import mixer
mixer.init()
mixer.music.load("Audio/KBCMAIN.mp3")
mixer.music.set_volume(0.2)
mixer.music.play()
print("Let's Play Kaun Banega Crorepati")
name = input("Please Enter your Name: ")
print("Welcome", name)
decision = input("Do you want to play KBC(Yes/No): ")
decision = decision.lower()
if decision != "yes":
print(name, "Have a nice Day")
quit()
print("Let's Begin :)")
print("On Screen Your 1st Question")
print("")
mixer.music.load("Audio/KBC-T.mp3")
mixer.music.set_volume(0.3)
mixer.music.play()
print("Ques 1: International Literacy Day is observed on?")
correct = "Choose correct Option: "
print(correct)
print('''
A) Sep 8 B) Nov 28
C) May 2 D) Sep 22
''')
answer = input("Answer: ")
print("Option", answer, " locked kiya jey")
answer = answer.lower()
if (answer == 'a'):
print("Sahi Javaab", name)
print("Price Money Earned: Rs1000/-")
else:
print("Better Luck Next Time")
print("Correct Answer: A) Sep 8")
exit()
mixer.music.load("Audio/KBC-T.mp3")
mixer.music.set_volume(0.3)
mixer.music.play()
print("Ques 2: In which group of places the Kumbha Mela is held every twelve years?")
print(correct)
print('''
A) Ujjain, Puri, Prayag, Haridwar B) Prayag, Haridwar, Ujjain, Nasik
C) Rameshwaram, Puri, Badrinath, Dwarika D) Chittakoot, Ujjain, Prayad, Haridwar
''')
answer = input("Answer: ")
print("Option", answer, " locked kiya jey")
answer = answer.lower()
if(answer == 'b'):
print("Sahi Javaab", name)
print("Price Money Earned: Rs2000/-")
else:
print("Better Luck Next Time")
print("Correct Answer: B) Prayag, Haridwar, Ujjain, Nasik")
quit()
mixer.music.load("Audio/KBC-T.mp3")
mixer.music.set_volume(0.3)
mixer.music.play()
print("Ques 3) Which day is observed as the World Standards Day")
print(correct)
print('''
A) June 26 B) Oct 14
C) Nov 15 D) Dec 2
''')
answer = input("Answer: ")
print("Option", answer, "locked kiya jey")
answer = answer.lower()
if answer == 'b':
print("Sahi Javaab", name)
print("Price Money Earned: Rs3000/-")
else:
print("Better Luck next Time")
print("Correct Answer: B) Oct 14")
quit()
mixer.music.load("Audio/KBC-T.mp3")
mixer.music.set_volume(0.3)
mixer.music.play()
print("Ques 4) Where did Perseverance rover successfully land in 2021")
print(correct)
print('''
A) Mars B) Venus
C) Jupiter D) Ganymede
''')
answer = input("Answer: ")
print("Option", answer, "locked kiya jey")
answer = answer.lower()
if answer == 'a':
print("Sahi Javaab", name)
print("Price Money Earned: Rs5000/-")
else:
print("Better Luck next Time")
print("Correct Answer: A) Mars")
quit()
mixer.music.load("Audio/KBC-T.mp3")
mixer.music.set_volume(0.3)
mixer.music.play()
print("Ques 5) Name the person who was also known as Deshbandhu?")
print(correct)
print('''
A) S.Radhakrishnan B) G.K. Gokhale
C) Chittaranjan Das D) Madan Mohan Malviya
''')
answer = input("Answer: ")
print("Option", answer, "locked kiya jey")
answer = answer.lower()
if answer == 'c':
print("Sahi Javaab", name)
print("Price Money Earned: Rs10000")
else:
print("Better Luck next Time")
print("Correct Answer: C) Chittaranjan Das")
quit()
mixer.music.load("Audio/KBC-T.mp3")
mixer.music.set_volume(0.3)
mixer.music.play()
print("Ques 6) The capital of Uttarakhand is")
print(correct)
print('''
A) Mussoorie                B) Dehra Dun
C) Nainital D) Mumbai
''')
answer = input("Answer: ")
print("Option", answer, "locked kiya jey")
answer = answer.lower()
if answer == 'b':
print("Sahi Javaab", name)
print("Price Money Earned: Rs20,000/-")
else:
print("Better Luck next Time")
print("Correct Answer: B) Dehra Dun")
quit()
mixer.music.load("Audio/KBC-T.mp3")
mixer.music.set_volume(0.3)
mixer.music.play()
print("Ques 7) Geet Govind is a famous creation of")
print(correct)
print('''
A) Bana Bhatt B) Jayadev
C) Kalidas D) Bharat Muni
''')
answer = input("Answer: ")
print("Option", answer, "locked kiya jey")
answer = answer.lower()
if answer == 'b':
    print("Sahi Javaab", name)
    print("Price Money Earned: Rs40,000/-")
else:
    print("Better Luck next Time")
    print("Correct Answer: B) Jayadev")
quit()
mixer.music.load("Audio/KBC-T.mp3")
mixer.music.set_volume(0.3)
mixer.music.play()
print("Ques 8) Which of the followin represents the Finance Commissions that have been set-up so far")
print(correct)
print('''
A) 10 B) 11
C) 12 D) 13
''')
answer = input("Answer: ")
print("Option", answer, "locked kiya jey")
answer = answer.lower()
if answer == 'd':
print("Sahi Javaab", name)
print("Price Money Earned: Rs80,000/-")
else:
print("Better Luck next Time")
print("Correct Answer: C) 13")
quit()
mixer.music.load("Audio/KBC-T.mp3")
mixer.music.set_volume(0.3)
mixer.music.play()
print("Ques 9) According to the Constitution of India, which of the following is NOT one of the main organs of the Goverment")
print(correct)
print('''
A) Legislature B) Bureaucracy
C) Executive                   D) Judiciary
''')
answer = input("Answer: ")
print("Option", answer, "locked kiya jey")
answer = answer.lower()
if answer == 'b':
print("Sahi Javaab", name)
print("Price Money Earned: Rs1,60,000/-")
else:
print("Better Luck next Time")
print("Correct Answer: B) Bureaucracy")
quit()
mixer.music.load("Audio/KBC-T.mp3")
mixer.music.set_volume(0.3)
mixer.music.play()
print("Ques 10) Panchayati Raj comes under?")
print(correct)
print('''
A) Residual list B) Concurrent list
C) State list D) Union list
''')
answer = input("Answer: ")
print("Option", answer, "locked kiya jey")
answer = answer.lower()
if answer == 'c':
print("Sahi Javaab", name)
print("Price Money Earned: Rs3,20,000/-")
else:
print("Better Luck next Time")
print("Correct Answer: C) State List")
quit()
mixer.music.load("Audio/KBC-T.mp3")
mixer.music.set_volume(0.3)
mixer.music.play()
print("Ques 11) Harshcharita and KadamBari are the works of")
print(correct)
print('''
A) Kalhan B) Bana Bhatta
C) Panini D) Patanjali
''')
answer = input("Answer: ")
print("Option", answer, "locked kiya jey")
answer = answer.lower()
if answer == 'b':
print("Sahi Javaab", name)
print("Price Money Earned: Rs6,40,000/-")
else:
print("Better Luck next Time")
print("Correct Answer: B) Bana Bhatta")
quit()
mixer.music.load("Audio/KBC-T.mp3")
mixer.music.set_volume(0.3)
mixer.music.play()
print("Ques 12) When did the war of Americans Independence take place?")
print(correct)
print('''
A) 1770 B) 1772
C) 1774 D) 1776
''')
answer = input("Answer: ")
print("Option", answer, "locked kiya jey")
answer = answer.lower()
if answer == 'd':
print("Sahi Javaab", name)
print("Price Money Earned: Rs12,50,000/-")
else:
print("Better Luck next Time")
print("Correct Answer: D) 1776")
quit()
mixer.music.load("Audio/KBC-T.mp3")
mixer.music.set_volume(0.3)
mixer.music.play()
print("Ques 13) The river was also called as the Ganges of the South, Name the river from the given options")
print(correct)
print('''
A) Godavari B) Krishna
C) Cauvery D) Yamuna
''')
answer = input("Answer: ")
print("Option", answer, "locked kiya jey")
answer = answer.lower()
if answer == 'c':
print("Sahi Javaab", name)
print("Price Money Earned: Rs25,00,000/-")
else:
print("Better Luck next Time")
print("Correct Answer: C) Cauvery")
quit()
mixer.music.load("Audio/KBC-T.mp3")
mixer.music.set_volume(0.3)
mixer.music.play()
print("Ques 14) Which Indian state is inhabited by 'Jaintiya tribes")
print(correct)
print('''
A) Arunachal Pradesh B) Manipur
C) Mizoram D) Meghalaya
''')
answer = input("Answer: ")
print("Option", answer, "locked kiya jey")
answer = answer.lower()
if answer == 'd':
    print("Sahi Javaab", name)
    print("Price Money Earned: Rs50,00,000/-")
else:
    print("Better Luck next Time")
    print("Correct Answer: D) Meghalaya")
quit()
mixer.music.load("Audio/KBC-T.mp3")
mixer.music.set_volume(0.3)
mixer.music.play()
print("Ques 15) In the World Boxing Championships 2017, who won India its first medal?")
print(correct)
print('''
A) Gaurav Bidhuri                B) Alexzander
C) Tarzan D) Mitsuda
''')
answer = input("Answer: ")
print("Option", answer, "locked kiya jey")
answer = answer.lower()
if answer == 'a':
print("Sahi Javaab", name)
print("Price Money Earned: Rs1,00,00,000")
else:
print("Better Luck next Time")
print("Correct Answer: A) Gaurav Bidhuri")
quit()
mixer.music.load("Audio/KBC-T.mp3")
mixer.music.set_volume(0.3)
mixer.music.play()
print("Ques 16) What is the height of Siachen Glacier at eastern Karakoram range in the Himalya Mountains?")
print(correct)
print('''
A) 5400 Metre B) 6400 Metre
C) 5600 Metre D) 8500 Metre
''')
answer = input("Answer: ")
print("Option", answer, "locked kiya jey")
answer = answer.lower()
if answer == 'a':
print("Sahi Javaab", name)
print("Price Money Earned: Rs7,00,00,000")
else:
print("Better Luck next Time")
print("Correct Answer: A) 5400 Metre")
quit()
mixer.music.load("Audio/AmitabhSir.mp3")
mixer.music.set_volume(0.3)
mixer.music.play()
print("Congratulations! You are now crorepati", name)
f71c608e5b1cc741cd572c1921ad56281e09b9eb | 2,400 | py | Python | contrib/python/Jinja2/tests/test_utils.py | HeyLey/catboost | f472aed90604ebe727537d9d4a37147985e10ec2 | ["Apache-2.0"] | 1 | 2019-01-26T02:58:50.000Z | 2019-01-26T02:58:50.000Z | contrib/python/Jinja2/tests/test_utils.py | HeyLey/catboost | f472aed90604ebe727537d9d4a37147985e10ec2 | ["Apache-2.0"] | 1 | 2019-07-29T12:25:53.000Z | 2019-07-29T12:25:53.000Z | contrib/python/Jinja2/tests/test_utils.py | HeyLey/catboost | f472aed90604ebe727537d9d4a37147985e10ec2 | ["Apache-2.0"] | 1 | 2020-11-11T16:56:19.000Z | 2020-11-11T16:56:19.000Z |
# -*- coding: utf-8 -*-
"""
jinja2.testsuite.utils
~~~~~~~~~~~~~~~~~~~~~~
Tests utilities jinja uses.
:copyright: (c) 2017 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import gc
import pytest
import pickle
from jinja2.utils import LRUCache, escape, object_type_repr, urlize, \
select_autoescape
@pytest.mark.utils
@pytest.mark.lrucache
class TestLRUCache(object):
def test_simple(self):
d = LRUCache(3)
d["a"] = 1
d["b"] = 2
d["c"] = 3
d["a"]
d["d"] = 4
assert len(d) == 3
assert 'a' in d and 'c' in d and 'd' in d and 'b' not in d
def test_pickleable(self):
cache = LRUCache(2)
cache["foo"] = 42
cache["bar"] = 23
cache["foo"]
for protocol in range(3):
copy = pickle.loads(pickle.dumps(cache, protocol))
assert copy.capacity == cache.capacity
assert copy._mapping == cache._mapping
assert copy._queue == cache._queue
@pytest.mark.utils
@pytest.mark.helpers
class TestHelpers(object):
def test_object_type_repr(self):
class X(object):
pass
assert object_type_repr(42) == 'int object'
assert object_type_repr([]) == 'list object'
assert object_type_repr(X()) == 'test_utils.X object'
assert object_type_repr(None) == 'None'
assert object_type_repr(Ellipsis) == 'Ellipsis'
def test_autoescape_select(self):
func = select_autoescape(
enabled_extensions=('html', '.htm'),
disabled_extensions=('txt',),
default_for_string='STRING',
default='NONE',
)
assert func(None) == 'STRING'
assert func('unknown.foo') == 'NONE'
assert func('foo.html') == True
assert func('foo.htm') == True
assert func('foo.txt') == False
assert func('FOO.HTML') == True
assert func('FOO.TXT') == False
@pytest.mark.utils
@pytest.mark.escapeUrlizeTarget
class TestEscapeUrlizeTarget(object):
def test_escape_urlize_target(self):
url = "http://example.org"
target = "<script>"
assert urlize(url, target=target) == ('<a href="http://example.org"'
' target="<script>">'
'http://example.org</a>')
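
For context, select_autoescape is normally handed to an Environment; a minimal usage sketch against the public Jinja2 API:

from jinja2 import Environment, select_autoescape

env = Environment(autoescape=select_autoescape(
    enabled_extensions=("html", "htm"),
    default_for_string=False,   # plain string templates stay unescaped
))
# templates loaded as *.html/*.htm then render with autoescaping enabled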
f71c6158b5131e8416bc7f571ce69e11b023703b | 52,287 | py | Python | dendropy/test/test_datamodel_tree_list.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | dendropy/test/test_datamodel_tree_list.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | dendropy/test/test_datamodel_tree_list.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | #!/usr/bin/env python
##############################################################################
## DendroPy Phylogenetic Computing Library.
##
## Copyright 2010-2015 Jeet Sukumaran and Mark T. Holder.
## All rights reserved.
##
## See "LICENSE.rst" for terms and conditions of usage.
##
## If you use this work or any portion thereof in published work,
## please cite it as:
##
## Sukumaran, J. and M. T. Holder. 2010. DendroPy: a Python library
## for phylogenetic computing. Bioinformatics 26: 1569-1571.
##
##############################################################################
"""
Tests for dendropy.TreeList.
"""
import copy
import sys
import unittest
import collections
import dendropy
import random
from dendropy.test.support import dendropytest
from dendropy.test.support import curated_test_tree
from dendropy.test.support import curated_test_tree_list
from dendropy.test.support import compare_and_validate
class TestTreeListBasicOperations(dendropytest.ExtendedTestCase):
def test_insert_simple_list_foreign_namespace(self):
for idx in range(6):
tlist = curated_test_tree_list.get_tree_list(5)
self.assertEqual(len(tlist), 5)
self.assertEqual(len(tlist._trees), 5)
original_tns = tlist.taxon_namespace
tree = curated_test_tree_list.get_tree()
tlist.insert(idx, tree)
self.assertEqual(len(tlist), 6)
self.assertEqual(len(tlist._trees), 6)
self.assertIs(tlist.taxon_namespace, original_tns)
self.assertIn(tree, tlist)
self.assertIs(tree.taxon_namespace, tlist.taxon_namespace)
self.assertEqual(len(tlist.taxon_namespace), 7)
for t1 in tlist:
self.assertIs(t1.taxon_namespace, tlist.taxon_namespace)
for nd in t1:
self.assertIn(nd.taxon, tlist.taxon_namespace)
def test_insert_simple_list_native_namespace(self):
for idx in range(6):
tns = dendropy.TaxonNamespace()
tlist = curated_test_tree_list.get_tree_list(5, taxon_namespace=tns)
self.assertEqual(len(tlist), 5)
self.assertEqual(len(tlist._trees), 5)
original_tns = tlist.taxon_namespace
tree = curated_test_tree_list.get_tree(taxon_namespace=tns)
tlist.insert(idx, tree)
self.assertEqual(len(tlist), 6)
self.assertEqual(len(tlist._trees), 6)
self.assertIs(tlist.taxon_namespace, original_tns)
self.assertIn(tree, tlist)
self.assertIs(tree.taxon_namespace, tlist.taxon_namespace)
self.assertEqual(len(tlist.taxon_namespace), 7)
for t1 in tlist:
self.assertIs(t1.taxon_namespace, tlist.taxon_namespace)
for nd in t1:
self.assertIn(nd.taxon, tlist.taxon_namespace)
def test_append_simple_list_foreign_namespace(self):
tlist, trees = curated_test_tree_list.get_tree_list_and_list_of_trees(num_trees=curated_test_tree_list.DEFAULT_NUM_TREES)
original_tns = tlist.taxon_namespace
for t in trees:
tlist.append(t)
self.assertEqual(len(tlist), curated_test_tree_list.DEFAULT_NUM_TREES)
self.assertIs(tlist.taxon_namespace, original_tns)
# self.assertEqual(len(tlist.taxon_namespace), len(tlist[0].tax_labels))
self.assertEqual(len(tlist.taxon_namespace), 7)
for t1, t2 in zip(tlist, trees):
self.assertIs(t1, t2)
self.assertIs(t1.taxon_namespace, tlist.taxon_namespace)
for nd in t1:
self.assertIn(nd.taxon, tlist.taxon_namespace)
def test_append_simple_list_same_namespace(self):
tns = dendropy.TaxonNamespace()
tlist, trees = curated_test_tree_list.get_tree_list_and_list_of_trees(
num_trees=curated_test_tree_list.DEFAULT_NUM_TREES,
tree_list_taxon_namespace=tns,
list_of_trees_taxon_namespace=tns)
original_tns = tlist.taxon_namespace
for t in trees:
tlist.append(t)
self.assertEqual(len(tlist), curated_test_tree_list.DEFAULT_NUM_TREES)
self.assertIs(tlist.taxon_namespace, original_tns)
# self.assertEqual(len(tlist.taxon_namespace), len(tlist[0].tax_labels))
self.assertEqual(len(tlist.taxon_namespace), 7)
for t1, t2 in zip(tlist, trees):
self.assertIs(t1, t2)
self.assertIs(t1.taxon_namespace, tlist.taxon_namespace)
for nd in t1:
self.assertIn(nd.taxon, tlist.taxon_namespace)
def test_iadd_from_another_tree_list_different_namespace(self):
tlist = curated_test_tree_list.get_tree_list(num_trees=3)
original_tns = tlist.taxon_namespace
original_tlist_len = len(tlist)
original_tree_labels = [t.label for t in tlist]
self.assertEqual(len(original_tree_labels), len(tlist))
self.assertEqual(original_tlist_len, 3)
tlist_source = curated_test_tree_list.get_tree_list(num_trees=5)
self.assertEqual(len(tlist_source), 5)
source_tree_labels = [t.label for t in tlist_source]
self.assertEqual(len(source_tree_labels), len(tlist_source))
tlist += tlist_source
self.assertEqual(len(tlist), original_tlist_len + len(tlist_source))
self.assertIs(tlist.taxon_namespace, original_tns)
# self.assertEqual(len(tlist.taxon_namespace), len(tlist[0].tax_labels))
self.assertEqual(len(tlist.taxon_namespace), 7)
expected_tree_labels = original_tree_labels + source_tree_labels
self.assertEqual(len(tlist), len(expected_tree_labels))
for t1, tlabel in zip(tlist, expected_tree_labels):
self.assertIn(t1, tlist)
self.assertNotIn(t1, tlist_source)
self.assertIs(t1.taxon_namespace, tlist.taxon_namespace)
self.assertEqual(t1.label, tlabel)
for nd in t1:
self.assertIn(nd.taxon, tlist.taxon_namespace)
def test_iadd_from_list_of_trees_different_namespace(self):
tlist = curated_test_tree_list.get_tree_list(num_trees=3)
original_tns = tlist.taxon_namespace
original_tlist_len = len(tlist)
original_tree_labels = [t.label for t in tlist]
self.assertEqual(len(original_tree_labels), len(tlist))
self.assertEqual(original_tlist_len, 3)
source_trees = curated_test_tree_list.get_trees(
num_trees=5,
taxon_namespace=None,
label=None,
suppress_internal_node_taxa=False,
suppress_leaf_node_taxa=False)
self.assertEqual(len(source_trees), 5)
source_tree_labels = [t.label for t in source_trees]
self.assertEqual(len(source_tree_labels), len(source_trees))
tlist += source_trees
self.assertEqual(len(tlist), original_tlist_len + len(source_trees))
self.assertIs(tlist.taxon_namespace, original_tns)
# self.assertEqual(len(tlist.taxon_namespace), len(tlist[0].tax_labels))
self.assertEqual(len(tlist.taxon_namespace), 7)
expected_tree_labels = original_tree_labels + source_tree_labels
self.assertEqual(len(tlist), len(expected_tree_labels))
for t1, tlabel in zip(tlist, expected_tree_labels):
self.assertIn(t1, tlist)
if tlabel in source_tree_labels:
self.assertIn(t1, source_trees)
else:
self.assertNotIn(t1, source_trees)
self.assertIs(t1.taxon_namespace, tlist.taxon_namespace)
self.assertEqual(t1.label, tlabel)
for nd in t1:
self.assertIn(nd.taxon, tlist.taxon_namespace)
def test_add_from_another_tree_list_different_namespace(self):
tlist_source1 = curated_test_tree_list.get_tree_list(num_trees=3)
original_tns = tlist_source1.taxon_namespace
source1_tree_labels = [t.label for t in tlist_source1]
self.assertEqual(len(source1_tree_labels), len(tlist_source1))
self.assertEqual(len(tlist_source1), 3)
tlist_source2 = curated_test_tree_list.get_trees(num_trees=5)
self.assertEqual(len(tlist_source2), 5)
source2_tree_labels = [t.label for t in tlist_source2]
self.assertEqual(len(source2_tree_labels), len(tlist_source2))
tlist = tlist_source1 + tlist_source2
self.assertEqual(len(tlist_source1), 3)
self.assertEqual(len(tlist_source2), 5)
self.assertEqual(len(tlist), len(tlist_source1) + len(tlist_source2))
self.assertIs(tlist.taxon_namespace, original_tns)
self.assertEqual(len(tlist.taxon_namespace), 7)
expected_tree_labels = source1_tree_labels + source2_tree_labels
self.assertEqual(len(tlist), len(expected_tree_labels))
for t1, tlabel in zip(tlist, expected_tree_labels):
self.assertIn(t1, tlist)
self.assertIs(t1.taxon_namespace, tlist.taxon_namespace)
self.assertEqual(t1.label, tlabel)
if t1.label in source1_tree_labels:
self.assertNotIn(t1, tlist_source1)
self.assertNotIn(t1, tlist_source2)
else:
self.assertNotIn(t1, tlist_source1)
self.assertIn(t1, tlist_source2)
for nd in t1:
self.assertIn(nd.taxon, tlist.taxon_namespace)
def test_contains(self):
tlist = curated_test_tree_list.get_tree_list(5)
self.assertEqual(len(tlist._trees), len(tlist))
self.assertEqual(len(tlist), 5)
trees = curated_test_tree_list.get_trees(5)
self.assertEqual(len(trees), 5)
for t in tlist:
self.assertTrue(t in tlist._trees)
self.assertTrue(t in tlist)
for t in trees:
self.assertFalse(t in tlist._trees)
self.assertFalse(t in tlist)
tlist += trees
for t in trees:
self.assertTrue(t in tlist._trees)
self.assertTrue(t in tlist)
def test_delitem(self):
tsize = 5
for del_idx in range(-tsize, tsize):
tlist = curated_test_tree_list.get_tree_list(tsize)
original_trees = list(tlist._trees)
self.assertIn(original_trees[del_idx], tlist._trees)
del tlist[del_idx]
self.assertNotIn(original_trees[del_idx], tlist._trees)
self.assertEqual(len(tlist), tsize - 1)
del original_trees[del_idx]
self.assertEqual(tlist._trees, original_trees)
def test_iter(self):
tlist = curated_test_tree_list.get_tree_list(5)
self.assertEqual(len(tlist), 5)
self.assertEqual(len(tlist._trees), len(tlist))
for t1, t2 in zip(tlist, tlist._trees):
self.assertIs(t1, t2)
def test_reversed(self):
tlist = curated_test_tree_list.get_tree_list(5)
self.assertEqual(len(tlist), 5)
self.assertEqual(len(tlist._trees), len(tlist))
for t1, t2 in zip(reversed(tlist), reversed(tlist._trees)):
self.assertIs(t1, t2)
def test_getitem_simple(self):
tsize = 5
tlist = curated_test_tree_list.get_tree_list(tsize)
self.assertEqual(len(tlist), tsize)
self.assertEqual(len(tlist._trees), len(tlist))
for idx in range(-tsize, tsize):
self.assertIs(tlist[idx], tlist._trees[idx])
self.assertTrue(isinstance(tlist[idx], dendropy.Tree))
def test_getitem_slice(self):
tsize = 5
tlist = curated_test_tree_list.get_tree_list(tsize)
self.assertEqual(len(tlist), tsize)
self.assertEqual(len(tlist._trees), len(tlist))
for a in range(-tsize, tsize):
for b in range(-tsize, tsize):
for step in range(-tsize, tsize):
if step == 0:
continue
tt = tlist[a:b:step]
k = tlist._trees[a:b:step]
self.assertEqual(len(tt), len(k))
for t1, t2 in zip(tt, k):
self.assertIn(t1, tlist)
self.assertIn(t1, tlist._trees)
self.assertIs(t1.taxon_namespace, tlist.taxon_namespace)
def test_setitem_simple(self):
tsize = 5
for idx in range(-tsize, tsize):
tlist = curated_test_tree_list.get_tree_list(tsize)
self.assertEqual(len(tlist), tsize)
self.assertEqual(len(tlist._trees), len(tlist))
old_tree = tlist[idx]
new_tree = curated_test_tree_list.get_tree()
tlist[idx] = new_tree
self.assertIs(tlist[idx], new_tree)
self.assertIsNot(tlist[idx], old_tree)
self.assertIn(new_tree, tlist)
self.assertNotIn(old_tree, tlist)
self.assertIs(new_tree.taxon_namespace,
tlist.taxon_namespace)
self.assertEqual(len(tlist.taxon_namespace), 7)
for tree in tlist:
for nd in tree:
self.assertIn(nd.taxon, tlist.taxon_namespace)
def test_setitem_slice_from_list(self):
tsize = 5
for a in range(-tsize, tsize):
for b in range(-tsize, tsize):
for step in range(-tsize, tsize):
if step == 0:
continue
slice_obj = slice(a, b, step)
slice_len = len(range(*slice_obj.indices(tsize)))
if slice_len <= 0:
continue
tlist = curated_test_tree_list.get_tree_list(tsize)
self.assertEqual(len(tlist), tsize)
self.assertEqual(len(tlist._trees), len(tlist))
copy_list = list(tlist._trees)
source = curated_test_tree_list.get_trees(slice_len)
tlist[a:b:step] = source
copy_list[a:b:step] = source
expected_tree_labels = [t.label for t in copy_list]
self.assertEqual(len(tlist), len(copy_list))
self.assertEqual(len(tlist), len(tlist._trees))
self.assertEqual(len(tlist.taxon_namespace), 7)
for t1, t2, tlabel in zip(tlist, copy_list, expected_tree_labels):
self.assertIs(t1, t2)
self.assertIn(t1, tlist)
self.assertIn(t1, tlist._trees)
self.assertEqual(t1.label, tlabel)
self.assertIs(t1.taxon_namespace, tlist.taxon_namespace)
for nd in t1:
self.assertIn(nd.taxon, tlist.taxon_namespace)
def test_setitem_slice_from_tree_list(self):
tsize = 5
for a in range(-tsize, tsize):
for b in range(-tsize, tsize):
for step in range(-tsize, tsize):
if step == 0:
continue
slice_obj = slice(a, b, step)
slice_indexes = list(range(*slice_obj.indices(tsize)))
slice_len = len(slice_indexes)
if slice_len <= 0:
continue
tlist = curated_test_tree_list.get_tree_list(tsize)
self.assertEqual(len(tlist), tsize)
self.assertEqual(len(tlist._trees), len(tlist))
copy_list = list(tlist._trees)
source = curated_test_tree_list.get_tree_list(slice_len)
copy_list[a:b:step] = source._trees
tlist[a:b:step] = source
expected_tree_labels = [t.label for t in copy_list]
self.assertEqual(len(tlist), len(copy_list))
self.assertEqual(len(tlist), len(tlist._trees))
self.assertEqual(len(tlist.taxon_namespace), 7)
for idx, (t1, t2, tlabel) in enumerate(zip(tlist, copy_list, expected_tree_labels)):
self.assertIn(t1, tlist)
self.assertIn(t1, tlist._trees)
self.assertEqual(t1.label, tlabel)
self.assertIs(t1.taxon_namespace, tlist.taxon_namespace)
if idx in slice_indexes:
self.assertIsNot(t1, t2)
self.assertIn(t1, tlist)
self.assertIn(t1, tlist._trees)
self.assertNotIn(t2, tlist)
self.assertNotIn(t2, tlist._trees)
self.assertNotIn(t1, source)
self.assertNotIn(t1, source._trees)
else:
self.assertIs(t1, t2)
for nd in t1:
self.assertIn(nd.taxon, tlist.taxon_namespace)
def test_clear(self):
tlist = curated_test_tree_list.get_tree_list(5)
self.assertEqual(len(tlist._trees), 5)
tlist.clear()
self.assertEqual(len(tlist), 0)
self.assertEqual(len(tlist._trees), 0)
def test_extend_from_another_tree_list_different_namespace(self):
tlist = curated_test_tree_list.get_tree_list(num_trees=3)
original_tns = tlist.taxon_namespace
original_tlist_len = len(tlist)
original_tree_labels = [t.label for t in tlist]
self.assertEqual(len(original_tree_labels), len(tlist))
self.assertEqual(original_tlist_len, 3)
tlist_source = curated_test_tree_list.get_tree_list(num_trees=5)
self.assertEqual(len(tlist_source), 5)
source_tree_labels = [t.label for t in tlist_source]
self.assertEqual(len(source_tree_labels), len(tlist_source))
tlist.extend(tlist_source)
self.assertEqual(len(tlist), original_tlist_len + len(tlist_source))
self.assertIs(tlist.taxon_namespace, original_tns)
# self.assertEqual(len(tlist.taxon_namespace), len(tlist[0].tax_labels))
self.assertEqual(len(tlist.taxon_namespace), 7)
expected_tree_labels = original_tree_labels + source_tree_labels
self.assertEqual(len(tlist), len(expected_tree_labels))
for t1, tlabel in zip(tlist, expected_tree_labels):
self.assertIn(t1, tlist)
self.assertNotIn(t1, tlist_source)
self.assertIs(t1.taxon_namespace, tlist.taxon_namespace)
self.assertEqual(t1.label, tlabel)
for nd in t1:
self.assertIn(nd.taxon, tlist.taxon_namespace)
def test_extend_from_list_of_trees_different_namespace(self):
tlist = curated_test_tree_list.get_tree_list(num_trees=3)
original_tns = tlist.taxon_namespace
original_tlist_len = len(tlist)
original_tree_labels = [t.label for t in tlist]
self.assertEqual(len(original_tree_labels), len(tlist))
self.assertEqual(original_tlist_len, 3)
source_trees = curated_test_tree_list.get_trees(
num_trees=5,
taxon_namespace=None,
label=None,
suppress_internal_node_taxa=False,
suppress_leaf_node_taxa=False)
self.assertEqual(len(source_trees), 5)
source_tree_labels = [t.label for t in source_trees]
self.assertEqual(len(source_tree_labels), len(source_trees))
tlist.extend(source_trees)
self.assertEqual(len(tlist), original_tlist_len + len(source_trees))
self.assertIs(tlist.taxon_namespace, original_tns)
# self.assertEqual(len(tlist.taxon_namespace), len(tlist[0].tax_labels))
self.assertEqual(len(tlist.taxon_namespace), 7)
expected_tree_labels = original_tree_labels + source_tree_labels
self.assertEqual(len(tlist), len(expected_tree_labels))
for t1, tlabel in zip(tlist, expected_tree_labels):
self.assertIn(t1, tlist)
if tlabel in source_tree_labels:
self.assertIn(t1, source_trees)
else:
self.assertNotIn(t1, source_trees)
self.assertIs(t1.taxon_namespace, tlist.taxon_namespace)
self.assertEqual(t1.label, tlabel)
for nd in t1:
self.assertIn(nd.taxon, tlist.taxon_namespace)
def test_index(self):
tlist = curated_test_tree_list.get_tree_list(5)
for idx, t in enumerate(tlist):
self.assertIs(t, tlist[idx])
self.assertEqual(tlist.index(t), idx)
def test_pop1(self):
tlist = curated_test_tree_list.get_tree_list(5)
k = tlist[-1]
t = tlist.pop()
self.assertIs(t, k)
self.assertEqual(len(tlist), 4)
self.assertNotIn(t, tlist)
def test_pop2(self):
for idx in range(5):
tlist = curated_test_tree_list.get_tree_list(5)
k = tlist[idx]
t = tlist.pop(idx)
self.assertIs(t, k)
self.assertEqual(len(tlist), 4)
self.assertNotIn(t, tlist)
def test_remove(self):
tlist = curated_test_tree_list.get_tree_list(5)
t = tlist[0]
tlist.remove(t)
self.assertEqual(len(tlist), 4)
self.assertNotIn(t, tlist)
    def test_reverse(self):
tlist = curated_test_tree_list.get_tree_list(5)
clist = list(tlist._trees)
tlist.reverse()
clist.reverse()
for t1, t2 in zip(tlist, clist):
self.assertIs(t1, t2)
def test_sort(self):
for r in (True, False):
tlist = curated_test_tree_list.get_tree_list(5)
clist = list(tlist._trees)
tlist.sort(key=lambda x: x.label, reverse=r)
clist.sort(key=lambda x: x.label, reverse=r)
for t1, t2 in zip(tlist, clist):
self.assertIs(t1, t2)
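# Minimal usage sketch, not part of the suite (assumes DendroPy 4's
# Tree.get()): a TreeList behaves like a Python list of Tree objects while
# forcing every member tree onto its single shared TaxonNamespace.
def _tree_list_usage_sketch():
    tns = dendropy.TaxonNamespace()
    tlist = dendropy.TreeList(taxon_namespace=tns)
    tree = dendropy.Tree.get(data="(A,(B,C));", schema="newick", taxon_namespace=tns)
    tlist.append(tree)
    assert tlist[0] is tree
    assert tree.taxon_namespace is tlist.taxon_namespace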
class TreeListCreatingAndCloning(
curated_test_tree.CuratedTestTree,
compare_and_validate.Comparator,
unittest.TestCase):
def add_tree_annotations(self, tree):
for idx, nd in enumerate(tree):
if idx % 2 == 0:
nd.edge.label = "E{}".format(idx)
nd.edge.length = idx
an1 = nd.annotations.add_new("a{}".format(idx),
"{}{}{}".format(nd.label, nd.taxon, idx))
an2 = nd.annotations.add_bound_attribute("label")
an3 = an1.annotations.add_bound_attribute("name")
ae1 = nd.edge.annotations.add_new("a{}".format(idx),
"{}{}".format(nd.edge.label, idx))
ae2 = nd.edge.annotations.add_bound_attribute("label")
ae3 = ae1.annotations.add_bound_attribute("name")
tree.annotations.add_new("a", 0)
tree.label = "hello"
b = tree.annotations.add_bound_attribute("label")
b.annotations.add_new("c", 3)
def add_tree_list_annotations(self, tree_list):
tree_list.annotations.add_new("a", 0)
tree_list.label = "hello"
b = tree_list.annotations.add_bound_attribute("label")
b.annotations.add_new("c", 3)
def add_taxon_namespace_annotations(self, tns):
for idx, taxon in enumerate(tns):
a = taxon.annotations.add_new("!color", str(idx))
a.annotations.add_new("setbytest", "a")
def setUp(self):
self.num_trees = 5
tree1, anodes1, lnodes1, inodes1 = self.get_tree(
suppress_internal_node_taxa=False,
suppress_leaf_node_taxa=False)
self.original_taxon_labels = [t.label for t in tree1.taxon_namespace]
assert len(self.original_taxon_labels) == len(anodes1)
def get_tree_list(self):
tlist1 = dendropy.TreeList()
self.num_trees = 5
for idx in range(self.num_trees):
tree1, anodes1, lnodes1, inodes1 = self.get_tree(
suppress_internal_node_taxa=False,
suppress_leaf_node_taxa=False,
taxon_namespace=tlist1.taxon_namespace)
self.add_tree_annotations(tree1)
tlist1.append(tree1)
self.add_tree_list_annotations(tlist1)
self.add_taxon_namespace_annotations(tlist1.taxon_namespace)
return tlist1
def test_shallow_copy_with_initializer_list(self):
tlist1 = self.get_tree_list()
trees = tlist1._trees
tlist2 = dendropy.TreeList(trees)
self.assertEqual(len(tlist2), self.num_trees)
for tcopy, toriginal in zip(tlist2, trees):
self.assertIs(tcopy, toriginal)
self.assertIs(tcopy.taxon_namespace, tlist2.taxon_namespace)
def test_clone0(self):
tlist1 = self.get_tree_list()
for tlist2 in (
tlist1.clone(0),
):
self.assertIs(tlist2.taxon_namespace, tlist1.taxon_namespace)
self.assertEqual(len(tlist2), self.num_trees)
for tcopy, toriginal in zip(tlist2, tlist1):
self.assertIs(tcopy, toriginal)
self.assertIs(tcopy.taxon_namespace, tlist2.taxon_namespace)
def test_taxon_namespace_scoped_copy(self):
tlist1 = self.get_tree_list()
for tlist2 in (
tlist1.clone(1),
dendropy.TreeList(tlist1),
tlist1.taxon_namespace_scoped_copy(),):
self.compare_distinct_tree_list(tlist2, tlist1,
taxon_namespace_scoped=True,
compare_tree_annotations=True,
compare_taxon_annotations=True)
def test_deepcopy_including_namespace(self):
tlist1 = self.get_tree_list()
for idx, tlist2 in enumerate((
tlist1.clone(2),
copy.deepcopy(tlist1),
)):
self.compare_distinct_tree_list(tlist2, tlist1,
taxon_namespace_scoped=False,
compare_tree_annotations=True,
compare_taxon_annotations=True)
def test_deepcopy_excluding_namespace(self):
tlist1 = self.get_tree_list()
tlist2 = dendropy.TreeList(tlist1,
taxon_namespace=dendropy.TaxonNamespace())
self.compare_distinct_tree_list(tlist2, tlist1,
taxon_namespace_scoped=False,
compare_tree_annotations=True,
compare_taxon_annotations=False)
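# Summary sketch of the copy depths exercised above (the helper name is ours):
# clone(0) shares trees and namespace, clone(1) copies trees but keeps the
# namespace, and clone(2) deep-copies both.
def _clone_depth_sketch(tlist):
    shallow = tlist.clone(0)   # same Tree objects, same TaxonNamespace
    scoped = tlist.clone(1)    # new Tree objects, same TaxonNamespace
    deep = tlist.clone(2)      # new Tree objects, new TaxonNamespace
    assert shallow[0] is tlist[0]
    assert scoped[0] is not tlist[0]
    assert scoped.taxon_namespace is tlist.taxon_namespace
    assert deep.taxon_namespace is not tlist.taxon_namespace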
class TreeListIdentity(unittest.TestCase):
def setUp(self):
self.tns = dendropy.TaxonNamespace()
self.t1 = dendropy.TreeList(label="a", taxon_namespace=self.tns)
self.t2 = dendropy.TreeList(label="a", taxon_namespace=self.tns)
self.t3 = dendropy.TreeList(label="a")
def test_equal(self):
# two distinct |TreeList| objects are equal
# if they have the same namespace and trees
trees = [dendropy.Tree() for i in range(5)]
for tree in trees:
self.t1._trees.append(tree)
self.t2._trees.append(tree)
self.assertEqual(self.t1, self.t2)
def test_unequal1(self):
        # two distinct |TreeList| objects are unequal
        # if they share a namespace but hold different trees
trees1 = [dendropy.Tree() for i in range(5)]
for tree in trees1:
self.t1._trees.append(tree)
trees2 = [dendropy.Tree() for i in range(5)]
for tree in trees2:
self.t2._trees.append(tree)
self.assertNotEqual(self.t1, self.t2)
def test_unequal2(self):
        # two distinct |TreeList| objects are unequal
        # if they hold the same trees but their namespaces differ
trees1 = [dendropy.Tree() for i in range(5)]
for tree in trees1:
self.t1._trees.append(tree)
self.t3._trees.append(tree)
self.assertNotEqual(self.t1, self.t3)
def test_hash_dict_membership(self):
k = {}
k[self.t1] = 1
k[self.t2] = 2
self.assertEqual(len(k), 2)
self.assertEqual(k[self.t1], 1)
self.assertEqual(k[self.t2], 2)
self.assertIn(self.t1, k)
self.assertIn(self.t2, k)
del k[self.t1]
self.assertNotIn(self.t1, k)
self.assertIn(self.t2, k)
self.assertEqual(len(k), 1)
k1 = {self.t1: 1}
k2 = {self.t2: 1}
self.assertIn(self.t1, k1)
self.assertIn(self.t2, k2)
self.assertNotIn(self.t2, k1)
self.assertNotIn(self.t1, k2)
def test_hash_set_membership(self):
k = set()
k.add(self.t1)
k.add(self.t2)
self.assertEqual(len(k), 2)
self.assertIn(self.t1, k)
self.assertIn(self.t2, k)
k.discard(self.t1)
self.assertNotIn(self.t1, k)
self.assertIn(self.t2, k)
self.assertEqual(len(k), 1)
k1 = {self.t1: 1}
k2 = {self.t2: 1}
self.assertIn(self.t1, k1)
self.assertIn(self.t2, k2)
self.assertNotIn(self.t2, k1)
self.assertNotIn(self.t1, k2)
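# Note (sketch, not a test): TreeList hashing is identity-based, so two lists
# that compare equal still occupy separate dict/set slots, as asserted above.
def _identity_hash_sketch():
    a = dendropy.TreeList(label="x")
    b = dendropy.TreeList(label="x", taxon_namespace=a.taxon_namespace)
    assert len({a, b}) == 2   # equal content, distinct identities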
class TestTreeListUpdateTaxonNamespace(
curated_test_tree.CuratedTestTree,
dendropytest.ExtendedTestCase):
def setUp(self):
trees = []
for idx in range(5):
tree1, anodes1, lnodes1, inodes1 = self.get_tree(
suppress_internal_node_taxa=True,
suppress_leaf_node_taxa=True)
trees.append(tree1)
self.expected_labels = set()
self.expected_taxa = set()
node_label_to_taxon_label_map = {
"a" : "z01",
"b" : "<NONE>",
"c" : "z03",
"e" : "z04",
"f" : "z05",
"g" : "z06",
"h" : None,
"i" : None,
"j" : "z09",
"k" : "z10",
"l" : "z11",
"m" : "<NONE>",
"n" : None,
"o" : "z14",
"p" : "z15",
}
registry = {}
for tree_idx, tree in enumerate(trees):
for nd in tree:
if nd.label is not None:
if tree_idx > 3:
nd.label = node_label_to_taxon_label_map[nd.label]
if nd.label == "<NONE>":
try:
t = registry[None]
except KeyError:
t = dendropy.Taxon(label=None)
registry[None] = t
self.expected_labels.add(None)
else:
try:
t = registry[nd.label]
except KeyError:
t = dendropy.Taxon(label=nd.label)
registry[nd.label] = t
self.expected_labels.add(nd.label)
nd.taxon = t
self.expected_taxa.add(nd.taxon)
self.tree_list = dendropy.TreeList()
self.tree_list._trees = trees
def test_noop_update_with_no_taxa(self):
trees = []
tns = dendropy.TaxonNamespace()
for idx in range(5):
tree1, anodes1, lnodes1, inodes1 = self.get_tree(
suppress_internal_node_taxa=True,
suppress_leaf_node_taxa=True,
taxon_namespace=tns)
trees.append(tree1)
tlst = dendropy.TreeList(taxon_namespace=tns)
tlst._trees = trees
original_tns = tlst.taxon_namespace
self.assertEqual(len(original_tns), 0)
tlst.update_taxon_namespace()
self.assertIs(tlst.taxon_namespace, original_tns)
for tree in tlst:
self.assertIs(tree.taxon_namespace, tlst.taxon_namespace)
self.assertEqual(len(original_tns), 0)
def test_update(self):
original_tns = self.tree_list.taxon_namespace
self.assertEqual(len(original_tns), 0)
self.tree_list.update_taxon_namespace()
self.tree_list.update_taxon_namespace()
self.tree_list.update_taxon_namespace()
for tree in self.tree_list:
self.assertIs(tree.taxon_namespace, self.tree_list.taxon_namespace)
self.assertIs(self.tree_list.taxon_namespace, original_tns)
new_taxa = [t for t in original_tns]
new_labels = [t.label for t in original_tns]
self.assertCountEqual(new_taxa, self.expected_taxa)
self.assertCountEqual(new_labels, self.expected_labels)
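# Usage sketch of the semantics verified above: update_taxon_namespace() is
# additive and idempotent -- it pulls taxa referenced by member trees into the
# list's namespace without removing anything, so repeated calls are no-ops.
def _update_namespace_sketch(tree_list):
    tree_list.update_taxon_namespace()
    size_after_first = len(tree_list.taxon_namespace)
    tree_list.update_taxon_namespace()   # no-op on the second call
    assert len(tree_list.taxon_namespace) == size_after_first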
class TestTreeListMigrateAndReconstructTaxonNamespace(
curated_test_tree.CuratedTestTree,
dendropytest.ExtendedTestCase):
def setUp(self):
tns = dendropy.TaxonNamespace()
trees = []
for idx in range(8):
tree, anodes, lnodes, inodes = self.get_tree(
suppress_internal_node_taxa=True,
suppress_leaf_node_taxa=True,
taxon_namespace=tns)
trees.append(tree)
self.node_label_to_taxon_label_map = {
"a" : "a",
"b" : "a",
"c" : "2",
"e" : "2",
"f" : "b",
"g" : "B",
"h" : "B",
"i" : "h",
"j" : "H",
"k" : "h",
"l" : None,
"m" : None,
"n" : "H",
"o" : "J",
"p" : "j",
}
self.original_taxa = []
registry = {}
for tree in trees:
for idx, nd in enumerate(tree):
try:
t = registry[nd.label]
except KeyError:
taxon_label = self.node_label_to_taxon_label_map[nd.label]
t = dendropy.Taxon(label=taxon_label)
registry[nd.label] = t
self.original_taxa.append(t)
tree.taxon_namespace.add_taxon(t)
nd.taxon = t
nd.original_taxon = t
assert len(tree.taxon_namespace) == len(self.node_label_to_taxon_label_map)
assert len(tree.taxon_namespace) == len(self.original_taxa)
self.tree_list = dendropy.TreeList(taxon_namespace=tns)
self.tree_list._trees = trees
def verify_taxon_namespace_reconstruction(self,
unify_taxa_by_label=False,
case_sensitive_label_mapping=True,
original_tns=None,
redundant_taxa=False):
if unify_taxa_by_label:
if not case_sensitive_label_mapping:
expected_labels = []
for label in self.node_label_to_taxon_label_map.values():
if label is None:
expected_labels.append(label)
else:
label = label.upper()
if label not in expected_labels:
expected_labels.append(label)
else:
expected_labels = list(set(label for label in self.node_label_to_taxon_label_map.values()))
else:
expected_labels = [label for label in self.node_label_to_taxon_label_map.values()]
for tree in self.tree_list:
seen_taxa = []
self.assertIs(tree.taxon_namespace, self.tree_list.taxon_namespace)
for nd in tree:
self.assertIsNot(nd.taxon, nd.original_taxon)
if not case_sensitive_label_mapping and nd.taxon.label is not None:
self.assertEqual(nd.taxon.label.upper(), nd.original_taxon.label.upper())
self.assertEqual(self.node_label_to_taxon_label_map[nd.label].upper(), nd.taxon.label.upper())
else:
self.assertEqual(nd.taxon.label, nd.original_taxon.label)
self.assertEqual(self.node_label_to_taxon_label_map[nd.label], nd.taxon.label)
self.assertNotIn(nd.original_taxon, tree.taxon_namespace)
self.assertIn(nd.original_taxon, self.original_taxa)
self.assertIn(nd.taxon, tree.taxon_namespace)
self.assertNotIn(nd.taxon, self.original_taxa)
if original_tns is not None:
self.assertNotIn(nd.taxon, original_tns)
if nd.taxon not in seen_taxa:
seen_taxa.append(nd.taxon)
else:
self.assertTrue(unify_taxa_by_label or redundant_taxa)
if not case_sensitive_label_mapping:
self.assertIn(nd.taxon.label, [t.label for t in seen_taxa])
else:
if nd.taxon.label is None:
self.assertIs(nd.original_taxon.label, None)
self.assertEqual([t.label for t in seen_taxa].count(None), 1)
else:
x1 = [t.label.upper() for t in seen_taxa if t.label is not None]
self.assertIn(nd.taxon.label.upper(), x1)
self.assertEqual(len(seen_taxa), len(tree.taxon_namespace))
if not case_sensitive_label_mapping:
seen_labels = [(t.label.upper() if t.label is not None else None) for t in seen_taxa]
else:
seen_labels = [t.label for t in seen_taxa]
c1 = collections.Counter(expected_labels)
c2 = collections.Counter(seen_labels)
self.assertEqual(c1, c2)
self.assertEqual(len(tree.taxon_namespace), len(expected_labels))
if not unify_taxa_by_label and not redundant_taxa:
self.assertEqual(len(tree.taxon_namespace), len(self.node_label_to_taxon_label_map))
def test_basic_reconstruction(self):
tns = dendropy.TaxonNamespace()
trees = []
for idx in range(5):
tree, anodes, lnodes, inodes = self.get_tree(
suppress_internal_node_taxa=False,
suppress_leaf_node_taxa=False,
taxon_namespace=tns)
trees.append(tree)
tree_list = dendropy.TreeList(taxon_namespace=tns)
tree_list._trees = trees
new_tns = dendropy.TaxonNamespace()
new_tns.is_case_sensitive = True
tree_list.taxon_namespace = new_tns
tree_list.reconstruct_taxon_namespace(unify_taxa_by_label=False)
self.assertIsNot(tree_list.taxon_namespace, tns)
self.assertIs(tree_list.taxon_namespace, new_tns)
self.assertEqual(len(tree_list.taxon_namespace), len(tns))
original_labels = [t.label for t in tns]
new_labels = [t.label for t in new_tns]
self.assertCountEqual(new_labels, original_labels)
for tree in tree_list:
self.assertIs(tree.taxon_namespace, tree_list.taxon_namespace)
for nd in tree:
if nd.taxon is not None:
self.assertIn(nd.taxon, tree.taxon_namespace)
self.assertNotIn(nd.taxon, tns)
def test_reconstruct_taxon_namespace_non_unifying(self):
original_tns = self.tree_list.taxon_namespace
new_tns = dendropy.TaxonNamespace()
new_tns.is_case_sensitive = True
self.tree_list._taxon_namespace = new_tns
self.assertEqual(len(self.tree_list.taxon_namespace), 0)
self.tree_list.reconstruct_taxon_namespace(unify_taxa_by_label=False)
self.assertIsNot(self.tree_list.taxon_namespace, original_tns)
self.assertIs(self.tree_list.taxon_namespace, new_tns)
self.verify_taxon_namespace_reconstruction(
unify_taxa_by_label=False,
case_sensitive_label_mapping=True)
def test_reconstruct_taxon_namespace_unifying_case_sensitive(self):
original_tns = self.tree_list.taxon_namespace
new_tns = dendropy.TaxonNamespace()
new_tns.is_case_sensitive = True
self.tree_list._taxon_namespace = new_tns
self.assertEqual(len(self.tree_list.taxon_namespace), 0)
self.tree_list.reconstruct_taxon_namespace(unify_taxa_by_label=True)
self.assertIsNot(self.tree_list.taxon_namespace, original_tns)
self.assertIs(self.tree_list.taxon_namespace, new_tns)
self.verify_taxon_namespace_reconstruction(
unify_taxa_by_label=True,
case_sensitive_label_mapping=True,
original_tns=original_tns)
def test_reconstruct_taxon_namespace_unifying_case_insensitive(self):
original_tns = self.tree_list.taxon_namespace
new_tns = dendropy.TaxonNamespace()
new_tns.is_case_sensitive = False
self.tree_list._taxon_namespace = new_tns
self.assertEqual(len(self.tree_list.taxon_namespace), 0)
self.tree_list.reconstruct_taxon_namespace(unify_taxa_by_label=True)
self.assertIsNot(self.tree_list.taxon_namespace, original_tns)
self.assertIs(self.tree_list.taxon_namespace, new_tns)
self.verify_taxon_namespace_reconstruction(
unify_taxa_by_label=True,
case_sensitive_label_mapping=False,
original_tns=original_tns)
def test_basic_migration(self):
tns = dendropy.TaxonNamespace()
trees = []
for idx in range(5):
tree, anodes, lnodes, inodes = self.get_tree(
suppress_internal_node_taxa=False,
suppress_leaf_node_taxa=False,
taxon_namespace=tns)
trees.append(tree)
tree_list = dendropy.TreeList(taxon_namespace=tns)
tree_list._trees = trees
new_tns = dendropy.TaxonNamespace()
new_tns.is_case_sensitive = True
tree_list.taxon_namespace = new_tns
tree_list.migrate_taxon_namespace(
new_tns,
unify_taxa_by_label=False)
self.assertIsNot(tree_list.taxon_namespace, tns)
self.assertIs(tree_list.taxon_namespace, new_tns)
self.assertEqual(len(tree_list.taxon_namespace), len(tns))
original_labels = [t.label for t in tns]
new_labels = [t.label for t in new_tns]
self.assertCountEqual(new_labels, original_labels)
for tree in tree_list:
self.assertIs(tree.taxon_namespace, tree_list.taxon_namespace)
for nd in tree:
if nd.taxon is not None:
self.assertIn(nd.taxon, tree.taxon_namespace)
self.assertNotIn(nd.taxon, tns)
def test_migrate_taxon_namespace_non_unifying(self):
original_tns = self.tree_list.taxon_namespace
new_tns = dendropy.TaxonNamespace()
new_tns.is_case_sensitive = True
self.tree_list.migrate_taxon_namespace(
new_tns,
unify_taxa_by_label=False)
self.assertIsNot(self.tree_list.taxon_namespace, original_tns)
self.assertIs(self.tree_list.taxon_namespace, new_tns)
self.verify_taxon_namespace_reconstruction(
unify_taxa_by_label=False,
case_sensitive_label_mapping=True,
original_tns=original_tns)
def test_migrate_taxon_namespace_unifying_case_sensitive(self):
original_tns = self.tree_list.taxon_namespace
new_tns = dendropy.TaxonNamespace()
new_tns.is_case_sensitive = True
self.tree_list.migrate_taxon_namespace(
new_tns,
unify_taxa_by_label=True)
self.assertIsNot(self.tree_list.taxon_namespace, original_tns)
self.assertIs(self.tree_list.taxon_namespace, new_tns)
self.verify_taxon_namespace_reconstruction(
unify_taxa_by_label=True,
case_sensitive_label_mapping=True,
original_tns=original_tns)
def test_migrate_taxon_namespace_unifying_case_insensitive(self):
original_tns = self.tree_list.taxon_namespace
new_tns = dendropy.TaxonNamespace()
new_tns.is_case_sensitive = False
self.tree_list.migrate_taxon_namespace(
new_tns,
unify_taxa_by_label=True)
self.assertIsNot(self.tree_list.taxon_namespace, original_tns)
self.assertIs(self.tree_list.taxon_namespace, new_tns)
self.verify_taxon_namespace_reconstruction(
unify_taxa_by_label=True,
case_sensitive_label_mapping=False,
original_tns=original_tns)
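# Usage sketch of the semantics exercised above: migrate_taxon_namespace()
# swaps in a new TaxonNamespace and remaps each node to a new Taxon object;
# with unify_taxa_by_label=True, same-labelled taxa collapse to one entry
# (case-insensitively unless the namespace is case sensitive).
def _migrate_namespace_sketch(tree_list):
    new_tns = dendropy.TaxonNamespace()
    tree_list.migrate_taxon_namespace(new_tns, unify_taxa_by_label=True)
    assert tree_list.taxon_namespace is new_tns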
class TestTreeListAppend(
curated_test_tree.CuratedTestTree,
unittest.TestCase):
def setUp(self):
self.native_tns = dendropy.TaxonNamespace()
self.tree_list = dendropy.TreeList(taxon_namespace=self.native_tns)
self.foreign_tns = dendropy.TaxonNamespace()
self.foreign_tree, anodes, lnodes, inodes = self.get_tree(
suppress_internal_node_taxa=False,
suppress_leaf_node_taxa=False,
taxon_namespace=self.foreign_tns)
for nd in self.foreign_tree:
nd.original_taxon = nd.taxon
self.check_tns = dendropy.TaxonNamespace()
self.check_tree, anodes, lnodes, inodes = self.get_tree(
suppress_internal_node_taxa=False,
suppress_leaf_node_taxa=False,
taxon_namespace=self.check_tns)
def test_append_default(self):
self.assertIsNot(self.tree_list.taxon_namespace, self.foreign_tree.taxon_namespace)
self.tree_list.append(self.foreign_tree)
self.assertEqual(len(self.tree_list), 1)
self.assertIn(self.foreign_tree, self.tree_list)
self.assertIs(self.foreign_tree, self.tree_list[0])
self.assertIs(self.tree_list.taxon_namespace, self.native_tns)
self.assertIs(self.foreign_tree.taxon_namespace, self.tree_list.taxon_namespace)
self.assertEqual(len(self.tree_list.taxon_namespace), len(self.foreign_tns))
for nd in self.foreign_tree:
if nd.taxon:
self.assertIn(nd.taxon, self.tree_list.taxon_namespace)
self.assertIsNot(nd.taxon, nd.original_taxon)
self.assertIn(nd.original_taxon, self.foreign_tns)
self.assertNotIn(nd.original_taxon, self.tree_list.taxon_namespace)
self.assertEqual(nd.taxon.label, nd.original_taxon.label)
def test_append_migrate_matching_labels(self):
kwargs_groups = [
{"taxon_import_strategy": "migrate", "unify_taxa_by_label": True},
{"taxon_import_strategy": "migrate", "unify_taxa_by_label": False},
{"taxon_import_strategy": "add", },
]
for kwargs in kwargs_groups:
self.setUp()
self.assertEqual(len(self.tree_list.taxon_namespace), 0)
native_tree, anodes, lnodes, inodes = self.get_tree(
suppress_internal_node_taxa=False,
suppress_leaf_node_taxa=False,
taxon_namespace=self.native_tns)
self.assertEqual(len(self.tree_list.taxon_namespace), len(self.postorder_sequence))
self.assertEqual(len(self.tree_list.taxon_namespace), len(self.foreign_tns))
original_tns_len = len(self.tree_list.taxon_namespace)
self.tree_list.append(self.foreign_tree, **kwargs)
self.assertEqual(len(self.tree_list), 1)
self.assertIn(self.foreign_tree, self.tree_list)
self.assertIs(self.foreign_tree, self.tree_list[0])
self.assertIs(self.foreign_tree.taxon_namespace, self.tree_list.taxon_namespace)
if kwargs["taxon_import_strategy"] == "add":
self.assertEqual(len(self.tree_list.taxon_namespace),
original_tns_len + len(self.foreign_tns))
for nd in self.foreign_tree:
self.assertIn(nd.taxon, self.foreign_tns)
self.assertIn(nd.taxon, self.tree_list.taxon_namespace)
else:
if "unify_taxa_by_label" not in kwargs or not kwargs["unify_taxa_by_label"]:
self.assertEqual(len(self.tree_list.taxon_namespace),
original_tns_len + len(self.foreign_tns))
else:
self.assertEqual(len(self.tree_list.taxon_namespace), original_tns_len)
for nd in self.foreign_tree:
self.assertNotIn(nd.taxon, self.foreign_tns)
self.assertIn(nd.taxon, self.tree_list.taxon_namespace)
def test_append_add(self):
self.assertIsNot(self.tree_list.taxon_namespace, self.foreign_tree.taxon_namespace)
self.tree_list.append(self.foreign_tree,
taxon_import_strategy="add")
self.assertEqual(len(self.tree_list), 1)
self.assertIn(self.foreign_tree, self.tree_list)
self.assertIs(self.foreign_tree, self.tree_list[0])
self.assertIs(self.tree_list.taxon_namespace, self.native_tns)
self.assertIs(self.foreign_tree.taxon_namespace, self.tree_list.taxon_namespace)
self.assertEqual(len(self.tree_list.taxon_namespace), len(self.foreign_tns))
for nd in self.foreign_tree:
if nd.taxon:
self.assertIn(nd.taxon, self.tree_list.taxon_namespace)
self.assertIs(nd.taxon, nd.original_taxon)
self.assertIn(nd.original_taxon, self.foreign_tns)
self.assertIn(nd.original_taxon, self.tree_list.taxon_namespace)
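# Usage sketch of the strategies exercised above: append() defaults to
# taxon_import_strategy="migrate", remapping the tree's taxa into the list's
# namespace, while "add" imports the foreign Taxon objects unchanged.
def _append_strategy_sketch(tree_list, foreign_tree):
    tree_list.append(foreign_tree, taxon_import_strategy="add")
    assert foreign_tree.taxon_namespace is tree_list.taxon_namespace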
class TestTreeListTaxa(
curated_test_tree.CuratedTestTree,
dendropytest.ExtendedTestCase):
def setUp(self):
self.tree_list = dendropy.TreeList()
self.expected_taxa = None
for i in range(10):
tree1, anodes1, lnodes1, inodes1 = self.get_tree(
taxon_namespace=self.tree_list.taxon_namespace,
suppress_internal_node_taxa=False,
suppress_leaf_node_taxa=False)
self.tree_list.append(tree1)
if self.expected_taxa is None:
self.expected_taxa = set([nd.taxon for nd in anodes1 if nd.taxon is not None])
def test_basic_taxa(self):
self.assertEqual(self.tree_list.poll_taxa(), self.expected_taxa)
class TestTreeListPurgeTaxonNamespace(
curated_test_tree.CuratedTestTree,
dendropytest.ExtendedTestCase):
def setUp(self):
self.tree_list = dendropy.TreeList()
self.expected_taxa = None
for i in range(10):
tree1, anodes1, lnodes1, inodes1 = self.get_tree(
taxon_namespace=self.tree_list.taxon_namespace,
suppress_internal_node_taxa=False,
suppress_leaf_node_taxa=False)
self.tree_list.append(tree1)
if self.expected_taxa is None:
self.expected_taxa = set([nd.taxon for nd in anodes1 if nd.taxon is not None])
def test_noop_purge(self):
self.assertEqual(set(self.tree_list.taxon_namespace), self.expected_taxa)
self.tree_list.purge_taxon_namespace()
self.assertEqual(set(self.tree_list.taxon_namespace), self.expected_taxa)
def test_basic_purge(self):
self.assertEqual(set(self.tree_list.taxon_namespace), self.expected_taxa)
added_taxa = set(self.expected_taxa)
for label in ("z1", "z2", "z3", "z4"):
t = self.tree_list.taxon_namespace.new_taxon(label=label)
added_taxa.add(t)
self.assertEqual(set(self.tree_list.taxon_namespace), added_taxa)
self.tree_list.purge_taxon_namespace()
self.assertEqual(set(self.tree_list.taxon_namespace), self.expected_taxa)
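# Usage sketch of the behaviour verified above: purge_taxon_namespace() drops
# taxa no member tree references, leaving the namespace equal to poll_taxa().
def _purge_namespace_sketch(tree_list):
    tree_list.taxon_namespace.new_taxon(label="unused")
    tree_list.purge_taxon_namespace()
    assert set(tree_list.taxon_namespace) == tree_list.poll_taxa()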
class TreeListCreation(unittest.TestCase):
def test_create_with_taxon_namespace(self):
tns = dendropy.TaxonNamespace()
tt = dendropy.TreeList(label="a", taxon_namespace=tns)
self.assertEqual(tt.label, "a")
self.assertIs(tt.taxon_namespace, tns)
class TestSpecialTreeListConstruction(
unittest.TestCase):
def test_construction_from_another_tree_different_label(self):
tlist1 = dendropy.TreeList()
tlist1.label = "tlist1"
self.assertEqual(tlist1.label, "tlist1")
tlist2 = dendropy.TreeList(tlist1, label="tlist2")
self.assertEqual(tlist2.label, "tlist2")
self.assertNotEqual(tlist1.label, "tlist2")
self.assertNotEqual(tlist1.label, tlist2.label)
if __name__ == "__main__":
unittest.main()
| 44.499574 | 129 | 0.613785 |
ls))
for t1, tlabel in zip(tlist, expected_tree_labels):
self.assertIn(t1, tlist)
if tlabel in source_tree_labels:
self.assertIn(t1, source_trees)
else:
self.assertNotIn(t1, source_trees)
self.assertIs(t1.taxon_namespace, tlist.taxon_namespace)
self.assertEqual(t1.label, tlabel)
for nd in t1:
self.assertIn(nd.taxon, tlist.taxon_namespace)
def test_add_from_another_tree_list_different_namespace(self):
tlist_source1 = curated_test_tree_list.get_tree_list(num_trees=3)
original_tns = tlist_source1.taxon_namespace
source1_tree_labels = [t.label for t in tlist_source1]
self.assertEqual(len(source1_tree_labels), len(tlist_source1))
self.assertEqual(len(tlist_source1), 3)
tlist_source2 = curated_test_tree_list.get_trees(num_trees=5)
self.assertEqual(len(tlist_source2), 5)
source2_tree_labels = [t.label for t in tlist_source2]
self.assertEqual(len(source2_tree_labels), len(tlist_source2))
tlist = tlist_source1 + tlist_source2
self.assertEqual(len(tlist_source1), 3)
self.assertEqual(len(tlist_source2), 5)
self.assertEqual(len(tlist), len(tlist_source1) + len(tlist_source2))
self.assertIs(tlist.taxon_namespace, original_tns)
self.assertEqual(len(tlist.taxon_namespace), 7)
expected_tree_labels = source1_tree_labels + source2_tree_labels
self.assertEqual(len(tlist), len(expected_tree_labels))
for t1, tlabel in zip(tlist, expected_tree_labels):
self.assertIn(t1, tlist)
self.assertIs(t1.taxon_namespace, tlist.taxon_namespace)
self.assertEqual(t1.label, tlabel)
if t1.label in source1_tree_labels:
self.assertNotIn(t1, tlist_source1)
self.assertNotIn(t1, tlist_source2)
else:
self.assertNotIn(t1, tlist_source1)
self.assertIn(t1, tlist_source2)
for nd in t1:
self.assertIn(nd.taxon, tlist.taxon_namespace)
def test_contains(self):
tlist = curated_test_tree_list.get_tree_list(5)
self.assertEqual(len(tlist._trees), len(tlist))
self.assertEqual(len(tlist), 5)
trees = curated_test_tree_list.get_trees(5)
self.assertEqual(len(trees), 5)
for t in tlist:
self.assertTrue(t in tlist._trees)
self.assertTrue(t in tlist)
for t in trees:
self.assertFalse(t in tlist._trees)
self.assertFalse(t in tlist)
tlist += trees
for t in trees:
self.assertTrue(t in tlist._trees)
self.assertTrue(t in tlist)
def test_delitem(self):
tsize = 5
for del_idx in range(-tsize, tsize):
tlist = curated_test_tree_list.get_tree_list(tsize)
original_trees = list(tlist._trees)
self.assertIn(original_trees[del_idx], tlist._trees)
del tlist[del_idx]
self.assertNotIn(original_trees[del_idx], tlist._trees)
self.assertEqual(len(tlist), tsize - 1)
del original_trees[del_idx]
self.assertEqual(tlist._trees, original_trees)
def test_iter(self):
tlist = curated_test_tree_list.get_tree_list(5)
self.assertEqual(len(tlist), 5)
self.assertEqual(len(tlist._trees), len(tlist))
for t1, t2 in zip(tlist, tlist._trees):
self.assertIs(t1, t2)
def test_reversed(self):
tlist = curated_test_tree_list.get_tree_list(5)
self.assertEqual(len(tlist), 5)
self.assertEqual(len(tlist._trees), len(tlist))
for t1, t2 in zip(reversed(tlist), reversed(tlist._trees)):
self.assertIs(t1, t2)
def test_getitem_simple(self):
tsize = 5
tlist = curated_test_tree_list.get_tree_list(tsize)
self.assertEqual(len(tlist), tsize)
self.assertEqual(len(tlist._trees), len(tlist))
for idx in range(-tsize, tsize):
self.assertIs(tlist[idx], tlist._trees[idx])
self.assertTrue(isinstance(tlist[idx], dendropy.Tree))
def test_getitem_slice(self):
tsize = 5
tlist = curated_test_tree_list.get_tree_list(tsize)
self.assertEqual(len(tlist), tsize)
self.assertEqual(len(tlist._trees), len(tlist))
for a in range(-tsize, tsize):
for b in range(-tsize, tsize):
for step in range(-tsize, tsize):
if step == 0:
continue
tt = tlist[a:b:step]
k = tlist._trees[a:b:step]
self.assertEqual(len(tt), len(k))
for t1, t2 in zip(tt, k):
self.assertIn(t1, tlist)
self.assertIn(t1, tlist._trees)
self.assertIs(t1.taxon_namespace, tlist.taxon_namespace)
def test_setitem_simple(self):
tsize = 5
for idx in range(-tsize, tsize):
tlist = curated_test_tree_list.get_tree_list(tsize)
self.assertEqual(len(tlist), tsize)
self.assertEqual(len(tlist._trees), len(tlist))
old_tree = tlist[idx]
new_tree = curated_test_tree_list.get_tree()
tlist[idx] = new_tree
self.assertIs(tlist[idx], new_tree)
self.assertIsNot(tlist[idx], old_tree)
self.assertIn(new_tree, tlist)
self.assertNotIn(old_tree, tlist)
self.assertIs(new_tree.taxon_namespace,
tlist.taxon_namespace)
self.assertEqual(len(tlist.taxon_namespace), 7)
for tree in tlist:
for nd in tree:
self.assertIn(nd.taxon, tlist.taxon_namespace)
def test_setitem_slice_from_list(self):
tsize = 5
for a in range(-tsize, tsize):
for b in range(-tsize, tsize):
for step in range(-tsize, tsize):
if step == 0:
continue
slice_obj = slice(a, b, step)
slice_len = len(range(*slice_obj.indices(tsize)))
if slice_len <= 0:
continue
tlist = curated_test_tree_list.get_tree_list(tsize)
self.assertEqual(len(tlist), tsize)
self.assertEqual(len(tlist._trees), len(tlist))
copy_list = list(tlist._trees)
source = curated_test_tree_list.get_trees(slice_len)
tlist[a:b:step] = source
copy_list[a:b:step] = source
expected_tree_labels = [t.label for t in copy_list]
self.assertEqual(len(tlist), len(copy_list))
self.assertEqual(len(tlist), len(tlist._trees))
self.assertEqual(len(tlist.taxon_namespace), 7)
for t1, t2, tlabel in zip(tlist, copy_list, expected_tree_labels):
self.assertIs(t1, t2)
self.assertIn(t1, tlist)
self.assertIn(t1, tlist._trees)
self.assertEqual(t1.label, tlabel)
self.assertIs(t1.taxon_namespace, tlist.taxon_namespace)
for nd in t1:
self.assertIn(nd.taxon, tlist.taxon_namespace)
def test_setitem_slice_from_tree_list(self):
tsize = 5
for a in range(-tsize, tsize):
for b in range(-tsize, tsize):
for step in range(-tsize, tsize):
if step == 0:
continue
slice_obj = slice(a, b, step)
slice_indexes = list(range(*slice_obj.indices(tsize)))
slice_len = len(slice_indexes)
if slice_len <= 0:
continue
tlist = curated_test_tree_list.get_tree_list(tsize)
self.assertEqual(len(tlist), tsize)
self.assertEqual(len(tlist._trees), len(tlist))
copy_list = list(tlist._trees)
source = curated_test_tree_list.get_tree_list(slice_len)
copy_list[a:b:step] = source._trees
tlist[a:b:step] = source
expected_tree_labels = [t.label for t in copy_list]
self.assertEqual(len(tlist), len(copy_list))
self.assertEqual(len(tlist), len(tlist._trees))
self.assertEqual(len(tlist.taxon_namespace), 7)
for idx, (t1, t2, tlabel) in enumerate(zip(tlist, copy_list, expected_tree_labels)):
self.assertIn(t1, tlist)
self.assertIn(t1, tlist._trees)
self.assertEqual(t1.label, tlabel)
self.assertIs(t1.taxon_namespace, tlist.taxon_namespace)
if idx in slice_indexes:
self.assertIsNot(t1, t2)
self.assertIn(t1, tlist)
self.assertIn(t1, tlist._trees)
self.assertNotIn(t2, tlist)
self.assertNotIn(t2, tlist._trees)
self.assertNotIn(t1, source)
self.assertNotIn(t1, source._trees)
else:
self.assertIs(t1, t2)
for nd in t1:
self.assertIn(nd.taxon, tlist.taxon_namespace)
def test_clear(self):
tlist = curated_test_tree_list.get_tree_list(5)
self.assertEqual(len(tlist._trees), 5)
tlist.clear()
self.assertEqual(len(tlist), 0)
self.assertEqual(len(tlist._trees), 0)
def test_extend_from_another_tree_list_different_namespace(self):
tlist = curated_test_tree_list.get_tree_list(num_trees=3)
original_tns = tlist.taxon_namespace
original_tlist_len = len(tlist)
original_tree_labels = [t.label for t in tlist]
self.assertEqual(len(original_tree_labels), len(tlist))
self.assertEqual(original_tlist_len, 3)
tlist_source = curated_test_tree_list.get_tree_list(num_trees=5)
self.assertEqual(len(tlist_source), 5)
source_tree_labels = [t.label for t in tlist_source]
self.assertEqual(len(source_tree_labels), len(tlist_source))
tlist.extend(tlist_source)
self.assertEqual(len(tlist), original_tlist_len + len(tlist_source))
self.assertIs(tlist.taxon_namespace, original_tns)
self.assertEqual(len(tlist.taxon_namespace), 7)
expected_tree_labels = original_tree_labels + source_tree_labels
self.assertEqual(len(tlist), len(expected_tree_labels))
for t1, tlabel in zip(tlist, expected_tree_labels):
self.assertIn(t1, tlist)
self.assertNotIn(t1, tlist_source)
self.assertIs(t1.taxon_namespace, tlist.taxon_namespace)
self.assertEqual(t1.label, tlabel)
for nd in t1:
self.assertIn(nd.taxon, tlist.taxon_namespace)
def test_extend_from_list_of_trees_different_namespace(self):
tlist = curated_test_tree_list.get_tree_list(num_trees=3)
original_tns = tlist.taxon_namespace
original_tlist_len = len(tlist)
original_tree_labels = [t.label for t in tlist]
self.assertEqual(len(original_tree_labels), len(tlist))
self.assertEqual(original_tlist_len, 3)
source_trees = curated_test_tree_list.get_trees(
num_trees=5,
taxon_namespace=None,
label=None,
suppress_internal_node_taxa=False,
suppress_leaf_node_taxa=False)
self.assertEqual(len(source_trees), 5)
source_tree_labels = [t.label for t in source_trees]
self.assertEqual(len(source_tree_labels), len(source_trees))
tlist.extend(source_trees)
self.assertEqual(len(tlist), original_tlist_len + len(source_trees))
self.assertIs(tlist.taxon_namespace, original_tns)
self.assertEqual(len(tlist.taxon_namespace), 7)
expected_tree_labels = original_tree_labels + source_tree_labels
self.assertEqual(len(tlist), len(expected_tree_labels))
for t1, tlabel in zip(tlist, expected_tree_labels):
self.assertIn(t1, tlist)
if tlabel in source_tree_labels:
self.assertIn(t1, source_trees)
else:
self.assertNotIn(t1, source_trees)
self.assertIs(t1.taxon_namespace, tlist.taxon_namespace)
self.assertEqual(t1.label, tlabel)
for nd in t1:
self.assertIn(nd.taxon, tlist.taxon_namespace)
def test_index(self):
tlist = curated_test_tree_list.get_tree_list(5)
for idx, t in enumerate(tlist):
self.assertIs(t, tlist[idx])
self.assertEqual(tlist.index(t), idx)
def test_pop1(self):
tlist = curated_test_tree_list.get_tree_list(5)
k = tlist[-1]
t = tlist.pop()
self.assertIs(t, k)
self.assertEqual(len(tlist), 4)
self.assertNotIn(t, tlist)
def test_pop2(self):
for idx in range(5):
tlist = curated_test_tree_list.get_tree_list(5)
k = tlist[idx]
t = tlist.pop(idx)
self.assertIs(t, k)
self.assertEqual(len(tlist), 4)
self.assertNotIn(t, tlist)
def test_remove(self):
tlist = curated_test_tree_list.get_tree_list(5)
t = tlist[0]
tlist.remove(t)
self.assertEqual(len(tlist), 4)
self.assertNotIn(t, tlist)
def test_remove(self):
tlist = curated_test_tree_list.get_tree_list(5)
clist = list(tlist._trees)
tlist.reverse()
clist.reverse()
for t1, t2 in zip(tlist, clist):
self.assertIs(t1, t2)
def test_sort(self):
for r in (True, False):
tlist = curated_test_tree_list.get_tree_list(5)
clist = list(tlist._trees)
tlist.sort(key=lambda x: x.label, reverse=r)
clist.sort(key=lambda x: x.label, reverse=r)
for t1, t2 in zip(tlist, clist):
self.assertIs(t1, t2)
class TreeListCreatingAndCloning(
curated_test_tree.CuratedTestTree,
compare_and_validate.Comparator,
unittest.TestCase):
def add_tree_annotations(self, tree):
for idx, nd in enumerate(tree):
if idx % 2 == 0:
nd.edge.label = "E{}".format(idx)
nd.edge.length = idx
an1 = nd.annotations.add_new("a{}".format(idx),
"{}{}{}".format(nd.label, nd.taxon, idx))
an2 = nd.annotations.add_bound_attribute("label")
an3 = an1.annotations.add_bound_attribute("name")
ae1 = nd.edge.annotations.add_new("a{}".format(idx),
"{}{}".format(nd.edge.label, idx))
ae2 = nd.edge.annotations.add_bound_attribute("label")
ae3 = ae1.annotations.add_bound_attribute("name")
tree.annotations.add_new("a", 0)
tree.label = "hello"
b = tree.annotations.add_bound_attribute("label")
b.annotations.add_new("c", 3)
def add_tree_list_annotations(self, tree_list):
tree_list.annotations.add_new("a", 0)
tree_list.label = "hello"
b = tree_list.annotations.add_bound_attribute("label")
b.annotations.add_new("c", 3)
def add_taxon_namespace_annotations(self, tns):
for idx, taxon in enumerate(tns):
a = taxon.annotations.add_new("!color", str(idx))
a.annotations.add_new("setbytest", "a")
def setUp(self):
self.num_trees = 5
tree1, anodes1, lnodes1, inodes1 = self.get_tree(
suppress_internal_node_taxa=False,
suppress_leaf_node_taxa=False)
self.original_taxon_labels = [t.label for t in tree1.taxon_namespace]
assert len(self.original_taxon_labels) == len(anodes1)
def get_tree_list(self):
tlist1 = dendropy.TreeList()
self.num_trees = 5
for idx in range(self.num_trees):
tree1, anodes1, lnodes1, inodes1 = self.get_tree(
suppress_internal_node_taxa=False,
suppress_leaf_node_taxa=False,
taxon_namespace=tlist1.taxon_namespace)
self.add_tree_annotations(tree1)
tlist1.append(tree1)
self.add_tree_list_annotations(tlist1)
self.add_taxon_namespace_annotations(tlist1.taxon_namespace)
return tlist1
def test_shallow_copy_with_initializer_list(self):
tlist1 = self.get_tree_list()
trees = tlist1._trees
tlist2 = dendropy.TreeList(trees)
self.assertEqual(len(tlist2), self.num_trees)
for tcopy, toriginal in zip(tlist2, trees):
self.assertIs(tcopy, toriginal)
self.assertIs(tcopy.taxon_namespace, tlist2.taxon_namespace)
def test_clone0(self):
tlist1 = self.get_tree_list()
for tlist2 in (
tlist1.clone(0),
):
self.assertIs(tlist2.taxon_namespace, tlist1.taxon_namespace)
self.assertEqual(len(tlist2), self.num_trees)
for tcopy, toriginal in zip(tlist2, tlist1):
self.assertIs(tcopy, toriginal)
self.assertIs(tcopy.taxon_namespace, tlist2.taxon_namespace)
def test_taxon_namespace_scoped_copy(self):
tlist1 = self.get_tree_list()
for tlist2 in (
tlist1.clone(1),
dendropy.TreeList(tlist1),
tlist1.taxon_namespace_scoped_copy(),):
self.compare_distinct_tree_list(tlist2, tlist1,
taxon_namespace_scoped=True,
compare_tree_annotations=True,
compare_taxon_annotations=True)
def test_deepcopy_including_namespace(self):
tlist1 = self.get_tree_list()
for idx, tlist2 in enumerate((
tlist1.clone(2),
copy.deepcopy(tlist1),
)):
self.compare_distinct_tree_list(tlist2, tlist1,
taxon_namespace_scoped=False,
compare_tree_annotations=True,
compare_taxon_annotations=True)
def test_deepcopy_excluding_namespace(self):
tlist1 = self.get_tree_list()
tlist2 = dendropy.TreeList(tlist1,
taxon_namespace=dendropy.TaxonNamespace())
self.compare_distinct_tree_list(tlist2, tlist1,
taxon_namespace_scoped=False,
compare_tree_annotations=True,
compare_taxon_annotations=False)
class TreeListIdentity(unittest.TestCase):
def setUp(self):
self.tns = dendropy.TaxonNamespace()
self.t1 = dendropy.TreeList(label="a", taxon_namespace=self.tns)
self.t2 = dendropy.TreeList(label="a", taxon_namespace=self.tns)
self.t3 = dendropy.TreeList(label="a")
def test_equal(self):
trees = [dendropy.Tree() for i in range(5)]
for tree in trees:
self.t1._trees.append(tree)
self.t2._trees.append(tree)
self.assertEqual(self.t1, self.t2)
def test_unequal1(self):
trees1 = [dendropy.Tree() for i in range(5)]
for tree in trees1:
self.t1._trees.append(tree)
trees2 = [dendropy.Tree() for i in range(5)]
for tree in trees2:
self.t2._trees.append(tree)
self.assertNotEqual(self.t1, self.t2)
def test_unequal2(self):
trees1 = [dendropy.Tree() for i in range(5)]
for tree in trees1:
self.t1._trees.append(tree)
self.t3._trees.append(tree)
self.assertNotEqual(self.t1, self.t3)
def test_hash_dict_membership(self):
k = {}
k[self.t1] = 1
k[self.t2] = 2
self.assertEqual(len(k), 2)
self.assertEqual(k[self.t1], 1)
self.assertEqual(k[self.t2], 2)
self.assertIn(self.t1, k)
self.assertIn(self.t2, k)
del k[self.t1]
self.assertNotIn(self.t1, k)
self.assertIn(self.t2, k)
self.assertEqual(len(k), 1)
k1 = {self.t1: 1}
k2 = {self.t2: 1}
self.assertIn(self.t1, k1)
self.assertIn(self.t2, k2)
self.assertNotIn(self.t2, k1)
self.assertNotIn(self.t1, k2)
def test_hash_set_membership(self):
k = set()
k.add(self.t1)
k.add(self.t2)
self.assertEqual(len(k), 2)
self.assertIn(self.t1, k)
self.assertIn(self.t2, k)
k.discard(self.t1)
self.assertNotIn(self.t1, k)
self.assertIn(self.t2, k)
self.assertEqual(len(k), 1)
k1 = {self.t1: 1}
k2 = {self.t2: 1}
self.assertIn(self.t1, k1)
self.assertIn(self.t2, k2)
self.assertNotIn(self.t2, k1)
self.assertNotIn(self.t1, k2)
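def _example_identity_semantics():
    # Illustrative sketch, not part of the suite: the behavior the hash tests
    # above check is that TreeList hashing is identity-based, so two lists
    # with the same label still occupy distinct dict/set slots. The label is
    # hypothetical; relies on the module's existing dendropy import.
    a = dendropy.TreeList(label="x")
    b = dendropy.TreeList(label="x")
    assert a is not b
    assert len({a, b}) == 2
    return a, b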
class TestTreeListUpdateTaxonNamespace(
curated_test_tree.CuratedTestTree,
dendropytest.ExtendedTestCase):
def setUp(self):
trees = []
for idx in range(5):
tree1, anodes1, lnodes1, inodes1 = self.get_tree(
suppress_internal_node_taxa=True,
suppress_leaf_node_taxa=True)
trees.append(tree1)
self.expected_labels = set()
self.expected_taxa = set()
node_label_to_taxon_label_map = {
"a" : "z01",
"b" : "<NONE>",
"c" : "z03",
"e" : "z04",
"f" : "z05",
"g" : "z06",
"h" : None,
"i" : None,
"j" : "z09",
"k" : "z10",
"l" : "z11",
"m" : "<NONE>",
"n" : None,
"o" : "z14",
"p" : "z15",
}
registry = {}
for tree_idx, tree in enumerate(trees):
for nd in tree:
if nd.label is not None:
if tree_idx > 3:
nd.label = node_label_to_taxon_label_map[nd.label]
if nd.label == "<NONE>":
try:
t = registry[None]
except KeyError:
t = dendropy.Taxon(label=None)
registry[None] = t
self.expected_labels.add(None)
else:
try:
t = registry[nd.label]
except KeyError:
t = dendropy.Taxon(label=nd.label)
registry[nd.label] = t
self.expected_labels.add(nd.label)
nd.taxon = t
self.expected_taxa.add(nd.taxon)
self.tree_list = dendropy.TreeList()
self.tree_list._trees = trees
def test_noop_update_with_no_taxa(self):
trees = []
tns = dendropy.TaxonNamespace()
for idx in range(5):
tree1, anodes1, lnodes1, inodes1 = self.get_tree(
suppress_internal_node_taxa=True,
suppress_leaf_node_taxa=True,
taxon_namespace=tns)
trees.append(tree1)
tlst = dendropy.TreeList(taxon_namespace=tns)
tlst._trees = trees
original_tns = tlst.taxon_namespace
self.assertEqual(len(original_tns), 0)
tlst.update_taxon_namespace()
self.assertIs(tlst.taxon_namespace, original_tns)
for tree in tlst:
self.assertIs(tree.taxon_namespace, tlst.taxon_namespace)
self.assertEqual(len(original_tns), 0)
def test_update(self):
original_tns = self.tree_list.taxon_namespace
self.assertEqual(len(original_tns), 0)
self.tree_list.update_taxon_namespace()
self.tree_list.update_taxon_namespace()
self.tree_list.update_taxon_namespace()
for tree in self.tree_list:
self.assertIs(tree.taxon_namespace, self.tree_list.taxon_namespace)
self.assertIs(self.tree_list.taxon_namespace, original_tns)
new_taxa = [t for t in original_tns]
new_labels = [t.label for t in original_tns]
self.assertCountEqual(new_taxa, self.expected_taxa)
self.assertCountEqual(new_labels, self.expected_labels)
class TestTreeListMigrateAndReconstructTaxonNamespace(
curated_test_tree.CuratedTestTree,
dendropytest.ExtendedTestCase):
def setUp(self):
tns = dendropy.TaxonNamespace()
trees = []
for idx in range(8):
tree, anodes, lnodes, inodes = self.get_tree(
suppress_internal_node_taxa=True,
suppress_leaf_node_taxa=True,
taxon_namespace=tns)
trees.append(tree)
self.node_label_to_taxon_label_map = {
"a" : "a",
"b" : "a",
"c" : "2",
"e" : "2",
"f" : "b",
"g" : "B",
"h" : "B",
"i" : "h",
"j" : "H",
"k" : "h",
"l" : None,
"m" : None,
"n" : "H",
"o" : "J",
"p" : "j",
}
self.original_taxa = []
registry = {}
for tree in trees:
for idx, nd in enumerate(tree):
try:
t = registry[nd.label]
except KeyError:
taxon_label = self.node_label_to_taxon_label_map[nd.label]
t = dendropy.Taxon(label=taxon_label)
registry[nd.label] = t
self.original_taxa.append(t)
tree.taxon_namespace.add_taxon(t)
nd.taxon = t
nd.original_taxon = t
assert len(tree.taxon_namespace) == len(self.node_label_to_taxon_label_map)
assert len(tree.taxon_namespace) == len(self.original_taxa)
self.tree_list = dendropy.TreeList(taxon_namespace=tns)
self.tree_list._trees = trees
def verify_taxon_namespace_reconstruction(self,
unify_taxa_by_label=False,
case_sensitive_label_mapping=True,
original_tns=None,
redundant_taxa=False):
if unify_taxa_by_label:
if not case_sensitive_label_mapping:
expected_labels = []
for label in self.node_label_to_taxon_label_map.values():
if label is None:
expected_labels.append(label)
else:
label = label.upper()
if label not in expected_labels:
expected_labels.append(label)
else:
expected_labels = list(set(label for label in self.node_label_to_taxon_label_map.values()))
else:
expected_labels = [label for label in self.node_label_to_taxon_label_map.values()]
for tree in self.tree_list:
seen_taxa = []
self.assertIs(tree.taxon_namespace, self.tree_list.taxon_namespace)
for nd in tree:
self.assertIsNot(nd.taxon, nd.original_taxon)
if not case_sensitive_label_mapping and nd.taxon.label is not None:
self.assertEqual(nd.taxon.label.upper(), nd.original_taxon.label.upper())
self.assertEqual(self.node_label_to_taxon_label_map[nd.label].upper(), nd.taxon.label.upper())
else:
self.assertEqual(nd.taxon.label, nd.original_taxon.label)
self.assertEqual(self.node_label_to_taxon_label_map[nd.label], nd.taxon.label)
self.assertNotIn(nd.original_taxon, tree.taxon_namespace)
self.assertIn(nd.original_taxon, self.original_taxa)
self.assertIn(nd.taxon, tree.taxon_namespace)
self.assertNotIn(nd.taxon, self.original_taxa)
if original_tns is not None:
self.assertNotIn(nd.taxon, original_tns)
if nd.taxon not in seen_taxa:
seen_taxa.append(nd.taxon)
else:
self.assertTrue(unify_taxa_by_label or redundant_taxa)
if not case_sensitive_label_mapping:
self.assertIn(nd.taxon.label, [t.label for t in seen_taxa])
else:
if nd.taxon.label is None:
self.assertIs(nd.original_taxon.label, None)
self.assertEqual([t.label for t in seen_taxa].count(None), 1)
else:
x1 = [t.label.upper() for t in seen_taxa if t.label is not None]
self.assertIn(nd.taxon.label.upper(), x1)
self.assertEqual(len(seen_taxa), len(tree.taxon_namespace))
if not case_sensitive_label_mapping:
seen_labels = [(t.label.upper() if t.label is not None else None) for t in seen_taxa]
else:
seen_labels = [t.label for t in seen_taxa]
c1 = collections.Counter(expected_labels)
c2 = collections.Counter(seen_labels)
self.assertEqual(c1, c2)
self.assertEqual(len(tree.taxon_namespace), len(expected_labels))
if not unify_taxa_by_label and not redundant_taxa:
self.assertEqual(len(tree.taxon_namespace), len(self.node_label_to_taxon_label_map))
def test_basic_reconstruction(self):
tns = dendropy.TaxonNamespace()
trees = []
for idx in range(5):
tree, anodes, lnodes, inodes = self.get_tree(
suppress_internal_node_taxa=False,
suppress_leaf_node_taxa=False,
taxon_namespace=tns)
trees.append(tree)
tree_list = dendropy.TreeList(taxon_namespace=tns)
tree_list._trees = trees
new_tns = dendropy.TaxonNamespace()
new_tns.is_case_sensitive = True
tree_list.taxon_namespace = new_tns
tree_list.reconstruct_taxon_namespace(unify_taxa_by_label=False)
self.assertIsNot(tree_list.taxon_namespace, tns)
self.assertIs(tree_list.taxon_namespace, new_tns)
self.assertEqual(len(tree_list.taxon_namespace), len(tns))
original_labels = [t.label for t in tns]
new_labels = [t.label for t in new_tns]
self.assertCountEqual(new_labels, original_labels)
for tree in tree_list:
self.assertIs(tree.taxon_namespace, tree_list.taxon_namespace)
for nd in tree:
if nd.taxon is not None:
self.assertIn(nd.taxon, tree.taxon_namespace)
self.assertNotIn(nd.taxon, tns)
def test_reconstruct_taxon_namespace_non_unifying(self):
original_tns = self.tree_list.taxon_namespace
new_tns = dendropy.TaxonNamespace()
new_tns.is_case_sensitive = True
self.tree_list._taxon_namespace = new_tns
self.assertEqual(len(self.tree_list.taxon_namespace), 0)
self.tree_list.reconstruct_taxon_namespace(unify_taxa_by_label=False)
self.assertIsNot(self.tree_list.taxon_namespace, original_tns)
self.assertIs(self.tree_list.taxon_namespace, new_tns)
self.verify_taxon_namespace_reconstruction(
unify_taxa_by_label=False,
case_sensitive_label_mapping=True)
def test_reconstruct_taxon_namespace_unifying_case_sensitive(self):
original_tns = self.tree_list.taxon_namespace
new_tns = dendropy.TaxonNamespace()
new_tns.is_case_sensitive = True
self.tree_list._taxon_namespace = new_tns
self.assertEqual(len(self.tree_list.taxon_namespace), 0)
self.tree_list.reconstruct_taxon_namespace(unify_taxa_by_label=True)
self.assertIsNot(self.tree_list.taxon_namespace, original_tns)
self.assertIs(self.tree_list.taxon_namespace, new_tns)
self.verify_taxon_namespace_reconstruction(
unify_taxa_by_label=True,
case_sensitive_label_mapping=True,
original_tns=original_tns)
def test_reconstruct_taxon_namespace_unifying_case_insensitive(self):
original_tns = self.tree_list.taxon_namespace
new_tns = dendropy.TaxonNamespace()
new_tns.is_case_sensitive = False
self.tree_list._taxon_namespace = new_tns
self.assertEqual(len(self.tree_list.taxon_namespace), 0)
self.tree_list.reconstruct_taxon_namespace(unify_taxa_by_label=True)
self.assertIsNot(self.tree_list.taxon_namespace, original_tns)
self.assertIs(self.tree_list.taxon_namespace, new_tns)
self.verify_taxon_namespace_reconstruction(
unify_taxa_by_label=True,
case_sensitive_label_mapping=False,
original_tns=original_tns)
def test_basic_migration(self):
tns = dendropy.TaxonNamespace()
trees = []
for idx in range(5):
tree, anodes, lnodes, inodes = self.get_tree(
suppress_internal_node_taxa=False,
suppress_leaf_node_taxa=False,
taxon_namespace=tns)
trees.append(tree)
tree_list = dendropy.TreeList(taxon_namespace=tns)
tree_list._trees = trees
new_tns = dendropy.TaxonNamespace()
new_tns.is_case_sensitive = True
tree_list.taxon_namespace = new_tns
tree_list.migrate_taxon_namespace(
new_tns,
unify_taxa_by_label=False)
self.assertIsNot(tree_list.taxon_namespace, tns)
self.assertIs(tree_list.taxon_namespace, new_tns)
self.assertEqual(len(tree_list.taxon_namespace), len(tns))
original_labels = [t.label for t in tns]
new_labels = [t.label for t in new_tns]
self.assertCountEqual(new_labels, original_labels)
for tree in tree_list:
self.assertIs(tree.taxon_namespace, tree_list.taxon_namespace)
for nd in tree:
if nd.taxon is not None:
self.assertIn(nd.taxon, tree.taxon_namespace)
self.assertNotIn(nd.taxon, tns)
def test_migrate_taxon_namespace_non_unifying(self):
original_tns = self.tree_list.taxon_namespace
new_tns = dendropy.TaxonNamespace()
new_tns.is_case_sensitive = True
self.tree_list.migrate_taxon_namespace(
new_tns,
unify_taxa_by_label=False)
self.assertIsNot(self.tree_list.taxon_namespace, original_tns)
self.assertIs(self.tree_list.taxon_namespace, new_tns)
self.verify_taxon_namespace_reconstruction(
unify_taxa_by_label=False,
case_sensitive_label_mapping=True,
original_tns=original_tns)
def test_migrate_taxon_namespace_unifying_case_sensitive(self):
original_tns = self.tree_list.taxon_namespace
new_tns = dendropy.TaxonNamespace()
new_tns.is_case_sensitive = True
self.tree_list.migrate_taxon_namespace(
new_tns,
unify_taxa_by_label=True)
self.assertIsNot(self.tree_list.taxon_namespace, original_tns)
self.assertIs(self.tree_list.taxon_namespace, new_tns)
self.verify_taxon_namespace_reconstruction(
unify_taxa_by_label=True,
case_sensitive_label_mapping=True,
original_tns=original_tns)
def test_migrate_taxon_namespace_unifying_case_insensitive(self):
original_tns = self.tree_list.taxon_namespace
new_tns = dendropy.TaxonNamespace()
new_tns.is_case_sensitive = False
self.tree_list.migrate_taxon_namespace(
new_tns,
unify_taxa_by_label=True)
self.assertIsNot(self.tree_list.taxon_namespace, original_tns)
self.assertIs(self.tree_list.taxon_namespace, new_tns)
self.verify_taxon_namespace_reconstruction(
unify_taxa_by_label=True,
case_sensitive_label_mapping=False,
original_tns=original_tns)
class TestTreeListAppend(
curated_test_tree.CuratedTestTree,
unittest.TestCase):
def setUp(self):
self.native_tns = dendropy.TaxonNamespace()
self.tree_list = dendropy.TreeList(taxon_namespace=self.native_tns)
self.foreign_tns = dendropy.TaxonNamespace()
self.foreign_tree, anodes, lnodes, inodes = self.get_tree(
suppress_internal_node_taxa=False,
suppress_leaf_node_taxa=False,
taxon_namespace=self.foreign_tns)
for nd in self.foreign_tree:
nd.original_taxon = nd.taxon
self.check_tns = dendropy.TaxonNamespace()
self.check_tree, anodes, lnodes, inodes = self.get_tree(
suppress_internal_node_taxa=False,
suppress_leaf_node_taxa=False,
taxon_namespace=self.check_tns)
def test_append_default(self):
self.assertIsNot(self.tree_list.taxon_namespace, self.foreign_tree.taxon_namespace)
self.tree_list.append(self.foreign_tree)
self.assertEqual(len(self.tree_list), 1)
self.assertIn(self.foreign_tree, self.tree_list)
self.assertIs(self.foreign_tree, self.tree_list[0])
self.assertIs(self.tree_list.taxon_namespace, self.native_tns)
self.assertIs(self.foreign_tree.taxon_namespace, self.tree_list.taxon_namespace)
self.assertEqual(len(self.tree_list.taxon_namespace), len(self.foreign_tns))
for nd in self.foreign_tree:
if nd.taxon:
self.assertIn(nd.taxon, self.tree_list.taxon_namespace)
self.assertIsNot(nd.taxon, nd.original_taxon)
self.assertIn(nd.original_taxon, self.foreign_tns)
self.assertNotIn(nd.original_taxon, self.tree_list.taxon_namespace)
self.assertEqual(nd.taxon.label, nd.original_taxon.label)
def test_append_migrate_matching_labels(self):
kwargs_groups = [
{"taxon_import_strategy": "migrate", "unify_taxa_by_label": True},
{"taxon_import_strategy": "migrate", "unify_taxa_by_label": False},
{"taxon_import_strategy": "add", },
]
for kwargs in kwargs_groups:
self.setUp()
self.assertEqual(len(self.tree_list.taxon_namespace), 0)
native_tree, anodes, lnodes, inodes = self.get_tree(
suppress_internal_node_taxa=False,
suppress_leaf_node_taxa=False,
taxon_namespace=self.native_tns)
self.assertEqual(len(self.tree_list.taxon_namespace), len(self.postorder_sequence))
self.assertEqual(len(self.tree_list.taxon_namespace), len(self.foreign_tns))
original_tns_len = len(self.tree_list.taxon_namespace)
self.tree_list.append(self.foreign_tree, **kwargs)
self.assertEqual(len(self.tree_list), 1)
self.assertIn(self.foreign_tree, self.tree_list)
self.assertIs(self.foreign_tree, self.tree_list[0])
self.assertIs(self.foreign_tree.taxon_namespace, self.tree_list.taxon_namespace)
if kwargs["taxon_import_strategy"] == "add":
self.assertEqual(len(self.tree_list.taxon_namespace),
original_tns_len + len(self.foreign_tns))
for nd in self.foreign_tree:
self.assertIn(nd.taxon, self.foreign_tns)
self.assertIn(nd.taxon, self.tree_list.taxon_namespace)
else:
if "unify_taxa_by_label" not in kwargs or not kwargs["unify_taxa_by_label"]:
self.assertEqual(len(self.tree_list.taxon_namespace),
original_tns_len + len(self.foreign_tns))
else:
self.assertEqual(len(self.tree_list.taxon_namespace), original_tns_len)
for nd in self.foreign_tree:
self.assertNotIn(nd.taxon, self.foreign_tns)
self.assertIn(nd.taxon, self.tree_list.taxon_namespace)
def test_append_add(self):
self.assertIsNot(self.tree_list.taxon_namespace, self.foreign_tree.taxon_namespace)
self.tree_list.append(self.foreign_tree,
taxon_import_strategy="add")
self.assertEqual(len(self.tree_list), 1)
self.assertIn(self.foreign_tree, self.tree_list)
self.assertIs(self.foreign_tree, self.tree_list[0])
self.assertIs(self.tree_list.taxon_namespace, self.native_tns)
self.assertIs(self.foreign_tree.taxon_namespace, self.tree_list.taxon_namespace)
self.assertEqual(len(self.tree_list.taxon_namespace), len(self.foreign_tns))
for nd in self.foreign_tree:
if nd.taxon:
self.assertIn(nd.taxon, self.tree_list.taxon_namespace)
self.assertIs(nd.taxon, nd.original_taxon)
self.assertIn(nd.original_taxon, self.foreign_tns)
self.assertIn(nd.original_taxon, self.tree_list.taxon_namespace)
class TestTreeListTaxa(
curated_test_tree.CuratedTestTree,
dendropytest.ExtendedTestCase):
def setUp(self):
self.tree_list = dendropy.TreeList()
self.expected_taxa = None
for i in range(10):
tree1, anodes1, lnodes1, inodes1 = self.get_tree(
taxon_namespace=self.tree_list.taxon_namespace,
suppress_internal_node_taxa=False,
suppress_leaf_node_taxa=False)
self.tree_list.append(tree1)
if self.expected_taxa is None:
self.expected_taxa = set([nd.taxon for nd in anodes1 if nd.taxon is not None])
def test_basic_taxa(self):
self.assertEqual(self.tree_list.poll_taxa(), self.expected_taxa)
class TestTreeListPurgeTaxonNamespace(
curated_test_tree.CuratedTestTree,
dendropytest.ExtendedTestCase):
def setUp(self):
self.tree_list = dendropy.TreeList()
self.expected_taxa = None
for i in range(10):
tree1, anodes1, lnodes1, inodes1 = self.get_tree(
taxon_namespace=self.tree_list.taxon_namespace,
suppress_internal_node_taxa=False,
suppress_leaf_node_taxa=False)
self.tree_list.append(tree1)
if self.expected_taxa is None:
self.expected_taxa = set([nd.taxon for nd in anodes1 if nd.taxon is not None])
def test_noop_purge(self):
self.assertEqual(set(self.tree_list.taxon_namespace), self.expected_taxa)
self.tree_list.purge_taxon_namespace()
self.assertEqual(set(self.tree_list.taxon_namespace), self.expected_taxa)
def test_basic_purge(self):
self.assertEqual(set(self.tree_list.taxon_namespace), self.expected_taxa)
added_taxa = set(self.expected_taxa)
for label in ("z1", "z2", "z3", "z4"):
t = self.tree_list.taxon_namespace.new_taxon(label=label)
added_taxa.add(t)
self.assertEqual(set(self.tree_list.taxon_namespace), added_taxa)
self.tree_list.purge_taxon_namespace()
self.assertEqual(set(self.tree_list.taxon_namespace), self.expected_taxa)
class TreeListCreation(unittest.TestCase):
def test_create_with_taxon_namespace(self):
tns = dendropy.TaxonNamespace()
tt = dendropy.TreeList(label="a", taxon_namespace=tns)
self.assertEqual(tt.label, "a")
self.assertIs(tt.taxon_namespace, tns)
class TestSpecialTreeListConstruction(
unittest.TestCase):
def test_construction_from_another_tree_different_label(self):
tlist1 = dendropy.TreeList()
tlist1.label = "tlist1"
self.assertEqual(tlist1.label, "tlist1")
tlist2 = dendropy.TreeList(tlist1, label="tlist2")
self.assertEqual(tlist2.label, "tlist2")
self.assertNotEqual(tlist1.label, "tlist2")
self.assertNotEqual(tlist1.label, tlist2.label)
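def _example_taxon_namespace_migration():
    # Illustrative sketch, not invoked by the suite above: the TreeList taxon
    # namespace workflow these tests exercise, end to end. The Newick string
    # and labels are hypothetical; only the dendropy calls (TreeList.get and
    # migrate_taxon_namespace) mirror the API under test.
    trees = dendropy.TreeList.get(data="((A,B),C); ((A,C),B);", schema="newick")
    new_tns = dendropy.TaxonNamespace()
    # Move every taxon into the new namespace, merging taxa that share a label.
    trees.migrate_taxon_namespace(new_tns, unify_taxa_by_label=True)
    assert trees.taxon_namespace is new_tns
    for tree in trees:
        assert tree.taxon_namespace is new_tns
    return trees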
if __name__ == "__main__":
unittest.main()
| true | true |
f71c6192795695fce398b118465ead2235f638d3 | 263 | py | Python | services/movies_billing/subscription_service/src/models/customer.py | fuodorov/yacinema | 43ad869575fbaab7c7056229538638666aa87110 | [
"MIT"
] | null | null | null | services/movies_billing/subscription_service/src/models/customer.py | fuodorov/yacinema | 43ad869575fbaab7c7056229538638666aa87110 | [
"MIT"
] | null | null | null | services/movies_billing/subscription_service/src/models/customer.py | fuodorov/yacinema | 43ad869575fbaab7c7056229538638666aa87110 | [
"MIT"
] | 1 | 2021-09-30T09:49:40.000Z | 2021-09-30T09:49:40.000Z | from sqlalchemy import String, ForeignKey, Integer, Enum
from .base import AbstractModel, RequiredColumn


class Customer(AbstractModel):
    """Maps an application user to the corresponding Stripe customer record."""

    __tablename__ = "customer"

    user_id = RequiredColumn(String(50))
    stripe_customer_id = RequiredColumn(String(50))
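# Minimal usage sketch (assumptions: `AbstractModel` is a SQLAlchemy
# declarative base, `RequiredColumn` is a non-nullable Column helper, and
# `engine` is configured elsewhere). Kept commented out so the module stays
# import-safe:
#
#     from sqlalchemy.orm import Session
#
#     with Session(engine) as session:
#         session.add(Customer(user_id="user-42", stripe_customer_id="cus_123"))
#         session.commit()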
| 26.3 | 56 | 0.775665 |
| true | true |
f71c61dd6a6ae22677f0c87104c88f4220333ae8 | 234 | py | Python | physionet-django/project/fileviews/image.py | Lucas-Mc/physionet-build | 77da5da6273cf3f5f2afce95dc5d0ce3302741ca | [
"BSD-3-Clause"
] | 36 | 2019-02-14T18:10:39.000Z | 2022-01-21T12:48:52.000Z | physionet-django/project/fileviews/image.py | Lucas-Mc/physionet-build | 77da5da6273cf3f5f2afce95dc5d0ce3302741ca | [
"BSD-3-Clause"
] | 1,051 | 2019-01-31T18:03:14.000Z | 2022-03-31T20:53:04.000Z | physionet-django/project/fileviews/image.py | Lucas-Mc/physionet-build | 77da5da6273cf3f5f2afce95dc5d0ce3302741ca | [
"BSD-3-Clause"
] | 13 | 2019-03-26T11:02:32.000Z | 2022-03-17T11:39:49.000Z | from project.fileviews.base import FileView
class ImageFileView(FileView):
    """
    Class for displaying image files.
    """

    def render(self, request):
        return super().render(request, 'project/file_view_image.html')
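# Hedged sketch: other viewers would follow the same pattern, overriding only
# the template passed through to FileView.render. The class and template name
# below are made up for illustration:
#
#     class AudioFileView(FileView):
#         """
#         Class for displaying audio files.
#         """
#
#         def render(self, request):
#             return super().render(request, 'project/file_view_audio.html')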
| 21.272727 | 70 | 0.692308 |
| true | true |
f71c63d30ec047c7337fa37ae62317af4f4cdf92 | 2,398 | py | Python | model_zoo/official/cv/alexnet/export.py | dongkcs/mindspore | cd7df6dbf463ff3128e9181e9d0c779cecb81320 | [
"Apache-2.0"
] | 2 | 2020-11-23T13:46:37.000Z | 2020-12-20T02:02:38.000Z | model_zoo/official/cv/alexnet/export.py | dongkcs/mindspore | cd7df6dbf463ff3128e9181e9d0c779cecb81320 | [
"Apache-2.0"
] | null | null | null | model_zoo/official/cv/alexnet/export.py | dongkcs/mindspore | cd7df6dbf463ff3128e9181e9d0c779cecb81320 | [
"Apache-2.0"
] | 1 | 2021-01-01T08:35:01.000Z | 2021-01-01T08:35:01.000Z | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
##############export checkpoint file into air and onnx models#################
python export.py
"""
import argparse
import numpy as np
import mindspore as ms
from mindspore import Tensor
from mindspore import context
from mindspore.train.serialization import load_checkpoint, load_param_into_net, export
from src.config import alexnet_cifar10_cfg, alexnet_imagenet_cfg
from src.alexnet import AlexNet
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Classification')
parser.add_argument('--dataset_name', type=str, default='cifar10', choices=['imagenet', 'cifar10'],
help='please choose dataset: imagenet or cifar10.')
parser.add_argument('--device_target', type=str, default="Ascend",
choices=['Ascend', 'GPU'],
help='device where the code will be implemented (default: Ascend)')
    parser.add_argument('--ckpt_path', type=str, default="./ckpt",
                        help='path to the trained checkpoint file')
args_opt = parser.parse_args()
context.set_context(mode=context.GRAPH_MODE, device_target=args_opt.device_target)
if args_opt.dataset_name == 'cifar10':
cfg = alexnet_cifar10_cfg
elif args_opt.dataset_name == 'imagenet':
cfg = alexnet_imagenet_cfg
else:
        raise ValueError("dataset is not supported.")
net = AlexNet(num_classes=cfg.num_classes)
param_dict = load_checkpoint(args_opt.ckpt_path)
load_param_into_net(net, param_dict)
input_arr = Tensor(np.random.uniform(0.0, 1.0, size=[1, 3, cfg.image_height, cfg.image_width]), ms.float32)
export(net, input_arr, file_name=cfg.air_name, file_format="AIR")
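    # The original module banner also mentions ONNX; MindSpore's `export`
    # accepts file_format="ONNX" as well, so the same network and dummy input
    # could be exported like this (the `cfg.onnx_name` attribute is an
    # assumption; adjust to your config). Kept commented out:
    # export(net, input_arr, file_name=cfg.onnx_name, file_format="ONNX")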
| 42.821429 | 111 | 0.685988 |
| true | true |
f71c6605bc3c94744764c205c3291d67c5416f2f | 624 | py | Python | setup.py | tianjianjiang/bigscience-metadata | 3460c8d2bca2c818646feb3b49c50b351b51ad70 | [
"Apache-2.0"
] | null | null | null | setup.py | tianjianjiang/bigscience-metadata | 3460c8d2bca2c818646feb3b49c50b351b51ad70 | [
"Apache-2.0"
] | null | null | null | setup.py | tianjianjiang/bigscience-metadata | 3460c8d2bca2c818646feb3b49c50b351b51ad70 | [
"Apache-2.0"
] | null | null | null | from setuptools import find_packages, setup
def req_file(filename):
    """Parse a pip requirements file into a list of requirement strings."""
    with open(filename) as f:
        content = f.readlines()
    return [x.strip() for x in content]


install_requires = req_file("requirements.txt")
setup(
name="bsmetadata",
python_requires=">=3.7.11, <3.10",
version="0.1.0",
url="https://github.com/bigscience-workshop/metadata.git",
author="Multiple Authors",
author_email="xxx",
description="Codebase for including metadata (e.g., URLs, timestamps, HTML tags) during language model pretraining.",
packages=find_packages(),
install_requires=install_requires,
)
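# Hedged development notes (standard setuptools workflow, not project-specific
# documentation):
#
#     python -m pip install -e .   # editable install driven by this setup.py
#     python -m build              # build sdist + wheel (requires `build`)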
| 27.130435 | 121 | 0.698718 |
| true | true |
f71c66238339944e28e835fcee02bfdd0f475eda | 1,171 | py | Python | setup.py | judoscale/judoscale-python | 3a2c82921ec5feb81d2691037b9bf13e94f7e5c9 | [
"MIT"
] | null | null | null | setup.py | judoscale/judoscale-python | 3a2c82921ec5feb81d2691037b9bf13e94f7e5c9 | [
"MIT"
] | null | null | null | setup.py | judoscale/judoscale-python | 3a2c82921ec5feb81d2691037b9bf13e94f7e5c9 | [
"MIT"
] | null | null | null | import setuptools
# To publish:
#
# - Update VERSION constant below
# - python3 -m pip install --upgrade build twine
# - rm -rf dist && python3 -m build
# - python3 -m twine upload dist/*
# - Username is __token__, password is token value
VERSION = "1.0.0rc1"
INSTALL_REQUIRES = ["requests<3.0.0"]
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="judoscale-python",
version=VERSION,
author="Adam McCrea",
author_email="adam@adamlogic.com",
description="Official Python adapter for Judoscale—the advanced autoscaler for Heroku",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/judoscale/judoscale-python",
project_urls={
"Issue Tracker": "https://github.com/judoscale/judoscale-python/issues",
},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
package_dir={"": "src"},
packages=setuptools.find_packages(where="src"),
install_requires=INSTALL_REQUIRES,
)
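# Optional pre-upload verification (an assumed extra step on top of the
# publish checklist at the top of this file; `twine check` validates the
# rendered package metadata before uploading):
#
#     python3 -m twine check dist/*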
| 30.815789 | 91 | 0.674637 |
| true | true |
f71c68260a4ae6f5fd97179a6c4e00a92144ea7f | 36,156 | py | Python | zerver/migrations/0001_initial.py | N-Shar-ma/zulip | 95303a9929424b55a1f7c7cce9313c4619a9533b | [
"Apache-2.0"
] | 4 | 2021-09-16T16:46:55.000Z | 2022-02-06T13:00:21.000Z | zerver/migrations/0001_initial.py | jai2201/zulip | 95303a9929424b55a1f7c7cce9313c4619a9533b | [
"Apache-2.0"
] | null | null | null | zerver/migrations/0001_initial.py | jai2201/zulip | 95303a9929424b55a1f7c7cce9313c4619a9533b | [
"Apache-2.0"
] | null | null | null | # Generated by Django 1.11.2 on 2017-06-22 10:22
import bitfield.models
import django.contrib.auth.models
import django.core.validators
import django.db.models.deletion
import django.utils.timezone
from django.conf import settings
from django.contrib.postgres.indexes import GinIndex
from django.contrib.postgres.search import SearchVectorField
from django.db import migrations, models
from django.db.backends.postgresql.schema import BaseDatabaseSchemaEditor
from django.db.migrations.state import StateApps
from django.db.models.functions import Upper
from zerver.models import generate_email_token_for_stream
def migrate_existing_attachment_data(
apps: StateApps, schema_editor: BaseDatabaseSchemaEditor
) -> None:
    # Backfill the realm and is_realm_public columns on existing attachments:
    # each attachment inherits its owner's realm, and it is marked
    # realm-public if the owner sent it to a public stream in a
    # non-Zephyr-mirror realm.
    Attachment = apps.get_model("zerver", "Attachment")
    Recipient = apps.get_model("zerver", "Recipient")
    Stream = apps.get_model("zerver", "Stream")
    attachments = Attachment.objects.all()
    for entry in attachments:
        owner = entry.owner
        entry.realm = owner.realm
        for message in entry.messages.all():
            # Only messages the owner sent themselves can make the
            # attachment public.
            if owner == message.sender:
                if message.recipient.type == Recipient.STREAM:
                    stream = Stream.objects.get(id=message.recipient.type_id)
                    is_realm_public = (
                        not stream.realm.is_zephyr_mirror_realm and not stream.invite_only
                    )
                    # Once public via any message, the attachment stays public.
                    entry.is_realm_public = entry.is_realm_public or is_realm_public
        entry.save()
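# The backfill above is registered in the operations list below as
# migrations.RunPython(code=migrate_existing_attachment_data, elidable=True).
# A hedged variant with an explicit no-op reverse, so the migration could
# also be unapplied cleanly, would look like:
#
#     migrations.RunPython(
#         migrate_existing_attachment_data,
#         reverse_code=migrations.RunPython.noop,
#         elidable=True,
#     )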
class Migration(migrations.Migration):
initial = True
dependencies = [
("auth", "0001_initial"),
]
if settings.POSTGRESQL_MISSING_DICTIONARIES:
fts_sql = """
CREATE TEXT SEARCH CONFIGURATION zulip.english_us_search (COPY=pg_catalog.english);
"""
else:
fts_sql = """
CREATE TEXT SEARCH DICTIONARY english_us_hunspell
(template = ispell, DictFile = en_us, AffFile = en_us, StopWords = zulip_english);
CREATE TEXT SEARCH CONFIGURATION zulip.english_us_search (COPY=pg_catalog.english);
ALTER TEXT SEARCH CONFIGURATION zulip.english_us_search
ALTER MAPPING FOR asciiword, asciihword, hword_asciipart, word, hword, hword_part
WITH english_us_hunspell, english_stem;
"""
fts_sql += """
CREATE FUNCTION escape_html(text) RETURNS text IMMUTABLE LANGUAGE 'sql' AS $$
SELECT replace(replace(replace(replace(replace($1, '&', '&'), '<', '<'),
'>', '>'), '"', '"'), '''', ''');
$$ ;
CREATE TABLE fts_update_log (id SERIAL PRIMARY KEY, message_id INTEGER NOT NULL);
CREATE FUNCTION do_notify_fts_update_log() RETURNS trigger LANGUAGE plpgsql AS
$$ BEGIN NOTIFY fts_update_log; RETURN NEW; END $$;
CREATE TRIGGER fts_update_log_notify AFTER INSERT ON fts_update_log
FOR EACH STATEMENT EXECUTE PROCEDURE do_notify_fts_update_log();
CREATE FUNCTION append_to_fts_update_log() RETURNS trigger LANGUAGE plpgsql AS
$$ BEGIN INSERT INTO fts_update_log (message_id) VALUES (NEW.id); RETURN NEW; END $$;
CREATE TRIGGER zerver_message_update_search_tsvector_async
BEFORE INSERT OR UPDATE OF subject, rendered_content ON zerver_message
FOR EACH ROW EXECUTE PROCEDURE append_to_fts_update_log();
"""
operations = [
migrations.CreateModel(
name="UserProfile",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("password", models.CharField(max_length=128, verbose_name="password")),
(
"last_login",
models.DateTimeField(
default=django.utils.timezone.now, verbose_name="last login"
),
),
(
"is_superuser",
models.BooleanField(
default=False,
help_text="Designates that this user has all permissions without explicitly assigning them.",
verbose_name="superuser status",
),
),
("email", models.EmailField(db_index=True, max_length=75, unique=True)),
("is_staff", models.BooleanField(default=False)),
("is_active", models.BooleanField(default=True)),
("is_bot", models.BooleanField(default=False)),
("date_joined", models.DateTimeField(default=django.utils.timezone.now)),
("is_mirror_dummy", models.BooleanField(default=False)),
("full_name", models.CharField(max_length=100)),
("short_name", models.CharField(max_length=100)),
("pointer", models.IntegerField()),
("last_pointer_updater", models.CharField(max_length=64)),
("api_key", models.CharField(max_length=32)),
("enable_stream_desktop_notifications", models.BooleanField(default=True)),
("enable_stream_sounds", models.BooleanField(default=True)),
("enable_desktop_notifications", models.BooleanField(default=True)),
("enable_sounds", models.BooleanField(default=True)),
("enable_offline_email_notifications", models.BooleanField(default=True)),
("enable_offline_push_notifications", models.BooleanField(default=True)),
("enable_digest_emails", models.BooleanField(default=True)),
("default_desktop_notifications", models.BooleanField(default=True)),
(
"last_reminder",
models.DateTimeField(default=django.utils.timezone.now, null=True),
),
("rate_limits", models.CharField(default="", max_length=100)),
("default_all_public_streams", models.BooleanField(default=False)),
("enter_sends", models.NullBooleanField(default=True)),
("autoscroll_forever", models.BooleanField(default=False)),
("twenty_four_hour_time", models.BooleanField(default=False)),
(
"avatar_source",
models.CharField(
choices=[
("G", "Hosted by Gravatar"),
("U", "Uploaded by user"),
("S", "System generated"),
],
default="G",
max_length=1,
),
),
(
"tutorial_status",
models.CharField(
choices=[("W", "Waiting"), ("S", "Started"), ("F", "Finished")],
default="W",
max_length=1,
),
),
("onboarding_steps", models.TextField(default="[]")),
("invites_granted", models.IntegerField(default=0)),
("invites_used", models.IntegerField(default=0)),
("alert_words", models.TextField(default="[]")),
("muted_topics", models.TextField(default="[]")),
(
"bot_owner",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to=settings.AUTH_USER_MODEL,
),
),
],
options={
"abstract": False,
},
),
migrations.CreateModel(
name="Client",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("name", models.CharField(db_index=True, max_length=30, unique=True)),
],
),
migrations.CreateModel(
name="DefaultStream",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
],
),
migrations.CreateModel(
name="Huddle",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("huddle_hash", models.CharField(db_index=True, max_length=40, unique=True)),
],
),
migrations.CreateModel(
name="Message",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("subject", models.CharField(db_index=True, max_length=60)),
("content", models.TextField()),
("rendered_content", models.TextField(null=True)),
("rendered_content_version", models.IntegerField(null=True)),
("pub_date", models.DateTimeField(db_index=True, verbose_name="date published")),
("last_edit_time", models.DateTimeField(null=True)),
("edit_history", models.TextField(null=True)),
("has_attachment", models.BooleanField(db_index=True, default=False)),
("has_image", models.BooleanField(db_index=True, default=False)),
("has_link", models.BooleanField(db_index=True, default=False)),
],
),
migrations.CreateModel(
name="PreregistrationUser",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("email", models.EmailField(max_length=75)),
("invited_at", models.DateTimeField(auto_now=True)),
("status", models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name="PushDeviceToken",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("kind", models.PositiveSmallIntegerField(choices=[(1, "apns"), (2, "gcm")])),
("token", models.CharField(max_length=4096, unique=True)),
(
"last_updated",
models.DateTimeField(auto_now=True, default=django.utils.timezone.now),
),
("ios_app_id", models.TextField(null=True)),
(
"user",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL
),
),
],
),
migrations.CreateModel(
name="Realm",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("domain", models.CharField(db_index=True, max_length=40, unique=True)),
("name", models.CharField(max_length=40, null=True)),
("restricted_to_domain", models.BooleanField(default=True)),
("invite_required", models.BooleanField(default=False)),
("invite_by_admins_only", models.BooleanField(default=False)),
("mandatory_topics", models.BooleanField(default=False)),
("show_digest_email", models.BooleanField(default=True)),
("name_changes_disabled", models.BooleanField(default=False)),
("date_created", models.DateTimeField(default=django.utils.timezone.now)),
("deactivated", models.BooleanField(default=False)),
],
options={
"permissions": (("administer", "Administer a realm"),),
},
),
migrations.CreateModel(
name="RealmAlias",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("domain", models.CharField(db_index=True, max_length=80, unique=True)),
(
"realm",
models.ForeignKey(
null=True, on_delete=django.db.models.deletion.CASCADE, to="zerver.Realm"
),
),
],
),
migrations.CreateModel(
name="RealmEmoji",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("name", models.TextField()),
("img_url", models.TextField()),
(
"realm",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="zerver.Realm"
),
),
],
),
migrations.CreateModel(
name="RealmFilter",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("pattern", models.TextField()),
("url_format_string", models.TextField()),
(
"realm",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="zerver.Realm"
),
),
],
),
migrations.CreateModel(
name="Recipient",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("type_id", models.IntegerField(db_index=True)),
("type", models.PositiveSmallIntegerField(db_index=True)),
],
),
migrations.CreateModel(
name="Referral",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("email", models.EmailField(max_length=75)),
("timestamp", models.DateTimeField(auto_now_add=True)),
(
"user_profile",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL
),
),
],
),
migrations.CreateModel(
name="ScheduledJob",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("scheduled_timestamp", models.DateTimeField()),
("type", models.PositiveSmallIntegerField()),
("data", models.TextField()),
("filter_id", models.IntegerField(null=True)),
("filter_string", models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name="Stream",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("name", models.CharField(db_index=True, max_length=60)),
("invite_only", models.NullBooleanField(default=False)),
(
"email_token",
models.CharField(default=generate_email_token_for_stream, max_length=32),
),
("description", models.CharField(default="", max_length=1024)),
("date_created", models.DateTimeField(default=django.utils.timezone.now)),
("deactivated", models.BooleanField(default=False)),
(
"realm",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="zerver.Realm"
),
),
],
),
migrations.CreateModel(
name="Subscription",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("active", models.BooleanField(default=True)),
("in_home_view", models.NullBooleanField(default=True)),
("color", models.CharField(default="#c2c2c2", max_length=10)),
("desktop_notifications", models.BooleanField(default=True)),
("audible_notifications", models.BooleanField(default=True)),
("notifications", models.BooleanField(default=False)),
(
"recipient",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="zerver.Recipient"
),
),
(
"user_profile",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL
),
),
],
),
migrations.CreateModel(
name="UserActivity",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("query", models.CharField(db_index=True, max_length=50)),
("count", models.IntegerField()),
("last_visit", models.DateTimeField(verbose_name="last visit")),
(
"client",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="zerver.Client"
),
),
(
"user_profile",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL
),
),
],
),
migrations.CreateModel(
name="UserActivityInterval",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("start", models.DateTimeField(db_index=True, verbose_name="start time")),
("end", models.DateTimeField(db_index=True, verbose_name="end time")),
(
"user_profile",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL
),
),
],
),
migrations.CreateModel(
name="UserMessage",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
(
"flags",
bitfield.models.BitField(
[
"read",
"starred",
"collapsed",
"mentioned",
"wildcard_mentioned",
"summarize_in_home",
"summarize_in_stream",
"force_expand",
"force_collapse",
"has_alert_word",
"historical",
"is_me_message",
],
default=0,
),
),
(
"message",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="zerver.Message"
),
),
(
"user_profile",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL
),
),
],
),
migrations.CreateModel(
name="UserPresence",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("timestamp", models.DateTimeField(verbose_name="presence changed")),
("status", models.PositiveSmallIntegerField(default=1)),
(
"client",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="zerver.Client"
),
),
(
"user_profile",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL
),
),
],
),
migrations.AlterUniqueTogether(
name="userpresence",
unique_together={("user_profile", "client")},
),
migrations.AlterUniqueTogether(
name="usermessage",
unique_together={("user_profile", "message")},
),
migrations.AlterUniqueTogether(
name="useractivity",
unique_together={("user_profile", "client", "query")},
),
migrations.AlterUniqueTogether(
name="subscription",
unique_together={("user_profile", "recipient")},
),
migrations.AlterUniqueTogether(
name="stream",
unique_together={("name", "realm")},
),
migrations.AlterUniqueTogether(
name="recipient",
unique_together={("type", "type_id")},
),
migrations.AlterUniqueTogether(
name="realmfilter",
unique_together={("realm", "pattern")},
),
migrations.AlterUniqueTogether(
name="realmemoji",
unique_together={("realm", "name")},
),
migrations.AddField(
model_name="realm",
name="notifications_stream",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="+",
to="zerver.Stream",
),
),
migrations.AddField(
model_name="preregistrationuser",
name="realm",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.CASCADE, to="zerver.Realm"
),
),
migrations.AddField(
model_name="preregistrationuser",
name="referred_by",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL
),
),
migrations.AddField(
model_name="preregistrationuser",
name="streams",
field=models.ManyToManyField(null=True, to="zerver.Stream"),
),
migrations.AddField(
model_name="message",
name="recipient",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="zerver.Recipient"
),
),
migrations.AddField(
model_name="message",
name="sender",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL
),
),
migrations.AddField(
model_name="message",
name="sending_client",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="zerver.Client"
),
),
migrations.AddField(
model_name="defaultstream",
name="realm",
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="zerver.Realm"),
),
migrations.AddField(
model_name="defaultstream",
name="stream",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="zerver.Stream"
),
),
migrations.AlterUniqueTogether(
name="defaultstream",
unique_together={("realm", "stream")},
),
migrations.AddField(
model_name="userprofile",
name="default_events_register_stream",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="+",
to="zerver.Stream",
),
),
migrations.AddField(
model_name="userprofile",
name="default_sending_stream",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="+",
to="zerver.Stream",
),
),
migrations.AddField(
model_name="userprofile",
name="groups",
field=models.ManyToManyField(
blank=True,
help_text="The groups this user belongs to. A user will get all permissions granted to each of their groups.",
related_name="user_set",
related_query_name="user",
to="auth.Group",
verbose_name="groups",
),
),
migrations.AddField(
model_name="userprofile",
name="realm",
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="zerver.Realm"),
),
migrations.AddField(
model_name="userprofile",
name="user_permissions",
field=models.ManyToManyField(
blank=True,
help_text="Specific permissions for this user.",
related_name="user_set",
related_query_name="user",
to="auth.Permission",
verbose_name="user permissions",
),
),
migrations.AddField(
model_name="message",
name="search_tsvector",
field=SearchVectorField(null=True),
),
migrations.AddIndex(
model_name="message",
index=GinIndex(
"search_tsvector", fastupdate=False, name="zerver_message_search_tsvector"
),
),
migrations.RunSQL(
sql=fts_sql,
),
migrations.AlterModelManagers(
name="userprofile",
managers=[
("objects", django.contrib.auth.models.UserManager()),
],
),
migrations.AlterField(
model_name="preregistrationuser",
name="email",
field=models.EmailField(max_length=254),
),
migrations.AlterField(
model_name="preregistrationuser",
name="streams",
field=models.ManyToManyField(to="zerver.Stream"),
),
migrations.AlterField(
model_name="pushdevicetoken",
name="last_updated",
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name="referral",
name="email",
field=models.EmailField(max_length=254),
),
migrations.AlterField(
model_name="userprofile",
name="email",
field=models.EmailField(db_index=True, max_length=254, unique=True),
),
migrations.AlterField(
model_name="userprofile",
name="last_login",
field=models.DateTimeField(blank=True, null=True, verbose_name="last login"),
),
migrations.AddIndex(
model_name="message",
index=models.Index(Upper("subject"), name="upper_subject_idx"),
),
migrations.AddIndex(
model_name="stream",
index=models.Index(Upper("name"), name="upper_stream_name_idx"),
),
migrations.AddField(
model_name="userprofile",
name="left_side_userlist",
field=models.BooleanField(default=False),
),
migrations.AlterModelOptions(
name="realm",
options={
"permissions": (
("administer", "Administer a realm"),
("api_super_user", "Can send messages as other users for mirroring"),
)
},
),
migrations.AddIndex(
model_name="userprofile",
index=models.Index(Upper("email"), name="upper_userprofile_email_idx"),
),
migrations.AlterField(
model_name="userprofile",
name="is_active",
field=models.BooleanField(db_index=True, default=True),
),
migrations.AlterField(
model_name="userprofile",
name="is_bot",
field=models.BooleanField(db_index=True, default=False),
),
migrations.AddIndex(
model_name="preregistrationuser",
index=models.Index(Upper("email"), name="upper_preregistration_email_idx"),
),
migrations.AlterField(
model_name="userprofile",
name="enable_stream_desktop_notifications",
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name="userprofile",
name="enable_stream_sounds",
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name="userprofile",
name="is_api_super_user",
field=models.BooleanField(db_index=True, default=False),
),
migrations.AddField(
model_name="userprofile",
name="is_realm_admin",
field=models.BooleanField(db_index=True, default=False),
),
migrations.AlterField(
model_name="realmemoji",
name="img_url",
field=models.URLField(),
),
migrations.AlterField(
model_name="realmemoji",
name="name",
field=models.TextField(
validators=[
django.core.validators.MinLengthValidator(1),
django.core.validators.RegexValidator(regex="^[0-9a-zA-Z.\\-_]+(?<![.\\-_])$"),
]
),
),
migrations.AlterField(
model_name="realmemoji",
name="img_url",
field=models.URLField(max_length=1000),
),
migrations.CreateModel(
name="Attachment",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("file_name", models.CharField(db_index=True, max_length=100)),
("path_id", models.TextField(db_index=True)),
(
"create_time",
models.DateTimeField(db_index=True, default=django.utils.timezone.now),
),
("messages", models.ManyToManyField(to="zerver.Message")),
(
"owner",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL
),
),
("is_realm_public", models.BooleanField(default=False)),
],
),
migrations.AddField(
model_name="realm",
name="create_stream_by_admins_only",
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name="userprofile",
name="bot_type",
field=models.PositiveSmallIntegerField(db_index=True, null=True),
),
migrations.AlterField(
model_name="realmemoji",
name="name",
field=models.TextField(
validators=[
django.core.validators.MinLengthValidator(1),
django.core.validators.RegexValidator(
message="Invalid characters in emoji name",
regex="^[0-9a-zA-Z.\\-_]+(?<![.\\-_])$",
),
]
),
),
migrations.AddField(
model_name="preregistrationuser",
name="realm_creation",
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name="attachment",
name="realm",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="zerver.Realm",
),
),
migrations.RunPython(
code=migrate_existing_attachment_data,
elidable=True,
),
migrations.AddField(
model_name="subscription",
name="pin_to_top",
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name="userprofile",
name="default_language",
field=models.CharField(default="en", max_length=50),
),
migrations.AddField(
model_name="realm",
name="allow_message_editing",
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name="realm",
name="message_content_edit_limit_seconds",
field=models.IntegerField(default=600),
),
migrations.AddField(
model_name="realm",
name="default_language",
field=models.CharField(default="en", max_length=50),
),
migrations.AddField(
model_name="userprofile",
name="tos_version",
field=models.CharField(max_length=10, null=True),
),
]
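# Hedged operational note (standard Django tooling, not part of this file):
# the generated SQL can be inspected, and the migration applied, with
#
#     python manage.py sqlmigrate zerver 0001
#     python manage.py migrate zerver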
| 38.545842 | 126 | 0.491841 |
],
options={
"permissions": (("administer", "Administer a realm"),),
},
),
migrations.CreateModel(
name="RealmAlias",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("domain", models.CharField(db_index=True, max_length=80, unique=True)),
(
"realm",
models.ForeignKey(
null=True, on_delete=django.db.models.deletion.CASCADE, to="zerver.Realm"
),
),
],
),
migrations.CreateModel(
name="RealmEmoji",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("name", models.TextField()),
("img_url", models.TextField()),
(
"realm",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="zerver.Realm"
),
),
],
),
migrations.CreateModel(
name="RealmFilter",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("pattern", models.TextField()),
("url_format_string", models.TextField()),
(
"realm",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="zerver.Realm"
),
),
],
),
migrations.CreateModel(
name="Recipient",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("type_id", models.IntegerField(db_index=True)),
("type", models.PositiveSmallIntegerField(db_index=True)),
],
),
migrations.CreateModel(
name="Referral",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("email", models.EmailField(max_length=75)),
("timestamp", models.DateTimeField(auto_now_add=True)),
(
"user_profile",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL
),
),
],
),
migrations.CreateModel(
name="ScheduledJob",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("scheduled_timestamp", models.DateTimeField()),
("type", models.PositiveSmallIntegerField()),
("data", models.TextField()),
("filter_id", models.IntegerField(null=True)),
("filter_string", models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name="Stream",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("name", models.CharField(db_index=True, max_length=60)),
("invite_only", models.NullBooleanField(default=False)),
(
"email_token",
models.CharField(default=generate_email_token_for_stream, max_length=32),
),
("description", models.CharField(default="", max_length=1024)),
("date_created", models.DateTimeField(default=django.utils.timezone.now)),
("deactivated", models.BooleanField(default=False)),
(
"realm",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="zerver.Realm"
),
),
],
),
migrations.CreateModel(
name="Subscription",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("active", models.BooleanField(default=True)),
("in_home_view", models.NullBooleanField(default=True)),
("color", models.CharField(default="#c2c2c2", max_length=10)),
("desktop_notifications", models.BooleanField(default=True)),
("audible_notifications", models.BooleanField(default=True)),
("notifications", models.BooleanField(default=False)),
(
"recipient",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="zerver.Recipient"
),
),
(
"user_profile",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL
),
),
],
),
migrations.CreateModel(
name="UserActivity",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("query", models.CharField(db_index=True, max_length=50)),
("count", models.IntegerField()),
("last_visit", models.DateTimeField(verbose_name="last visit")),
(
"client",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="zerver.Client"
),
),
(
"user_profile",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL
),
),
],
),
migrations.CreateModel(
name="UserActivityInterval",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("start", models.DateTimeField(db_index=True, verbose_name="start time")),
("end", models.DateTimeField(db_index=True, verbose_name="end time")),
(
"user_profile",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL
),
),
],
),
migrations.CreateModel(
name="UserMessage",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
(
"flags",
bitfield.models.BitField(
[
"read",
"starred",
"collapsed",
"mentioned",
"wildcard_mentioned",
"summarize_in_home",
"summarize_in_stream",
"force_expand",
"force_collapse",
"has_alert_word",
"historical",
"is_me_message",
],
default=0,
),
),
(
"message",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="zerver.Message"
),
),
(
"user_profile",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL
),
),
],
),
migrations.CreateModel(
name="UserPresence",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("timestamp", models.DateTimeField(verbose_name="presence changed")),
("status", models.PositiveSmallIntegerField(default=1)),
(
"client",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="zerver.Client"
),
),
(
"user_profile",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL
),
),
],
),
migrations.AlterUniqueTogether(
name="userpresence",
unique_together={("user_profile", "client")},
),
migrations.AlterUniqueTogether(
name="usermessage",
unique_together={("user_profile", "message")},
),
migrations.AlterUniqueTogether(
name="useractivity",
unique_together={("user_profile", "client", "query")},
),
migrations.AlterUniqueTogether(
name="subscription",
unique_together={("user_profile", "recipient")},
),
migrations.AlterUniqueTogether(
name="stream",
unique_together={("name", "realm")},
),
migrations.AlterUniqueTogether(
name="recipient",
unique_together={("type", "type_id")},
),
migrations.AlterUniqueTogether(
name="realmfilter",
unique_together={("realm", "pattern")},
),
migrations.AlterUniqueTogether(
name="realmemoji",
unique_together={("realm", "name")},
),
migrations.AddField(
model_name="realm",
name="notifications_stream",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="+",
to="zerver.Stream",
),
),
migrations.AddField(
model_name="preregistrationuser",
name="realm",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.CASCADE, to="zerver.Realm"
),
),
migrations.AddField(
model_name="preregistrationuser",
name="referred_by",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL
),
),
migrations.AddField(
model_name="preregistrationuser",
name="streams",
field=models.ManyToManyField(null=True, to="zerver.Stream"),
),
migrations.AddField(
model_name="message",
name="recipient",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="zerver.Recipient"
),
),
migrations.AddField(
model_name="message",
name="sender",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL
),
),
migrations.AddField(
model_name="message",
name="sending_client",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="zerver.Client"
),
),
migrations.AddField(
model_name="defaultstream",
name="realm",
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="zerver.Realm"),
),
migrations.AddField(
model_name="defaultstream",
name="stream",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="zerver.Stream"
),
),
migrations.AlterUniqueTogether(
name="defaultstream",
unique_together={("realm", "stream")},
),
migrations.AddField(
model_name="userprofile",
name="default_events_register_stream",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="+",
to="zerver.Stream",
),
),
migrations.AddField(
model_name="userprofile",
name="default_sending_stream",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="+",
to="zerver.Stream",
),
),
migrations.AddField(
model_name="userprofile",
name="groups",
field=models.ManyToManyField(
blank=True,
help_text="The groups this user belongs to. A user will get all permissions granted to each of their groups.",
related_name="user_set",
related_query_name="user",
to="auth.Group",
verbose_name="groups",
),
),
migrations.AddField(
model_name="userprofile",
name="realm",
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="zerver.Realm"),
),
migrations.AddField(
model_name="userprofile",
name="user_permissions",
field=models.ManyToManyField(
blank=True,
help_text="Specific permissions for this user.",
related_name="user_set",
related_query_name="user",
to="auth.Permission",
verbose_name="user permissions",
),
),
migrations.AddField(
model_name="message",
name="search_tsvector",
field=SearchVectorField(null=True),
),
migrations.AddIndex(
model_name="message",
index=GinIndex(
"search_tsvector", fastupdate=False, name="zerver_message_search_tsvector"
),
),
migrations.RunSQL(
sql=fts_sql,
),
migrations.AlterModelManagers(
name="userprofile",
managers=[
("objects", django.contrib.auth.models.UserManager()),
],
),
migrations.AlterField(
model_name="preregistrationuser",
name="email",
field=models.EmailField(max_length=254),
),
migrations.AlterField(
model_name="preregistrationuser",
name="streams",
field=models.ManyToManyField(to="zerver.Stream"),
),
migrations.AlterField(
model_name="pushdevicetoken",
name="last_updated",
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name="referral",
name="email",
field=models.EmailField(max_length=254),
),
migrations.AlterField(
model_name="userprofile",
name="email",
field=models.EmailField(db_index=True, max_length=254, unique=True),
),
migrations.AlterField(
model_name="userprofile",
name="last_login",
field=models.DateTimeField(blank=True, null=True, verbose_name="last login"),
),
migrations.AddIndex(
model_name="message",
index=models.Index(Upper("subject"), name="upper_subject_idx"),
),
migrations.AddIndex(
model_name="stream",
index=models.Index(Upper("name"), name="upper_stream_name_idx"),
),
migrations.AddField(
model_name="userprofile",
name="left_side_userlist",
field=models.BooleanField(default=False),
),
migrations.AlterModelOptions(
name="realm",
options={
"permissions": (
("administer", "Administer a realm"),
("api_super_user", "Can send messages as other users for mirroring"),
)
},
),
migrations.AddIndex(
model_name="userprofile",
index=models.Index(Upper("email"), name="upper_userprofile_email_idx"),
),
migrations.AlterField(
model_name="userprofile",
name="is_active",
field=models.BooleanField(db_index=True, default=True),
),
migrations.AlterField(
model_name="userprofile",
name="is_bot",
field=models.BooleanField(db_index=True, default=False),
),
migrations.AddIndex(
model_name="preregistrationuser",
index=models.Index(Upper("email"), name="upper_preregistration_email_idx"),
),
migrations.AlterField(
model_name="userprofile",
name="enable_stream_desktop_notifications",
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name="userprofile",
name="enable_stream_sounds",
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name="userprofile",
name="is_api_super_user",
field=models.BooleanField(db_index=True, default=False),
),
migrations.AddField(
model_name="userprofile",
name="is_realm_admin",
field=models.BooleanField(db_index=True, default=False),
),
migrations.AlterField(
model_name="realmemoji",
name="img_url",
field=models.URLField(),
),
migrations.AlterField(
model_name="realmemoji",
name="name",
field=models.TextField(
validators=[
django.core.validators.MinLengthValidator(1),
django.core.validators.RegexValidator(regex="^[0-9a-zA-Z.\\-_]+(?<![.\\-_])$"),
]
),
),
migrations.AlterField(
model_name="realmemoji",
name="img_url",
field=models.URLField(max_length=1000),
),
migrations.CreateModel(
name="Attachment",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("file_name", models.CharField(db_index=True, max_length=100)),
("path_id", models.TextField(db_index=True)),
(
"create_time",
models.DateTimeField(db_index=True, default=django.utils.timezone.now),
),
("messages", models.ManyToManyField(to="zerver.Message")),
(
"owner",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL
),
),
("is_realm_public", models.BooleanField(default=False)),
],
),
migrations.AddField(
model_name="realm",
name="create_stream_by_admins_only",
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name="userprofile",
name="bot_type",
field=models.PositiveSmallIntegerField(db_index=True, null=True),
),
migrations.AlterField(
model_name="realmemoji",
name="name",
field=models.TextField(
validators=[
django.core.validators.MinLengthValidator(1),
django.core.validators.RegexValidator(
message="Invalid characters in emoji name",
regex="^[0-9a-zA-Z.\\-_]+(?<![.\\-_])$",
),
]
),
),
migrations.AddField(
model_name="preregistrationuser",
name="realm_creation",
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name="attachment",
name="realm",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="zerver.Realm",
),
),
migrations.RunPython(
code=migrate_existing_attachment_data,
elidable=True,
),
migrations.AddField(
model_name="subscription",
name="pin_to_top",
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name="userprofile",
name="default_language",
field=models.CharField(default="en", max_length=50),
),
migrations.AddField(
model_name="realm",
name="allow_message_editing",
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name="realm",
name="message_content_edit_limit_seconds",
field=models.IntegerField(default=600),
),
migrations.AddField(
model_name="realm",
name="default_language",
field=models.CharField(default="en", max_length=50),
),
migrations.AddField(
model_name="userprofile",
name="tos_version",
field=models.CharField(max_length=10, null=True),
),
]
| true | true |
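The escape_html SQL function created by fts_sql above chains five replace() calls, escaping the ampersand first so the entities it introduces are not escaped again. A minimal Python mirror of that chain (a sketch for sanity-checking the ordering, not part of the migration itself):

def escape_html(text: str) -> str:
    # Same substitution order as the SQL function: '&' must be replaced first.
    for old, new in [("&", "&amp;"), ("<", "&lt;"), (">", "&gt;"),
                     ('"', "&quot;"), ("'", "&#39;")]:
        text = text.replace(old, new)
    return text

assert escape_html('<a href="x">&</a>') == '&lt;a href=&quot;x&quot;&gt;&amp;&lt;/a&gt;'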
f71c69a2524bdf3501491590fb98b0dba43692e4 | 3,863 | py | Python | img_filter/img_advanced_filter.py | Gretacyh/images-downloader-fliter | ffe070026a45c741013a575a6a985d97e28d6fd7 | [
"ICU",
"MIT"
] | null | null | null | img_filter/img_advanced_filter.py | Gretacyh/images-downloader-fliter | ffe070026a45c741013a575a6a985d97e28d6fd7 | [
"ICU",
"MIT"
] | null | null | null | img_filter/img_advanced_filter.py | Gretacyh/images-downloader-fliter | ffe070026a45c741013a575a6a985d97e28d6fd7 | [
"ICU",
"MIT"
] | null | null | null | import os
import re
import cv2
import umap
import torch
import torch.nn as nn
from torchvision import models
import torch.nn.functional as F
import numpy as np
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
def global_std_pool2d(x):
"""2D global standard variation pooling"""
return torch.std(x.view(x.size()[0], x.size()[1], -1, 1), dim=2, keepdim=True)
class ResNet50(torch.nn.Module):
"""Modified ResNet50 for feature extraction"""
def __init__(self):
super(ResNet50, self).__init__()
self.features = nn.Sequential(*list(models.resnet50(pretrained=True).children())[:-2])
        # Freeze the backbone so it is used purely for feature extraction
for p in self.features.parameters():
p.requires_grad = False
        # Run on GPU when available, otherwise fall back to CPU
if torch.cuda.is_available():
self.device = torch.device("cuda")
else:
self.device = torch.device("cpu")
self.to(self.device)
def forward(self, x):
        # Child index 7 is the last conv block (res5c); pool its activations
for ii, model in enumerate(self.features):
x = model(x)
if ii == 7:
features_mean = nn.functional.adaptive_avg_pool2d(x, 1)
features_std = global_std_pool2d(x)
return features_mean, features_std
# Extract mean- and std-pooled deep features for a single image
def get_img_feature(model, img_path):
img = cv2.imread(img_path, flags=cv2.IMREAD_COLOR)
img = torch.from_numpy(img)
img = img.to(model.device).float()
img = torch.unsqueeze(img, 0) # batch size 1
img = img.permute(0, 3, 1, 2)
feature = model(img)
return feature
# Reduce features to 2-D with UMAP
def do_umap(features, channel=2, random_state=None):
model = umap.UMAP(n_components=channel, random_state=random_state)
return model.fit_transform(features), model
# Reduce features to 2-D with t-SNE
def do_tsne(data, random_state=0):
tsne = TSNE(n_components=2, init='pca', random_state=random_state)
return tsne.fit_transform(data), tsne
# Plot the 2-D embedding, labelling points with text or coloring by type
def plot_embedding(data, type=None, text=None, title="", colors=None):
if type is None:
type = np.zeros_like(data[:, 0])
x_min, x_max = np.min(data, 0), np.max(data, 0)
data = (data - x_min) / (x_max - x_min)
fig = plt.figure()
ax = plt.subplot(111)
for i in range(data.shape[0]):
if text is not None:
plt.text(data[i, 0], data[i, 1], str(text[i]),
color=plt.cm.Set1((text[i] + 1) / 10.) if colors is None else colors[type[i]],
fontdict={'weight': 'bold', 'size': 8})
else:
            # text is None on this branch, so color by the type labels instead
            plt.scatter(data[i, 0], data[i, 1], s=3,
                        color=plt.cm.Set1((type[i] + 1) / 10.) if colors is None else colors[type[i]])
plt.xticks([])
plt.yticks([])
plt.title(title)
plt.show()
return fig
if __name__ == '__main__':
root_dir = "/root/yanghan/cat"
file_suffix = "jpeg|jpg|png"
remove_dir = root_dir + "/remove"
if not os.path.exists(remove_dir):
os.makedirs(remove_dir)
    # Initialize the feature-extraction model
model = ResNet50()
    # Extract features for every image under root_dir
feature_list = []
name_list = []
for img_name in os.listdir(root_dir)[:]:
        # Filter out files whose names don't match the allowed extensions
if re.search(file_suffix, img_name) is None:
continue
img_path = root_dir + "/" + img_name
mean, std = get_img_feature(model, img_path)
mean = mean.to('cpu').numpy().reshape(-1)
std = std.to('cpu').numpy().reshape(-1)
feature = np.concatenate((mean, std), 0)
print(feature.shape)
feature_list.append(feature)
name_list.append(img_name[7:10])
    # Embed the features with t-SNE and UMAP and plot both
feature_list = np.array(feature_list)
name_list = np.array(name_list)
feature_list_tsne, _ = do_tsne(feature_list)
plot_embedding(feature_list_tsne, title="tsne", text=name_list)
feature_list_umap, _ = do_umap(feature_list)
plot_embedding(feature_list_umap, title="umap", text=name_list)
cv2.waitKey()
| 30.904 | 102 | 0.619467 | import os
import re
import cv2
import umap
import torch
import torch.nn as nn
from torchvision import models
import torch.nn.functional as F
import numpy as np
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
def global_std_pool2d(x):
return torch.std(x.view(x.size()[0], x.size()[1], -1, 1), dim=2, keepdim=True)
class ResNet50(torch.nn.Module):
def __init__(self):
super(ResNet50, self).__init__()
self.features = nn.Sequential(*list(models.resnet50(pretrained=True).children())[:-2])
for p in self.features.parameters():
p.requires_grad = False
if torch.cuda.is_available():
self.device = torch.device("cuda")
else:
self.device = torch.device("cpu")
self.to(self.device)
def forward(self, x):
for ii, model in enumerate(self.features):
x = model(x)
if ii == 7:
features_mean = nn.functional.adaptive_avg_pool2d(x, 1)
features_std = global_std_pool2d(x)
return features_mean, features_std
def get_img_feature(model, img_path):
img = cv2.imread(img_path, flags=cv2.IMREAD_COLOR)
img = torch.from_numpy(img)
img = img.to(model.device).float()
img = torch.unsqueeze(img, 0)
img = img.permute(0, 3, 1, 2)
feature = model(img)
return feature
def do_umap(features, channel=2, random_state=None):
model = umap.UMAP(n_components=channel, random_state=random_state)
return model.fit_transform(features), model
def do_tsne(data, random_state=0):
tsne = TSNE(n_components=2, init='pca', random_state=random_state)
return tsne.fit_transform(data), tsne
def plot_embedding(data, type=None, text=None, title="", colors=None):
if type is None:
type = np.zeros_like(data[:, 0])
x_min, x_max = np.min(data, 0), np.max(data, 0)
data = (data - x_min) / (x_max - x_min)
fig = plt.figure()
ax = plt.subplot(111)
for i in range(data.shape[0]):
if text is not None:
plt.text(data[i, 0], data[i, 1], str(text[i]),
color=plt.cm.Set1((text[i] + 1) / 10.) if colors is None else colors[type[i]],
fontdict={'weight': 'bold', 'size': 8})
else:
            plt.scatter(data[i, 0], data[i, 1], s=3,
                        color=plt.cm.Set1((type[i] + 1) / 10.) if colors is None else colors[type[i]])
plt.xticks([])
plt.yticks([])
plt.title(title)
plt.show()
return fig
if __name__ == '__main__':
root_dir = "/root/yanghan/cat"
file_suffix = "jpeg|jpg|png"
remove_dir = root_dir + "/remove"
if not os.path.exists(remove_dir):
os.makedirs(remove_dir)
model = ResNet50()
feature_list = []
name_list = []
for img_name in os.listdir(root_dir)[:]:
if re.search(file_suffix, img_name) is None:
continue
img_path = root_dir + "/" + img_name
mean, std = get_img_feature(model, img_path)
mean = mean.to('cpu').numpy().reshape(-1)
std = std.to('cpu').numpy().reshape(-1)
feature = np.concatenate((mean, std), 0)
print(feature.shape)
feature_list.append(feature)
name_list.append(img_name[7:10])
feature_list = np.array(feature_list)
name_list = np.array(name_list)
feature_list_tsne, _ = do_tsne(feature_list)
plot_embedding(feature_list_tsne, title="tsne", text=name_list)
feature_list_umap, _ = do_umap(feature_list)
plot_embedding(feature_list_umap, title="umap", text=name_list)
cv2.waitKey()
| true | true |
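A minimal sketch of the embedding step used by img_advanced_filter.py, run on synthetic features so it needs no images, GPU, or model weights; the 4096 width stands in for the concatenated mean+std res5c features (2048 channels each), an illustrative assumption:

import numpy as np
from sklearn.manifold import TSNE

rng = np.random.default_rng(0)
fake_features = rng.normal(size=(50, 4096))  # stand-in for mean+std ResNet-50 features
embedded = TSNE(n_components=2, init='pca', random_state=0).fit_transform(fake_features)
print(embedded.shape)  # (50, 2)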
f71c6a899f50afd65c97f6d10559da3bf89ef5b8 | 162 | py | Python | querybuilder_rules/compat.py | Apkawa/django-querybuilder-rules | 430488f6e50be7ac74b5b757c9bef0d09e49301b | [
"MIT"
] | null | null | null | querybuilder_rules/compat.py | Apkawa/django-querybuilder-rules | 430488f6e50be7ac74b5b757c9bef0d09e49301b | [
"MIT"
] | null | null | null | querybuilder_rules/compat.py | Apkawa/django-querybuilder-rules | 430488f6e50be7ac74b5b757c9bef0d09e49301b | [
"MIT"
] | null | null | null | try:
from django.utils.encoding import smart_text, smart_str
except ImportError:
from django.utils.encoding import smart_unicode as smart_text, smart_str
| 32.4 | 76 | 0.808642 | try:
from django.utils.encoding import smart_text, smart_str
except ImportError:
from django.utils.encoding import smart_unicode as smart_text, smart_str
| true | true |
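Hypothetical usage of the compat shim above (requires Django installed): callers import smart_text from one place and the shim resolves the right name on both old and new Django versions.

from querybuilder_rules.compat import smart_text

assert smart_text(b"caf\xc3\xa9") == "café"  # bytes are decoded to text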
f71c6b46358edba34d7bcd2375000838bace4301 | 2,623 | py | Python | acme/tf/networks/__init__.py | RaoulDrake/acme | 16ad2f284ad7c038081454a9b820d8f424b3ad1f | [
"Apache-2.0"
] | 1 | 2021-06-17T16:11:43.000Z | 2021-06-17T16:11:43.000Z | acme/tf/networks/__init__.py | RaoulDrake/acme | 16ad2f284ad7c038081454a9b820d8f424b3ad1f | [
"Apache-2.0"
] | null | null | null | acme/tf/networks/__init__.py | RaoulDrake/acme | 16ad2f284ad7c038081454a9b820d8f424b3ad1f | [
"Apache-2.0"
] | null | null | null | # python3
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Useful network definitions."""
from acme.tf.networks.atari import AtariTorso
from acme.tf.networks.atari import DeepIMPALAAtariNetwork
from acme.tf.networks.atari import DQNAtariNetwork
from acme.tf.networks.atari import IMPALAAtariNetwork
from acme.tf.networks.atari import R2D2AtariNetwork
from acme.tf.networks.base import DistributionalModule
from acme.tf.networks.base import Module
from acme.tf.networks.base import RNNCore
from acme.tf.networks.continuous import LayerNormAndResidualMLP
from acme.tf.networks.continuous import LayerNormMLP
from acme.tf.networks.continuous import NearZeroInitializedLinear
from acme.tf.networks.discrete import DiscreteFilteredQNetwork
from acme.tf.networks.distributional import ApproximateMode
from acme.tf.networks.distributional import DiscreteValuedHead
from acme.tf.networks.distributional import MultivariateGaussianMixture
from acme.tf.networks.distributional import MultivariateNormalDiagHead
from acme.tf.networks.distributional import UnivariateGaussianMixture
from acme.tf.networks.distributions import DiscreteValuedDistribution
from acme.tf.networks.duelling import DuellingMLP
from acme.tf.networks.multihead import Multihead
from acme.tf.networks.multiplexers import CriticMultiplexer
from acme.tf.networks.noise import ClippedGaussian
from acme.tf.networks.policy_value import PolicyValueHead
from acme.tf.networks.recurrence import CriticDeepRNN
from acme.tf.networks.recurrence import RecurrentExpQWeightedPolicy
from acme.tf.networks.rescaling import ClipToSpec
from acme.tf.networks.rescaling import RescaleToSpec
from acme.tf.networks.rescaling import TanhToSpec
from acme.tf.networks.stochastic import ExpQWeightedPolicy
from acme.tf.networks.stochastic import StochasticMeanHead
from acme.tf.networks.stochastic import StochasticModeHead
from acme.tf.networks.stochastic import StochasticSamplingHead
from acme.tf.networks.vision import ResNetTorso
# For backwards compatibility.
GaussianMixtureHead = UnivariateGaussianMixture
| 48.574074 | 74 | 0.847884 |
from acme.tf.networks.atari import AtariTorso
from acme.tf.networks.atari import DeepIMPALAAtariNetwork
from acme.tf.networks.atari import DQNAtariNetwork
from acme.tf.networks.atari import IMPALAAtariNetwork
from acme.tf.networks.atari import R2D2AtariNetwork
from acme.tf.networks.base import DistributionalModule
from acme.tf.networks.base import Module
from acme.tf.networks.base import RNNCore
from acme.tf.networks.continuous import LayerNormAndResidualMLP
from acme.tf.networks.continuous import LayerNormMLP
from acme.tf.networks.continuous import NearZeroInitializedLinear
from acme.tf.networks.discrete import DiscreteFilteredQNetwork
from acme.tf.networks.distributional import ApproximateMode
from acme.tf.networks.distributional import DiscreteValuedHead
from acme.tf.networks.distributional import MultivariateGaussianMixture
from acme.tf.networks.distributional import MultivariateNormalDiagHead
from acme.tf.networks.distributional import UnivariateGaussianMixture
from acme.tf.networks.distributions import DiscreteValuedDistribution
from acme.tf.networks.duelling import DuellingMLP
from acme.tf.networks.multihead import Multihead
from acme.tf.networks.multiplexers import CriticMultiplexer
from acme.tf.networks.noise import ClippedGaussian
from acme.tf.networks.policy_value import PolicyValueHead
from acme.tf.networks.recurrence import CriticDeepRNN
from acme.tf.networks.recurrence import RecurrentExpQWeightedPolicy
from acme.tf.networks.rescaling import ClipToSpec
from acme.tf.networks.rescaling import RescaleToSpec
from acme.tf.networks.rescaling import TanhToSpec
from acme.tf.networks.stochastic import ExpQWeightedPolicy
from acme.tf.networks.stochastic import StochasticMeanHead
from acme.tf.networks.stochastic import StochasticModeHead
from acme.tf.networks.stochastic import StochasticSamplingHead
from acme.tf.networks.vision import ResNetTorso
GaussianMixtureHead = UnivariateGaussianMixture
| true | true |
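A usage sketch for the re-exports above, assuming dm-acme with its TensorFlow extras is installed (constructor signatures can vary across acme versions): the flat namespace lets callers write networks.LayerNormMLP without knowing the submodule layout.

from acme.tf import networks

# Build a small torso and a duelling head via the package-level namespace.
torso = networks.LayerNormMLP(layer_sizes=[256, 256])
head = networks.DuellingMLP(num_actions=4, hidden_sizes=[128])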
f71c6bd2042d817dbc78da43dc684306860b7f69 | 1,029 | py | Python | pyquilted/pdf_printer.py | cocoroutine/pyquilted | dd8644043deec17608e00f46e3ac4562b8879603 | [
"MIT"
] | 1 | 2019-02-21T20:10:37.000Z | 2019-02-21T20:10:37.000Z | pyquilted/pdf_printer.py | cocoroutine/pyquilted | dd8644043deec17608e00f46e3ac4562b8879603 | [
"MIT"
] | null | null | null | pyquilted/pdf_printer.py | cocoroutine/pyquilted | dd8644043deec17608e00f46e3ac4562b8879603 | [
"MIT"
] | null | null | null | import pdfkit
class PdfPrinter:
"""A wrapper class around pdfkit functionality to print html to pdfs"""
@staticmethod
def from_file(infile, outfile):
options = {
"page-size": "Letter",
"dpi": "96",
"margin-top": "1in",
"margin-right": "1.25in",
"margin-bottom": "1in",
"margin-left": "1.25in",
"disable-smart-shrinking": None,
"zoom": 1,
}
pdfkit.from_file(infile, outfile, options=options)
@staticmethod
def from_string(document, outfile):
options = {
"page-size": "Letter",
"dpi": "96",
"margin-top": "1in",
"margin-right": "1.25in",
"margin-bottom": "1in",
"margin-left": "1.25in",
"disable-smart-shrinking": None,
"zoom": 1,
}
pdfkit.from_string(document, outfile, options=options)
| 31.181818 | 75 | 0.462585 | import pdfkit
class PdfPrinter:
@staticmethod
def from_file(infile, outfile):
options = {
"page-size": "Letter",
"dpi": "96",
"margin-top": "1in",
"margin-right": "1.25in",
"margin-bottom": "1in",
"margin-left": "1.25in",
"disable-smart-shrinking": None,
"zoom": 1,
}
pdfkit.from_file(infile, outfile, options=options)
@staticmethod
def from_string(document, outfile):
options = {
"page-size": "Letter",
"dpi": "96",
"margin-top": "1in",
"margin-right": "1.25in",
"margin-bottom": "1in",
"margin-left": "1.25in",
"disable-smart-shrinking": None,
"zoom": 1,
}
pdfkit.from_string(document, outfile, options=options)
| true | true |
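Hypothetical usage of PdfPrinter above; pdfkit shells out to wkhtmltopdf, so that binary must be on PATH, and the HTML string and output filename here are illustrative.

from pyquilted.pdf_printer import PdfPrinter

PdfPrinter.from_string("<h1>Jane Doe</h1><p>Resume body</p>", "resume.pdf")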
f71c6bda42c3ee94832851be22cdd754e1af0b13 | 1,164 | py | Python | example_app/app.py | quanpower/flask_jsondash | 274c41bcbc754fd217b7dc4679c377bac912b88d | [
"MIT"
] | 1 | 2017-09-01T16:08:49.000Z | 2017-09-01T16:08:49.000Z | example_app/app.py | quanpower/flask_jsondash | 274c41bcbc754fd217b7dc4679c377bac912b88d | [
"MIT"
] | null | null | null | example_app/app.py | quanpower/flask_jsondash | 274c41bcbc754fd217b7dc4679c377bac912b88d | [
"MIT"
] | null | null | null | """This is an example app, demonstrating usage."""
import os
from flask import Flask
from flask_jsondash.charts_builder import charts
app = Flask(__name__)
app.config['SECRET_KEY'] = 'NOTSECURELOL'
app.config.update(
JSONDASH_FILTERUSERS=False,
JSONDASH_GLOBALDASH=True,
JSONDASH_GLOBAL_USER='global',
)
app.debug = True
app.register_blueprint(charts)
def _can_edit_global():
return True
def _can_delete():
return True
def _can_clone():
return True
def _get_username():
return 'anonymous'
# Config examples.
app.config['JSONDASH'] = dict(
metadata=dict(
created_by=_get_username,
username=_get_username,
),
static=dict(
js_path='js/vendor/',
css_path='css/vendor/',
),
auth=dict(
edit_global=_can_edit_global,
clone=_can_clone,
delete=_can_delete,
)
)
@app.route('/', methods=['GET'])
def index():
"""Sample index."""
return '<a href="/charts">Visit the charts blueprint.</a>'
if __name__ == '__main__':
PORT = int(os.getenv('PORT', 7777))
HOST = os.getenv('HOST', '0.0.0.0')
app.run(debug=True, host=HOST, port=PORT)
| 18.1875 | 62 | 0.652921 |
import os
from flask import Flask
from flask_jsondash.charts_builder import charts
app = Flask(__name__)
app.config['SECRET_KEY'] = 'NOTSECURELOL'
app.config.update(
JSONDASH_FILTERUSERS=False,
JSONDASH_GLOBALDASH=True,
JSONDASH_GLOBAL_USER='global',
)
app.debug = True
app.register_blueprint(charts)
def _can_edit_global():
return True
def _can_delete():
return True
def _can_clone():
return True
def _get_username():
return 'anonymous'
app.config['JSONDASH'] = dict(
metadata=dict(
created_by=_get_username,
username=_get_username,
),
static=dict(
js_path='js/vendor/',
css_path='css/vendor/',
),
auth=dict(
edit_global=_can_edit_global,
clone=_can_clone,
delete=_can_delete,
)
)
@app.route('/', methods=['GET'])
def index():
return '<a href="/charts">Visit the charts blueprint.</a>'
if __name__ == '__main__':
PORT = int(os.getenv('PORT', 7777))
HOST = os.getenv('HOST', '0.0.0.0')
app.run(debug=True, host=HOST, port=PORT)
| true | true |
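A minimal sketch for exercising the example app with Flask's test client instead of a live server (assumes the module above is importable as app):

from app import app

with app.test_client() as client:
    resp = client.get('/')
    assert b'/charts' in resp.data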
f71c6d6149e0b6acd0b94e3af246e4aa23a4b08d | 10,495 | py | Python | grr/client/grr_response_client/client_actions/admin.py | tsehori/grr | 048506f22f74642bfe61749069a45ddf496fdab3 | [
"Apache-2.0"
] | 1 | 2021-07-01T01:43:06.000Z | 2021-07-01T01:43:06.000Z | grr/client/grr_response_client/client_actions/admin.py | tsehori/grr | 048506f22f74642bfe61749069a45ddf496fdab3 | [
"Apache-2.0"
] | 44 | 2021-05-14T22:49:24.000Z | 2022-03-13T21:54:02.000Z | grr/client/grr_response_client/client_actions/admin.py | tsehori/grr | 048506f22f74642bfe61749069a45ddf496fdab3 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Lint as: python3
"""Client actions related to administrating the client and its configuration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import logging
import os
import platform
import socket
import traceback
import cryptography
from cryptography.hazmat.backends import openssl
import pkg_resources
import psutil
import pytsk3
import yara
from grr_response_client import actions
from grr_response_client import communicator
from grr_response_client.client_actions import tempfiles
from grr_response_client.client_actions import timeline
from grr_response_core import config
from grr_response_core.lib import config_lib
from grr_response_core.lib import queues
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import client as rdf_client
from grr_response_core.lib.rdfvalues import client_action as rdf_client_action
from grr_response_core.lib.rdfvalues import client_stats as rdf_client_stats
from grr_response_core.lib.rdfvalues import flows as rdf_flows
from grr_response_core.lib.rdfvalues import protodict as rdf_protodict
class Echo(actions.ActionPlugin):
"""Returns a message to the server."""
in_rdfvalue = rdf_client_action.EchoRequest
out_rdfvalues = [rdf_client_action.EchoRequest]
def Run(self, args):
self.SendReply(args)
def GetHostnameFromClient(args):
del args # Unused.
yield rdf_protodict.DataBlob(string=socket.gethostname())
class GetHostname(actions.ActionPlugin):
"""Retrieves the host name of the client."""
out_rdfvalues = [rdf_protodict.DataBlob]
def Run(self, args):
for res in GetHostnameFromClient(args):
self.SendReply(res)
class GetPlatformInfo(actions.ActionPlugin):
"""Retrieves platform information."""
out_rdfvalues = [rdf_client.Uname]
def Run(self, unused_args):
"""Populate platform information into a Uname response."""
self.SendReply(rdf_client.Uname.FromCurrentSystem())
class Kill(actions.ActionPlugin):
"""A client action for terminating (killing) the client.
Used for testing process respawn.
"""
out_rdfvalues = [rdf_flows.GrrMessage]
def Run(self, unused_arg):
"""Run the kill."""
# Send a message back to the service to say that we are about to shutdown.
reply = rdf_flows.GrrStatus(status=rdf_flows.GrrStatus.ReturnedStatus.OK)
# Queue up the response message, jump the queue.
self.SendReply(reply, message_type=rdf_flows.GrrMessage.Type.STATUS)
# Give the http thread some time to send the reply.
self.grr_worker.Sleep(10)
# Die ourselves.
logging.info("Dying on request.")
os._exit(242) # pylint: disable=protected-access
class GetConfiguration(actions.ActionPlugin):
"""Retrieves the running configuration parameters."""
in_rdfvalue = None
out_rdfvalues = [rdf_protodict.Dict]
BLOCKED_PARAMETERS = ["Client.private_key"]
def Run(self, unused_arg):
"""Retrieve the configuration except for the blocked parameters."""
out = self.out_rdfvalues[0]()
for descriptor in config.CONFIG.type_infos:
if descriptor.name in self.BLOCKED_PARAMETERS:
value = "[Redacted]"
else:
try:
value = config.CONFIG.Get(descriptor.name, default=None)
except (config_lib.Error, KeyError, AttributeError, ValueError) as e:
logging.info("Config reading error: %s", e)
continue
if value is not None:
out[descriptor.name] = value
self.SendReply(out)
class GetLibraryVersions(actions.ActionPlugin):
"""Retrieves version information for installed libraries."""
in_rdfvalue = None
out_rdfvalues = [rdf_protodict.Dict]
def GetSSLVersion(self):
return openssl.backend.openssl_version_text()
def GetCryptographyVersion(self):
return cryptography.__version__
def GetPSUtilVersion(self):
return ".".join(map(utils.SmartUnicode, psutil.version_info))
def GetProtoVersion(self):
return pkg_resources.get_distribution("protobuf").version
def GetTSKVersion(self):
return pytsk3.TSK_VERSION_STR
def GetPyTSKVersion(self):
return pytsk3.get_version()
def GetYaraVersion(self):
return yara.YARA_VERSION
library_map = {
"pytsk": GetPyTSKVersion,
"TSK": GetTSKVersion,
"cryptography": GetCryptographyVersion,
"SSL": GetSSLVersion,
"psutil": GetPSUtilVersion,
"yara": GetYaraVersion,
}
error_str = "Unable to determine library version: %s"
def Run(self, unused_arg):
result = self.out_rdfvalues[0]()
for lib, f in self.library_map.items():
try:
result[lib] = f(self)
except Exception: # pylint: disable=broad-except
result[lib] = self.error_str % traceback.format_exc()
self.SendReply(result)
class UpdateConfiguration(actions.ActionPlugin):
"""Updates configuration parameters on the client."""
in_rdfvalue = rdf_protodict.Dict
UPDATABLE_FIELDS = {"Client.foreman_check_frequency",
"Client.server_urls",
"Client.max_post_size",
"Client.max_out_queue",
"Client.poll_min",
"Client.poll_max",
"Client.rss_max"} # pyformat: disable
def _UpdateConfig(self, filtered_arg, config_obj):
for field, value in filtered_arg.items():
config_obj.Set(field, value)
try:
config_obj.Write()
except (IOError, OSError):
pass
def Run(self, arg):
"""Does the actual work."""
try:
if self.grr_worker.client.FleetspeakEnabled():
raise ValueError("Not supported on Fleetspeak enabled clients.")
except AttributeError:
pass
smart_arg = {str(field): value for field, value in arg.items()}
disallowed_fields = [
field for field in smart_arg
if field not in UpdateConfiguration.UPDATABLE_FIELDS
]
if disallowed_fields:
raise ValueError("Received an update request for restricted field(s) %s."
% ",".join(disallowed_fields))
if platform.system() != "Windows":
# Check config validity before really applying the changes. This isn't
# implemented for our Windows clients though, whose configs are stored in
# the registry, as opposed to in the filesystem.
canary_config = config.CONFIG.CopyConfig()
# Prepare a temporary file we'll write changes to.
with tempfiles.CreateGRRTempFile(mode="w+") as temp_fd:
temp_filename = temp_fd.name
# Write canary_config changes to temp_filename.
canary_config.SetWriteBack(temp_filename)
self._UpdateConfig(smart_arg, canary_config)
try:
# Assert temp_filename is usable by loading it.
canary_config.SetWriteBack(temp_filename)
# Wide exception handling passed here from config_lib.py...
except Exception: # pylint: disable=broad-except
logging.warning("Updated config file %s is not usable.", temp_filename)
raise
# If temp_filename works, remove it (if not, it's useful for debugging).
os.unlink(temp_filename)
# The changes seem to work, so push them to the real config.
self._UpdateConfig(smart_arg, config.CONFIG)
def GetClientInformation() -> rdf_client.ClientInformation:
return rdf_client.ClientInformation(
client_name=config.CONFIG["Client.name"],
client_binary_name=psutil.Process().name(),
client_description=config.CONFIG["Client.description"],
client_version=int(config.CONFIG["Source.version_numeric"]),
build_time=config.CONFIG["Client.build_time"],
labels=config.CONFIG.Get("Client.labels", default=None),
timeline_btime_support=timeline.BTIME_SUPPORT)
class GetClientInfo(actions.ActionPlugin):
"""Obtains information about the GRR client installed."""
out_rdfvalues = [rdf_client.ClientInformation]
def Run(self, unused_args):
self.SendReply(GetClientInformation())
class GetClientStats(actions.ActionPlugin):
"""This retrieves some stats about the GRR process."""
in_rdfvalue = rdf_client_action.GetClientStatsRequest
out_rdfvalues = [rdf_client_stats.ClientStats]
def Run(self, arg):
"""Returns the client stats."""
if arg is None:
arg = rdf_client_action.GetClientStatsRequest()
proc = psutil.Process(os.getpid())
meminfo = proc.memory_info()
boot_time = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(psutil.boot_time())
create_time = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(proc.create_time())
response = rdf_client_stats.ClientStats(
RSS_size=meminfo.rss,
VMS_size=meminfo.vms,
memory_percent=proc.memory_percent(),
bytes_received=communicator.GRR_CLIENT_RECEIVED_BYTES.GetValue(),
bytes_sent=communicator.GRR_CLIENT_SENT_BYTES.GetValue(),
create_time=create_time,
boot_time=boot_time)
response.cpu_samples = self.grr_worker.stats_collector.CpuSamplesBetween(
start_time=arg.start_time, end_time=arg.end_time)
response.io_samples = self.grr_worker.stats_collector.IOSamplesBetween(
start_time=arg.start_time, end_time=arg.end_time)
self.Send(response)
def Send(self, response):
self.SendReply(response)
class GetClientStatsAuto(GetClientStats):
"""This class is used to send the reply to a well known flow on the server."""
def Send(self, response):
self.grr_worker.SendReply(
rdf_client_stats.ClientStats.Downsampled(response),
session_id=rdfvalue.SessionID(queue=queues.STATS, flow_name="Stats"),
response_id=0,
request_id=0,
message_type=rdf_flows.GrrMessage.Type.MESSAGE,
require_fastpoll=False)
class SendStartupInfo(actions.ActionPlugin):
in_rdfvalue = None
out_rdfvalues = [rdf_client.StartupInfo]
well_known_session_id = rdfvalue.SessionID(flow_name="Startup")
def Run(self, unused_arg, ttl=None):
"""Returns the startup information."""
logging.debug("Sending startup information.")
boot_time = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(psutil.boot_time())
response = rdf_client.StartupInfo(
boot_time=boot_time, client_info=GetClientInformation())
self.grr_worker.SendReply(
response,
session_id=self.well_known_session_id,
response_id=0,
request_id=0,
message_type=rdf_flows.GrrMessage.Type.MESSAGE,
require_fastpoll=False,
ttl=ttl)
| 32.292308 | 80 | 0.723106 |
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import logging
import os
import platform
import socket
import traceback
import cryptography
from cryptography.hazmat.backends import openssl
import pkg_resources
import psutil
import pytsk3
import yara
from grr_response_client import actions
from grr_response_client import communicator
from grr_response_client.client_actions import tempfiles
from grr_response_client.client_actions import timeline
from grr_response_core import config
from grr_response_core.lib import config_lib
from grr_response_core.lib import queues
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import client as rdf_client
from grr_response_core.lib.rdfvalues import client_action as rdf_client_action
from grr_response_core.lib.rdfvalues import client_stats as rdf_client_stats
from grr_response_core.lib.rdfvalues import flows as rdf_flows
from grr_response_core.lib.rdfvalues import protodict as rdf_protodict
class Echo(actions.ActionPlugin):
in_rdfvalue = rdf_client_action.EchoRequest
out_rdfvalues = [rdf_client_action.EchoRequest]
def Run(self, args):
self.SendReply(args)
def GetHostnameFromClient(args):
del args
yield rdf_protodict.DataBlob(string=socket.gethostname())
class GetHostname(actions.ActionPlugin):
out_rdfvalues = [rdf_protodict.DataBlob]
def Run(self, args):
for res in GetHostnameFromClient(args):
self.SendReply(res)
class GetPlatformInfo(actions.ActionPlugin):
out_rdfvalues = [rdf_client.Uname]
def Run(self, unused_args):
self.SendReply(rdf_client.Uname.FromCurrentSystem())
class Kill(actions.ActionPlugin):
out_rdfvalues = [rdf_flows.GrrMessage]
def Run(self, unused_arg):
reply = rdf_flows.GrrStatus(status=rdf_flows.GrrStatus.ReturnedStatus.OK)
self.SendReply(reply, message_type=rdf_flows.GrrMessage.Type.STATUS)
self.grr_worker.Sleep(10)
logging.info("Dying on request.")
os._exit(242)
class GetConfiguration(actions.ActionPlugin):
in_rdfvalue = None
out_rdfvalues = [rdf_protodict.Dict]
BLOCKED_PARAMETERS = ["Client.private_key"]
def Run(self, unused_arg):
out = self.out_rdfvalues[0]()
for descriptor in config.CONFIG.type_infos:
if descriptor.name in self.BLOCKED_PARAMETERS:
value = "[Redacted]"
else:
try:
value = config.CONFIG.Get(descriptor.name, default=None)
except (config_lib.Error, KeyError, AttributeError, ValueError) as e:
logging.info("Config reading error: %s", e)
continue
if value is not None:
out[descriptor.name] = value
self.SendReply(out)
class GetLibraryVersions(actions.ActionPlugin):
in_rdfvalue = None
out_rdfvalues = [rdf_protodict.Dict]
def GetSSLVersion(self):
return openssl.backend.openssl_version_text()
def GetCryptographyVersion(self):
return cryptography.__version__
def GetPSUtilVersion(self):
return ".".join(map(utils.SmartUnicode, psutil.version_info))
def GetProtoVersion(self):
return pkg_resources.get_distribution("protobuf").version
def GetTSKVersion(self):
return pytsk3.TSK_VERSION_STR
def GetPyTSKVersion(self):
return pytsk3.get_version()
def GetYaraVersion(self):
return yara.YARA_VERSION
library_map = {
"pytsk": GetPyTSKVersion,
"TSK": GetTSKVersion,
"cryptography": GetCryptographyVersion,
"SSL": GetSSLVersion,
"psutil": GetPSUtilVersion,
"yara": GetYaraVersion,
}
error_str = "Unable to determine library version: %s"
def Run(self, unused_arg):
result = self.out_rdfvalues[0]()
for lib, f in self.library_map.items():
try:
result[lib] = f(self)
except Exception:
result[lib] = self.error_str % traceback.format_exc()
self.SendReply(result)
class UpdateConfiguration(actions.ActionPlugin):
in_rdfvalue = rdf_protodict.Dict
UPDATABLE_FIELDS = {"Client.foreman_check_frequency",
"Client.server_urls",
"Client.max_post_size",
"Client.max_out_queue",
"Client.poll_min",
"Client.poll_max",
"Client.rss_max"}
def _UpdateConfig(self, filtered_arg, config_obj):
for field, value in filtered_arg.items():
config_obj.Set(field, value)
try:
config_obj.Write()
except (IOError, OSError):
pass
def Run(self, arg):
try:
if self.grr_worker.client.FleetspeakEnabled():
raise ValueError("Not supported on Fleetspeak enabled clients.")
except AttributeError:
pass
smart_arg = {str(field): value for field, value in arg.items()}
disallowed_fields = [
field for field in smart_arg
if field not in UpdateConfiguration.UPDATABLE_FIELDS
]
if disallowed_fields:
raise ValueError("Received an update request for restricted field(s) %s."
% ",".join(disallowed_fields))
if platform.system() != "Windows":
canary_config = config.CONFIG.CopyConfig()
with tempfiles.CreateGRRTempFile(mode="w+") as temp_fd:
temp_filename = temp_fd.name
canary_config.SetWriteBack(temp_filename)
self._UpdateConfig(smart_arg, canary_config)
try:
canary_config.SetWriteBack(temp_filename)
except Exception:
logging.warning("Updated config file %s is not usable.", temp_filename)
raise
os.unlink(temp_filename)
self._UpdateConfig(smart_arg, config.CONFIG)
def GetClientInformation() -> rdf_client.ClientInformation:
return rdf_client.ClientInformation(
client_name=config.CONFIG["Client.name"],
client_binary_name=psutil.Process().name(),
client_description=config.CONFIG["Client.description"],
client_version=int(config.CONFIG["Source.version_numeric"]),
build_time=config.CONFIG["Client.build_time"],
labels=config.CONFIG.Get("Client.labels", default=None),
timeline_btime_support=timeline.BTIME_SUPPORT)
class GetClientInfo(actions.ActionPlugin):
out_rdfvalues = [rdf_client.ClientInformation]
def Run(self, unused_args):
self.SendReply(GetClientInformation())
class GetClientStats(actions.ActionPlugin):
in_rdfvalue = rdf_client_action.GetClientStatsRequest
out_rdfvalues = [rdf_client_stats.ClientStats]
def Run(self, arg):
if arg is None:
arg = rdf_client_action.GetClientStatsRequest()
proc = psutil.Process(os.getpid())
meminfo = proc.memory_info()
boot_time = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(psutil.boot_time())
create_time = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(proc.create_time())
response = rdf_client_stats.ClientStats(
RSS_size=meminfo.rss,
VMS_size=meminfo.vms,
memory_percent=proc.memory_percent(),
bytes_received=communicator.GRR_CLIENT_RECEIVED_BYTES.GetValue(),
bytes_sent=communicator.GRR_CLIENT_SENT_BYTES.GetValue(),
create_time=create_time,
boot_time=boot_time)
response.cpu_samples = self.grr_worker.stats_collector.CpuSamplesBetween(
start_time=arg.start_time, end_time=arg.end_time)
response.io_samples = self.grr_worker.stats_collector.IOSamplesBetween(
start_time=arg.start_time, end_time=arg.end_time)
self.Send(response)
def Send(self, response):
self.SendReply(response)
class GetClientStatsAuto(GetClientStats):
def Send(self, response):
self.grr_worker.SendReply(
rdf_client_stats.ClientStats.Downsampled(response),
session_id=rdfvalue.SessionID(queue=queues.STATS, flow_name="Stats"),
response_id=0,
request_id=0,
message_type=rdf_flows.GrrMessage.Type.MESSAGE,
require_fastpoll=False)
class SendStartupInfo(actions.ActionPlugin):
in_rdfvalue = None
out_rdfvalues = [rdf_client.StartupInfo]
well_known_session_id = rdfvalue.SessionID(flow_name="Startup")
def Run(self, unused_arg, ttl=None):
logging.debug("Sending startup information.")
boot_time = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(psutil.boot_time())
response = rdf_client.StartupInfo(
boot_time=boot_time, client_info=GetClientInformation())
self.grr_worker.SendReply(
response,
session_id=self.well_known_session_id,
response_id=0,
request_id=0,
message_type=rdf_flows.GrrMessage.Type.MESSAGE,
require_fastpoll=False,
ttl=ttl)
| true | true |
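GetClientStats above leans on psutil for per-process memory and boot-time data; a standalone sketch of just those calls (no GRR required) shows what feeds the ClientStats fields:

import os
import psutil

proc = psutil.Process(os.getpid())
mem = proc.memory_info()
print(mem.rss, mem.vms)                        # -> RSS_size, VMS_size
print(proc.memory_percent())                   # -> memory_percent
print(psutil.boot_time(), proc.create_time())  # -> boot_time, create_time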
f71c6da0c8beda1979477eff278dde12b8d91849 | 1,317 | py | Python | social_fabric/network_template_processor.py | social-fabric/social-fabric | 5b6adacf4717865a262bf4364fac62f945c52f41 | [
"Apache-2.0"
] | null | null | null | social_fabric/network_template_processor.py | social-fabric/social-fabric | 5b6adacf4717865a262bf4364fac62f945c52f41 | [
"Apache-2.0"
] | null | null | null | social_fabric/network_template_processor.py | social-fabric/social-fabric | 5b6adacf4717865a262bf4364fac62f945c52f41 | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2020 - Neptunium Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from jinja2 import Environment, FileSystemLoader, StrictUndefined
from social_fabric.config_repo import ConfigRepo
class NetworkTemplateProcessor:
def __init__(self):
self.file_loader = FileSystemLoader(ConfigRepo.TEMPLATE_SRC_DIR)
self.env = Environment(loader=self.file_loader, undefined=StrictUndefined)
def process(self, filename, *args, **kwargs):
template = self.env.get_template(filename)
return template.render(*args, **kwargs)
if __name__ == '__main__':
config = {}
net_template_processor = NetworkTemplateProcessor()
output = net_template_processor.process('docker-compose-ca.yaml',
BCC_NETWORK_DOMAIN='orsnet',
BCC_CA_ADDR='ca.theobjects.com',
BCC_CA_PORT='7055',
BCC_CA_PUBLIC_CERT='ca.theobjects.com.cert.pem',
BCC_CA_PRIVATE_KEY='ca.theobjects.com.priv.key',
BCC_CA_ADMIN_NAME='admin', BCC_CA_ADMIN_PASSWORD='adminpw')
with open('/tmp/docker-compose.yaml', 'w') as f:
f.write(output)
| 38.735294 | 103 | 0.593014 |
from jinja2 import Environment, FileSystemLoader, StrictUndefined
from social_fabric.config_repo import ConfigRepo
class NetworkTemplateProcessor:
def __init__(self):
self.file_loader = FileSystemLoader(ConfigRepo.TEMPLATE_SRC_DIR)
self.env = Environment(loader=self.file_loader, undefined=StrictUndefined)
def process(self, filename, *args, **kwargs):
template = self.env.get_template(filename)
return template.render(*args, **kwargs)
if __name__ == '__main__':
config = {}
net_template_processor = NetworkTemplateProcessor()
output = net_template_processor.process('docker-compose-ca.yaml',
BCC_NETWORK_DOMAIN='orsnet',
BCC_CA_ADDR='ca.theobjects.com',
BCC_CA_PORT='7055',
BCC_CA_PUBLIC_CERT='ca.theobjects.com.cert.pem',
BCC_CA_PRIVATE_KEY='ca.theobjects.com.priv.key',
BCC_CA_ADMIN_NAME='admin', BCC_CA_ADMIN_PASSWORD='adminpw')
with open('/tmp/docker-compose.yaml', 'w') as f:
f.write(output)
| true | true |
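NetworkTemplateProcessor opts into Jinja2's StrictUndefined, so a template variable that is not supplied raises instead of silently rendering as empty text. A self-contained sketch of that behaviour (the variable name mirrors the example above):

from jinja2 import Environment, StrictUndefined
from jinja2.exceptions import UndefinedError

env = Environment(undefined=StrictUndefined)
tmpl = env.from_string("ca addr: {{ BCC_CA_ADDR }}")
print(tmpl.render(BCC_CA_ADDR="ca.example.com"))  # ca addr: ca.example.com
try:
    tmpl.render()  # BCC_CA_ADDR missing
except UndefinedError as exc:
    print("render failed:", exc)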
f71c6e32018acdc987579657fa46ba5d3cf922de | 6,231 | py | Python | crabageprediction/venv/Lib/site-packages/matplotlib/tests/test_sphinxext.py | 13rianlucero/CrabAgePrediction | 92bc7fbe1040f49e820473e33cc3902a5a7177c7 | [
"MIT"
] | 7 | 2022-01-16T12:28:16.000Z | 2022-03-04T15:31:45.000Z | crabageprediction/venv/Lib/site-packages/matplotlib/tests/test_sphinxext.py | 13rianlucero/CrabAgePrediction | 92bc7fbe1040f49e820473e33cc3902a5a7177c7 | [
"MIT"
] | 5 | 2022-01-16T10:08:41.000Z | 2022-01-20T05:34:09.000Z | crabageprediction/venv/Lib/site-packages/matplotlib/tests/test_sphinxext.py | 13rianlucero/CrabAgePrediction | 92bc7fbe1040f49e820473e33cc3902a5a7177c7 | [
"MIT"
] | 4 | 2022-02-04T22:58:27.000Z | 2022-02-14T19:29:18.000Z | """Tests for tinypages build using sphinx extensions."""
import filecmp
import os
from pathlib import Path
import shutil
from subprocess import Popen, PIPE
import sys
import pytest
pytest.importorskip('sphinx',
minversion=None if sys.version_info < (3, 10) else '4.1.3')
def test_tinypages(tmpdir):
source_dir = Path(tmpdir) / 'src'
shutil.copytree(Path(__file__).parent / 'tinypages', source_dir)
html_dir = source_dir / '_build' / 'html'
doctree_dir = source_dir / 'doctrees'
# Build the pages with warnings turned into errors
build_sphinx_html(source_dir, doctree_dir, html_dir)
def plot_file(num):
return html_dir / f'some_plots-{num}.png'
def plot_directive_file(num):
# This is always next to the doctree dir.
return doctree_dir.parent / 'plot_directive' / f'some_plots-{num}.png'
range_10, range_6, range_4 = [plot_file(i) for i in range(1, 4)]
# Plot 5 is range(6) plot
assert filecmp.cmp(range_6, plot_file(5))
# Plot 7 is range(4) plot
assert filecmp.cmp(range_4, plot_file(7))
# Plot 11 is range(10) plot
assert filecmp.cmp(range_10, plot_file(11))
# Plot 12 uses the old range(10) figure and the new range(6) figure
assert filecmp.cmp(range_10, plot_file('12_00'))
assert filecmp.cmp(range_6, plot_file('12_01'))
# Plot 13 shows close-figs in action
assert filecmp.cmp(range_4, plot_file(13))
# Plot 14 has included source
html_contents = (html_dir / 'some_plots.html').read_bytes()
assert b'# Only a comment' in html_contents
# check plot defined in external file.
assert filecmp.cmp(range_4, html_dir / 'range4.png')
assert filecmp.cmp(range_6, html_dir / 'range6.png')
# check if figure caption made it into html file
assert b'This is the caption for plot 15.' in html_contents
# check if figure caption using :caption: made it into html file
assert b'Plot 17 uses the caption option.' in html_contents
# check if figure caption made it into html file
assert b'This is the caption for plot 18.' in html_contents
# check if the custom classes made it into the html file
assert b'plot-directive my-class my-other-class' in html_contents
# check that the multi-image caption is applied twice
assert html_contents.count(b'This caption applies to both plots.') == 2
# Plot 21 is range(6) plot via an include directive. But because some of
# the previous plots are repeated, the argument to plot_file() is only 17.
assert filecmp.cmp(range_6, plot_file(17))
# Modify the included plot
contents = (source_dir / 'included_plot_21.rst').read_text()
contents = contents.replace('plt.plot(range(6))', 'plt.plot(range(4))')
(source_dir / 'included_plot_21.rst').write_text(contents)
# Build the pages again and check that the modified file was updated
modification_times = [plot_directive_file(i).stat().st_mtime
for i in (1, 2, 3, 5)]
build_sphinx_html(source_dir, doctree_dir, html_dir)
assert filecmp.cmp(range_4, plot_file(17))
# Check that the plots in the plot_directive folder weren't changed.
# (plot_directive_file(1) won't be modified, but it will be copied to html/
# upon compilation, so plot_file(1) will be modified)
assert plot_directive_file(1).stat().st_mtime == modification_times[0]
assert plot_directive_file(2).stat().st_mtime == modification_times[1]
assert plot_directive_file(3).stat().st_mtime == modification_times[2]
assert filecmp.cmp(range_10, plot_file(1))
assert filecmp.cmp(range_6, plot_file(2))
assert filecmp.cmp(range_4, plot_file(3))
# Make sure that figures marked with context are re-created (but that the
# contents are the same)
assert plot_directive_file(5).stat().st_mtime > modification_times[3]
assert filecmp.cmp(range_6, plot_file(5))
def test_plot_html_show_source_link(tmpdir):
source_dir = Path(tmpdir) / 'src'
source_dir.mkdir()
parent = Path(__file__).parent
shutil.copyfile(parent / 'tinypages/conf.py', source_dir / 'conf.py')
shutil.copytree(parent / 'tinypages/_static', source_dir / '_static')
doctree_dir = source_dir / 'doctrees'
(source_dir / 'index.rst').write_text("""
.. plot::
plt.plot(range(2))
""")
# Make sure source scripts are created by default
html_dir1 = source_dir / '_build' / 'html1'
build_sphinx_html(source_dir, doctree_dir, html_dir1)
assert "index-1.py" in [p.name for p in html_dir1.iterdir()]
# Make sure source scripts are NOT created when
# plot_html_show_source_link` is False
html_dir2 = source_dir / '_build' / 'html2'
build_sphinx_html(source_dir, doctree_dir, html_dir2,
extra_args=['-D', 'plot_html_show_source_link=0'])
assert "index-1.py" not in [p.name for p in html_dir2.iterdir()]
def build_sphinx_html(source_dir, doctree_dir, html_dir, extra_args=None):
# Build the pages with warnings turned into errors
extra_args = [] if extra_args is None else extra_args
cmd = [sys.executable, '-msphinx', '-W', '-b', 'html',
'-d', str(doctree_dir), str(source_dir), str(html_dir), *extra_args]
proc = Popen(cmd, stdout=PIPE, stderr=PIPE, universal_newlines=True,
env={**os.environ, "MPLBACKEND": ""})
out, err = proc.communicate()
assert proc.returncode == 0, \
f"sphinx build failed with stdout:\n{out}\nstderr:\n{err}\n"
if err:
pytest.fail(f"sphinx build emitted the following warnings:\n{err}")
assert html_dir.is_dir()
| 44.507143 | 79 | 0.69074 |
import filecmp
import os
from pathlib import Path
import shutil
from subprocess import Popen, PIPE
import sys
import pytest
pytest.importorskip('sphinx',
minversion=None if sys.version_info < (3, 10) else '4.1.3')
def test_tinypages(tmpdir):
source_dir = Path(tmpdir) / 'src'
shutil.copytree(Path(__file__).parent / 'tinypages', source_dir)
html_dir = source_dir / '_build' / 'html'
doctree_dir = source_dir / 'doctrees'
# Build the pages with warnings turned into errors
build_sphinx_html(source_dir, doctree_dir, html_dir)
def plot_file(num):
return html_dir / f'some_plots-{num}.png'
def plot_directive_file(num):
# This is always next to the doctree dir.
return doctree_dir.parent / 'plot_directive' / f'some_plots-{num}.png'
range_10, range_6, range_4 = [plot_file(i) for i in range(1, 4)]
# Plot 5 is range(6) plot
assert filecmp.cmp(range_6, plot_file(5))
# Plot 7 is range(4) plot
assert filecmp.cmp(range_4, plot_file(7))
# Plot 11 is range(10) plot
assert filecmp.cmp(range_10, plot_file(11))
# Plot 12 uses the old range(10) figure and the new range(6) figure
assert filecmp.cmp(range_10, plot_file('12_00'))
assert filecmp.cmp(range_6, plot_file('12_01'))
# Plot 13 shows close-figs in action
assert filecmp.cmp(range_4, plot_file(13))
# Plot 14 has included source
html_contents = (html_dir / 'some_plots.html').read_bytes()
    assert b'# Only a comment' in html_contents
# check plot defined in external file.
assert filecmp.cmp(range_4, html_dir / 'range4.png')
assert filecmp.cmp(range_6, html_dir / 'range6.png')
# check if figure caption made it into html file
assert b'This is the caption for plot 15.' in html_contents
# check if figure caption using :caption: made it into html file
assert b'Plot 17 uses the caption option.' in html_contents
# check if figure caption made it into html file
assert b'This is the caption for plot 18.' in html_contents
# check if the custom classes made it into the html file
assert b'plot-directive my-class my-other-class' in html_contents
# check that the multi-image caption is applied twice
assert html_contents.count(b'This caption applies to both plots.') == 2
# Plot 21 is range(6) plot via an include directive. But because some of
# the previous plots are repeated, the argument to plot_file() is only 17.
assert filecmp.cmp(range_6, plot_file(17))
# Modify the included plot
contents = (source_dir / 'included_plot_21.rst').read_text()
contents = contents.replace('plt.plot(range(6))', 'plt.plot(range(4))')
(source_dir / 'included_plot_21.rst').write_text(contents)
# Build the pages again and check that the modified file was updated
modification_times = [plot_directive_file(i).stat().st_mtime
for i in (1, 2, 3, 5)]
build_sphinx_html(source_dir, doctree_dir, html_dir)
assert filecmp.cmp(range_4, plot_file(17))
# Check that the plots in the plot_directive folder weren't changed.
    # (plot_directive_file(1) won't be modified, but it will be copied to html/
    # upon compilation, so plot_file(1) will be modified)
assert plot_directive_file(1).stat().st_mtime == modification_times[0]
assert plot_directive_file(2).stat().st_mtime == modification_times[1]
assert plot_directive_file(3).stat().st_mtime == modification_times[2]
assert filecmp.cmp(range_10, plot_file(1))
assert filecmp.cmp(range_6, plot_file(2))
assert filecmp.cmp(range_4, plot_file(3))
# Make sure that figures marked with context are re-created (but that the
# contents are the same)
assert plot_directive_file(5).stat().st_mtime > modification_times[3]
assert filecmp.cmp(range_6, plot_file(5))
def test_plot_html_show_source_link(tmpdir):
source_dir = Path(tmpdir) / 'src'
source_dir.mkdir()
parent = Path(__file__).parent
shutil.copyfile(parent / 'tinypages/conf.py', source_dir / 'conf.py')
shutil.copytree(parent / 'tinypages/_static', source_dir / '_static')
doctree_dir = source_dir / 'doctrees'
(source_dir / 'index.rst').write_text("""
.. plot::
plt.plot(range(2))
""")
# Make sure source scripts are created by default
html_dir1 = source_dir / '_build' / 'html1'
build_sphinx_html(source_dir, doctree_dir, html_dir1)
assert "index-1.py" in [p.name for p in html_dir1.iterdir()]
# Make sure source scripts are NOT created when
# plot_html_show_source_link` is False
html_dir2 = source_dir / '_build' / 'html2'
build_sphinx_html(source_dir, doctree_dir, html_dir2,
extra_args=['-D', 'plot_html_show_source_link=0'])
assert "index-1.py" not in [p.name for p in html_dir2.iterdir()]
def build_sphinx_html(source_dir, doctree_dir, html_dir, extra_args=None):
# Build the pages with warnings turned into errors
extra_args = [] if extra_args is None else extra_args
cmd = [sys.executable, '-msphinx', '-W', '-b', 'html',
'-d', str(doctree_dir), str(source_dir), str(html_dir), *extra_args]
proc = Popen(cmd, stdout=PIPE, stderr=PIPE, universal_newlines=True,
env={**os.environ, "MPLBACKEND": ""})
out, err = proc.communicate()
assert proc.returncode == 0, \
f"sphinx build failed with stdout:\n{out}\nstderr:\n{err}\n"
if err:
pytest.fail(f"sphinx build emitted the following warnings:\n{err}")
assert html_dir.is_dir()
| true | true |
f71c6f7492e390d017ea6f6fa7414b737b27c660 | 7,147 | py | Python | distributed/tests/test_as_completed.py | abduhbm/distributed | d99752e030e2534c1f064865e2241289fedfe5a9 | [
"BSD-3-Clause"
] | null | null | null | distributed/tests/test_as_completed.py | abduhbm/distributed | d99752e030e2534c1f064865e2241289fedfe5a9 | [
"BSD-3-Clause"
] | null | null | null | distributed/tests/test_as_completed.py | abduhbm/distributed | d99752e030e2534c1f064865e2241289fedfe5a9 | [
"BSD-3-Clause"
] | null | null | null | import asyncio
from collections.abc import Iterator
from operator import add
import queue
import random
from time import sleep
import pytest
from tornado import gen
from distributed.client import _as_completed, as_completed, _first_completed, wait
from distributed.metrics import time
from distributed.utils import CancelledError
from distributed.utils_test import gen_cluster, inc, throws
from distributed.utils_test import client, cluster_fixture, loop # noqa: F401
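# These tests exercise as_completed both as a plain iterator and as an async
# iterator: completion order, batching, repeats, cancellation, and error
# propagation.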
@gen_cluster(client=True)
def test__as_completed(c, s, a, b):
x = c.submit(inc, 1)
y = c.submit(inc, 1)
z = c.submit(inc, 2)
q = queue.Queue()
yield _as_completed([x, y, z], q)
assert q.qsize() == 3
assert {q.get(), q.get(), q.get()} == {x, y, z}
result = yield _first_completed([x, y, z])
assert result in [x, y, z]
def test_as_completed(client):
x = client.submit(inc, 1)
y = client.submit(inc, 2)
z = client.submit(inc, 1)
seq = as_completed([x, y, z])
assert seq.count() == 3
assert isinstance(seq, Iterator)
assert set(seq) == {x, y, z}
assert seq.count() == 0
assert list(as_completed([])) == []
def test_as_completed_with_non_futures(client):
with pytest.raises(TypeError):
list(as_completed([1, 2, 3]))
def test_as_completed_add(client):
total = 0
expected = sum(map(inc, range(10)))
futures = client.map(inc, range(10))
ac = as_completed(futures)
for future in ac:
result = future.result()
total += result
if random.random() < 0.5:
future = client.submit(add, future, 10)
ac.add(future)
expected += result + 10
assert total == expected
def test_as_completed_update(client):
total = 0
todo = list(range(10))
expected = sum(map(inc, todo))
ac = as_completed([])
while todo or not ac.is_empty():
if todo:
work, todo = todo[:4], todo[4:]
ac.update(client.map(inc, work))
batch = ac.next_batch(block=True)
total += sum(r.result() for r in batch)
assert total == expected
def test_as_completed_repeats(client):
ac = as_completed()
x = client.submit(inc, 1)
ac.add(x)
ac.add(x)
assert next(ac) is x
assert next(ac) is x
with pytest.raises(StopIteration):
next(ac)
ac.add(x)
assert next(ac) is x
def test_as_completed_is_empty(client):
ac = as_completed()
assert ac.is_empty()
x = client.submit(inc, 1)
ac.add(x)
assert not ac.is_empty()
assert next(ac) is x
assert ac.is_empty()
def test_as_completed_cancel(client):
x = client.submit(inc, 1)
y = client.submit(inc, 1)
ac = as_completed([x, y])
x.cancel()
    # ``next(ac) is x or y`` always passed because ``y`` is truthy; the
    # intended assertion is membership in {x, y}.
    assert next(ac) in (x, y)
    assert next(ac) in (x, y)
with pytest.raises(queue.Empty):
ac.queue.get(timeout=0.1)
res = list(as_completed([x, y, x]))
assert len(res) == 3
assert set(res) == {x, y}
assert res.count(x) == 2
def test_as_completed_cancel_last(client):
w = client.submit(inc, 0.3)
x = client.submit(inc, 1)
y = client.submit(inc, 0.3)
@gen.coroutine
def _():
yield gen.sleep(0.1)
yield w.cancel(asynchronous=True)
yield y.cancel(asynchronous=True)
client.loop.add_callback(_)
ac = as_completed([x, y])
result = set(ac)
assert result == {x, y}
@gen_cluster(client=True)
def test_async_for_py2_equivalent(c, s, a, b):
futures = c.map(sleep, [0.01] * 3, pure=False)
seq = as_completed(futures)
x = yield seq.__anext__()
y = yield seq.__anext__()
z = yield seq.__anext__()
assert x.done()
assert y.done()
assert z.done()
assert x.key != y.key
with pytest.raises(StopAsyncIteration):
yield seq.__anext__()
@gen_cluster(client=True)
def test_as_completed_error_async(c, s, a, b):
x = c.submit(throws, 1)
y = c.submit(inc, 1)
ac = as_completed([x, y])
first = yield ac.__anext__()
second = yield ac.__anext__()
result = {first, second}
assert result == {x, y}
assert x.status == "error"
assert y.status == "finished"
def test_as_completed_error(client):
x = client.submit(throws, 1)
y = client.submit(inc, 1)
ac = as_completed([x, y])
result = set(ac)
assert result == {x, y}
assert x.status == "error"
assert y.status == "finished"
def test_as_completed_with_results(client):
x = client.submit(throws, 1)
y = client.submit(inc, 5)
z = client.submit(inc, 1)
ac = as_completed([x, y, z], with_results=True)
y.cancel()
with pytest.raises(RuntimeError) as exc:
res = list(ac)
assert str(exc.value) == "hello!"
@gen_cluster(client=True)
def test_as_completed_with_results_async(c, s, a, b):
x = c.submit(throws, 1)
y = c.submit(inc, 5)
z = c.submit(inc, 1)
ac = as_completed([x, y, z], with_results=True)
yield y.cancel()
with pytest.raises(RuntimeError) as exc:
first = yield ac.__anext__()
second = yield ac.__anext__()
third = yield ac.__anext__()
assert str(exc.value) == "hello!"
def test_as_completed_with_results_no_raise(client):
x = client.submit(throws, 1)
y = client.submit(inc, 5)
z = client.submit(inc, 1)
ac = as_completed([x, y, z], with_results=True, raise_errors=False)
y.cancel()
res = list(ac)
dd = {r[0]: r[1:] for r in res}
assert set(dd.keys()) == {y, x, z}
assert x.status == "error"
assert y.status == "cancelled"
assert z.status == "finished"
assert isinstance(dd[y][0], CancelledError) or dd[y][0] == 6
assert isinstance(dd[x][0][1], RuntimeError)
assert dd[z][0] == 2
@gen_cluster(client=True)
async def test_str(c, s, a, b):
futures = c.map(inc, range(3))
ac = as_completed(futures)
assert "waiting=3" in str(ac)
assert "waiting=3" in repr(ac)
assert "done=0" in str(ac)
assert "done=0" in repr(ac)
await ac.__anext__()
start = time()
while "done=2" not in str(ac):
await asyncio.sleep(0.01)
assert time() < start + 2
@gen_cluster(client=True)
def test_as_completed_with_results_no_raise_async(c, s, a, b):
x = c.submit(throws, 1)
y = c.submit(inc, 5)
z = c.submit(inc, 1)
ac = as_completed([x, y, z], with_results=True, raise_errors=False)
c.loop.add_callback(y.cancel)
first = yield ac.__anext__()
second = yield ac.__anext__()
third = yield ac.__anext__()
res = [first, second, third]
dd = {r[0]: r[1:] for r in res}
assert set(dd.keys()) == {y, x, z}
assert x.status == "error"
assert y.status == "cancelled"
assert z.status == "finished"
assert isinstance(dd[y][0], CancelledError)
assert isinstance(dd[x][0][1], RuntimeError)
assert dd[z][0] == 2
@gen_cluster(client=True, timeout=None)
async def test_clear(c, s, a, b):
futures = c.map(inc, range(3))
ac = as_completed(futures)
await wait(futures)
ac.clear()
with pytest.raises(StopAsyncIteration):
await ac.__anext__()
del futures
while s.tasks:
await asyncio.sleep(0.3)
| 24.644828 | 82 | 0.622219 | import asyncio
from collections.abc import Iterator
from operator import add
import queue
import random
from time import sleep
import pytest
from tornado import gen
from distributed.client import _as_completed, as_completed, _first_completed, wait
from distributed.metrics import time
from distributed.utils import CancelledError
from distributed.utils_test import gen_cluster, inc, throws
from distributed.utils_test import client, cluster_fixture, loop
@gen_cluster(client=True)
def test__as_completed(c, s, a, b):
x = c.submit(inc, 1)
y = c.submit(inc, 1)
z = c.submit(inc, 2)
q = queue.Queue()
yield _as_completed([x, y, z], q)
assert q.qsize() == 3
assert {q.get(), q.get(), q.get()} == {x, y, z}
result = yield _first_completed([x, y, z])
assert result in [x, y, z]
def test_as_completed(client):
x = client.submit(inc, 1)
y = client.submit(inc, 2)
z = client.submit(inc, 1)
seq = as_completed([x, y, z])
assert seq.count() == 3
assert isinstance(seq, Iterator)
assert set(seq) == {x, y, z}
assert seq.count() == 0
assert list(as_completed([])) == []
def test_as_completed_with_non_futures(client):
with pytest.raises(TypeError):
list(as_completed([1, 2, 3]))
def test_as_completed_add(client):
total = 0
expected = sum(map(inc, range(10)))
futures = client.map(inc, range(10))
ac = as_completed(futures)
for future in ac:
result = future.result()
total += result
if random.random() < 0.5:
future = client.submit(add, future, 10)
ac.add(future)
expected += result + 10
assert total == expected
def test_as_completed_update(client):
total = 0
todo = list(range(10))
expected = sum(map(inc, todo))
ac = as_completed([])
while todo or not ac.is_empty():
if todo:
work, todo = todo[:4], todo[4:]
ac.update(client.map(inc, work))
batch = ac.next_batch(block=True)
total += sum(r.result() for r in batch)
assert total == expected
def test_as_completed_repeats(client):
ac = as_completed()
x = client.submit(inc, 1)
ac.add(x)
ac.add(x)
assert next(ac) is x
assert next(ac) is x
with pytest.raises(StopIteration):
next(ac)
ac.add(x)
assert next(ac) is x
def test_as_completed_is_empty(client):
ac = as_completed()
assert ac.is_empty()
x = client.submit(inc, 1)
ac.add(x)
assert not ac.is_empty()
assert next(ac) is x
assert ac.is_empty()
def test_as_completed_cancel(client):
x = client.submit(inc, 1)
y = client.submit(inc, 1)
ac = as_completed([x, y])
x.cancel()
    assert next(ac) in (x, y)
    assert next(ac) in (x, y)
with pytest.raises(queue.Empty):
ac.queue.get(timeout=0.1)
res = list(as_completed([x, y, x]))
assert len(res) == 3
assert set(res) == {x, y}
assert res.count(x) == 2
def test_as_completed_cancel_last(client):
w = client.submit(inc, 0.3)
x = client.submit(inc, 1)
y = client.submit(inc, 0.3)
@gen.coroutine
def _():
yield gen.sleep(0.1)
yield w.cancel(asynchronous=True)
yield y.cancel(asynchronous=True)
client.loop.add_callback(_)
ac = as_completed([x, y])
result = set(ac)
assert result == {x, y}
@gen_cluster(client=True)
def test_async_for_py2_equivalent(c, s, a, b):
futures = c.map(sleep, [0.01] * 3, pure=False)
seq = as_completed(futures)
x = yield seq.__anext__()
y = yield seq.__anext__()
z = yield seq.__anext__()
assert x.done()
assert y.done()
assert z.done()
assert x.key != y.key
with pytest.raises(StopAsyncIteration):
yield seq.__anext__()
@gen_cluster(client=True)
def test_as_completed_error_async(c, s, a, b):
x = c.submit(throws, 1)
y = c.submit(inc, 1)
ac = as_completed([x, y])
first = yield ac.__anext__()
second = yield ac.__anext__()
result = {first, second}
assert result == {x, y}
assert x.status == "error"
assert y.status == "finished"
def test_as_completed_error(client):
x = client.submit(throws, 1)
y = client.submit(inc, 1)
ac = as_completed([x, y])
result = set(ac)
assert result == {x, y}
assert x.status == "error"
assert y.status == "finished"
def test_as_completed_with_results(client):
x = client.submit(throws, 1)
y = client.submit(inc, 5)
z = client.submit(inc, 1)
ac = as_completed([x, y, z], with_results=True)
y.cancel()
with pytest.raises(RuntimeError) as exc:
res = list(ac)
assert str(exc.value) == "hello!"
@gen_cluster(client=True)
def test_as_completed_with_results_async(c, s, a, b):
x = c.submit(throws, 1)
y = c.submit(inc, 5)
z = c.submit(inc, 1)
ac = as_completed([x, y, z], with_results=True)
yield y.cancel()
with pytest.raises(RuntimeError) as exc:
first = yield ac.__anext__()
second = yield ac.__anext__()
third = yield ac.__anext__()
assert str(exc.value) == "hello!"
def test_as_completed_with_results_no_raise(client):
x = client.submit(throws, 1)
y = client.submit(inc, 5)
z = client.submit(inc, 1)
ac = as_completed([x, y, z], with_results=True, raise_errors=False)
y.cancel()
res = list(ac)
dd = {r[0]: r[1:] for r in res}
assert set(dd.keys()) == {y, x, z}
assert x.status == "error"
assert y.status == "cancelled"
assert z.status == "finished"
assert isinstance(dd[y][0], CancelledError) or dd[y][0] == 6
assert isinstance(dd[x][0][1], RuntimeError)
assert dd[z][0] == 2
@gen_cluster(client=True)
async def test_str(c, s, a, b):
futures = c.map(inc, range(3))
ac = as_completed(futures)
assert "waiting=3" in str(ac)
assert "waiting=3" in repr(ac)
assert "done=0" in str(ac)
assert "done=0" in repr(ac)
await ac.__anext__()
start = time()
while "done=2" not in str(ac):
await asyncio.sleep(0.01)
assert time() < start + 2
@gen_cluster(client=True)
def test_as_completed_with_results_no_raise_async(c, s, a, b):
x = c.submit(throws, 1)
y = c.submit(inc, 5)
z = c.submit(inc, 1)
ac = as_completed([x, y, z], with_results=True, raise_errors=False)
c.loop.add_callback(y.cancel)
first = yield ac.__anext__()
second = yield ac.__anext__()
third = yield ac.__anext__()
res = [first, second, third]
dd = {r[0]: r[1:] for r in res}
assert set(dd.keys()) == {y, x, z}
assert x.status == "error"
assert y.status == "cancelled"
assert z.status == "finished"
assert isinstance(dd[y][0], CancelledError)
assert isinstance(dd[x][0][1], RuntimeError)
assert dd[z][0] == 2
@gen_cluster(client=True, timeout=None)
async def test_clear(c, s, a, b):
futures = c.map(inc, range(3))
ac = as_completed(futures)
await wait(futures)
ac.clear()
with pytest.raises(StopAsyncIteration):
await ac.__anext__()
del futures
while s.tasks:
await asyncio.sleep(0.3)
| true | true |
f71c6fe7ef5eb499ee506fe71c00dd30ce728f85 | 14,768 | py | Python | pdf2image/pdf2image.py | ldevandiere/pdf2image | ba11f6a931337c889ac739f6b41a7e78690d6d32 | [
"MIT"
] | null | null | null | pdf2image/pdf2image.py | ldevandiere/pdf2image | ba11f6a931337c889ac739f6b41a7e78690d6d32 | [
"MIT"
] | null | null | null | pdf2image/pdf2image.py | ldevandiere/pdf2image | ba11f6a931337c889ac739f6b41a7e78690d6d32 | [
"MIT"
] | null | null | null | """
pdf2image is a light wrapper for the poppler-utils tools that can convert your
PDFs into Pillow images.
"""
import os
import platform
import tempfile
import types
import shutil
import pathlib
from subprocess import Popen, PIPE
from PIL import Image
from .generators import uuid_generator, counter_generator, ThreadSafeGenerator
from .parsers import (
parse_buffer_to_pgm,
parse_buffer_to_ppm,
parse_buffer_to_jpeg,
parse_buffer_to_png,
)
from .exceptions import (
PopplerNotInstalledError,
PDFInfoNotInstalledError,
PDFPageCountError,
PDFSyntaxError,
)
TRANSPARENT_FILE_TYPES = ["png", "tiff"]
PDFINFO_CONVERT_TO_INT = ["Pages"]
def convert_from_path(
pdf_path,
dpi=200,
output_folder=None,
first_page=None,
last_page=None,
fmt="ppm",
jpegopt=None,
thread_count=1,
userpw=None,
use_cropbox=False,
strict=False,
transparent=False,
single_file=False,
output_file=uuid_generator(),
poppler_path=None,
grayscale=False,
size=None,
paths_only=False,
use_pdftocairo=False,
):
"""
    Description: Convert PDF to Image; will raise whenever one of the conditions below is reached
Parameters:
pdf_path -> Path to the PDF that you want to convert
dpi -> Image quality in DPI (default 200)
output_folder -> Write the resulting images to a folder (instead of directly in memory)
first_page -> First page to process
last_page -> Last page to process before stopping
fmt -> Output image format
jpegopt -> jpeg options `quality`, `progressive`, and `optimize` (only for jpeg format)
thread_count -> How many threads we are allowed to spawn for processing
userpw -> PDF's password
use_cropbox -> Use cropbox instead of mediabox
strict -> When a Syntax Error is thrown, it will be raised as an Exception
transparent -> Output with a transparent background instead of a white one.
single_file -> Uses the -singlefile option from pdftoppm/pdftocairo
output_file -> What is the output filename or generator
poppler_path -> Path to look for poppler binaries
grayscale -> Output grayscale image(s)
size -> Size of the resulting image(s), uses the Pillow (width, height) standard
paths_only -> Don't load image(s), return paths instead (requires output_folder)
use_pdftocairo -> Use pdftocairo instead of pdftoppm, may help performance
"""
if use_pdftocairo and fmt == "ppm":
fmt = "png"
# We make sure that if passed arguments are Path objects, they're converted to strings
if isinstance(pdf_path, pathlib.PurePath):
pdf_path = pdf_path.as_posix()
if isinstance(output_folder, pathlib.PurePath):
output_folder = output_folder.as_posix()
if isinstance(poppler_path, pathlib.PurePath):
poppler_path = poppler_path.as_posix()
page_count = pdfinfo_from_path(pdf_path, userpw, poppler_path=poppler_path)["Pages"]
    # We start by getting the output format, the buffer processing function and whether we need pdftocairo
parsed_fmt, final_extension, parse_buffer_func, use_pdfcairo_format = _parse_format(
fmt, grayscale
)
    # We use pdftocairo if the format requires it OR if we need a transparent output
use_pdfcairo = (
use_pdftocairo
or use_pdfcairo_format
or (transparent and parsed_fmt in TRANSPARENT_FILE_TYPES)
)
poppler_version = _get_poppler_version(
"pdftocairo" if use_pdfcairo else "pdftoppm", poppler_path=poppler_path
)
if poppler_version <= 57:
jpegopt = None
# If output_file isn't a generator, it will be turned into one
if not isinstance(output_file, types.GeneratorType) and not isinstance(
output_file, ThreadSafeGenerator
):
if single_file:
output_file = iter([output_file])
else:
output_file = counter_generator(output_file)
if thread_count < 1:
thread_count = 1
if first_page is None:
first_page = 1
if last_page is None or last_page > page_count:
last_page = page_count
if first_page > last_page:
return []
auto_temp_dir = False
if output_folder is None and use_pdfcairo:
auto_temp_dir = True
output_folder = tempfile.mkdtemp()
# Recalculate page count based on first and last page
page_count = last_page - first_page + 1
if thread_count > page_count:
thread_count = page_count
    remainder = page_count % thread_count
current_page = first_page
processes = []
for _ in range(thread_count):
thread_output_file = next(output_file)
# Get the number of pages the thread will be processing
        thread_page_count = page_count // thread_count + int(remainder > 0)
# Build the command accordingly
args = _build_command(
["-r", str(dpi), pdf_path],
output_folder,
current_page,
current_page + thread_page_count - 1,
parsed_fmt,
jpegopt,
thread_output_file,
userpw,
use_cropbox,
transparent,
single_file,
grayscale,
size,
)
if use_pdfcairo:
args = [_get_command_path("pdftocairo", poppler_path)] + args
else:
args = [_get_command_path("pdftoppm", poppler_path)] + args
# Update page values
current_page = current_page + thread_page_count
        remainder -= int(remainder > 0)
# Add poppler path to LD_LIBRARY_PATH
env = os.environ.copy()
if poppler_path is not None:
env["LD_LIBRARY_PATH"] = poppler_path + ":" + env.get("LD_LIBRARY_PATH", "")
# Spawn the process and save its uuid
processes.append(
(thread_output_file, Popen(args, env=env, stdout=PIPE, stderr=PIPE))
)
images = []
for uid, proc in processes:
data, err = proc.communicate()
if b"Syntax Error" in err and strict:
raise PDFSyntaxError(err.decode("utf8", "ignore"))
if output_folder is not None:
images += _load_from_output_folder(
output_folder, uid, final_extension, paths_only, in_memory=auto_temp_dir
)
else:
images += parse_buffer_func(data)
if auto_temp_dir:
shutil.rmtree(output_folder)
return images
def convert_from_bytes(
pdf_file,
dpi=200,
output_folder=None,
first_page=None,
last_page=None,
fmt="ppm",
jpegopt=None,
thread_count=1,
userpw=None,
use_cropbox=False,
strict=False,
transparent=False,
single_file=False,
output_file=uuid_generator(),
poppler_path=None,
grayscale=False,
size=None,
paths_only=False,
use_pdftocairo=False,
):
"""
    Description: Convert PDF to Image; will raise whenever one of the conditions below is reached
Parameters:
pdf_file -> Bytes representing the PDF file
dpi -> Image quality in DPI
output_folder -> Write the resulting images to a folder (instead of directly in memory)
first_page -> First page to process
last_page -> Last page to process before stopping
fmt -> Output image format
jpegopt -> jpeg options `quality`, `progressive`, and `optimize` (only for jpeg format)
thread_count -> How many threads we are allowed to spawn for processing
userpw -> PDF's password
use_cropbox -> Use cropbox instead of mediabox
strict -> When a Syntax Error is thrown, it will be raised as an Exception
transparent -> Output with a transparent background instead of a white one.
single_file -> Uses the -singlefile option from pdftoppm/pdftocairo
output_file -> What is the output filename or generator
poppler_path -> Path to look for poppler binaries
grayscale -> Output grayscale image(s)
size -> Size of the resulting image(s), uses the Pillow (width, height) standard
paths_only -> Don't load image(s), return paths instead (requires output_folder)
use_pdftocairo -> Use pdftocairo instead of pdftoppm, may help performance
"""
fh, temp_filename = tempfile.mkstemp()
try:
with open(temp_filename, "wb") as f:
f.write(pdf_file)
f.flush()
return convert_from_path(
f.name,
dpi=dpi,
output_folder=output_folder,
first_page=first_page,
last_page=last_page,
fmt=fmt,
jpegopt=jpegopt,
thread_count=thread_count,
userpw=userpw,
use_cropbox=use_cropbox,
strict=strict,
transparent=transparent,
single_file=single_file,
output_file=output_file,
poppler_path=poppler_path,
grayscale=grayscale,
size=size,
paths_only=paths_only,
use_pdftocairo=use_pdftocairo,
)
finally:
os.close(fh)
os.remove(temp_filename)
def _build_command(
args,
output_folder,
first_page,
last_page,
fmt,
jpegopt,
output_file,
userpw,
use_cropbox,
transparent,
single_file,
grayscale,
size,
):
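    # Translate the keyword arguments into pdftoppm/pdftocairo CLI flags.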
if use_cropbox:
args.append("-cropbox")
if transparent and fmt in TRANSPARENT_FILE_TYPES:
args.append("-transp")
if first_page is not None:
args.extend(["-f", str(first_page)])
if last_page is not None:
args.extend(["-l", str(last_page)])
if fmt not in ["pgm", "ppm"]:
args.append("-" + fmt)
if fmt in ["jpeg", "jpg"] and jpegopt:
args.extend(["-jpegopt", _parse_jpegopt(jpegopt)])
if single_file:
args.append("-singlefile")
if output_folder is not None:
args.append(os.path.join(output_folder, output_file))
if userpw is not None:
args.extend(["-upw", userpw])
if grayscale:
args.append("-gray")
if size is None:
pass
elif isinstance(size, tuple) and len(size) == 2:
if size[0] is not None:
args.extend(["-scale-to-x", str(int(size[0]))])
else:
args.extend(["-scale-to-x", str(-1)])
if size[1] is not None:
args.extend(["-scale-to-y", str(int(size[1]))])
else:
args.extend(["-scale-to-y", str(-1)])
elif isinstance(size, tuple) and len(size) == 1:
args.extend(["-scale-to", str(int(size[0]))])
elif isinstance(size, int) or isinstance(size, float):
args.extend(["-scale-to", str(int(size))])
else:
        raise ValueError("Size {} is not a tuple or an integer".format(size))
return args
def _parse_format(fmt, grayscale=False):
fmt = fmt.lower()
if fmt[0] == ".":
fmt = fmt[1:]
if fmt in ("jpeg", "jpg"):
return "jpeg", "jpg", parse_buffer_to_jpeg, False
if fmt == "png":
return "png", "png", parse_buffer_to_png, False
if fmt in ("tif", "tiff"):
return "tiff", "tif", None, True
if fmt == "ppm" and grayscale:
return "pgm", "pgm", parse_buffer_to_pgm, False
# Unable to parse the format so we'll use the default
return "ppm", "ppm", parse_buffer_to_ppm, False
def _parse_jpegopt(jpegopt):
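    # Render e.g. {"quality": 75, "progressive": True} as
    # "quality=75,progressive=y" for the -jpegopt CLI option.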
parts = []
for k, v in jpegopt.items():
if v is True:
v = "y"
if v is False:
v = "n"
parts.append("{}={}".format(k, v))
return ",".join(parts)
def _get_command_path(command, poppler_path=None):
if platform.system() == "Windows":
command = command + ".exe"
if poppler_path is not None:
command = os.path.join(poppler_path, command)
return command
def _get_poppler_version(command, poppler_path=None):
command = [_get_command_path(command, poppler_path), "-v"]
env = os.environ.copy()
if poppler_path is not None:
env["LD_LIBRARY_PATH"] = poppler_path + ":" + env.get("LD_LIBRARY_PATH", "")
proc = Popen(command, env=env, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate()
try:
# TODO: Make this more robust
return int(
err.decode("utf8", "ignore").split("\n")[0].split(" ")[-1].split(".")[1]
)
except:
# Lowest version that includes pdftocairo (2011)
return 17
def pdfinfo_from_path(pdf_path, userpw=None, poppler_path=None):
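    # Shell out to pdfinfo and parse its "Key: value" output; "Pages" is
    # converted to int so callers can use it directly.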
try:
command = [_get_command_path("pdfinfo", poppler_path), pdf_path]
if userpw is not None:
command.extend(["-upw", userpw])
# Add poppler path to LD_LIBRARY_PATH
env = os.environ.copy()
if poppler_path is not None:
env["LD_LIBRARY_PATH"] = poppler_path + ":" + env.get("LD_LIBRARY_PATH", "")
proc = Popen(command, env=env, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate()
d = {}
for field in out.decode("utf8", "ignore").split("\n"):
sf = field.split(":")
key, value = sf[0], ":".join(sf[1:])
if key != "":
d[key] = (
int(value.strip())
if key in PDFINFO_CONVERT_TO_INT
else value.strip()
)
if "Pages" not in d:
raise ValueError
return d
except OSError:
raise PDFInfoNotInstalledError(
"Unable to get page count. Is poppler installed and in PATH?"
)
except ValueError:
raise PDFPageCountError(
"Unable to get page count.\n%s" % err.decode("utf8", "ignore")
)
def pdfinfo_from_bytes(pdf_file):
fh, temp_filename = tempfile.mkstemp()
try:
with open(temp_filename, "wb") as f:
f.write(pdf_file)
f.flush()
return pdfinfo_from_path(temp_filename)
finally:
os.close(fh)
os.remove(temp_filename)
def _load_from_output_folder(
output_folder, output_file, ext, paths_only, in_memory=False
):
images = []
for f in sorted(os.listdir(output_folder)):
if f.startswith(output_file) and f.split(".")[-1] == ext:
if paths_only:
images.append(os.path.join(output_folder, f))
else:
images.append(Image.open(os.path.join(output_folder, f)))
if in_memory:
images[-1].load()
return images
| 31.090526 | 101 | 0.609223 |
import os
import platform
import tempfile
import types
import shutil
import pathlib
from subprocess import Popen, PIPE
from PIL import Image
from .generators import uuid_generator, counter_generator, ThreadSafeGenerator
from .parsers import (
parse_buffer_to_pgm,
parse_buffer_to_ppm,
parse_buffer_to_jpeg,
parse_buffer_to_png,
)
from .exceptions import (
PopplerNotInstalledError,
PDFInfoNotInstalledError,
PDFPageCountError,
PDFSyntaxError,
)
TRANSPARENT_FILE_TYPES = ["png", "tiff"]
PDFINFO_CONVERT_TO_INT = ["Pages"]
def convert_from_path(
pdf_path,
dpi=200,
output_folder=None,
first_page=None,
last_page=None,
fmt="ppm",
jpegopt=None,
thread_count=1,
userpw=None,
use_cropbox=False,
strict=False,
transparent=False,
single_file=False,
output_file=uuid_generator(),
poppler_path=None,
grayscale=False,
size=None,
paths_only=False,
use_pdftocairo=False,
):
if use_pdftocairo and fmt == "ppm":
fmt = "png"
if isinstance(pdf_path, pathlib.PurePath):
pdf_path = pdf_path.as_posix()
if isinstance(output_folder, pathlib.PurePath):
output_folder = output_folder.as_posix()
if isinstance(poppler_path, pathlib.PurePath):
poppler_path = poppler_path.as_posix()
page_count = pdfinfo_from_path(pdf_path, userpw, poppler_path=poppler_path)["Pages"]
    # We start by getting the output format, the buffer processing function and whether we need pdftocairo
parsed_fmt, final_extension, parse_buffer_func, use_pdfcairo_format = _parse_format(
fmt, grayscale
)
    # We use pdftocairo if the format requires it OR if we need a transparent output
use_pdfcairo = (
use_pdftocairo
or use_pdfcairo_format
or (transparent and parsed_fmt in TRANSPARENT_FILE_TYPES)
)
poppler_version = _get_poppler_version(
"pdftocairo" if use_pdfcairo else "pdftoppm", poppler_path=poppler_path
)
if poppler_version <= 57:
jpegopt = None
# If output_file isn't a generator, it will be turned into one
if not isinstance(output_file, types.GeneratorType) and not isinstance(
output_file, ThreadSafeGenerator
):
if single_file:
output_file = iter([output_file])
else:
output_file = counter_generator(output_file)
if thread_count < 1:
thread_count = 1
if first_page is None:
first_page = 1
if last_page is None or last_page > page_count:
last_page = page_count
if first_page > last_page:
return []
auto_temp_dir = False
if output_folder is None and use_pdfcairo:
auto_temp_dir = True
output_folder = tempfile.mkdtemp()
page_count = last_page - first_page + 1
if thread_count > page_count:
thread_count = page_count
    remainder = page_count % thread_count
current_page = first_page
processes = []
for _ in range(thread_count):
thread_output_file = next(output_file)
        thread_page_count = page_count // thread_count + int(remainder > 0)
args = _build_command(
["-r", str(dpi), pdf_path],
output_folder,
current_page,
current_page + thread_page_count - 1,
parsed_fmt,
jpegopt,
thread_output_file,
userpw,
use_cropbox,
transparent,
single_file,
grayscale,
size,
)
if use_pdfcairo:
args = [_get_command_path("pdftocairo", poppler_path)] + args
else:
args = [_get_command_path("pdftoppm", poppler_path)] + args
current_page = current_page + thread_page_count
        remainder -= int(remainder > 0)
env = os.environ.copy()
if poppler_path is not None:
env["LD_LIBRARY_PATH"] = poppler_path + ":" + env.get("LD_LIBRARY_PATH", "")
processes.append(
(thread_output_file, Popen(args, env=env, stdout=PIPE, stderr=PIPE))
)
images = []
for uid, proc in processes:
data, err = proc.communicate()
if b"Syntax Error" in err and strict:
raise PDFSyntaxError(err.decode("utf8", "ignore"))
if output_folder is not None:
images += _load_from_output_folder(
output_folder, uid, final_extension, paths_only, in_memory=auto_temp_dir
)
else:
images += parse_buffer_func(data)
if auto_temp_dir:
shutil.rmtree(output_folder)
return images
def convert_from_bytes(
pdf_file,
dpi=200,
output_folder=None,
first_page=None,
last_page=None,
fmt="ppm",
jpegopt=None,
thread_count=1,
userpw=None,
use_cropbox=False,
strict=False,
transparent=False,
single_file=False,
output_file=uuid_generator(),
poppler_path=None,
grayscale=False,
size=None,
paths_only=False,
use_pdftocairo=False,
):
fh, temp_filename = tempfile.mkstemp()
try:
with open(temp_filename, "wb") as f:
f.write(pdf_file)
f.flush()
return convert_from_path(
f.name,
dpi=dpi,
output_folder=output_folder,
first_page=first_page,
last_page=last_page,
fmt=fmt,
jpegopt=jpegopt,
thread_count=thread_count,
userpw=userpw,
use_cropbox=use_cropbox,
strict=strict,
transparent=transparent,
single_file=single_file,
output_file=output_file,
poppler_path=poppler_path,
grayscale=grayscale,
size=size,
paths_only=paths_only,
use_pdftocairo=use_pdftocairo,
)
finally:
os.close(fh)
os.remove(temp_filename)
def _build_command(
args,
output_folder,
first_page,
last_page,
fmt,
jpegopt,
output_file,
userpw,
use_cropbox,
transparent,
single_file,
grayscale,
size,
):
if use_cropbox:
args.append("-cropbox")
if transparent and fmt in TRANSPARENT_FILE_TYPES:
args.append("-transp")
if first_page is not None:
args.extend(["-f", str(first_page)])
if last_page is not None:
args.extend(["-l", str(last_page)])
if fmt not in ["pgm", "ppm"]:
args.append("-" + fmt)
if fmt in ["jpeg", "jpg"] and jpegopt:
args.extend(["-jpegopt", _parse_jpegopt(jpegopt)])
if single_file:
args.append("-singlefile")
if output_folder is not None:
args.append(os.path.join(output_folder, output_file))
if userpw is not None:
args.extend(["-upw", userpw])
if grayscale:
args.append("-gray")
if size is None:
pass
elif isinstance(size, tuple) and len(size) == 2:
if size[0] is not None:
args.extend(["-scale-to-x", str(int(size[0]))])
else:
args.extend(["-scale-to-x", str(-1)])
if size[1] is not None:
args.extend(["-scale-to-y", str(int(size[1]))])
else:
args.extend(["-scale-to-y", str(-1)])
elif isinstance(size, tuple) and len(size) == 1:
args.extend(["-scale-to", str(int(size[0]))])
elif isinstance(size, int) or isinstance(size, float):
args.extend(["-scale-to", str(int(size))])
else:
        raise ValueError("Size {} is not a tuple or an integer".format(size))
return args
def _parse_format(fmt, grayscale=False):
fmt = fmt.lower()
if fmt[0] == ".":
fmt = fmt[1:]
if fmt in ("jpeg", "jpg"):
return "jpeg", "jpg", parse_buffer_to_jpeg, False
if fmt == "png":
return "png", "png", parse_buffer_to_png, False
if fmt in ("tif", "tiff"):
return "tiff", "tif", None, True
if fmt == "ppm" and grayscale:
return "pgm", "pgm", parse_buffer_to_pgm, False
return "ppm", "ppm", parse_buffer_to_ppm, False
def _parse_jpegopt(jpegopt):
parts = []
for k, v in jpegopt.items():
if v is True:
v = "y"
if v is False:
v = "n"
parts.append("{}={}".format(k, v))
return ",".join(parts)
def _get_command_path(command, poppler_path=None):
if platform.system() == "Windows":
command = command + ".exe"
if poppler_path is not None:
command = os.path.join(poppler_path, command)
return command
def _get_poppler_version(command, poppler_path=None):
command = [_get_command_path(command, poppler_path), "-v"]
env = os.environ.copy()
if poppler_path is not None:
env["LD_LIBRARY_PATH"] = poppler_path + ":" + env.get("LD_LIBRARY_PATH", "")
proc = Popen(command, env=env, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate()
try:
# TODO: Make this more robust
return int(
err.decode("utf8", "ignore").split("\n")[0].split(" ")[-1].split(".")[1]
)
except:
# Lowest version that includes pdftocairo (2011)
return 17
def pdfinfo_from_path(pdf_path, userpw=None, poppler_path=None):
try:
command = [_get_command_path("pdfinfo", poppler_path), pdf_path]
if userpw is not None:
command.extend(["-upw", userpw])
# Add poppler path to LD_LIBRARY_PATH
env = os.environ.copy()
if poppler_path is not None:
env["LD_LIBRARY_PATH"] = poppler_path + ":" + env.get("LD_LIBRARY_PATH", "")
proc = Popen(command, env=env, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate()
d = {}
for field in out.decode("utf8", "ignore").split("\n"):
sf = field.split(":")
key, value = sf[0], ":".join(sf[1:])
if key != "":
d[key] = (
int(value.strip())
if key in PDFINFO_CONVERT_TO_INT
else value.strip()
)
if "Pages" not in d:
raise ValueError
return d
except OSError:
raise PDFInfoNotInstalledError(
"Unable to get page count. Is poppler installed and in PATH?"
)
except ValueError:
raise PDFPageCountError(
"Unable to get page count.\n%s" % err.decode("utf8", "ignore")
)
def pdfinfo_from_bytes(pdf_file):
fh, temp_filename = tempfile.mkstemp()
try:
with open(temp_filename, "wb") as f:
f.write(pdf_file)
f.flush()
return pdfinfo_from_path(temp_filename)
finally:
os.close(fh)
os.remove(temp_filename)
def _load_from_output_folder(
output_folder, output_file, ext, paths_only, in_memory=False
):
images = []
for f in sorted(os.listdir(output_folder)):
if f.startswith(output_file) and f.split(".")[-1] == ext:
if paths_only:
images.append(os.path.join(output_folder, f))
else:
images.append(Image.open(os.path.join(output_folder, f)))
if in_memory:
images[-1].load()
return images
| true | true |
f71c70ebaffb4cb67ea8865b24dfc0fdb55a9000 | 2,986 | py | Python | resources/test_data/honeycomb/pbb/pbb.py | preym17/csit | 3151c98618c78e3782e48bbe4d9c8f906c126f69 | [
"Apache-2.0"
] | null | null | null | resources/test_data/honeycomb/pbb/pbb.py | preym17/csit | 3151c98618c78e3782e48bbe4d9c8f906c126f69 | [
"Apache-2.0"
] | null | null | null | resources/test_data/honeycomb/pbb/pbb.py | preym17/csit | 3151c98618c78e3782e48bbe4d9c8f906c126f69 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2016 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test variables for provider backbone bridge test suite."""
# pylint: disable=invalid-name
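# Each cfg_* dictionary below is a "pbb-rewrite" configuration payload used
# by the Honeycomb PBB test suite; the "wrong"/"missing" variants carry
# invalid or absent fields for negative tests.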
# Add pbb sub interface
# Configuration data
cfg_pbb_sub_if_1 = {
"pbb-rewrite": {
"source-address": "aa:aa:aa:aa:aa:ab",
"destination-address": "bb:bb:bb:bb:bb:bc",
"b-vlan-tag-vlan-id": "2223",
"outer-tag": "16",
"i-tag-isid": "12",
"interface-operation": "translate-2-1"
}
}
# Modify pbb sub interface
# Configuration data
cfg_pbb_sub_if_1_mod = {
"pbb-rewrite": {
"source-address": "aa:aa:aa:aa:aa:ac",
"destination-address": "bb:bb:bb:bb:bb:bd",
"b-vlan-tag-vlan-id": "2224",
"outer-tag": "17",
"i-tag-isid": "13",
"interface-operation": "push-2"
}
}
# Wrong configuration data
# ID of the PBB sub-interface targeted by the negative configurations below
cfg_pbb_sub_if_ID = '5'
# Wrong source-address
cfg_pbb_sub_if_wrong_src_addr = {
"pbb-rewrite": {
"source-address": "aa:aa:aa:aa:aa:ag",
"destination-address": "bb:bb:bb:bb:bb:ce",
"b-vlan-tag-vlan-id": "2226",
"outer-tag": "19",
"i-tag-isid": "15",
"interface-operation": "pop-2"
}
}
# Wrong destination-address
cfg_pbb_sub_if_wrong_dst_addr = {
"pbb-rewrite": {
"source-address": "aa:aa:aa:aa:aa:ae",
"destination-address": "bb:bb:bb:bb:bb:cg",
"b-vlan-tag-vlan-id": "2226",
"outer-tag": "19",
"i-tag-isid": "15",
"interface-operation": "pop-2"
}
}
# Wrong b-vlan-tag-vlan-id
cfg_pbb_sub_if_wrong_vlan_tag = {
"pbb-rewrite": {
"source-address": "aa:aa:aa:aa:aa:ae",
"destination-address": "bb:bb:bb:bb:bb:ce",
"b-vlan-tag-vlan-id": "123456789",
"outer-tag": "19",
"i-tag-isid": "15",
"interface-operation": "pop-2"
}
}
# Wrong i-tag-isid
cfg_pbb_sub_if_wrong_i_tag = {
"pbb-rewrite": {
"source-address": "aa:aa:aa:aa:aa:ae",
"destination-address": "bb:bb:bb:bb:bb:ce",
"b-vlan-tag-vlan-id": "2226",
"outer-tag": "19",
"i-tag-isid": "167772152345",
"interface-operation": "pop-2"
}
}
# b-vlan-tag-vlan-id is missing
cfg_pbb_sub_if_no_vlan_tag = {
"pbb-rewrite": {
"source-address": "aa:aa:aa:aa:aa:ae",
"destination-address": "bb:bb:bb:bb:bb:ce",
"outer-tag": "19",
"i-tag-isid": "15",
"interface-operation": "pop-2"
}
}
| 28.711538 | 74 | 0.598794 |
cfg_pbb_sub_if_1 = {
"pbb-rewrite": {
"source-address": "aa:aa:aa:aa:aa:ab",
"destination-address": "bb:bb:bb:bb:bb:bc",
"b-vlan-tag-vlan-id": "2223",
"outer-tag": "16",
"i-tag-isid": "12",
"interface-operation": "translate-2-1"
}
}
cfg_pbb_sub_if_1_mod = {
"pbb-rewrite": {
"source-address": "aa:aa:aa:aa:aa:ac",
"destination-address": "bb:bb:bb:bb:bb:bd",
"b-vlan-tag-vlan-id": "2224",
"outer-tag": "17",
"i-tag-isid": "13",
"interface-operation": "push-2"
}
}
cfg_pbb_sub_if_ID = '5'
cfg_pbb_sub_if_wrong_src_addr = {
"pbb-rewrite": {
"source-address": "aa:aa:aa:aa:aa:ag",
"destination-address": "bb:bb:bb:bb:bb:ce",
"b-vlan-tag-vlan-id": "2226",
"outer-tag": "19",
"i-tag-isid": "15",
"interface-operation": "pop-2"
}
}
cfg_pbb_sub_if_wrong_dst_addr = {
"pbb-rewrite": {
"source-address": "aa:aa:aa:aa:aa:ae",
"destination-address": "bb:bb:bb:bb:bb:cg",
"b-vlan-tag-vlan-id": "2226",
"outer-tag": "19",
"i-tag-isid": "15",
"interface-operation": "pop-2"
}
}
cfg_pbb_sub_if_wrong_vlan_tag = {
"pbb-rewrite": {
"source-address": "aa:aa:aa:aa:aa:ae",
"destination-address": "bb:bb:bb:bb:bb:ce",
"b-vlan-tag-vlan-id": "123456789",
"outer-tag": "19",
"i-tag-isid": "15",
"interface-operation": "pop-2"
}
}
cfg_pbb_sub_if_wrong_i_tag = {
"pbb-rewrite": {
"source-address": "aa:aa:aa:aa:aa:ae",
"destination-address": "bb:bb:bb:bb:bb:ce",
"b-vlan-tag-vlan-id": "2226",
"outer-tag": "19",
"i-tag-isid": "167772152345",
"interface-operation": "pop-2"
}
}
cfg_pbb_sub_if_no_vlan_tag = {
"pbb-rewrite": {
"source-address": "aa:aa:aa:aa:aa:ae",
"destination-address": "bb:bb:bb:bb:bb:ce",
"outer-tag": "19",
"i-tag-isid": "15",
"interface-operation": "pop-2"
}
}
| true | true |
f71c717d6fee0d1489bbe83962abdd173aae3304 | 668 | py | Python | setup.py | ishine/fastHan | 09550a750bb06b89b81769b8786a7eb3f8ca5713 | [
"Apache-2.0"
] | 635 | 2020-06-11T12:32:53.000Z | 2022-03-31T09:31:32.000Z | setup.py | ishine/fastHan | 09550a750bb06b89b81769b8786a7eb3f8ca5713 | [
"Apache-2.0"
] | 37 | 2020-06-12T10:07:47.000Z | 2022-03-10T02:46:52.000Z | setup.py | ishine/fastHan | 09550a750bb06b89b81769b8786a7eb3f8ca5713 | [
"Apache-2.0"
] | 77 | 2020-06-11T17:08:17.000Z | 2022-03-30T05:40:10.000Z | #!/usr/bin/env python
# coding=utf-8
from setuptools import setup, find_packages
with open('README.md', encoding='utf-8') as f:
readme = f.read()
with open('requirements.txt', encoding='utf-8') as f:
reqs = f.read()
pkgs = find_packages()
print(pkgs)
setup(
name='fastHan',
version='1.7',
url='https://github.com/fastnlp/fastHan',
description=(
'使用深度学习联合模型,解决中文分词、词性标注、依存分析、命名实体识别任务。'
),
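    # The description above is Chinese; in English: a joint deep-learning
    # model for Chinese word segmentation, POS tagging, dependency parsing,
    # and named-entity recognition.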
long_description=readme,
long_description_content_type='text/markdown',
author='耿志超',
license='Apache License',
python_requires='>=3.6',
packages=pkgs,
install_requires=reqs.strip().split('\n'),
)
| 21.548387 | 53 | 0.652695 |
from setuptools import setup, find_packages
with open('README.md', encoding='utf-8') as f:
readme = f.read()
with open('requirements.txt', encoding='utf-8') as f:
reqs = f.read()
pkgs = find_packages()
print(pkgs)
setup(
name='fastHan',
version='1.7',
url='https://github.com/fastnlp/fastHan',
description=(
'使用深度学习联合模型,解决中文分词、词性标注、依存分析、命名实体识别任务。'
),
long_description=readme,
long_description_content_type='text/markdown',
author='耿志超',
license='Apache License',
python_requires='>=3.6',
packages=pkgs,
install_requires=reqs.strip().split('\n'),
)
| true | true |
f71c719a80b561157aaf6bc9411436cc6e44d60f | 3,425 | py | Python | minemeld/ft/vt.py | zul126/minemeld-core | 2eb9b9bfd7654aee57aabd5fb280d4e89a438daf | [
"Apache-2.0"
] | 147 | 2016-07-22T18:15:49.000Z | 2022-03-26T23:32:44.000Z | minemeld/ft/vt.py | zul126/minemeld-core | 2eb9b9bfd7654aee57aabd5fb280d4e89a438daf | [
"Apache-2.0"
] | 167 | 2016-07-27T07:02:25.000Z | 2021-12-16T16:26:52.000Z | minemeld/ft/vt.py | zul126/minemeld-core | 2eb9b9bfd7654aee57aabd5fb280d4e89a438daf | [
"Apache-2.0"
] | 112 | 2016-07-22T07:14:29.000Z | 2022-03-24T18:43:12.000Z | # Copyright 2016 Palo Alto Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module implements:
- minemeld.ft.vt.Notifications, the Miner node for VirusTotal Notifications
feed
"""
import logging
import os
import yaml
from . import json
LOG = logging.getLogger(__name__)
_VT_NOTIFICATIONS = 'https://www.virustotal.com/intelligence/hunting/notifications-feed/?key='
class Notifications(json.SimpleJSON):
def __init__(self, name, chassis, config):
super(Notifications, self).__init__(name, chassis, config)
self.api_key = None
def configure(self):
self.config['url'] = None
self.config['extractor'] = 'notifications'
self.config['prefix'] = 'vt'
super(Notifications, self).configure()
self.side_config_path = self.config.get('side_config', None)
if self.side_config_path is None:
self.side_config_path = os.path.join(
os.environ['MM_CONFIG_DIR'],
'%s_side_config.yml' % self.name
)
self._load_side_config()
def _load_side_config(self):
try:
with open(self.side_config_path, 'r') as f:
sconfig = yaml.safe_load(f)
except Exception as e:
LOG.error('%s - Error loading side config: %s', self.name, str(e))
return
self.api_key = sconfig.get('api_key', None)
if self.api_key is not None:
LOG.info('%s - api key set', self.name)
self.url = _VT_NOTIFICATIONS + self.api_key
def _process_item(self, item):
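        # Fan one notification out into (indicator, value) pairs, one per
        # hash type present. Note: dict.iteritems() keeps this Python 2 only.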
result = []
for htype in ['md5', 'sha256', 'sha1']:
value = {self.prefix+'_'+k: v for k, v in item.iteritems()}
indicator = value.pop(self.prefix+'_'+htype, None)
value['type'] = htype
if indicator is not None:
result.append([indicator, value])
return result
def _build_iterator(self, now):
if self.api_key is None:
LOG.info('%s - API key not set', self.name)
raise RuntimeError(
'%s - API Key not set' % self.name
)
return super(Notifications, self)._build_iterator(now)
def hup(self, source=None):
LOG.info('%s - hup received, reload side config', self.name)
self._load_side_config()
super(Notifications, self).hup(source=source)
@staticmethod
def gc(name, config=None):
json.SimpleJSON.gc(name, config=config)
side_config_path = None
if config is not None:
side_config_path = config.get('side_config', None)
if side_config_path is None:
side_config_path = os.path.join(
os.environ['MM_CONFIG_DIR'],
'{}_side_config.yml'.format(name)
)
try:
os.remove(side_config_path)
except:
pass
| 30.580357 | 94 | 0.616058 |
import logging
import os
import yaml
from . import json
LOG = logging.getLogger(__name__)
_VT_NOTIFICATIONS = 'https://www.virustotal.com/intelligence/hunting/notifications-feed/?key='
class Notifications(json.SimpleJSON):
def __init__(self, name, chassis, config):
super(Notifications, self).__init__(name, chassis, config)
self.api_key = None
def configure(self):
self.config['url'] = None
self.config['extractor'] = 'notifications'
self.config['prefix'] = 'vt'
super(Notifications, self).configure()
self.side_config_path = self.config.get('side_config', None)
if self.side_config_path is None:
self.side_config_path = os.path.join(
os.environ['MM_CONFIG_DIR'],
'%s_side_config.yml' % self.name
)
self._load_side_config()
def _load_side_config(self):
try:
with open(self.side_config_path, 'r') as f:
sconfig = yaml.safe_load(f)
except Exception as e:
LOG.error('%s - Error loading side config: %s', self.name, str(e))
return
self.api_key = sconfig.get('api_key', None)
if self.api_key is not None:
LOG.info('%s - api key set', self.name)
self.url = _VT_NOTIFICATIONS + self.api_key
def _process_item(self, item):
result = []
for htype in ['md5', 'sha256', 'sha1']:
value = {self.prefix+'_'+k: v for k, v in item.iteritems()}
indicator = value.pop(self.prefix+'_'+htype, None)
value['type'] = htype
if indicator is not None:
result.append([indicator, value])
return result
def _build_iterator(self, now):
if self.api_key is None:
LOG.info('%s - API key not set', self.name)
raise RuntimeError(
'%s - API Key not set' % self.name
)
return super(Notifications, self)._build_iterator(now)
def hup(self, source=None):
LOG.info('%s - hup received, reload side config', self.name)
self._load_side_config()
super(Notifications, self).hup(source=source)
@staticmethod
def gc(name, config=None):
json.SimpleJSON.gc(name, config=config)
side_config_path = None
if config is not None:
side_config_path = config.get('side_config', None)
if side_config_path is None:
side_config_path = os.path.join(
os.environ['MM_CONFIG_DIR'],
'{}_side_config.yml'.format(name)
)
try:
os.remove(side_config_path)
except:
pass
| true | true |
f71c722935297413c44452739b1c5efe80dcce1c | 5,736 | py | Python | tests/test_ac.py | knovichikhin/pyemv | 4a07cb550f27618822b530c6aa954e0820020ae7 | [
"MIT"
] | 14 | 2020-11-01T11:44:41.000Z | 2022-03-24T15:53:23.000Z | tests/test_ac.py | manoutoftime/pyemv | 4a07cb550f27618822b530c6aa954e0820020ae7 | [
"MIT"
] | 3 | 2021-06-18T01:36:00.000Z | 2021-10-17T02:09:50.000Z | tests/test_ac.py | manoutoftime/pyemv | 4a07cb550f27618822b530c6aa954e0820020ae7 | [
"MIT"
] | 8 | 2020-10-09T20:23:39.000Z | 2022-03-31T00:56:47.000Z | import pytest
from pyemv import ac
def test_generate_ac_exception() -> None:
# SK < 16 bytes
with pytest.raises(
ValueError,
match="Session Key must be a double length DES key",
):
ac.generate_ac(
sk_ac=bytes.fromhex("AAAAAAAAAAAAAAAA"),
data=bytes.fromhex("12345678901214"),
)
# SK > 16 bytes
with pytest.raises(
ValueError,
match="Session Key must be a double length DES key",
):
ac.generate_ac(
sk_ac=bytes.fromhex("AAAAAAAAAAAAAAAABBBBBBBBBBBBBBBBCCCCCCCCCCCCCCCC"),
data=bytes.fromhex("12345678901214"),
)
# Invalid padding type
with pytest.raises(
TypeError,
match="Padding type must be PaddingType Enum, not dict",
):
ac.generate_ac(
sk_ac=bytes.fromhex("AAAAAAAAAAAAAAAABBBBBBBBBBBBBBBB"),
data=bytes.fromhex("12345678901214"),
padding_type={}, # type: ignore
)
def test_generate_arpc_1_exception() -> None:
# SK < 16 bytes
with pytest.raises(
ValueError,
match="Session Key must be a double length DES key",
):
ac.generate_arpc_1(
sk_ac=bytes.fromhex("AAAAAAAAAAAAAAAA"),
arqc=bytes.fromhex("12345678"),
arpc_rc=bytes.fromhex("0000"),
)
# SK > 16 bytes
with pytest.raises(
ValueError,
match="Session Key must be a double length DES key",
):
ac.generate_arpc_1(
sk_ac=bytes.fromhex("AAAAAAAAAAAAAAAABBBBBBBBBBBBBBBBCCCCCCCCCCCCCCCC"),
arqc=bytes.fromhex("12345678"),
arpc_rc=bytes.fromhex("0000"),
)
# ARQC < 8 bytes
with pytest.raises(
ValueError,
match="ARQC must be 8 bytes long",
):
ac.generate_arpc_1(
sk_ac=bytes.fromhex("AAAAAAAAAAAAAAAABBBBBBBBBBBBBBBB"),
arqc=bytes.fromhex("12345678"),
arpc_rc=bytes.fromhex("0000"),
)
    # ARQC > 8 bytes
with pytest.raises(
ValueError,
match="ARQC must be 8 bytes long",
):
ac.generate_arpc_1(
sk_ac=bytes.fromhex("AAAAAAAAAAAAAAAABBBBBBBBBBBBBBBB"),
arqc=bytes.fromhex("1234567890ABCDEF12"),
arpc_rc=bytes.fromhex("0000"),
)
# ARPC-RC < 2 bytes
with pytest.raises(
ValueError,
match="ARPC-RC must be 2 bytes long",
):
ac.generate_arpc_1(
sk_ac=bytes.fromhex("AAAAAAAAAAAAAAAABBBBBBBBBBBBBBBB"),
arqc=bytes.fromhex("1234567890123456"),
arpc_rc=bytes.fromhex("00"),
)
# ARPC-RC > 2 bytes
with pytest.raises(
ValueError,
match="ARPC-RC must be 2 bytes long",
):
ac.generate_arpc_1(
sk_ac=bytes.fromhex("AAAAAAAAAAAAAAAABBBBBBBBBBBBBBBB"),
arqc=bytes.fromhex("1234567890123456"),
arpc_rc=bytes.fromhex("001122"),
)
def test_generate_arpc_2_exception() -> None:
# SK < 16 bytes
with pytest.raises(
ValueError,
match="Session Key must be a double length DES key",
):
ac.generate_arpc_2(
sk_ac=bytes.fromhex("AAAAAAAAAAAAAAAA"),
arqc=bytes.fromhex("12345678901214"),
csu=bytes.fromhex("12345678"),
prop_auth_data=bytes.fromhex("1234567890123456"),
)
# SK > 16 bytes
with pytest.raises(
ValueError,
match="Session Key must be a double length DES key",
):
ac.generate_arpc_2(
sk_ac=bytes.fromhex("AAAAAAAAAAAAAAAABBBBBBBBBBBBBBBBCCCCCCCCCCCCCCCC"),
arqc=bytes.fromhex("12345678901214"),
csu=bytes.fromhex("12345678"),
prop_auth_data=bytes.fromhex("1234567890123456"),
)
# ARQC < 8 bytes
with pytest.raises(
ValueError,
match="ARQC must be 8 bytes long",
):
ac.generate_arpc_2(
sk_ac=bytes.fromhex("AAAAAAAAAAAAAAAABBBBBBBBBBBBBBBB"),
arqc=bytes.fromhex("12345678901214"),
csu=bytes.fromhex("12345678"),
prop_auth_data=bytes.fromhex("1234567890123456"),
)
    # ARQC > 8 bytes
with pytest.raises(
ValueError,
match="ARQC must be 8 bytes long",
):
ac.generate_arpc_2(
sk_ac=bytes.fromhex("AAAAAAAAAAAAAAAABBBBBBBBBBBBBBBB"),
arqc=bytes.fromhex("1234567890ABCDEF12"),
csu=bytes.fromhex("12345678"),
prop_auth_data=bytes.fromhex("1234567890123456"),
)
# CSU < 4 bytes
with pytest.raises(
ValueError,
match="CSU must be 4 bytes long",
):
ac.generate_arpc_2(
sk_ac=bytes.fromhex("AAAAAAAAAAAAAAAABBBBBBBBBBBBBBBB"),
arqc=bytes.fromhex("1234567890121456"),
csu=bytes.fromhex("123456"),
prop_auth_data=bytes.fromhex("1234567890123456"),
)
# CSU > 4 bytes
with pytest.raises(
ValueError,
match="CSU must be 4 bytes long",
):
ac.generate_arpc_2(
sk_ac=bytes.fromhex("AAAAAAAAAAAAAAAABBBBBBBBBBBBBBBB"),
arqc=bytes.fromhex("1234567890121456"),
csu=bytes.fromhex("1234567890"),
prop_auth_data=bytes.fromhex("1234567890123456"),
)
# PAD > 8 bytes
with pytest.raises(
ValueError,
match="Proprietary Authentication Data must be 0-8 bytes long",
):
ac.generate_arpc_2(
sk_ac=bytes.fromhex("AAAAAAAAAAAAAAAABBBBBBBBBBBBBBBB"),
arqc=bytes.fromhex("1234567890121456"),
csu=bytes.fromhex("12345678"),
prop_auth_data=bytes.fromhex("123456789012345678"),
)
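# For contrast with the failure cases above, a well-formed call uses a
# 16-byte (double-length DES) session key, an 8-byte ARQC and a 2-byte
# ARPC-RC. The hex values here are illustrative placeholders only:
#
#     arpc = ac.generate_arpc_1(
#         sk_ac=bytes.fromhex("AA" * 16),
#         arqc=bytes.fromhex("0011223344556677"),
#         arpc_rc=bytes.fromhex("3030"),
#     )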
| 30.031414 | 84 | 0.591702 | import pytest
from pyemv import ac
def test_generate_ac_exception() -> None:
with pytest.raises(
ValueError,
match="Session Key must be a double length DES key",
):
ac.generate_ac(
sk_ac=bytes.fromhex("AAAAAAAAAAAAAAAA"),
data=bytes.fromhex("12345678901214"),
)
with pytest.raises(
ValueError,
match="Session Key must be a double length DES key",
):
ac.generate_ac(
sk_ac=bytes.fromhex("AAAAAAAAAAAAAAAABBBBBBBBBBBBBBBBCCCCCCCCCCCCCCCC"),
data=bytes.fromhex("12345678901214"),
)
with pytest.raises(
TypeError,
match="Padding type must be PaddingType Enum, not dict",
):
ac.generate_ac(
sk_ac=bytes.fromhex("AAAAAAAAAAAAAAAABBBBBBBBBBBBBBBB"),
data=bytes.fromhex("12345678901214"),
padding_type={},
)
def test_generate_arpc_1_exception() -> None:
with pytest.raises(
ValueError,
match="Session Key must be a double length DES key",
):
ac.generate_arpc_1(
sk_ac=bytes.fromhex("AAAAAAAAAAAAAAAA"),
arqc=bytes.fromhex("12345678"),
arpc_rc=bytes.fromhex("0000"),
)
with pytest.raises(
ValueError,
match="Session Key must be a double length DES key",
):
ac.generate_arpc_1(
sk_ac=bytes.fromhex("AAAAAAAAAAAAAAAABBBBBBBBBBBBBBBBCCCCCCCCCCCCCCCC"),
arqc=bytes.fromhex("12345678"),
arpc_rc=bytes.fromhex("0000"),
)
with pytest.raises(
ValueError,
match="ARQC must be 8 bytes long",
):
ac.generate_arpc_1(
sk_ac=bytes.fromhex("AAAAAAAAAAAAAAAABBBBBBBBBBBBBBBB"),
arqc=bytes.fromhex("12345678"),
arpc_rc=bytes.fromhex("0000"),
)
with pytest.raises(
ValueError,
match="ARQC must be 8 bytes long",
):
ac.generate_arpc_1(
sk_ac=bytes.fromhex("AAAAAAAAAAAAAAAABBBBBBBBBBBBBBBB"),
arqc=bytes.fromhex("1234567890ABCDEF12"),
arpc_rc=bytes.fromhex("0000"),
)
with pytest.raises(
ValueError,
match="ARPC-RC must be 2 bytes long",
):
ac.generate_arpc_1(
sk_ac=bytes.fromhex("AAAAAAAAAAAAAAAABBBBBBBBBBBBBBBB"),
arqc=bytes.fromhex("1234567890123456"),
arpc_rc=bytes.fromhex("00"),
)
with pytest.raises(
ValueError,
match="ARPC-RC must be 2 bytes long",
):
ac.generate_arpc_1(
sk_ac=bytes.fromhex("AAAAAAAAAAAAAAAABBBBBBBBBBBBBBBB"),
arqc=bytes.fromhex("1234567890123456"),
arpc_rc=bytes.fromhex("001122"),
)
def test_generate_arpc_2_exception() -> None:
with pytest.raises(
ValueError,
match="Session Key must be a double length DES key",
):
ac.generate_arpc_2(
sk_ac=bytes.fromhex("AAAAAAAAAAAAAAAA"),
arqc=bytes.fromhex("12345678901214"),
csu=bytes.fromhex("12345678"),
prop_auth_data=bytes.fromhex("1234567890123456"),
)
with pytest.raises(
ValueError,
match="Session Key must be a double length DES key",
):
ac.generate_arpc_2(
sk_ac=bytes.fromhex("AAAAAAAAAAAAAAAABBBBBBBBBBBBBBBBCCCCCCCCCCCCCCCC"),
arqc=bytes.fromhex("12345678901214"),
csu=bytes.fromhex("12345678"),
prop_auth_data=bytes.fromhex("1234567890123456"),
)
with pytest.raises(
ValueError,
match="ARQC must be 8 bytes long",
):
ac.generate_arpc_2(
sk_ac=bytes.fromhex("AAAAAAAAAAAAAAAABBBBBBBBBBBBBBBB"),
arqc=bytes.fromhex("12345678901214"),
csu=bytes.fromhex("12345678"),
prop_auth_data=bytes.fromhex("1234567890123456"),
)
with pytest.raises(
ValueError,
match="ARQC must be 8 bytes long",
):
ac.generate_arpc_2(
sk_ac=bytes.fromhex("AAAAAAAAAAAAAAAABBBBBBBBBBBBBBBB"),
arqc=bytes.fromhex("1234567890ABCDEF12"),
csu=bytes.fromhex("12345678"),
prop_auth_data=bytes.fromhex("1234567890123456"),
)
with pytest.raises(
ValueError,
match="CSU must be 4 bytes long",
):
ac.generate_arpc_2(
sk_ac=bytes.fromhex("AAAAAAAAAAAAAAAABBBBBBBBBBBBBBBB"),
arqc=bytes.fromhex("1234567890121456"),
csu=bytes.fromhex("123456"),
prop_auth_data=bytes.fromhex("1234567890123456"),
)
with pytest.raises(
ValueError,
match="CSU must be 4 bytes long",
):
ac.generate_arpc_2(
sk_ac=bytes.fromhex("AAAAAAAAAAAAAAAABBBBBBBBBBBBBBBB"),
arqc=bytes.fromhex("1234567890121456"),
csu=bytes.fromhex("1234567890"),
prop_auth_data=bytes.fromhex("1234567890123456"),
)
with pytest.raises(
ValueError,
match="Proprietary Authentication Data must be 0-8 bytes long",
):
ac.generate_arpc_2(
sk_ac=bytes.fromhex("AAAAAAAAAAAAAAAABBBBBBBBBBBBBBBB"),
arqc=bytes.fromhex("1234567890121456"),
csu=bytes.fromhex("12345678"),
prop_auth_data=bytes.fromhex("123456789012345678"),
)
| true | true |
f71c735f5bbe363994606cb49a6742b243a951ed | 2,203 | py | Python | DeepJIT/train.py | ZZR0/ISSTA21-JIT-DP | c2916f7c3b1d235ff2858220886d6a7da068bf8a | [
"MIT"
] | 14 | 2021-07-12T07:29:57.000Z | 2022-01-18T07:01:46.000Z | DeepJIT/train.py | ZZR0/ISSTA21-JIT-DP | c2916f7c3b1d235ff2858220886d6a7da068bf8a | [
"MIT"
] | null | null | null | DeepJIT/train.py | ZZR0/ISSTA21-JIT-DP | c2916f7c3b1d235ff2858220886d6a7da068bf8a | [
"MIT"
] | 7 | 2021-05-19T21:51:36.000Z | 2022-03-29T13:57:54.000Z | from model import DeepJIT
import torch
from tqdm import tqdm
from utils import mini_batches_train, save
import torch.nn as nn
import os, datetime
def train_model(data, params):
data_pad_msg, data_pad_code, data_labels, dict_msg, dict_code = data
# set up parameters
params.cuda = (not params.no_cuda) and torch.cuda.is_available()
del params.no_cuda
params.filter_sizes = [int(k) for k in params.filter_sizes.split(',')]
# params.save_dir = os.path.join(params.save_dir, datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
params.vocab_msg, params.vocab_code = len(dict_msg), len(dict_code)
if len(data_labels.shape) == 1:
params.class_num = 1
else:
params.class_num = data_labels.shape[1]
params.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# create and train the defect model
model = DeepJIT(args=params)
if torch.cuda.is_available():
model = model.cuda()
optimizer = torch.optim.Adam(model.parameters(), lr=params.l2_reg_lambda)
criterion = nn.BCELoss()
for epoch in range(1, params.num_epochs + 1):
total_loss = 0
# building batches for training model
batches = mini_batches_train(X_msg=data_pad_msg, X_code=data_pad_code, Y=data_labels, mini_batch_size=params.batch_size)
for i, (batch) in enumerate(tqdm(batches)):
pad_msg, pad_code, labels = batch
if torch.cuda.is_available():
pad_msg, pad_code, labels = torch.tensor(pad_msg).cuda(), torch.tensor(
pad_code).cuda(), torch.cuda.FloatTensor(labels)
else:
pad_msg, pad_code, labels = torch.tensor(pad_msg).long(), torch.tensor(pad_code).long(), torch.tensor(
labels).float()
optimizer.zero_grad()
predict = model.forward(pad_msg, pad_code)
loss = criterion(predict, labels)
            total_loss += loss.item()  # accumulate a plain float so each batch's graph can be freed
loss.backward()
optimizer.step()
print('Epoch %i / %i -- Total loss: %f' % (epoch, params.num_epochs, total_loss))
save(model, params.save_dir, 'epoch', epoch)
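# Sketch of the `params` namespace train_model expects, inferred from the
# attribute accesses above; an argparse.Namespace works and the values are
# illustrative:
#
#     params = argparse.Namespace(
#         no_cuda=False, filter_sizes='1,2,3', batch_size=64,
#         num_epochs=25, l2_reg_lambda=1e-5, save_dir='snapshot')
#
# Note that, as written, `l2_reg_lambda` is passed to Adam as the learning rate.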
| 40.054545 | 128 | 0.635951 | from model import DeepJIT
import torch
from tqdm import tqdm
from utils import mini_batches_train, save
import torch.nn as nn
import os, datetime
def train_model(data, params):
data_pad_msg, data_pad_code, data_labels, dict_msg, dict_code = data
params.cuda = (not params.no_cuda) and torch.cuda.is_available()
del params.no_cuda
params.filter_sizes = [int(k) for k in params.filter_sizes.split(',')]
params.vocab_msg, params.vocab_code = len(dict_msg), len(dict_code)
if len(data_labels.shape) == 1:
params.class_num = 1
else:
params.class_num = data_labels.shape[1]
params.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = DeepJIT(args=params)
if torch.cuda.is_available():
model = model.cuda()
optimizer = torch.optim.Adam(model.parameters(), lr=params.l2_reg_lambda)
criterion = nn.BCELoss()
for epoch in range(1, params.num_epochs + 1):
total_loss = 0
batches = mini_batches_train(X_msg=data_pad_msg, X_code=data_pad_code, Y=data_labels, mini_batch_size=params.batch_size)
for i, (batch) in enumerate(tqdm(batches)):
pad_msg, pad_code, labels = batch
if torch.cuda.is_available():
pad_msg, pad_code, labels = torch.tensor(pad_msg).cuda(), torch.tensor(
pad_code).cuda(), torch.cuda.FloatTensor(labels)
else:
pad_msg, pad_code, labels = torch.tensor(pad_msg).long(), torch.tensor(pad_code).long(), torch.tensor(
labels).float()
optimizer.zero_grad()
predict = model.forward(pad_msg, pad_code)
loss = criterion(predict, labels)
            total_loss += loss.item()
loss.backward()
optimizer.step()
print('Epoch %i / %i -- Total loss: %f' % (epoch, params.num_epochs, total_loss))
save(model, params.save_dir, 'epoch', epoch)
| true | true |
f71c73b92c22513c6d322953955170aa4fd2838a | 524 | py | Python | BackEnd/testModel/mymodel/migrations/0012_auto_20180603_2214.py | WindyMen/BackEnd | d3e33b1b57734fcefda494793ed940e5b079c36b | [
"Apache-2.0"
] | 1 | 2018-06-28T02:35:15.000Z | 2018-06-28T02:35:15.000Z | BackEnd/testModel/mymodel/migrations/0012_auto_20180603_2214.py | WindyMen/BackEnd | d3e33b1b57734fcefda494793ed940e5b079c36b | [
"Apache-2.0"
] | null | null | null | BackEnd/testModel/mymodel/migrations/0012_auto_20180603_2214.py | WindyMen/BackEnd | d3e33b1b57734fcefda494793ed940e5b079c36b | [
"Apache-2.0"
] | null | null | null | # Generated by Django 2.0.3 on 2018-06-03 14:14
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('mymodel', '0011_auto_20180603_2208'),
]
operations = [
migrations.AlterField(
model_name='room',
name='owner',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='owner_rooms', to='mymodel.User'),
),
]
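# With this change, reverse lookups read `some_user.owner_rooms.all()`, and
# deleting a User cascades to their rooms (illustrative usage; the Room and
# User models themselves live in mymodel.models).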
| 26.2 | 140 | 0.624046 |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('mymodel', '0011_auto_20180603_2208'),
]
operations = [
migrations.AlterField(
model_name='room',
name='owner',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='owner_rooms', to='mymodel.User'),
),
]
| true | true |
f71c73d0a50c3eaf71f966a8af555fc36e37ec03 | 1,026 | py | Python | sortosm.py | TimSC/osm-to-gps-map | cedae6752b16e6f3e02a75f0a0ef784fd70298bf | [
"Unlicense"
] | 1 | 2020-06-15T02:22:08.000Z | 2020-06-15T02:22:08.000Z | sortosm.py | TimSC/osm-to-gps-map | cedae6752b16e6f3e02a75f0a0ef784fd70298bf | [
"Unlicense"
] | null | null | null | sortosm.py | TimSC/osm-to-gps-map | cedae6752b16e6f3e02a75f0a0ef784fd70298bf | [
"Unlicense"
] | null | null | null | import xml.etree.ElementTree as ET
import bz2, sys
def SortOsm(inFina, outFina):
fi = bz2.BZ2File(inFina)
root = ET.fromstring(fi.read())
fi.close()
objDict = {}
for obj in root:
if 'id' in obj.attrib:
i = int(obj.attrib['id'])
#print obj.tag, i
if obj.tag not in objDict:
objDict[obj.tag] = {}
objDict[obj.tag][i] = obj
#for ty in objDict:
# print ty, len(objDict[ty]), objDict[ty].keys()
outRoot = ET.Element("osm")
outTree = ET.ElementTree(outRoot)
outRoot.attrib = root.attrib
	if 'node' in objDict:
		for i in sorted(objDict['node'].keys()):
			outRoot.append(objDict['node'][i])
	if 'way' in objDict:
		for i in sorted(objDict['way'].keys()):
			outRoot.append(objDict['way'][i])
	if 'relation' in objDict:
		for i in sorted(objDict['relation'].keys()):
			outRoot.append(objDict['relation'][i])
fiOut = bz2.BZ2File(outFina,"w")
outTree.write(fiOut,"utf-8")
if __name__=="__main__":
SortOsm(sys.argv[1], sys.argv[2])
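# Example invocation (both arguments are bzip2-compressed OSM XML files,
# matching the bz2.BZ2File calls above):
#   python sortosm.py region.osm.bz2 region-sorted.osm.bz2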
| 20.52 | 49 | 0.645224 | import xml.etree.ElementTree as ET
import bz2, sys
def SortOsm(inFina, outFina):
fi = bz2.BZ2File(inFina)
root = ET.fromstring(fi.read())
fi.close()
objDict = {}
for obj in root:
if 'id' in obj.attrib:
i = int(obj.attrib['id'])
if obj.tag not in objDict:
objDict[obj.tag] = {}
objDict[obj.tag][i] = obj
outRoot = ET.Element("osm")
outTree = ET.ElementTree(outRoot)
outRoot.attrib = root.attrib
	if 'node' in objDict:
		for i in sorted(objDict['node'].keys()):
			outRoot.append(objDict['node'][i])
	if 'way' in objDict:
		for i in sorted(objDict['way'].keys()):
			outRoot.append(objDict['way'][i])
	if 'relation' in objDict:
		for i in sorted(objDict['relation'].keys()):
			outRoot.append(objDict['relation'][i])
fiOut = bz2.BZ2File(outFina,"w")
outTree.write(fiOut,"utf-8")
if __name__=="__main__":
SortOsm(sys.argv[1], sys.argv[2])
| true | true |
f71c7496c320b6938b1d6b9ee7d678add526adf7 | 4,780 | py | Python | salt/pillar/libvirt.py | hvnsweeting/salt | abc9d3a0b51e6f5c4738cf71c221daf8b46fddcf | [
"Apache-2.0"
] | 2 | 2015-09-21T14:13:30.000Z | 2016-02-12T11:33:46.000Z | salt/pillar/libvirt.py | hvnsweeting/salt | abc9d3a0b51e6f5c4738cf71c221daf8b46fddcf | [
"Apache-2.0"
] | null | null | null | salt/pillar/libvirt.py | hvnsweeting/salt | abc9d3a0b51e6f5c4738cf71c221daf8b46fddcf | [
"Apache-2.0"
] | 2 | 2017-01-05T16:14:59.000Z | 2019-01-31T23:15:25.000Z | # -*- coding: utf-8 -*-
'''
Load up the libvirt keys into Pillar for a given minion if said keys have been generated using the libvirt key runner
'''
from __future__ import absolute_import
# Don't "fix" the above docstring to put it on two lines, as the sphinx
# autosummary pulls only the first line for its description.
# Import python libs
import os
import subprocess
# Import salt libs
import salt.utils
def __virtual__():
return salt.utils.which('certtool') is not None
def ext_pillar(minion_id,
pillar, # pylint: disable=W0613
command): # pylint: disable=W0613
'''
Read in the generated libvirt keys
'''
key_dir = os.path.join(
__opts__['pki_dir'],
'libvirt',
minion_id)
cacert = os.path.join(__opts__['pki_dir'],
'libvirt',
'cacert.pem')
if not os.path.isdir(key_dir):
# No keys have been generated
gen_hyper_keys(minion_id)
ret = {}
for key in os.listdir(key_dir):
if not key.endswith('.pem'):
continue
fn_ = os.path.join(key_dir, key)
with salt.utils.fopen(fn_, 'r') as fp_:
ret['libvirt.{0}'.format(key)] = fp_.read()
with salt.utils.fopen(cacert, 'r') as fp_:
ret['libvirt.cacert.pem'] = fp_.read()
return ret
def gen_hyper_keys(minion_id,
country='US',
state='Utah',
locality='Salt Lake City',
organization='Salted'):
'''
Generate the keys to be used by libvirt hypervisors, this routine gens
the keys and applies them to the pillar for the hypervisor minions
'''
key_dir = os.path.join(
__opts__['pki_dir'],
'libvirt')
if not os.path.isdir(key_dir):
os.makedirs(key_dir)
cakey = os.path.join(key_dir, 'cakey.pem')
cacert = os.path.join(key_dir, 'cacert.pem')
cainfo = os.path.join(key_dir, 'ca.info')
if not os.path.isfile(cainfo):
with salt.utils.fopen(cainfo, 'w+') as fp_:
fp_.write('cn = salted\nca\ncert_signing_key')
if not os.path.isfile(cakey):
subprocess.call(
'certtool --generate-privkey > {0}'.format(cakey),
shell=True)
if not os.path.isfile(cacert):
cmd = ('certtool --generate-self-signed --load-privkey {0} '
'--template {1} --outfile {2}').format(cakey, cainfo, cacert)
subprocess.call(cmd, shell=True)
sub_dir = os.path.join(key_dir, minion_id)
if not os.path.isdir(sub_dir):
os.makedirs(sub_dir)
priv = os.path.join(sub_dir, 'serverkey.pem')
cert = os.path.join(sub_dir, 'servercert.pem')
srvinfo = os.path.join(sub_dir, 'server.info')
cpriv = os.path.join(sub_dir, 'clientkey.pem')
ccert = os.path.join(sub_dir, 'clientcert.pem')
clientinfo = os.path.join(sub_dir, 'client.info')
if not os.path.isfile(srvinfo):
with salt.utils.fopen(srvinfo, 'w+') as fp_:
infodat = ('organization = salted\ncn = {0}\ntls_www_server'
'\nencryption_key\nsigning_key'
'\ndigitalSignature').format(
__grains__['fqdn'])
fp_.write(infodat)
if not os.path.isfile(priv):
subprocess.call(
'certtool --generate-privkey > {0}'.format(priv),
shell=True)
if not os.path.isfile(cert):
cmd = ('certtool --generate-certificate --load-privkey {0} '
'--load-ca-certificate {1} --load-ca-privkey {2} '
'--template {3} --outfile {4}'
).format(priv, cacert, cakey, srvinfo, cert)
subprocess.call(cmd, shell=True)
if not os.path.isfile(clientinfo):
with salt.utils.fopen(clientinfo, 'w+') as fp_:
infodat = ('country = {0}\nstate = {1}\nlocality = '
'{2}\norganization = {3}\ncn = {4}\n'
'tls_www_client\nencryption_key\nsigning_key\n'
'digitalSignature'
).format(
country,
state,
locality,
organization,
__grains__['fqdn'])
fp_.write(infodat)
if not os.path.isfile(cpriv):
subprocess.call(
'certtool --generate-privkey > {0}'.format(cpriv),
shell=True)
if not os.path.isfile(ccert):
cmd = ('certtool --generate-certificate --load-privkey {0} '
'--load-ca-certificate {1} --load-ca-privkey {2} '
'--template {3} --outfile {4}'
).format(cpriv, cacert, cakey, clientinfo, ccert)
subprocess.call(cmd, shell=True)
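# Illustrative shape of the pillar data ext_pillar returns once the keys
# exist; the key names follow the '.pem' loop above and the PEM bodies are
# elided:
#
#     {'libvirt.serverkey.pem': '-----BEGIN ...',
#      'libvirt.servercert.pem': '-----BEGIN ...',
#      'libvirt.clientkey.pem': '-----BEGIN ...',
#      'libvirt.clientcert.pem': '-----BEGIN ...',
#      'libvirt.cacert.pem': '-----BEGIN ...'}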
| 37.637795 | 117 | 0.561715 |
from __future__ import absolute_import
# autosummary pulls only the first line for its description.
# Import python libs
import os
import subprocess
# Import salt libs
import salt.utils
def __virtual__():
return salt.utils.which('certtool') is not None
def ext_pillar(minion_id,
pillar, # pylint: disable=W0613
command): # pylint: disable=W0613
key_dir = os.path.join(
__opts__['pki_dir'],
'libvirt',
minion_id)
cacert = os.path.join(__opts__['pki_dir'],
'libvirt',
'cacert.pem')
if not os.path.isdir(key_dir):
# No keys have been generated
gen_hyper_keys(minion_id)
ret = {}
for key in os.listdir(key_dir):
if not key.endswith('.pem'):
continue
fn_ = os.path.join(key_dir, key)
with salt.utils.fopen(fn_, 'r') as fp_:
ret['libvirt.{0}'.format(key)] = fp_.read()
with salt.utils.fopen(cacert, 'r') as fp_:
ret['libvirt.cacert.pem'] = fp_.read()
return ret
def gen_hyper_keys(minion_id,
country='US',
state='Utah',
locality='Salt Lake City',
organization='Salted'):
key_dir = os.path.join(
__opts__['pki_dir'],
'libvirt')
if not os.path.isdir(key_dir):
os.makedirs(key_dir)
cakey = os.path.join(key_dir, 'cakey.pem')
cacert = os.path.join(key_dir, 'cacert.pem')
cainfo = os.path.join(key_dir, 'ca.info')
if not os.path.isfile(cainfo):
with salt.utils.fopen(cainfo, 'w+') as fp_:
fp_.write('cn = salted\nca\ncert_signing_key')
if not os.path.isfile(cakey):
subprocess.call(
'certtool --generate-privkey > {0}'.format(cakey),
shell=True)
if not os.path.isfile(cacert):
cmd = ('certtool --generate-self-signed --load-privkey {0} '
'--template {1} --outfile {2}').format(cakey, cainfo, cacert)
subprocess.call(cmd, shell=True)
sub_dir = os.path.join(key_dir, minion_id)
if not os.path.isdir(sub_dir):
os.makedirs(sub_dir)
priv = os.path.join(sub_dir, 'serverkey.pem')
cert = os.path.join(sub_dir, 'servercert.pem')
srvinfo = os.path.join(sub_dir, 'server.info')
cpriv = os.path.join(sub_dir, 'clientkey.pem')
ccert = os.path.join(sub_dir, 'clientcert.pem')
clientinfo = os.path.join(sub_dir, 'client.info')
if not os.path.isfile(srvinfo):
with salt.utils.fopen(srvinfo, 'w+') as fp_:
infodat = ('organization = salted\ncn = {0}\ntls_www_server'
'\nencryption_key\nsigning_key'
'\ndigitalSignature').format(
__grains__['fqdn'])
fp_.write(infodat)
if not os.path.isfile(priv):
subprocess.call(
'certtool --generate-privkey > {0}'.format(priv),
shell=True)
if not os.path.isfile(cert):
cmd = ('certtool --generate-certificate --load-privkey {0} '
'--load-ca-certificate {1} --load-ca-privkey {2} '
'--template {3} --outfile {4}'
).format(priv, cacert, cakey, srvinfo, cert)
subprocess.call(cmd, shell=True)
if not os.path.isfile(clientinfo):
with salt.utils.fopen(clientinfo, 'w+') as fp_:
infodat = ('country = {0}\nstate = {1}\nlocality = '
'{2}\norganization = {3}\ncn = {4}\n'
'tls_www_client\nencryption_key\nsigning_key\n'
'digitalSignature'
).format(
country,
state,
locality,
organization,
__grains__['fqdn'])
fp_.write(infodat)
if not os.path.isfile(cpriv):
subprocess.call(
'certtool --generate-privkey > {0}'.format(cpriv),
shell=True)
if not os.path.isfile(ccert):
cmd = ('certtool --generate-certificate --load-privkey {0} '
'--load-ca-certificate {1} --load-ca-privkey {2} '
'--template {3} --outfile {4}'
).format(cpriv, cacert, cakey, clientinfo, ccert)
subprocess.call(cmd, shell=True)
| true | true |
f71c74c4947034e3fa1939f004fe6def695c2676 | 2,684 | py | Python | unjupyter.py | milo-trujillo/unjupyter | 2ea86f67e39060ddffb109a2ab94bd074c169fed | [
"MIT"
] | null | null | null | unjupyter.py | milo-trujillo/unjupyter | 2ea86f67e39060ddffb109a2ab94bd074c169fed | [
"MIT"
] | null | null | null | unjupyter.py | milo-trujillo/unjupyter | 2ea86f67e39060ddffb109a2ab94bd074c169fed | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import json, sys, os, base64, hashlib, glob
def writeSource(f, src):
for line in src:
f.write(line)
def processOutputs(f, outputs):
for output in outputs:
if( "text" in output.keys() ):
f.write("```\n")
for line in output["text"]:
f.write(line)
f.write("\n```\n")
if( "data" in output.keys() ):
filetypes = output["data"].keys()
for filetype in filetypes:
category, extension = filetype.split("/")
if( category == "image" ):
data = output["data"][filetype]
raw = base64.b64decode(data)
filename = hashlib.md5(raw).hexdigest() + "." + extension
with open(filename, "wb") as image:
image.write(raw)
					f.write("![%s/%s](%s)\n\n\n" % (category, extension, filename))
elif( category == "text" and extension == "plain" ):
data = output["data"][filetype]
f.write("```\n")
writeSource(f, data)
f.write("\n```\n\n")
elif( category == "text" and extension == "html" and "text/plain" in filetypes ):
sys.stderr.write("Info: Ignoring an 'html' output in favor of available plaintext\n")
elif( category == "text" and extension == "html" ):
sys.stderr.write("Info: Writing raw html because there is no plaintext counterpart :(\n")
data = output["data"][filetype]
writeSource(f, data)
f.write("\n\n")
else:
sys.stderr.write("WARNING: Skipping unsupported data type '%s'\n" % (filetype))
def convertNotebook(infile, outfile):
with open(outfile, "w") as md:
with open(infile, "r") as notebook:
data = json.load(notebook)
cells = data["cells"]
for cell in cells:
if( cell["cell_type"] == "markdown" ):
writeSource(md, cell["source"])
md.write("\n\n")
elif( cell["cell_type"] == "code" ):
if( len(cell["source"]) > 0 ):
md.write("```\n")
writeSource(md, cell["source"])
md.write("\n```\n\n")
if( len(cell["outputs"]) > 0 ):
md.write("Output:\n\n")
processOutputs(md, cell["outputs"])
md.write("\n")
sys.stderr.flush()
print("Notebook '%s' exported as '%s'" % (infile, outfile))
if __name__ == "__main__":
if( len(sys.argv) == 2 ):
if( os.path.isdir(sys.argv[1]) ):
for infile in glob.glob(sys.argv[1]+"/*.ipynb"):
outfile = os.path.splitext(infile)[0] + ".md"
convertNotebook(infile, outfile)
else:
infile = sys.argv[1]
outfile = os.path.splitext(infile)[0] + ".md"
convertNotebook(infile, outfile)
elif( len(sys.argv) == 3 ):
infile = sys.argv[1]
outfile = sys.argv[2]
convertNotebook(infile, outfile)
else:
		sys.stderr.write("USAGE: %s <infile.ipynb> [outfile.md]\n" % sys.argv[0])
		sys.stderr.write("   or: %s <directory>\n" % sys.argv[0])
sys.exit(1)
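# Example invocations (mirroring the argument handling above):
#   python3 unjupyter.py notebook.ipynb           # writes notebook.md
#   python3 unjupyter.py notebook.ipynb out.md    # explicit output name
#   python3 unjupyter.py notebooks/               # one .md per .ipynb found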
| 33.55 | 94 | 0.604322 |
import json, sys, os, base64, hashlib, glob
def writeSource(f, src):
for line in src:
f.write(line)
def processOutputs(f, outputs):
for output in outputs:
if( "text" in output.keys() ):
f.write("```\n")
for line in output["text"]:
f.write(line)
f.write("\n```\n")
if( "data" in output.keys() ):
filetypes = output["data"].keys()
for filetype in filetypes:
category, extension = filetype.split("/")
if( category == "image" ):
data = output["data"][filetype]
raw = base64.b64decode(data)
filename = hashlib.md5(raw).hexdigest() + "." + extension
with open(filename, "wb") as image:
image.write(raw)
					f.write("![%s/%s](%s)\n\n\n" % (category, extension, filename))
elif( category == "text" and extension == "plain" ):
data = output["data"][filetype]
f.write("```\n")
writeSource(f, data)
f.write("\n```\n\n")
elif( category == "text" and extension == "html" and "text/plain" in filetypes ):
sys.stderr.write("Info: Ignoring an 'html' output in favor of available plaintext\n")
elif( category == "text" and extension == "html" ):
sys.stderr.write("Info: Writing raw html because there is no plaintext counterpart :(\n")
data = output["data"][filetype]
writeSource(f, data)
f.write("\n\n")
else:
sys.stderr.write("WARNING: Skipping unsupported data type '%s'\n" % (filetype))
def convertNotebook(infile, outfile):
with open(outfile, "w") as md:
with open(infile, "r") as notebook:
data = json.load(notebook)
cells = data["cells"]
for cell in cells:
if( cell["cell_type"] == "markdown" ):
writeSource(md, cell["source"])
md.write("\n\n")
elif( cell["cell_type"] == "code" ):
if( len(cell["source"]) > 0 ):
md.write("```\n")
writeSource(md, cell["source"])
md.write("\n```\n\n")
if( len(cell["outputs"]) > 0 ):
md.write("Output:\n\n")
processOutputs(md, cell["outputs"])
md.write("\n")
sys.stderr.flush()
print("Notebook '%s' exported as '%s'" % (infile, outfile))
if __name__ == "__main__":
if( len(sys.argv) == 2 ):
if( os.path.isdir(sys.argv[1]) ):
for infile in glob.glob(sys.argv[1]+"/*.ipynb"):
outfile = os.path.splitext(infile)[0] + ".md"
convertNotebook(infile, outfile)
else:
infile = sys.argv[1]
outfile = os.path.splitext(infile)[0] + ".md"
convertNotebook(infile, outfile)
elif( len(sys.argv) == 3 ):
infile = sys.argv[1]
outfile = sys.argv[2]
convertNotebook(infile, outfile)
else:
		sys.stderr.write("USAGE: %s <infile.ipynb> [outfile.md]\n" % sys.argv[0])
		sys.stderr.write("   or: %s <directory>\n" % sys.argv[0])
sys.exit(1)
| true | true |
f71c7536f0d8bae32792340fd5193c009dbbeef0 | 403 | py | Python | AIC21_Backend/asgi.py | mehrbodjavadi79/AIC21-Backend | 9f4342781f0722804a2eb704b43b52984c81b40a | [
"MIT"
] | 3 | 2021-03-12T18:32:39.000Z | 2021-11-08T10:21:04.000Z | AIC21_Backend/asgi.py | mehrbodjavadi79/AIC21-Backend | 9f4342781f0722804a2eb704b43b52984c81b40a | [
"MIT"
] | null | null | null | AIC21_Backend/asgi.py | mehrbodjavadi79/AIC21-Backend | 9f4342781f0722804a2eb704b43b52984c81b40a | [
"MIT"
] | 2 | 2021-01-29T14:52:53.000Z | 2022-03-05T10:24:24.000Z | """
ASGI config for AIC21_Backend project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'AIC21_Backend.settings')
application = get_asgi_application()
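# With an ASGI server installed, this module is what the server is pointed at,
# e.g. (illustrative): uvicorn AIC21_Backend.asgi:application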
| 23.705882 | 78 | 0.791563 |
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'AIC21_Backend.settings')
application = get_asgi_application()
| true | true |
f71c767d697d8a28a293c70fc345f6c9aac815fd | 296 | py | Python | TweetsToDB/main.py | lru-avershave/CapstoneProject | f74b4c73ffb0214a498b19f5f51481c529fa85a8 | [
"MIT"
] | 2 | 2020-01-15T06:38:34.000Z | 2020-01-22T20:42:19.000Z | TweetsToDB/main.py | lru-avershave/CapstoneProject | f74b4c73ffb0214a498b19f5f51481c529fa85a8 | [
"MIT"
] | null | null | null | TweetsToDB/main.py | lru-avershave/CapstoneProject | f74b4c73ffb0214a498b19f5f51481c529fa85a8 | [
"MIT"
] | 1 | 2020-01-15T20:11:48.000Z | 2020-01-15T20:11:48.000Z | import mongodb_setup as dbConnection
import TweetModel as TweetModel
# from watchdir import watch
from ImportText import collectTxt
class main():
try:
dbConnection
collectTxt()
# watch()
except KeyboardInterrupt:
print("Interrupted Main")
exit(0) | 21.142857 | 36 | 0.679054 | import mongodb_setup as dbConnection
import TweetModel as TweetModel
from ImportText import collectTxt
class main():
try:
dbConnection
collectTxt()
except KeyboardInterrupt:
print("Interrupted Main")
exit(0) | true | true |
f71c76b8aae27f9f54f39dc22abd7134629a2418 | 6,042 | py | Python | yateto/arch.py | ZaubererHaft/yateto | 88a02d160da9bfa7f74a4280deaf465f15cae0fb | [
"BSD-3-Clause"
] | 2 | 2021-07-01T14:23:01.000Z | 2022-01-12T01:06:24.000Z | yateto/arch.py | ZaubererHaft/yateto | 88a02d160da9bfa7f74a4280deaf465f15cae0fb | [
"BSD-3-Clause"
] | 14 | 2019-06-25T18:12:29.000Z | 2022-02-08T15:17:27.000Z | yateto/arch.py | ZaubererHaft/yateto | 88a02d160da9bfa7f74a4280deaf465f15cae0fb | [
"BSD-3-Clause"
] | 3 | 2021-05-14T13:04:28.000Z | 2021-12-24T03:15:35.000Z | ##
# @file
# This file is part of SeisSol.
#
# @author Carsten Uphoff (c.uphoff AT tum.de, http://www5.in.tum.de/wiki/index.php/Carsten_Uphoff,_M.Sc.)
#
# @section LICENSE
# Copyright (c) 2015-2018, SeisSol Group
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# @section DESCRIPTION
#
from .memory import DenseMemoryLayout
class Architecture(object):
def __init__(self,
name,
precision,
alignment,
enablePrefetch=False,
sub_name=None,
host_name=None):
"""
Args:
name (str): name of the compute (main) architecture.
      sub_name (str): name of the sub-architecture, e.g. an Nvidia streaming-multiprocessor
        model (sm_60, sm_61, etc.). For a CPU target this field is None
      precision (str): a single character, 'd' or 's', standing for 'double' or 'single' precision
alignment (int): length of a vector register (unit) in bytes
enablePrefetch (bool): indicates whether the compute (main) architecture supports
data prefetching
      host_name (str): name of the host (CPU) architecture. If the code is intended to be
        generated for a CPU-like architecture, this field should be None
"""
self.name = name
self.sub_name = sub_name
self.host_name = host_name
self.precision = precision.upper()
if self.precision == 'D':
self.bytesPerReal = 8
self.typename = 'double'
self.epsilon = 2.22e-16
elif self.precision == 'S':
self.bytesPerReal = 4
self.typename = 'float'
self.epsilon = 1.19e-7
else:
raise ValueError(f'Unknown precision type {self.precision}')
self.alignment = alignment
assert self.alignment % self.bytesPerReal == 0
self.alignedReals = self.alignment // self.bytesPerReal
self.enablePrefetch = enablePrefetch
self.uintTypename = 'unsigned'
self.ulongTypename = 'unsigned long'
self._tmpStackLimit = 524288
def setTmpStackLimit(self, tmpStackLimit):
self._tmpStackLimit = tmpStackLimit
def alignedLower(self, index):
return index - index % self.alignedReals
def alignedUpper(self, index):
return index + (self.alignedReals - index % self.alignedReals) % self.alignedReals
def alignedShape(self, shape):
return (self.alignedUpper(shape[0]),) + shape[1:]
def checkAlignment(self, offset):
return offset % self.alignedReals == 0
def formatConstant(self, constant):
return str(constant) + ('f' if self.precision == 'S' else '')
def onHeap(self, numReals):
return (numReals * self.bytesPerReal) > self._tmpStackLimit
def _get_name_and_precision(ident):
return ident[1:], ident[0].upper()
def getArchitectureIdentifiedBy(ident):
name, precision = _get_name_and_precision(ident)
arch = {
'noarch': Architecture(name, precision, 16, False),
'wsm': Architecture(name, precision, 16, False),
'snb': Architecture(name, precision, 32, False),
'hsw': Architecture(name, precision, 32, False),
'skx': Architecture(name, precision, 64, True),
'knc': Architecture(name, precision, 64, False),
'knl': Architecture(name, precision, 64, True), # Libxsmm currently supports prefetch only for KNL kernels
'rome': Architecture(name, precision, 32, False),
'thunderx2t99': Architecture(name, precision, 16, False),
'power9': Architecture(name, precision, 16, False)
}
return arch[name]
def getHeterogeneousArchitectureIdentifiedBy(compute_ident, compute_sub_arch=None, host_ident=None):
compute_name, compute_precision = _get_name_and_precision(compute_ident)
host_name, host_precision = _get_name_and_precision(host_ident)
if (compute_precision != host_precision):
raise ValueError(f'Precision of host and compute arch. must be the same. '
f'Given: {host_ident}, {compute_ident}')
arch = {
'nvidia': Architecture(compute_name, compute_precision, 64, False, compute_sub_arch, host_name)
}
return arch[compute_name]
def useArchitectureIdentifiedBy(compute_ident, compute_sub_arch=None, host_ident=None):
if not (compute_sub_arch or host_ident):
arch = getArchitectureIdentifiedBy(compute_ident)
elif (compute_sub_arch and host_ident):
arch = getHeterogeneousArchitectureIdentifiedBy(compute_ident, compute_sub_arch, host_ident)
else:
raise ValueError(f'given an incomplete set of input parameters: '
f'{compute_ident}, {compute_sub_arch}, {host_ident}')
DenseMemoryLayout.setAlignmentArch(arch)
return arch
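# Illustrative use of the identifier scheme above: the leading character
# selects precision and the rest names the architecture.
#
#     arch = useArchitectureIdentifiedBy('dskx')
#     arch.typename       # -> 'double'
#     arch.alignedReals   # -> 8 (64-byte alignment / 8-byte reals)
#
# GPU targets pass all three identifiers instead, e.g.
# useArchitectureIdentifiedBy('snvidia', 'sm_60', 'shsw').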
| 38.484076 | 110 | 0.71665 |
from .memory import DenseMemoryLayout
class Architecture(object):
def __init__(self,
name,
precision,
alignment,
enablePrefetch=False,
sub_name=None,
host_name=None):
self.name = name
self.sub_name = sub_name
self.host_name = host_name
self.precision = precision.upper()
if self.precision == 'D':
self.bytesPerReal = 8
self.typename = 'double'
self.epsilon = 2.22e-16
elif self.precision == 'S':
self.bytesPerReal = 4
self.typename = 'float'
self.epsilon = 1.19e-7
else:
raise ValueError(f'Unknown precision type {self.precision}')
self.alignment = alignment
assert self.alignment % self.bytesPerReal == 0
self.alignedReals = self.alignment // self.bytesPerReal
self.enablePrefetch = enablePrefetch
self.uintTypename = 'unsigned'
self.ulongTypename = 'unsigned long'
self._tmpStackLimit = 524288
def setTmpStackLimit(self, tmpStackLimit):
self._tmpStackLimit = tmpStackLimit
def alignedLower(self, index):
return index - index % self.alignedReals
def alignedUpper(self, index):
return index + (self.alignedReals - index % self.alignedReals) % self.alignedReals
def alignedShape(self, shape):
return (self.alignedUpper(shape[0]),) + shape[1:]
def checkAlignment(self, offset):
return offset % self.alignedReals == 0
def formatConstant(self, constant):
return str(constant) + ('f' if self.precision == 'S' else '')
def onHeap(self, numReals):
return (numReals * self.bytesPerReal) > self._tmpStackLimit
def _get_name_and_precision(ident):
return ident[1:], ident[0].upper()
def getArchitectureIdentifiedBy(ident):
name, precision = _get_name_and_precision(ident)
arch = {
'noarch': Architecture(name, precision, 16, False),
'wsm': Architecture(name, precision, 16, False),
'snb': Architecture(name, precision, 32, False),
'hsw': Architecture(name, precision, 32, False),
'skx': Architecture(name, precision, 64, True),
'knc': Architecture(name, precision, 64, False),
'knl': Architecture(name, precision, 64, True),
'rome': Architecture(name, precision, 32, False),
'thunderx2t99': Architecture(name, precision, 16, False),
'power9': Architecture(name, precision, 16, False)
}
return arch[name]
def getHeterogeneousArchitectureIdentifiedBy(compute_ident, compute_sub_arch=None, host_ident=None):
compute_name, compute_precision = _get_name_and_precision(compute_ident)
host_name, host_precision = _get_name_and_precision(host_ident)
if (compute_precision != host_precision):
raise ValueError(f'Precision of host and compute arch. must be the same. '
f'Given: {host_ident}, {compute_ident}')
arch = {
'nvidia': Architecture(compute_name, compute_precision, 64, False, compute_sub_arch, host_name)
}
return arch[compute_name]
def useArchitectureIdentifiedBy(compute_ident, compute_sub_arch=None, host_ident=None):
if not (compute_sub_arch or host_ident):
arch = getArchitectureIdentifiedBy(compute_ident)
elif (compute_sub_arch and host_ident):
arch = getHeterogeneousArchitectureIdentifiedBy(compute_ident, compute_sub_arch, host_ident)
else:
raise ValueError(f'given an incomplete set of input parameters: '
f'{compute_ident}, {compute_sub_arch}, {host_ident}')
DenseMemoryLayout.setAlignmentArch(arch)
return arch
| true | true |
f71c77a35b95b5244ed1a2f4cb8314b74edffc12 | 19,222 | py | Python | lib/spack/spack/test/install.py | padamson/spack | d3f67a48552691b4846ccc4a10f76740b154090c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2021-03-05T10:54:32.000Z | 2021-03-05T14:14:52.000Z | lib/spack/spack/test/install.py | padamson/spack | d3f67a48552691b4846ccc4a10f76740b154090c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 32 | 2020-12-15T17:29:20.000Z | 2022-03-21T15:08:31.000Z | lib/spack/spack/test/install.py | padamson/spack | d3f67a48552691b4846ccc4a10f76740b154090c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2021-07-19T20:31:27.000Z | 2021-07-19T21:14:14.000Z | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import shutil
import pytest
import llnl.util.filesystem as fs
import spack.error
import spack.patch
import spack.repo
import spack.store
import spack.util.spack_json as sjson
from spack.package import (
InstallError,
PackageBase,
PackageStillNeededError,
_spack_build_envfile,
_spack_build_logfile,
_spack_configure_argsfile,
)
from spack.spec import Spec
def find_nothing(*args):
raise spack.repo.UnknownPackageError(
'Repo package access is disabled for test')
def test_install_and_uninstall(install_mockery, mock_fetch, monkeypatch):
# Get a basic concrete spec for the trivial install package.
spec = Spec('trivial-install-test-package')
spec.concretize()
assert spec.concrete
# Get the package
pkg = spec.package
try:
pkg.do_install()
spec._package = None
monkeypatch.setattr(spack.repo, 'get', find_nothing)
with pytest.raises(spack.repo.UnknownPackageError):
spec.package
pkg.do_uninstall()
except Exception:
pkg.remove_prefix()
raise
def mock_remove_prefix(*args):
raise MockInstallError(
"Intentional error",
"Mock remove_prefix method intentionally fails")
class RemovePrefixChecker(object):
def __init__(self, wrapped_rm_prefix):
self.removed = False
self.wrapped_rm_prefix = wrapped_rm_prefix
def remove_prefix(self):
self.removed = True
self.wrapped_rm_prefix()
class MockStage(object):
def __init__(self, wrapped_stage):
self.wrapped_stage = wrapped_stage
self.test_destroyed = False
def __enter__(self):
self.create()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is None:
self.destroy()
def destroy(self):
self.test_destroyed = True
self.wrapped_stage.destroy()
def create(self):
self.wrapped_stage.create()
def __getattr__(self, attr):
if attr == 'wrapped_stage':
# This attribute may not be defined at some point during unpickling
raise AttributeError()
return getattr(self.wrapped_stage, attr)
def test_partial_install_delete_prefix_and_stage(install_mockery, mock_fetch):
spec = Spec('canfail').concretized()
pkg = spack.repo.get(spec)
instance_rm_prefix = pkg.remove_prefix
try:
pkg.succeed = False
pkg.remove_prefix = mock_remove_prefix
with pytest.raises(MockInstallError):
pkg.do_install()
assert os.path.isdir(pkg.prefix)
rm_prefix_checker = RemovePrefixChecker(instance_rm_prefix)
pkg.remove_prefix = rm_prefix_checker.remove_prefix
# must clear failure markings for the package before re-installing it
spack.store.db.clear_failure(spec, True)
pkg.succeed = True
pkg.stage = MockStage(pkg.stage)
pkg.do_install(restage=True)
assert rm_prefix_checker.removed
assert pkg.stage.test_destroyed
assert pkg.installed
finally:
pkg.remove_prefix = instance_rm_prefix
def test_dont_add_patches_to_installed_package(install_mockery, mock_fetch):
dependency = Spec('dependency-install')
dependency.concretize()
dependency.package.do_install()
dependency_hash = dependency.dag_hash()
dependent = Spec('dependent-install ^/' + dependency_hash)
dependent.concretize()
dependency.package.patches['dependency-install'] = [
spack.patch.UrlPatch(
dependent.package, 'file://fake.patch', sha256='unused-hash')]
assert dependent['dependency-install'] == dependency
def test_installed_dependency_request_conflicts(
install_mockery, mock_fetch, mutable_mock_repo):
dependency = Spec('dependency-install')
dependency.concretize()
dependency.package.do_install()
dependency_hash = dependency.dag_hash()
dependent = Spec(
'conflicting-dependent ^/' + dependency_hash)
with pytest.raises(spack.error.UnsatisfiableSpecError):
dependent.concretize()
def test_install_dependency_symlinks_pkg(
install_mockery, mock_fetch, mutable_mock_repo):
"""Test dependency flattening/symlinks mock package."""
spec = Spec('flatten-deps')
spec.concretize()
pkg = spec.package
pkg.do_install()
# Ensure dependency directory exists after the installation.
dependency_dir = os.path.join(pkg.prefix, 'dependency-install')
assert os.path.isdir(dependency_dir)
def test_install_times(
install_mockery, mock_fetch, mutable_mock_repo):
"""Test install times added."""
spec = Spec('dev-build-test-install-phases')
spec.concretize()
pkg = spec.package
pkg.do_install()
# Ensure dependency directory exists after the installation.
install_times = os.path.join(pkg.prefix, ".spack", 'install_times.json')
assert os.path.isfile(install_times)
# Ensure the phases are included
with open(install_times, 'r') as timefile:
times = sjson.load(timefile.read())
# The order should be maintained
phases = [x['name'] for x in times['phases']]
total = sum([x['seconds'] for x in times['phases']])
for name in ['one', 'two', 'three', 'install']:
assert name in phases
# Give a generous difference threshold
assert abs(total - times['total']['seconds']) < 5
def test_flatten_deps(
install_mockery, mock_fetch, mutable_mock_repo):
"""Explicitly test the flattening code for coverage purposes."""
# Unfortunately, executing the 'flatten-deps' spec's installation does
# not affect code coverage results, so be explicit here.
spec = Spec('dependent-install')
spec.concretize()
pkg = spec.package
pkg.do_install()
# Demonstrate that the directory does not appear under the spec
# prior to the flatten operation.
dependency_name = 'dependency-install'
assert dependency_name not in os.listdir(pkg.prefix)
# Flatten the dependencies and ensure the dependency directory is there.
spack.package.flatten_dependencies(spec, pkg.prefix)
dependency_dir = os.path.join(pkg.prefix, dependency_name)
assert os.path.isdir(dependency_dir)
@pytest.fixture()
def install_upstream(tmpdir_factory, gen_mock_layout, install_mockery):
"""Provides a function that installs a specified set of specs to an
upstream database. The function returns a store which points to the
upstream, as well as the upstream layout (for verifying that dependent
installs are using the upstream installs).
"""
mock_db_root = str(tmpdir_factory.mktemp('mock_db_root'))
prepared_db = spack.database.Database(mock_db_root)
upstream_layout = gen_mock_layout('/a/')
def _install_upstream(*specs):
for spec_str in specs:
s = spack.spec.Spec(spec_str).concretized()
prepared_db.add(s, upstream_layout)
downstream_root = str(tmpdir_factory.mktemp('mock_downstream_db_root'))
db_for_test = spack.database.Database(
downstream_root, upstream_dbs=[prepared_db]
)
store = spack.store.Store(downstream_root)
store.db = db_for_test
return store, upstream_layout
return _install_upstream
def test_installed_upstream_external(install_upstream, mock_fetch):
"""Check that when a dependency package is recorded as installed in
    an upstream database it is not reinstalled.
"""
s, _ = install_upstream('externaltool')
with spack.store.use_store(s):
dependent = spack.spec.Spec('externaltest')
dependent.concretize()
new_dependency = dependent['externaltool']
assert new_dependency.external
assert new_dependency.prefix == '/path/to/external_tool'
dependent.package.do_install()
assert not os.path.exists(new_dependency.prefix)
assert os.path.exists(dependent.prefix)
def test_installed_upstream(install_upstream, mock_fetch):
"""Check that when a dependency package is recorded as installed in
    an upstream database it is not reinstalled.
"""
s, upstream_layout = install_upstream('dependency-install')
with spack.store.use_store(s):
dependency = spack.spec.Spec('dependency-install').concretized()
dependent = spack.spec.Spec('dependent-install').concretized()
new_dependency = dependent['dependency-install']
assert new_dependency.package.installed_upstream
assert (new_dependency.prefix ==
upstream_layout.path_for_spec(dependency))
dependent.package.do_install()
assert not os.path.exists(new_dependency.prefix)
assert os.path.exists(dependent.prefix)
@pytest.mark.disable_clean_stage_check
def test_partial_install_keep_prefix(install_mockery, mock_fetch):
spec = Spec('canfail').concretized()
pkg = spack.repo.get(spec)
# Normally the stage should start unset, but other tests set it
pkg._stage = None
remove_prefix = spack.package.Package.remove_prefix
try:
# If remove_prefix is called at any point in this test, that is an
# error
pkg.succeed = False # make the build fail
spack.package.Package.remove_prefix = mock_remove_prefix
with pytest.raises(spack.build_environment.ChildError):
pkg.do_install(keep_prefix=True)
assert os.path.exists(pkg.prefix)
# must clear failure markings for the package before re-installing it
spack.store.db.clear_failure(spec, True)
pkg.succeed = True # make the build succeed
pkg.stage = MockStage(pkg.stage)
pkg.do_install(keep_prefix=True)
assert pkg.installed
assert not pkg.stage.test_destroyed
finally:
spack.package.Package.remove_prefix = remove_prefix
def test_second_install_no_overwrite_first(install_mockery, mock_fetch):
spec = Spec('canfail').concretized()
pkg = spack.repo.get(spec)
remove_prefix = spack.package.Package.remove_prefix
try:
spack.package.Package.remove_prefix = mock_remove_prefix
pkg.succeed = True
pkg.do_install()
assert pkg.installed
# If Package.install is called after this point, it will fail
pkg.succeed = False
pkg.do_install()
finally:
spack.package.Package.remove_prefix = remove_prefix
def test_install_prefix_collision_fails(config, mock_fetch, mock_packages, tmpdir):
"""
Test that different specs with coinciding install prefixes will fail
to install.
"""
projections = {'all': 'all-specs-project-to-this-prefix'}
store = spack.store.Store(str(tmpdir), projections=projections)
with spack.store.use_store(store):
with spack.config.override('config:checksum', False):
pkg_a = Spec('libelf@0.8.13').concretized().package
pkg_b = Spec('libelf@0.8.12').concretized().package
pkg_a.do_install()
with pytest.raises(InstallError, match="Install prefix collision"):
pkg_b.do_install()
def test_store(install_mockery, mock_fetch):
spec = Spec('cmake-client').concretized()
pkg = spec.package
pkg.do_install()
@pytest.mark.disable_clean_stage_check
def test_failing_build(install_mockery, mock_fetch, capfd):
spec = Spec('failing-build').concretized()
pkg = spec.package
with pytest.raises(spack.build_environment.ChildError):
pkg.do_install()
assert 'InstallError: Expected Failure' in capfd.readouterr()[0]
class MockInstallError(spack.error.SpackError):
pass
def test_uninstall_by_spec_errors(mutable_database):
"""Test exceptional cases with the uninstall command."""
# Try to uninstall a spec that has not been installed
spec = Spec('dependent-install')
spec.concretize()
with pytest.raises(InstallError, match="is not installed"):
PackageBase.uninstall_by_spec(spec)
# Try an unforced uninstall of a spec with dependencies
rec = mutable_database.get_record('mpich')
with pytest.raises(PackageStillNeededError, match="Cannot uninstall"):
PackageBase.uninstall_by_spec(rec.spec)
@pytest.mark.disable_clean_stage_check
def test_nosource_pkg_install(
install_mockery, mock_fetch, mock_packages, capfd):
"""Test install phases with the nosource package."""
spec = Spec('nosource').concretized()
pkg = spec.package
# Make sure install works even though there is no associated code.
pkg.do_install()
out = capfd.readouterr()
assert "Installing dependency-install" in out[0]
assert "Missing a source id for nosource" in out[1]
def test_nosource_pkg_install_post_install(
install_mockery, mock_fetch, mock_packages):
"""Test install phases with the nosource package with post-install."""
spec = Spec('nosource-install').concretized()
pkg = spec.package
# Make sure both the install and post-install package methods work.
pkg.do_install()
# Ensure the file created in the package's `install` method exists.
install_txt = os.path.join(spec.prefix, 'install.txt')
assert os.path.isfile(install_txt)
# Ensure the file created in the package's `post-install` method exists.
post_install_txt = os.path.join(spec.prefix, 'post-install.txt')
assert os.path.isfile(post_install_txt)
def test_pkg_build_paths(install_mockery):
# Get a basic concrete spec for the trivial install package.
spec = Spec('trivial-install-test-package').concretized()
log_path = spec.package.log_path
assert log_path.endswith(_spack_build_logfile)
env_path = spec.package.env_path
assert env_path.endswith(_spack_build_envfile)
# Backward compatibility checks
log_dir = os.path.dirname(log_path)
fs.mkdirp(log_dir)
with fs.working_dir(log_dir):
# Start with the older of the previous log filenames
older_log = 'spack-build.out'
fs.touch(older_log)
assert spec.package.log_path.endswith(older_log)
# Now check the newer log filename
last_log = 'spack-build.txt'
os.rename(older_log, last_log)
assert spec.package.log_path.endswith(last_log)
# Check the old environment file
last_env = 'spack-build.env'
os.rename(last_log, last_env)
assert spec.package.env_path.endswith(last_env)
# Cleanup
shutil.rmtree(log_dir)
def test_pkg_install_paths(install_mockery):
# Get a basic concrete spec for the trivial install package.
spec = Spec('trivial-install-test-package').concretized()
log_path = os.path.join(spec.prefix, '.spack', _spack_build_logfile)
assert spec.package.install_log_path == log_path
env_path = os.path.join(spec.prefix, '.spack', _spack_build_envfile)
assert spec.package.install_env_path == env_path
args_path = os.path.join(spec.prefix, '.spack', _spack_configure_argsfile)
assert spec.package.install_configure_args_path == args_path
# Backward compatibility checks
log_dir = os.path.dirname(log_path)
fs.mkdirp(log_dir)
with fs.working_dir(log_dir):
# Start with the older of the previous install log filenames
older_log = 'build.out'
fs.touch(older_log)
assert spec.package.install_log_path.endswith(older_log)
# Now check the newer install log filename
last_log = 'build.txt'
os.rename(older_log, last_log)
assert spec.package.install_log_path.endswith(last_log)
# Check the old install environment file
last_env = 'build.env'
os.rename(last_log, last_env)
assert spec.package.install_env_path.endswith(last_env)
# Cleanup
shutil.rmtree(log_dir)
def test_log_install_without_build_files(install_mockery):
"""Test the installer log function when no build files are present."""
# Get a basic concrete spec for the trivial install package.
spec = Spec('trivial-install-test-package').concretized()
# Attempt installing log without the build log file
with pytest.raises(IOError, match="No such file or directory"):
spack.installer.log(spec.package)
def test_log_install_with_build_files(install_mockery, monkeypatch):
"""Test the installer's log function when have build files."""
config_log = 'config.log'
# Retain the original function for use in the monkey patch that is used
# to raise an exception under the desired condition for test coverage.
orig_install_fn = fs.install
def _install(src, dest):
orig_install_fn(src, dest)
if src.endswith(config_log):
raise Exception('Mock log install error')
monkeypatch.setattr(fs, 'install', _install)
spec = Spec('trivial-install-test-package').concretized()
# Set up mock build files and try again to include archive failure
log_path = spec.package.log_path
log_dir = os.path.dirname(log_path)
fs.mkdirp(log_dir)
with fs.working_dir(log_dir):
fs.touch(log_path)
fs.touch(spec.package.env_path)
fs.touch(spec.package.configure_args_path)
install_path = os.path.dirname(spec.package.install_log_path)
fs.mkdirp(install_path)
source = spec.package.stage.source_path
config = os.path.join(source, 'config.log')
fs.touchp(config)
spec.package.archive_files = ['missing', '..', config]
spack.installer.log(spec.package)
assert os.path.exists(spec.package.install_log_path)
assert os.path.exists(spec.package.install_env_path)
assert os.path.exists(spec.package.install_configure_args_path)
archive_dir = os.path.join(install_path, 'archived-files')
source_dir = os.path.dirname(source)
rel_config = os.path.relpath(config, source_dir)
assert os.path.exists(os.path.join(archive_dir, rel_config))
assert not os.path.exists(os.path.join(archive_dir, 'missing'))
expected_errs = [
'OUTSIDE SOURCE PATH', # for '..'
'FAILED TO ARCHIVE' # for rel_config
]
with open(os.path.join(archive_dir, 'errors.txt'), 'r') as fd:
for ln, expected in zip(fd, expected_errs):
assert expected in ln
# Cleanup
shutil.rmtree(log_dir)
def test_unconcretized_install(install_mockery, mock_fetch, mock_packages):
"""Test attempts to perform install phases with unconcretized spec."""
spec = Spec('trivial-install-test-package')
with pytest.raises(ValueError, match='must have a concrete spec'):
spec.package.do_install()
with pytest.raises(ValueError, match="only patch concrete packages"):
spec.package.do_patch()
def test_install_error():
try:
msg = 'test install error'
long_msg = 'this is the long version of test install error'
raise InstallError(msg, long_msg=long_msg)
except Exception as exc:
assert exc.__class__.__name__ == 'InstallError'
assert exc.message == msg
assert exc.long_message == long_msg
| 33.371528 | 83 | 0.70232 |
import os
import shutil
import pytest
import llnl.util.filesystem as fs
import spack.error
import spack.patch
import spack.repo
import spack.store
import spack.util.spack_json as sjson
from spack.package import (
InstallError,
PackageBase,
PackageStillNeededError,
_spack_build_envfile,
_spack_build_logfile,
_spack_configure_argsfile,
)
from spack.spec import Spec
def find_nothing(*args):
raise spack.repo.UnknownPackageError(
'Repo package access is disabled for test')
def test_install_and_uninstall(install_mockery, mock_fetch, monkeypatch):
spec = Spec('trivial-install-test-package')
spec.concretize()
assert spec.concrete
pkg = spec.package
try:
pkg.do_install()
spec._package = None
monkeypatch.setattr(spack.repo, 'get', find_nothing)
with pytest.raises(spack.repo.UnknownPackageError):
spec.package
pkg.do_uninstall()
except Exception:
pkg.remove_prefix()
raise
def mock_remove_prefix(*args):
raise MockInstallError(
"Intentional error",
"Mock remove_prefix method intentionally fails")
class RemovePrefixChecker(object):
def __init__(self, wrapped_rm_prefix):
self.removed = False
self.wrapped_rm_prefix = wrapped_rm_prefix
def remove_prefix(self):
self.removed = True
self.wrapped_rm_prefix()
class MockStage(object):
def __init__(self, wrapped_stage):
self.wrapped_stage = wrapped_stage
self.test_destroyed = False
def __enter__(self):
self.create()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is None:
self.destroy()
def destroy(self):
self.test_destroyed = True
self.wrapped_stage.destroy()
def create(self):
self.wrapped_stage.create()
def __getattr__(self, attr):
if attr == 'wrapped_stage':
raise AttributeError()
return getattr(self.wrapped_stage, attr)
def test_partial_install_delete_prefix_and_stage(install_mockery, mock_fetch):
spec = Spec('canfail').concretized()
pkg = spack.repo.get(spec)
instance_rm_prefix = pkg.remove_prefix
try:
pkg.succeed = False
pkg.remove_prefix = mock_remove_prefix
with pytest.raises(MockInstallError):
pkg.do_install()
assert os.path.isdir(pkg.prefix)
rm_prefix_checker = RemovePrefixChecker(instance_rm_prefix)
pkg.remove_prefix = rm_prefix_checker.remove_prefix
spack.store.db.clear_failure(spec, True)
pkg.succeed = True
pkg.stage = MockStage(pkg.stage)
pkg.do_install(restage=True)
assert rm_prefix_checker.removed
assert pkg.stage.test_destroyed
assert pkg.installed
finally:
pkg.remove_prefix = instance_rm_prefix
def test_dont_add_patches_to_installed_package(install_mockery, mock_fetch):
dependency = Spec('dependency-install')
dependency.concretize()
dependency.package.do_install()
dependency_hash = dependency.dag_hash()
dependent = Spec('dependent-install ^/' + dependency_hash)
dependent.concretize()
dependency.package.patches['dependency-install'] = [
spack.patch.UrlPatch(
dependent.package, 'file://fake.patch', sha256='unused-hash')]
assert dependent['dependency-install'] == dependency
def test_installed_dependency_request_conflicts(
install_mockery, mock_fetch, mutable_mock_repo):
dependency = Spec('dependency-install')
dependency.concretize()
dependency.package.do_install()
dependency_hash = dependency.dag_hash()
dependent = Spec(
'conflicting-dependent ^/' + dependency_hash)
with pytest.raises(spack.error.UnsatisfiableSpecError):
dependent.concretize()
def test_install_dependency_symlinks_pkg(
install_mockery, mock_fetch, mutable_mock_repo):
spec = Spec('flatten-deps')
spec.concretize()
pkg = spec.package
pkg.do_install()
dependency_dir = os.path.join(pkg.prefix, 'dependency-install')
assert os.path.isdir(dependency_dir)
def test_install_times(
install_mockery, mock_fetch, mutable_mock_repo):
spec = Spec('dev-build-test-install-phases')
spec.concretize()
pkg = spec.package
pkg.do_install()
install_times = os.path.join(pkg.prefix, ".spack", 'install_times.json')
assert os.path.isfile(install_times)
with open(install_times, 'r') as timefile:
times = sjson.load(timefile.read())
phases = [x['name'] for x in times['phases']]
total = sum([x['seconds'] for x in times['phases']])
for name in ['one', 'two', 'three', 'install']:
assert name in phases
assert abs(total - times['total']['seconds']) < 5
def test_flatten_deps(
install_mockery, mock_fetch, mutable_mock_repo):
    # Unfortunately, executing the 'flatten-deps' spec's installation does
    # not affect code coverage results, so be explicit here.
spec = Spec('dependent-install')
spec.concretize()
pkg = spec.package
pkg.do_install()
# Demonstrate that the directory does not appear under the spec
# prior to the flatten operation.
dependency_name = 'dependency-install'
assert dependency_name not in os.listdir(pkg.prefix)
# Flatten the dependencies and ensure the dependency directory is there.
spack.package.flatten_dependencies(spec, pkg.prefix)
dependency_dir = os.path.join(pkg.prefix, dependency_name)
assert os.path.isdir(dependency_dir)
@pytest.fixture()
def install_upstream(tmpdir_factory, gen_mock_layout, install_mockery):
mock_db_root = str(tmpdir_factory.mktemp('mock_db_root'))
prepared_db = spack.database.Database(mock_db_root)
upstream_layout = gen_mock_layout('/a/')
def _install_upstream(*specs):
for spec_str in specs:
s = spack.spec.Spec(spec_str).concretized()
prepared_db.add(s, upstream_layout)
downstream_root = str(tmpdir_factory.mktemp('mock_downstream_db_root'))
db_for_test = spack.database.Database(
downstream_root, upstream_dbs=[prepared_db]
)
store = spack.store.Store(downstream_root)
store.db = db_for_test
return store, upstream_layout
return _install_upstream
def test_installed_upstream_external(install_upstream, mock_fetch):
s, _ = install_upstream('externaltool')
with spack.store.use_store(s):
dependent = spack.spec.Spec('externaltest')
dependent.concretize()
new_dependency = dependent['externaltool']
assert new_dependency.external
assert new_dependency.prefix == '/path/to/external_tool'
dependent.package.do_install()
assert not os.path.exists(new_dependency.prefix)
assert os.path.exists(dependent.prefix)
def test_installed_upstream(install_upstream, mock_fetch):
s, upstream_layout = install_upstream('dependency-install')
with spack.store.use_store(s):
dependency = spack.spec.Spec('dependency-install').concretized()
dependent = spack.spec.Spec('dependent-install').concretized()
new_dependency = dependent['dependency-install']
assert new_dependency.package.installed_upstream
assert (new_dependency.prefix ==
upstream_layout.path_for_spec(dependency))
dependent.package.do_install()
assert not os.path.exists(new_dependency.prefix)
assert os.path.exists(dependent.prefix)
@pytest.mark.disable_clean_stage_check
def test_partial_install_keep_prefix(install_mockery, mock_fetch):
spec = Spec('canfail').concretized()
pkg = spack.repo.get(spec)
# Normally the stage should start unset, but other tests set it
pkg._stage = None
remove_prefix = spack.package.Package.remove_prefix
try:
# If remove_prefix is called at any point in this test, that is an
# error
pkg.succeed = False # make the build fail
spack.package.Package.remove_prefix = mock_remove_prefix
with pytest.raises(spack.build_environment.ChildError):
pkg.do_install(keep_prefix=True)
assert os.path.exists(pkg.prefix)
# must clear failure markings for the package before re-installing it
spack.store.db.clear_failure(spec, True)
pkg.succeed = True # make the build succeed
pkg.stage = MockStage(pkg.stage)
pkg.do_install(keep_prefix=True)
assert pkg.installed
assert not pkg.stage.test_destroyed
finally:
spack.package.Package.remove_prefix = remove_prefix
def test_second_install_no_overwrite_first(install_mockery, mock_fetch):
spec = Spec('canfail').concretized()
pkg = spack.repo.get(spec)
remove_prefix = spack.package.Package.remove_prefix
try:
spack.package.Package.remove_prefix = mock_remove_prefix
pkg.succeed = True
pkg.do_install()
assert pkg.installed
# If Package.install is called after this point, it will fail
pkg.succeed = False
pkg.do_install()
finally:
spack.package.Package.remove_prefix = remove_prefix
def test_install_prefix_collision_fails(config, mock_fetch, mock_packages, tmpdir):
projections = {'all': 'all-specs-project-to-this-prefix'}
store = spack.store.Store(str(tmpdir), projections=projections)
with spack.store.use_store(store):
with spack.config.override('config:checksum', False):
pkg_a = Spec('libelf@0.8.13').concretized().package
pkg_b = Spec('libelf@0.8.12').concretized().package
pkg_a.do_install()
with pytest.raises(InstallError, match="Install prefix collision"):
pkg_b.do_install()
def test_store(install_mockery, mock_fetch):
spec = Spec('cmake-client').concretized()
pkg = spec.package
pkg.do_install()
@pytest.mark.disable_clean_stage_check
def test_failing_build(install_mockery, mock_fetch, capfd):
spec = Spec('failing-build').concretized()
pkg = spec.package
with pytest.raises(spack.build_environment.ChildError):
pkg.do_install()
assert 'InstallError: Expected Failure' in capfd.readouterr()[0]
class MockInstallError(spack.error.SpackError):
pass
def test_uninstall_by_spec_errors(mutable_database):
# Try to uninstall a spec that has not been installed
spec = Spec('dependent-install')
spec.concretize()
with pytest.raises(InstallError, match="is not installed"):
PackageBase.uninstall_by_spec(spec)
# Try an unforced uninstall of a spec with dependencies
rec = mutable_database.get_record('mpich')
with pytest.raises(PackageStillNeededError, match="Cannot uninstall"):
PackageBase.uninstall_by_spec(rec.spec)
@pytest.mark.disable_clean_stage_check
def test_nosource_pkg_install(
install_mockery, mock_fetch, mock_packages, capfd):
spec = Spec('nosource').concretized()
pkg = spec.package
# Make sure install works even though there is no associated code.
pkg.do_install()
out = capfd.readouterr()
assert "Installing dependency-install" in out[0]
assert "Missing a source id for nosource" in out[1]
def test_nosource_pkg_install_post_install(
install_mockery, mock_fetch, mock_packages):
spec = Spec('nosource-install').concretized()
pkg = spec.package
# Make sure both the install and post-install package methods work.
pkg.do_install()
# Ensure the file created in the package's `install` method exists.
install_txt = os.path.join(spec.prefix, 'install.txt')
assert os.path.isfile(install_txt)
post_install_txt = os.path.join(spec.prefix, 'post-install.txt')
assert os.path.isfile(post_install_txt)
def test_pkg_build_paths(install_mockery):
# Get a basic concrete spec for the trivial install package.
spec = Spec('trivial-install-test-package').concretized()
log_path = spec.package.log_path
assert log_path.endswith(_spack_build_logfile)
env_path = spec.package.env_path
assert env_path.endswith(_spack_build_envfile)
# Backward compatibility checks
log_dir = os.path.dirname(log_path)
fs.mkdirp(log_dir)
with fs.working_dir(log_dir):
# Start with the older of the previous log filenames
older_log = 'spack-build.out'
fs.touch(older_log)
assert spec.package.log_path.endswith(older_log)
# Now check the newer log filename
last_log = 'spack-build.txt'
os.rename(older_log, last_log)
assert spec.package.log_path.endswith(last_log)
# Check the old environment file
last_env = 'spack-build.env'
os.rename(last_log, last_env)
assert spec.package.env_path.endswith(last_env)
# Cleanup
shutil.rmtree(log_dir)
def test_pkg_install_paths(install_mockery):
# Get a basic concrete spec for the trivial install package.
spec = Spec('trivial-install-test-package').concretized()
log_path = os.path.join(spec.prefix, '.spack', _spack_build_logfile)
assert spec.package.install_log_path == log_path
env_path = os.path.join(spec.prefix, '.spack', _spack_build_envfile)
assert spec.package.install_env_path == env_path
args_path = os.path.join(spec.prefix, '.spack', _spack_configure_argsfile)
assert spec.package.install_configure_args_path == args_path
# Backward compatibility checks
log_dir = os.path.dirname(log_path)
fs.mkdirp(log_dir)
with fs.working_dir(log_dir):
# Start with the older of the previous install log filenames
older_log = 'build.out'
fs.touch(older_log)
assert spec.package.install_log_path.endswith(older_log)
# Now check the newer install log filename
last_log = 'build.txt'
os.rename(older_log, last_log)
assert spec.package.install_log_path.endswith(last_log)
# Check the old install environment file
last_env = 'build.env'
os.rename(last_log, last_env)
assert spec.package.install_env_path.endswith(last_env)
# Cleanup
shutil.rmtree(log_dir)
def test_log_install_without_build_files(install_mockery):
# Get a basic concrete spec for the trivial install package.
spec = Spec('trivial-install-test-package').concretized()
# Attempt installing log without the build log file
with pytest.raises(IOError, match="No such file or directory"):
spack.installer.log(spec.package)
def test_log_install_with_build_files(install_mockery, monkeypatch):
config_log = 'config.log'
# Retain the original function for use in the monkey patch that is used
# to raise an exception under the desired condition for test coverage.
orig_install_fn = fs.install
def _install(src, dest):
orig_install_fn(src, dest)
if src.endswith(config_log):
raise Exception('Mock log install error')
monkeypatch.setattr(fs, 'install', _install)
spec = Spec('trivial-install-test-package').concretized()
# Set up mock build files and try again to include archive failure
log_path = spec.package.log_path
log_dir = os.path.dirname(log_path)
fs.mkdirp(log_dir)
with fs.working_dir(log_dir):
fs.touch(log_path)
fs.touch(spec.package.env_path)
fs.touch(spec.package.configure_args_path)
install_path = os.path.dirname(spec.package.install_log_path)
fs.mkdirp(install_path)
source = spec.package.stage.source_path
config = os.path.join(source, 'config.log')
fs.touchp(config)
spec.package.archive_files = ['missing', '..', config]
spack.installer.log(spec.package)
assert os.path.exists(spec.package.install_log_path)
assert os.path.exists(spec.package.install_env_path)
assert os.path.exists(spec.package.install_configure_args_path)
archive_dir = os.path.join(install_path, 'archived-files')
source_dir = os.path.dirname(source)
rel_config = os.path.relpath(config, source_dir)
assert os.path.exists(os.path.join(archive_dir, rel_config))
assert not os.path.exists(os.path.join(archive_dir, 'missing'))
expected_errs = [
'OUTSIDE SOURCE PATH', # for '..'
'FAILED TO ARCHIVE' # for rel_config
]
with open(os.path.join(archive_dir, 'errors.txt'), 'r') as fd:
for ln, expected in zip(fd, expected_errs):
assert expected in ln
# Cleanup
shutil.rmtree(log_dir)
def test_unconcretized_install(install_mockery, mock_fetch, mock_packages):
spec = Spec('trivial-install-test-package')
with pytest.raises(ValueError, match='must have a concrete spec'):
spec.package.do_install()
with pytest.raises(ValueError, match="only patch concrete packages"):
spec.package.do_patch()
def test_install_error():
try:
msg = 'test install error'
long_msg = 'this is the long version of test install error'
raise InstallError(msg, long_msg=long_msg)
except Exception as exc:
assert exc.__class__.__name__ == 'InstallError'
assert exc.message == msg
assert exc.long_message == long_msg
| true | true |
f71c77d1c0f627d4c0d8120689ae89c7e1a43d86 | 2,577 | py | Python | agogosml_cli/cli/templates/{{cookiecutter.PROJECT_NAME_SLUG}}/e2e/testgen/main.py | cicorias/agogosml | 60e0b52c2fc721bdd965aadaf8c1afd1ddb9b7d1 | [
"MIT"
] | 13 | 2018-12-07T21:02:20.000Z | 2019-02-22T14:36:31.000Z | agogosml_cli/cli/templates/{{cookiecutter.PROJECT_NAME_SLUG}}/e2e/testgen/main.py | cicorias/agogosml | 60e0b52c2fc721bdd965aadaf8c1afd1ddb9b7d1 | [
"MIT"
] | 43 | 2018-11-30T11:31:43.000Z | 2019-04-03T16:09:06.000Z | agogosml_cli/cli/templates/{{cookiecutter.PROJECT_NAME_SLUG}}/e2e/testgen/main.py | cicorias/agogosml | 60e0b52c2fc721bdd965aadaf8c1afd1ddb9b7d1 | [
"MIT"
] | 13 | 2018-11-29T00:31:29.000Z | 2019-02-22T18:50:28.000Z | import json
import os
import sys
import time
from agogosml.common.abstract_streaming_client import find_streaming_clients
from agogosml.tools.sender import send
from agogosml.tools.receiver import receive
eh_base_config = {
"EVENT_HUB_NAMESPACE": os.getenv("EVENT_HUB_NAMESPACE"),
"EVENT_HUB_NAME": os.getenv("EVENT_HUB_NAME_INPUT"),
"EVENT_HUB_SAS_POLICY": os.getenv("EVENT_HUB_SAS_POLICY"),
"EVENT_HUB_SAS_KEY": os.getenv("EVENT_HUB_SAS_KEY_INPUT"),
}
eh_send_config = {
**eh_base_config,
'LEASE_CONTAINER_NAME': os.getenv('LEASE_CONTAINER_NAME_INPUT')
}
eh_receive_config = {
**eh_base_config,
"AZURE_STORAGE_ACCOUNT": os.getenv("AZURE_STORAGE_ACCOUNT"),
"AZURE_STORAGE_ACCESS_KEY": os.getenv("AZURE_STORAGE_ACCESS_KEY"),
"LEASE_CONTAINER_NAME": os.getenv("LEASE_CONTAINER_NAME_OUTPUT"),
"EVENT_HUB_CONSUMER_GROUP": os.getenv("EVENT_HUB_CONSUMER_GROUP"),
"TIMEOUT": 10,
}
kafka_base_config = {
'KAFKA_ADDRESS': os.getenv("KAFKA_ADDRESS"),
'TIMEOUT': os.getenv('KAFKA_TIMEOUT'),
# These configs are specific to Event Hub Head for Kafka
'EVENTHUB_KAFKA_CONNECTION_STRING': os.getenv('EVENTHUB_KAFKA_CONNECTION_STRING'),
'SSL_CERT_LOCATION': os.getenv('SSL_CERT_LOCATION') # /usr/local/etc/openssl/cert.pem
}
kafka_receive_config = {
**kafka_base_config,
'KAFKA_CONSUMER_GROUP': os.getenv('KAFKA_CONSUMER_GROUP'),
}
kafka_send_config = {
**kafka_base_config,
'KAFKA_TOPIC': os.getenv('KAFKA_TOPIC_INPUT')
}
def put_messages_on_input_queue(msg_type: str):
with open('test_messages.json', encoding='utf-8') as f:
test_messages = json.load(f)
send_client = find_streaming_clients()[msg_type]
send_config = {**eh_send_config, **kafka_send_config}
send(test_messages, send_client, send_config)
def receive_messages_on_queue(kafka_topic: str, msg_type: str):
receive_client = find_streaming_clients()[msg_type]
receive_config = {**eh_receive_config, **kafka_receive_config, **{'KAFKA_TOPIC': os.getenv(kafka_topic)}}
return receive(sys.stdout, receive_client, receive_config)
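# Added commentary (not part of the original template): the *_config dicts
# above rely on dict-unpacking merges, where later mappings win on key
# conflicts. A minimal sketch with made-up values:
#
#   base = {'TIMEOUT': 10}
#   override = {'TIMEOUT': 30, 'KAFKA_TOPIC': 'input'}
#   {**base, **override}  # -> {'TIMEOUT': 30, 'KAFKA_TOPIC': 'input'}
#
# This is why the per-call {'KAFKA_TOPIC': ...} dict is unpacked last above.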
def cli():
msg_type = os.getenv("MESSAGING_TYPE")
put_messages_on_input_queue(msg_type)
time.sleep(3)
input_received = receive_messages_on_queue('KAFKA_TOPIC_INPUT', msg_type)
print(input_received)
time.sleep(20)
output_received = receive_messages_on_queue('KAFKA_TOPIC_OUTPUT', msg_type)
print(output_received)
if output_received == "[]":
sys.exit(1)
else:
sys.exit(0)
if __name__ == "__main__":
cli()
| 28.955056 | 109 | 0.73962 |
import json
import os
import sys
import time
from agogosml.common.abstract_streaming_client import find_streaming_clients
from agogosml.tools.sender import send
from agogosml.tools.receiver import receive
eh_base_config = {
"EVENT_HUB_NAMESPACE": os.getenv("EVENT_HUB_NAMESPACE"),
"EVENT_HUB_NAME": os.getenv("EVENT_HUB_NAME_INPUT"),
"EVENT_HUB_SAS_POLICY": os.getenv("EVENT_HUB_SAS_POLICY"),
"EVENT_HUB_SAS_KEY": os.getenv("EVENT_HUB_SAS_KEY_INPUT"),
}
eh_send_config = {
**eh_base_config,
'LEASE_CONTAINER_NAME': os.getenv('LEASE_CONTAINER_NAME_INPUT')
}
eh_receive_config = {
**eh_base_config,
"AZURE_STORAGE_ACCOUNT": os.getenv("AZURE_STORAGE_ACCOUNT"),
"AZURE_STORAGE_ACCESS_KEY": os.getenv("AZURE_STORAGE_ACCESS_KEY"),
"LEASE_CONTAINER_NAME": os.getenv("LEASE_CONTAINER_NAME_OUTPUT"),
"EVENT_HUB_CONSUMER_GROUP": os.getenv("EVENT_HUB_CONSUMER_GROUP"),
"TIMEOUT": 10,
}
kafka_base_config = {
'KAFKA_ADDRESS': os.getenv("KAFKA_ADDRESS"),
'TIMEOUT': os.getenv('KAFKA_TIMEOUT'),
'EVENTHUB_KAFKA_CONNECTION_STRING': os.getenv('EVENTHUB_KAFKA_CONNECTION_STRING'),
'SSL_CERT_LOCATION': os.getenv('SSL_CERT_LOCATION')
}
kafka_receive_config = {
**kafka_base_config,
'KAFKA_CONSUMER_GROUP': os.getenv('KAFKA_CONSUMER_GROUP'),
}
kafka_send_config = {
**kafka_base_config,
'KAFKA_TOPIC': os.getenv('KAFKA_TOPIC_INPUT')
}
def put_messages_on_input_queue(msg_type: str):
with open('test_messages.json', encoding='utf-8') as f:
test_messages = json.load(f)
send_client = find_streaming_clients()[msg_type]
send_config = {**eh_send_config, **kafka_send_config}
send(test_messages, send_client, send_config)
def receive_messages_on_queue(kafka_topic: str, msg_type: str):
receive_client = find_streaming_clients()[msg_type]
receive_config = {**eh_receive_config, **kafka_receive_config, **{'KAFKA_TOPIC': os.getenv(kafka_topic)}}
return receive(sys.stdout, receive_client, receive_config)
def cli():
msg_type = os.getenv("MESSAGING_TYPE")
put_messages_on_input_queue(msg_type)
time.sleep(3)
input_received = receive_messages_on_queue('KAFKA_TOPIC_INPUT', msg_type)
print(input_received)
time.sleep(20)
output_received = receive_messages_on_queue('KAFKA_TOPIC_OUTPUT', msg_type)
print(output_received)
if output_received == "[]":
sys.exit(1)
else:
sys.exit(0)
if __name__ == "__main__":
cli()
| true | true |
f71c78a611dd59c34a836099368a08f02076670b | 9,173 | py | Python | tensorflow/contrib/timeseries/python/timeseries/state_space_models/varma.py | uve/tensorflow | e08079463bf43e5963acc41da1f57e95603f8080 | [
"Apache-2.0"
] | null | null | null | tensorflow/contrib/timeseries/python/timeseries/state_space_models/varma.py | uve/tensorflow | e08079463bf43e5963acc41da1f57e95603f8080 | [
"Apache-2.0"
] | null | null | null | tensorflow/contrib/timeseries/python/timeseries/state_space_models/varma.py | uve/tensorflow | e08079463bf43e5963acc41da1f57e95603f8080 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Multivariate autoregressive model (vector autoregression).
Implements the following model (num_blocks = max(ar_order, ma_order + 1)):
y(t, 1) = \sum_{i=1}^{ar_order} ar_coefs[i] * y(t - 1, i)
y(t, i) = y(t - 1, i - 1) + ma_coefs[i - 1] * e(t) for 1 < i < num_blocks
y(t, num_blocks) = y(t - 1, num_blocks - 1) + e(t)
Where e(t) are Gaussian with zero mean and learned covariance.
Each element of ar_coefs and ma_coefs is a [num_features x num_features]
matrix. Each y(t, i) is a vector of length num_features. Indices in the above
equations are one-based. Initial conditions y(0, i) come from prior state (which
may either be learned or left as a constant with high prior covariance).
If ar_order > ma_order, the observation model is:
y(t, 1) + observation_noise(t)
If ma_order >= ar_order, it is (to observe the moving average component):
y(t, 1) + y(t, num_blocks) + observation_noise(t)
Where observation_noise(t) are Gaussian with zero mean and learned covariance.
This implementation uses a formulation which puts all of the autoregressive
coefficients in the transition equation for the observed component, which
enables learning using truncated backpropagation. Noise is not applied directly
to the observed component (with the exception of standard observation noise),
which further aids learning of the autoregressive coefficients when VARMA is in
an ensemble with other models (in which case having an observation noise term is
usually unavoidable).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.timeseries.python.timeseries import math_utils
from tensorflow.contrib.timeseries.python.timeseries.state_space_models import state_space_model
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
class VARMA(state_space_model.StateSpaceModel):
"""A VARMA model implementation as a special case of the state space model."""
def __init__(self,
autoregressive_order,
moving_average_order,
configuration=state_space_model.StateSpaceModelConfiguration()):
"""Construct a VARMA model.
The size of the latent state for this model is:
num_features * max(autoregressive_order, moving_average_order + 1)
Square matrices of this size are constructed and multiplied.
Args:
autoregressive_order: The maximum autoregressive lag.
moving_average_order: The maximum moving average lag, after which
transient deviations are expected to return to their long-term mean.
configuration: A StateSpaceModelConfiguration object.
"""
self.ar_order = autoregressive_order
self.ma_order = moving_average_order
self.state_num_blocks = max(autoregressive_order, moving_average_order + 1)
super(VARMA, self).__init__(configuration=configuration)
self.state_dimension = self.state_num_blocks * self.num_features
def _define_parameters(self, observation_transition_tradeoff_log=None):
with variable_scope.variable_scope(self._variable_scope):
# TODO(allenl): Evaluate parameter transformations for AR/MA coefficients
# which improve interpretability/stability.
self.ar_coefs = variable_scope.get_variable(
name="ar_coefs",
shape=[self.num_features, self.num_features, self.ar_order],
dtype=self.dtype,
initializer=init_ops.zeros_initializer())
self.ma_coefs = variable_scope.get_variable(
name="ma_coefs",
initializer=array_ops.tile(
linalg_ops.eye(self.num_features, dtype=self.dtype)[None, :, :],
[self.ma_order, 1, 1]),
dtype=self.dtype)
super(VARMA, self)._define_parameters(
observation_transition_tradeoff_log=observation_transition_tradeoff_log)
def get_state_transition(self):
"""Construct state transition matrix from VARMA parameters.
Returns:
the state transition matrix. It has shape
[self.state_dimension, self.state_dimension].
"""
# Pad any unused AR blocks with zeros. The extra state is necessary if
# ma_order >= ar_order.
ar_coefs_padded = array_ops.reshape(
array_ops.pad(self.ar_coefs,
[[0, 0], [0, 0],
[0, self.state_num_blocks - self.ar_order]]),
[self.num_features, self.state_dimension])
shift_matrix = array_ops.pad(
linalg_ops.eye(
(self.state_num_blocks - 1) * self.num_features, dtype=self.dtype),
[[0, 0], [0, self.num_features]])
return array_ops.concat([ar_coefs_padded, shift_matrix], axis=0)
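  # Added illustration (assumed sizes, not part of the original source): with
  # num_features = 2 and ar_order = state_num_blocks = 3, ar_coefs_padded is
  # the [2 x 6] block row [A1 A2 A3], shift_matrix is the [4 x 6] matrix
  # [I 0 0; 0 I 0] built from 2x2 blocks, and the concat above yields the
  # 6 x 6 companion-form transition [A1 A2 A3; I 0 0; 0 I 0].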
def get_noise_transform(self):
"""Construct state noise transform matrix from VARMA parameters.
Returns:
the state noise transform matrix. It has shape
[self.state_dimension, self.num_features].
"""
# Noise is broadcast, through the moving average coefficients, to
# un-observed parts of the latent state.
ma_coefs_padded = array_ops.reshape(
array_ops.pad(self.ma_coefs,
[[self.state_num_blocks - 1 - self.ma_order, 0], [0, 0],
[0, 0]]),
[(self.state_num_blocks - 1) * self.num_features, self.num_features],
name="noise_transform")
# Deterministically apply noise to the oldest component.
return array_ops.concat(
[ma_coefs_padded,
linalg_ops.eye(self.num_features, dtype=self.dtype)],
axis=0)
def get_observation_model(self, times):
"""Construct observation model matrix from VARMA parameters.
Args:
times: A [batch size] vector indicating the times observation models are
requested for. Unused.
Returns:
the observation model matrix. It has shape
[self.num_features, self.state_dimension].
"""
del times # StateSpaceModel will broadcast along the batch dimension
if self.ar_order > self.ma_order or self.state_num_blocks < 2:
return array_ops.pad(
linalg_ops.eye(self.num_features, dtype=self.dtype),
[[0, 0], [0, self.num_features * (self.state_num_blocks - 1)]],
name="observation_model")
else:
# Add a second observed component which "catches" the accumulated moving
# average errors as they reach the end of the state. If ar_order >
# ma_order, this is unnecessary, since accumulated errors cycle naturally.
return array_ops.concat(
[
array_ops.pad(
linalg_ops.eye(self.num_features, dtype=self.dtype),
[[0, 0], [0,
self.num_features * (self.state_num_blocks - 2)]]),
linalg_ops.eye(self.num_features, dtype=self.dtype)
],
axis=1,
name="observation_model")
def get_state_transition_noise_covariance(
self, minimum_initial_variance=1e-5):
# Most state space models use only an explicit observation noise term to
# model deviations from expectations, and so a low initial transition noise
# parameter is helpful there. Since deviations from expectations are also
# modeled as transition noise in VARMA, we set its initial value based on a
# slight over-estimate empirical observation noise.
if self._input_statistics is not None:
feature_variance = self._scale_variance(
self._input_statistics.series_start_moments.variance)
initial_transition_noise_scale = math_ops.log(
math_ops.maximum(
math_ops.reduce_mean(feature_variance), minimum_initial_variance))
else:
initial_transition_noise_scale = 0.
state_noise_transform = ops.convert_to_tensor(
self.get_noise_transform(), dtype=self.dtype)
state_noise_dimension = tensor_shape.dimension_value(
state_noise_transform.shape[1])
return math_utils.variable_covariance_matrix(
state_noise_dimension, "state_transition_noise",
dtype=self.dtype,
initial_overall_scale_log=initial_transition_noise_scale)
| 45.636816 | 97 | 0.692903 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.timeseries.python.timeseries import math_utils
from tensorflow.contrib.timeseries.python.timeseries.state_space_models import state_space_model
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
class VARMA(state_space_model.StateSpaceModel):
def __init__(self,
autoregressive_order,
moving_average_order,
configuration=state_space_model.StateSpaceModelConfiguration()):
self.ar_order = autoregressive_order
self.ma_order = moving_average_order
self.state_num_blocks = max(autoregressive_order, moving_average_order + 1)
super(VARMA, self).__init__(configuration=configuration)
self.state_dimension = self.state_num_blocks * self.num_features
def _define_parameters(self, observation_transition_tradeoff_log=None):
with variable_scope.variable_scope(self._variable_scope):
self.ar_coefs = variable_scope.get_variable(
name="ar_coefs",
shape=[self.num_features, self.num_features, self.ar_order],
dtype=self.dtype,
initializer=init_ops.zeros_initializer())
self.ma_coefs = variable_scope.get_variable(
name="ma_coefs",
initializer=array_ops.tile(
linalg_ops.eye(self.num_features, dtype=self.dtype)[None, :, :],
[self.ma_order, 1, 1]),
dtype=self.dtype)
super(VARMA, self)._define_parameters(
observation_transition_tradeoff_log=observation_transition_tradeoff_log)
def get_state_transition(self):
ar_coefs_padded = array_ops.reshape(
array_ops.pad(self.ar_coefs,
[[0, 0], [0, 0],
[0, self.state_num_blocks - self.ar_order]]),
[self.num_features, self.state_dimension])
shift_matrix = array_ops.pad(
linalg_ops.eye(
(self.state_num_blocks - 1) * self.num_features, dtype=self.dtype),
[[0, 0], [0, self.num_features]])
return array_ops.concat([ar_coefs_padded, shift_matrix], axis=0)
def get_noise_transform(self):
ma_coefs_padded = array_ops.reshape(
array_ops.pad(self.ma_coefs,
[[self.state_num_blocks - 1 - self.ma_order, 0], [0, 0],
[0, 0]]),
[(self.state_num_blocks - 1) * self.num_features, self.num_features],
name="noise_transform")
return array_ops.concat(
[ma_coefs_padded,
linalg_ops.eye(self.num_features, dtype=self.dtype)],
axis=0)
def get_observation_model(self, times):
del times
if self.ar_order > self.ma_order or self.state_num_blocks < 2:
return array_ops.pad(
linalg_ops.eye(self.num_features, dtype=self.dtype),
[[0, 0], [0, self.num_features * (self.state_num_blocks - 1)]],
name="observation_model")
else:
return array_ops.concat(
[
array_ops.pad(
linalg_ops.eye(self.num_features, dtype=self.dtype),
[[0, 0], [0,
self.num_features * (self.state_num_blocks - 2)]]),
linalg_ops.eye(self.num_features, dtype=self.dtype)
],
axis=1,
name="observation_model")
def get_state_transition_noise_covariance(
self, minimum_initial_variance=1e-5):
if self._input_statistics is not None:
feature_variance = self._scale_variance(
self._input_statistics.series_start_moments.variance)
initial_transition_noise_scale = math_ops.log(
math_ops.maximum(
math_ops.reduce_mean(feature_variance), minimum_initial_variance))
else:
initial_transition_noise_scale = 0.
state_noise_transform = ops.convert_to_tensor(
self.get_noise_transform(), dtype=self.dtype)
state_noise_dimension = tensor_shape.dimension_value(
state_noise_transform.shape[1])
return math_utils.variable_covariance_matrix(
state_noise_dimension, "state_transition_noise",
dtype=self.dtype,
initial_overall_scale_log=initial_transition_noise_scale)
| true | true |
f71c792738a6eb005cce3420d1463f363558dd6e | 898 | py | Python | Lms/migrations/versions/4b83761bf52a_users_table.py | stsl256/LMS_for_tinkoff | 5ace2a9d8f8e6c80660171502de6689f746535ed | [
"MIT"
] | null | null | null | Lms/migrations/versions/4b83761bf52a_users_table.py | stsl256/LMS_for_tinkoff | 5ace2a9d8f8e6c80660171502de6689f746535ed | [
"MIT"
] | null | null | null | Lms/migrations/versions/4b83761bf52a_users_table.py | stsl256/LMS_for_tinkoff | 5ace2a9d8f8e6c80660171502de6689f746535ed | [
"MIT"
] | 1 | 2020-12-09T00:41:26.000Z | 2020-12-09T00:41:26.000Z | """users table
Revision ID: 4b83761bf52a
Revises: 0d3bdf63aacc
Create Date: 2029-12-29 17:17:20.500426
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '4b83761bf52a'
down_revision = '0d3bdf63aacc'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('user', sa.Column('city', sa.String(length=64), nullable=True))
op.add_column('user', sa.Column('description', sa.String(length=256), nullable=True))
op.add_column('user', sa.Column('phone', sa.String(length=64), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('user', 'phone')
op.drop_column('user', 'description')
op.drop_column('user', 'city')
    # ### end Alembic commands ###
| 29.933333 | 89 | 0.688196 |
from alembic import op
import sqlalchemy as sa
revision = '4b83761bf52a'
down_revision = '0d3bdf63aacc'
branch_labels = None
depends_on = None
def upgrade():
    op.add_column('user', sa.Column('city', sa.String(length=64), nullable=True))
    op.add_column('user', sa.Column('description', sa.String(length=256), nullable=True))
    op.add_column('user', sa.Column('phone', sa.String(length=64), nullable=True))
def downgrade():
    op.drop_column('user', 'phone')
    op.drop_column('user', 'description')
    op.drop_column('user', 'city')
| true | true |
f71c7941417b4404871df8bb404ec9f2347ad2f0 | 1,254 | py | Python | var/spack/repos/builtin/packages/dpdk/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/dpdk/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 8 | 2021-11-09T20:28:40.000Z | 2022-03-15T03:26:33.000Z | var/spack/repos/builtin/packages/dpdk/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2019-02-08T20:37:20.000Z | 2019-03-31T15:19:26.000Z | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class Dpdk(MakefilePackage):
"""DPDK is a set of libraries and drivers for fast packet processing.
It supports many processor architectures and both FreeBSD and Linux."""
homepage = "https://github.com/DPDK/dpdk"
url = "https://github.com/DPDK/dpdk/archive/v19.11.tar.gz"
version('20.02', sha256='29e56ea8e47e30110ecb881fa5a37125a865dd2d45b61f68e93e334caaab16b7')
version('19.11', sha256='ce1befb20a5e5c5399b326a39cfa23314a5229c0ced2553f53b09b1ae630706b')
version('19.08', sha256='1ceff1a6f4f8d5f6f62c1682097249227ac5225ccd9638e0af09f5411c681038')
version('19.05', sha256='5fea95cb726e6adaa506dab330e79563ccd4dacf03f126c826aabdced605d32b')
version('19.02', sha256='04885d32c86fff5aefcfffdb8257fed405233602dbcd22f8298be13c2e285a50')
conflicts('target=aarch64:', msg='DPDK is not supported on aarch64.')
depends_on('numactl')
def build(self, spec, prefix):
make('defconfig')
make()
def install(self, spec, prefix):
install_tree('.', prefix)
| 39.1875 | 95 | 0.748804 |
from spack.package import *
class Dpdk(MakefilePackage):
homepage = "https://github.com/DPDK/dpdk"
url = "https://github.com/DPDK/dpdk/archive/v19.11.tar.gz"
version('20.02', sha256='29e56ea8e47e30110ecb881fa5a37125a865dd2d45b61f68e93e334caaab16b7')
version('19.11', sha256='ce1befb20a5e5c5399b326a39cfa23314a5229c0ced2553f53b09b1ae630706b')
version('19.08', sha256='1ceff1a6f4f8d5f6f62c1682097249227ac5225ccd9638e0af09f5411c681038')
version('19.05', sha256='5fea95cb726e6adaa506dab330e79563ccd4dacf03f126c826aabdced605d32b')
version('19.02', sha256='04885d32c86fff5aefcfffdb8257fed405233602dbcd22f8298be13c2e285a50')
conflicts('target=aarch64:', msg='DPDK is not supported on aarch64.')
depends_on('numactl')
def build(self, spec, prefix):
make('defconfig')
make()
def install(self, spec, prefix):
install_tree('.', prefix)
| true | true |
f71c7bece95f106b2a9bb71db5ac6017fee41c58 | 1,757 | py | Python | spdx_lint/lint.py | sthagen/verbose-pancake | f12b38c8aea8aee8f7a593a4669dfe5e0a447ba5 | [
"MIT"
] | 1 | 2021-02-28T11:39:00.000Z | 2021-02-28T11:39:00.000Z | spdx_lint/lint.py | sthagen/verbose-pancake | f12b38c8aea8aee8f7a593a4669dfe5e0a447ba5 | [
"MIT"
] | 26 | 2021-02-28T12:07:04.000Z | 2021-02-28T13:04:27.000Z | spdx_lint/lint.py | sthagen/verbose-pancake | f12b38c8aea8aee8f7a593a4669dfe5e0a447ba5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# pylint: disable=expression-not-assigned,line-too-long
SPDX_2_2_DCI_TV = {
"SPDXVersion": "SPDX-2.2",
"DataLicense": "CC0-1.0",
"SPDXID": "SPDXRef-DOCUMENT",
"DocumentName": "$_SINGLE_LINE",
"DocumentNamespace": "$_URI_MINUS_PART",
"[ExternalDocumentRef]": [
"DocumentRef-$_IDSTRING $_SPDX_DOCUMENT_URI $_PREFIX_COLON_CHECKSUM",
],
"[LicenseListVersion]": "$_MAJOR.$_MINOR",
"Creator": [
"Person: $_PERSON_NAME [($_EMAIL)]",
"Organization: $_ORGANIZATION [($_EMAIL)]",
"Tool: $_TOOL_IDENTIFIED-$_VERSION",
],
"Created": "%Y-%m-%dT%H:%M:%SZ",
"[CreatorComment]": "<text>$_MULTI_LINE_TEXT</text>",
"[DocumentComment]": "<text>$_MULTI_LINE_TEXT</text>",
}
SPDX_2_2_DCI_JSON = {  # Reverse-engineered from round-trip conversion - TODO(sthagen) later use json schema
"SPDXID": "SPDXRef-DOCUMENT",
"spdxVersion": "SPDX-2.2",
"creationInfo": {
"created": "%Y-%m-%dT%H:%M:%SZ",
"creators": [
"Person: $_PERSON_NAME [($_EMAIL)]",
"Organization: $_ORGANIZATION [($_EMAIL)]",
"Tool: $_TOOL_IDENTIFIED-$_VERSION",
]
},
"name": "$_SINGLE_LINE",
"dataLicense": "CC0-1.0",
"documentNamespace": "$_URI_MINUS_PART",
}
def spdx_dci_is_valid(sbom):
"""Shallow key level validation for DCI part of SPDX documents."""
if not sbom:
return False
for key in SPDX_2_2_DCI_JSON.keys():
if key.startswith("["):
continue
try:
if not sbom.get(key):
return False
except AttributeError as e:
print(str(sbom), e) # TODO(sthagen) when I am a grown up, I want to really log
return True
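# Added usage sketch (hypothetical SBOM fragment, not part of the original
# module); only the presence of top-level non-bracketed keys is checked:
#
#   sbom = {
#       "SPDXID": "SPDXRef-DOCUMENT",
#       "spdxVersion": "SPDX-2.2",
#       "creationInfo": {"created": "2021-01-01T00:00:00Z", "creators": []},
#       "name": "example",
#       "dataLicense": "CC0-1.0",
#       "documentNamespace": "https://example.com/spdx/doc-1",
#   }
#   spdx_dci_is_valid(sbom)  # True: every required key is present and truthy
#   spdx_dci_is_valid({})    # False: empty or missing documents are rejected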
| 31.375 | 109 | 0.592487 |
SPDX_2_2_DCI_TV = {
"SPDXVersion": "SPDX-2.2",
"DataLicense": "CC0-1.0",
"SPDXID": "SPDXRef-DOCUMENT",
"DocumentName": "$_SINGLE_LINE",
"DocumentNamespace": "$_URI_MINUS_PART",
"[ExternalDocumentRef]": [
"DocumentRef-$_IDSTRING $_SPDX_DOCUMENT_URI $_PREFIX_COLON_CHECKSUM",
],
"[LicenseListVersion]": "$_MAJOR.$_MINOR",
"Creator": [
"Person: $_PERSON_NAME [($_EMAIL)]",
"Organization: $_ORGANIZATION [($_EMAIL)]",
"Tool: $_TOOL_IDENTIFIED-$_VERSION",
],
"Created": "%Y-%m-%dT%H:%M:%SZ",
"[CreatorComment]": "<text>$_MULTI_LINE_TEXT</text>",
"[DocumentComment]": "<text>$_MULTI_LINE_TEXT</text>",
}
SPDX_2_2_DCI_JSON = {
"SPDXID": "SPDXRef-DOCUMENT",
"spdxVersion": "SPDX-2.2",
"creationInfo": {
"created": "%Y-%m-%dT%H:%M:%SZ",
"creators": [
"Person: $_PERSON_NAME [($_EMAIL)]",
"Organization: $_ORGANIZATION [($_EMAIL)]",
"Tool: $_TOOL_IDENTIFIED-$_VERSION",
]
},
"name": "$_SINGLE_LINE",
"dataLicense": "CC0-1.0",
"documentNamespace": "$_URI_MINUS_PART",
}
def spdx_dci_is_valid(sbom):
if not sbom:
return False
for key in SPDX_2_2_DCI_JSON.keys():
if key.startswith("["):
continue
try:
if not sbom.get(key):
return False
except AttributeError as e:
print(str(sbom), e)
return True
| true | true |
f71c7c09de030a029f096f3ac1471f0f9a979e3b | 6,549 | py | Python | packages/pytea/pytest/benchmarks/transformers/examples/question-answering/run_squad_trainer.py | lego0901/pytea | 8ede650def2e68f4610ba816451d8b9e28f09f76 | [
"MIT"
] | 1 | 2020-11-30T09:01:57.000Z | 2020-11-30T09:01:57.000Z | packages/pytea/pytest/benchmarks/transformers/examples/question-answering/run_squad_trainer.py | lego0901/pytea | 8ede650def2e68f4610ba816451d8b9e28f09f76 | [
"MIT"
] | null | null | null | packages/pytea/pytest/benchmarks/transformers/examples/question-answering/run_squad_trainer.py | lego0901/pytea | 8ede650def2e68f4610ba816451d8b9e28f09f76 | [
"MIT"
] | null | null | null | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Fine-tuning the library models for question-answering."""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import transformers
from transformers import AutoConfig, AutoModelForQuestionAnswering, AutoTokenizer, HfArgumentParser, SquadDataset
from transformers import SquadDataTrainingArguments as DataTrainingArguments
from transformers import Trainer, TrainingArguments
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
cache_dir: Optional[str] = field(
default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
)
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
training_args.local_rank,
training_args.device,
training_args.n_gpu,
bool(training_args.local_rank != -1),
training_args.fp16,
)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s", training_args)
# Prepare Question-Answering task
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
        use_fast=False,  # SquadDataset is not compatible with Fast tokenizers which have a smarter overflow handling
)
model = AutoModelForQuestionAnswering.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
)
# Get datasets
is_language_sensitive = hasattr(model.config, "lang2id")
train_dataset = (
SquadDataset(
data_args, tokenizer=tokenizer, is_language_sensitive=is_language_sensitive, cache_dir=model_args.cache_dir
)
if training_args.do_train
else None
)
eval_dataset = (
SquadDataset(
data_args,
tokenizer=tokenizer,
mode="dev",
is_language_sensitive=is_language_sensitive,
cache_dir=model_args.cache_dir,
)
if training_args.do_eval
else None
)
# Initialize our Trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
)
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir)
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
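# Added usage sketch (paths and hyperparameters are illustrative, not from the
# original example):
#
#   python run_squad_trainer.py \
#     --model_name_or_path bert-base-uncased \
#     --data_dir ./squad \
#     --output_dir ./output \
#     --do_train --do_eval
#
# Alternatively, the path to a single .json file of arguments may be passed,
# which takes the parse_json_file branch in main() above.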
| 37.637931 | 133 | 0.703008 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import transformers
from transformers import AutoConfig, AutoModelForQuestionAnswering, AutoTokenizer, HfArgumentParser, SquadDataset
from transformers import SquadDataTrainingArguments as DataTrainingArguments
from transformers import Trainer, TrainingArguments
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
cache_dir: Optional[str] = field(
default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
)
def main():
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome."
)
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
training_args.local_rank,
training_args.device,
training_args.n_gpu,
bool(training_args.local_rank != -1),
training_args.fp16,
)
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s", training_args)
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=False,
)
model = AutoModelForQuestionAnswering.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
)
is_language_sensitive = hasattr(model.config, "lang2id")
train_dataset = (
SquadDataset(
data_args, tokenizer=tokenizer, is_language_sensitive=is_language_sensitive, cache_dir=model_args.cache_dir
)
if training_args.do_train
else None
)
eval_dataset = (
SquadDataset(
data_args,
tokenizer=tokenizer,
mode="dev",
is_language_sensitive=is_language_sensitive,
cache_dir=model_args.cache_dir,
)
if training_args.do_eval
else None
)
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
)
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
)
trainer.save_model()
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir)
def _mp_fn(index):
main()
if __name__ == "__main__":
main()
| true | true |
f71c7c547c5784ada99fdc35a9188f398ce31ecd | 123 | py | Python | adlmagics/adlmagics/__init__.py | Azure/Azure-Data-Service-Notebook | 6bd28587c9fa0a7c1f9113f638b790b1773c5585 | [
"MIT"
] | 6 | 2018-06-06T08:37:53.000Z | 2020-06-01T13:13:13.000Z | adlmagics/adlmagics/__init__.py | Azure/Azure-Data-Service-Notebook | 6bd28587c9fa0a7c1f9113f638b790b1773c5585 | [
"MIT"
] | 30 | 2018-06-08T02:47:18.000Z | 2018-07-25T07:07:07.000Z | adlmagics/adlmagics/__init__.py | Azure/Azure-Data-Service-Notebook | 6bd28587c9fa0a7c1f9113f638b790b1773c5585 | [
"MIT"
] | 5 | 2018-06-06T08:37:55.000Z | 2021-01-07T09:15:15.000Z | from adlmagics.adlmagics_main import AdlMagics
def load_ipython_extension(ipython):
    ipython.register_magics(AdlMagics)
| 30.75 | 46 | 0.853659 |
from adlmagics.adlmagics_main import AdlMagics
def load_ipython_extension(ipython):
    ipython.register_magics(AdlMagics)
| true | true |
f71c7ce5af65015396617f32ecb957271f0d1e12 | 1,295 | py | Python | soal2/acceptor.py | irff/uas | a68bdac5f13c61e21675b43a56e5407d11e3409c | [
"MIT"
] | null | null | null | soal2/acceptor.py | irff/uas | a68bdac5f13c61e21675b43a56e5407d11e3409c | [
"MIT"
] | null | null | null | soal2/acceptor.py | irff/uas | a68bdac5f13c61e21675b43a56e5407d11e3409c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from rabbitmq import RabbitMQ, Consumer, Publisher
import time
import json
import pika
import random
import threading
import sys
from datetime import datetime
from tinydb import TinyDB, Query
QUEUE_URL = '152.118.148.103'
QUEUE_PORT = '5672'
USERNAME = '1306398983'
PASSWORD = '446167'
VHOST = '1306398983'
EX_PAXOS = 'EX_PAXOS'
DIRECT = 'direct'
FANOUT = 'fanout'
class Acceptor(object):
def __init__(self, acceptor_id):
self.acceptor_id = acceptor_id
self.publisher = Publisher(
queue_url=QUEUE_URL,
queue_port=QUEUE_PORT,
username=USERNAME,
password=PASSWORD,
virtual_host=VHOST
)
self.consumer = Consumer(
queue_url=QUEUE_URL,
queue_port=QUEUE_PORT,
username=USERNAME,
password=PASSWORD,
virtual_host=VHOST
)
def callback(self, ch, method, properties, body):
print('Callback called! body={}'.format(body))
pass
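# Added sketch (an assumption about the intended Paxos behavior; the original
# callback above is only a logging stub). A fleshed-out acceptor would track a
# promised ballot number and answer prepare/accept requests, e.g.:
#
#   msg = json.loads(body)
#   if msg['type'] == 'prepare' and msg['n'] > self.promised_n:
#       self.promised_n = msg['n']   # promise; report any accepted value back
#   elif msg['type'] == 'accept' and msg['n'] >= self.promised_n:
#       self.accepted = (msg['n'], msg['value'])   # record the accepted proposal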
# Parse the ACCEPTOR_ID from the CLI parameters and run the acceptor
args = sys.argv
if len(args) > 1:
acceptor_id = args[1]
    print('ACCEPTOR_ID = {}'.format(acceptor_id))
    acceptor = Acceptor(acceptor_id)
else:
    print('Usage: python acceptor.py [ACCEPTOR_ID]')
| 23.545455 | 60 | 0.654826 |
from rabbitmq import RabbitMQ, Consumer, Publisher
import time
import json
import pika
import random
import threading
import sys
from datetime import datetime
from tinydb import TinyDB, Query
QUEUE_URL = '152.118.148.103'
QUEUE_PORT = '5672'
USERNAME = '1306398983'
PASSWORD = '446167'
VHOST = '1306398983'
EX_PAXOS = 'EX_PAXOS'
DIRECT = 'direct'
FANOUT = 'fanout'
class Acceptor(object):
def __init__(self, acceptor_id):
self.acceptor_id = acceptor_id
self.publisher = Publisher(
queue_url=QUEUE_URL,
queue_port=QUEUE_PORT,
username=USERNAME,
password=PASSWORD,
virtual_host=VHOST
)
self.consumer = Consumer(
queue_url=QUEUE_URL,
queue_port=QUEUE_PORT,
username=USERNAME,
password=PASSWORD,
virtual_host=VHOST
)
def callback(self, ch, method, properties, body):
print('Callback called! body={}'.format(body))
pass
args = sys.argv
if len(args) > 1:
acceptor_id = args[1]
    print('ACCEPTOR_ID = {}'.format(acceptor_id))
    acceptor = Acceptor(acceptor_id)
else:
    print('Usage: python acceptor.py [ACCEPTOR_ID]')
| false | true |
f71c7e0a03d097595b703379f84e0942a21fd206 | 4,909 | py | Python | kubernetes/client/models/v1beta1_self_subject_access_review_spec.py | woqer/python | 3a6fe8231cefe1fa39a0a69d4b2f33044ab32745 | [
"Apache-2.0"
] | 1 | 2019-07-12T05:38:06.000Z | 2019-07-12T05:38:06.000Z | kubernetes/client/models/v1beta1_self_subject_access_review_spec.py | woqer/python | 3a6fe8231cefe1fa39a0a69d4b2f33044ab32745 | [
"Apache-2.0"
] | null | null | null | kubernetes/client/models/v1beta1_self_subject_access_review_spec.py | woqer/python | 3a6fe8231cefe1fa39a0a69d4b2f33044ab32745 | [
"Apache-2.0"
] | 1 | 2021-05-18T12:25:56.000Z | 2021-05-18T12:25:56.000Z | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.11.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1beta1SelfSubjectAccessReviewSpec(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'non_resource_attributes': 'V1beta1NonResourceAttributes',
'resource_attributes': 'V1beta1ResourceAttributes'
}
attribute_map = {
'non_resource_attributes': 'nonResourceAttributes',
'resource_attributes': 'resourceAttributes'
}
def __init__(self, non_resource_attributes=None, resource_attributes=None):
"""
V1beta1SelfSubjectAccessReviewSpec - a model defined in Swagger
"""
self._non_resource_attributes = None
self._resource_attributes = None
self.discriminator = None
if non_resource_attributes is not None:
self.non_resource_attributes = non_resource_attributes
if resource_attributes is not None:
self.resource_attributes = resource_attributes
@property
def non_resource_attributes(self):
"""
Gets the non_resource_attributes of this V1beta1SelfSubjectAccessReviewSpec.
NonResourceAttributes describes information for a non-resource access request
:return: The non_resource_attributes of this V1beta1SelfSubjectAccessReviewSpec.
:rtype: V1beta1NonResourceAttributes
"""
return self._non_resource_attributes
@non_resource_attributes.setter
def non_resource_attributes(self, non_resource_attributes):
"""
Sets the non_resource_attributes of this V1beta1SelfSubjectAccessReviewSpec.
NonResourceAttributes describes information for a non-resource access request
:param non_resource_attributes: The non_resource_attributes of this V1beta1SelfSubjectAccessReviewSpec.
:type: V1beta1NonResourceAttributes
"""
self._non_resource_attributes = non_resource_attributes
@property
def resource_attributes(self):
"""
Gets the resource_attributes of this V1beta1SelfSubjectAccessReviewSpec.
ResourceAuthorizationAttributes describes information for a resource access request
:return: The resource_attributes of this V1beta1SelfSubjectAccessReviewSpec.
:rtype: V1beta1ResourceAttributes
"""
return self._resource_attributes
@resource_attributes.setter
def resource_attributes(self, resource_attributes):
"""
Sets the resource_attributes of this V1beta1SelfSubjectAccessReviewSpec.
ResourceAuthorizationAttributes describes information for a resource access request
:param resource_attributes: The resource_attributes of this V1beta1SelfSubjectAccessReviewSpec.
:type: V1beta1ResourceAttributes
"""
self._resource_attributes = resource_attributes
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1beta1SelfSubjectAccessReviewSpec):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 31.670968 | 111 | 0.644938 |
from pprint import pformat
from six import iteritems
import re
class V1beta1SelfSubjectAccessReviewSpec(object):
swagger_types = {
'non_resource_attributes': 'V1beta1NonResourceAttributes',
'resource_attributes': 'V1beta1ResourceAttributes'
}
attribute_map = {
'non_resource_attributes': 'nonResourceAttributes',
'resource_attributes': 'resourceAttributes'
}
def __init__(self, non_resource_attributes=None, resource_attributes=None):
self._non_resource_attributes = None
self._resource_attributes = None
self.discriminator = None
if non_resource_attributes is not None:
self.non_resource_attributes = non_resource_attributes
if resource_attributes is not None:
self.resource_attributes = resource_attributes
@property
def non_resource_attributes(self):
return self._non_resource_attributes
@non_resource_attributes.setter
def non_resource_attributes(self, non_resource_attributes):
self._non_resource_attributes = non_resource_attributes
@property
def resource_attributes(self):
return self._resource_attributes
@resource_attributes.setter
def resource_attributes(self, resource_attributes):
self._resource_attributes = resource_attributes
def to_dict(self):
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, V1beta1SelfSubjectAccessReviewSpec):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
f71c7edc2ae9ca95fcb919548ce178feef3c1b16 | 2,805 | py | Python | st2common/tests/unit/test_triggers_registrar.py | saucetray/st2 | 8f507d6c8d9483c8371e386fe2b7998596856fd7 | [
"Apache-2.0"
] | 2 | 2021-08-04T01:04:06.000Z | 2021-08-04T01:04:08.000Z | st2common/tests/unit/test_triggers_registrar.py | saucetray/st2 | 8f507d6c8d9483c8371e386fe2b7998596856fd7 | [
"Apache-2.0"
] | 1 | 2022-03-31T03:53:22.000Z | 2022-03-31T03:53:22.000Z | st2common/tests/unit/test_triggers_registrar.py | saucetray/st2 | 8f507d6c8d9483c8371e386fe2b7998596856fd7 | [
"Apache-2.0"
] | 1 | 2019-10-11T14:42:28.000Z | 2019-10-11T14:42:28.000Z | # Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import st2common.bootstrap.triggersregistrar as triggers_registrar
from st2common.persistence.trigger import Trigger
from st2common.persistence.trigger import TriggerType
from st2tests.base import CleanDbTestCase
from st2tests.fixturesloader import get_fixtures_packs_base_path
__all__ = [
'TriggersRegistrarTestCase'
]
class TriggersRegistrarTestCase(CleanDbTestCase):
def test_register_all_triggers(self):
trigger_type_dbs = TriggerType.get_all()
self.assertEqual(len(trigger_type_dbs), 0)
packs_base_path = get_fixtures_packs_base_path()
count = triggers_registrar.register_triggers(packs_base_paths=[packs_base_path])
self.assertEqual(count, 2)
# Verify TriggerTypeDB and corresponding TriggerDB objects have been created
trigger_type_dbs = TriggerType.get_all()
trigger_dbs = Trigger.get_all()
self.assertEqual(len(trigger_type_dbs), 2)
self.assertEqual(len(trigger_dbs), 2)
def test_register_triggers_from_pack(self):
base_path = get_fixtures_packs_base_path()
pack_dir = os.path.join(base_path, 'dummy_pack_1')
trigger_type_dbs = TriggerType.get_all()
self.assertEqual(len(trigger_type_dbs), 0)
count = triggers_registrar.register_triggers(pack_dir=pack_dir)
self.assertEqual(count, 2)
# Verify TriggerTypeDB and corresponding TriggerDB objects have been created
trigger_type_dbs = TriggerType.get_all()
trigger_dbs = Trigger.get_all()
self.assertEqual(len(trigger_type_dbs), 2)
self.assertEqual(len(trigger_dbs), 2)
self.assertEqual(trigger_type_dbs[0].name, 'event_handler')
self.assertEqual(trigger_type_dbs[0].pack, 'dummy_pack_1')
self.assertEqual(trigger_dbs[0].name, 'event_handler')
self.assertEqual(trigger_dbs[0].pack, 'dummy_pack_1')
self.assertEqual(trigger_dbs[0].type, 'dummy_pack_1.event_handler')
self.assertEqual(trigger_type_dbs[1].name, 'head_sha_monitor')
self.assertEqual(trigger_type_dbs[1].pack, 'dummy_pack_1')
self.assertEqual(trigger_type_dbs[1].payload_schema['type'], 'object')
| 40.652174 | 88 | 0.745455 |
from __future__ import absolute_import
import os
import st2common.bootstrap.triggersregistrar as triggers_registrar
from st2common.persistence.trigger import Trigger
from st2common.persistence.trigger import TriggerType
from st2tests.base import CleanDbTestCase
from st2tests.fixturesloader import get_fixtures_packs_base_path
__all__ = [
'TriggersRegistrarTestCase'
]
class TriggersRegistrarTestCase(CleanDbTestCase):
def test_register_all_triggers(self):
trigger_type_dbs = TriggerType.get_all()
self.assertEqual(len(trigger_type_dbs), 0)
packs_base_path = get_fixtures_packs_base_path()
count = triggers_registrar.register_triggers(packs_base_paths=[packs_base_path])
self.assertEqual(count, 2)
trigger_type_dbs = TriggerType.get_all()
trigger_dbs = Trigger.get_all()
self.assertEqual(len(trigger_type_dbs), 2)
self.assertEqual(len(trigger_dbs), 2)
def test_register_triggers_from_pack(self):
base_path = get_fixtures_packs_base_path()
pack_dir = os.path.join(base_path, 'dummy_pack_1')
trigger_type_dbs = TriggerType.get_all()
self.assertEqual(len(trigger_type_dbs), 0)
count = triggers_registrar.register_triggers(pack_dir=pack_dir)
self.assertEqual(count, 2)
trigger_type_dbs = TriggerType.get_all()
trigger_dbs = Trigger.get_all()
self.assertEqual(len(trigger_type_dbs), 2)
self.assertEqual(len(trigger_dbs), 2)
self.assertEqual(trigger_type_dbs[0].name, 'event_handler')
self.assertEqual(trigger_type_dbs[0].pack, 'dummy_pack_1')
self.assertEqual(trigger_dbs[0].name, 'event_handler')
self.assertEqual(trigger_dbs[0].pack, 'dummy_pack_1')
self.assertEqual(trigger_dbs[0].type, 'dummy_pack_1.event_handler')
self.assertEqual(trigger_type_dbs[1].name, 'head_sha_monitor')
self.assertEqual(trigger_type_dbs[1].pack, 'dummy_pack_1')
self.assertEqual(trigger_type_dbs[1].payload_schema['type'], 'object')
| true | true |
f71c8126e5ce154c4f9e4de6a8537b75a21c3612 | 1,486 | py | Python | examples/node_labels.py | venukarnati92/python-1 | 3fabf9ed9f4758fb5133975a58fc147471e91d9d | [
"Apache-2.0"
] | 4,417 | 2018-01-13T04:30:48.000Z | 2022-03-31T15:33:59.000Z | examples/node_labels.py | belajarqywok/python | b15bea16a87ad03136a4627941ac437582ea4657 | [
"Apache-2.0"
] | 1,414 | 2018-01-12T19:31:56.000Z | 2022-03-31T22:01:02.000Z | examples/node_labels.py | palnabarun/python | 6b01c95e1673c0787d3d688b361bfd995d62dd98 | [
"Apache-2.0"
] | 2,854 | 2018-01-14T08:57:33.000Z | 2022-03-31T01:41:56.000Z | # Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This example demonstrates the following:
- Get a list of all the cluster nodes
- Iterate through each node list item
- Add or overwrite the label "foo" with the value "bar"
- Remove the label "baz"
- Return the list of nodes with updated labels
"""
from kubernetes import client, config
def main():
config.load_kube_config()
api_instance = client.CoreV1Api()
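    # Strategic merge patch semantics: a non-null value adds or overwrites the
    # label, while an explicit None (JSON null) deletes it from the node.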
body = {
"metadata": {
"labels": {
"foo": "bar",
"baz": None}
}
}
# Listing the cluster nodes
node_list = api_instance.list_node()
print("%s\t\t%s" % ("NAME", "LABELS"))
# Patching the node labels
for node in node_list.items:
api_response = api_instance.patch_node(node.metadata.name, body)
print("%s\t%s" % (node.metadata.name, node.metadata.labels))
if __name__ == '__main__':
main()
| 28.576923 | 74 | 0.662853 |
from kubernetes import client, config
def main():
config.load_kube_config()
api_instance = client.CoreV1Api()
body = {
"metadata": {
"labels": {
"foo": "bar",
"baz": None}
}
}
node_list = api_instance.list_node()
print("%s\t\t%s" % ("NAME", "LABELS"))
for node in node_list.items:
api_response = api_instance.patch_node(node.metadata.name, body)
print("%s\t%s" % (node.metadata.name, node.metadata.labels))
if __name__ == '__main__':
main()
| true | true |
f71c817e947e6fd2bca33380c21307542dc6f585 | 110,038 | py | Python | hermes/hermes_parser.py | scottfrazer/hermes | d82d916dd20da58c056b18dbb9b6c01a3700f3e1 | [
"MIT"
] | 14 | 2015-07-29T06:20:00.000Z | 2021-03-21T10:23:38.000Z | hermes/hermes_parser.py | scottfrazer/hermes | d82d916dd20da58c056b18dbb9b6c01a3700f3e1 | [
"MIT"
] | 32 | 2015-02-13T18:34:44.000Z | 2020-03-17T15:08:51.000Z | hermes/hermes_parser.py | scottfrazer/hermes | d82d916dd20da58c056b18dbb9b6c01a3700f3e1 | [
"MIT"
] | 8 | 2015-04-22T11:46:59.000Z | 2019-03-29T22:58:38.000Z |
import sys
import os
import re
import base64
import argparse
from collections import OrderedDict
# Common Code #
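# parse_tree_string() and ast_string() recursively pretty-print a ParseTree or
# Ast; `indent` switches between single-line and indented multi-line output.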
def parse_tree_string(parsetree, indent=None, b64_source=True, indent_level=0, debug=False):
indent_str = (' ' * indent * indent_level) if indent else ''
if isinstance(parsetree, ParseTree):
children = [parse_tree_string(child, indent, b64_source, indent_level+1, debug) for child in parsetree.children]
debug_str = parsetree.debug_str() if debug else ''
if indent is None or len(children) == 0:
return '{0}({1}: {2}{3})'.format(indent_str, parsetree.nonterminal, debug_str, ', '.join(children))
else:
return '{0}({1}:{2}\n{3}\n{4})'.format(
indent_str,
parsetree.nonterminal,
debug_str,
',\n'.join(children),
indent_str
)
elif isinstance(parsetree, Terminal):
return indent_str + parsetree.dumps(b64_source=b64_source)
def ast_string(ast, indent=None, b64_source=True, indent_level=0):
indent_str = (' ' * indent * indent_level) if indent else ''
next_indent_str = (' ' * indent * (indent_level+1)) if indent else ''
if isinstance(ast, Ast):
children = OrderedDict([(k, ast_string(v, indent, b64_source, indent_level+1)) for k, v in ast.attributes.items()])
if indent is None:
return '({0}: {1})'.format(
ast.name,
', '.join('{0}={1}'.format(k, v) for k, v in children.items())
)
else:
return '({0}:\n{1}\n{2})'.format(
ast.name,
',\n'.join(['{0}{1}={2}'.format(next_indent_str, k, v) for k, v in children.items()]),
indent_str
)
elif isinstance(ast, list):
children = [ast_string(element, indent, b64_source, indent_level+1) for element in ast]
if indent is None or len(children) == 0:
return '[{0}]'.format(', '.join(children))
else:
return '[\n{1}\n{0}]'.format(
indent_str,
',\n'.join(['{0}{1}'.format(next_indent_str, child) for child in children]),
)
elif isinstance(ast, Terminal):
return ast.dumps(b64_source=b64_source)
class Terminal:
def __init__(self, id, str, source_string, resource, line, col):
self.__dict__.update(locals())
def getId(self):
return self.id
def ast(self):
return self
def dumps(self, b64_source=True, **kwargs):
source_string = base64.b64encode(self.source_string.encode('utf-8')).decode('utf-8') if b64_source else self.source_string
return '<{resource}:{line}:{col} {terminal} "{source}">'.format(
resource=self.resource,
line=self.line,
col=self.col,
terminal=self.str,
source=source_string
)
def __str__(self):
return self.dumps()
class NonTerminal():
def __init__(self, id, str):
self.__dict__.update(locals())
self.list = False
def __str__(self):
return self.str
class AstTransform:
pass
class AstTransformSubstitution(AstTransform):
def __init__(self, idx):
self.__dict__.update(locals())
def __repr__(self):
return '$' + str(self.idx)
def __str__(self):
return self.__repr__()
class AstTransformNodeCreator(AstTransform):
def __init__( self, name, parameters ):
self.__dict__.update(locals())
def __repr__( self ):
return self.name + '( ' + ', '.join(['%s=$%s' % (k,str(v)) for k,v in self.parameters.items()]) + ' )'
def __str__(self):
return self.__repr__()
class AstList(list):
def ast(self):
retval = []
for ast in self:
retval.append(ast.ast())
return retval
def dumps(self, indent=None, b64_source=True):
args = locals()
del args['self']
return ast_string(self, **args)
class ParseTree():
def __init__(self, nonterminal):
self.__dict__.update(locals())
self.children = []
self.astTransform = None
self.isExpr = False
self.isNud = False
self.isPrefix = False
self.isInfix = False
self.nudMorphemeCount = 0
self.isExprNud = False # true for rules like _expr := {_expr} + {...}
self.list_separator_id = None
self.list = False
def debug_str(self):
from copy import deepcopy
def h(v):
if v == False or v is None:
return str(v)
from xtermcolor import colorize
return colorize(str(v), ansi=190)
d = deepcopy(self.__dict__)
for key in ['self', 'nonterminal', 'children']:
del d[key]
f = {k: v for k, v in d.items() if v != False and v is not None}
return ' [{}]'.format(', '.join(['{}={}'.format(k,h(v)) for k,v in f.items()]))
def add(self, tree):
self.children.append( tree )
def ast(self):
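        # Convert this parse tree into an AST: list nodes become an AstList
        # (dropping separator tokens), expression-parser nodes apply their AST
        # transform with nud/led index bookkeeping, and everything else either
        # substitutes a child or builds an Ast node from named parameters.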
if self.list == True:
r = AstList()
if len(self.children) == 0:
return r
for child in self.children:
if isinstance(child, Terminal) and self.list_separator_id is not None and child.id == self.list_separator_id:
continue
r.append(child.ast())
return r
elif self.isExpr:
if isinstance(self.astTransform, AstTransformSubstitution):
return self.children[self.astTransform.idx].ast()
elif isinstance(self.astTransform, AstTransformNodeCreator):
parameters = OrderedDict()
for name, idx in self.astTransform.parameters.items():
if idx == '$':
child = self.children[0]
elif isinstance(self.children[0], ParseTree) and \
self.children[0].isNud and \
not self.children[0].isPrefix and \
not self.isExprNud and \
not self.isInfix:
if idx < self.children[0].nudMorphemeCount:
child = self.children[0].children[idx]
else:
index = idx - self.children[0].nudMorphemeCount + 1
child = self.children[index]
elif len(self.children) == 1 and not isinstance(self.children[0], ParseTree) and not isinstance(self.children[0], list):
return self.children[0]
else:
child = self.children[idx]
parameters[name] = child.ast()
return Ast(self.astTransform.name, parameters)
else:
if isinstance(self.astTransform, AstTransformSubstitution):
return self.children[self.astTransform.idx].ast()
elif isinstance(self.astTransform, AstTransformNodeCreator):
parameters = OrderedDict()
for name, idx in self.astTransform.parameters.items():
parameters[name] = self.children[idx].ast()
return Ast(self.astTransform.name, parameters)
elif len(self.children):
return self.children[0].ast()
else:
return None
def dumps(self, indent=None, b64_source=True, debug=False):
args = locals()
del args['self']
return parse_tree_string(self, **args)
class Ast():
def __init__(self, name, attributes):
self.__dict__.update(locals())
def attr(self, attr):
return self.attributes[attr]
def dumps(self, indent=None, b64_source=True):
args = locals()
del args['self']
return ast_string(self, **args)
class SyntaxError(Exception):
def __init__(self, message):
self.__dict__.update(locals())
def __str__(self):
return self.message
class TokenStream(list):
def __init__(self, arg=[]):
super(TokenStream, self).__init__(arg)
self.index = 0
def advance(self):
self.index += 1
return self.current()
def last(self):
return self[-1]
def current(self):
try:
return self[self.index]
except IndexError:
return None
class DefaultSyntaxErrorHandler:
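    # Records every SyntaxError it builds and also returns it, so callers can
    # both accumulate errors and `raise` the returned exception immediately.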
def __init__(self):
self.errors = []
def _error(self, string):
error = SyntaxError(string)
self.errors.append(error)
return error
def unexpected_eof(self):
return self._error("Error: unexpected end of file")
def excess_tokens(self):
return self._error("Finished parsing without consuming all tokens.")
def unexpected_symbol(self, nonterminal, actual_terminal, expected_terminals, rule):
return self._error("Unexpected symbol (line {line}, col {col}) when parsing parse_{nt}. Expected {expected}, got {actual}.".format(
line=actual_terminal.line,
col=actual_terminal.col,
nt=nonterminal,
expected=', '.join(expected_terminals),
actual=actual_terminal
))
def no_more_tokens(self, nonterminal, expected_terminal, last_terminal):
return self._error("No more tokens. Expecting " + expected_terminal)
def invalid_terminal(self, nonterminal, invalid_terminal):
return self._error("Invalid symbol ID: {} ({})".format(invalid_terminal.id, invalid_terminal.string))
def unrecognized_token(self, string, line, col):
lines = string.split('\n')
bad_line = lines[line-1]
return self._error('Unrecognized token on line {}, column {}:\n\n{}\n{}'.format(
line, col, bad_line, ''.join([' ' for x in range(col-1)]) + '^'
))
def missing_list_items(self, method, required, found, last):
return self._error("List for {} requires {} items but only {} were found.".format(method, required, found))
def missing_terminator(self, method, terminator, last):
return self._error("List for "+method+" is missing a terminator")
class ParserContext:
def __init__(self, tokens, errors):
self.__dict__.update(locals())
self.nonterminal_string = None
self.rule_string = None
# Parser Code #
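# 'terminals' below is a bidirectional map: terminal id -> name and name -> id.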
terminals = {
0: 'regex_enum',
1: 'dash',
2: 'lbrace',
3: 'arrow',
4: 'unary',
5: 'rsquare',
6: 'infix_rule_hint',
7: 'equals',
8: 'stack_push',
9: 'code_start',
10: 'langle',
11: 'no_group',
12: 'expr_rule_hint',
13: 'partials',
14: 'regex',
15: 'rbrace',
16: 'code',
17: 'identifier',
18: 'regex_partial',
19: 'rangle',
20: 'language',
21: 'integer',
22: 'left',
23: 'rparen',
24: 'right',
25: 'mixfix_rule_hint',
26: 'colon',
27: 'expression_divider',
28: 'prefix_rule_hint',
29: 'asterisk',
30: 'll1_rule_hint',
31: 'string',
32: 'lexer',
33: 'grammar',
34: 'terminal',
35: 'lsquare',
36: 'parser',
37: 'lparen',
38: 'comma',
39: 'action',
40: 'pipe',
41: 'parser_expression',
42: 'nonterminal',
43: 'mode',
44: 'nonterminal_reference',
45: 'null',
'regex_enum': 0,
'dash': 1,
'lbrace': 2,
'arrow': 3,
'unary': 4,
'rsquare': 5,
'infix_rule_hint': 6,
'equals': 7,
'stack_push': 8,
'code_start': 9,
'langle': 10,
'no_group': 11,
'expr_rule_hint': 12,
'partials': 13,
'regex': 14,
'rbrace': 15,
'code': 16,
'identifier': 17,
'regex_partial': 18,
'rangle': 19,
'language': 20,
'integer': 21,
'left': 22,
'rparen': 23,
'right': 24,
'mixfix_rule_hint': 25,
'colon': 26,
'expression_divider': 27,
'prefix_rule_hint': 28,
'asterisk': 29,
'll1_rule_hint': 30,
'string': 31,
'lexer': 32,
'grammar': 33,
'terminal': 34,
'lsquare': 35,
'parser': 36,
'lparen': 37,
'comma': 38,
'action': 39,
'pipe': 40,
'parser_expression': 41,
'nonterminal': 42,
'mode': 43,
'nonterminal_reference': 44,
'null': 45,
}
# table[nonterminal_id - 46][terminal_id] = rule index (-1 means no applicable rule)
table = [
[-1, -1, 16, 17, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, 72, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 70, -1, 71, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 30, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 29, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 44, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 75, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 73, -1, -1, -1, -1, -1, -1, -1, 74, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 86, -1, -1, -1, -1, -1, -1, -1, -1, -1, 85, -1, -1, 84, -1, -1, -1, -1, -1, -1, -1, 83, -1, -1, 87],
[-1, -1, -1, 49, -1, -1, -1, -1, -1, -1, -1, -1, 50, -1, -1, 50, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 50, -1, -1, 50, -1, -1, -1, -1, -1, -1, 50, -1, -1, 50, -1, -1, -1, -1, -1],
[-1, -1, -1, 60, -1, -1, -1, -1, -1, -1, -1, -1, 60, -1, -1, 60, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 59, -1, -1, -1, -1, -1, -1, -1, -1, -1, 60, -1, -1, -1, -1, -1, -1, -1, -1],
[7, -1, -1, -1, -1, -1, -1, -1, -1, 10, -1, -1, -1, 9, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 8, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 40, -1, -1],
[-1, -1, -1, 64, -1, -1, -1, -1, -1, -1, -1, -1, 64, -1, -1, 64, -1, 64, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 64, -1, -1, 64, -1, -1, -1, -1, 64, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 78, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 79, -1],
[21, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 3, -1, -1, -1, 4, -1, -1, -1, -1, 4, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 23, -1, 23, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 22, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 26, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 36, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 58, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 58, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, 69, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 68, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 45, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, 63, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 61, -1, -1, 62, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, 51, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 51, -1, 51, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 51, -1, -1, -1, 51, -1, -1, -1, -1, -1, 51, -1, 51, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 80, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 2, -1, -1, -1, 2, -1, -1, -1, -1, 2, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 38, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 37, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 66, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 24, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, 47, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 47, -1, 47, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 47, -1, -1, -1, 47, -1, 53, -1, -1, -1, -1, 53, 47, -1, -1, 52],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, 27, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 14, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 65, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, 67, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 67, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 55, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 82, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[15, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 19, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[35, -1, -1, -1, -1, -1, -1, -1, 35, 35, -1, 34, -1, 35, 35, 35, -1, 35, -1, -1, -1, -1, -1, 35, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 35, 34, -1, -1, -1, 35, -1, -1, -1, 35, -1, 35],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 41, -1, -1, -1, -1, 42, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, 32, -1, -1, -1, -1, -1, -1, -1, -1, 31, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 28, -1, -1, -1, -1, 33, -1, -1, -1, -1, -1, 39],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 13, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, 76, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 57, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 56, -1, -1, -1, -1, -1, -1, -1, -1],
]
nonterminal_first = {
46: [2, -1],
47: [24, 4, 22],
48: [34, -1],
49: [36],
50: [34, 42, 17],
51: [34, 42, 21, 31, 45],
52: [3, -1],
53: [27, -1],
54: [0, 13, 43, 14, 9],
55: [43],
56: [34, 42, -1, 17],
57: [44, 17],
58: [0],
59: [32, 36, 41],
60: [37, -1],
61: [0, 13, 14, -1, 43, 9],
62: [36, -1, 41, 32],
63: [14, -1],
64: [37],
65: [34],
66: [12, 37],
67: [29, 1],
68: [30],
69: [37, 12, -1],
70: [9],
71: [28, 25, 6],
72: [34, -1, 3, 42, 17],
73: [17],
74: [32, 36, 41],
75: [-1, 17],
76: [35, 11],
77: [3, 34, -1, 42, 17],
78: [37],
79: [17],
80: [3, 34, 36, -1, 41, 42, 17, 45],
81: [34, -1, 39, 8, 17, 45],
82: [34, -1, 42, 17],
83: [-1, 17],
84: [2],
85: [14],
86: [27],
87: [29, 1],
88: [41],
89: [32],
90: [17],
91: [14, 0],
92: [35, 11, -1],
93: [-1, 17],
94: [36, 41],
95: [34, 39, 8, 17, 45],
96: [30, -1],
97: [33],
98: [13],
99: [3],
100: [34, -1, 31, 42, 21, 45],
101: [37, -1],
}
nonterminal_follow = {
46: [3],
47: [23],
48: [23],
49: [30, 32, 15, 36, 41],
50: [12, 30, 3, 34, 15, 37, 40, 42, 17],
51: [23, 38],
52: [27, 12, 30, 15, 37, 40],
53: [15, 12, 37, 3],
54: [0, 13, 14, 15, 43, 9],
55: [0, 13, 14, 15, 43, 9],
56: [15, 12, 37, 3],
57: [27, 12, 30, 15, 37, 40],
58: [0, 13, 14, 15, 43, 9],
59: [15, 36, 41, 32],
60: [15, 17],
61: [15],
62: [15],
63: [15],
64: [15, 17],
65: [0, 23, 8, 9, 13, 14, 34, 15, 39, 43, 17, 45],
66: [15, 12, 37],
67: [26],
68: [15, 30],
69: [15],
70: [0, 13, 14, 15, 43, 9],
71: [15, 12, 37],
72: [15, 30, 40],
73: [23, 38],
74: [15, 36, 41, 32],
75: [15],
76: [0, 23, 8, 9, 13, 14, 34, 15, 39, 43, 17, 45],
77: [15, 30],
78: [12],
79: [15, 17],
80: [15, 30],
81: [0, 13, 14, 15, 43, 9],
82: [12, 30, 3, 15, 37, 40],
83: [15, 23],
84: [3],
85: [15, 14],
86: [15, 12, 37, 3],
87: [23],
88: [30, 32, 15, 36, 41],
89: [15, 36, 41, 32],
90: [3, 12, 30, 34, 15, 37, 40, 42, 17],
91: [0, 13, 14, 15, 43, 9],
92: [0, 23, 8, 9, 13, 14, 34, 15, 39, 43, 17, 45],
93: [23],
94: [30, 32, 15, 36, 41],
95: [0, 13, 14, 34, 15, 39, 43, 8, 9, 17, 45],
96: [15],
97: [-1],
98: [0, 13, 14, 15, 43, 9],
99: [27, 12, 30, 15, 37, 40],
100: [23],
101: [12],
}
rule_first = {
0: [32, 36, -1, 41],
1: [33],
2: [32, 36, 41],
3: [32],
4: [36, 41],
5: [0, 13, 14, -1, 43, 9],
6: [32],
7: [14, 0],
8: [43],
9: [13],
10: [9],
11: [9],
12: [14, -1],
13: [13],
14: [14],
15: [0],
16: [2],
17: [-1],
18: [34, -1, 39, 8, 17, 45],
19: [14],
20: [-1, 17],
21: [0],
22: [37],
23: [-1],
24: [17],
25: [-1, 17],
26: [37],
27: [2],
28: [34],
29: [34],
30: [-1],
31: [17],
32: [8],
33: [39],
34: [35, 11],
35: [-1],
36: [34],
37: [35],
38: [11],
39: [45],
40: [43],
41: [36],
42: [41],
43: [30, -1],
44: [36],
45: [30],
46: [34, 3, -1, 17, 42],
47: [3, 42, 34, 17, -1],
48: [34, 42, -1, 17],
49: [3],
50: [-1],
51: [34, 42, -1, 3, 17],
52: [45],
53: [36, 41],
54: [12, 37, -1],
55: [41],
56: [37],
57: [-1],
58: [12, 37],
59: [27],
60: [-1],
61: [25],
62: [28],
63: [6],
64: [34, 42, -1, 17],
65: [27],
66: [37],
67: [29, 1],
68: [29],
69: [1],
70: [22],
71: [24],
72: [4],
73: [34],
74: [42],
75: [17],
76: [3],
77: [-1, 17],
78: [17],
79: [44],
80: [17],
81: [31, 21, 34, -1, 42, 45],
82: [17],
83: [42],
84: [34],
85: [31],
86: [21],
87: [45],
}
nonterminal_rules = {
46: [
"$_gen3 = $regex_options",
"$_gen3 = :_empty",
],
47: [
"$associativity = :left",
"$associativity = :right",
"$associativity = :unary",
],
48: [
"$_gen8 = $terminal",
"$_gen8 = :_empty",
],
49: [
"$parser_ll1 = :parser :lbrace $_gen10 :rbrace -> Parser( rules=$2 )",
],
50: [
"$morpheme = :terminal",
"$morpheme = :nonterminal",
"$morpheme = $macro",
],
51: [
"$macro_parameter = :nonterminal",
"$macro_parameter = :terminal",
"$macro_parameter = :string",
"$macro_parameter = :integer",
"$macro_parameter = :null",
],
52: [
"$_gen13 = $ast_transform",
"$_gen13 = :_empty",
],
53: [
"$_gen16 = $led",
"$_gen16 = :_empty",
],
54: [
"$lexer_atom = $lexer_regex",
"$lexer_atom = $lexer_mode",
"$lexer_atom = $lexer_partials",
"$lexer_atom = $lexer_code",
],
55: [
"$lexer_mode = :mode :langle :identifier :rangle :lbrace $_gen1 :rbrace -> Mode( name=$2, atoms=$5 )",
],
56: [
"$nud = $_gen12",
],
57: [
"$ast_transform_sub = :identifier :lparen $_gen17 :rparen -> AstTransformation( name=$0, parameters=$2 )",
"$ast_transform_sub = :nonterminal_reference",
],
58: [
"$enumerated_regex = :regex_enum :lbrace $_gen5 :rbrace :arrow $_gen4 -> EnumeratedRegex( enums=$2, onmatch=$5 )",
],
59: [
"$body_element_sub = $lexer",
"$body_element_sub = $parser",
],
60: [
"$_gen6 = $regex_enumeration_options",
"$_gen6 = :_empty",
],
61: [
"$_gen1 = list($lexer_atom)",
],
62: [
"$_gen0 = list($body_element)",
],
63: [
"$_gen2 = list($regex_partial)",
],
64: [
"$regex_enumeration_options = :lparen $_gen7 :rparen -> $1",
],
65: [
"$terminal = :terminal $_gen9 -> Terminal( name=$0, group=$1 )",
],
66: [
"$expression_rule = $_gen15 :expr_rule_hint :nonterminal :equals $expression_rule_production -> ExpressionRule( precedence=$0, nonterminal=$2, production=$4 )",
],
67: [
"$binding_power_marker = :asterisk",
"$binding_power_marker = :dash",
],
68: [
"$ll1_rule = :ll1_rule_hint :nonterminal :equals $ll1_rule_rhs -> Rule( nonterminal=$1, production=$3 )",
],
69: [
"$_gen14 = list($expression_rule)",
],
70: [
"$lexer_code = :code_start :language :code -> LexerCode( language=$1, code=$2 )",
],
71: [
"$expression_rule_production = :mixfix_rule_hint $nud $_gen13 $_gen16 $_gen13 -> MixfixProduction( nud=$1, nud_ast=$2, led=$3, ast=$4 )",
"$expression_rule_production = :prefix_rule_hint $_gen12 $_gen13 -> PrefixProduction( morphemes=$1, ast=$2 )",
"$expression_rule_production = :infix_rule_hint $_gen12 $_gen13 -> InfixProduction( morphemes=$1, ast=$2 )",
],
72: [
"$rule = $_gen12 $_gen13 -> Production( morphemes=$0, ast=$1 )",
],
73: [
"$ast_parameter = :identifier :equals :nonterminal_reference -> AstParameter( name=$0, index=$2 )",
],
74: [
"$body_element = $body_element_sub",
],
75: [
"$_gen5 = list($regex_enumeration)",
],
76: [
"$match_group = :lsquare :integer :rsquare -> $1",
"$match_group = :no_group",
],
77: [
"$_gen11 = list($rule,:pipe)",
],
78: [
"$binding_power = :lparen $precedence :rparen -> $1",
],
79: [
"$regex_enumeration = :identifier :colon :regex $_gen6 -> RegexEnum( language=$0, regex=$2, options=$3 )",
],
80: [
"$ll1_rule_rhs = $_gen11",
"$ll1_rule_rhs = :null -> NullProduction( )",
"$ll1_rule_rhs = $parser",
],
81: [
"$_gen4 = list($lexer_target)",
],
82: [
"$_gen12 = list($morpheme)",
],
83: [
"$_gen7 = list(:identifier,:comma)",
],
84: [
"$regex_options = :lbrace $_gen7 :rbrace -> $1",
],
85: [
"$regex_partial = :regex :arrow :regex_partial -> RegexPartial( regex=$0, name=$2 )",
],
86: [
"$led = :expression_divider $_gen12 -> $1",
],
87: [
"$precedence = $binding_power_marker :colon $associativity -> Precedence( marker=$0, associativity=$2 )",
],
88: [
"$parser_expression = :parser_expression :lbrace $_gen14 :rbrace -> ExpressionParser( rules=$2 )",
],
89: [
"$lexer = :lexer :lbrace $_gen1 :rbrace -> Lexer( atoms=$2 )",
],
90: [
"$macro = :identifier :lparen $_gen18 :rparen -> Macro( name=$0, parameters=$2 )",
],
91: [
"$lexer_regex = $enumerated_regex",
"$lexer_regex = :regex $_gen3 :arrow $_gen4 -> Regex( regex=$0, options=$1, onmatch=$3 )",
],
92: [
"$_gen9 = $match_group",
"$_gen9 = :_empty",
],
93: [
"$_gen17 = list($ast_parameter,:comma)",
],
94: [
"$parser = $parser_ll1",
"$parser = $parser_expression",
],
95: [
"$lexer_target = $terminal",
"$lexer_target = :identifier :lparen $_gen8 :rparen -> LexerFunctionCall( name=$0, terminal=$2 )",
"$lexer_target = :stack_push",
"$lexer_target = :action",
"$lexer_target = :null -> Null( )",
],
96: [
"$_gen10 = list($ll1_rule)",
],
97: [
"$grammar = :grammar :lbrace $_gen0 :rbrace -> Grammar( body=$2 )",
],
98: [
"$lexer_partials = :partials :lbrace $_gen2 :rbrace -> RegexPartials( list=$2 )",
],
99: [
"$ast_transform = :arrow $ast_transform_sub -> $1",
],
100: [
"$_gen18 = list($macro_parameter,:comma)",
],
101: [
"$_gen15 = $binding_power",
"$_gen15 = :_empty",
],
}
rules = {
0: "$_gen0 = list($body_element)",
1: "$grammar = :grammar :lbrace $_gen0 :rbrace -> Grammar( body=$2 )",
2: "$body_element = $body_element_sub",
3: "$body_element_sub = $lexer",
4: "$body_element_sub = $parser",
5: "$_gen1 = list($lexer_atom)",
6: "$lexer = :lexer :lbrace $_gen1 :rbrace -> Lexer( atoms=$2 )",
7: "$lexer_atom = $lexer_regex",
8: "$lexer_atom = $lexer_mode",
9: "$lexer_atom = $lexer_partials",
10: "$lexer_atom = $lexer_code",
11: "$lexer_code = :code_start :language :code -> LexerCode( language=$1, code=$2 )",
12: "$_gen2 = list($regex_partial)",
13: "$lexer_partials = :partials :lbrace $_gen2 :rbrace -> RegexPartials( list=$2 )",
14: "$regex_partial = :regex :arrow :regex_partial -> RegexPartial( regex=$0, name=$2 )",
15: "$lexer_regex = $enumerated_regex",
16: "$_gen3 = $regex_options",
17: "$_gen3 = :_empty",
18: "$_gen4 = list($lexer_target)",
19: "$lexer_regex = :regex $_gen3 :arrow $_gen4 -> Regex( regex=$0, options=$1, onmatch=$3 )",
20: "$_gen5 = list($regex_enumeration)",
21: "$enumerated_regex = :regex_enum :lbrace $_gen5 :rbrace :arrow $_gen4 -> EnumeratedRegex( enums=$2, onmatch=$5 )",
22: "$_gen6 = $regex_enumeration_options",
23: "$_gen6 = :_empty",
24: "$regex_enumeration = :identifier :colon :regex $_gen6 -> RegexEnum( language=$0, regex=$2, options=$3 )",
25: "$_gen7 = list(:identifier,:comma)",
26: "$regex_enumeration_options = :lparen $_gen7 :rparen -> $1",
27: "$regex_options = :lbrace $_gen7 :rbrace -> $1",
28: "$lexer_target = $terminal",
29: "$_gen8 = $terminal",
30: "$_gen8 = :_empty",
31: "$lexer_target = :identifier :lparen $_gen8 :rparen -> LexerFunctionCall( name=$0, terminal=$2 )",
32: "$lexer_target = :stack_push",
33: "$lexer_target = :action",
34: "$_gen9 = $match_group",
35: "$_gen9 = :_empty",
36: "$terminal = :terminal $_gen9 -> Terminal( name=$0, group=$1 )",
37: "$match_group = :lsquare :integer :rsquare -> $1",
38: "$match_group = :no_group",
39: "$lexer_target = :null -> Null( )",
40: "$lexer_mode = :mode :langle :identifier :rangle :lbrace $_gen1 :rbrace -> Mode( name=$2, atoms=$5 )",
41: "$parser = $parser_ll1",
42: "$parser = $parser_expression",
43: "$_gen10 = list($ll1_rule)",
44: "$parser_ll1 = :parser :lbrace $_gen10 :rbrace -> Parser( rules=$2 )",
45: "$ll1_rule = :ll1_rule_hint :nonterminal :equals $ll1_rule_rhs -> Rule( nonterminal=$1, production=$3 )",
46: "$_gen11 = list($rule,:pipe)",
47: "$ll1_rule_rhs = $_gen11",
48: "$_gen12 = list($morpheme)",
49: "$_gen13 = $ast_transform",
50: "$_gen13 = :_empty",
51: "$rule = $_gen12 $_gen13 -> Production( morphemes=$0, ast=$1 )",
52: "$ll1_rule_rhs = :null -> NullProduction( )",
53: "$ll1_rule_rhs = $parser",
54: "$_gen14 = list($expression_rule)",
55: "$parser_expression = :parser_expression :lbrace $_gen14 :rbrace -> ExpressionParser( rules=$2 )",
56: "$_gen15 = $binding_power",
57: "$_gen15 = :_empty",
58: "$expression_rule = $_gen15 :expr_rule_hint :nonterminal :equals $expression_rule_production -> ExpressionRule( precedence=$0, nonterminal=$2, production=$4 )",
59: "$_gen16 = $led",
60: "$_gen16 = :_empty",
61: "$expression_rule_production = :mixfix_rule_hint $nud $_gen13 $_gen16 $_gen13 -> MixfixProduction( nud=$1, nud_ast=$2, led=$3, ast=$4 )",
62: "$expression_rule_production = :prefix_rule_hint $_gen12 $_gen13 -> PrefixProduction( morphemes=$1, ast=$2 )",
63: "$expression_rule_production = :infix_rule_hint $_gen12 $_gen13 -> InfixProduction( morphemes=$1, ast=$2 )",
64: "$nud = $_gen12",
65: "$led = :expression_divider $_gen12 -> $1",
66: "$binding_power = :lparen $precedence :rparen -> $1",
67: "$precedence = $binding_power_marker :colon $associativity -> Precedence( marker=$0, associativity=$2 )",
68: "$binding_power_marker = :asterisk",
69: "$binding_power_marker = :dash",
70: "$associativity = :left",
71: "$associativity = :right",
72: "$associativity = :unary",
73: "$morpheme = :terminal",
74: "$morpheme = :nonterminal",
75: "$morpheme = $macro",
76: "$ast_transform = :arrow $ast_transform_sub -> $1",
77: "$_gen17 = list($ast_parameter,:comma)",
78: "$ast_transform_sub = :identifier :lparen $_gen17 :rparen -> AstTransformation( name=$0, parameters=$2 )",
79: "$ast_transform_sub = :nonterminal_reference",
80: "$ast_parameter = :identifier :equals :nonterminal_reference -> AstParameter( name=$0, index=$2 )",
81: "$_gen18 = list($macro_parameter,:comma)",
82: "$macro = :identifier :lparen $_gen18 :rparen -> Macro( name=$0, parameters=$2 )",
83: "$macro_parameter = :nonterminal",
84: "$macro_parameter = :terminal",
85: "$macro_parameter = :string",
86: "$macro_parameter = :integer",
87: "$macro_parameter = :null",
}
def is_terminal(id): return isinstance(id, int) and 0 <= id <= 45
def parse(tokens, errors=None, start=None):
if errors is None:
errors = DefaultSyntaxErrorHandler()
if isinstance(tokens, str):
tokens = lex(tokens, 'string', errors)
ctx = ParserContext(tokens, errors)
tree = parse_grammar(ctx)
if tokens.current() != None:
raise ctx.errors.excess_tokens()
return tree
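# Usage sketch (hypothetical `source` string of grammar text; `lex` is defined
# later in this module, so parse() also accepts raw strings):
#   tree = parse(source)            # -> ParseTree
#   ast = tree.ast()                # -> Ast / AstList / Terminal
#   print(tree.dumps(indent=2))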
def expect(ctx, terminal_id):
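    # Consume exactly one token of type `terminal_id`; raise a syntax error on
    # EOF, on a mismatched token, or if the next token has an invalid id.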
current = ctx.tokens.current()
if not current:
raise ctx.errors.no_more_tokens(ctx.nonterminal, terminals[terminal_id], ctx.tokens.last())
if current.id != terminal_id:
raise ctx.errors.unexpected_symbol(ctx.nonterminal, current, [terminals[terminal_id]], ctx.rule)
next = ctx.tokens.advance()
if next and not is_terminal(next.id):
raise ctx.errors.invalid_terminal(ctx.nonterminal, next)
return current
def parse__gen18(ctx):
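    # Generated list parser: collects $macro_parameter items separated by
    # :comma (id 38), returning early when the lookahead is in the follow set.
    # The other parse__gen* functions below follow the same pattern.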
tree = ParseTree(NonTerminal(100, '_gen18'))
tree.list = True;
tree.list_separator_id = 38
ctx.nonterminal = "_gen18"
if ctx.tokens.current() is not None and \
ctx.tokens.current().id not in nonterminal_first[101] and \
ctx.tokens.current().id in nonterminal_follow[100]:
return tree;
if ctx.tokens.current() is None:
return tree
minimum = 0;
while minimum > 0 or \
(ctx.tokens.current() is not None and \
ctx.tokens.current().id in nonterminal_first.get(100)):
tree.add(parse_macro_parameter(ctx))
ctx.nonterminal = "_gen18" # Horrible -- because parse_* can reset this
if ctx.tokens.current() is not None and ctx.tokens.current().id == 38:
tree.add(expect(ctx, 38));
else:
break
minimum = max(minimum - 1, 0)
return tree
def parse__gen5(ctx):
tree = ParseTree(NonTerminal(75, '_gen5'))
tree.list = True;
ctx.nonterminal = "_gen5"
if ctx.tokens.current() is not None and \
ctx.tokens.current().id not in nonterminal_first[101] and \
ctx.tokens.current().id in nonterminal_follow[75]:
return tree;
if ctx.tokens.current() is None:
return tree
minimum = 0;
while minimum > 0 or \
(ctx.tokens.current() is not None and \
ctx.tokens.current().id in nonterminal_first.get(75)):
tree.add(parse_regex_enumeration(ctx))
ctx.nonterminal = "_gen5" # Horrible -- because parse_* can reset this
minimum = max(minimum - 1, 0)
return tree
def parse__gen11(ctx):
tree = ParseTree(NonTerminal(77, '_gen11'))
tree.list = True;
tree.list_separator_id = 40
ctx.nonterminal = "_gen11"
if ctx.tokens.current() is not None and \
ctx.tokens.current().id not in nonterminal_first[101] and \
ctx.tokens.current().id in nonterminal_follow[77]:
return tree;
if ctx.tokens.current() is None:
return tree
minimum = 0;
while minimum > 0 or \
(ctx.tokens.current() is not None and \
ctx.tokens.current().id in nonterminal_first.get(77)):
tree.add(parse_rule(ctx))
ctx.nonterminal = "_gen11" # Horrible -- because parse_* can reset this
if ctx.tokens.current() is not None and ctx.tokens.current().id == 40:
tree.add(expect(ctx, 40));
else:
break
minimum = max(minimum - 1, 0)
return tree
def parse__gen17(ctx):
tree = ParseTree(NonTerminal(93, '_gen17'))
tree.list = True;
tree.list_separator_id = 38
ctx.nonterminal = "_gen17"
if ctx.tokens.current() is not None and \
ctx.tokens.current().id not in nonterminal_first[101] and \
ctx.tokens.current().id in nonterminal_follow[93]:
return tree;
if ctx.tokens.current() is None:
return tree
minimum = 0;
while minimum > 0 or \
(ctx.tokens.current() is not None and \
ctx.tokens.current().id in nonterminal_first.get(93)):
tree.add(parse_ast_parameter(ctx))
ctx.nonterminal = "_gen17" # Horrible -- because parse_* can reset this
if ctx.tokens.current() is not None and ctx.tokens.current().id == 38:
tree.add(expect(ctx, 38));
else:
break
minimum = max(minimum - 1, 0)
return tree
def parse__gen1(ctx):
tree = ParseTree(NonTerminal(61, '_gen1'))
tree.list = True;
ctx.nonterminal = "_gen1"
if ctx.tokens.current() is not None and \
ctx.tokens.current().id not in nonterminal_first[101] and \
ctx.tokens.current().id in nonterminal_follow[61]:
return tree;
if ctx.tokens.current() is None:
return tree
minimum = 0;
while minimum > 0 or \
(ctx.tokens.current() is not None and \
ctx.tokens.current().id in nonterminal_first.get(61)):
tree.add(parse_lexer_atom(ctx))
ctx.nonterminal = "_gen1" # Horrible -- because parse_* can reset this
minimum = max(minimum - 1, 0)
return tree
def parse__gen10(ctx):
tree = ParseTree(NonTerminal(96, '_gen10'))
tree.list = True;
ctx.nonterminal = "_gen10"
if ctx.tokens.current() is not None and \
ctx.tokens.current().id not in nonterminal_first[101] and \
ctx.tokens.current().id in nonterminal_follow[96]:
return tree;
if ctx.tokens.current() is None:
return tree
minimum = 0;
while minimum > 0 or \
(ctx.tokens.current() is not None and \
ctx.tokens.current().id in nonterminal_first.get(96)):
tree.add(parse_ll1_rule(ctx))
ctx.nonterminal = "_gen10" # Horrible -- because parse_* can reset this
minimum = max(minimum - 1, 0)
return tree
def parse__gen0(ctx):
tree = ParseTree(NonTerminal(62, '_gen0'))
tree.list = True;
ctx.nonterminal = "_gen0"
if ctx.tokens.current() is not None and \
ctx.tokens.current().id not in nonterminal_first[101] and \
ctx.tokens.current().id in nonterminal_follow[62]:
return tree;
if ctx.tokens.current() is None:
return tree
minimum = 0;
while minimum > 0 or \
(ctx.tokens.current() is not None and \
ctx.tokens.current().id in nonterminal_first.get(62)):
tree.add(parse_body_element(ctx))
ctx.nonterminal = "_gen0" # Horrible -- because parse_* can reset this
minimum = max(minimum - 1, 0)
return tree
def parse__gen4(ctx):
tree = ParseTree(NonTerminal(81, '_gen4'))
tree.list = True;
ctx.nonterminal = "_gen4"
if ctx.tokens.current() is not None and \
ctx.tokens.current().id not in nonterminal_first[101] and \
ctx.tokens.current().id in nonterminal_follow[81]:
return tree;
if ctx.tokens.current() is None:
return tree
minimum = 0;
while minimum > 0 or \
(ctx.tokens.current() is not None and \
ctx.tokens.current().id in nonterminal_first.get(81)):
tree.add(parse_lexer_target(ctx))
ctx.nonterminal = "_gen4" # Horrible -- because parse_* can reset this
minimum = max(minimum - 1, 0)
return tree
def parse__gen2(ctx):
tree = ParseTree(NonTerminal(63, '_gen2'))
tree.list = True;
ctx.nonterminal = "_gen2"
if ctx.tokens.current() is not None and \
ctx.tokens.current().id not in nonterminal_first[101] and \
ctx.tokens.current().id in nonterminal_follow[63]:
return tree;
if ctx.tokens.current() is None:
return tree
minimum = 0;
while minimum > 0 or \
(ctx.tokens.current() is not None and \
ctx.tokens.current().id in nonterminal_first.get(63)):
tree.add(parse_regex_partial(ctx))
ctx.nonterminal = "_gen2" # Horrible -- because parse_* can reset this
minimum = max(minimum - 1, 0)
return tree
def parse__gen12(ctx):
tree = ParseTree(NonTerminal(82, '_gen12'))
tree.list = True;
ctx.nonterminal = "_gen12"
if ctx.tokens.current() is not None and \
ctx.tokens.current().id not in nonterminal_first[101] and \
ctx.tokens.current().id in nonterminal_follow[82]:
return tree;
if ctx.tokens.current() is None:
return tree
minimum = 0;
while minimum > 0 or \
(ctx.tokens.current() is not None and \
ctx.tokens.current().id in nonterminal_first.get(82)):
tree.add(parse_morpheme(ctx))
ctx.nonterminal = "_gen12" # Horrible -- because parse_* can reset this
minimum = max(minimum - 1, 0)
return tree
def parse__gen7(ctx):
tree = ParseTree(NonTerminal(83, '_gen7'))
tree.list = True;
tree.list_separator_id = 38
ctx.nonterminal = "_gen7"
if ctx.tokens.current() is not None and \
ctx.tokens.current().id not in nonterminal_first[101] and \
ctx.tokens.current().id in nonterminal_follow[83]:
return tree;
if ctx.tokens.current() is None:
return tree
minimum = 0;
while minimum > 0 or \
(ctx.tokens.current() is not None and \
ctx.tokens.current().id in nonterminal_first.get(83)):
tree.add(expect(ctx, 17))
if ctx.tokens.current() is not None and ctx.tokens.current().id == 38:
tree.add(expect(ctx, 38));
else:
break
minimum = max(minimum - 1, 0)
return tree
def parse__gen14(ctx):
tree = ParseTree(NonTerminal(69, '_gen14'))
tree.list = True;
ctx.nonterminal = "_gen14"
if ctx.tokens.current() is not None and \
ctx.tokens.current().id not in nonterminal_first[101] and \
ctx.tokens.current().id in nonterminal_follow[69]:
return tree;
if ctx.tokens.current() is None:
return tree
minimum = 0;
while minimum > 0 or \
(ctx.tokens.current() is not None and \
ctx.tokens.current().id in nonterminal_first.get(69)):
tree.add(parse_expression_rule(ctx))
ctx.nonterminal = "_gen14" # Horrible -- because parse_* can reset this
minimum = max(minimum - 1, 0)
return tree
def parse__gen3(ctx):
current = ctx.tokens.current()
rule = table[0][current.id] if current else -1
tree = ParseTree(NonTerminal(46, '_gen3'))
ctx.nonterminal = "_gen3"
if current != None and current.id in nonterminal_follow[46] and current.id not in nonterminal_first[46]:
return tree
if current == None:
return tree
if rule == 16: # $_gen3 = $regex_options
ctx.rule = rules[16]
tree.astTransform = AstTransformSubstitution(0)
subtree = parse_regex_options(ctx)
tree.add(subtree)
return tree
return tree
def parse_associativity(ctx):
current = ctx.tokens.current()
rule = table[1][current.id] if current else -1
tree = ParseTree(NonTerminal(47, 'associativity'))
ctx.nonterminal = "associativity"
if current == None:
raise ctx.errors.unexpected_eof()
if rule == 70: # $associativity = :left
ctx.rule = rules[70]
tree.astTransform = AstTransformSubstitution(0)
t = expect(ctx, 22) # :left
tree.add(t)
return tree
elif rule == 71: # $associativity = :right
ctx.rule = rules[71]
tree.astTransform = AstTransformSubstitution(0)
t = expect(ctx, 24) # :right
tree.add(t)
return tree
elif rule == 72: # $associativity = :unary
ctx.rule = rules[72]
tree.astTransform = AstTransformSubstitution(0)
t = expect(ctx, 4) # :unary
tree.add(t)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[47] if x >=0],
rules[72]
)
def parse__gen8(ctx):
current = ctx.tokens.current()
rule = table[2][current.id] if current else -1
tree = ParseTree(NonTerminal(48, '_gen8'))
ctx.nonterminal = "_gen8"
if current != None and current.id in nonterminal_follow[48] and current.id not in nonterminal_first[48]:
return tree
if current == None:
return tree
if rule == 29: # $_gen8 = $terminal
ctx.rule = rules[29]
tree.astTransform = AstTransformSubstitution(0)
subtree = parse_terminal(ctx)
tree.add(subtree)
return tree
return tree
def parse_parser_ll1(ctx):
current = ctx.tokens.current()
rule = table[3][current.id] if current else -1
tree = ParseTree(NonTerminal(49, 'parser_ll1'))
ctx.nonterminal = "parser_ll1"
if current == None:
raise ctx.errors.unexpected_eof()
if rule == 44: # $parser_ll1 = :parser :lbrace $_gen10 :rbrace -> Parser( rules=$2 )
ctx.rule = rules[44]
ast_parameters = OrderedDict([
('rules', 2),
])
tree.astTransform = AstTransformNodeCreator('Parser', ast_parameters)
t = expect(ctx, 36) # :parser
tree.add(t)
t = expect(ctx, 2) # :lbrace
tree.add(t)
subtree = parse__gen10(ctx)
tree.add(subtree)
t = expect(ctx, 15) # :rbrace
tree.add(t)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[49] if x >=0],
rules[44]
)
def parse_morpheme(ctx):
current = ctx.tokens.current()
rule = table[4][current.id] if current else -1
tree = ParseTree(NonTerminal(50, 'morpheme'))
ctx.nonterminal = "morpheme"
if current == None:
raise ctx.errors.unexpected_eof()
if rule == 73: # $morpheme = :terminal
ctx.rule = rules[73]
tree.astTransform = AstTransformSubstitution(0)
t = expect(ctx, 34) # :terminal
tree.add(t)
return tree
elif rule == 74: # $morpheme = :nonterminal
ctx.rule = rules[74]
tree.astTransform = AstTransformSubstitution(0)
t = expect(ctx, 42) # :nonterminal
tree.add(t)
return tree
elif rule == 75: # $morpheme = $macro
ctx.rule = rules[75]
tree.astTransform = AstTransformSubstitution(0)
subtree = parse_macro(ctx)
tree.add(subtree)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[50] if x >=0],
rules[75]
)
def parse_macro_parameter(ctx):
current = ctx.tokens.current()
rule = table[5][current.id] if current else -1
tree = ParseTree(NonTerminal(51, 'macro_parameter'))
ctx.nonterminal = "macro_parameter"
if current == None:
raise ctx.errors.unexpected_eof()
if rule == 83: # $macro_parameter = :nonterminal
ctx.rule = rules[83]
tree.astTransform = AstTransformSubstitution(0)
t = expect(ctx, 42) # :nonterminal
tree.add(t)
return tree
elif rule == 84: # $macro_parameter = :terminal
ctx.rule = rules[84]
tree.astTransform = AstTransformSubstitution(0)
t = expect(ctx, 34) # :terminal
tree.add(t)
return tree
elif rule == 85: # $macro_parameter = :string
ctx.rule = rules[85]
tree.astTransform = AstTransformSubstitution(0)
t = expect(ctx, 31) # :string
tree.add(t)
return tree
elif rule == 86: # $macro_parameter = :integer
ctx.rule = rules[86]
tree.astTransform = AstTransformSubstitution(0)
t = expect(ctx, 21) # :integer
tree.add(t)
return tree
elif rule == 87: # $macro_parameter = :null
ctx.rule = rules[87]
tree.astTransform = AstTransformSubstitution(0)
t = expect(ctx, 45) # :null
tree.add(t)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[51] if x >=0],
rules[87]
)
def parse__gen13(ctx):
current = ctx.tokens.current()
rule = table[6][current.id] if current else -1
tree = ParseTree(NonTerminal(52, '_gen13'))
ctx.nonterminal = "_gen13"
if current != None and current.id in nonterminal_follow[52] and current.id not in nonterminal_first[52]:
return tree
if current == None:
return tree
if rule == 49: # $_gen13 = $ast_transform
ctx.rule = rules[49]
tree.astTransform = AstTransformSubstitution(0)
subtree = parse_ast_transform(ctx)
tree.add(subtree)
return tree
return tree
def parse__gen16(ctx):
current = ctx.tokens.current()
rule = table[7][current.id] if current else -1
tree = ParseTree(NonTerminal(53, '_gen16'))
ctx.nonterminal = "_gen16"
if current != None and current.id in nonterminal_follow[53] and current.id not in nonterminal_first[53]:
return tree
if current == None:
return tree
if rule == 59: # $_gen16 = $led
ctx.rule = rules[59]
tree.astTransform = AstTransformSubstitution(0)
subtree = parse_led(ctx)
tree.add(subtree)
return tree
return tree
def parse_lexer_atom(ctx):
current = ctx.tokens.current()
rule = table[8][current.id] if current else -1
tree = ParseTree(NonTerminal(54, 'lexer_atom'))
ctx.nonterminal = "lexer_atom"
if current == None:
raise ctx.errors.unexpected_eof()
if rule == 7: # $lexer_atom = $lexer_regex
ctx.rule = rules[7]
tree.astTransform = AstTransformSubstitution(0)
subtree = parse_lexer_regex(ctx)
tree.add(subtree)
return tree
elif rule == 8: # $lexer_atom = $lexer_mode
ctx.rule = rules[8]
tree.astTransform = AstTransformSubstitution(0)
subtree = parse_lexer_mode(ctx)
tree.add(subtree)
return tree
elif rule == 9: # $lexer_atom = $lexer_partials
ctx.rule = rules[9]
tree.astTransform = AstTransformSubstitution(0)
subtree = parse_lexer_partials(ctx)
tree.add(subtree)
return tree
elif rule == 10: # $lexer_atom = $lexer_code
ctx.rule = rules[10]
tree.astTransform = AstTransformSubstitution(0)
subtree = parse_lexer_code(ctx)
tree.add(subtree)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[54] if x >=0],
rules[10]
)
def parse_lexer_mode(ctx):
current = ctx.tokens.current()
rule = table[9][current.id] if current else -1
tree = ParseTree(NonTerminal(55, 'lexer_mode'))
ctx.nonterminal = "lexer_mode"
if current == None:
raise ctx.errors.unexpected_eof()
if rule == 40: # $lexer_mode = :mode :langle :identifier :rangle :lbrace $_gen1 :rbrace -> Mode( name=$2, atoms=$5 )
ctx.rule = rules[40]
ast_parameters = OrderedDict([
('name', 2),
('atoms', 5),
])
tree.astTransform = AstTransformNodeCreator('Mode', ast_parameters)
t = expect(ctx, 43) # :mode
tree.add(t)
t = expect(ctx, 10) # :langle
tree.add(t)
t = expect(ctx, 17) # :identifier
tree.add(t)
t = expect(ctx, 19) # :rangle
tree.add(t)
t = expect(ctx, 2) # :lbrace
tree.add(t)
subtree = parse__gen1(ctx)
tree.add(subtree)
t = expect(ctx, 15) # :rbrace
tree.add(t)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[55] if x >=0],
rules[40]
)
def parse_nud(ctx):
current = ctx.tokens.current()
rule = table[10][current.id] if current else -1
tree = ParseTree(NonTerminal(56, 'nud'))
ctx.nonterminal = "nud"
if current != None and current.id in nonterminal_follow[56] and current.id not in nonterminal_first[56]:
return tree
if current == None:
return tree
if rule == 64: # $nud = $_gen12
ctx.rule = rules[64]
tree.astTransform = AstTransformSubstitution(0)
subtree = parse__gen12(ctx)
tree.add(subtree)
return tree
return tree
def parse_ast_transform_sub(ctx):
current = ctx.tokens.current()
rule = table[11][current.id] if current else -1
tree = ParseTree(NonTerminal(57, 'ast_transform_sub'))
ctx.nonterminal = "ast_transform_sub"
if current == None:
raise ctx.errors.unexpected_eof()
if rule == 78: # $ast_transform_sub = :identifier :lparen $_gen17 :rparen -> AstTransformation( name=$0, parameters=$2 )
ctx.rule = rules[78]
ast_parameters = OrderedDict([
('name', 0),
('parameters', 2),
])
tree.astTransform = AstTransformNodeCreator('AstTransformation', ast_parameters)
t = expect(ctx, 17) # :identifier
tree.add(t)
t = expect(ctx, 37) # :lparen
tree.add(t)
subtree = parse__gen17(ctx)
tree.add(subtree)
t = expect(ctx, 23) # :rparen
tree.add(t)
return tree
elif rule == 79: # $ast_transform_sub = :nonterminal_reference
ctx.rule = rules[79]
tree.astTransform = AstTransformSubstitution(0)
t = expect(ctx, 44) # :nonterminal_reference
tree.add(t)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[57] if x >=0],
rules[79]
)
def parse_enumerated_regex(ctx):
current = ctx.tokens.current()
rule = table[12][current.id] if current else -1
tree = ParseTree(NonTerminal(58, 'enumerated_regex'))
ctx.nonterminal = "enumerated_regex"
if current == None:
raise ctx.errors.unexpected_eof()
if rule == 21: # $enumerated_regex = :regex_enum :lbrace $_gen5 :rbrace :arrow $_gen4 -> EnumeratedRegex( enums=$2, onmatch=$5 )
ctx.rule = rules[21]
ast_parameters = OrderedDict([
('enums', 2),
('onmatch', 5),
])
tree.astTransform = AstTransformNodeCreator('EnumeratedRegex', ast_parameters)
t = expect(ctx, 0) # :regex_enum
tree.add(t)
t = expect(ctx, 2) # :lbrace
tree.add(t)
subtree = parse__gen5(ctx)
tree.add(subtree)
t = expect(ctx, 15) # :rbrace
tree.add(t)
t = expect(ctx, 3) # :arrow
tree.add(t)
subtree = parse__gen4(ctx)
tree.add(subtree)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[58] if x >=0],
rules[21]
)
def parse_body_element_sub(ctx):
current = ctx.tokens.current()
rule = table[13][current.id] if current else -1
tree = ParseTree(NonTerminal(59, 'body_element_sub'))
ctx.nonterminal = "body_element_sub"
if current == None:
raise ctx.errors.unexpected_eof()
if rule == 3: # $body_element_sub = $lexer
ctx.rule = rules[3]
tree.astTransform = AstTransformSubstitution(0)
subtree = parse_lexer(ctx)
tree.add(subtree)
return tree
elif rule == 4: # $body_element_sub = $parser
ctx.rule = rules[4]
tree.astTransform = AstTransformSubstitution(0)
subtree = parse_parser(ctx)
tree.add(subtree)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[59] if x >=0],
rules[4]
)
def parse__gen6(ctx):
current = ctx.tokens.current()
rule = table[14][current.id] if current else -1
tree = ParseTree(NonTerminal(60, '_gen6'))
ctx.nonterminal = "_gen6"
if current != None and current.id in nonterminal_follow[60] and current.id not in nonterminal_first[60]:
return tree
if current == None:
return tree
if rule == 22: # $_gen6 = $regex_enumeration_options
ctx.rule = rules[22]
tree.astTransform = AstTransformSubstitution(0)
subtree = parse_regex_enumeration_options(ctx)
tree.add(subtree)
return tree
return tree
def parse_regex_enumeration_options(ctx):
current = ctx.tokens.current()
rule = table[18][current.id] if current else -1
tree = ParseTree(NonTerminal(64, 'regex_enumeration_options'))
ctx.nonterminal = "regex_enumeration_options"
if current == None:
raise ctx.errors.unexpected_eof()
if rule == 26: # $regex_enumeration_options = :lparen $_gen7 :rparen -> $1
ctx.rule = rules[26]
tree.astTransform = AstTransformSubstitution(1)
t = expect(ctx, 37) # :lparen
tree.add(t)
subtree = parse__gen7(ctx)
tree.add(subtree)
t = expect(ctx, 23) # :rparen
tree.add(t)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[64] if x >=0],
rules[26]
)
def parse_terminal(ctx):
current = ctx.tokens.current()
rule = table[19][current.id] if current else -1
tree = ParseTree(NonTerminal(65, 'terminal'))
ctx.nonterminal = "terminal"
if current == None:
raise ctx.errors.unexpected_eof()
if rule == 36: # $terminal = :terminal $_gen9 -> Terminal( name=$0, group=$1 )
ctx.rule = rules[36]
ast_parameters = OrderedDict([
('name', 0),
('group', 1),
])
tree.astTransform = AstTransformNodeCreator('Terminal', ast_parameters)
t = expect(ctx, 34) # :terminal
tree.add(t)
subtree = parse__gen9(ctx)
tree.add(subtree)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[65] if x >=0],
rules[36]
)
def parse_expression_rule(ctx):
current = ctx.tokens.current()
rule = table[20][current.id] if current else -1
tree = ParseTree(NonTerminal(66, 'expression_rule'))
ctx.nonterminal = "expression_rule"
if current == None:
raise ctx.errors.unexpected_eof()
if rule == 58: # $expression_rule = $_gen15 :expr_rule_hint :nonterminal :equals $expression_rule_production -> ExpressionRule( precedence=$0, nonterminal=$2, production=$4 )
ctx.rule = rules[58]
ast_parameters = OrderedDict([
('precedence', 0),
('nonterminal', 2),
('production', 4),
])
tree.astTransform = AstTransformNodeCreator('ExpressionRule', ast_parameters)
subtree = parse__gen15(ctx)
tree.add(subtree)
t = expect(ctx, 12) # :expr_rule_hint
tree.add(t)
t = expect(ctx, 42) # :nonterminal
tree.add(t)
t = expect(ctx, 7) # :equals
tree.add(t)
subtree = parse_expression_rule_production(ctx)
tree.add(subtree)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[66] if x >=0],
rules[58]
)
def parse_binding_power_marker(ctx):
current = ctx.tokens.current()
rule = table[21][current.id] if current else -1
tree = ParseTree(NonTerminal(67, 'binding_power_marker'))
ctx.nonterminal = "binding_power_marker"
if current == None:
raise ctx.errors.unexpected_eof()
if rule == 68: # $binding_power_marker = :asterisk
ctx.rule = rules[68]
tree.astTransform = AstTransformSubstitution(0)
t = expect(ctx, 29) # :asterisk
tree.add(t)
return tree
elif rule == 69: # $binding_power_marker = :dash
ctx.rule = rules[69]
tree.astTransform = AstTransformSubstitution(0)
t = expect(ctx, 1) # :dash
tree.add(t)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[67] if x >=0],
rules[69]
)
def parse_ll1_rule(ctx):
current = ctx.tokens.current()
rule = table[22][current.id] if current else -1
tree = ParseTree(NonTerminal(68, 'll1_rule'))
ctx.nonterminal = "ll1_rule"
if current == None:
raise ctx.errors.unexpected_eof()
if rule == 45: # $ll1_rule = :ll1_rule_hint :nonterminal :equals $ll1_rule_rhs -> Rule( nonterminal=$1, production=$3 )
ctx.rule = rules[45]
ast_parameters = OrderedDict([
('nonterminal', 1),
('production', 3),
])
tree.astTransform = AstTransformNodeCreator('Rule', ast_parameters)
t = expect(ctx, 30) # :ll1_rule_hint
tree.add(t)
t = expect(ctx, 42) # :nonterminal
tree.add(t)
t = expect(ctx, 7) # :equals
tree.add(t)
subtree = parse_ll1_rule_rhs(ctx)
tree.add(subtree)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[68] if x >=0],
rules[45]
)
def parse_lexer_code(ctx):
current = ctx.tokens.current()
rule = table[24][current.id] if current else -1
tree = ParseTree(NonTerminal(70, 'lexer_code'))
ctx.nonterminal = "lexer_code"
if current == None:
raise ctx.errors.unexpected_eof()
if rule == 11: # $lexer_code = :code_start :language :code -> LexerCode( language=$1, code=$2 )
ctx.rule = rules[11]
ast_parameters = OrderedDict([
('language', 1),
('code', 2),
])
tree.astTransform = AstTransformNodeCreator('LexerCode', ast_parameters)
t = expect(ctx, 9) # :code_start
tree.add(t)
t = expect(ctx, 20) # :language
tree.add(t)
t = expect(ctx, 16) # :code
tree.add(t)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[70] if x >=0],
rules[11]
)
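# The expression_rule productions below drive a Pratt-style expression parser:
# the :mixfix_rule_hint, :prefix_rule_hint and :infix_rule_hint terminals
# (synthesized by the lexer) select the production shape, with "nud" and "led"
# used in the usual null-denotation / left-denotation sense for the morphemes
# parsed before and after an operator.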
def parse_expression_rule_production(ctx):
current = ctx.tokens.current()
rule = table[25][current.id] if current else -1
tree = ParseTree(NonTerminal(71, 'expression_rule_production'))
ctx.nonterminal = "expression_rule_production"
if current == None:
raise ctx.errors.unexpected_eof()
if rule == 61: # $expression_rule_production = :mixfix_rule_hint $nud $_gen13 $_gen16 $_gen13 -> MixfixProduction( nud=$1, nud_ast=$2, led=$3, ast=$4 )
ctx.rule = rules[61]
ast_parameters = OrderedDict([
('nud', 1),
('nud_ast', 2),
('led', 3),
('ast', 4),
])
tree.astTransform = AstTransformNodeCreator('MixfixProduction', ast_parameters)
t = expect(ctx, 25) # :mixfix_rule_hint
tree.add(t)
subtree = parse_nud(ctx)
tree.add(subtree)
subtree = parse__gen13(ctx)
tree.add(subtree)
subtree = parse__gen16(ctx)
tree.add(subtree)
subtree = parse__gen13(ctx)
tree.add(subtree)
return tree
elif rule == 62: # $expression_rule_production = :prefix_rule_hint $_gen12 $_gen13 -> PrefixProduction( morphemes=$1, ast=$2 )
ctx.rule = rules[62]
ast_parameters = OrderedDict([
('morphemes', 1),
('ast', 2),
])
tree.astTransform = AstTransformNodeCreator('PrefixProduction', ast_parameters)
t = expect(ctx, 28) # :prefix_rule_hint
tree.add(t)
subtree = parse__gen12(ctx)
tree.add(subtree)
subtree = parse__gen13(ctx)
tree.add(subtree)
return tree
elif rule == 63: # $expression_rule_production = :infix_rule_hint $_gen12 $_gen13 -> InfixProduction( morphemes=$1, ast=$2 )
ctx.rule = rules[63]
ast_parameters = OrderedDict([
('morphemes', 1),
('ast', 2),
])
tree.astTransform = AstTransformNodeCreator('InfixProduction', ast_parameters)
t = expect(ctx, 6) # :infix_rule_hint
tree.add(t)
subtree = parse__gen12(ctx)
tree.add(subtree)
subtree = parse__gen13(ctx)
tree.add(subtree)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[71] if x >=0],
rules[63]
)
def parse_rule(ctx):
current = ctx.tokens.current()
rule = table[26][current.id] if current else -1
tree = ParseTree(NonTerminal(72, 'rule'))
ctx.nonterminal = "rule"
if current != None and current.id in nonterminal_follow[72] and current.id not in nonterminal_first[72]:
return tree
if current == None:
return tree
if rule == 51: # $rule = $_gen12 $_gen13 -> Production( morphemes=$0, ast=$1 )
ctx.rule = rules[51]
ast_parameters = OrderedDict([
('morphemes', 0),
('ast', 1),
])
tree.astTransform = AstTransformNodeCreator('Production', ast_parameters)
subtree = parse__gen12(ctx)
tree.add(subtree)
subtree = parse__gen13(ctx)
tree.add(subtree)
return tree
return tree
def parse_ast_parameter(ctx):
current = ctx.tokens.current()
rule = table[27][current.id] if current else -1
tree = ParseTree(NonTerminal(73, 'ast_parameter'))
ctx.nonterminal = "ast_parameter"
if current == None:
raise ctx.errors.unexpected_eof()
if rule == 80: # $ast_parameter = :identifier :equals :nonterminal_reference -> AstParameter( name=$0, index=$2 )
ctx.rule = rules[80]
ast_parameters = OrderedDict([
('name', 0),
('index', 2),
])
tree.astTransform = AstTransformNodeCreator('AstParameter', ast_parameters)
t = expect(ctx, 17) # :identifier
tree.add(t)
t = expect(ctx, 7) # :equals
tree.add(t)
t = expect(ctx, 44) # :nonterminal_reference
tree.add(t)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[73] if x >=0],
rules[80]
)
def parse_body_element(ctx):
current = ctx.tokens.current()
rule = table[28][current.id] if current else -1
tree = ParseTree(NonTerminal(74, 'body_element'))
ctx.nonterminal = "body_element"
if current == None:
raise ctx.errors.unexpected_eof()
if rule == 2: # $body_element = $body_element_sub
ctx.rule = rules[2]
tree.astTransform = AstTransformSubstitution(0)
subtree = parse_body_element_sub(ctx)
tree.add(subtree)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[74] if x >=0],
rules[2]
)
def parse_match_group(ctx):
current = ctx.tokens.current()
rule = table[30][current.id] if current else -1
tree = ParseTree(NonTerminal(76, 'match_group'))
ctx.nonterminal = "match_group"
if current == None:
raise ctx.errors.unexpected_eof()
if rule == 37: # $match_group = :lsquare :integer :rsquare -> $1
ctx.rule = rules[37]
tree.astTransform = AstTransformSubstitution(1)
t = expect(ctx, 35) # :lsquare
tree.add(t)
t = expect(ctx, 21) # :integer
tree.add(t)
t = expect(ctx, 5) # :rsquare
tree.add(t)
return tree
elif rule == 38: # $match_group = :no_group
ctx.rule = rules[38]
tree.astTransform = AstTransformSubstitution(0)
t = expect(ctx, 11) # :no_group
tree.add(t)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[76] if x >=0],
rules[38]
)
def parse_binding_power(ctx):
current = ctx.tokens.current()
rule = table[32][current.id] if current else -1
tree = ParseTree(NonTerminal(78, 'binding_power'))
ctx.nonterminal = "binding_power"
if current == None:
raise ctx.errors.unexpected_eof()
if rule == 66: # $binding_power = :lparen $precedence :rparen -> $1
ctx.rule = rules[66]
tree.astTransform = AstTransformSubstitution(1)
t = expect(ctx, 37) # :lparen
tree.add(t)
subtree = parse_precedence(ctx)
tree.add(subtree)
t = expect(ctx, 23) # :rparen
tree.add(t)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[78] if x >=0],
rules[66]
)
def parse_regex_enumeration(ctx):
current = ctx.tokens.current()
rule = table[33][current.id] if current else -1
tree = ParseTree(NonTerminal(79, 'regex_enumeration'))
ctx.nonterminal = "regex_enumeration"
if current == None:
raise ctx.errors.unexpected_eof()
if rule == 24: # $regex_enumeration = :identifier :colon :regex $_gen6 -> RegexEnum( language=$0, regex=$2, options=$3 )
ctx.rule = rules[24]
ast_parameters = OrderedDict([
('language', 0),
('regex', 2),
('options', 3),
])
tree.astTransform = AstTransformNodeCreator('RegexEnum', ast_parameters)
t = expect(ctx, 17) # :identifier
tree.add(t)
t = expect(ctx, 26) # :colon
tree.add(t)
t = expect(ctx, 14) # :regex
tree.add(t)
subtree = parse__gen6(ctx)
tree.add(subtree)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[79] if x >=0],
rules[24]
)
def parse_ll1_rule_rhs(ctx):
current = ctx.tokens.current()
rule = table[34][current.id] if current else -1
tree = ParseTree(NonTerminal(80, 'll1_rule_rhs'))
ctx.nonterminal = "ll1_rule_rhs"
if current != None and current.id in nonterminal_follow[80] and current.id not in nonterminal_first[80]:
return tree
if current == None:
return tree
if rule == 47: # $ll1_rule_rhs = $_gen11
ctx.rule = rules[47]
tree.astTransform = AstTransformSubstitution(0)
subtree = parse__gen11(ctx)
tree.add(subtree)
return tree
elif rule == 52: # $ll1_rule_rhs = :null -> NullProduction( )
ctx.rule = rules[52]
ast_parameters = OrderedDict([
])
tree.astTransform = AstTransformNodeCreator('NullProduction', ast_parameters)
t = expect(ctx, 45) # :null
tree.add(t)
return tree
elif rule == 53: # $ll1_rule_rhs = $parser
ctx.rule = rules[53]
tree.astTransform = AstTransformSubstitution(0)
subtree = parse_parser(ctx)
tree.add(subtree)
return tree
return tree
def parse_regex_options(ctx):
current = ctx.tokens.current()
rule = table[38][current.id] if current else -1
tree = ParseTree(NonTerminal(84, 'regex_options'))
ctx.nonterminal = "regex_options"
if current == None:
raise ctx.errors.unexpected_eof()
if rule == 27: # $regex_options = :lbrace $_gen7 :rbrace -> $1
ctx.rule = rules[27]
tree.astTransform = AstTransformSubstitution(1)
t = expect(ctx, 2) # :lbrace
tree.add(t)
subtree = parse__gen7(ctx)
tree.add(subtree)
t = expect(ctx, 15) # :rbrace
tree.add(t)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[84] if x >=0],
rules[27]
)
def parse_regex_partial(ctx):
current = ctx.tokens.current()
rule = table[39][current.id] if current else -1
tree = ParseTree(NonTerminal(85, 'regex_partial'))
ctx.nonterminal = "regex_partial"
if current == None:
raise ctx.errors.unexpected_eof()
if rule == 14: # $regex_partial = :regex :arrow :regex_partial -> RegexPartial( regex=$0, name=$2 )
ctx.rule = rules[14]
ast_parameters = OrderedDict([
('regex', 0),
('name', 2),
])
tree.astTransform = AstTransformNodeCreator('RegexPartial', ast_parameters)
t = expect(ctx, 14) # :regex
tree.add(t)
t = expect(ctx, 3) # :arrow
tree.add(t)
t = expect(ctx, 18) # :regex_partial
tree.add(t)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[85] if x >=0],
rules[14]
)
def parse_led(ctx):
current = ctx.tokens.current()
rule = table[40][current.id] if current else -1
tree = ParseTree(NonTerminal(86, 'led'))
ctx.nonterminal = "led"
if current == None:
raise ctx.errors.unexpected_eof()
if rule == 65: # $led = :expression_divider $_gen12 -> $1
ctx.rule = rules[65]
tree.astTransform = AstTransformSubstitution(1)
t = expect(ctx, 27) # :expression_divider
tree.add(t)
subtree = parse__gen12(ctx)
tree.add(subtree)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[86] if x >=0],
rules[65]
)
def parse_precedence(ctx):
current = ctx.tokens.current()
rule = table[41][current.id] if current else -1
tree = ParseTree(NonTerminal(87, 'precedence'))
ctx.nonterminal = "precedence"
if current == None:
raise ctx.errors.unexpected_eof()
if rule == 67: # $precedence = $binding_power_marker :colon $associativity -> Precedence( marker=$0, associativity=$2 )
ctx.rule = rules[67]
ast_parameters = OrderedDict([
('marker', 0),
('associativity', 2),
])
tree.astTransform = AstTransformNodeCreator('Precedence', ast_parameters)
subtree = parse_binding_power_marker(ctx)
tree.add(subtree)
t = expect(ctx, 26) # :colon
tree.add(t)
subtree = parse_associativity(ctx)
tree.add(subtree)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[87] if x >=0],
rules[67]
)
def parse_parser_expression(ctx):
current = ctx.tokens.current()
rule = table[42][current.id] if current else -1
tree = ParseTree(NonTerminal(88, 'parser_expression'))
ctx.nonterminal = "parser_expression"
if current == None:
raise ctx.errors.unexpected_eof()
if rule == 55: # $parser_expression = :parser_expression :lbrace $_gen14 :rbrace -> ExpressionParser( rules=$2 )
ctx.rule = rules[55]
ast_parameters = OrderedDict([
('rules', 2),
])
tree.astTransform = AstTransformNodeCreator('ExpressionParser', ast_parameters)
t = expect(ctx, 41) # :parser_expression
tree.add(t)
t = expect(ctx, 2) # :lbrace
tree.add(t)
subtree = parse__gen14(ctx)
tree.add(subtree)
t = expect(ctx, 15) # :rbrace
tree.add(t)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[88] if x >=0],
rules[55]
)
def parse_lexer(ctx):
current = ctx.tokens.current()
rule = table[43][current.id] if current else -1
tree = ParseTree(NonTerminal(89, 'lexer'))
ctx.nonterminal = "lexer"
if current == None:
raise ctx.errors.unexpected_eof()
if rule == 6: # $lexer = :lexer :lbrace $_gen1 :rbrace -> Lexer( atoms=$2 )
ctx.rule = rules[6]
ast_parameters = OrderedDict([
('atoms', 2),
])
tree.astTransform = AstTransformNodeCreator('Lexer', ast_parameters)
t = expect(ctx, 32) # :lexer
tree.add(t)
t = expect(ctx, 2) # :lbrace
tree.add(t)
subtree = parse__gen1(ctx)
tree.add(subtree)
t = expect(ctx, 15) # :rbrace
tree.add(t)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[89] if x >=0],
rules[6]
)
def parse_macro(ctx):
current = ctx.tokens.current()
rule = table[44][current.id] if current else -1
tree = ParseTree(NonTerminal(90, 'macro'))
ctx.nonterminal = "macro"
if current == None:
raise ctx.errors.unexpected_eof()
if rule == 82: # $macro = :identifier :lparen $_gen18 :rparen -> Macro( name=$0, parameters=$2 )
ctx.rule = rules[82]
ast_parameters = OrderedDict([
('name', 0),
('parameters', 2),
])
tree.astTransform = AstTransformNodeCreator('Macro', ast_parameters)
t = expect(ctx, 17) # :identifier
tree.add(t)
t = expect(ctx, 37) # :lparen
tree.add(t)
subtree = parse__gen18(ctx)
tree.add(subtree)
t = expect(ctx, 23) # :rparen
tree.add(t)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[90] if x >=0],
rules[82]
)
def parse_lexer_regex(ctx):
current = ctx.tokens.current()
rule = table[45][current.id] if current else -1
tree = ParseTree(NonTerminal(91, 'lexer_regex'))
ctx.nonterminal = "lexer_regex"
if current == None:
raise ctx.errors.unexpected_eof()
if rule == 15: # $lexer_regex = $enumerated_regex
ctx.rule = rules[15]
tree.astTransform = AstTransformSubstitution(0)
subtree = parse_enumerated_regex(ctx)
tree.add(subtree)
return tree
elif rule == 19: # $lexer_regex = :regex $_gen3 :arrow $_gen4 -> Regex( regex=$0, options=$1, onmatch=$3 )
ctx.rule = rules[19]
ast_parameters = OrderedDict([
('regex', 0),
('options', 1),
('onmatch', 3),
])
tree.astTransform = AstTransformNodeCreator('Regex', ast_parameters)
t = expect(ctx, 14) # :regex
tree.add(t)
subtree = parse__gen3(ctx)
tree.add(subtree)
t = expect(ctx, 3) # :arrow
tree.add(t)
subtree = parse__gen4(ctx)
tree.add(subtree)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[91] if x >=0],
rules[19]
)
def parse__gen9(ctx):
current = ctx.tokens.current()
rule = table[46][current.id] if current else -1
tree = ParseTree(NonTerminal(92, '_gen9'))
ctx.nonterminal = "_gen9"
if current != None and current.id in nonterminal_follow[92] and current.id not in nonterminal_first[92]:
return tree
if current == None:
return tree
if rule == 34: # $_gen9 = $match_group
ctx.rule = rules[34]
tree.astTransform = AstTransformSubstitution(0)
subtree = parse_match_group(ctx)
tree.add(subtree)
return tree
return tree
def parse_parser(ctx):
current = ctx.tokens.current()
rule = table[48][current.id] if current else -1
tree = ParseTree(NonTerminal(94, 'parser'))
ctx.nonterminal = "parser"
if current == None:
raise ctx.errors.unexpected_eof()
if rule == 41: # $parser = $parser_ll1
ctx.rule = rules[41]
tree.astTransform = AstTransformSubstitution(0)
subtree = parse_parser_ll1(ctx)
tree.add(subtree)
return tree
elif rule == 42: # $parser = $parser_expression
ctx.rule = rules[42]
tree.astTransform = AstTransformSubstitution(0)
subtree = parse_parser_expression(ctx)
tree.add(subtree)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[94] if x >=0],
rules[42]
)
def parse_lexer_target(ctx):
current = ctx.tokens.current()
rule = table[49][current.id] if current else -1
tree = ParseTree(NonTerminal(95, 'lexer_target'))
ctx.nonterminal = "lexer_target"
if current == None:
raise ctx.errors.unexpected_eof()
if rule == 28: # $lexer_target = $terminal
ctx.rule = rules[28]
tree.astTransform = AstTransformSubstitution(0)
subtree = parse_terminal(ctx)
tree.add(subtree)
return tree
elif rule == 31: # $lexer_target = :identifier :lparen $_gen8 :rparen -> LexerFunctionCall( name=$0, terminal=$2 )
ctx.rule = rules[31]
ast_parameters = OrderedDict([
('name', 0),
('terminal', 2),
])
tree.astTransform = AstTransformNodeCreator('LexerFunctionCall', ast_parameters)
t = expect(ctx, 17) # :identifier
tree.add(t)
t = expect(ctx, 37) # :lparen
tree.add(t)
subtree = parse__gen8(ctx)
tree.add(subtree)
t = expect(ctx, 23) # :rparen
tree.add(t)
return tree
elif rule == 32: # $lexer_target = :stack_push
ctx.rule = rules[32]
tree.astTransform = AstTransformSubstitution(0)
t = expect(ctx, 8) # :stack_push
tree.add(t)
return tree
elif rule == 33: # $lexer_target = :action
ctx.rule = rules[33]
tree.astTransform = AstTransformSubstitution(0)
t = expect(ctx, 39) # :action
tree.add(t)
return tree
elif rule == 39: # $lexer_target = :null -> Null( )
ctx.rule = rules[39]
ast_parameters = OrderedDict([
])
tree.astTransform = AstTransformNodeCreator('Null', ast_parameters)
t = expect(ctx, 45) # :null
tree.add(t)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[95] if x >=0],
rules[39]
)
def parse_grammar(ctx):
current = ctx.tokens.current()
rule = table[51][current.id] if current else -1
tree = ParseTree(NonTerminal(97, 'grammar'))
ctx.nonterminal = "grammar"
if current == None:
raise ctx.errors.unexpected_eof()
if rule == 1: # $grammar = :grammar :lbrace $_gen0 :rbrace -> Grammar( body=$2 )
ctx.rule = rules[1]
ast_parameters = OrderedDict([
('body', 2),
])
tree.astTransform = AstTransformNodeCreator('Grammar', ast_parameters)
t = expect(ctx, 33) # :grammar
tree.add(t)
t = expect(ctx, 2) # :lbrace
tree.add(t)
subtree = parse__gen0(ctx)
tree.add(subtree)
t = expect(ctx, 15) # :rbrace
tree.add(t)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[97] if x >=0],
rules[1]
)
def parse_lexer_partials(ctx):
current = ctx.tokens.current()
rule = table[52][current.id] if current else -1
tree = ParseTree(NonTerminal(98, 'lexer_partials'))
ctx.nonterminal = "lexer_partials"
if current == None:
raise ctx.errors.unexpected_eof()
if rule == 13: # $lexer_partials = :partials :lbrace $_gen2 :rbrace -> RegexPartials( list=$2 )
ctx.rule = rules[13]
ast_parameters = OrderedDict([
('list', 2),
])
tree.astTransform = AstTransformNodeCreator('RegexPartials', ast_parameters)
t = expect(ctx, 13) # :partials
tree.add(t)
t = expect(ctx, 2) # :lbrace
tree.add(t)
subtree = parse__gen2(ctx)
tree.add(subtree)
t = expect(ctx, 15) # :rbrace
tree.add(t)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[98] if x >=0],
rules[13]
)
def parse_ast_transform(ctx):
current = ctx.tokens.current()
rule = table[53][current.id] if current else -1
tree = ParseTree(NonTerminal(99, 'ast_transform'))
ctx.nonterminal = "ast_transform"
if current == None:
raise ctx.errors.unexpected_eof()
if rule == 76: # $ast_transform = :arrow $ast_transform_sub -> $1
ctx.rule = rules[76]
tree.astTransform = AstTransformSubstitution(1)
t = expect(ctx, 3) # :arrow
tree.add(t)
subtree = parse_ast_transform_sub(ctx)
tree.add(subtree)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[99] if x >=0],
rules[76]
)
def parse__gen15(ctx):
current = ctx.tokens.current()
rule = table[55][current.id] if current else -1
tree = ParseTree(NonTerminal(101, '_gen15'))
ctx.nonterminal = "_gen15"
if current != None and current.id in nonterminal_follow[101] and current.id not in nonterminal_first[101]:
return tree
if current == None:
return tree
if rule == 56: # $_gen15 = $binding_power
ctx.rule = rules[56]
tree.astTransform = AstTransformSubstitution(0)
subtree = parse_binding_power(ctx)
tree.add(subtree)
return tree
return tree
# Lexer Code #
# START USER CODE
# END USER CODE
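# emit() appends a Terminal to the token list under construction;
# default_action is used whenever a lexer rule does not supply its own
# per-match callback.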
def emit(ctx, terminal, source_string, line, col):
if terminal:
ctx.tokens.append(Terminal(terminals[terminal], terminal, source_string, ctx.resource, line, col))
def default_action(ctx, terminal, source_string, line, col):
emit(ctx, terminal, source_string, line, col)
def init():
return {}
def destroy(context):
pass
class LexerStackPush:
def __init__(self, mode):
self.mode = mode
class LexerAction:
def __init__(self, action):
self.action = action
class LexerContext:
def __init__(self, string, resource, errors, user_context):
self.__dict__.update(locals())
self.stack = ['default']
self.line = 1
self.col = 1
self.tokens = []
self.user_context = user_context
self.re_match = None # https://docs.python.org/3/library/re.html#match-objects
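# HermesLexer is a mode-based scanner. `regex` maps each lexer mode to an
# ordered list of (compiled pattern, outputs) pairs; each output is either a
# (terminal, match_group, function) emission spec, a LexerStackPush entering a
# new mode, or a LexerAction('pop') leaving the current one.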
class HermesLexer:
regex = {
'default': OrderedDict([
(re.compile(r'(grammar)\s*({)'), [
# (terminal, group, function)
('grammar', 1, None),
('lbrace', 2, None),
LexerStackPush('grammar'),
]),
(re.compile(r'\s+'), [
# (terminal, group, function)
]),
(re.compile(r'\#.*'), [
# (terminal, group, function)
]),
]),
'grammar': OrderedDict([
(re.compile(r'\s+'), [
# (terminal, group, function)
]),
(re.compile(r'\#.*'), [
# (terminal, group, function)
]),
(re.compile(r'}'), [
# (terminal, group, function)
('rbrace', 0, None),
LexerAction('pop'),
]),
(re.compile(r'lexer'), [
# (terminal, group, function)
('lexer', 0, None),
LexerStackPush('lexer'),
]),
(re.compile(r'parser'), [
# (terminal, group, function)
('parser', 0, None),
LexerStackPush('parser'),
]),
]),
'lexer': OrderedDict([
(re.compile(r'\s+'), [
# (terminal, group, function)
]),
(re.compile(r'\#.*'), [
# (terminal, group, function)
]),
(re.compile(r'code<([a-z]+)>\s*<<\s*([a-zA-Z_]+)(?=\s)(.*?)(\2)', re.DOTALL), [
# (terminal, group, function)
('code_start', 2, None),
('language', 1, None),
('code', 3, None),
]),
(re.compile(r'}'), [
# (terminal, group, function)
('rbrace', 0, None),
LexerAction('pop'),
]),
(re.compile(r'{'), [
# (terminal, group, function)
('lbrace', 0, None),
]),
(re.compile(r'<'), [
# (terminal, group, function)
('langle', 0, None),
]),
(re.compile(r'>'), [
# (terminal, group, function)
('rangle', 0, None),
]),
(re.compile(r'\('), [
# (terminal, group, function)
('lparen', 0, None),
]),
(re.compile(r'\)'), [
# (terminal, group, function)
('rparen', 0, None),
]),
(re.compile(r'\[\]'), [
# (terminal, group, function)
('no_group', 0, None),
]),
(re.compile(r'\['), [
# (terminal, group, function)
('lsquare', 0, None),
]),
(re.compile(r'\]'), [
# (terminal, group, function)
('rsquare', 0, None),
]),
(re.compile(r'[0-9]+'), [
# (terminal, group, function)
('integer', 0, None),
]),
(re.compile(r'(r\'(\\\'|[^\'])*\'|"(\\\"|[^\"])*")'), [
# (terminal, group, function)
('regex', 0, None),
LexerStackPush('regex_options'),
]),
(re.compile(r'->'), [
# (terminal, group, function)
('arrow', 0, None),
]),
(re.compile(r','), [
# (terminal, group, function)
('comma', 0, None),
]),
(re.compile(r'@([a-zA-Z][a-zA-Z0-9_]*)'), [
# (terminal, group, function)
('stack_push', 1, None),
]),
(re.compile(r'%([a-zA-Z][a-zA-Z0-9_]*)'), [
# (terminal, group, function)
('action', 1, None),
]),
(re.compile(r':([a-zA-Z][a-zA-Z0-9_]*|_empty)'), [
# (terminal, group, function)
('terminal', 1, None),
]),
(re.compile(r'_[a-zA-Z][a-zA-Z0-9_]*'), [
# (terminal, group, function)
('regex_partial', 0, None),
]),
(re.compile(r'null'), [
# (terminal, group, function)
('null', 0, None),
]),
(re.compile(r'mode'), [
# (terminal, group, function)
('mode', 0, None),
LexerStackPush('lexer'),
]),
(re.compile(r'partials'), [
# (terminal, group, function)
('partials', 0, None),
LexerStackPush('lexer'),
]),
(re.compile(r'enum'), [
# (terminal, group, function)
('regex_enum', 0, None),
LexerStackPush('regex_enum'),
]),
(re.compile(r'[a-zA-Z][a-zA-Z0-9_]*'), [
# (terminal, group, function)
('identifier', 0, None),
]),
]),
'regex_enum': OrderedDict([
(re.compile(r'\s+'), [
# (terminal, group, function)
]),
(re.compile(r'\#.*'), [
# (terminal, group, function)
]),
(re.compile(r'}'), [
# (terminal, group, function)
('rbrace', 0, None),
LexerAction('pop'),
]),
(re.compile(r'{'), [
# (terminal, group, function)
('lbrace', 0, None),
]),
(re.compile(r'\('), [
# (terminal, group, function)
('lparen', 0, None),
]),
(re.compile(r'\)'), [
# (terminal, group, function)
('rparen', 0, None),
]),
(re.compile(r':'), [
# (terminal, group, function)
('colon', 0, None),
]),
(re.compile(r','), [
# (terminal, group, function)
('comma', 0, None),
]),
(re.compile(r'(r\'(\\\'|[^\'])*\'|"(\\\"|[^\"])*")'), [
# (terminal, group, function)
('regex', 0, None),
]),
(re.compile(r'[a-zA-Z][a-zA-Z0-9_]*'), [
# (terminal, group, function)
('identifier', 0, None),
]),
]),
'regex_options': OrderedDict([
(re.compile(r'\s+'), [
# (terminal, group, function)
]),
(re.compile(r'\#.*'), [
# (terminal, group, function)
]),
(re.compile(r'[a-zA-Z][a-zA-Z0-9_]*'), [
# (terminal, group, function)
('identifier', 0, None),
]),
(re.compile(r','), [
# (terminal, group, function)
('comma', 0, None),
]),
(re.compile(r'{'), [
# (terminal, group, function)
('lbrace', 0, None),
]),
(re.compile(r'}'), [
# (terminal, group, function)
('rbrace', 0, None),
]),
(re.compile(r'->'), [
# (terminal, group, function)
('arrow', 0, None),
LexerAction('pop'),
]),
]),
'parser': OrderedDict([
(re.compile(r'\s+'), [
# (terminal, group, function)
]),
(re.compile(r'\#.*'), [
# (terminal, group, function)
]),
(re.compile(r'{'), [
# (terminal, group, function)
('lbrace', 0, None),
]),
(re.compile(r'}'), [
# (terminal, group, function)
('rbrace', 0, None),
LexerAction('pop'),
]),
(re.compile(r'\|'), [
# (terminal, group, function)
('pipe', 0, None),
]),
(re.compile(r'='), [
# (terminal, group, function)
('equals', 0, None),
]),
(re.compile(r'\('), [
# (terminal, group, function)
('lparen', 0, None),
]),
(re.compile(r'\)'), [
# (terminal, group, function)
('rparen', 0, None),
]),
(re.compile(r','), [
# (terminal, group, function)
('comma', 0, None),
]),
(re.compile(r'->'), [
# (terminal, group, function)
('arrow', 0, None),
]),
(re.compile(r'null'), [
# (terminal, group, function)
('null', 0, None),
]),
(re.compile(r'parser\s*<\s*expression\s*>\s*({)'), [
# (terminal, group, function)
('parser_expression', None, None),
('lbrace', 1, None),
LexerStackPush('parser_expr'),
]),
(re.compile(r':([a-zA-Z][a-zA-Z0-9_]*|_empty)'), [
# (terminal, group, function)
('terminal', 1, None),
]),
(re.compile(r'\$([a-zA-Z][a-zA-Z0-9_]*)(?=\s*\=)'), [
# (terminal, group, function)
('ll1_rule_hint', None, None),
('nonterminal', 1, None),
]),
(re.compile(r'\$([a-zA-Z][a-zA-Z0-9_]*)'), [
# (terminal, group, function)
('nonterminal', 1, None),
]),
(re.compile(r'\$([0-9]+|\$)'), [
# (terminal, group, function)
('nonterminal_reference', 1, None),
]),
(re.compile(r'[a-zA-Z][a-zA-Z0-9_]*'), [
# (terminal, group, function)
('identifier', 0, None),
]),
(re.compile(r'"[^"]+"'), [
# (terminal, group, function)
('string', 0, None),
]),
(re.compile(r'[0-9]+'), [
# (terminal, group, function)
('integer', 0, None),
]),
]),
'parser_expr': OrderedDict([
(re.compile(r'\s+'), [
# (terminal, group, function)
]),
(re.compile(r'\#.*'), [
# (terminal, group, function)
]),
(re.compile(r'(\()(?=\s*[\*-])'), [
# (terminal, group, function)
('lparen', 1, None),
LexerStackPush('binding_power'),
]),
(re.compile(r'->'), [
# (terminal, group, function)
('arrow', 0, None),
]),
(re.compile(r'<=>'), [
# (terminal, group, function)
('expression_divider', 0, None),
]),
(re.compile(r'\|'), [
# (terminal, group, function)
('pipe', 0, None),
]),
(re.compile(r'='), [
# (terminal, group, function)
('equals', 0, None),
]),
(re.compile(r'{'), [
# (terminal, group, function)
('lbrace', 0, None),
]),
(re.compile(r'}'), [
# (terminal, group, function)
('rbrace', 0, None),
LexerAction('pop'),
]),
(re.compile(r'\('), [
# (terminal, group, function)
('lparen', 0, None),
]),
(re.compile(r'\)'), [
# (terminal, group, function)
('rparen', 0, None),
]),
(re.compile(r','), [
# (terminal, group, function)
('comma', 0, None),
]),
(re.compile(r':([a-zA-Z][a-zA-Z0-9_]*|_empty)'), [
# (terminal, group, function)
('terminal', 1, None),
]),
(re.compile(r'(\$([a-zA-Z][a-zA-Z0-9_]*))[ \t]*(=)[ \t]*\1[ \t]+:([a-zA-Z][a-zA-Z0-9_]*)[ \t]+\1(?![ \t]+(:|\$))'), [
# (terminal, group, function)
('expr_rule_hint', None, None),
('nonterminal', 2, None),
('equals', 3, None),
('infix_rule_hint', None, None),
('nonterminal', 2, None),
('terminal', 4, None),
('nonterminal', 2, None),
]),
            (re.compile(r'(\$([a-zA-Z][a-zA-Z0-9_]*))[ \t]*(=)[ \t]*:([a-zA-Z][a-zA-Z0-9_]*)[ \t]+\1(?![ \t]+(:|\$))'), [
# (terminal, group, function)
('expr_rule_hint', None, None),
('nonterminal', 2, None),
('equals', 3, None),
('prefix_rule_hint', None, None),
('terminal', 4, None),
('nonterminal', 2, None),
]),
(re.compile(r'\$([a-zA-Z][a-zA-Z0-9_]*)\s*(=)'), [
# (terminal, group, function)
('expr_rule_hint', None, None),
('nonterminal', 1, None),
('equals', 2, None),
('mixfix_rule_hint', None, None),
]),
(re.compile(r'\$([a-zA-Z][a-zA-Z0-9_]*)'), [
# (terminal, group, function)
('nonterminal', 1, None),
]),
(re.compile(r'\$([0-9]+|\$)'), [
# (terminal, group, function)
('nonterminal_reference', 1, None),
]),
(re.compile(r'[a-zA-Z][a-zA-Z0-9_]*'), [
# (terminal, group, function)
('identifier', 0, None),
]),
(re.compile(r'"[^"]+"'), [
# (terminal, group, function)
('string', 0, None),
]),
(re.compile(r'[0-9]+'), [
# (terminal, group, function)
('integer', 0, None),
]),
]),
'binding_power': OrderedDict([
(re.compile(r'\s+'), [
# (terminal, group, function)
]),
(re.compile(r'\*'), [
# (terminal, group, function)
('asterisk', 0, None),
]),
(re.compile(r'-'), [
# (terminal, group, function)
('dash', 0, None),
]),
(re.compile(r':'), [
# (terminal, group, function)
('colon', 0, None),
]),
(re.compile(r'left'), [
# (terminal, group, function)
('left', 0, None),
]),
(re.compile(r'right'), [
# (terminal, group, function)
('right', 0, None),
]),
(re.compile(r'unary'), [
# (terminal, group, function)
('unary', 0, None),
]),
(re.compile(r'\)'), [
# (terminal, group, function)
('rparen', 0, None),
LexerAction('pop'),
]),
]),
}
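    # Advance a (line, col) pair across the first `length` characters of
    # `string`, resetting the column after every newline.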
def _advance_line_col(self, string, length, line, col):
for i in range(length):
if string[i] == '\n':
line += 1
col = 1
else:
col += 1
return (line, col)
def _advance_string(self, ctx, string):
(ctx.line, ctx.col) = self._advance_line_col(string, len(string), ctx.line, ctx.col)
ctx.string = ctx.string[len(string):]
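    # One scan step: try each pattern of the current mode in order and let the
    # first match win. The match emits its configured terminals (each with the
    # line/col of its own group), applies any mode pushes/pops, consumes the
    # matched text, and reports whether at least one character was consumed.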
def _next(self, ctx, debug=False):
for regex, outputs in self.regex[ctx.stack[-1]].items():
if debug:
from xtermcolor import colorize
token_count = len(ctx.tokens)
print('{1} ({2}, {3}) regex: {0}'.format(
colorize(regex.pattern, ansi=40), colorize(ctx.string[:20].replace('\n', '\\n'), ansi=15), ctx.line, ctx.col)
)
match = regex.match(ctx.string)
if match:
ctx.re_match = match
for output in outputs:
if isinstance(output, tuple):
(terminal, group, function) = output
function = function if function else default_action
source_string = match.group(group) if group is not None else ''
(group_line, group_col) = self._advance_line_col(ctx.string, match.start(group) if group else 0, ctx.line, ctx.col)
function(
ctx,
terminal,
source_string,
group_line,
group_col
)
if debug:
print(' matched: {}'.format(colorize(match.group(0).replace('\n', '\\n'), ansi=3)))
for token in ctx.tokens[token_count:]:
print(' emit: [{}] [{}, {}] [{}] stack:{} context:{}'.format(
colorize(token.str, ansi=9),
colorize(str(token.line), ansi=5),
colorize(str(token.col), ansi=5),
colorize(token.source_string, ansi=3),
colorize(str(ctx.stack), ansi=4),
colorize(str(ctx.user_context), ansi=13)
))
token_count = len(ctx.tokens)
if isinstance(output, LexerStackPush):
ctx.stack.append(output.mode)
if debug:
print(' push on stack: {}'.format(colorize(output.mode, ansi=4)))
if isinstance(output, LexerAction):
if output.action == 'pop':
mode = ctx.stack.pop()
if debug:
print(' pop off stack: {}'.format(colorize(mode, ansi=4)))
self._advance_string(ctx, match.group(0))
return len(match.group(0)) > 0
return False
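    # Tokenize `string` completely; lexing stops with an unrecognized_token
    # error as soon as no pattern in the current mode matches.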
def lex(self, string, resource, errors=None, debug=False):
if errors is None:
errors = DefaultSyntaxErrorHandler()
string_copy = string
user_context = init()
ctx = LexerContext(string, resource, errors, user_context)
while len(ctx.string):
matched = self._next(ctx, debug)
            if not matched:
raise ctx.errors.unrecognized_token(string_copy, ctx.line, ctx.col)
destroy(ctx.user_context)
return ctx.tokens
def lex(source, resource, errors=None, debug=False):
return TokenStream(HermesLexer().lex(source, resource, errors, debug))
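# A minimal usage sketch (the file name and the combination of calls are
# illustrative assumptions, not a documented API of this generated module):
#
#   tokens = lex(open('grammar.zgr').read(), 'grammar.zgr')
#   ctx = ParserContext(tokens, DefaultSyntaxErrorHandler())
#   tree = parse_grammar(ctx)
#   print(tree.ast().dumps(indent=2))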
| 38.088612 | 189 | 0.520529 |
import sys
import os
import re
import base64
import argparse
from collections import OrderedDict
def parse_tree_string(parsetree, indent=None, b64_source=True, indent_level=0, debug=False):
indent_str = (' ' * indent * indent_level) if indent else ''
if isinstance(parsetree, ParseTree):
children = [parse_tree_string(child, indent, b64_source, indent_level+1, debug) for child in parsetree.children]
debug_str = parsetree.debug_str() if debug else ''
if indent is None or len(children) == 0:
return '{0}({1}: {2}{3})'.format(indent_str, parsetree.nonterminal, debug_str, ', '.join(children))
else:
return '{0}({1}:{2}\n{3}\n{4})'.format(
indent_str,
parsetree.nonterminal,
debug_str,
',\n'.join(children),
indent_str
)
elif isinstance(parsetree, Terminal):
return indent_str + parsetree.dumps(b64_source=b64_source)
def ast_string(ast, indent=None, b64_source=True, indent_level=0):
indent_str = (' ' * indent * indent_level) if indent else ''
next_indent_str = (' ' * indent * (indent_level+1)) if indent else ''
if isinstance(ast, Ast):
children = OrderedDict([(k, ast_string(v, indent, b64_source, indent_level+1)) for k, v in ast.attributes.items()])
if indent is None:
return '({0}: {1})'.format(
ast.name,
', '.join('{0}={1}'.format(k, v) for k, v in children.items())
)
else:
return '({0}:\n{1}\n{2})'.format(
ast.name,
',\n'.join(['{0}{1}={2}'.format(next_indent_str, k, v) for k, v in children.items()]),
indent_str
)
elif isinstance(ast, list):
children = [ast_string(element, indent, b64_source, indent_level+1) for element in ast]
if indent is None or len(children) == 0:
return '[{0}]'.format(', '.join(children))
else:
return '[\n{1}\n{0}]'.format(
indent_str,
',\n'.join(['{0}{1}'.format(next_indent_str, child) for child in children]),
)
elif isinstance(ast, Terminal):
return ast.dumps(b64_source=b64_source)
class Terminal:
def __init__(self, id, str, source_string, resource, line, col):
self.__dict__.update(locals())
def getId(self):
return self.id
def ast(self):
return self
def dumps(self, b64_source=True, **kwargs):
source_string = base64.b64encode(self.source_string.encode('utf-8')).decode('utf-8') if b64_source else self.source_string
return '<{resource}:{line}:{col} {terminal} "{source}">'.format(
resource=self.resource,
line=self.line,
col=self.col,
terminal=self.str,
source=source_string
)
def __str__(self):
return self.dumps()
class NonTerminal():
def __init__(self, id, str):
self.__dict__.update(locals())
self.list = False
def __str__(self):
return self.str
class AstTransform:
pass
class AstTransformSubstitution(AstTransform):
def __init__(self, idx):
self.__dict__.update(locals())
def __repr__(self):
return '$' + str(self.idx)
def __str__(self):
return self.__repr__()
class AstTransformNodeCreator(AstTransform):
def __init__( self, name, parameters ):
self.__dict__.update(locals())
def __repr__( self ):
return self.name + '( ' + ', '.join(['%s=$%s' % (k,str(v)) for k,v in self.parameters.items()]) + ' )'
def __str__(self):
return self.__repr__()
class AstList(list):
def ast(self):
retval = []
for ast in self:
retval.append(ast.ast())
return retval
def dumps(self, indent=None, b64_source=True):
args = locals()
del args['self']
return ast_string(self, **args)
class ParseTree():
def __init__(self, nonterminal):
self.__dict__.update(locals())
self.children = []
self.astTransform = None
self.isExpr = False
self.isNud = False
self.isPrefix = False
self.isInfix = False
self.nudMorphemeCount = 0
self.isExprNud = False
self.list_separator_id = None
self.list = False
def debug_str(self):
from copy import deepcopy
def h(v):
if v == False or v is None:
return str(v)
from xtermcolor import colorize
return colorize(str(v), ansi=190)
d = deepcopy(self.__dict__)
for key in ['self', 'nonterminal', 'children']:
del d[key]
f = {k: v for k, v in d.items() if v != False and v is not None}
return ' [{}]'.format(', '.join(['{}={}'.format(k,h(v)) for k,v in f.items()]))
def add(self, tree):
self.children.append( tree )
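    # Convert this parse tree into an AST. List nodes collect their children's
    # ASTs while skipping separator terminals; other nodes apply astTransform,
    # either substituting a single child ($n) or building a named Ast node
    # from child indexes. The isExpr branch re-maps those indexes around the
    # "nud" morphemes produced by the expression parser.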
def ast(self):
        if self.list:
r = AstList()
if len(self.children) == 0:
return r
for child in self.children:
if isinstance(child, Terminal) and self.list_separator_id is not None and child.id == self.list_separator_id:
continue
r.append(child.ast())
return r
elif self.isExpr:
if isinstance(self.astTransform, AstTransformSubstitution):
return self.children[self.astTransform.idx].ast()
elif isinstance(self.astTransform, AstTransformNodeCreator):
parameters = OrderedDict()
for name, idx in self.astTransform.parameters.items():
if idx == '$':
child = self.children[0]
elif isinstance(self.children[0], ParseTree) and \
self.children[0].isNud and \
not self.children[0].isPrefix and \
not self.isExprNud and \
not self.isInfix:
if idx < self.children[0].nudMorphemeCount:
child = self.children[0].children[idx]
else:
index = idx - self.children[0].nudMorphemeCount + 1
child = self.children[index]
elif len(self.children) == 1 and not isinstance(self.children[0], ParseTree) and not isinstance(self.children[0], list):
return self.children[0]
else:
child = self.children[idx]
parameters[name] = child.ast()
return Ast(self.astTransform.name, parameters)
else:
if isinstance(self.astTransform, AstTransformSubstitution):
return self.children[self.astTransform.idx].ast()
elif isinstance(self.astTransform, AstTransformNodeCreator):
parameters = OrderedDict()
for name, idx in self.astTransform.parameters.items():
parameters[name] = self.children[idx].ast()
return Ast(self.astTransform.name, parameters)
elif len(self.children):
return self.children[0].ast()
else:
return None
def dumps(self, indent=None, b64_source=True, debug=False):
args = locals()
del args['self']
return parse_tree_string(self, **args)
class Ast():
def __init__(self, name, attributes):
self.__dict__.update(locals())
def attr(self, attr):
return self.attributes[attr]
def dumps(self, indent=None, b64_source=True):
args = locals()
del args['self']
return ast_string(self, **args)
class SyntaxError(Exception):
def __init__(self, message):
self.__dict__.update(locals())
def __str__(self):
return self.message
class TokenStream(list):
    def __init__(self, arg=None):
        # use None as the default to avoid sharing one mutable list object
        super(TokenStream, self).__init__(arg if arg is not None else [])
self.index = 0
def advance(self):
self.index += 1
return self.current()
def last(self):
return self[-1]
def current(self):
try:
return self[self.index]
except IndexError:
return None
class DefaultSyntaxErrorHandler:
def __init__(self):
self.errors = []
def _error(self, string):
error = SyntaxError(string)
self.errors.append(error)
return error
def unexpected_eof(self):
return self._error("Error: unexpected end of file")
def excess_tokens(self):
return self._error("Finished parsing without consuming all tokens.")
def unexpected_symbol(self, nonterminal, actual_terminal, expected_terminals, rule):
return self._error("Unexpected symbol (line {line}, col {col}) when parsing parse_{nt}. Expected {expected}, got {actual}.".format(
line=actual_terminal.line,
col=actual_terminal.col,
nt=nonterminal,
expected=', '.join(expected_terminals),
actual=actual_terminal
))
def no_more_tokens(self, nonterminal, expected_terminal, last_terminal):
return self._error("No more tokens. Expecting " + expected_terminal)
def invalid_terminal(self, nonterminal, invalid_terminal):
return self._error("Invalid symbol ID: {} ({})".format(invalid_terminal.id, invalid_terminal.string))
def unrecognized_token(self, string, line, col):
lines = string.split('\n')
bad_line = lines[line-1]
return self._error('Unrecognized token on line {}, column {}:\n\n{}\n{}'.format(
            line, col, bad_line, ' ' * (col - 1) + '^'
))
def missing_list_items(self, method, required, found, last):
return self._error("List for {} requires {} items but only {} were found.".format(method, required, found))
def missing_terminator(self, method, terminator, last):
return self._error("List for "+method+" is missing a terminator")
class ParserContext:
def __init__(self, tokens, errors):
self.__dict__.update(locals())
self.nonterminal_string = None
self.rule_string = None
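# `terminals` maps terminal ids to names and names back to ids; `table` is the
# LL(1) parse table, one row per (re-indexed) nonterminal and one column per
# terminal id, holding the rule number to apply or -1 when no rule matches.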
terminals = {
0: 'regex_enum',
1: 'dash',
2: 'lbrace',
3: 'arrow',
4: 'unary',
5: 'rsquare',
6: 'infix_rule_hint',
7: 'equals',
8: 'stack_push',
9: 'code_start',
10: 'langle',
11: 'no_group',
12: 'expr_rule_hint',
13: 'partials',
14: 'regex',
15: 'rbrace',
16: 'code',
17: 'identifier',
18: 'regex_partial',
19: 'rangle',
20: 'language',
21: 'integer',
22: 'left',
23: 'rparen',
24: 'right',
25: 'mixfix_rule_hint',
26: 'colon',
27: 'expression_divider',
28: 'prefix_rule_hint',
29: 'asterisk',
30: 'll1_rule_hint',
31: 'string',
32: 'lexer',
33: 'grammar',
34: 'terminal',
35: 'lsquare',
36: 'parser',
37: 'lparen',
38: 'comma',
39: 'action',
40: 'pipe',
41: 'parser_expression',
42: 'nonterminal',
43: 'mode',
44: 'nonterminal_reference',
45: 'null',
'regex_enum': 0,
'dash': 1,
'lbrace': 2,
'arrow': 3,
'unary': 4,
'rsquare': 5,
'infix_rule_hint': 6,
'equals': 7,
'stack_push': 8,
'code_start': 9,
'langle': 10,
'no_group': 11,
'expr_rule_hint': 12,
'partials': 13,
'regex': 14,
'rbrace': 15,
'code': 16,
'identifier': 17,
'regex_partial': 18,
'rangle': 19,
'language': 20,
'integer': 21,
'left': 22,
'rparen': 23,
'right': 24,
'mixfix_rule_hint': 25,
'colon': 26,
'expression_divider': 27,
'prefix_rule_hint': 28,
'asterisk': 29,
'll1_rule_hint': 30,
'string': 31,
'lexer': 32,
'grammar': 33,
'terminal': 34,
'lsquare': 35,
'parser': 36,
'lparen': 37,
'comma': 38,
'action': 39,
'pipe': 40,
'parser_expression': 41,
'nonterminal': 42,
'mode': 43,
'nonterminal_reference': 44,
'null': 45,
}
table = [
[-1, -1, 16, 17, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, 72, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 70, -1, 71, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 30, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 29, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 44, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 75, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 73, -1, -1, -1, -1, -1, -1, -1, 74, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 86, -1, -1, -1, -1, -1, -1, -1, -1, -1, 85, -1, -1, 84, -1, -1, -1, -1, -1, -1, -1, 83, -1, -1, 87],
[-1, -1, -1, 49, -1, -1, -1, -1, -1, -1, -1, -1, 50, -1, -1, 50, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 50, -1, -1, 50, -1, -1, -1, -1, -1, -1, 50, -1, -1, 50, -1, -1, -1, -1, -1],
[-1, -1, -1, 60, -1, -1, -1, -1, -1, -1, -1, -1, 60, -1, -1, 60, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 59, -1, -1, -1, -1, -1, -1, -1, -1, -1, 60, -1, -1, -1, -1, -1, -1, -1, -1],
[7, -1, -1, -1, -1, -1, -1, -1, -1, 10, -1, -1, -1, 9, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 8, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 40, -1, -1],
[-1, -1, -1, 64, -1, -1, -1, -1, -1, -1, -1, -1, 64, -1, -1, 64, -1, 64, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 64, -1, -1, 64, -1, -1, -1, -1, 64, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 78, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 79, -1],
[21, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 3, -1, -1, -1, 4, -1, -1, -1, -1, 4, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 23, -1, 23, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 22, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 26, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 36, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 58, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 58, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, 69, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 68, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 45, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, 63, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 61, -1, -1, 62, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, 51, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 51, -1, 51, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 51, -1, -1, -1, 51, -1, -1, -1, -1, -1, 51, -1, 51, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 80, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 2, -1, -1, -1, 2, -1, -1, -1, -1, 2, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 38, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 37, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 66, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 24, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, 47, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 47, -1, 47, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 47, -1, -1, -1, 47, -1, 53, -1, -1, -1, -1, 53, 47, -1, -1, 52],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, 27, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 14, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 65, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, 67, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 67, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 55, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 82, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[15, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 19, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[35, -1, -1, -1, -1, -1, -1, -1, 35, 35, -1, 34, -1, 35, 35, 35, -1, 35, -1, -1, -1, -1, -1, 35, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 35, 34, -1, -1, -1, 35, -1, -1, -1, 35, -1, 35],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 41, -1, -1, -1, -1, 42, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, 32, -1, -1, -1, -1, -1, -1, -1, -1, 31, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 28, -1, -1, -1, -1, 33, -1, -1, -1, -1, -1, 39],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 13, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, 76, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 57, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 56, -1, -1, -1, -1, -1, -1, -1, -1],
]
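# FIRST sets keyed by nonterminal id; -1 marks a nullable nonterminal
# (one that may derive the empty string).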
nonterminal_first = {
46: [2, -1],
47: [24, 4, 22],
48: [34, -1],
49: [36],
50: [34, 42, 17],
51: [34, 42, 21, 31, 45],
52: [3, -1],
53: [27, -1],
54: [0, 13, 43, 14, 9],
55: [43],
56: [34, 42, -1, 17],
57: [44, 17],
58: [0],
59: [32, 36, 41],
60: [37, -1],
61: [0, 13, 14, -1, 43, 9],
62: [36, -1, 41, 32],
63: [14, -1],
64: [37],
65: [34],
66: [12, 37],
67: [29, 1],
68: [30],
69: [37, 12, -1],
70: [9],
71: [28, 25, 6],
72: [34, -1, 3, 42, 17],
73: [17],
74: [32, 36, 41],
75: [-1, 17],
76: [35, 11],
77: [3, 34, -1, 42, 17],
78: [37],
79: [17],
80: [3, 34, 36, -1, 41, 42, 17, 45],
81: [34, -1, 39, 8, 17, 45],
82: [34, -1, 42, 17],
83: [-1, 17],
84: [2],
85: [14],
86: [27],
87: [29, 1],
88: [41],
89: [32],
90: [17],
91: [14, 0],
92: [35, 11, -1],
93: [-1, 17],
94: [36, 41],
95: [34, 39, 8, 17, 45],
96: [30, -1],
97: [33],
98: [13],
99: [3],
100: [34, -1, 31, 42, 21, 45],
101: [37, -1],
}
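# FOLLOW sets keyed by nonterminal id, used to decide when a nullable
# nonterminal should be expanded to the empty production.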
nonterminal_follow = {
46: [3],
47: [23],
48: [23],
49: [30, 32, 15, 36, 41],
50: [12, 30, 3, 34, 15, 37, 40, 42, 17],
51: [23, 38],
52: [27, 12, 30, 15, 37, 40],
53: [15, 12, 37, 3],
54: [0, 13, 14, 15, 43, 9],
55: [0, 13, 14, 15, 43, 9],
56: [15, 12, 37, 3],
57: [27, 12, 30, 15, 37, 40],
58: [0, 13, 14, 15, 43, 9],
59: [15, 36, 41, 32],
60: [15, 17],
61: [15],
62: [15],
63: [15],
64: [15, 17],
65: [0, 23, 8, 9, 13, 14, 34, 15, 39, 43, 17, 45],
66: [15, 12, 37],
67: [26],
68: [15, 30],
69: [15],
70: [0, 13, 14, 15, 43, 9],
71: [15, 12, 37],
72: [15, 30, 40],
73: [23, 38],
74: [15, 36, 41, 32],
75: [15],
76: [0, 23, 8, 9, 13, 14, 34, 15, 39, 43, 17, 45],
77: [15, 30],
78: [12],
79: [15, 17],
80: [15, 30],
81: [0, 13, 14, 15, 43, 9],
82: [12, 30, 3, 15, 37, 40],
83: [15, 23],
84: [3],
85: [15, 14],
86: [15, 12, 37, 3],
87: [23],
88: [30, 32, 15, 36, 41],
89: [15, 36, 41, 32],
90: [3, 12, 30, 34, 15, 37, 40, 42, 17],
91: [0, 13, 14, 15, 43, 9],
92: [0, 23, 8, 9, 13, 14, 34, 15, 39, 43, 17, 45],
93: [23],
94: [30, 32, 15, 36, 41],
95: [0, 13, 14, 34, 15, 39, 43, 8, 9, 17, 45],
96: [15],
97: [-1],
98: [0, 13, 14, 15, 43, 9],
99: [27, 12, 30, 15, 37, 40],
100: [23],
101: [12],
}
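# FIRST sets keyed by rule number: the terminals that can begin each
# individual production.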
rule_first = {
0: [32, 36, -1, 41],
1: [33],
2: [32, 36, 41],
3: [32],
4: [36, 41],
5: [0, 13, 14, -1, 43, 9],
6: [32],
7: [14, 0],
8: [43],
9: [13],
10: [9],
11: [9],
12: [14, -1],
13: [13],
14: [14],
15: [0],
16: [2],
17: [-1],
18: [34, -1, 39, 8, 17, 45],
19: [14],
20: [-1, 17],
21: [0],
22: [37],
23: [-1],
24: [17],
25: [-1, 17],
26: [37],
27: [2],
28: [34],
29: [34],
30: [-1],
31: [17],
32: [8],
33: [39],
34: [35, 11],
35: [-1],
36: [34],
37: [35],
38: [11],
39: [45],
40: [43],
41: [36],
42: [41],
43: [30, -1],
44: [36],
45: [30],
46: [34, 3, -1, 17, 42],
47: [3, 42, 34, 17, -1],
48: [34, 42, -1, 17],
49: [3],
50: [-1],
51: [34, 42, -1, 3, 17],
52: [45],
53: [36, 41],
54: [12, 37, -1],
55: [41],
56: [37],
57: [-1],
58: [12, 37],
59: [27],
60: [-1],
61: [25],
62: [28],
63: [6],
64: [34, 42, -1, 17],
65: [27],
66: [37],
67: [29, 1],
68: [29],
69: [1],
70: [22],
71: [24],
72: [4],
73: [34],
74: [42],
75: [17],
76: [3],
77: [-1, 17],
78: [17],
79: [44],
80: [17],
81: [31, 21, 34, -1, 42, 45],
82: [17],
83: [42],
84: [34],
85: [31],
86: [21],
87: [45],
}
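# Every production of each nonterminal in human-readable form, keyed by
# nonterminal id.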
nonterminal_rules = {
46: [
"$_gen3 = $regex_options",
"$_gen3 = :_empty",
],
47: [
"$associativity = :left",
"$associativity = :right",
"$associativity = :unary",
],
48: [
"$_gen8 = $terminal",
"$_gen8 = :_empty",
],
49: [
"$parser_ll1 = :parser :lbrace $_gen10 :rbrace -> Parser( rules=$2 )",
],
50: [
"$morpheme = :terminal",
"$morpheme = :nonterminal",
"$morpheme = $macro",
],
51: [
"$macro_parameter = :nonterminal",
"$macro_parameter = :terminal",
"$macro_parameter = :string",
"$macro_parameter = :integer",
"$macro_parameter = :null",
],
52: [
"$_gen13 = $ast_transform",
"$_gen13 = :_empty",
],
53: [
"$_gen16 = $led",
"$_gen16 = :_empty",
],
54: [
"$lexer_atom = $lexer_regex",
"$lexer_atom = $lexer_mode",
"$lexer_atom = $lexer_partials",
"$lexer_atom = $lexer_code",
],
55: [
"$lexer_mode = :mode :langle :identifier :rangle :lbrace $_gen1 :rbrace -> Mode( name=$2, atoms=$5 )",
],
56: [
"$nud = $_gen12",
],
57: [
"$ast_transform_sub = :identifier :lparen $_gen17 :rparen -> AstTransformation( name=$0, parameters=$2 )",
"$ast_transform_sub = :nonterminal_reference",
],
58: [
"$enumerated_regex = :regex_enum :lbrace $_gen5 :rbrace :arrow $_gen4 -> EnumeratedRegex( enums=$2, onmatch=$5 )",
],
59: [
"$body_element_sub = $lexer",
"$body_element_sub = $parser",
],
60: [
"$_gen6 = $regex_enumeration_options",
"$_gen6 = :_empty",
],
61: [
"$_gen1 = list($lexer_atom)",
],
62: [
"$_gen0 = list($body_element)",
],
63: [
"$_gen2 = list($regex_partial)",
],
64: [
"$regex_enumeration_options = :lparen $_gen7 :rparen -> $1",
],
65: [
"$terminal = :terminal $_gen9 -> Terminal( name=$0, group=$1 )",
],
66: [
"$expression_rule = $_gen15 :expr_rule_hint :nonterminal :equals $expression_rule_production -> ExpressionRule( precedence=$0, nonterminal=$2, production=$4 )",
],
67: [
"$binding_power_marker = :asterisk",
"$binding_power_marker = :dash",
],
68: [
"$ll1_rule = :ll1_rule_hint :nonterminal :equals $ll1_rule_rhs -> Rule( nonterminal=$1, production=$3 )",
],
69: [
"$_gen14 = list($expression_rule)",
],
70: [
"$lexer_code = :code_start :language :code -> LexerCode( language=$1, code=$2 )",
],
71: [
"$expression_rule_production = :mixfix_rule_hint $nud $_gen13 $_gen16 $_gen13 -> MixfixProduction( nud=$1, nud_ast=$2, led=$3, ast=$4 )",
"$expression_rule_production = :prefix_rule_hint $_gen12 $_gen13 -> PrefixProduction( morphemes=$1, ast=$2 )",
"$expression_rule_production = :infix_rule_hint $_gen12 $_gen13 -> InfixProduction( morphemes=$1, ast=$2 )",
],
72: [
"$rule = $_gen12 $_gen13 -> Production( morphemes=$0, ast=$1 )",
],
73: [
"$ast_parameter = :identifier :equals :nonterminal_reference -> AstParameter( name=$0, index=$2 )",
],
74: [
"$body_element = $body_element_sub",
],
75: [
"$_gen5 = list($regex_enumeration)",
],
76: [
"$match_group = :lsquare :integer :rsquare -> $1",
"$match_group = :no_group",
],
77: [
"$_gen11 = list($rule,:pipe)",
],
78: [
"$binding_power = :lparen $precedence :rparen -> $1",
],
79: [
"$regex_enumeration = :identifier :colon :regex $_gen6 -> RegexEnum( language=$0, regex=$2, options=$3 )",
],
80: [
"$ll1_rule_rhs = $_gen11",
"$ll1_rule_rhs = :null -> NullProduction( )",
"$ll1_rule_rhs = $parser",
],
81: [
"$_gen4 = list($lexer_target)",
],
82: [
"$_gen12 = list($morpheme)",
],
83: [
"$_gen7 = list(:identifier,:comma)",
],
84: [
"$regex_options = :lbrace $_gen7 :rbrace -> $1",
],
85: [
"$regex_partial = :regex :arrow :regex_partial -> RegexPartial( regex=$0, name=$2 )",
],
86: [
"$led = :expression_divider $_gen12 -> $1",
],
87: [
"$precedence = $binding_power_marker :colon $associativity -> Precedence( marker=$0, associativity=$2 )",
],
88: [
"$parser_expression = :parser_expression :lbrace $_gen14 :rbrace -> ExpressionParser( rules=$2 )",
],
89: [
"$lexer = :lexer :lbrace $_gen1 :rbrace -> Lexer( atoms=$2 )",
],
90: [
"$macro = :identifier :lparen $_gen18 :rparen -> Macro( name=$0, parameters=$2 )",
],
91: [
"$lexer_regex = $enumerated_regex",
"$lexer_regex = :regex $_gen3 :arrow $_gen4 -> Regex( regex=$0, options=$1, onmatch=$3 )",
],
92: [
"$_gen9 = $match_group",
"$_gen9 = :_empty",
],
93: [
"$_gen17 = list($ast_parameter,:comma)",
],
94: [
"$parser = $parser_ll1",
"$parser = $parser_expression",
],
95: [
"$lexer_target = $terminal",
"$lexer_target = :identifier :lparen $_gen8 :rparen -> LexerFunctionCall( name=$0, terminal=$2 )",
"$lexer_target = :stack_push",
"$lexer_target = :action",
"$lexer_target = :null -> Null( )",
],
96: [
"$_gen10 = list($ll1_rule)",
],
97: [
"$grammar = :grammar :lbrace $_gen0 :rbrace -> Grammar( body=$2 )",
],
98: [
"$lexer_partials = :partials :lbrace $_gen2 :rbrace -> RegexPartials( list=$2 )",
],
99: [
"$ast_transform = :arrow $ast_transform_sub -> $1",
],
100: [
"$_gen18 = list($macro_parameter,:comma)",
],
101: [
"$_gen15 = $binding_power",
"$_gen15 = :_empty",
],
}
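# All productions keyed by rule number. The parse functions record the active
# production in ctx.rule and pass these strings to the error handlers.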
rules = {
0: "$_gen0 = list($body_element)",
1: "$grammar = :grammar :lbrace $_gen0 :rbrace -> Grammar( body=$2 )",
2: "$body_element = $body_element_sub",
3: "$body_element_sub = $lexer",
4: "$body_element_sub = $parser",
5: "$_gen1 = list($lexer_atom)",
6: "$lexer = :lexer :lbrace $_gen1 :rbrace -> Lexer( atoms=$2 )",
7: "$lexer_atom = $lexer_regex",
8: "$lexer_atom = $lexer_mode",
9: "$lexer_atom = $lexer_partials",
10: "$lexer_atom = $lexer_code",
11: "$lexer_code = :code_start :language :code -> LexerCode( language=$1, code=$2 )",
12: "$_gen2 = list($regex_partial)",
13: "$lexer_partials = :partials :lbrace $_gen2 :rbrace -> RegexPartials( list=$2 )",
14: "$regex_partial = :regex :arrow :regex_partial -> RegexPartial( regex=$0, name=$2 )",
15: "$lexer_regex = $enumerated_regex",
16: "$_gen3 = $regex_options",
17: "$_gen3 = :_empty",
18: "$_gen4 = list($lexer_target)",
19: "$lexer_regex = :regex $_gen3 :arrow $_gen4 -> Regex( regex=$0, options=$1, onmatch=$3 )",
20: "$_gen5 = list($regex_enumeration)",
21: "$enumerated_regex = :regex_enum :lbrace $_gen5 :rbrace :arrow $_gen4 -> EnumeratedRegex( enums=$2, onmatch=$5 )",
22: "$_gen6 = $regex_enumeration_options",
23: "$_gen6 = :_empty",
24: "$regex_enumeration = :identifier :colon :regex $_gen6 -> RegexEnum( language=$0, regex=$2, options=$3 )",
25: "$_gen7 = list(:identifier,:comma)",
26: "$regex_enumeration_options = :lparen $_gen7 :rparen -> $1",
27: "$regex_options = :lbrace $_gen7 :rbrace -> $1",
28: "$lexer_target = $terminal",
29: "$_gen8 = $terminal",
30: "$_gen8 = :_empty",
31: "$lexer_target = :identifier :lparen $_gen8 :rparen -> LexerFunctionCall( name=$0, terminal=$2 )",
32: "$lexer_target = :stack_push",
33: "$lexer_target = :action",
34: "$_gen9 = $match_group",
35: "$_gen9 = :_empty",
36: "$terminal = :terminal $_gen9 -> Terminal( name=$0, group=$1 )",
37: "$match_group = :lsquare :integer :rsquare -> $1",
38: "$match_group = :no_group",
39: "$lexer_target = :null -> Null( )",
40: "$lexer_mode = :mode :langle :identifier :rangle :lbrace $_gen1 :rbrace -> Mode( name=$2, atoms=$5 )",
41: "$parser = $parser_ll1",
42: "$parser = $parser_expression",
43: "$_gen10 = list($ll1_rule)",
44: "$parser_ll1 = :parser :lbrace $_gen10 :rbrace -> Parser( rules=$2 )",
45: "$ll1_rule = :ll1_rule_hint :nonterminal :equals $ll1_rule_rhs -> Rule( nonterminal=$1, production=$3 )",
46: "$_gen11 = list($rule,:pipe)",
47: "$ll1_rule_rhs = $_gen11",
48: "$_gen12 = list($morpheme)",
49: "$_gen13 = $ast_transform",
50: "$_gen13 = :_empty",
51: "$rule = $_gen12 $_gen13 -> Production( morphemes=$0, ast=$1 )",
52: "$ll1_rule_rhs = :null -> NullProduction( )",
53: "$ll1_rule_rhs = $parser",
54: "$_gen14 = list($expression_rule)",
55: "$parser_expression = :parser_expression :lbrace $_gen14 :rbrace -> ExpressionParser( rules=$2 )",
56: "$_gen15 = $binding_power",
57: "$_gen15 = :_empty",
58: "$expression_rule = $_gen15 :expr_rule_hint :nonterminal :equals $expression_rule_production -> ExpressionRule( precedence=$0, nonterminal=$2, production=$4 )",
59: "$_gen16 = $led",
60: "$_gen16 = :_empty",
61: "$expression_rule_production = :mixfix_rule_hint $nud $_gen13 $_gen16 $_gen13 -> MixfixProduction( nud=$1, nud_ast=$2, led=$3, ast=$4 )",
62: "$expression_rule_production = :prefix_rule_hint $_gen12 $_gen13 -> PrefixProduction( morphemes=$1, ast=$2 )",
63: "$expression_rule_production = :infix_rule_hint $_gen12 $_gen13 -> InfixProduction( morphemes=$1, ast=$2 )",
64: "$nud = $_gen12",
65: "$led = :expression_divider $_gen12 -> $1",
66: "$binding_power = :lparen $precedence :rparen -> $1",
67: "$precedence = $binding_power_marker :colon $associativity -> Precedence( marker=$0, associativity=$2 )",
68: "$binding_power_marker = :asterisk",
69: "$binding_power_marker = :dash",
70: "$associativity = :left",
71: "$associativity = :right",
72: "$associativity = :unary",
73: "$morpheme = :terminal",
74: "$morpheme = :nonterminal",
75: "$morpheme = $macro",
76: "$ast_transform = :arrow $ast_transform_sub -> $1",
77: "$_gen17 = list($ast_parameter,:comma)",
78: "$ast_transform_sub = :identifier :lparen $_gen17 :rparen -> AstTransformation( name=$0, parameters=$2 )",
79: "$ast_transform_sub = :nonterminal_reference",
80: "$ast_parameter = :identifier :equals :nonterminal_reference -> AstParameter( name=$0, index=$2 )",
81: "$_gen18 = list($macro_parameter,:comma)",
82: "$macro = :identifier :lparen $_gen18 :rparen -> Macro( name=$0, parameters=$2 )",
83: "$macro_parameter = :nonterminal",
84: "$macro_parameter = :terminal",
85: "$macro_parameter = :string",
86: "$macro_parameter = :integer",
87: "$macro_parameter = :null",
}
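# Terminal ids occupy the range [0, 45]; nonterminal ids start at 46.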
def is_terminal(id):
    return isinstance(id, int) and 0 <= id <= 45
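# Entry point: accepts a token stream, or a raw string that is lexed first,
# and returns the parse tree rooted at the start symbol, $grammar.
# Minimal usage sketch (assuming `src` holds a valid grammar source string):
#     tree = parse(src)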
def parse(tokens, errors=None, start=None):
if errors is None:
errors = DefaultSyntaxErrorHandler()
if isinstance(tokens, str):
tokens = lex(tokens, 'string', errors)
ctx = ParserContext(tokens, errors)
tree = parse_grammar(ctx)
    if tokens.current() is not None:
raise ctx.errors.excess_tokens()
return tree
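# Consume and return the current token if it matches terminal_id; otherwise
# report the mismatch through the context's error handler.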
def expect(ctx, terminal_id):
current = ctx.tokens.current()
if not current:
raise ctx.errors.no_more_tokens(ctx.nonterminal, terminals[terminal_id], ctx.tokens.last())
if current.id != terminal_id:
raise ctx.errors.unexpected_symbol(ctx.nonterminal, current, [terminals[terminal_id]], ctx.rule)
next = ctx.tokens.advance()
if next and not is_terminal(next.id):
raise ctx.errors.invalid_terminal(ctx.nonterminal, next)
return current
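# The parse__gen* functions handle the generated list nonterminals: each one
# collects children while the lookahead stays in the list's FIRST set and
# returns an empty tree once the lookahead falls in its FOLLOW set. Lists
# with a separator (tree.list_separator_id) consume it between items.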
def parse__gen18(ctx):
tree = ParseTree(NonTerminal(100, '_gen18'))
    tree.list = True
tree.list_separator_id = 38
ctx.nonterminal = "_gen18"
if ctx.tokens.current() is not None and \
ctx.tokens.current().id not in nonterminal_first[101] and \
ctx.tokens.current().id in nonterminal_follow[100]:
        return tree
if ctx.tokens.current() is None:
return tree
    minimum = 0
while minimum > 0 or \
(ctx.tokens.current() is not None and \
ctx.tokens.current().id in nonterminal_first.get(100)):
tree.add(parse_macro_parameter(ctx))
ctx.nonterminal = "_gen18"
if ctx.tokens.current() is not None and ctx.tokens.current().id == 38:
            tree.add(expect(ctx, 38))
else:
break
minimum = max(minimum - 1, 0)
return tree
def parse__gen5(ctx):
tree = ParseTree(NonTerminal(75, '_gen5'))
    tree.list = True
ctx.nonterminal = "_gen5"
if ctx.tokens.current() is not None and \
ctx.tokens.current().id not in nonterminal_first[101] and \
ctx.tokens.current().id in nonterminal_follow[75]:
        return tree
if ctx.tokens.current() is None:
return tree
    minimum = 0
while minimum > 0 or \
(ctx.tokens.current() is not None and \
ctx.tokens.current().id in nonterminal_first.get(75)):
tree.add(parse_regex_enumeration(ctx))
ctx.nonterminal = "_gen5"
minimum = max(minimum - 1, 0)
return tree
def parse__gen11(ctx):
tree = ParseTree(NonTerminal(77, '_gen11'))
    tree.list = True
tree.list_separator_id = 40
ctx.nonterminal = "_gen11"
if ctx.tokens.current() is not None and \
ctx.tokens.current().id not in nonterminal_first[101] and \
ctx.tokens.current().id in nonterminal_follow[77]:
        return tree
if ctx.tokens.current() is None:
return tree
    minimum = 0
while minimum > 0 or \
(ctx.tokens.current() is not None and \
ctx.tokens.current().id in nonterminal_first.get(77)):
tree.add(parse_rule(ctx))
ctx.nonterminal = "_gen11"
if ctx.tokens.current() is not None and ctx.tokens.current().id == 40:
            tree.add(expect(ctx, 40))
else:
break
minimum = max(minimum - 1, 0)
return tree
def parse__gen17(ctx):
tree = ParseTree(NonTerminal(93, '_gen17'))
    tree.list = True
tree.list_separator_id = 38
ctx.nonterminal = "_gen17"
if ctx.tokens.current() is not None and \
ctx.tokens.current().id not in nonterminal_first[101] and \
ctx.tokens.current().id in nonterminal_follow[93]:
        return tree
if ctx.tokens.current() is None:
return tree
    minimum = 0
while minimum > 0 or \
(ctx.tokens.current() is not None and \
ctx.tokens.current().id in nonterminal_first.get(93)):
tree.add(parse_ast_parameter(ctx))
ctx.nonterminal = "_gen17"
if ctx.tokens.current() is not None and ctx.tokens.current().id == 38:
            tree.add(expect(ctx, 38))
else:
break
minimum = max(minimum - 1, 0)
return tree
def parse__gen1(ctx):
tree = ParseTree(NonTerminal(61, '_gen1'))
    tree.list = True
ctx.nonterminal = "_gen1"
if ctx.tokens.current() is not None and \
ctx.tokens.current().id not in nonterminal_first[101] and \
ctx.tokens.current().id in nonterminal_follow[61]:
        return tree
if ctx.tokens.current() is None:
return tree
    minimum = 0
while minimum > 0 or \
(ctx.tokens.current() is not None and \
ctx.tokens.current().id in nonterminal_first.get(61)):
tree.add(parse_lexer_atom(ctx))
ctx.nonterminal = "_gen1"
minimum = max(minimum - 1, 0)
return tree
def parse__gen10(ctx):
tree = ParseTree(NonTerminal(96, '_gen10'))
    tree.list = True
ctx.nonterminal = "_gen10"
if ctx.tokens.current() is not None and \
ctx.tokens.current().id not in nonterminal_first[101] and \
ctx.tokens.current().id in nonterminal_follow[96]:
        return tree
if ctx.tokens.current() is None:
return tree
    minimum = 0
while minimum > 0 or \
(ctx.tokens.current() is not None and \
ctx.tokens.current().id in nonterminal_first.get(96)):
tree.add(parse_ll1_rule(ctx))
ctx.nonterminal = "_gen10"
minimum = max(minimum - 1, 0)
return tree
def parse__gen0(ctx):
tree = ParseTree(NonTerminal(62, '_gen0'))
    tree.list = True
ctx.nonterminal = "_gen0"
if ctx.tokens.current() is not None and \
ctx.tokens.current().id not in nonterminal_first[101] and \
ctx.tokens.current().id in nonterminal_follow[62]:
        return tree
if ctx.tokens.current() is None:
return tree
    minimum = 0
while minimum > 0 or \
(ctx.tokens.current() is not None and \
ctx.tokens.current().id in nonterminal_first.get(62)):
tree.add(parse_body_element(ctx))
ctx.nonterminal = "_gen0"
minimum = max(minimum - 1, 0)
return tree
def parse__gen4(ctx):
tree = ParseTree(NonTerminal(81, '_gen4'))
    tree.list = True
ctx.nonterminal = "_gen4"
if ctx.tokens.current() is not None and \
ctx.tokens.current().id not in nonterminal_first[101] and \
ctx.tokens.current().id in nonterminal_follow[81]:
        return tree
if ctx.tokens.current() is None:
return tree
    minimum = 0
while minimum > 0 or \
(ctx.tokens.current() is not None and \
ctx.tokens.current().id in nonterminal_first.get(81)):
tree.add(parse_lexer_target(ctx))
ctx.nonterminal = "_gen4"
minimum = max(minimum - 1, 0)
return tree
def parse__gen2(ctx):
tree = ParseTree(NonTerminal(63, '_gen2'))
    tree.list = True
ctx.nonterminal = "_gen2"
if ctx.tokens.current() is not None and \
ctx.tokens.current().id not in nonterminal_first[101] and \
ctx.tokens.current().id in nonterminal_follow[63]:
        return tree
if ctx.tokens.current() is None:
return tree
    minimum = 0
while minimum > 0 or \
(ctx.tokens.current() is not None and \
ctx.tokens.current().id in nonterminal_first.get(63)):
tree.add(parse_regex_partial(ctx))
ctx.nonterminal = "_gen2"
minimum = max(minimum - 1, 0)
return tree
def parse__gen12(ctx):
tree = ParseTree(NonTerminal(82, '_gen12'))
    tree.list = True
ctx.nonterminal = "_gen12"
if ctx.tokens.current() is not None and \
ctx.tokens.current().id not in nonterminal_first[101] and \
ctx.tokens.current().id in nonterminal_follow[82]:
        return tree
if ctx.tokens.current() is None:
return tree
    minimum = 0
while minimum > 0 or \
(ctx.tokens.current() is not None and \
ctx.tokens.current().id in nonterminal_first.get(82)):
tree.add(parse_morpheme(ctx))
ctx.nonterminal = "_gen12"
minimum = max(minimum - 1, 0)
return tree
def parse__gen7(ctx):
tree = ParseTree(NonTerminal(83, '_gen7'))
    tree.list = True
tree.list_separator_id = 38
ctx.nonterminal = "_gen7"
if ctx.tokens.current() is not None and \
ctx.tokens.current().id not in nonterminal_first[101] and \
ctx.tokens.current().id in nonterminal_follow[83]:
        return tree
if ctx.tokens.current() is None:
return tree
    minimum = 0
while minimum > 0 or \
(ctx.tokens.current() is not None and \
ctx.tokens.current().id in nonterminal_first.get(83)):
tree.add(expect(ctx, 17))
if ctx.tokens.current() is not None and ctx.tokens.current().id == 38:
            tree.add(expect(ctx, 38))
else:
break
minimum = max(minimum - 1, 0)
return tree
def parse__gen14(ctx):
tree = ParseTree(NonTerminal(69, '_gen14'))
    tree.list = True
ctx.nonterminal = "_gen14"
if ctx.tokens.current() is not None and \
ctx.tokens.current().id not in nonterminal_first[101] and \
ctx.tokens.current().id in nonterminal_follow[69]:
        return tree
if ctx.tokens.current() is None:
return tree
    minimum = 0
while minimum > 0 or \
(ctx.tokens.current() is not None and \
ctx.tokens.current().id in nonterminal_first.get(69)):
tree.add(parse_expression_rule(ctx))
ctx.nonterminal = "_gen14"
minimum = max(minimum - 1, 0)
return tree
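# The remaining parse functions are table-driven: each looks up the rule for
# the current lookahead in `table` and either expands that production or
# raises through the error handler. Nullable nonterminals instead return an
# empty tree when no rule applies.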
def parse__gen3(ctx):
current = ctx.tokens.current()
rule = table[0][current.id] if current else -1
tree = ParseTree(NonTerminal(46, '_gen3'))
ctx.nonterminal = "_gen3"
    if current is not None and current.id in nonterminal_follow[46] and current.id not in nonterminal_first[46]:
        return tree
    if current is None:
        return tree
if rule == 16:
ctx.rule = rules[16]
tree.astTransform = AstTransformSubstitution(0)
subtree = parse_regex_options(ctx)
tree.add(subtree)
return tree
return tree
def parse_associativity(ctx):
current = ctx.tokens.current()
rule = table[1][current.id] if current else -1
tree = ParseTree(NonTerminal(47, 'associativity'))
ctx.nonterminal = "associativity"
    if current is None:
raise ctx.errors.unexpected_eof()
if rule == 70:
ctx.rule = rules[70]
tree.astTransform = AstTransformSubstitution(0)
t = expect(ctx, 22)
tree.add(t)
return tree
elif rule == 71:
ctx.rule = rules[71]
tree.astTransform = AstTransformSubstitution(0)
t = expect(ctx, 24)
tree.add(t)
return tree
elif rule == 72:
ctx.rule = rules[72]
tree.astTransform = AstTransformSubstitution(0)
t = expect(ctx, 4)
tree.add(t)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[47] if x >=0],
rules[72]
)
def parse__gen8(ctx):
current = ctx.tokens.current()
rule = table[2][current.id] if current else -1
tree = ParseTree(NonTerminal(48, '_gen8'))
ctx.nonterminal = "_gen8"
    if current is not None and current.id in nonterminal_follow[48] and current.id not in nonterminal_first[48]:
        return tree
    if current is None:
        return tree
if rule == 29:
ctx.rule = rules[29]
tree.astTransform = AstTransformSubstitution(0)
subtree = parse_terminal(ctx)
tree.add(subtree)
return tree
return tree
def parse_parser_ll1(ctx):
current = ctx.tokens.current()
rule = table[3][current.id] if current else -1
tree = ParseTree(NonTerminal(49, 'parser_ll1'))
ctx.nonterminal = "parser_ll1"
    if current is None:
raise ctx.errors.unexpected_eof()
if rule == 44:
ctx.rule = rules[44]
ast_parameters = OrderedDict([
('rules', 2),
])
tree.astTransform = AstTransformNodeCreator('Parser', ast_parameters)
t = expect(ctx, 36)
tree.add(t)
t = expect(ctx, 2)
tree.add(t)
subtree = parse__gen10(ctx)
tree.add(subtree)
t = expect(ctx, 15)
tree.add(t)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[49] if x >=0],
rules[44]
)
def parse_morpheme(ctx):
current = ctx.tokens.current()
rule = table[4][current.id] if current else -1
tree = ParseTree(NonTerminal(50, 'morpheme'))
ctx.nonterminal = "morpheme"
    if current is None:
raise ctx.errors.unexpected_eof()
if rule == 73:
ctx.rule = rules[73]
tree.astTransform = AstTransformSubstitution(0)
t = expect(ctx, 34)
tree.add(t)
return tree
elif rule == 74:
ctx.rule = rules[74]
tree.astTransform = AstTransformSubstitution(0)
t = expect(ctx, 42)
tree.add(t)
return tree
elif rule == 75:
ctx.rule = rules[75]
tree.astTransform = AstTransformSubstitution(0)
subtree = parse_macro(ctx)
tree.add(subtree)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[50] if x >=0],
rules[75]
)
def parse_macro_parameter(ctx):
current = ctx.tokens.current()
rule = table[5][current.id] if current else -1
tree = ParseTree(NonTerminal(51, 'macro_parameter'))
ctx.nonterminal = "macro_parameter"
    if current is None:
raise ctx.errors.unexpected_eof()
if rule == 83:
ctx.rule = rules[83]
tree.astTransform = AstTransformSubstitution(0)
t = expect(ctx, 42)
tree.add(t)
return tree
elif rule == 84:
ctx.rule = rules[84]
tree.astTransform = AstTransformSubstitution(0)
t = expect(ctx, 34)
tree.add(t)
return tree
elif rule == 85:
ctx.rule = rules[85]
tree.astTransform = AstTransformSubstitution(0)
t = expect(ctx, 31)
tree.add(t)
return tree
elif rule == 86:
ctx.rule = rules[86]
tree.astTransform = AstTransformSubstitution(0)
t = expect(ctx, 21)
tree.add(t)
return tree
elif rule == 87:
ctx.rule = rules[87]
tree.astTransform = AstTransformSubstitution(0)
t = expect(ctx, 45)
tree.add(t)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[51] if x >=0],
rules[87]
)
def parse__gen13(ctx):
current = ctx.tokens.current()
rule = table[6][current.id] if current else -1
tree = ParseTree(NonTerminal(52, '_gen13'))
ctx.nonterminal = "_gen13"
    if current is not None and current.id in nonterminal_follow[52] and current.id not in nonterminal_first[52]:
        return tree
    if current is None:
        return tree
if rule == 49:
ctx.rule = rules[49]
tree.astTransform = AstTransformSubstitution(0)
subtree = parse_ast_transform(ctx)
tree.add(subtree)
return tree
return tree
def parse__gen16(ctx):
current = ctx.tokens.current()
rule = table[7][current.id] if current else -1
tree = ParseTree(NonTerminal(53, '_gen16'))
ctx.nonterminal = "_gen16"
    if current is not None and current.id in nonterminal_follow[53] and current.id not in nonterminal_first[53]:
        return tree
    if current is None:
        return tree
if rule == 59:
ctx.rule = rules[59]
tree.astTransform = AstTransformSubstitution(0)
subtree = parse_led(ctx)
tree.add(subtree)
return tree
return tree
def parse_lexer_atom(ctx):
current = ctx.tokens.current()
rule = table[8][current.id] if current else -1
tree = ParseTree(NonTerminal(54, 'lexer_atom'))
ctx.nonterminal = "lexer_atom"
    if current is None:
raise ctx.errors.unexpected_eof()
if rule == 7:
ctx.rule = rules[7]
tree.astTransform = AstTransformSubstitution(0)
subtree = parse_lexer_regex(ctx)
tree.add(subtree)
return tree
elif rule == 8:
ctx.rule = rules[8]
tree.astTransform = AstTransformSubstitution(0)
subtree = parse_lexer_mode(ctx)
tree.add(subtree)
return tree
elif rule == 9:
ctx.rule = rules[9]
tree.astTransform = AstTransformSubstitution(0)
subtree = parse_lexer_partials(ctx)
tree.add(subtree)
return tree
elif rule == 10:
ctx.rule = rules[10]
tree.astTransform = AstTransformSubstitution(0)
subtree = parse_lexer_code(ctx)
tree.add(subtree)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[54] if x >=0],
rules[10]
)
def parse_lexer_mode(ctx):
current = ctx.tokens.current()
rule = table[9][current.id] if current else -1
tree = ParseTree(NonTerminal(55, 'lexer_mode'))
ctx.nonterminal = "lexer_mode"
    if current is None:
raise ctx.errors.unexpected_eof()
if rule == 40:
ctx.rule = rules[40]
ast_parameters = OrderedDict([
('name', 2),
('atoms', 5),
])
tree.astTransform = AstTransformNodeCreator('Mode', ast_parameters)
t = expect(ctx, 43)
tree.add(t)
t = expect(ctx, 10)
tree.add(t)
t = expect(ctx, 17)
tree.add(t)
t = expect(ctx, 19)
tree.add(t)
t = expect(ctx, 2)
tree.add(t)
subtree = parse__gen1(ctx)
tree.add(subtree)
t = expect(ctx, 15)
tree.add(t)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[55] if x >=0],
rules[40]
)
def parse_nud(ctx):
current = ctx.tokens.current()
rule = table[10][current.id] if current else -1
tree = ParseTree(NonTerminal(56, 'nud'))
ctx.nonterminal = "nud"
    if current is not None and current.id in nonterminal_follow[56] and current.id not in nonterminal_first[56]:
        return tree
    if current is None:
        return tree
if rule == 64:
ctx.rule = rules[64]
tree.astTransform = AstTransformSubstitution(0)
subtree = parse__gen12(ctx)
tree.add(subtree)
return tree
return tree
def parse_ast_transform_sub(ctx):
current = ctx.tokens.current()
rule = table[11][current.id] if current else -1
tree = ParseTree(NonTerminal(57, 'ast_transform_sub'))
ctx.nonterminal = "ast_transform_sub"
    if current is None:
raise ctx.errors.unexpected_eof()
if rule == 78:
ctx.rule = rules[78]
ast_parameters = OrderedDict([
('name', 0),
('parameters', 2),
])
tree.astTransform = AstTransformNodeCreator('AstTransformation', ast_parameters)
t = expect(ctx, 17)
tree.add(t)
t = expect(ctx, 37)
tree.add(t)
subtree = parse__gen17(ctx)
tree.add(subtree)
t = expect(ctx, 23)
tree.add(t)
return tree
elif rule == 79:
ctx.rule = rules[79]
tree.astTransform = AstTransformSubstitution(0)
t = expect(ctx, 44)
tree.add(t)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[57] if x >=0],
rules[79]
)
def parse_enumerated_regex(ctx):
current = ctx.tokens.current()
rule = table[12][current.id] if current else -1
tree = ParseTree(NonTerminal(58, 'enumerated_regex'))
ctx.nonterminal = "enumerated_regex"
    if current is None:
raise ctx.errors.unexpected_eof()
if rule == 21:
ctx.rule = rules[21]
ast_parameters = OrderedDict([
('enums', 2),
('onmatch', 5),
])
tree.astTransform = AstTransformNodeCreator('EnumeratedRegex', ast_parameters)
t = expect(ctx, 0)
tree.add(t)
t = expect(ctx, 2)
tree.add(t)
subtree = parse__gen5(ctx)
tree.add(subtree)
t = expect(ctx, 15)
tree.add(t)
t = expect(ctx, 3)
tree.add(t)
subtree = parse__gen4(ctx)
tree.add(subtree)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[58] if x >=0],
rules[21]
)
def parse_body_element_sub(ctx):
current = ctx.tokens.current()
rule = table[13][current.id] if current else -1
tree = ParseTree(NonTerminal(59, 'body_element_sub'))
ctx.nonterminal = "body_element_sub"
    if current is None:
raise ctx.errors.unexpected_eof()
if rule == 3:
ctx.rule = rules[3]
tree.astTransform = AstTransformSubstitution(0)
subtree = parse_lexer(ctx)
tree.add(subtree)
return tree
elif rule == 4:
ctx.rule = rules[4]
tree.astTransform = AstTransformSubstitution(0)
subtree = parse_parser(ctx)
tree.add(subtree)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[59] if x >=0],
rules[4]
)
def parse__gen6(ctx):
current = ctx.tokens.current()
rule = table[14][current.id] if current else -1
tree = ParseTree(NonTerminal(60, '_gen6'))
ctx.nonterminal = "_gen6"
    if current is not None and current.id in nonterminal_follow[60] and current.id not in nonterminal_first[60]:
        return tree
    if current is None:
        return tree
if rule == 22:
ctx.rule = rules[22]
tree.astTransform = AstTransformSubstitution(0)
subtree = parse_regex_enumeration_options(ctx)
tree.add(subtree)
return tree
return tree
def parse_regex_enumeration_options(ctx):
current = ctx.tokens.current()
rule = table[18][current.id] if current else -1
tree = ParseTree(NonTerminal(64, 'regex_enumeration_options'))
ctx.nonterminal = "regex_enumeration_options"
    if current is None:
raise ctx.errors.unexpected_eof()
if rule == 26:
ctx.rule = rules[26]
tree.astTransform = AstTransformSubstitution(1)
t = expect(ctx, 37)
tree.add(t)
subtree = parse__gen7(ctx)
tree.add(subtree)
t = expect(ctx, 23)
tree.add(t)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[64] if x >=0],
rules[26]
)
def parse_terminal(ctx):
current = ctx.tokens.current()
rule = table[19][current.id] if current else -1
tree = ParseTree(NonTerminal(65, 'terminal'))
ctx.nonterminal = "terminal"
    if current is None:
raise ctx.errors.unexpected_eof()
if rule == 36:
ctx.rule = rules[36]
ast_parameters = OrderedDict([
('name', 0),
('group', 1),
])
tree.astTransform = AstTransformNodeCreator('Terminal', ast_parameters)
t = expect(ctx, 34)
tree.add(t)
subtree = parse__gen9(ctx)
tree.add(subtree)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[65] if x >=0],
rules[36]
)
def parse_expression_rule(ctx):
current = ctx.tokens.current()
rule = table[20][current.id] if current else -1
tree = ParseTree(NonTerminal(66, 'expression_rule'))
ctx.nonterminal = "expression_rule"
    if current is None:
raise ctx.errors.unexpected_eof()
if rule == 58:
ctx.rule = rules[58]
ast_parameters = OrderedDict([
('precedence', 0),
('nonterminal', 2),
('production', 4),
])
tree.astTransform = AstTransformNodeCreator('ExpressionRule', ast_parameters)
subtree = parse__gen15(ctx)
tree.add(subtree)
t = expect(ctx, 12)
tree.add(t)
t = expect(ctx, 42)
tree.add(t)
t = expect(ctx, 7)
tree.add(t)
subtree = parse_expression_rule_production(ctx)
tree.add(subtree)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[66] if x >=0],
rules[58]
)
def parse_binding_power_marker(ctx):
current = ctx.tokens.current()
rule = table[21][current.id] if current else -1
tree = ParseTree(NonTerminal(67, 'binding_power_marker'))
ctx.nonterminal = "binding_power_marker"
    if current is None:
raise ctx.errors.unexpected_eof()
if rule == 68:
ctx.rule = rules[68]
tree.astTransform = AstTransformSubstitution(0)
t = expect(ctx, 29)
tree.add(t)
return tree
elif rule == 69:
ctx.rule = rules[69]
tree.astTransform = AstTransformSubstitution(0)
t = expect(ctx, 1)
tree.add(t)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[67] if x >=0],
rules[69]
)
def parse_ll1_rule(ctx):
current = ctx.tokens.current()
rule = table[22][current.id] if current else -1
tree = ParseTree(NonTerminal(68, 'll1_rule'))
ctx.nonterminal = "ll1_rule"
    if current is None:
raise ctx.errors.unexpected_eof()
if rule == 45:
ctx.rule = rules[45]
ast_parameters = OrderedDict([
('nonterminal', 1),
('production', 3),
])
tree.astTransform = AstTransformNodeCreator('Rule', ast_parameters)
t = expect(ctx, 30)
tree.add(t)
t = expect(ctx, 42)
tree.add(t)
t = expect(ctx, 7)
tree.add(t)
subtree = parse_ll1_rule_rhs(ctx)
tree.add(subtree)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[68] if x >=0],
rules[45]
)
def parse_lexer_code(ctx):
current = ctx.tokens.current()
rule = table[24][current.id] if current else -1
tree = ParseTree(NonTerminal(70, 'lexer_code'))
ctx.nonterminal = "lexer_code"
    if current is None:
raise ctx.errors.unexpected_eof()
if rule == 11:
ctx.rule = rules[11]
ast_parameters = OrderedDict([
('language', 1),
('code', 2),
])
tree.astTransform = AstTransformNodeCreator('LexerCode', ast_parameters)
t = expect(ctx, 9)
tree.add(t)
t = expect(ctx, 20)
tree.add(t)
t = expect(ctx, 16)
tree.add(t)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[70] if x >=0],
rules[11]
)
def parse_expression_rule_production(ctx):
current = ctx.tokens.current()
rule = table[25][current.id] if current else -1
tree = ParseTree(NonTerminal(71, 'expression_rule_production'))
ctx.nonterminal = "expression_rule_production"
    if current is None:
raise ctx.errors.unexpected_eof()
if rule == 61:
ctx.rule = rules[61]
ast_parameters = OrderedDict([
('nud', 1),
('nud_ast', 2),
('led', 3),
('ast', 4),
])
tree.astTransform = AstTransformNodeCreator('MixfixProduction', ast_parameters)
t = expect(ctx, 25)
tree.add(t)
subtree = parse_nud(ctx)
tree.add(subtree)
subtree = parse__gen13(ctx)
tree.add(subtree)
subtree = parse__gen16(ctx)
tree.add(subtree)
subtree = parse__gen13(ctx)
tree.add(subtree)
return tree
elif rule == 62:
ctx.rule = rules[62]
ast_parameters = OrderedDict([
('morphemes', 1),
('ast', 2),
])
tree.astTransform = AstTransformNodeCreator('PrefixProduction', ast_parameters)
t = expect(ctx, 28)
tree.add(t)
subtree = parse__gen12(ctx)
tree.add(subtree)
subtree = parse__gen13(ctx)
tree.add(subtree)
return tree
elif rule == 63:
ctx.rule = rules[63]
ast_parameters = OrderedDict([
('morphemes', 1),
('ast', 2),
])
tree.astTransform = AstTransformNodeCreator('InfixProduction', ast_parameters)
t = expect(ctx, 6)
tree.add(t)
subtree = parse__gen12(ctx)
tree.add(subtree)
subtree = parse__gen13(ctx)
tree.add(subtree)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[71] if x >=0],
rules[63]
)
def parse_rule(ctx):
current = ctx.tokens.current()
rule = table[26][current.id] if current else -1
tree = ParseTree(NonTerminal(72, 'rule'))
ctx.nonterminal = "rule"
    if current is not None and current.id in nonterminal_follow[72] and current.id not in nonterminal_first[72]:
        return tree
    if current is None:
        return tree
if rule == 51:
ctx.rule = rules[51]
ast_parameters = OrderedDict([
('morphemes', 0),
('ast', 1),
])
tree.astTransform = AstTransformNodeCreator('Production', ast_parameters)
subtree = parse__gen12(ctx)
tree.add(subtree)
subtree = parse__gen13(ctx)
tree.add(subtree)
return tree
return tree
def parse_ast_parameter(ctx):
current = ctx.tokens.current()
rule = table[27][current.id] if current else -1
tree = ParseTree(NonTerminal(73, 'ast_parameter'))
ctx.nonterminal = "ast_parameter"
    if current is None:
raise ctx.errors.unexpected_eof()
if rule == 80:
ctx.rule = rules[80]
ast_parameters = OrderedDict([
('name', 0),
('index', 2),
])
tree.astTransform = AstTransformNodeCreator('AstParameter', ast_parameters)
t = expect(ctx, 17)
tree.add(t)
t = expect(ctx, 7)
tree.add(t)
t = expect(ctx, 44)
tree.add(t)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[73] if x >=0],
rules[80]
)
def parse_body_element(ctx):
current = ctx.tokens.current()
rule = table[28][current.id] if current else -1
tree = ParseTree(NonTerminal(74, 'body_element'))
ctx.nonterminal = "body_element"
    if current is None:
raise ctx.errors.unexpected_eof()
if rule == 2:
ctx.rule = rules[2]
tree.astTransform = AstTransformSubstitution(0)
subtree = parse_body_element_sub(ctx)
tree.add(subtree)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[74] if x >=0],
rules[2]
)
def parse_match_group(ctx):
current = ctx.tokens.current()
rule = table[30][current.id] if current else -1
tree = ParseTree(NonTerminal(76, 'match_group'))
ctx.nonterminal = "match_group"
    if current is None:
raise ctx.errors.unexpected_eof()
if rule == 37:
ctx.rule = rules[37]
tree.astTransform = AstTransformSubstitution(1)
t = expect(ctx, 35)
tree.add(t)
t = expect(ctx, 21)
tree.add(t)
t = expect(ctx, 5)
tree.add(t)
return tree
elif rule == 38:
ctx.rule = rules[38]
tree.astTransform = AstTransformSubstitution(0)
t = expect(ctx, 11)
tree.add(t)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[76] if x >=0],
rules[38]
)
def parse_binding_power(ctx):
current = ctx.tokens.current()
rule = table[32][current.id] if current else -1
tree = ParseTree(NonTerminal(78, 'binding_power'))
ctx.nonterminal = "binding_power"
    if current is None:
raise ctx.errors.unexpected_eof()
if rule == 66:
ctx.rule = rules[66]
tree.astTransform = AstTransformSubstitution(1)
t = expect(ctx, 37)
tree.add(t)
subtree = parse_precedence(ctx)
tree.add(subtree)
t = expect(ctx, 23)
tree.add(t)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[78] if x >=0],
rules[66]
)
def parse_regex_enumeration(ctx):
current = ctx.tokens.current()
rule = table[33][current.id] if current else -1
tree = ParseTree(NonTerminal(79, 'regex_enumeration'))
ctx.nonterminal = "regex_enumeration"
    if current is None:
raise ctx.errors.unexpected_eof()
if rule == 24:
ctx.rule = rules[24]
ast_parameters = OrderedDict([
('language', 0),
('regex', 2),
('options', 3),
])
tree.astTransform = AstTransformNodeCreator('RegexEnum', ast_parameters)
t = expect(ctx, 17)
tree.add(t)
t = expect(ctx, 26)
tree.add(t)
t = expect(ctx, 14)
tree.add(t)
subtree = parse__gen6(ctx)
tree.add(subtree)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[79] if x >=0],
rules[24]
)
def parse_ll1_rule_rhs(ctx):
current = ctx.tokens.current()
rule = table[34][current.id] if current else -1
tree = ParseTree(NonTerminal(80, 'll1_rule_rhs'))
ctx.nonterminal = "ll1_rule_rhs"
    if current is not None and current.id in nonterminal_follow[80] and current.id not in nonterminal_first[80]:
        return tree
    if current is None:
        return tree
if rule == 47:
ctx.rule = rules[47]
tree.astTransform = AstTransformSubstitution(0)
subtree = parse__gen11(ctx)
tree.add(subtree)
return tree
elif rule == 52:
ctx.rule = rules[52]
ast_parameters = OrderedDict([
])
tree.astTransform = AstTransformNodeCreator('NullProduction', ast_parameters)
t = expect(ctx, 45)
tree.add(t)
return tree
elif rule == 53:
ctx.rule = rules[53]
tree.astTransform = AstTransformSubstitution(0)
subtree = parse_parser(ctx)
tree.add(subtree)
return tree
return tree
def parse_regex_options(ctx):
current = ctx.tokens.current()
rule = table[38][current.id] if current else -1
tree = ParseTree(NonTerminal(84, 'regex_options'))
ctx.nonterminal = "regex_options"
    if current is None:
raise ctx.errors.unexpected_eof()
if rule == 27:
ctx.rule = rules[27]
tree.astTransform = AstTransformSubstitution(1)
t = expect(ctx, 2)
tree.add(t)
subtree = parse__gen7(ctx)
tree.add(subtree)
t = expect(ctx, 15)
tree.add(t)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[84] if x >=0],
rules[27]
)
def parse_regex_partial(ctx):
current = ctx.tokens.current()
rule = table[39][current.id] if current else -1
tree = ParseTree(NonTerminal(85, 'regex_partial'))
ctx.nonterminal = "regex_partial"
    if current is None:
raise ctx.errors.unexpected_eof()
if rule == 14:
ctx.rule = rules[14]
ast_parameters = OrderedDict([
('regex', 0),
('name', 2),
])
tree.astTransform = AstTransformNodeCreator('RegexPartial', ast_parameters)
t = expect(ctx, 14)
tree.add(t)
t = expect(ctx, 3)
tree.add(t)
t = expect(ctx, 18)
tree.add(t)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[85] if x >=0],
rules[14]
)
def parse_led(ctx):
current = ctx.tokens.current()
rule = table[40][current.id] if current else -1
tree = ParseTree(NonTerminal(86, 'led'))
ctx.nonterminal = "led"
    if current is None:
raise ctx.errors.unexpected_eof()
if rule == 65:
ctx.rule = rules[65]
tree.astTransform = AstTransformSubstitution(1)
t = expect(ctx, 27)
tree.add(t)
subtree = parse__gen12(ctx)
tree.add(subtree)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[86] if x >=0],
rules[65]
)
def parse_precedence(ctx):
current = ctx.tokens.current()
rule = table[41][current.id] if current else -1
tree = ParseTree(NonTerminal(87, 'precedence'))
ctx.nonterminal = "precedence"
    if current is None:
raise ctx.errors.unexpected_eof()
if rule == 67:
ctx.rule = rules[67]
ast_parameters = OrderedDict([
('marker', 0),
('associativity', 2),
])
tree.astTransform = AstTransformNodeCreator('Precedence', ast_parameters)
subtree = parse_binding_power_marker(ctx)
tree.add(subtree)
t = expect(ctx, 26)
tree.add(t)
subtree = parse_associativity(ctx)
tree.add(subtree)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[87] if x >=0],
rules[67]
)
def parse_parser_expression(ctx):
current = ctx.tokens.current()
rule = table[42][current.id] if current else -1
tree = ParseTree(NonTerminal(88, 'parser_expression'))
ctx.nonterminal = "parser_expression"
    if current is None:
raise ctx.errors.unexpected_eof()
if rule == 55:
ctx.rule = rules[55]
ast_parameters = OrderedDict([
('rules', 2),
])
tree.astTransform = AstTransformNodeCreator('ExpressionParser', ast_parameters)
t = expect(ctx, 41)
tree.add(t)
t = expect(ctx, 2)
tree.add(t)
subtree = parse__gen14(ctx)
tree.add(subtree)
t = expect(ctx, 15)
tree.add(t)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[88] if x >=0],
rules[55]
)
def parse_lexer(ctx):
current = ctx.tokens.current()
rule = table[43][current.id] if current else -1
tree = ParseTree(NonTerminal(89, 'lexer'))
ctx.nonterminal = "lexer"
    if current is None:
raise ctx.errors.unexpected_eof()
if rule == 6:
ctx.rule = rules[6]
ast_parameters = OrderedDict([
('atoms', 2),
])
tree.astTransform = AstTransformNodeCreator('Lexer', ast_parameters)
t = expect(ctx, 32)
tree.add(t)
t = expect(ctx, 2)
tree.add(t)
subtree = parse__gen1(ctx)
tree.add(subtree)
t = expect(ctx, 15)
tree.add(t)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[89] if x >=0],
rules[6]
)
def parse_macro(ctx):
current = ctx.tokens.current()
rule = table[44][current.id] if current else -1
tree = ParseTree(NonTerminal(90, 'macro'))
ctx.nonterminal = "macro"
    if current is None:
raise ctx.errors.unexpected_eof()
if rule == 82:
ctx.rule = rules[82]
ast_parameters = OrderedDict([
('name', 0),
('parameters', 2),
])
tree.astTransform = AstTransformNodeCreator('Macro', ast_parameters)
t = expect(ctx, 17)
tree.add(t)
t = expect(ctx, 37)
tree.add(t)
subtree = parse__gen18(ctx)
tree.add(subtree)
t = expect(ctx, 23)
tree.add(t)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[90] if x >=0],
rules[82]
)
def parse_lexer_regex(ctx):
current = ctx.tokens.current()
rule = table[45][current.id] if current else -1
tree = ParseTree(NonTerminal(91, 'lexer_regex'))
ctx.nonterminal = "lexer_regex"
    if current is None:
raise ctx.errors.unexpected_eof()
if rule == 15:
ctx.rule = rules[15]
tree.astTransform = AstTransformSubstitution(0)
subtree = parse_enumerated_regex(ctx)
tree.add(subtree)
return tree
elif rule == 19:
ctx.rule = rules[19]
ast_parameters = OrderedDict([
('regex', 0),
('options', 1),
('onmatch', 3),
])
tree.astTransform = AstTransformNodeCreator('Regex', ast_parameters)
t = expect(ctx, 14)
tree.add(t)
subtree = parse__gen3(ctx)
tree.add(subtree)
t = expect(ctx, 3)
tree.add(t)
subtree = parse__gen4(ctx)
tree.add(subtree)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[91] if x >=0],
rules[19]
)
def parse__gen9(ctx):
current = ctx.tokens.current()
rule = table[46][current.id] if current else -1
tree = ParseTree(NonTerminal(92, '_gen9'))
ctx.nonterminal = "_gen9"
    if current is not None and current.id in nonterminal_follow[92] and current.id not in nonterminal_first[92]:
        return tree
    if current is None:
        return tree
if rule == 34:
ctx.rule = rules[34]
tree.astTransform = AstTransformSubstitution(0)
subtree = parse_match_group(ctx)
tree.add(subtree)
return tree
return tree
def parse_parser(ctx):
current = ctx.tokens.current()
rule = table[48][current.id] if current else -1
tree = ParseTree(NonTerminal(94, 'parser'))
ctx.nonterminal = "parser"
    if current is None:
raise ctx.errors.unexpected_eof()
if rule == 41:
ctx.rule = rules[41]
tree.astTransform = AstTransformSubstitution(0)
subtree = parse_parser_ll1(ctx)
tree.add(subtree)
return tree
elif rule == 42:
ctx.rule = rules[42]
tree.astTransform = AstTransformSubstitution(0)
subtree = parse_parser_expression(ctx)
tree.add(subtree)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[94] if x >=0],
rules[42]
)
def parse_lexer_target(ctx):
current = ctx.tokens.current()
rule = table[49][current.id] if current else -1
tree = ParseTree(NonTerminal(95, 'lexer_target'))
ctx.nonterminal = "lexer_target"
    if current is None:
raise ctx.errors.unexpected_eof()
if rule == 28:
ctx.rule = rules[28]
tree.astTransform = AstTransformSubstitution(0)
subtree = parse_terminal(ctx)
tree.add(subtree)
return tree
elif rule == 31:
ctx.rule = rules[31]
ast_parameters = OrderedDict([
('name', 0),
('terminal', 2),
])
tree.astTransform = AstTransformNodeCreator('LexerFunctionCall', ast_parameters)
t = expect(ctx, 17)
tree.add(t)
t = expect(ctx, 37)
tree.add(t)
subtree = parse__gen8(ctx)
tree.add(subtree)
t = expect(ctx, 23)
tree.add(t)
return tree
elif rule == 32:
ctx.rule = rules[32]
tree.astTransform = AstTransformSubstitution(0)
t = expect(ctx, 8)
tree.add(t)
return tree
elif rule == 33:
ctx.rule = rules[33]
tree.astTransform = AstTransformSubstitution(0)
t = expect(ctx, 39)
tree.add(t)
return tree
elif rule == 39:
ctx.rule = rules[39]
ast_parameters = OrderedDict([
])
tree.astTransform = AstTransformNodeCreator('Null', ast_parameters)
t = expect(ctx, 45)
tree.add(t)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[95] if x >=0],
rules[39]
)
def parse_grammar(ctx):
current = ctx.tokens.current()
rule = table[51][current.id] if current else -1
tree = ParseTree(NonTerminal(97, 'grammar'))
ctx.nonterminal = "grammar"
if current == None:
raise ctx.errors.unexpected_eof()
if rule == 1:
ctx.rule = rules[1]
ast_parameters = OrderedDict([
('body', 2),
])
tree.astTransform = AstTransformNodeCreator('Grammar', ast_parameters)
t = expect(ctx, 33)
tree.add(t)
t = expect(ctx, 2)
tree.add(t)
subtree = parse__gen0(ctx)
tree.add(subtree)
t = expect(ctx, 15)
tree.add(t)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[97] if x >=0],
rules[1]
)
def parse_lexer_partials(ctx):
current = ctx.tokens.current()
rule = table[52][current.id] if current else -1
tree = ParseTree(NonTerminal(98, 'lexer_partials'))
ctx.nonterminal = "lexer_partials"
if current == None:
raise ctx.errors.unexpected_eof()
if rule == 13:
ctx.rule = rules[13]
ast_parameters = OrderedDict([
('list', 2),
])
tree.astTransform = AstTransformNodeCreator('RegexPartials', ast_parameters)
t = expect(ctx, 13)
tree.add(t)
t = expect(ctx, 2)
tree.add(t)
subtree = parse__gen2(ctx)
tree.add(subtree)
t = expect(ctx, 15)
tree.add(t)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[98] if x >=0],
rules[13]
)
def parse_ast_transform(ctx):
current = ctx.tokens.current()
rule = table[53][current.id] if current else -1
tree = ParseTree(NonTerminal(99, 'ast_transform'))
ctx.nonterminal = "ast_transform"
if current == None:
raise ctx.errors.unexpected_eof()
if rule == 76:
ctx.rule = rules[76]
tree.astTransform = AstTransformSubstitution(1)
t = expect(ctx, 3)
tree.add(t)
subtree = parse_ast_transform_sub(ctx)
tree.add(subtree)
return tree
raise ctx.errors.unexpected_symbol(
ctx.nonterminal,
ctx.tokens.current(),
[terminals[x] for x in nonterminal_first[99] if x >=0],
rules[76]
)
def parse__gen15(ctx):
current = ctx.tokens.current()
rule = table[55][current.id] if current else -1
tree = ParseTree(NonTerminal(101, '_gen15'))
ctx.nonterminal = "_gen15"
if current != None and current.id in nonterminal_follow[101] and current.id not in nonterminal_first[101]:
return tree
if current == None:
return tree
if rule == 56:
ctx.rule = rules[56]
tree.astTransform = AstTransformSubstitution(0)
subtree = parse_binding_power(ctx)
tree.add(subtree)
return tree
return tree
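# Each parse_* function above follows the same table-driven LL(1) scheme:
# look up the rule predicted for (nonterminal, current terminal), expect()
# the terminals of that rule or recurse into sub-nonterminals, and attach an
# AstTransform* describing how the parse tree collapses into an AST node.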
def emit(ctx, terminal, source_string, line, col):
if terminal:
ctx.tokens.append(Terminal(terminals[terminal], terminal, source_string, ctx.resource, line, col))
def default_action(ctx, terminal, source_string, line, col):
emit(ctx, terminal, source_string, line, col)
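# Tuple outputs in the mode tables below are (terminal, match_group, function);
# when the function slot is None, default_action() above is used to emit the
# token (see HermesLexer._next).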
def init():
return {}
def destroy(context):
pass
class LexerStackPush:
def __init__(self, mode):
self.mode = mode
class LexerAction:
def __init__(self, action):
self.action = action
class LexerContext:
def __init__(self, string, resource, errors, user_context):
self.__dict__.update(locals())
self.stack = ['default']
self.line = 1
self.col = 1
self.tokens = []
self.user_context = user_context
        self.re_match = None
class HermesLexer:
regex = {
'default': OrderedDict([
(re.compile(r'(grammar)\s*({)'), [
('grammar', 1, None),
('lbrace', 2, None),
LexerStackPush('grammar'),
]),
(re.compile(r'\s+'), [
]),
(re.compile(r'\#.*'), [
]),
]),
'grammar': OrderedDict([
(re.compile(r'\s+'), [
]),
(re.compile(r'\#.*'), [
]),
(re.compile(r'}'), [
('rbrace', 0, None),
LexerAction('pop'),
]),
(re.compile(r'lexer'), [
('lexer', 0, None),
LexerStackPush('lexer'),
]),
(re.compile(r'parser'), [
('parser', 0, None),
LexerStackPush('parser'),
]),
]),
'lexer': OrderedDict([
(re.compile(r'\s+'), [
]),
(re.compile(r'\#.*'), [
]),
(re.compile(r'code<([a-z]+)>\s*<<\s*([a-zA-Z_]+)(?=\s)(.*?)(\2)', re.DOTALL), [
('code_start', 2, None),
('language', 1, None),
('code', 3, None),
]),
(re.compile(r'}'), [
('rbrace', 0, None),
LexerAction('pop'),
]),
(re.compile(r'{'), [
('lbrace', 0, None),
]),
(re.compile(r'<'), [
('langle', 0, None),
]),
(re.compile(r'>'), [
('rangle', 0, None),
]),
(re.compile(r'\('), [
('lparen', 0, None),
]),
(re.compile(r'\)'), [
('rparen', 0, None),
]),
(re.compile(r'\[\]'), [
('no_group', 0, None),
]),
(re.compile(r'\['), [
('lsquare', 0, None),
]),
(re.compile(r'\]'), [
('rsquare', 0, None),
]),
(re.compile(r'[0-9]+'), [
('integer', 0, None),
]),
(re.compile(r'(r\'(\\\'|[^\'])*\'|"(\\\"|[^\"])*")'), [
('regex', 0, None),
LexerStackPush('regex_options'),
]),
(re.compile(r'->'), [
('arrow', 0, None),
]),
(re.compile(r','), [
('comma', 0, None),
]),
(re.compile(r'@([a-zA-Z][a-zA-Z0-9_]*)'), [
('stack_push', 1, None),
]),
(re.compile(r'%([a-zA-Z][a-zA-Z0-9_]*)'), [
('action', 1, None),
]),
(re.compile(r':([a-zA-Z][a-zA-Z0-9_]*|_empty)'), [
('terminal', 1, None),
]),
(re.compile(r'_[a-zA-Z][a-zA-Z0-9_]*'), [
('regex_partial', 0, None),
]),
(re.compile(r'null'), [
('null', 0, None),
]),
(re.compile(r'mode'), [
('mode', 0, None),
LexerStackPush('lexer'),
]),
(re.compile(r'partials'), [
('partials', 0, None),
LexerStackPush('lexer'),
]),
(re.compile(r'enum'), [
('regex_enum', 0, None),
LexerStackPush('regex_enum'),
]),
(re.compile(r'[a-zA-Z][a-zA-Z0-9_]*'), [
('identifier', 0, None),
]),
]),
'regex_enum': OrderedDict([
(re.compile(r'\s+'), [
]),
(re.compile(r'\#.*'), [
]),
(re.compile(r'}'), [
('rbrace', 0, None),
LexerAction('pop'),
]),
(re.compile(r'{'), [
('lbrace', 0, None),
]),
(re.compile(r'\('), [
('lparen', 0, None),
]),
(re.compile(r'\)'), [
('rparen', 0, None),
]),
(re.compile(r':'), [
('colon', 0, None),
]),
(re.compile(r','), [
('comma', 0, None),
]),
(re.compile(r'(r\'(\\\'|[^\'])*\'|"(\\\"|[^\"])*")'), [
('regex', 0, None),
]),
(re.compile(r'[a-zA-Z][a-zA-Z0-9_]*'), [
('identifier', 0, None),
]),
]),
'regex_options': OrderedDict([
(re.compile(r'\s+'), [
]),
(re.compile(r'\#.*'), [
]),
(re.compile(r'[a-zA-Z][a-zA-Z0-9_]*'), [
('identifier', 0, None),
]),
(re.compile(r','), [
('comma', 0, None),
]),
(re.compile(r'{'), [
('lbrace', 0, None),
]),
(re.compile(r'}'), [
('rbrace', 0, None),
]),
(re.compile(r'->'), [
('arrow', 0, None),
LexerAction('pop'),
]),
]),
'parser': OrderedDict([
(re.compile(r'\s+'), [
]),
(re.compile(r'\#.*'), [
]),
(re.compile(r'{'), [
('lbrace', 0, None),
]),
(re.compile(r'}'), [
('rbrace', 0, None),
LexerAction('pop'),
]),
(re.compile(r'\|'), [
('pipe', 0, None),
]),
(re.compile(r'='), [
('equals', 0, None),
]),
(re.compile(r'\('), [
('lparen', 0, None),
]),
(re.compile(r'\)'), [
('rparen', 0, None),
]),
(re.compile(r','), [
('comma', 0, None),
]),
(re.compile(r'->'), [
('arrow', 0, None),
]),
(re.compile(r'null'), [
('null', 0, None),
]),
(re.compile(r'parser\s*<\s*expression\s*>\s*({)'), [
('parser_expression', None, None),
('lbrace', 1, None),
LexerStackPush('parser_expr'),
]),
(re.compile(r':([a-zA-Z][a-zA-Z0-9_]*|_empty)'), [
('terminal', 1, None),
]),
(re.compile(r'\$([a-zA-Z][a-zA-Z0-9_]*)(?=\s*\=)'), [
('ll1_rule_hint', None, None),
('nonterminal', 1, None),
]),
(re.compile(r'\$([a-zA-Z][a-zA-Z0-9_]*)'), [
('nonterminal', 1, None),
]),
(re.compile(r'\$([0-9]+|\$)'), [
('nonterminal_reference', 1, None),
]),
(re.compile(r'[a-zA-Z][a-zA-Z0-9_]*'), [
('identifier', 0, None),
]),
(re.compile(r'"[^"]+"'), [
# (terminal, group, function)
('string', 0, None),
]),
(re.compile(r'[0-9]+'), [
# (terminal, group, function)
('integer', 0, None),
]),
]),
'parser_expr': OrderedDict([
(re.compile(r'\s+'), [
# (terminal, group, function)
]),
(re.compile(r'\#.*'), [
# (terminal, group, function)
]),
(re.compile(r'(\()(?=\s*[\*-])'), [
# (terminal, group, function)
('lparen', 1, None),
LexerStackPush('binding_power'),
]),
(re.compile(r'->'), [
# (terminal, group, function)
('arrow', 0, None),
]),
(re.compile(r'<=>'), [
# (terminal, group, function)
('expression_divider', 0, None),
]),
(re.compile(r'\|'), [
# (terminal, group, function)
('pipe', 0, None),
]),
(re.compile(r'='), [
# (terminal, group, function)
('equals', 0, None),
]),
(re.compile(r'{'), [
# (terminal, group, function)
('lbrace', 0, None),
]),
(re.compile(r'}'), [
# (terminal, group, function)
('rbrace', 0, None),
LexerAction('pop'),
]),
(re.compile(r'\('), [
# (terminal, group, function)
('lparen', 0, None),
]),
(re.compile(r'\)'), [
# (terminal, group, function)
('rparen', 0, None),
]),
(re.compile(r','), [
# (terminal, group, function)
('comma', 0, None),
]),
(re.compile(r':([a-zA-Z][a-zA-Z0-9_]*|_empty)'), [
# (terminal, group, function)
('terminal', 1, None),
]),
(re.compile(r'(\$([a-zA-Z][a-zA-Z0-9_]*))[ \t]*(=)[ \t]*\1[ \t]+:([a-zA-Z][a-zA-Z0-9_]*)[ \t]+\1(?![ \t]+(:|\$))'), [
# (terminal, group, function)
('expr_rule_hint', None, None),
('nonterminal', 2, None),
('equals', 3, None),
('infix_rule_hint', None, None),
('nonterminal', 2, None),
('terminal', 4, None),
('nonterminal', 2, None),
]),
            (re.compile(r'(\$([a-zA-Z][a-zA-Z0-9_]*))[ \t]*(=)[ \t]*:([a-zA-Z][a-zA-Z0-9_]*)[ \t]+\1(?![ \t]+(:|\$))'), [
# (terminal, group, function)
('expr_rule_hint', None, None),
('nonterminal', 2, None),
('equals', 3, None),
('prefix_rule_hint', None, None),
('terminal', 4, None),
('nonterminal', 2, None),
]),
(re.compile(r'\$([a-zA-Z][a-zA-Z0-9_]*)\s*(=)'), [
# (terminal, group, function)
('expr_rule_hint', None, None),
('nonterminal', 1, None),
('equals', 2, None),
('mixfix_rule_hint', None, None),
]),
(re.compile(r'\$([a-zA-Z][a-zA-Z0-9_]*)'), [
# (terminal, group, function)
('nonterminal', 1, None),
]),
(re.compile(r'\$([0-9]+|\$)'), [
# (terminal, group, function)
('nonterminal_reference', 1, None),
]),
(re.compile(r'[a-zA-Z][a-zA-Z0-9_]*'), [
# (terminal, group, function)
('identifier', 0, None),
]),
(re.compile(r'"[^"]+"'), [
('string', 0, None),
]),
(re.compile(r'[0-9]+'), [
('integer', 0, None),
]),
]),
'binding_power': OrderedDict([
(re.compile(r'\s+'), [
]),
(re.compile(r'\*'), [
('asterisk', 0, None),
]),
(re.compile(r'-'), [
('dash', 0, None),
]),
(re.compile(r':'), [
('colon', 0, None),
]),
(re.compile(r'left'), [
('left', 0, None),
]),
(re.compile(r'right'), [
('right', 0, None),
]),
(re.compile(r'unary'), [
('unary', 0, None),
]),
(re.compile(r'\)'), [
('rparen', 0, None),
LexerAction('pop'),
]),
]),
}
def _advance_line_col(self, string, length, line, col):
for i in range(length):
if string[i] == '\n':
line += 1
col = 1
else:
col += 1
return (line, col)
def _advance_string(self, ctx, string):
(ctx.line, ctx.col) = self._advance_line_col(string, len(string), ctx.line, ctx.col)
ctx.string = ctx.string[len(string):]
def _next(self, ctx, debug=False):
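        # Try each regex of the current lexer mode (top of ctx.stack) in
        # insertion order. On the first match, run its outputs in sequence:
        # tuples emit tokens, LexerStackPush enters a new mode, and
        # LexerAction('pop') leaves one; the matched text is then consumed.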
for regex, outputs in self.regex[ctx.stack[-1]].items():
if debug:
from xtermcolor import colorize
token_count = len(ctx.tokens)
print('{1} ({2}, {3}) regex: {0}'.format(
colorize(regex.pattern, ansi=40), colorize(ctx.string[:20].replace('\n', '\\n'), ansi=15), ctx.line, ctx.col)
)
match = regex.match(ctx.string)
if match:
ctx.re_match = match
for output in outputs:
if isinstance(output, tuple):
(terminal, group, function) = output
function = function if function else default_action
source_string = match.group(group) if group is not None else ''
(group_line, group_col) = self._advance_line_col(ctx.string, match.start(group) if group else 0, ctx.line, ctx.col)
function(
ctx,
terminal,
source_string,
group_line,
group_col
)
if debug:
print(' matched: {}'.format(colorize(match.group(0).replace('\n', '\\n'), ansi=3)))
for token in ctx.tokens[token_count:]:
print(' emit: [{}] [{}, {}] [{}] stack:{} context:{}'.format(
colorize(token.str, ansi=9),
colorize(str(token.line), ansi=5),
colorize(str(token.col), ansi=5),
colorize(token.source_string, ansi=3),
colorize(str(ctx.stack), ansi=4),
colorize(str(ctx.user_context), ansi=13)
))
token_count = len(ctx.tokens)
if isinstance(output, LexerStackPush):
ctx.stack.append(output.mode)
if debug:
print(' push on stack: {}'.format(colorize(output.mode, ansi=4)))
if isinstance(output, LexerAction):
if output.action == 'pop':
mode = ctx.stack.pop()
if debug:
print(' pop off stack: {}'.format(colorize(mode, ansi=4)))
self._advance_string(ctx, match.group(0))
return len(match.group(0)) > 0
return False
def lex(self, string, resource, errors=None, debug=False):
if errors is None:
errors = DefaultSyntaxErrorHandler()
string_copy = string
user_context = init()
ctx = LexerContext(string, resource, errors, user_context)
while len(ctx.string):
matched = self._next(ctx, debug)
            if not matched:
raise ctx.errors.unrecognized_token(string_copy, ctx.line, ctx.col)
destroy(ctx.user_context)
return ctx.tokens
def lex(source, resource, errors=None, debug=False):
return TokenStream(HermesLexer().lex(source, resource, errors, debug))
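if __name__ == "__main__":
    # Minimal smoke test (assumption: this generated module is run directly;
    # the inline grammar below is a hypothetical example, not from the
    # original file).
    demo_source = "grammar { lexer { r'[a-z]+' -> word } parser { } }"
    for token in HermesLexer().lex(demo_source, "<demo>"):
        print(token.line, token.col, token.str, repr(token.source_string))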
| true | true |
f71c81a8b1726d61edd4af204b0813341e2fdc17 | 20,285 | py | Python | pkg/suggestion/v1beta1/nas/enas/service.py | Adarsh2910/katib | cd095d6a33401cfddee8188943b60cd12c950c33 | [ "Apache-2.0" ] | null | null | null | pkg/suggestion/v1beta1/nas/enas/service.py | Adarsh2910/katib | cd095d6a33401cfddee8188943b60cd12c950c33 | [ "Apache-2.0" ] | 669 | 2021-01-25T10:26:46.000Z | 2022-03-31T22:01:58.000Z | pkg/suggestion/v1beta1/nas/enas/service.py | Adarsh2910/katib | cd095d6a33401cfddee8188943b60cd12c950c33 | [ "Apache-2.0" ] | 1 | 2021-09-10T06:56:10.000Z | 2021-09-10T06:56:10.000Z |
import logging
from logging import getLogger, StreamHandler, INFO
import json
import os
import tensorflow as tf
import grpc
from pkg.apis.manager.v1beta1.python import api_pb2
from pkg.apis.manager.v1beta1.python import api_pb2_grpc
from pkg.suggestion.v1beta1.nas.enas.Controller import Controller
from pkg.suggestion.v1beta1.nas.enas.Operation import SearchSpace
from pkg.suggestion.v1beta1.nas.enas.AlgorithmSettings import (
parseAlgorithmSettings, algorithmSettingsValidator, enableNoneSettingsList)
from pkg.suggestion.v1beta1.internal.base_health_service import HealthServicer
class EnasExperiment:
def __init__(self, request, logger):
self.logger = logger
self.experiment_name = request.experiment.name
self.experiment = request.experiment
self.num_trials = 1
self.tf_graph = tf.Graph()
self.ctrl_cache_file = "ctrl_cache/{}.ckpt".format(
self.experiment_name)
self.suggestion_step = 0
self.algorithm_settings = None
self.controller = None
self.num_layers = None
self.input_sizes = None
self.output_sizes = None
self.num_operations = None
self.search_space = None
self.opt_direction = None
self.objective_name = None
self.logger.info("-" * 100 + "\nSetting Up Suggestion for Experiment {}\n".format(
self.experiment_name) + "-" * 100)
self._get_experiment_param()
self._setup_controller()
self.logger.info(">>> Suggestion for Experiment {} has been initialized.\n".format(
self.experiment_name))
def _get_experiment_param(self):
        # this function needs to
# 1) get the number of layers
# 2) get the I/O size
# 3) get the available operations
# 4) get the optimization direction (i.e. minimize or maximize)
# 5) get the objective name
# 6) get the algorithm settings
# Get Search Space
self.opt_direction = self.experiment.spec.objective.type
self.objective_name = self.experiment.spec.objective.objective_metric_name
nas_config = self.experiment.spec.nas_config
graph_config = nas_config.graph_config
self.num_layers = int(graph_config.num_layers)
self.input_sizes = list(map(int, graph_config.input_sizes))
self.output_sizes = list(map(int, graph_config.output_sizes))
search_space_raw = nas_config.operations
search_space_object = SearchSpace(search_space_raw)
self.search_space = search_space_object.search_space
self.num_operations = search_space_object.num_operations
self.print_search_space()
# Get Experiment Algorithm Settings
settings_raw = self.experiment.spec.algorithm.algorithm_settings
self.algorithm_settings = parseAlgorithmSettings(settings_raw)
self.print_algorithm_settings()
def _setup_controller(self):
with self.tf_graph.as_default():
self.controller = Controller(
num_layers=self.num_layers,
num_operations=self.num_operations,
controller_hidden_size=self.algorithm_settings['controller_hidden_size'],
controller_temperature=self.algorithm_settings['controller_temperature'],
controller_tanh_const=self.algorithm_settings['controller_tanh_const'],
controller_entropy_weight=self.algorithm_settings['controller_entropy_weight'],
controller_baseline_decay=self.algorithm_settings['controller_baseline_decay'],
controller_learning_rate=self.algorithm_settings["controller_learning_rate"],
controller_skip_target=self.algorithm_settings['controller_skip_target'],
controller_skip_weight=self.algorithm_settings['controller_skip_weight'],
controller_name="Ctrl_" + self.experiment_name,
logger=self.logger)
self.controller.build_trainer()
def print_search_space(self):
if self.search_space is None:
self.logger.warning(
"Error! The Suggestion has not yet been initialized!")
return
self.logger.info(
">>> Search Space for Experiment {}".format(self.experiment_name))
for opt in self.search_space:
opt.print_op(self.logger)
self.logger.info(
"There are {} operations in total.\n".format(self.num_operations))
def print_algorithm_settings(self):
if self.algorithm_settings is None:
self.logger.warning(
"Error! The Suggestion has not yet been initialized!")
return
self.logger.info(">>> Parameters of LSTM Controller for Experiment {}\n".format(
self.experiment_name))
for spec in self.algorithm_settings:
if len(spec) > 22:
self.logger.info("{}:\t{}".format(
spec, self.algorithm_settings[spec]))
else:
self.logger.info("{}:\t\t{}".format(
spec, self.algorithm_settings[spec]))
self.logger.info("")
class EnasService(api_pb2_grpc.SuggestionServicer, HealthServicer):
def __init__(self, logger=None):
super(EnasService, self).__init__()
self.is_first_run = True
self.experiment = None
        if logger is None:
self.logger = getLogger(__name__)
FORMAT = '%(asctime)-15s Experiment %(experiment_name)s %(message)s'
logging.basicConfig(format=FORMAT)
handler = StreamHandler()
handler.setLevel(INFO)
self.logger.setLevel(INFO)
self.logger.addHandler(handler)
self.logger.propagate = False
else:
self.logger = logger
if not os.path.exists("ctrl_cache/"):
os.makedirs("ctrl_cache/")
def ValidateAlgorithmSettings(self, request, context):
self.logger.info("Validate Algorithm Settings start")
graph_config = request.experiment.spec.nas_config.graph_config
# Validate GraphConfig
# Check InputSize
if not graph_config.input_sizes:
return self.SetValidateContextError(context, "Missing InputSizes in GraphConfig:\n{}".format(graph_config))
# Check OutputSize
if not graph_config.output_sizes:
return self.SetValidateContextError(context, "Missing OutputSizes in GraphConfig:\n{}".format(graph_config))
# Check NumLayers
if not graph_config.num_layers:
return self.SetValidateContextError(context, "Missing NumLayers in GraphConfig:\n{}".format(graph_config))
# Validate each operation
operations_list = list(
request.experiment.spec.nas_config.operations.operation)
for operation in operations_list:
# Check OperationType
if not operation.operation_type:
return self.SetValidateContextError(context, "Missing operationType in Operation:\n{}".format(operation))
# Check ParameterConfigs
if not operation.parameter_specs.parameters:
return self.SetValidateContextError(context, "Missing ParameterConfigs in Operation:\n{}".format(operation))
# Validate each ParameterConfig in Operation
parameters_list = list(operation.parameter_specs.parameters)
for parameter in parameters_list:
# Check Name
if not parameter.name:
return self.SetValidateContextError(context, "Missing Name in ParameterConfig:\n{}".format(parameter))
# Check ParameterType
if not parameter.parameter_type:
return self.SetValidateContextError(context, "Missing ParameterType in ParameterConfig:\n{}".format(parameter))
# Check List in Categorical or Discrete Type
if parameter.parameter_type == api_pb2.CATEGORICAL or parameter.parameter_type == api_pb2.DISCRETE:
if not parameter.feasible_space.list:
return self.SetValidateContextError(context, "Missing List in ParameterConfig.feasibleSpace:\n{}".format(parameter))
# Check Max, Min, Step in Int or Double Type
elif parameter.parameter_type == api_pb2.INT or parameter.parameter_type == api_pb2.DOUBLE:
if not parameter.feasible_space.min and not parameter.feasible_space.max:
return self.SetValidateContextError(context, "Missing Max and Min in ParameterConfig.feasibleSpace:\n{}".format(parameter))
if parameter.parameter_type == api_pb2.DOUBLE and (not parameter.feasible_space.step or float(parameter.feasible_space.step) <= 0):
return self.SetValidateContextError(context, "Step parameter should be > 0 in ParameterConfig.feasibleSpace:\n{}".format(parameter))
# Validate Algorithm Settings
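        # Each known setting is validated against a (type, (min, max)) pair from
        # algorithmSettingsValidator: float settings must lie in the half-open
        # interval (min, max] (max may be 'inf'); other numeric settings only
        # need value >= min. Settings in enableNoneSettingsList may be "None".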
settings_raw = request.experiment.spec.algorithm.algorithm_settings
for setting in settings_raw:
if setting.name in algorithmSettingsValidator.keys():
if setting.name in enableNoneSettingsList and setting.value == "None":
continue
setting_type = algorithmSettingsValidator[setting.name][0]
setting_range = algorithmSettingsValidator[setting.name][1]
try:
converted_value = setting_type(setting.value)
                except Exception:
return self.SetValidateContextError(context, "Algorithm Setting {} must be {} type".format(setting.name, setting_type.__name__))
if setting_type == float:
if converted_value <= setting_range[0] or (setting_range[1] != 'inf' and converted_value > setting_range[1]):
return self.SetValidateContextError(context, "Algorithm Setting {}: {} with {} type must be in range ({}, {}]".format(
setting.name, converted_value, setting_type.__name__, setting_range[0], setting_range[1]
))
elif converted_value < setting_range[0]:
return self.SetValidateContextError(context, "Algorithm Setting {}: {} with {} type must be in range [{}, {})".format(
setting.name, converted_value, setting_type.__name__, setting_range[0], setting_range[1]
))
else:
return self.SetValidateContextError(context, "Unknown Algorithm Setting name: {}".format(setting.name))
self.logger.info("All Experiment Settings are Valid")
return api_pb2.ValidateAlgorithmSettingsReply()
def SetValidateContextError(self, context, error_message):
context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
context.set_details(error_message)
self.logger.info(error_message)
return api_pb2.ValidateAlgorithmSettingsReply()
def GetSuggestions(self, request, context):
if self.is_first_run:
self.experiment = EnasExperiment(request, self.logger)
experiment = self.experiment
if request.request_number > 0:
experiment.num_trials = request.request_number
self.logger.info("-" * 100 + "\nSuggestion Step {} for Experiment {}\n".format(
experiment.suggestion_step, experiment.experiment_name) + "-" * 100)
self.logger.info("")
self.logger.info(">>> RequestNumber:\t\t{}".format(experiment.num_trials))
self.logger.info("")
with experiment.tf_graph.as_default():
saver = tf.compat.v1.train.Saver()
ctrl = experiment.controller
controller_ops = {
"loss": ctrl.loss,
"entropy": ctrl.sample_entropy,
"grad_norm": ctrl.grad_norm,
"baseline": ctrl.baseline,
"skip_rate": ctrl.skip_rate,
"train_op": ctrl.train_op,
"train_step": ctrl.train_step,
"sample_arc": ctrl.sample_arc,
"child_val_accuracy": ctrl.child_val_accuracy,
}
if self.is_first_run:
self.logger.info(">>> First time running suggestion for {}. Random architecture will be given.".format(
experiment.experiment_name))
with tf.compat.v1.Session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
candidates = list()
for _ in range(experiment.num_trials):
candidates.append(
sess.run(controller_ops["sample_arc"]))
# TODO: will use PVC to store the checkpoint to protect against unexpected suggestion pod restart
saver.save(sess, experiment.ctrl_cache_file)
self.is_first_run = False
else:
with tf.compat.v1.Session() as sess:
saver.restore(sess, experiment.ctrl_cache_file)
result = self.GetEvaluationResult(request.trials)
                # TODO: (andreyvelich) I deleted this part, should it be handled by the controller?
# Sometimes training container may fail and GetEvaluationResult() will return None
# In this case, the Suggestion will:
# 1. Firstly try to respawn the previous trials after waiting for RESPAWN_SLEEP seconds
# 2. If respawning the trials for RESPAWN_LIMIT times still cannot collect valid results,
# then fail the task because it may indicate that the training container has errors.
if result is None:
self.logger.warning(
">>> Suggestion has spawned trials, but they all failed.")
self.logger.warning(
">>> Please check whether the training container is correctly implemented")
self.logger.info(">>> Experiment {} failed".format(
experiment.experiment_name))
return []
# This LSTM network is designed to maximize the metrics
# However, if the user wants to minimize the metrics, we can take the negative of the result
if experiment.opt_direction == api_pb2.MINIMIZE:
result = -result
self.logger.info(">>> Suggestion updated. LSTM Controller Training\n")
log_every = experiment.algorithm_settings["controller_log_every_steps"]
for ctrl_step in range(1, experiment.algorithm_settings["controller_train_steps"]+1):
run_ops = [
controller_ops["loss"],
controller_ops["entropy"],
controller_ops["grad_norm"],
controller_ops["baseline"],
controller_ops["skip_rate"],
controller_ops["train_op"]
]
loss, entropy, grad_norm, baseline, skip_rate, _ = sess.run(
fetches=run_ops,
feed_dict={controller_ops["child_val_accuracy"]: result})
controller_step = sess.run(controller_ops["train_step"])
if ctrl_step % log_every == 0:
log_string = ""
log_string += "Controller Step: {} - ".format(controller_step)
log_string += "Loss: {:.4f} - ".format(loss)
log_string += "Entropy: {:.9} - ".format(entropy)
log_string += "Gradient Norm: {:.7f} - ".format(grad_norm)
log_string += "Baseline={:.4f} - ".format(baseline)
log_string += "Skip Rate={:.4f}".format(skip_rate)
self.logger.info(log_string)
candidates = list()
for _ in range(experiment.num_trials):
candidates.append(
sess.run(controller_ops["sample_arc"]))
saver.save(sess, experiment.ctrl_cache_file)
organized_candidates = list()
parameter_assignments = list()
for i in range(experiment.num_trials):
arc = candidates[i].tolist()
organized_arc = [0 for _ in range(experiment.num_layers)]
record = 0
for l in range(experiment.num_layers):
organized_arc[l] = arc[record: record + l + 1]
record += l + 1
organized_candidates.append(organized_arc)
nn_config = dict()
nn_config['num_layers'] = experiment.num_layers
nn_config['input_sizes'] = experiment.input_sizes
nn_config['output_sizes'] = experiment.output_sizes
nn_config['embedding'] = dict()
for l in range(experiment.num_layers):
opt = organized_arc[l][0]
nn_config['embedding'][opt] = experiment.search_space[opt].get_dict()
organized_arc_json = json.dumps(organized_arc)
nn_config_json = json.dumps(nn_config)
organized_arc_str = str(organized_arc_json).replace('\"', '\'')
nn_config_str = str(nn_config_json).replace('\"', '\'')
self.logger.info(
"\n>>> New Neural Network Architecture Candidate #{} (internal representation):".format(i))
self.logger.info(organized_arc_json)
self.logger.info("\n>>> Corresponding Seach Space Description:")
self.logger.info(nn_config_str)
parameter_assignments.append(
api_pb2.GetSuggestionsReply.ParameterAssignments(
assignments=[
api_pb2.ParameterAssignment(
name="architecture",
value=organized_arc_str
),
api_pb2.ParameterAssignment(
name="nn_config",
value=nn_config_str
)
]
)
)
self.logger.info("")
self.logger.info(">>> {} Trials were created for Experiment {}".format(
experiment.num_trials, experiment.experiment_name))
self.logger.info("")
experiment.suggestion_step += 1
return api_pb2.GetSuggestionsReply(parameter_assignments=parameter_assignments)
def GetEvaluationResult(self, trials_list):
completed_trials = dict()
failed_trials = []
for t in trials_list:
if t.status.condition == api_pb2.TrialStatus.TrialConditionType.SUCCEEDED:
target_value = None
for metric in t.status.observation.metrics:
if metric.name == t.spec.objective.objective_metric_name:
target_value = metric.value
break
# Take only the first metric value
# In current cifar-10 training container this value is the latest
completed_trials[t.name] = float(target_value)
if t.status.condition == api_pb2.TrialStatus.TrialConditionType.FAILED:
failed_trials.append(t.name)
n_completed = len(completed_trials)
self.logger.info(">>> By now: {} Trials succeeded, {} Trials failed".format(
n_completed, len(failed_trials)))
for tname in completed_trials:
self.logger.info("Trial: {}, Value: {}".format(
tname, completed_trials[tname]))
for tname in failed_trials:
self.logger.info("Trial: {} was failed".format(tname))
if n_completed > 0:
avg_metrics = sum(completed_trials.values()) / n_completed
self.logger.info("The average is {}\n".format(avg_metrics))
return avg_metrics
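# Sketch of the architecture decoding performed in GetSuggestions above: the
# controller's flat sample_arc is split into one slice per layer, where layer
# l owns l + 1 entries (its operation id plus l skip-connection indicators).
# The helper name below is illustrative only.
def _organize_arc_example(flat_arc, num_layers):
    organized, record = [], 0
    for l in range(num_layers):
        organized.append(flat_arc[record: record + l + 1])
        record += l + 1
    return organized
# e.g. _organize_arc_example([3, 1, 0, 2, 1, 1], 3) -> [[3], [1, 0], [2, 1, 1]]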
| 46.956019 | 156 | 0.603944 | true | true |
f71c84d85474a8f5aa729fc1e185f9a029c9a09c | 6,121 | py | Python | Monte-Carlo-Attacks/Monte-Carlo-CIFAR_VAE/cifar10_train.py | SAP-samples/security-research-mi-gen-nn | 15627f73fcc497c87a67f41957f6b82881dff353 | [ "Apache-2.0" ] | 5 | 2020-02-21T15:13:57.000Z | 2021-08-05T15:18:40.000Z | Monte-Carlo-Attacks/Monte-Carlo-CIFAR_VAE/cifar10_train.py | SAP-samples/security-research-membership-inference-against-generative-networks | 15627f73fcc497c87a67f41957f6b82881dff353 | [ "Apache-2.0" ] | null | null | null | Monte-Carlo-Attacks/Monte-Carlo-CIFAR_VAE/cifar10_train.py | SAP-samples/security-research-membership-inference-against-generative-networks | 15627f73fcc497c87a67f41957f6b82881dff353 | [ "Apache-2.0" ] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
import pickle
from keras.layers import Input, Dense, Lambda, Flatten, Reshape, Layer
from keras.layers import Conv2D, Conv2DTranspose
from keras.models import Model
from keras import backend as K
from keras import metrics
# import parameters
from cifar10_params import *
from utils import *
# tensorflow uses channels_last
# theano uses channels_first
if K.image_data_format() == 'channels_first':
original_img_size = (img_chns, img_rows, img_cols)
else:
original_img_size = (img_rows, img_cols, img_chns)
# encoder architecture
x = Input(shape=original_img_size)
conv_1 = Conv2D(img_chns,
kernel_size=(2, 2),
padding='same', activation='relu')(x)
conv_2 = Conv2D(filters,
kernel_size=(2, 2),
padding='same', activation='relu',
strides=(2, 2))(conv_1)
conv_3 = Conv2D(filters,
kernel_size=num_conv,
padding='same', activation='relu',
strides=1)(conv_2)
conv_4 = Conv2D(filters,
kernel_size=num_conv,
padding='same', activation='relu',
strides=1)(conv_3)
flat = Flatten()(conv_4)
hidden = Dense(intermediate_dim, activation='relu')(flat)
# mean and variance for latent variables
z_mean = Dense(latent_dim)(hidden)
z_log_var = Dense(latent_dim)(hidden)
# sampling layer
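# Reparameterization trick: draw epsilon ~ N(0, I), then shift and scale it
# with the encoder outputs so gradients can flow through the stochastic node.
# Note this variant scales epsilon by exp(z_log_var); the more common form
# uses exp(0.5 * z_log_var), i.e. interprets z_log_var as the log-variance.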
def sampling(args):
z_mean, z_log_var = args
epsilon = K.random_normal(shape=(K.shape(z_mean)[0], latent_dim),
mean=0., stddev=epsilon_std)
return z_mean + K.exp(z_log_var) * epsilon
z = Lambda(sampling, output_shape=(latent_dim,))([z_mean, z_log_var])
# decoder architecture
decoder_hid = Dense(int(intermediate_dim), activation='relu')
decoder_upsample = Dense(int(filters * img_rows / 2 * img_cols / 2), activation='relu')
if K.image_data_format() == 'channels_first':
output_shape = (batch_size, filters, int(img_rows / 2), int(img_cols / 2))
else:
output_shape = (batch_size, int(img_rows / 2), int(img_cols / 2), filters)
decoder_reshape = Reshape(output_shape[1:])
decoder_deconv_1 = Conv2DTranspose(filters,
kernel_size=num_conv,
padding='same',
strides=1,
activation='relu')
decoder_deconv_2 = Conv2DTranspose(filters,
kernel_size=num_conv,
padding='same',
strides=1,
activation='relu')
decoder_deconv_3_upsamp = Conv2DTranspose(filters,
kernel_size=(3, 3),
strides=(2, 2),
padding='valid',
activation='relu')
decoder_mean_squash = Conv2D(img_chns,
kernel_size=2,
padding='valid',
activation='sigmoid')
hid_decoded = decoder_hid(z)
up_decoded = decoder_upsample(hid_decoded)
reshape_decoded = decoder_reshape(up_decoded)
deconv_1_decoded = decoder_deconv_1(reshape_decoded)
deconv_2_decoded = decoder_deconv_2(deconv_1_decoded)
x_decoded_relu = decoder_deconv_3_upsamp(deconv_2_decoded)
x_decoded_mean_squash = decoder_mean_squash(x_decoded_relu)
# Custom loss layer
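# The loss below combines per-pixel binary cross-entropy (scaled by
# img_rows * img_cols) with the KL divergence of the approximate posterior
# N(z_mean, exp(z_log_var)) from the standard normal prior.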
class CustomVariationalLayer(Layer):
def __init__(self, **kwargs):
self.is_placeholder = True
super(CustomVariationalLayer, self).__init__(**kwargs)
def vae_loss(self, x, x_decoded_mean_squash):
x = K.flatten(x)
x_decoded_mean_squash = K.flatten(x_decoded_mean_squash)
xent_loss = img_rows * img_cols * metrics.binary_crossentropy(x, x_decoded_mean_squash)
kl_loss = - 0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
return K.mean(xent_loss + kl_loss)
def call(self, inputs):
x = inputs[0]
x_decoded_mean_squash = inputs[1]
loss = self.vae_loss(x, x_decoded_mean_squash)
self.add_loss(loss, inputs=inputs)
return x
y = CustomVariationalLayer()([x, x_decoded_mean_squash])
# entire model
vae = Model(x, y)
vae.compile(optimizer='rmsprop', loss=None)
vae.summary()
# load dataset
# (x_train, _), (x_test, y_test) = cifar10.load_data()
# x_train = x_train.astype('float32') / 255.
# x_train = x_train.reshape((x_train.shape[0],) + original_img_size)
# x_test = x_test.astype('float32') / 255.
# x_test = x_test.reshape((x_test.shape[0],) + original_img_size)
x_train, x_test = load_cifar10_with_validation(0.1, False)
# training
history = vae.fit(x_train,
shuffle=True,
epochs=epochs,
batch_size=batch_size,
validation_data=(x_test, None))
# encoder from learned model
encoder = Model(x, z_mean)
# generator / decoder from learned model
decoder_input = Input(shape=(latent_dim,))
_hid_decoded = decoder_hid(decoder_input)
_up_decoded = decoder_upsample(_hid_decoded)
_reshape_decoded = decoder_reshape(_up_decoded)
_deconv_1_decoded = decoder_deconv_1(_reshape_decoded)
_deconv_2_decoded = decoder_deconv_2(_deconv_1_decoded)
_x_decoded_relu = decoder_deconv_3_upsamp(_deconv_2_decoded)
_x_decoded_mean_squash = decoder_mean_squash(_x_decoded_relu)
generator = Model(decoder_input, _x_decoded_mean_squash)
# save all 3 models for future use - especially generator
vae.save('./models/cifar10_ld_%d_conv_%d_id_%d_e_%d_vae.h5' % (latent_dim, num_conv, intermediate_dim, epochs))
encoder.save('./models/cifar10_ld_%d_conv_%d_id_%d_e_%d_encoder.h5' % (latent_dim, num_conv, intermediate_dim, epochs))
generator.save('./models/cifar10_ld_%d_conv_%d_id_%d_e_%d_generator.h5' % (latent_dim, num_conv, intermediate_dim, epochs))
# save training history
fname = './models/cifar10_ld_%d_conv_%d_id_%d_e_%d_history.pkl' % (latent_dim, num_conv, intermediate_dim, epochs)
with open(fname, 'wb') as file_pi:
pickle.dump(history.history, file_pi)
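# Minimal post-training sketch (assumption: channels-last layout and that
# sampling codes from the N(0, I) prior is the intended generation procedure):
# decode a few random latent vectors into images with the learned generator.
z_samples = np.random.normal(size=(5, latent_dim))
generated_images = generator.predict(z_samples)
plt.imshow(generated_images[0])
plt.savefig('./models/cifar10_generated_sample.png')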
| 37.09697 | 123 | 0.663944 | true | true |
f71c8578ec45fa13ff3af1382cbd44bcc86f9bbe | 93 | py | Python | CVgallery/apps.py | siavashMehran/Portfolio | a592ec51122d96e8e336365fd3cd039a7f223221 | [ "MIT" ] | null | null | null | CVgallery/apps.py | siavashMehran/Portfolio | a592ec51122d96e8e336365fd3cd039a7f223221 | [ "MIT" ] | null | null | null | CVgallery/apps.py | siavashMehran/Portfolio | a592ec51122d96e8e336365fd3cd039a7f223221 | [ "MIT" ] | null | null | null |
from django.apps import AppConfig
class CvgalleryConfig(AppConfig):
name = 'CVgallery'
| 15.5 | 33 | 0.763441 | true | true |
f71c861ea7dd94eca7c2a5bcbc500411f6590433 | 2,705 | py | Python | castle/kivy_wrapper.py | chappers/castle | 0abdb4eed91c45b443c0de8f029dff983f921363 | [ "MIT" ] | null | null | null | castle/kivy_wrapper.py | chappers/castle | 0abdb4eed91c45b443c0de8f029dff983f921363 | [ "MIT" ] | 1 | 2020-11-22T22:00:13.000Z | 2020-11-22T22:00:13.000Z | castle/kivy_wrapper.py | chappers/castle | 0abdb4eed91c45b443c0de8f029dff983f921363 | [ "MIT" ] | null | null | null |
"""
A simple kivy wrapper
"""
import kivy
from kivy.app import App
from kivy.core.window import Window
from kivy.uix.widget import Widget
from kivy.uix.gridlayout import GridLayout
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.label import Label
from kivy.clock import Clock
"""
A really simple discrete environment to test for changing policies/environment
"""
import numpy as np
import random
from gym.spaces import Box, Discrete, Dict
import gym
from gym import Wrapper
class KivyWrapper(BoxLayout):
def __init__(self, env=None, **kwargs):
super(KivyWrapper, self).__init__(**kwargs)
self.env = env
self.action = None
self.info = Label(text="Starting Game", font_name="RobotoMono-Regular")
# self._trigger = Clock.schedule_interval(self.update, 1.0/60.0)
self.add_widget(self.info)
self._keyboard = Window.request_keyboard(self._keyboard_closed, self, "text")
if self._keyboard.widget:
# If it exists, this widget is a VKeyboard object which you can use
# to change the keyboard layout.
pass
self._keyboard.bind(on_key_down=self._on_keyboard_down)
def show_screen(self, board, info, update):
text = ""
if update and board is not None:
text += "\n".join(board)
text += "\n"
text += "\n".join(info)
self.info.text = text
def update(self, dt):
for idx in range(10):
if self.action == str(idx):
self.action = idx
if self.action is not None:
text_render, info, done = self.env.play(self.action)
else:
text_render, info = self.env.render()
self.show_screen(text_render, info, True)
self.action = None
def _keyboard_closed(self):
# print('My keyboard have been closed!')
self._keyboard.unbind(on_key_down=self._on_keyboard_down)
self._keyboard = None
def _on_keyboard_down(self, keyboard, keycode, text, modifiers):
key_register = modifiers + [text]
# print("Key input received is:\n{}".format(key_register))
self.action = text
# Keycode is composed of an integer + a string
# If we hit escape, release the keyboard
if keycode[1] == "escape":
keyboard.release()
# Return True to accept the key. Otherwise, it will be used by
# the system.
return True
def app_wrapper(env):
class KivyApp(App):
def build(self):
game = KivyWrapper(env=env)
game.env.reset()
Clock.schedule_interval(game.update, 1.0 / 60.0)
return game
return KivyApp
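# Minimal usage sketch (assumption: `TextEnv` is a hypothetical environment
# exposing the reset()/play(action)/render() interface that KivyWrapper calls):
#
#     if __name__ == "__main__":
#         DemoApp = app_wrapper(TextEnv())
#         DemoApp().run()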
| 29.725275 | 85 | 0.629945 |
import kivy
from kivy.app import App
from kivy.core.window import Window
from kivy.uix.widget import Widget
from kivy.uix.gridlayout import GridLayout
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.label import Label
from kivy.clock import Clock
import numpy as np
import random
from gym.spaces import Box, Discrete, Dict
import gym
from gym import Wrapper
class KivyWrapper(BoxLayout):
def __init__(self, env=None, **kwargs):
super(KivyWrapper, self).__init__(**kwargs)
self.env = env
self.action = None
self.info = Label(text="Starting Game", font_name="RobotoMono-Regular")
self.add_widget(self.info)
self._keyboard = Window.request_keyboard(self._keyboard_closed, self, "text")
if self._keyboard.widget:
pass
self._keyboard.bind(on_key_down=self._on_keyboard_down)
def show_screen(self, board, info, update):
text = ""
if update and board is not None:
text += "\n".join(board)
text += "\n"
text += "\n".join(info)
self.info.text = text
def update(self, dt):
for idx in range(10):
if self.action == str(idx):
self.action = idx
if self.action is not None:
text_render, info, done = self.env.play(self.action)
else:
text_render, info = self.env.render()
self.show_screen(text_render, info, True)
self.action = None
def _keyboard_closed(self):
self._keyboard.unbind(on_key_down=self._on_keyboard_down)
self._keyboard = None
def _on_keyboard_down(self, keyboard, keycode, text, modifiers):
key_register = modifiers + [text]
self.action = text
if keycode[1] == "escape":
keyboard.release()
return True
def app_wrapper(env):
class KivyApp(App):
def build(self):
game = KivyWrapper(env=env)
game.env.reset()
Clock.schedule_interval(game.update, 1.0 / 60.0)
return game
return KivyApp
| true | true |
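A hedged usage sketch for the wrapper above; DummyEnv is a hypothetical environment implementing the reset()/render()/play(action) interface that KivyWrapper calls, and is not part of the original module.

class DummyEnv:
    """Hypothetical environment matching the interface KivyWrapper expects."""
    def reset(self):
        pass
    def render(self):
        return ["........", "...@...."], ["score: 0"]        # (board, info)
    def play(self, action):
        return ["........", "...@...."], ["score: 0"], False  # (board, info, done)

if __name__ == "__main__":
    KivyApp = app_wrapper(DummyEnv())  # bind the env into a Kivy App subclass
    KivyApp().run()                    # build() schedules update() at ~60 Hz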
f71c862ef26b8cf209313fbb5ff5c086291c53ca | 1,093 | py | Python | python/analysis/TargetScanDB.py | mjoppich/miRExplore | 32760d88d65e7bc23b2bfb49415efcd0a7c7c5e1 | [
"Apache-2.0"
] | null | null | null | python/analysis/TargetScanDB.py | mjoppich/miRExplore | 32760d88d65e7bc23b2bfb49415efcd0a7c7c5e1 | [
"Apache-2.0"
] | null | null | null | python/analysis/TargetScanDB.py | mjoppich/miRExplore | 32760d88d65e7bc23b2bfb49415efcd0a7c7c5e1 | [
"Apache-2.0"
] | null | null | null | import re
from collections import defaultdict
from openpyxl import load_workbook
class TargetScanDB:
def __init__(self):
self.elems = []
self.gene2mirnas = defaultdict(list)
def make_dictionary(self):
for elem in self.elems:
self.gene2mirnas[elem[0]].append(elem)
@classmethod
def from_tsv(cls, filelocation="/mnt/c/ownCloud/data/miRExplore/targetscan/targetscan_ws_85.tsv"):
tsdb = TargetScanDB()
with open(filelocation, 'r') as fin:
for idx, row in enumerate(fin):
if idx == 0:
continue
arow = row.strip().split('\t')
gene = arow[0].upper()
mirna = arow[1]
score = float(arow[2])
percentile = int(arow[3])
mirna = mirna.replace('mmu-', '').replace('hsa-', '')
tsdb.elems.append((gene, mirna, score, percentile))
return tsdb
if __name__ == '__main__':
tsdb = TargetScanDB.from_tsv()
for x in tsdb.elems:
print(x)
| 19.517857 | 102 | 0.548948 | import re
from collections import defaultdict
from openpyxl import load_workbook
class TargetScanDB:
def __init__(self):
self.elems = []
self.gene2mirnas = defaultdict(list)
def make_dictionary(self):
for elem in self.elems:
self.gene2mirnas[elem[0]].append(elem)
@classmethod
def from_tsv(cls, filelocation="/mnt/c/ownCloud/data/miRExplore/targetscan/targetscan_ws_85.tsv"):
tsdb = TargetScanDB()
with open(filelocation, 'r') as fin:
for idx, row in enumerate(fin):
if idx == 0:
continue
arow = row.strip().split('\t')
gene = arow[0].upper()
mirna = arow[1]
score = float(arow[2])
percentile = int(arow[3])
mirna = mirna.replace('mmu-', '').replace('hsa-', '')
tsdb.elems.append((gene, mirna, score, percentile))
return tsdb
if __name__ == '__main__':
tsdb = TargetScanDB.from_tsv()
for x in tsdb.elems:
print(x)
| true | true |
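A small usage sketch, assuming a TargetScan TSV exists at the default path; note that make_dictionary() must be called before gene2mirnas lookups return anything, and the gene symbol VEGFA is only an example.

tsdb = TargetScanDB.from_tsv()   # rows become (gene, miRNA, score, percentile)
tsdb.make_dictionary()           # index the rows by upper-cased gene symbol
for gene, mirna, score, percentile in tsdb.gene2mirnas.get("VEGFA", []):
    print(gene, mirna, score, percentile)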
f71c86d03bc2eedb4697b0730ac3f051ebb54808 | 15,522 | py | Python | rasa_nlu/project.py | osmanbaskaya/rasa_nlu | 4f0b5d0fd0d058e437e7d74369cef212fd0a345b | [
"Apache-2.0"
] | null | null | null | rasa_nlu/project.py | osmanbaskaya/rasa_nlu | 4f0b5d0fd0d058e437e7d74369cef212fd0a345b | [
"Apache-2.0"
] | 6 | 2020-09-26T00:52:34.000Z | 2022-02-10T01:37:38.000Z | rasa_nlu/project.py | esrel/rasa_nlu | 53840788e41b2daf957ec5d488281f70e238730f | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
import logging
import os
import tempfile
import zipfile
from threading import Lock, Thread
from typing import List, Optional, Text, Tuple
import six
import time
from builtins import object
from requests.exceptions import InvalidURL, RequestException
from rasa_nlu import utils
from rasa_nlu.classifiers.keyword_intent_classifier import \
KeywordIntentClassifier
from rasa_nlu.model import Metadata, Interpreter
from rasa_nlu.utils import is_url, EndpointConfig
if six.PY2:
from StringIO import StringIO as IOReader
else:
from io import BytesIO as IOReader
logger = logging.getLogger(__name__)
MODEL_NAME_PREFIX = "model_"
FALLBACK_MODEL_NAME = "fallback"
DEFAULT_REQUEST_TIMEOUT = 60 * 5 # 5 minutes
def load_from_server(component_builder=None, # type: Optional[Text]
project=None, # type: Optional[Text]
project_dir=None, # type: Optional[Text]
remote_storage=None, # type: Optional[Text]
model_server=None, # type: Optional[EndpointConfig]
wait_time_between_pulls=None, # type: Optional[int]
):
# type: (...) -> Project
"""Load a persisted model from a server."""
project = Project(component_builder=component_builder,
project=project,
project_dir=project_dir,
remote_storage=remote_storage)
_update_model_from_server(model_server, project)
if wait_time_between_pulls:
# continuously pull the model every `wait_time_between_pulls` seconds
start_model_pulling_in_worker(model_server,
wait_time_between_pulls,
project)
return project
def _update_model_from_server(model_server, project):
# type: (EndpointConfig, Project) -> None
"""Load a zipped Rasa NLU model from a URL and update the passed
project."""
if not is_url(model_server.url):
raise InvalidURL(model_server)
model_directory = tempfile.mkdtemp()
new_model_fingerprint, filename = _pull_model_and_fingerprint(
model_server, model_directory, project.fingerprint)
if new_model_fingerprint:
model_name = _get_remote_model_name(filename)
project.fingerprint = new_model_fingerprint
project.update_model_from_dir_and_unload_others(model_directory,
model_name)
else:
logger.debug("No new model found at URL {}".format(model_server.url))
def _get_remote_model_name(filename):
# type: (Optional[Text]) -> Text
"""Get the name to save a model under that was fetched from a
remote server."""
if filename is not None: # use the filename header if present
        # str.strip removes a set of characters rather than a suffix, so trim
        # the ".zip" extension explicitly
        return filename[:-len(".zip")] if filename.endswith(".zip") else filename
else: # or else use a timestamp
timestamp = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
return MODEL_NAME_PREFIX + timestamp
def _pull_model_and_fingerprint(model_server, model_directory, fingerprint):
    # type: (EndpointConfig, Text, Optional[Text]) -> Tuple[Optional[Text], Optional[Text]]
"""Queries the model server and returns a tuple of containing the
response's <ETag> header which contains the model hash, and the
<filename> header containing the model name."""
header = {"If-None-Match": fingerprint}
try:
logger.debug("Requesting model from server {}..."
"".format(model_server.url))
response = model_server.request(method="GET",
headers=header,
timeout=DEFAULT_REQUEST_TIMEOUT)
except RequestException as e:
logger.warning("Tried to fetch model from server, but couldn't reach "
"server. We'll retry later... Error: {}."
"".format(e))
return None, None
if response.status_code == 204:
logger.debug("Model server returned 204 status code, indicating "
"that no new model is available. "
"Current fingerprint: {}".format(fingerprint))
return response.headers.get("ETag"), response.headers.get("filename")
elif response.status_code == 404:
logger.debug("Model server didn't find a model for our request. "
"Probably no one did train a model for the project "
"and tag combination yet.")
return None, None
elif response.status_code != 200:
logger.warn("Tried to fetch model from server, but server response "
"status code is {}. We'll retry later..."
"".format(response.status_code))
return None, None
zip_ref = zipfile.ZipFile(IOReader(response.content))
zip_ref.extractall(model_directory)
logger.debug("Unzipped model to {}"
"".format(os.path.abspath(model_directory)))
# get the new fingerprint and filename
return response.headers.get("ETag"), response.headers.get("filename")
def _run_model_pulling_worker(model_server, wait_time_between_pulls, project):
# type: (Text, int, Project) -> None
while True:
_update_model_from_server(model_server, project)
time.sleep(wait_time_between_pulls)
def start_model_pulling_in_worker(model_server, wait_time_between_pulls,
project):
# type: (Text, int, Project) -> None
worker = Thread(target=_run_model_pulling_worker,
args=(model_server, wait_time_between_pulls, project))
worker.setDaemon(True)
worker.start()
class Project(object):
def __init__(self,
component_builder=None,
project=None,
project_dir=None,
remote_storage=None,
fingerprint=None):
self._component_builder = component_builder
self._models = {}
self.status = 0
self.current_training_processes = 0
self._reader_lock = Lock()
self._loader_lock = Lock()
self._writer_lock = Lock()
self._readers_count = 0
self._path = None
self._project = project
self.remote_storage = remote_storage
self.fingerprint = fingerprint
if project and project_dir:
self._path = os.path.join(project_dir, project)
self._search_for_models()
def _begin_read(self):
# Readers-writer lock basic double mutex implementation
self._reader_lock.acquire()
self._readers_count += 1
if self._readers_count == 1:
self._writer_lock.acquire()
self._reader_lock.release()
def _end_read(self):
self._reader_lock.acquire()
self._readers_count -= 1
if self._readers_count == 0:
self._writer_lock.release()
self._reader_lock.release()
def _load_local_model(self, requested_model_name=None):
        if requested_model_name is None:  # user wants the latest model
            # NOTE: for better parse performance we currently do not refresh
            # the model list from local storage and the cloud here, even when
            # the caller explicitly sets requested_model_name to None to get
            # the latest model, because refreshing is slow. As a workaround,
            # the caller can pass the latest model name explicitly so that the
            # model gets cached; this effectively refreshes the latest project
            # model. If a refresh function is ever wanted,
            # `_latest_project_model()` is a good place to implement it.
logger.debug("No model specified. Using default")
return self._latest_project_model()
elif requested_model_name in self._models: # model exists in cache
return requested_model_name
return None # local model loading failed!
def _dynamic_load_model(self, requested_model_name=None):
# type: (Text) -> Text
        # first try to load from the local cache
local_model = self._load_local_model(requested_model_name)
if local_model:
return local_model
        # The model is not in the model-list cache, so refresh the list from
        # local storage and the cloud.
        # NOTE: a malicious user sending lots of requests for non-existent
        # models can cause performance issues, because fetching anything from
        # the cloud is a time-consuming task.
        self._search_for_models()
        # retry after refreshing the model cache
local_model = self._load_local_model(requested_model_name)
if local_model:
return local_model
        # the user-specified model was still not found
        logger.warning("Invalid model requested. Using default")
return self._latest_project_model()
def parse(self, text, time=None, requested_model_name=None):
self._begin_read()
model_name = self._dynamic_load_model(requested_model_name)
self._loader_lock.acquire()
try:
if not self._models.get(model_name):
interpreter = self._interpreter_for_model(model_name)
self._models[model_name] = interpreter
finally:
self._loader_lock.release()
response = self._models[model_name].parse(text, time)
response['project'] = self._project
response['model'] = model_name
self._end_read()
return response
def load_model(self):
self._begin_read()
status = False
model_name = self._dynamic_load_model()
logger.debug('Loading model %s', model_name)
self._loader_lock.acquire()
try:
if not self._models.get(model_name):
interpreter = self._interpreter_for_model(model_name)
self._models[model_name] = interpreter
status = True
finally:
self._loader_lock.release()
self._end_read()
return status
def update_model_from_dir_and_unload_others(self,
model_dir, # type: Text
model_name # type: Text
):
# unload all loaded models
for model in self._list_loaded_models():
self.unload(model)
self._begin_read()
status = False
logger.debug('Loading model {} from directory {}'.format(
model_name, model_dir))
self._loader_lock.acquire()
try:
interpreter = self._interpreter_for_model(
model_name, model_dir)
self._models[model_name] = interpreter
status = True
finally:
self._loader_lock.release()
self._end_read()
return status
def update(self, model_name):
self._writer_lock.acquire()
self._models[model_name] = None
self._writer_lock.release()
def unload(self, model_name):
self._writer_lock.acquire()
try:
del self._models[model_name]
self._models[model_name] = None
return model_name
finally:
self._writer_lock.release()
def _latest_project_model(self):
"""Retrieves the latest trained model for an project"""
models = {model[len(MODEL_NAME_PREFIX):]: model
for model in self._models.keys()
if model.startswith(MODEL_NAME_PREFIX)}
if models:
time_list = [datetime.datetime.strptime(time, '%Y%m%d-%H%M%S')
for time, model in models.items()]
return models[max(time_list).strftime('%Y%m%d-%H%M%S')]
else:
return FALLBACK_MODEL_NAME
def _fallback_model(self):
meta = Metadata({"pipeline": [{
"name": "intent_classifier_keyword",
"class": utils.module_path_from_object(KeywordIntentClassifier())
}]}, "")
return Interpreter.create(meta, self._component_builder)
def _search_for_models(self):
model_names = (self._list_models_in_dir(self._path) +
self._list_models_in_cloud())
if not model_names:
if FALLBACK_MODEL_NAME not in self._models:
self._models[FALLBACK_MODEL_NAME] = self._fallback_model()
else:
for model in set(model_names):
if model not in self._models:
self._models[model] = None
def _interpreter_for_model(self, model_name, model_dir=None):
metadata = self._read_model_metadata(model_name, model_dir)
return Interpreter.create(metadata, self._component_builder)
def _read_model_metadata(self, model_name, model_dir):
if model_name is None:
data = Project._default_model_metadata()
return Metadata(data, model_name)
else:
if model_dir is not None:
path = model_dir
elif not os.path.isabs(model_name) and self._path:
path = os.path.join(self._path, model_name)
else:
path = model_name
# download model from cloud storage if needed and possible
if not os.path.isdir(path):
self._load_model_from_cloud(model_name, path)
return Metadata.load(path)
def as_dict(self):
return {'status': 'training' if self.status else 'ready',
'current_training_processes': self.current_training_processes,
'available_models': list(self._models.keys()),
'loaded_models': self._list_loaded_models()}
def _list_loaded_models(self):
models = []
for model, interpreter in self._models.items():
if interpreter is not None:
models.append(model)
return models
def _list_models_in_cloud(self):
# type: () -> List[Text]
try:
from rasa_nlu.persistor import get_persistor
p = get_persistor(self.remote_storage)
if p is not None:
return p.list_models(self._project)
else:
return []
except Exception as e:
logger.warn("Failed to list models of project {}. "
"{}".format(self._project, e))
return []
def _load_model_from_cloud(self, model_name, target_path):
try:
from rasa_nlu.persistor import get_persistor
p = get_persistor(self.remote_storage)
if p is not None:
p.retrieve(model_name, self._project, target_path)
else:
raise RuntimeError("Unable to initialize persistor")
except Exception as e:
logger.warn("Using default interpreter, couldn't fetch "
"model: {}".format(e))
raise # re-raise this exception because nothing we can do now
@staticmethod
def _default_model_metadata():
return {
"language": None,
}
@staticmethod
def _list_models_in_dir(path):
if not path or not os.path.isdir(path):
return []
else:
return [os.path.relpath(model, path)
for model in utils.list_subdirectories(path)]
| 36.097674 | 86 | 0.616029 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
import logging
import os
import tempfile
import zipfile
from threading import Lock, Thread
from typing import List, Optional, Text, Tuple
import six
import time
from builtins import object
from requests.exceptions import InvalidURL, RequestException
from rasa_nlu import utils
from rasa_nlu.classifiers.keyword_intent_classifier import \
KeywordIntentClassifier
from rasa_nlu.model import Metadata, Interpreter
from rasa_nlu.utils import is_url, EndpointConfig
if six.PY2:
from StringIO import StringIO as IOReader
else:
from io import BytesIO as IOReader
logger = logging.getLogger(__name__)
MODEL_NAME_PREFIX = "model_"
FALLBACK_MODEL_NAME = "fallback"
DEFAULT_REQUEST_TIMEOUT = 60 * 5
def load_from_server(component_builder=None,
project=None,
project_dir=None,
remote_storage=None,
model_server=None,
wait_time_between_pulls=None,
):
project = Project(component_builder=component_builder,
project=project,
project_dir=project_dir,
remote_storage=remote_storage)
_update_model_from_server(model_server, project)
if wait_time_between_pulls:
start_model_pulling_in_worker(model_server,
wait_time_between_pulls,
project)
return project
def _update_model_from_server(model_server, project):
if not is_url(model_server.url):
raise InvalidURL(model_server)
model_directory = tempfile.mkdtemp()
new_model_fingerprint, filename = _pull_model_and_fingerprint(
model_server, model_directory, project.fingerprint)
if new_model_fingerprint:
model_name = _get_remote_model_name(filename)
project.fingerprint = new_model_fingerprint
project.update_model_from_dir_and_unload_others(model_directory,
model_name)
else:
logger.debug("No new model found at URL {}".format(model_server.url))
def _get_remote_model_name(filename):
if filename is not None:
return filename.strip(".zip")
else:
timestamp = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
return MODEL_NAME_PREFIX + timestamp
def _pull_model_and_fingerprint(model_server, model_directory, fingerprint):
header = {"If-None-Match": fingerprint}
try:
logger.debug("Requesting model from server {}..."
"".format(model_server.url))
response = model_server.request(method="GET",
headers=header,
timeout=DEFAULT_REQUEST_TIMEOUT)
except RequestException as e:
logger.warning("Tried to fetch model from server, but couldn't reach "
"server. We'll retry later... Error: {}."
"".format(e))
return None, None
if response.status_code == 204:
logger.debug("Model server returned 204 status code, indicating "
"that no new model is available. "
"Current fingerprint: {}".format(fingerprint))
return response.headers.get("ETag"), response.headers.get("filename")
elif response.status_code == 404:
logger.debug("Model server didn't find a model for our request. "
"Probably no one did train a model for the project "
"and tag combination yet.")
return None, None
elif response.status_code != 200:
logger.warn("Tried to fetch model from server, but server response "
"status code is {}. We'll retry later..."
"".format(response.status_code))
return None, None
zip_ref = zipfile.ZipFile(IOReader(response.content))
zip_ref.extractall(model_directory)
logger.debug("Unzipped model to {}"
"".format(os.path.abspath(model_directory)))
return response.headers.get("ETag"), response.headers.get("filename")
def _run_model_pulling_worker(model_server, wait_time_between_pulls, project):
while True:
_update_model_from_server(model_server, project)
time.sleep(wait_time_between_pulls)
def start_model_pulling_in_worker(model_server, wait_time_between_pulls,
project):
worker = Thread(target=_run_model_pulling_worker,
args=(model_server, wait_time_between_pulls, project))
worker.setDaemon(True)
worker.start()
class Project(object):
def __init__(self,
component_builder=None,
project=None,
project_dir=None,
remote_storage=None,
fingerprint=None):
self._component_builder = component_builder
self._models = {}
self.status = 0
self.current_training_processes = 0
self._reader_lock = Lock()
self._loader_lock = Lock()
self._writer_lock = Lock()
self._readers_count = 0
self._path = None
self._project = project
self.remote_storage = remote_storage
self.fingerprint = fingerprint
if project and project_dir:
self._path = os.path.join(project_dir, project)
self._search_for_models()
def _begin_read(self):
self._reader_lock.acquire()
self._readers_count += 1
if self._readers_count == 1:
self._writer_lock.acquire()
self._reader_lock.release()
def _end_read(self):
self._reader_lock.acquire()
self._readers_count -= 1
if self._readers_count == 0:
self._writer_lock.release()
self._reader_lock.release()
def _load_local_model(self, requested_model_name=None):
if requested_model_name is None:
logger.debug("No model specified. Using default")
return self._latest_project_model()
elif requested_model_name in self._models:
return requested_model_name
return None
def _dynamic_load_model(self, requested_model_name=None):
local_model = self._load_local_model(requested_model_name)
if local_model:
return local_model
self._search_for_models()
local_model = self._load_local_model(requested_model_name)
if local_model:
return local_model
logger.warn("Invalid model requested. Using default")
return self._latest_project_model()
def parse(self, text, time=None, requested_model_name=None):
self._begin_read()
model_name = self._dynamic_load_model(requested_model_name)
self._loader_lock.acquire()
try:
if not self._models.get(model_name):
interpreter = self._interpreter_for_model(model_name)
self._models[model_name] = interpreter
finally:
self._loader_lock.release()
response = self._models[model_name].parse(text, time)
response['project'] = self._project
response['model'] = model_name
self._end_read()
return response
def load_model(self):
self._begin_read()
status = False
model_name = self._dynamic_load_model()
logger.debug('Loading model %s', model_name)
self._loader_lock.acquire()
try:
if not self._models.get(model_name):
interpreter = self._interpreter_for_model(model_name)
self._models[model_name] = interpreter
status = True
finally:
self._loader_lock.release()
self._end_read()
return status
def update_model_from_dir_and_unload_others(self,
model_dir,
model_name
):
for model in self._list_loaded_models():
self.unload(model)
self._begin_read()
status = False
logger.debug('Loading model {} from directory {}'.format(
model_name, model_dir))
self._loader_lock.acquire()
try:
interpreter = self._interpreter_for_model(
model_name, model_dir)
self._models[model_name] = interpreter
status = True
finally:
self._loader_lock.release()
self._end_read()
return status
def update(self, model_name):
self._writer_lock.acquire()
self._models[model_name] = None
self._writer_lock.release()
def unload(self, model_name):
self._writer_lock.acquire()
try:
del self._models[model_name]
self._models[model_name] = None
return model_name
finally:
self._writer_lock.release()
def _latest_project_model(self):
models = {model[len(MODEL_NAME_PREFIX):]: model
for model in self._models.keys()
if model.startswith(MODEL_NAME_PREFIX)}
if models:
time_list = [datetime.datetime.strptime(time, '%Y%m%d-%H%M%S')
for time, model in models.items()]
return models[max(time_list).strftime('%Y%m%d-%H%M%S')]
else:
return FALLBACK_MODEL_NAME
def _fallback_model(self):
meta = Metadata({"pipeline": [{
"name": "intent_classifier_keyword",
"class": utils.module_path_from_object(KeywordIntentClassifier())
}]}, "")
return Interpreter.create(meta, self._component_builder)
def _search_for_models(self):
model_names = (self._list_models_in_dir(self._path) +
self._list_models_in_cloud())
if not model_names:
if FALLBACK_MODEL_NAME not in self._models:
self._models[FALLBACK_MODEL_NAME] = self._fallback_model()
else:
for model in set(model_names):
if model not in self._models:
self._models[model] = None
def _interpreter_for_model(self, model_name, model_dir=None):
metadata = self._read_model_metadata(model_name, model_dir)
return Interpreter.create(metadata, self._component_builder)
def _read_model_metadata(self, model_name, model_dir):
if model_name is None:
data = Project._default_model_metadata()
return Metadata(data, model_name)
else:
if model_dir is not None:
path = model_dir
elif not os.path.isabs(model_name) and self._path:
path = os.path.join(self._path, model_name)
else:
path = model_name
if not os.path.isdir(path):
self._load_model_from_cloud(model_name, path)
return Metadata.load(path)
def as_dict(self):
return {'status': 'training' if self.status else 'ready',
'current_training_processes': self.current_training_processes,
'available_models': list(self._models.keys()),
'loaded_models': self._list_loaded_models()}
def _list_loaded_models(self):
models = []
for model, interpreter in self._models.items():
if interpreter is not None:
models.append(model)
return models
def _list_models_in_cloud(self):
try:
from rasa_nlu.persistor import get_persistor
p = get_persistor(self.remote_storage)
if p is not None:
return p.list_models(self._project)
else:
return []
except Exception as e:
logger.warn("Failed to list models of project {}. "
"{}".format(self._project, e))
return []
def _load_model_from_cloud(self, model_name, target_path):
try:
from rasa_nlu.persistor import get_persistor
p = get_persistor(self.remote_storage)
if p is not None:
p.retrieve(model_name, self._project, target_path)
else:
raise RuntimeError("Unable to initialize persistor")
except Exception as e:
logger.warn("Using default interpreter, couldn't fetch "
"model: {}".format(e))
raise # re-raise this exception because nothing we can do now
@staticmethod
def _default_model_metadata():
return {
"language": None,
}
@staticmethod
def _list_models_in_dir(path):
if not path or not os.path.isdir(path):
return []
else:
return [os.path.relpath(model, path)
for model in utils.list_subdirectories(path)]
| true | true |
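A minimal sketch of the conditional-GET polling protocol that _pull_model_and_fingerprint implements, written against plain requests instead of rasa's EndpointConfig; the URL in the usage line is a placeholder and error handling is trimmed.

import requests

def poll_model(url, fingerprint=None):
    # Send the last-seen model hash; a 204 reply means it is still current.
    resp = requests.get(url, headers={"If-None-Match": fingerprint}, timeout=300)
    if resp.status_code in (204, 404):
        return fingerprint, None                   # nothing new to download
    resp.raise_for_status()
    return resp.headers.get("ETag"), resp.content  # new hash + zipped model

# new_hash, zipped = poll_model("http://localhost:5005/model", fingerprint=None)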
f71c881a51efe3fd38a5ddad27bb876a0a24ab7d | 8,497 | py | Python | pytype/tests/test_namedtuple.py | ashwinprasadme/pytype | fed209c73aacfcab15efc33deef3b4016a67cfe5 | [
"Apache-2.0"
] | null | null | null | pytype/tests/test_namedtuple.py | ashwinprasadme/pytype | fed209c73aacfcab15efc33deef3b4016a67cfe5 | [
"Apache-2.0"
] | null | null | null | pytype/tests/test_namedtuple.py | ashwinprasadme/pytype | fed209c73aacfcab15efc33deef3b4016a67cfe5 | [
"Apache-2.0"
] | null | null | null | """Tests for the namedtuple implementation in collections_overlay.py."""
import textwrap
from pytype import file_utils
from pytype.overlays import collections_overlay
from pytype.pytd import escape
from pytype.pytd import pytd_utils
from pytype.tests import test_base
class NamedtupleTests(test_base.TargetIndependentTest):
"""Tests for collections.namedtuple."""
def _namedtuple_ast(self, name, fields):
return collections_overlay.namedtuple_ast(name, fields, self.python_version)
def _namedtuple_def(self, suffix="", **kws):
"""Generate the expected pyi for a simple namedtuple definition.
Args:
suffix: Optionally, extra text to append to the pyi.
**kws: Must contain exactly one argument of the form
alias=(name, [<fields>]). For example, to generate a definition for
X = namedtuple("_X", "y z"), the method call should be
_namedtuple_def(X=("_X", ["y", "z"])).
Returns:
The expected pyi for the namedtuple instance.
"""
(alias, (name, fields)), = kws.items() # pylint: disable=unbalanced-tuple-unpacking
name = escape.pack_namedtuple(name, fields)
suffix += textwrap.dedent("""
collections = ... # type: module
{alias} = {name}""").format(alias=alias, name=name)
return pytd_utils.Print(self._namedtuple_ast(name, fields)) + "\n" + suffix
def test_basic_namedtuple(self):
ty = self.Infer("""
import collections
X = collections.namedtuple("X", ["y", "z"])
a = X(y=1, z=2)
""", deep=False)
self.assertTypesMatchPytd(ty, self._namedtuple_def(
X=("X", ["y", "z"]), suffix="a = ... # type: X"))
def test_no_fields(self):
ty = self.Infer("""
import collections
F = collections.namedtuple("F", [])
a = F()
""", deep=False)
self.assertTypesMatchPytd(
ty, self._namedtuple_def(F=("F", []), suffix="a = ... # type: F"))
def test_str_args(self):
ty = self.Infer("""
import collections
S = collections.namedtuple("S", "a b c")
b = S(1, 2, 3)
""", deep=False)
self.assertTypesMatchPytd(ty, self._namedtuple_def(
S=("S", ["a", "b", "c"]), suffix="b = ... # type: S"))
def test_str_args2(self):
self.Check("""
import collections
collections.namedtuple("_", "a,b,c")
""")
self.Check("""
import collections
collections.namedtuple("_", "a, b, c")
""")
self.Check("""
import collections
collections.namedtuple("_", "a ,b")
""")
def test_bad_fieldnames(self):
self.InferWithErrors("""
import collections
collections.namedtuple("_", ["abc", "def", "ghi"]) # invalid-namedtuple-arg
collections.namedtuple("_", "_") # invalid-namedtuple-arg
collections.namedtuple("_", "a, 1") # invalid-namedtuple-arg
collections.namedtuple("_", "a, !") # invalid-namedtuple-arg
collections.namedtuple("_", "a, b, c, a") # invalid-namedtuple-arg
collections.namedtuple("1", "") # invalid-namedtuple-arg
""")
def test_rename(self):
ty = self.Infer("""
import collections
S = collections.namedtuple("S", "abc def ghi abc", rename=True)
""", deep=False)
self.assertTypesMatchPytd(
ty, self._namedtuple_def(S=("S", ["abc", "_1", "ghi", "_3"])))
def test_bad_initialize(self):
self.InferWithErrors("""
from collections import namedtuple
X = namedtuple("X", "y z")
a = X(1) # missing-parameter
b = X(y = 2) # missing-parameter
c = X(w = 3) # wrong-keyword-args
d = X(y = "hello", z = 4j) # works
""")
def test_class_name(self):
ty = self.Infer(
"""
import collections
F = collections.namedtuple("S", ['a', 'b', 'c'])
""")
self.assertTypesMatchPytd(
ty, self._namedtuple_def(F=("S", ["a", "b", "c"])))
def test_constructors(self):
self.Check("""
import collections
X = collections.namedtuple("X", "a b c")
g = X(1, 2, 3)
i = X._make((7, 8, 9))
j = X._make((10, 11, 12), tuple.__new__, len)
""")
def test_instance_types(self):
ty = self.Infer(
"""
import collections
X = collections.namedtuple("X", "a b c")
a = X._make((1, 2, 3))
""")
self.assertTypesMatchPytd(ty, self._namedtuple_def(
X=("X", ["a", "b", "c"]), suffix="a = ... # type: X"))
def test_instantiate_pyi_namedtuple(self):
with file_utils.Tempdir() as d:
d.create_file("foo.pyi", """
class X(NamedTuple('X', [('y', str), ('z', int)])): ...
""")
_, errors = self.InferWithErrors("""
import foo
foo.X() # missing-parameter[e1]
foo.X(0, "") # wrong-arg-types[e2]
foo.X(z="", y=0) # wrong-arg-types[e3]
foo.X("", 0)
foo.X(y="", z=0)
""", pythonpath=[d.path])
self.assertErrorRegexes(
errors, {"e1": r"y", "e2": r"str.*int", "e3": r"str.*int"})
def test_use_pyi_namedtuple(self):
with file_utils.Tempdir() as d:
d.create_file("foo.pyi", """
class X(NamedTuple("X", [])): ...
""")
_, errors = self.InferWithErrors("""
import foo
foo.X()._replace()
foo.X().nonsense # attribute-error[e]
""", pythonpath=[d.path])
self.assertErrorRegexes(errors, {"e": r"nonsense.*X"})
def test_subclass_pyi_namedtuple(self):
with file_utils.Tempdir() as d:
d.create_file("foo.pyi", """
class X(NamedTuple("X", [("y", int)])): ...
""")
self.Check("""
import foo
class Y(foo.X):
def __new__(cls):
return super(Y, cls).__new__(cls, 0)
Y()
""", pythonpath=[d.path])
def test_varargs(self):
self.Check("""
import collections
X = collections.namedtuple("X", [])
args = None # type: list
X(*args)
""")
def test_kwargs(self):
self.Check("""
import collections
X = collections.namedtuple("X", [])
kwargs = None # type: dict
X(**kwargs)
""")
def test_name_conflict(self):
ty = self.Infer("""
import collections
X = collections.namedtuple("_", [])
Y = collections.namedtuple("_", [])
Z = collections.namedtuple("_", "a")
""", deep=False)
name_x = escape.pack_namedtuple("_", [])
name_z = escape.pack_namedtuple("_", ["a"])
ast_x = self._namedtuple_ast(name_x, [])
ast_z = self._namedtuple_ast(name_z, ["a"])
ast = pytd_utils.Concat(ast_x, ast_z)
expected = pytd_utils.Print(ast) + textwrap.dedent("""
collections = ... # type: module
X = {name_x}
Y = {name_x}
Z = {name_z}""").format(name_x=name_x, name_z=name_z)
self.assertTypesMatchPytd(ty, expected)
def test_subclass(self):
ty = self.Infer("""
import collections
class X(collections.namedtuple("X", [])):
def __new__(cls, _):
return super(X, cls).__new__(cls)
""")
name = escape.pack_namedtuple("X", [])
ast = self._namedtuple_ast(name, [])
expected = pytd_utils.Print(ast) + textwrap.dedent("""
collections = ... # type: module
_TX = TypeVar("_TX", bound=X)
class X({name}):
def __new__(cls: Type[_TX], _) -> _TX: ...""").format(name=name)
self.assertTypesMatchPytd(ty, expected)
def test_subclass_replace(self):
ty = self.Infer("""
import collections
X = collections.namedtuple("X", "a")
class Y(X): pass
z = Y(1)._replace(a=2)
""")
self.assertEqual(pytd_utils.Print(ty.Lookup("z")), "z: Y")
def test_subclass_make(self):
ty = self.Infer("""
import collections
X = collections.namedtuple("X", "a")
class Y(X): pass
z = Y._make([1])
""")
self.assertEqual(pytd_utils.Print(ty.Lookup("z")), "z: Y")
def test_unpacking(self):
with file_utils.Tempdir() as d:
d.create_file("foo.pyi", """
from typing import NamedTuple
X = NamedTuple("X", [('a', str), ('b', int)])
""")
ty = self.Infer("""
import foo
v = None # type: foo.X
a, b = v
""", deep=False, pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
foo = ... # type: module
v = ... # type: foo.namedtuple_X_0
a = ... # type: str
b = ... # type: int
""")
test_base.main(globals(), __name__ == "__main__")
| 31.354244 | 88 | 0.564905 |
import textwrap
from pytype import file_utils
from pytype.overlays import collections_overlay
from pytype.pytd import escape
from pytype.pytd import pytd_utils
from pytype.tests import test_base
class NamedtupleTests(test_base.TargetIndependentTest):
def _namedtuple_ast(self, name, fields):
return collections_overlay.namedtuple_ast(name, fields, self.python_version)
def _namedtuple_def(self, suffix="", **kws):
(alias, (name, fields)), = kws.items()
name = escape.pack_namedtuple(name, fields)
suffix += textwrap.dedent("""
collections = ... # type: module
{alias} = {name}""").format(alias=alias, name=name)
return pytd_utils.Print(self._namedtuple_ast(name, fields)) + "\n" + suffix
def test_basic_namedtuple(self):
ty = self.Infer("""
import collections
X = collections.namedtuple("X", ["y", "z"])
a = X(y=1, z=2)
""", deep=False)
self.assertTypesMatchPytd(ty, self._namedtuple_def(
X=("X", ["y", "z"]), suffix="a = ... # type: X"))
def test_no_fields(self):
ty = self.Infer("""
import collections
F = collections.namedtuple("F", [])
a = F()
""", deep=False)
self.assertTypesMatchPytd(
ty, self._namedtuple_def(F=("F", []), suffix="a = ... # type: F"))
def test_str_args(self):
ty = self.Infer("""
import collections
S = collections.namedtuple("S", "a b c")
b = S(1, 2, 3)
""", deep=False)
self.assertTypesMatchPytd(ty, self._namedtuple_def(
S=("S", ["a", "b", "c"]), suffix="b = ... # type: S"))
def test_str_args2(self):
self.Check("""
import collections
collections.namedtuple("_", "a,b,c")
""")
self.Check("""
import collections
collections.namedtuple("_", "a, b, c")
""")
self.Check("""
import collections
collections.namedtuple("_", "a ,b")
""")
def test_bad_fieldnames(self):
self.InferWithErrors("""
import collections
collections.namedtuple("_", ["abc", "def", "ghi"]) # invalid-namedtuple-arg
collections.namedtuple("_", "_") # invalid-namedtuple-arg
collections.namedtuple("_", "a, 1") # invalid-namedtuple-arg
collections.namedtuple("_", "a, !") # invalid-namedtuple-arg
collections.namedtuple("_", "a, b, c, a") # invalid-namedtuple-arg
collections.namedtuple("1", "") # invalid-namedtuple-arg
""")
def test_rename(self):
ty = self.Infer("""
import collections
S = collections.namedtuple("S", "abc def ghi abc", rename=True)
""", deep=False)
self.assertTypesMatchPytd(
ty, self._namedtuple_def(S=("S", ["abc", "_1", "ghi", "_3"])))
def test_bad_initialize(self):
self.InferWithErrors("""
from collections import namedtuple
X = namedtuple("X", "y z")
a = X(1) # missing-parameter
b = X(y = 2) # missing-parameter
c = X(w = 3) # wrong-keyword-args
d = X(y = "hello", z = 4j) # works
""")
def test_class_name(self):
ty = self.Infer(
"""
import collections
F = collections.namedtuple("S", ['a', 'b', 'c'])
""")
self.assertTypesMatchPytd(
ty, self._namedtuple_def(F=("S", ["a", "b", "c"])))
def test_constructors(self):
self.Check("""
import collections
X = collections.namedtuple("X", "a b c")
g = X(1, 2, 3)
i = X._make((7, 8, 9))
j = X._make((10, 11, 12), tuple.__new__, len)
""")
def test_instance_types(self):
ty = self.Infer(
"""
import collections
X = collections.namedtuple("X", "a b c")
a = X._make((1, 2, 3))
""")
self.assertTypesMatchPytd(ty, self._namedtuple_def(
X=("X", ["a", "b", "c"]), suffix="a = ... # type: X"))
def test_instantiate_pyi_namedtuple(self):
with file_utils.Tempdir() as d:
d.create_file("foo.pyi", """
class X(NamedTuple('X', [('y', str), ('z', int)])): ...
""")
_, errors = self.InferWithErrors("""
import foo
foo.X() # missing-parameter[e1]
foo.X(0, "") # wrong-arg-types[e2]
foo.X(z="", y=0) # wrong-arg-types[e3]
foo.X("", 0)
foo.X(y="", z=0)
""", pythonpath=[d.path])
self.assertErrorRegexes(
errors, {"e1": r"y", "e2": r"str.*int", "e3": r"str.*int"})
def test_use_pyi_namedtuple(self):
with file_utils.Tempdir() as d:
d.create_file("foo.pyi", """
class X(NamedTuple("X", [])): ...
""")
_, errors = self.InferWithErrors("""
import foo
foo.X()._replace()
foo.X().nonsense # attribute-error[e]
""", pythonpath=[d.path])
self.assertErrorRegexes(errors, {"e": r"nonsense.*X"})
def test_subclass_pyi_namedtuple(self):
with file_utils.Tempdir() as d:
d.create_file("foo.pyi", """
class X(NamedTuple("X", [("y", int)])): ...
""")
self.Check("""
import foo
class Y(foo.X):
def __new__(cls):
return super(Y, cls).__new__(cls, 0)
Y()
""", pythonpath=[d.path])
def test_varargs(self):
self.Check("""
import collections
X = collections.namedtuple("X", [])
args = None # type: list
X(*args)
""")
def test_kwargs(self):
self.Check("""
import collections
X = collections.namedtuple("X", [])
kwargs = None # type: dict
X(**kwargs)
""")
def test_name_conflict(self):
ty = self.Infer("""
import collections
X = collections.namedtuple("_", [])
Y = collections.namedtuple("_", [])
Z = collections.namedtuple("_", "a")
""", deep=False)
name_x = escape.pack_namedtuple("_", [])
name_z = escape.pack_namedtuple("_", ["a"])
ast_x = self._namedtuple_ast(name_x, [])
ast_z = self._namedtuple_ast(name_z, ["a"])
ast = pytd_utils.Concat(ast_x, ast_z)
expected = pytd_utils.Print(ast) + textwrap.dedent("""
collections = ... # type: module
X = {name_x}
Y = {name_x}
Z = {name_z}""").format(name_x=name_x, name_z=name_z)
self.assertTypesMatchPytd(ty, expected)
def test_subclass(self):
ty = self.Infer("""
import collections
class X(collections.namedtuple("X", [])):
def __new__(cls, _):
return super(X, cls).__new__(cls)
""")
name = escape.pack_namedtuple("X", [])
ast = self._namedtuple_ast(name, [])
expected = pytd_utils.Print(ast) + textwrap.dedent("""
collections = ... # type: module
_TX = TypeVar("_TX", bound=X)
class X({name}):
def __new__(cls: Type[_TX], _) -> _TX: ...""").format(name=name)
self.assertTypesMatchPytd(ty, expected)
def test_subclass_replace(self):
ty = self.Infer("""
import collections
X = collections.namedtuple("X", "a")
class Y(X): pass
z = Y(1)._replace(a=2)
""")
self.assertEqual(pytd_utils.Print(ty.Lookup("z")), "z: Y")
def test_subclass_make(self):
ty = self.Infer("""
import collections
X = collections.namedtuple("X", "a")
class Y(X): pass
z = Y._make([1])
""")
self.assertEqual(pytd_utils.Print(ty.Lookup("z")), "z: Y")
def test_unpacking(self):
with file_utils.Tempdir() as d:
d.create_file("foo.pyi", """
from typing import NamedTuple
X = NamedTuple("X", [('a', str), ('b', int)])
""")
ty = self.Infer("""
import foo
v = None # type: foo.X
a, b = v
""", deep=False, pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
foo = ... # type: module
v = ... # type: foo.namedtuple_X_0
a = ... # type: str
b = ... # type: int
""")
test_base.main(globals(), __name__ == "__main__")
| true | true |
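For reference, a plain-Python sketch of the stdlib namedtuple behaviors these tests pin down (rename=True, _make, _replace); it runs against collections directly, independent of pytype.

import collections

S = collections.namedtuple("S", "abc def ghi abc", rename=True)
print(S._fields)         # ('abc', '_1', 'ghi', '_3'): invalid/duplicate renamed

X = collections.namedtuple("X", "a b c")
x = X._make((1, 2, 3))   # alternate constructor from any iterable
print(x._replace(a=10))  # X(a=10, b=2, c=3), a new instance; x is unchanged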
f71c885784aeccc154dd5cca2413ad6060ae4e6b | 3,087 | py | Python | tests/tests_hrv.py | raimonpv/NeuroKit | cb37d83ee20d6a13a91c4848aa435f41e979e203 | [
"MIT"
] | 1 | 2021-11-14T21:18:43.000Z | 2021-11-14T21:18:43.000Z | tests/tests_hrv.py | raimonpv/NeuroKit | cb37d83ee20d6a13a91c4848aa435f41e979e203 | [
"MIT"
] | null | null | null | tests/tests_hrv.py | raimonpv/NeuroKit | cb37d83ee20d6a13a91c4848aa435f41e979e203 | [
"MIT"
] | 1 | 2021-11-14T21:18:48.000Z | 2021-11-14T21:18:48.000Z | import numpy as np
import neurokit2 as nk
def test_hrv_time():
ecg_slow = nk.ecg_simulate(duration=60, sampling_rate=1000, heart_rate=70, random_state=42)
ecg_fast = nk.ecg_simulate(duration=60, sampling_rate=1000, heart_rate=110, random_state=42)
_, peaks_slow = nk.ecg_process(ecg_slow, sampling_rate=1000)
_, peaks_fast = nk.ecg_process(ecg_fast, sampling_rate=1000)
hrv_slow = nk.hrv_time(peaks_slow, sampling_rate=1000)
hrv_fast = nk.hrv_time(peaks_fast, sampling_rate=1000)
assert np.all(hrv_fast["HRV_RMSSD"] < hrv_slow["HRV_RMSSD"])
assert np.all(hrv_fast["HRV_MeanNN"] < hrv_slow["HRV_MeanNN"])
assert np.all(hrv_fast["HRV_SDNN"] < hrv_slow["HRV_SDNN"])
assert np.all(hrv_fast["HRV_CVNN"] < hrv_slow["HRV_CVNN"])
assert np.all(hrv_fast["HRV_CVSD"] < hrv_slow["HRV_CVSD"])
assert np.all(hrv_fast["HRV_MedianNN"] < hrv_slow["HRV_MedianNN"])
assert np.all(hrv_fast["HRV_MadNN"] < hrv_slow["HRV_MadNN"])
assert np.all(hrv_fast["HRV_MCVNN"] < hrv_slow["HRV_MCVNN"])
assert np.all(hrv_fast["HRV_pNN50"] == hrv_slow["HRV_pNN50"])
assert np.all(hrv_fast["HRV_pNN20"] < hrv_slow["HRV_pNN20"])
assert np.all(hrv_fast["HRV_TINN"] < hrv_slow["HRV_TINN"])
assert np.all(hrv_fast["HRV_HTI"] > hrv_slow["HRV_HTI"])
def test_hrv_frequency():
# Test frequency domain
ecg1 = nk.ecg_simulate(duration=60, sampling_rate=2000, heart_rate=70, random_state=42)
_, peaks1 = nk.ecg_process(ecg1, sampling_rate=2000)
hrv1 = nk.hrv_frequency(peaks1, sampling_rate=2000)
ecg2 = nk.signal_resample(ecg1, sampling_rate=2000, desired_sampling_rate=500)
_, peaks2 = nk.ecg_process(ecg2, sampling_rate=500)
hrv2 = nk.hrv_frequency(peaks2, sampling_rate=500)
assert np.allclose(hrv1["HRV_HF"] - hrv2["HRV_HF"], 0, atol=1.5)
assert np.isnan(hrv1["HRV_LF"][0])
assert np.isnan(hrv2["HRV_LF"][0])
assert np.isnan(hrv1["HRV_VLF"][0])
assert np.isnan(hrv2["HRV_LF"][0])
def test_hrv():
ecg = nk.ecg_simulate(duration=60, sampling_rate=1000, heart_rate=110, random_state=42)
_, peaks = nk.ecg_process(ecg, sampling_rate=1000)
ecg_hrv = nk.hrv(peaks, sampling_rate=1000)
columns = ['HRV_RMSSD', 'HRV_MeanNN', 'HRV_SDNN', 'HRV_SDSD', 'HRV_CVNN',
'HRV_CVSD', 'HRV_MedianNN', 'HRV_MadNN', 'HRV_MCVNN', 'HRV_IQRNN',
'HRV_pNN50', 'HRV_pNN20', 'HRV_TINN', 'HRV_HTI', 'HRV_ULF',
'HRV_VLF', 'HRV_LF', 'HRV_HF', 'HRV_VHF', 'HRV_LFHF', 'HRV_LFn',
'HRV_HFn', 'HRV_LnHF', 'HRV_SD1', 'HRV_SD2', 'HRV_SD1SD2', 'HRV_S',
'HRV_CSI', 'HRV_CVI', 'HRV_CSI_Modified', 'HRV_PIP', 'HRV_IALS',
'HRV_PSS', 'HRV_PAS', 'HRV_GI', 'HRV_SI', 'HRV_AI', 'HRV_PI',
'HRV_C1d', 'HRV_C1a', 'HRV_SD1d',
'HRV_SD1a', 'HRV_C2d',
'HRV_C2a', 'HRV_SD2d', 'HRV_SD2a',
'HRV_Cd', 'HRV_Ca', 'HRV_SDNNd',
'HRV_SDNNa', 'HRV_ApEn', 'HRV_SampEn']
assert all(elem in np.array(ecg_hrv.columns.values, dtype=object) for elem
in columns) | 44.73913 | 96 | 0.661808 | import numpy as np
import neurokit2 as nk
def test_hrv_time():
ecg_slow = nk.ecg_simulate(duration=60, sampling_rate=1000, heart_rate=70, random_state=42)
ecg_fast = nk.ecg_simulate(duration=60, sampling_rate=1000, heart_rate=110, random_state=42)
_, peaks_slow = nk.ecg_process(ecg_slow, sampling_rate=1000)
_, peaks_fast = nk.ecg_process(ecg_fast, sampling_rate=1000)
hrv_slow = nk.hrv_time(peaks_slow, sampling_rate=1000)
hrv_fast = nk.hrv_time(peaks_fast, sampling_rate=1000)
assert np.all(hrv_fast["HRV_RMSSD"] < hrv_slow["HRV_RMSSD"])
assert np.all(hrv_fast["HRV_MeanNN"] < hrv_slow["HRV_MeanNN"])
assert np.all(hrv_fast["HRV_SDNN"] < hrv_slow["HRV_SDNN"])
assert np.all(hrv_fast["HRV_CVNN"] < hrv_slow["HRV_CVNN"])
assert np.all(hrv_fast["HRV_CVSD"] < hrv_slow["HRV_CVSD"])
assert np.all(hrv_fast["HRV_MedianNN"] < hrv_slow["HRV_MedianNN"])
assert np.all(hrv_fast["HRV_MadNN"] < hrv_slow["HRV_MadNN"])
assert np.all(hrv_fast["HRV_MCVNN"] < hrv_slow["HRV_MCVNN"])
assert np.all(hrv_fast["HRV_pNN50"] == hrv_slow["HRV_pNN50"])
assert np.all(hrv_fast["HRV_pNN20"] < hrv_slow["HRV_pNN20"])
assert np.all(hrv_fast["HRV_TINN"] < hrv_slow["HRV_TINN"])
assert np.all(hrv_fast["HRV_HTI"] > hrv_slow["HRV_HTI"])
def test_hrv_frequency():
ecg1 = nk.ecg_simulate(duration=60, sampling_rate=2000, heart_rate=70, random_state=42)
_, peaks1 = nk.ecg_process(ecg1, sampling_rate=2000)
hrv1 = nk.hrv_frequency(peaks1, sampling_rate=2000)
ecg2 = nk.signal_resample(ecg1, sampling_rate=2000, desired_sampling_rate=500)
_, peaks2 = nk.ecg_process(ecg2, sampling_rate=500)
hrv2 = nk.hrv_frequency(peaks2, sampling_rate=500)
assert np.allclose(hrv1["HRV_HF"] - hrv2["HRV_HF"], 0, atol=1.5)
assert np.isnan(hrv1["HRV_LF"][0])
assert np.isnan(hrv2["HRV_LF"][0])
assert np.isnan(hrv1["HRV_VLF"][0])
assert np.isnan(hrv2["HRV_LF"][0])
def test_hrv():
ecg = nk.ecg_simulate(duration=60, sampling_rate=1000, heart_rate=110, random_state=42)
_, peaks = nk.ecg_process(ecg, sampling_rate=1000)
ecg_hrv = nk.hrv(peaks, sampling_rate=1000)
columns = ['HRV_RMSSD', 'HRV_MeanNN', 'HRV_SDNN', 'HRV_SDSD', 'HRV_CVNN',
'HRV_CVSD', 'HRV_MedianNN', 'HRV_MadNN', 'HRV_MCVNN', 'HRV_IQRNN',
'HRV_pNN50', 'HRV_pNN20', 'HRV_TINN', 'HRV_HTI', 'HRV_ULF',
'HRV_VLF', 'HRV_LF', 'HRV_HF', 'HRV_VHF', 'HRV_LFHF', 'HRV_LFn',
'HRV_HFn', 'HRV_LnHF', 'HRV_SD1', 'HRV_SD2', 'HRV_SD1SD2', 'HRV_S',
'HRV_CSI', 'HRV_CVI', 'HRV_CSI_Modified', 'HRV_PIP', 'HRV_IALS',
'HRV_PSS', 'HRV_PAS', 'HRV_GI', 'HRV_SI', 'HRV_AI', 'HRV_PI',
'HRV_C1d', 'HRV_C1a', 'HRV_SD1d',
'HRV_SD1a', 'HRV_C2d',
'HRV_C2a', 'HRV_SD2d', 'HRV_SD2a',
'HRV_Cd', 'HRV_Ca', 'HRV_SDNNd',
'HRV_SDNNa', 'HRV_ApEn', 'HRV_SampEn']
assert all(elem in np.array(ecg_hrv.columns.values, dtype=object) for elem
in columns) | true | true |
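A minimal end-to-end sketch of the neurokit2 API exercised above, assuming the library is installed; the heart rate value is arbitrary.

import neurokit2 as nk

ecg = nk.ecg_simulate(duration=60, sampling_rate=1000, heart_rate=80,
                      random_state=42)
_, peaks = nk.ecg_process(ecg, sampling_rate=1000)  # includes R-peak detection
hrv = nk.hrv(peaks, sampling_rate=1000)             # one-row indicator table
print(hrv[["HRV_RMSSD", "HRV_SDNN", "HRV_HF"]])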
f71c887dca4cf691587ab051359773359de7010e | 3,226 | bzl | Python | build_tools/bazel/iree_lit_test.bzl | smit-hinsu/iree | a385d311b701cdc06cb825000ddb34c8a11c6eef | [
"Apache-2.0"
] | 1 | 2022-02-13T15:27:08.000Z | 2022-02-13T15:27:08.000Z | build_tools/bazel/iree_lit_test.bzl | iree-github-actions-bot/iree | 9982f10090527a1a86cd280b4beff9a579b96b38 | [
"Apache-2.0"
] | 1 | 2022-01-27T18:10:51.000Z | 2022-01-27T18:10:51.000Z | build_tools/bazel/iree_lit_test.bzl | iree-github-actions-bot/iree | 9982f10090527a1a86cd280b4beff9a579b96b38 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 The IREE Authors
#
# Licensed under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
"""Bazel macros for running lit tests."""
load(":lit_test.bzl", "lit_test", "lit_test_suite")
def iree_lit_test(
name,
cfg = "//iree:lit.cfg.py",
tools = None,
env = None,
**kwargs):
"""A thin wrapper around lit_test with some opinionated settings.
See the base lit_test for more details on argument meanings.
Args:
name: name for the test.
cfg: string. lit config file.
tools: label_list. tools that should be included on the PATH.
llvm-symbolizer is added by default.
env: string_dict. Environment variables available to the test at runtime.
FILECHECK_OPTS=--enable-var-scope is added if FILECHECK_OPTS is not
already set.
      **kwargs: additional keyword args to forward to the underlying lit_test.
"""
tools = tools or []
env = env or {}
# Always include llvm-symbolizer so we get useful stack traces. Maybe it
# would be better to force everyone to do this explicitly, but since
# forgetting wouldn't cause the test to fail, only make debugging harder
# when it does, I think better to hardcode it here.
llvm_symbolizer = "@llvm-project//llvm:llvm-symbolizer"
if llvm_symbolizer not in tools:
tools.append(llvm_symbolizer)
filecheck_env_var = "FILECHECK_OPTS"
if filecheck_env_var not in env:
env[filecheck_env_var] = "--enable-var-scope"
lit_test(
name = name,
cfg = cfg,
tools = tools,
env = env,
**kwargs
)
def iree_lit_test_suite(
name,
cfg = "//iree:lit.cfg.py",
tools = None,
env = None,
**kwargs):
"""A thin wrapper around lit_test_suite with some opinionated settings.
See the base lit_test for more details on argument meanings.
Args:
name: name for the test suite.
cfg: string. lit config file.
tools: label_list. tools that should be included on the PATH.
llvm-symbolizer is added by default.
env: string_dict. Environment variables available to the test at runtime.
FILECHECK_OPTS=--enable-var-scope is added if FILECHECK_OPTS is not
already set.
      **kwargs: additional keyword args to forward to the underlying
lit_test_suite.
"""
tools = tools or []
env = env or {}
# Always include llvm-symbolizer so we get useful stack traces. Maybe it
# would be better to force everyone to do this explicitly, but since
# forgetting wouldn't cause the test to fail, only make debugging harder
# when it does, I think better to hardcode it here.
llvm_symbolizer = "@llvm-project//llvm:llvm-symbolizer"
if llvm_symbolizer not in tools:
tools.append(llvm_symbolizer)
filecheck_env_var = "FILECHECK_OPTS"
if filecheck_env_var not in env:
env[filecheck_env_var] = "--enable-var-scope"
lit_test_suite(
name = name,
cfg = cfg,
tools = tools,
env = env,
**kwargs
)
| 32.918367 | 79 | 0.6584 |
load(":lit_test.bzl", "lit_test", "lit_test_suite")
def iree_lit_test(
name,
cfg = "//iree:lit.cfg.py",
tools = None,
env = None,
**kwargs):
tools = tools or []
env = env or {}
# when it does, I think better to hardcode it here.
llvm_symbolizer = "@llvm-project//llvm:llvm-symbolizer"
if llvm_symbolizer not in tools:
tools.append(llvm_symbolizer)
filecheck_env_var = "FILECHECK_OPTS"
if filecheck_env_var not in env:
env[filecheck_env_var] = "--enable-var-scope"
lit_test(
name = name,
cfg = cfg,
tools = tools,
env = env,
**kwargs
)
def iree_lit_test_suite(
name,
cfg = "//iree:lit.cfg.py",
tools = None,
env = None,
**kwargs):
tools = tools or []
env = env or {}
# Always include llvm-symbolizer so we get useful stack traces. Maybe it
# would be better to force everyone to do this explicitly, but since
# forgetting wouldn't cause the test to fail, only make debugging harder
llvm_symbolizer = "@llvm-project//llvm:llvm-symbolizer"
if llvm_symbolizer not in tools:
tools.append(llvm_symbolizer)
filecheck_env_var = "FILECHECK_OPTS"
if filecheck_env_var not in env:
env[filecheck_env_var] = "--enable-var-scope"
lit_test_suite(
name = name,
cfg = cfg,
tools = tools,
env = env,
**kwargs
)
| true | true |
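A hedged sketch of a BUILD file invoking the macro above; the glob pattern and tool label are illustrative assumptions, and srcs is simply forwarded to the underlying lit_test_suite via **kwargs.

load("//build_tools/bazel:iree_lit_test.bzl", "iree_lit_test_suite")

iree_lit_test_suite(
    name = "lit",
    srcs = glob(["*.mlir"]),                    # lit test inputs
    tools = ["@llvm-project//llvm:FileCheck"],  # llvm-symbolizer is added by the macro
)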
f71c8959b58f25069e1143ec6f69c7935fd4843b | 8,176 | py | Python | safe/view.py | s-a-f-e/backend | 6018f51466df9abd58f25729d91856842eee9509 | [
"MIT"
] | 1 | 2019-05-06T19:40:43.000Z | 2019-05-06T19:40:43.000Z | safe/view.py | s-a-f-e/backend | 6018f51466df9abd58f25729d91856842eee9509 | [
"MIT"
] | 9 | 2019-12-04T22:57:46.000Z | 2022-02-10T07:15:11.000Z | safe/view.py | s-a-f-e/backend | 6018f51466df9abd58f25729d91856842eee9509 | [
"MIT"
] | 3 | 2019-05-01T20:41:33.000Z | 2019-10-03T20:57:00.000Z | from people.models import Village, Mother, Driver, HealthCenter, MotherDriverConnection
from django.http import JsonResponse, Http404
from django.core import serializers
from decouple import config
from .geokdbush.geokdbush import around, distance
import requests
import json
import time
FRONTLINE_KEY = config('FRONTLINESMS_SECRET')
MASTER_PHONE = config('MASTER_PHONE')
def village(request, id):
try:
v_obj = Village.objects.get(pk=id)
data = {
'name': v_obj.name,
'latitude': v_obj.latitude,
'longitude': v_obj.longitude,
}
except Village.DoesNotExist:
raise Http404("Village does not exist")
return JsonResponse(data)
def healthcenter(request, id):
try:
v_obj = HealthCenter.objects.get(pk=id)
data = {
'name': v_obj.name,
'latitude': v_obj.latitude,
'longitude': v_obj.longitude,
}
except HealthCenter.DoesNotExist:
raise Http404("HealthCenter does not exist")
return JsonResponse(data)
def mother(request, id):
try:
v_obj = Mother.objects.get(phone=id)
mom_lat = v_obj.latitude
mom_lon = v_obj.longitude
# get all the drivers registered
drivers = Driver.objects.values()
# build the list of drivers
driversLocList = []
for d in drivers:
if d["available"]:
driversLocList.append({
"name": d["name"],
"phone": d["phone"],
"lat": d["latitude"],
"lon": d["longitude"]
})
momloc = {"lon": mom_lon, "lat": mom_lat}
driversList = []
for d in driversLocList:
dist = distance(momloc["lon"], momloc["lat"], d["lon"], d["lat"])
driversList.append((d["name"], d["phone"], dist))
# time to sort the list - sort by 3rd item (distance)
def getKey(item):
return item[2]
closestList = sorted(driversList, key=getKey)
data = {
'name': v_obj.name,
'phone': v_obj.phone,
'village': v_obj.village,
'latitude': v_obj.latitude,
'longitude': v_obj.longitude,
"Drivers": closestList
}
except Mother.DoesNotExist:
register_msg = "No entry found for " + id + \
"\nPlease reply with 'village' and your village name.\nFor example, 'village Iganga'"
url = 'https://cloud.frontlinesms.com/api/1/webhook'
payload = {"apiKey": FRONTLINE_KEY, "payload": {
"message": register_msg, "recipients": [{"type": "mobile", "value": id}]}}
r = requests.post(url, data=json.dumps(payload))
return JsonResponse({"data": register_msg})
# raise Http404("Mother does not exist")
print("MOTHER phone number", v_obj.phone)
# Populate many-to-many table (MotherDriverConnection)
MotherDriverConnection.objects.create(motherPhoneNumber=v_obj.phone, motherName=v_obj.name, motherVillage=v_obj.village, driverPhoneNumber=closestList[0][1], driverIsComing=False)
# ping the SMS server with closest driver
url = 'https://cloud.frontlinesms.com/api/1/webhook'
pickup_msg = "Can you pick up a mother at "+ data["village"] + " village. " \
"\nIf yes, reply with '1', if no, reply with '2'."
payload = {"apiKey": FRONTLINE_KEY, "payload": {"message": pickup_msg,
"recipients": [{"type": "mobile", "value": closestList[0][1]}]}}
r = requests.post(url, data=json.dumps(payload))
return JsonResponse(data)
def regMother(request, id):
parsed = id.split('&', 1)
momPhone = parsed[0]
momVillage = parsed[1]
    # see if the village sent via SMS is in the database
villages = Village.objects.values()
listVillages = list(villages)
    village = list(
        filter(lambda v: v["name"].lower() == momVillage.lower(), listVillages))
    # filter() never raises on a miss, so check for an empty result instead
    if not village:
        print("NOT FOUND VILLAGE")
        return JsonResponse({"msg": "village " + momVillage + " not found."})
momObject = {
"name": "a mother",
"phone": momPhone,
"village": village[0]["name"],
"latitude": village[0]["latitude"],
"longitude": village[0]["longitude"],
}
# enter this mom into database
try:
query = Mother(name="mom", phone=momPhone,
village=village[0]["name"],
latitude=village[0]["latitude"],
longitude=village[0]["longitude"],)
query.save()
    except Exception:
# ToDo: send a text to person monitoring the system
return JsonResponse({"msg": "Error adding new mom to db"})
url = 'https://cloud.frontlinesms.com/api/1/webhook'
mom_msg = "You are registered. Please text 'driver' to request a pickup."
payload = {"apiKey": FRONTLINE_KEY, "payload": {"message": mom_msg,
"recipients": [{"type": "mobile", "value": momPhone}]}}
r = requests.post(url, data=json.dumps(payload))
return JsonResponse(momObject)
def driverOnOffDuty(request, id, onDutyFlag):
try:
m_obj = MotherDriverConnection.objects.filter(driverPhoneNumber=id).values()
json_res = []
for key in m_obj:
m_json = dict(key)
json_res.append(m_json)
if onDutyFlag == 1:
Driver.objects.filter(phone=id).update(available = False)
            # build YES url
url = 'https://cloud.frontlinesms.com/api/1/webhook'
pickup_msg = "Please pick up " + \
json_res[0]["motherName"] + " at " + json_res[0]["motherVillage"] + \
" village. Her number is " + \
json_res[0]["motherPhoneNumber"] + "\nPlease text her to let her know you are on the way."
payload = {"apiKey": FRONTLINE_KEY, "payload": {"message": pickup_msg,
"recipients": [{"type": "mobile", "value": json_res[0]["driverPhoneNumber"]}]}}
r = requests.post(url, data=json.dumps(payload))
# delete connection
MotherDriverConnection.objects.filter(driverPhoneNumber=id).delete()
return JsonResponse({"data": pickup_msg})
if onDutyFlag == 2:
flag = False
Driver.objects.filter(phone=id).update(available = flag)
# delete this connection
MotherDriverConnection.objects.filter(driverPhoneNumber=id).delete()
# API call here to get next driver/make new connection
mother(request, json_res[0]["motherPhoneNumber"])
except Driver.DoesNotExist:
raise Http404("Driver does not exist")
return JsonResponse({"Driver":"Successfully updated"})
def driverOnline(request, id, onlineFlag):
try:
if onlineFlag == "online":
Driver.objects.filter(phone=id).update(available = True)
# build online url
url = 'https://cloud.frontlinesms.com/api/1/webhook'
online_msg = "You are now online. Reply with 'offline' to go offline."
payload = {"apiKey": FRONTLINE_KEY, "payload": {"message": online_msg,
"recipients": [{"type": "mobile", "value": id}]}}
r = requests.post(url, data=json.dumps(payload))
return JsonResponse({"data": online_msg})
if onlineFlag == "offline":
            Driver.objects.filter(phone=id).update(available=False)
            # Build and send the "now offline" confirmation.
url = 'https://cloud.frontlinesms.com/api/1/webhook'
online_msg = "You are now offline. Reply with 'online' to go online."
payload = {"apiKey": FRONTLINE_KEY, "payload": {"message": online_msg,
"recipients": [{"type": "mobile", "value": id}]}}
r = requests.post(url, data=json.dumps(payload))
return JsonResponse({"data": online_msg})
except Driver.DoesNotExist:
raise Http404("Driver does not exist")