is not None:
pulumi.set(__self__, "idle_timeout", idle_timeout)
if key is not None:
pulumi.set(__self__, "key", key)
if local_address is not None:
pulumi.set(__self__, "local_address", local_address)
if mode is not None:
pulumi.set(__self__, "mode", mode)
if mtu is not None:
pulumi.set(__self__, "mtu", mtu)
if name is not None:
pulumi.set(__self__, "name", name)
if partition is not None:
pulumi.set(__self__, "partition", partition)
if profile is not None:
pulumi.set(__self__, "profile", profile)
if remote_address is not None:
pulumi.set(__self__, "remote_address", remote_address)
if secondary_address is not None:
pulumi.set(__self__, "secondary_address", secondary_address)
if tos is not None:
pulumi.set(__self__, "tos", tos)
if traffic_group is not None:
pulumi.set(__self__, "traffic_group", traffic_group)
if transparent is not None:
pulumi.set(__self__, "transparent", transparent)
if use_pmtu is not None:
pulumi.set(__self__, "use_pmtu", use_pmtu)
@property
@pulumi.getter(name="appService")
def app_service(self) -> Optional[pulumi.Input[str]]:
"""
The application service that the object belongs to
"""
return pulumi.get(self, "app_service")
@app_service.setter
def app_service(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "app_service", value)
@property
@pulumi.getter(name="autoLastHop")
def auto_last_hop(self) -> Optional[pulumi.Input[str]]:
"""
Specifies whether auto lasthop is enabled or not
"""
return pulumi.get(self, "auto_last_hop")
@auto_last_hop.setter
def auto_last_hop(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "auto_last_hop", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
User defined description
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="idleTimeout")
def idle_timeout(self) -> Optional[pulumi.Input[int]]:
"""
Specifies an idle timeout for wildcard tunnels in seconds
"""
return pulumi.get(self, "idle_timeout")
@idle_timeout.setter
def idle_timeout(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "idle_timeout", value)
@property
@pulumi.getter
def key(self) -> Optional[pulumi.Input[int]]:
"""
The key field may represent different values depending on the type of the tunnel
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "key", value)
@property
@pulumi.getter(name="localAddress")
def local_address(self) -> Optional[pulumi.Input[str]]:
"""
Specifies a local IP address. This option is required
"""
return pulumi.get(self, "local_address")
@local_address.setter
def local_address(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "local_address", value)
@property
@pulumi.getter
def mode(self) -> Optional[pulumi.Input[str]]:
"""
Specifies how the tunnel carries traffic
"""
return pulumi.get(self, "mode")
@mode.setter
def mode(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "mode", value)
@property
@pulumi.getter
def mtu(self) -> Optional[pulumi.Input[int]]:
"""
Specifies the maximum transmission unit (MTU) of the tunnel
"""
return pulumi.get(self, "mtu")
@mtu.setter
def mtu(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "mtu", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the tunnel
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def partition(self) -> Optional[pulumi.Input[str]]:
"""
Displays the admin-partition within which this component resides
"""
return pulumi.get(self, "partition")
@partition.setter
def partition(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "partition", value)
@property
@pulumi.getter
def profile(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the profile that you want to associate with the tunnel
"""
return pulumi.get(self, "profile")
@profile.setter
def profile(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "profile", value)
@property
@pulumi.getter(name="remoteAddress")
def remote_address(self) -> Optional[pulumi.Input[str]]:
"""
Specifies a remote IP address
"""
return pulumi.get(self, "remote_address")
@remote_address.setter
def remote_address(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "remote_address", value)
@property
@pulumi.getter(name="secondaryAddress")
def secondary_address(self) -> Optional[pulumi.Input[str]]:
"""
Specifies a secondary non-floating IP address when the local-address is set to a floating address
"""
return pulumi.get(self, "secondary_address")
@secondary_address.setter
def secondary_address(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "secondary_address", value)
@property
@pulumi.getter
def tos(self) -> Optional[pulumi.Input[str]]:
"""
Specifies a value for insertion into the Type of Service (ToS) octet within the IP header of the encapsulating header of transmitted packets
"""
return pulumi.get(self, "tos")
@tos.setter
def tos(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "tos", value)
@property
@pulumi.getter(name="trafficGroup")
def traffic_group(self) -> Optional[pulumi.Input[str]]:
"""
Specifies a traffic-group for use with the tunnel
"""
return pulumi.get(self, "traffic_group")
@traffic_group.setter
def traffic_group(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "traffic_group", value)
@property
@pulumi.getter
def transparent(self) -> Optional[pulumi.Input[str]]:
"""
Enables or disables the tunnel to be transparent
"""
return pulumi.get(self, "transparent")
@transparent.setter
def transparent(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "transparent", value)
@property
@pulumi.getter(name="usePmtu")
def use_pmtu(self) -> Optional[pulumi.Input[str]]:
"""
Enables or disables the tunnel to use the PMTU (Path MTU) information provided by ICMP NeedFrag error messages
"""
return pulumi.get(self, "use_pmtu")
@use_pmtu.setter
def use_pmtu(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "use_pmtu", value)
class NetTunnel(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
app_service: Optional[pulumi.Input[str]] = None,
auto_last_hop: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
idle_timeout: Optional[pulumi.Input[int]] = None,
key: Optional[pulumi.Input[int]] = None,
local_address: Optional[pulumi.Input[str]] = None,
mode: Optional[pulumi.Input[str]] = None,
mtu: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
partition: Optional[pulumi.Input[str]] = None,
profile: Optional[pulumi.Input[str]] = None,
remote_address: Optional[pulumi.Input[str]] = None,
secondary_address: Optional[pulumi.Input[str]] = None,
tos: Optional[pulumi.Input[str]] = None,
traffic_group: Optional[pulumi.Input[str]] = None,
transparent: Optional[pulumi.Input[str]] = None,
use_pmtu: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
`NetTunnel` Manages a tunnel configuration
## Example Usage
```python
import pulumi
import pulumi_f5bigip as f5bigip
example1 = f5bigip.NetTunnel("example1",
local_address="172.16.17.32",
name="example1",
profile="/Common/dslite")
```
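Outputs of the resource can then be used like any other Pulumi output, for instance `pulumi.export("tunnel_name", example1.name)`.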
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] app_service: The application service that the object belongs to
:param pulumi.Input[str] auto_last_hop: Specifies whether auto lasthop is enabled or not
:param pulumi.Input[str] description: User defined description
:param pulumi.Input[int] idle_timeout: Specifies an idle timeout for wildcard tunnels in seconds
:param pulumi.Input[int] key: The key field may represent different values depending on the type of the tunnel
:param pulumi.Input[str] local_address: Specifies a local IP address. This option is required
:param pulumi.Input[str] mode: Specifies how the tunnel carries traffic
:param pulumi.Input[int] mtu: Specifies the maximum transmission unit (MTU) of the tunnel
:param pulumi.Input[str] name: Name of the tunnel
:param pulumi.Input[str] partition: Displays the admin-partition within which this component resides
:param pulumi.Input[str] profile: Specifies the profile that you want to associate with the tunnel
:param pulumi.Input[str] remote_address: Specifies a remote IP address
:param pulumi.Input[str] secondary_address: Specifies a secondary non-floating IP address when the local-address is set to a floating address
:param pulumi.Input[str] tos: Specifies a value for insertion into the Type of Service (ToS) octet within the IP header of the encapsulating header of transmitted packets
:param pulumi.Input[str] traffic_group: Specifies a traffic-group for use with the tunnel
:param pulumi.Input[str] transparent: Enables or disables the tunnel to be transparent
:param pulumi.Input[str] use_pmtu: Enables or disables the tunnel to use the PMTU (Path MTU) information provided by ICMP NeedFrag error messages
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: NetTunnelArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
`NetTunnel` Manages a tunnel configuration
## Example Usage
```python
import pulumi
import pulumi_f5bigip as f5bigip
example1 = f5bigip.NetTunnel("example1",
local_address="172.16.17.32",
name="example1",
profile="/Common/dslite")
```
:param str resource_name: The name of the resource.
:param NetTunnelArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(NetTunnelArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
app_service: Optional[pulumi.Input[str]] = None,
auto_last_hop: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
idle_timeout: Optional[pulumi.Input[int]] = None,
key: Optional[pulumi.Input[int]] = None,
local_address: Optional[pulumi.Input[str]] = None,
mode: Optional[pulumi.Input[str]] = None,
mtu: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
partition: Optional[pulumi.Input[str]] = None,
profile: Optional[pulumi.Input[str]] = None,
remote_address: Optional[pulumi.Input[str]] = None,
secondary_address: Optional[pulumi.Input[str]] = None,
tos: Optional[pulumi.Input[str]] = None,
traffic_group: Optional[pulumi.Input[str]] = None,
transparent: Optional[pulumi.Input[str]] = None,
use_pmtu: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = NetTunnelArgs.__new__(NetTunnelArgs)
__props__.__dict__["app_service"] = app_service
__props__.__dict__["auto_last_hop"] = auto_last_hop
__props__.__dict__["description"] = description
__props__.__dict__["idle_timeout"] = idle_timeout
__props__.__dict__["key"] = key
if local_address is None and not opts.urn:
raise TypeError("Missing required property 'local_address'")
__props__.__dict__["local_address"] = local_address
__props__.__dict__["mode"] = mode
__props__.__dict__["mtu"] = mtu
if name is None and not opts.urn:
raise TypeError("Missing required property 'name'")
__props__.__dict__["name"] = name
__props__.__dict__["partition"] = partition
if profile is None and not opts.urn:
raise TypeError("Missing required property 'profile'")
__props__.__dict__["profile"] = profile
__props__.__dict__["remote_address"] = remote_address
__props__.__dict__["secondary_address"] = secondary_address
__props__.__dict__["tos"] = tos
__props__.__dict__["traffic_group"] = traffic_group
__props__.__dict__["transparent"] = transparent
__props__.__dict__["use_pmtu"] = use_pmtu
super(NetTunnel, __self__).__init__(
'f5bigip:index/netTunnel:NetTunnel',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
app_service: Optional[pulumi.Input[str]] = None,
auto_last_hop: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
idle_timeout: Optional[pulumi.Input[int]] = None,
key: Optional[pulumi.Input[int]] = None,
import requests
import pandas as pd
from bs4 import BeautifulSoup
import time
import numpy as np
import random
import logging
def searates(country, country_abr):
response = requests.get(url='https://www.searates.com/maritime/' + country +'.html',)
soup = BeautifulSoup(response.content, 'html.parser')
# This is going to collect all of the ports in the given country
ports = []
for tr in soup.find_all('ul')[1:2]:
tds = tr.find_all('li')
for x in range(len(tds)):
ports.append(tds[x].text)
ports_details = []
# This is going to go through each port in the given country and collect all of the current port data on searates.com
for x in range(len(ports)):
link = ports[x].lower().replace(' ','_')
link = link.replace('-','_')
link = link.replace('(','')
link = link.replace(')','')
link = link.replace(',','')
link = link.replace("'",'')
link = link.replace('/','')
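# The chain of replaces above builds the searates URL slug; an equivalent
# one-liner (untested sketch, would require `import re` at the top of the file):
#   link = re.sub(r"[()',/]", '', ports[x].lower()).replace(' ', '_').replace('-', '_')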
response = requests.get(url='https://www.searates.com/port/' + link + '_' + country_abr + '.htm',)
soup = BeautifulSoup(response.content, 'html.parser')
ports_details.append([ports[x]])
for tr in soup.find_all('table'):
tds = tr.find_all('tr')
for k in range(len(tds) - 1):
ports_details[x].append(tds[k + 1].text)
time.sleep(random.uniform(.5, 9.9)) # Wait a random time between visits to the website to avoid being blacklisted/blocked
col_title = []
# Give col headers to the csv file.
if len(ports_details[0]) > 2:
for x in range(len(ports_details[0])):
x_split = ports_details[0][x].split(':',1)
col_title.append(x_split[0])
col_title[0] = ' Port Name'
else:
for x in range(len(ports_details[-1])):
x_split = ports_details[-1][x].split(':',1)
col_title.append(x_split[0])
col_title[0] = ' Port Name'
# Remove any redundant information from the csv.
for x in range(len(ports_details)):
for k in range(len(ports_details[x])):
data = ports_details[x][k].split(':',1)
if len(data)>1:
ports_details[x][k] = data[1]
# Creates the data frame for the csv.
df = pd.DataFrame(ports_details)
df.columns = col_title
for x in range(len(df)):
name = df[' Port Name'][x].split()
if name[0] == 'Port':
new_name = ' '.join(name[1:])
df[' Port Name'][x] = new_name
df.to_csv(r'../../../../../../scraper/data/ports_vs.csv')
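# Example call (hypothetical inputs; the country name and abbreviation must
# match searates.com's URL scheme):
#   searates('netherlands', 'nl')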
def WPI(country, country_abr):
# This pulls the data from the already-downloaded PUB150 database, which was manually exported from an Access Database file and saved as WPI_complete.csv
file = pd.read_csv(r'../../../../../../scraper/WPI_complete.csv')
df = pd.DataFrame(file)
df_new = df[df['Country '] == country_abr.upper()]
df_new.to_csv(r'../../../../../../scraper/WPI_Data.csv')
def combine_data_frames(country, country_abr):
file1 = pd.read_csv(r'../../../../../../scraper/data/ports_vs.csv')
file2 = pd.read_csv(r'../../../../../../scraper/WPI_Data.csv')
df1 = pd.DataFrame(file1)
df2 = pd.DataFrame(file2)
# Clearing all of the blank rows from the dataframe
df2 = df2.dropna(how='all')
# Creating Latitude and Longitude fields in the PUB150-extracted data, since it does not store coordinates in a readable long/lat format.
lat = []
long = []
df2 = df2.reset_index(drop=True)
for x in range(len(df2)):
lat.append('-' +str(df2['Field4'][x]) + ' ' + str(df2['Field5'][x]) + ' ' + str(df2['Combo353'][x]))
long.append(str(int(df2['Field7'][x])) + ' ' + str(int(df2['Field8'][x])) + ' ' + str(df2['Combo214'][x]))
df2['Latitude'] = lat
df2['Longitude'] = long
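# e.g. Field7=4, Field8=30, Combo214='E' yields the longitude string '4 30 E'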
# Removing white spaces from the columns
df1_col = list(df1.columns)
df2_col = list(df2.columns)
for x in range(len(df1_col)):
df1_col[x] = df1_col[x].strip()
for x in range(len(df2_col)):
df2_col[x] = df2_col[x].strip()
df2.columns = df2_col
df1.columns = df1_col
# Renaming columns so that they match in both dataframes and can easily be combined
df2 = df2.rename(columns = {
'1st Port of Entry' : 'First Port of Entry',
'ETA Message' : 'ETA Message Required',
'U.S. Representative' : 'USA Representative',
'Maximum Size Vessel' : 'Maximum Vessel Size',
'Overhead Limits' : 'Overhead Limit',
'Tide.1' : 'Mean Tide',
'100 Tons Plus' : '100+ Ton Lifts',
'50-100 Tons' : '50-100 Ton Lifts',
'25-49 Tons' : '25-49 Ton Lifts',
'0-24 Tons' : '0-24 Ton Lifts',
'Fixed' : 'Fixed Cranes',
'Mobile' : 'Mobile Cranes',
'Floating' : 'Floating Cranes',
'Electric Repair' : 'Electrical Repair',
'Nav Equipment' : 'Navigation Equipment',
'Repair' : 'Ship Repairs',
'Railway' : 'Marine Railroad Size',
'Drydock' : 'Drydock Size'
})
df1 = df1.rename(columns = {
'Local Assist' : 'Local Assistance',
'Assist' : 'Tug Assistance',
'Salvage' : 'Tug Salvage',
'Deratt Cert' : 'SSCC Cert',
'Radio Tel' : 'Radio Telephone',
'Med Moor' : 'Med. Moor',
'Ice' : 'Ice Moor',
'Beach' : 'Beach Moor',
})
for x in range(len(df2['Port Name'])):
name = df2['Port Name'][x].split()
if name[0] == 'PORT':
new_name = ' '.join(name[1:])
df2['Port Name'][x] = new_name
# Combining both dataframes into one
combine = [df1,df2]
result = pd.concat(combine, ignore_index = True,
keys= ['Port Name', 'Publication', 'Chart',
'Harbor Size', 'Harbor Type', 'Shelter', 'Tide', 'Swell',
'Other', 'Overhead Limit', 'Channel', 'Anchorage', 'Cargo Pier',
'Oil Terminal', 'Mean Tide', 'Maximum Vessel Size',
'Good Holding Ground', 'Turning Area', 'First Port of Entry',
'USA Representative', 'ETA Message Required', 'Compulsory', 'Available',
'Local Assistance', 'Advisable', 'Tug Salvage', 'Tug Assistance',
'Pratique', 'SSCC Cert', 'Other.1', 'Telephone', 'Telefax', 'Radio',
'Radio Telephone', 'Air', 'Rail', 'Wharves', 'Anchor', 'Med. Moor',
'Beach Moor', 'Ice Moor', 'Medical Facilities', 'Garbage Disposal',
'Degauss', 'Dirty Ballast', 'Fixed Cranes', 'Mobile Cranes',
'Floating Cranes', '100+ Ton Lifts', '50-100 Ton Lifts',
'25-49 Ton Lifts', '0-24 Ton Lifts', 'Longshore', 'Electrical', 'Steam',
'Navigation Equipment', 'Electrical Repair', 'Provisions', 'Water',
'Fuel Oil', 'Diesel Oil', 'Deck', 'Engine', 'Ship Repairs',
'Drydock Size', 'Marine Railroad Size', 'Latitude', 'Longitude'],
sort=False)
# Formatting the combined data frame so that the country abbreviation is filled for all ports, and capitalizing all port names for uniform data
result['Country'] = df2['Country'][0]
result['Port Name'] = result['Port Name'].str.upper()
result = result.drop(columns = ['Combo214', 'Combo353', 'Field4', 'Field5', 'Field7', 'Field8',
'Unnamed: 0', 'Index No.', 'Region', 'Ice', 'Telefax'])
# Reordering to move the Country abr to the first column
first_column = result.pop('Country')
result.insert(0, 'Country', first_column)
# Writes the csv of the combined data
result.to_csv(r'../../../../../../scraper/data/data_before_clean.csv')
clean_up(country, country_abr)
# Remove duplicate port info
def clean_up(country, country_abr):
file = pd.read_csv(r'../../../../../../scraper/data/data_before_clean.csv')
df = pd.DataFrame(file)
df = df.replace(np.nan, '', regex=True)
# This section is a very roundabout way to check for duplicates and prevent data loss when merging the data frames.
# It could be quicker but I could not figure out a way to make faster functions work without data loss.
df = df.replace(r'^\s*$', 'nan', regex=True)
data = [[x] for x in df.columns]
test = []
for x in range(len(df)):
w = 0
k = 0
name = df['Port Name'][x].split()
if name[0] == 'PORT':
name = name[1]
else:
name = ' '.join(name)
while k < len(df):
name2 = df['Port Name'][k].split()
if name2[0] == 'PORT':
name2 = name2[1]
else:
name2 = ' '.join(name2)
if name == name2 and x != k:
if k not in test:
test.append(x)
set1 = df.iloc[x]
set2 = df.iloc[k]
for j in range(len(data)):
if set1[j] == set2[j]:
data[j].append(set1[j])
elif set1[j] == 'nan':
data[j].append(set2[j])
elif set2[j] == 'nan':
data[j].append(set1[j])
else:
data[j].append(set1[j])
break
k+=1
if k == len(df):
set1 = df.iloc[x]
check = set1['Port Name']
check = check.split()
if check[-1] == 'HARBOR' or check[-1] == 'HARBOUR':
check = ' '.join(check[:-1])
if len(data[2]) > 1:
for x in data[2]:
if check in x:
w = 1
break
if w == 1:
break
for j in range(len(data)):
data[j].append(set1[j])
final = pd.DataFrame(data)
final = final.transpose()
final.columns = df.columns
final = final.drop([0])
final = final.replace('nan', '', regex=True)
# End of roundabout data loss fix.
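# A likely-faster alternative to the loop above (untested sketch; ignores the
# HARBOR/HARBOUR suffix special case): normalise the port name into a grouping
# key, then let pandas merge duplicate rows, keeping the first non-null value
# per column:
#   key = df['Port Name'].str.replace(r'^PORT\s+', '', regex=True)
#   merged = df.replace('nan', np.nan).groupby(key, as_index=False).first()
# groupby(...).first() skips NaNs per column, which is the same "prefer the
# non-empty value" rule the loop implements.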
# This section cleans the data into the format agreed on for data reporting.
# The columns dropped below were deemed unnecessary at the time; however,
# the program still scrapes them and only drops them here, in case these data points are needed in the future.
final = final.drop(['Unnamed: 0', 'UN/LOCODE', '800 Number', 'Max Draft', 'ETA Message Required', 'Other', 'Advisable', 'Local Assistance', 'Other.1', 'SSCC Cert', 'Telephone', 'Radio', 'Air', 'Telegraph',
'Radio Telephone', 'Ice Moor', 'Anchor', 'Beach Moor', 'Electrical Repair', 'Steam', 'Electrical', 'Navigation Equipment', 'Engine', 'Degauss', 'Garbage Disposal', 'Dirty Ballast'], axis = 1)
# This section cleans the data into a uniform standard through hard coding, along with decoding some of the encoded fields.
final['Harbor Size'].replace({
# HARBOR SIZE
'L': 'Large',
'M': 'Medium',
'S': 'Small' ,
'V': 'Very Small'
}, inplace=True)
final['Harbor Type'].replace({
# HARBOR TYPE
'RT' : 'River Tide Gate' ,
'LC' : 'Lake or Canal' ,
'OR' : 'Open Roadstead' ,
'TH' : 'Typhoon Harbor' ,
'RN' : 'River Natural',
'CN' : 'Coastal Natural',
'CB' : 'Coastal Breakwater',
'CT' : 'Coastal Tide Gate' ,
'RB' : 'River Basin',
'N' : 'None'
}, inplace=True)
final['Shelter'].replace({
# SHELTER AFFORDED
'E' : 'Excellent',
'G' : 'Good',
'F' : 'Fair',
- 0.59*m.x672 - 0.59*m.x688 + 17.66*m.x711 + 17.66*m.x717 + 17.66*m.x742
- 48.98*m.x759 - 48.98*m.x769 - 48.98*m.x784 - 21.57*m.x801 - 21.57*m.x826 - 60.67*m.x869
- 60.67*m.x878 - 60.67*m.x887 + 17.14*m.x893 + 17.14*m.x911 + 17.14*m.x921 - 10.14*m.x940
- 10.14*m.x958 - 44.89*m.x985 - 44.89*m.x994 - 44.89*m.x1012 - 40.43*m.x1026 - 40.43*m.x1035
+ 6.34*m.x1072 - 2.76*m.x1095 - 49.6*m.x1157 - 60.67*m.x1210 - 44.89*m.x1236 - 40.43*m.x1244
<= 0)
m.c135 = Constraint(expr= - 21.16*m.x98 - 21.16*m.x104 - 56.01*m.x142 - 56.01*m.x151 - 56.01*m.x169 - 56.01*m.x177
- 68.35*m.x192 - 68.35*m.x202 - 68.35*m.x224 - 62.93*m.x243 - 62.93*m.x253 - 62.93*m.x266
- 62.93*m.x274 - 62.93*m.x284 - 38.99*m.x311 - 38.99*m.x321 - 38.99*m.x336
- 0.220000000000001*m.x368 - 0.220000000000001*m.x376 - 0.220000000000001*m.x386
- 61.67*m.x413 - 61.67*m.x423 - 61.67*m.x436 - 44.39*m.x467 - 44.39*m.x477 - 44.39*m.x483
- 44.39*m.x501 - 44.39*m.x509 - 60.02*m.x532 - 60.02*m.x542 - 60.02*m.x564 - 60.02*m.x574
- 13.58*m.x593 - 13.58*m.x603 - 13.58*m.x609 - 13.58*m.x634 - 13.58*m.x642 - 19.98*m.x672
- 19.98*m.x688 - 44.16*m.x711 - 44.16*m.x717 - 44.16*m.x742 - 19.66*m.x759 - 19.66*m.x769
- 19.66*m.x784 - 19.22*m.x801 - 19.22*m.x826 + 0.17*m.x869 + 0.17*m.x878 + 0.17*m.x887
- 51.96*m.x893 - 51.96*m.x911 - 51.96*m.x921 - 32*m.x940 - 32*m.x958 - 72.65*m.x985
- 72.65*m.x994 - 72.65*m.x1012 - 62.2*m.x1026 - 62.2*m.x1035 - 56.01*m.x1072 - 62.93*m.x1095
- 13.58*m.x1157 + 0.17*m.x1210 - 72.65*m.x1236 - 62.2*m.x1244 <= 0)
m.c136 = Constraint(expr= - 0.73*m.x98 - 0.73*m.x104 - 56.86*m.x142 - 56.86*m.x151 - 56.86*m.x169 - 56.86*m.x177
- 5.5*m.x192 - 5.5*m.x202 - 5.5*m.x224 - 75.8*m.x243 - 75.8*m.x253 - 75.8*m.x266 - 75.8*m.x274
- 75.8*m.x284 - 55.95*m.x311 - 55.95*m.x321 - 55.95*m.x336 - 73.31*m.x368 - 73.31*m.x376
- 73.31*m.x386 - 60.07*m.x413 - 60.07*m.x423 - 60.07*m.x436 - 14.15*m.x467 - 14.15*m.x477
- 14.15*m.x483 - 14.15*m.x501 - 14.15*m.x509 - 37.37*m.x532 - 37.37*m.x542 - 37.37*m.x564
- 37.37*m.x574 - 3.27*m.x593 - 3.27*m.x603 - 3.27*m.x609 - 3.27*m.x634 - 3.27*m.x642
- 57.42*m.x672 - 57.42*m.x688 - 52.81*m.x711 - 52.81*m.x717 - 52.81*m.x742 - 62.03*m.x759
- 62.03*m.x769 - 62.03*m.x784 - 62.54*m.x801 - 62.54*m.x826 - 52.78*m.x869 - 52.78*m.x878
- 52.78*m.x887 - 70.69*m.x893 - 70.69*m.x911 - 70.69*m.x921 - 55.66*m.x940 - 55.66*m.x958
- 35.19*m.x985 - 35.19*m.x994 - 35.19*m.x1012 - 5.22*m.x1026 - 5.22*m.x1035 - 56.86*m.x1072
- 75.8*m.x1095 - 3.27*m.x1157 - 52.78*m.x1210 - 35.19*m.x1236 - 5.22*m.x1244 <= 0)
m.c137 = Constraint(expr= - 1.53*m.x98 - 1.53*m.x104 - 20.74*m.x142 - 20.74*m.x151 - 20.74*m.x169 - 20.74*m.x177
- 12.68*m.x192 - 12.68*m.x202 - 12.68*m.x224 - 39.54*m.x243 - 39.54*m.x253 - 39.54*m.x266
- 39.54*m.x274 - 39.54*m.x284 - 13.87*m.x311 - 13.87*m.x321 - 13.87*m.x336 - 40.66*m.x368
- 40.66*m.x376 - 40.66*m.x386 - 10.71*m.x413 - 10.71*m.x423 - 10.71*m.x436 + 3.42*m.x467
+ 3.42*m.x477 + 3.42*m.x483 + 3.42*m.x501 + 3.42*m.x509 + 3.61*m.x532 + 3.61*m.x542
+ 3.61*m.x564 + 3.61*m.x574 - 17.86*m.x593 - 17.86*m.x603 - 17.86*m.x609 - 17.86*m.x634
- 17.86*m.x642 - 13.07*m.x672 - 13.07*m.x688 - 8.56*m.x711 - 8.56*m.x717 - 8.56*m.x742
- 61.21*m.x759 - 61.21*m.x769 - 61.21*m.x784 - 10.8*m.x801 - 10.8*m.x826 + 8.19*m.x869
+ 8.19*m.x878 + 8.19*m.x887 - 5.02*m.x893 - 5.02*m.x911 - 5.02*m.x921 - 65*m.x940 - 65*m.x958
- 62.3*m.x985 - 62.3*m.x994 - 62.3*m.x1012 - 31.74*m.x1026 - 31.74*m.x1035 - 20.74*m.x1072
- 39.54*m.x1095 - 17.86*m.x1157 + 8.19*m.x1210 - 62.3*m.x1236 - 31.74*m.x1244 <= 0)
m.c138 = Constraint(expr= - 19.22*m.x98 - 19.22*m.x104 - 16.17*m.x142 - 16.17*m.x151 - 16.17*m.x169 - 16.17*m.x177
- 62.83*m.x192 - 62.83*m.x202 - 62.83*m.x224 - 14.12*m.x243 - 14.12*m.x253 - 14.12*m.x266
- 14.12*m.x274 - 14.12*m.x284 - 65.79*m.x311 - 65.79*m.x321 - 65.79*m.x336 - 28.38*m.x368
- 28.38*m.x376 - 28.38*m.x386 - 17.99*m.x413 - 17.99*m.x423 - 17.99*m.x436 - 7.04*m.x467
- 7.04*m.x477 - 7.04*m.x483 - 7.04*m.x501 - 7.04*m.x509 - 56.35*m.x532 - 56.35*m.x542
- 56.35*m.x564 - 56.35*m.x574 - 28.1*m.x593 - 28.1*m.x603 - 28.1*m.x609 - 28.1*m.x634
- 28.1*m.x642 + 1.33*m.x672 + 1.33*m.x688 - 27.38*m.x711 - 27.38*m.x717 - 27.38*m.x742
- 0.67*m.x759 - 0.67*m.x769 - 0.67*m.x784 - 64.91*m.x801 - 64.91*m.x826 - 54.98*m.x869
- 54.98*m.x878 - 54.98*m.x887 - 8.56*m.x893 - 8.56*m.x911 - 8.56*m.x921 - 14.64*m.x940
- 14.64*m.x958 - 49.36*m.x985 - 49.36*m.x994 - 49.36*m.x1012 - 43.4*m.x1026 - 43.4*m.x1035
- 16.17*m.x1072 - 14.12*m.x1095 - 28.1*m.x1157 - 54.98*m.x1210 - 49.36*m.x1236 - 43.4*m.x1244
<= 0)
m.c139 = Constraint(expr= - 7.11*m.x98 - 7.11*m.x104 - 2.28*m.x142 - 2.28*m.x151 - 2.28*m.x169 - 2.28*m.x177
- 59.24*m.x192 - 59.24*m.x202 - 59.24*m.x224 - 55.4*m.x243 - 55.4*m.x253 - 55.4*m.x266
- 55.4*m.x274 - 55.4*m.x284 - 3.1*m.x311 - 3.1*m.x321 - 3.1*m.x336 - 45.36*m.x368
- 45.36*m.x376 - 45.36*m.x386 - 64.08*m.x413 - 64.08*m.x423 - 64.08*m.x436 - 28.23*m.x467
- 28.23*m.x477 - 28.23*m.x483 - 28.23*m.x501 - 28.23*m.x509 - 20.85*m.x532 - 20.85*m.x542
- 20.85*m.x564 - 20.85*m.x574 + 3.23*m.x593 + 3.23*m.x603 + 3.23*m.x609 + 3.23*m.x634
+ 3.23*m.x642 - 30.37*m.x672 - 30.37*m.x688 - 66.1*m.x711 - 66.1*m.x717 - 66.1*m.x742
- 14.35*m.x759 - 14.35*m.x769 - 14.35*m.x784 - 10.75*m.x801 - 10.75*m.x826 - 1.53*m.x869
- 1.53*m.x878 - 1.53*m.x887 - 4.02*m.x893 - 4.02*m.x911 - 4.02*m.x921 - 49.38*m.x940
- 49.38*m.x958 - 31.99*m.x985 - 31.99*m.x994 - 31.99*m.x1012 + 0.94*m.x1026 + 0.94*m.x1035
- 2.28*m.x1072 - 55.4*m.x1095 + 3.23*m.x1157 - 1.53*m.x1210 - 31.99*m.x1236 + 0.94*m.x1244
<= 0)
m.c140 = Constraint(expr= - 79.65*m.x98 - 79.65*m.x104 - 59.44*m.x142 - 59.44*m.x151 - 59.44*m.x169 - 59.44*m.x177
- 54.24*m.x192 - 54.24*m.x202 - 54.24*m.x224 - 55*m.x243 - 55*m.x253 - 55*m.x266 - 55*m.x274
- 55*m.x284 - 33.33*m.x311 - 33.33*m.x321 - 33.33*m.x336 - 47.35*m.x368 - 47.35*m.x376
- 47.35*m.x386 - 2.63*m.x413 - 2.63*m.x423 - 2.63*m.x436 - 13.23*m.x467 - 13.23*m.x477
- 13.23*m.x483 - 13.23*m.x501 - 13.23*m.x509 - 33.45*m.x532 - 33.45*m.x542 - 33.45*m.x564
- 33.45*m.x574 - 34.81*m.x593 - 34.81*m.x603 - 34.81*m.x609 - 34.81*m.x634 - 34.81*m.x642
- 9.3*m.x672 - 9.3*m.x688 - 58.84*m.x711 - 58.84*m.x717 - 58.84*m.x742 - 53.99*m.x759
- 53.99*m.x769 - 53.99*m.x784 - 9.12*m.x801 - 9.12*m.x826 - 68.46*m.x869 - 68.46*m.x878
- 68.46*m.x887 - 9.51*m.x893 - 9.51*m.x911 - 9.51*m.x921 - 4.63*m.x940 - 4.63*m.x958
- 60.95*m.x985 - 60.95*m.x994 - 60.95*m.x1012 - 40.71*m.x1026 - 40.71*m.x1035 - 59.44*m.x1072
- 55*m.x1095 - 34.81*m.x1157 - 68.46*m.x1210 - 60.95*m.x1236 - 40.71*m.x1244 <= 0)
m.c141 = Constraint(expr= - 3.92*m.x98 - 3.92*m.x104 - 58.03*m.x142 - 58.03*m.x151 - 58.03*m.x169 - 58.03*m.x177
- 62.37*m.x192 - 62.37*m.x202 - 62.37*m.x224 - 5.87*m.x243 - 5.87*m.x253 - 5.87*m.x266
- 5.87*m.x274 - 5.87*m.x284 - 28.58*m.x311 - 28.58*m.x321 - 28.58*m.x336 - 52.69*m.x368
- 52.69*m.x376 - 52.69*m.x386 - 41.26*m.x413 - 41.26*m.x423 - 41.26*m.x436 - 52.25*m.x467
- 52.25*m.x477 - 52.25*m.x483 - 52.25*m.x501 - 52.25*m.x509 - 6.9*m.x532 - 6.9*m.x542
- 6.9*m.x564 - 6.9*m.x574 - 69.57*m.x593 - 69.57*m.x603 - 69.57*m.x609 - 69.57*m.x634
- 69.57*m.x642 - 55.16*m.x672 - 55.16*m.x688 - 21.61*m.x711 - 21.61*m.x717 - 21.61*m.x742
- 40.22*m.x759 - 40.22*m.x769 - 40.22*m.x784 - 25.56*m.x801 - 25.56*m.x826 - 34.56*m.x869
- 34.56*m.x878 - 34.56*m.x887 - 26.93*m.x893 - 26.93*m.x911 - 26.93*m.x921 - 4.6*m.x940
- 4.6*m.x958 - 70.94*m.x985 - 70.94*m.x994 - 70.94*m.x1012 - 38.11*m.x1026 - 38.11*m.x1035
- 58.03*m.x1072 - 5.87*m.x1095 - 69.57*m.x1157 - 34.56*m.x1210 - 70.94*m.x1236 - 38.11*m.x1244
<= 0)
m.c142 = Constraint(expr= 3.83*m.x98 + 3.83*m.x104 - 25.68*m.x142 - 25.68*m.x151 - 25.68*m.x169 - 25.68*m.x177
- 8.47*m.x192 - 8.47*m.x202 - 8.47*m.x224 - 46.17*m.x243 - 46.17*m.x253 - 46.17*m.x266
- 46.17*m.x274 - 46.17*m.x284 + 10.77*m.x311 + 10.77*m.x321 + 10.77*m.x336 - 53.04*m.x368
- 53.04*m.x376 - 53.04*m.x386 - 55.13*m.x413 - 55.13*m.x423 - 55.13*m.x436 - 54.19*m.x467
- 54.19*m.x477 - 54.19*m.x483 - 54.19*m.x501 - 54.19*m.x509 + 19.24*m.x532 + 19.24*m.x542
+ 19.24*m.x564 + 19.24*m.x574 - 28.33*m.x593 - 28.33*m.x603 - 28.33*m.x609 - 28.33*m.x634
- 28.33*m.x642 - 55.34*m.x672 - 55.34*m.x688 - 41.04*m.x711 - 41.04*m.x717 - 41.04*m.x742
- 0.190000000000001*m.x759 - 0.190000000000001*m.x769 - 0.190000000000001*m.x784 + 10*m.x801
+ 10*m.x826 - 21.5*m.x869 - 21.5*m.x878 - 21.5*m.x887 + 2.12*m.x893 + 2.12*m.x911
+ 2.12*m.x921 - 51.41*m.x940 - 51.41*m.x958 - 35.78*m.x985 - 35.78*m.x994 - 35.78*m.x1012
- 42.74*m.x1026 - 42.74*m.x1035 - 25.68*m.x1072 - 46.17*m.x1095 - 28.33*m.x1157 - 21.5*m.x1210
- 35.78*m.x1236 - 42.74*m.x1244 <= 0)
m.c143 = Constraint(expr= - 7.13000000000001*m.x90 - 7.13000000000001*m.x122 - 7.13000000000001*m.x131 - 7.55*m.x162
- 12.97*m.x184 - 12.97*m.x217 - 12.97*m.x232 - 40.69*m.x275 - 40.69*m.x292 - 44.43*m.x303
- 44.43*m.x329 - 56.66*m.x345 - 56.66*m.x361 - 56.66*m.x377 - 56.66*m.x394 - 5.61*m.x405
- 5.61*m.x437 - 5.61*m.x448 - 53.04*m.x459 - 53.04*m.x494 - 53.04*m.x510 - 53.04*m.x521
- 18.47*m.x557 - 18.47*m.x565 - 18.47*m.x582 - 57.47*m.x627 - 57.47*m.x643 - 57.47*m.x654
- 27.2*m.x665 - 27.2*m.x689 - 27.2*m.x700 - 61.43*m.x735 - 72.17*m.x751 - 72.17*m.x777
- 59.63*m.x793 - 59.63*m.x819 - 59.63*m.x827 - 59.63*m.x838 - 69.86*m.x850 - 37.89*m.x861
- 78.35*m.x904 - 78.35*m.x912 - 78.35*m.x929 - 25.98*m.x951 - 25.98*m.x966 - 12.66*m.x977
- 12.66*m.x1005 - 8.54000000000001*m.x1019 - 8.54000000000001*m.x1036
- 8.54000000000001*m.x1047 - 7.13000000000001*m.x1062 - 53.04*m.x1136 - 27.2*m.x1167
- 61.43*m.x1178 - 25.98*m.x1227 <= 0)
m.c144 = Constraint(expr= - 87.16*m.x90 - 87.16*m.x122 - 87.16*m.x131 - 48.14*m.x162 - 42.23*m.x184 - 42.23*m.x217
- 42.23*m.x232 - 33.21*m.x275 -
import os
from pyglet.gl import *
import numpy
from pygly.shader import Shader
import pymesh.obj
from razorback.mesh import Mesh
class Data( object ):
shader_source = {
'vert': open(os.path.dirname(__file__) + '/obj.vert','r').read(),
'frag': open(os.path.dirname(__file__) + '/obj.frag','r').read()
}
_data = {}
@classmethod
def load( cls, filename ):
# check if the model has been loaded previously
if filename in Data._data:
# create a new mesh with the same data
return Data._data[ filename ]
data = cls( filename )
# store mesh for later
Data._data[ filename ] = data
return data
@classmethod
def unload( cls, filename ):
if filename in Data._data:
del Data._data[ filename ]
def __init__( self, filename = None, buffer = None ):
super( Data, self ).__init__()
self.meshes = {}
# create our shader
self.shader = Shader(
vert = Data.shader_source['vert'],
frag = Data.shader_source['frag']
)
self.shader.attribute( 0, 'in_position' )
self.shader.attribute( 1, 'in_texture_coord' )
self.shader.attribute( 2, 'in_normal' )
self.shader.frag_location( 'fragColor' )
self.shader.link()
# bind our uniform indices
self.shader.bind()
self.shader.uniformi( 'tex0', 0 )
self.shader.unbind()
self.obj = pymesh.obj.OBJ()
if filename is not None:
self.obj.load( filename )
else:
self.obj.load_from_buffer( buffer )
self._load()
def _load( self ):
"""
Processes the data loaded by the OBJ loader
"""
# convert the obj data into data for the gpu
# first, load our vertex buffer objects
self._load_vertex_buffers()
def _load_vertex_buffers( self ):
# we need to convert from 3 lists with 3 sets of indices
# to 3 lists with 1 set of indices
# so for each index, we need to check if we already
# have a matching vertex, and if not, make one
raw_vertices = numpy.array( self.obj.model.vertices, dtype = 'float32' )
raw_texture_coords = numpy.array( self.obj.model.texture_coords, dtype = 'float32' )
raw_normals = numpy.array( self.obj.model.normals, dtype = 'float32' )
# calculate our number of vertices
# this way we can pre-allocate our arrays
num_vertices = 0
for mesh in self.obj.model.meshes:
num_vertices += len(mesh['points'])
# convert line strips to line segments
# (size - 1) * 2
for row in mesh['lines']:
num_vertices += ((len(row) - 1) * 2)
# convert triangle fans to triangles
# (size - 2) * 3
for row in mesh['faces']:
num_vertices += ((len(row) - 2) * 3)
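# e.g. a 5-point line strip expands to (5 - 1) * 2 = 8 segment vertices,
# and a 6-point triangle fan expands to (6 - 2) * 3 = 12 triangle vertices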
vertices = numpy.empty( (num_vertices, 3), dtype = 'float32' )
texture_coords = numpy.empty( (num_vertices, 2), dtype = 'float32' )
normals = numpy.empty( (num_vertices, 3), dtype = 'float32' )
current_offset = 0
for mesh in self.obj.model.meshes:
print( 'Name:', mesh[ 'name' ], 'Groups:', mesh['groups'] )
initial_offset = current_offset
# check if we need to create a point mesh
num_point_indices = len(mesh['points'])
if num_point_indices > 0:
points = numpy.array( mesh['points'] )
# ensure we don't have any 'None' values
points = numpy.ma.masked_values( points, None ).filled( 0 ).astype('int32')
# separate vertices, texture coords and normals
points_v = points[ :, 0 ].repeat( 3 )
points_t = points[ :, 1 ].repeat( 2 )
points_n = points[ :, 2 ].repeat( 3 )
# ndarray has no .tile method; use numpy.tile, as the faces section below does
v_d_indices = numpy.tile( numpy.array([0,1,2], dtype = 'int32'), len(points) )
t_d_indices = numpy.tile( numpy.array([0,1], dtype = 'int32'), len(points) )
n_d_indices = numpy.tile( numpy.array([0,1,2], dtype = 'int32'), len(points) )
# extract the indices
start, end = current_offset, current_offset + num_point_indices
vertices[ start:end ] = raw_vertices[ points_v, v_d_indices ].reshape( num_point_indices, 3 )
texture_coords[ start:end ] = raw_texture_coords[ points_t, t_d_indices ].reshape( num_point_indices, 2 )
normals[ start:end ] = raw_normals[ points_n, n_d_indices ].reshape( num_point_indices, 3 )
# increment the current offset
current_offset += num_point_indices
# check if we need to create a line mesh
num_line_indices = len(mesh['lines'])
if num_line_indices > 0:
# each line tuple is a line strip
# the easiest way to render is to convert to
# line segments
def convert_to_lines( strip ):
previous = strip[ 0 ]
for point in strip[ 1: ]:
yield previous
yield point
previous = point
# convert each line strip into line segments
lines = numpy.array([
point
for strip in mesh['lines']
for point in convert_to_lines( strip )
])
# update the number of lines
num_line_indices = len(lines)
# ensure we don't have any 'None' values
lines = numpy.ma.masked_values( lines, None ).filled( 0 ).astype('int32')
# separate vertices, texture coords and normals
lines_v = lines[ :, 0 ]
lines_t = lines[ :, 1 ]
lines_n = lines[ :, 2 ]
# extract the indices
start, end = current_offset, current_offset + num_line_indices
vertices[ start:end ] = raw_vertices[ lines_v ]
texture_coords[ start:end ] = raw_texture_coords[ lines_t ]
normals[ start:end ] = raw_normals[ lines_n ]
# increment the current offset
current_offset += num_line_indices
# check if we need to create a face mesh
num_face_indices = len(mesh['faces'])
if num_face_indices > 0:
# faces are stored as a list of triangle fans
# we need to convert them to triangles
def convert_to_triangles( fan ):
# convert from triangle fan
# 0, 1, 2, 3, 4, 5
# to triangle list
# 0, 1, 2, 0, 2, 3, 0, 3, 4, 0, 4, 5
start = fan[ 0 ]
previous = fan[ 1 ]
for point in fan[ 2: ]:
yield start
yield previous
yield point
previous = point
# convert each triangle face to triangles
faces = numpy.array([
value
for triangle in mesh['faces']
for value in convert_to_triangles( triangle )
])
# update the number of faces
num_face_indices = len(faces)
# replace None with invalid values
faces = numpy.ma.masked_values( faces, None ).filled( 0 ).astype('int32')
# separate vertices, texture coords and normals
faces_v = faces[ :, 0 ].repeat( 3 )
faces_t = faces[ :, 1 ].repeat( 2 )
faces_n = faces[ :, 2 ].repeat( 3 )
indices3 = numpy.tile([0,1,2], num_face_indices ).astype( 'int32' )
indices2 = numpy.tile([0,1], num_face_indices ).astype( 'int32' )
# extract the indices
start, end = current_offset, current_offset + num_face_indices
extracted_v = raw_vertices[ faces_v, indices3 ].reshape( num_face_indices, 3 )
vertices[ start:end ] = extracted_v
if raw_texture_coords.size > 0:
extracted_t = raw_texture_coords[ faces_t, indices2 ].reshape( num_face_indices, 2 )
texture_coords[ start:end ] = extracted_t
else:
texture_coords[ start:end ] = [ 0.0, 0.0 ]
if raw_normals.size > 0:
extracted_n = raw_normals[ faces_n, indices3 ].reshape( num_face_indices, 3 )
normals[ start:end ] = extracted_n
else:
normals[ start:end ] = [ 0.0, 0.0, 0.0 ]
# increment the current offset
current_offset += num_face_indices
# store our indices
gl_data = (
(initial_offset, num_point_indices),
(initial_offset + num_point_indices, num_line_indices),
(initial_offset + num_point_indices + num_line_indices, num_face_indices)
)
# add the mesh to each of the mesh groups
# each group has a list of meshes it owns
for group in mesh['groups']:
if group not in self.meshes:
self.meshes[ group ] = []
self.meshes[ group ].append( gl_data )
self.vao = (GLuint)()
glGenVertexArrays( 1, self.vao )
glBindVertexArray( self.vao )
# create our global vertex data
self.vbo = (GLuint * 3)()
glGenBuffers( 3, self.vbo )
vertices = vertices.flatten()
texture_coords = texture_coords.flatten()
normals = normals.flatten()
# create a VBO for our vertices
glBindBuffer( GL_ARRAY_BUFFER, self.vbo[ 0 ] )
glBufferData(
GL_ARRAY_BUFFER,
vertices.nbytes,
(GLfloat * len(vertices))(*vertices.flat),
GL_STATIC_DRAW
)
# create a VBO for our texture coordinates
glBindBuffer( GL_ARRAY_BUFFER, self.vbo[ 1 ] )
glBufferData(
GL_ARRAY_BUFFER,
texture_coords.nbytes,
(GLfloat * len(texture_coords))(*texture_coords.flat),
GL_STATIC_DRAW
)
# create a VBO for our normals
glBindBuffer( GL_ARRAY_BUFFER, self.vbo[ 2 ] )
glBufferData(
GL_ARRAY_BUFFER,
normals.nbytes,
(GLfloat * len(normals))(*normals.flat),
GL_STATIC_DRAW
)
# unbind our buffers
glBindBuffer( GL_ARRAY_BUFFER, 0 )
glBindVertexArray( 0 )
def render( self, projection, model_view, groups ):
self.shader.bind()
self.shader.uniform_matrixf( 'model_view', model_view.flat )
self.shader.uniform_matrixf( 'projection', projection.flat )
glBindVertexArray( self.vao )
# bind our global data
# vertices
glBindBuffer( GL_ARRAY_BUFFER, self.vbo[ 0 ] )
glVertexAttribPointer( 0, 3, GL_FLOAT, GL_FALSE, 0, 0 )
glEnableVertexAttribArray( 0 )
# texture coords
glBindBuffer( GL_ARRAY_BUFFER, self.vbo[ 1 ] )
glVertexAttribPointer( 1, 2, GL_FLOAT, GL_FALSE, 0, 0 )
glEnableVertexAttribArray( 1 )
# normals
glBindBuffer( GL_ARRAY_BUFFER, self.vbo[ 2 ] )
glVertexAttribPointer( 2, 3, GL_FLOAT, GL_FALSE, 0, 0 )
glEnableVertexAttribArray( 2 )
# iterate through the specified groups
for group in groups:
# get the group
for mesh in self.meshes[ group ]:
points, lines, faces = mesh
point_start, point_count = points
line_start, line_count = lines
face_start, face_count = faces
if points[ 1 ] > 0:
glDrawArrays( GL_POINTS, point_start, point_count )
if lines[ 1 ] > 0:
glDrawArrays( GL_LINES, line_start, line_count )
if faces[ 1 ] > 0:
glDrawArrays( GL_TRIANGLES, face_start, face_count )
Compute month features for the last timestamp.
"""
n = len(time)
ans = np.zeros(n * 12)
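# one-hot encode the month: each series owns a block of 12 consecutive slots,
# so series i with (0-based) month m maps to flat index i * 12 + m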
indices = (pd.to_datetime(time[:, -1]).month.values - 1) + np.arange(0, n * 12, 12)
ans[indices] = 1.0
return torch.tensor(ans.reshape(n, -1), dtype=torch.get_default_dtype())
@staticmethod
def _get_ts2vec(
x: np.ndarray,
time: np.ndarray,
):
# TODO after ts2vec model lands
pass
@staticmethod
def _get_last_hour_minute_feature(
x: np.ndarray,
time: np.ndarray,
) -> torch.Tensor:
"""
Compute minute features for the last timestamp.
"""
pdt = pd.to_datetime(time[:, -1])
hr = pdt.hour.values + 1
minute = pdt.minute.values + 1
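# hour/minute are shifted by 1 so that 0 maps to log(1) = 0 instead of log(0) = -inf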
return torch.tensor(np.column_stack([hr, minute]), dtype=torch.get_default_dtype()).log()
def get_base_features(
self,
x: np.ndarray,
time: np.ndarray,
) -> Optional[Tensor]:
"""Compute selected base features, i.e., the features to be computed only once for each time series.
Args:
x: A `numpy.ndarray` object representing the values of the time series data.
time: A `numpy.ndarray` object representing the timestamps of the time series data.
Returns:
A `torch.Tensor` object representing the features.
"""
funcs = {
"tsfeatures": self._get_tsfeatures,
"ts2vec": self._get_ts2vec,
}
# get features by given feature types
features = []
for ft in self.feature_type:
if ft in funcs:
features.append(funcs[ft](x, time))
if len(features) > 0:
return torch.cat(features, 1)
return None
def get_on_the_fly_features(
self,
x: np.ndarray,
time: np.ndarray,
) -> Optional[Tensor]:
"""Compute selected on-the-fly features, i.e., the features to be computed when stepping through RNN.
Args:
x: A `numpy.ndarray` object representing the values of the time series data.
time: A `numpy.ndarray` object representing the timestamps of the time series data.
Returns:
A `torch.Tensor` object representing the features.
"""
funcs = {
"last_date": self._get_last_date_feature,
"simple_date": self._get_date_feature,
"last_hour": self._get_last_hour_feature,
"last_hour_minute": self._get_last_hour_minute_feature,
"last_month": self._get_last_month_feature,
}
# get features by given feature types
features = []
for ft in self.feature_type:
if ft in funcs:
features.append(funcs[ft](x, time))
if len(features) > 0:
return torch.cat(features, 1)
return None
def __eq__(self, gmfeature):
if isinstance(gmfeature, GMFeature):
if set(gmfeature.feature_type) == set(self.feature_type):
return True
return False
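# Usage sketch (illustrative; assumes a GMFeature built elsewhere with, e.g.,
# feature_type=["last_date"]); `x` and `time` are 2-D arrays of shape
# (num_series, num_timestamps):
#   base = gmfeature.get_base_features(x, time)        # computed once per series
#   otf = gmfeature.get_on_the_fly_features(x, time)   # recomputed at each RNN step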
class GMParam:
"""A class for storing all the parameters of a global model.
This module storing all necessary information for building a global model object.
Attributes:
freq: A string or a `pandas.Timedelta` object representing the frequency of the time series.
input_window: An integer representing the length of the input window, i.e., the length of time series feeded into RNN.
fcst_window: An integer representing the length of the forecast window, i.e., the length of forecasts generated in one forecast step.
seasonality: Optional; An integer representing the seasonality period. Default is 1, which represents non-seasonality.
model_type: Optional; A string representing the type of neural network for global model. Can be either 'rnn' or 's2s'. Default is 'rnn'.
uplifting_ratio: Optional; A float representing the uplifting ratio, which is used for computing the offset value of a time series with negative values, given that (max(TS)+offset)/(min(TS)+offset)=uplifting_ratio.
Default is 3.0.
gmfeature: Optional; A string, a list of strings or a :class:`GMFeature` object representing the time series features. Default is None, which means no time series features.
nn_structure: Optional; A list of lists of integers representing the NN structure of RNN (or encoder). Default value is [[1,3]].
decoder_nn_structure: Optional; A list of lists of integers representing the NN structure of decoder. Default is None, which takes the same NN structure as encoder when model_type is 's2s', else takes None.
cell_name: A string representing the name of NN cells, can be 'LSTM2Cell', 'S2Cell' or 'LSTM'. Default is 'LSTM'.
state_size: Optional; An integer representing the c state size. Default is 50.
h_size: Optional; An integer representing the h state size. When cell_name is 'LSTM2Cell' or 'S2Cell', h_size should be a positive integer which is less than state_size. Default is None, i.e., not specified.
optimizer: Optional; A string or a dictionary representing the name and the parameters of the optimizer for training NN. Default is {'name':'Adam', params:{'eps': 1e-7}}.
loss_function: Optional; A string or a `torch.nn.modules.loss._Loss` object representing the loss function, can be 'Pinball' or 'AdjustedPinball'. Default is 'Pinball'.
quantile: Optional; A list of floats representing the forecast quantiles. Default value is [0.5,0.05,0.95,0.99].
training_quantile: Optional; A list of floats representing quantiles used for training. Default is None, which takes training_quantile the same value as quantile.
quantile_weight: Optional; A list of floats representing weights for quantiles during training. Default is None, which sets weight as torch.Tensor([1/n,...,1/n]), where n is the length of quantile.
validation_metric: Optional; A list of strings representing the names of the error metrics for validation. Default is None, which sets validation_metric as ['smape', 'sbias', 'exceed'].
batch_size: Optional; A dictionary representing the batch size schedule, whose keys are the epoch numbers and the corresponding values are the batch sizes. Default is None, which sets batch_size as {0:2,3:5,4:15,5:50,6:150,7:500}.
learning_rate: Optional; A dictionary representing the learning rate schedule, whose keys are epoch numbers and the corresponding values are the learning rates. Default is None, which sets learning_rate as {0: 1e-3, 2: 1e-3/3.}.
epoch_num: Optional; An integer representing the total number of epochs. Default is 8.
epoch_size: Optional; An integer representing the batches per epoch. Default is 3000.
init_seasonality: Optional; A list of two floats representing the lower and upper bounds for the initial seasonalities. Default is None, which sets init_seasonality as [0.1, 10.].
init_smoothing_params: Optional; A list of two floats representing initial values for smoothing parameters, i.e., level smoothing parameter and seasonality smoothing parameter. Default is None, which sets init_smoothing_params as [0.4, 0.6].
min_training_step_num: Optional; An integer representing the minimum number of training steps. Default is 4.
min_training_step_length: Optional; An integer representing the minimum training step length. Default is min(1, seasonality-1).
soft_max_training_step_num: Optional; An integer representing the soft maximum value for the number of training steps. Default is 10.
validation_step_num: Optional; An integer representing the maximum number of validation steps (maximum validation horizon = validation_step_num * fcst_window). Default is 3.
min_warming_up_step_num: Optional; An integer representing the minimum number of warming-up steps for forecasting. Default is 2.
fcst_step_num: Optional; An integer representing the maximum number of forecasting steps. Default is 1.
jit: Optional; A boolean specifying whether or not to jit every cell of the RNN. Default is False.
gmname: Optional; A string representing the name of the `GMParam` object. Default is None.
"""
def __init__(
self,
freq: Union[str, pd.Timedelta],
input_window: int,
fcst_window: int,
seasonality: int = 1,
model_type: str = "rnn",
uplifting_ratio: float = 3.0,
gmfeature: Union[
None, GMFeature, str, List[str]
] = None, # need to be changed once gmfeature is defined
nn_structure: Optional[List[List[int]]] = None,
decoder_nn_structure: Optional[List[List[int]]] = None,
cell_name: str = "LSTM",
state_size: int = 50,
h_size: Optional[int] = None,
optimizer: Optional[Union[str, Dict[str, Any]]] = None,
loss_function: Union[str, torch.nn.Module] = "Pinball",
quantile: Optional[List[float]] = None,
training_quantile: Optional[List[float]] = None,
quantile_weight: Optional[List[float]] = None,
validation_metric: Optional[List[float]] = None,
batch_size: Union[None, int, Dict[int, int]] = None,
learning_rate: Optional[Union[float, Dict[int, float]]] = None,
epoch_num: int = 8,
epoch_size: int = 3000,
init_seasonality: Optional[List[float]] = None,
init_smoothing_params: Optional[List[float]] = None,
min_training_step_num: int = 4,
min_training_step_length: int = -1,
soft_max_training_step_num: int = 10,
validation_step_num: int = 3,
min_warming_up_step_num: int = 2,
fcst_step_num: int = 1,
jit: bool = False,
gmname: Optional[str] = None,
) -> None:
self._valid_freq(freq)
self.model_type = self._valid_model_type(model_type)
# validate uplifting ratio
if uplifting_ratio < 0:
msg = f"uplifting_ratio should be a positive float but receive {uplifting_ratio}."
logging.error(msg)
raise ValueError(msg)
self.uplifting_ratio = uplifting_ratio
self.nn_structure, self.decoder_nn_structure = self._valid_nn_structure(
nn_structure, decoder_nn_structure
)
self.cell_name = cell_name
self.state_size = state_size
self.h_size = h_size
batch_size = (
batch_size
if batch_size is not None
else {0: 2, 3: 5, 4: 15, 5: 50, 6: 150, 7: 500}
)
self.batch_size = self._valid_union_dict(batch_size, "batch_size", int, int)
learning_rate = (
learning_rate if learning_rate is not None else {0: 1e-3, 2: 1e-3 / 3.0}
)
self.learning_rate = self._valid_union_dict(
learning_rate, "learning_rate", int, float
)
self.loss_function = None
self._valid_loss_function(loss_function)
self.optimizer = None
self._valid_optimizer(optimizer)
self.quantile = quantile if quantile is not None else [0.5, 0.05, 0.95, 0.99]
self._valid_list(self.quantile, "quantile", float, 0, 1)
# additional check needed for filling NaNs during training.
if self.quantile[0] != 0.5:
msg = f"The first element | |
# -*- coding: utf-8 -*-
'''
-------------------------------------------------------------------------------------------------
This code accompanies the paper titled "Human injury-based safety decision of automated vehicles"
Author: <NAME>, <NAME>, <NAME>, <NAME>
Corresponding author: <NAME> (<EMAIL>)
-------------------------------------------------------------------------------------------------
'''
import cv2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from matplotlib.offsetbox import OffsetImage, AnnotationBbox
def resize_rotate(image, angle, l_, w_):
''' resize and rotate the figure. '''
image = cv2.resize(image, (image.shape[1], int(image.shape[0] / (3370 / 8651) * (w_ / l_))))
# grab the dimensions of the image and then determine the center.
(h, w) = image.shape[:2]
(cX, cY) = (w // 2, h // 2)
# grab the rotation matrix and the sine and cosine.
M = cv2.getRotationMatrix2D((cX, cY), angle, 1.0)
cos = np.abs(M[0, 0])
sin = np.abs(M[0, 1])
# compute the new bounding dimensions of the image.
nW = int((h * sin) + (w * cos))
nH = int((h * cos) + (w * sin))
# adjust the rotation matrix to take into account translation.
M[0, 2] += (nW / 2) - cX
M[1, 2] += (nH / 2) - cY
# perform the actual rotation and return the image.
return cv2.warpAffine(image, M, (nW, nH), borderValue=(255, 255, 255))
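# Usage sketch (hypothetical sizes): rescale and rotate a vehicle icon for a
# 4.5 m x 1.8 m car by 30 degrees (angle in degrees, as cv2.getRotationMatrix2D expects):
#   icon = resize_rotate(mpimg.imread('../image/gray.png'), 30.0, 4.5, 1.8)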
def main():
''' Plot Fig4. '''
# Load general data.
img_ini_0 = mpimg.imread('../image/gray.png')
img_ini_1 = mpimg.imread('../image/red.png')
''' Plot Fig4_1. '''
# Basic setup.
fig, ax = plt.subplots(figsize=(3.8, 3.8 / 3 * 2))
plt.axis('equal')
plt.xlim((2.1, 2.1 + 12))
plt.ylim((-5.8, -5.8 + 8))
plt.xticks(np.arange(2.1, 2.1 + 12 + 0.1, 3), np.arange(0, 13, 3), family='Arial', fontsize=14)
plt.yticks(np.arange(-5.8, -5.8 + 8 + 0.1, 2), np.arange(0, 9, 2), family='Arial', fontsize=14)
plt.subplots_adjust(wspace=0.25, hspace=0.25, left=0.08, bottom=0.11, top=0.96, right=0.93)
# Load data.
data = np.load('data/Fig4_1.npz')
veh_l_1, veh_l_2, veh_w_1, veh_w_2 = 5.85, 4.75, 1.75, 1.8
# Plot vehicle information.
img = resize_rotate(img_ini_0, np.rad2deg(data['traj_Re_t1'][-1]), veh_l_1, veh_w_1)
im = OffsetImage(img, zoom=0.0114 * veh_l_1, alpha=1)
ab = AnnotationBbox(im, xy=(data['traj_Re_x1'][-1], data['traj_Re_y1'][-1]), xycoords='data', pad=0, frameon=False)
ax.add_artist(ab)
img = resize_rotate(img_ini_0, np.rad2deg(data['traj_Re_t2'][-1]), veh_l_2, veh_w_2)
im = OffsetImage(img, zoom=0.0114 * veh_l_2, alpha=1)
ab = AnnotationBbox(im, xy=(data['traj_Re_x2'][-1], data['traj_Re_y2'][-1]), xycoords='data', pad=0, frameon=False)
ax.add_artist(ab)
img = resize_rotate(img_ini_1, np.rad2deg(data['traj_S3_t1'][-1]), veh_l_1, veh_w_1)
im = OffsetImage(img, zoom=0.0114 * veh_l_1, alpha=1)
ab = AnnotationBbox(im, xy=(data['traj_S3_x1'][-1], data['traj_S3_y1'][-1]), xycoords='data', pad=0, frameon=False)
ax.add_artist(ab)
img = resize_rotate(img_ini_1, np.rad2deg(data['traj_S3_t2'][-1]), veh_l_2, veh_w_2)
im = OffsetImage(img, zoom=0.0114 * veh_l_2, alpha=1)
ab = AnnotationBbox(im, xy=(data['traj_S3_x2'][-1], data['traj_S3_y2'][-1]), xycoords='data', pad=0, frameon=False)
ax.add_artist(ab)
# Plot trajectory information.
plt.plot(data['traj_Re_x1'], data['traj_Re_y1'], color='gray', linestyle='--', linewidth=1.3, alpha=0.5)
plt.plot(data['traj_Re_x2'], data['traj_Re_y2'], color='gray', linestyle='--', linewidth=1.3, alpha=0.5)
plt.plot(data['traj_S3_x1'], data['traj_S3_y1'], color='#FF5050', linestyle='--', linewidth=1.3, alpha=0.5)
plt.plot(data['traj_S3_x2'], data['traj_S3_y2'], color='#FF5050', linestyle='--', linewidth=1.3, alpha=0.5)
# Show.
plt.show()
# plt.savefig('Fig4_1.png', dpi=600)
plt.close()
''' Plot Fig4_2. '''
# Basic setup.
fig_size = (3.8, 3.8 / 3 * 2)
fig_lim = (12, 8)
fig, ax = plt.subplots(figsize=fig_size)
plt.axis('equal')
plt.xlim((25.5, 25.5 + fig_lim[0]))
plt.ylim((-6.6, -6.6 + fig_lim[1]))
plt.xticks(np.arange(25.5, 25.5 + fig_lim[0] + 0.1, 3), np.arange(0, 13, 3), family='Arial', fontsize=14)
plt.yticks(np.arange(-6.6, -6.6 + fig_lim[1] + 0.1, 2), np.arange(0, 9, 2), family='Arial', fontsize=14)
plt.subplots_adjust(wspace=0.25, hspace=0.25, left=0.08, bottom=0.11, top=0.96, right=0.93)
# Load data.
data = np.load('data/Fig4_2.npz')
veh_l_1, veh_l_2, veh_w_1, veh_w_2 = 4.53, 4.51, 1.705, 1.725
# Plot vehicle information.
img = resize_rotate(img_ini_0, np.rad2deg(data['traj_Re_t1'][-1]), veh_l_1, veh_w_1)
im = OffsetImage(img, zoom=0.010857 * veh_l_1, alpha=1)
ab = AnnotationBbox(im, xy=(data['traj_Re_x1'][-1], data['traj_Re_y1'][-1]), xycoords='data', pad=0, frameon=False)
ax.add_artist(ab)
img = resize_rotate(img_ini_0, np.rad2deg(data['traj_Re_t2'][-1]), veh_l_2, veh_w_2)
im = OffsetImage(img, zoom=0.010857 * veh_l_2, alpha=1)
ab = AnnotationBbox(im, xy=(data['traj_Re_x2'][-1], data['traj_Re_y2'][-1]), xycoords='data', pad=0, frameon=False)
ax.add_artist(ab)
img = resize_rotate(img_ini_1, np.rad2deg(data['traj_S3_t1'][-1]), veh_l_1, veh_w_1)
im = OffsetImage(img, zoom=0.010857 * veh_l_1, alpha=1)
ab = AnnotationBbox(im, xy=(data['traj_S3_x1'][-1], data['traj_S3_y1'][-1]), xycoords='data', pad=0, frameon=False)
ax.add_artist(ab)
img = resize_rotate(img_ini_1, np.rad2deg(data['traj_S3_t2'][-1]), veh_l_2, veh_w_2)
im = OffsetImage(img, zoom=0.010857 * veh_l_2, alpha=1)
ab = AnnotationBbox(im, xy=(data['traj_S3_x2'][-1], data['traj_S3_y2'][-1]), xycoords='data', pad=0, frameon=False)
ax.add_artist(ab)
# Plot trajectory information.
plt.plot(data['traj_Re_x1'], data['traj_Re_y1'], color='gray', linestyle='--', linewidth=1.3, alpha=0.5)
plt.plot(data['traj_Re_x2'], data['traj_Re_y2'], color='gray', linestyle='--', linewidth=1.3, alpha=0.5)
plt.plot(data['traj_S3_x1'], data['traj_S3_y1'], color='#FF5050', linestyle='--', linewidth=1.3, alpha=0.5)
plt.plot(data['traj_S3_x2'], data['traj_S3_y2'], color='#FF5050', linestyle='--', linewidth=1.3, alpha=0.5)
# Show.
plt.show()
# plt.savefig('Fig4_2.png', dpi=600)
plt.close()
''' Plot Fig4_2_1. '''
# Basic setup.
fig, ax = plt.subplots(figsize=(3, 1.8))
font1 = {'family': 'Arial', 'size': 14}
plt.xlabel("Time [ms]", font1, labelpad=-0.6)
plt.ylabel('Velocity [m/s]', font1, labelpad=3)
plt.xticks(np.arange(0, 47, 10), np.arange(0, 47, 10) * 10, family='Arial', fontsize=14)
plt.yticks([15, 20, 25, 30], family='Arial', fontsize=14)
plt.xlim([-4, 47])
plt.ylim([12, 33])
plt.subplots_adjust(left=0.22, wspace=0.25, hspace=0.25, bottom=0.25, top=0.96, right=0.97)
# Plot Fig4_2_1 (velocity profiles; reuses the Fig4_2 data loaded above).
plt.plot(data['traj_Re_V2'][60:], color='lightgray', linestyle='dashed', linewidth=2, zorder=10)
plt.plot(data['traj_S3_V2'][60:], color='#FF5050', linestyle='dashed', linewidth=2, zorder=7)
plt.plot(data['traj_Re_V1'][60:], color='lightgray', linestyle='-.', linewidth=2, zorder=5)
plt.plot(data['traj_S3_V1'][60:], color='#FF5050', linestyle='-.', linewidth=2, zorder=2)
# Show.
plt.show()
# plt.savefig('Fig4_2_1.png', dpi=600)
plt.close()
''' Plot Fig4_3. '''
# Basic setup.
fig_size = (3.8, 3.8 / 3 * 2)
fig_lim = (12, 8)
fig, ax = plt.subplots(figsize=fig_size)
plt.axis('equal')
plt.xlim((-3.3, -3.3 + fig_lim[0]))
plt.ylim((-7.4, -7.4 + fig_lim[1]))
plt.xticks(np.arange(-3.3, -3.3 + fig_lim[0] + 0.1, 3), np.arange(0, 13, 3), family='Arial', fontsize=14)
plt.yticks(np.arange(-7.4, -7.4 + fig_lim[1] + 0.1, 2), np.arange(0, 9, 2), family='Arial', fontsize=14)
plt.subplots_adjust(wspace=0.25, hspace=0.25, left=0.08, bottom=0.11, top=0.96, right=0.93)
# Load data.
data = np.load('data/Fig4_3.npz')
veh_l_1, veh_l_2, veh_w_1, veh_w_2 = 4.616, 4.416, 1.783, 1.718
# Plot vehicle information.
img = resize_rotate(img_ini_0, np.rad2deg(data['traj_Re_t1'][-1]), veh_l_1, veh_w_1)
im = OffsetImage(img, zoom=0.01149 * veh_l_1, alpha=1)
ab = AnnotationBbox(im, xy=(data['traj_Re_x1'][-1], data['traj_Re_y1'][-1]), xycoords='data', pad=0, frameon=False)
ax.add_artist(ab)
img = resize_rotate(img_ini_0, np.rad2deg(data['traj_Re_t2'][-1]), veh_l_2, veh_w_2)
im = OffsetImage(img, zoom=0.01149 * veh_l_2, alpha=1)
ab = AnnotationBbox(im, xy=(data['traj_Re_x2'][-1], data['traj_Re_y2'][-1]), xycoords='data', pad=0, frameon=False)
ax.add_artist(ab)
img = resize_rotate(img_ini_1, np.rad2deg(data['traj_S3_t1'][-1]), veh_l_1, veh_w_1)
im = OffsetImage(img, zoom=0.01149 * veh_l_1, alpha=1)
ab = AnnotationBbox(im, xy=(data['traj_S3_x1'][-1], data['traj_S3_y1'][-1]), xycoords='data', pad=0, frameon=False)
ax.add_artist(ab)
img = resize_rotate(img_ini_1, np.rad2deg(data['traj_S3_t2'][-1]), veh_l_2, veh_w_2)
im = OffsetImage(img, zoom=0.01149 * veh_l_2, alpha=1)
ab = AnnotationBbox(im, xy=(data['traj_S3_x2'][-1], data['traj_S3_y2'][-1]), xycoords='data', pad=0, frameon=False)
ax.add_artist(ab)
# Plot trajectory information.
plt.plot(data['traj_Re_x1'], data['traj_Re_y1'], color='gray', linestyle='--', linewidth=1.3, alpha=0.5)
plt.plot(data['traj_Re_x2'], data['traj_Re_y2'], color='gray', linestyle='--', linewidth=1.3, alpha=0.5)
plt.plot(data['traj_S3_x1'], data['traj_S3_y1'], color='#FF5050', linestyle='--', linewidth=1.3, alpha=0.5)
plt.plot(data['traj_S3_x2'], data['traj_S3_y2'], color='#FF5050', linestyle='--', linewidth=1.3, alpha=0.5)
# Show.
plt.show()
# plt.savefig('Fig4_3.png', dpi=600)
plt.close()
''' Plot Fig4_4. '''
# Basic setup.
fig_size = (3.8, 3.8 / 3 * 2)
fig_lim = (12, 8)
fig, ax = plt.subplots(figsize=fig_size)
plt.axis('equal')
plt.xlim((0.5, 0.5 + fig_lim[0]))
plt.ylim((-0.2, -0.2 + fig_lim[1]))
plt.xticks(np.arange(0.5, 0.5 + fig_lim[0] + 0.1, 3), np.arange(0, 13, 3), family='Arial', fontsize=14)
plt.yticks(np.arange(-0.2, -0.2 + fig_lim[1] + 0.1, 2), np.arange(0, 9, 2), family='Arial', fontsize=14)
plt.subplots_adjust(wspace=0.25, hspace=0.25, left=0.08, bottom=0.11, top=0.96, right=0.93)
# Load data.
data = np.load('data/Fig4_4.npz')
veh_l_1, veh_l_2, veh_w_1, veh_w_2 = 3.995, 4.07, 1.615, 1.615
# Plot vehicle information.
img = resize_rotate(img_ini_0, np.rad2deg(data['traj_Re_t1'][-1]), veh_l_1, veh_w_1)
im = OffsetImage(img, zoom=0.01149 * veh_l_1, alpha=1)
ab = AnnotationBbox(im, xy=(data['traj_Re_x1'][-1], data['traj_Re_y1'][-1]), xycoords='data', pad=0, frameon=False)
ax.add_artist(ab)
img = resize_rotate(img_ini_0, np.rad2deg(data['traj_Re_t2'][-1]), veh_l_2, veh_w_2)
im = OffsetImage(img, zoom=0.01149 * veh_l_2, alpha=1)
ab = AnnotationBbox(im, xy=(data['traj_Re_x2'][-1], data['traj_Re_y2'][-1]), xycoords='data', pad=0, frameon=False)
ax.add_artist(ab)
img = resize_rotate(img_ini_1, np.rad2deg(data['traj_S3_t1'][-1]), veh_l_1, veh_w_1)
im = OffsetImage(img, zoom=0.01149 * veh_l_1, alpha=1)
ab = AnnotationBbox(im, xy=(data['traj_S3_x1'][-1], data['traj_S3_y1'][-1]), xycoords='data', pad=0, frameon=False)
ax.add_artist(ab)
img = resize_rotate(img_ini_1, np.rad2deg(data['traj_S3_t2'][-1]), veh_l_2, veh_w_2)
im = OffsetImage(img, zoom=0.01149 * veh_l_2, alpha=1)
ab = AnnotationBbox(im, xy=(data['traj_S3_x2'][-1], data['traj_S3_y2'][-1]), xycoords='data', pad=0, frameon=False)
ax.add_artist(ab)
# Plot trajectory information.
plt.plot(data['traj_Re_x1'], data['traj_Re_y1'], color='gray', linestyle='--', linewidth=1.3, alpha=0.5)
plt.plot(data['traj_Re_x2'], data['traj_Re_y2'], color='gray', linestyle='--', linewidth=1.3, alpha=0.5)
plt.plot(data['traj_S3_x1'], data['traj_S3_y1'], color='#FF5050', linestyle='--', linewidth=1.3, alpha=0.5)
plt.plot(data['traj_S3_x2'], data['traj_S3_y2'], color='#FF5050', linestyle='--', linewidth=1.3, alpha=0.5)
# Show.
plt.show()
# plt.savefig('Fig4_4.png', dpi=600)
plt.close()
''' Plot Fig4_5. '''
# Basic setup.
fig_size = (3.8, 3.8 / 3 * 2)
fig_lim = (12, 8)
fig, ax = plt.subplots(figsize=fig_size)
plt.axis('equal')
plt.xlim((14.6, 14.6 + fig_lim[0]))
plt.ylim((-5.05, -5.05 + fig_lim[1]))
plt.xticks(np.arange(14.6, 14.6 + fig_lim[0] + 0.1, 3), np.arange(0, 13, 3), family='Arial', fontsize=14)
plt.yticks(np.arange(-5.05, -5.05 + fig_lim[1] + 0.1, 2), np.arange(0, 9, 2), family='Arial', fontsize=14)
plt.subplots_adjust(wspace=0.25, hspace=0.25, left=0.08, bottom=0.11, top=0.96, right=0.93)
# Load data.
data = np.load('data/Fig4_5.npz')
veh_l_1, veh_l_2, veh_w_1, veh_w_2 = 4.95, 4.97, 1.78, 1.69
# Plot vehicle information.
img = resize_rotate(img_ini_0, np.rad2deg(data['traj_Re_t1'][-1]), veh_l_1, veh_w_1)
im = OffsetImage(img, zoom=0.01149 * veh_l_1, alpha=1)
ab = AnnotationBbox(im, xy=(data['traj_Re_x1'][-1], data['traj_Re_y1'][-1]), xycoords='data', pad=0, frameon=False)
ax.add_artist(ab)
img = resize_rotate(img_ini_0, np.rad2deg(data['traj_Re_t2'][-1]), veh_l_2, veh_w_2)
im = OffsetImage(img, zoom=0.01149 * veh_l_2, alpha=1)
ab = AnnotationBbox(im, xy=(data['traj_Re_x2'][-1], data['traj_Re_y2'][-1]), xycoords='data', pad=0, frameon=False)
ax.add_artist(ab)
img = resize_rotate(img_ini_1, np.rad2deg(data['traj_S3_t1'][-1]), veh_l_1, veh_w_1)
im = OffsetImage(img, zoom=0.01149 * veh_l_1, alpha=1)
ab = AnnotationBbox(im, xy=(data['traj_S3_x1'][-1], data['traj_S3_y1'][-1]), xycoords='data', pad=0, frameon=False)
ax.add_artist(ab)
img = resize_rotate(img_ini_1, np.rad2deg(data['traj_S3_t2'][-1]), veh_l_2, veh_w_2)
im = OffsetImage(img, zoom=0.01149 * veh_l_2, alpha=1)
ab = AnnotationBbox(im, xy=(data['traj_S3_x2'][-1], data['traj_S3_y2'][-1]), xycoords='data', pad=0, frameon=False)
ax.add_artist(ab)
# Plot trajectory information.
plt.plot(data['traj_Re_x1'], data['traj_Re_y1'], color='gray', linestyle='--', linewidth=1.3, alpha=0.5)
plt.plot(data['traj_Re_x2'], data['traj_Re_y2'], color='gray', linestyle='--', linewidth=1.3, alpha=0.5)
plt.plot(data['traj_S3_x1'], data['traj_S3_y1'], color='#FF5050', linestyle='--', linewidth=1.3, alpha=0.5)
plt.plot(data['traj_S3_x2'], data['traj_S3_y2'], color='#FF5050', linestyle='--', linewidth=1.3, alpha=0.5)
DEF = '''
typedef int32_t khronos_int32_t;
typedef uint32_t khronos_uint32_t;
typedef int64_t khronos_int64_t;
typedef uint64_t khronos_uint64_t;
typedef signed char khronos_int8_t;
typedef unsigned char khronos_uint8_t;
typedef signed short int khronos_int16_t;
typedef unsigned short int khronos_uint16_t;
typedef signed long int khronos_intptr_t;
typedef unsigned long int khronos_uintptr_t;
typedef signed long int khronos_ssize_t;
typedef unsigned long int khronos_usize_t;
typedef float khronos_float_t;
typedef khronos_uint64_t khronos_utime_nanoseconds_t;
typedef khronos_int64_t khronos_stime_nanoseconds_t;
typedef ... Display;
typedef ... XExtData;
typedef int Bool;
typedef unsigned long Font;
typedef unsigned long Screen;
typedef unsigned long Status;
typedef unsigned long Window;
typedef unsigned long Pixmap;
typedef unsigned long Colormap;
typedef unsigned long VisualID;
typedef unsigned long XID;
typedef struct {
XExtData *ext_data;
VisualID visualid;
int class;
unsigned long red_mask, green_mask, blue_mask;
int bits_per_rgb;
int map_entries;
} Visual;
typedef struct {
Visual *visual;
VisualID visualid;
int screen;
int depth;
int class;
unsigned long red_mask;
unsigned long green_mask;
unsigned long blue_mask;
int colormap_size;
int bits_per_rgb;
} XVisualInfo;
extern Display *XOpenDisplay(const char*);
xcb_connection_t *XGetXCBConnection(Display *dpy);
enum XEventQueueOwner { XlibOwnsEventQueue = 0, XCBOwnsEventQueue };
void XSetEventQueueOwner(Display *dpy, enum XEventQueueOwner owner);
typedef ... GLbitfield;
typedef ... GLboolean;
typedef ... GLenum;
typedef ... GLfloat;
typedef ... GLint;
typedef ... GLintptr;
typedef ... GLsizei;
typedef ... GLsizeiptr;
typedef ... GLubyte;
typedef ... GLuint;
typedef ... DMbuffer;
typedef ... DMparams;
typedef ... VLNode;
typedef ... VLPath;
typedef ... VLServer;
typedef XID GLXFBConfigID;
typedef struct __GLXFBConfigRec *GLXFBConfig;
typedef XID GLXContextID;
typedef struct __GLXcontextRec *GLXContext;
typedef XID GLXPixmap;
typedef XID GLXDrawable;
typedef XID GLXWindow;
typedef XID GLXPbuffer;
typedef void (*__GLXextFuncPtr)(void);
typedef XID GLXVideoCaptureDeviceNV;
typedef unsigned int GLXVideoDeviceNV;
typedef XID GLXVideoSourceSGIX;
typedef XID GLXFBConfigIDSGIX;
typedef struct __GLXFBConfigRec *GLXFBConfigSGIX;
typedef XID GLXPbufferSGIX;
typedef struct {
int event_type; /* GLX_DAMAGED or GLX_SAVED */
int draw_type; /* GLX_WINDOW or GLX_PBUFFER */
unsigned long serial; /* # of last request processed by server */
Bool send_event; /* true if this came from a SendEvent request */
Display *display; /* display the event was read from */
GLXDrawable drawable; /* XID of Drawable */
unsigned int buffer_mask; /* mask indicating which buffers are affected */
unsigned int aux_buffer; /* which aux buffer was affected */
int x, y;
int width, height;
int count; /* if nonzero, at least this many more */
} GLXPbufferClobberEvent;
typedef struct {
int type;
unsigned long serial; /* # of last request processed by server */
Bool send_event; /* true if this came from a SendEvent request */
Display *display; /* Display the event was read from */
GLXDrawable drawable; /* drawable on which event was requested in event mask */
int event_type;
int64_t ust;
int64_t msc;
int64_t sbc;
} GLXBufferSwapComplete;
typedef union __GLXEvent {
GLXPbufferClobberEvent glxpbufferclobber;
GLXBufferSwapComplete glxbufferswapcomplete;
long pad[24];
} GLXEvent;
typedef struct {
int type;
unsigned long serial;
Bool send_event;
Display *display;
int extension;
int evtype;
GLXDrawable window;
Bool stereo_tree;
} GLXStereoNotifyEventEXT;
typedef struct {
int type;
unsigned long serial; /* # of last request processed by server */
Bool send_event; /* true if this came from a SendEvent request */
Display *display; /* display the event was read from */
GLXDrawable drawable; /* i.d. of Drawable */
int event_type; /* GLX_DAMAGED_SGIX or GLX_SAVED_SGIX */
int draw_type; /* GLX_WINDOW_SGIX or GLX_PBUFFER_SGIX */
unsigned int mask; /* mask indicating which buffers are affected*/
int x, y;
int width, height;
int count; /* if nonzero, at least this many more */
} GLXBufferClobberEventSGIX;
typedef struct {
char pipeName[80]; /* Should be [GLX_HYPERPIPE_PIPE_NAME_LENGTH_SGIX] */
int networkId;
} GLXHyperpipeNetworkSGIX;
typedef struct {
char pipeName[80]; /* Should be [GLX_HYPERPIPE_PIPE_NAME_LENGTH_SGIX] */
int channel;
unsigned int participationType;
int timeSlice;
} GLXHyperpipeConfigSGIX;
typedef struct {
char pipeName[80]; /* Should be [GLX_HYPERPIPE_PIPE_NAME_LENGTH_SGIX] */
int srcXOrigin, srcYOrigin, srcWidth, srcHeight;
int destXOrigin, destYOrigin, destWidth, destHeight;
} GLXPipeRect;
typedef struct {
char pipeName[80]; /* Should be [GLX_HYPERPIPE_PIPE_NAME_LENGTH_SGIX] */
int XOrigin, YOrigin, maxHeight, maxWidth;
} GLXPipeRectLimits;
GLXContextID glXGetContextIDEXT(const GLXContext context);
Bool glXBindSwapBarrierNV(Display *dpy, GLuint group, GLuint barrier);
int glXBindVideoCaptureDeviceNV(Display *dpy, unsigned int video_capture_slot, GLXVideoCaptureDeviceNV device);
void glXLockVideoCaptureDeviceNV(Display *dpy, GLXVideoCaptureDeviceNV device);
GLXContext glXCreateContext(Display *dpy, XVisualInfo *vis, GLXContext shareList, Bool direct);
GLXPixmap glXCreateGLXPixmap(Display *dpy, XVisualInfo *visual, Pixmap pixmap);
int glXSendPbufferToVideoNV(Display *dpy, GLXPbuffer pbuf, int iBufferType, unsigned long *pulCounterPbuffer, GLboolean bBlock);
int glXBindVideoImageNV(Display *dpy, GLXVideoDeviceNV VideoDevice, GLXPbuffer pbuf, int iVideoBuffer);
void glXReleaseTexImageEXT(Display *dpy, GLXDrawable drawable, int buffer);
Bool glXJoinSwapGroupNV(Display *dpy, GLXDrawable drawable, GLuint group);
GLXContext glXCreateAssociatedContextAMD(unsigned int id, GLXContext share_list);
__GLXextFuncPtr glXGetProcAddress(const GLubyte *procName);
int glXSwapIntervalSGI(int interval);
int glXReleaseVideoImageNV(Display *dpy, GLXPbuffer pbuf);
void glXDestroyWindow(Display *dpy, GLXWindow win);
int glXBindHyperpipeSGIX(Display *dpy, int hpId);
void glXQueryDrawable(Display *dpy, GLXDrawable draw, int attribute, unsigned int *value);
Bool glXResetFrameCountNV(Display *dpy, int screen);
int glXBindChannelToWindowSGIX(Display *display, int screen, int channel, Window window);
void glXSwapBuffers(Display *dpy, GLXDrawable drawable);
Display * glXGetCurrentDisplay();
GLXPixmap glXCreateGLXPixmapWithConfigSGIX(Display *dpy, GLXFBConfigSGIX config, Pixmap pixmap);
XVisualInfo * glXChooseVisual(Display *dpy, int screen, int *attribList);
int glXQueryContextInfoEXT(Display *dpy, GLXContext context, int attribute, int *value);
GLXFBConfigSGIX * glXChooseFBConfigSGIX(Display *dpy, int screen, int *attrib_list, int *nelements);
Bool glXQueryCurrentRendererIntegerMESA(int attribute, unsigned int *value);
void glXDestroyPbuffer(Display *dpy, GLXPbuffer pbuf);
Bool glXWaitForSbcOML(Display *dpy, GLXDrawable drawable, int64_t target_sbc, int64_t *ust, int64_t *msc, int64_t *sbc);
GLXContext glXCreateAssociatedContextAttribsAMD(unsigned int id, GLXContext share_context, const int *attribList);
GLXHyperpipeConfigSGIX * glXQueryHyperpipeConfigSGIX(Display *dpy, int hpId, int *npipes);
void glXCushionSGI(Display *dpy, Window window, float cushion);
GLXPixmap glXCreatePixmap(Display *dpy, GLXFBConfig config, Pixmap pixmap, const int *attrib_list);
void glXSelectEvent(Display *dpy, GLXDrawable draw, unsigned long event_mask);
void glXGetSelectedEvent(Display *dpy, GLXDrawable draw, unsigned long *event_mask);
void glXSwapIntervalEXT(Display *dpy, GLXDrawable drawable, int interval);
Bool glXQueryExtension(Display *dpy, int *errorb, int *event);
Bool glXMakeCurrent(Display *dpy, GLXDrawable drawable, GLXContext ctx);
Bool glXQueryMaxSwapGroupsNV(Display *dpy, int screen, GLuint *maxGroups, GLuint *maxBarriers);
int glXQueryHyperpipeAttribSGIX(Display *dpy, int timeSlice, int attrib, int size, void *returnAttribList);
Bool glXQueryRendererIntegerMESA(Display *dpy, int screen, int renderer, int attribute, unsigned int *value);
Bool glXQueryMaxSwapBarriersSGIX(Display *dpy, int screen, int *max);
void glXGetSelectedEventSGIX(Display *dpy, GLXDrawable drawable, unsigned long *mask);
XVisualInfo * glXGetVisualFromFBConfigSGIX(Display *dpy, GLXFBConfigSGIX config);
int glXHyperpipeConfigSGIX(Display *dpy, int networkId, int npipes, GLXHyperpipeConfigSGIX *cfg, int *hpId);
GLXContext glXCreateContextAttribsARB(Display *dpy, GLXFBConfig config, GLXContext share_context, Bool direct, const int *attrib_list);
const char * glXQueryRendererStringMESA(Display *dpy, int screen, int renderer, int attribute);
int glXChannelRectSyncSGIX(Display *display, int screen, int channel, GLenum synctype);
int glXWaitVideoSyncSGI(int divisor, int remainder, unsigned int *count);
GLXPbuffer glXCreatePbuffer(Display *dpy, GLXFBConfig config, const int *attrib_list);
void glXSelectEventSGIX(Display *dpy, GLXDrawable drawable, unsigned long mask);
Bool glXDelayBeforeSwapNV(Display *dpy, GLXDrawable drawable, GLfloat seconds);
GLXContext glXImportContextEXT(Display *dpy, GLXContextID contextID);
int glXQueryChannelRectSGIX(Display *display, int screen, int channel, int *dx, int *dy, int *dw, int *dh);
unsigned int glXGetContextGPUIDAMD(GLXContext ctx);
void glXJoinSwapGroupSGIX(Display *dpy, GLXDrawable drawable, GLXDrawable member);
void glXDestroyGLXPixmap(Display *dpy, GLXPixmap pixmap);
Bool glXAssociateDMPbufferSGIX(Display *dpy, GLXPbufferSGIX pbuffer, DMparams *params, DMbuffer dmbuffer);
int glXGetVideoInfoNV(Display *dpy, int screen, GLXVideoDeviceNV VideoDevice, unsigned long *pulCounterOutputPbuffer, unsigned long *pulCounterOutputVideo);
GLXFBConfig * glXChooseFBConfig(Display *dpy, int screen, const int *attrib_list, int *nelements);
GLXContext glXGetCurrentAssociatedContextAMD();
int glXGetConfig(Display *dpy, XVisualInfo *visual, int attrib, int *value);
Bool glXGetSyncValuesOML(Display *dpy, GLXDrawable drawable, int64_t *ust, int64_t *msc, int64_t *sbc);
void glXBlitContextFramebufferAMD(GLXContext dstCtx, GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter);
void glXWaitX();
GLXContext glXGetCurrentContext();
Bool glXQueryFrameCountNV(Display *dpy, int screen, GLuint *count);
Bool glXDeleteAssociatedContextAMD(GLXContext ctx);
void glXWaitGL();
GLXVideoSourceSGIX glXCreateGLXVideoSourceSGIX(Display *display, int screen, VLServer server, VLPath path, int nodeClass, VLNode drainNode);
Display * glXGetCurrentDisplayEXT();
Bool glXSet3DfxModeMESA(int mode);
Bool glXQueryVersion(Display *dpy, int *maj, int *min);
GLXVideoCaptureDeviceNV * glXEnumerateVideoCaptureDevicesNV(Display *dpy, int screen, int *nelements);
int64_t glXSwapBuffersMscOML(Display *dpy, GLXDrawable drawable, int64_t target_msc, int64_t divisor, int64_t remainder);
int glXGetFBConfigAttribSGIX(Display *dpy, GLXFBConfigSGIX config, int attribute, int *value);
int glXQueryChannelDeltasSGIX(Display *display, int screen, int channel, int *x, int *y, int *w, int *h);
GLXPbufferSGIX glXCreateGLXPbufferSGIX(Display *dpy, GLXFBConfigSGIX config, unsigned int width, unsigned int height, int *attrib_list);
void glXDestroyPixmap(Display *dpy, GLXPixmap pixmap);
GLXContext glXCreateContextWithConfigSGIX(Display *dpy, GLXFBConfigSGIX config, int render_type, GLXContext share_list, Bool direct);
int glXQueryVideoCaptureDeviceNV(Display *dpy, GLXVideoCaptureDeviceNV device, int attribute, int *value);
void glXNamedCopyBufferSubDataNV(Display *dpy, GLXContext readCtx, GLXContext writeCtx, GLuint readBuffer, GLuint writeBuffer, GLintptr readOffset, GLintptr writeOffset, GLsizeiptr size);
unsigned int glXGetGPUIDsAMD(unsigned int maxCount, unsigned int *ids);
int glXGetGPUInfoAMD(unsigned int id, int property, GLenum dataType, unsigned int size, void *data);
Bool glXIsDirect(Display *dpy, GLXContext ctx);
int glXGetVideoSyncSGI(unsigned int *count);
int glXBindVideoDeviceNV(Display *dpy, unsigned int video_slot, unsigned int video_device, const int *attrib_list);
void glXBindTexImageEXT(Display *dpy, GLXDrawable drawable, int buffer, const int *attrib_list);
int glXHyperpipeAttribSGIX(Display *dpy, int timeSlice, int attrib, int size, void *attribList);
GLXPixmap glXCreateGLXPixmapMESA(Display *dpy, XVisualInfo *visual, Pixmap pixmap, Colormap cmap);
void glXDestroyContext(Display *dpy, GLXContext ctx);
void glXReleaseVideoCaptureDeviceNV(Display *dpy, GLXVideoCaptureDeviceNV device);
int glXReleaseVideoDeviceNV(Display *dpy, int screen, GLXVideoDeviceNV VideoDevice);
void glXDestroyGLXPbufferSGIX(Display *dpy, GLXPbufferSGIX pbuf);
GLXContext glXCreateNewContext(Display *dpy, GLXFBConfig config, int render_type, GLXContext share_list, Bool direct);
const char * glXQueryCurrentRendererStringMESA(int attribute);
Bool glXMakeCurrentReadSGI(Display *dpy, GLXDrawable draw, GLXDrawable read, GLXContext ctx);
unsigned int glXGetAGPOffsetMESA(const void *pointer);
Bool glXQuerySwapGroupNV(Display *dpy, GLXDrawable drawable, GLuint *group, GLuint *barrier);
const char * glXGetClientString(Display *dpy, int name);
void glXBindSwapBarrierSGIX(Display *dpy, GLXDrawable drawable, int barrier);
__GLXextFuncPtr glXGetProcAddressARB(const GLubyte *procName);
void glXDestroyGLXVideoSourceSGIX(Display *dpy, GLXVideoSourceSGIX glxvideosource);
void glXCopySubBufferMESA(Display *dpy, GLXDrawable drawable, int x, int y, int width, int height);
void glXCopyBufferSubDataNV(Display *dpy, GLXContext readCtx, GLXContext writeCtx, GLenum readTarget, GLenum writeTarget, GLintptr readOffset, GLintptr writeOffset, GLsizeiptr size);
XVisualInfo * glXGetVisualFromFBConfig(Display *dpy, GLXFBConfig config);
int glXChannelRectSGIX(Display *display, int screen, int channel, int x, int y, int w, int h);
arg_path doesn’t exist, returns arg_default_value_to_return.
Arguments:
arg_default_value_to_return - This is the returned value, if arg_path isn't in the tree.
arg_bool_path_is_absolute - If True, starts from the entire tree's root node. If False, the method focuses on the children in
the sub tree.
Note: This is a wrapper to mimic dict’s pop method.
"""
_node = self._node_root if arg_bool_path_is_absolute else self
return _node.pop_path(arg_path=arg_path,
arg_default_value_to_return=arg_default_value_to_return,
arg_bool_path_is_absolute=arg_bool_path_is_absolute, )
#
# Public - pop - key
#
def pop_key(self, arg_key, arg_default_value_to_return=_OBJECT_FOR_RAISING_ERRORS):
"""
Returns the object stored in the child node at arg_key; and removes the node and all its sub nodes as well. If arg_key doesn’t exist,
returns arg_default_value_to_return.
If arg_key does not exist, and arg_default_value_to_return isn't set, then this method will raise an error.
Arguments:
arg_key - This obeys the same rules as a key used in a dict.
arg_default_value_to_return - This is the default value returned if the key does not exist.
"""
_node_or_object = self.pop_key_to_node_child(arg_key=arg_key,
arg_default_value_to_return=arg_default_value_to_return, )
if _node_or_object is arg_default_value_to_return:
return _node_or_object
else:
if _node_or_object._dict_of_keys_and_node_children:
return _node_or_object
else:
return _node_or_object._object_stored_in_node
def pop_key_to_node_child(self, arg_key, arg_default_value_to_return=_OBJECT_FOR_RAISING_ERRORS):
"""
Returns the child node located at arg_key; and removes the node and all its sub nodes.
If arg_key doesn’t exist, returns arg_default_value_to_return.
Note: This method is an explicit request for the node, instead of the object stored within it.
Arguments:
arg_key - This obeys the same rules as a normal dict key.
arg_default_value_to_return - This is the default value returned if arg_key doesn't exist.
"""
_node = None
if arg_default_value_to_return is _OBJECT_FOR_RAISING_ERRORS:
try:
_node = self._dict_of_keys_and_node_children[arg_key]
except KeyError:
self._raise_error_because_key_or_path_failed(arg_key_or_path=arg_key,
arg_node_start=self, )
else:
if arg_key in self._dict_of_keys_and_node_children.keys():
_node = self._dict_of_keys_and_node_children[arg_key]
else:
return arg_default_value_to_return
self._integrate_deallocate_node(arg_key=arg_key,
arg_node=_node, )
return _node
#
# Public - pop - node
#
def pop_node_child(self, arg_node_child, arg_bool_search_entire_tree=False, arg_default_value_to_return=None):
"""
Returns the path to the node, and removes the node. If arg_node_child doesn’t exist, returns arg_default_value_to_return.
Arguments:
arg_default_value_to_return - This is the default value returned if arg_node_child doesn't exist within the searched area.
arg_bool_search_entire_tree - Searches the entire tree if True, and only the sub-tree if False.
Rationale behind returning the path:
If the dev is using the node object, then they already have access to the object stored within the node. The info
that isn't necessarily easy to come by is what gets returned, which in this case is the path.
"""
if arg_node_child._node_parent is None:
return arg_default_value_to_return
_path = self.get_path_to_node_child(arg_node_child=arg_node_child,
arg_bool_search_entire_tree=arg_bool_search_entire_tree,
arg_bool_raise_error_if_node_is_not_in_tree=False, )
if _path is None:
return arg_default_value_to_return
else:
self._integrate_deallocate_node(arg_key=_path[-1],
arg_node=arg_node_child, )
return _path
#
# Public - pop - path
#
def pop_path(self, arg_path, arg_bool_path_is_absolute=False, arg_default_value_to_return=_OBJECT_FOR_RAISING_ERRORS):
"""
This method backs the pop() wrapper.
If the node at arg_path has no children, then this method returns the object stored at that location.
Otherwise, it returns the node itself.
Arguments:
arg_path - This is the path to the node. It can either be a delimited string, or a list of keys.
arg_default_value_to_return - This is the default value returned if arg_path does not exist in the searched area.
arg_bool_path_is_absolute - If True, starts from the entire tree's root node. If False, the method focuses on the children in
the sub tree.
"""
_node_or_default_object = self.pop_path_to_node_child(arg_path=arg_path,
arg_bool_path_is_absolute=arg_bool_path_is_absolute,
arg_default_value_to_return=arg_default_value_to_return, )
if _node_or_default_object is arg_default_value_to_return:
return _node_or_default_object
else:
if _node_or_default_object._dict_of_keys_and_node_children:
return _node_or_default_object
else:
return _node_or_default_object._object_stored_in_node
def pop_path_to_node_child(self, arg_path, arg_bool_path_is_absolute=False, arg_default_value_to_return=_OBJECT_FOR_RAISING_ERRORS):
"""
Returns the node located at the path, and removes this node.
Any child nodes remain attached to the popped node, and are no longer considered
part of the original data tree.
Example:
If the tree has the address: "1.2.3.4" and the user pops the node "1.2.3",
then "3" is removed from the tree. The node at "4" remains attached to the node
at "3", and becomes inaccessible to the tree that popped the path.
The node at address "1.2" remains unaffected, except that its connection to node "3"
is severed.
Arguments:
arg_path - This is the path to the node. It can either be a delimited string, or a list of keys.
arg_default_value_to_return - This is the default value returned if arg_path does not exist in the searched area.
arg_bool_path_is_absolute - If True, starts from the entire tree's root node. If False, the method focuses on the children in
the sub tree.
"""
_node_start = self._node_root if arg_bool_path_is_absolute else self
item_node = _node_start
_list_of_path_parts = self.get_list_of_keys_from_path(arg_path)
if arg_default_value_to_return is _OBJECT_FOR_RAISING_ERRORS:
for item_path_part in _list_of_path_parts:
try:
item_node = item_node._dict_of_keys_and_node_children[item_path_part]
except KeyError:
self._raise_error_because_key_or_path_failed(arg_key_or_path=arg_path,
arg_node_start=_node_start, )
else:
for item_path_part in _list_of_path_parts:
if item_path_part in item_node._dict_of_keys_and_node_children.keys():
item_node = item_node._dict_of_keys_and_node_children[item_path_part]
else:
return arg_default_value_to_return
self._integrate_deallocate_node(arg_key=_list_of_path_parts[-1],
arg_node=item_node, )
return item_node
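# Usage sketch (mirrors the "1.2.3.4" example in the docstring above; the
# Dict_tree_node / append_path names come from print_tree's examples below):
#   _tree = Dict_tree_node()
#   _tree.append_path([1, 2, 3, 4])
#   _node = _tree.pop_path_to_node_child([1, 2, 3])
#   # node "3" is detached; node "4" is still attached to it, but neither
#   # is reachable from _tree any longer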
#
# Public - print
#
def print_object(self, arg_object, arg_name_for_object=None):
"""
This method prints information in a reasonably easy to read format, and
compensates for some formatting challenges in pprint.
Reminder: Processes like Cythonize do not like a self.print() method, so this
had to be changed to print_object.
Arguments:
arg_object - This can be pretty much anything.
arg_name_for_object - If this contains a value, then the name provided
is displayed above arg_object's printed information. If this value is None
then only arg_object's info will print.
"""
if arg_name_for_object is not None:
print(arg_name_for_object, "=", )
print("\n".join(self._print_info_get_list_of_strings_formatted(
arg_object)), "\n\n", )
def print_tree(self, arg_bool_search_entire_tree=False, arg_names_for_attributes_to_print=None):
"""
Prints output for the data tree.
Example code and output…
Code:
_dict_tree = Dict_tree_node()
_dict_tree.append_path( [ 1, 2, 3, ] )
_dict_tree.print_tree()
Output:
---PRINTING TREE---
--- PATH: (root) ---
--- PATH: 1 ---
--- PATH: 1.2 ---
--- PATH: 1.2.3 ---
Code:
_dict_tree = Dict_tree_node()
_node = _dict_tree.append_path( [ 1, 2, 3, ] )
_node.set_object_stored_in_node( "EXAMPLE" )
_dict_tree.print_tree( arg_names_for_attributes_to_print = "_object_stored_in_node" )
Output:
---PRINTING TREE---
--- PATH: (root) ---
_object_stored_in_node = None
--- PATH: 1 ---
_object_stored_in_node = None
--- PATH: 1.2 ---
_object_stored_in_node = None
--- PATH: 1.2.3 ---
_object_stored_in_node = EXAMPLE
Arguments:
arg_bool_search_entire_tree - Searches entire tree if True, and only the sub-tree if False.
arg_names_for_attributes_to_print can be a single string, or a list of strings. This will include the attributes in the print output.
arg_bool_path_is_absolute - If True, starts from the entire tree's root node. If False, the method focuses on the children in
the sub tree.
"""
_list_of_names_for_attributes_to_print = sorted(
self._get_list_converted_from_object(arg_names_for_attributes_to_print))
print("---PRINTING TREE---\n")
_stack_to_process_pairs_paths_and_nodes = deque(
[[[], self._node_root if arg_bool_search_entire_tree else self, ]])
while _stack_to_process_pairs_paths_and_nodes:
item_path, item_node = _stack_to_process_pairs_paths_and_nodes.pop()
if _list_of_names_for_attributes_to_print == None:
self._print_node(arg_path=item_path,
arg_node=item_node, )
else:
self._print_node(arg_path=item_path,
arg_node=item_node,
arg_list_of_names_for_attributes_to_print=_list_of_names_for_attributes_to_print, )
if item_node._dict_of_keys_and_node_children:
for item_key_child, item_node_child in item_node._dict_of_keys_and_node_children.items():
item_path_child = [*item_path, item_key_child, ]
_stack_to_process_pairs_paths_and_nodes.append(
[item_path_child, item_node_child, ])
#
# Public - set
#
def set_object_stored_in_node(self, arg_object, arg_path=None, arg_bool_path_is_absolute=False):
"""
Stores arg_object in a node within the tree. If arg_path is not defined, arg_object is stored in the
current node. If arg_path is defined, then the method will store arg_object in the node located at the path.
Note: In keeping with dict’s regular behavior, if the path doesn’t exist, then the method will create
the path.
Arguments:
arg_object - This can be any object that could be stored in a variable.
arg_path - This can be a delimited string, list of keys, or None. If the value is None, then the object set is an
attribute of the current node.
arg_bool_path_is_absolute - If True, starts from the entire tree's root node. If False, the method focuses on the children in
the sub tree.
"""
_node = self._node_root if arg_bool_path_is_absolute else self
if arg_path is None:
_node._object_stored_in_node = arg_object
return _node
else:
_node = _node.append_path(arg_path=arg_path)
_node._object_stored_in_node = arg_object
return _node
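# Usage sketch (names as in print_tree's docstring examples below):
#   _tree = Dict_tree_node()
#   _tree.set_object_stored_in_node("EXAMPLE", arg_path=[1, 2, 3])
#   # creates the path 1 -> 2 -> 3 if needed and stores "EXAMPLE" at its end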
#
# Public - setup
#
def setup_tree_based_on_data_structure(self, arg_data, arg_keys_for_categorizing_nodes=None, arg_bool_search_entire_tree=False):
"""
This method takes arg_data, and builds the data tree based on arg_data’s contents.
arg_data can be one of the following:
-dict
-nested dict
-list of dicts
-another tree node
Note:
For users who want to use this library for importing data, I recommend Python’s built-in json
library for json data, and xml.etree.cElementTree for actual XML parsing. Produce a nested dict,
or list of dicts, and pass the result in as arg_data.
return np.sort(self.get_channel_order(
channel_orderfile=channel_orderfile)[n_cut:-n_cut])
def get_subband_from_channel(self, band, channel, channelorderfile=None,
yml=None):
"""Returns subband number given a channel number
Args
----
band : int
Which band we're working in.
channel : int
Ranges 0..(n_channels-1), cryo channel number.
channelorderfile : str or None, optional, default None
Path to file containing order of channels.
Returns
-------
subband : int
The subband the channel lives in.
"""
n_subbands = self.get_number_sub_bands(band, yml=yml)
n_channels = self.get_number_channels(band, yml=yml)
n_chanpersubband = n_channels // n_subbands
if channel >= n_channels:
raise ValueError('channel number exceeds number of channels')
if channel < 0:
raise ValueError('channel number is less than zero!')
chanOrder = self.get_channel_order(band, channelorderfile)
idx = np.where(chanOrder == channel)[0]
subband = int(idx[0] // n_chanpersubband)
return subband
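# Usage sketch (hypothetical values; assumes a configured instance S):
#   S.get_subband_from_channel(band=0, channel=16)
#   # with, say, 512 channels and 128 subbands there are 4 channels per
#   # subband, so the result is the index where channel 16 sits in the
#   # channel order, divided by 4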
def get_subband_centers(self, band, as_offset=True, hardcode=False,
yml=None):
""" returns frequency in MHz of subband centers
Args
----
band : int
Which band.
as_offset : bool, optional, default True
Whether to return as offset from band center.
"""
if hardcode:
#bandCenterMHz = 3.75 + 0.5*(band + 1)
digitizer_frequency_mhz = 614.4
n_subbands = 128
else:
digitizer_frequency_mhz = self.get_digitizer_frequency_mhz(band,
yml=yml)
n_subbands = self.get_number_sub_bands(band, yml=yml)
subband_width_MHz = 2 * digitizer_frequency_mhz / n_subbands
subbands = list(range(n_subbands))
subband_centers = (np.arange(1, n_subbands + 1) - n_subbands/2) * \
subband_width_MHz/2
if not as_offset:
subband_centers += self.get_band_center_mhz(band, yml=yml)
return subbands, subband_centers
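# Worked example for the hardcoded case: with a 614.4 MHz digitizer and
# 128 subbands, subband_width_MHz = 2 * 614.4 / 128 = 9.6 MHz, and the
# centers run from (1 - 64) * 4.8 = -302.4 MHz to (128 - 64) * 4.8 =
# +307.2 MHz as offsets from the band center.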
def get_channels_in_subband(self, band, subband, channelorderfile=None):
"""
Returns channels in subband
Args
----
band : int
Which band.
subband : int
Subband number, ranges from 0..127.
channelorderfile : str or None, optional, default None
Path to file specifying channel order.
Returns
-------
subband_chans : int array
The channels in the subband.
"""
n_subbands = self.get_number_sub_bands(band)
n_channels = self.get_number_channels(band)
n_chanpersubband = int(n_channels / n_subbands)
if subband >= n_subbands:
raise ValueError("subband requested exceeds number of subbands")
if subband < 0:
raise ValueError("requested subband less than zero")
chanOrder = self.get_channel_order(band,channelorderfile)
subband_chans = chanOrder[subband * n_chanpersubband : subband *
n_chanpersubband + n_chanpersubband]
return subband_chans
def iq_to_phase(self, i, q):
"""
Changes IQ to phase
"""
return np.unwrap(np.arctan2(q, i))
def hex_string_to_int(self, s):
"""
Converts hex string, which is an array of characters, into an int.
Args
----
s : character array
An array of chars to be turned into a single int.
Returns
-------
i : int
The 64 bit int.
"""
return int(''.join(chr(x) for x in s), 0)
def int_to_hex_string(self, i):
"""
Converts an int into a string of characters.
Args
----
i : int
A 64 bit int to convert into hex.
Returns
-------
s : char array
A character array representing the int.
"""
# Must be array length 300
s = np.zeros(300, dtype=int)
i_hex = hex(i)
for j in np.arange(len(i_hex)):
s[j] = ord(i_hex[j])
return s
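# Round-trip note: int_to_hex_string(255) yields the characters of '0xff'
# followed by zero padding, so hex_string_to_int() will only parse it back
# if the trailing zeros are stripped first, e.g.
#   int(''.join(chr(x) for x in s if x), 0)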
def set_tes_bias_bipolar(self, bias_group, volt, do_enable=True,
flip_polarity=False, **kwargs):
"""
Set an individual TES bias group to the specified voltage, in
volts. Asserts if the requested bias group is not defined in
the pysmurf configuration file. The positive DAC in the bias
group is set to +volt/2, while the negative DAC in the bias
group is set to -volt/2.
Args
----
bias_group : int
The bias group.
volt : float
The TES bias to command in volts.
do_enable : bool, optional, default True
Sets the enable bit. Only must be done once.
flip_polarity : bool, optional, default False
Sets the voltage to volt*-1.
"""
# Make sure the requested bias group is in the list of defined
# bias groups.
bias_groups = self.bias_group_to_pair[:,0]
assert (bias_group in bias_groups),\
f'Bias group {bias_group} is not defined (available bias '+\
f'groups are {bias_groups}). Doing nothing!'
bias_order = bias_groups
dac_positives = self.bias_group_to_pair[:,1]
dac_negatives = self.bias_group_to_pair[:,2]
dac_idx = np.ravel(np.where(bias_order == bias_group))
dac_positive = dac_positives[dac_idx][0]
dac_negative = dac_negatives[dac_idx][0]
volts_pos = volt / 2
volts_neg = - volt / 2
if flip_polarity:
volts_pos *= -1
volts_neg *= -1
if do_enable:
self.set_rtm_slow_dac_enable(dac_positive, 2, **kwargs)
self.set_rtm_slow_dac_enable(dac_negative, 2, **kwargs)
self.set_rtm_slow_dac_volt(dac_positive, volts_pos, **kwargs)
self.set_rtm_slow_dac_volt(dac_negative, volts_neg, **kwargs)
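# Usage sketch (assumes a configured instance S): S.set_tes_bias_bipolar(0, 1.0)
# drives bias group 0's positive DAC to +0.5 V and its negative DAC to -0.5 V,
# per the docstring above.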
def set_tes_bias_bipolar_array(self, bias_group_volt_array, do_enable=True, **kwargs):
"""
Set TES bipolar values for all DACs at once. Set using a
pyrogue array write, so should be much more efficient than
setting each TES bias one at a time (a single register
transaction vs. many). Only DACs assigned to TES bias groups
are touched by this function. The enable status and output
voltage of all DACs not assigned to a TES bias group are
maintained.
Args
----
bias_group_volt_array : float array
The TES bias to command in voltage for each bipolar TES
bias group. Should be (n_bias_groups,).
do_enable : bool, optional, default True
Set the enable bit for both DACs for every TES bias group.
"""
n_bias_groups = self._n_bias_groups
# in this function we're only touching the DACs defined in TES
# bias groups. Need to make sure we carry along the setting
# and enable of any DACs that are being used for something
# else.
dac_enable_array = self.get_rtm_slow_dac_enable_array()
dac_volt_array = self.get_rtm_slow_dac_volt_array()
if len(bias_group_volt_array) != n_bias_groups:
self.log("Received the wrong number of biases. Expected " +
f"an array of n_bias_groups={n_bias_groups} voltages",
self.LOG_ERROR)
else:
for bg in np.arange(n_bias_groups):
bias_order = self.bias_group_to_pair[:,0]
dac_positives = self.bias_group_to_pair[:,1]
dac_negatives = self.bias_group_to_pair[:,2]
bias_group_idx = np.ravel(np.where(bias_order == bg))
dac_positive = dac_positives[bias_group_idx][0] - 1 # freakin Mitch
dac_negative = dac_negatives[bias_group_idx][0] - 1 # 1 vs 0 indexing
volts_pos = bias_group_volt_array[bg] / 2
volts_neg = - bias_group_volt_array[bg] / 2
if do_enable:
dac_enable_array[dac_positive] = 2
dac_enable_array[dac_negative] = 2
dac_volt_array[dac_positive] = volts_pos
dac_volt_array[dac_negative] = volts_neg
if do_enable:
self.set_rtm_slow_dac_enable_array(dac_enable_array, **kwargs)
self.set_rtm_slow_dac_volt_array(dac_volt_array, **kwargs)
def set_tes_bias_off(self, **kwargs):
"""
Turns off all of the DACs assigned to a TES bias group in the
pysmurf configuration file.
"""
self.set_tes_bias_bipolar_array(np.zeros(self._n_bias_groups), **kwargs)
def get_tes_bias_bipolar(self, bias_group, return_raw=False, **kwargs):
"""
Returns the bias voltage in units of Volts for the requested
TES bias group.
Args
----
bias_group : int
The number of the bias group. Asserts if bias_group
requested is not defined in the pysmurf configuration
file.
return_raw : bool, optional, default False
If True, returns pos and neg terminal values.
Returns
-------
val : float
The bipolar output TES bias voltage for the requested bias
group. If return_raw=True, then returns a two element
float array containing the output voltages of the two DACs
assigned to the requested TES bias group.
"""
# Make sure the requested bias group is in the list of defined
# bias groups.
bias_groups = self.bias_group_to_pair[:,0]
assert (bias_group in bias_groups),\
f'Bias group {bias_group} is not defined (available bias groups are {bias_groups}). Doing nothing!'
bias_order = bias_groups
dac_positives = self.bias_group_to_pair[:,1]
dac_negatives = self.bias_group_to_pair[:,2]
dac_idx = np.ravel(np.where(bias_order == bias_group))
dac_positive = dac_positives[dac_idx][0]-1
dac_negative = dac_negatives[dac_idx][0]-1
volt_array = self.get_rtm_slow_dac_volt_array(**kwargs)
volts_pos = volt_array[dac_positive]
volts_neg = volt_array[dac_negative]
if return_raw:
return volts_pos, volts_neg
else:
return volts_pos - volts_neg
def get_tes_bias_bipolar_array(self, return_raw=False, **kwargs):
"""
Returns array of bias voltages per bias group in units of volts.
Currently hard coded to return the first 8 as (8,) array. I'm sorry -CY
Args
----
return_raw : bool, optional, default False
If True, returns +/- terminal vals as separate arrays
(pos, then negative)
"""
bias_order = self.bias_group_to_pair[:,0]
dac_positives = self.bias_group_to_pair[:,1]
dac_negatives = self.bias_group_to_pair[:,2]
n_bias_groups = self._n_bias_groups
bias_vals_pos = np.zeros((n_bias_groups,))
bias_vals_neg = np.zeros((n_bias_groups,))
volts_array = self.get_rtm_slow_dac_volt_array(**kwargs)
for idx in np.arange(n_bias_groups):
dac_idx = np.ravel(np.where(bias_order == idx))
dac_positive = dac_positives[dac_idx][0] - 1
dac_negative = dac_negatives[dac_idx][0] - 1
bias_vals_pos[idx] = volts_array[dac_positive]
bias_vals_neg[idx] = volts_array[dac_negative]
if return_raw:
return bias_vals_pos, bias_vals_neg
else:
return bias_vals_pos - bias_vals_neg
def set_amplifier_bias(self, bias_hemt=None, bias_50k=None, **kwargs):
"""
Sets the HEMT and 50 K amp (if present) voltages. If no
arguments given, looks for default biases in cfg
(amplifier:hemt_Vg and amplifier:LNA_Vg). If nothing found in
cfg file, does nothing to either bias. Enable is written to
both amplifier bias DACs regardless of whether or not they are
set to new values - need to check that this is ok. If user
specifies values those override cfg file defaults. Prints
resulting amplifier biases at the end with a short wait in
case there's latency between setting and reading.
Args
----
bias_hemt : float or None, optional default None
The HEMT bias voltage in units of volts.
"""Vocabulary for theme-based transformer
Author: <NAME>
Email: <EMAIL>
Date: 2021/11/03
"""
import pickle
import numpy as np
import miditoolkit
import os
import math
from miditoolkit.midi import parser as mid_parser
from miditoolkit.midi import containers as ct
class Vocab(object):
def __init__(self):
"""initialize some vocabulary settings
"""
# split each beat into 4 subbeats
self.q_beat = 4
# dictionary for matching token ID to name and the other way around.
self.token2id = {}
self.id2token = {}
# midi pitch number : 1 ~ 127 (highest pitch)
self._pitch_bins = np.arange(start=1,stop=128)
# duration tokens 1~64 of self.q_beat
self._duration_bins = np.arange(start=1,stop=self.q_beat*16+1)
# velocity tokens 1~126 (corresponding to midi format)
self._velocity_bins = np.arange(start=1,stop=127)
# tempo tokens 17~197 (determined from our dataset)
self._tempo_bins = np.arange(start=17,stop=197,step=3)
# position(subbeat) tokens 0~15, indicate the relative position with in a bar
self._position_bins = np.arange(start=0,stop=16)
self.n_tokens = 0
self.token_type_base = {}
self.tracks = ["MELODY","BRIDGE","PIANO"]
self.build()
# vocab
# Note-On (129) : 0 (padding), 1 ~ 127 (highest pitch), 128 (rest)
# Note-Duration : 1 ~ 64 subbeats (self.q_beat = 4 subbeats per beat, i.e. up to 16 beats)
# min resolution : 1/16 note
def build(self):
"""build our vocab
"""
self.token2id = {}
self.id2token = {}
self.n_tokens = 0
self.token2id['padding'] = 0
self.n_tokens += 1
# Note related tokens==================================================================
# Create Note-On tokens for each track
for track in self.tracks:
# Note-On
self.token_type_base['Note-On-{}'.format(track)] = self.n_tokens
for i in self._pitch_bins:
self.token2id[ 'Note-On-{}_{}'.format(track,i) ] = self.n_tokens
self.n_tokens += 1
# Create Note-Duration tokens for each track
for track in self.tracks:
# Note-Duration
self.token_type_base['Note-Duration-{}'.format(track)] = self.n_tokens
for note_dur in self._duration_bins:
self.token2id[ 'Note-Duration-{}_{}'.format(track,note_dur) ] = self.n_tokens
self.n_tokens += 1
# Create Note-Velocity tokens for each track
for track in self.tracks:
# Note-Velocity
self.token_type_base['Note-Velocity-{}'.format(track)] = self.n_tokens
for vel in self._velocity_bins:
self.token2id[ 'Note-Velocity-{}_{}'.format(track,vel) ] = self.n_tokens
self.n_tokens += 1
# Metric related tokens==================================================================
# Tempo
self.token_type_base['Tempo'] = self.n_tokens
for tmp in self._tempo_bins:
self.token2id[ 'Tempo_{}'.format(tmp) ] = self.n_tokens
self.n_tokens += 1
# Positions
self.token_type_base['Position'] = self.n_tokens
for pos in self._position_bins:
self.token2id[ 'Position_{}'.format(pos) ] = self.n_tokens
self.n_tokens += 1
# Bar
self.token_type_base['Bar'] = self.n_tokens
self.token2id[ 'Bar' ] = self.n_tokens
self.n_tokens += 1
# Theme related tokens==================================================================
# Phrase annotation (not used in our final paper)
self.token_type_base['Phrase'] = self.n_tokens
self.token2id[ 'Phrase_Start' ] = self.n_tokens
self.n_tokens += 1
self.token2id[ 'Phrase_End' ] = self.n_tokens
self.n_tokens += 1
# Theme annotation
self.token_type_base['Theme'] = self.n_tokens
self.token2id[ 'Theme_Start' ] = self.n_tokens
self.n_tokens += 1
self.token2id[ 'Theme_End' ] = self.n_tokens
self.n_tokens += 1
for w , v in self.token2id.items():
self.id2token[v] = w
self.n_tokens = len(self.token2id)
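# Usage sketch: the mapping is a plain dict in both directions, e.g.
#   v = Vocab()
#   v.token2id['Bar']                 # -> some integer ID
#   v.id2token[v.token2id['Bar']]     # -> 'Bar'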
def getPitch(self,input_event):
"""Return corresponding note pitch
if input_event is not a note, it returns -1
Args:
input_event (str or int): REMI Event Name or vocab ID
"""
if isinstance(input_event,int):
input_event = self.id2token[input_event]
elif isinstance(input_event,str):
pass
else:
try:
input_event = int(input_event)
input_event = self.id2token[input_event]
except:
raise TypeError("input_event should be int or str, input_event={}, type={}".format(input_event,type(input_event)))
if not input_event.startswith("Note-On"):
return -1
assert int(input_event.split("_")[1]) >=1 and int(input_event.split("_")[1]) <=127
return int(input_event.split("_")[1])
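# Usage sketch:
#   v.getPitch('Note-On-MELODY_60')   # -> 60
#   v.getPitch('Bar')                 # -> -1 (not a note event)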
def midi2REMI(self,midi_path,quantize=True,trim_intro = True,trim_outro=True,include_bridge=False,extend_theme=False,bar_first=False,theme_annotations=True,verbose = False):
"""convert midi file to token representation
Args:
midi_path (str): the path of input midi file
trim_intro (bool, optional): ignore the intro part of the song. Defaults to True.
trim_outro (bool, optional): ignore the outro part of the song. Defaults to True.
include_bridge (bool, optional): include notes from the BRIDGE track. Defaults to False.
extend_theme (bool, optional): extend the theme region to at least MIN_MEL_NOTES=8 notes. Defaults to False.
bar_first (bool, optional): place Bar token in front of Theme-Start token. Defaults to False.
theme_annotations (bool, optional): including theme-realted tokens. Defaults to True.
verbose (bool, optional): print some message. Defaults to False.
Returns:
list: sequence of tokens
"""
MIN_MEL_NOTES = 8
midi_obj = mid_parser.MidiFile(midi_path)
# calculate the min step (in ticks) for REMI representation
min_step = midi_obj.ticks_per_beat * 4 / 16
# quantize
if quantize:
for i in range(len(midi_obj.instruments)):
for n in range(len(midi_obj.instruments[i].notes)):
midi_obj.instruments[i].notes[n].start = int(int(midi_obj.instruments[i].notes[n].start / min_step) * min_step)
midi_obj.instruments[i].notes[n].end = int(int(midi_obj.instruments[i].notes[n].end / min_step) * min_step)
if theme_annotations:
# select theme info track
theme_boundary_track = list(filter(lambda x: x.name=="theme info track",midi_obj.instruments))
assert len(theme_boundary_track) == 1
# parsing notes in each tracks (ignore BRIDGE)
notesAndtempos = []
midi_obj.instruments[0].notes = sorted(midi_obj.instruments[0].notes,key=lambda x: x.start)
# add notes
melody_start = sorted(midi_obj.instruments[0].notes,key=lambda x: x.start)[0].start
melody_end = sorted(midi_obj.instruments[0].notes,key=lambda x: x.start)[-1].end
for i in range(3):
if not include_bridge and midi_obj.instruments[i].name == "BRIDGE":
continue
if midi_obj.instruments[i].name == "Theme info track":
continue
notes = midi_obj.instruments[i].notes
for n in notes:
# assert (trim_intro and n.start>=melody_start or not trim_intro)
if trim_intro and n.start>=melody_start or not trim_intro:
if trim_outro and n.start<=melody_end or not trim_outro:
notesAndtempos.append({
"priority" : i+1,
"priority_1" : n.pitch,
"start_tick" : n.start,
"obj_type" : "Note-{}".format(midi_obj.instruments[i].name),
"obj" : n
})
# add tempos
for t in midi_obj.tempo_changes:
# assert (trim_intro and t.time>=melody_start or not trim_intro)
if trim_intro and t.time>=melody_start or not trim_intro:
if trim_outro and t.time<=melody_end or not trim_outro:
notesAndtempos.append({
"priority" : 0,
"priority_1" : 0,
"start_tick" : t.time,
"obj_type" : "Tempo",
"obj" : t
})
if trim_intro and len([x for x in midi_obj.tempo_changes if x.time==melody_start]) == 0:
t = [x for x in sorted(midi_obj.tempo_changes,key= lambda z: z.time) if x.time < melody_start]
if not len(t) == 0:
t = t[-1]
notesAndtempos.append({
"priority" : 0,
"priority_1" : 0,
"start_tick" : melody_start,
"obj_type" : "Tempo",
"obj" : t
})
notesAndtempos = sorted(notesAndtempos,key=lambda x: (x["start_tick"],x["priority"],-x["priority_1"]))
if theme_annotations:
theme_boundary_track = theme_boundary_track[0]
theme_boundary_pitch = min([x.pitch for x in theme_boundary_track.notes])
theme_boundaries = [ [x.start,x.end] for x in theme_boundary_track.notes if x.pitch == theme_boundary_pitch]
assert not len(theme_boundaries) == 0
if verbose:
print(theme_boundaries)
if extend_theme:
# extend theme region 8~9
for b_i,b in enumerate(theme_boundaries[:-1]):
melody_notes = [x for x in midi_obj.instruments[0].notes if x.start>= b[0] and x.start< theme_boundaries[b_i+1][0] ]
cur_bound = 0
for x in melody_notes:
if x.start < b[1]:
cur_bound += 1
else:
break
if cur_bound + 1 >= MIN_MEL_NOTES :
continue
# try to extend
extend_idx = min(MIN_MEL_NOTES,len(melody_notes)) - 1
theme_boundaries[b_i][1] = melody_notes[extend_idx].end
b_i = 0
in_theme = False
# group
bar_segments = []
bar_ticks = midi_obj.ticks_per_beat * 4
if verbose:
print("Bar tick length: {}".format(bar_ticks))
for bar_start_tick in range(0,notesAndtempos[-1]["start_tick"],bar_ticks):
if verbose:
print("Bar {} at tick: {}".format(bar_start_tick // bar_ticks,bar_start_tick))
bar_end_tick = bar_start_tick + bar_ticks
current_bar = []
bar_objs = list(filter(lambda x: x["start_tick"] >=bar_start_tick and x["start_tick"]< bar_end_tick,notesAndtempos))
bar_objs.insert(0,{"start_tick":-1})
if not bar_first:
if theme_annotations and not in_theme and theme_boundaries[b_i][0] == bar_start_tick:
current_bar.append("Theme_Start")
in_theme = True
if verbose:
print("Theme start")
if not in_theme and trim_intro and bar_start_tick+bar_ticks < melody_start:
if verbose:
print("into trimmed")
continue
current_bar.append("Bar")
else:
if not in_theme and trim_intro and bar_start_tick+bar_ticks < melody_start:
if verbose:
print("into trimmed")
continue
current_bar.append("Bar")
if theme_annotations and not in_theme and theme_boundaries[b_i][0] == bar_start_tick:
current_bar.append("Theme_Start")
in_theme = True
if verbose:
print("Theme start")
for i,obj in enumerate(bar_objs):
if obj["start_tick"]==-1 : continue
if not obj["start_tick"] == bar_objs[i-1]["start_tick"]:
# insert Position Event
pos = (obj["start_tick"] - bar_start_tick) / midi_obj.ticks_per_beat * self.q_beat
pos_index = np.argmin(abs(pos - self._position_bins)) # use the closest position
pos = self._position_bins[pos_index]
current_bar.append("Position_{}".format(pos))
if obj["obj_type"].startswith("Note"):
track_name = obj["obj_type"].split('-')[1].upper()
# add pitch
current_bar.append("Note-On-{}_{}".format(track_name,obj["obj"].pitch))
# add duration
dur = (obj["obj"].end - obj["obj"].start) / midi_obj.ticks_per_beat * self.q_beat
dur_index = np.argmin(abs(dur - self._duration_bins)) # use the closest position
dur = self._duration_bins[dur_index]
current_bar.append("Note-Duration-{}_{}".format(track_name,dur))
# add velocity
vel_index = np.argmin(abs(obj["obj"].velocity - self._velocity_bins)) # use the closest position
vel = self._velocity_bins[vel_index]
current_bar.append("Note-Velocity-{}_{}".format(track_name,vel))
elif obj["obj_type"].startswith("Tempo"):
# tempo
tmp_index = np.argmin(abs(obj["obj"].tempo - self._tempo_bins)) # use the closest position
tmp = self._tempo_bins[tmp_index]
current_bar.append(obj["obj_type"] + "_{}".format(tmp))
else:
# theme start end
current_bar.append(obj["obj_type"])
if theme_annotations and in_theme and theme_boundaries[b_i][1] == bar_start_tick + bar_ticks:
current_bar.append("Theme_End")
in_theme = False
if verbose:
print("Theme End")
if not b_i == len(theme_boundaries) - 1:
b_i += 1
bar_segments.extend(current_bar)
output_ids = [self.token2id[x] for x in bar_segments]
return output_ids
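# Usage sketch (hypothetical path): decode the returned IDs back to token
# names via id2token:
#   ids = v.midi2REMI('data/song.mid')
#   tokens = [v.id2token[i] for i in ids]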
def preprocessREMI(self,remi_sequence,always_include=False,max_seq_len=512,strict=True,verbose=False):
"""Preprocess token sequence
slicing the sequence for training our models
Args:
remi_sequence (List): the music token seqeunce
always_include (Bool): selected the data including either Theme-Start or Theme-End
max_seq_len (Int): maximum sequence length for each data
strict (Bool): the returning sequence should always include Theme-Start
Return:
{
"src" | |
#!/usr/bin/env python
# pylint: disable=C0301
# for the whitelist and the blacklist
# C0301: I'm ignoring this because breaking up error messages is painful
"""
_JobSubmitterPoller_t_
Submit jobs for execution.
"""
from __future__ import print_function, division
import logging
import os.path
import threading
import json
import time
from collections import defaultdict, Counter
try:
import cPickle as pickle
except ImportError:
import pickle
from Utils.Timers import timeFunction
from WMCore.DAOFactory import DAOFactory
from WMCore.WMExceptions import WM_JOB_ERROR_CODES
from WMCore.JobStateMachine.ChangeState import ChangeState
from WMCore.WorkerThreads.BaseWorkerThread import BaseWorkerThread
from WMCore.ResourceControl.ResourceControl import ResourceControl
from WMCore.DataStructs.JobPackage import JobPackage
from WMCore.FwkJobReport.Report import Report
from WMCore.WMException import WMException
from WMCore.BossAir.BossAirAPI import BossAirAPI
from WMCore.Services.ReqMgr.ReqMgr import ReqMgr
from WMCore.Services.ReqMgrAux.ReqMgrAux import ReqMgrAux
from WMComponent.JobSubmitter.JobSubmitAPI import availableScheddSlots
def jobSubmitCondition(jobStats):
for jobInfo in jobStats:
if jobInfo["Current"] >= jobInfo["Threshold"]:
return jobInfo["Condition"]
return "JobSubmitReady"
class JobSubmitterPollerException(WMException):
"""
_JobSubmitterPollerException_
This is the exception instance for
JobSubmitterPoller specific errors.
"""
pass
class JobSubmitterPoller(BaseWorkerThread):
"""
_JobSubmitterPoller_
The jobSubmitterPoller takes the jobs and organizes them into packages
before sending them to the individual plugin submitters.
"""
def __init__(self, config):
BaseWorkerThread.__init__(self)
myThread = threading.currentThread()
self.config = config
# DAO factory for WMBS objects
self.daoFactory = DAOFactory(package="WMCore.WMBS", logger=logging, dbinterface=myThread.dbi)
# Libraries
self.resourceControl = ResourceControl()
self.changeState = ChangeState(self.config)
self.bossAir = BossAirAPI(config=self.config, insertStates=True)
self.hostName = self.config.Agent.hostName
self.repollCount = getattr(self.config.JobSubmitter, 'repollCount', 10000)
self.maxJobsPerPoll = int(getattr(self.config.JobSubmitter, 'maxJobsPerPoll', 1000))
self.maxJobsToCache = int(getattr(self.config.JobSubmitter, 'maxJobsToCache', 50000))
self.maxJobsThisCycle = self.maxJobsPerPoll # changes as per schedd limit
self.cacheRefreshSize = int(getattr(self.config.JobSubmitter, 'cacheRefreshSize', 30000))
self.skipRefreshCount = int(getattr(self.config.JobSubmitter, 'skipRefreshCount', 20))
self.packageSize = getattr(self.config.JobSubmitter, 'packageSize', 500)
self.collSize = getattr(self.config.JobSubmitter, 'collectionSize', self.packageSize * 1000)
self.maxTaskPriority = getattr(self.config.BossAir, 'maxTaskPriority', 1e7)
self.condorFraction = 0.75 # update during every algorithm cycle
self.condorOverflowFraction = 0.2
self.ioboundTypes = ('LogCollect', 'Merge', 'Cleanup', 'Harvesting')
self.drainGracePeriod = getattr(self.config.JobSubmitter, 'drainGraceTime', 2 * 24 * 60 * 60) # 2 days
# Used for speed draining the agent
self.enableAllSites = False
# Additions for caching-based JobSubmitter
self.jobsByPrio = {} # key'ed by the final job priority, which contains a set of job ids
self.jobDataCache = {} # key'ed by the job id, containing the whole job info dict
self.jobsToPackage = {}
self.locationDict = {}
self.drainSites = dict()
self.drainSitesSet = set()
self.abortSites = set()
self.refreshPollingCount = 0
try:
if not getattr(self.config.JobSubmitter, 'submitDir', None):
self.config.JobSubmitter.submitDir = self.config.JobSubmitter.componentDir
self.packageDir = os.path.join(self.config.JobSubmitter.submitDir, 'packages')
if not os.path.exists(self.packageDir):
os.makedirs(self.packageDir)
except OSError as ex:
msg = "Error while trying to create packageDir %s\n!"
msg += str(ex)
logging.error(msg)
logging.debug("PackageDir: %s", self.packageDir)
logging.debug("Config: %s", config)
raise JobSubmitterPollerException(msg)
# Now the DAOs
self.listJobsAction = self.daoFactory(classname="Jobs.ListForSubmitter")
self.setLocationAction = self.daoFactory(classname="Jobs.SetLocation")
self.locationAction = self.daoFactory(classname="Locations.GetSiteInfo")
self.setFWJRPathAction = self.daoFactory(classname="Jobs.SetFWJRPath")
self.listWorkflows = self.daoFactory(classname="Workflow.ListForSubmitter")
# Keep a record of the thresholds in memory
self.currentRcThresholds = {}
self.useReqMgrForCompletionCheck = getattr(self.config.TaskArchiver, 'useReqMgrForCompletionCheck', True)
if self.useReqMgrForCompletionCheck:
# only set up this when reqmgr is used (not Tier0)
self.reqmgr2Svc = ReqMgr(self.config.General.ReqMgr2ServiceURL)
self.abortedAndForceCompleteWorkflowCache = self.reqmgr2Svc.getAbortedAndForceCompleteRequestsFromMemoryCache()
self.reqAuxDB = ReqMgrAux(self.config.General.ReqMgr2ServiceURL)
else:
# Tier0 case - set to None just for clarity (this private variable shouldn't be used)
self.abortedAndForceCompleteWorkflowCache = None
return
def getPackageCollection(self, sandboxDir):
"""
_getPackageCollection_
Given a sandbox directory, figure out which PackageCollection
the next job package should belong in.
"""
rawList = os.listdir(sandboxDir)
collections = []
numberList = []
for entry in rawList:
if 'PackageCollection' in entry:
collections.append(entry)
# If we have no collections, return 0 (PackageCollection_0)
if len(collections) < 1:
return 0
# Loop over the list of PackageCollections
for collection in collections:
collectionPath = os.path.join(sandboxDir, collection)
packageList = os.listdir(collectionPath)
collectionNum = int(collection.split('_')[1])
if len(packageList) < self.collSize:
return collectionNum
else:
numberList.append(collectionNum)
# If we got here, then all collections are full. We'll need
# a new one. Find the highest number, increment by one
numberList.sort()
return numberList[-1] + 1
def addJobsToPackage(self, loadedJob):
"""
_addJobsToPackage_
Add a job to a job package and then return the batch ID for the job.
Packages are only written out to disk once they contain packageSize jobs (500 by default). The
flushJobPackages() method must be called after all jobs have been added
to the cache and before they are actually submitted to make sure all the
job packages have been written to disk.
"""
if loadedJob["workflow"] not in self.jobsToPackage:
# First, let's pull all the information from the loadedJob
batchid = "%s-%s" % (loadedJob["id"], loadedJob["retry_count"])
sandboxDir = os.path.dirname(loadedJob["sandbox"])
# Second, assemble the jobPackage location
collectionIndex = self.getPackageCollection(sandboxDir)
collectionDir = os.path.join(sandboxDir,
'PackageCollection_%i' % collectionIndex,
'batch_%s' % batchid)
# Now create the package object
self.jobsToPackage[loadedJob["workflow"]] = {"batchid": batchid,
'id': loadedJob['id'],
"package": JobPackage(directory=collectionDir)}
jobPackage = self.jobsToPackage[loadedJob["workflow"]]["package"]
jobPackage[loadedJob["id"]] = loadedJob.getDataStructsJob()
batchDir = jobPackage['directory']
if len(jobPackage.keys()) == self.packageSize:
if not os.path.exists(batchDir):
os.makedirs(batchDir)
batchPath = os.path.join(batchDir, "JobPackage.pkl")
jobPackage.save(batchPath)
del self.jobsToPackage[loadedJob["workflow"]]
return batchDir
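# Sketch of the resulting on-disk layout (workflow name and ids are
# hypothetical): for the first job (id 1234, retry_count 0) of a workflow whose
# sandbox lives in .../MyWorkflow/, jobs accumulate under
#   .../MyWorkflow/PackageCollection_0/batch_1234-0/
# and the package is flushed to JobPackage.pkl there once packageSize
# (default 500) jobs have been added.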
def flushJobPackages(self):
"""
_flushJobPackages_
Write any jobs packages to disk that haven't been written out already.
"""
workflowNames = self.jobsToPackage.keys()
for workflowName in workflowNames:
jobPackage = self.jobsToPackage[workflowName]["package"]
batchDir = jobPackage['directory']
if not os.path.exists(batchDir):
os.makedirs(batchDir)
batchPath = os.path.join(batchDir, "JobPackage.pkl")
jobPackage.save(batchPath)
del self.jobsToPackage[workflowName]
return
def hasToRefreshCache(self):
"""
_hasToRefreshCache_
Check whether we should update the job data cache (or update it
with new jobs in the created state) or if we just skip it.
"""
if self.cacheRefreshSize == -1 or len(self.jobDataCache) < self.cacheRefreshSize or\
self.refreshPollingCount >= self.skipRefreshCount:
self.refreshPollingCount = 0
return True
else:
self.refreshPollingCount += 1
logging.info("Skipping cache update to be submitted. (%s job in cache)", len(self.jobDataCache))
return False
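# Refresh policy with the defaults above (cacheRefreshSize=30000,
# skipRefreshCount=20): a cache holding 45000 jobs is refreshed only on every
# 21st polling cycle, while a cache below 30000 jobs (or any cache when
# cacheRefreshSize is -1) is refreshed on every cycle.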
def refreshCache(self):
"""
_refreshCache_
Query WMBS for all jobs in the 'created' state. For all jobs returned
from the query, check if they already exist in the cache. If they
don't, unpickle them and combine their site white and black list with
the list of locations they can run at. Add them to the cache.
Each entry in the cache is a tuple with five items:
- WMBS Job ID
- Retry count
- Batch ID
- Path to sandbox
- Path to cache directory
"""
# make a counter for jobs pending to sites in drain mode within the grace period
countDrainingJobs = 0
timeNow = int(time.time())
badJobs = dict([(x, []) for x in range(71101, 71106)])
newJobIds = set()
logging.info("Refreshing priority cache with currently %i jobs", len(self.jobDataCache))
newJobs = self.listJobsAction.execute(limitRows=self.maxJobsToCache)
if self.useReqMgrForCompletionCheck:
# if reqmgr is used (not Tier0 Agent) get the aborted/forceCompleted record
abortedAndForceCompleteRequests = self.abortedAndForceCompleteWorkflowCache.getData()
else:
abortedAndForceCompleteRequests = []
logging.info("Found %s new jobs to be submitted.", len(newJobs))
if self.enableAllSites:
logging.info("Agent is in speed drain mode. Submitting jobs to all possible locations.")
logging.info("Determining possible sites for new jobs...")
jobCount = 0
for newJob in newJobs:
jobCount += 1
if jobCount % 5000 == 0:
logging.info("Processed %d/%d new jobs.", jobCount, len(newJobs))
# check whether newJob belongs to an aborted or force-completed workflow, and skip it if so.
if newJob['request_name'] in abortedAndForceCompleteRequests and \
newJob['task_type'] not in ['LogCollect', "Cleanup"]:
continue
jobID = newJob['id']
newJobIds.add(jobID)
if jobID in self.jobDataCache:
continue
pickledJobPath = os.path.join(newJob["cache_dir"], "job.pkl")
if not os.path.isfile(pickledJobPath):
# Then we have a problem - there's no file
logging.warning("Could not find pickled jobObject %s", pickledJobPath)
badJobs[71104].append(newJob)
continue
try:
with open(pickledJobPath, 'rb') as jobHandle:
loadedJob = pickle.load(jobHandle)
except Exception as ex:
logging.warning("Failed to load job pickle object %s", pickledJobPath)
badJobs[71105].append(newJob)
continue
# figure out possible locations for job
possibleLocations = loadedJob["possiblePSN"]
# Create another set of locations that may change when a site goes white/black listed
# It does not care about the non-draining or aborted sites; they may change, and that is the point
potentialLocations = set()
potentialLocations.update(possibleLocations)
# check if there is at least one site left to run the job
if len(possibleLocations) == 0:
newJob['fileLocations'] = loadedJob.get('fileLocations', [])
newJob['siteWhitelist'] = loadedJob.get('siteWhitelist', [])
newJob['siteBlacklist'] = loadedJob.get('siteBlacklist', [])
logging.warning("Input data location doesn't pass the site restrictions for job id: %s", jobID)
badJobs[71101].append(newJob)
continue
# if agent is in speed drain and has hit the threshold to submit to all sites, we can skip the logic below that excludes sites
if not self.enableAllSites:
# check for sites in aborted state and adjust the possible locations
nonAbortSites = [x for x in possibleLocations if x not in self.abortSites]
if nonAbortSites: # if there is at least one non-aborted/down site then run there, otherwise fail the job
possibleLocations = nonAbortSites
else:
newJob['possibleSites'] = possibleLocations
logging.warning("Job id %s can only run at a site in Aborted state", jobID)
badJobs[71102].append(newJob)
continue
# try to remove draining sites if possible, this is
"character(len=*), intent({f_intent}) :: {c_var}",
],
f_c_arg_names=["{c_var}"],
pre_call=[
"char *{cxx_var} = "
"{cast_static}char *{cast1}{c_var_cfi}->base_addr{cast2};",
],
temps=["cfi"],
),
dict(
# Native argument which uses CFI_cdesc_t.
name="c_mixin_arg_native_cfi",
iface_header=["ISO_Fortran_binding.h"],
cxx_local_var="pointer",
c_arg_decl=[
"CFI_cdesc_t *{c_var_cfi}",
],
f_arg_decl=[
"{f_type}, intent({f_intent}) :: {c_var}{f_assumed_shape}",
],
f_module_line="iso_c_binding:{f_kind}",
f_c_arg_names=["{c_var}"],
# pre_call=[
# "{c_type} *{cxx_var} = "
# "{cast_static}{c_type} *{cast1}{c_var_cfi}->base_addr{cast2};",
# ],
temps=["cfi", "extents", "lower"],
),
dict(
# Allocate copy of C pointer (requires +dimension)
name="c_mixin_native_cfi_allocatable",
post_call=[
"if ({cxx_var} != {nullptr}) {{+",
"{c_temp_lower_decl}"
"{c_temp_extents_decl}"
"int SH_ret = CFI_allocate({c_var_cfi}, \t{c_temp_lower_use},"
" \t{c_temp_extents_use}, \t0);",
"if (SH_ret == CFI_SUCCESS) {{+",
"{stdlib}memcpy({c_var_cfi}->base_addr, \t{cxx_var}, \t{c_var_cfi}->elem_len);",
#XXX "{C_memory_dtor_function}({cxx_var});",
"-}}",
"-}}",
],
),
dict(
# Convert C pointer to Fortran pointer
name="c_mixin_native_cfi_pointer",
post_call=[
"{{+",
"CFI_CDESC_T({rank}) {c_local_fptr};",
"CFI_cdesc_t *{c_local_cdesc} = {cast_reinterpret}CFI_cdesc_t *{cast1}&{c_local_fptr}{cast2};",
"void *{c_local_cptr} = const_cast<{c_type} *>({cxx_var});",
"{c_temp_extents_decl}"
"{c_temp_lower_decl}"
"int {c_local_err} = CFI_establish({c_local_cdesc},\t {c_local_cptr},"
"\t CFI_attribute_pointer,\t {cfi_type},"
"\t 0,\t {rank},\t {c_temp_extents_use});",
"if ({c_local_err} == CFI_SUCCESS) {{+",
"{c_local_err} = CFI_setpointer(\t{c_var_cfi},\t {c_local_cdesc},\t {c_temp_lower_use});",
"-}}",
"-}}",
],
local=["cptr", "fptr", "cdesc", "err"],
),
########################################
dict(
# c_in_native_*_cfi
# c_inout_native_*_cfi
name="c_in/inout_native_*_cfi",
mixin=[
"c_mixin_arg_native_cfi",
],
pre_call=[
"{cxx_type} *{cxx_var} = "
"{cast_static}{cxx_type} *{cast1}{c_var_cfi}->base_addr{cast2};",
],
),
########################################
dict(
name="c_in_char_*_cfi",
mixin=[
"c_mixin_arg_character_cfi",
],
# Null terminate string.
c_helper="ShroudStrAlloc ShroudStrFree",
pre_call=[
"char *{c_var} = "
"{cast_static}char *{cast1}{c_var_cfi}->base_addr{cast2};",
"char *{cxx_var} = ShroudStrAlloc(\t"
"{c_var},\t {c_var_cfi}->elem_len,\t {c_blanknull});",
],
post_call=[
"ShroudStrFree({cxx_var});",
],
),
dict(
name="c_out_char_*_cfi",
mixin=[
"c_mixin_arg_character_cfi",
],
c_helper="ShroudStrBlankFill",
post_call=[
"ShroudStrBlankFill({cxx_var}, {c_var_cfi}->elem_len);"
],
),
dict(
name="c_inout_char_*_cfi",
mixin=[
"c_mixin_arg_character_cfi",
],
# Null terminate string.
c_helper="ShroudStrAlloc ShroudStrCopy ShroudStrFree",
pre_call=[
"char *{c_var} = "
"{cast_static}char *{cast1}{c_var_cfi}->base_addr{cast2};",
"char *{cxx_var} = ShroudStrAlloc(\t"
"{c_var},\t {c_var_cfi}->elem_len,\t {c_blanknull});",
],
post_call=[
# nsrc=-1 will call strlen({cxx_var})
"ShroudStrCopy({c_var}, {c_var_cfi}->elem_len,"
"\t {cxx_var},\t -1);",
"ShroudStrFree({cxx_var});",
],
),
dict(
# Blank fill result.
name="c_function_char_scalar_cfi",
mixin=[
"c_mixin_arg_character_cfi",
],
c_impl_header=["<string.h>"],
cxx_impl_header=["<cstring>"],
cxx_local_var=None, # replace mixin
pre_call=[], # replace mixin
post_call=[
"char *{c_var} = "
"{cast_static}char *{cast1}{c_var_cfi}->base_addr{cast2};",
"{stdlib}memset({c_var}, ' ', {c_var_cfi}->elem_len);",
"{c_var}[0] = {cxx_var};",
],
),
dict(
# Copy result into caller's buffer.
name="f_function_char_*_cfi",
arg_c_call=["{f_var}"],
need_wrapper=True,
),
dict(
# Copy result into caller's buffer.
name="c_function_char_*_cfi",
mixin=[
"c_mixin_arg_character_cfi",
],
cxx_local_var="result",
pre_call=[], # undo mixin
c_helper="ShroudStrCopy",
post_call=[
# XXX c_type is undefined
# nsrc=-1 will call strlen({cxx_var})
"char *{c_var} = "
"{cast_static}char *{cast1}{c_var_cfi}->base_addr{cast2};",
"ShroudStrCopy({c_var}, {c_var_cfi}->elem_len,"
"\t {cxx_var},\t -1);",
],
return_type="void", # Convert to function.
),
dict(
name="c_function_char_*_cfi_allocatable",
mixin=[
"c_mixin_function_character",
],
return_type="void", # Convert to function.
f_c_arg_names=["{c_var}"],
f_arg_decl=[ # replace mixin
"character(len=:), intent({f_intent}), allocatable :: {c_var}",
],
cxx_local_var=None, # replace mixin
pre_call=[], # replace mixin
post_call=[
"if ({cxx_var} != {nullptr}) {{+",
"int SH_ret = CFI_allocate({c_var_cfi}, \t(CFI_index_t *) 0, \t(CFI_index_t *) 0, \tstrlen({cxx_var}));",
"if (SH_ret == CFI_SUCCESS) {{+",
"{stdlib}memcpy({c_var_cfi}->base_addr, \t{cxx_var}, \t{c_var_cfi}->elem_len);",
"-}}",
"-}}",
],
),
dict(
name="c_function_char_*_cfi_pointer",
mixin=[
"c_mixin_function_character",
],
return_type="void", # Convert to function.
f_c_arg_names=["{c_var}"],
f_arg_decl=[ # replace mixin
"character(len=:), intent({f_intent}), pointer :: {c_var}",
],
cxx_local_var=None, # replace mixin
pre_call=[], # replace mixin
post_call=[
# CFI_index_t nbar[1] = {3};
# CFI_CDESC_T(1) c_p;
# CFI_establish((CFI_cdesc_t* )&c_p, bar, CFI_attribute_pointer, CFI_type_int,
# nbar[0]*sizeof(int), 1, nbar);
# CFI_setpointer(f_p, (CFI_cdesc_t *)&c_p, NULL);
# CFI_index_t nbar[1] = {3};
"int {c_local_err};",
"if ({cxx_var} == {nullptr}) {{+",
"{c_local_err} = CFI_setpointer(\t{c_var_cfi},\t {nullptr},\t {nullptr});",
"-}} else {{+",
"CFI_CDESC_T(0) {c_local_fptr};",
"CFI_cdesc_t *{c_local_cdesc} = {cast_reinterpret}CFI_cdesc_t *{cast1}&{c_local_fptr}{cast2};",
"void *{c_local_cptr} = {cxx_nonconst_ptr};",
"size_t {c_local_len} = {stdlib}strlen({cxx_var});",
"{c_local_err} = CFI_establish({c_local_cdesc},\t {c_local_cptr},"
"\t CFI_attribute_pointer,\t CFI_type_char,"
"\t {c_local_len},\t 0,\t {nullptr});",
"if ({c_local_err} == CFI_SUCCESS) {{+",
"{c_var_cfi}->elem_len = {c_local_cdesc}->elem_len;", # set assumed-length
"{c_local_err} = CFI_setpointer(\t{c_var_cfi},\t {c_local_cdesc},\t {nullptr});",
"-}}",
"-}}",
],
local=["cptr", "fptr", "cdesc", "len", "err"],
),
########################################
# char **
dict(
name='c_in_char_**_cfi',
mixin=[
"c_mixin_arg_character_cfi",
],
f_arg_decl=[
"character(len=*), intent({f_intent}) :: {c_var}(:)",
],
pre_call=[
"char *{c_var} = "
"{cast_static}char *{cast1}{c_var_cfi}->base_addr{cast2};",
"size_t {c_var_len} = {c_var_cfi}->elem_len;",
"size_t {c_var_size} = {c_var_cfi}->dim[0].extent;",
"char **{cxx_var} = ShroudStrArrayAlloc("
"{c_var},\t {c_var_size},\t {c_var_len});",
],
temps=["cfi", "len", "size"],
c_helper="ShroudStrArrayAlloc ShroudStrArrayFree",
cxx_local_var="pointer",
post_call=[
"ShroudStrArrayFree({cxx_var}, {c_var_size});",
],
),
########################################
# std::string
dict(
# c_in_string_scalar_cfi
# c_in_string_*_cfi
# c_in_string_&_cfi
name="c_in_string_scalar/*/&_cfi",
mixin=[
"c_mixin_arg_character_cfi",
],
c_helper="ShroudLenTrim",
cxx_local_var="scalar", # replace mixin
pre_call=[
# Get Fortran character pointer and create std::string.
"char *{c_var} = "
"{cast_static}char *{cast1}{c_var_cfi}->base_addr{cast2};",
"size_t {c_local_trim} = ShroudLenTrim({c_var}, {c_var_cfi}->elem_len);",
"{c_const}std::string {cxx_var}({c_var}, {c_local_trim});",
],
local=["trim"],
),
dict(
# c_out_string_*_cfi
# c_out_string_&_cfi
name="c_out_string_*/&_cfi",
mixin=[
"c_mixin_arg_character_cfi",
],
c_helper="ShroudStrCopy",
cxx_local_var="scalar",
pre_call=[
"std::string {cxx_var};",
"char *{c_var} = "
"{cast_static}char *{cast1}{c_var_cfi}->base_addr{cast2};",
],
post_call=[
"ShroudStrCopy({c_var},"
"\t {c_var_cfi}->elem_len,"
"\t {cxx_var}{cxx_member}data(),"
"\t {cxx_var}{cxx_member}size());"
],
),
dict(
# c_inout_string_*_cfi
# c_inout_string_&_cfi
name="c_inout_string_*/&_cfi",
mixin=[
"c_mixin_arg_character_cfi",
],
c_helper="ShroudStrCopy",
cxx_local_var="scalar",
pre_call=[
"char *{c_var} = "
"{cast_static}char *{cast1}{c_var_cfi}->base_addr{cast2};",
"size_t {c_local_trim} = ShroudLenTrim({c_var}, {c_var_cfi}->elem_len);",
"{c_const}std::string {cxx_var}({c_var}, {c_local_trim});",
],
post_call=[
"ShroudStrCopy({c_var},"
"\t {c_var_cfi}->elem_len,"
"\t {cxx_var}{cxx_member}data(),"
"\t {cxx_var}{cxx_member}size());"
],
local=["trim"],
),
dict(
# c_function_string_scalar_cfi
# c_function_string_*_cfi
# c_function_string_&_cfi
name="c_function_string_scalar/*/&_cfi",
mixin=[
"c_mixin_arg_character_cfi",
],
cxx_local_var=None, # replace mixin
pre_call=[], # replace mixin
c_helper="ShroudStrCopy",
post_call=[
"char *{c_var} = "
"{cast_static}char *{cast1}{c_var_cfi}->base_addr{cast2};",
"if ({cxx_var}{cxx_member}empty()) {{+",
"ShroudStrCopy({c_var}, {c_var_cfi}->elem_len,"
"\t {nullptr},\t 0);",
"-}} else {{+",
"ShroudStrCopy({c_var}, {c_var_cfi}->elem_len,"
"\t {cxx_var}{cxx_member}data(),"
"\t {cxx_var}{cxx_member}size());",
"-}}",
],
return_type="void", # Convert to function.
),
# std::string * function()
dict(
# c_function_string_*_cfi_allocatable
# c_function_string_&_cfi_allocatable
name="c_function_string_*/&_cfi_allocatable",
mixin=[
"c_mixin_function_character",
],
f_arg_decl=[
"character(len=:), intent({f_intent}), allocatable :: {c_var}",
],
return_type="void", # Convert to function.
f_c_arg_names=["{c_var}"],
c_impl_header=["<string.h>"],
cxx_impl_header=["<cstring>"],
post_call=[
"int SH_ret = CFI_allocate({c_var_cfi}, \t(CFI_index_t *) 0, \t(CFI_index_t *) 0, \t{cxx_var}{cxx_member}length());",
"if (SH_ret == CFI_SUCCESS) {{+",
"{stdlib}memcpy({c_var_cfi}->base_addr,"
" \t{cxx_var}{cxx_member}data(),"
" \t{cxx_var}{cxx_member}length());",
"-}}",
],
),
# XXX - consolidate with c_function_*_cfi_pointer?
# XXX - via a helper to get address and length of string
dict(
name="c_function_string_*_cfi_pointer",
mixin=[
"c_mixin_function_character",
],
return_type="void", # Convert to function.
f_c_arg_names=["{c_var}"],
f_arg_decl=[ # replace mixin
"character(len=:), intent({f_intent}), pointer :: {c_var}",
],
cxx_local_var=None, # replace mixin
pre_call=[], # replace mixin
post_call=[
"int {c_local_err};",
"if ({cxx_var} == {nullptr}) {{+",
"{c_local_err} = CFI_setpointer(\t{c_var_cfi},\t {nullptr},\t {nullptr});",
"-}} else {{+",
"CFI_CDESC_T(0) {c_local_fptr};",
"CFI_cdesc_t *{c_local_cdesc} = {cast_reinterpret}CFI_cdesc_t *{cast1}&{c_local_fptr}{cast2};",
"void *{c_local_cptr} = const_cast<char *>({cxx_var}{cxx_member}data());",
"size_t {c_local_len} = {cxx_var}{cxx_member}length();",
"{c_local_err} = CFI_establish({c_local_cdesc},\t {c_local_cptr},"
"\t CFI_attribute_pointer,\t CFI_type_char,"
"\t {c_local_len},\t 0,\t {nullptr});",
"if ({c_local_err} == CFI_SUCCESS) {{+",
"{c_var_cfi}->elem_len = {c_local_cdesc}->elem_len;", # set assumed-length
"{c_local_err} = CFI_setpointer(\t{c_var_cfi},\t {c_local_cdesc},\t {nullptr});",
"-}}",
"-}}",
],
local=["cptr", "fptr", "cdesc", "len", "err"],
),
# std::string & function()
dict(
name="c_function_string_scalar_cfi_allocatable",
mixin=[
"c_mixin_function_character",
],
f_c_arg_names=["{c_var}"],
f_arg_decl=[ # replace mixin
"character(len=:), intent({f_intent}), allocatable :: {c_var}",
],
return_type="void", # convert to function
cxx_local_var=None, # replace mixin
pre_call=[], # replace mixin
post_call=[
"int SH_ret = CFI_allocate({c_var_cfi}, \t(CFI_index_t *) 0, \t(CFI_index_t *) 0, \t{cxx_var}.length());",
"if (SH_ret == CFI_SUCCESS) {{+",
"{stdlib}memcpy({c_var_cfi}->base_addr, \t{cxx_var}.data(), \t{c_var_cfi}->elem_len);",
"-}}",
],
destructor_name="new_string",
destructor=[
"std::string *cxx_ptr = \treinterpret_cast<std::string *>(ptr);",
"delete cxx_ptr;",
],
),
dict(
# f_function_string_scalar_cfi
# f_function_string_*_cfi
# f_function_string_&_cfi
name="f_function_string_scalar/*/&_cfi",
# XXX - avoid calling C directly since the Fortran function
# is returning a CHARACTER, which CFI cannot do.
# The Fortran wrapper passes the function result to C, which fills it.
need_wrapper=True,
arg_c_call=["{f_var}"],
),
# similar to f_char_scalar_allocatable
dict(
# f_function_string_scalar_cfi_allocatable
# f_function_string_*_cfi_allocatable
# f_function_string_&_cfi_allocatable
name="f_function_string_scalar/*/&_cfi_allocatable",
# XXX - avoid calling C directly since the Fortran function
# is returning an allocatable, which CFI cannot do.
# The Fortran wrapper passes the function result to C, which fills it.
need_wrapper=True,
arg_decl=[
"character(len=:), allocatable :: {f_var}",
],
arg_c_call=["{f_var}"],
),
dict(
# f_function_string_scalar_cfi_pointer
# f_function_string_*_cfi_pointer
# f_function_string_&_cfi_pointer
name="f_function_string_scalar/*/&_cfi_pointer",
# XXX - avoid calling C directly since the Fortran function
# is returning a pointer, which CFI cannot do.
# The Fortran wrapper passes the function result to C, which fills it.
need_wrapper=True,
arg_decl=[
"character(len=:), pointer :: {f_var}",
],
arg_c_call=["{f_var}"],
),
########################################
# native
dict(
name="f_out_native_*_cfi_allocatable",
),
dict(
# Set Fortran pointer to point to cxx_var
name="c_out_native_**_cfi_allocatable",
mixin=[
"c_mixin_arg_native_cfi",
"c_mixin_native_cfi_allocatable",
],
f_arg_decl=[
"{f_type}, intent({f_intent}), allocatable :: {c_var}{f_assumed_shape}",
],
pre_call=[
"{c_const}{c_type} * {cxx_var};",
],
arg_call=["&{cxx_var}"],
),
dict(
# Set Fortran pointer to point to cxx_var
name="c_out_native_**_cfi_pointer",
mixin=[
"c_mixin_arg_native_cfi",
"c_mixin_native_cfi_pointer",
],
f_arg_decl=[
"{f_type}, intent({f_intent}), pointer :: {c_var}{f_assumed_shape}",
],
# set pointer on fortran declaration
pre_call=[
"{c_const}{c_type} * {cxx_var};",
],
arg_call=["&{cxx_var}"],
),
dict(
# Pass result as an argument to C wrapper.
name="f_function_native_*_cfi_allocatable",
arg_decl=[
"{f_type}, allocatable :: {f_var}{f_assumed_shape}",
],
arg_c_call=["{f_var}"],
),
dict(
# Convert to subroutine and pass result as an argument.
# Return an allocated copy of data.
name="c_function_native_*_cfi_allocatable",
mixin=[
"c_mixin_arg_native_cfi",
"c_mixin_native_cfi_allocatable", # post_call
],
f_arg_decl=[
"{f_type}, intent({f_intent}), allocatable :: {c_var}{f_assumed_shape}",
],
cxx_local_var="result",
return_type="void", # Convert to function.
),
dict(
# Pass result as an argument to C wrapper.
name="f_function_native_*_cfi_pointer",
arg_decl=[
"{f_type}, pointer :: {f_var}{f_assumed_shape}",
],
pre_call=[
"nullify({f_var})",
],
arg_c_call=["{f_var}"],
),
dict(
# Convert to subroutine and pass result as an argument.
# Return
# "Wraps" a dim (up to one time) for the given rank, allowing
# dims to be specified using negative indices
def canonicalize_dim(rank: int, idx: int) -> int:
# 0-dim tensors are treated as rank 1 here so that dims 0 and -1 are accepted
_rank = rank if rank != 0 else 1
if idx >= 0 and idx < _rank:
return idx
if idx < 0:
_idx = idx + _rank
else:
_idx = idx
if _idx < 0 or _idx >= _rank:
msg = "Received out of bounds index {0} for tensor of rank {1}!".format(
idx, rank
)
raise ValueError(msg)
return _idx
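# Examples (illustrative):
#   canonicalize_dim(4, 2)   # -> 2
#   canonicalize_dim(4, -1)  # -> 3
#   canonicalize_dim(0, -1)  # -> 0 (0-dim tensors accept dims 0 and -1)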
# Takes a dimension or sequence of dimensions and "wraps" them,
# mapping negative offsets to positive ones
def canonicalize_dims(rank: int, indices: DimsType) -> DimsType:
if isinstance(indices, int):
return canonicalize_dim(rank, indices)
return tuple(canonicalize_dim(rank, x) for x in indices)
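# Example (illustrative): canonicalize_dims(4, (0, -1)) -> (0, 3), while a bare
# int is forwarded to canonicalize_dim: canonicalize_dims(4, -2) -> 2.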
def is_valid_permutation(rank: int, perm: DimsSequenceType) -> bool:
"""
Validates that perm is a permutation of length rank.
"""
if not isinstance(perm, Sequence):
return False
if not (tuple(sorted(perm)) == tuple(range(0, rank))):
return False
return True
def is_same_shape(a: Sequence, b: Sequence) -> bool:
"""
Compares two shapes a and b, returning True if they are the same
(their ranks and corresponding lengths match) and False otherwise.
"""
return tuple(a) == tuple(b)
def is_cpu_scalar_tensor(a: Any) -> bool:
return isinstance(a, TensorLike) and a.ndim == 0 and a.device.type == "cpu"
def check_same_device(*args, allow_cpu_scalar_tensors):
"""
Checks that all Tensors in args have the same device.
Raises a RuntimeError when:
- args contains an object whose type is not Tensor or Number
- two Tensor objects in args have different devices, unless one is a CPU scalar tensor and allow_cpu_scalar_tensors is True
"""
# Short-circuits if all (one or fewer) arguments are trivially on the same device
if len(args) <= 1:
return
# Note: cannot initialize device to the first arg's device (it may not have one)
device = None
for arg in args:
if isinstance(arg, Number):
continue
elif isinstance(arg, TensorLike):
if allow_cpu_scalar_tensors and is_cpu_scalar_tensor(arg):
continue
if device is None:
device = arg.device
if device != arg.device:
msg = (
"Tensor on device "
+ str(arg.device)
+ " is not on the expected device "
+ str(device)
+ "!"
)
raise RuntimeError(msg)
else:
msg = (
"Unexpected type when checking for same device, " + str(type(arg)) + "!"
)
raise RuntimeError(msg)
# Raises if any of the following are true:
# - a non-Number or non-Tensor is given
# - the shapes of any two tensors are distinct
def check_same_shape(*args, allow_cpu_scalar_tensors):
"""
Checks that all Tensors in args have the same shape.
Raises a RuntimeError when:
- args contains an object whose type is not Tensor or Number
- two Tensor objects in args have different shapes
"""
shape = None
for arg in args:
if isinstance(arg, Number):
continue
elif isinstance(arg, TensorLike):
if allow_cpu_scalar_tensors and is_cpu_scalar_tensor(arg):
continue
if shape is None:
shape = arg.shape
if not is_same_shape(shape, arg.shape):
msg = "Shape {0} is not the expected shape {1}!".format(
arg.shape, shape
)
raise RuntimeError(msg)
else:
msg = (
"Unexpected type when checking for same shape, " + str(type(arg)) + "!"
)
raise RuntimeError(msg)
_integer_dtypes = (torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64)
_float_dtypes = (torch.float16, torch.bfloat16, torch.float32, torch.float64)
_complex_dtypes = (torch.complex32, torch.complex64, torch.complex128)
def is_boolean_dtype(dtype: torch.dtype) -> bool:
assert isinstance(dtype, torch.dtype)
return dtype is torch.bool
def is_integer_dtype(dtype: torch.dtype) -> bool:
assert isinstance(dtype, torch.dtype)
return dtype in _integer_dtypes
def is_float_dtype(dtype: torch.dtype) -> bool:
assert isinstance(dtype, torch.dtype)
return dtype in _float_dtypes
def is_complex_dtype(dtype: torch.dtype) -> bool:
assert isinstance(dtype, torch.dtype)
return dtype in _complex_dtypes
_complex_to_real_dtype_map = {
torch.complex128: torch.float64,
torch.complex64: torch.float32,
torch.complex32: torch.float16,
}
_real_to_complex_dtype_map = {
torch.float16: torch.complex32,
torch.bfloat16: torch.complex64,
torch.float32: torch.complex64,
torch.float64: torch.complex128,
}
def corresponding_real_dtype(dtype: torch.dtype) -> torch.dtype:
return _complex_to_real_dtype_map[dtype]
def corresponding_complex_dtype(dtype: torch.dtype) -> torch.dtype:
return _real_to_complex_dtype_map[dtype]
def dtype_to_type(dtype: torch.dtype) -> type:
"""
Computes the corresponding Python type (AKA "type kind") for the
given dtype.
"""
assert isinstance(dtype, torch.dtype)
if dtype is torch.bool:
return bool
if dtype in _integer_dtypes:
return int
if dtype in _float_dtypes:
return float
if dtype in _complex_dtypes:
return complex
raise ValueError("Invalid dtype!")
_type_to_dtype_map = {
bool: torch.bool,
int: torch.int64,
float: torch.float64,
complex: torch.complex128,
}
def type_to_dtype(typ: type) -> torch.dtype:
"""
Computes the corresponding dtype for a Number type.
"""
return _type_to_dtype_map[typ]
_ordered_types = (bool, int, float, complex)
def get_higher_type(a: type, b: type) -> type:
"""
Returns the higher of the two given Number types.
The types are ordered bool -> int -> float -> complex.
"""
# Type checking
assert a in _ordered_types
assert b in _ordered_types
if a is b:
return a
for typ in _ordered_types:
if a is typ:
return b
if b is typ:
return a
raise ValueError("Unknown Python scalar type!")
# Returns the higher of two torch datatypes a and b or, if the two
# are not ordered relative to each other, the next
# higher datatype
def get_higher_dtype(
a: Optional[Union[torch.dtype, TensorLikeType, NumberType]],
b: Optional[Union[torch.dtype, TensorLikeType, NumberType]],
) -> Optional[torch.dtype]:
"""
Computes the "lowest" datatype that is weakly
"higher" than both a and b.
"""
# Type checking
assert a is None or isinstance(a, (torch.dtype, TensorLike, Number))
assert b is None or isinstance(b, (torch.dtype, TensorLike, Number))
def _extract_dtype(
x: Optional[Union[torch.dtype, TensorLikeType, NumberType]]
) -> Optional[torch.dtype]:
if x is None:
return None
if isinstance(x, torch.dtype):
return x
if isinstance(x, TensorLike):
return x.dtype
if isinstance(x, Number):
return type_to_dtype(type(x))
raise RuntimeError("Unexpected type given to _extract_dtype!")
a, b = _extract_dtype(a), _extract_dtype(b)
if a is b:
return a
if a is None:
return b
if b is None:
return a
ordered_datatypes = (
(torch.bool,),
(torch.uint8, torch.int8),
(torch.int16,),
(torch.int32,),
(torch.int64,),
(torch.float16, torch.bfloat16),
(torch.float32,),
(torch.float64,),
(torch.complex32,),
(torch.complex64,),
(torch.complex128,),
)
for idx, dtypes in enumerate(ordered_datatypes):
if a in dtypes and b in dtypes:
return ordered_datatypes[idx + 1][0]
if a in dtypes:
return b
if b in dtypes:
return a
raise RuntimeError("Unexpected termination!")
# TODO: maybe unify with can_cast_to?
def is_weakly_lesser_type(a: type, b: type) -> bool:
"""
Compares two types, a and b, returning True if a is weakly "less" than b.
The comparison is determined by the following type ordering: bool, int, float, complex.
"""
ordered_types = (
bool,
int,
float,
complex,
)
assert a in ordered_types
assert b in ordered_types
for typ in ordered_types:
if a == typ:
return True
if b == typ:
return False
raise RuntimeError("Unexpected termination!")
def can_safe_cast_to(*, cast_to: torch.dtype, cast_from: torch.dtype) -> bool:
for fn in (is_complex_dtype, is_float_dtype, is_integer_dtype, is_boolean_dtype):
if fn(cast_to):
return True
if fn(cast_from):
return False
raise ValueError("Received unknown dtypes {0}, {1}!".format(cast_to, cast_from))
def check_same_dtype(*args):
"""
Checks that all Tensors in args have the same dtype and that all Numbers have the
same corresponding Python type.
Raises a RuntimeError when:
- args contains an object whose type is not Tensor or Number
- two Tensor objects in args have different dtypes
- two Number objects in args have different types
- there are Tensors and Numbers in args, and the corresponding Python type of one
of those Tensors is different from the type of one of those Numbers
"""
full_dtype = None
scalar_type = None
for arg in args:
if isinstance(arg, Number):
# Scalar type checking is disabled (and may be removed in the future)
continue
# if scalar_type is None:
# scalar_type = type(arg)
# if scalar_type is not type(arg):
# msg = (
# "Scalar of type "
# + str(type(arg))
# + " is not the expected type of "
# + str(scalar_type)
# + "!"
# )
# raise RuntimeError(msg)
elif isinstance(arg, TensorLike):
if full_dtype is None:
full_dtype = arg.dtype
if scalar_type is None:
scalar_type = dtype_to_type(arg.dtype)
if full_dtype is not arg.dtype:
msg = (
"Tensor with dtype "
+ str(arg.dtype)
+ " is not the expected dtype of "
+ str(full_dtype)
+ "!"
)
raise RuntimeError(msg)
arg_type = dtype_to_type(arg.dtype)
if arg_type is not scalar_type:
msg = (
"Tensor with corresponding Python type "
+ str(arg_type)
+ " is not the expected type of "
+ str(scalar_type)
+ "!"
)
raise RuntimeError(msg)
else:
msg = (
"Unexpected type when checking for same dtype, " + str(type(arg)) + "!"
)
raise RuntimeError(msg)
# Maps datatypes to their computation types for elementwise operations
_computation_dtype_map = {
torch.bfloat16: torch.float32,
# lib/evaluation.py
from easydict import EasyDict as edict
import logging
import logging.config
import numpy as np
from osgeo import gdal
import torch
from lib import data_normalization, fdutil, rasterutils, utils
def compute_residuals(raster, raster_gt, nodata, mask_gt=None):
"""
Computes the residual errors. A positive error means that the predicted height is larger than the reference value
and conversely for a negative error.
:param raster: np.array, DSM to be evaluated
:param raster_gt: np.array, reference (ground truth) DSM
:param nodata: float, nodata value of the DSMs
:param mask_gt: np.array (boolean), ground truth mask (True indicates valid ground truth heights)
:return: np.ma.array, DSM residual errors
"""
# Mask out invalid ground truth pixels
if mask_gt is not None:
mask = np.ma.mask_or(raster_gt == nodata, ~mask_gt)
raster_gt_masked = np.ma.masked_array(raster_gt, mask=mask)
else:
raster_gt_masked = np.ma.masked_where(raster_gt == nodata, raster_gt)
# Mask out invalid pixels in the input raster
raster_masked = np.ma.masked_where(raster == nodata, raster)
# Compute residual errors: raster - raster_gt
residuals_masked = raster_masked - raster_gt_masked
return residuals_masked
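# Minimal usage sketch (hypothetical arrays): a positive residual means the
# prediction is too high; nodata pixels in either raster are masked out.
#   dsm = np.array([[10.0, 12.0], [-9999.0, 8.0]])
#   dsm_gt = np.array([[9.5, 12.5], [7.0, -9999.0]])
#   res = compute_residuals(dsm, dsm_gt, nodata=-9999.0)
#   # res -> masked array [[0.5, -0.5], [--, --]]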
def truncate_residuals(residuals, threshold):
"""
Truncates residual errors outside the interval [-threshold, threshold].
:param residuals: np.ma.array, DSM residual errors
:param threshold: positive float, threshold to truncate the residual errors prior to evaluation
:return: np.ma.array, truncated DSM residual errors
"""
return np.ma.masked_outside(residuals, -threshold, threshold)
def get_statistics(residuals_masked, residual_threshold=None):
"""
Computes several evaluation metrics using non-truncated residuals and optionally using truncated residuals.
:param residuals_masked: np.ma.array, DSM residual errors
:param residual_threshold: positive float, threshold to truncate the residual errors prior to evaluation
(None to deactivate thresholding)
:return: EasyDict, dictionary with the following key-value pairs
(statistics of the non-truncated residual errors):
truncation: boolean, True if the evaluation metrics are additionally computed
for truncated residual errors, False otherwise
count_total: float, number of valid pixels
diff_max: float, maximum residual error [m]
diff_min: float, minimum residual error [m]
MAE: float, mean absolute error (MAE) [m]
RMSE: float, root mean square error (RMSE) [m]
absolute_median: float, median absolute error (MedAE) [m]
median: float, median error [m]
NMAD: float, normalized median absolute deviation (NMAD) [m]
If residual_threshold is not equal to None:
truncated: EasyDict, dictionary with the following key-value pairs
(statistics of the truncated residual errors):
count_total: float, number of valid pixels
threshold: =residual_threshold
MAE: float, mean absolute error (MAE) [m]
RMSE: float, root mean square error (RMSE) [m]
absolute_median: float, median absolute error (MedAE) [m]
median: float, median error [m]
NMAD: float, normalized median absolute deviation (NMAD) [m]
"""
stats = edict()
stats.truncation = True if residual_threshold else False
if residual_threshold:
stats.truncated = edict()
# Compute absolute truncated residual errors
residuals_truncated = truncate_residuals(residuals_masked, residual_threshold)
abs_residuals_truncated = np.ma.abs(residuals_truncated)
# Number of unmasked pixels (= number of valid pixels)
stats.truncated.count_total = float(np.ma.count(residuals_truncated))
stats.truncated.threshold = residual_threshold
# Number of unmasked pixels (= number of valid pixels)
stats.count_total = float(np.ma.count(residuals_masked))
# Compute absolute residual errors
abs_residuals = np.ma.abs(residuals_masked)
# Minimum and maximum residual error
stats.diff_max = np.ma.MaskedArray.max(residuals_masked)
stats.diff_min = np.ma.MaskedArray.min(residuals_masked)
# Mean absolute error (MAE)
stats.MAE = np.ma.mean(abs_residuals)
# Root mean square error (RMSE)
stats.RMSE = np.ma.sqrt(np.ma.mean(abs_residuals ** 2))
# Median absolute error
stats.absolute_median = np.ma.median(abs_residuals)
# Median error
stats.median = np.ma.median(residuals_masked)
# Normalized median absolute deviation (NMAD)
abs_diff_from_med = np.ma.abs(residuals_masked - stats.median)
stats.NMAD = 1.4826 * np.ma.median(abs_diff_from_med)
if stats.truncation:
stats.truncated.MAE = np.ma.mean(abs_residuals_truncated)
stats.truncated.RMSE = np.ma.sqrt(np.ma.mean(abs_residuals_truncated ** 2))
stats.truncated.absolute_median = np.ma.median(abs_residuals_truncated)
stats.truncated.median = np.ma.median(residuals_truncated)
abs_diff_from_med = np.ma.abs(residuals_truncated - stats.truncated.median)
stats.truncated.NMAD = 1.4826 * np.ma.median(abs_diff_from_med)
return stats
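# For reference, the NMAD computed above follows the standard robust estimator
#   NMAD = 1.4826 * median(|r - median(r)|)
# which approximates the standard deviation for normally distributed residuals r.
# Minimal evaluation chain (hypothetical inputs):
#   res = compute_residuals(dsm, dsm_gt, nodata=-9999.0)
#   stats = get_statistics(res, residual_threshold=5.0)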
def print_statistics(stats, logger, print_min_max=True):
"""
Prints the evaluation metrics computed by the function get_statistics().
:param stats: EasyDict, dictionary returned by the function get_statistics()
:param logger: logger instance
:param print_min_max: boolean, True to print minimum and maximum residual errors, False otherwise
"""
if print_min_max:
logger.info('Maximum residual error [m]:\t\t\t\t\t\t{:10.3f} m'.format(stats.diff_max))
logger.info('Minimum residual error [m]:\t\t\t\t\t\t{:10.3f} m'.format(stats.diff_min))
# Evaluation metrics: non-truncated residual errors
logger.info('Mean absolute residual error (MAE) [m]:\t\t\t\t\t{:10.3f} m'.format(stats.MAE))
logger.info('RMSE residual error [m]:\t\t\t\t\t\t{:10.3f} m'.format(stats.RMSE))
logger.info('Absolute median residual error [m]:\t\t\t\t\t{:10.3f} m'.format(stats.absolute_median))
logger.info('Median residual error [m]:\t\t\t\t\t\t{:10.3f} m'.format(stats.median))
logger.info('Normalized median absolute deviation (NMAD) [m]:\t\t\t{:10.3f} m\n'.format(stats.NMAD))
# Evaluation metrics: truncated residual errors
if stats.truncation:
logger.info('Truncated mean absolute residual error (MAE) [m]:\t\t\t{:10.3f} m'.format(stats.truncated.MAE))
logger.info('Truncated RMSE residual error [m]:\t\t\t\t\t{:10.3f} m'.format(stats.truncated.RMSE))
logger.info('Truncated absolute median residual error [m]:\t\t\t\t{:10.3f} m'.format(stats.truncated.absolute_median))
logger.info('Truncated median residual error [m]:\t\t\t\t\t{:10.3f} m'.format(stats.truncated.median))
logger.info('Truncated normalized median absolute deviation (NMAD) [m]:\t\t{:10.3f} m\n'.format(stats.truncated.NMAD))
def evaluate_performance(raster_prediction, ds_raster_input, ds_raster_gt, logger_root, area_defn=None,
path_gt_mask=None, path_building_mask=None, path_water_mask=None, path_forest_mask=None,
logger_stats=None, residual_threshold=None):
"""
Computes the evaluation metrics for both the initial DSM and the refined DSM. The error metrics are computed over
a) all pixels,
b) building pixels (if path_building_mask is provided),
c) terrain pixels (if path_building_mask is provided),
d) terrain pixels excluding water pixels (if path_building_mask and path_water_mask are provided),
e) terrain pixels excluding water and forest pixels (if path_building_mask, path_water_mask, and path_forest_mask
are provided).
Note that the building mask is dilated by two pixels to avoid aliasing at vertical walls. Additionally, the error
metrics are optionally computed a second time, where the residual errors exceeding residual_threshold are ignored.
:param raster_prediction: np.array, refined DSM
:param ds_raster_input: gdal.Dataset or str, initial DSM loaded as gdal.Dataset
(or alternatively, path to the initial DSM GeoTiff raster)
:param ds_raster_gt: gdal.Dataset or str, ground truth DSM loaded as gdal.Dataset
(or alternatively, path to the ground truth DSM GeoTiff raster)
:param logger_root: logger instance (root logger)
:param area_defn: dictionary, defines one or multiple rectangularly-shaped geographic regions
for which the performance will be evaluated. The dictionary is composed of the
following key-value pairs:
x_extent: list of n tuples, where n denotes the number of rectangular regions
(stripes). Each tuple defines the upper-left and lower-right
x-coordinate of a rectangular region (stripe).
y_extent: list of n tuples, where n denotes the number of rectangular regions
(stripes). Each tuple defines the upper-left and lower-right
y-coordinate of a rectangular region (stripe).
Assumption: The i.th tuple of x_extent and i.th tuple of y_extent define a
geographically rectangular region (stripe).
Specify area_defn as follows if the entire refined DSM should be evaluated
(alternatively, specify area_defn=None):
area_defn = {'x_extent': [(0, cols - 1)], 'y_extent': [(0, rows - 1)]}
where cols denotes the number of columns and rows the number of rows of the raster.
:param path_gt_mask: path to the ground truth mask (a pixel value of 1 indicates a valid pixel, whereas
a pixel value of 0 indicates an invalid ground truth height)
:param path_building_mask: path to the building mask (a pixel value of 1 indicates a building pixel, whereas
a pixel value of 0 indicates a terrain pixel)
:param path_water_mask: path to the water mask (a pixel value of 1 indicates a water pixel, whereas
a pixel value of 0 indicates a non-water pixel)
:param path_forest_mask: path to the forest mask (a pixel value of 1 indicates a forest pixel, whereas
a pixel value of 0 indicates a non-forest pixel)
:param logger_stats: logger instance to print the statistics (if None, output is print to console)
:param residual_threshold: positive float, threshold to truncate the residual errors prior to evaluation
:return: EasyDict, dictionary storing the residual errors of the refined DSM; the dictionary
consists of the following key-value pairs:
all: np.array, residual errors evaluated over all pixels
building: np.array, residual errors evaluated over building
pixels only
terrain: np.array, residual errors evaluated over terrain pixels
only
terrain_nowater: np.array, residual errors evaluated over terrain pixels
excluding water bodies
terrain_nowater_noforest: np.array, residual errors evaluated over terrain pixels
excluding water bodies and forested areas
"""
if logger_stats is None:
logger_stats = utils.setup_logger('stats_logger', level=logging.INFO, log_to_console=True, log_file=None)
data = edict()
mask = edict()
# Load the refined DSM
if isinstance(raster_prediction, gdal.Dataset):
data.prediction = raster_prediction.GetRasterBand(1).ReadAsArray().astype(np.float64)
elif isinstance(raster_prediction, np.ndarray):
data.prediction = raster_prediction.copy().astype(np.float64)
else:
logger_root.info('\tLoad the refined DSM...')
ds = rasterutils.load_raster(raster_prediction)
data.prediction = ds.GetRasterBand(1).ReadAsArray().astype(np.float64)
ds = None
# Load the ground truth DSM
if isinstance(ds_raster_gt, gdal.Dataset):
data.ground_truth = ds_raster_gt.GetRasterBand(1).ReadAsArray().astype(np.float64)
data.nodata = np.array(ds_raster_gt.GetRasterBand(1).GetNoDataValue()).astype(np.float64)
else:
logger_root.info('\tLoad the ground truth DSM...')
ds = rasterutils.load_raster(ds_raster_gt)
data.ground_truth = ds.GetRasterBand(1).ReadAsArray().astype(np.float64)
data.nodata = np.array(ds.GetRasterBand(1).GetNoDataValue()).astype(np.float64)
ds = None
# Load the initial DSM
if isinstance(ds_raster_input, gdal.Dataset):
data.initial = ds_raster_input.GetRasterBand(1).ReadAsArray().astype(np.float64)
# Get GSD [m]
gsd = ds_raster_input.GetGeoTransform()[1]
else:
logger_root.info('\tLoad the initial DSM...')
ds = rasterutils.load_raster(ds_raster_input)
data.initial = ds.GetRasterBand(1).ReadAsArray().astype(np.float64)
# Get GSD [m]
gsd = ds.GetGeoTransform()[1]
# -*- coding: utf8 -*-
# Program to process the features from GATE annotation export files
# Requires the following files in the current working directory:
# _raw.txt
# _toc.txt
# _rel.txt
# TODO Add support for more than one set of files in cwd
import os
import csv
import re
import json
from collections import OrderedDict
# Defining regular expressions to match extracted annotations with PDF content
number = re.compile(r"[1-9]")
whitespace = re.compile(r"\s")
capital = re.compile(r"([A-Z])")
roman = "([IXV])+"
# GATE works on a token basis, which means we potentially obfuscate the original whitespace
# when more than single tokens are extracted.
# Therefore, we define a pattern for how the parts of a regulation citation are usually formatted
# Use lookahead (?=foo) and lookbehind (?<=foo) syntax to remove only selected whitespace in between those two
refs = re.compile(r"(\s)*(?<=[\W\D\a-z])(\s)(?!der)(?=[ac-eg-tv-z\W]|[fb]{1}\s)")
# Defining a function which determines section boundaries, with start and end row number
def pairwise(lst):
""" yield item i and item i+1 in lst. e.g.
(lst[0], lst[1]), (lst[1], lst[2]), ..., (lst[-1], None)
"""
if not lst:
return
for l in range(len(lst)-1):
yield lst[l], lst[l+1]
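# Example (illustrative): list(pairwise([3, 7, 12])) -> [(3, 7), (7, 12)],
# i.e. consecutive boundary pairs delimiting the chunk between two matches.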
# Function to perform a regex search within the document for TOC components
# This is for dividing the PDF in chunks
# to later match the REFs to each chunk they belong to
def doc_parser(raw_doc, searchstring, alt_searchstring, max_lines):
matches = []
tempmatches = []
with open(raw_doc, 'r', encoding="utf8") as doc:
for num, line in enumerate(doc, 1):
if searchstring:
try:
if re.findall(searchstring, line, re.M):
tempmatches.append([num, line])
elif re.findall(alt_searchstring, line, re.M):
tempmatches.append([num, line])
except Exception as e:
pass
if max_lines:
if num == max_lines:
tempmatches.append([num, line])
# Sometimes there is more than one match (when the TOC also fits the search string)
# We only want the passage within the text body, not the TOC elements
if len(tempmatches) == 1:
matches.append(tempmatches[0])
if len(tempmatches) >= 2:
matches.append(tempmatches[1])
del tempmatches[:]
return matches
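# Usage sketch (file name and pattern are hypothetical): collect the row
# numbers at which a part heading such as "A. Grundlagen" occurs in the body:
#   doc_parser("Gesetz_raw.txt", r"^[0-9]*\s*[A-H][.]\sGrundlagen", None, None)
#   # -> e.g. [[57, 'A. Grundlagen\n']]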
# This function uses the chunks from the doc_parser output and looks up
# whether a reference is present within a text chunk
def get_match(r, chunks, some_string):
some_list = []
# enable multiline matches by removing newline characters
for p in range(0, len(chunks)):
o = re.sub('\n', '', str(chunks.get(p)), flags=re.S | re.M | re.U)
o_n = "".join(chunks.get(p))
o_new = str(re.sub('\n\r', ' ', o_n, flags=re.M | re.U))
if re.search(str(r), o, re.M | re.S | re.U):
some_list.append([r, str(some_string.get(p))])
continue
else:
if re.search(str(r), o_new, re.M | re.S | re.U):
some_list.append([r, str(some_string.get(p))])
continue
if len(some_list) == 0:
some_list.append([r, ""])
return some_list
# opening all input text files
def toc_extractor(toc_doc, raw_doc, rel_doc):
# opening the _rel.txt file and cleaning RFC, REL, REG and DBp features
with open(rel_doc, 'r', encoding="utf8") as doc1:
doc_reader1 = csv.reader(doc1, delimiter=',', quotechar='"', skipinitialspace=True)
doc_data1 = list(doc_reader1)
references = []
relationships = []
rfc = []
dbp = []
reg = []
for row in doc_data1:
if row:
#REF
replaced_ref = row[4].strip()
if replaced_ref:
ref = re.sub(refs, "", replaced_ref)
if ref:
references.append(ref)
else:
references.append(replaced_ref)
else:
replaced_ref1 = row[5].strip()
if replaced_ref1:
ref = re.sub(refs, "", replaced_ref1)
if ref:
references.append(ref)
else:
references.append(replaced_ref1)
else:
references.append(row[5])
# REL
relationships.append(row[3])
# RFC
rfcstring=row[1].strip()
if rfcstring:
rfc.append(rfcstring)
else:
rfcstring1 = row[2].strip()
rfc.append(rfcstring1)
# REG
regstr=row[12].strip()
if regstr:
reg.append(regstr)
# DBp
if row[9]:
replaced_url = (str(row[9]).split('/')[-1:])[0]
if replaced_url:
url= re.sub(r'Schweizerische_Volkspartei', "Bürgerliches_Gesetzbuch", replaced_url)
if url:
dbp.append(url)
else:
dbp.append(replaced_url)
elif row[7]:
replaced_url=(str(row[7]).split('/')[-1:])[0]
if replaced_url:
# Replacing a DBpedia mismatch regarding German civil code
url = re.sub(r'Schweizerische_Volkspartei', "Bürgerliches_Gesetzbuch", replaced_url)
if url:
dbp.append(url)
else:
dbp.append(replaced_url)
else:
dbp.append(row[7])
# Getting TOC components from the annotation file
with open(toc_doc, "r", encoding="utf8") as doc2:
doc_reader2 = csv.reader(doc2, delimiter=',', quotechar='"', skipinitialspace=True)
doc_data2 = list(doc_reader2)
# print(doc_data1)
chapter_dict = {}
part_dict = OrderedDict()
part_order = []
subchapter_dict = {}
subchapter_order = []
subsubchapter_dict = {}
subsubchapter_order = []
part_num=[]
chapter_name=""
for row in doc_data2:
if row:
# Chapter (= classification attribute)
if number.search(row[1]):
chapter_dict[row[1]] = row[2]
chapter_name=row[2]
chapter_name = re.sub(whitespace, "-", chapter_name.strip())
# Part
if capital.search(row[4]):
part_dict[row[4]] = row[5]
# Subchapter
if number.search(row[9]):
if row[8] not in subchapter_dict:
subchapter_order.append(row[8])
subchapter_dict[row[8]] = row[9]
# Subsubchapter
if number.search(row[13]):
if row[12] not in subsubchapter_dict:
subsubchapter_dict[row[12]] = row[13]
subsubchapter_order.append(row[12])
# preserving part order
for x in range(len(part_dict)):
part_order.append(list(part_dict.values())[x])
part_num.append(list(part_dict.keys())[x])
part_matches = []
subchapter_matches = []
subsubchapter_matches = []
# finding last line in a document for chunking
def last_line(doc):
num_lines = 0
with open(doc, "r", encoding="utf8") as document:
num_lines += sum(1 for l in document)
return num_lines
numlines = last_line(raw_doc)
#Lookup of TOC components in raw_doc (pdftotext of original PDF)
# Part Lookup
for p in part_order:
searchstring = "^([0-9]*[\s]*[A-H][.]\s)" + p.strip()
alt_searchstring = "^([0-9]*[\s]*[A-H][.]\s)" + p[0:-8].strip()
part_matches.append(doc_parser(raw_doc, searchstring, alt_searchstring, None))
part_matches.append(doc_parser(raw_doc, None, None, numlines))
# Subchapter Lookup
subsubchapter_prefix = []
for s in subchapter_order:
searchstring = "^([0-9]*[\s]*(%s)[.]\s)" % roman + (s.replace('\(\)', '')).strip()
alt_searchstring = "^([0-9]*[\s]*(%s)[.]\s)" % roman + (s[0:-8].replace('\(\)', '')).strip()
subsubchapter_prefix.append(searchstring)
subchapter_matches.append(doc_parser(raw_doc, searchstring, alt_searchstring, None))
if len(subchapter_matches) > 0:
continue
subchapter_matches.append(doc_parser(raw_doc, None, None, numlines))
# Subsubchapter Lookup
for ss in subsubchapter_order:
searchstring = "(^([0-9]*[\s]*(%s)[.]\s([A-Za-z0-9\s])+[.]\s)?[0-9]*[\s]*[0-9][.]\s)" % roman + ss.strip() + "([.]\s([A-Za-z0-9])+)"
alt_searchstring = "(^([0-9]*[\s]*(%s)[.]\s([A-Za-z0-9\s])+[.]\s)?[0-9]*[\s]*[0-9][.]\s)" % roman + ss[0:-8].strip()
subsubchapter_matches.append(doc_parser(raw_doc, searchstring, alt_searchstring, None))
if len(subchapter_matches) > 0:
continue
subsubchapter_matches.append(doc_parser(raw_doc, None, None, numlines))
# get boundaries of part
part_boundaries=[]
for p in part_matches:
for num, line in p:
part_boundaries.append(num)
# get boundaries of subchapter
subchapter_boundaries = []
for p in subchapter_matches:
for num, line in p:
subchapter_boundaries.append(num)
# get boundaries of subsubchapter
subsubchapter_boundaries = []
for p in subsubchapter_matches:
for num, line in p:
subsubchapter_boundaries.append(num)
# part extraction
reference_part=[]
reference_pn = []
i=0
chunks={}
part_name={}
pn={}
for n, m in pairwise(part_boundaries):
chunk=[]
with open(raw_doc, 'r', encoding="utf8") as reference_doc:
for l, chunkline in enumerate(reference_doc):
if n <= l < m:
chunk.append(chunkline)
else:
continue
part_name[i] = (part_order[i]).strip()
pn[i] = (part_num[i]).strip()
chunks[i] = chunk
i += 1
for r in references:
reference_part.append(get_match(r, chunks, part_name))
for r in references:
reference_pn.append(get_match(r, chunks, pn))
# subchapter extraction
reference_subchapter = []
i = 0
chunks = {}
subchapter_name = {}
for n, m in pairwise(subchapter_boundaries):
chunk = []
with open(raw_doc, 'r', encoding="utf8") as reference_doc:
for l, chunkline in enumerate(reference_doc):
if n <= l < m:
chunk.append(chunkline)
else:
continue
subchapter_name[i] = (subchapter_order[i]).strip()
chunks[i] = chunk
i += 1
for r in references:
reference_subchapter.append(get_match(r, chunks, subchapter_name))
print(reference_subchapter)
# subsubchapter extraction
reference_subsubchapter=[]
i = 0
chunks = {}
subsubchapter_name = {}
for n, m in pairwise(subsubchapter_boundaries):
chunk = []
with open(raw_doc, 'r', encoding="utf8") as reference_doc:
for l, chunkline in enumerate(reference_doc):
if n <= l < m:
chunk.append(chunkline)
else:
continue
subsubchapter_name[i] = (subsubchapter_order[i]).strip()
chunks[i] = chunk
i += 1
for r in references:
reference_subsubchapter.append(get_match(r, chunks, subsubchapter_name))
# Now use all extracted information to create cluster instances, where references are the instances
# and part, subchapter, subsubchapter, and DBpedia concepts are features
reference_features=[]
parts = []
pn = []
for inner_l in reference_part:
for item in inner_l:
parts.append(item[1])
for inner_l in reference_pn:
for item in inner_l:
pn.append(item[1])
subchapters = []
sc = []
for inner_l in reference_subchapter:
for item in inner_l:
subchapters.append(item[1])
sc.append(item[0])
subsubchapters = []
ssc = []
for inner_l in reference_subsubchapter:
for item in inner_l:
subsubchapters.append(item[1])
ssc.append(item[0])
'''
print(len(references))
print(len(relationships))
print(len(rfc))
print(len(dbp))
print(len(reference_part))
print(len(reference_subchapter))
print(len(reference_subsubchapter))
print(references)
print(relationships)
print(rfc)
print(dbp)
'''
# Construct format of instances with features
for r in range(len(references)):
features = {}
features["ref"] = str(references[r])
#features["chapter"] = chapter_name
features["rfc"] = str(rfc[r])
features["rel"] = str(relationships[r])
features["part"] = str(parts[r])
features["dbp"] = str(dbp[r])
features["reg"] = str(reg[r])
features["sc"] = str(subchapters[r])
features["ssc"] = str(subsubchapters[r])
features["classification"] = chapter_name
#features["classification"] = str(rfc[r])
#features["classification"] = str(references[r])
reference_features.append(features)
print(reference_features)
return reference_features
# Outputs which files will be processed. For debugging, modify parseAllFlag below.
cwd = os.getcwd()
print('Do you want to process the features of all corresponding "_toc.txt" files in the current working directory? ' + cwd + ' (y/n)')
parseAllFlag = "y"
filePaths = []
rawFilePaths = []
relFilePaths = []
if parseAllFlag == "y":
for file in os.listdir('.'):
if file.endswith("_toc.txt"):
filePath = os.path.join(cwd , file)
filePaths.append(filePath)
if file.endswith("_raw.txt"):
filePath = os.path.join(cwd , file)
rawFilePaths.append(filePath)
if file.endswith("_rel.txt"):
filePath = os.path.join(cwd, file)
relFilePaths.append(filePath)
print("The following file paths have been found:")
print(filePaths)
print(rawFilePaths)
print(relFilePaths)
else:
continueFlag = True
while continueFlag == True:
print("Do you want to specify (a) filepath(s) from | |
p in sel_files:
idx = sel_model.model().index(str(p))
sel_model.select(idx, QItemSelectionModel.SelectionFlag.Select | QItemSelectionModel.SelectionFlag.Rows)
self.file_view = file_view
def disable_buttons(self):
# remove okay/cancel buttons from dialog, when showing in another dialog
btn_box = self.findChild(QDialogButtonBox)
if btn_box:
btn_box.hide()
# self.layout().removeWidget(btn_box) # this stopped working for some reason
def onChange(self, *args):
if not args:
return
else:
arg = args[0]
if isinstance(arg, QModelIndex):
index = arg
if not index.column() == 0:
index = index.siblingAtColumn(0)
path = str(Path(self.directory) / index.data())
else:
path = arg
pixmap = QPixmap(path)
mpPreview = self.mpPreview
if pixmap.isNull():
mpPreview.setText('Preview')
else:
mpPreview.setPixmap(pixmap.scaled(mpPreview.width(), mpPreview.height(),
Qt.AspectRatioMode.KeepAspectRatio, Qt.TransformationMode.SmoothTransformation))
def accept(self):
# prevent window from being closed when part of a form
if self.standalone:
super().accept()
def onFileSelected(self, file):
self._fileSelected = file
def onFilesSelected(self, files):
self._filesSelected = files
def getFileSelected(self):
return self._fileSelected
def getFilesSelected(self):
return self._filesSelected
class Search(BaseDialog):
index_changed = pyqtSignal(int)
def __init__(self, parent=None):
super().__init__(parent, window_title='Search')
# self.setFocusPolicy(Qt.FocusPolicy.StrongFocus)
self.index_changed.connect(self.select)
items = [] # list of match items
# parent should be view?
view = parent
model = view.data_model
model.highlight_rows = False # turn off row highlighting so we can see single selection
label_matches = QLabel('Matches:')
search_box = QLineEdit()
self.meta_state = False
search_box.textChanged.connect(self.text_changed)
search_box.installEventFilter(self)
self.v_layout.addWidget(search_box)
self.v_layout.addWidget(label_matches)
# cancel, prev, next
prev = QPushButton('Prev')
next_ = QPushButton('Next')
prev.clicked.connect(self.find_prev)
next_.clicked.connect(self.find_next)
prev.setToolTip('Ctrl + Left Arrow')
next_.setToolTip('Ctrl + Right Arrow | Enter')
btnbox = QDialogButtonBox(QDialogButtonBox.StandardButton.Cancel)
btnbox.addButton(prev, QDialogButtonBox.ButtonRole.ActionRole)
btnbox.addButton(next_, QDialogButtonBox.ButtonRole.ActionRole)
btnbox.rejected.connect(self.reject)
self.rejected.connect(self.close) # need to trigger close event to reset selection
self.v_layout.addWidget(btnbox, alignment=Qt.AlignmentFlag.AlignBottom | Qt.AlignmentFlag.AlignCenter)
f.set_self(vars())
def closeEvent(self, event):
self.model.highlight_rows = True
def eventFilter(self, obj, event):
if event.type() == QEvent.Type.KeyPress:
mod = event.modifiers()
key = event.key()
# print(keyevent_to_string(event))
if mod and (
(cf.is_win and mod == Qt.KeyboardModifier.ControlModifier) or
mod == Qt.KeyboardModifier.AltModifier or
mod == Qt.KeyboardModifier.MetaModifier or
mod == (Qt.KeyboardModifier.MetaModifier | Qt.KeyboardModifier.KeypadModifier) or
mod == (Qt.KeyboardModifier.AltModifier | Qt.KeyboardModifier.KeypadModifier)):
if key == Qt.Key.Key_Right:
self.find_next()
return True
elif key == Qt.Key.Key_Left:
self.find_prev()
return True
elif key == Qt.Key.Key_Enter or key == Qt.Key.Key_Return:
self.find_next()
return True
return super().eventFilter(obj, event)
def select(self, i: int):
"""Call view to select, pass tuple of name index"""
self.current_index = i
if self.items:
self.view.select_by_nameindex(self.items[i])
else:
i = -1 # no matches, select 0/0
self.label_matches.setText(f'Selected: {i + 1}/{self.num_items}')
def text_changed(self):
search_box = self.search_box
text = search_box.text()
# get list of match items from model
self.items = self.model.search(text)
self.num_items = len(self.items)
self.index_changed.emit(0)
def find_next(self):
i = self.current_index
i += 1
if i > self.num_items - 1:
i = 0
self.index_changed.emit(i)
def find_prev(self):
i = self.current_index
i -= 1
if i < 0:
i = self.num_items - 1
self.index_changed.emit(i)
class PLMReport(InputForm):
def __init__(self, unit: str = None, d_upper: dt = None, d_lower: dt = None, **kw):
super().__init__(window_title='PLM Report', **kw)
# unit, start date, end date
IPF = InputField
if d_upper is None:
d_upper = dt.now().date()
if d_lower is None:
d_lower = d_upper + relativedelta(years=-1)
# Unit
df = db.get_df_unit()
lst = f.clean_series(df[df.MineSite == self.minesite].Unit)
self.add_input(field=IPF(text='Unit', default=unit), items=lst)
# Dates
dates = {'Date Upper': d_upper, 'Date Lower': d_lower}
for k, v in dates.items():
self.add_input(
field=IPF(
text=k,
default=v,
dtype='date'))
class BaseReportDialog(InputForm):
"""Report MineSite/Month period selector dialog for monthly reports"""
def __init__(self, **kw):
super().__init__(**kw)
self.add_input(field=InputField(text='MineSite', default=self.minesite), items=cf.config['MineSite'])
df = qr.df_rolling_n_months(n=12)
months = df.period.to_list()[::-1]
self.add_input(field=InputField(text='Month', default=months[0]), items=months)
f.set_self(vars())
def accept(self):
"""Set day based on selected month"""
period = self.fMonth.val
self.d = self.df.loc[period, 'd_lower']
super().accept()
class VerticalTabs(QTabWidget):
"""Vertical tab widget"""
class TabBar(QTabBar):
"""Horizontal text for vertical tabs"""
def tabSizeHint(self, index):
s = QTabBar.tabSizeHint(self, index)
s.transpose()
# make tab label width slightly wider
return QSize(s.width() + 20, s.height())
def paintEvent(self, event):
"""
NOTE - this seems to mess up custom qdarkstyle QTabBar::tab:left,
when any custom css uses borders/margins etc
"""
painter = QStylePainter(self)
opt = QStyleOptionTab()
for i in range(self.count()):
self.initStyleOption(opt, i)
painter.drawControl(QStyle.ControlElement.CE_TabBarTabShape, opt)
painter.save()
s = opt.rect.size()
s.transpose()
r = QRect(QPoint(), s)
r.moveCenter(opt.rect.center())
opt.rect = r
c = self.tabRect(i).center()
painter.translate(c)
painter.rotate(90)
painter.translate(-c)
painter.drawControl(QStyle.ControlElement.CE_TabBarTabLabel, opt)
painter.restore()
def __init__(self, parent=None):
super().__init__(parent)
self.setTabBar(self.TabBar(self))
self.setTabPosition(QTabWidget.TabPosition.West)
class Preferences(BaseDialog):
"""Global user preferences dialog"""
input_types = {
bool: ff.CheckBox,
list: ff.ComboBox,
str: ff.LineEdit}
def __init__(self, parent=None):
super().__init__(parent, window_title='Preferences')
self.resize(QSize(800, 600))
self.s = gbl.get_settings()
# apply settings changes and funcs on accept
self.queued_changes = {}
self.queued_funcs = {}
# TODO enforce values?
settings_conf = dict(
General=dict(
username=dict(default=''),
email=dict(default=''),
minesite=dict(
default='FortHills',
items=db.get_list_minesite()),
read_only=dict(
default=False,
tooltip='Prevent app from updating any values in database.')
),
Appearance=dict(
font_size=dict(
default=cf.config_platform['font size'],
items=list(range(8, 17)),
tooltip='IMPORTANT: Must restart app for font size change to take effect.'
)
),
Events=dict(
close_wo_with_event=dict(
default=False,
label='Close Work Order with Event',
tooltip='Set WO status to "Closed" when Event is closed and vice versa.'),
wo_request_folder=dict(
default='WO Request',
label='WO Request Email Folder',
tooltip='Specify the folder in your Outlook app to search for new WO request emails.'
+ ' (Case insensitive).'
)
),
TSI=dict(
open_downloads_folder=dict(
default=False,
tooltip='Open downloads folder in addition to event folder on "View Folder" command.'),
save_tsi_pdf=dict(
default=False,
label='Save TSI PDF',
tooltip='Auto save PDF of TSI to event folder after created.')
),
Advanced=dict(
dev_channel=dict(
default=False,
label='Alpha Update Channel',
tooltip='Get alpha updates (potentially unstable).'),
is_admin=dict(
default=False,
label='Owner',
tooltip='Set user to owner status to unlock specific tabs.')
),
)
# can't update mw in testing
if parent is not None:
settings_conf['General']['minesite']['funcs'] = self.mw.update_minesite_label
settings_conf['Appearance']['font_size']['funcs'] = gbl.set_font_size
tabs = VerticalTabs(self)
for tab_name, vals in settings_conf.items():
tab = QWidget()
form_layout = FormLayout(tab)
tabs.addTab(tab, tab_name)
for name, config in vals.items():
self.add_setting(key=name, config=config, layout=form_layout)
self.v_layout.addWidget(tabs)
add_okay_cancel(self, self.v_layout)
def add_setting(self, key: str, config: dict, layout: QFormLayout) -> None:
"""Add single inputbox, connect to settings key, add to tab layout
Parameters
----------
key : str
string key to access global setting
config : dict
settings to init specific input box with
layout : QFormLayout
form layout
"""
# pass in specific text if reqd
label_text = config.get('label', key.replace('_', ' ').title())
label = QLabel(f'{label_text}:')
label.setFixedWidth(150)
label.setWordWrap(True)
# get input box type from type of default arg
# set box current value
default = config.get('default', None)
items = config.get('items', None)
val = gbl.get_setting(key, default)
kw = dict(key=key, val=val)
# combobox needs to set default items
if items is not None:
_type = list
kw |= dict(items=items)
else:
_type = type(default)
InputBox = self.input_types.get(_type)
box = InputBox(**kw)
box.changed.connect(lambda x, box=box: self.queue_setting(box=box))
# use hbox to add tooltip as QLabel
h_box = QHBoxLayout()
h_box.addWidget(box)
# connect extra funcs on value changed
funcs = config.get('funcs')
if funcs:
for func in f.as_list(funcs):
box.changed.connect(lambda x, key=key, func=func: self.queue_func(key=key, func=func))  # bind loop vars as defaults
# set tooltip
tooltip = config.get('tooltip', None)
if tooltip is not None:
label.setToolTip(tooltip)
info_label = QLabel(tooltip)
info_label.setFixedWidth(300)
info_label.setAlignment(Qt.AlignmentFlag.AlignLeft)
info_label.setWordWrap(True)
h_box.addWidget(info_label)
layout.addRow(label, h_box)
def queue_setting(self, box: ff.FormFields) -> None:
"""Update saved setting when box state changed
Parameters
----------
box : ff.FormFields
box whose value to save to setting
"""
self.queued_changes[box.key] = box.val
def queue_func(self, key: str, func: Callable) -> None:
"""Queue func to call if settings accepted
Parameters
----------
key : str
settings key to keep func calls unique
func : Callable
"""
self.queued_funcs[key] = func
def accept(self):
"""Save settings and trigger funcs"""
for key, val in self.queued_changes.items():
try:
self.s.setValue(key, val)
except Exception as e:
log.error(f'Failed to set value: ({key}, {val}) - {e}')
for key, func in self.queued_funcs.items():
try:
func()
except Exception as e:
log.error(f'Failed to call function: {func} - {e}')
super().accept()
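# The dialog above stages edits in queued_changes/queued_funcs and only
# commits them in accept(), so cancelling leaves settings untouched. A
# minimal Qt-free sketch of that stage-then-commit pattern (illustrative
# names, not part of the app):
class _StagedStore:
    def __init__(self, store: dict):
        self.store = store   # committed values
        self.staged = {}     # pending edits
    def set(self, key, val):
        self.staged[key] = val           # nothing written yet
    def accept(self):
        self.store.update(self.staged)   # commit everything at once
        self.staged.clear()
    def reject(self):
        self.staged.clear()              # discard pending edits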
def msgbox(msg='', yesno=False, statusmsg=None, **kw):
"""Show messagebox, with optional yes/no prompt\n
If app isn't running, prompt through python instead of dialog
Parameters
----------
msg : str, optional\n
yesno : bool, optional\n
statusmsg : [type], optional\n
Show more detailed smaller message
"""
if gbl.app_running():
app = check_app()
dlg = MsgBoxAdvanced(
msg=msg,
window_title=gbl.title,
yesno=yesno,
statusmsg=statusmsg,
**kw)
return dlg.exec()
elif yesno:
# if yesno and NOT frozen, prompt user through terminal
return f._input(msg)
else:
print(msg)
def msg_simple(msg: str = '', icon: str = '', infotext: str = None):
"""Show message to user with dialog if app running, else print
Parameters
----------
msg : str, optional
icon : str, optional
Show icon eg 'warning', 'critical', default None
infotext : str, optional
Detailed text to show, by default None
"""
if gbl.app_running():
dlg = QMessageBox()
dlg.setText(msg)
dlg.setWindowTitle(gbl.title)
icon = icon.lower()
if icon == 'critical':
dlg.setIcon(QMessageBox.Icon.Critical)
elif icon == 'warning':
dlg.setIcon(QMessageBox.Icon.Warning)
if infotext:
dlg.setInformativeText(infotext)
return dlg.exec()
else:
print(msg)
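# Usage sketch (hypothetical values): 'warning' and 'critical' are the only
# icons handled above; any other value shows no icon.
# msg_simple(msg='Export complete', icon='warning', infotext='2 rows were skipped.')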
def show_err_msg(text: str, tb_text:
faster loading.
df = df.astype('float32')
pd.to_pickle(df, Path(data_dir,pkl_name))
LOGGER.info(f"saved {pkl_name}")
if m_value:
df = consolidate_values_for_sheet(batch_data_containers, postprocess_func_colname='m_value', bit=bit, poobah=poobah)
if not batch_size:
pkl_name = 'm_values.pkl'
else:
pkl_name = f'm_values_{batch_num}.pkl'
if df.shape[1] > df.shape[0]:
df = df.transpose() # put probes as columns for faster loading.
df = df.astype('float32')
pd.to_pickle(df, Path(data_dir,pkl_name))
LOGGER.info(f"saved {pkl_name}")
if betas or m_value:
df = consolidate_values_for_sheet(batch_data_containers, postprocess_func_colname='noob_meth', bit=bit)
if not batch_size:
pkl_name = 'noob_meth_values.pkl'
else:
pkl_name = f'noob_meth_values_{batch_num}.pkl'
if df.shape[1] > df.shape[0]:
df = df.transpose() # put probes as columns for faster loading.
df = df.astype('float32') if df.isna().sum().sum() > 0 else df.astype('int16')
pd.to_pickle(df, Path(data_dir,pkl_name))
LOGGER.info(f"saved {pkl_name}")
# TWO PARTS
df = consolidate_values_for_sheet(batch_data_containers, postprocess_func_colname='noob_unmeth', bit=bit)
if not batch_size:
pkl_name = 'noob_unmeth_values.pkl'
else:
pkl_name = f'noob_unmeth_values_{batch_num}.pkl'
if df.shape[1] > df.shape[0]:
df = df.transpose() # put probes as columns for faster loading.
df = df.astype('float32') if df.isna().sum().sum() > 0 else df.astype('int16')
pd.to_pickle(df, Path(data_dir,pkl_name))
LOGGER.info(f"saved {pkl_name}")
if (betas or m_value) and save_uncorrected:
df = consolidate_values_for_sheet(batch_data_containers, postprocess_func_colname='meth', bit=bit)
if not batch_size:
pkl_name = 'meth_values.pkl'
else:
pkl_name = f'meth_values_{batch_num}.pkl'
if df.shape[1] > df.shape[0]:
df = df.transpose() # put probes as columns for faster loading.
df = df.astype('float32') if df.isna().sum().sum() > 0 else df.astype('int16')
pd.to_pickle(df, Path(data_dir,pkl_name))
LOGGER.info(f"saved {pkl_name}")
# TWO PARTS
df = consolidate_values_for_sheet(batch_data_containers, postprocess_func_colname='unmeth', bit=bit)
if not batch_size:
pkl_name = 'unmeth_values.pkl'
else:
pkl_name = f'unmeth_values_{batch_num}.pkl'
if df.shape[1] > df.shape[0]:
df = df.transpose() # put probes as columns for faster loading.
df = df.astype('float32') if df.isna().sum().sum() > 0 else df.astype('int16')
pd.to_pickle(df, Path(data_dir,pkl_name))
LOGGER.info(f"saved {pkl_name}")
if manifest.array_type == ArrayType.ILLUMINA_MOUSE:
# save mouse specific probes
if not batch_size:
mouse_probe_filename = 'mouse_probes.pkl'
else:
mouse_probe_filename = f'mouse_probes_{batch_num}.pkl'
consolidate_mouse_probes(batch_data_containers, Path(data_dir, mouse_probe_filename))
LOGGER.info(f"saved {mouse_probe_filename}")
if export:
export_path_parents = list(set([str(Path(e).parent) for e in export_paths]))
LOGGER.info(f"[!] Exported results (csv) to: {export_path_parents}")
if export_poobah:
# this option will save a pickled dataframe of the pvalues for all samples, with sample_ids in the column headings and probe names in index.
# this sets poobah to false in kwargs, otherwise some pvalues would be NaN I think.
df = consolidate_values_for_sheet(batch_data_containers, postprocess_func_colname='poobah_pval', bit=bit, poobah=False, poobah_sig=poobah_sig)
if not batch_size:
pkl_name = 'poobah_values.pkl'
else:
pkl_name = f'poobah_values_{batch_num}.pkl'
if df.shape[1] > df.shape[0]:
df = df.transpose() # put probes as columns for faster loading.
pd.to_pickle(df, Path(data_dir,pkl_name))
LOGGER.info(f"saved {pkl_name}")
# v1.3.0 fixing mem probs: pickling each batch_data_containers object then reloading it later.
# consolidating data_containers this will break with really large sample sets, so skip here.
#if batch_size and batch_size >= 200:
# continue
#data_containers.extend(batch_data_containers)
pkl_name = f"_temp_data_{batch_num}.pkl"
with open(Path(data_dir,pkl_name), 'wb') as temp_data:
pickle.dump(batch_data_containers, temp_data)
temp_data_pickles.append(pkl_name)
del batch_data_containers
if meta_data_frame:
#sample_sheet.fields is a complete mapping of original and renamed_fields
cols = list(sample_sheet.fields.values()) + ['Sample_ID']
meta_frame = pd.DataFrame(columns=cols)
field_classattr_lookup = {
'Sentrix_ID': 'sentrix_id',
'Sentrix_Position': 'sentrix_position',
'Sample_Group': 'group',
'Sample_Name': 'name',
'Sample_Plate': 'plate',
'Pool_ID': 'pool',
'Sample_Well': 'well',
'GSM_ID': 'GSM_ID',
'Sample_Type': 'type',
'Sub_Type': 'sub_type',
'Control': 'is_control',
}
# row contains the renamed fields, and pulls in the original data from sample_sheet
for sample in samples:
row = {}
for field in sample_sheet.fields.keys():
if sample_sheet.fields[field] in field_classattr_lookup:
row[ sample_sheet.fields[field] ] = getattr(sample, field_classattr_lookup[sample_sheet.fields[field]] )
elif field in sample_sheet.renamed_fields:
row[ sample_sheet.fields[field] ] = getattr(sample, sample_sheet.renamed_fields[field])
else:
LOGGER.info(f"extra column: {field} ignored")
# row[ sample_sheet.fields[field] ] = getattr(sample, field)
# add the UID that matches m_value/beta value pickles
#... unless there's a GSM_ID too
# appears that methylprep m_value and beta files only include ID_Position as column names.
#if row.get('GSM_ID') != None:
# row['Sample_ID'] = f"{row['GSM_ID']}_{row['Sentrix_ID']}_{row['Sentrix_Position']}"
#else:
row['Sample_ID'] = f"{row['Sentrix_ID']}_{row['Sentrix_Position']}"
meta_frame = pd.concat([meta_frame, pd.DataFrame([row])], ignore_index=True)  # DataFrame.append was removed in pandas 2.x
meta_frame_filename = 'sample_sheet_meta_data.pkl'
meta_frame.to_pickle(Path(data_dir,meta_frame_filename))
LOGGER.info(f"Exported meta data to {meta_frame_filename}")
# FIXED in v1.3.0
# moved consolidate_control_snp() from this spot to earlier in pipeline, because it uses
# raw_dataset and this gets removed before pickling _temp files. Here I pickle.dump the SNPS.
if save_control:
control_filename = 'control_probes.pkl'
with open(Path(data_dir, control_filename), 'wb') as control_file:
pickle.dump(control_snps, control_file)
LOGGER.info(f"saved {control_filename}")
# batch processing done; consolidate and return data. This uses much more memory, but not called if in batch mode.
if batch_size and batch_size >= 50:
print("Because the batch size was >=50 samples, files are saved but no data objects are returned.")
del batch_data_containers
for temp_data in temp_data_pickles:
temp_file = Path(data_dir, temp_data)
temp_file.unlink(missing_ok=True) # delete it
return
# consolidate batches and delete parts, if possible
for file_type in ['beta_values', 'm_values', 'meth_values', 'unmeth_values',
'noob_meth_values', 'noob_unmeth_values', 'mouse_probes', 'poobah_values']:
test_parts = list([str(temp_file) for temp_file in Path(data_dir).rglob(f'{file_type}*.pkl')])
num_batches = len(test_parts)
# ensures that only the file_types that appear to be selected get merged.
#print(f"DEBUG num_batches {num_batches}, batch_size {batch_size}, file_type {file_type}")
if batch_size and num_batches >= 1: #--- if the batch size was larger than the number of total samples, this will still drop the _1
merge_batches(num_batches, data_dir, file_type)
# reload all the big stuff -- after everything important is done.
# attempts to consolidate all the batch_files below, if they'll fit in memory.
data_containers = []
for temp_data in temp_data_pickles:
temp_file = Path(data_dir, temp_data)
if temp_file.exists(): #possibly user deletes file while processing, since these are big
with open(temp_file,'rb') as _file:
batch_data_containers = pickle.load(_file)
data_containers.extend(batch_data_containers)
del batch_data_containers
temp_file.unlink() # delete it after loading.
if betas:
return consolidate_values_for_sheet(data_containers, postprocess_func_colname='beta_value')
elif m_value:
return consolidate_values_for_sheet(data_containers, postprocess_func_colname='m_value')
else:
return data_containers
class SampleDataContainer():
"""Wrapper that provides easy access to slices of data for a Sample,
its RawDataset, and the pre-configured MethylationDataset subsets of probes.
Arguments:
raw_dataset {RawDataset} -- A sample's RawDataset for a single well on the processed array.
manifest {Manifest} -- The Manifest for the correlated RawDataset's array type.
bit (default: float64) -- option to store data as float16 or float32 to save space.
pval (default: False) -- whether to apply p-value-detection algorithm to remove
unreliable probes (based on signal/noise ratio of fluorescence)
uses the sesame method (pOOBah) based on out of band background levels
Jan 2020: added .snp_(un)methylated property. Used in postprocess.consolidate_control_snp()
Mar 2020: added p-value detection option
Mar 2020: added mouse probe post-processing separation
"""
__data_frame = None
def __init__(self, raw_dataset, manifest, retain_uncorrected_probe_intensities=False,
bit='float32', pval=False, poobah_decimals=3):
self.manifest = manifest
self.pval = pval
self.poobah_decimals = poobah_decimals
self.raw_dataset = raw_dataset
self.sample = raw_dataset.sample
self.retain_uncorrected_probe_intensities=retain_uncorrected_probe_intensities
self.methylated = MethylationDataset.methylated(raw_dataset, manifest)
self.unmethylated = MethylationDataset.unmethylated(raw_dataset, manifest)
self.snp_methylated = MethylationDataset.snp_methylated(raw_dataset, manifest)
self.snp_unmethylated = MethylationDataset.snp_unmethylated(raw_dataset, manifest)
# mouse probes are processed within the normal meth/unmeth sets, then split at the end of the preprocessing step.
#self.mouse_methylated = MethylationDataset.mouse_methylated(raw_dataset, manifest)
#self.mouse_unmethylated = MethylationDataset.mouse_unmethylated(raw_dataset, manifest)
self.oob_controls = raw_dataset.get_oob_controls(manifest)
self.data_type = bit #(float64, float32, or float16)
if self.data_type is None:
self.data_type = 'float32'
if self.data_type not in ('float64','float32','float16'):
raise ValueError(f"invalid data_type: {self.data_type} should be one of ('float64','float32','float16')")
@property
def fg_green(self):
return self.raw_dataset.get_fg_values(self.manifest, Channel.GREEN)
@property
def fg_red(self):
return self.raw_dataset.get_fg_values(self.manifest, Channel.RED)
@property
def ctrl_green(self):
return self.raw_dataset.get_fg_controls(self.manifest, Channel.GREEN)
@property
def ctrl_red(self):
return self.raw_dataset.get_fg_controls(self.manifest, Channel.RED)
@property
def oob_green(self):
return self.oob_controls[Channel.GREEN]
@property
def oob_red(self):
return self.oob_controls[Channel.RED]
def preprocess(self):
""" combines the methylated and unmethylated columns from the SampleDataContainer. """
if self.__data_frame is None:  # truthiness of a DataFrame raises ValueError
if self.retain_uncorrected_probe_intensities:
uncorrected_meth = self.methylated.data_frame.copy()['mean_value'].astype('float32')
uncorrected_unmeth = self.unmethylated.data_frame.copy()['mean_value'].astype('float32')
# could be int16, if missing values didn't happen (cuts file size in half)
if uncorrected_meth.isna().sum() == 0 and uncorrected_unmeth.isna().sum() == 0:
uncorrected_meth = uncorrected_meth.astype('int16')
uncorrected_unmeth = uncorrected_unmeth.astype('int16')
if self.pval:
pval_probes_df = _pval_sesame_preprocess(self)
# output: df with one column named 'poobah_pval'
preprocess_noob(self) # apply corrections: bg subtract, then noob (in preprocess.py)
methylated = self.methylated.data_frame[['noob']]
unmethylated = self.unmethylated.data_frame[['noob']]
self.__data_frame = methylated.join(
unmethylated,
lsuffix='_meth',
rsuffix='_unmeth',
)
if self.pval:
self.__data_frame = self.__data_frame.merge(pval_probes_df, how='inner', left_index=True, right_index=True)
if self.retain_uncorrected_probe_intensities:
self.__data_frame['meth'] = uncorrected_meth
self.__data_frame['unmeth'] = uncorrected_unmeth
# reduce to float32 during processing. final output may be 16,32,64 in _postprocess() + export()
self.__data_frame = self.__data_frame.astype('float32')
if self.poobah_decimals != 3 and 'poobah_pval' in self.__data_frame.columns:
other_columns = list(self.__data_frame.columns)
other_columns.remove('poobah_pval')
other_columns = {column:3 for column in other_columns}
self.__data_frame = self.__data_frame.round(other_columns)
self.__data_frame = self.__data_frame.round({'poobah_pval': self.poobah_decimals})
else:
self.__data_frame = self.__data_frame.round(3)
# here, separate the mouse from normal probes and store mouse separately.
# normal_probes_mask = (self.manifest.data_frame.index.str.startswith('cg', na=False)) | (self.manifest.data_frame.index.str.startswith('ch', na=False))
mouse_probes_mask = (self.manifest.data_frame.index.str.startswith('mu', na=False)) | (self.manifest.data_frame.index.str.startswith('rp', na=False))
mouse_probes = self.manifest.data_frame[mouse_probes_mask]
mouse_probe_count = mouse_probes.shape[0]
self.mouse_data_frame = self.__data_frame[self.__data_frame.index.isin(mouse_probes.index)]
if mouse_probe_count > 0:
LOGGER.debug(f"{mouse_probe_count} mouse probes ->> {self.mouse_data_frame.shape[0]} in idat")
# now remove
# MorganeAudrain/Calcium_new
import os
import logging
import numpy as np
import caiman as cm
import datetime
import pickle
import mysql.connector
import getpass
database = mysql.connector.connect(
host="172.16.31.10",
user="morgane",
passwd=getpass.getpass(),
database="Calcium_imaging",
use_pure=True
)
mycursor = database.cursor()
def get_corr_pnr(input_mmap_file_path, gSig=None):
"""
This function gets an analysis state and a gSig absolute value
and creates correlation and pnr images for it.
"""
# Define data directory
data_dir = 'data/interim/source_extraction/trial_wise/'
if gSig is None:
sql = "SELECT gSig FROM Analysis WHERE motion_correction_main=%s "
val = [input_mmap_file_path, ]
mycursor.execute(sql, val)
myresult = mycursor.fetchall()
for x in myresult:
gSig = x[0]  # fetchall() returns tuples; take the gSig column
# Compute summary images
t0 = datetime.datetime.today()
logging.info('Computing summary images')
if os.path.isfile(input_mmap_file_path):
Yr, dims, T = cm.load_memmap(input_mmap_file_path)
logging.debug(f' Loaded movie. dims = {dims}, T = {T}.')
images = Yr.T.reshape((T,) + dims, order='F')
else:
logging.warning('.mmap file does not exist. Cancelling')
return
cn_filter, pnr = cm.summary_images.correlation_pnr(images[::1], gSig=gSig, swap_dim=False)
dt = int((datetime.datetime.today() - t0).seconds / 60) # timedelta in minutes
logging.info(f' Computed summary images. dt = {dt} min')
# Saving summary images as npy files
sql="SELECT mouse,session,trial,is_rest,decoding_v,cropping_v FROM Analysis WHERE motion_correction_main=%s "
val=[input_mmap_file_path,]
mycursor.execute(sql,val)
myresult = mycursor.fetchall()
data=[]
for x in myresult:
data += x
file_name = f"mouse_{data[0]}_session_{data[1]}_trial_{data[2]}.{data[3]}.v{data[5]}.{data[4]}"
output_tif_file_path = f"data/interim/cropping/main/{file_name}.tif"
corr_npy_file_path = data_dir + f'meta/corr/{file_name}_gSig_{gSig}.npy'  # file_name built from the query above
pnr_npy_file_path = data_dir + f'meta/pnr/{file_name}_gSig_{gSig}.npy'
with open(corr_npy_file_path, 'wb') as f:
np.save(f, cn_filter)
with open(pnr_npy_file_path, 'wb') as f:
np.save(f, pnr)
# Define the source extraction output already
output = {'meta': {}}
# Store the paths in the meta dictionary
output['meta']['corr'] = {'main': corr_npy_file_path, 'meta': {}}
output['meta']['pnr'] = {'main': pnr_npy_file_path, 'meta': {}}
# Get the min, mean, max
output['meta']['corr']['min'] = round(cn_filter.min(), 3)
output['meta']['corr']['mean'] = round(cn_filter.mean(), 3)
output['meta']['corr']['max'] = round(cn_filter.max(), 3)
output['meta']['pnr']['min'] = round(pnr.min(), 2)
output['meta']['pnr']['mean'] = round(pnr.mean(), 2)
output['meta']['pnr']['max'] = round(pnr.max(), 2)
# Store the output in the row
row.loc['source_extraction_output'] = str(output)
return index, row
def get_corr_pnr_path(index, gSig_abs=None):
fname = db.create_file_name(2, index)
os.chdir(os.environ['PROJECT_DIR'])
corr_dir = 'data/interim/source_extraction/trial_wise/meta/corr'
corr_path = None
for path in os.listdir(corr_dir):
if fname in path:
if gSig_abs is None:
corr_path = os.path.join(corr_dir, path)
else:
if path[-5] == str(gSig_abs):
corr_path = os.path.join(corr_dir, path)
pnr_dir = 'data/interim/source_extraction/trial_wise/meta/pnr'
pnr_path = None
for path in os.listdir(pnr_dir):
if fname in path:
if gSig_abs is None:
pnr_path = os.path.join(pnr_dir, path)
else:
if path[-5] == str(gSig_abs):
pnr_path = os.path.join(pnr_dir, path)
return corr_path, pnr_path
def get_quality_metrics_motion_correction(row, crispness=False, local_correlations=False, correlations=False,
optical_flow=False):
'''
This is a wrapper function to compute (a selection of) the metrics provided
by CaImAn for motion correction.
'''
# Get the parameters, motion correction output and cropping output of this row
index = row.name
row_local = row.copy()
parameters = eval(row_local.loc['motion_correction_parameters'])
output = eval(row_local.loc['motion_correction_output'])
cropping_output = eval(row_local.loc['cropping_output'])
# Get the metrics file path
metrics_pkl_file_path = output['meta']['metrics']['other']
# Load the already available metrics
with open(metrics_pkl_file_path, 'rb') as f:
try:
meta_dict = pickle.load(f)
except Exception:
meta_dict = {}
# ORIGINAL MOVIE
logging.info(f'{index} Computing metrics for original movie')
t0 = datetime.datetime.today()
fname_orig = cropping_output['main']
tmpl_orig, crispness_orig, crispness_corr_orig, correlations_orig, img_corr_orig, flows_orig, norms_orig = get_metrics_auxillary(
fname_orig, swap_dim=False, winsize=100, play_flow=False,
resize_fact_flow=.2, one_photon=True, crispness=crispness,
correlations=correlations, local_correlations=local_correlations,
optical_flow=optical_flow)
dt = int((datetime.datetime.today() - t0).seconds / 60) # timedelta in minutes
output['meta']['metrics']['original'] = {
'crispness': crispness_orig,
'crispness_corr': crispness_corr_orig
}
meta_dict['original'] = db.remove_None_from_dict({
'correlations': correlations_orig,
'local_correlations': img_corr_orig,
'flows': flows_orig,
'norms': norms_orig})
output['meta']['duration']['metrics_orig'] = dt
logging.info(f'{index} Computed metrics for original movie. dt = {dt} min')
# RIGID MOVIE
if not parameters['pw_rigid'] or (parameters['pw_rigid'] and 'alternate' in output):
logging.info(f'{index} Computing metrics for rigid movie')
t0 = datetime.datetime.today()
fname_rig = output['main'] if not parameters['pw_rigid'] else output['alternate']
tmpl_rig, crispness_rig, crispness_corr_rig, correlations_rig, img_corr_rig, flows_rig, norms_rig = get_metrics_auxillary(
fname_rig, swap_dim=False, winsize=100, play_flow=False,
resize_fact_flow=.2, one_photon=True, crispness=crispness,
correlations=correlations, local_correlations=local_correlations,
optical_flow=optical_flow)
dt = int((datetime.datetime.today() - t0).seconds / 60) # timedelta in minutes
output['meta']['metrics']['rigid'] = {
'crispness': crispness_rig,
'crispness_corr': crispness_corr_rig
}
meta_dict['rigid'] = db.remove_None_from_dict({
'correlations': correlations_rig,
'local_correlations': img_corr_rig,
'flows': flows_rig,
'norms': norms_rig})
output['meta']['duration']['metrics_rig'] = dt
logging.info(f'{index} Computed metrics for rigid movie. dt = {dt} min')
if parameters['pw_rigid']:
logging.info(f'{index} Computing metrics for pw-rigid movie')
t0 = datetime.datetime.today()
fname_els = output['main']
tmpl_els, crispness_els, crispness_corr_els, correlations_els, img_corr_els, flows_els, norms_els = get_metrics_auxillary(
fname_els, swap_dim=False, winsize=100, play_flow=False,
resize_fact_flow=.2, one_photon=True, crispness=crispness,
correlations=correlations, local_correlations=local_correlations,
optical_flow=optical_flow)
dt = int((datetime.datetime.today() - t0).seconds / 60) # timedelta in minutes
output['meta']['metrics']['pw_rigid'] = {
'crispness': crispness_els,
'crispness_corr': crispness_corr_els
}
meta_dict['pw_rigid'] = db.remove_None_from_dict({
'correlations': correlations_els,
'local_correlations': img_corr_els,
'flows': flows_els,
'norms': norms_els})
output['meta']['duration']['metrics_els'] = dt
logging.info(f'{index} Computed metrics for pw-rigid movie. dt = {dt} min')
# Save the metrics in a pkl file
logging.info(f'{index} Saving metrics')
with open(metrics_pkl_file_path, 'wb') as f:
pickle.dump(meta_dict, f)
logging.info(f'{index} Saved metrics')
# Store the new output and return it
row_local.loc['motion_correction_output'] = str(output)
return row_local
def get_metrics_auxillary(fname, swap_dim, pyr_scale=.5, levels=3,
winsize=100, iterations=15, poly_n=5, poly_sigma=1.2 / 5, flags=0,
play_flow=False, resize_fact_flow=.2, template=None, save_npz=False,
one_photon=True, crispness=True, correlations=True, local_correlations=True,
optical_flow=True):
'''
This function is copied from the CaImAn package and edited for use in this calcium
imaging analysis pipeline. The original contained some abnormalities that we wanted to avoid.
'''
import scipy
import cv2
import pylab as pl  # needed by the play_flow branch below
# Logic
if crispness: local_correlations = True
# Load the movie
m = cm.load(fname)
vmin, vmax = -1, 1
# max_shft_x = np.int(np.ceil((np.shape(m)[1] - final_size_x) / 2))
# max_shft_y = np.int(np.ceil((np.shape(m)[2] - final_size_y) / 2))
# max_shft_x_1 = - ((np.shape(m)[1] - max_shft_x) - (final_size_x))
# max_shft_y_1 = - ((np.shape(m)[2] - max_shft_y) - (final_size_y))
# if max_shft_x_1 == 0:
# max_shft_x_1 = None
#
# if max_shft_y_1 == 0:
# max_shft_y_1 = None
# logging.info([max_shft_x, max_shft_x_1, max_shft_y, max_shft_y_1])
# m = m[:, max_shft_x:max_shft_x_1, max_shft_y:max_shft_y_1]
# Check the movie for NaN's which may cause problems
if np.sum(np.isnan(m)) > 0:
logging.info(m.shape)
logging.warning('Movie contains NaN')
raise Exception('Movie contains NaN')
if template is None:
tmpl = cm.motion_correction.bin_median(m)
else:
tmpl = template
if correlations:
logging.debug('Computing correlations')
t0 = datetime.datetime.today()
correlations = []
count = 0
if one_photon:
m_compute = m - np.min(m)
for fr in m_compute:
if count % 100 == 0:
logging.debug(f'Frame {count}')
count += 1
correlations.append(scipy.stats.pearsonr(
fr.flatten(), tmpl.flatten())[0])
dt = int((datetime.datetime.today() - t0).seconds / 60) # timedelta in minutes
logging.debug(f'Computed correlations. dt = {dt} min')
else:
correlations = None
if local_correlations:
logging.debug('Computing local correlations')
t0 = datetime.datetime.today()
img_corr = m.local_correlations(eight_neighbours=True, swap_dim=swap_dim)
dt = int((datetime.datetime.today() - t0).seconds / 60) # timedelta in minutes
logging.debug(f'Computed local correlations. dt = {dt} min')
else:
img_corr = None
if crispness:
logging.debug('Computing crispness')
t0 = datetime.datetime.today()
smoothness = np.sqrt(
np.sum(np.sum(np.array(np.gradient(np.mean(m, 0))) ** 2, 0)))
smoothness_corr = np.sqrt(
np.sum(np.sum(np.array(np.gradient(img_corr)) ** 2, 0)))
dt = int((datetime.datetime.today() - t0).seconds / 60) # timedelta in minutes
logging.debug(
f'Computed crispness. dt = {dt} min. Crispness = {smoothness}, crispness corr = {smoothness_corr}.')
else:
smoothness = smoothness_corr = None  # both are referenced at return time
if optical_flow:
logging.debug('Computing optical flow')
t0 = datetime.datetime.today()
m = m.resize(1, 1, resize_fact_flow)
norms = []
flows = []
count = 0
for fr in m:
if count % 100 == 0:
logging.debug(count)
count += 1
flow = cv2.calcOpticalFlowFarneback(
tmpl, fr, None, pyr_scale, levels, winsize, iterations, poly_n, poly_sigma, flags)
if play_flow:
pl.subplot(1, 3, 1)
pl.cla()
pl.imshow(fr, vmin=0, vmax=300, cmap='gray')
pl.title('movie')
pl.subplot(1, 3, 3)
pl.cla()
pl.imshow(flow[:, :, 1], vmin=vmin, vmax=vmax)
pl.title('y_flow')
pl.subplot(1, 3, 2)
pl.cla()
pl.imshow(flow[:, :, 0], vmin=vmin, vmax=vmax)
pl.title('x_flow')
pl.pause(.05)
n = np.linalg.norm(flow)
flows.append(flow)
norms.append(n)
dt = int((datetime.datetime.today() - t0).seconds / 60) # timedelta in minutes
logging.debug(f'Computed optical flow. dt = {dt} min')
else:
flows = norms = None
if save_npz:
logging.debug('Saving metrics in .npz format')
np.savez(fname[:-4] + '_metrics', flows=flows, norms=norms, correlations=correlations, smoothness=smoothness,
tmpl=tmpl, smoothness_corr=smoothness_corr, img_corr=img_corr)
logging.debug('Saved metrics in .npz format')
return tmpl, smoothness, smoothness_corr, correlations, img_corr, flows, norms
def make_figures(index, row, force=False):
# Create file name
file_name = db.create_file_name(step_index, index)
# Load meta_pkl file
output = eval(row.loc['motion_correction_output'])
metrics_pkl_file_path = output['meta']['metrics']
with open(metrics_pkl_file_path, 'rb') as f:
x = pickle.load(f)
# Possible figures
figure_names = np.array(['rig_template', 'rig_shifts', 'els_template', 'correlations', 'corelations_orig_vs_rig',
'correlations_rig_vs_els', 'correlations_orig_vs_els', 'orig_local_correlations',
'rig_local_correlations',
'els_local_correlations'])
def figure_flag(i):
# This function determines which figures can be made. If they cannot be made, either
# an analysis step has not been performed or metrics have to be computed
if i == 0:
return 'rigid' in x
elif i == 1:
return 'rigid' in x
elif i == 2:
return 'non-rigid' in x
elif i == 3:
return ['original' in x and 'correlations' in x['original'], 'rigid' in x and 'correlations' in
import numpy as np
import pickle
import theano
import time
import constants as c
import scipy.constants as sc
import scipy.interpolate as spi
import eigen
import starry
import progressbar
import theano.tensor as tt
import mc3.stats as ms
from numba import njit
def initsystem(fit, ydeg):
'''
Uses a fit object to build the respective starry objects. Useful
because starry objects cannot be pickled. Returns a tuple of
(star, planet, system).
'''
cfg = fit.cfg
star = starry.Primary(starry.Map(ydeg=1, amp=1),
m =cfg.star.m,
r =cfg.star.r,
prot=cfg.star.prot)
planet = starry.kepler.Secondary(starry.Map(ydeg=ydeg),
m =cfg.planet.m,
r =cfg.planet.r,
porb =cfg.planet.porb,
prot =cfg.planet.prot,
Omega=cfg.planet.Omega,
ecc =cfg.planet.ecc,
w =cfg.planet.w,
t0 =cfg.planet.t0,
inc =cfg.planet.inc,
theta0=180)
system = starry.System(star, planet)
return star, planet, system
def specint(wn, spec, filtwn_list, filttrans_list):
"""
Integrate a spectrum over the given filters.
Arguments
---------
wn: 1D array
Wavenumbers (/cm) of the spectrum
spec: 1D array
Spectrum to be integrated
filtwn_list: list
List of arrays of filter wavenumbers, in /cm.
filttrans_list: list
List of arrays of filter transmission. Same length as filtwn_list.
Returns
-------
intspec: 1D array
The spectrum integrated over each filter.
"""
if len(filtwn_list) != len(filttrans_list):
raise ValueError("filtwn_list and filttrans_list sizes do not match.")
intspec = np.zeros(len(filtwn_list))
for i, (filtwn, filttrans) in enumerate(zip(filtwn_list, filttrans_list)):
# Sort ascending
idx = np.argsort(filtwn)
intfunc = spi.interp1d(filtwn[idx], filttrans[idx],
bounds_error=False, fill_value=0)
# Interpolate transmission
inttrans = intfunc(wn)
# Normalize to one
norminttrans = inttrans / np.trapz(inttrans, wn)
# Integrate filtered spectrum
intspec[i] = np.trapz(spec * norminttrans, wn)
return intspec
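# Usage sketch (synthetic numbers, not from the pipeline): integrating a
# flat spectrum over a normalized boxcar filter should return ~1.0.
# wn = np.linspace(4000, 6000, 2001)        # wavenumbers in /cm
# spec = np.ones_like(wn)                   # flat spectrum
# filtwn = np.linspace(4900, 5100, 201)     # boxcar filter
# filttrans = np.ones_like(filtwn)
# specint(wn, spec, [filtwn], [filttrans])  # -> array([~1.0])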
def vislon(planet, fit):
"""
Determines the range of visible longitudes based on times of
observation.
Arguments
---------
planet: starry Planet object
Planet object
fit: Fit object
Fit object. Must contain observation information.
Returns
-------
minlon: float
Minimum visible longitude, in degrees
maxlon: float
Maximum visible longitude, in degrees
"""
t = fit.t
porb = planet.porb # days / orbit
prot = planet.prot # days / rotation
t0 = planet.t0 # days
theta0 = planet.theta0 # degrees
# Central longitude at each time ("sub-observer" point)
centlon = theta0 - (t - t0) / prot * 360
# Minimum and maximum longitudes (assuming +/- 90 degree
# visibility)
limb1 = centlon - 90
limb2 = centlon + 90
# Rescale to [-180, 180]
limb1 = (limb1 + 180) % 360 - 180
limb2 = (limb2 + 180) % 360 - 180
return np.min(limb1.eval()), np.max(limb2.eval())
def readfilters(filterfiles):
"""
Reads filter files and determines the mean wavelength.
Arguments
---------
filterfiles: list
list of paths to filter files
Returns
-------
filtwl_list, filtwn_list, filttrans_list: lists of 1D arrays
Filter wavelengths, wavenumbers, and transmissions
wnmid, wlmid: 1D arrays
Mean filter wavenumbers and wavelengths
"""
filtwl_list = []
filtwn_list = []
filttrans_list = []
wnmid = np.zeros(len(filterfiles))
for i, filterfile in enumerate(filterfiles):
filtwl, trans = np.loadtxt(filterfile, unpack=True)
filtwn = 1.0 / (filtwl * c.um2cm)
wnmid[i] = np.sum(filtwn * trans) / np.sum(trans)
filtwl_list.append(filtwl)
filtwn_list.append(filtwn)
filttrans_list.append(trans)
wlmid = 1 / (c.um2cm * wnmid)
return filtwl_list, filtwn_list, filttrans_list, wnmid, wlmid
def visibility(t, latgrid, longrid, dlatgrid, dlongrid, theta0, prot,
t0, rp, rs, x, y):
"""
Calculate the visibility of a grid of cells on a planet at a specific
time. Returns a combined visibility based on the observer's
line-of-sight, the area of the cells, and the effect of the star.
Arguments
---------
t: float
Time to calculate visibility.
latgrid: 2D array
Array of latitudes, in radians, from -pi/2 to pi/2.
longrid: 2D array
Array of longitudes, in radians, from -pi to pi.
dlatgrid: 2D array
Latitude resolution of each cell, in radians.
dlongrid: 2D array
Longitude resolution of each cell, in radians.
theta0: float
Rotation at t0 in radians.
prot: float
Rotation period, the same units as t.
t0: float
Time of transit, same units as t.
rp: float
Planet radius in solar radii.
rs: float
Star radius in solar radii.
x: tuple
x position of (star, planet)
y: tuple
y position of (star, planet)
Returns
-------
vis: 2D array
Visibility of each grid cell. Same shape as latgrid and longrid.
"""
if latgrid.shape != longrid.shape:
raise ValueError("Number of latitudes and longitudes do not match.")
losvis = np.zeros(latgrid.shape)
starvis = np.zeros(latgrid.shape)
# Flag to do star visibility calculation (improves efficiency)
dostar = True
# Central longitude (observer line-of-sight)
centlon = theta0 - (t - t0) / prot * 2 * np.pi
# Convert relative to substellar point
centlon = (centlon + np.pi) % (2 * np.pi) - np.pi
xsep = x[0] - x[1]
ysep = y[0] - y[1]
d = np.sqrt(xsep**2 + ysep**2)
# Visible fraction due to star
# No grid cells visible. Return 0s
if (d < rs - rp):
return np.zeros(latgrid.shape)
# All grid cells visible. No need to do star calculation.
elif (d > rs + rp):
starvis[:,:] = 1.0
dostar = False
# Otherwise, time is during ingress/egress and we cannot simplify
# calculation
nlat, nlon = latgrid.shape
for i in range(nlat):
for j in range(nlon):
# Angles wrt the observer
lat = latgrid[i,j]
lon = longrid[i,j]
dlat = dlatgrid[i,j]
dlon = dlongrid[i,j]
phi = lon - centlon
theta = lat
phimin = phi - dlon / 2.
phimax = phi + dlon / 2.
thetamin = lat - dlat / 2.
thetamax = lat + dlat / 2.
# Cell is not visible at this time. No need to calculate further.
if (phimin > np.pi / 2.) or (phimax < -np.pi / 2.):
losvis[i,j] = 0
# Cell is visible at this time
else:
# Determine visible phi/theta range of the cell
phirng = np.array((np.max((phimin, -np.pi / 2.)),
np.min((phimax, np.pi / 2.))))
thetarng = np.array((np.max((thetamin, -np.pi / 2.)),
np.min((thetamax, np.pi / 2.))))
# Visibility based on LoS
# This is the integral of
#
# A(theta, phi) V(theta, phi) dtheta dphi
#
# where
#
# A = r**2 cos(theta)
# V = cos(theta) cos(phi)
#
# Here we've normalized by pi*r**2, since
# visibility will be applied to Fp/Fs where planet
# size is already taken into account.
losvis[i,j] = (np.diff(thetarng/2) + \
np.diff(np.sin(2*thetarng) / 4)) * \
np.diff(np.sin(phirng)) / \
np.pi
# Grid cell maybe only partially visible
if dostar:
thetamean = np.mean(thetarng)
phimean = np.mean(phirng)
# Grid is "within" the star
if dgrid(x, y, rp, thetamean, phimean) < rs:
starvis[i,j] = 0.0
# Grid is not in the star
else:
starvis[i,j] = 1.0
return starvis * losvis
def dgrid(x, y, rp, theta, phi):
"""
Calculates the projected distance between a latitude (theta) and a
longitude (phi) on a planet with radius rp to a star. Projected
star position is (x[0], y[0]) and planet position is (x[1], y[1]).
"""
xgrid = x[1] + rp * np.cos(theta) * np.sin(phi)
ygrid = y[1] + rp * np.sin(theta)
d = np.sqrt((xgrid - x[0])**2 + (ygrid - y[0])**2)
return d
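# Usage sketch (synthetic values): with the star and planet aligned in
# projection, a point on the planet's limb (phi = pi/2) sits exactly rp
# from the star's center, so it is occulted whenever rp < rs.
# x, y = (0.0, 0.0), (0.0, 0.0)                  # (star, planet) positions
# dgrid(x, y, rp=0.1, theta=0.0, phi=np.pi / 2)  # -> 0.1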
def t_dgrid():
"""
Returns a theano function of dgrid(), with the same arguments.
"""
print('Defining theano function.')
arg1 = theano.tensor.dvector('x')
arg2 = theano.tensor.dvector('y')
arg3 = theano.tensor.dscalar('rp')
arg4 = theano.tensor.dscalar('theta')
arg5 = theano.tensor.dscalar('phi')
f = theano.function([arg1, arg2, arg3, arg4, arg5],
dgrid(arg1, arg2, arg3, arg4, arg5))
return f
def mapintensity(map, lat, lon, amp):
"""
Calculates a grid of intensities, multiplied by the amplitude given.
"""
grid = map.intensity(lat=lat.flatten(), lon=lon.flatten()).eval()
grid *= amp
grid = grid.reshape(lat.shape)
return grid
def hotspotloc_driver(fit, map):
"""
Calculates a distribution of hotspot locations based on the MCMC
posterior distribution.
Note that this function assumes the first ncurves parameters
in the posterior are associated with eigencurves. This will not
be true if some eigencurves are skipped over, as MC3 does not
include fixed parameters in the posterior.
Inputs
------
fit: Fit instance
map: Map instance (not starry Map)
Returns
-------
hslocbest: tuple
Best-fit hotspot location (lat, lon), in degrees.
hslocstd: tuple
Standard deviation of the hotspot location posterior distribution
as (lat, lon)
hspot: tuple
Marginalized posterior distributions of latitude and longitude
"""
post = map.post[map.zmask]
nsamp, nfree = post.shape
ntries = 5
oversample = 1
if fit.cfg.twod.ncalc > nsamp:
print("Warning: ncalc reduced to match burned-in sample.")
ncalc = nsamp
else:
# rpython/jit/backend/llsupport/test/zrpy_gc_test.py
"""
This is a test that translates a complete JIT together with a GC and runs it.
It is testing that the GC-dependent aspects basically work, mostly the mallocs
and the various cases of write barrier.
"""
import weakref
import os, py
from rpython.rlib import rgc
from rpython.rtyper.lltypesystem import lltype
from rpython.rlib.jit import JitDriver, dont_look_inside
from rpython.rlib.jit import elidable, unroll_safe
from rpython.rlib.jit import promote
from rpython.jit.backend.llsupport.gc import GcLLDescr_framework
from rpython.tool.udir import udir
from rpython.config.translationoption import DEFL_GC
from rpython.config.config import ConfigError
class X(object):
def __init__(self, x=0):
self.x = x
next = None
class Y(object):
# for pinning tests we need an object without references to other
# objects
def __init__(self, x=0):
self.x = x
class CheckError(Exception):
pass
def check(flag):
if not flag:
raise CheckError
def get_entry(g):
def entrypoint(args):
name = ''
n = 2000
argc = len(args)
if argc > 1:
name = args[1]
if argc > 2:
n = int(args[2])
r_list = []
for i in range(20):
r = g(name, n)
r_list.append(r)
rgc.collect()
rgc.collect(); rgc.collect()
freed = 0
for r in r_list:
if r() is None:
freed += 1
print freed
return 0
return entrypoint
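# Design note: each entrypoint run keeps only weakrefs to the 20 X()
# instances it allocates, so after the explicit rgc.collect() calls all of
# them should be collectable; the test harness below asserts that the
# printed count equals 20.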
def get_functions_to_patch():
from rpython.jit.backend.llsupport import gc
#
can_use_nursery_malloc1 = gc.GcLLDescr_framework.can_use_nursery_malloc
def can_use_nursery_malloc2(*args):
try:
if os.environ['PYPY_NO_INLINE_MALLOC']:
return False
except KeyError:
pass
return can_use_nursery_malloc1(*args)
#
return {(gc.GcLLDescr_framework, 'can_use_nursery_malloc'):
can_use_nursery_malloc2}
def compile(f, gc, **kwds):
from rpython.annotator.listdef import s_list_of_strings
from rpython.translator.translator import TranslationContext
from rpython.jit.metainterp.warmspot import apply_jit
from rpython.translator.c import genc
#
t = TranslationContext()
t.config.translation.gc = gc
if gc != 'boehm':
t.config.translation.gcremovetypeptr = True
for name, value in kwds.items():
setattr(t.config.translation, name, value)
ann = t.buildannotator()
ann.build_types(f, [s_list_of_strings], main_entry_point=True)
t.buildrtyper().specialize()
if kwds['jit']:
patch = get_functions_to_patch()
old_value = {}
try:
for (obj, attr), value in patch.items():
old_value[obj, attr] = getattr(obj, attr)
setattr(obj, attr, value)
#
apply_jit(t)
#
finally:
for (obj, attr), oldvalue in old_value.items():
setattr(obj, attr, oldvalue)
cbuilder = genc.CStandaloneBuilder(t, f, t.config)
cbuilder.generate_source(defines=cbuilder.DEBUG_DEFINES)
cbuilder.compile()
return cbuilder
def run(cbuilder, args=''):
#
pypylog = udir.join('test_zrpy_gc.log')
env = os.environ.copy()
env['PYPYLOG'] = ':%s' % pypylog
data = cbuilder.cmdexec(args, env=env)
return data.strip()
# ______________________________________________________________________
class BaseFrameworkTests(object):
gc = DEFL_GC
def setup_class(cls):
funcs = []
name_to_func = {}
for fullname in dir(cls):
if not fullname.startswith('define'):
continue
definefunc = getattr(cls, fullname)
_, name = fullname.split('_', 1)
beforefunc, loopfunc, afterfunc = definefunc.im_func(cls)
if beforefunc is None:
def beforefunc(n, x):
return n, x, None, None, None, None, None, None, None, None, None, ''
if afterfunc is None:
def afterfunc(n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s):
pass
beforefunc.__name__ = 'before_'+name
loopfunc.__name__ = 'loop_'+name
afterfunc.__name__ = 'after_'+name
funcs.append((beforefunc, loopfunc, afterfunc))
assert name not in name_to_func
name_to_func[name] = len(name_to_func)
print name_to_func
def allfuncs(name, n):
x = X()
x.foo = 2
main_allfuncs(name, n, x)
x.foo = 5
return weakref.ref(x)
def main_allfuncs(name, n, x):
num = name_to_func[name]
n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s = funcs[num][0](n, x)
while n > 0:
myjitdriver.can_enter_jit(num=num, n=n, x=x, x0=x0, x1=x1,
x2=x2, x3=x3, x4=x4, x5=x5, x6=x6, x7=x7, l=l, s=s)
myjitdriver.jit_merge_point(num=num, n=n, x=x, x0=x0, x1=x1,
x2=x2, x3=x3, x4=x4, x5=x5, x6=x6, x7=x7, l=l, s=s)
n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s = funcs[num][1](
n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s)
funcs[num][2](n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s)
myjitdriver = JitDriver(greens = ['num'],
reds = ['n', 'x', 'x0', 'x1', 'x2', 'x3', 'x4',
'x5', 'x6', 'x7', 'l', 's'], is_recursive=True)
cls.main_allfuncs = staticmethod(main_allfuncs)
cls.name_to_func = name_to_func
OLD_DEBUG = GcLLDescr_framework.DEBUG
try:
GcLLDescr_framework.DEBUG = True
cls.cbuilder = compile(get_entry(allfuncs), cls.gc,
gcrootfinder=cls.gcrootfinder, jit=True,
thread=True)
finally:
GcLLDescr_framework.DEBUG = OLD_DEBUG
def _run(self, name, n, env):
res = self.cbuilder.cmdexec("%s %d" %(name, n), env=env)
assert int(res) == 20
def run(self, name, n=2000):
pypylog = udir.join('TestCompileFramework.log')
env = os.environ.copy()
env['PYPYLOG'] = ':%s' % pypylog
env['PYPY_NO_INLINE_MALLOC'] = '1'
self._run(name, n, env)
env['PYPY_NO_INLINE_MALLOC'] = ''
self._run(name, n, env)
def run_orig(self, name, n, x):
self.main_allfuncs(name, n, x)
class CompileFrameworkTests(BaseFrameworkTests):
# Test suite using (so far) the minimark GC.
## def define_libffi_workaround(cls):
## # XXX: this is a workaround for a bug in database.py. It seems that
## # the problem is triggered by optimizeopt/fficall.py, and in
## # particular by the ``cast_base_ptr_to_instance(Func, llfunc)``: in
## # these tests, that line is the only place where libffi.Func is
## # referenced.
## #
## # The problem occurs because the gctransformer tries to annotate a
## # low-level helper to call the __del__ of libffi.Func when it's too
## # late.
## #
## # This workaround works by forcing the annotator (and all the rest of
## # the toolchain) to see libffi.Func in a "proper" context, not just as
## # the target of cast_base_ptr_to_instance. Note that the function
## # below is *never* called by any actual test, it's just annotated.
## #
## from rpython.rlib.libffi import get_libc_name, CDLL, types, ArgChain
## libc_name = get_libc_name()
## def f(n, x, *args):
## libc = CDLL(libc_name)
## ptr = libc.getpointer('labs', [types.slong], types.slong)
## chain = ArgChain()
## chain.arg(n)
## n = ptr.call(chain, lltype.Signed)
## return (n, x) + args
## return None, f, None
def define_compile_framework_1(cls):
# a moving GC. Simple test, works
# without write_barriers and root stack enumeration.
def f(n, x, *args):
y = X()
y.foo = x.foo
n -= y.foo
return (n, x) + args
return None, f, None
def test_compile_framework_1(self):
self.run('compile_framework_1')
def define_compile_framework_2(cls):
# More complex test, requires root stack enumeration but
# not write_barriers.
def f(n, x, *args):
prev = x
for j in range(101): # f() runs 20'000 times, thus allocates
y = X() # a total of 2'020'000 objects
y.foo = prev.foo
prev = y
n -= prev.foo
return (n, x) + args
return None, f, None
def test_compile_framework_2(self):
self.run('compile_framework_2')
def define_compile_framework_3(cls):
# Third version of the test. Really requires write_barriers.
def f(n, x, *args):
x.next = None
for j in range(101): # f() runs 20'000 times, thus allocates
y = X() # a total of 2'020'000 objects
y.foo = j+1
y.next = x.next
x.next = y
check(x.next.foo == 101)
total = 0
y = x
for j in range(101):
y = y.next
total += y.foo
check(not y.next)
check(total == 101*102/2)
n -= x.foo
return (n, x) + args
return None, f, None
def test_compile_framework_3(self):
x_test = X()
x_test.foo = 5
self.run_orig('compile_framework_3', 6, x_test) # check that it does not raise CheckError
self.run('compile_framework_3')
def define_compile_framework_3_extra(cls):
# Extra version of the test, with tons of live vars around the residual
# call that all contain a GC pointer.
@dont_look_inside
def residual(n=26):
x = X()
x.next = X()
x.next.foo = n
return x
#
def before(n, x):
residual(5)
x0 = residual()
x1 = residual()
x2 = residual()
x3 = residual()
x4 = residual()
x5 = residual()
x6 = residual()
x7 = residual()
n *= 19
return n, None, x0, x1, x2, x3, x4, x5, x6, x7, None, None
def f(n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s):
x8 = residual()
x9 = residual()
check(x0.next.foo == 26)
check(x1.next.foo == 26)
check(x2.next.foo == 26)
check(x3.next.foo == 26)
check(x4.next.foo == 26)
check(x5.next.foo == 26)
check(x6.next.foo == 26)
check(x7.next.foo == 26)
check(x8.next.foo == 26)
check(x9.next.foo == 26)
x0, x1, x2, x3, x4, x5, x6, x7 = x7, x4, x6, x5, x3, x2, x9, x8
n -= 1
return n, None, x0, x1, x2, x3, x4, x5, x6, x7, None, None
return before, f, None
def test_compile_framework_3_extra(self):
self.run_orig('compile_framework_3_extra', 6, None) # check that it does not raise CheckError
self.run('compile_framework_3_extra')
def define_compile_framework_4(cls):
# Fourth version of the test, with __del__.
from rpython.rlib.debug import debug_print
class Counter:
cnt = 0
counter = Counter()
class Z:
def __del__(self):
counter.cnt -= 1
def before(n, x):
debug_print('counter.cnt =', counter.cnt)
check(counter.cnt < 5)
counter.cnt = n // x.foo
return n, x, None, None, None, None, None, None, None, None, None, None
def f(n, x, *args):
Z()
n -= x.foo
return (n, x) + args
return before, f, None
def test_compile_framework_4(self):
self.run('compile_framework_4')
def define_compile_framework_5(cls):
# Test string manipulation.
def f(n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s):
n -= x.foo
s += str(n)
return n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s
def after(n, x, x0, x1, x2,
of Samuel Appleby, though carried
on for the benefit of a small number of clients, was of vast importance
and productive of lucrative returns.
At present, the importance was overshadowed by the immediate interest of
a campaign, which, if successful would land the second Appleby in the
gubernatorial chair. This plan, as yet not a boom, was taking shape with
the neatness and dispatch that characterized the Appleby work.
Young Sam was content to have the matter principally in his father’s
hands, and things had reached a pitch where, to the senior mind, the
coöperation of Wheeler was imperatively necessary.
And, therefore, to Wheeler’s house they must betake themselves.
“What do you know about the Wheeler business, kid?” Keefe inquired, after
Mr. Appleby had left them.
Genevieve leaned back in her chair, her dimpled chin moving up and down
with a pretty rhythm as she enjoyed her chewing-gum, and gazed at the
ceiling beams.
Appleby’s offices were in his own house, and the one given over to these
two was an attractive room, fine with mahogany and plate glass, but also
provided with all the paraphernalia of the most up-to-date of office
furniture. There were good pictures and draperies, and a wood fire added
to the cheer and mitigated the chill of the early fall weather.
Sidling from her seat, Genevieve moved over to a chair near the fire.
“I’ll take those letters when you’re ready,” she said. “Why, I don’t know
a single thing about any Wheeler. Do you?”
“Not definitely. He’s a man who had an awful fight with Mr. Appleby, long
ago. I’ve heard allusions to him now and then, but I know no details.”
“I, either. But, it seems we’re to go there. Only for a night, and then,
on to Boston! Won’t I be glad to go!”
“We’ll only be there a few days. I’m more interested in this Wheeler
performance. I don’t understand it. Who’s Wheeler, anyhow?”
“Dunno. If Sammy turns up this morning, he may enlighten us.”
Sammy did turn up, and not long after the conversation young Appleby
strolled into the office.
Though still looked upon as a boy by his father, the man was of huge
proportions and of an important, slightly overbearing attitude.
Somewhat like his parent in appearance, young Sam, as he was always
called, had more grace and ease, if less effect of power. He smiled
genially and impartially; he seemed cordial and friendly to all the
world, and he was a general favorite. Yet so far he had achieved no great
thing, had no claim to any especial record in public or private life.
At forty, unmarried and unattached, his was a case of an able mentality
and a firm, reliable character, with no opportunity offered to prove its
worth. A little more initiative and he would have made opportunities for
himself; but a nature that took the line of least resistance, a
philosophy that believed in a calm acceptance of things as they came,
left <NAME>, junior, pretty much where he was when he began. If
no man could say aught against him, equally surely no man could say
anything very definite for him. Yet many agreed that he was a man whose
powers would develop with acquired responsibilities, and already he had a
following.
“Hello, little one,” he greeted Genevieve, carelessly, as he sat down
near Keefe. “I say, old chap, you’re going down to the Wheelers’ to-day,
I hear.”
“Yes; this afternoon,” and the secretary looked up inquiringly.
“Well, I’ll tell you what. You know the governor’s going there to get
Wheeler’s aid in my election boom, and I can tell you a way to help
things along, if you agree. See?”
“Not yet, but go ahead.”
“Well, it’s this way. <NAME>’s daughter is devoted to her father.
Not only filial respect and all that, but she just fairly idolizes the
old man. Now, he recips, of course, and what she says goes. So—I’m asking
you squarely—won’t you put in a good word to Maida, that’s the girl—and
if you do it with your inimitable dexterity and grace, she’ll fall for
it.”
“You mean for me to praise you up to <NAME> and ask her father to
give you the benefit of his influence?”
“How clearly you do put things! That’s exactly what I mean. It’s no harm,
you know—merely the most innocent sort of electioneering——”
“Rather!” laughed Keefe. “If all electioneering were as innocent as that,
the word would carry no unpleasant meaning.”
“Then you’ll do it?”
“Of course I will—if I get opportunity.”
“Oh, you’ll have that. It’s a big, rambling country house—a delightful
one, too—and there’s tea in the hall, and tennis on the lawn, and
moonlight on the verandas——”
“Hold up, Sam,” Keefe warned him, “is the girl pretty?”
“Haven’t seen her for years, but probably, yes. But that’s nothing to
you. You’re working for me, you see.” Appleby’s glance was direct, and
Keefe understood.
“Of course; I was only joking. I’ll carry out your commission, if, as I
said, I get the chance. Tell me something of <NAME>.”
“Oh, he’s a good old chap. Pathetic, rather. You see, he bumped up
against dad once, and got the worst of it.”
“How?”
<NAME> hesitated a moment and then said: “I see you don’t know the
story. But it’s no secret, and you may as well be told. You listen, too,
<NAME>, but there’s no call to tattle.”
“I’ll go home if you say so,” Genevieve piped up, a little crisply.
“No, sit still. Why, it was while dad was governor—about fifteen years
ago, I suppose. And <NAME> forged a paper—that is, he said he
didn’t, but twelve other good and true peers of his said he did. Anyway,
he was convicted and sentenced, but father was a good friend of his, and
being governor, he pardoned Wheeler. But the pardon was on condition—oh,
I say—hasn’t dad ever told you, Keefe?”
“Never.”
“Then, maybe I’d better leave it for him to tell. If he wants you to know
he’ll tell you, and if not, I mustn’t.”
“Oh, goodness!” cried Genevieve. “What a way to do! Get us all excited
over a thrilling tale, and then chop it off short!”
“Go on with it,” said Keefe; but Appleby said, “No; I won’t tell you the
condition of the pardon. But the two men haven’t been friends since, and
won’t be, unless the condition is removed. Of course, dad can’t do it,
but the present governor can make the pardon complete, and would do so in
a minute, if dad asked him to. So, though he hasn’t said so, the
assumption is, that father expects to trade a full pardon of Friend
Wheeler for his help in my campaign.”
“And a good plan,” Keefe nodded his satisfaction.
“But,” Sam went on, “the trouble is that the very same points and
principles that made Wheeler oppose my father’s election will make him
oppose mine. The party is the same, the platform is the same, and I can’t
hope that the man Wheeler is not the same stubborn, adamant, unbreakable
old hickory knot he was the other time.”
“And so, you want me to soften him by persuading his daughter to line up
on our side?”
“Just that, Keefe. And you can do it, I am sure.”
“I’ll try, of course; but I doubt if even a favorite daughter could
influence the man you describe.”
“Let me help,” broke in the irrepressible Genevieve. “I can do lots with
a girl. I can do more than Curt could. I’ll chum up with her and——”
“Now, <NAME>, you keep out of this. I don’t believe in mixing women
and politics.”
“But <NAME>’s a woman.”
“And I don’t want her troubled with politics. Keefe here can persuade her
to coax her father just through her affections—I don’t want her
enlightened as to any of the political details. And I can’t think your
influence would work half as well as that of a man. Moreover, Keefe has
discernment, and if it isn’t a good plan, after all, he’ll know enough to
discard it—while you’d blunder ahead blindly, and queer the whole game!”
“Oh, well,” and bridling with offended pride, Genevieve sought refuge in
her little mirror.
“Now, don’t get huffy,” and Sam smiled at her; “you’ll probably find that
<NAME>’s complexion is finer than yours, anyway, and then you’ll
hate her and won’t want to speak to her at all.”
<NAME> flashed an
<reponame>danlove99/django-auth-ldap-ng
# coding: utf-8
# Copyright (c) 2009, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from copy import deepcopy
import logging
import pickle
import warnings
import ldap
try:
import mockldap
except ImportError:
mockldap = None
import django
from django.conf import settings
import django.db.models.signals
from django.contrib.auth.models import User, Permission, Group
from django.test import TestCase
try:
from django.utils.encoding import force_str
except ImportError: # Django < 1.5
from django.utils.encoding import smart_str as force_str
import unittest
try:
from django.test.utils import override_settings
except ImportError:
override_settings = lambda *args, **kwargs: (lambda v: v)
from django_auth_ldap.models import TestUser, TestProfile
from django_auth_ldap import backend
from django_auth_ldap.config import LDAPSearch, LDAPSearchUnion
from django_auth_ldap.config import PosixGroupType, MemberDNGroupType, NestedMemberDNGroupType, NISGroupType
from django_auth_ldap.config import GroupOfNamesType
class TestSettings(backend.LDAPSettings):
"""
A replacement for backend.LDAPSettings that does not load settings
from django.conf.
"""
def __init__(self, **kwargs):
for name, default in self.defaults.items():
value = kwargs.get(name, default)
setattr(self, name, value)
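# The tests below call self._init_settings(**kwargs), a helper defined
# outside this excerpt. A minimal sketch of such a helper (an assumption,
# not the original implementation) would simply install a TestSettings
# instance on the backend under test:
#
#     def _init_settings(self, **kwargs):
#         self.backend.settings = TestSettings(**kwargs)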
@unittest.skipIf(mockldap is None, "django_auth_ldap tests require the mockldap package.")
class LDAPTest(TestCase):
top = ("o=test", {"o": "test"})
people = ("ou=people,o=test", {"ou": "people"})
groups = ("ou=groups,o=test", {"ou": "groups"})
moregroups = ("ou=moregroups,o=test", {"ou": "moregroups"})
alice = ("uid=alice,ou=people,o=test", {
"uid": ["alice"],
"objectClass": ["person", "organizationalPerson", "inetOrgPerson", "posixAccount"],
"userPassword": ["password"],
"uidNumber": ["1000"],
"gidNumber": ["1000"],
"givenName": ["Alice"],
"sn": ["Adams"]
})
bob = ("uid=bob,ou=people,o=test", {
"uid": ["bob"],
"objectClass": ["person", "organizationalPerson", "inetOrgPerson", "posixAccount"],
"userPassword": ["password"],
"uidNumber": ["1001"],
"gidNumber": ["50"],
"givenName": ["Robert"],
"sn": ["Barker"]
})
dressler = (force_str(u"uid=dreßler,ou=people,o=test"), {
"uid": [force_str(u"dreßler")],
"objectClass": ["person", "organizationalPerson", "inetOrgPerson", "posixAccount"],
"userPassword": ["password"],
"uidNumber": ["1002"],
"gidNumber": ["50"],
"givenName": ["Wolfgang"],
"sn": [force_str(u"Dreß<PASSWORD>")]
})
nobody = ("uid=nobody,ou=people,o=test", {
"uid": ["nobody"],
"objectClass": ["person", "organizationalPerson", "inetOrgPerson", "posixAccount"],
"userPassword": ["password"],
"binaryAttr": ["\xb2"] # Invalid UTF-8
})
# posixGroup objects
active_px = ("cn=active_px,ou=groups,o=test", {
"cn": ["active_px"],
"objectClass": ["posixGroup"],
"gidNumber": ["1000"],
"memberUid": [],
})
staff_px = ("cn=staff_px,ou=groups,o=test", {
"cn": ["staff_px"],
"objectClass": ["posixGroup"],
"gidNumber": ["1001"],
"memberUid": ["alice"],
})
superuser_px = ("cn=superuser_px,ou=groups,o=test", {
"cn": ["superuser_px"],
"objectClass": ["posixGroup"],
"gidNumber": ["1002"],
"memberUid": ["alice"],
})
# groupOfNames groups
empty_gon = ("cn=empty_gon,ou=groups,o=test", {
"cn": ["empty_gon"],
"objectClass": ["groupOfNames"],
"member": []
})
active_gon = ("cn=active_gon,ou=groups,o=test", {
"cn": ["active_gon"],
"objectClass": ["groupOfNames"],
"member": ["uid=alice,ou=people,o=test"]
})
staff_gon = ("cn=staff_gon,ou=groups,o=test", {
"cn": ["staff_gon"],
"objectClass": ["groupOfNames"],
"member": ["uid=alice,ou=people,o=test"]
})
superuser_gon = ("cn=superuser_gon,ou=groups,o=test", {
"cn": ["superuser_gon"],
"objectClass": ["groupOfNames"],
"member": ["uid=alice,ou=people,o=test"]
})
other_gon = ("cn=other_gon,ou=moregroups,o=test", {
"cn": ["other_gon"],
"objectClass": ["groupOfNames"],
"member": ["uid=bob,ou=people,o=test"]
})
# nisGroup objects
active_nis = ("cn=active_nis,ou=groups,o=test", {
"cn": ["active_nis"],
"objectClass": ["nisNetgroup"],
"nisNetgroupTriple": ["(,alice,)"]
})
staff_nis = ("cn=staff_nis,ou=groups,o=test", {
"cn": ["staff_nis"],
"objectClass": ["nisNetgroup"],
"nisNetgroupTriple": ["(,alice,)"],
})
superuser_nis = ("cn=superuser_nis,ou=groups,o=test", {
"cn": ["superuser_nis"],
"objectClass": ["nisNetgroup"],
"nisNetgroupTriple": ["(,alice,)"],
})
# Nested groups with a circular reference
parent_gon = ("cn=parent_gon,ou=groups,o=test", {
"cn": ["parent_gon"],
"objectClass": ["groupOfNames"],
"member": ["cn=nested_gon,ou=groups,o=test"]
})
nested_gon = ("CN=nested_gon,ou=groups,o=test", {
"cn": ["nested_gon"],
"objectClass": ["groupOfNames"],
"member": [
"uid=alice,ou=people,o=test",
"cn=circular_gon,ou=groups,o=test"
]
})
circular_gon = ("cn=circular_gon,ou=groups,o=test", {
"cn": ["circular_gon"],
"objectClass": ["groupOfNames"],
"member": ["cn=parent_gon,ou=groups,o=test"]
})
directory = dict([top, people, groups, moregroups, alice, bob, dressler,
nobody, active_px, staff_px, superuser_px, empty_gon,
active_gon, staff_gon, superuser_gon, other_gon,
active_nis, staff_nis, superuser_nis,
parent_gon, nested_gon, circular_gon])
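# The directory dict maps each DN to its attribute dict; mockldap serves it
# as an in-memory LDAP tree for every test below (see setUpClass).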
@classmethod
def configure_logger(cls):
logger = logging.getLogger('django_auth_ldap')
formatter = logging.Formatter("LDAP auth - %(levelname)s - %(message)s")
handler = logging.StreamHandler()
handler.setLevel(logging.DEBUG)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.CRITICAL)
@classmethod
def setUpClass(cls):
cls.configure_logger()
cls.mockldap = mockldap.MockLdap(cls.directory)
warnings.filterwarnings('ignore', message='.*?AUTH_PROFILE_MODULE', category=DeprecationWarning, module='django_auth_ldap')
@classmethod
def tearDownClass(cls):
del cls.mockldap
def setUp(self):
self.mockldap.start()
self.ldapobj = self.mockldap['ldap://localhost']
self.backend = backend.LDAPBackend()
self.backend.ldap # Force global configuration
def tearDown(self):
self.mockldap.stop()
del self.ldapobj
#
# Tests
#
def test_options(self):
self.backend.authenticate(username='alice',
password='password',
settings={
'USER_DN_TEMPLATE': 'uid=%(user)s,ou=people,o=test',
'CONNECTION_OPTIONS': {
'opt1': 'value1',
},
})
self.assertEqual(self.ldapobj.get_option('opt1'), 'value1')
def test_callable_server_uri(self):
self._init_settings(
SERVER_URI=lambda: 'ldap://ldap.example.com',
USER_DN_TEMPLATE='uid=%(user)s,ou=people,o=test'
)
self.backend.authenticate(username='alice', password='password')
ldapobj = self.mockldap['ldap://ldap.example.com']
self.assertEqual(
ldapobj.methods_called(with_args=True),
[('initialize', ('ldap://ldap.example.com',), {}),
('simple_bind_s', ('uid=alice,ou=people,o=test', 'password'), {})]
)
def test_simple_bind(self):
self._init_settings(
USER_DN_TEMPLATE='uid=%(user)s,ou=people,o=test'
)
user_count = User.objects.count()
user = self.backend.authenticate(username='alice', password='password')
self.assertTrue(not user.has_usable_password())
self.assertEqual(user.username, 'alice')
self.assertEqual(User.objects.count(), user_count + 1)
self.assertEqual(
self.ldapobj.methods_called(),
['initialize', 'simple_bind_s']
)
def test_default_settings(self):
class MyBackend(backend.LDAPBackend):
default_settings = dict(
USER_DN_TEMPLATE='uid=%(user)s,ou=people,o=test'
)
self.backend = MyBackend()
user_count = User.objects.count()
user = self.backend.authenticate(username='alice', password='password')
self.assertTrue(not user.has_usable_password())
self.assertEqual(user.username, 'alice')
self.assertEqual(User.objects.count(), user_count + 1)
self.assertEqual(
self.ldapobj.methods_called(),
['initialize', 'simple_bind_s']
)
def test_simple_bind_escaped(self):
""" Bind with a username that requires escaping. """
self._init_settings(
USER_DN_TEMPLATE='uid=%(user)s,ou=people,o=test'
)
user = self.backend.authenticate(username='alice,1', password='password')
self.assertEqual(user, None)
self.assertEqual(
self.ldapobj.methods_called(with_args=True),
[('initialize', ('ldap://localhost',), {}),
('simple_bind_s', ('uid=alice\\,1,ou=people,o=test', 'password'), {})]
)
def test_new_user_lowercase(self):
self._init_settings(
USER_DN_TEMPLATE='uid=%(user)s,ou=people,o=test'
)
user_count = User.objects.count()
user = self.backend.authenticate(username='Alice', password='password')
self.assertTrue(not user.has_usable_password())
self.assertEqual(user.username, 'alice')
self.assertEqual(User.objects.count(), user_count + 1)
self.assertEqual(
self.ldapobj.methods_called(),
['initialize', 'simple_bind_s']
)
def test_deepcopy(self):
self._init_settings(
USER_DN_TEMPLATE='uid=%(user)s,ou=people,o=test'
)
user = self.backend.authenticate(username='Alice', password='password')
user = deepcopy(user)
@override_settings(AUTH_USER_MODEL='django_auth_ldap.TestUser')
def test_auth_custom_user(self):
self._init_settings(
USER_DN_TEMPLATE='uid=%(user)s,ou=people,o=test',
)
user = self.backend.authenticate(username='Alice', password='password')
self.assertTrue(isinstance(user, TestUser))
@override_settings(AUTH_USER_MODEL='django_auth_ldap.TestUser')
def test_get_custom_user(self):
self._init_settings(
USER_DN_TEMPLATE='uid=%(user)s,ou=people,o=test',
)
user = self.backend.authenticate(username='Alice', password='password')
user = self.backend.get_user(user.id)
self.assertTrue(isinstance(user, TestUser))
def test_new_user_whitespace(self):
self._init_settings(
USER_DN_TEMPLATE='uid=%(user)s,ou=people,o=test'
)
user_count = User.objects.count()
user = self.backend.authenticate(username=' alice', password='password')
user = self.backend.authenticate(username='alice ', password='password')
self.assertTrue(not user.has_usable_password())
self.assertEqual(user.username, 'alice')
self.assertEqual(User.objects.count(), user_count + 1)
def test_simple_bind_bad_user(self):
self._init_settings(
USER_DN_TEMPLATE='uid=%(user)s,ou=people,o=test'
)
user_count = User.objects.count()
user = self.backend.authenticate(username='evil_alice', password='password')
self.assertTrue(user is None)
self.assertEqual(User.objects.count(), user_count)
self.assertEqual(
self.ldapobj.methods_called(),
['initialize', 'simple_bind_s']
)
def test_simple_bind_bad_password(self):
self._init_settings(
USER_DN_TEMPLATE='uid=%(user)s,ou=people,o=test'
)
user_count = User.objects.count()
user = self.backend.authenticate(username='alice', password='<PASSWORD>')
self.assertTrue(user is None)
self.assertEqual(User.objects.count(), user_count)
self.assertEqual(
self.ldapobj.methods_called(),
['initialize', 'simple_bind_s']
)
def test_existing_user(self):
self._init_settings(
USER_DN_TEMPLATE='uid=%(user)s,ou=people,o=test'
)
User.objects.create(username='alice')
user_count = User.objects.count()
user = self.backend.authenticate(username='alice', password='password')
# Make sure we only created one user
self.assertTrue(user is not None)
self.assertEqual(User.objects.count(), user_count)
def test_existing_user_insensitive(self):
self._init_settings(
USER_SEARCH=LDAPSearch(
"ou=people,o=test", ldap.SCOPE_SUBTREE, '(uid=%(user)s)'
)
)
# mockldap doesn't handle case-insensitive matching properly.
self.ldapobj.search_s.seed('ou=people,o=test', ldap.SCOPE_SUBTREE,
'(uid=Alice)')([self.alice])
User.objects.create(username='alice')
user = self.backend.authenticate(username='Alice', password='password')
self.assertTrue(user is not None)
self.assertEqual(user.username, 'alice')
self.assertEqual(User.objects.count(), 1)
def test_convert_username(self):
class MyBackend(backend.LDAPBackend):
def ldap_to_django_username(self, username):
return 'ldap_%s' % username
def django_to_ldap_username(self, username):
return username[5:]
self.backend = MyBackend()
self._init_settings(
USER_DN_TEMPLATE='uid=%(user)s,ou=people,o=test'
)
user_count = User.objects.count()
user1 = self.backend.authenticate(username='alice', password='password')
user2 = self.backend.get_user(user1.pk)
self.assertEqual(User.objects.count(), user_count + 1)
self.assertEqual(user1.username, 'ldap_alice')
self.assertEqual(user1.ldap_user._username, 'alice')
self.assertEqual(user1.ldap_username, 'alice')
self.assertEqual(user2.username, 'ldap_alice')
self.assertEqual(user2.ldap_user._username, 'alice')
self.assertEqual(user2.ldap_username, 'alice')
def test_search_bind(self):
self._init_settings(
USER_SEARCH=LDAPSearch(
"ou=people,o=test", ldap.SCOPE_SUBTREE, '(uid=%(user)s)'
)
)
user_count = User.objects.count()
user = self.backend.authenticate(username='alice', password='password')
self.assertTrue(user is not None)
self.assertEqual(User.objects.count(), user_count + 1)
self.assertEqual(
self.ldapobj.methods_called(),
['initialize', 'simple_bind_s', 'search_s', 'simple_bind_s']
)
def test_search_bind_escaped(self):
""" Search for a username that requires escaping. """
self._init_settings(
USER_SEARCH=LDAPSearch(
"ou=people,o=test", ldap.SCOPE_SUBTREE, '(uid=%(user)s)'
)
)
user = self.backend.authenticate(username='alice*', password='password')
self.assertEqual(user, None)
self.assertEqual(
self.ldapobj.methods_called(with_args=True),
[('initialize', ('ldap://localhost',), {}),
('simple_bind_s', ('', ''), {}),
('search_s', ('ou=people,o=test', ldap.SCOPE_SUBTREE, '(uid=alice\\2a)'), {})]
)
def test_search_bind_no_user(self):
self._init_settings(
USER_SEARCH=LDAPSearch(
"ou=people,o=test", ldap.SCOPE_SUBTREE, '(cn=%(user)s)'
)
)
user = self.backend.authenticate(username='alice', password='password')
self.assertTrue(user is None)
self.assertEqual(
self.ldapobj.methods_called(),
['initialize', 'simple_bind_s', 'search_s']
)
def test_search_bind_multiple_users(self):
self._init_settings(
USER_SEARCH=LDAPSearch(
"ou=people,o=test", ldap.SCOPE_SUBTREE, '(uid=*)'
)
)
user = self.backend.authenticate(username='alice', password='password')
self.assertTrue(user is None)
self.assertEqual(
self.ldapobj.methods_called(),
['initialize', 'simple_bind_s', 'search_s']
)
def test_search_bind_bad_password(self):
self._init_settings(
USER_SEARCH=LDAPSearch(
"ou=people,o=test", ldap.SCOPE_SUBTREE, '(uid=%(user)s)'
)
)
user = self.backend.authenticate(username='alice', password='<PASSWORD>')
self.assertTrue(user is None)
self.assertEqual(
self.ldapobj.methods_called(),
['initialize', 'simple_bind_s', 'search_s', 'simple_bind_s']
)
def test_search_bind_with_credentials(self):
self._init_settings(
BIND_DN='uid=bob,ou=people,o=test',
BIND_PASSWORD='password',
USER_SEARCH=LDAPSearch(
"ou=people,o=test", ldap.SCOPE_SUBTREE, '(uid=%(user)s)'
)
)
user = self.backend.authenticate(username='alice', password='password')
self.assertTrue(user is not None)
self.assertTrue(user.ldap_user is not None)
self.assertEqual(user.ldap_user.dn, self.alice[0])
self.assertEqual(user.ldap_user.attrs, ldap.cidict.cidict(self.alice[1]))
self.assertEqual(
self.ldapobj.methods_called(),
['initialize', 'simple_bind_s', 'search_s', 'simple_bind_s']
)
def test_search_bind_with_bad_credentials(self):
self._init_settings(
BIND_DN='uid=bob,ou=people,o=test',
BIND_PASSWORD='<PASSWORD>',
USER_SEARCH=LDAPSearch(
"ou=people,o=test", ldap.SCOPE_SUBTREE, '(uid=%(user)s)'
)
)
user = self.backend.authenticate(username='alice', password='password')
self.assertTrue(user is None)
self.assertEqual(
self.ldapobj.methods_called(),
['initialize', 'simple_bind_s']
)
def test_unicode_user(self):
self._init_settings(
USER_DN_TEMPLATE='uid=%(user)s,ou=people,o=test',
USER_ATTR_MAP={'first_name': 'givenName', 'last_name': 'sn'}
)
user = self.backend.authenticate(username=u'dreßler', password='password')
self.assertTrue(user is not None)
self.assertEqual(user.username, u'dreßler')
self.assertEqual(user.last_name, u'Dreßler')
def test_cidict(self):
self._init_settings(
USER_DN_TEMPLATE='uid=%(user)s,ou=people,o=test',
)
user = self.backend.authenticate(username="alice", password="password")
self.assertTrue(isinstance(user.ldap_user.attrs, ldap.cidict.cidict))
def test_populate_user(self):
self._init_settings(
USER_DN_TEMPLATE='uid=%(user)s,ou=people,o=test',
USER_ATTR_MAP={'first_name': 'givenName', 'last_name': 'sn'}
)
user = self.backend.authenticate(username='alice', password='password')
multiple durations and not
# error and reminds me of the needed metrics by FULL_DURATION
# ionosphere.analyzer.unique_metrics (at FULL_DURATION)
# ionosphere.mirage.unique_metrics (NOT at FULL_DURATION)
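# Hypothetical illustration of consulting the two Redis sets named above
# (the set names come from this comment; the client usage is an assumption):
#
#     import redis
#     redis_conn = redis.StrictRedis()
#     mirage_metrics = redis_conn.smembers('ionosphere.mirage.unique_metrics')
#     at_full_duration = base_name not in mirage_metrics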
all_fp_ids = []
# @added 20170115 - Feature #1854: Ionosphere learn - generations
# Create the fp_ids_db_object so it is available to determine all
# the details of all features profiles for the metric, this has all
# the generations values available in it.
# Here we go! Learn!
# @modified 20170308 - Feature #1960: ionosphere_layers
# Not currently used - fp_ids_db_object
# fp_ids_db_object = None
# @added 20170306 - Feature #1960: ionosphere_layers
# Here we go, let us TEACH you properly.
# Set result to None here to fix an interpolation error below
result = None
fp_layers_ids = []
fp_layers_present = False
try:
connection = engine.connect()
# @modified 2018075 - Task #2446: Optimize Ionosphere
# Branch #2270: luminosity
# Order by the latest features profile, this also results in the
# layers ids being ordered by latest too.
# stmt = select([ionosphere_table]).where(ionosphere_table.c.metric_id == metrics_id)
stmt = select([ionosphere_table]).where(ionosphere_table.c.metric_id == metrics_id).order_by(desc(ionosphere_table.c.id))
result = connection.execute(stmt)
for row in result:
# @added 20170116 - Feature #1854: Ionosphere learn
# if a features profiles is not enabled or deleted, skip it
if row['enabled'] != 1:
continue
if row['deleted'] == 1:
continue
fp_id = row['id']
# @added 20170306 - Feature #1960: ionosphere_layers
# Here we go, let us TEACH you properly
fp_layers_id = int(row['layers_id'])
if fp_layers_id > 0:
fp_layers_present = True
fp_layers_ids.append(fp_layers_id)
# @added 20170108 - Feature #1842: Ionosphere - Graphite now graphs
# Added all_fp_ids
all_fp_ids.append(int(fp_id))
if int(row['full_duration']) == int(full_duration):
# @modified 20170116 - Feature #1854: Ionosphere learn - generations
# Handle ionosphere_learn
if added_by != 'ionosphere_learn':
fp_ids.append(int(fp_id))
logger.info('using fp id %s matched full_duration %s - %s' % (str(fp_id), str(full_duration), base_name))
else:
# @added 20170116 - Feature #1854: Ionosphere learn - generations
# If this is added_by ionosphere_learn the id is only
# added if the use_full_duration_days features profile
# is less than max_generations as if it is at the max
# then a new features profile cannot be created from it
# even if it is were to match. Ionosphere learn is
# limited here on generation.
# Set the default as max e.g. not allowed
current_fp_generation = int(metric_max_generations)
try:
current_fp_generation = row['generation']
if int(current_fp_generation) < int(metric_max_generations):
fp_ids.append(int(fp_id))
logger.info(
'valid ionosphere_learn generation %s - fp id %s matched full_duration %s - %s' % (
str(current_fp_generation), str(fp_id),
str(full_duration), base_name))
else:
logger.info(
'ionosphere_learn cannot check due to max_generations of %s would be exceeded, current generation %s - fp id %s matched full_duration %s - %s' % (
str(metric_max_generations), str(current_fp_generation), str(fp_id),
str(full_duration), base_name))
except:
logger.error(traceback.format_exc())
logger.error(
'error :: ionosphere_learn check could not determine the fp generation of fp id %s from the row object for %s' % (
str(fp_id), base_name))
else:
logger.info('not using fp id %s not matched full_duration %s - %s' % (str(fp_id), str(full_duration), base_name))
# @added 20170115 - Feature #1854: Ionosphere learn - generations
# Create the fp_ids_db_object so it is available throughout
# Here we go! Learn!
# @modified 20170308 - Feature #1960: ionosphere_layers
# Not currently used - fp_ids_db_object
# fp_ids_db_object = row
connection.close()
fp_count = len(fp_ids)
logger.info('determined %s fp ids for %s' % (str(fp_count), base_name))
# @added 20170309 - Feature #1960: ionosphere_layers
fp_layers_count = len(fp_layers_ids)
logger.info('determined %s layers ids for %s' % (str(fp_layers_count), base_name))
except:
logger.error(traceback.format_exc())
logger.error('error :: could not determine fp ids from DB for %s' % base_name)
fp_count = 0
# @added 20170309 - Feature #1960: ionosphere_layers
fp_layers_count = 0
# @added 20170306 - Feature #1960: ionosphere_layers
# Corrected the interpolation of the fp_ids_db_object above where it
# was set to the last row only, however it was not used anyway.
# Here we go, let us TEACH you properly. We only evaluate
# @modified 20170308 - Feature #1960: ionosphere_layers
# Not currently used - fp_ids_db_object
# if result:
# fp_ids_db_object = result
if len(fp_ids) == 0:
logger.info('there are no fp ids that match full duration for %s' % base_name)
else:
fp_ids_found = True
if not fp_ids_found:
logger.info('no fp ids were found for %s at %s' % (base_name, str(full_duration)))
# @modified 20170108 - Feature #1842: Ionosphere - Graphite now graphs
# Use all_fp_ids so that we can handle multiple durations
# fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
if len(all_fp_ids) == 0:
logger.error('error :: Ionosphere is enabled on %s but has no feature_profiles' % (base_name))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
else:
self.remove_metric_check_file(str(metric_check_file))
if engine:
engine_disposal(engine)
return
# @added 20161221 - TODO: why not calculate the features of every
# anomaly so the user does not have to do it and wait for the
# features to be calculated.
# Check the features were calculated by the webapp
calculated_feature_file = '%s/%s.tsfresh.input.csv.features.transposed.csv' % (metric_training_data_dir, base_name)
calculated_feature_file_found = False
if os.path.isfile(calculated_feature_file):
logger.info('calculated features available - %s' % (calculated_feature_file))
calculated_feature_file_found = True
# @added 20170115 - Feature #1854: Ionosphere learn - generations
# ionosphere_learn should always provide the features profile csv
# Ionosphere does not create features profiles for learn, it only
# checks them.
# Here we go! Learn!
if added_by == 'ionosphere_learn':
if not calculated_feature_file_found:
logger.error('error :: no ionosphere_learn calculated_feature_file file found - %s' % calculated_feature_file)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
if engine:
engine_disposal(engine)
return
if not calculated_feature_file_found:
if training_metric:
# Allow time for the Graphite resources to be created; if they are not
# present, no alert was sent and therefore features do not need to be
# calculated
check_time = int(time())
check_age = check_time - int(added_at)
if check_age < 5:
sleep(5)
graphite_file_count = len([f for f in os.listdir(metric_training_data_dir)
if f.endswith('.png') and
os.path.isfile(os.path.join(metric_training_data_dir, f))])
if graphite_file_count == 0:
logger.info('not calculating features no anomaly Graphite alert resources created in %s' % (metric_training_data_dir))
self.remove_metric_check_file(str(metric_check_file))
if engine:
engine_disposal(engine)
return
else:
logger.info('anomaly Graphite alert resources found in %s' % (metric_training_data_dir))
context = skyline_app
f_calc = None
if not calculated_feature_file_found:
try:
fp_csv, successful, fp_exists, fp_id, log_msg, traceback_format_exc, f_calc = calculate_features_profile(skyline_app, metric_timestamp, base_name, context)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to calculate features')
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
if engine:
engine_disposal(engine)
return
if os.path.isfile(calculated_feature_file):
logger.info('calculated features available - %s' % (calculated_feature_file))
calculated_feature_file_found = True
if f_calc:
send_metric_name = '%s.features_calculation_time' % skyline_app_graphite_namespace
f_calc_time = '%.2f' % float(f_calc)
try:
send_graphite_metric(skyline_app, send_metric_name, f_calc_time)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to send calculate features')
if training_metric:
logger.info('training metric done')
self.remove_metric_check_file(str(metric_check_file))
# TODO: make ionosphere more useful, compare any other
# available training_metric profiles here and match, not in the
# db context, in the training context.
if engine:
engine_disposal(engine)
return
if not calculated_feature_file_found:
logger.error('error :: calculated features file not available - %s' % (calculated_feature_file))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
if engine:
engine_disposal(engine)
return
# @modified 20161213 - Branch #1790: test_tsfresh
# TODO: Match the test_tsfresh method
# Create an array of the calculated features
calculated_features = []
if calculated_feature_file_found:
count_id = 0
with open(calculated_feature_file, 'rb') as fr:
reader = csv.reader(fr, delimiter=',')
for i, line in enumerate(reader):
if str(line[0]) != '':
if ',' in line[0]:
feature_name = '"%s"' % str(line[0])
else:
feature_name = str(line[0])
count_id += 1
calc_value = float(line[1])
calculated_features.append([feature_name, calc_value])
if len(calculated_features) == 0:
logger.error('error :: no calculated features were determined from - %s' % (calculated_feature_file))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
if engine:
engine_disposal(engine)
return
# @added 2018075 - Task #2446: Optimize Ionosphere
# Branch #2270: luminosity
fp_checked = 0
layers_checked = 0
# Compare calculated features to feature values for each fp id
not_anomalous = False
if calculated_feature_file_found:
for fp_id in fp_ids:
if not metrics_id:
logger.error('error :: metric id not known')
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
if engine:
engine_disposal(engine)
return False
# @added 2018075 - Task #2446: Optimize Ionosphere
# Branch #2270: luminosity
fp_checked += 1
self.features_profiles_checked.append(fp_id)
features_count = None
fp_features = []
# Get features for fp_id from z_fp_<metric_id> table where the
# features profile is the same full_duration
metric_fp_table = 'z_fp_%s' % str(metrics_id)
# @added 20170804 - Bug #2130: MySQL - Aborted_clients
# Set a conditional here to only get_an_engine if no engine, this
+ '</h6>' + \
'<h6>Register of Trade and Companies : ' + register_of_trade_and_companies + '</h6>' + \
'<h6>Main activities : ' + main_activities + '</h6>' + \
'<h6>Activity number : ' + activity_number + '</h6>' + \
'<h6>Intra-community VAT number : ' + intra_community_vat_number + '</h6>' + \
'<h6>President : ' + president + '</h6>' + \
'<h6>Registration date : ' + registration_date + '</h6>' + \
'<br>'
body += '<br>'
body += '<table class="table table-striped table-bordered">' + \
'<thead>' + \
'<tr>' + \
'<th scope="col">Details</th>' + \
'<th scope="col">Hazard</th>' + \
'<th scope="col">Risk</th>' + \
'<th scope="col">Preventative measures</th>' + \
'<th scope="col">How to implement</th>' + \
'<th scope="col">Supervisor</th>' + \
'<th scope="col">Projected risk</th>' + \
'</tr>' + \
'</thead>' + \
'<tbody>' + \
'<tr>' + \
'<td>1</td>' + \
'<td>' + r1c1 + '</td>' + \
'<td>' + r1c2 + '</td>' + \
'<td>' + r1c3 + '</td>' + \
'<td>' + r1c4 + '</td>' + \
'<td>' + r1c5 + '</td>' + \
'<td>' + r1c6 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>2</td>' + \
'<td>' + r2c1 + '</td>' + \
'<td>' + r2c2 + '</td>' + \
'<td>' + r2c3 + '</td>' + \
'<td>' + r2c4 + '</td>' + \
'<td>' + r2c5 + '</td>' + \
'<td>' + r2c6 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>3</td>' + \
'<td>' + r3c1 + '</td>' + \
'<td>' + r3c2 + '</td>' + \
'<td>' + r3c3 + '</td>' + \
'<td>' + r3c4 + '</td>' + \
'<td>' + r3c5 + '</td>' + \
'<td>' + r3c6 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>4</td>' + \
'<td>' + r4c1 + '</td>' + \
'<td>' + r4c2 + '</td>' + \
'<td>' + r4c3 + '</td>' + \
'<td>' + r4c4 + '</td>' + \
'<td>' + r4c5 + '</td>' + \
'<td>' + r4c6 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>5</td>' + \
'<td>' + r5c1 + '</td>' + \
'<td>' + r5c2 + '</td>' + \
'<td>' + r5c3 + '</td>' + \
'<td>' + r5c4 + '</td>' + \
'<td>' + r5c5 + '</td>' + \
'<td>' + r5c6 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>6</td>' + \
'<td>' + r6c1 + '</td>' + \
'<td>' + r6c2 + '</td>' + \
'<td>' + r6c3 + '</td>' + \
'<td>' + r6c4 + '</td>' + \
'<td>' + r6c5 + '</td>' + \
'<td>' + r6c6 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>7</td>' + \
'<td>' + r7c1 + '</td>' + \
'<td>' + r7c2 + '</td>' + \
'<td>' + r7c3 + '</td>' + \
'<td>' + r7c4 + '</td>' + \
'<td>' + r7c5 + '</td>' + \
'<td>' + r7c6 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>8</td>' + \
'<td>' + r8c1 + '</td>' + \
'<td>' + r8c2 + '</td>' + \
'<td>' + r8c3 + '</td>' + \
'<td>' + r8c4 + '</td>' + \
'<td>' + r8c5 + '</td>' + \
'<td>' + r8c6 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>9</td>' + \
'<td>' + r9c1 + '</td>' + \
'<td>' + r9c2 + '</td>' + \
'<td>' + r9c3 + '</td>' + \
'<td>' + r9c4 + '</td>' + \
'<td>' + r9c5 + '</td>' + \
'<td>' + r9c6 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>10</td>' + \
'<td>' + r10c1 + '</td>' + \
'<td>' + r10c2 + '</td>' + \
'<td>' + r10c3 + '</td>' + \
'<td>' + r10c4 + '</td>' + \
'<td>' + r10c5 + '</td>' + \
'<td>' + r10c6 + '</td>' + \
'</tr>' + \
'</tbody>' + \
'</table>'
body += '<br>' + \
'</div>' + \
'</div>' + \
'</div>' + \
'<br>' + \
'<script src="https://code.jquery.com/jquery-3.5.1.slim.min.js"' + \
'integrity="<KEY>"' + \
'crossorigin="anonymous"></script>' + \
'<script src="https://cdn.jsdelivr.net/npm/bootstrap@4.5.3/dist/js/bootstrap.bundle.min.js"' + \
'integrity="<KEY>"' + \
'crossorigin="anonymous"></script>' + \
'</body>' + \
'</html>'
options = {
'page-size': 'A4',
'header-center': 'Risk management worksheet',
'footer-left': 'Company : ' + company_name + ' [' + establishment_number + ']',
'footer-right': '[page] of [topage]',
'encoding': 'UTF-8',
'no-outline': None,
'custom-header': [
('Accept-Encoding', 'pdf')
]
}
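# Each key above maps to a wkhtmltopdf command-line flag ('page-size'
# becomes --page-size), and a value of None emits the flag without an
# argument, which is how pdfkit represents switches such as --no-outline.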
# path_wkthmltopdf = 'static/reporting/static/wkhtmltopdf.exe'
# config = pdfkit.configuration(wkhtmltopdf=path_wkthmltopdf)
# output = pdfkit.from_string(body, output_path=False, configuration=config, options=options)
output = pdfkit.from_string(body, output_path=False, options=options)
response = HttpResponse(output, content_type='application/pdf')
response['Content-Disposition'] = 'attachment; filename="risk_management_worksheet.pdf"'
return response
def vendor_service_pricing_sheet(request):
return render(request, 'reporting/vendor_service_pricing_sheet.html')
def generate_html_to_pdf_vendor_service_pricing_sheet(request):
company_name = request.POST.get('company_name').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
share_capital = request.POST.get('share_capital').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
head_office_address = request.POST.get('head_office_address').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
establishment_number = request.POST.get('establishment_number').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
register_of_trade_and_companies = request.POST.get('register_of_trade_and_companies').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
main_activities = request.POST.get('main_activities').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
activity_number = request.POST.get('activity_number').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
intra_community_vat_number = request.POST.get('intra_community_vat_number').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
president = request.POST.get('president').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
registration_date = request.POST.get('registration_date').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r1c1 = request.POST.get('r1c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r1c2 = request.POST.get('r1c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r1c3 = request.POST.get('r1c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r1c4 = request.POST.get('r1c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r1c5 = request.POST.get('r1c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r1c6 = request.POST.get('r1c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r1c7 = request.POST.get('r1c7').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r1c8 = request.POST.get('r1c8').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r1c9 = request.POST.get('r1c9').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r1c10 = request.POST.get('r1c10').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c1 = request.POST.get('r2c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c2 = request.POST.get('r2c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c3 = request.POST.get('r2c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c4 = request.POST.get('r2c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c5 = request.POST.get('r2c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c6 = request.POST.get('r2c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c7 = request.POST.get('r2c7').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c8 = request.POST.get('r2c8').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c9 = request.POST.get('r2c9').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c10 = request.POST.get('r2c10').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c1 = request.POST.get('r3c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c2 = request.POST.get('r3c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c3 = request.POST.get('r3c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c4 = request.POST.get('r3c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c5 = request.POST.get('r3c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c6 = request.POST.get('r3c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c7 = request.POST.get('r3c7').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c8 = request.POST.get('r3c8').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c9 = request.POST.get('r3c9').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c10 = request.POST.get('r3c10').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c1 = request.POST.get('r4c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c2 = request.POST.get('r4c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c3 = request.POST.get('r4c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c4 = request.POST.get('r4c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c5 = request.POST.get('r4c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c6 = request.POST.get('r4c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c7 = request.POST.get('r4c7').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c8 = request.POST.get('r4c8').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c9 = request.POST.get('r4c9').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c10 = request.POST.get('r4c10').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c1 = request.POST.get('r5c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c2 = request.POST.get('r5c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c3 = request.POST.get('r5c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c4 = request.POST.get('r5c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c5 = request.POST.get('r5c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c6 = request.POST.get('r5c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c7 = request.POST.get('r5c7').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c8 = request.POST.get('r5c8').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c9 = request.POST.get('r5c9').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c10 = request.POST.get('r5c10').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c1 = request.POST.get('r6c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c2 = request.POST.get('r6c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c3 = request.POST.get('r6c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c4 = request.POST.get('r6c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c5 = request.POST.get('r6c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c6 = request.POST.get('r6c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c7 = request.POST.get('r6c7').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c8 = request.POST.get('r6c8').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c9 = request.POST.get('r6c9').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c10 = request.POST.get('r6c10').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c1 = request.POST.get('r7c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c2 = request.POST.get('r7c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c3 = request.POST.get('r7c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c4 = request.POST.get('r7c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c5 = request.POST.get('r7c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c6 = request.POST.get('r7c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c7 = request.POST.get('r7c7').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c8 = request.POST.get('r7c8').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c9 = request.POST.get('r7c9').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c10 = request.POST.get('r7c10').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c1 = request.POST.get('r8c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c2 = request.POST.get('r8c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c3 = request.POST.get('r8c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c4 = request.POST.get('r8c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c5 = request.POST.get('r8c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
<gh_stars>0
#-*- coding: utf-8 -*-
from __future__ import division
import os
import math
import time
import tensorflow as tf
import numpy as np
from tensorflow.python.ops import math_ops
from tensorflow.contrib.rnn import GRUCell
from GAN_GRU.ops import *
from utils import *
import mygru_cell
import matplotlib.pyplot as plt
"""
Standardize D's input; do not include m, pad missing values with 0
G's input drops m and keeps only delta
g does not accumulate z at every step
"""
class WGAN(object):
model_name = "WGAN_no_mask" # name for checkpoint
def __init__(self, sess, args, datasets):
self.sess = sess
self.isbatch_normal=args.isBatch_normal
self.lr = args.lr
self.epoch = args.epoch
self.batch_size = args.batch_size
self.n_inputs = args.n_inputs # MNIST data input (img shape: 28*28)
self.n_steps = datasets.maxLength # time steps
self.n_hidden_units = args.n_hidden_units # neurons in hidden layer
self.n_classes = args.n_classes # MNIST classes (0-9 digits)
self.gpus=args.gpus
self.pretrain_epoch=args.pretrain_epoch
self.impute_iter=args.impute_iter
self.g_loss_lambda=args.g_loss_lambda
self.datasets=datasets
self.z_dim = args.z_dim # dimension of noise-vector
# WGAN_GP parameter
self.lambd = 0.25 # The higher value, the more stable, but the slower convergence
self.disc_iters = args.disc_iters # The number of critic iterations for one-step of generator
# train
self.learning_rate = args.lr
self.beta1 = args.beta1
self.Gru_g = GRUCell(self.n_hidden_units)
self.Gru_d = GRUCell(self.n_hidden_units)
self.num_batches = len(datasets.x) // self.batch_size
def pretrainG(self, X,X_lengths,Keep_prob,reuse=False):
with tf.variable_scope("g_enerator", reuse=reuse):
"""
the rnn cell's variable scope is defined by tensorflow;
if we want to update the rnn cell's weights, the variable scope must contain 'g_' or 'd_'
"""
w_out= tf.get_variable("g_w_out",shape=[self.n_hidden_units, self.n_inputs],initializer=tf.random_normal_initializer())
b_out= tf.get_variable("g_b_out",shape=[self.n_inputs, ],initializer=tf.constant_initializer(0.001))
w_z = tf.get_variable("g_w_z", shape=[self.z_dim, self.n_inputs],
initializer=tf.random_normal_initializer())
b_z = tf.get_variable("g_b_z", shape=[self.n_inputs, ], initializer=tf.constant_initializer(0.001))
X_in = tf.reshape(X, [-1, self.n_steps, self.n_inputs])
init_state = self.Gru_g.zero_state(self.batch_size, dtype=tf.float32)  # initialize an all-zero state
outputs, final_state = tf.nn.dynamic_rnn(self.Gru_g, X_in, \
initial_state=init_state,\
sequence_length=X_lengths,
time_major=False)
#outputs: batch_size*n_steps*n_hiddensize
outputs=tf.reshape(outputs,[-1,self.n_hidden_units])
out_predict=tf.matmul(tf.nn.dropout(outputs,Keep_prob), w_out) + b_out
out_predict=tf.reshape(out_predict,[-1,self.n_steps,self.n_inputs])
return out_predict
def discriminator(self, X,X_lengths,Keep_prob, reuse=False):
# Network Architecture is exactly same as in infoGAN (https://arxiv.org/abs/1606.03657)
# Architecture : (64)4c2s-(128)4c2s_BL-FC1024_BL-FC1_S
with tf.variable_scope("d_iscriminator", reuse=reuse):
w_out= tf.get_variable("d_w_out",shape=[self.n_hidden_units, 1],initializer=tf.random_normal_initializer())
b_out= tf.get_variable("d_b_out",shape=[1, ],initializer=tf.constant_initializer(0.001))
X_in = tf.reshape(X, [self.batch_size, self.n_steps , self.n_inputs])
init_state = self.Gru_d.zero_state(self.batch_size, dtype=tf.float32)  # initialize an all-zero state
outputs, final_state = tf.nn.dynamic_rnn(self.Gru_d, X_in, \
initial_state=init_state,\
sequence_length=X_lengths,
time_major=False)
# final_state:batch_size*n_hiddensize
# Do not use the output at the last padded step; use the state at position
# `length`. The last step was used previously, so the output was always b_out.
out_logit=tf.matmul(tf.nn.dropout(final_state,Keep_prob), w_out) + b_out
out =tf.nn.sigmoid(out_logit)  # take the final valid output
return out,out_logit
def generator(self, z, Keep_prob, is_training=True, reuse=False):
# x,delta,n_steps
# z :[self.batch_size, self.z_dim]
# first feed noize in rnn, then feed the previous output into next input
# or we can feed noize and previous output into next input in future version
with tf.variable_scope("g_enerator", reuse=reuse):
#gennerate
w_out= tf.get_variable("g_w_out",shape=[self.n_hidden_units, self.n_inputs],initializer=tf.random_normal_initializer())
b_out= tf.get_variable("g_b_out",shape=[self.n_inputs, ],initializer=tf.constant_initializer(0.001))
w_z=tf.get_variable("g_w_z",shape=[self.z_dim,self.n_inputs],initializer=tf.random_normal_initializer())
b_z=tf.get_variable("g_b_z",shape=[self.n_inputs, ],initializer=tf.constant_initializer(0.001))
#self.times=tf.reshape(self.times,[self.batch_size,self.n_steps,self.n_inputs])
#change z's dimension
# batch_size*z_dim-->batch_size*n_inputs
x=tf.matmul(z,w_z)+b_z
X_in = tf.reshape(x, [-1, 1, self.n_inputs])
init_state = self.Gru_g.zero_state(self.batch_size, dtype=tf.float32)  # initialize an all-zero state
#z=tf.reshape(z,[self.batch_size,1,self.z_dim])
seq_len=tf.constant(1,shape=[self.batch_size])
outputs, final_state = tf.nn.dynamic_rnn(self.Gru_g, X_in, \
initial_state=init_state,\
sequence_length=seq_len,
time_major=False)
init_state=final_state
#outputs: batch_size*1*n_hidden
outputs=tf.reshape(outputs,[-1,self.n_hidden_units])
# full connect
out_predict=tf.matmul(tf.nn.dropout(outputs,Keep_prob), w_out) + b_out
out_predict=tf.reshape(out_predict,[-1,1,self.n_inputs])
total_result=tf.multiply(out_predict,1.0)
for i in range(1,self.n_steps):
out_predict=tf.reshape(out_predict,[self.batch_size,self.n_inputs])
# add the noise z to the output
out_predict=out_predict+tf.matmul(z,w_z)+b_z
X_in = tf.reshape(out_predict, [-1, 1, self.n_inputs])
outputs, final_state = tf.nn.dynamic_rnn(self.Gru_g, X_in, \
initial_state=init_state,\
sequence_length=seq_len,
time_major=False)
init_state=final_state
outputs=tf.reshape(outputs,[-1,self.n_hidden_units])
out_predict=tf.matmul(tf.nn.dropout(outputs,Keep_prob), w_out) + b_out
out_predict=tf.reshape(out_predict,[-1,1,self.n_inputs])
total_result=tf.concat([total_result,out_predict],1)
#delta:[batch_size,,n_inputs]
if self.isbatch_normal:
with tf.variable_scope("g_bn", reuse=tf.AUTO_REUSE):
total_result=bn(total_result,is_training=is_training, scope="g_bn_imple")
return total_result
def impute(self):
with tf.variable_scope("impute", reuse=tf.AUTO_REUSE):
z_need_tune=tf.get_variable("z_needtune",shape=[self.batch_size,self.z_dim],initializer=tf.random_normal_initializer(mean=0,stddev=0.1) )
return z_need_tune
def build_model(self):
self.keep_prob = tf.placeholder(tf.float32)
self.x = tf.placeholder(tf.float32, [self.batch_size, self.n_steps, self.n_inputs])
self.m = tf.placeholder(tf.float32, [self.batch_size, self.n_steps, self.n_inputs])
self.x_lengths = tf.placeholder(tf.int32, shape=[self.batch_size,])
self.z = tf.placeholder(tf.float32, [self.batch_size, self.z_dim], name='z')
""" Loss Function """
# 不进行preTrain
Pre_out=self.pretrainG(self.x, self.x_lengths,\
self.keep_prob, \
reuse=False)
self.pretrain_loss=tf.reduce_sum(tf.square(tf.multiply(Pre_out,self.m)-self.x)) / tf.cast(tf.reduce_sum(self.x_lengths),tf.float32)
D_real, D_real_logits = self.discriminator(self.x, \
self.x_lengths,self.keep_prob, \
reuse=False)
#G return total_result,self.imputed_deltapre,self.imputed_deltasub,self.imputed_m,self.x_lengths,last_values,sub_values
g_x = self.generator(self.z,self.keep_prob, is_training=True, reuse=True)
D_fake, D_fake_logits = self.discriminator(g_x,self.x_lengths,self.keep_prob,\
reuse = True)
"""
impute loss
"""
self.z_need_tune=self.impute()
impute_out=self.generator(self.z_need_tune,self.keep_prob, is_training=False, reuse=True)
impute_fake, impute_fake_logits = self.discriminator(impute_out,self.x_lengths,\
self.keep_prob,
reuse=True )
# loss for imputation
self.mask_loss = tf.reduce_mean(tf.square(tf.multiply(impute_out,self.m)-self.x))
self.g_impute_loss = -tf.reduce_mean(impute_fake_logits)
self.impute_loss=self.mask_loss + self.g_loss_lambda*self.g_impute_loss
self.impute_out=impute_out
#the imputed results
self.imputed=tf.multiply((1-self.m),self.impute_out)+self.x
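# Combine rule illustrated: observed entries keep their original values and
# only the missing ones come from the generator. With m as the observed mask
# and x pre-filled with 0 at missing positions:
#     x = [5, 0, 7], m = [1, 0, 1], impute_out = [4, 9, 6]
#     imputed = (1 - m) * impute_out + x = [5, 9, 7]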
# get loss for discriminator
d_loss_real = - tf.reduce_mean(D_real_logits)
d_loss_fake = tf.reduce_mean(D_fake_logits)
self.d_loss = d_loss_real + d_loss_fake
# get loss for generator
self.g_loss = - d_loss_fake
""" Training """
# divide trainable variables into a group for D and a group for G
t_vars = tf.trainable_variables()
d_vars = [var for var in t_vars if 'd_' in var.name]
g_vars = [var for var in t_vars if 'g_' in var.name]
z_vars = [self.z_need_tune]
'''
print("d vars:")
for v in d_vars:
print(v.name)
print("g vars:")
for v in g_vars:
print(v.name)
print("z vars:")
for v in z_vars:
print(v.name)
'''
#don't need normalization because we have adopted the dropout
"""
ld = 0.0
for w in d_vars:
ld += tf.contrib.layers.l2_regularizer(1e-4)(w)
lg = 0.0
for w in g_vars:
lg += tf.contrib.layers.l2_regularizer(1e-4)(w)
self.d_loss+=ld
self.g_loss+=lg
"""
# optimizers
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
# this code have used batch normalization, so the upside line should be executed
self.d_optim = tf.train.AdamOptimizer(self.learning_rate, beta1=self.beta1) \
.minimize(self.d_loss, var_list=d_vars)
#self.d_optim=self.optim(self.learning_rate, self.beta1,self.d_loss,d_vars)
self.g_optim = tf.train.AdamOptimizer(self.learning_rate*self.disc_iters, beta1=self.beta1) \
.minimize(self.g_loss, var_list=g_vars)
#self.g_optim=self.optim(self.learning_rate, self.beta1,self.g_loss,g_vars)
self.g_pre_optim=tf.train.AdamOptimizer(self.learning_rate*2,beta1=self.beta1) \
.minimize(self.pretrain_loss,var_list=g_vars)
self.impute_optim=tf.train.AdamOptimizer(self.learning_rate*7,beta1=self.beta1).minimize(self.impute_loss,var_list=z_vars)
#clip weight
self.clip_all_vals = [p.assign(tf.clip_by_value(p, -0.99, 0.99)) for p in t_vars]
self.clip_D = [p.assign(tf.clip_by_value(p, -0.99, 0.99)) for p in d_vars]
self.clip_G = [p.assign(tf.clip_by_value(p, -0.99, 0.99)) for p in g_vars]
"""" Testing """
# for test
# self.fake_x,self.fake_delta,_,_ = self.generator(self.z, self.keep_prob, is_training=False, reuse=True)
""" Summary """
d_loss_real_sum = tf.summary.scalar("d_loss_real", d_loss_real)
d_loss_fake_sum = tf.summary.scalar("d_loss_fake", d_loss_fake)
d_loss_sum = tf.summary.scalar("d_loss", self.d_loss)
g_loss_sum = tf.summary.scalar("g_loss", self.g_loss)
g_pretrain_loss_sum=tf.summary.scalar("g_pretrain_loss", self.pretrain_loss)
# final summary operations
self.impute_sum=tf.summary.scalar("impute_loss", self.impute_loss)
self.g_sum = g_loss_sum
self.g_pretrain_sum=tf.summary.merge([g_pretrain_loss_sum])
self.d_sum = tf.summary.merge([d_loss_real_sum,d_loss_fake_sum, d_loss_sum])
def optim(self,learning_rate,beta,loss,var):
optimizer = tf.train.AdamOptimizer(learning_rate, beta1=beta)
grads = optimizer.compute_gradients(loss,var_list=var)
for i, (g, v) in enumerate(grads):
if g is not None:
grads[i] = (tf.clip_by_norm(g, 5), v) # clip gradients
train_op = optimizer.apply_gradients(grads)
return train_op
def pretrain(self, start_epoch,counter,start_time):
if start_epoch < self.pretrain_epoch:
#todo
self.pretrainG_fig_loss = plt.figure()
self.pretrainG_ax_loss = self.pretrainG_fig_loss.add_subplot(1, 1, 1)
p_loss_list = []
for epoch in range(start_epoch, self.pretrain_epoch):
# get batch data
self.datasets.shuffle(self.batch_size,True)
idx=0
#x,y,mean,m,deltaPre,x_lengths,lastvalues,files,imputed_deltapre,imputed_m,deltaSub,subvalues,imputed_deltasub
for data_x,data_missing,data_m,data_detla,data_x_lengths,_ in self.datasets.nextBatch():
# pretrain
_, summary_str, p_loss = self.sess.run([self.g_pre_optim, self.g_pretrain_sum, self.pretrain_loss],
feed_dict={self.x: data_x,
self.m: data_m,
self.x_lengths: data_x_lengths,
self.keep_prob: 0.5})
# self.writer.add_summary(summary_str, counter)
p_loss_list.append(p_loss)
self.pretrain_plot_loss(p_loss_list)
counter += 1
# display training status
print("Epoch: [%2d] [%4d/%4d] time: %4.4f, pretrain_loss: %.8f" \
% (epoch, idx, self.num_batches, time.time() - start_time, p_loss))
idx+=1
# After an epoch, start_batch_id is set to zero
# non-zero value is only for the first epoch after loading pre-trained model
def train(self):
# graph inputs for visualize training results
self.sample_z = np.random.standard_normal(size=(self.batch_size , self.z_dim))
# initialize all variables
tf.global_variables_initializer().run()
start_epoch = 0
counter = 1
# loop for epoch
start_time = time.time()
self.pretrain(start_epoch,counter,start_time)
if start_epoch < self.pretrain_epoch:
start_epoch=self.pretrain_epoch
# d_loss_plot,g_loss_plot
self.gan_fig_loss = plt.figure()
self.gan_ax_loss = self.gan_fig_loss.add_subplot(1, 1, 1)
d_loss_list = []
g_loss_list = []
d_loss = 0
for epoch in range(start_epoch, self.epoch):
# get batch data
self.datasets.shuffle(self.batch_size,True)
idx=0
for data_x,data_missing,data_m,data_deltaPre,data_x_lengths,_ in self.datasets.nextBatch():
batch_z = np.random.standard_normal(size=(self.batch_size, self.z_dim))
if counter % self.disc_iters == 0:
_ = self.sess.run(self.clip_all_vals)
_, summary_str, d_loss = self.sess.run([self.d_optim, self.d_sum, self.d_loss],
feed_dict={self.z: batch_z,
self.x: data_x,
self.m: data_m,
self.x_lengths: data_x_lengths,
self.keep_prob: 0.5})
# display training status
print("Epoch: [%2d] [%4d/%4d] time: %4.4f, d_loss: %.8f, counter:%4d" \
% (epoch, idx, self.num_batches, time.time() - start_time, d_loss, counter))
# update G network
#batch_z = np.random.normal(0, 1, [self.batch_size, self.z_dim]).astype(np.float32)
_, summary_str, g_loss = self.sess.run([self.g_optim, self.g_sum, self.g_loss],
feed_dict={self.z: batch_z,
self.keep_prob: 0.5,
self.x_lengths: data_x_lengths
})
# self.writer.add_summary(summary_str, counter)
d_loss_list.append(d_loss)
g_loss_list.append(g_loss)
self.gan_plot_loss(g_loss_list,d_loss_list)
print("Epoch: [%2d] [%4d/%4d] time: %4.4f, g_loss: %.8f,counter:%4d" \
% (epoch, idx, self.num_batches, time.time() - start_time, g_loss,counter))
counter += 1
idx+=1
def imputation(self,dataset):
self.datasets=dataset
# self.datasets.shuffle(self.batch_size,True)
tf.variables_initializer([self.z_need_tune]).run()
# Shuffling does not matter here: store the imputed data, and at test time
# shuffle the already-imputed data instead.
# The tail of the training set that does not divide evenly by batch_size is discarded.
start_time = time.time()
batchid=1
impute_tune_time=1
counter=1
imputed_list = []
# impute_loss_plot,mask_loss_plot,g_impute_loss
self.impute_fig_loss = plt.figure()
self.impute_ax_loss = self.impute_fig_loss.add_subplot(1, 1, 1)
impute_loss_list = []
mask_loss_list = []
g_impute_loss_list = []
loss_sum = 0
m_sum = 0
for data_x,data_missing,data_m,data_deltaPre,data_x_lengths,_ in self.datasets.nextBatch():
#self.z_need_tune=tf.assign(self.z_need_tune,tf.random_normal([self.batch_size,self.z_dim]))
tf.variables_initializer([self.z_need_tune]).run()
for i in range(0,self.impute_iter):
_, impute_out, summary_str, impute_loss, imputed,mask_loss,g_impute_loss = self.sess.run([self.impute_optim, self.impute_out, self.impute_sum, self.impute_loss, self.imputed,self.mask_loss,self.g_impute_loss ], \
feed_dict={self.x: data_missing,
self.m: data_m,
self.x_lengths: data_x_lengths,
self.keep_prob: 1.0})
impute_tune_time+=1
counter+=1
# accumulate loss_sum: absolute imputation error on the masked positions
loss_sum = loss_sum + np.sum(np.multiply(np.abs(data_x - imputed),1-data_m))
m_sum = m_sum+np.sum(data_m)
print(loss_sum/m_sum)
impute_loss_list.append(impute_loss)
mask_loss_list.append(mask_loss)
g_impute_loss_list.append(g_impute_loss)
self.impute_plot_loss(impute_loss_list, mask_loss_list, g_impute_loss_list)
if counter%10==0:
print("Batchid: [%2d] [%4d/%4d] time: %4.4f, impute_loss: %.8f" \
% (batchid, impute_tune_time, self.impute_iter, time.time() - start_time, impute_loss))
imputed_list.append(imputed)
batchid+=1
impute_tune_time=1
self.imputed_list = np.array(imputed_list)
self.loss_pre = loss_sum/m_sum
def pretrain_plot_loss(self,loss):
if self.pretrainG_ax_loss.lines:
self.pretrainG_ax_loss.lines.remove(self.pretrainG_ax_loss.lines[0])
self.pretrainG_ax_loss.plot(loss,linestyle='-',color='#2E68AA')
plt.title("PreTrainG_loss")
plt.ylabel("loss")
plt.ion()
plt.show()
plt.pause(0.1)
def gan_plot_loss(self,g_loss,d_loss):
if self.gan_ax_loss.lines:
self.gan_ax_loss.lines.remove(self.gan_ax_loss.lines[0])
# | |
("The observed correlation between strain-specific pain profiles and MOR-1K gene expression levels suggests that MOR-1K contributes to OIH in genetically susceptible mice.",
{"entities": [(67, 73, PHYS), (111, 117, PHYS), (133, 136, FUNC)]}),
("Within the OIH murine paradigm, we found that sustained i.t.",
{"entities": [(11, 14, FUNC)]}),
("Taken together all these results suggest that the resolution of opioid-induced hyperalgesia is not due to a rapid extinction of pronociceptive systems, but is rather due to a counter-adaptation by inhibitory systems dependent on endogenous opioid release.",
{"entities": [(64, 70, PHYS), (79, 91, FUNC), (128, 150, FUNC), (229, 246, NT)]}),
("First, it seems that opioid-induced pronociceptive activity may persist long after cessation of opioid administration.",
{"entities": [(21, 27, PHYS), (36, 49, FUNC), (96, 102, PHYS)]}),
("Actually, these data support the critical role of microglia not only in opioid-induced hyperalgesia and tolerance but also in long-term pain sensitization observed after brief exposure to opioid.",
{"entities": [(50, 59, PHYS), (72, 78, PHYS), (87, 99, FUNC), (104, 113, FUNC), (136, 154, FUNC), (188, 194, PHYS)]}),
("These adaptive modifications range from the receptors modulation and uncoupling with G protein to the hyper-activation of the cAMP-pathway, and so of the AC, with consequent increase of the proteins CREB (cAMP response element-binding protein) and fos.",
{"entities": [(85, 94, PHYS), (126, 138, PHYS), (174, 198, FUNC), (199, 203, PHYS), (205, 242, PHYS), (248, 251, PHYS)]}),
("Low COMT activity also increases opioid receptors and enhances opioid analgesia and adverse effects in some cancer [198, 199].",
{"entities": [(0, 8, PHYS), (23, 49, PHYS), (54, 79, FUNC), (84, 99, FUNC)]}),
("On the contrary, the SVM model can be over-fitted to the data and thus loose generalizability.",
{"entities": []}),
("At the neurotransmitter level, decades of research have supported the role of the opioid system in the neurobiology of placebo analgesic effects.",
{"entities": [(82, 88, PHYS), (127, 144, PHYS)]}),
("Muscimol provides an efficient, reversible inactivation effect and was chosen based on the results of previous studies70–73.",
{"entities": [(0, 8, PHYS), (43, 62, FUNC)]}),
("Because the expression of inducible transcription factor proteins peaks at approximately 1 hour after stimulus induction and fades by 3–4 hours31, rats were perfused 1 h after the last behavioral test.",
{"entities": [(26, 56, PHYS)]}),
("This hypothesis is based the spinal cord and brainstem pain-signaling neurons project via the parabrachial and solitary nuclei to densely innervate the hypothalamus.",
{"entities": [(29, 40, LABEL), (45, 54, LABEL), (55, 69, FUNC), (94, 106, LABEL), (111, 119, LABEL), (130, 147, FUNC), (152, 164, LABEL)]}),
("Data were acquired from a commercial database (Celera Discovery System, CDS) were used to select SNPs spaced at 2–5 kb intervals throughout each gene region plus 4–6 kb upstream and 4–6 kb downstream of each gene.",
{"entities": []}),
("In order to illustrate the method, however, we show the results for the galanin-2 receptor.",
{"entities": [(72, 90, PHYS)]}),
("Morphine can bind to μ (mu) opioid receptor (MOR), δ (delta) opioid receptor (DOR) and κ (kappa) opioid receptor (KOR).",
{"entities": [(0, 8, PHYS), (21, 43, PHYS), (45, 48, PHYS), (51, 76, PHYS), (78, 81, PHYS), (87, 112, PHYS), (114, 117, PHYS)]}),
("The multiple action sites of morphine in the brain decrease the effectiveness of morphine due to development of tolerance, physical dependence, and addiction.",
{"entities": [(29, 37, PHYS), (45, 50, LABEL), (81, 89, PHYS), (112, 121, FUNC), (123, 142, FUNC), (148, 157, FUNC)]}),
("Expression of MOR is also reported in the habenula—interpeduncular nucleus (IPN) pathway; suggesting the potential role of MOR in mediating the positive and negative effect of opioids, which needs to be further investigated (Gardon et al.,",
{"entities": [(14, 17, PHYS), (42, 50, LABEL), (51, 74, LABEL), (76, 79, LABEL), (123, 126, PHYS), (144, 172, FUNC), (176, 183, PHYS), (225, 231, PER)]}),
("Although the distribution of the oprm1 gene and protein in the whole tissue has been demonstrated in larval zebrafish (Bretaud et al.,",
{"entities": [(33, 38, PHYS), (119, 126, PER)]}),
("Next, to identify brain regions sensitive to morphine, we examined the effect of acute (20-min) morphine exposure on oprm1, cfos and npas4a gene expression in the brain by in situ hybridization and real-time PCR.",
{"entities": [(45, 53, PHYS), (96, 104, PHYS), (117, 122, PHYS), (124, 128, PHYS), (133, 139, PHYS), (163, 168, LABEL)]}),
("Some cells expressing mmp9 were also found in the medulla oblongata.",
{"entities": [(22, 26, PHYS), (50, 67, LABEL)]}),
("Expression of Slc17a7 gene in the brain of zebrafish.",
{"entities": [(14, 21, PHYS), (34, 52, LABEL)]}),
("Number of npas4 mRNA expressing cells in control and morphine-treated fish.",
{"entities": [(10, 20, PHYS), (53, 61, PHYS)]}),
("These studies suggest the distribution of DOR in the telencephalon is well-conserved across vertebrate species, which could be a center for reward (Charbogne et al.,",
{"entities": [(42, 45, PHYS), (53, 66, LABEL)]}),
("On the other hand, in situ hybridization is a sensitive method that allows detail anatomical localization, but the analysis of signal intensity is semi-quantitative.",
{"entities": []}),
("In the habenula, oxycodone induced downregulation of oprm1 and npas4a expression.",
{"entities": [(7, 15, LABEL), (17, 26, PHYS), (35, 49, FUNC), (53, 48, PHYS), (63, 69, PHYS)]}),
("Although there are no reports on the regulation of CXCR3 in the parabrachial nucleus of rodents, a reduced neuronal activity was reported in the previous study (Hashimoto et al.,",
{"entities": [(51, 56, PHYS), (64, 84, LABEL), (99, 124, FUNC)]}),
("The animal study was reviewed and approved by Animal Ethics Committee of Monash University (ethics approval number: MARP/2017/049).",
{"entities": []}),
("Following hybridization, the sections were washed and blocked with 2% normal sheep serum.",
{"entities": []}),
("To standardize sections with different background intensity, all the section were changed to gray mode using adobe illustrator software CS5.",
{"entities": []}),
("However, the sample size in this study is comparable to that in the previous study on neuronal activity quantification in zebrafish (Lau et al.,",
{"entities": [(133, 136, PER)]}),
("While for the semi-quantitative analysis of oprm1 and npas4a mRNA, staining density was subjectively scored on a five-point scale as follows + + + (high), + + (moderate), + (low), and – (absent).",
{"entities": [(44, 49, PHYS), (54, 65, PHYS)]}),
("Photomicrographs showing expression of oprm1 genes in sagittal (A,B) and coronal sections (C–H) of zebrafish brain.",
{"entities": [(39, 44, PHYS), (99, 114, LABEL)]}),
("The effect of morphine on the oprd1 gene expression was further morphologically assessed via in situ hybridization (ISH).",
{"entities": [(14, 22, PHYS), (30, 35, PHYS) ]}),
("In short, unbiased sampling was applied such that each location along the tissue section axis had an equal probability of being included in the sample and all locations in the plane of section (excluding the set guard zones)",
{"entities": []}),
("The design-based stereological approach that was used in this study to estimate total number of biological particles (terminal axon boutons) included the application of the 3-D optical fractionator probe applied within a known volume of a defined region of interest (ROI; each nuclei).",
{"entities": []}),
("All brain slices of one batch per animal were processed simultaneously under the same conditions.",
{"entities": []}),
("The protocol was adapted from previous publications [35], [36].",
{"entities": []}),
("Animals were placed, facing the wall, at 1 of 4 start locations (north, south, east or west) and allowed to swim to the visible platform for a maximum of 60 seconds.",
{"entities": []}),
("The number of stained cells in 6 sections was counted bilaterally, and, as every 12th section was used, this number was multiplied by 12.",
{"entities": []}),
("To determine cell survival and proliferation, cells were plated in triplicate in multiple sets of 12-well culture plates.",
{"entities": []}),
("Cell proliferation was quantified by electronically counting cell | |
<gh_stars>0
import csv
import re
import os
from pyaedt.generic.general_methods import aedt_exception_handler, generate_unique_name, is_ironpython
from pyaedt.application.Analysis import Analysis
from pyaedt.modeler.Model3D import Modeler3D
from pyaedt.modules.MeshIcepak import IcepakMesh
if is_ironpython:
from pyaedt.modules.PostProcessor import PostProcessor
else:
from pyaedt.modules.AdvancedPostProcessing import PostProcessor
class FieldAnalysisIcepak(Analysis, object):
"""Manages 3D field analysis setup in Icepak.
This class is automatically initialized by an application call from
HFSS, Icepak, Q3D, or Maxwell 3D. See the application function
for parameter definitions.
Parameters
----------
application : str
Application that is to initialize the call.
projectname : str, optional
Name of the project to select or the full path to the project
or AEDTZ archive to open. The default is ``None``, in which
case an attempt is made to get an active project. If no
projects are present, an empty project is created.
designname : str, optional
Name of the design to select. The default is ``None``, in
which case an attempt is made to get an active design. If no
designs are present, an empty design is created.
solutiontype : str, optional
Solution type to apply to the design. The default is
``None``, in which case the default type is applied.
setup_name : str, optional
Name of the setup to use as the nominal. The default is
``None``, in which case the active setup is used or
nothing is used.
specified_version : str, optional
Version of AEDT to use. The default is ``None``, in which case
the active version or latest installed version is used.
non_graphical : bool, optional
Whether to run AEDT in non-graphical mode. The default
is ``False``, in which case AEDT launches in graphical mode.
new_desktop_session : bool, optional
Whether to launch an instance of AEDT in a new thread, even if
another instance of the ``specified_version`` is active on the
machine. The default is ``False``.
close_on_exit : bool, optional
Whether to release AEDT on exit. The default is ``False``.
student_version : bool, optional
Whether to enable the student version of AEDT. The default
is ``False``.
"""
def __init__(
self,
application,
projectname,
designname,
solutiontype,
setup_name=None,
specified_version=None,
non_graphical=False,
new_desktop_session=False,
close_on_exit=False,
student_version=False,
):
Analysis.__init__(
self,
application,
projectname,
designname,
solutiontype,
setup_name,
specified_version,
non_graphical,
new_desktop_session,
close_on_exit,
student_version,
)
self._osolution = self._odesign.GetModule("Solutions")
self._oboundary = self._odesign.GetModule("BoundarySetup")
self._modeler = Modeler3D(self)
self._mesh = IcepakMesh(self)
self._post = PostProcessor(self)
@property
def osolution(self):
"""Solution Module.
References
----------
>>> oModule = oDesign.GetModule("Solutions")
"""
return self._osolution
@property
def oboundary(self):
"""Boundary Module.
References
----------
>>> oModule = oDesign.GetModule("BoundarySetup")
"""
return self._oboundary
@property
def modeler(self):
"""Modeler.
Returns
-------
:class:`pyaedt.modeler.Model3D.Modeler3D`
"""
return self._modeler
@property
def mesh(self):
"""Mesh.
Returns
-------
:class:`pyaedt.modules.MeshIcepak.IcepakMesh`
"""
return self._mesh
@aedt_exception_handler
def plot(
self,
objects=None,
show=True,
export_path=None,
plot_as_separate_objects=True,
plot_air_objects=True,
force_opacity_value=None,
clean_files=False,
):
"""Plot the model or a substet of objects.
Parameters
----------
objects : list, optional
Optional list of objects to plot. If `None` all objects will be exported.
show : bool, optional
Show the plot after generation or simply return the
generated Class for more customization before plot.
export_path : str, optional
Path to save an image of the plot. If `None`, no image is saved.
plot_as_separate_objects : bool, optional
Plot each object separately. It may require more time to export from AEDT.
plot_air_objects : bool, optional
Plot also air and vacuum objects.
force_opacity_value : float, optional
Opacity value between 0 and 1 to apply to the whole model.
If `None`, the AEDT opacity of each object is applied.
clean_files : bool, optional
Clean the created files after plotting. The cache is maintained in the returned model object.
Returns
-------
:class:`pyaedt.generic.plot.ModelPlotter`
Model Object.
"""
if is_ironpython:
self.logger.warning("Plot is available only on CPython")
elif self._aedt_version < "2021.2":
self.logger.warning("Plot is supported from AEDT 2021 R2.")
else:
return self.post.plot_model_obj(
objects=objects,
show=show,
export_path=export_path,
plot_as_separate_objects=plot_as_separate_objects,
plot_air_objects=plot_air_objects,
force_opacity_value=force_opacity_value,
clean_files=clean_files,
)
@aedt_exception_handler
def apply_icepak_settings(
self,
ambienttemp=20,
gravityDir=5,
perform_minimal_val=True,
default_fluid="air",
default_solid="Al-Extruded",
default_surface="Steel-oxidised-surface",
):
"""Apply Icepak default design settings.
Parameters
----------
ambienttemp : float, optional
Ambient temperature, which can be an integer or a parameter already
created in AEDT. The default is ``20``.
gravityDir : int, optional
Gravity direction index in the range ``[0, 5]``. The default is ``5``.
perform_minimal_val : bool, optional
Whether to perform minimal validation. The default is ``True``.
If ``False``, full validation is performed.
default_fluid : str, optional
Default fluid material. The default is ``"air"``.
default_solid : str, optional
Default solid material. The default is ``"Al-Extruded"``.
default_surface : str, optional
Default surface material. The default is ``"Steel-oxidised-surface"``.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
References
----------
>>> oDesign.SetDesignSettings
"""
try:
AmbientTemp = str(float(ambienttemp)) + "cel"
except:
AmbientTemp = ambienttemp
IceGravity = ["X", "Y", "Z"]
GVPos = False
if int(gravityDir) > 2:
GVPos = True
GVA = IceGravity[int(gravityDir) - 3]
self.odesign.SetDesignSettings(
[
"NAME:Design Settings Data",
"Perform Minimal validation:=",
perform_minimal_val,
"Default Fluid Material:=",
default_fluid,
"Default Solid Material:=",
default_solid,
"Default Surface Material:=",
default_surface,
"AmbientTemperature:=",
AmbientTemp,
"AmbientPressure:=",
"0n_per_meter_sq",
"AmbientRadiationTemperature:=",
AmbientTemp,
"Gravity Vector CS ID:=",
1,
"Gravity Vector Axis:=",
GVA,
"Positive:=",
GVPos,
],
["NAME:Model Validation Settings"],
)
return True
@aedt_exception_handler
def export_3d_model(self, fileName, filePath, fileFormat=".step", object_list=[], removed_objects=[]):
"""Export the 3D model.
Parameters
----------
fileName : str
Name of the file.
filePath : str
Path for the file.
fileFormat : str, optional
Format of the file. The default is ``".step"``.
object_list : list, optional
List of objects to export. The default is ``[]``.
removed_objects : list, optional
The default is ``[]``.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
References
----------
>>> oEditor.Export
"""
if not object_list:
allObjects = self.modeler.primitives.object_names
if removed_objects:
for rem in removed_objects:
allObjects.remove(rem)
else:
if "Region" in allObjects:
allObjects.remove("Region")
else:
allObjects = object_list[:]
self.logger.info("Exporting {} objects".format(len(allObjects)))
major = -1
minor = -1
# the current version supported by AEDT is 29.0
if fileFormat in [".step", ".stp", ".sm3", ".sat", ".sab"]:
major = 29
minor = 0
stringa = ",".join(allObjects)
arg = [
"NAME:ExportParameters",
"AllowRegionDependentPartSelectionForPMLCreation:=",
True,
"AllowRegionSelectionForPMLCreation:=",
True,
"Selections:=",
stringa,
"File Name:=",
os.path.join(filePath, fileName + fileFormat),
"Major Version:=",
major,
"Minor Version:=",
minor,
]
self.modeler.oeditor.Export(arg)
return True
@aedt_exception_handler
def get_property_value(self, objectname, property, type=None):
"""Retrieve a design property value for an object.
Parameters
----------
objectname : str
Name of the object.
property : str
Name of the design property.
type : str, optional
Type of the property. Options are ``"Boundary"``,
``"Excitation"``, ``"Setup"``, and ``"Mesh"``. The default
is ``None``.
Returns
-------
type
Value of the property.
References
----------
>>> oDesign.GetPropertyValue
Examples
--------
>>> val = ipk.get_property_value('BoundarySetup:Source1', 'Total Power')
"""
boundary = {"HFSS": "HfssTab", "Icepak": "Icepak", "Q3D": "Q3D", "Maxwell3D": "Maxwell3D"}
excitation = {"HFSS": "HfssTab", "Icepak": "Icepak", "Q3D": "Q3D", "Maxwell3D": "Maxwell3D"}
setup = {"HFSS": "HfssTab", "Icepak": "Icepak", "Q3D": "General", "Maxwell3D": "General"}
mesh = {"HFSS": "MeshSetupTab", "Icepak": "Icepak", "Q3D": "Q3D", "Maxwell3D": "Maxwell3D"}
all = {
"HFSS": ["HfssTab", "MeshSetupTab"],
"Icepak": ["Icepak"],
"Q3D": ["Q3D", "General"],
"Maxwell3D": ["Maxwell3D", "General"],
}
if type == "Boundary":
propserv = boundary[self._design_type]
val = self.odesign.GetPropertyValue(propserv, objectname, property)
return val
elif type == "Setup":
propserv = setup[self._design_type]
val = self.odesign.GetPropertyValue(propserv, objectname, property)
return val
elif type == "Excitation":
propserv = excitation[self._design_type]
val = self.odesign.GetPropertyValue(propserv, objectname, property)
return val
elif type == "Mesh":
propserv = mesh[self._design_type]
val = self.odesign.GetPropertyValue(propserv, objectname, property)
return val
else:
propservs = all[self._design_type]
for propserv in propservs:
properties = list(self.odesign.GetProperties(propserv, objectname))
if property in properties:
val = self.odesign.GetPropertyValue(propserv, objectname, property)
return val
return None
@aedt_exception_handler
def copy_solid_bodies_from(self, design, object_list=None, no_vacuum=True, no_pec=True):
"""Copy a list of objects from one design to the active design.
Parameters
----------
design : str
Starting application object. For example, ``'hfss1=HFSS3DLayout'``.
object_list : list, optional
List of objects to copy. The default is ``None``.
no_vacuum : bool, optional
Whether to include vacuum objects for the copied objects.
The default is ``True``.
no_pec : bool, optional
Whether to include pec objects for the copied objects. The
default is ``True``.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
References
----------
>>> oEditor.Copy
>>> oEditor.Paste
"""
body_list = design.modeler.solid_bodies
selection_list = []
material_properties = design.modeler.primitives.objects
for body in body_list:
include_object = True
if object_list:
if body not in object_list:
include_object = False
for key, val in | |
_("Countries")
def __str__(self):
if self.code and self.name:
return "{} - {}".format(self.code, self.name)
elif self.name:
return "{}".format(self.name)
return _("Unknown Country")
def get_absolute_url(self):
return reverse('business:business_pfcountry_detail', args=(self.pk,))
def get_update_url(self):
return reverse('business:business_pfcountry_update', args=(self.pk,))
@staticmethod
def api_pull(store=None, key=None):
"""
Update the Country and State objects from the Printful API.
:param store: Optional bzStore object. If not provided, method will
attempt to use the first store from the database if it exists.
:param key: If a key is provided, then it is used instead of store.
This is especially useful for when you're first creating a
store, and so avoids a race condition.
"""
if key:
api = pyPrintful(key=key)
else:
_storeObj = pfStore.get_store(store)
api = pyPrintful(key=_storeObj.key)
countries = api.get_countries_list()
for c in countries:
cObj, cCreated = pfCountry.objects.update_or_create(
code=c['code'],
defaults={
'name': c['name']
}
)
if c['states']:
for s in c['states']:
sObj, sCreated = pfState.objects.update_or_create(
code=s['code'],
pfcountry=cObj,
defaults={
'name': s['name'],
}
)
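# Illustrative call of api_pull (the key value is hypothetical); it upserts
# every country and its states via update_or_create:
#   pfCountry.api_pull(key="your-printful-api-key")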
def get_states(self):
return pfState.objects.filter(pfcountry=self)
get_states.short_description = _("States")
def num_states(self):
return self.get_states().count()
num_states.short_description = _("States")
class pfState(commonBusinessModel):
# Fields
code = CharField(_("Code"), max_length=50,
default="", blank=True, null=True)
name = CharField(_("Name"), max_length=255,
default="", blank=True, null=True)
# Relationship Fields
pfcountry = ForeignKey('business.pfCountry', verbose_name=_("Country"))
class Meta:
ordering = ('pfcountry__code', 'code',)
verbose_name = _("State")
verbose_name_plural = _("States")
def __str__(self):
if self.code and self.name:
return "{} - {}".format(self.code, self.name)
elif self.name:
return "{}".format(self.name)
return _("Unknown State")
def get_absolute_url(self):
return reverse('business:business_pfstate_detail', args=(self.pk,))
def get_update_url(self):
return reverse('business:business_pfstate_update', args=(self.pk,))
class pfSyncProduct(commonBusinessModel):
# Fields
pid = CharField(_("Printful ID"), max_length=200,
default="", blank=True, null=True)
external_id = CharField(_("External ID"), max_length=200,
default="", blank=True, null=True)
variants = IntegerField(_("Variant Count"), default=0)
synced = IntegerField(_("Synced"), default=0)
# Relationship Fields
pfstore = ForeignKey('business.pfStore', verbose_name=_("Store"))
class Meta:
ordering = ('-pk',)
verbose_name = _("Sync Product")
verbose_name_plural = _("Sync Products")
def __str__(self):
return u'%s' % self.pk
def get_absolute_url(self):
return reverse('business:business_pfsyncproduct_detail',
args=(self.pk,))
def get_update_url(self):
return reverse('business:business_pfsyncproduct_update',
args=(self.pk,))
class pfSyncVariant(commonBusinessModel):
# Fields
pid = CharField(_("Printful ID"), max_length=200,
default="", blank=True, null=True)
external_id = CharField(_("External ID"), max_length=200,
default="", blank=True, null=True)
synced = BooleanField(_("Synced"), default=False)
# Relationship Fields
pfsyncproduct = ForeignKey(
'business.pfSyncProduct', verbose_name=_("Sync Product"))
files = ManyToManyField('business.pfPrintFile', blank=True)
class Meta:
ordering = ('-pk',)
verbose_name = _("Sync Variant")
verbose_name_plural = _("Sync Variants")
def __str__(self):
return u'%s' % self.pk
def get_absolute_url(self):
return reverse('business:business_pfsyncvariant_detail',
args=(self.pk,))
def get_update_url(self):
return reverse('business:business_pfsyncvariant_update',
args=(self.pk,))
class pfSyncItemOption(commonBusinessModel):
# Fields
pid = CharField(_("Printful ID"), max_length=200,
default="", blank=True, null=True)
value = CharField(_("Value"), max_length=255,
default="", blank=True, null=True)
# Relationship Fields
pfsyncvariant = ForeignKey('business.pfSyncVariant', )
class Meta:
ordering = ('-pk',)
verbose_name = _("Sync Item Option")
verbose_name_plural = _("Sync Item Options")
def __str__(self):
return u'%s' % self.pk
def get_absolute_url(self):
return reverse(
'business:business_pfsyncitemoption_detail', args=(self.pk,))
def get_update_url(self):
return reverse(
'business:business_pfsyncitemoption_update', args=(self.pk,))
class pfCatalogColor(commonBusinessModel):
# Fields
code = CharField(_("Code"), max_length=3,
default="", blank=True, null=True)
name = CharField(_("Color"), max_length=255,
default="", blank=True, null=True)
label_clean = CharField(_("Clean Label"), max_length=255,
default="", blank=True, null=True)
hex_code = CharField(_("Color Hex Code"), max_length=255,
default="", blank=True, null=True)
class Meta:
ordering = ('-pk',)
verbose_name = _("Printful Color")
verbose_name_plural = _("Printful Colors")
def __str__(self):
rv = []
if self.code:
rv.append(self.code)
if self.label_clean:
rv.append(self.label_clean)
elif self.name:
rv.append(self.name)
if rv:
return " - ".join(rv)
return _("Unknown Color")
def get_absolute_url(self):
return reverse(
'business:business_pfcatalogcolor_detail', args=(self.pk,))
def get_update_url(self):
return reverse(
'business:business_pfcatalogcolor_update', args=(self.pk,))
def get_hex_code_clean(self):
return self.hex_code.replace("#", "")
class pfCatalogSize(commonBusinessModel):
# Fields
code = CharField(_("Code"), max_length=3,
default="", blank=True, null=True)
name = CharField(_("Size"), max_length=255,
default="", blank=True, null=True)
label_clean = CharField(_("Clean Label"), max_length=255,
default="", blank=True, null=True)
sort_group = CharField(_("Sort Group"), max_length=2,
default="", blank=True, null=True)
sort_order = CharField(_("Sort Order"), max_length=16,
default="", blank=True, null=True)
class Meta:
ordering = ('sort_group', 'sort_order',)
verbose_name = _("Printful Size")
verbose_name_plural = _("Printful Sizes")
def __str__(self):
rv = []
if self.code:
rv.append(self.code)
if self.label_clean:
rv.append(self.label_clean)
elif self.name:
rv.append(self.name)
if rv:
return " - ".join(rv)
return _("Unknown Size")
def get_absolute_url(self):
return reverse('business:business_pfcatalogsize_detail',
args=(self.pk,))
def get_update_url(self):
return reverse('business:business_pfcatalogsize_update',
args=(self.pk,))
class pfCatalogFileSpec(commonBusinessModel):
COLORSYSTEM_RGB = 'R'
COLORSYSTEM_CMYK = 'Y'
COLORSYSTEM_CHOICES = (
(COLORSYSTEM_RGB, "RGB"),
(COLORSYSTEM_CMYK, "CMYK"),
)
# Fields
name = CharField(_("Name"), max_length=5,
default="", blank=True, null=True)
note = TextField(_("Note"), default="", blank=True, null=True)
width = IntegerField(_("Width"), default=0)
height = IntegerField(_("Height"), default=0)
width_in = DecimalField(_("Width (in)"), default=0,
decimal_places=2, max_digits=4)
height_in = DecimalField(_("Height (in)"), default=0,
decimal_places=2, max_digits=4)
ratio = CharField(_("Ratio"), max_length=32,
default="", blank=True, null=True)
colorsystem = CharField(_("Color System"), max_length=1,
default="R", choices=COLORSYSTEM_CHOICES)
class Meta:
ordering = ('name',)
verbose_name = _("Printful File Spec")
verbose_name_plural = _("Printful File Specs")
def __str__(self):
if self.name:
return self.name
return _("Unknown File Spec")
def get_absolute_url(self):
return reverse(
'business:business_pfcatalogfilespec_detail', args=(self.pk,))
def get_update_url(self):
return reverse(
'business:business_pfcatalogfilespec_update', args=(self.pk,))
def save(self, *args, **kwargs):
if self.width and not self.width_in:
self.width_in = int(self.width / 300)
elif self.width_in and not self.width:
self.width = self.width_in * 300
if self.height and not self.height_in:
self.height_in = int(self.height / 300)
elif self.height_in and not self.height:
self.height = self.height_in * 300
# This should prevent ZeroDivisionError exceptions.
if not self.ratio and self.width and self.height:
_fraction = Fraction(int(self.width), int(self.height))
self.ratio = "{}:{}".format(
_fraction.numerator, _fraction.denominator)
super(pfCatalogFileSpec, self).save(*args, **kwargs)
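# Worked example of save(): width=4800, height=6000 (a 16x20-inch print at the
# 300 DPI assumed by the conversion above) fills in width_in=16, height_in=20,
# and ratio "4:5", since Fraction(4800, 6000) reduces to 4/5.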
class pfCatalogFileType(commonBusinessModel):
# Fields
pid = CharField(_("Printful ID"), max_length=255,
default="", blank=True, null=True)
title = CharField(_("Title"), max_length=255,
default="", blank=True, null=True)
additional_price = CharField(_("Additional Price"), max_length=100,
default="", blank=True, null=True)
# Relationship Fields
pfcatalogvariant = ForeignKey(
'business.pfCatalogVariant', verbose_name=_("Variant"))
class Meta:
ordering = ('-pk',)
verbose_name = _("Printful File Type")
verbose_name_plural = _("Printful File Types")
def __str__(self):
return u'%s' % self.pk
def get_absolute_url(self):
return reverse(
'business:business_pfcatalogfiletype_detail', args=(self.pk,))
def get_update_url(self):
return reverse(
'business:business_pfcatalogfiletype_update', args=(self.pk,))
class pfCatalogOptionType(commonBusinessModel):
# Fields
pid = CharField(_("Printful ID"), max_length=255,
default="", blank=True, null=True)
title = CharField(_("Title"), max_length=255,
default="", blank=True, null=True)
type = CharField(_("Type"), max_length=255,
default="", blank=True, null=True)
additional_price = CharField(_("Additional Price"), max_length=100,
default="", blank=True, null=True)
# Relationship Fields
pfcatalogvariant = ForeignKey('business.pfCatalogVariant', )
class Meta:
ordering = ('-pk',)
verbose_name = _("Printful Option Type")
verbose_name_plural = _("Printful Option Types")
def __str__(self):
return u'%s' % self.pk
def get_absolute_url(self):
return reverse(
'business:business_pfcatalogoptiontype_detail', args=(self.pk,))
def get_update_url(self):
return reverse(
'business:business_pfcatalogoptiontype_update', args=(self.pk,))
class pfCatalogBrand(commonBusinessModel):
name = CharField(_("Name"), max_length=128,
null=True, blank=True, default="")
def __str__(self):
if self.name:
return self.name
return "Unknown Brand"
class Meta:
ordering = ('-pk',)
verbose_name = _("Catalog Brand")
verbose_name_plural = _("Catalog Brands")
class pfCatalogType(commonBusinessModel):
name = CharField(_("Name"), max_length=128,
null=True, blank=True, default="")
def __str__(self):
if self.name:
return self.name
return "Unknown Type"
class Meta:
ordering = ('-pk',)
verbose_name = _("Catalog Product Type")
verbose_name_plural = _("Catalog Product Types")
class pfCatalogProduct(commonBusinessModel):
# Fields
is_active = BooleanField(_("Is Active?"), default=True)
pid = CharField(_("Printful ID"), max_length=255,
default="", blank=True, null=True)
ptype = ForeignKey('business.pfCatalogType', blank=True, null=True)
brand = ForeignKey('business.pfCatalogBrand', blank=True, null=True)
model = CharField(_("Model"), max_length=255,
default="", blank=True, null=True)
image = CharField(_("Image"), max_length=255,
default="", blank=True, null=True)
variant_count = IntegerField(_("Variants"), default=0)
class Meta:
ordering = ('brand', 'model')
verbose_name = _("Printful Product")
verbose_name_plural = _("Printful Products")
def __str__(self):
return self.get_friendly_name()
def get_friendly_name(self):
if self.pid and self.brand and self.model:
return "{} / {} ({})".format(self.brand, self.model, self.pid)
return "Unknown Product"
def get_absolute_url(self):
return reverse(
'business:business_pfcatalogproduct_detail', args=(self.pk,))
def get_update_url(self):
return reverse(
'business:business_pfcatalogproduct_update', args=(self.pk,))
def get_variants(self):
return pfCatalogVariant.objects.filter(pfcatalogproduct=self)
get_variants.short_description = _("Variants")
def get_colors(self):
"""
Get all color objects associated with this product's variants.
"""
return pfCatalogColor.objects.filter(pfcatalogvariant__in=self.get_variants()).distinct()
get_colors.short_description = _("Colors")
def get_colors_as_string(self):
c = self.get_colors()
if c:
rv = ", ".join([v.label for v in c])
else:
rv = "-"
return rv
get_colors_as_string.short_description = _("Available Colors")
def num_colors(self):
return self.get_colors().count()
num_colors.short_description = _("Colors")
def get_sizes(self):
return pfCatalogSize.objects.filter(pfcatalogvariant__in=self.get_variants()).distinct()
get_sizes.short_description = _("Sizes")
def get_sizes_as_string(self):
s = self.get_sizes()
if s:
rv = ", ".join([v.get_name() for v in s])
else:
rv = "-"
return rv
get_sizes_as_string.short_description = _("Available Sizes")
def num_sizes(self):
return self.get_sizes().count()
num_sizes.short_description = _("Sizes")
def get_out_of_stock(self):
return pfCatalogVariant.objects.filter(pfcatalogproduct=self, in_stock=False)
def num_out_of_stock(self):
return self.get_out_of_stock().count()
num_out_of_stock.short_description = _("Out of Stock")
@staticmethod
def api_pull(store=None, key=None):
"""
Update the product objects from the Printful API.
:param store: Optional bzStore object. If not provided, method will
attempt to use the first store from the database if it exists.
:param key: If a key is provided, then it is used instead of store.
This is especially useful for when you're first creating a
store, and so avoids a race condition.
"""
if key:
api = pyPrintful(key=key)
else:
_storeObj = pfStore.get_store(store)
api = pyPrintful(key=_storeObj.key)
logger.debug("pfCatalogProduct.api_pull / Making API Call")
products = api.get_product_list()
logger.debug("pfCatalogProduct.api_pull / All: is_active=False")
pfCatalogProduct.objects.all().update(is_active=False)
for p in products:
# {
# 'dimensions': {
# '16×20': '16×20',
# },
# 'options': [],
# 'files': [
# {'id': 'preview', 'title': 'Mockup', 'type': 'mockup', 'additional_price': | |
from collections import defaultdict
from pathlib import Path
from typing import Dict, Iterable, List, Optional, Tuple, Union
import networkx as nx
import onnx
from . import graph_ir as g
from .onnx_attr import get_node_shape, node_attr_to_dict, node_to_shape
PathLike = Union[str, Path]
GraphT = onnx.GraphProto
NodeT = onnx.NodeProto
NodeT.__hash__ = lambda self: id(self)
NodeT.__repr__ = NodeT.__str__ = lambda self: self.name
class MarkedSubGraph:
"""A subgraph with information on how it should replace a node in a super graph.
subgraph: a nx.DiGraph subgraph
entry_edges: a list of edges from nodes "outside" to nodes in self.subgraph
exit: the exit node of the subgraph.
When this subgraph replaces a node `n`, self.exit will be connected to
whateven `n` is connected to.
"""
def __init__(self, subgraph: nx.DiGraph, entry_edges, exit) -> None:
assert all(to in subgraph for _, to, _ in entry_edges)
assert exit in subgraph
self.subgraph, self.exit = subgraph, exit
self.entry_edges = [(f, t, {"index": i}) for f, t, i in entry_edges]
@classmethod
def idiomatic_1to2(cls, node1, node2, predecessors):
"""Create an idiomatic replacement as follow:
node(arg1, arg2, arg3) -> node2(node1(arg1, arg2), arg3)"""
p0, p1, p2 = predecessors
graph = nx.DiGraph()
graph.add_edge(node1, node2, index=0)
return cls(graph, [(p0, node1, 0), (p1, node1, 1), (p2, node2, 1)], node2)
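# Usage sketch: _emit_node() below uses this to split a three-input Conv or
# Gemm into the main op followed by a bias addition, e.g.
#   MarkedSubGraph.idiomatic_1to2(conv_node, bias_node, predec)
# where predec holds the three onnx predecessors of the original node.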
EmitNodeT = Union[MarkedSubGraph, g.DFGNode]
class DFG(object):
"""ONNX model translated into DFG with `DFGNode`s.
This class has a DFG, input/output information, and a clear traverse order
(think dominant tree), and is easier for CodeGen classes to work with."""
def __init__(self, graph: GraphT):
self._check_model(graph)
self._var_count = 0
# Build explicit DFG with ONNX nodes
onnx_graph = self._build_onnx_dfg(graph)
# Convert ONNX dfg into DFGNode DFG
self.graph = self._build_dfg(onnx_graph)
# Find out input nodes and output node (unique)
# removing dead nodes along the way if any
self.inputs, self.output = self._dce_get_io_info()
################ Interfaces:
@property
def traverse_order(self) -> List[g.DFGNode]:
"""Get topological order of computational graph by use-def relation."""
return list(nx.topological_sort(self.graph))
def node_args(self, node: g.DFGNode):
"""Get input arguments of node."""
sorted_edges = sorted(self.graph.in_edges(node, "index"), key=lambda p: p[2])
return [e[0] for e in sorted_edges]
def dump_weights(self, output_dir: PathLike) -> None:
"""Dump `WeightTensor`s into output_dir."""
output_dir = Path(output_dir)
for node in self.graph.nodes:
if not isinstance(node, g.WeightTensor):
continue
node.dump_weight(output_dir / (node.new_name + "_path.bin"))
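# Minimal driver sketch (assumes `model` is a loaded onnx.ModelProto):
#   dfg = DFG(model.graph)
#   for node in dfg.traverse_order:
#       args = dfg.node_args(node)  # arguments sorted by input index
#   dfg.dump_weights("weights_dir")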
################ Internal methods (high-level):
@staticmethod
def _check_model(onnx_graph: GraphT):
"""Check model validaty and single output (which is our limitation)"""
import warnings
from onnx import checker, onnx_cpp2py_export
# try use onnx's own model checker before converting any model
try:
checker.check_graph(onnx_graph)
except onnx_cpp2py_export.checker.ValidationError as e:
warnings.warn(str(e))
if any(len(n.output) > 1 for n in onnx_graph.node):
    raise ValueError("All nodes must have a single output")
if len(onnx_graph.output) > 1:
    raise ValueError("Graph must have a single output")
@staticmethod
def _build_onnx_dfg(graph: GraphT) -> nx.DiGraph:
"""Creates a DiGraph (by use-def relation) of onnx nodes from onnx GraphProto.
DiGraph is easier to use as a graph compared to GraphProto where use-def is implicit."""
ret_graph = nx.DiGraph()
onnx_defs, onnx_uses = def_use(graph.node)
node_shape = node_to_shape(graph)
node_and_attr = [(n, {"shape": shape}) for n, shape in node_shape.items()]
ret_graph.add_nodes_from(node_and_attr)
tensors = extract_tensors_from_graph(graph)
tensor_and_attr = [(t, {"shape": t.output_shape}) for t in tensors.values()]
ret_graph.add_nodes_from(tensor_and_attr)
for onnx_value_name, use_nodes in onnx_uses.items():
def_node = onnx_defs.get(onnx_value_name)
if def_node is None:
def_node = tensors[onnx_value_name]
for use_node, used_at_narg in use_nodes:
ret_graph.add_edge(def_node, use_node, index=used_at_narg)
return ret_graph
def _build_dfg(self, onnx_graph: nx.DiGraph) -> nx.DiGraph:
"""Translate _build_onnx_dfg output into DFGNode DFG.
First run some passes to process subgraphs that needs to be
processed together, then each unprocessed node is generated into
1 or more nodes."""
# Gemm in tensor_runtime does reshape automatically
# it also doesn't have a dedicated reshape operator
onnx_graph = drop_reshape_before_gemm(onnx_graph)
# For each onnx node, generate our nodes
node_to_nodes, error_nodes = {}, []
for onnx_node in nx.topological_sort(onnx_graph):
our_nodes = self._emit_node(onnx_graph, onnx_node)
if our_nodes is None:
error_nodes.append(onnx_node)
else:
node_to_nodes[onnx_node] = our_nodes
if error_nodes:
error_repr = [f"{n.name}({n.op_type})" for n in error_nodes]
if len(error_nodes) > 10: # Magic number
raise ValueError(f"Unsupported operators (first 10): {error_repr[:10]}")
else:
raise ValueError(f"Unsupported operators: {error_repr}")
# Apply node_to_nodes replacement on onnx_graph to create a new DFG
return build_graph_with_mapping(onnx_graph, node_to_nodes)
def _dce_get_io_info(self):
inputs = [n for n in self.graph if isinstance(n, g.InputTensor)]
inputs_set = set(inputs)
reachables = set()
for component in nx.connected_components(self.graph.to_undirected()):
# If any inputs goes into this subgraph, it's alive.
if set(component).intersection(inputs_set):
reachables.update(component)
unreachables = set(self.graph) - reachables
# Remove nodes unreachable from input
self.graph.remove_nodes_from(unreachables)
# Then outputs are nodes with out_degree = 0
outputs = [n for n in self.graph if self.graph.out_degree[n] == 0]
assert len(outputs) == 1
return inputs, outputs[0]
@staticmethod
def _emit_node(in_graph: nx.DiGraph, node: NodeT) -> Optional[EmitNodeT]:
output_shape = in_graph.nodes[node].get("shape")
predec = sorted_inputs(in_graph, node)
predec_shapes = [in_graph.nodes[n].get("shape") for n in predec]
if isinstance(node, g.DFGNode):
# Directly add node into return graph.
return node
attrs = node_attr_to_dict(node)
attrs["input_shapes"] = predec_shapes
attrs["output_shape"] = output_shape
if node.op_type == "Conv":
if not isinstance(predec[1], g.WeightTensor) or len(predec_shapes[1]) != 4:
return None # Only supports 2D conv with rhs being constant
# Only pass in the first 2 arguments' shapes
attrs["input_shapes"] = predec_shapes[:2]
conv_node = g.Conv2DNode(node.name, **attrs)
if len(predec) == 2:
return conv_node
# Split into conv followed by an addition
bias_node = g.BiasAddNode(
f"Bias_{node.name.split('_')[-1]}", [output_shape], output_shape
)
return MarkedSubGraph.idiomatic_1to2(conv_node, bias_node, predec)
if node.op_type in ("MatMul", "Gemm"):
attrs["input_shapes"] = predec_shapes[:2]
mul_node = g.MatMulNode(node.name, **attrs)
if node.op_type == "Gemm":
mul_node.gemm_transpose(predec)
if len(predec) == 2:
return mul_node
# Split into mul followed by an addition
bias_node = g.BiasAddNode(
f"Bias_{node.name.split('_')[-1]}", [output_shape], output_shape
)
return MarkedSubGraph.idiomatic_1to2(mul_node, bias_node, predec)
if node.op_type == "GlobalAveragePool":
input0_shape = in_graph.nodes[predec[0]]["shape"]
_, _, h, w = input0_shape
return g.AveragePool2DNode(
node.name, predec_shapes, output_shape, [1, 1], (h, w), [0, 0, 0, 0]
)
one_to_one_nodes = {
"MaxPool": g.MaxPool2DNode,
"AveragePool": g.AveragePool2DNode,
"Add": g.AddNode,
"Softmax": g.SoftMaxNode,
"Relu": g.ReluNode,
"Tanh": g.TanhNode,
"BatchNormalization": g.BatchNormalizationNode,
"Pad": g.PadNode,
"Identity": g.IdentityNode,
"Flatten": g.FlattenNode,
}
if node.op_type not in one_to_one_nodes:
return None
try:
return one_to_one_nodes[node.op_type](node.name, **attrs)
except (TypeError, KeyError, ValueError, RuntimeError):
node_class = one_to_one_nodes[node.op_type]
raise ValueError(f"Node ({node_class}) creation failed")
def def_use(nodes: Iterable) -> Tuple[dict, dict]:
"""Computes def/use relation from a list of node.
This method is duck-typed and operates on any node defining .input and .output.
"""
defs, uses = {}, defaultdict(list)
for n in nodes:
for i, input_ in enumerate(n.input):
uses[input_].append((n, i))
for output in n.output:
defs[output] = n
return defs, uses
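# Worked example: for nodes A (output=["x"]) and B (input=["x"]), def_use([A, B])
# returns defs == {"x": A} and uses == {"x": [(B, 0)]}, i.e. B consumes "x" as
# its argument 0.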
def drop_reshape_before_gemm(graph: nx.DiGraph) -> nx.DiGraph:
"""Look for a shape-gather-unsqueeze-concat-reshape chain and replace that with flatten."""
for node in list(graph.nodes):
if node.op_type != "Reshape":
continue
reshape_input, target_shape = sorted_inputs(graph, node)
if not isinstance(target_shape, g.WeightTensor): # Not constant shape, nope
continue
n_gemm = get_next_in_chain(graph, "Gemm", node)
if n_gemm is None:
continue
# Must be an (n-1)-d flatten before gemm
assert list(target_shape.input_data) == [1, -1]
# Connect input of reshape to gemm, then remove reshape
graph.add_edge(reshape_input, n_gemm, index=0)
graph.remove_node(node)
return graph
def get_next_in_chain(
graph: nx.DiGraph, type_: str, node: Optional[NodeT]
) -> Optional[NodeT]:
"""
Get a unique user node of the unique output of Node `node`,
and return it if it has Type `type_`.
"""
if node is None or len(node.output) != 1:
return None # Propagates None; Unique output
users = list(graph.neighbors(node))
if len(users) != 1 or users[0].op_type != type_:
return None # Unique user of the output; Correct type
return users[0]
def build_graph_with_mapping(
graph: nx.DiGraph, node_mapping: Dict[NodeT, EmitNodeT]
) -> nx.DiGraph:
graph = graph.copy()
single_node, multi_node = {}, {}
for replace_node, by_node in node_mapping.items():
if isinstance(by_node, g.DFGNode):
single_node[replace_node] = by_node
else:
multi_node[replace_node] = by_node
# We do one-to-many replacements first
# because their predecessors are specified as onnx nodes.
for replace_node, subgraph in multi_node.items():
# Add subgraph itself
graph = nx.compose(graph, subgraph.subgraph)
# Add in edges
graph.add_edges_from(subgraph.entry_edges)
# Add out edges
succ = graph.out_edges(replace_node, "index")
for _, to, index in succ:
graph.add_edge(subgraph.exit, to, index=index)
# Remove old node
graph.remove_node(replace_node)
# Then do all one-to-one replacements.
graph = nx.relabel_nodes(graph, single_node)
return graph
def extract_tensors_from_graph(onnx_graph: GraphT) -> Dict[str, g.TensorNode]:
tensors = {}
# parse weight
weight_cnt = 0
for weight_tensor in onnx_graph.initializer:
tensors[weight_tensor.name] = g.WeightTensor(
weight_tensor, f"weight_{weight_cnt}"
)
weight_cnt += 1
# parse input
input_cnt = 0
for input_ in onnx_graph.input:
if input_.name in tensors:
continue
tensors[input_.name] = g.InputTensor(
input_, get_node_shape(input_), f"input_{input_cnt}"
)
input_cnt += 1
return tensors
def sorted_inputs(graph: nx.DiGraph, node):
sorted_edges = sorted(graph.in_edges(node, "index"), key=lambda | |
# -*- coding: utf-8 -*-
"""
Copyright 2015-2021 The MITRE Corporation.
Licensed under the Apache License, Version 2.0 (the "License"); you may not
use this file except in compliance with the License. You may obtain a copy of
the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations under
the License.
=============================================================================
@author: ubaldino
OpenSextant utilities
"""
import csv
import os
import re
from io import StringIO
from math import isnan
from chardet import detect as detect_charset
from .unicode import LATIN1_FOLDING
# ---------------------------------------
# TEXT UTILITIES
# ---------------------------------------
#
def is_text(t):
return isinstance(t, str)
code_pattern = re.compile("^[A-Z0-9]{1,}$", re.ASCII)
def is_code(t: str, nlen=6):
"""
Test if a string is an ASCII code, typically short (at most `nlen` characters).
:param t: text
:param nlen: threshold for string len
:return:
"""
if not t:
return False
if len(t) > nlen or not t.isupper():
return False
return code_pattern.match(t) is not None
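# e.g. is_code("NYC") -> True; is_code("nyc") -> False (not upper-case);
# is_code("ABCDEFG") -> False (longer than the default nlen of 6)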
def is_abbreviation(nm: str):
"""
Determine if something is an abbreviation:
if the text ends with "." we conclude that it is.
Examples:
Ala. YES
Ala NO
S. Bob NO -- abbreviated, yes, but this is more like a contraction.
S. B. YES
:param nm: textual name
:return: True if obj is inferred to be an abbreviation
"""
return nm.endswith(".")
def is_ascii(s):
try:
return all(ord(c) < 128 for c in s)
except:
pass
return False
def get_text(t):
""" Default is to return Unicode string from raw data"""
if isinstance(t, str):
return t
return str(t, encoding='utf-8')
def fast_replace(t, sep, sub=None):
"""
Replace separators (sep) with substitute char, sub. Many-to-one substitute.
"a.b, c" SEP='.,'
:param t: input text
:param sep: string of chars to replace
:param sub: replacement char
:return: text with separators replaced
"""
result = []
for ch in t:
if ch in sep:
if sub:
result.append(sub)
else:
result.append(ch)
return ''.join(result)
# ISO-8859-2 is a common answer, when they really mean ISO-1
CHARDET_LATIN2_ENCODING = 'ISO-8859-1'
def guess_encoding(text):
""" Given bytes, determine the character set encoding
@return: dict with encoding and confidence
"""
if not text: return {'confidence': 0, 'encoding': None}
enc = detect_charset(text)
cset = enc['encoding']
if cset.lower() == 'iso-8859-2':
# Anomaly -- chardet thinks Hungarian (iso-8859-2) is
# a close match for a latin-1 document. At least the quotes match.
# Other Latin-xxx variants will likely match but actually be Latin-1
# or win-1252. See the chardet explanation for the poor reliability of Latin-1 detection.
#
enc['encoding'] = CHARDET_LATIN2_ENCODING
return enc
def bytes2unicode(buf, encoding=None):
"""
Convert bytes 2 unicode by guessing character set.
:param buf:
:param encoding:
:return:
"""
if not encoding:
enc = guess_encoding(buf)
encoding = enc['encoding']
if not encoding:
return None
return str(buf, encoding=encoding)
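# e.g. bytes2unicode(b'caf\xc3\xa9', encoding='utf-8') -> 'café';
# with encoding=None the character set is guessed via guess_encoding()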
reSqueezeWhiteSpace = re.compile(r'\s+', re.MULTILINE)
def squeeze_whitespace(s):
return reSqueezeWhiteSpace.sub(' ', s).strip()
def scrub_eol(t):
return t.replace('\n', ' ').replace('\r', '')
def levenshtein_distance(s, t):
"""
Wikipedia page on Levenshtein Edit Distance
https://en.wikipedia.org/wiki/Levenshtein_distance
This is the fastest, simplest of 3 methods documented for Python.
"""
s = ' ' + s
t = ' ' + t
d = {}
S = len(s)
T = len(t)
if S == T and s == t:
return 0
for i in range(S):
d[i, 0] = i
for j in range(T):
d[0, j] = j
for j in range(1, T):
for i in range(1, S):
if s[i] == t[j]:
d[i, j] = d[i - 1, j - 1]
else:
d[i, j] = min(d[i - 1, j] + 1, d[i, j - 1] + 1, d[i - 1, j - 1] + 1)
return d[(S - 1, T - 1)]
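# e.g. levenshtein_distance("kitten", "sitting") == 3
# (substitute k->s, substitute e->i, insert g)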
BOOL_F_STR = {"false", 0, "0", "n", "f", "no", "", "null"}
BOOL_T_STR = {"true", 1, "1", "y", "t", "yes"}
def get_bool(token):
if not token:
return False
if isinstance(token, bool):
return token
if isinstance(token, int):
if token > 0:
return True
if token == 0:
return False
t = token.lower()
if t in BOOL_F_STR:
return False
if t in BOOL_T_STR:
return True
return False
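# e.g. get_bool("Yes") -> True; get_bool("null") -> False; get_bool(2) -> True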
def get_number(token):
""" Turn leading part of a string into a number, if possible.
"""
num = StringIO()
for ch in token:
if ch.isdigit() or ch == '.' or ch == '-':
num.write(ch)
else:
break
val = num.getvalue()
num.close()
return val
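# e.g. get_number("12.5km") -> "12.5"; note the leading number is returned
# as a string, not converted to a float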
def has_digit(text):
"""
Used primarily to report places and appears to be critical for
name filtering when doing phonetics.
"""
if text is None:
return False
for ch in text:
# ascii
if ch.isdigit():
return True
return False
def is_value(v):
"""
Working more with pandas or sci libraries -- you run into various types of default "Null" values.
This checks to see if value is non-trivial, non-empty.
:param v:
:return:
"""
if v is None:
return False
if isinstance(v, (float, int)):
return not isnan(v)
return True
def parse_float(v):
if not v:
return None
try:
return float(v)
except Exception as float_err:
print("Unable to parse float", v, str(float_err))
return None
def get_list(text, delim=',', lower=False):
"""
Take a string and return trim segments given the delimiter:
"A, B,\tC" => ["A", "B", "C"]
:param text:
:param delim: delimiter str
:param lower: True if you want items lowercased
:return: array
"""
if not text:
return []
data = text.split(delim)
arr = []
for v in data:
_v = v.strip()
if _v:
if lower:
_v = _v.lower()
arr.append(_v)
return arr
def get_text_window(offset, matchlen, textsize, width):
""" prepreprepre MATCH postpostpost
^ ^ ^ ^
l-width l l+len l+len+width
left_y left_x right_x right_y
"""
left_x = offset - width
left_y = offset - 1
right_x = offset + matchlen
right_y = right_x + width
if left_x < 0:
left_x = 0
if left_y < left_x:
left_y = left_x
# bounds checking END....y? then y=END, results in shorter postmatch
if right_y >= textsize:
right_y = textsize - 1
# bounds checking y.... x? then x=y, results in empty postmatch
if right_x > right_y:
right_x = right_y
return [left_x, left_y, right_x, right_y]
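# Worked example: get_text_window(10, 5, 100, 3) -> [7, 9, 15, 18], i.e. three
# chars of pre-match text, the match spanning [10, 15), and three chars after.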
def has_cjk(text):
"""
Infer if Chinese (Unihan), Korean (Hangul), or Japanese (Hiragana) characters are present.
:param text:
:return:
"""
# CJK: Hiragana, Katakana, Unified Ideographs, Hangul.
search = re.search("[\u3000-\u30ff\u3400-\u4dbf\u4e00-\u9fff\uac00-\ud7af]", text, flags=re.IGNORECASE | re.UNICODE)
return search is not None
def has_arabic(text):
"""
infer if text has Arabic / Middle-eastern scripts ~ Urdu, Farsi, Arabic.
:param text:
:return:
"""
search = re.search("[\u0600-\u08ff]", text, flags=re.IGNORECASE | re.UNICODE)
return search is not None
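# e.g. has_cjk("東京") -> True and has_arabic("مرحبا") -> True, while both
# return False for plain ASCII text such as "hello"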
def trivial_bias(name):
""" Experimental: Deteremine unique a name is using length and character set and # of words
Abcd 4/2 + 1 + 0 x 0.02 = 0.06
Abcde fghi 10/2 + 2 + 0 x 0.02 = 0.14
Abcdé fghi 10/2 + 2 + 1 x 0.02 = 0.16
"""
l_points = len(name) / 2
word_points = len(name.split())
charset_points = 1 if not is_ascii(name) else 0
score = (l_points + word_points + charset_points) * 0.02
return float("{:0.3}".format(score))
COMMON_DIACRITC_HASHMARKS = re.compile("[\"'`\u00B4\u2018\u2019]")
def replace_diacritics(txt: str):
"""
Leverage the OpenSextant traditional ASCII Folding map for now.
Yes encoded("ascii", "ignore") may do this....
:param txt:
:return: a non-diacritic version of the text
"""
str_prepped = COMMON_DIACRITC_HASHMARKS.sub("'", txt)
buf = []
for ch in str_prepped:
buf.append(LATIN1_FOLDING.get(ch, ch))
return "".join(buf)
def strip_quotes(t):
"""
Run replace_diacritics first -- this routine only attempts to remove normal quotes ~ ', "
"""
return t.strip('"').strip("'")
# /---------------------------------------
# FILE UTILITIES
# /---------------------------------------
#
def _utf_8_encoder(unicode_csv_data):
for line in unicode_csv_data:
yield line.encode('utf-8')
def get_csv_writer(fh, columns, delim=','):
return csv.DictWriter(fh, columns, restval="", extrasaction='raise',
dialect='excel', lineterminator='\n',
delimiter=delim, quotechar='"',
quoting=csv.QUOTE_ALL, escapechar='\\')
def get_csv_reader(fh, columns, delim=','):
return csv.DictReader(fh, columns, restval="", dialect='excel', lineterminator='\n', escapechar='\\',
delimiter=delim, quotechar='"', quoting=csv.QUOTE_ALL)
# |||||||||||||||||||||||||||||||||||||||||||||
# |||||||||||||||||||||||||||||||||||||||||||||
class ConfigUtility:
""" A utility to load parameter lists, CSV files, word lists, etc. from a folder *dir*
functions here take an Oxygen cfg parameter keyword or a file path.
If the keyword is valid and points to a valid file path, then the file path is used.
In otherwords, keywords are aliases for a file on disk.
Ex. 'mywords' = './cfg/mywords_v03_filtered.txt'
oxygen.cfg file would have this mapping. Your code just references 'mywords' to load it.
"""
def __init__(self, config=None, rootdir='.'):
        self.config = config
        self.rootdir = rootdir
# Repository: Kricki/kicktipper
import mechanicalsoup
import re
import pandas as pd
import warnings
import getpass
class KicktippAPI:
""" API for communication with kicktipp.de website
Attributes
----------
name : str
Name of the kicktipp group
members : pandas.DataFrame
DataFrame containing registered members of the kicktipp group
"""
def __init__(self, name):
"""
Parameters
----------
name : str
Name of the kicktipp group
"""
        self.name = name  # the property setter below also derives the group URLs
        self.members = pd.DataFrame(columns=['name', 'id'])
self._browser = mechanicalsoup.StatefulBrowser(soup_config={'features': 'html5lib'})
@property
def name(self):
"""str: Name of the kicktipp group"""
return self._name
@name.setter
def name(self, value):
self._name = value
self._url = "https://www.kicktipp.de/" + self._name + "/"
self._url_login = self._url + "profil/login"
self._url_logout = self._url + "profil/logout"
self._url_tippabgabe = self._url + "tippabgabe"
@staticmethod
def read_username_from_user_input():
return input('Username: ')
@staticmethod
def read_password_from_user_input():
return getpass.getpass('Password: ')
def _browser_open(self, url):
""" Open URL.
The object self.browser is updated.
Parameters
----------
url : str
URL of target website
Returns
-------
bool
True if opening was successful, False otherwise.
"""
self._browser.open(url)
        return self._browser.get_url() == url
    def login(self, username=None, password=None):
""" Logs into the kicktipp website in the current group.
Parameters
----------
username : str
Username, optional. If not given, the user is prompted to type the username.
password : str
Password, optional. If not given, the user is prompted to type the password.
Returns
-------
bool
True if login was successful, False otherwise.
"""
if username is None:
username = self.read_username_from_user_input()
if password is None:
password = self.read_password_from_user_input()
# TODO: implement timeout
self._browser.open(self._url_login)
# Select the signup form
self._browser.select_form('form[action="/' + self._name + '/profil/loginaction"]')
# Fill it out and submit
self._browser['kennung'] = username
self._browser['passwort'] = password
self._browser.submit_selected()
        # redirection to the group page means the login succeeded
        return self._browser.get_url() == self._url
def logout(self):
""" Logs out from current account.
Returns
-------
bool
True if logout was successful, False otherwise
"""
return self._browser_open(self._url_logout)
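    # Hedged usage sketch for this class (group name and credentials are
    # hypothetical; a live connection to kicktipp.de is required):
    #
    #   api = KicktippAPI("mygroup")
    #   if api.login("user", "secret"):
    #       games = api.read_games()  # upcoming matchday
    #       api.logout()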
def read_games(self, matchday=None):
""" Reads data of a matchday from the kicktipp website
Parameters
----------
matchday : int, optional
Number of matchday to be read. If None (default), the upcoming matchday is read.
Returns
-------
pandas.DataFrame
Dataframe containing the games, points and odds
"""
if matchday is None:
url = self._url_tippabgabe
else:
url = self._url_tippabgabe + '?&spieltagIndex=' + str(matchday)
if self._browser_open(url):
soup = self._browser.get_current_page()
data = soup.find_all('td', {'class': 'nw'})
teams = []
points = []
odds = []
dates = []
teams_temp = []
quoten_temp = []
wettquoten_temp = []
dates_temp = []
for element in data:
class_name = None
if len(element.attrs['class']) > 1:
if element.attrs['class'][1] == 'kicktipp-time':
class_name = 'kicktipp-time'
elif element.attrs['class'][0] == 'kicktipp-wettquote':
class_name = 'kicktipp-wettquote'
if class_name == 'kicktipp-time': # a date => new row (new match)
if teams_temp:
teams.append(teams_temp)
teams_temp = []
if quoten_temp:
points.append(quoten_temp)
quoten_temp = []
if wettquoten_temp:
odds.append(wettquoten_temp)
wettquoten_temp = []
if dates_temp:
dates.append(dates_temp)
date = element.text
if date:
dates_temp = date
                elif class_name == 'kicktipp-wettquote':  # betting odds ("Wettquote")
wettquoten_temp.append(float(element.string.replace(',', '.')))
elif class_name is None:
                    if re.match('[0-9]{2} - [0-9]{2} - [0-9]{2}', element.string):  # points ("Quoten")
quoten_temp = re.findall(r'\d+', element.string)
quoten_temp = [int(_) for _ in quoten_temp]
elif not re.match('[0-9]:[0-9]', element.string): # not a score (e.g. '2:1')
# it is a team name
teams_temp.append(element.string)
if teams_temp:
teams.append(teams_temp)
if quoten_temp:
points.append(quoten_temp)
if wettquoten_temp:
odds.append(wettquoten_temp)
if dates_temp:
dates.append(dates_temp)
# Transpose the nested lists
# see https://stackoverflow.com/questions/6473679/transpose-list-of-lists
teams = list(map(list, zip(*teams)))
points = list(map(list, zip(*points)))
odds = list(map(list, zip(*odds)))
# Read matchday number
text = soup.find_all('div', {'class': 'prevnextTitle'})[0].get_text()
r = re.findall(r'\d+\. Spieltag', text)
md_no = None
if r:
md_no = int(re.findall(r'\d+', r[0])[0])
# consistency check
if matchday is not None:
if md_no != matchday:
warnings.warn('Parsed matchday from website does not match the requested value.', UserWarning)
# Create pandas DataFrame
n_games = len(teams[0]) # no of games = no of rows
md_no_col = [md_no]*n_games
col_names = ['matchday', 'date', 'team1', 'team2',
'points_win1', 'points_draw', 'points_win2',
'odds_win1', 'odds_draw', 'odds_win2']
df = pd.DataFrame(columns=col_names)
df['matchday'] = md_no_col
df['date'] = dates
df['team1'] = teams[0]
df['team2'] = teams[1]
if len(points) == 3:
if len(points[0]) == len(points[1]) == len(points[2]) == n_games:
df['points_win1'] = points[0]
df['points_draw'] = points[1]
df['points_win2'] = points[2]
if len(odds) == 3:
if len(odds[0]) == len(odds[1]) == len(odds[2]) == n_games:
df['odds_win1'] = odds[0]
df['odds_draw'] = odds[1]
df['odds_win2'] = odds[2]
return df
else:
return None
def read_predictions(self, member, matchday):
""" Reads predictions from a member for a specific matchday
Parameters
----------
member : str or int
Name or ID of member (see self.members)
matchday : int
Matchday to be read
Returns
-------
pandas.DataFrame
Dataframe containing the predictions
"""
        if isinstance(member, str):  # assume the member name is passed => convert to ID
member_id = self.members[self.members['name'] == member]['id'].item()
else:
member_id = member
url = self._url + 'tippuebersicht/tipper?spieltagIndex=' + str(matchday) + '&rankingTeilnehmerId=' \
+ str(member_id)
if self._browser_open(url):
soup = self._browser.get_current_page()
data = soup.find_all('td', {'class': 'nw'})
tipps = pd.DataFrame(columns=['team1', 'team2', 'tipp1', 'tipp2'])
team1 = []
team2 = []
tipp1 = []
tipp2 = []
team_names_read = 0
for el in data:
if el.string is not None:
if len(el.find_all()) > 0:
# the element has subtags => This is probably a strangely formatted score ("Ergebnis")
# which we will ignore
pass
                elif re.match(r'^[a-zA-Z0-9äöüÄÖÜß._\-\s]+$', el.string):
                    # a team name (may contain umlauts, digits (e.g. "Mainz 05"),
                    # periods (e.g. "1. FC Köln"), hyphens and whitespace)
if team_names_read == 2:
tipp1.append(None)
tipp2.append(None)
team_names_read = 0
if team_names_read == 0:
team1.append(el.string)
team_names_read = 1
elif team_names_read == 1:
team2.append(el.string)
team_names_read = 2
elif re.match('[0-9]:[0-9]', el.string): # a score
tipp_score = self._parse_score(el.string)
tipp1.append(tipp_score[0])
tipp2.append(tipp_score[1])
team_names_read = 0
if team_names_read == 2:
tipp1.append(None)
tipp2.append(None)
tipps['team1'] = team1
tipps['team2'] = team2
tipps['tipp1'] = tipp1
tipps['tipp2'] = tipp2
return tipps
def read_members(self):
""" Reads the members and corresponding IDs and stores it in the pandas.DataFrame self.members
Returns
-------
pandas.DataFrame
Dataframe containing the member's names and IDs
"""
url = self._url + 'gesamtuebersicht'
if self._browser_open(url):
soup = self._browser.get_current_page()
data = soup.find_all('td', {"class": 'name'})
names = []
for el in data:
names.append(str(el.string))
data = soup.find_all('tr', {"class": 'teilnehmer'}) # "TeilnehmerID"
ids = []
for el in data:
ids.append(int(el.attrs['data-teilnehmer-id']))
self.members['name'] = names
self.members['id'] = ids
return self.members
def submit_predictions(self, scores, matchday=None, n_matches=9):
""" Uploads the matchday predictions to the kicktipp website
The user must be logged in.
Parameters
----------
scores : 2-d array with 2 columns
Containing the predicted scores
matchday : int, optional
Number of matchday to be read. If None (default), the upcoming matchday is read.
n_matches : int
Number of matches per matchday, defaults to 9
"""
if matchday is None:
url = self._url_tippabgabe
else:
url = self._url_tippabgabe + '?&spieltagIndex=' + str(matchday)
if self._browser_open(url):
tipp_form = self._browser.select_form('form[id="tippabgabeForm"]')
soup = self._browser.get_current_page()
# Get the names of the individual forms
# The forms have the name "spieltippForms[ID].heimTipp" and "spieltippForms[ID].gastTipp", where ID
# is an integer specifying the individual form.
# Get these IDs from the form:
form_ids = []
for tag in soup.find_all('td', {'class':'kicktipp-tippabgabe'}): # iterate over tags in form
id_ = int(re.findall(r'\d+', tag.find_all()[0]['name'])[0])
form_ids.append(id_)
# Example for tag.find_all()[0]['name']: "spieltippForms[697554851].tippAbgegeben"
n_not_played = len(form_ids) # number of matches of this matchday not played yet
            # iteration starts at "n_matches - n_not_played": matches already played are ignored.
            # E.g. if you submit your scores on Saturday, the score from Friday's match is ignored.
for idx, score in enumerate(scores[n_matches-n_not_played:]):
form_name = 'spieltippForms[' + str(form_ids[idx]) + ']'
tipp_form[form_name + '.heimTipp'] = score[0]
tipp_form[form_name + '.gastTipp'] = score[1]
self._browser.submit_selected()
def _parse_score(self, element) -> list:
""" Generic method to parse a score.
The method tries to pick the appropriate method according to the passed datatype of element
Parameters
----------
element : {str, bs4.element.Tag}
Element to be parsed
Returns
-------
list
List with two values: score of the two teams, e.g. [3, 2]
"""
if isinstance(element, str):
score = self._score_from_str(element) # type: list
else:
score = self._score_from_tag(element) # type: list
return score
@staticmethod
def _score_from_str(score_str) -> list:
        '''
        Parse a score string like "2:1" into a list of two ints, e.g. [2, 1].
        '''
        return [int(x) for x in score_str.split(':')]
# Repository: edu-gp/annotation_tool, file: alchemy/ar/data.py
import itertools
import logging
from collections import namedtuple
from typing import Dict, List
import numpy as np
import pandas as pd
from pandas import DataFrame
from sklearn.metrics import cohen_kappa_score
from sqlalchemy import distinct, func
from alchemy.db.model import (
AnnotationRequest,
AnnotationRequestStatus,
AnnotationType,
AnnotationValue,
ClassificationAnnotation,
)
from alchemy.db.model import Task as NewTask
from alchemy.db.model import (
User,
db,
delete_requests_for_user_under_task,
update_instance,
)
from alchemy.shared.annotation_server_path_finder import (
generate_annotation_server_compare_link,
)
from alchemy.shared.utils import (
PrettyDefaultDict,
)
# Utility namedtuples
UserNameAndIdPair = namedtuple("UserNameAndIdPair", ["username", "id"])
EntityAndAnnotationValuePair = namedtuple(
"EntityAndAnnotationValuePair", ["entity", "value"]
)
def save_new_ar_for_user_db(
dbsession,
task_id,
username,
annotation_requests,
label,
entity_type,
clean_existing=True,
):
user = (dbsession.query(User)
.filter_by(username=username)
.one_or_none())
if not user:
# Should not happen since the annotator usernames are not arbitrary anymore
        raise ValueError(f"Annotator {username} is not registered on the website.")
if clean_existing:
try:
delete_requests_for_user_under_task(
dbsession=dbsession, username=username, task_id=task_id
)
dbsession.commit()
logging.info(
"Deleted requests under user {} for task {}".format(username, task_id)
)
except Exception as e:
dbsession.rollback()
logging.error(e)
raise
try:
# TODO requests were generated in reverse order.
for i, req in enumerate(annotation_requests[::-1]):
"""
Currently the full request looks like:
{
"fname": "myfile.jsonl", <-- (optional)
"line_number": 78, <-- (optional)
"score": 0.11627906976744186,
"entity": "blah",
"data": {
"text": "Blah blah ...",
"meta": {"name": "Blah", "domain": "blah"}
},
"pattern_info": { <-- (optional)
"tokens": ["Blah", "blah", ...],
"matches": [(1, 2, "Blah"), ...],
"score": 0.11627906976744186
}
}
"""
new_request = AnnotationRequest(
user_id=user.id,
entity_type=entity_type,
entity=req["entity"],
label=label,
annotation_type=AnnotationType.ClassificationAnnotation,
task_id=task_id,
context=req,
order=i,
)
dbsession.add(new_request)
dbsession.commit()
except Exception as e:
logging.error(e)
dbsession.rollback()
raise
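# Hedged usage sketch (session, ids and payload below are hypothetical):
#
#   save_new_ar_for_user_db(
#       dbsession=session,
#       task_id=42,
#       username="alice",
#       annotation_requests=[{"entity": "blah", "score": 0.12,
#                             "data": {"text": "Blah blah ..."}}],
#       label="spam",
#       entity_type="domain",
#   )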
def fetch_tasks_for_user_from_db(dbsession, username):
res = (
dbsession.query(AnnotationRequest.task_id, NewTask.name)
.distinct(AnnotationRequest.task_id, NewTask.name)
.join(NewTask)
.join(User)
.filter(User.username == username)
.all()
)
TaskIdAndNamePair = namedtuple("TaskIdAndNamePair", ["task_id", "name"])
return [TaskIdAndNamePair(item[0], item[1]) for item in res]
def fetch_ar_ids(dbsession, task_id, username):
query = (
dbsession.query(AnnotationRequest.id)
.join(User)
.filter(User.username == username, AnnotationRequest.task_id == task_id)
)
res = query.all()
return [item[0] for item in res]
def fetch_ar_id_and_status(dbsession, task_id, username):
query = (
dbsession.query(AnnotationRequest.id, AnnotationRequest.status)
.join(User)
.filter(User.username == username, AnnotationRequest.task_id == task_id)
.order_by(AnnotationRequest.order)
)
return query.all()
def count_ar_under_task_and_user(dbsession, task_id, username):
res = (
dbsession.query(func.count(AnnotationRequest.id))
.join(User)
.filter(User.username == username, AnnotationRequest.task_id == task_id)
.all()
)
return res[0][0]
def count_completed_ar_under_task_and_user(dbsession, task_id, username):
res = (
dbsession.query(func.count(AnnotationRequest.id))
.join(User)
.filter(
User.username == username,
AnnotationRequest.task_id == task_id,
AnnotationRequest.status == AnnotationRequestStatus.Complete,
)
.all()
)
return res[0][0]
# TODO refactor this piece since it's a duplicate of the ar_request function.
def construct_annotation_dict(dbsession, annotation_id) -> Dict:
    # TODO: possible destructuring of None if the query returns no row
annotation_id, entity, entity_type, label, context = (
dbsession.query(
ClassificationAnnotation.id,
ClassificationAnnotation.entity,
ClassificationAnnotation.entity_type,
ClassificationAnnotation.label,
ClassificationAnnotation.context,
)
.filter(ClassificationAnnotation.id == annotation_id)
.one_or_none()
)
result = {
# Essential fields
"annotation_id": annotation_id,
"entity": entity,
"entity_type": entity_type,
"label": label,
# Optional fields
"fname": None,
"line_number": None,
"score": None,
"data": None,
"pattern_info": None,
}
if context is not None and isinstance(context, dict):
result.update(
{
"fname": context.get("fname"),
"line_number": context.get("line_number"),
"score": context.get("score"),
"data": context.get("data") if "data" in context else context,
"pattern_info": context.get("pattern_info"),
}
)
else:
        # TODO this is a temporary workaround: some entities only have
        # annotations but no annotation requests in the local db, and they
        # are not from Salesforce. We can't run a backfill on the context
        # column, so hardcode a text field to show the description on the
        # annotation server.
result.update({"data": {"text": context}})
return result
def construct_ar_request_dict(dbsession, ar_id) -> Dict:
request_id, entity, entity_type, label, context = (
dbsession.query(
AnnotationRequest.id,
AnnotationRequest.entity,
AnnotationRequest.entity_type,
AnnotationRequest.label,
AnnotationRequest.context,
)
.filter(AnnotationRequest.id == ar_id)
.one_or_none()
)
result = {
# Essential fields
"ar_id": request_id,
"entity": entity,
"entity_type": entity_type,
"label": label,
# Optional fields
"fname": None,
"line_number": None,
"score": None,
"data": None,
"pattern_info": None,
}
if context is not None:
result.update(
{
"fname": context.get("fname"),
"line_number": context.get("line_number"),
"score": context.get("score"),
"data": context.get("data"),
"pattern_info": context.get("pattern_info"),
}
)
return result
def get_next_ar_id_from_db(dbsession, task_id, user_id, current_ar_id):
res = (
dbsession.query(AnnotationRequest.id)
.filter(
AnnotationRequest.task_id == task_id,
AnnotationRequest.user_id == user_id,
AnnotationRequest.id > current_ar_id,
)
.order_by(AnnotationRequest.id.asc())
.first()
)
    return res[0] if res is not None else None
def get_next_annotation_id_from_db(dbsession, user_id, current_annotation_id, labels):
res = (
dbsession.query(ClassificationAnnotation.id)
.filter(
ClassificationAnnotation.user_id == user_id,
ClassificationAnnotation.id > current_annotation_id,
ClassificationAnnotation.label.in_(labels),
ClassificationAnnotation.value != AnnotationValue.NOT_ANNOTATED,
)
.order_by(ClassificationAnnotation.id.asc())
.first()
)
    return res[0] if res is not None else None
def build_empty_annotation(ar):
return {"req": ar, "anno": {"labels": {}}}
def mark_ar_complete_in_db(dbsession, ar_id):
logging.info(
"Updating the status of the annotation "
"request {} to {}".format(ar_id, AnnotationRequestStatus.Complete)
)
update_instance(
dbsession=dbsession,
model=AnnotationRequest,
filter_by_dict={"id": ar_id},
update_dict={"status": AnnotationRequestStatus.Complete},
)
# logging.info("Updating the value of the annotation {} to {}".format(
# annotation_id, annotation_result))
# update_instance(dbsession=dbsession,
# model=ClassificationAnnotation,
# filter_by_dict={"id": annotation_id},
# update_dict={"value": annotation_result})
# logging.info("Updated annotation request and result.")
def fetch_user_id_by_username(dbsession, username):
    res = dbsession.query(User.id).filter(User.username == username).one_or_none()
    return res[0] if res is not None else None
def fetch_existing_classification_annotation_from_db(dbsession, annotation_id):
return (
dbsession.query(ClassificationAnnotation.label, ClassificationAnnotation.value)
.filter(ClassificationAnnotation.id == annotation_id)
.one_or_none()
)
def fetch_annotated_ar_ids_from_db(dbsession, task_id, username):
res = (
dbsession.query(AnnotationRequest.id)
.join(User)
.filter(
AnnotationRequest.task_id == task_id,
AnnotationRequest.status == AnnotationRequestStatus.Complete,
User.username == username,
)
.all()
)
return [item[0] for item in res]
def compute_annotation_request_statistics(dbsession, task_id):
total_number_of_outstanding_requests = (
dbsession.query(AnnotationRequest)
.filter(
AnnotationRequest.task_id == task_id,
AnnotationRequest.status == AnnotationRequestStatus.Pending,
)
.count()
)
n_outstanding_requests_per_user = (
dbsession.query(func.count(AnnotationRequest.id), User.username)
.join(User)
.filter(AnnotationRequest.task_id == task_id)
.filter(AnnotationRequest.status == AnnotationRequestStatus.Pending)
.group_by(User.username)
.all()
)
n_outstanding_requests_per_user_dict = {
username: num for num, username in n_outstanding_requests_per_user
}
return {
"total_outstanding_requests": total_number_of_outstanding_requests,
"n_outstanding_requests_per_user": n_outstanding_requests_per_user_dict,
}
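# Shape of the returned statistics (values are illustrative):
#
#   {
#       "total_outstanding_requests": 12,
#       "n_outstanding_requests_per_user": {"alice": 7, "bob": 5},
#   }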
def compute_annotation_statistics_db(dbsession, label, task_id):
total_distinct_annotated_entities = _compute_total_distinct_number_of_annotated_entities_for_label(
dbsession=dbsession, label=label
)
num_of_annotations_done_per_user = _compute_number_of_annotations_done_per_user(
dbsession=dbsession, label=label
)
user_names_mapping = {
username: (f'{first_name or ""} {last_name or ""}'.strip() or username)
for _, username, first_name, last_name, _ in num_of_annotations_done_per_user
}
total_num_of_annotations_done_by_users = sum(
[num for num, username, first_name, last_name, user_id in num_of_annotations_done_per_user]
)
n_annotations_done_per_user_dict = {
user_names_mapping[username]: num
for num, username, first_name, last_name, user_id in num_of_annotations_done_per_user
}
num_of_annotations_per_value = _compute_num_of_annotations_per_value(
dbsession=dbsession, label=label
)
# kappa stats calculation
distinct_users = set(
[
UserNameAndIdPair(username=user_names_mapping[item[1]], id=item[4])
for item in num_of_annotations_done_per_user
]
)
kappa_stats_raw_data = _construct_kappa_stats_raw_data(
db.session, distinct_users, label
)
kappa_matrices = _compute_kappa_matrix(kappa_stats_raw_data)
kappa_analysis_link_dict = _construct_kappa_analysis_link_dict(
kappa_matrices=kappa_matrices, task_id=task_id
)
return {
"total_annotations": total_num_of_annotations_done_by_users,
"total_distinct_annotated_entities": total_distinct_annotated_entities,
"n_annotations_per_value": num_of_annotations_per_value,
"n_annotations_per_user": n_annotations_done_per_user_dict,
"kappa_table": kappa_matrices,
"kappa_analysis_link_dict": kappa_analysis_link_dict,
}
def _compute_num_of_annotations_per_value(dbsession, label):
res = (
dbsession.query(
func.count(ClassificationAnnotation.id), ClassificationAnnotation.value
)
.filter_by(label=label)
.group_by(ClassificationAnnotation.value)
.all()
)
data = PrettyDefaultDict(lambda: 0)
for item in res:
data[item[1]] = item[0]
return data
def _compute_total_distinct_number_of_annotated_entities_for_label(dbsession, label):
"""Note: An "unknown" annotation (of value 0) doesn't count.
"""
query = (
dbsession.query(
ClassificationAnnotation.entity_type, ClassificationAnnotation.entity
)
.filter_by(label=label)
.filter(ClassificationAnnotation.value != 0)
.group_by(ClassificationAnnotation.entity_type, ClassificationAnnotation.entity)
)
return query.count()
def _compute_number_of_annotations_done_per_user(dbsession, label):
num_of_annotations_done_per_user = (
dbsession.query(
func.count(ClassificationAnnotation.id),
User.username,
User.first_name,
User.last_name,
User.id
)
.join(User)
.filter(ClassificationAnnotation.label == label)
.group_by(User.username, User.id)
.all()
)
return num_of_annotations_done_per_user
def _construct_kappa_stats_raw_data(dbsession, distinct_users, label):
entities_and_annotation_values_by_user = _retrieve_entity_ids_and_annotation_values_by_user(
dbsession, distinct_users, label
)
user_pairs = list(itertools.combinations(distinct_users, 2))
kappa_stats_raw_data = {
label: {
tuple(
sorted([user_pair[0].username, user_pair[1].username])
): _retrieve_annotation_with_same_entity_shared_by_two_users(
user_pair[0], user_pair[1], entities_and_annotation_values_by_user
)
for user_pair in user_pairs
}
}
return kappa_stats_raw_data
def _retrieve_entity_ids_and_annotation_values_by_user(dbsession, users, label):
res = (
dbsession.query(
ClassificationAnnotation.entity,
ClassificationAnnotation.value,
ClassificationAnnotation.user_id,
)
.filter(
ClassificationAnnotation.label == label,
ClassificationAnnotation.user_id.in_([user.id for user in users]),
)
.all()
)
data = PrettyDefaultDict(lambda: [])
for item in res:
data[item[2]].append(
EntityAndAnnotationValuePair(entity=item[0], value=item[1])
)
return data
def _retrieve_annotation_with_same_entity_shared_by_two_users(
user1, user2, entities_and_annotation_values_by_user
):
annotations_from_user1 = entities_and_annotation_values_by_user[user1.id]
annotations_from_user2 = entities_and_annotation_values_by_user[user2.id]
dict_of_context_value_from_user1 = {
annotation.entity: annotation.value for annotation in annotations_from_user1
}
dict_of_context_value_from_user2 = {
annotation.entity: annotation.value for annotation in annotations_from_user2
}
intersection = set(dict_of_context_value_from_user1.keys()).intersection(
set(dict_of_context_value_from_user2.keys())
)
intersection = sorted(list(intersection))
if len(intersection) == 0:
return None
values_from_annotations_with_overlapping_context_user1 = [
dict_of_context_value_from_user1[entity] for entity in intersection
]
values_from_annotations_with_overlapping_context_user2 = [
dict_of_context_value_from_user2[entity] for entity in intersection
]
return {
user1.username: values_from_annotations_with_overlapping_context_user1,
user2.username: values_from_annotations_with_overlapping_context_user2,
}
def _compute_kappa_matrix(kappa_stats_raw_data):
"""Compute the kappa matrix for each label and return the html form of the
matrix.
:param user_ids: the user ids
:param kappa_stats_raw_data: raw labeling results per label per user pair
on overlapping annotations
:return: a dictionary of kappa matrix html table per label
Structure of the input:
{
"label1": {
("user_id1", "user_id2"): {
"user_id1": [1, -1, 1, 1, -1],
"user_id2": [-1, 1, 1, -1, 1]
},
("user_id1", "user_id3"): {
"user_id1": [1, -1],
"user_id2": [-1, 1]
}
...
},
"label12": {
("user_id1", "user_id3"): {
"user_id1": [1, -1],
"user_id2": [-1, 1]
},
...
},
...
}
Structure of the final output:
{
"label1": kappa matrix for this label as a pandas dataframe,
"label2": kappa matrix for this label as a pandas dataframe,
...
}
"""
kappa_matrix = PrettyDefaultDict(
lambda: PrettyDefaultDict(lambda: PrettyDefaultDict(float))
)
for label, result_per_user_pair_per_label in sorted(kappa_stats_raw_data.items()):
for user_pair, result_per_user in result_per_user_pair_per_label.items():
if result_per_user is None:
kappa_matrix[label][user_pair[0]][user_pair[1]] = np.nan
kappa_matrix[label][user_pair[1]][user_pair[0]] = np.nan
else:
result_user1 = result_per_user[user_pair[0]]
result_user2 = result_per_user[user_pair[1]]
logging.info(
"Calculating the kappa score for {} and {}".format(
user_pair[0], user_pair[1]
)
                )
                kappa = cohen_kappa_score(result_user1, result_user2)
                kappa_matrix[label][user_pair[0]][user_pair[1]] = kappa
                kappa_matrix[label][user_pair[1]][user_pair[0]] = kappa
    # per the docstring: one kappa matrix per label, as a pandas DataFrame
    return {label: pd.DataFrame(matrix) for label, matrix in kappa_matrix.items()}
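# Illustrative use of _compute_kappa_matrix (user names and values are
# hypothetical):
#
#   raw = {"label1": {("alice", "bob"): {"alice": [1, -1, 1, 1],
#                                        "bob":   [1, -1, -1, 1]}}}
#   result = _compute_kappa_matrix(raw)
#   result["label1"]                  # 2x2 DataFrame over alice/bob
#   result["label1"]["alice"]["bob"]  # == cohen_kappa_score([1, -1, 1, 1],
#                                     #                      [1, -1, -1, 1])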
# Repository: mikusjelly/smafile
import os
import re
__VERSION__ = '0.4.3'
class NotSmaliClassException(Exception):
def __init__(self, clz):
err = '{} is not a class that conforms to the smali grammar.'.format(
clz)
Exception.__init__(self, err)
def smali2java(smali_clz):
if not smali_clz.startswith('L') or not smali_clz.endswith(';'):
raise NotSmaliClassException(smali_clz)
return smali_clz.replace('/', '.')[1:-1]
def java2smali(java_clz):
return 'L{};'.format(java_clz.replace('.', '/'))
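# Round-trip examples for the class-name converters:
#   smali2java('La/b/c;')  -> 'a.b.c'
#   java2smali('a.b.c')    -> 'La/b/c;'
#   smali2java('abc')      -> raises NotSmaliClassException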
class SmaliLine:
'''
    Parses a single line of smali code and returns its components
    according to the opcode syntax.
'''
@staticmethod
def parse(line):
opcode = line.split()[0]
if 'invoke-static' in line:
return SmaliLine.parse_invoke_static(line)
if 'invoke-virtual' in line:
return SmaliLine.parse_invoke_virtual(line)
if 'Ljava/lang/String;->' in line:
return SmaliLine.parse_string(line)
if 'move-result-object' in line:
return SmaliLine.parse_move(line)
if 'iget' in opcode:
return SmaliLine.parse_iget(line)
if 'sget-object' in line:
return SmaliLine.parse_sget_object(line)
if 'const-string' in line:
return SmaliLine.parse_const_string(line)
if 'sget ' in line:
return SmaliLine.parse_sget(line)
print('Could not parse: ' + line)
return
@staticmethod
def parse_const_string(line):
'''
const-string vx,string_id
Puts reference to a string constant identified by string_id into vx.
'''
arrs = line.strip().split()
vx = arrs[1][:-1]
string_id = arrs[2][1:-1]
return (vx, string_id)
@staticmethod
def parse_sget(line):
'''
sget v0, Lcom/cmcc/papp/a/f;->e:I
        @return class name, field name, return type, register name
'''
REG = (
r'sget (?P<rname>.*?), '
r'(?P<cname>.*?;)->(?P<fname>.*?):(?P<rtype>.*)'
)
result = re.match(REG, line.strip())
cname = smali2java(result['cname'])
return cname, result['fname'], result['rtype'], result['rname']
@staticmethod
def parse_invoke_static(line):
'''
        Parse an invoke-static statement.
        @return (calling class name, method name, parameter types, return type, register values)
'''
INVOKE_STATIC_NORMAL = (
r'^invoke-static.*?{(?P<registers>.*?)}, (?P<cname>.*?;)'
r'->(?P<mname>.*?)\((?P<proto>.*?)\)(?P<rtype>.*?)\s*?$')
result = re.match(INVOKE_STATIC_NORMAL, line.strip())
cname = result['cname'].replace('/', '.')[1:-1]
ptypes = SmaliLine.parse_proto(result['proto'])
        rnames = re.sub(r'\s', '', result['registers']).split(',')
return cname, result['mname'], ptypes, result['rtype'], rnames
@staticmethod
def parse_invoke_virtual(line):
'''
        Parse an invoke-virtual statement.
        @return (calling class name, method name, parameter types, return type, register values)
        '''
        INVOKE_VIRTUAL_NORMAL = (
            r'^invoke-virtual.*?{(?P<registers>.*?)}, (?P<cname>.*?;)'
            r'->(?P<mname>.*?)\((?P<proto>.*?)\)(?P<rtype>.*?)\s*?$')
        result = re.match(INVOKE_VIRTUAL_NORMAL, line.strip())
        cname = result['cname'].replace('/', '.')[1:-1]
        ptypes = SmaliLine.parse_proto(result['proto'])
        rnames = re.sub(r'\s', '', result['registers']).split(',')[1:]
return cname, result['mname'], ptypes, result['rtype'], rnames
@staticmethod
def parse_string(line):
'''
        Parse string-related constructor statements.
        @return (first register name, list of register names), or (None, None) if no match
'''
STRING_INIT_REG = (
r'invoke-direct {(?P<registers>.*?)}, '
r'Ljava/lang/String;-><init>\([\[BCI]+\)V')
result = re.match(STRING_INIT_REG, line.strip())
if result:
            rnames = re.sub(r'\s', '', result['registers']).split(',')
return rnames[0], rnames
else:
return None, None
@staticmethod
def parse_proto(proto):
'''
        Parse a method prototype into a list of parameter types.
'''
result = []
PARAMETER_INDIVIDUATOR = r'(\[*(?:[BCDFIJSZ]|L[^;]+;))'
pattern2 = re.compile(PARAMETER_INDIVIDUATOR)
for item in pattern2.finditer(proto):
result.append(item.group())
return result
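    # Example (hypothetical prototype string):
    #   SmaliLine.parse_proto('[BLjava/lang/String;I')
    #   -> ['[B', 'Ljava/lang/String;', 'I']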
@staticmethod
def parse_move(line):
'''
move-result-object vx
        @return the register name vx
'''
return line.strip().split()[1]
@staticmethod
def parse_iget(line):
'''
iget v0, p0, Lcom/a/b/c;->g:I
iget-object v0, p0, Lcom/android/unit/d;->a:Landroid/content/Context;
        @return class name, field name, return type, register name
'''
REG = (
r'iget(?:-object|) (?P<rname>.*?), \w+, '
r'(?P<cname>.*?;)->(?P<fname>.*?):(?P<rtype>.*)'
)
result = re.match(REG, line.strip())
cname = smali2java(result['cname'])
return cname, result['fname'], result['rtype'], result['rname']
@staticmethod
def parse_sget_object(line):
'''
sget-object v0, Lcom/c/a/a/f;->b:[B
        @return class name, field name, return type, register name
'''
REG = (
r'sget-object (?P<rname>.*?), '
r'(?P<cname>.*?;)->(?P<fname>.*?):(?P<rtype>.*)'
)
result = re.match(REG, line.strip())
cname = smali2java(result['cname'])
return cname, result['fname'], result['rtype'], result['rname']
INCLUDE = 2  # include matching files
EXCLUDE = 1  # exclude matching files
NO_OPT = 0   # no filtering
class SmaliDir:
def __init__(self, smali_dirs: list, filters: list, opt: int):
"""初始化smali目录
:param smali_dirs: smali目录列表
:type smali_dirs: list
:param filters: 过滤器,包(a/b/),类(a/b/c),都可以
:type filters: list
:param opt: 如果值为2,那么仅初始化过滤器命中的文件;如果值为1,则初始化过滤器不命中的文件;如果为0,则全部初始化。
:type opt: int
"""
self._files = [] # 存放已解析的smali文件
self.filters = filters
self.opt = opt
counter = 0
for item in self.filters:
filters[counter] = item.replace('.', os.sep)
filters += 1
for smali_dir in smali_dirs:
self.init_smali_dir(smali_dir)
def init_smali_dir(self, smali_dir):
for parent, _, filenames in os.walk(smali_dir):
for filename in filenames:
if not filename.endswith('.smali'):
continue
filepath = os.path.join(parent, filename)
if self.opt == INCLUDE:
for item in self.filters:
if item in filepath:
sf = SmaliFile(filepath)
self._files.append(sf)
break
elif self.opt == EXCLUDE:
for item in self.filters:
if item in filepath:
break
else:
sf = SmaliFile(filepath)
self._files.append(sf)
else:
sf = SmaliFile(filepath)
self._files.append(sf)
def __len__(self):
return len(self._files)
def __getitem__(self, index):
return self._files[index]
def __setitem__(self, index, smali_file):
self._files[index] = smali_file
def get_smali_file(self, clz_name):
for sf in self._files:
if clz_name == sf.get_class():
return sf
def get_method_from_desc(self, full_desc):
clz_name, mtd_desc = full_desc.split('->')
sf = self.get_smali_file(clz_name)
if sf:
return sf.get_method(mtd_desc)
def get_method(self, clz_name, mtd_desc):
'''
Lcom/android/mtp/rp/a;
a([B)Ljava/security/Key;
'''
sf = self.get_smali_file(clz_name)
if sf:
return sf.get_method(mtd_desc)
def get_field(self, field_desc):
clz_name = field_desc.split('->')[0]
sf = self.get_smali_file(clz_name)
if sf:
return sf.get_field(field_desc)
    def xref(self, desc):
        '''Find all SmaliFile objects that reference the given class, method or field.'''
        sfs = []
        for sf in self._files:
            if desc in sf.get_content():
                sfs.append(sf)
        return sfs
def update_desc(self, desc, new_desc):
"""找出所有引用了该类、方法、变量的SmaliFile,并更新
:param desc: [description]
:type desc: [type]
:param new_desc: [description]
:type new_desc: [type]
"""
arrs = []
old = None
new = None
        # Methods and fields carry a '->' separator.
if '->' in desc:
arrs = desc.split('->')
            old = ' ' + arrs[1]  # old member name
            new = ' ' + new_desc.split('->')[1]  # new member name
is_inner_class = '$' in desc
for sf in self._files:
file_path = None
            # plain class-name replacement
if old is not None:
if arrs and str(sf) == arrs[0] and old in sf.get_content():
sf.set_content(sf.get_content().replace(old, new))
sf.set_modified(True)
if desc in sf.get_content():
sf.set_content(sf.get_content().replace(desc, new_desc))
sf.set_modified(True)
if desc == str(sf):
smali_dir = os.path.dirname(sf.get_file_path())
file_path = os.path.join(
smali_dir, *new_desc[1:-1].split('/')) + '.smali'
if is_inner_class and desc in str(sf):
                # When renaming an inner class, the InnerClass annotation
                # must be updated as well:
                # .annotation system Ldalvik/annotation/InnerClass;
                #     accessFlags = 0x0
                #     name = "NewInnerClassName"
                # .end annotation
n = desc.split('$')[1][:-1]
native_str = str(n.encode('unicode-escape'), 'utf-8')
old_name_dsm = 'name = "{}"'.format(native_str)
new_name_dsm = 'name = "{}"'.format(
new_desc.split('$')[1][:-1])
sf.set_content(sf.get_content().replace(
old_name_dsm, new_name_dsm))
sf.set_modified(True)
if sf.get_modified():
sf.save(file_path)
class SmaliFile:
    def __init__(self, file_path):
        # path to the smali file, used when writing updates back
        self._file_path = file_path
        self._dir = None
        self.source_file = file_path
        # whether the file has been edited; edited files need saving
        self._modified = False
        # smali class name - classes, methods and parameters all use smali format
        self._class = None  # La/b/c;
        # c, not La/b/c;
        self._name = None
        # superclass name
        self._supper_class = None
        # package name
        self.__package = None
        # implemented interfaces
        self._interfaces = []
        # methods
        self._methods = []
        # fields
        self._fields = []
        # file content
        self._content = None
        self.parse()
def __str__(self):
return self._class
def get_package(self):
return self.__package
def get_file_path(self):
return self._file_path
def set_modified(self, modified):
self._modified = modified
def get_modified(self):
return self._modified
def get_class(self):
return self._class
def set_class(self, clz):
'''
        TODO: automatically update the class on fields and methods,
        and rename the file accordingly.
'''
self._class = clz
def get_supper(self):
return self._supper_class
def get_interfaces(self):
return self._interfaces
def get_fields(self):
return self._fields
def get_methods(self):
return self._methods
def get_content(self):
return self._content
def set_content(self, content):
self._content = content
def get_method(self, mtd_sign):
for mtd in self._methods:
if mtd_sign in str(mtd):
return mtd
def get_field(self, field_desc):
for field in self._fields:
if field_desc == str(field):
return field
def get_dir(self):
return self._dir
def parse(self):
self._dir = os.path.dirname(self._file_path)
with open(self._file_path, 'r', encoding='utf-8') as f:
self._content = re.sub(r'\s*?\.line \d+', '', f.read())
p = re.compile(r'^\.class[a-z\s]+(.+)')
line = p.search(self._content).groups()
self._class = line[0]
self.sign = line[0]
p = re.compile(r'\.super (.+)')
line = p.search(self._content).groups()
self._supper_class = line[0]
# interfaces_regex = '\.implements (L[^;]+;)'
# p = re.compile(interfaces_regex)
# for i in p.finditer(self.content):
# self.interfaces.append(i.group().replace('.implements ', ''))
field_regex = r'\.field .*?(?=\n)'
p = re.compile(field_regex)
p1 = re.compile(r'\.field[\w| |]*(?=\s)\s(.+):(.+)')
p2 = re.compile(r'\.field[\w| |]*(?=\s)\s(.+):(.+) = (.+)')
for i in p.finditer(self._content):
line = i.group()
field_name = None
field_type = None
field_value = None
if '=' in line:
field_name, field_type, field_value = p2.match(line).groups()
else:
try:
field_name, field_type = p1.match(line).groups()
except AttributeError as e:
print(self._file_path)
print(line, e)
sf = SmaliField(class_name=self._class)
sf.set_declaration_sm(line)
self._fields.append(sf)
mtd_ptn = r'\n\.method (.*)'
mtd_prog = re.compile(mtd_ptn)
for item in mtd_prog.finditer(self._content, re.M):
line = item.group()
escape_line = re.escape(line)
mbody_ptn = r'%s\n(.*?)\.end method' % escape_line
mbody_prog = re.compile(mbody_ptn, re.DOTALL)
body = mbody_prog.search(self._content).groups()[0]
sm = SmaliMethod(self._class, item.groups()[0], body)
self._methods.append(sm)
# @staticmethod
# def get_mbody_ptn(mtd_line):
# '''get method body pattern'''
# mbody_ptn = r'%s\n(.*?)\.end method' % (re.escape(mtd_line))
# return mbody_ptn
# return r'\.method .*?%s((?!\.end method)[.\s\S])*.end method' %
# (re.escape(mtd_ptn))
def save(self, file_path=None):
new_path = file_path if file_path else self._file_path
new_dir = os.path.dirname(new_path)
if not os.path.exists(new_dir):
os.makedirs(new_dir)
        # write the new file
with open(new_path, 'w', encoding='utf-8') as f:
f.write(self._content)
self._modified = False
        # remove the old file
if new_path != self._file_path:
os.remove(self._file_path)
self._file_path = file_path
self._fields.clear()
self._methods.clear()
self.parse()
def update(self):
'''
        Update the smali file: flush the in-memory contents of modified
        fields and methods back to disk.
'''
for f in self._fields:
if not f.get_modified():
continue
self._update_field(f)
f.set_modified(False)
for mtd in self._methods:
if not mtd.get_modified():
continue
self._update_method(mtd)
mtd.set_modified(False)
with open(self._file_path, 'w', encoding='utf-8') as f:
f.write(self._content)
def _update_field(self, sfield):
'''
        Update the given member field of the smali file (in memory only).
'''
rsm = sfield.get_reference_sm()
if '[Ljava/lang/String;' == sfield.get_type():
codes = SmaliFile.__genarate_string_array_codes(
sfield.get_value(), rsm)
mtd = self.get_method(sfield.get_class() + '-><clinit>()V')
mtd.set_body(mtd.get_body().replace('return-void', codes))
self._update_method(mtd)
return
        # Below: update a string field.
        # Update the declaration statement.
old_sm = sfield.get_old_declaration_sm()
sm = sfield.get_declaration_sm()
self._content = self._content.replace(old_sm, sm)
        # Update methods: delete all statements that assign to this field,
        # to avoid decompilation failures.
ptn = r'\wput-object .*?, {}'.format(rsm)
for mtd in self._methods:
body = mtd.get_body()
if not re.search(ptn, body):
continue
body = re.sub(ptn, '', body)
mtd.set_body(body)
mtd.set_modified(True)
@staticmethod
def __genarate_string_array_codes(str_arr, rsm):
'''
        Generate smali code that rebuilds the given string array.
'''
num = len(str_arr)
snippet = '''
const/16 v0, {}
new-array v0, v0, [Ljava/lang/String;
'''.format(hex(num))
        # Note:
        # const/4 can only represent -8..7, so it would break for arrays
        # with more than 8 entries; const/16 can represent larger values.
part = '''
const/16 v1, {}
const-string v2, "{}"
aput-object v2, v0, v1
'''
        '''
        codes = snippet
        for index, value in enumerate(str_arr):
            codes += part.format(hex(index), value)
        # store the rebuilt array into the field (rsm) and keep the trailing
        # return-void, since _update_field substitutes this block for it
        codes += '\n    sput-object v0, {}\n\n    return-void'.format(rsm)
        return codes
# Repository: BitWorks/xbrlstudio
"""
:mod: 'BookModel'
~~~~~~~~~~~~~~~~~
.. py:module:: BookModel
:copyright: Copyright BitWorks LLC, All rights reserved.
:license: MIT
:synopsis: Collection of PyQt models used by XBRLStudio
:description: Contains the following classes:
BookTableModel - model for numerical and textual XBRLStudio tables
BookEntityTreeModel - model for the entity tree
BookEntityTreeItem - model for an item within the entity tree
BookFilingTreeModel - model for the filing tree
BookFilingTreeItem - model for an item within the filing tree
BookLineEdit - custom QLineEdit with overridden contextMenuEvent()
BookTableViewDelegate - delegate object for table editors and models (see Qt model/view documentation)
"""
try:
import copy, sys, os, datetime, logging
model_logger = logging.getLogger()
from PySide2 import (QtCore, QtWidgets, QtGui)
# Tiered
# from . import BookFilingUtility
# Flat
import BookFilingUtility
except Exception as err:
model_logger.error("{0}:BookModel import error:{1}".format(str(datetime.datetime.now()), str(err)))
class BookTableModel(QtCore.QAbstractTableModel):
"""
BookTableModel
~~~~~~~~~~~~~~
Customized sub-class of QAbstractTableModel; this class implements all the required model functions, as well as
a function for inserting a selected entity/filing pair into the table model
Functions
~~~~~~~~~
rowCount(self, parent) - returns row_count, the number of rows in the table
addRows(self, num_rows) - appends user-defined number of rows to the table
columnCount(self, parent) - returns the number of columns in the table, based on the table type
data(self, index, role) - returns the items object at the given index position, for the given role
headerData(self, section, orientation, role) - sets labels for the columns, and other column attributes
setData(self, index, value, role) - assigns an items object to be equal to the given value, according to position (index) and role
insertFilingIntoTable(self, current_cik, current_period) - inserts the selected entity name and filing into items
flags(self, index) - function for QAbstractTableModel flags; defines aspects of the different columns
setHeaderData(self, section, orientation, value, role) - default implementation of QAbstractTableModel.setHeaderData
fillDown(self, index, fill_text) - fills a column (downwards) with text in fill_text at given index
viewAll(self) - sets all rows to be viewed in table's graphic (numerical or textual)
Attributes
~~~~~~~~~~
book_table_view - (BookView.BookTableView type); view for instances of this model
row_count - (int type); number of rows (initialized to be 10)
items - (list type); main model collection of bool, string, and int values for use by the view
view_indices - (list type); used (e.g., by BookView.BookMainWindow instance) to create persistent checkboxes in 'View' column
sub_items - (list type); used during the initial creation of the items matrix (items[row][column])
"""
def __init__(self, book_table_view):
model_logger.info("{0}:Initializing BookTableModel".format(str(datetime.datetime.now())))
QtCore.QAbstractTableModel.__init__(self)
self.book_table_view = book_table_view
self.row_count = 10
self.items = []
self.view_indices = []
#cik, period, view, entity, filing, fact, context, value, unit, dec
self.sub_items = [None, None, None, None, None, None, None, None, None, None]
for i in range(0, self.row_count):
self.items.append(copy.deepcopy(self.sub_items))
if self.book_table_view.objectName() == "numericalTableView":
self.setObjectName("numericalTableModel")
elif self.book_table_view.objectName() == "textualTableView":
self.setObjectName("textualTableModel")
else:
model_logger.error("{0}:BookTableModel.book_table_view.objectName(): unacceptable return value".format(str(datetime.datetime.now())))
def rowCount(self, parent):
return self.row_count
def addRows(self, num_rows):
self.layoutAboutToBeChanged.emit()
for i in range(self.row_count, self.row_count + num_rows):
self.items.append(copy.deepcopy(self.sub_items))
self.row_count += num_rows
self.layoutChanged.emit()
return
def columnCount(self, parent):
if self.objectName() == "textualTableModel":
return 5
elif self.objectName() == "numericalTableModel":
return 8
else:
model_logger.error("{0}:BookTableModel.columnCount(): unacceptable object name".format(str(datetime.datetime.now())))
def data(self, index, role):
if not index.isValid():
model_logger.warning("{0}:BookTableModel.data(): invalid index".format(str(datetime.datetime.now())))
return
if index.column() == 0:
if len(self.view_indices) < self.row_count:
row = len(self.view_indices)
while row < self.row_count:
self.view_indices.append(index.sibling(row, 0))
row += 1
if role == QtCore.Qt.DisplayRole:
pass
elif role == QtCore.Qt.DecorationRole:
return QtCore.Qt.AlignCenter
elif role == QtCore.Qt.EditRole:
return self.items[index.row()][index.column() + 2]
elif index.column() in (1, 2, 3, 4):
if role in (QtCore.Qt.DisplayRole, QtCore.Qt.EditRole):
return self.items[index.row()][index.column() + 2]
elif index.column() in (5, 6, 7):
if role == QtCore.Qt.DisplayRole:
if self.objectName() == "numericalTableModel":
return self.items[index.row()][index.column() + 2]
elif self.objectName() == "textualTableModel":
return None
if index.column() == 5:
if role == QtCore.Qt.TextAlignmentRole:
return QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter
if index.column() == 6 or index.column() == 7:
if role == QtCore.Qt.TextAlignmentRole:
return QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter
else:
model_logger.warning("{0}:BookTableModel.data(): invalid index.column()".format(str(datetime.datetime.now())))
return
def headerData(self, section, orientation, role):
if orientation == QtCore.Qt.Horizontal:
if section == 0:
if role == QtCore.Qt.DisplayRole:
return "View"
elif role == QtCore.Qt.SizeHintRole:
return QtCore.QSize(45, 25)
elif section == 1:
if role == QtCore.Qt.DisplayRole:
return "Entity"
elif role == QtCore.Qt.SizeHintRole:
return QtCore.QSize(100, 25)
elif section == 2:
if role == QtCore.Qt.DisplayRole:
return "Filing"
elif role == QtCore.Qt.SizeHintRole:
return QtCore.QSize(100, 25)
elif section == 3:
if role == QtCore.Qt.DisplayRole:
return "Fact"
elif role == QtCore.Qt.SizeHintRole:
return QtCore.QSize(100, 25)
elif section == 4:
if role == QtCore.Qt.DisplayRole:
return "Context"
elif role == QtCore.Qt.SizeHintRole:
return QtCore.QSize(100, 25)
elif section == 5:
if role == QtCore.Qt.DisplayRole:
return "Value"
elif role == QtCore.Qt.SizeHintRole:
return QtCore.QSize(100, 25)
elif section == 6:
if role == QtCore.Qt.DisplayRole:
return "Unit"
elif role == QtCore.Qt.SizeHintRole:
return QtCore.QSize(100, 25)
elif section == 7:
if role == QtCore.Qt.DisplayRole:
return "Dec"
elif role == QtCore.Qt.SizeHintRole:
return QtCore.QSize(100, 25)
elif orientation == QtCore.Qt.Vertical:
if role == QtCore.Qt.DisplayRole:
return str(section + 1)
elif role == QtCore.Qt.SizeHintRole:
return QtCore.QSize(40, 25)
return
def setData(self, index, value, role):
try:
if value is not None and index.isValid():
if index.column() == 0: #view toggled
self.book_table_view.closePersistentEditor(index)
self.items[index.row()][2] = value
self.book_table_view.openPersistentEditor(index)
self.book_table_view.refreshGraphic()
elif index.column() == 1: #entity selected (set cik)
# TODO - rearrange for this to be cik_name_dict ({cik:name})
name_cik_dict = self.book_table_view.book_main_window.cntlr.book_filing_manager.getEntityDict()
current_cik = int(name_cik_dict[value])
self.items[index.row()][0] = current_cik
self.items[index.row()][3] = value
elif index.column() == 2: #filing selected (set period)
current_period = value.split("-")[1].lower() + value.split("-")[0]
self.items[index.row()][1] = current_period
self.items[index.row()][4] = value
elif index.column() == 3: #fact selected
self.items[index.row()][5] = value
elif index.column() == 4: #context selected (set value and unit)
self.items[index.row()][6] = value
current_cik = self.items[index.row()][0]
current_period = self.items[index.row()][1]
current_filing = self.book_table_view.book_main_window.cntlr.book_filing_manager.getFiling(current_cik, current_period)
current_fact_name = self.items[index.row()][5]
current_context = str(value)
current_facts = set()
if current_filing is not None:
for fact_item in current_filing.facts:
if fact_item.label == current_fact_name:
if fact_item.context_ref == current_context:
current_facts.add(fact_item)
if len(current_facts) == 1:
if self.objectName() == "numericalTableModel":
self.items[index.row()][7] = list(current_facts)[0].value
self.items[index.row()][8] = list(current_facts)[0].unit_ref
self.items[index.row()][9] = list(current_facts)[0].dec
elif self.objectName() == "textualTableModel":
self.items[index.row()][7] = list(current_facts)[0].value
self.items[index.row()][8] = list(current_facts)[0].unit_ref
self.items[index.row()][9] = list(current_facts)[0].dec
elif len(current_facts) != 1:
return False
self.book_table_view.refreshGraphic()
return True
except Exception as err:
model_logger.error("{0}:BookTableModel.setData():{1}".format(str(datetime.datetime.now()), str(err)))
return
def insertFilingIntoTable(self, current_cik, current_period):
try:
current_filing = self.book_table_view.book_main_window.cntlr.getFiling(current_cik, current_period)
entity_name = self.book_table_view.book_main_window.cntlr.getNameFromCik(current_cik)
pretty_filing_period = current_period[2:6] + "-" + current_period[0:2].upper()
            i = 0
            while i < self.row_count:
                if self.items[i][0] is None:
                    self.items[i][0] = int(current_cik)
                    self.items[i][1] = current_period
                    self.items[i][3] = entity_name
                    self.items[i][4] = pretty_filing_period
                    self.book_table_view.update()
                    break
                i += 1
except Exception as err:
model_logger.error("{0}:BookTableModel.insertFilingIntoTable():{1}".format(str(datetime.datetime.now()), str(err)))
def flags(self, index):
try:
if not index.isValid():
model_logger.info("{0}:BookTableModel.flags(): invalid index".format(str(datetime.datetime.now())))
return QtCore.Qt.ItemIsEnabled
if index.column() == 0:
return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsEditable | QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsUserCheckable
elif index.column() == 1 or index.column() == 2:
return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsEditable | QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsDropEnabled
elif index.column() == 3 or index.column() == 4:
return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsEditable | QtCore.Qt.ItemIsSelectable
elif index.column() == 5 or index.column() == 6 or index.column() == 7:
return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsEditable | QtCore.Qt.ItemIsSelectable
else:
model_logger.warning("{0}:BookTableModel.flags(): invalid index.column()".format(str(datetime.datetime.now())))
except Exception as err:
model_logger.error("{0}:BookTableModel.flags():{1}".format(str(datetime.datetime.now()), str(err)))
def setHeaderData(self, section, orientation, value, role):
QtCore.QAbstractTableModel.setHeaderData(self, section, orientation, value, role)
return
def fillDown(self, index, fill_text):
try:
row_pos = index.row()
col_pos = index.column()
num_rows = self.row_count - row_pos
if num_rows > 0:
try:
                    if fill_text is not None and fill_text != "":
i = row_pos
while i < self.row_count:
current_index = self.createIndex(i, col_pos)
self.setData(current_index, fill_text, QtCore.Qt.EditRole)
i += 1
self.book_table_view.update()
except Exception as err:
model_logger.error("{0}:BookTableModel.fillDown():{1}".format(str(datetime.datetime.now()), str(err)))
except Exception as err:
model_logger.error("{0}:BookTableModel.fillDown():{1}".format(str(datetime.datetime.now()), str(err)))
return
def viewAll(self):
try:
i = 0
while i < self.row_count:
current_index = self.createIndex(i, 0)
self.setData(current_index, True, QtCore.Qt.EditRole)
i += 1
self.book_table_view.update()
except Exception as err:
model_logger.error("{0}:BookTableModel.viewAll():{1}".format(str(datetime.datetime.now()), str(err)))
return
class BookEntityTreeModel(QtGui.QStandardItemModel):
"""
BookEntityTreeModel
~~~~~~~~~~~~~~~~~~~
Customized sub-class of QStandardItemModel
Functions
~~~~~~~~~
itemMoved(self, item_changed) - updates parent_cik for the entity moved
populateRawItems(self) - uses the controller to query the database and get entity tree information
renameItem(self, target_cik, new_name) - uses the controller to rename an entity in the database
flags(self, index) - enables drag and drop for entity tree, for reorganization of entity hierarchy
Attributes
~~~~~~~~~~
book_main_window - (BookView.BookMainWindow type); for accessing main window object, such as controller
raw_items - (list type); list of tuples in the format [(entity_cik, | |
"""
:mod:`transformer.python` -- Python Syntax Tree
===============================================
Transformer's Python Syntax Tree framework allows you to create and manipulate
Python source code without bothering with irrelevant, style-related details.
It is the main API for writing :term:`OnPythonProgram` plugins.
A non-goal of this framework is *customization of style*: users should rely on
an external tool (such as `black`_) if they need style customization of their
generated locustfile.
.. _black: https://github.com/ambv/black
"""
import re
from types import MappingProxyType
from typing import (
Sequence,
Mapping,
Any,
List,
Type,
Set,
Optional,
Tuple,
cast,
Iterable,
Callable,
TypeVar,
ClassVar,
)
from dataclasses import dataclass
IMMUTABLE_EMPTY_DICT = MappingProxyType({})
@dataclass
class Line:
"""
A line of text and its associated indentation level.
    This class avoids constantly copying strings when adding a new indentation
    level at every scope of the syntax tree.
.. attribute:: text
:any:`str` -- Text contained by this line.
.. attribute:: indent_level
:any:`int` -- Indentation level of :attr:`text` in the line.
"""
text: str
indent_level: int = 0
INDENT_UNIT: ClassVar[str] = " " * 4
def __str__(self) -> str:
"""
Textual representation of this line, with :attr:`text` indented
according to :attr:`indent_level`.
"""
return f"{self.INDENT_UNIT * self.indent_level}{self.text}"
def clone(self) -> "Line":
"""
Creates an exact but disconnected copy of self.
Useful in tests.
"""
return type(self)(text=self.text, indent_level=self.indent_level)
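# Example: indentation is only applied when the line is rendered.
#
#   >>> str(Line("x = 1", indent_level=2))
#   '        x = 1'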
def _resplit(parts: Iterable[str]) -> List[str]:
"""
Given a list of strings, returns a list of lines, by splitting each string
into multiple lines where it contains newlines.
>>> _resplit([])
[]
>>> _resplit(['a', 'b'])
['a', 'b']
>>> _resplit(['a', 'b\\nc\\nd'])
['a', 'b', 'c', 'd']
"""
return [line for part in parts for line in part.splitlines()]
class Statement:
"""
Python distinguishes between statements and expressions: basically,
statements cannot be assigned to a variable, whereas expressions can.
For our purpose, another distinction is important: statements may span over
multiple lines (and not just for style), whereas all expressions can be
expressed in a single line.
This class serves as abstract base for all implementors of :meth:`lines` and
handles comment processing for them.
"""
def __init__(self, comments: Sequence[str] = ()) -> None:
"""
:param comments: Comment lines attached to this statement.
"""
self._comments = _resplit(comments)
@property
def comments(self) -> List[str]:
"""
Comment lines attached to this statement.
This is a :class:`property` to ensure that modifications of this list
preserve the invariant "one element = one line".
"""
self._comments = _resplit(self._comments)
return self._comments
@comments.setter
def comments(self, value: List[str]):
self._comments = value
def lines(self, indent_level: int = 0, comments: bool = True) -> List[Line]:
"""
All Line objects necessary to represent this Statement, along with the
appropriate indentation level.
:param indent_level: How much indentation to apply to the least indented
line of this statement.
:param comments: Whether existing comments attached to *self* should be
included in the result.
"""
raise NotImplementedError
def comment_lines(self, indent_level: int) -> List[Line]:
"""
Converts self.comments from str to Line with ``#`` prefixes.
"""
return [Line(f"# {s}", indent_level) for s in self.comments]
def attach_comment(self, line: Line) -> List[Line]:
"""
Attach a comment to *line*: inline if *self.comments* is just one line,
on dedicated new lines above otherwise.
"""
comments = self.comments
if not comments:
return [line]
if len(comments) == 1:
line.text += f" # {comments[0]}"
return [line]
lines = self.comment_lines(line.indent_level)
lines.append(line)
return lines
def __eq__(self, o: object) -> bool:
return (
isinstance(o, self.__class__)
and self.comments == cast(__class__, o).comments
)
# Handy alias for type signatures.
Program = Sequence[Statement]
class OpaqueBlock(Statement):
"""
A block of code already represented as a string.
    This helps move existing code (e.g. in plugins) from our ad-hoc
"blocks of code" framework to the syntax tree framework defined in this
module.
    It also makes it possible to express Python constructs that are not yet
representable with this AST framework.
"""
PREFIX_RX = re.compile(r"\s+")
TAB_SIZE = 8
def __init__(self, block: str, comments: Sequence[str] = ()) -> None:
"""
:param block: String representing a block of Python code.
"""
super().__init__(comments)
if not block.strip():
raise ValueError(f"OpaqueBlock can't be empty but got {block!r}")
self.block = block
def lines(self, indent_level: int = 0, comments: bool = True) -> List[Line]:
raw_lines = [line.expandtabs(self.TAB_SIZE) for line in self.block.splitlines()]
first_nonempty_line = next(i for i, l in enumerate(raw_lines) if l.strip())
after_last_nonempty_line = next(
len(raw_lines) - i for i, l in enumerate(reversed(raw_lines)) if l.strip()
)
raw_lines = raw_lines[first_nonempty_line:after_last_nonempty_line]
indents = [self.PREFIX_RX.match(line) for line in raw_lines]
shortest_indent = min(len(p.group()) if p else 0 for p in indents)
block_lines = [Line(line[shortest_indent:], indent_level) for line in raw_lines]
if comments:
return [*self.comment_lines(indent_level), *block_lines]
return block_lines
def __repr__(self) -> str:
return "{}({!r}, comments={!r})".format(
self.__class__.__qualname__, self.block, self.comments
)
def __eq__(self, o: object) -> bool:
return super().__eq__(o) and self.block == cast(__class__, o).block
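# A minimal dedent sketch for OpaqueBlock (assumes only the classes defined in
# this module): leading/trailing blank lines are dropped and the shortest
# common leading whitespace is removed, so the block can be re-indented via
# indent_level.
#
# >>> [l.text for l in OpaqueBlock("\n    x = 1\n        y = 2\n").lines()]
# ['x = 1', '    y = 2']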
class Function(Statement):
"""
A function definition (``def ...``).
"""
def __init__(
self,
name: str,
params: Sequence[str],
statements: Sequence[Statement],
comments: Sequence[str] = (),
) -> None:
"""
:param name: Name of this function.
:param params: Names of each parameter of this function.
:param statements: Body of this function.
"""
super().__init__(comments)
self.name = name
self.params = list(params)
self.statements = list(statements)
def lines(self, indent_level: int = 0, comments: bool = True) -> List[Line]:
param_list = ", ".join(self.params)
body_lines = [
line
for stmt in self.statements
for line in stmt.lines(indent_level + 1, comments)
] or [Line("pass", indent_level + 1)]
top = Line(f"def {self.name}({param_list}):", indent_level)
if comments:
return [*self.attach_comment(top), *body_lines]
return [top, *body_lines]
def __repr__(self) -> str:
return "{}(name={!r}, params={!r}, statements={!r}, comments={!r})".format(
self.__class__.__qualname__,
self.name,
self.params,
self.statements,
self.comments,
)
def __eq__(self, o: object) -> bool:
return (
super().__eq__(o)
and self.name == cast(__class__, o).name
and self.params == cast(__class__, o).params
and self.statements == cast(__class__, o).statements
)
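# A minimal usage sketch (assuming Line exposes .text and .indent_level as
# used above): a single attached comment is rendered inline by
# attach_comment, and an empty body would render as "pass".
#
# >>> fn = Function("greet", params=["name"],
# ...               statements=[OpaqueBlock('print("hello, " + name)')],
# ...               comments=["Say hello."])
# >>> for line in fn.lines():
# ...     print("    " * line.indent_level + line.text)
# def greet(name): # Say hello.
#     print("hello, " + name)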
class Decoration(Statement):
"""
A function or class definition to which is applied a decorator
(e.g. ``@task``).
"""
def __init__(
self, decorator: str, target: Statement, comments: Sequence[str] = ()
) -> None:
"""
:param decorator: Name of the decorator applied to *target*.
:param target: Function or class definition to which is applied
*decorator*.
"""
super().__init__(comments)
self.decorator = decorator
self.target = target
def lines(self, indent_level: int = 0, comments: bool = True) -> List[Line]:
top = Line(f"@{self.decorator}", indent_level)
target_lines = self.target.lines(indent_level, comments)
if comments:
return [*self.attach_comment(top), *target_lines]
return [top, *target_lines]
def __repr__(self) -> str:
return "{}({!r}, {!r}, comments={!r})".format(
self.__class__.__qualname__, self.decorator, self.target, self.comments
)
def __eq__(self, o: object) -> bool:
return (
super().__eq__(o)
and self.decorator == cast(__class__, o).decorator
and self.target == cast(__class__, o).target
)
class Class(Statement):
"""
A class definition.
"""
def __init__(
self,
name: str,
statements: Sequence[Statement],
superclasses: Sequence[str] = (),
comments: Sequence[str] = (),
) -> None:
"""
:param name: Name of this class.
:param statements: Fields of this class: methods, attributes, etc.
:param superclasses: Names of each superclass of this class.
In fact anything in the "function argument" format can be used here,
like keyword-arguments (but in a string!).
"""
super().__init__(comments)
self.name = name
self.statements = list(statements)
self.superclasses = list(superclasses)
def lines(self, indent_level: int = 0, comments: bool = True) -> List[Line]:
superclasses = ""
if self.superclasses:
superclasses = "({})".format(", ".join(self.superclasses))
body = [
line
for stmt in self.statements
for line in stmt.lines(indent_level + 1, comments)
] or [Line("pass", indent_level + 1)]
top = Line(f"class {self.name}{superclasses}:", indent_level)
if comments:
return [*self.attach_comment(top), *body]
return [top, *body]
def __repr__(self) -> str:
return (
"{}(name={!r}, statements={!r}, " "superclasses={!r}, comments={!r})"
).format(
self.__class__.__qualname__,
self.name,
self.statements,
self.superclasses,
self.comments,
)
def __eq__(self, o: object) -> bool:
return (
super().__eq__(o)
and self.name == cast(__class__, o).name
and self.statements == cast(__class__, o).statements
and self.superclasses == cast(__class__, o).superclasses
)
class Expression:
"""
See the documentation of :class:`Statement` for why Expression is a separate
class.
An expression is still a statement in Python (e.g. functions can be called
anywhere), but this :class:`Expression` class is **not** a
:class:`Statement` because we can't attach comments to arbitrary expressions
(e.g. between braces).
If you need to use an :class:`Expression` as a :class:`Statement`,
see the :class:`Standalone` wrapper class.
This class serves as abstract base for all our implementors of
:meth:`__str__`.
"""
def __str__(self) -> str:
raise NotImplementedError
def __eq__(self, o: object) -> bool:
return isinstance(o, self.__class__)
class Standalone(Statement):
"""
Wraps an :class:`Expression` so that it can be used as a :class:`Statement`.
"""
def set_cm(self, cm) :
self.__CM = cm
class InnerClassesAttribute(BasicAttribute) :
def __init__(self, class_manager, buff) :
self.__CM = class_manager
super(InnerClassesAttribute, self).__init__()
# u2 attribute_name_index;
# u4 attribute_length
# u2 number_of_classes;
self.number_of_classes = SV( '>H', buff.read(2) )
# { u2 inner_class_info_index;
# u2 outer_class_info_index;
# u2 inner_name_index;
# u2 inner_class_access_flags;
# } classes[number_of_classes];
self.__classes = []
for i in range(0, self.number_of_classes.get_value()) :
self.__classes.append( InnerClassesDesc( self.__CM, buff ) )
def get_classes(self) :
return self.__classes
def show(self) :
print self.number_of_classes
for i in self.__classes :
i.show()
def set_cm(self, cm) :
self.__CM = cm
for i in self.__classes :
i.set_cm( cm )
def get_raw(self) :
return self.number_of_classes.get_value_buff() + \
''.join(x.get_raw() for x in self.__classes)
class ConstantValueAttribute(BasicAttribute) :
def __init__(self, class_manager, buff) :
self.__CM = class_manager
super(ConstantValueAttribute, self).__init__()
# u2 attribute_name_index;
# u4 attribute_length;
# u2 constantvalue_index;
self.constantvalue_index = SV( '>H', buff.read(2) )
def show(self) :
print self.constantvalue_index
def set_cm(self, cm) :
self.__CM = cm
def get_raw(self) :
return self.constantvalue_index.get_value_buff()
class EnclosingMethodAttribute(BasicAttribute) :
def __init__(self, class_manager, buff) :
ENCLOSING_METHOD_FORMAT = [ '>HH', "class_index method_index" ]
self.__CM = class_manager
super(EnclosingMethodAttribute, self).__init__()
# u2 attribute_name_index;
# u4 attribute_length;
# u2 class_index
# u2 method_index;
self.__raw_buff = buff.read( calcsize( ENCLOSING_METHOD_FORMAT[0] ) )
self.format = SVs( ENCLOSING_METHOD_FORMAT[0], namedtuple( "EnclosingMethodFormat", ENCLOSING_METHOD_FORMAT[1] ), self.__raw_buff )
def show(self) :
print self.format
def set_cm(self, cm) :
self.__CM = cm
def get_raw(self) :
return self.format.get_value_buff()
ATTRIBUTE_INFO_DESCR = {
"Code" : CodeAttribute,
"Deprecated" : DeprecatedAttribute,
"SourceFile" : SourceFileAttribute,
"Exceptions" : ExceptionsAttribute,
"LineNumberTable" : LineNumberTableAttribute,
"LocalVariableTable" : LocalVariableTableAttribute,
"LocalVariableTypeTable" : LocalVariableTypeTableAttribute,
"StackMapTable" : StackMapTableAttribute,
"InnerClasses" : InnerClassesAttribute,
"ConstantValue" : ConstantValueAttribute,
"EnclosingMethod" : EnclosingMethodAttribute,
"Signature" : SignatureAttribute,
"Synthetic" : SyntheticAttribute,
"SourceDebugExtension" : SourceDebugExtensionAttribute,
"RuntimeVisibleAnnotations" : RuntimeVisibleAnnotationsAttribute,
"RuntimeInvisibleAnnotations" : RuntimeInvisibleAnnotationsAttribute,
"RuntimeVisibleParameterAnnotations" : RuntimeVisibleParameterAnnotationsAttribute,
"RuntimeInvisibleParameterAnnotations" : RuntimeInvisibleParameterAnnotationsAttribute,
"AnnotationDefault" : AnnotationDefaultAttribute,
}
class AttributeInfo :
"""AttributeInfo manages each attribute info (Code, SourceFile ....)"""
def __init__(self, class_manager, buff) :
self.__CM = class_manager
self.__raw_buff = buff.read( calcsize( ATTRIBUTE_INFO[0] ) )
self.format = SVs( ATTRIBUTE_INFO[0], ATTRIBUTE_INFO[1], self.__raw_buff )
self.__name = self.__CM.get_string( self.format.get_value().attribute_name_index )
try :
self._info = ATTRIBUTE_INFO_DESCR[ self.__name ](self.__CM, buff)
except KeyError, ke :
bytecode.Exit( "AttributeInfo %s doesn't exist" % self.__name )
def get_item(self) :
"""Return the specific attribute info"""
return self._info
def get_name(self) :
"""Return the name of the attribute"""
return self.__name
def get_raw(self) :
v1 = self.format.get_value().attribute_length
v2 = len(self._info.get_raw())
if v1 != v2 :
self.set_attribute_length( v2 )
return self.format.get_value_buff() + self._info.get_raw()
def get_attribute_name_index(self) :
return self.format.get_value().attribute_name_index
def set_attribute_name_index(self, value) :
self.format.set_value( { "attribute_name_index" : value } )
def set_attribute_length(self, value) :
self.format.set_value( { "attribute_length" : value } )
def get_attributes(self) :
return self.format
def _fix_attributes(self, new_cm) :
self._info._fix_attributes( new_cm )
def set_cm(self, cm) :
self.__CM = cm
self._info.set_cm( cm )
def show(self) :
print self.format, self.__name
if self._info != None :
self._info.show()
def pretty_show(self, m_a) :
print self.format, self.__name
if self._info != None :
if isinstance(self._info, CodeAttribute) :
self._info.pretty_show(m_a)
else :
self._info.show()
class ClassManager :
"""ClassManager can be used by all classes to get more information"""
def __init__(self, constant_pool, constant_pool_count) :
self.constant_pool = constant_pool
self.constant_pool_count = constant_pool_count
self.__this_class = None
def get_value(self, idx) :
name = self.get_item(idx[0]).get_name()
if name == "CONSTANT_Integer" :
return [ name, self.get_item(idx[0]).get_format().get_value().bytes ]
elif name == "CONSTANT_String" :
return [ name, self.get_string( self.get_item(idx[0]).get_format().get_value().string_index ) ]
elif name == "CONSTANT_Class" :
return [ name, self.get_class( idx[0] ) ]
elif name == "CONSTANT_Fieldref" :
return [ name, self.get_field( idx[0] ) ]
elif name == "CONSTANT_Float" :
return [ name, self.get_item(idx[0]).get_format().get_value().bytes ]
bytecode.Exit( "get_value not yet implemented for %s" % name )
def get_item(self, idx) :
return self.constant_pool[ idx - 1]
def get_interface(self, idx) :
if self.get_item(idx).get_name() != "CONSTANT_InterfaceMethodref" :
return []
class_idx = self.get_item(idx).get_class_index()
name_and_type_idx = self.get_item(idx).get_name_and_type_index()
return [ self.get_string( self.get_item(class_idx).get_name_index() ),
self.get_string( self.get_item(name_and_type_idx).get_name_index() ),
self.get_string( self.get_item(name_and_type_idx).get_descriptor_index() )
]
def get_interface_index(self, class_name, name, descriptor) :
raise("ooo")
def get_method(self, idx) :
if self.get_item(idx).get_name() != "CONSTANT_Methodref" :
return []
class_idx = self.get_item(idx).get_class_index()
name_and_type_idx = self.get_item(idx).get_name_and_type_index()
return [ self.get_string( self.get_item(class_idx).get_name_index() ),
self.get_string( self.get_item(name_and_type_idx).get_name_index() ),
self.get_string( self.get_item(name_and_type_idx).get_descriptor_index() )
]
def get_method_index(self, class_name, name, descriptor) :
idx = 1
for i in self.constant_pool :
res = self.get_method( idx )
if res != [] :
m_class_name, m_name, m_descriptor = res
if m_class_name == class_name and m_name == name and m_descriptor == descriptor :
return idx
idx += 1
return -1
def get_field(self, idx) :
if self.get_item(idx).get_name() != "CONSTANT_Fieldref" :
return []
class_idx = self.get_item(idx).get_class_index()
name_and_type_idx = self.get_item(idx).get_name_and_type_index()
return [ self.get_string( self.get_item(class_idx).get_name_index() ),
self.get_string( self.get_item(name_and_type_idx).get_name_index() ),
self.get_string( self.get_item(name_and_type_idx).get_descriptor_index() )
]
def get_field_index(self, name, descriptor) :
idx = 1
for i in self.constant_pool :
res = self.get_field( idx )
if res != [] :
_, m_name, m_descriptor = res
if m_name == name and m_descriptor == descriptor :
return idx
idx += 1
return -1
def get_class(self, idx) :
if self.get_item(idx).get_name() != "CONSTANT_Class" :
return []
return [ self.get_string( self.get_item(idx).get_name_index() ) ]
def get_array_type(self, idx) :
return ARRAY_TYPE[ idx[0] ]
def get_string_index(self, name) :
idx = 1
for i in self.constant_pool :
if i.get_name() == "CONSTANT_Utf8" :
if i.get_bytes() == name :
return idx
idx += 1
return -1
def get_integer_index(self, value) :
idx = 1
for i in self.constant_pool :
if i.get_name() == "CONSTANT_Integer" :
if i.get_format().get_value().bytes == value :
return idx
idx += 1
return -1
def get_cstring_index(self, value) :
idx = 1
for i in self.constant_pool :
if i.get_name() == "CONSTANT_String" :
if self.get_string( i.get_format().get_value().string_index ) == value :
return idx
idx += 1
return -1
def get_name_and_type_index(self, name_method_index, descriptor_method_index) :
idx = 1
for i in self.constant_pool :
if i.get_name() == "CONSTANT_NameAndType" :
value = i.get_format().get_value()
if value.name_index == name_method_index and value.descriptor_index == descriptor_method_index :
return idx
idx += 1
return -1
def get_class_by_index(self, name_index) :
idx = 1
for i in self.constant_pool :
if i.get_name() == "CONSTANT_Class" :
value = i.get_format().get_value()
if value.name_index == name_index :
return idx
idx += 1
return -1
def get_method_ref_index(self, new_class_index, new_name_and_type_index) :
idx = 1
for i in self.constant_pool :
if i.get_name() == "CONSTANT_Methodref" :
value = i.get_format().get_value()
if value.class_index == new_class_index and value.name_and_type_index == new_name_and_type_index :
return idx
idx += 1
return -1
def get_field_ref_index(self, new_class_index, new_name_and_type_index) :
idx = 1
for i in self.constant_pool :
if i.get_name() == "CONSTANT_Fieldref" :
value = i.get_format().get_value()
if value.class_index == new_class_index and value.name_and_type_index == new_name_and_type_index :
return idx
idx += 1
return -1
def get_class_index(self, method_name) :
idx = 1
for i in self.constant_pool :
res = self.get_method( idx )
if res != [] :
_, name, _ = res
if name == method_name :
return i.get_class_index()
idx += 1
return -1
def get_class_index2(self, class_name) :
idx = 1
for i in self.constant_pool :
res = self.get_class( idx )
if res != [] :
name = res[0]
if name == class_name :
return idx
idx += 1
return -1
def get_used_fields(self) :
l = []
for i in self.constant_pool :
if i.get_name() == "CONSTANT_Fieldref" :
l.append( i )
return l
def get_used_methods(self) :
l = []
for i in self.constant_pool :
if i.get_name() == "CONSTANT_Methodref" :
l.append( i )
return l
def get_string(self, idx) :
if self.constant_pool[idx - 1].get_name() == "CONSTANT_Utf8" :
return self.constant_pool[idx - 1].get_bytes()
return None
def set_string(self, idx, name) :
if self.constant_pool[idx - 1].get_name() == "CONSTANT_Utf8" :
self.constant_pool[idx - 1].set_bytes( name )
else :
bytecode.Exit( "invalid index %d to set string %s" % (idx, name) )
def add_string(self, name) :
name_index = self.get_string_index(name)
if name_index != -1 :
return name_index
tag_value = INVERT_CONSTANT_INFO[ "CONSTANT_Utf8" ]
buff = pack( CONSTANT_INFO[ tag_value ][1], tag_value, len(name) ) + pack( ">%ss" % len(name), name )
ci = CONSTANT_INFO[ tag_value ][-1]( self, bytecode.BuffHandle( buff ) )
self.constant_pool.append( ci )
self.constant_pool_count.set_value( self.constant_pool_count.get_value() + 1 )
return self.constant_pool_count.get_value() - 1
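# Note: per the classfile convention, constant_pool_count is one more than the
# number of usable entries, so after the increment above the new entry's
# 1-based index is constant_pool_count - 1.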
def set_this_class(self, this_class) :
self.__this_class = this_class
def get_this_class(self) :
return self.__this_class.get_value()
def get_this_class_name(self) :
return self.get_class( self.__this_class.get_value() )[0]
def add_constant_pool(self, elem) :
self.constant_pool.append( elem )
self.constant_pool_count.set_value( self.constant_pool_count.get_value() + 1 )
def get_constant_pool_count(self) :
return self.constant_pool_count.get_value()
def create_class(self, name) :
class_name_index = self.add_string( name )
return self._create_class( class_name_index )
def _create_class(self, class_name_index) :
'''
Name: load_ops.py
Desc: Input pipeline using the feed-dict method to provide input data to the model.
Some of this code is taken from <NAME>'s colorization github
and the python caffe library.
Other parts of this code have been taken from <NAME>'s library.
'''
from __future__ import absolute_import, division, print_function
import itertools
import json
import math
import numpy as np
from numpy import linalg as LA
import os
from PIL import Image
import PIL
import pdb
import pickle
import random
import scipy
from scipy.ndimage.interpolation import zoom
from scipy.ndimage.filters import gaussian_filter
import skimage
import skimage.io
from skimage.transform import resize
import sklearn.neighbors as nn
import string
import subprocess
import sys
# import tensorflow as tf
from transforms3d import euler
import transforms3d
import traceback as tb
# if tf.__version__ == '0.10.0':
# tf_summary_scalar = tf.scalar_summary
# else:
# tf_summary_scalar = tf.summary.scalar
#######################
# Loading fns
#######################
def load_scaled_image( filename, color=True ):
"""
Load an image converting from grayscale or alpha as needed.
From KChen
Args:
filename : string
color : boolean
flag for color format. True (default) loads as RGB while False
loads as intensity (if image is already grayscale).
Returns
image : an image with type np.float32 in range [0, 1]
of size (H x W x 3) in RGB or
of size (H x W x 1) in grayscale.
By kchen
"""
img = skimage.img_as_float(skimage.io.imread(filename, as_gray=not color)).astype(np.float32)
if img.ndim == 2:
img = img[:, :, np.newaxis]
if color:
img = np.tile(img, (1, 1, 3))
elif img.shape[2] == 4:
img = img[:, :, :3]
return img
def load_raw_image( filename, color=True, use_pil=False ):
"""
Load an image converting from grayscale or alpha as needed.
Adapted from KChen
Args:
filename : string
color : boolean
flag for color format. True (default) loads as RGB while False
loads as intensity (if image is already grayscale).
Returns
image : an image with image original dtype and image pixel range
of size (H x W x 3) in RGB or
of size (H x W x 1) in grayscale.
"""
if use_pil:
img = Image.open( filename )
else:
img = skimage.io.imread(filename, as_gray=not color)
if use_pil:
return img
if img.ndim == 2:
img = img[:, :, np.newaxis]
if color:
img = np.tile(img, (1, 1, 3))
elif img.shape[2] == 4:
img = img[:, :, :3]
return img
#########################
# Image manipulation fns
#########################
def resize_rescale_imagenet(img, new_dims, interp_order=1, current_scale=None, no_clip=False):
"""
Resize an image array with interpolation, then convert it to Caffe-style
network input: BGR channel order, [0, 255] range, ImageNet mean subtracted.
Parameters
----------
im : (H x W x K) ndarray
new_dims : (height, width) tuple of new dimensions.
interp_order : interpolation order, default is linear.
Returns
-------
im : resized ndarray with shape (new_dims[0], new_dims[1], K)
"""
img = skimage.img_as_float( img )
img = resize_image( img, new_dims, interp_order )
img = img[:,:,[2,1,0]] * 255.
mean_bgr = [103.062623801, 115.902882574, 123.151630838]
img = img - mean_bgr
return img
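# The slicing img[:,:,[2,1,0]] above swaps RGB to BGR, and the per-channel
# offsets match the commonly used Caffe ImageNet BGR means, so the output is
# Caffe-style network input.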
def resize_rescale_image_low_sat(img, new_dims, new_scale, interp_order=1, current_scale=None, no_clip=False):
"""
Resize an image array with interpolation, clip values to [0.1, 0.9], and
rescale them to lie between new_scale[0] and new_scale[1].
Parameters
----------
im : (H x W x K) ndarray
new_dims : (height, width) tuple of new dimensions.
new_scale : (min, max) tuple of new scale.
interp_order : interpolation order, default is linear.
Returns
-------
im : resized ndarray with shape (new_dims[0], new_dims[1], K)
"""
img = skimage.img_as_float( img )
img = resize_image( img, new_dims, interp_order )
img = np.clip(img, 0.1, 0.9)
img = rescale_image( img, new_scale, current_scale=current_scale, no_clip=no_clip )
return img
def resize_rescale_image_low_sat_2(img, new_dims, new_scale, interp_order=1, current_scale=None, no_clip=False):
"""
Resize an image array with interpolation, clip values to [0.2, 0.8], and
rescale them to lie between new_scale[0] and new_scale[1].
Parameters
----------
im : (H x W x K) ndarray
new_dims : (height, width) tuple of new dimensions.
new_scale : (min, max) tuple of new scale.
interp_order : interpolation order, default is linear.
Returns
-------
im : resized ndarray with shape (new_dims[0], new_dims[1], K)
"""
img = skimage.img_as_float( img )
img = resize_image( img, new_dims, interp_order )
img = np.clip(img, 0.2, 0.8)
img = rescale_image( img, new_scale, current_scale=current_scale, no_clip=no_clip )
return img
def resize_rescale_image(img, new_dims, new_scale, interp_order=1, current_scale=None, no_clip=False):
"""
Resize an image array with interpolation, and rescale its values to lie
between new_scale[0] and new_scale[1].
Parameters
----------
im : (H x W x K) ndarray
new_dims : (height, width) tuple of new dimensions.
new_scale : (min, max) tuple of new scale.
interp_order : interpolation order, default is linear.
Returns
-------
im : resized ndarray with shape (new_dims[0], new_dims[1], K)
"""
img = skimage.img_as_float( img ) # float image in [0,1], e.g. (512,512,3)
img = resize_image( img, new_dims, interp_order ) # resized to new_dims, e.g. (256,256,3)
img = rescale_image( img, new_scale, current_scale=current_scale, no_clip=no_clip ) # rescaled to new_scale, e.g. [-1,1]
return img
def resize_rescale_image_gaussian_blur(img, new_dims, new_scale, interp_order=1, blur_strength=4, current_scale=None, no_clip=False):
"""
Resize an image array with interpolation, rescale its values to new_scale,
and apply a Gaussian blur.
Parameters
----------
im : (H x W x K) ndarray
new_dims : (height, width) tuple of new dimensions.
new_scale : (min, max) tuple of new scale.
interp_order : interpolation order, default is linear.
Returns
-------
im : resized ndarray with shape (new_dims[0], new_dims[1], K)
"""
img = skimage.img_as_float( img )
img = resize_image( img, new_dims, interp_order )
img = rescale_image( img, new_scale, current_scale=current_scale, no_clip=True )
blurred = gaussian_filter(img, sigma=blur_strength)
if not no_clip:
min_val, max_val = new_scale
np.clip(blurred, min_val, max_val, out=blurred)
return blurred
def resize_image(im, new_dims, interp_order=1):
"""
Resize an image array with interpolation.
Parameters
----------
im : (H x W x K) ndarray
new_dims : (height, width) tuple of new dimensions.
interp_order : interpolation order, default is linear.
Returns
-------
im : resized ndarray with shape (new_dims[0], new_dims[1], K)
By kchen @ https://github.com/kchen92/joint-representation/blob/24b30ca6963d2ec99618af379c1e05e1f7026710/lib/data/input_pipeline_feed_dict.py
"""
if type(im) == PIL.PngImagePlugin.PngImageFile:
interps = [PIL.Image.NEAREST, PIL.Image.BILINEAR]
return skimage.util.img_as_float(im.resize(new_dims, interps[interp_order]))
if all( new_dims[i] == im.shape[i] for i in range( len( new_dims ) ) ):
resized_im = im #return im.astype(np.float32)
elif im.shape[-1] == 1 or im.shape[-1] == 3:
resized_im = resize(im, new_dims, order=interp_order, preserve_range=True)
else:
# ndimage interpolates anything but more slowly.
scale = tuple(np.array(new_dims, dtype=float) / np.array(im.shape[:2]))
resized_im = zoom(im, scale + (1,), order=interp_order)
# resized_im = resized_im.astype(np.float32)
return resized_im
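# resize_image dispatches on input type: PIL images go through PIL's resize,
# 1- or 3-channel arrays through skimage.transform.resize, and anything else
# falls back to scipy's (slower) zoom so arbitrary channel counts still work.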
def rescale_image(im, new_scale=[-1.,1.], current_scale=None, no_clip=False):
"""
Rescales an image's pixel values to new_scale
Args:
img: A np.float32 array, assumed to be in [0,1]
new_scale: [min,max]
current_scale: If not supplied, it is assumed to be in:
[0, 1]: if dtype=float
[0, 2^16]: if dtype=uint
[0, 255]: if dtype=ubyte
Returns:
rescaled_image
"""
im = skimage.img_as_float(im).astype(np.float32)
if current_scale is not None:
min_val, max_val = current_scale
if not no_clip:
im = np.clip(im, min_val, max_val)
im = im - min_val
im /= (max_val - min_val)
min_val, max_val = new_scale
im *= (max_val - min_val)
im += min_val
return im
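# Worked example: with the default current_scale of None and
# new_scale=[-1., 1.], a float image in [0, 1] is mapped as im * 2 - 1,
# e.g. a pixel at 0.25 becomes -0.5.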
def resize_and_rescale_image_log( img, new_dims, offset=1., normalizer=1.):
"""
Resizes and rescales an img to log-linear
Args:
img: A np array
offset: Shifts values by offset before taking log. Prevents
taking the log of a negative number
normalizer: divide by the normalizing factor after taking log
Returns:
rescaled_image
"""
img = np.log( float( offset ) + img ) / normalizer
img = resize_image(img, new_dims)
return img
def rescale_image_log( img, offset=1., normalizer=1. ):
"""
Rescales an img to log-linear
Args:
img: A np array
offset: Shifts values by offset before taking log. Prevents
taking the log of a negative number
normalizer: divide by the normalizing factor after taking log
Returns:
rescaled_image
"""
return np.log( float( offset ) + img ) / normalizer
################
# Curvature #
#################
def curvature_preprocess(img, new_dims, interp_order=1):
img = resize_image(img, new_dims, interp_order)
img = img[:,:,:2]
img = img - [123.572, 120.1]
img = img / [31.922, 21.658]
return img
def curvature_preprocess_gaussian_with_blur(img, new_dims, interp_order=1, blur_strength=4):
k1 = img[:,:,0].astype(np.float32) - 128.0
k2 = img[:,:,1].astype(np.float32) - 128.0
curv = k1 * k2
curv = curv * 8.0 / (127.0 ** 2)
curv = curv[:,:,np.newaxis]
curv = resize_image(curv, new_dims, interp_order)
blurred = gaussian_filter(curv, sigma=blur_strength)
return blurred
def curvature_preprocess_gaussian(img, new_dims, interp_order=1):
k1 = img[:,:,0].astype(np.float32) - 128.0
k2 = img[:,:,1].astype(np.float32) - 128.0
curv = k1 * k2
curv = curv * 8.0 / (127.0 ** 2)
curv = curv[:,:,np.newaxis]
curv = resize_image(curv, new_dims, interp_order)
return curv
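# In both curvature functions above, k1 and k2 appear to be principal
# curvatures encoded as uint8 values centered at 128; their product (a
# Gaussian-curvature proxy) is rescaled by 8 / 127**2 before resizing. The
# encoding is inferred from the arithmetic, not documented in the source.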
#################
# Denoising #
#################
def random_noise_image(img, new_dims, new_scale, interp_order=1 ):
"""
Add noise to an image
Args:
im : (H x W x K) ndarray
new_dims : (height, width) tuple of new dimensions.
new_scale : (min, max) tuple of new scale.
"""
# SPDX-License-Identifier: Apache-2.0
# -*- coding: utf-8 -*-
""" Validation related functions """
import bpy
import os
import xrs.collection
import xrs.filename
import xrs.log
import xrs.material
import xrs.object
import xrs.tools
import xrs.validation_report
def active_object_is_a_mesh():
""" Check if the active object has a mesh """
return bpy.context.active_object.type == 'MESH'
def materials_submission():
""" Cleans up materials and links image textures for submission """
# Remove all unused material slots
for obj in bpy.data.objects:
xrs.material.clean_material_slots(obj)
# Check that the image texture exists
for obj in bpy.data.collections["web"].objects:
for slot in obj.material_slots:
mat = slot.material
if mat.use_nodes == False:
xrs.log.debug("Material that does not use nodes found: " + mat.name)
return False
for node in mat.node_tree.nodes:
if node.type == 'TEX_IMAGE':
if node.image == None:
xrs.log.debug("No image linked to texture node: " + mat.name)
return False
node.image.source = 'FILE'
node.image.filepath = os.path.join("//", node.image.name + ".png")
# node.image.filepath = xrs.filename.get_sibling_dir("textures") + node.image.name + ".png"
# if os.path.exists(node.image.filepath) == False:
# xrs.log.debug("Filepath error: " + node.image.filepath)
# return False
# properName = node.image.name.split("_4k_")[0]
# if mat.name != properName:
# xrs.log.debug("Naming conventions not followed for: " + mat.name + ", Should be: " + properName)
# return False
# Image textures live in the textures directory
return True
def check_and_report_normal_material(bsdf, mat):
""" Checks that the normal material goes through a normal node """
slot_name = "Normal"
valid = True
if (xrs.material.get_node_link_count_named(bsdf, slot_name) == 0):
xrs.validation_report.write_ok(
mat.name + " has no " + slot_name + " texture"
)
else:
normal_map = xrs.material.get_first_from_node_link_named(
bsdf,
slot_name
)
if normal_map.type != 'NORMAL_MAP':
xrs.validation_report.write_error(
mat.name + " " + slot_name +
" input should be an Normal Map node"
)
valid = False
else:
# check that there is an image texture with the right name
valid = valid and check_and_report_material(normal_map, mat, "Color", "normal")
return valid
def check_and_report_procedural_material():
""" Checks if the created procedural material complies with current 3XR standards """
valid = True
# Check for valid product name in .blend file
try:
name = bpy.context.scene.xr_studio.product_name
except:
xrs.log.error("Unable to find the product name in the .blend file")
# Check for naming conventions
if xrs.material.procedural_nodes_name_check() == True:
xrs.validation_report.write_ok("The procedural material nodes are named correctly")
nodes = bpy.data.materials[name].node_tree.nodes
# Check for number of nodes in the material (There should only be 3 on the top level of nodes.)
if len(nodes) > 3:
xrs.validation_report.write_error("There are too many nodes in the top layer of this material. There should only be 3.")
valid = False
else:
xrs.validation_report.write_ok("There are a correct number of nodes in the top layer of this material")
# Look for the group, BSDF, and Material Output nodes
if xrs.material.get_one_node_of_type(nodes, 'BSDF_PRINCIPLED'):
xrs.validation_report.write_ok("PRINCIPLED BSDF node found")
else:
xrs.validation_report.write_error("PRINCIPLED BSDF node not found")
valid = False
if xrs.material.get_one_node_of_type(nodes, 'OUTPUT_MATERIAL'):
xrs.validation_report.write_ok("MATERIAL OUTPUT node found")
else:
xrs.validation_report.write_error("MATERIAL OUTPUT node not found")
valid = False
if xrs.material.get_one_node_of_type(nodes, 'GROUP'):
xrs.validation_report.write_ok("CUSTOM GROUP node found")
else:
xrs.validation_report.write_error("CUSTOM GROUP node not found")
valid = False
# Check if the nodes are connected to each other correctly
if xrs.material.procedural_nodes_link_check():
xrs.validation_report.write_ok("The procedural material nodes are linked correctly")
else:
xrs.validation_report.write_error("The procedural material nodes are not linked correctly")
valid = False
else:
valid = False
return valid
def check_and_report_material(bsdf, mat, slot_name, texture_name):
valid = True
if (xrs.material.get_node_link_count_named(bsdf, slot_name) == 0):
xrs.validation_report.write_ok(
mat.name + " has no " + slot_name + " texture"
)
else:
tex = xrs.material.get_first_from_node_link_named(
bsdf,
slot_name
)
# (ERR) input not an Image Texture
if (tex.type != 'TEX_IMAGE'):
valid = False
xrs.validation_report.write_error(
mat.name + " " + slot_name +
" input needs to be an image texture, found " + tex.type
)
else:
xrs.validation_report.write_ok(
mat.name + " " + slot_name +
" uses an image texture"
)
# (ERR) texture has no image
if (tex.image == None):
valid = False
xrs.validation_report.write_error(
mat.name + " " + slot_name +
" texture needs an image"
)
else:
img = tex.image
# (ERR) texture source not a file
if (img.source != 'FILE'):
valid = False
xrs.validation_report.write_error(
mat.name + " " + slot_name +
" texture image not a file"
)
else:
# Note: os.path.basename does not work on Windows
img_name = bpy.path.basename(img.filepath)
# (WARN) filename mismatch
if (img_name != mat.name + "_4k_" + texture_name + ".png"):
xrs.validation_report.write_warning(
"Texture name (" + img_name + ") should be " + mat.name +
"_4k_" + texture_name + ".png (Ignore for a transparent material)"
)
else:
xrs.validation_report.write_ok(
img_name + " name is valid"
)
# (ERR) Not in the textures folder
# Note: split for Windows compatibility
paths = os.path.dirname(img.filepath).split('\\')
directory = paths[0]
folder = ""
if (len(paths) > 1):
folder = paths[1]
if (directory == "//../textures"):
# Mac / Linux
xrs.validation_report.write_ok(
img_name + " is in the textures folder"
)
elif (directory == "//.." and folder == "textures"):
# Windows
xrs.validation_report.write_ok(
img_name + " is in the textures folder"
)
else:
valid = False
xrs.validation_report.write_error(
img_name +
" needs to be in the textures folder. Found " +
directory
)
return valid
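# A hypothetical usage sketch: given the Principled BSDF node `bsdf` of a
# material `mat`,
#   check_and_report_material(bsdf, mat, "Base Color", "diffuse")
# reports OK only if the Base Color input is fed by an image texture whose
# file is named <mat.name>_4k_diffuse.png and lives in the sibling textures
# folder (the slot/texture names here are illustrative).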
def mat_scene():
""" Check that the scene is valid for procedural materrial creation and creates a report """
xrs.validation_report.new_report()
valid = True
# Start by getting into object mode with nothing selected
bpy.ops.object.mode_set(mode="OBJECT")
bpy.ops.object.select_all(action='DESELECT')
# Find all needed items / collections in the material .blend scene
if (xrs.collection.collection_has_objects("3XR_lights") == False):
xrs.validation_report.write_error("3XR lights collection not found")
valid = False
else:
xrs.validation_report.write_ok("3XR lights collection found")
if (xrs.collection.collection_has_objects("3XR_reference") == False):
xrs.validation_report.write_error("3XR_reference collection not found")
valid = False
else:
xrs.validation_report.write_ok("3XR_reference collection found")
try:
bpy.data.objects['3XR_material_ball']
xrs.validation_report.write_ok("3XR_material_ball found")
except:
xrs.validation_report.write_error("3XR_material_ball not found")
valid = False
# Check number of materials & procedural material setup
if len(bpy.data.materials) > 2:
xrs.validation_report.write_error("There are too many materials in the Blender file. Please remove excess materials")
valid = False
else:
xrs.validation_report.write_ok("There are a correct number of materials in the Blender file")
# Check for valid product name in .blend file
try:
name = bpy.context.scene.xr_studio.product_name
except:
xrs.log.error("Unable to find the product name in the .blend file")
if check_and_report_procedural_material() == False:
#xrs.validation_report.write_error("3XR procedural material "+ name + " is not valid")
valid = False
else:
xrs.validation_report.write_ok("3XR procedural material is valid")
# Show the report after it is complete
xrs.validation_report.show_report()
return valid
def scene():
""" Check that the scene is valid for submission and creates a report """
xrs.validation_report.new_report()
valid = True
# Start by getting into object mode with nothing selected
bpy.ops.object.mode_set(mode="OBJECT")
bpy.ops.object.select_all(action='DESELECT')
if (xrs.collection.collection_has_objects("master") == False):
xrs.validation_report.write_error("master collection not found or has no objects")
valid = False
else:
xrs.validation_report.write_ok("master collection found")
if (xrs.collection.collection_has_objects("web") == False):
valid = False
xrs.validation_report.write_error(
"web collection not found or has no objects"
)
else:
# Check all objects in the web collection
web_objects = xrs.collection.get_objects("web")
base_objects = xrs.collection.get_objects("master")
transparent_object_count = 0
total_triangles = 0
xrs.validation_report.write_ok(
"web collection found. object count: " + str(len(web_objects))
)
xrs.validation_report.write_hr()
# TODO: Additional checks for master objects
if ('dimensions_cube' not in bpy.data.objects):
valid = False
xrs.validation_report.write_error(
"dimensions_cube not found"
)
else:
dimensions_cube = bpy.data.objects['dimensions_cube']
tolerance = 1.05
web_dimensions = xrs.collection.get_dimensions("web")
# (WARN) Width
if (
web_dimensions[0] > dimensions_cube.dimensions.x * tolerance
):
xrs.validation_report.write_warning(
"Model width is too big (" +
str(web_dimensions[0]) + " > " +
str(dimensions_cube.dimensions.x) + ")"
)
elif (
web_dimensions[0] < dimensions_cube.dimensions.x / tolerance
):
xrs.validation_report.write_warning(
"Model width is too small (" +
str(web_dimensions[0]) + " < " +
str(dimensions_cube.dimensions.x) + ")"
)
else:
xrs.validation_report.write_ok(
"Model width is " + str(web_dimensions[0])
)
# (WARN) Depth
if (
web_dimensions[1] > dimensions_cube.dimensions.y * tolerance
):
xrs.validation_report.write_warning(
"Model depth is too big (" +
str(web_dimensions[1]) + " > " +
str(dimensions_cube.dimensions.y) + ")"
)
elif (
web_dimensions[1] < dimensions_cube.dimensions.y / tolerance
):
xrs.validation_report.write_warning(
"Model depth is too small (" +
str(web_dimensions[1]) + " < " +
str(dimensions_cube.dimensions.y) + ")"
)
else:
xrs.validation_report.write_ok(
"Model depth is " + str(web_dimensions[1])
)
# (WARN) Height
if (
web_dimensions[2] > dimensions_cube.dimensions.z * tolerance
):
xrs.validation_report.write_warning(
"Model height is too big (" +
str(web_dimensions[2]) + " > " +
str(dimensions_cube.dimensions.z) + ")"
)
elif (
web_dimensions[2] < dimensions_cube.dimensions.z / tolerance
):
xrs.validation_report.write_warning(
"Model height is too small (" +
str(web_dimensions[2]) + " < " +
str(dimensions_cube.dimensions.z) + ")"
)
else:
xrs.validation_report.write_ok(
"Model height is " + | |
# repo: hectormartinez/rougexstem
import os, sys, re
import util, compression, text, ilp
from globals import *
import nltk
class SummaryProblem:
"""
A class for representing elements of a summary problem
self.id 'D0701'
self.title 'Southern Poverty Law Center'
self.narr 'Describe the activities of <NAME>...'
self.query <title>: <narr>
self.new_docs_paths a list of paths to the input documents
self.old_docs_paths a list of paths to 'old' input docs (update task only)
self.new_docs [Document1, ... ]
self.old_docs [Document1, ... ]
self.annotators set(['A', 'B', 'C', 'D'])
self.training {'A': <summary A>, ... }
"""
def __init__(self, id, title, narr, new_docs, old_docs):
self.id = id
self.title = title
self.narr = narr
self.query = text.Sentence(title+": "+ narr)
self.new_docs_paths = new_docs[:]
self.old_docs_paths = old_docs[:]
## for checking state
self.loaded_docs = False
self.parsed = False
self.loaded_ir_docs = False
## variables that might get set later
self.new_docs = None
self.old_docs = None
self.training = {}
self.annotators = set()
def load_documents(self):
"""
"""
self.new_docs = []
for path in self.new_docs_paths:
doc = text.Document(path)
doc.get_sentences()
self.new_docs.append(doc)
self.old_docs = []
for path in self.old_docs_paths:
doc = text.Document(path)
self.old_docs.append(doc)
self.loaded_docs = True
def _load_training(self, path, source='DUC'):
"""
load [human] summaries, setting these member variables:
self.training = {'A1': [raw_sentence_1, raw_sentence_2, ... ], ... }
self.annotators = set(['A1', 'A2', ... ])
"""
self.training = {}
self.annotators = set()
if source.startswith('DUC') or source.startswith('TAC'):
for file in os.listdir(path):
items = file.split('.')
id = items[0]
## skip ids not relevant to this problem
compare_id = self.id.upper()
if source == 'TAC08':
compare_id = self.id.upper()[:5] + self.id.upper()[6:]
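## e.g. a problem id like "D0801-A" is compared as "D0801A" so that it can
## match the id prefix used by the summary files (example id is hypothetical)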
if id.upper() != compare_id: continue
annotator = items[-1]
self.annotators.add(annotator)
rawsents = open(path + file).read().splitlines()
self.training[annotator] = rawsents
def get_new_sentences(self):
sents = []
for doc in self.new_docs:
for sent in doc.sentences:
sents.append(sent)
return sents
def __str__(self):
s = []
s.append('%s SUMMARYPROBLEM' %'#START')
s.append('ID %s' %self.id)
s.append('TITLE %s' %self.title)
s.append('NARR %s' %self.narr)
s.append('NEW_DOCS %d\n%s' %(len(self.new_docs), '\n'.join(['%s' %n for n in self.new_docs])))
s.append('OLD_DOCS %d\n%s' %(len(self.old_docs), '\n'.join(['%s' %n for n in self.old_docs])))
for annotator in self.annotators:
s.append('TRAIN %s\n%s' %(annotator, '\n'.join(['%s' %n for n in self.training[annotator]])))
return '\n'.join(s)
def check_state(problems):
checks = ['sentences', 'parsed', 'ir']
results = dict.fromkeys(checks, True)
for problem in problems:
if not problem.loaded_docs: results['sentences'] = False
if not problem.parsed: results['parsed'] = False
if not problem.loaded_ir_docs: results['ir'] = False
return results
### SETUP FUNCTIONS ###
def setup_simple(data_path, id='simple', title='', narr=''):
"""
create a summary problem from a single clean (text only) input file
"""
doc = text.Document(data_path, is_clean=True)
problem = SummaryProblem(id, title, narr, [doc], [])
return problem
def setup_TAC08(task, skip_updates=False):
"""
task.topic_file: xml file for TAC
task.doc_path: path containing source documents
task.manual_path: path for manual (human) summaries
"""
## get all document data
all_docs = {}
files = util.get_files(task.doc_path, r'[^_]+_[^_]+_\d+[\.\-]\d+')
sys.stderr.write('Loading [%d] files\n' %len(files))
for file in files:
id = os.path.basename(file)
all_docs[id] = file
## initialize problems
problems = []
# load XML task definition
from xml.etree import ElementTree
root = ElementTree.parse(task.topic_file).getroot()
for topic in root:
if topic.tag != "topic": continue
id = topic.attrib["id"]
title = None
narr = None
docsets = []
docset_ids = []
for node in topic:
if node.tag == "title":
title = node.text.strip()
elif node.tag == "narrative":
narr = node.text.strip()
elif node.tag == "docsetA":
documents = node.findall("doc")
docsets.append([doc.attrib["id"] for doc in documents])
docset_ids.append(node.attrib["id"])
elif node.tag == "docsetB":
if skip_updates: continue
documents = node.findall("doc")
docsets.append([doc.attrib["id"] for doc in documents])
docset_ids.append(node.attrib["id"])
old_docs = []
for docset_index in range(len(docsets)):
## map docids to documents
new_docs = [all_docs[doc] for doc in docsets[docset_index]]
## create a SummaryProblem
problem = SummaryProblem(docset_ids[docset_index], title, narr, new_docs, old_docs)
old_docs += new_docs
## include training data in problem
if task.manual_path: problem._load_training(task.manual_path, source='TAC08')
problems.append(problem)
sys.stderr.write('Setting up [%d] problems\n' %len(problems))
task.problems = problems
def setup_DUC_basic(task, skip_updates=False):
"""
task.topic_file: sgml file for DUC
task.doc_path: path containing source documents
task.manual_path: path for manual (human) summaries
"""
## get all document data
all_docs = {}
files = util.get_files(task.doc_path, '\w{2,3}\d+[\.\-]\d+')
sys.stderr.write('Loading [%d] files\n' %len(files))
for file in files:
id = os.path.basename(file)
all_docs[id] = file
## initialize problems
problems = []
data = open(task.topic_file).read().replace('\n', ' ')
topics = re.findall('<topic>.+?</topic>', data)
sys.stderr.write('Setting up [%d] problems\n' %len(topics))
for topic in topics:
id = util.remove_tags(re.findall('<num>.+?</num>', topic)[0])[:-1]
title = util.remove_tags(re.findall('<title>.+?</title>', topic)[0])
narr = util.remove_tags(re.findall('<narr>.+?</narr>', topic)[0])
docsets = re.findall('<docs.*?>.+?</docs.*?>', topic)
docsets = map(util.remove_tags, docsets)
docsets = [d.split() for d in docsets]
old_docs = []
for docset_index in range(len(docsets)):
## update naming convention different from main
if len(docsets) > 1: id_ext = '-' + 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'[docset_index]
else: id_ext = ''
new_docs = [all_docs[doc] for doc in docsets[docset_index]]
## create a SummaryProblem
problem = SummaryProblem(id+id_ext, title, narr, new_docs, old_docs)
old_docs += new_docs
## include training data in problem
if task.manual_path: problem._load_training(task.manual_path)
problems.append(problem)
## skip updates?
if skip_updates: break
task.problems = problems
def setup_DUC_sentences(task, parser=None, reload=False, options=None):
## load problems quickly from pickle file
if (not reload) and os.path.isfile(task.data_pickle):
sys.stderr.write('Loading [%s] problem data from [%s]\n' %(task.name, task.data_pickle))
task.problems = util.load_pickle(task.data_pickle)
return
## parse sentences
if options:
text.text_processor.load_splitta_model(options.splitta_model)
else:
text.text_processor.load_splitta_model('/u/dgillick/sbd/splitta/model_nb/')
for problem in task.problems:
sys.stderr.write('%s\n' %problem.id)
problem.load_documents()
if parser:
for doc in problem.new_docs:
doc.parse_sentences(parser)
problem.parsed = True
if parser:
parser.run()
for sentence, parsetree in parser.parsed.items():
sentence.parsed = parsetree
## save pickled version for faster loading later
sys.stderr.write('Saving [%s] problem data in [%s]\n' %(task.name, task.data_pickle))
util.save_pickle(task.problems, task.data_pickle)
def build_program(problem, concept_weight, length=100, sentences = None):
"""
the ILP keeps tracks of the constraints
s<num> variables handle sentences, subsentences and removable subtrees
c<num> variables represent concepts in those selected pseudo-sentences
"""
program = compression.SentenceSelectionILP(concept_weight, length, use_subsentences=True, use_removables=True,
use_min_length=True, use_min_length_ratio=False)
if not sentences:
sentences = problem.get_new_sentences()
for sentence in sentences:
if not hasattr(sentence, "compression_node"):
sentence.compression_node = compression.TreebankNode(sentence.parsed)
nounPhraseMapping = compression.generateNounPhraseMapping([s.compression_node for s in sentences])
for sentence in sentences:
## generate a compression candidate tree
candidates = sentence.compression_node.getCandidateTree(nounPhraseMapping)
candidate_root = compression.TreebankNode(candidates)
candidate_root.sentence = sentence
## (or a non compressed tree)
#candidate_root = treenode.TreeNode(sentence.compression_node.getNonCompressedCandidate())
if candidate_root.isLeaf(): continue
## debugging
#candidate_root.original = root
#candidate_root.original_text = candidates
# update ILP with the new sentence
program.addSentence(candidate_root, lambda x: compression.get_bigrams_from_node(x,
node_skip=lambda y: not re.match(r'[A-Za-z0-9]', y.label), node_transform=lambda y: text.text_processor.porter_stem(y.text.lower())))
# skip debugging part
continue
sentence_concepts = program.getConcepts(candidate_root, lambda x: compression.get_bigrams_from_node(x,
node_skip=lambda y: not re.match(r'[A-Za-z0-9]', y.label), node_transform=lambda y: text.text_processor.porter_stem(y.text.lower())))
print sentence.original
print candidate_root.getPrettyCandidates()
for concept in sentence_concepts.keys():
if concept not in concept_weight:
del sentence_concepts[concept]
print sorted(sentence_concepts.keys())
units = dict([(x, 1) for x in util.get_ngrams(sentence.stemmed, n=2, bounds=False)])
for concept in units.keys():
if concept not in concept_weight:
del units[concept]
print sorted(units.keys())
return program
def get_program_result(program):
# get the selected sentences
selection = []
for id in program.output:
if id.startswith("s") and program.output[id] == 1:
node = program.binary[id] # gives you back the actual node (which can be a subsentence, or a chunk not removed)
if not program.nodeHasSelectedParent(node): # only start printing at the topmost nodes
# create a fake sentence to hold the compressed content
sentence = text.Sentence(compression.postProcess(program.getSelectedText(node)), \
node.root.sentence.order, node.root.sentence.source, node.root.sentence.date)
sentence.parsed = str(node)
sentence.original_node = node
selection.append(sentence)
#print node.root.getPrettyCandidates()
return selection
def build_alternative_program(problem, concept_weight, length=100, sentences = None, longest_candidate_only=False):
if not sentences:
sentences = problem.get_new_sentences()
for sentence in sentences:
if not hasattr(sentence, "compression_node"):
sentence.compression_node = compression.TreebankNode(sentence.parsed)
nounPhraseMapping = compression.generateNounPhraseMapping([s.compression_node for s in sentences])
#print "generating acronyms"
acronymMapping = compression.generateAcronymMapping(problem.get_new_sentences())
print problem.id, acronymMapping
compressed_sentences = []
seen_sentences = {}
group_id = 0
for sentence in sentences:
subsentences = sentence.compression_node.getNodesByFilter(compression.TreebankNode.isSubsentence)
candidates = {}
for node in subsentences:
candidates.update(node.getCandidates(mapping=nounPhraseMapping))
if longest_candidate_only:
max_length = 0
argmax = None
for candidate in candidates:
if len(candidate) > max_length:
max_length = len(candidate)
argmax = candidate
if argmax != None:
candidates = [argmax]
for candidate in candidates:
new_sentence = text.Sentence(compression.postProcess(candidate), sentence.order, sentence.source, sentence.date)
if new_sentence.length <= 5: continue # skip short guys
new_sentence.group_id = group_id
compressed_sentences.append(new_sentence)
seen_sentences[new_sentence.original] = 1
group_id += 1
compression.replaceAcronyms(compressed_sentences, acronymMapping)
log_file = open("%s.log" % problem.id, "w")
for sentence in compressed_sentences:
log_file.write("%d %s\n" %( sentence.group_id, str(sentence)))
log_file.close()
# generate ids for acronyms
acronym_id = {}
acronym_length = {}
for definition, acronym in acronymMapping.items():
if acronym not in acronym_id:
acronym_id[acronym] = len(acronym_id)
acronym_length[acronym] = len(definition.strip().split())
# get concepts
relevant_sentences = []
sentence_concepts = | |
# src/planning/knowledge_representation/scripts/interprete/interpretation.py
# (repo: RobotJustina/tmc_justina_docker)
# code dependencies
import kb_services
import parsing
# network toolkit
import networkx as nx
# regular expressions
import re
def diff(a, b):
b = set(b)
return [aa for aa in a if aa not in b]
def intersection(a,b):
if isinstance(a, str):
if " " in a:
a = a.split(" ")
else:
a = [a]
a = set(a)
return [bb for bb in b if bb in a]
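# Note: intersection() accepts either a list or a whitespace-separated string
# for its first argument and preserves the order of elements of b.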
def set_mapping(mapping):
global meaning_mapping_patterns, used_patterns
if mapping == 'gpsr':
meaning_mapping_patterns = meaning_mapping_patterns_gpsr
elif mapping == 'eegpsr':
meaning_mapping_patterns = meaning_mapping_patterns_eegpsr
elif mapping == 'open_challenge':
meaning_mapping_patterns = meaning_mapping_patterns_open_challenge
elif mapping == 'eegpsr2':
meaning_mapping_patterns = meaning_mapping_patterns_eegpsr2
elif mapping == 'restaurant':
meaning_mapping_patterns = meaning_mapping_patterns_restaurant
elif mapping == 'catering_comfort':
meaning_mapping_patterns = meaning_mapping_patterns_catering_comfort
elif mapping == 'receptionist':
meaning_mapping_patterns = meaning_mapping_patterns_receptionist
elif mapping == 'servingdrinks':
meaning_mapping_patterns = meaning_mapping_patterns_servingdrinks
elif mapping == 'spr':
meaning_mapping_patterns = meaning_mapping_patterns_spr
elif mapping == 'where_is_this':
meaning_mapping_patterns = meaning_mapping_patterns_where_is_this
elif mapping == 'cia':
meaning_mapping_patterns = meaning_mapping_patterns_cia
used_patterns = [0]*len(meaning_mapping_patterns)
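# Example: set_mapping('gpsr') points meaning_mapping_patterns at the GPSR
# pattern list below and resets the per-pattern usage counters.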
#
# Patterns for OPEN CHALLENGE
meaning_mapping_patterns_open_challenge = [
# test open challenge
{"params": ["Action_get", "Object_find", "Action_deliver", "Destination_me"],
"Action_get": [["take", "get"], ["vrb"], [], []],
"Object_find": [[], ["noun"], ["item", "drink"], []],
"Action_deliver": [["deliver"], ["vrb"], [], []],
"Destination_me": [["john", "peter"], [], [], []],
"conceptual_dependency": "(task (plan user_speech) (action_type get_object) (params -Object_find- dining_room) (step )) " +
"(task (plan user_speech) (action_type find_person_in_room) (params -Destination_me- dining_room) (step ))" +
"(task (plan user_speech) (action_type handover_object) (params ) (step ))",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
## Cubes Challenge
{"params":["Action_put", "Object1", "Object2"],
"Action_put": [["put"], ["vrb"], [], []],
"Object1":[[], ["noun"], ["item"],[]],
"Object2":[[],["noun"],["item"],[]],
"conceptual_dependency":"(task (plan user_speech) (action_type explain_cubes_plan) (params -Object1- -Object2-) (step ))"+
"(task (plan user_speech) (action_type update_object_location) (params location table) (step ))"+
"(task (plan user_speech) (action_type stack_state)(params ) (step ))" +
"(task (plan user_speech) (action_type speech_generator)(params speech_1)(step ))"+
"(task (plan user_speech) (action_type put_on_top) (params -Object1- -Object2-) (step))",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''}
]
meaning_mapping_patterns_gpsr = [
# Patterns for TMR 2017
############################################# GetNDeliver
# param: [["palabras", "clave"], ["noun", "vrb", "prep_phrase"], ["categoria", "item", "place", "person"], []]
# take from and deliver to person
#{"params": ["Action_get", "Get_object", "Source_get", "Action_deliver", "Destination_person", "Destination_location"],
#"Action_get": [["get", "grasp", "take"], ["vrb"], [], []],
#"Get_object": [[], ["noun"], ["item"], []],
#"Source_get": [[], ["noun"], ["place"], []],
#"Action_deliver": [["bring", "carry", "deliver", "take"], ["vrb"], [], []],
#"Destination_person": [[], ["noun", "prep_phrase"], ["person"], []],
#"Destination_location": [[], ["noun"], ["place"], []],
#"conceptual_dependency": "(task (plan user_speech) (action_type update_object_location) (params -Get_object- -Source_get- ) (step 1)) " +
# "(task (plan user_speech) (action_type get_object) (params -Get_object- -Source_get-) (step 2)) " +
# "(task (plan user_speech) (action_type find_person_in_room) (params -Destination_person- -Destination_location-) (step 3))" +
# "(task (plan user_speech) (action_type handover_object) (params -Get_object-) (step 4))",
#"verbal_confirmation": '',
#"planner_confirmed": '',
#"planner_not_confirmed": ''},
#### add object to kdb
{"params": ["Object", "Is", "Location"],
"Object": [[], [], ["item"], []],
"Is": [["is", "are"], [], [], []],
"Location": [[], [], ["place", "room"], []],
"conceptual_dependency": "(inst (plan user_speech) (action_type set_object_location) (params -Object- -Location-) (step ))",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
## obtain the object location
{"params": ["Where", "Is", "Object"],
"Where":[["where"],[],[],[]],
"Is": [["is", "are"], [], [], []],
"Object": [[], [], ["item"], []],
"Location": [[], [], ["place", "room"], []],
"conceptual_dependency": "(inst (plan user_speech) (action_type get_object_location) (params -Object-) (step ))",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
#navigate
{"params": ["Action_nav", "Location"],
"Action_nav": [["navigate", "go", "locate", "enter"], [], [], []],
"Location": [[], [], ["place", "room"], []],
"conceptual_dependency": "(task (plan user_speech) (action_type update_object_location) (params location -Location-) (step ))",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
#question
{"params": ["Action_talk", "Question"],
"Action_talk": [["speak", "answer", "tell", "say"], [], [], []],
"Question": [[], [], ["question"], []],
"conceptual_dependency": "(task (plan user_speech) (action_type wait_for_user_instruction) (params question -Question-) (step ))",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
# find person, 1 parameter
{"params": ["Action_find","Find_person"],
"Action_find": [["find", "look_for", "locate"], [], [], []],
"Find_person": [[], [], ["person"], []],
"conceptual_dependency": "(task (plan user_speech) (action_type find_person_in_room) (params -Find_person-) (step ))",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
# find person, 2 parameters
{"params": ["Find_person", "Person", "Person_location"],
"Find_person": [["find", "locate", "look_for", "meet"], [], [], []],
"Person": [[], [], ["person"], []],
"Person_location": [[], [], ["place", "room"], []],
"conceptual_dependency": "(task (plan user_speech) (action_type find_person_in_room) (params -Person- -Person_location-) (step ))",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
# deliver to me
{"params": ["Action_deliver", "Person"],
"Action_deliver": [["give", "bring", "deliver", "hand"], [], [], []],
"Person": [["me"], [], [], []],
"conceptual_dependency":"(task (plan user_speech) (action_type update_object_location) (params location current_loc) (step ))" +
"(task (plan user_speech) (action_type handover_object) (params ) (step ))",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
# deliver to person
{"params": ["Action_deliver", "Person"],
"Action_deliver": [["give", "bring", "deliver", "hand"], [], [], []],
"Person": [[], [], ["person"], []],
"conceptual_dependency":"(task (plan user_speech) (action_type find_person_in_room) (params -Person-) (step ))" +
"(task (plan user_speech) (action_type handover_object) (params ) (step ))",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
# deliver to person, 2 parameters
{"params": ["Action_deliver", "Person", "Location"],
"Action_deliver": [["give", "bring", "deliver", "hand"], [], [], []],
"Person": [[], [], ["person"], []],
"Location": [[], [], ["place", "room"], []],
"conceptual_dependency":"(task (plan user_speech) (action_type find_person_in_room) (params -Person- -Location-) (step ))" +
"(task (plan user_speech) (action_type handover_object) (params ) (step ))",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
#grasp object, 2 parameters
{"params": ["Action_get", "Get_object", "Source_get"],
"Action_get": [["get", "grasp", "take"], [], [], []],
"Get_object": [[], [], ["item"], []],
"Source_get": [[], [], ["place", "room"], []],
"conceptual_dependency": "(task (plan user_speech) (action_type get_object) (params -Get_object- -Source_get-) (step )) ",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
#grasp object, 1 parameter
{"params": ["Action_get", "Get_object"],
"Action_get": [["get", "grasp", "take", "look_for"], [], [], []],
"Get_object": [[], [], ["item"], []],
"conceptual_dependency": "(task (plan user_speech) (action_type get_object) (params -Get_object-) (step )) ",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
#deliver in place
{"params": ["Action_place", "Destination_place"],
"Action_place": [["place", "deliver", "put"], [], [], []],
"Destination_place": [[], [], ["place", "room"], []],
"conceptual_dependency": "(task (plan user_speech) (action_type deliver_in_position) (params -Destination_place-) (step ))",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
#follow, 1 parameter
{"params": ["Action_follow", "Pron"],
"Action_follow": [["follow", "after"], [], [], []],
"Pron":[["me","us","you","it","him","her","them"],[],[],[]],
"conceptual_dependency": "(task (plan user_speech) (action_type follow_man) (params man no_location) (step ))",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
#follow to room
{"params": ["Action_follow", "Pron", "Location"],
"Action_follow": [["follow", "after"], [], [], []],
"Pron":[["me","us","you","it","him","her","them"],[],[],[]],
"Location":[[], [], ["place", "room"], []],
"conceptual_dependency": "(task (plan user_speech) (action_type follow_man) (params man -Location-) (step ))",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
#guide to room
{"params": ["Action_guide", "Pron", "Location"],
"Action_guide": [["guide" , "escort" ,"take" , "lead" , "accompany"], [], [], []],
"Pron":[["me","us","you","it","him","her","them"],[],[],[]],
"Location":[[], [], ["place", "room"], []],
"conceptual_dependency": "(task (plan user_speech) (action_type get_object) (params man_guide -Location-) (step ))",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
#guide to room, 2 parameters (was commented out for the Sydney GPSR)
#{"params": ["Action_guide", "Person", "Location"],
#"Action_guide": [["guide" , "escort" ,"take" , "lead" , "accompany"], [], [], []],
#"Person": [[], [], ["person"], []],
#"Location":[[], [], ["place"], []],
#"conceptual_dependency": "(task (plan user_speech) (action_type get_object) (params man_guide -Location-) (step ))",
#"verbal_confirmation": '',
#"planner_confirmed": '',
#"planner_not_confirmed": ''},
#####################NAGOYA 2017
##########################
####################### MANIPULATION
#$take to the {placement 2}
{"params": ["Action_take", "Object", "To", "Place"],
"Action_take": [[ "get" , "grasp" , "take" , "pick up", "bring", "place"], [], [], []],
"Object": [[], [], ["item"], []],
"To": [["to"], [], [], []],
"Place": [[], [], ["place", "room"], []],
"conceptual_dependency": "(task (plan user_speech) (action_type get_object) (params -Object- default_location) (step )) " +
"(task (plan user_speech) (action_type deliver_in_position) (params -Place-) (step ))",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
#$vbplace the $object on the {placement 2}
{"params": ["Action_take", "Object", "Place"],
"Action_take": [["put", "place", "deliver"], [], [], []],
"Object": [[], [], ["item"], []],
"Place": [[], [], ["place", "room"], []],
"conceptual_dependency": "(task (plan user_speech) (action_type get_object) (params -Object- default_location) (step )) " +
"(task (plan user_speech) (action_type deliver_in_position) (params -Place-) (step ))",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
#$deliver = $vbbring me the $object
{"params": ["Action_take", "Person", "Object"],
"Action_take": [["bring", "give", "deliver"], [], [], []],
"Person": [["me"], [], [], []],
"Object": [[], [], ["item"], []],
"conceptual_dependency": "(task (plan user_speech) (action_type get_object) (params -Object- default_location) (step )) " +
"(task (plan user_speech) (action_type update_object_location) (params location current_loc) (step ))" +
"(task (plan user_speech) (action_type handover_object) (params ) (step ))",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
#$vbdeliver the $object to $someone
{"params": ["Action_take", "Object", "Person", "Location"],
"Action_take": [["bring", "give", "deliver"], [], [], []],
"Object": [[], [], ["item"], []],
"Person": [[], [], ["person"], []],
"Location": [[], [], ["place", "room"], []],
"conceptual_dependency": "(task (plan user_speech) (action_type get_object) (params -Object- default_location) (step )) " +
"(task (plan user_speech) (action_type find_person_in_room) (params -Person- -Location-) (step ))" +
"(task (plan user_speech) (action_type handover_object) (params ) (step ))",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
#$takefrom to the {placement 2}
{"params": ["Action_take", "Object","Place_first","To", "Place_second"],
"Action_take": [["get" , "grasp" , "take" , "pick up", "bring"], [], [], []],
"Object": [[], [], ["item"], []],
"Place_first": [[], [], ["place", "room"], []],
"To": [["to"], [], [], []],
"Place_second": [[], [], ["place", "room"], []],
"conceptual_dependency": "(task (plan user_speech) (action_type get_object) (params -Object- -Place_first-) (step )) " +
"(task (plan user_speech) (action_type deliver_in_position) (params -Place_second-) (step ))",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
#$goplace, $vbfind the $object, and ($delivme | $delivat)
#$goplace, $vbfind the $object, and $place
#$vbfind the $object, 1 parameter
{"params": ["Action_get", "Get_object"],
"Action_get": [["find", "look_for", "locate"], [], [], []],
"Get_object": [[], [], ["item"], []],
"conceptual_dependency": "(task (plan user_speech) (action_type get_object) (params -Get_object-) (step )) ",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
#$vbbring (me | to $whowhere) the {kobject} from the {placement}
{"params": ["Action_take", "Person", "Place_first", "Object", "Place_second"],
"Action_take": [["bring" , "give"], [], [], []],
"Person": [[], [], ["person"], []],
"Place_first": [[], [], ["place", "room"], []],
"Object": [[], [], ["item"], []],
"Place_second": [[], [], ["place", "room"], []],
"conceptual_dependency": "(task (plan user_speech) (action_type get_object) (params -Object- -Place_second-) (step )) " +
"(task (plan user_speech) (action_type find_person_in_room) (params -Person- -Place_first-) (step ))" | |
<filename>pysmartnode/components/devices/climate/__init__.py
# Author: <NAME>
# Copyright <NAME> 2019-2020 Released under the MIT license
# Created on 2019-10-10
"""
example config:
{
package: .devices.climate
component: Climate
constructor_args: {
temperature_sensor: "mysensor" # temperature sensor name or object. can also be a remote sensor
heating_unit: "myswitch" # heating unit name or object. implemented as ComponentSwitch turning on/off the heating unit
modes: ["off","heat"] # all supported modes. cooling, auto and fan not implemented.
# temp_step: 0.1 # temperature steps in homeassistant gui
# precision: 0.1 # temperature sensor precision in homeassistant
# min_temp: 16 # optional, minimal possible target temp
# max_temp: 28 # optional, maximal possible target temp
# temp_low: 20 # optional, initial temperature low if no value saved by mqtt
# temp_high: 21 # optional, initial temperature high if no value saved by mqtt
# away_temp_low: 16 # optional, initial away temperature low if no value saved by mqtt
# away_temp_high: 17 # optional, initial away temperature high if no value saved by mqtt
# single_temp: false # optional, use a single temperature instead of temp_low and temp_high
# tolerance_single_temp: 0.5 # optional, tolerance when using single temperature.
# interval: 300 # optional, defaults to 300s; interval at which the component checks the temperature and switches the heater. Should be >60s
# friendly_name: null # optional, friendly name shown in homeassistant gui with mqtt discovery
}
}
Note: - mqtt broker is used to save the state between restarts using retained messages.
- Temp_high/low is supported since Home Assistant 0.100.3; earlier versions were
missing the temp_high/low templates.
Not Implemented:
cooling_unit
fan_unit
"""
__updated__ = "2020-09-01"
__version__ = "0.94"
from pysmartnode import config
from pysmartnode import logging
import uasyncio as asyncio
import gc
import time
from pysmartnode.utils.component import ComponentBase
# imports of ComponentSensor and ComponentSwitch to keep heap fragmentation low
# as those will be needed in any case
gc.collect()
from pysmartnode.utils.component.sensor import ComponentSensor, SENSOR_TEMPERATURE
gc.collect()
from pysmartnode.utils.component.switch import ComponentSwitch
gc.collect()
from pysmartnode.utils import sys_vars
import ujson
from .definitions import *
COMPONENT_NAME = "Climate"
_COMPONENT_TYPE = "climate"
_mqtt = config.getMQTT()
_log = logging.getLogger(COMPONENT_NAME)
gc.collect()
_unit_index = -1
class Climate(ComponentBase):
def __init__(self, temperature_sensor: ComponentSensor, heating_unit: ComponentSwitch,
modes: list, interval: float = 300, temp_step=0.1, min_temp: float = 16,
max_temp: float = 26, temp_low: float = 20, temp_high: float = 21,
away_temp_low: float = 16, away_temp_high: float = 17,
single_temp: bool = False, tolerance_single_temp: float = 0.5,
friendly_name=None, **kwargs):
self.checkSensorType(temperature_sensor, SENSOR_TEMPERATURE)
self.checkSwitchType(heating_unit)
# This makes it possible to use multiple instances of this component
global _unit_index
_unit_index += 1
super().__init__(COMPONENT_NAME, __version__, _unit_index, logger=_log, **kwargs)
self._temp_step = temp_step
self._min_temp = min_temp
self._max_temp = max_temp
self._stemp = single_temp
self._tolerance = tolerance_single_temp
self.temp_sensor: ComponentSensor = temperature_sensor
self.heating_unit: ComponentSwitch = heating_unit
self._modes = {}
if "off" not in modes:
modes.append("off")
for mode in modes:
if mode not in MODES_SUPPORTED:
_log.error("Mode {!s} not supported".format(mode))
modes.remove(mode)
else:
try:
mod = __import__("pysmartnode.components.devices.climate.{}".format(mode),
globals(), locals(), [], 0)
except ImportError as e:
_log.error("Mode {!s} not available: {!s}".format(mode, e))
continue
if hasattr(mod, mode):
modeobj = getattr(mod, mode)
else:
_log.error("Mode {!s} has no class {!r}".format(mode, mode))
continue
try:
modeobj = modeobj(self)
except Exception as e:
_log.error("Error creating mode {!s} object: {!s}".format(mode, e))
continue
self._modes[mode] = modeobj
self._frn = friendly_name
self.state = {CURRENT_TEMPERATURE_HIGH: temp_high, # current temperature high
CURRENT_TEMPERATURE_LOW: temp_low, # current temperature low
CURRENT_TEMPERATURE_SINGLE: (temp_high - temp_low) / 2 + temp_low,
AWAY_MODE_STATE: AWAY_OFF, # away mode "ON"/"OFF"
STORAGE_AWAY_TEMPERATURE_HIGH: away_temp_high, # away temperature high, storage value
STORAGE_AWAY_TEMPERATURE_LOW: away_temp_low, # away temperature low, storage value
STORAGE_TEMPERATURE_HIGH: temp_high, # temperature high, storage value
STORAGE_TEMPERATURE_LOW: temp_low, # temperature low, storage value
CURRENT_MODE: str(self._modes["off"]),
CURRENT_ACTION: ACTION_OFF}
self.event = asyncio.Event()
self.lock = asyncio.Lock()
# every external change (like mode) that could break an ongoing trigger needs
# to be protected by self.lock.
self.log = _log
gc.collect()
self._mode_topic = _mqtt.getDeviceTopic(
"{!s}{!s}/statem/set".format(COMPONENT_NAME, self._count))
self._temp_low_topic = _mqtt.getDeviceTopic(
"{!s}{!s}/statetl/set".format(COMPONENT_NAME, self._count))
self._temp_high_topic = _mqtt.getDeviceTopic(
"{!s}{!s}/stateth/set".format(COMPONENT_NAME, self._count))
self._temp_single_topic = _mqtt.getDeviceTopic(
"{!s}{!s}/statet/set".format(COMPONENT_NAME, self._count))
self._away_topic = _mqtt.getDeviceTopic(
"{!s}{!s}/stateaw/set".format(COMPONENT_NAME, self._count))
_mqtt.subscribeSync(self._mode_topic, self.changeMode, self)
_mqtt.subscribeSync(self._temp_low_topic, self.changeTempLow, self)
_mqtt.subscribeSync(self._temp_high_topic, self.changeTempHigh, self)
_mqtt.subscribeSync(self._temp_single_topic, self.changeTempSingle, self)
_mqtt.subscribeSync(self._away_topic, self.changeAwayMode, self)
self._restore_done = False
asyncio.create_task(self._loop(interval))
async def _init_network(self):
await _mqtt.awaitSubscriptionsDone() # wait until subscriptions are done
# because received messages will take up RAM and the discovery message
# of climate is very big and could easily fail if RAM is fragmented.
gc.collect()
await asyncio.sleep(1)
gc.collect()
await super()._init_network()
# let discovery succeed first because it is a big message
await _mqtt.subscribe(
_mqtt.getDeviceTopic("{!s}{!s}/state".format(COMPONENT_NAME, self._count)),
self._restore, self)
gc.collect()
async def _loop(self, interval):
t = time.ticks_ms()
while not self._restore_done and time.ticks_diff(time.ticks_ms(), t) < 30000:
await asyncio.sleep(1)
# wait for network to finish so the old state can be restored, or time out (30s)
if not self._restore_done:
await _mqtt.unsubscribe(
_mqtt.getDeviceTopic("{!s}{!s}/state".format(COMPONENT_NAME, self._count)), self)
self._restore_done = True
self.event.set()
await asyncio.sleep(1)
t = 0
while True:
try:
await asyncio.wait_for(self.event.wait(),
interval - time.ticks_diff(time.ticks_ms(), t) / 1000)
except asyncio.TimeoutError:
pass
else:
self.event.clear()
async with self.lock:
cur_temp = await self.temp_sensor.getValue(SENSOR_TEMPERATURE)
try:
await self._modes[self.state[CURRENT_MODE]].trigger(self, cur_temp)
except Exception as e:
_log.error(
"Error executing mode {!s}: {!s}".format(self.state[CURRENT_MODE], e))
await _mqtt.publish(
_mqtt.getDeviceTopic("{!s}{!s}/state".format(COMPONENT_NAME, self._count)),
self.state, qos=1, retain=True, timeout=4)
t = time.ticks_ms()
async def _restore(self, topic, msg, retain):
# used to restore the state after a restart
await _mqtt.unsubscribe(
_mqtt.getDeviceTopic("{!s}{!s}/state".format(COMPONENT_NAME, self._count)), self)
mode = msg[CURRENT_MODE]
del msg[CURRENT_MODE]
del msg[CURRENT_ACTION] # is going to be set after trigger()
self.state.update(msg)
try:
await self.changeMode(topic, mode, retain) # uses lock
except AttributeError as e:
await _log.asyncLog("error", e, timeout=10)
self._restore_done = True
await asyncio.sleep(1)
self.event.set()
def _updateSingleTemp(self):
self.state[CURRENT_TEMPERATURE_SINGLE] = (self.state[CURRENT_TEMPERATURE_HIGH] -
self.state[CURRENT_TEMPERATURE_LOW]) / 2 + \
self.state[CURRENT_TEMPERATURE_LOW]
def _updateHiLoTemp(self):
self.state[CURRENT_TEMPERATURE_HIGH] = self.state[
CURRENT_TEMPERATURE_SINGLE] + self._tolerance
self.state[CURRENT_TEMPERATURE_LOW] = self.state[
CURRENT_TEMPERATURE_SINGLE] - self._tolerance
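# Worked example for the two helpers above (illustration only): with
# CURRENT_TEMPERATURE_LOW = 20 and CURRENT_TEMPERATURE_HIGH = 21,
# _updateSingleTemp() sets the single target to (21 - 20) / 2 + 20 = 20.5.
# Conversely, with a single target of 20.5 and tolerance_single_temp = 0.5,
# _updateHiLoTemp() restores high = 21.0 and low = 20.0.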
async def changeAwayMode(self, topic, msg, retain):
if msg in _mqtt.payload_on:
if self.state[AWAY_MODE_STATE] == AWAY_ON:
return False # no publish needed as done in _loop
async with self.lock:
self.state[AWAY_MODE_STATE] = AWAY_ON
self.state[CURRENT_TEMPERATURE_HIGH] = self.state[STORAGE_AWAY_TEMPERATURE_HIGH]
self.state[CURRENT_TEMPERATURE_LOW] = self.state[STORAGE_AWAY_TEMPERATURE_LOW]
self._updateSingleTemp()
self.event.set()
return False # no publish needed as done in _loop
elif msg in _mqtt.payload_off:
if self.state[AWAY_MODE_STATE] == AWAY_OFF:
return False # no publish needed as done in _loop
async with self.lock:
self.state[AWAY_MODE_STATE] = AWAY_OFF
self.state[CURRENT_TEMPERATURE_HIGH] = self.state[STORAGE_TEMPERATURE_HIGH]
self.state[CURRENT_TEMPERATURE_LOW] = self.state[STORAGE_TEMPERATURE_LOW]
self._updateSingleTemp()
self.event.set()
return False # no publish needed as done in _loop
else:
raise TypeError("Unsupported payload {!s}".format(msg))
async def changeMode(self, topic, msg, retain):
if msg not in self._modes:
raise AttributeError("Mode {!s} not supported".format(msg))
if msg == self.state[CURRENT_MODE]:
return False # no publish needed as done in _loop # mode already active
async with self.lock:
mode = self._modes[msg]
if await self._modes[self.state[CURRENT_MODE]].deactivate(self):
if await mode.activate(self):
self.state[CURRENT_MODE] = msg
self.event.set()
return False # no publish needed as done in _loop
else:
self.state[CURRENT_MODE] = MODE_OFF
await self._modes[MODE_OFF].activate(self)
self.event.set()
return False
else:
return False
async def changeTempHigh(self, topic, msg, retain):
msg = float(msg)
if msg > self._max_temp:
raise ValueError("Can't set temp to {!s}, max temp is {!s}".format(msg,
self._max_temp))
if self.state[CURRENT_TEMPERATURE_HIGH] == msg:
return False # already set to requested temperature, prevents unneeded event & publish
self.state[CURRENT_TEMPERATURE_HIGH] = msg
self._updateSingleTemp()
if self.state[AWAY_MODE_STATE] == AWAY_ON:
self.state[STORAGE_AWAY_TEMPERATURE_HIGH] = msg
else:
self.state[STORAGE_TEMPERATURE_HIGH] = msg
self.event.set()
return False
async def changeTempLow(self, topic, msg, retain):
msg = float(msg)
if msg < self._min_temp:
raise ValueError("Can't set temp to {!s}, min temp is {!s}".format(msg,
self._min_temp))
if self.state[CURRENT_TEMPERATURE_LOW] == msg:
return False # already set to requested temperature, prevents unneeded event & publish
self.state[CURRENT_TEMPERATURE_LOW] = msg
self._updateSingleTemp()
if self.state[AWAY_MODE_STATE] == AWAY_ON:
self.state[STORAGE_AWAY_TEMPERATURE_LOW] = msg
else:
self.state[STORAGE_TEMPERATURE_LOW] = msg
self.event.set()
return False
async def changeTempSingle(self, topic, msg, retain):
msg = float(msg)
if not self._min_temp < msg < self._max_temp:
raise ValueError(
"Can't set temp to {!s}, min temp is {!s}, max temp is {!s}".format(msg,
self._min_temp,
self._max_temp))
if self.state[CURRENT_TEMPERATURE_SINGLE] == msg:
return False # already set to requested temperature, prevents unneeded event & publish
await self.changeTempHigh(topic, msg + self._tolerance, retain)
await self.changeTempLow(topic, msg - self._tolerance, retain)
if self.state[CURRENT_TEMPERATURE_SINGLE] != msg:
self.log.error("Single temp not as requested, set {!s} but received {!s}".format(
self.state[CURRENT_TEMPERATURE_SINGLE], msg))
return False
async def _discovery(self, register=True):
name = "{!s}{!s}".format(COMPONENT_NAME, self._count)
base_topic = _mqtt.getRealTopic(_mqtt.getDeviceTopic(name))
modes = ujson.dumps([str(mode) for mode in self._modes])
gc.collect()
if register:
sens = CLIMATE_DISCOVERY.format(base_topic, self._frn or name,
self._composeAvailability(),
sys_vars.getDeviceID(), name, # unique_id
_mqtt.getRealTopic(
self.temp_sensor.getTopic(SENSOR_TEMPERATURE)),
# current_temp_topic
self.temp_sensor.getTemplate(SENSOR_TEMPERATURE),
# cur_temp_template
self._temp_step,
self._min_temp + (
self._tolerance if self._stemp else 0),
self._max_temp - (
self._tolerance if self._stemp else 0), modes,
CLIMATE_DISCOVERY_STEMP if self._stemp else CLIMATE_DISCOVERY_HILOW,
sys_vars.getDeviceDiscovery())
else:
sens = ""  # assumption: an empty discovery payload removes the entity in Home Assistant
<reponame>evgeniimv/cloud-pipeline
# Copyright 2017-2019 EPAM Systems, Inc. (https://www.epam.com/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import errno
import logging
import os
from datetime import datetime, timedelta
from pipeline import PipelineAPI, Logger as CloudPipelineLogger
import subprocess
import time
import multiprocessing
class ExecutionError(RuntimeError):
pass
class ParsingError(RuntimeError):
pass
class LoggingError(RuntimeError):
pass
class ScalingError(RuntimeError):
pass
class Logger:
task = None
cmd = None
verbose = None
@staticmethod
def init(cmd=True, task=None, log_file=None, verbose=False):
if not cmd and (not task or not log_file):
raise LoggingError('Arguments \'task\' and \'log_file\' should be specified if \'cmd\' is False.')
Logger.task = task
Logger.cmd = cmd
Logger.verbose = verbose
if cmd:
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(message)s')
else:
make_dirs(os.path.dirname(log_file))
logging.basicConfig(filename=log_file,
level=logging.INFO,
format='%(asctime)s %(message)s')
@staticmethod
def info(message, crucial=False, *args, **kwargs):
logging.info(message, *args, **kwargs)
if not Logger.cmd and (crucial or Logger.verbose):
CloudPipelineLogger.info(message, task_name=Logger.task)
@staticmethod
def warn(message, crucial=False, *args, **kwargs):
logging.warn(message, *args, **kwargs)
if not Logger.cmd and (crucial or Logger.verbose):
CloudPipelineLogger.warn(message, task_name=Logger.task)
@staticmethod
def success(message, crucial=True, *args, **kwargs):
logging.info(message, *args, **kwargs)
if not Logger.cmd and (crucial or Logger.verbose):
CloudPipelineLogger.success(message, task_name=Logger.task)
@staticmethod
def fail(message, crucial=True, *args, **kwargs):
logging.error(message, *args, **kwargs)
if not Logger.cmd and (crucial or Logger.verbose):
CloudPipelineLogger.fail(message, task_name=Logger.task)
class CmdExecutor:
def __init__(self):
pass
def execute(self, command):
process = subprocess.Popen(command, shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
out, err = process.communicate()
exit_code = process.wait()
if exit_code != 0:
Logger.warn('Command \'%s\' execution has failed due to %s.' % (command, err))
raise ExecutionError('Command \'%s\' execution has failed due to %s.' % (command, err))
return out
def execute_to_lines(self, command):
return self._non_empty(self.execute(command).splitlines())
def _non_empty(self, elements):
return [element for element in elements if element.strip()]
class KubernetesPod:
def __init__(self, ip, name):
self.ip = ip
self.name = name
class GridEngineJobState:
RUNNING = 'running'
PENDING = 'pending'
SUSPENDED = 'suspended'
ERROR = 'errored'
DELETED = 'deleted'
_letter_codes_to_states = {
RUNNING: ['r', 't', 'Rr', 'Rt'],
PENDING: ['qw', 'hqw', 'hRwq'],
SUSPENDED: ['s', 'ts', 'S', 'tS', 'T', 'tT', 'Rs', 'Rts', 'RS', 'RtS', 'RT', 'RtT'],
ERROR: ['Eqw', 'Ehqw', 'EhRqw'],
DELETED: ['dr', 'dt', 'dRr', 'dRt', 'ds', 'dS', 'dT', 'dRs', 'dRS', 'dRT']
}
@staticmethod
def from_letter_code(code):
for key in GridEngineJobState._letter_codes_to_states:
if code in GridEngineJobState._letter_codes_to_states[key]:
return key
raise ParsingError('Unknown sge job state: %s.' % code)
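# For example, from_letter_code('qw') returns GridEngineJobState.PENDING and
# from_letter_code('Eqw') returns GridEngineJobState.ERROR; a code outside the
# mapping raises a ParsingError.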
class GridEngineJob:
def __init__(self, id, name, user, state, datetime, host=None, array=None):
self.id = id
self.name = name
self.user = user
self.state = state
self.datetime = datetime
self.host = host
self.array = array
class GridEngine:
_MAIN_Q = os.getenv('CP_CAP_SGE_QUEUE_NAME', 'main.q')
_PARALLEL_ENVIRONMENT = os.getenv('CP_CAP_SGE_PE_NAME', 'local')
_ALL_HOSTS = '@allhosts'
_DELETE_HOST = 'qconf -de %s'
_SHOW_PARALLEL_ENVIRONMENT_SLOTS = 'qconf -sp %s | grep "^slots" | awk \'{print $2}\''
_REPLACE_PARALLEL_ENVIRONMENT_SLOTS = 'qconf -rattr pe slots %s %s'
_REMOVE_HOST_FROM_HOST_GROUP = 'qconf -dattr hostgroup hostlist %s %s'
_REMOVE_HOST_FROM_QUEUE_SETTINGS = 'qconf -purge queue slots %s@%s'
_SHUTDOWN_HOST_EXECUTION_DAEMON = 'qconf -ke %s'
_QSTAT = 'qstat -u "*"'
_QSTAT_DATETIME_FORMAT = '%m/%d/%Y %H:%M:%S'
_QSTAT_COLUMNS = ['job-ID', 'prior', 'name', 'user', 'state', 'submit/start at', 'queue', 'slots', 'ja-task-ID']
_QMOD_DISABLE = 'qmod -d %s@%s'
_QMOD_ENABLE = 'qmod -e %s@%s'
_SHOW_EXECUTION_HOST = 'qconf -se %s'
_KILL_JOBS = 'qdel %s'
_FORCE_KILL_JOBS = 'qdel -f %s'
def __init__(self, cmd_executor):
self.cmd_executor = cmd_executor
def get_jobs(self):
"""
Executes command and parse its output. The expected output is something like the following:
job-ID prior name user state submit/start at queue slots ja-task-ID
-----------------------------------------------------------------------------------------------------------------
2 0.75000 sleep root r 12/21/2018 11:48:00 main.q@pipeline-38415 1
9 0.25000 sleep root qw 12/21/2018 12:39:38 1
11 0.25000 sleep root qw 12/21/2018 14:34:43 1 1-10:1
:return: Grid engine jobs list.
"""
lines = self.cmd_executor.execute_to_lines(GridEngine._QSTAT)
if len(lines) == 0:
return []
jobs = []
indentations = [lines[0].index(column) for column in GridEngine._QSTAT_COLUMNS]
for line in lines[2:]:
jobs.append(GridEngineJob(
id=self._by_indent(line, indentations, 0),
name=self._by_indent(line, indentations, 2),
user=self._by_indent(line, indentations, 3),
state=GridEngineJobState.from_letter_code(self._by_indent(line, indentations, 4)),
datetime=self._parse_date(line, indentations),
host=self._parse_host(line, indentations),
array=self._parse_array(line, indentations)
))
return jobs
def _parse_date(self, line, indentations):
return datetime.strptime(self._by_indent(line, indentations, 5), GridEngine._QSTAT_DATETIME_FORMAT)
def _parse_host(self, line, indentations):
queue_and_host = self._by_indent(line, indentations, 6)
return queue_and_host.split('@')[1] if queue_and_host else None
def _parse_array(self, line, indentations):
array_jobs = self._by_indent(line, indentations, 8)
if not array_jobs:
return None
if ':' in array_jobs:
array_borders, _ = array_jobs.split(':')
start, stop = array_borders.split('-')
return list(range(int(start), int(stop) + 1))
elif ',' in array_jobs:
return list(map(int, array_jobs.split(',')))
else:
return [int(array_jobs)]
def _by_indent(self, line, indentations, index):
if index >= len(indentations) - 1:
return line[indentations[index]:].strip()
else:
return line[indentations[index]:indentations[min(len(indentations) - 1, index + 1)]].strip()
def disable_host(self, host, queue=_MAIN_Q):
"""
Disables host to prevent receiving new jobs from the given queue.
This command does not abort currently running jobs.
:param host: Host to be disabled.
:param queue: Queue that host is a part of.
"""
self.cmd_executor.execute(GridEngine._QMOD_DISABLE % (queue, host))
def enable_host(self, host, queue=_MAIN_Q):
"""
Enables host to make it available to receive new jobs from the given queue.
:param host: Host to be enabled.
:param queue: Queue that host is a part of.
"""
self.cmd_executor.execute(GridEngine._QMOD_ENABLE % (queue, host))
def increase_parallel_environment_slots(self, slots, pe=_PARALLEL_ENVIRONMENT):
"""
Increases the number of parallel environment slots.
:param slots: Number of slots to append.
:param pe: Parallel environment to update number of slots for.
"""
pe_slots = self.get_parallel_environment_slots(pe)
self.cmd_executor.execute(GridEngine._REPLACE_PARALLEL_ENVIRONMENT_SLOTS % (pe_slots + slots, pe))
def decrease_parallel_environment_slots(self, slots, pe=_PARALLEL_ENVIRONMENT):
"""
Decreases the number of parallel environment slots.
:param slots: Number of slots to subtract.
:param pe: Parallel environment to update number of slots for.
"""
pe_slots = self.get_parallel_environment_slots(pe)
self.cmd_executor.execute(GridEngine._REPLACE_PARALLEL_ENVIRONMENT_SLOTS % (pe_slots - slots, pe))
def get_parallel_environment_slots(self, pe=_PARALLEL_ENVIRONMENT):
"""
Returns number of the parallel environment slots.
:param pe: Parallel environment to return number of slots for.
"""
return int(self.cmd_executor.execute(GridEngine._SHOW_PARALLEL_ENVIRONMENT_SLOTS % pe).strip())
def delete_host(self, host, queue=_MAIN_Q, hostgroup=_ALL_HOSTS, skip_on_failure=False):
"""
Completely deletes host from GE:
1. Shutdown host execution daemon.
2. Removes host from queue settings.
3. Removes host from host group.
4. Removes host from GE.
:param host: Host to be removed.
:param queue: Queue host is a part of.
:param hostgroup: Host group queue uses.
:param skip_on_failure: Specifies if the host killing should be continued even if some of
the commands have failed.
"""
self._shutdown_execution_host(host, skip_on_failure=skip_on_failure)
self._remove_host_from_queue_settings(host, queue, skip_on_failure=skip_on_failure)
self._remove_host_from_host_group(host, hostgroup, skip_on_failure=skip_on_failure)
self._remove_host_from_grid_engine(host, skip_on_failure=skip_on_failure)
def _shutdown_execution_host(self, host, skip_on_failure):
self._perform_command(
action=lambda: self.cmd_executor.execute(GridEngine._SHUTDOWN_HOST_EXECUTION_DAEMON % host),
msg='Shutdown GE host execution daemon.',
error_msg='Shutdown GE host execution daemon has failed.',
skip_on_failure=skip_on_failure
)
def _remove_host_from_queue_settings(self, host, queue, skip_on_failure):
self._perform_command(
action=lambda: self.cmd_executor.execute(GridEngine._REMOVE_HOST_FROM_QUEUE_SETTINGS % (queue, host)),
msg='Remove host from queue settings.',
error_msg='Removing host from queue settings has failed.',
skip_on_failure=skip_on_failure
)
def _remove_host_from_host_group(self, host, hostgroup, skip_on_failure):
self._perform_command(
action=lambda: self.cmd_executor.execute(GridEngine._REMOVE_HOST_FROM_HOST_GROUP % (host, hostgroup)),
msg='Remove host from host group.',
error_msg='Removing host from host group has failed.',
skip_on_failure=skip_on_failure
)
def _remove_host_from_grid_engine(self, host, skip_on_failure):
self._perform_command(
action=lambda: self.cmd_executor.execute(GridEngine._DELETE_HOST % host),
msg='Remove host from GE.',
error_msg='Removing host from GE has failed.',
skip_on_failure=skip_on_failure
)
def _perform_command(self, action, msg, error_msg, skip_on_failure):
Logger.info(msg)
try:
action()
except RuntimeError as e:
Logger.warn(error_msg)
if not skip_on_failure:
raise RuntimeError(error_msg, e)
def is_valid(self, host):
"""
Validates host in GE checking corresponding execution host availability.
:param host: Host to be checked.
:return: True if execution host exists.
"""
try:
self.cmd_executor.execute_to_lines(GridEngine._SHOW_EXECUTION_HOST % host)
return True
except RuntimeError:
Logger.warn('Execution host %s in GE wasn\'t found.' % host)
return False
def kill_jobs(self, jobs, force=False):
"""
Kills jobs in GE.
:param jobs: Grid engine jobs.
:param force: Specifies if this command should be performed with -f flag.
"""
job_ids = []
for job in jobs:
job_id = str(job.id)
job_array_index = '' if not job.array or len(job.array) > 1 else ('.%s' % job.array[0])
job_ids.append(job_id + job_array_index)
self.cmd_executor.execute((GridEngine._FORCE_KILL_JOBS if force else GridEngine._KILL_JOBS) % ' '.join(job_ids))
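# Hedged usage sketch (illustration only, not part of the original script):
# wiring GridEngine to a CmdExecutor and counting pending jobs. Assumes an SGE
# installation with qstat/qconf on PATH and that Logger.init() was called.
def _example_count_pending_jobs():
    grid_engine = GridEngine(CmdExecutor())
    pending = [job for job in grid_engine.get_jobs()
               if job.state == GridEngineJobState.PENDING]
    Logger.info('Found %s pending jobs.' % len(pending))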
class Clock:
def __init__(self):
pass
def now(self):
return datetime.now()
class GridEngineScaleUpHandler:
_POLL_ATTEMPTS = 60
_POLL_DELAY = 10
def __init__(self, cmd_executor, pipe, grid_engine, host_storage, parent_run_id, default_hostfile, instance_disk,
instance_type, instance_image, price_type, region_id, instance_cores, polling_timeout,
polling_delay=_POLL_DELAY):
"""
Grid engine scale up implementation. It handles additional nodes launching and hosts configuration (/etc/hosts
and self.default_hostfile).
:param cmd_executor: Cmd executor.
:param pipe: Cloud pipeline client.
:param grid_engine: Grid engine client.
self.keys['(d) G141_Spectral_Coverage'] = 0
self.keys['(e) '+self.gris1+'Emission_Lines_Multiple'] = 0
if not self.ACSins: self.keys['(f) '+self.gris2+'Emission_Lines_Multiple'] = 0
self.keys['(g) G102_Contamination_Level'] = 0
self.keys['(h) G141_Contamination_Level'] = 0
self.keys['(i) '+self.gris1+'Defect'] = 0
if not self.ACSins: self.keys['(j) '+self.gris2+'Defect'] = 0
self.keys['(k) '+self.gris1+'Contam_Defect'] = 0
self.keys['(l) Spectral_Coverage_Type'] = 0
self.keys['(m) '+self.gris1+'Mild_Contamination'] = 0
if not self.ACSins: self.keys['(n) '+self.gris2+'Mild_Contamination'] = 0
self.keys['(o) '+self.gris2+'Contam_Defect'] = 0
self.keys['(p) Contamination_Level_Type'] = 0
self.keys['(q) '+self.gris1+'Moderate_Contamination'] = 0
if not self.ACSins: self.keys['(r) '+self.gris2+'Moderate_Contamination'] = 0
self.keys['(s) '+self.dirstr+'Defect'] = 0
self.keys['(t) empty7'] = 0
self.keys['(u) '+self.gris1+'Severe_Contamination'] = 0
if not self.ACSins: self.keys['(v) '+self.gris2+'Severe_Contamination'] = 0
self.keys['(w) '+self.dirstr+'Star'] = 0
self.keys['(x) I_have_no_idea'] = 0
self.keys['(y) '+self.gris1+'Continuum'] = 0
if not self.ACSins: self.keys['(z) '+self.gris2+'Continuum'] = 0
if (sys.version_info[0] == 2) & (sys.version_info[1] == 7): # sort dictionary if running python 2.7
import collections
self.keys = collections.OrderedDict(sorted(self.keys.items()))
else:
print 'WARNING: Python version is not 2.7, so the keyword dictionary (1) is not sorted'
Nkey = 0
self.cbdic = {}
self.sliderdic = {}
for key in self.keys:
rowval = position[0]+int(np.floor(Nkey/self.Ncol))
colval = position[1]+int(np.round((Nkey/self.Ncol-np.floor((Nkey/self.Ncol)))*self.Ncol))
self.keys[key] = Variable()
if key[1] in self.sliders:
self.slider = Scale(self, from_=0, to=4,label=key,variable = self.keys[key],
orient=HORIZONTAL,background=colors[key[1]],length=200)
self.slider.grid(row=rowval,column=colval,columnspan=position[2],rowspan=2,sticky=W)
self.slider.set(0)
if disable:
self.slider.configure(state='disabled')
else:
self.sliderdic[key] = self.slider
elif key[1] in self.empty:
self.cb = Checkbutton(self, text=' ')
self.cb.grid(row=position[0]+5,column=0,columnspan=1,sticky=W)
self.cb.deselect()
self.keys[key].set('-1')
if key[1] in self.calculations:
self.keys[key].set(key)
else:
self.cb = Checkbutton(self, text=key, variable=self.keys[key],background=colors[key[1]])
self.cb.grid(row=rowval,column=colval,columnspan=position[2],sticky=W)
self.cb.deselect()
if disable:
self.cb.configure(state='disabled')
else:
self.cbdic[key] = self.cb
Nkey = Nkey + 1
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def checkboxes2(self,position,disable=False):
"""
Checkboxes for second PA
"""
# Note that letters in () enables sorting of boxes
colors = self.getcolors()
self.keys2 = {}
self.keys2['(A) '+self.gris1+'Emission_Line'] = 0
if not self.ACSins: self.keys2['(B) '+self.gris2+'Emission_Line'] = 0
self.keys2['(C) G102_Spectral_Coverage'] = 0
self.keys2['(D) G141_Spectral_Coverage'] = 0
self.keys2['(E) '+self.gris1+'Emission_Lines_Multiple'] = 0
if not self.ACSins: self.keys2['(F) '+self.gris2+'Emission_Lines_Multiple'] = 0
self.keys2['(G) G102_Contamination_Level'] = 0
self.keys2['(H) G141_Contamination_Level'] = 0
self.keys2['(I) '+self.gris1+'Defect'] = 0
if not self.ACSins: self.keys2['(J) '+self.gris2+'Defect'] = 0
self.keys2['(K) '+self.gris1+'Contam_Defect'] = 0
self.keys2['(L) Spectral_Coverage_Type'] = 0
self.keys2['(M) '+self.gris1+'Mild_Contamination'] = 0
if not self.ACSins: self.keys2['(N) '+self.gris2+'Mild_Contamination'] = 0
self.keys2['(O) '+self.gris2+'Contam_Defect'] = 0
self.keys2['(P) Contamination_Level_Type'] = 0
self.keys2['(Q) '+self.gris1+'Moderate_Contamination'] = 0
if not self.ACSins: self.keys2['(R) '+self.gris2+'Moderate_Contamination'] = 0
self.keys2['(S) '+self.dirstr+'Defect'] = 0
self.keys2['(T) empty7'] = 0
self.keys2['(U) '+self.gris1+'Severe_Contamination'] = 0
if not self.ACSins: self.keys2['(V) '+self.gris2+'Severe_Contamination'] = 0
self.keys2['(W) '+self.dirstr+'Star'] = 0
self.keys2['(X) I_have_no_idea'] = 0
self.keys2['(Y) '+self.gris1+'Continuum'] = 0
if not self.ACSins: self.keys2['(Z) '+self.gris2+'Continuum'] = 0
if (sys.version_info[0] == 2) & (sys.version_info[1] == 7): # sort dictionary if running python 2.7
import collections
self.keys2 = collections.OrderedDict(sorted(self.keys2.items()))
else:
print 'WARNING: Python version is not 2.7, so the keyword dictionary (2) is not sorted'
Nkey = 0
self.cbdic2 = {}
self.sliderdic2 = {}
for key in self.keys2:
rowval = position[0]+int(np.floor(Nkey/self.Ncol))
colval = position[1]+int(np.round((Nkey/self.Ncol-np.floor((Nkey/self.Ncol)))*self.Ncol))
self.keys2[key] = Variable()
if key[1] in self.sliders:
self.slider2 = Scale(self, from_=0, to=4,label=key,variable = self.keys2[key],
orient=HORIZONTAL,background=colors[key[1]],length=200)
self.slider2.grid(row=rowval,column=colval,columnspan=position[2],rowspan=2,sticky=W)
self.slider2.set(0)
if disable:
self.slider2.configure(state='disabled')
else:
self.sliderdic2[key] = self.slider2
elif key[1] in self.empty:
self.cb2 = Checkbutton(self, text=' ')
self.cb2.grid(row=position[0]+5,column=0,columnspan=1,sticky=W)
self.cb2.deselect()
self.keys2[key].set('-1')
if key[1] in self.calculations:
self.keys2[key].set(key)
else:
self.cb2 = Checkbutton(self, text=key, variable=self.keys2[key],background=colors[key[1]])
self.cb2.grid(row=rowval,column=colval,columnspan=position[2],sticky=W)
self.cb2.deselect()
if disable:
self.cb2.configure(state='disabled')
else:
self.cbdic2[key] = self.cb2
Nkey = Nkey + 1
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def getcolors(self,):
"""
Dictionary with colors for keys
"""
collist = ['orange','red','cyan','magenta','green','white']
colors = {}
colors['a'] = collist[0]
colors['b'] = collist[1]
colors['c'] = collist[4]
colors['d'] = collist[4]
colors['e'] = collist[0]
colors['f'] = collist[1]
colors['g'] = collist[4]
colors['h'] = collist[4]
colors['i'] = collist[0]
colors['j'] = collist[1]
colors['k'] = collist[0]
colors['l'] = collist[4]
colors['m'] = collist[0]
colors['n'] = collist[1]
colors['o'] = collist[1]
colors['p'] = collist[4]
colors['q'] = collist[0]
colors['r'] = collist[1]
colors['s'] = collist[5]
colors['t'] = collist[4]
colors['u'] = collist[0]
colors['v'] = collist[1]
colors['w'] = collist[5]
colors['x'] = collist[5]
colors['y'] = collist[0]
colors['z'] = collist[1]
colors['A'] = collist[2]
colors['B'] = collist[3]
colors['C'] = collist[4]
colors['D'] = collist[4]
colors['E'] = collist[2]
colors['F'] = collist[3]
colors['G'] = collist[4]
colors['H'] = collist[4]
colors['I'] = collist[2]
colors['J'] = collist[3]
colors['K'] = collist[2]
colors['L'] = collist[4]
colors['M'] = collist[2]
colors['N'] = collist[3]
colors['O'] = collist[3]
colors['P'] = collist[4]
colors['Q'] = collist[2]
colors['R'] = collist[3]
colors['S'] = collist[5]
colors['T'] = collist[4]
colors['U'] = collist[2]
colors['V'] = collist[3]
colors['W'] = collist[5]
colors['X'] = collist[5]
colors['Y'] = collist[2]
colors['Z'] = collist[3]
return colors
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def wavefieldG102_1(self,position):
"""
Field to provide emission line wavelength
"""
self.label_G102_1 = Label(self,text='(c) G102 emission line wavelength(s) [A]: ')
self.label_G102_1.grid(row=position[0],column=position[1],columnspan=position[2],sticky=W)
self.linewaveG102_1 = Entry(self)
self.linewaveG102_1.grid(row=position[0],column=position[1]+position[2],
columnspan=position[2],sticky=W)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def wavefieldG141_1(self,position):
"""
Field to provide emission line wavelength
"""
self.label_G141_1 = Label(self,text='(g) G141 emission line wavelength(s) [A]: ')
self.label_G141_1.grid(row=position[0],column=position[1],columnspan=position[2],sticky=W)
self.linewaveG141_1 = Entry(self)
self.linewaveG141_1.grid(row=position[0],column=position[1]+position[2],
columnspan=position[2],sticky=W)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def wavefieldG102_2(self,position):
"""
Field to provide emission line wavelength
"""
self.label_G102_2 = Label(self,text='(C) G102 emission line wavelength(s) [A]: ')
self.label_G102_2.grid(row=position[0],column=position[1],columnspan=position[2],sticky=W)
self.linewaveG102_2 = Entry(self)
self.linewaveG102_2.grid(row=position[0],column=position[1]+position[2],
columnspan=position[2],sticky=W)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def wavefieldG141_2(self,position):
"""
Field to provide emission line wavelength
"""
self.label_G141_2 = Label(self,text='(G) G141 emission line wavelength(s) [A]: ')
self.label_G141_2.grid(row=position[0],column=position[1],columnspan=position[2],sticky=W)
self.linewaveG141_2 = Entry(self)
self.linewaveG141_2.grid(row=position[0],column=position[1]+position[2],
columnspan=position[2],sticky=W)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def commentfield(self,position):
"""
Field to provide comments
"""
self.label = Label(self,text='(l) Comments ("tab" to move focus): ')
self.label.grid(row=position[0],column=position[1],columnspan=position[2],sticky=W)
self.comments = Entry(self)
self.comments.grid(row=position[0],column=position[1]+position[2],columnspan=position[2],sticky=W)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def commentfield2(self,position):
"""
Field to provide comments for second PA
"""
self.label2 = Label(self,text='(L) Comments ("tab" to move focus): ')
self.label2.grid(row=position[0],column=position[1],columnspan=position[2],sticky=W)
self.comments2 = Entry(self)
self.comments2.grid(row=position[0],column=position[1]+position[2],columnspan=position[2],sticky=W)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def openpngs(self,objid=None):
"""
Function to open pngs of object
"""
if objid is None:
id = self.currentobj
else:
id = objid
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import cchardet
import re
import argparse
import os
COLOR = {
'aliceblue': '#F0F8FF',
'antiquewhite': '#FAEBD7',
'aqua': '#00FFFF',
'aquamarine': '#7FFFD4',
'azure': '#F0FFFF',
'beige': '#F5F5DC',
'bisque': '#FFE4C4',
'black': '#000000',
'blanchedalmond': '#FFEBCD',
'blue': '#0000FF',
'blueviolet': '#8A2BE2',
'brown': '#A52A2A',
'burlywood': '#DEB887',
'cadetblue': '#5F9EA0',
'chartreuse': '#7FFF00',
'chocolate': '#D2691E',
'coral': '#FF7F50',
'cornflowerblue': '#6495ED',
'cornsilk': '#FFF8DC',
'crimson': '#DC143C',
'cyan': '#00FFFF',
'darkblue': '#00008B',
'darkcyan': '#008B8B',
'darkgoldenrod': '#B8860B',
'darkgray': '#A9A9A9',
'darkgreen': '#006400',
'darkgrey': '#A9A9A9',
'darkkhaki': '#BDB76B',
'darkmagenta': '#8B008B',
'darkolivegreen': '#556B2F',
'darkorange': '#FF8C00',
'darkorchid': '#9932CC',
'darkred': '#8B0000',
'darksalmon': '#E9967A',
'darkseagreen': '#8FBC8F',
'darkslateblue': '#483D8B',
'darkslategray': '#2F4F4F',
'darkslategrey': '#2F4F4F',
'darkturquoise': '#00CED1',
'darkviolet': '#9400D3',
'deeppink': '#FF1493',
'deepskyblue': '#00BFFF',
'dimgray': '#696969',
'dimgrey': '#696969',
'dodgerblue': '#1E90FF',
'firebrick': '#B22222',
'floralwhite': '#FFFAF0',
'forestgreen': '#228B22',
'fuchsia': '#FF00FF',
'gainsboro': '#DCDCDC',
'ghostwhite': '#F8F8FF',
'gold': '#FFD700',
'goldenrod': '#DAA520',
'gray': '#808080',
'green': '#008000',
'greenyellow': '#ADFF2F',
'grey': '#808080',
'honeydew': '#F0FFF0',
'hotpink': '#FF69B4',
'indianred': '#CD5C5C',
'indigo': '#4B0082',
'ivory': '#FFFFF0',
'khaki': '#F0E68C',
'lavender': '#E6E6FA',
'lavenderblush': '#FFF0F5',
'lawngreen': '#7CFC00',
'lemonchiffon': '#FFFACD',
'lightblue': '#ADD8E6',
'lightcoral': '#F08080',
'lightcyan': '#E0FFFF',
'lightgoldenrodyellow': '#FAFAD2',
'lightgray': '#D3D3D3',
'lightgreen': '#90EE90',
'lightgrey': '#D3D3D3',
'lightpink': '#FFB6C1',
'lightsalmon': '#FFA07A',
'lightseagreen': '#20B2AA',
'lightskyblue': '#87CEFA',
'lightslategray': '#778899',
'lightslategrey': '#778899',
'lightsteelblue': '#B0C4DE',
'lightyellow': '#FFFFE0',
'lime': '#00FF00',
'limegreen': '#32CD32',
'linen': '#FAF0E6',
'magenta': '#FF00FF',
'maroon': '#800000',
'mediumaquamarine': '#66CDAA',
'mediumblue': '#0000CD',
'mediumorchid': '#BA55D3',
'mediumpurple': '#9370DB',
'mediumseagreen': '#3CB371',
'mediumslateblue': '#7B68EE',
'mediumspringgreen': '#00FA9A',
'mediumturquoise': '#48D1CC',
'mediumvioletred': '#C71585',
'midnightblue': '#191970',
'mintcream': '#F5FFFA',
'mistyrose': '#FFE4E1',
'moccasin': '#FFE4B5',
'navajowhite': '#FFDEAD',
'navy': '#000080',
'oldlace': '#FDF5E6',
'olive': '#808000',
'olivedrab': '#6B8E23',
'orange': '#FFA500',
'orangered': '#FF4500',
'orchid': '#DA70D6',
'palegoldenrod': '#EEE8AA',
'palegreen': '#98FB98',
'paleturquoise': '#AFEEEE',
'palevioletred': '#DB7093',
'papayawhip': '#FFEFD5',
'peachpuff': '#FFDAB9',
'peru': '#CD853F',
'pink': '#FFC0CB',
'plum': '#DDA0DD',
'powderblue': '#B0E0E6',
'purple': '#800080',
'red': '#FF0000',
'rosybrown': '#BC8F8F',
'royalblue': '#4169E1',
'saddlebrown': '#8B4513',
'salmon': '#FA8072',
'sandybrown': '#F4A460',
'seagreen': '#2E8B57',
'seashell': '#FFF5EE',
'sienna': '#A0522D',
'silver': '#C0C0C0',
'skyblue': '#87CEEB',
'slateblue': '#6A5ACD',
'slategray': '#708090',
'slategrey': '#708090',
'snow': '#FFFAFA',
'springgreen': '#00FF7F',
'steelblue': '#4682B4',
'tan': '#D2B48C',
'teal': '#008080',
'thistle': '#D8BFD8',
'tomato': '#FF6347',
'turquoise': '#40E0D0',
'violet': '#EE82EE',
'wheat': '#F5DEB3',
'white': '#FFFFFF',
'whitesmoke': '#F5F5F5',
'yellow': '#FFFF00',
'yellowgreen': '#9ACD32',
}
class Dialogue:
_start = 0
_end = 0
_text = ''
def __init__(self, ext, start, end, text):
if ext == 'smi':
self._start = start
self._end = end
self._text = text
elif ext == 'srt':
self._start = self.from_srt_time(start)
self._end = self.from_srt_time(end)
self._text = text
elif ext == 'ass':
self._start = self.from_ass_time(start)
self._end = self.from_ass_time(end)
self._text = self.from_ass_text(text)
def start(self, ext):
if ext == 'smi':
return self._start
elif ext == 'srt':
return self.to_srt_time(self._start)
elif ext == 'ass':
return self.to_ass_time(self._start)
def end(self, ext):
if ext == 'smi':
return self._end
elif ext == 'srt':
return self.to_srt_time(self._end)
elif ext == 'ass':
return self.to_ass_time(self._end)
def text(self, ext):
if ext == 'smi':
return self._text
if ext == 'srt':
return self._text
elif ext == 'ass':
return self.to_ass_text(self._text)
@staticmethod
def from_srt_time(hms):
h, m, s, ms = re.findall(r'(\d+):(\d+):(\d+),(\d+)', hms)[0]
return int(h) * 3600000 + int(m) * 60000 + int(s) * 1000 + int(ms)
@staticmethod
def to_srt_time(ms):
hours = ms // 3600000
ms -= hours * 3600000
minutes = ms // 60000
ms -= minutes * 60000
seconds = ms // 1000
ms -= seconds * 1000
return '%02d:%02d:%02d,%03d' % (hours, minutes, seconds, ms)
@staticmethod
def from_ass_time(hms):
h, m, s, ms10 = re.findall(r'(\d+):(\d+):(\d+)\.(\d+)', hms)[0]
return int(h) * 3600000 + int(m) * 60000 + int(s) * 1000 + int(ms10) * 10
@staticmethod
def to_ass_time(ms):
hours = ms // 3600000
ms -= hours * 3600000
minutes = ms // 60000
ms -= minutes * 60000
seconds = ms // 1000
ms -= seconds * 1000
ms //= 10
return '%01d:%02d:%02d.%02d' % (hours, minutes, seconds, ms)
@staticmethod
def from_ass_text(text):
matches = re.findall(r'({[^}]*\\c&H([0-9A-Fa-f]+)&[^}]*}([^{}\\]+))', text)
for match in matches:
entire, color, block1 = match
if len(color) < 6:
color = '0' * (6 - len(color)) + color
color = f'#{color[4:6]}{color[2:4]}{color[0:2]}'
text = text.replace(entire, f'<font color={color}>{block1}</font>')
text = re.sub(r'{[^}]*}', '', text)
text = text.replace('\\n', '\n').replace('\\N', '\n')
return text
@staticmethod
def to_ass_text(text):
matches = re.findall(r'(<font [^>]*color=[\"\']?([^\"\'>]+)[\"\']?[^>]*>(.+?)(</[^>]*font>))', text, flags=re.I+re.DOTALL)
for match in matches:
entire, color, block1, block2 = match
if color.startswith('#'):
color = color.upper()
elif color.lower() in COLOR:
color = COLOR[color.lower()]
elif len(color) == 6 and all(c in '0123456789ABCDEF' for c in color.upper()):
color = '#' + color.upper()
else:
color = None
if color:
bgr = color[5:] + color[3:5] + color[1:3]
text = text.replace(entire, f'{{\\c&H{bgr}&}}{block1}{{\\r}}')
else:
text = text.replace(entire, block1)
text = re.sub(r'<[^>]*>', '', text)
text = text.replace('\n', '\\N')
return text
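# Hedged usage sketch (illustration only, not part of the original module):
# round-tripping a timestamp and a color tag through Dialogue. Note the ASS
# color channel order is BGR, so HTML '#FF0000' (red) becomes '&H0000FF&'.
def _example_dialogue_roundtrip():
    d = Dialogue('srt', '00:01:02,345', '00:01:04,000', '<font color="#FF0000">hi</font>')
    assert d.start('smi') == 62345             # internal unit is milliseconds
    assert d.start('ass') == '0:01:02.34'      # ASS keeps centisecond precision
    assert d.text('ass') == '{\\c&H0000FF&}hi{\\r}'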
class SubConverter:
title = ''
ext = ''
dialogues = []
font_face = '<NAME>'
font_size = 70
width = 1920
height = 1080
def __init__(self, args=None):
if args:
self.font_face = args.font_face
self.font_size = args.font_size
self.width = args.width
self.height = args.height
def load_file(self, file):
self.ext = file.split('.')[-1]
self.ext = 'ass' if self.ext == 'ssa' else self.ext
self.title = file[:-(len(self.ext) + 1)]
self.dialogues = []
with open(file, 'rb') as fp:
content = fp.read()
chdt = cchardet.detect(content)
content = content.decode(chdt['encoding'])
content = content.strip()
content = content.replace('\r\n', '\n')
if self.ext == 'smi':
self.from_smi(content)
elif self.ext == 'srt':
self.from_srt(content)
elif self.ext == 'ass':
self.from_ass(content)
def load_string(self, ext, content):
self.ext = 'ass' if ext == 'ssa' else ext
self.title = ''
self.dialogues = []
content = content.replace('\r\n', '\n')
if self.ext == 'smi':
self.from_smi(content)
elif self.ext == 'srt':
self.from_srt(content)
elif self.ext == 'ass':
self.from_ass(content)
def convert(self, ext):
if ext == 'smi':
return self.to_smi()
elif ext == 'srt':
return self.to_srt()
elif ext == 'ass':
return self.to_ass()
def from_smi(self, content):
title_match = re.search(r'<title>(.+)</title>', content, flags=re.I)
self.title = title_match[1] if title_match else self.title
content = re.split(r'<body>', content, flags=re.I)[1]
content = re.split(r'</body>', content, flags=re.I)[0]
texts = re.split(r'<sync\s+start=', content, flags=re.I)[1:]
for cur, next_ in zip(texts, (texts + [None])[1:]):
start = int(re.match(r'\d+', cur)[0])
end = int(re.match(r'\d+', next_)[0] if next_ else start)
cur = cur.replace('\n', '')
text = re.search(r'.+<p class=\w+[^>]*>(.+)$', cur, flags=re.I)[1]
text = re.sub(r'<br>', '\n', text, flags=re.I)
text = re.sub(r' ', '', text, flags=re.I)
text = text.strip()
if not text:
continue
self.dialogues.append(Dialogue('smi', start, end, text))
def from_srt(self, content):
content = re.sub(r'\n{3,}', '\n\n', content)
for block in content.split('\n\n'):
lines = block.split('\n')
if len(lines) < 3:
continue
start, end = re.findall(r'(.+) --> (.+).*', lines[1])[0]
self.dialogues.append(Dialogue('srt', start, end, '\n'.join(lines[2:])))
def from_ass(self, content):
for line in content.split('\n'):
if line.lower().startswith('dialogue:'):
start, end, text = re.findall(r'[^,]*,([^,]+),([^,]+),[^,]*,[^,]*,[^,]*,[^,]*,[^,]*,[^,]*,(.*)$', line)[0]
if not text:
continue
self.dialogues.append(Dialogue('ass', start, end, text))
elif line.lower().startswith('title:'):
self.title = line[6:].strip()
def to_smi(self):
header = f'''<SAMI>
<HEAD>
<TITLE>{self.title}</TITLE>
<STYLE TYPE="text/css">
<!--
P [ text-align:center; font-family:{self.font_face}, Arial; color:white;
background-color:black; ]
.KRCC [ Name:ko; lang:ko-KR; SAMIType:CC; ]
-->
</STYLE>
</HEAD>
<BODY>
'''
# literal { and } would need escaping inside the f-string above, so [ and ] are used as placeholders and swapped back here
header = header.replace('[', '{').replace(']', '}')
lines = []
last_end = 0
for dialogue in self.dialogues:
if last_end and last_end != dialogue.start('smi'):
lines.append(f'<SYNC Start={last_end}><P Class=KRCC> ')
lines.append(f'<SYNC Start={dialogue.start("smi")}><P Class=KRCC>')
lines.append(dialogue.text('smi').replace('\n', '<BR>\n'))
last_end = dialogue.end('smi')
lines.append(f'<SYNC Start={last_end}><P Class=KRCC> ')
lines.append('</BODY>\n</SAMI>')
return header + '\n'.join(lines)
def to_srt(self):
lines = []
index = 1
for dialogue in self.dialogues:
lines.append(f'{index}')
lines.append(f'{dialogue.start("srt")} --> {dialogue.end("srt")}')
lines.append(dialogue.text('srt'))
lines.append('')
index += 1
return '\n'.join(lines)
def to_ass(self):
header = f'''[Script Info]
Title: {self.title}
ScriptType: v4.00+
PlayDepth: 0
PlayResX: {self.width}
PlayResY: {self.height}
[V4+ Styles]
Format: Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, OutlineColour, BackColour, Bold, Italic, Underline, StrikeOut, ScaleX, ScaleY, Spacing, Angle, BorderStyle, Outline, Shadow, Alignment, MarginL, MarginR, MarginV, Encoding
Style: Default,{self.font_face},{self.font_size},&H00FFFFFF,&HFF00FFFF,&H00000000,&H02000000,-1,0,0,0,100,100,0,0,1,2.7,0,2,0,0,80,1
[Events]
Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
'''
lines = []
for dialogue in self.dialogues:
text = dialogue.text('ass')
lines.append(f'Dialogue: 0,{dialogue.start("ass")},{dialogue.end("ass")},Default,,0,0,0,,{text}')
return header + '\n'.join(lines)
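# Hedged usage sketch (illustration only): converting an in-memory SRT cue to
# ASS. The cue text and timings are made up for the example.
def _example_srt_to_ass():
    converter = SubConverter()
    converter.load_string('srt', '1\n00:00:01,000 --> 00:00:02,500\nHello\n')
    print(converter.convert('ass'))  # [Script Info] header plus one Dialogue line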
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('file', metavar='file', nargs='*', help='Input subtitle file(s) [.smi | .srt | .ass]; leave blank to convert all files in the current folder')
parser.add_argument('-f', '--format', metavar='format', help='Target format [smi | srt | ass] (Default: ass)', default='ass')
parser.add_argument('-d', '--delete', action="store_true", help='Enable to delete original files')
parser.add_argument('-ff', '--font-face', metavar='font_face', help='Font face for .smi, .ass (Default: 맑은 고딕)', default="맑은 고딕")
parser.add_argument('-fs', '--font-size', metavar='font_size', type=int, help='Font size for .ass (Default: 70)', default=70)
parser.add_argument('-W', '--width', metavar='width', type=int, help='Resolution width for .ass (Default: 1920)', default=1920)
parser.add_argument('-H', '--height', metavar='height', type=int, help='Resolution height for .ass (Default: 1080)', default=1080)
args = parser.parse_args()
files = args.file
if not files:
files = [x for x in os.listdir('.') if x.endswith('smi') or x.endswith('srt') or x.endswith('ass') or x.endswith('ssa')]
if args.format == 'ssa':
args.format = 'ass'
<reponame>geobook2015/magPy
"""
Created on Thu Mar 24 08:18:04 2016
@author: npop
ProcessorSingleSite
Inherits from Processor
And implements single site processing
"""
import os
import random
import numpy as np
import scipy.signal as signal
import scipy.interpolate as interp
# utils
from utilsFreq import *
from utilsIO import *
from utilsRobust import *
from utilsProcess import *
class ProcessorSingleSite(object):
###################
### CONSTRUCTOR
##################
def __init__(self, project, winSelector):
# data reader
self.proj = project
self.winSelector = winSelector
self.decParams = winSelector.getDecParams()
self.winParams = winSelector.getWinParams()
self.setDefaults()
def setDefaults(self):
# inputs
self.inSite = ""
self.inChannels = ["Hx", "Hy"]
self.outSite = ""
self.outChannels = ["Ex", "Ey"]
self.allChannels = self.inChannels + self.outChannels
# set cross channels - these are the actual channels to use in the solution
self.crossChannels = self.allChannels
# evaluation frequency data
self.evalFreq = []
self.evalFreqEqns = []
# whether to include an intercept term
self.intercept = False
# smoothing options
self.win = "hanning"
self.winSmooth = -1
# output filename
self.prepend = ""
###################
### GET GENERAL INFO
##################
def getWinSelector(self):
return self.winSelector
def getDecParams(self):
return self.decParams
def getWinParams(self):
return self.winParams
def getInChannels(self):
return self.inChannels
def getInSite(self):
return self.inSite
def getInSize(self):
return self.inSize
def getOutChannels(self):
return self.outChannels
def getOutSite(self):
return self.outSite
def getOutSize(self):
return self.outSize
def getAllChannels(self):
return self.allChannels
def getCrossChannels(self):
return self.crossChannels
def getPrepend(self):
return self.prepend
def getIntercept(self):
return self.intercept
def getWindow(self):
return self.win
def getWindowSmooth(self, **kwargs):
# first check if window size specified by user
if self.winSmooth != -1 and self.winSmooth > 1:
return self.winSmooth
# if not, then calculate based on datasize
if "datasize" in kwargs:
winSmooth = kwargs["datasize"]*1.0/16.0
if winSmooth < 3:
return 3 # minimum smoothing
# otherwise round to nearest odd number
winSmooth = np.ceil(winSmooth) // 2 # this is floor division
return int(winSmooth*2 + 1)
# otherwise, return a default value
return 15
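# Worked example for getWindowSmooth (illustration only): with datasize=256 the
# initial length is 256/16 = 16.0; np.ceil(16)//2 = 8, so the method returns
# int(8*2 + 1) = 17; the computed length is always odd and at least 3.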
###################
### SET INPUTS AND OUTPUTS
##################
def setInput(self, inSite, inChannels):
self.inSite = inSite
self.inChannels = inChannels
self.inSize = len(inChannels)
# set all and cross channels
self.allChannels = self.inChannels + self.outChannels
self.crossChannels = self.allChannels
def setOutput(self, outSite, outChannels):
self.outSite = outSite
self.outChannels = outChannels
self.outSize = len(outChannels)
# set all and cross channels
self.allChannels = self.inChannels + self.outChannels
self.crossChannels = self.allChannels
def setCrossChannels(self, crossChannels):
# this is for the case where we want to limit the cross channels used
self.crossChannels = crossChannels
def setPrepend(self, prepend):
self.prepend = prepend
def setSmooth(self, window, windowSize):
self.win = window
self.winSmooth = windowSize
def setIntercept(self, intercept):
self.intercept = intercept
###################
### PROCESS
###################
def process(self):
# checks
# for testing
evalFreqEqnsTest = []
evalFreqEqnsTest2 = []
evalFreqEqnsTest3 = []
evalFreqEqnsTest4 = []
evalFreqVarsTest4 = []
evalFreqEqnsTest5 = []
evalFreqEqnsTest6 = []
self.crossChannels = ["Ex", "Ey"]
# for each decimation level
# read in the shared windows from all sites
# for each evaluation frequency, store the data from each window
# and then at the end, perform robust processing
numLevels = self.getDecParams().getNumLevels()
inChans = self.getInChannels()
outChans = self.getOutChannels()
# get set of shared windows for all decimation levels
sharedWindows = self.getWinSelector().getSharedWindows()
for iDec in xrange(0, numLevels):
# print out some info
self.printText("Processing decimation level {}".format(iDec))
fs = self.getDecParams().getSampleFreqLevel(iDec)
# get the number of all shared windows and the number of unmasked windows
# unmasked windows are ones that will actually be used in the calculation
numWindows = self.getWinSelector().getNumSharedWindows(iDec)
unmaskedWindows = self.getWinSelector().getUnmaskedWindowsLevel(iDec)
numUnmasked = len(unmaskedWindows)
self.printText("Total shared windows for decimation level = {}".format(numWindows))
self.printText("Total unmasked windows for decimation level = {}".format(numUnmasked))
if numUnmasked == 0:
self.printText("No unmasked windows found at this decimation level, continuing to next level".format(iDec))
continue # continue to next decimation level
self.printText("{} windows will be processed".format(numUnmasked))
# get the evaluation frequencies
evalFreq = self.getDecParams().getEvalFrequenciesForLevel(iDec)
# set some variables
totalSize = self.getInSize() + self.getOutSize()
numEvalFreq = len(evalFreq)
dataSize = self.getWinSelector().getDataSize(iDec)
freq = np.linspace(0, fs/2, dataSize)
# get the window smoothing params
smoothLen = self.getWindowSmooth(datasize=dataSize)
# create the data array
# for each evaluation frequency
# keep the spectral power information for all windows
evalFreqData = np.empty(shape=(numEvalFreq, numWindows, totalSize, totalSize), dtype="complex")
# an array for the window data
winSpectraMatrix = np.empty(shape=(totalSize, totalSize, dataSize), dtype="complex")
winDataArray = np.empty(shape=(totalSize, dataSize), dtype="complex")
# loop over shared windows
localWin = 0
global2local = {}
# randomise the windows
# sharedWin = random.shuffle(sharedWindows[iDec])
for iWin in unmaskedWindows:
# do the local to global map
global2local[iWin] = localWin
# get the window for the input site
inSF, inReader = self.getWinSelector().getSpecReaderForWindow(self.getInSite(), iDec, iWin)
inData = inReader.readBinaryWindowGlobal(iWin)
# get the window and channels for the output site
if self.getOutSite() != self.getInSite():
outSF, outReader = self.getWinSelector().getSpecReaderForWindow(self.getOutSite(), iDec, iWin)
outData = outReader.readBinaryWindowGlobal(iWin)
else:
outData = inData
# get data into the right part of the arrays
for i in xrange(0, self.getInSize()):
winDataArray[i] = inData[inChans[i]]
for i in xrange(0, self.getOutSize()):
winDataArray[self.getInSize() + i] = outData[outChans[i]]
# and now can fill the parts of the matrix
# recall, smooth the power spectra
for i in xrange(0, totalSize):
for j in xrange(i, totalSize):
# winSpectraMatrix[i,j] = winDataArray[i] * np.conjugate(winDataArray[j])
winSpectraMatrix[i,j] = smooth1d(winDataArray[i] * np.conjugate(winDataArray[j]), smoothLen, self.getWindow())
if i != j:
winSpectraMatrix[j,i] = np.conjugate(winSpectraMatrix[i,j]) # due to complex symmetry
# after running through all windows, calculate evaluation frequencies
# calculate frequency array
evalFreqData[:, localWin] = self.calcEvalFrequencyData(freq, evalFreq, winSpectraMatrix)
# increment local window
localWin = localWin + 1
# now all the data has been collected
# for each evaluation frequency, do the robust processing
# and get the evaluation frequency data
for eIdx in xrange(0, numEvalFreq):
self.printText("Processing evaluation frequency = {:.6f} [Hz], period = {:.6f} [s]".format(evalFreq[eIdx], 1/evalFreq[eIdx]))
# get the constrained windows for the evaluation frequency
evalFreqWindows = self.getWinSelector().getWindowsForFreq(iDec, eIdx)
if len(evalFreqWindows) == 0: # no windows meet constraints
self.printText("No windows found - possibly due to masking")
continue
localWinIndices = []
for iW in evalFreqWindows:
localWinIndices.append(global2local[iW])
self.printText("{:d} windows will be solved for".format(len(localWinIndices)))
# restrict processing to data that meets constraints for this evaluation frequency
# add to class vars
self.evalFreq.append(evalFreq[eIdx])
# try a random smoothing of the spectral density estimates across windows
# spectralEstimates = self.smoothSpectralEstimates(evalFreqData[eIdx, localWinIndices])
# self.evalFreqEqns.append(self.robustProcess(spectralEstimates))
# solution using all components
numSolveWindows, obs, reg = self.prepareLinearEqn2(evalFreqData[eIdx, localWinIndices])
self.evalFreqEqns.append(self.robustProcess(numSolveWindows, obs, reg))
# evalFreqEqnsTest.append(self.stackedProcess(evalFreqData[eIdx, localWinIndices]))
# evalFreqEqnsTest2.append(self.robustProcessReduced(evalFreqData[eIdx, localWinIndices]))
# evalFreqEqnsTest3.append(self.robustProcessOLS(numSolveWindows, obs, reg))
tmp1, tmp2 = self.robustProcessCM(numSolveWindows, obs, reg)
evalFreqEqnsTest4.append(tmp1)
evalFreqVarsTest4.append(tmp2)
# evalFreqEqnsTest4.append(self.robustProcessCM(numSolveWindows, obs, reg))
# evalFreqEqnsTest5.append(self.robustProcessCMMod(numSolveWindows, obs, reg))
# evalFreqEqnsTest6.append(self.robustProcessStack(numSolveWindows, obs, reg))
# write out all the data
self.writeTF(self.getPrepend() + "_mmest", self.evalFreq, self.evalFreqEqns)
# self.writeTF(self.getPrepend() + "_stacked", self.evalFreq, evalFreqEqnsTest)
# self.writeTF(self.getPrepend() + "_reduced", self.evalFreq, evalFreqEqnsTest2)
# self.writeTF(self.getPrepend() + "_ols", self.evalFreq, evalFreqEqnsTest3)
self.writeTF(self.getPrepend() + "_cm", self.evalFreq, evalFreqEqnsTest4, variances=evalFreqVarsTest4)
# self.writeTF(self.getPrepend() + "_cmMod", self.evalFreq, evalFreqEqnsTest5)
# self.writeTF(self.getPrepend() + "_mmestStacked", self.evalFreq, evalFreqEqnsTest6)
###################
### SOLVER ROUTINES
###################
def calcEvalFrequencyData(self, freq, evalFreq, winDataMatrix):
# interpolate data to the evaluation frequencies
inShape = winDataMatrix.shape
data = np.empty(shape=(evalFreq.size, inShape[0], inShape[1]), dtype="complex")
# get data from winDataMatrix
for i in xrange(0, inShape[0]):
for j in xrange(0, inShape[1]):
interpFunc = interp.interp1d(freq, winDataMatrix[i,j])
interpVals = interpFunc(evalFreq)
for eIdx, eFreq in enumerate(evalFreq):
data[eIdx,i,j] = interpVals[eIdx]
return data
def smoothSpectralEstimates(self, data):
# takes the evaluation frequency data, which is indexed
# windows, matrix of spectral components
winSmooth = 9
totalChans = self.getInSize() + self.getOutSize()
for i in xrange(0, totalChans):
for j in xrange(0, totalChans):
data[:,i,j] = smooth1d(data[:,i,j], winSmooth, self.getWindow())
return data
def checkForBadValues(self, numWindows, data):
finiteArray = np.ones(shape=(numWindows))
for iW in xrange(0, numWindows):
if not np.isfinite(data[iW]).all():
finiteArray[iW] = 0
numGoodWindows = sum(finiteArray)
if numGoodWindows == numWindows:
return numWindows, data
self.printWarning("Bad data found...number of windows reduced from {} to {}".format(numWindows, numGoodWindows))
goodWindowIndices = np.where(finiteArray == 1)
return numGoodWindows, data[goodWindowIndices]
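        # Illustrative outcome: with numWindows=5 and non-finite values in window 2
        # only, finiteArray becomes [1, 1, 0, 1, 1], a warning is printed, and
        # (4, data[[0, 1, 3, 4]]) is returned.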
def prepareLinearEqn(self, data):
# prepare observations and regressors for linear processing
numWindows = data.shape[0]
numWindows, data = self.checkForBadValues(numWindows, data)
totalSize = self.getOutSize() + self.getInSize()
        # for each output variable, have n input regressor variables
# let's construct our arrays
obs = np.empty(shape=(self.getOutSize(), totalSize*numWindows), dtype="complex")
reg = np.empty(shape=(self.getOutSize(), totalSize*numWindows, self.getInSize()), dtype="complex")
for iW in xrange(0, numWindows):
iOffset = iW*totalSize
for i in xrange(0, self.getOutSize()):
for j in xrange(0, totalSize):
                    # this is the observation row, where i indexes the observed output channel
obs[i, iOffset + j] = data[iW, self.getInSize() + i, j]
for k in xrange(0, self.getInSize()):
reg[i, iOffset + j, k] = data[iW, k, j]
return numWindows, obs, reg
def prepareLinearEqn2(self, data):
# prepare observations and regressors for linear processing
numWindows = data.shape[0]
numWindows, data = self.checkForBadValues(numWindows, data)
totalSize = self.getOutSize() + self.getInSize()
crossSize = len(self.getCrossChannels())
        # for each output variable, have n input regressor variables
# let's construct our arrays
obs = np.empty(shape=(self.getOutSize(), crossSize*numWindows), dtype="complex")
reg = np.empty(shape=(self.getOutSize(), crossSize*numWindows, self.getInSize()), dtype="complex")
for iW in xrange(0, numWindows):
iOffset = iW*crossSize
for i in xrange(0, self.getOutSize()):
for j, crossChan in enumerate(self.crossChannels):
                    # this is the observation row, where i indexes the observed output channel
crossIndex = self.allChannels.index(crossChan)
obs[i, iOffset + j] = data[iW, self.getInSize() + i, crossIndex]
for k in xrange(0, self.getInSize()):
reg[i, iOffset + j, k] = data[iW, k, crossIndex]
return numWindows, obs, reg
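        # Shape sketch (derived from the code above): with inSize=2 (Hx, Hy),
        # outSize=2 (Ex, Ey) and crossChannels=["Ex", "Ey"], crossSize=2, so
        # obs is (2, 2*numWindows) and reg is (2, 2*numWindows, 2): one stacked
        # linear system per output channel, one row per cross channel per window.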
# def calcConfidenceIntervals(self, obs, reg, out):
def robustProcess(self, numWindows, obs, reg):
# do mmestimate robust processing for a single evaluation frequency
crossSize = len(self.getCrossChannels())
# create array for output
output = np.empty(shape=(self.getOutSize(), self.getInSize()), dtype="complex")
# solve
for i in xrange(0, self.getOutSize()):
observation = obs[i,:]
predictors = reg[i,:,:]
# save the output
out, resids, scale, weights = mmestimateModel(predictors, observation, intercept=self.getIntercept())
# now take the weights, apply to the observations and predictors, stack the appropriate rows and test
observation2 = np.zeros(shape=(crossSize), dtype="complex")
predictors2 = np.zeros(shape=(crossSize, self.getInSize()), dtype="complex")
for iChan in xrange(0, crossSize):
# now need to have my indexing array
indexArray = np.arange(iChan, numWindows*crossSize, crossSize)
weightsLim = weights[indexArray]
# weightsLim = weightsLim/np.sum(weightsLim) # normalise weights to 1
observation2[iChan] = np.sum(obs[i, indexArray]*weightsLim)/numWindows
# now for the regressors
for j in xrange(0, self.getInSize()):
predictors2[iChan, j] = np.sum(reg[i, indexArray, j]*weightsLim)/numWindows
out, resids, scale, weights = mmestimateModel(predictors2, observation2, intercept=self.getIntercept())
if self.getIntercept():
output[i] = out[1:]
else:
output[i] = out
return output
def robustProcessStack(self, numWindows, obs, reg):
# loop over the outputs
output = np.zeros(shape=(self.getOutSize(), self.getInSize()), dtype="complex")
totalSize = self.getOutSize() + self.getInSize()
for i in xrange(0, self.getOutSize()):
            # let's use some easier lettering
y = obs[i]
A = reg[i]
# get some sizes
n = A.shape[0]
p = A.shape[1]
# first calculate the leverage weights
<reponame>Alejandro5852/tytus<gh_stars>10-100
# File: Interface
# License: Released under MIT License
# Notice: Copyright (c) 2020 TytusDB Team
from tkinter import *
from tkinter import ttk
from tkinter import messagebox
from PIL import Image, ImageTk
from team17 import BMode as B
def runInterface():
r = PP()
class PP:
    #------------------------------ INITIAL SCREEN ----------------------------------#
def __init__(self):
self.PP = Tk()
self.PP.resizable(True,False)
self.PP.title("Tytus 2020")
self.PP.geometry("1000x500")
self.PP.configure(bg="#102027")
self.isPantalla1 = 0
self.isPantallaBases = 0
self.isPantFunciones = 0
self.isAcerca = False
self.dell = False
self.pantalla1()
def pantalla1(self):
if self.isPantalla1 == 1:
self.ndb.destroy()
elif self.isPantalla1 == 2:
self.Funciones.destroy()
if self.isAcerca:
self.Acerca.destroy()
self.isAcerca = False
self.isPantalla1 = 0
self.FrameInicial = Frame(height=500, width=800)
self.FrameInicial.config(bg="#37474f")
self.FrameInicial.pack(padx=25, pady=25)
self.image = PhotoImage(file='team17/docs/img/usac.png')
Label(self.FrameInicial,image=self.image, bg="#37474f").place(x=230,y=70)
Label(self.FrameInicial,text="Tytus 2020",font=("Times New Roman",40),fg="#ffffff", bg="#37474f").place(x=250,y=10)
Button(self.FrameInicial, text="Reportes", command=self.Reportes,font=("Times New Roman",15),fg="#000000", bg="#ff6f00",width=10).place(x=330,y=125)
Button(self.FrameInicial, text="Funciones", command=self.AccederPestanaFunciones,font=("Times New Roman",15),fg="#000000",bg="#ff6f00",width=10).place(x=330,y=200)
Button(self.FrameInicial, text="Acerca De", command=self.AcercaDe,font=("Times New Roman",15),fg="#000000",bg="#ff6f00",width=10).place(x=330,y=275)
Button(self.FrameInicial, text="Salir", command=self.Salir,font=("Times New Roman",15),fg="#ffffff",bg="#ff3d00",width=5).place(x=10,y=400)
Label(self.FrameInicial,text="USAC",font=("Times New Roman",40),fg="#ffffff", bg="#37474f").place(x=315,y=375)
self.FrameInicial.mainloop()
    #---------------------------- REPORTS ---------------------------------------------#
def Reportes(self):
if self.isPantFunciones ==0:
self.FrameInicial.destroy()
self.isPantFunciones = 0
self.isPantalla1 = 2
self.Funciones = Frame(height=500, width=800)
self.Funciones.config(bg="#37474f")
self.Funciones.pack(padx=25, pady=25)
Label(self.Funciones,text="Tytus 2020",font=("Times New Roman",40),fg="#ffffff", bg="#37474f").place(x=250,y=10)
Label(self.Funciones,text="Reportes",font=("Times New Roman",10),fg="#ffffff", bg="#37474f").place(x=250,y=70)
self.name = StringVar()
Label(self.Funciones,text="Bases de Datos: ",font=("Times New Roman",20),fg="#ffffff", bg="#37474f").place(x=50,y=150)
Button(self.Funciones, text="Atras", command=self.pantalla1,font=("Times New Roman",15),fg="#102027",bg="red",width=10).place(x=650,y=150)
Button(self.Funciones, text="Aceptar", command=self._Reportes,font=("Times New Roman",15),fg="#102027",bg="#ff6f00",width=10).place(x=500,y=150)
self.com = ttk.Combobox(self.Funciones,state="readonly",font=("Times New Roman",15))
self.com.place(x=240,y=155)
aux = ["Seleccionar"]
for i in B.showDatabases():
aux.append(i)
self.com["values"] = aux
self.com.current(0)
def _Reportes(self):
if self.com.get() != "Seleccionar":
self.con = ttk.Combobox(self.Funciones,state="readonly",font=("Times New Roman",15))
Label(self.Funciones,text="Tablas: ",font=("Times New Roman",20),fg="#ffffff", bg="#37474f").place(x=150,y=200)
Button(self.Funciones, text="Aceptar", command=self.__Reportes,font=("Times New Roman",15),fg="#102027",bg="#ff6f00",width=10).place(x=500,y=200)
self.con.place(x=240,y=200)
aux = ["Seleccionar"]
for i in B.showTables(str(self.com.get())):
aux.append(i)
self.con["values"] = aux
self.con.current(0)
def __Reportes(self):
if self.com.get() != "Seleccionar" and self.con.get() != "Seleccionar":
B.serializar.rollback(str(self.com.get())+"-"+str(self.con.get())+"-B").graficar()
self.nodos = ttk.Combobox(self.Funciones,state="readonly",font=("Times New Roman",15))
self.nodos.place(x=240,y=250)
aux = ["Seleccionar"]
self.o = B.serializar.rollback(str(self.com.get())+"-"+str(self.con.get())+"-B").Keys()
for i in self.o:
aux.append(i)
self.nodos["values"] = aux
self.nodos.current(0)
Label(self.Funciones,text="Llaves: ",font=("Times New Roman",20),fg="#ffffff", bg="#37474f").place(x=150,y=250)
Button(self.Funciones, text="Aceptar", command=self.Nodo,font=("Times New Roman",15),fg="#102027",bg="#ff6f00",width=10).place(x=500,y=250)
self.win = Toplevel()
self.win.geometry("600x600")
self.win.configure(bg="#102027")
self.image = Image.open('salida.png')
self.copy = self.image.copy()
self.foto = ImageTk.PhotoImage(self.image)
self.label = ttk.Label(self.win, image = self.foto)
self.label.bind('<Configure>', self.resize_image)
self.label.pack(fill=BOTH, expand = YES)
def Nodo(self):
if self.com.get() != "Seleccionar" and self.con.get() != "Seleccionar" and self.nodos != "Seleccionar":
c = B.extractRow(str(self.com.get()), str(self.con.get()),self.nodos.get().split("_"))
if c != []:
z = ""
for i in c:
z += str(i) + ", "
z = z[:-2]
Label(self.Funciones,text= z ,font=("Times New Roman",20),fg="black", bg="white",width=50).place(x=25,y=350)
else:
messagebox.showinfo(message="Ha ocurrido un error\n",
title="Error")
else:
messagebox.showinfo(message="Algo anda mal\n",
title="Error")
def AccederPestanaFunciones(self):
if self.isPantFunciones == 0:
self.FrameInicial.destroy()
else:
self.ndb.destroy()
self.isPantFunciones = 0
self.isPantalla1 = 2
self.Funciones = Frame(height=500, width=800)
self.Funciones.config(bg="#37474f")
self.Funciones.pack(padx=15, pady=15)
Label(self.Funciones,text="Tytus 2020",font=("Times New Roman",40),fg="#ffffff", bg="#37474f").place(x=250,y=10)
Label(self.Funciones,text="Funciones",font=("Times New Roman",10),fg="#ffffff", bg="#37474f").place(x=250,y=70)
        # DATABASES
Label(self.Funciones,text="Bases de Datos",font=("Times New Roman",15),fg="#ffffff", bg="#37474f").place(x=150,y=100)
Button(self.Funciones, text="Nueva\n BD",font=("Times New Roman",10),command = self.NewDB,fg="#102027", bg="#ff6f00",width=10).place(x=70,y=150)
Button(self.Funciones, text="Mostrar\n BD",font=("Times New Roman",10),command = self.ShowDB,fg="#102027",bg="#ff6f00",width=10).place(x=170,y=150)
Button(self.Funciones, text="Cambiar\n Nombre",font=("Times New Roman",10),command = self.ReDB,fg="#102027",bg="#ff6f00",width=10).place(x=270,y=150)
Button(self.Funciones, text="Eliminar\n BD",font=("Times New Roman",10),command = self.DelBD,fg="#102027", bg="#ff6f00",width=10).place(x=170,y=200)
        # TABLES
Label(self.Funciones,text="Tablas",font=("Times New Roman",15),fg="#ffffff", bg="#37474f",width=10).place(x=550,y=100)
Button(self.Funciones, text="Nueva \nTabla",font=("Times New Roman",10),command = self.NewTable,fg="#102027",bg="#ff6f00",width=10).place(x=470,y=150)
Button(self.Funciones, text="Mostrar \nTablas",font=("Times New Roman",10),command = self.ShowTB,fg="#102027",bg="#ff6f00",width=10).place(x=570,y=150)
Button(self.Funciones, text="Mostrar \nDatos",font=("Times New Roman",10),command = self.ShowData,fg="#102027",bg="#ff6f00",width=10).place(x=670,y=150)
Button(self.Funciones, text="Rango\nTabla",font=("Times New Roman",10),command = self.RangleTB,fg="#102027",bg="#ff6f00",width=10).place(x=470,y=200)
Button(self.Funciones, text="Agregar \nLlave Primaria",font=("Times New Roman",10),command = self.AddPK,fg="#102027",bg="#ff6f00",width=10).place(x=570,y=200)
Button(self.Funciones, text="Eliminar \nLlave Primaria",font=("Times New Roman",10),command = self.DropPK,fg="#102027",bg="#ff6f00",width=10).place(x=670,y=200)
Button(self.Funciones, text="Cambiar \nNombre",font=("Times New Roman",10),command = self.AlterTB,fg="#102027",bg="#ff6f00",width=10).place(x=470,y=250)
Button(self.Funciones, text="Agregar \nColumna",font=("Times New Roman",10),command = self.AddCL,fg="#102027",bg="#ff6f00",width=10).place(x=570,y=250)
Button(self.Funciones, text="Eliminar \nColumna",font=("Times New Roman",10),command = self.DropCL,fg="#102027",bg="#ff6f00",width=10).place(x=670,y=250)
Button(self.Funciones, text="Eliminar \nTabla",font=("Times New Roman",10),command = self.DropTB,fg="#102027",bg="#ff6f00",width=10).place(x=570,y=300)
        # TUPLES
Label(self.Funciones,text="Tuplas",font=("Times New Roman",15),fg="#ffffff", bg="#37474f").place(x=180,y=250)
Button(self.Funciones, text="Insertar\n Tupla",font=("Times New Roman",10),command = self.insertTP,fg="#102027", bg="#ff6f00",width=10).place(x=70,y=300)
Button(self.Funciones, text="Cargar\n CSV",font=("Times New Roman",10),command = self.CargarCSV,fg="#102027",bg="#ff6f00",width=10).place(x=170,y=300)
Button(self.Funciones, text="Extraer\n Tupla",font=("Times New Roman",10),command =self.ExtRow,fg="#102027",bg="#ff6f00",width=10).place(x=270,y=300)
Button(self.Funciones, text="Update\n Tupla",font=("Times New Roman",10),command =self.Up,fg="#102027", bg="#ff6f00",width=10).place(x=70,y=350)
Button(self.Funciones, text="Eliminar\n Tupla",font=("Times New Roman",10),command =self.DeleteTP,fg="#102027", bg="#ff6f00",width=10).place(x=170,y=350)
Button(self.Funciones, text="Truncate\n Tabla",font=("Times New Roman",10),command =self.TruncateTB,fg="#102027", bg="#ff6f00",width=10).place(x=270,y=350)
        # RETURN TO THE INITIAL SCREEN
Button(self.Funciones, text="Atras", command=self.pantalla1,font=("Times New Roman",15),fg="#102027",bg="red",width=10).place(x=350,y=425)
    #------------------------ DATABASE FUNCTIONS -----------------------#
def NewDB(self):
self.isPantFunciones = 1
self.Funciones.destroy()
self.ndb = Frame(height=500, width=800)
self.ndb.config(bg="#37474f")
self.ndb.pack(padx=15, pady=15)
Label(self.ndb,text="Tytus 2020",font=("Times New Roman",40),fg="#ffffff", bg="#37474f").place(x=250,y=10)
Label(self.ndb,text="Crear Base de Datos",font=("Times New Roman",10),fg="#ffffff", bg="#37474f").place(x=250,y=70)
self.name = StringVar()
Label(self.ndb,text="Nombre Base de Datos: ",font=("Times New Roman",15),fg="#ffffff", bg="#37474f").place(x=180,y=200)
Entry(self.ndb,textvariable=self.name,font=("Times New Roman",15),fg="black").place(x=400,y=200)
Button(self.ndb, text="Atras", command=self.AccederPestanaFunciones,font=("Times New Roman",15),fg="#102027",bg="red",width=10).place(x=250,y=300)
Button(self.ndb, text="Aceptar", command=self._NewBD,font=("Times New Roman",15),fg="#102027",bg="#ff6f00",width=10).place(x=400,y=300)
def _NewBD(self):
if str(self.name.get()) != "":
a = B.createDatabase(str(self.name.get()))
if a == 0:
messagebox.showinfo(message="Base creada con exito\n",
title="BD")
elif a==1:
messagebox.showinfo(message="Ha ocurrido un error\n",
title="BD")
elif a==2:
messagebox.showinfo(message="La base de datos\n ya existe",
title="BD")
else:
messagebox.showinfo(message="No se ha ingresado\nningun nombre",
title="Nombre no ingresado")
def ShowDB(self):
self.isPantFunciones = 1
self.Funciones.destroy()
self.ndb = Frame(height=500, width=800)
self.ndb.config(bg="#37474f")
self.ndb.pack(padx=15, pady=15)
Label(self.ndb,text="Tytus 2020",font=("Times New Roman",40),fg="#ffffff", bg="#37474f").place(x=250,y=10)
Label(self.ndb,text="Mostrar Base de Datos",font=("Times New Roman",10),fg="#ffffff", bg="#37474f").place(x=250,y=70)
self.name = StringVar()
Label(self.ndb,text="Bases de Datos: ",font=("Times New Roman",20),fg="#ffffff", bg="#37474f").place(x=50,y=100)
Button(self.ndb, text="Atras", command=self.AccederPestanaFunciones,font=("Times New Roman",15),fg="#102027",bg="red",width=10).place(x=250,y=400)
Button(self.ndb, text="Aceptar", command=self._NewBD,font=("Times New Roman",15),fg="#102027",bg="#ff6f00",width=10).place(x=400,y=400)
a = B.showDatabases()
c = 0
d = 0
e = 1
for i in a:
Label(self.ndb,text=str(e)+") "+i,font=("Times New Roman",15),fg="white", bg="#37474f").place(x=100+d,y=150+c)
c += 40
e += 1
if c == 200:
c = 0
d += 150
def ReDB(self):
if self.dell:
self.ndb.destroy()
self.dell = False
else:
self.Funciones.destroy()
self.isPantFunciones = 1
self.ndb = Frame(height=500, width=800)
self.ndb.config(bg="#37474f")
self.ndb.pack(padx=15, pady=15)
Label(self.ndb,text="Tytus 2020",font=("Times New Roman",40),fg="#ffffff", bg="#37474f").place(x=250,y=10)
Label(self.ndb,text="Renombrar Base de Datos",font=("Times New Roman",10),fg="#ffffff", bg="#37474f").place(x=250,y=70)
self.name = StringVar()
Label(self.ndb,text="Seleccione la Base de Datos: ",font=("Times New Roman",15),fg="#ffffff", bg="#37474f").place(x=50,y=200)
Label(self.ndb,text="Nuevo Nombre: ",font=("Times New Roman",15),fg="#ffffff", bg="#37474f").place(x=150,y=300)
Entry(self.ndb,textvariable=self.name,font=("Times New Roman",15),fg="black").place(x=300,y=300)
Button(self.ndb, text="Atras", command=self.AccederPestanaFunciones,font=("Times New Roman",15),fg="#102027",bg="red",width=10).place(x=250,y=400)
Button(self.ndb, text="Aceptar", command=self._ReBD,font=("Times New Roman",15),fg="#102027",bg="#ff6f00",width=10).place(x=400,y=400)
self.com = ttk.Combobox(self.ndb,state="readonly",font=("Times New Roman",15))
self.com.place(x=300,y=200)
aux = ["Seleccionar"]
for i in B.showDatabases():
aux.append(i)
self.com["values"] = aux
self.com.current(0)
def _ReBD(self):
if str(self.com.get()) != "Seleccionar" and str(self.name.get()) != "":
a = B.alterDatabase(str(self.com.get()),str(self.name.get()))
if a == 0:
messagebox.showinfo(message="Operacion realizada\ncon exito",
title="BD")
self.com = ttk.Combobox(self.ndb,state="readonly",font=("Times New Roman",15))
self.com.place(x=300,y=200)
aux = ["Seleccionar"]
for i in B.showDatabases():
aux.append(i)
self.com["values"] = aux
self.com.current(0)
elif a==1:
messagebox.showinfo(message="Ha ocurrido un error\n",
title="BD")
self.com.current(0)
elif a==3:
messagebox.showinfo(message="La base de datos\n ya existe",
title="BD")
self.com.current(0)
self.dell = True
self.ReDB()
else:
messagebox.showinfo(message="Por favor ingrese los campos\n",
title="Datos Incompletos")
self.com.current(0)
def DelBD(self):
if self.dell:
self.ndb.destroy()
self.dell = False
else:
self.Funciones.destroy()
self.isPantFunciones = 1
self.ndb = Frame(height=500, width=800)
self.ndb.config(bg="#37474f")
self.ndb.pack(padx=15, pady=15)
Label(self.ndb,text="Tytus 2020",font=("Times New Roman",40),fg="#ffffff", bg="#37474f").place(x=250,y=10)
Label(self.ndb,text="Eliminar Base de Datos",font=("Times New Roman",10),fg="#ffffff", bg="#37474f").place(x=250,y=70)
self.name = StringVar()
Label(self.ndb,text="Seleccione la Base de Datos: ",font=("Times New Roman",15),fg="#ffffff", bg="#37474f").place(x=80,y=200)
Button(self.ndb, text="Atras", command=self.AccederPestanaFunciones,font=("Times New Roman",15),fg="#102027",bg="red",width=10).place(x=250,y=300)
Button(self.ndb, text="Aceptar", command=self._DelBD,font=("Times New Roman",15),fg="#102027",bg="#ff6f00",width=10).place(x=400,y=300)
self.com = ttk.Combobox(self.ndb,state="readonly",font=("Times New Roman",15))
self.com.place(x=350,y=200)
aux = ["Seleccionar"]
for i in B.showDatabases():
aux.append(i)
self.com["values"] = aux
self.com.current(0)
def _DelBD(self):
if str(self.com.get()) != "Seleccionar":
a = messagebox.askquestion(message="Seguro que desea eliminar la\n Base de Datos "+str(self.com.get()+"\nesta accion no puede revertirse"),
title="Confirmacion")
if a != "no":
z = B.dropDatabase(str(self.com.get()))
if z == 0:
messagebox.showinfo(message="Operacion realizada\n con exito",
title="BD")
elif z==1:
messagebox.showinfo(message="Ha ocurrido un error\n",
title="BD")
self.dell = True
self.DelBD()
else:
self.com.current(0)
else:
messagebox.showinfo(message="Por favor ingrese los campos\n",
title="Datos Incompletos")
self.com.current(0)
    #------------------------- TABLE FUNCTIONS --------------------------------#
def NewTable(self):
self.isPantFunciones = 1
self.Funciones.destroy()
self.ndb = Frame(height=500, width=800)
self.ndb.config(bg="#37474f")
self.ndb.pack(padx=15, pady=15)
Label(self.ndb,text="Tytus 2020",font=("Times New Roman",40),fg="#ffffff", bg="#37474f").place(x=250,y=10)
Label(self.ndb,text="Crear Nueva Tabla",font=("Times New Roman",10),fg="#ffffff", bg="#37474f").place(x=250,y=70)
self.name = StringVar()
self.num = StringVar()
Label(self.ndb,text="Seleccione la Base de Datos: ",font=("Times New Roman",15),fg="#ffffff", bg="#37474f").place(x=80,y=200)
Label(self.ndb,text="Ingrese el nombre de la Tabla: ",font=("Times New Roman",15),fg="#ffffff", bg="#37474f").place(x=80,y=250)
Label(self.ndb,text="Numero de columnas: ",font=("Times New Roman",15),fg="#ffffff", bg="#37474f").place(x=120,y=300)
Entry(self.ndb,textvariable=self.name,font=("Times New Roman",15),fg="black").place(x=350,y=250)
Button(self.ndb, text="Atras", command=self.AccederPestanaFunciones,font=("Times New Roman",15),fg="#102027",bg="red",width=10).place(x=250,y=400)
Button(self.ndb, text="Aceptar", command=self._NewTable,font=("Times New Roman",15),fg="#102027",bg="#ff6f00",width=10).place(x=400,y=400)
Spinbox(self.ndb, from_=1,to=10000,font=("Times New Roman",15),fg="black", bg="white",state="readonly",textvariable = self.num).place(x=350,y=300)
self.com = ttk.Combobox(self.ndb,state="readonly",font=("Times New Roman",15))
self.com.place(x=350,y=200)
aux = ["Seleccionar"]
for i in B.showDatabases():
aux.append(i)
self.com["values"] = aux
self.com.current(0)
def _NewTable(self):
if str(self.com.get()) != "Seleccionar" and str(self.name.get()) != "":
a = B.createTable(str(self.com.get()),str(self.name.get()), int(self.num.get()))
if a == 0:
messagebox.showinfo(message="Operacion realizada\ncon exito",
title="BD")
elif a==1:
messagebox.showinfo(message="Ha ocurrido un error\n",
title="BD")
elif a==3:
messagebox.showinfo(message="La Tabla en la Base de Datos\n ya existe",
title="BD")
else:
messagebox.showinfo(message="Por favor ingrese los campos\n",
title="Datos Incompletos")
def ShowTB(self):
self.isPantFunciones = 1
self.Funciones.destroy()
self.ndb = Frame(height=500, width=800)
self.ndb.config(bg="#37474f")
self.ndb.pack(padx=15, pady=15)
Label(self.ndb,text="Tytus 2020",font=("Times New Roman",40),fg="#ffffff", bg="#37474f").place(x=250,y=10)
Label(self.ndb,text="Mostrar Tablas de una Base de Datos",font=("Times New Roman",10),fg="#ffffff", bg="#37474f").place(x=250,y=70)
self.name = StringVar()
Label(self.ndb,text="Bases de Datos: ",font=("Times New Roman",20),fg="#ffffff", bg="#37474f").place(x=50,y=100)
Button(self.ndb, text="Atras", command=self.AccederPestanaFunciones,font=("Times New Roman",15),fg="#102027",bg="red",width=10).place(x=650,y=100)
Button(self.ndb, text="Aceptar", command=self._ShowTB,font=("Times New Roman",15),fg="#102027",bg="#ff6f00",width=10).place(x=500,y=100)
self.com = ttk.Combobox(self.ndb,state="readonly",font=("Times New Roman",15))
self.com.place(x=240,y=105)
aux = ["Seleccionar"]
for i in B.showDatabases():
aux.append(i)
self.com["values"] = aux
self.com.current(0)
def _ShowTB(self):
if str(self.com.get()) != "Seleccionar":
self.aux = self.com.get()
self.ndb.destroy()
self.ndb = Frame(height=500, width=800)
self.ndb.config(bg="#37474f")
self.ndb.pack(padx=15, pady=15)
Label(self.ndb,text="Tytus 2020",font=("Times New Roman",40),fg="#ffffff", bg="#37474f").place(x=250,y=10)
Label(self.ndb,text="Mostrar Tablas de una Base de Datos",font=("Times New Roman",10),fg="#ffffff", bg="#37474f").place(x=250,y=70)
self.name = StringVar()
Label(self.ndb,text="Bases de Datos: | |
<reponame>mnubo/smartobjects-python-client
import gzip
import json
import uuid
from builtins import filter
from datetime import datetime
from io import BytesIO
from typing import Tuple, Dict, Union, Any, List, Optional
import zlib
from .routes import route
class MockMnuboBackend(object):
def __init__(self):
self.clear()
def clear(self):
self.events = {}
self.owners = {}
self.objects = {}
self.counter = 0
def _gzip_encode(self, data: str) -> bytes:
out = BytesIO()
f = gzip.GzipFile(mode='wb', fileobj=out)
f.write(data.encode('utf8'))
f.close()
return out.getvalue()
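    # Round-trip sanity check (illustrative, standard library only):
    #   gzip.decompress(self._gzip_encode("hello")) == b"hello"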
@route('POST', '^/oauth/.*')
def auth(self, body: Dict[str, Any], params: Dict[str, Any]) -> Tuple[int, Dict[str, Union[str, int]]]:
return 200, {
"access_token": "<TOKEN>",
"token_type": "Bearer",
"expires_in": 3600,
"scope": "ALL",
"jti": str(uuid.uuid4())
}
    @route('POST', '^/fail/oauth/.*')
    def auth_fail(self, body: Dict[str, Any], params: Dict[str, Any]) -> Tuple[int, str]:
        # distinct name so this handler does not shadow auth() above
        return 502, '<h1ml>hey oh</html>'
@route('GET', '^/unvailable/(.+)$')
def unavailable(self, params: Dict[str, Any]) -> Tuple[int, Union[Dict[str, Any], str]]:
count = int(params[0])
self.counter = self.counter + 1
if self.counter > count:
return 200, {
"data": "ok"
}
else:
return 503, "Service Unavailable"
# events
def _process_event(self, event: Dict[str, Any], must_exists: bool) -> Dict[str, Any]:
if "x_object" not in event or "x_device_id" not in event["x_object"]:
return {"result": "error", "message": "Missing x_object.x_device_id"}
if "x_event_type" not in event:
return {"result": "error", "message": "Missing x_event_type"}
device_id = event["x_object"]["x_device_id"]
id = uuid.UUID(event['event_id']) if 'event_id' in event else uuid.uuid4()
if id in self.events:
return {"result": "error", "id": str(id), "message": "Event ID '{}' already exists".format(id)}
if must_exists and device_id not in self.objects:
return {"result": "error", "id": str(id), "message": "Object '{}' not found".format(device_id)}
self.events[id] = event
return {"result": "success", "id": str(id), "objectExists": device_id in self.objects}
@route('POST', r'^/events(?:\?([a-z=_]+)?)?(?:&([a-z=_]+)?)?$')
    def post_events(self, body: List[Dict[str, Any]], params: Dict[str, Any]) -> Tuple[
            int, Optional[Union[List[Union[str, Dict[str, Any]]], str]]]:
must_exists, report_result = False, False
for p in params:
if p and p.startswith('must_exist'):
must_exists = p.endswith('true')
if p and p.startswith('report_result'):
report_result = p.endswith('true')
result = [self._process_event(event, must_exists) for event in body]
failed = list(filter(lambda r: r['result'] != "success", result))
if report_result:
return 207 if failed else 200, result
else:
if failed:
return 400, failed[0]['message']
else:
return 200, None
    @route('POST', r'^/objects/(.+)/events(?:\?([a-z=_]+)?)?$')
    def post_events_on_object(self, body: List[Dict[str, Any]], params: Dict[str, Any]) -> Tuple[
            int, List[Dict[str, Any]]]:
[event.update({'x_object': {'x_device_id': params[0]}}) for event in body]
result = [self._process_event(event, True) for event in body]
        failed = list(filter(lambda r: r['result'] != "success", result))
        return 207 if failed else 200, result
@route('GET', '^/events/exists/(.+)$')
def get_events_exists(self, params: Dict[str, Any]) -> Tuple[int, Dict[Any, bool]]:
return 200, {params[0]: uuid.UUID(params[0]) in self.events}
@route('POST', '^/events/exists$')
    def post_events_exist(self, body: List[str], _) -> Tuple[int, List[Dict[str, bool]]]:
return 200, [{id: uuid.UUID(id) in self.events} for id in body]
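    # Illustrative exchange (made-up UUID): POST /events/exists with body
    #   ["2f1e..."]  ->  200, [{"2f1e...": false}]
    # until an event with that id has been posted to the mock.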
# objects
def _process_object(self, obj, update=False) -> Dict[str, str]:
if 'x_device_id' not in obj:
return {"result": "error", "message": "x_device_id cannot be null or empty."}
dev_id = obj['x_device_id']
# x_device_id is mandatory for object creation
if dev_id not in self.objects and 'x_object_type' not in obj:
return {"result": "error", "id": dev_id, "message": "x_object_type cannot be null or empty."}
if dev_id in self.objects:
if update:
self.objects[dev_id].update(obj)
else:
return {"result": "error", "id": dev_id,
"message": "Object with device id '{}' already exists.".format(dev_id)}
else:
obj['x_registration_date'] = datetime.now().isoformat()
self.objects[dev_id] = obj
return {"result": "success", "id": dev_id}
@route('POST', '^/objects$')
def post_one_object(self, body: Dict[str, Any], _) -> Tuple[int, Union[str, Dict[str, Any]]]:
result = self._process_object(body)
if result['result'] != "success":
return 400, result["message"]
else:
return 201, body
@route('PUT', '^/objects$')
    def put_batch_objects(self, body: List[Dict[str, Any]], _) -> Tuple[int, List[Dict[str, Any]]]:
result = [self._process_object(obj, True) for obj in body]
        failed = list(filter(lambda r: r['result'] != "success", result))
        return 207 if failed else 200, result
@route('PUT', '^/objects/(.+)$')
def put_object_by_id(self, body: Dict[str, Any], params: Dict[str, Any]) -> Tuple[int, Optional[str]]:
dev_id = params[0]
if dev_id not in self.objects:
return 400, "Object with x_device_id '{}' not found.".format(dev_id)
self.objects[dev_id].update(body)
return 200, None
@route('DELETE', '^/objects/(.+)$')
def delete_objects(self, _, params: Dict[str, Any]) -> Tuple[int, Optional[str]]:
dev_id = params[0]
if dev_id not in self.objects:
return 400, "Object with x_device_id '{}' not found.".format(dev_id)
del self.objects[dev_id]
return 200, None
@route('GET', '^/objects/exists/(.+)$')
def get_objects_exists(self, params: Dict[str, Any]) -> Tuple[int, Dict[Any, bool]]:
dev_id = params[0]
return 200, {dev_id: dev_id in self.objects}
@route('POST', '^/objects/exists$')
    def post_objects_exists(self, body: List[str], _) -> Tuple[int, List[Dict[Any, bool]]]:
return 200, [{dev_id: dev_id in self.objects} for dev_id in body]
# owners
def _process_owner(self, owner: Dict[str, Any], update=False) -> Dict[str, str]:
if 'username' not in owner:
return {"result": "error", "message": "username cannot be null or empty."}
username = owner['username']
if 'invalid_property' in owner:
return {"result": "error", "id": username, "message": "Unknown field 'invalid_property'"}
if username in self.owners:
if update:
self.owners[username].update(owner)
else:
return {"result": "error", "id": username,
"message": "The username '{}' is already in use.".format(username)}
else:
owner['x_registration_date'] = datetime.now().isoformat()
self.owners[username] = owner
return {"result": "success", "id": username}
@route('POST', '^/owners/?$')
def post_one_owner(self, body: Dict[str, Any], _) -> Tuple[int, str]:
result = self._process_owner(body)
if result['result'] != 'success':
return 400, result['message']
else:
return 201, self.owners[body['username']]
@route('PUT', '^/owners$')
    def put_owners(self, body: List[Dict[str, Any]], _) -> Tuple[int, List[Dict[str, str]]]:
result = [self._process_owner(owner, True) for owner in body]
        failed = list(filter(lambda r: 'result' in r and r['result'] == "error", result))
        return 207 if failed else 200, result
@route('PUT', '^/owners/(.+)$')
def put_owner_by_id(self, body: Dict[str, Any], params: Dict[str, Any]) -> Tuple[int, Optional[str]]:
username = params[0]
if username not in self.owners:
return 400, "Owner '{}' not found.".format(username)
self.owners[username].update(body)
return 200, None
@route('DELETE', '^/owners/(.+)$')
def delete_owners(self, body: Dict[str, Any], params: Dict[str, Any]) -> Tuple[int, Optional[str]]:
username = params[0]
if body and 'x_timestamp' in body:
# actual SmartObjects platform would tag the deletion with the provided timestamp
pass
if username not in self.owners:
return 400, "Owner '{}' not found.".format(username)
del self.owners[username]
return 200, None
@route('POST', '^/owners/(.+)/objects/(.+)/claim$')
def post_owners_claim(self, _, params: Dict[str, Any]) -> Tuple[int, Optional[str]]:
username, device_id = params
if username not in self.owners:
return 400, "Owner '{}' not found.".format(username)
if device_id not in self.objects:
return 400, "Object with x_device_id '{}' not found.".format(device_id)
self.objects[device_id]['x_owner'] = username
return 200, None
@route('POST', '^/owners/(.+)/objects/(.+)/unclaim$')
def post_owners_unclaim(self, _, params: Dict[str, Any]) -> Tuple[int, Optional[str]]:
username, device_id = params
if username not in self.owners:
return 400, "Owner '{}' not found.".format(username)
if device_id not in self.objects:
return 400, "Object with x_device_id '{}' not found.".format(device_id)
self.objects[device_id]['x_owner'] = None
return 200, None
@route('POST', '^/owners/claim$')
    def post_owners_batch_claim(self, body: List[Dict[str, str]], _) -> Tuple[int, List[Dict[str, str]]]:
results = []
for claim in body:
username, device_id = claim['username'], claim['x_device_id']
if username not in self.owners:
results.append(
{"id": device_id, "result": "error", "message": "Owner '{}' not found.".format(username)})
elif device_id not in self.objects:
results.append({"id": device_id, "result": "error",
"message": "Object with x_device_id '{}' not found.".format(device_id)})
else:
self.objects[device_id]['x_owner'] = username
results.append({"id": device_id, "result": "success"})
        failed = list(filter(lambda r: 'result' in r and r['result'] == "error", results))
        return 207 if failed else 200, results
@route('POST', '^/owners/unclaim$')
    def post_owners_batch_unclaim(self, body: List[Dict[str, str]], _):
results = []
for unclaim in body:
username, device_id = unclaim['username'], unclaim['x_device_id']
if username not in self.owners:
results.append(
{"id": device_id, "result": "error", "message": "Owner '{}' not found.".format(username)})
elif device_id not in self.objects:
results.append({"id": device_id, "result": "error",
"message": "Object with x_device_id '{}' not found.".format(device_id)})
elif 'x_owner' not in self.objects[device_id] or self.objects[device_id]['x_owner'] != username:
results.append({"id": device_id, "result": "error",
"message": "Object with x_device_id '{}' is not claimed by '{}'.".format(device_id,
username)})
else:
self.objects[device_id]['x_owner'] = None
results.append({"id": device_id, "result": "success"})
        failed = list(filter(lambda r: 'result' in r and r['result'] == "error", results))
        return 207 if failed else 200, results
@route('POST', '^/owners/(.+)/password$')
def put_owners_password(self, body: Dict[str, Any], params: Dict[str, Any]) -> Tuple[int, Optional[str]]:
username = params[0]
if username not in self.owners:
return 400, "Owner '{}' not found.".format(username)
if 'x_password' not in body:
return 400, "x_password cannot be null or empty."
self.owners[username]['x_password'] = body['x_password']
return 200, None
@route('GET', '^/owners/exists/(.+)')
def get_owner_exists(self, params: Dict[str, Any]) -> Tuple[int, Dict[Any, bool]]:
username = params[0]
return 200, {username: username in self.owners}
@route('POST', '^/owners/exists/?$')
    def post_owners_exist(self, body: List[str], _) -> Tuple[int, List[Dict[Any, bool]]]:
return 200, [{username: username in self.owners} for username in body]
# search
def _validate_query(self, query: Dict[str, Any]) -> List[str]:
# /!\ this validation is for test purpose only and do not implement all checks done by the actual API
errors = []
if not query:
errors.append("Query cannot be empty or | |
### Rubik's cube solver ###
# Inspired from http://beust.com/rubik/ and personal knowledge
# Imports functions to do the movements
from cube_functions.fonctions_logique import *
from cube_functions.constantes import *
# DEFINE SEARCH FUNCTION
def _cubesearch(rubik, side_filter, color_filters, xyz_filter = (-1, -1, -1)):
"""
Returns the corresponding cubelets according to filters
Parameters
----------
rubik : list of lists of lists
side_filter : int
The number of faces exposed in the cubelet to look for
-1 : ignore type search
2 : look for 2-colors cubelets
3 : look for 3-colors cubelets
color_filters : list
The list of colors to look for (<= 3 colors needed)
format : [COLOR, COLOR]
COLOR : str
If no color is given, x and y and z must be != -1 (see below) and side_filter is ignored
xyz_filter : tuple
Filter by coordinates (default : no specific coordinates)
format : (x, y, z)
x : int
y : int
z : int
-1 : ignore coordinate search for the given coordinate
0 1 2 : the coordinate to look for
Returns
-------
cubelets : list
The cubelets found
Example
-------
_cubesearch(rubik, -1, ["B", "Y", "R"])
Search where is BYR
_cubesearch(rubik, -1, [], (1, 0, 2))
Search who is at x=1, y=0, z=2
_cubesearch(rubik, 2, ["W"], (1, -1, -1))
Look for every edge cubelet at x=1 with a W side
_cubesearch(rubik, 3, ["W", "B"], (-1, 2, -1))
Look for every corner cubelet at y=2 with a W and B side
"""
# Debug
if side_filter > 3 or side_filter < -1 or side_filter == 0 or side_filter == 1:
raise ValueError("side_filter value must be between 0 and 3")
    if side_filter != -1 and len(color_filters) > side_filter:
        raise ValueError("len(color_filters) must not exceed side_filter")
if ("O" in color_filters and "R" in color_filters) or ("W" in color_filters and "Y" in color_filters) or ("G" in color_filters and "B" in color_filters):
raise ValueError("color_filters must not contain opposites colors")
if len(xyz_filter) > 3:
raise ValueError("xyz_filter's len must be less than 3")
for i in xyz_filter:
if i not in [-1, 0, 1, 2]:
raise ValueError("coordinates must be -1, 0, 1 or 2")
if side_filter == 3:
if i in [0, 1]:
raise ValueError("coordinates must be -1 or 2")
for i in color_filters:
if i not in ["B", "G", "R", "Y", "O", "W"]:
raise ValueError("color_filters must contain only G, R, B, Y, O or W")
# -----------------------------------------------------------
    # Search mode: return the color string stored at the given coordinates
if color_filters == []:
for coord in xyz_filter:
if coord == -1:
raise ValueError("coordinates must be 0, 1 or 2")
return rubik[xyz_filter[0]][xyz_filter[1]][xyz_filter[2]]
# -----------------------------------------------------------
    # Search mode: return a list of cubelet coordinates
# List of cubelets found
cubelets = []
    # Counts the number of sides (linked to type, double = 2, triple = 3)
sides = 3
# Counts the number of sides corresponding to the color_filters (if count == sides, then add the cubelet to the list)
count = 0
    # Counts color entries so we know when all 3 of a cubelet have been seen
maincount = 0
    # Apply coordinates filter: scan every index, or only the fixed one
    if xyz_filter[0] == -1:
        x_range = range(len(rubik))
    else:
        x_range = [xyz_filter[0]]
    if xyz_filter[1] == -1:
        y_range = range(len(rubik[0]))
    else:
        y_range = [xyz_filter[1]]
    if xyz_filter[2] == -1:
        z_range = range(len(rubik[0][0]))
    else:
        z_range = [xyz_filter[2]]
    # MAIN SEARCH
    for x in x_range:
        for y in y_range:
            for z in z_range:
                for color in rubik[x][y][z]:
maincount += 1
# Count colors and noneside
if color == "N":
sides -= 1
elif color in color_filters:
count += 1
                    # When all 3 color entries of this cubelet have been checked
                    if maincount == 3:
                        # If the cubelet type matches (or type search is ignored)
                        if side_filter == -1 or side_filter == sides:
                            # If every color filter matched, add the cubelet to the list
                            if count == len(color_filters):
                                cubelets.append((x, y, z))
                        maincount = count = 0
                        sides = 3
return cubelets
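# Illustrative result (coordinates are made up): _cubesearch(rubik, 2, ["W", "B"])
# might return [(0, 1, 2)], i.e. the white/blue edge cubelet currently sits at
# x=0, y=1, z=2 in the nested-list representation.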
# DEFINE SOLVE FUNCTIONS
def solve_rubiks_easiest_method(rubik):
"""
Solves the rubik's cube using the classic technique :
1- Make the white cross
2- Place the white corners
3- Place the mid edges
4- Make the yellow cross
5- Adjust the yellow cross
6- Place the yellow corners
7- Rotate the yellow corners
8- Optimize the movement list
Parameters
----------
rubik : list
The rubik's cube (list of lists of lists)
Returns
-------
movement : list
The movements to solve the rubik's cube
format : <U, D, L, R, F, B> [' 2]
"""
if TAILLE != 3:
return "This method is only for 3x3x3 rubik's cube"
movements = []
temp_movement = []
# 1- Make the white cross
rubik, temp_movement = _white_cross(rubik)
movements.append(temp_movement)
# 2- Place the white corners
rubik, temp_movement = _white_corners(rubik)
movements.append(temp_movement)
# 3- Place the mid edges
rubik, temp_movement = _mid_edges(rubik)
movements.append(temp_movement)
# 4- Make the yellow cross
rubik, temp_movement = _make_yellow_cross(rubik)
movements.append(temp_movement)
# 5- Adjust the yellow cross
rubik, temp_movement = _adjust_yellow_cross(rubik)
movements.append(temp_movement)
# 6- Place the yellow corners
rubik, temp_movement = _place_yellow_corners(rubik)
movements.append(temp_movement)
# 7- Rotate the yellow corners
rubik, temp_movement = _rotate_yellow_corners(rubik)
movements.append(temp_movement)
# 8- Optimize the movement list
movements = _optimize_movement_list(movements)
return movements
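# Illustrative use (assumes a rubik built with the helpers from constantes.py):
#   moves = solve_rubiks_easiest_method(rubik)
# moves holds one list of movement strings per stage, e.g. [["U", "R'"], ["F2"], ...],
# once the per-stage functions below are implemented.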
#TODO
def _white_cross(rubik): #that's the worst :c
"""
Creates the white cross for the rubik's cube (places only the edges)
- Find all the edges (2 colors 1 none)
- Find the white ones (White, COLOR:BGRO)
- Look for the COLOR of the edge
- For each white edge :
> Place the COLOR next to the center of the color
> Rotate until the white edges are in the right position
Parameters
----------
rubik : list
The rubik's cube (list of lists of lists)
Returns
-------
rubik : list
The rubik's cube (list of lists of lists)
movements: list
The list of movements to make the white cross
"""
# Defining constants
movements = []
# cX imported from constantes.py
# Defining movements needed
def _place_edge(rubik, edge, color):
#TODO: Separate in z cases and write each algorithm
return None
# Placing the blue edge
cWB = _cubesearch(rubik, 2, ["W","B"], (-1, -1, -1))
return rubik, movements
#TODO
def _white_corners(rubik):
"""
Places the white corners for the rubik's cube
- Find all the corners (3 colors 0 none)
- Find the white ones (White, COLOR:BGRO, COLOR:BGRO)
"""
movements = []
#TODO: Write the algorithm
return rubik, movements
#TODO
def _mid_edges(rubik):
"""
Places the mid layer
- Find all of the edges (2 colors 1 none)
- Filter to remove white and yellow edges (BGRO, BGRO, none = colored edges)
- Look for positions of the edges
FIRST COLORED EDGES ON THE YELLOW SIDE:
- Look for the color ON THE YELLOW SIDE (=C1)
- Look for the color ON THE COLORS SIDE (=C2)
- Look for the center color orders (already set)
> If mid colors = C1, C2 (left to right)
- C2 -> C2 + 90°
- Focus on C2 color side, white down
        - up_leftcolumn > left_upline > down_leftcolumn > left_upline > clockwise > right_upline > counterclockwise
> If mid colors = C2, C1 (left to right)
- C2 -> C2 - 90°
- Focus on C2 color side, white down
- up_rightcolumn > right_upline > down_rightcolumn > right_upline > counterclockwise > left_upline > clockwise
THEN COLORED EDGES ON THE COLORSIDE BUT REVERSED
- Focus on any side
    - If focus on right side : up_leftcolumn > left_upline > down_leftcolumn > left_upline > clockwise > right_upline > counterclockwise
- If focus on left side : up_rightcolumn > right_upline > down_rightcolumn > right_upline > counterclockwise > left_upline > clockwise
- Redo the "FIRST COLORED EDGES ON THE YELLOW SIDE"
Parameters
----------
rubik : list
The rubik's cube (list of lists of lists)
Returns
-------
rubik : list
The rubik's cube (list of lists of lists)
movements: list
The list of movements to make the mid edges
"""
"Gift Certificate for %(waybill)s" % {"waybill": send_ref}
filename = "%s.xls" % title
response = current.response
from gluon.contenttype import contenttype
response.headers["Content-Type"] = contenttype(".xls")
disposition = "attachment; filename=\"%s\"" % filename
response.headers["Content-disposition"] = disposition
return output.read()
# =============================================================================
def inv_item_total_volume(row):
"""
Compute the total volume of an inventory item (Field.Method)
@param row: the Row
"""
try:
inv_item = getattr(row, "inv_inv_item")
except AttributeError:
inv_item = row
try:
supply_item = getattr(row, "supply_item")
volume = supply_item.volume
except AttributeError:
# Need to load the supply item
# Avoid this by adding to extra_fields
itable = current.s3db.inv_inv_item
stable = current.s3db.supply_item
query = (itable.id == inv_item.id) & \
(itable.item_id == stable.id)
supply_item = current.db(query).select(stable.volume,
limitby = (0, 1),
).first()
volume = supply_item.volume if supply_item else None
if volume is None:
return NONE
try:
quantity = inv_item.quantity
except AttributeError:
# Need to reload the inv item
# Avoid this by adding to extra_fields
itable = current.s3db.inv_inv_item
query = (itable.id == inv_item.id)
inv_item = current.db(query).select(itable.quantity,
limitby = (0, 1),
).first()
quantity = inv_item.quantity
try:
supply_item_pack = getattr(row, "supply_item_pack")
pack_quantity = supply_item_pack.quantity
except AttributeError:
# Need to load the supply item pack
# Avoid this by adding to extra_fields
itable = current.s3db.inv_inv_item
ptable = current.s3db.supply_item_pack
query = (itable.id == inv_item.id) & \
(itable.item_pack_id == ptable.id)
supply_item_pack = current.db(query).select(ptable.quantity,
limitby = (0, 1),
).first()
pack_quantity = supply_item_pack.quantity
return round(quantity * pack_quantity * volume, 2)
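    # Worked example (illustrative numbers): quantity=10, pack_quantity=12 and
    # volume=0.5 give round(10 * 12 * 0.5, 2) = 60.0; inv_item_total_weight below
    # is identical apart from rounding to 3 decimals.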
# -----------------------------------------------------------------------------
def inv_item_total_weight(row):
"""
Compute the total weight of an inventory item (Field.Method)
@param row: the Row
"""
try:
inv_item = getattr(row, "inv_inv_item")
except AttributeError:
inv_item = row
try:
supply_item = getattr(row, "supply_item")
weight = supply_item.weight
except AttributeError:
# Need to load the supply item
# Avoid this by adding to extra_fields
itable = current.s3db.inv_inv_item
stable = current.s3db.supply_item
query = (itable.id == inv_item.id) & \
(itable.item_id == stable.id)
supply_item = current.db(query).select(stable.weight,
limitby = (0, 1),
).first()
weight = supply_item.weight if supply_item else None
if weight is None:
return NONE
try:
quantity = inv_item.quantity
except AttributeError:
# Need to reload the inv item
# Avoid this by adding to extra_fields
itable = current.s3db.inv_inv_item
query = (itable.id == inv_item.id)
inv_item = current.db(query).select(itable.quantity,
limitby = (0, 1),
).first()
quantity = inv_item.quantity
try:
supply_item_pack = getattr(row, "supply_item_pack")
pack_quantity = supply_item_pack.quantity
except AttributeError:
# Need to load the supply item pack
# Avoid this by adding to extra_fields
itable = current.s3db.inv_inv_item
ptable = current.s3db.supply_item_pack
query = (itable.id == inv_item.id) & \
(itable.item_pack_id == ptable.id)
supply_item_pack = current.db(query).select(ptable.quantity,
limitby = (0, 1),
).first()
pack_quantity = supply_item_pack.quantity
return round(quantity * pack_quantity * weight, 3)
# =============================================================================
def inv_packing_list(r, **attr):
"""
Generate a Packing List for an Outbound Shipment
This is exported in XLS format
"""
from s3.codecs.xls import S3XLS
try:
import xlwt
except ImportError:
r.error(503, S3XLS.ERROR.XLWT_ERROR)
# Extract the Data
send_id = r.id
record = r.record
send_ref = record.send_ref
site_id = record.site_id
to_site_id = record.to_site_id
sites = [site_id, to_site_id]
db = current.db
s3db = current.s3db
# Items
ptable = s3db.inv_package
sptable = s3db.inv_send_package
spitable = s3db.inv_send_package_item
ttable = s3db.inv_track_item
itable = s3db.supply_item
query = (sptable.send_id == send_id) & \
(sptable.package_id == ptable.id) & \
(spitable.send_package_id == sptable.id) & \
(spitable.track_item_id == ttable.id) & \
(ttable.item_id == itable.id)
items = db(query).select(ptable.type,
sptable.number,
itable.name,
spitable.quantity,
sptable.weight,
sptable.volume,
orderby = sptable.number,
)
# Countries of both Source & Destination
stable = s3db.org_site
gtable = s3db.gis_location
query = (stable.site_id.belongs(sites)) & \
(stable.location_id == gtable.id)
locations = db(query).select(gtable.L0,
limitby = (0, 2),
)
fr = False
settings = current.deployment_settings
for row in locations:
if "fr" in settings.get_L10n_languages_by_country(row.L0):
fr = True
break
# Organisations
otable = s3db.org_organisation
query = (stable.site_id.belongs(sites)) & \
(stable.organisation_id == otable.id)
fields = [stable.location_id,
stable.site_id,
otable.id,
otable.root_organisation,
otable.name,
otable.logo,
]
if fr:
ontable = s3db.org_organisation_name
fields.append(ontable.name_l10n)
left = ontable.on((ontable.organisation_id == otable.id) & \
(ontable.language == "fr"))
else:
left = None
orgs = db(query).select(*fields,
left = left,
limitby = (0, 2)
)
for row in orgs:
site = row.org_site
if site.site_id == site_id:
# Sender Org
if fr:
org_name = row["org_organisation_name.name_l10n"]
org = row["org_organisation"]
if not org_name:
org_name = org.name
else:
org = row["org_organisation"]
org_name = org.name
if org.id == org.root_organisation:
branch = None
else:
branch = org.name
# Lookup Root Org
fields = [otable.name,
otable.logo,
]
if fr:
fields.append(ontable.name_l10n)
org = db(otable.id == org.root_organisation).select(*fields,
left = left,
limitby = (0, 1)
).first()
if fr:
org_name = org["org_organisation_name.name_l10n"]
org = org["org_organisation"]
if not org_name:
org_name = org.name
else:
org_name = org.name
else:
# Recipient Org
location_id = site.location_id
if fr:
dest_org_name = row["org_organisation_name.name_l10n"]
dest_org = row["org_organisation"]
if not dest_org_name:
dest_org_name = dest_org.name
else:
dest_org = row["org_organisation"]
dest_org_name = dest_org.name
if dest_org.id != dest_org.root_organisation:
# Lookup Root Org
fields = [otable.name,
]
if fr:
fields.append(ontable.name_l10n)
dest_org = db(otable.id == dest_org.root_organisation).select(*fields,
left = left,
limitby = (0, 1)
).first()
if fr:
dest_org_name = dest_org["org_organisation_name.name_l10n"]
dest_org = dest_org["org_organisation"]
if not dest_org_name:
dest_org_name = dest_org.name
else:
dest_org_name = dest_org.name
# Represent the Data
from .org import org_SiteRepresent
site_represent = org_SiteRepresent(show_type = False)
site_represent.bulk(sites) # Bulk lookup, with results cached in class instance
source = site_represent(site_id)
destination = site_represent(to_site_id)
from .gis import gis_LocationRepresent
address = gis_LocationRepresent(show_level = False)(location_id)
recipient_id = record.recipient_id
if recipient_id:
from .pr import pr_PersonRepresent
recipient = pr_PersonRepresent(truncate = False)(recipient_id)
else:
recipient = None
package_type_represent = ptable.type.represent
T = current.T
labels = ["N° Box",
"Description",
"Quantity",
"Weight\nKg",
"Volume\nm3",
]
# Create the workbook
book = xlwt.Workbook(encoding = "utf-8")
# Add sheet
title = "Packing List"
sheet = book.add_sheet(title)
sheet.set_print_scaling(69)
# Set column Widths
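# xlwt column widths are in 1/256ths of the width of the zero character in
# the default font, so e.g. 3300 / 256 ~= 12.9 characters (~2.52 cm here)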
sheet.col(0).width = 3300 # 2.52 cm
sheet.col(1).width = 13595 # 10.39 cm
sheet.col(2).width = 4291 # 3.28 cm
sheet.col(3).width = 4432 # 3.39 cm
sheet.col(4).width = 4031 # 3.08 cm
# Define styles
POINT_12 = 240 # Twips = Points * 20
POINT_10 = 200 # Twips = Points * 20
POINT_9 = 180 # Twips = Points * 20
ROW_HEIGHT = 320 # Realised through trial & error
ROWS_2_HEIGHT = int(2.2 * 320)
style = xlwt.XFStyle()
style.font.height = POINT_12
HORZ_CENTER = style.alignment.HORZ_CENTER
HORZ_RIGHT = style.alignment.HORZ_RIGHT
VERT_CENTER = style.alignment.VERT_CENTER
THICK = style.borders.THICK
THIN = style.borders.THIN
style.borders.top = THIN
style.borders.left = THIN
style.borders.right = THIN
style.borders.bottom = THIN
if fr:
italic_style = xlwt.XFStyle()
italic_style.font.italic = True
italic_style.font.height = POINT_12
italic_wrap_style = xlwt.XFStyle()
italic_wrap_style.font.italic = True
italic_wrap_style.font.height = POINT_12
italic_wrap_style.alignment.wrap = 1
bold_style = xlwt.XFStyle()
bold_style.font.bold = True
bold_style.font.height = POINT_12
bold_italic_style = xlwt.XFStyle()
bold_italic_style.font.italic = True
bold_italic_style.font.bold = True
bold_italic_style.font.height = POINT_12
bold_italic_center_style = xlwt.XFStyle()
bold_italic_center_style.font.italic = True
bold_italic_center_style.font.bold = True
bold_italic_center_style.font.height = POINT_12
bold_italic_center_style.alignment.horz = HORZ_CENTER
center_style = xlwt.XFStyle()
center_style.font.height = POINT_12
center_style.alignment.horz = HORZ_CENTER
center_style.borders.top = THIN
center_style.borders.left = THIN
center_style.borders.right = THIN
center_style.borders.bottom = THIN
right_style = xlwt.XFStyle()
right_style.font.height = POINT_12
right_style.alignment.horz = HORZ_RIGHT
right_style.borders.top = THIN
right_style.borders.left = THIN
right_style.borders.right = THIN
right_style.borders.bottom = THIN
wrap_style = xlwt.XFStyle()
wrap_style.font.height = POINT_12
wrap_style.alignment.wrap = 1
header_style = xlwt.XFStyle()
header_style.font.bold = True
header_style.font.height = POINT_12
header_style.alignment.horz = HORZ_CENTER
header_style.alignment.vert = VERT_CENTER
header_style.borders.top = THIN
header_style.borders.left = THIN
header_style.borders.right = THIN
header_style.borders.bottom = THIN
header_style.pattern.pattern = xlwt.Style.pattern_map["fine_dots"]
header_style.pattern.pattern_fore_colour = xlwt.Style.colour_map["gray25"]
dest_style = xlwt.XFStyle()
dest_style.font.bold = True
dest_style.font.height = POINT_12
dest_style.alignment.horz = HORZ_CENTER
dest_style.alignment.vert = VERT_CENTER
left_header_style = xlwt.XFStyle()
left_header_style.font.bold = True
left_header_style.font.height = POINT_12
box_style = xlwt.XFStyle()
box_style.font.bold = True
box_style.font.height = 360 # 360 Twips = 18 point
box_style.alignment.horz = HORZ_CENTER
box_style.alignment.vert = VERT_CENTER
box_style.borders.top = THICK
box_style.borders.left = THICK
box_style.borders.right = THICK
box_style.borders.bottom = THICK
large_italic_font = xlwt.Font()
large_italic_font.bold = True
large_italic_font.height = 360 # 360 Twips = 18 point
large_italic_font.italic = True
# 1st row => Org Logo
current_row = sheet.row(0)
# current_row.set_style() not giving the correct height
current_row.height = ROW_HEIGHT
#sheet.write_merge(0, 0, 0, 1, org_name, left_header_style)
logo = org.logo
if logo:
# We need to convert to 24-bit BMP
try:
from PIL import Image
except ImportError:
current.log.error("PIL not installed: Cannot insert logo")
else:
IMG_WIDTH = 230
filename, extension = os.path.splitext(logo)
logo_path = os.path.join(r.folder, "uploads", logo)
if extension == ".png":
# Remove Transparency
png = Image.open(logo_path).convert("RGBA")
size = png.size
background = Image.new("RGBA",
y = pos
# x -= 0.5*rect.width
x -= 0.5*rect.width()
# y -= 0.5*rect.height
y -= 0.5*rect.height()
self.x += x / self.zoom_ratio - x / zoom_ratio
self.y += y / self.zoom_ratio - y / zoom_ratio
self.zoom_ratio = zoom_ratio
self.zoom_to_fit_on_resize = False
self.update()
def zoom_to_area(self, x1, y1, x2, y2):
# rect = self.get_allocation()
rect = self.rect()
width = abs(x1 - x2)
height = abs(y1 - y2)
self.zoom_ratio = min(
float(rect.width())/float(width),
float(rect.height())/float(height)
)
self.zoom_to_fit_on_resize = False
self.x = (x1 + x2) / 2
self.y = (y1 + y2) / 2
self.update()
def zoom_to_fit(self):
# rect = self.get_allocation()
rect = self.rect()
# rect.x += self.ZOOM_TO_FIT_MARGIN
rect.setX (rect.x() + self.ZOOM_TO_FIT_MARGIN)
# rect.y += self.ZOOM_TO_FIT_MARGIN
rect.setY (rect.y() + self.ZOOM_TO_FIT_MARGIN)
# rect.width -= 2 * self.ZOOM_TO_FIT_MARGIN
rect.setWidth(rect.width() - 2 * self.ZOOM_TO_FIT_MARGIN)
# rect.height -= 2 * self.ZOOM_TO_FIT_MARGIN
rect.setHeight(rect.height() - 2 * self.ZOOM_TO_FIT_MARGIN)
if self.graph.width < 10:
    self.graph.width = 10
if self.graph.height < 10:
    self.graph.height = 10
zoom_ratio = min(
# float(rect.width)/float(self.graph.width),
float(rect.width())/float(self.graph.width),
# float(rect.height)/float(self.graph.height)
float(rect.height())/float(self.graph.height)
)
self.zoom_image(zoom_ratio, center=True)
self.zoom_to_fit_on_resize = True
def on_zoom_in(self):
# def on_zoom_in(self, action):
self.zoom_image(self.zoom_ratio * self.ZOOM_INCREMENT)
def on_zoom_out(self):
# def on_zoom_out(self, action):
self.zoom_image(self.zoom_ratio / self.ZOOM_INCREMENT)
def on_zoom_fit(self):
# def on_zoom_fit(self, action):
self.zoom_to_fit()
def on_zoom_100(self):
# def on_zoom_100(self, action):
self.zoom_image(1.0)
def keyPressEvent(self, event):
self.animation.stop()
self.drag_action.abort()
if event.key() == Qt.Key_Left:
self.x -= self.POS_INCREMENT/self.zoom_ratio
self.update()
elif event.key() == Qt.Key_Right:
self.x += self.POS_INCREMENT/self.zoom_ratio
self.update()
elif event.key() == Qt.Key_Up:
self.y -= self.POS_INCREMENT/self.zoom_ratio
self.update()
elif event.key() == Qt.Key_Down:
self.y += self.POS_INCREMENT/self.zoom_ratio
self.update()
elif event.key() == Qt.Key_PageUp:
self.zoom_image(self.zoom_ratio * self.ZOOM_INCREMENT)
self.update()
elif event.key() == Qt.Key_PageDown:
self.zoom_image(self.zoom_ratio / self.ZOOM_INCREMENT)
self.update()
elif event.key() == Qt.Key_Escape:  # abort any drag in progress
self.drag_action.abort()
self.drag_action = NullAction(self)
elif event.key() == Qt.Key_R:
self.reload()
elif event.key() == Qt.Key_F:
self.zoom_to_fit()
event.accept()
def get_drag_action(self, event):
modifiers = event.modifiers()
if event.button() in (Qt.LeftButton, Qt.MidButton):
if modifiers & Qt.ControlModifier:
return ZoomAction
elif modifiers & Qt.ShiftModifier:
return ZoomAreaAction
else:
return PanAction
return NullAction
def mousePressEvent(self, event):
self.animation.stop()
self.drag_action.abort()
action_type = self.get_drag_action(event)
self.drag_action = action_type(self)
self.drag_action.on_button_press(event)
self.presstime = time.time()
self.pressx = event.x()
self.pressy = event.y()
event.accept()
def is_click(self, event, click_fuzz=4, click_timeout=1.0):
if self.presstime is None:
# got a button release without seeing the press?
return False
# XXX instead of doing this complicated logic, shouldn't we listen
# for gtk's clicked event instead?
deltax = self.pressx - event.x()
deltay = self.pressy - event.y()
return (time.time() < self.presstime + click_timeout and
math.hypot(deltax, deltay) < click_fuzz)
def mouseReleaseEvent(self, event):
self.drag_action.on_button_release(event)
self.drag_action = NullAction(self)
if event.button() == Qt.LeftButton and self.is_click(event):
x, y = event.x(), event.y()
url = self.get_url(x, y)
if url is not None:
for cb in self.select_cbs:
cb(str(url.url), event)
else:
jump = self.get_jump(x, y)
if jump is not None:
self.animate_to(jump.x, jump.y)
event.accept()
return
if event.button() == Qt.RightButton and self.is_click(event):
x, y = event.x(), event.y()
url = self.get_url(x, y)
if url is not None:
for cb in self.select_cbs:
cb(str(url.url), event)
else:
jump = self.get_jump(x, y)
if jump is not None:
self.animate_to(jump.x, jump.y)
if event.button() in (Qt.LeftButton, Qt.MidButton):
event.accept()
return
def on_area_scroll_event(self, area, event):
return False
def wheelEvent(self, event):
if event.angleDelta().y() > 0:
self.zoom_image(self.zoom_ratio * self.ZOOM_INCREMENT,
pos=(event.x(), event.y()))
if event.angleDelta().y() < 0:
self.zoom_image(self.zoom_ratio / self.ZOOM_INCREMENT,
pos=(event.x(), event.y()))
def mouseMoveEvent(self, event):
self.drag_action.on_motion_notify(event)
self.setFocus()
def on_area_size_allocate(self, area, allocation):
if self.zoom_to_fit_on_resize:
self.zoom_to_fit()
def animate_to(self, x, y):
self.animation = ZoomToAnimation(self, x, y)
self.animation.start()
def window_to_graph(self, x, y):
rect = self.rect()
x -= 0.5*rect.width()
y -= 0.5*rect.height()
x /= self.zoom_ratio
y /= self.zoom_ratio
x += self.x
y += self.y
return x, y
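# For reference, the inverse mapping (graph -> window coordinates) would be
# (illustrative sketch; this widget only needs the forward direction):
#   def graph_to_window(self, x, y):
#       x = (x - self.x) * self.zoom_ratio + 0.5 * self.rect().width()
#       y = (y - self.y) * self.zoom_ratio + 0.5 * self.rect().height()
#       return x, y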
def get_url(self, x, y):
x, y = self.window_to_graph(x, y)
return self.graph.get_url(x, y)
def get_jump(self, x, y):
x, y = self.window_to_graph(x, y)
return self.graph.get_jump(x, y)
# Apache-Style Software License for ColorBrewer software and ColorBrewer Color
# Schemes, Version 1.1
#
# Copyright (c) 2002 Cynthia Brewer, Mark Harrower, and The Pennsylvania State
# University. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions as source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. The end-user documentation included with the redistribution, if any,
# must include the following acknowledgment:
#
# This product includes color specifications and designs developed by
# Cynthia Brewer (http://colorbrewer.org/).
#
# Alternately, this acknowledgment may appear in the software itself, if and
# wherever such third-party acknowledgments normally appear.
#
# 3. The name "ColorBrewer" must not be used to endorse or promote products
# derived from this software without prior written permission. For written
# permission, please contact Cynthia Brewer at <EMAIL>.
#
# 4. Products derived from this software may not be called "ColorBrewer",
# nor may "ColorBrewer" appear in their name, without prior written
# permission of Cynthia Brewer.
#
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESSED OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CYNTHIA
# BREWER, MARK HARROWER, OR THE PENNSYLVANIA STATE UNIVERSITY BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
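# Illustrative helper (not part of the ColorBrewer data below): each palette
# is a list of (r, g, b) tuples in the 0-255 range; to use an entry as a
# hex color string, e.g. for Graphviz:
#   def rgb_to_hex(rgb):
#       return "#%02x%02x%02x" % rgb
#   rgb_to_hex((127, 201, 127))  # -> '#7fc97f'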
brewer_colors = {
'accent3': [(127, 201, 127), (190, 174, 212), (253, 192, 134)],
'accent4': [(127, 201, 127), (190, 174, 212), (253, 192, 134), (255, 255, 153)],
'accent5': [(127, 201, 127), (190, 174, 212), (253, 192, 134), (255, 255, 153), (56, 108, 176)],
'accent6': [(127, 201, 127), (190, 174, 212), (253, 192, 134), (255, 255, 153), (56, 108, 176), (240, 2, 127)],
'accent7': [(127, 201, 127), (190, 174, 212), (253, 192, 134), (255, 255, 153), (56, 108, 176), (240, 2, 127), (191, 91, 23)],
'accent8': [(127, 201, 127), (190, 174, 212), (253, 192, 134), (255, 255, 153), (56, 108, 176), (240, 2, 127), (191, 91, 23), (102, 102, 102)],
'blues3': [(222, 235, 247), (158, 202, 225), (49, 130, 189)],
'blues4': [(239, 243, 255), (189, 215, 231), (107, 174, 214), (33, 113, 181)],
'blues5': [(239, 243, 255), (189, 215, 231), (107, 174, 214), (49, 130, 189), (8, 81, 156)],
'blues6': [(239, 243, 255), (198, 219, 239), (158, 202, 225), (107, 174, 214), (49, 130, 189), (8, 81, 156)],
'blues7': [(239, 243, 255), (198, 219, 239), (158, 202, 225), (107, 174, 214), (66, 146, 198), (33, 113, 181), (8, 69, 148)],
'blues8': [(247, 251, 255), (222, 235, 247), (198, 219, 239), (158, 202, 225), (107, 174, 214), (66, 146, 198), (33, 113, 181), (8, 69, 148)],
'blues9': [(247, 251, 255), (222, 235, 247), (198, 219, 239), (158, 202, 225), (107, 174, 214), (66, 146, 198), (33, 113, 181), (8, 81, 156), (8, 48, 107)],
'brbg10': [(84, 48, 5), (0, 60, 48), (140, 81, 10), (191, 129, 45), (223, 194, 125), (246, 232, 195), (199, 234, 229), (128, 205, 193), (53, 151, 143), (1, 102, 94)],
'brbg11': [(84, 48, 5), (1, 102, 94), (0, 60, 48), (140, 81, 10), (191, 129, 45), (223, 194, 125), (246, 232, 195), (245, 245, 245), (199, 234, 229), (128, 205, 193), (53, 151, 143)],
'brbg3': [(216, 179, 101), (245, 245, 245), (90, 180, 172)],
'brbg4': [(166, 97, 26), (223, 194, 125), (128, 205, 193), (1, 133, 113)],
'brbg5': [(166, 97, 26), (223, 194, 125), (245, 245, 245), (128, 205, 193), (1, 133, 113)],
'brbg6': [(140, 81, 10), (216, 179, 101), (246, 232, 195), (199, 234, 229), (90, 180, 172), (1, 102, 94)],
'brbg7': [(140, 81, 10), (216, 179, 101), (246, 232, 195), (245, 245, 245), (199, 234, 229), (90, 180, 172), (1, 102, 94)],
'brbg8': [(140, 81, 10), (191, 129, 45), (223, 194, 125), (246, 232, 195), (199, 234, 229), (128, 205, 193), (53, 151, 143), (1, 102, 94)],
'brbg9': [(140, 81, 10), (191, 129, 45), (223, 194, 125), (246, 232, 195), (245, 245, 245), (199, 234, 229), (128, 205, 193), (53, 151, 143), (1, 102, 94)],
'bugn3': [(229, 245, 249), (153, 216, 201), (44, 162, 95)],
'bugn4':
row, col, val)
def _get_matrix_index(index, name, nrows, ncols, bRow):
if isinstance(index, int):
if bRow:
if index<-nrows or index>=nrows:
raise ValueError("%d: row index out of range" % (index))
else:
if index<-ncols or index>=ncols:
raise ValueError("%d: column index out of range" % (index))
return [index]
elif isinstance(index, str):
oret = []
oindex = index.split()
if bRow:
rowNames = _stp._st_getmatrixrownames(name)
for o in oindex:
try:
orowi = rowNames.index(o)
except ValueError:
raise ValueError("row %s not found" % (o))
oret.append(orowi)
else:
colNames = _stp._st_getmatrixcolnames(name)
for o in oindex:
try:
ocoli = colNames.index(o)
except ValueError:
raise ValueError("column %s not found" % (o))
oret.append(ocoli)
return oret
elif isinstance(index, (list, tuple)) or hasattr(index, "__iter__"):
    # Accept any iterable of indices: integers are range-checked,
    # strings are resolved against the matrix row/column names
    index = list(index)
    if _check_all(isinstance(o, int) for o in index):
        oret = []
        for o in index:
            if bRow:
                if o < -nrows or o >= nrows:
                    raise ValueError("%d: row index out of range" % (o))
            else:
                if o < -ncols or o >= ncols:
                    raise ValueError("%d: column index out of range" % (o))
            oret.append(o)
        return oret
    elif _check_all(isinstance(o, str) for o in index):
        oret = []
        if bRow:
            rowNames = _stp._st_getmatrixrownames(name)
            for o in index:
                try:
                    oret.append(rowNames.index(o))
                except ValueError:
                    raise ValueError("row %s not found" % (o))
        else:
            colNames = _stp._st_getmatrixcolnames(name)
            for o in index:
                try:
                    oret.append(colNames.index(o))
                except ValueError:
                    raise ValueError("column %s not found" % (o))
        return oret
    else:
        raise TypeError("all values for row or column indices must be a string or an integer")
else:
    raise TypeError("unsupported operand type(s) for row or column indices")
class Matrix:
"""
This class provides access to Stata matrices. All row and column
numbering of the matrix begins at 0. The allowed values for the
row index `row` and the column index `col` are
.. _ref-matrixrange:
.. centered:: **-nrows** `<=` `row` `<` **nrows**
and
.. centered:: **-ncols** `<=` `col` `<` **ncols**
Here **nrows** is the number of rows of the specified matrix, which is
returned by :meth:`getRowTotal()`. **ncols** is the number of columns of the
specified matrix, which is returned by :meth:`getColTotal()`. Negative
values for `row` and `col` are allowed and are interpreted in the usual
way for Python indexing.
Matrix names can be one of the following:
* global matrix such as **"mymatrix"**
* **r()** matrix such as **"r(Z)"**
* **e()** macro such as **"e(Z)"**
"""
def __init__(self):
pass
@staticmethod
def convertSymmetricToStd(name):
"""
Convert a symmetric matrix to a standard matrix.
Parameters
----------
name : str
Name of the matrix.
Raises
------
ValueError
If matrix `name` does not exist.
"""
return _stp._st_convertsymmetrictostd(name)
@staticmethod
def create(name, nrows, ncols, initialValue, isSymmetric=False):
"""
Create a Stata matrix.
Parameters
----------
name : str
Name of the matrix.
nrows : int
Number of rows.
ncols : int
Number of columns.
initialValue : float
An initialization value for each element.
isSymmetric : bool, optional
Mark the matrix as symmetric. If the number of rows and columns
are not equal, this parameter will be ignored. This parameter
affects the behavior of :meth:`storeAt()`. When the matrix is
marked as symmetric, :meth:`storeAt()` will always maintain
symmetry. Default is False.
Raises
------
ValueError
This error can be raised if
- `nrows` is not a positive integer.
- `ncols` is not a positive integer.
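Example
-------
A minimal sketch, assuming this module is importable as `sfi` inside
Stata's Python environment:

>>> from sfi import Matrix
>>> Matrix.create("mymat", 3, 3, 0.0)
>>> Matrix.getRowTotal("mymat")
3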
"""
if isSymmetric is True:
isSymmetric = 1
elif isSymmetric is False:
isSymmetric = 0
else:
raise TypeError("isSymmetric must be a boolean value")
return _stp._st_creatematrix(name, nrows, ncols, initialValue, isSymmetric)
@staticmethod
def get(name, rows=None, cols=None):
"""
Get the data in a Stata matrix.
Parameters
----------
name : str
Name of the matrix.
rows : int or list-like, optional
Rows to access. It can be specified as a single row index
or an iterable of row indices. If `rows` is not specified,
all the rows are included.
cols : int or list-like, optional
Columns to access. It can be specified as a single column
index or an iterable of column indices. If `cols` is not
specified, all the columns are included.
Returns
-------
list
A list of lists containing the matrix values. Each sublist
contains the values of one row of the matrix.
Raises
------
ValueError
This error can be raised if
- matrix `name` does not exist.
- any of the row indices specified in `rows` is out of :ref:`range <ref-matrixrange>`.
- any of the column indices specified in `cols` is out of :ref:`range <ref-matrixrange>`.
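Example
-------
A minimal sketch, assuming a Stata matrix named "A" already exists:

>>> from sfi import Matrix
>>> Matrix.get("A")               # full matrix, as a list of row lists
>>> Matrix.get("A", rows=0)       # first row only
>>> Matrix.get("A", cols=[0, 2])  # first and third columns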
"""
nrows = _stp._st_getmatrixrow(name)
ncols = _stp._st_getmatrixcol(name)
if rows is None:
    mrows = None
else:
    mrows = _get_matrix_index(rows, name, nrows, ncols, True)
if cols is None:
    mcols = None
else:
    mcols = _get_matrix_index(cols, name, nrows, ncols, False)
return _stp._st_getmatrix(name, mrows, mcols)
@staticmethod
def getAt(name, row, col):
"""
Access an element from a Stata matrix.
Parameters
----------
name : str
Name of the matrix.
row : int
Row to access.
col : int
Column to access.
Returns
-------
float
The value.
Raises
------
ValueError
This error can be raised if
- matrix `name` does not exist.
- `row` is out of :ref:`range <ref-matrixrange>`.
- `col` is out of :ref:`range <ref-matrixrange>`.
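Example
-------
A minimal sketch, assuming matrix "A" exists; negative indices count
from the end, as with Python sequences:

>>> from sfi import Matrix
>>> Matrix.getAt("A", 0, 0)    # top-left element
>>> Matrix.getAt("A", -1, -1)  # bottom-right element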
"""
return _stp._st_getmatrixat(name, row, col)
@staticmethod
def getColNames(name):
"""
Get the column names of a Stata matrix.
Parameters
----------
name : str
Name of the matrix.
Returns
-------
list
A string list containing the column names of the matrix.
Raises
------
ValueError
If matrix `name` does not exist.
"""
return _stp._st_getmatrixcolnames(name)
@staticmethod
def getColTotal(name):
"""
Get the number of columns in a Stata matrix.
Parameters
----------
name : str
Name of the matrix.
Returns
-------
int
The number of columns.
Raises
------
ValueError
If matrix `name` does not exist.
"""
return _stp._st_getmatrixcol(name)
@staticmethod
def getRowNames(name):
"""
Get the row names of a Stata matrix.
Parameters
----------
name : str
Name of the matrix.
Returns
-------
list
A string list containing the row names of the matrix.
Raises
------
ValueError
If matrix `name` does not exist.
"""
return _stp._st_getmatrixrownames(name)
@staticmethod
def getRowTotal(name):
"""
Get the number of rows in a Stata matrix.
Parameters
----------
name : str
Name of the matrix.
Returns
-------
int
The number of rows.
Raises
------
ValueError
If matrix `name` does not exist.
"""
return _stp._st_getmatrixrow(name)
@staticmethod
def list(name, rows=None, cols=None):
"""
Display a Stata matrix.
Parameters
----------
name : str
Name of the matrix.
rows : int or list-like, optional
Rows to display. It can be specified as a single row index
or an iterable of row indices. If `rows` is not specified,
all the rows are included.
cols : int or list-like, optional
Columns to display. It can be specified as a single column
index or an iterable of column indices. If `cols` is not
specified, all the columns are included.
Raises
------
ValueError
This error can be raised if
- matrix `name` does not exist.
- any of the row indices specified in `rows` is out of :ref:`range <ref-matrixrange>`.
- any of the column indices specified in `cols` is
# AER_theorist/object_of_study.py
from abc import ABC, abstractmethod
from torch.utils.data import Dataset
from enum import Enum
from AER_experimentalist.experiment_environment.variable import *
import AER_config as AER_cfg
from typing import List, Dict
import torch
import numpy as np
import copy
import random
class Object_Of_Study(Dataset):
key_experiment_id = 'AER_Experiment'
def __init__(self, name, independent_variables: List[Variable],
             dependent_variables: List[Variable], covariates=None,
             input_dimensions=None, output_dimensions=None,
             output_type=None):
self.name = name
self.independent_variables = list()
self.dependent_variables = list()
self.covariates = list()
self.data = dict()
self._normalize_input = False
self._normalize_output = False
# set independent and dependent variables
if len(independent_variables) == 0:
    raise Exception("No independent variables specified.")
if len(dependent_variables) == 0:
    raise Exception("No dependent variables specified.")
self.independent_variables = independent_variables
self.dependent_variables = dependent_variables
self.covariates = covariates if covariates is not None else list()
# set number of output dimensions
if output_dimensions is None:
self.output_dimensions = len(self.dependent_variables)
else:
self.output_dimensions = output_dimensions
# set number of input dimensions
if input_dimensions is None:
self.input_dimensions = len(self.independent_variables) + len(self.covariates)
else:
self.input_dimensions = input_dimensions
# set output type
self.output_type = self.dependent_variables[0].type
for variable in dependent_variables:
if variable.type != self.output_type:
Exception("Dependent variable output types don't match. Different output types are not supported yet.")
# set up data
for var in self.dependent_variables:
self.data[var.get_name()] = list()
for var in self.independent_variables:
self.data[var.get_name()] = list()
for var in self.covariates:
self.data[var.get_name()] = list()
self.data[AER_cfg.experiment_label] = list()
def __len__(self, experiment_id=None):
if experiment_id is None:
return len(self.data[self.key_experiment_id])
else:
return self.data[self.key_experiment_id].count(experiment_id)
def __getitem__(self, idx, experiment_id=None):
# determine relevant experiment id
# get input data
input_data = list()
for var in self.independent_variables:
input_data.append(var.get_value_from_dict(self.data, idx))
for var in self.covariates:
input_data.append(var.get_value_from_dict(self.data, idx))
# get output data
output_data = list()
for var in self.dependent_variables:
output_data.append(var.get_value_from_dict(self.data, idx))
input = torch.tensor(input_data).float()
output = torch.tensor(output_data).float()
# normalize if required
if self._normalize_input:
input = self.normalize_variables(input, self.independent_variables)
if self._normalize_output:
output = self.normalize_variables(output, self.dependent_variables)
return input, output
def get_random_input_sample(self):
# sample input data
input_data = list()
for var in self.independent_variables:
sample = np.random.uniform(var.__get_value_range__()[0] * var._rescale,
var.__get_value_range__()[1] * var._rescale)
input_data.append(sample)
for var in self.covariates:
sample = np.random.uniform(var.__get_value_range__()[0] * var._rescale,
var.__get_value_range__()[1] * var._rescale)
input_data.append(sample)
input = torch.tensor(input_data).float()
# normalize if required
if self._normalize_input:
input = self.normalize_variables(input, self.independent_variables)
return input
def new_experiment_sequence(self):
experiment_sequence = dict()
for var in self.independent_variables:
experiment_sequence[var.get_name()] = list()
for var in self.covariates:
experiment_sequence[var.get_name()] = list()
return experiment_sequence
def get_last_experiment_id(self):
return np.max(self.data[self.key_experiment_id])
def get_experiment_indices(self, experiment_id):
indices = [i for i, x in enumerate(self.data[self.key_experiment_id]) if x == experiment_id]
return indices
# potentially redundant with: get_all_data
def get_dataset(self, experiment_id=None):
# determine length of data set
if experiment_id is None:
num_data_points = len(self)
else:
num_data_points = self.__len__(experiment_id)
# create an empty tensor
input_dataset = torch.empty(num_data_points, self.__get_input_length__()).float()
output_dataset = torch.empty(num_data_points, self.__get_output_length__()).float()
if experiment_id is None:
for idx in range(len(self)):
(input, output) = self.__getitem__(idx)
input_dataset[idx,:] = input
output_dataset[idx, :] = output
else:
experiment_indices = self.get_experiment_indices(experiment_id)
sub_idx = 0
for idx in range(len(self)):
(input, output) = self.__getitem__(idx)
if idx in experiment_indices:
input_dataset[sub_idx, :] = input
output_dataset[sub_idx, :] = output
sub_idx += 1
return input_dataset, output_dataset
def get_counterbalanced_input(self, resolution):
factor_levels = list()
independent_variables = self.independent_variables + self.covariates
for var in independent_variables:
var_levels = np.linspace(var.__get_value_range__()[0] * var._rescale,
var.__get_value_range__()[1] * var._rescale,
resolution)
factor_levels.append(var_levels)
input_np = np.array(np.meshgrid(*factor_levels)).T.reshape(-1,len(independent_variables))
input = torch.tensor(input_np).float()
return input
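# Illustrative note: with two input variables and resolution=3, the meshgrid
# above enumerates the full 3 x 3 factorial design, i.e. 9 input rows covering
# every combination of the two variables' levels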
def average_DV_for_IVs(self, DV, IVs, input, output):
IV1 = IVs[0]
IV2 = IVs[1]
DV_idx = self.get_DV_idx(DV)
if IV2 is None:
IV1_idx = self.get_IV_idx(IV1)
unique_IV_values = np.unique(input[:,IV1_idx])
DV_values = np.empty(unique_IV_values.shape)
for row, element in enumerate(unique_IV_values):
value_log = list()
for idx in range(output.shape[0]):
if element == input[idx, IV1_idx]:
value_log.append(output[idx, DV_idx])
value_mean = np.mean(value_log)
DV_values[row] = value_mean
return unique_IV_values, DV_values
else:
IV1_idx = self.get_IV_idx(IV1)
IV2_idx = self.get_IV_idx(IV2)
unique_IV_rows = np.unique(input[:, [IV1_idx, IV2_idx]], axis=0)
DV_values = np.empty((unique_IV_rows.shape[0]))
IV1_values = np.empty((unique_IV_rows.shape[0]))
IV2_values = np.empty((unique_IV_rows.shape[0]))
for row, combination in enumerate(unique_IV_rows):
value_log = list()
for idx in range(output.shape[0]):
if (combination == input[idx, [IV1_idx, IV2_idx]]).all():
value_log.append(output[idx, DV_idx])
value_mean = np.mean(value_log)
DV_values[row] = value_mean
IV1_values[row] = combination[0]
IV2_values[row] = combination[1]
unique_IV_values = (IV1_values, IV2_values)
return unique_IV_values, DV_values
def get_plot_list(self):
IV_list_1 = list()
IV_list_2 = list()
DV_list = list()
# combine each IV with each IV with each DV
independent_variables_1 = self.independent_variables + self.covariates
independent_variables_2 = [None] + self.independent_variables + self.covariates
for IV1 in independent_variables_1:
for IV2 in independent_variables_2:
for DV in self.dependent_variables:
if IV1 != IV2:
IV_list_1.append(IV1)
IV_list_2.append(IV2)
DV_list.append(DV)
# combine each IV
return (IV_list_1, IV_list_2, DV_list)
def get_variable_data(self, variable):
var_data = list()
for idx in range(len(self)):
var_data.append(variable.get_value_from_dict(self.data, idx))
IV_data = torch.tensor(var_data).float()
return IV_data
def get_IVs_from_input(self, input, IVs):
columns = list()
if isinstance(IVs, list):
for IV in IVs:
if IV is not None:
columns.append(self.get_IV_idx(IV))
else:
columns.append(self.get_IV_idx(IVs))
return input[:, columns]
def get_DV_from_output(self, output, DV):
column = self.get_DV_idx(DV)
return output[:, column]
def get_IV_idx(self, IV):
column = None
for idx, var in enumerate(self.independent_variables):
if var.get_name() == IV.get_name():
column = idx
break
for idx, var in enumerate(self.covariates):
if var.get_name() == IV.get_name():
column = idx + len(self.independent_variables)
break
return column
def get_DV_idx(self, DV):
column = None
for idx, var in enumerate(self.dependent_variables):
if var.get_name() == DV.get_name():
column = idx
break
return column
def get_IV_name(self, idx):
if idx < len(self.independent_variables):
name = self.independent_variables[idx].get_name()
else:
idx = idx - len(self.independent_variables)
if idx < len(self.covariates):
name = self.covariates[idx].get_name()
else:
raise Exception("Index exceeds number of independent variables.")
return name
def get_DV_name(self, idx):
if idx < len(self.dependent_variables):
name = self.dependent_variables[idx].get_name()
return name
else:
raise Exception("Index exceeds number of dependent variables.")
def get_variable_limits(self, var):
limits = [var.__get_value_range__()[0] * var._rescale, var.__get_value_range__()[1] * var._rescale]
return limits
def rescale_experiment_sequence(self, sequence):
rescaled_sequence = dict()
for key in sequence:
values = sequence[key]
rescale = self.get_IV_rescale_from_name(key)
values_rescaled = [val * rescale for val in values]
rescaled_sequence[key] = values_rescaled
return rescaled_sequence
def get_IV_rescale_from_name(self, IV_name):
for var in self.independent_variables:
if var.get_name() == IV_name:
return var._rescale
for var in self.covariates:
if var.get_name() == IV_name:
return var._rescale
def get_IV_limits_from_name(self, IV_name):
for var in self.independent_variables:
if var.get_name() == IV_name:
return self.get_variable_limits(var)
for var in self.covariates:
if var.get_name() == IV_name:
return self.get_variable_limits(var)
return None
def get_variable_summary_stats(self, variables):
# collect means and stds
means = list()
stds = list()
for var in variables:
IV_data = self.data[var.get_name()][0]
m = np.mean(IV_data)
s = np.std(IV_data)
means.append(m)
stds.append(s)
return [means, stds]
def normalize_variables(self, tensor, variables):
# collect means and stds
[means, stds] = self.get_variable_summary_stats(variables)
# return normalized data
return normalize(tensor, means, stds)
def unnormalize_variables(self, tensor, variables):
# collect means and stds
[means, stds] = self.get_variable_summary_stats(variables)
# return normalized data
return unnormalize(tensor, means, stds)
def get_name(self):
return self.name
def __get_input_dim__(self):
return self.input_dimensions
def __get_output_dim__(self):
return self.output_dimensions
def __get_output_type__(self):
return self.output_type
def __get_input_labels__(self):
input_labels = list()
for var in self.independent_variables:
input_labels.append(var.get_variable_label())
for var in self.covariates:
input_labels.append(var.get_variable_label())
return input_labels
def __get_input_names__(self):
input_names = list()
for var in self.independent_variables:
input_names.append(var.get_name())
for var in self.covariates:
input_names.append(var.get_name())
return input_names
def __get_input_length__(self):
input_data = list()
for var in self.independent_variables:
input_data.append(var.get_value_from_dict(self.data, 0))
for var in self.covariates:
input_data.append(var.get_value_from_dict(self.data, 0))
return len(input_data)
def __get_output_length__(self):
output_data = list()
for var in self.dependent_variables:
output_data.append(var.get_value_from_dict(self.data, 0))
return len(output_data)
def split(self, proportion=0.5):
split_copy = copy.deepcopy(self)
# determine indices to be split
num_data_points = self.__len__()
indices = range(num_data_points)
num_samples = round(proportion*num_data_points)
samples = random.sample(indices, num_samples)
split_copy.data = dict()
# first add samples to the new copy
for key in self.data.keys():
split_copy.data[key] = list()
for samp in samples:
split_copy.data[key].append(self.data[key][samp])
# now remove samples from original object
for key in self.data.keys():
values = self.data[key]
values = [i for j, i in enumerate(values) if j not in samples]
self.data[key] = values
return split_copy
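# Usage sketch (illustrative): carve 20% of the data points off into a
# held-out set, leaving the remaining 80% in this object:
#   validation_set = object_of_study.split(proportion=0.2)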
# potentially redundant with: get_dataset
def get_all_data(self):
num_patterns = self.__len__()
if num_patterns > 0:
input_tensor, output_tensor = self.__getitem__(0)
else:
    input_tensor = torch.from_numpy(np.empty((0, self.input_dimensions), dtype=np.float32))
    output_tensor = torch.from_numpy(np.empty((0, self.output_dimensions), dtype=np.float32))
for idx in range(1, num_patterns):
    tmp_input_tensor, tmp_output_tensor = self.__getitem__(idx)
    input_tensor = torch.cat((input_tensor, tmp_input_tensor), 0)
    output_tensor = torch.cat((output_tensor, tmp_output_tensor), 0)
return input_tensor, output_tensor
def add_data(self, new_data: Dict):
for key in self.data.keys():
if key in new_data:
for value in new_data[key]:
self.data[key].append(value)
else:
raise Exception("Could not find key '" + key + "' in the new data dictionary.")
def normalize(tensor, mean, std):
for t, m, s in zip(tensor, mean, std):
t.sub_(m).div_(s)
return tensor
def unnormalize(tensor, mean, std):
for t, m, s in zip(tensor, mean, std):
t.mul_(s).add_(m)
return tensor
# NB: from here on the name `unnormalize` is rebound to this class,
# shadowing the unnormalize() function defined above
class unnormalize(object):
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, tensor):
"""
Args:
tensor (Tensor): Tensor image of size (C, H, W)
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Base classes for TensorFlow Probability ODE solvers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import functools
import six
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.math.ode import runge_kutta_util as rk_util
from tensorflow_probability.python.math.ode import util
# TODO(b/138303336): Support MATLAB-style events.
__all__ = [
'ChosenBySolver',
'Diagnostics',
'Results',
'Solver',
]
@six.add_metaclass(abc.ABCMeta)
class Solver(object):
"""Base class for an ODE solver."""
def __init__(self, use_pfor_to_compute_jacobian, validate_args, name):
self._use_pfor_to_compute_jacobian = use_pfor_to_compute_jacobian
self._validate_args = validate_args
self._name = name
def solve(
self,
ode_fn,
initial_time,
initial_state,
solution_times,
jacobian_fn=None,
jacobian_sparsity=None,
batch_ndims=None,
previous_solver_internal_state=None,
constants=None,
):
"""Solves an initial value problem.
An initial value problem consists of a system of ODEs and an initial
condition:
```none
dy/dt(t) = ode_fn(t, y(t), **constants)
y(initial_time) = initial_state
```
Here, `t` (also called time) is a scalar float `Tensor` and `y(t)` (also
called the state at time `t`) is an N-D float or complex `Tensor`.
`constants` are values that are constant with respect to time. Passing
the constants here rather than just closing over them in `ode_fn` is only
necessary if you want gradients with respect to these values.
### Example
The ODE `dy/dt(t) = dot(A, y(t))` is solved below.
```python
t_init, t0, t1 = 0., 0.5, 1.
y_init = tf.constant([1., 1.], dtype=tf.float64)
A = tf.constant([[-1., -2.], [-3., -4.]], dtype=tf.float64)
def ode_fn(t, y):
return tf.linalg.matvec(A, y)
results = tfp.math.ode.BDF().solve(ode_fn, t_init, y_init,
solution_times=[t0, t1])
y0 = results.states[0] # == dot(matrix_exp(A * t0), y_init)
y1 = results.states[1] # == dot(matrix_exp(A * t1), y_init)
```
If the exact solution times are not important, it can be much
more efficient to let the solver choose them using
`solution_times=tfp.math.ode.ChosenBySolver(final_time=1.)`.
This yields the state at various times between `t_init` and `final_time`,
in which case `results.states[i]` is the state at time `results.times[i]`.
#### Gradients
The gradients are computed using the adjoint sensitivity method described in
[Chen et al. (2018)][1].
```python
grad = tf.gradients(y1, y0) # == dot(e, J)
# J is the Jacobian of y1 with respect to y0. In this case, J = exp(A * t1).
# e = [1, ..., 1] is the row vector of ones.
```
This is not capable of computing gradients with respect to values closed
over by `ode_fn`, e.g., in the example above:
```python
def ode_fn(t, y):
return tf.linalg.matvec(A, y)
with tf.GradientTape() as tape:
tape.watch(A)
results = tfp.math.ode.BDF().solve(ode_fn, t_init, y_init,
solution_times=[t0, t1])
tape.gradient(results.states, A) # Undefined!
```
There are two options to get the gradients flowing through these values:
1. Use `tf.Variable` for these values.
2. Pass the values in explicitly using the `constants` argument:
```python
def ode_fn(t, y, A):
return tf.linalg.matvec(A, y)
with tf.GradientTape() as tape:
tape.watch(A)
results = tfp.math.ode.BDF().solve(ode_fn, t_init, y_init,
solution_times=[t0, t1],
constants={'A': A})
tape.gradient(results.states, A) # Fine.
```
#### References
[1]: Chen, Tian Qi, et al. "Neural ordinary differential equations."
Advances in Neural Information Processing Systems. 2018.
Args:
ode_fn: Function of the form `ode_fn(t, y, **constants)`. The input `t` is
a scalar float `Tensor`. The input `y` and output are both `Tensor`s
with the same shape and `dtype` as `initial_state`. `constants` are
values that are constant with respect to time. Passing the constants
here rather than just closing over them in `ode_fn` is only necessary if
you want gradients with respect to these values.
initial_time: Scalar float `Tensor` specifying the initial time.
initial_state: N-D float or complex `Tensor` specifying the initial state.
The `dtype` of `initial_state` must be complex for problems with
complex-valued states (even if the initial state is real).
solution_times: 1-D float `Tensor` specifying a list of times. The solver
stores the computed state at each of these times in the returned
`Results` object. Must satisfy `initial_time <= solution_times[0]` and
`solution_times[i] < solution_times[i+1]`. Alternatively, the user can
pass `tfp.math.ode.ChosenBySolver(final_time)` where `final_time` is a
scalar float `Tensor` satisfying `initial_time < final_time`. Doing so
requests that the solver automatically choose suitable times up to and
including `final_time` at which to store the computed state.
jacobian_fn: Optional function of the form `jacobian_fn(t, y)`. The input
`t` is a scalar float `Tensor`. The input `y` has the same shape and
`dtype` as `initial_state`. The output is a 2N-D `Tensor` whose shape is
`initial_state.shape + initial_state.shape` and whose `dtype` is the
same as `initial_state`. In particular, the `(i1, ..., iN, j1, ...,
jN)`-th entry of `jacobian_fn(t, y)` is the derivative of the `(i1, ...,
iN)`-th entry of `ode_fn(t, y)` with respect to the `(j1, ..., jN)`-th
entry of `y`. If this argument is left unspecified, the solver
automatically computes the Jacobian if and when it is needed.
Default value: `None`.
jacobian_sparsity: Optional 2N-D boolean `Tensor` whose shape is
`initial_state.shape + initial_state.shape` specifying the sparsity
pattern of the Jacobian. This argument is ignored if `jacobian_fn` is
specified.
Default value: `None`.
batch_ndims: Optional nonnegative integer. When specified, the first
`batch_ndims` dimensions of `initial_state` are batch dimensions.
Default value: `None`.
previous_solver_internal_state: Optional solver-specific argument used to
warm-start this invocation of `solve`.
Default value: `None`.
constants: Optional dictionary with string keys and values being (possibly
nested) float `Tensor`s. These represent values that are constant with
respect to time. Specifying these here allows the adjoint sensitivity
method to compute gradients of the results with respect to these values.
Returns:
Object of type `Results`.
"""
if constants is None:
constants = {}
input_state_structure = initial_state
constant_state_structure = constants
flat_initial_state = tf.nest.flatten(initial_state)
flat_constants = tf.nest.flatten(constants)
num_state_components = len(flat_initial_state)
@tf.custom_gradient
def gradient_helper(*flat_initial_state_and_constants):
"""Restricts gradient to initial state components and constants."""
flat_initial_state_and_constants = [
tf.convert_to_tensor(c) for c in flat_initial_state_and_constants
]
flat_initial_state = (
flat_initial_state_and_constants[:num_state_components])
flat_constants = flat_initial_state_and_constants[num_state_components:]
initial_state = tf.nest.pack_sequence_as(
input_state_structure, flat_initial_state)
constants = tf.nest.pack_sequence_as(
constant_state_structure, flat_constants)
results = self._solve(
functools.partial(ode_fn, **constants),
initial_time,
initial_state,
solution_times,
jacobian_fn,
jacobian_sparsity,
batch_ndims,
previous_solver_internal_state,
)
results = Results(
times=tf.stop_gradient(results.times),
states=results.states,
diagnostics=util.stop_gradient_of_real_or_complex_entries(
results.diagnostics),
solver_internal_state=util.stop_gradient_of_real_or_complex_entries(
results.solver_internal_state))
def grad_fn(*dresults, **kwargs):
"""Adjoint sensitivity method to compute gradients."""
dresults = tf.nest.pack_sequence_as(results, dresults)
dstates = dresults.states
# The signature grad_fn(*dresults, variables=None) is not valid Python 2
# so use kwargs instead.
variables = kwargs.pop('variables', [])
assert not kwargs # This assert should never fail.
# TODO(b/138304303): Support complex types.
with tf.name_scope('{}Gradients'.format(self._name)):
get_dtype = lambda x: x.dtype
def error_if_complex(dtype):
if dtype.is_complex:
raise NotImplementedError('The adjoint sensitivity method does '
'not support complex dtypes.')
state_dtypes = tf.nest.map_structure(get_dtype, initial_state)
tf.nest.map_structure(error_if_complex, state_dtypes)
common_state_dtype = dtype_util.common_dtype(initial_state)
real_dtype = dtype_util.real_dtype(common_state_dtype)
# We add initial_time to ensure that we know where to stop.
result_times = tf.concat(
[[tf.cast(initial_time, real_dtype)], results.times], 0)
num_result_times = tf.size(result_times)
# First two components correspond to reverse and adjoint states.
# The last two components are the adjoint states for the variables and constants.
terminal_augmented_state = tuple([
rk_util.nest_constant(initial_state, 0.0),
rk_util.nest_constant(initial_state, 0.0),
tuple(
rk_util.nest_constant(variable, 0.0) for variable in variables
),
rk_util.nest_constant(constants, 0.0),
])
# The XLA compiler does not compile code which slices/indexes using
# integer `Tensor`s. `TensorArray`s are used to get around this.
result_time_array = tf.TensorArray(
results.times.dtype,
clear_after_read=False,
size=num_result_times,
element_shape=[]).unstack(result_times)
# TensorArray shape should not include time dimension, hence shape[1:]
result_state_arrays = [
tf.TensorArray( # pylint: disable=g-complex-comprehension
dtype=component.dtype, size=num_result_times - 1,
element_shape=component.shape[1:]).unstack(component)
for component in tf.nest.flatten(results.states)
]
result_state_arrays = tf.nest.pack_sequence_as(
results.states, result_state_arrays)
dresult_state_arrays = [
tf.TensorArray( # pylint: disable=g-complex-comprehension
dtype=component.dtype, size=num_result_times - 1,
element_shape=component.shape[1:]).unstack(component)
for component in tf.nest.flatten(dstates)
]
dresult_state_arrays = tf.nest.pack_sequence_as(
results.states, dresult_state_arrays)
def augmented_ode_fn(backward_time, augmented_state):
"""Dynamics function for the augmented system.
Describes a differential equation that evolves the augmented state
backwards in time to compute gradients using the adjoint method.
Augmented state consists of 4 components `(state, adjoint_state,
vars, constants)` all evaluated at time `backward_time`:
state: represents
"""
(c) 2013 LinkedIn Corp. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""
import logging
from ..base.calendar import BaseExchangeCalendarEvent, BaseExchangeCalendarService, ExchangeEventOrganizer, ExchangeEventResponse
from ..base.folder import BaseExchangeFolder, BaseExchangeFolderService
from ..base.soap import ExchangeServiceSOAP
from ..exceptions import FailedExchangeException, ExchangeStaleChangeKeyException, ExchangeItemNotFoundException, ExchangeInternalServerTransientErrorException, ExchangeIrresolvableConflictException, InvalidEventType
from ..compat import BASESTRING_TYPES
from . import soap_request
from lxml import etree
from copy import deepcopy
from datetime import date
import warnings
log = logging.getLogger("pyexchange")
class Exchange2010Service(ExchangeServiceSOAP):
def calendar(self, id="calendar"):
return Exchange2010CalendarService(service=self, calendar_id=id)
def mail(self):
raise NotImplementedError("Sorry - nothin' here. Feel like adding it? :)")
def contacts(self):
raise NotImplementedError("Sorry - nothin' here. Feel like adding it? :)")
def folder(self):
return Exchange2010FolderService(service=self)
def _send_soap_request(self, body, headers=None, retries=2, timeout=30, encoding="utf-8"):
headers = {
"Accept": "text/xml",
"Content-type": "text/xml; charset=%s " % encoding
}
return super(Exchange2010Service, self)._send_soap_request(body, headers=headers, retries=retries, timeout=timeout, encoding=encoding)
def _check_for_errors(self, xml_tree):
super(Exchange2010Service, self)._check_for_errors(xml_tree)
self._check_for_exchange_fault(xml_tree)
def _check_for_exchange_fault(self, xml_tree):
# If the request succeeded, we should see a <m:ResponseCode>NoError</m:ResponseCode>
# somewhere in the response. if we don't (a) see the tag or (b) it doesn't say "NoError"
# then flip out
response_codes = xml_tree.xpath(u'//m:ResponseCode', namespaces=soap_request.NAMESPACES)
if not response_codes:
raise FailedExchangeException(u"Exchange server did not return a status response", None)
# The full (massive) list of possible return responses is here.
# http://msdn.microsoft.com/en-us/library/aa580757(v=exchg.140).aspx
for code in response_codes:
if code.text == u"ErrorChangeKeyRequiredForWriteOperations":
# change key is missing or stale. we can fix that, so throw a special error
raise ExchangeStaleChangeKeyException(u"Exchange Fault (%s) from Exchange server" % code.text)
elif code.text == u"ErrorItemNotFound":
# exchange_invite_key wasn't found on the server
raise ExchangeItemNotFoundException(u"Exchange Fault (%s) from Exchange server" % code.text)
elif code.text == u"ErrorIrresolvableConflict":
# tried to update an item with an old change key
raise ExchangeIrresolvableConflictException(u"Exchange Fault (%s) from Exchange server" % code.text)
elif code.text == u"ErrorInternalServerTransientError":
# temporary internal server error. throw a special error so we can retry
raise ExchangeInternalServerTransientErrorException(u"Exchange Fault (%s) from Exchange server" % code.text)
elif code.text == u"ErrorCalendarOccurrenceIndexIsOutOfRecurrenceRange":
# just means some or all of the requested instances are out of range
pass
elif code.text != u"NoError":
raise FailedExchangeException(u"Exchange Fault (%s) from Exchange server" % code.text)
class Exchange2010CalendarService(BaseExchangeCalendarService):
def event(self, id=None, **kwargs):
return Exchange2010CalendarEvent(service=self.service, id=id, **kwargs)
def get_event(self, id):
return Exchange2010CalendarEvent(service=self.service, id=id)
def new_event(self, **properties):
return Exchange2010CalendarEvent(service=self.service, calendar_id=self.calendar_id, **properties)
def list_events(self, start=None, end=None, details=False, delegate_for=None):
return Exchange2010CalendarEventList(service=self.service, calendar_id=self.calendar_id, start=start, end=end, details=details, delegate_for=delegate_for)
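# Usage sketch (illustrative; service construction and credentials not shown):
#   events = service.calendar().list_events(start=start_ts, end=end_ts,
#                                           details=True)
#   for event in events.events:
#       print(event.subject)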
class Exchange2010CalendarEventList(object):
"""
Creates & Stores a list of Exchange2010CalendarEvent items in the "self.events" variable.
"""
def __init__(self, service=None, calendar_id=u'calendar', start=None, end=None, details=False, delegate_for=None):
self.service = service
self.count = 0
self.start = start
self.end = end
self.events = list()
self.event_ids = list()
self.details = details
self.delegate_for = delegate_for
# This request uses a Calendar-specific query between two dates.
body = soap_request.get_calendar_items(format=u'AllProperties', calendar_id=calendar_id, start=self.start, end=self.end, delegate_for=self.delegate_for)
response_xml = self.service.send(body)
self._parse_response_for_all_events(response_xml)
# Populate the event ID list, for convenience reasons.
for event in self.events:
self.event_ids.append(event._id)
# If we have requested all the details, basically repeat the previous 3 steps,
# but instead of start/stop, we have a list of ID fields.
if self.details:
log.debug(u'Received request for all details, retrieving now!')
self.load_all_details()
return
def _parse_response_for_all_events(self, response):
"""
This function will retrieve *most* of the event data, excluding Organizer & Attendee details
"""
items = response.xpath(u'//m:FindItemResponseMessage/m:RootFolder/t:Items/t:CalendarItem', namespaces=soap_request.NAMESPACES)
if not items:
items = response.xpath(u'//m:GetItemResponseMessage/m:Items/t:CalendarItem', namespaces=soap_request.NAMESPACES)
if items:
self.count = len(items)
log.debug(u'Found %s items' % self.count)
for item in items:
self._add_event(xml=soap_request.M.Items(deepcopy(item)))
else:
log.debug(u'No calendar items found with search parameters.')
return self
def _add_event(self, xml=None):
log.debug(u'Adding new event to all events list.')
event = Exchange2010CalendarEvent(service=self.service, xml=xml)
log.debug(u'Subject of new event is %s' % event.subject)
self.events.append(event)
return self
def load_all_details(self):
"""
This function will execute all the event lookups for known events.
This is intended for use when you want to have a completely populated event entry, including
Organizer & Attendee details.
"""
log.debug(u"Loading all details")
if self.count > 0:
# Now, empty out the events to prevent duplicates!
del(self.events[:])
# Send the SOAP request with the list of exchange ID values.
log.debug(u"Requesting all event details for events: {event_list}".format(event_list=str(self.event_ids)))
body = soap_request.get_item(exchange_id=self.event_ids, format=u'AllProperties')
response_xml = self.service.send(body)
# Re-parse the results for all the details!
self._parse_response_for_all_events(response_xml)
return self
class Exchange2010CalendarEvent(BaseExchangeCalendarEvent):
def _init_from_service(self, id):
log.debug(u'Creating new Exchange2010CalendarEvent object from ID')
body = soap_request.get_item(exchange_id=id, format=u'AllProperties')
response_xml = self.service.send(body)
properties = self._parse_response_for_get_event(response_xml)
self._update_properties(properties)
self._id = id
log.debug(u'Created new event object with ID: %s' % self._id)
self._reset_dirty_attributes()
return self
def _init_from_xml(self, xml=None):
log.debug(u'Creating new Exchange2010CalendarEvent object from XML')
properties = self._parse_response_for_get_event(xml)
self._update_properties(properties)
self._id, self._change_key = self._parse_id_and_change_key_from_response(xml)
log.debug(u'Created new event object with ID: %s' % self._id)
self._reset_dirty_attributes()
return self
def as_json(self):
raise NotImplementedError
def validate(self):
if self.recurrence is not None:
if not (isinstance(self.recurrence_end_date, date)):
raise ValueError('recurrence_end_date must be of type date')
elif (self.recurrence_end_date < self.start.date()):
raise ValueError('recurrence_end_date must be after start')
if self.recurrence == u'daily':
if not (isinstance(self.recurrence_interval, int) and 1 <= self.recurrence_interval <= 999):
raise ValueError('recurrence_interval must be an int in the range from 1 to 999')
elif self.recurrence == u'weekly':
if not (isinstance(self.recurrence_interval, int) and 1 <= self.recurrence_interval <= 99):
raise ValueError('recurrence_interval must be an int in the range from 1 to 99')
if self.recurrence_days is None:
raise ValueError('recurrence_days is required')
for day in self.recurrence_days.split(' '):
if day not in self.WEEKLY_DAYS:
raise ValueError('recurrence_days received unknown value: %s' % day)
elif self.recurrence == u'monthly':
if not (isinstance(self.recurrence_interval, int) and 1 <= self.recurrence_interval <= 99):
raise ValueError('recurrence_interval must be an int in the range from 1 to 99')
elif self.recurrence == u'yearly':
pass # everything is pulled from start
else:
raise ValueError('recurrence received unknown value: %s' % self.recurrence)
super(Exchange2010CalendarEvent, self).validate()
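# The recurrence rules enforced above, restated as a standalone sketch
# (check_recurrence_rules is a hypothetical helper, not part of pyexchange;
# valid_days stands in for the class attribute WEEKLY_DAYS):
def check_recurrence_rules(recurrence, interval, days=None, valid_days=()):
    limits = {u'daily': 999, u'weekly': 99, u'monthly': 99, u'yearly': None}
    if recurrence not in limits:
        raise ValueError('recurrence received unknown value: %s' % recurrence)
    limit = limits[recurrence]
    if limit is not None and not (isinstance(interval, int) and 1 <= interval <= limit):
        raise ValueError('recurrence_interval must be an int in the range from 1 to %d' % limit)
    if recurrence == u'weekly':
        if days is None:
            raise ValueError('recurrence_days is required')
        for day in days.split(' '):
            if day not in valid_days:
                raise ValueError('recurrence_days received unknown value: %s' % day)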
def create(self):
"""
Creates an event in Exchange. ::
event = service.calendar().new_event(
subject=u"80s Movie Night",
location = u"My house",
)
event.create()
Invitations to attendees are sent out immediately.
"""
self.validate()
body = soap_request.new_event(self)
response_xml = self.service.send(body)
self._id, self._change_key = self._parse_id_and_change_key_from_response(response_xml)
return self
def resend_invitations(self):
"""
Resends invites for an event. ::
event = service.calendar().get_event(id='KEY HERE')
event.resend_invitations()
Anybody who has not declined this meeting will get a new invite.
"""
if not self.id:
raise TypeError(u"You can't send invites for an event that hasn't been created yet.")
# Under the hood, this is just an .update() but with no attributes changed.
# We're going to enforce that by checking if there are any changed attributes and bail if there are
if self._dirty_attributes:
raise ValueError(u"There are unsaved changes to this invite - please update it first: %r" % self._dirty_attributes)
self.refresh_change_key()
body = soap_request.update_item(self, [], calendar_item_update_operation_type=u'SendOnlyToAll')
self.service.send(body)
return self
def update(self, calendar_item_update_operation_type=u'SendToAllAndSaveCopy', **kwargs):
"""
Updates an event in Exchange. ::
event = service.calendar().get_event(id='KEY HERE')
event.location = u'New location'
event.update()
If no changes to the event have been made, this method does nothing.
Notification of the change is sent to all attendees by default. To notify only the people whose
invitations changed, pass ``calendar_item_update_operation_type=u'SendToChangedAndSaveCopy'``
(the older ``send_only_to_changed_attendees=True`` keyword is deprecated and maps to the same value).
"""
if not self.id:
raise TypeError(u"You can't update an event that hasn't been created yet.")
if 'send_only_to_changed_attendees' in kwargs:
warnings.warn(
"The argument send_only_to_changed_attendees is deprecated. Use calendar_item_update_operation_type instead.",
DeprecationWarning,
) # 20140502
if kwargs['send_only_to_changed_attendees']:
calendar_item_update_operation_type = u'SendToChangedAndSaveCopy'
VALID_UPDATE_OPERATION_TYPES = (
u'SendToNone', u'SendOnlyToAll', u'SendOnlyToChanged',
u'SendToAllAndSaveCopy', u'SendToChangedAndSaveCopy',
)
if calendar_item_update_operation_type not in VALID_UPDATE_OPERATION_TYPES:
raise ValueError('calendar_item_update_operation_type has unknown value')
self.validate()
if self._dirty_attributes:
log.debug(u"Updating these attributes: %r" % self._dirty_attributes)
self.refresh_change_key()
body = soap_request.update_item(self, self._dirty_attributes, calendar_item_update_operation_type=calendar_item_update_operation_type)
self.service.send(body)
self._reset_dirty_attributes()
else:
log.info(u"Update was called, but there's nothing to update. Doing nothing.")
return self
def cancel(self):
"""
Cancels an event in Exchange. ::
event = service.calendar().get_event(id='KEY HERE')
event.cancel()
This will send notifications to anyone who has not declined the meeting.
"""
if not self.id:
raise TypeError(u"You can't delete an event that hasn't been created yet.")
self.refresh_change_key()
self.service.send(soap_request.delete_event(self))
# TODO rsanders high - check return status to make sure it was actually sent
return None
def move_to(self, folder_id):
"""
:param str folder_id: The Calendar ID to
assert len(similars) == self.dist_similarity_test_times * len(discm_tests_gender0)
with open(file1, "a") as f2:
for prt in similars:
f2.write(str(prt)[1:-1].replace(" ", "") + "\n") # remove space
with open(file0, "a") as f1:
for cnt, i in enumerate(discm_tests_gender0):
for _ in range(self.dist_similarity_test_times): # each datapoint gets printed dist_similarity_test_times times
f1.write(str(i)[1:-1].replace(" ", "") + "\n") # remove space
break
if total % 100 == 0:
print(total, "doing")
df = pd.read_csv(file0)
assert df['race'].unique() == 0
df = pd.read_csv(file1)
assert df['race'].unique() == 1
def single_feature_discm_salary(self, feature, theta, confidence, epsilon, type_discm):
assert(isinstance(feature, int))
assert(feature <= len(self.attr_names))
discm_tests_gender0 = []
total = 0
with open("sex0_salary.csv", "a") as f:
f.write("sex,rank,year,degree,Experience\n")
while True:
new = self.randomInput_class0(feature)
discm_tests_gender0.append(new)
total += 1
# x = len(discm_tests_gender0)
if total == 52*100:
with open("sex0_salary.csv", "a") as f:
for i in discm_tests_gender0:
f.write(str(i)[1:-1].replace(" ", "") + "\n") # remove space
break
# check if any tests are duplicated:
df = pd.read_csv("sex0_salary.csv")
x = df.duplicated()
print(x.any(), "see duplication") # if this is False, we are all good.
# np.where(x) # if this is an empty list we are good, For adult we are good.
# This generates same examples for the other class
# df = pd.read_csv("gender0_adult.csv")
df['sex'] = 1
df.to_csv("sex1_salary.csv", index=False)
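# The pattern used by each single_feature_discm_* method is identical: sample
# random inputs with the sensitive feature fixed to class 0, write them out,
# then flip only that feature to produce the paired class-1 file. A generic
# sketch of that pattern (`sample_input` is a hypothetical stand-in for
# self.randomInput_class0; pandas is already imported as pd in this module):
def make_counterfactual_pair_files(sample_input, n, columns, sensitive_col,
                                   file0, file1):
    rows = [sample_input() for _ in range(n)]
    df = pd.DataFrame(rows, columns=columns)
    df.to_csv(file0, index=False)   # all rows have sensitive_col == 0
    df[sensitive_col] = 1           # flip only the sensitive feature
    df.to_csv(file1, index=False)   # identical rows for the other class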
def single_feature_discm_salary_dist(self, feature, theta, confidence, epsilon, type_discm):
assert(isinstance(feature, int))
assert(feature <= len(self.attr_names))
discm_tests_gender0 = []
total = 0
file0 = "sex0_salary_dist10.csv"
file1 = "sex1_salary_dist10.csv"
with open(file0, "w") as f:
f.write("sex,rank,year,degree,Experience\n")
with open(file1, "w") as f:
f.write("sex,rank,year,degree,Experience\n")
while True:
new = self.randomInput_class0(feature) # random sample one datapoint
discm_tests_gender0.append(new)
total += 1
if total == 52 * 100:
for i in discm_tests_gender0:
for prt in range(10): # each datapoint gets printed 10 times
similar_inp = self.find_val_within_range(i, feature, 1)
with open(file1, "a") as f2:
f2.write(str(similar_inp)[1:-1].replace(" ", "") + "\n") # remove space
for prt in range(10): # each datapoint gets printed 10 times
with open(file0, "a") as f1:
f1.write(str(i)[1:-1].replace(" ", "") + "\n") # remove space
break
df = pd.read_csv(file0)
assert df['sex'].unique() == 0
df = pd.read_csv(file1)
assert df['sex'].unique() == 1
def single_feature_discm_german(self, feature, theta, confidence, epsilon, type_discm):
assert(isinstance(feature, int))
assert(feature <= len(self.attr_names)) # feature is the sensitive feature
# score = self.causalDiscrimination([feature], confidence, epsilon)
# print("No. of discriminating tests: ", len(self.causal_tests), "Score: ", score)
discm_tests_gender0 = []
total = 0
with open("gender0_german.csv", "a") as f:
f.write("Checking-ccount,Months,Credit-history,Purpose,Credit-mount,Svings-ccount,Present-employment-since,Instllment-rte,Gender,Other-debtors,Present-residence-since,Property,ge,Other-instllment-plns,Housing,Number-of-existing-credits,Job,Number-of-people-being-lible,Telephone,Foreign-worker\n")
while True:
new = self.randomInput_class0(feature)
# if not new in discm_tests_gender0: # it's fine for two or more tests to be identical; we generate them randomly
discm_tests_gender0.append(new)
total += 1
x = len(discm_tests_gender0)
if x == 10000:
# if x == self.MaxSamples:
print(total, "hello")
with open("gender0_german.csv", "a") as f:
for i in discm_tests_gender0:
f.write(str(i)[1:-1].replace(" ", "") + "\n")
discm_tests_gender0 = []
if total == 100000:
# if total == self.MaxSamples:
with open("gender0_german.csv", "a") as f:
for i in discm_tests_gender0:
f.write(str(i)[1:-1].replace(" ", "") + "\n") # remove space
break
# check if any tests are duplicated:
df = pd.read_csv("gender0_german.csv")
x = df.duplicated()
print(x.any(), "see duplication") # if this is False, we are all good.
# np.where(x) # if this is an empty list we are good, For adult we are good.
# This generates same examples for the other gender
# df = pd.read_csv("gender0_german.csv")
df['Gender'] = 1
df.to_csv("gender1_german.csv", index=False)
# with open("gender0_german.csv", "a") as f:
# for i in discm_tests_gender0:
# f.write(str(i)[1:-1] + "\n")
# if score > theta:
# print("Discriminates against: ", self.attr_names[feature])
def single_feature_discm_german_dist(self, feature, theta, confidence, epsilon, type_discm):
assert(isinstance(feature, int))
assert(feature <= len(self.attr_names)) # feature is the sensitive feature
discm_tests_gender0 = []
total = 0
file0 = "gender0_german_dist10.csv"
file1 = "gender1_german_dist10.csv"
with open(file0, "w") as f:
f.write("Checking-ccount,Months,Credit-history,Purpose,Credit-mount,Svings-ccount,Present-employment-since,Instllment-rte,Gender,Other-debtors,Present-residence-since,Property,ge,Other-instllment-plns,Housing,Number-of-existing-credits,Job,Number-of-people-being-lible,Telephone,Foreign-worker\n")
with open(file1, "w") as f:
f.write("Checking-ccount,Months,Credit-history,Purpose,Credit-mount,Svings-ccount,Present-employment-since,Instllment-rte,Gender,Other-debtors,Present-residence-since,Property,ge,Other-instllment-plns,Housing,Number-of-existing-credits,Job,Number-of-people-being-lible,Telephone,Foreign-worker\n")
while True:
new = self.randomInput_class0(feature)
discm_tests_gender0.append(new)
total += 1
x = len(discm_tests_gender0)
if total == 100000:
similars = []
for cnt, i in enumerate(discm_tests_gender0):
# for _ in range(10): # each datapoint get printed 10 times
similar_inputs = self.find_val_within_range(i, feature, 1, 10)
for sims in similar_inputs:
similars.append(sims)
if cnt % 100 == 0:
print(cnt, "done")
assert len(similars) == 10 * len(discm_tests_gender0)
with open(file1, "a") as f2:
for prt in similars:
f2.write(str(prt)[1:-1].replace(" ", "") + "\n") # remove space
with open(file0, "a") as f1:
for cnt, i in enumerate(discm_tests_gender0):
for _ in range(10): # each datapoint gets printed 10 times
f1.write(str(i)[1:-1].replace(" ", "") + "\n") # remove space
break
df = pd.read_csv(file0)
assert df['Gender'].unique() == 0
df = pd.read_csv(file1)
assert df['Gender'].unique() == 1
def single_feature_discm_small_dataset(self, feature, theta, confidence, epsilon, type_discm):
assert(isinstance(feature, int))
assert(feature <= len(self.attr_names))
# score = self.causalDiscrimination([feature], confidence, epsilon)
# print("No. of discriminating tests: ", len(self.causal_tests), "Score: ", score)
discm_tests_gender0 = []
total = 0
with open("race0_biased_smalldataset.csv", "a") as f:
f.write("Income,Neighbor-income,Race\n")
while True:
new = self.randomInput_class0(feature)
discm_tests_gender0.append(new)
total += 1
if total == 700:
with open("race0_biased_smalldataset.csv", "a") as f:
for i in discm_tests_gender0:
f.write(str(i)[1:-1].replace(" ", "") + "\n") # remove space
break
# check if any tests are duplicated:
df = pd.read_csv("race0_biased_smalldataset.csv")
x = df.duplicated()
print(x.any(), "see duplication") # if this is False, we are all good.
# This generates same examples for the other gender
df['Race'] = 1
df.to_csv("race1_biased_smalldataset.csv", index=False)
def single_feature_discm_compas(self, feature, theta, confidence, epsilon, type_discm):
assert(isinstance(feature, int))
assert(feature <= len(self.attr_names))
# score = self.causalDiscrimination([feature], confidence, epsilon)
# print("No. of discriminating tests: ", len(self.causal_tests), "Score: ", score)
discm_tests_gender0 = []
total = 0
with open("race0_compas.csv", "a") as f:
f.write("sex,age,race,juv_fel_count,juv_misd_count,juv_other_count,priors_count,days_b_screening_arrest,c_days_from_compas,c_charge_degree\n")
while True:
new = self.randomInput_class0(feature)
# if not new in discm_tests_gender0: # it's fine for two or more tests to be identical; we generate them randomly
discm_tests_gender0.append(new)
total += 1
x = len(discm_tests_gender0)
if total == 1000000:
with open("race0_compas.csv", "a") as f:
for i in discm_tests_gender0:
f.write(str(i)[1:-1].replace(" ", "") + "\n") # remove space
break
# check if any tests are duplicated:
df = pd.read_csv("race0_compas.csv")
x = df.duplicated()
print(x.any(), "see duplication") # if this is False, we are all good.
# np.where(x) # if this is an empty list we are good, For adult we are good.
# This generates same examples for the other demographic group
df['race'] = 1
df.to_csv("race1_compas.csv", index=False)
# with open("gender0_adult.csv", "a") as f:
# for i in discm_tests_gender0:
# f.write(str(i)[1:-1] + "\n")
# if score > theta:
# print("Discriminates against: ", self.attr_names[feature])
def single_feature_discm_compas_two_year(self, feature, theta, confidence, epsilon, type_discm):
assert(isinstance(feature, int))
assert(feature <= len(self.attr_names))
discm_tests_gender0 = []
total = 0
with open("race0_compas_two_year.csv", "a") as f:
f.write("age,sex,race,diff_custody,diff_jail,priors_count,juv_fel_count,juv_misd_count,juv_other_count,c_charge_degree\n")
# f.write("sex,age,race,juv_fel_count,decile_score,juv_misd_count,juv_other_count,priors_count,days_b_screening_arrest,c_days_from_compas,c_charge_degree,is_recid,is_violent_recid,decile_score.1,v_decile_score,priors_count.1,start,end,event\n")
while True:
new = self.randomInput_class0(feature)
discm_tests_gender0.append(new)
total += 1
x = len(discm_tests_gender0)
if total == 615000:
with open("race0_compas_two_year.csv", "a") as f:
for i in discm_tests_gender0:
f.write(str(i)[1:-1].replace(" ", "") + "\n") # remove space
# discm_tests_gender0 = []
print(total, "done")
break
# if total == 615000:
# assert(x % 10000 == 0)
# break
# check if any tests are duplicated:
df = pd.read_csv("race0_compas_two_year.csv")
x = df.duplicated()
print(x.any(), "see duplication") # if this is False, we are all good.
# This generates same examples for the other demographic group
df['race'] = 1
df.to_csv("race1_compas_two_year.csv", index=False)
def single_feature_discm_compas_two_year_dist(self, feature, theta, confidence, epsilon, type_discm):
assert(isinstance(feature, int))
assert(feature <= len(self.attr_names))
discm_tests_gender0 = []
total = 0
file0 = "race0_compas_two_year_dist10.csv"
file1 = "race1_compas_two_year_dist10.csv"
with open(file0, "w") as f:
f.write("age,sex,race,diff_custody,diff_jail,priors_count,juv_fel_count,juv_misd_count,juv_other_count,c_charge_degree\n")
with open(file1, "w") as f:
f.write("age,sex,race,diff_custody,diff_jail,priors_count,juv_fel_count,juv_misd_count,juv_other_count,c_charge_degree\n")
while True:
new = self.randomInput_class0(feature)
discm_tests_gender0.append(new)
total += 1
x = len(discm_tests_gender0)
if total == 615000:
similars = []
for cnt, i in enumerate(discm_tests_gender0):
# for _ in range(10): # each datapoint get printed 10 times
similar_inputs = self.find_val_within_range(i, feature, 1, 10)
for sims in similar_inputs:
similars.append(sims)
if cnt % 100 == 0:
print(cnt, "done")
assert len(similars) == 10 * len(discm_tests_gender0)
with open(file1, "a") as f2:
for prt in similars:
f2.write(str(prt)[1:-1].replace(" ", "") + "\n") # remove space
with open(file0, "a") as f1:
for cnt, i in enumerate(discm_tests_gender0):
for _ in
"""
SSBJ test case - http://ntrs.nasa.gov/archive/nasa/casi.ntrs.nasa.gov/19980234657.pdf
Python implementation and OpenMDAO integration developed by
<NAME> and <NAME>, ONERA, the French Aerospace Lab.
"""
from __future__ import print_function
import numpy as np
from openmdao.api import ExplicitComponent
from .common import PolynomialFunction, WFO, WO, NZ
# pylint: disable=C0103
def structure(pf, x_str, Z, L, WE):
t = Z[0]*Z[5]/(np.sqrt(abs(Z[5]*Z[3])))
b = np.sqrt(abs(Z[5]*Z[3]))/2.0
R = (1.0+2.0*x_str[0])/(3.0*(1.0+x_str[0]))
Theta = pf([abs(x_str[1]), b, R, L],
[2, 4, 4, 3], [0.25]*4, "twist")
Fo1 = pf([x_str[1]], [1], [.008], "Fo1")
WT_hat = L
WW = Fo1 * (0.0051 * abs(WT_hat*NZ)**0.557 * \
abs(Z[5])**0.649 * abs(Z[3])**0.5 * abs(Z[0])**(-0.4) \
* abs(1.0+x_str[0])**0.1 * (0.1875*abs(Z[5]))**0.1 \
/ abs(np.cos(Z[4]*np.pi/180.)))
WFW = 5.0/18.0 * abs(Z[5]) * 2.0/3.0 * t * 42.5
WF = WFW + WFO
WT = WO + WW + WF + WE
sigma = 5*[0.]
sigma[0] = pf([Z[0], L, x_str[1], b, R], [4, 1, 4, 1, 1], [0.1]*5, "sigma[1]")
sigma[1] = pf([Z[0], L, x_str[1], b, R], [4, 1, 4, 1, 1], [0.15]*5, "sigma[2]")
sigma[2] = pf([Z[0], L, x_str[1], b, R], [4, 1, 4, 1, 1], [0.2]*5, "sigma[3]")
sigma[3] = pf([Z[0], L, x_str[1], b, R], [4, 1, 4, 1, 1], [0.25]*5, "sigma[4]")
sigma[4] = pf([Z[0], L, x_str[1], b, R], [4, 1, 4, 1, 1], [0.30]*5, "sigma[5]")
return Theta, WF, WT, sigma
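# The analytic Jacobian in Structure.compute_partials below can be
# spot-checked against finite differences with OpenMDAO's standard
# check_partials workflow (a sketch; the `scalers` dict is assumed to be
# available from the surrounding problem setup):
#
#     from openmdao.api import Problem, Group
#     prob = Problem(model=Group())
#     prob.model.add_subsystem('struct', Structure(scalers), promotes=['*'])
#     prob.setup()
#     prob.run_model()
#     prob.check_partials(compact_print=True)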
class Structure(ExplicitComponent):
def __init__(self, scalers):
super(Structure, self).__init__()
# scalers values
self.scalers = scalers
# Polynomial function initialized with given constant values
self.pf = PolynomialFunction()
def setup(self):
# Global Design Variable z=(t/c,h,M,AR,Lambda,Sref)
self.add_input('z', val=np.ones(6))
# Local Design Variable x_str=(lambda,section caisson)
self.add_input('x_str', val=np.ones(2))
# Coupling parameters
self.add_input('L', val=1.0)
self.add_input('WE', val=1.0)
# Coupling output
self.add_output('WT', val=1.0)
self.add_output('Theta', val=1.0)
self.add_output('WF', val=1.0)
self.add_output('sigma', val=np.ones(5))
self.declare_partials('*', '*')
def compute(self, inputs, outputs):
Z = inputs['z']*self.scalers['z']
x_str = inputs['x_str']*self.scalers['x_str']
L = inputs['L']*self.scalers['L']
WE = inputs['WE']*self.scalers['WE']
Theta, WF, WT, sigma = structure(self.pf, x_str, Z, L, WE)
#Unknowns
outputs['Theta'] = Theta/self.scalers['Theta']
outputs['WF'] = WF/self.scalers['WF']
outputs['WT'] = WT/self.scalers['L']
outputs['sigma'] = np.zeros(5)
for i in range(5):
outputs['sigma'][i] = sigma[i]/self.scalers['sigma'][i]
def compute_partials(self, inputs, J):
Z = inputs['z']*self.scalers['z']
Xstr = inputs['x_str']*self.scalers['x_str']
L = inputs['L']*self.scalers['L']
# dWT ################################################################
Fo1 = self.pf([Xstr[1]], [1], [.008], "Fo1")
dWtdlambda = 0.1*Fo1/np.cos(Z[4]*np.pi/180.)*0.0051 \
*(abs(L)*NZ)**0.557*abs(Z[5])**0.649 \
* abs(Z[3])**0.5 * abs(Z[0])**(-0.4) \
* (1.0+Xstr[0])**-0.9 * (0.1875*abs(Z[5]))**0.1
A = (0.0051 * abs(L*NZ)**0.557 * abs(Z[5])**0.649 \
* abs(Z[3])**0.5 * abs(Z[0])**(-0.4) * abs(1.0+Xstr[0])**0.1 \
* (0.1875*abs(Z[5]))**0.1 / abs(np.cos(Z[4]*np.pi/180.)))
S_shifted, Ai, Aij = self.pf([Xstr[1]], [1], [.008],
"Fo1", deriv=True)
if Xstr[1]/self.pf.d['Fo1'][0]>=0.75 and Xstr[1]/self.pf.d['Fo1'][0]<=1.25:
dSxdx = 1.0/self.pf.d['Fo1'][0]
else:
dSxdx = 0.0
dWtdx = A*(Ai[0]*dSxdx \
+ Aij[0, 0]*dSxdx*S_shifted[0, 0])
val = np.append(dWtdlambda/self.scalers['L'], dWtdx/self.scalers['L'])
J['WT', 'x_str'] = np.array([val])*self.scalers['x_str']
dWTdtc = -0.4*Fo1/np.cos(Z[4]*np.pi/180.)*0.0051 \
* abs(L*NZ)**0.557 * abs(Z[5])**0.649 \
* abs(Z[3])**0.5*abs(Z[0])**(-1.4)*abs(1.0+Xstr[0])**0.1 \
* (0.1875*abs(Z[5]))**0.1 + 212.5/27.*Z[5]**(3.0/2.0)/np.sqrt(Z[3])
dWTdh = 0.0
dWTdM = 0.0
dWTdAR = 0.5*Fo1/np.cos(Z[4]*np.pi/180.)* 0.0051 \
* abs(L*NZ)**0.557 * abs(Z[5])**0.649 \
* abs(Z[3])**-0.5*abs(Z[0])**(-0.4)*abs(1.0+Xstr[0])**0.1 \
* (0.1875*abs(Z[5]))**0.1 + 212.5/27.*Z[5]**(3.0/2.0) \
* Z[0] * -0.5*Z[3]**(-3.0/2.0)
dWTdLambda = Fo1*np.pi/180.*np.sin(Z[4]*np.pi/180.)/np.cos(Z[4]*np.pi/180.)**2 \
* 0.0051 * abs(L*NZ)**0.557 * abs(Z[5])**0.649 \
* abs(Z[3])**0.5*abs(Z[0])**(-0.4)*abs(1.0+Xstr[0])**0.1 \
* (0.1875*abs(Z[5]))**0.1
dWTdSref = 0.749*Fo1/np.cos(Z[4]*np.pi/180.)*0.1875**(0.1)*0.0051 \
* abs(L*NZ)**0.557*abs(Z[5])**-0.251 \
*abs(Z[3])**0.5*abs(Z[0])**(-0.4)*abs(1.0+Xstr[0])**0.1 \
+ 637.5/54.*Z[5]**(0.5)*Z[0]/np.sqrt(Z[3])
val = np.append(dWTdtc/self.scalers['L'],
[dWTdh/self.scalers['L'],
dWTdM/self.scalers['L'],
dWTdAR/self.scalers['L'],
dWTdLambda/self.scalers['L'],
dWTdSref/self.scalers['L']])
J['WT', 'z'] = np.array([val])*self.scalers['z']
dWTdL = 0.557*Fo1/np.cos(Z[4]*np.pi/180.)*0.0051 * abs(L)**-0.443 \
*NZ**0.557* abs(Z[5])**0.649 * abs(Z[3])**0.5 \
* abs(Z[0])**(-0.4) * abs(1.0+Xstr[0])**0.1 * (0.1875*abs(Z[5]))**0.1
J['WT', 'L'] = np.array([[dWTdL]])
dWTdWE = 1.0
J['WT', 'WE'] = np.array([[dWTdWE]])/self.scalers['L']*self.scalers['WE']
# dWF ################################################################
dWFdlambda = 0.0
dWFdx = 0.0
val = np.append(dWFdlambda/self.scalers['WF'], dWFdx/self.scalers['WF'])
J['WF', 'x_str'] = np.array([val]) *self.scalers['x_str']
dWFdtc = 212.5/27.*Z[5]**(3.0/2.0)/np.sqrt(Z[3])
dWFdh = 0.0
dWFdM = 0.0
dWFdAR = 212.5/27.*Z[5]**(3.0/2.0) * Z[0] * -0.5*Z[3]**(-3.0/2.0)
dWFdLambda = 0.0
dWFdSref = 637.5/54.*Z[5]**(0.5)*Z[0]/np.sqrt(Z[3])
val=np.append(dWFdtc/self.scalers['WF'],
[dWFdh/self.scalers['WF'],
dWFdM/self.scalers['WF'],
dWFdAR/self.scalers['WF'],
dWFdLambda/self.scalers['WF'],
dWFdSref/self.scalers['WF']])
J['WF', 'z'] = np.array([val])*self.scalers['z']
dWFdL = 0.0
J['WF', 'L'] = np.array([[dWFdL]])/self.scalers['WF']*self.scalers['L']
dWFdWE = 0.0
J['WF', 'WE'] = np.array([[dWFdWE]])/self.scalers['WF']*self.scalers['WE']
### dTheta ###########################################################
b = np.sqrt(abs(Z[5]*Z[3]))/2.0
R = (1.0+2.0*Xstr[0])/(3.0*(1.0+Xstr[0]))
S_shifted, Ai, Aij = self.pf([abs(Xstr[1]), b, R, L],
[2, 4, 4, 3],
[0.25]*4, "twist", deriv=True)
if R/self.pf.d['twist'][2]>=0.75 and R/self.pf.d['twist'][2]<=1.25:
dSRdlambda = 1.0/self.pf.d['twist'][2]*1.0/(3.0*(1.0+Xstr[0])**2)
else:
dSRdlambda = 0.0
dSRdlambda2 = 2.0*S_shifted[0, 2]*dSRdlambda
dThetadlambda = Ai[2]*dSRdlambda + 0.5*Aij[2, 2]*dSRdlambda2 \
+ Aij[0, 2]*S_shifted[0, 0]*dSRdlambda \
+ Aij[1, 2]*S_shifted[0, 1]*dSRdlambda\
+ Aij[3, 2]*S_shifted[0, 3]*dSRdlambda
if abs(Xstr[1])/self.pf.d['twist'][0]>=0.75 and abs(Xstr[1])/self.pf.d['twist'][0]<=1.25:
dSxdx = 1.0/self.pf.d['twist'][0]
else:
dSxdx = 0.0
dSxdx2 = 2.0*S_shifted[0, 0]*dSxdx
dThetadx = Ai[0]*dSxdx + 0.5*Aij[0, 0]*dSxdx2 \
+ Aij[1, 0]*S_shifted[0, 1]*dSxdx \
+ Aij[2, 0]*S_shifted[0, 2]*dSxdx \
+ Aij[3, 0]*S_shifted[0, 3]*dSxdx
J['Theta', 'x_str'] = np.array([np.append(dThetadlambda[0, 0]/self.scalers['Theta'],
dThetadx[0, 0]/self.scalers['Theta'])])\
*self.scalers['x_str']
dThetadtc = 0.0
dThetadh = 0.0
dThetadM = 0.0
if b/self.pf.d['twist'][1]>=0.75 and b/self.pf.d['twist'][1]<=1.25:
dSbdAR = 1.0/self.pf.d['twist'][1]*(np.sqrt(Z[5])/4.0*Z[3]**-0.5)
else:
dSbdAR = 0.0
dSbdAR2 = 2.0*S_shifted[0, 1]*dSbdAR
dThetadAR = Ai[1]*dSbdAR+0.5*Aij[1, 1]*dSbdAR2 \
+ Aij[0, 1]*S_shifted[0, 0]*dSbdAR \
+ Aij[2, 1]*S_shifted[0, 2]*dSbdAR \
+ Aij[3, 1]*S_shifted[0, 3]*dSbdAR
dThetadLambda = 0.0
if b/self.pf.d['twist'][1]>=0.75 and b/self.pf.d['twist'][1]<=1.25:
dSbdSref= 1.0/self.pf.d['twist'][1]*(np.sqrt(Z[3])/4.0*Z[5]**-0.5)
else:
dSbdSref = 0.0
dSbdSref2 = 2.0*S_shifted[0, 1]*dSbdSref
dThetadSref = Ai[1]*dSbdSref + 0.5*Aij[1, 1]*dSbdSref2 \
+ Aij[0, 1]*S_shifted[0, 0]*dSbdSref \
+ Aij[2, 1]*S_shifted[0, 2]*dSbdSref \
+ Aij[3, 1]*S_shifted[0, 3]*dSbdSref
J['Theta', 'z'] = np.array([np.append(dThetadtc/self.scalers['Theta'],
[dThetadh/self.scalers['Theta'],
dThetadM/self.scalers['Theta'],
dThetadAR/self.scalers['Theta'],
dThetadLambda/self.scalers['Theta'],
dThetadSref/self.scalers['Theta']])])*self.scalers['z']
if L/self.pf.d['twist'][3]>=0.75 and L/self.pf.d['twist'][3]<=1.25:
dSLdL = 1.0/self.pf.d['twist'][3]
else:
dSLdL = 0.0
dSLdL2 = 2.0*S_shifted[0, 3]*dSLdL
dThetadL = Ai[3]*dSLdL + 0.5*Aij[3, 3]*dSLdL2 \
+ Aij[0, 3]*S_shifted[0, 0]*dSLdL \
+ Aij[1, 3]*S_shifted[0, 1]*dSLdL \
+ Aij[2, 3]*S_shifted[0, 2]*dSLdL
J['Theta', 'L'] = (np.array([[dThetadL]]) \
/ self.scalers['Theta']*self.scalers['L']).reshape((1, 1))
dThetadWE = 0.0
J['Theta', 'WE'] = np.array([[dThetadWE]])/self.scalers['Theta']*self.scalers['WE']
# dsigma #############################################################
b = np.sqrt(abs(Z[5]*Z[3]))/2.0
R = (1.0+2.0*Xstr[0])/(3.0*(1.0+Xstr[0]))
s_new = [Z[0], L, Xstr[1], b, R]
S_shifted, Ai, Aij = self.pf(s_new,
[4, 1, 4, 1, 1], [0.1]*5,
"sigma[1]", deriv=True)
if R/self.pf.d['sigma[1]'][4]>=0.75 and R/self.pf.d['sigma[1]'][4]<=1.25:
dSRdlambda = 1.0/self.pf.d['sigma[1]'][4]*1.0/(3.0*(1.0+Xstr[0])**2)
else:
dSRdlambda = 0.0
dSRdlambda2 = 2.0*S_shifted[0, 4]*dSRdlambda
dsigma1dlambda = Ai[4]*dSRdlambda + 0.5*Aij[4, 4]*dSRdlambda2 \
+ Aij[0, 4]*S_shifted[0, 0]*dSRdlambda \
+ Aij[1, 4]*S_shifted[0, 1]*dSRdlambda \
+ Aij[2, 4]*S_shifted[0, 2]*dSRdlambda \
+ Aij[3, 4]*S_shifted[0, 3]*dSRdlambda
if Xstr[1]/self.pf.d['sigma[1]'][2]>=0.75 and Xstr[1]/self.pf.d['sigma[1]'][2]<=1.25:
dSxdx = 1.0/self.pf.d['sigma[1]'][2]
else:
dSxdx = 0.0
dSxdx2 = 2.0*S_shifted[0, 2]*dSxdx
dsigma1dx = Ai[2]*dSxdx+0.5*Aij[2, 2]*dSxdx2 \
+ Aij[0, 2]*S_shifted[0, 0]*dSxdx \
+ Aij[1, 2]*S_shifted[0, 1]*dSxdx \
+ Aij[3, 2]*S_shifted[0, 3]*dSxdx \
+ Aij[4, 2]*S_shifted[0, 4]*dSxdx
S_shifted, Ai, Aij = self.pf(s_new,
[4, 1, 4, 1, 1], [0.15]*5,
"sigma[2]", deriv=True)
if R/self.pf.d['sigma[2]'][4]>=0.75 and R/self.pf.d['sigma[2]'][4]<=1.25:
dSRdlambda = 1.0/self.pf.d['sigma[2]'][4]*1.0/(3.0*(1.0+Xstr[0])**2)
else:
dSRdlambda = 0.0
dSRdlambda2 = 2.0*S_shifted[0, 4]*dSRdlambda
dsigma2dlambda = Ai[4]*dSRdlambda \
+ 0.5*Aij[4, 4]*dSRdlambda2 \
+ Aij[0, 4]*S_shifted[0, 0]*dSRdlambda \
+ Aij[1, 4]*S_shifted[0, 1]*dSRdlambda \
+ Aij[2, 4]*S_shifted[0, 2]*dSRdlambda \
+ Aij[3, 4]*S_shifted[0, 3]*dSRdlambda
if Xstr[1]/self.pf.d['sigma[2]'][2]>=0.75 and Xstr[1]/self.pf.d['sigma[2]'][2]<=1.25:
dSxdx = 1.0/self.pf.d['sigma[2]'][2]
else:
dSxdx = 0.0
dSxdx2 = 2.0*S_shifted[0, 2]*dSxdx
dsigma2dx = Ai[2]*dSxdx + 0.5*Aij[2, 2]*dSxdx2 \
+ Aij[0, 2]*S_shifted[0, 0]*dSxdx \
+ Aij[1, 2]*S_shifted[0, 1]*dSxdx \
+ Aij[3, 2]*S_shifted[0, 3]*dSxdx \
+ Aij[4, 2]*S_shifted[0, 4]*dSxdx
S_shifted, Ai, Aij = self.pf(s_new,
[4, 1, 4, 1, 1], [0.2]*5,
"sigma[3]", deriv=True)
if R/self.pf.d['sigma[3]'][4]>=0.75 and R/self.pf.d['sigma[3]'][4]<=1.25:
dSRdlambda = 1.0/self.pf.d['sigma[3]'][4]*1.0/(3.0*(1.0+Xstr[0])**2)
else:
dSRdlambda = 0.0
dSRdlambda2 = 2.0*S_shifted[0, 4]*dSRdlambda
dsigma3dlambda = Ai[4]*dSRdlambda+0.5*Aij[4, 4]*dSRdlambda2 \
+ Aij[0, 4]*S_shifted[0, 0]*dSRdlambda \
+ Aij[1, 4]*S_shifted[0, 1]*dSRdlambda \
+ Aij[2, 4]*S_shifted[0, 2]*dSRdlambda \
+ Aij[3, 4]*S_shifted[0, 3]*dSRdlambda
if Xstr[1]/self.pf.d['sigma[3]'][2]>=0.75 and Xstr[1]/self.pf.d['sigma[3]'][2]<=1.25:
dSxdx = 1.0/self.pf.d['sigma[3]'][2]
else:
dSxdx = 0.0
dSxdx2 = 2.0*S_shifted[0, 2]*dSxdx
dsigma3dx = Ai[2]*dSxdx+0.5*Aij[2, 2]*dSxdx2 \
+ Aij[0, 2]*S_shifted[0, 0]*dSxdx \
+ Aij[1, 2]*S_shifted[0, 1]*dSxdx \
+ Aij[3, 2]*S_shifted[0, 3]*dSxdx \
+ Aij[4, 2]*S_shifted[0, 4]*dSxdx
S_shifted, Ai, Aij = self.pf(s_new,
[4, 1, 4, 1, 1], [0.25]*5,
"sigma[4]", deriv=True)
if R/self.pf.d['sigma[4]'][4]>=0.75 and R/self.pf.d['sigma[4]'][4]<=1.25:
dSRdlambda = 1.0/self.pf.d['sigma[4]'][4]*1.0/(3.0*(1.0+Xstr[0])**2)
else:
dSRdlambda = 0.0
dSRdlambda2 = 2.0*S_shifted[0, 4]*dSRdlambda
dsigma4dlambda = Ai[4]*dSRdlambda \
+ 0.5*Aij[4, 4]*dSRdlambda2 \
+ Aij[0, 4]*S_shifted[0, 0]*dSRdlambda \
+ Aij[1, 4]*S_shifted[0, 1]*dSRdlambda \
+ Aij[2, 4]*S_shifted[0, 2]*dSRdlambda \
+ Aij[3, 4]*S_shifted[0, 3]*dSRdlambda
if Xstr[1]/self.pf.d['sigma[4]'][2]>=0.75 and Xstr[1]/self.pf.d['sigma[4]'][2]<=1.25:
dSxdx = 1.0/self.pf.d['sigma[4]'][2]
else:
dSxdx = 0.0
dSxdx2 = 2.0*S_shifted[0, 2]*dSxdx
dsigma4dx = Ai[2]*dSxdx+0.5*Aij[2, 2]*dSxdx2 \
+ Aij[0, 2]*S_shifted[0, 0]*dSxdx \
+ Aij[1, 2]*S_shifted[0, 1]*dSxdx \
+ Aij[3, 2]*S_shifted[0, 3]*dSxdx \
+ Aij[4, 2]*S_shifted[0, 4]*dSxdx
S_shifted, Ai, Aij = self.pf(s_new,
[4, 1, 4, 1, 1], [0.3]*5,
"sigma[5]", deriv=True)
if R/self.pf.d['sigma[5]'][4]>=0.75 and R/self.pf.d['sigma[5]'][4]<=1.25:
dSRdlambda = 1.0/self.pf.d['sigma[5]'][4]*1.0/(3.0*(1.0+Xstr[0])**2)
else:
dSRdlambda = 0.0
dSRdlambda2 = 2.0*S_shifted[0, 4]*dSRdlambda
dsigma5dlambda = Ai[4]*dSRdlambda+0.5*Aij[4, 4]*dSRdlambda2 \
+ Aij[0, 4]*S_shifted[0, 0]*dSRdlambda \
+ Aij[1, 4]*S_shifted[0, 1]*dSRdlambda \
+ Aij[2, 4]*S_shifted[0, 2]*dSRdlambda \
+ Aij[3, 4]*S_shifted[0, 3]*dSRdlambda
if Xstr[1]/self.pf.d['sigma[5]'][2]>=0.75 and Xstr[1]/self.pf.d['sigma[5]'][2]<=1.25:
dSxdx = 1.0/self.pf.d['sigma[5]'][2]
else:
dSxdx = 0.0
dSxdx2 = 2.0*S_shifted[0, 2]*dSxdx
dsigma5dx
"""Main class for working with records.
vectorbt works with two different representations of data: matrices and records.
A matrix, in this context, is just an array of one-dimensional arrays, each corresponding
to a separate feature. The matrix itself holds only one kind of information (one attribute).
For example, one can create a matrix for entry signals, with columns being different strategy
configurations. But what if the matrix is huge and sparse? What if there is more
information we would like to represent by each element? Creating multiple matrices would be
a waste of memory.
Records make it possible to represent complex, sparse information in a dense format. They are just
an array of one-dimensional arrays of fixed schema. You can imagine records being a DataFrame,
where each row represents a record and each column represents a specific attribute.
```plaintext
a b
0 1.0 5.0
attr1 = 1 2.0 NaN
2 NaN 7.0
3 4.0 8.0
a b
0 9.0 13.0
attr2 = 1 10.0 NaN
2 NaN 15.0
3 12.0 16.0
|
v
col idx attr1 attr2
0 0 0 1 9
1 0 1 2 10
2 0 3 4 12
3 1 0 5 13
4 1 1 7 15
5 1 3 8 16
```
Another advantage of records is that they are not constrained by size. Multiple records can map
to a single element in a matrix. For example, one can define multiple orders at the same time step,
which is impossible to represent in a matrix form without using complex data types.
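For instance, two orders at the same column and time step coexist naturally as records
(a small sketch; the ``order_dt`` schema below is hypothetical):
```python-repl
>>> import numpy as np
>>> order_dt = np.dtype([
...     ('col', np.int64),
...     ('idx', np.int64),
...     ('size', np.float64)
... ])
>>> np.array([(0, 5, 10.), (0, 5, -10.)], dtype=order_dt)  # same (col, idx) twice
array([(0, 5,  10.), (0, 5, -10.)],
      dtype=[('col', '<i8'), ('idx', '<i8'), ('size', '<f8')])
```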
## Records class
`Records` are just [structured arrays](https://numpy.org/doc/stable/user/basics.rec.html) with a bunch
of methods and properties for processing them. Its main feature is to map the records array and
to reduce it by column (similar to the MapReduce paradigm). The main advantage is that it all happens
without conversion to the matrix form and wasting memory resources.
## MappedArray class
When mapping records using `Records`, for example, to compute P&L of each trade record, the mapping
result is wrapped with `MappedArray` class. This class takes the mapped array and the corresponding column
and (optionally) index arrays, and offers features to directly process the mapped array without converting
it to the matrix form; for example, to compute various statistics by column, such as standard deviation.
## Example
```python-repl
>>> import numpy as np
>>> import pandas as pd
>>> from numba import njit
>>> from collections import namedtuple
>>> from vectorbt.base.array_wrapper import ArrayWrapper
>>> from vectorbt.records import Records, MappedArray
>>> example_dt = np.dtype([
... ('col', np.int64),
... ('idx', np.int64),
... ('some_field', np.float64)
... ])
>>> records_arr = np.array([
... (0, 0, 10.),
... (0, 1, 11.),
... (0, 2, 12.),
... (1, 0, 13.),
... (1, 1, 14.),
... (1, 2, 15.)
... ], dtype=example_dt)
>>> wrapper = ArrayWrapper(index=['x', 'y', 'z'],
... columns=['a', 'b'], ndim=2, freq='1 day')
>>> records = Records(records_arr, wrapper)
>>> records.records
col idx some_field
0 0 0 10.0
1 0 1 11.0
2 0 2 12.0
3 1 0 13.0
4 1 1 14.0
5 1 2 15.0
```
### Mapping
There are several options for mapping:
* Use `Records.map_field` to map a record field:
```python-repl
>>> records.map_field('some_field')
<vectorbt.records.base.MappedArray at 0x7ff49bd31a58>
>>> records.map_field('some_field').mapped_arr
[10. 11. 12. 13. 14. 15.]
```
* Use `Records.map` to map records using a custom function.
```python-repl
>>> @njit
... def power_map_nb(record, pow):
... return record.some_field ** pow
>>> records.map(power_map_nb, 2)
<vectorbt.records.base.MappedArray at 0x7ff49c990cf8>
>>> records.map(power_map_nb, 2).mapped_arr
[100. 121. 144. 169. 196. 225.]
```
* Use `Records.map_array` to convert an array to `MappedArray`.
```python-repl
>>> records.map_array(records_arr['some_field'] ** 2)
<vectorbt.records.base.MappedArray object at 0x7fe9bccf2978>
>>> records.map_array(records_arr['some_field'] ** 2).mapped_arr
[100. 121. 144. 169. 196. 225.]
```
### Reducing
Using `MappedArray`, you can then reduce by column as follows:
* Use already provided reducers such as `MappedArray.mean`:
```python-repl
>>> mapped = records.map_field('some_field')
>>> mapped.mean()
a 11.0
b 14.0
dtype: float64
```
* Use `MappedArray.to_matrix` to map to a matrix and then reduce manually (expensive):
```python-repl
>>> mapped.to_matrix().mean()
a 11.0
b 14.0
dtype: float64
```
* Use `MappedArray.reduce` to reduce using a custom function:
```python-repl
>>> @njit
... def pow_mean_reduce_nb(col, a, pow):
... return np.mean(a ** pow)
>>> mapped.reduce(pow_mean_reduce_nb, 2)
a 121.666667
b 196.666667
dtype: float64
>>> @njit
... def min_max_reduce_nb(col, a):
... return np.array([np.min(a), np.max(a)])
>>> mapped.reduce(min_max_reduce_nb, to_array=True,
... n_rows=2, index=['min', 'max'])
a b
min 10.0 13.0
max 12.0 15.0
>>> @njit
... def idxmin_idxmax_reduce_nb(col, a):
... return np.array([np.argmin(a), np.argmax(a)])
>>> mapped.reduce(idxmin_idxmax_reduce_nb, to_array=True,
... n_rows=2, to_idx=True, index=['idxmin', 'idxmax'])
a b
idxmin x x
idxmax z z
```
### Conversion
You can convert any `MappedArray` instance to the matrix form, given `idx_arr` was provided:
```python-repl
>>> mapped.to_matrix()
a b
x 10.0 13.0
y 11.0 14.0
z 12.0 15.0
```
!!! note
Will raise an error if there are multiple records pointing to the same matrix element.
### Plotting
You can build histograms and boxplots of `MappedArray` directly:
```python-repl
>>> mapped.box()
```

To use scatterplots or any other plots that require index, convert to matrix first:
```python-repl
>>> mapped.to_matrix().vbt.scatter(trace_kwargs=dict(connectgaps=True))
```

## Grouping
Additionally to reducing per column, you can also reduce per group of columns by providing
`group_by`. The `group_by` variable can be anything from positions or names of column levels,
to a NumPy array with actual groups, and can be passed to either `Records`, `MappedArray`,
or the reducing method itself.
```python-repl
>>> np.random.seed(42)
>>> index = pd.Index(['x', 'y'])
>>> columns = pd.MultiIndex.from_arrays([
... [1, 1, 1, 2, 2, 2],
... [1, 2, 3, 1, 2, 3]
... ], names=['a', 'b'])
>>> mapped_group = MappedArray(
... mapped_arr=np.random.randint(1, 10, size=12),
... col_arr=np.repeat(np.arange(6), 2),
... wrapper=ArrayWrapper(index=index, columns=columns),
... idx_arr=np.tile([0, 1], 6)
... )
>>> mapped_group.hist()
```

```python-repl
>>> mapped_group.hist(group_by='a')
```

!!! note
Grouping applies only to reducing and plotting operations.
## Indexing
You can use pandas indexing on both the `Records` and `MappedArray` class, which will forward
the indexing operation to each `__init__` argument with index:
```python-repl
>>> records['a'].records
col idx some_field
0 0 0 10.0
1 0 1 11.0
2 0 2 12.0
>>> mapped['a'].mapped_arr
[10. 11. 12.]
```
!!! note
Changing index (time axis) is not supported.
## Operators
Additionally, `MappedArray` implements arithmetic, comparison and logical operators.
You can perform basic operations (such as addition) on mapped arrays as if they were NumPy arrays.
```python-repl
>>> mapped ** 2
<vectorbt.records.base.MappedArray at 0x7f97bfc49358>
>>> mapped * np.array([1, 2, 3, 4, 5, 6])
<vectorbt.records.base.MappedArray at 0x7f97bfc65e80>
>>> mapped + mapped
<vectorbt.records.base.MappedArray at 0x7f97bfc492e8>
```
!!! note
You should ensure that your `*.vbt` operand is on the left if the other operand is an array.
Two mapped arrays must have the same metadata to be compared/combined.
"""
import numpy as np
import pandas as pd
from vectorbt.utils import checks
from vectorbt.utils.decorators import cached_property, cached_method
from vectorbt.base.indexing import PandasIndexer
from vectorbt.base import reshape_fns
from vectorbt.base.index_grouper import IndexGrouper
from vectorbt.base.common import (
add_binary_magic_methods,
add_unary_magic_methods,
binary_magic_methods,
unary_magic_methods
)
from vectorbt.base.array_wrapper import ArrayWrapper
from vectorbt.generic import nb as generic_nb
from vectorbt.records import nb
def _mapped_indexing_func(obj, pd_indexing_func):
"""Perform indexing on `MappedArray`."""
if obj.wrapper.ndim == 1:
raise TypeError("Indexing on Series is not supported")
n_rows = len(obj.wrapper.index)
n_cols = len(obj.wrapper.columns)
col_mapper = obj.wrapper.wrap(np.broadcast_to(np.arange(n_cols), (n_rows, n_cols)))
col_mapper = pd_indexing_func(col_mapper)
if not pd.Index.equals(col_mapper.index, obj.wrapper.index):
raise NotImplementedError("Changing index (time axis) is not supported")
new_cols = reshape_fns.to_1d(col_mapper.values[0]) # array required
new_indices, new_col_arr = nb.select_mapped_cols_nb(
obj.col_arr,
obj.col_index,
new_cols
)
new_mapped_arr = obj.mapped_arr[new_indices]
if obj.idx_arr is not None:
new_idx_arr = obj.idx_arr[new_indices]
else:
new_idx_arr = None
new_wrapper = ArrayWrapper.from_obj(col_mapper, freq=obj.wrapper.freq)
if obj.grouper.group_by is not None:
new_group_by = obj.grouper.group_by[new_cols]
else:
new_group_by = None
return obj.__class__(
new_mapped_arr,
new_col_arr,
new_wrapper,
idx_arr=new_idx_arr,
group_by=new_group_by
)
def _mapped_binary_translate_func(self, other, np_func):
"""Perform operation between two instances of `MappedArray`."""
if isinstance(other, self.__class__):
passed = True
if not np.array_equal(self.col_arr, other.col_arr):
passed = False
if self.idx_arr is not None or other.idx_arr is not None:
if not np.array_equal(self.idx_arr, other.idx_arr):
passed = False
if self.wrapper != other.wrapper:
passed = False
if self.grouper != other.grouper:
passed = False
if not passed:
raise ValueError("Both MappedArray instances must have same metadata")
other = other.mapped_arr
return self.__class__(
np_func(self.mapped_arr, other),
self.col_arr,
self.wrapper,
idx_arr=self.idx_arr,
group_by=self.grouper.group_by
)
@add_binary_magic_methods(
binary_magic_methods,
_mapped_binary_translate_func
)
@add_unary_magic_methods(
unary_magic_methods,
lambda self, np_func: self.__class__(
np_func(self.mapped_arr),
self.col_arr,
self.wrapper,
idx_arr=self.idx_arr,
group_by=self.grouper.group_by
)
)
class MappedArray(PandasIndexer):
"""Exposes methods and properties for working with records.
Args:
mapped_arr (array_like): A one-dimensional array of mapped record values.
col_arr (array_like): A one-dimensional column array.
Must be of the same size as `mapped_arr`.
wrapper (ArrayWrapper): Array wrapper of type `vectorbt.base.array_wrapper.ArrayWrapper`.
idx_arr (array_like): A one-dimensional index array. Optional.
Must be of the same size as `mapped_arr`.
group_by (int, str or array_like): Group columns by a mapper when reducing.
See `vectorbt.base.index_fns.group_index`."""
def __init__(self, mapped_arr, col_arr, wrapper, idx_arr=None, group_by=None):
if not isinstance(mapped_arr, np.ndarray):
mapped_arr = np.asarray(mapped_arr)
if not isinstance(col_arr, np.ndarray):
col_arr = np.asarray(col_arr)
checks.assert_same_shape(mapped_arr, col_arr, axis=0)
checks.assert_type(wrapper, ArrayWrapper)
if idx_arr is not None:
if not isinstance(idx_arr, np.ndarray):
idx_arr = np.asarray(idx_arr)
checks.assert_same_shape(mapped_arr, idx_arr, axis=0)
self.mapped_arr = mapped_arr
self.col_arr = col_arr
self.wrapper = wrapper
self.idx_arr = idx_arr
self.grouper = IndexGrouper(self.wrapper.columns, group_by=group_by)
PandasIndexer.__init__(self, _mapped_indexing_func)
@cached_property
def col_index(self):
"""Column index for `MappedArray.mapped_arr`."""
return nb.mapped_col_index_nb(self.mapped_arr, self.col_arr, len(self.wrapper.columns))
def filter_by_mask(self, mask, idx_arr=None, group_by=None):
"""Return a new class instance, filtered by mask."""
if idx_arr is None:
idx_arr = self.idx_arr
if idx_arr is not None:
idx_arr
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Distributed under the terms of the MIT License.
"""
Script to analyse FASTA sequence files from BRENDA in bulk.
Author: <NAME>
Date Created: 06 Dec 2018
"""
from os.path import exists
import time
import pandas as pd
from numpy import arange, histogram
import matplotlib.pyplot as plt
from Bio import SeqIO
from Bio.SeqUtils.ProtParam import ProteinAnalysis
from Bio.Alphabet import IUPAC
from ercollect.rs_protein_analysis import calculate_seq_aliphatic_index
from ercollect.tm_predictor import calculate_TM_index
from ercollect.plotting import EC_descriptions
def specific_EC_descriptions():
"""Dictionary of EC descriptions + colours for target reactions.
"""
top_tier = {'3.5.5.1': ('nitrilase', '#1469b5'),
'3.5.5.4': ('cyanoalanine nitrilase', '#FF7900'),
'4.2.1.65': ('3-cyanoalanine hydratase', '#00B036'),
}
return top_tier
def fix_fasta(FASTA_file):
"""
Fix FASTA files to be in BioPYTHON format.
Arguments:
FASTA_file (str) - FASTA file name to fix
"""
file_mod = FASTA_file.replace(".fasta", "_mod.fasta")
if exists(file_mod) is False:
with open(FASTA_file, 'r', encoding='mac-roman') as f:
lines = f.readlines()
new_lines = []
for line in lines:
if '|' in line and ">" not in line:
# header lines that contain '|' but lost their leading '>'
# marker are restored here so that Biopython can parse the
# file
new_line = ">"+line
new_lines.append(new_line)
else:
new_lines.append(line)
with open(file_mod, 'w') as f:
for line in new_lines:
f.write(line)
return file_mod
def read_seq_output(output_file):
"""Read sequence information output file. Returns associated PANDAS
dataframe.
"""
output = pd.read_table(output_file, delimiter='@', skiprows=[0],
names=['acc_code', 'organism', 'EC_code',
'species', 'note', 'pI', 'GRAVY',
'I_index', 'A_index', 'TM_index'],
engine='python')
return output
def update_seq_output(output_file, ROW):
"""Update sequence information output file with ROW.
Returns associated PANDAS dataframe.
"""
# output = output.append(ROW, ignore_index=True)
# output.to_csv(output_file, index=False, sep='@')
with open(output_file, 'a') as f:
string = ROW.acc_code.iloc[0]+'@'
string += ROW.organism.iloc[0]+'@'
string += ROW.EC_code.iloc[0]+'@'
string += ROW.species.iloc[0]+'@'
string += ROW.note.iloc[0]+'@'
string += str(ROW.pI.iloc[0])+'@'
string += str(ROW.GRAVY.iloc[0])+'@'
string += str(ROW.I_index.iloc[0])+'@'
string += str(ROW.A_index.iloc[0])+'@'
string += str(ROW.TM_index.iloc[0])
string += '\n'
f.write(string)
def write_seq_output(output_file):
"""Write new sequence information output file. Returns associated
PANDAS dataframe.
"""
output = pd.DataFrame(columns=['acc_code', 'organism', 'EC_code',
'species', 'note', 'pI', 'GRAVY',
'I_index', 'A_index', 'TM_index'])
output.to_csv(output_file, index=False, sep='@')
return output
def check_sequence(sequence_string):
"""Check sequence string for unknown or non-natural amino acids.
Returns True if only natural AA is found.
"""
nat_AA = [
"G", "P", "V", "A", "L", "I", "M", "C", "F", "Y", "W", "H",
"R", "K", "Q", "T", "D", "N", "S", "E"
]
for AA in sequence_string:
if AA not in nat_AA:
return False
return True
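# For orientation, the aliphatic index imported above from
# ercollect.rs_protein_analysis follows Ikai (1980):
# AI = X(Ala) + 2.9*X(Val) + 3.9*(X(Ile) + X(Leu)), with X in mole percent.
# A minimal reference sketch of that formula (the imported implementation
# may differ in details):
def aliphatic_index_reference(seq):
    n = float(len(seq))
    x_ala = 100.0 * seq.count('A') / n
    x_val = 100.0 * seq.count('V') / n
    x_il = 100.0 * (seq.count('I') + seq.count('L')) / n
    return x_ala + 2.9 * x_val + 3.9 * x_il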
def get_no_seq(FASTA_file):
"""Get number of sequences in a FASTA file from number of '>'.
"""
seq = 0
with open(FASTA_file, 'r') as f:
for line in f.readlines():
if '>' in line:
seq += 1
return seq
def get_fasta_sequence_properties(output_file, fasta_file):
"""Get sequence properties for all reaction systems with an associated
protein sequence.
Currently applied to only SABIO DB.
Properties:
- pI: we do not consider the possibility of modifications here.
(Biopython: http://biopython.org/DIST/docs/api/
Bio.SeqUtils.ProtParam-pysrc.html)
- instability index:
(Biopython: http://biopython.org/DIST/docs/api/
Bio.SeqUtils.ProtParam-pysrc.html)
- aliphatic index:
(code from: https://github.com/ddofer/ProFET/blob
/master/ProFET/feat_extract/ProtFeat.py)
Under GNU GPL
- GRAVY:
(Biopython: http://biopython.org/DIST/docs/api/
Bio.SeqUtils.ProtParam-pysrc.html)
Keywords:
output_dir (str) - directory to output reaction system files
"""
if input('load existing data? (t/f)') == 't':
# load existing data from this FASTA file
if exists(output_file) is True:
output = read_seq_output(output_file)
else:
write_seq_output(output_file)
output = read_seq_output(output_file)
else:
# overwrite output file
write_seq_output(output_file)
output = read_seq_output(output_file)
print('-------------------------------------------------------')
print('doing calculations...')
# need to fix the FASTA output format so BIOPYTHON can read it
file_mod = fix_fasta(FASTA_file=fasta_file)
total_start_time = time.time()
total_seq = get_no_seq(FASTA_file=file_mod)
print_opt = arange(0, total_seq, 1000)
total_seq_done = 0
# iterate through sequences in FASTA file
done = list(output.acc_code)
del output
with open(file_mod, "r") as handle:
generator = SeqIO.parse(
handle,
"fasta",
alphabet=IUPAC.protein
)
for i, record in enumerate(generator):
total_seq_done += 1
record_list = record.description.split("|")
# collect information on sequence and sequence
# should be a unique descriptor
acc_code = record_list[0].lstrip().rstrip()
organism = record_list[1].lstrip().rstrip()
EC_code = record_list[2].lstrip().rstrip()
species = record_list[3].lstrip().rstrip()
note = record_list[4]
if acc_code in done:
continue
seq = record.seq
sequence_string = str(seq)
# check sequence string for unknown or non-natural amino acids
natural = check_sequence(sequence_string=sequence_string)
seq_obj = ProteinAnalysis(''.join(seq))
if natural is False:
continue
# do calculations
pI = seq_obj.isoelectric_point()
GRAVY = seq_obj.gravy()
I_index = seq_obj.instability_index()
A_index = calculate_seq_aliphatic_index(sequence_string)
TM_index = calculate_TM_index(seq_string=sequence_string)
ROW = pd.DataFrame({
'acc_code': acc_code,
'organism': organism,
'EC_code': EC_code,
'species': species,
'note': note,
'pI': pI,
'GRAVY': GRAVY,
'I_index': I_index,
'A_index': A_index,
'TM_index': TM_index
}, index=[0])
# save to output file
update_seq_output(output_file, ROW)
if i in print_opt:
print(
i+1, 'done of', total_seq,
'in %s seconds' % ('{0:.2f}'.format(
time.time() - total_start_time)
)
)
print(
'--- finished %s sequences in %s seconds ---'
% (
total_seq_done,
'{0:.2f}'.format(time.time() - total_start_time)
)
)
def dist_plot(fig, ax, name, xlim, xtitle, plot_suffix):
"""Standard plot properties for distributions.
"""
ax.tick_params(axis='both', which='major', labelsize=16)
ax.set_xlabel(xtitle, fontsize=16)
ax.set_ylabel('count', fontsize=16)
ax.set_xlim(xlim)
# legend
# ax.legend(fontsize=16)
fig.tight_layout()
fig.savefig("dist_"+name+"_"+plot_suffix+".pdf",
dpi=720, bbox_inches='tight')
def dist_Aindex(output, plot_suffix, EC):
"""
Plot distribution of protein Aindex for all sequences in FASTA file.
"""
fig, ax = plt.subplots(figsize=(8, 5))
width = 5
X_bins = arange(0, 150, width)
hist, bin_edges = histogram(a=list(output.A_index), bins=X_bins)
# output.GRAVY.plot.hist(bins=50,
# color='#607c8e')
# ax.plot(X_bins[:-1]+width/2, hist, c='k', lw='2')
ax.bar(bin_edges[:-1],
hist,
align='edge',
alpha=0.4, width=width,
color=EC_descriptions()[str(EC)][1],
edgecolor='k',
label=EC_descriptions()[str(EC)][0])
# AI specific visuals
ylim = ax.get_ylim()
ax.text(
10, max(ylim)/2 + 0.05*(max(ylim)/2), 'more stable',
fontsize=16
)
ax.arrow(
10, max(ylim)/2, 40, 0,
head_width=0.05*(max(ylim)/2), head_length=5, fc='k', ec='k'
)
# catalase_AI = 68
# ax.axvline(x=catalase_AI, c='r', alpha=1.0)
# urease_AI = 90.476
# ax.axvline(x=urease_AI, c='b', alpha=1.0)
dist_plot(fig, ax, name='Aindex', xlim=(0, 150),
xtitle='aliphatic index', plot_suffix=plot_suffix)
def dist_Iindex(output, plot_suffix, EC):
"""
Plot distribution of protein I index for all sequences in
FASTA file.
"""
fig, ax = plt.subplots(figsize=(8, 5))
width = 5
X_bins = arange(0, 150, width)
hist, bin_edges = histogram(a=list(output.I_index), bins=X_bins)
# output.GRAVY.plot.hist(bins=50,
# color='#607c8e')
# ax.plot(X_bins[:-1]+width/2, hist, c='k', lw='2')
ax.bar(bin_edges[:-1],
hist,
align='edge',
alpha=0.4, width=width,
color=EC_descriptions()[str(EC)][1],
edgecolor='k',
label=EC_descriptions()[str(EC)][0])
# instability specific visuals
# get ylim
ylim = ax.get_ylim()
ax.text(
51, max(ylim)/2 + 0.05*(max(ylim)/2), 'unstable', fontsize=16
)
ax.arrow(
50, max(ylim)/2, 30, 0,
head_width=0.05*(max(ylim)/2), head_length=4, fc='k', ec='k'
)
II_cutoff = 40
ax.axvline(x=II_cutoff, c='k', alpha=1.0, linestyle='--', lw=2)
# catalase_II = 27.010
# ax.axvline(x=catalase_II, c='r', alpha=1.0)
# urease_II = 31.75
# ax.axvline(x=urease_II, c='b', alpha=1.0)
dist_plot(fig, ax, name='Iindex', xlim=(0, 100),
xtitle='instability index', plot_suffix=plot_suffix)
def dist_TMindex(output, plot_suffix, EC):
"""
Plot distribution of protein TM index for all sequences in
FASTA file.
"""
fig, ax = plt.subplots(figsize=(8, 5))
width = 0.2
X_bins = arange(-5, 5.1, width)
hist, bin_edges = histogram(a=list(output.TM_index), bins=X_bins)
# output.GRAVY.plot.hist(bins=50,
# color='#607c8e')
# ax.plot(X_bins[:-1]+width/2, hist, c='k', lw='2')
ax.bar(bin_edges[:-1],
hist,
align='edge',
alpha=0.4, width=width,
color=EC_descriptions()[str(EC)][1],
edgecolor='k',
label=EC_descriptions()[str(EC)][0])
# melting temperature index specific visuals
TM_cutoff = (0, 1)
ax.axvspan(xmin=TM_cutoff[0], xmax=TM_cutoff[1], facecolor='grey',
alpha=0.2)
# catalase_TMI = 1.22
# ax.axvline(x=catalase_TMI, c='r', alpha=1.0)
# urease_TMI = 0.62
# ax.axvline(x=urease_TMI, c='b', alpha=1.0)
dist_plot(fig, ax, name='TMindex', xlim=(-5, 5),
xtitle='thermostability index', plot_suffix=plot_suffix)
def dist_pI(output, plot_suffix, EC):
"""
Plot distribution of protein pI for all sequences in FASTA file.
"""
fig, ax = plt.subplots(figsize=(8, 5))
width = 0.5
X_bins = arange(0, 14.1, width)
hist, bin_edges = histogram(a=list(output.pI), bins=X_bins)
# output.GRAVY.plot.hist(bins=50,
# color='#607c8e')
ax.bar(bin_edges[:-1],
hist,
align='edge',
alpha=0.4, width=width,
color=EC_descriptions()[str(EC)][1],
edgecolor='k',
label=EC_descriptions()[str(EC)][0])
# ax.plot(X_bins[:-1]+width/2, hist, c='k', lw='2')
dist_plot(fig, ax, name='pI', xlim=(0, 14),
xtitle='pI', plot_suffix=plot_suffix)
def dist_GRAVY(output, plot_suffix, EC):
"""
Plot distribution of protein GRAVY for all sequences in FASTA file.
"""
fig, ax = plt.subplots(figsize=(8, 5))
width = 0.05
X_bins = arange(-2, 2.2, width)
hist, bin_edges = histogram(a=list(output.GRAVY), bins=X_bins)
# output.GRAVY.plot.hist(bins=50,
# color='#607c8e')
ax.bar(bin_edges[:-1],
hist,
align='edge',
alpha=0.4, width=width,
color=EC_descriptions()[str(EC)][1],
edgecolor='k',
label=EC_descriptions()[str(EC)][0])
# ax.plot(X_bins[:-1]+width/2, hist, c='k', lw='2')
# GRAVY specific visuals
# ax.text(-1.45, 40, 'hydrophilic', fontsize=16)
# get ylim
ylim = ax.get_ylim()
ax.text(
0.55, max(ylim)/2 + 0.05*(max(ylim)/2),
'hydrophobic', fontsize=16
)
ax.arrow(
0.5, max(ylim)/2, 0.7, 0,
head_width=0.05*(max(ylim)/2), head_length=0.1, fc='k', ec='k'
)
# avg_GRAVY = -0.4
# ax.axvline(x=avg_GRAVY, c='grey', alpha=1.0, linestyle='--')
# catalase_GRAVY = -0.605
# ax.axvline(x=catalase_GRAVY, c='r', alpha=1.0)
# urease_GRAVY = -0.1524
# ax.axvline(x=urease_GRAVY, c='b', alpha=1.0)
dist_plot(fig, ax, name='GRAVY', xlim=(-1.5, 1.5),
xtitle='GRAVY', plot_suffix=plot_suffix)
def all_EC_violin_plot():
"""Do violin plots of all properties for all EC output files.
"""
properties = ['I_index', 'A_index', 'TM_index', 'pI', 'GRAVY']
prop_label = ['instability index', 'aliphatic index',
'TM index', 'pI', 'GRAVY']
prop_lim = [(0, 100), (0, 150), (-5, 5), (0, 14), (-1.5, 1.5)]
ECs = ['1', '2', '3', '4', '5', '6']
output_files = [i+'__BRENDA_sequences_output.csv' for i in ECs]
for i, prop in enumerate(properties):
print('doing', prop, '....')
fig, ax =
- is for electrons
# - Make a copy of energy so that the original dest['energy']
# does not change
energy = f_out['energy'].copy()
if scpot is not None:
# sign = -1 if dist.attrs['species'] == 'e' else 1
sign = -1
energy += (sign * J2eV * e * scpot)
# Low energy integration limit
# - Exclude data below the low-energy limit
# - xr.DataArray.integrate does not avoid NaNs
# - Fill with 0.0: these points sit at the front of the array, so they
# contribute zero area under trapezoidal integration
if E_low is not None:
mask = energy >= E_low
energy = energy.where(mask, 0.0)
f_out = f_out.where(mask, 0.0)
if E_high is not None:
mask = energy <= E_high
energy = energy.where(mask, 0.0)
f_out = f_out.where(mask, 0.0)
# Exclude measurements from below the spacecraft potential
# - Same reasoning as for low-energy integration limit
if scpot is not None:
mask = energy >= 0
energy = energy.where(mask, 0.0)
f_out = f_out.where(mask, 0.0)
if low_energy_extrapolation:
# Create a boundary point at zero energy, extrapolating the
# distribution to its lower physical limit. Since exactly zero
# energy cannot be measured, set the distribution to zero there.
# This changes the order of the dimensions, so they are transposed
# back below.
f_energy = xr.DataArray(np.zeros((1,)),
dims='energy_index',
coords={'energy': ('energy_index', [0,])})
# Append the extrapolated points to the distribution
f_out = xr.concat([f_energy, f_out], 'energy_index')
# Append the zero-energy point to the energy coordinate as well
e0 = xr.DataArray(np.zeros((1,)),
dims='energy_index',
coords={'energy': ('energy_index', [0,])})
energy = xr.concat([e0, energy], dim='energy_index')
if high_energy_extrapolation:
# Create a boundary point at infinite energy, extrapolating the
# distribution to its upper physical limit. Since infinite energy
# is impossible, set the distribution to zero there. This changes
# the order of the dimensions, so they are transposed back below.
f_energy = xr.DataArray(np.zeros((1,)),
dims='energy_index',
coords={'energy': ('energy_index', [np.inf,])})
# Append the extrapolated points to the distribution
f_out = xr.concat([f_out, f_energy], 'energy_index')
# Append the infinite-energy point to the energy coordinate as well
einf = xr.DataArray(np.array([np.inf]),
dims='energy_index',
coords={'energy': ('energy_index', [np.inf,])}
)
energy = xr.concat([energy, einf], dim='energy_index')
# Rearrange dimensions
# - Several functions depend on dimensions being ordered
# [time, phi, theta, energy], or, at the very least,
# having time as the leading dimension (for iterating)
try:
f_out = f_out.transpose('time', 'phi', 'theta', 'energy_index')
except ValueError:
f_out = f_out.transpose('time', 'phi_index', 'theta', 'energy_index')
energy = energy.transpose('time', ...)
# Energy extrapolation
# - Map the energy to range [0, 1]
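# - The substitution U = E/(E+E0) sends E=0 to U=0 and E=inf to U=1,
#   so the boundary points appended above receive finite coordinates
#   for the subsequent integration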
U = energy / (energy + E0)
U = U.where(np.isfinite(U), 1)
U = U.drop_vars('energy')
# Assign new coordinates
f_out = f_out.assign_coords(phi=np.deg2rad(f_out['phi']),
theta=np.deg2rad(f_out['theta']),
energy=energy,
U=U)
# Include metadata
f_out.attrs = dist.attrs
f_out.attrs['Energy_e0'] = E0
f_out.attrs['Lower_energy_integration_limit'] = E_low
f_out.attrs['Upper_energy_integration_limit'] = E_high
return f_out
def precond_params(sc, mode, level, optdesc,
start_date, end_date,
time=None):
'''
Gather parameters and data required to precondition the distribution
functions. Parameters are gathered from global attributes of the
corresponding FPI moments files and from the EDP spacecraft potential.
Parameters
----------
sc : str
Spacecraft ID: ('mms1', 'mms2', 'mms3', 'mms4')
mode : str
Instrument mode: ('fast', 'brst').
level : str
Data quality level.
optdesc : str
Optional descriptor: ('dis-dist' | 'des-dist')
start_date, end_date : `datetime.datetime`
Start and end of the data interval.
time : `xarray.DataArray`
Time that the spacecraft potential will be interpolated to
Returns
-------
precond_kwargs : dict
Keywords accepted by the *precondition* function
'''
# Get the moments files
sdc = api.MrMMS_SDC_API(sc, 'fpi', mode, level,
optdesc=optdesc[0:3]+'-moms',
start_date=start_date, end_date=end_date)
files = sdc.download()
# Read the global attributes containing integration parameters
cdf = cdfread.CDF(files[0])
E0 = cdf.attget('Energy_E0', entry=0)['Data']
E_low = cdf.attget('Lower_energy_integration_limit', entry=0)['Data']
E_high = cdf.attget('Upper_energy_integration_limit', entry=0)['Data']
low_E_extrap = cdf.attget('Low_energy_extrapolation', entry=0)['Data']
high_E_extrap = cdf.attget('High_energy_extrapolation', entry=0)['Data']
regex = re.compile(r'([0-9]+\.[0-9]+)')
try:
E_high = float(regex.match(E_high).group(1))
except AttributeError:
if E_high == 'highest energy step':
E_high = None
else:
raise AttributeError('Unable to parse high energy integration limit: '
'"{}"'.format(E_high))
# Get the spacecraft potential
edp_mode = mode if mode == 'brst' else 'fast'
scpot = edp.load_scpot(sc=sc, mode=edp_mode,
start_date=start_date, end_date=end_date)
if time is not None:
scpot = scpot['Vsc'].interp_like(time, method='nearest')
# Extract the data so that it is acceptable by precondition()
precond_kwargs = {'E0': float(regex.match(E0).group(1)),
'E_low': float(regex.match(E_low).group(1)),
'E_high': E_high,
'low_energy_extrapolation': low_E_extrap == 'Enabled',
'high_energy_extrapolation': high_E_extrap == 'Enabled',
'scpot': scpot}
return precond_kwargs
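# Typical usage (a sketch; `precondition` refers to the preconditioning
# function defined above and the argument values are illustrative):
#
#     kwargs = precond_params('mms1', 'brst', 'l2', 'des-dist',
#                             start_date, end_date, time=dist['time'])
#     f = precondition(dist, **kwargs)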
def species_to_mass(species):
'''
Return the mass (kg) of the given particle species.
Parameters
----------
species : str
Particle species: 'i' or 'e'
Returns
----------
mass : float
Mass of the given particle species
'''
if species == 'i':
mass = constants.m_p
elif species == 'e':
mass = constants.m_e
else:
        raise ValueError('Unknown species {}. Select "i" or "e".'
                         .format(species))
return mass
def density(dist):
'''
    Calculate number density from a time series of 3D distribution functions.
Parameters
----------
dist : `xarray.DataArray`
A time series of 3D distribution functions
Returns
-------
N : `xarray.DataArray`
Number density
'''
mass = species_to_mass(dist.attrs['species'])
if dist.attrs['mode'] == 'brst':
N = xr.concat([density_3D(f1, mass, dist.attrs['Energy_e0'])
for f1 in dist],
'time')
else:
N = density_4D(dist, mass, dist.attrs['Energy_e0'])
# Add metadata
N.name = 'N{}'.format(dist.attrs['species'])
N.attrs['long_name'] = ('Number density calculated by integrating the '
'distribution function.')
N.attrs['species'] = dist.attrs['species']
N.attrs['standard_name'] = 'number_density'
N.attrs['units'] = 'cm^-3'
return N
def entropy(dist):
'''
Calculate entropy from a time series of 3D velocity space
    distribution functions.
.. [1] <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., … <NAME>. (2019). Decomposition of
plasma kinetic entropy into position and velocity space and the
use of kinetic entropy in particle-in-cell simulations. Physics
of Plasmas, 26(8), 82903. https://doi.org/10.1063/1.5098888
Parameters
----------
    dist : `xarray.DataArray`
A time series of 3D distribution functions
Returns
-------
S : `xarray.DataArray`
Entropy
'''
mass = species_to_mass(dist.attrs['species'])
if dist.attrs['mode'] == 'brst':
S = xr.concat([entropy_3D(f1, mass, dist.attrs['Energy_e0'])
for f1 in dist],
'time')
else:
S = entropy_4D(dist, mass, dist.attrs['Energy_e0'])
S.name = 'S{}'.format(dist.attrs['species'])
S.attrs['long_name'] = 'Velocity space entropy density'
S.attrs['standard_name'] = 'entropy_density'
S.attrs['units'] = 'J/K/m^3 ln(s^3/m^6)'
return S
def epsilon(dist, dist_max=None, N=None, V=None, T=None):
'''
Calculate epsilon [1]_ from a time series of 3D velocity space
distribution functions.
    .. [1] Greco, A., Valentini, F., Servidio, S., &
           Matthaeus, W. H. (2012). Inhomogeneous kinetic effects related
           to intermittent magnetic discontinuities. Phys. Rev. E,
           86(6), 066405. https://doi.org/10.1103/PhysRevE.86.066405
Parameters
----------
dist : `xarray.DataArray`
A time series of 3D distribution functions
dist_max : `xarray.DataArray`
The maxwellian equivalent of `dist`. If not provided,
it is calculated
N : `xarray.DataArray`
Number density computed from `dist`. If not provided,
it is calculated
V : `xarray.DataArray`
Bulk velocity computed from `dist`. If not provided,
it is calculated
T : `xarray.DataArray`
Scalar temperature computed from `dist`. If not provided,
it is calculated
Returns
-------
e : `xarray.DataArray`
Epsilon parameter
'''
mass = species_to_mass(dist.attrs['species'])
if N is None:
N = density(dist)
if dist_max is None:
if V is None:
V = velocity(dist, N=N)
if T is None:
T = temperature(dist, N=N, V=V)
T = (T[:,0,0] + T[:,1,1] + T[:,2,2]) / 3.0
dist_max = maxwellian_distribution(dist, N, V, T)
if dist.attrs['mode'] == 'brst':
e = xr.concat([epsilon_3D(f1, mass, dist.attrs['Energy_e0'], f1_max, n1)
for f1, f1_max, n1 in zip(dist, dist_max, N)],
'time')
else:
e = epsilon_4D(dist, mass, dist.attrs['Energy_e0'], dist_max, N)
e.name = 'Epsilon{}'.format(dist.attrs['species'])
    e.attrs['long_name'] = 'Non-Maxwellianity parameter'
e.attrs['standard_name'] = 'epsilon'
e.attrs['units'] = '$(s/cm)^{3/2}$'
return e
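# Hedged usage sketch (an addition): epsilon() recomputes any moment it is
# not handed, so passing precomputed N, V, T, and the Maxwellian equivalent
# avoids integrating the distribution several times. `f` is assumed to be
# the output of precondition(); velocity(), temperature(), and
# maxwellian_distribution() are defined elsewhere in this module.
def _demo_epsilon(f):
    N = density(f)
    V = velocity(f, N=N)
    T = temperature(f, N=N, V=V)
    T_scalar = (T[:, 0, 0] + T[:, 1, 1] + T[:, 2, 2]) / 3.0
    f_max = maxwellian_distribution(f, N, V, T_scalar)
    return epsilon(f, dist_max=f_max, N=N, V=V, T=T)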
def information_loss(dist_max, dist, N=None, T=None):
'''
    Calculate the information loss between a measured distribution function
    and its Maxwellian equivalent.
    .. [1] Liang, H., Cassak, P. A., Servidio, S., Shay, M. A., Drake,
           J. F., Swisdak, M., … Delzanno, G. L. (2019). Decomposition of
           plasma kinetic entropy into position and velocity space and the
           use of kinetic entropy in particle-in-cell simulations. Physics
           of Plasmas, 26(8), 082903. https://doi.org/10.1063/1.5098888
    Parameters
    ----------
    dist_max : `xarray.DataArray`
        The Maxwellian equivalent of `dist`
    dist : `xarray.DataArray`
        A time series of 3D distribution functions
    N : `xarray.DataArray`
        Number density computed from `dist`
    T : `xarray.DataArray`
        Scalar temperature computed from `dist`
Returns
-------
init fails
# Define a task for testing
class FailInit(PipelineTask):
def init(self,a,b,c='hello',d=13,e=None):
raise Exception("Forced init to fail")
def setup(self):
result = "a=%s b=%s c=%s d=%s e=%s" \
% (self.args.a,
self.args.b,
self.args.c,
self.args.d,
self.args.e)
self.output.results.append(result)
self.assertRaises(PipelineError,
FailInit,
"This will fail on init",
"a",
"b")
def test_pipelinetask_requirements(self):
"""
PipelineTask: check task requirements
"""
# Define task for testing
class AppendTask(PipelineTask):
def init(self,*inputs):
self.add_output('result',list())
def setup(self):
for x in self.args.inputs:
                    self.output.result.append(x)
# Instantiate tasks
t1 = AppendTask("Task1",1,2)
t2 = AppendTask("Task2",3,4)
t3 = AppendTask("Task3",5,6)
# Check requirements on all tasks
self.assertEqual(t1.required_task_ids,[])
self.assertEqual(t2.required_task_ids,[])
self.assertEqual(t3.required_task_ids,[])
# Make second task depend on first
t2.requires(t1)
self.assertEqual(t1.required_task_ids,[])
self.assertEqual(t2.required_task_ids,[t1.id()])
self.assertEqual(t3.required_task_ids,[])
# Make third task depend on first and second
t3.requires(t1,t2)
self.assertEqual(t1.required_task_ids,[])
self.assertEqual(t2.required_task_ids,[t1.id()])
self.assertEqual(t3.required_task_ids,
sorted([t2.id(),t1.id()]))
def test_pipelinetask_requirements_as_ids(self):
"""
PipelineTask: check task requirements supplied as IDs
"""
# Define task for testing
class AppendTask(PipelineTask):
def init(self,*inputs):
self.add_output('result',list())
def setup(self):
for x in self.args.inputs:
                    self.output.result.append(x)
# Instantiate tasks
t1 = AppendTask("Task1",1,2)
t2 = AppendTask("Task2",3,4)
t3 = AppendTask("Task3",5,6)
# Check requirements on all tasks
self.assertEqual(t1.required_task_ids,[])
self.assertEqual(t2.required_task_ids,[])
self.assertEqual(t3.required_task_ids,[])
# Make second task depend on first
t2.requires_id(t1.id())
self.assertEqual(t1.required_task_ids,[])
self.assertEqual(t2.required_task_ids,[t1.id()])
self.assertEqual(t3.required_task_ids,[])
# Make third task depend on first and second
t3.requires_id(t1.id())
t3.requires_id(t2.id())
self.assertEqual(t1.required_task_ids,[])
self.assertEqual(t2.required_task_ids,[t1.id()])
self.assertEqual(t3.required_task_ids,
sorted([t2.id(),t1.id()]))
def test_pipelinetask_required_by(self):
"""
PipelineTask: check tasks required by others
"""
# Define task for testing
class AppendTask(PipelineTask):
def init(self,*inputs):
self.add_output('result',list())
def setup(self):
for x in self.args.inputs:
                    self.output.result.append(x)
# Instantiate tasks
t1 = AppendTask("Task1",1,2)
t2 = AppendTask("Task2",3,4)
t3 = AppendTask("Task3",5,6)
# Check requirements on all tasks
self.assertEqual(t1.required_task_ids,[])
self.assertEqual(t2.required_task_ids,[])
self.assertEqual(t3.required_task_ids,[])
# Make second and third task depend on first
t1.required_by(t2,t3)
self.assertEqual(t1.required_task_ids,[])
self.assertEqual(t2.required_task_ids,[t1.id()])
self.assertEqual(t3.required_task_ids,[t1.id()])
# Make third task depend on second
t2.required_by(t3)
self.assertEqual(t1.required_task_ids,[])
self.assertEqual(t2.required_task_ids,[t1.id()])
self.assertEqual(t3.required_task_ids,
sorted([t2.id(),t1.id()]))
def test_pipelinetask_implied_requirement_from_input_param(self):
"""
PipelineTask: check implied task requirements from inputs
"""
# Define task for testing
class AppendTask(PipelineTask):
def init(self,*inputs,**kws):
                self.add_output('result',PipelineParam(type=list))
def setup(self):
for x in self.args.inputs:
                    self.output.result.value.append(x)
# Instantiate tasks
t1 = AppendTask("Task1",1,2)
t2 = AppendTask("Task2",t1.output.result,4)
t3 = AppendTask("Task3",t1.output.result,extras=t2.output.result)
# Check requirements on both tasks
self.assertEqual(t1.required_task_ids,[])
self.assertEqual(t2.required_task_ids,[t1.id()])
self.assertEqual(t3.required_task_ids,
sorted([t2.id(),t1.id()]))
def test_pipelinetask_raise_exception_for_non_task_requirement(self):
"""
PipelineTask: raise exception if requirement is not a task
"""
        # Define task for testing
class AppendTask(PipelineTask):
def init(self,*inputs):
self.add_output('result',list())
def setup(self):
for x in self.args.inputs:
                    self.output.result.append(x)
# Instantiate task
t1 = AppendTask(1,2)
# Check initial requirements
self.assertEqual(t1.required_task_ids,[])
        # Raise exception by trying to add a non-task
# object as a requirement
self.assertRaises(PipelineError,
t1.requires,
"not_a_task")
def test_pipelinetask_no_commands(self):
"""
PipelineTask: run task with no commands
"""
# Define a task with no commands
class Add(PipelineTask):
def init(self,x,y):
self.add_output('result',list())
def setup(self):
self.output.result.append(self.args.x+self.args.y)
# Make a task instance
task = Add("Add two numbers",1,2)
# Check initial state
self.assertEqual(task.args.x,1)
self.assertEqual(task.args.y,2)
self.assertFalse(task.completed)
self.assertEqual(task.exit_code,None)
self.assertEqual(task.output.result,[])
# Run the task
task.run(sched=self.sched,
working_dir=self.working_dir,
asynchronous=False)
# Check final state
self.assertTrue(task.completed)
self.assertEqual(task.exit_code,0)
self.assertEqual(task.output.result,[3])
self.assertEqual(task.stdout,"")
def test_pipelinetask_with_commands(self):
"""
PipelineTask: run task with shell command
"""
# Define a task with a command
# Echoes text via shell command
class Echo(PipelineTask):
def init(self,s):
pass
def setup(self):
self.add_cmd(
PipelineCommandWrapper(
"Echo text","echo",self.args.s))
# Make a task instance
task = Echo("Echo string","Hello!")
# Check initial state
self.assertEqual(task.args.s,"Hello!")
self.assertFalse(task.completed)
self.assertEqual(task.exit_code,None)
self.assertFalse(task.output)
# Run the task
task.run(sched=self.sched,
working_dir=self.working_dir,
asynchronous=False)
# Check final state
self.assertTrue(task.completed)
self.assertEqual(task.exit_code,0)
self.assertFalse(task.output)
# Check stdout
# Should look like:
# #### COMMAND Echo text
# #### HOSTNAME popov
# #### USER pjb
# #### START Thu Aug 17 08:38:14 BST 2017
# #### CWD /tmp/dir
# Hello!
# #### END Thu Aug 17 08:38:14 BST 2017
# #### EXIT_CODE 0
stdout = task.stdout.split("\n")
self.assertEqual(len(stdout),9) # 9 = 8 + trailing newline
self.assertEqual(stdout[0],"#### COMMAND Echo text")
self.assertEqual(stdout[1],"#### HOSTNAME %s" % self._hostname())
self.assertEqual(stdout[2],"#### USER %s" % self._user())
self.assertTrue(stdout[3].startswith("#### START "))
self.assertEqual(stdout[4],"#### CWD %s" % self.working_dir)
self.assertEqual(stdout[5],"Hello!")
self.assertTrue(stdout[6].startswith("#### END "))
self.assertEqual(stdout[7],"#### EXIT_CODE 0")
def test_pipelinetask_with_multiple_commands(self):
"""
PipelineTask: run task with multiple shell commands
"""
# Define a task with a command
# Echoes text via shell command
class EchoMany(PipelineTask):
def init(self,*s):
pass
def setup(self):
for s in self.args.s:
self.add_cmd(
PipelineCommandWrapper(
"Echo text","echo",s))
# Make a task instance
task = EchoMany("Echo string","Hello!","Goodbye!")
# Check initial state
self.assertEqual(task.args.s,("Hello!","Goodbye!"))
self.assertFalse(task.completed)
self.assertEqual(task.exit_code,None)
self.assertFalse(task.output)
# Run the task
task.run(sched=self.sched,
working_dir=self.working_dir,
asynchronous=False)
# Check final state
self.assertTrue(task.completed)
self.assertEqual(task.exit_code,0)
self.assertFalse(task.output)
# Check stdout
# Should look like:
# #### COMMAND Echo text
# #### HOSTNAME popov
# #### USER pjb
# #### START Thu Aug 17 08:38:14 BST 2017
# #### CWD /tmp/dir
# Hello!
# #### END Thu Aug 17 08:38:14 BST 2017
# #### EXIT_CODE 0
# #### COMMAND Echo text
# #### HOSTNAME popov
# #### USER pjb
# #### START Thu Aug 17 08:38:14 BST 2017
# #### CWD /tmp/dir
# Goodbye!
# #### END Thu Aug 17 08:38:14 BST 2017
# #### EXIT_CODE 0
stdout = task.stdout.split("\n")
self.assertEqual(len(stdout),17) # 17 = 16 + trailing newline
self.assertEqual(stdout[0],"#### COMMAND Echo text")
self.assertEqual(stdout[1],"#### HOSTNAME %s" % self._hostname())
self.assertEqual(stdout[2],"#### USER %s" % self._user())
self.assertTrue(stdout[3].startswith("#### START "))
self.assertEqual(stdout[4],"#### CWD %s" % self.working_dir)
self.assertEqual(stdout[5],"Hello!")
self.assertTrue(stdout[6].startswith("#### END "))
self.assertEqual(stdout[7],"#### EXIT_CODE 0")
self.assertEqual(stdout[8],"#### COMMAND Echo text")
self.assertEqual(stdout[9],"#### HOSTNAME %s" % self._hostname())
self.assertEqual(stdout[10],"#### USER %s" % self._user())
self.assertTrue(stdout[11].startswith("#### START "))
self.assertEqual(stdout[12],"#### CWD %s" % self.working_dir)
self.assertEqual(stdout[13],"Goodbye!")
self.assertTrue(stdout[14].startswith("#### END "))
self.assertEqual(stdout[15],"#### EXIT_CODE 0")
def test_pipelinetask_with_batched_commands(self):
"""
PipelineTask: run task with batched shell commands
"""
# Define a task with a command
# Echoes text via shell command
class EchoMany(PipelineTask):
def init(self,*s):
pass
def setup(self):
for s in self.args.s:
self.add_cmd(
PipelineCommandWrapper(
"Echo text","echo",s))
# Make a task instance
task = EchoMany("Echo string",
"Hello!",
"Bonjour!",
"Takk!",
"Wilkommen!",
"Benvenuto!")
# Check initial state
self.assertEqual(task.args.s,
("Hello!",
"Bonjour!",
"Takk!",
"Wilkommen!",
"Benvenuto!"))
self.assertFalse(task.completed)
self.assertEqual(task.exit_code,None)
self.assertFalse(task.output)
# Run the task with batches
task.run(sched=self.sched,
working_dir=self.working_dir,
batch_size=2,
asynchronous=False)
# Check final state
self.assertTrue(task.completed)
self.assertEqual(task.exit_code,0)
self.assertFalse(task.output)
# Check stdout
# Should look like:
# #### COMMAND Batch commands for Echo string
# #### BATCH 1
# #### HOSTNAME popov
# #### USER pjb
# #### START Thu Aug 17 08:38:14 BST 2017
# #### CWD /tmp/dir
# Hello!
# Bonjour!
# #### END Thu Aug 17 08:38:14 BST 2017
# #### EXIT_CODE 0
# #### COMMAND Batch commands for Echo string
# #### BATCH 2
# #### HOSTNAME popov
# #### USER pjb
# #### START Thu Aug 17 08:38:14 BST 2017
# #### CWD /tmp/dir
# Takk!
# Wilkommen!
# #### END Thu Aug 17 08:38:14 BST 2017
# #### EXIT_CODE 0
# #### COMMAND Batch commands for Echo string
# #### BATCH 3
# #### HOSTNAME popov
# #### USER pjb
# #### START Thu Aug 17 08:38:14 BST 2017
# #### CWD /tmp/dir
# Benvenuto!
# #### END Thu Aug 17 08:38:14 BST 2017
# #### EXIT_CODE 0
stdout = task.stdout.split("\n")
self.assertEqual(len(stdout),30) # 30 = 29 + trailing newline
self.assertEqual(stdout[0],"#### COMMAND Batch commands for Echo "
"string")
self.assertEqual(stdout[1],"#### BATCH 1")
self.assertEqual(stdout[2],"#### HOSTNAME %s" % self._hostname())
self.assertEqual(stdout[3],"#### USER %s" % self._user())
self.assertTrue(stdout[4].startswith("#### START "))
self.assertEqual(stdout[5],"#### CWD %s" % self.working_dir)
self.assertEqual(stdout[6],"Hello!")
self.assertEqual(stdout[7],"Bonjour!")
self.assertTrue(stdout[8].startswith("#### END "))
self.assertEqual(stdout[9],"#### EXIT_CODE 0")
self.assertEqual(stdout[10],"#### COMMAND Batch commands for Echo "
"string")
self.assertEqual(stdout[11],"#### BATCH 2")
self.assertEqual(stdout[12],"#### HOSTNAME %s" % self._hostname())
self.assertEqual(stdout[13],"#### USER %s" % self._user())
self.assertTrue(stdout[14].startswith("#### START "))
self.assertEqual(stdout[15],"#### CWD %s" % self.working_dir)
self.assertEqual(stdout[16],"Takk!")
self.assertEqual(stdout[17],"Wilkommen!")
self.assertTrue(stdout[18].startswith("#### END "))
self.assertEqual(stdout[19],"#### EXIT_CODE 0")
self.assertEqual(stdout[20],"#### COMMAND Batch commands for Echo "
"string")
self.assertEqual(stdout[21],"#### BATCH 3")
self.assertEqual(stdout[22],"#### HOSTNAME %s" % self._hostname())
self.assertEqual(stdout[23],"#### USER %s" % self._user())
self.assertTrue(stdout[24].startswith("#### START "))
self.assertEqual(stdout[25],"#### CWD %s" % self.working_dir)
self.assertEqual(stdout[26],"Benvenuto!")
self.assertTrue(stdout[27].startswith("#### END "))
self.assertEqual(stdout[28],"#### EXIT_CODE 0")
def test_pipelinetask_with_batched_functions(self):
"""
PipelineTask: run task with batched functions
"""
# Define a task with a command
# Echoes text via Python function
class EchoMany(PipelineFunctionTask):
def init(self,*s):
pass
def setup(self):
for s in self.args.s:
self.add_call("Echo text",self.echo,s)
def echo(self,s):
print(s)
# Make a task instance
task = EchoMany("Echo string",
"Hello!",
"Bonjour!",
"Takk!",
"Wilkommen!",
"Benvenuto!")
# Check initial state
self.assertEqual(task.args.s,
("Hello!",
"Bonjour!",
"Takk!",
"Wilkommen!",
"Benvenuto!"))
self.assertFalse(task.completed)
self.assertEqual(task.exit_code,None)
self.assertFalse(task.output)
# Run the task with batches
task.run(sched=self.sched,
working_dir=self.working_dir,
batch_size=2,
asynchronous=False)
# Check final state
self.assertTrue(task.completed)
self.assertEqual(task.exit_code,0)
self.assertFalse(task.output)
# Check stdout
# Should look like:
# #### COMMAND Batch commands for Echo string
# #### BATCH 1
# #### HOSTNAME popov
# #### USER pjb
import torch
import math
import numpy as np
from matplotlib import path
import pdb
class BoxSampler(object):
def __init__(self,
RoI_number=1,
                 IoU_bin_bases=torch.tensor([0.5,0.6,0.7,0.8,0.9], dtype=torch.float),
                 IoU_weights=torch.tensor([0.73,0.12,0.15,0.05,0], dtype=torch.float),
IoU_limit_precision=1e-5):
super(BoxSampler,self).__init__()
'''
INPUTS:
RoI_number : Number of RoIs/boxes to generate
IoU_bin_bases : N dimensional tensor storing the lower bounds for the bins.
        Ex. [0.5, 0.6, 0.7, 0.8, 0.9] gives 5 bins, from [0.5, 0.6] up to [0.9, 1.0]
IoU_weights: N dimensional tensor storing the weights of the bins.
IoU_limit_precision: While drawing the limits for an IoU (e.g. see Fig.2 red curves),
        it sets the precision of the points. This is the part that makes the
algorithm a bit slower and needs an improvement.
'''
self.RoI_number=RoI_number
self.IoU_bin_bases=IoU_bin_bases
self.IoU_weights=IoU_weights
self.IoU_limit_precision=IoU_limit_precision
self.IoU_bin_tops=torch.cat([IoU_bin_bases[1:], torch.tensor([1.])])
self.bin_width=self.IoU_bin_tops-self.IoU_bin_bases
        # We assume that self.reference_box is a square. The following coordinates are preferred
        # since even when IoU=0.5, the limits remain positive (see Fig.2 or Fig.6 in the paper).
self.reference_box=[0.3, 0.3, 0.6, 0.6]
def isnan(self,x):
return x != x
def sample_single(self, B, IoUs, imgSize):
'''
Samples a set of bounding boxes for a given input BB.
INPUTS:
        B : Input BB (i.e. B in Alg.1 in the paper), a 5 dimensional tensor.
            A BB is represented by [TL_x, TL_y, BR_x, BR_y, gt_label]
IoUs : Set of IoU thresholds. T in Alg.1. A box is generated for each IoU.
imgSize : [width, height] of the image. Ensures that the generated box is in the image.
'''
#Normalize the input box such that it is shifted/scaled on the reference box
#that resides at [0.3, 0.3, 0.6, 0.6]. Save scale and shift, for renormalization
        #before returning. All operations are conducted within the [0, 1] range. Hence we do
        #not normalize the image; we normalize the boxes, owing to Theorems 1 and 2 in the paper.
inputBox, scale, shift=self.normalize(B.clone().detach().unsqueeze(0))
#BoundingBoxGenerator is doing exactly what Alg.1 in the paper achieves.
        #Given a GT/input BB and an IoU, it generates the boxes with the desired IoU.
#To make it more efficient, it generates sample_count boxes for
#a GT at once.
sample_count =IoUs.shape[0]
sampledBoxSet=self.BoundingBoxGenerator(inputBox.squeeze(), IoUs, sample_count)
#Given the generated boxes from a BB, now we map the generated boxes to the image by reshifting and rescaling.
sampledBoxSet=self.unnormalize(sampledBoxSet, scale[0], shift[0])
#Clamp the boxes from 0 and imgSize to ensure that they are in the image.
sampledBoxSet[:,[0,2]]=torch.clamp(sampledBoxSet[:,[0,2]], 0, imgSize[0])
sampledBoxSet[:,[1,3]]=torch.clamp(sampledBoxSet[:,[1,3]], 0, imgSize[1])
#Compute the bbox overlaps of the generated boxes.
generated_box_overlaps=self.computeBoxToBoxIoU(B.expand(sample_count,5)[:,:4], sampledBoxSet).squeeze()
return sampledBoxSet, generated_box_overlaps
def sample(self, inputBoxSet, imgSize):
'''
INPUTS:
inputBoxSet : Input BBs (i.e. ground truths-GTs in Alg.2 in the paper)
Mx5 dimensional tensor.
Each box is represented by [TL_x, TL_y, BR_x, BR_y, gt_label]
imgSize : [width, height] of an image
'''
#Normalize the input boxes such that all are shifted/scaled on the reference box
#that resides at [0.3, 0.3, 0.6, 0.6]. Save scales and shifts, for renormalization
        #before returning. All operations are conducted within the [0, 1] range. Hence we do
        #not normalize the image; we normalize the boxes, owing to Theorems 1 and 2.
inputBoxSet, scales, shifts=self.normalize(inputBoxSet)
boxNumber=inputBoxSet.size()[0]
#Annotations of the datasets may be incorrect especially for small objects.
#In some cases TL_x=BR_x (same for y). If there is such kind of very rare examples,
#then we catch the error here, and discard the corrupted annotation.
validIndices=torch.cuda.ByteTensor(boxNumber).fill_(1)
flag=0
for i in range(boxNumber):
if self.isnan(inputBoxSet[i,0]) or self.isnan(inputBoxSet[i,1]):
validIndices[i]=0
flag=1
if flag==1:
inputBoxSet = inputBoxSet[validIndices,:]
scales = scales[validIndices,:]
shifts = shifts[validIndices,:]
boxNumber=inputBoxSet.size()[0]
# InstanceAllocation determines:
# 1-perInputAllocation: Number of boxes to be generated for each gt. So, it is a boxNumber sized tensor.
# 2-positiveRoI_number: In some cases, number of boxes can be 1 or 2 more. So we keep the number of returned boxes.
# The sum of perInputAllocation should also provide this number.
# 3-inputBoxSetExtended: positiveRoI_numberx5 dimensional array for gts. Basically, each BB in inputBoxSet is
# duplicated for perInputAllocation[i] times. We use this info to validate/return the IoUs of
# generated boxes on computeBoxToBoxIoU function.
perInputAllocation, positiveRoI_number, inputBoxSetExtended =self.InstanceAllocation(inputBoxSet)
# Another question is the IoU distribution over the boxes. Having estimated the number of generated boxes
# for each GT, IoUAllocation assigns an IoU using the desired distribution (i.e. self.IoU_weights) for each box.
IoUSet=self.IoUAllocation(inputBoxSetExtended,positiveRoI_number)
#Initialize the necessary data structures to be returned
sampledBoxSet=torch.cuda.FloatTensor(positiveRoI_number,4).fill_(-1)
gt_inds=torch.cuda.LongTensor(positiveRoI_number).fill_(0)
indexPointer=0
for i in range(boxNumber):
#BoundingBoxGenerator is doing exactly what Alg.1 in the paper achieves.
            #Given a GT and an IoU, it generates the boxes with the desired IoU.
#To make it more efficient, it generates perInputAllocation[i] boxes for
#a GT at once.
sampledBoxSet[indexPointer:indexPointer+perInputAllocation[i],:]=self.BoundingBoxGenerator(inputBoxSet[i,:],\
IoUSet[indexPointer:indexPointer+perInputAllocation[i]],\
perInputAllocation[i])
            #Given the boxes generated from a GT (and the GT itself), map them back to the image by reshifting and rescaling.
sampledBoxSet[indexPointer:indexPointer+perInputAllocation[i],:]=self.unnormalize(sampledBoxSet[indexPointer:indexPointer+perInputAllocation[i],:], scales[i], shifts[i])
inputBoxSetExtended[indexPointer:indexPointer+perInputAllocation[i],:4] = self.unnormalize(inputBoxSetExtended[indexPointer:indexPointer+perInputAllocation[i],:4], scales[i], shifts[i])
#In mmdetection, the association between the boxes are tracked, hence we store the mapping.
gt_inds[indexPointer:indexPointer+perInputAllocation[i]]=i+1
#Update indexpointer to show next empty cell.
indexPointer+=perInputAllocation[i]
#Clamp the boxes from 0 and imgSize to ensure that they are in the image.
sampledBoxSet[:,[0,2]]=torch.clamp(sampledBoxSet[:,[0,2]], 0, imgSize[0])
sampledBoxSet[:,[1,3]]=torch.clamp(sampledBoxSet[:,[1,3]], 0, imgSize[1])
#Compute the bbox overlaps of the generated boxes.
generated_box_overlaps=self.computeBoxToBoxIoU(inputBoxSetExtended[:,:4],sampledBoxSet).squeeze()
return sampledBoxSet, inputBoxSetExtended[:,-1].type(torch.cuda.LongTensor),generated_box_overlaps,gt_inds
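    # Hedged usage sketch (comment only, as we are inside the class body; a
    # CUDA device is assumed since the sampler allocates torch.cuda tensors).
    # Ground truths are [TL_x, TL_y, BR_x, BR_y, gt_label] rows:
    #
    #     sampler = BoxSampler(RoI_number=16)
    #     gts = torch.tensor([[ 50.,  60., 150., 180., 1.],
    #                         [200., 220., 300., 330., 2.]]).cuda()
    #     rois, labels, ious, gt_inds = sampler.sample(gts, imgSize=[640, 480])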
def normalize(self, boxes):
#Compute shifts
shifts = boxes[:,[0,1]]
#Compute scales
scales = (torch.cat(((boxes[:,2]-boxes[:,0]).unsqueeze(1), (boxes[:,3]-boxes[:,1]).unsqueeze(1)),1))/(self.reference_box[2]-self.reference_box[0])
#All the boxes are normalized to reference box.
        #One can safely replace the following two lines by assigning boxes[:,:4] to the reference box.
boxes[:,[0,2]]=(boxes[:,[0,2]]-shifts[:,0].unsqueeze(1))/scales[:,0].unsqueeze(1)+self.reference_box[0]
boxes[:,[1,3]]=(boxes[:,[1,3]]-shifts[:,1].unsqueeze(1))/scales[:,1].unsqueeze(1)+self.reference_box[1]
return boxes, scales, shifts
def unnormalize(self, boxes,scales,shifts):
#self.reference_box[1] will work also, for different reference boxes please correct here.
boxes[:,:4]-=self.reference_box[0]
#Map the normalized boxes to the image coordinates
boxes[:,[0,2]]=boxes[:,[0,2]]*scales[0]+shifts[0]
boxes[:,[1,3]]=boxes[:,[1,3]]*scales[1]+shifts[1]
return boxes
def InstanceAllocation(self,inputBoxSet):
        #Determine the number of classes and ensure that the sampling is balanced over classes
#instead of the instances. Note that this idea originates from OFB sampling in the paper.
#Here BB generator generates class-balanced examples. Hence determine perClassAllocation
# in this manner.
classes=torch.unique(inputBoxSet[:,-1])
classNumber=classes.size()[0]
perClassAllocation=math.ceil(self.RoI_number/classNumber)
#Count the number of instances from each class
classIndices=torch.cuda.FloatTensor(classNumber,inputBoxSet.size()[0]).fill_(0)
for i in range(classNumber):
classIndices[i,:]=inputBoxSet[:,-1]==classes[i]
classCounts=torch.sum(classIndices,1)
#Distribute the perClassAllocation over instances of each class equally
perInstanceAllocation=torch.ceil(perClassAllocation/classCounts)
#count the total number of positive examples determined in this fashion
positiveRoI_number=torch.sum(classCounts*perInstanceAllocation).int()
extendedInputBoxSet=torch.cuda.FloatTensor(positiveRoI_number,5).fill_(0)
instanceNumber=inputBoxSet.size()[0]
indexTracker=0
perInputAllocation=torch.cuda.FloatTensor(inputBoxSet.size()[0]).fill_(0)
for i in range(instanceNumber):
index=classes==inputBoxSet[i,-1]
extendedInputBoxSet[indexTracker:indexTracker+perInstanceAllocation[index].int()]=inputBoxSet[i,:].expand(perInstanceAllocation[index].int(),5)
indexTracker+=perInstanceAllocation[index].int()
perInputAllocation[i]=perInstanceAllocation[index].int()
# if positiveRoI_number>self.RoI_number:
# delete_idx=torch.multinomial(perInstanceAllocation,positiveRoI_number-self.RoI_number,replacement=False)
# pdb.set_trace()
# delete_idx=torch.randint(positiveRoI_number, [positiveRoI_number-self.RoI_number])
return perInputAllocation.int(), positiveRoI_number.item(), extendedInputBoxSet
def IoUAllocation(self,inputBoxSet, positiveRoI_number):
#Determine the number of examples to be sampled from each bin
IoUIndices=torch.multinomial(self.IoU_weights,positiveRoI_number,replacement=True)
        #Sample the exact IoUs considering the bin length and base of each bin
IoUSet=(self.IoU_bin_bases[IoUIndices]+torch.rand(positiveRoI_number)*self.bin_width[IoUIndices]).cuda()
#If IoU is larger than 0.95, then it can be problematic during sampling, so set it to 0.95 for stability.
IoUSet[IoUSet>0.95]=0.95
return IoUSet
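    # Hedged mini-example (comment only) of the two draws above:
    # torch.multinomial picks a bin index with probability proportional to
    # IoU_weights, then a uniform draw places the IoU inside that bin:
    #
    #     bases   = torch.tensor([0.5, 0.6, 0.7, 0.8, 0.9])
    #     weights = torch.tensor([0.73, 0.12, 0.15, 0.05, 0.])
    #     idx  = torch.multinomial(weights, 4, replacement=True)  # e.g. [0, 0, 2, 1]
    #     ious = bases[idx] + torch.rand(4) * 0.1                 # inside each bin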
def findBottomRightMaxBorders(self,inputBox, IoU, boxArea,proposedx1,proposedy1):
xA = torch.max(proposedx1, inputBox[0])#alpha
yA = torch.max(proposedy1, inputBox[1])
xB = inputBox[2]
yB = inputBox[3]
I=torch.clamp(xB - xA,min=0) * torch.clamp(yB - yA,min=0)
limitLeftX=IoU*boxArea+xA*IoU*(inputBox[3]-yA)+xA*(inputBox[3]-yA)-IoU*proposedx1*(inputBox[3]-proposedy1)
limitLeftX/=((IoU+1)*(inputBox[3]-yA)-IoU*(inputBox[3]-proposedy1))
limitRightX=(I/IoU-boxArea+I)/(inputBox[3]-proposedy1)
limitRightX+=proposedx1
limitTopY=IoU*boxArea+IoU*(inputBox[2]-xA)*yA+yA*(inputBox[2]-xA)-IoU*proposedy1*(inputBox[2]-proposedx1)
limitTopY/=((IoU+1)*(inputBox[2]-xA)-IoU*(inputBox[2]-proposedx1))
limitBottomY=(I/IoU-boxArea+I)/(inputBox[2]-proposedx1)
limitBottomY+=proposedy1
return limitLeftX,limitRightX,limitTopY,limitBottomY
def findBottomRightBorders(self,inputBox, IoU, boxArea,proposedx1,proposedy1,limitLeftX,limitRightX,limitTopY,limitBottomY):
xA = torch.max(proposedx1, inputBox[0])#alpha
yA = torch.max(proposedy1, inputBox[1])
xB = inputBox[2]
yB = inputBox[3]
I=torch.clamp(xB - xA,min=0) * torch.clamp(yB - yA,min=0)
y2TR=torch.arange(limitTopY, inputBox[3]+self.IoU_limit_precision, step=self.IoU_limit_precision).cuda()
yBnew = torch.min(y2TR, inputBox[3])
Inew=torch.clamp(xB - xA,min=0) * torch.clamp(yBnew - yA,min=0)
x2TR=(Inew/IoU-boxArea+Inew)/(y2TR-proposedy1)
x2TR+=proposedx1
x2BR=torch.arange(limitRightX, inputBox[2]-self.IoU_limit_precision, step=-self.IoU_limit_precision).cuda()
y2BR=(I/IoU-boxArea+I)/(x2BR-proposedx1)
y2BR+=proposedy1
y2BL=torch.arange(limitBottomY, inputBox[3]-self.IoU_limit_precision, step=-self.IoU_limit_precision).cuda()
yBnew = torch.min(y2BL, inputBox[3])
x2BL=IoU*boxArea+xA*IoU*(yBnew-yA)+xA*(yBnew-yA)-IoU*proposedx1*(y2BL-proposedy1)
x2BL/=((IoU+1)*(yBnew-yA)-IoU*(y2BL-proposedy1))
x2TL=torch.arange(limitLeftX, inputBox[2]+self.IoU_limit_precision, step=self.IoU_limit_precision).cuda()
xBnew = torch.min(x2TL, inputBox[2])
y2TL=IoU*boxArea+IoU*(xBnew-xA)*yA+yA*(xBnew-xA)-IoU*proposedy1*(x2TL-proposedx1)
y2TL/=((IoU+1)*(xBnew-xA)-IoU*(x2TL-proposedx1))
x2=torch.cat((x2TR,x2BR,x2BL,x2TL))
y2=torch.cat((y2TR,y2BR,y2BL,y2TL))
bottomRightBorders=torch.cat((x2.unsqueeze(1),1-y2.unsqueeze(1)),1)
return bottomRightBorders
def findTopLeftPointBorders(self,inputBox, IoU,boxArea):
        #Top Right
y1TR=torch.arange((((inputBox[3]*(IoU-1))+ inputBox[1])/IoU), inputBox[1], step=self.IoU_limit_precision).cuda()
x1TR=inputBox[2]-(boxArea/(IoU*(inputBox[3]-y1TR)))
inv_idx = torch.arange(y1TR.size(0)-1, -1, -1).long()
y1TR = y1TR[inv_idx]
x1TR = x1TR[inv_idx]
        #Bottom Right
x1BR=torch.arange(inputBox[0], inputBox[2]-IoU*(inputBox[2]-inputBox[0]), step=self.IoU_limit_precision).cuda()
I=(inputBox[2]-x1BR)*(inputBox[3]-inputBox[1])
y1BR=inputBox[3]-(I/IoU-boxArea+I)/(inputBox[2]-x1BR)
        #Bottom Left
y1BL=torch.arange(inputBox[1], inputBox[3]-(boxArea*IoU)/(inputBox[2]-inputBox[0]), step=self.IoU_limit_precision).cuda()
x1BL=inputBox[2]-((boxArea*IoU)/((inputBox[3]-y1BL)))
        #Top Left
y1TL=torch.arange(inputBox[1], inputBox[3]-(boxArea*IoU)/(inputBox[2]-inputBox[0]), step=self.IoU_limit_precision).cuda()
I=(inputBox[2]-inputBox[0])*(inputBox[3]-y1TL)
x1TL=inputBox[2]-(I/IoU-boxArea+I)/(inputBox[3]-y1TL)
inv_idx = torch.arange(y1TL.size(0)-1, -1, -1).long()
y1TL = y1TL[inv_idx]
x1TL = x1TL[inv_idx]
x1=torch.cat((x1TR, x1BR,x1BL,x1TL))
y1=torch.cat((y1TR, y1BR,y1BL,y1TL))
P=torch.cat((x1.unsqueeze(1),1-y1.unsqueeze(1)),1)
return P
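    # Note (added): the border points are returned as (x, 1 - y) pairs; the
    # y-flip presumably matches the orientation expected by the
    # point-in-polygon test (matplotlib.path, imported above) used when
    # sampling corner locations.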
def BoundingBoxGenerator(self, inputBox, IoUSet, numBoxes):
sampledBox=torch.cuda.FloatTensor(numBoxes,4).fill_(-1)
boxArea=(inputBox[3]-inputBox[1])*(inputBox[2]-inputBox[0])
box=inputBox
for i in range(numBoxes):
#In order to prevent bias for a single corner, decide which corner to pick first
beta=self.beta_psf)
# Define the Flux
psf = psf.withFlux(self.psf_flux)
# Define the shear
psf = psf.shear(g1=e1s[it], g2=e2s[it])
# Draw the PSF on a vignet
image_epsf = gs.ImageF(self.image_size, self.image_size)
# Define intrapixel shift (uniform distribution in [-0.5,0.5])
rand_shift = np.random.rand(2) - 0.5
psf.drawImage(image=image_epsf, offset=rand_shift,
scale=self.pix_scale)
# Generate Gaussian noise for the PSF
# sigma_noise = 0
# gaussian_noise = gs.GaussianNoise(sigma=sigma_noise)
# Before adding the noise, we measure the ellipticity components
my_moments = gs.hsm.FindAdaptiveMom(image_epsf)
new_e1_HSM[it] = my_moments.observed_shape.g1
new_e2_HSM[it] = my_moments.observed_shape.g2
new_sig_HSM[it] = my_moments.moments_sigma
# Add Gaussian noise to the PSF
# image_epsf.addNoise(gaussian_noise)
new_vignets[it, :, :] = image_epsf.array
new_masks = self.handle_SExtractor_mask(new_vignets, thresh=-1e5)
# Build the dictionary
train_dic = {'VIGNET_LIST': new_vignets,
'GLOB_POSITION_IMG_LIST': self.positions,
'MASK_LIST': new_masks, 'CCD_ID_LIST': self.ccd_list,
'TRUE_E1_HSM': new_e1_HSM, 'TRUE_E2_HSM': new_e2_HSM,
'TRUE_SIG_HSM': new_sig_HSM}
# Save the fits file
mccd.mccd_utils.save_fits(train_dic,
train_bool=True,
cat_id=self.catalog_id,
output_path=self.output_path)
if self.save_realisation:
# Save the exposure object realisation
cat_id_str = "%07d" % self.catalog_id
save_str = self.output_path + 'exposure_sim' + '-' + \
cat_id_str + '.npy'
np.save(save_str, self.exposure_sim)
def generate_test_data(self, grid_pos_bool=False, x_grid=5, y_grid=10):
r"""Generate the test dataset and save it into a fits file.
Parameters
        ----------
        grid_pos_bool: bool
            If True, place the test positions on a regular grid;
            otherwise draw them at random.
        x_grid: int
Horizontal number of elements of the testing grid in one CCD.
y_grid: int
Vertical number of elements of the testing grid in one CCD.
"""
# Generate positions (on the grid or at random places)
if grid_pos_bool:
self.init_grid_positions(x_grid, y_grid)
else:
self.init_random_positions()
# Calculate the ellipticities on the testing positions
test_e1s, test_e2s, test_fwhms = self.exposure_sim.interpolate_values(
self.positions[:, 0], self.positions[:, 1])
# Verify the max fwhm variations
# We need to scale the variation range
test_fwhms = self.scale_fwhms(test_fwhms)
# Define the constant shape of the stars (before the shearing)
# sigma_vect = np.sqrt(test_r2s/2)
# test_fwhms = (2 * np.sqrt(2 * np.log(2))) * sigma_vect
# Generate the vignets
test_vignets = np.zeros(
(test_e1s.shape[0], self.image_size, self.image_size))
test_e1_HSM = np.zeros(test_e1s.shape)
test_e2_HSM = np.zeros(test_e1s.shape)
test_sig_HSM = np.zeros(test_e1s.shape)
for it in range(test_e1s.shape[0]):
# PSF generation. Define size
psf = gs.Moffat(fwhm=test_fwhms[it],
beta=self.beta_psf)
# Define the Flux
psf = psf.withFlux(self.psf_flux)
# Define the shear
psf = psf.shear(g1=test_e1s[it], g2=test_e2s[it])
# Draw the PSF on a vignet
image_epsf = gs.ImageF(self.image_size, self.image_size)
psf.drawImage(image=image_epsf, scale=self.pix_scale)
# Before adding the noise, we measure the ellipticity components
my_moments = gs.hsm.FindAdaptiveMom(image_epsf)
test_e1_HSM[it] = my_moments.observed_shape.g1
test_e2_HSM[it] = my_moments.observed_shape.g2
test_sig_HSM[it] = my_moments.moments_sigma
test_vignets[it, :, :] = image_epsf.array
# Build the masks
test_masks = self.handle_SExtractor_mask(test_vignets, thresh=-1e5)
# Build the dictionary
test_dic = {'VIGNET_LIST': test_vignets,
'GLOB_POSITION_IMG_LIST': self.positions,
'MASK_LIST': test_masks, 'CCD_ID_LIST': self.ccd_list,
'TRUE_E1_HSM': test_e1_HSM, 'TRUE_E2_HSM': test_e2_HSM,
'TRUE_SIG_HSM': test_sig_HSM}
# Save the fits file
mccd.mccd_utils.save_fits(test_dic,
train_bool=False,
cat_id=self.catalog_id,
output_path=self.output_path)
@staticmethod
def handle_SExtractor_mask(stars, thresh):
r"""Handle SExtractor masks.
Reads SExtracted star stamps, generates MCCD-compatible masks
(that is, binary weights), and replaces bad pixels with 0s - they will
not be used by MCCD, but the ridiculous numerical values can
otherwise still lead to problems because of convolutions.
"""
mask = np.ones(stars.shape)
mask[stars < thresh] = 0
stars[stars < thresh] = 0
return mask
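# Hedged standalone sketch (an addition) mirroring handle_SExtractor_mask():
# pixels below the threshold get zero weight and are zeroed in place. The
# array values are placeholders.
def _demo_sextractor_mask():
    stars = np.array([[[1.0, -1e30], [2.0, 3.0]]])
    mask = np.ones(stars.shape)
    mask[stars < -1e5] = 0
    stars[stars < -1e5] = 0
    return mask, stars  # mask[0, 0, 1] == 0 and stars[0, 0, 1] == 0.0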
class MomentInterpolator(object):
r"""Allow to interpolate moments from a bin image.
Bin image like the one from the MeanShapes function.
Notes
-----
Hard-coded for the CFIS convention!
"""
def __init__(
self,
moment_map,
n_neighbors=1000,
rbf_function='thin_plate',
loc2glob=None
):
r"""Initialize class attributes."""
# Save variables
if loc2glob is None:
self.loc2glob = mccd.mccd_utils.Loc2Glob()
else:
self.loc2glob = loc2glob
self.n_neighbors = n_neighbors
self.rbf_function = rbf_function
self.moment_map = np.zeros(moment_map.shape)
self.x_pix = self.loc2glob.x_npix
self.y_pix = self.loc2glob.y_npix
# Define parameters
self.n_ccd = moment_map.shape[0] # 40
self.x_grid = moment_map.shape[1] # 20
self.y_grid = moment_map.shape[2] # 40
self.bin_x = self.x_pix / self.x_grid
self.bin_y = self.y_pix / self.y_grid
# Correct MegaCam origin conventions
for ccd_it in range(self.n_ccd):
for it_x in range(self.x_grid):
for it_y in range(self.y_grid):
if ccd_it < 18 or ccd_it in [36, 37]:
# swap x axis so origin is on top-right
x = it_x
y = it_y
else:
# swap y axis so origin is on bottom-left
x = self.x_grid - it_x - 1
y = self.y_grid - it_y - 1
self.moment_map[ccd_it, x, y] = moment_map[ccd_it, it_x,
it_y]
# Generate local generic grid
x_lin = np.linspace(start=self.bin_x / 2,
stop=self.x_pix - self.bin_x / 2, num=self.x_grid)
y_lin = np.linspace(start=self.bin_y / 2,
stop=self.y_pix - self.bin_y / 2, num=self.y_grid)
xv, yv = np.meshgrid(x_lin, y_lin, indexing='ij')
self.xv = xv
self.yv = yv
# Generate global positions for the bins
self.x_pos = np.zeros(moment_map.shape)
self.y_pos = np.zeros(moment_map.shape)
for ccd_it in range(self.n_ccd):
x_glob, y_glob = self.loc2glob.loc2glob_img_coord(
ccd_n=ccd_it,
x_coor=np.copy(self.xv.flatten()),
y_coor=np.copy(self.yv.flatten()))
self.x_pos[ccd_it, :, :] = x_glob.reshape(self.x_grid,
self.y_grid)
self.y_pos[ccd_it, :, :] = y_glob.reshape(self.x_grid,
self.y_grid)
def interpolate_position(self, target_x, target_y):
r"""Interpolate positions."""
# Calculate distances
res_x = self.x_pos.flatten() - target_x
res_y = self.y_pos.flatten() - target_y
dist = np.sqrt(res_x ** 2 + res_y ** 2)
# Select bins to use. The n_neighbors closest positions
sort_idxs = np.argsort(dist)[:self.n_neighbors]
# Extract values
x_pos_interp = self.x_pos.flatten()[sort_idxs]
y_pos_interp = self.y_pos.flatten()[sort_idxs]
val_interp = self.moment_map.flatten()[sort_idxs]
# Generate the interpolation function
rbf = Rbf(x_pos_interp, y_pos_interp,
val_interp, function=self.rbf_function)
output_val = rbf(target_x, target_y)
return output_val
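# Hedged usage sketch (an addition): interpolating a binned moment map at a
# single global focal-plane position. The (40, 20, 40) shape and the random
# map are placeholders following the hard-coded CFIS convention noted above.
def _demo_moment_interpolator():
    moment_map = np.random.rand(40, 20, 40)  # (n_ccd, x_grid, y_grid)
    interp = MomentInterpolator(moment_map, n_neighbors=200)
    return interp.interpolate_position(1500., -2000.)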
class AtmosphereGenerator(object):
r""" Generate atmospheric variations.
    This class generates a random atmospheric contribution in terms of
    ellipticity and size.
The simulation is done using the Von Karman model for isotropic
atmospheric turbulence. We use the model's 2D power spectrum to generate
a realisation of the atmosphere of the dimensions of our focal plane.
The parameter `theta_zero` of the model, also known as the outer scale,
is by default fixed for th CFHT telescope based on the results of
Heymans et al. 2012 (DOI: 10.1111/j.1365-2966.2011.20312.x).
Parameters
----------
theta_zero: float
Outer scale parameter of the Von Karman model. In arcsec.
r_trunc: float
Gaussian truncation parameter of the power spectrum. In arcsec.
ngrid: int
Number of grid points to use for our power spectrum realisation.
Should be a power of 2.
map_std: float
Standard deviation of our realisation.
pix_scale: float
Pixel scale of our instrument. In arcsec/pixel.
loc2glob: object
The object that allows to do the coordinate conversion from local to
global. It is specific for each instrument's focal plane geometry.
If is ``None`` it defaults to the CFIS MegaCam instrument.
Default is ``None``.
"""
def __init__(
self,
theta_zero=3. * 60,
r_trunc=1.,
ngrid=8192,
map_std=0.008,
pix_scale=0.187,
loc2glob=None
):
# Variables initialised
self.theta_zero = theta_zero
self.r_trunc = r_trunc
self.pix_scale = pix_scale # arcsec/pixel
self.ngrid = ngrid # 2048 # 4096 # 8192
self.map_std = map_std
if loc2glob is None:
self.loc2glob = mccd.mccd_utils.Loc2Glob()
else:
self.loc2glob = loc2glob
# Other variables to initialise
self.my_ps = None
self.total_arcsec = None
self.grid_spacing = None
self.g1 = None
self.g2 = None
self.kappa = None
# Initialise powerspectrum (might be slow)
self.init_powerspectrum()
def power_fun(self, freq):
r""" Von Karman power function.
Parameters should be in arcsec.
Heymans' parameter for the CFHT telescope is in the range
[2.62, 3.22] arcmin.
"""
# theta = self.theta_zero
# r = self.r_trunc
return (freq**2 + 1 / (self.theta_zero**2))**(- 11 / 6) * \
np.exp(-freq**2 * (self.r_trunc**2))
def init_powerspectrum(self):
r""" Initialise the powerspectrum. """
        # We need to have the whole area of the focal plane expressed in arcsec.
# Get the maximum values for the global positions (in pixels])
min_x, max_x = self.loc2glob.x_coord_range()
min_y, max_y = self.loc2glob.y_coord_range()
# Max absolute value in pixels
# This gives us the maximum value of a square [-max_val, max_val]^2
max_val = np.max(abs(np.array([max_x, min_x, max_y, min_y])))
# Convert to total arcsec. As it is the total we need to multiply by 2.
self.total_arcsec = 2 * max_val * self.pix_scale
        # For CFIS this gives ~ 4676.06 arcsec
# We want to use a power of 2 for the FFTs so we fix the `ngrid`
# variable (recommended 8192) and we adjust the `grid_spacing`
# `grid_spacing` is in arcsec/grid_point
self.grid_spacing = self.total_arcsec / self.ngrid
# Create the powerspectrum instance
self.my_ps = gs.PowerSpectrum(
e_power_function=self.power_fun,
b_power_function=self.power_fun)
# Generate grid points of the powerspectrum
self.g1, self.g2, self.kappa = self.my_ps.buildGrid(
grid_spacing=self.grid_spacing,
ngrid=self.ngrid,
get_convergence=True,
bandlimit='soft',
variance=self.map_std**2)
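    # Worked numbers for the defaults above (illustrative, assuming MegaCam's
    # focal plane spans roughly +/- 12503 pixels): total_arcsec
    # ~= 2 * 12503 * 0.187 ~= 4676 arcsec, so with ngrid = 8192 the grid
    # spacing comes out to ~= 4676 / 8192 ~= 0.57 arcsec per grid point.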
def regenerate_atmosphere(self):
r""" Generate a new random atmosphere."""
self.init_powerspectrum()
def interpolate_position(self, target_x, target_y):
r""" Get the ellipticity and size factor for a target position.
It is recommended to calculate with 1D arrays as it is much faster.
Parameters
----------
        target_x: 1D np.ndarray or
#
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: EPL-2.0
#
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""NCF framework to train and evaluate the NeuMF model.
The NeuMF model assembles both MF and MLP models under the NCF framework. Check
`neumf_model.py` for more details about the models.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import gc
import heapq
import math
import multiprocessing
import os
import signal
import typing
import time
from datetime import datetime
# pylint: disable=g-bad-import-order
import numpy as np
from absl import app as absl_app
from absl import flags
import tensorflow as tf
# pylint: enable=g-bad-import-order
from official.datasets import movielens
from official.recommendation import constants as rconst
from official.recommendation import data_preprocessing
from official.recommendation import neumf_model
from official.utils.flags import core as flags_core
from official.utils.logs import hooks_helper
from official.utils.logs import logger
from official.utils.misc import distribution_utils
from official.utils.misc import model_helpers
_TOP_K = 10 # Top-k list for evaluation
# keys for evaluation metrics
_HR_KEY = "HR"
_NDCG_KEY = "NDCG"
class LoggerHook(tf.train.SessionRunHook):
""" Logs runtime. """
def begin(self):
self._step = -1
self._displayed_steps = 0
self._total_recommendations_per_sec = 0
self._total_duration = 0
def before_run(self, run_context):
self._step += 1
self._start_time = time.time()
def after_run(self, run_context, run_values):
duration = time.time() - self._start_time
# Display benchmarking metrics
display_every = 100 if FLAGS.dataset == 'ml-1m' else 50000
if self._step != 0 and self._step % display_every == 0:
recommendations_per_sec = FLAGS.batch_size / duration
self._displayed_steps += 1
self._total_recommendations_per_sec += recommendations_per_sec
self._total_duration += duration
format_str = ('%s: step %d, %.1f recommendations/sec, %.5f msec/batch')
print(format_str % (datetime.now(), self._step, recommendations_per_sec, duration * 1000))
def end(self, run_context):
print('Average recommendations/sec across %d steps: %.1f (%.5f msec/batch)' %
(self._step, self._total_recommendations_per_sec / self._displayed_steps,
(self._total_duration * 1000) / self._displayed_steps))
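# Hedged usage sketch (comment only): LoggerHook plugs into Estimator.train()
# like any other SessionRunHook; FLAGS.batch_size and FLAGS.dataset are the
# absl flags defined elsewhere in this script.
#
#   estimator.train(input_fn=train_input_fn, hooks=[LoggerHook()])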
def get_hit_rate_and_ndcg(predicted_scores_by_user, items_by_user, top_k=_TOP_K,
match_mlperf=False):
"""Returns the hit rate and the normalized DCG for evaluation.
`predicted_scores_by_user` and `items_by_user` are parallel NumPy arrays with
shape (num_users, num_items) such that `predicted_scores_by_user[i, j]` is the
predicted score that user `i` would rate item `items_by_user[i][j]`.
`items_by_user[i, 0]` is the item that user `i` interacted with, while
  `items_by_user[i, 1:]` are items that user `i` did not interact with. The goal
of the NCF model to give a high score for `predicted_scores_by_user[i, 0]`
compared to `predicted_scores_by_user[i, 1:]`, and the returned HR and NDCG
will be higher the more successful the model is at this goal.
If `match_mlperf` is True, then the HR and NDCG computations are done in a
slightly unusual way to match the MLPerf reference implementation.
Specifically, if `items_by_user[i, :]` contains duplicate items, it will be
treated as if the item only appeared once. Effectively, for duplicate items in
a row, the predicted score for all but one of the items will be set to
  -infinity.
  For example, suppose we have the following inputs:
predicted_scores_by_user: [[ 2, 3, 3],
[ 5, 4, 4]]
items_by_user: [[10, 20, 20],
[30, 40, 40]]
top_k: 2
Then with match_mlperf=True, the HR would be 2/2 = 1.0. With
match_mlperf=False, the HR would be 1/2 = 0.5. This is because each user has
predicted scores for only 2 unique items: 10 and 20 for the first user, and 30
  and 40 for the second. Therefore, with match_mlperf=True, it's guaranteed the
  first item's score is in the top 2. With match_mlperf=False, this function
  would compute that the first user's first item is not in the top 2, because item 20
has a higher score, and item 20 occurs twice.
Args:
predicted_scores_by_user: 2D Numpy array of the predicted scores.
`predicted_scores_by_user[i, j]` is the predicted score that user `i`
would rate item `items_by_user[i][j]`.
items_by_user: 2d numpy array of the item IDs. For user `i`,
      `items_by_user[i][0]` is the item that user `i` interacted with, while
      `items_by_user[i, 1:]` are items that user `i` did not interact
with.
top_k: Only consider the highest rated `top_k` items per user. The HR and
NDCG for that user will only be nonzero if the predicted score for that
user's first item is in the `top_k` top scores.
match_mlperf: If True, compute HR and NDCG slightly differently to match the
MLPerf reference implementation.
Returns:
(hr, ndcg) tuple of floats, averaged across all users.
"""
num_users = predicted_scores_by_user.shape[0]
zero_indices = np.zeros((num_users, 1), dtype=np.int32)
if match_mlperf:
predicted_scores_by_user = predicted_scores_by_user.copy()
items_by_user = items_by_user.copy()
# For each user, sort the items and predictions by increasing item number.
# We use mergesort since it's the only stable sort, which we need to be
# equivalent to the MLPerf reference implementation.
sorted_items_indices = items_by_user.argsort(kind="mergesort")
sorted_items = items_by_user[
np.arange(num_users)[:, np.newaxis], sorted_items_indices]
sorted_predictions = predicted_scores_by_user[
np.arange(num_users)[:, np.newaxis], sorted_items_indices]
# For items that occur more than once in a user's row, set the predicted
# score of the subsequent occurrences to -infinity, which effectively
# removes them from the array.
diffs = sorted_items[:, :-1] - sorted_items[:, 1:]
diffs = np.concatenate(
[np.ones((diffs.shape[0], 1), dtype=diffs.dtype), diffs], axis=1)
predicted_scores_by_user = np.where(diffs, sorted_predictions, -np.inf)
# After this block, `zero_indices` will be a (num_users, 1) shaped array
# indicating, for each user, the index of item of value 0 in
# `sorted_items_indices`. This item is the one we want to check if it is in
# the top_k items.
zero_indices = np.array(np.where(sorted_items_indices == 0))
assert np.array_equal(zero_indices[0, :], np.arange(num_users))
zero_indices = zero_indices[1, :, np.newaxis]
  # NumPy has an np.argpartition() method, however log(1000) is so small that
# sorting the whole array is simpler and fast enough.
  top_indices = np.argsort(predicted_scores_by_user, axis=1)[:, -top_k:]
  top_indices = np.flip(top_indices, axis=1)
# Both HR and NDCG vectorized computation takes advantage of the fact that if
# the positive example for a user is not in the top k, that index does not
# appear. That is to say: hit_ind.shape[0] <= num_users
  hit_ind = np.argwhere(np.equal(top_indices, zero_indices))
hr = hit_ind.shape[0] / num_users
ndcg = np.sum(np.log(2) / np.log(hit_ind[:, 1] + 2)) / num_users
return hr, ndcg
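# Hedged check (an addition) of the docstring example above: with
# match_mlperf=True the duplicated items are suppressed, so each user's true
# item is guaranteed to sit in the top-2 and HR = 1.0; with
# match_mlperf=False the first user's item 10 is pushed out of the top-2 by
# the two copies of item 20, giving HR = 0.5.
def _demo_hit_rate():
  scores = np.array([[2., 3., 3.],
                     [5., 4., 4.]])
  items = np.array([[10, 20, 20],
                    [30, 40, 40]])
  hr_mlperf, _ = get_hit_rate_and_ndcg(scores, items, top_k=2, match_mlperf=True)
  hr_plain, _ = get_hit_rate_and_ndcg(scores, items, top_k=2, match_mlperf=False)
  return hr_mlperf, hr_plain  # (1.0, 0.5)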
def evaluate_model(estimator, ncf_dataset, pred_input_fn):
# type: (tf.estimator.Estimator, prepare.NCFDataset, typing.Callable) -> dict
"""Model evaluation with HR and NDCG metrics.
The evaluation protocol is to rank the test interacted item (truth items)
among the randomly chosen 999 items that are not interacted by the user.
The performance of the ranked list is judged by Hit Ratio (HR) and Normalized
Discounted Cumulative Gain (NDCG).
For evaluation, the ranked list is truncated at 10 for both metrics. As such,
the HR intuitively measures whether the test item is present on the top-10
list, and the NDCG accounts for the position of the hit by assigning higher
scores to hits at top ranks. Both metrics are calculated for each test user,
and the average scores are reported.
Args:
estimator: The Estimator.
ncf_dataset: An NCFDataSet object, which contains the information about
test/eval dataset, such as:
num_users: How many unique users are in the eval set.
test_data: The points which are used for consistent evaluation. These
are already included in the pred_input_fn.
pred_input_fn: The input function for the test data.
Returns:
eval_results: A dict of evaluation results for benchmark logging.
eval_results = {
_HR_KEY: hr,
_NDCG_KEY: ndcg,
tf.GraphKeys.GLOBAL_STEP: global_step
}
    where hr is a float giving the average HR score across all users,
    ndcg is a float giving the average NDCG score across all users,
    and global_step
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import sys
import math
import numbers
import warnings
import collections
import numpy as np
from PIL import Image
from numpy import sin, cos, tan
import paddle
from . import functional_pil as F_pil
from . import functional_cv2 as F_cv2
from . import functional_tensor as F_t
__all__ = []
def _is_pil_image(img):
return isinstance(img, Image.Image)
def _is_tensor_image(img):
return isinstance(img, paddle.Tensor)
def _is_numpy_image(img):
return isinstance(img, np.ndarray) and (img.ndim in {2, 3})
def to_tensor(pic, data_format='CHW'):
"""Converts a ``PIL.Image`` or ``numpy.ndarray`` to paddle.Tensor.
See ``ToTensor`` for more details.
Args:
pic (PIL.Image|np.ndarray): Image to be converted to tensor.
data_format (str, optional): Data format of output tensor, should be 'HWC' or
'CHW'. Default: 'CHW'.
Returns:
Tensor: Converted image. Data type is same as input img.
Examples:
.. code-block:: python
import numpy as np
from PIL import Image
from paddle.vision.transforms import functional as F
fake_img = (np.random.rand(256, 300, 3) * 255.).astype('uint8')
fake_img = Image.fromarray(fake_img)
tensor = F.to_tensor(fake_img)
print(tensor.shape)
"""
if not (_is_pil_image(pic) or _is_numpy_image(pic) or
_is_tensor_image(pic)):
raise TypeError(
'pic should be PIL Image or Tensor Image or ndarray with dim=[2 or 3]. Got {}'.
format(type(pic)))
if _is_pil_image(pic):
return F_pil.to_tensor(pic, data_format)
elif _is_numpy_image(pic):
return F_cv2.to_tensor(pic, data_format)
else:
return pic if data_format.lower() == 'chw' else pic.transpose((1, 2, 0))
def resize(img, size, interpolation='bilinear'):
"""
    Resizes the image to the given size.
Args:
        img (PIL.Image|np.ndarray): Image to be resized.
size (int|list|tuple): Target size of input data, with (height, width) shape.
        interpolation (int|str, optional): Interpolation method. When using the PIL backend,
            supported methods are as follows:
- "nearest": Image.NEAREST,
- "bilinear": Image.BILINEAR,
- "bicubic": Image.BICUBIC,
- "box": Image.BOX,
- "lanczos": Image.LANCZOS,
- "hamming": Image.HAMMING
            when using the cv2 backend, supported methods are as follows:
- "nearest": cv2.INTER_NEAREST,
- "bilinear": cv2.INTER_LINEAR,
- "area": cv2.INTER_AREA,
- "bicubic": cv2.INTER_CUBIC,
- "lanczos": cv2.INTER_LANCZOS4
Returns:
PIL.Image or np.array: Resized image.
Examples:
.. code-block:: python
import numpy as np
from PIL import Image
from paddle.vision.transforms import functional as F
fake_img = (np.random.rand(256, 300, 3) * 255.).astype('uint8')
fake_img = Image.fromarray(fake_img)
converted_img = F.resize(fake_img, 224)
print(converted_img.size)
# (262, 224)
converted_img = F.resize(fake_img, (200, 150))
print(converted_img.size)
# (150, 200)
"""
if not (_is_pil_image(img) or _is_numpy_image(img) or
_is_tensor_image(img)):
raise TypeError(
'img should be PIL Image or Tensor Image or ndarray with dim=[2 or 3]. Got {}'.
format(type(img)))
if _is_pil_image(img):
return F_pil.resize(img, size, interpolation)
elif _is_tensor_image(img):
return F_t.resize(img, size, interpolation)
else:
return F_cv2.resize(img, size, interpolation)
def pad(img, padding, fill=0, padding_mode='constant'):
"""
Pads the given PIL.Image or numpy.array on all sides with specified padding mode and fill value.
Args:
img (PIL.Image|np.array): Image to be padded.
padding (int|list|tuple): Padding on each border. If a single int is provided this
is used to pad all borders. If list/tuple of length 2 is provided this is the padding
on left/right and top/bottom respectively. If a list/tuple of length 4 is provided
this is the padding for the left, top, right and bottom borders
respectively.
fill (float, optional): Pixel fill value for constant fill. If a tuple of
length 3, it is used to fill R, G, B channels respectively.
This value is only used when the padding_mode is constant. Default: 0.
padding_mode: Type of padding. Should be: constant, edge, reflect or symmetric. Default: 'constant'.
- constant: pads with a constant value, this value is specified with fill
- edge: pads with the last value on the edge of the image
- reflect: pads with reflection of image (without repeating the last value on the edge)
padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode
will result in [3, 2, 1, 2, 3, 4, 3, 2]
- symmetric: pads with reflection of image (repeating the last value on the edge)
padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode
will result in [2, 1, 1, 2, 3, 4, 4, 3]
Returns:
PIL.Image or np.array: Padded image.
Examples:
.. code-block:: python
import numpy as np
from PIL import Image
from paddle.vision.transforms import functional as F
fake_img = (np.random.rand(256, 300, 3) * 255.).astype('uint8')
fake_img = Image.fromarray(fake_img)
padded_img = F.pad(fake_img, padding=1)
print(padded_img.size)
padded_img = F.pad(fake_img, padding=(2, 1))
print(padded_img.size)
"""
if not (_is_pil_image(img) or _is_numpy_image(img) or
_is_tensor_image(img)):
raise TypeError(
'img should be PIL Image or Tensor Image or ndarray with dim=[2 or 3]. Got {}'.
format(type(img)))
if _is_pil_image(img):
return F_pil.pad(img, padding, fill, padding_mode)
elif _is_tensor_image(img):
return F_t.pad(img, padding, fill, padding_mode)
else:
return F_cv2.pad(img, padding, fill, padding_mode)
def crop(img, top, left, height, width):
"""Crops the given Image.
Args:
img (PIL.Image|np.array): Image to be cropped. (0,0) denotes the top left
corner of the image.
top (int): Vertical component of the top left corner of the crop box.
left (int): Horizontal component of the top left corner of the crop box.
height (int): Height of the crop box.
width (int): Width of the crop box.
Returns:
PIL.Image or np.array: Cropped image.
Examples:
.. code-block:: python
import numpy as np
from PIL import Image
from paddle.vision.transforms import functional as F
fake_img = (np.random.rand(256, 300, 3) * 255.).astype('uint8')
fake_img = Image.fromarray(fake_img)
cropped_img = F.crop(fake_img, 56, 150, 200, 100)
print(cropped_img.size)
"""
if not (_is_pil_image(img) or _is_numpy_image(img) or
_is_tensor_image(img)):
raise TypeError(
'img should be PIL Image or Tensor Image or ndarray with dim=[2 or 3]. Got {}'.
format(type(img)))
if _is_pil_image(img):
return F_pil.crop(img, top, left, height, width)
elif _is_tensor_image(img):
return F_t.crop(img, top, left, height, width)
else:
return F_cv2.crop(img, top, left, height, width)
def center_crop(img, output_size):
"""Crops the given Image and resize it to desired size.
Args:
img (PIL.Image|np.array): Image to be cropped. (0,0) denotes the top left corner of the image.
output_size (sequence or int): (height, width) of the crop box. If int,
it is used for both directions
Returns:
PIL.Image or np.array: Cropped image.
Examples:
.. code-block:: python
import numpy as np
from PIL import Image
from paddle.vision.transforms import functional as F
fake_img = (np.random.rand(256, 300, 3) * 255.).astype('uint8')
fake_img = Image.fromarray(fake_img)
cropped_img = F.center_crop(fake_img, (150, 100))
print(cropped_img.size)
"""
if not (_is_pil_image(img) or _is_numpy_image(img) or
_is_tensor_image(img)):
raise TypeError(
'img should be PIL Image or Tensor Image or ndarray with dim=[2 or 3]. Got {}'.
format(type(img)))
if _is_pil_image(img):
return F_pil.center_crop(img, output_size)
elif _is_tensor_image(img):
return F_t.center_crop(img, output_size)
else:
return F_cv2.center_crop(img, output_size)
def hflip(img):
"""Horizontally flips the given Image or np.array.
Args:
img (PIL.Image|np.array): Image to be flipped.
Returns:
PIL.Image or np.array: Horizontally flipped image.
Examples:
.. code-block:: python
import numpy as np
from PIL import Image
from paddle.vision.transforms import functional as F
fake_img = (np.random.rand(256, 300, 3) * 255.).astype('uint8')
fake_img = Image.fromarray(fake_img)
flipped_img = F.hflip(fake_img)
print(flipped_img.size)
"""
if not (_is_pil_image(img) or _is_numpy_image(img) or
_is_tensor_image(img)):
raise TypeError(
'img should be PIL Image or Tensor Image or ndarray with dim=[2 or 3]. Got {}'.
format(type(img)))
if _is_pil_image(img):
return F_pil.hflip(img)
elif _is_tensor_image(img):
return F_t.hflip(img)
else:
return F_cv2.hflip(img)
def vflip(img):
"""Vertically flips the given Image or np.array.
Args:
img (PIL.Image|np.array): Image to be flipped.
Returns:
PIL.Image or np.array: Vertically flipped image.
Examples:
.. code-block:: python
import numpy as np
from PIL import Image
from paddle.vision.transforms import functional as F
fake_img = (np.random.rand(256, 300, 3) * 255.).astype('uint8')
fake_img = Image.fromarray(fake_img)
flipped_img = F.vflip(fake_img)
print(flipped_img.size)
"""
if not (_is_pil_image(img) or _is_numpy_image(img) or
_is_tensor_image(img)):
raise TypeError(
'img should be PIL Image or Tensor Image or ndarray with dim=[2 or 3]. Got {}'.
format(type(img)))
if _is_pil_image(img):
return F_pil.vflip(img)
elif _is_tensor_image(img):
return F_t.vflip(img)
else:
return F_cv2.vflip(img)
def adjust_brightness(img, brightness_factor):
"""Adjusts brightness of an Image.
Args:
img (PIL.Image|np.array|paddle.Tensor): Image to be adjusted.
brightness_factor (float): How much to adjust the brightness. Can be any non-negative number. 0 gives a black image, 1 gives the original image, and 2 doubles the brightness.
from __future__ import division
from cctbx.array_family import flex
from cctbx import sgtbx
from cctbx.crystal.find_best_cell import alternative_find_best_cell
from cctbx.sgtbx import cosets
from cctbx import crystal
from cctbx import miller
import cctbx.sgtbx.lattice_symmetry
import cctbx.sgtbx.cosets
from scitbx import matrix
from scitbx.python_utils import graph_tools
from libtbx.utils import Sorry
import sys
def reference_setting_choices(space_group):
# we used to have
cyclic_permutations = ['x,y,z',
'y,z,x',
'z,x,y' ]
adams_group = sgtbx.space_group_info(
group=space_group.build_derived_group(False, False))
space_group = sgtbx.space_group_info(group=space_group)
# please check that we have something in reference setting
# just to make sure that the thing is used for its original purpose
assert space_group.is_reference_setting()
info = []
identity_op = sgtbx.change_of_basis_op('x,y,z').c().r()
for cyclic_permutation in cyclic_permutations:
cob_op = sgtbx.change_of_basis_op(cyclic_permutation)
transformed_adams_group = adams_group.change_basis(cob_op)
transformed_space_group = space_group.change_basis(cob_op)
cob_to_ref_sg = transformed_space_group.\
change_of_basis_op_to_reference_setting()
cob_to_ref_pg = transformed_adams_group.\
change_of_basis_op_to_reference_setting()
adams_norm = False
space_norm = False
# check if the rotation part of the cb_op to ref is
# the identity operator
# if hall symbols are equal, sg's are equal
if (identity_op == cob_to_ref_pg.c().r()):
adams_norm=True
if (identity_op == cob_to_ref_sg.c().r()):
space_norm=True
info_tuple = (cob_op, cob_to_ref_sg, adams_norm, space_norm)
info.append(info_tuple)
possible_additional_transforms = []
# of course, we have to take the identity operator into account
possible_additional_transforms.append(info[0][0]*info[0][1])
for ii in info:
if ii[2]: # should fall in the adams normalizer
if not ii[3]: # should NOT fall in the space normalizer
# cob should ONLY be applied on unit cell, not to the sg.
possible_additional_transforms.append(ii[0])
return possible_additional_transforms
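# Illustrative usage sketch (not part of the original module): the symbol
# below is an example choice; any space group already in its reference
# setting works. Composing change-of-basis ops with `*` and printing via
# c().as_xyz() follows standard sgtbx usage.
def _example_reference_setting_choices():
    sg = sgtbx.space_group_info('P 2 2 2').group()
    for cb_op in reference_setting_choices(sg):
        print cb_op.c().as_xyz()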
def coset_lookup(pg_low,
pg_high):
coset = cosets.left_decomposition(g=pg_high,
h=pg_low)
full_cosets = []
for set in coset.partitions:
if (set[0].r().determinant()>0):
tmp = []
for symop in set:
tmp.append(symop.r().as_hkl())
full_cosets.append(tmp)
return full_cosets
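# Illustrative usage sketch (not part of the original module): example
# symbols only; pg_low must be a subgroup of pg_high for the left
# decomposition to succeed. Each printed item is a list of hkl strings.
def _example_coset_lookup():
    pg_low = sgtbx.space_group_info('P 2').group()
    pg_high = sgtbx.space_group_info('P 2 2 2').group()
    for ops in coset_lookup(pg_low, pg_high):
        print ops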
class sub_super_point_group_relations(object):
def __init__(self,
sg_low,
sg_high,
enforce_point_groups=True):
self.enforce_point_groups=enforce_point_groups
if enforce_point_groups:
assert (sg_low == sg_low.build_derived_point_group())
assert (sg_high == sg_high.build_derived_point_group())
self.sg_low = sg_low
self.sg_high = sg_high
self.symops=[]
self.grouped_symops = []
self.grouped_index = []
self.sg_groups = []
self.left_over_symops= []
self.get_symops_from_supergroup_that_are_not_in_subgroup(sg_high)
self.assemble_symops()
self.find_left_over_symops()
def get_symops_from_supergroup_that_are_not_in_subgroup(self, sg_high):
coset = cosets.left_decomposition(g=sg_high,
h=self.sg_low)
for set in coset.partitions[1:]:
if self.enforce_point_groups:
if set[0].r().determinant()>0:
self.symops.append(set[0])
else:
self.symops.append(set[0])
def assemble_symops(self):
t_den = self.sg_low.t_den()
r_den = self.sg_low.r_den()
# loop over all symops
for item, symop in enumerate(self.symops):
tmp_symops = []
tmp_indices = []
# multiply in the symop in the space group
check_sg = sgtbx.space_group(self.sg_low)
check_sg.expand_smx(symop.new_denominators(r_den, t_den))
# Check if this SG is already in the list
assert check_sg != self.sg_low
if check_sg not in self.sg_groups:
# add sg to list
self.sg_groups.append(check_sg)
tmp_symops.append(symop)
tmp_indices.append(item)
# check if the other symops generate the same sg please
for check_item, check_op in enumerate(self.symops):
if check_sg.contains(check_op.new_denominators(r_den,t_den)):
# add symop to list if it is not in there yet
if check_op not in tmp_symops:
tmp_symops.append(check_op)
# add index to list
if check_item not in tmp_indices:
tmp_indices.append(check_item)
self.grouped_symops.append(tmp_symops)
self.grouped_index.append(tmp_indices)
def find_left_over_symops(self):
# this function gives the left over symops after
# assuming a certain supergroup
for set, group in zip(self.grouped_symops,
self.sg_groups):
if len(set)>0:
select = []
for item, symop in enumerate(self.symops):
if symop not in set:
select.append(symop)
self.left_over_symops.append(select)
else:
self.left_over_symops.append([])
def return_next_set(self):
for set in self.grouped_symops:
yield set
def return_next_index(self):
for iset in self.grouped_index:
yield iset
def return_next_sg(self):
for sg in self.sg_groups:
yield sg
def return_next_left_over_set(self):
for missing_set in self.left_over_symops:
yield missing_set
def show(self, out=None):
if out is None:
out = sys.stdout
print >> out, (
"Input subgroup : %s" % sgtbx.space_group_info(group=self.sg_low))
print >> out, (
"Input lattice group : %s" % sgtbx.space_group_info(group=self.sg_high))
print >> out
print >> out
for set,group,leftover in zip(self.grouped_symops,
self.sg_groups,
self.left_over_symops):
assert(len(set)+len(leftover)==len(self.symops))
print >> out, (
"Supergroup : %s" % sgtbx.space_group_info(group=group))
print >> out, " Used symops:"
for symop in set:
print >> out, " (%s) "%(symop.r().as_hkl())
print >> out
print >> out, " Left over symops:"
for symop in leftover:
if symop is not None:
print >> out, " (%s) "%(symop.r().as_hkl())
else:
print >> out, " None"
print >> out
print >> out
class edge_object(object):
def __init__(self, used, unused,as_xyz=False):
# This object characterises a spacegroup transformation
# by listing: used symops, unused symops
self.symops_used = used
self.symops_unused = unused
self.as_xyz = as_xyz
def return_used(self):
for symop in self.symops_used:
yield symop
def return_unused(self):
for symop in self.symops_unused:
yield symop
def __repr__(self):
repr = str()
repr += " using: "
for symop in self.symops_used:
if self.as_xyz:
repr+="("+symop.as_xyz()+") "
else:
repr+="("+symop.r().as_hkl()+") "
repr +=" symops left: " +str(len(self.symops_unused))
return repr
def __str__(self):
repr = str()
repr += " using: "
for symop in self.symops_used:
if self.as_xyz:
repr+="("+symop.as_xyz()+") "
else:
repr+="("+symop.r().as_hkl()+") "
repr +=" symops left: " +str(len(self.symops_unused))
return repr
class point_group_graph(object):
def __init__(self,
pg_low,
pg_high,
enforce_point_group=True,
as_xyz = False):
# It is rather important (I think) to make sure
# that point groups are supplied. This might prevent later surprises.
# Hopefully.
self.as_xyz = as_xyz
low_point_group_check = (pg_low == pg_low.build_derived_point_group())
if enforce_point_group:
if not low_point_group_check:
raise Sorry("Input spacegroup not a point group")
high_point_group_check = (pg_high == pg_high.build_derived_point_group())
if enforce_point_group:
if not high_point_group_check:
raise Sorry("Input spacegroup not a point group")
self.assert_pg = enforce_point_group
self.graph = graph_tools.graph()
self.pg_low = pg_low
self.pg_high = pg_high
self.queue = [] # the queue used in building the space_group
self.build_it()
del self.queue # you have no business looking at this object,
# so I delete it .. ;-)
self.graph.assert_is_clean()
def build_it(self):
# we start by putting the spacegroup on the queue
self.queue.append(self.pg_low)
while len(self.queue) > 0 :
this_sg = self.queue.pop(0)
self.make_and_place_nodes_and_connections(this_sg)
def make_and_place_nodes_and_connections(self, input_sg):
# make the object and name please
object = sgtbx.space_group_info(group=input_sg)
name = str(object)
sg_relations = sub_super_point_group_relations(
input_sg,
self.pg_high,
self.assert_pg)
# loop over the possible outgoing edges
edge_list = {}
for possible_super_sg, used_symops, unused_symops \
in zip(sg_relations.return_next_sg(),
sg_relations.return_next_set(),
sg_relations.return_next_left_over_set()):
# This is enough info to make connections from the given node
edge = edge_object(used =used_symops,
unused = unused_symops,
as_xyz = self.as_xyz)
edge_list[str(sgtbx.space_group_info(group=possible_super_sg)) ] = edge
# place the sg's generated on the queue
if not possible_super_sg in self.queue:
self.queue.append(possible_super_sg)
# place/insert the node with the proper connections please
# print object, type(object)
self.graph.insert_node(name = name,
edge_object = edge_list,
node_object = object)
def remove_point_group_and_its_super_groups_from_graph(self, group_name):
# please find all super groups of the given group
# and remove them from the graph
#
# I think the easiest way is just to find all nodes with
# no outgoing nodes, and determine all possible paths between them
# Then group the found pg's and remove them from the graph
end_nodes = []
for trial_node in self.graph.edge_objects:
n_out = len(self.graph.edge_objects[trial_node])
if n_out == 0:
end_nodes.append(trial_node)
# now see if there is a path between the given group and the end points
to_be_removed = []
for trial_end_node in end_nodes:
tmp_paths = self.graph.find_all_paths(group_name, trial_end_node)
for path in tmp_paths:
for sg in path:
if not (sg in to_be_removed):
to_be_removed.append(sg)
for sg in to_be_removed:
self.graph.remove_node(sg)
self.graph.assert_is_clean()
def reverse_dict(self):
new_dict = {}
for item in self.graph.o:
for value in self.graph.o[item]:
if value is not None:
if new_dict.has_key(value):
tmp = new_dict[value]
tmp.append(item)
new_dict[value] = tmp
else:
new_dict[value] = [item]
return new_dict
def get_maximal_subgroup(self, sg_name):
subgroups = []
reverse_graph = self.reverse_dict()
if reverse_graph.has_key(sg_name):
subgroups = reverse_graph[sg_name]
maximal = {}
for sg in subgroups:
maximal[sg] = True
result = []
for trial_sg in subgroups:
tmp = {}
if reverse_graph.has_key(trial_sg):
tmp = reverse_graph[trial_sg]
is_trial_sg_a_subgroup_of_subgroups = False
for item in tmp:
if item in subgroups:
maximal[item] = False
is_trial_sg_a_subgroup_of_subgroups = True
for item in maximal:
if maximal[item]:
result.append(item)
return result
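# Illustrative usage sketch (not part of the original module): build the
# sub/supergroup graph between P 1 and P 4/m m m (example point-group
# symbols) and list the node names stored in graph.edge_objects.
def _example_point_group_graph():
    pg_low = sgtbx.space_group_info('P 1').group()
    pg_high = sgtbx.space_group_info('P 4/m m m').group()
    pg_graph = point_group_graph(pg_low, pg_high)
    for name in pg_graph.graph.edge_objects:
        print name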
class find_compatible_space_groups(object):
def __init__(self,
likely_pointgroup=None,
xtal_sg=None,
unit_cell=None,
sys_abs_flag=True,
miller_array=None):
# we have the choice of supplying either a dataset
# or just cell, symmetry and likely point group
# when supplying data, an attempt will be made to determine
# the most likely spacegroup
self.miller_array = None
self.x_sg = None
self.x_uc = None
self.x_lpg = None
if (xtal_sg is None) or (unit_cell is None):
assert miller_array is not None
self.miller_array = miller_array
assert likely_pointgroup is None
self.miller_array = miller_array
self.x_sg = miller_array.space_group()
self.x_uc = miller_array.unit_cell()
self.x_lpg = miller_array.space_group().build_derived_group(True, True)
if miller_array is None:
assert xtal_sg is not None
assert unit_cell is not None
assert likely_pointgroup is not None
self.x_lpg = likely_pointgroup
self.x_sg = xtal_sg
self.x_uc = unit_cell
self.xs = crystal.symmetry(self.x_uc,
space_group=self.x_sg)
self.cb_op_xs_to_niggli = self.xs.change_of_basis_op_to_niggli_cell()
self.cb_op_lpg_to_ref_set = sgtbx.space_group_info(
group=self.x_lpg).change_of_basis_op_to_reference_setting()
self.point_group_compatible_sg = []
self.is_chiral = []
self.get_space_groups_compatible_with_likely_point_group()
self.allowed_under_pg_and_sys_abs = []
for sg in self.point_group_compatible_sg:
additional_cb_ops
,
u'讹' : [u'e'] ,
u'幀' : [u'z'] ,
u'靆' : [u'd'] ,
u'淍' : [u'z'] ,
u'䙐' : [u'k'] ,
u'祚' : [u'z'] ,
u'嗝' : [u'g'] ,
u'諣' : [u'h'] ,
u'慪' : [u'o'] ,
u'㟱' : [u'y'] ,
u'陰' : [u'y', u'a'] ,
u'泷' : [u's', u'l'] ,
u'䥺' : [u'y'] ,
u'鼁' : [u'q'] ,
u'碄' : [u'l'] ,
u'下' : [u'x'] ,
u'蜑' : [u'd'] ,
u'悔' : [u'h'] ,
u'馚' : [u'f'] ,
u'椥' : [u'z'] ,
u'䢤' : [u's'] ,
u'鸫' : [u'd'] ,
u'膪' : [u'z'] ,
u'儵' : [u's'] ,
u'蘻' : [u'j'] ,
u'掾' : [u'y'] ,
u'飄' : [u'p'] ,
u'桏' : [u'q'] ,
u'䯎' : [u'g'] ,
u'胔' : [u'z'] ,
u'竘' : [u'q', u'j'] ,
u'偟' : [u'h'] ,
u'襥' : [u'p'] ,
u'拨' : [u'b', u'f'] ,
u'鯮' : [u'z'] ,
u'歹' : [u'e', u'd'] ,
u'䫸' : [u'n'] ,
u'菾' : [u't'] ,
u'辋' : [u'w'] ,
u'瀊' : [u'p'] ,
u'䲍' : [u't'] ,
u'鬘' : [u'm'] ,
u'堚' : [u'h'] ,
u'熟' : [u's'] ,
u'茨' : [u'c'] ,
u'䀪' : [u'h'] ,
u'鲭' : [u'q', u'z'] ,
u'紬' : [u'c'] ,
u'妯' : [u'z'] ,
u'㨮' : [u'd'] ,
u'蒽' : [u'e'] ,
u'攼' : [u'g'] ,
u'䆿' : [u'y'] ,
u'绁' : [u'y', u'x'] ,
u'遊' : [u'y'] ,
u'曑' : [u's'] ,
u'釟' : [u'b'] ,
u'牞' : [u'j'] ,
u'仡' : [u'y', u'g'] ,
u'鵬' : [u'p', u'f'] ,
u'珳' : [u'w'] ,
u'蕼' : [u's'] ,
u'䉾' : [u'm'] ,
u'羀' : [u'l'] ,
u'㲂' : [u'c'] ,
u'項' : [u'x'] ,
u'唇' : [u'c'] ,
u'析' : [u'x'] ,
u'耕' : [u'g'] ,
u'稙' : [u'z'] ,
u'㜛' : [u'r', u'n'] ,
u'銞' : [u'j'] ,
u'侠' : [u'x'] ,
u'戩' : [u'j'] ,
u'璲' : [u's'] ,
u'贷' : [u'd', u't'] ,
u'峂' : [u't'] ,
u'潋' : [u'l'] ,
u'䓒' : [u'k'] ,
u'㻖' : [u'd'] ,
u'驙' : [u'z'] ,
u'坛' : [u't'] ,
u'槤' : [u'l'] ,
u'艩' : [u'q'] ,
u'籭' : [u's'] ,
u'㥯' : [u'y'] ,
u'铲' : [u'c'] ,
u'凴' : [u'p'] ,
u'摽' : [u'p', u'b'] ,
u'䰈' : [u'c'] ,
u'炋' : [u'p'] ,
u'輊' : [u'z'] ,
u'㦑' : [u'x', u'l'] ,
u'芗' : [u'x'] ,
u'搘' : [u'z'] ,
u'垥' : [u'x'] ,
u'骧' : [u'x'] ,
u'簨' : [u's'] ,
u'澵' : [u'z'] ,
u'踴' : [u'y'] ,
u'㢻' : [u'w'] ,
u'臁' : [u'l'] ,
u'捂' : [u'w'] ,
u'囏' : [u'j'] ,
u'駑' : [u'n'] ,
u'筒' : [u't', u'd'] ,
u'䩜' : [u'z'] ,
u'滟' : [u'y'] ,
u'赞' : [u'z'] ,
u'胫' : [u'k', u'j'] ,
u'扬' : [u'y'] ,
u'嗹' : [u'l'] ,
u'飻' : [u't'] ,
u'穼' : [u's'] ,
u'䦆' : [u'j'] ,
u'貈' : [u'h', u'm'] ,
u'猍' : [u'l'] ,
u'䈗' : [u's'] ,
u'憖' : [u'y'] ,
u'蔙' : [u'x'] ,
u'娧' : [u't'] ,
u'禦' : [u'y'] ,
u'鴩' : [u'h', u't'] ,
u'䢰' : [u't'] ,
u'讲' : [u'j'] ,
u'爷' : [u'y'] ,
u'䅁' : [u'a'] ,
u'惀' : [u'l'] ,
u'葃' : [u'z'] ,
u'契' : [u'q', u'x'] ,
u'磐' : [u'p'] ,
u'鱓' : [u's', u't'] ,
u'䟚' : [u'q'] ,
u'諜' : [u'x', u'd'] ,
u'煡' : [u'x'] ,
u'㩧' : [u'b'] ,
u'䁫' : [u'h'] ,
u'忪' : [u's', u'z'] ,
u'荭' : [u'h'] ,
u'塻' : [u'm'] ,
u'矺' : [u't'] ,
u'魽' : [u'h'] ,
u'䜄' : [u'c'] ,
u'殇' : [u's'] ,
u'鐎' : [u'j'] ,
u'㺕' : [u'f'] ,
u'弔' : [u'd'] ,
u'閣' : [u'g'] ,
u'眤' : [u'y'] ,
u'岩' : [u'y'] ,
u'褰' : [u'q'] ,
u'倶' : [u'j'] ,
u'璹' : [u's'] ,
u'蛅' : [u'r', u'z'] ,
u'框' : [u'k'] ,
u'凋' : [u'd'] ,
u'黕' : [u'd'] ,
u'䕘' : [u'l'] ,
u'槛' : [u'k', u'j'] ,
u'鉢' : [u'b'] ,
u'嵨' : [u'w'] ,
u'䋭' : [u'y', u'h'] ,
u'鏷' : [u'p'] ,
u'畸' : [u'q', u'j'] ,
u'嫽' : [u'l'] ,
u'态' : [u't'] ,
u'螄' : [u's'] ,
u'亊' : [u's'] ,
u'㜏' : [u'y'] ,
u'砑' : [u'y'] ,
u'龔' : [u'g'] ,
u'暚' : [u'y'] ,
u'訝' : [u'y'] ,
u'唣' : [u'z'] ,
u'纪' : [u'j'] ,
u'䎬' : [u'p', u'b'] ,
u'洳' : [u'r'] ,
u'邶' : [u'b'] ,
u'宼' : [u'k'] ,
u'䙅' : [u'y'] ,
u'珌' : [u'b'] ,
u'靏' : [u'h'] ,
u'幕' : [u'm'] ,
u'藘' : [u'l'] ,
u'䳞' : [u'b'] ,
u'㕣' : [u'y'] ,
u'癥' : [u'z'] ,
u'鷨' : [u'h'] ,
u'擮' : [u'j'] ,
u'衱' : [u'j'] ,
u'卷' : [u'q', u'j'] ,
u'糾' : [u'j'] ,
u'脂' : [u'z'] ,
u'纓' : [u'y'] ,
u'餒' : [u'n'] ,
u'貟' : [u'y'] ,
u'㬦' : [u'y'] ,
u'憭' : [u'l'] ,
u'耬' : [u'l'] ,
u'唺' : [u't'] ,
u'禽' : [u'q'] ,
u'頼' : [u'l'] ,
u'䣇' : [u'q'] ,
u'迉' : [u'q'] ,
u'浊' : [u'z'] ,
u'惗' : [u'n'] ,
u'荖' : [u'c', u'l'] ,
u'呤' : [u'l'] ,
u'磧' : [u'q'] ,
u'魦' : [u's'] ,
u'䯱' : [u'f'] ,
u'軳' : [u'p'] ,
u'汴' : [u'b'] ,
u'㕺' : [u'h'] ,
u'芀' : [u't'] ,
u'攅' : [u'z'] ,
u'垎' : [u'k'] ,
u'骐' : [u'q'] ,
u'紕' : [u'p', u'c', u'b'] ,
u'䰟' : [u'h'] ,
u'澞' : [u'y'] ,
u'錡' : [u'q', u'y'] ,
u'㒤' : [u's'] ,
u'搯' : [u't'] ,
u'嚸' : [u'd'] ,
u'閺' : [u'm', u'w'] ,
u'簿' : [u'b'] ,
u'佉' : [u'q'] ,
u'滈' : [u'h'] ,
u'鉋' : [u'p', u'b'] ,
u'㟎' : [u't'] ,
u'杙' : [u'y'] ,
u'凢' : [u'f'] ,
u'铤' : [u't'] ,
u'罩' : [u'z'] ,
u'乳' : [u'r'] ,
u'槲' : [u'h'] ,
u'赵' : [u'z', u't'] ,
u'㛸' : [u's'] ,
u'訆' : [u'j'] ,
u'儌' : [u'y', u'j'] ,
u'疏' : [u's'] ,
u'讛' : [u'y'] ,
u'検' : [u'j'] ,
u'务' : [u'w'] ,
u'㰪' : [u'w'] ,
u'䘮' : [u'x', u's'] ,
u'檱' : [u'q', u'j'] ,
u'霸' : [u'p', u'b'] ,
u'㶿' : [u'b'] ,
u'帾' : [u'd'] ,
u'䟃' : [u'c', u'z'] ,
u'郍' : [u'n'] ,
u'癎' : [u'x'] ,
u'忓' : [u'g'] ,
u'葚' : [u's', u'r'] ,
u'占' : [u'z'] ,
u'矣' : [u'y', u'x'] ,
u'藯' : [u'w'] ,
u'歰' : [u's'] ,
u'䳵' : [u'c'] ,
u'鷿' : [u'p'] ,
u'㹾' : [u'p'] ,
u'䂂' : [u'q'] ,
u'渉' : [u's', u'd'] ,
u'醌' : [u'k'] ,
u'墒' : [u's'] ,
u'䬛' : [u'y', u'b'] ,
u'炢' : [u'z'] ,
u'鐥' : [u's'] ,
u'挫' : [u'c'] ,
u'蚮' : [u't'] ,
u'㘹' : [u'c'] ,
u'笻' : [u'q'] ,
u'麾' : [u'h'] ,
u'旄' : [u'm'] ,
u'襇' : [u'j'] ,
u'偍' : [u't'] ,
u'㣒' : [u'c'] ,
u'緔' : [u'z'] ,
u'䋖' : [u'y'] ,
u'鏠' : [u'f'] ,
u'嫦' : [u'c'] ,
u'䕯' : [u'p'] ,
u'狶' : [u'x'] ,
u'嵿' : [u'd'] ,
u'䠈' : [u't'] ,
u'璋' : [u'z'] ,
u'謊' : [u'h'] ,
u'㶑' : [u'l'] ,
u'蚗' : [u'j'] ,
u'怘' : [u'g'] ,
u'厥' : [u'j'] ,
u'麧' : [u'h', u'g'] ,
u'砨' : [u'a'] ,
u'伲' : [u'n'] ,
u'段' : [u'd'] ,
u'訴' : [u's'] ,
u'㲻' : [u'n'] ,
u'藁' : [u'g'] ,
u'杂' : [u'z'] ,
u'勏' : [u'p'] ,
u'鷑' : [u'l'] ,
u'罒' : [u'w'] ,
u'乜' : [u'm', u'n'] ,
u'櫟' : [u'y', u'l'] ,
u'襞' : [u'b'] ,
u'蓫' : [u'z'] ,
u'晬' : [u'z'] ,
u'凹' : [u'a', u'w'] ,
u'鳻' : [u'f'] ,
u'繼' : [u'j'] ,
u'袈' : [u'j'] ,
u'眍' : [u'o'] ,
u'㠓' : [u'm'] ,
u'斖' : [u'w'] ,
u'脙' : [u'q'] ,
u'帧' : [u'z'] ,
u'綦' : [u'q'] ,
u'餩' : [u'e'] ,
u'䲰' : [u'y'] ,
u'農' : [u'n'] ,
u'瘷' : [u's'] ,
u'㼽' : [u's'] ,
u'䕁' : [u'f'] ,
u'擀' : [u'h', u'g'] ,
u'聃' : [u'd'] ,
u'嵑' : [u'h', u'j'] ,
u'糐' : [u'f'] ,
u'顓' : [u'z'] ,
u'䏚' : [u'c'] ,
u'軜' : [u'n'] ,
u'畡' : [u'g'] ,
u'寪' : [u'w'] ,
u'蝭' : [u't'] ,
u'屻' : [u'r'] ,
u'珺' : [u'j'] ,
u'齽' : [u'j'] ,
u'䌄' : [u'g'] ,
u'澇' : [u'l'] ,
u'逎' : [u'q'] ,
u'㪕' : [u'd'] ,
u'嬔' : [u'f'] ,
u'醣' : [u't'] ,
u'㘢' : [u'w'] ,
u'猤' : [u'j'] ,
u'墩' : [u'd'] ,
u'贰' : [u'e'] ,
u'㞷' : [u'h'] ,
u'吶' : [u'n'] ,
u'点' : [u'd'] ,
u'苅' : [u'y'] ,
u'汆' : [u'c'] ,
u'嗋' : [u'x'] ,
u'髕' : [u'b'] ,
u'㭔' : [u'l'] ,
u'䅘' : [u'l'] ,
u'淛' : [u'z'] ,
u'院' : [u'y'] ,
u'奨' : [u'j'] ,
u'䛭' : [u'x'] ,
u'韷' : [u'l'] ,
u'㑶' : [u'x'] ,
u'煸' : [u'b'] ,
u'廽' : [u'h'] ,
u'搁' : [u'g'] ,
u'莄' : [u'g'] ,
u'䪊' : [u'l'] ,
u'簑' : [u's'] ,
u'鮔' : [u'j'] ,
u'抚' : [u'h', u'f'] ,
u'踝' : [u'h'] ,
u'儣' : [u'k'] ,
u'㦨' : [u'l'] ,
u'窪' : [u'w'] ,
u'䞬' : [u't'] ,
u'椳' : [u'w'] ,
u'钶' : [u'k', u'e'] ,
u'徼' : [u'y', u'j'] ,
u'䉅' : [u'z'] ,
u'矌' : [u'k'] ,
u'鍏' : [u'w'] ,
u'婕' : [u'j'] ,
u'臘' : [u'l'] ,
u'䣞' : [u'e'] ,
u'牥' : [u'f'] ,
u'駨' : [u'x'] ,
u'惮' : [u'd'] ,
u'豱' : [u'w'] ,
u'坷' : [u'k'] ,
u'㿼' : [u'y'] ,
u'磾' : [u'd'] ,
u'䘀' : [u'f'] ,
u'抃' : [u'p', u'b'] ,
u'蔂' : [u'l'] ,
u'帐' : [u'z'] ,
u'窓' : [u'c'] ,
u'鴒' : [u'l'] ,
u'䶝' : [u'q', u'x'] ,
u'袟' : [u'z'] ,
u'瘠' : [u'j', u'z'] ,
u'㼦' : [u'g'] ,
u'断' : [u'd'] ,
u'萬' : [u'w'] ,
u'儺' : [u'n'] ,
u'綽' : [u'c'] ,
u'鰼' : [u'x'] ,
u'䳇' : [u'w'] ,
u'诉'
save : bool
Save the resulting PSF coefficients to a file? (default: True)
Keyword Args
------------
return_results : bool
By default, results are saved in `self._psf_coeff_mod` dictionary.
If return_results=True, results are instead returned as function outputs
and will not be saved to the dictionary attributes.
return_raw : bool
Normally, we return the relation between PSF coefficients as a function
of position. Instead this returns (as function outputs) the raw values
prior to fitting. Final results will not be saved to the dictionary attributes.
"""
return _gen_wfemask_coeff(self, large_grid=large_grid, force=force, save=save, **kwargs)
def gen_wfefield_coeff(self, force=False, save=True, **kwargs):
""" Fit WFE field-dependent coefficients
Find a relationship between field position and PSF coefficients for
non-coronagraphic observations and when `include_si_wfe` is enabled.
Parameters
----------
force : bool
Forces a recalculation of coefficients even if saved file exists.
(default: False)
save : bool
Save the resulting PSF coefficients to a file? (default: True)
Keyword Args
------------
return_results : bool
By default, results are saved in `self._psf_coeff_mod` dictionary.
If return_results=True, results are instead returned as function outputs
and will not be saved to the dictionary attributes.
return_raw : bool
Normally, we return the relation between PSF coefficients as a function
of position. Instead this returns (as function outputs) the raw values
prior to fitting. Final results will not be saved to the dictionary attributes.
"""
return _gen_wfefield_coeff(self, force=force, save=save, **kwargs)
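# Usage sketch (comments only; `nrc` stands for an initialized instrument
# object from this module -- an assumption, not something defined here):
#     nrc.gen_wfefield_coeff()                          # fit and save coefficients
#     cf = nrc.gen_wfefield_coeff(return_results=True)  # or return them directly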
def calc_psf_from_coeff(self, sp=None, return_oversample=True, wfe_drift=None,
coord_vals=None, coord_frame='tel', coron_rescale=False, return_hdul=True,
**kwargs):
""" Create PSF image from polynomial coefficients
Create a PSF image from instrument settings. The image is noiseless and
doesn't take into account any non-linearity or saturation effects, but is
convolved with the instrument throughput. Pixel values are in counts/sec.
The result is effectively an idealized slope image (no background).
Returns a single image or list of images if sp is a list of spectra.
By default, it returns only the oversampled PSF, but setting
return_oversample=False will instead return detector-sampled images.
Parameters
----------
sp : :mod:`pysynphot.spectrum`
If not specified, the default is flat in phot lam
(equal number of photons per spectral bin).
The default is normalized to produce 1 count/sec within that bandpass,
assuming the telescope collecting area and instrument bandpass.
Coronagraphic PSFs will further decrease this due to the smaller pupil
size and coronagraphic spot.
return_oversample : bool
Returns the oversampled version of the PSF instead of detector-sampled PSF.
Default: True.
wfe_drift : float or None
Wavefront error drift amplitude in nm.
coord_vals : tuple or None
Coordinates (in arcsec or pixels) to calculate field-dependent PSF.
If multiple values, then this should be an array ([xvals], [yvals]).
coord_frame : str
Type of input coordinates.
* 'tel': arcsecs V2,V3
* 'sci': pixels, in DMS axes orientation; aperture-dependent
* 'det': pixels, in raw detector read out axes orientation
* 'idl': arcsecs relative to aperture reference location.
return_hdul : bool
Return PSFs in an HDUList rather than set of arrays (default: True).
coron_rescale : bool
Rescale off-axis coronagraphic PSF to better match analytic prediction
when source overlaps coronagraphic occulting mask.
Primarily used for planetary companion PSFs.
"""
res = _calc_psf_from_coeff(self, sp=sp, return_oversample=return_oversample,
coord_vals=coord_vals, coord_frame=coord_frame,
wfe_drift=wfe_drift, return_hdul=return_hdul, **kwargs)
# Ensure correct scaling for off-axis PSFs
if self.is_coron and coron_rescale and (coord_vals is not None):
siaf_ap = kwargs.get('siaf_ap', None)
res = _nrc_coron_rescale(self, res, coord_vals, coord_frame, siaf_ap=siaf_ap, sp=sp)
return res
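# Usage sketch (comments only; `nrc` and `sp` are assumed to be an initialized
# instrument object and a pysynphot spectrum, respectively):
#     hdul = nrc.calc_psf_from_coeff(sp=sp, return_oversample=True)
#     psf = nrc.calc_psf_from_coeff(return_hdul=False, return_oversample=False)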
def calc_psf(self, add_distortion=None, fov_pixels=None, oversample=None,
wfe_drift=None, coord_vals=None, coord_frame='tel', **kwargs):
""" Compute a PSF
Slight modification of the inherited WebbPSF `calc_psf` function. If add_distortion, fov_pixels,
and oversample are not specified, then we automatically use the associated attributes.
Also, add ability to directly specify wfe_drift and coordinate offset values in the same
fashion as `calc_psf_from_coeff`.
Notes
-----
Additional PSF computation options (pupil shifts, source positions, jitter, ...)
may be set by configuring the `.options` dictionary attribute of this class.
Parameters
----------
sp : :mod:`pysynphot.spectrum`
Source input spectrum. If not specified, the default is flat in phot lam.
(equal number of photons per spectral bin).
source : synphot.spectrum.SourceSpectrum or dict
TODO: synphot not yet implemented in webbpsf_ext!!
nlambda : int
How many wavelengths to model for broadband?
The default depends on how wide the filter is: (5,3,1) for types (W,M,N) respectively
monochromatic : float, optional
Setting this to a wavelength value (in meters) will compute a monochromatic PSF at that
wavelength, overriding filter and nlambda settings.
fov_arcsec : float
field of view in arcsec. Default=5
fov_pixels : int
field of view in pixels. This is an alternative to fov_arcsec.
outfile : string
Filename to write. If None, then result is returned as an HDUList
oversample, detector_oversample, fft_oversample : int
How much to oversample. Default=4. By default the same factor is used for final output
pixels and intermediate optical planes, but you may optionally use different factors
if so desired.
overwrite : bool
overwrite output FITS file if it already exists?
display : bool
Whether to display the PSF when done or not.
save_intermediates, return_intermediates : bool
Options for saving to disk or returning to the calling function the intermediate optical planes during
the propagation. This is useful if you want to e.g. examine the intensity in the Lyot plane for a
coronagraphic propagation.
normalize : string
Desired normalization for output PSFs. See doc string for OpticalSystem.calc_psf. Default is
to normalize the entrance pupil to have integrated total intensity = 1.
add_distortion : bool
If True, will add 2 new extensions to the PSF HDUlist object. The 2nd extension
will be a distorted version of the over-sampled PSF and the 3rd extension will
be a distorted version of the detector-sampled PSF.
crop_psf : bool
If True, when the PSF is rotated to match the detector's rotation in the focal
plane, the PSF will be cropped so the shape of the distorted PSF will match its
undistorted counterpart. This will only be used for NIRCam, NIRISS, and FGS PSFs.
Keyword Args
------------
return_hdul : bool
Return PSFs in an HDUList rather than set of arrays (default: True).
return_oversample : bool
Returns the oversampled version of the PSF instead of detector-sampled PSF.
Only valid for `return_hdul=False`, otherwise the full HDUList is returned. Default: True.
"""
calc_psf_func = super().calc_psf
res = _calc_psf_webbpsf(self, calc_psf_func, add_distortion=add_distortion,
fov_pixels=fov_pixels, oversample=oversample, wfe_drift=wfe_drift,
coord_vals=coord_vals, coord_frame=coord_frame, **kwargs)
return res
def calc_psfs_grid(self, sp=None, wfe_drift=0, osamp=1, npsf_per_full_fov=15,
xsci_vals=None, ysci_vals=None, return_coords=None,
use_coeff=True, **kwargs):
""" PSF grid across an instrumnet FoV
Create a grid of PSFs across instrument aperture FoV. By default,
imaging observations will be for full detector FoV with regularly
spaced grid. Coronagraphic observations will cover nominal
coronagraphic mask region (usually 10s of arcsec) and will have
logarithmically spaced values where appropriate.
Keyword Args
------------
sp : :mod:`pysynphot.spectrum`
If not specified, the default is flat in phot lam (equal number of photons
per wavelength bin). The default is normalized to produce 1 count/sec within
that bandpass, assuming the telescope collecting area and instrument bandpass.
Coronagraphic PSFs will further decrease this due to the smaller pupil
size and suppression of coronagraphic mask.
If set, then the resulting PSF image will be scaled to generate the total
observed number of photons from the spectrum (i.e., not scaled by unit response).
wfe_drift : float
Desired WFE drift value relative to default OPD.
osamp : int
Sampling of output PSF relative to detector sampling.
npsf_per_full_fov : int
Number of PSFs across one dimension of the instrument's field of
view. If a coronagraphic observation, then this is for the nominal
coronagraphic field of view (20"x20").
xsci_vals: None or ndarray
Option to pass a custom grid values along x-axis in 'sci' coords.
ysci_vals: None or ndarray
Option to pass a custom grid values along y-axis in 'sci' coords.
return_coords : None or str
vusb_device_configure(self, dev_sxp, devid):
"""Configure a virtual root port.
"""
dev_class = sxp.name(dev_sxp)
if dev_class != 'vusb':
return False
dev_config = {}
ports = sxp.child(dev_sxp, 'port')
for port in ports[1:]:
try:
num, bus = port
dev_config['port-%i' % int(num)] = str(bus)
except TypeError:
pass
dev_control = self.getDeviceController(dev_class)
dev_control.reconfigureDevice(devid, dev_config)
return True
def device_configure(self, dev_sxp, devid = None):
"""Configure an existing device.
@param dev_sxp: device configuration
@type dev_config: SXP object (parsed config)
@param devid: device id
@type devid: int
@return: Returns True if successfully updated device
@rtype: boolean
"""
# convert device sxp to a dict
dev_class = sxp.name(dev_sxp)
dev_config = {}
if dev_class == 'pci':
return self.pci_device_configure(dev_sxp)
if dev_class == 'vscsi':
return self.vscsi_device_configure(dev_sxp)
if dev_class == 'vusb':
return self.vusb_device_configure(dev_sxp, devid)
for opt_val in dev_sxp[1:]:
try:
dev_config[opt_val[0]] = opt_val[1]
except IndexError:
pass
dev_control = self.getDeviceController(dev_class)
if devid is None:
dev = dev_config.get('dev', '')
if not dev:
raise VmError('Block device must have virtual details specified')
if 'ioemu:' in dev:
(_, dev) = dev.split(':', 1)
try:
(dev, _) = dev.split(':', 1) # Remove ":disk" or ":cdrom"
except ValueError:
pass
devid = dev_control.convertToDeviceNumber(dev)
dev_info = self._getDeviceInfo_vbd(devid)
if dev_info is None:
raise VmError("Device %s not connected" % devid)
dev_uuid = sxp.child_value(dev_info, 'uuid')
if self.domid is not None:
# use DevController.reconfigureDevice to change device config
dev_control.reconfigureDevice(devid, dev_config)
else:
(_, new_b, new_f) = dev_control.getDeviceDetails(dev_config)
if (new_f['device-type'] == 'cdrom' and
sxp.child_value(dev_info, 'dev').endswith(':cdrom') and
new_b['mode'] == 'r' and
sxp.child_value(dev_info, 'mode') == 'r'):
pass
else:
raise VmError('Refusing to reconfigure device %s:%d to %s' %
(dev_class, devid, dev_config))
# update XendConfig with new device info
self.info.device_update(dev_uuid, dev_sxp)
xen.xend.XendDomain.instance().managed_config_save(self)
return True
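# Usage sketch (comments only; the SXP list below is an illustrative guess at
# a parsed device config, and `dominfo` stands for a XendDomainInfo instance):
#     dev_sxp = ['vbd', ['dev', 'xvda:disk'], ['mode', 'r']]
#     dominfo.device_configure(dev_sxp)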
def waitForDevices(self):
"""Wait for this domain's configured devices to connect.
@raise VmError: if any device fails to initialise.
"""
for devclass in XendDevices.valid_devices():
self.getDeviceController(devclass).waitForDevices()
def hvm_destroyPCIDevice(self, pci_dev):
log.debug("hvm_destroyPCIDevice: %s", pci_dev)
if not self.info.is_hvm():
raise VmError("hvm_destroyPCIDevice called on non-HVM guest")
# Check the co-assignment.
# To pci-detach a device D from domN, we should ensure: for each DD in the
# list of D's co-assignment devices, DD is not assigned (to domN).
#
from xen.xend.server.pciif import PciDevice
try:
pci_device = PciDevice(pci_dev)
except Exception, e:
raise VmError("pci: failed to locate device and "+
"parse its resources - "+str(e))
coassignment_list = pci_device.find_coassigned_devices()
coassignment_list.remove(pci_device.name)
assigned_pci_device_str_list = self._get_assigned_pci_devices()
for pci_str in coassignment_list:
if xoptions.get_pci_dev_assign_strict_check() and \
pci_str in assigned_pci_device_str_list:
raise VmError(("pci: failed to pci-detach %s from domain %s" + \
" because one of its co-assignment device %s is still " + \
" assigned to the domain." \
)% (pci_device.name, self.info['name_label'], pci_str))
bdf_str = pci_dict_to_bdf_str(pci_dev)
log.info("hvm_destroyPCIDevice:%s:%s!", pci_dev, bdf_str)
if self.domid is not None:
self.image.signalDeviceModel('pci-rem', 'pci-removed', bdf_str)
return 0
def destroyDevice(self, deviceClass, devid, force = False, rm_cfg = False):
log.debug("XendDomainInfo.destroyDevice: deviceClass = %s, device = %s",
deviceClass, devid)
if rm_cfg:
# Convert devid to device number. A device number is
# needed to remove its configuration.
dev = self.getDeviceController(deviceClass).convertToDeviceNumber(devid)
# Save current sxprs. A device number and a backend
# path are needed to remove its configuration but sxprs
# do not have those after calling destroyDevice.
sxprs = self.getDeviceSxprs(deviceClass)
rc = None
if self.domid is not None:
# new blktap implementation may need a sysfs write after everything is torn down.
if deviceClass == 'tap2':
dev = self.getDeviceController(deviceClass).convertToDeviceNumber(devid)
path = self.getDeviceController(deviceClass).readBackend(dev, 'params')
frontpath = self.getDeviceController(deviceClass).frontendPath(dev)
backpath = xstransact.Read(frontpath, "backend")
thread.start_new_thread(self.getDeviceController(deviceClass).finishDeviceCleanup, (backpath, path))
rc = self.getDeviceController(deviceClass).destroyDevice(devid, force)
if not force and rm_cfg:
# The backend path, other than the device itself,
# has to be passed because its accompanied frontend
# path may be void until its removal is actually
# issued. It is probable because destroyDevice is
# issued first.
for dev_num, dev_info in sxprs:
dev_num = int(dev_num)
if dev_num == dev:
for x in dev_info:
if x[0] == 'backend':
backend = x[1]
break
break
self._waitForDevice_destroy(deviceClass, devid, backend)
if rm_cfg and deviceClass != "vif2":
if deviceClass == 'vif':
if self.domid is not None:
mac = ''
for dev_num, dev_info in sxprs:
dev_num = int(dev_num)
if dev_num == dev:
for x in dev_info:
if x[0] == 'mac':
mac = x[1]
break
break
dev_info = self._getDeviceInfo_vif(mac)
else:
_, dev_info = sxprs[dev]
else: # 'vbd' or 'tap' or 'tap2'
dev_info = self._getDeviceInfo_vbd(dev)
# To remove the UUID of the device from refs,
# deviceClass must be always 'vbd'.
deviceClass = 'vbd'
if dev_info is None:
raise XendError("Device %s is not defined" % devid)
dev_uuid = sxp.child_value(dev_info, 'uuid')
del self.info['devices'][dev_uuid]
self.info['%s_refs' % deviceClass].remove(dev_uuid)
xen.xend.XendDomain.instance().managed_config_save(self)
return rc
def getDeviceSxprs(self, deviceClass):
if deviceClass == 'pci':
dev_info = self._getDeviceInfo_pci('0')#from self.info['devices']
if dev_info is None:
return []
dev_uuid = sxp.child_value(dev_info, 'uuid')
pci_devs = self.info['devices'][dev_uuid][1]['devs']
return pci_devs
if self._stateGet() in (DOM_STATE_RUNNING, DOM_STATE_PAUSED, DOM_STATE_CRASHED):
return self.getDeviceController(deviceClass).sxprs()
else:
sxprs = []
dev_num = 0
for dev_type, dev_info in self.info.all_devices_sxpr():
if (deviceClass == 'vbd' and dev_type not in ['vbd', 'tap', 'tap2']) or \
(deviceClass != 'vbd' and dev_type != deviceClass):
continue
if deviceClass == 'vscsi':
vscsi_devs = ['devs', []]
for vscsi_dev in sxp.children(dev_info, 'dev'):
vscsi_dev.append(['frontstate', None])
vscsi_devs[1].append(vscsi_dev)
dev_num = int(sxp.child_value(vscsi_dev, 'devid'))
vscsi_mode = sxp.children(dev_info, 'feature-host')[0]
sxprs.append([dev_num, [vscsi_devs, vscsi_mode]])
elif deviceClass == 'vbd':
dev = sxp.child_value(dev_info, 'dev')
if 'ioemu:' in dev:
(_, dev) = dev.split(':', 1)
try:
(dev_name, _) = dev.split(':', 1) # Remove ":disk" or ":cdrom"
except ValueError:
dev_name = dev
dev_num = self.getDeviceController('vbd').convertToDeviceNumber(dev_name)
sxprs.append([dev_num, dev_info])
else:
sxprs.append([dev_num, dev_info])
dev_num += 1
return sxprs
def getBlockDeviceClass(self, devid):
# if the domain is running we can get the device class from xenstore.
# This is more accurate, as blktap1 devices show up as blktap2 devices
# in the config.
if self._stateGet() in (DOM_STATE_RUNNING, DOM_STATE_PAUSED, DOM_STATE_CRASHED):
# All block devices have a vbd frontend, so we know the frontend path
dev = self.getDeviceController('vbd').convertToDeviceNumber(devid)
frontendPath = "%s/device/vbd/%s" % (self.dompath, dev)
for devclass in XendDevices.valid_devices():
for dev in xstransact.List("%s/device/%s" % (self.vmpath, devclass)):
devFrontendPath = xstransact.Read("%s/device/%s/%s/frontend" % (self.vmpath, devclass, dev))
if frontendPath == devFrontendPath:
return devclass
else: # the domain is not active so we must get the device class
# from the config
# To get a device number from the devid,
# we temporarily use the device controller of VBD.
dev = self.getDeviceController('vbd').convertToDeviceNumber(devid)
dev_info = self._getDeviceInfo_vbd(dev)
if dev_info:
return dev_info[0]
def _getDeviceInfo_vif(self, mac):
for dev_type, dev_info in self.info.all_devices_sxpr():
if dev_type != 'vif':
continue
if mac == sxp.child_value(dev_info, 'mac'):
return dev_info
def _getDeviceInfo_vbd(self, devid):
for dev_type, dev_info in self.info.all_devices_sxpr():
if dev_type != 'vbd' and dev_type != 'tap' and dev_type != 'tap2':
continue
dev = sxp.child_value(dev_info, 'dev')
dev = dev.split(':')[0]
dev = self.getDeviceController(dev_type).convertToDeviceNumber(dev)
if devid == dev:
return dev_info
def _getDeviceInfo_pci(self, devid):
for dev_type, dev_info in self.info.all_devices_sxpr():
if dev_type != 'pci':
continue
return dev_info
return None
def _getDeviceInfo_vscsi(self, devid):
devid = int(devid)
for dev_type, dev_info in self.info.all_devices_sxpr():
if dev_type != 'vscsi':
continue
devs = sxp.children(dev_info, 'dev')
if devid == int(sxp.child_value(devs[0], 'devid')):
return dev_info
return None
def _getDeviceInfo_vusb(self, devid):
for dev_type, dev_info in self.info.all_devices_sxpr():
if dev_type != 'vusb':
continue
return dev_info
return None
def _get_assigned_pci_devices(self, devid = 0):
if self.domid is not None:
return get_assigned_pci_devices(self.domid)
dev_info = self._getDeviceInfo_pci(devid)
if dev_info is None:
return []
dev_uuid = sxp.child_value(dev_info, 'uuid')
pci_conf = self.info['devices'][dev_uuid][1]
return map(pci_dict_to_bdf_str, pci_conf['devs'])
def setMemoryTarget(self, target):
"""Set the memory target of this domain.
@param target: In MiB.
"""
log.debug("Setting memory target of domain %s (%s) to %d MiB.",
self.info['name_label'], str(self.domid), target)
MiB = 1024 * 1024
memory_cur = self.get_memory_dynamic_max() / MiB
if self.domid == 0:
dom0_min_mem = xoptions.get_dom0_min_mem()
if target < memory_cur and dom0_min_mem > target:
raise XendError("memory_dynamic_max too small")
self._safe_set_memory('memory_dynamic_min', target * MiB)
self._safe_set_memory('memory_dynamic_max', target * MiB)
if self.domid >= 0:
if target > memory_cur:
balloon.free((target - memory_cur) * 1024, self)
self.storeVm("memory", target)
self.storeDom("memory/target", target << 10)
xc.domain_set_target_mem(self.domid,
(target * 1024))
xen.xend.XendDomain.instance().managed_config_save(self)
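# Usage sketch (comments only; `dominfo` is an assumed XendDomainInfo instance):
#     dominfo.setMemoryTarget(512)   # balloon the domain to a 512 MiB target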
def setMemoryMaximum(self, limit):
"""Set the maximum memory limit of this domain
@param limit: In MiB.
"""
log.debug("Setting memory maximum of domain %s (%s) to %d MiB.",
self.info['name_label'], str(self.domid), limit)
maxmem_cur = self.get_memory_static_max()
MiB = 1024 * 1024
self._safe_set_memory('memory_static_max', limit * MiB)
if self.domid >= 0:
maxmem =
= locals()
all_params = [
'request'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_self_service_verification_request" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'request' is set
if self.api_client.client_side_validation and ('request' not in local_var_params or # noqa: E501
local_var_params['request'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `request` when calling `get_self_service_verification_request`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'request' in local_var_params and local_var_params['request'] is not None: # noqa: E501
query_params.append(('request', local_var_params['request'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/self-service/browser/flows/requests/verification', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='VerificationRequest', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def initialize_self_service_browser_login_flow(self, **kwargs): # noqa: E501
"""Initialize browser-based login user flow # noqa: E501
This endpoint initializes a browser-based user login flow. Once initialized, the browser will be redirected to `selfservice.flows.login.ui_url` with the request ID set as a query parameter. If a valid user session exists already, the browser will be redirected to `urls.default_redirect_url`. > This endpoint is NOT INTENDED for API clients and only works with browsers (Chrome, Firefox, ...). More information can be found at [ORY Kratos User Login and User Registration Documentation](https://www.ory.sh/docs/next/kratos/self-service/flows/user-login-user-registration). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.initialize_self_service_browser_login_flow(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param bool refresh: Refresh a login session If set to true, this will refresh an existing login session by asking the user to sign in again. This will reset the authenticated_at time of the session.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.initialize_self_service_browser_login_flow_with_http_info(**kwargs) # noqa: E501
def initialize_self_service_browser_login_flow_with_http_info(self, **kwargs): # noqa: E501
"""Initialize browser-based login user flow # noqa: E501
This endpoint initializes a browser-based user login flow. Once initialized, the browser will be redirected to `selfservice.flows.login.ui_url` with the request ID set as a query parameter. If a valid user session exists already, the browser will be redirected to `urls.default_redirect_url`. > This endpoint is NOT INTENDED for API clients and only works with browsers (Chrome, Firefox, ...). More information can be found at [ORY Kratos User Login and User Registration Documentation](https://www.ory.sh/docs/next/kratos/self-service/flows/user-login-user-registration). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.initialize_self_service_browser_login_flow_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param bool refresh: Refresh a login session If set to true, this will refresh an existing login session by asking the user to sign in again. This will reset the authenticated_at time of the session.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'refresh'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method initialize_self_service_browser_login_flow" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'refresh' in local_var_params and local_var_params['refresh'] is not None: # noqa: E501
query_params.append(('refresh', local_var_params['refresh'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/self-service/browser/flows/login', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def initialize_self_service_browser_logout_flow(self, **kwargs): # noqa: E501
"""Initialize Browser-Based Logout User Flow # noqa: E501
This endpoint initializes a logout flow. > This endpoint is NOT INTENDED for API clients and only works with browsers (Chrome, Firefox, ...). On successful logout, the browser will be redirected (HTTP 302 Found) to `urls.default_return_to`. More information can be found at [ORY Kratos User Logout Documentation](https://www.ory.sh/docs/next/kratos/self-service/flows/user-logout). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.initialize_self_service_browser_logout_flow(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.initialize_self_service_browser_logout_flow_with_http_info(**kwargs) # noqa: E501
def initialize_self_service_browser_logout_flow_with_http_info(self, **kwargs): # noqa: E501
"""Initialize Browser-Based Logout User Flow # noqa: E501
This endpoint initializes a logout flow. > This endpoint is NOT INTENDED for API clients and only works with browsers (Chrome, Firefox, ...). On successful logout, the browser will be redirected (HTTP 302 Found) to `urls.default_return_to`. More information can be found at [ORY Kratos User Logout Documentation](https://www.ory.sh/docs/next/kratos/self-service/flows/user-logout). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.initialize_self_service_browser_logout_flow_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method initialize_self_service_browser_logout_flow" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/self-service/browser/flows/logout', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def initialize_self_service_browser_registration_flow(self, **kwargs): # noqa: E501
"""Initialize browser-based registration user flow # noqa: E501
This endpoint initializes a browser-based user registration flow. Once initialized, the browser will be redirected to `selfservice.flows.registration.ui_url` with the request ID set as a query parameter. If a valid user session exists already, the browser will be redirected to `urls.default_redirect_url`. > This endpoint is NOT INTENDED for API clients and only works with browsers (Chrome, Firefox, ...). More information can be found at [ORY Kratos User Login and User Registration Documentation](https://www.ory.sh/docs/next/kratos/self-service/flows/user-login-user-registration). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.initialize_self_service_browser_registration_flow(async_req=True)
Affine or tuple.")
# Enable shortcut to create CRS from an EPSG ID.
if isinstance(crs, int):
crs = CRS.from_epsg(crs)
# If a 2-D ('single-band') array is passed in, give it a band dimension.
if len(data.shape) < 3:
data = np.expand_dims(data, 0)
# Preserves input mask
if isinstance(data, np.ma.masked_array):
if nodata is None:
if np.sum(data.mask) > 0:
raise ValueError("For masked arrays, a nodata value must be set")
else:
data.data[data.mask] = nodata
# Open handle to new memory file
mfh = MemoryFile()
# Create the memory file
with rio.open(
mfh,
"w",
height=data.shape[1],
width=data.shape[2],
count=data.shape[0],
dtype=data.dtype,
crs=crs,
transform=transform,
nodata=nodata,
driver="GTiff",
) as ds:
ds.write(data)
# Initialise a Raster object created with MemoryFile.
# (i.e., __init__ will now be run.)
return cls(mfh)
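# Usage sketch (comments only; values are illustrative). An int crs takes the
# EPSG shortcut above, and rio.transform.from_origin builds the required Affine:
#     import numpy as np
#     import rasterio as rio
#     data = np.ones((1, 50, 50), dtype="float32")
#     transform = rio.transform.from_origin(0.0, 50.0, 1.0, 1.0)
#     rst = Raster.from_array(data, transform, crs=4326)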
def __repr__(self) -> str:
"""Convert object to formal string representation."""
L = [getattr(self, item) for item in self._saved_attrs]
s: str = "{}.{}({})".format(type(self).__module__, type(self).__qualname__, ", ".join(map(str, L)))
return s
def __str__(self) -> str:
"""Provide string of information about Raster."""
return self.info()
def __eq__(self, other: object) -> bool:
"""Check if a Raster's data and georeferencing is equal to another."""
if not isinstance(other, type(self)): # TODO: Possibly add equals to SatelliteImage?
return NotImplemented
return all(
[
np.array_equal(self.data, other.data, equal_nan=True),
self.transform == other.transform,
self.crs == other.crs,
self.nodata == other.nodata,
]
)
def __ne__(self, other: object) -> bool:
return not self.__eq__(other)
def __add__(self: RasterType, other: RasterType | np.ndarray | Number) -> RasterType:
"""
Sum up the data of two rasters or a raster and a numpy array, or a raster and single number.
If other is a Raster, it must have the same data.shape, transform and crs as self.
If other is a np.ndarray, it must have the same shape.
Otherwise, other must be a single number.
"""
# Check that other is of correct type
if not isinstance(other, (Raster, np.ndarray, Number)):
raise ValueError("Addition possible only with a Raster, np.ndarray or single number.")
# Case 1 - other is a Raster
if isinstance(other, Raster):
# Check that both data are loaded
if not (self.is_loaded & other.is_loaded):
raise ValueError("Raster's data must be loaded with self.load().")
# Check that both rasters have the same shape and georeferences
if (self.data.shape == other.data.shape) & (self.transform == other.transform) & (self.crs == other.crs):
pass
else:
raise ValueError("Both rasters must have the same shape, transform and CRS.")
other_data = other.data
# Case 2 - other is a numpy array
elif isinstance(other, np.ndarray):
# Check that both array have the same shape
if self.data.shape == other.shape:
pass
else:
raise ValueError("Both rasters must have the same shape.")
other_data = other
# Case 3 - other is a single number
else:
other_data = other
# Calculate the sum of arrays
data = self.data + other_data
# Save as a new Raster
out_rst = self.from_array(data, self.transform, self.crs, nodata=self.nodata)
return out_rst
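# Usage sketch (hypothetical file name): addition returns a new Raster and
# leaves the operands untouched.
#   rst = Raster("dem.tif")
#   shifted = rst + 10.0   # scalar broadcast over the band array
#   summed = rst + rst     # requires matching shape, transform and CRS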
def __neg__(self: RasterType) -> RasterType:
"""Return self with self.data set to -self.data"""
out_rst = self.copy()
out_rst.data = -out_rst.data
return out_rst
def __sub__(self, other: Raster | np.ndarray | Number) -> Raster:
"""
Subtract a Raster, array or number from this Raster. If other is a Raster, both must have the same data.shape, transform and crs.
"""
if isinstance(other, Raster):
# Need to convert both rasters to a common type before doing the negation
ctype: np.dtype = np.find_common_type([*self.dtypes, *other.dtypes], [])
other = other.astype(ctype) # type: ignore
return self + -other # type: ignore
@overload
def astype(self, dtype: np.dtype | type | str, inplace: Literal[False]) -> Raster:
...
@overload
def astype(self, dtype: np.dtype | type | str, inplace: Literal[True]) -> None:
...
def astype(self, dtype: np.dtype | type | str, inplace: bool = False) -> Raster | None:
"""
Converts the data type of a Raster object.
:param dtype: Any numpy dtype or string accepted by numpy.astype
:param inplace: Set to True to modify the raster in place.
:returns: the output Raster with dtype changed.
"""
# Check that dtype is supported by rasterio
if not rio.dtypes.check_dtype(dtype):
raise TypeError(f"{dtype} is not supported by rasterio")
# Check that data type change will not result in a loss of information
if not rio.dtypes.can_cast_dtype(self.data, dtype):
warnings.warn(
"dtype conversion will result in a loss of information. "
f"{rio.dtypes.get_minimum_dtype(self.data)} is the minimum type to represent the data."
)
out_data = self.data.astype(dtype)
if inplace:
meta = self.ds.meta
meta.update({"dtype": dtype})
self._update(imgdata=out_data, metadata=meta)
return None
else:
return self.from_array(out_data, self.transform, self.crs, nodata=self.nodata)
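# Usage sketch: astype either returns a converted copy, or with inplace=True
# rewrites this raster's data and metadata and returns None.
#   rst32 = rst.astype("float32")          # new Raster, original untouched
#   rst.astype("float32", inplace=True)    # converts rst in place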
def _get_rio_attrs(self) -> list[str]:
"""Get the attributes that have the same name in rio.DatasetReader and Raster."""
rio_attrs: list[str] = []
for attr in Raster.__annotations__.keys():
if "__" in attr or attr not in dir(self.ds):
continue
rio_attrs.append(attr)
return rio_attrs
def _read_attrs(self, attrs: list[str] | str | None = None) -> None:
# Copy most used attributes/methods
rio_attrs = self._get_rio_attrs()
for attr in self.__annotations__.keys():
if "__" in attr or attr not in dir(self.ds):
continue
rio_attrs.append(attr)
if attrs is None:
self._saved_attrs = rio_attrs
attrs = rio_attrs
else:
if isinstance(attrs, str):
attrs = [attrs]
for attr in rio_attrs:
if attr not in attrs:
attrs.append(attr)
self._saved_attrs = attrs
for attr in attrs:
setattr(self, attr, getattr(self.ds, attr))
@property
def is_modified(self) -> bool:
"""Check whether file has been modified since it was created/opened.
:returns: True if Raster has been modified.
"""
if not self._is_modified:
new_hash = hash((self._data.tobytes(), self.transform, self.crs, self.nodata))
self._is_modified = not (self._disk_hash == new_hash)
return self._is_modified
@property
def data(self) -> np.ndarray | np.ma.masked_array:
"""
Get data.
:returns: data array.
"""
return self._data
@data.setter
def data(self, new_data: np.ndarray | np.ma.masked_array) -> None:
"""
Set the contents of .data.
new_data must have the same shape as existing data! (bands dimension included)
:param new_data: New data to assign to this instance of Raster
"""
# Check that new_data is a Numpy array
if not isinstance(new_data, np.ndarray):
raise ValueError("New data must be a numpy array.")
# Check that new_data has correct shape
if self.is_loaded:
orig_shape = self._data.shape
else:
orig_shape = (self.count, self.height, self.width)
if new_data.shape != orig_shape:
raise ValueError(f"New data must be of the same shape as existing data: {orig_shape}.")
# Check that new_data has the right type
if new_data.dtype != self._data.dtype:
raise ValueError(f"New data must be of the same type as existing data: {self.data.dtype}")
self._data = new_data
def _update(
self,
imgdata: np.ndarray | None = None,
metadata: dict[str, Any] | None = None,
vrt_to_driver: str = "GTiff",
) -> None:
"""
Update the object with a new image or metadata.
:param imgdata: image data to update with.
:param metadata: metadata to update with.
:param vrt_to_driver: name of driver to coerce a VRT to. This is required
because rasterio does not support writing to a VRTSourcedRasterBand.
"""
memfile = MemoryFile()
if imgdata is None:
imgdata = self.data
if metadata is None:
metadata = self.ds.meta
if metadata["driver"] == "VRT":
metadata["driver"] = vrt_to_driver
with memfile.open(**metadata) as ds:
ds.write(imgdata)
self.ds = memfile.open()
self._read_attrs()
if self.is_loaded:
self.load()
self._is_modified = True
def info(self, stats: bool = False) -> str:
"""
Returns string of information about the raster (filename, coordinate system, number of columns/rows, etc.).
:param stats: Add statistics for each band of the dataset (max, min, median, mean, std. dev.). Default is to
not calculate statistics.
:returns: text information about Raster attributes.
"""
as_str = [
f"Driver: {self.driver} \n",
f"Opened from file: {self.filename} \n",
f"Filename: {self.name} \n",
f"Raster modified since disk load? {self._is_modified} \n",
f"Size: {self.width}, {self.height}\n",
f"Number of bands: {self.count:d}\n",
f"Data types: {self.dtypes}\n",
f"Coordinate System: EPSG:{self.crs.to_epsg()}\n",
f"NoData Value: {self.nodata}\n",
"Pixel Size: {}, {}\n".format(*self.res),
"Upper Left Corner: {}, {}\n".format(*self.bounds[:2]),
"Lower Right Corner: {}, {}\n".format(*self.bounds[2:]),
]
if stats:
if self.data is not None:
if self.nbands == 1:
as_str.append(f"[MAXIMUM]: {np.nanmax(self.data):.2f}\n")
as_str.append(f"[MINIMUM]: {np.nanmin(self.data):.2f}\n")
as_str.append(f"[MEDIAN]: {np.ma.median(self.data):.2f}\n")
as_str.append(f"[MEAN]: {np.nanmean(self.data):.2f}\n")
as_str.append(f"[STD DEV]: {np.nanstd(self.data):.2f}\n")
else:
for b in range(self.nbands):
# try to keep with rasterio convention.
as_str.append(f"Band {b + 1}:")
as_str.append(f"[MAXIMUM]: {np.nanmax(self.data[b, :, :]):.2f}\n")
as_str.append(f"[MINIMUM]: {np.nanmin(self.data[b, :, :]):.2f}\n")
as_str.append(f"[MEDIAN]: {np.ma.median(self.data[b, :, :]):.2f}\n")
as_str.append(f"[MEAN]: {np.nanmean(self.data[b, :, :]):.2f}\n")
as_str.append(f"[STD DEV]: {np.nanstd(self.data[b, :, :]):.2f}\n")
return "".join(as_str)
def copy(self: RasterType, new_array: np.ndarray | None = None) -> RasterType:
"""
Copy the Raster object in memory
:param new_array: New array
<reponame>Gametz/Helper_Bot<filename>bot_4u/hackgame.py
import json
import random
def hackmenu(id):
with open('json/' + str(id) + '.json') as f:
ff = json.loads(f.read())
return "📋 Твой профиль:" \
"\n" \
"\n🔎 Уровень: " + str(ff["hlevel"]) + \
"\n💊 ХП: " + str(ff["hhp"]) + " (+" + str(ff["php"]) + ")" + \
"\n🔫 Урон: " + str(ff["hdamage"]) + " (+" + str(ff["pdamage"]) + ")" + \
"\n🕶 Защита: " + str(ff["hdef"]) + " (+" + str(ff["pdef"]) + ")" + \
"\n" \
"\n🔑 Имущество" \
"\n 💻 Комп: " + ff["hcomp"] + \
"\n 🛡 VPN: " + ff["hvpn"] + \
"\n 🚪 Убежище: " + ff["hsheltr"] \
def darkshop():
return "🎴 DarkShop 🎴" \
"\n" \
"\n 💻 Computers - attack" \
"\n 🛡 VPN - defense" \
"\n 🚪 Shelters - HP" \
"\n" \
"\n📌 To view a category, use its name"
def comps():
return "💻 Computers 💻" \
"\nLevel | Name | Buffs | Price" \
"\n" \
"\n 💎 1.  2 | Calculator | +1 attack | 20₿" \
"\n 💎 2.  5 | 4 cores, 4 gigs | +3 attack | 100₿" \
"\n 💎 3.  15 | Office | +5 attack | 300₿" \
"\n 💎 4.  30 | Gaming | +10 attack | 500₿" \
"\n 💎 5.  50 | Quantum | +20 attack | 1500₿" \
"\n" \
"\n📌 To buy, use 'ккомп [number]'"
def vpns():
return "🛡 VPN 🛡" \
"\nLevel | Name | Buffs | Price" \
"\n" \
"\n 💎 1.  3 | Neighbor's Wi-Fi | +1 defense | 20₿" \
"\n 💎 2.  7 | From a forum | +4 defense | 170₿" \
"\n 💎 3.  18 | Private | +8 defense | 350₿" \
"\n 💎 4.  35 | Gaming | +15 defense | 550₿" \
"\n 💎 5.  55 | Self-hosted | +20 defense | 1700₿" \
"\n" \
"\n📌 To buy, use 'квпн [number]'"
def shltrs():
return "🚪 Shelters 🚪" \
"\nLevel | Name | Buffs | Price" \
"\n" \
"\n 💎 1.  5 | House basement | +2 HP | 35₿" \
"\n 💎 2.  12 | Grandpa's garage | +5 HP | 200₿" \
"\n 💎 3.  25 | Rented apartment | +9 HP | 400₿" \
"\n 💎 4.  50 | Mountain bunker | +16 HP | 1800₿" \
"\n 💎 5.  60 | Putin's house | +21 HP | 2000₿" \
"\n" \
"\n📌 To buy, use 'кубежище [number]'"
def bcomp(id, n):
with open('json/' + str(id) + '.json') as f:
ff = json.loads(f.read())
if ff["hcomp"] == "":
if n == '1' and ff["btc"] >= 20 and ff["hlevel"] >= 2:
ff["btc"] -= 20
ff["hcomp"] = "Calculator"
ff["pdamage"] += 1
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
congrts(id)
return "You bought " + str(ff["hcomp"]) + " for 20₿\nYour balance: \n" + bal(id)
elif n == '2' and ff["btc"] >= 100 and ff["hlevel"] >= 5:
ff["btc"] -= 100
ff["hcomp"] = "4 cores, 4 gigs"
ff["pdamage"] += 3
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
congrts(id)
return "You bought " + str(ff["hcomp"]) + " for 100₿\nYour balance: \n" + bal(id)
elif n == '3' and ff["btc"] >= 300 and ff["hlevel"] >= 15:
ff["btc"] -= 300
ff["hcomp"] = "Office"
ff["pdamage"] += 5
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
congrts(id)
return "You bought " + str(ff["hcomp"]) + " for 300₿\nYour balance: \n" + bal(id)
elif n == '4' and ff["btc"] >= 500 and ff["hlevel"] >= 30:
ff["btc"] -= 500
ff["hcomp"] = "Gaming"
ff["pdamage"] += 10
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
congrts(id)
return "You bought " + str(ff["hcomp"]) + " for 500₿\nYour balance: \n" + bal(id)
elif n == '5' and ff["btc"] >= 1500 and ff["hlevel"] >= 50:
ff["btc"] -= 1500
ff["hcomp"] = "Quantum"
ff["pdamage"] += 20
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
congrts(id)
return "You bought " + str(ff["hcomp"]) + " for 1500₿\nYour balance: \n" + bal(id)
else:
return "Not enough money/experience, or you are using the command incorrectly!\nExample: ккомп 1"
else:
return "You already have a computer, or you are using the command incorrectly!\nExample: ккомп 1\nTo sell it, use 'пкомп'"
def scomps(id):
with open('json/' + str(id) + '.json') as f:
ff = json.loads(f.read())
if ff["hcomp"] != "":
if ff["hcomp"] == "Calculator":
temp = ff["hcomp"]
ff["hcomp"] = ""
ff["btc"] += 20
ff["pdamage"] = 0
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
return "You sold " + temp + " for 20₿\n" + bal(id)
elif ff["hcomp"] == "4 cores, 4 gigs":
temp = ff["hcomp"]
ff["hcomp"] = ""
ff["btc"] += 100
ff["pdamage"] = 0
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
return "You sold " + temp + " for 100₿\n" + bal(id)
elif ff["hcomp"] == "Office":
temp = ff["hcomp"]
ff["hcomp"] = ""
ff["btc"] += 300
ff["pdamage"] = 0
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
return "You sold " + temp + " for 300₿\n" + bal(id)
elif ff["hcomp"] == "Gaming":
temp = ff["hcomp"]
ff["hcomp"] = ""
ff["btc"] += 500
ff["pdamage"] = 0
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
return "You sold " + temp + " for 500₿\n" + bal(id)
elif ff["hcomp"] == "Quantum":
temp = ff["hcomp"]
ff["hcomp"] = ""
ff["btc"] += 1500
ff["pdamage"] = 0
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
return "You sold " + temp + " for 1500₿\n" + bal(id)
else:
return "You don't have a computer!"
def bvpn(id, n):
with open('json/' + str(id) + '.json') as f:
ff = json.loads(f.read())
if ff["hvpn"] == "":
if n == '1' and ff["btc"] >= 20 and ff["hlevel"] >= 3:
ff["btc"] -= 20
ff["hvpn"] = "Neighbor's Wi-Fi"
ff["pdef"] += 1
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
congrts(id)
return "You bought " + str(ff["hvpn"]) + " for 20₿\nYour balance: \n" + bal(id)
elif n == '2' and ff["btc"] >= 170 and ff["hlevel"] >= 7:
ff["btc"] -= 170
ff["hvpn"] = "From a forum"
ff["pdef"] += 4
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
congrts(id)
return "You bought " + str(ff["hvpn"]) + " for 170₿\nYour balance: \n" + bal(id)
elif n == '3' and ff["btc"] >= 350 and ff["hlevel"] >= 18:
ff["btc"] -= 350
ff["hvpn"] = "Private"
ff["pdef"] += 8
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
congrts(id)
return "You bought " + str(ff["hvpn"]) + " for 350₿\nYour balance: \n" + bal(id)
elif n == '4' and ff["btc"] >= 550 and ff["hlevel"] >= 35:
ff["btc"] -= 550
ff["hvpn"] = "Gaming"
ff["pdef"] += 15
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
congrts(id)
return "You bought " + str(ff["hvpn"]) + " for 550₿\nYour balance: \n" + bal(id)
elif n == '5' and ff["btc"] >= 1700 and ff["hlevel"] >= 55:
ff["btc"] -= 1700
ff["hvpn"] = "Self-hosted"
ff["pdef"] += 20
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
congrts(id)
return "You bought " + str(ff["hvpn"]) + " for 1700₿\nYour balance: \n" + bal(id)
else:
return "Not enough money/experience, or you are using the command incorrectly!\nExample: квпн 1"
else:
return "You already have a VPN, or you are using the command incorrectly!\nExample: квпн 1\nTo sell it, use 'пвпн'"
def svpn(id):
with open('json/' + str(id) + '.json') as f:
ff = json.loads(f.read())
if ff["hvpn"] != "":
if ff["hvpn"] == "Neighbor's Wi-Fi":
temp = ff["hvpn"]
ff["hvpn"] = ""
ff["btc"] += 20
ff["pdef"] = 0
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
return "You sold " + temp + " for 20₿\n" + bal(id)
elif ff["hvpn"] == "From a forum":
temp = ff["hvpn"]
ff["hvpn"] = ""
ff["btc"] += 170
ff["pdef"] = 0
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
return "You sold " + temp + " for 170₿\n" + bal(id)
0x38,
],
"gEfiPeiGraphicsPpiGuid": [
0x6ECD1463,
0x4A4A,
0x461B,
0xAF,
0x5F,
0x5A,
0x33,
0xE3,
0xB2,
0x16,
0x2B,
],
"gEfiPeiMpServicesPpiGuid": [
0xEE16160A,
0xE8BE,
0x47A6,
0x82,
0xA,
0xC6,
0x90,
0xD,
0xB0,
0x25,
0xA,
],
"gEfiPeiCapsulePpiGuid": [
0x3ACF33EE,
0xD892,
0x40F4,
0xA2,
0xFC,
0x38,
0x54,
0xD2,
0xE1,
0x32,
0x3D,
],
"gPeiCapsulePpiGuid": [
0x3ACF33EE,
0xD892,
0x40F4,
0xA2,
0xFC,
0x38,
0x54,
0xD2,
0xE1,
0x32,
0x3D,
],
"gEfiPeiReset2PpiGuid": [
0x6CC45765,
0xCCE4,
0x42FD,
0xBC,
0x56,
0x1,
0x1A,
0xAA,
0xC6,
0xC9,
0xA8,
],
"gEfiPeiVirtualBlockIo2PpiGuid": [
0x26CC0FAD,
0xBEB3,
0x478A,
0x91,
0xB2,
0xC,
0x18,
0x8F,
0x72,
0x61,
0x98,
],
"gEfiSecPlatformInformation2PpiGuid": [
0x9E9F374B,
0x8F16,
0x4230,
0x98,
0x24,
0x58,
0x46,
0xEE,
0x76,
0x6A,
0x97,
],
"gEfiSecHobDataPpiGuid": [
0x3EBDAF20,
0x6667,
0x40D8,
0xB4,
0xEE,
0xF5,
0x99,
0x9A,
0xC1,
0xB7,
0x1F,
],
"gEfiPeiMmAccessPpiGuid": [
0x268F33A9,
0xCCCD,
0x48BE,
0x88,
0x17,
0x86,
0x5,
0x3A,
0xC3,
0x2E,
0xD6,
],
"gEfiPeiMmControlPpiGuid": [
0x61C68702,
0x4D7E,
0x4F43,
0x8D,
0xEF,
0xA7,
0x43,
0x5,
0xCE,
0x74,
0xC5,
],
"gEfiPeiCoreFvLocationPpiGuid": [
0x52888EAE,
0x5B10,
0x47D0,
0xA8,
0x7F,
0xB8,
0x22,
0xAB,
0xA0,
0xCA,
0xF4,
],
"gPcdProtocolGuid": [
0x11B34006,
0xD85B,
0x4D0A,
0xA2,
0x90,
0xD5,
0xA5,
0x71,
0x31,
0x0E,
0xF7,
],
"gGetPcdInfoProtocolGuid": [
0x5BE40F57,
0xFA68,
0x4610,
0xBB,
0xBF,
0xE9,
0xC5,
0xFC,
0xDA,
0xD3,
0x65,
],
"gEfiBdsArchProtocolGuid": [
0x665E3FF6,
0x46CC,
0x11D4,
0x9A,
0x38,
0x00,
0x90,
0x27,
0x3F,
0xC1,
0x4D,
],
"gEfiCpuArchProtocolGuid": [
0x26BACCB1,
0x6F42,
0x11D4,
0xBC,
0xE7,
0x00,
0x80,
0xC7,
0x3C,
0x88,
0x81,
],
"gEfiMetronomeArchProtocolGuid": [
0x26BACCB2,
0x6F42,
0x11D4,
0xBC,
0xE7,
0x00,
0x80,
0xC7,
0x3C,
0x88,
0x81,
],
"gEfiMonotonicCounterArchProtocolGuid": [
0x1DA97072,
0xBDDC,
0x4B30,
0x99,
0xF1,
0x72,
0xA0,
0xB5,
0x6F,
0xFF,
0x2A,
],
"gEfiRealTimeClockArchProtocolGuid": [
0x27CFAC87,
0x46CC,
0x11D4,
0x9A,
0x38,
0x00,
0x90,
0x27,
0x3F,
0xC1,
0x4D,
],
"gEfiResetArchProtocolGuid": [
0x27CFAC88,
0x46CC,
0x11D4,
0x9A,
0x38,
0x00,
0x90,
0x27,
0x3F,
0xC1,
0x4D,
],
"gEfiRuntimeArchProtocolGuid": [
0xB7DFB4E1,
0x052F,
0x449F,
0x87,
0xBE,
0x98,
0x18,
0xFC,
0x91,
0xB7,
0x33,
],
"gEfiSecurityArchProtocolGuid": [
0xA46423E3,
0x4617,
0x49F1,
0xB9,
0xFF,
0xD1,
0xBF,
0xA9,
0x11,
0x58,
0x39,
],
"gEfiSecurityPolicyProtocolGuid": [
0x78E4D245,
0xCD4D,
0x4A05,
0xA2,
0xBA,
0x47,
0x43,
0xE8,
0x6C,
0xFC,
0xAB,
],
"gEfiTimerArchProtocolGuid": [
0x26BACCB3,
0x6F42,
0x11D4,
0xBC,
0xE7,
0x00,
0x80,
0xC7,
0x3C,
0x88,
0x81,
],
"gEfiVariableWriteArchProtocolGuid": [
0x6441F818,
0x6362,
0x4E44,
0xB5,
0x70,
0x7D,
0xBA,
0x31,
0xDD,
0x24,
0x53,
],
"gEfiVariableArchProtocolGuid": [
0x1E5668E2,
0x8481,
0x11D4,
0xBC,
0xF1,
0x00,
0x80,
0xC7,
0x3C,
0x88,
0x81,
],
"gEfiWatchdogTimerArchProtocolGuid": [
0x665E3FF5,
0x46CC,
0x11D4,
0x9A,
0x38,
0x00,
0x90,
0x27,
0x3F,
0xC1,
0x4D,
],
"gEfiStatusCodeRuntimeProtocolGuid": [
0xD2B2B828,
0x0826,
0x48A7,
0xB3,
0xDF,
0x98,
0x3C,
0x00,
0x60,
0x24,
0xF0,
],
"gEfiSmbusHcProtocolGuid": [
0xE49D33ED,
0x513D,
0x4634,
0xB6,
0x98,
0x6F,
0x55,
0xAA,
0x75,
0x1C,
0x1B,
],
"gEfiFirmwareVolume2ProtocolGuid": [
0x220E73B6,
0x6BDB,
0x4413,
0x84,
0x5,
0xB9,
0x74,
0xB1,
0x8,
0x61,
0x9A,
],
"gEfiFirmwareVolumeBlockProtocolGuid": [
0x8F644FA9,
0xE850,
0x4DB1,
0x9C,
0xE2,
0xB,
0x44,
0x69,
0x8E,
0x8D,
0xA4,
],
"gEfiCapsuleArchProtocolGuid": [
0x5053697E,
0x2CBC,
0x4819,
0x90,
0xD9,
0x05,
0x80,
0xDE,
0xEE,
0x57,
0x54,
],
"gEfiMpServiceProtocolGuid": [
0x3FDDA605,
0xA76E,
0x4F46,
0xAD,
0x29,
0x12,
0xF4,
0x53,
0x1B,
0x3D,
0x08,
],
"gEfiPciHostBridgeResourceAllocationProtocolGuid": [
0xCF8034BE,
0x6768,
0x4D8B,
0xB7,
0x39,
0x7C,
0xCE,
0x68,
0x3A,
0x9F,
0xBE,
],
"gEfiPciPlatformProtocolGuid": [
0x07D75280,
0x27D4,
0x4D69,
0x90,
0xD0,
0x56,
0x43,
0xE2,
0x38,
0xB3,
0x41,
],
"gEfiPciOverrideProtocolGuid": [
0xB5B35764,
0x460C,
0x4A06,
0x99,
0xFC,
0x77,
0xA1,
0x7C,
0x1B,
0x5C,
0xEB,
],
"gEfiPciEnumerationCompleteProtocolGuid": [
0x30CFE3E7,
0x3DE1,
0x4586,
0xBE,
0x20,
0xDE,
0xAB,
0xA1,
0xB3,
0xB7,
0x93,
],
"gEfiIncompatiblePciDeviceSupportProtocolGuid": [
0xEB23F55A,
0x7863,
0x4AC2,
0x8D,
0x3D,
0x95,
0x65,
0x35,
0xDE,
0x03,
0x75,
],
"gEfiPciHotPlugInitProtocolGuid": [
0xAA0E8BC1,
0xDABC,
0x46B0,
0xA8,
0x44,
0x37,
0xB8,
0x16,
0x9B,
0x2B,
0xEA,
],
"gEfiPciHotPlugRequestProtocolGuid": [
0x19CB87AB,
0x2CB9,
0x4665,
0x83,
0x60,
0xDD,
0xCF,
0x60,
0x54,
0xF7,
0x9D,
],
"gEfiIdeControllerInitProtocolGuid": [
0xA1E37052,
0x80D9,
0x4E65,
0xA3,
0x17,
0x3E,
0x9A,
0x55,
0xC4,
0x3E,
0xC9,
],
"gEfiDiskInfoProtocolGuid": [
0xD432A67F,
0x14DC,
0x484B,
0xB3,
0xBB,
0x3F,
0x02,
0x91,
0x84,
0x93,
0x27,
],
"gEfiSmbiosProtocolGuid": [
0x3583FF6,
0xCB36,
0x4940,
0x94,
0x7E,
0xB9,
0xB3,
0x9F,
0x4A,
0xFA,
0xF7,
],
"gEfiS3SaveStateProtocolGuid": [
0xE857CAF6,
0xC046,
0x45DC,
0xBE,
0x3F,
0xEE,
0x7,
0x65,
0xFB,
0xA8,
0x87,
],
"gEfiS3SmmSaveStateProtocolGuid": [
0x320AFE62,
0xE593,
0x49CB,
0xA9,
0xF1,
0xD4,
0xC2,
0xF4,
0xAF,
0x1,
0x4C,
],
"gEfiRscHandlerProtocolGuid": [
0x86212936,
0xE76,
0x41C8,
0xA0,
0x3A,
0x2A,
0xF2,
0xFC,
0x1C,
0x39,
0xE2,
],
"gEfiSmmRscHandlerProtocolGuid": [
0x2FF29FA7,
0x5E80,
0x4ED9,
0xB3,
0x80,
0x1,
0x7D,
0x3C,
0x55,
0x4F,
0xF4,
],
"gEfiAcpiSdtProtocolGuid": [
0xEB97088E,
0xCFDF,
0x49C6,
0xBE,
0x4B,
0xD9,
0x6,
0xA5,
0xB2,
0xE,
0x86,
],
"gEfiSioProtocolGuid": [
0x215FDD18,
0xBD50,
0x4FEB,
0x89,
0xB,
0x58,
0xCA,
0xB,
0x47,
0x39,
0xE9,
],
"gEfiSmmCpuIo2ProtocolGuid": [
0x3242A9D8,
0xCE70,
0x4AA0,
0x95,
0x5D,
0x5E,
0x7B,
0x14,
0x0D,
0xE4,
0xD2,
],
"gEfiSmmBase2ProtocolGuid": [
0xF4CCBFB7,
0xF6E0,
0x47FD,
0x9D,
0xD4,
0x10,
0xA8,
0xF1,
0x50,
0xC1,
0x91,
],
"gEfiSmmAccess2ProtocolGuid": [
0xC2702B74,
0x800C,
0x4131,
0x87,
0x46,
0x8F,
0xB5,
0xB8,
0x9C,
0xE4,
0xAC,
],
"gEfiSmmControl2ProtocolGuid": [
0x843DC720,
0xAB1E,
0x42CB,
0x93,
0x57,
0x8A,
0x0,
0x78,
0xF3,
0x56,
0x1B,
],
"gEfiSmmConfigurationProtocolGuid": [
0x26EEB3DE,
0xB689,
0x492E,
0x80,
0xF0,
0xBE,
0x8B,
0xD7,
0xDA,
0x4B,
0xA7,
],
"gEfiSmmReadyToLockProtocolGuid": [
0x47B7FA8C,
0xF4BD,
0x4AF6,
0x82,
0x00,
0x33,
0x30,
0x86,
0xF0,
0xD2,
0xC8,
],
"gEfiDxeSmmReadyToLockProtocolGuid": [
0x60FF8964,
0xE906,
0x41D0,
0xAF,
0xED,
0xF2,
0x41,
0xE9,
0x74,
0xE0,
0x8E,
],
"gEfiSmmCommunicationProtocolGuid": [
0xC68ED8E2,
0x9DC6,
0x4CBD,
0x9D,
0x94,
0xDB,
0x65,
0xAC,
0xC5,
0xC3,
0x32,
],
"gEfiSmmStatusCodeProtocolGuid": [
0x6AFD2B77,
0x98C1,
0x4ACD,
0xA6,
0xF9,
0x8A,
0x94,
0x39,
0xDE,
0xF,
0xB1,
],
"gEfiSmmCpuProtocolGuid": [
0xEB346B97,
0x975F,
0x4A9F,
0x8B,
0x22,
0xF8,
0xE9,
0x2B,
0xB3,
0xD5,
0x69,
],
"gEfiSmmPciRootBridgeIoProtocolGuid": [
0x8BC1714D,
0xFFCB,
0x41C3,
0x89,
0xDC,
0x6C,
0x74,
0xD0,
0x6D,
0x98,
0xEA,
],
"gEfiSmmSwDispatch2ProtocolGuid": [
0x18A3C6DC,
0x5EEA,
0x48C8,
0xA1,
0xC1,
0xB5,
0x33,
0x89,
0xF9,
0x89,
0x99,
],
"gEfiSmmSxDispatch2ProtocolGuid": [
0x456D2859,
0xA84B,
0x4E47,
0xA2,
0xEE,
0x32,
0x76,
0xD8,
0x86,
0x99,
0x7D,
],
"gEfiSmmPeriodicTimerDispatch2ProtocolGuid": [
0x4CEC368E,
0x8E8E,
0x4D71,
0x8B,
0xE1,
0x95,
0x8C,
0x45,
0xFC,
0x8A,
0x53,
],
"gEfiSmmUsbDispatch2ProtocolGuid": [
0xEE9B8D90,
0xC5A6,
0x40A2,
0xBD,
0xE2,
0x52,
0x55,
0x8D,
0x33,
0xCC,
0xA1,
],
"gEfiSmmGpiDispatch2ProtocolGuid": [
0x25566B03,
0xB577,
0x4CBF,
0x95,
0x8C,
0xED,
0x66,
0x3E,
0xA2,
0x43,
0x80,
],
"gEfiSmmStandbyButtonDispatch2ProtocolGuid": [
0x7300C4A1,
0x43F2,
0x4017,
0xA5,
0x1B,
0xC8,
0x1A,
0x7F,
0x40,
0x58,
0x5B,
],
"gEfiSmmPowerButtonDispatch2ProtocolGuid": [
0x1B1183FA,
0x1823,
0x46A7,
0x88,
0x72,
0x9C,
0x57,
0x87,
0x55,
0x40,
0x9D,
],
"gEfiSmmIoTrapDispatch2ProtocolGuid": [
0x58DC368D,
0x7BFA,
0x4E77,
0xAB,
0xBC,
0xE,
0x29,
0x41,
0x8D,
0xF9,
0x30,
],
"gEfiPcdProtocolGuid": [
0x13A3F0F6,
0x264A,
0x3EF0,
0xF2,
0xE0,
0xDE,
0xC5,
0x12,
0x34,
0x2F,
0x34,
],
"gEfiFirmwareVolumeBlock2ProtocolGuid": [
0x8F644FA9,
0xE850,
0x4DB1,
0x9C,
0xE2,
0xB,
0x44,
0x69,
0x8E,
0x8D,
0xA4,
],
"gEfiCpuIo2ProtocolGuid": [
0xAD61F191,
0xAE5F,
0x4C0E,
0xB9,
0xFA,
0xE8,
0x69,
0xD2,
0x88,
0xC6,
0x4F,
],
"gEfiLegacyRegion2ProtocolGuid": [
0x70101EAF,
0x85,
0x440C,
0xB3,
0x56,
0x8E,
0xE3,
0x6F,
0xEF,
0x24,
0xF0,
],
"gEfiSecurity2ArchProtocolGuid": [
0x94AB2F58,
0x1438,
0x4EF1,
0x91,
0x52,
0x18,
0x94,
0x1A,
0x3A,
0x0E,
0x68,
],
"gEfiSmmEndOfDxeProtocolGuid": [
0x24E70042,
0xD5C5,
0x4260,
0x8C,
0x39,
0xA,
0xD3,
0xAA,
0x32,
0xE9,
0x3D,
],
"gEfiIsaHcProtocolGuid": [
0xBCDAF080,
0x1BDE,
0x4E22,
0xAE,
0x6A,
0x43,
0x54,
0x1E,
0x12,
0x8E,
0xC4,
],
"gEfiIsaHcServiceBindingProtocolGuid": [
0xFAD7933A,
0x6C21,
0x4234,
0xA4,
0x34,
0x0A,
0x8A,
0x0D,
0x2B,
0x07,
0x81,
],
"gEfiSioControlProtocolGuid": [
0xB91978DF,
0x9FC1,
0x427D,
0xBB,
0x5,
0x4C,
0x82,
0x84,
0x55,
0xCA,
0x27,
],
"gEfiGetPcdInfoProtocolGuid": [
0xFD0F4478,
0xEFD,
0x461D,
0xBA,
0x2D,
0xE5,
0x8C,
0x45,
0xFD,
0x5F,
0x5E,
],
"gEfiI2cMasterProtocolGuid": [
0xCD72881F,
0x45B5,
0x4FEB,
0x98,
0xC8,
0x31,
0x3D,
0xA8,
0x11,
0x74,
0x62,
],
"gEfiI2cIoProtocolGuid": [
0xB60A3E6B,
0x18C4,
0x46E5,
0xA2,
0x9A,
0xC9,
0xA1,
0x06,
0x65,
0xA2,
0x8E,
],
"gEfiI2cEnumerateProtocolGuid": [
0xDA8CD7C4,
0x1C00,
0x49E2,
0x80,
0x3E,
0x52,
0x14,
0xE7,
0x01,
0x89,
0x4C,
],
"gEfiI2cHostProtocolGuid": [
0xA5AAB9E3,
0xC727,
0x48CD,
0x8B,
0xBF,
0x42,
0x72,
0x33,
0x85,
0x49,
0x48,
],
"gEfiI2cBusConfigurationManagementProtocolGuid": [
0x55B71FB5,
0x17C6,
0x410E,
0xB5,
0xBD,
0x5F,
0xA2,
0xE3,
0xD4,
0x46,
0x6B,
],
"gEfiMmMpProtocolGuid": [
0x5D5450D7,
0x990C,
0x4180,
0xA8,
0x3,
0x8E,
0x63,
0xF0,
0x60,
0x83,
0x7,
],
"gEfiMmEndOfDxeProtocolGuid": [
0x24E70042,
0xD5C5,
0x4260,
0x8C,
0x39,
0xA,
0xD3,
0xAA,
0x32,
0xE9,
0x3D,
],
"gEfiMmIoTrapDispatchProtocolGuid": [
0x58DC368D,
0x7BFA,
0x4E77,
0xAB,
0xBC,
0xE,
0x29,
0x41,
0x8D,
0xF9,
0x30,
],
"gEfiMmPowerButtonDispatchProtocolGuid": [
0x1B1183FA,
0x1823,
0x46A7,
0x88,
0x72,
0x9C,
0x57,
0x87,
0x55,
0x40,
0x9D,
],
"gEfiMmStandbyButtonDispatchProtocolGuid": [
0x7300C4A1,
0x43F2,
0x4017,
0xA5,
0x1B,
0xC8,
0x1A,
0x7F,
0x40,
0x58,
0x5B,
],
"gEfiMmGpiDispatchProtocolGuid": [
0x25566B03,
0xB577,
0x4CBF,
0x95,
0x8C,
0xED,
0x66,
0x3E,
0xA2,
0x43,
0x80,
],
"gEfiMmUsbDispatchProtocolGuid": [
0xEE9B8D90,
0xC5A6,
0x40A2,
0xBD,
0xE2,
0x52,
0x55,
0x8D,
0x33,
0xCC,
0xA1,
],
"gEfiMmPeriodicTimerDispatchProtocolGuid": [
0x4CEC368E,
0x8E8E,
0x4D71,
0x8B,
0xE1,
0x95,
0x8C,
0x45,
0xFC,
0x8A,
0x53,
],
"gEfiMmSxDispatchProtocolGuid": [
0x456D2859,
0xA84B,
0x4E47,
0xA2,
0xEE,
0x32,
0x76,
0xD8,
0x86,
0x99,
0x7D,
],
"gEfiMmSwDispatchProtocolGuid": [
0x18A3C6DC,
0x5EEA,
<filename>enas_lm/src/lstm_lib.py
# coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AWD LSTM model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from enas_lm.src import data_utils
from enas_lm.src import utils
MOVING_AVERAGE_DECAY = 0.9995
def _gen_mask(shape, drop_prob):
"""Generate a droppout mask."""
keep_prob = 1. - drop_prob
mask = tf.random_uniform(shape, dtype=tf.float32)
mask = tf.floor(mask + keep_prob) / keep_prob
return mask
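# Worked example: with drop_prob=0.3 (keep_prob=0.7), floor(u + 0.7) is 1 with
# probability 0.7 and 0 otherwise for u ~ U[0, 1); dividing by keep_prob keeps
# E[mask * h] == h, i.e. standard inverted dropout:
#   mask = _gen_mask([32, 1100], 0.3)  # assumed [batch, hidden] shape
#   dropped_h = h * mask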
def _lstm(x, prev_c, prev_h, w_lstm, layer_masks):
"""Multi-layer LSTM.
Args:
x: [batch_size, num_steps, hidden_size].
prev_c: [[batch_size, hidden_size] * num_layers].
prev_h: [[batch_size, hidden_size] * num_layers].
w_lstm: [[2 * hidden_size, 4 * hidden_size] * num_layers].
layer_masks: [([hidden_size, hidden_size] or None)* num_layers].
Returns:
next_c: [[batch_size, hidden_size] * num_layers].
next_h: [[batch_size, hidden_size] * num_layers].
all_h: [batch_size, num_steps, hidden_size].
"""
_, num_steps, _ = tf.unstack(tf.shape(x))
num_layers = len(w_lstm)
all_h = [tf.TensorArray(dtype=tf.float32, size=num_steps, infer_shape=False)
for _ in range(num_layers)]
def _condition(step, *unused_args):
return tf.less(step, num_steps)
def _body(step, pprev_c, pprev_h, all_h):
"""Apply LSTM at each step."""
next_c, next_h = [], []
for layer_id, (p_c, p_h, w, m) in enumerate(zip(
pprev_c, pprev_h, w_lstm, layer_masks)):
inp = x[:, step, :] if layer_id == 0 else next_h[-1]
if m is not None:
inp *= m
ifog = tf.matmul(tf.concat([inp, p_h], axis=1), w)
i, f, o, g = tf.split(ifog, 4, axis=1)
i = tf.sigmoid(i)
f = tf.sigmoid(f)
o = tf.sigmoid(o)
g = tf.tanh(g)
c = i * g + f * p_c
h = o * tf.tanh(c)
all_h[layer_id] = all_h[layer_id].write(step, h)
next_c.append(c)
next_h.append(h)
return step + 1, next_c, next_h, all_h
loop_inps = [tf.constant(0, dtype=tf.int32), prev_c, prev_h, all_h]
_, next_c, next_h, all_h = tf.while_loop(_condition, _body, loop_inps,
parallel_iterations=1)
all_h = [tf.transpose(h.stack(), [1, 0, 2])
for h in all_h]
return next_c, next_h, all_h
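# Usage sketch with assumed sizes (batch=4, steps=7, hidden=8, 2 layers) and no
# inter-layer dropout masks; shapes follow the docstring above:
#   w = [tf.ones([2 * 8, 4 * 8]) for _ in range(2)]
#   c0 = h0 = [tf.zeros([4, 8]) for _ in range(2)]
#   next_c, next_h, all_h = _lstm(x, c0, h0, w, [None, None])  # x: [4, 7, 8]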
def _set_default_params(params):
"""Set default parameters."""
params.add_hparam('alpha', 2.) # activation L2 reg
params.add_hparam('best_valid_ppl_threshold', 7)
params.add_hparam('beta', 1.) # activation slowness reg
params.add_hparam('batch_size', 12)
params.add_hparam('bptt_steps', 70)
# for dropouts: dropping rate, NOT keeping rate
params.add_hparam('drop_e', 0.10) # word
params.add_hparam('drop_i', 0.65) # embeddings
params.add_hparam('drop_l', 0.30) # between layers
params.add_hparam('drop_o', 0.40) # output
params.add_hparam('drop_w', 0.50) # weight
params.add_hparam('emb_size', 400)
params.add_hparam('start_decay_epoch', 14)
params.add_hparam('decay_every_epoch', 1)
params.add_hparam('decay_rate', 0.98)
params.add_hparam('grad_bound', 0.25)
params.add_hparam('hidden_size', 1100)
params.add_hparam('init_range', 0.1)
params.add_hparam('learning_rate', 20.)
params.add_hparam('num_layers', 3)
params.add_hparam('num_train_epochs', 500)
params.add_hparam('vocab_size', 10000)
params.add_hparam('weight_decay', 1.2e-6)
return params
class LM(object):
"""Language model."""
def __init__(self, params, x_train, x_valid, x_test, name='language_model'):
print('-' * 80)
print('Building LM')
self.params = _set_default_params(params)
self.name = name
# train data
(self.x_train, self.y_train,
self.num_train_batches, self.reset_start_idx,
self.should_reset, self.base_bptt) = data_utils.input_producer(
x_train, params.batch_size, params.bptt_steps, random_len=True)
params.add_hparam(
'num_train_steps', self.num_train_batches * params.num_train_epochs)
# valid data
(self.x_valid, self.y_valid,
self.num_valid_batches) = data_utils.input_producer(
x_valid, params.batch_size, params.bptt_steps)
# test data
(self.x_test, self.y_test,
self.num_test_batches) = data_utils.input_producer(x_test, 1, 1)
params.add_hparam('start_decay_step',
params.start_decay_epoch * self.num_train_batches)
params.add_hparam('decay_every_step',
params.decay_every_epoch * self.num_train_batches)
self._build_params()
self._build_train()
self._build_valid()
self._build_test()
def _build_params(self):
"""Create and count model parameters."""
print('-' * 80)
print('Building model params')
with tf.variable_scope(self.name):
with tf.variable_scope('embedding'):
initializer = tf.initializers.random_uniform(
-self.params.init_range, self.params.init_range)
w_emb = tf.get_variable(
'w', [self.params.vocab_size, self.params.emb_size],
initializer=initializer)
dropped_w_emb = tf.layers.dropout(
w_emb, self.params.drop_e, [self.params.vocab_size, 1],
training=True)
w_lstm = []
dropped_w_lstm = []
with tf.variable_scope('lstm'):
for i in range(self.params.num_layers):
inp_size = self.params.emb_size if i == 0 else self.params.hidden_size
hid_size = (self.params.emb_size if i == self.params.num_layers - 1
else self.params.hidden_size)
init_range = 1.0 / np.sqrt(hid_size)
initializer = tf.initializers.random_uniform(-init_range, init_range)
with tf.variable_scope('layer_{0}'.format(i)):
w = tf.get_variable('w', [inp_size + hid_size, 4 * hid_size],
initializer=initializer)
i_mask = tf.ones([inp_size, 4 * hid_size], dtype=tf.float32)
h_mask = _gen_mask([hid_size, 4 * hid_size], self.params.drop_w)
mask = tf.concat([i_mask, h_mask], axis=0)
dropped_w = w * mask
w_lstm.append(w)
dropped_w_lstm.append(dropped_w)
with tf.variable_scope('init_states'):
batch_prev_c, batch_prev_h, batch_reset = [], [], []
test_prev_c, test_prev_h, test_reset = [], [], []
for i in range(self.params.num_layers):
inp_size = self.params.emb_size if i == 0 else self.params.hidden_size
hid_size = (self.params.emb_size if i == self.params.num_layers - 1
else self.params.hidden_size)
with tf.variable_scope('layer_{0}'.format(i)):
with tf.variable_scope('batch'):
init_shape = [self.params.batch_size, hid_size]
batch_prev_c.append(tf.get_variable(
'c', init_shape, dtype=tf.float32, trainable=False))
batch_prev_h.append(tf.get_variable(
'h', init_shape, dtype=tf.float32, trainable=False))
zeros = np.zeros(init_shape, dtype=np.float32)
batch_reset.append(tf.assign(batch_prev_c[-1], zeros))
batch_reset.append(tf.assign(batch_prev_h[-1], zeros))
with tf.variable_scope('test'):
init_shape = [1, hid_size]
test_prev_c.append(tf.get_variable(
'c', init_shape, dtype=tf.float32, trainable=False))
test_prev_h.append(tf.get_variable(
'h', init_shape, dtype=tf.float32, trainable=False))
zeros = np.zeros(init_shape, dtype=np.float32)
test_reset.append(tf.assign(test_prev_c[-1], zeros))
test_reset.append(tf.assign(test_prev_h[-1], zeros))
num_params = sum([np.prod(v.shape) for v in tf.trainable_variables()])
print('Model has {0} params'.format(num_params))
self.batch_init_states = {
'c': batch_prev_c,
'h': batch_prev_h,
'reset': batch_reset,
}
self.train_params = {
'w_emb': dropped_w_emb,
'w_lstm': dropped_w_lstm,
'w_soft': w_emb,
}
self.test_init_states = {
'c': test_prev_c,
'h': test_prev_h,
'reset': test_reset,
}
self.eval_params = {
'w_emb': w_emb,
'w_lstm': w_lstm,
'w_soft': w_emb,
}
def _forward(self, x, y, model_params, init_states, is_training=False):
"""Computes the logits.
Args:
x: [batch_size, num_steps], input batch.
y: [batch_size, num_steps], output batch.
model_params: a `dict` of params to use.
init_states: a `dict` of params to use.
is_training: if `True`, will apply regularizations.
Returns:
loss: scalar, cross-entropy loss
"""
w_emb = model_params['w_emb']
w_lstm = model_params['w_lstm']
w_soft = model_params['w_soft']
prev_c = init_states['c']
prev_h = init_states['h']
emb = tf.nn.embedding_lookup(w_emb, x)
if is_training:
emb = tf.layers.dropout(
emb, self.params.drop_i,
[self.params.batch_size, 1, self.params.emb_size], training=True)
layer_masks = [None]
for _ in range(1, self.params.num_layers - 1):
mask = _gen_mask([self.params.batch_size, self.params.hidden_size],
self.params.drop_l)
layer_masks.append(mask)
layer_masks.append(None)
else:
layer_masks = [None] * self.params.num_layers
out_c, out_h, all_h = _lstm(emb, prev_c, prev_h, w_lstm, layer_masks)
top_h = all_h[-1]
if is_training:
top_h = tf.layers.dropout(
top_h, self.params.drop_o,
[self.params.batch_size, 1, self.params.emb_size], training=True)
carry_on = []
for var, val in zip(prev_c + prev_h, out_c + out_h):
carry_on.append(tf.assign(var, val))
logits = tf.einsum('bnh,vh->bnv', top_h, w_soft)
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y,
logits=logits)
loss = tf.reduce_mean(loss) # TODO(hyhieu): watch for num_steps
reg_loss = loss # loss + regularization_terms, for training only
if is_training:
# L2 weight reg
reg_loss += self.params.weight_decay * tf.add_n(
[tf.reduce_sum(w ** 2) for w in tf.trainable_variables()])
# activation L2 reg
reg_loss += self.params.alpha * tf.add_n(
[tf.reduce_mean(h ** 2) for h in all_h[:-1]])
# activation slowness L2 reg
reg_loss += self.params.beta * tf.add_n(
[tf.reduce_mean((h[:, 1:, :] - h[:, :-1, :]) ** 2)
for h in all_h[:-1]])
with tf.control_dependencies(carry_on):
loss = tf.identity(loss)
if is_training:
reg_loss = tf.identity(reg_loss)
return reg_loss, loss
def _build_train(self):
"""Build training ops."""
print('-' * 80)
print('Building train graph')
reg_loss, loss = self._forward(self.x_train, self.y_train,
self.train_params, self.batch_init_states,
is_training=True)
tf_vars = tf.trainable_variables()
global_step = tf.train.get_or_create_global_step()
lr_scale = (tf.cast(tf.shape(self.y_train)[-1], dtype=tf.float32) /
tf.cast(self.params.bptt_steps, dtype=tf.float32))
learning_rate = utils.get_lr(global_step, self.params) * lr_scale
# learning_rate = tf.Print(
# learning_rate,
# [learning_rate, lr_scale, self.base_bptt, tf.shape(self.y_train)],
# message='lr: ', summarize=3)
grads = tf.gradients(reg_loss, tf_vars)
clipped_grads, grad_norm = tf.clip_by_global_norm(grads,
self.params.grad_bound)
(self.update_moving_avg_ops, self.use_moving_avg_vars,
self.restore_normal_vars) = self._create_average_ops()
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
train_op = optimizer.apply_gradients(zip(clipped_grads, tf_vars),
global_step=global_step)
self.train_loss = loss
self.train_op = train_op
self.grad_norm = grad_norm
self.learning_rate = learning_rate
def _create_average_ops(self):
"""Build moving average ops."""
print('Creating moving average ops')
with tf.variable_scope('moving_avg_flag'):
self.moving_avg_started = tf.get_variable(
'flag', [], tf.int32, initializer=tf.initializers.zeros(),
trainable=False)
self.start_moving_avg_op = tf.assign(self.moving_avg_started, 1)
all_vars = tf.trainable_variables()
average_pairs = []
var_cnt = 0
with tf.variable_scope('average'):
for v in all_vars:
avg_v = tf.get_variable(
str(var_cnt), shape=v.shape, dtype=v.dtype,
initializer=tf.zeros_initializer, trainable=False)
var_cnt += 1
average_pairs.append([v, avg_v])
backup_pairs = []
var_cnt = 0
with tf.variable_scope('backup'):
for v in all_vars:
backup_v = tf.get_variable(str(var_cnt), shape=v.shape, dtype=v.dtype,
trainable=False)
var_cnt += 1
backup_pairs.append([v, backup_v])
with tf.variable_scope('avg_step'):
avg_step = tf.get_variable('step', [], dtype=tf.float32, trainable=False)
with tf.control_dependencies([tf.assign_add(avg_step, 1.0)]):
average_op = []
for v, avg_v in average_pairs:
mu = 1 / avg_step
new_avg = mu * v + (1 - mu) * avg_v
with tf.control_dependencies([new_avg]):
average_op.append(tf.assign(avg_v, new_avg))
assert len(average_pairs) == len(all_vars)
assert len(average_pairs) == len(backup_pairs)
use_average_op = []
for i in range(len(average_pairs)):
v, avg_v = average_pairs[i]
_, backup_v = backup_pairs[i]
with tf.control_dependencies([tf.assign(backup_v, v)]):
use_average_op.append(tf.assign(v, avg_v))
use_average_op = tf.group(* use_average_op)
reverse_average_op = []
for v, backup_v in backup_pairs:
reverse_average_op.append(tf.assign(v, backup_v))
reverse_average_op = tf.group(* reverse_average_op)
return average_op, use_average_op, reverse_average_op
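# Note: with mu = 1/avg_step, the update avg <- mu*v + (1-mu)*avg unrolls to the
# plain cumulative mean avg_t = (v_1 + ... + v_t) / t, so this is an equally
# weighted (Polyak-style) average of the weights, not an exponential one.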
def _build_valid(self):
print('Building valid graph')
_, loss = self._forward(self.x_valid, self.y_valid,
self.eval_params, self.batch_init_states)
self.valid_loss = loss
def _build_test(self):
print('Building test graph')
_, loss = self._forward(self.x_test, self.y_test,
self.eval_params, self.test_init_states)
self.test_loss = loss
def eval_valid(self, sess, use_moving_avg=False):
"""Eval 1 round on valid set."""
total_loss = 0
if use_moving_avg:
<reponame>dmitryvinn/watchman<filename>build/fbcode_builder/getdeps/cargo.py
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import re
import shutil
from .builder import BuilderBase
class CargoBuilder(BuilderBase):
def __init__(
self,
build_opts,
ctx,
manifest,
src_dir,
build_dir,
inst_dir,
build_doc,
workspace_dir,
manifests_to_build,
loader,
cargo_config_file,
) -> None:
super(CargoBuilder, self).__init__(
build_opts, ctx, manifest, src_dir, build_dir, inst_dir
)
self.build_doc = build_doc
self.ws_dir = workspace_dir
self.manifests_to_build = manifests_to_build and manifests_to_build.split(",")
self.loader = loader
self.cargo_config_file_subdir = cargo_config_file
def run_cargo(self, install_dirs, operation, args=None) -> None:
args = args or []
env = self._compute_env(install_dirs)
# Enable using nightly features with stable compiler
env["RUSTC_BOOTSTRAP"] = "1"
env["LIBZ_SYS_STATIC"] = "1"
cmd = [
"cargo",
operation,
"--workspace",
"-j%s" % self.num_jobs,
] + args
self._run_cmd(cmd, cwd=self.workspace_dir(), env=env)
def build_source_dir(self):
return os.path.join(self.build_dir, "source")
def workspace_dir(self):
return os.path.join(self.build_source_dir(), self.ws_dir or "")
def manifest_dir(self, manifest):
return os.path.join(self.build_source_dir(), manifest)
def recreate_dir(self, src, dst) -> None:
if os.path.isdir(dst):
shutil.rmtree(dst)
shutil.copytree(src, dst)
def cargo_config_file(self):
build_source_dir = self.build_dir
if self.cargo_config_file_subdir:
return os.path.join(build_source_dir, self.cargo_config_file_subdir)
else:
return os.path.join(build_source_dir, ".cargo", "config")
def _create_cargo_config(self):
cargo_config_file = self.cargo_config_file()
cargo_config_dir = os.path.dirname(cargo_config_file)
if not os.path.isdir(cargo_config_dir):
os.mkdir(cargo_config_dir)
print(f"Writing cargo config for {self.manifest.name} to {cargo_config_file}")
with open(cargo_config_file, "w+") as f:
f.write(
"""\
# Generated by getdeps.py
[build]
target-dir = '''{}'''
[net]
git-fetch-with-cli = true
[profile.dev]
debug = false
incremental = false
""".format(
self.build_dir.replace("\\", "\\\\")
)
)
# Point to vendored sources from getdeps manifests
dep_to_git = self._resolve_dep_to_git()
for _dep, git_conf in dep_to_git.items():
if "cargo_vendored_sources" in git_conf:
with open(cargo_config_file, "a") as f:
vendored_dir = git_conf["cargo_vendored_sources"].replace(
"\\", "\\\\"
)
f.write(
f"""
[source."{git_conf["repo_url"]}"]
directory = "{vendored_dir}"
"""
)
# Point to vendored crates.io if possible
try:
from .facebook.rust import vendored_crates
vendored_crates(self.build_opts, cargo_config_file)
except ImportError:
# This FB internal module isn't shipped to github,
# so just rely on cargo downloading crates on its own
pass
return dep_to_git
def _prepare(self, install_dirs, reconfigure):
build_source_dir = self.build_source_dir()
self.recreate_dir(self.src_dir, build_source_dir)
dep_to_git = self._create_cargo_config()
if self.ws_dir is not None:
self._patchup_workspace(dep_to_git)
def _build(self, install_dirs, reconfigure) -> None:
# _prepare has been run already. Actually do the build
build_source_dir = self.build_source_dir()
if self.manifests_to_build is None:
self.run_cargo(
install_dirs,
"build",
["--out-dir", os.path.join(self.inst_dir, "bin"), "-Zunstable-options"],
)
else:
for manifest in self.manifests_to_build:
self.run_cargo(
install_dirs,
"build",
[
"--out-dir",
os.path.join(self.inst_dir, "bin"),
"-Zunstable-options",
"--manifest-path",
self.manifest_dir(manifest),
],
)
self.recreate_dir(build_source_dir, os.path.join(self.inst_dir, "source"))
def run_tests(
self, install_dirs, schedule_type, owner, test_filter, retry, no_testpilot
) -> None:
if test_filter:
args = ["--", test_filter]
else:
args = []
if self.manifests_to_build is None:
self.run_cargo(install_dirs, "test", args)
if self.build_doc:
self.run_cargo(install_dirs, "doc", ["--no-deps"])
else:
for manifest in self.manifests_to_build:
margs = ["--manifest-path", self.manifest_dir(manifest)]
self.run_cargo(install_dirs, "test", args + margs)
if self.build_doc:
self.run_cargo(install_dirs, "doc", ["--no-deps"] + margs)
def _patchup_workspace(self, dep_to_git) -> None:
"""
This method makes some assumptions about the state of the project and
its cargo dependendies:
1. Crates from cargo dependencies can be extracted from Cargo.toml files
using _extract_crates function. It is using a heuristic so check its
code to understand how it is done.
2. The extracted cargo dependencies crates can be found in the
dependency's install dir using _resolve_crate_to_path function
which again is using a heuristic.
Notice that many things might go wrong here. E.g. if someone depends
on another getdeps crate by writing in their Cargo.toml file:
my-rename-of-crate = { package = "crate", git = "..." }
they can count themselves lucky because the code will raise an
Exception. There migh be more cases where the code will silently pass
producing bad results.
"""
workspace_dir = self.workspace_dir()
config = self._resolve_config(dep_to_git)
if config:
patch_cargo = os.path.join(workspace_dir, "Cargo.toml")
print(f"writing patch to {patch_cargo}")
with open(patch_cargo, "r+") as f:
manifest_content = f.read()
if "[package]" not in manifest_content:
# A fake manifest has to be created to change the virtual
# manifest into a non-virtual. The virtual manifests are limited
# in many ways and the inability to define patches on them is
# one. Check https://github.com/rust-lang/cargo/issues/4934 to
# see if it is resolved.
null_file = "/dev/null"
if self.build_opts.is_windows():
null_file = "nul"
f.write(
f"""
[package]
name = "fake_manifest_of_{self.manifest.name}"
version = "0.0.0"
[lib]
path = "{null_file}"
"""
)
else:
f.write("\n")
f.write(config)
def _resolve_config(self, dep_to_git) -> str:
"""
Returns a configuration to be put inside the root Cargo.toml file, which
patches the dependencies' git code with local getdeps versions.
See https://doc.rust-lang.org/cargo/reference/manifest.html#the-patch-section
"""
dep_to_crates = self._resolve_dep_to_crates(self.build_source_dir(), dep_to_git)
config = []
git_url_to_crates_and_paths = {}
for dep_name in sorted(dep_to_git.keys()):
git_conf = dep_to_git[dep_name]
req_crates = sorted(dep_to_crates.get(dep_name, []))
if not req_crates:
continue # nothing to patch, move along
git_url = git_conf.get("repo_url", None)
crate_source_map = git_conf["crate_source_map"]
if git_url and crate_source_map:
crates_to_patch_path = git_url_to_crates_and_paths.get(git_url, {})
for c in req_crates:
if c in crate_source_map and c not in crates_to_patch_path:
crates_to_patch_path[c] = crate_source_map[c]
print(
f"{self.manifest.name}: Patching crate {c} via virtual manifest in {self.workspace_dir()}"
)
if crates_to_patch_path:
git_url_to_crates_and_paths[git_url] = crates_to_patch_path
for git_url, crates_to_patch_path in git_url_to_crates_and_paths.items():
crates_patches = [
'{} = {{ path = "{}" }}'.format(
crate,
crates_to_patch_path[crate].replace("\\", "\\\\"),
)
for crate in sorted(crates_to_patch_path.keys())
]
config.append(f'\n[patch."{git_url}"]\n' + "\n".join(crates_patches))
return "\n".join(config)
def _resolve_dep_to_git(self):
"""
For each direct dependency of the currently built manifest, check if it
is also cargo-built and, if yes, extract its git config and
install dir
"""
dependencies = self.manifest.get_dependencies(self.ctx)
if not dependencies:
return []
dep_to_git = {}
for dep in dependencies:
dep_manifest = self.loader.load_manifest(dep)
dep_builder = dep_manifest.get("build", "builder", ctx=self.ctx)
dep_cargo_conf = dep_manifest.get_section_as_dict("cargo", self.ctx)
dep_crate_map = dep_manifest.get_section_as_dict("crate.pathmap", self.ctx)
if (
not (dep_crate_map or dep_cargo_conf)
and dep_builder not in ["cargo"]
or dep == "rust"
):
# This dependency has no cargo rust content so ignore it.
# The "rust" dependency is an exception since it contains the
# toolchain.
continue
git_conf = dep_manifest.get_section_as_dict("git", self.ctx)
if dep != "rust" and "repo_url" not in git_conf:
raise Exception(
f"{dep}: A cargo dependency requires git.repo_url to be defined."
)
if dep_builder == "cargo":
dep_source_dir = self.loader.get_project_install_dir(dep_manifest)
dep_source_dir = os.path.join(dep_source_dir, "source")
else:
fetcher = self.loader.create_fetcher(dep_manifest)
dep_source_dir = fetcher.get_src_dir()
crate_source_map = {}
if dep_crate_map:
for (crate, subpath) in dep_crate_map.items():
if crate not in crate_source_map:
if self.build_opts.is_windows():
subpath = subpath.replace("/", "\\")
crate_path = os.path.join(dep_source_dir, subpath)
print(
f"{self.manifest.name}: Mapped crate {crate} to dep {dep} dir {crate_path}"
)
crate_source_map[crate] = crate_path
elif dep_cargo_conf:
# We don't know what crates are defined by the dep, so look for them
search_pattern = re.compile('\\[package\\]\nname = "(.*)"')
for crate_root, _, files in os.walk(dep_source_dir):
if "Cargo.toml" in files:
with open(os.path.join(crate_root, "Cargo.toml"), "r") as f:
content = f.read()
match = search_pattern.search(content)
if match:
crate = match.group(1)
if crate:
print(
f"{self.manifest.name}: Discovered crate {crate} in dep {dep} dir {crate_root}"
)
crate_source_map[crate] = crate_root
git_conf["crate_source_map"] = crate_source_map
if not dep_crate_map and dep_cargo_conf:
dep_cargo_dir = self.loader.get_project_build_dir(dep_manifest)
dep_cargo_dir = os.path.join(dep_cargo_dir, "source")
dep_ws_dir = dep_cargo_conf.get("workspace_dir", None)
if dep_ws_dir:
dep_cargo_dir = os.path.join(dep_cargo_dir, dep_ws_dir)
git_conf["cargo_vendored_sources"] = dep_cargo_dir
dep_to_git[dep] = git_conf
return dep_to_git
def _resolve_dep_to_crates(self, build_source_dir, dep_to_git):
"""
This function traverses the build_source_dir in search of Cargo.toml
files, extracts the crate names from them using _extract_crates
function and returns a merged result containing crate names per
dependency name from all Cargo.toml files in the project.
"""
if not dep_to_git:
return {} # no deps, so don't waste time traversing files
dep_to_crates = {}
# First populate explicit crate paths from dependencies
for name, git_conf in dep_to_git.items():
crates = git_conf["crate_source_map"].keys()
if crates:
dep_to_crates.setdefault(name, set()).update(crates)
# Now find from Cargo.tomls
for root, _, files in os.walk(build_source_dir):
for f in files:
if f == "Cargo.toml":
more_dep_to_crates = CargoBuilder._extract_crates_used(
os.path.join(root, f), dep_to_git
)
for dep_name, crates in more_dep_to_crates.items():
existing_crates = dep_to_crates.get(dep_name, set())
for c in crates:
if c not in existing_crates:
print(
f"Patch {self.manifest.name} uses {dep_name} crate {crates}"
)
existing_crates.add(c)
dep_to_crates.setdefault(dep_name, set()).update(existing_crates)
return dep_to_crates
@staticmethod
def _extract_crates_used(cargo_toml_file, dep_to_git):
"""
This function reads the content of the provided Cargo.toml file and extracts
crate names for each dependency. The extraction is done by a heuristic,
so it might be incorrect.
"""
deps_to_crates = {}
with open(cargo_toml_file, "r") as f:
for line in f.readlines():
if line.startswith("#") or "git = " not in line:
continue # filter out commented lines and ones without git deps
for dep_name, conf in dep_to_git.items():
# Only redirect deps that point to git URLS
if 'git = "{}"'.format(conf["repo_url"]) in line:
pkg_template = ' package | |
x = 0
y = 0
# todo: enable/disable bar graph here
if not self.enable_barGraph:
# float_value = ((lenght / (self.value_max - self.value_min)) * (self.value - self.value_min))
lenght = int(round((lenght / (self.value_max - self.value_min)) * (self.value - self.value_min)))
# print("f: %s, l: %s" %(float_value, lenght))
pass
# mymax = 0
for i in range(lenght+1): # add the points of polygon
t = w * i + start - self.angle_offset
x = outer_radius * math.cos(math.radians(t))
y = outer_radius * math.sin(math.radians(t))
polygon_pie.append(QPointF(x, y))
# create inner circle line from "start + lenght"-angle to "start"-angle
for i in range(lenght+1): # add the points of polygon
# print("2 " + str(i))
t = w * (lenght - i) + start - self.angle_offset
x = inner_raduis * math.cos(math.radians(t))
y = inner_raduis * math.sin(math.radians(t))
polygon_pie.append(QPointF(x, y))
# close outer line
polygon_pie.append(QPointF(x, y))
return polygon_pie
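# Geometry note: each outer point lies at angle t = w*i + start - angle_offset,
# i.e. (outer_radius*cos(t), outer_radius*sin(t)); walking the outer arc forward
# and the inner arc backward closes an annular-sector ("pie") polygon.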
def draw_filled_polygon(self, outline_pen_with=0):
if self.scale_polygon_colors is not None:
painter_filled_polygon = QPainter(self)
painter_filled_polygon.setRenderHint(QPainter.Antialiasing)
# Move the coordinate origin to the center of the widget area
painter_filled_polygon.translate(self.width() / 2, self.height() / 2)
painter_filled_polygon.setPen(Qt.NoPen)
self.pen.setWidth(outline_pen_with)
if outline_pen_with > 0:
painter_filled_polygon.setPen(self.pen)
colored_scale_polygon = self.create_polygon_pie(
((self.widget_diameter / 2) - (self.pen.width() / 2)) * self.gauge_color_outer_radius_factor,
(((self.widget_diameter / 2) - (self.pen.width() / 2)) * self.gauge_color_inner_radius_factor),
self.scale_angle_start_value, self.scale_angle_size)
gauge_rect = QRect(QPoint(0, 0), QSize(self.widget_diameter / 2 - 1, self.widget_diameter - 1))
grad = QConicalGradient(QPointF(0, 0), - self.scale_angle_size - self.scale_angle_start_value +
self.angle_offset - 1)
# todo: define the scale colors as an array here
for eachcolor in self.scale_polygon_colors:
grad.setColorAt(eachcolor[0], eachcolor[1])
# grad.setColorAt(.00, Qt.red)
# grad.setColorAt(.1, Qt.yellow)
# grad.setColorAt(.15, Qt.green)
# grad.setColorAt(1, Qt.transparent)
painter_filled_polygon.setBrush(grad)
# self.brush = QBrush(QColor(255, 0, 255, 255))
# painter_filled_polygon.setBrush(self.brush)
painter_filled_polygon.drawPolygon(colored_scale_polygon)
# return painter_filled_polygon
###############################################################################################
# Scale Marker
###############################################################################################
def draw_big_scaled_markter(self):
my_painter = QPainter(self)
my_painter.setRenderHint(QPainter.Antialiasing)
# Move the coordinate origin to the center of the widget area
my_painter.translate(self.width() / 2, self.height() / 2)
# my_painter.setPen(Qt.NoPen)
self.pen = QPen(QColor(0, 0, 0, 255))
self.pen.setWidth(2)
# # if outline_pen_with > 0:
my_painter.setPen(self.pen)
my_painter.rotate(self.scale_angle_start_value - self.angle_offset)
steps_size = (float(self.scale_angle_size) / float(self.scala_main_count))
scale_line_outer_start = self.widget_diameter/2
scale_line_lenght = (self.widget_diameter / 2) - (self.widget_diameter / 20)
# print(stepszize)
for i in range(self.scala_main_count+1):
my_painter.drawLine(scale_line_lenght, 0, scale_line_outer_start, 0)
my_painter.rotate(steps_size)
def create_scale_marker_values_text(self):
painter = QPainter(self)
# painter.setRenderHint(QPainter.HighQualityAntialiasing)
painter.setRenderHint(QPainter.Antialiasing)
# Move the coordinate origin to the center of the widget area
painter.translate(self.width() / 2, self.height() / 2)
# painter.save()
font = QFont(self.scale_fontname, self.scale_fontsize)
fm = QFontMetrics(font)
pen_shadow = QPen()
pen_shadow.setBrush(self.ScaleValueColor)
painter.setPen(pen_shadow)
text_radius_factor = 0.8
text_radius = self.widget_diameter/2 * text_radius_factor
scale_per_div = int((self.value_max - self.value_min) / self.scala_main_count)
angle_distance = (float(self.scale_angle_size) / float(self.scala_main_count))
for i in range(self.scala_main_count + 1):
# text = str(int((self.value_max - self.value_min) / self.scala_main_count * i))
text = str(int(self.value_min + scale_per_div * i))
w = fm.width(text) + 1
h = fm.height()
painter.setFont(QFont(self.scale_fontname, self.scale_fontsize))
angle = angle_distance * i + float(self.scale_angle_start_value - self.angle_offset)
x = text_radius * math.cos(math.radians(angle))
y = text_radius * math.sin(math.radians(angle))
# print(w, h, x, y, text)
text = [x - int(w/2), y - int(h/2), int(w), int(h), Qt.AlignCenter, text]
painter.drawText(text[0], text[1], text[2], text[3], text[4], text[5])
# painter.restore()
def create_fine_scaled_marker(self):
# Description_dict = 0
my_painter = QPainter(self)
my_painter.setRenderHint(QPainter.Antialiasing)
# Koordinatenursprung in die Mitte der Flaeche legen
my_painter.translate(self.width() / 2, self.height() / 2)
my_painter.setPen(Qt.black)
my_painter.rotate(self.scale_angle_start_value - self.angle_offset)
steps_size = (float(self.scale_angle_size) / float(self.scala_main_count * self.scala_subdiv_count))
scale_line_outer_start = self.widget_diameter/2
scale_line_lenght = (self.widget_diameter / 2) - (self.widget_diameter / 40)
for i in range((self.scala_main_count * self.scala_subdiv_count)+1):
my_painter.drawLine(scale_line_lenght, 0, scale_line_outer_start, 0)
my_painter.rotate(steps_size)
def create_values_text(self):
painter = QPainter(self)
# painter.setRenderHint(QPainter.HighQualityAntialiasing)
painter.setRenderHint(QPainter.Antialiasing)
# Move the coordinate origin to the center of the widget area
painter.translate(self.width() / 2, self.height() / 2)
# painter.save()
# xShadow = 3.0
# yShadow = 3.0
font = QFont(self.value_fontname, self.value_fontsize)
fm = QFontMetrics(font)
pen_shadow = QPen()
pen_shadow.setBrush(self.DisplayValueColor)
painter.setPen(pen_shadow)
text_radius = self.widget_diameter / 2 * self.text_radius_factor
# angle_distance = (float(self.scale_angle_size) / float(self.scala_main_count))
# for i in range(self.scala_main_count + 1):
text = str(int(self.value))
w = fm.width(text) + 1
h = fm.height()
painter.setFont(QFont(self.value_fontname, self.value_fontsize))
# Midpoint between scale start and scale end:
# scale end = scale start - 360 + scale length
# scale midpoint = (scale end - scale start) / 2 + scale start
angle_end = float(self.scale_angle_start_value + self.scale_angle_size - 360)
angle = (angle_end - self.scale_angle_start_value) / 2 + self.scale_angle_start_value
x = text_radius * math.cos(math.radians(angle))
y = text_radius * math.sin(math.radians(angle))
# print(w, h, x, y, text)
painter.drawText(int(x - w / 2), int(y - h / 2), int(w), int(h), Qt.AlignCenter, text)
# painter.restore()
def draw_big_needle_center_point(self, diameter=30):
painter = QPainter(self)
# painter.setRenderHint(QtGui.QPainter.HighQualityAntialiasing)
painter.setRenderHint(QPainter.Antialiasing)
# Move the coordinate origin to the center of the widget area
painter.translate(self.width() / 2, self.height() / 2)
painter.setPen(Qt.NoPen)
# painter.setPen(Qt.NoPen)
painter.setBrush(self.CenterPointColor)
# diameter = diameter # self.widget_diameter/6
painter.drawEllipse(int(-diameter / 2), int(-diameter / 2), int(diameter), int(diameter))
def draw_needle(self):
painter = QPainter(self)
# painter.setRenderHint(QtGui.QPainter.HighQualityAntialiasing)
painter.setRenderHint(QPainter.Antialiasing)
# Move the coordinate origin to the center of the widget area
painter.translate(self.width() / 2, self.height() / 2)
painter.setPen(Qt.NoPen)
painter.setBrush(self.NeedleColor)
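# Map the current value linearly onto the scale's angular span;
# the extra 90 deg compensates for the base orientation of the needle polygon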
painter.rotate(((self.value - self.value_offset - self.value_min) * self.scale_angle_size /
(self.value_max - self.value_min)) + 90 + self.scale_angle_start_value)
painter.drawConvexPolygon(self.value_needle[0])
###############################################################################################
# Events
###############################################################################################
def resizeEvent(self, event):
# self.resized.emit()
# return super(self.parent, self).resizeEvent(event)
# print("resized")
# print(self.width())
self.rescale_method()
# self.emit(QtCore.SIGNAL("resize()"))
# print("resizeEvent")
def paintEvent(self, event):
# Main Drawing Event:
# Will be executed on every change
# cf. http://doc.qt.io/qt-4.8/qt-demos-affine-xform-cpp.html
# print("event", event)
# colored pie area
if self.enable_filled_Polygon:
self.draw_filled_polygon()
# draw scale marker lines
if self.enable_fine_scaled_marker:
self.create_fine_scaled_marker()
if self.enable_big_scaled_marker:
self.draw_big_scaled_markter()
# draw scale marker value text
if self.enable_scale_text:
self.create_scale_marker_values_text()
# Display Value
if self.enable_value_text:
self.create_values_text()
# draw needle 1
if self.enable_Needle_Polygon:
self.draw_needle()
# Draw Center Point
if self.enable_CenterPoint:
self.draw_big_needle_center_point(diameter=(self.widget_diameter / 6))
###############################################################################################
# MouseEvents
###############################################################################################
def setMouseTracking(self, flag):
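# Propagate mouse tracking to every child widget so mouseMoveEvent fires without a pressed button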
def recursive_set(parent):
for child in parent.findChildren(QObject):
try:
child.setMouseTracking(flag)
except Exception:
pass
recursive_set(child)
QWidget.setMouseTracking(self, flag)
recursive_set(self)
def mouseReleaseEvent(self, QMouseEvent):
# print("released")
self.NeedleColor = self.NeedleColorReleased
if not self.use_timer_event:
self.update()
pass
def mouseMoveEvent(self, event):
x, y = event.x() - (self.width() / 2), event.y() - (self.height() / 2)
if x != 0:
angle = math.atan2(y, x) / math.pi * 180
# angular position on the gauge, kept positive in 0 - 360 deg
# value = minimum value + rescaled angular fraction
value = (float(math.fmod(angle - self.scale_angle_start_value + 720, 360)) / \
(float(self.scale_angle_size) / float(self.value_max - self.value_min))) + self.value_min
temp = value
fmod = float(math.fmod(angle - self.scale_angle_start_value + 720, 360))
state = 0
if (self.value - (self.value_max - self.value_min) * self.value_needle_snapzone) <= \
value <= \
(self.value + (self.value_max - self.value_min) * self.value_needle_snapzone):
self.NeedleColor = self.NeedleColorDrag
# todo: possibly double-check this
#
state = 9
if value >= self.value_max and self.last_value < (self.value_max - self.value_min) / 2:
state = 1
value = self.value_max
self.last_value = self.value_min
self.valueChanged.emit(int(value))
elif value >= self.value_max >= self.last_value:
state = 2
value = self.value_max
self.last_value = self.value_max
self.valueChanged.emit(int(value))
else:
state = 3
self.last_value = value
self.valueChanged.emit(int(value))
# todo: mouse event debug output
# self.update_value(value, mouse_controlled=True)
# self.valueChanged.emit(int(value))
# print(str(int(value)))
# self.valueChanged.emit()
# todo: convert print to logging debug
# print('mouseMoveEvent: x=%d, y=%d, a=%s, v=%s, fmod=%s, temp=%s, state=%s' % (
# x, y, angle, value, fmod, temp, state))
# def createPoly(self, n, r, s):
# polygon = QPolygonF()
#
# w = 360/n # angle per step
# for i in range(n): # add the points of polygon
# t = w*i + s
# x = r*math.cos(math.radians(t))
# y = r*math.sin(math.radians(t))
# # polygon.append(QtCore.QPointF(self.width()/2 +x, self.height()/2 + y))
# polygon.append(QtCore.QPointF(x, y))
#
# return polygon
################################################################################################
# DEMO Routine
# required: analoggaugewidget_demo.ui
# compile analoggaugewidget_demo.ui -> analoggaugewidget_demo_ui.py
# show a lot of variables and possibilities for modification
################################################################################################
if __name__ == '__main__':
def main():
import sys
app = QApplication(sys.argv)
my_gauge = AnalogGaugeWidget()
my_gauge.show()
sys.exit(app.exec_())
class mainclass():
# constructor
def __init__(self):
import os # Used in Testing Script
import sys
if used_Qt_Version == 4:
print("Compile QUI for Qt Version: " + str(used_Qt_Version))
os.system("pyuic4 -o analoggaugewidget_demo_ui.py analoggaugewidget_demo.ui")
elif used_Qt_Version == 5:
print("Compile QUI for Qt Version: " + str(used_Qt_Version))
os.system("pyuic5 -o analoggaugewidget_demo_ui.py analoggaugewidget_demo.ui")
from analoggaugewidget_demo_ui import Ui_MainWindow
self.app = QApplication(sys.argv)
window = QMainWindow()
self.my_gauge = Ui_MainWindow()
self.my_gauge.setupUi(window)
window.show()
self.my_gauge.widget.enable_barGraph = True
self.my_gauge.widget.value_needle_snapzone = 1
self.my_gauge.widget.value_min = 0
self.my_gauge.widget.value_max = 1100
self.my_gauge.widget.scala_main_count = 11
self.my_gauge.ActualSlider.setMaximum(self.my_gauge.widget.value_max)
self.my_gauge.ActualSlider.setMinimum(self.my_gauge.widget.value_min)
self.my_gauge.AussenRadiusSlider.setValue(self.my_gauge.widget.gauge_color_outer_radius_factor * 1000)
self.my_gauge.InnenRadiusSlider.setValue(self.my_gauge.widget.gauge_color_inner_radius_factor * 1000)
self.my_gauge.GaugeStartSlider.setValue(self.my_gauge.widget.scale_angle_start_value)
self.my_gauge.GaugeSizeSlider.setValue(self.my_gauge.widget.scale_angle_size)
# set Start Value
# self.my_gauge.widget.update_value(self.my_gauge.widget.value_min)
self.my_gauge.widget.update_value(int((self.my_gauge.widget.value_max - self.my_gauge.widget.value_min) / 2))
################################################################################################
# Adjust the needle color
# react to slider changes
glean request: 0
'''
}
golden_output2 = {'execute.return_value': '''
+++ XR1: executing command 'show ipv6 vrf all interface' +++
show ipv6 vrf all interface
Fri Sep 6 09:50:49.892 EST
BVI100 is Up, ipv6 protocol is Up, Vrfid is default (0x60000000)
IPv6 is disabled, link-local address unassigned
No global unicast address is configured
BVI1401 is Down, ipv6 protocol is Down, Vrfid is default (0x60000000)
IPv6 is enabled, link-local address is fe80::259:14ff:feff:1 [TENTATIVE]
No global unicast address is configured
Joined group address(es): fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b fffd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b
MTU is 1514 (1500 is available to IPv6)
ICMP redirects are disabled
ICMP unreachables are enabled
ND DAD is enabled, number of DAD attempts 1
ND reachable time is 0 milliseconds
ND cache entry limit is 1000000000
ND advertised retransmit interval is 0 milliseconds
ND router advertisements are sent every 160 to 240 seconds
ND router advertisements live for 1800 seconds
Hosts use stateless autoconfig for addresses.
Outgoing access list is not set
Inbound common access list is not set, access list is not set
Table Id is 0xe0800000
Complete protocol adjacency: 0
Complete glean adjacency: 0
Incomplete protocol adjacency: 0
Incomplete glean adjacency: 0
Dropped protocol request: 0
Dropped glean request: 0
BVI1403 is Down, ipv6 protocol is Down, Vrfid is default (0x60000000)
IPv6 is enabled, link-local address is fe80::259:14ff:feff:304 [TENTATIVE]
Global unicast address(es):
2001:60:1403::1, subnet is 2001:60:1403::/64 [TENTATIVE]
Joined group address(es): fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b fffd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b
MTU is 1514 (1500 is available to IPv6)
ICMP redirects are disabled
ICMP unreachables are enabled
ND DAD is enabled, number of DAD attempts 1
ND reachable time is 0 milliseconds
ND cache entry limit is 1000000000
ND advertised retransmit interval is 0 milliseconds
ND router advertisements are sent every 160 to 240 seconds
ND router advertisements live for 1800 seconds
Hosts use stateless autoconfig for addresses.
Outgoing access list is not set
Inbound common access list is not set, access list is not set
Table Id is 0xe0800000
Complete protocol adjacency: 0
Complete glean adjacency: 0
Incomplete protocol adjacency: 0
Incomplete glean adjacency: 0
Dropped protocol request: 0
Dropped glean request: 0
BVI1405 is Down, ipv6 protocol is Down, Vrfid is default (0x60000000)
IPv6 is enabled, link-local address is fe80::259:14ff:feff:506 [TENTATIVE]
Global unicast address(es):
2001:59:1405::1, subnet is 2001:59:1405::/64 [TENTATIVE]
Joined group address(es): fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b fffd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b
MTU is 1514 (1500 is available to IPv6)
ICMP redirects are disabled
ICMP unreachables are enabled
ND DAD is enabled, number of DAD attempts 1
ND reachable time is 0 milliseconds
ND cache entry limit is 1000000000
ND advertised retransmit interval is 0 milliseconds
ND router advertisements are sent every 160 to 240 seconds
ND router advertisements live for 1800 seconds
Hosts use stateless autoconfig for addresses.
Outgoing access list is not set
Inbound common access list is not set, access list is not set
Table Id is 0xe0800000
Complete protocol adjacency: 0
Complete glean adjacency: 0
Incomplete protocol adjacency: 0
Incomplete glean adjacency: 0
Dropped protocol request: 0
Dropped glean request: 0
BVI1407 is Down, ipv6 protocol is Down, Vrfid is default (0x60000000)
IPv6 is enabled, link-local address is fe80::259:14ff:feff:708 [TENTATIVE]
Global unicast address(es):
2001:60:1407::1, subnet is 2001:60:1407::/64 [TENTATIVE]
Joined group address(es): fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b fffd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b
MTU is 1514 (1500 is available to IPv6)
ICMP redirects are disabled
ICMP unreachables are enabled
ND DAD is enabled, number of DAD attempts 1
ND reachable time is 0 milliseconds
ND cache entry limit is 1000000000
ND advertised retransmit interval is 0 milliseconds
ND router advertisements are sent every 160 to 240 seconds
ND router advertisements live for 1800 seconds
Hosts use stateless autoconfig for addresses.
Outgoing access list is not set
Inbound common access list is not set, access list is not set
Table Id is 0xe0800000
Complete protocol adjacency: 0
Complete glean adjacency: 0
Incomplete protocol adjacency: 0
Incomplete glean adjacency: 0
Dropped protocol request: 0
Dropped glean request: 0
BVI1410 is Down, ipv6 protocol is Down, Vrfid is default (0x60000000)
IPv6 is enabled, link-local address is fe80::259:14ff:feff:1011 [TENTATIVE]
Global unicast address(es):
2001:60:1410::1, subnet is 2001:60:1410::/64 [TENTATIVE]
Joined group address(es): fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b fffd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b
MTU is 1514 (1500 is available to IPv6)
ICMP redirects are disabled
ICMP unreachables are enabled
ND DAD is enabled, number of DAD attempts 1
ND reachable time is 0 milliseconds
ND cache entry limit is 1000000000
ND advertised retransmit interval is 0 milliseconds
ND router advertisements are sent every 160 to 240 seconds
ND router advertisements live for 1800 seconds
Hosts use stateless autoconfig for addresses.
Outgoing access list is not set
Inbound common access list is not set, access list is not set
Table Id is 0xe0800000
Complete protocol adjacency: 0
Complete glean adjacency: 0
Incomplete protocol adjacency: 0
Incomplete glean adjacency: 0
Dropped protocol request: 0
Dropped glean request: 0
Bundle-Ether1 is Up, ipv6 protocol is Up, Vrfid is default (0x60000000)
IPv6 is enabled, link-local address is fe80::2bc:60ff:fe38:a8dc
Global unicast address(es):
2001:db8:79b7:fc34::10, subnet is 2001:db8:79b7:fc34::/64
Joined group address(es): fc00:e968:6179::de52:7100:ff00:10 fc00:e968:6179::de52:7100:ff38:a8dc fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b
fc00:e968:6179::de52:7100
MTU is 1514 (1500 is available to IPv6)
ICMP redirects are disabled
ICMP unreachables are enabled
ND DAD is enabled, number of DAD attempts 1
ND reachable time is 0 milliseconds
ND cache entry limit is 1000000000
ND advertised retransmit interval is 0 milliseconds
ND router advertisements are sent every 160 to 240 seconds
ND router advertisements live for 1800 seconds
Hosts use stateless autoconfig for addresses.
Outgoing access list is not set
Inbound common access list is not set, access list is not set
Table Id is 0xe0800000
Complete protocol adjacency: 0
Complete glean adjacency: 0
Incomplete protocol adjacency: 0
Incomplete glean adjacency: 0
Dropped protocol request: 0
Dropped glean request: 0
Bundle-Ether100 is Up, ipv6 protocol is Up, Vrfid is default (0x60000000)
IPv6 is disabled, link-local address unassigned
No global unicast address is configured
Bundle-Ether1001 is Down, ipv6 protocol is Down, Vrfid is default (0x60000000)
IPv6 is disabled, link-local address unassigned
No global unicast address is configured
Bundle-Ether100.12 is Up, ipv6 protocol is Up, Vrfid is default (0x60000000)
IPv6 is disabled, link-local address unassigned
No global unicast address is configured
Bundle-Ether100.22 is Up, ipv6 protocol is Up, Vrfid is default (0x60000000)
IPv6 is disabled, link-local address unassigned
No global unicast address is configured
Bundle-Ether100.32 is Up, ipv6 protocol is Up, Vrfid is default (0x60000000)
IPv6 is disabled, link-local address unassigned
No global unicast address is configured
Bundle-Ether1001.100 is Down, ipv6 protocol is Down, Vrfid is default (0x60000000)
IPv6 is disabled, link-local address unassigned
No global unicast address is configured
Bundle-Ether1001.1400 is Down, ipv6 protocol is Down, Vrfid is default (0x60000000)
IPv6 is disabled, link-local address unassigned
No global unicast address is configured
Bundle-Ether1001.1402 is Down, ipv6 protocol is Down, Vrfid is default (0x60000000)
IPv6 is disabled, link-local address unassigned
No global unicast address is configured
Bundle-Ether1001.1404 is Down, ipv6 protocol is Down, Vrfid is default (0x60000000)
IPv6 is disabled, link-local address unassigned
No global unicast address is configured
Bundle-Ether1001.1406 is Down, ipv6 protocol is Down, Vrfid is default (0x60000000)
IPv6 is disabled, link-local address unassigned
No global unicast address is configured
Bundle-Ether1001.1410 is Down, ipv6 protocol is Down, Vrfid is default (0x60000000)
IPv6 is disabled, link-local address unassigned
No global unicast address is configured
Loopback0 is Up, ipv6 protocol is Up, Vrfid is default (0x60000000)
IPv6 is enabled, link-local address is fe80::d121:1bff:fea4:a9f7
Global unicast address(es):
2001:db8:100::10, subnet is 2001:db8:100::10/128
Joined group address(es): fc00:e968:6179::de52:7100:ff00:10 fffd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b:ffa4:a9f7 fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b
fc00:e968:6179::de52:7100
MTU is 1500 (1500 is available to IPv6)
ICMP redirects are disabled
ICMP unreachables are always on
ND DAD is disabled, number of DAD attempts 0
ND reachable time is 0 milliseconds
ND cache entry limit is 0
ND advertised retransmit interval is 0 milliseconds
Hosts use stateless autoconfig for addresses.
Outgoing access list is not set
Inbound common access list is not set, access list is not set
Table Id is 0xe0800000
self.trackReader.changeList
if change.frame not in self.trackReader.undone_changes
]
self.changelist_widget.setChangeList(change_list)
self.changelist_widget.setVisible(True)
@qc.pyqtSlot()
def saveChangeList(self):
if self.trackReader is None:
return
fname, _ = qw.QFileDialog.getSaveFileName(
self, 'Save list of changes', filter='Text (*.csv)'
)
if len(fname) > 0:
self.trackReader.saveChangeList(fname)
@qc.pyqtSlot()
def loadChangeList(self):
if self.trackReader is None:
return
fname, _ = qw.QFileDialog.getOpenFileName(
self, 'Load list of changes', filter='Text (*.csv *.txt)'
)
if len(fname) > 0:
self.trackReader.loadChangeList(fname)
@qc.pyqtSlot(bool)
def toggleSideBySideView(self, checked: bool) -> None:
self.leftView.setVisible(checked)
settings.setValue('review/sidebyside', checked)
if checked:
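# disconnect() raises if the connection does not exist; ignore so the toggle stays idempotent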
try:
self.sigProjectTrackHistLeft.disconnect(
self.rightView.frameScene.showTrackHist
)
except Exception:
pass
try:
self.left_list.sigSelected.disconnect(
self.rightView.sigSelected
)
except Exception:
pass
try:
self.all_list.sigSelected.disconnect(
self.rightView.sigSelected
)
except Exception:
pass
else:
self.sigProjectTrackHistLeft.connect(
self.rightView.frameScene.showTrackHist
)
self.left_list.sigSelected.connect(self.rightView.sigSelected)
self.all_list.sigSelected.connect(self.rightView.sigSelected)
@qc.pyqtSlot(qg.QPolygonF)
def setRoi(self, roi: qg.QPolygonF) -> None:
self.roi = roi
@qc.pyqtSlot()
def resetRoi(self) -> None:
self.roi = None
@qc.pyqtSlot()
def undoCurrentChanges(self):
self.sigUndoCurrentChanges.emit(self.frame_no)
@qc.pyqtSlot(np.ndarray, int)
def setFrame(self, frame: np.ndarray, pos: int) -> None:
logging.debug(f'Received frame: {pos}')
self.slider.blockSignals(True)
self.slider.setValue(pos)
self.slider.blockSignals(False)
self.pos_spin.blockSignals(True)
self.pos_spin.setValue(pos)
self.pos_spin.blockSignals(False)
tracks = self.trackReader.getTracks(pos)
# ts = time.perf_counter_ns()
if self.roi is not None:
# flag tracks outside ROI
keys = list(tracks.keys())
for tid in keys:
bbox = qg.QPolygonF(qc.QRectF(*tracks[tid][:4]))
if not self.roi.intersects(bbox):
self.trackReader.deleteTrack(self.frame_no, tid)
tracks.pop(tid)
# te = time.perf_counter_ns()
# print(f'Time with roi "{self.roi}": {(te - ts) * 1e-6} ms')
self.old_all_tracks = self.all_tracks.copy()
self.all_tracks = self._flag_tracks(self.all_tracks, tracks)
self.sigAllTracksList.emit(list(self.all_tracks.keys()))
if self.disableSeekAction.isChecked():
# Going sequentially through frames - copy right to left
self.frame_no = pos
self.left_frame = self.right_frame
if self.left_frame is not None:
self.sigLeftFrame.emit(self.left_frame, pos - 1)
self.right_frame = frame
if (
self.overlayAction.isChecked()
and (self.left_frame is not None)
and len(frame.shape) == 3
):
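# compose a false-color overlay: current frame in one channel, previous (left)
# frame in another, so displaced objects show up as color fringes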
tmp = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
frame = np.zeros(
(frame.shape[0], frame.shape[1], 3), dtype=np.uint8
)
frame[:, :, 1] = 0
frame[:, :, 0] = tmp
frame[:, :, 2] = cv2.cvtColor(
self.left_frame, cv2.COLOR_BGR2GRAY
)
if self.invertOverlayColorAction.isChecked():
frame = 255 - frame
self.sigRightFrame.emit(frame, pos)
self.left_tracks = self.right_tracks
self.right_tracks = self._flag_tracks({}, tracks)
if self.showOldTracksAction.isChecked():
self.sigLeftTracks.emit(self.old_all_tracks)
self.sigRightTracks.emit(self.all_tracks)
else:
self.sigLeftTracks.emit(self.left_tracks)
self.sigRightTracks.emit(self.right_tracks)
self.sigLeftTrackList.emit(list(self.left_tracks.keys()))
self.sigRightTrackList.emit(list(self.right_tracks.keys()))
elif pos == self.frame_no - 1:
logging.debug(f'Received left frame: {pos}')
self.left_frame = frame
self.sigLeftFrame.emit(self.left_frame, pos)
self.left_tracks = self._flag_tracks({}, tracks)
if self.showOldTracksAction.isChecked():
self.sigLeftTracks.emit(self.all_tracks)
else:
self.sigLeftTracks.emit(self.left_tracks)
self.sigLeftTrackList.emit(list(self.left_tracks.keys()))
self._wait_cond.set()
return # do not show popup message for old frame
elif pos == self.frame_no:
logging.debug(f'right frame: {pos}')
self.right_frame = frame
if (
self.overlayAction.isChecked()
and (self.left_frame is not None)
and len(frame.shape) == 3
):
tmp = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
frame = np.zeros(
(frame.shape[0], frame.shape[1], 3), dtype=np.uint8
)
frame[:, :, 1] = 0
frame[:, :, 0] = tmp
frame[:, :, 2] = cv2.cvtColor(
self.left_frame, cv2.COLOR_BGR2GRAY
)
if self.invertOverlayColorAction.isChecked():
frame = 255 - frame
self.sigRightFrame.emit(frame, pos)
self.right_tracks = self._flag_tracks({}, tracks)
if self.showOldTracksAction.isChecked():
self.sigRightTracks.emit(self.all_tracks)
else:
self.sigRightTracks.emit(self.right_tracks)
self.sigRightTrackList.emit(list(self.right_tracks.keys()))
# Pause if there is a mismatch with the earlier tracks
else:
self._wait_cond.set()
raise Exception('This should not be reached')
self.breakpointMessage(pos)
self.entryExitMessage(self.left_tracks, self.right_tracks)
message = self._get_diff(self.showNewAction.isChecked())
if len(message) > 0:
if (
self.showDifferenceAction.isChecked()
or self.showNewAction.isChecked()
):
self.play_button.setChecked(False)
self.playVideo(False)
qw.QMessageBox.information(self, 'Track mismatch', message)
self.sigDiffMessage.emit(message)
self._wait_cond.set()
logging.debug('wait condition set')
def _get_diff(self, show_new):
right_keys = set(self.right_tracks.keys())
all_keys = set(self.old_all_tracks.keys())
new = right_keys - all_keys
if show_new:
if len(new) > 0:
return f'Frame {self.frame_no - 1}-{self.frame_no}: New track on right: {new}.'
return ''
left_keys = set(self.left_tracks.keys())
if left_keys != right_keys:
# logging.info(f'Tracks don\'t match between frames {self.frame_no - 1} '
# f'and {self.frame_no}: '
# f'{left_keys.symmetric_difference(right_keys)}')
left_only = left_keys - right_keys
left_message = (
f'Tracks only on left: {left_only}.'
if len(left_only) > 0
else ''
)
right_only = right_keys - left_keys
right_message = (
f'Tracks only on right: {right_only}.'
if len(right_only) > 0
else ''
)
if len(new) > 0:
right_message += f'New tracks: <b>{new}</b>'
return f'Frame {self.frame_no - 1}-{self.frame_no}: {left_message} {right_message}'
else:
return ''
def _setPathCmap(self, side):
cmap, accept = qw.QInputDialog.getItem(
self,
'Select colormap',
'Colormap',
[
'jet',
'viridis',
'rainbow',
'autumn',
'summer',
'winter',
'spring',
'cool',
'hot',
],
)
logging.debug(f'Setting colormap to {cmap}')
if not accept:
return
self.pathCmap[side] = cmap
settings.setValue(f'review/path_cmap_{side}', cmap)
@qc.pyqtSlot()
def setPathCmapLeft(self):
self._setPathCmap('left')
@qc.pyqtSlot()
def setPathCmapRight(self):
self._setPathCmap('right')
@qc.pyqtSlot()
def openTrackedData(self):
datadir = settings.value('data/directory', '.')
track_filename, filter = qw.QFileDialog.getOpenFileName(
self,
'Open tracked data',
datadir,
filter='HDF5 (*.h5 *.hdf);; Text (*.csv *.txt);; All files (*)',
)
logging.debug(f'filename:{track_filename}\nselected filter:{filter}')
if len(track_filename) == 0:
return
viddir = os.path.dirname(track_filename)
vid_filename, vfilter = qw.QFileDialog.getOpenFileName(
self,
f'Open video for {os.path.basename(track_filename)}',
viddir,
filter='Video (*.avi *.mp4 *.mpg *.mpeg *.ogg *.webm *.wmv *.mov);;'
' All files (*)',
)
logging.debug(f'filename:{vid_filename}\nselected filter:{vfilter}')
if len(vid_filename) == 0:
return
# fmt = 'csv' if filter.startswith('Text') else 'hdf'
self.setupReading(vid_filename, track_filename)
def setupReading(self, video_path, data_path):
try:
self.video_reader = VideoReader(video_path, self._wait_cond)
except IOError as e:
qw.QMessageBox.critical(
self,
'Error opening video',
f'Could not open video: {video_path}\n' f'{e}',
)
return False
self.trackReader = TrackReader(data_path)
self.video_filename = video_path
self.track_filename = data_path
settings.setValue(
'data/directory', os.path.dirname(self.track_filename)
)
settings.setValue(
'video/directory', os.path.dirname(self.video_filename)
)
self.vid_info.vidfile.setText(self.video_filename)
self.breakpoint = self.video_reader.frame_count
self.vid_info.frames.setText(f'{self.video_reader.frame_count}')
self.vid_info.fps.setText(f'{self.video_reader.fps}')
self.vid_info.frame_width.setText(f'{self.video_reader.frame_width}')
self.vid_info.frame_height.setText(f'{self.video_reader.frame_height}')
self.leftView.clearAll()
self.leftView.update()
self.all_tracks.clear()
self.left_list.clear()
self.right_list.clear()
self.left_tracks = {}
self.right_tracks = {}
self.left_frame = None
self.right_frame = None
self.history_length = self.trackReader.last_frame
self.leftView.frameScene.setHistGradient(self.history_length)
self.rightView.frameScene.setHistGradient(self.history_length)
self.rightView.resetArenaAction.trigger()
self.lim_widget.sigWmin.connect(self.trackReader.setWmin)
self.lim_widget.sigWmax.connect(self.trackReader.setWmax)
self.lim_widget.sigHmin.connect(self.trackReader.setHmin)
self.lim_widget.sigHmax.connect(self.trackReader.setHmax)
if self.disableSeekAction.isChecked():
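# sequential mode: pull frames one after another instead of random seeking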
self.sigNextFrame.connect(self.video_reader.read)
else:
self.sigGotoFrame.connect(self.video_reader.gotoFrame)
self.video_reader.sigFrameRead.connect(self.setFrame)
self.video_reader.sigSeekError.connect(self.catchSeekError)
self.video_reader.sigVideoEnd.connect(self.videoEnd)
self.sigQuit.connect(self.video_reader.close)
self.frame_interval = 1000.0 / self.video_reader.fps
self.pos_spin.blockSignals(True)
self.pos_spin.setRange(0, self.trackReader.last_frame)
self.pos_spin.blockSignals(False)
self.slider.blockSignals(True)
self.slider.setRange(0, self.trackReader.last_frame)
self.slider.blockSignals(False)
self.sigChangeTrack.connect(self.trackReader.changeTrack)
self.trackReader.sigChangeList.connect(
self.changelist_widget.setChangeList
)
self.trackReader.sigEnd.connect(self.trackEnd)
self.sigUndoCurrentChanges.connect(self.trackReader.undoChangeTrack)
self.sigDataFile.emit(self.track_filename, False)
self.gotoFrame(0)
self.updateGeometry()
self.tieViews(self.tieViewsAction.isChecked())
return True
@qc.pyqtSlot()
def saveReviewedTracks(self):
self.playVideo(False)
datadir = settings.value('data/directory', '.')
default_file = (
datadir if self.track_filename is None else self.track_filename
)
track_filename, filter = qw.QFileDialog.getSaveFileName(
self,
'Save reviewed data',
default_file,
filter='HDF5 (*.h5 *.hdf);; Text (*.csv)',
)
logging.debug(f'filename:{track_filename}\nselected filter:{filter}')
if len(track_filename) > 0:
if self.save_indicator is None:
self.save_indicator = qw.QProgressDialog(
'Saving track data',
None,
0,
self.trackReader.last_frame + 1,
self,
)
self.save_indicator.setWindowModality(qc.Qt.WindowModal)
self.save_indicator.resize(400, 200)
# self.trackReader.sigSavedFrames.connect(self.save_indicator.setValue)
else:
self.save_indicator.setRange(
0, self.trackReader.last_frame + 1
)
self.save_indicator.setValue(0)
try: # make sure same track reader is not connected multiple times
self.trackReader.sigSavedFrames.disconnect()
except TypeError:
pass
self.trackReader.sigSavedFrames.connect(
self.save_indicator.setValue
)
self.save_indicator.show()
try:
self.trackReader.saveChanges(track_filename)
self.trackReader.data_path = track_filename
self.track_filename = track_filename
self.sigDataFile.emit(track_filename, False)
self.save_indicator.setValue(self.trackReader.last_frame + 1)
except OSError as err:
qw.QMessageBox.critical(
self,
'Error opening file for writing',
f'File {track_filename} could not be opened.\n{err}',
)
@qc.pyqtSlot()
def doQuit(self):
# self._wait_cond.set()
self.vid_info.close()
self.changelist_widget.close()
if (
self.trackReader is not None
and len(self.trackReader.changeList) > 0
):
self.saveReviewedTracks()
diff = 0
if self.showNewAction.isChecked():
diff = 1
elif self.showDifferenceAction.isChecked():
diff = 2
settings.setValue('review/showdiff', diff)
settings.setValue(
'review/disable_seek', self.disableSeekAction.isChecked()
)
self.sigQuit.emit()
@qc.pyqtSlot(bool)
def playVideo(self, play: bool):
if self.video_reader is None:
return
if play:
self.play_button.setText('Pause (Space)')
self.playAction.setText('Pause (Space)')
self.timer.start(int(self.frame_interval / self.speed))
else:
self.play_button.setText('Play (Space)')
self.playAction.setText('Play (Space)')
self.timer.stop()
@qc.pyqtSlot()
def togglePlay(self):
if self.video_reader is None:
return
if self.play_button.isChecked():
self.play_button.setChecked(False)
self.playVideo(False)
else:
self.play_button.setChecked(True)
self.playVideo(True)
@qc.pyqtSlot()
def reset(self):
"""Reset video: reopen video and track file"""
print('Reset video ...')
if self.video_reader is None:
# Not initialized - do nothing
return
if len(self.trackReader.changeList) > 0:
ret = qw.QMessageBox.question(
self,
'Confirm reset',
'This will reset all changes!'
'\nTo save your work, press No and save data first',
qw.QMessageBox.Yes | qw.QMessageBox.No,
)
if ret == qw.QMessageBox.No:
return
self._wait_cond.set()
self.playVideo(False)
self.play_button.setChecked(False)
self.leftView.clearAll()
self.rightView.clearAll()
self.setupReading(self.video_filename, self.track_filename)
@qc.pyqtSlot(int, int, int, bool)
def mapTracks(
self, newId: int, origId: int, endFrame: int, swap: bool
) -> None:
"""Map newId to origId, up to and including endFrame.
If swap is True, do a swap, otherwise assign.
"""
if newId == origId:
return
if swap:
self.trackReader.swapTrack(self.frame_no, origId, newId, endFrame)
else:
self.trackReader.changeTrack(
self.frame_no, origId, newId, endFrame
)
tracks = self.trackReader.getTracks(self.frame_no)
self.sigRightTrackList.emit(list(tracks.keys()))
self.right_tracks = self._flag_tracks({}, tracks)
self.sigRightTracks.emit(self.right_tracks)
self.sigDataFile.emit(self.track_filename, True)
@qc.pyqtSlot()
def videoEnd(self):
self.playVideo(False)
self.play_button.setChecked(False)
qw.QMessageBox.information(
self, 'Finished processing', 'End of video reached.'
)
@qc.pyqtSlot()
def trackEnd(self):
self.playVideo(False)
self.play_button.setChecked(False)
qw.QMessageBox.information(
self, 'Finished processing', 'End of tracks reached.'
)
class ReviewerMain(qw.QMainWindow):
sigQuit = qc.pyqtSignal()
def __init__(self):
super(ReviewerMain, self).__init__()
self.reviewWidget = ReviewWidget()
fileMenu = self.menuBar().addMenu('&File')
fileMenu.addAction(self.reviewWidget.openAction)
fileMenu.addAction(self.reviewWidget.saveAction)
fileMenu.addAction(self.reviewWidget.loadChangeListAction)
fileMenu.addAction(self.reviewWidget.saveChangeListAction)
self.sc_quit = qw.QShortcut(qg.QKeySequence('Ctrl+Q'), self)
self.sc_quit.activated.connect(self.close)
self.quitAction = qw.QAction('Quit (Ctrl+Q)')
self.quitAction.triggered.connect(self.close)
fileMenu.addAction(self.quitAction)
diffMenu = self.menuBar().addMenu('&Diff settings')
diffMenu.addAction(self.reviewWidget.overlayAction)
diffMenu.addAction(self.reviewWidget.invertOverlayColorAction)
diffMenu.addAction(self.reviewWidget.toggleSideBySideAction)
diffgrp = qw.QActionGroup(self)
diffgrp.addAction(self.reviewWidget.showDifferenceAction)
diffgrp.addAction(self.reviewWidget.showNewAction)
diffgrp.addAction(self.reviewWidget.showNoneAction)
diffgrp.setExclusive(True)
diffMenu.addActions(diffgrp.actions())
viewMenu = self.menuBar().addMenu('&View')
viewMenu.addAction(self.reviewWidget.tieViewsAction)
viewMenu.addAction(self.reviewWidget.showGrayscaleAction)
viewMenu.addAction(self.reviewWidget.setColorAction)
viewMenu.addAction(self.reviewWidget.setSelectedColorAction)
viewMenu.addAction(self.reviewWidget.setAlphaUnselectedAction)
viewMenu.addAction(self.reviewWidget.autoColorAction)
viewMenu.addAction(self.reviewWidget.colormapAction)
viewMenu.addAction(self.reviewWidget.pathCmapLeftAction)
viewMenu.addAction(self.reviewWidget.pathCmapRightAction)
viewMenu.addAction(self.reviewWidget.keepSelectionAction)
viewMenu.addSeparator()
viewMenu.addAction(self.reviewWidget.fontSizeAction)
viewMenu.addAction(self.reviewWidget.relativeFontSizeAction)
viewMenu.addAction(self.reviewWidget.lineWidthAction)
viewMenu.addAction(self.reviewWidget.setPathDiaAction)
viewMenu.addAction(self.reviewWidget.setMarkerThicknessAction)
viewMenu.addSeparator()
viewMenu.addAction(self.reviewWidget.showIdAction)
viewMenu.addAction(self.reviewWidget.showBboxAction)
viewMenu.addAction(self.reviewWidget.labelInsideAction)
viewMenu.addAction(self.reviewWidget.showOldTracksAction)
viewMenu.addAction(self.reviewWidget.showHistoryAction)
viewMenu.addSeparator()
viewMenu.addAction(self.reviewWidget.showLimitsAction)
viewMenu.addAction(self.reviewWidget.histlenAction)
viewMenu.addAction(self.reviewWidget.histGradientAction)
viewMenu.addAction(self.reviewWidget.showChangeListAction)
themeGroup = qw.QActionGroup(self.reviewWidget)
themeGroup.setExclusive(True)
selected = settings.value('theme', 'dark')
for theme in STYLE_SHEETS:
action = themeGroup.addAction(theme.capitalize())
action.setCheckable(True)
action.triggered.connect(
lambda chk, name=theme: ut.setStyleSheet(name)
)
if theme == selected:
action.setChecked(True)
ut.setStyleSheet(selected)
themeMenu = viewMenu.addMenu('&Theme')
themeMenu.addActions(themeGroup.actions())
zoomMenu = self.menuBar().addMenu('&Zoom')
zoomMenu.addAction(self.reviewWidget.zoomInLeftAction)
zoomMenu.addAction(self.reviewWidget.zoomInRightAction)
zoomMenu.addAction(self.reviewWidget.zoomOutLeftAction)
zoomMenu.addAction(self.reviewWidget.zoomOutRightAction)
playMenu = self.menuBar().addMenu('&Play')
playMenu.addAction(self.reviewWidget.disableSeekAction)
playMenu.addAction(self.reviewWidget.playAction)
playMenu.addAction(self.reviewWidget.speedUpAction)
playMenu.addAction(self.reviewWidget.slowDownAction)
playMenu.addAction(self.reviewWidget.resetAction)
playMenu.addSeparator()
playMenu.addAction(self.reviewWidget.nextFrameAction)
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
"""
Blender-CoD: Blender Add-On for Call of Duty modding
Version: alpha 3
Copyright (c) 2011 CoDEmanX, Flybynyt -- <EMAIL>
http://code.google.com/p/blender-cod/
TODO
- UI for xmodel and xanim import (planned for alpha 4/5)
"""
bl_info = {
"name": "Blender-CoD - Add-On for Call of Duty modding (alpha 3)",
"author": "CoDEmanX, Flybynyt",
"version": (0, 3, 5),
"blender": (2, 62, 0),
"location": "File > Import | File > Export",
"description": "Export models to *.XMODEL_EXPORT and animations to *.XANIM_EXPORT",
"warning": "Alpha version, please report any bugs!",
"wiki_url": "http://wiki.blender.org/index.php/Extensions:2.6/Py/"
"Scripts/Import-Export/Call_of_Duty_IO",
"tracker_url": "https://developer.blender.org/maniphest/task/edit/form/2/",
"support": "TESTING",
"category": "Import-Export"
}
# To support reload properly, try to access a package var, if it's there, reload everything
if "bpy" in locals():
import imp
if "import_xmodel" in locals():
imp.reload(import_xmodel)
if "export_xmodel" in locals():
imp.reload(export_xmodel)
if "import_xanim" in locals():
imp.reload(import_xanim)
if "export_xanim" in locals():
imp.reload(export_xanim)
import bpy
from bpy.props import BoolProperty, IntProperty, FloatProperty, StringProperty, EnumProperty
import bpy_extras.io_utils
from bpy_extras.io_utils import ExportHelper, ImportHelper
import time
# Planned for alpha 4/5
class ImportXmodel(bpy.types.Operator, ImportHelper):
"""Load a CoD XMODEL_EXPORT File"""
bl_idname = "import_scene.xmodel"
bl_label = "Import XMODEL_EXPORT"
bl_options = {'PRESET'}
filename_ext = ".XMODEL_EXPORT"
filter_glob = StringProperty(default="*.XMODEL_EXPORT", options={'HIDDEN'})
#use_meshes = BoolProperty(name="Meshes", description="Import meshes", default=True)
#use_armature = BoolProperty(name="Armature", description="Import Armature", default=True)
#use_bind_armature = BoolProperty(name="Bind Meshes to Armature", description="Parent imported meshes to armature", default=True)
#use_split_objects = BoolProperty(name="Object", description="Import OBJ Objects into Blender Objects", default=True)
#use_split_groups = BoolProperty(name="Group", description="Import OBJ Groups into Blender Objects", default=True)
#use_image_search = BoolProperty(name="Image Search", description="Search subdirs for any associated images (Warning, may be slow)", default=True)
def execute(self, context):
from . import import_xmodel
start_time = time.clock()
result = import_xmodel.load(self, context, **self.as_keywords(ignore=("filter_glob", "check_existing")))
if not result:
self.report({'INFO'}, "Import finished in %.4f sec." % (time.clock() - start_time))
return {'FINISHED'}
else:
self.report({'ERROR'}, result)
return {'CANCELLED'}
"""
def draw(self, context):
layout = self.layout
col = layout.column()
col.prop(self, "use_meshes")
col.prop(self, "use_armature")
row = layout.row()
row.active = self.use_meshes and self.use_armature
row.prop(self, "use_bind_armature")
"""
@classmethod
def poll(self, context):
return (context.scene is not None)
class ImportXanim(bpy.types.Operator, ImportHelper):
"""Load a CoD XANIM_EXPORT File"""
bl_idname = "import_scene.xanim"
bl_label = "Import XANIM_EXPORT"
bl_options = {'PRESET'}
filename_ext = ".XANIM_EXPORT"
filter_glob = StringProperty(default="*.XANIM_EXPORT;*.NT_EXPORT", options={'HIDDEN'})
def execute(self, context):
# print("Selected: " + context.active_object.name)
from . import import_xanim
return import_xanim.load(self, context, **self.as_keywords(ignore=("filter_glob",)))
class ExportXmodel(bpy.types.Operator, ExportHelper):
"""Save a CoD XMODEL_EXPORT File"""
bl_idname = "export_scene.xmodel"
bl_label = 'Export XMODEL_EXPORT'
bl_options = {'PRESET'}
filename_ext = ".XMODEL_EXPORT"
filter_glob = StringProperty(default="*.XMODEL_EXPORT", options={'HIDDEN'})
# List of operator properties, the attributes will be assigned
# to the class instance from the operator settings before calling.
use_version = EnumProperty(
name="Format Version",
description="XMODEL_EXPORT format version for export",
items=(('5', "Version 5", "vCoD, CoD:UO"),
('6', "Version 6", "CoD2, CoD4, CoD5, CoD7")),
default='6',
)
use_selection = BoolProperty(
name="Selection only",
description="Export selected meshes only (object or weight paint mode)",
default=False
)
use_vertex_colors = BoolProperty(
name="Vertex colors",
description="Export vertex colors (if disabled, white color will be used)",
default=True
)
use_vertex_colors_alpha = BoolProperty(
name="As alpha",
description="Turn RGB vertex colors into grayscale (average value) and use it as alpha transparency. White is 1 (opaque), black 0 (invisible)",
default=False
)
use_apply_modifiers = BoolProperty(
name="Apply Modifiers",
description="Apply all mesh modifiers except Armature (preview resolution)",
default=True
)
use_armature = BoolProperty(
name="Armature",
description="Export bones (if disabled, only a 'tag_origin' bone will be written)",
default=True
)
use_vertex_cleanup = BoolProperty(
name="Clean up vertices",
description="Try this if you have problems converting to xmodel. Skips vertices which aren't used by any face and updates references.",
default=False
)
use_armature_pose = BoolProperty(
name="Pose animation to models",
description="Export meshes with Armature modifier applied as a series of XMODEL_EXPORT files",
default=False
)
use_frame_start = IntProperty(
name="Start",
description="First frame to export",
default=1,
min=0
)
use_frame_end = IntProperty(
name="End",
description="Last frame to export",
default=250,
min=0
)
use_weight_min = BoolProperty(
name="Minimum bone weight",
description="Try this if you get 'too small weight' errors when converting",
default=False,
)
use_weight_min_threshold = FloatProperty(
name="Threshold",
description="Smallest allowed weight (minimum value)",
default=0.010097,
min=0.0,
max=1.0,
precision=6
)
def execute(self, context):
from . import export_xmodel
start_time = time.clock()
result = export_xmodel.save(self, context, **self.as_keywords(ignore=("filter_glob", "check_existing")))
if not result:
self.report({'INFO'}, "Export finished in %.4f sec." % (time.clock() - start_time))
return {'FINISHED'}
else:
self.report({'ERROR'}, result)
return {'CANCELLED'}
# Extend ExportHelper invoke function to support dynamic default values
def invoke(self, context, event):
#self.use_frame_start = context.scene.frame_start
self.use_frame_start = context.scene.frame_current
#self.use_frame_end = context.scene.frame_end
self.use_frame_end = context.scene.frame_current
return super().invoke(context, event)
def draw(self, context):
layout = self.layout
row = layout.row(align=True)
row.prop(self, "use_version", expand=True)
# Calculate number of selected mesh objects
if context.mode in {'OBJECT', 'PAINT_WEIGHT'}:
meshes_selected = len([m for m in bpy.data.objects if m.type == 'MESH' and m.select])
else:
meshes_selected = 0
col = layout.column(align=True)
col.prop(self, "use_selection", "Selection only (%i meshes)" % meshes_selected)
col.enabled = bool(meshes_selected)
col = layout.column(align=True)
col.prop(self, "use_apply_modifiers")
col = layout.column(align=True)
col.enabled = not self.use_armature_pose
if self.use_armature and self.use_armature_pose:
col.prop(self, "use_armature", "Armature (disabled)")
else:
col.prop(self, "use_armature")
if self.use_version == '6':
row = layout.row(align=True)
row.prop(self, "use_vertex_colors")
sub = row.split()
sub.active = self.use_vertex_colors
sub.prop(self, "use_vertex_colors_alpha")
col = layout.column(align=True)
col.label("Advanced:")
col = layout.column(align=True)
col.prop(self, "use_vertex_cleanup")
box = layout.box()
col = box.column(align=True)
col.prop(self, "use_armature_pose")
sub = box.column()
sub.active = self.use_armature_pose
sub.label(text="Frame range: (%i frames)" % (abs(self.use_frame_end - self.use_frame_start) + 1))
row = sub.row(align=True)
row.prop(self, "use_frame_start")
row.prop(self, "use_frame_end")
box = layout.box()
col = box.column(align=True)
col.prop(self, "use_weight_min")
sub = box.column()
sub.enabled = self.use_weight_min
sub.prop(self, "use_weight_min_threshold")
@classmethod
def poll(self, context):
return (context.scene is not None)
class ExportXanim(bpy.types.Operator, ExportHelper):
"""Save a XMODEL_XANIM File"""
bl_idname = "export_scene.xanim"
bl_label = 'Export XANIM_EXPORT'
bl_options = {'PRESET'}
filename_ext = ".XANIM_EXPORT"
filter_glob = StringProperty(default="*.XANIM_EXPORT", options={'HIDDEN'})
# List of operator properties, the attributes will be assigned
# to the class instance from the operator settings before calling.
use_selection = BoolProperty(
name="Selection only",
description="Export selected bones only (pose mode)",
default=False
)
use_framerate = IntProperty(
name="Framerate",
description="Set frames per second for export, 30 fps is commonly used.",
default=24,
min=1,
max=100
)
use_frame_start = IntProperty(
name="Start",
description="First frame to export",
default=1,
min=0
)
use_frame_end = IntProperty(
name="End",
description="Last frame to export",
default=250,
min=0
)
use_notetrack = BoolProperty(
name="Notetrack",
description="Export timeline markers as notetrack nodes",
default=True
)
use_notetrack_format = EnumProperty(
name="Notetrack format",
description="Notetrack format to use. Always set 'CoD 7' for Black Ops, even if not using notetrack!",
items=(('5', "CoD 5", "Separate NT_EXPORT notetrack file for 'World at War'"),
('7', "CoD 7", "Separate NT_EXPORT notetrack file for 'Black Ops'"),
('1', "all other", "Inline notetrack data for all CoD versions except WaW and BO")),
default='1',
)
def execute(self, context):
from . import export_xanim
start_time = time.clock()
result = export_xanim.save(self, context, **self.as_keywords(ignore=("filter_glob", "check_existing")))
if not result:
self.report({'INFO'}, "Export finished in %.4f sec." % (time.clock() - start_time))
return {'FINISHED'}
else:
self.report({'ERROR'}, result)
return {'CANCELLED'}
# Extend ExportHelper invoke function to support dynamic default values
def invoke(self, context, event):
self.use_frame_start = context.scene.frame_start
self.use_frame_end = context.scene.frame_end
self.use_framerate = round(context.scene.render.fps / context.scene.render.fps_base)
return super().invoke(context, event)
def draw(self, context):
layout = self.layout
bones_selected = 0
armature = None
# Take the first armature
for ob in bpy.data.objects:
if ob.type == 'ARMATURE' and len(ob.data.bones) > 0:
armature = ob.data
# Calculate number of selected bones if in pose-mode
if context.mode == 'POSE':
bones_selected = len([b for b in armature.bones if b.select])
# Prepare info string
armature_info = "%s (%i bones)" % (ob.name, len(armature.bones))
break
else:
armature_info = "Not found!"
if armature:
icon = 'NONE'
else:
icon = 'ERROR'
col = layout.column(align=True)
col.label("Armature: %s" % armature_info, icon)
col = layout.column(align=True)
col.prop(self, "use_selection", "Selection only | |
category: ``str``
:return: Assertion pass status
:rtype: ``bool``
"""
return assertions.FixCheck(
msg=msg, has_tags=has_tags,
absent_tags=absent_tags, description=description,
category=category,
)
@bind_entry
def match(
self, actual, expected, description=None, category=None,
include_tags=None, exclude_tags=None, report_all=True,
actual_description=None, expected_description=None,
):
"""
Matches two FIX messages, supports repeating groups (nested data).
Custom comparators can be used as values on the ``expected`` msg.
.. code-block:: python
result.fix.match(
actual={
36: 6,
22: 5,
55: 2,
38: 5,
555: [ .. more nested data here ... ]
},
expected={
36: 6,
22: 5,
55: lambda val: val in [2, 3, 4],
38: 5,
555: [ .. more nested data here ... ]
}
)
:param actual: Original FIX message.
:type actual: ``dict``
:param expected: Expected FIX message, can include compiled
regex patterns or callables for
advanced comparison.
:type expected: ``dict``
:param include_tags: Tags to exclusively consider in the comparison.
:type include_tags: ``list`` of ``object`` (items must be hashable)
:param exclude_tags: Keys to ignore in the comparison.
:type exclude_tags: ``list`` of ``object`` (items must be hashable)
:param report_all: Formatting flag, includes even
ignored tags in report if True.
:type report_all: ``bool``
:param actual_description: Column header description for original msg.
:type actual_description: ``str``
:param expected_description: Column header
description for expected msg.
:type expected_description: ``str``
:param description: Text description for the assertion.
:type description: ``str``
:param category: Custom category that will be used for summarization.
:type category: ``str``
:return: Assertion pass status
:rtype: ``bool``
"""
return assertions.FixMatch(
value=actual,
expected=expected,
description=description,
category=category,
include_tags=include_tags,
exclude_tags=exclude_tags,
report_all=report_all,
expected_description=expected_description,
actual_description=actual_description,
)
@bind_entry
def match_all(
self, values, comparisons,
description=None, category=None, tag_weightings=None
):
"""
Match multiple unordered FIX messages.
Initially all value/expected comparison combinations are
evaluated and converted to an error weight.
If certain FIX tags are more important than others (e.g. ID tags),
it is possible to give them additional weighting during the comparison,
by specifying a "tag_weightings" dict.
The default weight of a mismatch is 100.
The values/comparisons permutation that results in
the least error is appended to the report.
.. code-block:: python
result.fix.match_all(
values=[
{ 36: 6, 22: 5, 55: 2, ...},
{ 36: 7, ...},
...
],
comparisons=[
Expected({ 36: 6, 22: 5, 55: 2, ...},),
Expected({ 36: 7, ...})
...
],
# twice the default weight of 100
tag_weightings={36: 200})
:param values: Original values.
:type values: ``list`` of ``dict``
:param comparisons: Comparison objects.
:type comparisons: ``list`` of
``testplan.common.utils.comparison.Expected``
:param tag_weightings: Per-tag overrides that specify a different
weight for different tags.
:type tag_weightings: ``dict``
:param description: Text description for the assertion.
:type description: ``str``
:param category: Custom category that will be used for summarization.
:type category: ``str``
:return: Assertion pass status
:rtype: ``bool``
"""
return assertions.FixMatchAll(
values=values,
comparisons=comparisons,
tag_weightings=tag_weightings,
description=description,
category=category,
)
class Result(object):
"""
Contains assertion methods and namespaces for generating test data.
A new instance of ``Result`` object is passed to each testcase when a
suite is run.
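A minimal usage sketch (an illustrative example, assuming the
usual ``(self, env, result)`` testcase signature):
.. code-block:: python
def sample_testcase(self, env, result):
result.equal(2 + 2, 4, description='Sanity check')
result.log('Entries accumulate on this result object.')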
"""
namespaces = {
'regex': RegexNamespace,
'table': TableNamespace,
'xml': XMLNamespace,
'dict': DictNamespace,
'fix': FixNamespace,
}
def __init__(
self,
stdout_style=None,
continue_on_failure=True,
_group_description=None,
_parent=None,
_summarize=False,
_num_passing=defaults.SUMMARY_NUM_PASSING,
_num_failing=defaults.SUMMARY_NUM_FAILING,
_scratch=None,
):
self.entries = []
self.stdout_style = stdout_style or STDOUT_STYLE
self.continue_on_failure = continue_on_failure
for key, value in self.get_namespaces().items():
if hasattr(self, key):
raise AttributeError(
'Name clash, cannot assign namespace: {}'.format(key))
setattr(self, key, value(result=self))
self._parent = _parent
self._group_description = _group_description
self._summarize = _summarize
self._num_passing = _num_passing
self._num_failing = _num_failing
self._scratch = _scratch
def __enter__(self):
if self._parent is None:
raise RuntimeError(
'Cannot use root level result objects as context managers.'
' Use `with result.group(...)` instead.')
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self._summarize:
entry_group = base.Summary(
entries=self.entries,
description=self._group_description,
num_passing=self._num_passing,
num_failing=self._num_failing
)
else:
entry_group = base.Group(
entries=self.entries,
description=self._group_description
)
self._parent.entries.append(entry_group)
return exc_type is None  # returning False propagates any exception
def get_namespaces(self):
"""
This method can be overridden for enabling
custom assertion namespaces for child classes.
"""
return self.namespaces or {}
def group(
self,
description=None,
summarize=False,
num_passing=defaults.SUMMARY_NUM_PASSING,
num_failing=defaults.SUMMARY_NUM_FAILING,
):
"""
Creates an assertion group or summary, which is helpful
for formatting assertion data on certain output
targets (e.g. PDF, JSON) and reducing the amount of
content that gets displayed.
Should be used as a context manager.
.. code-block:: python
# Group and sub groups
with result.group(description='Custom group description') as group:
group.not_equal(2, 3, description='Assertion within a group')
group.greater(5, 3)
with group.group() as sub_group:
sub_group.less(6, 3, description='Assertion in sub group')
# Summary example
with result.group(
summarize=True,
num_passing=4,
num_failing=10,
) as group:
for i in range(500):
# First 4 passing assertions will be displayed
group.equal(i, i)
# First 10 failing assertions will be displayed
group.equal(i, i + 1)
:param description: Text description for the assertion group.
:type description: ``str``
:param summarize: Flag for enabling summarization.
:type summarize: ``bool``
:param num_passing: Max limit for number of passing
assertions per category & assertion type.
:type num_passing: ``int``
:param num_failing: Max limit for number of failing
assertions per category & assertion type.
:type num_failing: ``int``
:return: A new result object that refers to the current result as its parent.
:rtype: Result object
"""
return Result(
stdout_style=self.stdout_style,
continue_on_failure=self.continue_on_failure,
_group_description=description,
_parent=self,
_summarize=summarize,
_num_passing=num_passing,
_num_failing=num_failing
)
@property
def passed(self):
"""Entries stored passed status."""
return all(getattr(entry, 'passed', True) for entry in self.entries)
@bind_entry
def log(self, message):
"""
Create a string message entry, can be used for providing additional
context related to test steps.
.. code-block:: python
result.log('Custom log message ...')
:param message: Log message
:type message: ``str``
:return: ``True``
:rtype: ``bool``
"""
# TODO: Generate different entries per obj type (dict, table etc)
return base.Log(message=message)
@bind_entry
def fail(self, description, category=None):
"""
Failure assertion, can be used for explicitly failing a testcase.
Most common usage is within a conditional block.
.. code-block:: python
if not some_condition:
result.fail('Unexpected failure: {}'.format(...))
:param description: Text description of the failure.
:type description: ``str``
:param category: Custom category that will be used for summarization.
:type category: ``str``
:return: False
:rtype: ``bool``
"""
return assertions.Fail(description, category=category)
@bind_entry
def true(self, value, description=None, category=None):
"""
Boolean assertion, checks if ``value`` is truthy.
.. code-block:: python
result.true(some_obj, 'Custom description')
:param value: Value to be evaluated for truthiness.
:type value: ``object``
:param description: Text description for the assertion.
:type description: ``str``
:param category: Custom category that will be used for summarization.
:type category: ``str``
:return: Assertion pass status
:rtype: ``bool``
"""
return assertions.IsTrue(
value, description=description, category=category)
@bind_entry
def false(self, value, description=None, category=None):
"""
Boolean assertion, checks if ``value`` is falsy.
.. code-block:: python
result.false(some_obj, 'Custom description')
:param value: Value to be evaluated for falsiness.
:type value: ``object``
:param description: Text description for the assertion.
:type description: ``str``
:param category: Custom category that will be used for summarization.
:type category: ``str``
:return: Assertion pass status
:rtype: ``bool``
"""
return assertions.IsFalse(
value, description=description, category=category)
@bind_entry
def equal(self, actual, expected, description=None, category=None):
"""
Equality assertion, checks if ``actual == expected``.
Can be used via shortcut: ``result.eq``.
.. code-block:: python
result.equal('foo', 'foo', 'Custom description')
:param actual: First (actual) value of the comparison.
:type actual: ``object``
:param expected: Second (expected) value of the comparison.
:type expected: ``object``
:param description: Text description for the assertion.
:type description: ``str``
:param category: Custom category that will be used for summarization.
:type category: ``str``
:return: Assertion pass status
:rtype: ``bool``
"""
return assertions.Equal(
actual, expected, description=description, category=category)
@bind_entry
def not_equal(self, actual, expected, description=None, category=None):
"""
Inequality assertion, checks if ``actual != expected``.
Can be used via shortcut: ``result.ne``.
.. code-block:: python
result.not_equal('foo', 'bar', 'Custom description')
:param actual: First (actual) value of the comparison.
:type actual: ``object``
:param expected: Second (expected) value of the comparison.
:type expected: ``object``
:param description: Text description for the assertion.
:type description: ``str``
:param category: Custom category that will be used for summarization.
:type category: ``str``
:return: Assertion pass status
:rtype: ``bool``
"""
return assertions.NotEqual(
actual, expected, description=description, category=category)
@bind_entry
def less(self, first, second, description=None, category=None):
"""
Checks if ``first < second``.
Can be used via shortcut: ``result.lt``
.. code-block:: python
result.less(3, 5, 'Custom description')
:param first: Left side of the comparison.
#grid (matrix) notation
uk = common.to_grid(uk, self.rows, self.cols)
#add pre-defined banks
uk = self.__add_banks(uk)
#smoothen
for i in range(self.rows):
uk[i,:] = common.smooth(uk[i,:],smoothie)
print "Finished [UK]."
self.z_interpol = uk
del uk
#plot
if plot:
self.plot(fignum='UK')
#TODO: PROBABLY ONLY POSSIBLE TO PERFORM IN SN system!
#Natural Neighbour interpolation [brute-force, bounded by river polygon]
#Warning: requires a lot of execution time
def natneigh(self, anisotropy=1.0, smoothie = 0, plane = 'sn', plot = False):
"""
Performs Natural Neighbor (NN) interpolation with defined anisotropy
to the instance's data. The algorithm assumes a brute-force way of
calculating the Voronoi cells every time an unsampled point's value is
computed.
Kwargs: anisotropy <float> : anisotropy parameter: if >1.0, it brings points closer in the
longitudinal (s) direction, if <1.0 it brings points closer
in the transverse (n) direction
smoothie <int> : smoothing degree (Gaussian window)
plane <str> : chooses plane for interpolation; 'xy' for Cartesian, 'sn' for flow-oriented
plot <boolean> : decides to plot or not the resulting NN values
"""
from scipy.spatial import voronoi_plot_2d
import matplotlib.pyplot as plt
gc.enable()
print "Calculating: Natural Neighbour [NN]"
nn = deepcopy(self.z) #avoid overwrite
#choose plane
if plane == 'sn':
x = self.s * 1./anisotropy #already flattened
y = self.n #already flattened
elif plane == 'xy':
x = self.x.flatten() * 1./anisotropy
y = self.y.flatten()
else:
print "Error: Plane for interpolation not correctly specified. No interpolation performed."
return
#DEFINE BOUNDARY FOR INTERPOLATION
#griddify points to choose boundary easier:
Gx = common.to_grid(x,self.rows,self.cols)
Gy = common.to_grid(y,self.rows,self.cols)
bx = np.hstack( (Gx[0,:],Gx[1:-1,-1],Gx[-1,:][::-1],Gx[1:-1,0][::-1]) )
by = np.hstack( (Gy[0,:],Gy[1:-1,-1],Gy[-1,:][::-1],Gy[1:-1,0][::-1]) )
#define boundary:
boundary = np.array(zip(bx,by))
#VORONOI OF SAMPLED DATA POINTS:
vorpoints = np.array(zip(x[self.isdata],y[self.isdata]))
#shift points around central point of the dataset (for precision purposes)
center = vorpoints.mean(axis=0)
vorpoints -= center
#construct Voronoi diagram from (centered) sampled data points
vor = Voronoi(vorpoints)
vor.close()
"""
#TODO: delete:
voronoi_plot_2d(vor)
plt.ion()
plt.axis('equal')
plt.show()
"""
#calculate areas of sampled dataset Voronoi cells
original_areas,vor = self.__find_areas(vor,boundary-center)
"""
#TODO: delete:
# colorize
for region in vor.regions[1:]:
polygon = vor.vertices[region]
plt.fill(*zip(*polygon), alpha=0.4)
plt.plot(vorpoints[:,0], vorpoints[:,1], 'ko')
plt.xlim(vor.min_bound[0] - 0.1, vor.max_bound[0] + 0.1)
plt.ylim(vor.min_bound[1] - 0.1, vor.max_bound[1] + 0.1)
plt.axis('equal')
plt.ion()
plt.show()
"""
#ITERATE THROUGH UNSAMPLED DATA POINTS:
# ~ For each unsampled point, construct the new Voronoi diagram consisting of the
# ~ sampled data and the current unsampled point. Then calculate the new areas and
# ~ find the normalized weights based on how much of each area is "stolen away" from
# ~ each sampled dataset Voronoi cell (https://en.wikipedia.org/wiki/Natural_neighbor).
# ~ The areas are always bounded by the river polygon (based on the grid defined).
unknown = []
for i in range(len(x[self.nodata])):
if i%1000==0:
print i, "out of ", len(x[self.nodata])
#add new point shifted around central point
varys = np.vstack( (vorpoints,[x[self.nodata][i]-center[0],y[self.nodata][i]-center[1]]) )
#calculate new Voronoi
pntvor = Voronoi(varys)
pntvor.close()
#calculate areas
new_areas,pntvor = self.__find_areas(pntvor,boundary-center)
new_areas = new_areas[:-1] #exclude new point's area
w = new_areas / original_areas
w[w>1.0] = 1.0 #make sure that no area is larger than initial areas
areaweight = 1.0 - w
normalize = np.nansum(areaweight)
if normalize == 0.0: #to avoid division by 0
normalize = 1e-12
areaweight /= normalize
unknown.append( np.nansum(areaweight*self.z[self.isdata]) )
nn[self.nodata] = np.array(unknown)
#grid (matrix) notation
nn = common.to_grid(nn, self.rows, self.cols)
#add pre-defined banks
nn = self.__add_banks(nn)
#smoothen
for i in range(self.rows):
nn[i,:] = common.smooth(nn[i,:],smoothie)
print "Finished [NN]."
self.z_interpol = nn
del nn
#plot
if plot:
self.plot(fignum='NN')
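#Illustrative usage sketch (the constructor and filenames below are
#hypothetical; only natneigh/save_as_samp are taken from this class):
#
#   grid = RiverGrid('survey.xyz')   # hypothetical constructor
#   grid.natneigh(anisotropy=5.0, smoothie=3, plane='sn', plot=True)
#   grid.save_as_samp('bed_interpolated')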
"""
###############################################################################
##### LOCAL INTERPOLATION ? #########################
###############################################################################
#EIDW - Local
#TODO: Fix local interpolator by distance/neighbourhoods
#radius must be given. Radius describes the circular neighbourhood that would normally be taken into account in IDW,
#while the anisotropy ratio describes the ratio of major/minor axes of the ellipse centered on that circle.
#This means that anisotropy>1.0 transforms the circle into an ellipse elongated in the longitudinal direction (s).
#Anisotropy<1.0 should not be used, because it results in ellipses elongated in the transverse direction (n).
def l_eidw(self, radius, pidw=2, anisotropy=5.0, smoothie=0, banks=False, plot=False):
print "Calculating: Elliptical Inverse Distance Weighting (local)"
print "Local EIDW anisotropy ratio:", anisotropy
x_to_s, y_to_n = self._to_sn(self.td)
l_eidw = copy(self.td)
leidw_nodata = copy(self.nodata)
while np.sum(leidw_nodata) > 0:
print "Still to calculate:",np.sum(leidw_nodata)
x0 = x_to_s[~leidw_nodata]
y0 = y_to_n[~leidw_nodata]
z0 = l_eidw[:,2][~leidw_nodata]
x1 = x_to_s[leidw_nodata]
y1 = y_to_n[leidw_nodata]
z1 = l_eidw[:,2][leidw_nodata]
pnts = np.vstack((x0,y0)).T
interpnts = np.vstack((x1,y1)).T
kdT = tree(pnts)
for p in range(interpnts.shape[0]):
neighs = kdT.query_ball_point(interpnts[p], radius*anisotropy)
if neighs:
z1[p] = self.eidw_interpol(kdT.data[neighs][:,0], kdT.data[neighs][:,1], z0[neighs],
[interpnts[p][0]], [interpnts[p][1]], pidw, anisotropy, radius)
else:
pass
l_eidw[:,2][leidw_nodata] = z1
if np.sum(np.isnan(l_eidw[:,2])) == np.sum(leidw_nodata):
print "Doubling the max distance traverse flow."
radius = radius*2 #max distance traverse flow
anisotropy = 0.5*anisotropy #anisotropy ratio
print "New Anisotropy Ratio:", anisotropy
leidw_nodata = np.isnan(l_eidw[:,2])
if banks:
l_eidw[:,2] = self.__add_banks(l_eidw[:,2])
#smoothen
neweidw = np.array([smooth(l_eidw[:,2],smoothie)]).T
l_eidw = np.hstack((l_eidw[:,0:2],neweidw))
#plot
if plot:
self.plot(l_eidw,'EIDW-LOCAL')
return l_eidw
"""
#############################
###### Save Functions #######
#############################
#Normal structure of 12 columns: inefficient (O(n^3)), but correct
def save_as_dep(self, outputfile):
z = deepcopy(self.z_interpol)
nanum = -999.0
z[np.isnan(z)] = nanum
S = z.shape
cols = 12
extra = (S[1]+1)%cols
fits = (S[1]-extra+1)
slices = fits / cols
newrow = np.tile(nanum, S[1])
newcolumn = np.tile(nanum, S[0]+1)
dep = np.vstack([z, newrow])
dep = np.insert(dep, S[1], newcolumn, axis=1)
#small check
outputfile = str(outputfile)
if not(outputfile.endswith('.dep')):
outputfile += '.dep'
f = open(outputfile, 'w')
for chunk in dep:
temp = np.split(chunk[:fits],slices)
for part in temp:
for value in part:
if value<0:
spacing = " "
else:
spacing = " "
f.write(spacing+'%.17E'%value)
f.write("\n")
if extra > 0:
temp = chunk[-extra:]
for value in temp:
if value<0:
spacing = " "
else:
spacing = " "
f.write(spacing+'%.17E'%value)
f.write("\n")
f.close()
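#The layout written above appears to follow the Delft3D .dep convention (an
#assumption based on the -999.0 nodata value and the extra padding row and
#column): each grid row is dumped in chunks of 12 scientific-notation values,
#one chunk per output line, e.g.
#   -9.99000000000000000E+002  1.23400000000000000E+000 ... (12 per line)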
def save_as_samp(self, outputfile):
gc.enable()
x = np.array(self.x.flatten(),ndmin=2)
y = np.array(self.y.flatten(),ndmin=2)
z = np.array(self.z_interpol.flatten(),ndmin=2)
keep = ~np.isnan(z).flatten() #flatten so the boolean mask matches dataset's first axis
dataset = np.hstack((x.T,y.T,z.T))
#small check
outputfile = str(outputfile)
if not(outputfile.endswith('.xyz')):
outputfile += '.xyz'
np.savetxt(outputfile, dataset[keep,:], fmt='%.17E', delimiter='\t')
del x,y,z,keep,dataset
def save_as_shape(self, outputfile):
z = self.z_interpol.flatten()
keep = (z!=-999.0) & (~np.isnan(z)) #element-wise mask; 'and' would fail on arrays
z = z[keep]
x = self.x.flatten()[keep]
y = self.y.flatten()[keep]
w = shapefile.Writer(shapefile.POINT)
sx = len(str(int(max(abs(x)))))+12
sy = len(str(int(max(abs(y)))))+12
sz = len(str(int(max(abs(z)))))+4
w.field("X","N",sx,12)
w.field("Y","N",sy,12)
w.field("Z","N",sz,3)
for i in range(len(z)):
w.point(x[i],y[i])
w.record(x[i],y[i],round(z[i],2))
#small check
outputfile = str(outputfile)
if not(outputfile.endswith('.shp')):
outputfile += '.shp'
w.save(outputfile)
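#Note: shapefile.Writer(shapefile.POINT) and w.save(...) follow the pyshp 1.x
#API; pyshp 2.x instead takes the target in the constructor and uses
#w.close(), e.g. w = shapefile.Writer(outputfile, shapeType=shapefile.POINT).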
#############################
##### Private Functions #####
#############################
#TODO: Probably exclude. Interpolation does what it does, no banks described.
def __add_banks(self, data, smoothen=True):
#add banks ONLY if "skipped points" have been defined
if sum(self.skip) > 0:
#CROSS-SECTIONAL SLOPES:
self.skip[1] = data.shape[0] - self.skip[1] #convert bottom-count to row index
for j in range(self.cols): #for each cross-section
#linear slopes
data[:self.skip[0]+1,j] = np.linspace( self.wl, data[self.skip[0],j], self.skip[0]+1)
data[self.skip[1]-1:,j] = np.linspace( data[self.skip[1]-1,j], self.wl, data.shape[0]-self.skip[1]+1)
#cmn.smoothing: #???
#if smoothen:
#data[:,j] = cmn.smooth(data[:,j], (data.shape[0]-sum(self.skip))/2)
#data[:,j] = common.smooth(data[:,j], sum(self.skip))
self.skip[1] = data.shape[0] - self.skip[1] #restore original bottom-count value
else:
pass
return data
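#Worked example of the bank slopes above (illustrative numbers): with
#skip = [2, 2], wl = 0.0 and a first interior bed value of -3.0, the upper
#bank entries of a column become np.linspace(0.0, -3.0, 3) = [0.0, -1.5, -3.0],
#i.e. a straight slope from the water level down to the first measured point.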
def __fill_nans(self,gridded_data):
for i in range(self.rows):
line = gridded_data[i,:] #one "line" of points on grid
lnan = np.isnan(line) #find where data is still missing on the line
chunks = []
flagon = False
start = 0
for j in range(self.cols):
if lnan[j] and not(flagon):
flagon = True
start = j
elif not(lnan[j]) and flagon:
flagon = False
chunks.append([start,j])
if flagon:
chunks.append([start,self.cols])
for j in range(len(chunks)):
s = chunks[j][0]
e = chunks[j][1]
if e == self.cols:
line[s-1] = line[s-2] #simple minor fix for artefacts
fill = [ line[s-1] ] * (e-s)
elif s == 0:
fill = [ line[e] ] * e
else:
gap = np.linspace(0.,1.,e-s+2)[1:-1]
fill = line[s-1]*gap[::-1] + line[e]*gap
line[s:e] = np.array(fill)
gridded_data[i,:] = line
return gridded_data
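#Worked example of the interior gap filling above (illustrative values):
#   line = [1.0, nan, nan, 4.0]  ->  chunk s=1, e=3
#   gap  = np.linspace(0., 1., 4)[1:-1] = [1/3, 2/3]
#   fill = 1.0*gap[::-1] + 4.0*gap = [2.0, 3.0]
#   line becomes [1.0, 2.0, 3.0, 4.0], a linear ramp between the known ends.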
def __find_areas(self, vor, boundary):
import itertools
if not isinstance(boundary, list):
boundary = boundary.tolist()
if boundary[0] != boundary[-1]:
boundary.append(boundary[0])
bounds = Polygon(boundary)
#diagonal of bounding box = safe maximum distance to extend unbounded Voronoi ridges
return (login_fd.layout, [ db_logo_img , db_header_text,db_please_login_text ],[] )
elif pathname == '/data/video6':
if current_user.is_authenticated:
username_text = html.Div(['Current user: ' + current_user.username], id='user-name',
style=dict(color='black',
fontWeight='bold', fontSize='1.1rem', marginTop='1rem',
marginLeft='1rem'))
db_username_text = dbc.Col([username_text, logout_button],
xs=dict(size=8, offset=0), sm=dict(size=8, offset=0),
md=dict(size=2, offset=0), lg=dict(size=3, offset=0), xl=dict(size=3, offset=0))
return (success.layout, [db_logo_img, db_header_text,
db_search_input, db_search_button,db_username_text]
,data_material.data_video6_layout)
else:
return (login_fd.layout, [ db_logo_img , db_header_text,db_please_login_text ],[] )
elif pathname == "/My-Courses":
if current_user.is_authenticated:
df = pd.read_sql_table('python_data_analysis', con='sqlite:///users.db')
index = df.index[df['student_id'] == '{}'.format(current_user.id)].tolist()
try:
um.read_sql_cell('python_data_analysis', 'Enrolled', index[0])
username_text = html.Div(['Current user: ' + current_user.username], id='user-name',
style=dict(color='black',
fontWeight='bold', fontSize='1.1rem', marginTop='1rem',
marginLeft='1rem'))
db_username_text = dbc.Col([username_text, logout_button],
xs=dict(size=8, offset=0), sm=dict(size=8, offset=0),
md=dict(size=2, offset=0), lg=dict(size=3, offset=0), xl=dict(size=3, offset=0))
return (success.layout, [ db_logo_img , db_header_text ,
db_search_input,db_search_button,db_username_text]
,[data_course_card_progress] )
except:
username_text = html.Div(['Current user: ' + current_user.username], id='user-name',
style=dict(color='black',
fontWeight='bold', fontSize='1.1rem', marginTop='1rem',
marginLeft='1rem'))
db_username_text = dbc.Col([username_text, logout_button],
xs=dict(size=8, offset=0), sm=dict(size=8, offset=0),
md=dict(size=2, offset=0), lg=dict(size=3, offset=0), xl=dict(size=3, offset=0))
return (success.layout, [ db_logo_img , db_header_text ,
db_search_input,db_search_button,db_username_text]
[html.H1("You don't have courses yet",
style={'textAlign':'center'})
] )
else:
return (login_fd.layout, [ db_logo_img , db_header_text,db_please_login_text ],[] )
# If the user tries to reach a different page, return a 404 message
return ( html.H1("404: Not found", className="text-danger"), [],[] )
@app.callback(
Output('user-name', 'children'),
[Input('page-content', 'children')])
def cur_user(input1):
if current_user.is_authenticated:
return html.Div('Current user: ' + current_user.username)
# 'User authenticated' return username in get_id()
else:
return ''
@app.callback(
Output('logout', 'children'),
[Input('page-content', 'children')])
def user_logout(input1):
if current_user.is_authenticated:
return html.A('Logout', href='/logout')
else:
return ''
# first input is the button clicks, second input is the quiz answer picked by the student
# first output is the message that appears after the user submits an answer, second output is the style (color) of that message
@app.callback([Output('data_quiz1_answer', 'children') , Output('data_quiz1_answer', 'style') ],
Input('data_quiz1_submit', 'n_clicks'),State('data_quiz1_choices', 'value') )
def data_quiz1_answer(clicks,answer):
if answer=='hist': # check if answer is the correct answer
if current_user.is_authenticated:
df = pd.read_sql_table('python_data_analysis', con='sqlite:///users.db') #reading course table in database
index = df.index[df['student_id'] == '{}'.format(current_user.id)].tolist() # reading the id of the current user
ans=um.read_sql_cell('python_data_analysis','quiz1_state',index[0]) # reading the quiz1 answer that is recorded in database
progress=um.read_sql_cell('python_data_analysis','Course_progress',index[0]) # reading the course progress for the current user
new_progress = '{}{}'.format(int(progress.split('%')[0]) + 10, '%') # increase the course progress
if ans=='': # check whether the user already answered the quiz or it is the first time
um.edit_sql_cell('python_data_analysis','quiz1_state',index[0],'passed') # update the quiz1 state to passed
um.edit_sql_cell('python_data_analysis', 'Course_progress', index[0], new_progress) # update the course progress in database
return ('Correct Answer , Nice work..',dict(fontSize=22,color='green',fontWeight='bold')) # change the output string
elif ans=='passed':
return ('Correct Answer , Nice work..', dict(fontSize=22, color='green', fontWeight='bold')) # user already answered so no update in database only return string
elif ans == 'failed':
um.edit_sql_cell('python_data_analysis', 'Course_progress', index[0], new_progress)
um.edit_sql_cell('python_data_analysis', 'quiz1_state', index[0], 'passed')
return ('Correct Answer , Nice work..', dict(fontSize=22, color='green', fontWeight='bold'))
elif answer=='':
return ('',dict(fontSize=22,color='green',fontWeight='bold'))
else:
if current_user.is_authenticated:
df = pd.read_sql_table('python_data_analysis', con='sqlite:///users.db')
index = df.index[df['student_id'] == '{}'.format(current_user.id)].tolist()
ans=um.read_sql_cell('python_data_analysis','quiz1_state',index[0])
progress=um.read_sql_cell('python_data_analysis','Course_progress',index[0])
new_progress = '{}{}'.format(int(progress.split('%')[0]) -10, '%')
if ans=='':
um.edit_sql_cell('python_data_analysis', 'quiz1_state', index[0], 'failed')
return ('Wrong Answer , Try Again..',dict(fontSize=22,color='red',fontWeight='bold'))
elif ans=='passed':
um.edit_sql_cell('python_data_analysis','quiz1_state',index[0],'failed')
um.edit_sql_cell('python_data_analysis', 'Course_progress', index[0], new_progress)
return ('Wrong Answer , Try Again..',dict(fontSize=22,color='red',fontWeight='bold'))
elif ans == 'failed':
return ('Wrong Answer , Try Again..', dict(fontSize=22, color='red', fontWeight='bold'))
@app.callback([Output('data_quiz2_answer', 'children') , Output('data_quiz2_answer', 'style') ],
Input('data_quiz2_submit', 'n_clicks'),State('data_quiz2_choices', 'value') )
def data_quiz2_answer(clicks,answer):
if answer=='pd.Dataframe':
if current_user.is_authenticated:
df = pd.read_sql_table('python_data_analysis', con='sqlite:///users.db')
index = df.index[df['student_id'] == '{}'.format(current_user.id)].tolist()
ans=um.read_sql_cell('python_data_analysis','quiz2_state',index[0])
progress=um.read_sql_cell('python_data_analysis','Course_progress',index[0])
new_progress = '{}{}'.format(int(progress.split('%')[0]) + 18, '%')
if ans=='':
um.edit_sql_cell('python_data_analysis','quiz2_state',index[0],'passed')
um.edit_sql_cell('python_data_analysis', 'Course_progress', index[0], new_progress)
return ('Correct Answer , Nice work..',dict(fontSize=22,color='green',fontWeight='bold'))
elif ans=='passed':
return ('Correct Answer , Nice work..', dict(fontSize=22, color='green', fontWeight='bold'))
elif ans == 'failed':
um.edit_sql_cell('python_data_analysis', 'Course_progress', index[0], new_progress)
um.edit_sql_cell('python_data_analysis', 'quiz2_state', index[0], 'passed')
return ('Correct Answer , Nice work..', dict(fontSize=22, color='green', fontWeight='bold'))
elif answer=='':
return ('',dict(fontSize=22,color='green',fontWeight='bold'))
else:
if current_user.is_authenticated:
df = pd.read_sql_table('python_data_analysis', con='sqlite:///users.db')
index = df.index[df['student_id'] == '{}'.format(current_user.id)].tolist()
ans=um.read_sql_cell('python_data_analysis','quiz2_state',index[0])
progress=um.read_sql_cell('python_data_analysis','Course_progress',index[0])
new_progress = '{}{}'.format(int(progress.split('%')[0]) -18, '%')
if ans=='':
um.edit_sql_cell('python_data_analysis', 'quiz2_state', index[0], 'failed')
return ('Wrong Answer , Try Again..',dict(fontSize=22,color='red',fontWeight='bold'))
elif ans=='passed':
um.edit_sql_cell('python_data_analysis','quiz2_state',index[0],'failed')
um.edit_sql_cell('python_data_analysis', 'Course_progress', index[0], new_progress)
return ('Wrong Answer , Try Again..',dict(fontSize=22,color='red',fontWeight='bold'))
elif ans == 'failed':
return ('Wrong Answer , Try Again..', dict(fontSize=22, color='red', fontWeight='bold'))
@app.callback([Output('data_quiz3_answer', 'children') , Output('data_quiz3_answer', 'style') ],
Input('data_quiz3_submit', 'n_clicks'),State('data_quiz3_choices', 'value') )
def data_quiz3_answer(clicks,answer):
if answer=='plotly':
if current_user.is_authenticated:
df = pd.read_sql_table('python_data_analysis', con='sqlite:///users.db')
index = df.index[df['student_id'] == '{}'.format(current_user.id)].tolist()
ans=um.read_sql_cell('python_data_analysis','quiz3_state',index[0])
progress=um.read_sql_cell('python_data_analysis','Course_progress',index[0])
new_progress = '{}{}'.format(int(progress.split('%')[0]) + 18, '%')
if ans=='':
um.edit_sql_cell('python_data_analysis','quiz3_state',index[0],'passed')
um.edit_sql_cell('python_data_analysis', 'Course_progress', index[0], new_progress)
return ('Correct Answer , Nice work..',dict(fontSize=22,color='green',fontWeight='bold'))
elif ans=='passed':
return ('Correct Answer , Nice work..', dict(fontSize=22, color='green', fontWeight='bold'))
elif ans == 'failed':
um.edit_sql_cell('python_data_analysis', 'Course_progress', index[0], new_progress)
um.edit_sql_cell('python_data_analysis', 'quiz3_state', index[0], 'passed')
return ('Correct Answer , Nice work..', dict(fontSize=22, color='green', fontWeight='bold'))
elif answer=='':
return ('',dict(fontSize=22,color='green',fontWeight='bold'))
else:
if current_user.is_authenticated:
df = pd.read_sql_table('python_data_analysis', con='sqlite:///users.db')
index = df.index[df['student_id'] == '{}'.format(current_user.id)].tolist()
ans=um.read_sql_cell('python_data_analysis','quiz3_state',index[0])
progress=um.read_sql_cell('python_data_analysis','Course_progress',index[0])
new_progress = '{}{}'.format(int(progress.split('%')[0]) -18, '%')
if ans=='':
um.edit_sql_cell('python_data_analysis', 'quiz3_state', index[0], 'failed')
return ('Wrong Answer , Try Again..',dict(fontSize=22,color='red',fontWeight='bold'))
elif ans=='passed':
um.edit_sql_cell('python_data_analysis','quiz3_state',index[0],'failed')
um.edit_sql_cell('python_data_analysis', 'Course_progress', index[0], new_progress)
return ('Wrong Answer , Try Again..',dict(fontSize=22,color='red',fontWeight='bold'))
elif ans == 'failed':
return ('Wrong Answer , Try Again..', dict(fontSize=22, color='red', fontWeight='bold'))
@app.callback([Output('data_quiz4_answer', 'children') , Output('data_quiz4_answer', 'style') ],
Input('data_quiz4_submit', 'n_clicks'),State('data_quiz4_choices', 'value') )
def data_quiz4_answer(clicks,answer):
if answer=='line chart':
if current_user.is_authenticated:
df = pd.read_sql_table('python_data_analysis', con='sqlite:///users.db')
index = df.index[df['student_id'] == '{}'.format(current_user.id)].tolist()
ans=um.read_sql_cell('python_data_analysis','quiz4_state',index[0])
progress=um.read_sql_cell('python_data_analysis','Course_progress',index[0])
new_progress = '{}{}'.format(int(progress.split('%')[0]) + 18, '%')
if ans=='':
um.edit_sql_cell('python_data_analysis','quiz4_state',index[0],'passed')
um.edit_sql_cell('python_data_analysis', 'Course_progress', index[0], new_progress)
return ('Correct Answer , Nice work..',dict(fontSize=22,color='green',fontWeight='bold'))
elif ans=='passed':
return ('Correct Answer , Nice work..', dict(fontSize=22, color='green', fontWeight='bold'))
elif ans == 'failed':
um.edit_sql_cell('python_data_analysis', 'Course_progress', index[0], new_progress)
um.edit_sql_cell('python_data_analysis', 'quiz4_state', index[0], 'passed')
return ('Correct Answer , Nice work..', dict(fontSize=22, color='green', fontWeight='bold'))
elif answer=='':
return ('',dict(fontSize=22,color='green',fontWeight='bold'))
else:
if current_user.is_authenticated:
df = pd.read_sql_table('python_data_analysis', con='sqlite:///users.db')
index = df.index[df['student_id'] == '{}'.format(current_user.id)].tolist()
ans=um.read_sql_cell('python_data_analysis','quiz4_state',index[0])
progress=um.read_sql_cell('python_data_analysis','Course_progress',index[0])
new_progress = '{}{}'.format(int(progress.split('%')[0]) -18, '%')
if ans=='':
um.edit_sql_cell('python_data_analysis', 'quiz4_state', index[0], 'failed')
return ('Wrong Answer , Try Again..',dict(fontSize=22,color='red',fontWeight='bold'))
elif ans=='passed':
um.edit_sql_cell('python_data_analysis','quiz4_state',index[0],'failed')
um.edit_sql_cell('python_data_analysis', 'Course_progress', index[0], new_progress)
return ('Wrong Answer , Try Again..',dict(fontSize=22,color='red',fontWeight='bold'))
elif ans == 'failed':
return ('Wrong Answer , Try Again..', dict(fontSize=22, color='red', fontWeight='bold'))
@app.callback([Output('data_quiz5_answer', 'children') , Output('data_quiz5_answer', 'style') ],
Input('data_quiz5_submit', 'n_clicks'),State('data_quiz5_choices', 'value') )
def data_quiz5_answer(clicks,answer):
if answer=='bootstrap':
if current_user.is_authenticated:
df = pd.read_sql_table('python_data_analysis', con='sqlite:///users.db')
index = df.index[df['student_id'] == '{}'.format(current_user.id)].tolist()
ans=um.read_sql_cell('python_data_analysis','quiz5_state',index[0])
progress=um.read_sql_cell('python_data_analysis','Course_progress',index[0])
new_progress = '{}{}'.format(int(progress.split('%')[0]) + 18, '%')
if ans=='':
um.edit_sql_cell('python_data_analysis','quiz5_state',index[0],'passed')
um.edit_sql_cell('python_data_analysis', 'Course_progress', index[0], new_progress)
return ('Correct Answer , Nice work..',dict(fontSize=22,color='green',fontWeight='bold'))
elif ans=='passed':
return ('Correct Answer , Nice work..', dict(fontSize=22, color='green', fontWeight='bold'))
elif ans == 'failed':
um.edit_sql_cell('python_data_analysis', 'Course_progress', index[0], new_progress)
um.edit_sql_cell('python_data_analysis', 'quiz5_state', index[0], 'passed')
return ('Correct Answer , Nice work..', dict(fontSize=22, color='green', fontWeight='bold'))
elif answer=='':
return ('',dict(fontSize=22,color='green',fontWeight='bold'))
else:
if current_user.is_authenticated:
df = pd.read_sql_table('python_data_analysis', con='sqlite:///users.db')
index = df.index[df['student_id'] == '{}'.format(current_user.id)].tolist()
ans=um.read_sql_cell('python_data_analysis','quiz5_state',index[0])
progress=um.read_sql_cell('python_data_analysis','Course_progress',index[0])
new_progress = '{}{}'.format(int(progress.split('%')[0]) -18, '%')
if ans=='':
um.edit_sql_cell('python_data_analysis', 'quiz5_state', index[0], 'failed')
return ('Wrong Answer , Try Again..',dict(fontSize=22,color='red',fontWeight='bold'))
elif ans=='passed':
um.edit_sql_cell('python_data_analysis','quiz5_state',index[0],'failed')
um.edit_sql_cell('python_data_analysis', 'Course_progress', index[0], new_progress)
return ('Wrong Answer , Try Again..',dict(fontSize=22,color='red',fontWeight='bold'))
elif ans == 'failed':
return ('Wrong Answer , Try Again..', dict(fontSize=22, color='red', fontWeight='bold'))
@app.callback([Output('data_quiz6_answer', 'children') , Output('data_quiz6_answer', 'style') ],
Input('data_quiz6_submit', 'n_clicks'),State('data_quiz6_choices', 'value') )
def data_quiz6_answer(clicks,answer):
if answer=='callbacks':
if current_user.is_authenticated:
df = pd.read_sql_table('python_data_analysis', con='sqlite:///users.db')
index = df.index[df['student_id'] == '{}'.format(current_user.id)].tolist()
ans=um.read_sql_cell('python_data_analysis','quiz6_state',index[0])
progress=um.read_sql_cell('python_data_analysis','Course_progress',index[0])
new_progress = '{}{}'.format(int(progress.split('%')[0]) + 18, '%')
if ans=='':
um.edit_sql_cell('python_data_analysis','quiz6_state',index[0],'passed')
um.edit_sql_cell('python_data_analysis', 'Course_progress', index[0], new_progress)
return ('Correct Answer , Nice work..',dict(fontSize=22,color='green',fontWeight='bold'))
elif ans=='passed':
return ('Correct Answer , Nice work..', dict(fontSize=22, color='green', fontWeight='bold'))
elif ans == 'failed':
um.edit_sql_cell('python_data_analysis', 'Course_progress', index[0], new_progress)
um.edit_sql_cell('python_data_analysis', 'quiz6_state', index[0], 'passed')
return ('Correct Answer , Nice work..', dict(fontSize=22, color='green', fontWeight='bold'))
elif answer=='':
return ('',dict(fontSize=22,color='green',fontWeight='bold'))
else:
if current_user.is_authenticated:
df = pd.read_sql_table('python_data_analysis', con='sqlite:///users.db')
index = df.index[df['student_id'] == '{}'.format(current_user.id)].tolist()
ans=um.read_sql_cell('python_data_analysis','quiz6_state',index[0])
progress=um.read_sql_cell('python_data_analysis','Course_progress',index[0])
new_progress = '{}{}'.format(int(progress.split('%')[0]) -18, '%')
if ans=='':
um.edit_sql_cell('python_data_analysis', 'quiz6_state', index[0], 'failed')
return ('Wrong Answer , Try Again..',dict(fontSize=22,color='red',fontWeight='bold'))
elif ans=='passed':
um.edit_sql_cell('python_data_analysis','quiz6_state',index[0],'failed')
um.edit_sql_cell('python_data_analysis', 'Course_progress', index[0], new_progress)
return ('Wrong Answer , Try Again..',dict(fontSize=22,color='red',fontWeight='bold'))
elif ans == 'failed':
return ('Wrong Answer , Try Again..', dict(fontSize=22, color='red', fontWeight='bold'))
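# The six quiz callbacks above differ only in their component ids, correct
# answer, state column and progress step. A sketch of a factory that could
# register them in a loop (an untested consolidation reusing the same app,
# um, pd and current_user objects; not part of the original module):
def register_quiz_callback(n, correct, delta):
    @app.callback([Output('data_quiz{}_answer'.format(n), 'children'),
                   Output('data_quiz{}_answer'.format(n), 'style')],
                  Input('data_quiz{}_submit'.format(n), 'n_clicks'),
                  State('data_quiz{}_choices'.format(n), 'value'))
    def grade(clicks, answer):
        ok = (answer == correct)
        style = dict(fontSize=22, color='green' if ok else 'red', fontWeight='bold')
        if answer == '':
            return '', style
        if current_user.is_authenticated:
            df = pd.read_sql_table('python_data_analysis', con='sqlite:///users.db')
            idx = df.index[df['student_id'] == '{}'.format(current_user.id)].tolist()[0]
            col = 'quiz{}_state'.format(n)
            state = um.read_sql_cell('python_data_analysis', col, idx)
            progress = int(um.read_sql_cell('python_data_analysis', 'Course_progress', idx).split('%')[0])
            if ok and state != 'passed':
                um.edit_sql_cell('python_data_analysis', col, idx, 'passed')
                um.edit_sql_cell('python_data_analysis', 'Course_progress', idx, '{}%'.format(progress + delta))
            elif not ok and state == 'passed':
                um.edit_sql_cell('python_data_analysis', col, idx, 'failed')
                um.edit_sql_cell('python_data_analysis', 'Course_progress', idx, '{}%'.format(progress - delta))
            elif not ok and state == '':
                um.edit_sql_cell('python_data_analysis', col, idx, 'failed')
        return ('Correct Answer , Nice work..' if ok else 'Wrong Answer , Try Again..'), style
# e.g. register_quiz_callback(1, 'hist', 10) would stand in for data_quiz1_answer.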
@app.callback(
Output("collapse", "is_open"),
[Input("rate_button", "n_clicks"),Input('submit_rating_button',"n_clicks")],
[State("collapse", "is_open"),State("rate_input", "value")],
)
def toggle_collapse(n1,n2, is_open,input_value):
if is_open==False:
if n1:
return True
else:
return False
elif is_open==True:
if n2 and (input_value>=1 and input_value<=5 ):
if current_user.is_authenticated:
df = pd.read_sql_table('python_data_analysis', con='sqlite:///users.db')
index = df.index[df['student_id'] == '{}'.format(current_user.id)].tolist()
um.edit_sql_cell('python_data_analysis', 'Course_Rating', index[0], input_value)
return False
return True
@app.callback(Output('hidden_div1','children'),
Input('my_interval' , 'n_intervals')
)
def data_enrolled(time):
if current_user.is_authenticated:
df=pd.read_sql_table('python_data_analysis',con='sqlite:///users.db')
index=df.index[df['student_id']=='{}'.format(current_user.id)].tolist()
try:
um.read_sql_cell('python_data_analysis', 'Enrolled', index[0])
return 'enrolled'
except:
return 'not_enrolled'
@app.callback(Output('enroll_data','children'),
Input('hidden_div1','children'))
def data_enrolled2(enroll_state):
if enroll_state== 'enrolled' :
return 'Continue Course'
elif enroll_state== 'not_enrolled':
return 'Enroll Course'
return 'Enroll'
@app.callback(Output('enroll_data','active'),
[Input('enroll_data','n_clicks'), Input('hidden_div1','children') ]
)
def data_enrolled3(enroll_data_btn,enroll_state) :
if enroll_data_btn and enroll_state== 'enrolled':
return True
elif enroll_data_btn and enroll_state== 'not_enrolled':
if current_user.is_authenticated:
um.add_data_student(current_user.id,'yes','0%','','','','','','','','')
return True
return True
@app.callback(
Output('our-table', 'data'),
[Input('Add_Student', 'n_clicks')],
[State('our-table',
from __future__ import print_function
import hashlib
import os
import numpy as np
try:
from lensit.bicubic import bicubic
except ImportError:
print("***could not import bicubic fortran module")
print("***I wont be able to lens maps or invert a deflection field")
bicubic = 'could not import bicubic fortran module'
from lensit.misc import map_spliter
from lensit.misc import misc_utils as utils
from lensit.misc import rfft2_utils
from lensit.misc.misc_utils import PartialDerivativePeriodic as PDP, Log2ofPowerof2, Freq, flatindices
from lensit.pbs import pbs
class ffs_displacement(object):
r"""Flat-sky deflection-field class
Used to perform lensing on maps and to obtain the inverse deflection for iterative lensing estimation
Args:
dx: deflection field, x-component :math:`\alpha_x` (2d-array or path to array on disk)
dy: deflection field, y-component :math:`\alpha_y` (2d-array or path to array on disk)
lsides(tuple): physical size in radians of the flat-sky patch.
LD_res(optional): to perform inversion or lensing large maps are split into chunks sized these powers of two
verbose(optional): various prints out
NR_iter(optional): Number of Newton-Raphson iterations in deflection inversion.
Default works very well for LCDM-like deflection fields
cache_magn(optional): optionally caches magnification determinant matrix when needed
lib_dir(optional): required only if cache_magn is set
"""
def __init__(self, dx, dy, lsides, LD_res=(11, 11), verbose=False, NR_iter=3, lib_dir=None, cache_magn=False):
"""
dx and dy: arrays or paths to .npy arrays of the x and y displacements (displaced map(x) = map(x + d(x)))
Note that the first index is 'y' and the second 'x'
"""
if not hasattr(dx, 'shape'): assert os.path.exists(dx), (pbs.rank, dx)
if not hasattr(dy, 'shape'): assert os.path.exists(dy), (pbs.rank, dy)
# dx, dy can be either the array or the path to the array.
assert len(lsides) == 2
self.dx = dx
self.dy = dy
self.verbose = verbose
self.rule = '4pts' # rule for derivatives
# Checking inputs :
self.shape = self.get_dx().shape
self.lsides = tuple(lsides)
self.rmin = (1. * np.array(self.lsides)) / np.array(self.shape)
HD_res = Log2ofPowerof2(self.shape)
LD_res = LD_res or HD_res
self.HD_res = (HD_res[0], HD_res[1])
self.LD_res = (min(LD_res[0], HD_res[0]), min(LD_res[1], HD_res[1]))
assert self.get_dx().shape == self.get_dy().shape
assert len(self.LD_res) == 2 and (np.array(self.LD_res) <= np.array(self.HD_res)).all()
# Buffer sizes and co :
# Here the buffer size is 6 times the maximal displacement in grid units.
# Might want to think about variable buffer size etc.
buffer0 = np.int16(np.max([10, (6 * np.max(np.abs(self.get_dy())) / self.rmin[0])]))
buffer1 = np.int16(np.max([10, (6 * np.max(np.abs(self.get_dx())) / self.rmin[1])]))
self.buffers = (max(buffer0, buffer1) * (self.LD_res[0] < self.HD_res[0]),
max(buffer0, buffer1) * (self.LD_res[1] < self.HD_res[1]))
self.chk_shape = 2 ** np.array(self.LD_res) + 2 * np.array(self.buffers) # shape of the chunks
self.N_chks = int(np.prod(2 ** (np.array(self.HD_res) - np.array(self.LD_res)))) # Total number of chunks.
if verbose:
print('rank %s, ffs_deflect::buffers size, chk_shape' % pbs.rank, (buffer0, buffer1), self.chk_shape)
self.NR_iter = NR_iter # Number of NR iterations for inverse displacement.
self.lib_dir = lib_dir
self.cache_magn = cache_magn
if self.lib_dir is not None:
if not os.path.exists(self.lib_dir):
try:
os.makedirs(self.lib_dir)
except:
print("ffs_displacement:: unable to create lib. dir. " + self.lib_dir)
@staticmethod
def load_map(m):
if isinstance(m, str):
return np.load(m)
else:
return m
def get_dx(self):
if isinstance(self.dx, str):
return np.load(self.dx)
else:
return self.dx
def get_dy(self):
if isinstance(self.dy, str):
return np.load(self.dy)
else:
return self.dy
def get_dx_ingridunits(self):
return self.get_dx() / self.rmin[1]
def get_dy_ingridunits(self):
return self.get_dy() / self.rmin[0]
def lens_map_crude(self, m, crude):
"""Performs crude approximations to lens operation
Args:
m: map to lens (2d-array of the right shape)
crude: approximation method key
Now supported are *crude* = 1 (nearest pixel rounding) or 2 (first order series expansion in deflection)
"""
if crude == 1:
# Plain interpolation to nearest pixel
ly, lx = np.indices(self.shape)
lx = np.int32(np.round((lx + self.get_dx_ingridunits()).flatten())) % self.shape[1] # Periodicity
ly = np.int32(np.round((ly + self.get_dy_ingridunits())).flatten()) % self.shape[0]
return self.load_map(m).flatten()[flatindices(np.array([ly, lx]), self.shape)].reshape(self.shape)
elif crude == 2:
# First order series expansion
return self.load_map(m) \
+ PDP(self.load_map(m), axis=0, h=self.rmin[0], rule=self.rule) * self.get_dy() \
+ PDP(self.load_map(m), axis=1, h=self.rmin[1], rule=self.rule) * self.get_dx()
else:
assert 0, crude
def lens_map(self, m, use_Pool=0, crude=0, do_not_prefilter=False):
"""Lens the input flat-sky map, using a bicubic spline interpolation algorithm
The task is split in chunks (typically (2048 * 2048), or as specified by the LD_res parameters)
with a buffer size to ensure the junctions are properly performed.
Args:
m: real-space map to deflect. numpy array, or the path to an array on disk.
use_Pool(optional): set this to < 0 to perform the operation on the GPU
crude(optional): uses an alternative crude approximation to lensing if set (check *lens_map_crude*)
do_not_prefilter(optional): sidesteps the bicubic interpolation prefiltering step.
Only use this if you know what you are doing
Returns: deflected real-space map (array)
"""
assert self.load_map(m).shape == self.shape, (self.load_map(m).shape, self.shape)
if crude > 0:
return self.lens_map_crude(m, crude)
if use_Pool < 0:
# use of GPU :
try:
from lensit.gpu import lens_GPU
except ImportError:
assert 0, 'Import of mllens lens_GPU failed !'
GPU_res = np.array(lens_GPU.GPU_HDres_max)
if np.all(np.array(self.HD_res) <= GPU_res):
return lens_GPU.lens_onGPU(m, self.get_dx_ingridunits(), self.get_dy_ingridunits(),
do_not_prefilter=do_not_prefilter)
LD_res, buffers = lens_GPU.get_GPUbuffers(GPU_res)
assert np.all(np.array(buffers) > (np.array(self.buffers) + 5.)), (buffers, self.buffers)
Nchunks = 2 ** (np.sum(np.array(self.HD_res) - np.array(LD_res)))
lensed_map = np.empty(self.shape) # Output
dx_N = np.empty((2 ** LD_res[0] + 2 * buffers[0], 2 ** LD_res[1] + 2 * buffers[1]))
dy_N = np.empty((2 ** LD_res[0] + 2 * buffers[0], 2 ** LD_res[1] + 2 * buffers[1]))
unl_CMBN = np.empty((2 ** LD_res[0] + 2 * buffers[0], 2 ** LD_res[1] + 2 * buffers[1]))
if self.verbose:
print('++ lensing map :' \
' splitting map on GPU , chunk shape %s, buffers %s' % (dx_N.shape, buffers))
spliter_lib = map_spliter.periodicmap_spliter() # library to split periodic maps.
for N in range(Nchunks):
sLDs, sHDs = spliter_lib.get_slices_chk_N(N, LD_res, self.HD_res, buffers)
for sLD, sHD in zip(sLDs, sHDs):
dx_N[sLD] = self.get_dx()[sHD] / self.rmin[1]
dy_N[sLD] = self.get_dy()[sHD] / self.rmin[0]
unl_CMBN[sLD] = self.load_map(m)[sHD]
sLDs, sHDs = spliter_lib.get_slices_chk_N(N, LD_res, self.HD_res, buffers, inverse=True)
lensed_map[sHDs[0]] = lens_GPU.lens_onGPU(unl_CMBN, dx_N, dy_N, do_not_prefilter=do_not_prefilter)[sLDs[0]]
return lensed_map
elif use_Pool == 0 or use_Pool == 1:
assert self.shape[0] == self.shape[1], self.shape
if do_not_prefilter:
filtmap = self.load_map(m).astype(np.float64)
else:
# TODO : may want to add pyFFTW here as well
filtmap = np.fft.rfft2(self.load_map(m))
w0 = 6. / (2. * np.cos(2. * np.pi * np.fft.fftfreq(filtmap.shape[0])) + 4.)
filtmap *= np.outer(w0, w0[0:filtmap.shape[1]])
filtmap = np.fft.irfft2(filtmap, self.shape)
i = np.arange(int(np.prod(self.shape)), dtype=int)
# new coordinates in grid units:
x_gu = self.get_dx_ingridunits().flatten() + i % self.shape[1]
y_gu = self.get_dy_ingridunits().flatten() + i // self.shape[1]
del i
return bicubic.deflect(filtmap, x_gu , y_gu).reshape(self.shape)
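# w0 above is the inverse frequency response of the cubic B-spline kernel
# (weights 1/6, 4/6, 1/6), which makes the spline interpolant pass through
# the original samples. A minimal standalone check of that identity (sketch,
# independent of this class):
#
#   f = np.fft.fftfreq(8)
#   w0 = 6. / (2. * np.cos(2. * np.pi * f) + 4.)
#   k = np.zeros(8); k[[0, 1, -1]] = [4 / 6., 1 / 6., 1 / 6.]
#   assert np.allclose(w0 * np.fft.fft(k).real, 1.)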
def lens_alm(self, lib_alm, alm, lib_alm_out=None, mult_magn=False, use_Pool=0):
"""Returns lensed harmonic coefficients from the unlensed input coefficients
Args:
lib_alm: *lensit.ffs_covs.ell_mat.ffs_alm* instance adapted to input *alm* array
alm: input unlensed flat-sky alm array
lib_alm_out(optional): output *ffs_alm* instance if different from the input
mult_magn(optional): optionally multiplies the real-space lensed map with the magnification det. if set.
use_Pool(optional): calculations are performed on the GPU if negative.
Returns:
lensed alm array
"""
if lib_alm_out is None: lib_alm_out = lib_alm
if use_Pool < 0: # can we fit the full map on the GPU ?
from lensit.gpu import lens_GPU
GPU_res = np.array(lens_GPU.GPU_HDres_max)
if np.all(np.array(self.HD_res) <= GPU_res):
return lens_GPU.lens_alm_onGPU(lib_alm, lib_alm.bicubic_prefilter(alm),
self.get_dx_ingridunits(), self.get_dy_ingridunits(),
do_not_prefilter=True, mult_magn=mult_magn, lib_alm_out=lib_alm_out)
temp_map = self.alm2lenmap(lib_alm, alm, use_Pool=use_Pool)
if mult_magn:
self.mult_wmagn(temp_map, inplace=True)
return lib_alm_out.map2alm(temp_map)
def mult_wmagn(self, m, inplace=False):
if not inplace:
return self.get_det_magn() * m
else:
m *= self.get_det_magn()
return
def alm2lenmap(self, lib_alm, alm, use_Pool=0, crude=0):
"""Return deflected position-space map from its unlensed input harmonic coeffients.
Args:
lib_alm: *lensit.ffs_covs.ell_mat.ffs_alm* instance adapted to input *alm* array
alm: input unlensed flat-sky alm array
Returns:
position space map of shape *lib_alm.shape*
"""
assert alm.shape == (lib_alm.alm_size,), (alm.shape, lib_alm.alm_size)
assert lib_alm.ell_mat.shape == self.shape, (lib_alm.ell_mat.shape, self.shape)
if use_Pool < 0: # can we fit the full map on the GPU? If not, fall back to lens_map
from lensit.gpu import lens_GPU
GPU_res = np.array(lens_GPU.GPU_HDres_max)
if np.all(np.array(self.HD_res) <= GPU_res):
return lens_GPU.alm2lenmap_onGPU(lib_alm, lib_alm.bicubic_prefilter(alm),
self.get_dx_ingridunits(), self.get_dy_ingridunits(),
do_not_prefilter=True)
else:
return self.lens_map(lib_alm.alm2map(lib_alm.bicubic_prefilter(alm)),
use_Pool=use_Pool, do_not_prefilter=True, crude=crude)
def get_det_magn(self):
r"""Returns magnification determinant map
:math:`|M| = \det \begin{pmatrix} 1 + \frac{\partial \alpha_x}{\partial x} & \frac{\partial \alpha_x}{\partial y}
\\ \frac{\partial \alpha_y}{\partial x} & 1 + \frac{\partial \alpha_y}{\partial y} \end{pmatrix}`
"""
# FIXME : bad
if not self.cache_magn:
det = (PDP(self.get_dx(), axis=1, h=self.rmin[1], rule=self.rule) + 1.) \
* (PDP(self.get_dy(), axis=0, h=self.rmin[0], rule=self.rule) + 1.)
det -= PDP(self.get_dy(), axis=1, h=self.rmin[1], rule=self.rule) * \
PDP(self.get_dx(), axis=0, h=self.rmin[0], rule=self.rule)
return det
# -*- coding: utf-8 -*-
# FLEDGE_BEGIN
# See: http://fledge.readthedocs.io/
# FLEDGE_END
""" auth routes """
import re
import json
from collections import OrderedDict
from aiohttp import web
from fledge.services.core.user_model import User
from fledge.common.web.middleware import has_permission
from fledge.common import logger
from fledge.common.web.ssl_wrapper import SSLVerifier
__author__ = "<NAME>, <NAME>, <NAME>"
__copyright__ = "Copyright (c) 2017 OSIsoft, LLC"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
_logger = logger.setup(__name__)
_help = """
------------------------------------------------------------------------------------
| GET | /fledge/user |
| PUT | /fledge/user/{id} |
| PUT | /fledge/user/{username}/password |
| GET | /fledge/user/role |
| POST | /fledge/login |
| PUT | /fledge/{user_id}/logout |
| POST | /fledge/admin/user |
| PUT | /fledge/admin/{user_id}/reset |
| DELETE | /fledge/admin/{user_id}/delete |
------------------------------------------------------------------------------------
"""
JWT_SECRET = 'f0gl@mp'
JWT_ALGORITHM = 'HS256'
JWT_EXP_DELTA_SECONDS = 30*60 # 30 minutes
MIN_USERNAME_LENGTH = 4
PASSWORD_REGEX_PATTERN = r'((?=.*\d)(?=.*[A-Z])(?=.*\W).{6,}$)'
PASSWORD_ERROR_MSG = 'Password must contain at least one digit, one lowercase, one uppercase & one special character ' \
'and length of minimum 6 characters'
FORBIDDEN_MSG = 'Resource you were trying to reach is absolutely forbidden for some reason'
# TODO: remove me, use from roles table
ADMIN_ROLE_ID = 1
DEFAULT_ROLE_ID = 2
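# Sketch of how these JWT constants would typically be used with PyJWT
# (illustrative only; the actual token handling lives in the User model,
# outside this module):
#
#   import jwt
#   from datetime import datetime, timedelta
#   payload = {'uid': 1,
#              'exp': datetime.utcnow() + timedelta(seconds=JWT_EXP_DELTA_SECONDS)}
#   token = jwt.encode(payload, JWT_SECRET, algorithm=JWT_ALGORITHM)
#   claims = jwt.decode(token, JWT_SECRET, algorithms=[JWT_ALGORITHM])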
async def login(request):
""" Validate user with its username and password
:Example:
curl -d '{"username": "user", "password": "<PASSWORD>"}' -X POST http://localhost:8081/fledge/login
curl -T data/etc/certs/user.cert -X POST http://localhost:8081/fledge/login --insecure (--insecure or -k)
"""
auth_method = request.auth_method if 'auth_method' in dir(request) else "any"
data = await request.text()
# Check for appropriate payload per auth_method
if auth_method == 'certificate':
if not data.startswith("-----BEGIN CERTIFICATE-----"):
raise web.HTTPBadRequest(reason="Use a valid certificate to login.")
elif auth_method == 'password':
try:
user_data = json.loads(data)
except json.JSONDecodeError:
raise web.HTTPBadRequest(reason="Use a valid username and password to login.")
if data.startswith("-----BEGIN CERTIFICATE-----"):
peername = request.transport.get_extra_info('peername')
if peername is not None:
host, port = peername
try:
await User.Objects.verify_certificate(data)
username = SSLVerifier.get_subject()['commonName']
uid, token, is_admin = await User.Objects.certificate_login(username, host)
# set the user to request object
request.user = await User.Objects.get(uid=uid)
# set the token to request
request.token = token
except (SSLVerifier.VerificationError, User.DoesNotExist, OSError) as e:
raise web.HTTPUnauthorized(reason="Authentication failed")
except ValueError as ex:
raise web.HTTPUnauthorized(reason="Authentication failed: {}".format(str(ex)))
else:
try:
data = json.loads(data)
except json.JSONDecodeError:
raise web.HTTPBadRequest(reason="Invalid username and/or password.")
username = data.get('username')
password = data.get('password')
if not username or not password:
_logger.warning("Username and password are required to login")
raise web.HTTPBadRequest(reason="Username or password is missing")
username = str(username).lower()
peername = request.transport.get_extra_info('peername')
host = '0.0.0.0'
if peername is not None:
host, port = peername
try:
uid, token, is_admin = await User.Objects.login(username, password, host)
except (User.DoesNotExist, User.PasswordDoesNotMatch, ValueError) as ex:
_logger.warning(str(ex))
return web.HTTPNotFound(reason=str(ex))
except User.PasswordExpired as ex:
# delete all user tokens for this user
await User.Objects.delete_user_tokens(str(ex))
msg = 'Your password has expired. Please set your password again'
_logger.warning(msg)
return web.HTTPUnauthorized(reason=msg)
_logger.info("User with username:<{}> has been logged in successfully".format(username))
return web.json_response({"message": "Logged in successfully", "uid": uid, "token": token, "admin": is_admin})
async def logout_me(request):
""" log out user
:Example:
curl -H "authorization: <token>" -X PUT http://localhost:8081/fledge/logout
"""
if request.is_auth_optional:
# no action needed
return web.json_response({"logout": True})
result = await User.Objects.delete_token(request.token)
if not result['rows_affected']:
_logger.warning("Logout requested with bad user token")
raise web.HTTPNotFound()
_logger.info("User has been logged out successfully")
return web.json_response({"logout": True})
async def logout(request):
""" log out user's all active sessions
:Example:
curl -H "authorization: <token>" -X PUT http://localhost:8081/fledge/{user_id}/logout
"""
if request.is_auth_optional:
_logger.warning(FORBIDDEN_MSG)
raise web.HTTPForbidden
user_id = request.match_info.get('user_id')
if int(request.user["role_id"]) == ADMIN_ROLE_ID or int(request.user["id"]) == int(user_id):
result = await User.Objects.delete_user_tokens(user_id)
if not result['rows_affected']:
_logger.warning("Logout requested with bad user")
raise web.HTTPNotFound()
_logger.info("User with id:<{}> has been logged out successfully".format(int(user_id)))
else:
# requester is not an admin but trying to take action for another user
raise web.HTTPUnauthorized(reason="admin privileges are required to logout other user")
return web.json_response({"logout": True})
async def get_roles(request):
""" get roles
:Example:
curl -H "authorization: <token>" -X GET http://localhost:8081/fledge/user/role
"""
result = await User.Objects.get_roles()
return web.json_response({'roles': result})
async def get_user(request):
""" get user info
:Example:
curl -H "authorization: <token>" -X GET http://localhost:8081/fledge/user
curl -H "authorization: <token>" -X GET http://localhost:8081/fledge/user?id=2
curl -H "authorization: <token>" -X GET http://localhost:8081/fledge/user?username=admin
curl -H "authorization: <token>" -X GET "http://localhost:8081/fledge/user?id=1&username=admin"
"""
user_id = None
user_name = None
if 'id' in request.query:
try:
user_id = int(request.query['id'])
if user_id <= 0:
raise ValueError
except ValueError:
_logger.warning("Get user requested with bad user id")
raise web.HTTPBadRequest(reason="Bad user id")
if 'username' in request.query and request.query['username'] != '':
user_name = request.query['username'].lower()
if user_id or user_name:
try:
user = await User.Objects.get(user_id, user_name)
u = OrderedDict()
u['userId'] = user.pop('id')
u['userName'] = user.pop('uname')
u['roleId'] = user.pop('role_id')
result = u
except User.DoesNotExist as ex:
_logger.warning(str(ex))
raise web.HTTPNotFound(reason=str(ex))
else:
users = await User.Objects.all()
res = []
for row in users:
u = OrderedDict()
u["userId"] = row["id"]
u["userName"] = row["uname"]
u["roleId"] = row["role_id"]
res.append(u)
result = {'users': res}
return web.json_response(result)
@has_permission("admin")
async def create_user(request):
""" create user
:Example:
curl -H "authorization: <token>" -X POST -d '{"username": "any1", "password": "<PASSWORD>"}' http://localhost:8081/fledge/admin/user
curl -H "authorization: <token>" -X POST -d '{"username": "admin1", "password": "<PASSWORD>!", "role_id": 1}' http://localhost:8081/fledge/admin/user
"""
if request.is_auth_optional:
_logger.warning(FORBIDDEN_MSG)
raise web.HTTPForbidden
data = await request.json()
username = data.get('username')
password = data.get('password')
role_id = data.get('role_id', DEFAULT_ROLE_ID)
if not username or not password:
_logger.warning("Username and password are required to create user")
raise web.HTTPBadRequest(reason="Username or password is missing")
if not isinstance(password, str):
_logger.warning(PASSWORD_ERROR_MSG)
raise web.HTTPBadRequest(reason=PASSWORD_ERROR_MSG)
if not re.match(PASSWORD_REGEX_PATTERN, password):
_logger.warning(PASSWORD_ERROR_MSG)
raise web.HTTPBadRequest(reason=PASSWORD_ERROR_MSG)
if not (await is_valid_role(role_id)):
_logger.warning("Create user requested with bad role id")
return web.HTTPBadRequest(reason="Invalid or bad role id")
# TODO: username regex? is email allowed?
username = username.lower().replace(" ", "")
if len(username) < MIN_USERNAME_LENGTH:
msg = "Username should be of minimum 4 characters"
_logger.warning(msg)
raise web.HTTPBadRequest(reason=msg)
try:
await User.Objects.get(username=username)
except User.DoesNotExist:
pass
else:
_logger.warning("Can not create a user, username already exists")
raise web.HTTPConflict(reason="User with the requested username already exists")
u = dict()
try:
result = await User.Objects.create(username, password, role_id)
if result['rows_affected']:
# FIXME: we should not do get again!
# we just need inserted user id; insert call should return that
user = await User.Objects.get(username=username)
u['userId'] = user.pop('id')
u['userName'] = user.pop('uname')
u['roleId'] = user.pop('role_id')
except ValueError as ex:
_logger.warning(str(ex))
raise web.HTTPBadRequest(reason=str(ex))
except Exception as exc:
_logger.exception(str(exc))
raise web.HTTPInternalServerError(reason=str(exc))
_logger.info("User has been created successfully")
return web.json_response({'message': 'User has been created successfully', 'user': u})
async def update_user(request):
if request.is_auth_optional:
_logger.warning(FORBIDDEN_MSG)
raise web.HTTPForbidden
# TODO: FOGL-1226 we don't have any user profile info yet except password, role
raise web.HTTPNotImplemented(reason='FOGL-1226')
async def update_password(request):
""" update password
:Example:
curl -X PUT -d '{"current_password": "<PASSWORD>!", "new_password": "<PASSWORD>"}' http://localhost:8081/fledge/user/<username>/password
"""
if request.is_auth_optional:
_logger.warning(FORBIDDEN_MSG)
raise web.HTTPForbidden
username = request.match_info.get('username')
data = await request.json()
current_password = data.get('current_password')
new_password = data.get('new_password')
if not current_password or not new_password:
msg = "Current or new password is missing"
_logger.warning(msg)
raise web.HTTPBadRequest(reason=msg)
if new_password and not isinstance(new_password, str):
_logger.warning(PASSWORD_ERROR_MSG)
raise web.HTTPBadRequest(reason=PASSWORD_ERROR_MSG)
if new_password and not re.match(PASSWORD_REGEX_PATTERN, new_password):
_logger.warning(PASSWORD_ERROR_MSG)
raise web.HTTPBadRequest(reason=PASSWORD_ERROR_MSG)
if current_password == new_password:
msg = "New password should not be same as current password"
_logger.warning(msg)
raise web.HTTPBadRequest(reason=msg)
user_id = await User.Objects.is_user_exists(username, current_password)
if not user_id:
msg = 'Invalid current password'
_logger.warning(msg)
raise web.HTTPNotFound(reason=msg)
try:
await User.Objects.update(int(user_id), {'password': new_password})
except ValueError as ex:
_logger.warning(str(ex))
raise web.HTTPBadRequest(reason=str(ex))
except User.DoesNotExist:
msg = "User with id:<{}> does not exist".format(int(user_id))
_logger.warning(msg)
raise web.HTTPNotFound(reason=msg)
except User.PasswordAlreadyUsed:
msg = "The new password should be different from previous 3 used"
_logger.warning(msg)
raise web.HTTPBadRequest(reason=msg)
except Exception as exc:
_logger.exception(str(exc))
raise web.HTTPInternalServerError(reason=str(exc))
_logger.info("Password has been updated successfully for user id:<{}>".format(int(user_id)))
return web.json_response({'message': 'Password has been updated successfully for user id:<{}>'.format(int(user_id))})
@has_permission("admin")
async def reset(request):
""" reset user (only role and password)
:Example:
curl -H "authorization: <token>" -X PUT -d '{"role_id": "1"}' http://localhost:8081/fledge/admin/{user_id}/reset
curl -H "authorization: <token>" -X PUT -d '{"password": "<PASSWORD>!"}' http://localhost:8081/fledge/admin/{user_id}/reset
curl -H "authorization: <token>" -X PUT -d '{"role_id": 1, "password": "<PASSWORD>!"}' http://localhost:8081/fledge/admin/{user_id}/reset
"""
if request.is_auth_optional:
_logger.warning(FORBIDDEN_MSG)
raise web.HTTPForbidden
user_id = request.match_info.get('user_id')
if int(user_id) == 1:
msg = "Restricted for Super Admin user"
_logger.warning(msg)
raise web.HTTPNotAcceptable(reason=msg)
data = await request.json()
password = data.get('password')
role_id = data.get('role_id')
if not role_id and not password:
msg = "Nothing to update the user"
_logger.warning(msg)
raise web.HTTPBadRequest(reason=msg)
if role_id and not (await is_valid_role(role_id)):
msg = "Invalid or bad role id"
_logger.warning(msg)
return web.HTTPBadRequest(reason=msg)
if password and not isinstance(password, str):
_logger.warning(PASSWORD_ERROR_MSG)
raise web.HTTPBadRequest(reason=PASSWORD_ERROR_MSG)
if password and not re.match(PASSWORD_REGEX_PATTERN, password):
_logger.warning(PASSWORD_ERROR_MSG)
raise web.HTTPBadRequest(reason=PASSWORD_ERROR_MSG)
user_data = {}
if 'role_id' in data:
class Crawler(object):
"""Crawls a site until a registration page is found or max level is reached.
Creates, uses and destroys Retriever objects. Creates a cookie temp file
needed for session cookies. It keeps track of 'visited links' and
'links to visit' of the site. To do this it uses the links discovered from
each Retriever object. Use Run() to crawl the site.
"""
try:
signal.signal(signal.SIGPIPE, signal.SIG_IGN)
except ImportError:
pass
logger = logging.getLogger(__name__)
def __init__(self, url, logging_level=None):
"""Init crawler URL, links lists, logger, and creates a cookie temp file.
The cookie temp file is needed for session cookies.
Args:
url: the initial "seed" url of the site.
logging_level: the desired verbosity level, default is None.
"""
if logging_level:
self.logger.setLevel(logging_level)
self.url_error = False
url_parsed = urlparse.urlparse(url)
if not url_parsed[0].startswith('http'):
self.logger.error(
'Error: "%s" does not begin with http:// or https://', url)
self.url_error = True
return
# Example: if url is 'http://www.example.com?name=john' then value [1] or
# network location is 'www.example.com'.
if not url_parsed[1]:
self.logger.error('Error: "%s" is not a valid url', url)
self.url_error = True
return
self._url = url
self._domain = ''
# Http links that contain a clue from LINK_CLUES.
self._clues_general_links = []
# Http links that do not contain any clue from LINK_CLUES.
self._general_links = []
# Https links that contain a clue from LINK_CLUES.
self._clues_secure_links = []
# Https links that do not contain any clue from LINK_CLUES.
self._secure_links = []
# All links downloaded and parsed so far.
self._links_visited = []
self._retrievers_list = []
self._cookie_file = tempfile.NamedTemporaryFile(
suffix='.cookie', delete=False)
self._cookie_file.close()
self._cookie_file = self._cookie_file.name # Keep only the filename.
def __del__(self):
"""Deletes cookie file when Crawler instances are destroyed."""
if hasattr(self, '_cookie_file'):
self.logger.info('Deleting cookie file %s ...', self._cookie_file)
os.unlink(self._cookie_file)
def _MultiPerform(self, curl_multi_object):
"""Performs concurrent downloads using a CurlMulti object.
Args:
curl_multi_object: a curl object that downloads multiple pages
concurrently. The class of this object is |pycurl.CurlMulti|.
"""
# Following code uses the example from section for the CurlMulti object
# at http://pycurl.sourceforge.net/doc/curlmultiobject.html.
while True:
ret, no_handles = curl_multi_object.perform()
if ret != pycurl.E_CALL_MULTI_PERFORM:
break
while no_handles:
curl_multi_object.select(1.0)
while True:
ret, no_handles = curl_multi_object.perform()
if ret != pycurl.E_CALL_MULTI_PERFORM:
break
def _GetLinksPages(self, curl_multi_object):
"""Downloads many pages concurrently using a CurlMulti Object.
Creates many Retriever objects and adds them to a list. The constant
MAX_SAME_DOMAIN_URLS_NO defines the number of pages that can be downloaded
concurrently from the same domain using the pycurl multi object. It's
currently set to 30 URLs. These URLs are taken from the links lists, which
are csl, cgl, sl, and gl. The rules define how many URLs are taken from
each list during each iteration.
Example of the rules:
3/10 from csl results in 9 URLs
3/10 from cgl results in 9 URLs
2/10 from sl results in 6 URLs
2/10 from gl results in 6 URLs
Adding up the above URLs gives 30 URLs that can be downloaded concurrently.
If these lists have fewer items than the defined rules, such as if a site
does not contain any secure links, then csl and sl lists will be of 0 length
and only 15 pages would be downloaded concurrently from the same domain.
Since 30 URLs can be handled concurrently, the number of links taken from
other lists can be increased. This means that we can take 24 links from the
cgl list so that 24 from cgl + 6 from gl = 30 URLs. If the cgl list has fewer
than 24 links, e.g. there are only 21 links, then 9 links may be taken from
gl so that 0 + 21 + 0 + 9 = 30.
Args:
curl_multi_object: Each Retriever object has a curl object which is
added to the CurlMulti Object.
"""
self._retrievers_list = []
csl_no = min(CLUE_SECURE_LINKS_NO, len(self._clues_secure_links))
cgl_no = min(CLUE_GENERAL_LINKS_NO, len(self._clues_general_links))
sl_no = min(SECURE_LINKS_NO, len(self._secure_links))
gl_no = min(GENERAL_LINKS_NO, len(self._general_links))
# If some of the lists have fewer items than needed, the missing
# links will be taken from the other lists by the following priority: csl, cgl, sl, gl.
# c: clues, s: secure, g: general, l: list.
spare_links = MAX_SAME_DOMAIN_URLS_NO - (csl_no + sl_no + cgl_no + gl_no)
if spare_links > 0:
csl_no = min(csl_no + spare_links, len(self._clues_secure_links))
spare_links = MAX_SAME_DOMAIN_URLS_NO - (csl_no + sl_no + cgl_no + gl_no)
if spare_links > 0:
cgl_no = min(cgl_no + spare_links, len(self._clues_general_links))
spare_links = MAX_SAME_DOMAIN_URLS_NO - (csl_no + sl_no + cgl_no + gl_no)
if spare_links > 0:
sl_no = min(sl_no + spare_links, len(self._secure_links))
spare_links = MAX_SAME_DOMAIN_URLS_NO - (csl_no + sl_no + cgl_no + gl_no)
if spare_links > 0:
gl_no = min(gl_no + spare_links, len(self._general_links))
for no_of_links, links in [
(csl_no, self._clues_secure_links),
(sl_no, self._secure_links),
(cgl_no, self._clues_general_links),
(gl_no, self._general_links)]:
for i in xrange(no_of_links):
if not links:
break
url = links.pop(0)
self._links_visited.append(url)
r = Retriever(url, self._domain, self._cookie_file)
r.InitRequestHead()
curl_multi_object.add_handle(r._curl_object)
self._retrievers_list.append(r)
if self._retrievers_list:
try:
self._MultiPerform(curl_multi_object)
except pycurl.error as e:
self.logger.error('Error: %s, url: %s', e, self._url)
finally:
for r in self._retrievers_list:
curl_multi_object.remove_handle(r._curl_object)
# |_retrievers_list[:]| is a copy of |_retrievers_list| to avoid removing
# items from the iterated list.
for r in self._retrievers_list[:]:
r._url = urlparse.urljoin(r._url, r._curl_object.getinfo(
pycurl.EFFECTIVE_URL))
content_type = r._curl_object.getinfo(pycurl.CONTENT_TYPE)
if content_type and ('text/html' in content_type.lower()):
r.InitRequestGet()
curl_multi_object.add_handle(r._curl_object)
else:
self._retrievers_list.remove(r)
self.logger.info('\tSkipping: Not an HTML page <<< %s', r._url)
if self._retrievers_list:
try:
self._MultiPerform(curl_multi_object)
except pycurl.error as e:
self.logger.error('Error: %s, url: %s', e, self._url)
finally:
for r in self._retrievers_list:
curl_multi_object.remove_handle(r._curl_object)
self.logger.info('Downloaded: %s', r._url)
def _LogRegPageFound(self, retriever):
"""Display logging for registration page found.
Args:
retriever: The object that has retrieved the page.
"""
self.logger.info('\t##############################################')
self.logger.info('\t### %s ###', retriever._domain)
self.logger.info('\t##############################################')
self.logger.info('\t!!!!!!!!! registration page FOUND !!!!!!!!!!!')
self.logger.info('\t%s', retriever._url)
self.logger.info('\t##############################################')
def _GetNewLinks(self, retriever):
"""Appends new links discovered by each retriever to the appropriate lists.
Links are copied to the links list of the crawler object, which holds all
the links found from all retrievers that the crawler object created. The
Crawler object exists for as long as a specific site is being examined; a
Retriever object exists for as long as a single page of that site is being examined.
Args:
retriever: a temporary object that downloads a specific page, parses the
content and gets the page's href links.
"""
for link in retriever._clues_secure_links:
if (not link in self._clues_secure_links and
not link in self._links_visited):
self._clues_secure_links.append(link)
for link in retriever._secure_links:
if (not link in self._secure_links and
not link in self._links_visited):
self._secure_links.append(link)
for link in retriever._clues_general_links:
if (not link in self._clues_general_links and
not link in self._links_visited):
self._clues_general_links.append(link)
for link in retriever._general_links:
if (not link in self._general_links and
not link in self._links_visited):
self._general_links.append(link)
def Run(self):
"""Runs the Crawler.
Creates a Retriever object and calls its run method to get the first links,
and then uses CurlMulti object and creates many Retriever objects to get
the subsequent pages.
The number of pages (=Retriever objs) created each time is restricted by
MAX_SAME_DOMAIN_URLS_NO. After this number of Retriever objects download
and parse their pages, we do the same again. The number of total pages
visited is kept in urls_visited.
If no registration page is found, the Crawler object gives up after
MAX_TOTAL_URLS_PER_DOMAIN pages have been visited.
Returns:
True if a registration page is found, False otherwise.
"""
reg_page_found = False
if self.url_error:
return False
r = Retriever(self._url, self._domain, self._cookie_file)
if r.Run():
self._LogRegPageFound(r)
reg_page_found = True
else:
self._url = r._url
self._domain = r._domain
self.logger.info('url to crawl: %s', self._url)
self.logger.info('domain: %s', self._domain)
self._links_visited.append(r._url)
self._GetNewLinks(r)
urls_visited = 1
while True:
if (not (self._clues_secure_links or self._secure_links or
self._clues_general_links or self._general_links) or
urls_visited >= MAX_TOTAL_URLS_PER_DOMAIN):
break # Registration page not found.
m = pycurl.CurlMulti()
self._GetLinksPages(m)
urls_visited += len(self._retrievers_list)
self.logger.info('\t<----- URLs visited for domain "%s": %d ----->',
self._domain, urls_visited)
for r in self._retrievers_list:
if r.ParseAndGetLinks():
self._LogRegPageFound(r)
reg_page_found = True
break
else:
self.logger.info('parsed: %s', r._url)
self._GetNewLinks(r)
m.close()
if reg_page_found:
break
# Drop the remaining Retriever references so their objects are destroyed.
while self._retrievers_list:
self._retrievers_list.pop()
return reg_page_found
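# Usage sketch (hypothetical, not part of the original module): drive the
# Crawler for a single site. Assumes the caller has configured logging.
def _ExampleCrawlSite(seed_url):
  """Illustrates the intended Crawler lifecycle for one site."""
  crawler = Crawler(seed_url, logging_level=logging.INFO)
  found = crawler.Run()
  del crawler  # Triggers __del__, which removes the cookie temp file.
  return found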
class WorkerThread(threading.Thread):
"""Creates a new thread of execution."""
def __init__(self, url):
"""Creates _url and page_found attri to populate urls_with_no_reg_page file.
Used after thread's termination for the creation of a | |
"hemlock", "password", 0, "http://127.0.0.1:9200")
error.append(error6)
h, error7 = a.process_action(0, "tenant-create", {'--name':'tenant2'}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
error.append(error7)
hemlock.getpass.getpass = lambda _: '<PASSWORD>'
i, error8 = a.process_action(0, "user-create", {'--name':'user1', '--username':'username1', '--email':'<EMAIL>', '--role_id':g[2][1], '--tenant_id':h[2][1]}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
error.append(error8)
x, error9 = a.process_action(0, "user-list", {}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
error.append(error9)
# !! TODO fix what is returned
return x, error
def process_system_list(self):
"""
Tests system-list action.
:return: returns any data and a list of any errors
"""
error = []
a = hemlock.Hemlock()
m_server = self.connect_mysql(0, "localhost", "travis", "password", "hemlock_test")
b, error1 = a.process_action(0, "system-list", {}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
error.append(error1)
c, error2 = a.process_action(0, "tenant-create", {'--name':'tenant1'}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
error.append(error2)
d, error3 = a.process_action(0, "register-local-system", {'--name':'local-system1', '--data_type':'data-type1', '--description': 'description1', '--tenant_id':c[2][1], '--hostname':'hostname1', '--endpoint':'http://endpoint.com/', '--poc_name':'poc-name1', '--poc_email':'<EMAIL>'}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
error.append(error3)
e, error4 = a.process_action(0, "system-list", {}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
error.append(error4)
f, error5 = a.process_action(0, "tenant-create", {'--name':'tenant2'}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
error.append(error5)
g, error6 = a.process_action(0, "register-local-system", {'--name':'local-system1', '--data_type':'data-type1', '--description': 'description1', '--tenant_id':f[2][1], '--hostname':'hostname1', '--endpoint':'http://endpoint.com/', '--poc_name':'poc-name1', '--poc_email':'<EMAIL>'}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
error.append(error6)
x, error7 = a.process_action(0, "system-list", {}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
error.append(error7)
# !! TODO fix what is returned
return x, error
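# The test methods in this class repeat the same connection arguments on
# every process_action call; a hypothetical helper like the sketch below
# could remove that duplication (argument values are copied verbatim from
# the calls as written, their individual meanings are not documented here):
# def run_action(self, hemlock_obj, m_server, action, args):
#     return hemlock_obj.process_action(0, action, args, m_server,
#                                       "localhost", "hemlock", "hemlock",
#                                       "password", 0, "http://127.0.0.1:9200")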
def process_list_all(self):
"""
Tests list-all action.
:return: returns any data and a list of any errors
"""
error = []
a = hemlock.Hemlock()
m_server = self.connect_mysql(0, "localhost", "travis", "password", "<PASSWORD>")
b, error1 = a.process_action(0, "list-all", {}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
error.append(error1)
c, error2 = a.process_action(0, "role-create", {'--name':'role1'}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
error.append(error2)
d, error3 = a.process_action(0, "tenant-create", {'--name':'tenant1'}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
error.append(error3)
hemlock.getpass.getpass = lambda _: '<PASSWORD>'
e, error4 = a.process_action(0, "user-create", {'--name':'user1', '--username':'username1', '--email':'<EMAIL>', '--role_id':c[2][1], '--tenant_id':d[2][1]}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
error.append(error4)
f, error5 = a.process_action(0, "tenant-create", {'--name':'tenant2'}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
error.append(error5)
x, error6 = a.process_action(0, "register-local-system", {'--name':'local-system1', '--data_type':'data-type1', '--description': 'description1', '--tenant_id':f[2][1], '--hostname':'hostname1', '--endpoint':'http://endpoint.com/', '--poc_name':'poc-name1', '--poc_email':'<EMAIL>'}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
error.append(error6)
# !! TODO fix what is returned
return x, error
def process_role_users_list(self):
"""
Tests role-users-list action.
:return: returns any data and a list of any errors
"""
error = []
a = hemlock.Hemlock()
m_server = self.connect_mysql(0, "localhost", "travis", "password", "<PASSWORD>")
b, error1 = a.process_action(0, "role-create", {'--name':'role1'}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
error.append(error1)
c, error2 = a.process_action(0, "role-users-list", {'--uuid':b[2][1]}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
error.append(error2)
d, error3 = a.process_action(0, "tenant-create", {'--name':'tenant1'}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
error.append(error3)
hemlock.getpass.getpass = lambda _: '<PASSWORD>'
e, error4 = a.process_action(0, "user-create", {'--name':'user1', '--username':'username1', '--email':'<EMAIL>', '--role_id':b[2][1], '--tenant_id':d[2][1]}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
error.append(error4)
f, error5 = a.process_action(0, "role-users-list", {'--uuid':b[2][1]}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
error.append(error5)
g, error6 = a.process_action(0, "tenant-create", {'--name':'tenant2'}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
error.append(error6)
hemlock.getpass.getpass = lambda _: '<PASSWORD>'
h, error7 = a.process_action(0, "user-create", {'--name':'user2', '--username':'username2', '--email':'<EMAIL>', '--role_id':b[2][1], '--tenant_id':g[2][1]}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
error.append(error7)
x, error8 = a.process_action(0, "role-users-list", {'--uuid':b[2][1]}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
error.append(error8)
# !! TODO fix what is returned
return x, error
def process_system_tenants_list(self):
"""
Tests system-tenants-list action.
:return: returns any data and a list of any errors
"""
error = []
a = hemlock.Hemlock()
m_server = self.connect_mysql(0, "localhost", "travis", "password", "<PASSWORD>")
b, error1 = a.process_action(0, "tenant-create", {'--name':'tenant1'}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
error.append(error1)
c, error2 = a.process_action(0, "register-local-system", {'--name':'local-system1', '--data_type':'data-type1', '--description': 'description1', '--tenant_id':b[2][1], '--hostname':'hostname1', '--endpoint':'http://endpoint.com/', '--poc_name':'poc-name1', '--poc_email':'<EMAIL>'}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
error.append(error2)
d, error3 = a.process_action(0, "system-tenants-list", {'--uuid':c[9][1]}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
error.append(error3)
e, error4 = a.process_action(0, "tenant-create", {'--name':'tenant2'}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
error.append(error4)
f, error5 = a.process_action(0, "system-add-tenant", {'--uuid':c[9][1], '--tenant_id':e[2][1]}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
error.append(error5)
x, error6 = a.process_action(0, "system-tenants-list", {'--uuid':c[9][1]}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
error.append(error6)
# !! TODO fix what is returned
return x, error
def process_tenant_systems_list(self):
"""
Tests tenant-systems-list action.
:return: returns any data and a list of any errors
"""
error = []
a = hemlock.Hemlock()
m_server = self.connect_mysql(0, "localhost", "travis", "password", "<PASSWORD>")
b, error1 = a.process_action(0, "tenant-create", {'--name':'tenant1'}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
error.append(error1)
c, error2 = a.process_action(0, "tenant-systems-list", {'--uuid':b[2][1]}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
error.append(error2)
d, error3 = a.process_action(0, "register-local-system", {'--name':'local-system1', '--data_type':'data-type1', '--description': 'description1', '--tenant_id':b[2][1], '--hostname':'hostname1', '--endpoint':'http://endpoint.com/', '--poc_name':'poc-name1', '--poc_email':'<EMAIL>'}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
error.append(error3)
e, error4 = a.process_action(0, "tenant-systems-list", {'--uuid':b[2][1]}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
error.append(error4)
f, error5 = a.process_action(0, "register-local-system", {'--name':'local-system2', '--data_type':'data-type1', '--description': 'description1', '--tenant_id':b[2][1], '--hostname':'hostname1', '--endpoint':'http://endpoint.com/', '--poc_name':'poc-name1', '--poc_email':'<EMAIL>'}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
error.append(error5)
x, error6 = a.process_action(0, "tenant-systems-list", {'--uuid':b[2][1]}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
error.append(error6)
# !! TODO fix what is returned
return x, error
def process_tenant_users_list(self):
"""
Tests tenant-users-list action.
:return: returns any data and a list of any errors
"""
error = []
a = hemlock.Hemlock()
m_server = self.connect_mysql(0, "localhost", "travis", "password", "<PASSWORD>")
b, error1 = a.process_action(0, "tenant-create", {'--name':'tenant1'}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
error.append(error1)
c, error2 = a.process_action(0, "tenant-users-list", {'--uuid':b[2][1]}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
error.append(error2)
d, error3 = a.process_action(0, "role-create", {'--name':'role1'}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
error.append(error3)
hemlock.getpass.getpass = lambda _: '<PASSWORD>'
e, error4 = a.process_action(0, "user-create", {'--name':'user1', '--username':'username1', '--email':'<EMAIL>', '--role_id':d[2][1], '--tenant_id':b[2][1]}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
error.append(error4)
f, error5 = a.process_action(0, "tenant-users-list", {'--uuid':b[2][1]}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
error.append(error5)
g, error6 = a.process_action(0, "role-create", {'--name':'role2'}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
error.append(error6)
hemlock.getpass.getpass = lambda _: '<PASSWORD>'
h, error7 = a.process_action(0, "user-create", {'--name':'user2', '--username':'username2', '--email':'<EMAIL>', '--role_id':g[2][1], '--tenant_id':b[2][1]}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
error.append(error7)
x, error8 = a.process_action(0, "tenant-users-list", {'--uuid':b[2][1]}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
error.append(error8)
# !! TODO fix what is returned
return x, error
def process_user_roles_list(self):
"""
Tests user-roles-list action.
:return: returns any data and a list of any errors
"""
error = []
a = hemlock.Hemlock()
m_server = self.connect_mysql(0, "localhost", "travis", "password", "<PASSWORD>")
b, error1 = a.process_action(0, "tenant-create", {'--name':'tenant1'}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
error.append(error1)
c, error2 = a.process_action(0, "role-create", {'--name':'role1'}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
error.append(error2)
hemlock.getpass.getpass = lambda _: '<PASSWORD>'
d, error3 = a.process_action(0, "user-create", {'--name':'user1', '--username':'username1', '--email':'<EMAIL>', '--role_id':c[2][1], '--tenant_id':b[2][1]}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
error.append(error3)
e, error4 = a.process_action(0, "user-roles-list", {'--uuid':d[7][1]}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
error.append(error4)
f, error5 = a.process_action(0, "role-create", {'--name':'role2'}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
error.append(error5)
g, error6 = a.process_action(0, "user-add-role", {'--uuid':d[7][1], '--role_id':f[2][1]}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
error.append(error6)
x, error7 = a.process_action(0, "user-roles-list", {'--uuid':d[7][1]}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
error.append(error7)
# !! TODO fix what is returned
return x, error
def process_user_tenants_list(self):
"""
Tests user-tenants-list action.
:return: returns any data and a list of any errors
"""
error = []
a = hemlock.Hemlock()
m_server = self.connect_mysql(0, "localhost", "travis", "password", "<PASSWORD>")
b, error1 = a.process_action(0, "tenant-create", {'--name':'tenant1'}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
error.append(error1)
c, error2 = a.process_action(0, "role-create", {'--name':'role1'}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
error.append(error2)
hemlock.getpass.getpass = lambda _: '<PASSWORD>'
d, error3 = a.process_action(0, "user-create", {'--name':'user1', '--username':'username1', '--email':'<EMAIL>', '--role_id':c[2][1], '--tenant_id':b[2][1]}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
error.append(error3)
e, error4 = a.process_action(0, "user-tenants-list", {'--uuid':d[7][1]}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
error.append(error4)
f, error5 = a.process_action(0, "tenant-create", {'--name':'tenant2'}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
error.append(error5)
g, error6 = a.process_action(0, "user-add-tenant", {'--uuid':d[7][1], '--tenant_id':f[2][1]}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
error.append(error6)
x, error7 = a.process_action(0, "user-tenants-list", {'--uuid':d[7][1]}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
error.append(error7)
# !! TODO fix what is returned
return x, error
def process_deregister_local_system(self):
"""
Tests deregister-local-system action.
:return: returns any data and a list of any errors
"""
#!/cosma/home/dp004/dc-rope1/.conda/envs/flares-env/bin/python
import matplotlib as ml
ml.use('Agg')
import numpy as np
import sphviewer as sph
from sphviewer.tools import QuickView, cmaps, camera_tools, Blend
import matplotlib.pyplot as plt
from astropy.cosmology import Planck13 as cosmo
import matplotlib.colors as mcolors
import scipy.ndimage as ndimage
import sys
from guppy import hpy; h=hpy()
from scipy.spatial import cKDTree
import os
from swiftsimio import load
import unyt
import gc
def hex_to_rgb(value):
'''
Converts hex to rgb colours
value: string of 6 characters representing a hex colour.
Returns: tuple of length 3 of RGB values'''
value = value.strip("#") # removes hash symbol if present
lv = len(value)
return tuple(int(value[i:i + lv // 3], 16) for i in range(0, lv, lv // 3))
def rgb_to_dec(value):
'''
Converts rgb to decimal colours (i.e. divides each value by 256)
value: list (length 3) of RGB values
Returns: list (length 3) of decimal values'''
return [v/256 for v in value]
def get_continuous_cmap(hex_list, float_list=None):
''' creates and returns a color map that can be used in heat map figures.
If float_list is not provided, colour map graduates linearly between each color in hex_list.
If float_list is provided, each color in hex_list is mapped to the respective location in float_list.
Parameters
----------
hex_list: list of hex code strings
float_list: list of floats between 0 and 1, same length as hex_list. Must start with 0 and end with 1.
Returns
----------
colour map'''
rgb_list = [rgb_to_dec(hex_to_rgb(i)) for i in hex_list]
if not float_list:
float_list = list(np.linspace(0, 1, len(rgb_list)))
cdict = dict()
for num, col in enumerate(['red', 'green', 'blue']):
col_list = [[float_list[i], rgb_list[i][num], rgb_list[i][num]] for i
in range(len(float_list))]
cdict[col] = col_list
cmp = mcolors.LinearSegmentedColormap('my_cmp', segmentdata=cdict, N=256)
return cmp
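# Illustrative use of get_continuous_cmap (hypothetical colours): a simple
# black-to-white map biased towards the dark end.
# cmap = get_continuous_cmap(["#000000", "#808080", "#ffffff"],
#                            float_list=[0.0, 0.25, 1.0])
# rgba = cmap(0.5)  # sample the interpolated map at its midpoint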
def get_normalised_image(img, vmin=None, vmax=None):
if vmin is None:
vmin = np.min(img)
if vmax is None:
vmax = np.max(img)
img = np.clip(img, vmin, vmax)
img = (img - vmin) / (vmax - vmin)
return img
def cart_to_spherical(pos):
s_pos = np.zeros_like(pos)
xy = pos[:, 0] ** 2 + pos[:, 1] ** 2
s_pos[:, 0] = np.sqrt(xy + pos[:, 2] ** 2)
s_pos[:, 1] = np.arctan2(np.sqrt(xy), pos[:, 2]) - (np.pi/2)
s_pos[:, 2] = np.arctan2(pos[:,1], pos[:,0])
return s_pos
def spherical_to_equirectangular(pos, t0=0, p0=0):
x = 1 * (pos[:, 1] - t0)
y = 1 * (pos[:, 2] - p0)
eq = np.zeros((pos.shape[0], 2))
eq[:, 0] = x
eq[:, 1] = y
return eq, pos[:, 0]
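# Taken together, cart_to_spherical followed by spherical_to_equirectangular
# implements a plate carree (equirectangular) projection: latitude and
# longitude map linearly onto the image axes, while the radius is returned
# separately for later use.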
def getimage(data, poss, mass, hsml, num, img_dimens, cmap, Type="gas"):
print('There are', poss.shape[0], 'gas particles in the region')
# Set up particle objects
P = sph.Particles(poss, mass=mass, hsml=hsml)
print(np.min(mass))
# Initialise the scene
S = sph.Scene(P)
i = data[num]
i['xsize'] = img_dimens
i['ysize'] = img_dimens
i['roll'] = 0
S.update_camera(**i)
R = sph.Render(S)
R.set_logscale()
img = R.get_image()
img = ndimage.gaussian_filter(img, sigma=(3, 3), order=0)
if Type == "gas":
vmax = 11
vmin = 6
print("gas", np.max(img))
else:
vmax = 13
vmin = 7.5
print("star", np.max(img))
# Convert images to rgb arrays
rgb = cmap(get_normalised_image(img, vmin=vmin, vmax=vmax))
return rgb, R.get_extent()
def make_soft_img(pos, poss, img_dimens, imgrange, ls, smooth, rs):
# Define x and y positions for the gaussians
Gy, Gx = np.meshgrid(np.linspace(imgrange[0][0], imgrange[0][1], img_dimens[1]),
np.linspace(imgrange[1][0], imgrange[1][1], img_dimens[0]))
# Define pixel position array for the KDTree
pix_pos = np.zeros((Gx.size, 2))
pix_pos[:, 0] = Gx.ravel()
pix_pos[:, 1] = Gy.ravel()
# Build KDTree
tree = cKDTree(pix_pos)
print("Pixel tree built")
# Define x and y positions of pixels
X, Y = np.meshgrid(np.arange(0, img_dimens[0], 1),
np.arange(0, img_dimens[1], 1))
# Define pixel position array for the KDTree
pix_pos = np.zeros((X.size, 2), dtype=int)
pix_pos[:, 0] = X.ravel()
pix_pos[:, 1] = Y.ravel()
# Initialise the image array
gsmooth_img = np.zeros((img_dimens[0], img_dimens[1]))
# Loop over each star computing the smoothed gaussian
# distribution for this particle
for ipos, l, sml, (i, r) in zip(pos, ls, smooth, enumerate(rs)):
if i % 100000 == 0:
print(i, end="\r")
x, y = ipos
# Query the tree for this particle
dist, inds = tree.query(ipos, k=int(np.pi * 5**2))
x_sph1 = r * np.arctan2(poss[i, 1], poss[i, 0])
x_sph2 = (r + sml) * np.arctan2(poss[i, 1] + sml, poss[i, 0] + sml)
xsml = x_sph2 - x_sph1
xy = poss[i, 0] ** 2 + poss[i, 1] ** 2
y_sph1 = r * np.arctan2(np.sqrt(xy), poss[i, 2]) - (np.pi / 2)
xy = (poss[i, 0] + sml) ** 2 + (poss[i, 1] + sml) ** 2
y_sph2 = (r + sml) * np.arctan2(np.sqrt(xy), poss[i, 2] + sml) - (np.pi / 2)
ysml = y_sph2 - y_sph1
# Compute the image
g = np.exp(-(((Gx[pix_pos[inds, 0], pix_pos[inds, 1]] - x) ** 2 / (2.0 * xsml ** 2))
+ ((Gy[pix_pos[inds, 0], pix_pos[inds, 1]] - y) ** 2 / (2.0 * ysml ** 2))))
# Get the sum of the gaussian
gsum = np.sum(g)
# If there are stars within the image in this gaussian
# add it to the image array
if gsum > 0:
gsmooth_img[pix_pos[inds, 0], pix_pos[inds, 1]] += g * l / gsum
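# NOTE: the histogram below overwrites gsmooth_img, discarding the smoothed
# image accumulated in the loop above; what is returned is the plain 2-D
# histogram of the positions weighted by ls.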
gsmooth_img, xedges, yedges = np.histogram2d(pos[:, 0], pos[:, 1],
bins=img_dimens,
range=imgrange,
weights=ls)
return gsmooth_img
def single_frame(num, max_pixel, nframes):
snap = "%04d" % num
# Define path
path = '/cosma/home/dp004/dc-rope1/cosma7/SWIFT/hydro_1380_ani/data/ani_hydro_' + snap + ".hdf5"
snap = "%05d" % num
img_dimens = (2048, 4096)
data = load(path)
meta = data.metadata
boxsize = meta.boxsize[0]
z = meta.redshift
print("Boxsize:", boxsize)
# Define centre
cent = np.array([11.76119931, 3.95795609, 1.26561173])
# Define targets
targets = [[0, 0, 0]]
id_frames = np.arange(0, 1381, dtype=int)
rs = np.full(len(id_frames), 0., dtype=float)
simtimes = np.zeros(len(id_frames), dtype=int)
id_targets = np.zeros(len(id_frames), dtype=int)
zoom = np.full(len(id_frames), 1)
extent = np.full(len(id_frames), 10)
hex_list = ["#000000", "#590925", "#6c1c55", "#7e2e84", "#ba4051",
"#f6511d", "#ffb400", "#f7ec59", "#fbf6ac", "#ffffff"]
float_list = [0, 0.2, 0.3, 0.4, 0.45, 0.5, 0.7, 0.8, 0.9, 1]
cmap = get_continuous_cmap(hex_list, float_list=float_list)
poss = data.gas.coordinates.value
mass = data.gas.masses.value * 10 ** 10
rho_gas = data.gas.densities.value
# okinds = np.linalg.norm(poss - cent, axis=1) < 1
# cent = np.average(poss[okinds], weights=rho_gas[okinds], axis=0)
print("Centered on:", cent)
poss -= cent
hsmls = data.gas.smoothing_lengths.value
poss[np.where(poss > boxsize.value / 2)] -= boxsize.value
poss[np.where(poss < - boxsize.value / 2)] += boxsize.value
cart_poss = np.copy(poss)
poss = cart_to_spherical(poss)
print(poss.min(axis=0), poss.max(axis=0))
poss, rs = spherical_to_equirectangular(poss)
print(poss.min(axis=0), poss.max(axis=0))
max_rad = np.sqrt(3 * (boxsize.value / 2)**2)
# Define range and extent for the images in radians
imgrange = ((-np.pi / 2, np.pi / 2), (-np.pi, np.pi))
# imgrange = ((poss[:, 0].min(), poss[:, 0].max()),
# (poss[:, 1].min(), poss[:, 1].max()))
imgextent = (-np.pi / 2, np.pi / 2, -np.pi, np.pi)
ini_img = make_soft_img(poss, cart_poss, img_dimens, imgrange, mass, hsmls, rs)
img = np.zeros_like(ini_img)
img[ini_img > 0] = np.log10(ini_img[ini_img > 0])
vmax = 9
vmin = 6
print(np.max(img), np.min(img[img > 0]))
img = cmap(get_normalised_image(img, vmin=vmin, vmax=vmax))
# # Get colormap
# cmap = ml.cm.Greys_r
#
# try:
# poss = data.stars.coordinates.value - cent
# mass = data.stars.masses.value * 10 ** 10
# hsmls = data.stars.smoothing_lengths.value
#
# if hsmls.max() == 0.0:
# print("Ill-defined smoothing lengths")
#
# last_snap = "%04d" % (num - 1)
#
# # Define path
# path = '/cosma/home/dp004/dc-rope1/cosma7/SWIFT/hydro_1380_ani/data/ani_hydro_' + last_snap + ".hdf5"
#
# data = load(path)
# old_hsmls = data.stars.smoothing_lengths.value
# hsmls[:old_hsmls.size] = old_hsmls
# hsmls[old_hsmls.size:] = np.median(old_hsmls)
#
# print(np.min(hsmls), np.max(hsmls))
#
# poss[np.where(poss > boxsize.value / 2)] -= boxsize.value
# poss[np.where(poss < - boxsize.value / 2)] += boxsize.value
#
# for proj_ind in range(6):
#
# ts = np.full(len(id_frames), t_projs[proj_ind])
# ps = np.full(len(id_frames), p_projs[proj_ind])
#
# proj = projs[proj_ind]
#
# # Define anchors dict for camera parameters
# anchors = {}
# anchors['sim_times'] = list(simtimes)
# anchors['id_frames'] = list(id_frames)
# anchors['id_targets'] = list(id_targets)
# anchors['r'] = list(rs)
# anchors['t'] = list(ts)
# anchors['p'] = list(ps)
# anchors['zoom'] = list(zoom)
# anchors['extent'] = list(extent)
#
# print(f"Processing projection {proj} with properties:")
# for key, val in anchors.items():
# print(key, "=", val[num])
#
# # Define the camera trajectory
# cam_data = camera_tools.get_camera_trajectory(targets, anchors)
#
# # Get images
# star_imgs[proj], ang_extent = getimage(cam_data, poss, mass,
# hsmls, num,
# img_dimens, cmap,
# Type="star")
# except AttributeError:
# for proj_ind in range(6):
# proj = projs[proj_ind]
# star_imgs[proj] = np.zeros_like(gas_imgs[proj])
#
# imgs = {}
#
#
# author: <NAME>
from p5 import *
import sympy as sym
import mpmath as mp
import numpy as np
from tkinter import Tk
from scipy.spatial import distance
import PIL
from PIL import Image
import argparse
import os
import csv
import mimetypes
DEBUG = False
parser = argparse.ArgumentParser(
description='Custom frame annotator implemented in p5 and python.')
parser.add_argument('--input', dest='input',
help='Path to the directory with the input images', required=False, type=str, default='input/')
parser.add_argument('--output', dest='output',
help='Path to the directory with the output images', required=False, type=str, default='output/')
parser.add_argument('--cache', dest='cache',
help='Path to the cache directory (DON\'T INCLUDE \\)', required=False, type=str, default='cache')
parser.add_argument('--scale', dest='scale',
help='scaling factor for viewing images', required=False, type=float, default=0.3)
root = Tk()
width = root.winfo_screenwidth()
height = root.winfo_screenheight()
window_offset = 200
image_width = width - window_offset
image_height = (height/width) * image_width
args = parser.parse_args()
input_dir = args.input
output_dir = args.output
cache_dir = args.cache
dirs = []
images = []
img_size = []
index = 0
points = []
c_points = []
lines = []
rectangles = []
p_colors = []
l_colors = []
last_action = 'script started'
std_color = Color(255, 255, 255) # white
a_color = Color(255, 0, 0)  # red
b_color = Color(0, 255, 0)  # green
c_color = Color(0, 0, 255)  # blue
def validate_dirs():
global DEBUG, input_dir, output_dir, cache_dir
dir_list = [input_dir, output_dir, cache_dir]
for directory in dir_list:
if not os.path.exists(directory):
os.makedirs(directory)
if DEBUG:
print('[validate_dirs] Validated Directories')
def load():
global DEBUG, last_action, points, dirs, images, img_size, index, input_dir, output_dir, args, width, height, image_width, image_height, lines, p_colors, l_colors, a_color, b_color, c_color, rectangles
validate_dirs()
load_images_from_folder(input_dir)
rectangles = load_bbox_from_file()
last_action = 'loaded images'
def setup():
global DEBUG, last_action, points, dirs, images, img_size, index, input_dir, output_dir, args, width, height, image_width, image_height, lines, p_colors, l_colors, a_color, b_color, c_color, rectangles
size(width - window_offset, image_height)
title('Light-notator')
last_action = 'setup window'
no_loop()
rect_mode(mode='CENTER')
def check_index():
global index
if index > len(images) - 1:
index = 0
if index < 0:
index = len(images) - 1
def draw():
global DEBUG, last_action, points, dirs, images, img_size, index, input_dir, output_dir, args, width, height, image_width, image_height, lines, p_colors, l_colors, a_color, b_color, c_color, rectangles
background(255)
check_index()
image(images[index], (0, 0), (image_width, image_height))
text(f'index: {index}', (5, 5))
text(f'current image: ({dirs[index]})', (5, 15))
text(f'# points: {len(points)}', (5, 25))
text(f'last action: ({last_action})', (5, 35))
for m_rectangle in rectangles:
no_fill()
stroke_weight(2)
stroke(117, 255, 117)
x_translate = floor(m_rectangle[0] * img_size[index][0])
y_translate = floor(m_rectangle[1] * img_size[index][1])
rect_width = floor(m_rectangle[2] * img_size[index][0])
rect_height = floor(m_rectangle[3] * img_size[index][1])
translate(x_translate, y_translate)
rotate(m_rectangle[4])
rect((0, 0), rect_width, rect_height)
rotate(-1 * m_rectangle[4])
translate(-1 * x_translate, -1 * y_translate)
color_index = 0
for m_point in points:
fill(p_colors[color_index])
stroke_weight(1)
stroke(41)
ellipse((m_point[0], m_point[1]), 5, 5)
color_index += 1
color_index = 0
for m_line in lines:
fill(l_colors[color_index])
line(m_line[0], m_line[1])
color_index += 1
fill(std_color)
def mouse_pressed():
global DEBUG, last_action, points, dirs, images, img_size, index, input_dir, output_dir, args, width, height, image_width, image_height, lines, p_colors, l_colors, a_color, b_color, c_color, rectangles
if DEBUG:
print(f'mouse pressed at ({mouse_x},{mouse_y})')
add_point(mouse_x, mouse_y, std_color)
constrain_square()
redraw()
def key_pressed():
global DEBUG, last_action, points, dirs, images, img_size, index, input_dir, output_dir, args, width, height, image_width, image_height, lines, p_colors, l_colors, a_color, b_color, c_color, rectangles
if ((key == 'R') or (key == 'r')):
remove_point()
if ((key == 'c') or (key == 'C')):
points = []
lines = []
rectangles = []
p_colors = []
l_colors = []
last_action = 'cleared all points'
if (key == 'd'):
redraw()
if (key == "2"):
last_action = 'moved to next frame'
write_bbox_to_file()
index += 1
check_index()
rectangles = load_bbox_from_file()
if (key == "1"):
last_action = 'moved to previous frame'
write_bbox_to_file()
index -= 1
check_index()
rectangles = load_bbox_from_file()
redraw()
def load_images_from_folder(folder):
global DEBUG, last_action, points, dirs, images, img_size, index, input_dir, output_dir, args, width, height, image_width, image_height, lines, p_colors, l_colors, a_color, b_color, c_color, rectangles
for filename in os.listdir(folder):
img_dir = os.path.join(folder, filename)
file_type = str(mimetypes.guess_type(img_dir)[0])[0:5]
if file_type == 'image':
temp_img = Image.open(img_dir)
wsize = int((float(temp_img.size[0]) * float(args.scale)))
hsize = int((float(temp_img.size[1]) * float(args.scale)))
temp_img = temp_img.resize((wsize, hsize), PIL.Image.ANTIALIAS)
new_dir = os.path.join(args.cache, filename)
temp_img.save(f'{new_dir}')
img_size.append((image_width, image_height))
dirs.append(new_dir)
images.append(load_image(new_dir))
dirs, images, img_size = (list(t)
for t in zip(*sorted(zip(dirs, images, img_size))))
def add_point(in_x, in_y, color):
global DEBUG, last_action, points, dirs, images, img_size, index, input_dir, output_dir, args, width, height, image_width, image_height, lines, p_colors, l_colors, a_color, b_color, c_color, rectangles
if in_x <= image_width and in_y <= image_height:
points.append((in_x, in_y))
p_colors.append(color)
last_action = 'added point'
def add_line(temp_point_0, temp_point_1, color):
global DEBUG, last_action, points, dirs, images, img_size, index, input_dir, output_dir, args, width, height, image_width, image_height, lines, p_colors, l_colors, a_color, b_color, c_color, rectangles
lines.append((temp_point_0, temp_point_1))
l_colors.append(Color(0, 0, 0))
def constrain_square():
global DEBUG, last_action, points, dirs, images, img_size, index, input_dir, output_dir, args, width, height, image_width, image_height, lines, p_colors, l_colors, a_color, b_color, c_color, rectangles
if len(points) == 3:
dist = []
pairs = []
for pointA in points:
for pointB in points:
dist.append(abs(distance.euclidean(pointA, pointB)))
pairs.append((pointA, pointB))
for point in points:
# arbitrarily define temporary points in order to find pointC
if not ((point == pairs[dist.index(max(dist))][0]) or (point == pairs[dist.index(max(dist))][1])):
pointC = point
hypot = max(dist)
temp_distance_0 = abs(distance.euclidean(
pointC, pairs[dist.index(max(dist))][0]))
temp_distance_1 = abs(distance.euclidean(
pointC, pairs[dist.index(max(dist))][1]))
if (temp_distance_0 > temp_distance_1):
pointA = pairs[dist.index(max(dist))][0]
pointB = pairs[dist.index(max(dist))][1]
angle_flip = False
else:
pointA = pairs[dist.index(max(dist))][1]
pointB = pairs[dist.index(max(dist))][0]
angle_flip = True
if DEBUG:
p_colors[points.index(pointA)] = a_color
p_colors[points.index(pointB)] = b_color
p_colors[points.index(pointC)] = c_color
leg1 = abs(distance.euclidean(pointC, pointA))
hypot = abs(distance.euclidean(pointB, pointA))
leg1_vector = (pointC[0] - pointA[0], pointC[1] - pointA[1])
hypot_vector = (pointB[0] - pointA[0], pointB[1] - pointA[1])
if DEBUG:
add_line(pointA, pointB, std_color)
print(
f'leg vector is {leg1_vector} and hyp_vector is {hypot_vector}')
print(
f'pointA is {pointA} and pointB is {pointB} and pointC is {pointC}')
theta = sym.acos(
(leg1_vector[0]*hypot_vector[0]+leg1_vector[1]*hypot_vector[1])/(leg1*hypot))
std_unit_vector = (1, 0)
theta_prime = sym.acos((leg1_vector[0]*std_unit_vector[0] +
leg1_vector[1]*std_unit_vector[1])/(leg1))
leg2 = leg1 * mp.tan(theta)
increment = (leg2 * mp.sin(theta_prime),
leg2 * mp.cos(theta_prime))
temp_b_check = pointB[0] > pointA[0]
if pointC[1] > pointA[1]:
increment = (-1 * increment[0], increment[1])
if not (temp_b_check == (float(pointC[0] + increment[0]) > pointA[0])):
increment = (-1 * increment[0], -1 * increment[1])
third_point = (float(pointC[0] + increment[0]),
float(pointC[1] + increment[1]))
points[points.index(pointB)] = third_point
pointB = third_point
pointD = (float(pointA[0] + increment[0]),
float(pointA[1] + increment[1]))
add_point(pointD[0], pointD[1], std_color)
validate_constraint()
angle_factor = -1
rectangle_tilt = get_angle([pointC[0], pointC[1]], [pointA[0], pointA[1]], [
pointA[0] + 20, pointA[1]])
if DEBUG:
print(f'rectangle tilt is: {180 * rectangle_tilt / mp.pi}')
rectangle_tilt *= angle_factor
if DEBUG:
print(f'shifted rectangle tilt is: {180 * rectangle_tilt / mp.pi}')
rectangle_width = abs(distance.euclidean(pointC, pointA))
rectangle_height = abs(distance.euclidean(pointD, pointA))
averageX = 0
averageY = 0
for point in points:
averageX += point[0]
averageY += point[1]
averageX /= len(points)
averageY /= len(points)
add_rectangle(averageX, averageY, rectangle_width,
rectangle_height, rectangle_tilt)
points = []
else:
last_action = 'constrain_square failed: not enough points'
lines = []
def add_rectangle(in_x, in_y, rectangle_width, rectangle_height, rectangle_tilt):
global DEBUG, last_action, points, dirs, images, img_size, index, input_dir, output_dir, args, width, height, image_width, image_height, lines, p_colors, l_colors, a_color, b_color, c_color, rectangles
x_relative = in_x/img_size[index][0]
y_relative = in_y/img_size[index][1]
rect_width_relative = rectangle_width/img_size[index][0]
rect_height_relative = rectangle_height/img_size[index][1]
rectangles.append((x_relative, y_relative, rect_width_relative,
rect_height_relative, rectangle_tilt))
def validate_constraint():
global DEBUG, last_action, points, dirs, images, img_size, index, input_dir, output_dir, args, width, height, image_width, image_height, lines, p_colors, l_colors, a_color, b_color, c_color, rectangles
angles = []
for pointA in points:
for pointB in points:
if pointB == pointA:
continue
for pointC in points:
if pointC == pointA or pointC == pointB:
continue
angle = 180 * get_angle(pointA, pointB, pointC) / np.pi
if angle == 90 or (angle > 89.9 and angle < 90.1):
angles.append(angle)
if DEBUG:
print(f'validated constraints: corner angles are {angles[0:4]}')
def get_angle(pointA, pointB, pointC):
v1 = [pointA[0] - pointB[0], pointA[1] - pointB[1]]
v2 = [pointC[0] - pointB[0], pointC[1] - pointB[1]]
angle = np.arccos(
np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2)))
if pointA[1] > pointC[1]:
angle *= -1
return angle
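# Sanity check (illustrative): with pointB at the origin, pointA on the +x
# axis and pointC on the +y axis, the corner at pointB is a right angle:
# get_angle([1, 0], [0, 0], [0, 1]) == np.pi / 2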
def remove_point():
global DEBUG, last_action, points, dirs, images, img_size, index, input_dir, output_dir, args, width, height, image_width, image_height, lines, p_colors, l_colors, a_color, b_color, c_color, rectangles
curr_pos = (mouse_x, mouse_y)
dist = []
for point in points:
dist.append(distance.euclidean(point, curr_pos))
points.pop(dist.index(min(dist)))
last_action = 'removed closest point'
constrain_square()
def load_bbox_from_file():
global DEBUG, last_action, points, dirs, images, img_size, index, input_dir, output_dir, args, width, height, image_width, image_height, lines, p_colors, l_colors, a_color, b_color, c_color, rectangles
file_dir = dirs[index].replace('cache', 'input')
file_dir = os.path.splitext(file_dir)[0]+'.csv'
if os.path.isfile(file_dir):
temp_rectangles = []
if DEBUG:
print('There are encoded annotations in the corresponding text file.')
with open(file_dir) as csvfile:
readCSV = csv.reader(csvfile, delimiter=',')
for row in readCSV:
if not
in range(lump.length//thingSize):
if zStyle:
lump.read(2)
x = int.from_bytes(lump.read(2), "little", signed=True)
# same reason for inverting Y as for vertices
y = -int.from_bytes(lump.read(2), "little", signed=True)
lump.read(2)
angle = int.from_bytes(lump.read(2), "little", signed=True)
typeID = int.from_bytes(lump.read(2), "little", signed=True)
options = int.from_bytes(lump.read(2), "little", signed=True)
lump.read(6)
else:
# Coordinates to place the thing at
x = int.from_bytes(lump.read(2), "little", signed=True)
# same reason for inverting Y as for vertices
y = -int.from_bytes(lump.read(2), "little", signed=True)
# 0-359. Angle at which it is rotated
# 0 is East, then goes anti-clockwise
angle = int.from_bytes(lump.read(2), "little", signed=True)
# Thing's typeID (what is it)
# List of types are later in the program
typeID = int.from_bytes(lump.read(2), "little", signed=True)
# bits, which difficulty this thing appears at
options = int.from_bytes(lump.read(2), "little", signed=True)
# create new Thing object, return list of Thing objects
newThing = Thing(x, y, angle, typeID, options)
things.append(newThing)
return things
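# The non-zStyle THINGS record parsed above is five little-endian signed
# 16-bit fields; an equivalent struct-based sketch (illustrative only, with
# y negated afterwards exactly as in the loop above):
# import struct
# x, y, angle, typeID, options = struct.unpack("<5h", lump.read(10))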
def getBasicData(wad, zStyle=False):
''' Putting it all together: get all geometry data off a WAD object
Returns vertexes, linedefs, sidedefs, sectors, things, pallete, colorMap
'''
pallete = getPallete(wad.getLump("PLAYPAL"))
colorMap = getColorMap(wad.getLump("COLORMAP"))
# if the map does not exist - leave
if not wad.mapFound:
# but return pallete and colormap
# (they are needed for maps with non-standard names,
# which use the pallete and colormap from iWADs)
return False, False, False, False, \
False, pallete, colorMap
# otherwise get the geometry + pallete + color map
vertexes = getVertixes(wad.getLump("VERTEXES"))
linedefs = getLineDefs(wad.getLump("LINEDEFS"), zStyle)
sidedefs = getSideDefs(wad.getLump("SIDEDEFS"))
sectors = getSectors(wad.getLump("SECTORS"))
things = getThings(wad.getLump("THINGS"), zStyle)
return vertexes, linedefs, sidedefs,\
sectors, things, pallete, colorMap
# Functions to facilitate vertex transformation
# This is for rotation, scale-down and isometric view of the map
################################################################
def rotatePoint(x, y, rotateDeg):
''' Rotate one set of coordinates by rotateDeg degrees
around the (0,0). Return new coordinates
'''
rotateRad = math.radians(rotateDeg)
currAngleRad = math.atan2(y, x)
dist = math.sqrt(x ** 2 + y ** 2)
resultAngleRad = currAngleRad + rotateRad
newy = math.sin(resultAngleRad) * dist
newx = math.cos(resultAngleRad) * dist
return int(newx), int(newy)
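# Sanity check (illustrative): rotating (1, 0) by 90 degrees
# counter-clockwise around the origin lands on (0, 1):
# rotatePoint(1, 0, 90) == (0, 1)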
def applyRotation(vertexes, things, rotate):
''' Rotate all vertixes and things by "rotate" angle
'''
# Just go through all XY coordinates and apply rotatePoint to each
for vertex in vertexes:
x, y = vertex.x, vertex.y
newx, newy = rotatePoint(x, y, rotate)
vertex.x, vertex.y = newx, newy
for thing in things:
x, y = thing.x, thing.y
newx, newy = rotatePoint(x, y, rotate)
thing.x, thing.y = newx, newy
# One extra thing to do for things:
# if the map rotates, we need to rotate things by the same number
# of degrees in the opposite direction,
# so they keep facing the same direction relative to the map
thing.angle -= rotate
if thing.angle < 0:
thing.angle += 360
def applyScaleY(vertexes, things, scaleY):
''' Scale vertexes and things along Y axis by factor of scaleY
This is to create isometric view (as if viewing from a side, not from
directly above). ScaleY is usually 0.5-0.9
'''
for vertex in vertexes:
y = vertex.y
newy = int(y * scaleY)
vertex.y = newy
for thing in things:
y = thing.y
newy = int(y * scaleY)
thing.y = newy
def applyShrinkage(vertexes, things, sidedefs, sectors, shrink):
''' Scale transformation (make everything smaller by a factor of SHRINK):
vertex & thing coords, sector floor and ceiling heights
'''
for vertex in vertexes:
vertex.x //= shrink
vertex.y //= shrink
for thing in things:
thing.x //= shrink
thing.y //= shrink
for sidedef in sidedefs:
sidedef.xOffset //= shrink
sidedef.yOffset //= shrink
for sector in sectors:
sector.floorHeight //= shrink
sector.ceilingHeight //= shrink
# Functions to get various graphic info from lumps (patches, textures, flats)
############################################################################
# First, functions that deal with colors and color transformation
#################################################################
def getPallete(lump):
''' Get the pallete from PLAYPAL lump
Pallete is a list of 256 tuples, (256 colors used in the game)
each tuple has 3 0-255 integers (RGB color)
'''
if lump is None:
return []
pallete = []
for i in range(256):
pixel = []
for j in range(3):
pixel.append(int.from_bytes(lump.read(1), "little",
signed=False))
pallete.append(tuple(pixel))
return pallete
def getColorMap(lump):
''' Get the ColorMap from COLORMAP lump
Color Map is used to map colors to new colors for various light levels
Returns list of 34 maps, each map is a list of indexes in pallete to map to
'''
if lump is None:
return []
colorMap = []
for i in range(34):
colorMap.append([])
for j in range(256):
colorMap[-1].append(int.from_bytes(lump.read(1), "little",
signed=False))
return colorMap
def genColorConversion(pallete, colorMap):
''' Combines Pallette and ColorMap into Color Conversion table:
Map which RGB color to which, for various light levels
litColor = colorConv[lightLevel][originalColor]
'''
colorConv = []
for i in range(34):
colorConv.append({})
for j in range(256):
colorConv[-1][pallete[j]] = pallete[colorMap[i][j]]
return colorConv
# Function that deal with pictures in Doom format (patches and sprites)
#####################################################################
def palletizePic(im, pallete):
''' Make image im conform with Doom's picture requirements:
All pixels should be from "pallete"
All transparency is either 0 or 255
(used for PNG and for scaled down assets)
'''
def closestPix(pixel, pallete):
''' Find closest pixel in the pallete
'''
# First, dynamic programming: look in the cached values
nonlocal palleteMemory
if pixel in palleteMemory:
# Need to return a copy, otherwise the main function
# would update this value
return palleteMemory[pixel].copy()
# Otherwise find the closest pixel (min sum of by-channel differences)
closest = (0,0,0)
minDistance = 256 * 4
for pal in pallete:
distance = 0
for i in range(3):
distance += abs(pixel[i]-pal[i])
#distance += (pixel[i]-pal[i])**2
#distance = int(math.sqrt(distance))
if distance < minDistance:
minDistance = distance
closest = pal
palleteMemory[pixel] = list(closest)
return list(closest)
# Dynamic programming cache to speed up conversion to the pallete colors
palleteMemory = {}
px = im.load()
for i in range(im.size[0]):
for j in range(im.size[1]):
transparency = 255
if len(px[i,j]) == 4:
transparency = 0 if px[i,j][3] < 128 else 255
# Check if it is in the pallete
if tuple(px[i,j][:3]) not in pallete:
# and use closest if it isn't
newpix = list(closestPix(tuple(px[i,j][:3]), pallete))
else:
newpix = list(px[i,j][:3])
newpix.append(transparency)
px[i,j] = tuple(newpix)
return im
def picResize(pic, shrink, pallete):
''' Shrink a picture, make sure the result is Doom compliant
'''
newW = max(pic.size[0] // shrink, 1)
newH = max(pic.size[1] // shrink, 1)
pic = pic.resize((newW, newH), Image.LANCZOS)
pic = palletizePic(pic, pallete)
return pic
def massResize(pics, shrink, pallete):
''' Mass shrink a dict of pictures
'''
for picName in pics:
pics[picName] = picResize(pics[picName], shrink, pallete)
def massResizeFlats(flats, shrink, pallete):
''' Mass shrink a dict of flats
'''
for flatName in flats:
flatpic = flat2pic(flats[flatName])
flatpic = picResize(flatpic, shrink, pallete)
flats[flatName] = pic2flat(flatpic)
def png2pic(pngdata, pallete):
''' convert PNG data into a PIL pic
Using external library "pypng" as PIL often can't read WAD's PNG properly
'''
pngpic = png.Reader(bytes=pngdata)
width, height, rows, info = pngpic.read(lenient=True)
# Should resulting byte stream be grouped in 3s or 4s
bytesize = info["planes"]
# or in 1s (for paletted PNG)
if bytesize == 1:
pngPallete = info["palette"]
# resulting image should have alpha channel anyway
im = Image.new("RGBA", (width, height), (0, 0, 0, 0))
px = im.load()
temppix = []
# iterating through the results of the PNG reader
# and copying pixels to a new PIL Image
for i, row in enumerate(rows):
for j, value in enumerate(row):
# constructing pixel from a bytestream
temppix.append(value)
# if it is long enough - time to write this byte to the image
if len(temppix) == bytesize:
# if it is one byte (palleted PNG)
# read the value from PNG pallete
if len(temppix) == 1:
temppix = list(pngPallete[temppix[0]])
newpix = temppix[:3]
# add transparency byte, or copy from the original
if bytesize in (3, 1):
newpix.append(255)
else:
newpix.append(255 if temppix[3] > 127 else 0)
# copy the result to the final image, clear the pixel buffer
px[j//bytesize,i] = tuple(newpix)
temppix = []
im = palletizePic(im, pallete)
return im
def getPatchesNames(lump):
''' Get all patch names (texture building components) from the PNAMES lump
They will be referenced by ID, not by name, so store them in a list, not a dict
'''
patchesNames
# only functions to generate, load, save, check, etc.
from random import randint
from math import log
import inspect
import os
import sys
import pickle
from .__tools_single import __cre_rotor, __save_rotor, __load_rotor, __check_rand_rotor, generate_from_64b_inter_key, \
bcolors
from .core import EncryptNextRotor, EncryptSet, DecryptNextRotor, DecryptSet
def create_rotors(size_in_bit, mix, number_of_rotors):  # TODO: make mix default to True and move it to the end
size = 2 ** size_in_bit
return [__cre_rotor(size, mix) for _ in range(number_of_rotors)]
def check_rand_rotors(rotors):
__key_min = min(rotors[0])
__key_max = max(rotors[0])
__value_min = min(rotors[0].values())
__value_max = max(rotors[0].values())
__len = len(rotors[0])
__random = __check_rand_rotor(rotors[0])
for rotor in rotors:
if __key_min != min(rotor) or __key_max != max(rotor) or __len != len(rotor) or __random != __check_rand_rotor(
rotor) \
or __value_min != min(rotor.values()) or __value_max != max(rotor.values()):
return False, __random
return True, __random
number_of_printd = 0
def print_format_table():
"""
prints table of formatted text format options
"""
for style in range(10):
for fg in range(20, 40):
s1 = ''
for bg in range(35, 60):
format = ';'.join([str(style), str(fg), str(bg)])
s1 += '\x1b[%sm %s \x1b[0m' % (format, format)
print(s1)
print('\n')
# def gen_text(char_max, ran, size_triple, rotor_for_gen, *char_size):
# print("Dorób proces postępu gen_text ")
# x = []
# if len(char_size) == 1:
# char = char_size[0]
# size = 1
# else:
# if len(char_size) == 2:
# char = char_size[0]
# size = char_size[1]
# else:
# char = 0
# size = 1
#
# rotor_for_gen = dict(rotor_for_gen)
# [[[x.append(randint(0, len(rotor_for_gen) - 1) if ran else char) for _ in range(3 * len(rotor_for_gen))]
# if not char_max else [x.append(len(rotor_for_gen) - 1) for _ in range(3 * len(rotor_for_gen))]]
# if size_triple else [[x.append(randint(0, len(rotor_for_gen) - 1) if ran else char) for _ in range(size)]
# if not char_max else [x.append(len(rotor_for_gen) - 1) for _ in range(size)]]]
# return x
def check_text_const(text_before):
for i in range(1, len(text_before)):
if text_before[0] != text_before[i]:
return False
return True
def encrypt(rotors, key_enc, text_before, show=False):
encrypt_rotors = [EncryptNextRotor(rotor) for rotor in rotors]
encrypt_first = EncryptSet(key_enc[:], text_before[:], rotors, show=show)
enc = [True]
while True:
enc = encrypt_first.set_enc_chain(enc)
for encrypt_rotor in encrypt_rotors:
enc = encrypt_rotor.encrypt(enc)
if not enc[-1]:
break
text_encrypt = encrypt_first.get_encrypt_list()
return text_encrypt
def decrypt(rotors, key_dec, text_encrypt, show=False):
decrypt_rotors = [DecryptNextRotor(rotor) for rotor in rotors]
decrypt_first = DecryptSet(key_dec[:], text_encrypt[:], rotors, show=show)
dec = [True]
while True:
dec = decrypt_first.set_dec_chain(dec)
for decrypt_rotor in reversed(decrypt_rotors):
dec = decrypt_rotor.decrypt(dec)
if not dec[-1]:
break
text_decrypt = decrypt_first.get_decrypt_list()
return text_decrypt
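# Round-trip sketch (hypothetical sizes and keys): encrypting and then
# decrypting with matching keys should reproduce the original symbol list.
# Whether key_dec equals key_enc or is derived from it is not shown in this
# module, so treating them as equal here is an assumption:
# rotors = create_rotors(8, True, 3)
# key = [1, 2, 3]
# text = [10, 20, 30, 40]
# assert decrypt(rotors, key, encrypt(rotors, key, text)) == text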
def check_all_patterns(text_encrypt, min_pattern, max_pattern, max_num_patterns=1, number_of_check_patterns=0,
del_patterns=None, show=False,
mark=-1):
if del_patterns is None:
del_patterns = []
t_i = text_encrypt[:]
min_p = min_pattern
max_p = max_pattern
stop_if = max_pattern
p_l = []
min = 0
pf = 0
ps = 0
i_m = mark
if del_patterns:
for i in del_patterns:
p_len = i[0]
i.pop(0)
i.pop(0)
for ii in i:
t_i[ii: p_len + ii] = [i_m for _ in range(ii, p_len + ii)]
while True:
if number_of_check_patterns != 0:
if stop_if - max_p >= number_of_check_patterns:
print(("\rPatterns shorter, only {} first:".format(max_num_patterns) + " " * 20)[:49] + str(
[p_l if p_l else "[None]"])[2:-2]) if show else None
sys.stdout.write("\r")
return p_l
sys.stdout.write(("\rPatterns shorter, only {} first in progress ...".format(max_num_patterns) + " " * 7)[:49] +
str(p_l)[1:-1] + str([', ' if p_l else ""])[2:-2] + str(
list([len(t_i[min + pf: max_p + pf]), min + pf, max_p + pf + ps]))) \
if show else None
if len(p_l) >= max_num_patterns:
print(("\rPatterns shorter, only {} first:".format(max_num_patterns) + " " * 20)[:49] + str(
[p_l if p_l else "[None]"])[1:-1]) if show else None
sys.stdout.write("\r")
return p_l
if i_m not in t_i[min + pf: max_p + pf]:
if i_m not in t_i[max_p + pf + ps: 2 * max_p + pf + ps]:
if t_i[min + pf: max_p + pf] == t_i[max_p + pf + ps: 2 * max_p + pf + ps]:
p_l.append([len(t_i[min + pf: max_p + pf]), min + pf, max_p + pf + ps])
t_i[max_p + pf + ps: 2 * max_p + pf + ps] = [i_m for _ in
range(max_p + pf + ps, 2 * max_p + pf + ps)]
if t_i[max_p + pf + ps: 2 * max_p + pf + ps] == t_i[-len(t_i[max_p + pf + ps: 2 * max_p + pf + ps]):]:
if t_i[min + pf: max_p + pf] + t_i[max_p + pf + ps: 2 * max_p + pf + ps] == t_i[-2 * len(
t_i[max_p + pf + ps: 2 * max_p + pf + ps]):]:
if len(t_i[min + pf: max_p + pf]) < min_p:
print(
("\rPatterns shorter, only {} first:".format(max_num_patterns) + " " * 20)[:49] + str(p_l)[
1:-1]) if show else None
sys.stdout.write("\r")
return p_l
else:
max_p -= 1
pf = 0
ps = 0
else:
pf += 1
ps = 0
else:
ps += 1
else:
if t_i[min + pf: max_p + pf] + t_i[max_p + pf + ps: 2 * max_p + pf + ps] == t_i[-2 * len(
t_i[max_p + pf + ps: 2 * max_p + pf + ps]):]:
if len(t_i[min + pf: max_p + pf]) < min_p:
print(("\rPatterns shorter, only {} first:".format(max_num_patterns) + " " * 20)[:49] +
str(p_l)[1:-1]) if show else None
sys.stdout.write("\r")
return p_l
else:
max_p -= 1
pf = 0
ps = 0
else:
pf += 1
ps = 0
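# Each entry appended to p_l above has the form
#     [pattern_length, start_of_first_occurrence, start_of_second_occurrence]
# with already-reported regions masked out using the `mark` value.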
def check_patterns(text_to_check, min_len=4, max_num_patterns=1,
show=False):
p_l = []
m = min_len
nr_del = 0
text_to_check_inter = text_to_check[:]
while True:
for i in range(0, len(text_to_check_inter)):
if show:
sys.stdout.write((("\rPatterns over, only {} first in progress ...".format(max_num_patterns) + " " * 7)[
:49] + str(p_l)[1:-1] + str([', ' if p_l else ""])[2:-2] +
str([len(text_to_check_inter[0: m + i]), nr_del, nr_del + (m + i),
nr_del + (m + i) * 2])))
if (m + i) * 3 > len(text_to_check_inter):
break
if text_to_check_inter[0: m + i] == text_to_check_inter[m + i: (m + i) * 2] == \
text_to_check_inter[(m + i) * 2: (m + i) * 3]:
p_l.append([len(text_to_check_inter[0: m + i]), nr_del, nr_del + (m + i), nr_del + (m + i) * 2])
if len(p_l) == max_num_patterns:
if show:
print(
str("\rPatterns over, first {}:".format(max_num_patterns) + " " * 30)[:49] + str(p_l)[1:-1])
return p_l
if len(text_to_check_inter) >= 1:
text_to_check_inter.pop(0)
nr_del += 1
else:
break
if p_l:
if show:
print(str("\rPatterns over, first {}:".format(max_num_patterns) + " " * 30)[:49] + (str(p_l))[1:-1])
return p_l
sys.stdout.write("\r")
return False
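# Example: a block repeated three times in a row is reported as
# [length, start1, start2, start3]:
#     check_patterns([1, 2, 3, 4] * 3, min_len=4)  # -> [[4, 0, 4, 8]]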
def key_from_64b_to_dec(key_my):
dic_64b_1 = {"0": 0, "1": 1, "2": 2, "3": 3, "4": 4, "5": 5, "6": 6, "7": 7, "8": 8, "9": 9, "a": 10,
"b": 11, "c": 12, "d": 13, "e": 14, "f": 15, "g": 16, "h": 17, "i": 18, "j": 19, "k": 20,
"l": 21, "m": 22, "n": 23, "o": 24, "p": 25, "q": 26, "r": 27, "s": 28, "t": 29, "u": 30,
"v": 31, "w": 32, "x": 33, "y": 34, "z": 35, "A": 36, "B": 37, "C": 38, "D": 39, "E": 40,
"F": 41, "G": 42, "H": 43, "I": 44, "J": 45, "K": 46, "L": 47, "M": 48, "N": 49, "O": 50,
"P": 51, "Q": 52, "R": 53, "S": 54, "T": 55, "U": 56, "V": 57, "W": 58, "X": 59, "Y": 60,
"Z": 61, "+": 62, "/": 63}
number_in_dec = 0
i = 0
if isinstance(key_my, str):
for h in key_my:
number_in_dec += dic_64b_1[h] * (64 ** i)
i += 1
return number_in_dec
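# Note the digit order: the FIRST character is the least significant base-64
# digit, for example:
#     key_from_64b_to_dec("10")  # -> 1  (1*64**0 + 0*64**1)
#     key_from_64b_to_dec("01")  # -> 64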
def check_64b_key(key_my):
dic_64b_1 = {"0": 0, "1": 1, "2": 2, "3": 3, "4": 4, "5": 5, "6": 6, "7": 7, "8": 8, "9": 9, "a": 10,
"b": 11, "c": 12, "d": 13, "e": 14, "f": 15, "g": 16, "h": 17, "i": 18, "j": 19, "k": 20,
"l": 21, "m": 22, "n": 23, "o": 24, "p": 25, "q": 26, "r": 27, "s": 28, "t": 29, "u": 30,
"v": 31, "w": 32, "x": 33, "y": 34, "z": 35, "A": 36, "B": 37, "C": 38, "D": 39, "E": 40,
"F": 41, "G": 42, "H": 43, "I": 44, "J": 45, "K": 46, "L": 47, "M": 48, "N": 49, "O": 50,
"P": 51, "Q": 52, "R": 53, "S": 54, "T": 55, "U": 56, "V": 57, "W": 58, "X": 59, "Y": 60,
"Z": 61, "+": 62, "/": 63}
for k in key_my:
if k in dic_64b_1:
continue
else:
return False
return True
def create_random_64b_key(size_in_bit=2, max_print_length=110, show=False):
dic_64b_1 = {"0": 0, "1": 1, "2": 2, "3": 3, "4": 4, "5": 5, "6": 6, "7": 7, "8": 8, "9": 9, "a": 10,
"b": 11, "c": 12, "d": 13, "e": 14, "f": 15, "g": 16, "h": 17, "i": 18, "j": 19, "k": 20,
"l": 21, "m": 22, "n": 23, "o": 24, "p": 25, "q": 26, "r": 27, "s": 28, "t": 29, "u": 30,
"v": 31, "w": 32, "x": 33, "y": 34, "z": 35, "A": 36, "B": 37, "C": 38, "D": 39, "E": 40,
"F": 41, "G": 42, "H": 43, "I": 44, "J": 45, "K": 46, "L": 47, "M": 48, "N": 49, "O": 50,
"P": 51, "Q": 52, "R": 53, "S": 54, "T": 55, "U": 56, "V": 57, "W": 58, "X": 59, "Y": 60,
"Z": 61, "+": 62, "/": 63}
dic_64b_2 = {}
for inter_key, value in dic_64b_1.items():
dic_64b_2[value] = inter_key
# Random number in DEC
rand_num_in_DEC = randint(2 ** size_in_bit, (2 ** (size_in_bit + 1)) - 1)
if show:
print("Size of the number in bit: ", int(log(rand_num_in_DEC, 2)))
print("Random number in DEC: ")
for i in range(0, len(str(rand_num_in_DEC)) + max_print_length, max_print_length):
if str(rand_num_in_DEC)[i: max_print_length + i]:
print(str(rand_num_in_DEC)[i: max_print_length + i])
else:
print("")
break
    # DEC
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.1
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (3,0,0):
new_instancemethod = lambda func, inst, cls: _GeomTools.SWIG_PyInstanceMethod_New(func)
else:
from new import instancemethod as new_instancemethod
if version_info >= (2,6,0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_GeomTools', [dirname(__file__)])
except ImportError:
import _GeomTools
return _GeomTools
if fp is not None:
try:
_mod = imp.load_module('_GeomTools', fp, pathname, description)
finally:
fp.close()
return _mod
_GeomTools = swig_import_helper()
del swig_import_helper
else:
import _GeomTools
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
if (name == "thisown"): return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name,None)
if method: return method(self,value)
if (not static):
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
if (name == "thisown"): return self.this.own()
method = class_type.__swig_getmethods__.get(name,None)
if method: return method(self)
raise AttributeError(name)
def _swig_repr(self):
try: strthis = "proxy of " + self.this.__repr__()
except: strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
def _swig_setattr_nondynamic_method(set):
def set_attr(self,name,value):
if (name == "thisown"): return self.this.own(value)
if hasattr(self,name) or (name == "this"):
set(self,name,value)
else:
raise AttributeError("You cannot add attributes to %s" % self)
return set_attr
class SwigPyIterator(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
__swig_destroy__ = _GeomTools.delete_SwigPyIterator
def __iter__(self): return self
SwigPyIterator.value = new_instancemethod(_GeomTools.SwigPyIterator_value,None,SwigPyIterator)
SwigPyIterator.incr = new_instancemethod(_GeomTools.SwigPyIterator_incr,None,SwigPyIterator)
SwigPyIterator.decr = new_instancemethod(_GeomTools.SwigPyIterator_decr,None,SwigPyIterator)
SwigPyIterator.distance = new_instancemethod(_GeomTools.SwigPyIterator_distance,None,SwigPyIterator)
SwigPyIterator.equal = new_instancemethod(_GeomTools.SwigPyIterator_equal,None,SwigPyIterator)
SwigPyIterator.copy = new_instancemethod(_GeomTools.SwigPyIterator_copy,None,SwigPyIterator)
SwigPyIterator.next = new_instancemethod(_GeomTools.SwigPyIterator_next,None,SwigPyIterator)
SwigPyIterator.__next__ = new_instancemethod(_GeomTools.SwigPyIterator___next__,None,SwigPyIterator)
SwigPyIterator.previous = new_instancemethod(_GeomTools.SwigPyIterator_previous,None,SwigPyIterator)
SwigPyIterator.advance = new_instancemethod(_GeomTools.SwigPyIterator_advance,None,SwigPyIterator)
SwigPyIterator.__eq__ = new_instancemethod(_GeomTools.SwigPyIterator___eq__,None,SwigPyIterator)
SwigPyIterator.__ne__ = new_instancemethod(_GeomTools.SwigPyIterator___ne__,None,SwigPyIterator)
SwigPyIterator.__iadd__ = new_instancemethod(_GeomTools.SwigPyIterator___iadd__,None,SwigPyIterator)
SwigPyIterator.__isub__ = new_instancemethod(_GeomTools.SwigPyIterator___isub__,None,SwigPyIterator)
SwigPyIterator.__add__ = new_instancemethod(_GeomTools.SwigPyIterator___add__,None,SwigPyIterator)
SwigPyIterator.__sub__ = new_instancemethod(_GeomTools.SwigPyIterator___sub__,None,SwigPyIterator)
SwigPyIterator_swigregister = _GeomTools.SwigPyIterator_swigregister
SwigPyIterator_swigregister(SwigPyIterator)
import OCC.Geom
import OCC.MMgt
import OCC.Standard
import OCC.gp
import OCC.TCollection
import OCC.GeomAbs
import OCC.TColgp
import OCC.TColStd
import OCC.Geom2d
import OCC.Message
class geomtools(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def Dump(*args):
"""
* A set of Curves from Geom2d. Dumps the surface on the stream.
:param S:
:type S: Handle_Geom_Surface &
:param OS:
:type OS: Standard_OStream &
:rtype: void
* Dumps the Curve on the stream.
:param C:
:type C: Handle_Geom_Curve &
:param OS:
:type OS: Standard_OStream &
:rtype: void
* Dumps the Curve on the stream.
:param C:
:type C: Handle_Geom2d_Curve &
:param OS:
:type OS: Standard_OStream &
:rtype: void
"""
return _GeomTools.geomtools_Dump(*args)
Dump = staticmethod(Dump)
def Write(*args):
"""
* Writes the surface on the stream.
:param S:
:type S: Handle_Geom_Surface &
:param OS:
:type OS: Standard_OStream &
:rtype: void
* Writes the Curve on the stream.
:param C:
:type C: Handle_Geom_Curve &
:param OS:
:type OS: Standard_OStream &
:rtype: void
* Writes the Curve on the stream.
:param C:
:type C: Handle_Geom2d_Curve &
:param OS:
:type OS: Standard_OStream &
:rtype: void
"""
return _GeomTools.geomtools_Write(*args)
Write = staticmethod(Write)
def Read(*args):
"""
* Reads the surface from the stream.
:param S:
:type S: Handle_Geom_Surface &
:param IS:
:type IS: Standard_IStream &
:rtype: void
* Reads the Curve from the stream.
:param C:
:type C: Handle_Geom_Curve &
:param IS:
:type IS: Standard_IStream &
:rtype: void
* Reads the Curve from the stream.
:param C:
:type C: Handle_Geom2d_Curve &
:param IS:
:type IS: Standard_IStream &
:rtype: void
"""
return _GeomTools.geomtools_Read(*args)
Read = staticmethod(Read)
def SetUndefinedTypeHandler(*args):
"""
:param aHandler:
:type aHandler: Handle_GeomTools_UndefinedTypeHandler &
:rtype: void
"""
return _GeomTools.geomtools_SetUndefinedTypeHandler(*args)
SetUndefinedTypeHandler = staticmethod(SetUndefinedTypeHandler)
def GetUndefinedTypeHandler(*args):
"""
:rtype: Handle_GeomTools_UndefinedTypeHandler
"""
return _GeomTools.geomtools_GetUndefinedTypeHandler(*args)
GetUndefinedTypeHandler = staticmethod(GetUndefinedTypeHandler)
def __init__(self):
_GeomTools.geomtools_swiginit(self,_GeomTools.new_geomtools())
def __del__(self):
try:
self.thisown = False
GarbageCollector.garbage.collect_object(self)
except:
pass
geomtools._kill_pointed = new_instancemethod(_GeomTools.geomtools__kill_pointed,None,geomtools)
geomtools_swigregister = _GeomTools.geomtools_swigregister
geomtools_swigregister(geomtools)
def geomtools_Dump(*args):
"""
* A set of Curves from Geom2d. Dumps the surface on the stream.
:param S:
:type S: Handle_Geom_Surface &
:param OS:
:type OS: Standard_OStream &
:rtype: void
* Dumps the Curve on the stream.
:param C:
:type C: Handle_Geom_Curve &
:param OS:
:type OS: Standard_OStream &
:rtype: void
* Dumps the Curve on the stream.
:param C:
:type C: Handle_Geom2d_Curve &
:param OS:
:type OS: Standard_OStream &
:rtype: void
"""
return _GeomTools.geomtools_Dump(*args)
def geomtools_Write(*args):
"""
* Writes the surface on the stream.
:param S:
:type S: Handle_Geom_Surface &
:param OS:
:type OS: Standard_OStream &
:rtype: void
* Writes the Curve on the stream.
:param C:
:type C: Handle_Geom_Curve &
:param OS:
:type OS: Standard_OStream &
:rtype: void
* Writes the Curve on the stream.
:param C:
:type C: Handle_Geom2d_Curve &
:param OS:
:type OS: Standard_OStream &
:rtype: void
"""
return _GeomTools.geomtools_Write(*args)
def geomtools_Read(*args):
"""
* Reads the surface from the stream.
:param S:
:type S: Handle_Geom_Surface &
:param IS:
:type IS: Standard_IStream &
:rtype: void
* Reads the Curve from the stream.
:param C:
:type C: Handle_Geom_Curve &
:param IS:
:type IS: Standard_IStream &
:rtype: void
* Reads the Curve from the stream.
:param C:
:type C: Handle_Geom2d_Curve &
:param IS:
:type IS: Standard_IStream &
:rtype: void
"""
return _GeomTools.geomtools_Read(*args)
def geomtools_SetUndefinedTypeHandler(*args):
"""
:param aHandler:
:type aHandler: Handle_GeomTools_UndefinedTypeHandler &
:rtype: void
"""
return _GeomTools.geomtools_SetUndefinedTypeHandler(*args)
def geomtools_GetUndefinedTypeHandler(*args):
"""
:rtype: Handle_GeomTools_UndefinedTypeHandler
"""
return _GeomTools.geomtools_GetUndefinedTypeHandler(*args)
class GeomTools_Curve2dSet(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
* Returns an empty set of Curves.
:rtype: None
"""
_GeomTools.GeomTools_Curve2dSet_swiginit(self,_GeomTools.new_GeomTools_Curve2dSet(*args))
def Clear(self, *args):
"""
* Clears the content of the set.
:rtype: None
"""
return _GeomTools.GeomTools_Curve2dSet_Clear(self, *args)
def Add(self, *args):
"""
* Incorporate a new Curve in the set and returns its index.
:param C:
:type C: Handle_Geom2d_Curve &
:rtype: int
"""
return _GeomTools.GeomTools_Curve2dSet_Add(self, *args)
def Curve2d(self, *args):
"""
* Returns the Curve of index <I>.
:param I:
:type I: int
:rtype: Handle_Geom2d_Curve
"""
return _GeomTools.GeomTools_Curve2dSet_Curve2d(self, *args)
def Index(self, *args):
"""
* Returns the index of <L>.
:param C:
:type C: Handle_Geom2d_Curve &
:rtype: int
"""
return _GeomTools.GeomTools_Curve2dSet_Index(self, *args)
def DumpToString(self):
"""DumpToString(GeomTools_Curve2dSet self) -> std::string"""
return _GeomTools.GeomTools_Curve2dSet_DumpToString(self)
def WriteToString(self):
"""WriteToString(GeomTools_Curve2dSet self) -> std::string"""
return _GeomTools.GeomTools_Curve2dSet_WriteToString(self)
def ReadFromString(self, *args):
"""ReadFromString(GeomTools_Curve2dSet self, std::string src)"""
return _GeomTools.GeomTools_Curve2dSet_ReadFromString(self, *args)
def PrintCurve2d(*args):
"""
* Dumps the curve on the stream, if compact is True use the compact format that can be read back.
:param C:
:type C: Handle_Geom2d_Curve &
:param OS:
:type OS: Standard_OStream &
:param compact: default value is Standard_False
:type compact: bool
:rtype: void
"""
return _GeomTools.GeomTools_Curve2dSet_PrintCurve2d(*args)
PrintCurve2d = staticmethod(PrintCurve2d)
def ReadCurve2d(*args):
"""
        * Reads the curve from the stream. The curve is assumed to have been written with the Print method (compact = True).
:param IS:
:type IS: Standard_IStream &
:param C:
:type C: Handle_Geom2d_Curve &
:rtype: Standard_IStream
"""
return _GeomTools.GeomTools_Curve2dSet_ReadCurve2d(*args)
ReadCurve2d = staticmethod(ReadCurve2d)
def SetProgress(self, *args):
"""
:param PR:
:type PR: Handle_Message_ProgressIndicator &
:rtype: None
"""
return _GeomTools.GeomTools_Curve2dSet_SetProgress(self, *args)
def GetProgress(self, *args):
"""
:rtype: Handle_Message_ProgressIndicator
"""
return _GeomTools.GeomTools_Curve2dSet_GetProgress(self, *args)
def _kill_pointed(self):
"""_kill_pointed(GeomTools_Curve2dSet self)"""
return _GeomTools.GeomTools_Curve2dSet__kill_pointed(self)
def __del__(self):
try:
self.thisown = False
GarbageCollector.garbage.collect_object(self)
except:
pass
GeomTools_Curve2dSet.Clear = new_instancemethod(_GeomTools.GeomTools_Curve2dSet_Clear,None,GeomTools_Curve2dSet)
GeomTools_Curve2dSet.Add = new_instancemethod(_GeomTools.GeomTools_Curve2dSet_Add,None,GeomTools_Curve2dSet)
GeomTools_Curve2dSet.Curve2d = new_instancemethod(_GeomTools.GeomTools_Curve2dSet_Curve2d,None,GeomTools_Curve2dSet)
GeomTools_Curve2dSet.Index = new_instancemethod(_GeomTools.GeomTools_Curve2dSet_Index,None,GeomTools_Curve2dSet)
GeomTools_Curve2dSet.DumpToString = new_instancemethod(_GeomTools.GeomTools_Curve2dSet_DumpToString,None,GeomTools_Curve2dSet)
GeomTools_Curve2dSet.WriteToString = new_instancemethod(_GeomTools.GeomTools_Curve2dSet_WriteToString,None,GeomTools_Curve2dSet)
GeomTools_Curve2dSet.ReadFromString = new_instancemethod(_GeomTools.GeomTools_Curve2dSet_ReadFromString,None,GeomTools_Curve2dSet)
GeomTools_Curve2dSet.SetProgress = new_instancemethod(_GeomTools.GeomTools_Curve2dSet_SetProgress,None,GeomTools_Curve2dSet)
GeomTools_Curve2dSet.GetProgress = new_instancemethod(_GeomTools.GeomTools_Curve2dSet_GetProgress,None,GeomTools_Curve2dSet)
GeomTools_Curve2dSet._kill_pointed = new_instancemethod(_GeomTools.GeomTools_Curve2dSet__kill_pointed,None,GeomTools_Curve2dSet)
GeomTools_Curve2dSet_swigregister = _GeomTools.GeomTools_Curve2dSet_swigregister
GeomTools_Curve2dSet_swigregister(GeomTools_Curve2dSet)
def GeomTools_Curve2dSet_PrintCurve2d(*args):
"""
* Dumps the curve on the stream, if compact is True use the compact format that can be read back.
:param C:
:type C: Handle_Geom2d_Curve &
:param OS:
:type OS: Standard_OStream &
:param compact: default value is Standard_False
:type compact: bool
:rtype: void
"""
return _GeomTools.GeomTools_Curve2dSet_PrintCurve2d(*args)
def GeomTools_Curve2dSet_ReadCurve2d(*args):
"""
    * Reads the curve from the stream. The curve is assumed to have been written with the Print method (compact = True).
:param IS:
:type IS: Standard_IStream &
:param C:
:type C: Handle_Geom2d_Curve &
:rtype: Standard_IStream
"""
return _GeomTools.GeomTools_Curve2dSet_ReadCurve2d(*args)
class GeomTools_CurveSet(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
* Returns an empty set of Curves.
:rtype: None
"""
_GeomTools.GeomTools_CurveSet_swiginit(self,_GeomTools.new_GeomTools_CurveSet(*args))
def Clear(self, *args):
"""
* Clears the content of the set.
:rtype: None
"""
return _GeomTools.GeomTools_CurveSet_Clear(self, *args)
def Add(self, *args):
"""
        * Incorporate a new Curve in the set and returns its index.
        :param C:
        :type C: Handle_Geom_Curve &
        :rtype: int
        """
        return _GeomTools.GeomTools_CurveSet_Add(self, *args)
# -*- coding: utf-8 -*-
import csv
import datetime
from dateutil.relativedelta import relativedelta
from dateutil.parser import parse
import ftplib
import glob
import os
import os.path
import pickle
import pprint
import re
import shutil
import sys
import time
import webbrowser
import dataset
import pyautogui
import pyperclip
import names_and_codes as nc
class BlueChipError(Exception):
pass
def get_anaesthetist():
while True:
initials = input('Anaesthetist: ').lower()
if initials == 'h':
pprint.pprint(nc.ANAESTHETISTS)
continue
if initials in nc.ANAESTHETISTS:
anaesthetist = nc.ANAESTHETISTS[initials]
break
return anaesthetist
def get_endoscopist():
while True:
initials = input('Endoscopist: ').lower()
if initials in nc.DOC_DIC:
doctor = nc.DOC_DIC[initials]
break
if doctor in nc.LOCUMS:
while True:
initials = input('Who is Dr {} covering? '.format(
doctor.split()[-1]))
if nc.DOC_DIC[initials] in nc.PARTNERS:
consultant = nc.DOC_DIC[initials]
break
else:
consultant = doctor
return doctor, consultant
def get_nurse():
while True:
print()
initials = input('Nurse (h)elp: ')
if initials == 'h':
pprint.pprint(nc.NURSES_DIC)
continue
if initials in nc.NURSES_DIC:
nurse = nc.NURSES_DIC[initials]
break
return nurse
def episode_update(consultant, doctor, anaesthetist, data_entry):
(asa, upper, colon, banding, consult, message, time_in_theatre,
ref, full_fund, insur_code, fund_number, clips,
varix_flag, varix_lot) = data_entry
(in_formatted, out_formatted,
anaesthetic_time, today_for_db) = time_calculater(time_in_theatre)
message = episode_opener(message)
episode_procedures(upper, colon, banding, asa)
mrn, print_name, address, dob, mcn = episode_scrape()
message += 'Updated this patient. Check Blue Chip is correct.'
stored_index, first_patient = make_index(
out_formatted, doctor, print_name, consult,
upper, colon, banding, message, anaesthetist)
offsite(stored_index)
time.sleep(1)
pyautogui.click(x=780, y=90)
def send_message(anaesthetist):
base = '<b>Message from {}</b> - '.format(anaesthetist)
print('Type your message. Your name is automatically included.')
new = input()
message = base + new + '<br>\n'
stored_index = make_index(message)
offsite(stored_index)
def get_consult(consultant, upper, lower, time_in_theatre, loop_flag):
consult = 'none'
if consultant == 'Dr <NAME>' or consultant not in nc.PARTNERS:
return (consult, loop_flag)
if consultant in nc.CONSULTERS:
while True:
print()
consult = input('Consult 110 or 116: ')
if consult == 'q':
loop_flag = True
break
if consult in {'110', '116'}:
break
print('\033[31;1m' + 'TRY AGAIN!')
return (consult, loop_flag)
if consultant == 'Dr <NAME>':
print("Dr Feller does 110's on new patients only")
while True:
print()
consult = input('Consult 110 or 0: ')
if consult == 'q':
loop_flag = True
break
            if consult in {'110', '0'}:
                break
            print('\033[31;1m' + 'TRY AGAIN!')
        if consult == '0':
            consult = 'none'
        return (consult, loop_flag)
if consultant == 'Dr <NAME>':
while True:
print()
consult = input('Consult: ')
if consult == 'q':
loop_flag = True
break
if consult in {'110', '116', '0'}:
break
print('\033[31;1m' + 'TRY AGAIN!')
if consult == '0':
consult = 'none'
return (consult, loop_flag)
if consultant == 'Dr <NAME>':
if int(time_in_theatre) > 30 and lower != '0':
print()
print('\033[31;1m' + 'Dr Williams will bill a 110.')
while True:
response = input('Confirm (y/n) ')
if response.lower() in {'y', 'n'}:
break
            if response.lower() == 'y':
consult = '110'
return (consult, loop_flag)
if consultant == 'Dr <NAME>':
pu = upper in {'pb', 'pp', 'od'}
pl = lower in {'cb', 'cp', 'sb', 'sp', 'csp'}
if pu or pl:
consult = '116'
return (consult, loop_flag)
def get_banding(consultant, lower, message, loop_flag):
if consultant not in nc.BANDERS or lower == '0':
banding = '0'
return banding, message, loop_flag
while True:
banding = input('Anal: ')
b_match = re.match(r'^[abq0]$', banding)
if b_match:
if banding == 'b':
message += ' - Banding haemorrhoids'
elif banding == 'a':
                message += ' - Anal dilatation'
elif banding == 'q':
loop_flag = True
if banding in {'a', 'b'} and consultant == 'Dr <NAME>':
message += ' - Bill bilateral pudendal blocks'
break
print('\033[31;1m' + 'TRY AGAIN!')
return banding, message, loop_flag
def bill_process(bc_dob, upper, lower, asa, mcn, insur_code):
"""
Turn rawdata into stuff ready to go into my account.
Generates and stores an incremented invoice number.
"""
today_raw = datetime.datetime.today()
    today = today_raw.strftime('%d//%m//%Y')
dob = parse(bc_dob, dayfirst=True)
age_sep = relativedelta(today_raw, dob)
if age_sep.years >= 70:
age_seventy = 'Yes'
else:
age_seventy = 'No'
if upper != '0':
upper_done = 'Yes'
else:
upper_done = 'No'
if lower != '0':
lower_done = 'Yes'
else:
lower_done = 'No'
if asa == '3' or asa == '4':
asa_three = 'Yes'
else:
asa_three = 'No'
if insur_code == 'os':
mcn = ''
with open('d:\\JOHN TILLET\\episode_data\\'
'jtdata\\invoice_store.py', 'rb') as handle:
invoice = pickle.load(handle)
invoice += 1
with open('d:\\JOHN TILLET\\episode_data\\'
'jtdata\\invoice_store.py', 'wb') as handle:
pickle.dump(invoice, handle)
return today, upper_done, lower_done, age_seventy, asa_three, invoice, mcn
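# Typical call (illustrative values; the real ones come from the data-entry
# code elsewhere in this script):
#     (today, upper_done, lower_done, age_seventy,
#      asa_three, invoice, mcn) = bill_process('01/01/1950', 'od', 'cp', '3',
#                                              '1234567890', 'os')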
def time_calculater(time_in_theatre):
nowtime = datetime.datetime.now()
    today_str = nowtime.strftime('%Y-%m-%d')
time_in_theatre = int(time_in_theatre)
outtime = nowtime + relativedelta(minutes=+3)
intime = nowtime + relativedelta(minutes=-time_in_theatre)
    out_formatted = outtime.strftime('%H:%M')
    in_formatted = intime.strftime('%H:%M')
time_base = '230'
time_last = '10'
second_last_digit = 1 + time_in_theatre // 15
remainder = time_in_theatre % 15
if remainder < 6:
last_digit = 1
elif remainder < 11:
last_digit = 2
else:
last_digit = 3
if time_in_theatre > 15:
time_last = '%d%d' % (second_last_digit, last_digit)
anaesthetic_time = time_base + time_last
return (in_formatted, out_formatted, anaesthetic_time, today_str)
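# Worked example: time_in_theatre = 20 gives second_last_digit = 1 + 20 // 15 = 2
# and remainder = 5, so last_digit = 1 and anaesthetic_time = '230' + '21' = '23021'.
# For time_in_theatre <= 15 the default time_last = '10' is kept ('23010').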
def make_episode_string(outtime, doctor, print_name, consult,
upper, colon, banding, message, anaesthetist, room):
doc_surname = doctor.split()[-1]
if doc_surname == 'Vivekanandarajah':
doc_surname = 'Suhir'
anaesthetist_surname = anaesthetist.split()[-1]
docs_for_web = doc_surname + '/' + anaesthetist_surname
if consult != 'none':
consult = '<b>' + consult + '</b>'
web_upper = nc.UPPER_DIC[upper]
web_lower = nc.COLON_DIC[colon]
html = '<b>{0}</b> - {7} - {1} - {2} - CONSULT: {3} - UPPER: {4} - LOWER: {5} <b>{6}</b><br>\n'
out_string = html.format(
outtime, docs_for_web, print_name, consult,
web_upper, web_lower, message, room)
return out_string
def make_index(out_str):
today = datetime.datetime.now()
    today_str = today.strftime('%A %d:%m:%Y')
head_string = "DEC procedures for {}<br><br>\n".format(today_str)
    date_file_str = today.strftime('%Y-%m-%d')
date_filename = date_file_str + '.html'
stored_index = os.path.join('d:\\JOHN TILLET\\'
'episode_data\\' + date_filename)
if os.path.isfile(stored_index):
with open(stored_index, 'r') as original:
original.readline()
data = original.read()
with open(stored_index, 'w') as modified:
modified.write(head_string + out_str + data)
else:
base = 'd:\\JOHN TILLET\\episode_data\\'
dest = 'd:\\JOHN TILLET\\episode_data\\html-backup'
for src in glob.glob(base + '*.html'):
shutil.move(src, dest)
with open(stored_index, 'w') as new_index:
new_index.write(head_string + out_str)
return stored_index
def episode_opener(message):
while True:
if not pyautogui.pixelMatchesColor(150, 630, (255, 0, 0)):
print('Open the patient file.')
input('Hit Enter when ready.')
else:
break
pyautogui.moveTo(150, 50)
pyautogui.click()
pyautogui.press('f8')
while not pyautogui.pixelMatchesColor(
534, 330, (102, 203, 234), tolerance=10):
time.sleep(1)
pyautogui.press('n')
while not pyautogui.pixelMatchesColor(
820, 130, (195, 90, 80), tolerance=10):
time.sleep(1)
pyautogui.typewrite(['down'] * 11, interval=0.1)
pyautogui.press('enter')
pyautogui.hotkey('alt', 'f')
time.sleep(1)
if pyautogui.pixelMatchesColor(520, 380, (25, 121, 202),
tolerance=10):
time.sleep(1)
pyautogui.press('enter')
pyautogui.press('c')
pyautogui.hotkey('alt', 'f4')
time.sleep(1)
pyautogui.press('f8')
time.sleep(1)
pyautogui.press('enter')
time.sleep(1)
pyautogui.press('enter')
time.sleep(1)
pyautogui.press('enter')
message += 'New episode made -'
return message
def episode_discharge(intime, outtime, anaesthetist, doctor):
pyautogui.hotkey('alt', 'i')
time.sleep(1)
pyautogui.PAUSE = 0.1
pyautogui.typewrite(['enter'] * 4, interval=0.1)
    pyperclip.copy('empty')  # seed the clipboard with a sentinel value
    pyautogui.hotkey('ctrl', 'c')
    test = pyperclip.paste()
if test != 'empty':
pyautogui.alert(
text='Data here already! Try Again', title='', button='OK')
time.sleep(1)
pyautogui.hotkey('alt', 'f4')
raise BlueChipError
pyautogui.typewrite(intime)
pyautogui.typewrite(['enter'] * 2, interval=0.1)
pyautogui.typewrite(outtime)
pyautogui.typewrite(['enter'] * 3, interval=0.1)
if anaesthetist != 'locum':
pyautogui.typewrite(['tab'] * 6, interval=0.1)
pyautogui.typewrite(anaesthetist)
pyautogui.typewrite('\n')
pyperclip.copy('fail')
else:
pyautogui.typewrite(['tab'] * 7, interval=0.1)
pyautogui.typewrite(doctor)
def episode_procedures(upper, colon, banding, asa):
pe_flag = False # use these to keep state when entering lower lines
banding_flag = False
asa_flag = False
def gastro_chooser(in_str):
if in_str == '0':
return False
up_str = nc.UPPER_DIC[in_str]
pyautogui.typewrite(up_str + '\n')
pyautogui.press('enter')
def asa_chooser(asa):
if asa == '0':
return True
a_str = nc.ASA_DIC[asa]
pyautogui.typewrite(a_str + '\n')
pyautogui.press('enter')
return True
pyautogui.hotkey('alt', 'p')
if colon == '0':
pe_flag = gastro_chooser(upper)
else:
col_str = nc.COLON_DIC[colon]
pyautogui.typewrite(col_str + '\n')
pyautogui.press('enter')
pyautogui.typewrite(['tab'] * 6, interval=0.1)
if upper != '0' and pe_flag is False:
gastro_chooser(upper)
elif banding == 'b':
banding_flag = True
pyautogui.typewrite('32135-00\n')
pyautogui.press('enter')
elif banding == 'a':
banding_flag = True
pyautogui.typewrite('32153-00\n')
pyautogui.press('enter')
else:
asa_flag = asa_chooser(asa)
if asa_flag:
return
else:
pyautogui.typewrite(['tab'] * 2, interval=0.1)
    if banding == 'b' and banding_flag is False:
banding_flag = True
pyautogui.typewrite('32135-00\n')
pyautogui.press('enter')
elif banding == 'a':
banding_flag = True
pyautogui.typewrite('32153-00\n')
pyautogui.press('enter')
else:
asa_flag = asa_chooser(asa)
if asa_flag:
return
else:
pyautogui.typewrite(['tab'] * 2, interval=0.1)
asa_flag = asa_chooser(asa)
if asa_flag:
return
def episode_theatre(doctor, nurse, clips, varix_flag, varix_lot):
pyautogui.hotkey('alt', 'n')
pyautogui.typewrite(['left'] * 2, interval=0.1)
pyautogui.moveTo(50, 155)
pyautogui.click()
pyautogui.press('tab')
    pyperclip.copy('empty')  # seed the clipboard with a sentinel value
    pyautogui.hotkey('ctrl', 'c')
    doc_test = pyperclip.paste()
if doc_test == 'Endoscopist':
pyautogui.press('tab')
pyautogui.typewrite(['enter'] * 2, interval=0.1)
pyautogui.moveTo(450, 155)
pyautogui.click()
pyautogui.typewrite(['tab'] * 2, interval=0.1)
    pyautogui.typewrite(['enter'] * 2, interval=0.1)
<filename>demisto_sdk/commands/update_release_notes/tests/update_rn_test.py
import os
import shutil
import unittest
import pytest
from demisto_sdk.commands.common.git_tools import git_path
from demisto_sdk.commands.common.tools import get_json
class TestRNUpdate(unittest.TestCase):
FILES_PATH = os.path.normpath(os.path.join(__file__, f'{git_path()}/demisto_sdk/tests', 'test_files'))
def test_build_rn_template_integration(self):
"""
Given:
- a dict of changed items
When:
- we want to produce a release notes template
Then:
- return a markdown string
"""
expected_result = \
"\n#### Classifiers\n##### Hello World Classifier\n- %%UPDATE_RN%%\n" \
"\n#### Connections\n##### Hello World Connection\n- %%UPDATE_RN%%\n" \
"\n#### Dashboards\n##### Hello World Dashboard\n- %%UPDATE_RN%%\n" \
"\n#### Incident Fields\n##### Hello World IncidentField\n- %%UPDATE_RN%%\n" \
"\n#### Incident Types\n##### Hello World Incident Type\n- %%UPDATE_RN%%\n" \
"\n#### Indicator Types\n##### Hello World Indicator Type\n- %%UPDATE_RN%%\n" \
"\n#### Integrations\n##### Hello World Integration\n- %%UPDATE_RN%%\n" \
"\n#### Layouts\n##### Hello World Layout\n- %%UPDATE_RN%%\n" \
"##### Second Hello World Layout\n- %%UPDATE_RN%%\n" \
"\n#### Playbooks\n##### Hello World Playbook\n- %%UPDATE_RN%%\n" \
"\n#### Reports\n##### Hello World Report\n- %%UPDATE_RN%%\n" \
"\n#### Scripts\n##### Hello World Script\n- %%UPDATE_RN%%\n" \
"\n#### Widgets\n##### Hello World Widget\n- %%UPDATE_RN%%\n"
from demisto_sdk.commands.update_release_notes.update_rn import UpdateRN
update_rn = UpdateRN(pack="HelloWorld", update_type='minor', pack_files={'HelloWorld'}, added_files=set())
changed_items = {
"Hello World Integration": "Integration",
"Hello World Playbook": "Playbook",
"Hello World Script": "Script",
"Hello World IncidentField": "Incident Fields",
"Hello World Classifier": "Classifiers",
"N/A": "Integration",
"Hello World Layout": "Layouts",
"Hello World Incident Type": "Incident Types",
"Hello World Indicator Type": "Indicator Types",
"Second Hello World Layout": "Layouts",
"Hello World Widget": "Widgets",
"Hello World Dashboard": "Dashboards",
"Hello World Connection": "Connections",
"Hello World Report": "Reports",
}
release_notes = update_rn.build_rn_template(changed_items)
assert expected_result == release_notes
def test_find_corresponding_yml(self):
"""
Given:
- a filepath containing a python file
When:
- determining the changed file
Then:
- return only the yml of the changed file
"""
expected_result = "Integration/HelloWorld.yml"
from demisto_sdk.commands.update_release_notes.update_rn import UpdateRN
update_rn = UpdateRN(pack="HelloWorld", update_type='minor', pack_files={'HelloWorld'}, added_files=set())
filepath = 'Integration/HelloWorld.py'
filename = update_rn.find_corresponding_yml(filepath)
assert expected_result == filename
def test_return_release_notes_path(self):
"""
Given:
- a pack name and version
When:
- building the release notes file within the ReleaseNotes directory
Then:
- the filepath of the correct release notes.
"""
expected_result = 'Packs/HelloWorld/ReleaseNotes/1_1_1.md'
from demisto_sdk.commands.update_release_notes.update_rn import UpdateRN
update_rn = UpdateRN(pack="HelloWorld", update_type='minor', pack_files={'HelloWorld'}, added_files=set())
input_version = '1.1.1'
result = update_rn.return_release_notes_path(input_version)
assert expected_result == result
def test_bump_version_number_minor(self):
"""
Given:
- a pack name and version
When:
- bumping the version number in the metadata.json
Then:
- return the correct bumped version number
"""
shutil.copy(src=os.path.join(TestRNUpdate.FILES_PATH, 'fake_pack/pack_metadata.json'),
dst=os.path.join(TestRNUpdate.FILES_PATH, 'fake_pack/_pack_metadata.json'))
expected_version = '1.1.0'
from demisto_sdk.commands.update_release_notes.update_rn import UpdateRN
update_rn = UpdateRN(pack="HelloWorld", update_type='minor', pack_files={'HelloWorld'}, added_files=set())
update_rn.metadata_path = os.path.join(TestRNUpdate.FILES_PATH, 'fake_pack/pack_metadata.json')
version_number, _ = update_rn.bump_version_number(pre_release=False, specific_version=None)
assert version_number == expected_version
os.remove(os.path.join(TestRNUpdate.FILES_PATH, 'fake_pack/pack_metadata.json'))
shutil.copy(src=os.path.join(TestRNUpdate.FILES_PATH, 'fake_pack/_pack_metadata.json'),
dst=os.path.join(TestRNUpdate.FILES_PATH, 'fake_pack/pack_metadata.json'))
def test_bump_version_number_major(self):
"""
Given:
- a pack name and version
When:
- bumping the version number in the metadata.json
Then:
- return the correct bumped version number
"""
shutil.copy(src=os.path.join(TestRNUpdate.FILES_PATH, 'fake_pack/pack_metadata.json'),
dst=os.path.join(TestRNUpdate.FILES_PATH, 'fake_pack/_pack_metadata.json'))
expected_version = '2.0.0'
from demisto_sdk.commands.update_release_notes.update_rn import UpdateRN
update_rn = UpdateRN(pack="HelloWorld", update_type='major', pack_files={'HelloWorld'}, added_files=set())
update_rn.metadata_path = os.path.join(TestRNUpdate.FILES_PATH, 'fake_pack/pack_metadata.json')
version_number, _ = update_rn.bump_version_number(pre_release=False, specific_version=None)
assert version_number == expected_version
os.remove(os.path.join(TestRNUpdate.FILES_PATH, 'fake_pack/pack_metadata.json'))
shutil.copy(src=os.path.join(TestRNUpdate.FILES_PATH, 'fake_pack/_pack_metadata.json'),
dst=os.path.join(TestRNUpdate.FILES_PATH, 'fake_pack/pack_metadata.json'))
def test_bump_version_number_revision(self):
"""
Given:
- a pack name and version
When:
- bumping the version number in the metadata.json
Then:
- return the correct bumped version number
"""
shutil.copy(src=os.path.join(TestRNUpdate.FILES_PATH, 'fake_pack/pack_metadata.json'),
dst=os.path.join(TestRNUpdate.FILES_PATH, 'fake_pack/_pack_metadata.json'))
expected_version = '1.0.1'
from demisto_sdk.commands.update_release_notes.update_rn import UpdateRN
update_rn = UpdateRN(pack="HelloWorld", update_type='revision', pack_files={'HelloWorld'}, added_files=set())
update_rn.metadata_path = os.path.join(TestRNUpdate.FILES_PATH, 'fake_pack/pack_metadata.json')
version_number, _ = update_rn.bump_version_number(pre_release=False, specific_version=None)
assert version_number == expected_version
os.remove(os.path.join(TestRNUpdate.FILES_PATH, 'fake_pack/pack_metadata.json'))
shutil.copy(src=os.path.join(TestRNUpdate.FILES_PATH, 'fake_pack/_pack_metadata.json'),
dst=os.path.join(TestRNUpdate.FILES_PATH, 'fake_pack/pack_metadata.json'))
def test_bump_version_number_specific(self):
"""
Given:
- a pack name and specific version
When:
- bumping the version number in the metadata.json
Then:
- return the correct bumped version number
"""
shutil.copy(src=os.path.join(TestRNUpdate.FILES_PATH, 'fake_pack/pack_metadata.json'),
dst=os.path.join(TestRNUpdate.FILES_PATH, 'fake_pack/_pack_metadata.json'))
expected_version = '2.0.0'
from demisto_sdk.commands.update_release_notes.update_rn import UpdateRN
update_rn = UpdateRN(pack="HelloWorld", update_type=None, specific_version='2.0.0',
pack_files={'HelloWorld'}, added_files=set())
update_rn.metadata_path = os.path.join(TestRNUpdate.FILES_PATH,
'fake_pack/pack_metadata.json')
version_number, _ = update_rn.bump_version_number(pre_release=False,
specific_version='2.0.0')
assert version_number == expected_version
os.remove(os.path.join(TestRNUpdate.FILES_PATH, 'fake_pack/pack_metadata.json'))
shutil.copy(src=os.path.join(TestRNUpdate.FILES_PATH, 'fake_pack/_pack_metadata.json'),
dst=os.path.join(TestRNUpdate.FILES_PATH, 'fake_pack/pack_metadata.json'))
def test_bump_version_number_revision_overflow(self):
"""
Given:
- a pack name and a version before an overflow condition
When:
- bumping the version number in the metadata.json
Then:
- return ValueError
"""
shutil.copy(src=os.path.join(TestRNUpdate.FILES_PATH, 'fake_pack_invalid/pack_metadata.json'),
dst=os.path.join(TestRNUpdate.FILES_PATH, 'fake_pack_invalid/_pack_metadata.json'))
from demisto_sdk.commands.update_release_notes.update_rn import UpdateRN
update_rn = UpdateRN(pack="HelloWorld", update_type='revision', pack_files={'HelloWorld'}, added_files=set())
update_rn.metadata_path = os.path.join(TestRNUpdate.FILES_PATH, 'fake_pack_invalid/pack_metadata.json')
self.assertRaises(ValueError, update_rn.bump_version_number)
os.remove(os.path.join(TestRNUpdate.FILES_PATH, 'fake_pack_invalid/pack_metadata.json'))
shutil.copy(src=os.path.join(TestRNUpdate.FILES_PATH, 'fake_pack_invalid/_pack_metadata.json'),
dst=os.path.join(TestRNUpdate.FILES_PATH, 'fake_pack_invalid/pack_metadata.json'))
def test_bump_version_number_minor_overflow(self):
"""
Given:
- a pack name and a version before an overflow condition
When:
- bumping the version number in the metadata.json
Then:
- return ValueError
"""
shutil.copy(src=os.path.join(TestRNUpdate.FILES_PATH, 'fake_pack_invalid/pack_metadata.json'),
dst=os.path.join(TestRNUpdate.FILES_PATH, 'fake_pack_invalid/_pack_metadata.json'))
from demisto_sdk.commands.update_release_notes.update_rn import UpdateRN
update_rn = UpdateRN(pack="HelloWorld", update_type='minor', pack_files={'HelloWorld'}, added_files=set())
update_rn.metadata_path = os.path.join(TestRNUpdate.FILES_PATH, 'fake_pack_invalid/pack_metadata.json')
self.assertRaises(ValueError, update_rn.bump_version_number)
os.remove(os.path.join(TestRNUpdate.FILES_PATH, 'fake_pack_invalid/pack_metadata.json'))
shutil.copy(src=os.path.join(TestRNUpdate.FILES_PATH, 'fake_pack_invalid/_pack_metadata.json'),
dst=os.path.join(TestRNUpdate.FILES_PATH, 'fake_pack_invalid/pack_metadata.json'))
def test_bump_version_number_major_overflow(self):
"""
Given:
- a pack name and a version before an overflow condition
When:
- bumping the version number in the metadata.json
Then:
- return ValueError
"""
shutil.copy(src=os.path.join(TestRNUpdate.FILES_PATH, 'fake_pack_invalid/pack_metadata.json'),
dst=os.path.join(TestRNUpdate.FILES_PATH, 'fake_pack_invalid/_pack_metadata.json'))
from demisto_sdk.commands.update_release_notes.update_rn import UpdateRN
update_rn = UpdateRN(pack="HelloWorld", update_type='major', pack_files={'HelloWorld'}, added_files=set())
update_rn.metadata_path = os.path.join(TestRNUpdate.FILES_PATH, 'fake_pack_invalid/pack_metadata.json')
self.assertRaises(ValueError, update_rn.bump_version_number)
os.remove(os.path.join(TestRNUpdate.FILES_PATH, 'fake_pack_invalid/pack_metadata.json'))
shutil.copy(src=os.path.join(TestRNUpdate.FILES_PATH, 'fake_pack_invalid/_pack_metadata.json'),
dst=os.path.join(TestRNUpdate.FILES_PATH, 'fake_pack_invalid/pack_metadata.json'))
def test_bump_version_file_not_found(self):
"""
Given:
- a pack name and a metadata which does not exist
When:
- bumping the version number in the metadata.json
Then:
- return ValueError
"""
from demisto_sdk.commands.update_release_notes.update_rn import UpdateRN
update_rn = UpdateRN(pack="HelloWorld", update_type='major', pack_files={'HelloWorld'}, added_files=set())
update_rn.metadata_path = os.path.join(TestRNUpdate.FILES_PATH, 'fake_pack_invalid/pack_metadata_.json')
self.assertRaises(SystemExit, update_rn.bump_version_number)
def test_bump_version_no_version(self):
"""
Given:
- a pack name and a version before an overflow condition
When:
- bumping the version number in the metadata.json
Then:
- return ValueError
"""
from demisto_sdk.commands.update_release_notes.update_rn import UpdateRN
update_rn = UpdateRN(pack="HelloWorld", update_type=None, pack_files={'HelloWorld'}, added_files=set())
update_rn.metadata_path = os.path.join(TestRNUpdate.FILES_PATH, 'fake_pack_invalid/pack_metadata.json')
self.assertRaises(ValueError, update_rn.bump_version_number)
class TestRNUpdateUnit:
FILES_PATH = os.path.normpath(os.path.join(__file__, f'{git_path()}/demisto_sdk/tests', 'test_files'))
CURRENT_RN = """
#### Incident Types
##### Cortex XDR Incident
- %%UPDATE_RN%%
#### Incident Fields
##### XDR Alerts
- %%UPDATE_RN%%
"""
CHANGED_FILES = {
"Cortex XDR Incident": "Incident Type",
"XDR Alerts": "Incident Field",
"Sample IncidentField": "Incident Field",
"Cortex XDR - IR": "Integration",
"Nothing": None,
"Sample": "Integration",
}
EXPECTED_RN_RES = """
#### Incident Types
##### Cortex XDR Incident
- %%UPDATE_RN%%
#### Incident Fields
##### Sample IncidentField
- %%UPDATE_RN%%
##### XDR Alerts
- %%UPDATE_RN%%
#### Integration
##### Sample
- %%UPDATE_RN%%
##### Cortex XDR - IR
- %%UPDATE_RN%%
"""
diff_package = [('Layouts/VulnDB/VulnDB.json', ('VulnDB', 'Layouts')),
('Classifiers/VulnDB/VulnDB.json', ('VulnDB', 'Classifiers')),
('IncidentTypes/VulnDB/VulnDB.json', ('VulnDB', 'Incident Types')),
('IncidentFields/VulnDB/VulnDB.json', ('VulnDB', 'Incident Fields')),
('Playbooks/VulnDB/VulnDB_playbook.yml', ('VulnDB', 'Playbook')),
('Script/VulnDB/VulnDB.py', ('VulnDB', 'Script')),
('ReleaseNotes/1_0_1.md', ('N/A', None)),
('Integrations/VulnDB/VulnDB.yml', ('VulnDB', 'Integration')),
('Connections/VulnDB/VulnDB.yml', ('VulnDB', 'Connections')),
('Dashboards/VulnDB/VulnDB.yml', ('VulnDB', 'Dashboards')),
('Widgets/VulnDB/VulnDB.yml', ('VulnDB', 'Widgets')),
('Reports/VulnDB/VulnDB.yml', ('VulnDB', 'Reports')),
('IndicatorTypes/VulnDB/VulnDB.yml', ('VulnDB', 'Indicator Types')),
('TestPlaybooks/VulnDB/VulnDB.yml', ('VulnDB', None)),
]
@pytest.mark.parametrize('path, expected_result', diff_package)
def test_ident_changed_file_type(self, path, expected_result, mocker):
"""
Given:
- a filepath of a changed file
When:
- determining the type of item changed (e.g. Integration, Script, Layout, etc.)
Then:
- return tuple where first value is the pack name, and second is the item type
"""
from demisto_sdk.commands.update_release_notes.update_rn import UpdateRN
update_rn = UpdateRN(pack="VulnDB", update_type='minor', pack_files={'HelloWorld'}, added_files=set())
filepath = os.path.join(TestRNUpdate.FILES_PATH, path)
mocker.patch.object(UpdateRN, 'find_corresponding_yml', return_value='Integrations/VulnDB/VulnDB.yml')
mocker.patch.object(UpdateRN, 'get_display_name', return_value='VulnDB')
result = update_rn.identify_changed_file_type(filepath)
assert expected_result == result
def test_check_rn_directory(self):
"""
Given:
- a filepath for a release notes directory
When:
- determining if the directory exists
Then:
- create the directory if it does not exist
"""
from demisto_sdk.commands.update_release_notes.update_rn import UpdateRN
filepath = os.path.join(TestRNUpdate.FILES_PATH, 'ReleaseNotes')
update_rn = UpdateRN(pack="VulnDB", update_type='minor', pack_files={'HelloWorld'}, added_files=set())
update_rn.check_rn_dir(filepath)
def test_create_markdown(self):
"""
Given:
- a filepath for a release notes file and a markdown string
When:
- creating a new markdown file
Then:
- create the file or skip if it exists.
"""
from demisto_sdk.commands.update_release_notes.update_rn import UpdateRN
update_rn = UpdateRN(pack="VulnDB", update_type='minor', pack_files={'HelloWorld'}, added_files=set())
filepath = os.path.join(TestRNUpdate.FILES_PATH, 'ReleaseNotes/1_1_1.md')
md_string = '### Test'
update_rn.create_markdown(release_notes_path=filepath, rn_string=md_string, changed_files={})
def test_update_existing_rn(self):
"""
Given:
- Existing release notes and set of changed files
When:
- rerunning the update command
Then:
- return updated release notes while preserving the integrity of the existing notes.
"""
from demisto_sdk.commands.update_release_notes.update_rn import UpdateRN
update_rn = UpdateRN(pack="HelloWorld", update_type='minor', pack_files={'HelloWorld'},
added_files=set())
new_rn = update_rn.update_existing_rn(self.CURRENT_RN, self.CHANGED_FILES)
assert self.EXPECTED_RN_RES == new_rn
def test_commit_to_bump(self):
"""
Given:
- No inputs, but a condition where bumping the version is ready
When:
- running update
Then:
- update the metadata json by the version designated.
"""
ORIGINAL = os.path.join(TestRNUpdate.FILES_PATH, 'fake_pack_invalid/pack_metadata.json')
TEMP_FILE = os.path.join(TestRNUpdate.FILES_PATH, 'fake_pack_invalid/_pack_metadata.json')
from demisto_sdk.commands.update_release_notes.update_rn import UpdateRN
update_rn = UpdateRN(pack="HelloWorld", update_type='minor', pack_files={'HelloWorld'},
added_files=set())
shutil.copy(src=ORIGINAL, dst=TEMP_FILE)
data_dict = get_json(TEMP_FILE)
update_rn.metadata_path = TEMP_FILE
update_rn.commit_to_bump(data_dict)
os.remove(ORIGINAL)
shutil.copy(src=TEMP_FILE, dst=ORIGINAL)
def test_find_added_pack_files(self):
"""
Given:
- List of added files
When:
- searching for relevant pack files
Then:
- return a list of relevant pack files which were added.
"""
        from demisto_sdk.commands.update_release_notes.update_rn import UpdateRN
<filename>sam_jax/training_utils/flax_training.py
# Copyright 2020 The Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions to train the networks for image classification tasks."""
import functools
import math
import os
import time
from typing import Any, Callable, Dict, List, Optional, Tuple
from absl import flags
from absl import logging
import flax
from flax import jax_utils
from flax import optim
from flax.metrics import tensorboard
from flax.training import checkpoints
from flax.training import common_utils
from flax.training import lr_schedule
import jax
import jax.numpy as jnp
import numpy as np
from sam.sam_jax.datasets import dataset_source as dataset_source_lib
from sam.sam_jax.efficientnet import optim as efficientnet_optim
import tensorflow as tf
from tensorflow.io import gfile
FLAGS = flags.FLAGS
# Training hyper-parameters
flags.DEFINE_float('gradient_clipping', 5.0, 'Gradient clipping.')
flags.DEFINE_float('learning_rate', 0.1, 'Initial learning rate.')
flags.DEFINE_bool('use_learning_rate_schedule', True,
'Whether to use a cosine schedule or keep the learning rate '
                  'constant. Training on cifar should always use the schedule; '
                  'this flag is mostly for testing purposes.')
flags.DEFINE_float('weight_decay', 0.001, 'Weight decay coefficient.')
flags.DEFINE_integer('run_seed', 0,
'Seed to use to generate pseudo random number during '
'training (for dropout for instance). Has no influence on '
'the dataset shuffling.')
flags.DEFINE_bool('use_rmsprop', False, 'If True, uses RMSprop instead of SGD')
flags.DEFINE_enum('lr_schedule', 'cosine', ['cosine', 'exponential'],
'Learning rate schedule to use.')
# Additional flags that don't affect the model.
flags.DEFINE_integer('save_progress_seconds', 3600, 'Save progress every...s')
flags.DEFINE_multi_integer(
'additional_checkpoints_at_epochs', [],
'Additional epochs when we should save the model for later analysis. '
'No matter the value of this flag, the most recent version of the model '
'will be saved regularly to resume training if needed.')
flags.DEFINE_bool('also_eval_on_training_set', False,
'If set to true, the model will also be evaluated on the '
'(non-augmented) training set at the end of each epoch.')
flags.DEFINE_bool('compute_top_5_error_rate', False,
'If true, will also compute top 5 error rate.')
flags.DEFINE_float('label_smoothing', 0.0, 'Label smoothing for cross entropy.')
flags.DEFINE_float('ema_decay', 0.0, 'If not zero, use EMA on all weights.')
flags.DEFINE_bool('no_weight_decay_on_bn', False,
'If set to True, will not apply weight decay on the batch '
'norm parameters.')
flags.DEFINE_integer('evaluate_every', 1,
'Evaluate on the test set every n epochs.')
# SAM related flags.
flags.DEFINE_float('sam_rho', 0.0,
'Size of the neighborhood considered for the SAM '
'perturbation. If set to zero, SAM will not be used.')
flags.DEFINE_bool('sync_perturbations', False,
'If set to True, sync the adversarial perturbation between '
'replicas.')
flags.DEFINE_integer('inner_group_size', None,
'Inner group size for syncing the adversarial gradients.'
'If None, we sync the adversarial perturbation across all '
'replicas. Else, we sync the perturbations inside groups '
'of inner_group_size replicas. Has no effect if '
'sync_perturbations is set to False.')
def local_replica_groups(inner_group_size: int) -> List[List[int]]:
"""Constructs local nearest-neighbor rings given the JAX device assignment.
For inner_group_size=8, each inner group is a tray with replica order:
0/1 2/3
7/6 5/4
Args:
inner_group_size: Number of replica in each group.
Returns:
A list of replica id groups.
"""
world_size = jax.device_count()
outer_group_size, ragged = divmod(world_size, inner_group_size)
assert not ragged, 'inner group size must evenly divide global device count'
# the last device should have maximal x and y coordinate
def bounds_from_last_device(device):
x, y, z = device.coords
return (x + 1) * (device.core_on_chip + 1), (y + 1) * (z + 1)
global_x, _ = bounds_from_last_device(jax.devices()[-1])
per_host_x, per_host_y = bounds_from_last_device(jax.local_devices(0)[-1])
assert inner_group_size in [2 ** i for i in range(1, 15)], \
'inner group size must be a power of two'
if inner_group_size <= 4:
# inner group is Nx1 (core, chip, 2x1)
inner_x, inner_y = inner_group_size, 1
inner_perm = range(inner_group_size)
else:
if inner_group_size <= global_x * 2:
# inner group is Nx2 (2x2 tray, 4x2 DF pod host, row of hosts)
inner_x, inner_y = inner_group_size // 2, 2
else:
# inner group covers the full x dimension and must be >2 in y
inner_x, inner_y = global_x, inner_group_size // global_x
p = np.arange(inner_group_size)
per_group_hosts_x = 1 if inner_x < per_host_x else inner_x // per_host_x
p = p.reshape(inner_y // per_host_y, per_group_hosts_x,
per_host_y, inner_x // per_group_hosts_x)
p = p.transpose(0, 2, 1, 3)
p = p.reshape(inner_y // 2, 2, inner_x)
p[:, 1, :] = p[:, 1, ::-1]
inner_perm = p.reshape(-1)
inner_replica_groups = [[o * inner_group_size + i for i in inner_perm]
for o in range(outer_group_size)]
return inner_replica_groups
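# For example, with jax.device_count() == 16 and inner_group_size == 8 this
# yields two groups of eight replica ids (outer_group_size == 2); the ordering
# inside each group depends on the device topology.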
def restore_checkpoint(
optimizer: flax.optim.Optimizer,
model_state: Any,
directory: str) -> Tuple[flax.optim.Optimizer, flax.nn.Collection, int]:
"""Restores a model and its state from a given checkpoint.
If several checkpoints are saved in the checkpoint directory, the latest one
will be loaded (based on the `epoch`).
Args:
optimizer: The optimizer containing the model that we are training.
model_state: Current state associated with the model.
directory: Directory where the checkpoints should be saved.
Returns:
The restored optimizer and model state, along with the number of epochs the
model was trained for.
"""
train_state = dict(optimizer=optimizer, model_state=model_state, epoch=0)
restored_state = checkpoints.restore_checkpoint(directory, train_state)
return (restored_state['optimizer'],
restored_state['model_state'],
restored_state['epoch'])
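# Restore-or-start sketch (`train_dir` stands in for whatever checkpoint
# directory the caller uses):
#     optimizer, model_state, start_epoch = restore_checkpoint(
#         optimizer, model_state, train_dir)
#     for epoch in range(start_epoch, num_epochs):
#         ...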
def save_checkpoint(optimizer: flax.optim.Optimizer,
model_state: Any,
directory: str,
epoch: int):
"""Saves a model and its state.
Removes a checkpoint if it already exists for a given epoch. For multi-host
training, only the first host will save the checkpoint.
Args:
optimizer: The optimizer containing the model that we are training.
model_state: Current state associated with the model.
directory: Directory where the checkpoints should be saved.
epoch: Number of epochs the model has been trained for.
"""
if jax.host_id() != 0:
return
# Sync across replicas before saving.
optimizer = jax.tree_map(lambda x: x[0], optimizer)
model_state = jax.tree_map(lambda x: jnp.mean(x, axis=0), model_state)
train_state = dict(optimizer=optimizer,
model_state=model_state,
epoch=epoch)
if gfile.exists(os.path.join(directory, 'checkpoint_' + str(epoch))):
gfile.remove(os.path.join(directory, 'checkpoint_' + str(epoch)))
checkpoints.save_checkpoint(directory, train_state, epoch, keep=2)
def create_optimizer(model: flax.nn.Model,
learning_rate: float,
beta: float = 0.9) -> flax.optim.Optimizer:
"""Creates an optimizer.
Learning rate will be ignored when using a learning rate schedule.
Args:
model: The FLAX model to optimize.
learning_rate: Learning rate for the gradient descent.
beta: Momentum parameter.
Returns:
A SGD (or RMSProp) optimizer that targets the model.
"""
if FLAGS.use_rmsprop:
# We set beta2 and epsilon to the values used in the efficientnet paper.
optimizer_def = efficientnet_optim.RMSProp(
learning_rate=learning_rate, beta=beta, beta2=0.9, eps=0.001)
else:
optimizer_def = optim.Momentum(learning_rate=learning_rate,
beta=beta,
nesterov=True)
optimizer = optimizer_def.create(model)
return optimizer
def pr_dim(hid):
  """Returns the participation ratio dimensionality of a batch of activations."""
  N = hid.shape[0]
  hid = hid - hid.mean(0)
  if hid.shape[0] >= hid.shape[1]:
    C = hid.T @ hid / (N - 1)
  else:
    C = hid @ hid.T / (N - 1)
  # Eigenvalues of the empirical covariance; in the wide case the Gram matrix
  # shares the same nonzero spectrum.
  ew, _ = jnp.linalg.eigh(C)
  return jnp.sum(ew) ** 2 / jnp.sum(ew ** 2)
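# Intuition: with d equal covariance eigenvalues the participation ratio is
# (d * lam)**2 / (d * lam**2) = d, while a single dominant eigenvalue gives a
# value close to 1.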
def pr_dim_loss(hid_list: List[jnp.ndarray],
                dim_target: float) -> jnp.ndarray:
  """Returns the participation ratio dimensionality regularizer loss.
  Args:
    hid_list: List of hidden unit activations.
    dim_target: Target dimensionality.
  Returns:
    The squared error between each layer's participation ratio dimension and
    the target, summed over layers.
  """
  dim_loss = 0
  for hid in hid_list:
    hid = hid.reshape(hid.shape[0], -1)
    hprdim = pr_dim(hid)
    dim_loss = dim_loss + (hprdim - dim_target) ** 2
  return jnp.nan_to_num(dim_loss)  # Set NaNs (degenerate activations) to zero.
def cross_entropy_loss(logits: jnp.ndarray,
                       one_hot_labels: jnp.ndarray,
                       mask: Optional[jnp.ndarray] = None) -> jnp.ndarray:
  """Returns the cross entropy loss between some logits and some labels.
  Args:
    logits: Output of the model.
    one_hot_labels: One-hot encoded labels. Dimensions should match the logits.
    mask: Mask to apply to the loss to ignore some samples (usually, the padding
      of the batch). Array of ones and zeros.
  Returns:
    The cross entropy, averaged over the first dimension (samples).
  """
  if FLAGS.label_smoothing > 0:
    smoothing = jnp.ones_like(one_hot_labels) / one_hot_labels.shape[-1]
    one_hot_labels = ((1 - FLAGS.label_smoothing) * one_hot_labels
                      + FLAGS.label_smoothing * smoothing)
  log_softmax_logits = jax.nn.log_softmax(logits)
  if mask is None:
    mask = jnp.ones([logits.shape[0]])
  mask = mask.reshape([logits.shape[0], 1])
  loss = -jnp.sum(one_hot_labels * log_softmax_logits * mask) / mask.sum()
  return jnp.nan_to_num(loss)  # Set to zero if there are no non-masked samples.
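# Label smoothing example: with label_smoothing = 0.1 and 10 classes, a one-hot
# target becomes 0.9 + 0.1 / 10 = 0.91 on the true class and 0.01 elsewhere.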
def error_rate_metric(logits: jnp.ndarray,
one_hot_labels: jnp.ndarray,
mask: Optional[jnp.ndarray] = None) -> jnp.ndarray:
"""Returns the error rate between some predictions and some labels.
Args:
logits: Output of the model.
one_hot_labels: One-hot encoded labels. Dimensions should match the logits.
mask: Mask to apply to the loss to ignore some samples (usually, the padding
of the batch). Array of ones and zeros.
Returns:
    The error rate (1 - accuracy), averaged over the first dimension (samples).
#!/usr/bin/python3
########################################################################################
#
# ISPWatcher2
# <NAME>
# November 1, 2009
#
# - Testing -
# Tested on MacOS Snow Leopard (10.6) and Ubuntu Linux 9.04 and 9.10
#
# - Version History -
# 2.0.0 - November 1, 2009
# Converted from .NET to Python (hence the 2.0)
# 2.0.1 - November 21, 2009
# Added command line options for help (-h), quiet (-q), test (-t) and version (-v)
# 2.0.2 - November 30, 2011
# Removed single ticks around HTTP host emails so that iPhone and other devices will
# link straight to website from email instead of creating a 404
# 2.1.0 - November 28, 2012
# Lower Cased Node Names - no more case sensitive XML
# JSON control file support (also lower cased variable names)
# Streamlined code
# 2.1.1 - April 1, 2014
# Added Date to the email headers as some clients can't derive the Date: from Received
# 3.0.0 - Dec 17, 2019
# Upgraded to Python3
########################################################################################
VERSION = "3.0.1"
import datetime
import urllib
import urllib.request
import smtplib
import sys, os
import poplib
import imaplib
import getopt
import json
# Global Variables for command line option handling (see VERSION variable above)
SENDEMAILS = 1
CHATTY = 1
MAILSERVER = "smtp.gmail.com"
MAILSERVERPORT = 587
MAILSERVERUSERNAME = ""
MAILSERVERPASSWORD = ""
MAILSERVERSTARTTLS = 1
MAILFROM = ""
MAILSUBJECT = "ISPWatcher2 Failure"
EMAILS = {'':''}
def printversion():
"""Prints the version of ISPWatcher2
Returns nothing."""
print("* ISPWatcher Version " + VERSION)
def printusage():
"""Prints the usage of ISPWatcher2
Returns nothing."""
printversion()
print("\tispwatcher.py -h - Prints help screen")
print("\tispwatcher.ph -v - Prints Version information.")
print("\tispwatcher.py -t - Outputs only to standard output, sends no emails")
print("\tispwatcher.py -q - Checks servers quietly")
try:
    opts, args = getopt.getopt(sys.argv[1:], "hvtq")
except getopt.GetoptError as err:
    print(str(err))
    printusage()
    sys.exit(2)
for o, a in opts:
if o == "-h":
printusage()
sys.exit()
if o == "-t":
SENDEMAILS = 0
if o == "-q":
CHATTY = 0
if o == "-v":
printversion()
sys.exit()
from xml.dom import minidom
#reload(sys)
#sys.setdefaultencoding("latin1")
def CheckServerJSON(settings):
    """Parses the JSON settings object, applies the global mail options and
    delegates each server entry to the matching check function.
    Returns nothing."""
    global MAILSERVER
    global MAILSERVERPORT
    global MAILSERVERUSERNAME
    global MAILSERVERPASSWORD
    global MAILFROM
    global MAILSERVERSTARTTLS
    global MAILSUBJECT
    for key, value in settings["options"].items():
        if key.lower() == "mailserver":
            MAILSERVER = value
            MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": Found JSON configuration mailserver: " + str(MAILSERVER))
        if key.lower() == "mailserverport":
            MAILSERVERPORT = value
            MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": Found JSON configuration mailserverport: " + str(MAILSERVERPORT))
        if key.lower() == "mailserverusername":
            MAILSERVERUSERNAME = value
            MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": Found JSON configuration mailserverusername: " + str(MAILSERVERUSERNAME))
        if key.lower() == "mailserverstarttls":
            MAILSERVERSTARTTLS = value
            MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": Found JSON configuration mailserverstarttls: " + str(MAILSERVERSTARTTLS))
        if key.lower() == "mailserverpassword":
            MAILSERVERPASSWORD = value
            MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": Found JSON configuration mailserverpassword: " + str(MAILSERVERPASSWORD))
        if key.lower() == "mailfrom":
            MAILFROM = value
            MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": Found JSON configuration mailfrom: " + str(MAILFROM))
        if key.lower() == "mailsubject":
            MAILSUBJECT = value
            MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": Found JSON configuration mailsubject: " + str(MAILSUBJECT))
for server in settings["servers"]:
sms = "0"
host = ""
recipients = []
watchfor = ""
warnif = ""
port = "0"
type = ""
active = "1"
timeoutalert = "0"
for key, value in server.items():
key = key.lower()
if key == "type":
type = value
if key == "host":
host = value
if key == "recipients":
recipients.append(value)
if key == "watchfor":
watchfor = value
if key == "warnif":
warnif = value
if key == "port":
port = value
if key == "timeoutalert":
timeoutalert = value
if key == "active":
active = value
if type == "http":
if port == "0":
port = "80"
if active == "1":
CheckHTTPServer(host, port, recipients, watchfor, warnif, sms, timeoutalert)
else:
MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": Ignoring inactive server " + type + "://" + host + ":" + port + " - edit config to reenable")
if type == "smtp":
if port == "0":
port = "25"
if active == "1":
CheckSMTPServer(host, port, recipients, watchfor, sms, timeoutalert)
else:
MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": Ignoring inactive server " + type + "://" + host + ":" + port + " - edit config to reenable")
if type == "pop3":
if port == "0":
port = "110"
if active == "1":
CheckPOP3Server(host, port, recipients, watchfor, sms, timeoutalert)
else:
MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": Ignoring inactive server " + type + "://" + host + ":" + port + " - edit config to reenable")
if type == "imap" or type == "imap4":
if port == "0":
port = "143"
if active == "1":
CheckIMAPServer(host, port, recipients, watchfor, sms, timeoutalert)
else:
MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": Ignoring inactive server " + type + "://" + host + ":" + port + " - edit config to reenable")
if type == "pop3ssl" or type == "popssl":
if port == "0":
port = "995"
if active == "1":
CheckPOP3SSLServer(host, port, recipients, watchfor, sms, timeoutalert)
else:
MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": Ignoring inactive server " + type + "://" + host + ":" + port + " - edit config to reenable")
if type == "imapssl" or type == "imap4ssl":
if port == "0":
port = "993"
if active == "1":
CheckIMAPSSLServer(host, port, recipients, watchfor, sms, timeoutalert)
else:
MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": Ignoring inactive server " + type + "://" + host + ":" + port + " - edit config to reenable")
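# Example JSON control file (illustrative; keys inferred from the parsing
# above -- values here are placeholders, not real configuration):
#
# {
#     "options": {
#         "mailserver": "smtp.gmail.com",
#         "mailserverport": "587",
#         "mailserverusername": "watcher@example.com",
#         "mailserverpassword": "secret",
#         "mailserverstarttls": "1",
#         "mailfrom": "watcher@example.com",
#         "mailsubject": "ISPWatcher2 Failure"
#     },
#     "servers": [
#         {"type": "http", "host": "www.example.com", "port": "80",
#          "recipients": "admin@example.com", "watchfor": "Welcome",
#          "active": "1", "timeoutalert": "0"}
#     ]
# }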
def CheckServerXML(oServer):
"""Parses through XML Object of Server and delegates oServer object to otehr functionstype in server type
Returns nothing."""
global MAILSERVER
global MAILSERVERPORT
global MAILSERVERUSERNAME
global MAILSERVERPASSWORD
global MAILFROM
global MAILSERVERSTARTTLS
global MAILSUBJECT
type = ""
for oAttributes in oServer.childNodes:
if oAttributes.nodeType != minidom.Node.TEXT_NODE:
if oAttributes.nodeName.lower() == "type":
type = oAttributes.childNodes[0].nodeValue
if oAttributes.nodeName.lower() == "mailserver":
MAILSERVER = oAttributes.childNodes[0].nodeValue
MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": Found XML configuration MailServer: " + MAILSERVER)
if oAttributes.nodeName.lower() == "mailserverport":
MAILSERVERPORT = oAttributes.childNodes[0].nodeValue
MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": Found XML configuration MailServerPort: " + MAILSERVERPORT)
if oAttributes.nodeName.lower() == "mailserverusername":
MAILSERVERUSERNAME = oAttributes.childNodes[0].nodeValue
MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": Found XML configuration MailServerUserName: " + MAILSERVERUSERNAME)
if oAttributes.nodeName.lower() == "mailserverstarttls":
MAILSERVERSTARTTLS = oAttributes.childNodes[0].nodeValue
MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": Found XML configuration MailServerStartTLS: " + MAILSERVERSTARTTLS)
if oAttributes.nodeName.lower() == "mailserverpassword":
MAILSERVERPASSWORD = oAttributes.childNodes[0].nodeValue
MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": Found XML configuration MailServerPassword: " + MAILSERVERPASSWORD)
if oAttributes.nodeName.lower() == "mailfrom":
MAILFROM = oAttributes.childNodes[0].nodeValue
MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": Found XML configuration MailFrom: " + MAILFROM)
if oAttributes.nodeName.lower() == "mailsubject":
MAILSUBJECT = oAttributes.childNodes[0].nodeValue
MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": Found XML configuration MailSubject: " + MAILSUBJECT)
sms = "0"
host = ""
recipients = []
watchfor = ""
warnif = ""
port = "0"
active = "1"
timeoutalert = "0"
for oAttributes in oServer.childNodes:
if oAttributes.nodeType != minidom.Node.TEXT_NODE:
if oAttributes.nodeName.lower() == "host":
host = oAttributes.childNodes[0].nodeValue
if oAttributes.nodeName.lower() == "port":
port = oAttributes.childNodes[0].nodeValue
if oAttributes.nodeName.lower() == "recipients":
recipients.append(oAttributes.childNodes[0].nodeValue)
if oAttributes.nodeName.lower() == "warnif":
warnif = oAttributes.childNodes[0].nodeValue
if oAttributes.nodeName.lower() == "watchfor":
watchfor = oAttributes.childNodes[0].nodeValue
if oAttributes.nodeName.lower() == "sms":
sms = oAttributes.childNodes[0].nodeValue
if oAttributes.nodeName.lower() == "timeoutalert":
timeoutalert = oAttributes.childNodes[0].nodeValue
if oAttributes.nodeName.lower() == "active":
active = oAttributes.childNodes[0].nodeValue
if type == "http":
if port == "0":
port = "80"
if active == "1":
CheckHTTPServer(host, port, recipients, watchfor, warnif, sms, timeoutalert)
else:
MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": Ignoring inactive server " + type + "://" + host + ":" + port + " - edit config to reenable")
if type == "smtp":
if port == "0":
port = "25"
if active == "1":
CheckSMTPServer(host, port, recipients, watchfor, sms, timeoutalert)
else:
MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": Ignoring inactive server " + type + "://" + host + ":" + port + " - edit config to reenable")
if type == "pop3" or type == "pop":
if port == "0":
port = "110"
if active == "1":
CheckPOP3Server(host, port, recipients, watchfor, sms, timeoutalert)
else:
MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": Ignoring inactive server " + type + "://" + host + ":" + port + " - edit config to reenable")
if type == "imap" or type == "imap4":
if port == "0":
port = "143"
if active == "1":
CheckIMAPServer(host, port, recipients, watchfor, sms, timeoutalert)
else:
            MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": Ignoring inactive server " + type + "://" + host + ":" + port + " - edit config to reenable")
template_name = 'dcim/device_component_add.html'
class InterfaceEditView(ObjectEditView):
queryset = Interface.objects.all()
model_form = forms.InterfaceForm
template_name = 'dcim/interface_edit.html'
class InterfaceDeleteView(ObjectDeleteView):
queryset = Interface.objects.all()
class InterfaceBulkImportView(BulkImportView):
queryset = Interface.objects.all()
model_form = forms.InterfaceCSVForm
table = tables.InterfaceTable
class InterfaceBulkEditView(BulkEditView):
queryset = Interface.objects.all()
filterset = filters.InterfaceFilterSet
table = tables.InterfaceTable
form = forms.InterfaceBulkEditForm
class InterfaceBulkRenameView(BulkRenameView):
queryset = Interface.objects.all()
class InterfaceBulkDisconnectView(BulkDisconnectView):
queryset = Interface.objects.all()
class InterfaceBulkDeleteView(BulkDeleteView):
queryset = Interface.objects.all()
filterset = filters.InterfaceFilterSet
table = tables.InterfaceTable
#
# Front ports
#
class FrontPortListView(ObjectListView):
queryset = FrontPort.objects.prefetch_related('device', 'cable')
filterset = filters.FrontPortFilterSet
filterset_form = forms.FrontPortFilterForm
table = tables.FrontPortTable
action_buttons = ('import', 'export')
class FrontPortView(ObjectView):
queryset = FrontPort.objects.all()
class FrontPortCreateView(ComponentCreateView):
queryset = FrontPort.objects.all()
form = forms.FrontPortCreateForm
model_form = forms.FrontPortForm
template_name = 'dcim/device_component_add.html'
class FrontPortEditView(ObjectEditView):
queryset = FrontPort.objects.all()
model_form = forms.FrontPortForm
template_name = 'dcim/device_component_edit.html'
class FrontPortDeleteView(ObjectDeleteView):
queryset = FrontPort.objects.all()
class FrontPortBulkImportView(BulkImportView):
queryset = FrontPort.objects.all()
model_form = forms.FrontPortCSVForm
table = tables.FrontPortTable
class FrontPortBulkEditView(BulkEditView):
queryset = FrontPort.objects.all()
filterset = filters.FrontPortFilterSet
table = tables.FrontPortTable
form = forms.FrontPortBulkEditForm
class FrontPortBulkRenameView(BulkRenameView):
queryset = FrontPort.objects.all()
class FrontPortBulkDisconnectView(BulkDisconnectView):
queryset = FrontPort.objects.all()
class FrontPortBulkDeleteView(BulkDeleteView):
queryset = FrontPort.objects.all()
filterset = filters.FrontPortFilterSet
table = tables.FrontPortTable
#
# Rear ports
#
class RearPortListView(ObjectListView):
queryset = RearPort.objects.prefetch_related('device', 'cable')
filterset = filters.RearPortFilterSet
filterset_form = forms.RearPortFilterForm
table = tables.RearPortTable
action_buttons = ('import', 'export')
class RearPortView(ObjectView):
queryset = RearPort.objects.all()
class RearPortCreateView(ComponentCreateView):
queryset = RearPort.objects.all()
form = forms.RearPortCreateForm
model_form = forms.RearPortForm
template_name = 'dcim/device_component_add.html'
class RearPortEditView(ObjectEditView):
queryset = RearPort.objects.all()
model_form = forms.RearPortForm
template_name = 'dcim/device_component_edit.html'
class RearPortDeleteView(ObjectDeleteView):
queryset = RearPort.objects.all()
class RearPortBulkImportView(BulkImportView):
queryset = RearPort.objects.all()
model_form = forms.RearPortCSVForm
table = tables.RearPortTable
class RearPortBulkEditView(BulkEditView):
queryset = RearPort.objects.all()
filterset = filters.RearPortFilterSet
table = tables.RearPortTable
form = forms.RearPortBulkEditForm
class RearPortBulkRenameView(BulkRenameView):
queryset = RearPort.objects.all()
class RearPortBulkDisconnectView(BulkDisconnectView):
queryset = RearPort.objects.all()
class RearPortBulkDeleteView(BulkDeleteView):
queryset = RearPort.objects.all()
filterset = filters.RearPortFilterSet
table = tables.RearPortTable
#
# Device bays
#
class DeviceBayListView(ObjectListView):
queryset = DeviceBay.objects.prefetch_related('device', 'installed_device')
filterset = filters.DeviceBayFilterSet
filterset_form = forms.DeviceBayFilterForm
table = tables.DeviceBayTable
action_buttons = ('import', 'export')
class DeviceBayView(ObjectView):
queryset = DeviceBay.objects.all()
class DeviceBayCreateView(ComponentCreateView):
queryset = DeviceBay.objects.all()
form = forms.DeviceBayCreateForm
model_form = forms.DeviceBayForm
template_name = 'dcim/device_component_add.html'
class DeviceBayEditView(ObjectEditView):
queryset = DeviceBay.objects.all()
model_form = forms.DeviceBayForm
template_name = 'dcim/device_component_edit.html'
class DeviceBayDeleteView(ObjectDeleteView):
queryset = DeviceBay.objects.all()
class DeviceBayPopulateView(ObjectEditView):
queryset = DeviceBay.objects.all()
def get(self, request, pk):
device_bay = get_object_or_404(self.queryset, pk=pk)
form = forms.PopulateDeviceBayForm(device_bay)
return render(request, 'dcim/devicebay_populate.html', {
'device_bay': device_bay,
'form': form,
'return_url': reverse('dcim:device', kwargs={'pk': device_bay.device.pk}),
})
def post(self, request, pk):
device_bay = get_object_or_404(self.queryset, pk=pk)
form = forms.PopulateDeviceBayForm(device_bay, request.POST)
if form.is_valid():
device_bay.installed_device = form.cleaned_data['installed_device']
device_bay.save()
messages.success(request, "Added {} to {}.".format(device_bay.installed_device, device_bay))
return redirect('dcim:device', pk=device_bay.device.pk)
return render(request, 'dcim/devicebay_populate.html', {
'device_bay': device_bay,
'form': form,
'return_url': reverse('dcim:device', kwargs={'pk': device_bay.device.pk}),
})
class DeviceBayDepopulateView(ObjectEditView):
queryset = DeviceBay.objects.all()
def get(self, request, pk):
device_bay = get_object_or_404(self.queryset, pk=pk)
form = ConfirmationForm()
return render(request, 'dcim/devicebay_depopulate.html', {
'device_bay': device_bay,
'form': form,
'return_url': reverse('dcim:device', kwargs={'pk': device_bay.device.pk}),
})
def post(self, request, pk):
device_bay = get_object_or_404(self.queryset, pk=pk)
form = ConfirmationForm(request.POST)
if form.is_valid():
removed_device = device_bay.installed_device
device_bay.installed_device = None
device_bay.save()
messages.success(request, "{} has been removed from {}.".format(removed_device, device_bay))
return redirect('dcim:device', pk=device_bay.device.pk)
return render(request, 'dcim/devicebay_depopulate.html', {
'device_bay': device_bay,
'form': form,
'return_url': reverse('dcim:device', kwargs={'pk': device_bay.device.pk}),
})
class DeviceBayBulkImportView(BulkImportView):
queryset = DeviceBay.objects.all()
model_form = forms.DeviceBayCSVForm
table = tables.DeviceBayTable
class DeviceBayBulkEditView(BulkEditView):
queryset = DeviceBay.objects.all()
filterset = filters.DeviceBayFilterSet
table = tables.DeviceBayTable
form = forms.DeviceBayBulkEditForm
class DeviceBayBulkRenameView(BulkRenameView):
queryset = DeviceBay.objects.all()
class DeviceBayBulkDeleteView(BulkDeleteView):
queryset = DeviceBay.objects.all()
filterset = filters.DeviceBayFilterSet
table = tables.DeviceBayTable
#
# Inventory items
#
class InventoryItemListView(ObjectListView):
queryset = InventoryItem.objects.prefetch_related('device', 'manufacturer')
filterset = filters.InventoryItemFilterSet
filterset_form = forms.InventoryItemFilterForm
table = tables.InventoryItemTable
action_buttons = ('import', 'export')
class InventoryItemView(ObjectView):
queryset = InventoryItem.objects.all()
class InventoryItemEditView(ObjectEditView):
queryset = InventoryItem.objects.all()
model_form = forms.InventoryItemForm
class InventoryItemCreateView(ComponentCreateView):
queryset = InventoryItem.objects.all()
form = forms.InventoryItemCreateForm
model_form = forms.InventoryItemForm
template_name = 'dcim/device_component_add.html'
class InventoryItemDeleteView(ObjectDeleteView):
queryset = InventoryItem.objects.all()
class InventoryItemBulkImportView(BulkImportView):
queryset = InventoryItem.objects.all()
model_form = forms.InventoryItemCSVForm
table = tables.InventoryItemTable
class InventoryItemBulkEditView(BulkEditView):
queryset = InventoryItem.objects.prefetch_related('device', 'manufacturer')
filterset = filters.InventoryItemFilterSet
table = tables.InventoryItemTable
form = forms.InventoryItemBulkEditForm
class InventoryItemBulkRenameView(BulkRenameView):
queryset = InventoryItem.objects.all()
class InventoryItemBulkDeleteView(BulkDeleteView):
queryset = InventoryItem.objects.prefetch_related('device', 'manufacturer')
table = tables.InventoryItemTable
template_name = 'dcim/inventoryitem_bulk_delete.html'
#
# Bulk Device component creation
#
class DeviceBulkAddConsolePortView(BulkComponentCreateView):
parent_model = Device
parent_field = 'device'
form = forms.ConsolePortBulkCreateForm
queryset = ConsolePort.objects.all()
model_form = forms.ConsolePortForm
filterset = filters.DeviceFilterSet
table = tables.DeviceTable
default_return_url = 'dcim:device_list'
class DeviceBulkAddConsoleServerPortView(BulkComponentCreateView):
parent_model = Device
parent_field = 'device'
form = forms.ConsoleServerPortBulkCreateForm
queryset = ConsoleServerPort.objects.all()
model_form = forms.ConsoleServerPortForm
filterset = filters.DeviceFilterSet
table = tables.DeviceTable
default_return_url = 'dcim:device_list'
class DeviceBulkAddPowerPortView(BulkComponentCreateView):
parent_model = Device
parent_field = 'device'
form = forms.PowerPortBulkCreateForm
queryset = PowerPort.objects.all()
model_form = forms.PowerPortForm
filterset = filters.DeviceFilterSet
table = tables.DeviceTable
default_return_url = 'dcim:device_list'
class DeviceBulkAddPowerOutletView(BulkComponentCreateView):
parent_model = Device
parent_field = 'device'
form = forms.PowerOutletBulkCreateForm
queryset = PowerOutlet.objects.all()
model_form = forms.PowerOutletForm
filterset = filters.DeviceFilterSet
table = tables.DeviceTable
default_return_url = 'dcim:device_list'
class DeviceBulkAddInterfaceView(BulkComponentCreateView):
parent_model = Device
parent_field = 'device'
form = forms.InterfaceBulkCreateForm
queryset = Interface.objects.all()
model_form = forms.InterfaceForm
filterset = filters.DeviceFilterSet
table = tables.DeviceTable
default_return_url = 'dcim:device_list'
# class DeviceBulkAddFrontPortView(BulkComponentCreateView):
# parent_model = Device
# parent_field = 'device'
# form = forms.FrontPortBulkCreateForm
# queryset = FrontPort.objects.all()
# model_form = forms.FrontPortForm
# filterset = filters.DeviceFilterSet
# table = tables.DeviceTable
# default_return_url = 'dcim:device_list'
class DeviceBulkAddRearPortView(BulkComponentCreateView):
parent_model = Device
parent_field = 'device'
form = forms.RearPortBulkCreateForm
queryset = RearPort.objects.all()
model_form = forms.RearPortForm
filterset = filters.DeviceFilterSet
table = tables.DeviceTable
default_return_url = 'dcim:device_list'
class DeviceBulkAddDeviceBayView(BulkComponentCreateView):
parent_model = Device
parent_field = 'device'
form = forms.DeviceBayBulkCreateForm
queryset = DeviceBay.objects.all()
model_form = forms.DeviceBayForm
filterset = filters.DeviceFilterSet
table = tables.DeviceTable
default_return_url = 'dcim:device_list'
class DeviceBulkAddInventoryItemView(BulkComponentCreateView):
parent_model = Device
parent_field = 'device'
form = forms.InventoryItemBulkCreateForm
queryset = InventoryItem.objects.all()
model_form = forms.InventoryItemForm
filterset = filters.DeviceFilterSet
table = tables.DeviceTable
default_return_url = 'dcim:device_list'
#
# Cables
#
class CableListView(ObjectListView):
queryset = Cable.objects.prefetch_related(
'termination_a', 'termination_b'
)
filterset = filters.CableFilterSet
filterset_form = forms.CableFilterForm
table = tables.CableTable
action_buttons = ('import', 'export')
class CableView(ObjectView):
queryset = Cable.objects.all()
def get(self, request, pk):
cable = get_object_or_404(self.queryset, pk=pk)
return render(request, 'dcim/cable.html', {
'cable': cable,
})
class CableTraceView(ObjectView):
"""
Trace a cable path beginning from the given termination.
"""
additional_permissions = ['dcim.view_cable']
def dispatch(self, request, *args, **kwargs):
model = kwargs.pop('model')
self.queryset = model.objects.all()
return super().dispatch(request, *args, **kwargs)
def get(self, request, pk):
obj = get_object_or_404(self.queryset, pk=pk)
path, split_ends, position_stack = obj.trace()
total_length = sum(
[entry[1]._abs_length for entry in path if entry[1] and entry[1]._abs_length]
)
return render(request, 'dcim/cable_trace.html', {
'obj': obj,
'trace': path,
'split_ends': split_ends,
'position_stack': position_stack,
'total_length': total_length,
})
class CableCreateView(ObjectEditView):
queryset = Cable.objects.all()
template_name = 'dcim/cable_connect.html'
def dispatch(self, request, *args, **kwargs):
# Set the model_form class based on the type of component being connected
self.model_form = {
'console-port': forms.ConnectCableToConsolePortForm,
'console-server-port': forms.ConnectCableToConsoleServerPortForm,
'power-port': forms.ConnectCableToPowerPortForm,
'power-outlet': forms.ConnectCableToPowerOutletForm,
'interface': forms.ConnectCableToInterfaceForm,
'front-port': forms.ConnectCableToFrontPortForm,
'rear-port': forms.ConnectCableToRearPortForm,
'power-feed': forms.ConnectCableToPowerFeedForm,
'circuit-termination': forms.ConnectCableToCircuitTerminationForm,
}[kwargs.get('termination_b_type')]
return super().dispatch(request, *args, **kwargs)
def alter_obj(self, obj, request, url_args, url_kwargs):
termination_a_type = url_kwargs.get('termination_a_type')
termination_a_id = url_kwargs.get('termination_a_id')
termination_b_type_name = url_kwargs.get('termination_b_type')
self.termination_b_type = ContentType.objects.get(model=termination_b_type_name.replace('-', ''))
# Initialize Cable termination attributes
obj.termination_a = termination_a_type.objects.get(pk=termination_a_id)
obj.termination_b_type = self.termination_b_type
return obj
def get(self, request, *args, **kwargs):
obj = self.alter_obj(self.get_object(kwargs), request, args, kwargs)
# Parse initial data manually to avoid setting field values as lists
initial_data = {k: request.GET[k] for k in request.GET}
# Set initial site and rack based on side A termination (if not already set)
termination_a_site = getattr(obj.termination_a.parent, 'site', None)
if termination_a_site and 'termination_b_region' not in initial_data:
initial_data['termination_b_region'] = termination_a_site.region
if 'termination_b_site' not in initial_data:
initial_data['termination_b_site'] = termination_a_site
if 'termination_b_rack' not in initial_data:
initial_data['termination_b_rack'] = getattr(obj.termination_a.parent, 'rack', None)
form = self.model_form(instance=obj, initial=initial_data)
return render(request, self.template_name, {
'obj': obj,
'obj_type': Cable._meta.verbose_name,
'termination_b_type': self.termination_b_type.name,
'form': form,
'return_url': self.get_return_url(request, obj),
})
class CableEditView(ObjectEditView):
queryset = Cable.objects.all()
model_form = forms.CableForm
template_name = 'dcim/cable_edit.html'
class CableDeleteView(ObjectDeleteView):
queryset = Cable.objects.all()
class CableBulkImportView(BulkImportView):
queryset = Cable.objects.all()
model_form = forms.CableCSVForm
table = tables.CableTable
class CableBulkEditView(BulkEditView):
queryset = Cable.objects.prefetch_related('termination_a', 'termination_b')
filterset = filters.CableFilterSet
table = tables.CableTable
form = forms.CableBulkEditForm
class CableBulkDeleteView(BulkDeleteView):
queryset = Cable.objects.prefetch_related('termination_a', 'termination_b')
filterset = filters.CableFilterSet
table = tables.CableTable
#
# Connections
#
class ConsoleConnectionsListView(ObjectListView):
queryset = ConsolePort.objects.prefetch_related(
'device', 'connected_endpoint__device'
).filter(
connected_endpoint__isnull=False
).order_by(
'cable', 'connected_endpoint__device__name', 'connected_endpoint__name'
)
filterset = filters.ConsoleConnectionFilterSet
filterset_form = forms.ConsoleConnectionFilterForm
table = tables.ConsoleConnectionTable
template_name = 'dcim/console_connections_list.html'
def queryset_to_csv(self):
csv_data = [
# Headers
','.join(['console_server', 'port', 'device', 'console_port', 'connection_status'])
]
for obj in self.queryset:
csv = csv_format([
obj.connected_endpoint.device.identifier if obj.connected_endpoint else None,
obj.connected_endpoint.name if obj.connected_endpoint else None,
obj.device.identifier,
obj.name,
obj.get_connection_status_display(),
])
csv_data.append(csv)
return '\n'.join(csv_data)
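        # Example CSV output (illustrative):
        #   console_server,port,device,console_port,connection_status
        #   console-srv-1,port7,edge-router-1,console,Connected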
class PowerConnectionsListView(ObjectListView):
queryset = PowerPort.objects.prefetch_related(
'device', '_connected_poweroutlet__device'
).filter(
_connected_poweroutlet__isnull=False
).order_by(
'cable', '_connected_poweroutlet__device__name', '_connected_poweroutlet__name'
)
filterset = filters.PowerConnectionFilterSet
filterset_form = forms.PowerConnectionFilterForm
table = tables.PowerConnectionTable
template_name = 'dcim/power_connections_list.html'
def queryset_to_csv(self):
csv_data = [
# Headers
','.join(['pdu', 'outlet', 'device', 'power_port', 'connection_status'])
]
for obj in self.queryset:
csv = csv_format([
obj.connected_endpoint.device.identifier if obj.connected_endpoint else None,
obj.connected_endpoint.name if obj.connected_endpoint else None,
obj.device.identifier,
obj.name,
obj.get_connection_status_display(),
])
csv_data.append(csv)
return '\n'.join(csv_data)
class InterfaceConnectionsListView(ObjectListView):
queryset = Interface.objects.prefetch_related(
'device', 'cable', '_connected_interface__device'
).filter(
# Avoid duplicate connections by only selecting the lower PK in a connected pair
_connected_interface__isnull=False,
pk__lt=F('_connected_interface')
).order_by(
'device'
def EvacuateNode(opts, args):
  """Relocate instances away from a node.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the name of
      the node to be evacuated
  @rtype: int
  @return: the desired exit code
  """
if opts.dst_node is not None:
ToStderr("New secondary node given (disabling iallocator), hence evacuating"
" secondary instances only.")
opts.secondary_only = True
opts.primary_only = False
if opts.secondary_only and opts.primary_only:
raise errors.OpPrereqError("Only one of the --primary-only and"
" --secondary-only options can be passed",
errors.ECODE_INVAL)
elif opts.primary_only:
mode = constants.NODE_EVAC_PRI
elif opts.secondary_only:
mode = constants.NODE_EVAC_SEC
else:
mode = constants.NODE_EVAC_ALL
# Determine affected instances
fields = []
if not opts.secondary_only:
fields.append("pinst_list")
if not opts.primary_only:
fields.append("sinst_list")
cl = GetClient()
qcl = GetClient()
result = qcl.QueryNodes(names=args, fields=fields, use_locking=False)
qcl.Close()
  instances = set(itertools.chain(*itertools.chain(*result)))
if not instances:
# No instances to evacuate
ToStderr("No instances to evacuate on node(s) %s, exiting.",
utils.CommaJoin(args))
return constants.EXIT_SUCCESS
if not (opts.force or
AskUser("Relocate instance(s) %s from node(s) %s?" %
(utils.CommaJoin(utils.NiceSort(instances)),
utils.CommaJoin(args)))):
return constants.EXIT_CONFIRMATION
# Evacuate node
op = opcodes.OpNodeEvacuate(node_name=args[0], mode=mode,
remote_node=opts.dst_node,
iallocator=opts.iallocator,
early_release=opts.early_release,
ignore_soft_errors=opts.ignore_soft_errors)
result = SubmitOrSend(op, opts, cl=cl)
# Keep track of submitted jobs
jex = JobExecutor(cl=cl, opts=opts)
for (status, job_id) in result[constants.JOB_IDS_KEY]:
jex.AddJobId(None, status, job_id)
results = jex.GetResults()
bad_cnt = len([row for row in results if not row[0]])
if bad_cnt == 0:
ToStdout("All instances evacuated successfully.")
rcode = constants.EXIT_SUCCESS
else:
ToStdout("There were %s errors during the evacuation.", bad_cnt)
rcode = constants.EXIT_FAILURE
return rcode
def FailoverNode(opts, args):
"""Failover all primary instance on a node.
@param opts: the command line options selected by the user
@type args: list
@param args: should be an empty list
@rtype: int
@return: the desired exit code
"""
cl = GetClient()
force = opts.force
selected_fields = ["name", "pinst_list"]
# these fields are static data anyway, so it doesn't matter, but
# locking=True should be safer
qcl = GetClient()
result = qcl.QueryNodes(names=args, fields=selected_fields,
use_locking=False)
qcl.Close()
node, pinst = result[0]
if not pinst:
ToStderr("No primary instances on node %s, exiting.", node)
return 0
pinst = utils.NiceSort(pinst)
retcode = 0
if not force and not AskUser("Fail over instance(s) %s?" %
(",".join("'%s'" % name for name in pinst))):
return 2
jex = JobExecutor(cl=cl, opts=opts)
for iname in pinst:
op = opcodes.OpInstanceFailover(instance_name=iname,
ignore_consistency=opts.ignore_consistency,
iallocator=opts.iallocator)
jex.QueueJob(iname, op)
results = jex.GetResults()
bad_cnt = len([row for row in results if not row[0]])
if bad_cnt == 0:
ToStdout("All %d instance(s) failed over successfully.", len(results))
else:
ToStdout("There were errors during the failover:\n"
"%d error(s) out of %d instance(s).", bad_cnt, len(results))
return retcode
def MigrateNode(opts, args):
"""Migrate all primary instance on a node.
"""
cl = GetClient()
force = opts.force
selected_fields = ["name", "pinst_list"]
qcl = GetClient()
result = qcl.QueryNodes(names=args, fields=selected_fields, use_locking=False)
qcl.Close()
((node, pinst), ) = result
if not pinst:
ToStdout("No primary instances on node %s, exiting." % node)
return 0
pinst = utils.NiceSort(pinst)
if not (force or
AskUser("Migrate instance(s) %s?" %
utils.CommaJoin(utils.NiceSort(pinst)))):
return constants.EXIT_CONFIRMATION
# this should be removed once --non-live is deprecated
if not opts.live and opts.migration_mode is not None:
raise errors.OpPrereqError("Only one of the --non-live and "
"--migration-mode options can be passed",
errors.ECODE_INVAL)
if not opts.live: # --non-live passed
mode = constants.HT_MIGRATION_NONLIVE
else:
mode = opts.migration_mode
op = opcodes.OpNodeMigrate(node_name=args[0], mode=mode,
iallocator=opts.iallocator,
target_node=opts.dst_node,
allow_runtime_changes=opts.allow_runtime_chgs,
ignore_ipolicy=opts.ignore_ipolicy)
result = SubmitOrSend(op, opts, cl=cl)
# Keep track of submitted jobs
jex = JobExecutor(cl=cl, opts=opts)
for (status, job_id) in result[constants.JOB_IDS_KEY]:
jex.AddJobId(None, status, job_id)
results = jex.GetResults()
bad_cnt = len([row for row in results if not row[0]])
if bad_cnt == 0:
ToStdout("All instances migrated successfully.")
rcode = constants.EXIT_SUCCESS
else:
ToStdout("There were %s errors during the node migration.", bad_cnt)
rcode = constants.EXIT_FAILURE
return rcode
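# Example invocations (illustrative; option spellings follow the references in
# the functions above, e.g. --primary-only/--secondary-only and --non-live):
#   gnt-node evacuate --secondary-only node1.example.com
#   gnt-node migrate --non-live node2.example.com
#   gnt-node failover node3.example.com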
def _FormatNodeInfo(node_info):
"""Format node information for L{cli.PrintGenericInfo()}.
"""
(name, primary_ip, secondary_ip, pinst, sinst, is_mc, drained, offline,
master_capable, vm_capable, powered, ndparams, ndparams_custom) = node_info
info = [
("Node name", name),
("primary ip", primary_ip),
("secondary ip", secondary_ip),
("master candidate", is_mc),
("drained", drained),
("offline", offline),
]
if powered is not None:
info.append(("powered", powered))
info.extend([
("master_capable", master_capable),
("vm_capable", vm_capable),
])
if vm_capable:
info.extend([
("primary for instances",
[iname for iname in utils.NiceSort(pinst)]),
("secondary for instances",
[iname for iname in utils.NiceSort(sinst)]),
])
info.append(("node parameters",
FormatParamsDictInfo(ndparams_custom, ndparams)))
return info
def ShowNodeConfig(opts, args):
"""Show node information.
@param opts: the command line options selected by the user
@type args: list
@param args: should either be an empty list, in which case
we show information about all nodes, or should contain
a list of nodes to be queried for information
@rtype: int
@return: the desired exit code
"""
cl = GetClient()
result = cl.QueryNodes(fields=["name", "pip", "sip",
"pinst_list", "sinst_list",
"master_candidate", "drained", "offline",
"master_capable", "vm_capable", "powered",
"ndparams", "custom_ndparams"],
names=args, use_locking=False)
PrintGenericInfo([
_FormatNodeInfo(node_info)
for node_info in result
])
return 0
def RemoveNode(opts, args):
"""Remove a node from the cluster.
@param opts: the command line options selected by the user
@type args: list
@param args: should contain only one element, the name of
the node to be removed
@rtype: int
@return: the desired exit code
"""
op = opcodes.OpNodeRemove(node_name=args[0])
SubmitOpCode(op, opts=opts)
return 0
def PowercycleNode(opts, args):
"""Remove a node from the cluster.
@param opts: the command line options selected by the user
@type args: list
@param args: should contain only one element, the name of
the node to be removed
@rtype: int
@return: the desired exit code
"""
node = args[0]
if (not opts.confirm and
not AskUser("Are you sure you want to hard powercycle node %s?" % node)):
return 2
op = opcodes.OpNodePowercycle(node_name=node, force=opts.force)
result = SubmitOrSend(op, opts)
if result:
ToStderr(result)
return 0
def PowerNode(opts, args):
"""Change/ask power state of a node.
@param opts: the command line options selected by the user
@type args: list
  @param args: the first element is the power subcommand, followed by
      the names of the nodes to operate on
@rtype: int
@return: the desired exit code
"""
command = args.pop(0)
if opts.no_headers:
headers = None
else:
headers = {"node": "Node", "status": "Status"}
if command not in _LIST_POWER_COMMANDS:
ToStderr("power subcommand %s not supported." % command)
return constants.EXIT_FAILURE
oob_command = "power-%s" % command
if oob_command in _OOB_COMMAND_ASK:
if not args:
ToStderr("Please provide at least one node for this command")
return constants.EXIT_FAILURE
elif not opts.force and not ConfirmOperation(args, "nodes",
"power %s" % command):
return constants.EXIT_FAILURE
assert len(args) > 0
opcodelist = []
if not opts.ignore_status and oob_command == constants.OOB_POWER_OFF:
# TODO: This is a little ugly as we can't catch and revert
for node in args:
opcodelist.append(opcodes.OpNodeSetParams(node_name=node, offline=True,
auto_promote=opts.auto_promote))
opcodelist.append(opcodes.OpOobCommand(node_names=args,
command=oob_command,
ignore_status=opts.ignore_status,
timeout=opts.oob_timeout,
power_delay=opts.power_delay))
cli.SetGenericOpcodeOpts(opcodelist, opts)
job_id = cli.SendJob(opcodelist)
# We just want the OOB Opcode status
# If it fails PollJob gives us the error message in it
result = cli.PollJob(job_id)[-1]
errs = 0
data = []
for node_result in result:
(node_tuple, data_tuple) = node_result
(_, node_name) = node_tuple
(data_status, data_node) = data_tuple
if data_status == constants.RS_NORMAL:
if oob_command == constants.OOB_POWER_STATUS:
if data_node[constants.OOB_POWER_STATUS_POWERED]:
text = "powered"
else:
text = "unpowered"
data.append([node_name, text])
else:
# We don't expect data here, so we just say, it was successfully invoked
data.append([node_name, "invoked"])
else:
errs += 1
data.append([node_name, cli.FormatResultError(data_status, True)])
data = GenerateTable(separator=opts.separator, headers=headers,
fields=["node", "status"], data=data)
for line in data:
ToStdout(line)
if errs:
return constants.EXIT_FAILURE
else:
return constants.EXIT_SUCCESS
def Health(opts, args):
"""Show health of a node using OOB.
@param opts: the command line options selected by the user
@type args: list
  @param args: should contain the names of the nodes to query
@rtype: int
@return: the desired exit code
"""
op = opcodes.OpOobCommand(node_names=args, command=constants.OOB_HEALTH,
timeout=opts.oob_timeout)
result = SubmitOpCode(op, opts=opts)
if opts.no_headers:
headers = None
else:
headers = {"node": "Node", "status": "Status"}
errs = 0
data = []
for node_result in result:
(node_tuple, data_tuple) = node_result
(_, node_name) = node_tuple
(data_status, data_node) = data_tuple
if data_status == constants.RS_NORMAL:
data.append([node_name, "%s=%s" % tuple(data_node[0])])
for item, status in data_node[1:]:
data.append(["", "%s=%s" % (item, status)])
else:
errs += 1
data.append([node_name, cli.FormatResultError(data_status, True)])
data = GenerateTable(separator=opts.separator, headers=headers,
fields=["node", "status"], data=data)
for line in data:
ToStdout(line)
if errs:
return constants.EXIT_FAILURE
else:
return constants.EXIT_SUCCESS
def ListVolumes(opts, args):
"""List logical volumes on node(s).
@param opts: the command line options selected by the user
@type args: list
@param args: should either be an empty list, in which case
we list data for all nodes, or contain a list of nodes
to display data only for those
@rtype: int
  @return: the desired exit code
  """
starting {0}".format(
# service))
# node.run_cmd("service {0} start".format(service))
sleep(1)
logger.debug("{0} is up on {1}!".format(service, node.name))
def verify(self, builds, progress, node_up, node_down=None):
"""
Verifies state persistence
"""
logger.info("Verifying cluster integrity...")
progress.set_stages("Progress", 14)
progress.update("Progress", 0)
# Verify that node_up IS INDEED up... (yes it's necessary)
while not self.is_online(node_up.ipaddress):
sleep(1)
# Checks if RS Cloud libvirt issue has been resolved
computes_reporting = False
while not computes_reporting:
logger.debug("Checking if compute nodes are checked in")
progress.update("Progress")
libvirt = node_up.run_cmd(";".join(["source openrc",
("nova service-list | "
"grep 'nova-compute' "
"| awk '{print $10}'")]))['return']
if "down" in libvirt:
logger.warning(Color.yellow("Compute nodes are unchecked"))
continue
elif "up" in libvirt:
logger.debug(Color.green("Compute nodes are checked in."))
computes_reporting = True
progress.update("Progress", 1)
# Check RPCS services (ha_proxy, keepalived, rpc daemon)
services = ['haproxy', 'keepalived', 'rpcdaemon']
for service in services:
self.wait_service(service, node_up)
progress.update("Progress", 1)
if node_down:
for service in services:
self.wait_service(service, node_down)
progress.update("Progress", 1)
else:
progress.update("Progress", 3)
# Check that the VIPS moved over to node_up
logger.debug("Checking for vips on {0}".format(node_up.name))
exec_vips = node_up.run_cmd("ip netns exec vips ip a")['return']
progress.update("Progress", 1)
exec_vips_down = " "
if node_down:
logger.debug("Checking for vips on {0}".format(
node_down.name))
exec_vips_down = node_down.run_cmd("ip netns exec vips ip a")[
'return']
progress.update("Progress", 1)
vips = self.deployment.override_attrs['vips']['config'].keys()
progress.update("Progress", 1)
for vip in vips:
logger.debug("VIP: {0}".format(vip))
logger.debug("Verifying VIP namespace.")
# Checks if the vips are absent from both controllers
while (vip not in exec_vips) and (vip not in exec_vips_down):
sleep(1)
exec_vips = node_up.run_cmd("ip netns exec vips "
"ip a")['return']
if node_down:
exec_vips_down = node_down.run_cmd("ip netns exec vips "
"ip a")['return']
# Verifies that the vips do not reside on both servers
if (vip in exec_vips) and (vip in exec_vips_down):
assert vip not in exec_vips, ("{0} vip found on both "
"controllers").format(vip)
# Checks for the vips on node_up controller
elif vip in exec_vips:
logger.debug("{0} vip found in {1}...".format(
vip, node_up.name))
# Checks for the vips on the node_down controller
else:
logger.debug("{0} vip found on {1}...".format(
vip, node_down.name))
progress.update("Progress", 1)
###########################################################################
# IP NETNS NEEDS TO CONTAIN NEUTRON NET-LIST?
# ip_netns_value = node_up.run_cmd("ip netns")['return'].rstrip()
###########################################################################
# Check networks rescheduled
for build in builds:
logger.debug("Checking DHCP on {0}".format(build.name))
self.wait_dhcp_agent_alive(build.network_id, progress)
progress.update("Progress", 1)
#-----------------------------------------------------------------
# Check connectivity to builds
logger.info("Checking connectivity to builds...")
for build in builds:
logger.debug("Skipping connectivity test: {0}".format(build.name))
# while not self.is_online(build.ip_info['floating_ip_address']):
# logger.debug("Build {0} with IP {1} IS NOT "
# "responding...".
# format(build.name,
# build.ip_info[
# 'floating_ip_address']))
# progress.update("Progress")
# logger.debug("Build {0} with IP {1} IS responding...".
# format(build.name,
# build.ip_info['floating_ip_address']))
progress.update("Progress")
#-----------------------------------------------------------------
###########################################################################
# Check MySQL replication isn't broken and Controller2 is master.
#CAM
###########################################################################
# Check rabbitmq
self.test_rabbit_status(progress, node_up, node_down)
progress.update("Progress", 1)
###########################################################################
# Check if all the configured Openstack Services are functional.
# Run tempest based on the features enabled.
#SELECTIVE TEMPEST RUN
###########################################################################
###################################################################
# Verifies that the compute nodes are able to report
###################################################################
nova_status = "down"
while "down" in nova_status:
logger.debug("Checking if nova is up on compute")
progress.update("Progress")
nova_status = node_up.run_cmd(";".join(["source openrc", "nova "
"service-list | grep "
"compute | awk '{print "
"$10}'"
""]))['return'].rstrip()
if "down" in nova_status:
logger.warning(Color.yellow(
"At least one compute node isn't properly reporting"))
else:
logger.debug("All compute nodes are properly reporting")
progress.update("Progress", 1)
def wait_dhcp_agent_alive(self, net, progress, wait=240):
"""
Waits until dhcp agent for net is alive
"""
count = 0
neutron_up = False
while not neutron_up:
            try:
                dhcp_stat = self.neutron.list_dhcp_agent_hosting_networks(net)
                neutron_up = True
            except Exception:
                logger.warning("Neutron is not up yet")
                sleep(1)
in_time = lambda x: wait > x
while not dhcp_stat['agents'] and in_time(count):
logger.debug("Waiting for agents to populate {0}".format(
count))
progress.update("Progress")
sleep(1)
count += 1
dhcp_stat = self.neutron.list_dhcp_agent_hosting_networks(net)
assert in_time(count), "Agents failed to populate in time"
alive = False
while not alive and in_time(count):
logger.debug("Waiting for agents to arise {0}".format(
count))
progress.update("Progress")
sleep(1)
count += 1
dhcp_stat = self.neutron.list_dhcp_agent_hosting_networks(net)
try:
alive = dhcp_stat['agents'][0]['alive']
except IndexError:
logger.warning("Failed to retrieve alive DCHP agent.")
alive = False
continue
assert in_time(count), "Agents failed to rise in time"
logger.debug("DHCP is alive")
def run_tests(self):
"""
Run ha tests
"""
#-------------------#
# Preparation Begin #
#-------------------#
#branch = TempestQuantum.tempest_branch(self.deployment.branch)
#if "grizzly" in branch:
# tempest = TempestQuantum(self.deployment)
#else:
# tempest = TempestNeutron(self.deployment)
images = self.nova.images.list()
server_image = next(i for i in images if "cirros" in i.name)
flavors = self.nova.flavors.list()
server_flavor = next(f for f in flavors if "tiny" in f.name)
#-----------------#
# Preparation End #
#-----------------#
max_waves = 2
iterations = self.iterations
build_stages = 4 + (len(self.nova.hypervisors.list()) * 2 * max_waves)
verify_stages = 5
failover_stages = 2
failback_stages = 2
progress_stages = 100
bars = [{'name': 'Iteration', 'current': self.current_iteration,
'total': iterations, 'size': 100},
{'name': 'Build', 'current': 0,
'total': build_stages, 'size': 100},
{'name': 'Verify', 'current': 0,
'total': verify_stages, 'size': 100},
{'name': 'Failover', 'current': 0,
'total': failover_stages, 'size': 100},
{'name': 'Failback', 'current': 0,
'total': failback_stages, 'size': 100},
{'name': 'Progress', 'current': 0,
'total': progress_stages, 'size': 100}]
progress = Progress(bars, self.progress)
builds = []
node_up = self.controller1
node_down = self.controller2
stage = 0
wave = 0
while wave < max_waves:
wave += 1
#os.system('clear')
progress.display("Verify")
self.verify(builds, progress, node_up, node_down)
progress.advance("Verify")
#os.system('clear')
progress.display("Build")
hyp_name = None
build = self.build("testbuild{0}".format(stage),
server_image, server_flavor,
"testnetwork{0}".format(stage),
"testsubnet{0}".format(stage),
"testrouter{0}".format(stage),
"172.32.{0}.0/24".format(stage),
progress,
hyp_name)
stage += 1
builds.append(build)
progress.advance("Build")
for hypervisor in self.nova.hypervisors.list():
hyp_name = hypervisor.hypervisor_hostname
build = self.build("testbuild{0}".format(stage),
server_image, server_flavor,
"testnetwork{0}".format(stage),
"testsubnet{0}".format(stage),
"testrouter{0}".format(stage),
"172.32.{0}.0/24".format(stage),
progress,
"nova:{0}".format(hyp_name))
stage += 1
builds.append(build)
progress.advance("Build")
progress.display("Failover")
self.failover(progress, node_up, node_down)
progress.advance("Failover")
progress.display("Verify")
self.verify(builds, progress, node_up)
progress.advance("Verify")
progress.display("Build")
hyp_name = None
build = self.build("testbuild{0}".format(stage),
server_image, server_flavor,
"testnetwork{0}".format(stage),
"testsubnet{0}".format(stage),
"testrouter{0}".format(stage),
"172.32.{0}.0/24".format(stage),
progress,
hyp_name)
stage += 1
builds.append(build)
progress.advance("Build")
for hypervisor in self.nova.hypervisors.list():
hyp_name = hypervisor.hypervisor_hostname
build = self.build("testbuild{0}".format(stage),
server_image, server_flavor,
"testnetwork{0}".format(stage),
"testsubnet{0}".format(stage),
"testrouter{0}".format(stage),
"172.32.{0}.0/24".format(stage),
progress,
"nova:{0}".format(hyp_name))
stage += 1
builds.append(build)
progress.advance("Build")
progress.display("Failback")
self.failback(node_down, progress)
progress.advance("Failback")
node_temp = node_up
node_up = node_down
node_down = node_temp
progress.display("Verify")
self.verify(builds, progress, node_up, node_down)
progress.advance("Verify")
progress.display("Iteration")
for build in builds:
build.destroy(self.nova, self.neutron, progress, node_up,
node_down)
progress.advance("Iteration")
progress.display("Iteration")
self.current_iteration += 1
#tempest.test_node = node_up
#tempest.test()
    def kill_rabbit(self, node1, node2=None):
        logger.warning(Color.yellow("Remediation: Hard restarting RabbitMQ"))
        # Join with ";" without splitting the pipeline itself; a stray ";"
        # after a "|" inside the backticks would be a shell syntax error.
        restart_cmd = ";".join([
            "for i in `ps aux | grep [r]abbitmq | awk '{print $2}'`",
            "do kill -9 $i",
            "done",
            "service rabbitmq-server start"
        ])
        node1.run_cmd(restart_cmd)
        if node2:
            node2.run_cmd(restart_cmd)
def test_rabbit_status(self, progress, node1, node2=None):
"""
Assures rabbit is alive
"""
status = False
cycle = 1
max_cycle = 120
while not status:
logger.debug("Testing if RabbitMQ is alive: {0}".format(cycle))
progress.update("Progress")
            try:
                status = self.rabbit.is_alive()
                logger.debug("RabbitMQ is alive")
            except Exception:
                status = False
cycle += 1
if cycle > max_cycle:
self.kill_rabbit(node1, node2)
cycle = 1
sleep(1)
def test_list_queues(self):
"""
Assures rabbit can list queues
"""
queues = self.rabbit.list_queues()
assert queues is not None, "queues empty"
def collect_results(self):
"""
Collect report and merge tests
"""
xunit_merge()
def test(self, iterations):
self.iterations = iterations
self.run_tests()
self.collect_results()
# from threading import Thread
# t1 = Thread(target=self.run_multiple, args=(self.controller1, 1, ))
# t2 = Thread(target=self.run_multiple, args=(self.controller2, 2, ))
# t1.start()
# sleep(1)
# t2.start()
# t1.join()
# print "t1 finished!"
# t2.join()
# print "t2 finished!"
# def run_multiple(self, node, which):
# print "Running command {0}!".format(which)
# remote_cmd = ";".join(["for i in {0..10000}", "do echo $i", "done"])
# env.user = node.user
# env.host_string = node.ipaddress
# env.password = env.passwords[env.host_string] = node.password
# run(remote_cmd)
#from fabric.api import *
#from fabric.state import env
class Progress(object):
def __init__(self, bars, progress):
self.bars = bars
self.current = None
self.progress = progress
def advance(self, bar_name, adv_amount=1):
#logger.debug("Advancing {0}...".format(bar_name))
if not self.progress:
return
for bar in self.bars:
if bar['name'] == bar_name:
bar['current'] += adv_amount
def display(self, current_bar_name):
if not self.progress:
return
#logger.debug('Flushing print buffer for status bar...')
self.current = current_bar_name
#for i in range(210):
# sys.stdout.write("\b")
os.system('clear')
for bar in self.bars:
if bar['name'] == "Iteration":
self.print_bar(bar, bar['size'], 1)
elif bar['name'] == current_bar_name:
self.print_bar(bar, bar['size'], 1)
else:
self.print_bar(bar, bar['size'], 0)
sys.stdout.flush()
call(["tail -n 50 logs/monster.log | ",
"sed 's/^.*RPC-QE //' | ",
"cut -c-118"], shell=True)
def set_stages(self, bar_name, stages):
if not self.progress:
return
for bar in self.bars:
if bar['name'] == bar_name:
bar['total'] = stages
def update(self, bar_name, adv_amount=None):
if not self.progress:
return
# Advances bar without changing current bar indicator
# If value is 0, resets current progress position
<reponame>vast-data/vast-csi
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Python implementation of the GRPC helloworld.Greeter server."""
import os
import socket
from concurrent import futures
from functools import wraps
from pprint import pformat
import inspect
from uuid import uuid4
import psutil
from plumbum import local, ProcessExecutionError
from plumbum.typed_env import TypedEnv
import grpc
from easypy.tokens import Token, ROUNDROBIN, RANDOM, CONTROLLER_AND_NODE, CONTROLLER, NODE
from easypy.misc import kwargs_resilient, at_least
from easypy.caching import cached_property
from easypy.collections import shuffled
from easypy.exceptions import TException
from . logging import logger, init_logging
from . utils import patch_traceback_format, RESTSession, ApiError
from . import csi_pb2_grpc
from .csi_pb2_grpc import ControllerServicer, NodeServicer, IdentityServicer
from . import csi_types as types
LOAD_BALANCING_STRATEGIES = {ROUNDROBIN, RANDOM}
class Config(TypedEnv):
class Path(TypedEnv.Str):
convert = staticmethod(local.path)
plugin_name, plugin_version, git_commit = open("version.info").read().strip().split()
controller_root_mount = Path("X_CSI_CTRL_ROOT_MOUNT", default=local.path("/csi-volumes"))
mock_vast = TypedEnv.Bool("X_CSI_MOCK_VAST", default=False)
nfs_server = TypedEnv.Str("X_CSI_NFS_SERVER", default="127.0.0.1")
root_export = Path("X_CSI_NFS_EXPORT", default=local.path("/k8s"))
log_level = TypedEnv.Str("X_CSI_LOG_LEVEL", default="info")
csi_sanity_test = TypedEnv.Bool("X_CSI_SANITY_TEST", default=False)
node_id = TypedEnv.Str("X_CSI_NODE_ID", default=socket.getfqdn())
vms_host = TypedEnv.Str("X_CSI_VMS_HOST", default="vast")
vip_pool_name = TypedEnv.Str("X_CSI_VIP_POOL_NAME", default="k8s")
vms_user = TypedEnv.Str("X_CSI_VMS_USER", default="admin")
vms_password = TypedEnv.Str("X_CSI_VMS_PASSWORD", default="<PASSWORD>")
ssl_verify = TypedEnv.Bool("X_CSI_DISABLE_VMS_SSL_VERIFICATION", default=False)
volume_name_fmt = TypedEnv.Str("X_CSI_VOLUME_NAME_FMT", default="csi:{namespace}:{name}:{id}")
_mount_options = TypedEnv.Str("X_CSI_MOUNT_OPTIONS", default="") # For example: "port=2049,nolock,vers=3"
@property
def mount_options(self):
s = self._mount_options.strip()
return list({p for p in s.split(',') if p})
_load_balancing = TypedEnv.Str("X_CSI_LB_STRATEGY", default="roundrobin")
_mode = TypedEnv.Str("CSI_MODE", default="controller_and_node")
_endpoint = TypedEnv.Str("CSI_ENDPOINT", default='unix:///var/run/csi.sock')
@property
def load_balancing(self):
lb = Token(self._load_balancing.upper())
if lb not in LOAD_BALANCING_STRATEGIES:
raise Exception(f"invalid load balancing strategy: {lb} (use {'|'.join(LOAD_BALANCING_STRATEGIES)})")
return lb
@property
def mode(self):
mode = Token(self._mode.upper())
assert mode in {CONTROLLER_AND_NODE, CONTROLLER, NODE}, f"invalid mode: {mode}"
return mode
@property
def endpoint(self):
        # str.strip() removes a *set* of characters, so strip("tcp://") could
        # also eat legitimate leading/trailing 't', 'c', 'p', ':' or '/'
        # characters; remove the scheme prefix explicitly instead.
        if self._endpoint.startswith("tcp://"):
            return self._endpoint[len("tcp://"):]
        return self._endpoint
CONF = None
################################################################
#
# Helpers
#
################################################################
FAILED_PRECONDITION = grpc.StatusCode.FAILED_PRECONDITION
INVALID_ARGUMENT = grpc.StatusCode.INVALID_ARGUMENT
ALREADY_EXISTS = grpc.StatusCode.ALREADY_EXISTS
NOT_FOUND = grpc.StatusCode.NOT_FOUND
ABORTED = grpc.StatusCode.ABORTED
UNKNOWN = grpc.StatusCode.UNKNOWN
OUT_OF_RANGE = grpc.StatusCode.OUT_OF_RANGE
SUPPORTED_ACCESS = [
types.AccessModeType.SINGLE_NODE_WRITER,
# types.AccessModeType.SINGLE_NODE_READER_ONLY,
# types.AccessModeType.MULTI_NODE_READER_ONLY,
# types.AccessModeType.MULTI_NODE_SINGLE_WRITER,
types.AccessModeType.MULTI_NODE_MULTI_WRITER,
]
class MountFailed(TException):
template = "Mounting {src} failed"
def mount(src, tgt, flags=""):
cmd = local.cmd.mount
    flags = [f for f in flags.split(",") if f]  # flags="" would otherwise yield [""]
flags += CONF.mount_options
if CONF.mock_vast:
flags += "port=2049,nolock,vers=3".split(",")
if flags:
cmd = cmd["-o", ",".join(flags)]
try:
cmd[src, tgt] & logger.pipe_info("mount >>")
except ProcessExecutionError as exc:
if exc.retcode == 32:
raise MountFailed(detail=exc.stderr, src=src, tgt=tgt)
raise
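# Example (illustrative): mounting an NFS export under the controller root;
# any X_CSI_MOUNT_OPTIONS entries are appended to the given flags.
#   mount("10.1.2.3:/k8s/pvc-0001", "/csi-volumes/pvc-0001", flags="ro,vers=3")
# runs approximately:
#   mount -o ro,vers=3 10.1.2.3:/k8s/pvc-0001 /csi-volumes/pvc-0001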
def _validate_capabilities(capabilities):
for capability in capabilities:
if capability.access_mode.mode not in SUPPORTED_ACCESS:
raise Abort(
INVALID_ARGUMENT,
f'Unsupported access mode: {capability.access_mode.mode} (use {SUPPORTED_ACCESS})')
if not capability.HasField('mount'):
pass
elif not capability.mount.fs_type:
pass
elif capability.mount.fs_type != "ext4":
raise Abort(
INVALID_ARGUMENT,
f'Unsupported file system type: {capability.mount.fs_type}')
class Abort(Exception):
@property
def code(self):
return self.args[0]
@property
def message(self):
return self.args[1]
class Instrumented():
SILENCED = ["Probe", "NodeGetCapabilities"]
@classmethod
def logged(cls, func):
method = func.__name__
log = logger.debug if (method in cls.SILENCED) else logger.info
parameters = inspect.signature(func).parameters
required_params = {
name for name, p in parameters.items() if p.default is p.empty}
required_params.discard("self")
func = kwargs_resilient(func)
@wraps(func)
def wrapper(self, request, context):
peer = context.peer()
params = {fld.name: value for fld, value in request.ListFields()}
missing = required_params - {"request", "context"} - set(params)
log(f"{peer} >>> {method}:")
if params:
for line in pformat(params).splitlines():
log(f" {line}")
try:
if missing:
msg = f'Missing required fields: {", ".join(sorted(missing))}'
logger.error(f"{peer} <<< {method}: {msg}")
raise Abort(INVALID_ARGUMENT, msg)
ret = func(self, request=request, context=context, **params)
except Abort as exc:
logger.info(f'{peer} <<< {method} ABORTED with {exc.code} ("{exc.message}")')
logger.debug("Traceback", exc_info=True)
context.abort(exc.code, exc.message)
except Exception as exc:
                # the original correlation-key expression was redacted in the
                # source; a random UUID is assumed here as an opaque stand-in
                err_key = f"<{uuid.uuid4()}>"  # assumes `import uuid` at module top
logger.exception(f"Exception during {method} ({err_key}): {type(exc)}")
context.abort(UNKNOWN, f"Exception during {method}: {err_key}")
if ret:
log(f"{peer} <<< {method}:")
for line in pformat(ret).splitlines():
log(f" {line}")
log(f"{peer} --- {method}: Done")
return ret
return wrapper
@classmethod
def __init_subclass__(cls):
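        # auto-wrap every public RPC method inherited from the generated
        # servicer base class with the logging/validation decorator above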
for name, _ in inspect.getmembers(cls.__base__, inspect.isfunction):
if name.startswith("_"):
continue
func = getattr(cls, name)
setattr(cls, name, cls.logged(func))
super().__init_subclass__()
################################################################
#
# Identity
#
################################################################
class Identity(IdentityServicer, Instrumented):
def __init__(self):
self.capabilities = []
self.controller = None
self.node = None
def GetPluginInfo(self, request, context):
return types.InfoResp(
name=CONF.plugin_name,
vendor_version=CONF.plugin_version,
)
def GetPluginCapabilities(self, request, context):
return types.CapabilitiesResp(
capabilities=[
types.Capability(service=types.Service(type=cap))
for cap in self.capabilities])
def Probe(self, request, context):
if self.node:
return types.ProbeRespOK
elif CONF.mock_vast:
return types.ProbeRespOK
elif self.controller:
try:
self.controller.get_vip()
except ApiError as exc:
raise Abort(FAILED_PRECONDITION, str(exc))
return types.ProbeRespOK
else:
return types.ProbeRespNotReady
################################################################
#
# Controller
#
################################################################
class Controller(ControllerServicer, Instrumented):
CAPABILITIES = [
types.CtrlCapabilityType.CREATE_DELETE_VOLUME,
types.CtrlCapabilityType.PUBLISH_UNPUBLISH_VOLUME,
types.CtrlCapabilityType.LIST_VOLUMES,
types.CtrlCapabilityType.EXPAND_VOLUME,
# types.CtrlCapabilityType.GET_CAPACITY,
# types.CtrlCapabilityType.CREATE_DELETE_SNAPSHOT,
# types.CtrlCapabilityType.LIST_SNAPSHOTS,
# types.CtrlCapabilityType.CLONE_VOLUME,
# types.CtrlCapabilityType.PUBLISH_READONLY,
]
mock_db = local.path("/tmp/")
@cached_property
def vms_session(self):
auth = CONF.vms_user, CONF.vms_password
return RESTSession(
base_url=f"https://{CONF.vms_host}/api",
auth=auth, ssl_verify=CONF.ssl_verify)
_vip_round_robin_idx = -1
def get_vip(self):
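        # pick a virtual IP from the configured pool according to
        # CONF.load_balancing (round-robin or random); in mock mode just
        # return the statically configured NFS server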
if CONF.mock_vast:
return CONF.nfs_server
vips = [vip for vip in self.vms_session.vips() if vip.vippool == CONF.vip_pool_name]
if not vips:
raise Exception(f"No vips in pool {CONF.vip_pool_name}")
if CONF.load_balancing == ROUNDROBIN:
self._vip_round_robin_idx = (self._vip_round_robin_idx + 1) % len(vips)
vip = vips[self._vip_round_robin_idx]
elif CONF.load_balancing == RANDOM:
vip = shuffled(vips)[0]
else:
raise Exception(f"Invalid load_balancing mode: '{CONF.load_balancing}'")
logger.info(f"Using {CONF.load_balancing} - chose {vip.title}, currently connected to {vip.cnode}")
return vip.ip
def get_quota(self, volume_id):
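        # a volume maps to at most one quota on its export path; multiple
        # matches indicate an inconsistent state and are treated as an error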
quotas = self.vms_session.quotas(path__contains=str(CONF.root_export[volume_id]))
if not quotas:
return
elif len(quotas) > 1:
names = ", ".join(sorted(q.name for q in quotas))
raise Exception(f"Too many quotas on {volume_id}: {names}")
else:
return quotas[0]
@cached_property
def root_mount(self):
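        # lazily NFS-mount the root export; the NOT_MOUNTED sentinel is only
        # visible while the directory is not shadowed by an actual mount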
target_path = CONF.controller_root_mount
if not target_path.exists():
target_path.mkdir()
target_path["NOT_MOUNTED"].touch()
logger.info(f"created successfully: {target_path}")
if target_path["NOT_MOUNTED"].exists():
nfs_server = self.get_vip()
mount_spec = f"{nfs_server}:{CONF.root_export}"
mount(mount_spec, target_path)
logger.info(f"mounted successfully: {target_path}")
return target_path
def ControllerGetCapabilities(self):
return types.CtrlCapabilityResp(capabilities=[
types.CtrlCapability(rpc=types.CtrlCapability.RPC(type=rpc))
for rpc in self.CAPABILITIES])
def ValidateVolumeCapabilities(self, context, volume_id, volume_capabilities, volume_context=None, parameters=None):
vol = self.root_mount[volume_id]
if not vol.exists():
raise Abort(NOT_FOUND, f'Volume {volume_id} does not exist')
try:
_validate_capabilities(volume_capabilities)
except Abort as exc:
return types.ValidateResp(message=exc.message)
confirmed = types.ValidateResp.Confirmed(
volume_context=volume_context,
volume_capabilities=volume_capabilities,
parameters=parameters)
return types.ValidateResp(confirmed=confirmed)
def ListVolumes(self, starting_token=None, max_entries=None):
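        # CSI-style pagination: the opaque starting_token is the inode number
        # of the last entry already returned, so listing stays stable even as
        # volumes are created and deleted between calls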
if starting_token:
try:
starting_inode = int(starting_token)
except ValueError:
raise Abort(ABORTED, "Invalid starting_token")
else:
starting_inode = 0
fields = {'entries': []}
vols = (d for d in os.scandir(self.root_mount) if d.is_dir())
vols = sorted(vols, key=lambda d: d.inode())
if not vols:
logger.info(f"No volumes in {self.root_mount}")
return types.ListResp(**fields)
logger.info(f"Got {len(vols)} volumes in {self.root_mount}")
        start_idx = 0
        logger.info(f"Skipping to {starting_inode}")
        for start_idx, d in enumerate(vols):
            if d.inode() > starting_inode:
                break
        else:  # every entry is at or before the token - skip them all
            start_idx = len(vols)
        del vols[:start_idx]
remain = 0
if max_entries:
remain = at_least(0, len(vols) - max_entries)
vols = vols[:max_entries]
if remain:
fields['next_token'] = str(vols[-1].inode())
fields['entries'] = [types.ListResp.Entry(
volume=self._to_volume(vol.name))
for vol in vols]
return types.ListResp(**fields)
def _to_volume(self, vol_id):
vol_dir = self.root_mount[vol_id]
logger.info(f"{vol_dir}")
if not vol_dir.is_dir():
logger.info(f"{vol_dir} is not dir")
return
with self.mock_db[vol_id].open("rb") as f:
vol = types.Volume()
vol.ParseFromString(f.read())
return vol
def CreateVolume(self, name, volume_capabilities, capacity_range=None, parameters=None):
_validate_capabilities(volume_capabilities)
volume_id = name
volume_name = f"csi-{volume_id}"
if parameters:
pvc_name = parameters.get("csi.storage.k8s.io/pvc/name")
pvc_namespace = parameters.get("csi.storage.k8s.io/pvc/namespace")
if pvc_namespace and pvc_name:
volume_name = CONF.volume_name_fmt.format(namespace=pvc_namespace, name=pvc_name, id=volume_id)
volume_name = volume_name[:64] # crop to Vast's max-length
requested_capacity = capacity_range.required_bytes if capacity_range else 0
existing_capacity = 0
volume_context = {}
if CONF.mock_vast:
volume = self._to_volume(volume_id)
if volume:
existing_capacity = volume.capacity_bytes
else:
quota = self.get_quota(volume_id)
if quota:
existing_capacity = quota.hard_limit
if not existing_capacity:
pass
elif existing_capacity != requested_capacity:
            raise Abort(
                ALREADY_EXISTS,
                "Volume already exists with different capacity than requested "
                f"({existing_capacity})")
if CONF.mock_vast:
vol_dir = self.root_mount[volume_id]
vol_dir.mkdir()
else:
data = dict(
create_dir=True,
name=volume_name,
path=str(CONF.root_export[volume_id]),
)
if requested_capacity:
data.update(hard_limit=requested_capacity)
quota = self.vms_session.post("quotas", data=data)
volume_context.update(quota_id=quota.id)
volume = types.Volume(
capacity_bytes=requested_capacity, volume_id=volume_id,
volume_context={k: str(v) for k, v in volume_context.items()})
if CONF.mock_vast:
with self.mock_db[volume_id].open("wb") as f:
f.write(volume.SerializeToString())
return types.CreateResp(volume=volume)
def DeleteVolume(self, volume_id):
vol_dir = self.root_mount[volume_id]
vol_dir.delete()
if not CONF.mock_vast:
quota = self.get_quota(volume_id)
if quota:
self.vms_session.delete(f"quotas/{quota.id}")
logger.info(f"Quota removed: {quota.id}")
else:
self.mock_db[volume_id].delete()
logger.info(f"Removed volume: {vol_dir}")
return types.DeleteResp()
def GetCapacity(self):
        # CSI expects bytes; f_favail counts free inodes, so report free blocks
        # available to unprivileged users times the fragment size instead
        stat = os.statvfs(self.root_mount)
        return types.CapacityResp(available_capacity=stat.f_bavail * stat.f_frsize)
def ControllerPublishVolume(self, node_id, volume_id, volume_capability):
_validate_capabilities([volume_capability])
found = bool(self._to_volume(volume_id) if CONF.mock_vast else self.get_quota(volume_id))
if not found:
raise Abort(NOT_FOUND, f"Unknown volume: {volume_id}")
if CONF.csi_sanity_test and CONF.node_id != node_id:
# for a test that tries to fake a non-existent node
            raise Abort(NOT_FOUND, f"Unknown node: {node_id}")
nfs_server_ip = self.get_vip()
return types.CtrlPublishResp(
publish_context=dict(
export_path=str(CONF.root_export),
nfs_server_ip=nfs_server_ip,
))
def ControllerUnpublishVolume(self, node_id, volume_id):
return types.CtrlUnpublishResp()
def ControllerExpandVolume(self, volume_id, capacity_range):
        requested_capacity = capacity_range.required_bytes
        existing_capacity = 0  # default when the volume/quota cannot be found
if CONF.mock_vast:
volume = self._to_volume(volume_id)
if volume:
existing_capacity = volume.capacity_bytes
else:
quota = self.get_quota(volume_id)
if quota:
existing_capacity = quota.hard_limit
if requested_capacity <= existing_capacity:
capacity_bytes = existing_capacity
elif CONF.mock_vast:
volume.capacity_bytes = capacity_bytes = requested_capacity
else:
try:
self.vms_session.patch(f"quotas/{quota.id}", data=dict(hard_limit=requested_capacity))
except ApiError as exc:
raise Abort(
OUT_OF_RANGE,
f"Failed updating quota {quota.id}: {exc}")
capacity_bytes = requested_capacity
        return types.CtrlExpandResp(capacity_bytes=capacity_bytes)
            self.assertListEqual([op.coeff for op in sum_op], [4, 2, 4, 2])
sum_op = sum_op.collapse_summands()
with self.subTest('SummedOp test 7-b'):
self.assertEqual(sum_op.coeff, 1)
self.assertListEqual([str(op.primitive) for op in sum_op], ['XX', 'YY'])
self.assertListEqual([op.coeff for op in sum_op], [8, 4])
sum_op = SummedOp([X ^ X * 2, Y ^ Y], 2) + SummedOp([X ^ X * 2, Z ^ Z], 3)
with self.subTest('SummedOp test 8-a'):
self.assertEqual(sum_op.coeff, 1)
self.assertListEqual([str(op.primitive) for op in sum_op], ['XX', 'YY', 'XX', 'ZZ'])
self.assertListEqual([op.coeff for op in sum_op], [4, 2, 6, 3])
sum_op = sum_op.collapse_summands()
with self.subTest('SummedOp test 8-b'):
self.assertEqual(sum_op.coeff, 1)
self.assertListEqual([str(op.primitive) for op in sum_op], ['XX', 'YY', 'ZZ'])
self.assertListEqual([op.coeff for op in sum_op], [10, 2, 3])
def test_compose_op_of_different_dim(self):
"""
Test if smaller operator expands to correct dim when composed with bigger operator.
Test if PrimitiveOps compose methods are consistent.
"""
# PauliOps of different dim
xy_p = (X ^ Y)
xyz_p = (X ^ Y ^ Z)
pauli_op = xy_p @ xyz_p
expected_result = (I ^ I ^ Z)
self.assertEqual(pauli_op, expected_result)
# MatrixOps of different dim
xy_m = xy_p.to_matrix_op()
xyz_m = xyz_p.to_matrix_op()
matrix_op = xy_m @ xyz_m
self.assertEqual(matrix_op, expected_result.to_matrix_op())
# CircuitOps of different dim
xy_c = xy_p.to_circuit_op()
xyz_c = xyz_p.to_circuit_op()
circuit_op = xy_c @ xyz_c
self.assertTrue(np.array_equal(pauli_op.to_matrix(), matrix_op.to_matrix()))
self.assertTrue(np.allclose(pauli_op.to_matrix(), circuit_op.to_matrix(), rtol=1e-14))
self.assertTrue(np.allclose(matrix_op.to_matrix(), circuit_op.to_matrix(), rtol=1e-14))
def test_permute_on_primitive_op(self):
""" Test if permute methods of PrimitiveOps are consistent and work as expected. """
indices = [1, 2, 4]
# PauliOp
pauli_op = (X ^ Y ^ Z)
permuted_pauli_op = pauli_op.permute(indices)
expected_pauli_op = (X ^ I ^ Y ^ Z ^ I)
self.assertEqual(permuted_pauli_op, expected_pauli_op)
# CircuitOp
circuit_op = pauli_op.to_circuit_op()
permuted_circuit_op = circuit_op.permute(indices)
expected_circuit_op = expected_pauli_op.to_circuit_op()
self.assertEqual(permuted_circuit_op.primitive.__str__(),
expected_circuit_op.primitive.__str__())
# MatrixOp
matrix_op = pauli_op.to_matrix_op()
permuted_matrix_op = matrix_op.permute(indices)
expected_matrix_op = expected_pauli_op.to_matrix_op()
equal = np.allclose(permuted_matrix_op.to_matrix(), expected_matrix_op.to_matrix())
self.assertTrue(equal)
def test_permute_on_list_op(self):
""" Test if ListOp permute method is consistent with PrimitiveOps permute methods. """
op1 = (X ^ Y ^ Z).to_circuit_op()
op2 = (Z ^ X ^ Y)
# ComposedOp
indices = [1, 2, 0]
primitive_op = op1 @ op2
primitive_op_perm = primitive_op.permute(indices) # CircuitOp.permute
composed_op = ComposedOp([op1, op2])
composed_op_perm = composed_op.permute(indices)
# reduce the ListOp to PrimitiveOp
to_primitive = composed_op_perm.oplist[0] @ composed_op_perm.oplist[1]
# compare resulting PrimitiveOps
equal = np.allclose(primitive_op_perm.to_matrix(), to_primitive.to_matrix())
self.assertTrue(equal)
# TensoredOp
indices = [3, 5, 4, 0, 2, 1]
primitive_op = op1 ^ op2
primitive_op_perm = primitive_op.permute(indices)
tensored_op = TensoredOp([op1, op2])
tensored_op_perm = tensored_op.permute(indices)
# reduce the ListOp to PrimitiveOp
composed_oplist = tensored_op_perm.oplist
to_primitive = \
composed_oplist[0] @ (composed_oplist[1].oplist[0] ^ composed_oplist[1].oplist[1]) @ \
composed_oplist[2]
# compare resulting PrimitiveOps
equal = np.allclose(primitive_op_perm.to_matrix(), to_primitive.to_matrix())
self.assertTrue(equal)
# SummedOp
primitive_op = (X ^ Y ^ Z)
summed_op = SummedOp([primitive_op])
indices = [1, 2, 0]
primitive_op_perm = primitive_op.permute(indices) # PauliOp.permute
summed_op_perm = summed_op.permute(indices)
# reduce the ListOp to PrimitiveOp
to_primitive = summed_op_perm.oplist[0] @ primitive_op @ summed_op_perm.oplist[2]
# compare resulting PrimitiveOps
equal = np.allclose(primitive_op_perm.to_matrix(), to_primitive.to_matrix())
self.assertTrue(equal)
def test_expand_on_list_op(self):
""" Test if expanded ListOp has expected num_qubits. """
add_qubits = 3
# ComposedOp
composed_op = ComposedOp([(X ^ Y ^ Z), (H ^ T), (Z ^ X ^ Y ^ Z).to_matrix_op()])
expanded = composed_op._expand_dim(add_qubits)
self.assertEqual(composed_op.num_qubits + add_qubits, expanded.num_qubits)
# TensoredOp
tensored_op = TensoredOp([(X ^ Y), (Z ^ I)])
expanded = tensored_op._expand_dim(add_qubits)
self.assertEqual(tensored_op.num_qubits + add_qubits, expanded.num_qubits)
# SummedOp
summed_op = SummedOp([(X ^ Y), (Z ^ I ^ Z)])
expanded = summed_op._expand_dim(add_qubits)
self.assertEqual(summed_op.num_qubits + add_qubits, expanded.num_qubits)
def test_expand_on_state_fn(self):
""" Test if expanded StateFn has expected num_qubits. """
num_qubits = 3
add_qubits = 2
# case CircuitStateFn, with primitive QuantumCircuit
qc2 = QuantumCircuit(num_qubits)
qc2.cx(0, 1)
cfn = CircuitStateFn(qc2, is_measurement=True)
cfn_exp = cfn._expand_dim(add_qubits)
self.assertEqual(cfn_exp.num_qubits, add_qubits + num_qubits)
# case OperatorStateFn, with OperatorBase primitive, in our case CircuitStateFn
osfn = OperatorStateFn(cfn)
osfn_exp = osfn._expand_dim(add_qubits)
self.assertEqual(osfn_exp.num_qubits, add_qubits + num_qubits)
# case DictStateFn
dsfn = DictStateFn('1'*num_qubits, is_measurement=True)
self.assertEqual(dsfn.num_qubits, num_qubits)
dsfn_exp = dsfn._expand_dim(add_qubits)
self.assertEqual(dsfn_exp.num_qubits, num_qubits + add_qubits)
# case VectorStateFn
vsfn = VectorStateFn(np.ones(2**num_qubits, dtype=complex))
self.assertEqual(vsfn.num_qubits, num_qubits)
vsfn_exp = vsfn._expand_dim(add_qubits)
self.assertEqual(vsfn_exp.num_qubits, num_qubits + add_qubits)
def test_permute_on_state_fn(self):
""" Test if StateFns permute are consistent. """
num_qubits = 4
dim = 2**num_qubits
primitive_list = [1.0/(i+1) for i in range(dim)]
primitive_dict = {format(i, 'b').zfill(num_qubits): 1.0/(i+1) for i in range(dim)}
dict_fn = DictStateFn(primitive=primitive_dict, is_measurement=True)
vec_fn = VectorStateFn(primitive=primitive_list, is_measurement=True)
# check if dict_fn and vec_fn are equivalent
equivalent = np.allclose(dict_fn.to_matrix(), vec_fn.to_matrix())
self.assertTrue(equivalent)
# permute
indices = [2, 3, 0, 1]
permute_dict = dict_fn.permute(indices)
permute_vect = vec_fn.permute(indices)
equivalent = np.allclose(permute_dict.to_matrix(), permute_vect.to_matrix())
self.assertTrue(equivalent)
def test_compose_consistency(self):
"""Test if PrimitiveOp @ ComposedOp is consistent with ComposedOp @ PrimitiveOp."""
# PauliOp
op1 = (X ^ Y ^ Z)
op2 = (X ^ Y ^ Z)
op3 = (X ^ Y ^ Z).to_circuit_op()
comp1 = op1 @ ComposedOp([op2, op3])
comp2 = ComposedOp([op3, op2]) @ op1
self.assertListEqual(comp1.oplist, list(reversed(comp2.oplist)))
        # CircuitOp
op1 = op1.to_circuit_op()
op2 = op2.to_circuit_op()
op3 = op3.to_matrix_op()
comp1 = op1 @ ComposedOp([op2, op3])
comp2 = ComposedOp([op3, op2]) @ op1
self.assertListEqual(comp1.oplist, list(reversed(comp2.oplist)))
# MatrixOp
op1 = op1.to_matrix_op()
op2 = op2.to_matrix_op()
op3 = op3.to_pauli_op()
comp1 = op1 @ ComposedOp([op2, op3])
comp2 = ComposedOp([op3, op2]) @ op1
self.assertListEqual(comp1.oplist, list(reversed(comp2.oplist)))
def test_compose_with_indices(self):
""" Test compose method using its permutation feature."""
pauli_op = (X ^ Y ^ Z)
circuit_op = (T ^ H)
matrix_op = (X ^ Y ^ H ^ T).to_matrix_op()
evolved_op = EvolvedOp(matrix_op)
# composition of PrimitiveOps
num_qubits = 4
primitive_op = pauli_op @ circuit_op @ matrix_op
composed_op = pauli_op @ circuit_op @ evolved_op
self.assertEqual(primitive_op.num_qubits, num_qubits)
self.assertEqual(composed_op.num_qubits, num_qubits)
# with permutation
num_qubits = 5
indices = [1, 4]
permuted_primitive_op = evolved_op @ circuit_op.permute(indices) @ pauli_op @ matrix_op
composed_primitive_op = \
evolved_op @ pauli_op.compose(circuit_op, permutation=indices, front=True) @ matrix_op
self.assertTrue(np.allclose(permuted_primitive_op.to_matrix(),
composed_primitive_op.to_matrix()))
self.assertEqual(num_qubits, permuted_primitive_op.num_qubits)
# ListOp
num_qubits = 6
tensored_op = TensoredOp([pauli_op, circuit_op])
summed_op = pauli_op + circuit_op.permute([2, 1])
composed_op = circuit_op @ evolved_op @ matrix_op
list_op = summed_op @ composed_op.compose(tensored_op, permutation=[1, 2, 3, 5, 4],
front=True)
self.assertEqual(num_qubits, list_op.num_qubits)
num_qubits = 4
circuit_fn = CircuitStateFn(primitive=circuit_op.primitive, is_measurement=True)
operator_fn = OperatorStateFn(primitive=circuit_op ^ circuit_op, is_measurement=True)
no_perm_op = circuit_fn @ operator_fn
self.assertEqual(no_perm_op.num_qubits, num_qubits)
indices = [0, 4]
perm_op = operator_fn.compose(circuit_fn, permutation=indices, front=True)
self.assertEqual(perm_op.num_qubits, max(indices) + 1)
# StateFn
num_qubits = 3
dim = 2**num_qubits
vec = [1.0/(i+1) for i in range(dim)]
dic = {format(i, 'b').zfill(num_qubits): 1.0/(i+1) for i in range(dim)}
is_measurement = True
op_state_fn = OperatorStateFn(matrix_op, is_measurement=is_measurement) # num_qubit = 4
vec_state_fn = VectorStateFn(vec, is_measurement=is_measurement) # 3
dic_state_fn = DictStateFn(dic, is_measurement=is_measurement) # 3
circ_state_fn = CircuitStateFn(circuit_op.to_circuit(), is_measurement=is_measurement) # 2
composed_op = op_state_fn @ vec_state_fn @ dic_state_fn @ circ_state_fn
self.assertEqual(composed_op.num_qubits, op_state_fn.num_qubits)
# with permutation
perm = [2, 4, 6]
composed = \
op_state_fn @ dic_state_fn.compose(vec_state_fn, permutation=perm, front=True) @ \
circ_state_fn
self.assertEqual(composed.num_qubits, max(perm) + 1)
def test_summed_op_equals(self):
"""Test corner cases of SummedOp's equals function."""
with self.subTest('multiplicative factor'):
self.assertEqual(2 * X, X + X)
with self.subTest('commutative'):
self.assertEqual(X + Z, Z + X)
with self.subTest('circuit and paulis'):
z = CircuitOp(ZGate())
self.assertEqual(Z + z, z + Z)
with self.subTest('matrix op and paulis'):
z = MatrixOp([[1, 0], [0, -1]])
self.assertEqual(Z + z, z + Z)
with self.subTest('matrix multiplicative'):
z = MatrixOp([[1, 0], [0, -1]])
self.assertEqual(2 * z, z + z)
with self.subTest('parameter coefficients'):
expr = Parameter('theta')
z = MatrixOp([[1, 0], [0, -1]])
self.assertEqual(expr * z, expr * z)
with self.subTest('different coefficient types'):
expr = Parameter('theta')
z = MatrixOp([[1, 0], [0, -1]])
self.assertNotEqual(expr * z, 2 * z)
with self.subTest('additions aggregation'):
z = MatrixOp([[1, 0], [0, -1]])
a = z + z + Z
b = 2 * z + Z
c = z + Z + z
self.assertEqual(a, b)
self.assertEqual(b, c)
self.assertEqual(a, c)
def test_circuit_compose_register_independent(self):
"""Test that CircuitOp uses combines circuits independent of the register.
I.e. that is uses ``QuantumCircuit.compose`` over ``combine`` or ``extend``.
"""
op = Z ^ 2
qr = QuantumRegister(2, 'my_qr')
circuit = QuantumCircuit(qr)
composed = op.compose(CircuitOp(circuit))
self.assertEqual(composed.num_qubits, 2)
def test_matrix_op_conversions(self):
"""Test to reveal QiskitError when to_instruction or to_circuit method is called on
parametrized matrix op."""
m = np.array([[0, 0, 1, 0], [0, 0, 0, -1], [1, 0, 0, 0], [0, -1, 0, 0]])
matrix_op = MatrixOp(m, Parameter('beta'))
for method in ['to_instruction', 'to_circuit']:
with self.subTest(method):
# QiskitError: multiplication of Operator with ParameterExpression isn't implemented
self.assertRaises(QiskitError, getattr(matrix_op, method))
def test_list_op_to_circuit(self):
"""Test if unitary ListOps transpile | |
import os
import time
import sys
import tempfile
import uuid
from sys import executable
import pytest
from doit.task import Task
from doit.dependency import get_md5, get_file_md5
from doit.dependency import DbmDB, JsonDB, SqliteDB, Dependency
from doit.dependency import DatabaseException, UptodateCalculator
from doit.dependency import FileChangedChecker, MD5Checker, TimestampChecker
from doit.dependency import DependencyStatus
from doit.dependency import JSONCodec
from .conftest import get_abspath, dep_manager_fixture
# path to test folder
TEST_PATH = os.path.dirname(__file__)
PROGRAM = "%s %s/sample_process.py" % (executable, TEST_PATH)
def test_unicode_md5():
data = "我"
# no exception is raised
assert get_md5(data)
def test_md5():
filePath = os.path.join(os.path.dirname(__file__), "sample_md5.txt")
# result got using command line md5sum, with different line-endings
# to deal with different GIT configurations:
expected_lf = "45d1503cb985898ab5bd8e58973007dd"
expected_crlf = "cf7b48b2fec3b581b135f7c9a1f7ae04"
assert get_file_md5(filePath) in {expected_lf, expected_crlf}
def test_sqlite_import():
"""
Checks that SQLite module is not imported until the SQLite class is instantiated
"""
filename = os.path.join(tempfile.gettempdir(), str(uuid.uuid4()))
assert 'sqlite3' not in sys.modules
SqliteDB(filename, JSONCodec())
assert 'sqlite3' in sys.modules
os.remove(filename)
####
# dependencies are files only (not other tasks).
#
# whenever a task has a dependency, the runner checks whether that dependency
# was modified since the last successful run; if not, the task is skipped.
# since more than one task might share the same dependency, and those tasks
# might have different results (success/failure), the signature is associated
# not only with the file but also with the task.
#
# save in db (task - dependency - (timestamp, size, signature))
#   taskId_dependency => signature(dependency)
#   taskId is md5(CmdTask.task)
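# A minimal illustrative sketch of the scheme above (the helper below is
# hypothetical, not part of doit's API): the DB key is the md5 of the task's
# text, and the stored value is the file's (timestamp, size, md5) triple.
def _signature_sketch(task_text, dep_path):
    key = get_md5(task_text)                  # taskId is md5(CmdTask.task)
    value = (os.path.getmtime(dep_path),      # timestamp
             os.path.getsize(dep_path),       # size
             get_file_md5(dep_path))          # content signature
    return key, value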
# test parametrization, execute tests for all DB backends.
# create a separate fixture to be used only by this module
# because only here it is required to test with all backends
@pytest.fixture(params=[JsonDB, DbmDB, SqliteDB])
def pdep_manager(request):
return dep_manager_fixture(request, request.param)
# FIXME there was major refactor breaking classes from dependency,
# unit-tests could be more specific to base classes.
class TestDependencyDb(object):
# adding a new value to the DB
def test_get_set(self, pdep_manager):
pdep_manager._set("taskId_X", "dependency_A", "da_md5")
value = pdep_manager._get("taskId_X", "dependency_A")
assert "da_md5" == value, value
def test_get_set_unicode_name(self, pdep_manager):
pdep_manager._set("taskId_我", "dependency_A", "da_md5")
value = pdep_manager._get("taskId_我", "dependency_A")
assert "da_md5" == value, value
#
def test_dump(self, pdep_manager):
# save and close db
pdep_manager._set("taskId_X", "dependency_A", "da_md5")
pdep_manager.close()
# open it again and check the value
d2 = Dependency(pdep_manager.db_class, pdep_manager.name)
value = d2._get("taskId_X", "dependency_A")
assert "da_md5" == value, value
def test_corrupted_file(self, pdep_manager):
if pdep_manager.whichdb is None: # pragma: no cover
pytest.skip('dumbdbm too dumb to detect db corruption')
# create some corrupted files
for name_ext in pdep_manager.name_ext:
full_name = pdep_manager.name + name_ext
fd = open(full_name, 'w')
fd.write("""{"x": y}""")
fd.close()
pytest.raises(DatabaseException, Dependency,
pdep_manager.db_class, pdep_manager.name)
def test_corrupted_file_unrecognized_excep(self, monkeypatch, pdep_manager):
if pdep_manager.db_class is not DbmDB:
            pytest.skip("test doesn't apply to non DBM DB")
if pdep_manager.whichdb is None: # pragma: no cover
pytest.skip('dumbdbm too dumb to detect db corruption')
# create some corrupted files
for name_ext in pdep_manager.name_ext:
full_name = pdep_manager.name + name_ext
fd = open(full_name, 'w')
fd.write("""{"x": y}""")
fd.close()
monkeypatch.setattr(DbmDB, 'DBM_CONTENT_ERROR_MSG', 'xxx')
pytest.raises(DatabaseException, Dependency,
pdep_manager.db_class, pdep_manager.name)
    # _get must return None if the entry doesn't exist.
def test_getNonExistent(self, pdep_manager):
assert pdep_manager._get("taskId_X", "dependency_A") == None
def test_in(self, pdep_manager):
pdep_manager._set("taskId_ZZZ", "dep_1", "12")
assert pdep_manager._in("taskId_ZZZ")
assert not pdep_manager._in("taskId_hohoho")
def test_remove(self, pdep_manager):
pdep_manager._set("taskId_ZZZ", "dep_1", "12")
pdep_manager._set("taskId_ZZZ", "dep_2", "13")
pdep_manager._set("taskId_YYY", "dep_1", "14")
pdep_manager.remove("taskId_ZZZ")
assert None == pdep_manager._get("taskId_ZZZ", "dep_1")
assert None == pdep_manager._get("taskId_ZZZ", "dep_2")
assert "14" == pdep_manager._get("taskId_YYY", "dep_1")
# special test for DBM backend and "dirty"/caching mechanism
def test_remove_from_non_empty_file(self, pdep_manager):
# 1 - put 2 tasks of file
pdep_manager._set("taskId_XXX", "dep_1", "x")
pdep_manager._set("taskId_YYY", "dep_1", "x")
pdep_manager.close()
# 2 - re-open and remove one task
reopened = Dependency(pdep_manager.db_class, pdep_manager.name)
reopened.remove("taskId_YYY")
reopened.close()
# 3 - re-open again and check task was really removed
reopened2 = Dependency(pdep_manager.db_class, pdep_manager.name)
assert reopened2._in("taskId_XXX")
assert not reopened2._in("taskId_YYY")
def test_remove_all(self, pdep_manager):
pdep_manager._set("taskId_ZZZ", "dep_1", "12")
pdep_manager._set("taskId_ZZZ", "dep_2", "13")
pdep_manager._set("taskId_YYY", "dep_1", "14")
pdep_manager.remove_all()
assert None == pdep_manager._get("taskId_ZZZ", "dep_1")
assert None == pdep_manager._get("taskId_ZZZ", "dep_2")
assert None == pdep_manager._get("taskId_YYY", "dep_1")
class TestSaveSuccess(object):
def test_save_result(self, pdep_manager):
t1 = Task('t_name', None)
t1.result = "result"
pdep_manager.save_success(t1)
assert get_md5("result") == pdep_manager._get(t1.name, "result:")
assert get_md5("result") == pdep_manager.get_result(t1.name)
def test_save_result_hash(self, pdep_manager):
t1 = Task('t_name', None)
t1.result = "result"
pdep_manager.save_success(t1, result_hash='abc')
assert 'abc' == pdep_manager._get(t1.name, "result:")
def test_save_resultNone(self, pdep_manager):
t1 = Task('t_name', None)
pdep_manager.save_success(t1)
assert None is pdep_manager._get(t1.name, "result:")
def test_save_result_dict(self, pdep_manager):
t1 = Task('t_name', None)
t1.result = {'d': "result"}
pdep_manager.save_success(t1)
assert {'d': "result"} == pdep_manager._get(t1.name, "result:")
def test_save_file_md5(self, pdep_manager):
# create a test dependency file
filePath = get_abspath("data/dependency1")
ff = open(filePath, "w")
ff.write("i am the first dependency ever for doit")
ff.close()
# save it
t1 = Task("taskId_X", None, [filePath])
pdep_manager.save_success(t1)
expected = "a1bb792202ce163b4f0d17cb264c04e1"
value = pdep_manager._get("taskId_X", filePath)
assert os.path.getmtime(filePath) == value[0] # timestamp
assert 39 == value[1] # size
assert expected == value[2] # MD5
def test_save_skip(self, pdep_manager, monkeypatch):
filePath = get_abspath("data/dependency1")
t1 = Task("taskId_X", None, [filePath])
pdep_manager._set(t1.name, filePath, (345, 0, "fake"))
monkeypatch.setattr(os.path, 'getmtime', lambda x: 345)
# save but md5 is not modified
pdep_manager.save_success(t1)
got = pdep_manager._get("taskId_X", filePath)
assert "fake" == got[2]
def test_save_files(self, pdep_manager):
filePath = get_abspath("data/dependency1")
ff = open(filePath, "w")
ff.write("part1")
ff.close()
filePath2 = get_abspath("data/dependency2")
ff = open(filePath2, "w")
ff.write("part2")
ff.close()
assert pdep_manager._get("taskId_X", filePath) is None
assert pdep_manager._get("taskId_X", filePath2) is None
t1 = Task("taskId_X", None, [filePath, filePath2])
pdep_manager.save_success(t1)
assert pdep_manager._get("taskId_X", filePath) is not None
assert pdep_manager._get("taskId_X", filePath2) is not None
assert set(pdep_manager._get("taskId_X", 'deps:')) == t1.file_dep
def test_save_values(self, pdep_manager):
t1 = Task('t1', None)
t1.values = {'x':5, 'y':10}
pdep_manager.save_success(t1)
assert {'x':5, 'y':10} == pdep_manager._get("t1", "_values_:")
class TestGetValue(object):
def test_all_values(self, pdep_manager):
t1 = Task('t1', None)
t1.values = {'x':5, 'y':10}
pdep_manager.save_success(t1)
assert {'x':5, 'y':10} == pdep_manager.get_values('t1')
def test_ok(self, pdep_manager):
t1 = Task('t1', None)
t1.values = {'x':5, 'y':10}
pdep_manager.save_success(t1)
assert 5 == pdep_manager.get_value('t1', 'x')
def test_ok_dot_on_task_name(self, pdep_manager):
t1 = Task('t1:a.ext', None)
t1.values = {'x':5, 'y':10}
pdep_manager.save_success(t1)
assert 5 == pdep_manager.get_value('t1:a.ext', 'x')
def test_invalid_taskid(self, pdep_manager):
t1 = Task('t1', None)
t1.values = {'x':5, 'y':10}
pdep_manager.save_success(t1)
pytest.raises(Exception, pdep_manager.get_value, 'nonono', 'x')
def test_invalid_key(self, pdep_manager):
t1 = Task('t1', None)
t1.values = {'x':5, 'y':10}
pdep_manager.save_success(t1)
pytest.raises(Exception, pdep_manager.get_value, 't1', 'z')
class TestRemoveSuccess(object):
def test_save_result(self, pdep_manager):
t1 = Task('t_name', None)
t1.result = "result"
pdep_manager.save_success(t1)
assert get_md5("result") == pdep_manager._get(t1.name, "result:")
pdep_manager.remove_success(t1)
assert None is pdep_manager._get(t1.name, "result:")
class TestIgnore(object):
def test_save_result(self, pdep_manager):
t1 = Task('t_name', None)
pdep_manager.ignore(t1)
assert '1' == pdep_manager._get(t1.name, "ignore:")
class TestMD5Checker(object):
def test_timestamp(self, dependency1):
checker = MD5Checker()
state = checker.get_state(dependency1, None)
state2 = (state[0], state[1]+1, '')
file_stat = os.stat(dependency1)
# dep considered the same as long as timestamp is unchanged
assert not checker.check_modified(dependency1, file_stat, state2)
def test_size(self, dependency1):
checker = MD5Checker()
state = checker.get_state(dependency1, None)
state2 = (state[0]+1, state[1]+1, state[2])
file_stat = os.stat(dependency1)
# if size changed for sure modified (md5 is not checked)
assert checker.check_modified(dependency1, file_stat, state2)
def test_md5(self, dependency1):
checker = MD5Checker()
state = checker.get_state(dependency1, None)
file_stat = os.stat(dependency1)
# same size and md5
state2 = (state[0]+1, state[1], state[2])
assert not checker.check_modified(dependency1, file_stat, state2)
# same size, different md5
state3 = (state[0]+1, state[1], 'not me')
assert checker.check_modified(dependency1, file_stat, state3)
class TestCustomChecker(object):
def test_not_implemented(self, dependency1):
class MyChecker(FileChangedChecker):
pass
checker = MyChecker()
pytest.raises(NotImplementedError, checker.get_state, None, None)
pytest.raises(NotImplementedError, checker.check_modified,
None, None, None)
class TestTimestampChecker(object):
def test_timestamp(self, dependency1):
checker = TimestampChecker()
state = checker.get_state(dependency1, None)
file_stat = os.stat(dependency1)
assert not checker.check_modified(dependency1, file_stat, state)
assert checker.check_modified(dependency1, file_stat, state+1)
class TestDependencyStatus(object):
def test_add_reason(self):
result = DependencyStatus(True)
assert 'up-to-date' == result.status
assert not result.add_reason('changed_file_dep', 'f1')
assert 'run' == result.status
assert not result.add_reason('changed_file_dep', 'f2')
assert ['f1', 'f2'] == result.reasons['changed_file_dep']
def test_add_reason_error(self):
result = DependencyStatus(True)
assert 'up-to-date' == result.status
assert not result.add_reason('missing_file_dep', 'f1', 'error')
assert 'error' == result.status
assert ['f1'] == result.reasons['missing_file_dep']
def test_set_reason(self):
result = DependencyStatus(True)
assert 'up-to-date' == result.status
assert not result.set_reason('has_no_dependencies', True)
assert 'run' == result.status
assert True == result.reasons['has_no_dependencies']
def test_no_log(self):
result = DependencyStatus(False)
assert 'up-to-date' == result.status
assert result.set_reason('has_no_dependencies', True)
assert 'run' == result.status
def test_get_error_message(self):
result = DependencyStatus(False)
assert None == result.get_error_message()
result.error_reason = 'foo xxx'
assert 'foo xxx' == result.get_error_message()
class TestGetStatus(object):
def test_ignore(self, pdep_manager):
t1 = Task("t1", None)
# before ignore
assert not pdep_manager.status_is_ignore(t1)
        # after ignore
pdep_manager.ignore(t1)
assert pdep_manager.status_is_ignore(t1)
def test_fileDependencies(self, pdep_manager):
filePath = get_abspath("data/dependency1")
ff = open(filePath, "w")
ff.write("part1")
ff.close()
dependencies = [filePath]
t1 = Task("t1", None, dependencies)
# first time execute
assert 'run' == pdep_manager.get_status(t1, {}).status
assert dependencies == t1.dep_changed
# second time no
pdep_manager.save_success(t1)
assert 'up-to-date' == pdep_manager.get_status(t1, {}).status
assert [] == t1.dep_changed
# FIXME - mock timestamp
time.sleep(1) # required otherwise timestamp is not modified!