max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
back-end/grocerybot/spiders/plus_spider.py | TvanSchagen/grocerybot | 1 | 6612851 | from datetime import datetime as dt
import scrapy
from grocerybot.items import create_grocery_bot_item
from grocerybot.helpers.weight_standardizer import WeightStandardizer
class ProductsSpider(scrapy.Spider):
name = 'plus_products'
start_urls = ['https://www.plus.nl/']
def parse(self, response):
# follow product categorie pages
for href in response.css('li.category-menu__item--sub').css('a::attr(href)'):
yield response.follow(href, self.parse_categories)
def parse_categories(self, response):
pages = int(response.css("div.number-items-per-page input").xpath('@value').getall()[0])
for x in range(0, pages - 1):
next = '?PageNumber={page}'.format(page=x)
yield response.follow(next, self.parse_products)
def parse_products(self, response):
for href in response.css('li.ish-productList-item').css('a::attr(href)'):
yield response.follow(href, self.save_product)
def save_product(self, response):
product_name = response.css("li.page-header__breadcrumb").css("a::text").getall()[-1]
# product_name = response.css('div.pdp-right-block h1::text').get()
page_title = response.css("title::text").get()
img_src = "https://www.plus.nl/" + response.css("img.lazy").xpath("@data-src").get()
description = None
number_of_units = response.css('div.product-detail-packing::text').get()
if ' \n' in number_of_units:
number_of_units = number_of_units.strip(' \n')
if number_of_units is not None:
if 'stuks' in number_of_units:
size = number_of_units
weight_q = None
weight_ind = None
else:
weight_q = WeightStandardizer.standardize_quantity(number_of_units)
weight_ind = WeightStandardizer.standardize_indicator(number_of_units)
size = None
else:
size = None
weight_q = None
weight_ind = None
try:
euros = response.css('span.price span::text').getall()[-1]
cents = response.css('span.price sup::text').get()
price = euros + '.' + cents
except:
print("COULD NOT GET TRUE PRICE")
price = response.css('span.price span::text').get()
try:
category = response.css("li.page-header__breadcrumb").css("a::text").getall()[:2]
except:
category = None
print("Could not find category")
yield create_grocery_bot_item(product_name, page_title, description, 'plus', response.url, dt.now(), weight_q, weight_ind,
size, category, price, img_src)
| from datetime import datetime as dt
import scrapy
from grocerybot.items import create_grocery_bot_item
from grocerybot.helpers.weight_standardizer import WeightStandardizer
class ProductsSpider(scrapy.Spider):
name = 'plus_products'
start_urls = ['https://www.plus.nl/']
def parse(self, response):
# follow product categorie pages
for href in response.css('li.category-menu__item--sub').css('a::attr(href)'):
yield response.follow(href, self.parse_categories)
def parse_categories(self, response):
pages = int(response.css("div.number-items-per-page input").xpath('@value').getall()[0])
for x in range(0, pages - 1):
next = '?PageNumber={page}'.format(page=x)
yield response.follow(next, self.parse_products)
def parse_products(self, response):
for href in response.css('li.ish-productList-item').css('a::attr(href)'):
yield response.follow(href, self.save_product)
def save_product(self, response):
product_name = response.css("li.page-header__breadcrumb").css("a::text").getall()[-1]
# product_name = response.css('div.pdp-right-block h1::text').get()
page_title = response.css("title::text").get()
img_src = "https://www.plus.nl/" + response.css("img.lazy").xpath("@data-src").get()
description = None
number_of_units = response.css('div.product-detail-packing::text').get()
if ' \n' in number_of_units:
number_of_units = number_of_units.strip(' \n')
if number_of_units is not None:
if 'stuks' in number_of_units:
size = number_of_units
weight_q = None
weight_ind = None
else:
weight_q = WeightStandardizer.standardize_quantity(number_of_units)
weight_ind = WeightStandardizer.standardize_indicator(number_of_units)
size = None
else:
size = None
weight_q = None
weight_ind = None
try:
euros = response.css('span.price span::text').getall()[-1]
cents = response.css('span.price sup::text').get()
price = euros + '.' + cents
except:
print("COULD NOT GET TRUE PRICE")
price = response.css('span.price span::text').get()
try:
category = response.css("li.page-header__breadcrumb").css("a::text").getall()[:2]
except:
category = None
print("Could not find category")
yield create_grocery_bot_item(product_name, page_title, description, 'plus', response.url, dt.now(), weight_q, weight_ind,
size, category, price, img_src)
| en | 0.230751 | # follow product categorie pages # product_name = response.css('div.pdp-right-block h1::text').get() | 2.732915 | 3 |
sota_extractor/taskdb/v01/__init__.py | sotabench/sota-extractor | 0 | 6612852 | __all__ = ["Link", "SotaRow", "Sota", "Dataset", "Task", "TaskDB"]
from sota_extractor.taskdb.v01.models import Link, SotaRow, Sota, Dataset, Task
from sota_extractor.taskdb.v01.taskdb import TaskDB
| __all__ = ["Link", "SotaRow", "Sota", "Dataset", "Task", "TaskDB"]
from sota_extractor.taskdb.v01.models import Link, SotaRow, Sota, Dataset, Task
from sota_extractor.taskdb.v01.taskdb import TaskDB
| none | 1 | 1.3986 | 1 | |
vae_lm/models/base/encoders/encoder.py | Nemexur/nonauto-lm | 3 | 6612853 | <filename>vae_lm/models/base/encoders/encoder.py
from typing import NamedTuple
import torch
from abc import ABC, abstractmethod
from torch_nlp_utils.common import Registrable
from vae_lm.models.base.torch_module import TorchModule
class EncoderOutput(NamedTuple):
"""NamedTuple of Encoder module outputs."""
output: torch.Tensor
ctx: torch.Tensor
mask: torch.Tensor
class Encoder(ABC, TorchModule, Registrable):
"""
Generic Encoder for NonAuto Model.
Parameters
----------
input_size : `int`, required
Size of input features.
"""
def __init__(self, input_size: int) -> None:
super().__init__()
self._input_size = input_size
def forward(self, tokens: torch.Tensor, mask: torch.Tensor) -> EncoderOutput:
# tokens ~ (batch_size, seq length, hidden size)
# mask ~ (batch size, seq length)
embedded_input = self._preprocess_embedding(tokens)
output = self.encoder(embedded_input, mask)
batch = mask.size(0)
last_idx = mask.sum(dim=1).long() - 1
ctx = output[torch.arange(batch, device=mask.device), last_idx]
return EncoderOutput(output, ctx, mask)
def _preprocess_embedding(self, embedded_input: torch.Tensor) -> torch.Tensor:
"""Preprocess embedding if needed."""
return embedded_input
def get_input_size(self) -> int:
return self._input_size
@abstractmethod
def get_output_size(self) -> int:
pass
@abstractmethod
def encoder(self, embedded_input: torch.Tensor, mask: torch.LongTensor) -> torch.Tensor:
"""Perform encoding for `embedded input` with `mask` on tokens."""
pass
| <filename>vae_lm/models/base/encoders/encoder.py
from typing import NamedTuple
import torch
from abc import ABC, abstractmethod
from torch_nlp_utils.common import Registrable
from vae_lm.models.base.torch_module import TorchModule
class EncoderOutput(NamedTuple):
"""NamedTuple of Encoder module outputs."""
output: torch.Tensor
ctx: torch.Tensor
mask: torch.Tensor
class Encoder(ABC, TorchModule, Registrable):
"""
Generic Encoder for NonAuto Model.
Parameters
----------
input_size : `int`, required
Size of input features.
"""
def __init__(self, input_size: int) -> None:
super().__init__()
self._input_size = input_size
def forward(self, tokens: torch.Tensor, mask: torch.Tensor) -> EncoderOutput:
# tokens ~ (batch_size, seq length, hidden size)
# mask ~ (batch size, seq length)
embedded_input = self._preprocess_embedding(tokens)
output = self.encoder(embedded_input, mask)
batch = mask.size(0)
last_idx = mask.sum(dim=1).long() - 1
ctx = output[torch.arange(batch, device=mask.device), last_idx]
return EncoderOutput(output, ctx, mask)
def _preprocess_embedding(self, embedded_input: torch.Tensor) -> torch.Tensor:
"""Preprocess embedding if needed."""
return embedded_input
def get_input_size(self) -> int:
return self._input_size
@abstractmethod
def get_output_size(self) -> int:
pass
@abstractmethod
def encoder(self, embedded_input: torch.Tensor, mask: torch.LongTensor) -> torch.Tensor:
"""Perform encoding for `embedded input` with `mask` on tokens."""
pass
| en | 0.501919 | NamedTuple of Encoder module outputs. Generic Encoder for NonAuto Model. Parameters ---------- input_size : `int`, required Size of input features. # tokens ~ (batch_size, seq length, hidden size) # mask ~ (batch size, seq length) Preprocess embedding if needed. Perform encoding for `embedded input` with `mask` on tokens. | 2.610987 | 3 |
minder_utils/models/utils/feature_selector.py | alexcapstick/minder_utils | 0 | 6612854 | from abc import ABC, abstractmethod
from minder_utils.configurations import feature_selector_config
class Feature_selector(ABC):
def __init__(self, model):
self.name = self.methods[model]
self.model = getattr(self, model)()
@property
def config(self) -> dict:
return feature_selector_config[self.__class__.__name__.lower()]
@property
@abstractmethod
def methods(self):
pass
def reset_model(self, model_name):
self.name = self.methods[model_name]
self.model = getattr(self, model_name)()
def get_info(self, verbose=False):
if verbose:
print('Available methods:')
for idx, key in enumerate(self.methods):
print(str(idx).ljust(10, ' '), key.ljust(10, ' '), self.methods[key].ljust(10, ' '))
return self.methods
@abstractmethod
def fit(self, X, y):
pass
@abstractmethod
def transform(self, X):
pass
| from abc import ABC, abstractmethod
from minder_utils.configurations import feature_selector_config
class Feature_selector(ABC):
def __init__(self, model):
self.name = self.methods[model]
self.model = getattr(self, model)()
@property
def config(self) -> dict:
return feature_selector_config[self.__class__.__name__.lower()]
@property
@abstractmethod
def methods(self):
pass
def reset_model(self, model_name):
self.name = self.methods[model_name]
self.model = getattr(self, model_name)()
def get_info(self, verbose=False):
if verbose:
print('Available methods:')
for idx, key in enumerate(self.methods):
print(str(idx).ljust(10, ' '), key.ljust(10, ' '), self.methods[key].ljust(10, ' '))
return self.methods
@abstractmethod
def fit(self, X, y):
pass
@abstractmethod
def transform(self, X):
pass
| none | 1 | 2.835568 | 3 | |
Project/Jobs_DB_Project/Scrapers/pwc_Scraper.py | nikbearbrown/INFO_6210 | 20 | 6612855 | # -*- coding: utf-8 -*-
"""
Created on Wed Apr 24 23:38:23 2019
@author: msaji
"""
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import re
import sys
import time
import requests
import logging
from array import *
import pandas as pd
from datetime import datetime, timedelta
import csv
logger = logging.getLogger(__name__)
driver = webdriver.Chrome(executable_path='chromedriver.exe')
driver.get('https://pwc.recsolu.com/job_boards/eh3Ue7-NR5woRcVvMh9EXQ')
time.sleep(5)
pause=2
html = driver.page_source
soup = BeautifulSoup(html,features = "lxml")
listOfJobs = soup.findAll("li", { "class" : "WKYF WN3N WF5 WB0F" })
jobPositionName=[]
locations = []
jobIDs= []
postedDates=[]
listOfJobs = soup.findAll("a", {"class" : "search-results__req_title"})
listOfPostedDate = soup.findAll("div", {"class" : "search-results__post-time pull-right"})
listOfLocations = soup.findAll("div", {"class" : "clearfix"})
for job in listOfJobs:
jobPosition = re.sub('<a.*"en">','', str(job)).replace('</a>','')
jobPositionName.append(jobPosition)
for loc in listOfLocations[1:]:
location = re.sub(r'span>','',str(loc).split('><')[5].replace('</span',''))
jobID = str(loc).split('><')[6].replace('</span','').replace('span>','')
postedDate = re.sub(r'di.*">','', str(loc).split('><')[-2].replace('</div',''))
locations.append(loc)
jobIDs.append(jobID)
postedDates.append(postedDate)
Job_df = pd.DataFrame({"Job Position Name":jobPositionName,
"Location":locations,
"Job ID":jobIDs,
"Posted Date":postedDates
})
Job_df.to_csv('PWC_Jobs.csv')
driver.close()
| # -*- coding: utf-8 -*-
"""
Created on Wed Apr 24 23:38:23 2019
@author: msaji
"""
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import re
import sys
import time
import requests
import logging
from array import *
import pandas as pd
from datetime import datetime, timedelta
import csv
logger = logging.getLogger(__name__)
driver = webdriver.Chrome(executable_path='chromedriver.exe')
driver.get('https://pwc.recsolu.com/job_boards/eh3Ue7-NR5woRcVvMh9EXQ')
time.sleep(5)
pause=2
html = driver.page_source
soup = BeautifulSoup(html,features = "lxml")
listOfJobs = soup.findAll("li", { "class" : "WKYF WN3N WF5 WB0F" })
jobPositionName=[]
locations = []
jobIDs= []
postedDates=[]
listOfJobs = soup.findAll("a", {"class" : "search-results__req_title"})
listOfPostedDate = soup.findAll("div", {"class" : "search-results__post-time pull-right"})
listOfLocations = soup.findAll("div", {"class" : "clearfix"})
for job in listOfJobs:
jobPosition = re.sub('<a.*"en">','', str(job)).replace('</a>','')
jobPositionName.append(jobPosition)
for loc in listOfLocations[1:]:
location = re.sub(r'span>','',str(loc).split('><')[5].replace('</span',''))
jobID = str(loc).split('><')[6].replace('</span','').replace('span>','')
postedDate = re.sub(r'di.*">','', str(loc).split('><')[-2].replace('</div',''))
locations.append(loc)
jobIDs.append(jobID)
postedDates.append(postedDate)
Job_df = pd.DataFrame({"Job Position Name":jobPositionName,
"Location":locations,
"Job ID":jobIDs,
"Posted Date":postedDates
})
Job_df.to_csv('PWC_Jobs.csv')
driver.close()
| en | 0.826986 | # -*- coding: utf-8 -*- Created on Wed Apr 24 23:38:23 2019 @author: msaji | 2.712677 | 3 |
fastapi/security/__init__.py | Aryabhata-Rootspring/fastapi | 53,007 | 6612856 | from .api_key import APIKeyCookie as APIKeyCookie
from .api_key import APIKeyHeader as APIKeyHeader
from .api_key import APIKeyQuery as APIKeyQuery
from .http import HTTPAuthorizationCredentials as HTTPAuthorizationCredentials
from .http import HTTPBasic as HTTPBasic
from .http import HTTPBasicCredentials as HTTPBasicCredentials
from .http import HTTPBearer as HTTPBearer
from .http import HTTPDigest as HTTPDigest
from .oauth2 import OAuth2 as OAuth2
from .oauth2 import OAuth2AuthorizationCodeBearer as OAuth2AuthorizationCodeBearer
from .oauth2 import OAuth2PasswordBearer as OAuth2PasswordBearer
from .oauth2 import OAuth2PasswordRequestForm as OAuth2PasswordRequestForm
from .oauth2 import OAuth2PasswordRequestFormStrict as OAuth2PasswordRequestFormStrict
from .oauth2 import SecurityScopes as SecurityScopes
from .open_id_connect_url import OpenIdConnect as OpenIdConnect
| from .api_key import APIKeyCookie as APIKeyCookie
from .api_key import APIKeyHeader as APIKeyHeader
from .api_key import APIKeyQuery as APIKeyQuery
from .http import HTTPAuthorizationCredentials as HTTPAuthorizationCredentials
from .http import HTTPBasic as HTTPBasic
from .http import HTTPBasicCredentials as HTTPBasicCredentials
from .http import HTTPBearer as HTTPBearer
from .http import HTTPDigest as HTTPDigest
from .oauth2 import OAuth2 as OAuth2
from .oauth2 import OAuth2AuthorizationCodeBearer as OAuth2AuthorizationCodeBearer
from .oauth2 import OAuth2PasswordBearer as OAuth2PasswordBearer
from .oauth2 import OAuth2PasswordRequestForm as OAuth2PasswordRequestForm
from .oauth2 import OAuth2PasswordRequestFormStrict as OAuth2PasswordRequestFormStrict
from .oauth2 import SecurityScopes as SecurityScopes
from .open_id_connect_url import OpenIdConnect as OpenIdConnect
| none | 1 | 1.118626 | 1 | |
instance/config.py | TeamCGS/Dublin_Bikes | 0 | 6612857 | <reponame>TeamCGS/Dublin_Bikes
SECRET_KEY = 'some_secret'
SQLALCHEMY_DATABASE_URI = 'mysql+mysqlconnector://CGSdatabase:<EMAIL>/dublinbikes' | SECRET_KEY = 'some_secret'
SQLALCHEMY_DATABASE_URI = 'mysql+mysqlconnector://CGSdatabase:<EMAIL>/dublinbikes' | none | 1 | 1.197818 | 1 | |
nidaqmx_examples/every_n_samples_event.py | hboshnak/nidaqmx-python | 0 | 6612858 | import pprint
import nidaqmx
from nidaqmx.constants import AcquisitionType
pp = pprint.PrettyPrinter(indent=4)
with nidaqmx.Task() as task:
task.ai_channels.add_ai_voltage_chan("Dev1/ai0")
task.timing.cfg_samp_clk_timing(1000, sample_mode=AcquisitionType.CONTINUOUS)
samples = []
def callback(task_handle, every_n_samples_event_type,
number_of_samples, callback_data):
print('Every N Samples callback invoked.')
samples.extend(task.read(number_of_samples_per_channel=1000))
return 0
task.register_every_n_samples_acquired_into_buffer_event(
1000, callback)
task.start()
input('Running task. Press Enter to stop and see number of '
'accumulated samples.\n')
print(len(samples)) | import pprint
import nidaqmx
from nidaqmx.constants import AcquisitionType
pp = pprint.PrettyPrinter(indent=4)
with nidaqmx.Task() as task:
task.ai_channels.add_ai_voltage_chan("Dev1/ai0")
task.timing.cfg_samp_clk_timing(1000, sample_mode=AcquisitionType.CONTINUOUS)
samples = []
def callback(task_handle, every_n_samples_event_type,
number_of_samples, callback_data):
print('Every N Samples callback invoked.')
samples.extend(task.read(number_of_samples_per_channel=1000))
return 0
task.register_every_n_samples_acquired_into_buffer_event(
1000, callback)
task.start()
input('Running task. Press Enter to stop and see number of '
'accumulated samples.\n')
print(len(samples)) | none | 1 | 2.234329 | 2 | |
beetsplug/recordingdate.py | tweitzel/beets-recordingdate | 7 | 6612859 | <reponame>tweitzel/beets-recordingdate
# -- coding: utf-8 --
from __future__ import division, absolute_import, print_function
from beets.plugins import BeetsPlugin
from beets import autotag, library, ui, util, config
from beets.autotag import hooks
import mediafile
import musicbrainzngs
musicbrainzngs.set_useragent(
"Beets recording date plugin",
"0.2",
"http://github.com/tweitzel"
)
class RecordingDatePlugin(BeetsPlugin):
def __init__(self):
super(RecordingDatePlugin, self).__init__()
self.import_stages = [self.on_import]
self.config.add({
'auto': True,
'force': False,
'write_over': False,
'relations': {'edit', 'first track release', 'remaster'},
})
#grab global MusicBrainz host setting
musicbrainzngs.set_hostname(config['musicbrainz']['host'].get())
for recording_field in (
u'recording_year',
u'recording_month',
u'recording_day',
u'recording_disambiguation'):
field = mediafile.MediaField(
mediafile.MP3DescStorageStyle(recording_field),
mediafile.MP4StorageStyle('----:com.apple.iTunes:{}'.format(
recording_field)),
mediafile.StorageStyle(recording_field))
self.add_media_field(recording_field, field)
def commands(self):
recording_date_command = ui.Subcommand(
'recordingdate',
help="Retrieve the date of the first known recording of a track.",
aliases=['rdate'])
recording_date_command.func = self.func
return [recording_date_command]
def func(self, lib, opts, args):
query = ui.decargs(args)
self.recording_date(lib, query)
def recording_date(self, lib, query):
for item in lib.items(query):
self.process_file(item)
def on_import(self, session, task):
if self.config['auto']:
for item in task.imported_items():
self.process_file(item)
def process_file(self, item):
item_formatted = format(item)
if not item.mb_trackid:
self._log.info(u'Skipping track with no mb_trackid: {0}',
item_formatted)
return
# check for the recording_year and if it exists and not empty
# skips the track if force is not configured
if u'recording_year' in item and item.recording_year and not self.config['force']:
self._log.info(u'Skipping already processed track: {0}', item_formatted)
return
# Get the MusicBrainz recording info.
(recording_date, disambig) = self.get_first_recording_year(
item.mb_trackid)
if not recording_date:
self._log.info(u'Recording ID not found: {0} for track {0}',
item.mb_trackid,
item_formatted)
return
# Apply.
write = False
for recording_field in ('year', 'month', 'day'):
if recording_field in recording_date.keys():
item[u'recording_' +
recording_field] = recording_date[recording_field]
# writes over the year tag if configured
if self.config['write_over'] and recording_field == u'year':
item[recording_field] = recording_date[recording_field]
self._log.info(u'overwriting year field for: {0}', item_formatted)
write = True
if disambig is not None:
item[u'recording_disambiguation'] = str(disambig)
write = True
if write:
self._log.info(u'Applying changes to {0}', item_formatted)
item.write()
item.store()
else:
self._log.info(u'Error: {0}', recording_date)
def _make_date_values(self, date_str):
date_parts = date_str.split('-')
date_values = {}
for key in ('year', 'month', 'day'):
if date_parts:
date_part = date_parts.pop(0)
try:
date_num = int(date_part)
except ValueError:
continue
date_values[key] = date_num
return date_values
def _recurse_relations(self, mb_track_id, oldest_release, relation_type):
x = musicbrainzngs.get_recording_by_id(
mb_track_id,
includes=['releases', 'recording-rels'])
if 'recording-relation-list' in x['recording'].keys():
# recurse down into edits and remasters.
# Note remasters are deprecated in musicbrainz, but some entries
# may still exist.
for subrecording in x['recording']['recording-relation-list']:
if ('direction' in subrecording.keys() and
subrecording['direction'] == 'backward'):
continue
# skip new relationship category samples
if subrecording['type'] not in self.config['relations'].as_str_seq():
continue
if 'artist' in x['recording'].keys() and x['recording']['artist'] != subrecording['artist']:
self._log.info(
u'Skipping relation with arist {0} that does not match {1}',
subrecording['artist'], x['recording']['artist'])
continue
(oldest_release, relation_type) = self._recurse_relations(
subrecording['target'],
oldest_release,
subrecording['type'])
for release in x['recording']['release-list']:
if 'date' not in release.keys():
# A release without a date. Skip over it.
continue
release_date = self._make_date_values(release['date'])
if (oldest_release['year'] is None or
oldest_release['year'] > release_date['year']):
oldest_release = release_date
elif oldest_release['year'] == release_date['year']:
if ('month' in release_date.keys() and
'month' in oldest_release.keys() and
oldest_release['month'] > release_date['month']):
oldest_release = release_date
return (oldest_release, relation_type)
def get_first_recording_year(self, mb_track_id):
relation_type = None
oldest_release = {'year': None}
(oldest_release, relation_type) = self._recurse_relations(
mb_track_id,
oldest_release,
relation_type)
return (oldest_release, relation_type)
| # -- coding: utf-8 --
from __future__ import division, absolute_import, print_function
from beets.plugins import BeetsPlugin
from beets import autotag, library, ui, util, config
from beets.autotag import hooks
import mediafile
import musicbrainzngs
musicbrainzngs.set_useragent(
"Beets recording date plugin",
"0.2",
"http://github.com/tweitzel"
)
class RecordingDatePlugin(BeetsPlugin):
def __init__(self):
super(RecordingDatePlugin, self).__init__()
self.import_stages = [self.on_import]
self.config.add({
'auto': True,
'force': False,
'write_over': False,
'relations': {'edit', 'first track release', 'remaster'},
})
#grab global MusicBrainz host setting
musicbrainzngs.set_hostname(config['musicbrainz']['host'].get())
for recording_field in (
u'recording_year',
u'recording_month',
u'recording_day',
u'recording_disambiguation'):
field = mediafile.MediaField(
mediafile.MP3DescStorageStyle(recording_field),
mediafile.MP4StorageStyle('----:com.apple.iTunes:{}'.format(
recording_field)),
mediafile.StorageStyle(recording_field))
self.add_media_field(recording_field, field)
def commands(self):
recording_date_command = ui.Subcommand(
'recordingdate',
help="Retrieve the date of the first known recording of a track.",
aliases=['rdate'])
recording_date_command.func = self.func
return [recording_date_command]
def func(self, lib, opts, args):
query = ui.decargs(args)
self.recording_date(lib, query)
def recording_date(self, lib, query):
for item in lib.items(query):
self.process_file(item)
def on_import(self, session, task):
if self.config['auto']:
for item in task.imported_items():
self.process_file(item)
def process_file(self, item):
item_formatted = format(item)
if not item.mb_trackid:
self._log.info(u'Skipping track with no mb_trackid: {0}',
item_formatted)
return
# check for the recording_year and if it exists and not empty
# skips the track if force is not configured
if u'recording_year' in item and item.recording_year and not self.config['force']:
self._log.info(u'Skipping already processed track: {0}', item_formatted)
return
# Get the MusicBrainz recording info.
(recording_date, disambig) = self.get_first_recording_year(
item.mb_trackid)
if not recording_date:
self._log.info(u'Recording ID not found: {0} for track {0}',
item.mb_trackid,
item_formatted)
return
# Apply.
write = False
for recording_field in ('year', 'month', 'day'):
if recording_field in recording_date.keys():
item[u'recording_' +
recording_field] = recording_date[recording_field]
# writes over the year tag if configured
if self.config['write_over'] and recording_field == u'year':
item[recording_field] = recording_date[recording_field]
self._log.info(u'overwriting year field for: {0}', item_formatted)
write = True
if disambig is not None:
item[u'recording_disambiguation'] = str(disambig)
write = True
if write:
self._log.info(u'Applying changes to {0}', item_formatted)
item.write()
item.store()
else:
self._log.info(u'Error: {0}', recording_date)
def _make_date_values(self, date_str):
date_parts = date_str.split('-')
date_values = {}
for key in ('year', 'month', 'day'):
if date_parts:
date_part = date_parts.pop(0)
try:
date_num = int(date_part)
except ValueError:
continue
date_values[key] = date_num
return date_values
def _recurse_relations(self, mb_track_id, oldest_release, relation_type):
x = musicbrainzngs.get_recording_by_id(
mb_track_id,
includes=['releases', 'recording-rels'])
if 'recording-relation-list' in x['recording'].keys():
# recurse down into edits and remasters.
# Note remasters are deprecated in musicbrainz, but some entries
# may still exist.
for subrecording in x['recording']['recording-relation-list']:
if ('direction' in subrecording.keys() and
subrecording['direction'] == 'backward'):
continue
# skip new relationship category samples
if subrecording['type'] not in self.config['relations'].as_str_seq():
continue
if 'artist' in x['recording'].keys() and x['recording']['artist'] != subrecording['artist']:
self._log.info(
u'Skipping relation with arist {0} that does not match {1}',
subrecording['artist'], x['recording']['artist'])
continue
(oldest_release, relation_type) = self._recurse_relations(
subrecording['target'],
oldest_release,
subrecording['type'])
for release in x['recording']['release-list']:
if 'date' not in release.keys():
# A release without a date. Skip over it.
continue
release_date = self._make_date_values(release['date'])
if (oldest_release['year'] is None or
oldest_release['year'] > release_date['year']):
oldest_release = release_date
elif oldest_release['year'] == release_date['year']:
if ('month' in release_date.keys() and
'month' in oldest_release.keys() and
oldest_release['month'] > release_date['month']):
oldest_release = release_date
return (oldest_release, relation_type)
def get_first_recording_year(self, mb_track_id):
relation_type = None
oldest_release = {'year': None}
(oldest_release, relation_type) = self._recurse_relations(
mb_track_id,
oldest_release,
relation_type)
return (oldest_release, relation_type) | en | 0.843542 | # -- coding: utf-8 -- #grab global MusicBrainz host setting # check for the recording_year and if it exists and not empty # skips the track if force is not configured # Get the MusicBrainz recording info. # Apply. # writes over the year tag if configured # recurse down into edits and remasters. # Note remasters are deprecated in musicbrainz, but some entries # may still exist. # skip new relationship category samples # A release without a date. Skip over it. | 2.070678 | 2 |
mod_data.py | dafatskin/CEP_FinalProject_2018 | 0 | 6612860 | <gh_stars>0
#############
#data module#
#############
import openpyxl
import timeit
def configure_variables(config_file):
"""
Returns all the variables stored in the config file
{data type:{data}}
"""
print("configuring variables...")
dic = {} #will return to config_vars
try: #test (and open) file
fp = open(config_file, "r")
except IOError:
print(e)
print("The config file has been moved or renamed.")
print("Please return it back to this directory or rename it to 'Config file'.")
data = fp.readlines()
fp.close()
section = "None" #different section = different data format
to_add = {}
for line in data: #each line
if line[:2] == "--" and line[8:].strip("\n") != section: #new section?
section = line[8:].strip("\n")
to_add = {}
elif line[:2] == "--" and line[8:].strip("\n") == section: #end of section?
dic[section[:-2]] = to_add
section = "None"
to_add = {}
else:
if section == "data formats--": #section specifying data form
elements = line.strip("\n").split(":")
elements.append(elements[1].split(","))
del elements[1]
areas = []
for i in range(len(elements[1])): #for each container
var = elements[1][i].split("-")
areas.append({"slice_coords":[var[0], var[1]], "ID_header":var[2]})
elements.append(areas)
del elements[1]
to_add[elements[0]] = elements[1]
elif section == "file names--": #section specifying file names
elements = line.strip("\n").split(":")
to_add[elements[0]] = elements[1]
elif section == "scoring details--": #section specifying scoring details
elements = line.strip("\n").split(":")
elements.append(elements[1].split(","))
del elements[1]
details = {}
for i in range(len(elements[1])): #for each detail
if elements[1][i] == "on" or elements[1][i] == "off":
details["rankscoring"] = elements[1][i]
else:
var = elements[1][i].split("-")
lst = ["Height", "Weight", "30m", "IPU", "SBJ", "1km"]
details[lst[i]] = {"default":var[0], "other":var[1], "criteria":var[2]}
elements.append(details)
del elements[1]
to_add[elements[0]] = elements[1]
elif section == "reassignment variables--":
elements = line.strip("\n").split(":")
to_add[elements[0]] = elements[1]
elif section == "None":
pass
else:
print("Error occured on line 50: section '{}' not found".format(section))
return dic
def extract_data(file, config_vars):
"""
Reads all data from the file specified in config file, and writes the data to a nested dictionary
{sheet:{ID:{data}}
Returns this dictionary
"""
print("extracting data...")
dic = {} #will return to master_list
try: #test (and open) file
fp = open(file, "r")
except IOError as e:
print(e)
print("File: '{}' not found. Please check the config file.".format(file))
wb = openpyxl.load_workbook(file) #open workbook
sheet_names = wb.sheetnames #get names of all sheets
for name in sheet_names: #for every sheet in the workbook
sheet = wb[name] #define the worksheet
sheet_list = []
areas = config_vars["data formats"][name]
sheet_list = extract_sheet(file, name, areas) #see extract_sheet
dic[name] = sheet_list #define sheet as a list of data containers
fp.close()
return dic
def extract_sheet(file, sheetname, areas):
"""
Extracts an individual sheet, used in extract_data
"""
lst = [] #will return to sheet_data
try: #test (and open) file
fp = open(file, "r")
except IOError as e:
print(e)
print("File: '{}' not found. Please check the config file.".format(file))
wb = openpyxl.load_workbook(file) #open workbook
try: #test (and open) spreadsheet
ws = wb[sheetname]
except KeyError as e:
print(e)
print("Sheet: '{}' not found. Please check the config file.".format(sheetname))
for i in areas: #for each area
area = ws[i["slice_coords"][0]:i["slice_coords"][1]]
area_dic = {}
ID_value = ""
for row in area: #for each row in area
row_dic = {}
for cell in row: #for each cell in row
col_letter = cell.column #this be column of cell
header = ws[col_letter + i["slice_coords"][0][1:]].value #this be header value of cell
if header == i["ID_header"]: #if its the ID column
ID_value = cell.value #get the ID value
else:
row_dic[header] = cell.value #define column of ID as value
area_dic[ID_value] = row_dic #define ID of area as column
lst.append(area_dic) #add to list of areas
fp.close()
return lst
def data_to_LOS(dic):
"""
Returns a list of all students in directory
[name]
"""
final_lst = []
dic_classlist = dic["classlist"][0] #relevant sheet
for key, value in classlist.items(): #name:data
final_lst.append(key)
del final_lst[0]
return final_lst
def data_to_LOC(dic):
"""
Returns a dictionary of core cca choices of each student
{rank of choice:{student:cca}}
"""
final_dic = {} #will return to list_of_firsts
dic_choices = dic["choices"][0] #the relevant sheet
pdic = {"LO1":"CORE1", "LO2":"CORE2", "LO3":"CORE3", "LO4":"CORE4", "LO5":"CORE5", "LO6":"CORE6", "LO7":"CORE7", "LO8":"CORE8", "LO9":"CORE9"}
qdic = {}
for key, value in pdic.items(): #for each rank:name of rank
for NRIC, choices in dic_choices.items(): #for each student:choices
choice = ""
if choices[value] == "01SCOUT": #these 2 values have changes later on. Standardising
choice = "O1"
elif choices[value] == "02SCOUT":
choice = "O2"
else:
choice = choices[value]
qdic[NRIC] = choice
final_dic[key] = qdic
qdic = {}
return final_dic
def data_to_merit(dic):
"""
Returns a dictionary of merit cca choices of each student
{student:merit cca}
"""
final_dic = {}
dic_choices = dic["choices"][0] #relevant sheet
for NRIC, choices in dic_choices.items():
final_dic[NRIC] = choices["MERIT1"] #just take first choice; no limit for merit CCAs
del final_dic["NRIC"]
return final_dic
def data_to_MEP(dic):
"""
Returns a list of MEP students
[name]
"""
final_lst = []
dic_MEP = dic["MEP"][0] #relevant sheet
for key, value in dic_MEP.items():
final_lst.append(key) #just append the name
del final_lst[0]
return final_lst
def data_to_DSA(dic):
"""
Returns a dictionary of DSA students
{name:CCA}
"""
final_dic = {} #will return to DSA_students
dic_DSA = dic["DSA"][0] #the relevant sheet
for key, value in dic_DSA.items():
final_dic[key] = value["Sports"]
del final_dic["Name"]
return final_dic
def data_to_quota(dic):
"""
Returns a dictionary of quota of each CCA
{CCA type:{CCA:quota}}
"""
final_dic = {} #will return to CCA_quota
dic_quota = dic["ccaquota"] #the relevant sheet
for dic in dic_quota: #SPORTS, UNIFORMED GROUPS, etc.
groupname = ""
groupdic = {}
for key, value in dic.items(): #SPORTS: {}
if value[None] == None:
final_dic[groupname] = groupdic
groupname = key
groupdic = {}
else:
groupdic[key] = value["QUOTA"]
final_dic[groupname] = groupdic
del final_dic[""]
return final_dic
def data_to_psychomotor(dic):
"""
Returns a dictionary of psychomotor details of each student
{name:{details}}
"""
final_dic = {} #will return to psychomotor
dic_psymo = dic["psychomotor"][0] #the relevant sheet
for key, value in dic_psymo.items():
del value["AGE"]
final_dic[key] = value
del final_dic["Name"]
return final_dic
def data_to_CCA(dic, CCA):
"""
Returns a dictionary of ranking details of each CCA
{name:{placeholder:rank}
"""
final_dic = {}
dic_CCA = dic[CCA][0] #the cca sheet
for key, value in dic_CCA.items():
try: #delete all the useless info
del value["Class"]
except KeyError:
del value["CLASS"]
try:
del value["Category"]
except:
pass
final_dic[key] = value
try:
del final_dic["Name"]
except KeyError:
pass
return final_dic
def data_to_nameCat(LOC, quota, rank, CCA):
"""
Returns a dictionary of the category of a CCA
"""
final_dic = {}
dic_quota = quota.dic #dictionary
cat = ""
for category, dic_CCAs in dic_quota.items(): #for each category
for cca, quota in dic_CCAs.items(): #for each cca
if cca == CCA:
cat = category #variable = category of cca
else:
pass
CCA_LOC = {} #reverse LOC
for name, cca in LOC.dic[rank].items():
try:
lst = CCA_LOC[cca]
lst.append(name)
CCA_LOC[cca] = lst
except KeyError:
CCA_LOC[cca] = [name]
try:
for name in CCA_LOC[CCA]:
final_dic[name] = cat #name:category
except KeyError:
pass
try:
del final_dic["Name"]
except KeyError:
pass
return final_dic
def data_to_nameClass(master_list):
"""
Returns a dictionary of students' classes
{name:class}
"""
final_dic = {}
dic_classlist = master_list["classlist"][0] #relevant sheet
for name, data in dic_classlist.items():
final_dic[name] = data["CLASS"]
del final_dic["NAME"]
return final_dic
| #############
#data module#
#############
import openpyxl
import timeit
def configure_variables(config_file):
"""
Returns all the variables stored in the config file
{data type:{data}}
"""
print("configuring variables...")
dic = {} #will return to config_vars
try: #test (and open) file
fp = open(config_file, "r")
except IOError:
print(e)
print("The config file has been moved or renamed.")
print("Please return it back to this directory or rename it to 'Config file'.")
data = fp.readlines()
fp.close()
section = "None" #different section = different data format
to_add = {}
for line in data: #each line
if line[:2] == "--" and line[8:].strip("\n") != section: #new section?
section = line[8:].strip("\n")
to_add = {}
elif line[:2] == "--" and line[8:].strip("\n") == section: #end of section?
dic[section[:-2]] = to_add
section = "None"
to_add = {}
else:
if section == "data formats--": #section specifying data form
elements = line.strip("\n").split(":")
elements.append(elements[1].split(","))
del elements[1]
areas = []
for i in range(len(elements[1])): #for each container
var = elements[1][i].split("-")
areas.append({"slice_coords":[var[0], var[1]], "ID_header":var[2]})
elements.append(areas)
del elements[1]
to_add[elements[0]] = elements[1]
elif section == "file names--": #section specifying file names
elements = line.strip("\n").split(":")
to_add[elements[0]] = elements[1]
elif section == "scoring details--": #section specifying scoring details
elements = line.strip("\n").split(":")
elements.append(elements[1].split(","))
del elements[1]
details = {}
for i in range(len(elements[1])): #for each detail
if elements[1][i] == "on" or elements[1][i] == "off":
details["rankscoring"] = elements[1][i]
else:
var = elements[1][i].split("-")
lst = ["Height", "Weight", "30m", "IPU", "SBJ", "1km"]
details[lst[i]] = {"default":var[0], "other":var[1], "criteria":var[2]}
elements.append(details)
del elements[1]
to_add[elements[0]] = elements[1]
elif section == "reassignment variables--":
elements = line.strip("\n").split(":")
to_add[elements[0]] = elements[1]
elif section == "None":
pass
else:
print("Error occured on line 50: section '{}' not found".format(section))
return dic
def extract_data(file, config_vars):
"""
Reads all data from the file specified in config file, and writes the data to a nested dictionary
{sheet:{ID:{data}}
Returns this dictionary
"""
print("extracting data...")
dic = {} #will return to master_list
try: #test (and open) file
fp = open(file, "r")
except IOError as e:
print(e)
print("File: '{}' not found. Please check the config file.".format(file))
wb = openpyxl.load_workbook(file) #open workbook
sheet_names = wb.sheetnames #get names of all sheets
for name in sheet_names: #for every sheet in the workbook
sheet = wb[name] #define the worksheet
sheet_list = []
areas = config_vars["data formats"][name]
sheet_list = extract_sheet(file, name, areas) #see extract_sheet
dic[name] = sheet_list #define sheet as a list of data containers
fp.close()
return dic
def extract_sheet(file, sheetname, areas):
"""
Extracts an individual sheet, used in extract_data
"""
lst = [] #will return to sheet_data
try: #test (and open) file
fp = open(file, "r")
except IOError as e:
print(e)
print("File: '{}' not found. Please check the config file.".format(file))
wb = openpyxl.load_workbook(file) #open workbook
try: #test (and open) spreadsheet
ws = wb[sheetname]
except KeyError as e:
print(e)
print("Sheet: '{}' not found. Please check the config file.".format(sheetname))
for i in areas: #for each area
area = ws[i["slice_coords"][0]:i["slice_coords"][1]]
area_dic = {}
ID_value = ""
for row in area: #for each row in area
row_dic = {}
for cell in row: #for each cell in row
col_letter = cell.column #this be column of cell
header = ws[col_letter + i["slice_coords"][0][1:]].value #this be header value of cell
if header == i["ID_header"]: #if its the ID column
ID_value = cell.value #get the ID value
else:
row_dic[header] = cell.value #define column of ID as value
area_dic[ID_value] = row_dic #define ID of area as column
lst.append(area_dic) #add to list of areas
fp.close()
return lst
def data_to_LOS(dic):
"""
Returns a list of all students in directory
[name]
"""
final_lst = []
dic_classlist = dic["classlist"][0] #relevant sheet
for key, value in classlist.items(): #name:data
final_lst.append(key)
del final_lst[0]
return final_lst
def data_to_LOC(dic):
"""
Returns a dictionary of core cca choices of each student
{rank of choice:{student:cca}}
"""
final_dic = {} #will return to list_of_firsts
dic_choices = dic["choices"][0] #the relevant sheet
pdic = {"LO1":"CORE1", "LO2":"CORE2", "LO3":"CORE3", "LO4":"CORE4", "LO5":"CORE5", "LO6":"CORE6", "LO7":"CORE7", "LO8":"CORE8", "LO9":"CORE9"}
qdic = {}
for key, value in pdic.items(): #for each rank:name of rank
for NRIC, choices in dic_choices.items(): #for each student:choices
choice = ""
if choices[value] == "01SCOUT": #these 2 values have changes later on. Standardising
choice = "O1"
elif choices[value] == "02SCOUT":
choice = "O2"
else:
choice = choices[value]
qdic[NRIC] = choice
final_dic[key] = qdic
qdic = {}
return final_dic
def data_to_merit(dic):
"""
Returns a dictionary of merit cca choices of each student
{student:merit cca}
"""
final_dic = {}
dic_choices = dic["choices"][0] #relevant sheet
for NRIC, choices in dic_choices.items():
final_dic[NRIC] = choices["MERIT1"] #just take first choice; no limit for merit CCAs
del final_dic["NRIC"]
return final_dic
def data_to_MEP(dic):
"""
Returns a list of MEP students
[name]
"""
final_lst = []
dic_MEP = dic["MEP"][0] #relevant sheet
for key, value in dic_MEP.items():
final_lst.append(key) #just append the name
del final_lst[0]
return final_lst
def data_to_DSA(dic):
"""
Returns a dictionary of DSA students
{name:CCA}
"""
final_dic = {} #will return to DSA_students
dic_DSA = dic["DSA"][0] #the relevant sheet
for key, value in dic_DSA.items():
final_dic[key] = value["Sports"]
del final_dic["Name"]
return final_dic
def data_to_quota(dic):
"""
Returns a dictionary of quota of each CCA
{CCA type:{CCA:quota}}
"""
final_dic = {} #will return to CCA_quota
dic_quota = dic["ccaquota"] #the relevant sheet
for dic in dic_quota: #SPORTS, UNIFORMED GROUPS, etc.
groupname = ""
groupdic = {}
for key, value in dic.items(): #SPORTS: {}
if value[None] == None:
final_dic[groupname] = groupdic
groupname = key
groupdic = {}
else:
groupdic[key] = value["QUOTA"]
final_dic[groupname] = groupdic
del final_dic[""]
return final_dic
def data_to_psychomotor(dic):
"""
Returns a dictionary of psychomotor details of each student
{name:{details}}
"""
final_dic = {} #will return to psychomotor
dic_psymo = dic["psychomotor"][0] #the relevant sheet
for key, value in dic_psymo.items():
del value["AGE"]
final_dic[key] = value
del final_dic["Name"]
return final_dic
def data_to_CCA(dic, CCA):
"""
Returns a dictionary of ranking details of each CCA
{name:{placeholder:rank}
"""
final_dic = {}
dic_CCA = dic[CCA][0] #the cca sheet
for key, value in dic_CCA.items():
try: #delete all the useless info
del value["Class"]
except KeyError:
del value["CLASS"]
try:
del value["Category"]
except:
pass
final_dic[key] = value
try:
del final_dic["Name"]
except KeyError:
pass
return final_dic
def data_to_nameCat(LOC, quota, rank, CCA):
"""
Returns a dictionary of the category of a CCA
"""
final_dic = {}
dic_quota = quota.dic #dictionary
cat = ""
for category, dic_CCAs in dic_quota.items(): #for each category
for cca, quota in dic_CCAs.items(): #for each cca
if cca == CCA:
cat = category #variable = category of cca
else:
pass
CCA_LOC = {} #reverse LOC
for name, cca in LOC.dic[rank].items():
try:
lst = CCA_LOC[cca]
lst.append(name)
CCA_LOC[cca] = lst
except KeyError:
CCA_LOC[cca] = [name]
try:
for name in CCA_LOC[CCA]:
final_dic[name] = cat #name:category
except KeyError:
pass
try:
del final_dic["Name"]
except KeyError:
pass
return final_dic
def data_to_nameClass(master_list):
"""
Returns a dictionary of students' classes
{name:class}
"""
final_dic = {}
dic_classlist = master_list["classlist"][0] #relevant sheet
for name, data in dic_classlist.items():
final_dic[name] = data["CLASS"]
del final_dic["NAME"]
return final_dic | en | 0.659044 | ############# #data module# ############# Returns all the variables stored in the config file
{data type:{data}} #will return to config_vars #test (and open) file #different section = different data format #each line #new section? #end of section? #section specifying data form #for each container #section specifying file names #section specifying scoring details #for each detail Reads all data from the file specified in config file, and writes the data to a nested dictionary
{sheet:{ID:{data}}
Returns this dictionary #will return to master_list #test (and open) file #open workbook #get names of all sheets #for every sheet in the workbook #define the worksheet #see extract_sheet #define sheet as a list of data containers Extracts an individual sheet, used in extract_data #will return to sheet_data #test (and open) file #open workbook #test (and open) spreadsheet #for each area #for each row in area #for each cell in row #this be column of cell #this be header value of cell #if its the ID column #get the ID value #define column of ID as value #define ID of area as column #add to list of areas Returns a list of all students in directory
[name] #relevant sheet #name:data Returns a dictionary of core cca choices of each student
{rank of choice:{student:cca}} #will return to list_of_firsts #the relevant sheet #for each rank:name of rank #for each student:choices #these 2 values have changes later on. Standardising Returns a dictionary of merit cca choices of each student
{student:merit cca} #relevant sheet #just take first choice; no limit for merit CCAs Returns a list of MEP students
[name] #relevant sheet #just append the name Returns a dictionary of DSA students
{name:CCA} #will return to DSA_students #the relevant sheet Returns a dictionary of quota of each CCA
{CCA type:{CCA:quota}} #will return to CCA_quota #the relevant sheet #SPORTS, UNIFORMED GROUPS, etc. #SPORTS: {} Returns a dictionary of psychomotor details of each student
{name:{details}} #will return to psychomotor #the relevant sheet Returns a dictionary of ranking details of each CCA
{name:{placeholder:rank} #the cca sheet #delete all the useless info Returns a dictionary of the category of a CCA #dictionary #for each category #for each cca #variable = category of cca #reverse LOC #name:category Returns a dictionary of students' classes
{name:class} #relevant sheet | 2.959138 | 3 |
run.py | cdvx/etl-python | 0 | 6612861 | <reponame>cdvx/etl-python
"""entry point to run pipeline"""
from pipeline import main
if __name__ == "__main__":
main() | """entry point to run pipeline"""
from pipeline import main
if __name__ == "__main__":
main() | en | 0.829544 | entry point to run pipeline | 1.020341 | 1 |
projects/WSL/wsl/__init__.py | XuYunqiu/DRN-WSOD-pytorch | 40 | 6612862 | from .modeling import (
build_vgg_backbone,
build_ws_resnet_backbone,
)
| from .modeling import (
build_vgg_backbone,
build_ws_resnet_backbone,
)
| none | 1 | 1.000602 | 1 | |
pycatia/hybrid_shape_interfaces/hybrid_shape_revol.py | evereux/catia_python | 90 | 6612863 | <reponame>evereux/catia_python
#! usr/bin/python3.6
"""
Module initially auto generated using V5Automation files from CATIA V5 R28 on 2020-07-06 14:02:20.222384
.. warning::
The notes denoted "CAA V5 Visual Basic Help" are to be used as reference only.
They are there as a guide as to how the visual basic / catscript functions work
and thus help debugging in pycatia.
"""
from pycatia.in_interfaces.reference import Reference
from pycatia.knowledge_interfaces.angle import Angle
from pycatia.mec_mod_interfaces.hybrid_shape import HybridShape
class HybridShapeRevol(HybridShape):
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| System.IUnknown
| System.IDispatch
| System.CATBaseUnknown
| System.CATBaseDispatch
| System.AnyObject
| MecModInterfaces.HybridShape
| HybridShapeRevol
|
| The Revol feature : an Revol is made up of a face to process and one Revol parameter.
| Role: To access the data of the hybrid shape revol feature
| object.
|
| LICENSING INFORMATION: Creation of volume result requires GSO
| License
| if GSO License is not granted , setting of Volume context has not
| effect
"""
def __init__(self, com_object):
super().__init__(com_object)
self.hybrid_shape_revol = com_object
@property
def axis(self) -> Reference:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property Axis() As Reference
|
| Role: To get_Axis on the object.
|
| Parameters:
|
| oDir
| return value for CATScript applications, with (IDLRETVAL) function
| type
|
| See also:
| Reference
| Returns:
| HRESULT S_OK if Ok E_FAIL else return error code for C++
| Implementations
| See also:
| HybridShapeFactory
:return: Reference
:rtype: Reference
"""
return Reference(self.hybrid_shape_revol.Axis)
@axis.setter
def axis(self, reference_axis: Reference):
"""
:param Reference reference_axis:
"""
self.hybrid_shape_revol.Axis = reference_axis.com_object
@property
def begin_angle(self) -> Angle:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property BeginAngle() As Angle (Read Only)
|
| Role: To get_BeginAngle on the object.
|
| Parameters:
|
| oAngle
| return value for CATScript applications, with (IDLRETVAL) function
| type
|
| See also:
| Angle
| Returns:
| HRESULT S_OK if Ok E_FAIL else return error code for C++
| Implementations
| See also:
| HybridShapeFactory
:return: Angle
:rtype: Angle
"""
return Angle(self.hybrid_shape_revol.BeginAngle)
@property
def context(self) -> int:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property Context() As long
|
| Returns or sets the context on Revolve feature.
| Legal values:
|
| 0 This option creates surface of revolution.
| 1 This option creates volume of revolution.
|
|
| Note: Setting volume result requires GSO License.
|
| Example:
| This example retrieves in oContext the context for the Revol hybrid
| shape feature.
|
| Dim oContext
| Set oContext = Revol.Context
:return: int
:rtype: int
"""
return self.hybrid_shape_revol.Context
@context.setter
def context(self, value: int):
"""
:param int value:
"""
self.hybrid_shape_revol.Context = value
@property
def end_angle(self) -> Angle:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property EndAngle() As Angle (Read Only)
|
| Role: To get_EndAngle on the object.
|
| Parameters:
|
| oAngle
| return value for CATScript applications, with (IDLRETVAL) function
| type
|
| See also:
| Angle
| Returns:
| HRESULT S_OK if Ok E_FAIL else return error code for C++
| Implementations
| See also:
| HybridShapeFactory
:return: Angle
:rtype: Angle
"""
return Angle(self.hybrid_shape_revol.EndAngle)
@property
def first_limit_type(self) -> int:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property FirstLimitType() As long
|
| Returns or sets the First limit type.
| Legal values:
|
| 0
| Unknown Limit type.
| 1
| Limit type is Dimension. It implies that limit is defined by
| length
| 2
| Limit type is UptoElement. It implies that limit is defined by a
| geometrical element
|
| Example:
| This example retrieves in oLim1Type the first limit type for the Revolve
| hybrid shape feature.
|
| Dim oLim1Type
| Set oLim1Type = Revolve.FirstLimitType
:return: int
:rtype: int
"""
return self.hybrid_shape_revol.FirstLimitType
@first_limit_type.setter
def first_limit_type(self, value: int):
"""
:param int value:
"""
self.hybrid_shape_revol.FirstLimitType = value
@property
def first_upto_element(self) -> Reference:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property FirstUptoElement() As Reference
|
| Returns or sets the First up-to element used to limit
| Revolution.
|
| Example:
| This example retrieves in Lim1Elem the First up-to element for the
| Revolve hybrid shape feature.
|
| Dim Lim1Elem As Reference
| Set Lim1Elem = Revolve.FirstUptoElement
:return: Reference
:rtype: Reference
"""
return Reference(self.hybrid_shape_revol.FirstUptoElement)
@first_upto_element.setter
def first_upto_element(self, reference_element: Reference):
"""
:param Reference reference_element:
"""
self.hybrid_shape_revol.FirstUptoElement = reference_element.com_object
@property
def orientation(self) -> bool:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property Orientation(boolean iOrientation)
|
| Gets or sets orientation of the revolution.
| Orientation
| TRUE : The natural orientation of the axis is taken.
| FALSE : The opposite orientation is taken This example retrieves in IsInverted orientation of the
| revolution for the Revol hybrid shape feature.
|
| Dim IsInverted As boolean
| IsInverted = Revol.Orientation
:return: bool
:rtype: bool
"""
return self.hybrid_shape_revol.Orientation
@orientation.setter
def orientation(self, value: bool):
"""
:param bool value:
"""
self.hybrid_shape_revol.Orientation = value
@property
def profile(self) -> Reference:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property Profil() As Reference
|
| Role: To get_Profil on the object.
|
| Parameters:
|
| oProfil
| return value for CATScript applications, with (IDLRETVAL) function
| type
|
| See also:
| Reference
| Returns:
| HRESULT S_OK if Ok E_FAIL else return error code for C++
| Implementations
| See also:
| HybridShapeFactory
:return: Reference
:rtype: Reference
"""
return Reference(self.hybrid_shape_revol.Profil)
@profile.setter
def profile(self, reference_profile: Reference):
"""
:param Reference reference_profile:
"""
self.hybrid_shape_revol.Profil = reference_profile.com_object
@property
def second_limit_type(self) -> int:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property SecondLimitType() As long
|
| Returns or sets the Second limit type.
| Legal values:
|
| 0
| Unknown Limit type.
| 1
| Limit type is Dimension. It implies that limit is defined by
| length
| 2
| Limit type is UptoElement. It implies that limit is defined by a
| geometrical element
|
| Example:
| This example retrieves in oLim2Type the second limit type for the Revolve
| hybrid shape feature.
|
| Dim oLim2Type
| Set oLim2Type = RevolveSecondLimitType
:return: int
:rtype: int
"""
return self.hybrid_shape_revol.SecondLimitType
@second_limit_type.setter
def second_limit_type(self, value: int):
"""
:param int value:
"""
self.hybrid_shape_revol.SecondLimitType = value
@property
def second_upto_element(self) -> Reference:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property SecondUptoElement() As Reference
|
| Returns or sets the Second up-to element used to limit
| Revolution.
|
| Example:
| This example retrieves in Lim2Elem the Second up-to element for the
| Revolve hybrid shape feature.
|
| Dim Lim2Elem As Reference
| Set Lim2Elem = Revolve.SecondUptoElement
:return: Reference
:rtype: Reference
"""
return Reference(self.hybrid_shape_revol.SecondUptoElement)
@second_upto_element.setter
def second_upto_element(self, reference_element: Reference):
"""
:param Reference reference_element:
"""
self.hybrid_shape_revol.SecondUptoElement = reference_element.com_object
def __repr__(self):
return f'HybridShapeRevol(name="{self.name}")'
| #! usr/bin/python3.6
"""
Module initially auto generated using V5Automation files from CATIA V5 R28 on 2020-07-06 14:02:20.222384
.. warning::
The notes denoted "CAA V5 Visual Basic Help" are to be used as reference only.
They are there as a guide as to how the visual basic / catscript functions work
and thus help debugging in pycatia.
"""
from pycatia.in_interfaces.reference import Reference
from pycatia.knowledge_interfaces.angle import Angle
from pycatia.mec_mod_interfaces.hybrid_shape import HybridShape
class HybridShapeRevol(HybridShape):
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| System.IUnknown
| System.IDispatch
| System.CATBaseUnknown
| System.CATBaseDispatch
| System.AnyObject
| MecModInterfaces.HybridShape
| HybridShapeRevol
|
| The Revol feature : an Revol is made up of a face to process and one Revol parameter.
| Role: To access the data of the hybrid shape revol feature
| object.
|
| LICENSING INFORMATION: Creation of volume result requires GSO
| License
| if GSO License is not granted , setting of Volume context has not
| effect
"""
def __init__(self, com_object):
super().__init__(com_object)
self.hybrid_shape_revol = com_object
@property
def axis(self) -> Reference:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property Axis() As Reference
|
| Role: To get_Axis on the object.
|
| Parameters:
|
| oDir
| return value for CATScript applications, with (IDLRETVAL) function
| type
|
| See also:
| Reference
| Returns:
| HRESULT S_OK if Ok E_FAIL else return error code for C++
| Implementations
| See also:
| HybridShapeFactory
:return: Reference
:rtype: Reference
"""
return Reference(self.hybrid_shape_revol.Axis)
@axis.setter
def axis(self, reference_axis: Reference):
"""
:param Reference reference_axis:
"""
self.hybrid_shape_revol.Axis = reference_axis.com_object
@property
def begin_angle(self) -> Angle:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property BeginAngle() As Angle (Read Only)
|
| Role: To get_BeginAngle on the object.
|
| Parameters:
|
| oAngle
| return value for CATScript applications, with (IDLRETVAL) function
| type
|
| See also:
| Angle
| Returns:
| HRESULT S_OK if Ok E_FAIL else return error code for C++
| Implementations
| See also:
| HybridShapeFactory
:return: Angle
:rtype: Angle
"""
return Angle(self.hybrid_shape_revol.BeginAngle)
@property
def context(self) -> int:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property Context() As long
|
| Returns or sets the context on Revolve feature.
| Legal values:
|
| 0 This option creates surface of revolution.
| 1 This option creates volume of revolution.
|
|
| Note: Setting volume result requires GSO License.
|
| Example:
| This example retrieves in oContext the context for the Revol hybrid
| shape feature.
|
| Dim oContext
| Set oContext = Revol.Context
:return: int
:rtype: int
"""
return self.hybrid_shape_revol.Context
@context.setter
def context(self, value: int):
"""
:param int value:
"""
self.hybrid_shape_revol.Context = value
@property
def end_angle(self) -> Angle:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property EndAngle() As Angle (Read Only)
|
| Role: To get_EndAngle on the object.
|
| Parameters:
|
| oAngle
| return value for CATScript applications, with (IDLRETVAL) function
| type
|
| See also:
| Angle
| Returns:
| HRESULT S_OK if Ok E_FAIL else return error code for C++
| Implementations
| See also:
| HybridShapeFactory
:return: Angle
:rtype: Angle
"""
return Angle(self.hybrid_shape_revol.EndAngle)
@property
def first_limit_type(self) -> int:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property FirstLimitType() As long
|
| Returns or sets the First limit type.
| Legal values:
|
| 0
| Unknown Limit type.
| 1
| Limit type is Dimension. It implies that limit is defined by
| length
| 2
| Limit type is UptoElement. It implies that limit is defined by a
| geometrical element
|
| Example:
| This example retrieves in oLim1Type the first limit type for the Revolve
| hybrid shape feature.
|
| Dim oLim1Type
| Set oLim1Type = Revolve.FirstLimitType
:return: int
:rtype: int
"""
return self.hybrid_shape_revol.FirstLimitType
@first_limit_type.setter
def first_limit_type(self, value: int):
"""
:param int value:
"""
self.hybrid_shape_revol.FirstLimitType = value
@property
def first_upto_element(self) -> Reference:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property FirstUptoElement() As Reference
|
| Returns or sets the First up-to element used to limit
| Revolution.
|
| Example:
| This example retrieves in Lim1Elem the First up-to element for the
| Revolve hybrid shape feature.
|
| Dim Lim1Elem As Reference
| Set Lim1Elem = Revolve.FirstUptoElement
:return: Reference
:rtype: Reference
"""
return Reference(self.hybrid_shape_revol.FirstUptoElement)
@first_upto_element.setter
def first_upto_element(self, reference_element: Reference):
"""
:param Reference reference_element:
"""
self.hybrid_shape_revol.FirstUptoElement = reference_element.com_object
@property
def orientation(self) -> bool:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property Orientation(boolean iOrientation)
|
| Gets or sets orientation of the revolution.
| Orientation
| TRUE : The natural orientation of the axis is taken.
| FALSE : The opposite orientation is taken This example retrieves in IsInverted orientation of the
| revolution for the Revol hybrid shape feature.
|
| Dim IsInverted As boolean
| IsInverted = Revol.Orientation
:return: bool
:rtype: bool
"""
return self.hybrid_shape_revol.Orientation
@orientation.setter
def orientation(self, value: bool):
"""
:param bool value:
"""
self.hybrid_shape_revol.Orientation = value
@property
def profile(self) -> Reference:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property Profil() As Reference
|
| Role: To get_Profil on the object.
|
| Parameters:
|
| oProfil
| return value for CATScript applications, with (IDLRETVAL) function
| type
|
| See also:
| Reference
| Returns:
| HRESULT S_OK if Ok E_FAIL else return error code for C++
| Implementations
| See also:
| HybridShapeFactory
:return: Reference
:rtype: Reference
"""
return Reference(self.hybrid_shape_revol.Profil)
@profile.setter
def profile(self, reference_profile: Reference):
"""
:param Reference reference_profile:
"""
self.hybrid_shape_revol.Profil = reference_profile.com_object
@property
def second_limit_type(self) -> int:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property SecondLimitType() As long
|
| Returns or sets the Second limit type.
| Legal values:
|
| 0
| Unknown Limit type.
| 1
| Limit type is Dimension. It implies that limit is defined by
| length
| 2
| Limit type is UptoElement. It implies that limit is defined by a
| geometrical element
|
| Example:
| This example retrieves in oLim2Type the second limit type for the Revolve
| hybrid shape feature.
|
| Dim oLim2Type
| Set oLim2Type = RevolveSecondLimitType
:return: int
:rtype: int
"""
return self.hybrid_shape_revol.SecondLimitType
@second_limit_type.setter
def second_limit_type(self, value: int):
"""
:param int value:
"""
self.hybrid_shape_revol.SecondLimitType = value
@property
def second_upto_element(self) -> Reference:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property SecondUptoElement() As Reference
|
| Returns or sets the Second up-to element used to limit
| Revolution.
|
| Example:
| This example retrieves in Lim2Elem the Second up-to element for the
| Revolve hybrid shape feature.
|
| Dim Lim2Elem As Reference
| Set Lim2Elem = Revolve.SecondUptoElement
:return: Reference
:rtype: Reference
"""
return Reference(self.hybrid_shape_revol.SecondUptoElement)
@second_upto_element.setter
def second_upto_element(self, reference_element: Reference):
"""
:param Reference reference_element:
"""
self.hybrid_shape_revol.SecondUptoElement = reference_element.com_object
def __repr__(self):
return f'HybridShapeRevol(name="{self.name}")' | en | 0.581262 | #! usr/bin/python3.6 Module initially auto generated using V5Automation files from CATIA V5 R28 on 2020-07-06 14:02:20.222384 .. warning:: The notes denoted "CAA V5 Visual Basic Help" are to be used as reference only. They are there as a guide as to how the visual basic / catscript functions work and thus help debugging in pycatia. .. note:: :class: toggle CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384) | System.IUnknown | System.IDispatch | System.CATBaseUnknown | System.CATBaseDispatch | System.AnyObject | MecModInterfaces.HybridShape | HybridShapeRevol | | The Revol feature : an Revol is made up of a face to process and one Revol parameter. | Role: To access the data of the hybrid shape revol feature | object. | | LICENSING INFORMATION: Creation of volume result requires GSO | License | if GSO License is not granted , setting of Volume context has not | effect .. note:: :class: toggle CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384) | o Property Axis() As Reference | | Role: To get_Axis on the object. | | Parameters: | | oDir | return value for CATScript applications, with (IDLRETVAL) function | type | | See also: | Reference | Returns: | HRESULT S_OK if Ok E_FAIL else return error code for C++ | Implementations | See also: | HybridShapeFactory :return: Reference :rtype: Reference :param Reference reference_axis: .. note:: :class: toggle CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384) | o Property BeginAngle() As Angle (Read Only) | | Role: To get_BeginAngle on the object. | | Parameters: | | oAngle | return value for CATScript applications, with (IDLRETVAL) function | type | | See also: | Angle | Returns: | HRESULT S_OK if Ok E_FAIL else return error code for C++ | Implementations | See also: | HybridShapeFactory :return: Angle :rtype: Angle .. 
note:: :class: toggle CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384) | o Property Context() As long | | Returns or sets the context on Revolve feature. | Legal values: | | 0 This option creates surface of revolution. | 1 This option creates volume of revolution. | | | Note: Setting volume result requires GSO License. | | Example: | This example retrieves in oContext the context for the Revol hybrid | shape feature. | | Dim oContext | Set oContext = Revol.Context :return: int :rtype: int :param int value: .. note:: :class: toggle CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384) | o Property EndAngle() As Angle (Read Only) | | Role: To get_EndAngle on the object. | | Parameters: | | oAngle | return value for CATScript applications, with (IDLRETVAL) function | type | | See also: | Angle | Returns: | HRESULT S_OK if Ok E_FAIL else return error code for C++ | Implementations | See also: | HybridShapeFactory :return: Angle :rtype: Angle .. note:: :class: toggle CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384) | o Property FirstLimitType() As long | | Returns or sets the First limit type. | Legal values: | | 0 | Unknown Limit type. | 1 | Limit type is Dimension. It implies that limit is defined by | length | 2 | Limit type is UptoElement. It implies that limit is defined by a | geometrical element | | Example: | This example retrieves in oLim1Type the first limit type for the Revolve | hybrid shape feature. | | Dim oLim1Type | Set oLim1Type = Revolve.FirstLimitType :return: int :rtype: int :param int value: .. note:: :class: toggle CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384) | o Property FirstUptoElement() As Reference | | Returns or sets the First up-to element used to limit | Revolution. | | Example: | This example retrieves in Lim1Elem the First up-to element for the | Revolve hybrid shape feature. 
| | Dim Lim1Elem As Reference | Set Lim1Elem = Revolve.FirstUptoElement :return: Reference :rtype: Reference :param Reference reference_element: .. note:: :class: toggle CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384) | o Property Orientation(boolean iOrientation) | | Gets or sets orientation of the revolution. | Orientation | TRUE : The natural orientation of the axis is taken. | FALSE : The opposite orientation is taken This example retrieves in IsInverted orientation of the | revolution for the Revol hybrid shape feature. | | Dim IsInverted As boolean | IsInverted = Revol.Orientation :return: bool :rtype: bool :param bool value: .. note:: :class: toggle CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384) | o Property Profil() As Reference | | Role: To get_Profil on the object. | | Parameters: | | oProfil | return value for CATScript applications, with (IDLRETVAL) function | type | | See also: | Reference | Returns: | HRESULT S_OK if Ok E_FAIL else return error code for C++ | Implementations | See also: | HybridShapeFactory :return: Reference :rtype: Reference :param Reference reference_profile: .. note:: :class: toggle CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384) | o Property SecondLimitType() As long | | Returns or sets the Second limit type. | Legal values: | | 0 | Unknown Limit type. | 1 | Limit type is Dimension. It implies that limit is defined by | length | 2 | Limit type is UptoElement. It implies that limit is defined by a | geometrical element | | Example: | This example retrieves in oLim2Type the second limit type for the Revolve | hybrid shape feature. | | Dim oLim2Type | Set oLim2Type = RevolveSecondLimitType :return: int :rtype: int :param int value: .. note:: :class: toggle CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384) | o Property SecondUptoElement() As Reference | | Returns or sets the Second up-to element used to limit | Revolution. 
| | Example: | This example retrieves in Lim2Elem the Second up-to element for the | Revolve hybrid shape feature. | | Dim Lim2Elem As Reference | Set Lim2Elem = Revolve.SecondUptoElement :return: Reference :rtype: Reference :param Reference reference_element: | 1.884005 | 2 |
Sector.py | Zacland/startrek1971 | 0 | 6612864 | import Glyphs
import gettext
# _ = gettext.gettext
class Sector():
def __init__(self, num=-1, name='',
aliens=-1, stars=-1,
starbases=-1, lines=[]):
self.name = name
self.number = num
self.lines = lines
self.area_klingons = aliens
self.area_stars = stars
self.area_starbases = starbases
def is_null(self):
return self.num == -1
@staticmethod
def from_area(area):
if not area:
return Sector()
name = area.name
num = area.number
map = area.get_map()
return Sector(num, name,
area.count_glyphs(Glyphs.KLINGON),
area.count_glyphs(Glyphs.STAR),
area.count_glyphs(Glyphs.STARBASE),
map)
@staticmethod
def display_area(game, sector):
game.enterprise.condition = _("GREEN")
if sector.area_klingons > 0:
game.enterprise.condition = _("RED")
elif game.enterprise.energy < 300:
game.enterprise.condition = _("YELLOW")
sb = " a b c d e f g h \n"
sb += _(" -=--=--=--=--=--=--=--=- Sector: ") + sector.name + "\n"
info = list()
info.append(_(" Number: [{number}]\n").format(number=sector.number))
info.append(_(" Hazzards: [{hazzards}]\n").format(hazzards=sector.area_stars + sector.area_klingons))
info.append(_(" Stardate: {star_date}\n").format(star_date=game.star_date))
info.append(_(" Condition: {condition}\n").format(condition=game.enterprise.condition))
info.append(_(" Energy: {energy}\n").format(energy=game.enterprise.energy))
info.append(_(" Shields: {shield_level}\n").format(shield_level=game.enterprise.shield_level))
info.append(_(" Photon Torpedoes: {photon_torpedoes}\n").format(photon_torpedoes=game.enterprise.photon_torpedoes))
info.append(_(" Time remaining: {time_remaining}\n").format(time_remaining=game.time_remaining))
for row, line in enumerate(sector.lines):
sb += f" {row+1} |"
for col in line:
sb += col
sb += info[row]
sb += _(" -=--=--=--=--=--=--=--=- Docked: {docked}\n").format(docked=game.enterprise.docked)
sb += " a b c d e f g h \n"
print(sb, end='')
if sector.area_klingons > 0:
game.display()
game.display(_("Condition RED: Klingon ship{0} detected.").format("" if sector.area_klingons == 1 else "s"))
if game.enterprise.shield_level == 0 and not game.enterprise.docked:
game.display(_("Warning: Shields are down."))
elif game.enterprise.energy < 300:
game.display()
game.display(_("Condition YELLOW: Low energy level."))
game.enterprise.condition = _("YELLOW")
| import Glyphs
import gettext
# _ = gettext.gettext
class Sector():
def __init__(self, num=-1, name='',
aliens=-1, stars=-1,
starbases=-1, lines=[]):
self.name = name
self.number = num
self.lines = lines
self.area_klingons = aliens
self.area_stars = stars
self.area_starbases = starbases
def is_null(self):
return self.num == -1
@staticmethod
def from_area(area):
if not area:
return Sector()
name = area.name
num = area.number
map = area.get_map()
return Sector(num, name,
area.count_glyphs(Glyphs.KLINGON),
area.count_glyphs(Glyphs.STAR),
area.count_glyphs(Glyphs.STARBASE),
map)
@staticmethod
def display_area(game, sector):
game.enterprise.condition = _("GREEN")
if sector.area_klingons > 0:
game.enterprise.condition = _("RED")
elif game.enterprise.energy < 300:
game.enterprise.condition = _("YELLOW")
sb = " a b c d e f g h \n"
sb += _(" -=--=--=--=--=--=--=--=- Sector: ") + sector.name + "\n"
info = list()
info.append(_(" Number: [{number}]\n").format(number=sector.number))
info.append(_(" Hazzards: [{hazzards}]\n").format(hazzards=sector.area_stars + sector.area_klingons))
info.append(_(" Stardate: {star_date}\n").format(star_date=game.star_date))
info.append(_(" Condition: {condition}\n").format(condition=game.enterprise.condition))
info.append(_(" Energy: {energy}\n").format(energy=game.enterprise.energy))
info.append(_(" Shields: {shield_level}\n").format(shield_level=game.enterprise.shield_level))
info.append(_(" Photon Torpedoes: {photon_torpedoes}\n").format(photon_torpedoes=game.enterprise.photon_torpedoes))
info.append(_(" Time remaining: {time_remaining}\n").format(time_remaining=game.time_remaining))
for row, line in enumerate(sector.lines):
sb += f" {row+1} |"
for col in line:
sb += col
sb += info[row]
sb += _(" -=--=--=--=--=--=--=--=- Docked: {docked}\n").format(docked=game.enterprise.docked)
sb += " a b c d e f g h \n"
print(sb, end='')
if sector.area_klingons > 0:
game.display()
game.display(_("Condition RED: Klingon ship{0} detected.").format("" if sector.area_klingons == 1 else "s"))
if game.enterprise.shield_level == 0 and not game.enterprise.docked:
game.display(_("Warning: Shields are down."))
elif game.enterprise.energy < 300:
game.display()
game.display(_("Condition YELLOW: Low energy level."))
game.enterprise.condition = _("YELLOW")
| it | 0.569444 | # _ = gettext.gettext | 3.10789 | 3 |
spotseeker_server/forms/item.py | uw-it-aca/spotseeker_server | 5 | 6612865 | <gh_stars>1-10
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from spotseeker_server.default_forms.item import (
DefaultItemForm,
DefaultItemExtendedInfoForm,
)
from spotseeker_server.load_module import ModuleObjectLoader
class ItemExtendedInfoForm(ModuleObjectLoader):
setting_name = "SPOTSEEKER_ITEMEXTENDEDINFO_FORM"
default = DefaultItemExtendedInfoForm
class ItemForm(ModuleObjectLoader):
setting_name = "SPOTSEEKER_ITEM_FORM"
default = DefaultItemForm
| # Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from spotseeker_server.default_forms.item import (
DefaultItemForm,
DefaultItemExtendedInfoForm,
)
from spotseeker_server.load_module import ModuleObjectLoader
class ItemExtendedInfoForm(ModuleObjectLoader):
setting_name = "SPOTSEEKER_ITEMEXTENDEDINFO_FORM"
default = DefaultItemExtendedInfoForm
class ItemForm(ModuleObjectLoader):
setting_name = "SPOTSEEKER_ITEM_FORM"
default = DefaultItemForm | en | 0.374447 | # Copyright 2021 UW-IT, University of Washington # SPDX-License-Identifier: Apache-2.0 | 1.586086 | 2 |
Tests/test_basics.py | tcompa/Laughlin-Metropolis | 3 | 6612866 | <filename>Tests/test_basics.py
import numpy
import sys
sys.path.append('..')
from lib_laughlin_metropolis import main_laughlin_mc
def test_start_from_scratch():
N = 11
m = 2.0
Nqh = 3
xqh = numpy.random.uniform(-1.0, 1.0, (Nqh, 2))
delta = 0.1
nsteps = 1000
main_laughlin_mc(N, m, Nqh, xqh, delta, nsteps,
skip_for_rsq=0, skip_for_xy_hist=0,
ContinuePreviousRun=0, xmax=100.0)
def test_continue_previous_run():
N = 11
m = 2.0
Nqh = 3
xqh = numpy.random.uniform(-1.0, 1.0, (Nqh, 2))
delta = 0.1
nsteps = 1000
main_laughlin_mc(N, m, Nqh, xqh, delta, nsteps,
skip_for_rsq=0, skip_for_xy_hist=0,
ContinuePreviousRun=0, xmax=100.0)
main_laughlin_mc(N, m, Nqh, xqh, delta, nsteps,
skip_for_rsq=0, skip_for_xy_hist=0,
ContinuePreviousRun=1)
| <filename>Tests/test_basics.py
import numpy
import sys
sys.path.append('..')
from lib_laughlin_metropolis import main_laughlin_mc
def test_start_from_scratch():
N = 11
m = 2.0
Nqh = 3
xqh = numpy.random.uniform(-1.0, 1.0, (Nqh, 2))
delta = 0.1
nsteps = 1000
main_laughlin_mc(N, m, Nqh, xqh, delta, nsteps,
skip_for_rsq=0, skip_for_xy_hist=0,
ContinuePreviousRun=0, xmax=100.0)
def test_continue_previous_run():
N = 11
m = 2.0
Nqh = 3
xqh = numpy.random.uniform(-1.0, 1.0, (Nqh, 2))
delta = 0.1
nsteps = 1000
main_laughlin_mc(N, m, Nqh, xqh, delta, nsteps,
skip_for_rsq=0, skip_for_xy_hist=0,
ContinuePreviousRun=0, xmax=100.0)
main_laughlin_mc(N, m, Nqh, xqh, delta, nsteps,
skip_for_rsq=0, skip_for_xy_hist=0,
ContinuePreviousRun=1)
| none | 1 | 2.954383 | 3 | |
utils/localization/modules/locale_generator/excel_input.py | Open-Speech-EkStep/crowdsource-dataplatform | 22 | 6612867 | from helper.reader.excel_file_reader import ExcelReader
from helper.reader.json_file_reader import JsonReader
from helper.utils.utils import get_excel_files
from modules.locale_generator.utils import get_excel_files
from abc import ABC, abstractmethod
import os
class ExcelInput(ABC):
def __init__(self, input_json_path, meta_input_path):
self.input_json_path = input_json_path
self.meta_input_path = meta_input_path
self.json_reader = JsonReader()
self.excel_reader = ExcelReader()
@abstractmethod
def read_translation_file(self, language_code, columns):
pass
@abstractmethod
def read_meta_file(self, language_code, columns):
pass
def read_json_file(self, language_code):
json_path = '{input_json_path}/{locale}/common.json'.format(input_json_path=self.input_json_path, locale=language_code)
return self.json_reader.read_as_df(json_path)
class SingleExcelInput(ExcelInput):
def __init__(self, input_json_path, input_excel_path, meta_input_path):
super().__init__(input_json_path, meta_input_path)
self.input_json_path = input_json_path
# file path
self.input_excel_path = input_excel_path
# file path
self.meta_input_path = meta_input_path
def read_meta_file(self, language_code, columns):
return self.excel_reader.read_as_df(self.meta_input_path, columns)
def read_translation_file(self, language_code, columns):
return self.excel_reader.read_as_df(self.input_excel_path, columns)
class MultiExcelInput(ExcelInput):
def __init__(self, input_json_path, input_base_path, meta_input_path):
super().__init__(input_json_path, meta_input_path)
self.input_json_path = input_json_path
# folder path
self.input_base_path = input_base_path
# file path
self.meta_input_path = meta_input_path
def read_meta_file(self, language_code, columns):
return self.excel_reader.read_as_df(os.path.join(self.meta_input_path, language_code + ".xlsx"), columns)
def read_translation_file(self, language_code, columns=None):
if columns is None:
columns = []
path_to_excels = os.path.join(self.input_base_path, language_code)
translation_excel_files = get_excel_files(path_to_excels)
excel_df = self.excel_reader.read_files(translation_excel_files, columns=columns)
return excel_df
| from helper.reader.excel_file_reader import ExcelReader
from helper.reader.json_file_reader import JsonReader
from helper.utils.utils import get_excel_files
from modules.locale_generator.utils import get_excel_files
from abc import ABC, abstractmethod
import os
class ExcelInput(ABC):
def __init__(self, input_json_path, meta_input_path):
self.input_json_path = input_json_path
self.meta_input_path = meta_input_path
self.json_reader = JsonReader()
self.excel_reader = ExcelReader()
@abstractmethod
def read_translation_file(self, language_code, columns):
pass
@abstractmethod
def read_meta_file(self, language_code, columns):
pass
def read_json_file(self, language_code):
json_path = '{input_json_path}/{locale}/common.json'.format(input_json_path=self.input_json_path, locale=language_code)
return self.json_reader.read_as_df(json_path)
class SingleExcelInput(ExcelInput):
def __init__(self, input_json_path, input_excel_path, meta_input_path):
super().__init__(input_json_path, meta_input_path)
self.input_json_path = input_json_path
# file path
self.input_excel_path = input_excel_path
# file path
self.meta_input_path = meta_input_path
def read_meta_file(self, language_code, columns):
return self.excel_reader.read_as_df(self.meta_input_path, columns)
def read_translation_file(self, language_code, columns):
return self.excel_reader.read_as_df(self.input_excel_path, columns)
class MultiExcelInput(ExcelInput):
def __init__(self, input_json_path, input_base_path, meta_input_path):
super().__init__(input_json_path, meta_input_path)
self.input_json_path = input_json_path
# folder path
self.input_base_path = input_base_path
# file path
self.meta_input_path = meta_input_path
def read_meta_file(self, language_code, columns):
return self.excel_reader.read_as_df(os.path.join(self.meta_input_path, language_code + ".xlsx"), columns)
def read_translation_file(self, language_code, columns=None):
if columns is None:
columns = []
path_to_excels = os.path.join(self.input_base_path, language_code)
translation_excel_files = get_excel_files(path_to_excels)
excel_df = self.excel_reader.read_files(translation_excel_files, columns=columns)
return excel_df
| en | 0.918902 | # file path # file path # folder path # file path | 2.829252 | 3 |
mars/learn/datasets/samples_generator.py | sighingnow/mars | 0 | 6612868 | # Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ... import tensor as mt
from ...tensor.utils import check_random_state
from ...tensor import linalg
# -------------------------------------------------------------------
# Original implementation is in `sklearn.datasets.samples_generator`.
# -------------------------------------------------------------------
def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10,
tail_strength=0.5, random_state=None, chunk_size=None):
"""Generate a mostly low rank matrix with bell-shaped singular values
Most of the variance can be explained by a bell-shaped curve of width
effective_rank: the low rank part of the singular values profile is::
(1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)
The remaining singular values' tail is fat, decreasing as::
tail_strength * exp(-0.1 * i / effective_rank).
The low rank part of the profile can be considered the structured
signal part of the data while the tail can be considered the noisy
part of the data that cannot be summarized by a low number of linear
components (singular vectors).
This kind of singular profiles is often seen in practice, for instance:
- gray level pictures of faces
- TF-IDF vectors of text documents crawled from the web
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
effective_rank : int, optional (default=10)
The approximate number of singular vectors required to explain most of
the data by linear combinations.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile.
random_state : int, RandomState instance or None (default)
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
chunk_size : int or tuple of int or tuple of ints, optional
Desired chunk size on each dimension
Returns
-------
X : array of shape [n_samples, n_features]
The matrix.
"""
generator = check_random_state(random_state)
n = min(n_samples, n_features)
# Random (ortho normal) vectors
u, _ = linalg.qr(generator.randn(n_samples, n, chunk_size=chunk_size))
v, _ = linalg.qr(generator.randn(n_features, n, chunk_size=chunk_size))
# Index of the singular values
singular_ind = mt.arange(n, dtype=mt.float64, chunk_size=chunk_size)
# Build the singular profile by assembling signal and noise components
low_rank = ((1 - tail_strength) *
mt.exp(-1.0 * (singular_ind / effective_rank) ** 2))
tail = tail_strength * mt.exp(-0.1 * singular_ind / effective_rank)
s = mt.identity(n) * (low_rank + tail)
return mt.dot(mt.dot(u, s), v.T)
| # Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ... import tensor as mt
from ...tensor.utils import check_random_state
from ...tensor import linalg
# -------------------------------------------------------------------
# Original implementation is in `sklearn.datasets.samples_generator`.
# -------------------------------------------------------------------
def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10,
tail_strength=0.5, random_state=None, chunk_size=None):
"""Generate a mostly low rank matrix with bell-shaped singular values
Most of the variance can be explained by a bell-shaped curve of width
effective_rank: the low rank part of the singular values profile is::
(1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)
The remaining singular values' tail is fat, decreasing as::
tail_strength * exp(-0.1 * i / effective_rank).
The low rank part of the profile can be considered the structured
signal part of the data while the tail can be considered the noisy
part of the data that cannot be summarized by a low number of linear
components (singular vectors).
This kind of singular profiles is often seen in practice, for instance:
- gray level pictures of faces
- TF-IDF vectors of text documents crawled from the web
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
effective_rank : int, optional (default=10)
The approximate number of singular vectors required to explain most of
the data by linear combinations.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile.
random_state : int, RandomState instance or None (default)
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
chunk_size : int or tuple of int or tuple of ints, optional
Desired chunk size on each dimension
Returns
-------
X : array of shape [n_samples, n_features]
The matrix.
"""
generator = check_random_state(random_state)
n = min(n_samples, n_features)
# Random (ortho normal) vectors
u, _ = linalg.qr(generator.randn(n_samples, n, chunk_size=chunk_size))
v, _ = linalg.qr(generator.randn(n_features, n, chunk_size=chunk_size))
# Index of the singular values
singular_ind = mt.arange(n, dtype=mt.float64, chunk_size=chunk_size)
# Build the singular profile by assembling signal and noise components
low_rank = ((1 - tail_strength) *
mt.exp(-1.0 * (singular_ind / effective_rank) ** 2))
tail = tail_strength * mt.exp(-0.1 * singular_ind / effective_rank)
s = mt.identity(n) * (low_rank + tail)
return mt.dot(mt.dot(u, s), v.T)
| en | 0.697414 | # Copyright 1999-2018 Alibaba Group Holding Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ------------------------------------------------------------------- # Original implementation is in `sklearn.datasets.samples_generator`. # ------------------------------------------------------------------- Generate a mostly low rank matrix with bell-shaped singular values Most of the variance can be explained by a bell-shaped curve of width effective_rank: the low rank part of the singular values profile is:: (1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2) The remaining singular values' tail is fat, decreasing as:: tail_strength * exp(-0.1 * i / effective_rank). The low rank part of the profile can be considered the structured signal part of the data while the tail can be considered the noisy part of the data that cannot be summarized by a low number of linear components (singular vectors). This kind of singular profiles is often seen in practice, for instance: - gray level pictures of faces - TF-IDF vectors of text documents crawled from the web Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int, optional (default=100) The number of samples. n_features : int, optional (default=100) The number of features. effective_rank : int, optional (default=10) The approximate number of singular vectors required to explain most of the data by linear combinations. 
tail_strength : float between 0.0 and 1.0, optional (default=0.5) The relative importance of the fat noisy tail of the singular values profile. random_state : int, RandomState instance or None (default) Determines random number generation for dataset creation. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. chunk_size : int or tuple of int or tuple of ints, optional Desired chunk size on each dimension Returns ------- X : array of shape [n_samples, n_features] The matrix. # Random (ortho normal) vectors # Index of the singular values # Build the singular profile by assembling signal and noise components | 2.235819 | 2 |
house/migrations/0001_initial.py | talhaibnmahmud/Sysrem-Development-Dackend | 0 | 6612869 | <reponame>talhaibnmahmud/Sysrem-Development-Dackend
# Generated by Django 3.1.5 on 2021-01-28 12:41
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='House',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(blank=True, max_length=100, null=True)),
('price', models.PositiveIntegerField(default=0, validators=[django.core.validators.MaxValueValidator(10000000)])),
('type', models.CharField(choices=[('Apartment', 'Apartment'), ('Duplex', 'Duplex'), ('Triplex', 'Triplex')], max_length=15)),
('description', models.TextField(blank=True, max_length=300)),
],
),
]
| # Generated by Django 3.1.5 on 2021-01-28 12:41
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='House',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(blank=True, max_length=100, null=True)),
('price', models.PositiveIntegerField(default=0, validators=[django.core.validators.MaxValueValidator(10000000)])),
('type', models.CharField(choices=[('Apartment', 'Apartment'), ('Duplex', 'Duplex'), ('Triplex', 'Triplex')], max_length=15)),
('description', models.TextField(blank=True, max_length=300)),
],
),
] | en | 0.822017 | # Generated by Django 3.1.5 on 2021-01-28 12:41 | 1.922415 | 2 |
models/models.py | MarkZaidi/hypoxia-det | 0 | 6612870 | import torch
import torch.nn as nn
import torchvision
import numpy as np
#from . import resnet, resnext, mobilenet, dpn, drn
from lib.nn import SynchronizedBatchNorm2d
import math
from collections import OrderedDict
'''
I have already implemented the classes SegmentationModuleBase,
SegmentationModule, and ModelBuilder. Your task is to write the
code for your model of choice in the Model class.
'''
class SegmentationModuleBase(nn.Module):
def __init__(self):
super(SegmentationModuleBase, self).__init__()
def pixel_acc(self, pred, label):
_, preds = torch.max(pred, dim=1)
preds = preds.unsqueeze(1)
valid = (label >= 1).long()
acc_sum = torch.sum(valid * (preds == label).long())
pixel_sum = torch.sum(valid)
valid_neg = (label < 1).long()
acc_sum_neg = torch.sum(valid_neg * (preds == label).long())
acc_all = (acc_sum.float() + acc_sum_neg.float()) / \
(preds.shape[-1]*preds.shape[-1]*preds.shape[0])
# When you +falsePos, acc == Jaccard.
acc = acc_sum.float() / (pixel_sum.float() + 1e-10)
# class 1
v1 = (label == 1).long()
pred1 = (preds == 1).long()
anb1 = torch.sum(v1 * pred1)
try:
j1 = anb1.float() / (torch.sum(v1).float() +
torch.sum(pred1).float() - anb1.float() + 1e-10)
except:
j1 = 0
j1 = j1 if j1 <= 1 else 0
jaccard = j1
return acc, jaccard, acc_all
# ACCURACY THAT TAKES INTO ACCOUNT BOTH TP AND FP.
def jaccard(self, pred, label):
AnB = torch.sum(pred.long() & label) # TAKE THE AND
return AnB/(pred.view(-1).sum().float() + label.view(-1).sum().float() - AnB)
# MSE metrics
def mse(self, pred, label):
return torch.mean((pred - label) ** 2)
# percentage metrics
def percentage(self, pred, label, threshold=0.15):
# percent above threshold
pred_pct = (pred > threshold).sum().to(
dtype=torch.float) / float(pred.numel())
label_pct = (label > threshold).sum().to(
dtype=torch.float) / float(label.numel())
return pred_pct, label_pct
class SegmentationModule(SegmentationModuleBase):
def __init__(self, model, crit):
super(SegmentationModule, self).__init__()
self.model = model
self.crit = crit
def forward(self, feed_dict, *, mode='train'):
assert mode in ['train', 'test', 'result']
# training
if mode == 'train':
'''
Note: since we want the logits to use in the loss function,
we do not softmax pred.
'''
pred = self.model(feed_dict['image']) # (4,1,64,64)
loss = self.crit(pred, feed_dict['mask'])
# acc = self.pixel_acc(torch.round(nn.functional.softmax(
# pred, dim=1)).long(), feed_dict['mask'].long())
metric = self.mse(pred, feed_dict['mask'])
pred_pct, label_pct = self.percentage(pred, feed_dict['mask'])
return loss, [metric, pred_pct, label_pct]
# inference
else:
p = self.model(feed_dict['image'].unsqueeze(0))
loss = self.crit(p, feed_dict['mask'].unsqueeze(0))
'''
Note: we softmax the pred after calculating the validation loss.
The values in pred are now in the range [0, 1].
'''
metric = self.mse(p, feed_dict['mask'])
pred_pct, label_pct = self.percentage(p, feed_dict['mask'])
pred = p
return pred, loss, [metric, pred_pct, label_pct]
def infer(self, input):
pred = self.model(input)
return pred
class ModelBuilder():
# custom weights initialization
def weights_init(self, m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
nn.init.kaiming_normal_(m.weight.data)
elif classname.find('BatchNorm') != -1:
m.weight.data.fill_(1.)
m.bias.data.fill_(1e-4)
def build_model(self, args, arch='default', weights='i'):
arch = arch.lower()
if arch == 'default':
model = Model(in_channels=args.in_channels, out_channels=1)
else:
raise Exception('Architecture undefined!')
if len(weights) > 0:
model.load_state_dict(
torch.load(weights, map_location=lambda storage, loc: storage), strict=False)
print("Loaded pretrained model weights.")
return model.double()
class Model(nn.Module):
'''
Implement any model you wish here.
Do some research on some standard
models used in medical imaging segmentation.
Let us know why you chose the model you chose.
Also let us know the pros and cons of the model
you chose.
'''
# code adapted from https://github.com/mateuszbuda/brain-segmentation-pytorch/blob/master/unet.py
def __init__(self, in_channels=3, out_channels=1, init_features=32):
super(Model, self).__init__()
features = init_features
self.encoder1 = Model._block(in_channels, features, name="enc1")
self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)
self.encoder2 = Model._block(features, features * 2, name="enc2")
self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)
self.encoder3 = Model._block(features * 2, features * 4, name="enc3")
self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2)
self.encoder4 = Model._block(features * 4, features * 8, name="enc4")
self.pool4 = nn.MaxPool2d(kernel_size=2, stride=2)
self.bottleneck = Model._block(
features * 8, features * 16, name="bottleneck")
self.upconv4 = nn.ConvTranspose2d(
features * 16, features * 8, kernel_size=2, stride=2
)
self.decoder4 = Model._block(
(features * 8) * 2, features * 8, name="dec4")
self.upconv3 = nn.ConvTranspose2d(
features * 8, features * 4, kernel_size=2, stride=2
)
self.decoder3 = Model._block(
(features * 4) * 2, features * 4, name="dec3")
self.upconv2 = nn.ConvTranspose2d(
features * 4, features * 2, kernel_size=2, stride=2
)
self.decoder2 = Model._block(
(features * 2) * 2, features * 2, name="dec2")
self.upconv1 = nn.ConvTranspose2d(
features * 2, features, kernel_size=2, stride=2
)
self.decoder1 = Model._block(features * 2, features, name="dec1")
self.conv = nn.Conv2d(
in_channels=features, out_channels=out_channels, kernel_size=1
)
def forward(self, x):
enc1 = self.encoder1(x)
enc2 = self.encoder2(self.pool1(enc1))
enc3 = self.encoder3(self.pool2(enc2))
enc4 = self.encoder4(self.pool3(enc3))
bottleneck = self.bottleneck(self.pool4(enc4))
dec4 = self.upconv4(bottleneck)
dec4 = torch.cat((dec4, enc4), dim=1)
dec4 = self.decoder4(dec4)
dec3 = self.upconv3(dec4)
dec3 = torch.cat((dec3, enc3), dim=1)
dec3 = self.decoder3(dec3)
dec2 = self.upconv2(dec3)
dec2 = torch.cat((dec2, enc2), dim=1)
dec2 = self.decoder2(dec2)
dec1 = self.upconv1(dec2)
dec1 = torch.cat((dec1, enc1), dim=1)
dec1 = self.decoder1(dec1)
return self.conv(dec1)
# other funcs can be tested, tanh, relu, softplus
# return nn.functional.sigmoid(self.conv(dec1))
@staticmethod
def _block(in_channels, features, name):
return nn.Sequential(
OrderedDict(
[
(
name + "conv1",
nn.Conv2d(
in_channels=in_channels,
out_channels=features,
kernel_size=3,
padding=1,
bias=False,
),
),
(name + "norm1", nn.BatchNorm2d(num_features=features)),
(name + "relu1", nn.ReLU(inplace=True)),
(
name + "conv2",
nn.Conv2d(
in_channels=features,
out_channels=features,
kernel_size=3,
padding=1,
bias=False,
),
),
(name + "norm2", nn.BatchNorm2d(num_features=features)),
(name + "relu2", nn.ReLU(inplace=True)),
]
)
)
| import torch
import torch.nn as nn
import torchvision
import numpy as np
#from . import resnet, resnext, mobilenet, dpn, drn
from lib.nn import SynchronizedBatchNorm2d
import math
from collections import OrderedDict
'''
I have already implemented the classes SegmentationModuleBase,
SegmentationModule, and ModelBuilder. Your task is to write the
code for your model of choice in the Model class.
'''
class SegmentationModuleBase(nn.Module):
def __init__(self):
super(SegmentationModuleBase, self).__init__()
def pixel_acc(self, pred, label):
_, preds = torch.max(pred, dim=1)
preds = preds.unsqueeze(1)
valid = (label >= 1).long()
acc_sum = torch.sum(valid * (preds == label).long())
pixel_sum = torch.sum(valid)
valid_neg = (label < 1).long()
acc_sum_neg = torch.sum(valid_neg * (preds == label).long())
acc_all = (acc_sum.float() + acc_sum_neg.float()) / \
(preds.shape[-1]*preds.shape[-1]*preds.shape[0])
# When you +falsePos, acc == Jaccard.
acc = acc_sum.float() / (pixel_sum.float() + 1e-10)
# class 1
v1 = (label == 1).long()
pred1 = (preds == 1).long()
anb1 = torch.sum(v1 * pred1)
try:
j1 = anb1.float() / (torch.sum(v1).float() +
torch.sum(pred1).float() - anb1.float() + 1e-10)
except:
j1 = 0
j1 = j1 if j1 <= 1 else 0
jaccard = j1
return acc, jaccard, acc_all
# ACCURACY THAT TAKES INTO ACCOUNT BOTH TP AND FP.
def jaccard(self, pred, label):
AnB = torch.sum(pred.long() & label) # TAKE THE AND
return AnB/(pred.view(-1).sum().float() + label.view(-1).sum().float() - AnB)
# MSE metrics
def mse(self, pred, label):
return torch.mean((pred - label) ** 2)
# percentage metrics
def percentage(self, pred, label, threshold=0.15):
# percent above threshold
pred_pct = (pred > threshold).sum().to(
dtype=torch.float) / float(pred.numel())
label_pct = (label > threshold).sum().to(
dtype=torch.float) / float(label.numel())
return pred_pct, label_pct
class SegmentationModule(SegmentationModuleBase):
def __init__(self, model, crit):
super(SegmentationModule, self).__init__()
self.model = model
self.crit = crit
def forward(self, feed_dict, *, mode='train'):
assert mode in ['train', 'test', 'result']
# training
if mode == 'train':
'''
Note: since we want the logits to use in the loss function,
we do not softmax pred.
'''
pred = self.model(feed_dict['image']) # (4,1,64,64)
loss = self.crit(pred, feed_dict['mask'])
# acc = self.pixel_acc(torch.round(nn.functional.softmax(
# pred, dim=1)).long(), feed_dict['mask'].long())
metric = self.mse(pred, feed_dict['mask'])
pred_pct, label_pct = self.percentage(pred, feed_dict['mask'])
return loss, [metric, pred_pct, label_pct]
# inference
else:
p = self.model(feed_dict['image'].unsqueeze(0))
loss = self.crit(p, feed_dict['mask'].unsqueeze(0))
'''
Note: we softmax the pred after calculating the validation loss.
The values in pred are now in the range [0, 1].
'''
metric = self.mse(p, feed_dict['mask'])
pred_pct, label_pct = self.percentage(p, feed_dict['mask'])
pred = p
return pred, loss, [metric, pred_pct, label_pct]
def infer(self, input):
pred = self.model(input)
return pred
class ModelBuilder():
# custom weights initialization
def weights_init(self, m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
nn.init.kaiming_normal_(m.weight.data)
elif classname.find('BatchNorm') != -1:
m.weight.data.fill_(1.)
m.bias.data.fill_(1e-4)
def build_model(self, args, arch='default', weights='i'):
arch = arch.lower()
if arch == 'default':
model = Model(in_channels=args.in_channels, out_channels=1)
else:
raise Exception('Architecture undefined!')
if len(weights) > 0:
model.load_state_dict(
torch.load(weights, map_location=lambda storage, loc: storage), strict=False)
print("Loaded pretrained model weights.")
return model.double()
class Model(nn.Module):
'''
Implement any model you wish here.
Do some research on some standard
models used in medical imaging segmentation.
Let us know why you chose the model you chose.
Also let us know the pros and cons of the model
you chose.
'''
# code adapted from https://github.com/mateuszbuda/brain-segmentation-pytorch/blob/master/unet.py
def __init__(self, in_channels=3, out_channels=1, init_features=32):
super(Model, self).__init__()
features = init_features
self.encoder1 = Model._block(in_channels, features, name="enc1")
self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)
self.encoder2 = Model._block(features, features * 2, name="enc2")
self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)
self.encoder3 = Model._block(features * 2, features * 4, name="enc3")
self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2)
self.encoder4 = Model._block(features * 4, features * 8, name="enc4")
self.pool4 = nn.MaxPool2d(kernel_size=2, stride=2)
self.bottleneck = Model._block(
features * 8, features * 16, name="bottleneck")
self.upconv4 = nn.ConvTranspose2d(
features * 16, features * 8, kernel_size=2, stride=2
)
self.decoder4 = Model._block(
(features * 8) * 2, features * 8, name="dec4")
self.upconv3 = nn.ConvTranspose2d(
features * 8, features * 4, kernel_size=2, stride=2
)
self.decoder3 = Model._block(
(features * 4) * 2, features * 4, name="dec3")
self.upconv2 = nn.ConvTranspose2d(
features * 4, features * 2, kernel_size=2, stride=2
)
self.decoder2 = Model._block(
(features * 2) * 2, features * 2, name="dec2")
self.upconv1 = nn.ConvTranspose2d(
features * 2, features, kernel_size=2, stride=2
)
self.decoder1 = Model._block(features * 2, features, name="dec1")
self.conv = nn.Conv2d(
in_channels=features, out_channels=out_channels, kernel_size=1
)
def forward(self, x):
enc1 = self.encoder1(x)
enc2 = self.encoder2(self.pool1(enc1))
enc3 = self.encoder3(self.pool2(enc2))
enc4 = self.encoder4(self.pool3(enc3))
bottleneck = self.bottleneck(self.pool4(enc4))
dec4 = self.upconv4(bottleneck)
dec4 = torch.cat((dec4, enc4), dim=1)
dec4 = self.decoder4(dec4)
dec3 = self.upconv3(dec4)
dec3 = torch.cat((dec3, enc3), dim=1)
dec3 = self.decoder3(dec3)
dec2 = self.upconv2(dec3)
dec2 = torch.cat((dec2, enc2), dim=1)
dec2 = self.decoder2(dec2)
dec1 = self.upconv1(dec2)
dec1 = torch.cat((dec1, enc1), dim=1)
dec1 = self.decoder1(dec1)
return self.conv(dec1)
# other funcs can be tested, tanh, relu, softplus
# return nn.functional.sigmoid(self.conv(dec1))
@staticmethod
def _block(in_channels, features, name):
return nn.Sequential(
OrderedDict(
[
(
name + "conv1",
nn.Conv2d(
in_channels=in_channels,
out_channels=features,
kernel_size=3,
padding=1,
bias=False,
),
),
(name + "norm1", nn.BatchNorm2d(num_features=features)),
(name + "relu1", nn.ReLU(inplace=True)),
(
name + "conv2",
nn.Conv2d(
in_channels=features,
out_channels=features,
kernel_size=3,
padding=1,
bias=False,
),
),
(name + "norm2", nn.BatchNorm2d(num_features=features)),
(name + "relu2", nn.ReLU(inplace=True)),
]
)
)
| en | 0.73239 | #from . import resnet, resnext, mobilenet, dpn, drn I have already implemented the classes SegmentationModuleBase, SegmentationModule, and ModelBuilder. Your task is to write the code for your model of choice in the Model class. # When you +falsePos, acc == Jaccard. # class 1 # ACCURACY THAT TAKES INTO ACCOUNT BOTH TP AND FP. # TAKE THE AND # MSE metrics # percentage metrics # percent above threshold # training Note: since we want the logits to use in the loss function, we do not softmax pred. # (4,1,64,64) # acc = self.pixel_acc(torch.round(nn.functional.softmax( # pred, dim=1)).long(), feed_dict['mask'].long()) # inference Note: we softmax the pred after calculating the validation loss. The values in pred are now in the range [0, 1]. # custom weights initialization Implement any model you wish here. Do some research on some standard models used in medical imaging segmentation. Let us know why you chose the model you chose. Also let us know the pros and cons of the model you chose. # code adapted from https://github.com/mateuszbuda/brain-segmentation-pytorch/blob/master/unet.py # other funcs can be tested, tanh, relu, softplus # return nn.functional.sigmoid(self.conv(dec1)) | 2.636047 | 3 |
buildroot/support/testing/tests/package/sample_python_passlib.py | rbrenton/hassos | 349 | 6612871 | from passlib.hash import pbkdf2_sha256
hash = pbkdf2_sha256.hash("password")
assert(pbkdf2_sha256.verify("passWord", hash) is False)
assert(pbkdf2_sha256.verify("password", hash) is True)
| from passlib.hash import pbkdf2_sha256
hash = pbkdf2_sha256.hash("password")
assert(pbkdf2_sha256.verify("passWord", hash) is False)
assert(pbkdf2_sha256.verify("password", hash) is True)
| none | 1 | 2.473995 | 2 | |
readyml/labels/labels_loader.py | houseofai/readyml | 0 | 6612872 | import pkgutil
labels_path = {
'ms_coco': 'mscoco_labels.json',
'imagenet':'ImageNetLabels.txt',
'tfhub_biggan_categories':'tfhub_biggan_categories.json',
}
def _get_labels_path(name):
if name in labels_path:
return labels_path.get(name)
else:
raise ValueError(f"No label file foudn with name '{name}'")
def get_labels(name):
return pkgutil.get_data(__name__, _get_labels_path(name))
| import pkgutil
labels_path = {
'ms_coco': 'mscoco_labels.json',
'imagenet':'ImageNetLabels.txt',
'tfhub_biggan_categories':'tfhub_biggan_categories.json',
}
def _get_labels_path(name):
if name in labels_path:
return labels_path.get(name)
else:
raise ValueError(f"No label file foudn with name '{name}'")
def get_labels(name):
return pkgutil.get_data(__name__, _get_labels_path(name))
| none | 1 | 2.505999 | 3 | |
python/qiskit/exceptions.py | seunomonije/quantum | 0 | 6612873 | <gh_stars>0
class InvalidDeviceException(Exception):
pass
class DuplicateNameException(Exception):
pass
class InvalidTupleTypeException(Exception):
pass | class InvalidDeviceException(Exception):
pass
class DuplicateNameException(Exception):
pass
class InvalidTupleTypeException(Exception):
pass | none | 1 | 1.619655 | 2 | |
stitches.py | EMCain/knitpatterns | 0 | 6612874 | <reponame>EMCain/knitpatterns<filename>stitches.py
from classes import Stitch, StitchPattern
# some standard stitches
knit = Stitch('k', u'|', 1, 1)
purl = Stitch('p', u'-', 1, 1, knit)
knit.reverse = purl
# functions to create common stitch patterns
def knpn(k: int, p: int):
return StitchPattern([*[knit]*k, *[purl]*p], name=f'k{k}p{p}') | from classes import Stitch, StitchPattern
# some standard stitches
knit = Stitch('k', u'|', 1, 1)
purl = Stitch('p', u'-', 1, 1, knit)
knit.reverse = purl
# functions to create common stitch patterns
def knpn(k: int, p: int):
return StitchPattern([*[knit]*k, *[purl]*p], name=f'k{k}p{p}') | en | 0.77909 | # some standard stitches # functions to create common stitch patterns | 3.089728 | 3 |
volatility.py | ali0003433/precious-metals-mining-vs-bullion | 0 | 6612875 | <gh_stars>0
import pandas as pd
import numpy as np
def compute_volatility(df, target_var, target_symbol, volability_period = 'M'):
"""
This function compute the average volatility for each month
:param df: input clean data frame
:param target_var: variable including (close, open, high, low)
:target_symbol including 'SLV', 'SIL', 'GLD', 'GDX', 'DJI'
:return VOL_ranking_mean: a data frame including average of
volatility ranking per month and standard deviation of volatility ranking per month
:return monthly_vol: the value of volability per month for all years
"""
df = df.loc[df['symbol'] == target_symbol]
if df.index.name != 'date':
df.set_index('date', inplace = True)
#To compute daily % change and drop the first value
daily_change = df[target_var].pct_change()
daily_change.dropna(inplace=True)
#Use standard deviation as a measure of volatility
# and multiplying by sqrt of number of months (12) or number of season
if volability_period == 'M':
num_s = 12
elif volability_period == 'Q':
num_s = 4
else:
raise ValueError(f'The volability_period of {volability_period} is not valid')
monthly_vol = daily_change.resample(volability_period).std()* np.sqrt(num_s)
#Rank the data on ascending order
ranked_months = pd.DataFrame(monthly_vol.groupby(monthly_vol.index.year).rank()).reset_index()
ranked_months.columns = ['period', 'ranking']
#To build a data frame
monthly_vol_df = pd.DataFrame(monthly_vol).reset_index()
monthly_vol_df.columns = ['period', 'volatility']
if volability_period == 'M':
ranked_months['period'] = ranked_months['period'].map(lambda x: x.strftime('%b'))
monthly_vol_df['period'] = monthly_vol_df['period'].map(lambda x: x.strftime('%b'))
elif volability_period == 'Q':
ranked_months['period'] = ranked_months['period'].dt.quarter.map(lambda x: 'Quarter ' + str(x))
monthly_vol_df['period'] = monthly_vol_df['period'].dt.quarter.map(lambda x: 'Quarter ' + str(x))
else:
raise ValueError(f'The volability_period of {volability_period} is not valid')
return (monthly_vol_df, ranked_months) | import pandas as pd
import numpy as np
def compute_volatility(df, target_var, target_symbol, volability_period = 'M'):
"""
This function compute the average volatility for each month
:param df: input clean data frame
:param target_var: variable including (close, open, high, low)
:target_symbol including 'SLV', 'SIL', 'GLD', 'GDX', 'DJI'
:return VOL_ranking_mean: a data frame including average of
volatility ranking per month and standard deviation of volatility ranking per month
:return monthly_vol: the value of volability per month for all years
"""
df = df.loc[df['symbol'] == target_symbol]
if df.index.name != 'date':
df.set_index('date', inplace = True)
#To compute daily % change and drop the first value
daily_change = df[target_var].pct_change()
daily_change.dropna(inplace=True)
#Use standard deviation as a measure of volatility
# and multiplying by sqrt of number of months (12) or number of season
if volability_period == 'M':
num_s = 12
elif volability_period == 'Q':
num_s = 4
else:
raise ValueError(f'The volability_period of {volability_period} is not valid')
monthly_vol = daily_change.resample(volability_period).std()* np.sqrt(num_s)
#Rank the data on ascending order
ranked_months = pd.DataFrame(monthly_vol.groupby(monthly_vol.index.year).rank()).reset_index()
ranked_months.columns = ['period', 'ranking']
#To build a data frame
monthly_vol_df = pd.DataFrame(monthly_vol).reset_index()
monthly_vol_df.columns = ['period', 'volatility']
if volability_period == 'M':
ranked_months['period'] = ranked_months['period'].map(lambda x: x.strftime('%b'))
monthly_vol_df['period'] = monthly_vol_df['period'].map(lambda x: x.strftime('%b'))
elif volability_period == 'Q':
ranked_months['period'] = ranked_months['period'].dt.quarter.map(lambda x: 'Quarter ' + str(x))
monthly_vol_df['period'] = monthly_vol_df['period'].dt.quarter.map(lambda x: 'Quarter ' + str(x))
else:
raise ValueError(f'The volability_period of {volability_period} is not valid')
return (monthly_vol_df, ranked_months) | en | 0.760615 | This function compute the average volatility for each month :param df: input clean data frame :param target_var: variable including (close, open, high, low) :target_symbol including 'SLV', 'SIL', 'GLD', 'GDX', 'DJI' :return VOL_ranking_mean: a data frame including average of volatility ranking per month and standard deviation of volatility ranking per month :return monthly_vol: the value of volability per month for all years #To compute daily % change and drop the first value #Use standard deviation as a measure of volatility # and multiplying by sqrt of number of months (12) or number of season #Rank the data on ascending order #To build a data frame | 3.76462 | 4 |
gamefixes/200940.py | manueliglesiasgarcia/protonfixes | 54 | 6612876 | <filename>gamefixes/200940.py
""" Game fix for Sonic CD
"""
#pylint: disable=C0103
from protonfixes import util
def main():
""" Installs d3dcompiler_43, d3dx9_43, mdx. Locks fps to 60.
"""
util.protontricks('d3dcompiler_43')
util.protontricks('d3dx9_43')
util.protontricks('mdx')
util.set_environment('DXVK_FRAME_RATE', '60')
| <filename>gamefixes/200940.py
""" Game fix for Sonic CD
"""
#pylint: disable=C0103
from protonfixes import util
def main():
""" Installs d3dcompiler_43, d3dx9_43, mdx. Locks fps to 60.
"""
util.protontricks('d3dcompiler_43')
util.protontricks('d3dx9_43')
util.protontricks('mdx')
util.set_environment('DXVK_FRAME_RATE', '60')
| en | 0.506181 | Game fix for Sonic CD #pylint: disable=C0103 Installs d3dcompiler_43, d3dx9_43, mdx. Locks fps to 60. | 1.508029 | 2 |
app.py | mentix02/hoshdb | 0 | 6612877 | <filename>app.py<gh_stars>0
from __future__ import annotations
import hashlib
from typing import List
from datetime import datetime
from mongoengine import signals
from flask_mongoengine import MongoEngine
from mongoengine.errors import DoesNotExist
from flask import Flask, request, Response, render_template
app = Flask(__name__)
app.config["MONGODB_SETTINGS"] = {
"port": 27017,
"db": "hoshdb",
"host": "localhost",
}
db = MongoEngine()
db.init_app(app)
class Entry(db.DynamicDocument):
timestamp = db.DateTimeField(default=datetime.now)
word = db.StringField(max_length=200, required=True)
HASH_TYPES: List[str] = [
"md4",
"md5",
"sha1",
"sha224",
"sha256",
"sha384",
"sha512",
"sha3_224",
"sha3_256",
]
meta = {
"indexes": [
{"fields": ["word"], "unique": True},
],
}
@classmethod
def pre_save(cls, _, document: Entry, **__):
word = document.word.encode("utf-8")
for hash_type in cls.HASH_TYPES:
setattr(document, hash_type, hashlib.new(hash_type, word).hexdigest())
signals.pre_save.connect(Entry.pre_save, sender=Entry)
@app.route("/")
def index():
return render_template("index.html")
@app.route("/search")
def search():
q = request.args.get('word')
try:
entry = Entry.objects(word=q).get()
except DoesNotExist:
entry = Entry(word=q).save()
finally:
return Response(entry.to_json(), mimetype='application/json')
if __name__ == "__main__":
app.run(debug=True)
| <filename>app.py<gh_stars>0
from __future__ import annotations
import hashlib
from typing import List
from datetime import datetime
from mongoengine import signals
from flask_mongoengine import MongoEngine
from mongoengine.errors import DoesNotExist
from flask import Flask, request, Response, render_template
app = Flask(__name__)
app.config["MONGODB_SETTINGS"] = {
"port": 27017,
"db": "hoshdb",
"host": "localhost",
}
db = MongoEngine()
db.init_app(app)
class Entry(db.DynamicDocument):
timestamp = db.DateTimeField(default=datetime.now)
word = db.StringField(max_length=200, required=True)
HASH_TYPES: List[str] = [
"md4",
"md5",
"sha1",
"sha224",
"sha256",
"sha384",
"sha512",
"sha3_224",
"sha3_256",
]
meta = {
"indexes": [
{"fields": ["word"], "unique": True},
],
}
@classmethod
def pre_save(cls, _, document: Entry, **__):
word = document.word.encode("utf-8")
for hash_type in cls.HASH_TYPES:
setattr(document, hash_type, hashlib.new(hash_type, word).hexdigest())
signals.pre_save.connect(Entry.pre_save, sender=Entry)
@app.route("/")
def index():
return render_template("index.html")
@app.route("/search")
def search():
q = request.args.get('word')
try:
entry = Entry.objects(word=q).get()
except DoesNotExist:
entry = Entry(word=q).save()
finally:
return Response(entry.to_json(), mimetype='application/json')
if __name__ == "__main__":
app.run(debug=True)
| none | 1 | 2.420058 | 2 | |
Hackerearth Set/RoyAndCodingContest.py | Siddharth2016/PYTHON3_prog | 2 | 6612878 | # ROY AND CODING CONTEST #INCOMPLETE
for _ in range(int(input())):
n,m = list(map(int, input().split()))
if n==1:
print(0)
elif m==1:
print(n)
else:
countmin = 0
pd = 0
mac = 1
while(pd<m):
mac += pd
#print(mac)
if (mac-pd)<=(m-pd):
pd += (mac-pd)
elif (m-pd)<(mac-pd):
pd += (m-pd)
else:
pd += 1
countmin += 1
#print(mac,pd,countmin,'###')
if mac>=n:
break
if mac>=n:
print(countmin)
#print('#%')
else:
q = (n-mac)//m
rem = (n-mac)%m
if rem==0:
print(countmin+q)
#print('#')
else:
print(countmin+q+1)
#print('##')
| # ROY AND CODING CONTEST #INCOMPLETE
for _ in range(int(input())):
n,m = list(map(int, input().split()))
if n==1:
print(0)
elif m==1:
print(n)
else:
countmin = 0
pd = 0
mac = 1
while(pd<m):
mac += pd
#print(mac)
if (mac-pd)<=(m-pd):
pd += (mac-pd)
elif (m-pd)<(mac-pd):
pd += (m-pd)
else:
pd += 1
countmin += 1
#print(mac,pd,countmin,'###')
if mac>=n:
break
if mac>=n:
print(countmin)
#print('#%')
else:
q = (n-mac)//m
rem = (n-mac)%m
if rem==0:
print(countmin+q)
#print('#')
else:
print(countmin+q+1)
#print('##')
| en | 0.145627 | # ROY AND CODING CONTEST #INCOMPLETE #print(mac) #print(mac,pd,countmin,'###') #print('#%') #print('#') #print('##') | 3.153752 | 3 |
algorithmU.py | AlbertMukhammadiev/SteinerProblem | 1 | 6612879 | <filename>algorithmU.py<gh_stars>1-10
#https://codereview.stackexchange.com/questions/1526/finding-all-k-subset-partitions
def algorithm_u(ns, m):
def visit(n, a):
ps = [[] for i in range(m)]
for j in range(n):
ps[a[j + 1]].append(ns[j])
return ps
def f(mu, nu, sigma, n, a):
if mu == 2:
yield visit(n, a)
else:
for v in f(mu - 1, nu - 1, (mu + sigma) % 2, n, a):
yield v
if nu == mu + 1:
a[mu] = mu - 1
yield visit(n, a)
while a[nu] > 0:
a[nu] = a[nu] - 1
yield visit(n, a)
elif nu > mu + 1:
if (mu + sigma) % 2 == 1:
a[nu - 1] = mu - 1
else:
a[mu] = mu - 1
if (a[nu] + sigma) % 2 == 1:
for v in b(mu, nu - 1, 0, n, a):
yield v
else:
for v in f(mu, nu - 1, 0, n, a):
yield v
while a[nu] > 0:
a[nu] = a[nu] - 1
if (a[nu] + sigma) % 2 == 1:
for v in b(mu, nu - 1, 0, n, a):
yield v
else:
for v in f(mu, nu - 1, 0, n, a):
yield v
def b(mu, nu, sigma, n, a):
if nu == mu + 1:
while a[nu] < mu - 1:
yield visit(n, a)
a[nu] = a[nu] + 1
yield visit(n, a)
a[mu] = 0
elif nu > mu + 1:
if (a[nu] + sigma) % 2 == 1:
for v in f(mu, nu - 1, 0, n, a):
yield v
else:
for v in b(mu, nu - 1, 0, n, a):
yield v
while a[nu] < mu - 1:
a[nu] = a[nu] + 1
if (a[nu] + sigma) % 2 == 1:
for v in f(mu, nu - 1, 0, n, a):
yield v
else:
for v in b(mu, nu - 1, 0, n, a):
yield v
if (mu + sigma) % 2 == 1:
a[nu - 1] = 0
else:
a[mu] = 0
if mu == 2:
yield visit(n, a)
else:
for v in b(mu - 1, nu - 1, (mu + sigma) % 2, n, a):
yield v
n = len(ns)
a = [0] * (n + 1)
for j in range(1, m + 1):
a[n - m + j] = j - 1
return f(m, n, 0, n, a)
def pretty_print(parts):
print ('; '.join('|'.join(''.join(str(e) for e in loe) for loe in part) for part in parts))
def fun(xs):
mydict, sets = {}, [xs]
while sets:
set_ = sets.pop()
partitions = list(algorithm_u(set_, 2))
mydict[tuple(set_)] = partitions
for partition in partitions:
for subset in partition:
if len(subset) > 2:
sets.append(subset)
for key, value in mydict.items():
print(key, value)
fun([1,2,3,4])
| <filename>algorithmU.py<gh_stars>1-10
#https://codereview.stackexchange.com/questions/1526/finding-all-k-subset-partitions
def algorithm_u(ns, m):
def visit(n, a):
ps = [[] for i in range(m)]
for j in range(n):
ps[a[j + 1]].append(ns[j])
return ps
def f(mu, nu, sigma, n, a):
if mu == 2:
yield visit(n, a)
else:
for v in f(mu - 1, nu - 1, (mu + sigma) % 2, n, a):
yield v
if nu == mu + 1:
a[mu] = mu - 1
yield visit(n, a)
while a[nu] > 0:
a[nu] = a[nu] - 1
yield visit(n, a)
elif nu > mu + 1:
if (mu + sigma) % 2 == 1:
a[nu - 1] = mu - 1
else:
a[mu] = mu - 1
if (a[nu] + sigma) % 2 == 1:
for v in b(mu, nu - 1, 0, n, a):
yield v
else:
for v in f(mu, nu - 1, 0, n, a):
yield v
while a[nu] > 0:
a[nu] = a[nu] - 1
if (a[nu] + sigma) % 2 == 1:
for v in b(mu, nu - 1, 0, n, a):
yield v
else:
for v in f(mu, nu - 1, 0, n, a):
yield v
def b(mu, nu, sigma, n, a):
if nu == mu + 1:
while a[nu] < mu - 1:
yield visit(n, a)
a[nu] = a[nu] + 1
yield visit(n, a)
a[mu] = 0
elif nu > mu + 1:
if (a[nu] + sigma) % 2 == 1:
for v in f(mu, nu - 1, 0, n, a):
yield v
else:
for v in b(mu, nu - 1, 0, n, a):
yield v
while a[nu] < mu - 1:
a[nu] = a[nu] + 1
if (a[nu] + sigma) % 2 == 1:
for v in f(mu, nu - 1, 0, n, a):
yield v
else:
for v in b(mu, nu - 1, 0, n, a):
yield v
if (mu + sigma) % 2 == 1:
a[nu - 1] = 0
else:
a[mu] = 0
if mu == 2:
yield visit(n, a)
else:
for v in b(mu - 1, nu - 1, (mu + sigma) % 2, n, a):
yield v
n = len(ns)
a = [0] * (n + 1)
for j in range(1, m + 1):
a[n - m + j] = j - 1
return f(m, n, 0, n, a)
def pretty_print(parts):
print ('; '.join('|'.join(''.join(str(e) for e in loe) for loe in part) for part in parts))
def fun(xs):
mydict, sets = {}, [xs]
while sets:
set_ = sets.pop()
partitions = list(algorithm_u(set_, 2))
mydict[tuple(set_)] = partitions
for partition in partitions:
for subset in partition:
if len(subset) > 2:
sets.append(subset)
for key, value in mydict.items():
print(key, value)
fun([1,2,3,4])
| en | 0.715316 | #https://codereview.stackexchange.com/questions/1526/finding-all-k-subset-partitions | 3.019679 | 3 |
noscrapy/job.py | hwms/noscrapy | 3 | 6612880 | from urllib.parse import urljoin
import requests
from .sitemap import Sitemap
class Job(object):
def __init__(self, url, parent_id=None, scraper=None, parent_job=None, base_data=None):
if parent_job:
self.url = self.combine_urls(parent_job.url, url)
else:
self.url = url
self.parent_id = parent_id
self.scraper = scraper
self.data_items = []
self.base_data = base_data or {}
def combine_urls(self, parent_url, child_url):
return urljoin(parent_url, child_url)
def execute(self):
sitemap = Sitemap(self.scraper.sitemap, parent_id=self.parent_id)
response = requests.get(self.url)
sitemap.parent_item = response.content
sitemap_data = list(sitemap.get_data())
# merge data with data from initialization
for result in sitemap_data:
result.update(result, **self.base_data)
self.data_items.append(result)
def get_results(self):
return self.data_items
| from urllib.parse import urljoin
import requests
from .sitemap import Sitemap
class Job(object):
def __init__(self, url, parent_id=None, scraper=None, parent_job=None, base_data=None):
if parent_job:
self.url = self.combine_urls(parent_job.url, url)
else:
self.url = url
self.parent_id = parent_id
self.scraper = scraper
self.data_items = []
self.base_data = base_data or {}
def combine_urls(self, parent_url, child_url):
return urljoin(parent_url, child_url)
def execute(self):
sitemap = Sitemap(self.scraper.sitemap, parent_id=self.parent_id)
response = requests.get(self.url)
sitemap.parent_item = response.content
sitemap_data = list(sitemap.get_data())
# merge data with data from initialization
for result in sitemap_data:
result.update(result, **self.base_data)
self.data_items.append(result)
def get_results(self):
return self.data_items
| en | 0.853889 | # merge data with data from initialization | 2.956551 | 3 |
tests/test_replicants.py | refractionPOINT/python-limacharlie | 9 | 6612881 | <reponame>refractionPOINT/python-limacharlie<filename>tests/test_replicants.py
import limacharlie
def test_credentials( oid, key ):
lc = limacharlie.Manager( oid, key )
assert( lc.testAuth( [
'org.get',
'sensor.get',
'sensor.list',
'replicant.get',
'replicant.task',
] ) )
def test_replicants_available( oid, key ):
lc = limacharlie.Manager( oid, key )
replicants = list( lc.getAvailableReplicants() )
assert( 0 != len( replicants ) ) | import limacharlie
def test_credentials( oid, key ):
lc = limacharlie.Manager( oid, key )
assert( lc.testAuth( [
'org.get',
'sensor.get',
'sensor.list',
'replicant.get',
'replicant.task',
] ) )
def test_replicants_available( oid, key ):
lc = limacharlie.Manager( oid, key )
replicants = list( lc.getAvailableReplicants() )
assert( 0 != len( replicants ) ) | none | 1 | 2.139218 | 2 | |
using-amazon-root-ca/workspace/aws_backend.py | boraozgen/personalize-optiga-trust | 6 | 6612882 | #!/usr/bin/env python
import json
import os
import subprocess
class AwsiotViaShell:
    """Register AWS IoT things, policies and certificates by driving the
    ``aws`` CLI in subprocesses.

    On construction the wrapper eagerly fetches the caller identity, the
    configured region and the existing IoT policies, and exposes them as
    read-only properties.

    Requires a configured AWS CLI on PATH.  Output of mutating commands is
    appended to ``last.log`` in the working directory, mirroring the
    original shell redirection behaviour.
    """

    def __init__(self):
        self._caller_id = self._get_caller_id()
        self._region = self._get_region()
        self._list_policies = self._get_list_policies()

    @property
    def caller_id(self):
        """dict: parsed output of ``aws sts get-caller-identity``."""
        return self._caller_id

    @property
    def region(self):
        """str: the region configured for the AWS CLI."""
        return self._region

    @property
    def list_policies(self):
        """dict: parsed output of ``aws iot list-policies``."""
        return self._list_policies

    @staticmethod
    def _run_cli(args):
        """Run one AWS CLI command and return its stdout as text.

        The command is executed without a shell so that user-supplied
        values (thing names, policy names, ...) cannot be used for shell
        injection.  Raises ``subprocess.CalledProcessError`` on a non-zero
        exit instead of silently parsing empty output, as the previous
        temp-file based implementation effectively did.
        """
        completed = subprocess.run(args, stdout=subprocess.PIPE, check=True)
        return completed.stdout.decode("utf-8")

    @classmethod
    def _run_cli_logged(cls, args):
        """Run one AWS CLI command and append its stdout to ``last.log``."""
        output = cls._run_cli(args)
        with open("last.log", "a") as log_file:
            log_file.write(output)

    @staticmethod
    def _get_caller_id():
        """Return the AWS caller identity as a dict."""
        output = AwsiotViaShell._run_cli(["aws", "sts", "get-caller-identity"])
        return json.loads(output)

    @staticmethod
    def _get_region():
        """Return the region configured for the AWS CLI.

        Only surrounding whitespace is stripped; the previous code removed
        the last character unconditionally, which would corrupt the value
        whenever the CLI emitted no trailing newline.
        """
        output = AwsiotViaShell._run_cli(["aws", "configure", "get", "region"])
        return output.strip()

    @staticmethod
    def create_thing(thing_name):
        """Create an IoT thing named *thing_name* (output goes to last.log)."""
        AwsiotViaShell._run_cli_logged(
            ["aws", "iot", "create-thing", "--thing-name", str(thing_name)]
        )

    @staticmethod
    def _get_list_policies():
        """Return the existing IoT policies as a dict."""
        output = AwsiotViaShell._run_cli(["aws", "iot", "list-policies"])
        return json.loads(output)

    def _is_policy_exist(self, policy_name):
        """Return True if a policy named *policy_name* already exists."""
        return any(
            item["policyName"] == policy_name
            for item in self.list_policies["policies"]
        )

    def create_policy(self, policy_name, policy_document):
        """Create an IoT policy from a JSON file unless it already exists.

        *policy_document* is a path to a JSON policy file.  Existence is
        checked against the policy list fetched at construction time.
        """
        if self._is_policy_exist(policy_name):
            print("Warning: Using existing policy")
            return
        self._run_cli_logged(
            [
                "aws", "iot", "create-policy",
                "--policy-name", policy_name,
                "--policy-document", "file://{0}".format(policy_document),
            ]
        )

    @staticmethod
    def attach_thing_principal(thing_name, certificate):
        """Attach *certificate* (a create-certificate result dict) to a thing."""
        AwsiotViaShell._run_cli_logged(
            [
                "aws", "iot", "attach-thing-principal",
                "--thing-name", str(thing_name),
                "--principal", str(certificate["certificateArn"]),
            ]
        )

    @staticmethod
    def attach_policy_principal(policy_name, certificate):
        """Attach the policy named *policy_name* to the certificate principal."""
        AwsiotViaShell._run_cli_logged(
            [
                "aws", "iot", "attach-principal-policy",
                "--policy-name", str(policy_name),
                "--principal", str(certificate["certificateArn"]),
            ]
        )

    def create_certificate_from_csr(self, csr_fingerprint_sha1):
        """Create and activate a certificate from ``<fingerprint>.csr``.

        The CLI itself writes the signed certificate to
        ``<fingerprint>.pem`` via --certificate-pem-outfile; the parsed
        registration response is returned.
        """
        output = self._run_cli(
            [
                "aws", "iot", "create-certificate-from-csr",
                "--region", self.region,
                "--certificate-signing-request",
                "file://{0}.csr".format(csr_fingerprint_sha1),
                "--certificate-pem-outfile",
                "{0}.pem".format(csr_fingerprint_sha1),
                "--set-as-active",
            ]
        )
        return json.loads(output)
| #!/usr/bin/env python
import json
import os
import subprocess
class AwsiotViaShell:
def __init__(self):
self._caller_id = self._get_caller_id()
self._region = self._get_region()
self._list_policies = self._get_list_policies()
@property
def caller_id(self):
return self._caller_id
@property
def region(self):
return self._region
@property
def list_policies(self):
return self._list_policies
@staticmethod
def _get_caller_id():
subprocess.call(
'aws sts get-caller-identity > .caller_id', shell=True
)
with open(".caller_id", "r") as caller_id_file:
caller_id = json.load(caller_id_file)
os.remove(".caller_id")
return caller_id
@staticmethod
def _get_region():
subprocess.call(
'aws configure get region > .my_region',
shell=True
)
with open(".my_region", "r") as region_file:
region = region_file.read()
region = region[:-1]
os.remove(".my_region")
return region
@staticmethod
def create_thing(thing_name):
subprocess.call(
'aws iot create-thing --thing-name "{0}" >> last.log'.format(thing_name),
shell=True
)
@staticmethod
def _get_list_policies():
subprocess.call(
'aws iot list-policies > .list_policies',
shell=True
)
with open(".list_policies", "r") as list_policies_file:
list_policies = json.load(list_policies_file)
os.remove(".list_policies")
return list_policies
def _is_policy_exist(self, policy_name):
if next((item for item in self.list_policies['policies'] if item["policyName"] == policy_name), None):
return True
else:
return False
def create_policy(self, policy_name, policy_document):
if not self._is_policy_exist(policy_name):
subprocess.call(
'aws iot '
'create-policy --policy-name "{0}" --policy-document file://{1} >> last.log'.format(policy_name, policy_document),
shell=True
)
else:
print("Warning: Using existing policy")
@staticmethod
def attach_thing_principal(thing_name, certificate):
subprocess.call(
'aws iot '
'attach-thing-principal '
'--thing-name {0} --principal {1} >> last.log'.format(str(thing_name), str(certificate['certificateArn'])),
shell=True
)
@staticmethod
def attach_policy_principal(policy_name, certificate):
subprocess.call(
'aws iot '
'attach-principal-policy '
'--policy-name {0} --principal {1} >> last.log'.format(str(policy_name), str(certificate['certificateArn'])),
shell=True
)
def create_certificate_from_csr(self, csr_fingerprint_sha1):
subprocess.call(
'aws iot create-certificate-from-csr '
'--region {0} '
'--certificate-signing-request file://{1}.csr '
'--certificate-pem-outfile {2}.pem '
'--set-as-active > .reg_cert'.format(self.region, csr_fingerprint_sha1, csr_fingerprint_sha1),
shell=True
)
with open(".reg_cert", "r") as reg_cert_file:
reg_certificate = json.load(reg_cert_file)
os.remove(".reg_cert")
return reg_certificate
| ru | 0.26433 | #!/usr/bin/env python | 2.364987 | 2 |
burger_war_dev/scripts/enemy_pos_from_lider.py | BolaDeArroz/burger_war_dev | 0 | 6612883 | <reponame>BolaDeArroz/burger_war_dev
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import math
import numpy as np
import roslib
import rospy
from std_msgs.msg import Bool
from nav_msgs.msg import OccupancyGrid
from sensor_msgs.msg import LaserScan,PointCloud2,PointCloud
from geometry_msgs.msg import Point
from visualization_msgs.msg import Marker
from burger_war_dev.msg import MyPose
import tf
from PIL import Image
import os
import cv2
from laser_geometry import LaserProjection
from obstacle_detector.msg import Obstacles
class enemy_pos_from_lider:
    """Estimate the enemy robot position from lidar obstacle clusters.

    Subscribes to /obstacles (obstacle_detector output), rejects clusters
    that match the known static field objects (centre block, corner blocks,
    walls), accumulates the remaining detections in a 240x240 potential
    grid (10 mm cells over the 45-degree-rotated field) and publishes the
    most likely enemy position together with RViz markers.
    """

    def __init__(self):
        # Subscriber for the obstacle detector output.
        self.obstacles = Obstacles()
        self.obstacles_sub = rospy.Subscriber('/obstacles', Obstacles, self.obstacle_callback)
        # Publisher for the current enemy position estimate.
        self.pub_enemy_pos = rospy.Publisher('enemy_pos_from_lider', Point, queue_size=1)
        # Publisher for the last confirmed enemy position.
        self.pub_last_enemy_pos = rospy.Publisher('enemy_pos_from_lider_last', Point, queue_size=1)
        self.last_enemy_pos = Point(0, 1.3, 0)
        # Marker for the current enemy position (red cube).
        self.marker = Marker()
        self.marker.header.frame_id = "map"
        self.marker.ns = "basic_shapes"
        self.marker.id = 0
        self.marker.scale.x = self.marker.scale.y = self.marker.scale.z = 0.20
        self.marker.color.a = 1.0
        self.marker.color.r = 1.0
        self.marker.type = Marker.CUBE
        self.marker.action = Marker.ADD
        self.enemy_marker_pub = rospy.Publisher('enemy_pos_from_lider_marker', Marker, queue_size=1)
        # Marker for the last confirmed enemy position (blue cube).
        self.last_marker = Marker()
        self.last_marker.header.frame_id = "map"
        self.last_marker.ns = "basic_shapes"
        self.last_marker.id = 0
        self.last_marker.scale.x = self.last_marker.scale.y = self.last_marker.scale.z = 0.20
        self.last_marker.color.a = 1.0
        self.last_marker.color.b = 1.0
        self.last_marker.type = Marker.CUBE
        self.last_marker.action = Marker.ADD
        self.last_marker.pose.position.x = 1.3
        self.last_marker.pose.position.y = 0
        self.enemy_last_marker_pub = rospy.Publisher('enemy_pos_from_lider_last_marker', Marker, queue_size=1)
        # Potential grid: one cell per 10 mm over the rotated field.
        self.enemy_potential_array = np.zeros((240, 240))

    def obstacle_callback(self, data):
        """Cache the latest obstacle message for the main loop."""
        self.obstacles = data

    def enemy_pos_move_avg(self, x, y, potential):
        """Accumulate a detection at map position (x, y) into the potential
        grid and return the current best estimate.

        Returns (found, est_x, est_y): ``found`` is True once any cell
        reaches the confidence threshold (100), in which case
        (est_x, est_y) is the estimated enemy position in map coordinates.
        """
        margin = 5  # spread each detection over +-50 mm around the hit cell
        x_idx_max = self.enemy_potential_array.shape[0] - 1
        y_idx_max = self.enemy_potential_array.shape[1] - 1
        # Rotate by 45 degrees so the diamond-shaped field becomes axis
        # aligned, then shift/scale metres into 10 mm grid cells.
        rot_x = x * math.cos(math.radians(45)) - y * math.sin(math.radians(45))
        rot_y = x * math.sin(math.radians(45)) + y * math.cos(math.radians(45))
        rot_x = (rot_x + 1.2) * 100  # m -> 10 mm cells
        rot_y = (rot_y + 1.2) * 100  # m -> 10 mm cells
        # Clamp the cell coordinates into the grid.  (Bug fix: the original
        # assigned ``self.enemy_potential_array[0] - 1`` -- an entire grid
        # row -- to the index here, which raised as soon as a detection
        # fell outside the grid bounds.)
        if rot_x >= x_idx_max:
            rot_x = x_idx_max
        elif rot_x <= 0:
            rot_x = 0
        if rot_y >= y_idx_max:
            rot_y = y_idx_max
        elif rot_y <= 0:
            rot_y = 0
        x_start = int(0 if rot_x - margin <= 0 else rot_x - margin)
        x_end = int(x_idx_max - 1 if rot_x + margin >= x_idx_max - 1 else rot_x + margin)
        y_start = int(0 if rot_y - margin <= 0 else rot_y - margin)
        y_end = int(y_idx_max - 1 if rot_y + margin >= y_idx_max - 1 else rot_y + margin)
        # Add the detection confidence to the affected cells.
        self.enemy_potential_array[x_start:x_end, y_start:y_end] = \
            self.enemy_potential_array[x_start:x_end, y_start:y_end] + potential
        max_idx = np.unravel_index(np.argmax(self.enemy_potential_array),
                                   self.enemy_potential_array.shape)
        if (self.enemy_potential_array[max_idx] >= 100):
            # Convert the best cell back to metres and undo the rotation.
            origin_x = float(max_idx[0]) / 100 - 1.2
            origin_y = float(max_idx[1]) / 100 - 1.2
            ori_x = origin_x * math.cos(math.radians(-45)) - origin_y * math.sin(math.radians(-45))
            ori_y = origin_x * math.sin(math.radians(-45)) + origin_y * math.cos(math.radians(-45))
            return True, ori_x, ori_y
        return False, 0, 0

    def run(self):
        """Main loop (5 Hz): filter obstacle clusters, update the potential
        grid and publish enemy position estimates and markers."""
        r = rospy.Rate(5)
        while not rospy.is_shutdown():
            obstacles = self.obstacles
            for obs in obstacles.circles:
                enemy_pos = Point()
                # Convert back to the map convention (x across, y up).
                enemy_pos.x = -obs.center.y
                enemy_pos.y = obs.center.x
                # Margins [m] used to separate the enemy from the static
                # field objects; larger values grow the "object" area.
                radius_mergin = 0.0   # cluster radius margin
                center_mergin = 0.15  # centre block margin
                cornar_mergin = 0.2   # corner block margin
                wall_mergin = 0.05    # outer wall margin
                potential = 80        # initial enemy confidence
                # --- filtering ---
                # Clusters with a radius of ~10 cm or more are field objects.
                if (obs.radius >= 0.10 - radius_mergin):
                    continue
                elif (obs.radius >= 0.10):
                    potential = 50
                # Centre object?
                if (abs(enemy_pos.x) <= 0.175 + center_mergin and abs(enemy_pos.y) <= 0.175 + center_mergin):
                    continue
                elif (abs(enemy_pos.x) <= 0.175 and abs(enemy_pos.y) <= 0.175):
                    potential = 30
                # Corner object?
                if ((abs(enemy_pos.x) >= 0.430 - cornar_mergin and abs(enemy_pos.x) <= 0.640 + cornar_mergin) and
                        (abs(enemy_pos.y) >= 0.455 - cornar_mergin and abs(enemy_pos.y) <= 0.605 + cornar_mergin)):
                    continue
                elif ((abs(enemy_pos.x) >= 0.430 and abs(enemy_pos.x) <= 0.640) and
                        (abs(enemy_pos.y) >= 0.455 and abs(enemy_pos.y) <= 0.605)):
                    potential = 30
                # Wall?  (2400 * sqrt(2) / 2 = 1.697)
                if ((abs(enemy_pos.y) + abs(enemy_pos.x)) >= 1.697 - wall_mergin):
                    continue
                elif ((abs(enemy_pos.y) + abs(enemy_pos.x)) >= 1.697):
                    potential = 50
                is_enemy_ext, x, y = self.enemy_pos_move_avg(enemy_pos.x, enemy_pos.y, potential)
                if is_enemy_ext:
                    self.pub_enemy_pos.publish(Point(x, y, 0))
                    self.last_enemy_pos = enemy_pos
                    # Current enemy position marker (red).
                    self.marker.pose.position = obs.center
                    self.marker.header.stamp = rospy.Time.now()
                    self.marker.id = 1
                    self.marker.color.r = 1.0
                    self.marker.color.b = 0.0
                    self.marker.lifetime = rospy.Duration(0.1)
                    self.enemy_marker_pub.publish(self.marker)
                    self.last_marker = self.marker
            # Decay the potential grid so stale detections fade out.
            self.enemy_potential_array = self.enemy_potential_array * 0.7
            self.enemy_potential_array = self.enemy_potential_array.clip(0, 100)
            self.pub_last_enemy_pos.publish(self.last_enemy_pos)
            # Last known enemy position marker (blue).
            self.last_marker.id = 2
            self.last_marker.color.r = 0.0
            self.last_marker.color.b = 1.0
            self.enemy_last_marker_pub.publish(self.last_marker)
            r.sleep()
def main(args):
    """Initialise the ROS node and run the enemy position estimator."""
    rospy.init_node('enemy_pos_from_lider', anonymous=True)
    estimator = enemy_pos_from_lider()
    estimator.run()


if __name__ == '__main__':
    main(sys.argv)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import math
import numpy as np
import roslib
import rospy
from std_msgs.msg import Bool
from nav_msgs.msg import OccupancyGrid
from sensor_msgs.msg import LaserScan,PointCloud2,PointCloud
from geometry_msgs.msg import Point
from visualization_msgs.msg import Marker
from burger_war_dev.msg import MyPose
import tf
from PIL import Image
import os
import cv2
from laser_geometry import LaserProjection
from obstacle_detector.msg import Obstacles
class enemy_pos_from_lider:
def __init__(self):
# /Obstaclesトピックサブスクライブ用
self.obstacles=Obstacles()
self.obstacles_sub = rospy.Subscriber('/obstacles', Obstacles, self.obstacle_callback)
# /敵位置トピックパブ用
self.pub_enemy_pos=rospy.Publisher('enemy_pos_from_lider',Point,queue_size=1)
# /最終敵位置トピックパブ用
self.pub_last_enemy_pos=rospy.Publisher('enemy_pos_from_lider_last',Point,queue_size=1)
self.last_enemy_pos=Point(0,1.3,0)
# /敵位置マーカ
self.marker=Marker()
self.marker.header.frame_id="map"
self.marker.ns = "basic_shapes"
self.marker.id = 0
self.marker.scale.x=self.marker.scale.y=self.marker.scale.z=0.20
self.marker.color.a=1.0
self.marker.color.r=1.0
self.marker.type=Marker.CUBE
self.marker.action = Marker.ADD
self.enemy_marker_pub = rospy.Publisher('enemy_pos_from_lider_marker',Marker,queue_size=1)
# /最終敵位置マーカ
self.last_marker=Marker()
self.last_marker.header.frame_id="map"
self.last_marker.ns = "basic_shapes"
self.last_marker.id = 0
self.last_marker.scale.x=self.last_marker.scale.y=self.last_marker.scale.z=0.20
self.last_marker.color.a=1.0
self.last_marker.color.b=1.0
self.last_marker.type=Marker.CUBE
self.last_marker.action = Marker.ADD
self.last_marker.pose.position.x= 1.3
self.last_marker.pose.position.y= 0
self.enemy_last_marker_pub = rospy.Publisher('enemy_pos_from_lider_last_marker',Marker,queue_size=1)
self.enemy_potential_array=np.zeros((240,240))#10mm毎
def obstacle_callback(self, data):
self.obstacles=data
def enemy_pos_move_avg(self,x,y,potential):
margin=5 #margin+-50mm 発見地の周辺何mmまで確率付与するか
x_idx_max=self.enemy_potential_array.shape[0]-1
y_idx_max=self.enemy_potential_array.shape[1]-1
rot_x=x*math.cos(math.radians(45))-y*math.sin(math.radians(45))
rot_y=x*math.sin(math.radians(45))+y*math.cos(math.radians(45))
rot_x=(rot_x+1.2)*100 #m->10mm
rot_y=(rot_y+1.2)*100 #m->10mm
if(rot_x>=x_idx_max):rot_x=self.enemy_potential_array[0]-1
elif(rot_x<=0):rot_x=0
if(rot_y>=y_idx_max):rot_y=self.enemy_potential_array[1]-1
elif(rot_y<=0):rot_y=0
x_start=int(0 if rot_x-margin<= 0 else rot_x-margin)
x_end =int(x_idx_max-1 if rot_x+margin>=x_idx_max-1 else rot_x+margin)
y_start=int(0 if rot_y-margin<= 0 else rot_y-margin)
y_end =int(y_idx_max-1 if rot_y+margin>=y_idx_max-1 else rot_y+margin)
self.enemy_potential_array[x_start:x_end,y_start:y_end]=self.enemy_potential_array[x_start:x_end,y_start:y_end]+potential #+確率
max_idx=np.unravel_index(np.argmax(self.enemy_potential_array),self.enemy_potential_array.shape)
#print(self.enemy_potential_array[max_idx])
if (self.enemy_potential_array[max_idx]>=100):
origin_x=float(max_idx[0])/100-1.2
origin_y=float(max_idx[1])/100-1.2
#print(origin_x,origin_y)
ori_x=origin_x*math.cos(math.radians(-45))-origin_y*math.sin(math.radians(-45))
ori_y=origin_x*math.sin(math.radians(-45))+origin_y*math.cos(math.radians(-45))
#print(ori_x,ori_y)
return True,ori_x,ori_y
return False,0,0
def run(self):
r=rospy.Rate(5)
while not rospy.is_shutdown():
# self.object_marker_pub.publish(self.object_marker)
obstacles=self.obstacles
for obs in obstacles.circles:
enemy_pos=Point()
#横軸x,縦軸yの座標に戻す
enemy_pos.x=-obs.center.y
enemy_pos.y= obs.center.x
#敵とオブジェクトを見分けるマージン[m]。値が大きいほど、オブジェクトだと判定するエリアが大きくなる。
radius_mergin=0.0#半径
center_mergin=0.15#センター
cornar_mergin=0.2#コーナー
wall_mergin=0.05#壁
potential=80#敵確率初期値
#フィルタリング
#障害物の半径が10センチ以上か
if(obs.radius>=0.10-radius_mergin):
continue
elif(obs.radius>=0.10):
potential=50
#センターオブジェクトか
if(abs(enemy_pos.x) <=0.175+center_mergin and abs(enemy_pos.y) <=0.175+center_mergin):
continue
elif(abs(enemy_pos.x) <=0.175 and abs(enemy_pos.y) <=0.175 ):
potential=30
#コーナーオブジェクトか
if((abs(enemy_pos.x) >=0.430-cornar_mergin and abs(enemy_pos.x) <=0.640+cornar_mergin) and \
(abs(enemy_pos.y) >=0.455-cornar_mergin and abs(enemy_pos.y) <=0.605+cornar_mergin)):
continue
elif((abs(enemy_pos.x) >=0.430 and abs(enemy_pos.x) <=0.640) and \
(abs(enemy_pos.y) >=0.455 and abs(enemy_pos.y) <=0.605)):
potential=30
#壁か(2400*ルート2/2=1.697)
if((abs(enemy_pos.y)+abs(enemy_pos.x)) >=1.697-wall_mergin):
# print("is_wall",enemy_pos)
continue
elif((abs(enemy_pos.y)+abs(enemy_pos.x)) >=1.697):
potential=50
is_enemy_ext,x,y=self.enemy_pos_move_avg(enemy_pos.x,enemy_pos.y,potential)
if is_enemy_ext:
self.pub_enemy_pos.publish(Point(x,y,0))
self.last_enemy_pos=enemy_pos
#敵位置マーカー
self.marker.pose.position=obs.center
self.marker.header.stamp = rospy.Time.now()
self.marker.id = 1
self.marker.color.r=1.0
self.marker.color.b=0.0
self.marker.lifetime=rospy.Duration(0.1)
self.enemy_marker_pub.publish(self.marker)
self.last_marker=self.marker
self.enemy_potential_array=self.enemy_potential_array*0.7#減衰
self.enemy_potential_array=self.enemy_potential_array.clip(0,100)
self.pub_last_enemy_pos.publish(self.last_enemy_pos)
#最終敵位置マーカー
self.last_marker.id = 2
self.last_marker.color.r=0.0
self.last_marker.color.b=1.0
self.enemy_last_marker_pub.publish(self.last_marker)
r.sleep()
def main(args):
rospy.init_node('enemy_pos_from_lider', anonymous=True)
ra = enemy_pos_from_lider()
# print('[enemy_pos_from_lider]initialized')
ra.run()
if __name__=='__main__':
main(sys.argv) | ja | 0.960506 | #!/usr/bin/env python # -*- coding: utf-8 -*- # /Obstaclesトピックサブスクライブ用 # /敵位置トピックパブ用 # /最終敵位置トピックパブ用 # /敵位置マーカ # /最終敵位置マーカ #10mm毎 #margin+-50mm 発見地の周辺何mmまで確率付与するか #m->10mm #m->10mm #+確率 #print(self.enemy_potential_array[max_idx]) #print(origin_x,origin_y) #print(ori_x,ori_y) # self.object_marker_pub.publish(self.object_marker) #横軸x,縦軸yの座標に戻す #敵とオブジェクトを見分けるマージン[m]。値が大きいほど、オブジェクトだと判定するエリアが大きくなる。 #半径 #センター #コーナー #壁 #敵確率初期値 #フィルタリング #障害物の半径が10センチ以上か #センターオブジェクトか #コーナーオブジェクトか #壁か(2400*ルート2/2=1.697) # print("is_wall",enemy_pos) #敵位置マーカー #減衰 #最終敵位置マーカー # print('[enemy_pos_from_lider]initialized') | 2.258834 | 2 |
etc/git_version.py | LucaDiStasio/CompDam_DGD | 81 | 6612884 | """
Generates a fortran version file. Runs on git-hook post-checkout.
"""
import subprocess
if __name__ == "__main__":
    # Ask git for the current commit hash and its timestamp.  check_output
    # returns *bytes*; the original code passed them through str(), which
    # produces a literal "b'...\n'" string, so the generated Fortran file
    # contained byte-repr garbage.  Decode and strip instead.
    sha = subprocess.check_output("git rev-parse HEAD", shell=True).decode().strip()
    t = subprocess.check_output("git show -s --format=%ci", shell=True).decode().strip()
    with open('for/version.for', 'w') as f:
        f.write(' Module version_Mod\n')
        f.write(' Character(len=40), parameter :: hash = "' + sha + '"\n')
        f.write(' Character(len=50), parameter :: timestamp = "' + t + '"\n')
        f.write(' End Module\n')
| """
Generates a fortran version file. Runs on git-hook post-checkout.
"""
import subprocess
if __name__ == "__main__":
sha = subprocess.check_output("git rev-parse HEAD", shell=True)
t = subprocess.check_output("git show -s --format=%ci", shell=True)
with open('for/version.for', 'w') as f:
f.write(' Module version_Mod\n')
f.write(' Character(len=40), parameter :: hash = "' + str(sha).strip() + '"\n')
f.write(' Character(len=50), parameter :: timestamp = "' + str(t).strip() + '"\n')
f.write(' End Module\n')
| en | 0.573925 | Generates a fortran version file. Runs on git-hook post-checkout. | 2.203542 | 2 |
tools/generateCsvFromProgress.py | monoclecat/latent-conditioned-SAC | 1 | 6612885 | import os
import csv
import numpy as np
def getPrintableArrayFromNumpyArray(array):
    """Pair each value of a 1-D array with its 1-based index for CSV export.

    Returns a structured array with fields ('key_name', 'value'), where the
    values are strings of at most 6 characters and NaNs appear as 'NaN'.
    """
    # Convert to strings first; NaNs stringify as 'nan', which the plotting
    # tool expects as 'NaN'.  (Bug fix: the original computed arrayStr but
    # then performed the replacement on the *numeric* input array, so the
    # substitution never happened and arrayStr was unused.)
    arrayStr = array.astype(str)
    arrayStr[arrayStr == 'nan'] = 'NaN'
    printArray = np.zeros(array.size, dtype=[('key_name', int), ("value", 'U6')])
    printArray['key_name'] = np.arange(1, array.size + 1)
    printArray["value"] = arrayStr
    return printArray
def moving_average(x, w):
return np.convolve(x, np.ones(w), 'valid') / w
def generateMixedCsv(baseFolder):
directories = [root for root, dirs, files in os.walk(baseFolder)]
if baseFolder + "/toolOutput" in directories:
directories.remove(baseFolder + "/toolOutput")
files = list()
for directory in directories:
if os.path.isfile(directory + "/progress.txt"):
with open(directory + "/progress.txt") as tsv:
file = dict()
for column in zip(*[line for line in csv.reader(tsv, dialect="excel-tab")]):
if column[0] == "Epoch":
continue
file[column[0]] = column[1:]
files.append(file)
os.makedirs(baseFolder + "/toolOutput", exist_ok=True)
moving_average_steps = [1, 2, 4, 8, 16]
for key in files[0]:
keyValues = np.zeros(shape=(len(files), len(files[0][key])))
for index in range(len(files)):
if key in files[index]:
keyValues[index] = files[index][key]
if "TestEpRet" in key:
keyValues = np.divide(keyValues, 10)
for step in moving_average_steps:
pathToKeyOutput = baseFolder + "/toolOutput/" + "movingAverage" + str(step) + "/" + key
os.makedirs(pathToKeyOutput, exist_ok=True)
maxValues = moving_average(np.amax(keyValues, axis=0), step)
minValues = moving_average(np.amin(keyValues, axis=0), step)
mean = moving_average(np.mean(keyValues, axis=0), step)
median = moving_average(np.median(keyValues, axis=0), step)
std = moving_average(np.std(keyValues, axis=0), step)
std_pos = mean + 2 * std
std_neg = mean - 2 * std
np.savetxt(pathToKeyOutput + "/max.csv", getPrintableArrayFromNumpyArray(maxValues),fmt="%d ,%s",header="Epoch, " + key)
np.savetxt(pathToKeyOutput + "/min.csv", getPrintableArrayFromNumpyArray(minValues),fmt="%d ,%s",header="Epoch, " + key)
np.savetxt(pathToKeyOutput + "/mean.csv", getPrintableArrayFromNumpyArray(mean),fmt="%d ,%s",header="Epoch, " + key)
np.savetxt(pathToKeyOutput + "/median.csv", getPrintableArrayFromNumpyArray(median),fmt="%d ,%s",header="Epoch, " + key)
np.savetxt(pathToKeyOutput + "/std.csv", getPrintableArrayFromNumpyArray(std),fmt="%d ,%s",header="Epoch, " + key)
np.savetxt(pathToKeyOutput + "/std_pos.csv", getPrintableArrayFromNumpyArray(std_pos),fmt="%d ,%s",header="Epoch, " + key)
np.savetxt(pathToKeyOutput + "/std_neg.csv", getPrintableArrayFromNumpyArray(std_neg),fmt="%d ,%s",header="Epoch, " + key)
if __name__ == '__main__':
    # Command line entry point: --base_folder points at the experiment root.
    import argparse

    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--base_folder', required=True)
    parsed = arg_parser.parse_args()
    generateMixedCsv(parsed.base_folder)
| import os
import csv
import numpy as np
def getPrintableArrayFromNumpyArray(array):
arrayStr = array.astype(str)
array[array=='nan'] = 'NaN'
printArray = np.zeros(array.size, dtype=[('key_name', int), ("value", 'U6')])
printArray['key_name'] = np.arange(1, array.size+1)
printArray["value"] = array
return printArray
def moving_average(x, w):
return np.convolve(x, np.ones(w), 'valid') / w
def generateMixedCsv(baseFolder):
directories = [root for root, dirs, files in os.walk(baseFolder)]
if baseFolder + "/toolOutput" in directories:
directories.remove(baseFolder + "/toolOutput")
files = list()
for directory in directories:
if os.path.isfile(directory + "/progress.txt"):
with open(directory + "/progress.txt") as tsv:
file = dict()
for column in zip(*[line for line in csv.reader(tsv, dialect="excel-tab")]):
if column[0] == "Epoch":
continue
file[column[0]] = column[1:]
files.append(file)
os.makedirs(baseFolder + "/toolOutput", exist_ok=True)
moving_average_steps = [1, 2, 4, 8, 16]
for key in files[0]:
keyValues = np.zeros(shape=(len(files), len(files[0][key])))
for index in range(len(files)):
if key in files[index]:
keyValues[index] = files[index][key]
if "TestEpRet" in key:
keyValues = np.divide(keyValues, 10)
for step in moving_average_steps:
pathToKeyOutput = baseFolder + "/toolOutput/" + "movingAverage" + str(step) + "/" + key
os.makedirs(pathToKeyOutput, exist_ok=True)
maxValues = moving_average(np.amax(keyValues, axis=0), step)
minValues = moving_average(np.amin(keyValues, axis=0), step)
mean = moving_average(np.mean(keyValues, axis=0), step)
median = moving_average(np.median(keyValues, axis=0), step)
std = moving_average(np.std(keyValues, axis=0), step)
std_pos = mean + 2 * std
std_neg = mean - 2 * std
np.savetxt(pathToKeyOutput + "/max.csv", getPrintableArrayFromNumpyArray(maxValues),fmt="%d ,%s",header="Epoch, " + key)
np.savetxt(pathToKeyOutput + "/min.csv", getPrintableArrayFromNumpyArray(minValues),fmt="%d ,%s",header="Epoch, " + key)
np.savetxt(pathToKeyOutput + "/mean.csv", getPrintableArrayFromNumpyArray(mean),fmt="%d ,%s",header="Epoch, " + key)
np.savetxt(pathToKeyOutput + "/median.csv", getPrintableArrayFromNumpyArray(median),fmt="%d ,%s",header="Epoch, " + key)
np.savetxt(pathToKeyOutput + "/std.csv", getPrintableArrayFromNumpyArray(std),fmt="%d ,%s",header="Epoch, " + key)
np.savetxt(pathToKeyOutput + "/std_pos.csv", getPrintableArrayFromNumpyArray(std_pos),fmt="%d ,%s",header="Epoch, " + key)
np.savetxt(pathToKeyOutput + "/std_neg.csv", getPrintableArrayFromNumpyArray(std_neg),fmt="%d ,%s",header="Epoch, " + key)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--base_folder', required=True)
args = parser.parse_args()
generateMixedCsv(args.base_folder)
| none | 1 | 2.646924 | 3 | |
bin/tsv.py | allydunham/foldx_interface_ddg | 0 | 6612886 | #!/usr/bin/env python3
"""
Combine FoldX AnalyseComplex output from many complexes
"""
import sys
import argparse
import pandas as pd
from pathlib import Path
def import_complex_dir(path):
    """Import the interaction and interface tables from one AnalyseComplex
    output directory and merge them into a single annotated data frame."""
    path = path.rstrip('/')

    interactions = pd.read_csv(f'{path}/interactions.tsv', sep='\t').rename(
        {'interface_residues': 'number_of_interface_residues'}, axis='columns'
    )
    interface = pd.read_csv(f'{path}/interface_residues.tsv', sep='\t')

    merge_keys = ['chain', 'position', 'wt', 'mut']
    comb = pd.merge(interactions, interface, how='outer', on=merge_keys)

    # The complex and interface names are the last two path components.
    parts = path.split('/')
    comb['complex'] = parts[-2]
    comb['interface'] = parts[-1]

    # Put the identifying columns first, keep everything else in order.
    leading = ['complex', 'interface'] + merge_keys
    trailing = [c for c in comb.columns if c not in leading]
    return comb[leading + trailing]
def main(args):
    """Concatenate every complex directory into one sorted table on stdout."""
    frames = [import_complex_dir(directory) for directory in args.dir]
    combined = pd.concat(frames)
    order = ['complex', 'interface', 'chain', 'position', 'wt', 'mut']
    combined = combined.sort_values(axis='rows', by=order).reset_index(drop=True)
    combined.to_csv(sys.stdout, sep='\t', index=False)
def parse_args():
    """Parse the command line: one or more AnalyseComplex output directories."""
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument(
        'dir',
        metavar='D',
        nargs='+',
        help="Directories containing the output of the AnalyseComplex pipeline",
    )
    return parser.parse_args()


if __name__ == "__main__":
    main(parse_args())
"""
Combine FoldX AnalyseComplex output from many complexes
"""
import sys
import argparse
import pandas as pd
from pathlib import Path
def import_complex_dir(path):
"""
Import tables from an AnalyseComplex output directory
"""
path = path.rstrip('/')
interactions = pd.read_csv(f'{path}/interactions.tsv', sep='\t')
interactions = interactions.rename({'interface_residues': 'number_of_interface_residues'},
axis='columns')
interface = pd.read_csv(f'{path}/interface_residues.tsv', sep='\t')
comb = pd.merge(interactions, interface, how='outer', on=['chain', 'position', 'wt', 'mut'])
comb['complex'] = path.split("/")[-2]
comb['interface'] = path.split("/")[-1]
cols = ['complex', 'interface', 'chain', 'position', 'wt', 'mut']
comb = comb[cols + [c for c in comb.columns if not c in cols]]
return comb
def main(args):
"""Main"""
complex_dfs = [import_complex_dir(path) for path in args.dir]
complexes = pd.concat(complex_dfs)
sort_cols = ['complex', 'interface', 'chain', 'position', 'wt', 'mut']
complexes = complexes.sort_values(axis='rows', by=sort_cols).reset_index(drop=True)
complexes.to_csv(sys.stdout, sep='\t', index=False)
def parse_args():
"""Process input arguments"""
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('dir', metavar='D', nargs='+',
help="Directories containing the output of the AnalyseComplex pipeline")
return parser.parse_args()
if __name__ == "__main__":
main(parse_args()) | en | 0.370261 | #!/usr/bin/env python3 Combine FoldX AnalyseComplex output from many complexes Import tables from an AnalyseComplex output directory Main Process input arguments | 2.583426 | 3 |
src/test/testingDeciding.py | McCoy-Software-Solutions/Kreatures | 0 | 6612887 | # Copyright (c) 2022 McCoy Software Solutions
# Apache License 2.0
import random
chanceToLove = 33.3
chanceToFight = 33.3
chanceToBefriend = 33.3
decision = random.randint(0,100)
print("Chance To Love: %d" % chanceToLove)
print("Chance To Fight: %d" % chanceToFight)
print("Chance To Befriend: %d" % chanceToBefriend)
print("Decision: %d" % decision)
if decision <= 0 + chanceToLove:
print("love")
elif chanceToLove < decision < chanceToLove + chanceToFight:
print("fight")
elif chanceToLove + chanceToFight < decision < 100:
print("befriend")
| # Copyright (c) 2022 McCoy Software Solutions
# Apache License 2.0
import random
chanceToLove = 33.3
chanceToFight = 33.3
chanceToBefriend = 33.3
decision = random.randint(0,100)
print("Chance To Love: %d" % chanceToLove)
print("Chance To Fight: %d" % chanceToFight)
print("Chance To Befriend: %d" % chanceToBefriend)
print("Decision: %d" % decision)
if decision <= 0 + chanceToLove:
print("love")
elif chanceToLove < decision < chanceToLove + chanceToFight:
print("fight")
elif chanceToLove + chanceToFight < decision < 100:
print("befriend")
| en | 0.602137 | # Copyright (c) 2022 McCoy Software Solutions # Apache License 2.0 | 3.574887 | 4 |
hy/core/__init__.py | Tritlo/hy | 0 | 6612888 | STDLIB = [
"hy.core.language",
"hy.core.tailrec",
"hy.core.shadow"
]
| STDLIB = [
"hy.core.language",
"hy.core.tailrec",
"hy.core.shadow"
]
| none | 1 | 1.072141 | 1 | |
maquinaria/usuarios/serializers/usuarios.py | CFredy9/Maquinaria | 0 | 6612889 | <reponame>CFredy9/Maquinaria<gh_stars>0
"""Users serializers"""
#Django
from django.conf import settings
from django.contrib.auth import password_validation, authenticate
from django.core.mail import EmailMultiAlternatives
from django.core.validators import RegexValidator
from django.template.loader import render_to_string
from django.utils import timezone
#Django REST Framework
from rest_framework import serializers
from rest_framework.authtoken.models import Token
from rest_framework.validators import UniqueValidator
#Models
from maquinaria.usuarios.models import User
#Utilities
import jwt
from datetime import timedelta
class UserModelSerializer(serializers.ModelSerializer):
    """Serializer exposing the public profile fields of a User.

    Sensitive fields (password, verification flags) are deliberately
    not listed.
    """
    class Meta:
        model = User
        fields = (
            'username',
            'first_name',
            'last_name',
            'email',
            'phone_number',
        )
class UserSignUpSerializer(serializers.Serializer):
    """User sign up serializer.

    Validates registration data (unique email/username, phone format,
    matching passwords) and creates the new user.
    """
    email = serializers.EmailField(
        validators=[UniqueValidator(queryset=User.objects.all())]
    )
    username = serializers.CharField(
        min_length=4,
        max_length=20,
        validators=[UniqueValidator(queryset=User.objects.all())]
    )
    # Phone number: optional '+', 9 to 15 digits.
    phone_regex = RegexValidator(
        regex=r'\+?1?\d{9,15}$',
        message='Phone number must be entered in the format: +999999999 Up to 15 digits allowed'
    )
    phone_number = serializers.CharField(validators=[phone_regex])
    # Password, entered twice for confirmation.
    password = serializers.CharField(min_length=8, max_length=64)
    password_confirmation = serializers.CharField(min_length=8, max_length=64)
    # Name
    first_name = serializers.CharField(min_length=2, max_length=30)
    last_name = serializers.CharField(min_length=2, max_length=30)
    def validate(self, data):
        """Verify both passwords match and satisfy Django's validators."""
        passwd = data['password']
        passwd_conf = data['password_confirmation']
        if passwd != passwd_conf:
            raise serializers.ValidationError('Passwords does not mach')
        password_validation.validate_password(passwd)
        return data
    def create(self, data):
        """Create and return the user from the validated data."""
        data.pop('password_confirmation')
        user = User.objects.create_user(**data)
        #self.send_confirmation_email(user)
        return user
    def gen_verification_token(self, user):
        """Create a JWT token that the user can use to verify its account."""
        exp_date = timezone.now() + timedelta(days=3)
        payload = {
            'user': user.username,
            'exp': int(exp_date.timestamp()),
            'type': 'email_confirmation'
        }
        # Return the encoded token itself.  (Bug fix: the original wrapped
        # it in ``Response(token)``, but Response is never imported in this
        # module, so calling this method raised NameError.)
        return jwt.encode(payload, settings.SECRET_KEY, algorithm='HS256')
class UserLoginSerializer(serializers.Serializer):
    """User login serializer.

    Validates a username/password pair and, on success, stores the
    authenticated user in ``self.context`` so ``create`` can issue a token.
    """
    #email = serializers.EmailField()
    username = serializers.CharField(min_length=4, max_length=20)
    password = serializers.CharField(min_length=8, max_length=64)
    def validate(self, data):
        """Check credentials and require a verified account.

        Raises:
            serializers.ValidationError: on bad credentials or when the
                account has not been verified yet.
        """
        user = authenticate(username=data['username'], password=data['password'])
        if not user:
            raise serializers.ValidationError('Invalid credentials')
        if not user.is_verified:
            raise serializers.ValidationError('Account is not active yet :(')
        # Stash the authenticated user so create() can retrieve it.
        self.context['user'] = user
        return data
    def create(self, data):
        """Generate or retrieve the user's auth token.

        Returns:
            tuple: (user, token_key).
        """
        token, created = Token.objects.get_or_create(user=self.context['user'])
        return self.context['user'], token.key
#Django
from django.conf import settings
from django.contrib.auth import password_validation, authenticate
from django.core.mail import EmailMultiAlternatives
from django.core.validators import RegexValidator
from django.template.loader import render_to_string
from django.utils import timezone
#Django REST Framework
from rest_framework import serializers
from rest_framework.authtoken.models import Token
from rest_framework.validators import UniqueValidator
#Models
from maquinaria.usuarios.models import User
#Utilities
import jwt
from datetime import timedelta
class UserModelSerializer(serializers.ModelSerializer):
"""User model serializer"""
class Meta:
model = User
fields = (
'username',
'first_name',
'last_name',
'email',
'phone_number',
)
class UserSignUpSerializer(serializers.Serializer):
"""User sign up serializer"""
email = serializers.EmailField(
validators=[UniqueValidator(queryset=User.objects.all())]
)
username = serializers.CharField(
min_length=4,
max_length=20,
validators=[UniqueValidator(queryset=User.objects.all())]
)
#Phone number
phone_regex = RegexValidator(
regex=r'\+?1?\d{9,15}$',
message='Phone number must be entered in the format: +999999999 Up to 15 digits allowed'
)
phone_number = serializers.CharField(validators=[phone_regex])
#password
password = serializers.CharField(min_length=8, max_length=64)
password_confirmation = serializers.CharField(min_length=8, max_length=64)
#Name
first_name = serializers.CharField(min_length=2, max_length=30)
last_name = serializers.CharField(min_length=2, max_length=30)
def validate(self, data):
"""Verify passwords match"""
passwd = data['password']
passwd_conf = data['password_confirmation']
if passwd != passwd_conf:
raise serializers.ValidationError('Passwords does not mach')
password_validation.validate_password(passwd)
return data
def create(self, data):
data.pop('password_confirmation')
user = User.objects.create_user(**data)
#self.send_confirmation_email(user)
return user
def gen_verification_token(self, user):
"""Create JWT token that the user can use to verify its account"""
exp_date = timezone.now() + timedelta(days=3)
payload = {
'user': user.username,
'exp': int(exp_date.timestamp()),
'type': 'email_confirmation'
}
token = jwt.encode(payload, settings.SECRET_KEY, algorithm='HS256')
return Response(token)
class UserLoginSerializer(serializers.Serializer):
"""User Login serializer"""
#email = serializers.EmailField()
username = serializers.CharField(min_length=4, max_length=20)
password = serializers.CharField(min_length=8, max_length=64)
def validate(self, data):
"""Check credentials"""
user = authenticate(username=data['username'], password=data['password'])
if not user:
raise serializers.ValidationError('Invalid credentials')
if not user.is_verified:
raise serializers.ValidationError('Account is not active yet :(')
self.context['user'] = user
return data
def create(self, data):
"""Generate or retrieve new token"""
token, created = Token.objects.get_or_create(user=self.context['user'])
return self.context['user'], token.key | en | 0.64708 | Users serializers #Django #Django REST Framework #Models #Utilities User model serializer User sign up serializer #Phone number #password #Name Verify passwords match #self.send_confirmation_email(user) Create JWT token that the user can use to verify its account User Login serializer #email = serializers.EmailField() Check credentials Generate or retrieve new token | 2.60244 | 3 |
backend/api/controller/user/update.py | Vedant1202/sepsis | 0 | 6612890 | # import pymysql
from db import mysql
import json
from flask import jsonify
from flask import flash, request
from werkzeug.security import generate_password_hash, check_password_hash
# from flask_cors import CORS
from utils.utils import not_found, verify_session
def user_update():
try:
_fname = request.form.getlist("fname")[0]
_lname = request.form.getlist("lname")[0]
_dept = request.form.getlist("dept")[0]
_email = request.form.getlist("email")[0]
# _password = request.form.getlist("password")[0]
_type = request.form.getlist("type")[0]
_gender = request.form.getlist("gender")[0]
_dob = request.form.getlist("dob")[0]
_phone = request.form.getlist("phone")[0]
_specialization = request.form.getlist("specialization")[0]
_experience = request.form.getlist("experience")[0]
_registration = request.form.getlist("registration")[0]
_uid = request.form.getlist("uid")[0]
# _skey = request.form.getlist("skey")[0]
# validate the received values
if _fname and _lname and _dept and _email and _type and _gender and _dob and _phone and _specialization and _experience and _registration and _uid and request.method == "POST":
# if verify_session(_skey, _uid):
# do not save password as a plain text
# _hashed_password = generate_password_hash(_password)
# save edits
sql = "UPDATE user SET fname=%s, lname=%s, dept=%s, email=%s, type=%s, gender=%s , dob=%s ,phone=%s , specialization=%s , experience=%s , registration=%s WHERE uid=%s;"
data = (_fname, _lname , _dept, _email , _type ,_gender, _dob, _phone , _specialization, _experience , _registration, _uid)
conn = mysql.connect()
cursor = conn.cursor()
cursor.execute(sql, data)
conn.commit()
resp = jsonify("Success")
resp.status_code = 200
# else:
# resp = jsonify('Unauthorised')
# resp.status_code = 405
print(resp)
return resp
else:
return not_found()
except Exception as e:
print('====================== EXCEPTION ========================')
print(e)
finally:
print('Done')
# cursor.close()
# conn.close()
| # import pymysql
from db import mysql
import json
from flask import jsonify
from flask import flash, request
from werkzeug.security import generate_password_hash, check_password_hash
# from flask_cors import CORS
from utils.utils import not_found, verify_session
def user_update():
try:
_fname = request.form.getlist("fname")[0]
_lname = request.form.getlist("lname")[0]
_dept = request.form.getlist("dept")[0]
_email = request.form.getlist("email")[0]
# _password = request.form.getlist("password")[0]
_type = request.form.getlist("type")[0]
_gender = request.form.getlist("gender")[0]
_dob = request.form.getlist("dob")[0]
_phone = request.form.getlist("phone")[0]
_specialization = request.form.getlist("specialization")[0]
_experience = request.form.getlist("experience")[0]
_registration = request.form.getlist("registration")[0]
_uid = request.form.getlist("uid")[0]
# _skey = request.form.getlist("skey")[0]
# validate the received values
if _fname and _lname and _dept and _email and _type and _gender and _dob and _phone and _specialization and _experience and _registration and _uid and request.method == "POST":
# if verify_session(_skey, _uid):
# do not save password as a plain text
# _hashed_password = generate_password_hash(_password)
# save edits
sql = "UPDATE user SET fname=%s, lname=%s, dept=%s, email=%s, type=%s, gender=%s , dob=%s ,phone=%s , specialization=%s , experience=%s , registration=%s WHERE uid=%s;"
data = (_fname, _lname , _dept, _email , _type ,_gender, _dob, _phone , _specialization, _experience , _registration, _uid)
conn = mysql.connect()
cursor = conn.cursor()
cursor.execute(sql, data)
conn.commit()
resp = jsonify("Success")
resp.status_code = 200
# else:
# resp = jsonify('Unauthorised')
# resp.status_code = 405
print(resp)
return resp
else:
return not_found()
except Exception as e:
print('====================== EXCEPTION ========================')
print(e)
finally:
print('Done')
# cursor.close()
# conn.close()
| en | 0.427542 | # import pymysql # from flask_cors import CORS # _password = request.form.getlist("password")[0] # _skey = request.form.getlist("skey")[0] # validate the received values # if verify_session(_skey, _uid): # do not save password as a plain text # _hashed_password = generate_password_hash(_password) # save edits # else: # resp = jsonify('Unauthorised') # resp.status_code = 405 # cursor.close() # conn.close() | 2.496897 | 2 |
sppas/sppas/scripts/acmsplit.py | mirfan899/MTTS | 0 | 6612891 | <filename>sppas/sppas/scripts/acmsplit.py
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
..
---------------------------------------------------------------------
___ __ __ __ ___
/ | \ | \ | \ / the automatic
\__ |__/ |__/ |___| \__ annotation and
\ | | | | \ analysis
___/ | | | | ___/ of speech
http://www.sppas.org/
Use of this software is governed by the GNU Public License, version 3.
SPPAS is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
SPPAS is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with SPPAS. If not, see <http://www.gnu.org/licenses/>.
This banner notice must not be removed.
---------------------------------------------------------------------
scripts.acmsplit.py
~~~~~~~~~~~~~~~~~~~
... a script to split a hmmdefs file into individual hmm files.
"""
import sys
import os.path
from argparse import ArgumentParser
PROGRAM = os.path.abspath(__file__)
SPPAS = os.path.dirname(os.path.dirname(os.path.dirname(PROGRAM)))
sys.path.append(SPPAS)
from sppas.src.models.acm.acmodelhtkio import sppasHtkIO
# ----------------------------------------------------------------------------
# Verify and extract args:
# ----------------------------------------------------------------------------
parser = ArgumentParser(usage="%s -i hmmdef -o dir" % os.path.basename(PROGRAM),
description="... a script to split a hmmdef file into hmms.")
parser.add_argument("-i",
metavar="file",
required=True,
help='Input file name (hmmdefs) or directory (hmmdefs+monophones.repl)')
parser.add_argument("-o",
metavar="dir",
required=True,
help='Output directory name')
parser.add_argument("--quiet",
action='store_true',
help="Disable the verbosity")
if len(sys.argv) <= 1:
sys.argv.append('-h')
args = parser.parse_args()
# ----------------------------------------------------------------------------
if not os.path.isdir(args.o):
print("Error: {0} must be an existing directory.".format(args.o))
sys.exit(1)
# ----------------------------------------------------------------------------
if args.quiet is False:
print("Loading AC:")
acmodel1 = sppasHtkIO()
if os.path.isfile(args.i):
acmodel1.read(os.path.dirname(args.i), os.path.basename(args.i))
else:
acmodel1.read(folder=args.i)
if args.quiet is False:
print("... done")
# ----------------------------------------------------------------------------
acmodel = acmodel1.extract_monophones()
acmodel.replace_phones()
for hmm in acmodel.get_hmms():
filename = os.path.join(args.o, hmm.name)
filename = filename + ".hmm"
if args.quiet is False:
print("{:s}: {:s}".format(hmm.name, filename))
sppasHtkIO.write_hmm(hmm, filename)
| <filename>sppas/sppas/scripts/acmsplit.py
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
..
---------------------------------------------------------------------
___ __ __ __ ___
/ | \ | \ | \ / the automatic
\__ |__/ |__/ |___| \__ annotation and
\ | | | | \ analysis
___/ | | | | ___/ of speech
http://www.sppas.org/
Use of this software is governed by the GNU Public License, version 3.
SPPAS is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
SPPAS is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with SPPAS. If not, see <http://www.gnu.org/licenses/>.
This banner notice must not be removed.
---------------------------------------------------------------------
scripts.acmsplit.py
~~~~~~~~~~~~~~~~~~~
... a script to split a hmmdefs file into individual hmm files.
"""
import sys
import os.path
from argparse import ArgumentParser
PROGRAM = os.path.abspath(__file__)
SPPAS = os.path.dirname(os.path.dirname(os.path.dirname(PROGRAM)))
sys.path.append(SPPAS)
from sppas.src.models.acm.acmodelhtkio import sppasHtkIO
# ----------------------------------------------------------------------------
# Verify and extract args:
# ----------------------------------------------------------------------------
parser = ArgumentParser(usage="%s -i hmmdef -o dir" % os.path.basename(PROGRAM),
description="... a script to split a hmmdef file into hmms.")
parser.add_argument("-i",
metavar="file",
required=True,
help='Input file name (hmmdefs) or directory (hmmdefs+monophones.repl)')
parser.add_argument("-o",
metavar="dir",
required=True,
help='Output directory name')
parser.add_argument("--quiet",
action='store_true',
help="Disable the verbosity")
if len(sys.argv) <= 1:
sys.argv.append('-h')
args = parser.parse_args()
# ----------------------------------------------------------------------------
if not os.path.isdir(args.o):
print("Error: {0} must be an existing directory.".format(args.o))
sys.exit(1)
# ----------------------------------------------------------------------------
if args.quiet is False:
print("Loading AC:")
acmodel1 = sppasHtkIO()
if os.path.isfile(args.i):
acmodel1.read(os.path.dirname(args.i), os.path.basename(args.i))
else:
acmodel1.read(folder=args.i)
if args.quiet is False:
print("... done")
# ----------------------------------------------------------------------------
acmodel = acmodel1.extract_monophones()
acmodel.replace_phones()
for hmm in acmodel.get_hmms():
filename = os.path.join(args.o, hmm.name)
filename = filename + ".hmm"
if args.quiet is False:
print("{:s}: {:s}".format(hmm.name, filename))
sppasHtkIO.write_hmm(hmm, filename)
| en | 0.608961 | #!/usr/bin/env python # -*- coding: UTF-8 -*- .. --------------------------------------------------------------------- ___ __ __ __ ___ / | \ | \ | \ / the automatic \__ |__/ |__/ |___| \__ annotation and \ | | | | \ analysis ___/ | | | | ___/ of speech http://www.sppas.org/ Use of this software is governed by the GNU Public License, version 3. SPPAS is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. SPPAS is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with SPPAS. If not, see <http://www.gnu.org/licenses/>. This banner notice must not be removed. --------------------------------------------------------------------- scripts.acmsplit.py ~~~~~~~~~~~~~~~~~~~ ... a script to split a hmmdefs file into individual hmm files. # ---------------------------------------------------------------------------- # Verify and extract args: # ---------------------------------------------------------------------------- # ---------------------------------------------------------------------------- # ---------------------------------------------------------------------------- # ---------------------------------------------------------------------------- | 2.222321 | 2 |
ndic/search.py | naliemalta/ndic | 0 | 6612892 | <filename>ndic/search.py
# -*- coding: utf-8 -*-
"""
This module provides functions for searching the word by Ndic
"""
from __future__ import absolute_import
from .utils import make_naver_endic_url
from .utils import request_naver_endic_url
from .utils import get_word_meaning
from .utils import get_word_meanings
def search(search_word, xth=1):
"""
Search the word in English-Korean and Korean-English dictionaries
and return the corresponding Korean word(s) or English word(s).
Args:
search_word: the word which user want to search
xth: a specific meaning in the list of definitions returned (if there are multiple),
denoted by the index in the result. Defaults to the first one
Returns:
English word(s) or Korean word(s) corresponding to the search_word
Raises:
NdicConnectionError: if network connection is lost.
"""
naver_endic_url = make_naver_endic_url(search_word)
response = request_naver_endic_url(naver_endic_url)
word_meaning = get_word_meaning(response, xth)
return word_meaning
def search_all(search_word):
"""
Search the word in English-Korean and Korean-English dictionaries
and return all corresponding Korean word(s) or English word(s) meanings.
Args:
search_word: the word which user want to search
Returns:
List of English word(s) or Korean word(s) corresponding to the search_word
Raises:
NdicConnectionError: if network connection is lost.
"""
naver_endic_url = make_naver_endic_url(search_word)
response = request_naver_endic_url(naver_endic_url)
word_meaning = get_word_meanings(response)
return word_meaning
| <filename>ndic/search.py
# -*- coding: utf-8 -*-
"""
This module provides functions for searching the word by Ndic
"""
from __future__ import absolute_import
from .utils import make_naver_endic_url
from .utils import request_naver_endic_url
from .utils import get_word_meaning
from .utils import get_word_meanings
def search(search_word, xth=1):
"""
Search the word in English-Korean and Korean-English dictionaries
and return the corresponding Korean word(s) or English word(s).
Args:
search_word: the word which user want to search
xth: a specific meaning in the list of definitions returned (if there are multiple),
denoted by the index in the result. Defaults to the first one
Returns:
English word(s) or Korean word(s) corresponding to the search_word
Raises:
NdicConnectionError: if network connection is lost.
"""
naver_endic_url = make_naver_endic_url(search_word)
response = request_naver_endic_url(naver_endic_url)
word_meaning = get_word_meaning(response, xth)
return word_meaning
def search_all(search_word):
"""
Search the word in English-Korean and Korean-English dictionaries
and return all corresponding Korean word(s) or English word(s) meanings.
Args:
search_word: the word which user want to search
Returns:
List of English word(s) or Korean word(s) corresponding to the search_word
Raises:
NdicConnectionError: if network connection is lost.
"""
naver_endic_url = make_naver_endic_url(search_word)
response = request_naver_endic_url(naver_endic_url)
word_meaning = get_word_meanings(response)
return word_meaning
| en | 0.81099 | # -*- coding: utf-8 -*- This module provides functions for searching the word by Ndic Search the word in English-Korean and Korean-English dictionaries and return the corresponding Korean word(s) or English word(s). Args: search_word: the word which user want to search xth: a specific meaning in the list of definitions returned (if there are multiple), denoted by the index in the result. Defaults to the first one Returns: English word(s) or Korean word(s) corresponding to the search_word Raises: NdicConnectionError: if network connection is lost. Search the word in English-Korean and Korean-English dictionaries and return all corresponding Korean word(s) or English word(s) meanings. Args: search_word: the word which user want to search Returns: List of English word(s) or Korean word(s) corresponding to the search_word Raises: NdicConnectionError: if network connection is lost. | 3.413324 | 3 |
Hausaufgabe2/todoApp/urls.py | SozialeNetzwerke2016/Hausaufgabe2 | 0 | 6612893 | <filename>Hausaufgabe2/todoApp/urls.py<gh_stars>0
"""Hausaufgabe2 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from . import views
from django.contrib import admin
from django.views.decorators.csrf import csrf_exempt, csrf_protect
app_name = 'todoApp'
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$', views.index, name='index'),
url(r'^newTodo/$', views.newTodo, name='newTodo'),
url(r'^impressum/$', views.impressum, name='impressum'),
url(r'^addTodo/$', views.addTodo, name='addTodo'),
url(r'^(?P<todo_id>[0-9]+)/$', views.editTodo, name='editTodo'),
url(r'^deleteTodo/(?P<todo_id>[0-9]+)/$', views.deleteTodo, name='deleteTodo'),
url(r'^changeTodo/(?P<todo_id>[0-9]+)/$', views.changeTodo, name='changeTodo'),
]
| <filename>Hausaufgabe2/todoApp/urls.py<gh_stars>0
"""Hausaufgabe2 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from . import views
from django.contrib import admin
from django.views.decorators.csrf import csrf_exempt, csrf_protect
app_name = 'todoApp'
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$', views.index, name='index'),
url(r'^newTodo/$', views.newTodo, name='newTodo'),
url(r'^impressum/$', views.impressum, name='impressum'),
url(r'^addTodo/$', views.addTodo, name='addTodo'),
url(r'^(?P<todo_id>[0-9]+)/$', views.editTodo, name='editTodo'),
url(r'^deleteTodo/(?P<todo_id>[0-9]+)/$', views.deleteTodo, name='deleteTodo'),
url(r'^changeTodo/(?P<todo_id>[0-9]+)/$', views.changeTodo, name='changeTodo'),
]
| en | 0.584387 | Hausaufgabe2 URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/1.9/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.conf.urls import url, include 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls')) | 2.841321 | 3 |
old/main_old/alembic/versions/cda240ae1ea5_first_tables.py | madpin/renthub | 0 | 6612894 | """first tables
Revision ID: cda240ae1ea5
Revises: <KEY>
Create Date: 2021-10-31 23:47:10.313256
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'cda240ae1ea5'
down_revision = '<KEY>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
| """first tables
Revision ID: cda240ae1ea5
Revises: <KEY>
Create Date: 2021-10-31 23:47:10.313256
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'cda240ae1ea5'
down_revision = '<KEY>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
| en | 0.501512 | first tables Revision ID: cda240ae1ea5 Revises: <KEY> Create Date: 2021-10-31 23:47:10.313256 # revision identifiers, used by Alembic. # ### commands auto generated by Alembic - please adjust! ### # ### end Alembic commands ### # ### commands auto generated by Alembic - please adjust! ### # ### end Alembic commands ### | 1.443952 | 1 |
firewood/layers/lr_equalizers.py | kynk94/torch-firewood | 1 | 6612895 | import math
from collections import OrderedDict
from typing import List, Optional, Tuple, TypedDict, Union
import torch
import torch.nn as nn
import torch.nn.init as init
from torch import Tensor
from torch.nn import Parameter
from firewood import utils
# Class names the equalizers must always handle recursively.  These are
# whitelisted past the "*Norm" skip inside the ``apply`` methods, and
# ``lr_equalizer`` forces ``recursive=True`` for them -- presumably because
# their trainable tensors live on child modules rather than on the wrapper
# itself (TODO confirm against the layer definitions).
_NEED_RECURSIVE = {
    "AdaptiveNorm",
    "DepthSepConv1d",
    "DepthSepConv2d",
    "DepthSepConv3d",
    "DepthSepConvTranspose1d",
    "DepthSepConvTranspose2d",
    "DepthSepConvTranspose3d",
    "SpatialSepConv2d",
    "SpatialSepConv3d",
    "SpatialSepConvTranspose2d",
    "SpatialSepConvTranspose3d",
}
class BiasLREqualizer:
    """Forward-pre-hook implementing learning-rate equalization for biases.

    The trainable parameter is stored on the module as ``<name>_param``;
    before every forward pass the hook writes the effective bias
    ``<name>_param * bias_gain`` back to ``<name>`` (or to ``<name>_orig``
    when another parametrization hook already owns ``<name>``).
    """

    def __init__(self, name: str = "bias") -> None:
        # Public attribute name of the bias (usually "bias").
        self.name = name
        # Attribute the hook writes each forward; switched to
        # "<name>_orig" in `apply` when another norm hook owns "<name>".
        self.target_name = self.name

    @staticmethod
    def apply(
        module: nn.Module,
        name: str = "bias",
        lr_multiplier: float = 1.0,
        init: Optional[Union[float, Tensor]] = None,
        recursive: bool = False,
        reapply: bool = False,
    ) -> Optional["BiasLREqualizer"]:
        """Attach a ``BiasLREqualizer`` hook to ``module``.

        Args:
            module: Target module (or root module when ``recursive``).
            name: Bias attribute name on the module.
            lr_multiplier: Runtime gain stored in the ``bias_gain`` buffer.
            init: Constant the bias is (re)initialized to; defaults to 0.
            recursive: Apply to every submodule instead of ``module`` only.
            reapply: Replace an existing hook when ``init`` differs.

        Returns:
            The attached hook, or None when nothing was attached
            (recursive mode, skipped norm layer, missing bias, or an
            equivalent hook already present).
        """
        if recursive:
            # Re-invoke once per submodule; the per-module logic below
            # then runs with recursive=False.
            for _module in module.modules():
                BiasLREqualizer.apply(
                    module=_module,
                    name=name,
                    lr_multiplier=lr_multiplier,
                    init=init,
                    recursive=False,
                    reapply=reapply,
                )
            return None
        module_name = utils.get_name(module)
        # Skip "*Norm" layers unless they are composite wrappers that are
        # explicitly whitelisted in _NEED_RECURSIVE.
        if module_name not in _NEED_RECURSIVE and module_name.endswith("Norm"):
            return None
        if hasattr(module, "lr_equalization"):
            setattr(module, "lr_equalization", True)
        if getattr(module, name, None) is None:
            return None
        if init is None:
            init = 0.0
        if has_bias_lr_equalizer(module):
            # Already equalized: keep the existing hook unless reapplying
            # with a different init value.
            if not reapply or getattr(module, "bias_init", None) == init:
                return None
            _remove_bias_lr_equalizer(module, recursive=False)
        fn = BiasLREqualizer(name=name)
        module.register_forward_pre_hook(fn)
        # Rotate the newly appended hook to the front so it runs before
        # any previously registered forward-pre-hooks (e.g. other norms).
        forward_pre_hooks = list(module._forward_pre_hooks.items())
        forward_pre_hooks = forward_pre_hooks[-1:] + forward_pre_hooks[:-1]
        module._forward_pre_hooks = OrderedDict(forward_pre_hooks)
        # other norm use `name + '_orig'` to save the original bias
        if hasattr(module, name + "_orig"):
            setattr(fn, "target_name", name + "_orig")
        bias: Tensor = utils.popattr(module, fn.target_name).clone()
        # Overwrite the bias with the constant `init`, preserving dtype,
        # device and shape of the original tensor.
        bias = torch.tensor(init, dtype=bias.dtype, device=bias.device).expand(
            bias.shape
        )
        setattr(module, "bias_init", init)
        setattr(module, fn.target_name, bias.data)
        module.register_parameter(name + "_param", Parameter(bias.clone()))
        module.register_buffer(
            "bias_gain",
            torch.tensor(lr_multiplier, dtype=bias.dtype, device=bias.device),
        )
        return fn

    def remove(self, module: nn.Module) -> None:
        """Fold ``bias_gain`` into a plain bias parameter and detach state."""
        if hasattr(module, "lr_equalization"):
            setattr(module, "lr_equalization", False)
        with torch.no_grad():
            bias = self.compute_bias(module)
        delattr(module, self.name + "_param")
        if hasattr(module, self.name + "_orig"):
            # Another hook still owns "<name>": restore under target_name.
            module.register_parameter(
                self.target_name, Parameter(bias.detach())
            )
        else:
            delattr(module, self.name)
            module.register_parameter(self.name, Parameter(bias.detach()))

    def compute_bias(self, module: nn.Module) -> Tensor:
        """Return the effective bias: ``<name>_param * bias_gain``."""
        bias: Parameter = getattr(module, self.name + "_param")
        bias_gain = getattr(module, "bias_gain")
        return bias * bias_gain

    def __call__(self, module: nn.Module, input: Tensor) -> None:
        # Forward-pre-hook body: refresh the live bias before each forward.
        setattr(module, self.target_name, self.compute_bias(module))
class WeightLREqualizer:
    """Forward-pre-hook implementing learning-rate equalization for weights.

    The trainable parameter is stored as ``<name>_param`` and initialized
    from ``N(0, init_std / lr_multiplier)``; each forward pass the hook
    writes ``<name>_param * weight_gain`` back to the module, where
    ``weight_gain = lr_multiplier / sqrt(fan_in)``.

    Note:
        LREqualizer hook should be applied after other weight norm hooks.
    """

    def __init__(self, name: str = "weight") -> None:
        # Public attribute name of the weight (usually "weight").
        self.name = name
        # Attribute the hook writes each forward; becomes "<name>_orig"
        # when another weight-norm hook owns "<name>".
        self.target_name = self.name

    @staticmethod
    def apply(
        module: nn.Module,
        name: str = "weight",
        lr_multiplier: float = 1.0,
        init_std: Optional[float] = None,
        recursive: bool = False,
        reapply: bool = False,
    ) -> Optional["WeightLREqualizer"]:
        """Attach a ``WeightLREqualizer`` hook to ``module``.

        Args:
            module: Target module (or root module when ``recursive``).
            name: Weight attribute name on the module.
            lr_multiplier: Learning-rate multiplier folded into
                ``weight_gain`` and the parameter's init std.
            init_std: Std of the (pre-gain) normal init; defaults to 1.
            recursive: Apply to every submodule instead of ``module`` only.
            reapply: Replace an existing hook when ``init_std`` differs.

        Returns:
            The attached hook, or None when nothing was attached
            (recursive mode, skipped norm layer, missing/1-D weight, or
            an equivalent hook already present).
        """
        if recursive:
            # Re-invoke once per submodule; per-module work below then
            # runs with recursive=False.
            for _module in module.modules():
                WeightLREqualizer.apply(
                    module=_module,
                    name=name,
                    lr_multiplier=lr_multiplier,
                    init_std=init_std,
                    recursive=False,
                    reapply=reapply,
                )
            return None
        module_name = utils.get_name(module)
        # Skip "*Norm" layers unless whitelisted in _NEED_RECURSIVE.
        if module_name not in _NEED_RECURSIVE and module_name.endswith("Norm"):
            return None
        if hasattr(module, "lr_equalization"):
            setattr(module, "lr_equalization", True)
        # 1-D weights (e.g. norm scales) are deliberately left alone.
        _weight: Optional[Tensor] = getattr(module, name, None)
        if _weight is None or _weight.ndim == 1:
            return None
        if init_std is None:
            init_std = 1.0
        if has_weight_lr_equalizer(module):
            # Already equalized: keep the existing hook unless reapplying
            # with a different init std.
            if (
                not reapply
                or getattr(module, "weight_init_std", None) == init_std
            ):
                return None
            _remove_weight_lr_equalizer(module, recursive=False)
        fn = WeightLREqualizer(name=name)
        module.register_forward_pre_hook(fn)
        # Rotate the newly appended hook to the front so it runs before
        # previously registered forward-pre-hooks.
        forward_pre_hooks = list(module._forward_pre_hooks.items())
        forward_pre_hooks = forward_pre_hooks[-1:] + forward_pre_hooks[:-1]
        module._forward_pre_hooks = OrderedDict(forward_pre_hooks)
        # other weight norm use `name + '_orig'` to save the original weight
        if hasattr(module, name + "_orig"):
            setattr(fn, "target_name", name + "_orig")
        weight: Tensor = utils.popattr(module, fn.target_name).clone()
        setattr(module, "weight_init_std", init_std)
        # Divide the init std by the multiplier so that, after the
        # runtime gain is applied, the effective init std is `init_std`.
        init.normal_(weight, mean=0, std=init_std / lr_multiplier)
        setattr(module, fn.target_name, weight.data)
        module.register_parameter(
            name + "_param", Parameter(weight.detach().clone())
        )
        # fan_in = number of elements feeding one output unit.
        fan_in = weight.data[0].numel()
        weight_gain = lr_multiplier / math.sqrt(fan_in)
        module.register_buffer(
            "weight_gain",
            torch.tensor(weight_gain, dtype=weight.dtype, device=weight.device),
        )
        return fn

    def remove(self, module: nn.Module) -> None:
        """Fold ``weight_gain`` into a plain weight parameter and detach."""
        if hasattr(module, "lr_equalization"):
            setattr(module, "lr_equalization", False)
        with torch.no_grad():
            weight = self.compute_weight(module).clone()
        delattr(module, self.name + "_param")
        if hasattr(module, self.name + "_orig"):
            # Another hook still owns "<name>": restore under target_name.
            module.register_parameter(
                self.target_name, Parameter(weight.detach())
            )
        else:
            delattr(module, self.name)
            module.register_parameter(self.name, Parameter(weight.detach()))

    def compute_weight(self, module: nn.Module) -> Tensor:
        """Return the effective weight: ``<name>_param * weight_gain``."""
        weight: Parameter = getattr(module, self.name + "_param")
        weight_gain = getattr(module, "weight_gain")
        return weight * weight_gain

    def __call__(self, module: nn.Module, input: Tensor) -> None:
        # For the case of applying spectral norm after applying lr equalizer.
        if (
            self.target_name == self.name
            and getattr(module, self.name + "_orig", None) is not None
        ):
            self.target_name = self.name + "_orig"
        setattr(module, self.target_name, self.compute_weight(module))
def lr_equalizer(
    module: Union[
        nn.Module, nn.ModuleList, List[nn.Module], Tuple[nn.Module, ...]
    ],
    weight_name: str = "weight",
    bias_name: str = "bias",
    lr_multiplier: float = 1.0,
    weight_init_std: float = 1.0,
    bias_init: Optional[float] = None,
    recursive: bool = False,
    reapply: bool = False,
) -> Union[nn.Module, nn.ModuleList, List[nn.Module], Tuple[nn.Module, ...]]:
    """Attach learning-rate-equalization hooks (bias then weight) to ``module``.

    Sequences of modules are processed element-wise.  Wrapper modules
    (those exposing a non-None ``weight_layer`` attribute, or whose class
    name is listed in ``_NEED_RECURSIVE``) are always handled recursively.
    Returns ``module`` to allow call chaining.
    """
    if isinstance(module, (nn.ModuleList, list, tuple)):
        for submodule in module:
            lr_equalizer(
                module=submodule,
                weight_name=weight_name,
                bias_name=bias_name,
                lr_multiplier=lr_multiplier,
                weight_init_std=weight_init_std,
                bias_init=bias_init,
                recursive=recursive,
                reapply=reapply,
            )
        return module
    wraps_inner_layer = getattr(module, "weight_layer", None) is not None
    if wraps_inner_layer or utils.get_name(module) in _NEED_RECURSIVE:
        recursive = True
    # Bias first so its hook ends up behind the weight hook after each
    # apply() rotates its own hook to the front.
    BiasLREqualizer.apply(
        module, bias_name, lr_multiplier, bias_init, recursive, reapply
    )
    WeightLREqualizer.apply(
        module, weight_name, lr_multiplier, weight_init_std, recursive, reapply
    )
    return module
def _remove_bias_lr_equalizer(
    module: nn.Module,
    recursive: bool = False,
) -> nn.Module:
    """Detach the first ``BiasLREqualizer`` hook found on ``module``
    (or on every submodule when ``recursive``) and return the module."""
    if recursive:
        for submodule in module.modules():
            _remove_bias_lr_equalizer(submodule, recursive=False)
        return module
    hooks = module._forward_pre_hooks
    for key in list(hooks):
        hook = hooks[key]
        if isinstance(hook, BiasLREqualizer):
            hook.remove(module)
            del hooks[key]
            break
    return module
def _remove_weight_lr_equalizer(
    module: nn.Module,
    recursive: bool = False,
) -> nn.Module:
    """Detach the first ``WeightLREqualizer`` hook found on ``module``
    (or on every submodule when ``recursive``) and return the module."""
    if recursive:
        for submodule in module.modules():
            _remove_weight_lr_equalizer(submodule, recursive=False)
        return module
    hooks = module._forward_pre_hooks
    for key in list(hooks):
        hook = hooks[key]
        if isinstance(hook, WeightLREqualizer):
            hook.remove(module)
            del hooks[key]
            break
    return module
def remove_lr_equalizer(
    module: Union[
        nn.Module, nn.ModuleList, List[nn.Module], Tuple[nn.Module, ...]
    ],
    recursive: bool = False,
) -> Union[nn.Module, nn.ModuleList, List[nn.Module], Tuple[nn.Module, ...]]:
    """Detach the first lr-equalization hook (weight or bias) from ``module``.

    Sequences of modules are processed element-wise; with ``recursive``
    every submodule is visited.  Returns ``module`` for chaining.
    """
    if isinstance(module, (nn.ModuleList, list, tuple)):
        for submodule in module:
            remove_lr_equalizer(submodule, recursive=recursive)
        return module
    if recursive:
        for submodule in module.modules():
            remove_lr_equalizer(submodule, recursive=False)
        return module
    hooks = module._forward_pre_hooks
    for key in list(hooks):
        hook = hooks[key]
        if isinstance(hook, (WeightLREqualizer, BiasLREqualizer)):
            hook.remove(module)
            del hooks[key]
            break
    return module
def has_bias_lr_equalizer(module: nn.Module) -> bool:
    """Return True when a ``BiasLREqualizer`` pre-hook is attached to ``module``."""
    return any(
        isinstance(hook, BiasLREqualizer)
        for hook in module._forward_pre_hooks.values()
    )
def has_weight_lr_equalizer(module: nn.Module) -> bool:
    """Return True when a ``WeightLREqualizer`` pre-hook is attached to ``module``."""
    return any(
        isinstance(hook, WeightLREqualizer)
        for hook in module._forward_pre_hooks.values()
    )
def pop_bias_lr_equalizer(module: nn.Module) -> BiasLREqualizer:
    """Detach and return the BiasLREqualizer hook without restoring the bias.

    Raises:
        ValueError: if no BiasLREqualizer hook is registered on ``module``.
    """
    for k, hook in module._forward_pre_hooks.items():
        if isinstance(hook, BiasLREqualizer):
            del module._forward_pre_hooks[k]
            return hook
    raise ValueError("No BiasLREqualizer found in module's forward pre hooks")
def pop_weight_lr_equalizer(module: nn.Module) -> WeightLREqualizer:
    """Detach and return the WeightLREqualizer hook without restoring the weight.

    Raises:
        ValueError: if no WeightLREqualizer hook is registered on ``module``.
    """
    for k, hook in module._forward_pre_hooks.items():
        if isinstance(hook, WeightLREqualizer):
            del module._forward_pre_hooks[k]
            return hook
    raise ValueError("No WeightLREqualizer found in module's forward pre hooks")
class BIAS_ATTRS(TypedDict):
    """Bias state captured by ``pop_bias_attrs`` for later transfer."""
    bias: Optional[Parameter]
    bias_init: Optional[Union[float, Tensor]]
    bias_gain: float
    bias_hook: Optional[BiasLREqualizer]
def pop_bias_attrs(
    module: nn.Module,
) -> BIAS_ATTRS:
    """Remove the bias (and its LR-equalizer hook, if any) from ``module``.

    Returns the captured parameter, init value, gain and hook so they can be
    re-attached elsewhere (see ``transfer_bias_attrs``).
    """
    name = "bias"
    bias_hook = None
    for k, hook in module._forward_pre_hooks.items():
        if isinstance(hook, BiasLREqualizer):
            # Restore a plain bias first so it can be popped as one parameter.
            hook.remove(module)
            del module._forward_pre_hooks[k]
            name = hook.name
            bias_hook = hook
            break
    if not hasattr(module, name):
        # Module has no bias: return neutral defaults.
        return {
            "bias": None,
            "bias_init": None,
            "bias_gain": 1.0,
            "bias_hook": None,
        }
    bias: Parameter = utils.popattr(module, name)
    # Leave an explicit ``bias=None`` slot behind on the module.
    module.register_parameter("bias", None)
    bias_init = getattr(module, "bias_init", None)
    bias_gain = getattr(module, "bias_gain", 1.0)
    if bias_hook is not None:
        bias_hook.name = "bias"
    return {
        "bias": bias,
        "bias_init": bias_init,
        "bias_gain": bias_gain,
        "bias_hook": bias_hook,
    }
def transfer_bias_attrs(
    source_module: nn.Module,
    target_module: nn.Module,
    preserve_source_bias: bool = False,
) -> nn.Module:
    """Move the bias (and any LR-equalizer state) from source to target module.

    Raises:
        ValueError: if the source module has no bias.
    """
    bias_attrs = pop_bias_attrs(source_module)
    if bias_attrs["bias"] is None:
        raise ValueError("Source module has no bias")
    if bias_attrs["bias_hook"] is None:
        # Plain bias: just re-register it on the target.
        utils.popattr(target_module, "bias", None)
        target_module.register_parameter("bias", bias_attrs["bias"])
        return target_module
    # Equalized bias: drop the target's own bias state, then re-apply the hook.
    pop_bias_attrs(target_module)
    name = bias_attrs["bias_hook"].name
    target_module.register_parameter(name, bias_attrs["bias"])
    BiasLREqualizer.apply(
        module=target_module,
        name=name,
        lr_multiplier=bias_attrs["bias_gain"],
        init=bias_attrs["bias_init"],
        recursive=False,
    )
    if preserve_source_bias:
        # Keep the source's trained values instead of the re-initialized ones.
        delattr(target_module, name + "_param")
        target_module.register_parameter(
            name + "_param", Parameter(bias_attrs["bias"].data)
        )
    return target_module
| import math
from collections import OrderedDict
from typing import List, Optional, Tuple, TypedDict, Union
import torch
import torch.nn as nn
import torch.nn.init as init
from torch import Tensor
from torch.nn import Parameter
from firewood import utils
# Wrapper modules whose parameters live in sub-modules: LR equalization must
# recurse into them even though some of their names end with "Norm".
_NEED_RECURSIVE = {
    "AdaptiveNorm",
    "DepthSepConv1d",
    "DepthSepConv2d",
    "DepthSepConv3d",
    "DepthSepConvTranspose1d",
    "DepthSepConvTranspose2d",
    "DepthSepConvTranspose3d",
    "SpatialSepConv2d",
    "SpatialSepConv3d",
    "SpatialSepConvTranspose2d",
    "SpatialSepConvTranspose3d",
}
class BiasLREqualizer:
    """Forward pre-hook that applies learning-rate equalization to a bias.

    The trainable values live in ``<name>_param``; before every forward pass
    the effective bias ``<name>_param * bias_gain`` is written to ``<name>``
    (or ``<name>_orig`` when another norm hook already owns ``<name>``).
    """

    def __init__(self, name: str = "bias") -> None:
        self.name = name
        # Attribute the computed bias is written to; switched to
        # ``name + "_orig"`` when another hook stores the original there.
        self.target_name = self.name

    @staticmethod
    def apply(
        module: nn.Module,
        name: str = "bias",
        lr_multiplier: float = 1.0,
        init: Optional[Union[float, Tensor]] = None,
        recursive: bool = False,
        reapply: bool = False,
    ) -> Optional["BiasLREqualizer"]:
        """Attach the hook to ``module`` (or all sub-modules if ``recursive``).

        Returns the created hook, or None when the module is skipped
        (normalization layer, no bias, already equalized) or when applied
        recursively.
        """
        if recursive:
            for _module in module.modules():
                BiasLREqualizer.apply(
                    module=_module,
                    name=name,
                    lr_multiplier=lr_multiplier,
                    init=init,
                    recursive=False,
                    reapply=reapply,
                )
            return None
        module_name = utils.get_name(module)
        # Skip plain normalization layers; wrappers listed in _NEED_RECURSIVE
        # are allowed through even though their names end with "Norm".
        if module_name not in _NEED_RECURSIVE and module_name.endswith("Norm"):
            return None
        if hasattr(module, "lr_equalization"):
            setattr(module, "lr_equalization", True)
        if getattr(module, name, None) is None:
            return None
        if init is None:
            init = 0.0
        if has_bias_lr_equalizer(module):
            # Already equalized: only re-apply when asked and the init differs.
            if not reapply or getattr(module, "bias_init", None) == init:
                return None
            _remove_bias_lr_equalizer(module, recursive=False)
        fn = BiasLREqualizer(name=name)
        module.register_forward_pre_hook(fn)
        # Move the freshly registered hook to the front of the queue so it
        # runs before the other forward pre-hooks.
        forward_pre_hooks = list(module._forward_pre_hooks.items())
        forward_pre_hooks = forward_pre_hooks[-1:] + forward_pre_hooks[:-1]
        module._forward_pre_hooks = OrderedDict(forward_pre_hooks)
        # other norm use `name + '_orig'` to save the original bias
        if hasattr(module, name + "_orig"):
            setattr(fn, "target_name", name + "_orig")
        bias: Tensor = utils.popattr(module, fn.target_name).clone()
        # Re-initialize the bias to the constant ``init``.
        bias = torch.tensor(init, dtype=bias.dtype, device=bias.device).expand(
            bias.shape
        )
        setattr(module, "bias_init", init)
        setattr(module, fn.target_name, bias.data)
        module.register_parameter(name + "_param", Parameter(bias.clone()))
        module.register_buffer(
            "bias_gain",
            torch.tensor(lr_multiplier, dtype=bias.dtype, device=bias.device),
        )
        return fn

    def remove(self, module: nn.Module) -> None:
        """Fold the equalized bias back into a plain parameter on ``module``."""
        if hasattr(module, "lr_equalization"):
            setattr(module, "lr_equalization", False)
        with torch.no_grad():
            bias = self.compute_bias(module)
        delattr(module, self.name + "_param")
        if hasattr(module, self.name + "_orig"):
            # Another norm hook still owns ``name``; restore under target_name.
            module.register_parameter(
                self.target_name, Parameter(bias.detach())
            )
        else:
            delattr(module, self.name)
            module.register_parameter(self.name, Parameter(bias.detach()))

    def compute_bias(self, module: nn.Module) -> Tensor:
        """Effective bias: trained parameter scaled by the lr multiplier."""
        bias: Parameter = getattr(module, self.name + "_param")
        bias_gain = getattr(module, "bias_gain")
        return bias * bias_gain

    def __call__(self, module: nn.Module, input: Tensor) -> None:
        # Refresh the scaled bias right before each forward pass.
        setattr(module, self.target_name, self.compute_bias(module))
class WeightLREqualizer:
    """Forward pre-hook that applies learning-rate equalization to a weight.

    The trainable values live in ``<name>_param``; before every forward pass
    the effective weight ``<name>_param * weight_gain`` is written back,
    where ``weight_gain = lr_multiplier / sqrt(fan_in)``.

    Note:
        LREqualizer hook should be applied after other weight norm hooks.
    """

    def __init__(self, name: str = "weight") -> None:
        self.name = name
        # Attribute the computed weight is written to; switched to
        # ``name + "_orig"`` when another hook stores the original there.
        self.target_name = self.name

    @staticmethod
    def apply(
        module: nn.Module,
        name: str = "weight",
        lr_multiplier: float = 1.0,
        init_std: Optional[float] = None,
        recursive: bool = False,
        reapply: bool = False,
    ) -> Optional["WeightLREqualizer"]:
        """Attach the hook to ``module`` (or all sub-modules if ``recursive``).

        Returns the created hook, or None when the module is skipped
        (normalization layer, missing or 1-D weight, already equalized) or
        when applied recursively.
        """
        if recursive:
            for _module in module.modules():
                WeightLREqualizer.apply(
                    module=_module,
                    name=name,
                    lr_multiplier=lr_multiplier,
                    init_std=init_std,
                    recursive=False,
                    reapply=reapply,
                )
            return None
        module_name = utils.get_name(module)
        # Skip plain normalization layers; wrappers listed in _NEED_RECURSIVE
        # are allowed through even though their names end with "Norm".
        if module_name not in _NEED_RECURSIVE and module_name.endswith("Norm"):
            return None
        if hasattr(module, "lr_equalization"):
            setattr(module, "lr_equalization", True)
        _weight: Optional[Tensor] = getattr(module, name, None)
        # 1-D weights (e.g. norm scale vectors) are not equalized.
        if _weight is None or _weight.ndim == 1:
            return None
        if init_std is None:
            init_std = 1.0
        if has_weight_lr_equalizer(module):
            # Already equalized: only re-apply when asked and the init differs.
            if (
                not reapply
                or getattr(module, "weight_init_std", None) == init_std
            ):
                return None
            _remove_weight_lr_equalizer(module, recursive=False)
        fn = WeightLREqualizer(name=name)
        module.register_forward_pre_hook(fn)
        # Move the freshly registered hook to the front of the queue so it
        # runs before the other forward pre-hooks.
        forward_pre_hooks = list(module._forward_pre_hooks.items())
        forward_pre_hooks = forward_pre_hooks[-1:] + forward_pre_hooks[:-1]
        module._forward_pre_hooks = OrderedDict(forward_pre_hooks)
        # other weight norm use `name + '_orig'` to save the original weight
        if hasattr(module, name + "_orig"):
            setattr(fn, "target_name", name + "_orig")
        weight: Tensor = utils.popattr(module, fn.target_name).clone()
        setattr(module, "weight_init_std", init_std)
        # Re-initialize so that weight * weight_gain has std ~ ``init_std``.
        init.normal_(weight, mean=0, std=init_std / lr_multiplier)
        setattr(module, fn.target_name, weight.data)
        module.register_parameter(
            name + "_param", Parameter(weight.detach().clone())
        )
        # fan_in: number of input connections per output unit.
        fan_in = weight.data[0].numel()
        weight_gain = lr_multiplier / math.sqrt(fan_in)
        module.register_buffer(
            "weight_gain",
            torch.tensor(weight_gain, dtype=weight.dtype, device=weight.device),
        )
        return fn

    def remove(self, module: nn.Module) -> None:
        """Fold the equalized weight back into a plain parameter on ``module``."""
        if hasattr(module, "lr_equalization"):
            setattr(module, "lr_equalization", False)
        with torch.no_grad():
            weight = self.compute_weight(module).clone()
        delattr(module, self.name + "_param")
        if hasattr(module, self.name + "_orig"):
            # Another norm hook still owns ``name``; restore under target_name.
            module.register_parameter(
                self.target_name, Parameter(weight.detach())
            )
        else:
            delattr(module, self.name)
            module.register_parameter(self.name, Parameter(weight.detach()))

    def compute_weight(self, module: nn.Module) -> Tensor:
        """Effective weight: trained parameter scaled by ``weight_gain``."""
        weight: Parameter = getattr(module, self.name + "_param")
        weight_gain = getattr(module, "weight_gain")
        return weight * weight_gain

    def __call__(self, module: nn.Module, input: Tensor) -> None:
        # For the case of applying spectral norm after applying lr equalizer.
        if (
            self.target_name == self.name
            and getattr(module, self.name + "_orig", None) is not None
        ):
            self.target_name = self.name + "_orig"
        setattr(module, self.target_name, self.compute_weight(module))
def lr_equalizer(
    module: Union[
        nn.Module, nn.ModuleList, List[nn.Module], Tuple[nn.Module, ...]
    ],
    weight_name: str = "weight",
    bias_name: str = "bias",
    lr_multiplier: float = 1.0,
    weight_init_std: float = 1.0,
    bias_init: Optional[float] = None,
    recursive: bool = False,
    reapply: bool = False,
) -> Union[nn.Module, nn.ModuleList, List[nn.Module], Tuple[nn.Module, ...]]:
    """Apply bias and weight LR equalization to ``module``.

    Accepts a single module or a collection of modules; returns its argument
    for chaining.
    """
    if isinstance(module, (nn.ModuleList, list, tuple)):
        for _module in module:
            lr_equalizer(
                module=_module,
                weight_name=weight_name,
                bias_name=bias_name,
                lr_multiplier=lr_multiplier,
                weight_init_std=weight_init_std,
                bias_init=bias_init,
                recursive=recursive,
                reapply=reapply,
            )
        return module
    # Wrapper modules keep their parameters in sub-modules, so force recursion.
    if (
        getattr(module, "weight_layer", None) is not None
        or utils.get_name(module) in _NEED_RECURSIVE
    ):
        recursive = True
    BiasLREqualizer.apply(
        module=module,
        name=bias_name,
        lr_multiplier=lr_multiplier,
        init=bias_init,
        recursive=recursive,
        reapply=reapply,
    )
    WeightLREqualizer.apply(
        module=module,
        name=weight_name,
        lr_multiplier=lr_multiplier,
        init_std=weight_init_std,
        recursive=recursive,
        reapply=reapply,
    )
    return module
def _remove_bias_lr_equalizer(
    module: nn.Module,
    recursive: bool = False,
) -> nn.Module:
    """Detach the BiasLREqualizer hook (if any) from ``module`` and restore a plain bias."""
    if recursive:
        for submodule in module.modules():
            _remove_bias_lr_equalizer(submodule, recursive=False)
        return module
    hooks = module._forward_pre_hooks
    key = next(
        (k for k, hook in hooks.items() if isinstance(hook, BiasLREqualizer)),
        None,
    )
    if key is not None:
        hooks[key].remove(module)
        del hooks[key]
    return module
def _remove_weight_lr_equalizer(
    module: nn.Module,
    recursive: bool = False,
) -> nn.Module:
    """Detach the WeightLREqualizer hook (if any) from ``module`` and restore a plain weight."""
    if recursive:
        for submodule in module.modules():
            _remove_weight_lr_equalizer(submodule, recursive=False)
        return module
    hooks = module._forward_pre_hooks
    key = next(
        (k for k, hook in hooks.items() if isinstance(hook, WeightLREqualizer)),
        None,
    )
    if key is not None:
        hooks[key].remove(module)
        del hooks[key]
    return module
def remove_lr_equalizer(
    module: Union[
        nn.Module, nn.ModuleList, List[nn.Module], Tuple[nn.Module, ...]
    ],
    recursive: bool = False,
) -> Union[nn.Module, nn.ModuleList, List[nn.Module], Tuple[nn.Module, ...]]:
    """Remove all weight/bias LR-equalizer hooks from ``module``.

    Accepts a single module or a collection; with ``recursive=True`` every
    sub-module is processed as well. Returns its argument for chaining.
    """
    if isinstance(module, (nn.ModuleList, list, tuple)):
        for _module in module:
            remove_lr_equalizer(_module, recursive=recursive)
        return module
    if recursive:
        for _module in module.modules():
            remove_lr_equalizer(_module, recursive=False)
        return module
    # Fix: a module can carry both a weight and a bias equalizer hook, so do
    # not stop after the first match; iterate over a snapshot of the keys
    # because entries are deleted while scanning.
    for k in list(module._forward_pre_hooks):
        hook = module._forward_pre_hooks[k]
        if isinstance(hook, (WeightLREqualizer, BiasLREqualizer)):
            hook.remove(module)
            del module._forward_pre_hooks[k]
    return module
def has_bias_lr_equalizer(module: nn.Module) -> bool:
    """Whether ``module`` carries a BiasLREqualizer forward pre-hook."""
    return any(
        isinstance(hook, BiasLREqualizer)
        for hook in module._forward_pre_hooks.values()
    )
def has_weight_lr_equalizer(module: nn.Module) -> bool:
    """Whether ``module`` carries a WeightLREqualizer forward pre-hook."""
    return any(
        isinstance(hook, WeightLREqualizer)
        for hook in module._forward_pre_hooks.values()
    )
def pop_bias_lr_equalizer(module: nn.Module) -> BiasLREqualizer:
    """Detach and return the BiasLREqualizer hook without restoring the bias.

    Raises:
        ValueError: if no BiasLREqualizer hook is registered on ``module``.
    """
    hooks = module._forward_pre_hooks
    for key in list(hooks):
        hook = hooks[key]
        if isinstance(hook, BiasLREqualizer):
            del hooks[key]
            return hook
    raise ValueError("No BiasLREqualizer found in module's forward pre hooks")
def pop_weight_lr_equalizer(module: nn.Module) -> WeightLREqualizer:
    """Detach and return the WeightLREqualizer hook without restoring the weight.

    Raises:
        ValueError: if no WeightLREqualizer hook is registered on ``module``.
    """
    hooks = module._forward_pre_hooks
    for key in list(hooks):
        hook = hooks[key]
        if isinstance(hook, WeightLREqualizer):
            del hooks[key]
            return hook
    raise ValueError("No WeightLREqualizer found in module's forward pre hooks")
class BIAS_ATTRS(TypedDict):
    """Bias state captured by ``pop_bias_attrs`` for later transfer."""
    bias: Optional[Parameter]
    bias_init: Optional[Union[float, Tensor]]
    bias_gain: float
    bias_hook: Optional[BiasLREqualizer]
def pop_bias_attrs(
    module: nn.Module,
) -> BIAS_ATTRS:
    """Remove the bias (and its LR-equalizer hook, if any) from ``module``.

    Returns the captured parameter, init value, gain and hook so they can be
    re-attached elsewhere (see ``transfer_bias_attrs``).
    """
    name = "bias"
    bias_hook = None
    for k, hook in module._forward_pre_hooks.items():
        if isinstance(hook, BiasLREqualizer):
            # Restore a plain bias first so it can be popped as one parameter.
            hook.remove(module)
            del module._forward_pre_hooks[k]
            name = hook.name
            bias_hook = hook
            break
    if not hasattr(module, name):
        # Module has no bias: return neutral defaults.
        return {
            "bias": None,
            "bias_init": None,
            "bias_gain": 1.0,
            "bias_hook": None,
        }
    bias: Parameter = utils.popattr(module, name)
    # Leave an explicit ``bias=None`` slot behind on the module.
    module.register_parameter("bias", None)
    bias_init = getattr(module, "bias_init", None)
    bias_gain = getattr(module, "bias_gain", 1.0)
    if bias_hook is not None:
        bias_hook.name = "bias"
    return {
        "bias": bias,
        "bias_init": bias_init,
        "bias_gain": bias_gain,
        "bias_hook": bias_hook,
    }
def transfer_bias_attrs(
    source_module: nn.Module,
    target_module: nn.Module,
    preserve_source_bias: bool = False,
) -> nn.Module:
    """Move the bias (and any LR-equalizer state) from source to target module.

    Raises:
        ValueError: if the source module has no bias.
    """
    bias_attrs = pop_bias_attrs(source_module)
    if bias_attrs["bias"] is None:
        raise ValueError("Source module has no bias")
    if bias_attrs["bias_hook"] is None:
        # Plain bias: just re-register it on the target.
        utils.popattr(target_module, "bias", None)
        target_module.register_parameter("bias", bias_attrs["bias"])
        return target_module
    # Equalized bias: drop the target's own bias state, then re-apply the hook.
    pop_bias_attrs(target_module)
    name = bias_attrs["bias_hook"].name
    target_module.register_parameter(name, bias_attrs["bias"])
    BiasLREqualizer.apply(
        module=target_module,
        name=name,
        lr_multiplier=bias_attrs["bias_gain"],
        init=bias_attrs["bias_init"],
        recursive=False,
    )
    if preserve_source_bias:
        # Keep the source's trained values instead of the re-initialized ones.
        delattr(target_module, name + "_param")
        target_module.register_parameter(
            name + "_param", Parameter(bias_attrs["bias"].data)
        )
    return target_module
| en | 0.698402 | # other norm use `name + '_orig'` to save the original bias Note: LREqualizer hook should be applied after other weight norm hooks. # other weight norm use `name + '_orig'` to save the original weight # For the case of applying spectral norm after applying lr equalizer. | 2.178334 | 2 |
src/game.py | applied-ml-research/snake-game-nn | 0 | 6612896 | import random
WIDTH = 800
HEIGHT = 600
BLOCK_DIM = 5
UP = 0
DOWN = 1
LEFT = 2
RIGHT = 3
STARTING_DIRECTION = RIGHT
SNAKE_Y = 0
SNAKE_X = 1
SNAKE_DELTA = 1


class Game:
    """Grid-based snake game state: open squares, snake body and heading."""

    def __init__(self, height=HEIGHT // BLOCK_DIM, width=WIDTH // BLOCK_DIM):
        self.height = height
        self.width = width
        # Every board square not occupied by the snake.
        self.open = {(row, col) for row in range(height) for col in range(width)}
        self.snake = [(height // 2, width // 2)]
        self.direction = STARTING_DIRECTION
        self.alive = True

    def __pick_open_square(self):
        """Return a random unoccupied square."""
        return random.choice(tuple(self.open))

    def set_direction(self, direction):
        """Change heading unless it would reverse the current one."""
        opposite = {UP: DOWN, DOWN: UP, LEFT: RIGHT, RIGHT: LEFT}
        if direction != opposite[self.direction]:
            self.direction = direction

    def update(self):
        """Advance one step; mark the snake dead on wall/body collision."""
        y, x = self.snake[-1]
        steps = {
            UP: (y - SNAKE_DELTA, x),
            DOWN: (y + SNAKE_DELTA, x),
            LEFT: (y, x - SNAKE_DELTA),
            RIGHT: (y, x + SNAKE_DELTA),
        }
        new = steps[self.direction]
        if new not in self.open:
            self.alive = False
            return
        # Occupy the new head square and free the tail (length stays fixed).
        self.open.remove(new)
        self.snake.append(new)
        self.open.add(self.snake[0])
        self.snake = self.snake[1:]

    def cleanup(self):
        """Drop references to the board state."""
        del self.open
        del self.snake
| import random
# Board and rendering constants.
WIDTH = 800
HEIGHT = 600
BLOCK_DIM = 5
# Direction codes.
UP = 0
DOWN = 1
LEFT = 2
RIGHT = 3
STARTING_DIRECTION = RIGHT
# Tuple indices / step size for snake coordinates.
SNAKE_Y = 0
SNAKE_X = 1
SNAKE_DELTA = 1


class Game:
    """Grid-based snake game state: open squares, snake body and heading."""

    def __init__(self, height=HEIGHT//BLOCK_DIM, width=WIDTH//BLOCK_DIM):
        self.height = height
        self.width = width
        # Every board square not occupied by the snake.
        # NOTE(review): the snake's starting square is never removed from
        # ``open`` here — confirm whether that is intentional.
        self.open = {(y, x) for y in range(height) for x in range(width)}
        self.snake = [(height//2, width//2)]
        self.direction = STARTING_DIRECTION
        self.alive = True

    def __pick_open_square(self):
        # Random unoccupied square (currently unused; presumably intended for
        # food placement — TODO confirm).
        return random.choice(tuple(self.open))

    def set_direction(self, direction):
        # Accept the new heading unless it would reverse the current one.
        if (self.direction == UP and direction != DOWN) or (self.direction == DOWN and direction != UP) or (self.direction == LEFT and direction != RIGHT) or (self.direction == RIGHT and direction != LEFT):
            self.direction = direction

    def update(self):
        # Advance the head one square in the current direction.
        head = self.snake[-1]
        if self.direction == UP:
            new = (head[SNAKE_Y] - SNAKE_DELTA, head[SNAKE_X])
        elif self.direction == DOWN:
            new = (head[SNAKE_Y] + SNAKE_DELTA, head[SNAKE_X])
        elif self.direction == LEFT:
            new = (head[SNAKE_Y], head[SNAKE_X] - SNAKE_DELTA)
        elif self.direction == RIGHT:
            new = (head[SNAKE_Y], head[SNAKE_X] + SNAKE_DELTA)
        if new in self.open:
            # Occupy the new head square and free the tail (length stays fixed).
            self.open.remove(new)
            self.snake.append(new)
            self.open.add(self.snake[0])
            self.snake = self.snake[1:]
        else:
            # Wall or body collision.
            self.alive = False

    def cleanup(self):
        # Drop references to the board state.
        del self.open
        del self.snake
| none | 1 | 3.258534 | 3 | |
# Question
# WAP to get a Fibonacci series till 'n' terms

# CODE :
# Ask the user for the number of terms
nterm=int(input('How many terms? '))
# A Fibonacci series always starts with 0 and 1...
n1,n2=0,1
# 'count' tracks how many terms have been printed so far
count=0
# A term count can never be negative, so ask for a positive value instead
if nterm<=-1:
    print(f'{nterm} is negative. Enter a positive value')
# If the number of terms is 1, the series is just its first value, i.e. 0
elif nterm==1:
    print('Fibonacci Series: ')
    print(n1)
# For the number of terms >= 2...
else:
    print('Fibonacci Series: ')
    # Print only as many terms as requested, so count must not exceed nterm
    while count<nterm:
        print(n1)
        nth=n1+n2
        # Shift the pair of values forward
        n1=n2
        n2=nth
        count+=1
# No additional comments
# OUTPUT :
# How many terms? 7
# Fibonacci Series:
# 0
# 1
# 1
# 2
# 3
# 5
# 8 | # Question
# WAP to get a Fibonacci series till 'n' terms

# CODE :
# Ask the user for the number of terms
nterm=int(input('How many terms? '))
# A Fibonacci series always starts with 0 and 1...
n1,n2=0,1
# 'count' tracks how many terms have been printed so far
count=0
# A term count can never be negative, so ask for a positive value instead
if nterm<=-1:
    print(f'{nterm} is negative. Enter a positive value')
# If the number of terms is 1, the series is just its first value, i.e. 0
elif nterm==1:
    print('Fibonacci Series: ')
    print(n1)
# For the number of terms >= 2...
else:
    print('Fibonacci Series: ')
    # Print only as many terms as requested, so count must not exceed nterm
    while count<nterm:
        print(n1)
        nth=n1+n2
        # Shift the pair of values forward
        n1=n2
        n2=nth
        count+=1
# No additional comments
# OUTPUT :
# How many terms? 7
# Fibonacci Series:
# 0
# 1
# 1
# 2
# 3
# 5
# 8 | en | 0.869668 | # Question # WAP to get a fibonacci series till 'n' terms # CODE : # I ask the user to tell the number of terms # Since fibonacci series always start wit 0 and 1... # Introduced 'count' as a variable which will move on to the next value if the previous value satisfies conditons # Since fibonacci series can never have a -ve value, it will tell the user to enter a positive value # If number of terms is specified as 1, then there will only be 1 value in the series i.e. 0 # For the number of terms =>2... # Here we want the series to be only as ling as specified by the user, so count cannot exceed the number of terms # Update the values # No addtional comments # OUTPUT : # How many terms? 7 # Fibonacci Series: # 0 # 1 # 1 # 2 # 3 # 5 # 8 | 4.310266 | 4 |
chain/crypto/objects/transactions/ipfs.py | tsifrer/ark | 5 | 6612898 | from .base import BaseTransaction
class IPFSTransaction(BaseTransaction):
    """IPFS transaction type; behaviour is inherited unchanged from BaseTransaction."""
    pass
| from .base import BaseTransaction
class IPFSTransaction(BaseTransaction):
    """IPFS transaction type; behaviour is inherited unchanged from BaseTransaction."""
    pass
| none | 1 | 1.215643 | 1 | |
usuario/models.py | Miguelrom/EasyApproval | 0 | 6612899 | from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
class Profile(models.Model):
    """Extra per-user data attached one-to-one to Django's ``User``."""

    # (stored value, human-readable label)
    TIPOS = (
        (0, 'Alumno'),
        (1, 'Instructor'),
        (2, 'Miembro del consejo académico'),
    )
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    institucion = models.CharField(max_length=40, blank=True)
    tipo = models.SmallIntegerField(choices=TIPOS, null=True, blank=True)
    numero_borradores = models.IntegerField(default=0)
    cv = models.FileField(null=True, blank=True)

    @property
    def nombre(self):
        """First name taken from the linked ``User``."""
        return self.user.first_name

    @property
    def apellido(self):
        """Last name taken from the linked ``User``."""
        return self.user.last_name

    @property
    def get_tipo(self):
        """Human-readable label for ``tipo``.

        Fix: look the label up by stored value instead of by tuple position,
        so it stays correct even if the TIPOS codes stop matching their
        indices.
        """
        return dict(self.TIPOS)[int(self.tipo)]

    def __str__(self):
        # Fix: use ``is not None`` (PEP 8) instead of ``!= None``.
        apellido = self.apellido
        return str(self.nombre) + (" " + str(apellido) if apellido is not None else "")
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
    """Create an empty Profile whenever a new User row is inserted."""
    if created:
        Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_user_profile(sender, instance, **kwargs):
    """Persist the related Profile every time the User is saved."""
    instance.profile.save()
| from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
class Profile(models.Model):
    """Extra per-user data attached one-to-one to Django's ``User``."""

    # (stored value, human-readable label)
    TIPOS = (
        (0, 'Alumno'),
        (1, 'Instructor'),
        (2, 'Miembro del consejo académico'),
    )
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    institucion = models.CharField(max_length=40, blank=True)
    tipo = models.SmallIntegerField(choices=TIPOS, null=True, blank=True)
    numero_borradores = models.IntegerField(default=0)
    cv = models.FileField(null=True, blank=True)

    @property
    def nombre(self):
        # First name taken from the linked User.
        return self.user.first_name

    @property
    def apellido(self):
        # Last name taken from the linked User.
        return self.user.last_name

    @property
    def get_tipo(self):
        # Label for ``tipo``; relies on TIPOS codes matching their indices.
        return self.TIPOS[int(self.tipo)][1]

    def __str__(self):
        # NOTE(review): prefer ``is not None`` over ``!= None`` (PEP 8).
        return str(self.nombre) + (" " + str(self.apellido) if self.apellido != None else "")
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
    """Create an empty Profile whenever a new User row is inserted."""
    if created:
        Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_user_profile(sender, instance, **kwargs):
    """Persist the related Profile every time the User is saved."""
    instance.profile.save()
| none | 1 | 2.280514 | 2 | |
src/python/pants/bin/local_pants_runner_integration_test.py | yoav-orca/pants | 1,806 | 6612900 | # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from typing import Sequence
from pants.testutil.pants_integration_test import PantsResult, run_pants
def test_print_stacktrace() -> None:
    """A traceback appears in stderr only when --print-stacktrace is given."""

    def run_with(flag: str) -> PantsResult:
        # Target a nonexistent spec so the command fails and may emit a trace.
        return run_pants(command=[flag, "list", "definitely-does-not-exist::"])

    without_flag = run_with("--no-print-stacktrace")
    assert "Traceback" not in without_flag.stderr
    assert "traceback" not in without_flag.stderr

    with_flag = run_with("--print-stacktrace")
    assert "Traceback" in with_flag.stderr
| # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from typing import Sequence
from pants.testutil.pants_integration_test import PantsResult, run_pants
def test_print_stacktrace() -> None:
    """A traceback appears in stderr only when --print-stacktrace is given."""
    def run(args: Sequence[str]) -> PantsResult:
        # Target a nonexistent spec so the command fails and may emit a trace.
        return run_pants(command=[*args, "list", "definitely-does-not-exist::"])
    no_print_stacktrace = run(["--no-print-stacktrace"])
    assert "Traceback" not in no_print_stacktrace.stderr
    assert "traceback" not in no_print_stacktrace.stderr
    print_stacktrace = run(["--print-stacktrace"])
    assert "Traceback" in print_stacktrace.stderr
| en | 0.514785 | # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). | 2.264253 | 2 |
website/files/models/ext.py | DanielSBrown/osf.io | 1 | 6612901 | <filename>website/files/models/ext.py
"""website.files.models.ext is home to subclasses of FileNode that provide
additional functionality and have no place in website.files.models.base
"""
import os
from website.files.models.base import FileNode
class PathFollowingFileNode(FileNode):
    """A helper class that will attempt to track its file
    through changes in the parent addon's settings,
    i.e. moving your Dropbox directory up or down X levels.

    ``stored_object``'s path will always be the full path
    from the provider's root directory.
    """
    # Name of the node-settings attribute that holds the provider root folder.
    FOLDER_ATTR_NAME = 'folder'

    @classmethod
    def get_or_create(cls, node, path):
        """Forces ``path`` to extend from the add-on's root directory."""
        node_settings = node.get_addon(cls.provider)
        path = os.path.join(getattr(node_settings, cls.FOLDER_ATTR_NAME).strip('/'), path.lstrip('/'))
        return super(PathFollowingFileNode, cls).get_or_create(node, '/' + path)

    @property
    def path(self):
        """``stored_object``'s path made relative to ``_get_connected_path``."""
        return '/' + self.stored_object.path.replace(self._get_connected_path(), '', 1).lstrip('/')

    def _get_connected_path(self):
        """Returns the folder path of the connected provider add-on.

        >>> pffn._get_connected_path() # /MyDropbox/FolderImSharingOnTheOsf
        """
        node_settings = self.node.get_addon(self.provider)
        assert node_settings is not None, 'Connected node has no {} account'.format(self.provider)
        return getattr(node_settings, self.FOLDER_ATTR_NAME).strip('/')
| <filename>website/files/models/ext.py
"""website.files.models.ext is home to subclasses of FileNode that provide
additional functionality and have no place in website.files.models.base
"""
import os
from website.files.models.base import FileNode
class PathFollowingFileNode(FileNode):
    """A helper class that will attempt to track its file
    through changes in the parent addon's settings,
    i.e. moving your Dropbox directory up or down X levels.

    ``stored_object``'s path will always be the full path
    from the provider's root directory.
    """
    # Name of the node-settings attribute that holds the provider root folder.
    FOLDER_ATTR_NAME = 'folder'

    @classmethod
    def get_or_create(cls, node, path):
        """Forces ``path`` to extend from the add-on's root directory."""
        node_settings = node.get_addon(cls.provider)
        path = os.path.join(getattr(node_settings, cls.FOLDER_ATTR_NAME).strip('/'), path.lstrip('/'))
        return super(PathFollowingFileNode, cls).get_or_create(node, '/' + path)

    @property
    def path(self):
        """``stored_object``'s path made relative to ``_get_connected_path``."""
        return '/' + self.stored_object.path.replace(self._get_connected_path(), '', 1).lstrip('/')

    def _get_connected_path(self):
        """Returns the folder path of the connected provider add-on.

        >>> pffn._get_connected_path() # /MyDropbox/FolderImSharingOnTheOsf
        """
        node_settings = self.node.get_addon(self.provider)
        assert node_settings is not None, 'Connected node has no {} account'.format(self.provider)
        return getattr(node_settings, self.FOLDER_ATTR_NAME).strip('/')
| en | 0.807622 | website.files.models.ext is home to subclasses of FileNode that provide additional functionality and have no place in website.files.models.base A helper class that will attempt to track the its file through changes in the parent addons settings ie: Moving you dropbox director up or down X levels stored_object's path will always be the full path from the providers root directory Forces path to extend to the add-on's root directory Mutates the underlying stored_object's path to be relative to _get_connected_path Returns the path of the connected provider add-on >>> pffn._get_connected_path() # /MyDropbox/FolderImSharingOnTheOsf | 2.657007 | 3 |
GUI/Matplot.py | JaneHQ1/Predicting-Stroke-Severity-from-Computed-Tomography-Images | 0 | 6612902 | <filename>GUI/Matplot.py<gh_stars>0
"""
Use Matplotlib display dicom
"""
import pydicom
from pydicom.data import get_testdata_files
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider, Button
import os
class Matplot:
    """Namespace for the DICOM-viewer helpers used by the module-level script."""

    def __init__(self):
        pass

    @staticmethod
    def DCM_loader(path):
        """Read every file in ``path`` as a DICOM dataset and return them as a list.

        Fix: marked ``@staticmethod`` — the original had no ``self``
        parameter, so it only worked when called through the class and would
        break if called on an instance.
        """
        return [
            pydicom.dcmread(os.path.join(path, entry))
            for entry in os.listdir(path)
        ]

    @staticmethod
    def update(val):
        """Slider callback: display the slice selected by the index slider.

        NOTE(review): reads the module globals ``s_index``, ``canvas`` and
        ``dcms`` that are defined by the script below, so it only works after
        that script has run.
        """
        index = int(s_index.val)
        canvas.set_data(dcms[index].pixel_array)
        plt.draw()
# Remember to add the r (raw-string) prefix before the Windows path
path = r"C:\Users\janej\OneDrive\MelbUni\MASTER OF ENGINEERING\CapstoneProject_2018\Test_Images\Series 002 [CT - Crane SPC]"
dcms = Matplot.DCM_loader(path)
a = 1
# print(dcms)
# Show the first slice; the "bone" colormap emulates X-ray film.
canvas = plt.imshow(dcms[0].pixel_array, cmap=plt.cm.bone)
# Axes rect is [left, bottom, width, height] in figure coordinates.
ax_index = plt.axes([0.226, 0.005, 0.572, 0.02], facecolor='lightgoldenrodyellow')
# Slider over the slice index, snapping to whole slices (valstep=1).
s_index = Slider(ax_index, 'Index', 0, len(dcms)-1, valinit=0, valstep=1)
# Redraw the displayed slice whenever the slider moves.
s_index.on_changed(Matplot.update)
plot = plt.show()
# Experimental click-to-toggle code, kept for reference:
# Matplot.plotz(path)
# def toggle_images(event):
#     plt.imshow(ds2.pixel_array, cmap=plt.cm.bone)
#     plt.draw()
# plt.connect('button_press_event', toggle_images)
# print(plt.get_backend())
| <filename>GUI/Matplot.py<gh_stars>0
"""
Use Matplotlib display dicom
"""
import pydicom
from pydicom.data import get_testdata_files
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider, Button
import os
class Matplot():
def __init__(self):
pass
# load DCM file folder
def DCM_loader(path):
dcms=[]
for file in os.listdir(path):
# pydicom.dataset.FileDataset' object cannot be append to list
# The append() method adds a single item to the existing list.
# It doesn't return a new list; rather it modifies the original list.
# pydicom.filereader.dcmread(fp, defer_size=None, stop_before_pixels=False, force=False, specific_tags=None)
# fp:str or file-like: Either a file-like object, or a string containing the file name.
# If a file-like object, the caller is responsible for closing it.
# return: An instance of FileDataset that represents a parsed DICOM file.
dcms.append(pydicom.dcmread(os.path.join(path, file)))
return dcms
# update canvas
def update(val):
index = int(s_index.val)
# set_data(x, y, A)
# Set the grid for the pixel centers, and the pixel values.
# x and y are monotonic 1-D ndarrays of lengths N and M, respectively, specifying pixel centers
canvas.set_data(dcms[index].pixel_array)
plt.draw()
# Remember to add the r before the path
path = r"C:\Users\janej\OneDrive\MelbUni\MASTER OF ENGINEERING\CapstoneProject_2018\Test_Images\Series 002 [CT - Crane SPC]"
dcms = Matplot.DCM_loader(path)
a = 1
# print(dcms)
# matplotlib.pyplot.imshow(X, cmap=None, norm=None, aspect=None, interpolation=None, alpha=None, vmin=None, vmax=None,
# origin=None, extent=None, shape=None, filternorm=1, filterrad=4.0, imlim=None, resample=None, url=None, *, data=None, **kwargs)[source]
# X : array-like or PIL image
# The image data. Supported array shapes are:
#(M, N): an image with scalar data. The data is visualized using a colormap.
# cmap : str or Colormap, optional
# A Colormap instance or registered colormap name. The colormap maps scalar data to colors.
# pixel_array one of the information in dcms.
# Matplotlib has a number of built-in colormaps accessible via matplotlib.cm.get_cmap.
# bone: sequential increasing black-white color map with a tinge of blue, to emulate X-ray film
canvas = plt.imshow(dcms[0].pixel_array, cmap=plt.cm.bone)
# plt.axes(rect, projection=None, polar=False, **kwargs)
# 4-tuple of floats rect = [left, bottom, width, height]. Basically it is the size of the index.
# Returns: Axes (or a subclass of Axes)
ax_index = plt.axes([0.226, 0.005, 0.572, 0.02], facecolor='lightgoldenrodyellow')
# class matplotlib.widgets.Slider(ax, label, valmin, valmax, valinit=0.5, valfmt='%1.2f', closedmin=True, closedmax=True,
# slidermin=None, slidermax=None, dragging=True, valstep=None, **kwargs)[source]
# A slider representing a floating point range.
# val : float - Slider value.
# ax : Axes - The Axes to put the slider in.
# label : str - Slider label.
# valmin : float - The minimum value of the slider.
# valmax : float - The maximum value of the slider.
# valinit : float, optional, default: 0.5 - The slider initial position.
# valstep : float, optional, default: None - If given, the slider will snap to multiples of valstep.
s_index = Slider(ax_index, 'Index', 0, len(dcms)-1, valinit=0, valstep=1)
# on_changed(func)
# Function to call when slider is changed.
# Returns: cid : int - Connection id (which can be used to disconnect func)
s_index.on_changed(Matplot.update)
plot = plt.show()
# Matplot.plotz(path)
# def toggle_images(event):
# plt.imshow(ds2.pixel_array,cmap=plt.cm.bone)
# plt.draw()
#plt.connect('button_press_event', toggle_images)
# print(plt.get_backend())
| en | 0.587597 | Use Matplotlib display dicom # load DCM file folder # pydicom.dataset.FileDataset' object cannot be append to list # The append() method adds a single item to the existing list. # It doesn't return a new list; rather it modifies the original list. # pydicom.filereader.dcmread(fp, defer_size=None, stop_before_pixels=False, force=False, specific_tags=None) # fp:str or file-like: Either a file-like object, or a string containing the file name. # If a file-like object, the caller is responsible for closing it. # return: An instance of FileDataset that represents a parsed DICOM file. # update canvas # set_data(x, y, A) # Set the grid for the pixel centers, and the pixel values. # x and y are monotonic 1-D ndarrays of lengths N and M, respectively, specifying pixel centers # Remember to add the r before the path # print(dcms) # matplotlib.pyplot.imshow(X, cmap=None, norm=None, aspect=None, interpolation=None, alpha=None, vmin=None, vmax=None, # origin=None, extent=None, shape=None, filternorm=1, filterrad=4.0, imlim=None, resample=None, url=None, *, data=None, **kwargs)[source] # X : array-like or PIL image # The image data. Supported array shapes are: #(M, N): an image with scalar data. The data is visualized using a colormap. # cmap : str or Colormap, optional # A Colormap instance or registered colormap name. The colormap maps scalar data to colors. # pixel_array one of the information in dcms. # Matplotlib has a number of built-in colormaps accessible via matplotlib.cm.get_cmap. # bone: sequential increasing black-white color map with a tinge of blue, to emulate X-ray film # plt.axes(rect, projection=None, polar=False, **kwargs) # 4-tuple of floats rect = [left, bottom, width, height]. Basically it is the size of the index. 
# Returns: Axes (or a subclass of Axes) # class matplotlib.widgets.Slider(ax, label, valmin, valmax, valinit=0.5, valfmt='%1.2f', closedmin=True, closedmax=True, # slidermin=None, slidermax=None, dragging=True, valstep=None, **kwargs)[source] # A slider representing a floating point range. # val : float - Slider value. # ax : Axes - The Axes to put the slider in. # label : str - Slider label. # valmin : float - The minimum value of the slider. # valmax : float - The maximum value of the slider. # valinit : float, optional, default: 0.5 - The slider initial position. # valstep : float, optional, default: None - If given, the slider will snap to multiples of valstep. # on_changed(func) # Function to call when slider is changed. # Returns: cid : int - Connection id (which can be used to disconnect func) # Matplot.plotz(path) # def toggle_images(event): # plt.imshow(ds2.pixel_array,cmap=plt.cm.bone) # plt.draw() #plt.connect('button_press_event', toggle_images) # print(plt.get_backend()) | 2.833633 | 3 |
main.py | kjin67511/morning-pi | 16 | 6612903 | import datetime
import time
from config import ConfigSectionMap
from run import run, reset, button_pushed, lcd_ready
from utils.timer import int_time, timer_list
interval = int(ConfigSectionMap("run")['interval'])
duration = int(ConfigSectionMap("run")['duration'])
schedule_time = ConfigSectionMap("schedule")['time']
timers = []
start_time = int_time()
elapsed_time = 0
schedule_toggle = False
def check_schedule(time_str):
global schedule_toggle
if schedule_toggle is True:
return False
scheduled_time = time.strptime(time_str, "%H:%M")
current_time = datetime.datetime.now()
if scheduled_time.tm_hour == current_time.hour and scheduled_time.tm_min == current_time.minute:
return True
else:
return False
def start_timer():
"""
initialize time variables and timer_list to run within the main loop
"""
global start_time
global elapsed_time
global timers
timers = timer_list(duration, interval)
start_time = int_time()
elapsed_time = 0
if __name__ == "__main__":
print("start")
start_timer()
if lcd_ready():
try:
while True:
elapsed_time = int_time() - start_time
if button_pushed() or check_schedule(schedule_time):
schedule_toggle = True
start_timer()
if len(timers) > 0 and int(elapsed_time) == timers[0]:
timers.pop(0)
if len(timers) == 0: # end of timer
schedule_toggle = False
reset()
else:
run()
time.sleep(0.01)
except KeyboardInterrupt:
reset()
else: # test purpose in non-rpi
run()
| import datetime
import time
from config import ConfigSectionMap
from run import run, reset, button_pushed, lcd_ready
from utils.timer import int_time, timer_list
interval = int(ConfigSectionMap("run")['interval'])
duration = int(ConfigSectionMap("run")['duration'])
schedule_time = ConfigSectionMap("schedule")['time']
timers = []
start_time = int_time()
elapsed_time = 0
schedule_toggle = False
def check_schedule(time_str):
global schedule_toggle
if schedule_toggle is True:
return False
scheduled_time = time.strptime(time_str, "%H:%M")
current_time = datetime.datetime.now()
if scheduled_time.tm_hour == current_time.hour and scheduled_time.tm_min == current_time.minute:
return True
else:
return False
def start_timer():
"""
initialize time variables and timer_list to run within the main loop
"""
global start_time
global elapsed_time
global timers
timers = timer_list(duration, interval)
start_time = int_time()
elapsed_time = 0
if __name__ == "__main__":
print("start")
start_timer()
if lcd_ready():
try:
while True:
elapsed_time = int_time() - start_time
if button_pushed() or check_schedule(schedule_time):
schedule_toggle = True
start_timer()
if len(timers) > 0 and int(elapsed_time) == timers[0]:
timers.pop(0)
if len(timers) == 0: # end of timer
schedule_toggle = False
reset()
else:
run()
time.sleep(0.01)
except KeyboardInterrupt:
reset()
else: # test purpose in non-rpi
run()
| en | 0.817346 | initialize time variables and timer_list to run within the main loop # end of timer # test purpose in non-rpi | 3.362171 | 3 |
examples/list_rgs.py | gbowerman/azurerm | 44 | 6612904 | '''list_rgs.py - list Azure resource groups in a subscription'''
import json
import os
import sys
import azurerm
def main():
'''Main routine.'''
# if in Azure cloud shell, authenticate using the MSI endpoint
if 'ACC_CLOUD' in os.environ and 'MSI_ENDPOINT' in os.environ:
access_token = azurerm.get_access_token_from_cli()
subscription_id = azurerm.get_subscription_from_cli()
else: # load service principal details from a config file
try:
with open('azurermconfig.json') as configfile:
configdata = json.load(configfile)
except FileNotFoundError:
sys.exit('Error: Expecting azurermconfig.json in current folder')
tenant_id = configdata['tenantId']
app_id = configdata['appId']
app_secret = configdata['appSecret']
subscription_id = configdata['subscriptionId']
# authenticate
access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)
# list resource groups
resource_groups = azurerm.list_resource_groups(access_token, subscription_id)
for rgname in resource_groups['value']:
print(rgname['name'] + ', ' + rgname['location'])
'''
rg_details = azurerm.get_resource_group(access_token, subscription_id, rgname['name'])
print(json.dumps(rg_details, sort_keys=False, indent=2, separators=(',', ': ')))
'''
if __name__ == "__main__":
main() | '''list_rgs.py - list Azure resource groups in a subscription'''
import json
import os
import sys
import azurerm
def main():
'''Main routine.'''
# if in Azure cloud shell, authenticate using the MSI endpoint
if 'ACC_CLOUD' in os.environ and 'MSI_ENDPOINT' in os.environ:
access_token = azurerm.get_access_token_from_cli()
subscription_id = azurerm.get_subscription_from_cli()
else: # load service principal details from a config file
try:
with open('azurermconfig.json') as configfile:
configdata = json.load(configfile)
except FileNotFoundError:
sys.exit('Error: Expecting azurermconfig.json in current folder')
tenant_id = configdata['tenantId']
app_id = configdata['appId']
app_secret = configdata['appSecret']
subscription_id = configdata['subscriptionId']
# authenticate
access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)
# list resource groups
resource_groups = azurerm.list_resource_groups(access_token, subscription_id)
for rgname in resource_groups['value']:
print(rgname['name'] + ', ' + rgname['location'])
'''
rg_details = azurerm.get_resource_group(access_token, subscription_id, rgname['name'])
print(json.dumps(rg_details, sort_keys=False, indent=2, separators=(',', ': ')))
'''
if __name__ == "__main__":
main() | en | 0.486091 | list_rgs.py - list Azure resource groups in a subscription Main routine. # if in Azure cloud shell, authenticate using the MSI endpoint # load service principal details from a config file # authenticate # list resource groups rg_details = azurerm.get_resource_group(access_token, subscription_id, rgname['name']) print(json.dumps(rg_details, sort_keys=False, indent=2, separators=(',', ': '))) | 2.596364 | 3 |
compiler/optimizer.py | pfalcon/python-compiler | 42 | 6612905 | <gh_stars>10-100
import ast
import operator
from ast import Constant, Num, Str, Bytes, Ellipsis, NameConstant, copy_location
from typing import Iterable, Optional
from compiler.peephole import safe_multiply, safe_power, safe_mod, safe_lshift
from compiler.visitor import ASTRewriter
def is_const(node):
return isinstance(node, (Constant, Num, Str, Bytes, Ellipsis, NameConstant))
def get_const_value(node):
if isinstance(node, (Constant, NameConstant)):
return node.value
elif isinstance(node, Num):
return node.n
elif isinstance(node, (Str, Bytes)):
return node.s
elif isinstance(node, Ellipsis):
return ...
raise TypeError("Bad constant value")
class Py37Limits:
MAX_INT_SIZE = 128
MAX_COLLECTION_SIZE = 256
MAX_STR_SIZE = 4096
MAX_TOTAL_ITEMS = 1024
UNARY_OPS = {
ast.Invert: operator.invert,
ast.Not: operator.not_,
ast.UAdd: operator.pos,
ast.USub: operator.neg,
}
INVERSE_OPS = {
ast.Is: ast.IsNot,
ast.IsNot: ast.Is,
ast.In: ast.NotIn,
ast.NotIn: ast.In,
}
BIN_OPS = {
ast.Add: operator.add,
ast.Sub: operator.sub,
ast.Mult: lambda l, r: safe_multiply(l, r, Py37Limits),
ast.Div: operator.truediv,
ast.FloorDiv: operator.floordiv,
ast.Mod: lambda l, r: safe_mod(l, r, Py37Limits),
ast.Pow: lambda l, r: safe_power(l, r, Py37Limits),
ast.LShift: lambda l, r: safe_lshift(l, r, Py37Limits),
ast.RShift: operator.rshift,
ast.BitOr: operator.or_,
ast.BitXor: operator.xor,
ast.BitAnd: operator.and_,
}
class AstOptimizer(ASTRewriter):
def __init__(self, optimize = False):
super().__init__()
self.optimize = optimize
def visitUnaryOp(self, node: ast.UnaryOp) -> ast.expr:
op = self.visit(node.operand)
if is_const(op):
conv = UNARY_OPS[type(node.op)]
val = get_const_value(op)
try:
return copy_location(Constant(conv(val)), node)
except:
pass
elif (
isinstance(node.op, ast.Not)
and isinstance(node.operand, ast.Compare)
and len(node.operand.ops) == 1
):
cmp_op = node.operand.ops[0]
new_op = INVERSE_OPS.get(type(cmp_op))
if new_op is not None:
return self.update_node(node.operand, ops=[new_op()])
return self.update_node(node, operand=op)
def visitBinOp(self, node: ast.BinOp) -> ast.expr:
l = self.visit(node.left)
r = self.visit(node.right)
if is_const(l) and is_const(r):
handler = BIN_OPS.get(type(node.op))
if handler is not None:
lval = get_const_value(l)
rval = get_const_value(r)
try:
return copy_location(Constant(handler(lval, rval)), node)
except:
pass
return self.update_node(node, left=l, right=r)
def makeConstTuple(self, elts: Iterable[ast.expr]) -> Optional[Constant]:
if all(is_const(elt) for elt in elts):
return Constant(tuple(get_const_value(elt) for elt in elts))
return None
def visitTuple(self, node: ast.Tuple) -> ast.expr:
elts = self.walk_list(node.elts)
if isinstance(node.ctx, ast.Load):
res = self.makeConstTuple(elts)
if res is not None:
return copy_location(res, node)
return self.update_node(node, elts=elts)
def visitSubscript(self, node: ast.Subscript) -> ast.expr:
value = self.visit(node.value)
slice = self.visit(node.slice)
if (
isinstance(node.ctx, ast.Load)
and is_const(value)
and isinstance(slice, ast.Index)
and is_const(slice.value)
):
try:
return copy_location(
Constant(get_const_value(value)[get_const_value(slice.value)]), node
)
except:
pass
return self.update_node(node, value=value, slice=slice)
def _visitIter(self, node: ast.expr) -> ast.expr:
if isinstance(node, ast.List):
elts = self.visit(node.elts)
res = self.makeConstTuple(elts)
if res is not None:
return copy_location(res, node)
return self.update_node(node, elts=elts)
elif isinstance(node, ast.Set):
elts = self.visit(node.elts)
res = self.makeConstTuple(elts)
if res is not None:
return copy_location(Constant(frozenset(res.value)), node)
return self.update_node(node, elts=elts)
return self.generic_visit(node)
def visitcomprehension(self, node: ast.comprehension) -> ast.expr:
target = self.visit(node.target)
iter = self.visit(node.iter)
ifs = self.visit(node.ifs)
iter = self._visitIter(iter)
return self.update_node(node, target=target, iter=iter, ifs=ifs)
def visitFor(self, node: ast.For) -> ast.expr:
target = self.visit(node.target)
iter = self.visit(node.iter)
body = self.visit(node.body)
orelse = self.visit(node.orelse)
iter = self._visitIter(iter)
return self.update_node(
node, target=target, iter=iter, body=body, orelse=orelse
)
def visitCompare(self, node: ast.Compare) -> ast.expr:
left = self.visit(node.left)
comparators = self.visit(node.comparators)
if isinstance(node.ops[-1], (ast.In, ast.NotIn)):
new_iter = self._visitIter(comparators[-1])
if new_iter is not None and new_iter is not comparators[-1]:
comparators = list(comparators)
comparators[-1] = new_iter
return self.update_node(node, left=left, comparators=comparators)
def visitName(self, node: ast.Name):
if node.id == "__debug__":
return copy_location(Constant(not self.optimize), node)
return self.generic_visit(node)
| import ast
import operator
from ast import Constant, Num, Str, Bytes, Ellipsis, NameConstant, copy_location
from typing import Iterable, Optional
from compiler.peephole import safe_multiply, safe_power, safe_mod, safe_lshift
from compiler.visitor import ASTRewriter
def is_const(node):
return isinstance(node, (Constant, Num, Str, Bytes, Ellipsis, NameConstant))
def get_const_value(node):
if isinstance(node, (Constant, NameConstant)):
return node.value
elif isinstance(node, Num):
return node.n
elif isinstance(node, (Str, Bytes)):
return node.s
elif isinstance(node, Ellipsis):
return ...
raise TypeError("Bad constant value")
class Py37Limits:
MAX_INT_SIZE = 128
MAX_COLLECTION_SIZE = 256
MAX_STR_SIZE = 4096
MAX_TOTAL_ITEMS = 1024
UNARY_OPS = {
ast.Invert: operator.invert,
ast.Not: operator.not_,
ast.UAdd: operator.pos,
ast.USub: operator.neg,
}
INVERSE_OPS = {
ast.Is: ast.IsNot,
ast.IsNot: ast.Is,
ast.In: ast.NotIn,
ast.NotIn: ast.In,
}
BIN_OPS = {
ast.Add: operator.add,
ast.Sub: operator.sub,
ast.Mult: lambda l, r: safe_multiply(l, r, Py37Limits),
ast.Div: operator.truediv,
ast.FloorDiv: operator.floordiv,
ast.Mod: lambda l, r: safe_mod(l, r, Py37Limits),
ast.Pow: lambda l, r: safe_power(l, r, Py37Limits),
ast.LShift: lambda l, r: safe_lshift(l, r, Py37Limits),
ast.RShift: operator.rshift,
ast.BitOr: operator.or_,
ast.BitXor: operator.xor,
ast.BitAnd: operator.and_,
}
class AstOptimizer(ASTRewriter):
def __init__(self, optimize = False):
super().__init__()
self.optimize = optimize
def visitUnaryOp(self, node: ast.UnaryOp) -> ast.expr:
op = self.visit(node.operand)
if is_const(op):
conv = UNARY_OPS[type(node.op)]
val = get_const_value(op)
try:
return copy_location(Constant(conv(val)), node)
except:
pass
elif (
isinstance(node.op, ast.Not)
and isinstance(node.operand, ast.Compare)
and len(node.operand.ops) == 1
):
cmp_op = node.operand.ops[0]
new_op = INVERSE_OPS.get(type(cmp_op))
if new_op is not None:
return self.update_node(node.operand, ops=[new_op()])
return self.update_node(node, operand=op)
def visitBinOp(self, node: ast.BinOp) -> ast.expr:
l = self.visit(node.left)
r = self.visit(node.right)
if is_const(l) and is_const(r):
handler = BIN_OPS.get(type(node.op))
if handler is not None:
lval = get_const_value(l)
rval = get_const_value(r)
try:
return copy_location(Constant(handler(lval, rval)), node)
except:
pass
return self.update_node(node, left=l, right=r)
def makeConstTuple(self, elts: Iterable[ast.expr]) -> Optional[Constant]:
if all(is_const(elt) for elt in elts):
return Constant(tuple(get_const_value(elt) for elt in elts))
return None
def visitTuple(self, node: ast.Tuple) -> ast.expr:
elts = self.walk_list(node.elts)
if isinstance(node.ctx, ast.Load):
res = self.makeConstTuple(elts)
if res is not None:
return copy_location(res, node)
return self.update_node(node, elts=elts)
def visitSubscript(self, node: ast.Subscript) -> ast.expr:
value = self.visit(node.value)
slice = self.visit(node.slice)
if (
isinstance(node.ctx, ast.Load)
and is_const(value)
and isinstance(slice, ast.Index)
and is_const(slice.value)
):
try:
return copy_location(
Constant(get_const_value(value)[get_const_value(slice.value)]), node
)
except:
pass
return self.update_node(node, value=value, slice=slice)
def _visitIter(self, node: ast.expr) -> ast.expr:
if isinstance(node, ast.List):
elts = self.visit(node.elts)
res = self.makeConstTuple(elts)
if res is not None:
return copy_location(res, node)
return self.update_node(node, elts=elts)
elif isinstance(node, ast.Set):
elts = self.visit(node.elts)
res = self.makeConstTuple(elts)
if res is not None:
return copy_location(Constant(frozenset(res.value)), node)
return self.update_node(node, elts=elts)
return self.generic_visit(node)
def visitcomprehension(self, node: ast.comprehension) -> ast.expr:
target = self.visit(node.target)
iter = self.visit(node.iter)
ifs = self.visit(node.ifs)
iter = self._visitIter(iter)
return self.update_node(node, target=target, iter=iter, ifs=ifs)
def visitFor(self, node: ast.For) -> ast.expr:
target = self.visit(node.target)
iter = self.visit(node.iter)
body = self.visit(node.body)
orelse = self.visit(node.orelse)
iter = self._visitIter(iter)
return self.update_node(
node, target=target, iter=iter, body=body, orelse=orelse
)
def visitCompare(self, node: ast.Compare) -> ast.expr:
left = self.visit(node.left)
comparators = self.visit(node.comparators)
if isinstance(node.ops[-1], (ast.In, ast.NotIn)):
new_iter = self._visitIter(comparators[-1])
if new_iter is not None and new_iter is not comparators[-1]:
comparators = list(comparators)
comparators[-1] = new_iter
return self.update_node(node, left=left, comparators=comparators)
def visitName(self, node: ast.Name):
if node.id == "__debug__":
return copy_location(Constant(not self.optimize), node)
return self.generic_visit(node) | none | 1 | 2.428385 | 2 | |
test/field/test_reference.py | marrow/mongo | 22 | 6612906 | <reponame>marrow/mongo
# encoding: utf-8
from __future__ import unicode_literals
import pytest
from common import FieldExam
from marrow.mongo import Document
from marrow.mongo.field import Reference, String
from marrow.mongo.trait import Collection
class Concrete(Collection):
__collection__ = 'collection'
foo = String()
bar = String()
class TestReferenceField(FieldExam):
__field__ = Reference
__args__ = (Document, )
def test_foreign(self, Sample):
assert Sample.field._field.__foreign__ == 'objectId'
def test_foreign_cast_document_fail(self, Sample):
inst = Sample()
doc = Document()
with pytest.raises(ValueError):
inst.field = doc
def test_foreign_cast_document(self, Sample):
inst = Sample()
doc = Document()
doc['_id'] = 27
inst.field = doc
assert inst['field'] == 27
def test_oid_failure(self, Sample):
inst = Sample(field='z' * 24)
assert inst['field'] == 'z' * 24
class TestConcreteReferenceField(FieldExam):
__field__ = Reference
__args__ = (Document, )
__kwargs__ = {'concrete': True}
def test_concrete_reference(self, Sample):
inst = Sample(field=Concrete(foo="a", bar="b"))
assert inst.__data__['field'].collection == 'collection'
| # encoding: utf-8
from __future__ import unicode_literals
import pytest
from common import FieldExam
from marrow.mongo import Document
from marrow.mongo.field import Reference, String
from marrow.mongo.trait import Collection
class Concrete(Collection):
__collection__ = 'collection'
foo = String()
bar = String()
class TestReferenceField(FieldExam):
__field__ = Reference
__args__ = (Document, )
def test_foreign(self, Sample):
assert Sample.field._field.__foreign__ == 'objectId'
def test_foreign_cast_document_fail(self, Sample):
inst = Sample()
doc = Document()
with pytest.raises(ValueError):
inst.field = doc
def test_foreign_cast_document(self, Sample):
inst = Sample()
doc = Document()
doc['_id'] = 27
inst.field = doc
assert inst['field'] == 27
def test_oid_failure(self, Sample):
inst = Sample(field='z' * 24)
assert inst['field'] == 'z' * 24
class TestConcreteReferenceField(FieldExam):
__field__ = Reference
__args__ = (Document, )
__kwargs__ = {'concrete': True}
def test_concrete_reference(self, Sample):
inst = Sample(field=Concrete(foo="a", bar="b"))
assert inst.__data__['field'].collection == 'collection' | en | 0.83829 | # encoding: utf-8 | 2.126102 | 2 |
sdk/python/pulumi_alicloud/ros/stack.py | pulumi/pulumi-alicloud | 42 | 6612907 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['StackArgs', 'Stack']
@pulumi.input_type
class StackArgs:
def __init__(__self__, *,
stack_name: pulumi.Input[str],
create_option: Optional[pulumi.Input[str]] = None,
deletion_protection: Optional[pulumi.Input[str]] = None,
disable_rollback: Optional[pulumi.Input[bool]] = None,
notification_urls: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
parameters: Optional[pulumi.Input[Sequence[pulumi.Input['StackParameterArgs']]]] = None,
ram_role_name: Optional[pulumi.Input[str]] = None,
replacement_option: Optional[pulumi.Input[str]] = None,
retain_all_resources: Optional[pulumi.Input[bool]] = None,
retain_resources: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
stack_policy_body: Optional[pulumi.Input[str]] = None,
stack_policy_during_update_body: Optional[pulumi.Input[str]] = None,
stack_policy_during_update_url: Optional[pulumi.Input[str]] = None,
stack_policy_url: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
template_body: Optional[pulumi.Input[str]] = None,
template_url: Optional[pulumi.Input[str]] = None,
template_version: Optional[pulumi.Input[str]] = None,
timeout_in_minutes: Optional[pulumi.Input[int]] = None,
use_previous_parameters: Optional[pulumi.Input[bool]] = None):
"""
The set of arguments for constructing a Stack resource.
:param pulumi.Input[str] stack_name: The name can be up to 255 characters in length and can contain digits, letters, hyphens (-), and underscores (_). It must start with a digit or letter.
:param pulumi.Input[str] create_option: Specifies whether to delete the stack after it is created.
:param pulumi.Input[str] deletion_protection: Specifies whether to enable deletion protection on the stack. Valid values: `Disabled`, `Enabled`. Default to: `Disabled`
:param pulumi.Input[bool] disable_rollback: Specifies whether to disable rollback on stack creation failure. Default to: `false`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] notification_urls: The callback URL for receiving stack event N. Only HTTP POST is supported. Maximum value of N: 5.
:param pulumi.Input[Sequence[pulumi.Input['StackParameterArgs']]] parameters: The parameters. If the parameter name and value are not specified, ROS will use the default value specified in the template.
:param pulumi.Input[str] ram_role_name: The name of the RAM role. ROS assumes the specified RAM role to create the stack and call API operations by using the credentials of the role.
:param pulumi.Input[str] replacement_option: Specifies whether to enable replacement update after a resource attribute that does not support modification update is changed. Modification update keeps the physical ID of the resource unchanged. However, the resource is deleted and then recreated, and its physical ID is changed if replacement update is enabled.
:param pulumi.Input[bool] retain_all_resources: The retain all resources.
:param pulumi.Input[Sequence[pulumi.Input[str]]] retain_resources: Specifies whether to retain the resources in the stack.
:param pulumi.Input[str] stack_policy_body: The structure that contains the stack policy body. The stack policy body must be 1 to 16,384 bytes in length.
:param pulumi.Input[str] stack_policy_during_update_body: The structure that contains the body of the temporary overriding stack policy. The stack policy body must be 1 to 16,384 bytes in length.
:param pulumi.Input[str] stack_policy_during_update_url: The URL of the file that contains the temporary overriding stack policy. The URL must point to a policy located in an HTTP or HTTPS web server or an Alibaba Cloud OSS bucket. Examples: oss://ros/stack-policy/demo and oss://ros/stack-policy/demo?RegionId=cn-hangzhou. The policy can be up to 16,384 bytes in length and the URL can be up to 1,350 bytes in length. If the region of the OSS bucket is not specified, the RegionId value is used by default.
:param pulumi.Input[str] stack_policy_url: The URL of the file that contains the stack policy. The URL must point to a policy located in an HTTP or HTTPS web server or an Alibaba Cloud OSS bucket. Examples: oss://ros/stack-policy/demo and oss://ros/stack-policy/demo?RegionId=cn-hangzhou. The policy can be up to 16,384 bytes in length and the URL can be up to 1,350 bytes in length. If the region of the OSS bucket is not specified, the RegionId value is used by default.
:param pulumi.Input[Mapping[str, Any]] tags: A mapping of tags to assign to the resource.
:param pulumi.Input[str] template_body: The structure that contains the template body. The template body must be 1 to 524,288 bytes in length. If the length of the template body is longer than required, we recommend that you add parameters to the HTTP POST request body to avoid request failures due to excessive length of URLs.
:param pulumi.Input[str] template_url: The URL of the file that contains the template body. The URL must point to a template located in an HTTP or HTTPS web server or an Alibaba Cloud OSS bucket. Examples: oss://ros/template/demo and oss://ros/template/demo?RegionId=cn-hangzhou. The template must be 1 to 524,288 bytes in length. If the region of the OSS bucket is not specified, the RegionId value is used by default.
:param pulumi.Input[str] template_version: The version of the template.
:param pulumi.Input[int] timeout_in_minutes: The timeout period that is specified for the stack creation request. Default to: `60`.
:param pulumi.Input[bool] use_previous_parameters: Specifies whether to use the values that were passed last time for the parameters that you do not specify in the current request.
"""
pulumi.set(__self__, "stack_name", stack_name)
if create_option is not None:
pulumi.set(__self__, "create_option", create_option)
if deletion_protection is not None:
pulumi.set(__self__, "deletion_protection", deletion_protection)
if disable_rollback is not None:
pulumi.set(__self__, "disable_rollback", disable_rollback)
if notification_urls is not None:
pulumi.set(__self__, "notification_urls", notification_urls)
if parameters is not None:
pulumi.set(__self__, "parameters", parameters)
if ram_role_name is not None:
pulumi.set(__self__, "ram_role_name", ram_role_name)
if replacement_option is not None:
pulumi.set(__self__, "replacement_option", replacement_option)
if retain_all_resources is not None:
pulumi.set(__self__, "retain_all_resources", retain_all_resources)
if retain_resources is not None:
pulumi.set(__self__, "retain_resources", retain_resources)
if stack_policy_body is not None:
pulumi.set(__self__, "stack_policy_body", stack_policy_body)
if stack_policy_during_update_body is not None:
pulumi.set(__self__, "stack_policy_during_update_body", stack_policy_during_update_body)
if stack_policy_during_update_url is not None:
pulumi.set(__self__, "stack_policy_during_update_url", stack_policy_during_update_url)
if stack_policy_url is not None:
pulumi.set(__self__, "stack_policy_url", stack_policy_url)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if template_body is not None:
pulumi.set(__self__, "template_body", template_body)
if template_url is not None:
pulumi.set(__self__, "template_url", template_url)
if template_version is not None:
pulumi.set(__self__, "template_version", template_version)
if timeout_in_minutes is not None:
pulumi.set(__self__, "timeout_in_minutes", timeout_in_minutes)
if use_previous_parameters is not None:
pulumi.set(__self__, "use_previous_parameters", use_previous_parameters)
    @property
    @pulumi.getter(name="stackName")
    def stack_name(self) -> pulumi.Input[str]:
        """
        The name can be up to 255 characters in length and can contain digits, letters, hyphens (-), and underscores (_). It must start with a digit or letter.
        """
        # Wire name is camelCase "stackName" (see the getter decorator above).
        return pulumi.get(self, "stack_name")

    @stack_name.setter
    def stack_name(self, value: pulumi.Input[str]):
        # Store the value under the "stack_name" key via the pulumi helper.
        pulumi.set(self, "stack_name", value)
    @property
    @pulumi.getter(name="createOption")
    def create_option(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies whether to delete the stack after it is created.
        """
        # Optional field: returns None when the caller never supplied a value.
        return pulumi.get(self, "create_option")

    @create_option.setter
    def create_option(self, value: Optional[pulumi.Input[str]]):
        # Store the value under the "create_option" key via the pulumi helper.
        pulumi.set(self, "create_option", value)
@property
@pulumi.getter(name="deletionProtection")
def deletion_protection(self) -> Optional[pulumi.Input[str]]:
"""
Specifies whether to enable deletion protection on the stack. Valid values: `Disabled`, `Enabled`. Default to: `Disabled`
"""
return pulumi.get(self, "deletion_protection")
@deletion_protection.setter
def deletion_protection(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "deletion_protection", value)
@property
@pulumi.getter(name="disableRollback")
def disable_rollback(self) -> Optional[pulumi.Input[bool]]:
"""
Specifies whether to disable rollback on stack creation failure. Default to: `false`.
"""
return pulumi.get(self, "disable_rollback")
@disable_rollback.setter
def disable_rollback(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "disable_rollback", value)
@property
@pulumi.getter(name="notificationUrls")
def notification_urls(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The callback URL for receiving stack event N. Only HTTP POST is supported. Maximum value of N: 5.
"""
return pulumi.get(self, "notification_urls")
@notification_urls.setter
def notification_urls(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "notification_urls", value)
@property
@pulumi.getter
def parameters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['StackParameterArgs']]]]:
"""
The parameters. If the parameter name and value are not specified, ROS will use the default value specified in the template.
"""
return pulumi.get(self, "parameters")
@parameters.setter
def parameters(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['StackParameterArgs']]]]):
pulumi.set(self, "parameters", value)
@property
@pulumi.getter(name="ramRoleName")
def ram_role_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the RAM role. ROS assumes the specified RAM role to create the stack and call API operations by using the credentials of the role.
"""
return pulumi.get(self, "ram_role_name")
@ram_role_name.setter
def ram_role_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ram_role_name", value)
@property
@pulumi.getter(name="replacementOption")
def replacement_option(self) -> Optional[pulumi.Input[str]]:
"""
Specifies whether to enable replacement update after a resource attribute that does not support modification update is changed. Modification update keeps the physical ID of the resource unchanged. However, the resource is deleted and then recreated, and its physical ID is changed if replacement update is enabled.
"""
return pulumi.get(self, "replacement_option")
@replacement_option.setter
def replacement_option(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "replacement_option", value)
@property
@pulumi.getter(name="retainAllResources")
def retain_all_resources(self) -> Optional[pulumi.Input[bool]]:
"""
The retain all resources.
"""
return pulumi.get(self, "retain_all_resources")
@retain_all_resources.setter
def retain_all_resources(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "retain_all_resources", value)
@property
@pulumi.getter(name="retainResources")
def retain_resources(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Specifies whether to retain the resources in the stack.
"""
return pulumi.get(self, "retain_resources")
@retain_resources.setter
def retain_resources(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "retain_resources", value)
@property
@pulumi.getter(name="stackPolicyBody")
def stack_policy_body(self) -> Optional[pulumi.Input[str]]:
"""
The structure that contains the stack policy body. The stack policy body must be 1 to 16,384 bytes in length.
"""
return pulumi.get(self, "stack_policy_body")
@stack_policy_body.setter
def stack_policy_body(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "stack_policy_body", value)
@property
@pulumi.getter(name="stackPolicyDuringUpdateBody")
def stack_policy_during_update_body(self) -> Optional[pulumi.Input[str]]:
"""
The structure that contains the body of the temporary overriding stack policy. The stack policy body must be 1 to 16,384 bytes in length.
"""
return pulumi.get(self, "stack_policy_during_update_body")
@stack_policy_during_update_body.setter
def stack_policy_during_update_body(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "stack_policy_during_update_body", value)
@property
@pulumi.getter(name="stackPolicyDuringUpdateUrl")
def stack_policy_during_update_url(self) -> Optional[pulumi.Input[str]]:
"""
The URL of the file that contains the temporary overriding stack policy. The URL must point to a policy located in an HTTP or HTTPS web server or an Alibaba Cloud OSS bucket. Examples: oss://ros/stack-policy/demo and oss://ros/stack-policy/demo?RegionId=cn-hangzhou. The policy can be up to 16,384 bytes in length and the URL can be up to 1,350 bytes in length. If the region of the OSS bucket is not specified, the RegionId value is used by default.
"""
return pulumi.get(self, "stack_policy_during_update_url")
@stack_policy_during_update_url.setter
def stack_policy_during_update_url(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "stack_policy_during_update_url", value)
@property
@pulumi.getter(name="stackPolicyUrl")
def stack_policy_url(self) -> Optional[pulumi.Input[str]]:
"""
The URL of the file that contains the stack policy. The URL must point to a policy located in an HTTP or HTTPS web server or an Alibaba Cloud OSS bucket. Examples: oss://ros/stack-policy/demo and oss://ros/stack-policy/demo?RegionId=cn-hangzhou. The policy can be up to 16,384 bytes in length and the URL can be up to 1,350 bytes in length. If the region of the OSS bucket is not specified, the RegionId value is used by default.
"""
return pulumi.get(self, "stack_policy_url")
@stack_policy_url.setter
def stack_policy_url(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "stack_policy_url", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
A mapping of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="templateBody")
def template_body(self) -> Optional[pulumi.Input[str]]:
"""
The structure that contains the template body. The template body must be 1 to 524,288 bytes in length. If the length of the template body is longer than required, we recommend that you add parameters to the HTTP POST request body to avoid request failures due to excessive length of URLs.
"""
return pulumi.get(self, "template_body")
@template_body.setter
def template_body(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "template_body", value)
@property
@pulumi.getter(name="templateUrl")
def template_url(self) -> Optional[pulumi.Input[str]]:
"""
The URL of the file that contains the template body. The URL must point to a template located in an HTTP or HTTPS web server or an Alibaba Cloud OSS bucket. Examples: oss://ros/template/demo and oss://ros/template/demo?RegionId=cn-hangzhou. The template must be 1 to 524,288 bytes in length. If the region of the OSS bucket is not specified, the RegionId value is used by default.
"""
return pulumi.get(self, "template_url")
@template_url.setter
def template_url(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "template_url", value)
@property
@pulumi.getter(name="templateVersion")
def template_version(self) -> Optional[pulumi.Input[str]]:
"""
The version of the template.
"""
return pulumi.get(self, "template_version")
@template_version.setter
def template_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "template_version", value)
@property
@pulumi.getter(name="timeoutInMinutes")
def timeout_in_minutes(self) -> Optional[pulumi.Input[int]]:
"""
The timeout period that is specified for the stack creation request. Default to: `60`.
"""
return pulumi.get(self, "timeout_in_minutes")
@timeout_in_minutes.setter
def timeout_in_minutes(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "timeout_in_minutes", value)
@property
@pulumi.getter(name="usePreviousParameters")
def use_previous_parameters(self) -> Optional[pulumi.Input[bool]]:
"""
Specifies whether to use the values that were passed last time for the parameters that you do not specify in the current request.
"""
return pulumi.get(self, "use_previous_parameters")
@use_previous_parameters.setter
def use_previous_parameters(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "use_previous_parameters", value)
@pulumi.input_type
class _StackState:
    """
    State (output) properties of a ROS Stack, used when looking up or
    refreshing an existing resource. All fields are optional because any
    subset may be known at lookup time. Generated code: the
    @pulumi.input_type decorator introspects the property pairs below,
    which only delegate to pulumi.get / pulumi.set.
    """

    def __init__(__self__, *,
                 create_option: Optional[pulumi.Input[str]] = None,
                 deletion_protection: Optional[pulumi.Input[str]] = None,
                 disable_rollback: Optional[pulumi.Input[bool]] = None,
                 notification_urls: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 parameters: Optional[pulumi.Input[Sequence[pulumi.Input['StackParameterArgs']]]] = None,
                 ram_role_name: Optional[pulumi.Input[str]] = None,
                 replacement_option: Optional[pulumi.Input[str]] = None,
                 retain_all_resources: Optional[pulumi.Input[bool]] = None,
                 retain_resources: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 stack_name: Optional[pulumi.Input[str]] = None,
                 stack_policy_body: Optional[pulumi.Input[str]] = None,
                 stack_policy_during_update_body: Optional[pulumi.Input[str]] = None,
                 stack_policy_during_update_url: Optional[pulumi.Input[str]] = None,
                 stack_policy_url: Optional[pulumi.Input[str]] = None,
                 status: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 template_body: Optional[pulumi.Input[str]] = None,
                 template_url: Optional[pulumi.Input[str]] = None,
                 template_version: Optional[pulumi.Input[str]] = None,
                 timeout_in_minutes: Optional[pulumi.Input[int]] = None,
                 use_previous_parameters: Optional[pulumi.Input[bool]] = None):
        """
        Input properties used for looking up and filtering Stack resources.
        :param pulumi.Input[str] create_option: Specifies whether to delete the stack after it is created.
        :param pulumi.Input[str] deletion_protection: Specifies whether to enable deletion protection on the stack. Valid values: `Disabled`, `Enabled`. Default to: `Disabled`
        :param pulumi.Input[bool] disable_rollback: Specifies whether to disable rollback on stack creation failure. Default to: `false`.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] notification_urls: The callback URL for receiving stack event N. Only HTTP POST is supported. Maximum value of N: 5.
        :param pulumi.Input[Sequence[pulumi.Input['StackParameterArgs']]] parameters: The parameters. If the parameter name and value are not specified, ROS will use the default value specified in the template.
        :param pulumi.Input[str] ram_role_name: The name of the RAM role. ROS assumes the specified RAM role to create the stack and call API operations by using the credentials of the role.
        :param pulumi.Input[str] replacement_option: Specifies whether to enable replacement update after a resource attribute that does not support modification update is changed. Modification update keeps the physical ID of the resource unchanged. However, the resource is deleted and then recreated, and its physical ID is changed if replacement update is enabled.
        :param pulumi.Input[bool] retain_all_resources: The retain all resources.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] retain_resources: Specifies whether to retain the resources in the stack.
        :param pulumi.Input[str] stack_name: The name can be up to 255 characters in length and can contain digits, letters, hyphens (-), and underscores (_). It must start with a digit or letter.
        :param pulumi.Input[str] stack_policy_body: The structure that contains the stack policy body. The stack policy body must be 1 to 16,384 bytes in length.
        :param pulumi.Input[str] stack_policy_during_update_body: The structure that contains the body of the temporary overriding stack policy. The stack policy body must be 1 to 16,384 bytes in length.
        :param pulumi.Input[str] stack_policy_during_update_url: The URL of the file that contains the temporary overriding stack policy. The URL must point to a policy located in an HTTP or HTTPS web server or an Alibaba Cloud OSS bucket. Examples: oss://ros/stack-policy/demo and oss://ros/stack-policy/demo?RegionId=cn-hangzhou. The policy can be up to 16,384 bytes in length and the URL can be up to 1,350 bytes in length. If the region of the OSS bucket is not specified, the RegionId value is used by default.
        :param pulumi.Input[str] stack_policy_url: The URL of the file that contains the stack policy. The URL must point to a policy located in an HTTP or HTTPS web server or an Alibaba Cloud OSS bucket. Examples: oss://ros/stack-policy/demo and oss://ros/stack-policy/demo?RegionId=cn-hangzhou. The policy can be up to 16,384 bytes in length and the URL can be up to 1,350 bytes in length. If the region of the OSS bucket is not specified, the RegionId value is used by default.
        :param pulumi.Input[str] status: The status of Stack.
        :param pulumi.Input[Mapping[str, Any]] tags: A mapping of tags to assign to the resource.
        :param pulumi.Input[str] template_body: The structure that contains the template body. The template body must be 1 to 524,288 bytes in length. If the length of the template body is longer than required, we recommend that you add parameters to the HTTP POST request body to avoid request failures due to excessive length of URLs.
        :param pulumi.Input[str] template_url: The URL of the file that contains the template body. The URL must point to a template located in an HTTP or HTTPS web server or an Alibaba Cloud OSS bucket. Examples: oss://ros/template/demo and oss://ros/template/demo?RegionId=cn-hangzhou. The template must be 1 to 524,288 bytes in length. If the region of the OSS bucket is not specified, the RegionId value is used by default.
        :param pulumi.Input[str] template_version: The version of the template.
        :param pulumi.Input[int] timeout_in_minutes: The timeout period that is specified for the stack creation request. Default to: `60`.
        :param pulumi.Input[bool] use_previous_parameters: Specifies whether to use the values that were passed last time for the parameters that you do not specify in the current request.
        """
        # Only set values that were explicitly provided, so that unset state
        # fields stay absent rather than being recorded as None.
        if create_option is not None:
            pulumi.set(__self__, "create_option", create_option)
        if deletion_protection is not None:
            pulumi.set(__self__, "deletion_protection", deletion_protection)
        if disable_rollback is not None:
            pulumi.set(__self__, "disable_rollback", disable_rollback)
        if notification_urls is not None:
            pulumi.set(__self__, "notification_urls", notification_urls)
        if parameters is not None:
            pulumi.set(__self__, "parameters", parameters)
        if ram_role_name is not None:
            pulumi.set(__self__, "ram_role_name", ram_role_name)
        if replacement_option is not None:
            pulumi.set(__self__, "replacement_option", replacement_option)
        if retain_all_resources is not None:
            pulumi.set(__self__, "retain_all_resources", retain_all_resources)
        if retain_resources is not None:
            pulumi.set(__self__, "retain_resources", retain_resources)
        if stack_name is not None:
            pulumi.set(__self__, "stack_name", stack_name)
        if stack_policy_body is not None:
            pulumi.set(__self__, "stack_policy_body", stack_policy_body)
        if stack_policy_during_update_body is not None:
            pulumi.set(__self__, "stack_policy_during_update_body", stack_policy_during_update_body)
        if stack_policy_during_update_url is not None:
            pulumi.set(__self__, "stack_policy_during_update_url", stack_policy_during_update_url)
        if stack_policy_url is not None:
            pulumi.set(__self__, "stack_policy_url", stack_policy_url)
        if status is not None:
            pulumi.set(__self__, "status", status)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if template_body is not None:
            pulumi.set(__self__, "template_body", template_body)
        if template_url is not None:
            pulumi.set(__self__, "template_url", template_url)
        if template_version is not None:
            pulumi.set(__self__, "template_version", template_version)
        if timeout_in_minutes is not None:
            pulumi.set(__self__, "timeout_in_minutes", timeout_in_minutes)
        if use_previous_parameters is not None:
            pulumi.set(__self__, "use_previous_parameters", use_previous_parameters)

    @property
    @pulumi.getter(name="createOption")
    def create_option(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies whether to delete the stack after it is created.
        """
        return pulumi.get(self, "create_option")

    @create_option.setter
    def create_option(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "create_option", value)

    @property
    @pulumi.getter(name="deletionProtection")
    def deletion_protection(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies whether to enable deletion protection on the stack. Valid values: `Disabled`, `Enabled`. Default to: `Disabled`
        """
        return pulumi.get(self, "deletion_protection")

    @deletion_protection.setter
    def deletion_protection(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "deletion_protection", value)

    @property
    @pulumi.getter(name="disableRollback")
    def disable_rollback(self) -> Optional[pulumi.Input[bool]]:
        """
        Specifies whether to disable rollback on stack creation failure. Default to: `false`.
        """
        return pulumi.get(self, "disable_rollback")

    @disable_rollback.setter
    def disable_rollback(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "disable_rollback", value)

    @property
    @pulumi.getter(name="notificationUrls")
    def notification_urls(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        The callback URL for receiving stack event N. Only HTTP POST is supported. Maximum value of N: 5.
        """
        return pulumi.get(self, "notification_urls")

    @notification_urls.setter
    def notification_urls(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "notification_urls", value)

    @property
    @pulumi.getter
    def parameters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['StackParameterArgs']]]]:
        """
        The parameters. If the parameter name and value are not specified, ROS will use the default value specified in the template.
        """
        return pulumi.get(self, "parameters")

    @parameters.setter
    def parameters(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['StackParameterArgs']]]]):
        pulumi.set(self, "parameters", value)

    @property
    @pulumi.getter(name="ramRoleName")
    def ram_role_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the RAM role. ROS assumes the specified RAM role to create the stack and call API operations by using the credentials of the role.
        """
        return pulumi.get(self, "ram_role_name")

    @ram_role_name.setter
    def ram_role_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "ram_role_name", value)

    @property
    @pulumi.getter(name="replacementOption")
    def replacement_option(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies whether to enable replacement update after a resource attribute that does not support modification update is changed. Modification update keeps the physical ID of the resource unchanged. However, the resource is deleted and then recreated, and its physical ID is changed if replacement update is enabled.
        """
        return pulumi.get(self, "replacement_option")

    @replacement_option.setter
    def replacement_option(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "replacement_option", value)

    @property
    @pulumi.getter(name="retainAllResources")
    def retain_all_resources(self) -> Optional[pulumi.Input[bool]]:
        """
        The retain all resources.
        """
        return pulumi.get(self, "retain_all_resources")

    @retain_all_resources.setter
    def retain_all_resources(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "retain_all_resources", value)

    @property
    @pulumi.getter(name="retainResources")
    def retain_resources(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Specifies whether to retain the resources in the stack.
        """
        return pulumi.get(self, "retain_resources")

    @retain_resources.setter
    def retain_resources(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "retain_resources", value)

    @property
    @pulumi.getter(name="stackName")
    def stack_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name can be up to 255 characters in length and can contain digits, letters, hyphens (-), and underscores (_). It must start with a digit or letter.
        """
        return pulumi.get(self, "stack_name")

    @stack_name.setter
    def stack_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "stack_name", value)

    @property
    @pulumi.getter(name="stackPolicyBody")
    def stack_policy_body(self) -> Optional[pulumi.Input[str]]:
        """
        The structure that contains the stack policy body. The stack policy body must be 1 to 16,384 bytes in length.
        """
        return pulumi.get(self, "stack_policy_body")

    @stack_policy_body.setter
    def stack_policy_body(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "stack_policy_body", value)

    @property
    @pulumi.getter(name="stackPolicyDuringUpdateBody")
    def stack_policy_during_update_body(self) -> Optional[pulumi.Input[str]]:
        """
        The structure that contains the body of the temporary overriding stack policy. The stack policy body must be 1 to 16,384 bytes in length.
        """
        return pulumi.get(self, "stack_policy_during_update_body")

    @stack_policy_during_update_body.setter
    def stack_policy_during_update_body(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "stack_policy_during_update_body", value)

    @property
    @pulumi.getter(name="stackPolicyDuringUpdateUrl")
    def stack_policy_during_update_url(self) -> Optional[pulumi.Input[str]]:
        """
        The URL of the file that contains the temporary overriding stack policy. The URL must point to a policy located in an HTTP or HTTPS web server or an Alibaba Cloud OSS bucket. Examples: oss://ros/stack-policy/demo and oss://ros/stack-policy/demo?RegionId=cn-hangzhou. The policy can be up to 16,384 bytes in length and the URL can be up to 1,350 bytes in length. If the region of the OSS bucket is not specified, the RegionId value is used by default.
        """
        return pulumi.get(self, "stack_policy_during_update_url")

    @stack_policy_during_update_url.setter
    def stack_policy_during_update_url(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "stack_policy_during_update_url", value)

    @property
    @pulumi.getter(name="stackPolicyUrl")
    def stack_policy_url(self) -> Optional[pulumi.Input[str]]:
        """
        The URL of the file that contains the stack policy. The URL must point to a policy located in an HTTP or HTTPS web server or an Alibaba Cloud OSS bucket. Examples: oss://ros/stack-policy/demo and oss://ros/stack-policy/demo?RegionId=cn-hangzhou. The policy can be up to 16,384 bytes in length and the URL can be up to 1,350 bytes in length. If the region of the OSS bucket is not specified, the RegionId value is used by default.
        """
        return pulumi.get(self, "stack_policy_url")

    @stack_policy_url.setter
    def stack_policy_url(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "stack_policy_url", value)

    @property
    @pulumi.getter
    def status(self) -> Optional[pulumi.Input[str]]:
        """
        The status of Stack.
        """
        return pulumi.get(self, "status")

    @status.setter
    def status(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "status", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        A mapping of tags to assign to the resource.
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "tags", value)

    @property
    @pulumi.getter(name="templateBody")
    def template_body(self) -> Optional[pulumi.Input[str]]:
        """
        The structure that contains the template body. The template body must be 1 to 524,288 bytes in length. If the length of the template body is longer than required, we recommend that you add parameters to the HTTP POST request body to avoid request failures due to excessive length of URLs.
        """
        return pulumi.get(self, "template_body")

    @template_body.setter
    def template_body(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "template_body", value)

    @property
    @pulumi.getter(name="templateUrl")
    def template_url(self) -> Optional[pulumi.Input[str]]:
        """
        The URL of the file that contains the template body. The URL must point to a template located in an HTTP or HTTPS web server or an Alibaba Cloud OSS bucket. Examples: oss://ros/template/demo and oss://ros/template/demo?RegionId=cn-hangzhou. The template must be 1 to 524,288 bytes in length. If the region of the OSS bucket is not specified, the RegionId value is used by default.
        """
        return pulumi.get(self, "template_url")

    @template_url.setter
    def template_url(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "template_url", value)

    @property
    @pulumi.getter(name="templateVersion")
    def template_version(self) -> Optional[pulumi.Input[str]]:
        """
        The version of the template.
        """
        return pulumi.get(self, "template_version")

    @template_version.setter
    def template_version(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "template_version", value)

    @property
    @pulumi.getter(name="timeoutInMinutes")
    def timeout_in_minutes(self) -> Optional[pulumi.Input[int]]:
        """
        The timeout period that is specified for the stack creation request. Default to: `60`.
        """
        return pulumi.get(self, "timeout_in_minutes")

    @timeout_in_minutes.setter
    def timeout_in_minutes(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "timeout_in_minutes", value)

    @property
    @pulumi.getter(name="usePreviousParameters")
    def use_previous_parameters(self) -> Optional[pulumi.Input[bool]]:
        """
        Specifies whether to use the values that were passed last time for the parameters that you do not specify in the current request.
        """
        return pulumi.get(self, "use_previous_parameters")

    @use_previous_parameters.setter
    def use_previous_parameters(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "use_previous_parameters", value)
class Stack(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
create_option: Optional[pulumi.Input[str]] = None,
deletion_protection: Optional[pulumi.Input[str]] = None,
disable_rollback: Optional[pulumi.Input[bool]] = None,
notification_urls: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
parameters: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['StackParameterArgs']]]]] = None,
ram_role_name: Optional[pulumi.Input[str]] = None,
replacement_option: Optional[pulumi.Input[str]] = None,
retain_all_resources: Optional[pulumi.Input[bool]] = None,
retain_resources: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
stack_name: Optional[pulumi.Input[str]] = None,
stack_policy_body: Optional[pulumi.Input[str]] = None,
stack_policy_during_update_body: Optional[pulumi.Input[str]] = None,
stack_policy_during_update_url: Optional[pulumi.Input[str]] = None,
stack_policy_url: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
template_body: Optional[pulumi.Input[str]] = None,
template_url: Optional[pulumi.Input[str]] = None,
template_version: Optional[pulumi.Input[str]] = None,
timeout_in_minutes: Optional[pulumi.Input[int]] = None,
use_previous_parameters: Optional[pulumi.Input[bool]] = None,
__props__=None):
"""
Provides a ROS Stack resource.
For information about ROS Stack and how to use it, see [What is Stack](https://www.alibabacloud.com/help/en/doc-detail/132086.htm).
> **NOTE:** Available in v1.106.0+.
## Example Usage
Basic Usage
```python
import pulumi
import pulumi_alicloud as alicloud
example = alicloud.ros.Stack("example",
stack_name="tf-testaccstack",
stack_policy_body=\"\"\" {
"Statement": [{
"Action": "Update:Delete",
"Resource": "*",
"Effect": "Allow",
"Principal": "*"
}]
}
\"\"\",
template_body=\"\"\" {
"ROSTemplateFormatVersion": "2015-09-01"
}
\"\"\")
```
## Import
ROS Stack can be imported using the id, e.g.
```sh
$ pulumi import alicloud:ros/stack:Stack example <stack_id>
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] create_option: Specifies whether to delete the stack after it is created.
:param pulumi.Input[str] deletion_protection: Specifies whether to enable deletion protection on the stack. Valid values: `Disabled`, `Enabled`. Default to: `Disabled`
:param pulumi.Input[bool] disable_rollback: Specifies whether to disable rollback on stack creation failure. Default to: `false`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] notification_urls: The callback URL for receiving stack event N. Only HTTP POST is supported. Maximum value of N: 5.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['StackParameterArgs']]]] parameters: The parameters. If the parameter name and value are not specified, ROS will use the default value specified in the template.
:param pulumi.Input[str] ram_role_name: The name of the RAM role. ROS assumes the specified RAM role to create the stack and call API operations by using the credentials of the role.
:param pulumi.Input[str] replacement_option: Specifies whether to enable replacement update after a resource attribute that does not support modification update is changed. Modification update keeps the physical ID of the resource unchanged. However, the resource is deleted and then recreated, and its physical ID is changed if replacement update is enabled.
:param pulumi.Input[bool] retain_all_resources: The retain all resources.
:param pulumi.Input[Sequence[pulumi.Input[str]]] retain_resources: Specifies whether to retain the resources in the stack.
:param pulumi.Input[str] stack_name: The name can be up to 255 characters in length and can contain digits, letters, hyphens (-), and underscores (_). It must start with a digit or letter.
:param pulumi.Input[str] stack_policy_body: The structure that contains the stack policy body. The stack policy body must be 1 to 16,384 bytes in length.
:param pulumi.Input[str] stack_policy_during_update_body: The structure that contains the body of the temporary overriding stack policy. The stack policy body must be 1 to 16,384 bytes in length.
:param pulumi.Input[str] stack_policy_during_update_url: The URL of the file that contains the temporary overriding stack policy. The URL must point to a policy located in an HTTP or HTTPS web server or an Alibaba Cloud OSS bucket. Examples: oss://ros/stack-policy/demo and oss://ros/stack-policy/demo?RegionId=cn-hangzhou. The policy can be up to 16,384 bytes in length and the URL can be up to 1,350 bytes in length. If the region of the OSS bucket is not specified, the RegionId value is used by default.
:param pulumi.Input[str] stack_policy_url: The URL of the file that contains the stack policy. The URL must point to a policy located in an HTTP or HTTPS web server or an Alibaba Cloud OSS bucket. Examples: oss://ros/stack-policy/demo and oss://ros/stack-policy/demo?RegionId=cn-hangzhou. The policy can be up to 16,384 bytes in length and the URL can be up to 1,350 bytes in length. If the region of the OSS bucket is not specified, the RegionId value is used by default.
:param pulumi.Input[Mapping[str, Any]] tags: A mapping of tags to assign to the resource.
:param pulumi.Input[str] template_body: The structure that contains the template body. The template body must be 1 to 524,288 bytes in length. If the length of the template body is longer than required, we recommend that you add parameters to the HTTP POST request body to avoid request failures due to excessive length of URLs.
:param pulumi.Input[str] template_url: The URL of the file that contains the template body. The URL must point to a template located in an HTTP or HTTPS web server or an Alibaba Cloud OSS bucket. Examples: oss://ros/template/demo and oss://ros/template/demo?RegionId=cn-hangzhou. The template must be 1 to 524,288 bytes in length. If the region of the OSS bucket is not specified, the RegionId value is used by default.
:param pulumi.Input[str] template_version: The version of the template.
:param pulumi.Input[int] timeout_in_minutes: The timeout period that is specified for the stack creation request. Default to: `60`.
:param pulumi.Input[bool] use_previous_parameters: Specifies whether to use the values that were passed last time for the parameters that you do not specify in the current request.
"""
...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: StackArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Provides a ROS Stack resource.

        For information about ROS Stack and how to use it, see [What is Stack](https://www.alibabacloud.com/help/en/doc-detail/132086.htm).

        > **NOTE:** Available in v1.106.0+.

        ## Example Usage

        Basic Usage

        ```python
        import pulumi
        import pulumi_alicloud as alicloud

        example = alicloud.ros.Stack("example",
            stack_name="tf-testaccstack",
            stack_policy_body=\"\"\" {
                "Statement": [{
                    "Action": "Update:Delete",
                    "Resource": "*",
                    "Effect": "Allow",
                    "Principal": "*"
                }]
            }
            \"\"\",
            template_body=\"\"\" {
                "ROSTemplateFormatVersion": "2015-09-01"
            }
            \"\"\")
        ```

        ## Import

        ROS Stack can be imported using the id, e.g.

        ```sh
        $ pulumi import alicloud:ros/stack:Stack example <stack_id>
        ```

        :param str resource_name: The name of the resource.
        :param StackArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(StackArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       create_option: Optional[pulumi.Input[str]] = None,
                       deletion_protection: Optional[pulumi.Input[str]] = None,
                       disable_rollback: Optional[pulumi.Input[bool]] = None,
                       notification_urls: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                       parameters: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['StackParameterArgs']]]]] = None,
                       ram_role_name: Optional[pulumi.Input[str]] = None,
                       replacement_option: Optional[pulumi.Input[str]] = None,
                       retain_all_resources: Optional[pulumi.Input[bool]] = None,
                       retain_resources: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                       stack_name: Optional[pulumi.Input[str]] = None,
                       stack_policy_body: Optional[pulumi.Input[str]] = None,
                       stack_policy_during_update_body: Optional[pulumi.Input[str]] = None,
                       stack_policy_during_update_url: Optional[pulumi.Input[str]] = None,
                       stack_policy_url: Optional[pulumi.Input[str]] = None,
                       tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                       template_body: Optional[pulumi.Input[str]] = None,
                       template_url: Optional[pulumi.Input[str]] = None,
                       template_version: Optional[pulumi.Input[str]] = None,
                       timeout_in_minutes: Optional[pulumi.Input[int]] = None,
                       use_previous_parameters: Optional[pulumi.Input[bool]] = None,
                       __props__=None):
        """Shared initializer backing both ``__init__`` overloads.

        Builds the ``StackArgs`` property bag and registers the resource with
        the Pulumi engine. ``__props__`` is only accepted when ``opts.id`` is
        set (i.e. when rehydrating an existing resource via ``get``).
        """
        # Normalize options: default instance, then stamp the provider version.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: __props__ must not be supplied by the caller.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            # __new__ (not the constructor) so fields can be populated directly.
            __props__ = StackArgs.__new__(StackArgs)

            __props__.__dict__["create_option"] = create_option
            __props__.__dict__["deletion_protection"] = deletion_protection
            __props__.__dict__["disable_rollback"] = disable_rollback
            __props__.__dict__["notification_urls"] = notification_urls
            __props__.__dict__["parameters"] = parameters
            __props__.__dict__["ram_role_name"] = ram_role_name
            __props__.__dict__["replacement_option"] = replacement_option
            __props__.__dict__["retain_all_resources"] = retain_all_resources
            __props__.__dict__["retain_resources"] = retain_resources
            # stack_name is the only required input; enforce it unless the
            # engine is reconstructing from a URN.
            if stack_name is None and not opts.urn:
                raise TypeError("Missing required property 'stack_name'")
            __props__.__dict__["stack_name"] = stack_name
            __props__.__dict__["stack_policy_body"] = stack_policy_body
            __props__.__dict__["stack_policy_during_update_body"] = stack_policy_during_update_body
            __props__.__dict__["stack_policy_during_update_url"] = stack_policy_during_update_url
            __props__.__dict__["stack_policy_url"] = stack_policy_url
            __props__.__dict__["tags"] = tags
            __props__.__dict__["template_body"] = template_body
            __props__.__dict__["template_url"] = template_url
            __props__.__dict__["template_version"] = template_version
            __props__.__dict__["timeout_in_minutes"] = timeout_in_minutes
            __props__.__dict__["use_previous_parameters"] = use_previous_parameters
            # Output-only attribute; populated by the provider after creation.
            __props__.__dict__["status"] = None
        super(Stack, __self__).__init__(
            'alicloud:ros/stack:Stack',
            resource_name,
            __props__,
            opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
create_option: Optional[pulumi.Input[str]] = None,
deletion_protection: Optional[pulumi.Input[str]] = None,
disable_rollback: Optional[pulumi.Input[bool]] = None,
notification_urls: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
parameters: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['StackParameterArgs']]]]] = None,
ram_role_name: Optional[pulumi.Input[str]] = None,
replacement_option: Optional[pulumi.Input[str]] = None,
retain_all_resources: Optional[pulumi.Input[bool]] = None,
retain_resources: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
stack_name: Optional[pulumi.Input[str]] = None,
stack_policy_body: Optional[pulumi.Input[str]] = None,
stack_policy_during_update_body: Optional[pulumi.Input[str]] = None,
stack_policy_during_update_url: Optional[pulumi.Input[str]] = None,
stack_policy_url: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
template_body: Optional[pulumi.Input[str]] = None,
template_url: Optional[pulumi.Input[str]] = None,
template_version: Optional[pulumi.Input[str]] = None,
timeout_in_minutes: Optional[pulumi.Input[int]] = None,
use_previous_parameters: Optional[pulumi.Input[bool]] = None) -> 'Stack':
"""
Get an existing Stack resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] create_option: Specifies whether to delete the stack after it is created.
:param pulumi.Input[str] deletion_protection: Specifies whether to enable deletion protection on the stack. Valid values: `Disabled`, `Enabled`. Default to: `Disabled`
:param pulumi.Input[bool] disable_rollback: Specifies whether to disable rollback on stack creation failure. Default to: `false`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] notification_urls: The callback URL for receiving stack event N. Only HTTP POST is supported. Maximum value of N: 5.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['StackParameterArgs']]]] parameters: The parameters. If the parameter name and value are not specified, ROS will use the default value specified in the template.
:param pulumi.Input[str] ram_role_name: The name of the RAM role. ROS assumes the specified RAM role to create the stack and call API operations by using the credentials of the role.
:param pulumi.Input[str] replacement_option: Specifies whether to enable replacement update after a resource attribute that does not support modification update is changed. Modification update keeps the physical ID of the resource unchanged. However, the resource is deleted and then recreated, and its physical ID is changed if replacement update is enabled.
:param pulumi.Input[bool] retain_all_resources: The retain all resources.
:param pulumi.Input[Sequence[pulumi.Input[str]]] retain_resources: Specifies whether to retain the resources in the stack.
:param pulumi.Input[str] stack_name: The name can be up to 255 characters in length and can contain digits, letters, hyphens (-), and underscores (_). It must start with a digit or letter.
:param pulumi.Input[str] stack_policy_body: The structure that contains the stack policy body. The stack policy body must be 1 to 16,384 bytes in length.
:param pulumi.Input[str] stack_policy_during_update_body: The structure that contains the body of the temporary overriding stack policy. The stack policy body must be 1 to 16,384 bytes in length.
:param pulumi.Input[str] stack_policy_during_update_url: The URL of the file that contains the temporary overriding stack policy. The URL must point to a policy located in an HTTP or HTTPS web server or an Alibaba Cloud OSS bucket. Examples: oss://ros/stack-policy/demo and oss://ros/stack-policy/demo?RegionId=cn-hangzhou. The policy can be up to 16,384 bytes in length and the URL can be up to 1,350 bytes in length. If the region of the OSS bucket is not specified, the RegionId value is used by default.
:param pulumi.Input[str] stack_policy_url: The URL of the file that contains the stack policy. The URL must point to a policy located in an HTTP or HTTPS web server or an Alibaba Cloud OSS bucket. Examples: oss://ros/stack-policy/demo and oss://ros/stack-policy/demo?RegionId=cn-hangzhou. The policy can be up to 16,384 bytes in length and the URL can be up to 1,350 bytes in length. If the region of the OSS bucket is not specified, the RegionId value is used by default.
:param pulumi.Input[str] status: The status of Stack.
:param pulumi.Input[Mapping[str, Any]] tags: A mapping of tags to assign to the resource.
:param pulumi.Input[str] template_body: The structure that contains the template body. The template body must be 1 to 524,288 bytes in length. If the length of the template body is longer than required, we recommend that you add parameters to the HTTP POST request body to avoid request failures due to excessive length of URLs.
:param pulumi.Input[str] template_url: The URL of the file that contains the template body. The URL must point to a template located in an HTTP or HTTPS web server or an Alibaba Cloud OSS bucket. Examples: oss://ros/template/demo and oss://ros/template/demo?RegionId=cn-hangzhou. The template must be 1 to 524,288 bytes in length. If the region of the OSS bucket is not specified, the RegionId value is used by default.
:param pulumi.Input[str] template_version: The version of the template.
:param pulumi.Input[int] timeout_in_minutes: The timeout period that is specified for the stack creation request. Default to: `60`.
:param pulumi.Input[bool] use_previous_parameters: Specifies whether to use the values that were passed last time for the parameters that you do not specify in the current request.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _StackState.__new__(_StackState)
__props__.__dict__["create_option"] = create_option
__props__.__dict__["deletion_protection"] = deletion_protection
__props__.__dict__["disable_rollback"] = disable_rollback
__props__.__dict__["notification_urls"] = notification_urls
__props__.__dict__["parameters"] = parameters
__props__.__dict__["ram_role_name"] = ram_role_name
__props__.__dict__["replacement_option"] = replacement_option
__props__.__dict__["retain_all_resources"] = retain_all_resources
__props__.__dict__["retain_resources"] = retain_resources
__props__.__dict__["stack_name"] = stack_name
__props__.__dict__["stack_policy_body"] = stack_policy_body
__props__.__dict__["stack_policy_during_update_body"] = stack_policy_during_update_body
__props__.__dict__["stack_policy_during_update_url"] = stack_policy_during_update_url
__props__.__dict__["stack_policy_url"] = stack_policy_url
__props__.__dict__["status"] = status
__props__.__dict__["tags"] = tags
__props__.__dict__["template_body"] = template_body
__props__.__dict__["template_url"] = template_url
__props__.__dict__["template_version"] = template_version
__props__.__dict__["timeout_in_minutes"] = timeout_in_minutes
__props__.__dict__["use_previous_parameters"] = use_previous_parameters
return Stack(resource_name, opts=opts, __props__=__props__)
    # --- Generated output property accessors: one getter per Stack attribute,
    # each delegating to pulumi.get with the snake_case property name. ---

    @property
    @pulumi.getter(name="createOption")
    def create_option(self) -> pulumi.Output[Optional[str]]:
        """
        Specifies whether to delete the stack after it is created.
        """
        return pulumi.get(self, "create_option")

    @property
    @pulumi.getter(name="deletionProtection")
    def deletion_protection(self) -> pulumi.Output[Optional[str]]:
        """
        Specifies whether to enable deletion protection on the stack. Valid values: `Disabled`, `Enabled`. Default to: `Disabled`
        """
        return pulumi.get(self, "deletion_protection")

    @property
    @pulumi.getter(name="disableRollback")
    def disable_rollback(self) -> pulumi.Output[Optional[bool]]:
        """
        Specifies whether to disable rollback on stack creation failure. Default to: `false`.
        """
        return pulumi.get(self, "disable_rollback")

    @property
    @pulumi.getter(name="notificationUrls")
    def notification_urls(self) -> pulumi.Output[Optional[Sequence[str]]]:
        """
        The callback URL for receiving stack event N. Only HTTP POST is supported. Maximum value of N: 5.
        """
        return pulumi.get(self, "notification_urls")

    @property
    @pulumi.getter
    def parameters(self) -> pulumi.Output[Optional[Sequence['outputs.StackParameter']]]:
        """
        The parameters. If the parameter name and value are not specified, ROS will use the default value specified in the template.
        """
        return pulumi.get(self, "parameters")

    @property
    @pulumi.getter(name="ramRoleName")
    def ram_role_name(self) -> pulumi.Output[Optional[str]]:
        """
        The name of the RAM role. ROS assumes the specified RAM role to create the stack and call API operations by using the credentials of the role.
        """
        return pulumi.get(self, "ram_role_name")

    @property
    @pulumi.getter(name="replacementOption")
    def replacement_option(self) -> pulumi.Output[Optional[str]]:
        """
        Specifies whether to enable replacement update after a resource attribute that does not support modification update is changed. Modification update keeps the physical ID of the resource unchanged. However, the resource is deleted and then recreated, and its physical ID is changed if replacement update is enabled.
        """
        return pulumi.get(self, "replacement_option")

    @property
    @pulumi.getter(name="retainAllResources")
    def retain_all_resources(self) -> pulumi.Output[Optional[bool]]:
        """
        The retain all resources.
        """
        return pulumi.get(self, "retain_all_resources")

    @property
    @pulumi.getter(name="retainResources")
    def retain_resources(self) -> pulumi.Output[Optional[Sequence[str]]]:
        """
        Specifies whether to retain the resources in the stack.
        """
        return pulumi.get(self, "retain_resources")

    @property
    @pulumi.getter(name="stackName")
    def stack_name(self) -> pulumi.Output[str]:
        """
        The name can be up to 255 characters in length and can contain digits, letters, hyphens (-), and underscores (_). It must start with a digit or letter.
        """
        return pulumi.get(self, "stack_name")

    @property
    @pulumi.getter(name="stackPolicyBody")
    def stack_policy_body(self) -> pulumi.Output[Optional[str]]:
        """
        The structure that contains the stack policy body. The stack policy body must be 1 to 16,384 bytes in length.
        """
        return pulumi.get(self, "stack_policy_body")

    @property
    @pulumi.getter(name="stackPolicyDuringUpdateBody")
    def stack_policy_during_update_body(self) -> pulumi.Output[Optional[str]]:
        """
        The structure that contains the body of the temporary overriding stack policy. The stack policy body must be 1 to 16,384 bytes in length.
        """
        return pulumi.get(self, "stack_policy_during_update_body")

    @property
    @pulumi.getter(name="stackPolicyDuringUpdateUrl")
    def stack_policy_during_update_url(self) -> pulumi.Output[Optional[str]]:
        """
        The URL of the file that contains the temporary overriding stack policy. The URL must point to a policy located in an HTTP or HTTPS web server or an Alibaba Cloud OSS bucket. Examples: oss://ros/stack-policy/demo and oss://ros/stack-policy/demo?RegionId=cn-hangzhou. The policy can be up to 16,384 bytes in length and the URL can be up to 1,350 bytes in length. If the region of the OSS bucket is not specified, the RegionId value is used by default.
        """
        return pulumi.get(self, "stack_policy_during_update_url")

    @property
    @pulumi.getter(name="stackPolicyUrl")
    def stack_policy_url(self) -> pulumi.Output[Optional[str]]:
        """
        The URL of the file that contains the stack policy. The URL must point to a policy located in an HTTP or HTTPS web server or an Alibaba Cloud OSS bucket. Examples: oss://ros/stack-policy/demo and oss://ros/stack-policy/demo?RegionId=cn-hangzhou. The policy can be up to 16,384 bytes in length and the URL can be up to 1,350 bytes in length. If the region of the OSS bucket is not specified, the RegionId value is used by default.
        """
        return pulumi.get(self, "stack_policy_url")

    @property
    @pulumi.getter
    def status(self) -> pulumi.Output[str]:
        """
        The status of Stack.
        """
        return pulumi.get(self, "status")

    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, Any]]]:
        """
        A mapping of tags to assign to the resource.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter(name="templateBody")
    def template_body(self) -> pulumi.Output[Optional[str]]:
        """
        The structure that contains the template body. The template body must be 1 to 524,288 bytes in length. If the length of the template body is longer than required, we recommend that you add parameters to the HTTP POST request body to avoid request failures due to excessive length of URLs.
        """
        return pulumi.get(self, "template_body")

    @property
    @pulumi.getter(name="templateUrl")
    def template_url(self) -> pulumi.Output[Optional[str]]:
        """
        The URL of the file that contains the template body. The URL must point to a template located in an HTTP or HTTPS web server or an Alibaba Cloud OSS bucket. Examples: oss://ros/template/demo and oss://ros/template/demo?RegionId=cn-hangzhou. The template must be 1 to 524,288 bytes in length. If the region of the OSS bucket is not specified, the RegionId value is used by default.
        """
        return pulumi.get(self, "template_url")

    @property
    @pulumi.getter(name="templateVersion")
    def template_version(self) -> pulumi.Output[Optional[str]]:
        """
        The version of the template.
        """
        return pulumi.get(self, "template_version")

    @property
    @pulumi.getter(name="timeoutInMinutes")
    def timeout_in_minutes(self) -> pulumi.Output[Optional[int]]:
        """
        The timeout period that is specified for the stack creation request. Default to: `60`.
        """
        return pulumi.get(self, "timeout_in_minutes")

    @property
    @pulumi.getter(name="usePreviousParameters")
    def use_previous_parameters(self) -> pulumi.Output[Optional[bool]]:
        """
        Specifies whether to use the values that were passed last time for the parameters that you do not specify in the current request.
        """
        return pulumi.get(self, "use_previous_parameters")
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['StackArgs', 'Stack']
@pulumi.input_type
class StackArgs:
def __init__(__self__, *,
stack_name: pulumi.Input[str],
create_option: Optional[pulumi.Input[str]] = None,
deletion_protection: Optional[pulumi.Input[str]] = None,
disable_rollback: Optional[pulumi.Input[bool]] = None,
notification_urls: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
parameters: Optional[pulumi.Input[Sequence[pulumi.Input['StackParameterArgs']]]] = None,
ram_role_name: Optional[pulumi.Input[str]] = None,
replacement_option: Optional[pulumi.Input[str]] = None,
retain_all_resources: Optional[pulumi.Input[bool]] = None,
retain_resources: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
stack_policy_body: Optional[pulumi.Input[str]] = None,
stack_policy_during_update_body: Optional[pulumi.Input[str]] = None,
stack_policy_during_update_url: Optional[pulumi.Input[str]] = None,
stack_policy_url: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
template_body: Optional[pulumi.Input[str]] = None,
template_url: Optional[pulumi.Input[str]] = None,
template_version: Optional[pulumi.Input[str]] = None,
timeout_in_minutes: Optional[pulumi.Input[int]] = None,
use_previous_parameters: Optional[pulumi.Input[bool]] = None):
"""
The set of arguments for constructing a Stack resource.
:param pulumi.Input[str] stack_name: The name can be up to 255 characters in length and can contain digits, letters, hyphens (-), and underscores (_). It must start with a digit or letter.
:param pulumi.Input[str] create_option: Specifies whether to delete the stack after it is created.
:param pulumi.Input[str] deletion_protection: Specifies whether to enable deletion protection on the stack. Valid values: `Disabled`, `Enabled`. Default to: `Disabled`
:param pulumi.Input[bool] disable_rollback: Specifies whether to disable rollback on stack creation failure. Default to: `false`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] notification_urls: The callback URL for receiving stack event N. Only HTTP POST is supported. Maximum value of N: 5.
:param pulumi.Input[Sequence[pulumi.Input['StackParameterArgs']]] parameters: The parameters. If the parameter name and value are not specified, ROS will use the default value specified in the template.
:param pulumi.Input[str] ram_role_name: The name of the RAM role. ROS assumes the specified RAM role to create the stack and call API operations by using the credentials of the role.
:param pulumi.Input[str] replacement_option: Specifies whether to enable replacement update after a resource attribute that does not support modification update is changed. Modification update keeps the physical ID of the resource unchanged. However, the resource is deleted and then recreated, and its physical ID is changed if replacement update is enabled.
:param pulumi.Input[bool] retain_all_resources: The retain all resources.
:param pulumi.Input[Sequence[pulumi.Input[str]]] retain_resources: Specifies whether to retain the resources in the stack.
:param pulumi.Input[str] stack_policy_body: The structure that contains the stack policy body. The stack policy body must be 1 to 16,384 bytes in length.
:param pulumi.Input[str] stack_policy_during_update_body: The structure that contains the body of the temporary overriding stack policy. The stack policy body must be 1 to 16,384 bytes in length.
:param pulumi.Input[str] stack_policy_during_update_url: The URL of the file that contains the temporary overriding stack policy. The URL must point to a policy located in an HTTP or HTTPS web server or an Alibaba Cloud OSS bucket. Examples: oss://ros/stack-policy/demo and oss://ros/stack-policy/demo?RegionId=cn-hangzhou. The policy can be up to 16,384 bytes in length and the URL can be up to 1,350 bytes in length. If the region of the OSS bucket is not specified, the RegionId value is used by default.
:param pulumi.Input[str] stack_policy_url: The URL of the file that contains the stack policy. The URL must point to a policy located in an HTTP or HTTPS web server or an Alibaba Cloud OSS bucket. Examples: oss://ros/stack-policy/demo and oss://ros/stack-policy/demo?RegionId=cn-hangzhou. The policy can be up to 16,384 bytes in length and the URL can be up to 1,350 bytes in length. If the region of the OSS bucket is not specified, the RegionId value is used by default.
:param pulumi.Input[Mapping[str, Any]] tags: A mapping of tags to assign to the resource.
:param pulumi.Input[str] template_body: The structure that contains the template body. The template body must be 1 to 524,288 bytes in length. If the length of the template body is longer than required, we recommend that you add parameters to the HTTP POST request body to avoid request failures due to excessive length of URLs.
:param pulumi.Input[str] template_url: The URL of the file that contains the template body. The URL must point to a template located in an HTTP or HTTPS web server or an Alibaba Cloud OSS bucket. Examples: oss://ros/template/demo and oss://ros/template/demo?RegionId=cn-hangzhou. The template must be 1 to 524,288 bytes in length. If the region of the OSS bucket is not specified, the RegionId value is used by default.
:param pulumi.Input[str] template_version: The version of the template.
:param pulumi.Input[int] timeout_in_minutes: The timeout period that is specified for the stack creation request. Default to: `60`.
:param pulumi.Input[bool] use_previous_parameters: Specifies whether to use the values that were passed last time for the parameters that you do not specify in the current request.
"""
pulumi.set(__self__, "stack_name", stack_name)
if create_option is not None:
pulumi.set(__self__, "create_option", create_option)
if deletion_protection is not None:
pulumi.set(__self__, "deletion_protection", deletion_protection)
if disable_rollback is not None:
pulumi.set(__self__, "disable_rollback", disable_rollback)
if notification_urls is not None:
pulumi.set(__self__, "notification_urls", notification_urls)
if parameters is not None:
pulumi.set(__self__, "parameters", parameters)
if ram_role_name is not None:
pulumi.set(__self__, "ram_role_name", ram_role_name)
if replacement_option is not None:
pulumi.set(__self__, "replacement_option", replacement_option)
if retain_all_resources is not None:
pulumi.set(__self__, "retain_all_resources", retain_all_resources)
if retain_resources is not None:
pulumi.set(__self__, "retain_resources", retain_resources)
if stack_policy_body is not None:
pulumi.set(__self__, "stack_policy_body", stack_policy_body)
if stack_policy_during_update_body is not None:
pulumi.set(__self__, "stack_policy_during_update_body", stack_policy_during_update_body)
if stack_policy_during_update_url is not None:
pulumi.set(__self__, "stack_policy_during_update_url", stack_policy_during_update_url)
if stack_policy_url is not None:
pulumi.set(__self__, "stack_policy_url", stack_policy_url)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if template_body is not None:
pulumi.set(__self__, "template_body", template_body)
if template_url is not None:
pulumi.set(__self__, "template_url", template_url)
if template_version is not None:
pulumi.set(__self__, "template_version", template_version)
if timeout_in_minutes is not None:
pulumi.set(__self__, "timeout_in_minutes", timeout_in_minutes)
if use_previous_parameters is not None:
pulumi.set(__self__, "use_previous_parameters", use_previous_parameters)
@property
@pulumi.getter(name="stackName")
def stack_name(self) -> pulumi.Input[str]:
"""
The name can be up to 255 characters in length and can contain digits, letters, hyphens (-), and underscores (_). It must start with a digit or letter.
"""
return pulumi.get(self, "stack_name")
@stack_name.setter
def stack_name(self, value: pulumi.Input[str]):
pulumi.set(self, "stack_name", value)
@property
@pulumi.getter(name="createOption")
def create_option(self) -> Optional[pulumi.Input[str]]:
"""
Specifies whether to delete the stack after it is created.
"""
return pulumi.get(self, "create_option")
@create_option.setter
def create_option(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "create_option", value)
@property
@pulumi.getter(name="deletionProtection")
def deletion_protection(self) -> Optional[pulumi.Input[str]]:
"""
Specifies whether to enable deletion protection on the stack. Valid values: `Disabled`, `Enabled`. Default to: `Disabled`
"""
return pulumi.get(self, "deletion_protection")
@deletion_protection.setter
def deletion_protection(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "deletion_protection", value)
@property
@pulumi.getter(name="disableRollback")
def disable_rollback(self) -> Optional[pulumi.Input[bool]]:
"""
Specifies whether to disable rollback on stack creation failure. Default to: `false`.
"""
return pulumi.get(self, "disable_rollback")
@disable_rollback.setter
def disable_rollback(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "disable_rollback", value)
@property
@pulumi.getter(name="notificationUrls")
def notification_urls(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The callback URL for receiving stack event N. Only HTTP POST is supported. Maximum value of N: 5.
"""
return pulumi.get(self, "notification_urls")
@notification_urls.setter
def notification_urls(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "notification_urls", value)
@property
@pulumi.getter
def parameters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['StackParameterArgs']]]]:
"""
The parameters. If the parameter name and value are not specified, ROS will use the default value specified in the template.
"""
return pulumi.get(self, "parameters")
@parameters.setter
def parameters(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['StackParameterArgs']]]]):
pulumi.set(self, "parameters", value)
@property
@pulumi.getter(name="ramRoleName")
def ram_role_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the RAM role. ROS assumes the specified RAM role to create the stack and call API operations by using the credentials of the role.
"""
return pulumi.get(self, "ram_role_name")
@ram_role_name.setter
def ram_role_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ram_role_name", value)
@property
@pulumi.getter(name="replacementOption")
def replacement_option(self) -> Optional[pulumi.Input[str]]:
"""
Specifies whether to enable replacement update after a resource attribute that does not support modification update is changed. Modification update keeps the physical ID of the resource unchanged. However, the resource is deleted and then recreated, and its physical ID is changed if replacement update is enabled.
"""
return pulumi.get(self, "replacement_option")
@replacement_option.setter
def replacement_option(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "replacement_option", value)
@property
@pulumi.getter(name="retainAllResources")
def retain_all_resources(self) -> Optional[pulumi.Input[bool]]:
"""
The retain all resources.
"""
return pulumi.get(self, "retain_all_resources")
@retain_all_resources.setter
def retain_all_resources(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "retain_all_resources", value)
@property
@pulumi.getter(name="retainResources")
def retain_resources(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Specifies whether to retain the resources in the stack.
"""
return pulumi.get(self, "retain_resources")
@retain_resources.setter
def retain_resources(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "retain_resources", value)
@property
@pulumi.getter(name="stackPolicyBody")
def stack_policy_body(self) -> Optional[pulumi.Input[str]]:
"""
The structure that contains the stack policy body. The stack policy body must be 1 to 16,384 bytes in length.
"""
return pulumi.get(self, "stack_policy_body")
@stack_policy_body.setter
def stack_policy_body(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "stack_policy_body", value)
@property
@pulumi.getter(name="stackPolicyDuringUpdateBody")
def stack_policy_during_update_body(self) -> Optional[pulumi.Input[str]]:
"""
The structure that contains the body of the temporary overriding stack policy. The stack policy body must be 1 to 16,384 bytes in length.
"""
return pulumi.get(self, "stack_policy_during_update_body")
@stack_policy_during_update_body.setter
def stack_policy_during_update_body(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "stack_policy_during_update_body", value)
@property
@pulumi.getter(name="stackPolicyDuringUpdateUrl")
def stack_policy_during_update_url(self) -> Optional[pulumi.Input[str]]:
    """
    The URL of the file that contains the temporary overriding stack policy.
    The URL must point to a policy located in an HTTP or HTTPS web server or
    an Alibaba Cloud OSS bucket. Examples: oss://ros/stack-policy/demo and
    oss://ros/stack-policy/demo?RegionId=cn-hangzhou. The policy can be up
    to 16,384 bytes in length and the URL can be up to 1,350 bytes in
    length. If the region of the OSS bucket is not specified, the RegionId
    value is used by default.
    """
    return pulumi.get(self, "stack_policy_during_update_url")

@stack_policy_during_update_url.setter
def stack_policy_during_update_url(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "stack_policy_during_update_url", value)
@property
@pulumi.getter(name="stackPolicyUrl")
def stack_policy_url(self) -> Optional[pulumi.Input[str]]:
    """
    The URL of the file that contains the stack policy. The URL must point
    to a policy located in an HTTP or HTTPS web server or an Alibaba Cloud
    OSS bucket. Examples: oss://ros/stack-policy/demo and
    oss://ros/stack-policy/demo?RegionId=cn-hangzhou. The policy can be up
    to 16,384 bytes in length and the URL can be up to 1,350 bytes in
    length. If the region of the OSS bucket is not specified, the RegionId
    value is used by default.
    """
    return pulumi.get(self, "stack_policy_url")

@stack_policy_url.setter
def stack_policy_url(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "stack_policy_url", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
    """
    A mapping of tags to assign to the resource.
    """
    return pulumi.get(self, "tags")

@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
    pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="templateBody")
def template_body(self) -> Optional[pulumi.Input[str]]:
    """
    The structure that contains the template body. The template body must
    be 1 to 524,288 bytes in length. If the length of the template body is
    longer than required, we recommend that you add parameters to the HTTP
    POST request body to avoid request failures due to excessive length of
    URLs.
    """
    return pulumi.get(self, "template_body")

@template_body.setter
def template_body(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "template_body", value)
@property
@pulumi.getter(name="templateUrl")
def template_url(self) -> Optional[pulumi.Input[str]]:
    """
    The URL of the file that contains the template body. The URL must point
    to a template located in an HTTP or HTTPS web server or an Alibaba Cloud
    OSS bucket. Examples: oss://ros/template/demo and
    oss://ros/template/demo?RegionId=cn-hangzhou. The template must be 1 to
    524,288 bytes in length. If the region of the OSS bucket is not
    specified, the RegionId value is used by default.
    """
    return pulumi.get(self, "template_url")

@template_url.setter
def template_url(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "template_url", value)
@property
@pulumi.getter(name="templateVersion")
def template_version(self) -> Optional[pulumi.Input[str]]:
    """
    The version of the template.
    """
    return pulumi.get(self, "template_version")

@template_version.setter
def template_version(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "template_version", value)
@property
@pulumi.getter(name="timeoutInMinutes")
def timeout_in_minutes(self) -> Optional[pulumi.Input[int]]:
    """
    The timeout period that is specified for the stack creation request.
    Defaults to `60`.
    """
    return pulumi.get(self, "timeout_in_minutes")

@timeout_in_minutes.setter
def timeout_in_minutes(self, value: Optional[pulumi.Input[int]]):
    pulumi.set(self, "timeout_in_minutes", value)
@property
@pulumi.getter(name="usePreviousParameters")
def use_previous_parameters(self) -> Optional[pulumi.Input[bool]]:
    """
    Specifies whether to use the values that were passed last time for the
    parameters that you do not specify in the current request.
    """
    return pulumi.get(self, "use_previous_parameters")

@use_previous_parameters.setter
def use_previous_parameters(self, value: Optional[pulumi.Input[bool]]):
    pulumi.set(self, "use_previous_parameters", value)
@pulumi.input_type
class _StackState:
    # Generated state bag for `Stack.get()`: every field is optional because
    # lookups may filter on any subset of properties. Values live in pulumi's
    # internal property store (pulumi.get/pulumi.set), not plain attributes.
    def __init__(__self__, *,
                 create_option: Optional[pulumi.Input[str]] = None,
                 deletion_protection: Optional[pulumi.Input[str]] = None,
                 disable_rollback: Optional[pulumi.Input[bool]] = None,
                 notification_urls: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 parameters: Optional[pulumi.Input[Sequence[pulumi.Input['StackParameterArgs']]]] = None,
                 ram_role_name: Optional[pulumi.Input[str]] = None,
                 replacement_option: Optional[pulumi.Input[str]] = None,
                 retain_all_resources: Optional[pulumi.Input[bool]] = None,
                 retain_resources: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 stack_name: Optional[pulumi.Input[str]] = None,
                 stack_policy_body: Optional[pulumi.Input[str]] = None,
                 stack_policy_during_update_body: Optional[pulumi.Input[str]] = None,
                 stack_policy_during_update_url: Optional[pulumi.Input[str]] = None,
                 stack_policy_url: Optional[pulumi.Input[str]] = None,
                 status: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 template_body: Optional[pulumi.Input[str]] = None,
                 template_url: Optional[pulumi.Input[str]] = None,
                 template_version: Optional[pulumi.Input[str]] = None,
                 timeout_in_minutes: Optional[pulumi.Input[int]] = None,
                 use_previous_parameters: Optional[pulumi.Input[bool]] = None):
        """
        Input properties used for looking up and filtering Stack resources.

        :param pulumi.Input[str] create_option: Specifies whether to delete the stack after it is created.
        :param pulumi.Input[str] deletion_protection: Specifies whether to enable deletion protection on the stack. Valid values: `Disabled`, `Enabled`. Defaults to `Disabled`.
        :param pulumi.Input[bool] disable_rollback: Specifies whether to disable rollback on stack creation failure. Defaults to `false`.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] notification_urls: The callback URL for receiving stack event N. Only HTTP POST is supported. Maximum value of N: 5.
        :param pulumi.Input[Sequence[pulumi.Input['StackParameterArgs']]] parameters: The parameters. If the parameter name and value are not specified, ROS will use the default value specified in the template.
        :param pulumi.Input[str] ram_role_name: The name of the RAM role. ROS assumes the specified RAM role to create the stack and call API operations by using the credentials of the role.
        :param pulumi.Input[str] replacement_option: Specifies whether to enable replacement update after a resource attribute that does not support modification update is changed. Modification update keeps the physical ID of the resource unchanged. However, the resource is deleted and then recreated, and its physical ID is changed if replacement update is enabled.
        :param pulumi.Input[bool] retain_all_resources: Whether to retain all resources in the stack.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] retain_resources: The resources to retain in the stack (a list of resource names; the boolean counterpart is `retain_all_resources`).
        :param pulumi.Input[str] stack_name: The name can be up to 255 characters in length and can contain digits, letters, hyphens (-), and underscores (_). It must start with a digit or letter.
        :param pulumi.Input[str] stack_policy_body: The structure that contains the stack policy body. The stack policy body must be 1 to 16,384 bytes in length.
        :param pulumi.Input[str] stack_policy_during_update_body: The structure that contains the body of the temporary overriding stack policy. The stack policy body must be 1 to 16,384 bytes in length.
        :param pulumi.Input[str] stack_policy_during_update_url: The URL of the file that contains the temporary overriding stack policy. The URL must point to a policy located in an HTTP or HTTPS web server or an Alibaba Cloud OSS bucket. Examples: oss://ros/stack-policy/demo and oss://ros/stack-policy/demo?RegionId=cn-hangzhou. The policy can be up to 16,384 bytes in length and the URL can be up to 1,350 bytes in length. If the region of the OSS bucket is not specified, the RegionId value is used by default.
        :param pulumi.Input[str] stack_policy_url: The URL of the file that contains the stack policy. The URL must point to a policy located in an HTTP or HTTPS web server or an Alibaba Cloud OSS bucket. Examples: oss://ros/stack-policy/demo and oss://ros/stack-policy/demo?RegionId=cn-hangzhou. The policy can be up to 16,384 bytes in length and the URL can be up to 1,350 bytes in length. If the region of the OSS bucket is not specified, the RegionId value is used by default.
        :param pulumi.Input[str] status: The status of Stack.
        :param pulumi.Input[Mapping[str, Any]] tags: A mapping of tags to assign to the resource.
        :param pulumi.Input[str] template_body: The structure that contains the template body. The template body must be 1 to 524,288 bytes in length. If the length of the template body is longer than required, we recommend that you add parameters to the HTTP POST request body to avoid request failures due to excessive length of URLs.
        :param pulumi.Input[str] template_url: The URL of the file that contains the template body. The URL must point to a template located in an HTTP or HTTPS web server or an Alibaba Cloud OSS bucket. Examples: oss://ros/template/demo and oss://ros/template/demo?RegionId=cn-hangzhou. The template must be 1 to 524,288 bytes in length. If the region of the OSS bucket is not specified, the RegionId value is used by default.
        :param pulumi.Input[str] template_version: The version of the template.
        :param pulumi.Input[int] timeout_in_minutes: The timeout period that is specified for the stack creation request. Defaults to `60`.
        :param pulumi.Input[bool] use_previous_parameters: Specifies whether to use the values that were passed last time for the parameters that you do not specify in the current request.
        """
        # Only properties that were actually supplied are recorded, so that
        # unset state fields stay absent rather than becoming explicit Nones.
        if create_option is not None:
            pulumi.set(__self__, "create_option", create_option)
        if deletion_protection is not None:
            pulumi.set(__self__, "deletion_protection", deletion_protection)
        if disable_rollback is not None:
            pulumi.set(__self__, "disable_rollback", disable_rollback)
        if notification_urls is not None:
            pulumi.set(__self__, "notification_urls", notification_urls)
        if parameters is not None:
            pulumi.set(__self__, "parameters", parameters)
        if ram_role_name is not None:
            pulumi.set(__self__, "ram_role_name", ram_role_name)
        if replacement_option is not None:
            pulumi.set(__self__, "replacement_option", replacement_option)
        if retain_all_resources is not None:
            pulumi.set(__self__, "retain_all_resources", retain_all_resources)
        if retain_resources is not None:
            pulumi.set(__self__, "retain_resources", retain_resources)
        if stack_name is not None:
            pulumi.set(__self__, "stack_name", stack_name)
        if stack_policy_body is not None:
            pulumi.set(__self__, "stack_policy_body", stack_policy_body)
        if stack_policy_during_update_body is not None:
            pulumi.set(__self__, "stack_policy_during_update_body", stack_policy_during_update_body)
        if stack_policy_during_update_url is not None:
            pulumi.set(__self__, "stack_policy_during_update_url", stack_policy_during_update_url)
        if stack_policy_url is not None:
            pulumi.set(__self__, "stack_policy_url", stack_policy_url)
        if status is not None:
            pulumi.set(__self__, "status", status)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if template_body is not None:
            pulumi.set(__self__, "template_body", template_body)
        if template_url is not None:
            pulumi.set(__self__, "template_url", template_url)
        if template_version is not None:
            pulumi.set(__self__, "template_version", template_version)
        if timeout_in_minutes is not None:
            pulumi.set(__self__, "timeout_in_minutes", timeout_in_minutes)
        if use_previous_parameters is not None:
            pulumi.set(__self__, "use_previous_parameters", use_previous_parameters)

    @property
    @pulumi.getter(name="createOption")
    def create_option(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies whether to delete the stack after it is created.
        """
        return pulumi.get(self, "create_option")

    @create_option.setter
    def create_option(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "create_option", value)

    @property
    @pulumi.getter(name="deletionProtection")
    def deletion_protection(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies whether to enable deletion protection on the stack. Valid
        values: `Disabled`, `Enabled`. Defaults to `Disabled`.
        """
        return pulumi.get(self, "deletion_protection")

    @deletion_protection.setter
    def deletion_protection(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "deletion_protection", value)

    @property
    @pulumi.getter(name="disableRollback")
    def disable_rollback(self) -> Optional[pulumi.Input[bool]]:
        """
        Specifies whether to disable rollback on stack creation failure.
        Defaults to `false`.
        """
        return pulumi.get(self, "disable_rollback")

    @disable_rollback.setter
    def disable_rollback(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "disable_rollback", value)

    @property
    @pulumi.getter(name="notificationUrls")
    def notification_urls(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        The callback URL for receiving stack event N. Only HTTP POST is
        supported. Maximum value of N: 5.
        """
        return pulumi.get(self, "notification_urls")

    @notification_urls.setter
    def notification_urls(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "notification_urls", value)

    @property
    @pulumi.getter
    def parameters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['StackParameterArgs']]]]:
        """
        The parameters. If the parameter name and value are not specified,
        ROS will use the default value specified in the template.
        """
        return pulumi.get(self, "parameters")

    @parameters.setter
    def parameters(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['StackParameterArgs']]]]):
        pulumi.set(self, "parameters", value)

    @property
    @pulumi.getter(name="ramRoleName")
    def ram_role_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the RAM role. ROS assumes the specified RAM role to
        create the stack and call API operations by using the credentials of
        the role.
        """
        return pulumi.get(self, "ram_role_name")

    @ram_role_name.setter
    def ram_role_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "ram_role_name", value)

    @property
    @pulumi.getter(name="replacementOption")
    def replacement_option(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies whether to enable replacement update after a resource
        attribute that does not support modification update is changed.
        Modification update keeps the physical ID of the resource unchanged.
        However, the resource is deleted and then recreated, and its physical
        ID is changed if replacement update is enabled.
        """
        return pulumi.get(self, "replacement_option")

    @replacement_option.setter
    def replacement_option(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "replacement_option", value)

    @property
    @pulumi.getter(name="retainAllResources")
    def retain_all_resources(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether to retain all resources in the stack.
        """
        return pulumi.get(self, "retain_all_resources")

    @retain_all_resources.setter
    def retain_all_resources(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "retain_all_resources", value)

    @property
    @pulumi.getter(name="retainResources")
    def retain_resources(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        The resources to retain in the stack (a list of resource names; the
        boolean counterpart is `retain_all_resources`).
        """
        return pulumi.get(self, "retain_resources")

    @retain_resources.setter
    def retain_resources(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "retain_resources", value)

    @property
    @pulumi.getter(name="stackName")
    def stack_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name can be up to 255 characters in length and can contain
        digits, letters, hyphens (-), and underscores (_). It must start with
        a digit or letter.
        """
        return pulumi.get(self, "stack_name")

    @stack_name.setter
    def stack_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "stack_name", value)

    @property
    @pulumi.getter(name="stackPolicyBody")
    def stack_policy_body(self) -> Optional[pulumi.Input[str]]:
        """
        The structure that contains the stack policy body. The stack policy
        body must be 1 to 16,384 bytes in length.
        """
        return pulumi.get(self, "stack_policy_body")

    @stack_policy_body.setter
    def stack_policy_body(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "stack_policy_body", value)

    @property
    @pulumi.getter(name="stackPolicyDuringUpdateBody")
    def stack_policy_during_update_body(self) -> Optional[pulumi.Input[str]]:
        """
        The structure that contains the body of the temporary overriding
        stack policy. The stack policy body must be 1 to 16,384 bytes in
        length.
        """
        return pulumi.get(self, "stack_policy_during_update_body")

    @stack_policy_during_update_body.setter
    def stack_policy_during_update_body(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "stack_policy_during_update_body", value)

    @property
    @pulumi.getter(name="stackPolicyDuringUpdateUrl")
    def stack_policy_during_update_url(self) -> Optional[pulumi.Input[str]]:
        """
        The URL of the file that contains the temporary overriding stack
        policy. The URL must point to a policy located in an HTTP or HTTPS
        web server or an Alibaba Cloud OSS bucket. Examples:
        oss://ros/stack-policy/demo and
        oss://ros/stack-policy/demo?RegionId=cn-hangzhou. The policy can be
        up to 16,384 bytes in length and the URL can be up to 1,350 bytes in
        length. If the region of the OSS bucket is not specified, the
        RegionId value is used by default.
        """
        return pulumi.get(self, "stack_policy_during_update_url")

    @stack_policy_during_update_url.setter
    def stack_policy_during_update_url(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "stack_policy_during_update_url", value)

    @property
    @pulumi.getter(name="stackPolicyUrl")
    def stack_policy_url(self) -> Optional[pulumi.Input[str]]:
        """
        The URL of the file that contains the stack policy. The URL must
        point to a policy located in an HTTP or HTTPS web server or an
        Alibaba Cloud OSS bucket. Examples: oss://ros/stack-policy/demo and
        oss://ros/stack-policy/demo?RegionId=cn-hangzhou. The policy can be
        up to 16,384 bytes in length and the URL can be up to 1,350 bytes in
        length. If the region of the OSS bucket is not specified, the
        RegionId value is used by default.
        """
        return pulumi.get(self, "stack_policy_url")

    @stack_policy_url.setter
    def stack_policy_url(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "stack_policy_url", value)

    @property
    @pulumi.getter
    def status(self) -> Optional[pulumi.Input[str]]:
        """
        The status of Stack.
        """
        return pulumi.get(self, "status")

    @status.setter
    def status(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "status", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        A mapping of tags to assign to the resource.
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "tags", value)

    @property
    @pulumi.getter(name="templateBody")
    def template_body(self) -> Optional[pulumi.Input[str]]:
        """
        The structure that contains the template body. The template body
        must be 1 to 524,288 bytes in length. If the length of the template
        body is longer than required, we recommend that you add parameters to
        the HTTP POST request body to avoid request failures due to excessive
        length of URLs.
        """
        return pulumi.get(self, "template_body")

    @template_body.setter
    def template_body(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "template_body", value)

    @property
    @pulumi.getter(name="templateUrl")
    def template_url(self) -> Optional[pulumi.Input[str]]:
        """
        The URL of the file that contains the template body. The URL must
        point to a template located in an HTTP or HTTPS web server or an
        Alibaba Cloud OSS bucket. Examples: oss://ros/template/demo and
        oss://ros/template/demo?RegionId=cn-hangzhou. The template must be 1
        to 524,288 bytes in length. If the region of the OSS bucket is not
        specified, the RegionId value is used by default.
        """
        return pulumi.get(self, "template_url")

    @template_url.setter
    def template_url(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "template_url", value)

    @property
    @pulumi.getter(name="templateVersion")
    def template_version(self) -> Optional[pulumi.Input[str]]:
        """
        The version of the template.
        """
        return pulumi.get(self, "template_version")

    @template_version.setter
    def template_version(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "template_version", value)

    @property
    @pulumi.getter(name="timeoutInMinutes")
    def timeout_in_minutes(self) -> Optional[pulumi.Input[int]]:
        """
        The timeout period that is specified for the stack creation request.
        Defaults to `60`.
        """
        return pulumi.get(self, "timeout_in_minutes")

    @timeout_in_minutes.setter
    def timeout_in_minutes(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "timeout_in_minutes", value)

    @property
    @pulumi.getter(name="usePreviousParameters")
    def use_previous_parameters(self) -> Optional[pulumi.Input[bool]]:
        """
        Specifies whether to use the values that were passed last time for
        the parameters that you do not specify in the current request.
        """
        return pulumi.get(self, "use_previous_parameters")

    @use_previous_parameters.setter
    def use_previous_parameters(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "use_previous_parameters", value)
class Stack(pulumi.CustomResource):
@overload
def __init__(__self__,
             resource_name: str,
             opts: Optional[pulumi.ResourceOptions] = None,
             create_option: Optional[pulumi.Input[str]] = None,
             deletion_protection: Optional[pulumi.Input[str]] = None,
             disable_rollback: Optional[pulumi.Input[bool]] = None,
             notification_urls: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
             parameters: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['StackParameterArgs']]]]] = None,
             ram_role_name: Optional[pulumi.Input[str]] = None,
             replacement_option: Optional[pulumi.Input[str]] = None,
             retain_all_resources: Optional[pulumi.Input[bool]] = None,
             retain_resources: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
             stack_name: Optional[pulumi.Input[str]] = None,
             stack_policy_body: Optional[pulumi.Input[str]] = None,
             stack_policy_during_update_body: Optional[pulumi.Input[str]] = None,
             stack_policy_during_update_url: Optional[pulumi.Input[str]] = None,
             stack_policy_url: Optional[pulumi.Input[str]] = None,
             tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
             template_body: Optional[pulumi.Input[str]] = None,
             template_url: Optional[pulumi.Input[str]] = None,
             template_version: Optional[pulumi.Input[str]] = None,
             timeout_in_minutes: Optional[pulumi.Input[int]] = None,
             use_previous_parameters: Optional[pulumi.Input[bool]] = None,
             __props__=None):
    """
    Provides a ROS Stack resource.

    For information about ROS Stack and how to use it, see [What is Stack](https://www.alibabacloud.com/help/en/doc-detail/132086.htm).

    > **NOTE:** Available in v1.106.0+.

    ## Example Usage

    Basic Usage

    ```python
    import pulumi
    import pulumi_alicloud as alicloud

    example = alicloud.ros.Stack("example",
        stack_name="tf-testaccstack",
        stack_policy_body=\"\"\" {
        "Statement": [{
            "Action": "Update:Delete",
            "Resource": "*",
            "Effect": "Allow",
            "Principal": "*"
        }]
    }
    \"\"\",
        template_body=\"\"\" {
        "ROSTemplateFormatVersion": "2015-09-01"
    }
    \"\"\")
    ```

    ## Import

    ROS Stack can be imported using the id, e.g.

    ```sh
    $ pulumi import alicloud:ros/stack:Stack example <stack_id>
    ```

    :param str resource_name: The name of the resource.
    :param pulumi.ResourceOptions opts: Options for the resource.
    :param pulumi.Input[str] create_option: Specifies whether to delete the stack after it is created.
    :param pulumi.Input[str] deletion_protection: Specifies whether to enable deletion protection on the stack. Valid values: `Disabled`, `Enabled`. Defaults to `Disabled`.
    :param pulumi.Input[bool] disable_rollback: Specifies whether to disable rollback on stack creation failure. Defaults to `false`.
    :param pulumi.Input[Sequence[pulumi.Input[str]]] notification_urls: The callback URL for receiving stack event N. Only HTTP POST is supported. Maximum value of N: 5.
    :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['StackParameterArgs']]]] parameters: The parameters. If the parameter name and value are not specified, ROS will use the default value specified in the template.
    :param pulumi.Input[str] ram_role_name: The name of the RAM role. ROS assumes the specified RAM role to create the stack and call API operations by using the credentials of the role.
    :param pulumi.Input[str] replacement_option: Specifies whether to enable replacement update after a resource attribute that does not support modification update is changed. Modification update keeps the physical ID of the resource unchanged. However, the resource is deleted and then recreated, and its physical ID is changed if replacement update is enabled.
    :param pulumi.Input[bool] retain_all_resources: Whether to retain all resources in the stack.
    :param pulumi.Input[Sequence[pulumi.Input[str]]] retain_resources: The resources to retain in the stack (a list of resource names; the boolean counterpart is `retain_all_resources`).
    :param pulumi.Input[str] stack_name: The name can be up to 255 characters in length and can contain digits, letters, hyphens (-), and underscores (_). It must start with a digit or letter.
    :param pulumi.Input[str] stack_policy_body: The structure that contains the stack policy body. The stack policy body must be 1 to 16,384 bytes in length.
    :param pulumi.Input[str] stack_policy_during_update_body: The structure that contains the body of the temporary overriding stack policy. The stack policy body must be 1 to 16,384 bytes in length.
    :param pulumi.Input[str] stack_policy_during_update_url: The URL of the file that contains the temporary overriding stack policy. The URL must point to a policy located in an HTTP or HTTPS web server or an Alibaba Cloud OSS bucket. Examples: oss://ros/stack-policy/demo and oss://ros/stack-policy/demo?RegionId=cn-hangzhou. The policy can be up to 16,384 bytes in length and the URL can be up to 1,350 bytes in length. If the region of the OSS bucket is not specified, the RegionId value is used by default.
    :param pulumi.Input[str] stack_policy_url: The URL of the file that contains the stack policy. The URL must point to a policy located in an HTTP or HTTPS web server or an Alibaba Cloud OSS bucket. Examples: oss://ros/stack-policy/demo and oss://ros/stack-policy/demo?RegionId=cn-hangzhou. The policy can be up to 16,384 bytes in length and the URL can be up to 1,350 bytes in length. If the region of the OSS bucket is not specified, the RegionId value is used by default.
    :param pulumi.Input[Mapping[str, Any]] tags: A mapping of tags to assign to the resource.
    :param pulumi.Input[str] template_body: The structure that contains the template body. The template body must be 1 to 524,288 bytes in length. If the length of the template body is longer than required, we recommend that you add parameters to the HTTP POST request body to avoid request failures due to excessive length of URLs.
    :param pulumi.Input[str] template_url: The URL of the file that contains the template body. The URL must point to a template located in an HTTP or HTTPS web server or an Alibaba Cloud OSS bucket. Examples: oss://ros/template/demo and oss://ros/template/demo?RegionId=cn-hangzhou. The template must be 1 to 524,288 bytes in length. If the region of the OSS bucket is not specified, the RegionId value is used by default.
    :param pulumi.Input[str] template_version: The version of the template.
    :param pulumi.Input[int] timeout_in_minutes: The timeout period that is specified for the stack creation request. Defaults to `60`.
    :param pulumi.Input[bool] use_previous_parameters: Specifies whether to use the values that were passed last time for the parameters that you do not specify in the current request.
    """
    ...
@overload
def __init__(__self__,
             resource_name: str,
             args: StackArgs,
             opts: Optional[pulumi.ResourceOptions] = None):
    """
    Provides a ROS Stack resource.

    For information about ROS Stack and how to use it, see [What is Stack](https://www.alibabacloud.com/help/en/doc-detail/132086.htm).

    > **NOTE:** Available in v1.106.0+.

    ## Example Usage

    Basic Usage

    ```python
    import pulumi
    import pulumi_alicloud as alicloud

    example = alicloud.ros.Stack("example",
        stack_name="tf-testaccstack",
        stack_policy_body=\"\"\" {
        "Statement": [{
            "Action": "Update:Delete",
            "Resource": "*",
            "Effect": "Allow",
            "Principal": "*"
        }]
    }
    \"\"\",
        template_body=\"\"\" {
        "ROSTemplateFormatVersion": "2015-09-01"
    }
    \"\"\")
    ```

    ## Import

    ROS Stack can be imported using the id, e.g.

    ```sh
    $ pulumi import alicloud:ros/stack:Stack example <stack_id>
    ```

    :param str resource_name: The name of the resource.
    :param StackArgs args: The arguments to use to populate this resource's properties.
    :param pulumi.ResourceOptions opts: Options for the resource.
    """
    ...
def __init__(__self__, resource_name: str, *args, **kwargs):
    """
    Dispatch between the two ``__init__`` overloads: either a single
    ``StackArgs`` bundle or individual keyword properties.
    """
    resource_args, opts = _utilities.get_resource_args_opts(StackArgs, pulumi.ResourceOptions, *args, **kwargs)
    if resource_args is None:
        # Keyword-style invocation: forward the caller's arguments untouched.
        __self__._internal_init(resource_name, *args, **kwargs)
    else:
        # Args-object invocation: expand the bundle into keyword properties.
        __self__._internal_init(resource_name, opts, **resource_args.__dict__)
def _internal_init(__self__,
                   resource_name: str,
                   opts: Optional[pulumi.ResourceOptions] = None,
                   create_option: Optional[pulumi.Input[str]] = None,
                   deletion_protection: Optional[pulumi.Input[str]] = None,
                   disable_rollback: Optional[pulumi.Input[bool]] = None,
                   notification_urls: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                   parameters: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['StackParameterArgs']]]]] = None,
                   ram_role_name: Optional[pulumi.Input[str]] = None,
                   replacement_option: Optional[pulumi.Input[str]] = None,
                   retain_all_resources: Optional[pulumi.Input[bool]] = None,
                   retain_resources: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                   stack_name: Optional[pulumi.Input[str]] = None,
                   stack_policy_body: Optional[pulumi.Input[str]] = None,
                   stack_policy_during_update_body: Optional[pulumi.Input[str]] = None,
                   stack_policy_during_update_url: Optional[pulumi.Input[str]] = None,
                   stack_policy_url: Optional[pulumi.Input[str]] = None,
                   tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                   template_body: Optional[pulumi.Input[str]] = None,
                   template_url: Optional[pulumi.Input[str]] = None,
                   template_version: Optional[pulumi.Input[str]] = None,
                   timeout_in_minutes: Optional[pulumi.Input[int]] = None,
                   use_previous_parameters: Optional[pulumi.Input[bool]] = None,
                   __props__=None):
    """
    Shared implementation behind both ``__init__`` overloads: validate the
    resource options, assemble the input property bag, and register the
    resource with the Pulumi engine.
    """
    if opts is None:
        opts = pulumi.ResourceOptions()
    if not isinstance(opts, pulumi.ResourceOptions):
        raise TypeError('Expected resource options to be a ResourceOptions instance')
    if opts.version is None:
        # Pin the provider plugin version to this SDK's version by default.
        opts.version = _utilities.get_version()
    if opts.id is None:
        # No id means we are creating a new resource (an id is only
        # passed when rehydrating an existing one via ``Stack.get``).
        if __props__ is not None:
            raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
        __props__ = StackArgs.__new__(StackArgs)

        __props__.__dict__["create_option"] = create_option
        __props__.__dict__["deletion_protection"] = deletion_protection
        __props__.__dict__["disable_rollback"] = disable_rollback
        __props__.__dict__["notification_urls"] = notification_urls
        __props__.__dict__["parameters"] = parameters
        __props__.__dict__["ram_role_name"] = ram_role_name
        __props__.__dict__["replacement_option"] = replacement_option
        __props__.__dict__["retain_all_resources"] = retain_all_resources
        __props__.__dict__["retain_resources"] = retain_resources
        # stack_name is the only required input; the check is skipped when a
        # URN is supplied, since the engine then already has the state.
        if stack_name is None and not opts.urn:
            raise TypeError("Missing required property 'stack_name'")
        __props__.__dict__["stack_name"] = stack_name
        __props__.__dict__["stack_policy_body"] = stack_policy_body
        __props__.__dict__["stack_policy_during_update_body"] = stack_policy_during_update_body
        __props__.__dict__["stack_policy_during_update_url"] = stack_policy_during_update_url
        __props__.__dict__["stack_policy_url"] = stack_policy_url
        __props__.__dict__["tags"] = tags
        __props__.__dict__["template_body"] = template_body
        __props__.__dict__["template_url"] = template_url
        __props__.__dict__["template_version"] = template_version
        __props__.__dict__["timeout_in_minutes"] = timeout_in_minutes
        __props__.__dict__["use_previous_parameters"] = use_previous_parameters
        # Output-only property: populated by the provider, never by callers.
        __props__.__dict__["status"] = None
    super(Stack, __self__).__init__(
        'alicloud:ros/stack:Stack',
        resource_name,
        __props__,
        opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
create_option: Optional[pulumi.Input[str]] = None,
deletion_protection: Optional[pulumi.Input[str]] = None,
disable_rollback: Optional[pulumi.Input[bool]] = None,
notification_urls: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
parameters: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['StackParameterArgs']]]]] = None,
ram_role_name: Optional[pulumi.Input[str]] = None,
replacement_option: Optional[pulumi.Input[str]] = None,
retain_all_resources: Optional[pulumi.Input[bool]] = None,
retain_resources: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
stack_name: Optional[pulumi.Input[str]] = None,
stack_policy_body: Optional[pulumi.Input[str]] = None,
stack_policy_during_update_body: Optional[pulumi.Input[str]] = None,
stack_policy_during_update_url: Optional[pulumi.Input[str]] = None,
stack_policy_url: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
template_body: Optional[pulumi.Input[str]] = None,
template_url: Optional[pulumi.Input[str]] = None,
template_version: Optional[pulumi.Input[str]] = None,
timeout_in_minutes: Optional[pulumi.Input[int]] = None,
use_previous_parameters: Optional[pulumi.Input[bool]] = None) -> 'Stack':
"""
Get an existing Stack resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] create_option: Specifies whether to delete the stack after it is created.
:param pulumi.Input[str] deletion_protection: Specifies whether to enable deletion protection on the stack. Valid values: `Disabled`, `Enabled`. Default to: `Disabled`
:param pulumi.Input[bool] disable_rollback: Specifies whether to disable rollback on stack creation failure. Default to: `false`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] notification_urls: The callback URL for receiving stack event N. Only HTTP POST is supported. Maximum value of N: 5.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['StackParameterArgs']]]] parameters: The parameters. If the parameter name and value are not specified, ROS will use the default value specified in the template.
:param pulumi.Input[str] ram_role_name: The name of the RAM role. ROS assumes the specified RAM role to create the stack and call API operations by using the credentials of the role.
:param pulumi.Input[str] replacement_option: Specifies whether to enable replacement update after a resource attribute that does not support modification update is changed. Modification update keeps the physical ID of the resource unchanged. However, the resource is deleted and then recreated, and its physical ID is changed if replacement update is enabled.
:param pulumi.Input[bool] retain_all_resources: The retain all resources.
:param pulumi.Input[Sequence[pulumi.Input[str]]] retain_resources: Specifies whether to retain the resources in the stack.
:param pulumi.Input[str] stack_name: The name can be up to 255 characters in length and can contain digits, letters, hyphens (-), and underscores (_). It must start with a digit or letter.
:param pulumi.Input[str] stack_policy_body: The structure that contains the stack policy body. The stack policy body must be 1 to 16,384 bytes in length.
:param pulumi.Input[str] stack_policy_during_update_body: The structure that contains the body of the temporary overriding stack policy. The stack policy body must be 1 to 16,384 bytes in length.
:param pulumi.Input[str] stack_policy_during_update_url: The URL of the file that contains the temporary overriding stack policy. The URL must point to a policy located in an HTTP or HTTPS web server or an Alibaba Cloud OSS bucket. Examples: oss://ros/stack-policy/demo and oss://ros/stack-policy/demo?RegionId=cn-hangzhou. The policy can be up to 16,384 bytes in length and the URL can be up to 1,350 bytes in length. If the region of the OSS bucket is not specified, the RegionId value is used by default.
:param pulumi.Input[str] stack_policy_url: The URL of the file that contains the stack policy. The URL must point to a policy located in an HTTP or HTTPS web server or an Alibaba Cloud OSS bucket. Examples: oss://ros/stack-policy/demo and oss://ros/stack-policy/demo?RegionId=cn-hangzhou. The policy can be up to 16,384 bytes in length and the URL can be up to 1,350 bytes in length. If the region of the OSS bucket is not specified, the RegionId value is used by default.
:param pulumi.Input[str] status: The status of Stack.
:param pulumi.Input[Mapping[str, Any]] tags: A mapping of tags to assign to the resource.
:param pulumi.Input[str] template_body: The structure that contains the template body. The template body must be 1 to 524,288 bytes in length. If the length of the template body is longer than required, we recommend that you add parameters to the HTTP POST request body to avoid request failures due to excessive length of URLs.
:param pulumi.Input[str] template_url: The URL of the file that contains the template body. The URL must point to a template located in an HTTP or HTTPS web server or an Alibaba Cloud OSS bucket. Examples: oss://ros/template/demo and oss://ros/template/demo?RegionId=cn-hangzhou. The template must be 1 to 524,288 bytes in length. If the region of the OSS bucket is not specified, the RegionId value is used by default.
:param pulumi.Input[str] template_version: The version of the template.
:param pulumi.Input[int] timeout_in_minutes: The timeout period that is specified for the stack creation request. Default to: `60`.
:param pulumi.Input[bool] use_previous_parameters: Specifies whether to use the values that were passed last time for the parameters that you do not specify in the current request.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _StackState.__new__(_StackState)
__props__.__dict__["create_option"] = create_option
__props__.__dict__["deletion_protection"] = deletion_protection
__props__.__dict__["disable_rollback"] = disable_rollback
__props__.__dict__["notification_urls"] = notification_urls
__props__.__dict__["parameters"] = parameters
__props__.__dict__["ram_role_name"] = ram_role_name
__props__.__dict__["replacement_option"] = replacement_option
__props__.__dict__["retain_all_resources"] = retain_all_resources
__props__.__dict__["retain_resources"] = retain_resources
__props__.__dict__["stack_name"] = stack_name
__props__.__dict__["stack_policy_body"] = stack_policy_body
__props__.__dict__["stack_policy_during_update_body"] = stack_policy_during_update_body
__props__.__dict__["stack_policy_during_update_url"] = stack_policy_during_update_url
__props__.__dict__["stack_policy_url"] = stack_policy_url
__props__.__dict__["status"] = status
__props__.__dict__["tags"] = tags
__props__.__dict__["template_body"] = template_body
__props__.__dict__["template_url"] = template_url
__props__.__dict__["template_version"] = template_version
__props__.__dict__["timeout_in_minutes"] = timeout_in_minutes
__props__.__dict__["use_previous_parameters"] = use_previous_parameters
return Stack(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="createOption")
    def create_option(self) -> pulumi.Output[Optional[str]]:
        """
        Specifies whether to delete the stack after it is created.
        """
        # Read-only output backed by the `createOption` provider property.
        return pulumi.get(self, "create_option")
    @property
    @pulumi.getter(name="deletionProtection")
    def deletion_protection(self) -> pulumi.Output[Optional[str]]:
        """
        Specifies whether to enable deletion protection on the stack. Valid values: `Disabled`, `Enabled`. Default to: `Disabled`
        """
        # Read-only output backed by the `deletionProtection` provider property.
        return pulumi.get(self, "deletion_protection")
    @property
    @pulumi.getter(name="disableRollback")
    def disable_rollback(self) -> pulumi.Output[Optional[bool]]:
        """
        Specifies whether to disable rollback on stack creation failure. Default to: `false`.
        """
        # Read-only output backed by the `disableRollback` provider property.
        return pulumi.get(self, "disable_rollback")
    @property
    @pulumi.getter(name="notificationUrls")
    def notification_urls(self) -> pulumi.Output[Optional[Sequence[str]]]:
        """
        The callback URL for receiving stack event N. Only HTTP POST is supported. Maximum value of N: 5.
        """
        # Read-only output backed by the `notificationUrls` provider property.
        return pulumi.get(self, "notification_urls")
    @property
    @pulumi.getter
    def parameters(self) -> pulumi.Output[Optional[Sequence['outputs.StackParameter']]]:
        """
        The parameters. If the parameter name and value are not specified, ROS will use the default value specified in the template.
        """
        # Wire name matches the Python name, so no `name=` override is needed.
        return pulumi.get(self, "parameters")
    @property
    @pulumi.getter(name="ramRoleName")
    def ram_role_name(self) -> pulumi.Output[Optional[str]]:
        """
        The name of the RAM role. ROS assumes the specified RAM role to create the stack and call API operations by using the credentials of the role.
        """
        # Read-only output backed by the `ramRoleName` provider property.
        return pulumi.get(self, "ram_role_name")
    @property
    @pulumi.getter(name="replacementOption")
    def replacement_option(self) -> pulumi.Output[Optional[str]]:
        """
        Specifies whether to enable replacement update after a resource attribute that does not support modification update is changed. Modification update keeps the physical ID of the resource unchanged. However, the resource is deleted and then recreated, and its physical ID is changed if replacement update is enabled.
        """
        # Read-only output backed by the `replacementOption` provider property.
        return pulumi.get(self, "replacement_option")
    @property
    @pulumi.getter(name="retainAllResources")
    def retain_all_resources(self) -> pulumi.Output[Optional[bool]]:
        """
        The retain all resources.
        """
        # Read-only output backed by the `retainAllResources` provider property.
        return pulumi.get(self, "retain_all_resources")
    @property
    @pulumi.getter(name="retainResources")
    def retain_resources(self) -> pulumi.Output[Optional[Sequence[str]]]:
        """
        Specifies whether to retain the resources in the stack.
        """
        # NOTE(review): the type is a list of strings, so this presumably names
        # the specific resources to retain — docstring wording inherited upstream.
        return pulumi.get(self, "retain_resources")
    @property
    @pulumi.getter(name="stackName")
    def stack_name(self) -> pulumi.Output[str]:
        """
        The name can be up to 255 characters in length and can contain digits, letters, hyphens (-), and underscores (_). It must start with a digit or letter.
        """
        # Typed non-optional: this property is required at creation time.
        return pulumi.get(self, "stack_name")
    @property
    @pulumi.getter(name="stackPolicyBody")
    def stack_policy_body(self) -> pulumi.Output[Optional[str]]:
        """
        The structure that contains the stack policy body. The stack policy body must be 1 to 16,384 bytes in length.
        """
        # Read-only output backed by the `stackPolicyBody` provider property.
        return pulumi.get(self, "stack_policy_body")
    @property
    @pulumi.getter(name="stackPolicyDuringUpdateBody")
    def stack_policy_during_update_body(self) -> pulumi.Output[Optional[str]]:
        """
        The structure that contains the body of the temporary overriding stack policy. The stack policy body must be 1 to 16,384 bytes in length.
        """
        # Read-only output backed by the `stackPolicyDuringUpdateBody` provider property.
        return pulumi.get(self, "stack_policy_during_update_body")
    @property
    @pulumi.getter(name="stackPolicyDuringUpdateUrl")
    def stack_policy_during_update_url(self) -> pulumi.Output[Optional[str]]:
        """
        The URL of the file that contains the temporary overriding stack policy. The URL must point to a policy located in an HTTP or HTTPS web server or an Alibaba Cloud OSS bucket. Examples: oss://ros/stack-policy/demo and oss://ros/stack-policy/demo?RegionId=cn-hangzhou. The policy can be up to 16,384 bytes in length and the URL can be up to 1,350 bytes in length. If the region of the OSS bucket is not specified, the RegionId value is used by default.
        """
        # Read-only output backed by the `stackPolicyDuringUpdateUrl` provider property.
        return pulumi.get(self, "stack_policy_during_update_url")
    @property
    @pulumi.getter(name="stackPolicyUrl")
    def stack_policy_url(self) -> pulumi.Output[Optional[str]]:
        """
        The URL of the file that contains the stack policy. The URL must point to a policy located in an HTTP or HTTPS web server or an Alibaba Cloud OSS bucket. Examples: oss://ros/stack-policy/demo and oss://ros/stack-policy/demo?RegionId=cn-hangzhou. The policy can be up to 16,384 bytes in length and the URL can be up to 1,350 bytes in length. If the region of the OSS bucket is not specified, the RegionId value is used by default.
        """
        # Read-only output backed by the `stackPolicyUrl` provider property.
        return pulumi.get(self, "stack_policy_url")
    @property
    @pulumi.getter
    def status(self) -> pulumi.Output[str]:
        """
        The status of Stack.
        """
        # Provider-computed attribute; typed non-optional because it is always
        # populated once the resource exists.
        return pulumi.get(self, "status")
    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, Any]]]:
        """
        A mapping of tags to assign to the resource.
        """
        # Wire name matches the Python name, so no `name=` override is needed.
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter(name="templateBody")
    def template_body(self) -> pulumi.Output[Optional[str]]:
        """
        The structure that contains the template body. The template body must be 1 to 524,288 bytes in length. If the length of the template body is longer than required, we recommend that you add parameters to the HTTP POST request body to avoid request failures due to excessive length of URLs.
        """
        # Read-only output backed by the `templateBody` provider property.
        return pulumi.get(self, "template_body")
    @property
    @pulumi.getter(name="templateUrl")
    def template_url(self) -> pulumi.Output[Optional[str]]:
        """
        The URL of the file that contains the template body. The URL must point to a template located in an HTTP or HTTPS web server or an Alibaba Cloud OSS bucket. Examples: oss://ros/template/demo and oss://ros/template/demo?RegionId=cn-hangzhou. The template must be 1 to 524,288 bytes in length. If the region of the OSS bucket is not specified, the RegionId value is used by default.
        """
        # Read-only output backed by the `templateUrl` provider property.
        return pulumi.get(self, "template_url")
    @property
    @pulumi.getter(name="templateVersion")
    def template_version(self) -> pulumi.Output[Optional[str]]:
        """
        The version of the template.
        """
        # Read-only output backed by the `templateVersion` provider property.
        return pulumi.get(self, "template_version")
    @property
    @pulumi.getter(name="timeoutInMinutes")
    def timeout_in_minutes(self) -> pulumi.Output[Optional[int]]:
        """
        The timeout period that is specified for the stack creation request. Default to: `60`.
        """
        # Read-only output backed by the `timeoutInMinutes` provider property.
        return pulumi.get(self, "timeout_in_minutes")
    @property
    @pulumi.getter(name="usePreviousParameters")
    def use_previous_parameters(self) -> pulumi.Output[Optional[bool]]:
        """
        Specifies whether to use the values that were passed last time for the parameters that you do not specify in the current request.
        """
        # Read-only output backed by the `usePreviousParameters` provider property.
        return pulumi.get(self, "use_previous_parameters")
| en | 0.713825 | # coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** The set of arguments for constructing a Stack resource. :param pulumi.Input[str] stack_name: The name can be up to 255 characters in length and can contain digits, letters, hyphens (-), and underscores (_). It must start with a digit or letter. :param pulumi.Input[str] create_option: Specifies whether to delete the stack after it is created. :param pulumi.Input[str] deletion_protection: Specifies whether to enable deletion protection on the stack. Valid values: `Disabled`, `Enabled`. Default to: `Disabled` :param pulumi.Input[bool] disable_rollback: Specifies whether to disable rollback on stack creation failure. Default to: `false`. :param pulumi.Input[Sequence[pulumi.Input[str]]] notification_urls: The callback URL for receiving stack event N. Only HTTP POST is supported. Maximum value of N: 5. :param pulumi.Input[Sequence[pulumi.Input['StackParameterArgs']]] parameters: The parameters. If the parameter name and value are not specified, ROS will use the default value specified in the template. :param pulumi.Input[str] ram_role_name: The name of the RAM role. ROS assumes the specified RAM role to create the stack and call API operations by using the credentials of the role. :param pulumi.Input[str] replacement_option: Specifies whether to enable replacement update after a resource attribute that does not support modification update is changed. Modification update keeps the physical ID of the resource unchanged. However, the resource is deleted and then recreated, and its physical ID is changed if replacement update is enabled. :param pulumi.Input[bool] retain_all_resources: The retain all resources. :param pulumi.Input[Sequence[pulumi.Input[str]]] retain_resources: Specifies whether to retain the resources in the stack. 
:param pulumi.Input[str] stack_policy_body: The structure that contains the stack policy body. The stack policy body must be 1 to 16,384 bytes in length. :param pulumi.Input[str] stack_policy_during_update_body: The structure that contains the body of the temporary overriding stack policy. The stack policy body must be 1 to 16,384 bytes in length. :param pulumi.Input[str] stack_policy_during_update_url: The URL of the file that contains the temporary overriding stack policy. The URL must point to a policy located in an HTTP or HTTPS web server or an Alibaba Cloud OSS bucket. Examples: oss://ros/stack-policy/demo and oss://ros/stack-policy/demo?RegionId=cn-hangzhou. The policy can be up to 16,384 bytes in length and the URL can be up to 1,350 bytes in length. If the region of the OSS bucket is not specified, the RegionId value is used by default. :param pulumi.Input[str] stack_policy_url: The URL of the file that contains the stack policy. The URL must point to a policy located in an HTTP or HTTPS web server or an Alibaba Cloud OSS bucket. Examples: oss://ros/stack-policy/demo and oss://ros/stack-policy/demo?RegionId=cn-hangzhou. The policy can be up to 16,384 bytes in length and the URL can be up to 1,350 bytes in length. If the region of the OSS bucket is not specified, the RegionId value is used by default. :param pulumi.Input[Mapping[str, Any]] tags: A mapping of tags to assign to the resource. :param pulumi.Input[str] template_body: The structure that contains the template body. The template body must be 1 to 524,288 bytes in length. If the length of the template body is longer than required, we recommend that you add parameters to the HTTP POST request body to avoid request failures due to excessive length of URLs. :param pulumi.Input[str] template_url: The URL of the file that contains the template body. The URL must point to a template located in an HTTP or HTTPS web server or an Alibaba Cloud OSS bucket. 
Examples: oss://ros/template/demo and oss://ros/template/demo?RegionId=cn-hangzhou. The template must be 1 to 524,288 bytes in length. If the region of the OSS bucket is not specified, the RegionId value is used by default. :param pulumi.Input[str] template_version: The version of the template. :param pulumi.Input[int] timeout_in_minutes: The timeout period that is specified for the stack creation request. Default to: `60`. :param pulumi.Input[bool] use_previous_parameters: Specifies whether to use the values that were passed last time for the parameters that you do not specify in the current request. The name can be up to 255 characters in length and can contain digits, letters, hyphens (-), and underscores (_). It must start with a digit or letter. Specifies whether to delete the stack after it is created. Specifies whether to enable deletion protection on the stack. Valid values: `Disabled`, `Enabled`. Default to: `Disabled` Specifies whether to disable rollback on stack creation failure. Default to: `false`. The callback URL for receiving stack event N. Only HTTP POST is supported. Maximum value of N: 5. The parameters. If the parameter name and value are not specified, ROS will use the default value specified in the template. The name of the RAM role. ROS assumes the specified RAM role to create the stack and call API operations by using the credentials of the role. Specifies whether to enable replacement update after a resource attribute that does not support modification update is changed. Modification update keeps the physical ID of the resource unchanged. However, the resource is deleted and then recreated, and its physical ID is changed if replacement update is enabled. The retain all resources. Specifies whether to retain the resources in the stack. The structure that contains the stack policy body. The stack policy body must be 1 to 16,384 bytes in length. The structure that contains the body of the temporary overriding stack policy. 
The stack policy body must be 1 to 16,384 bytes in length. The URL of the file that contains the temporary overriding stack policy. The URL must point to a policy located in an HTTP or HTTPS web server or an Alibaba Cloud OSS bucket. Examples: oss://ros/stack-policy/demo and oss://ros/stack-policy/demo?RegionId=cn-hangzhou. The policy can be up to 16,384 bytes in length and the URL can be up to 1,350 bytes in length. If the region of the OSS bucket is not specified, the RegionId value is used by default. The URL of the file that contains the stack policy. The URL must point to a policy located in an HTTP or HTTPS web server or an Alibaba Cloud OSS bucket. Examples: oss://ros/stack-policy/demo and oss://ros/stack-policy/demo?RegionId=cn-hangzhou. The policy can be up to 16,384 bytes in length and the URL can be up to 1,350 bytes in length. If the region of the OSS bucket is not specified, the RegionId value is used by default. A mapping of tags to assign to the resource. The structure that contains the template body. The template body must be 1 to 524,288 bytes in length. If the length of the template body is longer than required, we recommend that you add parameters to the HTTP POST request body to avoid request failures due to excessive length of URLs. The URL of the file that contains the template body. The URL must point to a template located in an HTTP or HTTPS web server or an Alibaba Cloud OSS bucket. Examples: oss://ros/template/demo and oss://ros/template/demo?RegionId=cn-hangzhou. The template must be 1 to 524,288 bytes in length. If the region of the OSS bucket is not specified, the RegionId value is used by default. The version of the template. The timeout period that is specified for the stack creation request. Default to: `60`. Specifies whether to use the values that were passed last time for the parameters that you do not specify in the current request. Input properties used for looking up and filtering Stack resources. 
:param pulumi.Input[str] create_option: Specifies whether to delete the stack after it is created. :param pulumi.Input[str] deletion_protection: Specifies whether to enable deletion protection on the stack. Valid values: `Disabled`, `Enabled`. Default to: `Disabled` :param pulumi.Input[bool] disable_rollback: Specifies whether to disable rollback on stack creation failure. Default to: `false`. :param pulumi.Input[Sequence[pulumi.Input[str]]] notification_urls: The callback URL for receiving stack event N. Only HTTP POST is supported. Maximum value of N: 5. :param pulumi.Input[Sequence[pulumi.Input['StackParameterArgs']]] parameters: The parameters. If the parameter name and value are not specified, ROS will use the default value specified in the template. :param pulumi.Input[str] ram_role_name: The name of the RAM role. ROS assumes the specified RAM role to create the stack and call API operations by using the credentials of the role. :param pulumi.Input[str] replacement_option: Specifies whether to enable replacement update after a resource attribute that does not support modification update is changed. Modification update keeps the physical ID of the resource unchanged. However, the resource is deleted and then recreated, and its physical ID is changed if replacement update is enabled. :param pulumi.Input[bool] retain_all_resources: The retain all resources. :param pulumi.Input[Sequence[pulumi.Input[str]]] retain_resources: Specifies whether to retain the resources in the stack. :param pulumi.Input[str] stack_name: The name can be up to 255 characters in length and can contain digits, letters, hyphens (-), and underscores (_). It must start with a digit or letter. :param pulumi.Input[str] stack_policy_body: The structure that contains the stack policy body. The stack policy body must be 1 to 16,384 bytes in length. :param pulumi.Input[str] stack_policy_during_update_body: The structure that contains the body of the temporary overriding stack policy. 
The stack policy body must be 1 to 16,384 bytes in length. :param pulumi.Input[str] stack_policy_during_update_url: The URL of the file that contains the temporary overriding stack policy. The URL must point to a policy located in an HTTP or HTTPS web server or an Alibaba Cloud OSS bucket. Examples: oss://ros/stack-policy/demo and oss://ros/stack-policy/demo?RegionId=cn-hangzhou. The policy can be up to 16,384 bytes in length and the URL can be up to 1,350 bytes in length. If the region of the OSS bucket is not specified, the RegionId value is used by default. :param pulumi.Input[str] stack_policy_url: The URL of the file that contains the stack policy. The URL must point to a policy located in an HTTP or HTTPS web server or an Alibaba Cloud OSS bucket. Examples: oss://ros/stack-policy/demo and oss://ros/stack-policy/demo?RegionId=cn-hangzhou. The policy can be up to 16,384 bytes in length and the URL can be up to 1,350 bytes in length. If the region of the OSS bucket is not specified, the RegionId value is used by default. :param pulumi.Input[str] status: The status of Stack. :param pulumi.Input[Mapping[str, Any]] tags: A mapping of tags to assign to the resource. :param pulumi.Input[str] template_body: The structure that contains the template body. The template body must be 1 to 524,288 bytes in length. If the length of the template body is longer than required, we recommend that you add parameters to the HTTP POST request body to avoid request failures due to excessive length of URLs. :param pulumi.Input[str] template_url: The URL of the file that contains the template body. The URL must point to a template located in an HTTP or HTTPS web server or an Alibaba Cloud OSS bucket. Examples: oss://ros/template/demo and oss://ros/template/demo?RegionId=cn-hangzhou. The template must be 1 to 524,288 bytes in length. If the region of the OSS bucket is not specified, the RegionId value is used by default. 
:param pulumi.Input[str] template_version: The version of the template. :param pulumi.Input[int] timeout_in_minutes: The timeout period that is specified for the stack creation request. Default to: `60`. :param pulumi.Input[bool] use_previous_parameters: Specifies whether to use the values that were passed last time for the parameters that you do not specify in the current request. Specifies whether to delete the stack after it is created. Specifies whether to enable deletion protection on the stack. Valid values: `Disabled`, `Enabled`. Default to: `Disabled` Specifies whether to disable rollback on stack creation failure. Default to: `false`. The callback URL for receiving stack event N. Only HTTP POST is supported. Maximum value of N: 5. The parameters. If the parameter name and value are not specified, ROS will use the default value specified in the template. The name of the RAM role. ROS assumes the specified RAM role to create the stack and call API operations by using the credentials of the role. Specifies whether to enable replacement update after a resource attribute that does not support modification update is changed. Modification update keeps the physical ID of the resource unchanged. However, the resource is deleted and then recreated, and its physical ID is changed if replacement update is enabled. The retain all resources. Specifies whether to retain the resources in the stack. The name can be up to 255 characters in length and can contain digits, letters, hyphens (-), and underscores (_). It must start with a digit or letter. The structure that contains the stack policy body. The stack policy body must be 1 to 16,384 bytes in length. The structure that contains the body of the temporary overriding stack policy. The stack policy body must be 1 to 16,384 bytes in length. The URL of the file that contains the temporary overriding stack policy. The URL must point to a policy located in an HTTP or HTTPS web server or an Alibaba Cloud OSS bucket. 
Examples: oss://ros/stack-policy/demo and oss://ros/stack-policy/demo?RegionId=cn-hangzhou. The policy can be up to 16,384 bytes in length and the URL can be up to 1,350 bytes in length. If the region of the OSS bucket is not specified, the RegionId value is used by default. The URL of the file that contains the stack policy. The URL must point to a policy located in an HTTP or HTTPS web server or an Alibaba Cloud OSS bucket. Examples: oss://ros/stack-policy/demo and oss://ros/stack-policy/demo?RegionId=cn-hangzhou. The policy can be up to 16,384 bytes in length and the URL can be up to 1,350 bytes in length. If the region of the OSS bucket is not specified, the RegionId value is used by default. The status of Stack. A mapping of tags to assign to the resource. The structure that contains the template body. The template body must be 1 to 524,288 bytes in length. If the length of the template body is longer than required, we recommend that you add parameters to the HTTP POST request body to avoid request failures due to excessive length of URLs. The URL of the file that contains the template body. The URL must point to a template located in an HTTP or HTTPS web server or an Alibaba Cloud OSS bucket. Examples: oss://ros/template/demo and oss://ros/template/demo?RegionId=cn-hangzhou. The template must be 1 to 524,288 bytes in length. If the region of the OSS bucket is not specified, the RegionId value is used by default. The version of the template. The timeout period that is specified for the stack creation request. Default to: `60`. Specifies whether to use the values that were passed last time for the parameters that you do not specify in the current request. Provides a ROS Stack resource. For information about ROS Stack and how to use it, see [What is Stack](https://www.alibabacloud.com/help/en/doc-detail/132086.htm). > **NOTE:** Available in v1.106.0+. 
## Example Usage Basic Usage ```python import pulumi import pulumi_alicloud as alicloud example = alicloud.ros.Stack("example", stack_name="tf-testaccstack", stack_policy_body=\"\"\" { "Statement": [{ "Action": "Update:Delete", "Resource": "*", "Effect": "Allow", "Principal": "*" }] } \"\"\", template_body=\"\"\" { "ROSTemplateFormatVersion": "2015-09-01" } \"\"\") ``` ## Import ROS Stack can be imported using the id, e.g. ```sh $ pulumi import alicloud:ros/stack:Stack example <stack_id> ``` :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] create_option: Specifies whether to delete the stack after it is created. :param pulumi.Input[str] deletion_protection: Specifies whether to enable deletion protection on the stack. Valid values: `Disabled`, `Enabled`. Default to: `Disabled` :param pulumi.Input[bool] disable_rollback: Specifies whether to disable rollback on stack creation failure. Default to: `false`. :param pulumi.Input[Sequence[pulumi.Input[str]]] notification_urls: The callback URL for receiving stack event N. Only HTTP POST is supported. Maximum value of N: 5. :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['StackParameterArgs']]]] parameters: The parameters. If the parameter name and value are not specified, ROS will use the default value specified in the template. :param pulumi.Input[str] ram_role_name: The name of the RAM role. ROS assumes the specified RAM role to create the stack and call API operations by using the credentials of the role. :param pulumi.Input[str] replacement_option: Specifies whether to enable replacement update after a resource attribute that does not support modification update is changed. Modification update keeps the physical ID of the resource unchanged. However, the resource is deleted and then recreated, and its physical ID is changed if replacement update is enabled. 
:param pulumi.Input[bool] retain_all_resources: The retain all resources. :param pulumi.Input[Sequence[pulumi.Input[str]]] retain_resources: Specifies whether to retain the resources in the stack. :param pulumi.Input[str] stack_name: The name can be up to 255 characters in length and can contain digits, letters, hyphens (-), and underscores (_). It must start with a digit or letter. :param pulumi.Input[str] stack_policy_body: The structure that contains the stack policy body. The stack policy body must be 1 to 16,384 bytes in length. :param pulumi.Input[str] stack_policy_during_update_body: The structure that contains the body of the temporary overriding stack policy. The stack policy body must be 1 to 16,384 bytes in length. :param pulumi.Input[str] stack_policy_during_update_url: The URL of the file that contains the temporary overriding stack policy. The URL must point to a policy located in an HTTP or HTTPS web server or an Alibaba Cloud OSS bucket. Examples: oss://ros/stack-policy/demo and oss://ros/stack-policy/demo?RegionId=cn-hangzhou. The policy can be up to 16,384 bytes in length and the URL can be up to 1,350 bytes in length. If the region of the OSS bucket is not specified, the RegionId value is used by default. :param pulumi.Input[str] stack_policy_url: The URL of the file that contains the stack policy. The URL must point to a policy located in an HTTP or HTTPS web server or an Alibaba Cloud OSS bucket. Examples: oss://ros/stack-policy/demo and oss://ros/stack-policy/demo?RegionId=cn-hangzhou. The policy can be up to 16,384 bytes in length and the URL can be up to 1,350 bytes in length. If the region of the OSS bucket is not specified, the RegionId value is used by default. :param pulumi.Input[Mapping[str, Any]] tags: A mapping of tags to assign to the resource. :param pulumi.Input[str] template_body: The structure that contains the template body. The template body must be 1 to 524,288 bytes in length. 
If the length of the template body is longer than required, we recommend that you add parameters to the HTTP POST request body to avoid request failures due to excessive length of URLs. :param pulumi.Input[str] template_url: The URL of the file that contains the template body. The URL must point to a template located in an HTTP or HTTPS web server or an Alibaba Cloud OSS bucket. Examples: oss://ros/template/demo and oss://ros/template/demo?RegionId=cn-hangzhou. The template must be 1 to 524,288 bytes in length. If the region of the OSS bucket is not specified, the RegionId value is used by default. :param pulumi.Input[str] template_version: The version of the template. :param pulumi.Input[int] timeout_in_minutes: The timeout period that is specified for the stack creation request. Default to: `60`. :param pulumi.Input[bool] use_previous_parameters: Specifies whether to use the values that were passed last time for the parameters that you do not specify in the current request. Provides a ROS Stack resource. For information about ROS Stack and how to use it, see [What is Stack](https://www.alibabacloud.com/help/en/doc-detail/132086.htm). > **NOTE:** Available in v1.106.0+. ## Example Usage Basic Usage ```python import pulumi import pulumi_alicloud as alicloud example = alicloud.ros.Stack("example", stack_name="tf-testaccstack", stack_policy_body=\"\"\" { "Statement": [{ "Action": "Update:Delete", "Resource": "*", "Effect": "Allow", "Principal": "*" }] } \"\"\", template_body=\"\"\" { "ROSTemplateFormatVersion": "2015-09-01" } \"\"\") ``` ## Import ROS Stack can be imported using the id, e.g. ```sh $ pulumi import alicloud:ros/stack:Stack example <stack_id> ``` :param str resource_name: The name of the resource. :param StackArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. 
Get an existing Stack resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] create_option: Specifies whether to delete the stack after it is created. :param pulumi.Input[str] deletion_protection: Specifies whether to enable deletion protection on the stack. Valid values: `Disabled`, `Enabled`. Default to: `Disabled` :param pulumi.Input[bool] disable_rollback: Specifies whether to disable rollback on stack creation failure. Default to: `false`. :param pulumi.Input[Sequence[pulumi.Input[str]]] notification_urls: The callback URL for receiving stack event N. Only HTTP POST is supported. Maximum value of N: 5. :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['StackParameterArgs']]]] parameters: The parameters. If the parameter name and value are not specified, ROS will use the default value specified in the template. :param pulumi.Input[str] ram_role_name: The name of the RAM role. ROS assumes the specified RAM role to create the stack and call API operations by using the credentials of the role. :param pulumi.Input[str] replacement_option: Specifies whether to enable replacement update after a resource attribute that does not support modification update is changed. Modification update keeps the physical ID of the resource unchanged. However, the resource is deleted and then recreated, and its physical ID is changed if replacement update is enabled. :param pulumi.Input[bool] retain_all_resources: The retain all resources. :param pulumi.Input[Sequence[pulumi.Input[str]]] retain_resources: Specifies whether to retain the resources in the stack. 
:param pulumi.Input[str] stack_name: The name can be up to 255 characters in length and can contain digits, letters, hyphens (-), and underscores (_). It must start with a digit or letter. :param pulumi.Input[str] stack_policy_body: The structure that contains the stack policy body. The stack policy body must be 1 to 16,384 bytes in length. :param pulumi.Input[str] stack_policy_during_update_body: The structure that contains the body of the temporary overriding stack policy. The stack policy body must be 1 to 16,384 bytes in length. :param pulumi.Input[str] stack_policy_during_update_url: The URL of the file that contains the temporary overriding stack policy. The URL must point to a policy located in an HTTP or HTTPS web server or an Alibaba Cloud OSS bucket. Examples: oss://ros/stack-policy/demo and oss://ros/stack-policy/demo?RegionId=cn-hangzhou. The policy can be up to 16,384 bytes in length and the URL can be up to 1,350 bytes in length. If the region of the OSS bucket is not specified, the RegionId value is used by default. :param pulumi.Input[str] stack_policy_url: The URL of the file that contains the stack policy. The URL must point to a policy located in an HTTP or HTTPS web server or an Alibaba Cloud OSS bucket. Examples: oss://ros/stack-policy/demo and oss://ros/stack-policy/demo?RegionId=cn-hangzhou. The policy can be up to 16,384 bytes in length and the URL can be up to 1,350 bytes in length. If the region of the OSS bucket is not specified, the RegionId value is used by default. :param pulumi.Input[str] status: The status of Stack. :param pulumi.Input[Mapping[str, Any]] tags: A mapping of tags to assign to the resource. :param pulumi.Input[str] template_body: The structure that contains the template body. The template body must be 1 to 524,288 bytes in length. If the length of the template body is longer than required, we recommend that you add parameters to the HTTP POST request body to avoid request failures due to excessive length of URLs. 
:param pulumi.Input[str] template_url: The URL of the file that contains the template body. The URL must point to a template located in an HTTP or HTTPS web server or an Alibaba Cloud OSS bucket. Examples: oss://ros/template/demo and oss://ros/template/demo?RegionId=cn-hangzhou. The template must be 1 to 524,288 bytes in length. If the region of the OSS bucket is not specified, the RegionId value is used by default. :param pulumi.Input[str] template_version: The version of the template. :param pulumi.Input[int] timeout_in_minutes: The timeout period that is specified for the stack creation request. Default to: `60`. :param pulumi.Input[bool] use_previous_parameters: Specifies whether to use the values that were passed last time for the parameters that you do not specify in the current request. Specifies whether to delete the stack after it is created. Specifies whether to enable deletion protection on the stack. Valid values: `Disabled`, `Enabled`. Default to: `Disabled` Specifies whether to disable rollback on stack creation failure. Default to: `false`. The callback URL for receiving stack event N. Only HTTP POST is supported. Maximum value of N: 5. The parameters. If the parameter name and value are not specified, ROS will use the default value specified in the template. The name of the RAM role. ROS assumes the specified RAM role to create the stack and call API operations by using the credentials of the role. Specifies whether to enable replacement update after a resource attribute that does not support modification update is changed. Modification update keeps the physical ID of the resource unchanged. However, the resource is deleted and then recreated, and its physical ID is changed if replacement update is enabled. The retain all resources. Specifies whether to retain the resources in the stack. The name can be up to 255 characters in length and can contain digits, letters, hyphens (-), and underscores (_). It must start with a digit or letter. 
The structure that contains the stack policy body. The stack policy body must be 1 to 16,384 bytes in length. The structure that contains the body of the temporary overriding stack policy. The stack policy body must be 1 to 16,384 bytes in length. The URL of the file that contains the temporary overriding stack policy. The URL must point to a policy located in an HTTP or HTTPS web server or an Alibaba Cloud OSS bucket. Examples: oss://ros/stack-policy/demo and oss://ros/stack-policy/demo?RegionId=cn-hangzhou. The policy can be up to 16,384 bytes in length and the URL can be up to 1,350 bytes in length. If the region of the OSS bucket is not specified, the RegionId value is used by default. The URL of the file that contains the stack policy. The URL must point to a policy located in an HTTP or HTTPS web server or an Alibaba Cloud OSS bucket. Examples: oss://ros/stack-policy/demo and oss://ros/stack-policy/demo?RegionId=cn-hangzhou. The policy can be up to 16,384 bytes in length and the URL can be up to 1,350 bytes in length. If the region of the OSS bucket is not specified, the RegionId value is used by default. The status of Stack. A mapping of tags to assign to the resource. The structure that contains the template body. The template body must be 1 to 524,288 bytes in length. If the length of the template body is longer than required, we recommend that you add parameters to the HTTP POST request body to avoid request failures due to excessive length of URLs. The URL of the file that contains the template body. The URL must point to a template located in an HTTP or HTTPS web server or an Alibaba Cloud OSS bucket. Examples: oss://ros/template/demo and oss://ros/template/demo?RegionId=cn-hangzhou. The template must be 1 to 524,288 bytes in length. If the region of the OSS bucket is not specified, the RegionId value is used by default. The version of the template. The timeout period that is specified for the stack creation request. Default to: `60`. 
Specifies whether to use the values that were passed last time for the parameters that you do not specify in the current request. | 1.534027 | 2 |
dlp_mpi/util.py | boeddeker/dlp_mpi | 6 | 6612908 | <reponame>boeddeker/dlp_mpi<filename>dlp_mpi/util.py
import os
import logging
import contextlib
LOG = logging.getLogger('dlp_mpi')
@contextlib.contextmanager
def progress_bar(
        sequence,
        display_progress_bar,
):
    """Context manager yielding a progress bar for iterating *sequence*.

    Parameters
    ----------
    sequence:
        The iterable that will be consumed by the caller.  If it has no
        ``len()`` (e.g. a generator) the bar runs without a total/ETA.
    display_progress_bar:
        When falsy, or when tqdm cannot be imported, a no-op dummy object
        exposing ``set_description``/``update`` is yielded instead.

    Yields
    ------
    A ``tqdm`` instance or a dummy object with the same (subset) API.
    """
    class _DummyPBar:
        # Minimal stand-in implementing the subset of the tqdm API used here.
        def set_description(self, *args, **kwargs):
            pass

        def update(self, *args, **kwargs):
            pass

    try:
        length = len(sequence)
    except TypeError:
        # e.g. a generator: tqdm then shows no total/ETA.
        length = None
    if display_progress_bar:
        try:
            from tqdm import tqdm
        except ImportError:
            LOG.warning('Can not import tqdm. Disable the progress bar.')
            # Bug fix: this branch previously did not yield, so entering the
            # context manager raised RuntimeError("generator didn't yield")
            # whenever tqdm was unavailable.
            yield _DummyPBar()
        else:
            # Smoothing has problems with a huge amount of workers (e.g. 200)
            with tqdm(
                total=length,
                mininterval=2,
                smoothing=None,
            ) as pbar:
                yield pbar
    else:
        yield _DummyPBar()
def ensure_single_thread_numeric():
    """Raise ``EnvironmentError`` unless numeric libs are pinned to 1 thread.

    When you parallelize your input pipeline you often want each worker to
    work on a single thread.

    These variables are all candidates to be set to 1, but the ones checked
    in this function are mandatory as far as we know:
        GOMP_NUM_THREADS
        OMP_NUM_THREADS
        OPENBLAS_NUM_THREADS
        MKL_NUM_THREADS
        VECLIB_MAXIMUM_THREADS
        NUMEXPR_NUM_THREADS

    Raises
    ------
    EnvironmentError
        If ``OMP_NUM_THREADS`` or ``MKL_NUM_THREADS`` is unset or not '1'.
    """
    candidates = [
        'OMP_NUM_THREADS',
        'MKL_NUM_THREADS',
    ]
    for key in candidates:
        # String comparison: the variable must be present and exactly '1'.
        if os.environ.get(key) != '1':
            raise EnvironmentError(
                'Make sure to set the following environment variables to '
                'ensure that each worker works on a single thread:\n'
                'export OMP_NUM_THREADS=1\n'
                'export MKL_NUM_THREADS=1\n\n'
                f'But you use: {key}={os.environ.get(key)}'
            )
| import os
import logging
import contextlib
LOG = logging.getLogger('dlp_mpi')
@contextlib.contextmanager
def progress_bar(
        sequence,
        display_progress_bar,
):
    """Context manager that yields a progress bar for iterating *sequence*.

    When *display_progress_bar* is falsy, a no-op dummy object exposing the
    same ``set_description``/``update`` methods is yielded instead of tqdm.
    """
    # Objects without __len__ (e.g. generators) fall back to an unknown total.
    try:
        length = len(sequence)
    except TypeError:
        length = None
    if display_progress_bar:
        try:
            from tqdm import tqdm
        except ImportError:
            LOG.warning('Can not import tqdm. Disable the progress bar.')
            # NOTE(review): this branch never yields, so entering the context
            # manager raises RuntimeError when tqdm is missing -- confirm.
        else:
            # Smoothing has problems with a huge amount of workers (e.g. 200)
            with tqdm(
                total=length,
                # disable=not display_progress_bar,
                mininterval=2,
                smoothing=None,
            ) as pbar:
                yield pbar
    else:
        # Minimal stand-in implementing the subset of the tqdm API used here.
        class DummyPBar:
            def set_description(self, *args, **kwargs):
                pass
            def update(self, *args, **kwargs):
                pass
        yield DummyPBar()
def ensure_single_thread_numeric():
    """
    When you parallelize your input pipeline you often want each worker to work
    on a single thread.
    These variables are all candidates to be set to 1, but the ones checked in
    this function are mandatory as far as we know.
    GOMP_NUM_THREADS
    OMP_NUM_THREADS
    OPENBLAS_NUM_THREADS
    MKL_NUM_THREADS
    VECLIB_MAXIMUM_THREADS
    NUMEXPR_NUM_THREADS
    """
    # Only these two variables are enforced; the others listed above are
    # advisory candidates.
    candidates = [
        'OMP_NUM_THREADS',
        'MKL_NUM_THREADS',
    ]
    for key in candidates:
        # String comparison: the variable must be present and exactly '1'.
        if not os.environ.get(key) == '1':
            raise EnvironmentError(
                'Make sure to set the following environment variables to '
                'ensure that each worker works on a single thread:\n'
                'export OMP_NUM_THREADS=1\n'
                'export MKL_NUM_THREADS=1\n\n'
                f'But you use: {key}={os.environ.get(key)}'
            ) | en | 0.851933 | # Smoothing has problems with a huge amount of workers (e.g. 200) # disable=not display_progress_bar, When you parallelize your input pipeline you often want each worker to work on a single thread. These variables are all candidates to be set to 1, but the ones checked in this function are mandatory as far as we know. GOMP_NUM_THREADS OMP_NUM_THREADS OPENBLAS_NUM_THREADS MKL_NUM_THREADS VECLIB_MAXIMUM_THREADS NUMEXPR_NUM_THREADS | 2.58408 | 3 |
htdocs/plotting/auto/scripts100/p167.py | jamayfieldjr/iem | 1 | 6612909 | """Flight category by hour"""
import datetime
import numpy as np
import pytz
from pandas.io.sql import read_sql
import matplotlib.colors as mpcolors
from matplotlib.patches import Rectangle
from pyiem.plot.use_agg import plt
from pyiem.util import get_autoplot_context, get_dbconn, utc
from pyiem.exceptions import NoDataFound
def get_description():
    """Return a dict describing how to call this plotter."""
    desc = dict()
    desc['data'] = True
    # Framework may cache the result for one day (seconds).
    desc['cache'] = 86400
    # Bug fix: the original description closed </tbody></table> twice
    # (invalid HTML) and misspelled "Instrument Flight Rules" as "Fight".
    desc['description'] = """This chart summarizes Flight Category by hour
    and day of a given month. In the case of multiple observations for a
    given hour, the worst category is plotted.
    <table class="table table-condensed table-bordered">
    <thead><tr><th>code</th><th>Label</th><th>Description</th></tr></thead>
    <tbody>
    <tr><td>Unknown</td><td>Unknown</td><td>No report or missing visibility for
    that hour</td></tr>
    <tr><td>VFR</td><td>Visual Flight Rules</td><td>
    Ceiling >3000' AGL and visibility >5 statutes miles (green)</td></tr>
    <tr><td>MVFR</td><td>Marginal Visual Flight Rules</td><td>
    1000-3000' ceilings and/or 3-5 statute miles, inclusive (blue)</td></tr>
    <tr><td>IFR</td><td>Instrument Flight Rules</td><td>
    500 - <1000' ceilings and/or 1 to <3 statute miles (red)</td></tr>
    <tr><td>LIFR</td><td>Low Instrument Flight Rules</td><td>
    < 500' AGL ceilings and/or < 1 mile (magenta)</td></tr>
    </tbody>
    </table>
    """
    today = datetime.date.today()
    desc['arguments'] = [
        dict(type='zstation', name='zstation', default='DSM',
             label='Select Station:', network='IA_ASOS'),
        dict(type='month', name='month', label='Select Month:',
             default=today.month),
        dict(type='year', name='year', label='Select Year:',
             default=today.year, min=1970),
    ]
    return desc
def plotter(fdict):
    """Build the flight-category-by-hour figure and dataframe.

    Reads one local-time month of METAR observations for the selected
    station, derives the worst flight category per (hour, day) cell and
    renders it as an image grid.  Returns ``(matplotlib Figure, DataFrame)``.
    """
    pgconn = get_dbconn('asos')
    ctx = get_autoplot_context(fdict, get_description())
    station = ctx['zstation']
    year = ctx['year']
    month = ctx['month']
    tzname = ctx['_nt'].sts[station]['tzname']
    tzinfo = pytz.timezone(tzname)
    # Figure out the 1rst and last of this month in the local time zone
    # (day 3 UTC so the timezone conversion cannot slip into the previous
    # month before snapping back to the 1st).
    sts = utc(year, month, 3)
    sts = sts.astimezone(tzinfo).replace(day=1, hour=0, minute=0)
    ets = (sts + datetime.timedelta(days=35)).replace(day=1)
    days = (ets-sts).days
    # data[hour, day-1] holds the numeric category; 0 means unknown.
    data = np.zeros((24, days))
    df = read_sql("""
    SELECT valid at time zone %s as ts,
    skyc1, skyc2, skyc3, skyc4, skyl1, skyl2, skyl3, skyl4,
    vsby
    from alldata where station = %s and valid BETWEEN %s and %s
    and vsby is not null and report_type = 2
    ORDER by valid ASC
    """, pgconn, params=(tzname, station, sts, ets), index_col=None)
    if df.empty:
        raise NoDataFound("No database entries found for station, sorry!")
    # 0 Unknown
    # 1 VFR: Ceiling >3000' AGL and visibility >5 statutes miles (green)
    # 2 MVFR: 1000-3000' and/or 3-5 statute miles, inclusive (blue)
    # 3 IFR: 500 - <1000' and/or 1 to <3 statute miles (red)
    # 4 LIFR: < 500' AGL and/or < 1 mile (magenta)
    lookup = {4: 'LIFR', 3: 'IFR', 2: 'MVFR', 1: 'VFR', 0: 'UNKNOWN'}
    conds = []
    for _, row in df.iterrows():
        x = row['ts'].day - 1
        y = row['ts'].hour
        val = 1
        level = 100000 # arb high number
        # Ceiling is taken from the first overcast (OVC) layer, if any.
        coverages = [row['skyc1'], row['skyc2'], row['skyc3'], row['skyc4']]
        if 'OVC' in coverages:
            idx = coverages.index('OVC')
            level = [row['skyl1'], row['skyl2'], row['skyl3'], row['skyl4']
                     ][idx]
        if level < 500 or row['vsby'] < 1:
            val = 4
        elif (level < 1000 and level >= 500) or row['vsby'] < 3:
            val = 3
        elif (level < 3000 and level >= 1000) or row['vsby'] < 5:
            val = 2
        elif level >= 3000 and row['vsby'] >= 5:
            val = 1
        else:
            val = 0
        # Keep the worst (numerically largest) category seen for this hour.
        data[y, x] = max(data[y, x], val)
        conds.append(lookup[val])
        # print row['ts'], y, x, val, data[y, x], level, row['vsby']
    df['flstatus'] = conds
    (fig, ax) = plt.subplots(1, 1, figsize=(8, 6))
    ax.set_facecolor('skyblue')
    ax.set_title(('[%s] %s %s Flight Category\n'
                  'based on Hourly METAR Cloud Amount/Level'
                  ' and Visibility Reports'
                  ) % (station, ctx['_nt'].sts[station]['name'],
                       sts.strftime("%b %Y")))
    # Category colors: Unknown, VFR, MVFR, IFR, LIFR (in that order).
    colors = ['#EEEEEE', 'green', 'blue', 'red', 'magenta']
    cmap = mpcolors.ListedColormap(colors)
    norm = mpcolors.BoundaryNorm(boundaries=range(6), ncolors=5)
    # Flip so hour 0 (midnight) appears at the bottom of the axis.
    ax.imshow(np.flipud(data), aspect='auto', extent=[0.5, days + 0.5, -0.5,
                                                      23.5],
              cmap=cmap, interpolation='nearest', norm=norm)
    ax.set_yticks(range(0, 24, 3))
    ax.set_yticklabels(['Mid', '3 AM', '6 AM', '9 AM', 'Noon',
                        '3 PM', '6 PM', '9 PM'])
    ax.set_xticks(range(1, days+1))
    ax.set_ylabel("Hour of Local Day (%s)" % (tzname, ))
    ax.set_xlabel("Day of %s" % (sts.strftime("%b %Y"),))
    # Proxy artists for the legend (imshow provides no legend handles).
    rects = []
    for color in colors:
        rects.append(Rectangle((0, 0), 1, 1, fc=color))
    ax.grid(True)
    # Shrink current axis's height by 10% on the bottom
    box = ax.get_position()
    ax.set_position([box.x0, box.y0 + box.height * 0.1,
                     box.width, box.height * 0.9])
    ax.legend(rects, ['Unknown', 'VFR', 'MVFR', "IFR", "LIFR"],
              loc='upper center', fontsize=14,
              bbox_to_anchor=(0.5, -0.09), fancybox=True, shadow=True, ncol=5)
    return fig, df
if __name__ == '__main__':
    # Ad-hoc smoke test: render January 2009 for Des Moines (DSM).
    plotter(dict(station='DSM', year=2009, month=1, network='IA_ASOS'))
| """Flight category by hour"""
import datetime
import numpy as np
import pytz
from pandas.io.sql import read_sql
import matplotlib.colors as mpcolors
from matplotlib.patches import Rectangle
from pyiem.plot.use_agg import plt
from pyiem.util import get_autoplot_context, get_dbconn, utc
from pyiem.exceptions import NoDataFound
def get_description():
    """ Return a dict describing how to call this plotter """
    desc = dict()
    desc['data'] = True
    # Framework may cache the result for one day (seconds).
    desc['cache'] = 86400
    # NOTE(review): the string below closes </tbody></table> twice (invalid
    # HTML) and says "Instrument Fight Rules" (sic) -- likely typos.
    desc['description'] = """This chart summarizes Flight Category by hour
    and day of a given month. In the case of multiple observations for a
    given hour, the worst category is plotted.
    <table class="table table-condensed table-bordered">
    <thead><tr><th>code</th><th>Label</th><th>Description</th></tr></thead>
    <tbody>
    <tr><td>Unknown</td><td>Unknown</td><td>No report or missing visibility for
    that hour</td></tr>
    <tr><td>VFR</td><td>Visual Flight Rules</td><td>
    Ceiling >3000' AGL and visibility >5 statutes miles (green)</td></tr>
    <tr><td>MVFR</td><td>Marginal Visual Flight Rules</td><td>
    1000-3000' ceilings and/or 3-5 statute miles, inclusive (blue)</td></tr>
    <tr><td>IFR</td><td>Instrument Fight Rules</td><td>
    500 - <1000' ceilings and/or 1 to <3 statute miles (red)</td></tr>
    <tr><td>LIFR</td><td>Low Instrument Flight Rules</td><td>
    < 500' AGL ceilings and/or < 1 mile (magenta)</td></tr>
    </tbody>
    </table>
    </tbody>
    </table>
    """
    today = datetime.date.today()
    desc['arguments'] = [
        dict(type='zstation', name='zstation', default='DSM',
             label='Select Station:', network='IA_ASOS'),
        dict(type='month', name='month', label='Select Month:',
             default=today.month),
        dict(type='year', name='year', label='Select Year:',
             default=today.year, min=1970),
    ]
    return desc
def plotter(fdict):
    """Build the flight-category-by-hour figure and dataframe.

    Reads one local-time month of METAR observations for the selected
    station, derives the worst flight category per (hour, day) cell and
    renders it as an image grid.  Returns ``(matplotlib Figure, DataFrame)``.
    """
    pgconn = get_dbconn('asos')
    ctx = get_autoplot_context(fdict, get_description())
    station = ctx['zstation']
    year = ctx['year']
    month = ctx['month']
    tzname = ctx['_nt'].sts[station]['tzname']
    tzinfo = pytz.timezone(tzname)
    # Figure out the 1rst and last of this month in the local time zone
    # (day 3 UTC so the timezone conversion cannot slip into the previous
    # month before snapping back to the 1st).
    sts = utc(year, month, 3)
    sts = sts.astimezone(tzinfo).replace(day=1, hour=0, minute=0)
    ets = (sts + datetime.timedelta(days=35)).replace(day=1)
    days = (ets-sts).days
    # data[hour, day-1] holds the numeric category; 0 means unknown.
    data = np.zeros((24, days))
    df = read_sql("""
    SELECT valid at time zone %s as ts,
    skyc1, skyc2, skyc3, skyc4, skyl1, skyl2, skyl3, skyl4,
    vsby
    from alldata where station = %s and valid BETWEEN %s and %s
    and vsby is not null and report_type = 2
    ORDER by valid ASC
    """, pgconn, params=(tzname, station, sts, ets), index_col=None)
    if df.empty:
        raise NoDataFound("No database entries found for station, sorry!")
    # 0 Unknown
    # 1 VFR: Ceiling >3000' AGL and visibility >5 statutes miles (green)
    # 2 MVFR: 1000-3000' and/or 3-5 statute miles, inclusive (blue)
    # 3 IFR: 500 - <1000' and/or 1 to <3 statute miles (red)
    # 4 LIFR: < 500' AGL and/or < 1 mile (magenta)
    lookup = {4: 'LIFR', 3: 'IFR', 2: 'MVFR', 1: 'VFR', 0: 'UNKNOWN'}
    conds = []
    for _, row in df.iterrows():
        x = row['ts'].day - 1
        y = row['ts'].hour
        val = 1
        level = 100000 # arb high number
        # Ceiling is taken from the first overcast (OVC) layer, if any.
        coverages = [row['skyc1'], row['skyc2'], row['skyc3'], row['skyc4']]
        if 'OVC' in coverages:
            idx = coverages.index('OVC')
            level = [row['skyl1'], row['skyl2'], row['skyl3'], row['skyl4']
                     ][idx]
        if level < 500 or row['vsby'] < 1:
            val = 4
        elif (level < 1000 and level >= 500) or row['vsby'] < 3:
            val = 3
        elif (level < 3000 and level >= 1000) or row['vsby'] < 5:
            val = 2
        elif level >= 3000 and row['vsby'] >= 5:
            val = 1
        else:
            val = 0
        # Keep the worst (numerically largest) category seen for this hour.
        data[y, x] = max(data[y, x], val)
        conds.append(lookup[val])
        # print row['ts'], y, x, val, data[y, x], level, row['vsby']
    df['flstatus'] = conds
    (fig, ax) = plt.subplots(1, 1, figsize=(8, 6))
    ax.set_facecolor('skyblue')
    ax.set_title(('[%s] %s %s Flight Category\n'
                  'based on Hourly METAR Cloud Amount/Level'
                  ' and Visibility Reports'
                  ) % (station, ctx['_nt'].sts[station]['name'],
                       sts.strftime("%b %Y")))
    # Category colors: Unknown, VFR, MVFR, IFR, LIFR (in that order).
    colors = ['#EEEEEE', 'green', 'blue', 'red', 'magenta']
    cmap = mpcolors.ListedColormap(colors)
    norm = mpcolors.BoundaryNorm(boundaries=range(6), ncolors=5)
    # Flip so hour 0 (midnight) appears at the bottom of the axis.
    ax.imshow(np.flipud(data), aspect='auto', extent=[0.5, days + 0.5, -0.5,
                                                      23.5],
              cmap=cmap, interpolation='nearest', norm=norm)
    ax.set_yticks(range(0, 24, 3))
    ax.set_yticklabels(['Mid', '3 AM', '6 AM', '9 AM', 'Noon',
                        '3 PM', '6 PM', '9 PM'])
    ax.set_xticks(range(1, days+1))
    ax.set_ylabel("Hour of Local Day (%s)" % (tzname, ))
    ax.set_xlabel("Day of %s" % (sts.strftime("%b %Y"),))
    # Proxy artists for the legend (imshow provides no legend handles).
    rects = []
    for color in colors:
        rects.append(Rectangle((0, 0), 1, 1, fc=color))
    ax.grid(True)
    # Shrink current axis's height by 10% on the bottom
    box = ax.get_position()
    ax.set_position([box.x0, box.y0 + box.height * 0.1,
                     box.width, box.height * 0.9])
    ax.legend(rects, ['Unknown', 'VFR', 'MVFR', "IFR", "LIFR"],
              loc='upper center', fontsize=14,
              bbox_to_anchor=(0.5, -0.09), fancybox=True, shadow=True, ncol=5)
    return fig, df
if __name__ == '__main__':
    # Ad-hoc smoke test: render January 2009 for Des Moines (DSM).
    plotter(dict(station='DSM', year=2009, month=1, network='IA_ASOS'))
| en | 0.64982 | Flight category by hour Return a dict describing how to call this plotter This chart summarizes Flight Category by hour and day of a given month. In the case of multiple observations for a given hour, the worst category is plotted. <table class="table table-condensed table-bordered"> <thead><tr><th>code</th><th>Label</th><th>Description</th></tr></thead> <tbody> <tr><td>Unknown</td><td>Unknown</td><td>No report or missing visibility for that hour</td></tr> <tr><td>VFR</td><td>Visual Flight Rules</td><td> Ceiling >3000' AGL and visibility >5 statutes miles (green)</td></tr> <tr><td>MVFR</td><td>Marginal Visual Flight Rules</td><td> 1000-3000' ceilings and/or 3-5 statute miles, inclusive (blue)</td></tr> <tr><td>IFR</td><td>Instrument Fight Rules</td><td> 500 - <1000' ceilings and/or 1 to <3 statute miles (red)</td></tr> <tr><td>LIFR</td><td>Low Instrument Flight Rules</td><td> < 500' AGL ceilings and/or < 1 mile (magenta)</td></tr> </tbody> </table> </tbody> </table> Go # Figure out the 1rst and last of this month in the local time zone SELECT valid at time zone %s as ts, skyc1, skyc2, skyc3, skyc4, skyl1, skyl2, skyl3, skyl4, vsby from alldata where station = %s and valid BETWEEN %s and %s and vsby is not null and report_type = 2 ORDER by valid ASC # 0 Unknown # 1 VFR: Ceiling >3000' AGL and visibility >5 statutes miles (green) # 2 MVFR: 1000-3000' and/or 3-5 statute miles, inclusive (blue) # 3 IFR: 500 - <1000' and/or 1 to <3 statute miles (red) # 4 LIFR: < 500' AGL and/or < 1 mile (magenta) # arb high number # print row['ts'], y, x, val, data[y, x], level, row['vsby'] # Shrink current axis's height by 10% on the bottom | 3.07783 | 3 |
spo_dataset/spo_generator.py | ortslil64/SPO-dataset | 0 | 6612910 | <reponame>ortslil64/SPO-dataset<filename>spo_dataset/spo_generator.py
#!/usr/bin/env python3
import matplotlib.pyplot as plt
import numpy as np
from skimage import data
from skimage.transform import warp
import cv2
import time
from tqdm import tqdm
import skvideo.io
import imutils
import dlib
from imutils import face_utils
def drc(xy, c_xy, radius):
    """Radially displace the points of *xy* lying within *radius* of *c_xy*.

    Every point closer than ``radius`` to the center ``c_xy`` is pushed by
    ``radius * exp(-1e-4 * r)`` along the unit vector pointing from the point
    towards the center (a point coinciding with the center stays in place,
    matching the original zero-vector handling).  Used as the inverse map
    for ``skimage.transform.warp``.

    Parameters
    ----------
    xy : (N, 2) array of point coordinates.
    c_xy : length-2 sequence, center of the distortion circle.
    radius : scalar radius of the affected region (also the push magnitude).

    Returns
    -------
    (N, 2) array with the displaced coordinates.

    Notes
    -----
    Bug fix: the previous implementation did ``xy_output = xy`` and therefore
    mutated the caller's array in place; a copy is returned now.  The loop
    over rows was also replaced by a vectorized computation.
    """
    xy = np.asarray(xy)
    out = np.array(xy, copy=True)
    # Vector from each point to the center, and its Euclidean length.
    diff = np.asarray(c_xy, dtype=float) - xy
    r = np.linalg.norm(diff, axis=1)
    inside = r < radius
    # Unit direction towards the center; zero where the point is the center.
    unit = np.zeros_like(diff, dtype=float)
    nz = r > 0
    unit[nz] = diff[nz] / r[nz][:, None]
    disp = radius * unit * np.exp(-0.0001 * r)[:, None]
    out[inside] = xy[inside] + disp[inside]
    return out
def add_circle_mag(image, c_xy, radius):
    """Apply a circular "magnifying lens" distortion to *image*.

    Wraps :func:`drc` as the coordinate map for ``skimage.transform.warp``,
    centered at *c_xy* with the given *radius*.
    """
    def _displace(xy):
        # Coordinate map invoked by warp on the full pixel-coordinate array.
        return drc(xy, c_xy, radius)

    return warp(image, _displace)
def get_dataset_from_image(image, n = 500, n_circles = 2, radius = None, v = None, pose = None, partial = False, mask = None):
    """Synthesize an (occupancy, observation) sequence by warping *image*.

    ``n_circles`` circular lens distortions bounce around the static
    background image; for every step the ground-truth occupancy mask ``x``
    (filled circles) and the distorted observation ``z`` are recorded.

    Parameters
    ----------
    image : 2-D float array used as the static background.
    n : number of time steps to generate.
    n_circles : number of moving distortions.
    radius, v, pose : optional per-circle radius / velocity / position lists;
        randomly initialized when None.
    partial : when True (and no mask is given), a fixed square region
        [20:90, 20:90] of each observation is blanked to 0.5.
    mask : optional image; distortions are only rendered where the mask
        value at the circle center is below 255.

    Returns
    -------
    (x, z) : arrays of shape (n, H, W) -- binary state masks and the
        corresponding distorted observations.
    """
    x = []
    z = []
    possible_speeds = [-3, -2, -1, 1, 2, 3]
    if radius is None:
        radius = []
        for ii in range(n_circles):
            # NOTE(review): dtype=np.int8 caps values at 127; fine for the
            # 10..30 range used here.
            radius_temp = np.random.randint(10,30, dtype=np.int8)
            radius.append(radius_temp)
    if v is None:
        v = []
        for ii in range(n_circles):
            v_temp = np.random.choice(possible_speeds, size = 2)
            v.append(v_temp)
    if pose is None:
        pose = []
        for ii in range(n_circles):
            pose_temp = np.empty(2)
            # NOTE(review): dtype=np.int8 overflows for images wider than
            # 128 px -- confirm images are always 128x128.
            pose_temp[0] = np.random.randint(0, image.shape[0], dtype=np.int8)
            pose_temp[1] = np.random.randint(0, image.shape[1], dtype=np.int8)
            pose.append(pose_temp)
    for ii in tqdm(range(n)):
        swirled = image.copy()
        state = np.zeros_like(swirled)
        for jj in range(n_circles):
            # Advance each circle and reflect its velocity off the borders.
            pose[jj][0] = np.int32(pose[jj][0] + v[jj][0])
            pose[jj][1] = np.int32(pose[jj][1] + v[jj][1])
            if pose[jj][0] > image.shape[0] -1:
                pose[jj][0] = image.shape[0] -1
                v[jj][0] = -v[jj][0]
            if pose[jj][1] > image.shape[1]-1:
                pose[jj][1] = image.shape[1]-1
                v[jj][1] = -v[jj][1]
            if pose[jj][0] < 1:
                pose[jj][0] = 1
                v[jj][0] = -v[jj][0]
            if pose[jj][1] < 1:
                pose[jj][1] = 1
                v[jj][1] = -v[jj][1]
            if partial == True and ii > 0 and mask is None:
                swirled = add_circle_mag(swirled, pose[jj], radius[jj])
                # Fixed occlusion square for the partially-observed variant.
                swirled[20:90,20:90] = 0.5
            # if pose[jj][0] < 30 or pose[jj][0] > 80 or pose[jj][1] < 30 or pose[jj][1] > 80:
            # swirled = add_circle_mag(swirled, pose[jj], radius[jj])
            elif mask is not None:
                if mask[int(pose[jj][1]),int(pose[jj][0])]<255:
                    swirled = add_circle_mag(swirled, pose[jj], radius[jj])
            else:
                swirled = add_circle_mag(swirled, pose[jj], radius[jj])
            # Ground-truth occupancy: filled disc at the circle position.
            state = cv2.circle(state,(int(pose[jj][0]), int(pose[jj][1])),radius[jj],(255,255,255),-1)
        # //255.0 maps the 255-valued disc onto a float 0/1 mask.
        x.append(state//255.0)
        z.append(swirled)
    x = np.array(x)
    z = np.array(z)
    return x,z
def get_dataset_from_video(images, n = 500, n_circles = 2, radius = None):
    """Synthesize an (occupancy, observation) sequence from video frames.

    Like :func:`get_dataset_from_image`, but each step warps the next frame
    of *images* instead of a static background.  ``n`` is clamped to the
    number of available frames.

    Returns
    -------
    (x, z) : arrays of shape (n, H, W) with binary state masks and the
        distorted frames.
    """
    x = []
    z = []
    v = []
    pose = []
    if radius is None:
        radius = []
        for ii in range(n_circles):
            radius_temp = np.random.randint(10,30, dtype=np.int8)
            radius.append(radius_temp)
    image = images[0]
    possible_speeds = [-3, -2, -1, 1, 2, 3]
    if len(images) < n:
        n = len(images)
    # Random initial positions and velocities for every circle.
    for ii in range(n_circles):
        pose_temp = np.empty(2)
        pose_temp[0] = np.random.randint(0, image.shape[0], dtype=np.int8)
        pose_temp[1] = np.random.randint(0, image.shape[1], dtype=np.int8)
        pose.append(pose_temp)
        v_temp = np.random.choice(possible_speeds, size = 2)
        v.append(v_temp)
    for ii in tqdm(range(n)):
        image = images[ii]
        swirled = image.copy()
        state = np.zeros_like(swirled)
        for jj in range(n_circles):
            # Advance each circle and reflect its velocity off the borders.
            pose[jj] = np.int32(pose[jj] + v[jj])
            if pose[jj][0] > image.shape[0] -1:
                pose[jj][0] = image.shape[0] -1
                v[jj][0] = -v[jj][0]
            if pose[jj][1] > image.shape[1]-1:
                pose[jj][1] = image.shape[1]-1
                v[jj][1] = -v[jj][1]
            if pose[jj][0] < 1:
                pose[jj][0] = 1
                v[jj][0] = -v[jj][0]
            if pose[jj][1] < 1:
                pose[jj][1] = 1
                v[jj][1] = -v[jj][1]
            swirled = add_circle_mag(swirled, pose[jj], radius[jj])
            # Ground-truth occupancy: filled disc at the circle position.
            state = cv2.circle(state,(pose[jj][0], pose[jj][1]),radius[jj],(255,255,255),-1)
        # //255.0 maps the 255-valued disc onto a float 0/1 mask.
        x.append(state//255.0)
        z.append(swirled)
    x = np.array(x)
    z = np.array(z)
    return x,z
def get_dataset_rotating_objects(n = 500,var = 255, image_shape = (128,128)):
    """Generate frames of two translating, rotating filled rectangles.

    Two rectangles (half-sizes from ``a``/``b``, centers ``c``) translate
    with velocities ``v`` (bouncing off the borders) and rotate with angular
    speeds ``omega``.  Each clean frame is paired with a noisy observation.

    Parameters
    ----------
    n : number of frames.
    var : std of the additive Gaussian noise (the default 255 makes the
        observation heavily noise-dominated).
    image_shape : output frame shape.

    Returns
    -------
    (x, z) : lists of clean frames and noisy frames, both scaled to [0, 1].
    """
    a = [50,15]
    b = [25,25]
    c = [[50,20],[100,100]]
    theta = [0.5, 1.2]
    v = [[1,2],[-2,-1]]
    omega = [0.05, -0.05]
    x = []
    z = []
    for ii in tqdm(range(n)):
        image = np.zeros(image_shape)
        for jj in range(2):
            # Four corners of rectangle jj, rotated by theta[jj] about c[jj].
            pts = [[int(c[jj][0] + 0.5*a[jj]*np.cos(theta[jj]) - 0.5*b[jj]*np.sin(theta[jj])), int( c[jj][1] + 0.5*a[jj]*np.sin(theta[jj]) + 0.5*b[jj]*np.cos(theta[jj]))],
               [int(c[jj][0] - 0.5*a[jj]*np.cos(theta[jj]) - 0.5*b[jj]*np.sin(theta[jj])),int( c[jj][1] - 0.5*a[jj]*np.sin(theta[jj]) + 0.5*b[jj]*np.cos(theta[jj]))],
               [int(c[jj][0] - 0.5*a[jj]*np.cos(theta[jj]) + 0.5*b[jj]*np.sin(theta[jj])),int( c[jj][1] - 0.5*a[jj]*np.sin(theta[jj]) - 0.5*b[jj]*np.cos(theta[jj]))],
               [int(c[jj][0] + 0.5*a[jj]*np.cos(theta[jj]) + 0.5*b[jj]*np.sin(theta[jj])),int( c[jj][1] + 0.5*a[jj]*np.sin(theta[jj]) - 0.5*b[jj]*np.cos(theta[jj]))]]
            pts = np.array(pts)
            pts = pts.reshape((-1, 1, 2))
            color = (255)
            # Line thickness of 8 px
            thickness = 2
            isClosed = True
            image = cv2.polylines(image, [pts],
                            isClosed, color,
                            thickness)
            image = cv2.fillPoly(image, [pts], 255)
            # Advance the center, bouncing off the borders, then rotate.
            c[jj][0] = c[jj][0] + v[jj][0]
            c[jj][1] = c[jj][1] + v[jj][1]
            if c[jj][0] > image_shape[0] or c[jj][0] < 0:
                v[jj][0] = -v[jj][0]
            if c[jj][1] > image_shape[1] or c[jj][1] < 0:
                v[jj][1] = -v[jj][1]
            theta[jj] = theta[jj] + omega[jj]
        # One-sided, clipped Gaussian noise in [0, 255].
        noise = np.random.normal(0, var, image_shape)
        noise[noise < 0] = 0
        noise[noise > 255] = 255
        noisy_image = 0.5*image + noise
        noisy_image[noisy_image > 255] = 255
        x.append(image/255)
        z.append(noisy_image/255)
    return x, z
def generate_image(r=0.1, shape=(128, 128)):
    """Generate a random binary image where each pixel is 1 with probability *r*.

    Parameters
    ----------
    r : float
        Per-pixel Bernoulli success probability.
    shape : tuple of int
        Output image shape; defaults to (128, 128), matching the previous
        hard-coded size (backward compatible).

    Returns
    -------
    ndarray of float 0.0/1.0 values with the requested shape.
    """
    # One vectorized draw replaces the former per-pixel double loop.
    return np.random.binomial(1, r, size=shape).astype(float)
def generate_deterministic_image(n_x=10, n_y=10, shape=(128, 128)):
    """Generate a binary image with ones on a regular grid.

    A pixel ``(i, j)`` is 1 iff ``i % n_x == 0`` and ``j % n_y == 0``.

    Parameters
    ----------
    n_x, n_y : int
        Row / column spacing of the grid points.
    shape : tuple of int
        Output image shape; defaults to (128, 128), matching the previous
        hard-coded size (backward compatible).

    Returns
    -------
    ndarray of float 0.0/1.0 values with the requested shape.
    """
    image = np.zeros(shape)
    # Strided slicing sets exactly the indices 0, n_x, 2*n_x, ... per axis,
    # replacing the former per-pixel double loop.
    image[::n_x, ::n_y] = 1
    return image
def get_video(video_path, n_frames=None):
    """Read a video into normalized grayscale and color frame lists.

    Every frame is resized to 128x128 and scaled to [0, 1]; the grayscale
    conversion happens before resizing, matching the original pipeline.

    Parameters
    ----------
    video_path : path/URL accepted by ``cv2.VideoCapture``.
    n_frames : optional cap on the number of frames to read (None = all).

    Returns
    -------
    (images_gray, images_color) : lists of float arrays.

    Notes
    -----
    Bug fix: the original looped on ``cap.isOpened()`` alone and crashed in
    ``cv2.cvtColor(None, ...)`` at end of stream; the read flag is checked
    now and the two near-identical loops were merged.
    """
    cap = cv2.VideoCapture(video_path)
    images_gray = []
    images_color = []
    n_images = 0
    try:
        while cap.isOpened():
            if n_frames is not None and n_images >= n_frames:
                break
            ret, frame = cap.read()
            if not ret or frame is None:
                # End of stream or read failure.
                break
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            gray = gray / 255.0
            gray = cv2.resize(gray, (128, 128))
            col = cv2.resize(frame, (128, 128))
            col = col / 255.0
            images_color.append(col)
            images_gray.append(gray)
            n_images += 1
    finally:
        # Release the capture even if decoding raises part-way through.
        cap.release()
    return images_gray, images_color
def video2dataset(observation_video_path, frame_size=(256, 256)):
    """Load a video as a stack of normalized grayscale frames.

    Each frame is converted to grayscale, scaled to [0, 1] and resized to
    *frame_size*; the result is returned as one (num_frames, H, W) array.
    """
    cap = cv2.VideoCapture(observation_video_path)
    frames = []
    while cap.isOpened():
        _, frame = cap.read()
        if frame is None:
            # End of stream (or read failure): stop collecting frames.
            break
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) / 255.0
        frames.append(cv2.resize(gray, frame_size))
    cap.release()
    return np.array(frames)
def face_detection(video_path, frame_size = (256,256)):
    """Extract landmark maps (mouth + eyebrows) from every frame of a video.

    For each frame, dlib's 68-point face landmark predictor is run and the
    selected landmark groups are drawn as dots on a blank frame, which is
    normalized to [0, 1] and resized to `frame_size`.

    Args:
        video_path: path to the input video.
        frame_size: output size of each landmark map.

    Returns:
        numpy array of shape (n_frames, *frame_size).
    """
    name_list = ['mouth', 'left_eyebrow', 'right_eyebrow']
    # Bug fix: the original opened the capture twice; once is enough.
    state_cap = cv2.VideoCapture(video_path)
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor('source_video/facial_expression/shape_predictor_68_face_landmarks.dat')
    state_images = []
    while state_cap.isOpened():
        ret, frame = state_cap.read()
        if frame is None:
            break
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        rects = detector(gray, 1)
        blank = np.zeros_like(gray)
        for rect in rects:
            # Determine the facial landmarks for the face region, then
            # convert the landmark (x, y)-coordinates to a NumPy array.
            shape = face_utils.shape_to_np(predictor(gray, rect))
            for (name, (i, j)) in face_utils.FACIAL_LANDMARKS_IDXS.items():
                if name in name_list:
                    for (x, y) in shape[i:j]:
                        cv2.circle(blank, (x, y), 1, 255, -1)
        blank = blank/255.0
        blank = cv2.resize(blank,frame_size)
        state_images.append(blank)
    # Bug fix: the capture was never released in the original.
    state_cap.release()
    return np.array(state_images)
def generate_dataset(n = 100,video_path = None, image_path = None, image_type = None, output_path = None, visualize = False, output_type = "images", output_folder = "dataset/images/dots/", partial = False, mask = None):
    """Build a dataset of (state, observation) image pairs and write it to disk.

    Exactly one source should be given: a video (`video_path`), a still image
    (`image_path`), or a generated pattern (`image_type` in {"dots",
    "checkers"}). Each output frame is the state map and the warped
    observation concatenated side by side; frames are written either as
    individual JPEGs or as one MP4, depending on `output_type`.

    Raises:
        ValueError: if no input source was specified.
    """
    # Bug fix: the module only does `from skimage import data`, which does
    # not bind the bare name `skimage`, so `skimage.io.imsave` below raised
    # NameError. Import the io submodule locally.
    import skimage.io
    frames = []
    if mask is not None:
        mask = cv2.imread(mask,0)
        mask = cv2.resize(mask, (128,128),interpolation = cv2.INTER_AREA)
    if video_path is not None:
        images, _ = get_video(video_path, n)
        x,z = get_dataset_from_video(images, n)
    elif image_path is not None:
        image = cv2.imread(image_path,0)
        image = cv2.resize(image, (128,128),interpolation = cv2.INTER_AREA)
        x,z = get_dataset_from_image(image/255, n, radius = [15, 25], partial = partial, mask = mask)
    elif image_type == "dots":
        image = generate_image(0.01)
        x,z = get_dataset_from_image(image, n, radius = [15, 25], partial = partial, mask = mask)
    elif image_type == "checkers":
        image = np.array(data.checkerboard()).astype(np.float64)
        image = cv2.resize(image, (128,128),interpolation = cv2.INTER_AREA)
        x,z = get_dataset_from_image(image/255, n, radius = [15, 25], partial = partial, mask = mask)
    else:
        # Previously this fell through with x/z undefined -> NameError below.
        raise ValueError("specify video_path, image_path, or image_type ('dots'/'checkers')")
    for ii in range(n):
        swirled = z[ii]
        state = x[ii]
        if visualize:
            cv2.imshow('swirled',swirled)
            cv2.imshow('state',state)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                cv2.destroyAllWindows()
                break
            time.sleep(0.01)
        frame = np.concatenate((state,swirled),axis = 1)
        if output_type == "video":
            frames.append(frame*255)
        elif output_type == "images":
            fname = output_folder+str(ii)+".jpg"
            skimage.io.imsave(fname,frame*255)
    if visualize:
        cv2.destroyAllWindows()
    if output_type == "video":
        fname = output_folder+"dataset.mp4"
        skvideo.io.vwrite(fname, frames)
| #!/usr/bin/env python3
import matplotlib.pyplot as plt
import numpy as np
from skimage import data
from skimage.transform import warp
import cv2
import time
from tqdm import tqdm
import skvideo.io
import imutils
import dlib
from imutils import face_utils
def drc(xy,c_xy,radius):
    """Radial displacement field used as a coordinate map for skimage's warp.

    Every coordinate in `xy` that lies within `radius` of the centre `c_xy`
    is pushed toward the centre by radius * exp(-0.0001 * r), where r is its
    distance from the centre; points at or beyond `radius` are unchanged.

    Args:
        xy: (N, 2) array of coordinates.
        c_xy: length-2 centre coordinate (array-like).
        radius: radius of the affected disc (also scales the displacement).

    Returns:
        (N, 2) array of displaced coordinates.
    """
    # Bug fix: the original did `xy_output = xy`, aliasing (and mutating)
    # the caller's array in place. Copying leaves the input untouched;
    # the returned values are identical because each row is written once.
    xy_output = xy.copy()
    for ii in range(len(xy)):
        r = np.sqrt((xy[ii, 0] - c_xy[0])**2 + (xy[ii, 1] - c_xy[1])**2)
        if r < radius:
            v = c_xy - xy[ii]
            norm = np.linalg.norm(v)
            if norm > 0:
                v = v / norm
            # Displace toward the centre; for a point exactly at the centre
            # v stays zero, so the point is left in place.
            xy_output[ii, :] = xy[ii, :] + radius * v * np.exp(-0.0001 * r)
    return xy_output
def add_circle_mag(image, c_xy, radius):
    """Warp `image` with the radial magnification field centred at `c_xy`."""
    def coordinate_map(xy):
        # Bind the centre and radius for skimage's inverse coordinate map.
        return drc(xy, c_xy, radius)
    return warp(image, coordinate_map)
def get_dataset_from_image(image, n = 500, n_circles = 2, radius = None, v = None, pose = None, partial = False, mask = None):
    """Synthesize n (state, observation) frame pairs of circles moving over a static image.

    Per frame, each circle's centre advances by its velocity and bounces off
    the image borders; the observation is `image` warped by add_circle_mag at
    each centre, and the state is a blank frame with the circles drawn filled.

    Args:
        image: 2-D float image (presumably with values in [0, 1] -- callers
            divide by 255 before passing it in; TODO confirm).
        n: number of frames to generate.
        n_circles: number of moving circles.
        radius: optional per-circle radii; random in [10, 30) when None.
        v: optional per-circle (vx, vy) velocities; components drawn from
            {-3, -2, -1, 1, 2, 3} when None.
        pose: optional per-circle initial (x, y) centres; random when None.
        partial: when True and mask is None, blank the fixed square region
            [20:90, 20:90] of the observation to 0.5 after each warp.
        mask: optional 2-D array; a circle's warp is only applied while the
            mask value at its centre is < 255.

    Returns:
        (x, z): numpy arrays of n frames each -- x holds 0.0/1.0 state maps,
        z holds the warped observations.
    """
    x = []
    z = []
    possible_speeds = [-3, -2, -1, 1, 2, 3]
    # Fill in any geometry parameters the caller did not supply.
    if radius is None:
        radius = []
        for ii in range(n_circles):
            radius_temp = np.random.randint(10,30, dtype=np.int8)
            radius.append(radius_temp)
    if v is None:
        v = []
        for ii in range(n_circles):
            v_temp = np.random.choice(possible_speeds, size = 2)
            v.append(v_temp)
    if pose is None:
        pose = []
        for ii in range(n_circles):
            pose_temp = np.empty(2)
            # NOTE(review): randint with dtype=np.int8 caps draws at 127 --
            # presumably fine for 128x128 images; verify for larger ones.
            pose_temp[0] = np.random.randint(0, image.shape[0], dtype=np.int8)
            pose_temp[1] = np.random.randint(0, image.shape[1], dtype=np.int8)
            pose.append(pose_temp)
    for ii in tqdm(range(n)):
        swirled = image.copy()
        state = np.zeros_like(swirled)
        for jj in range(n_circles):
            # Advance the circle, then reflect its velocity at the borders.
            pose[jj][0] = np.int32(pose[jj][0] + v[jj][0])
            pose[jj][1] = np.int32(pose[jj][1] + v[jj][1])
            if pose[jj][0] > image.shape[0] -1:
                pose[jj][0] = image.shape[0] -1
                v[jj][0] = -v[jj][0]
            if pose[jj][1] > image.shape[1]-1:
                pose[jj][1] = image.shape[1]-1
                v[jj][1] = -v[jj][1]
            if pose[jj][0] < 1:
                pose[jj][0] = 1
                v[jj][0] = -v[jj][0]
            if pose[jj][1] < 1:
                pose[jj][1] = 1
                v[jj][1] = -v[jj][1]
            if partial == True and ii > 0 and mask is None:
                swirled = add_circle_mag(swirled, pose[jj], radius[jj])
                # Blank a fixed window so part of the scene is unobserved.
                swirled[20:90,20:90] = 0.5
                # if pose[jj][0] < 30 or pose[jj][0] > 80 or pose[jj][1] < 30 or pose[jj][1] > 80:
                #     swirled = add_circle_mag(swirled, pose[jj], radius[jj])
            elif mask is not None:
                # Warp only while the mask is dark at the circle centre.
                if mask[int(pose[jj][1]),int(pose[jj][0])]<255:
                    swirled = add_circle_mag(swirled, pose[jj], radius[jj])
            else:
                swirled = add_circle_mag(swirled, pose[jj], radius[jj])
            state = cv2.circle(state,(int(pose[jj][0]), int(pose[jj][1])),radius[jj],(255,255,255),-1)
        # Floor-dividing the 0/255 circle map by 255.0 yields 0.0/1.0 values.
        x.append(state//255.0)
        z.append(swirled)
    x = np.array(x)
    z = np.array(z)
    return x,z
def get_dataset_from_video(images, n = 500, n_circles = 2, radius = None):
    """Synthesize (state, observation) pairs of circles moving over video frames.

    Same scheme as get_dataset_from_image, but the background of frame ii is
    images[ii] instead of one static image, and poses/velocities are always
    randomized here.

    Args:
        images: sequence of 2-D float frames (e.g. from get_video).
        n: number of frames to generate (clamped to len(images)).
        n_circles: number of moving circles.
        radius: optional per-circle radii; random in [10, 30) when None.

    Returns:
        (x, z): numpy arrays -- x holds 0.0/1.0 state maps, z the warped frames.
    """
    x = []
    z = []
    v = []
    pose = []
    if radius is None:
        radius = []
        for ii in range(n_circles):
            radius_temp = np.random.randint(10,30, dtype=np.int8)
            radius.append(radius_temp)
    image = images[0]
    possible_speeds = [-3, -2, -1, 1, 2, 3]
    # Never ask for more frames than the video provides.
    if len(images) < n:
        n = len(images)
    # Random initial centre and velocity for every circle.
    for ii in range(n_circles):
        pose_temp = np.empty(2)
        pose_temp[0] = np.random.randint(0, image.shape[0], dtype=np.int8)
        pose_temp[1] = np.random.randint(0, image.shape[1], dtype=np.int8)
        pose.append(pose_temp)
        v_temp = np.random.choice(possible_speeds, size = 2)
        v.append(v_temp)
    for ii in tqdm(range(n)):
        image = images[ii]
        swirled = image.copy()
        state = np.zeros_like(swirled)
        for jj in range(n_circles):
            # Advance the circle, then reflect its velocity at the borders.
            pose[jj] = np.int32(pose[jj] + v[jj])
            if pose[jj][0] > image.shape[0] -1:
                pose[jj][0] = image.shape[0] -1
                v[jj][0] = -v[jj][0]
            if pose[jj][1] > image.shape[1]-1:
                pose[jj][1] = image.shape[1]-1
                v[jj][1] = -v[jj][1]
            if pose[jj][0] < 1:
                pose[jj][0] = 1
                v[jj][0] = -v[jj][0]
            if pose[jj][1] < 1:
                pose[jj][1] = 1
                v[jj][1] = -v[jj][1]
            swirled = add_circle_mag(swirled, pose[jj], radius[jj])
            state = cv2.circle(state,(pose[jj][0], pose[jj][1]),radius[jj],(255,255,255),-1)
        # Floor-dividing the 0/255 circle map by 255.0 yields 0.0/1.0 values.
        x.append(state//255.0)
        z.append(swirled)
    x = np.array(x)
    z = np.array(z)
    return x,z
def get_dataset_rotating_objects(n = 500,var = 255, image_shape = (128,128)):
    """Synthesize n frames of two rotating, translating rectangles plus noisy copies.

    Two rectangles (side lengths a[j] x b[j]) translate with velocities v[j],
    bounce off the image bounds, and rotate at angular rates omega[j]. The
    clean frame goes into x; a dimmed copy with clipped Gaussian noise added
    goes into z.

    Args:
        n: number of frames.
        var: standard deviation of the additive Gaussian noise
            (samples are clipped into [0, 255]).
        image_shape: output frame shape.

    Returns:
        (x, z): lists of n frames, each scaled to [0, 1] -- x clean, z noisy.
    """
    # Fixed geometry for the two rectangles: side lengths, centres,
    # orientations, linear and angular velocities.
    a = [50,15]
    b = [25,25]
    c = [[50,20],[100,100]]
    theta = [0.5, 1.2]
    v = [[1,2],[-2,-1]]
    omega = [0.05, -0.05]
    x = []
    z = []
    for ii in tqdm(range(n)):
        image = np.zeros(image_shape)
        for jj in range(2):
            # Four corners of rectangle jj, rotated by theta[jj] about its centre.
            pts = [[int(c[jj][0] + 0.5*a[jj]*np.cos(theta[jj]) - 0.5*b[jj]*np.sin(theta[jj])), int( c[jj][1] + 0.5*a[jj]*np.sin(theta[jj]) + 0.5*b[jj]*np.cos(theta[jj]))],
                   [int(c[jj][0] - 0.5*a[jj]*np.cos(theta[jj]) - 0.5*b[jj]*np.sin(theta[jj])),int( c[jj][1] - 0.5*a[jj]*np.sin(theta[jj]) + 0.5*b[jj]*np.cos(theta[jj]))],
                   [int(c[jj][0] - 0.5*a[jj]*np.cos(theta[jj]) + 0.5*b[jj]*np.sin(theta[jj])),int( c[jj][1] - 0.5*a[jj]*np.sin(theta[jj]) - 0.5*b[jj]*np.cos(theta[jj]))],
                   [int(c[jj][0] + 0.5*a[jj]*np.cos(theta[jj]) + 0.5*b[jj]*np.sin(theta[jj])),int( c[jj][1] + 0.5*a[jj]*np.sin(theta[jj]) - 0.5*b[jj]*np.cos(theta[jj]))]]
            pts = np.array(pts)
            pts = pts.reshape((-1, 1, 2))
            color = (255)
            # Outline thickness in px (the original comment said 8, but 2 is used).
            thickness = 2
            isClosed = True
            image = cv2.polylines(image, [pts],
                        isClosed, color,
                        thickness)
            image = cv2.fillPoly(image, [pts], 255)
            # Translate the centre and bounce the velocity off the frame bounds.
            c[jj][0] = c[jj][0] + v[jj][0]
            c[jj][1] = c[jj][1] + v[jj][1]
            if c[jj][0] > image_shape[0] or c[jj][0] < 0:
                v[jj][0] = -v[jj][0]
            if c[jj][1] > image_shape[1] or c[jj][1] < 0:
                v[jj][1] = -v[jj][1]
            theta[jj] = theta[jj] + omega[jj]
        # Observation: dimmed frame plus non-negative clipped Gaussian noise.
        noise = np.random.normal(0, var, image_shape)
        noise[noise < 0] = 0
        noise[noise > 255] = 255
        noisy_image = 0.5*image + noise
        noisy_image[noisy_image > 255] = 255
        x.append(image/255)
        z.append(noisy_image/255)
    return x, z
def generate_image(r = 0.1):
    """Generate a 128x128 float image of random "dots".

    Each pixel is an independent Bernoulli(r) draw (1.0 with probability r,
    else 0.0).

    Args:
        r: probability that any given pixel is set to 1.

    Returns:
        (128, 128) float64 array of 0.0/1.0 values.
    """
    # One vectorized draw replaces the original per-pixel double loop;
    # the distribution of the result is unchanged.
    return np.random.binomial(1, r, size=(128, 128)).astype(np.float64)
def generate_deterministic_image(n_x = 10, n_y = 10):
    """Generate a 128x128 float image with a regular grid of single-pixel dots.

    A pixel (ii, jj) is 1 exactly when ii % n_x == 0 and jj % n_y == 0.

    Args:
        n_x: row spacing of the dot grid.
        n_y: column spacing of the dot grid.

    Returns:
        (128, 128) float64 array of 0.0/1.0 values.
    """
    image = np.zeros((128, 128))
    # Strided assignment hits exactly the (ii % n_x == 0, jj % n_y == 0)
    # positions that the original double loop set one by one.
    image[::n_x, ::n_y] = 1
    return image
def get_video(video_path, n_frames = None):
    """Read frames from a video file as grayscale and color images.

    Frames are resized to 128x128 and scaled to [0, 1].

    Args:
        video_path: path to the video file.
        n_frames: optional cap on the number of frames to read; all frames
            when None.

    Returns:
        (images_gray, images_color): two lists of float frames.
    """
    cap = cv2.VideoCapture(video_path)
    images_gray = []
    images_color = []
    n_images = 0
    # Single merged loop replaces the two near-identical branches of the
    # original; the n_frames cap is folded into the loop condition.
    while cap.isOpened() and (n_frames is None or n_images < n_frames):
        ret, frame = cap.read()
        if frame is None:
            # Bug fix: the original had no end-of-stream check when
            # n_frames was None, so cvtColor crashed on the final read.
            break
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = gray/255.0
        gray = cv2.resize(gray,(128,128))
        col = cv2.resize(frame,(128,128))
        col = col/255.0
        images_color.append(col)
        images_gray.append(gray)
        n_images += 1
    cap.release()
    return images_gray, images_color
def video2dataset(observation_video_path, frame_size = (256,256)):
    """Read every frame of a video as a normalized grayscale image.

    Frames are converted to grayscale, scaled to [0, 1], and resized to
    `frame_size`; reading stops at the first failed read (end of file).

    Returns:
        numpy array of shape (n_frames, *frame_size).
    """
    capture = cv2.VideoCapture(observation_video_path)
    observations = []
    while capture.isOpened():
        _, raw_frame = capture.read()
        if raw_frame is None:
            break
        grayscale = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2GRAY) / 255.0
        observations.append(cv2.resize(grayscale, frame_size))
    capture.release()
    return np.array(observations)
def face_detection(video_path, frame_size = (256,256)):
    """Extract landmark maps (mouth + eyebrows) from every frame of a video.

    For each frame, dlib's 68-point face landmark predictor is run and the
    selected landmark groups are drawn as dots on a blank frame, which is
    normalized to [0, 1] and resized to `frame_size`.

    Args:
        video_path: path to the input video.
        frame_size: output size of each landmark map.

    Returns:
        numpy array of shape (n_frames, *frame_size).
    """
    name_list = ['mouth', 'left_eyebrow', 'right_eyebrow']
    # Bug fix: the original opened the capture twice; once is enough.
    state_cap = cv2.VideoCapture(video_path)
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor('source_video/facial_expression/shape_predictor_68_face_landmarks.dat')
    state_images = []
    while state_cap.isOpened():
        ret, frame = state_cap.read()
        if frame is None:
            break
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        rects = detector(gray, 1)
        blank = np.zeros_like(gray)
        for rect in rects:
            # Determine the facial landmarks for the face region, then
            # convert the landmark (x, y)-coordinates to a NumPy array.
            shape = face_utils.shape_to_np(predictor(gray, rect))
            for (name, (i, j)) in face_utils.FACIAL_LANDMARKS_IDXS.items():
                if name in name_list:
                    for (x, y) in shape[i:j]:
                        cv2.circle(blank, (x, y), 1, 255, -1)
        blank = blank/255.0
        blank = cv2.resize(blank,frame_size)
        state_images.append(blank)
    # Bug fix: the capture was never released in the original.
    state_cap.release()
    return np.array(state_images)
def generate_dataset(n = 100,video_path = None, image_path = None, image_type = None, output_path = None, visualize = False, output_type = "images", output_folder = "dataset/images/dots/", partial = False, mask = None):
    """Build a dataset of (state, observation) image pairs and write it to disk.

    Exactly one source should be given: a video (`video_path`), a still image
    (`image_path`), or a generated pattern (`image_type` in {"dots",
    "checkers"}). Each output frame is the state map and the warped
    observation concatenated side by side; frames are written either as
    individual JPEGs or as one MP4, depending on `output_type`.

    Raises:
        ValueError: if no input source was specified.
    """
    # Bug fix: the module only does `from skimage import data`, which does
    # not bind the bare name `skimage`, so `skimage.io.imsave` below raised
    # NameError. Import the io submodule locally.
    import skimage.io
    frames = []
    if mask is not None:
        mask = cv2.imread(mask,0)
        mask = cv2.resize(mask, (128,128),interpolation = cv2.INTER_AREA)
    if video_path is not None:
        images, _ = get_video(video_path, n)
        x,z = get_dataset_from_video(images, n)
    elif image_path is not None:
        image = cv2.imread(image_path,0)
        image = cv2.resize(image, (128,128),interpolation = cv2.INTER_AREA)
        x,z = get_dataset_from_image(image/255, n, radius = [15, 25], partial = partial, mask = mask)
    elif image_type == "dots":
        image = generate_image(0.01)
        x,z = get_dataset_from_image(image, n, radius = [15, 25], partial = partial, mask = mask)
    elif image_type == "checkers":
        image = np.array(data.checkerboard()).astype(np.float64)
        image = cv2.resize(image, (128,128),interpolation = cv2.INTER_AREA)
        x,z = get_dataset_from_image(image/255, n, radius = [15, 25], partial = partial, mask = mask)
    else:
        # Previously this fell through with x/z undefined -> NameError below.
        raise ValueError("specify video_path, image_path, or image_type ('dots'/'checkers')")
    for ii in range(n):
        swirled = z[ii]
        state = x[ii]
        if visualize:
            cv2.imshow('swirled',swirled)
            cv2.imshow('state',state)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                cv2.destroyAllWindows()
                break
            time.sleep(0.01)
        frame = np.concatenate((state,swirled),axis = 1)
        if output_type == "video":
            frames.append(frame*255)
        elif output_type == "images":
            fname = output_folder+str(ii)+".jpg"
            skimage.io.imsave(fname,frame*255)
    if visualize:
        cv2.destroyAllWindows()
    if output_type == "video":
        fname = output_folder+"dataset.mp4"
        skvideo.io.vwrite(fname, frames)
src/utils/utils.py | jopetty/transd-dev | 0 | 6612911 | import logging
from typing import List, Sequence
import pytorch_lightning as pl
import rich.syntax
import rich.tree
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning import seed_everything
from pytorch_lightning.utilities import rank_zero_only
def set_all_seeds(seed: int, workers: bool = True):
seed_everything(seed=seed, workers=workers)
def get_logger(name=__name__) -> logging.Logger:
logger = logging.getLogger(name)
for level in (
"debug",
"info",
"warning",
"error",
"exception",
"fatal",
"critical",
):
setattr(logger, level, rank_zero_only(getattr(logger, level)))
return logger
@rank_zero_only
def print_config(
config: DictConfig,
fields: Sequence[str] = (
"trainer",
"model",
"datamodule",
"callbacks",
"logger",
"test_after_training",
"seed",
"name",
),
resolve: bool = True,
) -> None:
tree = rich.tree.Tree("CONFIG")
for field in fields:
branch = tree.add(field)
config_section = config.get(field)
branch_content = str(config_section)
if isinstance(config_section, DictConfig):
branch_content = OmegaConf.to_yaml(config_section, resolve=resolve)
branch.add(
rich.syntax.Syntax(
branch_content, "yaml", theme="default", background_color="default"
)
)
rich.print(tree)
with open("config_tree.log", "w") as fp:
rich.print(tree, file=fp)
@rank_zero_only
def log_hyperparameters(
config: DictConfig,
model: pl.LightningModule,
datamodule: pl.LightningDataModule,
trainer: pl.Trainer,
callbacks: List[pl.Callback],
logger: List[pl.loggers.LightningLoggerBase],
):
hparams = {}
hparams["trainer"] = config["trainer"]
hparams["model"] = config["model"]
hparams["datamodule"] = config["datamodule"]
if "seed" in config:
hparams["seed"] = config["seed"]
if "callbacks" in config:
hparams["callbacks"] = config["callbacks"]
hparams["model/params/total"] = sum(p.numel() for p in model.parameters())
hparams["model/params/trainable"] = sum(
p.numel() for p in model.parameters() if not p.requires_grad
)
trainer.logger.log_hyperparams(hparams)
def finish(
config: DictConfig,
model: pl.LightningModule,
datamodule: pl.LightningDataModule,
trainer: pl.Trainer,
callbacks: List[pl.Callback],
logger: List[pl.loggers.LightningLoggerBase],
):
for lg in logger:
if isinstance(lg, pl.loggers.wandb.WandbLogger):
import wandb
wandb.finish()
| import logging
from typing import List, Sequence
import pytorch_lightning as pl
import rich.syntax
import rich.tree
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning import seed_everything
from pytorch_lightning.utilities import rank_zero_only
def set_all_seeds(seed: int, workers: bool = True):
seed_everything(seed=seed, workers=workers)
def get_logger(name=__name__) -> logging.Logger:
logger = logging.getLogger(name)
for level in (
"debug",
"info",
"warning",
"error",
"exception",
"fatal",
"critical",
):
setattr(logger, level, rank_zero_only(getattr(logger, level)))
return logger
@rank_zero_only
def print_config(
config: DictConfig,
fields: Sequence[str] = (
"trainer",
"model",
"datamodule",
"callbacks",
"logger",
"test_after_training",
"seed",
"name",
),
resolve: bool = True,
) -> None:
tree = rich.tree.Tree("CONFIG")
for field in fields:
branch = tree.add(field)
config_section = config.get(field)
branch_content = str(config_section)
if isinstance(config_section, DictConfig):
branch_content = OmegaConf.to_yaml(config_section, resolve=resolve)
branch.add(
rich.syntax.Syntax(
branch_content, "yaml", theme="default", background_color="default"
)
)
rich.print(tree)
with open("config_tree.log", "w") as fp:
rich.print(tree, file=fp)
@rank_zero_only
def log_hyperparameters(
config: DictConfig,
model: pl.LightningModule,
datamodule: pl.LightningDataModule,
trainer: pl.Trainer,
callbacks: List[pl.Callback],
logger: List[pl.loggers.LightningLoggerBase],
):
hparams = {}
hparams["trainer"] = config["trainer"]
hparams["model"] = config["model"]
hparams["datamodule"] = config["datamodule"]
if "seed" in config:
hparams["seed"] = config["seed"]
if "callbacks" in config:
hparams["callbacks"] = config["callbacks"]
hparams["model/params/total"] = sum(p.numel() for p in model.parameters())
hparams["model/params/trainable"] = sum(
p.numel() for p in model.parameters() if not p.requires_grad
)
trainer.logger.log_hyperparams(hparams)
def finish(
config: DictConfig,
model: pl.LightningModule,
datamodule: pl.LightningDataModule,
trainer: pl.Trainer,
callbacks: List[pl.Callback],
logger: List[pl.loggers.LightningLoggerBase],
):
for lg in logger:
if isinstance(lg, pl.loggers.wandb.WandbLogger):
import wandb
wandb.finish()
| none | 1 | 2.297205 | 2 | |
CSharpExample/CSharpBasic/ConApp/Model/Calculator.py | huruiyi/CSharpExample | 3 | 6612912 | class Calculator:
def Add(self, a, b):
return a + b
def GetCalculator():
return Calculator() | class Calculator:
def Add(self, a, b):
return a + b
def GetCalculator():
return Calculator() | none | 1 | 2.827409 | 3 | |
v_m_b/S3WorkFileManager.py | buda-base/volume-manifest-builder | 1 | 6612913 | <reponame>buda-base/volume-manifest-builder
import boto3
class S3WorkFileManager:
"""
Manages volume manifest tool work files
"""
_hostname: str
# noinspection PyBroadException
@staticmethod
def me_instance() -> str:
"""
Returns a string representing an instance id
:return:
"""
instance_id: str = "unknown instance"
try:
import requests
response = requests.get('http://169.254.169.254/latest/meta-data/instance-id', timeout=2)
instance_id = response.text
except Exception:
from os import getpid
import platform
import datetime
# Build the destination name
now: datetime = datetime.datetime.now()
hostname: str = platform.node()
pid: int = getpid()
instance_id = f"{now.year}-{now.month}-{now.day}_{now.hour}_{now.minute}_{now.second}-{hostname}.{pid}"
return instance_id
def s3_move(self, src_object: str, dest_object: str, src_folder, dest_folder):
"""
Moves a source list from one "folder" to another in S3,
The bucket is found in the constructor
:param src_object: source object name
:param dest_object: destination
:param src_folder: source path under bucket
:param dest_folder: destination path
Throws on error
"""
src_object = f'{src_folder}{src_object}'
dest_object = f'{dest_folder}{dest_object}'
self.s3.Object(self._bucket_name, dest_object).copy_from(
CopySource={'Bucket': self._bucket_name, 'Key': src_object})
self.s3.Object(self._bucket_name, src_object).delete()
def s3_move_list(self, src_list: [], dest_list: [], src_path: str, dest_path: str):
for src, dest in zip(src_list, dest_list):
self.s3_move(src, dest, src_path, dest_path)
def local_name_work_file(self, file_name: str):
"""
Generate a name unique to this instance
:param file_name: Name to be transformed
:return: file_name-instance-id
"""
return f'{file_name}-{self._hostname}'
def mark_underway(self, object_list: [], dest_name_list: []):
"""
Moves a set of files from the instance's to do into underway.
Caller can use local_name_work_file() to rename
:param object_list:
:param dest_name_list:
:return:
"""
self.s3_move_list(object_list, dest_name_list, self._src_folder, self._underway_folder)
def mark_done(self, object_list: [], dest_name_list: []):
"""
Moves a set of files from the instance's underway folder to a done folder
:param object_list:
:param dest_name_list:
:return:
"""
self.s3_move_list(object_list, dest_name_list, self._underway_folder, self._done_folder, )
def __init__(self, bucket_name: str, src_folder: str, underway_folder: str, done_folder: str):
"""
Initializer:
:param bucket_name: scope of all operations
:param src_folder: location of work list
:param underway_folder: folder inside bucket where in progress files go
:param done_folder: folder inside bucket where completed files go
"""
self._bucket_name = bucket_name
self._hostname = self.me_instance()
self._src_folder = src_folder
self._underway_folder = underway_folder
self._done_folder = done_folder
self.s3 = boto3.resource('s3')
| import boto3
class S3WorkFileManager:
"""
Manages volume manifest tool work files
"""
_hostname: str
# noinspection PyBroadException
@staticmethod
def me_instance() -> str:
"""
Returns a string representing an instance id
:return:
"""
instance_id: str = "unknown instance"
try:
import requests
response = requests.get('http://169.254.169.254/latest/meta-data/instance-id', timeout=2)
instance_id = response.text
except Exception:
from os import getpid
import platform
import datetime
# Build the destination name
now: datetime = datetime.datetime.now()
hostname: str = platform.node()
pid: int = getpid()
instance_id = f"{now.year}-{now.month}-{now.day}_{now.hour}_{now.minute}_{now.second}-{hostname}.{pid}"
return instance_id
def s3_move(self, src_object: str, dest_object: str, src_folder, dest_folder):
"""
Moves a source list from one "folder" to another in S3,
The bucket is found in the constructor
:param src_object: source object name
:param dest_object: destination
:param src_folder: source path under bucket
:param dest_folder: destination path
Throws on error
"""
src_object = f'{src_folder}{src_object}'
dest_object = f'{dest_folder}{dest_object}'
self.s3.Object(self._bucket_name, dest_object).copy_from(
CopySource={'Bucket': self._bucket_name, 'Key': src_object})
self.s3.Object(self._bucket_name, src_object).delete()
def s3_move_list(self, src_list: [], dest_list: [], src_path: str, dest_path: str):
for src, dest in zip(src_list, dest_list):
self.s3_move(src, dest, src_path, dest_path)
def local_name_work_file(self, file_name: str):
"""
Generate a name unique to this instance
:param file_name: Name to be transformed
:return: file_name-instance-id
"""
return f'{file_name}-{self._hostname}'
def mark_underway(self, object_list: [], dest_name_list: []):
"""
Moves a set of files from the instance's to do into underway.
Caller can use local_name_work_file() to rename
:param object_list:
:param dest_name_list:
:return:
"""
self.s3_move_list(object_list, dest_name_list, self._src_folder, self._underway_folder)
def mark_done(self, object_list: [], dest_name_list: []):
"""
Moves a set of files from the instance's underway folder to a done folder
:param object_list:
:param dest_name_list:
:return:
"""
self.s3_move_list(object_list, dest_name_list, self._underway_folder, self._done_folder, )
def __init__(self, bucket_name: str, src_folder: str, underway_folder: str, done_folder: str):
"""
Initializer:
:param bucket_name: scope of all operations
:param src_folder: location of work list
:param underway_folder: folder inside bucket where in progress files go
:param done_folder: folder inside bucket where completed files go
"""
self._bucket_name = bucket_name
self._hostname = self.me_instance()
self._src_folder = src_folder
self._underway_folder = underway_folder
self._done_folder = done_folder
self.s3 = boto3.resource('s3') | en | 0.736775 | Manages volume manifest tool work files # noinspection PyBroadException Returns a string representing an instance id :return: # Build the destination name Moves a source list from one "folder" to another in S3, The bucket is found in the constructor :param src_object: source object name :param dest_object: destination :param src_folder: source path under bucket :param dest_folder: destination path Throws on error Generate a name unique to this instance :param file_name: Name to be transformed :return: file_name-instance-id Moves a set of files from the instance's to do into underway. Caller can use local_name_work_file() to rename :param object_list: :param dest_name_list: :return: Moves a set of files from the instance's underway folder to a done folder :param object_list: :param dest_name_list: :return: Initializer: :param bucket_name: scope of all operations :param src_folder: location of work list :param underway_folder: folder inside bucket where in progress files go :param done_folder: folder inside bucket where completed files go | 2.278212 | 2 |
hide_elements/models.py | chkgk/otree_hide_elements | 0 | 6612914 | <filename>hide_elements/models.py<gh_stars>0
from otree.api import (
models,
widgets,
BaseConstants,
BaseSubsession,
BaseGroup,
BasePlayer,
Currency as c,
currency_range,
)
author = '<NAME>'
doc = """
A code snippet to show how elements can be hidden permanently, i.e. they do not reappear and restart the timer
when the page is reloaded.
"""
class Constants(BaseConstants):
name_in_url = 'hide_elements'
players_per_group = None
num_rounds = 1
element_display_time = 15 # seconds
class Subsession(BaseSubsession):
pass
class Group(BaseGroup):
pass
class Player(BasePlayer):
element_first_seen = models.IntegerField(initial=0)
| <filename>hide_elements/models.py<gh_stars>0
from otree.api import (
models,
widgets,
BaseConstants,
BaseSubsession,
BaseGroup,
BasePlayer,
Currency as c,
currency_range,
)
author = '<NAME>'
doc = """
A code snippet to show how elements can be hidden permanently, i.e. they do not reappear and restart the timer
when the page is reloaded.
"""
class Constants(BaseConstants):
name_in_url = 'hide_elements'
players_per_group = None
num_rounds = 1
element_display_time = 15 # seconds
class Subsession(BaseSubsession):
pass
class Group(BaseGroup):
pass
class Player(BasePlayer):
element_first_seen = models.IntegerField(initial=0)
| en | 0.880651 | A code snippet to show how elements can be hidden permanently, i.e. they do not reappear and restart the timer when the page is reloaded. # seconds | 2.254405 | 2 |
CT/bioinformatics/q3.py | jfdur/durham-year1-archive | 0 | 6612915 | <reponame>jfdur/durham-year1-archive
import networkx as nx
import matplotlib as plt
import time
import copy
def wpgma(fileName):
f = open(fileName, 'r')
m = []
species = []
first = True
for line in f:
lineTokens = line.strip().split(' ')
lineTokensNoFirst = lineTokens[1:]
if first:
species = lineTokensNoFirst
first = False
continue
m.append([float(x) for x in lineTokensNoFirst])
f.close()
originalSpecies = copy.copy(species)
G = nx.Graph()
level = 0
print(species)
for i in m:
print(i)
while(len(m) > 1):
print()
r = reduceMatrix(m, species, G, originalSpecies, level)
m = r[0]
species = r[1]
level = r[2]
nx.draw(G, with_labels=True)
plt.pyplot.draw()
plt.pyplot.savefig(fileName + '.png')
def reduceMatrix(m, species, G, originalSpecies, level):
currentSpecies = species
minRow = -1
minCol = -1
minVal = -1
for i in range(0, len(m)):
col, val = min(enumerate(m[i]), key=lambda x: x[1] if x[1] > 0 else float('inf'))
if val != 0 and (minVal == -1 or val < minVal):
minRow = i
minCol = col
minVal = val
for i in range(0, len(m)):
for j in range(0, len(m[i])):
if ((i == minRow or i == minCol) and j != minRow and j != minCol):
m[i][j] = (m[minRow][j] + m[minCol][j]) / 2
elif ((j == minRow or j == minCol) and i != minRow and i != minCol):
m[i][j] = (m[i][minRow] + m[i][minCol]) / 2
speciesGroup = '(' + currentSpecies[minRow] + ',' + currentSpecies[minCol] + ')'
if not G.has_node(currentSpecies[minRow]):
G.add_node(currentSpecies[minRow])
if not G.has_node(currentSpecies[minCol]):
G.add_node(currentSpecies[minCol])
if not G.has_node(speciesGroup):
G.add_node(speciesGroup)
G.add_edge(currentSpecies[minRow], speciesGroup)
G.add_edge(currentSpecies[minCol], speciesGroup)
currentSpecies[minRow] = speciesGroup
currentSpecies.pop(minCol)
print(currentSpecies)
m.pop(minCol)
for i in m:
del i[minCol]
print(i)
return [m, currentSpecies, level + 1]
start = time.time()
wpgma('matrix2(1).txt')
stop = time.time()
print('Time taken to calculate matrices and draw phylogenetic tree: ' + str(stop - start))
| import networkx as nx
import matplotlib as plt
import time
import copy
def wpgma(fileName):
f = open(fileName, 'r')
m = []
species = []
first = True
for line in f:
lineTokens = line.strip().split(' ')
lineTokensNoFirst = lineTokens[1:]
if first:
species = lineTokensNoFirst
first = False
continue
m.append([float(x) for x in lineTokensNoFirst])
f.close()
originalSpecies = copy.copy(species)
G = nx.Graph()
level = 0
print(species)
for i in m:
print(i)
while(len(m) > 1):
print()
r = reduceMatrix(m, species, G, originalSpecies, level)
m = r[0]
species = r[1]
level = r[2]
nx.draw(G, with_labels=True)
plt.pyplot.draw()
plt.pyplot.savefig(fileName + '.png')
def reduceMatrix(m, species, G, originalSpecies, level):
currentSpecies = species
minRow = -1
minCol = -1
minVal = -1
for i in range(0, len(m)):
col, val = min(enumerate(m[i]), key=lambda x: x[1] if x[1] > 0 else float('inf'))
if val != 0 and (minVal == -1 or val < minVal):
minRow = i
minCol = col
minVal = val
for i in range(0, len(m)):
for j in range(0, len(m[i])):
if ((i == minRow or i == minCol) and j != minRow and j != minCol):
m[i][j] = (m[minRow][j] + m[minCol][j]) / 2
elif ((j == minRow or j == minCol) and i != minRow and i != minCol):
m[i][j] = (m[i][minRow] + m[i][minCol]) / 2
speciesGroup = '(' + currentSpecies[minRow] + ',' + currentSpecies[minCol] + ')'
if not G.has_node(currentSpecies[minRow]):
G.add_node(currentSpecies[minRow])
if not G.has_node(currentSpecies[minCol]):
G.add_node(currentSpecies[minCol])
if not G.has_node(speciesGroup):
G.add_node(speciesGroup)
G.add_edge(currentSpecies[minRow], speciesGroup)
G.add_edge(currentSpecies[minCol], speciesGroup)
currentSpecies[minRow] = speciesGroup
currentSpecies.pop(minCol)
print(currentSpecies)
m.pop(minCol)
for i in m:
del i[minCol]
print(i)
return [m, currentSpecies, level + 1]
start = time.time()
wpgma('matrix2(1).txt')
stop = time.time()
print('Time taken to calculate matrices and draw phylogenetic tree: ' + str(stop - start)) | none | 1 | 2.40662 | 2 | |
PictCorect/get_image.py | ringo156/IdolFaceClassify | 0 | 6612916 | #-*- coding:utf-8 -*-
import os
import sys
import time
import bs4
import urllib.request
class getImage:
def crawring(self, url, extensions):
"""
Content:
クローリング
Param:
url: クローリングするURL
extensions: 取得するリソースの拡張子(list)
"""
# 指定したURLのHTMLを取得
html = self.get_html_string(url)
if len(html) < 1:
print("HTMLが取得できませんでした。")
print("URLを確認してください。")
sys.exit(1)
# リソース取得
self.get_resource(html, extensions)
def get_resource(self, html, extensions):
"""
Content:
リソース取得
Param
html: HTML
extensions 拡張子のリスト
"""
resource_list = []
soup = bs4.BeautifulSoup(html, "lxml")
for a_tag in soup.find_all("a"):
href_str = a_tag.get("href")
try:
(path, ext) = os.path.splitext(href_str)
if ext in extensions:
resource_list.append(href_str)
except:
pass
resource_list = sorted(set(resource_list), key=resource_list.index)
for resource in resource_list:
try:
print("download ---> [%s]" % os.path.basename(resource))
request = urllib.request.urlopen(resource)
f = open(os.path.basename(resource), "wb")
f.write(request.read())
except Exception as e:
print(e)
print("download failed ... [%s]" % os.path.basename(resource))
finally:
time.sleep(1)
def get_html_string(self, url):
"""
Content:
HTML取得
Param:
url HTMLを取得するURL
"""
decoded_html = ""
# HTMLを取得
try:
request = urllib.request.urlopen(url)
html = request.read()
except:
return decoded_html
# エンコードを取得
enc = self.check_encoding(html)
if enc == None:
return decoded_html
# HTMLをデコード
decoded_html = html.decode(enc)
return decoded_html
def check_encoding(self, byte_string):
"""
Content:
文字コード確認
Param:
byte_string: バイト文字列
"""
encoding_list = ["utf-8", "utf_8", "euc_jp",
"euc_jis_2004", "euc_jisx0213", "shift_jis",
"shift_jis_2004","shift_jisx0213", "iso2022jp",
"iso2022_jp_1", "iso2022_jp_2", "iso2022_jp_3",
"iso2022_jp_ext","latin_1", "ascii"]
for enc in encoding_list:
try:
byte_string.decode(enc)
break
except:
enc = None
return enc
def check_args(self):
"""
Content:
起動引数確認
"""
if len(sys.argv) == 3:
return True
else:
return False
def print_usage(self):
print("Usage: %s URL Extensions" % __file__)
print("URLにはクロールしたいウェブサイトのアドレスを指定してください。")
print("Extensionsにはクロールしたときに取得するファイルの拡張子を指定してください。")
print("Extensionsはカンマ区切りで複数指定できます。")
    def main(self):
        """Entry point: validate argv, then start the crawl."""
        # Validate command line arguments.
        if self.check_args() is False:
            # NOTE(review): this calls the module-level name print_usage,
            # which does not exist - it should be self.print_usage();
            # as written this raises NameError on bad arguments.
            print_usage()
            sys.exit(1)
        url = sys.argv[1]
        extensions = sys.argv[2].split(",")
        # Start crawling. NOTE(review): relies on the module-level
        # `classgetimage` global rather than self - confirm intent.
        classgetimage.crawring(url, extensions)
# Script entry point: instantiate the crawler and run it.
if __name__ == "__main__":
    classgetimage=getImage()
    classgetimage.main()
| #-*- coding:utf-8 -*-
import os
import sys
import time
import bs4
import urllib.request
class getImage:
    """Minimal crawler: fetch one page and download the files it links to,
    filtered by file extension."""

    def crawring(self, url, extensions):
        """Fetch *url*, then download every linked resource whose file
        extension appears in *extensions*. Exits the process when the
        page cannot be retrieved.
        """
        # Fetch the HTML of the requested URL.
        html = self.get_html_string(url)
        if len(html) < 1:
            print("HTMLが取得できませんでした。")
            print("URLを確認してください。")
            sys.exit(1)
        # Download the linked resources.
        self.get_resource(html, extensions)

    def get_resource(self, html, extensions):
        """Collect <a href> targets whose extension is in *extensions* and
        download each into the current directory.
        """
        resource_list = []
        soup = bs4.BeautifulSoup(html, "lxml")
        for a_tag in soup.find_all("a"):
            href_str = a_tag.get("href")
            try:
                (path, ext) = os.path.splitext(href_str)
                if ext in extensions:
                    resource_list.append(href_str)
            except Exception:
                # href may be None (anchor without href) - skip it.
                pass
        # De-duplicate while preserving first-seen order.
        resource_list = sorted(set(resource_list), key=resource_list.index)
        for resource in resource_list:
            try:
                print("download ---> [%s]" % os.path.basename(resource))
                request = urllib.request.urlopen(resource)
                # BUGFIX: use a context manager so the output file is
                # closed (the original leaked the file handle).
                with open(os.path.basename(resource), "wb") as f:
                    f.write(request.read())
            except Exception as e:
                print(e)
                print("download failed ... [%s]" % os.path.basename(resource))
            finally:
                # Throttle: one second between downloads.
                time.sleep(1)

    def get_html_string(self, url):
        """Return the decoded HTML of *url*, or "" on any failure."""
        decoded_html = ""
        try:
            request = urllib.request.urlopen(url)
            html = request.read()
        except Exception:
            return decoded_html
        # Detect the character encoding.
        enc = self.check_encoding(html)
        if enc is None:
            return decoded_html
        # Decode the HTML bytes to a string.
        return html.decode(enc)

    def check_encoding(self, byte_string):
        """Return the first candidate encoding that can decode
        *byte_string*, or None when none of them can. (latin_1 accepts
        any byte sequence, so None is effectively unreachable.)
        """
        encoding_list = ["utf-8", "utf_8", "euc_jp",
                         "euc_jis_2004", "euc_jisx0213", "shift_jis",
                         "shift_jis_2004", "shift_jisx0213", "iso2022jp",
                         "iso2022_jp_1", "iso2022_jp_2", "iso2022_jp_3",
                         "iso2022_jp_ext", "latin_1", "ascii"]
        for enc in encoding_list:
            try:
                byte_string.decode(enc)
                return enc
            except Exception:
                continue
        return None

    def check_args(self):
        """Return True when exactly two command line arguments were given
        (script name + URL + extensions => len(sys.argv) == 3)."""
        return len(sys.argv) == 3

    def print_usage(self):
        """Print usage instructions (in Japanese) to stdout."""
        print("Usage: %s URL Extensions" % __file__)
        print("URLにはクロールしたいウェブサイトのアドレスを指定してください。")
        print("Extensionsにはクロールしたときに取得するファイルの拡張子を指定してください。")
        print("Extensionsはカンマ区切りで複数指定できます。")

    def main(self):
        """Entry point: validate argv, then start the crawl."""
        if self.check_args() is False:
            # BUGFIX: the original called print_usage() without self,
            # which raised NameError instead of printing the usage text.
            self.print_usage()
            sys.exit(1)
        url = sys.argv[1]
        extensions = sys.argv[2].split(",")
        # BUGFIX: call the method on self instead of relying on the
        # module-level `classgetimage` global.
        self.crawring(url, extensions)
# Script entry point: build one crawler instance and run it.
if __name__ == "__main__":
    classgetimage=getImage()
    classgetimage.main()
| ja | 0.985194 | #-*- coding:utf-8 -*- Content: クローリング Param: url: クローリングするURL extensions: 取得するリソースの拡張子(list) # 指定したURLのHTMLを取得 # リソース取得 Content: リソース取得 Param html: HTML extensions 拡張子のリスト Content: HTML取得 Param: url HTMLを取得するURL # HTMLを取得 # エンコードを取得 # HTMLをデコード Content: 文字コード確認 Param: byte_string: バイト文字列 Content: 起動引数確認 Content: main # 引数確認 # クロール開始 | 3.029192 | 3 |
name.py | sshell/osint-names | 3 | 6612917 | <gh_stars>1-10
### name.py [name] [m/f] ###
### spanning 138 years : 1880 - 2017 ###
# Plot the yearly popularity of a given name/sex combination from the
# per-year "yobYYYY.txt"-style files stored in ./namedata.
import os
import sys
import numpy as np
import matplotlib.pylab as plt
fol = 'namedata'
d = {}  # year (str) -> occurrence count (str) for the requested name
nm = sys.argv[1].title()   # name, normalised to Title case
sx = sys.argv[2].upper()   # sex, normalised to M/F
combo = nm + ',' + sx
cl = len(combo)+1          # offset of the count field within a matching line
for file in os.listdir(fol):
    with open(fol+'/'+file, 'r') as f:
        for line in f:
            # BUGFIX: anchor the match at the start of the line; the slice
            # below assumes the line begins with "Name,Sex," and a plain
            # substring test could match inside a different record.
            if line.startswith(combo):
                year = file[3:7]   # files are named yobYYYY.txt
                d[year] = line[cl:]
# BUGFIX: os.listdir() returns files in arbitrary order - sort the
# (year, count) pairs by year so the line plot runs chronologically.
pairs = sorted((int(y), int(c)) for y, c in d.items())
yr = [y for y, c in pairs]
fr = [c for y, c in pairs]
plt.title('popularity of the name '+ nm +' ('+ sx +') over time')
plt.plot(yr,fr)
#plt.yscale('log') # switch to log scaling
plt.show()
| ### name.py [name] [m/f] ###
### spanning 138 years : 1880 - 2017 ###
import os
import sys
import numpy as np
import matplotlib.pylab as plt
fol = 'namedata'
d = {}
nm = sys.argv[1].title()
sx = sys.argv[2].upper()
combo = nm + ',' + sx
cl = len(combo)+1
for file in os.listdir(fol):
with open(fol+'/'+file, 'r') as f:
for line in f:
if combo in line:
year = file[3:7]
d[year] = line[cl:]
yr = list(map(int, d.keys()))
fr = list(map(int, d.values()))
plt.title('popularity of the name '+ nm +' ('+ sx +') over time')
plt.plot(yr,fr)
#plt.yscale('log') # switch to log scaling
plt.show() | en | 0.186465 | ### name.py [name] [m/f] ### ### spanning 138 years : 1880 - 2017 ### #plt.yscale('log') # switch to log scaling | 2.872789 | 3 |
app/blueprints/web/__init__.py | AlwaysMessy/steam-fun | 0 | 6612918 | from flask import Blueprint
web = Blueprint('web', __name__)
#Register the view functions
from app.blueprints.web import hello | from flask import Blueprint
web = Blueprint('web', __name__)
#注册视图函数
from app.blueprints.web import hello | none | 1 | 1.434115 | 1 | |
Resene naloge/euler65.py | CadezDavid/ProjectEuler | 0 | 6612919 | <reponame>CadezDavid/ProjectEuler
import fractions
import math
def modulus(n):
    """Return the n-th continued-fraction term of e: [2; 1,2,1, 1,4,1, ...]."""
    if n == 1:
        return 2
    return 2 * (n // 3) if n % 3 == 0 else 1
def priblizek(n, i=1):
    """Return the n-term continued-fraction convergent of e as a Fraction."""
    term = fractions.Fraction(modulus(i), 1)
    if i == n:
        return term
    return term + fractions.Fraction(1, priblizek(n, i + 1))
def vsotastevca(n):
    """Return the sum of the decimal digits of the numerator of Fraction *n*."""
    return sum(map(int, str(n.numerator)))
import math
def modulus(n):
if n % 3 == 0:
return 2 * ( n // 3 )
elif n == 1:
return 2
else:
return 1
def priblizek(n, i=1):
if i == n:
return fractions.Fraction(modulus(i), 1)
return fractions.Fraction(modulus(i) + fractions.Fraction(1, priblizek(n, i + 1)), 1)
def vsotastevca(n):
return sum([int(i) for i in str(n.numerator)]) | none | 1 | 3.621505 | 4 | |
src/baskerville/simulation/real_timeish_simulation.py | equalitie/baskerville | 25 | 6612920 | # Copyright (c) 2020, eQualit.ie inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import datetime
import json
import os
import time
import traceback
import pandas as pd
from baskerville.util.helpers import get_logger, lines_in_file
from dateutil.tz import tzutc
logger = get_logger(__name__)
COUNTER = 0
SESSION_COUNTER = 0
topic_name = 'deflect.logs'
def load_logs(path):
    """
    Read newline-delimited JSON log records from *path*.
    :param str path: the path to file.json
    :return: one row per JSON line
    :rtype: pandas.DataFrame
    """
    read_opts = dict(orient='records', lines=True, encoding='utf-8')
    return pd.read_json(path, **read_opts)
def simulation(
        path,
        time_window,
        kafka_url='0.0.0.0:9092',
        zookeeper_url='localhost:2181',
        topic_name='deflect.logs',
        sleep=True,
        verbose=False,
        spark=None,
        use_spark=False
):
    """
    Loads raw logs, groups them by the defined time window and publishes
    the grouped raw logs in Kafka if a producer is given, else, it prints out
    the groups. After publishing the logs line by line, it will sleep for the
    x remaining seconds of the time window if any.
    :param str path: the path to raw logs as they are stored in ELS
    :param timedelta time_window: the time window for the interval
    :param str kafka_url: the url to kafka, defaults to '0.0.0.0:9092'
    :param str zookeeper_url: the url to zookeeper, defaults to
    'localhost:2181'
    :param bytes topic_name: the topic name to publish to
    :param bool sleep: if True, the program will sleep after publishing each
    group of time windowed logs, for the remaining seconds until a time window
    is complete.
    :param bool verbose: verbose flag
    :param spark: an existing SparkSession to reuse; a new one is created
    when None and the Spark path is taken
    :param bool use_spark: force the Spark path even for small files
    :return: None
    """
    # a short delay for warming up the pipeline
    time.sleep(30)
    producer = None
    if topic_name:
        from confluent_kafka import Producer
        producer = Producer({'bootstrap.servers': kafka_url})
    if not use_spark and lines_in_file(path) < 1e6:
        # pandas can usually handle well files under 1M lines - but that
        # depends on the machine running the script (amount of RAM)
        df = load_logs(path)
        publish_df_split_in_time_windows(
            time_window, producer, topic_name, df, verbose, sleep
        )
    else:
        from pyspark.sql import functions as F
        active_columns = [
            '@timestamp', 'timestamp', 'client_request_host', 'client_ip',
            'client_ua', 'client_url', 'content_type',
            'http_response_code', 'querystring',
            'reply_length_bytes'
        ]
        if not spark:
            from baskerville.spark import get_spark_session
            spark = get_spark_session()
            spark.conf.set('spark.driver.memory', '8G')
        print('Starting...')
        df = spark.read.json(path).cache()
        df = df.withColumn('timestamp', F.col('@timestamp').cast('timestamp'))
        # keep only the configured columns that actually exist in the data
        common_active_cols = [c for c in active_columns if c in df.columns]
        df = df.select(*common_active_cols).sort('@timestamp')
        print('Dataframe read...')
        min_max_df = df.agg(
            F.min(F.col('timestamp')).alias('min_ts'),
            F.max(F.col('timestamp')).alias('max_ts')
        ).collect()[0]
        current_window = min_max_df[0]
        max_window = min_max_df[1]
        window_df = None
        try:
            # slide a [current_window, current_window + time_window] filter
            # over the data until the maximum timestamp is passed
            while True:
                filter_ = (
                    (F.col('timestamp') >= current_window) &
                    (F.col('timestamp') <= current_window + time_window)
                )
                if verbose:
                    logger.info(f'Current window: {current_window}, '
                                f'Max window: {max_window}')
                    logger.info(f'Running for {str(filter_._jc)}')
                window_df = df.select(
                    *common_active_cols).where(filter_).cache()
                pandas_df = window_df.toPandas()
                if not pandas_df.empty:
                    publish_df_split_in_time_windows(
                        time_window, producer, topic_name, pandas_df, verbose, sleep
                    )
                current_window = current_window + time_window
                logger.info(f'{current_window} {max_window} {time_window}')
                if current_window > max_window:
                    logger.info(
                        f'>> EOF for Simulation, {current_window} {max_window}'
                    )
                    break
        except Exception:
            traceback.print_exc()
            pass
        finally:
            # release cached dataframes and the Spark cache
            if df:
                df.unpersist()
            if window_df:
                window_df.unpersist()
            if spark:
                spark.catalog.clearCache()
def publish_df_split_in_time_windows(
        time_window, producer, topic_name, df, verbose=False, sleep=True
):
    """
    Publish the dataframe split in time_window groups.
    :param datetime.timedelta time_window: the duration of the time window
    (the docstring previously said int seconds; the comparison with
    t_elapsed below shows a timedelta is expected)
    :param confluent_kafka.Producer producer: the kafka producer; when None
    nothing is published but counters still advance
    :topic_name the kafka topic_name
    :param pandas.DataFrame df: the dataframe to publish
    :param boolean verbose:
    :param boolean sleep: if True, sleep for the remaining of the time window
    seconds
    :return: None
    """
    # module-level counters accumulate across calls
    global COUNTER, SESSION_COUNTER
    # load logs and set the timestamp index
    df = df.set_index(pd.DatetimeIndex(df['@timestamp']))
    df.index = pd.to_datetime(df['@timestamp'], utc=True)
    # sort by time
    df.sort_index(inplace=True)
    # group by timeframe - supporting minutes for now
    groupped_df = df.groupby(pd.Grouper(freq=time_window))
    for group in groupped_df:
        time_start = datetime.datetime.now(tz=tzutc())
        # a "request set" is one (host, client_ip) pair within the window
        request_sets_df = group[1].groupby(
            ['client_request_host', 'client_ip']
        )
        len_request_sets = len(request_sets_df)
        SESSION_COUNTER += len_request_sets
        request_sets_df = None
        json_lines = json.loads(group[1].to_json(orient='records'))
        num_lines = len(json_lines)
        COUNTER += num_lines
        if verbose:
            logger.info('=' * 60)
            logger.info(f'request_set count: {len_request_sets}')
            logger.info(f'request_set count so far: {SESSION_COUNTER}')
        for line in json_lines:
            if producer is not None:
                producer.produce(
                    topic_name, json.dumps(line).encode('utf-8')
                )
                producer.poll(2)
        t_elapsed = datetime.datetime.now(tz=tzutc()) - time_start
        if verbose:
            logger.info('-' * 60)
            logger.info(f'>> Line count in this batch: {num_lines}')
            logger.info(f'>> Line count so far: {COUNTER}')
            logger.info(f'* Started at:{time_start}')
            logger.info(f'* Time elapsed: {t_elapsed}')
            logger.info(f'* Time window: {time_window}')
            logger.info('=' * 60)
        # pace the replay to (roughly) real time
        if sleep and t_elapsed < time_window:
            sleep_time = time_window - t_elapsed
            if verbose:
                logger.info(
                    f'Going to sleep for {sleep_time.total_seconds()} '
                    f'seconds...'
                )
            time.sleep(sleep_time.total_seconds())
# Manual smoke test: replay a small sample file into a local Kafka broker.
if __name__ == '__main__':
    curr_working_dir = os.path.abspath('')
    path_to_raw_logs = f'{curr_working_dir}' \
                       f'/../../../data/samples/test_data_1k.json'
    time_window = datetime.timedelta(seconds=120)
    kafka_url = '0.0.0.0:9092'
    simulation(
        path_to_raw_logs,
        time_window,
        kafka_url,
        topic_name=topic_name,
        verbose=True,
        sleep=True,
        use_spark=True
    )
| # Copyright (c) 2020, eQualit.ie inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import datetime
import json
import os
import time
import traceback
import pandas as pd
from baskerville.util.helpers import get_logger, lines_in_file
from dateutil.tz import tzutc
logger = get_logger(__name__)
COUNTER = 0
SESSION_COUNTER = 0
topic_name = 'deflect.logs'
def load_logs(path):
"""
Load json logs from a path
:param str path: the path to file.json
:return: a pandas Dataframe with the logs
:rtype: pandas.DataFrame
"""
return pd.read_json(path, orient='records', lines=True, encoding='utf-8')
def simulation(
path,
time_window,
kafka_url='0.0.0.0:9092',
zookeeper_url='localhost:2181',
topic_name='deflect.logs',
sleep=True,
verbose=False,
spark=None,
use_spark=False
):
"""
Loads raw logs, groups them by the defined time window and publishes
the grouped raw logs in Kafka if a producer is given, else, it prints out
the groups. After publishing the logs line by line, it will sleep for the
x remaining seconds of the time window if any.
:param str path: the path to raw logs as they are stored in ELS
:param timedelta time_window: the time window for the interval
:param str kafka_url: the url to kafka, defaults to '0.0.0.0:9092'
:param str zookeeper_url: the url to zookeeper, defaults to
'localhost:2181'
:param bytes topic_name: the topic name to publish to
:param bool sleep: if True, the program will sleep after publishing each
group of time windowed logs, for the remaining seconds until a time window
is complete.
:param bool verbose: verbose flag
:return: None
"""
# a short delay for warming up the pipeline
time.sleep(30)
producer = None
if topic_name:
from confluent_kafka import Producer
producer = Producer({'bootstrap.servers': kafka_url})
if not use_spark and lines_in_file(path) < 1e6:
# pandas can usually handle well files under 1M lines - but that
# depends on the machine running the script (amount of RAM)
df = load_logs(path)
publish_df_split_in_time_windows(
time_window, producer, topic_name, df, verbose, sleep
)
else:
from pyspark.sql import functions as F
active_columns = [
'@timestamp', 'timestamp', 'client_request_host', 'client_ip',
'client_ua', 'client_url', 'content_type',
'http_response_code', 'querystring',
'reply_length_bytes'
]
if not spark:
from baskerville.spark import get_spark_session
spark = get_spark_session()
spark.conf.set('spark.driver.memory', '8G')
print('Starting...')
df = spark.read.json(path).cache()
df = df.withColumn('timestamp', F.col('@timestamp').cast('timestamp'))
common_active_cols = [c for c in active_columns if c in df.columns]
df = df.select(*common_active_cols).sort('@timestamp')
print('Dataframe read...')
min_max_df = df.agg(
F.min(F.col('timestamp')).alias('min_ts'),
F.max(F.col('timestamp')).alias('max_ts')
).collect()[0]
current_window = min_max_df[0]
max_window = min_max_df[1]
window_df = None
try:
while True:
filter_ = (
(F.col('timestamp') >= current_window) &
(F.col('timestamp') <= current_window + time_window)
)
if verbose:
logger.info(f'Current window: {current_window}, '
f'Max window: {max_window}')
logger.info(f'Running for {str(filter_._jc)}')
window_df = df.select(
*common_active_cols).where(filter_).cache()
pandas_df = window_df.toPandas()
if not pandas_df.empty:
publish_df_split_in_time_windows(
time_window, producer, topic_name, pandas_df, verbose, sleep
)
current_window = current_window + time_window
logger.info(f'{current_window} {max_window} {time_window}')
if current_window > max_window:
logger.info(
f'>> EOF for Simulation, {current_window} {max_window}'
)
break
except Exception:
traceback.print_exc()
pass
finally:
if df:
df.unpersist()
if window_df:
window_df.unpersist()
if spark:
spark.catalog.clearCache()
def publish_df_split_in_time_windows(
time_window, producer, topic_name, df, verbose=False, sleep=True
):
"""
Publish the dataframe split in time_window seconds.
:param int time_window: the duration of the time window in seconds
:param confluent_kafka.Producer producer: the kafka producer
:topic_name the kafka topic_name
:param pandas.DataFrame df: the dataframe to publish
:param boolean verbose:
:param boolean sleep:if True, sleep for the remaining of the time window
seconds
:return: None
"""
global COUNTER, SESSION_COUNTER
# load logs and set the timestamp index
df = df.set_index(pd.DatetimeIndex(df['@timestamp']))
df.index = pd.to_datetime(df['@timestamp'], utc=True)
# sort by time
df.sort_index(inplace=True)
# group by timeframe - supporting minutes for now
groupped_df = df.groupby(pd.Grouper(freq=time_window))
for group in groupped_df:
time_start = datetime.datetime.now(tz=tzutc())
request_sets_df = group[1].groupby(
['client_request_host', 'client_ip']
)
len_request_sets = len(request_sets_df)
SESSION_COUNTER += len_request_sets
request_sets_df = None
json_lines = json.loads(group[1].to_json(orient='records'))
num_lines = len(json_lines)
COUNTER += num_lines
if verbose:
logger.info('=' * 60)
logger.info(f'request_set count: {len_request_sets}')
logger.info(f'request_set count so far: {SESSION_COUNTER}')
for line in json_lines:
if producer is not None:
producer.produce(
topic_name, json.dumps(line).encode('utf-8')
)
producer.poll(2)
t_elapsed = datetime.datetime.now(tz=tzutc()) - time_start
if verbose:
logger.info('-' * 60)
logger.info(f'>> Line count in this batch: {num_lines}')
logger.info(f'>> Line count so far: {COUNTER}')
logger.info(f'* Started at:{time_start}')
logger.info(f'* Time elapsed: {t_elapsed}')
logger.info(f'* Time window: {time_window}')
logger.info('=' * 60)
if sleep and t_elapsed < time_window:
sleep_time = time_window - t_elapsed
if verbose:
logger.info(
f'Going to sleep for {sleep_time.total_seconds()} '
f'seconds...'
)
time.sleep(sleep_time.total_seconds())
if __name__ == '__main__':
curr_working_dir = os.path.abspath('')
path_to_raw_logs = f'{curr_working_dir}' \
f'/../../../data/samples/test_data_1k.json'
time_window = datetime.timedelta(seconds=120)
kafka_url = '0.0.0.0:9092'
simulation(
path_to_raw_logs,
time_window,
kafka_url,
topic_name=topic_name,
verbose=True,
sleep=True,
use_spark=True
)
| en | 0.813736 | # Copyright (c) 2020, eQualit.ie inc. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. Load json logs from a path :param str path: the path to file.json :return: a pandas Dataframe with the logs :rtype: pandas.DataFrame Loads raw logs, groups them by the defined time window and publishes the grouped raw logs in Kafka if a producer is given, else, it prints out the groups. After publishing the logs line by line, it will sleep for the x remaining seconds of the time window if any. :param str path: the path to raw logs as they are stored in ELS :param timedelta time_window: the time window for the interval :param str kafka_url: the url to kafka, defaults to '0.0.0.0:9092' :param str zookeeper_url: the url to zookeeper, defaults to 'localhost:2181' :param bytes topic_name: the topic name to publish to :param bool sleep: if True, the program will sleep after publishing each group of time windowed logs, for the remaining seconds until a time window is complete. :param bool verbose: verbose flag :return: None # a short delay for warming up the pipeline # pandas can usually handle well files under 1M lines - but that # depends on the machine running the script (amount of RAM) Publish the dataframe split in time_window seconds. :param int time_window: the duration of the time window in seconds :param confluent_kafka.Producer producer: the kafka producer :topic_name the kafka topic_name :param pandas.DataFrame df: the dataframe to publish :param boolean verbose: :param boolean sleep:if True, sleep for the remaining of the time window seconds :return: None # load logs and set the timestamp index # sort by time # group by timeframe - supporting minutes for now | 2.564015 | 3 |
lib/geneUsageLib.py | ngannguyen/immunoseq | 2 | 6612921 | #nknguyen soe ucsc edu
#Tue Jul 17 10:56:47 PDT 2012
#Library of functions used to compute the gene usage
import sys, re, os, random, copy
from optparse import OptionParser
from scipy.stats.stats import pearsonr, spearmanr, kendalltau
from sonLib.bioio import system
import numpy as np
import immunoseq.lib.immunoseqLib as iseqlib
def addAvrSample( samples ):
    '''Append two synthetic samples - the per-gene average ("average") and
    standard deviation ("std") across all input samples - to *samples*.

    Each sample exposes .usage = {'v'|'j'|'vj': {gene: [reads, uniqs]}}.
    Mutates the input list in place.

    NOTE(review): `Sample` is not imported in this module - presumably it
    comes from immunoseqLib (iseqlib.Sample); confirm before running.
    '''
    if len(samples) == 0:
        return
    # While accumulating: {'v': {gene: [[reads, uniqs] per sample]}};
    # replaced below by [mean_reads, mean_uniqs] / [std_reads, std_uniqs].
    avrusage = {'v':{}, 'j':{}, 'vj':{}} #'v':{ 'vgene':[totalreads, uniqseqs] }
    stdusage = {'v':{}, 'j':{}, 'vj':{}} #'v':{ 'vgene':[totalreads, uniqseqs] }
    # Accumulate the per-gene count pairs across samples:
    for s in samples:
        for type in avrusage:
            g2c = s.usage[type]
            typeusage = avrusage[type]
            for g in g2c:
                if g not in typeusage:
                    typeusage[g] = [ g2c[g] ]
                else:
                    typeusage[g].append( g2c[g] )
                #typeusage[g][1] += g2c[g][1]
    # Reduce the accumulated pairs to mean and standard deviation:
    avrsample = Sample('average')
    stdsample = Sample('std')
    for type in avrusage:
        for g in avrusage[type]:
            totalreads = [ sample[0] for sample in avrusage[type][g] ]
            uniqseqs = [ sample[1] for sample in avrusage[type][g] ]
            avrusage[type][g] = [np.mean(totalreads), np.mean(uniqseqs)]
            stdusage[type][g] = [np.std(totalreads), np.std(uniqseqs)]
    avrsample.usage = avrusage
    avrsample.setCounts()
    stdsample.usage = stdusage
    stdsample.setCounts()
    samples.append(avrsample)
    samples.append(stdsample)
def getGenes(seq, type):
    """Return the gene list of *seq* for the given gene type.

    :param seq: sequence record exposing .vs, .ds and .js gene lists
    :param str type: one of 'v', 'd', 'j'
    :raises ValueError: when *type* is not a recognized gene type
    """
    if type not in ['v', 'j', 'd']:
        # BUGFIX: the original message blamed "singleUsage"; report the
        # actual function so other callers are not misled.
        raise ValueError("getGenes, %s is not a valid genetype. Valid choices are v, d, j" %type)
    if type == 'v':
        return seq.vs
    elif type == 'j':
        return seq.js
    else:
        return seq.ds
def singleUsage(seqs, type):
    """Compute per-gene usage for one gene type.

    :param dict seqs: {id: seq}; each seq exposes .vs/.ds/.js and .count
    :param str type: gene type, one of 'v', 'd', 'j'
    :return: {gene: [read_count, unique_seq_count]} where a sequence with
        k candidate genes contributes count/k reads and 1/k uniques to each
    """
    gene2count = {}
    for seq in seqs.values():
        genes = getGenes(seq, type)
        # skip sequences with no usable gene call
        if not genes or '(undefined)' in genes or '' in genes:
            continue
        readshare = float(seq.count) / len(genes)
        uniqshare = 1.0 / len(genes)
        for gene in genes:
            reads, uniqs = gene2count.get(gene, [0.0, 0.0])
            gene2count[gene] = [reads + readshare, uniqs + uniqshare]
    return gene2count
def combinationUsage( seqs, types ):
    """Compute usage counts for gene-type combinations (e.g. VJ, VDJ).

    :param dict seqs: {id: seq}; each seq exposes .vs/.ds/.js and .count
    :param types: ordered list of gene types, e.g. ['v', 'j']
    :return: {"g1|g2|...": [read_count, unique_seq_count]} where a sequence
        with k possible gene combinations contributes count/k reads and
        1/k uniques to each combination
    """
    comb2count = {} #key = combination of geneNames, val = [totalReads, uniqueSeqs]
    for seq in seqs.values():
        type2genes = {}
        totalCombinations = 1
        for type in types:
            genes = getGenes(seq, type)
            type2genes[type] = genes
            totalCombinations *= len(genes)
        if totalCombinations == 0:
            # at least one gene type is uncalled for this sequence - skip it
            continue
        count = float(seq.count)/totalCombinations
        share = 1.0/totalCombinations
        # Build all "g1|g2|..." combination strings, one gene per type.
        combs = type2genes[ types[0] ]
        # BUGFIX: range replaces the Python-2-only xrange.
        for i in range(1, len(types)):
            currcombs = []
            for gene in type2genes[ types[i] ]:
                for comb in combs:
                    currcombs.append( "|".join([comb, gene]) )
            combs = currcombs
        for comb in combs:
            if comb not in comb2count:
                comb2count[comb] = [count, share]
            else:
                currcount = comb2count[comb]
                comb2count[comb] = [ currcount[0] + count, currcount[1] + share ]
    return comb2count
def getGene2count(seqs, similarGenes=None):
    """Compute usage for all single gene types and their combinations.

    :param dict seqs: {id: seq}; each seq exposes .vs/.ds/.js and .count
    :param similarGenes: list of V genes to merge into one combined entry;
        defaults to ['TRBV6-5', 'TRBV6-6'] (hard to distinguish from short
        reads), matching the original hard-coded behavior
    :return: {'v'|'d'|'j'|'dj'|'vj'|'vdj': {gene(s): [reads, uniqs]}}
    """
    if similarGenes is None:
        similarGenes = ['TRBV6-5', 'TRBV6-6']
    # Single gene types:
    type2gene2count = { 'v':{}, 'j':{}, 'd': {}, 'dj':{}, 'vj':{}, 'vdj':{} }
    for type in ('v', 'j', 'd'):
        type2gene2count[type] = singleUsage(seqs, type)
    # Gene-type combinations (each letter of the key is one gene type):
    for comb in ('dj', 'vj', 'vdj'):
        type2gene2count[comb] = combinationUsage(seqs, list(comb))
    combineVgenes(type2gene2count, similarGenes)
    return type2gene2count
def combineVgenes(type2gene2count, genes):
    """Merge the V genes listed in *genes* into a single combined entry
    (named "geneA/geneB/...") in the 'v', 'vj' and 'vdj' usage tables,
    summing their read and unique-sequence counts. Mutates
    *type2gene2count* in place.

    BUGFIX: the original used the Python-2-only dict.iteritems(); this
    version uses .items()/explicit key lists, which work on both 2 and 3.
    """
    merged_name = '/'.join(genes)
    # --- plain V usage: sum and replace the listed genes ---
    vtable = type2gene2count['v']
    merged = [0.0, 0.0]
    for name in list(vtable):
        if name in genes:
            counts = vtable.pop(name)
            merged[0] += counts[0]
            merged[1] += counts[1]
    # The combined entry is added even when none of the genes were present,
    # matching the original behavior.
    vtable[merged_name] = merged
    # --- combinations whose first field is a V gene ---
    for comb in ('vj', 'vdj'):
        if comb not in type2gene2count:
            continue
        table = type2gene2count[comb]
        suffix2counts = {}  # J or DJ suffix -> summed counts
        for key in list(table):
            fields = key.split('|')
            if fields[0] not in genes:
                continue
            counts = table.pop(key)
            suffix = '|'.join(fields[1:])
            if suffix in suffix2counts:
                suffix2counts[suffix][0] += counts[0]
                suffix2counts[suffix][1] += counts[1]
            else:
                suffix2counts[suffix] = [counts[0], counts[1]]
        # Re-insert the merged combinations under the combined V name.
        for suffix, counts in suffix2counts.items():
            table['|'.join([merged_name, suffix])] = counts
def getUnionGeneList(samples, type):
    """Return the sorted union of gene names of genetype *type* across
    *samples*, padding every sample's usage table with [0, 0] entries for
    genes it lacks (mutates the samples in place).

    :param samples: objects exposing .usage[type] -> {gene: [reads, uniqs]}
    :param str type: gene type key, e.g. 'v', 'j', 'vj'
    :return: sorted list of all gene names seen in any sample
    """
    # Use a set for O(1) membership instead of the original O(n^2) list scan.
    geneset = set()
    for s in samples:
        geneset.update(s.usage[type])
    genes = sorted(geneset)
    # Ensure every sample has an entry for every gene in the union.
    for s in samples:
        table = s.usage[type]
        for g in genes:
            if g not in table:
                table[g] = [0, 0]
    return genes
def addSamplingStats(type2gene2count, aggType2gene2count, i):
    """Fold the counts of one sampling round into the aggregate tables.

    :param type2gene2count: this round's {type: {gene: [reads, uniqs]}}
    :param aggType2gene2count: aggregate {type: {gene: [[reads...], [uniqs...]]}},
        mutated in place; each inner list holds one value per sampling round
    :param int i: number of rounds already folded in (0-based index of the
        current round)
    """
    # .items() replaces the Python-2-only .iteritems() of the original.
    for type, gene2count in type2gene2count.items():
        if type not in aggType2gene2count:
            aggType2gene2count[type] = {}
        aggGene2count = aggType2gene2count[type]
        for gene, counts in gene2count.items():
            if gene not in aggGene2count:
                # Gene first seen in round i: zero-pad the earlier rounds.
                aggGene2count[gene] = [[0.0] * i + [c] for c in counts]
            else:
                aggCounts = aggGene2count[gene]
                aggCounts[0].append(counts[0])
                aggCounts[1].append(counts[1])
    # BUGFIX: genes (or whole types) seen in earlier rounds but absent from
    # this one were left short, which skewed the later mean/std; pad them
    # with zeros so every per-gene list has exactly i + 1 entries.
    for aggGene2count in aggType2gene2count.values():
        for aggCounts in aggGene2count.values():
            while len(aggCounts[0]) < i + 1:
                aggCounts[0].append(0.0)
                aggCounts[1].append(0.0)
def avrSamplingStats(aggType2gene2count):
    """Reduce aggregated sampling counts to per-gene mean and std tables.

    :param aggType2gene2count: {type: {gene: [[reads...], [uniqs...]]}}
    :return: (avr, std) pair of {type: {gene: [reads_stat, uniqs_stat]}}

    BUGFIX: .items() replaces the Python-2-only .iteritems().
    """
    avrtype2gene2count = {}
    stdtype2gene2count = {}
    for type, gene2count in aggType2gene2count.items():
        avrtype2gene2count[type] = {}
        stdtype2gene2count[type] = {}
        for gene, counts in gene2count.items():
            avrtype2gene2count[type][gene] = [np.mean(counts[0]), np.mean(counts[1])]
            stdtype2gene2count[type][gene] = [np.std(counts[0]), np.std(counts[1])]
    return avrtype2gene2count, stdtype2gene2count
def usageTab(types, sample, avrstats, stdstats, type2genelist, outdir):
    """Write one tab-separated usage table per gene type for *sample*.

    Each table (<outdir>/<type>/<sample>-<type>.txt) lists, per gene:
    mean reads, % reads, mean uniques, % uniques, std reads, std uniques.
    *avrstats*/*stdstats* are {type: {gene: [reads, uniqs]}}; genes come
    from *type2genelist* when present, else from the sample's own genes.
    NOTE(review): "%d" truncates the float averages to integers - confirm
    this rounding is intended.
    """
    for type in types:
        avrgene2count = {}
        stdgene2count = {}
        totalreads = 0
        totaluniqs = 0
        if type in avrstats:
            avrgene2count = avrstats[type]
            stdgene2count = stdstats[type]
            totalreads = sum([counts[0] for counts in avrgene2count.values()])
            totaluniqs = sum([counts[1] for counts in avrgene2count.values()])
        #if totalreads == 0 or totaluniqs == 0:
        #    raise ValueError("sample with zero read/sequence")
        if type in type2genelist:
            genes = type2genelist[type]
        else:
            genes = sorted( avrgene2count.keys() )
        typedir = os.path.join(outdir, type)
        outfile = os.path.join(typedir, "%s-%s.txt" %(sample, type) )
        f = open(outfile, 'w')
        f.write("Gene\tReads\t%Reads\tUniq\t%uniq\tStdReads\tStdUniq\n")
        #numpass = 0
        for g in genes:
            if g not in avrgene2count:
                # gene in the reference list but unseen in this sample
                sys.stderr.write("Gene %s is not in avrgene2count %s\n" %(g, ','.join(avrgene2count.keys()) ))
                avrcounts = [0.0, 0.0]
                stdcounts = [0.0, 0.0]
            else:
                #numpass += 1
                avrcounts = avrgene2count[g]
                stdcounts = stdgene2count[g]
            read = avrcounts[0]
            uniq = avrcounts[1]
            readPc = 0.0
            uniqPc = 0.0
            if totalreads > 0:
                readPc = iseqlib.getPc(read, totalreads)
            if totaluniqs > 0:
                uniqPc = iseqlib.getPc(uniq, totaluniqs)
            f.write("%s\t%d\t%f\t%d\t%f\t%d\t%d\n" %(g, read, readPc, uniq, uniqPc, stdcounts[0], stdcounts[1]))
        f.close()
        #if numpass == 0:
        #    raise ValueError("ERROR\n")
def geneUsedSample(avrstats):
    """Count, per gene type, how many genes have a non-zero read count.

    :param avrstats: {type: {gene: [reads, uniqs]}}
    :return: {type: number of genes with reads > 0}, only for the gene
        types present in *avrstats*
    """
    type2count = {}
    for genetype in ('v', 'j', 'd', 'dj', 'vj', 'vdj'):
        if genetype in avrstats:
            gene2count = avrstats[genetype]
            type2count[genetype] = sum(
                1 for counts in gene2count.values() if counts[0] > 0)
    return type2count
def geneUsed(avrstats, type2genelist, outfile):
    """Write a per-gene-type summary of how many genes are used.

    Columns: Genetype, Total (size of the reference list, or NA), Used
    (genes with reads > 0, from geneUsedSample), Percentage (or NA when
    the gene type has no reference list).
    :raises ValueError: when a reference gene list is empty
    """
    types = ['v', 'j', 'd', 'dj', 'vj', 'vdj']
    f = open(outfile, 'w')
    f.write("Genetype\tTotal\tUsed\tPercentage\n")
    type2count = geneUsedSample(avrstats)
    for type in types:
        if type not in type2count:
            continue
        used = type2count[type]
        if type in type2genelist:
            total = len(type2genelist[type])
            if total == 0:
                raise ValueError("Genetype %s has zero genes\n" %type)
            pc = 100.0*used/total
            f.write("%s\t%d\t%d\t%f\n" %(type, total, used, pc))
        else:
            # no reference list for this type: totals unknown
            f.write("%s\tNA\t%d\tNA\n" %(type, used))
    f.close()
def geneUsedSummary(sample2stats, type2genelist, group2samples, outfile, abs):
    """Write a gene-usage summary table: one row per sample plus one
    averaged row per group; one column per gene type.

    :param sample2stats: {sample: {type: {gene: [reads, uniqs]}}}
    :param type2genelist: {type: reference gene list} (used for percentages)
    :param group2samples: {group name: [sample names]}
    :param str outfile: output path
    :param bool abs: write absolute counts when True, else percentages of
        the reference list size (NA when no reference list exists)
    """
    #Row = sample, column = genetype
    f = open(outfile, 'w')
    types = ['d', 'j', 'v', 'dj', 'vj', 'vdj']
    f.write("Sample\t%s\n" %('\t'.join(types)))
    for group in sorted(group2samples.keys()):
        samples = group2samples[group]
        type2avrcount = {'d':0, 'j':0, 'v':0, 'dj':0, 'vj':0, 'vdj':0}
        #Print stats of each sample
        for sample in samples:
            f.write("%s" %sample)
            type2count = geneUsedSample( sample2stats[sample] )
            for type in types:
                count = 0
                if type in type2count:
                    count = type2count[type]
                type2avrcount[type] += count
                if abs:
                    f.write("\t%d" %count)
                else:#calculate percentage
                    if type in type2genelist:
                        total = len( type2genelist[type] )
                        if total == 0:
                            raise ValueError("Genetype %s has zero genes\n" %type)
                        pc = 100.0*count/total
                        f.write("\t%f" %pc)
                    else:
                        f.write("\tNA")
            f.write("\n")
        #Group average stats:
        f.write("%s" %group)
        for type in types:
            avrcount = float(type2avrcount[type])/len(samples)
            if abs:
                f.write("\t%d" % avrcount)
            else:
                if type in type2genelist:
                    total = len( type2genelist[type] )
                    if total == 0:
                        raise ValueError("Genetype %s has zero genes\n" %type)
                    pc = 100.0*avrcount/total
                    f.write("\t%f" %pc)
                else:
                    f.write("\tNA")
        f.write("\n")
    f.close()
#def getUsage(samples, outdir, type):
# genes = getUnionGeneList(samples, type)
# sys.stderr.write("Done getting uniqGeneList\n")
# #Print out usage table for each sample:
# for s in samples:
# g2c = s.usage[type]
# tabfile = os.path.join( outdir, "%s-%s.txt" %(s.name, type) )
# f = open( tabfile, 'w')
# f.write("Gene\tTotal\tUniq\n")
# for g in genes:
# f.write( "%s\t%d\t%d\n" %(g, g2c[g][0], g2c[g][1]) )
# f.close()
#def getVJusage(sample, type2gene2count, type2genelist, outdir, abs, uniq, std):
def getVJusage(sample, rowtype, coltype, type2gene2count, type2genelist, outdir, abs, uniq, std):
    """Write one VJ-style usage matrix (rows = *rowtype* genes,
    columns = *coltype* genes) for *sample* into
    <outdir>/<abs|rel>[uniq]/[std]<sample>-vj.txt.
    Returns early (writing nothing) when the needed usage tables are
    missing or the combination table is empty.
    """
    #If abs is True: print absolute count, otherwise print frequencies.
    #If uniq is True: using the Uniq sequence Count as the unit, otherwise, use read count
    #rowtype = genetype represented by the rows, coltype = genetype represented by the columns
    #(For exmple to represent vj recombinations, rows can be Vs and columns can be Js)
    if rowtype not in type2gene2count or coltype not in type2gene2count or (rowtype + coltype) not in type2gene2count:
        return
    v2c = type2gene2count[rowtype]
    j2c = type2gene2count[coltype]
    vj2c = type2gene2count[rowtype + coltype]
    totaluniqs = sum([c[1] for c in vj2c.values() ])
    totalreads = sum([c[0] for c in vj2c.values()])
    if totaluniqs == 0 or totalreads == 0:
        return
    #print vj2c
    #raise ValueError("Sample %s has zero sequence. rowtype: %s, coltype: %s. Totaluniqs: %d, totalreads: %d" %(sample, rowtype, coltype, totaluniqs, totalreads))
    # Output subdirectory encodes the flavor: abs/rel x (read|uniq) counts.
    if abs:
        outdirname = 'abs'
    else:
        outdirname = 'rel'
    if uniq:
        outdirname += "uniq"
    outdir = os.path.join(outdir, outdirname)
    if not std:
        file = os.path.join(outdir, "%s-vj.txt" %sample)
    else:
        # standard-deviation matrices get a "std" filename prefix
        file = os.path.join(outdir, "std%s-vj.txt" %sample)
    f = open(file, 'w')
    jgenes = [j for j in sorted(j2c.keys())] #column genes
    if coltype in type2genelist:
        jgenes = type2genelist[coltype]
    vgenes = [v for v in sorted(v2c.keys())] #row genes
    if rowtype in type2genelist:
        vgenes = type2genelist[rowtype]
    f.write( "\t%s\n" %( '\t'.join(jgenes) ) )
    for v in vgenes:
        # skip unnamed/undefined row genes
        if v == '' or re.search('undefined', v):
            continue
        f.write( "%s" %v )
        for j in jgenes:
            vj = '|'.join([v, j])
            if vj not in vj2c:
                f.write("\t0")
            else:
                if uniq:#uniq seq count
                    count = vj2c[vj][1]
                else:#read count
                    count = vj2c[vj][0]
                if abs:
                    f.write("\t%d" %count)
                else:#relative
                    if uniq:
                        count = float(count)/totaluniqs
                    else:
                        count = float(count)/totalreads
                    f.write("\t%f" %count)
        f.write("\n")
    f.close()
def getVJusageSample(sample, rowtype, coltype, avrstats, stdstats, type2genelist, outdir):
#Print vj
abs = True
uniq = True
std = True #if true, print the standard deviation
getVJusage(sample, rowtype, coltype, avrstats, type2genelist, outdir, abs, uniq, not std)
getVJusage(sample, rowtype, coltype, avrstats, type2genelist, outdir, abs, not uniq, not std)
getVJusage(sample, rowtype, coltype, avrstats, type2genelist, outdir, not abs, uniq, not std)
getVJusage(sample, rowtype, coltype, avrstats, type2genelist, outdir, not abs, not uniq, not std)
#print stds:
if stdstats:
getVJusage(sample, rowtype, coltype, stdstats, type2genelist, outdir, abs, uniq, std)
getVJusage(sample, rowtype, coltype, stdstats, type2genelist, outdir, abs, not uniq, std)
getVJusage(sample, rowtype, coltype, stdstats, type2genelist, outdir, not abs, uniq, std)
getVJusage(sample, rowtype, coltype, stdstats, type2genelist, outdir, not abs, not uniq, std)
#def getGeneUsage(sample, outdir):
# '''Get V, D, J, VDJ, VJ and DJ usage
# '''
# getVJusage()
# sample.setCounts()
# sys.stderr.write("Done getting usage for %s\n" %sample.name)
#
# #Adding the average of all samples the the sample list
# addAvrSample( samples )
# sys.stderr.write("Done adding average and std sample\n")
#
# vjUsage(samples, options.outdir)
# sys.stderr.write("Done v usage and j usage\n")
#
# vjoutdir = os.path.join( options.outdir, "vj")
# system("mkdir -p %s" %vjoutdir)
# #Generate VJ using the uniq sequence count or using the read count, relative or absolute count
# abs = True
# uniq = True
# getVJusage(samples, vjoutdir, abs, not uniq)
# sys.stderr.write("Done vj usage with absolute read count\n")
# getVJusage(samples, vjoutdir, not abs, not uniq)
# sys.stderr.write("Done vj usage with relative read count\n")
# getVJusage(samples, vjoutdir, abs, uniq)
# sys.stderr.write("Done vj usage with absolute uniqSeq count\n")
# getVJusage(samples, vjoutdir, not abs, uniq)
# sys.stderr.write("Done vj usage with relative read count\n")
| #nknguyen soe ucsc edu
#Tue Jul 17 10:56:47 PDT 2012
#Library of functions used to compute the gene usage
import sys, re, os, random, copy
from optparse import OptionParser
from scipy.stats.stats import pearsonr, spearmanr, kendalltau
from sonLib.bioio import system
import numpy as np
import immunoseq.lib.immunoseqLib as iseqlib
def addAvrSample( samples ):
''' Add the average and standardDev of all the samples '''
if len(samples) == 0:
return
avrusage = {'v':{}, 'j':{}, 'vj':{}} #'v':{ 'vgene':[totalreads, uniqseqs] }
stdusage = {'v':{}, 'j':{}, 'vj':{}} #'v':{ 'vgene':[totalreads, uniqseqs] }
#get accumulate count across samples:
for s in samples:
for type in avrusage:
g2c = s.usage[type]
typeusage = avrusage[type]
for g in g2c:
if g not in typeusage:
typeusage[g] = [ g2c[g] ]
else:
typeusage[g].append( g2c[g] )
#typeusage[g][1] += g2c[g][1]
#average:
avrsample = Sample('average')
stdsample = Sample('std')
for type in avrusage:
for g in avrusage[type]:
totalreads = [ sample[0] for sample in avrusage[type][g] ]
uniqseqs = [ sample[1] for sample in avrusage[type][g] ]
avrusage[type][g] = [np.mean(totalreads), np.mean(uniqseqs)]
stdusage[type][g] = [np.std(totalreads), np.std(uniqseqs)]
avrsample.usage = avrusage
avrsample.setCounts()
stdsample.usage = stdusage
stdsample.setCounts()
samples.append(avrsample)
samples.append(stdsample)
def getGenes(seq, type):
if type not in ['v', 'j', 'd']:
raise ValueError("singleUsage, %s is not a valid genetype. Valid choices are v, d, j" %type)
if type == 'v':
return seq.vs
elif type == 'j':
return seq.js
else:
return seq.ds
def singleUsage(seqs, type):
gene2count = {} #key = genename, val = [totalreads, uniqseqs]
for seq in seqs.values():
genes = getGenes(seq, type)
#filter out unvalid genes:
if len(genes) == 0 or '(undefined)' in genes or '' in genes:
continue
count = float(seq.count)/len(genes)
for gene in genes:
if gene not in gene2count:
gene2count[gene] = [count, 1.0/len(genes)]
else:
currcount = gene2count[gene]
gene2count[gene] = [currcount[0] + count, currcount[1] + 1.0/len(genes)]
return gene2count
def combinationUsage( seqs, types ):
comb2count = {} #key = combination of geneNames, val = [totalReads, uniqueSeqs]
for seq in seqs.values():
type2genes = {}
totalCombinations = 1
for type in types:
genes = getGenes(seq, type)
type2genes[type] = genes
totalCombinations *= len(genes)
if totalCombinations == 0:
continue
count = float(seq.count)/totalCombinations
combs = type2genes[ types[0] ]
for i in xrange(1, len(types)):
type = types[i]
currcombs = []
for gene in type2genes[type]:
for comb in combs:
currcombs.append( "|".join([comb, gene]) )
combs = currcombs
for comb in combs:
if comb not in comb2count:
comb2count[comb] = [count, 1.0/totalCombinations]
else:
currcount = comb2count[comb]
comb2count[comb] = [ currcount[0] + count, currcount[1] + 1.0/totalCombinations ]
return comb2count
def getGene2count(seqs):
#Single:
type2gene2count = { 'v':{}, 'j':{}, 'd': {}, 'dj':{}, 'vj':{}, 'vdj':{} }
singletypes = ['v', 'j', 'd']
for type in singletypes:
gene2count = singleUsage(seqs, type)
type2gene2count[type] = gene2count
#Combination:
combs = ['dj', 'vj', 'vdj']
for comb in combs:
types = [c for c in comb]
comb2count = combinationUsage(seqs, types)
type2gene2count[comb] = comb2count
similarGenes = ['TRBV6-5', 'TRBV6-6']
combineVgenes(type2gene2count, similarGenes)
return type2gene2count
def combineVgenes(type2gene2count, genes):
'''Combine the genes in 'genes' as one gene
'''
newcounts = [0.0, 0.0]
#Calculate combined counts
for v, counts in type2gene2count['v'].iteritems():
if v in genes:
newcounts[0] += counts[0]
newcounts[1] += counts[1]
#Delete single genes
for g in genes:
if g in type2gene2count['v']:
del type2gene2count['v'][g]
#Add combined newgene
newgene = '/'.join(genes)
type2gene2count['v'][newgene] = newcounts
#Combinations: vj, vdj
combs = ['vj', 'vdj']
for c in combs:
if c not in type2gene2count:
continue
g2counts = {} #key = j or dj gene(s), val = counts
delkeys = []
gene2count = type2gene2count[c]
#Calculate combined counts
for g, counts in gene2count.iteritems(): #Each VJ or VDJ combination
items = g.split('|')
v = items[0] #current V
if v in genes:
delkeys.append(g)
othergene= '|'.join(items[1:]) #current J or DJ
if othergene not in g2counts:
g2counts[othergene] = [counts[0], counts[1]]
else:
g2counts[othergene][0] += counts[0]
g2counts[othergene][1] += counts[1]
#Delete combinations with single gene in genes
for k in delkeys:
del gene2count[k]
#Add new combinations with new combined gene:
for othergene, newcounts in g2counts.iteritems():
newcomb = '|'.join([newgene, othergene])
gene2count[newcomb] = newcounts
#print gene2count
def getUnionGeneList(samples, type):
#Get the union of vgenes lists from all samples.
genes = []
for s in samples:
#print s.usage[type].keys()
for g in s.usage[type].keys():
if g not in genes:
genes.append(g)
#print genes
#If a sample doesn't have a vgene, put the count of that vgene to 0
genes.sort()
for g in genes:
for s in samples:
if g not in s.usage[type].keys():
s.usage[type][g] = [0,0]
return genes
def addSamplingStats(type2gene2count, aggType2gene2count, i):
#i is the order of the current sampling (base 0), or, it's the number of samplings that have already added to aggStats
for type, gene2count in type2gene2count.iteritems():
if type not in aggType2gene2count:
aggType2gene2count[type] = {}
for gene, counts in gene2count.iteritems():
aggType2gene2count[type][gene] = [ [c] for c in counts ]
else:
aggGene2count = aggType2gene2count[type]
for gene, counts in gene2count.iteritems():
if gene not in aggGene2count:
aggGene2count[gene] = [ [0.0]*i + [c] for c in counts] #previous simulation didn't have this gene
else:
aggCounts = aggGene2count[gene]
aggCounts[0].append(counts[0])
aggCounts[1].append(counts[1])
aggType2gene2count[type][gene] = aggCounts
def avrSamplingStats(aggType2gene2count):
#Average stats of the samplings:
avrtype2gene2count = {}
stdtype2gene2count = {}
for type, gene2count in aggType2gene2count.iteritems():
avrtype2gene2count[type] = {}
stdtype2gene2count[type] = {}
for gene, counts in gene2count.iteritems():
meanReads = np.mean(counts[0])
meanUniqs = np.mean(counts[1])
avrtype2gene2count[type][gene] = [meanReads, meanUniqs]
stdReads = np.std(counts[0])
stdUniqs = np.std(counts[1])
stdtype2gene2count[type][gene] = [stdReads, stdUniqs]
return avrtype2gene2count, stdtype2gene2count
def usageTab(types, sample, avrstats, stdstats, type2genelist, outdir):
for type in types:
avrgene2count = {}
stdgene2count = {}
totalreads = 0
totaluniqs = 0
if type in avrstats:
avrgene2count = avrstats[type]
stdgene2count = stdstats[type]
totalreads = sum([counts[0] for counts in avrgene2count.values()])
totaluniqs = sum([counts[1] for counts in avrgene2count.values()])
#if totalreads == 0 or totaluniqs == 0:
# raise ValueError("sample with zero read/sequence")
if type in type2genelist:
genes = type2genelist[type]
else:
genes = sorted( avrgene2count.keys() )
typedir = os.path.join(outdir, type)
outfile = os.path.join(typedir, "%s-%s.txt" %(sample, type) )
f = open(outfile, 'w')
f.write("Gene\tReads\t%Reads\tUniq\t%uniq\tStdReads\tStdUniq\n")
#numpass = 0
for g in genes:
if g not in avrgene2count:
sys.stderr.write("Gene %s is not in avrgene2count %s\n" %(g, ','.join(avrgene2count.keys()) ))
avrcounts = [0.0, 0.0]
stdcounts = [0.0, 0.0]
else:
#numpass += 1
avrcounts = avrgene2count[g]
stdcounts = stdgene2count[g]
read = avrcounts[0]
uniq = avrcounts[1]
readPc = 0.0
uniqPc = 0.0
if totalreads > 0:
readPc = iseqlib.getPc(read, totalreads)
if totaluniqs > 0:
uniqPc = iseqlib.getPc(uniq, totaluniqs)
f.write("%s\t%d\t%f\t%d\t%f\t%d\t%d\n" %(g, read, readPc, uniq, uniqPc, stdcounts[0], stdcounts[1]))
f.close()
#if numpass == 0:
# raise ValueError("ERROR\n")
def geneUsedSample(avrstats):
types = ['v', 'j', 'd', 'dj', 'vj', 'vdj']
type2count = {} #key = genetype, val = count
for type in types:
if type not in avrstats:
continue
avrgene2count = avrstats[type]
used = 0
for counts in avrgene2count.values():
if counts[0] > 0:
used += 1
type2count[type] = used
return type2count
def geneUsed(avrstats, type2genelist, outfile):
types = ['v', 'j', 'd', 'dj', 'vj', 'vdj']
f = open(outfile, 'w')
f.write("Genetype\tTotal\tUsed\tPercentage\n")
type2count = geneUsedSample(avrstats)
for type in types:
if type not in type2count:
continue
used = type2count[type]
if type in type2genelist:
total = len(type2genelist[type])
if total == 0:
raise ValueError("Genetype %s has zero genes\n" %type)
pc = 100.0*used/total
f.write("%s\t%d\t%d\t%f\n" %(type, total, used, pc))
else:
f.write("%s\tNA\t%d\tNA\n" %(type, used))
f.close()
def geneUsedSummary(sample2stats, type2genelist, group2samples, outfile, abs):
#Row = sample, column = genetype
f = open(outfile, 'w')
types = ['d', 'j', 'v', 'dj', 'vj', 'vdj']
f.write("Sample\t%s\n" %('\t'.join(types)))
for group in sorted(group2samples.keys()):
samples = group2samples[group]
type2avrcount = {'d':0, 'j':0, 'v':0, 'dj':0, 'vj':0, 'vdj':0}
#Print stats of each sample
for sample in samples:
f.write("%s" %sample)
type2count = geneUsedSample( sample2stats[sample] )
for type in types:
count = 0
if type in type2count:
count = type2count[type]
type2avrcount[type] += count
if abs:
f.write("\t%d" %count)
else:#calculate percentage
if type in type2genelist:
total = len( type2genelist[type] )
if total == 0:
raise ValueError("Genetype %s has zero genes\n" %type)
pc = 100.0*count/total
f.write("\t%f" %pc)
else:
f.write("\tNA")
f.write("\n")
#Group average stats:
f.write("%s" %group)
for type in types:
avrcount = float(type2avrcount[type])/len(samples)
if abs:
f.write("\t%d" % avrcount)
else:
if type in type2genelist:
total = len( type2genelist[type] )
if total == 0:
raise ValueError("Genetype %s has zero genes\n" %type)
pc = 100.0*avrcount/total
f.write("\t%f" %pc)
else:
f.write("\tNA")
f.write("\n")
f.close()
#def getUsage(samples, outdir, type):
# genes = getUnionGeneList(samples, type)
# sys.stderr.write("Done getting uniqGeneList\n")
# #Print out usage table for each sample:
# for s in samples:
# g2c = s.usage[type]
# tabfile = os.path.join( outdir, "%s-%s.txt" %(s.name, type) )
# f = open( tabfile, 'w')
# f.write("Gene\tTotal\tUniq\n")
# for g in genes:
# f.write( "%s\t%d\t%d\n" %(g, g2c[g][0], g2c[g][1]) )
# f.close()
#def getVJusage(sample, type2gene2count, type2genelist, outdir, abs, uniq, std):
def getVJusage(sample, rowtype, coltype, type2gene2count, type2genelist, outdir, abs, uniq, std):
#If abs is True: print absolute count, otherwise print frequencies.
#If uniq is True: using the Uniq sequence Count as the unit, otherwise, use read count
#rowtype = genetype represented by the rows, coltype = genetype represented by the columns
#(For exmple to represent vj recombinations, rows can be Vs and columns can be Js)
if rowtype not in type2gene2count or coltype not in type2gene2count or (rowtype + coltype) not in type2gene2count:
return
v2c = type2gene2count[rowtype]
j2c = type2gene2count[coltype]
vj2c = type2gene2count[rowtype + coltype]
totaluniqs = sum([c[1] for c in vj2c.values() ])
totalreads = sum([c[0] for c in vj2c.values()])
if totaluniqs == 0 or totalreads == 0:
return
#print vj2c
#raise ValueError("Sample %s has zero sequence. rowtype: %s, coltype: %s. Totaluniqs: %d, totalreads: %d" %(sample, rowtype, coltype, totaluniqs, totalreads))
if abs:
outdirname = 'abs'
else:
outdirname = 'rel'
if uniq:
outdirname += "uniq"
outdir = os.path.join(outdir, outdirname)
if not std:
file = os.path.join(outdir, "%s-vj.txt" %sample)
else:
file = os.path.join(outdir, "std%s-vj.txt" %sample)
f = open(file, 'w')
jgenes = [j for j in sorted(j2c.keys())] #column genes
if coltype in type2genelist:
jgenes = type2genelist[coltype]
vgenes = [v for v in sorted(v2c.keys())] #row genes
if rowtype in type2genelist:
vgenes = type2genelist[rowtype]
f.write( "\t%s\n" %( '\t'.join(jgenes) ) )
for v in vgenes:
if v == '' or re.search('undefined', v):
continue
f.write( "%s" %v )
for j in jgenes:
vj = '|'.join([v, j])
if vj not in vj2c:
f.write("\t0")
else:
if uniq:#uniq seq count
count = vj2c[vj][1]
else:#read count
count = vj2c[vj][0]
if abs:
f.write("\t%d" %count)
else:#relative
if uniq:
count = float(count)/totaluniqs
else:
count = float(count)/totalreads
f.write("\t%f" %count)
f.write("\n")
f.close()
def getVJusageSample(sample, rowtype, coltype, avrstats, stdstats, type2genelist, outdir):
#Print vj
abs = True
uniq = True
std = True #if true, print the standard deviation
getVJusage(sample, rowtype, coltype, avrstats, type2genelist, outdir, abs, uniq, not std)
getVJusage(sample, rowtype, coltype, avrstats, type2genelist, outdir, abs, not uniq, not std)
getVJusage(sample, rowtype, coltype, avrstats, type2genelist, outdir, not abs, uniq, not std)
getVJusage(sample, rowtype, coltype, avrstats, type2genelist, outdir, not abs, not uniq, not std)
#print stds:
if stdstats:
getVJusage(sample, rowtype, coltype, stdstats, type2genelist, outdir, abs, uniq, std)
getVJusage(sample, rowtype, coltype, stdstats, type2genelist, outdir, abs, not uniq, std)
getVJusage(sample, rowtype, coltype, stdstats, type2genelist, outdir, not abs, uniq, std)
getVJusage(sample, rowtype, coltype, stdstats, type2genelist, outdir, not abs, not uniq, std)
#def getGeneUsage(sample, outdir):
# '''Get V, D, J, VDJ, VJ and DJ usage
# '''
# getVJusage()
# sample.setCounts()
# sys.stderr.write("Done getting usage for %s\n" %sample.name)
#
# #Adding the average of all samples the the sample list
# addAvrSample( samples )
# sys.stderr.write("Done adding average and std sample\n")
#
# vjUsage(samples, options.outdir)
# sys.stderr.write("Done v usage and j usage\n")
#
# vjoutdir = os.path.join( options.outdir, "vj")
# system("mkdir -p %s" %vjoutdir)
# #Generate VJ using the uniq sequence count or using the read count, relative or absolute count
# abs = True
# uniq = True
# getVJusage(samples, vjoutdir, abs, not uniq)
# sys.stderr.write("Done vj usage with absolute read count\n")
# getVJusage(samples, vjoutdir, not abs, not uniq)
# sys.stderr.write("Done vj usage with relative read count\n")
# getVJusage(samples, vjoutdir, abs, uniq)
# sys.stderr.write("Done vj usage with absolute uniqSeq count\n")
# getVJusage(samples, vjoutdir, not abs, uniq)
# sys.stderr.write("Done vj usage with relative read count\n")
| en | 0.680801 | #nknguyen soe ucsc edu #Tue Jul 17 10:56:47 PDT 2012 #Library of functions used to compute the gene usage Add the average and standardDev of all the samples #'v':{ 'vgene':[totalreads, uniqseqs] } #'v':{ 'vgene':[totalreads, uniqseqs] } #get accumulate count across samples: #typeusage[g][1] += g2c[g][1] #average: #key = genename, val = [totalreads, uniqseqs] #filter out unvalid genes: #key = combination of geneNames, val = [totalReads, uniqueSeqs] #Single: #Combination: Combine the genes in 'genes' as one gene #Calculate combined counts #Delete single genes #Add combined newgene #Combinations: vj, vdj #key = j or dj gene(s), val = counts #Calculate combined counts #Each VJ or VDJ combination #current V #current J or DJ #Delete combinations with single gene in genes #Add new combinations with new combined gene: #print gene2count #Get the union of vgenes lists from all samples. #print s.usage[type].keys() #print genes #If a sample doesn't have a vgene, put the count of that vgene to 0 #i is the order of the current sampling (base 0), or, it's the number of samplings that have already added to aggStats #previous simulation didn't have this gene #Average stats of the samplings: #if totalreads == 0 or totaluniqs == 0: # raise ValueError("sample with zero read/sequence") #numpass = 0 #numpass += 1 #if numpass == 0: # raise ValueError("ERROR\n") #key = genetype, val = count #Row = sample, column = genetype #Print stats of each sample #calculate percentage #Group average stats: #def getUsage(samples, outdir, type): # genes = getUnionGeneList(samples, type) # sys.stderr.write("Done getting uniqGeneList\n") # #Print out usage table for each sample: # for s in samples: # g2c = s.usage[type] # tabfile = os.path.join( outdir, "%s-%s.txt" %(s.name, type) ) # f = open( tabfile, 'w') # f.write("Gene\tTotal\tUniq\n") # for g in genes: # f.write( "%s\t%d\t%d\n" %(g, g2c[g][0], g2c[g][1]) ) # f.close() #def getVJusage(sample, type2gene2count, type2genelist, outdir, 
abs, uniq, std): #If abs is True: print absolute count, otherwise print frequencies. #If uniq is True: using the Uniq sequence Count as the unit, otherwise, use read count #rowtype = genetype represented by the rows, coltype = genetype represented by the columns #(For exmple to represent vj recombinations, rows can be Vs and columns can be Js) #print vj2c #raise ValueError("Sample %s has zero sequence. rowtype: %s, coltype: %s. Totaluniqs: %d, totalreads: %d" %(sample, rowtype, coltype, totaluniqs, totalreads)) #column genes #row genes #uniq seq count #read count #relative #Print vj #if true, print the standard deviation #print stds: #def getGeneUsage(sample, outdir): # '''Get V, D, J, VDJ, VJ and DJ usage # ''' # getVJusage() # sample.setCounts() # sys.stderr.write("Done getting usage for %s\n" %sample.name) # # #Adding the average of all samples the the sample list # addAvrSample( samples ) # sys.stderr.write("Done adding average and std sample\n") # # vjUsage(samples, options.outdir) # sys.stderr.write("Done v usage and j usage\n") # # vjoutdir = os.path.join( options.outdir, "vj") # system("mkdir -p %s" %vjoutdir) # #Generate VJ using the uniq sequence count or using the read count, relative or absolute count # abs = True # uniq = True # getVJusage(samples, vjoutdir, abs, not uniq) # sys.stderr.write("Done vj usage with absolute read count\n") # getVJusage(samples, vjoutdir, not abs, not uniq) # sys.stderr.write("Done vj usage with relative read count\n") # getVJusage(samples, vjoutdir, abs, uniq) # sys.stderr.write("Done vj usage with absolute uniqSeq count\n") # getVJusage(samples, vjoutdir, not abs, uniq) # sys.stderr.write("Done vj usage with relative read count\n") | 2.462826 | 2 |
archive/batch_reducer2.py | saeedrahmo/map-reduce-dataproc | 0 | 6612922 | #!/usr/bin/env python
"""reducer.py"""
from operator import itemgetter
import sys
batch_current = 0
metric_value_min = 0
metric_value_max = 0
# input comes from STDIN (standard input)
for line in sys.stdin:
# remove leading and trailing whitespace
line = line.strip()
line = line.rstrip()
# parse the input we got from mapper.py
batch_id_current, metric_value, metric_selected = line.split('\t')
print('batch_id: {}\t value: {}\t metric: {}'.format(batch_id_current, metric_value, metric_selected)) | #!/usr/bin/env python
"""reducer.py"""
from operator import itemgetter
import sys
batch_current = 0
metric_value_min = 0
metric_value_max = 0
# input comes from STDIN (standard input)
for line in sys.stdin:
# remove leading and trailing whitespace
line = line.strip()
line = line.rstrip()
# parse the input we got from mapper.py
batch_id_current, metric_value, metric_selected = line.split('\t')
print('batch_id: {}\t value: {}\t metric: {}'.format(batch_id_current, metric_value, metric_selected)) | en | 0.535707 | #!/usr/bin/env python reducer.py # input comes from STDIN (standard input) # remove leading and trailing whitespace # parse the input we got from mapper.py | 2.737665 | 3 |
tranceiver/Receiver.py | milad72t/FlightTracker | 2 | 6612923 | <filename>tranceiver/Receiver.py
from peewee import *
import Flight_pb2
from socket import *
db = MySQLDatabase('FlightTracker', user='root', passwd='<PASSWORD>')
host = "127.0.0.1"
port = 8000
udpSocket = socket(AF_INET, SOCK_DGRAM)
udpSocket.bind(("", port))
class FlightLog(Model):
flightId = IntegerField(db_column='flightId')
altitude = IntegerField(db_column='altitude')
speed = IntegerField(db_column='speed')
angle = DoubleField(db_column='angle')
sendTime = DateTimeField(db_column='sendTime')
longitude = DoubleField(db_column='longitude')
latitude = DoubleField(db_column='latitude')
class Meta:
database = db
table_name = 'flight_logs'
def PraseToObject(data):
flightLog = Flight_pb2.Flight()
flightLog.ParseFromString(data)
return flightLog
def insertToDB(flightLog):
FlightLog.create(flightId=flightLog.flightId,altitude=flightLog.altitude,speed=flightLog.speed,angle=flightLog.angle,sendTime=flightLog.sendTime,longitude=flightLog.longitude,latitude=flightLog.latitude)
print "waiting on port:", port
while 1:
try:
data, addr = udpSocket.recvfrom(100)
insertToDB(PraseToObject(data))
except Exception,e:
print 'Error : '+str(e)
| <filename>tranceiver/Receiver.py
from peewee import *
import Flight_pb2
from socket import *
db = MySQLDatabase('FlightTracker', user='root', passwd='<PASSWORD>')
host = "127.0.0.1"
port = 8000
udpSocket = socket(AF_INET, SOCK_DGRAM)
udpSocket.bind(("", port))
class FlightLog(Model):
flightId = IntegerField(db_column='flightId')
altitude = IntegerField(db_column='altitude')
speed = IntegerField(db_column='speed')
angle = DoubleField(db_column='angle')
sendTime = DateTimeField(db_column='sendTime')
longitude = DoubleField(db_column='longitude')
latitude = DoubleField(db_column='latitude')
class Meta:
database = db
table_name = 'flight_logs'
def PraseToObject(data):
flightLog = Flight_pb2.Flight()
flightLog.ParseFromString(data)
return flightLog
def insertToDB(flightLog):
FlightLog.create(flightId=flightLog.flightId,altitude=flightLog.altitude,speed=flightLog.speed,angle=flightLog.angle,sendTime=flightLog.sendTime,longitude=flightLog.longitude,latitude=flightLog.latitude)
print "waiting on port:", port
while 1:
try:
data, addr = udpSocket.recvfrom(100)
insertToDB(PraseToObject(data))
except Exception,e:
print 'Error : '+str(e)
| none | 1 | 2.917572 | 3 | |
spike_swarm_sim/algorithms/evolutionary/cma_es.py | r-sendra/SpikeSwarmSim | 0 | 6612924 | import logging
import numpy as np
from .population import CMA_EA_Population
from .evolutionary_algorithm import EvolutionaryAlgorithm
from spike_swarm_sim.register import algorithm_registry
from spike_swarm_sim.utils import save_pickle, load_pickle
@algorithm_registry(name='CMA-ES')
class CMA_ES(EvolutionaryAlgorithm):
""" Class of the CMA-ES.
The evolution step is defined in the CMA_EA_Population class.
"""
def __init__(self, populations, *args, **kwargs):
populations = {name : CMA_EA_Population(kwargs['population_size'],\
pop['min_vals'], pop['max_vals'], pop['objects'], **pop['params'])\
for name, pop in populations.items()}
super(CMA_ES, self).__init__(populations, *args, **kwargs)
def save_population(self, generation):
""" Saves the checkpoint with the necessary information to resume the evolution.
"""
pop_checkpoint = {
'populations' : {name : np.stack(pop.population) for name, pop in self.populations.items()},
'generation' : generation,
'mutation_prob' : {name : pop.mutation_prob for name, pop in self.populations.items()},
'evolution_hist' : self.evolution_history,
'mu' : {name : pop.strategy_m for name, pop in self.populations.items()},
'C' :{name : pop.strategy_C for name, pop in self.populations.items()},
'cc' :{name : pop.cc for name, pop in self.populations.items()},
'cs' :{name : pop.cs for name, pop in self.populations.items()},
'c_cov' :{name : pop.c_cov for name, pop in self.populations.items()},
'mu_cov':{name : pop.mu_cov for name, pop in self.populations.items()},
'ds':{name : pop.ds for name, pop in self.populations.items()},
'evo_path':{name : pop.evo_path for name, pop in self.populations.items()},
'ps':{name : pop.ps for name, pop in self.populations.items()},
'B':{name : pop.B for name, pop in self.populations.items()},
'Bt' :{name : pop.Bt for name, pop in self.populations.items()},
'D' : {name : pop.D for name, pop in self.populations.items()},
'sigma' : {name : pop.sigma for name, pop in self.populations.items()},
'num_evals' :{name : pop.num_evals for name, pop in self.populations.items()},
}
file_name = 'spike_swarm_sim/checkpoints/populations/' + self.checkpoint_name
save_pickle(pop_checkpoint, file_name)
logging.info('Successfully saved evolution checkpoint.')
def load_population(self):
""" Loads a previously saved checkpoint to resume evolution.
"""
checkpoint = load_pickle('spike_swarm_sim/checkpoints/populations/' + self.checkpoint_name)
logging.info('Resuming CMA-ES evolution using checkpoint ' + self.checkpoint_name)
key = tuple(self.populations.keys())[0]
for key, pop in checkpoint['populations'].items():
self.populations[key].strategy_m = checkpoint['mu'][key]
self.populations[key].strategy_C = checkpoint['C'][key]
self.populations[key].cc = checkpoint['cc'][key]
self.populations[key].cs = checkpoint['cs'][key]
self.populations[key].mu_cov = checkpoint['mu_cov'][key]
self.populations[key].c_cov = checkpoint['c_cov'][key]
self.populations[key].ds = checkpoint['ds'][key]
self.populations[key].evo_path = checkpoint['evo_path'][key]
self.populations[key].ps = checkpoint['ps'][key]
self.populations[key].B = checkpoint['B'][key]
self.populations[key].Bt = checkpoint['Bt'][key]
self.populations[key].D = checkpoint['D'][key]
self.populations[key].sigma = checkpoint['sigma'][key]
self.populations[key].num_evals = checkpoint['num_evals'][key]
self.populations[key].population = self.populations[key].sample()
self.init_generation = checkpoint['generation']
self.evolution_history = checkpoint['evolution_hist'] | import logging
import numpy as np
from .population import CMA_EA_Population
from .evolutionary_algorithm import EvolutionaryAlgorithm
from spike_swarm_sim.register import algorithm_registry
from spike_swarm_sim.utils import save_pickle, load_pickle
@algorithm_registry(name='CMA-ES')
class CMA_ES(EvolutionaryAlgorithm):
""" Class of the CMA-ES.
The evolution step is defined in the CMA_EA_Population class.
"""
def __init__(self, populations, *args, **kwargs):
populations = {name : CMA_EA_Population(kwargs['population_size'],\
pop['min_vals'], pop['max_vals'], pop['objects'], **pop['params'])\
for name, pop in populations.items()}
super(CMA_ES, self).__init__(populations, *args, **kwargs)
def save_population(self, generation):
""" Saves the checkpoint with the necessary information to resume the evolution.
"""
pop_checkpoint = {
'populations' : {name : np.stack(pop.population) for name, pop in self.populations.items()},
'generation' : generation,
'mutation_prob' : {name : pop.mutation_prob for name, pop in self.populations.items()},
'evolution_hist' : self.evolution_history,
'mu' : {name : pop.strategy_m for name, pop in self.populations.items()},
'C' :{name : pop.strategy_C for name, pop in self.populations.items()},
'cc' :{name : pop.cc for name, pop in self.populations.items()},
'cs' :{name : pop.cs for name, pop in self.populations.items()},
'c_cov' :{name : pop.c_cov for name, pop in self.populations.items()},
'mu_cov':{name : pop.mu_cov for name, pop in self.populations.items()},
'ds':{name : pop.ds for name, pop in self.populations.items()},
'evo_path':{name : pop.evo_path for name, pop in self.populations.items()},
'ps':{name : pop.ps for name, pop in self.populations.items()},
'B':{name : pop.B for name, pop in self.populations.items()},
'Bt' :{name : pop.Bt for name, pop in self.populations.items()},
'D' : {name : pop.D for name, pop in self.populations.items()},
'sigma' : {name : pop.sigma for name, pop in self.populations.items()},
'num_evals' :{name : pop.num_evals for name, pop in self.populations.items()},
}
file_name = 'spike_swarm_sim/checkpoints/populations/' + self.checkpoint_name
save_pickle(pop_checkpoint, file_name)
logging.info('Successfully saved evolution checkpoint.')
def load_population(self):
""" Loads a previously saved checkpoint to resume evolution.
"""
checkpoint = load_pickle('spike_swarm_sim/checkpoints/populations/' + self.checkpoint_name)
logging.info('Resuming CMA-ES evolution using checkpoint ' + self.checkpoint_name)
key = tuple(self.populations.keys())[0]
for key, pop in checkpoint['populations'].items():
self.populations[key].strategy_m = checkpoint['mu'][key]
self.populations[key].strategy_C = checkpoint['C'][key]
self.populations[key].cc = checkpoint['cc'][key]
self.populations[key].cs = checkpoint['cs'][key]
self.populations[key].mu_cov = checkpoint['mu_cov'][key]
self.populations[key].c_cov = checkpoint['c_cov'][key]
self.populations[key].ds = checkpoint['ds'][key]
self.populations[key].evo_path = checkpoint['evo_path'][key]
self.populations[key].ps = checkpoint['ps'][key]
self.populations[key].B = checkpoint['B'][key]
self.populations[key].Bt = checkpoint['Bt'][key]
self.populations[key].D = checkpoint['D'][key]
self.populations[key].sigma = checkpoint['sigma'][key]
self.populations[key].num_evals = checkpoint['num_evals'][key]
self.populations[key].population = self.populations[key].sample()
self.init_generation = checkpoint['generation']
self.evolution_history = checkpoint['evolution_hist'] | en | 0.843449 | Class of the CMA-ES.
The evolution step is defined in the CMA_EA_Population class. Saves the checkpoint with the necessary information to resume the evolution. Loads a previously saved checkpoint to resume evolution. | 2.320278 | 2 |
python/src/tensor/autograd/autograd.py | dawidkski/space | 3 | 6612925 | from __future__ import annotations
from abc import abstractmethod, ABCMeta
from typing import Optional, List, Iterable, Union, TypeVar
import numpy as np
from .. import tensor as ts
T = TypeVar("T")
IterT = Union[T, Iterable[T]]
class Variable:
def __init__(self, value: ts.Tensor, op: Optional[Op] = None):
self._value: ts.Tensor = value
self._grad: ts.Tensor = ts.Tensor(np.full(value.shape, 1.0))
self.op: Optional[Op] = op
@property
def value(self):
return self._value
@value.setter
def value(self, value: ts.Tensor):
self._value = value
@property
def grad(self):
return self._grad
@grad.setter
def grad(self, value: ts.Tensor):
self._grad = value
def backward(self):
traverse(self)
def __str__(self):
return f"Variable"
def __neg__(self) -> Variable:
variable = Variable(-self._value, self.op)
variable._grad = self._grad
return variable
def __matmul__(self, other: Variable) -> Variable:
return matmul(self, other)
def __add__(self, other: Variable) -> Variable:
return add(self, other)
class Op(metaclass=ABCMeta):
def __init__(self):
self._inputs = []
@property
def inputs(self) -> List[Variable]:
return self._inputs
def __call__(self, *args: Variable):
return self.forward(*args)
@abstractmethod
def forward(self, *args: Variable):
raise NotImplementedError
@abstractmethod
def backward(self, *args: ts.Tensor):
raise NotImplementedError
@staticmethod
def _check_inputs(*inputs: Variable, num: int) -> IterT[Variable]:
if len(inputs) == num:
if len(inputs) == 1:
return inputs[0]
else:
return inputs
else:
raise ValueError(f"Incompatible input parameters. Length of inputs = {len(inputs)} ")
@staticmethod
def _check_grads(*grads: ts.Tensor, num: int) -> IterT[ts.Tensor]:
if len(grads) == num:
if len(grads) == 1:
return grads[0]
else:
return grads
else:
raise ValueError(f"Incompatible grads parameters. Length of grads = {len(grads)} ")
class Add(Op):
EXPECTED_INPUTS_LENGTH: int = 2
EXPECTED_GRADS_LENGTH: int = 1
def forward(self, *inputs: Variable) -> Variable:
x: Variable
b: Variable
x, b = self._check_inputs(*inputs, num=self.EXPECTED_INPUTS_LENGTH) # type: ignore
self._inputs.extend([x, b])
return Variable(x.value + b.value, self)
def backward(self, *grads: ts.Tensor):
grad = self._check_grads(*grads, num=self.EXPECTED_GRADS_LENGTH)
x = self._inputs[0]
b = self._inputs[1]
x.grad = grad
if x.value.shape == b.value.shape:
b.grad = grad
elif b.value.dim == 1 and x.value.shape[1] == b.value.shape[0]:
b.grad = ts.sum(grad, 0)
else:
raise ValueError(f"Add(Op): Unsupported input shapes! {x.value.shape}, {b.value.shape}")
def __str__(self):
return f"Add"
class MatMul(Op):
EXPECTED_INPUTS_LENGTH: int = 2
EXPECTED_GRADS_LENGTH: int = 1
def forward(self, *inputs: Variable):
a: Variable
b: Variable
a, b = self._check_inputs(*inputs, num=self.EXPECTED_INPUTS_LENGTH) # type: ignore
self._inputs.extend([a, b])
return Variable(a.value @ b.value, self)
def backward(self, *grads: ts.Tensor):
grad = self._check_grads(*grads, num=self.EXPECTED_GRADS_LENGTH)
self._inputs[0].grad = grad @ self._inputs[1].value.T
self._inputs[1].grad = self._inputs[0].value.T @ grad
def __str__(self):
return f"MatMul"
class Log(Op):
EXPECTED_INPUTS_LENGTH: int = 1
EXPECTED_GRADS_LENGTH: int = 1
def forward(self, *inputs: Variable):
x: Variable
x = self._check_inputs(*inputs, num=self.EXPECTED_INPUTS_LENGTH) # type: ignore
self._inputs.extend([x])
return Variable(ts.log(x.value), self)
def backward(self, *grads: ts.Tensor):
grad = self._check_grads(*grads, num=self.EXPECTED_GRADS_LENGTH)
self._inputs[0].grad = self._inputs[0].value * grad
def __str__(self):
return f"Log"
class Reshape(Op):
def __init__(self, shape: List[int]):
super(Reshape, self).__init__()
self._shape_after = shape
def forward(self, *inputs: Variable):
x: Variable
x = self._check_inputs(*inputs, num=1) # type: ignore
if len(self._inputs) == 0:
self._inputs.append(x)
return Variable(x.value.reshape(self._shape_after), self)
def backward(self, *grads: ts.Tensor):
grad = self._check_grads(*grads, num=1)
self._inputs[0].grad = grad.reshape(self._inputs[0].value.shape)
def __str__(self):
return f"Reshape"
def matmul(x: Variable, y: Variable) -> Variable:
op = MatMul()
return op(x, y)
def add(x: Variable, y: Variable) -> Variable:
op = Add()
return op(x, y)
def log(x: Variable) -> Variable:
op = Log()
return op(x)
def reshape(x: Variable, shape: List[int]) -> Variable:
op = Reshape(shape)
return op(x)
def var(*args, **kwargs) -> Variable:
return Variable(ts.Tensor(*args), **kwargs)
def traverse(variable: Variable):
if variable.op:
variable.op.backward(variable.grad)
if inputs := variable.op.inputs:
for i in inputs:
traverse(i)
def print_graph(variable: Variable, prefix=""):
delimiter = " "
def loop(v: Variable, p=""):
p += delimiter
print(p + str(v))
if op := v.op:
p += delimiter
print(p + str(op))
if inputs := op.inputs:
for i in inputs:
print_graph(i, p)
loop(variable, prefix)
| from __future__ import annotations
from abc import abstractmethod, ABCMeta
from typing import Optional, List, Iterable, Union, TypeVar
import numpy as np
from .. import tensor as ts
T = TypeVar("T")
IterT = Union[T, Iterable[T]]
class Variable:
def __init__(self, value: ts.Tensor, op: Optional[Op] = None):
self._value: ts.Tensor = value
self._grad: ts.Tensor = ts.Tensor(np.full(value.shape, 1.0))
self.op: Optional[Op] = op
@property
def value(self):
return self._value
@value.setter
def value(self, value: ts.Tensor):
self._value = value
@property
def grad(self):
return self._grad
@grad.setter
def grad(self, value: ts.Tensor):
self._grad = value
def backward(self):
traverse(self)
def __str__(self):
return f"Variable"
def __neg__(self) -> Variable:
variable = Variable(-self._value, self.op)
variable._grad = self._grad
return variable
def __matmul__(self, other: Variable) -> Variable:
return matmul(self, other)
def __add__(self, other: Variable) -> Variable:
return add(self, other)
class Op(metaclass=ABCMeta):
def __init__(self):
self._inputs = []
@property
def inputs(self) -> List[Variable]:
return self._inputs
def __call__(self, *args: Variable):
return self.forward(*args)
@abstractmethod
def forward(self, *args: Variable):
raise NotImplementedError
@abstractmethod
def backward(self, *args: ts.Tensor):
raise NotImplementedError
@staticmethod
def _check_inputs(*inputs: Variable, num: int) -> IterT[Variable]:
if len(inputs) == num:
if len(inputs) == 1:
return inputs[0]
else:
return inputs
else:
raise ValueError(f"Incompatible input parameters. Length of inputs = {len(inputs)} ")
@staticmethod
def _check_grads(*grads: ts.Tensor, num: int) -> IterT[ts.Tensor]:
if len(grads) == num:
if len(grads) == 1:
return grads[0]
else:
return grads
else:
raise ValueError(f"Incompatible grads parameters. Length of grads = {len(grads)} ")
class Add(Op):
EXPECTED_INPUTS_LENGTH: int = 2
EXPECTED_GRADS_LENGTH: int = 1
def forward(self, *inputs: Variable) -> Variable:
x: Variable
b: Variable
x, b = self._check_inputs(*inputs, num=self.EXPECTED_INPUTS_LENGTH) # type: ignore
self._inputs.extend([x, b])
return Variable(x.value + b.value, self)
def backward(self, *grads: ts.Tensor):
grad = self._check_grads(*grads, num=self.EXPECTED_GRADS_LENGTH)
x = self._inputs[0]
b = self._inputs[1]
x.grad = grad
if x.value.shape == b.value.shape:
b.grad = grad
elif b.value.dim == 1 and x.value.shape[1] == b.value.shape[0]:
b.grad = ts.sum(grad, 0)
else:
raise ValueError(f"Add(Op): Unsupported input shapes! {x.value.shape}, {b.value.shape}")
def __str__(self):
return f"Add"
class MatMul(Op):
EXPECTED_INPUTS_LENGTH: int = 2
EXPECTED_GRADS_LENGTH: int = 1
def forward(self, *inputs: Variable):
a: Variable
b: Variable
a, b = self._check_inputs(*inputs, num=self.EXPECTED_INPUTS_LENGTH) # type: ignore
self._inputs.extend([a, b])
return Variable(a.value @ b.value, self)
def backward(self, *grads: ts.Tensor):
grad = self._check_grads(*grads, num=self.EXPECTED_GRADS_LENGTH)
self._inputs[0].grad = grad @ self._inputs[1].value.T
self._inputs[1].grad = self._inputs[0].value.T @ grad
def __str__(self):
return f"MatMul"
class Log(Op):
EXPECTED_INPUTS_LENGTH: int = 1
EXPECTED_GRADS_LENGTH: int = 1
def forward(self, *inputs: Variable):
x: Variable
x = self._check_inputs(*inputs, num=self.EXPECTED_INPUTS_LENGTH) # type: ignore
self._inputs.extend([x])
return Variable(ts.log(x.value), self)
def backward(self, *grads: ts.Tensor):
grad = self._check_grads(*grads, num=self.EXPECTED_GRADS_LENGTH)
self._inputs[0].grad = self._inputs[0].value * grad
def __str__(self):
return f"Log"
class Reshape(Op):
def __init__(self, shape: List[int]):
super(Reshape, self).__init__()
self._shape_after = shape
def forward(self, *inputs: Variable):
x: Variable
x = self._check_inputs(*inputs, num=1) # type: ignore
if len(self._inputs) == 0:
self._inputs.append(x)
return Variable(x.value.reshape(self._shape_after), self)
def backward(self, *grads: ts.Tensor):
grad = self._check_grads(*grads, num=1)
self._inputs[0].grad = grad.reshape(self._inputs[0].value.shape)
def __str__(self):
return f"Reshape"
def matmul(x: Variable, y: Variable) -> Variable:
op = MatMul()
return op(x, y)
def add(x: Variable, y: Variable) -> Variable:
op = Add()
return op(x, y)
def log(x: Variable) -> Variable:
op = Log()
return op(x)
def reshape(x: Variable, shape: List[int]) -> Variable:
op = Reshape(shape)
return op(x)
def var(*args, **kwargs) -> Variable:
return Variable(ts.Tensor(*args), **kwargs)
def traverse(variable: Variable):
if variable.op:
variable.op.backward(variable.grad)
if inputs := variable.op.inputs:
for i in inputs:
traverse(i)
def print_graph(variable: Variable, prefix=""):
delimiter = " "
def loop(v: Variable, p=""):
p += delimiter
print(p + str(v))
if op := v.op:
p += delimiter
print(p + str(op))
if inputs := op.inputs:
for i in inputs:
print_graph(i, p)
loop(variable, prefix)
| it | 0.195478 | # type: ignore # type: ignore # type: ignore # type: ignore | 2.765631 | 3 |
game/player/Player.py | b3ckerdev/Transformice-Server | 2 | 6612926 | from game.player.PlayerPackets import *
from server.managers.BulleManager import *
from server.managers.BulleRoomsManager import *
class Player:
def __init__(self, tcp_client):
self.tcp_client = tcp_client
self.player_packets = PlayerPackets(self)
self.connection_time = 0
self.community = {
"id": 0,
"str": "en"
}
self.captcha = {
"letters": "",
"data": b""
}
self.id = 0
self.code = 0
self.nickname = ""
self.privilege = 0
self.logged = False
self.bulle_room = None
def identification(self, nickname):
self.nickname = nickname
self.player_packets.identification(self.id, self.nickname, 0, self.community["id"], self.code, True, [])
def join_room(self, room):
if self.bulle_room != None:
self.bulle_room.leave(self)
if BulleManager.count() > 0:
bulle = BulleManager.get_bulle(room)
self.bulle_room = BulleRoomsManager.join(bulle, room)
print(self.bulle_room) | from game.player.PlayerPackets import *
from server.managers.BulleManager import *
from server.managers.BulleRoomsManager import *
class Player:
def __init__(self, tcp_client):
self.tcp_client = tcp_client
self.player_packets = PlayerPackets(self)
self.connection_time = 0
self.community = {
"id": 0,
"str": "en"
}
self.captcha = {
"letters": "",
"data": b""
}
self.id = 0
self.code = 0
self.nickname = ""
self.privilege = 0
self.logged = False
self.bulle_room = None
def identification(self, nickname):
self.nickname = nickname
self.player_packets.identification(self.id, self.nickname, 0, self.community["id"], self.code, True, [])
def join_room(self, room):
if self.bulle_room != None:
self.bulle_room.leave(self)
if BulleManager.count() > 0:
bulle = BulleManager.get_bulle(room)
self.bulle_room = BulleRoomsManager.join(bulle, room)
print(self.bulle_room) | none | 1 | 2.700098 | 3 | |
rob/console.py | dan-osull/rob | 3 | 6612927 | <gh_stars>1-10
from pathlib import WindowsPath
from typing import Union
import click
from rich import box
from rich.console import Console
from rich.table import Table
import rob.filesystem
from rob import PROJECT_NAME, VERSION
from rob.folders import Library
# click.termui._ansi_colors
HELP_HEADERS_COLOR = "bright_white"
HELP_OPTIONS_COLOR = "cyan"
# https://rich.readthedocs.io/en/stable/appendix/colors.html#appendix-colors
console = Console(highlight=False)
print_ = console.print
def print_library_info(library: Library, show_size: bool = False) -> None:
print_("")
disk_usage = library.disk_usage
for disk in disk_usage:
print_disk_usage(disk)
print_("")
print_library_folder_count(library)
table_data = library.get_table_data(show_size=show_size)
if table_data:
print_("")
if show_size:
total_bytes = sum(row["Size"] for row in table_data)
print_library_table(table_data, show_size)
print_(f"\nTotal size: {style_bytes_as_gb(total_bytes)}")
else:
print_library_table(table_data)
print_("\nRun [bold]rob list[/bold] to see size of folders.")
def print_disk_usage(disk: rob.filesystem.DiskUsage) -> None:
print_(
f"Drive {style_path(disk.drive)} "
f"{style_bytes_as_gb((disk.usage.used),ndigits=None)} used / "
f"{style_bytes_as_gb(disk.usage.total,ndigits=None)} total "
f"({round(disk.usage.used/disk.usage.total*100)}% full)"
)
def print_library_folder_count(library: Library) -> None:
plur_s = "" if len(library.folders) == 1 else "s"
print_(f"{len(library.folders)} folder{plur_s} in {style_library(library)}")
def print_library_table(table_data: list[dict], show_size: bool = False) -> None:
table = Table(row_styles=["cyan", "sky_blue1"], show_edge=False, box=box.SQUARE)
table.add_column("ID", overflow="ellipsis")
table.add_column("Path", overflow="ellipsis")
table.add_column("Name", overflow="fold")
if show_size:
for row in table_data:
row["Size"] = style_bytes_as_gb(row["Size"])
table.add_column("Size", justify="right")
for row in table_data:
values = (str(value) for value in row.values())
table.add_row(*values)
print_(table)
def print_fail(msg: str = "") -> None:
print_(f"{msg} [bold][red]FAIL[/red][/bold]")
def print_success(msg: str = "") -> None:
print_(f"{msg} [bold][green]SUCCESS[/green][/bold]")
def print_skipped() -> None:
print_(" [grey50]SKIPPED[/grey50]")
def print_title() -> None:
# Font Slant at https://patorjk.com/software/taag/#p=display&f=Slant&t=rob
# See logo.txt for original
logo_text = " __\n _________ / /_ \n / ___/ __ \\/ __ \\\n / / / /_/ / /_/ /\n/_/ \\____/_.___/"
print_(f"[bold][purple]{logo_text}[/purple][/bold] v.{VERSION}\n")
print_(
"[bold]Help:[/bold] [link=https://github.com/dan-osull/rob/]https://github.com/dan-osull/rob/[/link]\n"
)
def style_project() -> str:
return f"[bold][purple]{PROJECT_NAME}[/purple][/bold]"
def style_library(library: Library) -> str:
library_path = str(library.library_folder).strip("\\")
return f"{style_project()} library at [purple]{library_path}[/purple]"
def style_path(obj: Union[WindowsPath, str]) -> str:
return f"[cyan]{str(obj)}[/cyan]"
def style_bytes_as_gb(size_bytes: int, ndigits=1) -> str:
gigabytes = round(size_bytes / 1024**3, ndigits)
return f"{gigabytes} GB"
def confirm_action(dry_run: bool) -> None:
if dry_run:
console.rule("[green]DRY RUN MODE[/green]")
print_("No changes will be made.")
click.confirm(text="Continue?", abort=True)
| from pathlib import WindowsPath
from typing import Union
import click
from rich import box
from rich.console import Console
from rich.table import Table
import rob.filesystem
from rob import PROJECT_NAME, VERSION
from rob.folders import Library
# click.termui._ansi_colors
HELP_HEADERS_COLOR = "bright_white"
HELP_OPTIONS_COLOR = "cyan"
# https://rich.readthedocs.io/en/stable/appendix/colors.html#appendix-colors
console = Console(highlight=False)
print_ = console.print
def print_library_info(library: Library, show_size: bool = False) -> None:
print_("")
disk_usage = library.disk_usage
for disk in disk_usage:
print_disk_usage(disk)
print_("")
print_library_folder_count(library)
table_data = library.get_table_data(show_size=show_size)
if table_data:
print_("")
if show_size:
total_bytes = sum(row["Size"] for row in table_data)
print_library_table(table_data, show_size)
print_(f"\nTotal size: {style_bytes_as_gb(total_bytes)}")
else:
print_library_table(table_data)
print_("\nRun [bold]rob list[/bold] to see size of folders.")
def print_disk_usage(disk: rob.filesystem.DiskUsage) -> None:
print_(
f"Drive {style_path(disk.drive)} "
f"{style_bytes_as_gb((disk.usage.used),ndigits=None)} used / "
f"{style_bytes_as_gb(disk.usage.total,ndigits=None)} total "
f"({round(disk.usage.used/disk.usage.total*100)}% full)"
)
def print_library_folder_count(library: Library) -> None:
plur_s = "" if len(library.folders) == 1 else "s"
print_(f"{len(library.folders)} folder{plur_s} in {style_library(library)}")
def print_library_table(table_data: list[dict], show_size: bool = False) -> None:
table = Table(row_styles=["cyan", "sky_blue1"], show_edge=False, box=box.SQUARE)
table.add_column("ID", overflow="ellipsis")
table.add_column("Path", overflow="ellipsis")
table.add_column("Name", overflow="fold")
if show_size:
for row in table_data:
row["Size"] = style_bytes_as_gb(row["Size"])
table.add_column("Size", justify="right")
for row in table_data:
values = (str(value) for value in row.values())
table.add_row(*values)
print_(table)
def print_fail(msg: str = "") -> None:
print_(f"{msg} [bold][red]FAIL[/red][/bold]")
def print_success(msg: str = "") -> None:
print_(f"{msg} [bold][green]SUCCESS[/green][/bold]")
def print_skipped() -> None:
print_(" [grey50]SKIPPED[/grey50]")
def print_title() -> None:
# Font Slant at https://patorjk.com/software/taag/#p=display&f=Slant&t=rob
# See logo.txt for original
logo_text = " __\n _________ / /_ \n / ___/ __ \\/ __ \\\n / / / /_/ / /_/ /\n/_/ \\____/_.___/"
print_(f"[bold][purple]{logo_text}[/purple][/bold] v.{VERSION}\n")
print_(
"[bold]Help:[/bold] [link=https://github.com/dan-osull/rob/]https://github.com/dan-osull/rob/[/link]\n"
)
def style_project() -> str:
return f"[bold][purple]{PROJECT_NAME}[/purple][/bold]"
def style_library(library: Library) -> str:
library_path = str(library.library_folder).strip("\\")
return f"{style_project()} library at [purple]{library_path}[/purple]"
def style_path(obj: Union[WindowsPath, str]) -> str:
return f"[cyan]{str(obj)}[/cyan]"
def style_bytes_as_gb(size_bytes: int, ndigits=1) -> str:
gigabytes = round(size_bytes / 1024**3, ndigits)
return f"{gigabytes} GB"
def confirm_action(dry_run: bool) -> None:
if dry_run:
console.rule("[green]DRY RUN MODE[/green]")
print_("No changes will be made.")
click.confirm(text="Continue?", abort=True) | en | 0.385684 | # click.termui._ansi_colors # https://rich.readthedocs.io/en/stable/appendix/colors.html#appendix-colors # Font Slant at https://patorjk.com/software/taag/#p=display&f=Slant&t=rob # See logo.txt for original | 2.551478 | 3 |
edzed/utils/looptimes.py | xitop/edzed | 0 | 6612928 | """
Convert timestamps between event loop and system.
"""
import asyncio
import time
def _get_timediff():
"""Return the difference between loop and Unix time bases."""
loopnow = asyncio.get_running_loop().time()
unixnow = time.time()
return unixnow - loopnow
def loop_to_unixtime(looptime, timediff=None):
"""Convert event loop time to standard Unix time."""
if timediff is None:
timediff = _get_timediff()
return looptime + timediff
def unix_to_looptime(unixtime, timediff=None):
"""Convert standard Unix time to event loop time."""
if timediff is None:
timediff = _get_timediff()
return unixtime - timediff
| """
Convert timestamps between event loop and system.
"""
import asyncio
import time
def _get_timediff():
"""Return the difference between loop and Unix time bases."""
loopnow = asyncio.get_running_loop().time()
unixnow = time.time()
return unixnow - loopnow
def loop_to_unixtime(looptime, timediff=None):
"""Convert event loop time to standard Unix time."""
if timediff is None:
timediff = _get_timediff()
return looptime + timediff
def unix_to_looptime(unixtime, timediff=None):
"""Convert standard Unix time to event loop time."""
if timediff is None:
timediff = _get_timediff()
return unixtime - timediff
| en | 0.900116 | Convert timestamps between event loop and system. Return the difference between loop and Unix time bases. Convert event loop time to standard Unix time. Convert standard Unix time to event loop time. | 3.02071 | 3 |
src/pybel/io/hetionet/__init__.py | djinnome/pybel | 103 | 6612929 | <filename>src/pybel/io/hetionet/__init__.py<gh_stars>100-1000
# -*- coding: utf-8 -*-
"""Importer for Hetionet JSON."""
from .hetionet import from_hetionet_file, from_hetionet_gz, from_hetionet_json, get_hetionet
| <filename>src/pybel/io/hetionet/__init__.py<gh_stars>100-1000
# -*- coding: utf-8 -*-
"""Importer for Hetionet JSON."""
from .hetionet import from_hetionet_file, from_hetionet_gz, from_hetionet_json, get_hetionet
| en | 0.623557 | # -*- coding: utf-8 -*- Importer for Hetionet JSON. | 1.318718 | 1 |
compiled/python/ts_packet_header.py | smarek/ci_targets | 4 | 6612930 | # This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild
from pkg_resources import parse_version
import kaitaistruct
from kaitaistruct import KaitaiStruct, KaitaiStream, BytesIO
from enum import Enum
if parse_version(kaitaistruct.__version__) < parse_version('0.9'):
raise Exception("Incompatible Kaitai Struct Python API: 0.9 or later is required, but you have %s" % (kaitaistruct.__version__))
class TsPacketHeader(KaitaiStruct):
"""describes the first 4 header bytes of a TS Packet header
"""
class AdaptationFieldControlEnum(Enum):
reserved = 0
payload_only = 1
adaptation_field_only = 2
adaptation_field_and_payload = 3
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.sync_byte = self._io.read_u1()
self.transport_error_indicator = self._io.read_bits_int_be(1) != 0
self.payload_unit_start_indicator = self._io.read_bits_int_be(1) != 0
self.transport_priority = self._io.read_bits_int_be(1) != 0
self.pid = self._io.read_bits_int_be(13)
self.transport_scrambling_control = self._io.read_bits_int_be(2)
self.adaptation_field_control = KaitaiStream.resolve_enum(TsPacketHeader.AdaptationFieldControlEnum, self._io.read_bits_int_be(2))
self.continuity_counter = self._io.read_bits_int_be(4)
self._io.align_to_byte()
self.ts_packet_remain = self._io.read_bytes(184)
| # This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild
from pkg_resources import parse_version
import kaitaistruct
from kaitaistruct import KaitaiStruct, KaitaiStream, BytesIO
from enum import Enum
if parse_version(kaitaistruct.__version__) < parse_version('0.9'):
raise Exception("Incompatible Kaitai Struct Python API: 0.9 or later is required, but you have %s" % (kaitaistruct.__version__))
class TsPacketHeader(KaitaiStruct):
"""describes the first 4 header bytes of a TS Packet header
"""
class AdaptationFieldControlEnum(Enum):
reserved = 0
payload_only = 1
adaptation_field_only = 2
adaptation_field_and_payload = 3
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.sync_byte = self._io.read_u1()
self.transport_error_indicator = self._io.read_bits_int_be(1) != 0
self.payload_unit_start_indicator = self._io.read_bits_int_be(1) != 0
self.transport_priority = self._io.read_bits_int_be(1) != 0
self.pid = self._io.read_bits_int_be(13)
self.transport_scrambling_control = self._io.read_bits_int_be(2)
self.adaptation_field_control = KaitaiStream.resolve_enum(TsPacketHeader.AdaptationFieldControlEnum, self._io.read_bits_int_be(2))
self.continuity_counter = self._io.read_bits_int_be(4)
self._io.align_to_byte()
self.ts_packet_remain = self._io.read_bytes(184)
| en | 0.767786 | # This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild describes the first 4 header bytes of a TS Packet header | 2.091417 | 2 |
math3/tests/test_aambb.py | PhloxAR/math3 | 0 | 6612931 | try:
import unittest2 as unittest
except:
import unittest
import numpy as np
from math3 import vector
from math3.funcs import aambb
class test_aambb(unittest.TestCase):
def test_import(self):
import math3
math3.funcs.aambbfunc
def test_create_zeros(self):
result = aambb.create_zeros()
self.assertTrue(np.array_equal(result, [[0.,0.,0.],[0.,0.,0.]]))
self.assertTrue(np.array_equal(aambb.centre_point(result), [0.0, 0.0, 0.0]))
def test_create_from_bounds(self):
bounds = [[-1.,1.,-1.], [2.,1.,0.]]
result = aambb.create_from_bounds(*bounds)
length = max(vector.length(bounds[0]), vector.length(bounds[1]))
self.assertTrue(np.array_equal(result, [[-length,-length,-length],[length,length,length]]))
self.assertTrue(np.array_equal(aambb.centre_point(result), [0.0, 0.0, 0.0]))
def test_create_from_points(self):
result = aambb.create_from_points(np.array([[-1.0, 0.0, 0.0]]))
self.assertTrue(np.array_equal(result, [[-1.0,-1.0,-1.0],[ 1.0, 1.0, 1.0]]))
self.assertTrue(np.array_equal(aambb.centre_point(result), [0.0, 0.0, 0.0]))
def test_center_point(self):
# this should always be 0,0,0
result = aambb.create_from_bounds([-1., 1., -1.], [2., 1., 0.])
self.assertTrue(np.array_equal(aambb.centre_point(result), [0.0, 0.0, 0.0]))
def test_create_from_aabbs(self):
a1 = aambb.create_from_points([
[ 0.0, 0.0, 0.0],
[ 1.0, 1.0,-1.0]
])
a2 = aambb.create_from_points([
[ 0.0, 0.0, 2.0],
[-1.0,-1.0, 1.0]
])
result = aambb.create_from_aabbs([a1, a2])
length = np.amax(vector.length([a1, a2]))
self.assertTrue(np.array_equal(result, [[-length,-length,-length],[length,length,length]]), (result,))
self.assertTrue(np.array_equal(aambb.centre_point(result), [0.0, 0.0, 0.0]))
def test_add_point(self):
a = aambb.create_from_bounds([-0.5, -0.5, -0.5], [0.5, 0.5, 0.5])
points = np.array([
[ 2.0,-1.0,-1.0],
[ 1.0, 3.0,-1.0],
])
result = aambb.add_points(a, points)
length = np.amax(vector.length([a, points]))
self.assertTrue(np.array_equal(result, [[-length,-length,-length],[length,length,length]]), (result,))
self.assertTrue(np.array_equal(aambb.centre_point(result), [0.0, 0.0, 0.0]))
def test_add_aabbs(self):
a1 = aambb.create_from_bounds([-0.5, -0.5, -0.5], [0.5, 0.5, 0.5])
a2 = aambb.create_from_bounds([1.0, -2.0, 1.0], [2.0, -1.0, 1.0])
result = aambb.add_aabbs(a1, [a2])
length = np.amax(vector.length([a1, a2]))
self.assertTrue(np.array_equal(result, [[-length,-length,-length],[length,length,length]]), (result,))
self.assertTrue(np.array_equal(aambb.centre_point(result), [0.0, 0.0, 0.0]))
if __name__ == '__main__':
unittest.main()
| try:
import unittest2 as unittest
except:
import unittest
import numpy as np
from math3 import vector
from math3.funcs import aambb
class test_aambb(unittest.TestCase):
def test_import(self):
import math3
math3.funcs.aambbfunc
def test_create_zeros(self):
result = aambb.create_zeros()
self.assertTrue(np.array_equal(result, [[0.,0.,0.],[0.,0.,0.]]))
self.assertTrue(np.array_equal(aambb.centre_point(result), [0.0, 0.0, 0.0]))
def test_create_from_bounds(self):
bounds = [[-1.,1.,-1.], [2.,1.,0.]]
result = aambb.create_from_bounds(*bounds)
length = max(vector.length(bounds[0]), vector.length(bounds[1]))
self.assertTrue(np.array_equal(result, [[-length,-length,-length],[length,length,length]]))
self.assertTrue(np.array_equal(aambb.centre_point(result), [0.0, 0.0, 0.0]))
def test_create_from_points(self):
result = aambb.create_from_points(np.array([[-1.0, 0.0, 0.0]]))
self.assertTrue(np.array_equal(result, [[-1.0,-1.0,-1.0],[ 1.0, 1.0, 1.0]]))
self.assertTrue(np.array_equal(aambb.centre_point(result), [0.0, 0.0, 0.0]))
def test_center_point(self):
# this should always be 0,0,0
result = aambb.create_from_bounds([-1., 1., -1.], [2., 1., 0.])
self.assertTrue(np.array_equal(aambb.centre_point(result), [0.0, 0.0, 0.0]))
def test_create_from_aabbs(self):
a1 = aambb.create_from_points([
[ 0.0, 0.0, 0.0],
[ 1.0, 1.0,-1.0]
])
a2 = aambb.create_from_points([
[ 0.0, 0.0, 2.0],
[-1.0,-1.0, 1.0]
])
result = aambb.create_from_aabbs([a1, a2])
length = np.amax(vector.length([a1, a2]))
self.assertTrue(np.array_equal(result, [[-length,-length,-length],[length,length,length]]), (result,))
self.assertTrue(np.array_equal(aambb.centre_point(result), [0.0, 0.0, 0.0]))
def test_add_point(self):
a = aambb.create_from_bounds([-0.5, -0.5, -0.5], [0.5, 0.5, 0.5])
points = np.array([
[ 2.0,-1.0,-1.0],
[ 1.0, 3.0,-1.0],
])
result = aambb.add_points(a, points)
length = np.amax(vector.length([a, points]))
self.assertTrue(np.array_equal(result, [[-length,-length,-length],[length,length,length]]), (result,))
self.assertTrue(np.array_equal(aambb.centre_point(result), [0.0, 0.0, 0.0]))
def test_add_aabbs(self):
a1 = aambb.create_from_bounds([-0.5, -0.5, -0.5], [0.5, 0.5, 0.5])
a2 = aambb.create_from_bounds([1.0, -2.0, 1.0], [2.0, -1.0, 1.0])
result = aambb.add_aabbs(a1, [a2])
length = np.amax(vector.length([a1, a2]))
self.assertTrue(np.array_equal(result, [[-length,-length,-length],[length,length,length]]), (result,))
self.assertTrue(np.array_equal(aambb.centre_point(result), [0.0, 0.0, 0.0]))
if __name__ == '__main__':
unittest.main()
| en | 0.801842 | # this should always be 0,0,0 | 2.755135 | 3 |
bioimageit_gui/designer/_components.py | bioimageit/bioimageit_gui | 0 | 6612932 | <reponame>bioimageit/bioimageit_gui<gh_stars>0
from qtpy.QtWidgets import (QWidget, QVBoxLayout, QLabel)
from bioimageit_gui.core.framework import BiComponent, BiAction
from ._containers import BiDesignerContainer
class BiDesignerComponent(BiComponent):
def __init__(self):
super().__init__()
self._object_name = 'BiDesignerComponent'
#self.container = container
#self.container.register(self)
self.widget = QWidget()
layout = QVBoxLayout()
layout.setContentsMargins(0,0,0,0)
layout.setSpacing(0)
self.widget.setLayout(layout)
label = QLabel('The pipeline designer is not yet implemented')
label.setObjectName("BiWidget")
layout.addWidget(label)
def update(self, action: BiAction):
pass
def get_widget(self):
return self.widget
| from qtpy.QtWidgets import (QWidget, QVBoxLayout, QLabel)
from bioimageit_gui.core.framework import BiComponent, BiAction
from ._containers import BiDesignerContainer
class BiDesignerComponent(BiComponent):
def __init__(self):
super().__init__()
self._object_name = 'BiDesignerComponent'
#self.container = container
#self.container.register(self)
self.widget = QWidget()
layout = QVBoxLayout()
layout.setContentsMargins(0,0,0,0)
layout.setSpacing(0)
self.widget.setLayout(layout)
label = QLabel('The pipeline designer is not yet implemented')
label.setObjectName("BiWidget")
layout.addWidget(label)
def update(self, action: BiAction):
pass
def get_widget(self):
return self.widget | en | 0.239707 | #self.container = container #self.container.register(self) | 2.31491 | 2 |
reddit.py | seanneal/tweetbot | 0 | 6612933 | '''
code to interface with Reddit
'''
import configparser
import random
import praw
import duplicates
from twitter import Tweet
class Reddit:
    '''
    Encapsulate read-only access to Reddit for the tweet bot.

    Pulls hot posts from a configurable list of subreddits, converts each
    post into a (primary, reply) tweet pair, and records already-posted
    submission ids on disk so the same post is never tweeted twice.
    '''

    def __init__(self, tweet_length=138):
        '''
        Open the PRAW session and the duplicate-id store.

        :param tweet_length: maximum character budget for the primary tweet
        '''
        # 'bot1' is a praw.ini site name; the credentials live in that file.
        self.__reddit_connection = praw.Reddit(
            'bot1', user_agent='<PASSWORD>')
        self.__tweet_length = tweet_length
        # Persistent record of submission ids that were already tweeted.
        self.__duplicates = duplicates.Duplicates('posted_tweets.txt')

    def __get_tweets(self, subreddit_name):
        '''
        Grab the hot posts of one subreddit and convert them to Tweet
        objects, skipping filtered domains, stickied posts, and anything
        that was tweeted before.
        '''
        tweets = []
        # NOTE: the original format string contained literal back-slashes
        # ('\\r\\{}'); render the conventional /r/<name> form instead.
        print('[bot] Getting tweets from Reddit /r/{}'.format(subreddit_name))
        subreddit = self.__reddit_connection.subreddit(subreddit_name)
        # Image mirrors add nothing when the post itself is linked.
        filtered_domains = {'imgur.com'}

        def should_be_filtered(domain, post_id, stickied):
            return (domain in filtered_domains
                    or self.__duplicates.duplicate_check(post_id)
                    or stickied)

        for submission in subreddit.hot(limit=10):
            if should_be_filtered(
                    submission.domain,
                    submission.id,
                    submission.stickied):
                continue
            # Deleted accounts come back from PRAW as author None; do not
            # crash on them, credit them the way Reddit itself does.
            author = submission.author.name if submission.author else '[deleted]'
            first, second = self.__convert_post_to_tweet(
                submission.title,
                author,
                submission.is_self,
                submission.subreddit.display_name,
                submission.url,
                submission.shortlink)
            tweets.append(Tweet(first, second))
            # Record the id only after a tweet was actually produced.
            self.__duplicates.add_id(submission.id)
        return tweets

    @staticmethod
    def __shorten_title(title, max_len):
        '''
        Truncate *title* to at most *max_len* characters, replacing the
        tail with an ellipsis when something had to be cut.
        '''
        if len(title) <= max_len:
            return title
        return title[:max_len - 3] + "..."

    def __convert_post_to_tweet(
            self,
            title,
            user,
            is_self,
            subreddit_name,
            url,
            reddit_url):
        '''
        Split a Reddit post into a (primary, reply) tweet pair.

        The primary tweet credits the Reddit author, links the post and
        tags the subreddit; the reply links back to the Reddit comment
        thread for link posts (a self post already *is* the discussion,
        so its reply is empty).
        '''
        user_preamble = 'u/{user} says: '.format(user=user)
        hash_tag = '#{tag}'.format(tag=subreddit_name)
        # 23 is Twitter's t.co-wrapped URL length; budget the title with
        # the preamble, the hashtag and the wrapped URL subtracted.
        shortened_title = self.__shorten_title(
            title,
            self.__tweet_length -
            23 -
            len(user_preamble) -
            len(hash_tag))
        primary_tweet = user_preamble + shortened_title + ' ' + url + ' ' + hash_tag
        reply_tweet = ''
        if not is_self:
            reply_tweet = 'further discussion to be had here: {url}'.format(
                url=reddit_url)
        return primary_tweet, reply_tweet

    def __bootstrap_source(self, source_subreddit):
        '''
        Mark the current hot posts of a newly added subreddit as already
        seen, so the bot does not flood Twitter with that backlog.
        '''
        subreddit = self.__reddit_connection.subreddit(source_subreddit)
        for post in subreddit.hot(limit=10):
            self.__duplicates.add_id(post.id)

    def __get_subreddits(self):
        '''
        Read the subreddit list from subreddits.ini.

        Names listed under the 'new' key are bootstrapped (their current
        hot posts are marked as seen) and promoted to the 'known' key,
        then the file is rewritten, so each subreddit is bootstrapped
        exactly once across runs.
        '''
        def empty(subreddit_name):
            return not subreddit_name or subreddit_name.isspace()

        def parse_list(line):
            return [x for x in line.splitlines() if not empty(x)]

        CONFIG_FILE = 'subreddits.ini'
        CONFIG_SECTION = 'subreddits'
        CONFIG_NEW = 'new'
        CONFIG_KNOWN = 'known'
        cfg = configparser.ConfigParser()
        cfg.read(CONFIG_FILE)
        subreddits_cfg = cfg.get(CONFIG_SECTION, CONFIG_KNOWN)
        subreddits = parse_list(subreddits_cfg)
        new_subreddits = cfg.get(CONFIG_SECTION, CONFIG_NEW)
        if not empty(new_subreddits):
            # Promote the new entries to 'known' and persist the file
            # before bootstrapping, matching the original write order.
            subreddits_cfg = subreddits_cfg + new_subreddits
            cfg[CONFIG_SECTION][CONFIG_NEW] = ''
            cfg[CONFIG_SECTION][CONFIG_KNOWN] = subreddits_cfg
            with open(CONFIG_FILE, 'w') as cfg_file:
                cfg.write(cfg_file)
        new_subreddits = parse_list(new_subreddits)
        for subreddit in new_subreddits:
            self.__bootstrap_source(subreddit)
            subreddits.append(subreddit)
        if not subreddits:
            print('error: subreddit list is totally empty')
        return subreddits

    def get_tweets(self):
        '''
        Collect tweets from every configured subreddit, shuffled so the
        queue does not post one subreddit's items back to back.
        '''
        tweets = []
        for subreddit in self.__get_subreddits():
            tweets.extend(self.__get_tweets(subreddit))
        random.shuffle(tweets)
        return tweets
| '''
code to interface with Reddit
'''
import configparser
import random
import praw
import duplicates
from twitter import Tweet
class Reddit:
'''
Encapsulate reddit access
'''
def __init__(self, tweet_length=138):
self.__reddit_connection = praw.Reddit(
'bot1', user_agent='<PASSWORD>')
self.__tweet_length = tweet_length
self.__duplicates = duplicates.Duplicates('posted_tweets.txt')
def __get_tweets(self, subreddit_name):
'''
grabs tweets from reddit and formats them for processing
'''
tweets = []
print('[bot] Getting tweets from Reddit\\r\\{}'.format(subreddit_name))
subreddit = self.__reddit_connection.subreddit(subreddit_name)
filtered_domains = {'imgur.com'}
should_be_filtered = \
lambda domain, post_id, stickied: domain in filtered_domains or \
self.__duplicates.duplicate_check(post_id) or \
stickied
for submission in subreddit.hot(limit=10):
if not should_be_filtered(
submission.domain,
submission.id,
submission.stickied):
first, second = self.__convert_post_to_tweet(
submission.title,
submission.author.name,
submission.is_self,
submission.subreddit.display_name,
submission.url,
submission.shortlink)
tweets.append(Tweet(first, second))
self.__duplicates.add_id(submission.id)
return tweets
@staticmethod
def __shorten_title(title, max_len):
'''
shortens title to fit in the tweet
'''
if len(title) <= max_len:
return title
return title[:max_len - 3] + "..."
def __convert_post_to_tweet(
self,
title,
user,
is_self,
subreddit_name,
url,
reddit_url):
'''
split the post into two tweets, one primary and one that is the reply.
keep all the communication here.
'''
user_preamble = 'u/{user} says: '.format(user=user)
hash_tag = '#{tag}'.format(tag=subreddit_name)
shortened_title = self.__shorten_title(
title,
self.__tweet_length -
23 -
len(user_preamble) -
len(hash_tag))
primary_tweet = user_preamble + shortened_title + ' ' + url + ' ' + hash_tag
reply_tweet = ''
if not is_self:
reply_tweet = 'further discussion to be had here: {url}'.format(
url=reddit_url)
return primary_tweet, reply_tweet
def __bootstrap_source(self, source_subreddit):
subreddit = self.__reddit_connection.subreddit(source_subreddit)
for post in subreddit.hot(limit=10):
self.__duplicates.add_id(post.id)
def __get_subreddits(self):
def empty(subreddit_name):
return not subreddit_name or subreddit_name.isspace()
def parse_list(line):
return [x for x in line.splitlines() if not empty(x)]
CONFIG_FILE = 'subreddits.ini'
CONFIG_SECTION = 'subreddits'
CONFIG_NEW = 'new'
CONFIG_KNOWN = 'known'
cfg = configparser.ConfigParser()
cfg.read(CONFIG_FILE)
subreddits_cfg = cfg.get(CONFIG_SECTION, CONFIG_KNOWN)
subreddits = parse_list(subreddits_cfg)
new_subreddits = cfg.get(CONFIG_SECTION, CONFIG_NEW)
if not empty(new_subreddits):
subreddits_cfg = subreddits_cfg + new_subreddits
cfg[CONFIG_SECTION][CONFIG_NEW] = ''
cfg[CONFIG_SECTION][CONFIG_KNOWN] = subreddits_cfg
with open(CONFIG_FILE, 'w') as cfg_file:
cfg.write(cfg_file)
new_subreddits = parse_list(new_subreddits)
for subreddit in new_subreddits:
self.__bootstrap_source(subreddit)
subreddits.append(subreddit)
if not subreddits:
print('error: subreddit list is totally empty')
return subreddits
def get_tweets(self):
'''
Get's posts from reddits and converts them to tweets.
'''
tweets = []
for subreddit in self.__get_subreddits():
for tweet in self.__get_tweets(subreddit):
tweets.append(tweet)
random.shuffle(tweets)
return tweets
| en | 0.933825 | code to interface with Reddit Encapsulate reddit access grabs tweets from reddit and formats them for processing shortens title to fit in the tweet split the post into two tweets, one primary and one that is the reply. keep all the communication here. Get's posts from reddits and converts them to tweets. | 3.444619 | 3 |
day4.py | Tentoe/AOC2021 | 0 | 6612934 | <filename>day4.py<gh_stars>0
input_data = open("day4.input").read().split("\n")
draw = input_data.pop(0).split(',')
input_data.pop(0)
size = 5
def getCards(data):
ret = []
for start in range(0, int(len(input_data)), size+1):
card = []
for line in range(size):
newline = []
text = input_data[start+line]
for num in range(0, len(text), 3):
newline.append(int(text[num:num+2]))
card.append(newline)
ret.append(card)
return ret
def getWinner(data):
ret = []
for idx, card in enumerate(data):
for line in range(size):
if all(map(lambda x: x == -1, card[line])):
return idx
column = [line2[line] for line2 in card]
if all(map(lambda x: x == -1, column)):
return idx
return -1
def getWinners(data):
ret = []
for idx, card in enumerate(data):
for line in range(size):
if all(map(lambda x: x == -1, card[line])):
ret.append(idx)
break
column = [line2[line] for line2 in card]
if all(map(lambda x: x == -1, column)):
ret.append(idx)
return ret
def markCards(cards, drawn):
for card in cards:
for idx, line in enumerate(card):
card[idx] = [-1 if digit == drawn else digit for digit in line]
test = card
cards = getCards(input_data)
for drawn in draw:
markCards(cards, int(drawn))
winner = getWinner(cards)
if winner >= 0:
print("Winning:", winner, cards[winner])
score = 0
for line in cards[winner]:
for digit in line:
if digit > 0:
score += digit
print(int(drawn) * score)
break
cards = getCards(input_data)
for drawn in draw:
markCards(cards, int(drawn))
winners = getWinners(cards)
winners.reverse()
for winner in winners:
last = cards.pop(winner)
print(len(cards))
if len(cards) == 0:
score = 0
for line in last:
for digit in line:
if digit > 0:
score += digit
print(int(drawn) * score)
print(drawn, score)
print(last)
break
| <filename>day4.py<gh_stars>0
input_data = open("day4.input").read().split("\n")
draw = input_data.pop(0).split(',')
input_data.pop(0)
size = 5
def getCards(data):
ret = []
for start in range(0, int(len(input_data)), size+1):
card = []
for line in range(size):
newline = []
text = input_data[start+line]
for num in range(0, len(text), 3):
newline.append(int(text[num:num+2]))
card.append(newline)
ret.append(card)
return ret
def getWinner(data):
ret = []
for idx, card in enumerate(data):
for line in range(size):
if all(map(lambda x: x == -1, card[line])):
return idx
column = [line2[line] for line2 in card]
if all(map(lambda x: x == -1, column)):
return idx
return -1
def getWinners(data):
ret = []
for idx, card in enumerate(data):
for line in range(size):
if all(map(lambda x: x == -1, card[line])):
ret.append(idx)
break
column = [line2[line] for line2 in card]
if all(map(lambda x: x == -1, column)):
ret.append(idx)
return ret
def markCards(cards, drawn):
for card in cards:
for idx, line in enumerate(card):
card[idx] = [-1 if digit == drawn else digit for digit in line]
test = card
cards = getCards(input_data)
for drawn in draw:
markCards(cards, int(drawn))
winner = getWinner(cards)
if winner >= 0:
print("Winning:", winner, cards[winner])
score = 0
for line in cards[winner]:
for digit in line:
if digit > 0:
score += digit
print(int(drawn) * score)
break
cards = getCards(input_data)
for drawn in draw:
markCards(cards, int(drawn))
winners = getWinners(cards)
winners.reverse()
for winner in winners:
last = cards.pop(winner)
print(len(cards))
if len(cards) == 0:
score = 0
for line in last:
for digit in line:
if digit > 0:
score += digit
print(int(drawn) * score)
print(drawn, score)
print(last)
break
| none | 1 | 3.262257 | 3 | |
20201027_1111_fastapi2/src/main.py | ctfwiki/subject_misc_ctfshow | 16 | 6612935 | <filename>20201027_1111_fastapi2/src/main.py
from typing import Optional
from fastapi import FastAPI,Form
from fastapi.responses import StreamingResponse
from io import BytesIO
import uvicorn
app = FastAPI()
@app.get("/")
def hello():
return {"hello": "fastapi2"}
youdontknow = ['import', 'open', 'eval', 'exec', 'class', '\'', '"', 'vars', 'str', 'chr', '%', '_', 'flag','in', '-', 'mro', '[', ']']
@app.post("/ccccalcccc",description='安全的计算器v2(flag就在根目录,但我不相信你能得到<font color="red">她</font>)')
def calc(q: Optional[str] = Form(...)):
try:
for kiword in youdontknow:
if kiword in q:
return {"res": "hack out!", "err": False}
return {"res": eval(q), "err": False}
except:
return {"res": "", "err": True}
@app.get("/yuanliang_5_aaxx.zip")
def yl5():
return StreamingResponse(BytesIO(open("yuanliang_5_aaxx.zip","rb").read()), media_type="application/octet-stream")
if __name__ == '__main__':
uvicorn.run(app=app, host="0.0.0.0", port=8000, workers=1)
| <filename>20201027_1111_fastapi2/src/main.py
from typing import Optional
from fastapi import FastAPI,Form
from fastapi.responses import StreamingResponse
from io import BytesIO
import uvicorn
app = FastAPI()
@app.get("/")
def hello():
return {"hello": "fastapi2"}
youdontknow = ['import', 'open', 'eval', 'exec', 'class', '\'', '"', 'vars', 'str', 'chr', '%', '_', 'flag','in', '-', 'mro', '[', ']']
@app.post("/ccccalcccc",description='安全的计算器v2(flag就在根目录,但我不相信你能得到<font color="red">她</font>)')
def calc(q: Optional[str] = Form(...)):
try:
for kiword in youdontknow:
if kiword in q:
return {"res": "hack out!", "err": False}
return {"res": eval(q), "err": False}
except:
return {"res": "", "err": True}
@app.get("/yuanliang_5_aaxx.zip")
def yl5():
return StreamingResponse(BytesIO(open("yuanliang_5_aaxx.zip","rb").read()), media_type="application/octet-stream")
if __name__ == '__main__':
uvicorn.run(app=app, host="0.0.0.0", port=8000, workers=1)
| none | 1 | 2.713008 | 3 | |
cudem/fetches/multibeam.py | ciresdem/cudem | 4 | 6612936 | ### multibeam.py - NCEI Multibeam
##
## Copyright (c) 2010 - 2022 Regents of the University of Colorado
##
## multibeam.py is part of CUDEM
##
## Permission is hereby granted, free of charge, to any person obtaining a copy
## of this software and associated documentation files (the "Software"), to deal
## in the Software without restriction, including without limitation the rights
## to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
## of the Software, and to permit persons to whom the Software is furnished to do so,
## subject to the following conditions:
##
## The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
## INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
## PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
## FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
## ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
##
### Commentary:
##
## MB Fetch
##
## Fetch Multibeam bathymetric surveys from NOAA
## MBSystem is required to process the resulting data
##
## NCEI is the U.S. national archive for multibeam bathymetric data and holds more than 9 million
## nautical miles of ship trackline data recorded from over 2400 cruises and received from sources
## worldwide.
##
## Uses NCEI multibeam groovy script to discover multibeam surveys.
##
### Code:
import os
from cudem import utils
from cudem import regions
from cudem import datasets
from cudem import xyzfun
import cudem.fetches.utils as f_utils
## ==============================================
## MapServer testing
## ==============================================
class MBDB(f_utils.FetchModule):
"""NOSHydro"""
def __init__(self, where='1=1', **kwargs):
super().__init__(**kwargs)
self._mb_dynamic_url = 'https://gis.ngdc.noaa.gov/arcgis/rest/services/web_mercator/multibeam_dynamic/MapServer/0'
self._mb_url = 'https://gis.ngdc.noaa.gov/arcgis/rest/services/web_mercator/multibeam/MapServer/0'
#self._nos_data_url = 'https://data.ngdc.noaa.gov/platforms/ocean/nos/coast/'
self._mb_query_url = '{0}/query?'.format(self._mb_dynamic_url)
self._outdir = os.path.join(os.getcwd(), 'multibeam')
self.name = 'multibeam'
self.where = where
def run(self):
if self.region is None:
return([])
_data = {
'where': self.where,
'outFields': '*',
'geometry': self.region.format('bbox'),
'inSR':4326,
'outSR':4326,
'f':'pjson',
'returnGeometry':'False',
}
_req = f_utils.Fetch(self._mb_query_url, verbose=self.verbose).fetch_req(params=_data)
if _req is not None:
print(_req.text)
features = _req.json()
for feature in features['features']:
pass
class Multibeam(f_utils.FetchModule):
"""Fetch multibeam data from NOAA NCEI"""
def __init__(self, processed=False, inc=None, process=False, **kwargs):
super().__init__(**kwargs)
self._mb_data_url = "https://data.ngdc.noaa.gov/platforms/"
self._mb_metadata_url = "https://data.noaa.gov/waf/NOAA/NESDIS/NGDC/MGG/Multibeam/iso/"
self._mb_search_url = "https://maps.ngdc.noaa.gov/mapviewer-support/multibeam/files.groovy?"
self._mb_autogrid = "https://www.ngdc.noaa.gov/maps/autogrid/"
self._mb_html = "https://www.ngdc.noaa.gov/"
self._outdir = os.path.join(os.getcwd(), 'mb')
self._urls = [self._mb_data_url, self._mb_metadata_url, self._mb_autogrid]
self.name = 'multibeam'
self.processed_p = processed
self.process = process
self.inc = utils.str2inc(inc)
def mb_inf_data_format(self, src_inf):
"""extract the data format from the mbsystem inf file."""
with open(src_inf, errors='ignore') as iob:
for il in iob:
til = il.split()
if len(til) > 1:
if til[0] == 'MBIO':
return(til[4])
def mb_inf_data_date(self, src_inf):
"""extract the data format from the mbsystem inf file."""
with open(src_inf, errors='ignore') as iob:
for il in iob:
til = il.split()
if len(til) > 1:
if til[0] == 'Time:':
return(til[3])
def mb_inf_perc_good(self, src_inf):
"""extract the data format from the mbsystem inf file."""
with open(src_inf, errors='ignore') as iob:
for il in iob:
til = il.split(':')
if len(til) > 1:
if til[0].strip() == 'Number of Good Beams':
return(til[1].split()[-1].split('%')[0])
def run(self):
these_surveys = {}
these_versions = {}
if self.region is None: return([])
_req = f_utils.Fetch(self._mb_search_url).fetch_req(params={'geometry': self.region.format('bbox')}, timeout=20)
if _req is not None and _req.status_code == 200:
survey_list = _req.text.split('\n')[:-1]
for r in survey_list:
dst_pfn = r.split(' ')[0]
dst_fn = dst_pfn.split('/')[-1:][0]
survey = dst_pfn.split('/')[6]
dn = r.split(' ')[0].split('/')[:-1]
version = dst_pfn.split('/')[9][-1]
data_url = self._mb_data_url + '/'.join(r.split('/')[3:])
if survey in these_surveys.keys():
if version in these_surveys[survey].keys():
these_surveys[survey][version].append([data_url.split(' ')[0], '/'.join([survey, dst_fn]), 'mb'])
else:
these_surveys[survey][version] = [[data_url.split(' ')[0], '/'.join([survey, dst_fn]), 'mb']]
else:
these_surveys[survey] = {version: [[data_url.split(' ')[0], '/'.join([survey, dst_fn]), 'mb']]}
else: utils.echo_error_msg('{}'.format(_req.reason))
for key in these_surveys.keys():
if self.processed_p:
if '2' in these_surveys[key].keys():
for v2 in these_surveys[key]['2']:
self.results.append(v2)
else:
for v1 in these_surveys[key]['1']:
self.results.append(v1)
else:
for keys in these_surveys[key].keys():
for survs in these_surveys[key][keys]:
self.results.append(survs)
with open('mb_inf.txt', 'w') as mb_inf_txt:
for entry in self.results:
mb_inf_txt.write(self.parse_entry_inf(entry))
mb_inf_txt.write('\n')
#self.echo_inf(entry)
def echo_inf(self, entry):
print(self.parse_entry_inf(entry))
def parse_entry_inf(self, entry, keep_inf=False):
src_data = os.path.basename(entry[1])
src_mb = src_data[:-4]
survey = entry[0].split('/')[7]
if f_utils.Fetch('{}.inf'.format(entry[0][:-4]), callback=self.callback, verbose=self.verbose).fetch_file('{}.inf'.format(src_mb)) == 0:
mb_fmt = self.mb_inf_data_format('{}.inf'.format(src_mb))
mb_date = self.mb_inf_data_date('{}.inf'.format(src_mb))
mb_perc = self.mb_inf_perc_good('{}.inf'.format(src_mb))
if not keep_inf:
utils.remove_glob('{}.inf'.format(src_mb))
return(survey, src_data, mb_fmt, mb_perc, mb_date)
def yield_xyz(self, entry):
src_data = os.path.basename(entry[1])
src_mb = src_data[:-4]
try:
survey, src_data, mb_fmt, mb_perc, mb_date = self.parse_entry_inf(entry)
except TypeError:
return
if f_utils.Fetch(entry[0], callback=self.callback, verbose=self.verbose).fetch_file(src_data) == 0:
src_xyz = os.path.basename(src_data) + '.xyz'
if not self.process:
this_weight = self.weight
out, status = utils.run_cmd('mblist -OXYZ -I{} -Ma > {}'.format(src_data, src_xyz), verbose=True)
else:
this_weight = (float(mb_perc) * (1 + (2*((int(mb_date)-2015)/100))))/100.
out, status = utils.run_cmd('mblist -OXYZ -I{} -MX{} > {}'.format(src_data, str(100-float(mb_perc)), src_xyz), verbose=True)
if status != 0:
if f_utils.Fetch('{}.inf'.format(entry[0]), callback=self.callback, verbose=self.verbose).fetch_file('{}.inf'.format(src_mb)) == 0:
mb_fmt = self.mb_inf_data_format('{}.inf'.format(src_mb))
mb_date = self.mb_inf_data_date('{}.inf'.format(src_mb))
out, status = utils.run_cmd('mblist -F{} -OXYZ -I{} -MX{} > {}'.format(mb_fmt, src_data, str(100-float(mb_perc)), src_xyz), verbose=True)
if status == 0:
_ds = datasets.XYZFile(
fn=src_xyz,
delim='\t',
data_format=168,
src_srs='epsg:4326',
dst_srs=self.dst_srs,
#name=os.path.basename(entry[1]),
src_region=self.region,
verbose=self.verbose,
weight=this_weight,
remote=True
)
if self.inc is not None:
xyz_func = lambda p: _ds.dump_xyz(dst_port=p, encode=True)
for xyz in utils.yield_cmd(
'gmt blockmedian -I{:.10f} {} -r -V'.format(
self.inc, self.region.format('gmt')
),
verbose=self.verbose,
data_fun=xyz_func
):
yield(xyzfun.XYZPoint().from_list([float(x) for x in xyz.split()]))
else:
for xyz in _ds.yield_xyz():
yield(xyz)
utils.remove_glob(src_data, '{}*'.format(src_xyz), '{}*.inf'.format(src_mb))
else:
utils.echo_error_msg('failed to process local file, {} [{}]...'.format(src_data, entry[0]))
with open(
'{}'.format(os.path.join(self._outdir, 'fetch_{}_{}.err'.format(self.name, self.region.format('fn')))),
'a'
) as mb_err:
mb_err.write('{}\n'.format(','.join([src_mb, entry[0]])))
os.rename(src_data, os.path.join(self._outdir, src_data))
utils.remove_glob(src_xyz)
else:
utils.echo_error_msg(
'failed to fetch remote file, {}...'.format(src_data)
)
### End
| ### multibeam.py - NCEI Multibeam
##
## Copyright (c) 2010 - 2022 Regents of the University of Colorado
##
## multibeam.py is part of CUDEM
##
## Permission is hereby granted, free of charge, to any person obtaining a copy
## of this software and associated documentation files (the "Software"), to deal
## in the Software without restriction, including without limitation the rights
## to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
## of the Software, and to permit persons to whom the Software is furnished to do so,
## subject to the following conditions:
##
## The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
## INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
## PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
## FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
## ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
##
### Commentary:
##
## MB Fetch
##
## Fetch Multibeam bathymetric surveys from NOAA
## MBSystem is required to process the resulting data
##
## NCEI is the U.S. national archive for multibeam bathymetric data and holds more than 9 million
## nautical miles of ship trackline data recorded from over 2400 cruises and received from sources
## worldwide.
##
## Uses NCEI multibeam groovy script to discover multibeam surveys.
##
### Code:
import os
from cudem import utils
from cudem import regions
from cudem import datasets
from cudem import xyzfun
import cudem.fetches.utils as f_utils
## ==============================================
## MapServer testing
## ==============================================
class MBDB(f_utils.FetchModule):
"""NOSHydro"""
def __init__(self, where='1=1', **kwargs):
super().__init__(**kwargs)
self._mb_dynamic_url = 'https://gis.ngdc.noaa.gov/arcgis/rest/services/web_mercator/multibeam_dynamic/MapServer/0'
self._mb_url = 'https://gis.ngdc.noaa.gov/arcgis/rest/services/web_mercator/multibeam/MapServer/0'
#self._nos_data_url = 'https://data.ngdc.noaa.gov/platforms/ocean/nos/coast/'
self._mb_query_url = '{0}/query?'.format(self._mb_dynamic_url)
self._outdir = os.path.join(os.getcwd(), 'multibeam')
self.name = 'multibeam'
self.where = where
def run(self):
if self.region is None:
return([])
_data = {
'where': self.where,
'outFields': '*',
'geometry': self.region.format('bbox'),
'inSR':4326,
'outSR':4326,
'f':'pjson',
'returnGeometry':'False',
}
_req = f_utils.Fetch(self._mb_query_url, verbose=self.verbose).fetch_req(params=_data)
if _req is not None:
print(_req.text)
features = _req.json()
for feature in features['features']:
pass
class Multibeam(f_utils.FetchModule):
"""Fetch multibeam data from NOAA NCEI"""
def __init__(self, processed=False, inc=None, process=False, **kwargs):
super().__init__(**kwargs)
self._mb_data_url = "https://data.ngdc.noaa.gov/platforms/"
self._mb_metadata_url = "https://data.noaa.gov/waf/NOAA/NESDIS/NGDC/MGG/Multibeam/iso/"
self._mb_search_url = "https://maps.ngdc.noaa.gov/mapviewer-support/multibeam/files.groovy?"
self._mb_autogrid = "https://www.ngdc.noaa.gov/maps/autogrid/"
self._mb_html = "https://www.ngdc.noaa.gov/"
self._outdir = os.path.join(os.getcwd(), 'mb')
self._urls = [self._mb_data_url, self._mb_metadata_url, self._mb_autogrid]
self.name = 'multibeam'
self.processed_p = processed
self.process = process
self.inc = utils.str2inc(inc)
def mb_inf_data_format(self, src_inf):
"""extract the data format from the mbsystem inf file."""
with open(src_inf, errors='ignore') as iob:
for il in iob:
til = il.split()
if len(til) > 1:
if til[0] == 'MBIO':
return(til[4])
def mb_inf_data_date(self, src_inf):
"""extract the data format from the mbsystem inf file."""
with open(src_inf, errors='ignore') as iob:
for il in iob:
til = il.split()
if len(til) > 1:
if til[0] == 'Time:':
return(til[3])
def mb_inf_perc_good(self, src_inf):
"""extract the data format from the mbsystem inf file."""
with open(src_inf, errors='ignore') as iob:
for il in iob:
til = il.split(':')
if len(til) > 1:
if til[0].strip() == 'Number of Good Beams':
return(til[1].split()[-1].split('%')[0])
def run(self):
these_surveys = {}
these_versions = {}
if self.region is None: return([])
_req = f_utils.Fetch(self._mb_search_url).fetch_req(params={'geometry': self.region.format('bbox')}, timeout=20)
if _req is not None and _req.status_code == 200:
survey_list = _req.text.split('\n')[:-1]
for r in survey_list:
dst_pfn = r.split(' ')[0]
dst_fn = dst_pfn.split('/')[-1:][0]
survey = dst_pfn.split('/')[6]
dn = r.split(' ')[0].split('/')[:-1]
version = dst_pfn.split('/')[9][-1]
data_url = self._mb_data_url + '/'.join(r.split('/')[3:])
if survey in these_surveys.keys():
if version in these_surveys[survey].keys():
these_surveys[survey][version].append([data_url.split(' ')[0], '/'.join([survey, dst_fn]), 'mb'])
else:
these_surveys[survey][version] = [[data_url.split(' ')[0], '/'.join([survey, dst_fn]), 'mb']]
else:
these_surveys[survey] = {version: [[data_url.split(' ')[0], '/'.join([survey, dst_fn]), 'mb']]}
else: utils.echo_error_msg('{}'.format(_req.reason))
for key in these_surveys.keys():
if self.processed_p:
if '2' in these_surveys[key].keys():
for v2 in these_surveys[key]['2']:
self.results.append(v2)
else:
for v1 in these_surveys[key]['1']:
self.results.append(v1)
else:
for keys in these_surveys[key].keys():
for survs in these_surveys[key][keys]:
self.results.append(survs)
with open('mb_inf.txt', 'w') as mb_inf_txt:
for entry in self.results:
mb_inf_txt.write(self.parse_entry_inf(entry))
mb_inf_txt.write('\n')
#self.echo_inf(entry)
def echo_inf(self, entry):
print(self.parse_entry_inf(entry))
def parse_entry_inf(self, entry, keep_inf=False):
src_data = os.path.basename(entry[1])
src_mb = src_data[:-4]
survey = entry[0].split('/')[7]
if f_utils.Fetch('{}.inf'.format(entry[0][:-4]), callback=self.callback, verbose=self.verbose).fetch_file('{}.inf'.format(src_mb)) == 0:
mb_fmt = self.mb_inf_data_format('{}.inf'.format(src_mb))
mb_date = self.mb_inf_data_date('{}.inf'.format(src_mb))
mb_perc = self.mb_inf_perc_good('{}.inf'.format(src_mb))
if not keep_inf:
utils.remove_glob('{}.inf'.format(src_mb))
return(survey, src_data, mb_fmt, mb_perc, mb_date)
def yield_xyz(self, entry):
src_data = os.path.basename(entry[1])
src_mb = src_data[:-4]
try:
survey, src_data, mb_fmt, mb_perc, mb_date = self.parse_entry_inf(entry)
except TypeError:
return
if f_utils.Fetch(entry[0], callback=self.callback, verbose=self.verbose).fetch_file(src_data) == 0:
src_xyz = os.path.basename(src_data) + '.xyz'
if not self.process:
this_weight = self.weight
out, status = utils.run_cmd('mblist -OXYZ -I{} -Ma > {}'.format(src_data, src_xyz), verbose=True)
else:
this_weight = (float(mb_perc) * (1 + (2*((int(mb_date)-2015)/100))))/100.
out, status = utils.run_cmd('mblist -OXYZ -I{} -MX{} > {}'.format(src_data, str(100-float(mb_perc)), src_xyz), verbose=True)
if status != 0:
if f_utils.Fetch('{}.inf'.format(entry[0]), callback=self.callback, verbose=self.verbose).fetch_file('{}.inf'.format(src_mb)) == 0:
mb_fmt = self.mb_inf_data_format('{}.inf'.format(src_mb))
mb_date = self.mb_inf_data_date('{}.inf'.format(src_mb))
out, status = utils.run_cmd('mblist -F{} -OXYZ -I{} -MX{} > {}'.format(mb_fmt, src_data, str(100-float(mb_perc)), src_xyz), verbose=True)
if status == 0:
_ds = datasets.XYZFile(
fn=src_xyz,
delim='\t',
data_format=168,
src_srs='epsg:4326',
dst_srs=self.dst_srs,
#name=os.path.basename(entry[1]),
src_region=self.region,
verbose=self.verbose,
weight=this_weight,
remote=True
)
if self.inc is not None:
xyz_func = lambda p: _ds.dump_xyz(dst_port=p, encode=True)
for xyz in utils.yield_cmd(
'gmt blockmedian -I{:.10f} {} -r -V'.format(
self.inc, self.region.format('gmt')
),
verbose=self.verbose,
data_fun=xyz_func
):
yield(xyzfun.XYZPoint().from_list([float(x) for x in xyz.split()]))
else:
for xyz in _ds.yield_xyz():
yield(xyz)
utils.remove_glob(src_data, '{}*'.format(src_xyz), '{}*.inf'.format(src_mb))
else:
utils.echo_error_msg('failed to process local file, {} [{}]...'.format(src_data, entry[0]))
with open(
'{}'.format(os.path.join(self._outdir, 'fetch_{}_{}.err'.format(self.name, self.region.format('fn')))),
'a'
) as mb_err:
mb_err.write('{}\n'.format(','.join([src_mb, entry[0]])))
os.rename(src_data, os.path.join(self._outdir, src_data))
utils.remove_glob(src_xyz)
else:
utils.echo_error_msg(
'failed to fetch remote file, {}...'.format(src_data)
)
### End
| en | 0.694701 | ### multibeam.py - NCEI Multibeam ## ## Copyright (c) 2010 - 2022 Regents of the University of Colorado ## ## multibeam.py is part of CUDEM ## ## Permission is hereby granted, free of charge, to any person obtaining a copy ## of this software and associated documentation files (the "Software"), to deal ## in the Software without restriction, including without limitation the rights ## to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies ## of the Software, and to permit persons to whom the Software is furnished to do so, ## subject to the following conditions: ## ## The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. ## ## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, ## INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ## PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE ## FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ## ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ## ### Commentary: ## ## MB Fetch ## ## Fetch Multibeam bathymetric surveys from NOAA ## MBSystem is required to process the resulting data ## ## NCEI is the U.S. national archive for multibeam bathymetric data and holds more than 9 million ## nautical miles of ship trackline data recorded from over 2400 cruises and received from sources ## worldwide. ## ## Uses NCEI multibeam groovy script to discover multibeam surveys. ## ### Code: ## ============================================== ## MapServer testing ## ============================================== NOSHydro #self._nos_data_url = 'https://data.ngdc.noaa.gov/platforms/ocean/nos/coast/' Fetch multibeam data from NOAA NCEI extract the data format from the mbsystem inf file. 
extract the data format from the mbsystem inf file. extract the data format from the mbsystem inf file. #self.echo_inf(entry) #name=os.path.basename(entry[1]), ### End | 1.219703 | 1 |
server.py | 0Shark/SindiAIDev | 6 | 6612937 | <gh_stars>1-10
from flask import Flask, render_template, request
import pymysql
import utils
from pyowm import OWM
# OpenWeather
# NOTE(review): hard-coded API key committed to source; should be read from an
# environment variable or config file instead.
weatherAPI_token = "44edc82d5c54a7d0cd68aec1904e810e"
mgr = OWM(weatherAPI_token)

# initializing variables
s = 0  # running chat-row id, incremented by insert_sql()
q = 0  # unused in this file -- presumably kept for another module; verify
facehash = ""  # identity hash of the recognised user, set by the /setup route

app = Flask(__name__)
def insert_sql(user_input):
    """Store one chat exchange (user message + bot reply) in user_bot_chat.

    Generates the bot reply via ``utils.giveInput`` using the current global
    ``facehash``, bumps the global row counter ``s`` to use as the row id, and
    inserts through the module-level cursor/connection (``a``/``conn``).
    Failures are logged and swallowed so a bad insert never crashes a request.
    """
    global s
    global facehash
    s = s + 1  # next row id
    resp = str(utils.giveInput(user_input, facehash))
    try:
        # Parameterized query -- never interpolate user text into SQL.
        # (The previous string concatenation was an SQL-injection hole.)
        sql = 'INSERT INTO user_bot_chat (id, User_input, Bot_output) VALUES (%s, %s, %s);'
        a.execute(sql, (str(s), user_input, resp))
        conn.commit()
    except Exception as e:
        print("insert_sql failed")
        print("Exception occured:{}".format(e))
def user_list():
    """Return every stored user message from user_bot_chat, oldest first."""
    a.execute('select User_input from user_bot_chat;')
    return [row[0] for row in a.fetchall()]
def bot_list():
    """Return every stored bot response from user_bot_chat, oldest first."""
    a.execute('select Bot_output from user_bot_chat;')
    return [row[0] for row in a.fetchall()]
@app.route('/home')  # main chat page
def index():
    """Render the chat page with history, current weather and music status."""
    temp, location, icon, humidity, wind = getWeather()
    return render_template(
        "index.html",
        user_input=r(),
        temp=temp,
        location=location,
        icon=icon,
        humidity=humidity,
        wind=wind,
        music=utils.music_playing(),
    )
@app.route('/')  # entry page shown before chatting
def home():
    # Serve the setup page; its form POSTs the face hash to /setup.
    return render_template("setup.html")
@app.route('/setup', methods=['POST'])
def setup():
    """Record the recognised user's face hash, then open the chat page."""
    global facehash
    facehash = request.form["facehash"]
    temp, location, icon, humidity, wind = getWeather()
    return render_template(
        "index.html",
        user_input=r(),
        temp=temp,
        location=location,
        icon=icon,
        humidity=humidity,
        wind=wind,
        music=utils.music_playing(),
    )
@app.route('/clear')
def clearChat():
    """Delete the whole chat history and re-render an empty chat page."""
    global s
    weather = getWeather()
    # TRUNCATE is DDL in MySQL and auto-commits, so no conn.commit() is needed.
    a.execute("TRUNCATE TABLE user_bot_chat;")
    s = 0  # restart row ids so insert_sql stays in sync with the emptied table
    return render_template("index.html", user_input=r(), temp=weather[0], location=weather[1], icon=weather[2], humidity=weather[3], wind=weather[4], music=utils.music_playing())
def r(): # takes user inputs and bot outputs and insert into a array to later send to html file
try:
user_input = request.form["user_input"]
insert_sql(user_input)
r = []
user = user_list()
bot = bot_list()
for j in range(0, len(user)):
r.append(user[j])
r.append(bot[j])
return r
except:
r = []
user = user_list()
bot = bot_list()
for j in range(0, len(user)):
r.append(user[j])
r.append(bot[j])
return r
def getWeather():
observation = mgr.weather_at_place('Tirana')
w = observation.get_weather()
wind_data = w.get_wind()
humidity = w.get_humidity()
temp_data = w.get_temperature('celsius')
icon = w.get_weather_icon_name()
weatherData = [str(int(temp_data['temp'])), 'Tirana', str(icon), str(int(humidity)), str(int(wind_data['speed']))]
return weatherData
@app.route('/process', methods=['POST'])
def process():
weather = getWeather()
# called when user input is given and submit button is pressed
return render_template("index.html", user_input=r(), temp=weather[0], location=weather[1], icon=weather[2], humidity=weather[3], wind=weather[4], music=utils.music_playing())
if __name__ == '__main__':
try: # connects to the database
conn = pymysql.connect(host='localhost', user='root', password='', db='sindi_db')
a = conn.cursor()
except Exception as e:
print("QUERY ERROR: Connection")
print("Exeception occured:{}".format(e))
app.run(host='0.0.0.0', port=int('8000'), debug=True) # 0.0.0.0.,80
# conn.close()
# a.close()
| from flask import Flask, render_template, request
import pymysql
import utils
from pyowm import OWM
# OpenWeather
weatherAPI_token = "44edc82d5c54a7d0cd68aec1904e810e"
mgr = OWM(weatherAPI_token)
# initializing variables
s = 0
q = 0
facehash = ""
app = Flask(__name__)
def insert_sql(user_input): # inserting user inputs, bot outputs and time into database
global s
global facehash
s = s + 1 # ID
resp = utils.giveInput(user_input, facehash)
resp = str(resp)
try:
sql = 'INSERT INTO user_bot_chat (id, User_input, Bot_output) VALUES("' + str(
s) + '","' + user_input + '","' + resp + '");'
a.execute(sql)
conn.commit()
except Exception as e:
print("Line 27")
print("Exeception occured:{}".format(e))
def user_list(): # extracting user inputs from user_bot_chat database
user = []
sql = 'select User_input from user_bot_chat;'
a.execute(sql)
w_user = list(a.fetchall())
for i in w_user:
# user.append('You: ' + i[0])
user.append(i[0])
return user
def bot_list(): # extracting bot responses from user_bot_chat database
bot = []
sql = 'select Bot_output from user_bot_chat;'
a.execute(sql)
w_bot = list(a.fetchall())
for i in w_bot:
# bot.append('Sindi' + i[0])
bot.append(i[0])
return bot
@app.route('/home') # links to the first page - index.html
def index():
weather = getWeather()
return render_template("index.html", user_input=r(), temp=weather[0], location=weather[1], icon=weather[2], humidity=weather[3], wind=weather[4], music=utils.music_playing())
@app.route('/') # links to the first page - index.html
def home():
return render_template("setup.html")
@app.route('/setup', methods=['POST'])
def setup():
weather = getWeather()
global facehash
facehash= request.form["facehash"]
return render_template("index.html", user_input=r(), temp=weather[0], location=weather[1], icon=weather[2], humidity=weather[3], wind=weather[4], music=utils.music_playing())
@app.route('/clear')
def clearChat():
weather = getWeather()
# Clear all table rows
sql = "TRUNCATE TABLE user_bot_chat;"
a.execute(sql)
return render_template("index.html", user_input=r(), temp=weather[0], location=weather[1], icon=weather[2], humidity=weather[3], wind=weather[4], music=utils.music_playing())
def r(): # takes user inputs and bot outputs and insert into a array to later send to html file
try:
user_input = request.form["user_input"]
insert_sql(user_input)
r = []
user = user_list()
bot = bot_list()
for j in range(0, len(user)):
r.append(user[j])
r.append(bot[j])
return r
except:
r = []
user = user_list()
bot = bot_list()
for j in range(0, len(user)):
r.append(user[j])
r.append(bot[j])
return r
def getWeather():
observation = mgr.weather_at_place('Tirana')
w = observation.get_weather()
wind_data = w.get_wind()
humidity = w.get_humidity()
temp_data = w.get_temperature('celsius')
icon = w.get_weather_icon_name()
weatherData = [str(int(temp_data['temp'])), 'Tirana', str(icon), str(int(humidity)), str(int(wind_data['speed']))]
return weatherData
@app.route('/process', methods=['POST'])
def process():
weather = getWeather()
# called when user input is given and submit button is pressed
return render_template("index.html", user_input=r(), temp=weather[0], location=weather[1], icon=weather[2], humidity=weather[3], wind=weather[4], music=utils.music_playing())
if __name__ == '__main__':
try: # connects to the database
conn = pymysql.connect(host='localhost', user='root', password='', db='sindi_db')
a = conn.cursor()
except Exception as e:
print("QUERY ERROR: Connection")
print("Exeception occured:{}".format(e))
app.run(host='0.0.0.0', port=int('8000'), debug=True) # 0.0.0.0.,80
# conn.close()
# a.close() | en | 0.65359 | # OpenWeather # initializing variables # inserting user inputs, bot outputs and time into database # ID # extracting user inputs from user_bot_chat database # user.append('You: ' + i[0]) # extracting bot responses from user_bot_chat database # bot.append('Sindi' + i[0]) # links to the first page - index.html # links to the first page - index.html # Clear all table rows # takes user inputs and bot outputs and insert into a array to later send to html file # called when user input is given and submit button is pressed # connects to the database # 0.0.0.0.,80 # conn.close() # a.close() | 2.616763 | 3 |
catastro_finder.py | jorgeramirezcarrasco/catastro-finder | 0 | 6612938 | import requests
import json
import re
from bs4 import BeautifulSoup
class CatastroFinder:
"""CatastroFinder"""
def __init__(self,catastro_dict_path=None):
"""
Args:
catastro_dict_path (str, optional): Json file with catastro urls to scrap. Defaults to "./catastro_artifacts.json".
"""
if catastro_dict_path:
with open(catastro_dict_path) as json_file:
self.catastro_dict=json.load(json_file)
else:
self.catastro_dict={
"provincias": {
"url": "https://www1.sedecatastro.gob.es/CYCBienInmueble/OVCBusqueda.aspx/ObtenerProvincias",
"headers": {
"authority": "www1.sedecatastro.gob.es",
"accept": "application/json, text/javascript, */*; q=0.01",
"x-requested-with": "XMLHttpRequest",
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36",
"content-type": "application/json; charset=UTF-8",
"origin": "https://www1.sedecatastro.gob.es",
"sec-fetch-site": "same-origin",
"sec-fetch-mode": "cors",
"sec-fetch-dest": "empty",
"referer": "https://www1.sedecatastro.gob.es/CYCBienInmueble/OVCBusqueda.aspx?from=NuevoVisor",
"accept-language": "es-ES,es;q=0.9"
}
},
"municipios": {
"url": "https://www1.sedecatastro.gob.es/CYCBienInmueble/OVCBusqueda.aspx/ObtenerMunicipios",
"headers": {
"authority": "www1.sedecatastro.gob.es",
"accept": "application/json, text/javascript, */*; q=0.01",
"x-requested-with": "XMLHttpRequest",
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36",
"content-type": "application/json; charset=UTF-8",
"origin": "https://www1.sedecatastro.gob.es",
"sec-fetch-site": "same-origin",
"sec-fetch-mode": "cors",
"sec-fetch-dest": "empty",
"referer": "https://www1.sedecatastro.gob.es/CYCBienInmueble/OVCBusqueda.aspx?from=NuevoVisor",
"accept-language": "es-ES,es;q=0.9"
}
},
"vias": {
"url": "https://www1.sedecatastro.gob.es/CYCBienInmueble/OVCBusqueda.aspx/ObtenerVias",
"headers": {
"authority": "www1.sedecatastro.gob.es",
"accept": "application/json, text/javascript, */*; q=0.01",
"x-requested-with": "XMLHttpRequest",
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36",
"content-type": "application/json; charset=UTF-8",
"origin": "https://www1.sedecatastro.gob.es",
"sec-fetch-site": "same-origin",
"sec-fetch-mode": "cors",
"sec-fetch-dest": "empty",
"referer": "https://www1.sedecatastro.gob.es/CYCBienInmueble/OVCBusqueda.aspx?from=NuevoVisor",
"accept-language": "es-ES,es;q=0.9"
}
},
"inmuebles": {
"url": "https://www1.sedecatastro.gob.es/CYCBienInmueble/OVCListaBienes.aspx",
"headers": {
"authority": "www1.sedecatastro.gob.es",
"cache-control": "max-age=0",
"upgrade-insecure-requests": "1",
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36",
"sec-fetch-site": "none",
"sec-fetch-mode": "navigate",
"sec-fetch-user": "?1",
"sec-fetch-dest": "document",
"accept-language": "es-ES,es;q=0.9"
}
},
"cp": {
"url": "https://www1.sedecatastro.gob.es/CYCBienInmueble/OVCConCiud.aspx",
"headers": {
"authority": "www1.sedecatastro.gob.es",
"cache-control": "max-age=0",
"upgrade-insecure-requests": "1",
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36",
"sec-fetch-site": "none",
"sec-fetch-mode": "navigate",
"sec-fetch-user": "?1",
"sec-fetch-dest": "document",
"accept-language": "es-ES,es;q=0.9"
}
},
"lat_long": {
"url": "https://www1.sedecatastro.gob.es/Cartografia/BuscarParcelaInternet.aspx",
"headers": {
"authority": "www1.sedecatastro.gob.es",
"cache-control": "max-age=0",
"upgrade-insecure-requests": "1",
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36",
"sec-fetch-site": "none",
"sec-fetch-mode": "navigate",
'sec-fetch-site': 'same-origin',
"sec-fetch-user": "?1",
"sec-fetch-dest": "document",
"accept-language": "es-ES,es;q=0.9"
}
}
}
def get_provincias(self,filtro=""):
"""get_provincias
Args:
filtro (str, optional): Filtro. Defaults to "".
Returns:
(list): List of items with Codigo and Denominacion. ['Codigo': 15, 'Denominacion': 'A CORUÑA'}...]
"""
url=self.catastro_dict["provincias"]["url"]
headers=self.catastro_dict["provincias"]["headers"]
payload = "{ 'filtro': '"+filtro+"'}"
response = requests.request("POST", url, headers=headers, data = payload)
return json.loads(response.content)['d']
def get_municipios(self,provincia):
"""get_municipios
Args:
provincia (str): Provincia code to search.
Returns:
(list): List of items with Codigo and Denominacion. ['Codigo': 121, 'Denominacion': 'SANTA POLA'}...]
"""
url=self.catastro_dict["municipios"]["url"]
headers=self.catastro_dict["municipios"]["headers"]
payload = "{\"filtro\":\"\",\"provincia\":"+str(provincia)+"}"
response = requests.request("POST", url, headers=headers, data = payload)
return json.loads(response.content)['d']
def get_vias(self,provincia,municipio,input_via):
"""get_vias
Args:
provincia (str): Provincia code to search.
municipio (str): Municipio code to search.
input_via (str): Via input to search.
Returns:
(list): List of items with Codigo, Sigla, TipoVia, DenominacionCompleta and Denominacion. {'Codigo': 1212, 'Sigla': 'CL', 'TipoVia': 'CALLE', 'Denominacion': 'SANTA CRISTINA', 'DenominacionCompleta': 'SANTA CRISTINA (CALLE)'}
"""
url=self.catastro_dict["vias"]["url"]
headers=self.catastro_dict["vias"]["headers"]
payload = "{\"filtro\":\""+str(input_via)+"\",\"provincia\":"+str(provincia)+",\"municipio\":"+str(municipio)+"}"
response = requests.request("POST", url, headers=headers, data = payload)
return json.loads(response.content)['d']
def search_inmueble(self,via_result,via_numero,selected_provincia,selected_municipio,tipur="U",pest="urbana"):
"""search inmueble
Args:
via_result (dict): [description]
via_numero (str): [description]
selected_provincia (dict): [description]
selected_municipio ([dict): [description]
tipur (str, optional): tipur. Defaults to "U".
pest (str, optional): pest. Defaults to "urbana".
Returns:
(list): List of inmuebles
"""
url=self.catastro_dict["inmuebles"]["url"]
headers=self.catastro_dict["inmuebles"]["headers"]
via = via_result['Denominacion'].replace(" ","@")
params = (
('via', str(via)),
('tipoVia', str(via_result['Sigla'])),
('numero', str(via_numero)),
('kilometro', ''),
('bloque', ''),
('escalera', ''),
('planta', ''),
('puerta', ''),
('DescProv', str(selected_provincia['Denominacion'])),
('prov', str(selected_provincia['Codigo'])),
('muni', str(selected_municipio['Codigo'])),
('DescMuni', str(selected_municipio['Denominacion'])),
('TipUR', str(tipur)),
('codvia', str(via_result['Codigo'])),
('comVia', str(via_result['DenominacionCompleta'])),
('pest', str(pest)),
('from', 'OVCBusqueda'),
('nomusu', ' '),
('tipousu', ''),
('ZV', 'NO'),
('ZR', 'NO'),
)
response = requests.get(url, headers=headers, params=params)
soup = BeautifulSoup(response.content,features="html.parser")
inmueble_results = soup.find_all("div", "panel-heading")
cleaned_results = []
for inmueble in inmueble_results:
results_item = {}
for element in inmueble.find_all("span"):
if "title" in element.attrs:
if element.attrs["title"] == "Localización":
results_item["Localización"] = element.text
results_item["RefC"] = element.parent.parent.find("b").text.replace(" ","")
if element.attrs["title"] == "Año construcción":
results_item["Año construcción"] = element.text.replace(" ","")
if element.attrs["title"] == "Uso":
results_item["Uso"] = element.text
if element.attrs["title"] == "Coeficiente de participación":
results_item["Coeficiente de participación"] = element.text.replace(" ","")
if element.attrs["title"] == "Superficie construida":
results_item["Superficie construida"] = element.text.replace(" ","")
if results_item:
cleaned_results.append(results_item)
return cleaned_results
def get_cp(self,provincia,municipio,rc,urbrus="U"):
"""get_cp
Args:
provincia (str): Provincia code to search.
municipio (str): Municipio code to search.
rc (str): Ref catastral to search.
urbrus (str, optional): urbrus. Defaults to "U".
Returns:
(str): Postal Code
"""
url=self.catastro_dict["cp"]["url"]
headers=self.catastro_dict["cp"]["headers"]
params = (
('del', str(provincia)),
('mun', str(municipio)),
('UrbRus', str(urbrus)),
('RefC', str(rc)),
('Apenom', ''),
('esBice', ''),
('RCBice1', ''),
('RCBice2', ''),
('DenoBice', ''),
('from', 'nuevoVisor'),
('ZV', 'NO'),
)
response = requests.get(url, headers=headers, params=params)
soup = BeautifulSoup(response.content,features="html.parser")
cp = re.search("\d{5}",soup.find_all("span", "control-label black")[1].get_text(strip=True, separator=" "))[0]
return cp
def get_lat_lon(self, rc):
"""get_lat_lon
Args:
rc (str): Ref catastral to search.
Returns:
(dict): dict with lat and lng
"""
url=self.catastro_dict["lat_long"]["url"]
headers=self.catastro_dict["lat_long"]["headers"]
params = (
('refcat', str(rc)),
)
response = requests.get(url, headers=headers, params=params)
soup = BeautifulSoup(response.content,features="html.parser")
data_form_list = [inp for inp in soup.find_all("input") if 'class' in inp.parent.attrs and 'aspNetHidden' in inp.parent["class"]]
data_form_dict = {}
for data_form in data_form_list:
data_form_dict[data_form.attrs['name']] = data_form.attrs['value']
url=self.catastro_dict["lat_long"]["url"]
headers=self.catastro_dict["lat_long"]["headers"]
params = (
('refcat', str(rc)),
)
data = {
'__VIEWSTATE': data_form_dict['__VIEWSTATE'],
'__VIEWSTATEGENERATOR': data_form_dict['__VIEWSTATEGENERATOR'],
'__EVENTVALIDATION': data_form_dict['__EVENTVALIDATION'],
'ctl00$Contenido$RefCat': str(rc),
'ctl00$Contenido$ImgBGoogleMaps.x': '0',
'ctl00$Contenido$ImgBGoogleMaps.y': '0'
}
response = requests.post(url, headers=headers, params=params, data=data)
soup = BeautifulSoup(response.content,features="html.parser")
lat_long = str(soup.find_all("span", {"id": "ctl00_Contenido_lblAbrirVentana"})[0].find("script")).split("&q=")[-1].split("(")[0].split(",")
return (lat_long[0],lat_long[1])
| import requests
import json
import re
from bs4 import BeautifulSoup
class CatastroFinder:
"""CatastroFinder"""
def __init__(self,catastro_dict_path=None):
"""
Args:
catastro_dict_path (str, optional): Json file with catastro urls to scrap. Defaults to "./catastro_artifacts.json".
"""
if catastro_dict_path:
with open(catastro_dict_path) as json_file:
self.catastro_dict=json.load(json_file)
else:
self.catastro_dict={
"provincias": {
"url": "https://www1.sedecatastro.gob.es/CYCBienInmueble/OVCBusqueda.aspx/ObtenerProvincias",
"headers": {
"authority": "www1.sedecatastro.gob.es",
"accept": "application/json, text/javascript, */*; q=0.01",
"x-requested-with": "XMLHttpRequest",
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36",
"content-type": "application/json; charset=UTF-8",
"origin": "https://www1.sedecatastro.gob.es",
"sec-fetch-site": "same-origin",
"sec-fetch-mode": "cors",
"sec-fetch-dest": "empty",
"referer": "https://www1.sedecatastro.gob.es/CYCBienInmueble/OVCBusqueda.aspx?from=NuevoVisor",
"accept-language": "es-ES,es;q=0.9"
}
},
"municipios": {
"url": "https://www1.sedecatastro.gob.es/CYCBienInmueble/OVCBusqueda.aspx/ObtenerMunicipios",
"headers": {
"authority": "www1.sedecatastro.gob.es",
"accept": "application/json, text/javascript, */*; q=0.01",
"x-requested-with": "XMLHttpRequest",
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36",
"content-type": "application/json; charset=UTF-8",
"origin": "https://www1.sedecatastro.gob.es",
"sec-fetch-site": "same-origin",
"sec-fetch-mode": "cors",
"sec-fetch-dest": "empty",
"referer": "https://www1.sedecatastro.gob.es/CYCBienInmueble/OVCBusqueda.aspx?from=NuevoVisor",
"accept-language": "es-ES,es;q=0.9"
}
},
"vias": {
"url": "https://www1.sedecatastro.gob.es/CYCBienInmueble/OVCBusqueda.aspx/ObtenerVias",
"headers": {
"authority": "www1.sedecatastro.gob.es",
"accept": "application/json, text/javascript, */*; q=0.01",
"x-requested-with": "XMLHttpRequest",
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36",
"content-type": "application/json; charset=UTF-8",
"origin": "https://www1.sedecatastro.gob.es",
"sec-fetch-site": "same-origin",
"sec-fetch-mode": "cors",
"sec-fetch-dest": "empty",
"referer": "https://www1.sedecatastro.gob.es/CYCBienInmueble/OVCBusqueda.aspx?from=NuevoVisor",
"accept-language": "es-ES,es;q=0.9"
}
},
"inmuebles": {
"url": "https://www1.sedecatastro.gob.es/CYCBienInmueble/OVCListaBienes.aspx",
"headers": {
"authority": "www1.sedecatastro.gob.es",
"cache-control": "max-age=0",
"upgrade-insecure-requests": "1",
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36",
"sec-fetch-site": "none",
"sec-fetch-mode": "navigate",
"sec-fetch-user": "?1",
"sec-fetch-dest": "document",
"accept-language": "es-ES,es;q=0.9"
}
},
"cp": {
"url": "https://www1.sedecatastro.gob.es/CYCBienInmueble/OVCConCiud.aspx",
"headers": {
"authority": "www1.sedecatastro.gob.es",
"cache-control": "max-age=0",
"upgrade-insecure-requests": "1",
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36",
"sec-fetch-site": "none",
"sec-fetch-mode": "navigate",
"sec-fetch-user": "?1",
"sec-fetch-dest": "document",
"accept-language": "es-ES,es;q=0.9"
}
},
"lat_long": {
"url": "https://www1.sedecatastro.gob.es/Cartografia/BuscarParcelaInternet.aspx",
"headers": {
"authority": "www1.sedecatastro.gob.es",
"cache-control": "max-age=0",
"upgrade-insecure-requests": "1",
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36",
"sec-fetch-site": "none",
"sec-fetch-mode": "navigate",
'sec-fetch-site': 'same-origin',
"sec-fetch-user": "?1",
"sec-fetch-dest": "document",
"accept-language": "es-ES,es;q=0.9"
}
}
}
def get_provincias(self,filtro=""):
"""get_provincias
Args:
filtro (str, optional): Filtro. Defaults to "".
Returns:
(list): List of items with Codigo and Denominacion. ['Codigo': 15, 'Denominacion': 'A CORUÑA'}...]
"""
url=self.catastro_dict["provincias"]["url"]
headers=self.catastro_dict["provincias"]["headers"]
payload = "{ 'filtro': '"+filtro+"'}"
response = requests.request("POST", url, headers=headers, data = payload)
return json.loads(response.content)['d']
def get_municipios(self,provincia):
"""get_municipios
Args:
provincia (str): Provincia code to search.
Returns:
(list): List of items with Codigo and Denominacion. ['Codigo': 121, 'Denominacion': 'SANTA POLA'}...]
"""
url=self.catastro_dict["municipios"]["url"]
headers=self.catastro_dict["municipios"]["headers"]
payload = "{\"filtro\":\"\",\"provincia\":"+str(provincia)+"}"
response = requests.request("POST", url, headers=headers, data = payload)
return json.loads(response.content)['d']
def get_vias(self,provincia,municipio,input_via):
"""get_vias
Args:
provincia (str): Provincia code to search.
municipio (str): Municipio code to search.
input_via (str): Via input to search.
Returns:
(list): List of items with Codigo, Sigla, TipoVia, DenominacionCompleta and Denominacion. {'Codigo': 1212, 'Sigla': 'CL', 'TipoVia': 'CALLE', 'Denominacion': 'SANTA CRISTINA', 'DenominacionCompleta': 'SANTA CRISTINA (CALLE)'}
"""
url=self.catastro_dict["vias"]["url"]
headers=self.catastro_dict["vias"]["headers"]
payload = "{\"filtro\":\""+str(input_via)+"\",\"provincia\":"+str(provincia)+",\"municipio\":"+str(municipio)+"}"
response = requests.request("POST", url, headers=headers, data = payload)
return json.loads(response.content)['d']
def search_inmueble(self,via_result,via_numero,selected_provincia,selected_municipio,tipur="U",pest="urbana"):
"""search inmueble
Args:
via_result (dict): [description]
via_numero (str): [description]
selected_provincia (dict): [description]
selected_municipio ([dict): [description]
tipur (str, optional): tipur. Defaults to "U".
pest (str, optional): pest. Defaults to "urbana".
Returns:
(list): List of inmuebles
"""
url=self.catastro_dict["inmuebles"]["url"]
headers=self.catastro_dict["inmuebles"]["headers"]
via = via_result['Denominacion'].replace(" ","@")
params = (
('via', str(via)),
('tipoVia', str(via_result['Sigla'])),
('numero', str(via_numero)),
('kilometro', ''),
('bloque', ''),
('escalera', ''),
('planta', ''),
('puerta', ''),
('DescProv', str(selected_provincia['Denominacion'])),
('prov', str(selected_provincia['Codigo'])),
('muni', str(selected_municipio['Codigo'])),
('DescMuni', str(selected_municipio['Denominacion'])),
('TipUR', str(tipur)),
('codvia', str(via_result['Codigo'])),
('comVia', str(via_result['DenominacionCompleta'])),
('pest', str(pest)),
('from', 'OVCBusqueda'),
('nomusu', ' '),
('tipousu', ''),
('ZV', 'NO'),
('ZR', 'NO'),
)
response = requests.get(url, headers=headers, params=params)
soup = BeautifulSoup(response.content,features="html.parser")
inmueble_results = soup.find_all("div", "panel-heading")
cleaned_results = []
for inmueble in inmueble_results:
results_item = {}
for element in inmueble.find_all("span"):
if "title" in element.attrs:
if element.attrs["title"] == "Localización":
results_item["Localización"] = element.text
results_item["RefC"] = element.parent.parent.find("b").text.replace(" ","")
if element.attrs["title"] == "Año construcción":
results_item["Año construcción"] = element.text.replace(" ","")
if element.attrs["title"] == "Uso":
results_item["Uso"] = element.text
if element.attrs["title"] == "Coeficiente de participación":
results_item["Coeficiente de participación"] = element.text.replace(" ","")
if element.attrs["title"] == "Superficie construida":
results_item["Superficie construida"] = element.text.replace(" ","")
if results_item:
cleaned_results.append(results_item)
return cleaned_results
def get_cp(self,provincia,municipio,rc,urbrus="U"):
"""get_cp
Args:
provincia (str): Provincia code to search.
municipio (str): Municipio code to search.
rc (str): Ref catastral to search.
urbrus (str, optional): urbrus. Defaults to "U".
Returns:
(str): Postal Code
"""
url=self.catastro_dict["cp"]["url"]
headers=self.catastro_dict["cp"]["headers"]
params = (
('del', str(provincia)),
('mun', str(municipio)),
('UrbRus', str(urbrus)),
('RefC', str(rc)),
('Apenom', ''),
('esBice', ''),
('RCBice1', ''),
('RCBice2', ''),
('DenoBice', ''),
('from', 'nuevoVisor'),
('ZV', 'NO'),
)
response = requests.get(url, headers=headers, params=params)
soup = BeautifulSoup(response.content,features="html.parser")
cp = re.search("\d{5}",soup.find_all("span", "control-label black")[1].get_text(strip=True, separator=" "))[0]
return cp
def get_lat_lon(self, rc):
"""get_lat_lon
Args:
rc (str): Ref catastral to search.
Returns:
(dict): dict with lat and lng
"""
url=self.catastro_dict["lat_long"]["url"]
headers=self.catastro_dict["lat_long"]["headers"]
params = (
('refcat', str(rc)),
)
response = requests.get(url, headers=headers, params=params)
soup = BeautifulSoup(response.content,features="html.parser")
data_form_list = [inp for inp in soup.find_all("input") if 'class' in inp.parent.attrs and 'aspNetHidden' in inp.parent["class"]]
data_form_dict = {}
for data_form in data_form_list:
data_form_dict[data_form.attrs['name']] = data_form.attrs['value']
url=self.catastro_dict["lat_long"]["url"]
headers=self.catastro_dict["lat_long"]["headers"]
params = (
('refcat', str(rc)),
)
data = {
'__VIEWSTATE': data_form_dict['__VIEWSTATE'],
'__VIEWSTATEGENERATOR': data_form_dict['__VIEWSTATEGENERATOR'],
'__EVENTVALIDATION': data_form_dict['__EVENTVALIDATION'],
'ctl00$Contenido$RefCat': str(rc),
'ctl00$Contenido$ImgBGoogleMaps.x': '0',
'ctl00$Contenido$ImgBGoogleMaps.y': '0'
}
response = requests.post(url, headers=headers, params=params, data=data)
soup = BeautifulSoup(response.content,features="html.parser")
lat_long = str(soup.find_all("span", {"id": "ctl00_Contenido_lblAbrirVentana"})[0].find("script")).split("&q=")[-1].split("(")[0].split(",")
return (lat_long[0],lat_long[1])
| en | 0.295184 | CatastroFinder Args: catastro_dict_path (str, optional): Json file with catastro urls to scrap. Defaults to "./catastro_artifacts.json". get_provincias Args: filtro (str, optional): Filtro. Defaults to "". Returns: (list): List of items with Codigo and Denominacion. ['Codigo': 15, 'Denominacion': 'A CORUÑA'}...] get_municipios Args: provincia (str): Provincia code to search. Returns: (list): List of items with Codigo and Denominacion. ['Codigo': 121, 'Denominacion': 'SANTA POLA'}...] get_vias Args: provincia (str): Provincia code to search. municipio (str): Municipio code to search. input_via (str): Via input to search. Returns: (list): List of items with Codigo, Sigla, TipoVia, DenominacionCompleta and Denominacion. {'Codigo': 1212, 'Sigla': 'CL', 'TipoVia': 'CALLE', 'Denominacion': 'SANTA CRISTINA', 'DenominacionCompleta': 'SANTA CRISTINA (CALLE)'} search inmueble Args: via_result (dict): [description] via_numero (str): [description] selected_provincia (dict): [description] selected_municipio ([dict): [description] tipur (str, optional): tipur. Defaults to "U". pest (str, optional): pest. Defaults to "urbana". Returns: (list): List of inmuebles get_cp Args: provincia (str): Provincia code to search. municipio (str): Municipio code to search. rc (str): Ref catastral to search. urbrus (str, optional): urbrus. Defaults to "U". Returns: (str): Postal Code get_lat_lon Args: rc (str): Ref catastral to search. Returns: (dict): dict with lat and lng | 2.82624 | 3 |
metamodels/siamese_net.py | proteekroy/samoo | 6 | 6612939 | <reponame>proteekroy/samoo<filename>metamodels/siamese_net.py
from itertools import product
from torch.autograd import Variable
from utils import *
from dataloader import *
import numpy as np
from metamodels.neural_metamodel import NeuralMetamodel
class SiameseMetamodel(NeuralMetamodel):
    """Siamese-network surrogate (metamodel) trained with two joint heads.

    For a batch of evaluated designs (x, f) it learns:
      * a regression head: predict objective values f from inputs x, and
      * a pairwise comparison head: for every ordered in-batch pair
        (x1, x2), predict the element-wise label (f1 <= f2).

    NOTE(review): this class never assigns ``self.net``, ``self.model``,
    ``self.optimizer``, ``self.trainloader``/``self.testloader``,
    ``self.mseLoss``/``self.BCEloss``, ``self.use_cuda`` or ``self.disp``
    itself — they are expected to be provided by ``NeuralMetamodel``
    (confirm against the base class).
    """

    def __init__(self, n_var, n_obj,
                 problem_name='problem_obj',
                 n_splits=20,
                 embed_length=10,
                 batch_size=10,
                 total_epoch=200,
                 resume=False,
                 cross_val=False,
                 resume_filename=None,
                 neuralnet=None,
                 disp = True,
                 best_accuracy_model=True,
                 save_model=False,
                 dataset_func=True
                 ):
        """Forward every argument verbatim to ``NeuralMetamodel.__init__``.

        This subclass adds no state of its own; see the base class for the
        meaning of each parameter.
        """
        super().__init__(n_var, n_obj, problem_name, n_splits, embed_length,
                         batch_size, total_epoch, resume, cross_val,
                         resume_filename, neuralnet, disp, best_accuracy_model,
                         save_model, dataset_func)

    def predict(self, input, *args, **kwargs):
        """Predict objective values for a numpy array of inputs.

        The siamese network takes a pair of inputs; here the same points are
        fed to both twins and only the first output (the regression head)
        is returned, as a numpy array.

        NOTE(review): this uses ``self.model`` while training uses
        ``self.net`` — presumably both refer to the same trained network
        via the base class; confirm.
        """
        self.model.eval()
        input = torch.from_numpy(input)
        output, _, _ = self.model.forward(Variable(input.float()), Variable(input.float()))
        return output.data.numpy()

    def perform_epoch(self, epoch, test_flag=False):
        """Run one full pass over the train (or test) loader.

        Args:
            epoch: epoch number, used only for the progress printout.
            test_flag: if True, evaluate on ``self.testloader`` without
                updating weights; otherwise train on ``self.trainloader``.

        Returns:
            (average loss, average pairwise-comparison accuracy in percent)
            over the epoch.
        """
        if test_flag:
            self.net.eval()
            loader = self.testloader
        else:
            self.net.train()
            loader = self.trainloader

        losses, top = AverageMeter(), AverageMeter()

        for batch_idx, (train_index, x, f) in enumerate(loader):
            # Actual size of this batch (the last one may be smaller).
            # NOTE(review): this overwrites the configured ``self.batch_size``.
            self.batch_size = f.shape[0]
            # All ordered index pairs (i, j) within the batch.
            index = torch.from_numpy(
                np.asarray(list(product(np.asarray(range(0, self.batch_size)), np.asarray(range(0, self.batch_size))))))
            f1 = f[index[:, 0]]
            f2 = f[index[:, 1]]
            x1 = x[index[:, 0]]
            x2 = x[index[:, 1]]
            # Pairwise target: 1.0 where f1 <= f2 element-wise, else 0.0.
            label = (f1 <= f2).float()

            if self.use_cuda:
                x, x1, x2, f, f1, f2, label = Variable(x.float().cuda()), Variable(x1.cuda()), \
                                              Variable(x2.cuda()), Variable(f.float().cuda()), \
                                              Variable(f1.cuda()), Variable(f2.cuda()), Variable(label.cuda())
            else:
                x, x1, x2, f, f1, f2 = Variable(x), Variable(x1), Variable(x2), \
                                       Variable(f.float()), Variable(f1), Variable(f2)

            self.optimizer.zero_grad()
            # Comparison head on the pair (x1, x2); regression head on x.
            _, _, predicted_label = self.net.forward(x1.float(), x2.float())
            predicted_f, _, _ = self.net.forward(x.float(), x.float())
            # loss1: regression MSE; loss2: BCE on the pairwise labels.
            loss1 = self.mseLoss(predicted_f, f.float())
            # loss2 = self.mseLoss(predicted_label.float(), label.float())
            loss2 = self.BCEloss(predicted_label.float(), label.float())
            # out_label = (torch.flatten(out_label)).view(-1,1)
            # label = (torch.flatten(label)).view(-1,1)
            # loss3 = self.crossEntropyLoss(out_label.float(), label.long())
            loss = loss1 + loss2

            # Backprop / weight update only in training mode.
            if not test_flag:
                loss.backward()
                self.optimizer.step()

            # Percentage of correctly predicted pairwise comparisons.
            prec_matrix = torch.eq(torch.round(predicted_label.data).long(), label.long())
            prec_f = 100*torch.mean(prec_matrix.float())

            # measure accuracy and record loss
            losses.update(loss.data.item(), 1)
            top.update(prec_f, 1)

        if self.disp:
            if test_flag:
                print('Test Epoch: %d | Loss : %.4f | Acc : %.4f ' % (epoch, losses.avg, top.avg))
            else:
                print('Train Epoch: %d | Loss : %.4f | Acc : %.4f ' % (epoch, losses.avg, top.avg))

        return losses.avg, top.avg
| from itertools import product
from torch.autograd import Variable
from utils import *
from dataloader import *
import numpy as np
from metamodels.neural_metamodel import NeuralMetamodel
class SiameseMetamodel(NeuralMetamodel):
    def __init__(self, n_var, n_obj,
                 problem_name='problem_obj',
                 n_splits=20,
                 embed_length=10,
                 batch_size=10,
                 total_epoch=200,
                 resume=False,
                 cross_val=False,
                 resume_filename=None,
                 neuralnet=None,
                 disp = True,
                 best_accuracy_model=True,
                 save_model=False,
                 dataset_func=True
                 ):
        """Forward every argument verbatim to ``NeuralMetamodel.__init__``.

        This subclass adds no state of its own; see the base class for the
        meaning of each parameter (n_var/n_obj are the problem dimensions;
        the rest configure training, checkpointing and display).
        """
        super().__init__(n_var, n_obj, problem_name, n_splits, embed_length,
                         batch_size, total_epoch, resume, cross_val,
                         resume_filename, neuralnet, disp, best_accuracy_model,
                         save_model, dataset_func)
def predict(self, input, *args, **kwargs):
self.model.eval()
input = torch.from_numpy(input)
output, _, _ = self.model.forward(Variable(input.float()), Variable(input.float()))
return output.data.numpy()
def perform_epoch(self, epoch, test_flag=False):
if test_flag:
self.net.eval()
loader = self.testloader
else:
self.net.train()
loader = self.trainloader
losses, top = AverageMeter(), AverageMeter()
for batch_idx, (train_index, x, f) in enumerate(loader):
self.batch_size = f.shape[0]
index = torch.from_numpy(
np.asarray(list(product(np.asarray(range(0, self.batch_size)), np.asarray(range(0, self.batch_size))))))
f1 = f[index[:, 0]]
f2 = f[index[:, 1]]
x1 = x[index[:, 0]]
x2 = x[index[:, 1]]
label = (f1 <= f2).float()
if self.use_cuda:
x, x1, x2, f, f1, f2, label = Variable(x.float().cuda()), Variable(x1.cuda()), \
Variable(x2.cuda()), Variable(f.float().cuda()), \
Variable(f1.cuda()), Variable(f2.cuda()), Variable(label.cuda())
else:
x, x1, x2, f, f1, f2 = Variable(x), Variable(x1), Variable(x2), \
Variable(f.float()), Variable(f1), Variable(f2)
self.optimizer.zero_grad()
_, _, predicted_label = self.net.forward(x1.float(), x2.float())
predicted_f, _, _ = self.net.forward(x.float(), x.float())
loss1 = self.mseLoss(predicted_f, f.float())
# loss2 = self.mseLoss(predicted_label.float(), label.float())
loss2 = self.BCEloss(predicted_label.float(), label.float())
# out_label = (torch.flatten(out_label)).view(-1,1)
# label = (torch.flatten(label)).view(-1,1)
# loss3 = self.crossEntropyLoss(out_label.float(), label.long())
loss = loss1 + loss2
if not test_flag:
loss.backward()
self.optimizer.step()
prec_matrix = torch.eq(torch.round(predicted_label.data).long(), label.long())
prec_f = 100*torch.mean(prec_matrix.float())
# measure accuracy and record loss
losses.update(loss.data.item(), 1)
top.update(prec_f, 1)
if self.disp:
if test_flag:
print('Test Epoch: %d | Loss : %.4f | Acc : %.4f ' % (epoch, losses.avg, top.avg))
else:
print('Train Epoch: %d | Loss : %.4f | Acc : %.4f ' % (epoch, losses.avg, top.avg))
return losses.avg, top.avg | en | 0.391617 | # loss2 = self.mseLoss(predicted_label.float(), label.float()) # out_label = (torch.flatten(out_label)).view(-1,1) # label = (torch.flatten(label)).view(-1,1) # loss3 = self.crossEntropyLoss(out_label.float(), label.long()) # measure accuracy and record loss | 2.221445 | 2 |
2. Capturing video from the webcam/face-2.py | jalayrupera/opencvBasics | 0 | 6612940 | import cv2
Capture = cv2.VideoCapture(0);
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('output.avi', fourcc, 20.0, (640,480))
while True:
ret,frame = Capture.read()
gray = cv2.cvtColor(frame,cv2.COLOR_BGRA2GRAY)
cv2.imshow('new',gray)
cv2.imshow('RGB',frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
Capture.release()
out.release()
cv2.destroyAllWindows()
| import cv2
Capture = cv2.VideoCapture(0);
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('output.avi', fourcc, 20.0, (640,480))
while True:
ret,frame = Capture.read()
gray = cv2.cvtColor(frame,cv2.COLOR_BGRA2GRAY)
cv2.imshow('new',gray)
cv2.imshow('RGB',frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
Capture.release()
out.release()
cv2.destroyAllWindows()
| none | 1 | 2.592751 | 3 | |
prototypes/vlasov/vlasov.py | Krissmedt/imprunko | 5 | 6612941 | import numpy as np
from initial import *
import conf as prm
from visualize import visualize
import os, sys
from timer import Timer
sys.path.insert(0, '../radcool')
from rad import ph_evolve
from rad import el_synch_cool
from rad import CSapprox
#set up figure
import pylab as mlab
fig = mlab.figure(figsize=(10, 12))
mlab.rc('font', family='serif')
mlab.rc('xtick', labelsize='xx-small')
mlab.rc('ytick', labelsize='xx-small')
#charge density
def charge(ff, ux, prm):
rhos = np.zeros( (prm.nxfull, prm.ns) )
#integrate over the velocity distribution
# qm = q/m, i.e. charge-to-mass ratio
for kk in range(prm.ns):
for ii in prm.xfull:
gamma = np.sqrt( 1.0 + (prm.m[kk] * ux[:, kk])**2 )
#gamma = 1.0
rhos[ii, kk] = prm.q[kk] * prm.du[kk] * np.sum( ff[:, ii, kk] ) # charge density for certain species
#sum over species
rho = np.sum(rhos, 1)
return rho
def wrap(x, prm):
#left halo to right edge
x[prm.xLb] = x[prm.xRe]
#right halo to left edge
x[prm.xRb] = x[prm.xLe]
return x
def damp(x, prm):
#left halo
bval = x[prm.xLb[-1]+1]
x[prm.xLb] = bval*np.exp(-np.linspace(0.0, 10.0, prm.nxHalo) )
#right halo
bval = x[prm.xRb[0]-1]
x[prm.xRb] = bval*np.exp(-np.linspace(10.0, 0.0, prm.nxHalo) )
return x
def poisson(ex, rho, prm):
# XXX why remove the mean?
rho -= np.mean( rho[prm.xmid] )
for ii in prm.xmid:
ex[ii+1] = ex[ii] + 4.0 * np.pi * rho[ii+1] # Gaussian units, c=1
if prm.periodic:
ex = wrap(ex, prm)
ex -= np.sum( ex[prm.xmid] )/ prm.nx
#damp E field
if prm.finite:
damp(ex, prm)
return ex
# advection of the distribution function
def position(ff, ux, ajx, prm):
ajxs = np.zeros( (prm.nxfull, prm.ns) )
flux = np.zeros( (prm.nvfull, prm.nxfull) )
for kk in range(prm.ns):
aa = ux[:, kk] * prm.dt/prm.dx #compute shift in units of cells
fa = np.floor(aa) # upwind direction
#vectorized over velocity space
for ii in range(2, prm.nx+3):
#1st order linear upwind flux
#ss = np.ones(prm.nvfull)*ii - fa
#iss = ss.astype(int) # index array needs to be type casted into int before we can use it
#flux[:, ii] = aa[:] * np.diag(ff[:, iss, kk])
#second order conservative scheme
#flux[:, ii] = aa * ( ff[:, ii+1, kk] + ff[:, ii, kk] )*0.5 \
# - aa[:]*aa * ( ff[:, ii+1, kk] - ff[:, ii, kk] )*0.5
#4th order conservative
flux[:, ii] = aa[:] * (-ff[:,ii+2,kk]+ 7.0*ff[:,ii+1,kk]+ 7.0*ff[:,ii,kk]-ff[:,ii-1,kk])/12.0 \
+ aa[:]**2 * ( ff[:,ii+2,kk]-15.0*ff[:,ii+1,kk]+15.0*ff[:,ii,kk]-ff[:,ii-1,kk])/24.0 \
+ aa[:]**3 * ( ff[:,ii+2,kk]- ff[:,ii+1,kk]- ff[:,ii,kk]+ff[:,ii-1,kk])/12.0 \
+ aa[:]**4 * (-ff[:,ii+2,kk]+ 3.0*ff[:,ii+1,kk]- 3.0*ff[:,ii,kk]+ff[:,ii-1,kk])/24.0
#add flux as f_i^t+dt = f_i^t - (U_i+1/2 - U_i-1/2)
ff[:, prm.xmid, kk] -= (flux[:, prm.xmid] - flux[:, prm.xmid-1])
#numerical flux integration over velocity, i.e., U = int q*U(u) du/gamma
gamma = np.sqrt( 1.0 + (prm.m[kk] * ux[:,kk])**2 )
ajxs[prm.xmid, kk] = prm.q[kk] * prm.du[kk] * np.sum( flux[:, prm.xmid]/gamma[:,np.newaxis], 0)
#gamma = 1.0
#ajxs[prm.xmid, kk] = prm.q[kk] * prm.du[kk] * np.sum( flux[:, prm.xmid]/gamma, 0)
if prm.periodic:
#wrap boundaries
ff[:, prm.xLb, :] = ff[:, prm.xRe, :]
ff[:, prm.xRb, :] = ff[:, prm.xLe, :]
if prm.finite:
bvalL = ff[:, prm.xLb[-1]+1, :]
for ir in prm.xLb:
ff[:, ir, :] = bvalL
bvalR = ff[:, prm.xRb[0]-1, :]
for ir in prm.xRb:
ff[:, ir, :] = bvalR
#limit flux
np.clip(ff, 0.0, None, out=ff)
#reduce flux
ajx[:] = np.sum( ajxs, 1 )
if prm.periodic:
ajx = wrap(ajx, prm)
if prm.finite:
ajx = damp(ajx, prm)
return ff, ajx
def velocity(f, ex, prm):
#interpolate half-integer staggered Ex to full integer grid fex
fex = np.zeros(prm.nxfull)
for ii in prm.xmid:
fex[ii] = (ex[ii] + ex[ii-1])/2.0
if prm.periodic:
fex = wrap(fex, prm)
if prm.finite:
fex = damp(fex, prm)
flux = np.zeros( (prm.nvfull, prm.nxfull) )
jj = np.arange(2,prm.nv+3)
for kk in range(prm.ns):
aa = fex[:] * prm.qm[kk] * prm.dt/prm.du[kk] #shift in units of phase space cells
#1st order linear upwind scheme
#fa = np.floor(aa).astype(int) #upwind direction
#for ii in prm.xfull:
#for ii in range(2, prm.nx+3):
# js = jj - fa[ii]
# flux[jj, ii] = aa[ii] * ff[js, ii, kk]
#2nd order conservative
#flux[jj, :] = aa[:] * ( ff[jj+1, :, kk] + ff[jj, :, kk] )*0.5 \
# - aa[:]*aa[:] * ( ff[jj+1, :, kk] - ff[jj, :, kk] )*0.5
#4th order conservative
flux[jj, :] = aa[:] * (-ff[jj+2,:,kk]+ 7.0*ff[jj+1,:,kk]+ 7.0*ff[jj,:,kk]-ff[jj-1,:,kk])/12.0 \
+ aa[:]**2 * ( ff[jj+2,:,kk]-15.0*ff[jj+1,:,kk]+15.0*ff[jj,:,kk]-ff[jj-1,:,kk])/24.0 \
+ aa[:]**3 * ( ff[jj+2,:,kk]- ff[jj+1,:,kk]- ff[jj,:,kk]+ff[jj-1,:,kk])/12.0 \
+ aa[:]**4 * (-ff[jj+2,:,kk]+ 3.0*ff[jj+1,:,kk]- 3.0*ff[jj,:,kk]+ff[jj-1,:,kk])/24.0
#add flux as f_i^t+dt = f_i^t - (U_i+1/2 - U_i-1/2)
ff[1:prm.nv+5, :, kk] -= ( flux[1:prm.nv+5, :] - flux[0:prm.nv+4, :] )
#limit flux
np.clip(ff, 0.0, None, out=ff)
return ff
def efield(ex, ajx, prm):
#remove mean
ajx -= np.mean( ajx[prm.xmid] )
#amperes law E_n+1 = E_n - J
ex[prm.xmid] = ex[prm.xmid] - 4.0 * np.pi * ajx[prm.xmid] ## 4\pi or epsilon factor?
if prm.periodic:
ex = wrap(ex, prm)
if prm.finite:
damp(ex, prm)
np.clip(ex, -100.0, 100.0, out=ex)
return ex
#def el_evolve(ffi, vxi, fp, prm):
#ffi electron distribution
#vxi velocity grid of electrons
#full photon distribution f(z, phi)
# do nothing
#return ffi
#def ph_evolve(ffi, vxi, fp, px, prm):
#
# wc = 1.0e0 #cyclotron frequency
# x = px / wc #dimensionless energy
#
# #mean velocity
# rho = np.trapz(ffi, x=vxi)
# g = np.mean(np.abs( ffi ))
# #rho = 1.0
# fp = g**4.0 * rho*(4.0/3.0)*x**(1.0/3.0) *np.exp(-x)
# return fp
def radiative_reactions(ux, ff, px, fp, prm):
#erase old stuff; i.e., do not accumulate
fp[:,:,:] = 0.0
#loop over spatial cells
for ix in prm.xfull:
iss = 0
#radiation reactions
for dirs in range(2):
if dirs == 0:
vpos = ux[:,iss] > 0 #+x going particles
elif dirs == 1:
vpos = ux[:,iss] < 0 #-x going particles
for kk in range(prm.ns):
ffi = ff[vpos, ix, kk] #slice correct velocities
vxi = np.abs( ux[vpos, iss] )
if dirs == 1:
vxi = np.flipud( vxi )
ffi = np.flipud( ffi )
#fp[dirs, :, ix] += radiate( ffi, vxi, fp[dirs, :, ix], px, prm) #evolve photons
#compute radiation per bin
for i, ivx in enumerate( vxi ):
#if ivx > 1.0e-2:
gamma = np.sqrt(1.0 + ivx**2)
S = CSapprox(px, gamma) #spectrum from one bundle of electrons with velocity of gamma
#normalize
S *= ffi[i] * prm.tau/(prm.sigma_T * prm.R) * 3.0e10 * 1.0e9
fp[dirs, :, ix] += S
return ff, fp
def radiate(ffi, vxi, fp, px, prm):
#time scaling
dt_rad = prm.dt/1.0e2
#number density scaling
ffi *= prm.tau/(prm.sigma_T * prm.R)
fp = ph_evolve(ffi, vxi, fp, px, dt_rad)
return fp
def collisions(ux, ff, px, fp, prm):
#erase old stuff; i.e., do not accumulate
fp[:,:,:] = 0.0
#loop over spatial cells
for ix in prm.xfull:
iss = 0
#radiation reactions
for dirs in range(2):
if dirs == 0:
vpos = ux[:,iss] > 0 #+x going particles
elif dirs == 1:
vpos = ux[:,iss] < 0 #-x going particles
for kk in range(prm.ns):
ffi = ff[vpos, ix, kk] #slice correct velocities
vxi = np.abs( ux[vpos, iss] )
if dirs == 1:
vxi = np.flipud( vxi )
ffi = np.flipud( ffi )
fp[dirs, :, ix] += radiate( ffi, vxi, fp[dirs, :, ix], px, prm) #evolve photons
print np.max( fp[dirs, :, ix] )
return ff, fp
def inject(ff, prm):
vd = -5.0
vt = 0.1
amp = 0.001
for kk in range(prm.ns):
ii = prm.xmid[-1]
ux = np.linspace(prm.vmin[kk], prm.vmax[kk], prm.nvfull)
for jj in range(prm.nvfull):
ff[jj, ii, kk] += amp * np.exp( -(ux[jj] - vd)**2/(2.0*vt)**2)
return ff
#initialize
#--------------------------------------------------
#load configuration
#ff, ex, ajx, xx, ux, px, fp = initial(prm)
ff, ex, ajx, xx, ux, px, fp = initial_test(prm)
#initial step
rho = charge(ff, ux, prm)
ex = poisson(ex, rho, prm)
#ff, fp = collisions(ux, ff, px, fp, prm)
#ff, fp = radiative_reactions(ux, ff, px, fp, prm)
# calculate plasma frequency for every species and every coordinate cell
for kk in range(prm.ns):
print"q = {}, m = {}".format(prm.q[kk],prm.m[kk])
for ii in prm.xfull:
rhos = prm.q[kk] * prm.du[kk] * np.sum( ff[:, ii, kk] )
wpe_calc = np.sqrt( 4.0 * np.pi * prm.q[kk] * rhos / prm.m[kk])
#print wpe_calc
#print ff[:,37,kk]
ff, ajx = position(ff, ux, ajx, prm)
ex = efield(ex, ajx, prm)
#sys.exit()
#--------------------------------------------------
# main loop
visz = visualize("out", xx, ux, px)
visz.plot(0, ff, ex, ajx, rho, fp) #plot once to create figures
simulation = np.zeros( (prm.nx, prm.ntime+1, 1) )
#--------------------------------------------------
#Save to file
import h5py
f = h5py.File("out/run.hdf5", "w")
grp0 = f.create_group("params")
grp0.attrs['dx'] = prm.dx
grp0.attrs['dt'] = prm.dt
grp0.attrs['nx'] = prm.nx
grp0.attrs['nv'] = prm.nv
grp0.attrs['ns'] = prm.ns
grp0.attrs['ntime'] = prm.ntime
grp = f.create_group("fields")
dset_ex = grp.create_dataset("Ex", (prm.nx, prm.ntime+1), dtype='f')
timer = Timer(["total", "lap"])
timer.start("total")
jtime = 0
time = 0.0
for jtime in range(prm.ntime+1):
if (jtime % 20 == 0):
#ff, fp = radiative_reactions(ux, ff, px, fp, prm)
print"{} {}".format(time, np.amax( np.absolute(ex) ))
# print "-----------", jtime, "/", time, "----------"
# timer.stats("lap")
visz.plot(jtime, ff, ex, ajx, rho, fp)
# timer.start("lap")
#ff = inject(ff, prm)
ff = velocity(ff, ex, prm)
#ff, fp = collisions(ux, ff, px, fp, prm)
#ff, fp = radiative_reactions(ux, ff, px, fp, prm)
ff, ajx = position(ff, ux, ajx, prm)
rho = charge(ff, ux, prm)
ex = efield(ex, ajx, prm)
#ex = poisson(ex, rho, prm)
time += prm.dt
#simulation[:, jtime, 0] = ex[prm.xmid]
dset_ex[:,jtime] = ex[prm.xmid]
timer.lap("lap")
timer.stop("total")
timer.stats("total")
| import numpy as np
from initial import *
import conf as prm
from visualize import visualize
import os, sys
from timer import Timer
sys.path.insert(0, '../radcool')
from rad import ph_evolve
from rad import el_synch_cool
from rad import CSapprox
#set up figure
import pylab as mlab
fig = mlab.figure(figsize=(10, 12))
mlab.rc('font', family='serif')
mlab.rc('xtick', labelsize='xx-small')
mlab.rc('ytick', labelsize='xx-small')
#charge density
def charge(ff, ux, prm):
rhos = np.zeros( (prm.nxfull, prm.ns) )
#integrate over the velocity distribution
# qm = q/m, i.e. charge-to-mass ratio
for kk in range(prm.ns):
for ii in prm.xfull:
gamma = np.sqrt( 1.0 + (prm.m[kk] * ux[:, kk])**2 )
#gamma = 1.0
rhos[ii, kk] = prm.q[kk] * prm.du[kk] * np.sum( ff[:, ii, kk] ) # charge density for certain species
#sum over species
rho = np.sum(rhos, 1)
return rho
def wrap(x, prm):
#left halo to right edge
x[prm.xLb] = x[prm.xRe]
#right halo to left edge
x[prm.xRb] = x[prm.xLe]
return x
def damp(x, prm):
#left halo
bval = x[prm.xLb[-1]+1]
x[prm.xLb] = bval*np.exp(-np.linspace(0.0, 10.0, prm.nxHalo) )
#right halo
bval = x[prm.xRb[0]-1]
x[prm.xRb] = bval*np.exp(-np.linspace(10.0, 0.0, prm.nxHalo) )
return x
def poisson(ex, rho, prm):
# XXX why remove the mean?
rho -= np.mean( rho[prm.xmid] )
for ii in prm.xmid:
ex[ii+1] = ex[ii] + 4.0 * np.pi * rho[ii+1] # Gaussian units, c=1
if prm.periodic:
ex = wrap(ex, prm)
ex -= np.sum( ex[prm.xmid] )/ prm.nx
#damp E field
if prm.finite:
damp(ex, prm)
return ex
# advection of the distribution function
def position(ff, ux, ajx, prm):
ajxs = np.zeros( (prm.nxfull, prm.ns) )
flux = np.zeros( (prm.nvfull, prm.nxfull) )
for kk in range(prm.ns):
aa = ux[:, kk] * prm.dt/prm.dx #compute shift in units of cells
fa = np.floor(aa) # upwind direction
#vectorized over velocity space
for ii in range(2, prm.nx+3):
#1st order linear upwind flux
#ss = np.ones(prm.nvfull)*ii - fa
#iss = ss.astype(int) # index array needs to be type casted into int before we can use it
#flux[:, ii] = aa[:] * np.diag(ff[:, iss, kk])
#second order conservative scheme
#flux[:, ii] = aa * ( ff[:, ii+1, kk] + ff[:, ii, kk] )*0.5 \
# - aa[:]*aa * ( ff[:, ii+1, kk] - ff[:, ii, kk] )*0.5
#4th order conservative
flux[:, ii] = aa[:] * (-ff[:,ii+2,kk]+ 7.0*ff[:,ii+1,kk]+ 7.0*ff[:,ii,kk]-ff[:,ii-1,kk])/12.0 \
+ aa[:]**2 * ( ff[:,ii+2,kk]-15.0*ff[:,ii+1,kk]+15.0*ff[:,ii,kk]-ff[:,ii-1,kk])/24.0 \
+ aa[:]**3 * ( ff[:,ii+2,kk]- ff[:,ii+1,kk]- ff[:,ii,kk]+ff[:,ii-1,kk])/12.0 \
+ aa[:]**4 * (-ff[:,ii+2,kk]+ 3.0*ff[:,ii+1,kk]- 3.0*ff[:,ii,kk]+ff[:,ii-1,kk])/24.0
#add flux as f_i^t+dt = f_i^t - (U_i+1/2 - U_i-1/2)
ff[:, prm.xmid, kk] -= (flux[:, prm.xmid] - flux[:, prm.xmid-1])
#numerical flux integration over velocity, i.e., U = int q*U(u) du/gamma
gamma = np.sqrt( 1.0 + (prm.m[kk] * ux[:,kk])**2 )
ajxs[prm.xmid, kk] = prm.q[kk] * prm.du[kk] * np.sum( flux[:, prm.xmid]/gamma[:,np.newaxis], 0)
#gamma = 1.0
#ajxs[prm.xmid, kk] = prm.q[kk] * prm.du[kk] * np.sum( flux[:, prm.xmid]/gamma, 0)
if prm.periodic:
#wrap boundaries
ff[:, prm.xLb, :] = ff[:, prm.xRe, :]
ff[:, prm.xRb, :] = ff[:, prm.xLe, :]
if prm.finite:
bvalL = ff[:, prm.xLb[-1]+1, :]
for ir in prm.xLb:
ff[:, ir, :] = bvalL
bvalR = ff[:, prm.xRb[0]-1, :]
for ir in prm.xRb:
ff[:, ir, :] = bvalR
#limit flux
np.clip(ff, 0.0, None, out=ff)
#reduce flux
ajx[:] = np.sum( ajxs, 1 )
if prm.periodic:
ajx = wrap(ajx, prm)
if prm.finite:
ajx = damp(ajx, prm)
return ff, ajx
def velocity(f, ex, prm):
#interpolate half-integer staggered Ex to full integer grid fex
fex = np.zeros(prm.nxfull)
for ii in prm.xmid:
fex[ii] = (ex[ii] + ex[ii-1])/2.0
if prm.periodic:
fex = wrap(fex, prm)
if prm.finite:
fex = damp(fex, prm)
flux = np.zeros( (prm.nvfull, prm.nxfull) )
jj = np.arange(2,prm.nv+3)
for kk in range(prm.ns):
aa = fex[:] * prm.qm[kk] * prm.dt/prm.du[kk] #shift in units of phase space cells
#1st order linear upwind scheme
#fa = np.floor(aa).astype(int) #upwind direction
#for ii in prm.xfull:
#for ii in range(2, prm.nx+3):
# js = jj - fa[ii]
# flux[jj, ii] = aa[ii] * ff[js, ii, kk]
#2nd order conservative
#flux[jj, :] = aa[:] * ( ff[jj+1, :, kk] + ff[jj, :, kk] )*0.5 \
# - aa[:]*aa[:] * ( ff[jj+1, :, kk] - ff[jj, :, kk] )*0.5
#4th order conservative
flux[jj, :] = aa[:] * (-ff[jj+2,:,kk]+ 7.0*ff[jj+1,:,kk]+ 7.0*ff[jj,:,kk]-ff[jj-1,:,kk])/12.0 \
+ aa[:]**2 * ( ff[jj+2,:,kk]-15.0*ff[jj+1,:,kk]+15.0*ff[jj,:,kk]-ff[jj-1,:,kk])/24.0 \
+ aa[:]**3 * ( ff[jj+2,:,kk]- ff[jj+1,:,kk]- ff[jj,:,kk]+ff[jj-1,:,kk])/12.0 \
+ aa[:]**4 * (-ff[jj+2,:,kk]+ 3.0*ff[jj+1,:,kk]- 3.0*ff[jj,:,kk]+ff[jj-1,:,kk])/24.0
#add flux as f_i^t+dt = f_i^t - (U_i+1/2 - U_i-1/2)
ff[1:prm.nv+5, :, kk] -= ( flux[1:prm.nv+5, :] - flux[0:prm.nv+4, :] )
#limit flux
np.clip(ff, 0.0, None, out=ff)
return ff
def efield(ex, ajx, prm):
#remove mean
ajx -= np.mean( ajx[prm.xmid] )
#amperes law E_n+1 = E_n - J
ex[prm.xmid] = ex[prm.xmid] - 4.0 * np.pi * ajx[prm.xmid] ## 4\pi or epsilon factor?
if prm.periodic:
ex = wrap(ex, prm)
if prm.finite:
damp(ex, prm)
np.clip(ex, -100.0, 100.0, out=ex)
return ex
#def el_evolve(ffi, vxi, fp, prm):
#ffi electron distribution
#vxi velocity grid of electrons
#full photon distribution f(z, phi)
# do nothing
#return ffi
#def ph_evolve(ffi, vxi, fp, px, prm):
#
# wc = 1.0e0 #cyclotron frequency
# x = px / wc #dimensionless energy
#
# #mean velocity
# rho = np.trapz(ffi, x=vxi)
# g = np.mean(np.abs( ffi ))
# #rho = 1.0
# fp = g**4.0 * rho*(4.0/3.0)*x**(1.0/3.0) *np.exp(-x)
# return fp
def radiative_reactions(ux, ff, px, fp, prm):
#erase old stuff; i.e., do not accumulate
fp[:,:,:] = 0.0
#loop over spatial cells
for ix in prm.xfull:
iss = 0
#radiation reactions
for dirs in range(2):
if dirs == 0:
vpos = ux[:,iss] > 0 #+x going particles
elif dirs == 1:
vpos = ux[:,iss] < 0 #-x going particles
for kk in range(prm.ns):
ffi = ff[vpos, ix, kk] #slice correct velocities
vxi = np.abs( ux[vpos, iss] )
if dirs == 1:
vxi = np.flipud( vxi )
ffi = np.flipud( ffi )
#fp[dirs, :, ix] += radiate( ffi, vxi, fp[dirs, :, ix], px, prm) #evolve photons
#compute radiation per bin
for i, ivx in enumerate( vxi ):
#if ivx > 1.0e-2:
gamma = np.sqrt(1.0 + ivx**2)
S = CSapprox(px, gamma) #spectrum from one bundle of electrons with velocity of gamma
#normalize
S *= ffi[i] * prm.tau/(prm.sigma_T * prm.R) * 3.0e10 * 1.0e9
fp[dirs, :, ix] += S
return ff, fp
def radiate(ffi, vxi, fp, px, prm):
#time scaling
dt_rad = prm.dt/1.0e2
#number density scaling
ffi *= prm.tau/(prm.sigma_T * prm.R)
fp = ph_evolve(ffi, vxi, fp, px, dt_rad)
return fp
def collisions(ux, ff, px, fp, prm):
#erase old stuff; i.e., do not accumulate
fp[:,:,:] = 0.0
#loop over spatial cells
for ix in prm.xfull:
iss = 0
#radiation reactions
for dirs in range(2):
if dirs == 0:
vpos = ux[:,iss] > 0 #+x going particles
elif dirs == 1:
vpos = ux[:,iss] < 0 #-x going particles
for kk in range(prm.ns):
ffi = ff[vpos, ix, kk] #slice correct velocities
vxi = np.abs( ux[vpos, iss] )
if dirs == 1:
vxi = np.flipud( vxi )
ffi = np.flipud( ffi )
fp[dirs, :, ix] += radiate( ffi, vxi, fp[dirs, :, ix], px, prm) #evolve photons
print np.max( fp[dirs, :, ix] )
return ff, fp
def inject(ff, prm):
vd = -5.0
vt = 0.1
amp = 0.001
for kk in range(prm.ns):
ii = prm.xmid[-1]
ux = np.linspace(prm.vmin[kk], prm.vmax[kk], prm.nvfull)
for jj in range(prm.nvfull):
ff[jj, ii, kk] += amp * np.exp( -(ux[jj] - vd)**2/(2.0*vt)**2)
return ff
#initialize
#--------------------------------------------------
#load configuration
#ff, ex, ajx, xx, ux, px, fp = initial(prm)
ff, ex, ajx, xx, ux, px, fp = initial_test(prm)
#initial step
rho = charge(ff, ux, prm)
ex = poisson(ex, rho, prm)
#ff, fp = collisions(ux, ff, px, fp, prm)
#ff, fp = radiative_reactions(ux, ff, px, fp, prm)
# calculate plasma frequency for every species and every coordinate cell
for kk in range(prm.ns):
print"q = {}, m = {}".format(prm.q[kk],prm.m[kk])
for ii in prm.xfull:
rhos = prm.q[kk] * prm.du[kk] * np.sum( ff[:, ii, kk] )
wpe_calc = np.sqrt( 4.0 * np.pi * prm.q[kk] * rhos / prm.m[kk])
#print wpe_calc
#print ff[:,37,kk]
ff, ajx = position(ff, ux, ajx, prm)
ex = efield(ex, ajx, prm)
#sys.exit()
#--------------------------------------------------
# main loop
visz = visualize("out", xx, ux, px)
visz.plot(0, ff, ex, ajx, rho, fp) #plot once to create figures
simulation = np.zeros( (prm.nx, prm.ntime+1, 1) )
#--------------------------------------------------
#Save to file
import h5py
f = h5py.File("out/run.hdf5", "w")
grp0 = f.create_group("params")
grp0.attrs['dx'] = prm.dx
grp0.attrs['dt'] = prm.dt
grp0.attrs['nx'] = prm.nx
grp0.attrs['nv'] = prm.nv
grp0.attrs['ns'] = prm.ns
grp0.attrs['ntime'] = prm.ntime
grp = f.create_group("fields")
dset_ex = grp.create_dataset("Ex", (prm.nx, prm.ntime+1), dtype='f')
timer = Timer(["total", "lap"])
timer.start("total")
jtime = 0
time = 0.0
for jtime in range(prm.ntime+1):
if (jtime % 20 == 0):
#ff, fp = radiative_reactions(ux, ff, px, fp, prm)
print"{} {}".format(time, np.amax( np.absolute(ex) ))
# print "-----------", jtime, "/", time, "----------"
# timer.stats("lap")
visz.plot(jtime, ff, ex, ajx, rho, fp)
# timer.start("lap")
#ff = inject(ff, prm)
ff = velocity(ff, ex, prm)
#ff, fp = collisions(ux, ff, px, fp, prm)
#ff, fp = radiative_reactions(ux, ff, px, fp, prm)
ff, ajx = position(ff, ux, ajx, prm)
rho = charge(ff, ux, prm)
ex = efield(ex, ajx, prm)
#ex = poisson(ex, rho, prm)
time += prm.dt
#simulation[:, jtime, 0] = ex[prm.xmid]
dset_ex[:,jtime] = ex[prm.xmid]
timer.lap("lap")
timer.stop("total")
timer.stats("total")
| en | 0.5082 | #set up figure #charge density #integrate over the velocity distribution # qm = q/m, i.e. charge-to-mass ratio #gamma = 1.0 # charge density for certain species #sum over species #left halo to right edge #right halo to left edge #left halo #right halo # XXX why remove the mean? # Gaussian units, c=1 #damp E field # advection of the distribution function #compute shift in units of cells # upwind direction #vectorized over velocity space #1st order linear upwind flux #ss = np.ones(prm.nvfull)*ii - fa #iss = ss.astype(int) # index array needs to be type casted into int before we can use it #flux[:, ii] = aa[:] * np.diag(ff[:, iss, kk]) #second order conservative scheme #flux[:, ii] = aa * ( ff[:, ii+1, kk] + ff[:, ii, kk] )*0.5 \ # - aa[:]*aa * ( ff[:, ii+1, kk] - ff[:, ii, kk] )*0.5 #4th order conservative #add flux as f_i^t+dt = f_i^t - (U_i+1/2 - U_i-1/2) #numerical flux integration over velocity, i.e., U = int q*U(u) du/gamma #gamma = 1.0 #ajxs[prm.xmid, kk] = prm.q[kk] * prm.du[kk] * np.sum( flux[:, prm.xmid]/gamma, 0) #wrap boundaries #limit flux #reduce flux #interpolate half-integer staggered Ex to full integer grid fex #shift in units of phase space cells #1st order linear upwind scheme #fa = np.floor(aa).astype(int) #upwind direction #for ii in prm.xfull: #for ii in range(2, prm.nx+3): # js = jj - fa[ii] # flux[jj, ii] = aa[ii] * ff[js, ii, kk] #2nd order conservative #flux[jj, :] = aa[:] * ( ff[jj+1, :, kk] + ff[jj, :, kk] )*0.5 \ # - aa[:]*aa[:] * ( ff[jj+1, :, kk] - ff[jj, :, kk] )*0.5 #4th order conservative #add flux as f_i^t+dt = f_i^t - (U_i+1/2 - U_i-1/2) #limit flux #remove mean #amperes law E_n+1 = E_n - J ## 4\pi or epsilon factor? 
#def el_evolve(ffi, vxi, fp, prm): #ffi electron distribution #vxi velocity grid of electrons #full photon distribution f(z, phi) # do nothing #return ffi #def ph_evolve(ffi, vxi, fp, px, prm): # # wc = 1.0e0 #cyclotron frequency # x = px / wc #dimensionless energy # # #mean velocity # rho = np.trapz(ffi, x=vxi) # g = np.mean(np.abs( ffi )) # #rho = 1.0 # fp = g**4.0 * rho*(4.0/3.0)*x**(1.0/3.0) *np.exp(-x) # return fp #erase old stuff; i.e., do not accumulate #loop over spatial cells #radiation reactions #+x going particles #-x going particles #slice correct velocities #fp[dirs, :, ix] += radiate( ffi, vxi, fp[dirs, :, ix], px, prm) #evolve photons #compute radiation per bin #if ivx > 1.0e-2: #spectrum from one bundle of electrons with velocity of gamma #normalize #time scaling #number density scaling #erase old stuff; i.e., do not accumulate #loop over spatial cells #radiation reactions #+x going particles #-x going particles #slice correct velocities #evolve photons #initialize #-------------------------------------------------- #load configuration #ff, ex, ajx, xx, ux, px, fp = initial(prm) #initial step #ff, fp = collisions(ux, ff, px, fp, prm) #ff, fp = radiative_reactions(ux, ff, px, fp, prm) # calculate plasma frequency for every species and every coordinate cell #print wpe_calc #print ff[:,37,kk] #sys.exit() #-------------------------------------------------- # main loop #plot once to create figures #-------------------------------------------------- #Save to file #ff, fp = radiative_reactions(ux, ff, px, fp, prm) # print "-----------", jtime, "/", time, "----------" # timer.stats("lap") # timer.start("lap") #ff = inject(ff, prm) #ff, fp = collisions(ux, ff, px, fp, prm) #ff, fp = radiative_reactions(ux, ff, px, fp, prm) #ex = poisson(ex, rho, prm) #simulation[:, jtime, 0] = ex[prm.xmid] | 2.146362 | 2 |
debugged/bandsite/utils.py | bhrutledge/debugged-django | 0 | 6612942 | <filename>debugged/bandsite/utils.py
from django.core.mail import send_mail
from django.conf import settings
from debugged.bandsite.settings import *
def process_contact(form):
subject = form.cleaned_data['subject']
sender = '"%s" <%s>' % (form.cleaned_data['sender_name'], form.cleaned_data['sender_email'])
message = form.cleaned_data['message']
recipient = None
for contact in CONTACT_EMAILS:
if subject == contact['subject']:
recipient = contact['email']
break
if recipient:
subject = settings.EMAIL_SUBJECT_PREFIX + subject
send_mail(subject, message, sender, [recipient], fail_silently=False)
admin = settings.ADMINS[0][1]
send_mail(subject, message, sender, [admin], fail_silently=False)
def process_mailing_list(form):
sender = form.cleaned_data['sender_email']
send_mail("subscribe", "subscribe", sender, [LIST_EMAIL], fail_silently=False)
| <filename>debugged/bandsite/utils.py
from django.core.mail import send_mail
from django.conf import settings
from debugged.bandsite.settings import *
def process_contact(form):
subject = form.cleaned_data['subject']
sender = '"%s" <%s>' % (form.cleaned_data['sender_name'], form.cleaned_data['sender_email'])
message = form.cleaned_data['message']
recipient = None
for contact in CONTACT_EMAILS:
if subject == contact['subject']:
recipient = contact['email']
break
if recipient:
subject = settings.EMAIL_SUBJECT_PREFIX + subject
send_mail(subject, message, sender, [recipient], fail_silently=False)
admin = settings.ADMINS[0][1]
send_mail(subject, message, sender, [admin], fail_silently=False)
def process_mailing_list(form):
sender = form.cleaned_data['sender_email']
send_mail("subscribe", "subscribe", sender, [LIST_EMAIL], fail_silently=False)
| none | 1 | 2.088135 | 2 | |
src/pathme/kegg/cli.py | brucetony/PathMe | 12 | 6612943 | # -*- coding: utf-8 -*-
"""Command line interface for KEGG that can be run with ``python -m pathme.kegg``."""
import logging
import os
import time
import click
from tqdm import tqdm
from bio2bel_chebi import Manager as ChebiManager
from bio2bel_hgnc import Manager as HgncManager
from pybel import from_pickle
from .convert_to_bel import kegg_to_pickles
from .utils import download_kgml_files, get_kegg_pathway_ids
from ..constants import KEGG_BEL, KEGG_FILES
from ..export_utils import get_paths_in_folder
from ..utils import summarize_helper
logger = logging.getLogger(__name__)
__all__ = [
'main',
]
@click.group()
def main():
"""Manage KEGG."""
@main.command(help='Downloads KEGG files')
@click.option('-c', '--connection', help=f"Defaults to {KEGG_FILES}")
def download(connection):
"""Download KEGG KGML."""
kegg_ids = get_kegg_pathway_ids(connection=connection)
if click.confirm(
'You are about to download KGML files from KEGG.\n'
'Please make sure you have read KEGG license (see: https://www.kegg.jp/kegg/rest/).'
' These files cannot be distributed and their use must be exclusively with academic purposes.\n'
'We (PathMe developers) are not responsible for the end use of this data.\n',
):
click.echo('You have read and accepted the conditions stated above.\n')
download_kgml_files(kegg_ids)
@main.command()
@click.option('-f', '--flatten', is_flag=True, default=False)
@click.option('-e', '--export-folder', default=KEGG_BEL, show_default=True)
@click.option('-v', '--debug', is_flag=True, default=False, help='Debug mode')
def bel(flatten, export_folder, debug):
"""Convert KEGG to BEL."""
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(name)s - %(message)s")
logger.setLevel(logging.INFO)
if debug:
click.echo("Debug mode on")
logger.setLevel(logging.DEBUG)
t = time.time()
logger.info('Initiating HGNC Manager')
hgnc_manager = HgncManager()
if not hgnc_manager.is_populated():
click.echo('bio2bel_hgnc was not populated. Populating now.')
hgnc_manager.populate()
logger.info('Initiating ChEBI Manager')
chebi_manager = ChebiManager()
if not chebi_manager.is_populated():
click.echo('bio2bel_chebi was not populated. Populating now.')
chebi_manager.populate()
if flatten:
logger.info('Flattening mode activated')
resource_paths = [
path
for path in get_paths_in_folder(KEGG_FILES)
]
kegg_to_pickles(
resource_files=resource_paths,
resource_folder=KEGG_FILES,
hgnc_manager=hgnc_manager,
chebi_manager=chebi_manager,
flatten=flatten,
export_folder=export_folder,
)
logger.info('KEGG exported in %.2f seconds', time.time() - t)
@main.command()
@click.option('-e', '--export-folder', default=KEGG_BEL, show_default=True)
def summarize(export_folder):
"""Summarize the KEGG export."""
click.echo('loading KEGG graphs')
graphs = [
from_pickle(os.path.join(export_folder, fname))
for fname in tqdm(get_paths_in_folder(export_folder))
]
if graphs:
summarize_helper(graphs)
else:
click.echo("Please export KEGG to BEL first. Run 'python3 -m pathme kegg bel' ")
if __name__ == '__main__':
main()
| # -*- coding: utf-8 -*-
"""Command line interface for KEGG that can be run with ``python -m pathme.kegg``."""
import logging
import os
import time
import click
from tqdm import tqdm
from bio2bel_chebi import Manager as ChebiManager
from bio2bel_hgnc import Manager as HgncManager
from pybel import from_pickle
from .convert_to_bel import kegg_to_pickles
from .utils import download_kgml_files, get_kegg_pathway_ids
from ..constants import KEGG_BEL, KEGG_FILES
from ..export_utils import get_paths_in_folder
from ..utils import summarize_helper
logger = logging.getLogger(__name__)
__all__ = [
'main',
]
@click.group()
def main():
"""Manage KEGG."""
@main.command(help='Downloads KEGG files')
@click.option('-c', '--connection', help=f"Defaults to {KEGG_FILES}")
def download(connection):
"""Download KEGG KGML."""
kegg_ids = get_kegg_pathway_ids(connection=connection)
if click.confirm(
'You are about to download KGML files from KEGG.\n'
'Please make sure you have read KEGG license (see: https://www.kegg.jp/kegg/rest/).'
' These files cannot be distributed and their use must be exclusively with academic purposes.\n'
'We (PathMe developers) are not responsible for the end use of this data.\n',
):
click.echo('You have read and accepted the conditions stated above.\n')
download_kgml_files(kegg_ids)
@main.command()
@click.option('-f', '--flatten', is_flag=True, default=False)
@click.option('-e', '--export-folder', default=KEGG_BEL, show_default=True)
@click.option('-v', '--debug', is_flag=True, default=False, help='Debug mode')
def bel(flatten, export_folder, debug):
    """Convert downloaded KEGG KGML files to BEL pickles in *export_folder*.

    Requires the bio2bel HGNC and ChEBI databases; each is populated on
    first use, which may take a while.
    """
    logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(name)s - %(message)s")
    logger.setLevel(logging.INFO)
    if debug:
        click.echo("Debug mode on")
        logger.setLevel(logging.DEBUG)
    t = time.time()
    # Gene symbols are normalized through HGNC; make sure the mapping DB exists.
    logger.info('Initiating HGNC Manager')
    hgnc_manager = HgncManager()
    if not hgnc_manager.is_populated():
        click.echo('bio2bel_hgnc was not populated. Populating now.')
        hgnc_manager.populate()
    # Chemicals are normalized through ChEBI.
    logger.info('Initiating ChEBI Manager')
    chebi_manager = ChebiManager()
    if not chebi_manager.is_populated():
        click.echo('bio2bel_chebi was not populated. Populating now.')
        chebi_manager.populate()
    if flatten:
        logger.info('Flattening mode activated')
    # The original wrapped this in an identity comprehension; list() is the
    # direct way to materialize the generator of file names.
    resource_paths = list(get_paths_in_folder(KEGG_FILES))
    kegg_to_pickles(
        resource_files=resource_paths,
        resource_folder=KEGG_FILES,
        hgnc_manager=hgnc_manager,
        chebi_manager=chebi_manager,
        flatten=flatten,
        export_folder=export_folder,
    )
    logger.info('KEGG exported in %.2f seconds', time.time() - t)
@main.command()
@click.option('-e', '--export-folder', default=KEGG_BEL, show_default=True)
def summarize(export_folder):
    """Summarize the KEGG BEL export.

    Loads every pickled BEL graph found in *export_folder* and prints
    summary statistics; prompts the user to run the export first if no
    graphs are found.
    """
    click.echo('loading KEGG graphs')
    graphs = [
        from_pickle(os.path.join(export_folder, fname))
        for fname in tqdm(get_paths_in_folder(export_folder))
    ]
    if graphs:
        summarize_helper(graphs)
    else:
        click.echo("Please export KEGG to BEL first. Run 'python3 -m pathme kegg bel' ")
# Allow running this module directly as a script.
if __name__ == '__main__':
    main()
| en | 0.711525 | # -*- coding: utf-8 -*- Command line interface for KEGG that can be run with ``python -m pathme.kegg``. Manage KEGG. Download KEGG KGML. Convert KEGG to BEL. Summarize the KEGG export. | 2.28525 | 2 |
ARC_face/utils.py | zz00zws/magic_learning | 1 | 6612944 | <reponame>zz00zws/magic_learning
import torch,os,math
import PIL.Image as pimg
import mtcnn
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def iou(x11, xs):
    """Intersection-over-union between two batches of boxes.

    Rows are laid out as ``[score, x1, y1, x2, y2, ...]`` (corners in
    columns 1-4); ``x11`` broadcasts against ``xs`` row-wise.

    :return: per-row IoU tensor on the module-level ``device``.
    """
    x11 = x11.to(device)
    xs = xs.to(device)
    # Areas of both box sets.
    a_x1 = (x11[:, 3] - x11[:, 1]) * (x11[:, 4] - x11[:, 2])
    a_x = (xs[:, 3] - xs[:, 1]) * (xs[:, 4] - xs[:, 2])
    # Intersection rectangle corners.
    x1 = torch.max(x11[:, 1], xs[:, 1])
    y1 = torch.max(x11[:, 2], xs[:, 2])
    x2 = torch.min(x11[:, 3], xs[:, 3])
    y2 = torch.min(x11[:, 4], xs[:, 4])
    # clamp(..., min=0) replaces the original max() against a freshly
    # allocated zero tensor: same result, without a per-call allocation
    # and host-to-device transfer.
    w = torch.clamp(x2 - x1, min=0)
    h = torch.clamp(y2 - y1, min=0)
    s = w * h / (a_x + a_x1 - w * h)
    return s
def iou_m(x11, xs):
    """Intersection over the *minimum* box area ("min" IoU variant).

    Same box layout as :func:`iou` (corners in columns 1-4); dividing by
    the smaller area makes a fully-contained box score 1, which is useful
    for suppressing nested detections.
    """
    x11 = x11.to(device)
    xs = xs.to(device)
    a_x1 = (x11[:, 3] - x11[:, 1]) * (x11[:, 4] - x11[:, 2])
    a_x = (xs[:, 3] - xs[:, 1]) * (xs[:, 4] - xs[:, 2])
    x1 = torch.max(x11[:, 1], xs[:, 1])
    y1 = torch.max(x11[:, 2], xs[:, 2])
    x2 = torch.min(x11[:, 3], xs[:, 3])
    y2 = torch.min(x11[:, 4], xs[:, 4])
    # clamp avoids allocating and transferring a zero tensor on every call.
    w = torch.clamp(x2 - x1, min=0)
    h = torch.clamp(y2 - y1, min=0)
    s = w * h / torch.min(a_x, a_x1)
    return s
def nms(boxes, size, thresh=0.3, isMin=False):
    """Non-maximum suppression over ``boxes`` (rows of length ``size``).

    Boxes are processed in descending confidence order (column 0); any box
    overlapping an accepted box by more than ``thresh`` is discarded.
    ``isMin`` switches the overlap measure from IoU to intersection over
    the minimum area (:func:`iou_m`).

    :return: tensor of the surviving boxes, possibly empty.
    """
    if boxes.shape[0] == 0:
        return torch.tensor([])
    # Sort by descending confidence; only the permutation is needed, so the
    # sorted values are discarded (the original bound them to an unused name).
    _, order = (-boxes[:, 0]).sort(0)
    _boxes = boxes[order].to(device)
    r_boxes = torch.tensor([]).view(-1, size).to(device)
    while _boxes.shape[0] > 1:
        a = _boxes[0].view(-1, size)
        b = _boxes[1:].view(-1, size)
        r_boxes = torch.cat((r_boxes, a), 0)
        # Keep only candidates that do not overlap the accepted box too much.
        if isMin:
            _boxes = b[iou_m(a[:, :5], b[:, :5]) < thresh]
        else:
            _boxes = b[iou(a[:, :5], b[:, :5]) < thresh]
    if _boxes.shape[0] > 0:
        r_boxes = torch.cat((r_boxes, _boxes[0].view(-1, size)), 0)
    return r_boxes
def crop(img,oms):
    """Crop roughly-aligned face patches from ``img`` for each detection.

    ``oms`` rows appear to be ``[score, x1, y1, x2, y2, lm...]`` with five
    landmark (x, y) pairs starting at column 5 — TODO confirm against the
    mtcnn output layout.

    :return: list of cropped PIL images, one per detection row.
    """
    # Detection centers and half the longer box side (square crop radius).
    cx=(oms[:,3]+oms[:,1])/2
    cy=(oms[:,4]+oms[:,2])/2
    l=torch.max(oms[:,3]-oms[:,1],oms[:,4]-oms[:,2])/2
    # Landmark pairs 1/2 (presumably the eyes) and 4/5 (presumably the
    # mouth corners) — verify against the detector's landmark ordering.
    px1=oms[:,5]
    py1=oms[:,6]
    px2=oms[:,7]
    py2=oms[:,8]
    px4=oms[:,11]
    py4=oms[:,12]
    px5=oms[:,13]
    py5=oms[:,14]
    ux=px2-px1
    uy=py2-py1
    dx=px5-px4
    dy=py5-py4
    # NOTE(review): this averages the two line slopes and then feeds the
    # result to acos as if it were a cosine — confirm this is intended.
    cos=(uy/ux+dy/dx)/2
    kks=[]
    for i in range(l.size(0)):
        # print(int(1.42*l[i].item()))
        # Square crop with ~1.42x margin to leave room for rotation.
        im=img.crop((cx[i].item()-int(1.42*l[i].item()),cy[i].item()-int(1.42*l[i].item()),
                     cx[i].item()+int(1.42*l[i].item()),cy[i].item()+int(1.42*l[i].item())))
        theta=math.acos(cos[i].item())
        # NOTE(review): ``theta`` is in radians but PIL's ``Image.rotate``
        # expects degrees — confirm whether this mismatch is intentional.
        im=im.rotate(-theta)
        x,y=im.size
        # Trim the rotation margin back to the central 44% of the square.
        im=im.crop((int(0.28*x),int(0.28*y),int(0.72*x),int(0.72*y)))
        # im.save('./img_out/'+str(i)+str(theta)+'.jpg')
        kks.append(im)
    return kks
if __name__ == '__main__':
    # Smoke test: run detection + cropping over every image in ./img.
    path='./img'
    for i in os.listdir(path):
        img=pimg.open(os.path.join(path,i))
        oms=mtcnn.test_all(img)
        crop(img,oms)
| import torch,os,math
import PIL.Image as pimg
import mtcnn
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def iou(x11,xs):
x11=x11.to(device)
xs=xs.to(device)
a_x1=(x11[:,3]-x11[:,1])*(x11[:,4]-x11[:,2])
a_x=(xs[:,3]-xs[:,1])*(xs[:,4]-xs[:,2])
x1=torch.max(x11[:,1],xs[:,1])
y1=torch.max(x11[:,2],xs[:,2])
x2=torch.min(x11[:,3],xs[:,3])
y2=torch.min(x11[:,4],xs[:,4])
w=torch.max(torch.tensor([0]).float().to(device),x2-x1).to(device)
h=torch.max(torch.tensor([0]).float().to(device),y2-y1).to(device)
s=w*h/(a_x+a_x1-w*h)
return s
def iou_m(x11,xs):
x11=x11.to(device)
xs=xs.to(device)
a_x1=(x11[:,3]-x11[:,1])*(x11[:,4]-x11[:,2])
a_x=(xs[:,3]-xs[:,1])*(xs[:,4]-xs[:,2])
x1=torch.max(x11[:,1],xs[:,1])
y1=torch.max(x11[:,2],xs[:,2])
x2=torch.min(x11[:,3],xs[:,3])
y2=torch.min(x11[:,4],xs[:,4])
w=torch.max(torch.tensor([0]).to(device).float(),x2-x1).to(device)
h=torch.max(torch.tensor([0]).to(device).float(),y2-y1).to(device)
s=w*h/torch.min(a_x,a_x1)
return s
def nms(boxes,size,thresh=0.3,isMin=False):
if boxes.shape[0] == 0:
return torch.tensor([])
asd,boxx=(-boxes[:,0]).sort(0)
_boxes = boxes[boxx].to(device)
r_boxes = torch.tensor([]).view(-1,size).to(device)
while _boxes.shape[0] >1:
a = _boxes[0].view(-1,size)
b = _boxes[1:].view(-1,size)
r_boxes = torch.cat((r_boxes,a),0)
if isMin:
_boxes = b[iou_m(a[:,:5],b[:,:5]) < thresh]
else:
_boxes = b[iou(a[:,:5],b[:,:5]) < thresh]
if _boxes.shape[0] >0:
r_boxes = torch.cat((r_boxes,_boxes[0].view(-1,size)),0)
return r_boxes
def crop(img,oms):
cx=(oms[:,3]+oms[:,1])/2
cy=(oms[:,4]+oms[:,2])/2
l=torch.max(oms[:,3]-oms[:,1],oms[:,4]-oms[:,2])/2
px1=oms[:,5]
py1=oms[:,6]
px2=oms[:,7]
py2=oms[:,8]
px4=oms[:,11]
py4=oms[:,12]
px5=oms[:,13]
py5=oms[:,14]
ux=px2-px1
uy=py2-py1
dx=px5-px4
dy=py5-py4
cos=(uy/ux+dy/dx)/2
kks=[]
for i in range(l.size(0)):
# print(int(1.42*l[i].item()))
im=img.crop((cx[i].item()-int(1.42*l[i].item()),cy[i].item()-int(1.42*l[i].item()),
cx[i].item()+int(1.42*l[i].item()),cy[i].item()+int(1.42*l[i].item())))
theta=math.acos(cos[i].item())
im=im.rotate(-theta)
x,y=im.size
im=im.crop((int(0.28*x),int(0.28*y),int(0.72*x),int(0.72*y)))
# im.save('./img_out/'+str(i)+str(theta)+'.jpg')
kks.append(im)
return kks
if __name__ == '__main__':
path='./img'
for i in os.listdir(path):
img=pimg.open(os.path.join(path,i))
oms=mtcnn.test_all(img)
crop(img,oms) | en | 0.169153 | # print(int(1.42*l[i].item())) # im.save('./img_out/'+str(i)+str(theta)+'.jpg') | 2.223226 | 2 |
chat/views.py | SungHwanKaist/VoiceChat | 0 | 6612945 | <gh_stars>0
from django.db import transaction
from django.shortcuts import render, redirect, render_to_response, get_object_or_404
import haikunator
from .models import Room
from django.template import RequestContext
from django.http import HttpResponseRedirect, HttpResponse
import uuid
import random
def about(request):
    """Render the lobby page listing rooms that still have a free slot.

    Rooms hold at most two participants, so only rooms with fewer than two
    members are offered for joining.
    """
    # The original built the list with a manual append loop and created an
    # unused RequestContext; the comprehension is equivalent and clearer.
    room_list = [room for room in Room.objects.all() if room.number < 2]
    return render(request, "chat/about.html", {
        'room_list': room_list,
    })
def new_room(request):
    """
    Create a new room with a random human-readable label and redirect to it.

    Retries until an unused label is found; the existence check and the
    insert run inside one transaction to avoid racing a concurrent creator.
    """
    new_room = None
    while not new_room:
        with transaction.atomic():
            # haikunator produces labels like 'ancient-river-1234'.
            label = haikunator.haikunate()
            if Room.objects.filter(label=label).exists():
                continue
            new_room = Room.objects.create(label=label, number=0)
    return redirect(chat_room, label=label)
def chat_room(request, label):
    """
    Room view - show the room, with latest messages.

    The template for this view has the WebSocket business to send and stream
    messages, so see the template for where the magic happens.
    """
    # If the room with the given label doesn't exist, automatically create it
    # upon first visit (a la etherpad).  ``number`` belongs in ``defaults``:
    # with it in the lookup (as the original had), an existing room whose
    # counter had moved past 0 would not match and the subsequent create
    # would collide with the existing label.
    room, created = Room.objects.get_or_create(label=label, defaults={'number': 0})
    if room.chat_status == "Waiting":
        return render_to_response('')
    elif room.chat_status == "Initialize":
        return render_to_response('')
    # We want to show the last 50 messages, ordered most-recent-last
    messages = reversed(room.messages.order_by('-timestamp')[:50])
    return render(request, "chat/room.html", {
        'room': room,
        'messages': messages,
    })
def update_status(request):
    """Update a room's chat status from POSTed ``label``/``status`` fields.

    Echoes the new status back in the HTTP response body.
    """
    label = request.POST['label']
    status = request.POST['status']
    # ``.all()`` before ``.get()`` was redundant; ``get()`` queries directly.
    room = Room.objects.get(label=label)
    room.chat_status = status
    room.save()
    return HttpResponse(status)
def end_chat(label):
    """Mark the room with ``label`` as terminated and redirect to the lobby.

    Note: unlike the view functions above, this helper takes a room label
    rather than a request object.
    """
    # ``.all()`` before ``.get()`` was redundant; ``get()`` queries directly.
    room = Room.objects.get(label=label)
    room.chat_status = "Terminated"
    room.save()
    return redirect('/')
| from django.db import transaction
from django.shortcuts import render, redirect, render_to_response, get_object_or_404
import haikunator
from .models import Room
from django.template import RequestContext
from django.http import HttpResponseRedirect, HttpResponse
import uuid
import random
def about(request):
context = RequestContext(request)
room_list = []
for room in Room.objects.all():
if room.number < 2:
room_list.append(room)
return render(request, "chat/about.html", {
'room_list': room_list,
})
def new_room(request):
"""
Randomly create a new room, and redirect to it.
"""
new_room = None
while not new_room:
with transaction.atomic():
label = haikunator.haikunate()
if Room.objects.filter(label=label).exists():
continue
new_room = Room.objects.create(label=label, number=0)
return redirect(chat_room, label=label)
def chat_room(request, label):
"""
Room view - show the room, with latest messages.
The template for this view has the WebSocket business to send and stream
messages, so see the template for where the magic happens.
"""
# If the room with the given label doesn't exist, automatically create it
# upon first visit (a la etherpad).
context = RequestContext(request)
room, created = Room.objects.get_or_create(label=label, number=0)
if room.chat_status == "Waiting":
return render_to_response('')
elif room.chat_status == "Initialize":
return render_to_response('')
# We want to show the last 50 messages, ordered most-recent-last
messages = reversed(room.messages.order_by('-timestamp')[:50])
return render(request, "chat/room.html", {
'room': room,
'messages': messages,
})
def update_status(request):
label = request.POST['label']
status = request.POST['status']
room = Room.objects.all().get(label = label)
room.chat_status = status
room.save()
return HttpResponse(status)
def end_chat(label):
room = Room.objects.all().get(label = label)
room.chat_status = "Terminated"
room.save()
return redirect('/') | en | 0.836329 | Randomly create a new room, and redirect to it. Room view - show the room, with latest messages. The template for this view has the WebSocket business to send and stream messages, so see the template for where the magic happens. # If the room with the given label doesn't exist, automatically create it # upon first visit (a la etherpad). # We want to show the last 50 messages, ordered most-recent-last | 2.450384 | 2 |
instruction_set.py | chuckeles/genetic-treasures-python | 1 | 6612946 | import random
def generate(size=64):
    """Generate a fresh instruction set of ``size`` random bytes (0-255)."""
    instructions = []
    for _ in range(size):
        instructions.append(random.randrange(256))
    return instructions
def crossover(parent1, parent2, take_random=False):
    """Cross two instruction sets and return a pair of children.

    With ``take_random`` False, performs single-point crossover: a random
    cut is chosen, and each child takes one part from each parent.  With
    ``take_random`` True, performs uniform crossover: every instruction is
    drawn independently from one of the two parents.
    """
    if not take_random:
        # Single-point crossover: swap the segments around a random cut.
        cut = random.randrange(len(parent1))
        first_child = parent2[cut:] + parent1[:cut]
        second_child = parent1[cut:] + parent2[:cut]
        return first_child, second_child

    # Uniform crossover: pick each gene's source parent independently.
    picks = [random.choice([1, 2]) for _ in parent1]
    child_a = [g1 if pick == 1 else g2
               for pick, g1, g2 in zip(picks, parent1, parent2)]
    child_b = [g1 if pick == 2 else g2
               for pick, g1, g2 in zip(picks, parent1, parent2)]
    return child_a, child_b
def mutate_bits(inset, mutation_chance=5):
    """Flip one random bit per instruction, each with ``mutation_chance``% odds.

    The chosen bit is randomly either set or cleared, so a mutation may
    leave the value unchanged when the bit already had that state.
    """
    def flip_one_bit(value):
        mask = 1 << random.randrange(8)
        if random.choice([True, False]):
            return value | mask
        return value & ~mask

    mutated = []
    for instruction in inset:
        if random.randrange(100) < mutation_chance:
            mutated.append(flip_one_bit(instruction))
        else:
            mutated.append(instruction)
    return mutated
def mutate_bytes(inset, mutation_chance=2):
    """Replace whole instructions with new random bytes, each with
    ``mutation_chance``% odds."""
    result = []
    for instruction in inset:
        mutate = random.randrange(100) < mutation_chance
        result.append(random.randrange(256) if mutate else instruction)
    return result
def mutate_combined(inset, mutation_chance=5):
    """Apply byte-level then bit-level mutation in sequence.

    Whole-byte replacement runs at a quarter of ``mutation_chance`` before
    single-bit flips run at the full rate.
    """
    byte_mutated = mutate_bytes(inset, round(mutation_chance / 4))
    return mutate_bits(byte_mutated, mutation_chance)
| import random
def generate(size=64):
""" Generate a new instruction set. """
return [random.randrange(256) for _ in range(size)]
def crossover(parent1, parent2, take_random=False):
""" Cross-over 2 instruction sets. If take_random is False, selects just 1 point and
takes the first part from set1 and the second from set2. If take_random is True,
each instruction is taken from either set randomly. """
if take_random:
indices = [random.choice([1, 2]) for _ in parent1]
child1 = []
child2 = []
for i, index in enumerate(indices):
child1.append(parent1[i] if index == 1 else parent2[i])
child2.append(parent1[i] if index == 2 else parent2[i])
return child1, child2
else:
point = random.randrange(len(parent1))
return parent2[point:] + parent1[:point], parent1[point:] + parent2[:point]
def mutate_bits(inset, mutation_chance=5):
""" Mutate the instruction set by changing 1 bit per instruction. """
def change_bit(byte):
bit = 1 << random.randrange(8)
if random.choice([True, False]):
return byte | bit
else:
return byte & ~bit
return [change_bit(i) if random.randrange(100) < mutation_chance else i for i in inset]
def mutate_bytes(inset, mutation_chance=2):
""" Mutate the instruction set by changing whole bytes. """
return [random.randrange(256) if random.randrange(100) < mutation_chance else i for i in inset]
def mutate_combined(inset, mutation_chance=5):
""" Apply mutation for bits and bytes simultaneously. """
return mutate_bits(mutate_bytes(inset, round(mutation_chance / 4)), mutation_chance)
| en | 0.892442 | Generate a new instruction set. Cross-over 2 instruction sets. If take_random is False, selects just 1 point and takes the first part from set1 and the second from set2. If take_random is True, each instruction is taken from either set randomly. Mutate the instruction set by changing 1 bit per instruction. Mutate the instruction set by changing whole bytes. Apply mutation for bits and bytes simultaneously. | 3.372969 | 3 |
UrlConverter.py | gh640/SublimeUrlConverter | 3 | 6612947 | <reponame>gh640/SublimeUrlConverter<filename>UrlConverter.py
# coding: utf-8
"""Converts selected URLs to links with fetched page titles.
"""
import html
import logging
from concurrent.futures import TimeoutError, ThreadPoolExecutor, as_completed
from urllib.parse import urlparse
import requests
from bs4 import BeautifulSoup
import sublime
import sublime_plugin
__version__ = '0.5.0'
__author__ = "<NAME>"
__copyright__ = 'Copyright 2021, <NAME>'
__license__ = 'MIT'
logger = logging.getLogger('UrlConverter')
SETTINGS_NAME = 'UrlConverter.sublime-settings'
class TitleFetcher:
    """Webpage title fetcher with multithreading."""

    def fetch(self, urls):
        """Fetch the HTML ``<title>`` of every URL concurrently.

        :param urls: iterable of URL strings.
        :return: dict mapping each url to its title, or ``False`` for URLs
            whose fetch/parse failed; empty dict on overall timeout or when
            the ``timeout`` setting is not numeric.
        """
        settings = sublime.load_settings(SETTINGS_NAME)
        timeout = settings.get('timeout', 10)
        if type(timeout) not in (int, float):
            logger.error('`timeout` must be an int or float.')
            return {}
        results = []
        with ThreadPoolExecutor(max_workers=10) as executor:
            futures = (executor.submit(self.fetch_title, url) for url in urls)
            try:
                results.extend(
                    f.result() for f in as_completed(futures, timeout=timeout)
                )
            except TimeoutError as e:
                # One shared deadline for the whole batch.
                logger.error('Page title fetching timed out.')
                return {}
        return dict(results)

    @staticmethod
    def fetch_title(url):
        """Fetch a single URL and return a ``(url, title)`` pair.

        ``title`` is ``False`` when the request or HTML parsing fails, so
        callers can distinguish failures after ``dict()`` conversion.
        """
        try:
            response = requests.get(url)
            soup = BeautifulSoup(response.text, 'html.parser')
            title = soup.head.title.text.strip()
        except Exception as e:
            title = False
            logger.error('Failed to fetch an HTML title of a URL: {}.'.format(str(e)))
        return (url, title)
class BaseUrlConverter:
    """Common abstract url converter.

    Subclasses set ``REPL_TEMPLATE`` (with ``{url}``/``{title}`` fields) or
    override :meth:`prepare_region_and_repls` to control the output format.
    """
    REPL_TEMPLATE = ''

    def run(self, edit):
        """Replace every selected URL in the view with its formatted link."""
        region_and_urls = self.get_selected_urls()
        region_and_repls = self.prepare_region_and_repls(region_and_urls)
        self.replace_regions(edit, region_and_repls)
        sublime.status_message('UrlConverter: urls are converted successfully.')

    def prepare_region_and_repls(self, region_and_urls):
        """Turn ``(region, url)`` pairs into ``(region, replacement)`` pairs."""
        urls = self.extract_unique_urls(region_and_urls)
        url_titles_dict = self.fetch_titles(urls)
        return self.combine_region_links(region_and_urls, url_titles_dict)

    def get_selected_urls(self):
        """Collect ``(region, url)`` pairs for selections holding http(s) URLs."""
        region_and_urls = []
        for region in self.view.sel():
            url = self.view.substr(region).strip()
            parsed = urlparse(url)
            if parsed.scheme not in ("http", "https"):
                continue
            region_and_urls.append((region, url))
        return region_and_urls

    def extract_unique_urls(self, region_and_urls):
        """Return the set of distinct URLs so each is fetched only once."""
        return set(url for region, url in region_and_urls)

    def fetch_titles(self, urls):
        """Fetch page titles; see :class:`TitleFetcher` for the result shape."""
        fetcher = TitleFetcher()
        return fetcher.fetch(urls)

    def combine_region_links(self, region_and_urls, url_titles_dict):
        """Format replacements, skipping URLs whose title is missing/falsy."""
        region_and_repls = []
        for region, url in region_and_urls:
            if url_titles_dict.get(url):
                repl = self.REPL_TEMPLATE.format(url=url, title=url_titles_dict[url])
                region_and_repls.append((region, repl))
        return region_and_repls

    def replace_regions(self, edit, region_and_repls):
        """Apply the replacements to the view."""
        # Replace regions from the last to avoid misselection.
        for region, repl in sorted(region_and_repls, key=lambda x: x[0], reverse=True):
            if repl:
                self.view.replace(edit, region, repl)
class UrlConverterConvertToHtml(BaseUrlConverter, sublime_plugin.TextCommand):
    """Html url converter command."""
    REPL_TEMPLATE = '<a href="{url}">{title}</a>'

    def combine_region_links(self, region_and_urls, url_titles_dict):
        """Override to escape both the ``href`` URL and the link text.

        The base implementation inserts the raw title; a fetched title
        containing ``<``, ``&`` or quotes would previously produce broken
        (and potentially unsafe) HTML markup.
        """
        region_and_repls = []
        for region, url in region_and_urls:
            if url_titles_dict.get(url):
                repl = self.REPL_TEMPLATE.format(
                    url=html.escape(url), title=html.escape(url_titles_dict[url])
                )
                region_and_repls.append((region, repl))
        return region_and_repls
class UrlConverterConvertToMarkdown(BaseUrlConverter, sublime_plugin.TextCommand):
    """Markdown url converter command: ``[title](url)`` inline links."""
    REPL_TEMPLATE = '[{title}]({url})'
class UrlConverterConvertToRestructuredtext(
    BaseUrlConverter, sublime_plugin.TextCommand
):
    """RestructuredText url converter command (embedded-URI hyperlink)."""
    REPL_TEMPLATE = '`{title} <{url}>`_'
class UrlConverterConvertToPath(BaseUrlConverter, sublime_plugin.TextCommand):
    """Path url converter command: strip the scheme and host from each URL."""

    def prepare_region_and_repls(self, region_and_urls):
        # No titles are needed for this format, so skip the network
        # round-trip the base class would perform.
        converter = self.extract_path_of_url
        return ((region, converter(url)) for region, url in region_and_urls)

    def extract_path_of_url(self, url):
        """Return the path (plus params/query/fragment) of *url*.

        Rebuilds the tail with ``urlunparse`` so the ``;``/``?``/``#``
        delimiters survive; the previous plain ``''.join`` dropped them.
        """
        from urllib.parse import urlunparse
        parsed = urlparse(url)
        return urlunparse(('', '') + tuple(parsed[2:]))
class UrlConverterConvertToCustom(BaseUrlConverter, sublime_plugin.TextCommand):
    """Custom-format url converter command.

    Uses the ``template`` command argument when given, otherwise falls back
    to the ``fallback_template`` setting (default: newline-separated
    ``{title}`` then ``{url}``).
    """
    def run(self, edit, template=None):
        if template:
            self.REPL_TEMPLATE = template
        else:
            settings = sublime.load_settings(SETTINGS_NAME)
            self.REPL_TEMPLATE = settings.get('fallback_template', '{title}\n{url}')
        super().run(edit)
| # coding: utf-8
"""Converts selected URLs to links with fetched page titles.
"""
import html
import logging
from concurrent.futures import TimeoutError, ThreadPoolExecutor, as_completed
from urllib.parse import urlparse
import requests
from bs4 import BeautifulSoup
import sublime
import sublime_plugin
__version__ = '0.5.0'
__author__ = "<NAME>"
__copyright__ = 'Copyright 2021, <NAME>'
__license__ = 'MIT'
logger = logging.getLogger('UrlConverter')
SETTINGS_NAME = 'UrlConverter.sublime-settings'
class TitleFetcher:
"""Webpage title fetcher with multithreading."""
def fetch(self, urls):
settings = sublime.load_settings(SETTINGS_NAME)
timeout = settings.get('timeout', 10)
if type(timeout) not in (int, float):
logger.error('`timeout` must be an int or float.')
return {}
results = []
with ThreadPoolExecutor(max_workers=10) as executor:
futures = (executor.submit(self.fetch_title, url) for url in urls)
try:
results.extend(
f.result() for f in as_completed(futures, timeout=timeout)
)
except TimeoutError as e:
logger.error('Page title fetching timed out.')
return {}
return dict(results)
@staticmethod
def fetch_title(url):
try:
response = requests.get(url)
soup = BeautifulSoup(response.text, 'html.parser')
title = soup.head.title.text.strip()
except Exception as e:
title = False
logger.error('Failed to fetch an HTML title of a URL: {}.'.format(str(e)))
return (url, title)
class BaseUrlConverter:
"""Common abstract url converter."""
REPL_TEMPLATE = ''
def run(self, edit):
region_and_urls = self.get_selected_urls()
region_and_repls = self.prepare_region_and_repls(region_and_urls)
self.replace_regions(edit, region_and_repls)
sublime.status_message('UrlConverter: urls are converted successfully.')
def prepare_region_and_repls(self, region_and_urls):
urls = self.extract_unique_urls(region_and_urls)
url_titles_dict = self.fetch_titles(urls)
return self.combine_region_links(region_and_urls, url_titles_dict)
def get_selected_urls(self):
region_and_urls = []
for region in self.view.sel():
url = self.view.substr(region).strip()
parsed = urlparse(url)
if parsed.scheme not in ("http", "https"):
continue
region_and_urls.append((region, url))
return region_and_urls
def extract_unique_urls(self, region_and_urls):
return set(url for region, url in region_and_urls)
def fetch_titles(self, urls):
fetcher = TitleFetcher()
return fetcher.fetch(urls)
def combine_region_links(self, region_and_urls, url_titles_dict):
region_and_repls = []
for region, url in region_and_urls:
if url_titles_dict.get(url):
repl = self.REPL_TEMPLATE.format(url=url, title=url_titles_dict[url])
region_and_repls.append((region, repl))
return region_and_repls
def replace_regions(self, edit, region_and_repls):
# Replace regions from the last to avoid misselection.
for region, repl in sorted(region_and_repls, key=lambda x: x[0], reverse=True):
if repl:
self.view.replace(edit, region, repl)
class UrlConverterConvertToHtml(BaseUrlConverter, sublime_plugin.TextCommand):
"""Html url converter command."""
REPL_TEMPLATE = '<a href="{url}">{title}</a>'
def combine_region_links(self, region_and_urls, url_titles_dict):
"""Override to escape the url in html `href`."""
region_and_repls = []
for region, url in region_and_urls:
if url_titles_dict.get(url):
repl = self.REPL_TEMPLATE.format(
url=html.escape(url), title=url_titles_dict[url]
)
region_and_repls.append((region, repl))
return region_and_repls
class UrlConverterConvertToMarkdown(BaseUrlConverter, sublime_plugin.TextCommand):
"""Markdown url converter command."""
REPL_TEMPLATE = '[{title}]({url})'
class UrlConverterConvertToRestructuredtext(
BaseUrlConverter, sublime_plugin.TextCommand
):
"""RestructuredText url converter command."""
REPL_TEMPLATE = '`{title} <{url}>`_'
class UrlConverterConvertToPath(BaseUrlConverter, sublime_plugin.TextCommand):
"""Path url converter command."""
def prepare_region_and_repls(self, region_and_urls):
converter = self.extract_path_of_url
return ((region, converter(url)) for region, url in region_and_urls)
def extract_path_of_url(self, url):
parsed = urlparse(url)
return ''.join(parsed[2:])
class UrlConverterConvertToCustom(BaseUrlConverter, sublime_plugin.TextCommand):
"""Custom-format url converter command."""
def run(self, edit, template=None):
if template:
self.REPL_TEMPLATE = template
else:
settings = sublime.load_settings(SETTINGS_NAME)
self.REPL_TEMPLATE = settings.get('fallback_template', '{title}\n{url}')
super().run(edit) | en | 0.70151 | # coding: utf-8 Converts selected URLs to links with fetched page titles. Webpage title fetcher with multithreading. Common abstract url converter. # Replace regions from the last to avoid misselection. Html url converter command. Override to escape the url in html `href`. Markdown url converter command. RestructuredText url converter command. Path url converter command. Custom-format url converter command. | 2.733033 | 3 |
lib/systems/beta-d-ribopyranose.py | pulsar-chem/BPModule | 0 | 6612948 | import pulsar as psr
def load_ref_system():
    """ Returns beta-d-ribopyranose as found in the IQMol fragment library.
        All credit to https://github.com/nutjunkie/IQmol

        The multi-line string is XYZ-style geometry: one atom per line as
        ``element x y z`` (units presumably Angstroms, as is conventional
        for these fragments — not stated here).
    """
    return psr.make_system("""
        C      1.2536      0.5171      0.5160
        O      0.2337      0.9565      1.4313
        C     -1.0704      1.1270      0.8432
        C     -1.5748     -0.2276      0.3071
        O     -2.8327     -0.0487     -0.3275
        C     -0.5604     -0.8293     -0.6944
        O     -0.6145      0.0498     -1.8280
        C      0.8657     -0.8516     -0.1081
        O      1.6871     -1.0903     -1.2600
        O      2.3850      0.3580      1.3453
        H     -1.0252      1.8925      0.0485
        H     -1.6735      1.4957      1.6927
        H      1.5058      1.2934     -0.2326
        H      1.0050     -1.6784      0.6209
        H     -0.8689     -1.8429     -1.0364
        H     -1.7939     -0.9391      1.1363
        H      2.1119      0.2310      2.2884
        H      2.6386     -1.0785     -1.0036
        H      0.1296     -0.1667     -2.4481
        H     -2.7102      0.3715     -1.2187
        """)
| import pulsar as psr
def load_ref_system():
""" Returns beta-d-ribopyranose as found in the IQMol fragment library.
All credit to https://github.com/nutjunkie/IQmol
"""
return psr.make_system("""
C 1.2536 0.5171 0.5160
O 0.2337 0.9565 1.4313
C -1.0704 1.1270 0.8432
C -1.5748 -0.2276 0.3071
O -2.8327 -0.0487 -0.3275
C -0.5604 -0.8293 -0.6944
O -0.6145 0.0498 -1.8280
C 0.8657 -0.8516 -0.1081
O 1.6871 -1.0903 -1.2600
O 2.3850 0.3580 1.3453
H -1.0252 1.8925 0.0485
H -1.6735 1.4957 1.6927
H 1.5058 1.2934 -0.2326
H 1.0050 -1.6784 0.6209
H -0.8689 -1.8429 -1.0364
H -1.7939 -0.9391 1.1363
H 2.1119 0.2310 2.2884
H 2.6386 -1.0785 -1.0036
H 0.1296 -0.1667 -2.4481
H -2.7102 0.3715 -1.2187
""")
| en | 0.403924 | Returns beta-d-ribopyranose as found in the IQMol fragment library. All credit to https://github.com/nutjunkie/IQmol C 1.2536 0.5171 0.5160 O 0.2337 0.9565 1.4313 C -1.0704 1.1270 0.8432 C -1.5748 -0.2276 0.3071 O -2.8327 -0.0487 -0.3275 C -0.5604 -0.8293 -0.6944 O -0.6145 0.0498 -1.8280 C 0.8657 -0.8516 -0.1081 O 1.6871 -1.0903 -1.2600 O 2.3850 0.3580 1.3453 H -1.0252 1.8925 0.0485 H -1.6735 1.4957 1.6927 H 1.5058 1.2934 -0.2326 H 1.0050 -1.6784 0.6209 H -0.8689 -1.8429 -1.0364 H -1.7939 -0.9391 1.1363 H 2.1119 0.2310 2.2884 H 2.6386 -1.0785 -1.0036 H 0.1296 -0.1667 -2.4481 H -2.7102 0.3715 -1.2187 | 2.278945 | 2 |
alerta/stats/__init__.py | rudderlabs/alerta | 1 | 6612949 | from .stats import StatsD
| from .stats import StatsD
| none | 1 | 1.078635 | 1 | |
flowws_structure_pretraining/SANNeighbors.py | klarh/flowws-structure-pretraining | 0 | 6612950 | <reponame>klarh/flowws-structure-pretraining<gh_stars>0
import collections
import flowws
from flowws import Argument as Arg
import freud
import numpy as np
class SANN:
    """Solid Angle Nearest Neighbor (SANN) calculator.

    Wraps a freud AABB spatial query and grows the candidate-neighbor pool
    until the parameter-free SANN criterion converges for every query point.

    :param system: object exposing ``box`` and ``positions`` attributes.
    :param r_guess: initial candidate-search radius.
    :param r_scale: factor by which the search is widened on each retry.
    :param ball_count: number of radius ("ball") queries to attempt before
        switching to k-nearest queries with a growing ``k``.
    """
    def __init__(self, system, r_guess=2.0, r_scale=1.25, ball_count=4):
        self.system = system
        self.r_guess = r_guess
        self.r_scale = r_scale
        self.ball_count = ball_count

    @property
    def system(self):
        return self._system

    @system.setter
    def system(self, value):
        # Assigning a new system rebuilds the spatial query structure.
        self._system = value
        self._nq = freud.locality.AABBQuery(self.system.box, self.system.positions)

    def compute(self, query_points):
        """Return a freud NeighborList satisfying the SANN criterion.

        Repeats the query with a widening search until every query point's
        shell converges; raises ``ValueError`` if the box cannot supply
        enough neighbors even at the maximum legal radius.
        """
        done = False
        r_guess = self.r_guess
        # Never search beyond half the smallest box edge length.
        r_max = np.min(self.system.box[:3]) / 2
        total_checks = 0
        clipped_checks = 0
        while not done:
            if total_checks < self.ball_count:
                qargs = dict(mode='ball', r_max=r_guess, exclude_ii=True)
            else:
                # After ball_count failed radius queries, switch to
                # k-nearest queries, growing k geometrically per retry.
                N = 16
                for _ in range(total_checks):
                    N = max(N + 1, int(self.r_scale * N))
                qargs = dict(
                    mode='nearest', r_guess=r_guess, num_neighbors=N, exclude_ii=True
                )
            q = self._nq.query(query_points, qargs)
            nl = q.toNeighborList(sort_by_distance=True)
            (done, result) = self.create_neighbor_list(nl)
            r_guess *= self.r_scale
            total_checks += 1
            if r_guess > r_max:
                if clipped_checks:
                    raise ValueError('Can\'t find enough neighbors in box')
                clipped_checks += 1
                # One last attempt just inside the maximum legal radius.
                r_guess = r_max * 0.999
        return result

    def create_neighbor_list(self, nl):
        """Apply the SANN criterion to a distance-sorted candidate list.

        :param nl: freud NeighborList with candidates sorted by distance.
        :return: ``(True, filtered_list)`` when every query point's shell
            converged within its candidates, else ``(False, None)``.
        """
        all_i_s = nl.query_point_indices
        all_j_s = nl.point_indices  # not used below
        all_d_s = nl.distances
        segments = nl.segments
        counts = nl.neighbor_counts
        # SANN needs at least 3 candidates per query point.
        if np.any(counts < 3):
            return (False, None)
        # Per-point running sum of sorted neighbor distances, computed
        # without a Python loop: take the global cumulative sum, then
        # subtract the total carried over from each preceding segment.
        cumulative_ds = np.cumsum(all_d_s)
        same_i = all_i_s[1:] == all_i_s[:-1]
        ds_to_smear = cumulative_ds[:-1][~same_i]
        ds_to_smear = np.insert(ds_to_smear, 0, 0)
        cumulative_ds -= np.repeat(ds_to_smear, counts)
        # Same segment trick for each neighbor's per-point rank.
        cumulative_sames = np.cumsum(np.insert(same_i, 0, True))
        sames_to_smear = cumulative_sames[:-1][~same_i]
        sames_to_smear = np.insert(sames_to_smear, 0, 1)
        cumulative_sames -= np.repeat(sames_to_smear, counts)
        m = cumulative_sames + 1
        # SANN shell radius: R(m) = sum(d_1..d_m) / (m - 2); the clip keeps
        # the first ranks (m <= 2) from dividing by zero or negative values.
        R = cumulative_ds / np.clip(m - 2, 1, 1e30)
        filt = R >= all_d_s
        filt[segments] = True
        # Converged only if every point rejected at least one candidate,
        # i.e. its shell closed strictly inside the candidate set.
        if np.all(np.add.reduceat(filt, segments) < counts):
            return (True, nl.copy().filter(filt))
        return (False, None)
@flowws.add_stage_arguments
class SANNeighbors(flowws.Stage):
    """Calculate neighbors using the solid angle nearest neighbors algorithm

    https://aip.scitation.org/doi/10.1063/1.4729313
    """
    ARGS = []

    # Lightweight (box, positions) container matching the attribute
    # interface :class:`SANN` expects of its ``system`` argument.
    System = collections.namedtuple('System', ['box', 'positions'])

    def run(self, scope, storage):
        # Expose a factory so downstream stages can build neighbor lists
        # on demand for arbitrary configurations.
        scope['nlist_generator'] = self.get_nlist

    def get_nlist(self, box, positions):
        """Build a SANN neighbor list for the given box and positions."""
        system = self.System(box, positions)
        sann = SANN(system)
        return sann.compute(positions)
| import collections
import flowws
from flowws import Argument as Arg
import freud
import numpy as np
class SANN:
def __init__(self, system, r_guess=2.0, r_scale=1.25, ball_count=4):
self.system = system
self.r_guess = r_guess
self.r_scale = r_scale
self.ball_count = ball_count
@property
def system(self):
return self._system
@system.setter
def system(self, value):
self._system = value
self._nq = freud.locality.AABBQuery(self.system.box, self.system.positions)
def compute(self, query_points):
done = False
r_guess = self.r_guess
r_max = np.min(self.system.box[:3]) / 2
total_checks = 0
clipped_checks = 0
while not done:
if total_checks < self.ball_count:
qargs = dict(mode='ball', r_max=r_guess, exclude_ii=True)
else:
N = 16
for _ in range(total_checks):
N = max(N + 1, int(self.r_scale * N))
qargs = dict(
mode='nearest', r_guess=r_guess, num_neighbors=N, exclude_ii=True
)
q = self._nq.query(query_points, qargs)
nl = q.toNeighborList(sort_by_distance=True)
(done, result) = self.create_neighbor_list(nl)
r_guess *= self.r_scale
total_checks += 1
if r_guess > r_max:
if clipped_checks:
raise ValueError('Can\'t find enough neighbors in box')
clipped_checks += 1
r_guess = r_max * 0.999
return result
def create_neighbor_list(self, nl):
all_i_s = nl.query_point_indices
all_j_s = nl.point_indices
all_d_s = nl.distances
segments = nl.segments
counts = nl.neighbor_counts
if np.any(counts < 3):
return (False, None)
cumulative_ds = np.cumsum(all_d_s)
same_i = all_i_s[1:] == all_i_s[:-1]
ds_to_smear = cumulative_ds[:-1][~same_i]
ds_to_smear = np.insert(ds_to_smear, 0, 0)
cumulative_ds -= np.repeat(ds_to_smear, counts)
cumulative_sames = np.cumsum(np.insert(same_i, 0, True))
sames_to_smear = cumulative_sames[:-1][~same_i]
sames_to_smear = np.insert(sames_to_smear, 0, 1)
cumulative_sames -= np.repeat(sames_to_smear, counts)
m = cumulative_sames + 1
R = cumulative_ds / np.clip(m - 2, 1, 1e30)
filt = R >= all_d_s
filt[segments] = True
if np.all(np.add.reduceat(filt, segments) < counts):
return (True, nl.copy().filter(filt))
return (False, None)
@flowws.add_stage_arguments
class SANNeighbors(flowws.Stage):
"""Calculate neighbors using the solid angle nearest neighbors algorithm
https://aip.scitation.org/doi/10.1063/1.4729313
"""
ARGS = []
System = collections.namedtuple('System', ['box', 'positions'])
def run(self, scope, storage):
scope['nlist_generator'] = self.get_nlist
def get_nlist(self, box, positions):
system = self.System(box, positions)
sann = SANN(system)
return sann.compute(positions) | en | 0.799496 | Calculate neighbors using the solid angle nearest neighbors algorithm https://aip.scitation.org/doi/10.1063/1.4729313 | 2.302559 | 2 |