repo_name stringlengths 7 65 | path stringlengths 5 185 | copies stringlengths 1 4 | size stringlengths 4 6 | content stringlengths 977 990k | license stringclasses 14 values | hash stringlengths 32 32 | line_mean float64 7.18 99.4 | line_max int64 31 999 | alpha_frac float64 0.25 0.95 | ratio float64 1.5 7.84 | autogenerated bool 1 class | config_or_test bool 2 classes | has_no_keywords bool 2 classes | has_few_assignments bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
bd-j/prospector | prospect/likelihood/kernels.py | 1 | 4381 | import numpy as np
__all__ = ["Kernel", "Uncorrelated", "ExpSquared", "Matern", "PhotoCal",
"PhotSamples_MVN"]
class Kernel(object):
    """Base class for Gaussian-process covariance kernels.

    Subclasses must define ``kernel_params`` (the list of intrinsic
    parameter names) and implement ``construct_kernel(metric)``.
    """

    def __init__(self, parnames=None, name=''):
        """
        :param parnames:
            A list of names of the kernel params, used to alias the intrinsic
            parameter names. This way different instances of the same kernel
            can have different parameter names. If not given, the intrinsic
            names in ``kernel_params`` are used.

        :param name:
            A label for this kernel instance.
        """
        # Use a None sentinel instead of a mutable default argument; an
        # empty list keeps the old fall-back behavior.
        if not parnames:
            parnames = self.kernel_params
        assert len(parnames) == len(self.kernel_params)
        # Map intrinsic parameter name -> user-facing (aliased) name.
        self.param_alias = dict(zip(self.kernel_params, parnames))
        self.params = {}
        self.name = name

    def __repr__(self):
        return '{}({})'.format(self.__class__, self.param_alias.items())

    def update(self, **kwargs):
        """Take a dictionary of parameters, pick out the properly named
        parameters according to the alias, and put them in the param state
        dictionary.
        """
        for k in self.kernel_params:
            self.params[k] = kwargs[self.param_alias[k]]

    def __call__(self, metric, weights=None, ndim=2, **extras):
        """Return a covariance matrix, given a metric. Optionally, multiply
        the output kernel by a weight function to induce non-stationarity.

        :param metric: Input passed through to ``construct_kernel``.
        :param weights: Optional per-element weights applied symmetrically.
        :param ndim: Desired dimensionality (1 or 2) of the returned kernel.
        """
        k = self.construct_kernel(metric)
        if ndim != k.ndim:
            # Either promote to 2 dimensions or demote to 1.
            # The latter should never happen...
            k = np.diag(k)
        if weights is None:
            return k
        elif ndim == 2:
            # Symmetric weighting: Sigma_ij = w_i * k_ij * w_j
            Sigma = weights[None, :] * k * weights[:, None]
        else:
            Sigma = k * weights**2
        return Sigma
class Uncorrelated(Kernel):
    """Simple uncorrelated (white) noise model."""
    ndim = 1
    kernel_params = ['amplitude']

    def construct_kernel(self, metric):
        # Constant per-element variance; one entry per element of the metric.
        n = metric.shape[0]
        variance = self.params['amplitude']**2 * np.ones(n)
        if metric.ndim == 2:
            return np.diag(variance)
        if metric.ndim == 1:
            return variance
        raise NotImplementedError
class ExpSquared(Kernel):
    ndim = 2
    npars = 2
    kernel_params = ['amplitude', 'length']

    def construct_kernel(self, metric):
        """Construct a squared-exponential (RBF) covariance matrix."""
        amp = self.params['amplitude']
        length = self.params['length']
        # Pairwise separations between every pair of metric elements.
        dx = metric[:, None] - metric[None, :]
        return amp**2 * np.exp(-dx**2 / (2 * length**2))
class Matern(Kernel):
    ndim = 2
    npars = 2
    kernel_params = ['amplitude', 'length']

    def construct_kernel(self, metric):
        r"""Construct a Matern kernel covariance matrix, for \nu=3/2."""
        amp = self.params['amplitude']
        length = self.params['length']
        # Scaled pairwise distance, then the nu=3/2 Matern form.
        q = np.sqrt(3) * np.abs(metric[:, None] - metric[None, :]) / length
        return amp**2 * (1 + q) * np.exp(-q)
class PhotoCal(Kernel):
    ndim = 2
    npars = 2
    kernel_params = ['amplitude', 'filter_names']

    def construct_kernel(self, metric):
        """Add correlated noise between the specified photometric bands."""
        # Boolean mask of which metric entries belong to the target filters.
        in_band = np.array([f in self.params["filter_names"] for f in metric])
        # Outer product selects the full block, including off-diagonal terms.
        mask = in_band[:, None] * in_band[None, :]
        return mask * self.params["amplitude"]**2
class PhotSamples_MVN(Kernel):
    npars = 0
    kernel_params = []

    def __init__(self, cov, filter_names, parnames=[], name=''):
        super().__init__(parnames=parnames, name=name)
        assert cov.shape[0] == len(filter_names)
        # A purely diagonal covariance can be treated as one-dimensional.
        off_diagonal = cov - np.diag(np.diagonal(cov))
        if np.count_nonzero(off_diagonal):
            self.ndim = 2
        else:
            self.ndim = 1
        self.cov_mat = cov
        self.params["filter_names"] = filter_names

    def construct_kernel(self, metric):
        # Select the rows/columns of the covariance matrix corresponding to
        # the filters listed in `metric`.
        rows = np.array([self.params["filter_names"].index(f) for f in metric])
        return self.cov_mat[rows[:, None], rows]

    def __call__(self, metric, weights=None, ndim=2, **extras):
        assert weights is None, "PhotCorrelated is not meant to be weighted by anything"
        return super().__call__(metric, ndim=ndim, **extras)
| mit | 9da1df6a2a428bc9007dedfdf4023f6e | 31.213235 | 99 | 0.577037 | 3.870141 | false | false | false | false |
pingo-io/pingo-py | pingo/rpi/grove.py | 6 | 1903 | import pingo
grovepi = None
class GrovePi(pingo.Board, pingo.AnalogInputCapable, pingo.PwmOutputCapable):
    """pingo Board backed by the GrovePi shield.

    The ``grovepi`` package is imported lazily into the module-level global
    so this module can still be imported where it is not installed.
    """
    def __init__(self):
        # Rebind the module-level `grovepi = None` placeholder on first use.
        global grovepi
        try:
            import grovepi as grovepi  # noqa
        except ImportError:
            raise ImportError('pingo.rpi.GrovePi requires grovepi installed')
        super(GrovePi, self).__init__()
        # location: gpio_id
        self.ANALOG_PINS = {'A0': 14, 'A1': 15, 'A2': 16, 'A3': 17}
        # Translate pingo mode/state constants to grovepi's API values.
        self.PIN_MODES = {
            pingo.IN: 'INPUT',
            pingo.OUT: 'OUTPUT'
        }
        self.PIN_STATES = {
            pingo.HIGH: 1,
            pingo.LOW: 0
        }
        # Digital pin numbers that support PWM vs. plain digital I/O.
        pwm_pins = [3, 5, 6]
        digital_pins = [0, 1, 2, 4, 7, 8, 9]
        self._add_pins(
            [pingo.PwmPin(self, location)
             for location in pwm_pins] +
            [pingo.DigitalPin(self, location)
             for location in digital_pins] +
            [pingo.AnalogPin(self, location, 10, gpio_id)
             for location, gpio_id in self.ANALOG_PINS.items()]
        )
    def _set_digital_mode(self, pin, mode):
        grovepi.pinMode(pin.location, self.PIN_MODES[mode])
    def _set_pin_state(self, pin, state):
        grovepi.digitalWrite(pin.location, self.PIN_STATES[state])
    def _get_pin_state(self, pin):
        return pingo.LOW if grovepi.digitalRead(pin.location) == 0 else pingo.HIGH
    def _get_pin_value(self, pin):
        # 10-bit reading (cf. AnalogPin bits=10 above) scaled by 10.23,
        # i.e. 0-1023 mapped to a 0-100 range.
        return (grovepi.analogRead(self.ANALOG_PINS[pin.location]) / 10.23)
    def _set_analog_mode(self, pin, mode):
        grovepi.pinMode(self.ANALOG_PINS[pin.location], 'INPUT')
    def _set_pwm_mode(self, pin, mode):
        grovepi.pinMode(pin.location, 'OUTPUT')
    def _set_pwm_frequency(self, pin, value):
        # PWM frequency control is not exposed by the grovepi API used here.
        raise NotImplementedError
    def _set_pwm_duty_cycle(self, pin, value):
        # assumes `value` is a 0.0-100.0 duty cycle mapped onto grovepi's
        # 0-255 analogWrite scale -- TODO confirm against pingo.PwmPin
        grovepi.analogWrite(pin.location, int(value * 2.55))
| mit | 26ee1024d735d52a13e05dcdbca8ec1f | 27.833333 | 82 | 0.576984 | 3.209106 | false | false | false | false |
pingo-io/pingo-py | pingo/iot/server.py | 1 | 1545 | from bottle import Bottle
import json
import pingo
import sys
app = Bottle(__name__)
board = pingo.detect.get_board()
@app.route('/')
def main():
    """Describe the detected board and its pins."""
    # Bug fix: board.pins is a mapping (it is indexed as board.pins[...]
    # elsewhere in this module), so iterating it bare yields only keys and
    # silently mis-unpacks them; iterate items() for (location, pin) pairs.
    pins = {key: repr(value) for key, value in board.pins.items()}
    return {
        'board': repr(board),
        'pins': json.dumps(pins)
    }
@app.route('/mode/<mode>/<pin>')
def mode(mode, pin):
    """Set a pin's mode to 'input' or 'output'."""
    assert mode in ('input', 'output')
    # Bug fix: the original `pingo.IN if 'input' else pingo.OUT` tested the
    # (always truthy) literal, so every request selected IN; compare the
    # request parameter instead.
    pin_mode = pingo.IN if mode == 'input' else pingo.OUT
    pin = board.pins[pin]
    pin.mode = pin_mode
@app.route('/analog')
def analog_pins():
    """List pins that support analog input."""
    # Bug fix: iterate (location, pin) pairs via items(); a bare mapping
    # iteration yields keys only (board.pins is indexed as a mapping below).
    pins = {location: pin for location, pin in board.pins.items()
            if pin.is_analog}
    return {'pins': str(pins)}
@app.route('/analog/<pin>')
def analog_input(pin):
    """Switch the pin to input mode and report its current state."""
    target = board.pins[pin]
    target.mode = pingo.IN
    return {'input': target.state}
@app.route('/analog/<pin>/<signal:float>')
def analog_output(pin, signal):
    """Switch the pin to output mode and drive it with an analog value."""
    target = board.pins[pin]
    target.mode = pingo.OUT
    target.value = signal
    return {'output': signal}
@app.route('/digital')
def digital_pins():
    """List the board's pins."""
    # Bug fix: board.pins is a mapping (indexed as board.pins[...] elsewhere
    # in this module), not a callable -- calling it raised TypeError.
    pins = board.pins
    return {'pins': str(pins)}
@app.route('/digital/<pin>')
def digital_input(pin):
    """Switch the pin to input mode and report its current state."""
    target = board.pins[pin]
    target.mode = pingo.IN
    return {'input': target.state}
@app.route('/digital/<pin>/<signal:int>')
def digital_output(pin, signal):
    """Set a digital pin high (non-zero signal) or low (zero)."""
    target = board.pins[pin]
    target.mode = pingo.OUT
    if signal:
        target.high()
    else:
        target.low()
    return {'output': signal}
if __name__ == '__main__':
    # An optional first CLI argument selects the host interface to bind to.
    kwargs = {'host': sys.argv[1]} if len(sys.argv) > 1 else {}
    app.run(debug=True, **kwargs)
| mit | 5cfb07a6e71c42fad28d010f367271e7 | 19.328947 | 58 | 0.599353 | 3.153061 | false | false | false | false |
kivy/kivy-ios | kivy_ios/recipes/libffi/__init__.py | 1 | 1629 | from kivy_ios.toolchain import Recipe, shprint
import sh
from os.path import exists
class LibffiRecipe(Recipe):
    """kivy-ios recipe that builds libffi as a static library for iOS.

    Builds via libffi's own Xcode project after generating the Darwin
    sources/headers with the script bundled in the libffi tarball.
    """
    version = "3.4.2"
    url = "https://github.com/libffi/libffi/releases/download/v{version}/libffi-{version}.tar.gz"
    library = "build/Release-{arch.sdk}/libffi.a"
    include_per_arch = True
    include_dir = "build_{arch.sdk}-{arch.arch}/include"
    include_name = "ffi"
    archs = ["x86_64", "arm64"]
    def prebuild_arch(self, arch):
        # Patch the source tree only once; the marker guards re-patching.
        if self.has_marker("patched"):
            return
        self.apply_patch("enable-tramp-build.patch")
        # Raise the iOS deployment target hard-coded in the generator script.
        shprint(sh.sed,
                "-i.bak",
                "s/-miphoneos-version-min=7.0/-miphoneos-version-min=9.0/g",
                "generate-darwin-source-and-headers.py")
        self.set_marker("patched")
    def build_arch(self, arch):
        # Run the source/header generator once (renamed with a leading
        # underscore and replaced by an empty file so later builds skip it).
        if exists("generate-darwin-source-and-headers.py"):
            shprint(
                sh.mv,
                "generate-darwin-source-and-headers.py",
                "_generate-darwin-source-and-headers.py")
            shprint(sh.touch, "generate-darwin-source-and-headers.py")
            python3 = sh.Command("python3")
            shprint(python3, "_generate-darwin-source-and-headers.py", "--only-ios")
        # Build the static library for this architecture with xcodebuild.
        shprint(sh.xcodebuild, self.ctx.concurrent_xcodebuild,
                "ONLY_ACTIVE_ARCH=NO",
                "ARCHS={}".format(arch.arch),
                "BITCODE_GENERATION_MODE=bitcode",
                "-sdk", arch.sdk,
                "-project", "libffi.xcodeproj",
                "-target", "libffi-iOS",
                "-configuration", "Release")
# Module-level instance picked up by the kivy-ios toolchain.
recipe = LibffiRecipe()
| mit | fcdf18b195382eb282c8d1b44c1aa210 | 36.022727 | 97 | 0.576427 | 3.525974 | false | false | false | false |
algolia/algoliasearch-client-python | algoliasearch/http/requester.py | 1 | 1604 | from typing import Union
import requests
from requests import Timeout, RequestException, Session
from requests.adapters import HTTPAdapter
from urllib3.util import Retry
from algoliasearch.http.transporter import Response, Request
class Requester(object):
    """Thin HTTP layer on top of `requests`, reusing a single session."""

    def __init__(self):
        # type: () -> None
        self._session = None  # type: Union[None, Session]

    def send(self, request):
        # type: (Request) -> Response
        """Perform the given request and wrap the outcome in a Response."""
        if self._session is None:
            # Create the session lazily on first use.
            self._session = requests.Session()
            # Ask urllib not to make retries on its own.
            self._session.mount("https://", HTTPAdapter(max_retries=Retry(connect=0)))

        prepared = requests.Request(
            method=request.verb,
            url=request.url,
            headers=request.headers,
            data=request.data_as_string,
        ).prepare()  # type: ignore

        try:
            http_response = self._session.send(  # type: ignore
                prepared,
                timeout=(request.connect_timeout, request.timeout),
                proxies=request.proxies,
            )
        except Timeout as err:
            # Timeout is a RequestException subclass, so it must be caught
            # first to be flagged distinctly.
            return Response(error_message=str(err), is_timed_out_error=True)
        except RequestException as err:
            return Response(error_message=str(err), is_network_error=True)

        return Response(http_response.status_code, http_response.json(),
                        http_response.reason)

    def close(self):
        # type: () -> None
        """Dispose of the underlying session, if any."""
        if self._session is not None:
            self._session.close()
            self._session = None
| mit | 9e187c2ffdff2dc6c91c7b8f9f58d649 | 27.642857 | 86 | 0.595387 | 4.300268 | false | false | false | false |
algolia/algoliasearch-client-python | algoliasearch/search_client.py | 1 | 17008 | import base64
import hashlib
import hmac
import re
import time
import warnings
from typing import Optional, Union, List, Iterator
from algoliasearch.exceptions import ValidUntilNotFoundException
from algoliasearch.helpers import endpoint, is_async_available, build_raw_response_batch
from algoliasearch.http.request_options import RequestOptions
from algoliasearch.http.serializer import QueryParametersSerializer
from algoliasearch.http.verb import Verb
from algoliasearch.responses import (
IndexingResponse,
AddApiKeyResponse,
UpdateApiKeyResponse,
DeleteApiKeyResponse,
RestoreApiKeyResponse,
MultipleIndexBatchIndexingResponse,
DictionaryResponse,
)
from algoliasearch.search_index import SearchIndex
from algoliasearch.configs import SearchConfig
from algoliasearch.http.transporter import Transporter
from algoliasearch.http.requester import Requester
from algoliasearch.http.hosts import CallType
class SearchClient(object):
    """Client for the Algolia Search REST API.

    Wraps a Transporter/SearchConfig pair and exposes index management,
    API-key management, multi-cluster user mapping, dictionary, and
    multi-index operations. Build instances via `create` or
    `create_with_config`.
    """
    @property
    def app_id(self):
        # type: () -> str
        # The Algolia application ID this client is configured for.
        return self._config.app_id
    def __init__(self, transporter, search_config):
        # type: (Transporter, SearchConfig) -> None
        self._transporter = transporter
        self._config = search_config
    def init_index(self, name):
        # type: (str) -> SearchIndex
        """Return a SearchIndex proxy bound to the index called `name`."""
        return SearchIndex(self._transporter, self._config, name)
    @staticmethod
    def create(app_id=None, api_key=None):
        # type: (Optional[str], Optional[str]) -> SearchClient
        """Create a client from an application ID and an API key."""
        config = SearchConfig(app_id, api_key)
        return SearchClient.create_with_config(config)
    @staticmethod
    def create_with_config(config):
        # type: (SearchConfig) -> SearchClient
        """Create a client from an explicit SearchConfig.

        Returns the async flavor of the client when async support is
        available in the running environment.
        """
        requester = Requester()
        transporter = Transporter(requester, config)
        client = SearchClient(transporter, config)
        if is_async_available():
            from algoliasearch.search_client_async import SearchClientAsync
            from algoliasearch.http.transporter_async import TransporterAsync
            from algoliasearch.http.requester_async import RequesterAsync
            return SearchClientAsync(
                client, TransporterAsync(RequesterAsync(), config), config
            )
        return client
    def move_index(self, src_index_name, dst_index_name, request_options=None):
        # type: (str, str, Optional[Union[dict, RequestOptions]]) -> IndexingResponse # noqa: E501
        """Rename (move) `src_index_name` to `dst_index_name`."""
        raw_response = self._transporter.write(
            Verb.POST,
            endpoint("1/indexes/{}/operation", src_index_name),
            {"operation": "move", "destination": dst_index_name},
            request_options,
        )
        return IndexingResponse(self.init_index(src_index_name), [raw_response])
    def copy_index(self, src_index_name, dst_index_name, request_options=None):
        # type: (str, str, Optional[Union[dict, RequestOptions]]) -> IndexingResponse # noqa: E501
        """Copy `src_index_name` to `dst_index_name` (optionally scoped)."""
        raw_response = self._transporter.write(
            Verb.POST,
            endpoint("1/indexes/{}/operation", src_index_name),
            {"operation": "copy", "destination": dst_index_name},
            request_options,
        )
        return IndexingResponse(self.init_index(src_index_name), [raw_response])
    def copy_settings(self, src_index_name, dst_index_name, request_options=None):
        # type: (str, str, Optional[Union[dict, RequestOptions]]) -> IndexingResponse # noqa: E501
        """Copy only the settings of `src_index_name` to `dst_index_name`."""
        if request_options is None:
            request_options = {}
        request_options["scope"] = ["settings"]
        return self.copy_index(src_index_name, dst_index_name, request_options)
    def copy_synonyms(self, src_index_name, dst_index_name, request_options=None):
        # type: (str, str, Optional[Union[dict, RequestOptions]]) -> IndexingResponse # noqa: E501
        """Copy only the synonyms of `src_index_name` to `dst_index_name`."""
        if request_options is None:
            request_options = {}
        request_options["scope"] = ["synonyms"]
        return self.copy_index(src_index_name, dst_index_name, request_options)
    def copy_rules(self, src_index_name, dst_index_name, request_options=None):
        # type: (str, str, Optional[Union[dict, RequestOptions]]) -> IndexingResponse # noqa: E501
        """Copy only the rules of `src_index_name` to `dst_index_name`."""
        if request_options is None:
            request_options = {}
        request_options["scope"] = ["rules"]
        return self.copy_index(src_index_name, dst_index_name, request_options)
    def assign_user_id(self, user_id, cluster, request_options=None):
        # type: (str, str,Optional[Union[dict, RequestOptions]]) -> dict
        """Assign a userID to a cluster (multi-cluster management)."""
        if request_options is None:
            request_options = RequestOptions.create(self._config)
        # The affected user is identified through this request header.
        request_options["X-Algolia-User-ID"] = user_id
        return self._transporter.write(
            Verb.POST, "1/clusters/mapping", {"cluster": cluster}, request_options
        )
    def assign_user_ids(self, user_ids, cluster, request_options=None):
        # type: (Union[List[dict], Iterator[dict]], str, Optional[Union[dict, RequestOptions]]) -> dict # noqa: E501
        """Assign multiple userIDs to a cluster in one batch call."""
        return self._transporter.write(
            Verb.POST,
            "1/clusters/mapping/batch",
            {"cluster": cluster, "users": user_ids},
            request_options,
        )
    def remove_user_id(self, user_id, request_options=None):
        # type: (str, Optional[Union[dict, RequestOptions]]) -> dict
        """Remove a userID and its data from the multi-cluster mapping."""
        if request_options is None:
            request_options = RequestOptions.create(self._config)
        request_options["X-Algolia-User-ID"] = user_id
        return self._transporter.write(
            Verb.DELETE, "1/clusters/mapping", None, request_options
        )
    def list_clusters(self, request_options=None):
        # type: (Optional[Union[dict, RequestOptions]]) -> dict
        """List the clusters available in the application."""
        return self._transporter.read(Verb.GET, "1/clusters", {}, request_options)
    def get_user_id(self, user_id, request_options=None):
        # type: (str, Optional[Union[dict, RequestOptions]]) -> dict
        """Return the cluster mapping for one userID."""
        return self._transporter.read(
            Verb.GET, endpoint("1/clusters/mapping/{}", user_id), None, request_options
        )
    def list_user_ids(self, request_options=None):
        # type: (Optional[Union[dict, RequestOptions]]) -> dict
        """List the userIDs assigned to clusters."""
        return self._transporter.read(
            Verb.GET, "1/clusters/mapping", None, request_options
        )
    def get_top_user_ids(self, request_options=None):
        # type: (Optional[Union[dict, RequestOptions]]) -> dict
        """Return the top userIDs per cluster."""
        return self._transporter.read(
            Verb.GET, "1/clusters/mapping/top", None, request_options
        )
    def search_user_ids(self, query, request_options=None):
        # type: (str, Optional[Union[dict, RequestOptions]]) -> dict
        """Search the userIDs assigned to clusters."""
        return self._transporter.read(
            Verb.POST, "1/clusters/mapping/search", {"query": query}, request_options
        )
    def has_pending_mappings(self, request_options=None):
        # type: (Optional[Union[dict, RequestOptions]]) -> dict
        """Check whether cluster user mappings are still being synchronized.

        Pass `{'retrieveMappings': True}` in `request_options` to also
        request the per-cluster mappings (sent as the `getClusters` query
        parameter).
        """
        retrieve_mappings = None
        if isinstance(request_options, dict):
            retrieve_mappings = request_options.pop(
                "retrieveMappings", retrieve_mappings
            )
        if retrieve_mappings:
            if request_options is None or isinstance(request_options, dict):
                request_options = RequestOptions.create(self._config, request_options)
            request_options.query_parameters["getClusters"] = retrieve_mappings
        return self._transporter.read(
            Verb.GET, "1/clusters/mapping/pending", None, request_options
        )
    def list_api_keys(self, request_options=None):
        # type: (Optional[Union[dict, RequestOptions]]) -> dict
        """List all API keys of the application."""
        return self._transporter.read(Verb.GET, "1/keys", None, request_options)
    def get_api_key(self, key, request_options=None):
        # type: (str, Optional[Union[dict, RequestOptions]]) -> dict
        """Return the permissions and metadata of an API key."""
        return self._transporter.read(
            Verb.GET, endpoint("1/keys/{}", key), None, request_options
        )
    def delete_api_key(self, key, request_options=None):
        # type: (str, Optional[Union[dict, RequestOptions]]) -> DeleteApiKeyResponse # noqa: E501
        """Delete an API key."""
        raw_response = self._transporter.write(
            Verb.DELETE, endpoint("1/keys/{}", key), None, request_options
        )
        return DeleteApiKeyResponse(self, raw_response, key)
    def add_api_key(self, acl, request_options=None):
        # type: (list, Optional[Union[dict, RequestOptions]]) -> AddApiKeyResponse # noqa: E501
        """Create a new API key with the given list of permissions (acl)."""
        raw_response = self._transporter.write(
            Verb.POST, "1/keys", {"acl": acl}, request_options
        )
        return AddApiKeyResponse(self, raw_response)
    def update_api_key(self, key, request_options=None):
        # type: (str, Optional[Union[dict, RequestOptions]]) -> UpdateApiKeyResponse # noqa: E501
        """Update an API key; the new attributes come via request_options."""
        if not isinstance(request_options, RequestOptions):
            request_options = RequestOptions.create(self._config, request_options)
        raw_response = self._transporter.write(
            Verb.PUT, endpoint("1/keys/{}", key), {}, request_options
        )
        return UpdateApiKeyResponse(self, raw_response, request_options)
    def restore_api_key(self, key, request_options=None):
        # type: (str, Optional[Union[dict, RequestOptions]]) -> RestoreApiKeyResponse # noqa: E501
        """Restore a previously deleted API key."""
        raw_response = self._transporter.write(
            Verb.POST, endpoint("1/keys/{}/restore", key), None, request_options
        )
        return RestoreApiKeyResponse(self, raw_response, key)
    @staticmethod
    def generate_secured_api_key(parent_api_key, restrictions):
        # type: (str, dict) -> str
        """Generate a secured (virtual) API key from a parent key.

        The restrictions are serialized to a query string, signed with
        HMAC-SHA256 keyed by the parent key, and the signature plus the
        query string are base64-encoded together.
        """
        query_parameters = QueryParametersSerializer.serialize(restrictions)
        secured_key = hmac.new(
            parent_api_key.encode("utf-8"),
            query_parameters.encode("utf-8"),
            hashlib.sha256,
        ).hexdigest()
        base64encoded = base64.b64encode(
            ("{}{}".format(secured_key, query_parameters)).encode("utf-8")
        )
        return str(base64encoded.decode("utf-8"))
    @staticmethod
    def get_secured_api_key_remaining_validity(api_key):
        # type: (str) -> int
        """Return the seconds until a secured key's `validUntil` timestamp.

        Raises ValidUntilNotFoundException when the key embeds no
        `validUntil` restriction; the result is negative for expired keys.
        """
        decoded_string = base64.b64decode(api_key)
        match = re.search(r"validUntil=(\d+)", str(decoded_string))
        if match is None:
            raise ValidUntilNotFoundException("ValidUntil not found in api key.")
        return int(match.group(1)) - int(round(time.time()))
    def list_indices(self, request_options=None):
        # type: (Optional[Union[dict, RequestOptions]]) -> dict
        """List the application's indices with their metadata."""
        return self._transporter.read(Verb.GET, "1/indexes", None, request_options)
    def get_logs(self, request_options=None):
        # type: (Optional[Union[dict, RequestOptions]]) -> dict
        """Return the most recent API call logs."""
        return self._transporter.read(Verb.GET, "1/logs", None, request_options)
    def multiple_queries(self, queries, request_options=None):
        # type: (List[dict], Optional[Union[dict, RequestOptions]]) -> dict
        """Run several search queries, possibly across indices, in one call."""
        return self._transporter.read(
            Verb.POST, "1/indexes/*/queries", {"requests": queries}, request_options
        )
    def multiple_get_objects(self, requests, request_options=None):
        # type: (List[dict], Optional[Union[dict, RequestOptions]]) -> dict
        """Retrieve objects from one or several indices in one call."""
        return self._transporter.read(
            Verb.POST, "1/indexes/*/objects", {"requests": requests}, request_options
        )
    def multiple_batch(self, operations, request_options=None):
        # type: (List[dict], Optional[Union[dict, RequestOptions]]) -> MultipleIndexBatchIndexingResponse # noqa: E501
        """Perform write operations across several indices in one batch."""
        raw_response = self._transporter.write(
            Verb.POST, "1/indexes/*/batch", {"requests": operations}, request_options
        )
        return MultipleIndexBatchIndexingResponse(self, raw_response)
    def wait_task(self, index_name, task_id, request_options=None):
        # type: (str, int, Optional[Union[dict, RequestOptions]]) -> None
        """Block until the given task on `index_name` has completed."""
        self.init_index(index_name).wait_task(task_id, request_options)
    def set_personalization_strategy(self, strategy, request_options=None):
        # type: (dict, Optional[Union[dict, RequestOptions]]) -> dict
        """Deprecated: use PersonalizationClient.set_personalization_strategy."""
        warnings.warn(
            "`%s.%s` is deprecated, use `%s.%s` instead."
            % (
                "SearchClient",
                "set_personalization_strategy",
                "PersonalizationClient",
                "set_personalization_strategy",
            ),
            DeprecationWarning,
        )
        return self._transporter.write(
            Verb.POST,
            "1/recommendation/personalization/strategy",
            strategy,
            request_options,
        )
    def get_personalization_strategy(self, request_options=None):
        # type: (Optional[Union[dict, RequestOptions]]) -> dict
        """Deprecated: use PersonalizationClient.get_personalization_strategy."""
        warnings.warn(
            "`%s.%s` is deprecated, use `%s.%s` instead."
            % (
                "SearchClient",
                "get_personalization_strategy",
                "PersonalizationClient",
                "get_personalization_strategy",
            ),
            DeprecationWarning,
        )
        return self._transporter.read(
            Verb.GET, "1/recommendation/personalization/strategy", None, request_options
        )
    def save_dictionary_entries(
        self, dictionary, dictionary_entries, request_options=None
    ):
        # type: (str, List[dict], Optional[Union[dict, RequestOptions]]) -> DictionaryResponse # noqa: E501
        """Add entries to a dictionary, keeping existing entries."""
        raw_response = self._transporter.write(
            Verb.POST,
            endpoint("1/dictionaries/{}/batch", dictionary),
            {
                "clearExistingDictionaryEntries": False,
                "requests": build_raw_response_batch("addEntry", dictionary_entries),
            },
            request_options,
        )
        return DictionaryResponse(self, raw_response)
    def replace_dictionary_entries(
        self, dictionary, dictionary_entries, request_options=None
    ):
        # type: (str, List[dict], Optional[Union[dict, RequestOptions]]) -> DictionaryResponse # noqa: E501
        """Replace all entries of a dictionary with the given entries."""
        raw_response = self._transporter.write(
            Verb.POST,
            endpoint("1/dictionaries/{}/batch", dictionary),
            {
                "clearExistingDictionaryEntries": True,
                "requests": build_raw_response_batch("addEntry", dictionary_entries),
            },
            request_options,
        )
        return DictionaryResponse(self, raw_response)
    def delete_dictionary_entries(self, dictionary, object_ids, request_options=None):
        # type: (str, Iterator[str], Optional[Union[dict, RequestOptions]])-> DictionaryResponse # noqa: E501
        """Delete the dictionary entries with the given objectIDs."""
        request = [{"objectID": object_id} for object_id in object_ids]
        raw_response = self._transporter.write(
            Verb.POST,
            endpoint("1/dictionaries/{}/batch", dictionary),
            {
                "clearExistingDictionaryEntries": False,
                "requests": build_raw_response_batch("deleteEntry", request),
            },
            request_options,
        )
        return DictionaryResponse(self, raw_response)
    def clear_dictionary_entries(self, dictionary, request_options=None):
        # type: (str, Optional[Union[dict, RequestOptions]]) -> DictionaryResponse # noqa: E501
        """Remove all entries from a dictionary (replace with nothing)."""
        return self.replace_dictionary_entries(dictionary, [], request_options)
    def search_dictionary_entries(self, dictionary, query, request_options=None):
        # type: (str, str, Optional[Union[dict, RequestOptions]]) -> dict
        """Search the entries of a dictionary."""
        return self._transporter.read(
            Verb.POST,
            endpoint("1/dictionaries/{}/search", dictionary),
            {"query": query},
            request_options,
        )
    def set_dictionary_settings(self, dictionary_settings, request_options=None):
        # type: (dict, Optional[Union[dict, RequestOptions]])-> DictionaryResponse # noqa: E501
        """Update the settings shared by all dictionaries."""
        raw_response = self._transporter.write(
            Verb.PUT, "1/dictionaries/*/settings", dictionary_settings, request_options
        )
        return DictionaryResponse(self, raw_response)
    def get_dictionary_settings(self, request_options=None):
        # type: (Optional[Union[dict, RequestOptions]]) -> dict
        """Return the settings shared by all dictionaries."""
        return self._transporter.read(
            Verb.GET, "1/dictionaries/*/settings", {}, request_options
        )
    def close(self):
        # type: () -> None
        """Close the underlying transporter and its resources."""
        return self._transporter.close()  # type: ignore
    def _sync(self):
        # type: () -> SearchClient
        # Synchronous clients return themselves; the async subclass overrides.
        return self
    def custom_request(self, data, uri, method, call_type, request_options=None):
        # type: (dict, str, str, int, Optional[Union[dict, RequestOptions]]) -> dict
        """Issue an arbitrary API call through the transporter.

        `call_type` selects between the write and read transport paths
        (CallType.WRITE vs. read).
        """
        if call_type == CallType.WRITE:
            return self._transporter.write(method, uri, data, request_options)
        else:
            return self._transporter.read(method, uri, data, request_options)
| mit | a20db1ddfd78dca5efdeef9fe2b15326 | 34.881857 | 118 | 0.628645 | 3.990615 | false | false | false | false |
algolia/algoliasearch-client-python | algoliasearch/search_index.py | 1 | 20577 | import copy
import math
import random
import string
import time
from typing import Any, Optional, Dict, List, Union, Iterator, Callable
from algoliasearch.configs import SearchConfig
from algoliasearch.exceptions import (
MissingObjectIdException,
ObjectNotFoundException,
RequestException,
)
from algoliasearch.helpers import assert_object_id, build_raw_response_batch, endpoint
from algoliasearch.http.request_options import RequestOptions
from algoliasearch.http.serializer import SettingsDeserializer
from algoliasearch.http.transporter import Transporter
from algoliasearch.http.verb import Verb
from algoliasearch.iterators import ObjectIterator, SynonymIterator, RuleIterator
from algoliasearch.responses import Response, IndexingResponse, MultipleResponse
class SearchIndex(object):
@property
def app_id(self):
return self._config.app_id
@property
def name(self):
return self._name
def __init__(self, transporter, config, name):
# type: (Transporter, SearchConfig, str) -> None
self._transporter = transporter
self._config = config
self._name = name
def exists(self, request_options=None):
# type: (Optional[Union[dict, RequestOptions]]) -> bool
try:
self.get_settings(request_options)
except RequestException as e:
if e.status_code == 404:
return False
raise e
return True
def save_object(self, obj, request_options=None):
# type: (dict, Optional[Union[dict, RequestOptions]]) -> IndexingResponse # noqa: E501
return self.save_objects([obj], request_options)
def save_objects(self, objects, request_options=None):
# type: (Union[List[dict], Iterator[dict]], Optional[Union[dict, RequestOptions]]) -> IndexingResponse # noqa: E501
generate_object_id = False
if (
isinstance(request_options, dict)
and "autoGenerateObjectIDIfNotExist" in request_options
):
generate_object_id = request_options.pop("autoGenerateObjectIDIfNotExist")
if generate_object_id:
response = self._chunk("addObject", objects, request_options, False)
else:
try:
response = self._chunk("updateObject", objects, request_options)
except MissingObjectIdException as e:
message = str(e)
message += (
". All objects must have an unique objectID "
"(like a primary key) to be valid. "
"Algolia is also able to generate objectIDs "
"automatically but *it's not recommended*. "
"To do it, use "
"`{'autoGenerateObjectIDIfNotExist': True}` "
"on the request options parameter."
)
raise MissingObjectIdException(message, e.obj)
return response
def replace_all_objects(self, objects, request_options=None):
# type: (Union[List[dict], Iterator[dict]], Optional[Union[dict, RequestOptions]]) -> MultipleResponse # noqa: E501
safe = False
if isinstance(request_options, dict) and "safe" in request_options:
safe = request_options.pop("safe")
tmp_index_name = self._create_temporary_name()
responses = MultipleResponse()
responses.push(
self.copy_to(tmp_index_name, {"scope": ["settings", "synonyms", "rules"]})
)
if safe:
responses.wait()
try:
from algoliasearch.search_client import SearchClient
except ImportError: # Already imported.
pass
tmp_client = SearchClient(self._transporter, self._config)
tmp_index = tmp_client.init_index(tmp_index_name)
responses.push(tmp_index.save_objects(objects, request_options))
if safe:
responses.wait()
responses.push(tmp_index.move_to(self._name))
if safe:
responses.wait()
return responses
def get_object(self, object_id, request_options=None):
# type: (str, Optional[Union[dict, RequestOptions]]) -> dict
return self._transporter.read(
Verb.GET,
endpoint("1/indexes/{}/{}", self._name, object_id),
None,
request_options,
)
def get_objects(self, object_ids, request_options=None):
# type: (Iterator[str], Optional[Union[dict, RequestOptions]]) -> dict
if request_options is None or isinstance(request_options, dict):
request_options = RequestOptions.create(self._config, request_options)
# store attributesToRetrieve for use in each request
attributes_to_retrieve = request_options.data.pop("attributesToRetrieve", None)
requests = []
for object_id in object_ids:
request = {"indexName": self._name, "objectID": str(object_id)}
if attributes_to_retrieve:
request["attributesToRetrieve"] = attributes_to_retrieve
requests.append(request)
return self._transporter.read(
Verb.POST, "1/indexes/*/objects", {"requests": requests}, request_options
)
def find_object(self, callback, request_options=None):
# type: (Callable[[Dict[str, Any]], bool], Optional[Union[dict, RequestOptions]]) -> dict # noqa: E501
paginate = True
query = ""
page = 0
if isinstance(request_options, dict):
request_options = copy.copy(request_options)
paginate = request_options.pop("paginate", paginate)
query = request_options.pop("query", query)
request_options = RequestOptions.create(self._config, request_options)
while True:
request_options.data["page"] = page
res = self.search(query, request_options)
for pos, hit in enumerate(res["hits"]):
if callback(hit):
return {
"object": hit,
"position": pos,
"page": page,
}
has_next_page = page + 1 < int(res["nbPages"])
if not paginate or not has_next_page:
raise ObjectNotFoundException
page += 1
def browse_objects(self, request_options=None):
# type: (Optional[Union[dict, RequestOptions]]) -> ObjectIterator
return ObjectIterator(self._transporter, self._name, request_options)
def partial_update_object(self, obj, request_options=None):
# type: (dict, Optional[Union[dict, RequestOptions]]) -> IndexingResponse # noqa: E501
return self.partial_update_objects([obj], request_options)
def partial_update_objects(self, objects, request_options=None):
# type: (Union[List[dict], Iterator[dict]], Optional[Union[dict, RequestOptions]]) -> IndexingResponse # noqa: E501
generate_object_id = False
if isinstance(request_options, dict) and "createIfNotExists" in request_options:
generate_object_id = request_options.pop("createIfNotExists")
if generate_object_id:
response = self._chunk(
"partialUpdateObject", objects, request_options, False
)
else:
response = self._chunk(
"partialUpdateObjectNoCreate", objects, request_options
)
return response
def delete_object(self, object_id, request_options=None):
# type: (str, Optional[Union[dict, RequestOptions]]) -> IndexingResponse # noqa: E501
return self.delete_objects([object_id], request_options)
def delete_objects(self, object_ids, request_options=None):
# type: (Union[List[str], Iterator[str]], Optional[Union[dict, RequestOptions]]) -> IndexingResponse # noqa: E501
objects = list(map(lambda object_id: {"objectID": object_id}, object_ids))
return self._chunk("deleteObject", objects, request_options)
def delete_by(self, filters, request_options=None):
# type: (dict, Optional[Union[dict, RequestOptions]]) -> IndexingResponse # noqa: E501
raw_response = self._transporter.write(
Verb.POST,
endpoint("1/indexes/{}/deleteByQuery", self._name),
filters,
request_options,
)
return IndexingResponse(self, [raw_response])
def clear_objects(self, request_options=None):
# type: (Optional[Union[dict, RequestOptions]]) -> IndexingResponse
raw_response = self._transporter.write(
Verb.POST, endpoint("1/indexes/{}/clear", self._name), None, request_options
)
return IndexingResponse(self, [raw_response])
def set_settings(self, settings, request_options=None):
    # type: (dict, Optional[Union[dict, RequestOptions]]) -> IndexingResponse  # noqa: E501
    """Replace the index settings with the given mapping."""
    url = endpoint("1/indexes/{}/settings", self._name)
    raw_response = self._transporter.write(
        Verb.PUT, url, settings, request_options
    )
    return IndexingResponse(self, [raw_response])
def get_settings(self, request_options=None):
    # type: (Optional[Union[dict, RequestOptions]]) -> dict  # noqa: E501
    """Fetch the index settings (always requesting settings version 2)."""
    opts = request_options
    if opts is None or isinstance(opts, dict):
        opts = RequestOptions.create(self._config, opts)
    # Force the modern settings format regardless of caller options.
    opts.query_parameters["getVersion"] = 2
    raw_response = self._transporter.read(
        Verb.GET,
        endpoint("1/indexes/{}/settings", self._name),
        None,
        opts,
    )
    return SettingsDeserializer.deserialize(raw_response)
def search(self, query, request_options=None):
    # type: (Optional[str], Optional[Union[dict, RequestOptions]]) -> dict  # noqa: E501
    """Run a full-text query against the index and return the raw hits."""
    payload = {"query": str(query)}
    url = endpoint("1/indexes/{}/query", self._name)
    return self._transporter.read(Verb.POST, url, payload, request_options)
def search_for_facet_values(self, facet_name, facet_query, request_options=None):
    # type: (str, str, Optional[Union[dict, RequestOptions]]) -> dict  # noqa: E501
    """Search the values of one facet for the given facet query."""
    url = endpoint("1/indexes/{}/facets/{}/query", self._name, facet_name)
    payload = {"facetQuery": facet_query}
    return self._transporter.read(Verb.POST, url, payload, request_options)
def save_synonym(self, synonym, request_options=None):
    # type: (dict, Optional[Union[dict, RequestOptions]]) -> IndexingResponse  # noqa: E501
    """Save a single synonym record via the batched variant."""
    records = [synonym]
    return self.save_synonyms(records, request_options)
def save_synonyms(self, synonyms, request_options=None):
    # type: (Union[List[dict], Iterator[dict]], Optional[Union[dict, RequestOptions]]) -> IndexingResponse  # noqa: E501
    """Save several synonym records in a single batch call."""
    if not synonyms:
        # Nothing to send; mimic a successful empty response.
        return IndexingResponse(self, [])

    assert_object_id(synonyms)
    url = endpoint("1/indexes/{}/synonyms/batch", self._name)
    raw_response = self._transporter.write(
        Verb.POST, url, list(synonyms), request_options
    )
    return IndexingResponse(self, [raw_response])
def replace_all_synonyms(self, synoyms, request_options=None):
    # type: (Union[List[dict], Iterator[dict]], Optional[Union[dict, RequestOptions]]) -> IndexingResponse  # noqa: E501
    """Atomically replace every existing synonym with the given set.

    NOTE(review): the parameter name keeps the upstream typo ``synoyms``
    so keyword-argument callers are not broken.
    """
    opts = request_options
    if opts is None or isinstance(opts, dict):
        opts = RequestOptions.create(self._config, opts)
    # Tell the engine to drop synonyms that are not part of this batch.
    opts["replaceExistingSynonyms"] = True
    return self.save_synonyms(list(synoyms), opts)
def get_synonym(self, object_id, request_options=None):
    # type: (str, Optional[Union[dict, RequestOptions]]) -> dict
    """Fetch one synonym record by its objectID."""
    url = endpoint("1/indexes/{}/synonyms/{}", self._name, object_id)
    return self._transporter.read(Verb.GET, url, None, request_options)
def search_synonyms(self, query, request_options=None):
    # type: (str, Optional[Union[dict, RequestOptions]]) -> dict  # noqa: E501
    """Search the index synonyms for the given query string."""
    url = endpoint("1/indexes/{}/synonyms/search", self._name)
    payload = {"query": str(query)}
    return self._transporter.read(Verb.POST, url, payload, request_options)
def browse_synonyms(self, request_options=None):
    # type: (Optional[Union[dict, RequestOptions]]) -> SynonymIterator
    """Return a lazy iterator over every synonym of this index."""
    return SynonymIterator(self._transporter, self._name, request_options)
def delete_synonym(self, object_id, request_options=None):
    # type: (str, Optional[Union[dict, RequestOptions]]) -> IndexingResponse  # noqa: E501
    """Delete a single synonym by its objectID."""
    url = endpoint("1/indexes/{}/synonyms/{}", self._name, object_id)
    raw_response = self._transporter.write(
        Verb.DELETE, url, None, request_options
    )
    return IndexingResponse(self, [raw_response])
def clear_synonyms(self, request_options=None):
    # type: (Optional[Union[dict, RequestOptions]]) -> IndexingResponse
    """Remove every synonym from the index."""
    url = endpoint("1/indexes/{}/synonyms/clear", self._name)
    raw_response = self._transporter.write(Verb.POST, url, None, request_options)
    return IndexingResponse(self, [raw_response])
def save_rule(self, rule, request_options=None):
    # type: (dict, Optional[Union[dict, RequestOptions]]) -> IndexingResponse  # noqa: E501
    """Save a single query rule via the batched variant."""
    records = [rule]
    return self.save_rules(records, request_options)
def save_rules(self, rules, request_options=None):
    # type: (Union[List[dict], Iterator[dict]], Optional[Union[dict, RequestOptions]]) -> IndexingResponse  # noqa: E501
    """Save several query rules in a single batch call."""
    if not rules:
        # Nothing to send; mimic a successful empty response.
        return IndexingResponse(self, [])

    assert_object_id(rules)
    url = endpoint("1/indexes/{}/rules/batch", self._name)
    raw_response = self._transporter.write(
        Verb.POST, url, list(rules), request_options
    )
    return IndexingResponse(self, [raw_response])
def replace_all_rules(self, rules, request_options=None):
    # type: (Union[List[dict], Iterator[dict]], Optional[Union[dict, RequestOptions]]) -> IndexingResponse  # noqa: E501
    """Atomically replace every existing query rule with the given set."""
    opts = request_options
    if opts is None or isinstance(opts, dict):
        opts = RequestOptions.create(self._config, opts)
    # Ask the engine to drop rules that are not part of this batch.
    opts.query_parameters["clearExistingRules"] = 1
    return self.save_rules(list(rules), opts)
def get_rule(self, object_id, request_options=None):
    # type: (str, Optional[Union[dict, RequestOptions]]) -> dict
    """Fetch one query rule by its objectID."""
    url = endpoint("1/indexes/{}/rules/{}", self._name, object_id)
    return self._transporter.read(Verb.GET, url, None, request_options)
def search_rules(self, query, request_options=None):
    # type: (str, Optional[Union[dict, RequestOptions]]) -> dict  # noqa: E501
    """Search the index query rules for the given query string."""
    url = endpoint("1/indexes/{}/rules/search", self._name)
    payload = {"query": str(query)}
    return self._transporter.read(Verb.POST, url, payload, request_options)
def browse_rules(self, request_options=None):
    # type: (Optional[Union[dict, RequestOptions]]) -> RuleIterator
    """Return a lazy iterator over every query rule of this index."""
    return RuleIterator(self._transporter, self._name, request_options)
def delete_rule(self, object_id, request_options=None):
    # type: (str, Optional[Union[dict, RequestOptions]]) -> IndexingResponse  # noqa: E501
    """Delete a single query rule by its objectID."""
    url = endpoint("1/indexes/{}/rules/{}", self._name, object_id)
    raw_response = self._transporter.write(
        Verb.DELETE, url, None, request_options
    )
    return IndexingResponse(self, [raw_response])
def clear_rules(self, request_options=None):
    # type: (Optional[Union[dict, RequestOptions]]) -> IndexingResponse
    """Remove every query rule from the index."""
    url = endpoint("1/indexes/{}/rules/clear", self._name)
    raw_response = self._transporter.write(Verb.POST, url, None, request_options)
    return IndexingResponse(self, [raw_response])
def get_task(self, task_id, request_options=None):
    # type: (int, Optional[Union[dict, RequestOptions]]) -> dict
    """Fetch the status record of an indexing task.

    Raises AssertionError when ``task_id`` is falsy.
    """
    assert task_id, "task_id cannot be empty."
    return self._transporter.read(
        # Fixed: was the bare string "GET"; every sibling method uses the
        # Verb constants, so use Verb.GET here for consistency.
        Verb.GET,
        endpoint("1/indexes/{}/task/{}", self._name, task_id),
        None,
        request_options,
    )
def wait_task(self, task_id, request_options=None):
    # type: (int, Optional[Union[dict, RequestOptions]]) -> None
    """Poll the task endpoint until the task reaches 'published'.

    The delay between polls grows every 10 attempts; the configured wait
    time is expressed in microseconds, hence the division by 1e6.
    """
    attempt = 1
    task = self.get_task(task_id, request_options)
    while task["status"] != "published":
        attempt += 1
        backoff_factor = math.ceil(attempt / 10)
        sleep_for = backoff_factor * self._config.wait_task_time_before_retry
        time.sleep(sleep_for / 1000000.0)
        task = self.get_task(task_id, request_options)
def delete(self, request_options=None):
    # type: (Optional[Union[dict, RequestOptions]]) -> IndexingResponse
    """Delete the whole index.

    NOTE(review): the original type comment said ``Response`` but the
    method actually returns an ``IndexingResponse``.
    """
    raw_response = self._transporter.write(
        Verb.DELETE, endpoint("1/indexes/{}", self._name), None, request_options
    )
    return IndexingResponse(self, [raw_response])
def batch(self, requests, request_options=None):
    # type: (Union[List[dict], Iterator[dict]], Optional[Union[dict, RequestOptions]]) -> IndexingResponse  # noqa: E501
    """Send a raw batch of operations and wrap the reply."""
    return IndexingResponse(self, [self._raw_batch(requests, request_options)])
def _chunk(self, action, objects, request_options, validate_object_id=True):
    # type: (str, Union[List[dict], Iterator[dict]], Optional[Union[dict, RequestOptions]], bool) -> IndexingResponse  # noqa: E501
    """Split *objects* into config.batch_size chunks and ship each one
    as a raw batch request with the given *action*.

    When ``validate_object_id`` is True every chunk is checked for an
    objectID before being sent.
    """
    raw_responses = []
    batch_size = self._config.batch_size

    def _flush(batch):
        # Validate (optionally) and send one accumulated chunk.
        if validate_object_id:
            assert_object_id(batch)
        requests = build_raw_response_batch(action, batch)
        raw_responses.append(self._raw_batch(requests, request_options))

    batch = []
    for obj in objects:
        batch.append(obj)
        if len(batch) == batch_size:
            _flush(batch)
            batch = []

    if batch:  # idiomatic truthiness check instead of len(batch)
        _flush(batch)

    return IndexingResponse(self, raw_responses)
def _raw_batch(self, requests, request_options):
    # type: (Union[List[dict], Iterator[dict]], Optional[Union[dict, RequestOptions]]) -> dict  # noqa: E501
    """POST the given operations to the index batch endpoint."""
    url = endpoint("1/indexes/{}/batch", self._name)
    payload = {"requests": list(requests)}
    return self._transporter.write(Verb.POST, url, payload, request_options)
def move_to(self, name, request_options=None):
    # type: (str, Optional[Union[dict, RequestOptions]]) -> IndexingResponse  # noqa: E501
    """Move (rename) this index to *name*."""
    payload = {"operation": "move", "destination": name}
    raw_response = self._transporter.write(
        Verb.POST,
        endpoint("1/indexes/{}/operation", self._name),
        payload,
        request_options,
    )
    return IndexingResponse(self, [raw_response])
def copy_to(self, name, request_options=None):
    # type: (str, Optional[Union[dict, RequestOptions]]) -> IndexingResponse  # noqa: E501
    """Copy this index (records, settings, synonyms, rules) to *name*."""
    payload = {"operation": "copy", "destination": name}
    raw_response = self._transporter.write(
        Verb.POST,
        endpoint("1/indexes/{}/operation", self._name),
        payload,
        request_options,
    )
    return IndexingResponse(self, [raw_response])
@staticmethod
def get_object_position(res, object_id):
# type: (Dict[str, Any], str) -> int
for i, hit in enumerate(res["hits"]):
if hit.get("objectID") == object_id:
return i
return -1
def _create_temporary_name(self):
# type: () -> str
letters = string.ascii_letters
random_string = "".join(random.choice(letters) for i in range(10))
tmp_index_name = "{}_tmp_{}".format(self._name, random_string)
return tmp_index_name
def _sync(self):
    # type: () -> SearchIndex
    """Return this index itself: it is already the synchronous variant
    (the async subclass overrides this to unwrap the sync client)."""
    return self
| mit | 6658d4b5dc74e3a35e8357708ecca72c | 34.054514 | 134 | 0.602712 | 4.111289 | false | false | false | false |
algolia/algoliasearch-client-python | algoliasearch/recommendation_client.py | 1 | 4194 | import warnings
from typing import Optional, Union
from algoliasearch.configs import RecommendationConfig
from algoliasearch.helpers import is_async_available
from algoliasearch.http.request_options import RequestOptions
from algoliasearch.http.requester import Requester
from algoliasearch.http.transporter import Transporter
from algoliasearch.http.verb import Verb
class RecommendationClient(object):
    """Deprecated shim kept for backward compatibility.

    Every entry point emits a DeprecationWarning directing users to
    PersonalizationClient and otherwise behaves exactly as before.
    All warnings now pass ``stacklevel=2`` so the reported location is
    the caller's line instead of this library's internals.
    """

    def __init__(self, transporter, config):
        # type: (Transporter, RecommendationConfig) -> None
        warnings.warn(
            "`%s.%s` is deprecated, use `%s.%s` instead."
            % (
                "RecommendationClient",
                "init",
                "PersonalizationClient",
                "init",
            ),
            DeprecationWarning,
            stacklevel=2,
        )
        self._transporter = transporter
        self._config = config

    @staticmethod
    def create(app_id=None, api_key=None, region=None):
        # type: (Optional[str], Optional[str], Optional[str]) -> RecommendationClient  # noqa: E501
        """Build a client from credentials (deprecated)."""
        warnings.warn(
            "`%s.%s` is deprecated, use `%s.%s` instead."
            % (
                "RecommendationClient",
                "create",
                "PersonalizationClient",
                "create",
            ),
            DeprecationWarning,
            stacklevel=2,
        )
        config = RecommendationConfig(app_id, api_key, region)
        return RecommendationClient.create_with_config(config)

    @staticmethod
    def create_with_config(config):
        # type: (RecommendationConfig) -> RecommendationClient
        """Build a client from a prepared config object (deprecated)."""
        warnings.warn(
            "`%s.%s` is deprecated, use `%s.%s` instead."
            % (
                "RecommendationClient",
                "create_with_config",
                "PersonalizationClient",
                "create_with_config",
            ),
            DeprecationWarning,
            stacklevel=2,
        )
        requester = Requester()
        transporter = Transporter(requester, config)
        client = RecommendationClient(transporter, config)

        # Upgrade to the async flavor when an event-loop stack is available.
        if is_async_available():
            from algoliasearch.recommendation_client_async import (
                RecommendationClientAsync,
            )
            from algoliasearch.http.transporter_async import TransporterAsync
            from algoliasearch.http.requester_async import RequesterAsync

            return RecommendationClientAsync(
                client, TransporterAsync(RequesterAsync(), config), config
            )

        return client

    def set_personalization_strategy(
        self, personalization_strategy, request_options=None
    ):  # noqa: E501
        # type: (dict, Optional[Union[dict, RequestOptions]]) -> dict
        """Store the personalization strategy (deprecated)."""
        warnings.warn(
            "`%s.%s` is deprecated, use `%s.%s` instead."
            % (
                "RecommendationClient",
                "set_personalization_strategy",
                "PersonalizationClient",
                "set_personalization_strategy",
            ),
            DeprecationWarning,
            stacklevel=2,
        )
        return self._transporter.write(
            Verb.POST,
            "1/strategies/personalization",
            personalization_strategy,
            request_options,
        )

    def get_personalization_strategy(self, request_options=None):
        # type: (Optional[Union[dict, RequestOptions]]) -> dict
        """Fetch the personalization strategy (deprecated)."""
        warnings.warn(
            "`%s.%s` is deprecated, use `%s.%s` instead."
            % (
                "RecommendationClient",
                "get_personalization_strategy",
                "PersonalizationClient",
                "get_personalization_strategy",
            ),
            DeprecationWarning,
            stacklevel=2,
        )
        return self._transporter.read(
            Verb.GET, "1/strategies/personalization", None, request_options
        )

    def close(self):
        # type: () -> None
        """Close the underlying transporter (deprecated)."""
        warnings.warn(
            "`%s.%s` is deprecated, use `%s.%s` instead."
            % (
                "RecommendationClient",
                "close",
                "PersonalizationClient",
                "close",
            ),
            DeprecationWarning,
            stacklevel=2,
        )
        return self._transporter.close()  # type: ignore
| mit | eb4143dd205d243e5511c2994004d99b | 29.613139 | 99 | 0.556986 | 4.598684 | false | true | false | false |
gae-init/gae-init-debug | main/control/test.py | 11 | 2752 | # coding: utf-8
import flask
import flask_wtf
import wtforms
import auth
import util
from main import app
# Names of the styleguide/demo test pages; each one maps to a template
# under admin/test/ and is validated by the admin_test() view below.
TESTS = [
    'alert',
    'badge',
    'button',
    'filter',
    'font',
    'form',
    'grid',
    'heading',
    'label',
    'pageres',
    'pagination',
    'paragraph',
    'responsive',
    'social',
    'table',
]
class TestForm(flask_wtf.FlaskForm):
    """Demo form exercising every supported WTForms field/widget style.

    Used only by the admin styleguide pages; it is never persisted.
    NOTE(review): ``wtforms.validators.required`` / ``email`` are the
    legacy lowercase aliases (deprecated in newer WTForms) — confirm the
    pinned WTForms version before modernizing.
    """
    name = wtforms.StringField(
        'Text',
        [wtforms.validators.required()], filters=[util.strip_filter],
        description='This is a very important field',
    )
    number = wtforms.IntegerField('Integer', [wtforms.validators.optional()])
    email = wtforms.StringField(
        'Email',
        [wtforms.validators.optional(), wtforms.validators.email()],
        filters=[util.email_filter],
    )
    date = wtforms.DateField('Date', [wtforms.validators.optional()])
    textarea = wtforms.TextAreaField('Textarea')
    boolean = wtforms.BooleanField(
        'Render it as Markdown',
        [wtforms.validators.optional()],
    )
    password = wtforms.PasswordField(
        'Password',
        [wtforms.validators.optional(), wtforms.validators.length(min=6)],
    )
    password_visible = wtforms.StringField(
        'Password visible',
        [wtforms.validators.optional(), wtforms.validators.length(min=6)],
        description='Visible passwords for the win!'
    )
    prefix = wtforms.StringField('Prefix', [wtforms.validators.optional()])
    suffix = wtforms.StringField('Suffix', [wtforms.validators.required()])
    both = wtforms.IntegerField('Both', [wtforms.validators.required()])
    select = wtforms.SelectField(
        'Language',
        [wtforms.validators.optional()],
        choices=[(s, s.title()) for s in ['english', 'greek', 'spanish']]
    )
    checkboxes = wtforms.SelectMultipleField(
        'User permissions',
        [wtforms.validators.required()],
        choices=[(c, c.title()) for c in ['admin', 'moderator', 'slave']]
    )
    radios = wtforms.SelectField(
        'Choose your weapon',
        [wtforms.validators.optional()],
        choices=[(r, r.title()) for r in ['gun', 'knife', 'chainsaw', 'sword']]
    )
    public = wtforms.StringField('Public Key', [wtforms.validators.optional()])
    private = wtforms.StringField('Private Key', [wtforms.validators.optional()])
    recaptcha = flask_wtf.RecaptchaField()
@app.route('/admin/test/<test>/', methods=['GET', 'POST'])
@app.route('/admin/test/', methods=['GET', 'POST'])
@auth.admin_required
def admin_test(test=None):
if test and test not in TESTS:
flask.abort(404)
form = TestForm()
if form.validate_on_submit():
pass
return flask.render_template(
'admin/test/test_one.html' if test else 'admin/test/test.html',
title='Test: %s' % test.title() if test else 'Test',
html_class='test',
form=form,
test=test,
tests=TESTS,
back_url_for='admin_test' if test else None,
)
| mit | e5129adebbdddc3529e77a98f091a29d | 26.79798 | 79 | 0.667151 | 3.487959 | false | true | false | false |
spyder-ide/qtpy | qtpy/compat.py | 1 | 5700 | #
# Copyright © 2009- The Spyder Development Team
# Licensed under the terms of the MIT License
"""
Compatibility functions
"""
import sys
from . import (
PYQT5,
PYQT6,
PYSIDE2,
PYSIDE6,
)
from .QtWidgets import QFileDialog
# On Python 3 the only text type is ``str`` (kept as a tuple for the
# historical Python 2 API, where it also contained ``unicode``).
TEXT_TYPES = (str,)
def is_text_string(obj):
    """Return True if `obj` is a text string, False if it is anything else,
    like binary data."""
    if isinstance(obj, str):
        return True
    return False
def to_text_string(obj, encoding=None):
    """Convert `obj` to a (unicode) text string.

    With an *encoding*, bytes-like objects are decoded; text passes
    through unchanged.
    """
    if isinstance(obj, str):
        # Already text — avoid double conversion.
        return obj
    if encoding is None:
        return str(obj)
    return str(obj, encoding)
# =============================================================================
# QVariant conversion utilities
# =============================================================================
# Legacy flag: PyQt API #1 (where QVariant existed) is never used by qtpy,
# so this is hard-wired to False.
PYQT_API_1 = False
def to_qvariant(obj=None):  # analysis:ignore
    """Convert Python object to QVariant
    This is a transitional function from PyQt API#1 (QVariant exist)
    to PyQt API#2 and Pyside (QVariant does not exist)"""
    # API #2 / PySide: QVariant does not exist, so this is an identity shim.
    return obj
def from_qvariant(qobj=None, pytype=None):  # analysis:ignore
    """Convert QVariant object to Python object
    This is a transitional function from PyQt API #1 (QVariant exist)
    to PyQt API #2 and Pyside (QVariant does not exist)"""
    # API #2 / PySide: values are already Python objects; pytype is ignored.
    return qobj
# =============================================================================
# Wrappers around QFileDialog static methods
# =============================================================================
def getexistingdirectory(parent=None, caption='', basedir='',
                         options=QFileDialog.ShowDirsOnly):
    """Wrapper around QtGui.QFileDialog.getExistingDirectory static method
    Compatible with PyQt >=v4.4 (API #1 and #2) and PySide >=v1.0"""
    # Calling QFileDialog static method
    if sys.platform == "win32":
        # On Windows platforms: redirect standard outputs
        # (works around console spam from the native dialog).
        _temp1, _temp2 = sys.stdout, sys.stderr
        sys.stdout, sys.stderr = None, None
    try:
        result = QFileDialog.getExistingDirectory(parent, caption, basedir,
                                                  options)
    finally:
        if sys.platform == "win32":
            # On Windows platforms: restore standard outputs even when the
            # dialog call raised.
            sys.stdout, sys.stderr = _temp1, _temp2
    if not is_text_string(result):
        # PyQt API #1
        result = to_text_string(result)
    return result
def _qfiledialog_wrapper(attr, parent=None, caption='', basedir='',
                         filters='', selectedfilter='', options=None):
    """Call the QFileDialog static method *attr* and normalize its result.

    Returns the ``(output, selectedfilter)`` tuple produced by the Qt5/Qt6
    bindings.  On Windows the standard streams are temporarily silenced to
    work around console spam from the native dialog.
    """
    if options is None:
        options = QFileDialog.Option(0)
    func = getattr(QFileDialog, attr)
    # Calling QFileDialog static method
    if sys.platform == "win32":
        # On Windows platforms: redirect standard outputs
        _temp1, _temp2 = sys.stdout, sys.stderr
        sys.stdout, sys.stderr = None, None
    try:
        result = func(parent, caption, basedir, filters, selectedfilter,
                      options)
    finally:
        if sys.platform == "win32":
            # Fixed: restore the streams even if the dialog call raised;
            # previously an exception left sys.stdout/sys.stderr as None
            # (getexistingdirectory already guards with try/finally).
            sys.stdout, sys.stderr = _temp1, _temp2
    output, selectedfilter = result
    # Always returns the tuple (output, selectedfilter)
    return output, selectedfilter
def getopenfilename(parent=None, caption='', basedir='', filters='',
                    selectedfilter='', options=None):
    """Wrapper around QtGui.QFileDialog.getOpenFileName static method
    Returns a tuple (filename, selectedfilter) -- when dialog box is canceled,
    returns a tuple of empty strings
    Compatible with PyQt >=v4.4 (API #1 and #2) and PySide >=v1.0"""
    kwargs = dict(parent=parent, caption=caption, basedir=basedir,
                  filters=filters, selectedfilter=selectedfilter,
                  options=options)
    return _qfiledialog_wrapper('getOpenFileName', **kwargs)
def getopenfilenames(parent=None, caption='', basedir='', filters='',
                     selectedfilter='', options=None):
    """Wrapper around QtGui.QFileDialog.getOpenFileNames static method
    Returns a tuple (filenames, selectedfilter) -- when dialog box is canceled,
    returns a tuple (empty list, empty string)
    Compatible with PyQt >=v4.4 (API #1 and #2) and PySide >=v1.0"""
    kwargs = dict(parent=parent, caption=caption, basedir=basedir,
                  filters=filters, selectedfilter=selectedfilter,
                  options=options)
    return _qfiledialog_wrapper('getOpenFileNames', **kwargs)
def getsavefilename(parent=None, caption='', basedir='', filters='',
                    selectedfilter='', options=None):
    """Wrapper around QtGui.QFileDialog.getSaveFileName static method
    Returns a tuple (filename, selectedfilter) -- when dialog box is canceled,
    returns a tuple of empty strings
    Compatible with PyQt >=v4.4 (API #1 and #2) and PySide >=v1.0"""
    kwargs = dict(parent=parent, caption=caption, basedir=basedir,
                  filters=filters, selectedfilter=selectedfilter,
                  options=options)
    return _qfiledialog_wrapper('getSaveFileName', **kwargs)
# =============================================================================
def isalive(object):
    """Wrapper around sip.isdeleted and shiboken.isValid which tests whether
    an object is currently alive."""
    if PYQT5 or PYQT6:
        from . import sip
        # sip reports whether the wrapped C++ object has been deleted.
        return not sip.isdeleted(object)
    elif PYSIDE2 or PYSIDE6:
        from . import shiboken
        # shiboken's notion of validity matches "not deleted".
        return shiboken.isValid(object)
    # NOTE(review): implicitly returns None when no binding flag is set —
    # presumably unreachable since qtpy always selects exactly one binding.
| mit | 373bfab841b69eeb796206e8790d14ec | 37.248322 | 79 | 0.59256 | 4.523016 | false | false | false | false |
spyder-ide/qtpy | qtpy/Qt3DLogic.py | 1 | 1343 | # -----------------------------------------------------------------------------
# Copyright © 2009- The Spyder Development Team
#
# Licensed under the terms of the MIT License
# (see LICENSE.txt for details)
# -----------------------------------------------------------------------------
"""Provides Qt3DLogic classes and functions."""
from . import (
PYQT5,
PYQT6,
PYSIDE2,
PYSIDE6,
QtModuleNotInstalledError,
)
# Re-export the Qt3DLogic module of whichever binding qtpy selected.
# The 3D modules ship as separate add-on packages for PyQt, hence the
# ModuleNotFoundError translation into a qtpy-specific error.
if PYQT5:
    try:
        from PyQt5.Qt3DLogic import *
    except ModuleNotFoundError as error:
        raise QtModuleNotInstalledError(
            name='Qt3DLogic', missing_package='PyQt3D'
        ) from error
elif PYQT6:
    try:
        from PyQt6.Qt3DLogic import *
    except ModuleNotFoundError as error:
        raise QtModuleNotInstalledError(
            name='Qt3DLogic', missing_package='PyQt6-3D'
        ) from error
elif PYSIDE2:
    # PySide's star-import is broken for the 3D modules, so copy the
    # members by hand.
    # https://bugreports.qt.io/projects/PYSIDE/issues/PYSIDE-1026
    import PySide2.Qt3DLogic as __temp
    import inspect
    for __name in inspect.getmembers(__temp.Qt3DLogic):
        globals()[__name[0]] = __name[1]
elif PYSIDE6:
    # https://bugreports.qt.io/projects/PYSIDE/issues/PYSIDE-1026
    import PySide6.Qt3DLogic as __temp
    import inspect
    for __name in inspect.getmembers(__temp.Qt3DLogic):
        globals()[__name[0]] = __name[1]
| mit | 3cd048374f2601e3fd058c9b7f64876d | 28.822222 | 79 | 0.584203 | 3.845272 | false | false | false | false |
spyder-ide/qtpy | qtpy/QtWidgets.py | 1 | 2874 | # -----------------------------------------------------------------------------
# Copyright © 2014-2015 Colin Duquesnoy
# Copyright © 2009- The Spyder Development Team
#
# Licensed under the terms of the MIT License
# (see LICENSE.txt for details)
# -----------------------------------------------------------------------------
"""Provides widget classes and functions."""
from . import PYQT5, PYQT6, PYSIDE2, PYSIDE6
# Re-export QtWidgets from the selected binding.  For Qt6 bindings a
# compatibility layer is added: classes that moved out of QtWidgets are
# re-imported here, and methods renamed/removed in Qt6 are monkeypatched
# back so code written against the Qt5 API keeps working.
if PYQT5:
    from PyQt5.QtWidgets import *
elif PYQT6:
    from PyQt6 import QtWidgets
    from PyQt6.QtWidgets import *
    # Classes relocated to QtGui / QtOpenGLWidgets in Qt6.
    from PyQt6.QtGui import QAction, QActionGroup, QShortcut, QFileSystemModel, QUndoCommand
    from PyQt6.QtOpenGLWidgets import QOpenGLWidget

    # Map missing/renamed methods
    QTextEdit.setTabStopWidth = lambda self, *args, **kwargs: self.setTabStopDistance(*args, **kwargs)
    QTextEdit.tabStopWidth = lambda self, *args, **kwargs: self.tabStopDistance(*args, **kwargs)
    QTextEdit.print_ = lambda self, *args, **kwargs: self.print(*args, **kwargs)
    QPlainTextEdit.setTabStopWidth = lambda self, *args, **kwargs: self.setTabStopDistance(*args, **kwargs)
    QPlainTextEdit.tabStopWidth = lambda self, *args, **kwargs: self.tabStopDistance(*args, **kwargs)
    QPlainTextEdit.print_ = lambda self, *args, **kwargs: self.print(*args, **kwargs)
    QApplication.exec_ = QApplication.exec
    QDialog.exec_ = lambda self, *args, **kwargs: self.exec(*args, **kwargs)
    QMenu.exec_ = lambda self, *args, **kwargs: self.exec(*args, **kwargs)
    QLineEdit.getTextMargins = lambda self: (self.textMargins().left(), self.textMargins().top(), self.textMargins().right(), self.textMargins().bottom())

    # Allow unscoped access for enums inside the QtWidgets module
    from .enums_compat import promote_enums
    promote_enums(QtWidgets)
    del QtWidgets
elif PYSIDE2:
    from PySide2.QtWidgets import *
elif PYSIDE6:
    from PySide6.QtWidgets import *
    from PySide6.QtGui import QAction, QActionGroup, QShortcut, QUndoCommand
    from PySide6.QtOpenGLWidgets import QOpenGLWidget

    # Map missing/renamed methods
    QTextEdit.setTabStopWidth = lambda self, *args, **kwargs: self.setTabStopDistance(*args, **kwargs)
    QTextEdit.tabStopWidth = lambda self, *args, **kwargs: self.tabStopDistance(*args, **kwargs)
    QPlainTextEdit.setTabStopWidth = lambda self, *args, **kwargs: self.setTabStopDistance(*args, **kwargs)
    QPlainTextEdit.tabStopWidth = lambda self, *args, **kwargs: self.tabStopDistance(*args, **kwargs)
    QLineEdit.getTextMargins = lambda self: (self.textMargins().left(), self.textMargins().top(), self.textMargins().right(), self.textMargins().bottom())

    # Map DeprecationWarning methods
    QApplication.exec_ = QApplication.exec
    QDialog.exec_ = lambda self, *args, **kwargs: self.exec(*args, **kwargs)
    QMenu.exec_ = lambda self, *args, **kwargs: self.exec(*args, **kwargs)
| mit | dcd92ff62d76c0f465983259217db0ba | 52.185185 | 154 | 0.685237 | 3.691517 | false | false | false | false |
spyder-ide/qtpy | qtpy/cli.py | 1 | 2866 | # -----------------------------------------------------------------------------
# Copyright © 2009- The QtPy Contributors
#
# Released under the terms of the MIT License
# (see LICENSE.txt for details)
# -----------------------------------------------------------------------------
"""Provide a CLI to allow configuring developer settings, including mypy."""
# Standard library imports
import argparse
import textwrap
def print_version():
    """Print the current version of the package."""
    import qtpy  # imported lazily so the CLI loads without side effects

    print('QtPy version {}'.format(qtpy.__version__))
def generate_mypy_args():
    """Generate a string with always-true/false args to pass to mypy."""
    import qtpy

    flags = []
    for name in qtpy.API_NAMES:
        option = '--always-true' if qtpy.API == name else '--always-false'
        flags.append('{}={}'.format(option, name.upper()))
    return ' '.join(flags)
def print_mypy_args():
    """Print the generated mypy args to stdout."""
    args = generate_mypy_args()
    print(args)
def generate_arg_parser():
    """Build and return the argparse parser for the QtPy developer CLI."""
    parser = argparse.ArgumentParser(
        description='Features to support development with QtPy.',
    )
    # Default action when no subcommand is given: show the help text.
    parser.set_defaults(func=parser.print_help)
    parser.add_argument(
        '--version', action='store_const', dest='func', const=print_version,
        help='If passed, will print the version and exit')

    subparsers = parser.add_subparsers(
        title='Subcommands', help='Subcommand to run', metavar='Subcommand')

    # Parser for the MyPy args subcommand
    mypy_description = textwrap.dedent(
        """
        Generate command line arguments for using mypy with QtPy.

        This will generate strings similar to the following
        which help guide mypy through which library QtPy would have used
        so that mypy can get the proper underlying type hints.

            --always-false=PYQT5 --always-false=PYQT6 --always-true=PYSIDE2 --always-false=PYSIDE6

        It can be used as follows on Bash or a similar shell:

            mypy --package mypackage $(qtpy mypy-args)
        """
    )
    mypy_args_parser = subparsers.add_parser(
        name='mypy-args',
        help='Generate command line arguments for using mypy with QtPy.',
        formatter_class=argparse.RawTextHelpFormatter,
        description=mypy_description,
    )
    mypy_args_parser.set_defaults(func=print_mypy_args)

    return parser
def main(args=None):
    """Run the development CLI for QtPy."""
    parser = generate_arg_parser()
    namespace = parser.parse_args(args=args)
    # 'func' is the dispatch target itself, not an argument for it.
    call_kwargs = {
        key: value
        for key, value in vars(namespace).items()
        if key != 'func'
    }
    namespace.func(**call_kwargs)
| mit | cfe9ea2cd0b6796f902cf474178993d6 | 31.556818 | 102 | 0.614311 | 4.152174 | false | false | false | false |
koduj-z-klasa/python101 | docs/pyqt/todopw/tabmodel_z3.py | 2 | 1068 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from PyQt5.QtCore import QAbstractTableModel, QModelIndex, Qt, QVariant
class TabModel(QAbstractTableModel):
    """Tabular data model feeding a Qt table view."""

    def __init__(self, pola=[], dane=[], parent=None):
        # NOTE(review): the mutable default arguments are kept for
        # interface compatibility; they are only read, never mutated.
        # ``parent`` is accepted but not forwarded — presumably
        # intentional, confirm against callers.
        super(TabModel, self).__init__()
        self.pola = pola      # column/field names
        self.tabela = dane    # row data: a sequence of row sequences

    def aktualizuj(self, dane):
        """Assign a new data source to the model."""
        # Fixed: removed a leftover debug print() of the incoming data.
        self.tabela = dane

    def rowCount(self, parent=QModelIndex()):
        """Return the number of rows."""
        return len(self.tabela)

    def columnCount(self, parent=QModelIndex()):
        """Return the number of columns (0 when there is no data)."""
        if self.tabela:
            return len(self.tabela[0])
        else:
            return 0

    def data(self, index, rola=Qt.DisplayRole):
        """Return the display text for a cell, or an empty QVariant."""
        i = index.row()
        j = index.column()
        if rola == Qt.DisplayRole:
            return '{0}'.format(self.tabela[i][j])
        else:
            return QVariant()
| mit | 2ee6f6fdda79e8871136afcd39864696 | 26.894737 | 71 | 0.570755 | 3.231707 | false | false | false | false |
rochacbruno/dynaconf | example/issues/449_django_lazy_path/polls/views.py | 3 | 1623 | from __future__ import annotations
from django.conf import settings
from django.http import HttpResponse
def index(request):
    """Render a plain-HTML smoke-test page demonstrating how dynaconf
    layers .env / settings.toml / .secrets.toml values into Django
    settings, including a temporary switch to the 'dev' environment."""
    tests = []
    # Show the example configuration sources verbatim, then the values
    # Django actually resolved through dynaconf.
    tests.append("<b>.env</b>")
    tests.append(
        """<pre>DYNACONF_SERVER='prod_server_fromenv.com'
DEV_SERVER='dev_server_fromenv.com'
# switch envs or omit to default to DYNACONF
ENV_FOR_DYNACONF=dev
</pre>
"""
    )
    tests.append("<b>settings.toml</b>")
    tests.append(
        """<pre>[dynaconf]
server = 'foo.com'
username = 'prod user'
password = false  # in prod this value must come from .secrets.toml or vault
STATIC_URL = '/changed/in/settings.toml/by/dynaconf/'
[dev]
username = 'dev user'</pre>
"""
    )
    tests.append("<b>.secrets.toml</b>")
    tests.append(
        """<pre>[dev]
password = 'My5up3r53c4et'</pre>
"""
    )
    tests.append(
        '<b>INSTALLED_APPS = ["dynaconf.contrib.django_dynaconf"...] </b>'
    )
    tests.append("<b> from django.conf import settings</b>")
    tests.append(f"settings.STATIC_URL: {settings.STATIC_URL}")
    tests.append(f"settings.SERVER: {settings.SERVER}")
    tests.append(f"settings.USERNAME: {settings.USERNAME}")
    tests.append(f"settings.PASSWORD: {settings.PASSWORD}")

    # Temporarily resolve the same keys inside the 'dev' environment.
    with settings.using_env("dev"):
        tests.append("<b>$ In dev env</b>")
        tests.append(f"settings.STATIC_URL: {settings.STATIC_URL}")
        tests.append(f"settings.SERVER: {settings.SERVER}")
        tests.append(f"settings.USERNAME: {settings.USERNAME}")
        tests.append(f"settings.PASSWORD: {settings.PASSWORD}")

    # settings.setenv('dev')
    return HttpResponse("<br>".join(tests))
| mit | f53b73aa0e63d33ffcdc1642c560fc25 | 29.055556 | 77 | 0.645102 | 3.278788 | false | true | false | false |
koduj-z-klasa/python101 | docs/bazy/sql/sqlraw.py | 1 | 2275 | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
import sqlite3
# utworzenie połączenia z bazą przechowywaną na dysku
# lub w pamięci (':memory:')
con = sqlite3.connect('test.db')
# dostęp do kolumn przez indeksy i przez nazwy
con.row_factory = sqlite3.Row
# utworzenie obiektu kursora
cur = con.cursor()
# tworzenie tabel
cur.execute("DROP TABLE IF EXISTS klasa;")
cur.execute("""
CREATE TABLE IF NOT EXISTS klasa (
id INTEGER PRIMARY KEY ASC,
nazwa varchar(250) NOT NULL,
profil varchar(250) DEFAULT ''
)""")
cur.executescript("""
DROP TABLE IF EXISTS uczen;
CREATE TABLE IF NOT EXISTS uczen (
id INTEGER PRIMARY KEY ASC,
imie varchar(250) NOT NULL,
nazwisko varchar(250) NOT NULL,
klasa_id INTEGER NOT NULL,
FOREIGN KEY(klasa_id) REFERENCES klasa(id)
)""")
# wstawiamy jeden rekord danych
cur.execute('INSERT INTO klasa VALUES(NULL, ?, ?);', ('1A', 'matematyczny'))
cur.execute('INSERT INTO klasa VALUES(NULL, ?, ?);', ('1B', 'humanistyczny'))
# wykonujemy zapytanie SQL, które pobierze id klasy "1A" z tabeli "klasa".
cur.execute('SELECT id FROM klasa WHERE nazwa = ?', ('1A',))
klasa_id = cur.fetchone()[0]
# tupla "uczniowie" zawiera tuple z danymi poszczególnych uczniów
uczniowie = (
(None, 'Tomasz', 'Nowak', klasa_id),
(None, 'Jan', 'Kos', klasa_id),
(None, 'Piotr', 'Kowalski', klasa_id)
)
# wstawiamy wiele rekordów
cur.executemany('INSERT INTO uczen VALUES(?,?,?,?)', uczniowie)
# zatwierdzamy zmiany w bazie
con.commit()
# pobieranie danych z bazy
def czytajdane():
    """Fetch and print every student joined with their class name."""
    cur.execute(
        """
        SELECT uczen.id,imie,nazwisko,nazwa FROM uczen,klasa
        WHERE uczen.klasa_id=klasa.id
        """)
    for row in cur.fetchall():
        print(row['id'], row['imie'], row['nazwisko'], row['nazwa'])
    print()
czytajdane()

# Move the student with id 2 to class "1B"
cur.execute('SELECT id FROM klasa WHERE nazwa = ?', ('1B',))
klasa_id = cur.fetchone()[0]
cur.execute('UPDATE uczen SET klasa_id=? WHERE id=?', (klasa_id, 2))

# Delete the student with id 3
cur.execute('DELETE FROM uczen WHERE id=?', (3,))

czytajdane()
con.close()
| mit | d3e03c7be281afe562c874674f106eac | 25.940476 | 77 | 0.65665 | 2.514444 | false | false | false | false |
rochacbruno/dynaconf | dynaconf/vendor/toml/encoder.py | 1 | 4748 | _G=']\n'
_F=' = '
_E='\n'
_D=False
_C='['
_B='.'
_A=None
import datetime,re,sys
from decimal import Decimal
from .decoder import InlineTableDict
if sys.version_info>=(3,):unicode=str
def dump(o,f,encoder=_A):
 """Serialize mapping *o* as TOML, write it to file object *f* and return the string."""
 # NOTE(review): accessing `f.write` raises AttributeError (not the intended
 # TypeError) when *f* has no write attribute -- confirm before changing.
 if not f.write:raise TypeError('You can only dump an object to a file descriptor')
 A=dumps(o,encoder=encoder);f.write(A);return A
def dumps(o,encoder=_A):
 """Serialize mapping *o* to a TOML-formatted string.

 Scalar keys of each level are emitted first; nested tables are flattened
 breadth-first, each pass of the while-loop emitting one more level of
 [section] headers under their dotted names.
 """
 C=encoder;A=''
 if C is _A:C=TomlEncoder(o.__class__)
 # A accumulates output; D holds not-yet-emitted sub-tables; G tracks the
 # ids of containers already visited, to detect self-referencing structures.
 B,D=C.dump_sections(o,'');A+=B;G=[id(o)]
 while D:
  H=[id(A)for A in D]
  for K in G:
   if K in H:raise ValueError('Circular reference detected')
  G+=H;I=C.get_empty_table()
  for E in D:
   B,F=C.dump_sections(D[E],E)
   if B or not B and not F:
    # emit a [section] header, separated from earlier output by a blank line
    if A and A[-2:]!='\n\n':A+=_E
    A+=_C+E+_G
    if B:A+=B
    # queue the remaining sub-tables under their dotted name for the next pass
    for J in F:I[E+_B+J]=F[J]
  D=I
 return A
def _dump_str(v):
 """Return *v* as a double-quoted TOML string literal.

 Uses repr() to obtain escapes, normalizes the quoting style, then rewrites
 ``\\xNN`` escapes (which TOML does not allow) to ``\\uNN``-style ones,
 counting preceding backslashes to tell real escapes from literal text.
 """
 G="'";F='\\';C='"'
 # Python 2 byte strings are decoded to unicode before quoting
 if sys.version_info<(3,)and hasattr(v,'decode')and isinstance(v,str):v=v.decode('utf-8')
 v='%r'%v
 if v[0]=='u':v=v[1:]
 D=v.startswith(G)
 # strip repr's surrounding quotes; convert single-quoted form to double
 if D or v.startswith(C):v=v[1:-1]
 if D:v=v.replace("\\'",G);v=v.replace(C,'\\"')
 # split on '\x' and repair each occurrence
 v=v.split('\\x')
 while len(v)>1:
  A=-1
  if not v[0]:v=v[1:]
  v[0]=v[0].replace('\\\\',F);B=v[0][A]!=F
  # B: True when the '\x' was a genuine escape (odd number of backslashes)
  while v[0][:A]and v[0][A]==F:B=not B;A-=1
  if B:E='x'
  else:E='u00'
  v=[v[0]+E+v[1]]+v[2:]
 return unicode(C+v[0]+C)
def _dump_float(v):return '{}'.format(v).replace('e+0','e+').replace('e-0','e-')
def _dump_time(v):
 # Times carrying a UTC offset are emitted as local time: isoformat()
 # appends a "+HH:MM"/"-HH:MM" suffix (6 chars) which TOML local times drop.
 offset=v.utcoffset()
 if offset is None:return v.isoformat()
 return v.isoformat()[:-6]
class TomlEncoder:
 """Default TOML encoder: maps Python types to TOML text via `dump_funcs`."""
 def __init__(A,_dict=dict,preserve=_D):A._dict=_dict;A.preserve=preserve;A.dump_funcs={str:_dump_str,unicode:_dump_str,list:A.dump_list,bool:lambda v:unicode(v).lower(),int:lambda v:v,float:_dump_float,Decimal:_dump_float,datetime.datetime:lambda v:v.isoformat().replace('+00:00','Z'),datetime.time:_dump_time,datetime.date:lambda v:v.isoformat()}
 # factory for section dicts (honours an OrderedDict-like _dict)
 def get_empty_table(A):return A._dict()
 # render a Python list as a TOML array literal
 def dump_list(B,v):
  A=_C
  for C in v:A+=' '+unicode(B.dump_value(C))+','
  A+=']';return A
 # render a dict as an inline table `{ k = v, ... }` (recursive)
 def dump_inline_table(B,section):
  A=section;C=''
  if isinstance(A,dict):
   D=[]
   for (E,F) in A.items():G=B.dump_inline_table(F);D.append(E+_F+G)
   C+='{ '+', '.join(D)+' }\n';return C
  else:return unicode(B.dump_value(A))
 # pick the dump function by exact type; other iterables fall back to
 # dump_list, anything unknown is stringified via _dump_str
 def dump_value(B,v):
  A=B.dump_funcs.get(type(v))
  if A is _A and hasattr(v,'__iter__'):A=B.dump_funcs[list]
  return A(v)if A is not _A else B.dump_funcs[str](v)
 # emit `key = value` lines for one section; returns (text, sub_tables)
 # where sub_tables maps names of nested dicts still to be emitted
 def dump_sections(C,o,sup):
  D=sup;F=''
  if D!=''and D[-1]!=_B:D+=_B
  M=C._dict();G=''
  for A in o:
   A=unicode(A);B=A
   # quote keys that are not bare TOML keys
   if not re.match('^[A-Za-z0-9_-]+$',A):B=_dump_str(A)
   if not isinstance(o[A],dict):
    N=_D
    if isinstance(o[A],list):
     for L in o[A]:
      if isinstance(L,dict):N=True
    if N:
     # a list of dicts becomes a [[array of tables]]
     for L in o[A]:
      H=_E;G+='[['+D+B+']]\n';I,J=C.dump_sections(L,D+B)
      if I:
       if I[0]==_C:H+=I
       else:G+=I
      while J:
       O=C._dict()
       for K in J:
        E,P=C.dump_sections(J[K],D+B+_B+K)
        if E:H+=_C+D+B+_B+K+_G;H+=E
        for E in P:O[K+_B+E]=P[E]
       J=O
      G+=H
    elif o[A]is not _A:F+=B+_F+unicode(C.dump_value(o[A]))+_E
   elif C.preserve and isinstance(o[A],InlineTableDict):F+=B+_F+C.dump_inline_table(o[A])
   else:M[B]=o[A]
  F+=G;return F,M
class TomlPreserveInlineDictEncoder(TomlEncoder):
 """Encoder that keeps InlineTableDict values as inline `{ ... }` tables."""
 def __init__(A,_dict=dict):super(TomlPreserveInlineDictEncoder,A).__init__(_dict,True)
class TomlArraySeparatorEncoder(TomlEncoder):
 """Encoder that joins array items with a custom separator string."""
 def __init__(B,_dict=dict,preserve=_D,separator=','):
  A=separator;super(TomlArraySeparatorEncoder,B).__init__(_dict,preserve)
  # the separator must be a comma optionally surrounded by whitespace
  if A.strip()=='':A=','+A
  elif A.strip(' \t\n\r,'):raise ValueError('Invalid separator for arrays')
  B.separator=A
 def dump_list(D,v):
  B=[];C=_C
  for A in v:B.append(D.dump_value(A))
  # flatten nested lists level by level, appending scalars to the output
  while B!=[]:
   E=[]
   for A in B:
    if isinstance(A,list):
     for F in A:E.append(F)
    else:C+=' '+unicode(A)+D.separator
   B=E
  C+=']';return C
class TomlNumpyEncoder(TomlEncoder):
 """Encoder aware of numpy scalar types (float16/32/64 and int16/32/64)."""
 def __init__(A,_dict=dict,preserve=_D):import numpy as B;super(TomlNumpyEncoder,A).__init__(_dict,preserve);A.dump_funcs[B.float16]=_dump_float;A.dump_funcs[B.float32]=_dump_float;A.dump_funcs[B.float64]=_dump_float;A.dump_funcs[B.int16]=A._dump_int;A.dump_funcs[B.int32]=A._dump_int;A.dump_funcs[B.int64]=A._dump_int
 def _dump_int(A,v):return '{}'.format(int(v))
class TomlPreserveCommentEncoder(TomlEncoder):
 """Encoder that re-emits CommentValue wrappers together with their comments."""
 def __init__(A,_dict=dict,preserve=_D):from dynaconf.vendor.toml.decoder import CommentValue as B;super(TomlPreserveCommentEncoder,A).__init__(_dict,preserve);A.dump_funcs[B]=lambda v:v.dump(A.dump_value)
class TomlPathlibEncoder(TomlEncoder):
 """Encoder that converts pathlib.PurePath values to strings before dumping."""
 def _dump_pathlib_path(A,v):return _dump_str(str(v))
 def dump_value(A,v):
  # pathlib is available from Python 3.4 onwards
  if(3,4)<=sys.version_info:
   import pathlib as B
   if isinstance(v,B.PurePath):v=str(v)
  return super(TomlPathlibEncoder,A).dump_value(v)
koduj-z-klasa/python101 | bazy/sqlorm/sqlraw05.py | 1 | 2212 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import sqlite3
# utworzenie połączenia z bazą przechowywaną w pamięci RAM
con = sqlite3.connect(':memory:')
# dostęp do kolumn przez indeksy i przez nazwy
con.row_factory = sqlite3.Row
# utworzenie obiektu kursora
cur = con.cursor()
# tworzenie tabel
cur.executescript("""
DROP TABLE IF EXISTS klasa;
CREATE TABLE IF NOT EXISTS klasa (
id INTEGER PRIMARY KEY ASC,
nazwa varchar(250) NOT NULL,
profil varchar(250) DEFAULT ''
);
DROP TABLE IF EXISTS uczen;
CREATE TABLE IF NOT EXISTS uczen (
id INTEGER PRIMARY KEY ASC,
imie varchar(250) NOT NULL,
nazwisko varchar(250) NOT NULL,
klasa_id INTEGER NOT NULL,
FOREIGN KEY(klasa_id) REFERENCES klasa(id)
)""")
# wstawiamy dane uczniów
cur.execute('INSERT INTO klasa VALUES(NULL, ?, ?);', ('1A', 'matematyczny'))
cur.execute('INSERT INTO klasa VALUES(NULL, ?, ?);', ('1B', 'humanistyczny'))
# wykonujemy zapytanie SQL, które pobierze id klasy "1A" z tabeli "klasa".
cur.execute('SELECT id FROM klasa WHERE nazwa = ?', ('1A',))
klasa_id = cur.fetchone()[0]
# wstawiamy dane uczniów
cur.execute('INSERT INTO uczen VALUES(?,?,?,?)',
(None, 'Tomasz', 'Nowak', klasa_id))
cur.execute('INSERT INTO uczen VALUES(?,?,?,?)',
(None, 'Adam', 'Kowalski', klasa_id))
# zatwierdzamy zmiany w bazie
con.commit()
def czytajdane():
    """Fetch and print the pupils joined with the names of their classes."""
    cur.execute(
        """
        SELECT uczen.id,imie,nazwisko,nazwa FROM uczen,klasa
        WHERE uczen.klasa_id=klasa.id
        """)
    uczniowie = cur.fetchall()
    # Python 2 print statements; sqlite3.Row gives access by column name
    for uczen in uczniowie:
        print uczen['id'], uczen['imie'], uczen['nazwisko'], uczen['nazwa']
    print ""
czytajdane()
# move pupil "Nowak" to class "1B"
cur.execute('SELECT id FROM uczen WHERE nazwisko="Nowak"')
uczen_id = cur.fetchone()[0]
cur.execute('SELECT id FROM klasa WHERE nazwa = ?', ('1B',))
klasa_id = cur.fetchone()[0]
cur.execute('UPDATE uczen SET klasa_id=? WHERE id=?', (klasa_id, uczen_id))
czytajdane()
# delete the pupil with id 1
cur.execute('DELETE FROM uczen WHERE id=?', (1,))
czytajdane()
con.close()
| mit | 7bc375b353bda68e25646c1e3e53c020 | 27.960526 | 77 | 0.651976 | 2.595519 | false | false | false | false |
rochacbruno/dynaconf | dynaconf/vendor_src/ruamel/yaml/resolver.py | 1 | 15286 | # coding: utf-8
from __future__ import absolute_import
import re
if False: # MYPY
from typing import Any, Dict, List, Union, Text, Optional # NOQA
from .compat import VersionType # NOQA
from .compat import string_types, _DEFAULT_YAML_VERSION # NOQA
from .error import * # NOQA
from .nodes import MappingNode, ScalarNode, SequenceNode # NOQA
from .util import RegExp # NOQA
__all__ = ['BaseResolver', 'Resolver', 'VersionedResolver']
# fmt: off
# resolvers consist of
# - a list of applicable version
# - a tag
# - a regexp
# - a list of first characters to match
implicit_resolvers = [
([(1, 2)],
u'tag:yaml.org,2002:bool',
RegExp(u'''^(?:true|True|TRUE|false|False|FALSE)$''', re.X),
list(u'tTfF')),
([(1, 1)],
u'tag:yaml.org,2002:bool',
RegExp(u'''^(?:y|Y|yes|Yes|YES|n|N|no|No|NO
|true|True|TRUE|false|False|FALSE
|on|On|ON|off|Off|OFF)$''', re.X),
list(u'yYnNtTfFoO')),
([(1, 2)],
u'tag:yaml.org,2002:float',
RegExp(u'''^(?:
[-+]?(?:[0-9][0-9_]*)\\.[0-9_]*(?:[eE][-+]?[0-9]+)?
|[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+)
|[-+]?\\.[0-9_]+(?:[eE][-+][0-9]+)?
|[-+]?\\.(?:inf|Inf|INF)
|\\.(?:nan|NaN|NAN))$''', re.X),
list(u'-+0123456789.')),
([(1, 1)],
u'tag:yaml.org,2002:float',
RegExp(u'''^(?:
[-+]?(?:[0-9][0-9_]*)\\.[0-9_]*(?:[eE][-+]?[0-9]+)?
|[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+)
|\\.[0-9_]+(?:[eE][-+][0-9]+)?
|[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\\.[0-9_]* # sexagesimal float
|[-+]?\\.(?:inf|Inf|INF)
|\\.(?:nan|NaN|NAN))$''', re.X),
list(u'-+0123456789.')),
([(1, 2)],
u'tag:yaml.org,2002:int',
RegExp(u'''^(?:[-+]?0b[0-1_]+
|[-+]?0o?[0-7_]+
|[-+]?[0-9_]+
|[-+]?0x[0-9a-fA-F_]+)$''', re.X),
list(u'-+0123456789')),
([(1, 1)],
u'tag:yaml.org,2002:int',
RegExp(u'''^(?:[-+]?0b[0-1_]+
|[-+]?0?[0-7_]+
|[-+]?(?:0|[1-9][0-9_]*)
|[-+]?0x[0-9a-fA-F_]+
|[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X), # sexagesimal int
list(u'-+0123456789')),
([(1, 2), (1, 1)],
u'tag:yaml.org,2002:merge',
RegExp(u'^(?:<<)$'),
[u'<']),
([(1, 2), (1, 1)],
u'tag:yaml.org,2002:null',
RegExp(u'''^(?: ~
|null|Null|NULL
| )$''', re.X),
[u'~', u'n', u'N', u'']),
([(1, 2), (1, 1)],
u'tag:yaml.org,2002:timestamp',
RegExp(u'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]
|[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]?
(?:[Tt]|[ \\t]+)[0-9][0-9]?
:[0-9][0-9] :[0-9][0-9] (?:\\.[0-9]*)?
(?:[ \\t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X),
list(u'0123456789')),
([(1, 2), (1, 1)],
u'tag:yaml.org,2002:value',
RegExp(u'^(?:=)$'),
[u'=']),
# The following resolver is only for documentation purposes. It cannot work
# because plain scalars cannot start with '!', '&', or '*'.
([(1, 2), (1, 1)],
u'tag:yaml.org,2002:yaml',
RegExp(u'^(?:!|&|\\*)$'),
list(u'!&*')),
]
# fmt: on
class ResolverError(YAMLError):
    """Raised for invalid path-resolver registrations."""
    pass
class BaseResolver(object):
    """Maps nodes to tags: implicit resolvers match scalar values against
    regexps keyed by first character; path resolvers match the node's
    position within the document."""
    DEFAULT_SCALAR_TAG = u'tag:yaml.org,2002:str'
    DEFAULT_SEQUENCE_TAG = u'tag:yaml.org,2002:seq'
    DEFAULT_MAPPING_TAG = u'tag:yaml.org,2002:map'
    yaml_implicit_resolvers = {}  # type: Dict[Any, Any]
    yaml_path_resolvers = {}  # type: Dict[Any, Any]
    def __init__(self, loadumper=None):
        # type: (Any, Any) -> None
        self.loadumper = loadumper
        # register ourselves as the loader/dumper's resolver if it has none
        if self.loadumper is not None and getattr(self.loadumper, '_resolver', None) is None:
            self.loadumper._resolver = self.loadumper
        self._loader_version = None  # type: Any
        # stacks maintained by descend_resolver/ascend_resolver while walking
        self.resolver_exact_paths = []  # type: List[Any]
        self.resolver_prefix_paths = []  # type: List[Any]
    @property
    def parser(self):
        # type: () -> Any
        """Return the parser of the attached loader/dumper, if any."""
        if self.loadumper is not None:
            if hasattr(self.loadumper, 'typ'):
                return self.loadumper.parser
            return self.loadumper._parser
        return None
    @classmethod
    def add_implicit_resolver_base(cls, tag, regexp, first):
        # type: (Any, Any, Any) -> None
        """Register *regexp* for *tag*, keyed by each possible first character."""
        if 'yaml_implicit_resolvers' not in cls.__dict__:
            # deepcopy doesn't work here
            cls.yaml_implicit_resolvers = dict(
                (k, cls.yaml_implicit_resolvers[k][:]) for k in cls.yaml_implicit_resolvers
            )
        if first is None:
            first = [None]
        for ch in first:
            cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))
    @classmethod
    def add_implicit_resolver(cls, tag, regexp, first):
        # type: (Any, Any, Any) -> None
        """Like add_implicit_resolver_base, but also record the rule in the
        module-level table used by VersionedResolver."""
        if 'yaml_implicit_resolvers' not in cls.__dict__:
            # deepcopy doesn't work here
            cls.yaml_implicit_resolvers = dict(
                (k, cls.yaml_implicit_resolvers[k][:]) for k in cls.yaml_implicit_resolvers
            )
        if first is None:
            first = [None]
        for ch in first:
            cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))
        implicit_resolvers.append(([(1, 2), (1, 1)], tag, regexp, first))
    # @classmethod
    # def add_implicit_resolver(cls, tag, regexp, first):
    @classmethod
    def add_path_resolver(cls, tag, path, kind=None):
        # type: (Any, Any, Any) -> None
        # Note: `add_path_resolver` is experimental.  The API could be changed.
        # `new_path` is a pattern that is matched against the path from the
        # root to the node that is being considered.  `node_path` elements are
        # tuples `(node_check, index_check)`.  `node_check` is a node class:
        # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`.  `None`
        # matches any kind of a node.  `index_check` could be `None`, a boolean
        # value, a string value, or a number.  `None` and `False` match against
        # any _value_ of sequence and mapping nodes.  `True` matches against
        # any _key_ of a mapping node.  A string `index_check` matches against
        # a mapping value that corresponds to a scalar key which content is
        # equal to the `index_check` value.  An integer `index_check` matches
        # against a sequence value with the index equal to `index_check`.
        if 'yaml_path_resolvers' not in cls.__dict__:
            cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy()
        new_path = []  # type: List[Any]
        for element in path:
            if isinstance(element, (list, tuple)):
                if len(element) == 2:
                    node_check, index_check = element
                elif len(element) == 1:
                    node_check = element[0]
                    index_check = True
                else:
                    raise ResolverError('Invalid path element: %s' % (element,))
            else:
                node_check = None
                index_check = element
            # normalize python types to node classes
            if node_check is str:
                node_check = ScalarNode
            elif node_check is list:
                node_check = SequenceNode
            elif node_check is dict:
                node_check = MappingNode
            elif (
                node_check not in [ScalarNode, SequenceNode, MappingNode]
                and not isinstance(node_check, string_types)
                and node_check is not None
            ):
                raise ResolverError('Invalid node checker: %s' % (node_check,))
            if not isinstance(index_check, (string_types, int)) and index_check is not None:
                raise ResolverError('Invalid index checker: %s' % (index_check,))
            new_path.append((node_check, index_check))
        if kind is str:
            kind = ScalarNode
        elif kind is list:
            kind = SequenceNode
        elif kind is dict:
            kind = MappingNode
        elif kind not in [ScalarNode, SequenceNode, MappingNode] and kind is not None:
            raise ResolverError('Invalid node kind: %s' % (kind,))
        cls.yaml_path_resolvers[tuple(new_path), kind] = tag
    def descend_resolver(self, current_node, current_index):
        # type: (Any, Any) -> None
        """Push path-resolver state for the child at *current_index*."""
        if not self.yaml_path_resolvers:
            return
        exact_paths = {}
        prefix_paths = []
        if current_node:
            depth = len(self.resolver_prefix_paths)
            for path, kind in self.resolver_prefix_paths[-1]:
                if self.check_resolver_prefix(depth, path, kind, current_node, current_index):
                    if len(path) > depth:
                        prefix_paths.append((path, kind))
                    else:
                        exact_paths[kind] = self.yaml_path_resolvers[path, kind]
        else:
            for path, kind in self.yaml_path_resolvers:
                if not path:
                    exact_paths[kind] = self.yaml_path_resolvers[path, kind]
                else:
                    prefix_paths.append((path, kind))
        self.resolver_exact_paths.append(exact_paths)
        self.resolver_prefix_paths.append(prefix_paths)
    def ascend_resolver(self):
        # type: () -> None
        """Pop the state pushed by descend_resolver."""
        if not self.yaml_path_resolvers:
            return
        self.resolver_exact_paths.pop()
        self.resolver_prefix_paths.pop()
    def check_resolver_prefix(self, depth, path, kind, current_node, current_index):
        # type: (int, Text, Any, Any, Any) -> bool
        """Return True when path[depth-1] matches the current node/index pair."""
        node_check, index_check = path[depth - 1]
        if isinstance(node_check, string_types):
            if current_node.tag != node_check:
                return False
        elif node_check is not None:
            if not isinstance(current_node, node_check):
                return False
        if index_check is True and current_index is not None:
            return False
        if (index_check is False or index_check is None) and current_index is None:
            return False
        if isinstance(index_check, string_types):
            if not (
                isinstance(current_index, ScalarNode) and index_check == current_index.value
            ):
                return False
        elif isinstance(index_check, int) and not isinstance(index_check, bool):
            if index_check != current_index:
                return False
        return True
    def resolve(self, kind, value, implicit):
        # type: (Any, Any, Any) -> Any
        """Return the tag for a node of *kind* with scalar *value*."""
        if kind is ScalarNode and implicit[0]:
            if value == "":
                resolvers = self.yaml_implicit_resolvers.get("", [])
            else:
                resolvers = self.yaml_implicit_resolvers.get(value[0], [])
            resolvers += self.yaml_implicit_resolvers.get(None, [])
            for tag, regexp in resolvers:
                if regexp.match(value):
                    return tag
            implicit = implicit[1]
        # then try path resolvers, then fall back to the default tags
        if bool(self.yaml_path_resolvers):
            exact_paths = self.resolver_exact_paths[-1]
            if kind in exact_paths:
                return exact_paths[kind]
            if None in exact_paths:
                return exact_paths[None]
        if kind is ScalarNode:
            return self.DEFAULT_SCALAR_TAG
        elif kind is SequenceNode:
            return self.DEFAULT_SEQUENCE_TAG
        elif kind is MappingNode:
            return self.DEFAULT_MAPPING_TAG
    @property
    def processing_version(self):
        # type: () -> Any
        return None
class Resolver(BaseResolver):
    """Resolver with the YAML 1.2 implicit resolvers pre-registered below."""
    pass
# register every YAML 1.2 rule on the non-versioned Resolver class
for ir in implicit_resolvers:
    if (1, 2) in ir[0]:
        Resolver.add_implicit_resolver_base(*ir[1:])
class VersionedResolver(BaseResolver):
    """
    contrary to the "normal" resolver, the smart resolver delays loading
    the pattern matching rules. That way it can decide to load 1.1 rules
    or the (default) 1.2 rules, that no longer support octal without 0o, sexagesimals
    and Yes/No/On/Off booleans.
    """
    def __init__(self, version=None, loader=None, loadumper=None):
        # type: (Optional[VersionType], Any, Any) -> None
        if loader is None and loadumper is not None:
            loader = loadumper
        BaseResolver.__init__(self, loader)
        self._loader_version = self.get_loader_version(version)
        # per-version tables built lazily by `versioned_resolver`
        self._version_implicit_resolver = {}  # type: Dict[Any, Any]
    def add_version_implicit_resolver(self, version, tag, regexp, first):
        # type: (VersionType, Any, Any, Any) -> None
        """Register an implicit resolver rule for a specific YAML version."""
        if first is None:
            first = [None]
        impl_resolver = self._version_implicit_resolver.setdefault(version, {})
        for ch in first:
            impl_resolver.setdefault(ch, []).append((tag, regexp))
    def get_loader_version(self, version):
        # type: (Optional[VersionType]) -> Any
        """Normalize a version given as tuple, list or "x.y" string to a tuple."""
        if version is None or isinstance(version, tuple):
            return version
        if isinstance(version, list):
            return tuple(version)
        # assume string
        return tuple(map(int, version.split(u'.')))
    @property
    def versioned_resolver(self):
        # type: () -> Any
        """
        select the resolver based on the version we are parsing
        """
        version = self.processing_version
        # build the rule table for this version on first use
        if version not in self._version_implicit_resolver:
            for x in implicit_resolvers:
                if version in x[0]:
                    self.add_version_implicit_resolver(version, x[1], x[2], x[3])
        return self._version_implicit_resolver[version]
    def resolve(self, kind, value, implicit):
        # type: (Any, Any, Any) -> Any
        """Like BaseResolver.resolve, but uses the version-specific rules."""
        if kind is ScalarNode and implicit[0]:
            if value == "":
                resolvers = self.versioned_resolver.get("", [])
            else:
                resolvers = self.versioned_resolver.get(value[0], [])
            resolvers += self.versioned_resolver.get(None, [])
            for tag, regexp in resolvers:
                if regexp.match(value):
                    return tag
            implicit = implicit[1]
        if bool(self.yaml_path_resolvers):
            exact_paths = self.resolver_exact_paths[-1]
            if kind in exact_paths:
                return exact_paths[kind]
            if None in exact_paths:
                return exact_paths[None]
        if kind is ScalarNode:
            return self.DEFAULT_SCALAR_TAG
        elif kind is SequenceNode:
            return self.DEFAULT_SEQUENCE_TAG
        elif kind is MappingNode:
            return self.DEFAULT_MAPPING_TAG
    @property
    def processing_version(self):
        # type: () -> Any
        """Version being processed: scanner > loader/serializer > ctor > default."""
        try:
            version = self.loadumper._scanner.yaml_version
        except AttributeError:
            try:
                if hasattr(self.loadumper, 'typ'):
                    version = self.loadumper.version
                else:
                    version = self.loadumper._serializer.use_version  # dumping
            except AttributeError:
                version = None
        if version is None:
            version = self._loader_version
            if version is None:
                version = _DEFAULT_YAML_VERSION
        return version
| mit | 41933438c13ef91891e354c19a8cc8a7 | 37.310777 | 94 | 0.534345 | 3.670108 | false | false | false | false |
koduj-z-klasa/python101 | docs/pyqt/widzety/widzety_z6.py | 1 | 4045 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from PyQt5.QtWidgets import QApplication, QWidget
from gui_z6 import Ui_Widget
from PyQt5.QtGui import QColor
class Widgety(QWidget, Ui_Widget):
    """Main application class: a demo panel wiring many Qt widgets
    to an RGB colour and a shape preview."""
    kanaly = {'R'}  # set of currently active colour channels ('R', 'G', 'B')
    kolorW = QColor(0, 0, 0)  # RGB colour of shape 1
    def __init__(self, parent=None):
        super(Widgety, self).__init__(parent)
        self.setupUi(self)  # build the interface
        # Signals and slots ###
        # CheckBox buttons ###
        self.grupaChk.buttonClicked[int].connect(self.ustawKsztalt)
        self.ksztaltChk.clicked.connect(self.aktywujKsztalt)
        # Slider + RadioButton buttons ###
        for i in range(self.ukladR.count()):
            self.ukladR.itemAt(i).widget().toggled.connect(self.ustawKanalRBtn)
        self.suwak.valueChanged.connect(self.zmienKolor)
        # ComboBox list and SpinBox ###
        self.grupaRBtn.clicked.connect(self.ustawStan)
        self.listaRGB.activated[str].connect(self.ustawKanalCBox)
        self.spinRGB.valueChanged[int].connect(self.zmienKolor)
        # PushButton buttons ###
        for btn in self.grupaP.buttons():
            btn.clicked[bool].connect(self.ustawKanalPBtn)
        self.grupaPBtn.clicked.connect(self.ustawStan)
        # QLabel labels and QEditLine fields ###
        for v in ('R', 'G', 'B'):
            kolor = getattr(self, 'kolor' + v)
            kolor.textEdited.connect(self.zmienKolor)
    def info(self):
        """Refresh channel labels/fields and show the current colour components."""
        fontB = "QWidget { font-weight: bold }"
        fontN = "QWidget { font-weight: normal }"
        for v in ('R', 'G', 'B'):
            label = getattr(self, 'label' + v)
            kolor = getattr(self, 'kolor' + v)
            if v in self.kanaly:
                label.setStyleSheet(fontB)
                kolor.setEnabled(True)
            else:
                label.setStyleSheet(fontN)
                kolor.setEnabled(False)
        self.kolorR.setText(str(self.kolorW.red()))
        self.kolorG.setText(str(self.kolorW.green()))
        self.kolorB.setText(str(self.kolorW.blue()))
    def ustawKanalPBtn(self, wartosc):
        """Toggle the channel named by the clicked push button."""
        nadawca = self.sender()
        if wartosc:
            self.kanaly.add(nadawca.text())
        else:
            self.kanaly.remove(nadawca.text())
    def ustawStan(self, wartosc):
        """Enable/disable the combo+spin pair and reset the channel set."""
        if wartosc:
            self.listaRGB.setEnabled(False)
            self.spinRGB.setEnabled(False)
        else:
            self.listaRGB.setEnabled(True)
            self.spinRGB.setEnabled(True)
            self.kanaly = set()
            self.kanaly.add(self.listaRGB.currentText())
    def ustawKanalCBox(self, wartosc):
        self.kanaly = set()  # reset the channel set
        self.kanaly.add(wartosc)
    def ustawKanalRBtn(self, wartosc):
        self.kanaly = set()  # reset the channel set
        nadawca = self.sender()
        if wartosc:
            self.kanaly.add(nadawca.text())
    def zmienKolor(self, wartosc):
        """Apply *wartosc* to every active channel and repaint the active shape."""
        wartosc = int(wartosc)
        self.lcd.display(wartosc)
        if 'R' in self.kanaly:
            self.kolorW.setRed(wartosc)
        if 'G' in self.kanaly:
            self.kolorW.setGreen(wartosc)
        if 'B' in self.kanaly:
            self.kolorW.setBlue(wartosc)
        self.ksztaltAktywny.ustawKolorW(
            self.kolorW.red(),
            self.kolorW.green(),
            self.kolorW.blue())
        self.info()
    def ustawKsztalt(self, wartosc):
        self.ksztaltAktywny.ustawKsztalt(wartosc)
    def aktywujKsztalt(self, wartosc):
        """Switch which of the two shapes receives colour/shape changes."""
        nadawca = self.sender()
        if wartosc:
            self.ksztaltAktywny = self.ksztalt1
            nadawca.setText("<=")
        else:
            self.ksztaltAktywny = self.ksztalt2
            nadawca.setText("=>")
        self.grupaChk.buttons()[self.ksztaltAktywny.ksztalt].setChecked(True)
if __name__ == '__main__':
    import sys
    # create the application, show the main window and enter the event loop
    app = QApplication(sys.argv)
    okno = Widgety()
    okno.show()
    sys.exit(app.exec_())
| mit | bc172e2d800422f5577342a6e2715ddb | 31.780488 | 79 | 0.593254 | 2.923858 | false | false | false | false |
rochacbruno/dynaconf | dynaconf/vendor/ruamel/yaml/events.py | 1 | 2311 | _H='explicit'
_G='style'
_F='flow_style'
_E='value'
_D='anchor'
_C='implicit'
_B='tag'
_A=None
if False:from typing import Any,Dict,Optional,List
def CommentCheck():0
class Event:
__slots__='start_mark','end_mark','comment'
def __init__(A,start_mark=_A,end_mark=_A,comment=CommentCheck):
B=comment;A.start_mark=start_mark;A.end_mark=end_mark
if B is CommentCheck:B=_A
A.comment=B
def __repr__(A):
C=[B for B in[_D,_B,_C,_E,_F,_G]if hasattr(A,B)];B=', '.join(['%s=%r'%(B,getattr(A,B))for B in C])
if A.comment not in[_A,CommentCheck]:B+=', comment={!r}'.format(A.comment)
return'%s(%s)'%(A.__class__.__name__,B)
class NodeEvent(Event):
__slots__=_D,
def __init__(A,anchor,start_mark=_A,end_mark=_A,comment=_A):Event.__init__(A,start_mark,end_mark,comment);A.anchor=anchor
class CollectionStartEvent(NodeEvent):
__slots__=_B,_C,_F,'nr_items'
def __init__(A,anchor,tag,implicit,start_mark=_A,end_mark=_A,flow_style=_A,comment=_A,nr_items=_A):NodeEvent.__init__(A,anchor,start_mark,end_mark,comment);A.tag=tag;A.implicit=implicit;A.flow_style=flow_style;A.nr_items=nr_items
class CollectionEndEvent(Event):__slots__=()
class StreamStartEvent(Event):
__slots__='encoding',
def __init__(A,start_mark=_A,end_mark=_A,encoding=_A,comment=_A):Event.__init__(A,start_mark,end_mark,comment);A.encoding=encoding
class StreamEndEvent(Event):__slots__=()
class DocumentStartEvent(Event):
__slots__=_H,'version','tags'
def __init__(A,start_mark=_A,end_mark=_A,explicit=_A,version=_A,tags=_A,comment=_A):Event.__init__(A,start_mark,end_mark,comment);A.explicit=explicit;A.version=version;A.tags=tags
class DocumentEndEvent(Event):
__slots__=_H,
def __init__(A,start_mark=_A,end_mark=_A,explicit=_A,comment=_A):Event.__init__(A,start_mark,end_mark,comment);A.explicit=explicit
class AliasEvent(NodeEvent):__slots__=()
class ScalarEvent(NodeEvent):
__slots__=_B,_C,_E,_G
def __init__(A,anchor,tag,implicit,value,start_mark=_A,end_mark=_A,style=_A,comment=_A):NodeEvent.__init__(A,anchor,start_mark,end_mark,comment);A.tag=tag;A.implicit=implicit;A.value=value;A.style=style
class SequenceStartEvent(CollectionStartEvent):__slots__=()
class SequenceEndEvent(CollectionEndEvent):__slots__=()
class MappingStartEvent(CollectionStartEvent):__slots__=()
class MappingEndEvent(CollectionEndEvent):__slots__=() | mit | ca925a38ec19646835a9a6803fbfba0e | 50.377778 | 230 | 0.698399 | 2.699766 | false | false | false | false |
rochacbruno/dynaconf | dynaconf/vendor/box/converters.py | 1 | 3165 | _G='r'
_F='w'
_E=False
_D=True
_C='strict'
_B='utf-8'
_A=None
import csv,json,sys,warnings
from pathlib import Path
import dynaconf.vendor.ruamel.yaml as yaml
from dynaconf.vendor.box.exceptions import BoxError,BoxWarning
from dynaconf.vendor import toml
BOX_PARAMETERS='default_box','default_box_attr','conversion_box','frozen_box','camel_killer_box','box_safe_prefix','box_duplicates','ordered_box','default_box_none_transform','box_dots','modify_tuples_box','box_intact_types','box_recast'
def _exists(filename,create=_E):
A=filename;B=Path(A)
if create:
try:B.touch(exist_ok=_D)
except OSError as C:raise BoxError(f"Could not create file {A} - {C}")
else:return
if not B.exists():raise BoxError(f'File "{A}" does not exist')
if not B.is_file():raise BoxError(f"{A} is not a file")
def _to_json(obj,filename=_A,encoding=_B,errors=_C,**C):
 """Dump *obj* as JSON to *filename*, or return the JSON string when no file is given."""
 A=filename;B=json.dumps(obj,ensure_ascii=_E,**C)
 if A:
  _exists(A,create=_D)
  with open(A,_F,encoding=encoding,errors=errors)as D:D.write(B if sys.version_info>=(3,0)else B.decode(_B))
 else:return B
def _from_json(json_string=_A,filename=_A,encoding=_B,errors=_C,multiline=_E,**B):
 """Parse JSON from a file or a string; *multiline* reads one JSON document per line."""
 D=json_string;A=filename
 if A:
  _exists(A)
  with open(A,_G,encoding=encoding,errors=errors)as E:
   # multiline: skip blank lines and '#' comment lines
   if multiline:C=[json.loads(A.strip(),**B)for A in E if A.strip()and not A.strip().startswith('#')]
   else:C=json.load(E,**B)
 elif D:C=json.loads(D,**B)
 else:raise BoxError('from_json requires a string or filename')
 return C
def _to_yaml(obj,filename=_A,default_flow_style=_E,encoding=_B,errors=_C,**C):
 """Dump *obj* as YAML to *filename*, or return the YAML string when no file is given."""
 B=default_flow_style;A=filename
 if A:
  _exists(A,create=_D)
  with open(A,_F,encoding=encoding,errors=errors)as D:yaml.dump(obj,stream=D,default_flow_style=B,**C)
 else:return yaml.dump(obj,default_flow_style=B,**C)
def _from_yaml(yaml_string=_A,filename=_A,encoding=_B,errors=_C,**A):
 """Parse YAML from a file or a string, defaulting to the SafeLoader."""
 F='Loader';C=yaml_string;B=filename
 if F not in A:A[F]=yaml.SafeLoader
 if B:
  _exists(B)
  with open(B,_G,encoding=encoding,errors=errors)as E:D=yaml.load(E,**A)
 elif C:D=yaml.load(C,**A)
 else:raise BoxError('from_yaml requires a string or filename')
 return D
def _to_toml(obj,filename=_A,encoding=_B,errors=_C):
 """Dump *obj* as TOML to *filename*, or return the TOML string when no file is given."""
 A=filename
 if A:
  _exists(A,create=_D)
  with open(A,_F,encoding=encoding,errors=errors)as B:toml.dump(obj,B)
 else:return toml.dumps(obj)
def _from_toml(toml_string=_A,filename=_A,encoding=_B,errors=_C):
 """Parse TOML from a file or a string."""
 B=toml_string;A=filename
 if A:
  _exists(A)
  with open(A,_G,encoding=encoding,errors=errors)as D:C=toml.load(D)
 elif B:C=toml.loads(B)
 else:raise BoxError('from_toml requires a string or filename')
 return C
def _to_csv(box_list,filename,encoding=_B,errors=_C):
 """Write a BoxList of flat, identically-keyed dicts to *filename* as CSV."""
 B=filename;A=box_list;C=list(A[0].keys())
 # every row must expose the same keys so the header applies to all of them
 for E in A:
  if list(E.keys())!=C:raise BoxError('BoxList must contain the same dictionary structure for every item to convert to csv')
 if B:
  _exists(B,create=_D)
  with open(B,_F,encoding=encoding,errors=errors,newline='')as F:
   D=csv.DictWriter(F,fieldnames=C);D.writeheader()
   for G in A:D.writerow(G)
def _from_csv(filename,encoding=_B,errors=_C):
 """Read *filename* as CSV and return its rows as a list of dicts."""
 A=filename;_exists(A)
 with open(A,_G,encoding=encoding,errors=errors,newline='')as B:C=csv.DictReader(B);return[A for A in C]
koduj-z-klasa/python101 | docs/mcpi/algorytmy/mcpi-lpi01.py | 1 | 1957 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import random
from time import sleep
import mcpi.minecraft as minecraft # import modułu minecraft
import mcpi.block as block # import modułu block
import local.minecraftstuff as mcstuff
os.environ["USERNAME"] = "Steve" # nazwa użytkownika
os.environ["COMPUTERNAME"] = "mykomp" # nazwa komputera
mc = minecraft.Minecraft.create("192.168.1.10") # połączenie z serwerem
def plac(x, y, z, roz=10, gracz=False):
    """Fill a cubic area starting at the given position with air
    and optionally teleport the player into its centre.
    Parameters: x, y, z - coordinates of the starting position,
    roz - size of the cleared cube,
    gracz - whether to place the player in the middle
    Requires: global objects mc and block.
    """
    podloga = block.STONE
    wypelniacz = block.AIR
    # stone floor
    mc.setBlocks(x, y - 1, z, x + roz, y - 1, z + roz, podloga)
    # clear the cube
    mc.setBlocks(x, y, z, x + roz, y + roz, z + roz, wypelniacz)
    # put the player in the middle
    if gracz:
        mc.player.setPos(x + roz / 2, y + roz / 2, z + roz / 2)
def model(promien, x, y, z):
    """
    Draw the outline of a square centred at (x, y, z)
    and the circle inscribed in that square.
    """
    mcfig = mcstuff.MinecraftDrawing(mc)
    obrys = block.SANDSTONE
    wypelniacz = block.AIR
    # solid square, then hollow it out to leave only the outline
    mc.setBlocks(x - promien, y, z - promien, x +
                 promien, y, z + promien, obrys)
    mc.setBlocks(x - promien + 1, y, z - promien + 1, x +
                 promien - 1, y, z + promien - 1, wypelniacz)
    mcfig.drawHorizontalCircle(0, 0, 0, promien, block.GRASS)
def liczbaPi():
    """Ask the user for a circle radius and draw the square/circle model."""
    r = float(raw_input("Podaj promień koła: "))
    model(r, 0, 0, 0)
def main():
    """Prepare the playground, position the player and run the demo."""
    mc.postToChat("LiczbaPi")  # send a message to the mc chat
    plac(-50, 0, -50, 100)
    mc.player.setPos(20, 20, 0)
    liczbaPi()
    return 0
# run the demo only when executed as a script
if __name__ == '__main__':
    main()
| mit | df6c02bf1195ac4b4633f1a7e1d2890c | 26.956522 | 72 | 0.634526 | 2.301909 | false | false | false | false |
rochacbruno/dynaconf | dynaconf/loaders/base.py | 1 | 6645 | from __future__ import annotations
import io
import warnings
from dynaconf.utils import build_env_list
from dynaconf.utils import ensure_a_list
from dynaconf.utils import upperfy
class BaseLoader:
"""Base loader for dynaconf source files.
:param obj: {[LazySettings]} -- [Dynaconf settings]
:param env: {[string]} -- [the current env to be loaded defaults to
[development]]
:param identifier: {[string]} -- [identifier ini, yaml, json, py, toml]
:param extensions: {[list]} -- [List of extensions with dots ['.a', '.b']]
:param file_reader: {[callable]} -- [reads file return dict]
:param string_reader: {[callable]} -- [reads string return dict]
"""
    def __init__(
        self, obj, env, identifier, extensions, file_reader, string_reader
    ):
        """Instantiates a loader for different sources"""
        self.obj = obj
        # fall back to the settings object's active env when none is given
        self.env = env or obj.current_env
        self.identifier = identifier
        self.extensions = extensions
        self.file_reader = file_reader
        self.string_reader = string_reader
    @staticmethod
    def warn_not_installed(obj, identifier):  # pragma: no cover
        """Warn (once per settings object) that a loader dependency is missing."""
        if identifier not in obj._not_installed_warnings:
            warnings.warn(
                f"{identifier} support is not installed in your environment. "
                f"`pip install dynaconf[{identifier}]`"
            )
            obj._not_installed_warnings.append(identifier)
    def load(self, filename=None, key=None, silent=True):
        """
        Reads and loads in to `self.obj` a single key or all keys from source
        :param filename: Optional filename to load
        :param key: if provided load a single key
        :param silent: if load errors should be silenced
        """
        filename = filename or self.obj.get(self.identifier.upper())
        if not filename:
            return
        if not isinstance(filename, (list, tuple)):
            split_files = ensure_a_list(filename)
            if all([f.endswith(self.extensions) for f in split_files]):  # noqa
                files = split_files  # it is a ['file.ext', ...]
            else:  # it is a single config as string
                files = [filename]
        else:  # it is already a list/tuple
            files = filename
        source_data = self.get_source_data(files)
        # choose envless vs per-environment loading based on the settings
        if self.obj.get("ENVIRONMENTS_FOR_DYNACONF") is False:
            self._envless_load(source_data, silent, key)
        else:
            self._load_all_envs(source_data, silent, key)
def get_source_data(self, files):
"""Reads each file and returns source data for each file
{"path/to/file.ext": {"key": "value"}}
"""
data = {}
for source_file in files:
if source_file.endswith(self.extensions):
try:
with open(
source_file,
encoding=self.obj.get(
"ENCODING_FOR_DYNACONF", "utf-8"
),
) as open_file:
content = self.file_reader(open_file)
self.obj._loaded_files.append(source_file)
if content:
data[source_file] = content
except OSError as e:
if ".local." not in source_file:
warnings.warn(
f"{self.identifier}_loader: {source_file} "
f":{str(e)}"
)
else:
# for tests it is possible to pass string
content = self.string_reader(source_file)
if content:
data[source_file] = content
return data
def _envless_load(self, source_data, silent=True, key=None):
"""Load all the keys from each file without env separation"""
for file_data in source_data.values():
self._set_data_to_obj(
file_data,
self.identifier,
key=key,
)
def _load_all_envs(self, source_data, silent=True, key=None):
"""Load configs from files separating by each environment"""
for file_data in source_data.values():
# env name is checked in lower
file_data = {k.lower(): value for k, value in file_data.items()}
# is there a `dynaconf_merge` on top level of file?
file_merge = file_data.get("dynaconf_merge")
# is there a flag disabling dotted lookup on file?
file_dotted_lookup = file_data.get("dynaconf_dotted_lookup")
for env in build_env_list(self.obj, self.env):
env = env.lower() # lower for better comparison
try:
data = file_data[env] or {}
except KeyError:
if silent:
continue
raise
if not data:
continue
self._set_data_to_obj(
data,
f"{self.identifier}_{env}",
file_merge,
key,
file_dotted_lookup=file_dotted_lookup,
)
def _set_data_to_obj(
self,
data,
identifier,
file_merge=None,
key=False,
file_dotted_lookup=None,
):
"""Calls settings.set to add the keys"""
# data 1st level keys should be transformed to upper case.
data = {upperfy(k): v for k, v in data.items()}
if key:
key = upperfy(key)
if self.obj.filter_strategy:
data = self.obj.filter_strategy(data)
# is there a `dynaconf_merge` inside an `[env]`?
file_merge = file_merge or data.pop("DYNACONF_MERGE", False)
# If not passed or passed as None,
# look for inner [env] value, or default settings.
if file_dotted_lookup is None:
file_dotted_lookup = data.pop(
"DYNACONF_DOTTED_LOOKUP",
self.obj.get("DOTTED_LOOKUP_FOR_DYNACONF"),
)
if not key:
self.obj.update(
data,
loader_identifier=identifier,
merge=file_merge,
dotted_lookup=file_dotted_lookup,
)
elif key in data:
self.obj.set(
key,
data.get(key),
loader_identifier=identifier,
merge=file_merge,
dotted_lookup=file_dotted_lookup,
)
| mit | 899e423f6ffa11462b933aa4185d570e | 34.15873 | 79 | 0.52295 | 4.303756 | false | false | false | false |
rochacbruno/dynaconf | dynaconf/vendor_src/ruamel/yaml/emitter.py | 1 | 64039 | # coding: utf-8
from __future__ import absolute_import
from __future__ import print_function
# Emitter expects events obeying the following grammar:
# stream ::= STREAM-START document* STREAM-END
# document ::= DOCUMENT-START node DOCUMENT-END
# node ::= SCALAR | sequence | mapping
# sequence ::= SEQUENCE-START node* SEQUENCE-END
# mapping ::= MAPPING-START (node node)* MAPPING-END
import sys
from .error import YAMLError, YAMLStreamError
from .events import * # NOQA
# fmt: off
from .compat import utf8, text_type, PY2, nprint, dbg, DBG_EVENT, check_anchorname_char
# fmt: on
if False: # MYPY
from typing import Any, Dict, List, Union, Text, Tuple, Optional # NOQA
from .compat import StreamType # NOQA
__all__ = ['Emitter', 'EmitterError']
class EmitterError(YAMLError):
    """Raised when the emitter receives an event or value it cannot handle."""
    pass
class ScalarAnalysis(object):
    """Result of scalar analysis: which output styles a scalar value allows."""
    def __init__(
        self,
        scalar,
        empty,
        multiline,
        allow_flow_plain,
        allow_block_plain,
        allow_single_quoted,
        allow_double_quoted,
        allow_block,
    ):
        # type: (Any, Any, Any, bool, bool, bool, bool, bool) -> None
        """Store the analyzed scalar together with its permitted-style flags."""
        self.scalar = scalar
        self.empty = empty
        self.multiline = multiline
        self.allow_flow_plain = allow_flow_plain
        self.allow_block_plain = allow_block_plain
        self.allow_single_quoted = allow_single_quoted
        self.allow_double_quoted = allow_double_quoted
        self.allow_block = allow_block
class Indents(object):
    """Stack of (indent, is_sequence) pairs; replaces the old list of None/int."""

    def __init__(self):
        # type: () -> None
        self.values = []  # type: List[Tuple[int, bool]]

    def append(self, val, seq):
        # type: (Any, Any) -> None
        """Push an indent value together with its sequence flag."""
        entry = (val, seq)
        self.values.append(entry)

    def pop(self):
        # type: () -> Any
        """Remove the top entry, returning only its indent value."""
        val, _seq = self.values.pop()
        return val

    def last_seq(self):
        # type: () -> bool
        """Sequence flag of the entry below the top (added before the last one
        in increase_indent()); False when fewer than two entries exist."""
        if len(self.values) < 2:
            return False
        return self.values[-2][1]

    def seq_flow_align(self, seq_indent, column):
        # type: (int, int) -> int
        """Extra spaces caused by the sequence dash, or 0 when not applicable."""
        if len(self.values) < 2:
            return 0
        top_val, top_seq = self.values[-1]
        if not top_seq:
            return 0
        base = 0 if top_val is None else top_val
        # -1 accounts for the dash itself
        return base + seq_indent - column - 1

    def __len__(self):
        # type: () -> int
        return len(self.values)
class Emitter(object):
    """State-machine YAML emitter: consumes events and writes YAML text to a stream."""
    # fmt: off
    # default mapping from tag prefix to the shorthand handle used in output
    DEFAULT_TAG_PREFIXES = {
        u'!': u'!',
        u'tag:yaml.org,2002:': u'!!',
    }
    # fmt: on
    # keys whose rendered length reaches this are never written in compact `key:` form
    MAX_SIMPLE_KEY_LENGTH = 128
    def __init__(
        self,
        stream,
        canonical=None,
        indent=None,
        width=None,
        allow_unicode=None,
        line_break=None,
        block_seq_indent=None,
        top_level_colon_align=None,
        prefix_colon=None,
        brace_single_entry_mapping_in_flow_sequence=None,
        dumper=None,
    ):
        # type: (StreamType, Any, Optional[int], Optional[int], Optional[bool], Any, Optional[int], Optional[bool], Any, Optional[bool], Any) -> None # NOQA
        """Initialize emitter state: output stream, state machine, indentation
        and style knobs. All keyword arguments are formatting preferences."""
        self.dumper = dumper
        if self.dumper is not None and getattr(self.dumper, '_emitter', None) is None:
            self.dumper._emitter = self
        self.stream = stream
        # Encoding can be overriden by STREAM-START.
        self.encoding = None # type: Optional[Text]
        self.allow_space_break = None
        # Emitter is a state machine with a stack of states to handle nested
        # structures.
        self.states = [] # type: List[Any]
        self.state = self.expect_stream_start # type: Any
        # Current event and the event queue.
        self.events = [] # type: List[Any]
        self.event = None # type: Any
        # The current indentation level and the stack of previous indents.
        self.indents = Indents()
        self.indent = None # type: Optional[int]
        # flow_context is an expanding/shrinking list consisting of '{' and '['
        # for each unclosed flow context. If empty list that means block context
        self.flow_context = [] # type: List[Text]
        # Contexts.
        self.root_context = False
        self.sequence_context = False
        self.mapping_context = False
        self.simple_key_context = False
        # Characteristics of the last emitted character:
        # - current position.
        # - is it a whitespace?
        # - is it an indention character
        # (indentation space, '-', '?', or ':')?
        self.line = 0
        self.column = 0
        self.whitespace = True
        self.indention = True
        self.compact_seq_seq = True # dash after dash
        self.compact_seq_map = True # key after dash
        # self.compact_ms = False # dash after key, only when excplicit key with ?
        self.no_newline = None # type: Optional[bool] # set if directly after `- `
        # Whether the document requires an explicit document end indicator
        self.open_ended = False
        # colon handling
        self.colon = u':'
        self.prefixed_colon = self.colon if prefix_colon is None else prefix_colon + self.colon
        # single entry mappings in flow sequence
        self.brace_single_entry_mapping_in_flow_sequence = (
            brace_single_entry_mapping_in_flow_sequence
        ) # NOQA
        # Formatting details.
        self.canonical = canonical
        self.allow_unicode = allow_unicode
        # set to False to get "\Uxxxxxxxx" for non-basic unicode like emojis
        self.unicode_supplementary = sys.maxunicode > 0xffff
        self.sequence_dash_offset = block_seq_indent if block_seq_indent else 0
        self.top_level_colon_align = top_level_colon_align
        self.best_sequence_indent = 2
        self.requested_indent = indent # specific for literal zero indent
        if indent and 1 < indent < 10:
            self.best_sequence_indent = indent
        self.best_map_indent = self.best_sequence_indent
        # if self.best_sequence_indent < self.sequence_dash_offset + 1:
        # self.best_sequence_indent = self.sequence_dash_offset + 1
        self.best_width = 80
        if width and width > self.best_sequence_indent * 2:
            self.best_width = width
        self.best_line_break = u'\n' # type: Any
        if line_break in [u'\r', u'\n', u'\r\n']:
            self.best_line_break = line_break
        # Tag prefixes.
        self.tag_prefixes = None # type: Any
        # Prepared anchor and tag.
        self.prepared_anchor = None # type: Any
        self.prepared_tag = None # type: Any
        # Scalar analysis and style.
        self.analysis = None # type: Any
        self.style = None # type: Any
        self.scalar_after_indicator = True # write a scalar on the same line as `---`
    @property
    def stream(self):
        # type: () -> Any
        """The output stream; raises YAMLStreamError if none has been set yet."""
        try:
            return self._stream
        except AttributeError:
            raise YAMLStreamError('output stream needs to specified')
    @stream.setter
    def stream(self, val):
        # type: (Any) -> None
        """Attach the output stream; requires a write() method. None is a no-op."""
        if val is None:
            return
        if not hasattr(val, 'write'):
            raise YAMLStreamError('stream argument needs to have a write() method')
        self._stream = val
    @property
    def serializer(self):
        # type: () -> Any
        """The serializer owned by the dumper, or self when running under cyaml."""
        try:
            if hasattr(self.dumper, 'typ'):
                return self.dumper.serializer
            return self.dumper._serializer
        except AttributeError:
            return self # cyaml
    @property
    def flow_level(self):
        # type: () -> int
        """Current flow nesting depth (number of unclosed '{' / '[')."""
        return len(self.flow_context)
    def dispose(self):
        # type: () -> None
        """Break reference cycles by clearing the bound-method state stack."""
        # Reset the state attributes (to clear self-references)
        self.states = []
        self.state = None
    def emit(self, event):
        # type: (Any) -> None
        """Queue `event`, then run the state machine while enough events are buffered."""
        if dbg(DBG_EVENT):
            nprint(event)
        self.events.append(event)
        while not self.need_more_events():
            self.event = self.events.pop(0)
            self.state()
            self.event = None
# In some cases, we wait for a few next events before emitting.
    def need_more_events(self):
        # type: () -> bool
        """True when emission should wait for more events to be queued.

        Document and collection starts need lookahead so the emitter can
        decide between flow/block and empty/non-empty output forms.
        """
        if not self.events:
            return True
        event = self.events[0]
        if isinstance(event, DocumentStartEvent):
            return self.need_events(1)
        elif isinstance(event, SequenceStartEvent):
            return self.need_events(2)
        elif isinstance(event, MappingStartEvent):
            return self.need_events(3)
        else:
            return False
def need_events(self, count):
# type: (int) -> bool
level = 0
for event in self.events[1:]:
if isinstance(event, (DocumentStartEvent, CollectionStartEvent)):
level += 1
elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)):
level -= 1
elif isinstance(event, StreamEndEvent):
level = -1
if level < 0:
return False
return len(self.events) < count + 1
    def increase_indent(self, flow=False, sequence=None, indentless=False):
        # type: (bool, Optional[bool], bool) -> None
        """Push the current indent and derive the next level for a nested node."""
        self.indents.append(self.indent, sequence)
        if self.indent is None: # top level
            if flow:
                # self.indent = self.best_sequence_indent if self.indents.last_seq() else \
                # self.best_map_indent
                # self.indent = self.best_sequence_indent
                self.indent = self.requested_indent
            else:
                self.indent = 0
        elif not indentless:
            # step by the sequence or mapping indent depending on parent kind
            self.indent += (
                self.best_sequence_indent if self.indents.last_seq() else self.best_map_indent
            )
            # if self.indents.last_seq():
            # if self.indent == 0: # top level block sequence
            # self.indent = self.best_sequence_indent - self.sequence_dash_offset
            # else:
            # self.indent += self.best_sequence_indent
            # else:
            # self.indent += self.best_map_indent
# States.
# Stream handlers.
    def expect_stream_start(self):
        # type: () -> None
        """Initial state: consume StreamStartEvent and record its encoding."""
        if isinstance(self.event, StreamStartEvent):
            if PY2:
                if self.event.encoding and not getattr(self.stream, 'encoding', None):
                    self.encoding = self.event.encoding
            else:
                # only encode ourselves when the stream does not do it already
                if self.event.encoding and not hasattr(self.stream, 'encoding'):
                    self.encoding = self.event.encoding
            self.write_stream_start()
            self.state = self.expect_first_document_start
        else:
            raise EmitterError('expected StreamStartEvent, but got %s' % (self.event,))
    def expect_nothing(self):
        # type: () -> None
        """Terminal state after stream end: any further event is an error."""
        raise EmitterError('expected nothing, but got %s' % (self.event,))
# Document handlers.
    def expect_first_document_start(self):
        # type: () -> Any
        """State for the first document; delegates with first=True."""
        return self.expect_document_start(first=True)
    def expect_document_start(self, first=False):
        # type: (bool) -> None
        """Emit a document start: directives, optional '---', then expect the root."""
        if isinstance(self.event, DocumentStartEvent):
            if (self.event.version or self.event.tags) and self.open_ended:
                # close the previous open-ended document before writing directives
                self.write_indicator(u'...', True)
                self.write_indent()
            if self.event.version:
                version_text = self.prepare_version(self.event.version)
                self.write_version_directive(version_text)
            self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy()
            if self.event.tags:
                handles = sorted(self.event.tags.keys())
                for handle in handles:
                    prefix = self.event.tags[handle]
                    self.tag_prefixes[prefix] = handle
                    handle_text = self.prepare_tag_handle(handle)
                    prefix_text = self.prepare_tag_prefix(prefix)
                    self.write_tag_directive(handle_text, prefix_text)
            # '---' may be omitted only for the first, plain, non-empty document
            implicit = (
                first
                and not self.event.explicit
                and not self.canonical
                and not self.event.version
                and not self.event.tags
                and not self.check_empty_document()
            )
            if not implicit:
                self.write_indent()
                self.write_indicator(u'---', True)
                if self.canonical:
                    self.write_indent()
            self.state = self.expect_document_root
        elif isinstance(self.event, StreamEndEvent):
            if self.open_ended:
                self.write_indicator(u'...', True)
                self.write_indent()
            self.write_stream_end()
            self.state = self.expect_nothing
        else:
            raise EmitterError('expected DocumentStartEvent, but got %s' % (self.event,))
    def expect_document_end(self):
        # type: () -> None
        """Emit a document end ('...' only when explicit) and flush the stream."""
        if isinstance(self.event, DocumentEndEvent):
            self.write_indent()
            if self.event.explicit:
                self.write_indicator(u'...', True)
                self.write_indent()
            self.flush_stream()
            self.state = self.expect_document_start
        else:
            raise EmitterError('expected DocumentEndEvent, but got %s' % (self.event,))
    def expect_document_root(self):
        # type: () -> None
        """Emit the document's root node, returning to document-end afterwards."""
        self.states.append(self.expect_document_end)
        self.expect_node(root=True)
# Node handlers.
    def expect_node(self, root=False, sequence=False, mapping=False, simple_key=False):
        # type: (bool, bool, bool, bool) -> None
        """Dispatch the current event to the alias/scalar/sequence/mapping emitter,
        recording in which syntactic context (root/seq/map/simple-key) it appears."""
        self.root_context = root
        self.sequence_context = sequence # not used in PyYAML
        self.mapping_context = mapping
        self.simple_key_context = simple_key
        if isinstance(self.event, AliasEvent):
            self.expect_alias()
        elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)):
            if (
                self.process_anchor(u'&')
                and isinstance(self.event, ScalarEvent)
                and self.sequence_context
            ):
                self.sequence_context = False
            if (
                root
                and isinstance(self.event, ScalarEvent)
                and not self.scalar_after_indicator
            ):
                self.write_indent()
            self.process_tag()
            if isinstance(self.event, ScalarEvent):
                # nprint('@', self.indention, self.no_newline, self.column)
                self.expect_scalar()
            elif isinstance(self.event, SequenceStartEvent):
                # nprint('@', self.indention, self.no_newline, self.column)
                i2, n2 = self.indention, self.no_newline # NOQA
                if self.event.comment:
                    if self.event.flow_style is False and self.event.comment:
                        if self.write_post_comment(self.event):
                            self.indention = False
                            self.no_newline = True
                    if self.write_pre_comment(self.event):
                        self.indention = i2
                        self.no_newline = not self.indention
                # flow style when already in flow context, forced, or empty
                if (
                    self.flow_level
                    or self.canonical
                    or self.event.flow_style
                    or self.check_empty_sequence()
                ):
                    self.expect_flow_sequence()
                else:
                    self.expect_block_sequence()
            elif isinstance(self.event, MappingStartEvent):
                if self.event.flow_style is False and self.event.comment:
                    self.write_post_comment(self.event)
                if self.event.comment and self.event.comment[1]:
                    self.write_pre_comment(self.event)
                if (
                    self.flow_level
                    or self.canonical
                    or self.event.flow_style
                    or self.check_empty_mapping()
                ):
                    self.expect_flow_mapping(single=self.event.nr_items == 1)
                else:
                    self.expect_block_mapping()
        else:
            raise EmitterError('expected NodeEvent, but got %s' % (self.event,))
    def expect_alias(self):
        # type: () -> None
        """Write a '*name' alias reference for the current anchored event."""
        if self.event.anchor is None:
            raise EmitterError('anchor is not specified for alias')
        self.process_anchor(u'*')
        self.state = self.states.pop()
    def expect_scalar(self):
        # type: () -> None
        """Write a scalar value inside its own (flow) indentation context."""
        self.increase_indent(flow=True)
        self.process_scalar()
        self.indent = self.indents.pop()
        self.state = self.states.pop()
# Flow sequence handlers.
    def expect_flow_sequence(self):
        # type: () -> None
        """Open a flow sequence with '[' and move to the first-item state."""
        ind = self.indents.seq_flow_align(self.best_sequence_indent, self.column)
        self.write_indicator(u' ' * ind + u'[', True, whitespace=True)
        self.increase_indent(flow=True, sequence=True)
        self.flow_context.append('[')
        self.state = self.expect_first_flow_sequence_item
    def expect_first_flow_sequence_item(self):
        # type: () -> None
        """Emit the first flow-sequence item, or close ']' immediately when empty."""
        if isinstance(self.event, SequenceEndEvent):
            self.indent = self.indents.pop()
            popped = self.flow_context.pop()
            assert popped == '['
            self.write_indicator(u']', False)
            if self.event.comment and self.event.comment[0]:
                # eol comment on empty flow sequence
                self.write_post_comment(self.event)
            elif self.flow_level == 0:
                self.write_line_break()
            self.state = self.states.pop()
        else:
            if self.canonical or self.column > self.best_width:
                self.write_indent()
            self.states.append(self.expect_flow_sequence_item)
            self.expect_node(sequence=True)
    def expect_flow_sequence_item(self):
        # type: () -> None
        """Emit a subsequent flow-sequence item (',' separated), or close ']'."""
        if isinstance(self.event, SequenceEndEvent):
            self.indent = self.indents.pop()
            popped = self.flow_context.pop()
            assert popped == '['
            if self.canonical:
                self.write_indicator(u',', False)
                self.write_indent()
            self.write_indicator(u']', False)
            if self.event.comment and self.event.comment[0]:
                # eol comment on flow sequence
                self.write_post_comment(self.event)
            else:
                self.no_newline = False
            self.state = self.states.pop()
        else:
            self.write_indicator(u',', False)
            if self.canonical or self.column > self.best_width:
                self.write_indent()
            self.states.append(self.expect_flow_sequence_item)
            self.expect_node(sequence=True)
# Flow mapping handlers.
    def expect_flow_mapping(self, single=False):
        # type: (Optional[bool]) -> None
        """Open a flow mapping; single-entry maps inside flow sequences may omit braces."""
        ind = self.indents.seq_flow_align(self.best_sequence_indent, self.column)
        map_init = u'{'
        if (
            single
            and self.flow_level
            and self.flow_context[-1] == '['
            and not self.canonical
            and not self.brace_single_entry_mapping_in_flow_sequence
        ):
            # single map item with flow context, no curly braces necessary
            map_init = u''
        self.write_indicator(u' ' * ind + map_init, True, whitespace=True)
        self.flow_context.append(map_init)
        self.increase_indent(flow=True, sequence=False)
        self.state = self.expect_first_flow_mapping_key
    def expect_first_flow_mapping_key(self):
        # type: () -> None
        """Emit the first flow-mapping key, or close '}' when the mapping is empty."""
        if isinstance(self.event, MappingEndEvent):
            self.indent = self.indents.pop()
            popped = self.flow_context.pop()
            assert popped == '{' # empty flow mapping
            self.write_indicator(u'}', False)
            if self.event.comment and self.event.comment[0]:
                # eol comment on empty mapping
                self.write_post_comment(self.event)
            elif self.flow_level == 0:
                self.write_line_break()
            self.state = self.states.pop()
        else:
            if self.canonical or self.column > self.best_width:
                self.write_indent()
            if not self.canonical and self.check_simple_key():
                self.states.append(self.expect_flow_mapping_simple_value)
                self.expect_node(mapping=True, simple_key=True)
            else:
                # long/complex keys need the explicit '?' indicator
                self.write_indicator(u'?', True)
                self.states.append(self.expect_flow_mapping_value)
                self.expect_node(mapping=True)
    def expect_flow_mapping_key(self):
        # type: () -> None
        """Emit a subsequent flow-mapping key (',' separated), or close the mapping."""
        if isinstance(self.event, MappingEndEvent):
            # if self.event.comment and self.event.comment[1]:
            # self.write_pre_comment(self.event)
            self.indent = self.indents.pop()
            popped = self.flow_context.pop()
            assert popped in [u'{', u'']
            if self.canonical:
                self.write_indicator(u',', False)
                self.write_indent()
            if popped != u'':
                # only close with '}' when braces were actually opened
                self.write_indicator(u'}', False)
            if self.event.comment and self.event.comment[0]:
                # eol comment on flow mapping, never reached on empty mappings
                self.write_post_comment(self.event)
            else:
                self.no_newline = False
            self.state = self.states.pop()
        else:
            self.write_indicator(u',', False)
            if self.canonical or self.column > self.best_width:
                self.write_indent()
            if not self.canonical and self.check_simple_key():
                self.states.append(self.expect_flow_mapping_simple_value)
                self.expect_node(mapping=True, simple_key=True)
            else:
                self.write_indicator(u'?', True)
                self.states.append(self.expect_flow_mapping_value)
                self.expect_node(mapping=True)
    def expect_flow_mapping_simple_value(self):
        # type: () -> None
        """Write ':' after a simple key and emit the value node."""
        self.write_indicator(self.prefixed_colon, False)
        self.states.append(self.expect_flow_mapping_key)
        self.expect_node(mapping=True)
    def expect_flow_mapping_value(self):
        # type: () -> None
        """Write ':' after an explicit '?' key and emit the value node."""
        if self.canonical or self.column > self.best_width:
            self.write_indent()
        self.write_indicator(self.prefixed_colon, True)
        self.states.append(self.expect_flow_mapping_key)
        self.expect_node(mapping=True)
# Block sequence handlers.
    def expect_block_sequence(self):
        # type: () -> None
        """Open a block sequence; indentless when nested directly under a map key."""
        if self.mapping_context:
            indentless = not self.indention
        else:
            indentless = False
            if not self.compact_seq_seq and self.column != 0:
                self.write_line_break()
        self.increase_indent(flow=False, sequence=True, indentless=indentless)
        self.state = self.expect_first_block_sequence_item
    def expect_first_block_sequence_item(self):
        # type: () -> Any
        """State for the first block-sequence item; delegates with first=True."""
        return self.expect_block_sequence_item(first=True)
    def expect_block_sequence_item(self, first=False):
        # type: (bool) -> None
        """Emit a '- ' sequence item, or pop state when the sequence ends."""
        if not first and isinstance(self.event, SequenceEndEvent):
            if self.event.comment and self.event.comment[1]:
                # final comments on a block list e.g. empty line
                self.write_pre_comment(self.event)
            self.indent = self.indents.pop()
            self.state = self.states.pop()
            self.no_newline = False
        else:
            if self.event.comment and self.event.comment[1]:
                self.write_pre_comment(self.event)
            nonl = self.no_newline if self.column == 0 else False
            self.write_indent()
            ind = self.sequence_dash_offset # if len(self.indents) > 1 else 0
            self.write_indicator(u' ' * ind + u'-', True, indention=True)
            if nonl or self.sequence_dash_offset + 2 > self.best_sequence_indent:
                self.no_newline = True
            self.states.append(self.expect_block_sequence_item)
            self.expect_node(sequence=True)
# Block mapping handlers.
    def expect_block_mapping(self):
        # type: () -> None
        """Open a block mapping, breaking the line first when not in compact form."""
        if not self.mapping_context and not (self.compact_seq_map or self.column == 0):
            self.write_line_break()
        self.increase_indent(flow=False, sequence=False)
        self.state = self.expect_first_block_mapping_key
    def expect_first_block_mapping_key(self):
        # type: () -> Any
        """State for the first block-mapping key; delegates with first=True."""
        return self.expect_block_mapping_key(first=True)
    def expect_block_mapping_key(self, first=False):
        # type: (Any) -> None
        """Emit a block-mapping key (simple or explicit '?'), or end the mapping."""
        if not first and isinstance(self.event, MappingEndEvent):
            if self.event.comment and self.event.comment[1]:
                # final comments from a doc
                self.write_pre_comment(self.event)
            self.indent = self.indents.pop()
            self.state = self.states.pop()
        else:
            if self.event.comment and self.event.comment[1]:
                # final comments from a doc
                self.write_pre_comment(self.event)
            self.write_indent()
            if self.check_simple_key():
                if not isinstance(
                    self.event, (SequenceStartEvent, MappingStartEvent)
                ): # sequence keys
                    try:
                        # honor a user-requested explicit '?' style on the key
                        if self.event.style == '?':
                            self.write_indicator(u'?', True, indention=True)
                    except AttributeError: # aliases have no style
                        pass
                self.states.append(self.expect_block_mapping_simple_value)
                self.expect_node(mapping=True, simple_key=True)
                if isinstance(self.event, AliasEvent):
                    self.stream.write(u' ')
            else:
                self.write_indicator(u'?', True, indention=True)
                self.states.append(self.expect_block_mapping_value)
                self.expect_node(mapping=True)
    def expect_block_mapping_simple_value(self):
        # type: () -> None
        """Write the colon after a simple key (optionally aligned) and emit the value."""
        if getattr(self.event, 'style', None) != '?':
            # prefix = u''
            if self.indent == 0 and self.top_level_colon_align is not None:
                # write non-prefixed colon
                c = u' ' * (self.top_level_colon_align - self.column) + self.colon
            else:
                c = self.prefixed_colon
            self.write_indicator(c, False)
        self.states.append(self.expect_block_mapping_key)
        self.expect_node(mapping=True)
    def expect_block_mapping_value(self):
        # type: () -> None
        """Write ':' on a new line after an explicit '?' key and emit the value."""
        self.write_indent()
        self.write_indicator(self.prefixed_colon, True, indention=True)
        self.states.append(self.expect_block_mapping_key)
        self.expect_node(mapping=True)
# Checkers.
def check_empty_sequence(self):
# type: () -> bool
return (
isinstance(self.event, SequenceStartEvent)
and bool(self.events)
and isinstance(self.events[0], SequenceEndEvent)
)
def check_empty_mapping(self):
# type: () -> bool
return (
isinstance(self.event, MappingStartEvent)
and bool(self.events)
and isinstance(self.events[0], MappingEndEvent)
)
    def check_empty_document(self):
        # type: () -> bool
        """True when the next document consists of a single empty implicit scalar."""
        if not isinstance(self.event, DocumentStartEvent) or not self.events:
            return False
        event = self.events[0]
        return (
            isinstance(event, ScalarEvent)
            and event.anchor is None
            and event.tag is None
            and event.implicit
            and event.value == ""
        )
    def check_simple_key(self):
        # type: () -> bool
        """Decide whether the current node may be written in compact `key:` form.

        Sums the rendered lengths of anchor, tag and scalar (caching the
        prepared forms on self) and limits them to MAX_SIMPLE_KEY_LENGTH.
        """
        length = 0
        if isinstance(self.event, NodeEvent) and self.event.anchor is not None:
            if self.prepared_anchor is None:
                self.prepared_anchor = self.prepare_anchor(self.event.anchor)
            length += len(self.prepared_anchor)
        if (
            isinstance(self.event, (ScalarEvent, CollectionStartEvent))
            and self.event.tag is not None
        ):
            if self.prepared_tag is None:
                self.prepared_tag = self.prepare_tag(self.event.tag)
            length += len(self.prepared_tag)
        if isinstance(self.event, ScalarEvent):
            if self.analysis is None:
                self.analysis = self.analyze_scalar(self.event.value)
            length += len(self.analysis.scalar)
        return length < self.MAX_SIMPLE_KEY_LENGTH and (
            isinstance(self.event, AliasEvent)
            or (isinstance(self.event, SequenceStartEvent) and self.event.flow_style is True)
            or (isinstance(self.event, MappingStartEvent) and self.event.flow_style is True)
            or (
                isinstance(self.event, ScalarEvent)
                # if there is an explicit style for an empty string, it is a simple key
                and not (self.analysis.empty and self.style and self.style not in '\'"')
                and not self.analysis.multiline
            )
            or self.check_empty_sequence()
            or self.check_empty_mapping()
        )
# Anchor, Tag, and Scalar processors.
    def process_anchor(self, indicator):
        # type: (Any) -> bool
        """Write '&anchor' or '*anchor' for the current event; True when one existed."""
        if self.event.anchor is None:
            self.prepared_anchor = None
            return False
        if self.prepared_anchor is None:
            self.prepared_anchor = self.prepare_anchor(self.event.anchor)
        if self.prepared_anchor:
            self.write_indicator(indicator + self.prepared_anchor, True)
            # issue 288
            self.no_newline = False
        self.prepared_anchor = None
        return True
    def process_tag(self):
        # type: () -> None
        """Write the node's tag unless it is implied by the chosen scalar style."""
        tag = self.event.tag
        if isinstance(self.event, ScalarEvent):
            if self.style is None:
                self.style = self.choose_scalar_style()
            # implicit[0]: plain style implies the tag; implicit[1]: quoted does
            if (not self.canonical or tag is None) and (
                (self.style == "" and self.event.implicit[0])
                or (self.style != "" and self.event.implicit[1])
            ):
                self.prepared_tag = None
                return
            if self.event.implicit[0] and tag is None:
                tag = u'!'
                self.prepared_tag = None
        else:
            if (not self.canonical or tag is None) and self.event.implicit:
                self.prepared_tag = None
                return
        if tag is None:
            raise EmitterError('tag is not specified')
        if self.prepared_tag is None:
            self.prepared_tag = self.prepare_tag(tag)
        if self.prepared_tag:
            self.write_indicator(self.prepared_tag, True)
            if (
                self.sequence_context
                and not self.flow_level
                and isinstance(self.event, ScalarEvent)
            ):
                self.no_newline = True
        self.prepared_tag = None
    def choose_scalar_style(self):
        # type: () -> Any
        """Pick the output style for the current scalar: '"' (double), "'" (single),
        '' (plain), '|' (literal) or '>' (folded), honoring the requested style
        where the scalar analysis allows it."""
        if self.analysis is None:
            self.analysis = self.analyze_scalar(self.event.value)
        if self.event.style == '"' or self.canonical:
            return '"'
        if (not self.event.style or self.event.style == '?') and (
            self.event.implicit[0] or not self.event.implicit[2]
        ):
            if not (
                self.simple_key_context and (self.analysis.empty or self.analysis.multiline)
            ) and (
                self.flow_level
                and self.analysis.allow_flow_plain
                or (not self.flow_level and self.analysis.allow_block_plain)
            ):
                return ""
            self.analysis.allow_block = True
        if self.event.style and self.event.style in '|>':
            # block styles are illegal in flow context and for simple keys
            if (
                not self.flow_level
                and not self.simple_key_context
                and self.analysis.allow_block
            ):
                return self.event.style
        if not self.event.style and self.analysis.allow_double_quoted:
            if "'" in self.event.value or '\n' in self.event.value:
                return '"'
        if not self.event.style or self.event.style == "'":
            if self.analysis.allow_single_quoted and not (
                self.simple_key_context and self.analysis.multiline
            ):
                return "'"
        return '"'
    def process_scalar(self):
        # type: () -> None
        """Write the analyzed scalar using the chosen style, then reset the caches."""
        if self.analysis is None:
            self.analysis = self.analyze_scalar(self.event.value)
        if self.style is None:
            self.style = self.choose_scalar_style()
        # simple keys must stay on one line; values may be wrapped
        split = not self.simple_key_context
        # if self.analysis.multiline and split \
        # and (not self.style or self.style in '\'\"'):
        # self.write_indent()
        # nprint('xx', self.sequence_context, self.flow_level)
        if self.sequence_context and not self.flow_level:
            self.write_indent()
        if self.style == '"':
            self.write_double_quoted(self.analysis.scalar, split)
        elif self.style == "'":
            self.write_single_quoted(self.analysis.scalar, split)
        elif self.style == '>':
            self.write_folded(self.analysis.scalar)
        elif self.style == '|':
            self.write_literal(self.analysis.scalar, self.event.comment)
        else:
            self.write_plain(self.analysis.scalar, split)
        self.analysis = None
        self.style = None
        if self.event.comment:
            self.write_post_comment(self.event)
# Analyzers.
def prepare_version(self, version):
# type: (Any) -> Any
major, minor = version
if major != 1:
raise EmitterError('unsupported YAML version: %d.%d' % (major, minor))
return u'%d.%d' % (major, minor)
def prepare_tag_handle(self, handle):
# type: (Any) -> Any
if not handle:
raise EmitterError('tag handle must not be empty')
if handle[0] != u'!' or handle[-1] != u'!':
raise EmitterError("tag handle must start and end with '!': %r" % (utf8(handle)))
for ch in handle[1:-1]:
if not (
u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' or ch in u'-_'
):
raise EmitterError(
'invalid character %r in the tag handle: %r' % (utf8(ch), utf8(handle))
)
return handle
    def prepare_tag_prefix(self, prefix):
        # type: (Any) -> Any
        """Percent-encode the characters of a tag prefix that are not URI-safe."""
        if not prefix:
            raise EmitterError('tag prefix must not be empty')
        chunks = [] # type: List[Any]
        start = end = 0
        if prefix[0] == u'!':
            end = 1
        ch_set = u"-;/?:@&=+$,_.~*'()[]"
        if self.dumper:
            version = getattr(self.dumper, 'version', (1, 2))
            if version is None or version >= (1, 2):
                # '#' may appear unescaped from YAML 1.2 onwards
                ch_set += u'#'
        while end < len(prefix):
            ch = prefix[end]
            if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' or ch in ch_set:
                end += 1
            else:
                # flush the safe run, then %XX-escape the offending character
                if start < end:
                    chunks.append(prefix[start:end])
                start = end = end + 1
                data = utf8(ch)
                for ch in data:
                    chunks.append(u'%%%02X' % ord(ch))
        if start < end:
            chunks.append(prefix[start:end])
        return "".join(chunks)
    def prepare_tag(self, tag):
        # type: (Any) -> Any
        """Render a tag using the longest matching registered handle, escaping the
        suffix; falls back to the verbatim ``!<...>`` form when no handle applies."""
        if not tag:
            raise EmitterError('tag must not be empty')
        if tag == u'!':
            return tag
        handle = None
        suffix = tag
        prefixes = sorted(self.tag_prefixes.keys())
        for prefix in prefixes:
            if tag.startswith(prefix) and (prefix == u'!' or len(prefix) < len(tag)):
                handle = self.tag_prefixes[prefix]
                suffix = tag[len(prefix) :]
        chunks = [] # type: List[Any]
        start = end = 0
        ch_set = u"-;/?:@&=+$,_.~*'()[]"
        if self.dumper:
            version = getattr(self.dumper, 'version', (1, 2))
            if version is None or version >= (1, 2):
                # '#' may appear unescaped from YAML 1.2 onwards
                ch_set += u'#'
        while end < len(suffix):
            ch = suffix[end]
            if (
                u'0' <= ch <= u'9'
                or u'A' <= ch <= u'Z'
                or u'a' <= ch <= u'z'
                or ch in ch_set
                or (ch == u'!' and handle != u'!')
            ):
                end += 1
            else:
                # flush the safe run, then %XX-escape the offending character
                if start < end:
                    chunks.append(suffix[start:end])
                start = end = end + 1
                data = utf8(ch)
                for ch in data:
                    chunks.append(u'%%%02X' % ord(ch))
        if start < end:
            chunks.append(suffix[start:end])
        suffix_text = "".join(chunks)
        if handle:
            return u'%s%s' % (handle, suffix_text)
        else:
            return u'!<%s>' % suffix_text
def prepare_anchor(self, anchor):
# type: (Any) -> Any
if not anchor:
raise EmitterError('anchor must not be empty')
for ch in anchor:
if not check_anchorname_char(ch):
raise EmitterError(
'invalid character %r in the anchor: %r' % (utf8(ch), utf8(anchor))
)
return anchor
    def analyze_scalar(self, scalar):
        # type: (Any) -> Any
        """Scan *scalar* once and return a ScalarAnalysis that records
        whether it is empty or multiline and which output styles
        (flow/block plain, single quoted, double quoted, block) are
        allowed for it.
        """
        # Empty scalar is a special case.
        if not scalar:
            return ScalarAnalysis(
                scalar=scalar,
                empty=True,
                multiline=False,
                allow_flow_plain=False,
                allow_block_plain=True,
                allow_single_quoted=True,
                allow_double_quoted=True,
                allow_block=False,
            )
        # Indicators and special characters.
        block_indicators = False
        flow_indicators = False
        line_breaks = False
        special_characters = False
        # Important whitespace combinations.
        leading_space = False
        leading_break = False
        trailing_space = False
        trailing_break = False
        break_space = False
        space_break = False
        # Check document indicators.
        if scalar.startswith(u'---') or scalar.startswith(u'...'):
            block_indicators = True
            flow_indicators = True
        # First character or preceded by a whitespace.
        preceeded_by_whitespace = True
        # Last character or followed by a whitespace.
        followed_by_whitespace = len(scalar) == 1 or scalar[1] in u'\0 \t\r\n\x85\u2028\u2029'
        # The previous character is a space.
        previous_space = False
        # The previous character is a break.
        previous_break = False
        index = 0
        while index < len(scalar):
            ch = scalar[index]
            # Check for indicators.
            if index == 0:
                # Leading indicators are special characters.
                if ch in u'#,[]{}&*!|>\'"%@`':
                    flow_indicators = True
                    block_indicators = True
                if ch in u'?:':  # ToDo
                    if self.serializer.use_version == (1, 1):
                        flow_indicators = True
                    elif len(scalar) == 1:  # single character
                        flow_indicators = True
                    if followed_by_whitespace:
                        block_indicators = True
                if ch == u'-' and followed_by_whitespace:
                    flow_indicators = True
                    block_indicators = True
            else:
                # Some indicators cannot appear within a scalar as well.
                if ch in u',[]{}':  # http://yaml.org/spec/1.2/spec.html#id2788859
                    flow_indicators = True
                if ch == u'?' and self.serializer.use_version == (1, 1):
                    flow_indicators = True
                if ch == u':':
                    if followed_by_whitespace:
                        flow_indicators = True
                        block_indicators = True
                if ch == u'#' and preceeded_by_whitespace:
                    flow_indicators = True
                    block_indicators = True
            # Check for line breaks, special, and unicode characters.
            if ch in u'\n\x85\u2028\u2029':
                line_breaks = True
            if not (ch == u'\n' or u'\x20' <= ch <= u'\x7E'):
                if (
                    ch == u'\x85'
                    or u'\xA0' <= ch <= u'\uD7FF'
                    or u'\uE000' <= ch <= u'\uFFFD'
                    or (self.unicode_supplementary and (u'\U00010000' <= ch <= u'\U0010FFFF'))
                ) and ch != u'\uFEFF':
                    # unicode_characters = True
                    if not self.allow_unicode:
                        special_characters = True
                else:
                    special_characters = True
            # Detect important whitespace combinations.
            if ch == u' ':
                if index == 0:
                    leading_space = True
                if index == len(scalar) - 1:
                    trailing_space = True
                if previous_break:
                    break_space = True
                previous_space = True
                previous_break = False
            elif ch in u'\n\x85\u2028\u2029':
                if index == 0:
                    leading_break = True
                if index == len(scalar) - 1:
                    trailing_break = True
                if previous_space:
                    space_break = True
                previous_space = False
                previous_break = True
            else:
                previous_space = False
                previous_break = False
            # Prepare for the next character.
            index += 1
            preceeded_by_whitespace = ch in u'\0 \t\r\n\x85\u2028\u2029'
            followed_by_whitespace = (
                index + 1 >= len(scalar) or scalar[index + 1] in u'\0 \t\r\n\x85\u2028\u2029'
            )
        # Let's decide what styles are allowed.
        allow_flow_plain = True
        allow_block_plain = True
        allow_single_quoted = True
        allow_double_quoted = True
        allow_block = True
        # Leading and trailing whitespaces are bad for plain scalars.
        if leading_space or leading_break or trailing_space or trailing_break:
            allow_flow_plain = allow_block_plain = False
        # We do not permit trailing spaces for block scalars.
        if trailing_space:
            allow_block = False
        # Spaces at the beginning of a new line are only acceptable for block
        # scalars.
        if break_space:
            allow_flow_plain = allow_block_plain = allow_single_quoted = False
        # Spaces followed by breaks, as well as special character are only
        # allowed for double quoted scalars.
        if special_characters:
            allow_flow_plain = allow_block_plain = allow_single_quoted = allow_block = False
        elif space_break:
            allow_flow_plain = allow_block_plain = allow_single_quoted = False
            if not self.allow_space_break:
                allow_block = False
        # Although the plain scalar writer supports breaks, we never emit
        # multiline plain scalars.
        if line_breaks:
            allow_flow_plain = allow_block_plain = False
        # Flow indicators are forbidden for flow plain scalars.
        if flow_indicators:
            allow_flow_plain = False
        # Block indicators are forbidden for block plain scalars.
        if block_indicators:
            allow_block_plain = False
        return ScalarAnalysis(
            scalar=scalar,
            empty=False,
            multiline=line_breaks,
            allow_flow_plain=allow_flow_plain,
            allow_block_plain=allow_block_plain,
            allow_single_quoted=allow_single_quoted,
            allow_double_quoted=allow_double_quoted,
            allow_block=allow_block,
        )
# Writers.
def flush_stream(self):
# type: () -> None
if hasattr(self.stream, 'flush'):
self.stream.flush()
    def write_stream_start(self):
        # type: () -> None
        """Start the output stream: emit a BOM for UTF-16 encodings."""
        # Write BOM if needed.
        if self.encoding and self.encoding.startswith('utf-16'):
            self.stream.write(u'\uFEFF'.encode(self.encoding))
    def write_stream_end(self):
        # type: () -> None
        """End the output stream: just flush any buffered output."""
        self.flush_stream()
    def write_indicator(self, indicator, need_whitespace, whitespace=False, indention=False):
        # type: (Any, Any, bool, bool) -> None
        """Write an indicator token such as '-', '?', ':', '[' or a quote.

        :param need_whitespace: a separating space is required unless the
            previous write already ended in whitespace
        :param whitespace: whether the output ends in whitespace afterwards
        :param indention: whether indentation is still intact afterwards
        """
        if self.whitespace or not need_whitespace:
            data = indicator
        else:
            data = u' ' + indicator
        self.whitespace = whitespace
        # indentation survives only if it was already intact before
        self.indention = self.indention and indention
        self.column += len(data)
        self.open_ended = False
        if bool(self.encoding):
            data = data.encode(self.encoding)
        self.stream.write(data)
    def write_indent(self):
        # type: () -> None
        """Break the line (unless suppressed) and pad with spaces up to
        the current indentation level.
        """
        indent = self.indent or 0
        if (
            not self.indention
            or self.column > indent
            or (self.column == indent and not self.whitespace)
        ):
            if bool(self.no_newline):
                # a single line break was suppressed by the caller
                self.no_newline = False
            else:
                self.write_line_break()
        if self.column < indent:
            self.whitespace = True
            data = u' ' * (indent - self.column)
            self.column = indent
            if self.encoding:
                data = data.encode(self.encoding)
            self.stream.write(data)
    def write_line_break(self, data=None):
        # type: (Any) -> None
        """Write a line break (the configured best line break by default)
        and reset the column/indentation bookkeeping.
        """
        if data is None:
            data = self.best_line_break
        self.whitespace = True
        self.indention = True
        self.line += 1
        self.column = 0
        if bool(self.encoding):
            data = data.encode(self.encoding)
        self.stream.write(data)
    def write_version_directive(self, version_text):
        # type: (Any) -> None
        """Write a %YAML version directive on its own line."""
        data = u'%%YAML %s' % version_text
        if self.encoding:
            data = data.encode(self.encoding)
        self.stream.write(data)
        self.write_line_break()
    def write_tag_directive(self, handle_text, prefix_text):
        # type: (Any, Any) -> None
        """Write a %TAG directive mapping *handle_text* to *prefix_text*."""
        data = u'%%TAG %s %s' % (handle_text, prefix_text)
        if self.encoding:
            data = data.encode(self.encoding)
        self.stream.write(data)
        self.write_line_break()
# Scalar streams.
    def write_single_quoted(self, text, split=True):
        # type: (Any, Any) -> None
        """Write *text* as a single-quoted scalar.

        A quote character is escaped by doubling it ('').  Runs of spaces
        may be replaced by an indented line break when *split* is true and
        the line grows past the preferred width; line breaks in the text
        are normalized.
        """
        if self.root_context:
            if self.requested_indent is not None:
                self.write_line_break()
                if self.requested_indent != 0:
                    self.write_indent()
        self.write_indicator(u"'", True)
        spaces = False
        breaks = False
        start = end = 0
        while end <= len(text):
            ch = None
            if end < len(text):
                ch = text[end]
            if spaces:
                # end of a run of spaces: either fold it into an indent or
                # write the spaces verbatim
                if ch is None or ch != u' ':
                    if (
                        start + 1 == end
                        and self.column > self.best_width
                        and split
                        and start != 0
                        and end != len(text)
                    ):
                        self.write_indent()
                    else:
                        data = text[start:end]
                        self.column += len(data)
                        if bool(self.encoding):
                            data = data.encode(self.encoding)
                        self.stream.write(data)
                    start = end
            elif breaks:
                # end of a run of line breaks: normalize and re-indent
                if ch is None or ch not in u'\n\x85\u2028\u2029':
                    if text[start] == u'\n':
                        self.write_line_break()
                    for br in text[start:end]:
                        if br == u'\n':
                            self.write_line_break()
                        else:
                            self.write_line_break(br)
                    self.write_indent()
                    start = end
            else:
                if ch is None or ch in u' \n\x85\u2028\u2029' or ch == u"'":
                    if start < end:
                        data = text[start:end]
                        self.column += len(data)
                        if bool(self.encoding):
                            data = data.encode(self.encoding)
                        self.stream.write(data)
                        start = end
            if ch == u"'":
                # escape a single quote by doubling it
                data = u"''"
                self.column += 2
                if bool(self.encoding):
                    data = data.encode(self.encoding)
                self.stream.write(data)
                start = end + 1
            if ch is not None:
                spaces = ch == u' '
                breaks = ch in u'\n\x85\u2028\u2029'
            end += 1
        self.write_indicator(u"'", False)
ESCAPE_REPLACEMENTS = {
u'\0': u'0',
u'\x07': u'a',
u'\x08': u'b',
u'\x09': u't',
u'\x0A': u'n',
u'\x0B': u'v',
u'\x0C': u'f',
u'\x0D': u'r',
u'\x1B': u'e',
u'"': u'"',
u'\\': u'\\',
u'\x85': u'N',
u'\xA0': u'_',
u'\u2028': u'L',
u'\u2029': u'P',
}
    def write_double_quoted(self, text, split=True):
        # type: (Any, Any) -> None
        """Write *text* as a double-quoted scalar.

        Characters outside the printable ASCII range (or outside the
        allowed unicode ranges when allow_unicode is set) are written with
        backslash escapes; long lines may be folded with a trailing
        backslash when *split* is true.
        """
        if self.root_context:
            if self.requested_indent is not None:
                self.write_line_break()
                if self.requested_indent != 0:
                    self.write_indent()
        self.write_indicator(u'"', True)
        start = end = 0
        while end <= len(text):
            ch = None
            if end < len(text):
                ch = text[end]
            if (
                ch is None
                or ch in u'"\\\x85\u2028\u2029\uFEFF'
                or not (
                    u'\x20' <= ch <= u'\x7E'
                    or (
                        self.allow_unicode
                        and (u'\xA0' <= ch <= u'\uD7FF' or u'\uE000' <= ch <= u'\uFFFD')
                    )
                )
            ):
                # flush the run of plain characters, then escape *ch*
                if start < end:
                    data = text[start:end]
                    self.column += len(data)
                    if bool(self.encoding):
                        data = data.encode(self.encoding)
                    self.stream.write(data)
                    start = end
                if ch is not None:
                    if ch in self.ESCAPE_REPLACEMENTS:
                        data = u'\\' + self.ESCAPE_REPLACEMENTS[ch]
                    elif ch <= u'\xFF':
                        data = u'\\x%02X' % ord(ch)
                    elif ch <= u'\uFFFF':
                        data = u'\\u%04X' % ord(ch)
                    else:
                        data = u'\\U%08X' % ord(ch)
                    self.column += len(data)
                    if bool(self.encoding):
                        data = data.encode(self.encoding)
                    self.stream.write(data)
                    start = end + 1
            if (
                0 < end < len(text) - 1
                and (ch == u' ' or start >= end)
                and self.column + (end - start) > self.best_width
                and split
            ):
                # fold the line with a trailing backslash
                data = text[start:end] + u'\\'
                if start < end:
                    start = end
                self.column += len(data)
                if bool(self.encoding):
                    data = data.encode(self.encoding)
                self.stream.write(data)
                self.write_indent()
                self.whitespace = False
                self.indention = False
                if text[start] == u' ':
                    # protect a leading space on the continuation line
                    data = u'\\'
                    self.column += len(data)
                    if bool(self.encoding):
                        data = data.encode(self.encoding)
                    self.stream.write(data)
            end += 1
        self.write_indicator(u'"', False)
    def determine_block_hints(self, text):
        # type: (Any) -> Any
        """Return ``(hints, indent, indicator)`` for a block scalar header.

        *hints* is the string written after '|' or '>': an explicit
        indentation indicator when the text begins with a space or a line
        break (or contains a document marker at the root), plus a chomping
        indicator — '-' when the final line break is missing, '+' when
        trailing breaks must be kept.
        """
        indent = 0
        indicator = u''
        hints = u''
        if text:
            if text[0] in u' \n\x85\u2028\u2029':
                # leading whitespace forces an explicit indentation indicator
                indent = self.best_sequence_indent
                hints += text_type(indent)
            elif self.root_context:
                # at document root, a '---' or '...' at the start of a line
                # inside the scalar also forces explicit indentation
                for end in ['\n---', '\n...']:
                    pos = 0
                    while True:
                        pos = text.find(end, pos)
                        if pos == -1:
                            break
                        try:
                            if text[pos + 4] in ' \r\n':
                                break
                        except IndexError:
                            pass
                        pos += 1
                    if pos > -1:
                        break
                if pos > 0:
                    indent = self.best_sequence_indent
            if text[-1] not in u'\n\x85\u2028\u2029':
                indicator = u'-'
            elif len(text) == 1 or text[-2] in u'\n\x85\u2028\u2029':
                indicator = u'+'
        hints += indicator
        return hints, indent, indicator
def write_folded(self, text):
# type: (Any) -> None
hints, _indent, _indicator = self.determine_block_hints(text)
self.write_indicator(u'>' + hints, True)
if _indicator == u'+':
self.open_ended = True
self.write_line_break()
leading_space = True
spaces = False
breaks = True
start = end = 0
while end <= len(text):
ch = None
if end < len(text):
ch = text[end]
if breaks:
if ch is None or ch not in u'\n\x85\u2028\u2029\a':
if (
not leading_space
and ch is not None
and ch != u' '
and text[start] == u'\n'
):
self.write_line_break()
leading_space = ch == u' '
for br in text[start:end]:
if br == u'\n':
self.write_line_break()
else:
self.write_line_break(br)
if ch is not None:
self.write_indent()
start = end
elif spaces:
if ch != u' ':
if start + 1 == end and self.column > self.best_width:
self.write_indent()
else:
data = text[start:end]
self.column += len(data)
if bool(self.encoding):
data = data.encode(self.encoding)
self.stream.write(data)
start = end
else:
if ch is None or ch in u' \n\x85\u2028\u2029\a':
data = text[start:end]
self.column += len(data)
if bool(self.encoding):
data = data.encode(self.encoding)
self.stream.write(data)
if ch == u'\a':
if end < (len(text) - 1) and not text[end + 2].isspace():
self.write_line_break()
self.write_indent()
end += 2 # \a and the space that is inserted on the fold
else:
raise EmitterError('unexcpected fold indicator \\a before space')
if ch is None:
self.write_line_break()
start = end
if ch is not None:
breaks = ch in u'\n\x85\u2028\u2029'
spaces = ch == u' '
end += 1
    def write_literal(self, text, comment=None):
        # type: (Any, Any) -> None
        """Write *text* as a literal block scalar ('|'), preserving line
        breaks exactly; an end-of-line comment may follow the header.
        """
        hints, _indent, _indicator = self.determine_block_hints(text)
        self.write_indicator(u'|' + hints, True)
        try:
            # comment[1][0] holds the end-of-line comment for the header
            comment = comment[1][0]
            if comment:
                self.stream.write(comment)
        except (TypeError, IndexError):
            pass
        if _indicator == u'+':
            self.open_ended = True
        self.write_line_break()
        breaks = True
        start = end = 0
        while end <= len(text):
            ch = None
            if end < len(text):
                ch = text[end]
            if breaks:
                # end of a run of line breaks: write them and re-indent
                if ch is None or ch not in u'\n\x85\u2028\u2029':
                    for br in text[start:end]:
                        if br == u'\n':
                            self.write_line_break()
                        else:
                            self.write_line_break(br)
                    if ch is not None:
                        if self.root_context:
                            idnx = self.indent if self.indent is not None else 0
                            self.stream.write(u' ' * (_indent + idnx))
                        else:
                            self.write_indent()
                    start = end
            else:
                # end of a run of regular characters: write them verbatim
                if ch is None or ch in u'\n\x85\u2028\u2029':
                    data = text[start:end]
                    if bool(self.encoding):
                        data = data.encode(self.encoding)
                    self.stream.write(data)
                    if ch is None:
                        self.write_line_break()
                    start = end
            if ch is not None:
                breaks = ch in u'\n\x85\u2028\u2029'
            end += 1
    def write_plain(self, text, split=True):
        # type: (Any, Any) -> None
        """Write *text* as a plain (unquoted) scalar.

        Long runs of spaces may be replaced by an indented line break
        when *split* is true; line breaks in the text are normalized.
        """
        if self.root_context:
            if self.requested_indent is not None:
                self.write_line_break()
                if self.requested_indent != 0:
                    self.write_indent()
            else:
                self.open_ended = True
        if not text:
            return
        if not self.whitespace:
            # separate from the previous token with a single space
            data = u' '
            self.column += len(data)
            if self.encoding:
                data = data.encode(self.encoding)
            self.stream.write(data)
        self.whitespace = False
        self.indention = False
        spaces = False
        breaks = False
        start = end = 0
        while end <= len(text):
            ch = None
            if end < len(text):
                ch = text[end]
            if spaces:
                # end of a run of spaces: fold into an indent or write
                if ch != u' ':
                    if start + 1 == end and self.column > self.best_width and split:
                        self.write_indent()
                        self.whitespace = False
                        self.indention = False
                    else:
                        data = text[start:end]
                        self.column += len(data)
                        if self.encoding:
                            data = data.encode(self.encoding)
                        self.stream.write(data)
                    start = end
            elif breaks:
                # end of a run of line breaks: normalize and re-indent
                if ch not in u'\n\x85\u2028\u2029':  # type: ignore
                    if text[start] == u'\n':
                        self.write_line_break()
                    for br in text[start:end]:
                        if br == u'\n':
                            self.write_line_break()
                        else:
                            self.write_line_break(br)
                    self.write_indent()
                    self.whitespace = False
                    self.indention = False
                    start = end
            else:
                if ch is None or ch in u' \n\x85\u2028\u2029':
                    data = text[start:end]
                    self.column += len(data)
                    if self.encoding:
                        data = data.encode(self.encoding)
                    try:
                        self.stream.write(data)
                    except:  # NOQA
                        sys.stdout.write(repr(data) + '\n')
                        raise
                    start = end
            if ch is not None:
                spaces = ch == u' '
                breaks = ch in u'\n\x85\u2028\u2029'
            end += 1
    def write_comment(self, comment, pre=False):
        # type: (Any, bool) -> None
        """Write a comment token at (roughly) its original column.

        :param pre: the comment precedes the commented node; no line
            break is written after it
        """
        value = comment.value
        # nprintf('{:02d} {:02d} {!r}'.format(self.column, comment.start_mark.column, value))
        if not pre and value[-1] == '\n':
            value = value[:-1]
        try:
            # get original column position
            col = comment.start_mark.column
            if comment.value and comment.value.startswith('\n'):
                # never inject extra spaces if the comment starts with a newline
                # and not a real comment (e.g. if you have an empty line following a key-value
                col = self.column
            elif col < self.column + 1:
                # NOTE(review): this bare ``ValueError`` is a no-op
                # expression, not a ``raise``, so the except branch below
                # is never reached from here.  Looks like ``raise
                # ValueError`` was intended — confirm before changing, as
                # the nr_spaces logic below partially compensates.
                ValueError
        except ValueError:
            col = self.column + 1
        # nprint('post_comment', self.line, self.column, value)
        try:
            # at least one space if the current column >= the start column of the comment
            # but not at the start of a line
            nr_spaces = col - self.column
            if self.column and value.strip() and nr_spaces < 1 and value[0] != '\n':
                nr_spaces = 1
            value = ' ' * nr_spaces + value
            try:
                if bool(self.encoding):
                    value = value.encode(self.encoding)
            except UnicodeDecodeError:
                pass
            self.stream.write(value)
        except TypeError:
            raise
        if not pre:
            self.write_line_break()
    def write_pre_comment(self, event):
        # type: (Any) -> bool
        """Write the comments preceding *event*, each on its own line.

        Returns True when the event carried a (possibly empty) list of
        pre-comments, False when there was none.
        """
        comments = event.comment[1]
        if comments is None:
            return False
        try:
            start_events = (MappingStartEvent, SequenceStartEvent)
            for comment in comments:
                # a pre-comment of a collection start event is written
                # only once, even if the event is processed again
                if isinstance(event, start_events) and getattr(comment, 'pre_done', None):
                    continue
                if self.column != 0:
                    self.write_line_break()
                self.write_comment(comment, pre=True)
                if isinstance(event, start_events):
                    comment.pre_done = True
        except TypeError:
            sys.stdout.write('eventtt {} {}'.format(type(event), event))
            raise
        return True
def write_post_comment(self, event):
# type: (Any) -> bool
if self.event.comment[0] is None:
return False
comment = event.comment[0]
self.write_comment(comment)
return True
| mit | 3ffbd7951dc10cfa39a97331d748f37d | 36.937796 | 157 | 0.504177 | 4.2469 | false | false | false | false |
koduj-z-klasa/python101 | docs/pygame/life/code2.py | 1 | 5501 | # coding=utf-8
import pygame
import pygame.locals
class Board(object):
    """
    Game board.  Responsible for drawing the game window.
    """
    def __init__(self, width, height):
        """
        Board constructor; prepares the game window.
        :param width: width in pixels
        :param height: height in pixels
        """
        self.surface = pygame.display.set_mode((width, height), 0, 32)
        pygame.display.set_caption('Game of life')
    def draw(self, *args):
        """
        Draw the game window.
        :param args: list of drawable objects to render
        """
        background = (0, 0, 0)
        self.surface.fill(background)
        for drawable in args:
            drawable.draw_on(self.surface)
        # only here does the actual drawing in the game window happen;
        # before this point we merely described what to draw and how
        pygame.display.update()
class GameOfLife(object):
    """
    Ties all the elements of the game together.
    """
    def __init__(self, width, height, cell_size=10):
        """
        Prepare the game settings.
        :param width: board width measured in cells
        :param height: board height measured in cells
        :param cell_size: cell edge length in pixels
        """
        pygame.init()
        self.board = Board(width * cell_size, height * cell_size)
        # clock used to control the speed of drawing
        # consecutive frames of the game
        self.fps_clock = pygame.time.Clock()
        self.population = Population(width, height, cell_size)
    def run(self):
        """
        Main game loop.
        """
        while not self.handle_events():
            # keep looping until we receive the signal to quit
            self.board.draw(
                self.population,
            )
            self.fps_clock.tick(15)
    def handle_events(self):
        """
        Handle system events, e.g. mouse movement.
        :return: True if pygame reported a quit event
        """
        for event in pygame.event.get():
            if event.type == pygame.locals.QUIT:
                pygame.quit()
                return True
            from pygame.locals import MOUSEMOTION, MOUSEBUTTONDOWN
            if event.type == MOUSEMOTION or event.type == MOUSEBUTTONDOWN:
                self.population.handle_mouse()
# magiczne liczby używane do określenia czy komórka jest żywa
DEAD = 0
ALIVE = 1
class Population(object):
    """
    Population of cells on the board.
    """
    def __init__(self, width, height, cell_size=10):
        """
        Prepare the population settings.
        :param width: board width measured in cells
        :param height: board height measured in cells
        :param cell_size: cell edge length in pixels
        """
        self.box_size = cell_size
        self.height = height
        self.width = width
        self.generation = self.reset_generation()
    def reset_generation(self):
        """
        Create and return a matrix representing an empty population.
        """
        # fill the list with columns, each of which is in turn
        # filled with the value 0 (DEAD)
        return [[DEAD for y in range(self.height)] for x in range(self.width)]
    def handle_mouse(self):
        # read the state of the mouse buttons via pygame
        buttons = pygame.mouse.get_pressed()
        if not any(buttons):
            # ignore the event if no button is pressed
            return
        # place a live cell when the first mouse button is pressed;
        # this way we can not only add live cells but also remove them
        alive = True if buttons[0] else False
        # read the cursor position on the board, measured in pixels
        x, y = pygame.mouse.get_pos()
        # convert pixel coordinates to matrix coordinates; the player may
        # click anywhere inside a box_size square to select a cell
        x /= self.box_size
        y /= self.box_size
        # set the cell state in the matrix
        self.generation[int(x)][int(y)] = ALIVE if alive else DEAD
    def draw_on(self, surface):
        """
        Draw the cells on the board.
        """
        for x, y in self.alive_cells():
            size = (self.box_size, self.box_size)
            position = (x * self.box_size, y * self.box_size)
            color = (255, 255, 255)
            thickness = 1
            pygame.draw.rect(surface, color, pygame.locals.Rect(position, size), thickness)
    def alive_cells(self):
        """
        Generator yielding the coordinates of live cells.
        """
        for x in range(len(self.generation)):
            column = self.generation[x]
            for y in range(len(column)):
                if column[y] == ALIVE:
                    # the cell is alive, yield its coordinates
                    yield x, y
# Ta część powinna być zawsze na końcu modułu (ten plik jest modułem)
# chcemy uruchomić naszą grę dopiero po tym jak wszystkie klasy zostaną zadeklarowane
if __name__ == "__main__":
game = GameOfLife(80, 40)
game.run()
| mit | e696a753fb70aac63ab28042af854324 | 30.77439 | 91 | 0.586419 | 2.834916 | false | false | false | false |
koduj-z-klasa/python101 | docs/mcpi/glife/mcpi-glife05.py | 1 | 7044 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# import sys
import os
from random import randint
from time import sleep
import mcpi.minecraft as minecraft # import modułu minecraft
import mcpi.block as block # import modułu block
os.environ["USERNAME"] = "Steve" # nazwa użytkownika
os.environ["COMPUTERNAME"] = "mykomp" # nazwa komputera
mc = minecraft.Minecraft.create("192.168.1.10") # połączenie z MCPi
class GraWZycie(object):
    """
    Main game class; ties all the elements together.
    """
    def __init__(self, mc, szer, wys, ile=40):
        """
        Prepare the game settings.
        :param mc: Minecraft connection object
        :param szer: board width measured in cells
        :param wys: board height measured in cells
        :param ile: number of cells to seed randomly; 0 means the
            initial population is read from the world instead
        """
        self.mc = mc
        mc.postToChat('Gra o zycie')
        self.szer = szer
        self.wys = wys
        self.populacja = Populacja(mc, szer, wys)  # instance of the Populacja class
        if ile:
            self.populacja.losuj(ile)
        else:
            self.populacja.wczytaj()
    def uruchom(self):
        """
        Main game loop.
        """
        i = 0
        while True:  # loop until we receive the signal to quit
            print("Generacja: " + str(i))
            self.plac(0, 0, 0, self.szer, self.wys)  # draw the playing field
            self.populacja.rysuj()
            self.populacja.nast_generacja()
            i += 1
            sleep(1)
    def plac(self, x, y, z, szer=20, wys=10):
        """
        Build the playing field in the world.
        """
        podloga = block.STONE
        wypelniacz = block.AIR
        granica = block.OBSIDIAN
        # boundary, ground, clearing the area
        self.mc.setBlocks(
            x - 5, y, z - 5,
            x + szer + 5, y + max(szer, wys), z + wys + 5, wypelniacz)
        self.mc.setBlocks(
            x - 1, y - 1, z - 1, x + szer + 1, y - 1, z + wys + 1, granica)
        self.mc.setBlocks(x, y - 1, z, x + szer, y - 1, z + wys, podloga)
        self.mc.setBlocks(
            x, y, z, x + szer, y + max(szer, wys), z + wys, wypelniacz)
# magiczne liczby używane do określenia czy komórka jest żywa
DEAD = 0
ALIVE = 1
BLOK_ALIVE = 35 # block.WOOL
class Populacja(object):
    """
    Population of cells.
    """
    def __init__(self, mc, ilex, iley):
        """
        Prepare the population settings.
        :param mc: Minecraft connection object
        :param ilex: x size of the cell matrix (rows)
        :param iley: y size of the cell matrix (columns)
        """
        self.mc = mc
        self.iley = iley
        self.ilex = ilex
        self.generacja = self.reset_generacja()
    def reset_generacja(self):
        """
        Create and return a matrix representing an empty population.
        """
        # the list comprehension creates x columns of y cells each,
        # filled with the value 0 (DEAD)
        return [[DEAD for y in xrange(self.iley)] for x in xrange(self.ilex)]
    def losuj(self, ile=50):
        """
        Randomly fill the matrix with live cells, i.e. the value 1 (ALIVE).
        """
        for i in range(ile):
            x = randint(0, self.ilex - 1)
            y = randint(0, self.iley - 1)
            self.generacja[x][y] = ALIVE
        print self.generacja
    def wczytaj(self):
        """
        Read the cell population from the Minecraft world.
        """
        ileKom = 0
        print "Proszę czekać, aktuzalizacja macierzy..."
        for x in range(self.ilex):
            for z in range(self.iley):
                blok = self.mc.getBlock(x, 0, z)
                if blok != block.AIR:
                    self.generacja[x][z] = ALIVE
                    ileKom += 1
        print self.generacja
        print "Żywych:", str(ileKom)
        sleep(3)
    def rysuj(self):
        """
        Draw the cells on the board, i.e. place the corresponding blocks.
        """
        print "Rysowanie macierzy..."
        for x, z in self.zywe_komorki():
            podtyp = randint(0, 15)
            # NOTE(review): uses the module-level ``mc``, not ``self.mc`` —
            # works only because both refer to the same connection;
            # consider self.mc.setBlock for consistency
            mc.setBlock(x, 0, z, BLOK_ALIVE, podtyp)
    def zywe_komorki(self):
        """
        Generator yielding the coordinates of live cells.
        """
        for x in range(len(self.generacja)):
            kolumna = self.generacja[x]
            for y in range(len(kolumna)):
                if kolumna[y] == ALIVE:
                    yield x, y  # the cell is alive, yield its coordinates
    def sasiedzi(self, x, y):
        """
        Generator yielding the states of all neighbouring cells
        (the board wraps around at the edges).
        """
        for nx in range(x - 1, x + 2):
            for ny in range(y - 1, y + 2):
                if nx == x and ny == y:
                    continue  # skip the centre coordinates
                if nx >= self.ilex:
                    # neighbour past the end of the board: take the
                    # first cell in the given row
                    nx = 0
                elif nx < 0:
                    # neighbour before the start of the board: take the
                    # last cell in the given row
                    nx = self.ilex - 1
                if ny >= self.iley:
                    # neighbour past the end of the board: take the
                    # first cell in the given column
                    ny = 0
                elif ny < 0:
                    # neighbour before the start of the board: take the
                    # last cell in the given column
                    ny = self.iley - 1
                # yield the state of the cell at the computed coordinates
                yield self.generacja[nx][ny]
    def nast_generacja(self):
        """
        Compute the next generation of the cell population.
        """
        print "Obliczanie generacji..."
        nast_gen = self.reset_generacja()
        for x in range(len(self.generacja)):
            kolumna = self.generacja[x]
            for y in range(len(kolumna)):
                # read the neighbours' states: a live cell contributes 1
                # (ALIVE) and a dead one 0 (DEAD), so a plain sum gives
                # the number of live neighbours
                iluS = sum(self.sasiedzi(x, y))
                if iluS == 3:
                    # reproduction
                    nast_gen[x][y] = ALIVE
                elif iluS == 2:
                    # survives into the next generation unchanged
                    nast_gen[x][y] = kolumna[y]
                else:
                    # too many or too few neighbours to survive
                    nast_gen[x][y] = DEAD
        # the new generation becomes the current one
        self.generacja = nast_gen
if __name__ == "__main__":
gra = GraWZycie(mc, 30, 20, 0) # instancja klasy GraWZycie
mc.player.setPos(20, 1, 10)
gra.uruchom() # wywołanie metody uruchom()
| mit | 09c488caff63d64e00dab05fbe63c4cd | 31.790244 | 79 | 0.511766 | 2.750993 | false | false | false | false |
koduj-z-klasa/python101 | docs/podstawy/przyklady/06_slownik_csv.py | 1 | 2677 | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
import os # moduł udostępniający funkcję isfile()
import csv # moduł do obsługi formatu csv
slownik = {} # pusty słownik
sFile = "slownik.csv" # nazwa pliku zawierającego wyrazy i ich tłumaczenia
def otworz(plik):
    """Load the dictionary from the CSV file *plik* into the global
    ``slownik`` and return the number of entries.

    Each row holds the foreign word followed by its meanings.
    """
    # bug fix: the original ignored the *plik* parameter and always read
    # the global sFile; the argument is now honoured (callers pass sFile,
    # so behaviour is unchanged for them)
    if os.path.isfile(plik):  # does the dictionary file exist?
        with open(plik, newline='') as plikcsv:  # open the file for reading
            tresc = csv.reader(plikcsv)
            for linia in tresc:  # walk through consecutive rows
                slownik[linia[0]] = linia[1:]
    return len(slownik)  # return the number of entries in the dictionary
def zapisz(slownik):
    """Write *slownik* to the CSV file; an existing file is overwritten(!).

    Each row contains the foreign word followed by its meanings.
    """
    with open(sFile, "w", newline='') as plikcsv:
        tresc = csv.writer(plikcsv)
        for wobcy in slownik:
            # bug fix: build the row without mutating the stored list of
            # meanings — the original insert(0, ...) prepended the key to
            # the in-memory list, corrupting the dictionary on every save
            tresc.writerow([wobcy] + slownik[wobcy])
def oczysc(str):
    """Normalize a word: strip surrounding whitespace and lowercase it."""
    return str.strip().lower()
def main(args):
    """Interactive loop: read word/meaning pairs, update the dictionary
    and save it to disk when anything changed, then print a summary.
    """
    print("""Podaj dane w formacie:
    wyraz obcy: znaczenie1, znaczenie2
    Aby zakończyć wprowadzanie danych, podaj 0.
    """)
    # wobce = set()  # empty set of foreign words
    # flag meaning the user added to or changed the dictionary
    nowy = False
    ileWyrazow = otworz(sFile)
    print("Wpisów w bazie:", ileWyrazow)
    # main program loop
    while True:
        dane = input("Podaj dane: ")
        t = dane.split(":")
        wobcy = t[0].strip().lower()  # same thing the oczysc() helper does
        if wobcy == 'koniec':
            break
        elif dane.count(":") == 1:  # validate the input format
            if wobcy in slownik:
                print("Wyraz", wobcy, " i jego znaczenia są już w słowniku.")
                op = input("Zastąpić wpis (t/n)? ")
            # is the word absent from the dictionary? or should it be replaced?
            if wobcy not in slownik or op == "t":
                znaczenia = t[1].split(",")  # store the meanings in a list
                znaczenia = list(map(oczysc, znaczenia))  # clean up the list
                slownik[wobcy] = znaczenia
                nowy = True
        else:
            print("Błędny format!")
    if nowy:
        zapisz(slownik)
    print("=" * 50)
    print("{0: <15}{1: <40}".format("Wyraz obcy", "Znaczenia"))
    print("=" * 50)
    for wobcy in slownik:
        print("{0: <15}{1: <40}".format(wobcy, ",".join(slownik[wobcy])))
    return 0
if __name__ == '__main__':
import sys
sys.exit(main(sys.argv))
| mit | 9497913348a04aaf3dbdb7c9bbb560a3 | 31.395062 | 77 | 0.587652 | 2.418433 | false | false | false | false |
koduj-z-klasa/python101 | docs/pygame/tictactoe_str/tictactoe_str2.py | 1 | 1654 | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
import pygame
import sys
import random
from pygame.locals import * # udostępnienie nazw metod z locals
# inicjacja modułu pygame
pygame.init()
# przygotowanie powierzchni do rysowania, czyli inicjacja okna gry
OKNOGRY = pygame.display.set_mode((150, 150), 0, 32)
# tytuł okna gry
pygame.display.set_caption('Kółko i krzyżyk')
# lista opisująca stan pola gry, 0 - pole puste, 1 - gracz, 2 - komputer
POLE_GRY = [0, 0, 0,
0, 0, 0,
0, 0, 0]
RUCH = 1 # do kogo należy ruch: 1 – gracz, 2 – komputer
WYGRANY = 0 # wynik gry: 0 - nikt, 1 - gracz, 2 - komputer, 3 - remis
WYGRANA = False
# draw the game board, i.e. the lines separating the fields
def rysuj_plansze():
    for i in range(0, 3):  # x
        for j in range(0, 3):  # y
            # arguments: surface, color, x,y, w,h, line thickness
            pygame.draw.rect(OKNOGRY, (255, 255, 255),
                             Rect((j * 50, i * 50), (50, 50)), 1)
# draw the circles
def rysuj_pole_gry():
    for i in range(0, 3):
        for j in range(0, 3):
            pole = i * 3 + j  # pole takes the values 0-8
            # x and y are the centres of consecutive fields,
            # i.e. the values 25,25  25,75  25,125  75,25 etc.
            x = j * 50 + 25
            y = i * 50 + 25
            if POLE_GRY[pole] == 1:
                # draw the player's circle
                pygame.draw.circle(OKNOGRY, (0, 0, 255), (x, y), 10)
            elif POLE_GRY[pole] == 2:
                # draw the computer's circle
                pygame.draw.circle(OKNOGRY, (255, 0, 0), (x, y), 10)
| mit | 5158d6eb3c8412b5fefac906f0a789ae | 29.679245 | 72 | 0.559656 | 2.373723 | false | false | false | false |
koduj-z-klasa/python101 | docs/podstawy/przyklady/ocenyfun.py | 1 | 1478 | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Moduł ocenyfun zawiera funkcje wykorzystywane w pliku 05_oceny_03.py
"""
import math # zaimportuj moduł matematyczny
def drukuj(co, kom="Sekwencja zawiera: "):
    """Print the message *kom*, then every item of *co*, each followed
    by a single space, on one line without a trailing newline.
    """
    print(kom)
    print("".join("{} ".format(element) for element in co), end="")
def srednia(oceny):
    """Return the arithmetic mean of the grades in *oceny* as a float."""
    return float(sum(oceny)) / len(oceny)
def mediana(oceny):
    """Return the median of *oceny* (the list is sorted in place).

    For an even number of grades the median is the arithmetic mean of
    the two middle grades; for an odd number it is the middle element
    of the list sorted in ascending order.
    """
    oceny.sort()
    half = len(oceny) // 2
    if len(oceny) % 2 == 0:  # even number of grades
        return float(sum(oceny[half - 1:half + 1])) / 2.0
    else:  # odd number of grades
        # bug fix: the index must be an integer; the original used true
        # division (len(oceny) / 2), which raises TypeError on Python 3
        return oceny[half]
def wariancja(oceny, srednia):
    """
    Population variance: the sum of the squared differences between
    each grade and the mean, divided by the number of grades:
    sigma = ((o1-s)**2 + (o2-s)**2 + ... + (on-s)**2) / n, where:
    o1, o2, ..., on - the consecutive grades,
    s - the mean of the grades,
    n - the number of grades.
    """
    return sum((ocena - srednia) ** 2 for ocena in oceny) / len(oceny)
def odchylenie(oceny, srednia):
    """Standard deviation: the square root of the variance."""
    return math.sqrt(wariancja(oceny, srednia))
| mit | b640d518f7548f82db21a671ff221165 | 24.875 | 72 | 0.618357 | 2.202128 | false | false | false | false |
rochacbruno/dynaconf | dynaconf/constants.py | 1 | 1259 | # pragma: no cover
from __future__ import annotations
INI_EXTENSIONS = (".ini", ".conf", ".properties")
TOML_EXTENSIONS = (".toml", ".tml")
YAML_EXTENSIONS = (".yaml", ".yml")
JSON_EXTENSIONS = (".json",)
ALL_EXTENSIONS = (
INI_EXTENSIONS + TOML_EXTENSIONS + YAML_EXTENSIONS + JSON_EXTENSIONS
) # noqa
EXTERNAL_LOADERS = {
"ENV": "dynaconf.loaders.env_loader",
"VAULT": "dynaconf.loaders.vault_loader",
"REDIS": "dynaconf.loaders.redis_loader",
}
DJANGO_PATCH = """
# HERE STARTS DYNACONF EXTENSION LOAD (Keep at the very bottom of settings.py)
# Read more at https://www.dynaconf.com/django/
import dynaconf # noqa
settings = dynaconf.DjangoDynaconf(__name__) # noqa
# HERE ENDS DYNACONF EXTENSION LOAD (No more code below this line)
"""
INSTANCE_TEMPLATE = """
from dynaconf import Dynaconf
settings = Dynaconf(
envvar_prefix="DYNACONF",
settings_files={settings_files},
)
# `envvar_prefix` = export envvars with `export DYNACONF_FOO=bar`.
# `settings_files` = Load these files in the order.
"""
EXTS = (
"py",
"toml",
"tml",
"yaml",
"yml",
"ini",
"conf",
"properties",
"json",
)
DEFAULT_SETTINGS_FILES = [f"settings.{ext}" for ext in EXTS] + [
f".secrets.{ext}" for ext in EXTS
]
| mit | 3c4f1fe35170de87eed90ba6380df44c | 23.211538 | 78 | 0.651311 | 2.941589 | false | false | false | false |
koduj-z-klasa/python101 | docs/mcpi/algorytmy/mcpi-lpi02.py | 1 | 2565 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import random
from time import sleep
import mcpi.minecraft as minecraft # import modułu minecraft
import mcpi.block as block # import modułu block
import local.minecraftstuff as mcstuff
os.environ["USERNAME"] = "Steve" # nazwa użytkownika
os.environ["COMPUTERNAME"] = "mykomp" # nazwa komputera
mc = minecraft.Minecraft.create("192.168.1.10") # połączenie z serwerem
def plac(x, y, z, roz=10, gracz=False):
"""Funkcja wypełnia sześcienny obszar od podanej pozycji
powietrzem i opcjonalnie umieszcza gracza w środku.
Parametry: x, y, z - współrzędne pozycji początkowej,
roz - rozmiar wypełnianej przestrzeni,
gracz - czy umieścić gracza w środku
Wymaga: globalnych obiektów mc i block.
"""
podloga = block.STONE
wypelniacz = block.AIR
# kamienna podłoże
mc.setBlocks(x, y - 1, z, x + roz, y - 1, z + roz, podloga)
# czyszczenie
mc.setBlocks(x, y, z, x + roz, y + roz, z + roz, wypelniacz)
# umieść gracza w środku
if gracz:
mc.player.setPos(x + roz / 2, y + roz / 2, z + roz / 2)
def model(promien, x, y, z):
"""
Fukcja buduje obrys kwadratu, którego środek to punkt x, y, z
oraz koło wpisane w ten kwadrat
"""
mcfig = mcstuff.MinecraftDrawing(mc)
obrys = block.SANDSTONE
wypelniacz = block.AIR
mc.setBlocks(x - promien, y, z - promien, x +
promien, y, z + promien, obrys)
mc.setBlocks(x - promien + 1, y, z - promien + 1, x +
promien - 1, y, z + promien - 1, wypelniacz)
mcfig.drawHorizontalCircle(0, 0, 0, promien, block.GRASS)
def liczbaPi():
r = float(raw_input("Podaj promień koła: "))
model(r, 0, 0, 0)
# pobieramy ilość punktów w kwadracie
ileKw = int(raw_input("Podaj ilość losowanych punktów: "))
ileKo = 0 # ilość punktów w kole
blok = block.SAND
for i in range(ileKw):
x = round(random.uniform(-r, r))
y = round(random.uniform(-r, r))
print x, y
if abs(x)**2 + abs(y)**2 <= r**2:
ileKo += 1
# umieść blok w MC Pi
mc.setBlock(x, 10, y, blok)
mc.postToChat("W kole = " + str(ileKo) + " W Kwadracie = " + str(ileKw))
pi = 4 * ileKo / float(ileKw)
mc.postToChat("Pi w przyblizeniu: {:.10f}".format(pi))
def main():
mc.postToChat("LiczbaPi") # wysłanie komunikatu do mc
plac(-50, 0, -50, 100)
mc.player.setPos(20, 20, 0)
liczbaPi()
return 0
if __name__ == '__main__':
main()
| mit | 8d7ce47ae4d36184d7370631647fb2a9 | 28.034483 | 76 | 0.614014 | 2.319559 | false | false | false | false |
koduj-z-klasa/python101 | docs/mcpi/funkcje/mcpi-funkcje05.py | 1 | 5725 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import numpy as np # import biblioteki do obliczeń naukowych
import matplotlib.pyplot as plt # import biblioteki do tworzenia wykresów
import mcpi.minecraft as minecraft # import modułu minecraft
import mcpi.block as block # import modułu block
os.environ["USERNAME"] = "Steve" # wpisz dowolną nazwę użytkownika
os.environ["COMPUTERNAME"] = "mykomp" # wpisz dowolną nazwę komputera
mc = minecraft.Minecraft.create("192.168.1.10") # połaczenie z mc
def plac(x, y, z, roz=10, gracz=False):
"""
Funkcja tworzy podłoże i wypełnia sześcienny obszar od podanej pozycji,
opcjonalnie umieszcza gracza w środku.
Parametry: x, y, z - współrzędne pozycji początkowej,
roz - rozmiar wypełnianej przestrzeni,
gracz - czy umieścić gracza w środku
Wymaga: globalnych obiektów mc i block.
"""
podloga = block.STONE
wypelniacz = block.AIR
# podloga i czyszczenie
mc.setBlocks(x, y - 1, z, x + roz, y - 1, z + roz, podloga)
mc.setBlocks(x, y, z, x + roz, y + roz, z + roz, wypelniacz)
# umieść gracza w środku
if gracz:
mc.player.setPos(x + roz / 2, y + roz / 2, z + roz / 2)
def uklad(blok=block.OBSIDIAN):
"""
Funkcja rysuje układ współrzędnych
"""
for i in range(-80, 81, 2):
mc.setBlock(i, -1, 0, blok)
mc.setBlock(0, -1, i, blok)
mc.setBlock(0, i, 0, blok)
def rysuj(x, y, z, blok=block.IRON_BLOCK):
"""
Funkcja wizualizuje wykres funkcji, umieszczając bloki w pionie/poziomie
w punktach wyznaczonych przez pary elementów list x, y lub x, z
"""
czylista = True if len(y) > 1 else False
for i in range(len(x)):
if czylista:
print(x[i], y[i])
mc.setBlock(x[i], y[i], z[0], blok)
else:
print(x[i], z[i])
mc.setBlock(x[i], y[0], z[i], blok)
def rysuj_linie(x, y, z, blok=block.IRON_BLOCK):
"""
Funkcja wizualizuje wykres funkcji, umieszczając bloki w pionie/poziomie
w punktach wyznaczonych przez pary elementów list x, y lub x, z
przy użyciu metody drawLine()
"""
import local.minecraftstuff as mcstuff
mcfig = mcstuff.MinecraftDrawing(mc)
czylista = True if len(y) > 1 else False
for i in range(len(x) - 1):
x1 = int(x[i])
x2 = int(x[i + 1])
if czylista:
y1 = int(y[i])
y2 = int(y[i + 1])
print (x1, y1, z[0], x2, y2, z[0])
mcfig.drawLine(x1, y1, z[0], x2, y2, z[0], blok)
else:
z1 = int(z[i])
z2 = int(z[i + 1])
print (x1, y[0], z1, x2, y[0], z2)
mcfig.drawLine(x1, y[0], z1, x2, y[0], z2, blok)
def wykres(x, y, tytul="Wykres funkcji", *extra):
"""
Funkcja wizualizuje wykres funkcji, której argumenty zawiera lista x
a wartości lista y i ew. dodatkowe listy w parametrze *extra
"""
if len(extra):
plt.plot(x, y, extra[0], extra[1]) # dwa wykresy na raz
else:
plt.plot(x, y)
plt.title(tytul)
# plt.xlabel(podpis)
plt.grid(True)
plt.show()
def fun1(blok=block.IRON_BLOCK):
"""
Funkcja f(x) = a*x + b
"""
a = int(raw_input('Podaj współczynnik a: '))
b = int(raw_input('Podaj współczynnik b: '))
x = range(-10, 11) # lista argumentów x = <-10;10> z krokiem 1
y = [a * i + b for i in x] # wyrażenie listowe
print x, "\n", y
wykres(x, y, "f(x) = a*x + b")
rysuj_linie(x, y, [1], blok)
def fun2(blok=block.REDSTONE_ORE):
"""
Wykres funkcji f(x), gdzie x = <-1;2> z krokiem 0.15, przy czym
f(x) = x/(x+2) dla x >= 1
f(x) = x*x/3 dla x < 1 i x > 0
f(x) = x/(-3) dla x <= 0
"""
x = np.arange(-1, 2.15, 0.15) # lista argumentów x
y = [] # lista wartości f(x)
for i in x:
if i <= 0:
y.append(i / -3)
elif i < 1:
y.append(i ** 2 / 3)
else:
y.append(i / (i + 2))
wykres(x, y, "Funkcja mieszana")
x = [round(i * 20, 2) for i in x]
y = [round(i * 20, 2) for i in y]
print x, "\n", y
rysuj(x, y, [1], blok)
def fun3(blok=block.LAPIS_LAZULI_BLOCK):
"""
Funkcja f(x) = log2(x)
"""
x = np.arange(0.1, 41, 1) # lista argumentów x
y = [np.log2(i) for i in x]
y = [round(i, 2) * 2 for i in y]
print x, "\n", y
wykres(x, y, "Funkcja logarytmiczna")
rysuj(x, y, [1], blok)
def fkw(x, a=0.3, b=0.1, c=0):
return a * x**2 + b * x + c
def fkwadratowa():
"""
Funkcja przygotowuje dziedzinę funkcji kwadratowej
oraz dwie przeciwdziedziny, druga z odwróconym znakiem. Następnie
buduje ich wykresy w poziomie i w pionie.
"""
while True:
lewy = float(raw_input("Podaj lewy kraniec przedziału: "))
prawy = float(raw_input("Podaj prawy kraniec przedziału: "))
if lewy * prawy < 1 and lewy <= prawy:
break
print lewy, prawy
# x = np.arange(lewy, prawy, 0.2)
x = np.linspace(lewy, prawy, 60, True)
x = [round(i, 2) for i in x]
y1 = [fkw(i) for i in x]
y1 = [round(i, 2) for i in y1]
y2 = [-fkw(i) for i in x]
y2 = [round(i, 2) for i in y2]
print x, "\n", y1, "\n", y2
wykres(x, y1, "Funkcja kwadratowa", x, y2)
rysuj_linie(x, [1], y1, block.GRASS)
rysuj(x, [1], y2, block.SAND)
rysuj(x, y1, [1], block.WOOL)
rysuj_linie(x, y2, [1], block.IRON_BLOCK)
def main():
mc.postToChat("Funkcje w Minecrafcie") # wysłanie komunikatu do mc
plac(-80, -20, -80, 160)
mc.player.setPos(-15, 10, -15)
uklad(block.OBSIDIAN)
fkwadratowa()
return 0
if __name__ == '__main__':
main()
| mit | 776f3144e66d953a4d516e8c738982e5 | 28.541667 | 76 | 0.566467 | 2.228684 | false | false | false | false |
rochacbruno/dynaconf | dynaconf/vendor_src/click/decorators.py | 1 | 11109 | import inspect
import sys
from functools import update_wrapper
from .core import Argument
from .core import Command
from .core import Group
from .core import Option
from .globals import get_current_context
from .utils import echo
def pass_context(f):
"""Marks a callback as wanting to receive the current context
object as first argument.
"""
def new_func(*args, **kwargs):
return f(get_current_context(), *args, **kwargs)
return update_wrapper(new_func, f)
def pass_obj(f):
"""Similar to :func:`pass_context`, but only pass the object on the
context onwards (:attr:`Context.obj`). This is useful if that object
represents the state of a nested system.
"""
def new_func(*args, **kwargs):
return f(get_current_context().obj, *args, **kwargs)
return update_wrapper(new_func, f)
def make_pass_decorator(object_type, ensure=False):
"""Given an object type this creates a decorator that will work
similar to :func:`pass_obj` but instead of passing the object of the
current context, it will find the innermost context of type
:func:`object_type`.
This generates a decorator that works roughly like this::
from functools import update_wrapper
def decorator(f):
@pass_context
def new_func(ctx, *args, **kwargs):
obj = ctx.find_object(object_type)
return ctx.invoke(f, obj, *args, **kwargs)
return update_wrapper(new_func, f)
return decorator
:param object_type: the type of the object to pass.
:param ensure: if set to `True`, a new object will be created and
remembered on the context if it's not there yet.
"""
def decorator(f):
def new_func(*args, **kwargs):
ctx = get_current_context()
if ensure:
obj = ctx.ensure_object(object_type)
else:
obj = ctx.find_object(object_type)
if obj is None:
raise RuntimeError(
"Managed to invoke callback without a context"
f" object of type {object_type.__name__!r}"
" existing."
)
return ctx.invoke(f, obj, *args, **kwargs)
return update_wrapper(new_func, f)
return decorator
def _make_command(f, name, attrs, cls):
if isinstance(f, Command):
raise TypeError("Attempted to convert a callback into a command twice.")
try:
params = f.__click_params__
params.reverse()
del f.__click_params__
except AttributeError:
params = []
help = attrs.get("help")
if help is None:
help = inspect.getdoc(f)
if isinstance(help, bytes):
help = help.decode("utf-8")
else:
help = inspect.cleandoc(help)
attrs["help"] = help
return cls(
name=name or f.__name__.lower().replace("_", "-"),
callback=f,
params=params,
**attrs,
)
def command(name=None, cls=None, **attrs):
r"""Creates a new :class:`Command` and uses the decorated function as
callback. This will also automatically attach all decorated
:func:`option`\s and :func:`argument`\s as parameters to the command.
The name of the command defaults to the name of the function with
underscores replaced by dashes. If you want to change that, you can
pass the intended name as the first argument.
All keyword arguments are forwarded to the underlying command class.
Once decorated the function turns into a :class:`Command` instance
that can be invoked as a command line utility or be attached to a
command :class:`Group`.
:param name: the name of the command. This defaults to the function
name with underscores replaced by dashes.
:param cls: the command class to instantiate. This defaults to
:class:`Command`.
"""
if cls is None:
cls = Command
def decorator(f):
cmd = _make_command(f, name, attrs, cls)
cmd.__doc__ = f.__doc__
return cmd
return decorator
def group(name=None, **attrs):
"""Creates a new :class:`Group` with a function as callback. This
works otherwise the same as :func:`command` just that the `cls`
parameter is set to :class:`Group`.
"""
attrs.setdefault("cls", Group)
return command(name, **attrs)
def _param_memo(f, param):
if isinstance(f, Command):
f.params.append(param)
else:
if not hasattr(f, "__click_params__"):
f.__click_params__ = []
f.__click_params__.append(param)
def argument(*param_decls, **attrs):
"""Attaches an argument to the command. All positional arguments are
passed as parameter declarations to :class:`Argument`; all keyword
arguments are forwarded unchanged (except ``cls``).
This is equivalent to creating an :class:`Argument` instance manually
and attaching it to the :attr:`Command.params` list.
:param cls: the argument class to instantiate. This defaults to
:class:`Argument`.
"""
def decorator(f):
ArgumentClass = attrs.pop("cls", Argument)
_param_memo(f, ArgumentClass(param_decls, **attrs))
return f
return decorator
def option(*param_decls, **attrs):
"""Attaches an option to the command. All positional arguments are
passed as parameter declarations to :class:`Option`; all keyword
arguments are forwarded unchanged (except ``cls``).
This is equivalent to creating an :class:`Option` instance manually
and attaching it to the :attr:`Command.params` list.
:param cls: the option class to instantiate. This defaults to
:class:`Option`.
"""
def decorator(f):
# Issue 926, copy attrs, so pre-defined options can re-use the same cls=
option_attrs = attrs.copy()
if "help" in option_attrs:
option_attrs["help"] = inspect.cleandoc(option_attrs["help"])
OptionClass = option_attrs.pop("cls", Option)
_param_memo(f, OptionClass(param_decls, **option_attrs))
return f
return decorator
def confirmation_option(*param_decls, **attrs):
"""Shortcut for confirmation prompts that can be ignored by passing
``--yes`` as parameter.
This is equivalent to decorating a function with :func:`option` with
the following parameters::
def callback(ctx, param, value):
if not value:
ctx.abort()
@click.command()
@click.option('--yes', is_flag=True, callback=callback,
expose_value=False, prompt='Do you want to continue?')
def dropdb():
pass
"""
def decorator(f):
def callback(ctx, param, value):
if not value:
ctx.abort()
attrs.setdefault("is_flag", True)
attrs.setdefault("callback", callback)
attrs.setdefault("expose_value", False)
attrs.setdefault("prompt", "Do you want to continue?")
attrs.setdefault("help", "Confirm the action without prompting.")
return option(*(param_decls or ("--yes",)), **attrs)(f)
return decorator
def password_option(*param_decls, **attrs):
"""Shortcut for password prompts.
This is equivalent to decorating a function with :func:`option` with
the following parameters::
@click.command()
@click.option('--password', prompt=True, confirmation_prompt=True,
hide_input=True)
def changeadmin(password):
pass
"""
def decorator(f):
attrs.setdefault("prompt", True)
attrs.setdefault("confirmation_prompt", True)
attrs.setdefault("hide_input", True)
return option(*(param_decls or ("--password",)), **attrs)(f)
return decorator
def version_option(version=None, *param_decls, **attrs):
"""Adds a ``--version`` option which immediately ends the program
printing out the version number. This is implemented as an eager
option that prints the version and exits the program in the callback.
:param version: the version number to show. If not provided Click
attempts an auto discovery via setuptools.
:param prog_name: the name of the program (defaults to autodetection)
:param message: custom message to show instead of the default
(``'%(prog)s, version %(version)s'``)
:param others: everything else is forwarded to :func:`option`.
"""
if version is None:
if hasattr(sys, "_getframe"):
module = sys._getframe(1).f_globals.get("__name__")
else:
module = ""
def decorator(f):
prog_name = attrs.pop("prog_name", None)
message = attrs.pop("message", "%(prog)s, version %(version)s")
def callback(ctx, param, value):
if not value or ctx.resilient_parsing:
return
prog = prog_name
if prog is None:
prog = ctx.find_root().info_name
ver = version
if ver is None:
try:
import pkg_resources
except ImportError:
pass
else:
for dist in pkg_resources.working_set:
scripts = dist.get_entry_map().get("console_scripts") or {}
for entry_point in scripts.values():
if entry_point.module_name == module:
ver = dist.version
break
if ver is None:
raise RuntimeError("Could not determine version")
echo(message % {"prog": prog, "version": ver}, color=ctx.color)
ctx.exit()
attrs.setdefault("is_flag", True)
attrs.setdefault("expose_value", False)
attrs.setdefault("is_eager", True)
attrs.setdefault("help", "Show the version and exit.")
attrs["callback"] = callback
return option(*(param_decls or ("--version",)), **attrs)(f)
return decorator
def help_option(*param_decls, **attrs):
"""Adds a ``--help`` option which immediately ends the program
printing out the help page. This is usually unnecessary to add as
this is added by default to all commands unless suppressed.
Like :func:`version_option`, this is implemented as eager option that
prints in the callback and exits.
All arguments are forwarded to :func:`option`.
"""
def decorator(f):
def callback(ctx, param, value):
if value and not ctx.resilient_parsing:
echo(ctx.get_help(), color=ctx.color)
ctx.exit()
attrs.setdefault("is_flag", True)
attrs.setdefault("expose_value", False)
attrs.setdefault("help", "Show this message and exit.")
attrs.setdefault("is_eager", True)
attrs["callback"] = callback
return option(*(param_decls or ("--help",)), **attrs)(f)
return decorator
| mit | 89d307ed9b4cbfd351a10f2e5bad573c | 32.561934 | 83 | 0.603565 | 4.361602 | false | false | false | false |
rochacbruno/dynaconf | dynaconf/vendor_src/click/_bashcomplete.py | 1 | 12216 | import copy
import os
import re
from collections import abc
from .core import Argument
from .core import MultiCommand
from .core import Option
from .parser import split_arg_string
from .types import Choice
from .utils import echo
WORDBREAK = "="
# Note, only BASH version 4.4 and later have the nosort option.
COMPLETION_SCRIPT_BASH = """
%(complete_func)s() {
local IFS=$'\n'
COMPREPLY=( $( env COMP_WORDS="${COMP_WORDS[*]}" \\
COMP_CWORD=$COMP_CWORD \\
%(autocomplete_var)s=complete $1 ) )
return 0
}
%(complete_func)setup() {
local COMPLETION_OPTIONS=""
local BASH_VERSION_ARR=(${BASH_VERSION//./ })
# Only BASH version 4.4 and later have the nosort option.
if [ ${BASH_VERSION_ARR[0]} -gt 4 ] || ([ ${BASH_VERSION_ARR[0]} -eq 4 ] \
&& [ ${BASH_VERSION_ARR[1]} -ge 4 ]); then
COMPLETION_OPTIONS="-o nosort"
fi
complete $COMPLETION_OPTIONS -F %(complete_func)s %(script_names)s
}
%(complete_func)setup
"""
COMPLETION_SCRIPT_ZSH = """
#compdef %(script_names)s
%(complete_func)s() {
local -a completions
local -a completions_with_descriptions
local -a response
(( ! $+commands[%(script_names)s] )) && return 1
response=("${(@f)$( env COMP_WORDS=\"${words[*]}\" \\
COMP_CWORD=$((CURRENT-1)) \\
%(autocomplete_var)s=\"complete_zsh\" \\
%(script_names)s )}")
for key descr in ${(kv)response}; do
if [[ "$descr" == "_" ]]; then
completions+=("$key")
else
completions_with_descriptions+=("$key":"$descr")
fi
done
if [ -n "$completions_with_descriptions" ]; then
_describe -V unsorted completions_with_descriptions -U
fi
if [ -n "$completions" ]; then
compadd -U -V unsorted -a completions
fi
compstate[insert]="automenu"
}
compdef %(complete_func)s %(script_names)s
"""
COMPLETION_SCRIPT_FISH = (
"complete --no-files --command %(script_names)s --arguments"
' "(env %(autocomplete_var)s=complete_fish'
" COMP_WORDS=(commandline -cp) COMP_CWORD=(commandline -t)"
' %(script_names)s)"'
)
_completion_scripts = {
"bash": COMPLETION_SCRIPT_BASH,
"zsh": COMPLETION_SCRIPT_ZSH,
"fish": COMPLETION_SCRIPT_FISH,
}
_invalid_ident_char_re = re.compile(r"[^a-zA-Z0-9_]")
def get_completion_script(prog_name, complete_var, shell):
cf_name = _invalid_ident_char_re.sub("", prog_name.replace("-", "_"))
script = _completion_scripts.get(shell, COMPLETION_SCRIPT_BASH)
return (
script
% {
"complete_func": f"_{cf_name}_completion",
"script_names": prog_name,
"autocomplete_var": complete_var,
}
).strip() + ";"
def resolve_ctx(cli, prog_name, args):
"""Parse into a hierarchy of contexts. Contexts are connected
through the parent variable.
:param cli: command definition
:param prog_name: the program that is running
:param args: full list of args
:return: the final context/command parsed
"""
ctx = cli.make_context(prog_name, args, resilient_parsing=True)
args = ctx.protected_args + ctx.args
while args:
if isinstance(ctx.command, MultiCommand):
if not ctx.command.chain:
cmd_name, cmd, args = ctx.command.resolve_command(ctx, args)
if cmd is None:
return ctx
ctx = cmd.make_context(
cmd_name, args, parent=ctx, resilient_parsing=True
)
args = ctx.protected_args + ctx.args
else:
# Walk chained subcommand contexts saving the last one.
while args:
cmd_name, cmd, args = ctx.command.resolve_command(ctx, args)
if cmd is None:
return ctx
sub_ctx = cmd.make_context(
cmd_name,
args,
parent=ctx,
allow_extra_args=True,
allow_interspersed_args=False,
resilient_parsing=True,
)
args = sub_ctx.args
ctx = sub_ctx
args = sub_ctx.protected_args + sub_ctx.args
else:
break
return ctx
def start_of_option(param_str):
"""
:param param_str: param_str to check
:return: whether or not this is the start of an option declaration
(i.e. starts "-" or "--")
"""
return param_str and param_str[:1] == "-"
def is_incomplete_option(all_args, cmd_param):
"""
:param all_args: the full original list of args supplied
:param cmd_param: the current command parameter
:return: whether or not the last option declaration (i.e. starts
"-" or "--") is incomplete and corresponds to this cmd_param. In
other words whether this cmd_param option can still accept
values
"""
if not isinstance(cmd_param, Option):
return False
if cmd_param.is_flag:
return False
last_option = None
for index, arg_str in enumerate(
reversed([arg for arg in all_args if arg != WORDBREAK])
):
if index + 1 > cmd_param.nargs:
break
if start_of_option(arg_str):
last_option = arg_str
return True if last_option and last_option in cmd_param.opts else False
def is_incomplete_argument(current_params, cmd_param):
"""
:param current_params: the current params and values for this
argument as already entered
:param cmd_param: the current command parameter
:return: whether or not the last argument is incomplete and
corresponds to this cmd_param. In other words whether or not the
this cmd_param argument can still accept values
"""
if not isinstance(cmd_param, Argument):
return False
current_param_values = current_params[cmd_param.name]
if current_param_values is None:
return True
if cmd_param.nargs == -1:
return True
if (
isinstance(current_param_values, abc.Iterable)
and cmd_param.nargs > 1
and len(current_param_values) < cmd_param.nargs
):
return True
return False
def get_user_autocompletions(ctx, args, incomplete, cmd_param):
"""
:param ctx: context associated with the parsed command
:param args: full list of args
:param incomplete: the incomplete text to autocomplete
:param cmd_param: command definition
:return: all the possible user-specified completions for the param
"""
results = []
if isinstance(cmd_param.type, Choice):
# Choices don't support descriptions.
results = [
(c, None) for c in cmd_param.type.choices if str(c).startswith(incomplete)
]
elif cmd_param.autocompletion is not None:
dynamic_completions = cmd_param.autocompletion(
ctx=ctx, args=args, incomplete=incomplete
)
results = [
c if isinstance(c, tuple) else (c, None) for c in dynamic_completions
]
return results
def get_visible_commands_starting_with(ctx, starts_with):
"""
:param ctx: context associated with the parsed command
:starts_with: string that visible commands must start with.
:return: all visible (not hidden) commands that start with starts_with.
"""
for c in ctx.command.list_commands(ctx):
if c.startswith(starts_with):
command = ctx.command.get_command(ctx, c)
if not command.hidden:
yield command
def add_subcommand_completions(ctx, incomplete, completions_out):
# Add subcommand completions.
if isinstance(ctx.command, MultiCommand):
completions_out.extend(
[
(c.name, c.get_short_help_str())
for c in get_visible_commands_starting_with(ctx, incomplete)
]
)
# Walk up the context list and add any other completion
# possibilities from chained commands
while ctx.parent is not None:
ctx = ctx.parent
if isinstance(ctx.command, MultiCommand) and ctx.command.chain:
remaining_commands = [
c
for c in get_visible_commands_starting_with(ctx, incomplete)
if c.name not in ctx.protected_args
]
completions_out.extend(
[(c.name, c.get_short_help_str()) for c in remaining_commands]
)
def get_choices(cli, prog_name, args, incomplete):
"""
:param cli: command definition
:param prog_name: the program that is running
:param args: full list of args
:param incomplete: the incomplete text to autocomplete
:return: all the possible completions for the incomplete
"""
all_args = copy.deepcopy(args)
ctx = resolve_ctx(cli, prog_name, args)
if ctx is None:
return []
has_double_dash = "--" in all_args
# In newer versions of bash long opts with '='s are partitioned, but
# it's easier to parse without the '='
if start_of_option(incomplete) and WORDBREAK in incomplete:
partition_incomplete = incomplete.partition(WORDBREAK)
all_args.append(partition_incomplete[0])
incomplete = partition_incomplete[2]
elif incomplete == WORDBREAK:
incomplete = ""
completions = []
if not has_double_dash and start_of_option(incomplete):
# completions for partial options
for param in ctx.command.params:
if isinstance(param, Option) and not param.hidden:
param_opts = [
param_opt
for param_opt in param.opts + param.secondary_opts
if param_opt not in all_args or param.multiple
]
completions.extend(
[(o, param.help) for o in param_opts if o.startswith(incomplete)]
)
return completions
# completion for option values from user supplied values
for param in ctx.command.params:
if is_incomplete_option(all_args, param):
return get_user_autocompletions(ctx, all_args, incomplete, param)
# completion for argument values from user supplied values
for param in ctx.command.params:
if is_incomplete_argument(ctx.params, param):
return get_user_autocompletions(ctx, all_args, incomplete, param)
add_subcommand_completions(ctx, incomplete, completions)
# Sort before returning so that proper ordering can be enforced in custom types.
return sorted(completions)
def do_complete(cli, prog_name, include_descriptions):
cwords = split_arg_string(os.environ["COMP_WORDS"])
cword = int(os.environ["COMP_CWORD"])
args = cwords[1:cword]
try:
incomplete = cwords[cword]
except IndexError:
incomplete = ""
for item in get_choices(cli, prog_name, args, incomplete):
echo(item[0])
if include_descriptions:
# ZSH has trouble dealing with empty array parameters when
# returned from commands, use '_' to indicate no description
# is present.
echo(item[1] if item[1] else "_")
return True
def do_complete_fish(cli, prog_name):
cwords = split_arg_string(os.environ["COMP_WORDS"])
incomplete = os.environ["COMP_CWORD"]
args = cwords[1:]
for item in get_choices(cli, prog_name, args, incomplete):
if item[1]:
echo(f"{item[0]}\t{item[1]}")
else:
echo(item[0])
return True
def bashcomplete(cli, prog_name, complete_var, complete_instr):
if "_" in complete_instr:
command, shell = complete_instr.split("_", 1)
else:
command = complete_instr
shell = "bash"
if command == "source":
echo(get_completion_script(prog_name, complete_var, shell))
return True
elif command == "complete":
if shell == "fish":
return do_complete_fish(cli, prog_name)
elif shell in {"bash", "zsh"}:
return do_complete(cli, prog_name, shell == "zsh")
return False
| mit | 10647f3d7f29d465e6abf4010b39273f | 31.927224 | 86 | 0.600606 | 3.973975 | false | false | false | false |
koduj-z-klasa/python101 | docs/podstawy/przyklady/05_oceny.py | 1 | 2058 | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# importujemy funkcje z modułu ocenyfun zapisanego w pliku ocenyfun.py
from ocenyfun import drukuj, srednia, mediana, odchylenie
def main(args):
przedmioty = set(['polski', 'angielski']) # definicja zbioru
drukuj(przedmioty, "Lista przedmiotów zawiera: ")
print("\nAby przerwać wprowadzanie przedmiotów, naciśnij Enter.")
while True:
przedmiot = input("Podaj nazwę przedmiotu: ")
if len(przedmiot):
if przedmiot in przedmioty: # czy przedmiot jest w zbiorze?
print("Ten przedmiot już mamy :-)")
przedmioty.add(przedmiot) # dodaj przedmiot do zbioru
else:
drukuj(przedmioty, "\nTwoje przedmioty: ")
przedmiot = input("\nZ którego przedmiotu wprowadzisz oceny? ")
# jeżeli przedmiotu nie ma w zbiorze
if przedmiot not in przedmioty:
print("Brak takiego przedmiotu, możesz go dodać.")
else:
break # wyjście z pętli
oceny = [] # pusta lista ocen
ocena = None # zmienna sterująca pętlą i do pobierania ocen
print("\nAby przerwać wprowadzanie ocen, podaj 0 (zero).")
while not ocena:
try:
ocena = int(input("Podaj ocenę (1-6): "))
if (ocena > 0 and ocena < 7):
oceny.append(float(ocena))
elif ocena == 0:
break
else:
print("Błędna ocena.")
ocena = None
except ValueError:
print("Błędne dane!")
drukuj(oceny, przedmiot.capitalize() + " - wprowadzone oceny: ")
s = srednia(oceny) # wywołanie funkcji z modułu ocenyfun
m = mediana(oceny) # wywołanie funkcji z modułu ocenyfun
o = odchylenie(oceny, s) # wywołanie funkcji z modułu ocenyfun
print("\nŚrednia: {0:5.2f}".format(s))
print("Mediana: {0:5.2f}\nOdchylenie: {1:5.2f}".format(m, o))
return 0
if __name__ == '__main__':
import sys
sys.exit(main(sys.argv))
| mit | f3d4a007fab2d96bf1cf512126ebb1f9 | 35.232143 | 75 | 0.593396 | 2.495695 | false | false | false | false |
rochacbruno/dynaconf | dynaconf/utils/boxing.py | 1 | 2970 | from __future__ import annotations
import inspect
from functools import wraps
from dynaconf.utils import recursively_evaluate_lazy_format
from dynaconf.utils import upperfy
from dynaconf.utils.functional import empty
from dynaconf.vendor.box import Box
def evaluate_lazy_format(f):
"""Marks a method on Dynabox instance to
lazily evaluate LazyFormat objects upon access."""
@wraps(f)
def evaluate(dynabox, item, *args, **kwargs):
value = f(dynabox, item, *args, **kwargs)
settings = dynabox._box_config["box_settings"]
if getattr(value, "_dynaconf_lazy_format", None):
dynabox._box_config[
f"raw_{item.lower()}"
] = f"@{value.formatter.token} {value.value}"
return recursively_evaluate_lazy_format(value, settings)
return evaluate
class DynaBox(Box):
"""Specialized Box for dynaconf
it allows items/attrs to be found both in upper or lower case"""
@evaluate_lazy_format
def __getattr__(self, item, *args, **kwargs):
try:
return super().__getattr__(item, *args, **kwargs)
except (AttributeError, KeyError):
n_item = item.lower() if item.isupper() else upperfy(item)
return super().__getattr__(n_item, *args, **kwargs)
@evaluate_lazy_format
def __getitem__(self, item, *args, **kwargs):
try:
return super().__getitem__(item, *args, **kwargs)
except (AttributeError, KeyError):
n_item = item.lower() if item.isupper() else upperfy(item)
return super().__getitem__(n_item, *args, **kwargs)
def __copy__(self):
return self.__class__(
super(Box, self).copy(),
box_settings=self._box_config.get("box_settings"),
)
def copy(self):
return self.__class__(
super(Box, self).copy(),
box_settings=self._box_config.get("box_settings"),
)
def _case_insensitive_get(self, item, default=None):
"""adds a bit of overhead but allows case insensitive get
See issue: #486
"""
lower_self = {k.casefold(): v for k, v in self.items()}
return lower_self.get(item.casefold(), default)
@evaluate_lazy_format
def get(self, item, default=None, *args, **kwargs):
if item not in self: # toggle case
item = item.lower() if item.isupper() else upperfy(item)
value = super().get(item, empty, *args, **kwargs)
if value is empty:
# see Issue: #486
return self._case_insensitive_get(item, default)
return value
def __dir__(self):
keys = list(self.keys())
reserved = [
item[0]
for item in inspect.getmembers(DynaBox)
if not item[0].startswith("__")
]
return (
keys
+ [k.lower() for k in keys]
+ [k.upper() for k in keys]
+ reserved
)
| mit | a5922b5b4c1363c36a87484597ed3104 | 31.282609 | 70 | 0.578451 | 3.882353 | false | false | false | false |
koduj-z-klasa/python101 | docs/mcpi/algorytmy/mcpi-rbrowna03.py | 1 | 5453 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import numpy as np  # scientific computing library
import matplotlib.pyplot as plt  # plotting library
from random import randint
from time import sleep
import mcpi.minecraft as minecraft  # minecraft API module
import mcpi.block as block  # block definitions module
os.environ["USERNAME"] = "Steve"  # put any user name here
os.environ["COMPUTERNAME"] = "mykomp"  # put any computer name here
mc = minecraft.Minecraft.create("192.168.1.10")  # connect to the server
def plac(x, y, z, roz=10, gracz=False):
    """Lay a sand floor and clear a cubic region above it with air.

    Parameters: x, y, z -- start position; roz -- edge length of the
    region; gracz -- when True, teleport the player to the centre.
    Requires the global ``mc`` and ``block`` objects.
    """
    kx, ky, kz = x + roz, y + roz, z + roz
    # sand floor one level below the cleared cube
    mc.setBlocks(x, y - 1, z, kx, y - 1, kz, block.SAND)
    # fill the cube itself with air
    mc.setBlocks(x, y, z, kx, ky, kz, block.AIR)
    if gracz:
        # drop the player into the middle of the cleared space
        mc.player.setPos(x + roz / 2, y + roz / 2, z + roz / 2)
def wykres(x, y, tytul="Wykres funkcji", *extra):
    """Plot the function given by lists x (arguments) and y (values);
    *extra* may carry a second x/y pair to draw two series at once."""
    if extra:
        # two data series in a single call
        plt.plot(x, y, extra[0], extra[1])
    else:
        # single series: dotted blue line with circle markers
        plt.plot(x, y, "o:", color="blue", linewidth="3", alpha=0.8)
    plt.title(tytul)
    plt.grid(True)
    plt.show()
def rysuj(x, y, z, blok=block.IRON_BLOCK):
    """Visualize a function by placing one block per point, either in
    the vertical x/y plane (y holds a data list) or flat along x/z."""
    w_pionie = len(y) > 1  # more than one y value -> draw vertically
    for i, wx in enumerate(x):
        if w_pionie:
            print(wx, y[i])
            mc.setBlock(wx, y[i], z[0], blok)
        else:
            print(wx, z[i])
            mc.setBlock(wx, y[0], z[i], blok)
def rysuj_linie(x, y, z, blok=block.IRON_BLOCK):
    """
    Visualize the function as connected line segments, drawn vertically
    (x/y pairs) or horizontally (x/z pairs) with MinecraftDrawing.drawLine().
    """
    import local.minecraftstuff as mcstuff
    mcfig = mcstuff.MinecraftDrawing(mc)
    czylista = True if len(y) > 1 else False
    # walk consecutive point pairs and connect each with a line
    for i in range(len(x) - 1):
        x1 = int(x[i])
        x2 = int(x[i + 1])
        if czylista:
            y1 = int(y[i])
            y2 = int(y[i + 1])
            # GRASS markers flank the segment ends; they are repainted
            # after drawLine so the line does not cover them
            mc.setBlock(x2, y2, z[0], block.GRASS)
            mc.setBlock(x1, y1, z[0], block.GRASS)
            mcfig.drawLine(x1, y1, z[0], x2, y2, z[0], blok)
            mc.setBlock(x2, y2, z[0], block.GRASS)
            mc.setBlock(x1, y1, z[0], block.GRASS)
            print (x1, y1, z[0], x2, y2, z[0])
        else:
            z1 = int(z[i])
            z2 = int(z[i + 1])
            mc.setBlock(x1, y[0], z1, block.GRASS)
            mc.setBlock(x2, y[0], z2, block.GRASS)
            mcfig.drawLine(x1, y[0], z1, x2, y[0], z2, blok)
            mc.setBlock(x1, y[0], z1, block.GRASS)
            mc.setBlock(x2, y[0], z2, block.GRASS)
            print (x1, y[0], z1, x2, y[0], z2)
        sleep(1)  # short pause so the drawing can be watched :-)
        mc.setBlock(0, 1, 0, block.OBSIDIAN)
    # mark the final point of the walk with obsidian
    if czylista:
        mc.setBlock(x2, y2, z[0], block.OBSIDIAN)
    else:
        mc.setBlock(x2, y[0], z2, block.OBSIDIAN)
def ruchyBrowna(dane=[]):
if len(dane):
lx, ly = dane # rozpakowanie listy
x = lx[-1] # ostatni element lx
y = ly[-1] # ostatni element ly
else:
n = int(raw_input("Ile ruchów? "))
r = int(raw_input("Krok przesunięcia? "))
x = y = 0
lx = [0] # lista odciętych
ly = [0] # lista rzędnych
for i in range(0, n):
# losujemy kąt i zamieniamy na radiany
rad = float(randint(0, 360)) * np.pi / 180
x = x + r * np.cos(rad) # wylicz współrzędną x
y = y + r * np.sin(rad) # wylicz współrzędną y
x = int(round(x, 2)) # zaokrągl
y = int(round(y, 2)) # zaokrągl
print(x, y)
lx.append(x)
ly.append(y)
# oblicz wektor końcowego przesunięcia
s = np.fabs(np.sqrt(x**2 + y**2))
print "Wektor przesunięcia: {:.2f}".format(s)
wykres(lx, ly, "Ruchy Browna")
rysuj_linie(lx, [1], ly, block.WOOL)
if not len(dane):
zapisz_dane((lx, ly))
def zapisz_dane(dane):
    """Save the walk data as JSON to the file ``rbrowna.log``."""
    import json
    # the with-statement guarantees the file handle is closed
    with open('rbrowna.log', 'w') as plik:
        json.dump(dane, plik)
def czytaj_dane():
"""Funkcja odczytuje dane w formacie json z pliku"""
import json
dane = []
nazwapliku = raw_input("Podaj nazwę pliku z danymi lub naciśnij ENTER: ")
if os.path.isfile(nazwapliku):
with open(nazwapliku, "r") as plik:
dane = json.load(plik)
else:
print "Podany plik nie istnieje!"
return dane
def main():
    """Entry point: announce the demo, clear two build areas and run the
    random-walk simulation (optionally resumed from a saved file)."""
    mc.postToChat("Ruchy Browna")  # banner in the in-game chat
    for wysokosc in (-20, 0):
        plac(-80, wysokosc, -80, 160)
    ruchyBrowna(czytaj_dane())
    return 0
if __name__ == '__main__':
    main()
| mit | 84728bae0f52837093252b05c595c366 | 30.561404 | 77 | 0.577358 | 2.354712 | false | false | false | false |
rochacbruno/dynaconf | example/toml_with_secrets/program.py | 1 | 1331 | from __future__ import annotations
from dynaconf import LazySettings
settings = LazySettings(
    environments=True,
    ENV_FOR_DYNACONF="example",
    ENVVAR_PREFIX_FOR_DYNACONF="PROGRAM",
    load_dotenv=True,
    settings_files="settings.toml;.secrets.toml",
)
# show what the active [example] environment resolved to
print(settings.USERNAME)
print(settings.SERVER)
print(settings.PASSWORD)
# 1) active env: values come from env vars, settings and secrets files
example_expected = {
    "SERVER": "fromenv.com",
    "USERNAME": "admin",
    "PASSWORD": "My5up3r53c4et",
}
for key, value in example_expected.items():
    found = settings.get(key)
    assert found == getattr(settings, key)
    assert found == value, f"expected: {key}: [{value}] found: [{found}]"
# 2) switching to [development] drops the secret by default
development_expected = {"SERVER": "fromenv.com", "USERNAME": "foo"}
for key, value in development_expected.items():
    found = settings.from_env("development").get(key)
    assert found == getattr(settings.from_env("development"), key)
    assert found == value, f"expected: {key}: [{value}] found: [{found}]"
# 3) keep=True retains values already loaded in the [example] env
development_keep_expected = {
    "SERVER": "fromenv.com",
    "USERNAME": "foo",
    "PASSWORD": "My5up3r53c4et",  # keep=True will keep it from [example] env
}
for key, value in development_keep_expected.items():
    found = settings.from_env("development", keep=True).get(key)
    assert found == getattr(settings.from_env("development", keep=True), key)
    assert found == value, f"expected: {key}: [{value}] found: [{found}]"
| mit | d7a9fd6433388b3bb9215654fc05fce8 | 27.934783 | 77 | 0.666416 | 3.238443 | false | false | false | false |
rochacbruno/dynaconf | dynaconf/vendor_src/ruamel/yaml/events.py | 5 | 3902 | # coding: utf-8
# Abstract classes.
if False: # MYPY
from typing import Any, Dict, Optional, List # NOQA
def CommentCheck():
    # type: () -> None
    """Sentinel default: lets Event tell 'no comment passed' from None."""
    return None


class Event(object):
    __slots__ = 'start_mark', 'end_mark', 'comment'

    def __init__(self, start_mark=None, end_mark=None, comment=CommentCheck):
        # type: (Any, Any, Any) -> None
        self.start_mark = start_mark
        self.end_mark = end_mark
        # normalise the sentinel so consumers only ever see a real
        # comment value or None
        self.comment = None if comment is CommentCheck else comment

    def __repr__(self):
        # type: () -> Any
        # show only the attributes this (sub)class actually defines
        shown = []
        for name in ['anchor', 'tag', 'implicit', 'value', 'flow_style', 'style']:
            if hasattr(self, name):
                shown.append('%s=%r' % (name, getattr(self, name)))
        arguments = ', '.join(shown)
        if self.comment not in [None, CommentCheck]:
            arguments += ', comment={!r}'.format(self.comment)
        return '%s(%s)' % (self.__class__.__name__, arguments)
class NodeEvent(Event):
    __slots__ = ('anchor',)

    def __init__(self, anchor, start_mark=None, end_mark=None, comment=None):
        # type: (Any, Any, Any, Any) -> None
        # delegate the shared mark/comment handling to Event
        super(NodeEvent, self).__init__(start_mark, end_mark, comment)
        self.anchor = anchor
class CollectionStartEvent(NodeEvent):
    __slots__ = 'tag', 'implicit', 'flow_style', 'nr_items'

    def __init__(
        self,
        anchor,
        tag,
        implicit,
        start_mark=None,
        end_mark=None,
        flow_style=None,
        comment=None,
        nr_items=None,
    ):
        # type: (Any, Any, Any, Any, Any, Any, Any, Optional[int]) -> None
        super(CollectionStartEvent, self).__init__(
            anchor, start_mark, end_mark, comment
        )
        # collection-specific payload
        self.tag = tag
        self.implicit = implicit
        self.flow_style = flow_style
        self.nr_items = nr_items
class CollectionEndEvent(Event):
    # Marker event closing a collection node; carries no extra data.
    __slots__ = ()
# Implementations.
class StreamStartEvent(Event):
    __slots__ = ('encoding',)

    def __init__(self, start_mark=None, end_mark=None, encoding=None, comment=None):
        # type: (Any, Any, Any, Any) -> None
        super(StreamStartEvent, self).__init__(start_mark, end_mark, comment)
        self.encoding = encoding  # stream character encoding, if known
class StreamEndEvent(Event):
    # Marker event closing the whole stream; carries no extra data.
    __slots__ = ()
class DocumentStartEvent(Event):
    __slots__ = 'explicit', 'version', 'tags'

    def __init__(
        self,
        start_mark=None,
        end_mark=None,
        explicit=None,
        version=None,
        tags=None,
        comment=None,
    ):
        # type: (Any, Any, Any, Any, Any, Any) -> None
        super(DocumentStartEvent, self).__init__(start_mark, end_mark, comment)
        # document prologue details
        self.explicit = explicit
        self.version = version
        self.tags = tags
class DocumentEndEvent(Event):
    __slots__ = ('explicit',)

    def __init__(self, start_mark=None, end_mark=None, explicit=None, comment=None):
        # type: (Any, Any, Any, Any) -> None
        super(DocumentEndEvent, self).__init__(start_mark, end_mark, comment)
        self.explicit = explicit
class AliasEvent(NodeEvent):
    # Reference to an anchored node; only the inherited anchor is needed.
    __slots__ = ()
class ScalarEvent(NodeEvent):
    __slots__ = 'tag', 'implicit', 'value', 'style'

    def __init__(
        self,
        anchor,
        tag,
        implicit,
        value,
        start_mark=None,
        end_mark=None,
        style=None,
        comment=None,
    ):
        # type: (Any, Any, Any, Any, Any, Any, Any, Any) -> None
        super(ScalarEvent, self).__init__(anchor, start_mark, end_mark, comment)
        # scalar payload plus its presentation hints
        self.tag = tag
        self.implicit = implicit
        self.value = value
        self.style = style
class SequenceStartEvent(CollectionStartEvent):
    # Opens a sequence node.
    __slots__ = ()
class SequenceEndEvent(CollectionEndEvent):
    # Closes a sequence node.
    __slots__ = ()
class MappingStartEvent(CollectionStartEvent):
    # Opens a mapping node.
    __slots__ = ()
class MappingEndEvent(CollectionEndEvent):
    # Closes a mapping node.
    __slots__ = ()
| mit | 091309e0638043578fdd78debbd241a8 | 23.853503 | 90 | 0.558688 | 3.660413 | false | false | false | false |
jblance/mpp-solar | mppsolar/helpers.py | 1 | 3001 | #!/usr/bin/env python3
import logging
import importlib
log = logging.getLogger("helpers")
def get_kwargs(kwargs, key, default=None):
    """Return ``kwargs[key]`` when present and truthy, else ``default``.

    Note that falsy values (0, "", None, ...) deliberately fall back to
    the default, matching the original behaviour.
    """
    value = kwargs.get(key)
    return value if value else default
def key_wanted(key, filter=None, excl_filter=None):
    """Decide whether *key* passes the include/exclude regex filters.

    A key matching ``excl_filter`` is always rejected; otherwise it is
    accepted when no include ``filter`` is set, or when it matches it.
    """
    # exclusion always wins
    if excl_filter is not None and excl_filter.search(key):
        return False
    # no include filter: everything not excluded is wanted
    if filter is None:
        return True
    return bool(filter.search(key))
def get_resp_defn(key, defns):
    """
    Return the definition entry from *defns* whose first element equals
    *key* (bytes keys are decoded as UTF-8 first).  Falls back to a
    generic ``[key, key, "", ""]`` entry when nothing matches, and to
    ``None`` for an empty/missing key.
    """
    if not key:
        return None
    if type(key) is bytes:
        try:
            key = key.decode("utf-8")
        except UnicodeDecodeError:
            log.info(f"key decode error for {key}")
    for defn in defns:
        if defn[0] == key:
            return defn
    # no definition found -- synthesise a neutral one
    log.info(f"No defn found for {key} key")
    return [key, key, "", ""]
# def get_outputs(output_list):
# """
# Take a comma separated list of output names
# attempt to find and instantiate the corresponding module
# return array of modules
# """
# ops = []
# outputs = output_list.split(",")
# for output in outputs:
# log.info(f"attempting to create output processor: {output}")
# try:
# output_module = importlib.import_module("mppsolar.outputs." + output, ".")
# output_class = getattr(output_module, output)
# ops.append(output_class())
# except ModuleNotFoundError:
# # perhaps raise a Powermon exception here??
# # maybe warn and keep going, only error if no outputs found?
# log.critical(f"No module found for output processor {output}")
# return ops
def get_device_class(device_type=None):
    """
    Map a device type name to its class in ``mppsolar.devices``.

    Returns the class object, or None when no type is given or the
    matching module cannot be imported.
    """
    if device_type is None:
        return None
    device_type = device_type.lower()
    try:
        device_module = importlib.import_module("mppsolar.devices." + device_type, ".")
    except ModuleNotFoundError as e:
        # perhaps raise a mppsolar exception here??
        log.critical(f"Error loading device {device_type}: {e}")
        return None
    # by convention the class is named exactly like its module
    return getattr(device_module, device_type)
| mit | 6c54b804da5ac7b6f96bc075e979028e | 31.619565 | 109 | 0.614795 | 3.793932 | false | false | false | false |
jonathf/chaospy | chaospy/distributions/baseclass/distribution.py | 1 | 33546 | """Abstract baseclass for all distributions."""
import logging
import numpy
import chaospy
from .utils import check_dependencies
class Distribution(object):
    """Baseclass for all probability distributions."""
    __array_priority__ = 9000
    """Numpy override variable."""
    interpret_as_integer = False
    """
    Flag indicating that return value from the methods sample, and inv
    should be interpreted as integers instead of floating point.
    """
    @property
    def stochastic_dependent(self):
        """True if distribution contains stochastically dependent components."""
        # a component is dependent when it relies on more than one
        # underlying dependency identifier
        return any(len(deps) > 1 for deps in self._dependencies)
    def __init__(
        self,
        parameters,
        dependencies,
        rotation=None,
        exclusion=None,
        repr_args=None,
    ):
        """
        Distribution initializer.

        In addition to assigning some object variables, also checks for
        some consistency issues.

        Args:
            parameters (Optional[Distribution[str, Union[ndarray, Distribution]]]):
                Collection of model parameters.
            dependencies (Optional[Sequence[Set[int]]]):
                Dependency identifiers. One collection for each dimension.
            rotation (Optional[Sequence[int]]):
                The order of which to resolve dependencies.
            exclusion (Optional[Sequence[int]]):
                Distributions that has been "taken out of play" and
                therefore can not be reused other places in the
                dependency hierarchy.
            repr_args (Optional[Sequence[str]]):
                Positional arguments to place in the object string
                representation. The repr output will then be:
                `<class name>(<arg1>, <arg2>, ...)`.

        Raises:
            StochasticallyDependentError:
                For dependency structures that can not later be
                rectified. This include under-defined
                distributions, and inclusion of distributions that
                should be exclusion.

        """
        assert isinstance(parameters, dict)
        self._parameters = parameters
        self._dependencies = list(dependencies)
        if rotation is None:
            # default rotation: dimensions with the fewest dependencies first
            rotation = sorted(enumerate(self._dependencies), key=lambda x: len(x[1]))
            rotation = [key for key, _ in rotation]
        rotation = list(rotation)
        # rotation must be a permutation of the dimension indices
        assert len(set(rotation)) == len(dependencies)
        assert min(rotation) == 0
        assert max(rotation) == len(dependencies) - 1
        self._rotation = rotation
        if exclusion is None:
            exclusion = set()
        self._exclusion = set(exclusion)
        if repr_args is None:
            # default repr: "key=value" for every parameter, sorted by name
            repr_args = (
                "{}={}".format(key, self._parameters[key])
                for key in sorted(self._parameters)
            )
        self._repr_args = list(repr_args)
        # the all-zero-order raw moment is always 1
        self._mom_cache = {(0,) * len(dependencies): 1.0}
        self._ttr_cache = {}
        self._indices = {}
        # union of dependency identifiers across all dimensions
        self._all_dependencies = {dep for deps in self._dependencies for dep in deps}
        if len(self._all_dependencies) < len(dependencies):
            raise chaospy.StochasticallyDependentError(
                "%s is an under-defined probability distribution." % self
            )
        for key, param in list(parameters.items()):
            if isinstance(param, Distribution):
                if self._all_dependencies.intersection(param._exclusion):
                    raise chaospy.StochasticallyDependentError(
                        (
                            "%s contains dependencies that can not also exist "
                            "other places in the dependency hierarchy"
                        )
                        % param
                    )
                # inherit exclusions declared by parameter distributions
                self._exclusion.update(param._exclusion)
            else:
                # constants are normalized to numpy arrays
                self._parameters[key] = numpy.asarray(param)
def get_parameters(self, idx, cache, assert_numerical=True):
"""Get distribution parameters."""
del assert_numerical
out = self._parameters.copy()
assert isinstance(cache, dict)
if idx is not None:
assert not isinstance(idx, dict), idx
assert idx == int(idx), idx
assert "idx" not in out
assert "cache" not in out
out["cache"] = cache
out["idx"] = idx
return out
@property
def lower(self):
"""Lower bound for the distribution."""
cache = {}
out = numpy.zeros(len(self))
for idx in self._rotation:
out[idx] = self._get_lower(idx, cache=cache)
return out
    def _get_lower(self, idx, cache):
        """In-processes function for getting lower bounds."""
        # reuse a previously evaluated bound for this (dimension, dist) pair
        if (idx, self) in cache:
            return cache[idx, self][0]
        # some subclasses provide dedicated (cheaper) bound parameters
        if hasattr(self, "get_lower_parameters"):
            parameters = self.get_lower_parameters(idx, cache)
        else:
            parameters = self.get_parameters(idx, cache, assert_numerical=False)
        out = self._lower(**parameters)
        assert not isinstance(out, Distribution), (self, out)
        out = numpy.atleast_1d(out)
        assert out.ndim == 1, (self, out, cache)
        # cache format is (output values, None) -- bounds have no input side
        cache[idx, self] = (out, None)
        return out
    def _lower(self, **kwargs):  # pragma: no cover
        """Backend lower bound; overridden by concrete distributions."""
        raise chaospy.UnsupportedFeature("lower not supported")
@property
def upper(self):
"""Upper bound for the distribution."""
cache = {}
out = numpy.zeros(len(self))
for idx in self._rotation:
out[idx] = self._get_upper(idx, cache=cache)
return out
def _get_upper(self, idx, cache):
"""In-processes function for getting upper bounds."""
if (idx, self) in cache:
return cache[idx, self][0]
if hasattr(self, "get_upper_parameters"):
parameters = self.get_upper_parameters(idx, cache)
else:
parameters = self.get_parameters(idx, cache, assert_numerical=False)
out = self._upper(**parameters)
assert not isinstance(out, Distribution), (self, out)
out = numpy.atleast_1d(out)
assert out.ndim == 1, (self, out, cache)
cache[idx, self] = (out, None)
size = max([elem[0].size for elem in cache.values()])
assert all([elem[0].size in (1, size) for elem in cache.values()])
return out
def _upper(self, **kwargs): # pragma: no cover
"""Backend upper bound."""
raise chaospy.UnsupportedFeature("lower not supported")
    def fwd(self, x_data):
        """
        Forward Rosenblatt transformation.

        Args:
            x_data (numpy.ndarray):
                Location for the distribution function. ``x_data.shape`` must
                be compatible with distribution shape.

        Returns:
            (numpy.ndarray):
                Evaluated distribution function values, where
                ``out.shape==x_data.shape``.
        """
        logger = logging.getLogger(__name__)
        check_dependencies(self)
        x_data = numpy.asfarray(x_data)
        shape = x_data.shape
        x_data = x_data.reshape(len(self), -1)
        cache = {}
        q_data = numpy.zeros(x_data.shape)
        # evaluate one dimension at a time in dependency-resolution order,
        # sharing the cache so conditional components see earlier results
        for idx in self._rotation:
            q_data[idx] = self._get_fwd(x_data[idx], idx, cache)
        indices = (q_data > 1) | (q_data < 0)
        if numpy.any(indices):  # pragma: no cover
            logger.debug(
                "%s.fwd: %d/%d outputs out of bounds",
                self,
                numpy.sum(indices),
                len(indices),
            )
            q_data = numpy.clip(q_data, a_min=0, a_max=1)
        q_data = q_data.reshape(shape)
        return q_data
    def _get_fwd(self, x_data, idx, cache):
        """In-process function for getting cdf-values."""
        logger = logging.getLogger(__name__)
        assert (idx, self) not in cache, "repeated evaluation"
        # bounds are evaluated on cache copies so they do not pollute the
        # shared forward-evaluation cache
        lower = numpy.broadcast_to(
            self._get_lower(idx, cache=cache.copy()), x_data.shape
        )
        upper = numpy.broadcast_to(
            self._get_upper(idx, cache=cache.copy()), x_data.shape
        )
        parameters = self.get_parameters(idx, cache, assert_numerical=True)
        ret_val = self._cdf(x_data, **parameters)
        assert not isinstance(ret_val, Distribution), (self, ret_val)
        out = numpy.zeros(x_data.shape)
        out[:] = ret_val
        # clamp: probability is 0 below the support and 1 above it
        indices = x_data < lower
        if numpy.any(indices):
            logger.debug(
                "%s.fwd: %d/%d inputs below bounds",
                self,
                numpy.sum(indices),
                len(indices),
            )
            out = numpy.where(indices, 0, out)
        indices = x_data > upper
        if numpy.any(indices):
            logger.debug(
                "%s.fwd: %d/%d inputs above bounds",
                self,
                numpy.sum(indices),
                len(indices),
            )
            out = numpy.where(indices, 1, out)
        # NOTE(review): this OR condition is tautologically true; it was
        # probably meant to be ``(out >= 0) & (out <= 1)`` -- confirm.
        assert numpy.all((out >= 0) | (out <= 1))
        # cache stores (input, output) for this (dimension, dist) pair
        cache[idx, self] = (x_data, out)
        assert out.ndim == 1, (self, out, cache)
        return out
def cdf(self, x_data):
"""
Cumulative distribution function.
Note that chaospy only supports cumulative distribution functions for
stochastically independent distributions.
Args:
x_data (numpy.ndarray):
Location for the distribution function. Assumes that
``len(x_data) == len(distribution)``.
Returns:
(numpy.ndarray):
Evaluated distribution function values, where output has shape
``x_data.shape`` in one dimension and ``x_data.shape[1:]`` in
higher dimensions.
"""
check_dependencies(self)
if self.stochastic_dependent:
raise chaospy.StochasticallyDependentError(
"Cumulative distribution does not support dependencies."
)
x_data = numpy.asarray(x_data)
if self.interpret_as_integer:
x_data = x_data + 0.5
q_data = self.fwd(x_data)
if len(self) > 1:
q_data = numpy.prod(q_data, 0)
return q_data
    def inv(self, q_data, max_iterations=100, tollerance=1e-5):
        """
        Inverse Rosenblatt transformation.

        If possible the transformation is done analytically. If not possible,
        transformation is approximated using an algorithm that alternates
        between Newton-Raphson and binary search.

        Args:
            q_data (numpy.ndarray):
                Probabilities to be inverse. If any values are outside ``[0,
                1]``, error will be raised. ``q_data.shape`` must be compatible
                with distribution shape.
            max_iterations (int):
                If approximation is used, this sets the maximum number of
                allowed iterations in the Newton-Raphson algorithm.
            tollerance (float):
                If approximation is used, this set the error tolerance level
                required to define a sample as converged.

        Returns:
            (numpy.ndarray):
                Inverted probability values where
                ``out.shape == q_data.shape``.
        """
        # NOTE(review): ``max_iterations`` and ``tollerance`` are documented
        # but not forwarded anywhere in this method; confirm whether the
        # approximation in ``_get_inv`` picks them up through other means.
        logger = logging.getLogger(__name__)  # currently unused here
        check_dependencies(self)
        q_data = numpy.asfarray(q_data)
        assert numpy.all((q_data >= 0) & (q_data <= 1)), "sanitize your inputs!"
        shape = q_data.shape
        q_data = q_data.reshape(len(self), -1)
        cache = {}
        x_data = numpy.zeros(q_data.shape)
        # invert one dimension at a time in dependency-resolution order
        for idx in self._rotation:
            x_data[idx] = self._get_inv(q_data[idx], idx, cache)
        x_data = x_data.reshape(shape)
        return x_data
    def _get_inv(self, q_data, idx, cache):
        """In-process function for getting ppf-values."""
        logger = logging.getLogger(__name__)
        assert numpy.all(q_data <= 1) and numpy.all(q_data >= 0)
        assert q_data.ndim == 1
        # reuse a previously inverted value for this (dimension, dist) pair
        if (idx, self) in cache:
            return cache[idx, self][0]
        # bounds are evaluated on cache copies to keep the shared cache clean
        lower = numpy.broadcast_to(
            self._get_lower(idx, cache=cache.copy()), q_data.shape
        )
        upper = numpy.broadcast_to(
            self._get_upper(idx, cache=cache.copy()), q_data.shape
        )
        try:
            parameters = self.get_parameters(idx, cache, assert_numerical=True)
            ret_val = self._ppf(q_data, **parameters)
        except chaospy.UnsupportedFeature:
            # no analytical inverse available: fall back to approximation
            ret_val = chaospy.approximate_inverse(self, idx, q_data, cache=cache)
        assert not isinstance(ret_val, Distribution), (self, ret_val)
        out = numpy.zeros(q_data.shape)
        out[:] = ret_val
        # clip the result into the distribution support
        indices = out < lower
        if numpy.any(indices):
            logger.debug(
                "%s.inv: %d/%d outputs below bounds",
                self,
                numpy.sum(indices),
                len(indices),
            )
            out = numpy.where(indices, lower, out)
        indices = out > upper
        if numpy.any(indices):
            logger.debug(
                "%s.inv: %d/%d outputs above bounds",
                self,
                numpy.sum(indices),
                len(indices),
            )
            out = numpy.where(indices, upper, out)
        assert out.ndim == 1
        # cache stores (output, input) -- mirror image of the fwd cache
        cache[idx, self] = (out, q_data)
        assert out.ndim == 1, (self, out, cache)
        return out
def _ppf(self, xloc, **kwargs):
raise chaospy.UnsupportedFeature("%s: does not support analytical ppf." % self)
def ppf(self, q_data, max_iterations=100, tollerance=1e-5):
"""
Point percentile function.
Also known as the inverse cumulative distribution function.
Note that chaospy only supports point percentiles for univariate
distributions.
Args:
q_data (numpy.ndarray):
Probabilities to be inverse. If any values are outside ``[0,
1]``, error will be raised.
max_iterations (int):
If approximation is used, this sets the maximum number of
allowed iterations in the Newton-Raphson algorithm.
tollerance (float):
If approximation is used, this set the error tolerance level
required to define a sample as converged.
Returns:
(numpy.ndarray):
Inverted probability values where
``out.shape == q_data.shape``.
"""
if len(self) > 1:
raise ValueError("only one-dimensional distribution supports percentiles.")
return self.inv(
q_data,
max_iterations=max_iterations,
tollerance=tollerance,
)
    def pdf(self, x_data, decompose=False, allow_approx=True, step_size=1e-7):
        """
        Probability density function.

        If possible the density will be calculated analytically. If not
        possible, it will be approximated by approximating the one-dimensional
        derivative of the forward Rosenblatt transformation and multiplying the
        component parts. Note that even if the distribution is multivariate,
        each component of the Rosenblatt is one-dimensional.

        Args:
            x_data (numpy.ndarray):
                Location for the density function. If multivariate,
                `len(x_data) == len(self)` is required.
            decompose (bool):
                Decompose multivariate probability density `p(x), p(y|x), ...`
                instead of multiplying them together into `p(x, y, ...)`.
            allow_approx (bool):
                Allow the density to be estimated using numerical derivative of
                forward mapping if analytical approach fails. Raises error
                instead if false.
            step_size (float):
                The relative step size between two points used to calculate the
                derivative, assuming approximation is being used.

        Raises:
            chaospy.UnsupportedFeature:
                If analytical calculation is not possible and `allow_approx` is
                false.

        Returns:
            (numpy.ndarray):
                Evaluated density function evaluated in `x_data`. If decompose,
                `output.shape == x_data.shape`, else if multivariate the first
                dimension is multiplied together.

        Example:
            >>> chaospy.Gamma(2).pdf([1, 2, 3, 4, 5]).round(3)
            array([0.368, 0.271, 0.149, 0.073, 0.034])
            >>> dist = chaospy.Iid(chaospy.Normal(0, 1), 2)
            >>> grid = numpy.mgrid[-1.5:2, -1.5:2]
            >>> dist.pdf(grid).round(3)
            array([[0.017, 0.046, 0.046, 0.017],
                   [0.046, 0.124, 0.124, 0.046],
                   [0.046, 0.124, 0.124, 0.046],
                   [0.017, 0.046, 0.046, 0.017]])
            >>> dist.pdf(grid, decompose=True).round(3)
            array([[[0.13 , 0.13 , 0.13 , 0.13 ],
                    [0.352, 0.352, 0.352, 0.352],
                    [0.352, 0.352, 0.352, 0.352],
                    [0.13 , 0.13 , 0.13 , 0.13 ]],
            <BLANKLINE>
                   [[0.13 , 0.352, 0.352, 0.13 ],
                    [0.13 , 0.352, 0.352, 0.13 ],
                    [0.13 , 0.352, 0.352, 0.13 ],
                    [0.13 , 0.352, 0.352, 0.13 ]]])

        """
        logger = logging.getLogger(__name__)
        check_dependencies(self)
        x_data = numpy.asfarray(x_data)
        shape = x_data.shape
        x_data = x_data.reshape(len(self), -1)
        f_data = numpy.zeros(x_data.shape)
        cache = {}
        for idx in self._rotation:
            try:
                # keep a snapshot so the cache can be rolled back if the
                # analytical evaluation fails partway through
                cache_ = cache.copy()
                f_data[idx] = self._get_pdf(x_data[idx], idx, cache)
            except chaospy.UnsupportedFeature:
                if allow_approx:
                    logger.info(
                        "%s: has stochastic dependencies; "
                        "Approximating density with numerical derivative.",
                        str(self),
                    )
                    # restore the pre-attempt cache before approximating
                    cache = cache_
                    f_data[idx] = chaospy.approximate_density(
                        self, idx, x_data[idx], cache=cache, step_size=step_size
                    )
                else:
                    raise
        f_data = f_data.reshape(shape)
        if len(self) > 1 and not decompose:
            # joint density: multiply the conditional components together
            f_data = numpy.prod(f_data, 0)
        return f_data
    def _get_pdf(self, x_data, idx, cache):
        """In-process function for getting pdf-values."""
        logger = logging.getLogger(__name__)
        assert x_data.ndim == 1
        # reuse a previously evaluated density for this (dimension, dist) pair
        if (idx, self) in cache:
            return cache[idx, self][1]
        # bounds are evaluated on cache copies to keep the shared cache clean
        lower = numpy.broadcast_to(
            self._get_lower(idx, cache=cache.copy()), x_data.shape
        )
        upper = numpy.broadcast_to(
            self._get_upper(idx, cache=cache.copy()), x_data.shape
        )
        parameters = self.get_parameters(idx, cache, assert_numerical=True)
        ret_val = self._pdf(x_data, **parameters)
        assert not isinstance(ret_val, Distribution), (self, ret_val)
        out = numpy.zeros(x_data.shape)
        out[:] = ret_val
        # density is zero outside the distribution support
        indices = (x_data < lower) | (x_data > upper)
        if numpy.any(indices):
            logger.debug(
                "%s.fwd: %d/%d inputs out of bounds",
                self,
                numpy.sum(indices),
                len(indices),
            )
            logger.debug("%s[%s]: %s - %s - %s", self, idx, lower, x_data, upper)
            out = numpy.where(indices, 0, ret_val)
        if self in cache:
            # NOTE(review): cache keys elsewhere are (idx, self) tuples, so
            # this bare-``self`` lookup appears to never match -- possibly a
            # leftover from an older cache layout; confirm before relying
            # on this branch.
            out = numpy.where(x_data == cache[self][0], out, 0)
        cache[idx, self] = (x_data, out)
        assert out.ndim == 1, (self, out, cache)
        return out
    def _pdf(self, xloc, **kwargs):
        """Backend density; subclasses override when an analytical form exists."""
        raise chaospy.UnsupportedFeature("%s: does not support analytical pdf." % self)
    def sample(
        self, size=(), rule="random", antithetic=None, include_axis_dim=False, seed=None
    ):
        """
        Create pseudo-random generated samples.

        By default, the samples are created using standard (pseudo-)random
        samples. However, if needed, the samples can also be created by either
        low-discrepancy sequences, and/or variance reduction techniques.

        Changing the sampling scheme, use the following ``rule`` flag:

        ----------------------  -------------------------------------------
        key                     description
        ----------------------  -------------------------------------------
        ``additive_recursion``  Modulus of golden ratio samples.
        ``chebyshev``           Roots of first order Chebyshev polynomials.
        ``grid``                Regular spaced grid.
        ``halton``              Halton low-discrepancy sequence.
        ``hammersley``          Hammersley low-discrepancy sequence.
        ``korobov``             Korobov lattice.
        ``latin_hypercube``     Latin hypercube samples.
        ``nested_chebyshev``    Chebyshev nodes adjusted to ensure nested.
        ``nested_grid``         Nested regular spaced grid.
        ``random``              Classical (Pseudo-)Random samples.
        ``sobol``               Sobol low-discrepancy sequence.
        ----------------------  -------------------------------------------

        All samples are created on the ``[0, 1]``-hypercube, which then is
        mapped into the domain of the distribution using the inverse Rosenblatt
        transformation.

        Args:
            size (numpy.ndarray):
                The size of the samples to generate.
            rule (str):
                Indicator defining the sampling scheme.
            antithetic (bool, numpy.ndarray):
                If provided, will be used to setup antithetic variables. If
                array, defines the axes to mirror.
            include_axis_dim (bool):
                By default an extra dimension even if the number of dimensions
                is 1.
            seed (Optional[int]):
                If provided, fixes the random variable's seed, ensuring
                reproducible results.

        Returns:
            (numpy.ndarray):
                Random samples with ``self.shape``. An extra dimension might be
                added to the front if either ``len(dist) > 1`` or
                ``include_axis_dim=True``.
        """
        if seed is not None:
            # temporarily fix the global RNG state, recurse without the
            # seed argument, then restore the previous state
            state = numpy.random.get_state()
            numpy.random.seed(seed)
            out = self.sample(
                size,
                rule=rule,
                antithetic=antithetic,
                include_axis_dim=include_axis_dim,
            )
            numpy.random.set_state(state)
            return out
        check_dependencies(self)
        size_ = numpy.prod(size, dtype=int)
        dim = len(self)
        # normalize requested size into a concrete output shape
        shape = (size,) if isinstance(size, (int, float, numpy.number)) else tuple(size)
        shape = (-1,) + shape[1:]
        shape = shape if dim == 1 and not include_axis_dim else (dim,) + shape
        from chaospy.distributions import sampler
        out = sampler.generator.generate_samples(
            order=size_, domain=self, rule=rule, antithetic=antithetic
        )
        # discrete components are rounded to the nearest integer
        for idx, dist in enumerate(self):
            if dist.interpret_as_integer:
                out[idx] = numpy.round(out[idx])
        if self.interpret_as_integer:
            out = numpy.round(out).astype(int)
        out = out.reshape(shape)
        return out
def mom(self, K, allow_approx=True, **kwargs):
"""
Raw statistical moments.
Creates non-centralized raw moments from the random variable. If
analytical options can not be utilized, Monte Carlo integration
will be used.
Args:
K (numpy.ndarray):
Index of the raw moments. k.shape must be compatible with
distribution shape. Sampling scheme when performing Monte
Carlo
allow_approx (bool):
Allow the moments to be calculated using quadrature integration
if analytical approach fails. Raises error instead if false.
kwargs (Any):
Arguments passed to :func:`chaospy.approximate_moment` if
approximation is used.
Raises:
chaospy.UnsupportedFeature:
If analytical calculation is not possible and `allow_approx` is
false.
Returns:
(numpy.ndarray):
Shapes are related through the identity
``k.shape == dist.shape+k.shape``.
"""
logger = logging.getLogger(__name__)
K = numpy.asarray(K, dtype=int)
assert numpy.all(K >= 0)
shape = K.shape
dim = len(self)
if dim > 1:
assert len(self) == shape[0]
shape = shape[1:]
size = int(K.size / dim)
K = K.reshape(dim, size)
try:
out = [self._get_mom(kdata) for kdata in K.T]
logger.debug("%s: moment calculated successfully", str(self))
except chaospy.UnsupportedFeature:
if allow_approx:
logger.info(
"%s: has stochastic dependencies; "
"Approximating moments with quadrature.",
str(self),
)
out = [chaospy.approximate_moment(self, kdata) for kdata in K.T]
else:
out = [self._get_mom(kdata) for kdata in K.T]
out = numpy.array(out)
assert out.size == numpy.prod(shape), (out, shape)
return out.reshape(shape)
    def _get_mom(self, kdata):
        """In-process function for getting moments."""
        # memoized per moment-order tuple
        if tuple(kdata) in self._mom_cache:
            return self._mom_cache[tuple(kdata)]
        if hasattr(self, "get_mom_parameters"):
            parameters = self.get_mom_parameters()
        else:
            parameters = self.get_parameters(idx=None, cache={}, assert_numerical=False)
        # NOTE(review): the base get_parameters above always injects an "idx"
        # key, which would trip this assert; presumably subclasses override
        # get_parameters/get_mom_parameters so "idx" is absent -- confirm.
        assert "idx" not in parameters, (self, parameters)
        ret_val = float(self._mom(kdata, **parameters))
        assert not isinstance(ret_val, Distribution), (self, ret_val)
        self._mom_cache[tuple(kdata)] = ret_val
        return ret_val
    def _mom(self, kloc, **kwargs):
        """Backend raw moment; subclasses override when an analytical form exists."""
        raise chaospy.UnsupportedFeature("moments not supported for this distribution")
def ttr(self, kloc):
"""
Three terms relation's coefficient generator.
Args:
k (numpy.ndarray, int):
The order of the coefficients.
Returns:
(Recurrence coefficients):
Where out[0] is the first (A) and out[1] is the second
coefficient With ``out.shape==(2,)+k.shape``.
"""
check_dependencies(self)
kloc = numpy.asarray(kloc, dtype=int)
shape = kloc.shape
kloc = kloc.reshape(len(self), -1)
out = numpy.zeros((2,) + kloc.shape)
for idy, kloc_ in enumerate(kloc.T):
for idx in range(len(self)):
out[:, idx, idy] = self._get_ttr(kloc_[idx], idx)
return out.reshape((2,) + shape)
def _get_ttr(self, kdata, idx):
"""In-process function for getting TTR-values."""
if (idx, kdata) in self._ttr_cache:
return self._ttr_cache[idx, kdata]
if hasattr(self, "get_ttr_parameters"):
parameters = self.get_ttr_parameters(idx)
else:
parameters = self.get_parameters(idx, cache={}, assert_numerical=True)
alpha, beta = self._ttr(kdata, **parameters)
assert not isinstance(alpha, Distribution), (self, alpha)
assert not isinstance(beta, Distribution), (self, beta)
alpha = numpy.asfarray(alpha).item()
beta = numpy.asfarray(beta).item()
self._ttr_cache[idx, kdata] = (alpha, beta)
return alpha, beta
def _ttr(self, kloc, **kwargs):
raise chaospy.UnsupportedFeature(
"three terms recursion not supported for this distribution"
)
def _get_cache(self, idx, cache, get=None):
"""
In-process function for getting cached values.
Each time a distribution has been processed, the input and output
values are stored in the cache.
This checks if a distribution has been processed before and return a
cache value if it is.
The cached values are as follows:
----------- ------------- -------------
Context Get 0 Get 1
----------- ------------- -------------
pdf Input values Output values
cdf/fwd Input values Output values
ppf/inv Output values Input values
lower/upper Output values N/A
----------- ------------- -------------
Args:
idx (int):
Which dimension to get cache from.
cache (Dict[Distribution, Tuple[numpy.ndarray, numpy.ndarray]]):
Collection of cached values. Keys are distributions that has
been processed earlier, values consist of up to two cache
value.
get (int):
Which cache to retrieve.
Returns:
(numpy.ndarray, Distribution):
The content of the cache, if any. Else return self.
"""
if (idx, self) in cache:
assert get in (0, 1)
out = cache[idx, self][get]
else:
out = self._cache(idx=idx, cache=cache, get=get)
return out
def _cache(self, idx, cache, get):
"""Backend function of retrieving cache values."""
return self
def __getitem__(self, index):
if isinstance(index, numpy.number):
assert index.dtype == int
index = int(index)
if isinstance(index, int):
if not -len(self) < index < len(self):
raise IndexError("index out of bounds: %s" % index)
if index < 0:
index += len(self)
return chaospy.ItemDistribution(int(index), self)
if isinstance(index, slice):
start = 0 if index.start is None else index.start
stop = len(self) if index.stop is None else index.stop
step = 1 if index.step is None else index.step
return chaospy.J(*[self[idx] for idx in range(start, stop, step)])
raise IndexError("unrecognized key: %s" % repr(index))
def __iter__(self):
for idx in range(len(self)):
yield self[idx]
def __len__(self):
"""Distribution length."""
return len(self._dependencies)
def __repr__(self):
"""Distribution repr function."""
args = ", ".join([str(arg) for arg in self._repr_args])
return "{}({})".format(self.__class__.__name__, args)
def __str__(self):
"""Distribution str function."""
return repr(self)
def __add__(self, X):
"""Y.__add__(X) <==> X+Y"""
return chaospy.Add(self, X)
def __radd__(self, X):
"""Y.__radd__(X) <==> Y+X"""
return chaospy.Add(self, X)
def __sub__(self, X):
"""Y.__sub__(X) <==> X-Y"""
return chaospy.Add(self, -X)
def __rsub__(self, X):
"""Y.__rsub__(X) <==> Y-X"""
return chaospy.Add(X, -self)
def __neg__(self):
"""X.__neg__() <==> -X"""
return chaospy.Negative(self)
def __mul__(self, X):
"""Y.__mul__(X) <==> X*Y"""
return chaospy.Multiply(self, X)
def __rmul__(self, X):
"""Y.__rmul__(X) <==> Y*X"""
return chaospy.Multiply(X, self)
def __div__(self, X):
"""Y.__div__(X) <==> Y/X"""
return chaospy.Multiply(self, X**-1)
def __rdiv__(self, X):
"""Y.__rdiv__(X) <==> X/Y"""
return chaospy.Multiply(X, self**-1)
def __floordiv__(self, X):
"""Y.__floordiv__(X) <==> Y/X"""
return chaospy.Multiply(self, X**-1)
def __rfloordiv__(self, X):
"""Y.__rfloordiv__(X) <==> X/Y"""
return chaospy.Multiply(X, self**-1)
def __truediv__(self, X):
"""Y.__truediv__(X) <==> Y/X"""
return chaospy.Multiply(self, X**-1)
def __rtruediv__(self, X):
"""Y.__rtruediv__(X) <==> X/Y"""
return chaospy.Multiply(X, self**-1)
def __pow__(self, X):
"""Y.__pow__(X) <==> Y**X"""
return chaospy.Power(self, X)
def __rpow__(self, X):
"""Y.__rpow__(X) <==> X**Y"""
return chaospy.Power(X, self)
def __eq__(self, other):
if not isinstance(other, Distribution):
return False
if len(other) != len(self):
return False
if len(self) > 1:
return all([self == other for self, other in zip(self, other)])
if isinstance(self, chaospy.ItemDistribution) and isinstance(
other, chaospy.ItemDistribution
):
return (
self._parameters["index"] == other._parameters["index"]
and self._parameters["parent"] is other._parameters["parent"]
)
return self is other
def __hash__(self):
return id(self)
| mit | a3b201561af7e362951084dc8daa6d90 | 36.149502 | 88 | 0.535354 | 4.206922 | false | false | false | false |
jonathf/chaospy | chaospy/distributions/operators/truncation.py | 1 | 8221 | """
Truncation.
Example usage
-------------
Simple distribution to start with::
>>> distribution = chaospy.Normal(0, 1)
>>> distribution.inv([0.9, 0.99, 0.999]).round(4)
array([1.2816, 2.3263, 3.0902])
Same distribution, but with a upper truncation::
>>> right_trunc = chaospy.Trunc(chaospy.Normal(0, 1), upper=1)
>>> right_trunc
Trunc(Normal(mu=0, sigma=1), upper=1)
>>> right_trunc.inv([0.9, 0.99, 0.999]).round(4)
array([0.6974, 0.9658, 0.9965])
Same, but with lower truncation::
>>> left_trunc = chaospy.Trunc(chaospy.Normal(0, 1), lower=1)
>>> left_trunc
Trunc(Normal(mu=0, sigma=1), lower=1)
>>> left_trunc.inv([0.001, 0.01, 0.1]).round(4)
array([1.0007, 1.0066, 1.0679])
"""
import numpy
import chaospy
from ..baseclass import Distribution, OperatorDistribution
class Trunc(Distribution):
    """Truncation of a distribution to the interval ``[lower, upper]``."""

    def __init__(self, dist, lower=None, upper=None):
        """
        Constructor.

        Args:
            dist (Distribution):
                Distribution to be truncated.
            lower (Distribution, numpy.ndarray):
                Lower truncation bound. Defaults to ``dist.lower``.
            upper (Distribution, numpy.ndarray):
                Upper truncation bound. Defaults to ``dist.upper``.
        """
        assert isinstance(dist, Distribution)
        repr_args = [dist]
        repr_args += chaospy.format_repr_kwargs(lower=(lower, None))
        repr_args += chaospy.format_repr_kwargs(upper=(upper, None))
        # The truncation must not share underlying random variables with
        # the distribution it truncates.
        exclusion = set()
        for deps in dist._dependencies:
            exclusion.update(deps)
        if isinstance(lower, Distribution):
            if lower.stochastic_dependent:
                raise chaospy.StochasticallyDependentError(
                    "Joint distribution with dependencies not supported."
                )
            assert len(dist) == len(lower)
            lower_ = lower.lower
        elif lower is None:
            lower = lower_ = dist.lower
        else:
            lower = lower_ = numpy.atleast_1d(lower)
        if isinstance(upper, Distribution):
            if upper.stochastic_dependent:
                raise chaospy.StochasticallyDependentError(
                    "Joint distribution with dependencies not supported."
                )
            assert len(dist) == len(upper)
            upper_ = upper.upper
        elif upper is None:
            upper = upper_ = dist.upper
        else:
            upper = upper_ = numpy.atleast_1d(upper)
        assert numpy.all(
            upper_ > lower_
        ), "condition `upper > lower` not satisfied: %s <= %s" % (upper_, lower_)
        dependencies, parameters, rotation = chaospy.declare_dependencies(
            distribution=self,
            parameters=dict(lower=lower, upper=upper),
            length=len(dist),
        )
        super(Trunc, self).__init__(
            parameters=parameters,
            dependencies=dependencies,
            exclusion=exclusion,
            repr_args=repr_args,
        )
        self._dist = dist

    def get_parameters(self, idx, cache, assert_numerical=True):
        """Resolve the `lower`/`upper` parameters for dimension `idx`."""
        parameters = super(Trunc, self).get_parameters(
            idx, cache, assert_numerical=assert_numerical
        )
        assert set(parameters) == {"cache", "lower", "upper", "idx"}
        if isinstance(parameters["lower"], Distribution):
            parameters["lower"] = parameters["lower"]._get_cache(
                idx, cache=parameters["cache"], get=0
            )
        elif len(parameters["lower"]) > 1 and idx is not None:
            parameters["lower"] = parameters["lower"][idx]
        if isinstance(parameters["upper"], Distribution):
            parameters["upper"] = parameters["upper"]._get_cache(
                idx, cache=parameters["cache"], get=0
            )
        elif len(parameters["upper"]) > 1 and idx is not None:
            parameters["upper"] = parameters["upper"][idx]
        if assert_numerical:
            # At most one bound may remain an unresolved distribution.
            assert not isinstance(parameters["lower"], Distribution) or not isinstance(
                parameters["upper"], Distribution
            )
        if idx is None:
            del parameters["idx"]
        return parameters

    def _lower(self, idx, lower, upper, cache):
        """
        Distribution lower bound.

        Examples:
            >>> chaospy.Trunc(chaospy.Uniform(), upper=0.6).lower
            array([0.])
            >>> chaospy.Trunc(chaospy.Uniform(), lower=0.6).lower
            array([0.6])
        """
        del upper
        if isinstance(lower, Distribution):
            lower = lower._get_lower(idx, cache=cache)
        return lower

    def _upper(self, idx, lower, upper, cache):
        """
        Distribution upper bound.

        Examples:
            >>> chaospy.Trunc(chaospy.Uniform(), upper=0.6).upper
            array([0.6])
            >>> chaospy.Trunc(chaospy.Uniform(), lower=0.6).upper
            array([1.])
        """
        del lower
        if isinstance(upper, Distribution):
            upper = upper._get_upper(idx, cache=cache)
        return upper

    def _cdf(self, xloc, idx, lower, upper, cache):
        """
        Cumulative distribution function.

        Example:
            >>> chaospy.Uniform().fwd([-0.5, 0.3, 0.7, 1.2])
            array([0. , 0.3, 0.7, 1. ])
            >>> chaospy.Trunc(chaospy.Uniform(), upper=0.4).fwd([-0.5, 0.2, 0.8, 1.2])
            array([0. , 0.5, 1. , 1. ])
            >>> chaospy.Trunc(chaospy.Uniform(), lower=0.6).fwd([-0.5, 0.2, 0.8, 1.2])
            array([0. , 0. , 0.5, 1. ])
        """
        assert not isinstance(lower, Distribution)
        assert not isinstance(upper, Distribution)
        lower = numpy.broadcast_to(lower, xloc.shape)
        upper = numpy.broadcast_to(upper, xloc.shape)
        lower = self._dist._get_fwd(lower, idx, cache=cache.copy())
        upper = self._dist._get_fwd(upper, idx, cache=cache.copy())
        uloc = self._dist._get_fwd(xloc, idx, cache)
        # Bug fix: normalize by the truncated mass F(upper)-F(lower).
        # The previous form `(uloc-lower)/(1-lower)/upper` only agrees when
        # at most one bound is truncated and mis-normalizes two-sided
        # truncations.
        return (uloc - lower) / (upper - lower)

    def _pdf(self, xloc, idx, lower, upper, cache):
        """
        Probability density function.

        Example:
            >>> dist = chaospy.Trunc(chaospy.Uniform(), upper=0.6)
            >>> dist.pdf([-0.25, 0.25, 0.5, 0.75, 1.25])
            array([0.        , 1.66666667, 1.66666667, 0.        , 0.        ])
            >>> dist = chaospy.Trunc(chaospy.Uniform(), upper=0.4)
            >>> dist.pdf([-0.25, 0.25, 0.5, 0.75, 1.25])
            array([0. , 2.5, 0. , 0. , 0. ])
            >>> dist = chaospy.Trunc(chaospy.Uniform(), lower=0.4)
            >>> dist.pdf([-0.25, 0.25, 0.5, 0.75, 1.25])
            array([0.        , 0.        , 1.66666667, 1.66666667, 0.        ])
            >>> dist = chaospy.Trunc(chaospy.Uniform(), lower=0.6)
            >>> dist.pdf([-0.25, 0.25, 0.5, 0.75, 1.25])
            array([0. , 0. , 0. , 2.5, 0. ])
        """
        assert not isinstance(lower, Distribution)
        assert not isinstance(upper, Distribution)
        lower = numpy.broadcast_to(lower, xloc.shape)
        upper = numpy.broadcast_to(upper, xloc.shape)
        lower = self._dist._get_fwd(lower, idx, cache=cache.copy())
        upper = self._dist._get_fwd(upper, idx, cache=cache.copy())
        uloc = self._dist._get_pdf(xloc, idx, cache=cache)
        # Bug fix: rescale the density by the truncated mass (see _cdf).
        return uloc / (upper - lower)

    def _ppf(self, qloc, idx, lower, upper, cache):
        """
        Point percentile function.

        Example:
            >>> chaospy.Uniform().inv([0.1, 0.2, 0.9])
            array([0.1, 0.2, 0.9])
            >>> chaospy.Trunc(chaospy.Uniform(), upper=0.4).inv([0.1, 0.2, 0.9])
            array([0.04, 0.08, 0.36])
            >>> chaospy.Trunc(chaospy.Uniform(), lower=0.6).inv([0.1, 0.2, 0.9])
            array([0.64, 0.68, 0.96])
        """
        assert not isinstance(lower, Distribution)
        assert not isinstance(upper, Distribution)
        lower = numpy.broadcast_to(lower, qloc.shape)
        upper = numpy.broadcast_to(upper, qloc.shape)
        lower = self._dist._get_fwd(lower, idx, cache=cache.copy())
        upper = self._dist._get_fwd(upper, idx, cache=cache.copy())
        # Bug fix: inverse of the corrected CDF above.
        return self._dist._get_inv(qloc * (upper - lower) + lower, idx, cache=cache)
| mit | 936a74829597bd11db8e2ebe925a6bf6 | 36.538813 | 88 | 0.541297 | 3.607284 | false | false | false | false |
pyamg/pyamg | pyamg/relaxation/smoothing.py | 1 | 24568 | """Method to create pre and post-smoothers on the levels of a MultilevelSolver.
The setup_smoother_name functions are helper functions for
parsing user input and assigning each level the appropriate smoother for
the functions in 'change_smoothers'.
The standard interface is
Parameters
----------
lvl : multilevel level
the level in the hierarchy for which to assign a smoother
iterations : int
how many smoother iterations
optional_params : dict
optional params specific for each method such as omega or sweep
Returns
-------
Function pointer for the appropriate relaxation method for level=lvl
Examples
--------
See change_smoothers above
"""
import numpy as np
from scipy import sparse
from scipy.sparse.linalg import LinearOperator
from ..util.utils import scale_rows, get_block_diag, get_diagonal
from ..util.linalg import approximate_spectral_radius
from ..krylov import gmres, cgne, cgnr, cg
from . import relaxation
from .chebyshev import chebyshev_polynomial_coefficients
# Default relaxation parameters and list of by-definition
# symmetric relaxation schemes, e.g. Jacobi.
DEFAULT_SWEEP = 'forward'
DEFAULT_NITER = 1
SYMMETRIC_RELAXATION = ['jacobi', 'richardson', 'block_jacobi',
'jacobi_ne', 'chebyshev', None]
def _unpack_arg(v):
    """Normalize a smoother specification to a ``(method, kwargs)`` pair.

    A bare name (or None) maps to ``(name, {})``; a ``(name, opts)`` tuple
    is split into its two components.
    """
    if not isinstance(v, tuple):
        return v, {}
    return v[0], v[1]
def _pair_is_symmetric(fn1, kwargs1, fn2, kwargs2):
    """Return True if a (pre, post) relaxation pair yields symmetric smoothing.

    The pair is symmetric when both use the same method with the same number
    of iterations, and the method is either symmetric by definition (e.g.
    Jacobi) or the two sweep directions mirror each other
    (forward/backward, backward/forward, or symmetric/symmetric).
    """
    it1 = kwargs1.get('iterations', DEFAULT_NITER)
    it2 = kwargs2.get('iterations', DEFAULT_NITER)
    if (fn1 != fn2) or (it1 != it2):
        return False
    if fn1 in SYMMETRIC_RELAXATION:
        return True
    sweep1 = kwargs1.get('sweep', DEFAULT_SWEEP)
    sweep2 = kwargs2.get('sweep', DEFAULT_SWEEP)
    return (sweep1, sweep2) in (('forward', 'backward'),
                                ('backward', 'forward'),
                                ('symmetric', 'symmetric'))


def change_smoothers(ml, presmoother, postsmoother):
    """Initialize pre and post smoothers.

    Initialize pre- and post- smoothers throughout a MultilevelSolver, with
    the option of having different smoothers at different levels
    For each level of the MultilevelSolver 'ml' (except the coarsest level),
    initialize the .presmoother() and .postsmoother() methods used in the
    multigrid cycle.

    Parameters
    ----------
    ml : pyamg multilevel hierarchy
        Data structure that stores the multigrid hierarchy.
    presmoother : None, string, tuple, list
        presmoother can be (1) the name of a supported smoother, e.g.
        "gauss_seidel", (2) a tuple of the form ('method','opts') where
        'method' is the name of a supported smoother and 'opts' a dict of
        keyword arguments to the smoother, or (3) a list of instances of
        options 1 or 2. See the Examples section for illustrations of the
        format.
        If presmoother is a list, presmoother[i] determines the smoothing
        strategy for level i. Else, presmoother defines the same strategy
        for all levels.
        If len(presmoother) < len(ml.levels), then
        presmoother[-1] is used for all remaining levels
        If len(presmoother) > len(ml.levels), then
        the remaining smoothing strategies are ignored
    postsmoother : string, tuple, list
        Defines postsmoother in identical fashion to presmoother

    Returns
    -------
    ml changed in place
    ml.levels[i].presmoother  <=== presmoother[i]
    ml.levels[i].postsmoother <=== postsmoother[i]
    ml.symmetric_smoothing is marked True/False depending on whether
        the smoothing scheme is symmetric.

    Notes
    -----
    - Parameter 'omega' of the Jacobi, Richardson, and jacobi_ne
      methods is scaled by the spectral radius of the matrix on
      each level. Therefore 'omega' should be in the interval (0,2).
    - Parameter 'withrho' (default: True) controls whether the omega is
      rescaled by the spectral radius in jacobi, block_jacobi, and jacobi_ne
    - By initializing the smoothers after the hierarchy has been setup, allows
      for "algebraically" directed relaxation, such as strength_based_schwarz,
      which uses only the strong connections of a degree-of-freedom to define
      overlapping regions
    - Available smoother methods::

        gauss_seidel
        block_gauss_seidel
        jacobi
        block_jacobi
        richardson
        sor
        chebyshev
        gauss_seidel_nr
        gauss_seidel_ne
        jacobi_ne
        cg
        gmres
        cgne
        cgnr
        schwarz
        strength_based_schwarz
        None

    Examples
    --------
    >>> from pyamg.gallery import poisson
    >>> from pyamg.aggregation import smoothed_aggregation_solver
    >>> from pyamg.relaxation.smoothing import change_smoothers
    >>> from pyamg.util.linalg import norm
    >>> import numpy as np
    >>> A = poisson((10,10), format='csr')
    >>> b = np.random.rand(A.shape[0],)
    >>> ml = smoothed_aggregation_solver(A, max_coarse=10)
    >>> # Set all levels to use gauss_seidel's defaults
    >>> smoothers = 'gauss_seidel'
    >>> change_smoothers(ml, presmoother=smoothers, postsmoother=smoothers)
    >>> residuals=[]
    >>> x = ml.solve(b, tol=1e-8, residuals=residuals)
    >>> # Set all levels to use three iterations of gauss_seidel's defaults
    >>> smoothers = ('gauss_seidel', {'iterations' : 3})
    >>> change_smoothers(ml, presmoother=smoothers, postsmoother=None)
    >>> residuals=[]
    >>> x = ml.solve(b, tol=1e-8, residuals=residuals)
    >>> # Set level 0 to use gauss_seidel's defaults, and all
    >>> # subsequent levels to use 5 iterations of cgnr
    >>> smoothers = ['gauss_seidel', ('cgnr', {'maxiter' : 5})]
    >>> change_smoothers(ml, presmoother=smoothers, postsmoother=smoothers)
    >>> residuals=[]
    >>> x = ml.solve(b, tol=1e-8, residuals=residuals)
    """
    ml.symmetric_smoothing = True

    # Interpret arguments into lists, one entry per level (short lists are
    # extended by repeating their last entry further below).
    if isinstance(presmoother, (str, tuple)) or (presmoother is None):
        presmoother = [presmoother]
    elif not isinstance(presmoother, list):
        raise ValueError('Unrecognized presmoother')
    if isinstance(postsmoother, (str, tuple)) or (postsmoother is None):
        postsmoother = [postsmoother]
    elif not isinstance(postsmoother, list):
        raise ValueError('Unrecognized postsmoother')

    # Predefine so the last-seen configuration stays in scope beyond the
    # first loop; it is reused for levels past the end of the shorter list.
    fn1 = None
    fn2 = None
    kwargs1 = {}
    kwargs2 = {}
    min_len = min(len(presmoother), len(postsmoother), len(ml.levels[:-1]))

    # Levels where both lists provide an explicit entry.
    for i in range(0, min_len):
        fn1, kwargs1 = _unpack_arg(presmoother[i])
        try:
            setup_presmoother = _setup_call(str(fn1).lower())
        except NameError as e:
            raise NameError(f'Invalid presmoother method: {fn1}') from e
        ml.levels[i].presmoother = setup_presmoother(ml.levels[i], **kwargs1)

        fn2, kwargs2 = _unpack_arg(postsmoother[i])
        try:
            setup_postsmoother = _setup_call(str(fn2).lower())
        except NameError as e:
            raise NameError(f'Invalid postsmoother method: {fn2}') from e
        ml.levels[i].postsmoother = setup_postsmoother(ml.levels[i], **kwargs2)

        if not _pair_is_symmetric(fn1, kwargs1, fn2, kwargs2):
            ml.symmetric_smoothing = False

    if len(presmoother) < len(postsmoother):
        # Remaining postsmoother entries: reuse the last presmoother config.
        mid_len = min(len(postsmoother), len(ml.levels[:-1]))
        for i in range(min_len, mid_len):
            ml.levels[i].presmoother = setup_presmoother(ml.levels[i], **kwargs1)

            fn2, kwargs2 = _unpack_arg(postsmoother[i])
            try:
                setup_postsmoother = _setup_call(str(fn2).lower())
            except NameError as e:
                raise NameError(f'Invalid postsmoother method: {fn2}') from e
            ml.levels[i].postsmoother = setup_postsmoother(ml.levels[i], **kwargs2)

            if not _pair_is_symmetric(fn1, kwargs1, fn2, kwargs2):
                ml.symmetric_smoothing = False
    elif len(presmoother) > len(postsmoother):
        # Remaining presmoother entries: reuse the last postsmoother config.
        mid_len = min(len(presmoother), len(ml.levels[:-1]))
        for i in range(min_len, mid_len):
            fn1, kwargs1 = _unpack_arg(presmoother[i])
            try:
                setup_presmoother = _setup_call(str(fn1).lower())
            except NameError as e:
                raise NameError(f'Invalid presmoother method: {fn1}') from e
            ml.levels[i].presmoother = setup_presmoother(ml.levels[i], **kwargs1)

            ml.levels[i].postsmoother = setup_postsmoother(ml.levels[i], **kwargs2)

            if not _pair_is_symmetric(fn1, kwargs1, fn2, kwargs2):
                ml.symmetric_smoothing = False
    else:
        mid_len = min_len

    # Fill in remaining levels with the last pre/post configurations.
    for i in range(mid_len, len(ml.levels[:-1])):
        ml.levels[i].presmoother = setup_presmoother(ml.levels[i], **kwargs1)
        ml.levels[i].postsmoother = setup_postsmoother(ml.levels[i], **kwargs2)
def rho_D_inv_A(A):
    """Return the (approx.) spectral radius of D^-1 * A.

    The result is memoized on the matrix as ``A.rho_D_inv``.

    Parameters
    ----------
    A : sparse-matrix

    Returns
    -------
    approximate spectral radius of diag(A)^{-1} A

    Examples
    --------
    >>> from pyamg.gallery import poisson
    >>> from pyamg.relaxation.smoothing import rho_D_inv_A
    >>> from scipy.sparse import csr_matrix
    >>> import numpy as np
    >>> A = csr_matrix(np.array([[1.0,0,0],[0,2.0,0],[0,0,3.0]]))
    >>> print(f'{rho_D_inv_A(A):2.2}')
    1.0
    """
    if hasattr(A, 'rho_D_inv'):
        return A.rho_D_inv
    diag_inv = get_diagonal(A, inv=True)
    scaled = scale_rows(A, diag_inv, copy=True)
    A.rho_D_inv = approximate_spectral_radius(scaled)
    return A.rho_D_inv
def rho_block_D_inv_A(A, Dinv):
    """Return the (approx.) spectral radius of block D^-1 * A.

    The result is memoized on the matrix as ``A.rho_block_D_inv``.

    Parameters
    ----------
    A : sparse-matrix
        size NxN
    Dinv : array
        Inverse of diagonal blocks of A
        size (N/blocksize, blocksize, blocksize)

    Returns
    -------
    approximate spectral radius of (Dinv A)

    Examples
    --------
    >>> from pyamg.gallery import poisson
    >>> from pyamg.relaxation.smoothing import rho_block_D_inv_A
    >>> from pyamg.util.utils import get_block_diag
    >>> A = poisson((10,10), format='csr')
    >>> Dinv = get_block_diag(A, blocksize=4, inv_flag=True)
    """
    if hasattr(A, 'rho_block_D_inv'):
        return A.rho_block_D_inv

    blocksize = Dinv.shape[1]
    if Dinv.shape[1] != Dinv.shape[2]:
        raise ValueError('Dinv has incorrect dimensions')
    if Dinv.shape[0] != int(A.shape[0]/blocksize):
        raise ValueError('Dinv and A have incompatible dimensions')

    # Pack the block inverses into a block-diagonal BSR matrix.
    nblocks = Dinv.shape[0]
    Dinv = sparse.bsr_matrix((Dinv, np.arange(nblocks), np.arange(nblocks + 1)),
                             shape=A.shape)

    # Apply Dinv*A implicitly; never form the product explicitly.
    def matvec(x):
        return Dinv * (A * x)

    D_inv_A = LinearOperator(A.shape, matvec, dtype=A.dtype)
    A.rho_block_D_inv = approximate_spectral_radius(D_inv_A)
    return A.rho_block_D_inv
# pylint: disable=redefined-builtin
def matrix_asformat(lvl, name, format, blocksize=None):
"""Set a matrix to a specific format.
This routine looks for the matrix "name" in the specified format as a
member of the level instance, lvl. For example, if name='A', format='bsr'
and blocksize=(4,4), and if lvl.Absr44 exists with the correct blocksize,
then lvl.Absr is returned. If the matrix doesn't already exist, lvl.name
is converted to the desired format, and made a member of lvl.
Only create such persistent copies of a matrix for routines such as
presmoothing and postsmoothing, where the matrix conversion is done every
cycle.
Calling this function can _dramatically_ increase your memory costs.
Be careful with it's usage.
"""
desired_matrix = name + format
M = getattr(lvl, name)
if format == 'bsr':
desired_matrix += str(blocksize[0])+str(blocksize[1])
if hasattr(lvl, desired_matrix):
# if lvl already contains lvl.name+format
pass
elif M.format == format and format != 'bsr':
# is base_matrix already in the correct format?
setattr(lvl, desired_matrix, M)
elif M.format == format and format == 'bsr':
# convert to bsr with the right blocksize
# tobsr() will not do anything extra if this is uneeded
setattr(lvl, desired_matrix, M.tobsr(blocksize=blocksize))
else:
# convert
newM = getattr(M, 'to' + format)()
setattr(lvl, desired_matrix, newM)
return getattr(lvl, desired_matrix)
# pylint: disable=unused-argument
def setup_gauss_seidel(lvl, iterations=DEFAULT_NITER, sweep=DEFAULT_SWEEP):
    """Set up Gauss-Seidel relaxation.

    Returns a ``smoother(A, x, b)`` callable applying ``iterations``
    Gauss-Seidel sweeps in the ``sweep`` direction, updating x in place.
    """
    def smoother(A, x, b):
        relaxation.gauss_seidel(A, x, b, sweep=sweep, iterations=iterations)

    return smoother
def setup_jacobi(lvl, iterations=DEFAULT_NITER, omega=1.0, withrho=True):
    """Set up weighted Jacobi.

    When ``withrho`` is set, ``omega`` is rescaled by the spectral radius
    of ``D^-1 A`` for the level matrix.
    """
    weight = omega / rho_D_inv_A(lvl.A) if withrho else omega

    def smoother(A, x, b):
        relaxation.jacobi(A, x, b, iterations=iterations, omega=weight)

    return smoother
def setup_schwarz(lvl, iterations=DEFAULT_NITER, subdomain=None,
                  subdomain_ptr=None, inv_subblock=None, inv_subblock_ptr=None,
                  sweep=DEFAULT_SWEEP):
    """Set up Schwarz relaxation.

    Ensures a CSR copy of the level matrix exists and precomputes the
    overlapping subdomain data once, so each smoothing call can reuse it.
    """
    matrix_asformat(lvl, 'A', 'csr')
    lvl.Acsr.sort_indices()
    params = relaxation.schwarz_parameters(
        lvl.Acsr, subdomain, subdomain_ptr, inv_subblock, inv_subblock_ptr)
    subdomain, subdomain_ptr, inv_subblock, inv_subblock_ptr = params

    def smoother(A, x, b):
        relaxation.schwarz(lvl.Acsr, x, b, iterations=iterations,
                           subdomain=subdomain,
                           subdomain_ptr=subdomain_ptr,
                           inv_subblock=inv_subblock,
                           inv_subblock_ptr=inv_subblock_ptr, sweep=sweep)

    return smoother
def setup_strength_based_schwarz(lvl, iterations=DEFAULT_NITER,
                                 sweep=DEFAULT_SWEEP):
    """Set up strength-based Schwarz.

    Overlapping regions are taken from the sparsity pattern of the
    strength-of-connection matrix ``lvl.C`` (falling back to ``lvl.A``
    when no strength matrix is present).
    """
    C = lvl.C.tocsr() if hasattr(lvl, 'C') else lvl.A.tocsr()
    C.sort_indices()
    return setup_schwarz(lvl, iterations=iterations,
                         subdomain=C.indices.copy(),
                         subdomain_ptr=C.indptr.copy(), sweep=sweep)
def setup_block_jacobi(lvl, iterations=DEFAULT_NITER, omega=1.0, Dinv=None,
                       blocksize=None, withrho=True):
    """Set up block Jacobi.

    The block size is taken (in order of preference) from ``blocksize``,
    from ``Dinv``, or inferred from the storage format of ``lvl.A``.
    """
    # Determine the block size.
    if blocksize is None:
        if Dinv is not None:
            blocksize = Dinv.shape[1]
        elif sparse.isspmatrix_csr(lvl.A):
            blocksize = 1
        elif sparse.isspmatrix_bsr(lvl.A):
            blocksize = lvl.A.blocksize[0]

    if blocksize == 1:
        # Block Jacobi with 1x1 blocks degenerates to point Jacobi.
        return setup_jacobi(lvl, iterations=iterations, omega=omega,
                            withrho=withrho)

    if Dinv is None:
        Dinv = get_block_diag(lvl.A, blocksize=blocksize, inv_flag=True)
    if withrho:
        omega = omega / rho_block_D_inv_A(lvl.A, Dinv)

    def smoother(A, x, b):
        relaxation.block_jacobi(A, x, b, iterations=iterations,
                                omega=omega, Dinv=Dinv,
                                blocksize=blocksize)

    return smoother
def setup_block_gauss_seidel(lvl, iterations=DEFAULT_NITER,
                             sweep=DEFAULT_SWEEP,
                             Dinv=None, blocksize=None):
    """Set up block Gauss-Seidel.

    The block size is taken (in order of preference) from ``blocksize``,
    from ``Dinv``, or inferred from the storage format of ``lvl.A``.
    """
    # Determine the block size.
    if blocksize is None:
        if Dinv is not None:
            blocksize = Dinv.shape[1]
        elif sparse.isspmatrix_csr(lvl.A):
            blocksize = 1
        elif sparse.isspmatrix_bsr(lvl.A):
            blocksize = lvl.A.blocksize[0]

    if blocksize == 1:
        # Block GS with 1x1 blocks degenerates to point Gauss-Seidel.
        return setup_gauss_seidel(lvl, iterations=iterations, sweep=sweep)

    if Dinv is None:
        Dinv = get_block_diag(lvl.A, blocksize=blocksize, inv_flag=True)

    def smoother(A, x, b):
        relaxation.block_gauss_seidel(A, x, b, iterations=iterations,
                                      Dinv=Dinv, blocksize=blocksize,
                                      sweep=sweep)

    return smoother
def setup_richardson(lvl, iterations=DEFAULT_NITER, omega=1.0):
    """Set up Richardson iteration, scaling omega by the spectral radius."""
    weight = omega / approximate_spectral_radius(lvl.A)

    def smoother(A, x, b):
        relaxation.polynomial(A, x, b, coefficients=[weight],
                              iterations=iterations)

    return smoother
def setup_sor(lvl, omega=0.5, iterations=DEFAULT_NITER, sweep=DEFAULT_SWEEP):
    """Set up SOR (successive over-relaxation)."""
    def smoother(A, x, b):
        relaxation.sor(A, x, b, sweep=sweep, omega=omega,
                       iterations=iterations)

    return smoother
def setup_chebyshev(lvl, lower_bound=1.0/30.0, upper_bound=1.1, degree=3,
                    iterations=DEFAULT_NITER):
    """Set up Chebyshev smoothing.

    The polynomial is fitted to ``[rho*lower_bound, rho*upper_bound]``,
    where ``rho`` approximates the spectral radius of ``lvl.A``.
    """
    rho = approximate_spectral_radius(lvl.A)
    interval_lo = rho * lower_bound
    interval_hi = rho * upper_bound
    # Drop the constant coefficient of the polynomial.
    coefficients = -chebyshev_polynomial_coefficients(
        interval_lo, interval_hi, degree)[:-1]

    def smoother(A, x, b):
        relaxation.polynomial(A, x, b, coefficients=coefficients,
                              iterations=iterations)

    return smoother
def setup_jacobi_ne(lvl, iterations=DEFAULT_NITER, omega=1.0, withrho=True):
    """Set up Jacobi on the normal equations."""
    matrix_asformat(lvl, 'A', 'csr')
    weight = omega / rho_D_inv_A(lvl.Acsr) ** 2 if withrho else omega

    def smoother(A, x, b):
        relaxation.jacobi_ne(lvl.Acsr, x, b, iterations=iterations,
                             omega=weight)

    return smoother
def setup_gauss_seidel_ne(lvl, iterations=DEFAULT_NITER, sweep=DEFAULT_SWEEP,
                          omega=1.0):
    """Set up Gauss-Seidel on the normal equations (NE variant)."""
    matrix_asformat(lvl, 'A', 'csr')

    def smoother(A, x, b):
        relaxation.gauss_seidel_ne(lvl.Acsr, x, b, iterations=iterations,
                                   omega=omega, sweep=sweep)

    return smoother
def setup_gauss_seidel_nr(lvl, iterations=DEFAULT_NITER, sweep=DEFAULT_SWEEP,
                          omega=1.0):
    """Set up Gauss-Seidel on the normal equations (NR variant, needs CSC)."""
    matrix_asformat(lvl, 'A', 'csc')

    def smoother(A, x, b):
        relaxation.gauss_seidel_nr(lvl.Acsc, x, b, iterations=iterations,
                                   omega=omega, sweep=sweep)

    return smoother
def setup_gmres(lvl, tol=1e-12, maxiter=1, restrt=None, M=None, callback=None,
                residuals=None):
    """Set up GMRES smoothing (x is overwritten with the GMRES iterate)."""
    def smoother(A, x, b):
        result = gmres(A, b, x0=x, tol=tol, maxiter=maxiter, restrt=restrt,
                       M=M, callback=callback, residuals=residuals)[0]
        x[:] = result.reshape(x.shape)

    return smoother
def setup_cg(lvl, tol=1e-12, maxiter=1, M=None, callback=None, residuals=None):
    """Set up CG smoothing (x is overwritten with the CG iterate)."""
    def smoother(A, x, b):
        result = cg(A, b, x0=x, tol=tol, maxiter=maxiter, M=M,
                    callback=callback, residuals=residuals)[0]
        x[:] = result.reshape(x.shape)

    return smoother
def setup_cgne(lvl, tol=1e-12, maxiter=1, M=None, callback=None,
               residuals=None):
    """Set up CGNE smoothing (x is overwritten with the CGNE iterate)."""
    def smoother(A, x, b):
        result = cgne(A, b, x0=x, tol=tol, maxiter=maxiter, M=M,
                      callback=callback, residuals=residuals)[0]
        x[:] = result.reshape(x.shape)

    return smoother
def setup_cgnr(lvl, tol=1e-12, maxiter=1, M=None, callback=None,
               residuals=None):
    """Set up CGNR smoothing (x is overwritten with the CGNR iterate)."""
    def smoother(A, x, b):
        result = cgnr(A, b, x0=x, tol=tol, maxiter=maxiter, M=M,
                      callback=callback, residuals=residuals)[0]
        x[:] = result.reshape(x.shape)

    return smoother
def setup_none(lvl):
    """Set up the identity (no-op) smoother."""
    def smoother(A, x, b):
        # Deliberately leave x untouched.
        return None

    return smoother
def _setup_call(fn):
    """Register setup functions.

    This is a helper function to call the setup methods and avoids use of
    eval().

    Raises
    ------
    NameError
        If ``fn`` is not a registered smoother name. ``NameError`` (rather
        than ``ValueError``) is raised so the ``except NameError`` guards in
        ``change_smoothers`` can intercept it and report which pre- or
        postsmoother specification was invalid.
    """
    setup_register = {
        'gauss_seidel': setup_gauss_seidel,
        'jacobi': setup_jacobi,
        'schwarz': setup_schwarz,
        'strength_based_schwarz': setup_strength_based_schwarz,
        'block_jacobi': setup_block_jacobi,
        'block_gauss_seidel': setup_block_gauss_seidel,
        'richardson': setup_richardson,
        'sor': setup_sor,
        'chebyshev': setup_chebyshev,
        'jacobi_ne': setup_jacobi_ne,
        'gauss_seidel_ne': setup_gauss_seidel_ne,
        'gauss_seidel_nr': setup_gauss_seidel_nr,
        'gmres': setup_gmres,
        'cg': setup_cg,
        'cgne': setup_cgne,
        'cgnr': setup_cgnr,
        'none': setup_none,
    }
    if fn not in setup_register:
        # Bug fix: this previously raised ValueError, which slipped past the
        # `except NameError` handlers in change_smoothers, bypassing the
        # intended "Invalid pre/postsmoother method" diagnostics.
        raise NameError(f'Function {fn} does not have a setup')
    return setup_register[fn]
| mit | 58ec31404b60a1909e978ba9cbed9de6 | 33.074896 | 83 | 0.577336 | 3.676193 | false | false | false | false |
pyamg/pyamg | pyamg/krylov/_gmres_mgs.py | 1 | 11065 | """GMRES Gram-Schmidt-based implementation."""
import warnings
from warnings import warn
import numpy as np
import scipy as sp
from scipy.linalg import get_blas_funcs, get_lapack_funcs
from ..util.linalg import norm
from ..util import make_system
def apply_givens(Q, v, k):
    """Apply the first k Givens rotations in Q to v.

    Parameters
    ----------
    Q : list
        list of consecutive 2x2 Givens rotations
    v : array
        vector to apply the rotations to
    k : int
        number of rotations to apply.

    Returns
    -------
    v is changed in place

    Notes
    -----
    This routine is specialized for GMRES.  It assumes that the first
    Givens rotation is for dofs 0 and 1, the second for dofs 1 and 2,
    and so on.
    """
    for j in range(k):
        rotation = Q[j]
        # Rotate the adjacent pair (v[j], v[j+1]) in place.
        v[j:j + 2] = rotation.dot(v[j:j + 2])
def gmres_mgs(A, b, x0=None, tol=1e-5,
              restrt=None, maxiter=None,
              M=None, callback=None, residuals=None, reorth=False):
    """Generalized Minimum Residual Method (GMRES) based on MGS.
    GMRES iteratively refines the initial solution guess to the system
    Ax = b. Modified Gram-Schmidt version. Left preconditioning, leading
    to preconditioned residuals.
    Parameters
    ----------
    A : array, matrix, sparse matrix, LinearOperator
        n x n, linear system to solve
    b : array, matrix
        right hand side, shape is (n,) or (n,1)
    x0 : array, matrix
        initial guess, default is a vector of zeros
    tol : float
        Tolerance for stopping criteria, let r=r_k
        ||M r|| < tol ||M b||
        if ||b||=0, then set ||M b||=1 for these tests.
    restrt : None, int
        - if int, restrt is max number of inner iterations
          and maxiter is the max number of outer iterations
        - if None, do not restart GMRES, and max number of inner iterations
          is maxiter
    maxiter : None, int
        - if restrt is None, maxiter is the max number of inner iterations
          and GMRES does not restart
        - if restrt is int, maxiter is the max number of outer iterations,
          and restrt is the max number of inner iterations
        - defaults to min(n,40) if restart=None
    M : array, matrix, sparse matrix, LinearOperator
        n x n, inverted preconditioner, i.e. solve M A x = M b.
    callback : function
        User-supplied function is called after each iteration as
        callback(xk), where xk is the current solution vector
    residuals : list
        preconditioned residual history in the 2-norm,
        including the initial preconditioned residual
    reorth : boolean
        If True, then a check is made whether to re-orthogonalize the Krylov
        space each GMRES iteration
    Returns
    -------
    (xk, info)
    xk : an updated guess after k iterations to the solution of Ax = b
    info : halting status
            ==  =======================================
            0   successful exit
            >0  convergence to tolerance not achieved,
                return iteration count instead.
            <0  numerical breakdown, or illegal input
            ==  =======================================
    Notes
    -----
    The LinearOperator class is in scipy.sparse.linalg.
    Use this class if you prefer to define A or M as a mat-vec routine
    as opposed to explicitly constructing the matrix.
    For robustness, modified Gram-Schmidt is used to orthogonalize the
    Krylov Space Givens Rotations are used to provide the residual norm
    each iteration
    The residual is the *preconditioned* residual.
    Examples
    --------
    >>> from pyamg.krylov import gmres
    >>> from pyamg.util.linalg import norm
    >>> import numpy as np
    >>> from pyamg.gallery import poisson
    >>> A = poisson((10,10))
    >>> b = np.ones((A.shape[0],))
    >>> (x,flag) = gmres(A,b, maxiter=2, tol=1e-8, orthog='mgs')
    >>> print(f'{norm(b - A*x):.6}')
    6.54282
    References
    ----------
    .. [1] Yousef Saad, "Iterative Methods for Sparse Linear Systems,
       Second Edition", SIAM, pp. 151-172, pp. 272-275, 2003
       http://www-users.cs.umn.edu/~saad/books.html
    .. [2] C. T. Kelley, http://www4.ncsu.edu/~ctk/matlab_roots.html
    """
    # Convert inputs to linear system, with error checking
    A, M, x, b, postprocess = make_system(A, M, x0, b)
    n = A.shape[0]
    # Ensure that warnings are always reissued from this function
    warnings.filterwarnings('always', module='pyamg.krylov._gmres_mgs')
    # Get fast access to underlying BLAS routines
    # dotc is the conjugate dot, dotu does no conjugation
    [lartg] = get_lapack_funcs(['lartg'], [x])
    # Probe the dtype: complex data needs distinct conjugating (dotc) and
    # non-conjugating (dotu) products; for real data both map to 'dot'.
    if np.iscomplexobj(np.zeros((1,), dtype=x.dtype)):
        [axpy, dotu, dotc, scal] =\
            get_blas_funcs(['axpy', 'dotu', 'dotc', 'scal'], [x])
    else:
        # real type
        [axpy, dotu, dotc, scal] =\
            get_blas_funcs(['axpy', 'dot', 'dot', 'scal'], [x])
    # Set number of outer and inner iterations
    # If no restarts,
    #     then set max_inner=maxiter and max_outer=n
    # If restarts are set,
    #     then set max_inner=restart and max_outer=maxiter
    if restrt:
        if maxiter:
            max_outer = maxiter
        else:
            max_outer = 1
        if restrt > n:
            warn('Setting restrt to maximum allowed, n.')
            restrt = n
        max_inner = restrt
    else:
        max_outer = 1
        if maxiter is None:
            maxiter = min(n, 40)
        elif maxiter > n:
            warn('Setting maxiter to maximum allowed, n.')
            maxiter = n
        max_inner = maxiter
    # Is this a one dimensional matrix?
    if n == 1:
        # 1x1 system: solve directly by a single division.
        entry = np.ravel(A @ np.array([1.0], dtype=x.dtype))
        return (postprocess(b/entry), 0)
    # Prep for method
    r = b - A @ x
    # Apply preconditioner
    r = M @ r
    normr = norm(r)
    if residuals is not None:
        residuals[:] = [normr]  # initial residual
    # Check initial guess if b != 0,
    normb = norm(b)
    if normb == 0.0:
        normMb = 1.0  # reset so that tol is unscaled
    else:
        normMb = norm(M @ b)
    # set the stopping criteria (see the docstring)
    if normr < tol * normMb:
        return (postprocess(x), 0)
    # Use separate variable to track iterations.  If convergence fails, we
    # cannot simply report niter = (outer-1)*max_outer + inner.  Numerical
    # error could cause the inner loop to halt while the actual ||r|| > tolerance.
    niter = 0
    # Begin GMRES
    for _outer in range(max_outer):
        # Preallocate for Givens Rotations, Hessenberg matrix and Krylov Space
        # Space required is O(n*max_inner).
        # NOTE:  We are dealing with row-major matrices, so we traverse in a
        #        row-major fashion,
        #        i.e., H and V's transpose is what we store.
        Q = []  # Givens Rotations
        # Upper Hessenberg matrix, which is then
        #   converted to upper tri with Givens Rots
        H = np.zeros((max_inner+1, max_inner+1), dtype=x.dtype)
        V = np.zeros((max_inner+1, n), dtype=x.dtype)  # Krylov Space
        # vs store the pointers to each column of V.
        #   This saves a considerable amount of time.
        vs = []
        # v = r/normr
        V[0, :] = scal(1.0/normr, r)
        vs.append(V[0, :])
        # This is the RHS vector for the problem in the Krylov Space
        # NOTE(review): only entries 0..max_inner are ever touched; g is
        # sized n (>= max_inner+1) for convenience.
        g = np.zeros((n,), dtype=x.dtype)
        g[0] = normr
        for inner in range(max_inner):
            # New Search Direction
            v = V[inner+1, :]
            v[:] = np.ravel(M @ (A @ vs[-1]))
            vs.append(v)
            normv_old = norm(v)
            # Modified Gram Schmidt
            for k in range(inner+1):
                vk = vs[k]
                alpha = dotc(vk, v)
                H[inner, k] = alpha
                v[:] = axpy(vk, v, n, -alpha)
            normv = norm(v)
            H[inner, inner+1] = normv
            # Re-orthogonalize
            # (the test is True only when 0.001*normv no longer perturbs
            # normv_old in floating point, i.e. v lost nearly all of its
            # norm to cancellation during the MGS sweep above)
            if (reorth is True) and (normv_old == normv_old + 0.001 * normv):
                for k in range(inner+1):
                    vk = vs[k]
                    alpha = dotc(vk, v)
                    H[inner, k] = H[inner, k] + alpha
                    v[:] = axpy(vk, v, n, -alpha)
            # Check for breakdown
            if H[inner, inner+1] != 0.0:
                v[:] = scal(1.0/H[inner, inner+1], v)
            # Apply previous Givens rotations to H
            if inner > 0:
                apply_givens(Q, H[inner, :], inner)
            # Calculate and apply next complex-valued Givens Rotation
            # for the last inner iteration, when inner = n-1.
            # ==> Note that if max_inner = n, then this is unnecessary
            if inner != n-1:
                if H[inner, inner+1] != 0:
                    # lartg (LAPACK) returns (c, s, r) defining a Givens
                    # rotation that zeroes the subdiagonal entry.
                    [c, s, r] = lartg(H[inner, inner], H[inner, inner+1])
                    Qblock = np.array([[c, s], [-np.conjugate(s), c]], dtype=x.dtype)
                    Q.append(Qblock)
                    # Apply Givens Rotation to g,
                    #   the RHS for the linear system in the Krylov Subspace.
                    g[inner:inner+2] = np.dot(Qblock, g[inner:inner+2])
                    # Apply effect of Givens Rotation to H
                    H[inner, inner] = dotu(Qblock[0, :], H[inner, inner:inner+2])
                    H[inner, inner+1] = 0.0
            niter += 1
            # Do not update normr if last inner iteration, because
            # normr is calculated directly after this loop ends.
            if inner < max_inner-1:
                normr = np.abs(g[inner+1])
                if normr < tol * normMb:
                    break
                if residuals is not None:
                    residuals.append(normr)
                if callback is not None:
                    # Reconstruct the current iterate for the callback by
                    # solving the (inner+1) x (inner+1) triangular system.
                    y = sp.linalg.solve(H[0:inner+1, 0:inner+1].T, g[0:inner+1])
                    update = np.ravel(V[:inner+1, :].T.dot(y.reshape(-1, 1)))
                    callback(x + update)
        # end inner loop, back to outer loop
        # Find best update to x in Krylov Space V.  Solve inner x inner system.
        y = sp.linalg.solve(H[0:inner+1, 0:inner+1].T, g[0:inner+1])
        update = np.ravel(V[:inner+1, :].T.dot(y.reshape(-1, 1)))
        x = x + update
        r = b - A @ x
        # Apply preconditioner
        r = M @ r
        normr = norm(r)
        # Allow user access to the iterates
        if callback is not None:
            callback(x)
        if residuals is not None:
            residuals.append(normr)
        # Has GMRES stagnated?
        indices = (x != 0)
        if indices.any():
            # largest relative change over the nonzero entries of x
            change = np.max(np.abs(update[indices] / x[indices]))
            if change < 1e-12:
                # No change, halt
                return (postprocess(x), -1)
        # test for convergence
        if normr < tol * normMb:
            return (postprocess(x), 0)
    # end outer loop
    return (postprocess(x), niter)
| mit | ab3756fd790ec8fc5367a692eb04874e | 32.734756 | 85 | 0.551649 | 3.666335 | false | false | false | false |
pyamg/pyamg | pyamg/krylov/_minimal_residual.py | 1 | 4817 | """Minimum Residual projection method."""
import warnings
from warnings import warn
import numpy as np
from ..util.linalg import norm
from ..util import make_system
def minimal_residual(A, b, x0=None, tol=1e-5,
                     maxiter=None, M=None,
                     callback=None, residuals=None):
    """Minimal residual (MR) algorithm. 1D projection method.
    Solves the linear system Ax = b. Left preconditioning is supported.
    Parameters
    ----------
    A : array, matrix, sparse matrix, LinearOperator
        n x n, linear system to solve
    b : array, matrix
        right hand side, shape is (n,) or (n,1)
    x0 : array, matrix
        initial guess, default is a vector of zeros
    tol : float
        Tolerance for stopping criteria, let r=r_k
        ||M r|| < tol ||M b||
        if ||b||=0, then set ||M b||=1 for these tests.
    maxiter : int
        maximum number of iterations allowed
    M : array, matrix, sparse matrix, LinearOperator
        n x n, inverted preconditioner, i.e. solve M A x = M b.
    callback : function
        User-supplied function is called after each iteration as
        callback(xk), where xk is the current solution vector
    residuals : list
        preconditioned residual history in the 2-norm,
        including the initial preconditioned residual
    Returns
    -------
    (xk, info)
    xk : an updated guess after k iterations to the solution of Ax = b
    info : halting status
            ==  =======================================
            0   successful exit
            >0  convergence to tolerance not achieved,
                return iteration count instead.
            <0  numerical breakdown, or illegal input
            ==  =======================================
    Notes
    -----
    The LinearOperator class is in scipy.sparse.linalg.
    Use this class if you prefer to define A or M as a mat-vec routine
    as opposed to explicitly constructing the matrix.
    ..
        minimal residual algorithm:      Preconditioned version:
        r = b - A x                      r = b - A x, z = M r
        while not converged:             while not converged:
            p = A r                          p = M A z
            alpha = (p,r) / (p,p)            alpha = (p, z) / (p, p)
            x = x + alpha r                  x = x + alpha z
            r = r - alpha p                  z = z - alpha p
    See Also
    --------
    _steepest_descent
    Examples
    --------
    >>> from pyamg.krylov import minimal_residual
    >>> from pyamg.util.linalg import norm
    >>> import numpy as np
    >>> from pyamg.gallery import poisson
    >>> A = poisson((10,10))
    >>> b = np.ones((A.shape[0],))
    >>> (x,flag) = minimal_residual(A,b, maxiter=2, tol=1e-8)
    >>> print(f'{norm(b - A*x):.6}')
    7.26369
    References
    ----------
    .. [1] Yousef Saad, "Iterative Methods for Sparse Linear Systems,
       Second Edition", SIAM, pp. 137--142, 2003
       http://www-users.cs.umn.edu/~saad/books.html
    """
    A, M, x, b, postprocess = make_system(A, M, x0, b)
    # Ensure that warnings are always reissued from this function
    warnings.filterwarnings('always', module='pyamg.krylov._minimal_residual')
    # determine maxiter
    if maxiter is None:
        maxiter = int(1.3*len(b)) + 2
    elif maxiter < 1:
        raise ValueError('Number of iterations must be positive')
    # setup method
    r = b - A @ x
    z = M @ r
    normr = norm(z)
    # store initial residual
    if residuals is not None:
        residuals[:] = [normr]
    # Check initial guess if b != 0,
    normb = norm(b)
    if normb == 0.0:
        normMb = 1.0  # reset so that tol is unscaled
    else:
        normMb = norm(M @ b)
    # set the stopping criteria (see the docstring)
    if normr < tol * normMb:
        return (postprocess(x), 0)
    # How often should r be recomputed
    recompute_r = 50
    it = 0
    while True:
        p = M @ (A @ z)
        # (p, z) = (M A M r, M r) = (M A z, z)
        pz = np.inner(p.conjugate(), z)  # check curvature of M^-1 A
        if pz < 0.0:
            warn('\nIndefinite matrix detected in minimal residual, stopping.\n')
            return (postprocess(x), -1)
        # 1D projection step length: alpha = (p, z) / (p, p)
        alpha = pz / np.inner(p.conjugate(), p)
        x = x + alpha * z
        it += 1
        # NOTE(review): as written this recomputes the true residual on
        # every iteration *except* multiples of recompute_r; the usual
        # pattern is the opposite (recompute every recompute_r-th step).
        # Confirm intent before changing.
        if np.mod(it, recompute_r) and it > 0:
            r = b - A @ x
            z = M @ r
        else:
            # cheap update of the preconditioned residual
            z = z - alpha * p
        normr = norm(z)
        if residuals is not None:
            residuals.append(normr)
        if callback is not None:
            callback(x)
        # set the stopping criteria (see the docstring)
        if normr < tol * normMb:
            return (postprocess(x), 0)
        if it == maxiter:
            return (postprocess(x), it)
| mit | 1161e1f9cac0ee1d43f02504832d0821 | 29.295597 | 81 | 0.540793 | 3.826052 | false | false | false | false |
pyamg/pyamg | pyamg/gallery/tests/test_diffusion.py | 1 | 2626 | """Test diffusion example."""
import numpy as np
from pyamg.gallery.diffusion import diffusion_stencil_2d
from numpy.testing import TestCase, assert_equal, assert_almost_equal
class TestDiffusionStencil2D(TestCase):
    """Tests for the 2D rotated-anisotropic diffusion stencil generator."""

    def test_simple_finite_difference(self):
        """Check FD stencils for isotropic and weakly anisotropic cases."""
        # Isotropic: the classic 5-point Laplacian.
        expected = [[0.0, -1.0, 0.0],
                    [-1.0, 4.0, -1.0],
                    [0.0, -1.0, 0.0]]
        assert_equal(diffusion_stencil_2d(epsilon=1.0, theta=0.0, type='FD'),
                     expected)
        # Weak diffusion in the horizontal direction.
        expected = [[0.0, -1.0, 0.0],
                    [-0.5, 3.0, -0.5],
                    [0.0, -1.0, 0.0]]
        assert_almost_equal(
            diffusion_stencil_2d(epsilon=0.5, theta=0.0, type='FD'), expected)
        # Weak diffusion in the vertical direction.
        expected = [[0.0, -0.5, 0.0],
                    [-1.0, 3.0, -1.0],
                    [0.0, -0.5, 0.0]]
        assert_almost_equal(
            diffusion_stencil_2d(epsilon=0.5, theta=np.pi/2, type='FD'),
            expected)

    def test_simple_finite_element(self):
        """Check the FE stencil for the isotropic case."""
        expected = np.array([[-1.0, -1.0, -1.0],
                             [-1.0, 8.0, -1.0],
                             [-1.0, -1.0, -1.0]]) / 3.0
        assert_almost_equal(
            diffusion_stencil_2d(epsilon=1.0, theta=0.0, type='FE'), expected)
        # TODO: add anisotropic FE cases (epsilon=0.5 with theta=0 and pi/2).

    def test_zero_sum(self):
        """Test that stencil entries sum to zero."""
        for disc in ['FD', 'FE']:
            for angle in [np.pi/8, np.pi/5, np.pi/4, np.pi/3, np.pi/2, np.pi]:
                for eps in [0.001, 0.01, 1.0]:
                    s = diffusion_stencil_2d(epsilon=eps, theta=angle,
                                             type=disc)
                    assert_almost_equal(s.sum(), 0.0)

    def test_rotation_invariance(self):
        """Test invariance to theta when epsilon=1.0."""
        for disc in ['FD', 'FE']:
            base = diffusion_stencil_2d(epsilon=1.0, theta=0.0, type=disc)
            for angle in [np.pi/8, np.pi/4, np.pi/3, np.pi/2, np.pi]:
                rotated = diffusion_stencil_2d(epsilon=1.0, theta=angle,
                                               type=disc)
                assert_almost_equal(rotated, base)
| mit | 3912b8024529a72c647fb45a040b6147 | 40.68254 | 78 | 0.47639 | 3.237978 | false | true | false | false |
bbangert/routes | routes/middleware.py | 1 | 6548 | """Routes WSGI Middleware"""
import re
import logging
from webob import Request
from routes.base import request_config
from routes.util import URLGenerator
log = logging.getLogger('routes.middleware')
class RoutesMiddleware(object):
    """Routing middleware that handles resolving the PATH_INFO in
    addition to optionally recognizing method overriding.
    .. Note::
        This module requires webob to be installed. To depend on it, you may
        list routes[middleware] in your ``requirements.txt``
    """
    def __init__(self, wsgi_app, mapper, use_method_override=True,
                 path_info=True, singleton=True):
        """Create a Route middleware object
        Using the use_method_override keyword will require Paste to be
        installed, and your application should use Paste's WSGIRequest
        object as it will properly handle POST issues with wsgi.input
        should Routes check it.
        If path_info is True, then should a route var contain
        path_info, the SCRIPT_NAME and PATH_INFO will be altered
        accordingly. This should be used with routes like:
        .. code-block:: python
            map.connect('blog/*path_info', controller='blog', path_info='')
        """
        self.app = wsgi_app
        self.mapper = mapper
        self.singleton = singleton
        self.use_method_override = use_method_override
        self.path_info = path_info
        # Cache whether debug logging is enabled so per-request checks
        # are a cheap attribute lookup.
        self.log_debug = logging.DEBUG >= log.getEffectiveLevel()
        if self.log_debug:
            log.debug("Initialized with method overriding = %s, and path "
                      "info altering = %s", use_method_override, path_info)
    def __call__(self, environ, start_response):
        """Resolves the URL in PATH_INFO, and uses wsgi.routing_args
        to pass on URL resolver results."""
        # Method override: look for a '_method' field in the query string
        # first, then (for form POSTs only) in the request body.  The
        # original REQUEST_METHOD is restored after route matching.
        old_method = None
        if self.use_method_override:
            req = None
            # In some odd cases, there's no query string
            try:
                qs = environ['QUERY_STRING']
            except KeyError:
                qs = ''
            if '_method' in qs:
                req = Request(environ)
                # presumably suppresses charset decode errors in webob's
                # form parsing -- NOTE(review): confirm against webob docs
                req.errors = 'ignore'
                try:
                    method = req.GET.get('_method')
                except UnicodeDecodeError:
                    method = None
                if method:
                    old_method = environ['REQUEST_METHOD']
                    environ['REQUEST_METHOD'] = method.upper()
                    if self.log_debug:
                        log.debug("_method found in QUERY_STRING, altering "
                                  "request method to %s",
                                  environ['REQUEST_METHOD'])
            elif environ['REQUEST_METHOD'] == 'POST' and is_form_post(environ):
                if req is None:
                    req = Request(environ)
                    req.errors = 'ignore'
                try:
                    method = req.POST.get('_method')
                except UnicodeDecodeError:
                    method = None
                if method:
                    old_method = environ['REQUEST_METHOD']
                    environ['REQUEST_METHOD'] = method.upper()
                    if self.log_debug:
                        log.debug("_method found in POST data, altering "
                                  "request method to %s",
                                  environ['REQUEST_METHOD'])
        # Run the actual route matching
        # -- Assignment of environ to config triggers route matching
        if self.singleton:
            config = request_config()
            config.mapper = self.mapper
            config.environ = environ
            match = config.mapper_dict
            route = config.route
        else:
            # Non-singleton mode: call the mapper directly instead of
            # going through the thread-local request config.
            results = self.mapper.routematch(environ=environ)
            if results:
                match, route = results[0], results[1]
            else:
                match = route = None
        if old_method:
            environ['REQUEST_METHOD'] = old_method
        if not match:
            match = {}
            if self.log_debug:
                urlinfo = "%s %s" % (environ['REQUEST_METHOD'],
                                     environ['PATH_INFO'])
                log.debug("No route matched for %s", urlinfo)
        elif self.log_debug:
            urlinfo = "%s %s" % (environ['REQUEST_METHOD'],
                                 environ['PATH_INFO'])
            log.debug("Matched %s", urlinfo)
            log.debug("Route path: '%s', defaults: %s", route.routepath,
                      route.defaults)
            log.debug("Match dict: %s", match)
        # Expose the match results under the conventional
        # 'wsgiorg.routing_args' key, plus routes-specific keys.
        url = URLGenerator(self.mapper, environ)
        environ['wsgiorg.routing_args'] = ((url), match)
        environ['routes.route'] = route
        environ['routes.url'] = url
        if route and route.redirect:
            # Redirect routes short-circuit: respond immediately without
            # calling the wrapped application.
            route_name = '_redirect_%s' % id(route)
            location = url(route_name, **match)
            log.debug("Using redirect route, redirect to '%s' with status"
                      "code: %s", location, route.redirect_status)
            start_response(route.redirect_status,
                           [('Content-Type', 'text/plain; charset=utf8'),
                            ('Location', location)])
            return []
        # If the route included a path_info attribute and it should be used to
        # alter the environ, we'll pull it out
        if self.path_info and 'path_info' in match:
            oldpath = environ['PATH_INFO']
            newpath = match.get('path_info') or ''
            environ['PATH_INFO'] = newpath
            if not environ['PATH_INFO'].startswith('/'):
                environ['PATH_INFO'] = '/' + environ['PATH_INFO']
            # Move the consumed prefix from PATH_INFO onto SCRIPT_NAME.
            environ['SCRIPT_NAME'] += re.sub(
                r'^(.*?)/' + re.escape(newpath) + '$', r'\1', oldpath)
        response = self.app(environ, start_response)
        # Wrapped in try as in rare cases the attribute will be gone already
        try:
            del self.mapper.environ
        except AttributeError:
            pass
        return response
def is_form_post(environ):
    """Determine whether the request is a POSTed html form"""
    ctype = environ.get('CONTENT_TYPE', '').lower()
    # Drop any media-type parameters (e.g. "; boundary=...").
    ctype = ctype.partition(';')[0]
    return ctype in ('application/x-www-form-urlencoded',
                     'multipart/form-data')
| mit | c7aea3683696194f6c19f3a63986a3fb | 37.745562 | 79 | 0.529322 | 4.687187 | false | false | false | false |
joke2k/faker | faker/providers/person/no_NO/__init__.py | 1 | 7074 | from .. import Provider as PersonProvider
class Provider(PersonProvider):
    """Implement person provider for the ``no_NO`` locale."""
    # Name formats; heavily weighted toward the plain
    # "{{first_name}} {{last_name}}" form, with rarer hyphenated and
    # prefixed variants.
    formats = (
        "{{first_name}} {{last_name}}",
        "{{first_name}} {{last_name}}",
        "{{first_name}} {{last_name}}",
        "{{first_name}} {{last_name}}",
        "{{first_name}} {{last_name}}",
        "{{first_name}} {{last_name}}",
        "{{first_name}} {{last_name}}",
        "{{first_name}} {{last_name}}",
        "{{first_name}} {{last_name}}",
        "{{first_name}} {{last_name}}",
        "{{first_name}} {{last_name}}",
        "{{first_name}} {{last_name}}",
        "{{first_name}} {{last_name}}",
        "{{first_name}} {{last_name}}",
        "{{first_name}} {{last_name}}",
        "{{first_name}} {{last_name}}",
        "{{first_name_male}}-{{first_name_male}} {{last_name}}",
        "{{first_name_male}}-{{first_name_male}} {{last_name}}",
        "{{first_name_female}}-{{first_name_female}} {{last_name}}",
        "{{first_name_female}}-{{first_name_female}} {{last_name}}",
        "{{first_name}} {{last_name}}-{{last_name}}",
        "{{first_name}} {{last_name}}-{{last_name}}",
        "{{prefix}} {{first_name_male}} {{last_name}}",
    )
    # 100 most common male first names, alphabetically.
    # Source: http://www.ssb.no/a/navn/fornavn-menn-100.html
    first_names_male = (
        "Adrian",
        "Alexander",
        "Alf",
        "Anders",
        "Andreas",
        "Arild",
        "Arne",
        "Asbjørn",
        "Bjørn",
        "Christian",
        "Dag",
        "Daniel",
        "Egil",
        "Einar",
        "Eirik",
        "Eivind",
        "Emil",
        "Erik",
        "Erling",
        "Espen",
        "Finn",
        "Frank",
        "Fredrik",
        "Frode",
        "Geir",
        "Gunnar",
        "Hans",
        "Harald",
        "Helge",
        "Henrik",
        "Håkon",
        "Håvard",
        "Ivar",
        "Jan",
        "Jens",
        "Joakim",
        "Johannes",
        "Johan",
        "John",
        "Jonas",
        "Jon",
        "Jørgen",
        "Karl",
        "Kenneth",
        "Kim",
        "Kjell",
        "Kjetil",
        "Knut",
        "Kåre",
        "Kristian",
        "Kristoffer",
        "Lars",
        "Leif",
        "Magne",
        "Magnus",
        "Marius",
        "Markus",
        "Martin",
        "Mathias",
        "Morten",
        "Nils",
        "Odd",
        "Ola",
        "Olav",
        "Ole",
        "Per",
        "Petter",
        "Pål",
        "Roar",
        "Robert",
        "Roger",
        "Rolf",
        "Roy",
        "Rune",
        "Sander",
        "Sebastian",
        "Sigurd",
        "Simen",
        "Sindre",
        "Sondre",
        "Steinar",
        "Stein",
        "Stian",
        "Stig",
        "Svein",
        "Sverre",
        "Terje",
        "Thomas",
        "Thor",
        "Tobias",
        "Tommy",
        "Tom",
        "Torbjørn",
        "Tore",
        "Tor",
        "Trond",
        "Vegard",
        "Vidar",
        "Øystein",
        "Øyvind",
    )
    # 100 most common female first names, alphabetically.
    # Source: http://www.ssb.no/a/navn/fornavn-kvinner-100.html
    first_names_female = (
        "Andrea",
        "Anette",
        "Anita",
        "Anna",
        "Anne",
        "Ann",
        "Astrid",
        "Aud",
        "Bente",
        "Berit",
        "Bjørg",
        "Britt",
        "Camilla",
        "Cathrine",
        "Cecilie",
        "Elin",
        "Elisabeth",
        "Elise",
        "Eli",
        "Ellen",
        "Else",
        "Emilie",
        "Emma",
        "Eva",
        "Gerd",
        "Grete",
        "Grethe",
        "Gro",
        "Gunn",
        "Hanna",
        "Hanne",
        "Hege",
        "Heidi",
        "Helene",
        "Hilde",
        "Ida",
        "Ingeborg",
        "Inger",
        "Ingrid",
        "Irene",
        "Janne",
        "Jenny",
        "Jorunn",
        "Julie",
        "Karen",
        "Karin",
        "Kari",
        "Karoline",
        "Kirsten",
        "Kjersti",
        "Kristine",
        "Kristin",
        "Laila",
        "Lene",
        "Linda",
        "Line",
        "Linn",
        "Lise",
        "Liv",
        "Malin",
        "Maren",
        "Marianne",
        "Maria",
        "Marie",
        "Mari",
        "Marit",
        "Marte",
        "Martine",
        "May",
        "Mette",
        "Mona",
        "Monica",
        "Nina",
        "Nora",
        "Ragnhild",
        "Randi",
        "Reidun",
        "Rita",
        "Ruth",
        "Sara",
        "Sigrid",
        "Silje",
        "Siri",
        "Sissel",
        "Siv",
        "Sofie",
        "Solveig",
        "Stine",
        "Synnøve",
        "Thea",
        "Tone",
        "Tonje",
        "Torill",
        "Tove",
        "Trine",
        "Turid",
        "Unni",
        "Vilde",
        "Wenche",
        "Åse",
    )
    # Combined male + female pool for gender-neutral lookups.
    first_names = first_names_male + first_names_female
    # 100 most common last names, alphabetically.
    # Source: http://www.ssb.no/a/navn/alf/etter100.html
    last_names = (
        "Aasen",
        "Aas",
        "Abrahamsen",
        "Ahmed",
        "Ali",
        "Amundsen",
        "Andersen",
        "Andreassen",
        "Andresen",
        "Antonsen",
        "Arnesen",
        "Aune",
        "Bakken",
        "Bakke",
        "Berge",
        "Berg",
        "Berntsen",
        "Bøe",
        "Birkeland",
        "Brekke",
        "Christensen",
        "Dahl",
        "Danielsen",
        "Edvardsen",
        "Eide",
        "Eliassen",
        "Ellingsen",
        "Engen",
        "Eriksen",
        "Evensen",
        "Fredriksen",
        "Gulbrandsen",
        "Gundersen",
        "Hagen",
        "Halvorsen",
        "Hansen",
        "Hanssen",
        "Haugen",
        "Hauge",
        "Haugland",
        "Haug",
        "Helland",
        "Henriksen",
        "Holm",
        "Isaksen",
        "Iversen",
        "Jacobsen",
        "Jakobsen",
        "Jensen",
        "Jenssen",
        "Johannessen",
        "Johansen",
        "Johnsen",
        "Jørgensen",
        "Karlsen",
        "Knudsen",
        "Knutsen",
        "Kristensen",
        "Kristiansen",
        "Kristoffersen",
        "Larsen",
        "Lien",
        "Lie",
        "Lunde",
        "Lund",
        "Madsen",
        "Martinsen",
        "Mathisen",
        "Mikkelsen",
        "Moen",
        "Moe",
        "Myhre",
        "Myklebust",
        "Nguyen",
        "Nielsen",
        "Nilsen",
        "Næss",
        "Nygård",
        "Olsen",
        "Paulsen",
        "Pedersen",
        "Pettersen",
        "Rasmussen",
        "Rønning",
        "Ruud",
        "Sandvik",
        "Simonsen",
        "Sivertsen",
        "Solberg",
        "Solheim",
        "Sørensen",
        "Sæther",
        "Strand",
        "Strøm",
        "Svendsen",
        "Tangen",
        "Thomassen",
        "Thorsen",
        "Tveit",
        "Vik",
        "Ødegård",
    )
    # Honorific prefixes used by the "{{prefix}} ..." format above.
    prefixes = (
        "Dr.",
        "Prof.",
    )
| mit | 4b6772cc2f350c3dca7fae001151bbca | 19.03125 | 68 | 0.370869 | 3.11165 | false | false | false | false |
joke2k/faker | faker/providers/automotive/nl_NL/__init__.py | 1 | 2533 | import re
import string
from .. import Provider as AutomotiveProvider
class Provider(AutomotiveProvider):
    """Implement automotive provider for `nl_NL` locale.
    Sources:
    - https://en.wikipedia.org/wiki/Vehicle_registration_plates_of_the_Netherlands
    - https://www.cbs.nl/en-gb/figures/detail/82044eng
    .. |license_plate_car| replace::
        :meth:`license_plate_car() <faker.providers.automotive.nl_NL.Provider.license_plate_car>`
    .. |license_plate_motorbike| replace::
        :meth:`license_plate_motorbike() <faker.providers.automotive.nl_NL.Provider.license_plate_motorbike>`
    """
    # License formats for cars / other vehicles than motorbikes
    license_formats = (
        # Format 6
        "##-%?-??",
        # Format 7
        "##-%??-#",
        # Format 8
        "#-@??-##",
        # Format 9
        "%?-###-?",
        # Format 10
        "%-###-??",
    )
    # License formats for motorbikes.
    # According to CBS, approximately 10% of road vehicles in the Netherlands are motorbikes
    license_formats_motorbike = (
        "M?-??-##",
        "##-M?-??",
    )
    # Base first letters of format
    license_plate_prefix_letters = "BDFGHJKLNPRSTVXZ"
    # For Format 8 (9-XXX-99) "BDFGHJLNPR" are not used,
    # as to not clash with former export license plates
    license_plate_prefix_letters_format_8 = "KSTVXZ"

    def license_plate_motorbike(self) -> str:
        """Generate a license plate for motorbikes."""
        pattern = self.random_element(self.license_formats_motorbike)
        return self.bothify(pattern, letters=string.ascii_uppercase)

    def license_plate_car(self) -> str:
        """Generate a license plate for cars."""
        # Draw the substitutions in the same order as the format pieces
        # are consumed: generic prefix letter first, then the format.
        prefix_letter = self.random_element(self.license_plate_prefix_letters)
        pattern = re.sub(r"\%", prefix_letter, self.random_element(self.license_formats))
        # Substitute @ with a letter allowed for Format 8.
        format_8_letter = self.random_element(self.license_plate_prefix_letters_format_8)
        pattern = re.sub(r"\@", format_8_letter, pattern)
        return self.bothify(pattern, letters=string.ascii_uppercase)

    def license_plate(self) -> str:
        """Generate a license plate.
        This method randomly chooses 10% between |license_plate_motorbike|
        or 90% |license_plate_car| to generate the result.
        """
        is_motorbike = self.generator.random.random() < 0.1
        return self.license_plate_motorbike() if is_motorbike else self.license_plate_car()
| mit | c42b5696d81eecbaca0dde4c32d02f0f | 31.896104 | 108 | 0.613502 | 3.503458 | false | false | false | false |
joke2k/faker | faker/providers/date_time/ru_RU/__init__.py | 1 | 53527 | from faker.typing import Country
from .. import Provider as DateTimeProvider
class Provider(DateTimeProvider):
DAY_NAMES = {
"0": "Воскресенье",
"1": "Понедельник",
"2": "Вторник",
"3": "Среда",
"4": "Четверг",
"5": "Пятница",
"6": "Суббота",
}
MONTH_NAMES = {
"01": "Январь",
"02": "Февраль",
"03": "Март",
"04": "Апрель",
"05": "Май",
"06": "Июнь",
"07": "Июль",
"08": "Август",
"09": "Сентябрь",
"10": "Октябрь",
"11": "Ноябрь",
"12": "Декабрь",
}
# Timezone names are based on Wiki list, source: https://ru.wikipedia.org/wiki/Список_часовых_поясов_по_странам
countries = [
Country(
timezones=["Андорра (UTC+01)"],
alpha_2_code="AD",
alpha_3_code="AND",
continent="Европа",
name="Андорра",
capital="Андорра-ла-Велья",
),
Country(
timezones=["Афганистан (UTC+04:30)"],
alpha_2_code="AF",
alpha_3_code="AFG",
continent="Азия",
name="Афганистан",
capital="Кабул",
),
Country(
timezones=["Антигуа и Барбуда (UTC-04)"],
alpha_2_code="AG",
alpha_3_code="ATG",
continent="Северная Америка",
name="Антигуа и Барбуда",
capital="Сент-Джонс",
),
Country(
timezones=["Албания (UTC+01)"],
alpha_2_code="AL",
alpha_3_code="ALB",
continent="Европа",
name="Албания",
capital="Тирана",
),
Country(
timezones=["Армения (UTC+04)"],
alpha_2_code="AM",
alpha_3_code="ARM",
continent="Азия",
name="Армения",
capital="Ереван",
),
Country(
timezones=["Ангола (UTC+01)"],
alpha_2_code="AO",
alpha_3_code="AGO",
continent="Африка",
name="Ангола",
capital="Луанда",
),
Country(
timezones=["Аргентина (UTC-03)"],
alpha_2_code="AR",
alpha_3_code="ARG",
continent="Южная Америка",
name="Аргентина",
capital="Буэнос Айрес",
),
Country(
timezones=["Австрия (UTC+01)"],
alpha_2_code="AT",
alpha_3_code="AUT",
continent="Европа",
name="Австрия",
capital="Вена",
),
Country(
timezones=[
"Австралия (UTC+05)",
"Австралия (UTC+06:30)",
"Австралия (UTC+07)",
"Австралия (UTC+08)",
"Австралия (UTC+9:30)",
"Австралия (UTC+10)",
"Австралия (UTC+10:30)",
"Австралия (UTC+11:30)",
],
alpha_2_code="AU",
alpha_3_code="AUS",
continent="Океания",
name="Австралия",
capital="Канберра",
),
Country(
timezones=["Азербайджан (UTC+04)"],
alpha_2_code="AZ",
alpha_3_code="AZE",
continent="Азия",
name="Азербайджан",
capital="Баку",
),
Country(
timezones=["Барбадос (UTC-04)"],
alpha_2_code="BB",
alpha_3_code="BRB",
continent="Северная Америка",
name="Барбадос",
capital="Бриджтаун",
),
Country(
timezones=["Бангладеш (UTC+06)"],
alpha_2_code="BD",
alpha_3_code="BGD",
continent="Азия",
name="Бангладеш",
capital="Дака",
),
Country(
timezones=["Бельгия (UTC+01)"],
alpha_2_code="BE",
alpha_3_code="BEL",
continent="Европа",
name="Бельгия",
capital="Брюссель",
),
Country(
timezones=["Буркина-Фасо (UTC)"],
alpha_2_code="BF",
alpha_3_code="BFA",
continent="Африка",
name="Буркина-Фасо",
capital="Уагадугу",
),
Country(
timezones=["Болгария (UTC+02)"],
alpha_2_code="BG",
alpha_3_code="BGR",
continent="Европа",
name="Болгария",
capital="София",
),
Country(
timezones=["Бахрейн (UTC+03)"],
alpha_2_code="BH",
alpha_3_code="BHR",
continent="Азия",
name="Бахрейн",
capital="Манама",
),
Country(
timezones=["Бурунди (UTC+02)"],
alpha_2_code="BI",
alpha_3_code="BDI",
continent="Африка",
name="Бурунди",
capital="Гитега",
),
Country(
timezones=["Бенин (UTC+01)"],
alpha_2_code="BJ",
alpha_3_code="BEN",
continent="Африка",
name="Бенин",
capital="Порто-Ново",
),
Country(
timezones=["Бруней (UTC+08)"],
alpha_2_code="BN",
alpha_3_code="BRN",
continent="Азия",
name="Бруней",
capital="Бандар-Сери-Бегаван",
),
Country(
timezones=["Боливия (UTC-04)"],
alpha_2_code="BO",
alpha_3_code="BOL",
continent="Южная Америка",
name="Боливия",
capital="Сукре",
),
Country(
timezones=[
"Бразилия (UTC-05)",
"Бразилия (UTC-04)",
"Бразилия (UTC-03)",
"Бразилия (UTC-02)",
],
alpha_2_code="BR",
alpha_3_code="BRA",
continent="Южная Америка",
name="Бразилия",
capital="Бразилиа",
),
Country(
timezones=["Багамские Острова (UTC-05)"],
alpha_2_code="BS",
alpha_3_code="BHS",
continent="Северная Америка",
name="Багамские Острова",
capital="Нассау",
),
Country(
timezones=["Бутан (UTC+06)"],
alpha_2_code="BT",
alpha_3_code="BTN",
continent="Азия",
name="Бутан",
capital="Тхимпху",
),
Country(
timezones=["Ботсвана (UTC+02)"],
alpha_2_code="BW",
alpha_3_code="BWA",
continent="Африка",
name="Ботсвана",
capital="Габороне",
),
Country(
timezones=["Белоруссия (UTC+03)"],
alpha_2_code="BY",
alpha_3_code="BLR",
continent="Европа",
name="Белоруссия",
capital="Минск",
),
Country(
timezones=["Белиз (UTC-06)"],
alpha_2_code="BZ",
alpha_3_code="BLZ",
continent="Северная Америка",
name="Белиз",
capital="Бельмопан",
),
Country(
timezones=[
"Канада (UTC-08)",
"Канада (UTC-07)",
"Канада (UTC-06)",
"Канада (UTC-05)",
"Канада (UTC-04)",
"Канада (UTC-03:30)",
],
alpha_2_code="CA",
alpha_3_code="CAN",
continent="Северная Америка",
name="Канада",
capital="Оттава",
),
Country(
timezones=[
"Демократическая Республика Конго (UTC+01)",
"Демократическая Республика Конго (UTC+02)",
],
alpha_2_code="CD",
alpha_3_code="COD",
continent="Африка",
name="Демократическая Республика Конго",
capital="Киншаса",
),
Country(
timezones=["Республика Конго (UTC+01)"],
alpha_2_code="CG",
alpha_3_code="COG",
continent="Африка",
name="Руспублика Конго",
capital="Браззавиль",
),
Country(
timezones=["Кот-д'Ивуар (UTC)"],
alpha_2_code="CI",
alpha_3_code="CIV",
continent="Африка",
name="Кот-д'Ивуар",
capital="Ямусукро",
),
Country(
timezones=["Чили (UTC-06)", "Чили (UTC-04)"],
alpha_2_code="CL",
alpha_3_code="CHL",
continent="Южная Америка",
name="Чили",
capital="Сантьяго",
),
Country(
timezones=["Камерун (UTC+01)"],
alpha_2_code="CM",
alpha_3_code="CMR",
continent="Африка",
name="Камерун",
capital="Яунде",
),
Country(
timezones=["Китай (UTC+08)"],
alpha_2_code="CN",
alpha_3_code="CHN",
continent="Азия",
name="Китайская Народная Республика",
capital="Пекин",
),
Country(
timezones=["Колумбия (UTC-05)"],
alpha_2_code="CO",
alpha_3_code="COL",
continent="Южная Америка",
name="Колумбия",
capital="Богота",
),
Country(
timezones=["Коста-Рика (UTC-06)"],
alpha_2_code="CR",
alpha_3_code="CRI",
continent="Северная Америка",
name="Коста-Рика",
capital="Сан-Хосе",
),
Country(
timezones=["Куба (UTC-05)"],
alpha_2_code="CU",
alpha_3_code="CUB",
continent="Северная Америка",
name="Куба",
capital="Гавана",
),
Country(
timezones=["Кабо-Верде (UTC-01)"],
alpha_2_code="CV",
alpha_3_code="CPV",
continent="Африка",
name="Кабо-Верде",
capital="Прая",
),
Country(
timezones=["Кипр (UTC+02)"],
alpha_2_code="CY",
alpha_3_code="CYP",
continent="Азия",
name="Кипр",
capital="Никосия",
),
Country(
timezones=["Чехия (UTC+01)"],
alpha_2_code="CZ",
alpha_3_code="CZE",
continent="Европа",
name="Чехия",
capital="Прага",
),
Country(
timezones=["Германия (UTC+01)"],
alpha_2_code="DE",
alpha_3_code="DEU",
continent="Европа",
name="Германия",
capital="Берлин",
),
Country(
timezones=["Джибути (UTC+03)"],
alpha_2_code="DJ",
alpha_3_code="DJI",
continent="Африка",
name="Джибути",
capital="Джибути",
),
Country(
timezones=["Дания (UTC+01)"],
alpha_2_code="DK",
alpha_3_code="DNK",
continent="Европа",
name="Дания",
capital="Копенгаген",
),
Country(
timezones=["Доминика (UTC-04)"],
alpha_2_code="DM",
alpha_3_code="DMA",
continent="Северная Америка",
name="Доминика",
capital="Розо",
),
Country(
timezones=["Доминиканская Республика (UTC-04)"],
alpha_2_code="DO",
alpha_3_code="DOM",
continent="Северная Америка",
name="Доминиканская Республика",
capital="Санто-Доминго",
),
Country(
timezones=["Эквадор (UTC-06)", "Эквадор (UTC-05)"],
alpha_2_code="EC",
alpha_3_code="ECU",
continent="Южная Америка",
name="Эквадор",
capital="Кито",
),
Country(
timezones=["Эстония (UTC+02)"],
alpha_2_code="EE",
alpha_3_code="EST",
continent="Европа",
name="Эстония",
capital="Таллинн",
),
Country(
timezones=["Египет (UTC+02)"],
alpha_2_code="EG",
alpha_3_code="EGY",
continent="Африка",
name="Египет",
capital="Каир",
),
Country(
timezones=["Эритрея (UTC+03)"],
alpha_2_code="ER",
alpha_3_code="ERI",
continent="Африка",
name="Эритрея",
capital="Асмэра",
),
Country(
timezones=["Эфиопия (UTC+03)"],
alpha_2_code="ET",
alpha_3_code="ETH",
continent="Африка",
name="Эфиопия",
capital="Аддис-Абеба",
),
Country(
timezones=["Финляндия (UTC+02)"],
alpha_2_code="FI",
alpha_3_code="FIN",
continent="Европа",
name="Финляндия",
capital="Хельсинки",
),
Country(
timezones=["Фиджи (UTC+12)"],
alpha_2_code="FJ",
alpha_3_code="FJI",
continent="Океания",
name="Фиджи",
capital="Сува",
),
Country(
timezones=["Франция (UTC+01)"],
alpha_2_code="FR",
alpha_3_code="FRA",
continent="Европа",
name="Франция",
capital="Париж",
),
Country(
timezones=["Габон (UTC+01)"],
alpha_2_code="GA",
alpha_3_code="GAB",
continent="Африка",
name="Габон",
capital="Либревиль",
),
Country(
timezones=["Грузия (UTC+04)"],
alpha_2_code="GE",
alpha_3_code="GEO",
continent="Азия",
name="Грузия",
capital="Тбилиси",
),
Country(
timezones=["Гана (UTC)"],
alpha_2_code="GH",
alpha_3_code="GHA",
continent="Африка",
name="Гана",
capital="Аккра",
),
Country(
timezones=["Гамбия (UTC)"],
alpha_2_code="GM",
alpha_3_code="GMB",
continent="Африка",
name="Гамбия",
capital="Банджул",
),
Country(
timezones=["Гвинея (UTC)"],
alpha_2_code="GN",
alpha_3_code="GIN",
continent="Африка",
name="Гвинея",
capital="Конакри",
),
Country(
timezones=["Греция (UTC+02)"],
alpha_2_code="GR",
alpha_3_code="GRC",
continent="Европа",
name="Греция",
capital="Афины",
),
Country(
timezones=["Гватемала (UTC-06)"],
alpha_2_code="GT",
alpha_3_code="GTM",
continent="Северная Америка",
name="Гватемала",
capital="Гватемала",
),
Country(
timezones=["Гаити (UTC-05)"],
alpha_2_code="HT",
alpha_3_code="HTI",
continent="Северная Америка",
name="Гаити",
capital="Порт-о-Пренс",
),
Country(
timezones=["Гвинея-Бисау (UTC)"],
alpha_2_code="GW",
alpha_3_code="GNB",
continent="Африка",
name="Гвинея-Бисау",
capital="Бисау",
),
Country(
timezones=["Гайана (UTC-04)"],
alpha_2_code="GY",
alpha_3_code="GUY",
continent="Южная Америка",
name="Гайана",
capital="Джорджтаун",
),
Country(
timezones=["Гондурас (UTC-06)"],
alpha_2_code="HN",
alpha_3_code="HND",
continent="Северная Америка",
name="Гондурас",
capital="Тегусигальпа",
),
Country(
timezones=["Венгрия (UTC+01)"],
alpha_2_code="HU",
alpha_3_code="HUN",
continent="Европа",
name="Венгрия",
capital="Будапешт",
),
Country(
timezones=[
"Индонезия (UTC+07)",
"Индонезия (UTC+08)",
"Индонезия (UTC+09)",
],
alpha_2_code="ID",
alpha_3_code="IDN",
continent="Азия",
name="Индонезия",
capital="Джакарта",
),
Country(
timezones=["Ирландия (UTC)"],
alpha_2_code="IE",
alpha_3_code="IRL",
continent="Европа",
name="Ирландия",
capital="Дублин",
),
Country(
timezones=["Израиль (UTC+02)"],
alpha_2_code="IL",
alpha_3_code="ISR",
continent="Азия",
name="Израиль",
capital="Иерусалим",
),
Country(
timezones=["Индия (UTC+05:30"],
alpha_2_code="IN",
alpha_3_code="IND",
continent="Азия",
name="Индия",
capital="Дели",
),
Country(
timezones=["Ирак (UTC+03)"],
alpha_2_code="IQ",
alpha_3_code="IRQ",
continent="Азия",
name="Ирак",
capital="Багдад",
),
Country(
timezones=["Иран (UTC+03:30)"],
alpha_2_code="IR",
alpha_3_code="IRN",
continent="Азия",
name="Иран",
capital="Тегеран",
),
Country(
timezones=["Исландия (UTC)"],
alpha_2_code="IS",
alpha_3_code="ISL",
continent="Европа",
name="Исландия",
capital="Рейкьявик",
),
Country(
timezones=["Италия (UTC+01)"],
alpha_2_code="IT",
alpha_3_code="ITA",
continent="Европа",
name="Италия",
capital="Рим",
),
Country(
timezones=["Ямайка (UTC-05)"],
alpha_2_code="JM",
alpha_3_code="JAM",
continent="Северная Америка",
name="Ямайка",
capital="Кингстон",
),
Country(
timezones=["Иордания (UTC+02)"],
alpha_2_code="JO",
alpha_3_code="JOR",
continent="Азия",
name="Иордания",
capital="Амман",
),
Country(
timezones=["Япония (UTC+09)"],
alpha_2_code="JP",
alpha_3_code="JPN",
continent="Азия",
name="Япония",
capital="Токио",
),
Country(
timezones=["Кения (UTC+03)"],
alpha_2_code="KE",
alpha_3_code="KEN",
continent="Африка",
name="Кения",
capital="Найроби",
),
Country(
timezones=["Киргизия (UTC+06)"],
alpha_2_code="KG",
alpha_3_code="KGZ",
continent="Азия",
name="Киргизия",
capital="Бишкек",
),
Country(
timezones=[
"Кирибати (UTC+12)",
"Кирибати (UTC+13)",
"Кирибати (UTC+14)",
],
alpha_2_code="KI",
alpha_3_code="KIR",
continent="Океания",
name="Кирибати",
capital="Южная Тарава",
),
Country(
timezones=["КНДР (UTC+09)"],
alpha_2_code="KP",
alpha_3_code="PRK",
continent="Азия",
name="КНДР",
capital="Пхеньян",
),
Country(
timezones=["Республика Корея (UTC+09)"],
alpha_2_code="KR",
alpha_3_code="KOR",
continent="Азия",
name="Республика Корея",
capital="Сеул",
),
Country(
timezones=["Кувейт (UTC+03)"],
alpha_2_code="KW",
alpha_3_code="KWT",
continent="Азия",
name="Кувейт",
capital="Эль-Кувейт",
),
Country(
timezones=["Ливан (UTC+02)"],
alpha_2_code="LB",
alpha_3_code="LBN",
continent="Азия",
name="Ливан",
capital="Бейрут",
),
Country(
timezones=["Лихтенштейн (UTC+01)"],
alpha_2_code="LI",
alpha_3_code="LIE",
continent="Европа",
name="Лихтенштейн",
capital="Вадуц",
),
Country(
timezones=["Либерия (UTC)"],
alpha_2_code="LR",
alpha_3_code="LBR",
continent="Африка",
name="Либерия",
capital="Монровия",
),
Country(
timezones=["Лесото (UTC+02)"],
alpha_2_code="LS",
alpha_3_code="LSO",
continent="Африка",
name="Лесото",
capital="Масеру",
),
Country(
timezones=["Литва (UTC+02)"],
alpha_2_code="LT",
alpha_3_code="LTU",
continent="Европа",
name="Литва",
capital="Вильнюс",
),
Country(
timezones=["Люксембург (UTC+01)"],
alpha_2_code="LU",
alpha_3_code="LUX",
continent="Европа",
name="Люксембург",
capital="Люксембург",
),
Country(
timezones=["Латвия (UTC+02)"],
alpha_2_code="LV",
alpha_3_code="LVA",
continent="Европа",
name="Латвия",
capital="Рига",
),
Country(
timezones=["Ливия (UTC+02)"],
alpha_2_code="LY",
alpha_3_code="LBY",
continent="Африка",
name="Ливия",
capital="Триполи",
),
Country(
timezones=["Мадагаскар (UTC+03)"],
alpha_2_code="MG",
alpha_3_code="MDG",
continent="Африка",
name="Мадагаскар",
capital="Антананариву",
),
Country(
timezones=["Маршалловы Острова (UTC+12)"],
alpha_2_code="MH",
alpha_3_code="MHL",
continent="Океания",
name="Маршалловы Острова",
capital="Маджуро",
),
Country(
timezones=["Северная Македония (UTC+01)"],
alpha_2_code="MK",
alpha_3_code="MKD",
continent="Европа",
name="Северная Македония",
capital="Скопье",
),
Country(
timezones=["Мали (UTC)"],
alpha_2_code="ML",
alpha_3_code="MLI",
continent="Африка",
name="Мали",
capital="Бамако",
),
Country(
timezones=["Мьянма (UTC+06:30)"],
alpha_2_code="MM",
alpha_3_code="MMR",
continent="Азия",
name="Мьянма",
capital="Нейпьидо",
),
Country(
timezones=["Монголия (UTC+07)", "Монголия (UTC+08)"],
alpha_2_code="MN",
alpha_3_code="MNG",
continent="Азия",
name="Монголия",
capital="Улан-Батор",
),
Country(
timezones=["Мавритания (UTC)"],
alpha_2_code="MR",
alpha_3_code="MRT",
continent="Африка",
name="Мавритания",
capital="Нуакшот",
),
Country(
timezones=["Мальта (UTC+01)"],
alpha_2_code="MT",
alpha_3_code="MLT",
continent="Европа",
name="Мальта",
capital="Валлетта",
),
Country(
timezones=["Маврикий (UTC+04)"],
alpha_2_code="MU",
alpha_3_code="MUS",
continent="Африка",
name="Маврикий",
capital="Порт-Луи",
),
Country(
timezones=["Мальдивы (UTC+05)"],
alpha_2_code="MV",
alpha_3_code="MDV",
continent="Азия",
name="Мальдивы",
capital="Мале",
),
Country(
timezones=["Малави (UTC+02)"],
alpha_2_code="MW",
alpha_3_code="MWI",
continent="Африка",
name="Малави",
capital="Лилонгве",
),
Country(
timezones=["Мексика (UTC-08)", "Мексика (UTC-07)", "Мексика (UTC-06)"],
alpha_2_code="MX",
alpha_3_code="MEX",
continent="Северная Америка",
name="Мексика",
capital="Мехико",
),
Country(
timezones=["Малайзия (UTC+08)"],
alpha_2_code="MY",
alpha_3_code="MYS",
continent="Азия",
name="Малайзия",
capital="Куала-Лумпур",
),
Country(
timezones=["Мозамбик (UTC+02)"],
alpha_2_code="MZ",
alpha_3_code="MOZ",
continent="Африка",
name="Мозамбик",
capital="Мапуту",
),
Country(
timezones=["Намибия (UTC+01)"],
alpha_2_code="NA",
alpha_3_code="NAM",
continent="Африка",
name="Намибия",
capital="Виндхук",
),
Country(
timezones=["Нигер (UTC+01)"],
alpha_2_code="NE",
alpha_3_code="NER",
continent="Африка",
name="Нигер",
capital="Ниамей",
),
Country(
timezones=["Нигерия (UTC+01)"],
alpha_2_code="NG",
alpha_3_code="NGA",
continent="Африка",
name="Нигерия",
capital="Абуджа",
),
Country(
timezones=["Никарагуа (UTC-06)"],
alpha_2_code="NI",
alpha_3_code="NIC",
continent="Северная Америка",
name="Никарагуа",
capital="Манагуа",
),
Country(
timezones=["Нидерланды (UTC+01)"],
alpha_2_code="NL",
alpha_3_code="NLD",
continent="Европа",
name="Нидерланды",
capital="Амстердам",
),
Country(
timezones=["Норвегия (UTC+01)"],
alpha_2_code="NO",
alpha_3_code="NOR",
continent="Европа",
name="Норвегия",
capital="Осло",
),
Country(
timezones=["Непал (UTC+05:45"],
alpha_2_code="NP",
alpha_3_code="NPL",
continent="Азия",
name="Непал",
capital="Катманду",
),
Country(
timezones=["Науру (UTC+12)"],
alpha_2_code="NR",
alpha_3_code="NRU",
continent="Океания",
name="Науру",
capital="Ярен",
),
Country(
timezones=["Новая Зеландия (UTC+12)"],
alpha_2_code="NZ",
alpha_3_code="NZL",
continent="Океания",
name="Новая Зеландия",
capital="Веллингтон",
),
Country(
timezones=["Оман (UTC+04"],
alpha_2_code="OM",
alpha_3_code="OMN",
continent="Азия",
name="Оман",
capital="Маскат",
),
Country(
timezones=["Панама (UTC-05)"],
alpha_2_code="PA",
alpha_3_code="PAN",
continent="Северная Америка",
name="Панама",
capital="Панама",
),
Country(
timezones=["Перу (UTC-05)"],
alpha_2_code="PE",
alpha_3_code="PER",
continent="Южная Америка",
name="Перу",
capital="Лима",
),
Country(
timezones=["Папуа - Новая Гвинея (UTC+10)"],
alpha_2_code="PG",
alpha_3_code="PNG",
continent="Океания",
name="Папуа - Новая Гвинея",
capital="Порт-Морсби",
),
Country(
timezones=["Филиппины (UTC+08)"],
alpha_2_code="PH",
alpha_3_code="PHL",
continent="Азия",
name="Филиппины",
capital="Манила",
),
Country(
timezones=["Пакистан (UTC+05)"],
alpha_2_code="PK",
alpha_3_code="PAK",
continent="Азия",
name="Пакистан",
capital="Исламабад",
),
Country(
timezones=["Польша (UTC+01)"],
alpha_2_code="PL",
alpha_3_code="POL",
continent="Европа",
name="Польша",
capital="Варшава",
),
Country(
timezones=["Португалия (UTC)"],
alpha_2_code="PT",
alpha_3_code="PRT",
continent="Европа",
name="Португалия",
capital="Лиссабон",
),
Country(
timezones=["Палау (UTC+09)"],
alpha_2_code="PW",
alpha_3_code="PLW",
continent="Океания",
name="Палау",
capital="Кампала",
),
Country(
timezones=["Парагвай (UTC-04)"],
alpha_2_code="PY",
alpha_3_code="PRY",
continent="Южная Америка",
name="Парагвай",
capital="Асунсьон",
),
Country(
timezones=["Катар (UTC+03)"],
alpha_2_code="QA",
alpha_3_code="QAT",
continent="Азия",
name="Катар",
capital="Доха",
),
Country(
timezones=["Румыния (UTC+02)"],
alpha_2_code="RO",
alpha_3_code="ROU",
continent="Европа",
name="Румыния",
capital="Бухарест",
),
Country(
timezones=[
"Россия (UTC+02)",
"Россия (UTC+03)",
"Россия (UTC+04)",
"Россия (UTC+05)",
"Россия (UTC+06)",
"Россия (UTC+07)",
"Россия (UTC+08)",
"Россия (UTC+09)",
"Россия (UTC+10)",
"Россия (UTC+11)",
"Россия (UTC+12)",
],
alpha_2_code="RU",
alpha_3_code="RUS",
continent="Европа",
name="Россия",
capital="Москва",
),
Country(
timezones=["Руанда (UTC+02)"],
alpha_2_code="RW",
alpha_3_code="RWA",
continent="Африка",
name="Руанда",
capital="Кигали",
),
Country(
timezones=["Саудовская Аравия (UTC+03)"],
alpha_2_code="SA",
alpha_3_code="SAU",
continent="Азия",
name="Саудовская Аравия",
capital="Эр-Рияд",
),
Country(
timezones=["Соломоновы Острова (UTC+11)"],
alpha_2_code="SB",
alpha_3_code="SLB",
continent="Океания",
name="Соломоновы Острова",
capital="Хониара",
),
Country(
timezones=["Сейшельские острова (UTC+04)"],
alpha_2_code="SC",
alpha_3_code="SYC",
continent="Африка",
name="Сейшельские острова",
capital="Виктория",
),
Country(
timezones=["Судан (UTC+03)"],
alpha_2_code="SD",
alpha_3_code="SDN",
continent="Африка",
name="Судан",
capital="Хартум",
),
Country(
timezones=["Швеция (UTC+01)"],
alpha_2_code="SE",
alpha_3_code="SWE",
continent="Европа",
name="Швеци",
capital="Стокгольм",
),
Country(
timezones=["Сингапур (UTC+08)"],
alpha_2_code="SG",
alpha_3_code="SGP",
continent="Азия",
name="Сингапур",
capital="Сингапур",
),
Country(
timezones=["Словения (UTC+01)"],
alpha_2_code="SI",
alpha_3_code="SVN",
continent="Европа",
name="Словения",
capital="Любляна",
),
Country(
timezones=["Словакия (UTC+01)"],
alpha_2_code="SK",
alpha_3_code="SVK",
continent="Европа",
name="Словакия",
capital="Братислава",
),
Country(
timezones=["Сьерра-Леоне (UTC)"],
alpha_2_code="SL",
alpha_3_code="SLE",
continent="Африка",
name="Сьерра Леоне",
capital="Фритаун",
),
Country(
timezones=["Сан-Марино (UTC+01)"],
alpha_2_code="SM",
alpha_3_code="SMR",
continent="Европа",
name="Сан-Марино",
capital="Сан-Марино",
),
Country(
timezones=["Сенегал (UTC)"],
alpha_2_code="SN",
alpha_3_code="SEN",
continent="Африка",
name="Сенегал",
capital="Дакар",
),
Country(
timezones=["Сомали (UTC+03)"],
alpha_2_code="SO",
alpha_3_code="SOM",
continent="Африка",
name="Сомали",
capital="Могадишо",
),
Country(
timezones=["Суринам (UTC-03)"],
alpha_2_code="SR",
alpha_3_code="SUR",
continent="Южная Америка",
name="Суринам",
capital="Парамарибо",
),
Country(
timezones=["Сан-Томе и Принсипи (UTC)"],
alpha_2_code="ST",
alpha_3_code="STP",
continent="Африка",
name="Сан-Томе и Принсипи",
capital="Сан-Томе",
),
Country(
timezones=["Сирия (UTC+02)"],
alpha_2_code="SY",
alpha_3_code="SYR",
continent="Азия",
name="Сирия",
capital="Дамаск",
),
Country(
timezones=["Того (UTC)"],
alpha_2_code="TG",
alpha_3_code="TGO",
continent="Африка",
name="Того",
capital="Ломе",
),
Country(
timezones=["Таиланд (UTC+07)"],
alpha_2_code="TH",
alpha_3_code="THA",
continent="Азия",
name="Таиланд",
capital="Бангкок",
),
Country(
timezones=["Таджикистан (UTC+05)"],
alpha_2_code="TJ",
alpha_3_code="TJK",
continent="Азия",
name="Таджикистан",
capital="Душанбе",
),
Country(
timezones=["Туркмения (UTC+05)"],
alpha_2_code="TM",
alpha_3_code="TKM",
continent="Азия",
name="Туркмения",
capital="Ашхабад",
),
Country(
timezones=["Тунис (UTC+01)"],
alpha_2_code="TN",
alpha_3_code="TUN",
continent="Африка",
name="Тунис",
capital="Тунис",
),
Country(
timezones=["Тонга (UTC+13)"],
alpha_2_code="TO",
alpha_3_code="TON",
continent="Океания",
name="Тонга",
capital="Нукуалофа",
),
Country(
timezones=["Турция (UTC+02)"],
alpha_2_code="TR",
alpha_3_code="TUR",
continent="Азия",
name="Турция",
capital="Анкара",
),
Country(
timezones=["Тринидад и Тобаго (UTC-04)"],
alpha_2_code="TT",
alpha_3_code="TTO",
continent="Северная Америка",
name="Тринидад и Тобаго",
capital="Порт-оф-Спейн",
),
Country(
timezones=["Тувалу (UTC+12)"],
alpha_2_code="TV",
alpha_3_code="TUV",
continent="Океания",
name="Тувалу",
capital="Фунафути",
),
Country(
timezones=["Танзания (UTC+03)"],
alpha_2_code="TZ",
alpha_3_code="TZA",
continent="Африка",
name="Танзания",
capital="Додома",
),
Country(
timezones=["Украина (UTC+02)", "Украина (UTC+03)"],
alpha_2_code="UA",
alpha_3_code="UKR",
continent="Европа",
name="Украина",
capital="Киев",
),
Country(
timezones=["Уганда (UTC+03)"],
alpha_2_code="UG",
alpha_3_code="UGA",
continent="Африка",
name="Уганда",
capital="Кампала",
),
Country(
timezones=[
"США (UTC-11)",
"США (UTC-10)",
"США (UTC-09)",
"США (UTC-08)",
"США (UTC-07)",
"США (UTC-06)",
"США (UTC-05)",
"США (UTC-04)",
"США (UTC+10)",
],
alpha_2_code="US",
alpha_3_code="USA",
continent="Северная Америка",
name="США",
capital="Вашингтон",
),
Country(
timezones=["Уругвай (UTC-03)"],
alpha_2_code="UY",
alpha_3_code="URY",
continent="Южная Америка",
name="Уругвай",
capital="Монтевидео",
),
Country(
timezones=["Узбекистан (UTC+05)"],
alpha_2_code="UZ",
alpha_3_code="UZB",
continent="Азия",
name="Узбекистан",
capital="Ташкент",
),
Country(
timezones=["Ватикан (UTC+01)"],
alpha_2_code="VA",
alpha_3_code="VAT",
continent="Европа",
name="Ватикан",
capital="Ватикан",
),
Country(
timezones=["Венесуэла (UTC-04:30)"],
alpha_2_code="VE",
alpha_3_code="VEN",
continent="Южная Америка",
name="Венесуэла",
capital="Каракас",
),
Country(
timezones=["Вьетнам (UTC+07)"],
alpha_2_code="VN",
alpha_3_code="VNM",
continent="Азия",
name="Вьетнам",
capital="Ханой",
),
Country(
timezones=["Вануату (UTC+11)"],
alpha_2_code="VU",
alpha_3_code="VUT",
continent="Океания",
name="Вануату",
capital="Порт-Вила",
),
Country(
timezones=["Йемен (UTC+03)"],
alpha_2_code="YE",
alpha_3_code="YEM",
continent="Азия",
name="Йемен",
capital="Сана",
),
Country(
timezones=["Замбия (UTC+02)"],
alpha_2_code="ZM",
alpha_3_code="ZMB",
continent="Африка",
name="Замбия",
capital="Лусака",
),
Country(
timezones=["Зимбабве (UTC+02)"],
alpha_2_code="ZW",
alpha_3_code="ZWE",
continent="Африка",
name="Зимбабве",
capital="Хараре",
),
Country(
timezones=["Алжир (UTC+01)"],
alpha_2_code="DZ",
alpha_3_code="DZA",
continent="Африка",
name="Алжир",
capital="Алжир",
),
Country(
timezones=["Босния и Герцеговина (UTC+01)"],
alpha_2_code="BA",
alpha_3_code="BIH",
continent="Европа",
name="Босния и Герцеговина",
capital="Сараево",
),
Country(
timezones=["Камбоджа (UTC+07)"],
alpha_2_code="KH",
alpha_3_code="KHM",
continent="Азия",
name="Камбоджа",
capital="Пномпень",
),
Country(
timezones=["ЦАР (UTC+01)"],
alpha_2_code="CF",
alpha_3_code="CAF",
continent="Африка",
name="ЦАР",
capital="Банги",
),
Country(
timezones=["Чад (UTC+01)"],
alpha_2_code="TD",
alpha_3_code="TCD",
continent="Африка",
name="Чад",
capital="Нджамена",
),
Country(
timezones=["Коморы (UTC+03)"],
alpha_2_code="KM",
alpha_3_code="COM",
continent="Африка",
name="Коморы",
capital="Морони",
),
Country(
timezones=["Хорватия (UTC+01)"],
alpha_2_code="HR",
alpha_3_code="HRV",
continent="Европа",
name="Хорватия",
capital="Загреб",
),
Country(
timezones=["Восточный Тимор (UTC+09)"],
alpha_2_code="TL",
alpha_3_code="TLS",
continent="Азия",
name="Восточный Тимор",
capital="Дили",
),
Country(
timezones=["Сальвадор (UTC-06)"],
alpha_2_code="SV",
alpha_3_code="SLV",
continent="Северная Америка",
name="Сальвадор",
capital="Сан-Сальвадор",
),
Country(
timezones=["Экваториальная Гвинея (UTC+01)"],
alpha_2_code="GQ",
alpha_3_code="GNQ",
continent="Африка",
name="Экваториальная Гвинея",
capital="Малабо",
),
Country(
timezones=["Гренада (UTC-04)"],
alpha_2_code="GD",
alpha_3_code="GRD",
continent="Северная Америка",
name="Гренада",
capital="Сент-Джорджес",
),
Country(
timezones=["Казахстан (UTC+05)", "Казахстан (UTC+06)"],
alpha_2_code="KZ",
alpha_3_code="KAZ",
continent="Азия",
name="Казахстан",
capital="Нур-Султан (Астана)",
),
Country(
timezones=["Лаос (UTC+07)"],
alpha_2_code="LA",
alpha_3_code="LAO",
continent="Азия",
name="Лаос",
capital="Вьентьян",
),
Country(
timezones=["Микронезия (UTC+10)", "Микронезия (UTC+11)"],
alpha_2_code="FM",
alpha_3_code="FSM",
continent="Океания",
name="Микронезия",
capital="Паликир",
),
Country(
timezones=["Молдавия (UTC+02)"],
alpha_2_code="MD",
alpha_3_code="MDA",
continent="Европа",
name="Молдавия",
capital="Кишинев",
),
Country(
timezones=["Монако (UTC+01)"],
alpha_2_code="MC",
alpha_3_code="MCO",
continent="Европа",
name="Монако",
capital="Монако",
),
Country(
timezones=["Черногория (UTC+01)"],
alpha_2_code="ME",
alpha_3_code="MNE",
continent="Европа",
name="Черногория",
capital="Подгорица",
),
Country(
timezones=["Марокко (UTC)"],
alpha_2_code="MA",
alpha_3_code="MAR",
continent="Африка",
name="Марокко",
capital="Рабат",
),
Country(
timezones=["Сент-Китс и Невис (UTC-04)"],
alpha_2_code="KN",
alpha_3_code="KNA",
continent="Северная Америка",
name="Сент-Китс и Невис",
capital="Бастер",
),
Country(
timezones=["Сент-Люсия (UTC-04)"],
alpha_2_code="LC",
alpha_3_code="LCA",
continent="Северная Америка",
name="Сент-Люсия",
capital="Кастри",
),
Country(
timezones=["Сент-Винсент и Гренадины (UTC-04)"],
alpha_2_code="VC",
alpha_3_code="VCT",
continent="Северная Америка",
name="Сент-Винсент и Гренадины",
capital="Кингстаун",
),
Country(
timezones=["Самоа (UTC+13)"],
alpha_2_code="WS",
alpha_3_code="WSM",
continent="Океания",
name="Самоа",
capital="Апиа",
),
Country(
timezones=["Сербия (UTC+01)"],
alpha_2_code="RS",
alpha_3_code="SRB",
continent="Европа",
name="Сербия",
capital="Белград",
),
Country(
timezones=["ЮАР (UTC+02)"],
alpha_2_code="ZA",
alpha_3_code="ZAF",
continent="Африка",
name="ЮАР",
capital="Претория",
),
Country(
timezones=["Испания (UTC)", "Испания (UTC+01)"],
alpha_2_code="ES",
alpha_3_code="ESP",
continent="Европа",
name="Испания",
capital="Мадрид",
),
Country(
timezones=["Шри-Ланка (UTC+05:30)"],
alpha_2_code="LK",
alpha_3_code="LKA",
continent="Азия",
name="Шри-Ланка",
capital="Шри-Джаяварденепура-Котте",
),
Country(
timezones=["Эсватини (Свазиленд) (UTC+02)"],
alpha_2_code="SZ",
alpha_3_code="SWZ",
continent="Африка",
name="Эсватини (Свазиленд)",
capital="Мбабане",
),
Country(
timezones=["Швейцария (UTC+01)"],
alpha_2_code="CH",
alpha_3_code="CHE",
continent="Европа",
name="Швейцария",
capital="Берн",
),
Country(
timezones=["ОАЭ (UTC+04)"],
alpha_2_code="AE",
alpha_3_code="ARE",
continent="Азия",
name="ОАЭ",
capital="Абу-Даби",
),
Country(
timezones=["Великобритания (UTC)"],
alpha_2_code="GB",
alpha_3_code="GBR",
continent="Европа",
name="Великобритания",
capital="Лондон",
),
]
def day_of_week(self) -> str:
day = self.date("%w")
return self.DAY_NAMES[day]
def month_name(self) -> str:
month = self.month()
return self.MONTH_NAMES[month]
| mit | f330a9e8013c2eb8e0666451d9851808 | 27.749543 | 115 | 0.416041 | 2.872329 | false | false | false | false |
joke2k/faker | faker/providers/phone_number/en_PH/__init__.py | 1 | 8227 | from typing import Sequence, Tuple
from ... import BaseProvider
class Provider(BaseProvider):
    """
    Provider for Philippine mobile and landline telephone numbers
    This provider has methods that generate phone numbers specific to service providers whenever applicable, because the
    kinds of services, the quality of said services, and even the fees may vary depending on the service provider and
    the service location. This in turn, affects subscriber behavior, e.g. someone with a SIM from company X may be very
    unlikely to respond to calls and texts sent from a company Y SIM as the service charge might be more expensive. The
    provider methods are there to enable the creation of more "realistic" fake data for such cases.
    Additional Notes:
        - The Philippine telecommunication industry is dominated by the Globe-PLDT duopoly. Globe offers landline
          services under the Globe brand and mobile services under the Globe and TM brands. PLDT offers landline
          services under the PLDT brand, and its subsidiaries offer mobile services under the Smart, TNT, and SUN
          brands. The rest of the industry is shared by smaller players, and Bayantel is one of the more well-known
          players that provide landline services.
        - Globe mobile prefixes include both Globe and TM brands, and the Smart mobile prefixes include both Smart
          and TNT brands but not the SUN brand. Available sources only split the prefixes this way.
        - In October 2019, Area 2 landline numbers were migrated to an 8 digit scheme, while the rest of the
          country still uses the original 7 digit scheme. Area 2 is comprised of the whole National Capital
          Region (aka Metro Manila) and parts of surrounding provinces, and within this area, the service
          provider's identifier is included in every 8 digit landline number.
    Sources:
        - https://en.wikipedia.org/wiki/Telephone_numbers_in_the_Philippines
        - https://www.prefix.ph/prefixes/2019-updated-complete-list-of-philippine-mobile-network-prefixes/
        - https://powerpinoys.com/network-prefixes-philippines/
    """
    # Three-digit mobile prefixes serviced by Globe (covers both the Globe and TM brands).
    globe_mobile_number_prefixes: Tuple[str, ...] = (
        "817",
        "904",
        "905",
        "906",
        "915",
        "916",
        "917",
        "926",
        "927",
        "935",
        "936",
        "937",
        "945",
        "955",
        "956",
        "965",
        "966",
        "967",
        "973",
        "975",
        "976",
        "977",
        "978",
        "979",
        "994",
        "995",
        "996",
        "997",
    )
    # Three-digit mobile prefixes serviced by Smart (covers the Smart and TNT brands, but not SUN).
    smart_mobile_number_prefixes: Tuple[str, ...] = (
        "813",
        "907",
        "908",
        "909",
        "910",
        "911",
        "912",
        "913",
        "914",
        "918",
        "919",
        "920",
        "921",
        "928",
        "929",
        "930",
        "938",
        "939",
        "940",
        "946",
        "947",
        "948",
        "949",
        "950",
        "951",
        "961",
        "970",
        "981",
        "989",
        "992",
        "998",
        "999",
    )
    # Three-digit mobile prefixes for the SUN brand.
    sun_mobile_number_prefixes: Tuple[str, ...] = (
        "922",
        "923",
        "924",
        "925",
        "931",
        "932",
        "933",
        "934",
        "941",
        "942",
        "943",
        "944",
    )
    # Mobile number patterns: local "0..." form and international "+63..." form. The {{...}}
    # tokens are expanded by the generator into one of the prefix values above, and each "#"
    # is later replaced with a random digit by numerify().
    globe_mobile_number_formats: Tuple[str, ...] = (
        "0{{globe_mobile_number_prefix}}-###-####",
        "+63{{globe_mobile_number_prefix}}-###-####",
    )
    smart_mobile_number_formats: Tuple[str, ...] = (
        "0{{smart_mobile_number_prefix}}-###-####",
        "+63{{smart_mobile_number_prefix}}-###-####",
    )
    sun_mobile_number_formats: Tuple[str, ...] = (
        "0{{sun_mobile_number_prefix}}-###-####",
        "+63{{sun_mobile_number_prefix}}-###-####",
    )
    # Union of all mobile patterns regardless of service provider.
    mobile_number_formats: Tuple[str, ...] = (
        globe_mobile_number_formats + smart_mobile_number_formats + sun_mobile_number_formats
    )
    # Four-digit Area 2 provider identifiers: 3000-3499 are attributed to Bayantel, while the
    # "misc" ranges 5300-5799 and 6000-6699 cover the remaining smaller players.
    bayantel_landline_identifiers: Tuple[str, ...] = tuple(str(x) for x in range(3000, 3500))
    misc_landline_identifiers: Tuple[str, ...] = tuple(str(x) for x in range(5300, 5800)) + tuple(
        str(x) for x in range(6000, 6700)
    )
    # Two-digit area codes for landlines outside Area 2 (these keep the 7-digit scheme).
    non_area2_landline_area_codes: Tuple[str, ...] = (
        "32",
        "33",
        "34",
        "35",
        "36",
        "38",
        "42",
        "43",
        "44",
        "45",
        "46",
        "47",
        "48",
        "49",
        "52",
        "53",
        "54",
        "55",
        "56",
        "62",
        "63",
        "64",
        "65",
        "68",
        "72",
        "74",
        "75",
        "77",
        "78",
        "82",
        "83",
        "84",
        "85",
        "86",
        "87",
        "88",
    )
    # Area 2 (8-digit scheme) landline patterns; the digit(s) after the "02"/"+632" area code
    # identify the provider: 7 for Globe, 8 for PLDT, and explicit identifier tokens for the rest.
    globe_area2_landline_number_formats: Tuple[str, ...] = (
        "02-7###-####",
        "+632-7###-####",
    )
    pldt_area2_landline_number_formats: Tuple[str, ...] = (
        "02-8###-####",
        "+632-8###-####",
    )
    bayantel_area2_landline_number_formats: Tuple[str, ...] = (
        "02-{{bayantel_landline_identifier}}-####",
        "+632-{{bayantel_landline_identifier}}-####",
    )
    misc_area2_landline_number_formats: Tuple[str, ...] = (
        "02-{{misc_landline_identifier}}-####",
        "+632-{{misc_landline_identifier}}-####",
    )
    # Union of all Area 2 landline patterns.
    area2_landline_number_formats: Tuple[str, ...] = (
        globe_area2_landline_number_formats
        + pldt_area2_landline_number_formats
        + bayantel_area2_landline_number_formats
        + misc_area2_landline_number_formats
    )
    # Landline patterns outside Area 2 (7-digit scheme, random area code from the tuple above).
    non_area2_landline_number_formats: Tuple[str, ...] = (
        "0{{non_area2_landline_area_code}}-###-####",
        "+63{{non_area2_landline_area_code}}-###-####",
    )
    # Union of all landline patterns nationwide.
    landline_number_formats: Tuple[str, ...] = area2_landline_number_formats + non_area2_landline_number_formats
    def _create_phone_number(self, formats: Sequence[str]) -> str:
        """Pick a random pattern from ``formats``, expand its {{...}} tokens, and fill in the digits."""
        pattern: str = self.random_element(formats)
        return self.numerify(self.generator.parse(pattern))
    def globe_mobile_number_prefix(self) -> str:
        """Return a random Globe/TM mobile number prefix."""
        return self.random_element(self.globe_mobile_number_prefixes)
    def smart_mobile_number_prefix(self) -> str:
        """Return a random Smart/TNT mobile number prefix."""
        return self.random_element(self.smart_mobile_number_prefixes)
    def sun_mobile_number_prefix(self) -> str:
        """Return a random SUN mobile number prefix."""
        return self.random_element(self.sun_mobile_number_prefixes)
    def bayantel_landline_identifier(self) -> str:
        """Return a random Area 2 landline identifier attributed to Bayantel."""
        return self.random_element(self.bayantel_landline_identifiers)
    def misc_landline_identifier(self) -> str:
        """Return a random Area 2 landline identifier attributed to miscellaneous providers."""
        return self.random_element(self.misc_landline_identifiers)
    def non_area2_landline_area_code(self) -> str:
        """Return a random area code of a landline outside Area 2."""
        return self.random_element(self.non_area2_landline_area_codes)
    def globe_mobile_number(self) -> str:
        """Return a random Globe/TM mobile number."""
        return self._create_phone_number(self.globe_mobile_number_formats)
    def smart_mobile_number(self) -> str:
        """Return a random Smart/TNT mobile number."""
        return self._create_phone_number(self.smart_mobile_number_formats)
    def sun_mobile_number(self) -> str:
        """Return a random SUN mobile number."""
        return self._create_phone_number(self.sun_mobile_number_formats)
    def mobile_number(self) -> str:
        """Return a random mobile number from any service provider."""
        return self._create_phone_number(self.mobile_number_formats)
    def globe_area2_landline_number(self) -> str:
        """Return a random Globe landline number within Area 2."""
        return self._create_phone_number(self.globe_area2_landline_number_formats)
    def pldt_area2_landline_number(self) -> str:
        """Return a random PLDT landline number within Area 2."""
        return self._create_phone_number(self.pldt_area2_landline_number_formats)
    def bayantel_area2_landline_number(self) -> str:
        """Return a random Bayantel landline number within Area 2."""
        return self._create_phone_number(self.bayantel_area2_landline_number_formats)
    def misc_area2_landline_number(self) -> str:
        """Return a random Area 2 landline number from a miscellaneous provider."""
        return self._create_phone_number(self.misc_area2_landline_number_formats)
    def area2_landline_number(self) -> str:
        """Return a random landline number within Area 2."""
        return self._create_phone_number(self.area2_landline_number_formats)
    def non_area2_landline_number(self) -> str:
        """Return a random landline number outside Area 2."""
        return self._create_phone_number(self.non_area2_landline_number_formats)
    def landline_number(self) -> str:
        """Return a random landline number from anywhere in the country."""
        return self._create_phone_number(self.landline_number_formats)
| mit | fe8eea6fb4c2377045d9ff2d2b90124a | 31.646825 | 120 | 0.572019 | 3.496388 | false | false | false | false |
joke2k/faker | faker/providers/address/nl_NL/__init__.py | 1 | 57923 | from .. import Provider as AddressProvider
class Provider(AddressProvider):
building_number_formats = ("#", "##", "###", "#", "##", "###")
street_suffixes = (
"baan",
"boulevard",
"dreef",
"hof",
"laan",
"pad",
"ring",
"singel",
"steeg",
"straat",
"weg",
)
# the 4 digit numerical part of Dutch postcodes is between 1000 and 9999;
# see http://nl.wikipedia.org/wiki/Postcode#Postcodes_in_Nederland
postcode_formats = ("%###??", "%### ??")
city_formats = ("{{city}}",)
# countries are from http://nl.wikipedia.org/wiki/ISO_3166-1
countries = (
"Afghanistan",
"Albanië",
"Algerije",
"Amerikaans-Samoa",
"Amerikaanse Maagdeneilanden",
"Andorra",
"Angola",
"Anguilla",
"Antarctica",
"Antigua en Barbuda",
"Argentinië",
"Armenië",
"Aruba",
"Australië",
"Azerbeidzjan",
"Bahama's",
"Bahrein",
"Bangladesh",
"Barbados",
"België",
"Belize",
"Benin",
"Bermuda",
"Bhutan",
"Bolivia",
"Bonaire, Sint Eustatius en Saba",
"Bosnië en Herzegovina",
"Botswana",
"Bouveteiland",
"Brazilië",
"Brits Indische Oceaanterritorium",
"Britse Maagdeneilanden",
"Brunei",
"Bulgarije",
"Burkina Faso",
"Burundi",
"Cambodja",
"Canada",
"Centraal-Afrikaanse Republiek",
"Chili",
"China",
"Christmaseiland",
"Cocoseilanden",
"Colombia",
"Comoren",
"Congo-Brazzaville",
"Congo-Kinshasa",
"Cookeilanden",
"Costa Rica",
"Cuba",
"Curaçao",
"Cyprus",
"Denemarken",
"Djibouti",
"Dominica",
"Dominicaanse Republiek",
"Duitsland",
"Ecuador",
"Egypte",
"El Salvador",
"Equatoriaal-Guinea",
"Eritrea",
"Estland",
"Ethiopië",
"Faeröer",
"Falklandeilanden",
"Fiji",
"Filipijnen",
"Finland",
"Frankrijk",
"Frans-Guyana",
"Frans-Polynesië",
"Franse Zuidelijke en Antarctische Gebieden",
"Gabon",
"Gambia",
"Georgië",
"Ghana",
"Gibraltar",
"Grenada",
"Griekenland",
"Groenland",
"Guadeloupe",
"Guam",
"Guatemala",
"Guernsey",
"Guinee",
"Guinee-Bissau",
"Guyana",
"Haïti",
"Heard en McDonaldeilanden",
"Honduras",
"Hongarije",
"Hongkong",
"IJsland",
"Ierland",
"India",
"Indonesië",
"Irak",
"Iran",
"Israël",
"Italië",
"Ivoorkust",
"Jamaica",
"Japan",
"Jemen",
"Jersey",
"Jordanië",
"Kaaimaneilanden",
"Kaapverdië",
"Kameroen",
"Kazachstan",
"Kenia",
"Kirgizië",
"Kiribati",
"Kleine Pacifische eilanden van de Verenigde Staten",
"Koeweit",
"Kroatië",
"Laos",
"Lesotho",
"Letland",
"Libanon",
"Liberia",
"Libië",
"Liechtenstein",
"Litouwen",
"Luxemburg",
"Macau",
"Macedonië",
"Madagaskar",
"Malawi",
"Maldiven",
"Maleisië",
"Mali",
"Malta",
"Man",
"Marokko",
"Marshalleilanden",
"Martinique",
"Mauritanië",
"Mauritius",
"Mayotte",
"Mexico",
"Micronesia",
"Moldavië",
"Monaco",
"Mongolië",
"Montenegro",
"Montserrat",
"Mozambique",
"Myanmar",
"Namibië",
"Nauru",
"Nederland",
"Nepal",
"Nicaragua",
"Nieuw-Caledonië",
"Nieuw-Zeeland",
"Niger",
"Nigeria",
"Niue",
"Noord-Korea",
"Noordelijke Marianen",
"Noorwegen",
"Norfolk",
"Oeganda",
"Oekraïne",
"Oezbekistan",
"Oman",
"Oost-Timor",
"Oostenrijk",
"Pakistan",
"Palau",
"Palestina",
"Panama",
"Papoea-Nieuw-Guinea",
"Paraguay",
"Peru",
"Pitcairneilanden",
"Polen",
"Portugal",
"Puerto Rico",
"Qatar",
"Roemenië",
"Rusland",
"Rwanda",
"Réunion",
"Saint Kitts en Nevis",
"Saint Lucia",
"Saint Vincent en de Grenadines",
"Saint-Barthélemy",
"Saint-Pierre en Miquelon",
"Salomonseilanden",
"Samoa",
"San Marino",
"Sao Tomé en Principe",
"Saoedi-Arabië",
"Senegal",
"Servië",
"Seychellen",
"Sierra Leone",
"Singapore",
"Sint Maarten",
"Sint-Helena, Ascension en Tristan da Cunha",
"Sint-Maarten",
"Slovenië",
"Slowakije",
"Soedan",
"Somalië",
"Spanje",
"Spitsbergen en Jan Mayen",
"Sri Lanka",
"Suriname",
"Swaziland",
"Syrië",
"Tadzjikistan",
"Taiwan",
"Tanzania",
"Thailand",
"Togo",
"Tokelau",
"Tonga",
"Trinidad en Tobago",
"Tsjaad",
"Tsjechië",
"Tunesië",
"Turkije",
"Turkmenistan",
"Turks- en Caicoseilanden",
"Tuvalu",
"Uruguay",
"Vanuatu",
"Vaticaanstad",
"Venezuela",
"Verenigd Koninkrijk",
"Verenigde Arabische Emiraten",
"Verenigde Staten",
"Vietnam",
"Wallis en Futuna",
"Westelijke Sahara",
"Wit-Rusland",
"Zambia",
"Zimbabwe",
"Zuid-Afrika",
"Zuid-Georgia en de Zuidelijke Sandwicheilanden",
"Zuid-Korea",
"Zuid-Soedan",
"Zweden",
"Zwitserland",
"Åland",
)
# cities are taken from the BAG "woonplaats";
# in this case the 8-Mar-2014 extract;
# see http://data.nlextract.nl/bag/csv/
cities = (
"'s Gravenmoer",
"'s-Graveland",
"'s-Gravendeel",
"'s-Gravenhage",
"'s-Gravenpolder",
"'s-Gravenzande",
"'s-Heer Abtskerke",
"'s-Heer Arendskerke",
"'s-Heer Hendrikskinderen",
"'s-Heerenberg",
"'s-Heerenbroek",
"'s-Heerenhoek",
"'s-Hertogenbosch",
"'t Goy",
"'t Haantje",
"'t Harde",
"'t Loo Oldebroek",
"'t Veld",
"'t Waar",
"'t Zand",
"'t Zandt",
"1e Exloërmond",
"2e Exloërmond",
"2e Valthermond",
"Aadorp",
"Aagtekerke",
"Aalden",
"Aalsmeer",
"Aalsmeerderbrug",
"Aalst",
"Aalsum",
"Aalten",
"Aardenburg",
"Aarlanderveen",
"Aarle-Rixtel",
"Aartswoud",
"Abbega",
"Abbekerk",
"Abbenbroek",
"Abbenes",
"Abcoude",
"Achlum",
"Achterveld",
"Achthuizen",
"Achtmaal",
"Acquoy",
"Adorp",
"Aduard",
"Aerdenhout",
"Aerdt",
"Afferden",
"Afferden L",
"Agelo",
"Akersloot",
"Akkrum",
"Akmarijp",
"Albergen",
"Alblasserdam",
"Alde Leie",
"Aldeboarn",
"Aldtsjerk",
"Alem",
"Alkmaar",
"Allingawier",
"Almelo",
"Almen",
"Almere",
"Almkerk",
"Alphen",
"Alphen aan den Rijn",
"Alteveer",
"Alteveer gem Hoogeveen",
"Altforst",
"Ambt Delden",
"Ameide",
"Amen",
"America",
"Amerongen",
"Amersfoort",
"Ammerstol",
"Ammerzoden",
"Amstelhoek",
"Amstelveen",
"Amstenrade",
"Amsterdam",
"Amsterdam-Duivendrecht",
"Andel",
"Andelst",
"Anderen",
"Andijk",
"Ane",
"Anerveen",
"Anevelde",
"Angeren",
"Angerlo",
"Anjum",
"Ankeveen",
"Anloo",
"Anna Paulowna",
"Annen",
"Annerveenschekanaal",
"Ansen",
"Apeldoorn",
"Appelscha",
"Appeltern",
"Appingedam",
"Arcen",
"Arkel",
"Arnemuiden",
"Arnhem",
"Arriën",
"Arum",
"Asch",
"Asperen",
"Assen",
"Assendelft",
"Asten",
"Augsbuurt",
"Augustinusga",
"Austerlitz",
"Avenhorn",
"Axel",
"Azewijn",
"Baaiduinen",
"Baaium",
"Baak",
"Baambrugge",
"Baard",
"Baarland",
"Baarle-Nassau",
"Baarlo",
"Baarn",
"Baars",
"Babberich",
"Babyloniënbroek",
"Bad Nieuweschans",
"Badhoevedorp",
"Baexem",
"Baflo",
"Bakel",
"Bakhuizen",
"Bakkeveen",
"Balgoij",
"Balinge",
"Balk",
"Balkbrug",
"Balloo",
"Balloërveld",
"Ballum",
"Baneheide",
"Banholt",
"Bant",
"Bantega",
"Barchem",
"Barendrecht",
"Barger-Compascuum",
"Barneveld",
"Barsingerhorn",
"Basse",
"Batenburg",
"Bathmen",
"Bavel",
"Bavel AC",
"Bears",
"Bedum",
"Beegden",
"Beek",
"Beek en Donk",
"Beekbergen",
"Beemte Broekland",
"Beers NB",
"Beerta",
"Beerze",
"Beerzerveld",
"Beesd",
"Beesel",
"Beets",
"Beetsterzwaag",
"Beilen",
"Beinsdorp",
"Belfeld",
"Bellingwolde",
"Belt-Schutsloot",
"Beltrum",
"Bemelen",
"Bemmel",
"Beneden-Leeuwen",
"Bennebroek",
"Bennekom",
"Benneveld",
"Benningbroek",
"Benschop",
"Bentelo",
"Benthuizen",
"Bentveld",
"Berg en Dal",
"Berg en Terblijt",
"Bergambacht",
"Bergeijk",
"Bergen (NH)",
"Bergen L",
"Bergen aan Zee",
"Bergen op Zoom",
"Bergentheim",
"Bergharen",
"Berghem",
"Bergschenhoek",
"Beringe",
"Berkel en Rodenrijs",
"Berkel-Enschot",
"Berkenwoude",
"Berkhout",
"Berlicum",
"Berltsum",
"Bern",
"Best",
"Beugen",
"Beuningen",
"Beuningen Gld",
"Beusichem",
"Beutenaken",
"Beverwijk",
"Biddinghuizen",
"Bierum",
"Biervliet",
"Biest-Houtakker",
"Biezenmortel",
"Biggekerke",
"Bilthoven",
"Bingelrade",
"Bitgum",
"Bitgummole",
"Bladel",
"Blankenham",
"Blaricum",
"Blauwestad",
"Blauwhuis",
"Bleiswijk",
"Blesdijke",
"Bleskensgraaf ca",
"Blessum",
"Blije",
"Blijham",
"Blitterswijck",
"Bloemendaal",
"Blokker",
"Blokzijl",
"Boazum",
"Bocholtz",
"Bodegraven",
"Boekel",
"Boelenslaan",
"Boer",
"Boerakker",
"Boesingheliede",
"Boijl",
"Boksum",
"Bolsward",
"Bontebok",
"Boornbergum",
"Boornzwaag",
"Borculo",
"Borger",
"Borgercompagnie",
"Borgsweer",
"Born",
"Borne",
"Bornerbroek",
"Bornwird",
"Borssele",
"Bosch en Duin",
"Boschoord",
"Boskoop",
"Bosschenhoofd",
"Botlek Rotterdam",
"Bourtange",
"Boven-Leeuwen",
"Bovenkarspel",
"Bovensmilde",
"Boxmeer",
"Boxtel",
"Braamt",
"Brakel",
"Brandwijk",
"Brantgum",
"Breda",
"Bredevoort",
"Breedenbroek",
"Breezand",
"Breezanddijk",
"Breskens",
"Breukelen",
"Breukeleveen",
"Brielle",
"Briltil",
"Britsum",
"Britswert",
"Broek",
"Broek in Waterland",
"Broek op Langedijk",
"Broekhuizen",
"Broekhuizenvorst",
"Broekland",
"Broeksterwâld",
"Bronkhorst",
"Bronneger",
"Bronnegerveen",
"Brouwershaven",
"Bruchem",
"Brucht",
"Bruchterveld",
"Bruinehaar",
"Bruinisse",
"Brummen",
"Brunssum",
"Bruntinge",
"Buchten",
"Budel",
"Budel-Dorplein",
"Budel-Schoot",
"Buggenum",
"Buinen",
"Buinerveen",
"Buitenkaag",
"Buitenpost",
"Bunde",
"Bunne",
"Bunnik",
"Bunschoten-Spakenburg",
"Burdaard",
"Buren",
"Burgerbrug",
"Burgerveen",
"Burgh-Haamstede",
"Burgum",
"Burgwerd",
"Burum",
"Bussum",
"Buurmalsen",
"Cadier en Keer",
"Cadzand",
"Callantsoog",
"Capelle aan den IJssel",
"Castelre",
"Castenray",
"Casteren",
"Castricum",
"Chaam",
"Clinge",
"Coevorden",
"Colijnsplaat",
"Collendoorn",
"Colmschate",
"Cornwerd",
"Cothen",
"Creil",
"Cromvoirt",
"Cruquius",
"Cuijk",
"Culemborg",
"Daarle",
"Daarlerveen",
"Dalem",
"Dalen",
"Dalerpeel",
"Dalerveen",
"Dalfsen",
"Dalmsholte",
"Damwâld",
"Darp",
"De Bilt",
"De Blesse",
"De Bult",
"De Cocksdorp",
"De Falom",
"De Glind",
"De Goorn",
"De Groeve",
"De Heen",
"De Heurne",
"De Hoeve",
"De Kiel",
"De Klomp",
"De Knipe",
"De Koog",
"De Krim",
"De Kwakel",
"De Lier",
"De Meern",
"De Moer",
"De Mortel",
"De Pol",
"De Punt",
"De Rijp",
"De Rips",
"De Schiphorst",
"De Steeg",
"De Tike",
"De Veenhoop",
"De Waal",
"De Weere",
"De Westereen",
"De Wilgen",
"De Wilp",
"De Zilk",
"Dearsum",
"Dedemsvaart",
"Dedgum",
"Deelen",
"Deest",
"Deil",
"Deinum",
"Delden",
"Delfgauw",
"Delfstrahuizen",
"Delft",
"Delfzijl",
"Delwijnen",
"Demen",
"Den Andel",
"Den Bommel",
"Den Burg",
"Den Dolder",
"Den Dungen",
"Den Ham",
"Den Helder",
"Den Hoorn",
"Den Horn",
"Den Hout",
"Den Ilp",
"Den Oever",
"Den Velde",
"Denekamp",
"Deurne",
"Deurningen",
"Deursen-Dennenburg",
"Deurze",
"Deventer",
"Didam",
"Dieden",
"Diemen",
"Diepenheim",
"Diepenveen",
"Dieren",
"Diessen",
"Diever",
"Dieverbrug",
"Diffelen",
"Dijken",
"Dinteloord",
"Dinxperlo",
"Diphoorn",
"Dirkshorn",
"Dirksland",
"Dodewaard",
"Doenrade",
"Doesburg",
"Doetinchem",
"Doeveren",
"Doezum",
"Dokkum",
"Doldersum",
"Domburg",
"Donderen",
"Dongen",
"Dongjum",
"Doniaga",
"Donkerbroek",
"Doorn",
"Doornenburg",
"Doornspijk",
"Doorwerth",
"Dordrecht",
"Dorst",
"Drachten",
"Drachten-Azeven",
"Drachtstercompagnie",
"Dreischor",
"Drempt",
"Dreumel",
"Driebergen-Rijsenburg",
"Drieborg",
"Driebruggen",
"Driehuis NH",
"Driehuizen",
"Driel",
"Driewegen",
"Driezum",
"Drijber",
"Drimmelen",
"Drogeham",
"Drogteropslagen",
"Drongelen",
"Dronryp",
"Dronten",
"Drouwen",
"Drouwenermond",
"Drouwenerveen",
"Drunen",
"Druten",
"Duiven",
"Duivendrecht",
"Duizel",
"Dussen",
"Dwingeloo",
"Eagum",
"Earnewâld",
"Easterein",
"Easterlittens",
"Eastermar",
"Easterwierrum",
"Echt",
"Echteld",
"Echten",
"Echtenerbrug",
"Eck en Wiel",
"Eckelrade",
"Edam",
"Ede",
"Ederveen",
"Ee",
"Eede",
"Eefde",
"Eelde",
"Eelderwolde",
"Eemdijk",
"Eemnes",
"Eemshaven",
"Een",
"Een-West",
"Eenrum",
"Eenum",
"Eerbeek",
"Eersel",
"Ees",
"Eesergroen",
"Eeserveen",
"Eesterga",
"Eesveen",
"Eethen",
"Eext",
"Eexterveen",
"Eexterveenschekanaal",
"Eexterzandvoort",
"Egchel",
"Egmond aan Zee",
"Egmond aan den Hoef",
"Egmond-Binnen",
"Eibergen",
"Eijsden",
"Eindhoven",
"Einighausen",
"Ekehaar",
"Elahuizen",
"Elburg",
"Eldersloo",
"Eleveld",
"Elim",
"Elkenrade",
"Ell",
"Ellecom",
"Ellemeet",
"Ellertshaar",
"Ellewoutsdijk",
"Elp",
"Elsendorp",
"Elshout",
"Elsloo",
"Elspeet",
"Elst",
"Elst Ut",
"Emmeloord",
"Emmen",
"Emmer-Compascuum",
"Empe",
"Emst",
"Engwierum",
"Enkhuizen",
"Ens",
"Enschede",
"Enspijk",
"Enter",
"Enumatil",
"Epe",
"Epen",
"Eppenhuizen",
"Epse",
"Erica",
"Erichem",
"Erlecom",
"Erm",
"Ermelo",
"Erp",
"Esbeek",
"Esch",
"Escharen",
"Espel",
"Est",
"Etten",
"Etten-Leur",
"Europoort Rotterdam",
"Eursinge",
"Everdingen",
"Evertsoord",
"Ewijk",
"Exloo",
"Exloërveen",
"Exmorra",
"Eygelshoven",
"Eys",
"Ezinge",
"Farmsum",
"Feanwâlden",
"Feerwerd",
"Feinsum",
"Ferwert",
"Ferwoude",
"Fijnaart",
"Finsterwolde",
"Firdgum",
"Fleringen",
"Fluitenberg",
"Fochteloo",
"Follega",
"Folsgare",
"Formerum",
"Foudgum",
"Foxhol",
"Foxwolde",
"Franeker",
"Frederiksoord",
"Friens",
"Frieschepalen",
"Froombosch",
"Gaanderen",
"Gaast",
"Gaastmeer",
"Galder",
"Gameren",
"Gapinge",
"Garderen",
"Garmerwolde",
"Garminge",
"Garnwerd",
"Garrelsweer",
"Garsthuizen",
"Garyp",
"Gassel",
"Gasselte",
"Gasselternijveen",
"Gasselternijveenschemond",
"Gastel",
"Gasteren",
"Gauw",
"Geelbroek",
"Geerdijk",
"Geersdijk",
"Geertruidenberg",
"Geervliet",
"Gees",
"Geesbrug",
"Geesteren",
"Geeuwenbrug",
"Geffen",
"Geijsteren",
"Geldermalsen",
"Gelderswoude",
"Geldrop",
"Geleen",
"Gellicum",
"Gelselaar",
"Gemert",
"Gemonde",
"Genderen",
"Gendringen",
"Gendt",
"Genemuiden",
"Gennep",
"Gerkesklooster",
"Gersloot",
"Geulle",
"Giesbeek",
"Giessen",
"Giessenburg",
"Gieten",
"Gieterveen",
"Giethmen",
"Giethoorn",
"Gilze",
"Ginnum",
"Glane",
"Glimmen",
"Godlinze",
"Goedereede",
"Goes",
"Goingarijp",
"Goirle",
"Goor",
"Gorinchem",
"Gorredijk",
"Gorssel",
"Gouda",
"Gouderak",
"Goudriaan",
"Goudswaard",
"Goutum",
"Goënga",
"Goëngahuizen",
"Graauw",
"Grafhorst",
"Graft",
"Gramsbergen",
"Grashoek",
"Grathem",
"Grave",
"Greonterp",
"Grevenbicht",
"Griendtsveen",
"Grijpskerk",
"Grijpskerke",
"Groede",
"Groenekan",
"Groeningen",
"Groenlo",
"Groesbeek",
"Groessen",
"Groet",
"Grolloo",
"Groningen",
"Gronsveld",
"Groot-Ammers",
"Grootebroek",
"Grootegast",
"Grootschermer",
"Grou",
"Grubbenvorst",
"Gulpen",
"Guttecoven",
"Gytsjerk",
"Haaften",
"Haaksbergen",
"Haalderen",
"Haaren",
"Haarle",
"Haarlem",
"Haarlemmerliede",
"Haarlo",
"Haarsteeg",
"Haarzuilens",
"Haastrecht",
"Haelen",
"Hagestein",
"Haghorst",
"Haler",
"Halfweg",
"Hall",
"Halle",
"Hallum",
"Halsteren",
"Handel",
"Hank",
"Hansweert",
"Hantum",
"Hantumeruitburen",
"Hantumhuizen",
"Hapert",
"Haps",
"Harbrinkhoek",
"Hardenberg",
"Harderwijk",
"Hardinxveld-Giessendam",
"Haren",
"Haren Gn",
"Harfsen",
"Harich",
"Haringhuizen",
"Harkema",
"Harkstede",
"Harlingen",
"Harmelen",
"Harreveld",
"Harskamp",
"Hartwerd",
"Haskerdijken",
"Haskerhorne",
"Hasselt",
"Hattem",
"Hattemerbroek",
"Haule",
"Haulerwijk",
"Hauwert",
"Havelte",
"Havelterberg",
"Hazerswoude-Dorp",
"Hazerswoude-Rijndijk",
"Hedel",
"Hedikhuizen",
"Hee",
"Heeg",
"Heel",
"Heelsum",
"Heelweg",
"Heemserveen",
"Heemskerk",
"Heemstede",
"Heenvliet",
"Heerde",
"Heerenveen",
"Heerewaarden",
"Heerhugowaard",
"Heerjansdam",
"Heerle",
"Heerlen",
"Heesbeen",
"Heesch",
"Heesselt",
"Heeswijk-Dinther",
"Heeten",
"Heeze",
"Hegebeintum",
"Hegelsom",
"Hei- en Boeicop",
"Heibloem",
"Heide",
"Heijen",
"Heijenrath",
"Heijningen",
"Heikant",
"Heilig Landstichting",
"Heiligerlee",
"Heiloo",
"Heinenoord",
"Heinkenszand",
"Heino",
"Hekelingen",
"Hekendorp",
"Helden",
"Helenaveen",
"Hellendoorn",
"Hellevoetsluis",
"Hellouw",
"Hellum",
"Helmond",
"Helvoirt",
"Hem",
"Hemelum",
"Hemmen",
"Hempens",
"Hemrik",
"Hendrik-Ido-Ambacht",
"Hengelo",
"Hengelo (Gld)",
"Hengevelde",
"Hengstdijk",
"Hensbroek",
"Herbaijum",
"Herkenbosch",
"Herkingen",
"Hernen",
"Herpen",
"Herpt",
"Herten",
"Hertme",
"Herveld",
"Herwen",
"Herwijnen",
"Heteren",
"Heukelom",
"Heukelum",
"Heumen",
"Heusden",
"Heveadorp",
"Heythuysen",
"Hezingen",
"Hiaure",
"Hichtum",
"Hidaard",
"Hierden",
"Hieslum",
"Hijken",
"Hijum",
"Hilaard",
"Hillegom",
"Hilvarenbeek",
"Hilversum",
"Hindeloopen",
"Hinnaard",
"Hippolytushoef",
"Hitzum",
"Hobrede",
"Hoedekenskerke",
"Hoek",
"Hoek van Holland",
"Hoenderloo",
"Hoensbroek",
"Hoenzadriel",
"Hoevelaken",
"Hoeven",
"Hoge Hexel",
"Hollandsche Rading",
"Hollandscheveld",
"Hollum",
"Holsloot",
"Holten",
"Holthees",
"Holtheme",
"Holthone",
"Holtum",
"Holwerd",
"Holwierde",
"Hommerts",
"Homoet",
"Honselersdijk",
"Hoofddorp",
"Hoofdplaat",
"Hoog Soeren",
"Hoog-Keppel",
"Hoogblokland",
"Hooge Mierde",
"Hooge Zwaluwe",
"Hoogeloon",
"Hoogenweg",
"Hoogerheide",
"Hoogersmilde",
"Hoogeveen",
"Hoogezand",
"Hooghalen",
"Hoogkarspel",
"Hoogland",
"Hooglanderveen",
"Hoogmade",
"Hoogvliet Rotterdam",
"Hoogwoud",
"Hoorn",
"Hoornaar",
"Hoornsterzwaag",
"Horn",
"Hornhuizen",
"Horssen",
"Horst",
"Houten",
"Houtigehage",
"Houwerzijl",
"Huijbergen",
"Huis ter Heide",
"Huisduinen",
"Huisseling",
"Huissen",
"Huizen",
"Huizinge",
"Hulsberg",
"Hulsel",
"Hulshorst",
"Hulst",
"Hulten",
"Hummelo",
"Hunsel",
"Hurdegaryp",
"Hurwenen",
"Húns",
"IJhorst",
"IJlst",
"IJmuiden",
"IJsselham",
"IJsselmuiden",
"IJsselstein",
"IJzendijke",
"IJzendoorn",
"Idaerd",
"Idsegahuizum",
"Idskenhuizen",
"Idzega",
"Iens",
"Ilpendam",
"Indijk",
"Ingber",
"Ingelum",
"Ingen",
"It Heidenskip",
"Itens",
"Ittervoort",
"Jaarsveld",
"Jabeek",
"Jannum",
"Jellum",
"Jelsum",
"Jirnsum",
"Jislum",
"Jisp",
"Jistrum",
"Jonkerslân",
"Jonkersvaart",
"Joppe",
"Jorwert",
"Joure",
"Jouswier",
"Jubbega",
"Julianadorp",
"Jutrijp",
"Kaag",
"Kaard",
"Kaatsheuvel",
"Kalenberg",
"Kallenkote",
"Kamerik",
"Kampen",
"Kamperland",
"Kamperveen",
"Kantens",
"Kapel Avezaath",
"Kapel-Avezaath",
"Kapelle",
"Kapellebrug",
"Katlijk",
"Kats",
"Kattendijke",
"Katwijk",
"Katwijk NB",
"Katwoude",
"Kedichem",
"Keent",
"Keijenborg",
"Kekerdom",
"Kelpen-Oler",
"Kerk Avezaath",
"Kerk-Avezaath",
"Kerkdriel",
"Kerkenveld",
"Kerkrade",
"Kerkwerve",
"Kerkwijk",
"Kessel",
"Kesteren",
"Kiel-Windeweer",
"Kilder",
"Kimswerd",
"Kinderdijk",
"Kinnum",
"Klaaswaal",
"Klarenbeek",
"Klazienaveen",
"Klazienaveen-Noord",
"Klein Zundert",
"Klijndijk",
"Klimmen",
"Kloetinge",
"Klooster Lidlum",
"Kloosterburen",
"Kloosterhaar",
"Kloosterzande",
"Klundert",
"Knegsel",
"Koarnjum",
"Kockengen",
"Koedijk",
"Koekange",
"Koewacht",
"Kolderwolde",
"Kolham",
"Kolhorn",
"Kollum",
"Kollumerpomp",
"Kollumerzwaag",
"Kommerzijl",
"Koningsbosch",
"Koningslust",
"Koog aan de Zaan",
"Koolwijk",
"Kootstertille",
"Kootwijk",
"Kootwijkerbroek",
"Kornhorn",
"Kornwerderzand",
"Kortehemmen",
"Kortenhoef",
"Kortgene",
"Koudekerk aan den Rijn",
"Koudekerke",
"Koudum",
"Koufurderrige",
"Krabbendijke",
"Kraggenburg",
"Kreileroord",
"Krewerd",
"Krimpen aan de Lek",
"Krimpen aan den IJssel",
"Kring van Dorth",
"Krommenie",
"Kronenberg",
"Kropswolde",
"Kruiningen",
"Kruisland",
"Kudelstaart",
"Kuinre",
"Kuitaart",
"Kwadendamme",
"Kwadijk",
"Kwintsheul",
"Kûbaard",
"Laag Zuthem",
"Laag-Keppel",
"Laag-Soeren",
"Lage Mierde",
"Lage Vuursche",
"Lage Zwaluwe",
"Lageland",
"Lambertschaag",
"Lamswaarde",
"Landerum",
"Landgraaf",
"Landhorst",
"Landsmeer",
"Langbroek",
"Langedijke",
"Langelille",
"Langelo",
"Langenboom",
"Langerak",
"Langeveen",
"Langeweg",
"Langezwaag",
"Langweer",
"Laren",
"Lathum",
"Lattrop-Breklenkamp",
"Lauwersoog",
"Lauwerzijl",
"Ledeacker",
"Leek",
"Leende",
"Leens",
"Leerbroek",
"Leerdam",
"Leermens",
"Leersum",
"Leeuwarden",
"Legemeer",
"Leiden",
"Leiderdorp",
"Leidschendam",
"Leimuiden",
"Leimuiderbrug",
"Lekkerkerk",
"Lekkum",
"Lellens",
"Lelystad",
"Lemele",
"Lemelerveld",
"Lemiers",
"Lemmer",
"Lengel",
"Lent",
"Leons",
"Lepelstraat",
"Lettelbert",
"Lettele",
"Leunen",
"Leur",
"Leusden",
"Leuth",
"Leutingewolde",
"Leuvenheim",
"Leveroy",
"Lewedorp",
"Lexmond",
"Lichtaard",
"Lichtenvoorde",
"Liempde",
"Lienden",
"Lierderholthuis",
"Lieren",
"Lierop",
"Lies",
"Lieshout",
"Liessel",
"Lievelde",
"Lieveren",
"Lijnden",
"Limbricht",
"Limmen",
"Linde",
"Linden",
"Linne",
"Linschoten",
"Lioessens",
"Lippenhuizen",
"Lisse",
"Lisserbroek",
"Lith",
"Lithoijen",
"Lobith",
"Lochem",
"Loenen",
"Loenen aan de Vecht",
"Loenersloot",
"Loerbeek",
"Lollum",
"Lomm",
"Longerhouw",
"Loo Gld",
"Loon",
"Loon op Zand",
"Loosbroek",
"Loosdrecht",
"Loozen",
"Lopik",
"Lopikerkapel",
"Loppersum",
"Losdorp",
"Losser",
"Lottum",
"Loënga",
"Lucaswolde",
"Luddeweer",
"Luinjeberd",
"Lunteren",
"Lutjebroek",
"Lutjegast",
"Lutjewinkel",
"Luttelgeest",
"Lutten",
"Luttenberg",
"Luxwoude",
"Luyksgestel",
"Lytsewierrum",
"Maarheeze",
"Maarn",
"Maarsbergen",
"Maarssen",
"Maartensdijk",
"Maasbommel",
"Maasbracht",
"Maasbree",
"Maasdam",
"Maasdijk",
"Maashees",
"Maasland",
"Maassluis",
"Maastricht",
"Maastricht-Airport",
"Maasvlakte Rotterdam",
"Macharen",
"Made",
"Makkinga",
"Makkum",
"Malden",
"Mander",
"Manderveen",
"Mantgum",
"Mantinge",
"Maren-Kessel",
"Margraten",
"Maria Hoop",
"Mariahout",
"Mariaparochie",
"Marijenkampen",
"Mariënberg",
"Mariënheem",
"Mariënvelde",
"Markelo",
"Marken",
"Markenbinnen",
"Marknesse",
"Marle",
"Marrum",
"Marsum",
"Marum",
"Marwijksoord",
"Mastenbroek",
"Matsloot",
"Maurik",
"Mechelen",
"Medemblik",
"Meeden",
"Meedhuizen",
"Meerkerk",
"Meerlo",
"Meerssen",
"Meerstad",
"Meeuwen",
"Megchelen",
"Megen",
"Meijel",
"Melderslo",
"Melick",
"Meliskerke",
"Melissant",
"Menaam",
"Mensingeweer",
"Meppel",
"Meppen",
"Merkelbeek",
"Merselo",
"Meteren",
"Meterik",
"Metslawier",
"Mheer",
"Middelaar",
"Middelburg",
"Middelharnis",
"Middelie",
"Middelstum",
"Middenbeemster",
"Middenmeer",
"Midlaren",
"Midlum",
"Midsland",
"Midwolda",
"Midwolde",
"Midwoud",
"Miedum",
"Mierlo",
"Mijdrecht",
"Mijnsheerenland",
"Mildam",
"Milheeze",
"Mill",
"Millingen aan de Rijn",
"Milsbeek",
"Minnertsga",
"Mirns",
"Moddergat",
"Moerdijk",
"Moergestel",
"Moerkapelle",
"Moerstraten",
"Molenaarsgraaf",
"Molenhoek",
"Molenschot",
"Molkwerum",
"Monnickendam",
"Monster",
"Montfoort",
"Montfort",
"Mook",
"Mookhoek",
"Moordrecht",
"Moorveld",
"Morra",
"Muiden",
"Muiderberg",
"Munnekeburen",
"Munnekezijl",
"Munstergeleen",
"Muntendam",
"Mussel",
"Musselkanaal",
"Mûnein",
"Naaldwijk",
"Naarden",
"Nagele",
"Nederasselt",
"Nederhemert",
"Nederhorst den Berg",
"Nederland",
"Nederweert",
"Nederweert-Eind",
"Neede",
"Neer",
"Neerijnen",
"Neeritter",
"Neerkant",
"Neerlangel",
"Neerloon",
"Nes",
"Netersel",
"Netterden",
"Niawier",
"Nibbixwoud",
"Niebert",
"Niehove",
"Niekerk",
"Nietap",
"Nieuw Annerveen",
"Nieuw Beerta",
"Nieuw Heeten",
"Nieuw Namen",
"Nieuw Scheemda",
"Nieuw- en Sint Joosland",
"Nieuw-Amsterdam",
"Nieuw-Balinge",
"Nieuw-Beijerland",
"Nieuw-Buinen",
"Nieuw-Dordrecht",
"Nieuw-Lekkerland",
"Nieuw-Roden",
"Nieuw-Schoonebeek",
"Nieuw-Vennep",
"Nieuw-Vossemeer",
"Nieuw-Weerdinge",
"Nieuwaal",
"Nieuwdorp",
"Nieuwe Niedorp",
"Nieuwe Pekela",
"Nieuwe Wetering",
"Nieuwe-Tonge",
"Nieuwebrug",
"Nieuwediep",
"Nieuwegein",
"Nieuwehorne",
"Nieuwendijk",
"Nieuwer Ter Aa",
"Nieuwerbrug aan den Rijn",
"Nieuwerkerk",
"Nieuwerkerk aan den IJssel",
"Nieuweroord",
"Nieuwersluis",
"Nieuweschoot",
"Nieuwkoop",
"Nieuwkuijk",
"Nieuwland",
"Nieuwlande",
"Nieuwlande Coevorden",
"Nieuwleusen",
"Nieuwolda",
"Nieuwpoort",
"Nieuwstadt",
"Nieuwveen",
"Nieuwvliet",
"Niezijl",
"Niftrik",
"Nigtevecht",
"Nij Altoenae",
"Nij Beets",
"Nijbroek",
"Nijeberkoop",
"Nijega",
"Nijehaske",
"Nijeholtpade",
"Nijeholtwolde",
"Nijelamer",
"Nijemirdum",
"Nijensleek",
"Nijetrijne",
"Nijeveen",
"Nijhuizum",
"Nijkerk",
"Nijkerkerveen",
"Nijland",
"Nijlande",
"Nijmegen",
"Nijverdal",
"Nispen",
"Nisse",
"Nistelrode",
"Noardburgum",
"Nooitgedacht",
"Noorbeek",
"Noord-Scharwoude",
"Noord-Sleen",
"Noordbeemster",
"Noordbroek",
"Noordeinde",
"Noordeinde Gld",
"Noordeloos",
"Noorden",
"Noordgouwe",
"Noordhoek",
"Noordhorn",
"Noordlaren",
"Noordscheschut",
"Noordwelle",
"Noordwijk",
"Noordwijkerhout",
"Noordwolde",
"Nootdorp",
"Norg",
"Notter",
"Nuenen",
"Nuis",
"Nuland",
"Numansdorp",
"Nunhem",
"Nunspeet",
"Nuth",
"Nutter",
"Obbicht",
"Obdam",
"Ochten",
"Odijk",
"Odiliapeel",
"Odoorn",
"Odoornerveen",
"Oeffelt",
"Oegstgeest",
"Oene",
"Oentsjerk",
"Offingawier",
"Ohé en Laak",
"Oijen",
"Oirlo",
"Oirsbeek",
"Oirschot",
"Oisterwijk",
"Okkenbroek",
"Olburgen",
"Oldeberkoop",
"Oldebroek",
"Oldeholtpade",
"Oldeholtwolde",
"Oldehove",
"Oldekerk",
"Oldelamer",
"Oldemarkt",
"Oldenzaal",
"Oldenzijl",
"Oldeouwer",
"Oldetrijne",
"Olst",
"Olterterp",
"Ommel",
"Ommen",
"Ommeren",
"Onderdendam",
"Onna",
"Onnen",
"Onstwedde",
"Ooij",
"Ooltgensplaat",
"Oost West en Middelbeers",
"Oost-Graftdijk",
"Oost-Souburg",
"Oostburg",
"Oostdijk",
"Oosteind",
"Oosterbeek",
"Oosterbierum",
"Oosterblokker",
"Oosterend",
"Oosterhesselen",
"Oosterhout",
"Oosterland",
"Oosterleek",
"Oosternieland",
"Oosternijkerk",
"Oosterstreek",
"Oosterwijk",
"Oosterwijtwerd",
"Oosterwolde",
"Oosterwolde Gld",
"Oosterzee",
"Oosthem",
"Oosthuizen",
"Oostkapelle",
"Oostknollendam",
"Oostrum",
"Oostvoorne",
"Oostwold",
"Oostwoud",
"Oostzaan",
"Ootmarsum",
"Opeinde",
"Opende",
"Ophemert",
"Opheusden",
"Opijnen",
"Oploo",
"Opmeer",
"Oppenhuizen",
"Opperdoes",
"Oranje",
"Oranjewoud",
"Orvelte",
"Ospel",
"Oss",
"Ossendrecht",
"Ossenisse",
"Ossenwaard",
"Ossenzijl",
"Oterleek",
"Otterlo",
"Ottersum",
"Ottoland",
"Oud Ade",
"Oud Annerveen",
"Oud Gastel",
"Oud Ootmarsum",
"Oud Zuilen",
"Oud-Alblas",
"Oud-Beijerland",
"Oud-Vossemeer",
"Ouddorp",
"Oude Meer",
"Oude Niedorp",
"Oude Pekela",
"Oude Wetering",
"Oude Willem",
"Oude-Tonge",
"Oudebildtzijl",
"Oudega",
"Oudehaske",
"Oudehorne",
"Oudelande",
"Oudemirdum",
"Oudemolen",
"Oudenbosch",
"Oudendijk",
"Oudenhoorn",
"Ouderkerk aan de Amstel",
"Ouderkerk aan den IJssel",
"Oudeschans",
"Oudeschild",
"Oudeschip",
"Oudeschoot",
"Oudesluis",
"Oudewater",
"Oudezijl",
"Oudheusden",
"Oudkarspel",
"Oudorp",
"Oudwoude",
"Ouwerkerk",
"Ouwster-Nijega",
"Ouwsterhaule",
"Overasselt",
"Overberg",
"Overdinkel",
"Overlangel",
"Overloon",
"Overschild",
"Overslag",
"Overveen",
"Ovezande",
"Paasloo",
"Paesens",
"Pannerden",
"Panningen",
"Papekop",
"Papendrecht",
"Papenhoven",
"Papenvoort",
"Parrega",
"Paterswolde",
"Peest",
"Peins",
"Peize",
"Peperga",
"Pernis Rotterdam",
"Persingen",
"Pesse",
"Petten",
"Philippine",
"Piaam",
"Piershil",
"Pieterburen",
"Pietersbierum",
"Pieterzijl",
"Pijnacker",
"Pingjum",
"Plasmolen",
"Poederoijen",
"Poeldijk",
"Polsbroek",
"Poortugaal",
"Poortvliet",
"Poppenwier",
"Posterholt",
"Prinsenbeek",
"Puiflijk",
"Punthorst",
"Purmer",
"Purmerend",
"Purmerland",
"Puth",
"Putte",
"Putten",
"Puttershoek",
"Raalte",
"Raamsdonk",
"Raamsdonksveer",
"Raard",
"Radewijk",
"Radio Kootwijk",
"Raerd",
"Randwijk",
"Ransdaal",
"Rasquert",
"Ravenstein",
"Ravenswaaij",
"Ravenswoud",
"Readtsjerk",
"Reahûs",
"Reduzum",
"Reek",
"Reeuwijk",
"Reijmerstok",
"Reitsum",
"Rekken",
"Renesse",
"Renkum",
"Renswoude",
"Ressen",
"Retranchement",
"Reusel",
"Reutum",
"Reuver",
"Rha",
"Rheden",
"Rhee",
"Rheeze",
"Rheezerveen",
"Rhenen",
"Rhenoy",
"Rhoon",
"Ridderkerk",
"Ried",
"Riel",
"Rien",
"Riethoven",
"Rietmolen",
"Rijen",
"Rijkevoort",
"Rijkevoort-De Walsert",
"Rijnsaterwoude",
"Rijnsburg",
"Rijpwetering",
"Rijs",
"Rijsbergen",
"Rijsenhout",
"Rijssen",
"Rijswijk",
"Rijswijk (GLD)",
"Rijswijk (NB)",
"Rilland",
"Rinsumageast",
"Ritthem",
"Rockanje",
"Roden",
"Roderesch",
"Roderwolde",
"Roelofarendsveen",
"Roermond",
"Rogat",
"Roggel",
"Rohel",
"Rolde",
"Roodeschool",
"Roosendaal",
"Roosteren",
"Rosmalen",
"Rossum",
"Roswinkel",
"Rotstergaast",
"Rotsterhaule",
"Rotterdam",
"Rotterdam-Albrandswaard",
"Rottevalle",
"Rottum",
"Rouveen",
"Rozenburg",
"Rozendaal",
"Rucphen",
"Ruigahuizen",
"Ruinen",
"Ruinerwold",
"Rumpt",
"Rutten",
"Ruurlo",
"Ryptsjerk",
"Saaksum",
"Saasveld",
"Saaxumhuizen",
"Sambeek",
"Sandfirden",
"Santpoort-Noord",
"Santpoort-Zuid",
"Sappemeer",
"Sas van Gent",
"Sassenheim",
"Sauwerd",
"Schagen",
"Schagerbrug",
"Schaijk",
"Schalkhaar",
"Schalkwijk",
"Schalsum",
"Schardam",
"Scharendijke",
"Scharmer",
"Scharnegoutum",
"Scharsterbrug",
"Scharwoude",
"Scheemda",
"Scheerwolde",
"Schellinkhout",
"Schelluinen",
"Schermerhorn",
"Scherpenisse",
"Scherpenzeel",
"Schettens",
"Scheulder",
"Schiedam",
"Schiermonnikoog",
"Schijf",
"Schijndel",
"Schildwolde",
"Schimmert",
"Schin op Geul",
"Schinnen",
"Schinveld",
"Schipborg",
"Schiphol",
"Schiphol-Rijk",
"Schipluiden",
"Schokland",
"Schoondijke",
"Schoonebeek",
"Schoonhoven",
"Schoonloo",
"Schoonoord",
"Schoonrewoerd",
"Schoorl",
"Schore",
"Schouwerzijl",
"Schraard",
"Schuinesloot",
"Sebaldeburen",
"Sellingen",
"Serooskerke",
"Sevenum",
"Sexbierum",
"Sibculo",
"Sibrandabuorren",
"Sibrandahûs",
"Siddeburen",
"Siebengewald",
"Siegerswoude",
"Sijbekarspel",
"Silvolde",
"Simonshaven",
"Simpelveld",
"Sinderen",
"Sint Agatha",
"Sint Annen",
"Sint Anthonis",
"Sint Geertruid",
"Sint Hubert",
"Sint Jansklooster",
"Sint Jansteen",
"Sint Joost",
"Sint Kruis",
"Sint Maarten",
"Sint Maartensbrug",
"Sint Maartensvlotbrug",
"Sint Nicolaasga",
"Sint Odiliënberg",
"Sint Pancras",
"Sint Philipsland",
"Sint-Annaland",
"Sint-Maartensdijk",
"Sint-Michielsgestel",
"Sint-Oedenrode",
"Sintjohannesga",
"Sirjansland",
"Sittard",
"Skingen",
"Slagharen",
"Slappeterp",
"Sleen",
"Sleeuwijk",
"Slenaken",
"Sliedrecht",
"Slijk-Ewijk",
"Slijkenburg",
"Slochteren",
"Slootdorp",
"Sloten",
"Sluis",
"Sluiskil",
"Smakt",
"Smalle Ee",
"Smallebrugge",
"Smilde",
"Snakkerburen",
"Sneek",
"Snelrewaard",
"Snikzwaag",
"Soerendonk",
"Soest",
"Soesterberg",
"Someren",
"Sommelsdijk",
"Son en Breugel",
"Sondel",
"Sonnega",
"Spaarndam",
"Spaarndam gem. Haarlem",
"Spanbroek",
"Spanga",
"Spankeren",
"Spannum",
"Spaubeek",
"Spier",
"Spierdijk",
"Spijk",
"Spijk Gn",
"Spijkenisse",
"Spijkerboor",
"Sprang-Capelle",
"Sprundel",
"Spui",
"St. Willebrord",
"St.-Annaparochie",
"St.-Jacobiparochie",
"Stad aan 't Haringvliet",
"Stadskanaal",
"Stampersgat",
"Standdaarbuiten",
"Staphorst",
"Starnmeer",
"Startenhuizen",
"Stavenisse",
"Stavoren",
"Stedum",
"Steenbergen",
"Steendam",
"Steenderen",
"Steenenkamer",
"Steensel",
"Steenwijk",
"Steenwijkerwold",
"Stegeren",
"Steggerda",
"Stein",
"Stellendam",
"Sterksel",
"Stevensbeek",
"Stevensweert",
"Steyl",
"Stieltjeskanaal",
"Stiens",
"Stitswerd",
"Stokkum",
"Stolwijk",
"Stompetoren",
"Stoutenburg",
"Stoutenburg Noord",
"Stramproy",
"Streefkerk",
"Striep",
"Strijbeek",
"Strijen",
"Strijensas",
"Stroe",
"Stroobos",
"Stuifzand",
"Sumar",
"Surhuisterveen",
"Surhuizum",
"Susteren",
"Suwâld",
"Swalmen",
"Sweikhuizen",
"Swichum",
"Swifterbant",
"Swolgen",
"Taarlo",
"Teeffelen",
"Teerns",
"Tegelen",
"Ten Boer",
"Ten Post",
"Ter Aar",
"Ter Aard",
"Ter Apel",
"Ter Apelkanaal",
"Ter Heijde",
"Ter Idzard",
"Terband",
"Terborg",
"Terheijden",
"Terherne",
"Terhole",
"Terkaple",
"Termunten",
"Termunterzijl",
"Ternaard",
"Terneuzen",
"Teroele",
"Terschuur",
"Tersoal",
"Terwispel",
"Terwolde",
"Teteringen",
"Teuge",
"Thesinge",
"Tholen",
"Thorn",
"Tiel",
"Tiendeveen",
"Tienhoven",
"Tienray",
"Tijnje",
"Tilburg",
"Tilligte",
"Tinallinge",
"Tinte",
"Tirns",
"Tjalhuizum",
"Tjalleberd",
"Tjerkgaast",
"Tjerkwerd",
"Tjuchem",
"Tolbert",
"Toldijk",
"Tolkamer",
"Tollebeek",
"Tonden",
"Toornwerd",
"Tricht",
"Triemen",
"Tripscompagnie",
"Tubbergen",
"Tuil",
"Tuitjenhorn",
"Tuk",
"Tull en 't Waal",
"Twello",
"Twijzel",
"Twijzelerheide",
"Twisk",
"Tynaarlo",
"Tytsjerk",
"Tzum",
"Tzummarum",
"Ubbena",
"Ubbergen",
"Uddel",
"Uden",
"Udenhout",
"Uffelte",
"Ugchelen",
"Uitdam",
"Uitgeest",
"Uithoorn",
"Uithuizen",
"Uithuizermeeden",
"Uitwellingerga",
"Uitwijk",
"Ulestraten",
"Ulft",
"Ulicoten",
"Ulrum",
"Ulvenhout",
"Ulvenhout AC",
"Ureterp",
"Urk",
"Urmond",
"Ursem",
"Ursem gem. S",
"Usquert",
"Utrecht",
"Vaals",
"Vaassen",
"Valburg",
"Valkenburg",
"Valkenswaard",
"Valthe",
"Valthermond",
"Varik",
"Varsselder",
"Varsseveld",
"Vasse",
"Veelerveen",
"Veen",
"Veendam",
"Veenendaal",
"Veenhuizen",
"Veeningen",
"Veenklooster",
"Veenoord",
"Veere",
"Veessen",
"Vegelinsoord",
"Veghel",
"Velddriel",
"Velden",
"Veldhoven",
"Velp",
"Velsen-Noord",
"Velsen-Zuid",
"Velserbroek",
"Ven-Zelderheide",
"Venebrugge",
"Venhorst",
"Venhuizen",
"Venlo",
"Venray",
"Vessem",
"Vethuizen",
"Veulen",
"Vianen",
"Vianen NB",
"Vierakker",
"Vierhouten",
"Vierhuizen",
"Vierlingsbeek",
"Vierpolders",
"Vijfhuizen",
"Vijlen",
"Vilsteren",
"Vinkega",
"Vinkel",
"Vinkenbuurt",
"Vinkeveen",
"Visvliet",
"Vlaardingen",
"Vlagtwedde",
"Vledder",
"Vledderveen",
"Vleuten",
"Vlieland",
"Vlierden",
"Vlijmen",
"Vlissingen",
"Vlist",
"Vlodrop",
"Voerendaal",
"Vogelenzang",
"Vogelwaarde",
"Volendam",
"Volkel",
"Vollenhove",
"Vondelingenplaat Rotterdam",
"Voorburg",
"Voorhout",
"Voorschoten",
"Voorst",
"Voorthuizen",
"Vorchten",
"Vorden",
"Vorstenbosch",
"Vortum-Mullem",
"Vragender",
"Vredenheim",
"Vredepeel",
"Vreeland",
"Vries",
"Vriescheloo",
"Vriezenveen",
"Vroomshoop",
"Vrouwenakker",
"Vrouwenparochie",
"Vrouwenpolder",
"Vught",
"Vuren",
"Waaksens",
"Waal",
"Waalre",
"Waalwijk",
"Waarde",
"Waardenburg",
"Waarder",
"Waardhuizen",
"Waarland",
"Waaxens",
"Wachtum",
"Waddinxveen",
"Wadenoijen",
"Wagenberg",
"Wagenborgen",
"Wageningen",
"Walem",
"Walsoorden",
"Wamel",
"Wanneperveen",
"Wanroij",
"Wanssum",
"Wapenveld",
"Wapse",
"Wapserveen",
"Warder",
"Warffum",
"Warfhuizen",
"Warfstermolen",
"Warmenhuizen",
"Warmond",
"Warns",
"Warnsveld",
"Warstiens",
"Warten",
"Waskemeer",
"Waspik",
"Wassenaar",
"Wateren",
"Watergang",
"Waterhuizen",
"Wateringen",
"Waterlandkerkje",
"Waverveen",
"Wedde",
"Weerselo",
"Weert",
"Weesp",
"Wehe-den Hoorn",
"Wehl",
"Weidum",
"Weiteveen",
"Wekerom",
"Well",
"Well L",
"Wellerlooi",
"Welsum",
"Wemeldinge",
"Wenum Wiesel",
"Wergea",
"Werkendam",
"Werkhoven",
"Wernhout",
"Wervershoof",
"Wesepe",
"Wessem",
"West-Graftdijk",
"West-Terschelling",
"Westbeemster",
"Westbroek",
"Westdorp",
"Westdorpe",
"Westendorp",
"Westerbeek",
"Westerbork",
"Westerbroek",
"Westeremden",
"Westergeest",
"Westerhaar-Vriezenveensewijk",
"Westerhoven",
"Westerland",
"Westerlee",
"Westernieland",
"Westervelde",
"Westervoort",
"Westerwijtwerd",
"Westhem",
"Westhoek",
"Westkapelle",
"Westknollendam",
"Westmaas",
"Westwoud",
"Westzaan",
"Wetering",
"Weteringbrug",
"Wetsens",
"Wetsinge",
"Weurt",
"Wezep",
"Wezup",
"Wezuperbrug",
"Wichmond",
"Wier",
"Wierden",
"Wieringerwaard",
"Wieringerwerf",
"Wierum",
"Wijchen",
"Wijckel",
"Wijdenes",
"Wijdewormer",
"Wijhe",
"Wijk aan Zee",
"Wijk bij Duurstede",
"Wijk en Aalburg",
"Wijlre",
"Wijnaldum",
"Wijnandsrade",
"Wijnbergen",
"Wijngaarden",
"Wijnjewoude",
"Wijster",
"Wilbertoord",
"Wildervank",
"Wilhelminadorp",
"Wilhelminaoord",
"Willemsoord",
"Willemstad",
"Wilnis",
"Wilp",
"Wilsum",
"Winde",
"Windraak",
"Winkel",
"Winneweer",
"Winschoten",
"Winssen",
"Winsum",
"Wintelre",
"Winterswijk",
"Winterswijk Brinkheurne",
"Winterswijk Corle",
"Winterswijk Henxel",
"Winterswijk Huppel",
"Winterswijk Kotten",
"Winterswijk Meddo",
"Winterswijk Miste",
"Winterswijk Ratum",
"Winterswijk Woold",
"Wirdum",
"Wirdum Gn",
"Wissenkerke",
"Witharen",
"Witmarsum",
"Witte Paarden",
"Wittelte",
"Wittem",
"Witteveen",
"Wiuwert",
"Wjelsryp",
"Woensdrecht",
"Woerden",
"Woerdense Verlaat",
"Wognum",
"Woldendorp",
"Wolfheze",
"Wolphaartsdijk",
"Wolsum",
"Woltersum",
"Wolvega",
"Wommels",
"Wons",
"Workum",
"Wormer",
"Wormerveer",
"Woubrugge",
"Woudbloem",
"Woudenberg",
"Woudrichem",
"Woudsend",
"Wouw",
"Wouwse Plantage",
"Wyns",
"Wytgaard",
"Wâlterswâld",
"Wânswert",
"Yde",
"Yerseke",
"Ypecolsga",
"Ysbrechtum",
"Ysselsteyn",
"Zaamslag",
"Zaandam",
"Zaandijk",
"Zalk",
"Zaltbommel",
"Zandberg",
"Zandeweer",
"Zandhuizen",
"Zandpol",
"Zandvoort",
"Zeddam",
"Zeegse",
"Zeeland",
"Zeerijp",
"Zeewolde",
"Zegge",
"Zegveld",
"Zeijen",
"Zeijerveen",
"Zeijerveld",
"Zeist",
"Zelhem",
"Zenderen",
"Zennewijnen",
"Zetten",
"Zevenaar",
"Zevenbergen",
"Zevenbergschen Hoek",
"Zevenhoven",
"Zevenhuizen",
"Zierikzee",
"Zieuwent",
"Zijderveld",
"Zijdewind",
"Zijldijk",
"Zoelen",
"Zoelmond",
"Zoetermeer",
"Zoeterwoude",
"Zonnemaire",
"Zorgvlied",
"Zoutelande",
"Zoutkamp",
"Zuid-Beijerland",
"Zuid-Scharwoude",
"Zuidbroek",
"Zuiddorpe",
"Zuidermeer",
"Zuiderwoude",
"Zuidhorn",
"Zuidlaarderveen",
"Zuidland",
"Zuidlaren",
"Zuidoostbeemster",
"Zuidschermer",
"Zuidveen",
"Zuidveld",
"Zuidvelde",
"Zuidwolde",
"Zuidzande",
"Zuilichem",
"Zuna",
"Zundert",
"Zurich",
"Zutphen",
"Zuurdijk",
"Zwaag",
"Zwaagdijk-Oost",
"Zwaagdijk-West",
"Zwaanshoek",
"Zwagerbosch",
"Zwammerdam",
"Zwanenburg",
"Zwartebroek",
"Zwartemeer",
"Zwartewaal",
"Zwartsluis",
"Zweeloo",
"Zweins",
"Zwiggelte",
"Zwijndrecht",
"Zwinderen",
"Zwolle",
"de Hoef",
"de Lutte",
"de Wijk",
"de Woude",
)
    # The twelve provinces of the Netherlands.
    provinces = (
        "Drenthe",
        "Flevoland",
        "Friesland",
        "Gelderland",
        "Groningen",
        "Limburg",
        "Noord-Brabant",
        "Noord-Holland",
        "Overijssel",
        "Utrecht",
        "Zeeland",
        "Zuid-Holland",
    )

    street_name_formats = ("{{first_name}}{{street_suffix}}",)
    street_address_formats = ("{{street_name}} {{building_number}}",)
    address_formats = ("{{street_address}}\n{{postcode}}\n{{city}}",)

    def administrative_unit(self) -> str:
        """Return the name of a random Dutch province."""
        return self.random_element(self.provinces)

    # Locale-friendly alias for the generic administrative-unit formatter.
    province = administrative_unit

    def city(self) -> str:
        """Return a random city name from the ``cities`` tuple."""
        return self.random_element(self.cities)
| mit | c964eee4b26695ad72966b352374b72f | 20.137742 | 77 | 0.424206 | 2.891977 | false | false | false | false |
joke2k/faker | tests/providers/test_file.py | 1 | 1697 | import re
import unittest
from faker import Faker
class TestFile(unittest.TestCase):
    """Tests for the ``file`` provider (paths, devices, partitions)."""

    def setUp(self):
        # Seed the class-level RNG so each test run is deterministic.
        self.fake = Faker()
        Faker.seed(0)

    def test_file_path(self):
        """file_path() honours the absolute/depth/extension/category options."""
        for _ in range(100):
            # Default: absolute path with a single directory level.
            file_path = self.fake.file_path()
            assert re.search(r"\/\w+\/\w+\.\w+", file_path)
            # Relative path: no leading slash.
            file_path = self.fake.file_path(absolute=False)
            assert re.search(r"\w+\/\w+\.\w+", file_path)
            # depth controls the number of directory components.
            file_path = self.fake.file_path(depth=3)
            assert re.search(r"\/\w+\/\w+\/\w+\.\w+", file_path)
            # A fixed extension is used verbatim.
            file_path = self.fake.file_path(extension="pdf")
            assert re.search(r"\/\w+\/\w+\.pdf", file_path)
            # A category restricts the extension to that category's set.
            file_path = self.fake.file_path(category="image")
            assert re.search(r"\/\w+\/\w+\.(bmp|gif|jpeg|jpg|png|tiff)", file_path)

    def test_unix_device(self):
        """unix_device() yields /dev/(vd|sd|xvd)<letter> style names."""
        reg_device = re.compile(r"^/dev/(vd|sd|xvd)[a-z]$")
        # Test default
        for _ in range(100):
            path = self.fake.unix_device()
            assert reg_device.match(path)
        # Test with prefix
        for _ in range(100):
            path = self.fake.unix_device("sd")
            assert reg_device.match(path)
            assert path.startswith("/dev/sd")

    def test_unix_partition(self):
        """unix_partition() appends a digit to a unix device name."""
        reg_part = re.compile(r"^/dev/(vd|sd|xvd)[a-z]\d$")
        # Test default
        for _ in range(100):
            path = self.fake.unix_partition()
            assert reg_part.match(path)
        # Test with prefix
        for _ in range(100):
            path = self.fake.unix_partition("sd")
            assert reg_part.match(path)
            assert path.startswith("/dev/sd")
| mit | ec99496343e115a6b1791aee722b5da9 | 33.632653 | 83 | 0.53683 | 3.421371 | false | true | false | false |
joke2k/faker | faker/providers/ssn/no_NO/__init__.py | 1 | 3339 | import datetime
import operator
from typing import List, Optional, Sequence
from ....typing import SexLiteral
from .. import Provider as SsnProvider
def checksum(digits: Sequence[int], scale: List[int]) -> int:
    """Compute a "Module 11" check digit for a Norwegian identity number.

    Each digit is multiplied by the weight at the same position in
    ``scale`` (extra elements in the longer sequence are ignored), the
    products are summed, and the check value is ``11 - (sum % 11)``,
    with a result of 11 mapped to 0.  Note the pairwise products make
    the computation symmetric in its two arguments.

    https://no.wikipedia.org/wiki/F%C3%B8dselsnummer
    """
    remainder = sum(d * w for d, w in zip(digits, scale)) % 11
    return 0 if remainder == 0 else 11 - remainder
class Provider(SsnProvider):
    # "Module 11" weight tables: scale1 covers the 9 digits before the first
    # check digit, scale2 covers those 9 digits plus the first check digit.
    scale1 = (3, 7, 6, 1, 8, 9, 4, 5, 2)
    scale2 = (5, 4, 3, 2, 7, 6, 5, 4, 3, 2)

    def ssn(self, dob: Optional[str] = None, gender: Optional[SexLiteral] = None) -> str:
        """
        Returns 11 character Norwegian personal identity code (Fødselsnummer).

        A Norwegian personal identity code consists of 11 digits, without any
        whitespace or other delimiters. The form is DDMMYYIIICC, where III is
        a serial number separating persons born oh the same date with different
        intervals depending on the year they are born. CC is two checksums.
        https://en.wikipedia.org/wiki/National_identification_number#Norway

        :param dob: date of birth as a "YYYYMMDD" string
        :type dob: str
        :param gender: gender of the person - "F" for female, M for male.
        :type gender: str
        :return: Fødselsnummer in str format (11 digs)
        :rtype: str
        """
        if dob:
            birthday = datetime.datetime.strptime(dob, "%Y%m%d")
        else:
            # No explicit date of birth: pick a random age between 18 and 90.
            age = datetime.timedelta(days=self.generator.random.randrange(18 * 365, 90 * 365))
            birthday = datetime.datetime.now() - age
        if not gender:
            gender = self.generator.random.choice(("F", "M"))
        elif gender not in ("F", "M"):
            raise ValueError("Gender must be one of F or M.")

        # Loop until the generated number yields valid check digits (a check
        # value of 10 cannot be encoded and forces a retry).
        while True:
            # The first two digits of the serial number encode the century
            # series the person was born in.
            if 1900 <= birthday.year <= 1999:
                suffix = str(self.generator.random.randrange(0, 49))
            elif 1854 <= birthday.year <= 1899:
                suffix = str(self.generator.random.randrange(50, 74))
            elif 2000 <= birthday.year <= 2039:
                suffix = str(self.generator.random.randrange(50, 99))
            elif 1940 <= birthday.year <= 1999:
                # NOTE(review): unreachable — years 1940-1999 are already
                # matched by the first branch above, so the 90-99 series is
                # never generated. Confirm the intended branch precedence.
                suffix = str(self.generator.random.randrange(90, 99))
            # Last serial digit: even for females, odd for males.
            if gender == "F":
                gender_num = self.generator.random.choice((0, 2, 4, 6, 8))
            elif gender == "M":
                gender_num = self.generator.random.choice((1, 3, 5, 7, 9))
            pnr = birthday.strftime("%d%m%y") + suffix.zfill(2) + str(gender_num)
            pnr_nums = [int(ch) for ch in pnr]
            # NOTE(review): checksum() is declared as (digits, scale) but is
            # called here with the arguments swapped; harmless because the
            # pairwise products are symmetric, but worth normalizing.
            k1 = checksum(Provider.scale1, pnr_nums)
            k2 = checksum(Provider.scale2, pnr_nums + [k1])
            # Checksums with a value of 10 is rejected.
            # https://no.wikipedia.org/wiki/F%C3%B8dselsnummer
            if k1 == 10 or k2 == 10:
                continue
            pnr += f"{k1}{k2}"
            return pnr
| mit | 940599955c57c1d627708b4edd440dc3 | 39.204819 | 94 | 0.596344 | 3.627174 | false | false | false | false |
joke2k/faker | faker/providers/automotive/ro_RO/__init__.py | 1 | 1181 | import string
from .. import Provider as AutomotiveProvider
class Provider(AutomotiveProvider):
    """Implement automotive provider for ``ro_RO`` locale."""

    # Romanian county codes used as plate prefixes (plus "B" for Bucharest).
    license_plate_prefix = (
        "AB",
        "AG",
        "AR",
        "B",
        "BC",
        "BH",
        "BN",
        "BR",
        "BT",
        "BV",
        "BZ",
        "CJ",
        "CL",
        "CS",
        "CT",
        "CV",
        "DB",
        "DJ",
        "GJ",
        "GL",
        "GR",
        "HD",
        "HR",
        "IF",
        "IL",
        "IS",
        "MH",
        "MM",
        "MS",
        "NT",
        "OT",
        "PH",
        "SB",
        "SJ",
        "SM",
        "SV",
        "TL",
        "TM",
        "TR",
        "VL",
        "VN",
        "VS",
    )

    # Digit/letter patterns appended after the county code.
    license_plate_suffix = (
        "-###-???",
        "-##-???",
    )

    def license_plate(self) -> str:
        """Generate a random Romanian license plate, e.g. ``CJ-123-ABC``."""
        county = self.random_element(self.license_plate_prefix)
        pattern = self.random_element(self.license_plate_suffix)
        return county + self.bothify(pattern, letters=string.ascii_uppercase)
| mit | 48fa9e190f18338257a82b98950876b8 | 16.893939 | 68 | 0.365792 | 3.679128 | false | false | false | false |
joke2k/faker | faker/providers/address/az_AZ/__init__.py | 1 | 15731 | from .. import Provider as AddressProvider
class Provider(AddressProvider):
city_formats = ("{{first_name}}",)
city_suffixes = ("şəhəri", "ş.")
street_suffixes = ("küçəsi", "küç.", "prospekti", "pr.")
village_suffixes = ("kəndi", "k.")
district_suffixes = ("rayonu", "ray.")
settlement_suffixes = ("qəsəbəsi", "qəs.")
building_number_formats = ("#",)
house_number_formats = ("#", "##", "###")
address_formats = (
"{{city}} {{city_suffix }}, {{street_name}} {{street_suffix}}, b. {{building_number}}, m. {{house_number}}",
"{{district}} {{district_suffix }}, {{street_name}} {{street_suffix}}, b. {{building_number}},"
" m. {{house_number}}",
"{{district}} {{district_suffix }}, {{village}} {{village_suffix}}, {{street_name}} {{street_suffix}}",
"{{district}} {{district_suffix }}, {{settlement}} {{settlement_suffix}}, {{street_name}} {{street_suffix}},"
" b. {{building_number}}, m. {{house_number}}",
)
street_name_formats = ("{{street}}",)
cities = [
"Bakı",
"Gəncə",
"Naxçıvan",
"Xankəndi",
"Lənkəran",
"Mingəçevir",
"Naftalan",
"Sumqayıt",
"Şəki",
"Şirvan",
"Yevlax",
]
countries = [
"Abxaziya",
"Akrotiri və Dekeliya",
"Aland adaları",
"Albaniya",
"Almaniya",
"Amerika Samoası",
"Andorra Knyazlığı",
"Angilya",
"Anqola",
"Antiqua və Barbuda",
"Argentina Respublikası",
"Aruba",
"Avstraliya",
"Avstriya",
"Azərbaycan",
"Baham adaları",
"Banqladeş",
"Barbados",
"Belçika",
"Beliz",
"Belarus",
"Benin",
"Bermud adaları",
"Birləşmiş Ərəb Əmirlikləri",
"ABŞ",
"Boliviya",
"Bolqarıstan",
"Bosniya və Herseqovina",
"Botsvana",
"Böyük Britaniya",
"Braziliya",
"Bruney",
"Burkina",
"Burundi",
"Butan",
"Bəhreyn",
"Cersi",
"Cəbəllütariq",
"Cənubi Afrika Respublikası",
"Cənubi Sudan",
"Cənubi Koreya",
"Cibuti",
"Çad",
"Çexiya",
"Monteneqro",
"Çili",
"Çin",
"Danimarka",
"Dominika",
"Dominikan",
"Efiopiya",
"Ekvador",
"Ekvatorial",
"Eritreya",
"Ermənistan",
"Estoniya",
"Əfqanıstan",
"Əlcəzair",
"Farer adaları",
"Fələstin",
"Fici",
"Fil Dişi Sahili",
"Filippin",
"Finlandiya",
"Folklend adaları",
"Fransa",
"Fransa Polineziyası",
"Gernsi",
"Gürcüstan",
"Haiti",
"Hindistan",
"Honduras",
"Honkonq",
"Xorvatiya",
"İndoneziya",
"İordaniya",
"İraq",
"İran",
"İrlandiya",
"İslandiya",
"İspaniya",
"İsrail",
"İsveç",
"İsveçrə",
"İtaliya",
"Kabo-Verde",
"Kamboca",
"Kamerun",
"Kanada",
"Kayman adaları",
"Keniya",
"Kipr",
"Kiribati",
"Kokos adaları",
"Kolumbiya",
"Komor adaları",
"Konqo Respublikası",
"Konqo Demokratik Respublikası",
"Kosovo",
"Kosta-Rika",
"Kuba",
"Kuk adaları",
"Küveyt",
"Qabon",
"Qambiya",
"Qana",
"Qətər",
"Qayana",
"Qazaxıstan",
"Qərbi Saxara",
"Qırğızıstan",
"Qrenada",
"Qrenlandiya",
"Quam",
"Qvatemala",
"Qvineya",
"Qvineya-Bisau",
"Laos",
"Latviya",
"Lesoto",
"Liberiya",
"Litva",
"Livan",
"Liviya",
"Lixtenşteyn",
"Lüksemburq",
"Macarıstan",
"Madaqaskar",
"Makao",
"Şimali Makedoniya",
"Malavi",
"Malayziya",
"Maldiv adaları",
"Mali",
"Malta",
"Marşall adaları",
"Mavriki",
"Mavritaniya",
"Mayotta",
"Meksika",
"Men adası",
"Mərakeş",
"Mərkəzi Afrika Respublikası",
"Mikroneziya",
"Milad adası",
"Misir",
"Myanma",
"Moldova",
"Monako",
"Monqolustan",
"Montserrat",
"Mozambik",
"Müqəddəs Yelena adası",
"Namibiya",
"Nauru",
"Nepal",
"Niderland",
"Niger",
"Nigeriya",
"Nikaraqua",
"Norveç",
"Oman",
"Özbəkistan",
"Pakistan",
"Palau",
"Panama",
"Papua",
"Paraqvay",
"Peru",
"Pitkern adaları",
"Polşa",
"Portuqaliya",
"Puerto-Riko",
"Ruanda",
"Rumıniya",
"Rusiya",
"Salvador",
"Samoa",
"San-Marino",
"San-Tome və Prinsipi",
"Seneqal",
"Sen-Bartelemi",
"Sent-Kits və Nevis",
"Sent-Lüsiya",
"Sen-Marten",
"Sen-Pyer və Mikelon",
"Sent-Vinsent və Qrenadin",
"Serbiya",
"Seyşel adaları",
"Səudiyyə Ərəbistanı",
"Sinqapur",
"Slovakiya",
"Sloveniya",
"Solomon adaları",
"Somali",
"Somalilend",
"Sudan",
"Surinam",
"Suriya",
"Esvatini",
"Syerra-Leone",
"Şərqi Timor",
"Şimali Kipr Türk Respublikası",
"Şimali Koreya",
"Şimali Marian adaları",
"Şri-Lanka",
"Tacikistan",
"Tanzaniya",
"Tailand",
"Tonqa",
"Toqo",
"Trinidad və Tobaqo",
"Tunis",
"Tuvalu",
"Türkiyə",
"Türkmənistan",
"Ukrayna",
"Uqanda",
"Uruqvay",
"Vanuatu",
"Vatikan",
"Venesuela",
"Vyetnam",
"Yamayka",
"Yaponiya",
"Yeni Kaledoniya",
"Yeni Zelandiya",
"Yəmən",
"Yunanıstan",
"Zambiya",
"Zimbabve",
]
districts = [
"Abşeron",
"Ağcabədi",
"Ağdam",
"Ağdaş",
"Ağstafa",
"Ağsu",
"Astara",
"Babək",
"Balakən",
"Beyləqan",
"Bərdə",
"Biləsuvar",
"Cəbrayıl",
"Cəlilabad",
"Culfa",
"Daşkəsən",
"Füzuli",
"Gədəbəy",
"Goranboy",
"Göyçay",
"Göygöl",
"Hacıqabul",
"Xaçmaz",
"Xızı",
"Xocalı",
"Xocavənd",
"İmişli",
"İsmayıllı",
"Kəlbəcər",
"Kəngərli",
"Kürdəmir",
"Qax",
"Qazax",
"Qəbələ",
"Qobustan",
"Quba",
"Qubadlı",
"Qusar",
"Laçın",
"Lerik",
"Lənkəran",
"Masallı",
"Neftçala",
"Oğuz",
"Ordubad*",
"Saatlı",
"Sabirabad",
"Salyan",
"Samux",
"Sədərək",
"Siyəzən",
"Şabran",
"Şahbuz",
"Şamaxı",
"Şəki",
"Şəmkir",
"Şərur",
"Şuşa",
"Tərtər",
"Tovuz",
"Ucar",
"Yardımlı",
"Yevlax",
"Zaqatala",
"Zəngilan",
"Zərdab",
]
settlements = [
"Ləki",
"Nehrəm",
"Soyuqbulaq",
"Şəkərli",
"Cəhri",
"Qarayazi",
"Həzi asalanov",
"Poylu",
"Ceyrançöl",
"Saloğlu",
"Vurğun",
"Qəhramanli",
"Yuxari aran",
"Mayak",
"Milabad",
"Örənqala",
"Baharabad",
"Günəş",
"Orconikidze",
"Digah",
"Güzdək",
"Hökməli",
"Qobu",
"Mehdiabad",
"Saray",
"Taxtakörpü",
"Hindarx",
"Şərq",
"Sarisu",
"Mil",
"Türklər",
"Bahar",
"Babək",
"Keşlə",
"Qabaqçöl",
"Xindiristan",
"Bənövşələr",
"Birinci alibəyli",
"Birinci baharli",
"Birinci dördyol",
"Birinci quzanli",
"İkinci alibəyli",
"İkinci baharli",
"İkinci dördyol",
"İmamqulubəyli",
"Qasimbəyli",
"Səfərli",
"Təzəkənd",
"Ergi",
"Yeni ergi",
"Aşaği qəsil kəndi",
"Orta qəsil kəndi",
"Türyançay",
"Yuxari qəsil kəndi",
"Birinci zobucuq",
"İkinci zobucuq",
"Üçüncü zobucuq",
"Dördüncü zobucuq",
"Beşinci zobucuq",
"Fin",
"Horadiz",
"Qayidiş 2",
"Hacikənd",
"Yuxari ağcakənd",
"Qizilhacili",
"Goran",
"Aşaği ağcakənd",
"Qazanbulaq",
"Kürəkçay",
"Ayaq qərvənd",
"Quzanli",
"Navahi",
"Pirsaat",
"Muğan",
"Padar",
"Navahi",
"Vətəgə",
"Bəhrəmtəpə",
"Lahic",
"Basqal",
"Şəfəq",
"Yeni mil",
"Xocahəsən",
"Sulutəpə",
"Biləcəri",
"Binəqədi",
"28 may",
"Rəsulzadə",
"Qumlaq",
"Xələfli",
"Xudafərin",
"Mahmudlu",
"Novoqolovka",
"Alunitdağ",
"Quşçu körpüsü",
"Daşkəsən",
"Quşçu",
"Yuxari daşkəsən",
"1 nömrəli qayidiş",
"2 nömrəli qayidiş",
]
streets = [
"A.AĞAYEV",
"A.M.CÜMƏ",
"TƏBRİZ",
"XALİQ ABBASOV",
"İSLAM ABIŞOV",
"HEYDƏR ƏLİYEV",
"XƏTAİ",
"GÖL ƏTRAFI",
"Z.XƏLİL",
"2-Cİ SÜLH",
"Q.PİRİMOV",
"ASİF MƏMMƏDOV",
"R.HACIYEV",
"FƏXRƏDDİN ƏSƏDOV",
"K.MARKS",
"OKTAY KƏRİMOV",
"Z.KAZIMZADƏ",
"HƏSƏNOĞLU",
"KAVEROÇKİN",
"P.ÇAYKOVSKİ",
"HÜSEYN ARİF",
"HACI MURAD",
"BAKI-BATUMİ",
"NEMƏT QULİYEV",
"R.AXUNDOV",
"AKAD.H.ƏLİYEV",
"RƏHİM ŞIXƏLİYEV",
"YUSİFZADƏ",
"E.QOCAYEV",
"TARZƏN H.MƏMMƏDOV",
"İ.HİDAYƏTZADƏ",
"T.ƏLİYEV",
"MƏMMƏD ARAZ",
"V.PLOTNİKOV",
"Ə.ORUCƏLİYEV",
"Z.BÜNYADOV",
"İ.DADAŞOV",
"ƏLƏSGƏR QAYIBOV",
"M.ARİF",
"M.QASQAY",
"Ə.ƏBDÜLRƏHİMOV",
"İZZƏT HƏMİDOV",
"AZADLIQ",
"ARİF HEYDƏROV",
"N.SÜLEYMANOV",
"ŞAHİN MUSTAFAYEV",
"Ə.VAHİD",
"Ü.BÜNYADZADƏ",
"NAZİM HACIYEV",
"24-CÜ KORPÜ",
"1-Cİ MƏDƏN",
"Y.HÜSEYNOV",
"22-Cİ DAĞLIQ",
"SÜD FERMASI",
"ÇAPAYEV",
"E. NƏCƏFOV",
"FAİQ RÜSTƏMOV",
"28 MAY",
"ZABİTLƏR EVİ",
"S.S. AXUNDOV",
"GƏNCLƏR DOSTLUĞU",
"H.SULTANOV",
"ƏHMƏD QASIMOV",
"XURŞUD AĞAYEV",
"NATƏVAN",
"YENİ MASSİV",
"MƏLİK ASLANOV KÜÇƏSİ,",
"VİDADİ",
"8 MART",
"İ.HACIYEV",
"Y. HƏSƏNOV",
]
villages = [
"Kələki",
"Binələr",
"Davudlu",
"Birinci aral",
"İkinci aral",
"Cardam",
"Qaradeyin",
"Qarağan şixlar",
"Qarağan sədi",
"Qəribli",
"Qolqəti",
"Mürsəl",
"Şordəhnə",
"Tofiqi",
"Yenicə",
"Ərəbocaği",
"Hapitli",
"Ovçulu",
"Şəkili",
"Yuxari ağcayazi",
"Aşaği ağcayazi",
"Yuxari ləki",
"Düzqişlaq",
"Kolayir",
"Koçvəlili",
"Xətai",
"Yenigün",
"Qaçaq kərəm",
"Poylu",
"Tatli",
"Yaradullu",
"Xilxina",
"Mollacəfərli",
"Ağgöl",
"Aşaği göycəli",
"Aşaği kəsəmən",
"Böyük kəsik",
"Dağ kəsəmən",
"Eynalli",
"Göycəli",
"Həsənsu",
"Kolxələfli",
"Koçəsgər",
"Köhnəqişlaq",
"Qarahəsənli",
"Soyuqbulaqlar",
"Qiraq kəsəmən",
"Didivar",
"Muğanli",
"Pirili",
"Sadiqli",
"Uzunoba",
"Qaraməmmədli",
"Navahi",
"Ülgüc",
"Ərəbmehdibəy",
"Dədəli",
"Qasimbəyli",
"Ərəbsarvan",
"Haciqədirli",
"Göydəlləkli",
"Ərəbuşaği",
"Ağalarbəyli",
"Maşadqanli",
"Aratli curuğlu",
"Keşdiməz",
"Bozavand",
"Ağarx",
"Qarabağli",
"Xanbulaq",
"Kəndaxan",
"Yenilik",
"Kövlüc",
"Elabad",
"Yenikənd",
"Hingar",
"Girdə",
"Gursulu",
"Qaraqoyunlu",
"Musabəyli",
"İlxiçi",
"Hacisəmədli",
"Qəşəd",
"Kəndoba",
"Cəfərli",
"Haciuşaği",
"Cəlayir",
"Abasxanli",
"Kalva",
"Suraxani",
"Dilman",
"Haciman",
"Xatman",
"Növcü",
"Axundlu",
"Məlikçobanli",
]
def house_number(self):
"""
:example: 'm. 49'
"""
return self.numerify(self.random_element(self.house_number_formats))
def city(self):
"""
:example: 'Xankəndi'
"""
return self.random_element(self.cities)
def city_suffix(self):
"""
:example: 'ş.'
"""
return self.random_element(self.city_suffixes)
def street(self):
"""
:example: 'A.AĞAYEV'
"""
return self.random_element(self.streets)
def street_suffix(self):
"""
:example: 'küç.'
"""
return self.random_element(self.street_suffixes)
def village(self):
"""
:example: 'Didivar'
"""
return self.random_element(self.villages)
def village_suffix(self):
"""
:example: 'k.'
"""
return self.random_element(self.village_suffixes)
def district(self):
"""
:example: 'Babək'
"""
return self.random_element(self.districts)
def district_suffix(self):
"""
:example: 'r.'
"""
return self.random_element(self.district_suffixes)
def settlement(self):
"""
:example: 'Horadiz'
"""
return self.random_element(self.settlements)
def settlement_suffix(self):
"""
:example: 'qəs.'
"""
return self.random_element(self.settlement_suffixes)
def administrative_unit(self):
"""
:example: 'Xankəndi'
"""
return self.random_element(self.districts + self.cities)
def postcode(self):
"""
:example: 'AZ1027'
"""
index = self.generator.random.randint(900, 6600)
return "AZ%04d" % index if index > 999 else "AZ0%03d" % index
def postalcode(self):
return self.postcode()
| mit | 5e0e5d4a65442e317d136c948ba20b76 | 20.842258 | 117 | 0.43338 | 2.436713 | false | false | false | false |
joke2k/faker | faker/documentor.py | 1 | 4220 | import inspect
import warnings
from enum import Enum, auto
from typing import Any, Dict, List, Optional, Tuple, Type, Union
from .generator import Generator
from .providers import BaseProvider
from .proxy import Faker
class FakerEnum(Enum):
    """Minimal two-member enum used to exercise ``faker.providers.enum``.

    ``auto`` must be *called*: the previous bare ``A = auto`` assigned the
    ``auto`` class itself as the value of both members, so ``B`` collapsed
    into an alias of ``A`` and the enum effectively had a single member.
    """

    A = auto()
    B = auto()
class Documentor:
    """Collect example output for every formatter exposed by a generator.

    For each provider the class builds a mapping of human-readable call
    signatures (``fake.name(arg=default, ...)``) to one sample value produced
    by actually invoking the formatter.
    """

    def __init__(self, generator: Union[Generator, Faker]) -> None:
        """
        :param generator: a localized Generator with providers filled,
                          for which to write the documentation
        :type generator: faker.Generator()
        """
        self.generator = generator
        self.max_name_len: int = 0
        self.already_generated: List[str] = []

    def get_formatters(
        self,
        locale: Optional[str] = None,
        excludes: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> List[Tuple[BaseProvider, Dict[str, str]]]:
        """Return ``(provider, {signature: example})`` pairs for ``generator``.

        :param locale: if given, skip providers bound to a different locale
        :param excludes: formatter names to treat as already documented
        :param kwargs: forwarded to :meth:`get_provider_formatters`
        """
        self.max_name_len = 0
        self.already_generated = [] if excludes is None else excludes[:]
        formatters = []
        providers: List[BaseProvider] = self.generator.get_providers()
        # Walk providers in reverse so that each formatter is attributed to
        # the provider that first defined it (overrides come later in the
        # provider list and are recorded in ``already_generated``).
        for provider in providers[::-1]:
            if locale and provider.__lang__ and provider.__lang__ != locale:
                continue
            formatters.append(
                (provider, self.get_provider_formatters(provider, **kwargs)),
            )
        return formatters

    def get_provider_formatters(
        self,
        provider: BaseProvider,
        prefix: str = "fake.",
        with_args: bool = True,
        with_defaults: bool = True,
    ) -> Dict[str, str]:
        """Map each public formatter of ``provider`` to an example value.

        :param prefix: string prepended to every signature (e.g. ``"fake."``)
        :param with_args: include the argument list; the special value
            ``"first"`` stops after the first argument
        :param with_defaults: render default values next to their arguments
        """
        formatters = {}

        for name, method in inspect.getmembers(provider, inspect.ismethod):
            # skip 'private' methods and formatters documented earlier
            if name.startswith("_") or name in self.already_generated:
                continue

            arguments = []
            faker_args: List[Union[str, Type[Enum]]] = []
            faker_kwargs = {}

            # Formatters with heavyweight defaults get small example inputs
            # so documentation generation stays fast.
            if name == "binary":
                faker_kwargs["length"] = 1024
            elif name in ["zip", "tar"]:
                faker_kwargs.update(
                    {
                        "uncompressed_size": 1024,
                        "min_file_size": 512,
                    }
                )

            if name == "enum":
                faker_args = [FakerEnum]

            if with_args:
                # retrieve all parameters, minus the implicit receiver
                argspec = inspect.getfullargspec(method)
                lst = [x for x in argspec.args if x not in ["self", "cls"]]
                defaults = argspec.defaults or ()
                # ``defaults`` aligns with the *tail* of the argument list:
                # the first ``len(lst) - len(defaults)`` arguments have no
                # default. Indexing ``defaults[i]`` directly (as before)
                # attached defaults to the wrong leading parameters.
                offset = len(lst) - len(defaults)
                for i, arg in enumerate(lst):
                    if with_defaults and i >= offset:
                        default = defaults[i - offset]
                        if isinstance(default, str):
                            default = repr(default)
                        else:
                            # TODO check default type
                            default = f"{default}"
                        arg = f"{arg}={default}"

                    arguments.append(arg)
                    if with_args == "first":
                        break

                if with_args != "first":
                    if argspec.varargs:
                        arguments.append("*" + argspec.varargs)
                    if argspec.varkw:
                        arguments.append("**" + argspec.varkw)

            # build fake method signature
            signature = f"{prefix}{name}({', '.join(arguments)})"

            try:
                # make a fake example
                example = self.generator.format(name, *faker_args, **faker_kwargs)
            except (AttributeError, ValueError) as e:
                warnings.warn(str(e))
                continue
            formatters[signature] = example

            self.max_name_len = max(self.max_name_len, len(signature))
            self.already_generated.append(name)

        return formatters

    @staticmethod
    def get_provider_name(provider_class: BaseProvider) -> str:
        """Return the ``__provider__`` label of a provider class."""
        return provider_class.__provider__
| mit | 221db1a4d2908a6acf02f008f8c70c2d | 31.713178 | 82 | 0.500474 | 4.861751 | false | false | false | false |
joke2k/faker | faker/providers/person/fr_FR/__init__.py | 1 | 12993 | from .. import Provider as PersonProvider
class Provider(PersonProvider):
formats_female = (
"{{first_name_female}} {{last_name}}",
"{{first_name_female}} {{last_name}}",
"{{first_name_female}} {{last_name}}",
"{{first_name_female}} {{last_name}}",
"{{first_name_female}} {{last_name}}",
"{{first_name_female}} {{last_name}}",
"{{first_name_female}} {{prefix}} {{last_name}}",
"{{first_name_female}} {{last_name}}-{{last_name}}",
"{{first_name_female}}-{{first_name_female}} {{last_name}}",
"{{first_name_female}} {{last_name}} {{prefix}} {{last_name}}",
)
formats_male = (
"{{first_name_male}} {{last_name}}",
"{{first_name_male}} {{last_name}}",
"{{first_name_male}} {{last_name}}",
"{{first_name_male}} {{last_name}}",
"{{first_name_male}} {{last_name}}",
"{{first_name_male}} {{last_name}}",
"{{first_name_male}} {{prefix}} {{last_name}}",
"{{first_name_male}} {{last_name}}-{{last_name}}",
"{{first_name_male}}-{{first_name_male}} {{last_name}}",
"{{first_name_male}} {{last_name}} {{prefix}} {{last_name}}",
)
formats = formats_male + formats_female
first_names_male = (
"Adrien",
"Aimé",
"Alain",
"Alexandre",
"Alfred",
"Alphonse",
"André",
"Antoine",
"Arthur",
"Auguste",
"Augustin",
"Benjamin",
"Benoît",
"Bernard",
"Bertrand",
"Charles",
"Christophe",
"Daniel",
"David",
"Denis",
"Édouard",
"Émile",
"Emmanuel",
"Éric",
"Étienne",
"Eugène",
"François",
"Franck",
"Frédéric",
"Gabriel",
"Georges",
"Gérard",
"Gilbert",
"Gilles",
"Grégoire",
"Guillaume",
"Guy",
"William",
"Henri",
"Honoré",
"Hugues",
"Isaac",
"Jacques",
"Jean",
"Jérôme",
"Joseph",
"Jules",
"Julien",
"Laurent",
"Léon",
"Louis",
"Luc",
"Lucas",
"Marc",
"Marcel",
"Martin",
"Matthieu",
"Maurice",
"Michel",
"Nicolas",
"Noël",
"Olivier",
"Patrick",
"Paul",
"Philippe",
"Pierre",
"Raymond",
"Rémy",
"René",
"Richard",
"Robert",
"Roger",
"Roland",
"Sébastien",
"Stéphane",
"Théodore",
"Théophile",
"Thibaut",
"Thibault",
"Thierry",
"Thomas",
"Timothée",
"Tristan",
"Victor",
"Vincent",
"Xavier",
"Yves",
"Zacharie",
)
first_names_female = (
"Adélaïde",
"Adèle",
"Adrienne",
"Agathe",
"Agnès",
"Aimée",
"Alexandrie",
"Alix",
"Alexandria",
"Alex",
"Alice",
"Amélie",
"Anaïs",
"Anastasie",
"Andrée",
"Anne",
"Anouk",
"Antoinette",
"Arnaude",
"Astrid",
"Audrey",
"Aurélie",
"Aurore",
"Bernadette",
"Brigitte",
"Capucine",
"Caroline",
"Catherine",
"Cécile",
"Céline",
"Célina",
"Chantal",
"Charlotte",
"Christelle",
"Christiane",
"Christine",
"Claire",
"Claudine",
"Clémence",
"Colette",
"Constance",
"Corinne",
"Danielle",
"Denise",
"Diane",
"Dorothée",
"Édith",
"Éléonore",
"Élisabeth",
"Élise",
"Élodie",
"Émilie",
"Emmanuelle",
"Françoise",
"Frédérique",
"Gabrielle",
"Geneviève",
"Hélène",
"Henriette",
"Hortense",
"Inès",
"Isabelle",
"Jacqueline",
"Jeanne",
"Jeannine",
"Joséphine",
"Josette",
"Julie",
"Juliette",
"Laetitia",
"Laure",
"Laurence",
"Lorraine",
"Louise",
"Luce",
"Lucie",
"Lucy",
"Madeleine",
"Manon",
"Marcelle",
"Margaux",
"Margaud",
"Margot",
"Marguerite",
"Margot",
"Margaret",
"Maggie",
"Marianne",
"Marie",
"Marine",
"Marthe",
"Martine",
"Maryse",
"Mathilde",
"Michèle",
"Michelle",
"Michelle",
"Monique",
"Nathalie",
"Nath",
"Nathalie",
"Nicole",
"Noémi",
"Océane",
"Odette",
"Olivie",
"Patricia",
"Paulette",
"Pauline",
"Pénélope",
"Philippine",
"Renée",
"Sabine",
"Simone",
"Sophie",
"Stéphanie",
"Susanne",
"Suzanne",
"Susan",
"Suzanne",
"Sylvie",
"Thérèse",
"Valentine",
"Valérie",
"Véronique",
"Victoire",
"Virginie",
"Zoé",
"Camille",
"Claude",
"Dominique",
)
first_names = first_names_male + first_names_female
last_names = (
"Martin",
"Bernard",
"Thomas",
"Robert",
"Petit",
"Dubois",
"Richard",
"Garcia",
"Durand",
"Moreau",
"Lefebvre",
"Simon",
"Laurent",
"Michel",
"Leroy",
"Martinez",
"David",
"Fontaine",
"Da Silva",
"Morel",
"Fournier",
"Dupont",
"Bertrand",
"Lambert",
"Rousseau",
"Girard",
"Roux",
"Vincent",
"Lefèvre",
"Boyer",
"Lopez",
"Bonnet",
"Andre",
"François",
"Mercier",
"Muller",
"Guérin",
"Legrand",
"Sanchez",
"Garnier",
"Chevalier",
"Faure",
"Perez",
"Clément",
"Fernandez",
"Blanc",
"Robin",
"Morin",
"Gauthier",
"Pereira",
"Perrin",
"Roussel",
"Henry",
"Duval",
"Gautier",
"Nicolas",
"Masson",
"Marie",
"Noël",
"Ferreira",
"Lemaire",
"Mathieu",
"Rivière",
"Denis",
"Marchand",
"Rodriguez",
"Dumont",
"Payet",
"Lucas",
"Dufour",
"Dos Santos",
"Joly",
"Blanchard",
"Meunier",
"Rodrigues",
"Caron",
"Gérard",
"Fernandes",
"Brunet",
"Meyer",
"Barbier",
"Leroux",
"Renard",
"Goncalves",
"Gaillard",
"Brun",
"Roy",
"Picard",
"Giraud",
"Roger",
"Schmitt",
"Colin",
"Arnaud",
"Vidal",
"Gonzalez",
"Lemoine",
"Roche",
"Aubert",
"Olivier",
"Leclercq",
"Pierre",
"Philippe",
"Bourgeois",
"Renaud",
"Martins",
"Leclerc",
"Guillaume",
"Lacroix",
"Lecomte",
"Benoit",
"Fabre",
"Carpentier",
"Vasseur",
"Louis",
"Hubert",
"Jean",
"Dumas",
"Rolland",
"Grondin",
"Rey",
"Huet",
"Gomez",
"Dupuis",
"Guillot",
"Berger",
"Moulin",
"Hoarau",
"Menard",
"Deschamps",
"Fleury",
"Adam",
"Boucher",
"Poirier",
"Bertin",
"Charles",
"Aubry",
"Da Costa",
"Royer",
"Dupuy",
"Maillard",
"Paris",
"Baron",
"Lopes",
"Guyot",
"Carre",
"Jacquet",
"Renault",
"Hervé",
"Charpentier",
"Klein",
"Cousin",
"Collet",
"Léger",
"Ribeiro",
"Hernandez",
"Bailly",
"Schneider",
"Le Gall",
"Ruiz",
"Langlois",
"Bouvier",
"Gomes",
"Prévost",
"Julien",
"Lebrun",
"Breton",
"Germain",
"Millet",
"Boulanger",
"Rémy",
"Le Roux",
"Daniel",
"Marques",
"Maillot",
"Leblanc",
"Le Goff",
"Barre",
"Perrot",
"Lévêque",
"Marty",
"Benard",
"Monnier",
"Hamon",
"Pelletier",
"Alves",
"Étienne",
"Marchal",
"Poulain",
"Tessier",
"Lemaître",
"Guichard",
"Besson",
"Mallet",
"Hoareau",
"Gillet",
"Weber",
"Jacob",
"Collin",
"Chevallier",
"Perrier",
"Michaud",
"Carlier",
"Delaunay",
"Chauvin",
"Alexandre",
"Maréchal",
"Antoine",
"Lebon",
"Cordier",
"Lejeune",
"Bouchet",
"Pasquier",
"Legros",
"Delattre",
"Humbert",
"De Oliveira",
"Briand",
"Lamy",
"Launay",
"Gilbert",
"Perret",
"Lesage",
"Gay",
"Nguyen",
"Navarro",
"Besnard",
"Pichon",
"Hebert",
"Cohen",
"Pons",
"Lebreton",
"Sauvage",
"De Sousa",
"Pineau",
"Albert",
"Jacques",
"Pinto",
"Barthelemy",
"Turpin",
"Bigot",
"Lelièvre",
"Georges",
"Reynaud",
"Ollivier",
"Martel",
"Voisin",
"Leduc",
"Guillet",
"Vallée",
"Coulon",
"Camus",
"Marin",
"Teixeira",
"Costa",
"Mahe",
"Didier",
"Charrier",
"Gaudin",
"Bodin",
"Guillou",
"Grégoire",
"Gros",
"Blanchet",
"Buisson",
"Blondel",
"Paul",
"Dijoux",
"Barbe",
"Hardy",
"Laine",
"Evrard",
"Laporte",
"Rossi",
"Joubert",
"Regnier",
"Tanguy",
"Gimenez",
"Allard",
"Devaux",
"Morvan",
"Lévy",
"Dias",
"Courtois",
"Lenoir",
"Berthelot",
"Pascal",
"Vaillant",
"Guilbert",
"Thibault",
"Moreno",
"Duhamel",
"Colas",
"Masse",
"Baudry",
"Bruneau",
"Verdier",
"Delorme",
"Blin",
"Guillon",
"Mary",
"Coste",
"Pruvost",
"Maury",
"Allain",
"Valentin",
"Godard",
"Joseph",
"Brunel",
"Marion",
"Texier",
"Seguin",
"Raynaud",
"Bourdon",
"Raymond",
"Bonneau",
"Chauvet",
"Maurice",
"Legendre",
"Loiseau",
"Ferrand",
"Toussaint",
"Techer",
"Lombard",
"Lefort",
"Couturier",
"Bousquet",
"Diaz",
"Riou",
"Clerc",
"Weiss",
"Imbert",
"Jourdan",
"Delahaye",
"Gilles",
"Guibert",
"Bègue",
"Descamps",
"Delmas",
"Peltier",
"Dupré",
"Chartier",
"Martineau",
"Laroche",
"Leconte",
"Maillet",
"Parent",
"Labbé",
"Potier",
"Bazin",
"Normand",
"Pottier",
"Torres",
"Lagarde",
"Blot",
"Jacquot",
"Lemonnier",
"Grenier",
"Rocher",
"Bonnin",
"Boutin",
"Fischer",
"Munoz",
"Neveu",
"Lacombe",
"Mendès",
"Delannoy",
"Auger",
"Wagner",
"Fouquet",
"Mace",
"Ramos",
"Pages",
"Petitjean",
"Chauveau",
"Foucher",
"Peron",
"Guyon",
"Gallet",
"Rousset",
"Traore",
"Bernier",
"Vallet",
"Letellier",
"Bouvet",
"Hamel",
"Chrétien",
"Faivre",
"Boulay",
"Thierry",
"Samson",
"Ledoux",
"Salmon",
"Gosselin",
"Lecoq",
"Pires",
"Leleu",
"Becker",
"Diallo",
"Merle",
"Valette",
)
prefixes = ("de", "de la", "Le", "du")
| mit | 5ebca554b2dde5ac05881db793fbbf1d | 18.46003 | 71 | 0.36971 | 3.082915 | false | false | false | false |
joke2k/faker | faker/providers/ssn/fi_FI/__init__.py | 1 | 2485 | import datetime
from .. import Provider as SsnProvider
class Provider(SsnProvider):
    def ssn(self, min_age: int = 0, max_age: int = 105, artificial: bool = False) -> str:
        """
        Returns 11 character Finnish personal identity code (Henkilötunnus,
        HETU, Swedish: Personbeteckning). This function assigns random
        gender to person.

        HETU consists of eleven characters of the form DDMMYYCZZZQ, where
        DDMMYY is the date of birth, C the century sign, ZZZ the individual
        number and Q the control character (checksum). The sign for the
        century is either + (1800–1899), - (1900–1999), or A (2000–2099).
        The individual number ZZZ is odd for males and even for females.
        For people born in Finland its range is 002-899
        (larger numbers may be used in special cases).
        An example of a valid code is 311280-888Y.

        https://en.wikipedia.org/wiki/National_identification_number#Finland

        :param min_age: minimum age, in years, of the generated person
        :param max_age: maximum age, in years, of the generated person
        :param artificial: if True, draw the individual number from the
            900-999 series reserved for artificial/test identities
        """

        def _checksum(hetu):
            # The control character indexes this alphabet by the 9-digit
            # number (date + individual number) modulo 31.
            checksum_characters = "0123456789ABCDEFHJKLMNPRSTUVWXY"
            return checksum_characters[int(hetu) % 31]

        age = datetime.timedelta(days=self.generator.random.randrange(min_age * 365, max_age * 365))
        birthday = datetime.date.today() - age
        hetu_date = "%02d%02d%s" % (
            birthday.day,
            birthday.month,
            str(birthday.year)[-2:],
        )
        # randrange() excludes its upper bound, so the bounds below are
        # (900, 1000) and (2, 900): the previous (900, 999) / (2, 899)
        # could never produce the valid top numbers 999 and 899. The local
        # is also renamed so it no longer shadows the builtin ``range``.
        bounds = (900, 1000) if artificial is True else (2, 900)
        suffix = str(self.generator.random.randrange(*bounds)).zfill(3)
        checksum = _checksum(hetu_date + suffix)
        separator = self._get_century_code(birthday.year)
        hetu = "".join([hetu_date, separator, suffix, checksum])
        return hetu

    @staticmethod
    def _get_century_code(year: int) -> str:
        """Return the century separator character for ``year``.

        :raises ValueError: for years outside 1800-2999.
        """
        if 2000 <= year < 3000:
            separator = "A"
        elif 1900 <= year < 2000:
            separator = "-"
        elif 1800 <= year < 1900:
            separator = "+"
        else:
            raise ValueError("Finnish SSN do not support people born before the year 1800 or after the year 2999")
        return separator

    vat_id_formats = ("FI########",)

    def vat_id(self) -> str:
        """
        http://ec.europa.eu/taxation_customs/vies/faq.html#item_11
        :return: A random Finnish VAT ID
        """
        return self.bothify(self.random_element(self.vat_id_formats))
| mit | 26013738c78adcdca13bb098e76cd6f7 | 38.333333 | 114 | 0.61138 | 3.644118 | false | false | false | false |
joke2k/faker | faker/providers/person/it_IT/__init__.py | 1 | 32695 | from .. import Provider as PersonProvider
class Provider(PersonProvider):
formats_male = (
"{{first_name_male}} {{last_name}}",
"{{first_name_male}} {{last_name}}",
"{{first_name_male}} {{last_name}}",
"{{first_name_male}} {{last_name}}",
"{{first_name_male}} {{last_name}}-{{last_name}}",
"{{prefix_male}} {{first_name_male}} {{last_name}}",
)
formats_female = (
"{{first_name_female}} {{last_name}}",
"{{first_name_female}} {{last_name}}",
"{{first_name_female}} {{last_name}}",
"{{first_name_female}} {{last_name}}",
"{{first_name_female}} {{last_name}}-{{last_name}}",
"{{prefix_female}} {{first_name_female}} {{last_name}}",
)
formats = formats_male + formats_female
# source: https://en.wikipedia.org/w/index.php?title=Category:Italian_masculine_given_names
first_names_male = (
"Achille",
"Adamo",
"Adelmo",
"Adriano",
"Agnolo",
"Agostino",
"Alberico",
"Alberto",
"Alderano",
"Aldo",
"Alessandro",
"Alessio",
"Alfio",
"Alfredo",
"Alphons",
"Amadeo",
"Amedeo",
"Amico",
"Amleto",
"Angelo",
"Annibale",
"Ansaldo",
"Antonello",
"Antonino",
"Antonio",
"Armando",
"Arnaldo",
"Arnulfo",
"Arsenio",
"Arturo",
"Atenulf",
"Augusto",
"Azeglio",
"Baccio",
"Baldassare",
"Bartolomeo",
"Benedetto",
"Benito",
"Benvenuto",
"Beppe",
"Bernardo",
"Biagio",
"Bruno",
"Calcedonio",
"Calogero",
"Camillo",
"Carlo",
"Carmelo",
"Cesare",
"Cipriano",
"Cirillo",
"Ciro",
"Claudio",
"Coluccio",
"Coriolano",
"Corrado",
"Costantino",
"Costanzo",
"Damiano",
"Daniele",
"Danilo",
"Dante",
"Dario",
"Delfino",
"Dino",
"Dionigi",
"Domenico",
"Donatello",
"Donato",
"Durante",
"Edoardo",
"Elladio",
"Elmo",
"Emilio",
"Ennio",
"Enrico",
"Enzio",
"Enzo",
"Eraldo",
"Ermanno",
"Ermenegildo",
"Ermes",
"Ernesto",
"Ettore",
"Ezio",
"Fabio",
"Fabrizio",
"Fausto",
"Fedele",
"Federico",
"Federigo",
"Ferdinando",
"Filippo",
"Fiorenzo",
"Fiorino",
"Flavio",
"Francesco",
"Franco",
"Fredo",
"Fulvio",
"Gabriele",
"Gaetano",
"Galasso",
"Gaspare",
"Gastone",
"Geronimo",
"Giacinto",
"Giacobbe",
"Giacomo",
"Giampaolo",
"Giampiero",
"Gian",
"Giancarlo",
"Gianfrancesco",
"Gianfranco",
"Gianluca",
"Gianluigi",
"Gianmarco",
"Gianni",
"Gianpaolo",
"Gianpietro",
"Gilberto",
"Gino",
"Gioacchino",
"Gioachino",
"Gioele",
"Gioffre",
"Gionata",
"Giorgio",
"Giosuè",
"Giovanni",
"Girolamo",
"Giuliano",
"Giulio",
"Giuseppe",
"Giustino",
"Goffredo",
"Graziano",
"Greco",
"Guarino",
"Guglielmo",
"Guido",
"Gustavo",
"Hugo",
"Ignazio",
"Ippazio",
"Ivan",
"Ivo",
"Jacopo",
"Lamberto",
"Lando",
"Laureano",
"Lazzaro",
"Leonardo",
"Leone",
"Leopoldo",
"Liberto",
"Livio",
"Lodovico",
"Lorenzo",
"Luca",
"Luchino",
"Luciano",
"Lucio",
"Ludovico",
"Luigi",
"Manuel",
"Marcantonio",
"Marcello",
"Marco",
"Mariano",
"Mario",
"Martino",
"Martino",
"Massimiliano",
"Massimo",
"Matteo",
"Mattia",
"Maurilio",
"Maurizio",
"Mauro",
"Michelangelo",
"Michele",
"Micheletto",
"Michelotto",
"Milo",
"Mirco",
"Mirko",
"Nanni",
"Napoleone",
"Niccolò",
"Nico",
"Nicola",
"Nicolò",
"Nino",
"Orazio",
"Oreste",
"Orlando",
"Osvaldo",
"Ottavio",
"Ottone",
"Panfilo",
"Paolo",
"Paride",
"Pasqual",
"Pasquale",
"Patrizio",
"Pellegrino",
"Pier",
"Pierangelo",
"Piergiorgio",
"Piergiuseppe",
"Pierluigi",
"Piermaria",
"Piero",
"Pierpaolo",
"Piersanti",
"Pietro",
"Pompeo",
"Pomponio",
"Puccio",
"Raffaele",
"Raffaellino",
"Raffaello",
"Raimondo",
"Ranieri",
"Rembrandt",
"Renzo",
"Riccardo",
"Ricciotti",
"Roberto",
"Rocco",
"Rodolfo",
"Rolando",
"Roman",
"Romeo",
"Romolo",
"Ronaldo",
"Rosario",
"Ruggero",
"Ruggiero",
"Sabatino",
"Salvatore",
"Salvi",
"Sandro",
"Sante",
"Santino",
"Saverio",
"Sebastiano",
"Sergius",
"Severino",
"Silvestro",
"Silvio",
"Simone",
"Stefano",
"Telemaco",
"Temistocle",
"Tiziano",
"Toni",
"Tonino",
"Torquato",
"Tullio",
"Ubaldo",
"Uberto",
"Ugo",
"Ugolino",
"Umberto",
"Valerio",
"Venancio",
"Vincentio",
"Vincenzo",
"Virgilio",
"Vito",
"Vittorio",
)
# source: https://en.wikipedia.org/wiki/Category:Italian_feminine_given_names
first_names_female = (
"Adelasia",
"Adele",
"Adriana",
"Alessandra",
"Alessia",
"Alina",
"Allegra",
"Amalia",
"Amanda",
"Angelica",
"Angelina",
"Anita",
"Annalisa",
"Annamaria",
"Annetta",
"Annunziata",
"Antonella",
"Antonia",
"Antonietta",
"Antonina",
"Aria",
"Aurora",
"Barbara",
"Beatrice",
"Berenice",
"Bettina",
"Bianca",
"Bianca",
"Camilla",
"Carla",
"Carolina",
"Cassandra",
"Caterina",
"Cecilia",
"Chiara",
"Claudia",
"Clelia",
"Concetta",
"Cristina",
"Daria",
"Diana",
"Dina",
"Dolores",
"Donatella",
"Donna",
"Eleanora",
"Elena",
"Eliana",
"Elisa",
"Elvira",
"Emma",
"Erika",
"Etta",
"Eugenia",
"Eva",
"Evangelista",
"Fabia",
"Fabrizia",
"Federica",
"Fernanda",
"Fiamma",
"Filippa",
"Flavia",
"Flora",
"Fortunata",
"Francesca",
"Gabriella",
"Gelsomina",
"Gemma",
"Germana",
"Giada",
"Gianna",
"Giorgia",
"Giovanna",
"Giulia",
"Giuliana",
"Giulietta",
"Giuseppina",
"Gloria",
"Graziella",
"Greca",
"Griselda",
"Ida",
"Ilaria",
"Imelda",
"Iolanda",
"Irma",
"Isa",
"Isabella",
"Jolanda",
"Lara",
"Laura",
"Lauretta",
"Letizia",
"Liana",
"Licia",
"Lidia",
"Liliana",
"Lilla",
"Lina",
"Lisa",
"Livia",
"Lolita",
"Loredana",
"Loretta",
"Lucia",
"Luciana",
"Lucrezia",
"Ludovica",
"Luigina",
"Luisa",
"Marcella",
"Margherita",
"Maria",
"Maria",
"Maria",
"Mariana",
"Marina",
"Marisa",
"Marissa",
"Marta",
"Martina",
"Matilda",
"Maura",
"Melania",
"Melina",
"Melissa",
"Mercedes",
"Michela",
"Milena",
"Monica",
"Morena",
"Nadia",
"Natalia",
"Nedda",
"Nicoletta",
"Nina",
"Ninetta",
"Olga",
"Ornella",
"Paloma",
"Paola",
"Paoletta",
"Patrizia",
"Paulina",
"Pierina",
"Pina",
"Priscilla",
"Raffaella",
"Ramona",
"Renata",
"Rita",
"Roberta",
"Romana",
"Romina",
"Rosa",
"Rosalia",
"Rosaria",
"Rosina",
"Rossana",
"Sandra",
"Serafina",
"Serena",
"Silvia",
"Simonetta",
"Sole",
"Sonia",
"Sophia",
"Stefani",
"Stefania",
"Stella",
"Susanna",
"Sylvia",
"Tatiana",
"Teresa",
"Tina",
"Tiziana",
"Tonia",
"Valentina",
"Valeria",
"Vanessa",
"Veronica",
"Victoria",
"Vincenza",
"Virginia",
"Viridiana",
"Vittoria",
"Zaira",
)
first_names = first_names_male + first_names_female
# source: https://en.wiktionary.org/w/index.php?title=Category:Italian_surnames
last_names = (
"Abatantuono",
"Abate",
"Abba",
"Abbagnale",
"Accardo",
"Acerbi",
"Adinolfi",
"Agazzi",
"Agnesi",
"Agostinelli",
"Agostini",
"Ajello",
"Albertini",
"Alboni",
"Aldobrandi",
"Alfieri",
"Alfonsi",
"Alighieri",
"Almagià",
"Aloisio",
"Alonzi",
"Altera",
"Amaldi",
"Amato",
"Ammaniti",
"Anastasi",
"Andreotti",
"Andreozzi",
"Angeli",
"Angiolello",
"Anguillara",
"Anguissola",
"Anichini",
"Antelami",
"Antonacci",
"Antonelli",
"Antonello",
"Antonetti",
"Antonini",
"Antonioni",
"Antonucci",
"Aporti",
"Argan",
"Argentero",
"Argenti",
"Argento",
"Argurio",
"Ariasso",
"Ariosto",
"Armani",
"Armellini",
"Asmundo",
"Asprucci",
"Aulenti",
"Avogadro",
"Babati",
"Babato",
"Babbo",
"Bacosi",
"Badoer",
"Badoglio",
"Baggio",
"Baglioni",
"Bajamonti",
"Bajardi",
"Balbi",
"Balbo",
"Balla",
"Balotelli",
"Bandello",
"Baracca",
"Barbarigo",
"Barberini",
"Barcaccia",
"Barcella",
"Barese",
"Baresi",
"Barillaro",
"Baroffio",
"Barozzi",
"Barracco",
"Barsanti",
"Bartoli",
"Barzini",
"Basadonna",
"Bassi",
"Basso",
"Bataglia",
"Battaglia",
"Battelli",
"Battisti",
"Bazzi",
"Beccaria",
"Beccheria",
"Beffa",
"Belletini",
"Bellini",
"Bellocchio",
"Bellucci",
"Bellò",
"Bembo",
"Benedetti",
"Benigni",
"Benussi",
"Berengario",
"Bergoglio",
"Berlusconi",
"Bernardi",
"Bernardini",
"Bernetti",
"Bernini",
"Berrè",
"Bersani",
"Bertoli",
"Bertolucci",
"Bertoni",
"Bettin",
"Bettoni",
"Bevilacqua",
"Biagi",
"Biagiotti",
"Bianchi",
"Bianchini",
"Bignami",
"Bignardi",
"Binaghi",
"Bixio",
"Blasi",
"Boaga",
"Bocca",
"Boccaccio",
"Boccherini",
"Boccioni",
"Bocelli",
"Bodoni",
"Boezio",
"Boiardo",
"Boitani",
"Boito",
"Boldù",
"Bombieri",
"Bompiani",
"Bonanno",
"Bonatti",
"Bonaventura",
"Bondumier",
"Bongiorno",
"Bonino",
"Bonolis",
"Bonomo",
"Borghese",
"Borgia",
"Borrani",
"Borromeo",
"Borromini",
"Borroni",
"Borsellino",
"Borsiere",
"Borzomì",
"Bosio",
"Bossi",
"Bosurgi",
"Botta",
"Bottaro",
"Botticelli",
"Bottigliero",
"Bova",
"Bragadin",
"Bragaglia",
"Bramante",
"Brambilla",
"Brancaccio",
"Branciforte",
"Brenna",
"Bresciani",
"Briccialdi",
"Brichese",
"Broggini",
"Broschi",
"Brugnaro",
"Brunelleschi",
"Brunello",
"Bruno",
"Bruscantini",
"Bulzoni",
"Buonauro",
"Burcardo",
"Buscetta",
"Busoni",
"Cabibbo",
"Caboto",
"Cabrini",
"Caccianemico",
"Caccioppoli",
"Cadorna",
"Caetani",
"Cafarchia",
"Caffarelli",
"Cagnin",
"Cagnotto",
"Cainero",
"Caironi",
"Calarco",
"Calbo",
"Calgari",
"Callegari",
"Callegaro",
"Calvo",
"Camanni",
"Camicione",
"Camilleri",
"Camiscione",
"Cammarata",
"Campanella",
"Campano",
"Campise",
"Camuccini",
"Canali",
"Canetta",
"Canevascini",
"Canil",
"Cannizzaro",
"Canova",
"Cantimori",
"Capecchi",
"Capone",
"Cappelli",
"Capuana",
"Caracciolo",
"Cardano",
"Carducci",
"Carfagna",
"Carli",
"Carnera",
"Carocci",
"Carosone",
"Carpaccio",
"Carriera",
"Carullo",
"Caruso",
"Casadei",
"Casagrande",
"Casale",
"Casaleggio",
"Casalodi",
"Casarin",
"Casellati",
"Casini",
"Cassarà",
"Castelli",
"Castellitto",
"Castiglione",
"Castioni",
"Catalano",
"Catenazzi",
"Cattaneo",
"Cavalcanti",
"Cavanna",
"Ceci",
"Celentano",
"Cendron",
"Ceravolo",
"Ceri",
"Cerquiglini",
"Cerutti",
"Cesaroni",
"Cesarotti",
"Ceschi",
"Chechi",
"Cheda",
"Cherubini",
"Chiappetta",
"Chiaramonte",
"Chiesa",
"Chigi",
"Chindamo",
"Chinnici",
"Chittolini",
"Ciampi",
"Cianciolo",
"Ciani",
"Cibin",
"Cicala",
"Cicilia",
"Cignaroli",
"Cilea",
"Cilibrasi",
"Cimarosa",
"Cimini",
"Cipolla",
"Civaschi",
"Coardi",
"Cocci",
"Cociarelli",
"Colletti",
"Collina",
"Collodi",
"Columbo",
"Combi",
"Comboni",
"Comencini",
"Comeriato",
"Comisso",
"Comolli",
"Condoleo",
"Contarini",
"Conte",
"Conti",
"Contrafatto",
"Coppola",
"Corbo",
"Corcos",
"Corradi",
"Correr",
"Cortese",
"Cossiga",
"Costalonga",
"Costanzi",
"Cremonesi",
"Crespi",
"Crisafulli",
"Crispi",
"Cristoforetti",
"Cuda",
"Cugia",
"Cundari",
"Cuomo",
"Curatoli",
"Curci",
"Curiel",
"Cusano",
"Cutrufo",
"Cutuli",
"Cuzzocrea",
"Dalla",
"Dallapé",
"Dallara",
"Dandolo",
"Deledda",
"Delle",
"Dellucci",
"Depero",
"Desio",
"Detti",
"Dibiasi",
"Disdero",
"Doglioni",
"Donarelli",
"Donati",
"Donatoni",
"Donini",
"Donà",
"Doria",
"Dossetti",
"Dossi",
"Dovara",
"Draghi",
"Druso",
"Dulbecco",
"Duodo",
"Durante",
"Duse",
"Eco",
"Einaudi",
"Emanuelli",
"Emo",
"Endrizzi",
"Errani",
"Errigo",
"Esposito",
"Fabbri",
"Fabrizi",
"Faggiani",
"Fagiani",
"Fagotto",
"Falcone",
"Falier",
"Fallaci",
"Falloppio",
"Fantini",
"Fantoni",
"Fantozzi",
"Fanucci",
"Faranda",
"Farina",
"Farinelli",
"Farnese",
"Fattori",
"Faugno",
"Favata",
"Federici",
"Federico",
"Fermi",
"Ferrabosco",
"Ferragamo",
"Ferragni",
"Ferrante",
"Ferrara",
"Ferrari",
"Ferraris",
"Ferrata",
"Ferrazzi",
"Ferretti",
"Ferrucci",
"Fibonacci",
"Ficino",
"Fieramosca",
"Filangieri",
"Filippelli",
"Filippini",
"Filogamo",
"Filzi",
"Finetti",
"Finotto",
"Finzi",
"Fioravanti",
"Fiorucci",
"Fischetti",
"Fittipaldi",
"Flaiano",
"Florio",
"Fo",
"Foa",
"Foconi",
"Fogazzaro",
"Foletti",
"Folliero",
"Fornaciari",
"Forza",
"Foscari",
"Foà",
"Fracci",
"Franceschi",
"Franscini",
"Franzese",
"Frescobaldi",
"Fusani",
"Fuseli",
"Gabba",
"Gabbana",
"Gabrieli",
"Gadda",
"Gaggini",
"Gagliano",
"Gagliardi",
"Gaiatto",
"Gaito",
"Galeati",
"Galiazzo",
"Galilei",
"Galtarossa",
"Galuppi",
"Galvani",
"Gangemi",
"Gargallo",
"Garibaldi",
"Garobbio",
"Garozzo",
"Garrone",
"Garzoni",
"Gasperi",
"Gatto",
"Gelli",
"Gemito",
"Gentileschi",
"Gentili",
"Gentilini",
"Geraci",
"Germano",
"Giacconi",
"Giacometti",
"Giammusso",
"Gianetti",
"Gianinazzi",
"Giannelli",
"Giannetti",
"Giannini",
"Giannone",
"Giannotti",
"Giannuzzi",
"Gianvecchio",
"Gibilisco",
"Gigli",
"Gilardoni",
"Ginese",
"Ginesio",
"Gioberti",
"Giolitti",
"Giorgetti",
"Giovine",
"Giradello",
"Giulietti",
"Giunti",
"Giusti",
"Goldoni",
"Goldstein",
"Golgi",
"Golino",
"Gonzaga",
"Gori",
"Gottardi",
"Gotti",
"Govoni",
"Gozzano",
"Gozzi",
"Gradenigo",
"Gramsci",
"Granatelli",
"Grassi",
"Grasso",
"Gravina",
"Greco",
"Greggio",
"Gregori",
"Gregorio",
"Gremese",
"Grifeo",
"Grimani",
"Grisoni",
"Gritti",
"Grossi",
"Gualandi",
"Gualtieri",
"Guarana",
"Guarato",
"Guariento",
"Guarneri",
"Gucci",
"Guglielmi",
"Guicciardini",
"Guidone",
"Guidotti",
"Guinizzelli",
"Gullotta",
"Gulotta",
"Gussoni",
"Iacobucci",
"Iacovelli",
"Iadanza",
"Iannelli",
"Iannotti",
"Iannucci",
"Iannuzzi",
"Impastato",
"Infantino",
"Innocenti",
"Interiano",
"Interminei",
"Interminelli",
"Inzaghi",
"Ioppi",
"Jacuzzi",
"Jilani",
"Jovinelli",
"Juvara",
"Lamborghini",
"Lancisi",
"Lanfranchi",
"Lattuada",
"Leblanc",
"Legnante",
"Leonardi",
"Leoncavallo",
"Leone",
"Leonetti",
"Leopardi",
"Lercari",
"Lerner",
"Letta",
"Lettiere",
"Ligorio",
"Liguori",
"Lippomano",
"Littizzetto",
"Liverotti",
"Lollobrigida",
"Lombardi",
"Lombardo",
"Lombroso",
"Longhena",
"Lopresti",
"Loredan",
"Lovato",
"Lucarelli",
"Lucchesi",
"Lucciano",
"Luciani",
"Ludovisi",
"Luna",
"Lupo",
"Luria",
"Lussu",
"Luxardo",
"Luzi",
"Maccanelli",
"Maderna",
"Maderno",
"Maffei",
"Maggioli",
"Maglio",
"Magnani",
"Magrassi",
"Majewski",
"Majorana",
"Malacarne",
"Malaparte",
"Malatesta",
"Malenchini",
"Malipiero",
"Malpighi",
"Manacorda",
"Mancini",
"Mannoia",
"Manolesso",
"Mantegazza",
"Mantegna",
"Manunta",
"Manzoni",
"Marangoni",
"Marazzi",
"Marcacci",
"Marconi",
"Marenzio",
"Marinetti",
"Marini",
"Marino",
"Marrone",
"Marsili",
"Martinelli",
"Martucci",
"Marzorati",
"Mascagni",
"Mascheroni",
"Maspero",
"Mastandrea",
"Mastroianni",
"Mattarella",
"Matteotti",
"Mazzacurati",
"Mazzanti",
"Mazzeo",
"Mazzi",
"Mazzini",
"Mazzocchi",
"Medici",
"Mengolo",
"Mennea",
"Mercadante",
"Mercalli",
"Mercantini",
"Mercati",
"Merisi",
"Metella",
"Meucci",
"Mezzetta",
"Micca",
"Michelangeli",
"Micheletti",
"Migliaccio",
"Milanesi",
"Mimun",
"Miniati",
"Missoni",
"Moccia",
"Mocenigo",
"Modiano",
"Modigliani",
"Modugno",
"Mogherini",
"Molesini",
"Monaco",
"Mondadori",
"Mondaini",
"Monduzzi",
"Moneta",
"Monicelli",
"Montalcini",
"Montalti",
"Montanari",
"Montanariello",
"Montanelli",
"Monte",
"Montecchi",
"Montesano",
"Montessori",
"Monteverdi",
"Monti",
"Morabito",
"Morandi",
"Morandini",
"Morellato",
"Moresi",
"Moretti",
"Morgagni",
"Morlacchi",
"Morosini",
"Morpurgo",
"Morricone",
"Morrocco",
"Mortati",
"Morucci",
"Moschino",
"Mozart",
"Munari",
"Muratori",
"Murialdo",
"Murri",
"Musatti",
"Muti",
"Naccari",
"Nadi",
"Napolitano",
"Natta",
"Navarria",
"Navone",
"Necci",
"Nibali",
"Nicoletti",
"Nicolini",
"Nicolucci",
"Nievo",
"Niggli",
"Niscoromni",
"Nitti",
"Nitto",
"Nolcini",
"Nonis",
"Norbiato",
"Nordio",
"Nosiglia",
"Notarbartolo",
"Novaro",
"Nugnes",
"Odescalchi",
"Offredi",
"Oliboni",
"Olivetti",
"Omma",
"Onio",
"Onisto",
"Opizzi",
"Orengo",
"Orlando",
"Orsini",
"Ortese",
"Ortolani",
"Oscuro",
"Ossani",
"Ossola",
"Ostinelli",
"Ottino",
"Ovadia",
"Pace",
"Pacelli",
"Pacetti",
"Pacillo",
"Pacomio",
"Padovano",
"Paganini",
"Pagliaro",
"Pagnotto",
"Palazzo",
"Palladio",
"Palmisano",
"Palombi",
"Paltrinieri",
"Palumbo",
"Panatta",
"Panicucci",
"Panzera",
"Paoletti",
"Paolini",
"Paolucci",
"Papafava",
"Papetti",
"Pareto",
"Parini",
"Parisi",
"Parmitano",
"Parpinel",
"Parri",
"Paruta",
"Pascarella",
"Pasolini",
"Pasqua",
"Passalacqua",
"Pastine",
"Pausini",
"Pavanello",
"Pavarotti",
"Pavone",
"Peano",
"Pederiva",
"Pedersoli",
"Pedrazzini",
"Pedroni",
"Pellegrini",
"Pelli",
"Pellico",
"Pennetta",
"Pepe",
"Peranda",
"Pergolesi",
"Perini",
"Perozzo",
"Persico",
"Pertile",
"Pertini",
"Peruzzi",
"Petralli",
"Petrassi",
"Petrocelli",
"Petrucci",
"Petrucelli",
"Petruzzi",
"Pezzali",
"Piacentini",
"Piane",
"Piazzi",
"Piccinni",
"Piccio",
"Pietrangeli",
"Pigafetta",
"Pignatti",
"Pinamonte",
"Pincherle",
"Pininfarina",
"Piovani",
"Pirandello",
"Pirelli",
"Pisacane",
"Pisani",
"Pisano",
"Pisaroni",
"Pistoletto",
"Pizzamano",
"Pizzetti",
"Pizziol",
"Pizzo",
"Platini",
"Poerio",
"Polani",
"Polesel",
"Polizzi",
"Pometta",
"Pontecorvo",
"Ponti",
"Porcellato",
"Porzio",
"Pozzecco",
"Prada",
"Praga",
"Pratesi",
"Prati",
"Priuli",
"Procacci",
"Prodi",
"Proietti",
"Pucci",
"Puccini",
"Pugliese",
"Puglisi",
"Pulci",
"Quasimodo",
"Querini",
"Raimondi",
"Ramazzotti",
"Randazzo",
"Rapisardi",
"Rastelli",
"Raurica",
"Ravaglioli",
"Redi",
"Regge",
"Renault",
"Renier",
"Rensi",
"Renzi",
"Respighi",
"Riccardi",
"Riccati",
"Ricci",
"Ricciardi",
"Ricolfi",
"Rienzo",
"Righi",
"Rinaldi",
"Rismondo",
"Ritacca",
"Rizzo",
"Rizzoli",
"Rocca",
"Roccabonella",
"Roero",
"Romagnoli",
"Romano",
"Romiti",
"Roncalli",
"Rosiello",
"Rosmini",
"Rosselli",
"Rossellini",
"Rossetti",
"Rossi",
"Rossini",
"Roth",
"Rubbia",
"Ruberto",
"Ruffini",
"Ruggeri",
"Ruggieri",
"Russo",
"Rusticucci",
"Sabatini",
"Sabbatini",
"Saffi",
"Sagese",
"Sagnelli",
"Sagredo",
"Salandra",
"Salata",
"Salgari",
"Salieri",
"Salvemini",
"Salvini",
"Salvo",
"Samele",
"Sandi",
"Sanguineti",
"Sansoni",
"Santi",
"Santorio",
"Santoro",
"Sanudo",
"Saraceno",
"Saracino",
"Saragat",
"Satriani",
"Satta",
"Sauli",
"Sauro",
"Savorgnan",
"Sbarbaro",
"Scaduto",
"Scalera",
"Scalfaro",
"Scamarcio",
"Scandone",
"Scaramucci",
"Scarfoglio",
"Scarlatti",
"Scarpa",
"Scarpetta",
"Scarponi",
"Schiaparelli",
"Schiavo",
"Schiavone",
"Schicchi",
"Scialpi",
"Scotti",
"Scotto",
"Seddio",
"Segni",
"Segrè",
"Semitecolo",
"Serao",
"Serlupi",
"Sermonti",
"Serraglio",
"Sforza",
"Sgalambro",
"Sgarbi",
"Sibilia",
"Siffredi",
"Silvestri",
"Simeoni",
"Sinisi",
"Sismondi",
"Smirnoff",
"Sobrero",
"Soderini",
"Soffici",
"Sokolov",
"Solari",
"Solimena",
"Sollima",
"Sommaruga",
"Sonnino",
"Soprano",
"Soranzo",
"Sordi",
"Sorrentino",
"Spadafora",
"Spallanzani",
"Spanevello",
"Speri",
"Spinelli",
"Spinola",
"Squarcione",
"Sraffa",
"Staglieno",
"Stefanelli",
"Stein",
"Stoppani",
"Storladi",
"Stradivari",
"Strangio",
"Stucchi",
"Surian",
"Tacchini",
"Taccola",
"Tafuri",
"Tagliafierro",
"Taliani",
"Taliercio",
"Tamborini",
"Tamburello",
"Tamburi",
"Tamburini",
"Tanzini",
"Tarantini",
"Tarantino",
"Tarchetti",
"Tartaglia",
"Tartini",
"Tasca",
"Tasso",
"Tassoni",
"Tebaldi",
"Tedesco",
"Telesio",
"Tencalla",
"Terragni",
"Tiepolo",
"Tirabassi",
"Togliatti",
"Tognazzi",
"Toldo",
"Tolentino",
"Tomaselli",
"Tomasetti",
"Tomasini",
"Tomei",
"Tommaseo",
"Toninelli",
"Tonisto",
"Torlonia",
"Tornatore",
"Torricelli",
"Toscani",
"Toscanini",
"Toselli",
"Tosi",
"Toso",
"Tosto",
"Totino",
"Tozzi",
"Tozzo",
"Traetta",
"Trapanese",
"Trapani",
"Travaglia",
"Travaglio",
"Traversa",
"Travia",
"Trebbi",
"Treccani",
"Tremonti",
"Trentin",
"Trentini",
"Tresoldi",
"Treves",
"Trevisan",
"Trevisani",
"Trezzini",
"Trillini",
"Trincavelli",
"Trobbiani",
"Troisi",
"Trombetta",
"Tron",
"Tropea",
"Trotta",
"Trupiano",
"Trussardi",
"Turati",
"Turchetta",
"Turchi",
"Turci",
"Turrini",
"Tutino",
"Tuzzolino",
"Ubaldi",
"Udinese",
"Udinesi",
"Ughi",
"Ungaretti",
"Valentino",
"Valguarnera",
"Valier",
"Valmarana",
"Vanvitelli",
"Varano",
"Vasari",
"Vattimo",
"Vecellio",
"Vecoli",
"Veltroni",
"Vendetti",
"Venditti",
"Veneziano",
"Venier",
"Vento",
"Venturi",
"Vercelloni",
"Verdi",
"Verdone",
"Verga",
"Vergassola",
"Vergerio",
"Verri",
"Versace",
"Vespa",
"Vespucci",
"Vezzali",
"Vianello",
"Vidoni",
"Vigliotti",
"Vigorelli",
"Villadicani",
"Villarosa",
"Viola",
"Virgilio",
"Visconti",
"Visintini",
"Vismara",
"Vittadello",
"Vitturi",
"Vivaldi",
"Viviani",
"Volta",
"Volterra",
"Zabarella",
"Zaccagnini",
"Zaccardo",
"Zacchia",
"Zacco",
"Zaguri",
"Zamengo",
"Zamorani",
"Zampa",
"Zanazzo",
"Zanichelli",
"Zanzi",
"Zarlino",
"Zecchini",
"Zeffirelli",
"Zetticci",
"Ziani",
"Zichichi",
"Zito",
"Zola",
"Zoppetti",
"Zoppetto",
)
prefixes_female = ("Dott.", "Sig.ra")
prefixes_male = ("Dott.", "Sig.")
prefixes = ("Dott.", "Sig.", "Sig.ra")
| mit | 817a1d7eafb13b779eefd50d62554e00 | 18.593525 | 95 | 0.385503 | 3.032288 | false | false | false | false |
joke2k/faker | faker/providers/address/sk_SK/__init__.py | 1 | 122027 | from .. import Provider as AddressProvider
class Provider(AddressProvider):
city_formats = ("{{city_name}}",)
street_name_formats = ("{{street_name}}",)
street_address_formats = ("{{street_name}} {{building_number}}",)
address_formats = ("{{street_address}}\n{{postcode}} {{city}}",)
building_number_formats = ("%", "%#", "%##")
street_suffixes_long = ("ulica", "trieda", "nábrežie", "námestie")
street_suffixes_short = ("ul.", "tr.", "nábr.", "nám.")
postcode_formats = (
"8## ##",
"9## ##",
"0## ##",
)
cities = (
"Ábelová",
"Abovce",
"Abrahám",
"Abrahámovce",
"Abrahámovce",
"Abramová",
"Abranovce",
"Adidovce",
"Alekšince",
"Andovce",
"Andrejová",
"Ardanovce",
"Ardovo",
"Arnutovce",
"Báb",
"Babie",
"Babín",
"Babiná",
"Babindol",
"Babinec",
"Bacúch",
"Bacúrov",
"Báč",
"Bačka",
"Bačkov",
"Bačkovík",
"Badín",
"Baďan",
"Báhoň",
"Bajany",
"Bajč",
"Bajerov",
"Bajerovce",
"Bajka",
"Bajtava",
"Baka",
"Baláže",
"Baldovce",
"Balog nad Ipľom",
"Baloň",
"Banka",
"Bánov",
"Bánovce nad Bebravou",
"Bánovce nad Ondavou",
"Banská Belá",
"Banská Štiavnica",
"Banská Bystrica",
"Banské",
"Banský Studenec",
"Baňa",
"Bara",
"Barca",
"Bartošovce",
"Bardoňovo",
"Bartošova Lehôtka",
"Bardejov",
"Baška",
"Baškovce",
"Baškovce",
"Bašovce",
"Batizovce",
"Bátorová",
"Bátka",
"Bátorove Kosihy",
"Bátovce",
"Beharovce",
"Beckov",
"Becherov",
"Belá",
"Belá",
"Belá - Dulice",
"Belá nad Cirochou",
"Beladice",
"Belejovce",
"Belín",
"Belina",
"Belince",
"Bellova Ves",
"Beloveža",
"Beluj",
"Beluša",
"Belža",
"Beniakovce",
"Benice",
"Benkovce",
"Beňadiková",
"Beňadikovce",
"Beňadovo",
"Beňatina",
"Beňuš",
"Bernolákovo",
"Bertotovce",
"Beša",
"Beša",
"Bešeňov",
"Bešeňová",
"Betlanovce",
"Betliar",
"Bežovce",
"Bidovce",
"Biel",
"Bielovce",
"Biely Kostol",
"Bijacovce",
"Bílkove Humence",
"Bíňa",
"Bíňovce",
"Biskupice",
"Biskupová",
"Bitarová",
"Blahová",
"Blatná na Ostrove",
"Blatná Polianka",
"Blatné",
"Blatné Remety",
"Blatné Revištia",
"Blatnica",
"Blažice",
"Blažovce",
"Blesovce",
"Blhovce",
"Bobot",
"Bobrov",
"Bobrovček",
"Bobrovec",
"Bobrovník",
"Bočiar",
"Bodíky",
"Bodiná",
"Bodorová",
"Bodovce",
"Bodružal",
"Bodza",
"Bodzianske Lúky",
"Bogliarka",
"Bohdanovce",
"Bohdanovce nad Trnavou",
"Boheľov",
"Bohunice",
"Bohunice",
"Bohúňovo",
"Bojná",
"Bojnice",
"Bojničky",
"Boldog",
"Boleráz",
"Bolešov",
"Boliarov",
"Boľ",
"Boľkovce",
"Borcová",
"Borčany",
"Borčice",
"Borinka",
"Borová",
"Borovce",
"Borský Mikuláš",
"Borský Svätý Jur",
"Borša",
"Bory",
"Bošáca",
"Bošany",
"Bottovo",
"Boťany",
"Bôrka",
"Bracovce",
"Branč",
"Branovo",
"Bratislava",
"Okres Bratislava II",
"Okres Bratislava III",
"Okres Bratislava IV",
"Okres Bratislava V",
"Braväcovo",
"Brdárka",
"Brehov",
"Brehy",
"Brekov",
"Brestov",
"Brestov",
"Brestov nad Laborcom",
"Brestovany",
"Brestovec",
"Brestovec",
"Bretejovce",
"Bretka",
"Breza",
"Brezany",
"Brezina",
"Breziny",
"Breznica",
"Breznička",
"Breznička",
"Brezno",
"Brezolupy",
"Brezov",
"Brezová pod Bradlom",
"Brezovec",
"Brezovica",
"Brezovica",
"Brezovička",
"Brezovka",
"Brežany",
"Brhlovce",
"Brieštie",
"Brodské",
"Brodzany",
"Brunovce",
"Brusnica",
"Brusník",
"Brusno",
"Brutovce",
"Bruty",
"Brvnište",
"Brzotín",
"Buclovany",
"Búč",
"Bučany",
"Budča",
"Budikovany",
"Budimír",
"Budiná",
"Budince",
"Budiš",
"Budkovce",
"Budmerice",
"Buglovce",
"Buková",
"Bukovce",
"Bukovec",
"Bukovec",
"Bukovina",
"Bulhary",
"Bunetice",
"Bunkovce",
"Bušince",
"Bušovce",
"Buzica",
"Buzitka",
"Bystrá",
"Bystrá",
"Bystrany",
"Bystré",
"Bystričany",
"Bystrička",
"Byšta",
"Bytča",
"Bzenica",
"Bzenov",
"Bzince pod Javorinou",
"Bziny",
"Bzovík",
"Bzovská Lehôtka",
"Bžany",
"Cabaj - Čápor",
"Cabov",
"Cakov",
"Cejkov",
"Cernina",
"Cerová",
"Cerovo",
"Cestice",
"Cífer",
"Cigeľ",
"Cigeľka",
"Cigla",
"Cimenná",
"Cinobaňa",
"Čabalovce",
"Čabiny",
"Čabradský Vrbovok",
"Čadca",
"Čachtice",
"Čajkov",
"Čaka",
"Čakajovce",
"Čakanovce",
"Čakanovce",
"Čakany",
"Čaklov",
"Čalovec",
"Čamovce",
"Čaňa",
"Čaradice",
"Čáry",
"Častá",
"Častkov",
"Častkovce",
"Čata",
"Čataj",
"Čavoj",
"Čebovce",
"Čečehov",
"Čečejovce",
"Čechy",
"Čechynce",
"Čekovce",
"Čeláre",
"Čelkova Lehota",
"Čelovce",
"Čelovce",
"Čeľadice",
"Čeľadince",
"Čeľovce",
"Čenkovce",
"Čerenčany",
"Čereňany",
"Čerhov",
"Čerín",
"Čermany",
"Černík",
"Černina",
"Černochov",
"Čertižné",
"Červená Voda",
"Červenica",
"Červenica pri Sabinove",
"Červeník",
"Červený Hrádok",
"Červený Kameň",
"Červený Kláštor",
"Červeňany",
"České Brezovo",
"Čičarovce",
"Čičava",
"Čičmany",
"Číčov",
"Čierna",
"Čierna Lehota",
"Čierna Lehota",
"Čierna nad Tisou",
"Čierna Voda",
"Čierne",
"Čierne Kľačany",
"Čierne nad Topľou",
"Čierne Pole",
"Čierny Balog",
"Čierny Brod",
"Čierny Potok",
"Čifáre",
"Čiližská Radvaň",
"Čimhová",
"Čirč",
"Číž",
"Čižatice",
"Čoltovo",
"Čremošné",
"Čučma",
"Čukalovce",
"Dačov Lom",
"Daletice",
"Danišovce",
"Dargov",
"Davidov",
"Debraď",
"Dedačov",
"Dedina Mládeže",
"Dedinka",
"Dedinky",
"Dechtice",
"Dekýš",
"Demandice",
"Demänovská Dolina",
"Demjata",
"Detrík",
"Detva",
"Detvianska Huta",
"Devičany",
"Devičie",
"Dežerice",
"Diaková",
"Diakovce",
"Diviacka Nová Ves",
"Diviaky nad Nitricou",
"Divín",
"Divina",
"Divinka",
"Dlhá",
"Dlhá nad Kysucou",
"Dlhá nad Oravou",
"Dlhá nad Váhom",
"Dlhá Ves",
"Dlhé Klčovo",
"Dlhé nad Cirochou",
"Dlhé Pole",
"Dlhé Stráže",
"Dlhoňa",
"Dlžín",
"Dobrá",
"Dobrá Niva",
"Dobrá Voda",
"Dobroč",
"Dobrohošť",
"Dobroslava",
"Dobšiná",
"Dohňany",
"Dojč",
"Dolinka",
"Dolná Breznica",
"Dolná Krupá",
"Dolná Lehota",
"Dolná Mariková",
"Dolná Mičiná",
"Dolná Poruba",
"Dolná Seč",
"Dolná Streda",
"Dolná Strehová",
"Dolná Súča",
"Dolná Tižina",
"Dolná Trnávka",
"Dolná Ves",
"Dolná Ždaňa",
"Dolné Dubové",
"Dolné Kočkovce",
"Dolné Lefantovce",
"Dolné Lovčice",
"Dolné Mladonice",
"Dolné Naštice",
"Dolné Obdokovce",
"Dolné Orešany",
"Dolné Otrokovce",
"Dolné Plachtince",
"Dolné Saliby",
"Dolné Semerovce",
"Dolné Srnie",
"Dolné Strháre",
"Dolné Trhovište",
"Dolné Vestenice",
"Dolné Zahorany",
"Dolné Zelenice",
"Dolný Badín",
"Dolný Bar",
"Dolný Harmanec",
"Dolný Hričov",
"Dolný Chotár",
"Dolný Kalník",
"Dolný Kubín",
"Dolný Lieskov",
"Dolný Lopašov",
"Dolný Ohaj",
"Dolný Pial",
"Dolný Štál",
"Dolný Vadičov",
"Doľany",
"Doľany",
"Domadice",
"Domaníky",
"Domaniža",
"Domaňovce",
"Donovaly",
"Drábsko",
"Drahňov",
"Drahovce",
"Dravce",
"Dražice",
"Dražkovce",
"Drážovce",
"Drienčany",
"Drienica",
"Drienov",
"Drienovec",
"Drienovo",
"Drienovská Nová Ves",
"Drietoma",
"Drnava",
"Drňa",
"Družstevná pri Hornáde",
"Drženice",
"Držkovce",
"Dubinné",
"Dubnica nad Váhom",
"Dubnička",
"Dubník",
"Dubno",
"Dubodiel",
"Dubová",
"Dubová",
"Dubovany",
"Dubovce",
"Dubové",
"Dubové",
"Dubovec",
"Dubovica",
"Dúbrava",
"Dúbrava",
"Dúbrava",
"Dúbravica",
"Dúbravka",
"Dúbravy",
"Ducové",
"Dudince",
"Dukovce",
"Dulov",
"Dulova Ves",
"Dulovce",
"Dulovo",
"Dunajská Lužná",
"Dunajov",
"Dunajská Streda",
"Dunajský Klátov",
"Duplín",
"Dvorany nad Nitrou",
"Dvorec",
"Dvorianky",
"Dvorníky",
"Dvorníky - Včeláre",
"Dvory nad Žitavou",
"Ďačov",
"Ďanová",
"Ďapalovce",
"Ďubákovo",
"Ďurčiná",
"Ďurďoš",
"Ďurďošík",
"Ďurďové",
"Ďurkov",
"Ďurková",
"Ďurkovce",
"Egreš",
"Fačkov",
"Falkušovce",
"Farná",
"Fekišovce",
"Figa",
"Fijaš",
"Fiľakovo",
"Fiľakovské Kováče",
"Fintice",
"Folkušová",
"Forbasy",
"Frička",
"Fričkovce",
"Fričovce",
"Fulianka",
"Gabčíkovo",
"Gaboltov",
"Gajary",
"Galanta",
"Galovany",
"Gánovce",
"Gáň",
"Gbelce",
"Gbely",
"Gbeľany",
"Geča",
"Gelnica",
"Gemer",
"Gemerček",
"Gemerská Hôrka",
"Gemerská Panica",
"Gemerská Poloma",
"Gemerská Ves",
"Gemerské Dechtáre",
"Gemerské Michalovce",
"Gemerské Teplice",
"Gemerský Jablonec",
"Gemerský Sad",
"Geraltov",
"Gerlachov",
"Gerlachov",
"Giglovce",
"Giraltovce",
"Girovce",
"Glabušovce",
"Gočaltovo",
"Gočovo",
"Golianovo",
"Gortva",
"Gôtovany",
"Granč - Petrovce",
"Gregorova Vieska",
"Gregorovce",
"Gribov",
"Gruzovce",
"Gyňov",
"Habovka",
"Habura",
"Hačava",
"Háj",
"Háj",
"Hajná Nová Ves",
"Hajnáčka",
"Hájske",
"Hajtovka",
"Haláčovce",
"Halič",
"Haligovce",
"Haluzice",
"Hamuliakovo",
"Handlová",
"Hanigovce",
"Haniska",
"Haniska",
"Hanková",
"Hankovce",
"Hankovce",
"Hanušovce nad Topľou",
"Harakovce",
"Harhaj",
"Harichovce",
"Harmanec",
"Hatalov",
"Hatné",
"Havaj",
"Havka",
"Havranec",
"Hažín",
"Hažín nad Cirochou",
"Hažlín",
"Helcmanovce",
"Heľpa",
"Henckovce",
"Henclová",
"Hencovce",
"Hendrichovce",
"Herľany",
"Hermanovce",
"Hermanovce nad Topľou",
"Hertník",
"Hervartov",
"Hiadeľ",
"Hincovce",
"Hladovka",
"Hlboké",
"Hliník nad Hronom",
"Hlinné",
"Hlivištia",
"Hlohovec",
"Hniezdne",
"Hnilčík",
"Hnilec",
"Hnojné",
"Hnúšťa",
"Hodejov",
"Hodejovec",
"Hodkovce",
"Hodruša - Hámre",
"Hokovce",
"Holčíkovce",
"Holiare",
"Holice",
"Holíč",
"Holiša",
"Holumnica",
"Honce",
"Hontianska Vrbica",
"Hontianske Moravce",
"Hontianske Nemce",
"Hontianske Tesáre",
"Hontianske Trsťany",
"Horná Breznica",
"Horná Kráľová",
"Horná Krupá",
"Horná Lehota",
"Horná Lehota",
"Horná Mariková",
"Horná Mičiná",
"Horná Poruba",
"Horná Potôň",
"Horná Seč",
"Horná Streda",
"Horná Strehová",
"Horná Súča",
"Horná Štubňa",
"Horná Ves",
"Horná Ves",
"Horná Ždaňa",
"Horné Dubové",
"Horné Hámre",
"Horné Chlebany",
"Horné Lefantovce",
"Horné Mladonice",
"Horné Mýto",
"Horné Naštice",
"Horné Obdokovce",
"Horné Orešany",
"Horné Otrokovce",
"Horné Plachtince",
"Horné Pršany",
"Horné Saliby",
"Horné Semerovce",
"Horné Srnie",
"Horné Strháre",
"Horné Štitáre",
"Horné Trhovište",
"Horné Turovce",
"Horné Vestenice",
"Horné Zahorany",
"Horné Zelenice",
"Horný Badín",
"Horný Bar",
"Horný Hričov",
"Horný Kalník",
"Horný Lieskov",
"Horný Pial",
"Horný Tisovník",
"Horný Vadičov",
"Horňa",
"Horňany",
"Horovce",
"Horovce",
"Hoste",
"Hostice",
"Hostie",
"Hostišovce",
"Hostovice",
"Hosťová",
"Hosťovce",
"Hosťovce",
"Hozelec",
"Hôrka",
"Hôrka nad Váhom",
"Hôrky",
"Hrabičov",
"Hrabkov",
"Hrabová Roztoka",
"Hrabovčík",
"Hrabovec",
"Hrabovec nad Laborcom",
"Hrabské",
"Hrabušice",
"Hradisko",
"Hradište",
"Hradište",
"Hradište pod Vrátnom",
"Hrádok",
"Hrachovište",
"Hrachovo",
"Hraničné",
"Hranovnica",
"Hraň",
"Hrašné",
"Hrašovík",
"Hrčeľ",
"Hrhov",
"Hriadky",
"Hričovské Podhradie",
"Hriňová",
"Hrišovce",
"Hrkovce",
"Hrlica",
"Hrnčiarovce nad Parnou",
"Hrnčiarska Ves",
"Hrnčiarske Zalužany",
"Hrochoť",
"Hromoš",
"Hronec",
"Hronovce",
"Hronsek",
"Hronská Breznica",
"Hronská Dúbrava",
"Hronské Kľačany",
"Hronské Kosihy",
"Hronský Beňadik",
"Hrubá Borša",
"Hruboňovo",
"Hrubov",
"Hrubý Šúr",
"Hrušov",
"Hrušov",
"Hrušovany",
"Hrušovo",
"Hruštín",
"Hubice",
"Hubina",
"Hubošovce",
"Hubová",
"Hubovo",
"Hucín",
"Hudcovce",
"Hul",
"Humenné",
"Huncovce",
"Hunkovce",
"Hurbanova Ves",
"Hurbanovo",
"Husák",
"Husiná",
"Hutka",
"Huty",
"Hviezdoslavov",
"Hvozdnica",
"Hybe",
"Hýľov",
"Chanava",
"Chlebnice",
"Chlmec",
"Chľaba",
"Chmeľnica",
"Chmeľov",
"Chmeľová",
"Chmeľovec",
"Chminianska Nová Ves",
"Chminianske Jakubovany",
"Chmiňany",
"Choča",
"Chocholná - Velčice",
"Choňkovce",
"Chorvátsky Grob",
"Chorváty",
"Chotča",
"Chotín",
"Chrabrany",
"Chrámec",
"Chrastince",
"Chrastné",
"Chrasť nad Hornádom",
"Chrenovec - Brusno",
"Chropov",
"Chrťany",
"Chtelnica",
"Chudá Lehota",
"Chvalová",
"Chvojnica",
"Chvojnica",
"Chynorany",
"Chyžné",
"Igram",
"Ihľany",
"Ihráč",
"Ilava",
"Iliašovce",
"Ilija",
"Imeľ",
"Inovce",
"Iňa",
"Iňačovce",
"Ipeľské Predmostie",
"Ipeľské Úľany",
"Ipeľský Sokolec",
"Istebné",
"Ivachnová",
"Ivančiná",
"Ivanice",
"Ivanka pri Dunaji",
"Ivanka pri Nitre",
"Ivanovce",
"Iža",
"Ižipovce",
"Ižkovce",
"Jablonec",
"Jablonica",
"Jablonka",
"Jablonov",
"Jablonov nad Turňou",
"Jablonové",
"Jablonové",
"Jabloň",
"Jabloňovce",
"Jacovce",
"Jahodná",
"Jaklovce",
"Jakovany",
"Jakubany",
"Jakubov",
"Jakubova Voľa",
"Jakubovany",
"Jakubovany",
"Jakušovce",
"Jalová",
"Jalovec",
"Jalovec",
"Jalšové",
"Jalšovík",
"Jamník",
"Jamník",
"Janice",
"Janík",
"Janíky",
"Jankovce",
"Janov",
"Janova Lehota",
"Janovce",
"Jánovce",
"Jánovce",
"Janovík",
"Jarabá",
"Jarabina",
"Jarok",
"Jarovnice",
"Jasenica",
"Jasenie",
"Jasenov",
"Jasenov",
"Jasenová",
"Jasenovce",
"Jasenové",
"Jasenovo",
"Jaslovské Bohunice",
"Jasov",
"Jasová",
"Jastrabá",
"Jastrabie nad Topľou",
"Jastrabie pri Michalovciach",
"Jatov",
"Javorina (vojenský obvod)",
"Jazernica",
"Jedlinka",
"Jedľové Kostoľany",
"Jelenec",
"Jelka",
"Jelšava",
"Jelšovce",
"Jelšovec",
"Jenkovce",
"Jesenské",
"Jesenské",
"Jestice",
"Ješkova Ves",
"Jezersko",
"Jovice",
"Jovsa",
"Jur nad Hronom",
"Jurkova Voľa",
"Jurová",
"Jurské",
"Juskova Voľa",
"Kačanov",
"Kajal",
"Kalameny",
"Kalinkovo",
"Kalinov",
"Kalinovo",
"Kalná nad Hronom",
"Kalná Roztoka",
"Kálnica",
"Kalnište",
"Kalonda",
"Kalša",
"Kaloša",
"Kaluža",
"Kaľamenová",
"Kaľava",
"Kamanová",
"Kamenec pod Vtáčnikom",
"Kamenica",
"Kamenica nad Cirochou",
"Kamenica nad Hronom",
"Kameničany",
"Kameničná",
"Kamenín",
"Kamenná Poruba",
"Kamenná Poruba",
"Kamenné Kosihy",
"Kamenný Most",
"Kameňany",
"Kamienka",
"Kamienka",
"Kanianka",
"Kapišová",
"Kaplna",
"Kapušany",
"Kapušianske Kľačany",
"Karlová",
"Karná",
"Kašov",
"Kátlovce",
"Kátov",
"Kazimír",
"Kecerovce",
"Kecerovský Lipovec",
"Kečkovce",
"Kečovo",
"Kechnec",
"Kendice",
"Kesovce",
"Keť",
"Kežmarok",
"Kiarov",
"Kladzany",
"Klasov",
"Kláštor pod Znievom",
"Klátova Nová Ves",
"Klčov",
"Klenov",
"Klenová",
"Klenovec",
"Kleňany",
"Klieština",
"Klin",
"Klin nad Bodrogom",
"Klížska Nemá",
"Klokoč",
"Klokočov",
"Klokočov",
"Klubina",
"Kluknava",
"Kľačany",
"Kľače",
"Kľačno",
"Kľak",
"Kľúčovec",
"Kľušov",
"Kmeťovo",
"Kobeliarovo",
"Kobylnice",
"Kobyly",
"Koceľovce",
"Kociha",
"Kocurany",
"Kočín - Lančár",
"Kočovce",
"Kochanovce",
"Kochanovce",
"Kojatice",
"Kojšov",
"Kokava nad Rimavicou",
"Kokošovce",
"Kokšov - Bakša",
"Kolačkov",
"Kolačno",
"Koláre",
"Kolárovice",
"Kolárovo",
"Kolbasov",
"Kolbovce",
"Kolibabovce",
"Kolinovce",
"Kolíňany",
"Kolonica",
"Kolta",
"Komárany",
"Komárno",
"Komárov",
"Komárovce",
"Komjatice",
"Komjatná",
"Komoča",
"Koniarovce",
"Konrádovce",
"Konská",
"Konská",
"Koňuš",
"Kopčany",
"Kopernica",
"Koplotovce",
"Koprivnica",
"Kordíky",
"Korejovce",
"Korňa",
"Koromľa",
"Korunková",
"Korytárky",
"Korytné",
"Kosihovce",
"Kosihy nad Ipľom",
"Kosorín",
"Kostolec",
"Kostolište",
"Kostolná pri Dunaji",
"Kostolná Ves",
"Kostolná - Záriečie",
"Kostolné",
"Kostolné Kračany",
"Kostoľany pod Tribečom",
"Koš",
"Košariská",
"Košarovce",
"Košeca",
"Košecké Podhradie",
"Košice",
"Okres Košice II",
"Okres Košice III",
"Okres Košice IV",
"Košická Belá",
"Košická Polianka",
"Košické Oľšany",
"Košický Klečenov",
"Koškovce",
"Košolná",
"Košúty",
"Košťany nad Turcom",
"Kotešová",
"Kotmanová",
"Kotrčiná Lúčka",
"Kováčová",
"Kováčová",
"Kováčovce",
"Koválov",
"Koválovec",
"Kovarce",
"Kozárovce",
"Kozelník",
"Kozí Vrbovok",
"Kožany",
"Kožuchov",
"Kožuchovce",
"Kračúnovce",
"Krahule",
"Krajná Bystrá",
"Krajná Poľana",
"Krajná Porúbka",
"Krajné",
"Krajné Čierno",
"Krakovany",
"Králiky",
"Kráľ",
"Kráľov Brod",
"Kráľova Lehota",
"Kráľová nad Váhom",
"Kráľová pri Senci",
"Kraľovany",
"Kráľovce",
"Kráľovce - Krnišov",
"Kráľovičove Kračany",
"Kráľovský Chlmec",
"Kraskovo",
"Krásna Lúka",
"Krásna Ves",
"Krásno",
"Krásno nad Kysucou",
"Krásnohorská Dlhá Lúka",
"Krásnohorské Podhradie",
"Krásnovce",
"Krásny Brod",
"Krasňany",
"Kravany",
"Kravany",
"Kravany nad Dunajom",
"Krčava",
"Kremná",
"Kremnica",
"Kremnické Bane",
"Kristy",
"Krišľovce",
"Krišovská Liesková",
"Krivá",
"Krivany",
"Kriváň",
"Krivé",
"Krivoklát",
"Krivosúd - Bodovka",
"Kríže",
"Krížová Ves",
"Krížovany",
"Križovany nad Dudváhom",
"Krná",
"Krnča",
"Krokava",
"Krompachy",
"Krpeľany",
"Krškany",
"Krtovce",
"Kručov",
"Krupina",
"Krušetnica",
"Krušinec",
"Krušovce",
"Kružlov",
"Kružlová",
"Kružná",
"Kružno",
"Kšinná",
"Kubáňovo",
"Kučín",
"Kučín",
"Kuchyňa",
"Kuklov",
"Kuková",
"Kukučínov",
"Kunerad",
"Kunešov",
"Kunova Teplica",
"Kuraľany",
"Kurima",
"Kurimany",
"Kurimka",
"Kurov",
"Kusín",
"Kútniky",
"Kúty",
"Kuzmice",
"Kuzmice",
"Kvačany",
"Kvačany",
"Kvakovce",
"Kvašov",
"Kvetoslavov",
"Kyjatice",
"Kyjov",
"Kynceľová",
"Kysak",
"Kyselica",
"Kysta",
"Kysucké Nové Mesto",
"Kysucký Lieskovec",
"Láb",
"Lackov",
"Lacková",
"Lada",
"Ladce",
"Ladice",
"Ladmovce",
"Ladomerská Vieska",
"Ladomirov",
"Ladomirová",
"Ladzany",
"Lakšárska Nová Ves",
"Lascov",
"Laskár",
"Lastomír",
"Lastovce",
"Laškovce",
"Látky",
"Lazany",
"Lazisko",
"Lazy pod Makytou",
"Lažany",
"Lednica",
"Lednické Rovne",
"Legnava",
"Lehnice",
"Lehota",
"Lehota nad Rimavicou",
"Lehota pod Vtáčnikom",
"Lehôtka",
"Lehôtka pod Brehmi",
"Lechnica",
"Lekárovce",
"Leles",
"Leľa",
"Lemešany",
"Lenartov",
"Lenartovce",
"Lendak",
"Lenka",
"Lentvora",
"Leopoldov",
"Lesenice",
"Lesíček",
"Lesné",
"Lesnica",
"Leštiny",
"Lešť (vojenský obvod)",
"Letanovce",
"Letničie",
"Leváre",
"Levice",
"Levkuška",
"Levoča",
"Ležiachov",
"Libichava",
"Licince",
"Ličartovce",
"Liesek",
"Lieskovany",
"Lieskovec",
"Lieskovec",
"Liešno",
"Liešťany",
"Lietava",
"Lietavská Lúčka",
"Lietavská Svinná - Babkov",
"Likavka",
"Limbach",
"Lipany",
"Lipník",
"Lipníky",
"Lipová",
"Lipová",
"Lipovany",
"Lipovce",
"Lipové",
"Lipovec",
"Lipovec",
"Lipovník",
"Lipovník",
"Liptovská Anna",
"Liptovská Kokava",
"Liptovská Lúžna",
"Liptovská Osada",
"Liptovská Porúbka",
"Liptovská Sielnica",
"Liptovská Štiavnica",
"Liptovská Teplá",
"Liptovská Teplička",
"Liptovské Beharovce",
"Liptovské Kľačany",
"Liptovské Matiašovce",
"Liptovské Revúce",
"Liptovské Sliače",
"Liptovský Hrádok",
"Liptovský Ján",
"Liptovský Michal",
"Liptovský Mikuláš",
"Liptovský Ondrej",
"Liptovský Peter",
"Liptovský Trnovec",
"Lisková",
"Lišov",
"Litava",
"Litmanová",
"Livina",
"Livinské Opatovce",
"Livov",
"Livovská Huta",
"Lodno",
"Lok",
"Lokca",
"Lom nad Rimavicou",
"Lomná",
"Lomné",
"Lomnička",
"Lontov",
"Lopašov",
"Lopúchov",
"Lopušné Pažite",
"Lošonec",
"Lovce",
"Lovča",
"Lovčica - Trubín",
"Lovinobaňa",
"Lozorno",
"Ložín",
"Lubeník",
"Lubina",
"Lúč na Ostrove",
"Lučatín",
"Lučenec",
"Lúčina",
"Lučivná",
"Lúčka",
"Lúčka",
"Lúčka",
"Lúčka",
"Lúčky",
"Lúčky",
"Lúčky",
"Lúčnica nad Žitavou",
"Ludanice",
"Ludrová",
"Luhyňa",
"Lúka",
"Lukačovce",
"Lukáčovce",
"Lukavica",
"Lukavica",
"Lukov",
"Lukovištia",
"Lúky",
"Lula",
"Lupoč",
"Lutila",
"Lutiše",
"Lužany",
"Lužany pri Topli",
"Lužianky",
"Lysá pod Makytou",
"Lysica",
"Ľubá",
"Ľubela",
"Ľubica",
"Ľubietová",
"Ľubiša",
"Ľubochňa",
"Ľuboreč",
"Ľuboriečka",
"Ľubotice",
"Ľubotín",
"Ľubovec",
"Ľudovítová",
"Ľutina",
"Ľutov",
"Macov",
"Mad",
"Madunice",
"Magnezitovce",
"Machulince",
"Majcichov",
"Majere",
"Majerovce",
"Makov",
"Makovce",
"Malacky",
"Malachov",
"Malá Čalomija",
"Malá Čausa",
"Malá Čierna",
"Malá Domaša",
"Malá Franková",
"Malá Hradná",
"Malá Ida",
"Malá Lehota",
"Malá Lodina",
"Malá nad Hronom",
"Malá Poľana",
"Malá Tŕňa",
"Málaš",
"Malatiná",
"Malatíny",
"Malcov",
"Malčice",
"Malé Borové",
"Malé Dvorníky",
"Malé Chyndice",
"Malé Hoste",
"Malé Kosihy",
"Malé Kozmálovce",
"Malé Kršteňany",
"Malé Lednice",
"Malé Leváre",
"Malé Ludince",
"Malé Ozorovce",
"Malé Raškovce",
"Malé Ripňany",
"Malé Straciny",
"Malé Trakany",
"Malé Uherce",
"Malé Vozokany",
"Malé Zálužie",
"Malé Zlievce",
"Málinec",
"Malinová",
"Malinovo",
"Malužiná",
"Malý Cetín",
"Malý Čepčín",
"Malý Horeš",
"Malý Kamenec",
"Malý Krtíš",
"Malý Lapáš",
"Malý Lipník",
"Malý Slavkov",
"Malý Slivník",
"Malý Šariš",
"Malženice",
"Mankovce",
"Maňa",
"Marcelová",
"Margecany",
"Marhaň",
"Marianka",
"Markovce",
"Markuška",
"Markušovce",
"Maršová - Rašov",
"Martin",
"Martin nad Žitavou",
"Martinček",
"Martinová",
"Martovce",
"Mašková",
"Maškovce",
"Matejovce nad Hornádom",
"Matiaška",
"Matiašovce",
"Matovce",
"Matúškovo",
"Matysová",
"Maťovské Vojkovce",
"Medovarce",
"Medvedie",
"Medveďov",
"Medzany",
"Medzev",
"Medzianky",
"Medzibrod",
"Medzibrodie nad Oravou",
"Medzilaborce",
"Melčice - Lieskové",
"Melek",
"Meliata",
"Mengusovce",
"Merašice",
"Merník",
"Mestečko",
"Mestisko",
"Mičakovce",
"Mierovo",
"Miezgovce",
"Michajlov",
"Michal na Ostrove",
"Michal nad Žitavou",
"Michalková",
"Michalok",
"Michalová",
"Michalovce",
"Michaľany",
"Miklušovce",
"Miková",
"Mikulášová",
"Mikušovce",
"Mikušovce",
"Milhosť",
"Miloslavov",
"Milpoš",
"Miňovce",
"Mirkovce",
"Miroľa",
"Mládzovo",
"Mlynárovce",
"Mlynčeky",
"Mlynica",
"Mlynky",
"Mníchova Lehota",
"Mníšek nad Hnilcom",
"Mníšek nad Popradom",
"Moča",
"Močenok",
"Močiar",
"Modra",
"Modra nad Cirochou",
"Modrany",
"Modrová",
"Modrovka",
"Modrý Kameň",
"Mojmírovce",
"Mojš",
"Mojtín",
"Mojzesovo",
"Mokrá Lúka",
"Mokrance",
"Mokroluh",
"Mokrý Háj",
"Moldava nad Bodvou",
"Moravany",
"Moravany nad Váhom",
"Moravské Lieskové",
"Moravský Svätý Ján",
"Most pri Bratislave",
"Mostová",
"Moškovec",
"Mošovce",
"Moštenica",
"Mošurov",
"Motešice",
"Motyčky",
"Môlča",
"Mrázovce",
"Mučín",
"Mudroňovo",
"Mudrovce",
"Muľa",
"Muráň",
"Muránska Dlhá Lúka",
"Muránska Huta",
"Muránska Lehota",
"Muránska Zdychava",
"Mútne",
"Mužla",
"Myjava",
"Myslina",
"Mýtna",
"Mýtne Ludany",
"Mýto pod Ďumbierom",
"Nacina Ves",
"Nadlice",
"Naháč",
"Nálepkovo",
"Námestovo",
"Nána",
"Nandraž",
"Necpaly",
"Nedanovce",
"Nedašovce",
"Neded",
"Nededza",
"Nedožery - Brezany",
"Nechválova Polianka",
"Nemce",
"Nemcovce",
"Nemcovce",
"Nemčice",
"Nemčiňany",
"Nemecká",
"Nemečky",
"Nemešany",
"Nemšová",
"Nenince",
"Neporadza",
"Neporadza",
"Nesvady",
"Nesluša",
"Neverice",
"Nevidzany",
"Nevidzany",
"Nevoľné",
"Nezbudská Lúčka",
"Nimnica",
"Nitra",
"Nitra nad Ipľom",
"Nitrianska Blatnica",
"Nitrianska Streda",
"Nitrianske Hrnčiarovce",
"Nitrianske Pravno",
"Nitrianske Rudno",
"Nitrianske Sučany",
"Nitrica",
"Nižná",
"Nižná",
"Nižná Boca",
"Nižná Hutka",
"Nižná Jablonka",
"Nižná Jedľová",
"Nižná Kamenica",
"Nižná Myšľa",
"Nižná Olšava",
"Nižná Pisaná",
"Nižná Polianka",
"Nižná Rybnica",
"Nižná Sitnica",
"Nižná Slaná",
"Nižná Voľa",
"Nižné Ladičkovce",
"Nižné Nemecké",
"Nižné Repaše",
"Nižné Ružbachy",
"Nižný Čaj",
"Nižný Hrabovec",
"Nižný Hrušov",
"Nižný Klátov",
"Nižný Komárnik",
"Nižný Kručov",
"Nižný Lánec",
"Nižný Mirošov",
"Nižný Orlík",
"Nižný Skálnik",
"Nižný Slavkov",
"Nižný Tvarožec",
"Nižný Žipov",
"Nolčovo",
"Norovce",
"Nová Baňa",
"Nová Bašta",
"Nová Bošáca",
"Nová Bystrica",
"Nová Dedina",
"Nová Dedinka",
"Nová Dubnica",
"Nová Kelča",
"Nová Lehota",
"Nová Lesná",
"Nová Ľubovňa",
"Nová Polhora",
"Nová Polianka",
"Nová Sedlica",
"Nová Ves",
"Nová Ves nad Váhom",
"Nová Ves nad Žitavou",
"Nová Vieska",
"Nováčany",
"Nováky",
"Nové Hony",
"Nové Mesto nad Váhom",
"Nové Sady",
"Nové Zámky",
"Novosad",
"Novoť",
"Nový Ruskov",
"Nový Salaš",
"Nový Tekov",
"Nový Život",
"Nýrovce",
"Ňagov",
"Ňárad",
"Obeckov",
"Obišovce",
"Oborín",
"Obručné",
"Obyce",
"Očkov",
"Očová",
"Odorín",
"Ohrady",
"Ohradzany",
"Ochodnica",
"Ochtiná",
"Okoč",
"Okoličná na Ostrove",
"Okrúhle",
"Okružná",
"Olcnava",
"Olejníkov",
"Olešná",
"Olováry",
"Olšovany",
"Oľdza",
"Oľka",
"Oľšavce",
"Oľšavica",
"Oľšavka",
"Oľšavka",
"Oľšinkov",
"Oľšov",
"Omastiná",
"Omšenie",
"Ondavka",
"Ondavské Matiašovce",
"Ondrašovce",
"Ondrašová",
"Ondrejovce",
"Opátka",
"Opatovce",
"Opatovce nad Nitrou",
"Opatovská Nová Ves",
"Opava",
"Opiná",
"Opoj",
"Oponice",
"Oravce",
"Orávka",
"Oravská Jasenica",
"Oravská Lesná",
"Oravská Polhora",
"Oravská Poruba",
"Oravský Biely Potok",
"Oravský Podzámok",
"Ordzovany",
"Orechová",
"Orechová Potôň",
"Oravské Veselé",
"Oreské",
"Oreské",
"Orešany",
"Orlov",
"Orovnica",
"Ortuťová",
"Osádka",
"Osadné",
"Osikov",
"Oslany",
"Osrblie",
"Ostrá Lúka",
"Ostratice",
"Ostrov",
"Ostrov",
"Ostrovany",
"Ostrý Grúň",
"Osturňa",
"Osuské",
"Oščadnica",
"Otrhánky",
"Otročok",
"Ovčiarsko",
"Ovčie",
"Ozdín",
"Ožďany",
"Pača",
"Padáň",
"Padarovce",
"Pakostov",
"Palárikovo",
"Palín",
"Palota",
"Panické Dravce",
"Paňa",
"Paňovce",
"Papín",
"Papradno",
"Parchovany",
"Parihuzovce",
"Párnica",
"Partizánska Ľupča",
"Partizánske",
"Pastovce",
"Pastuchov",
"Pašková",
"Paština Závada",
"Pata",
"Pataš",
"Pavčina Lehota",
"Pavlice",
"Pavlová",
"Pavlova Ves",
"Pavlovce",
"Pavlovce",
"Pavlovce nad Uhom",
"Pavľany",
"Pažiť",
"Pčoliné",
"Pečenice",
"Pečeňady",
"Pečeňany",
"Pečovská Nová Ves",
"Peder",
"Perín - Chym",
"Pernek",
"Petkovce",
"Petrikovce",
"Petrová",
"Petrova Lehota",
"Petrova Ves",
"Petrovany",
"Petrovce",
"Petrovce",
"Petrovce",
"Petrovce nad Laborcom",
"Petrovice",
"Petrovo",
"Pezinok",
"Piešťany",
"Pichne",
"Píla",
"Píla",
"Píla",
"Pinciná",
"Pinkovce",
"Piskorovce",
"Pitelová",
"Plášťovce",
"Plavé Vozokany",
"Plavecké Podhradie",
"Plavecký Mikuláš",
"Plavecký Peter",
"Plavecký Štvrtok",
"Plaveč",
"Plavnica",
"Plechotice",
"Pleš",
"Plešivec",
"Plevník - Drienové",
"Pliešovce",
"Ploské",
"Ploské",
"Pobedim",
"Počarová",
"Počúvadlo",
"Podbiel",
"Podbranč",
"Podbrezová",
"Podhájska",
"Podhorany",
"Podhorany",
"Podhorany",
"Podhorie",
"Podhorie",
"Podhoroď",
"Podhradie",
"Podhradie",
"Podhradie",
"Podhradík",
"Podkonice",
"Podkriváň",
"Podkylava",
"Podlužany",
"Podlužany",
"Podolie",
"Podolínec",
"Podrečany",
"Podskalie",
"Podtureň",
"Podvysoká",
"Podzámčok",
"Pohorelá",
"Pohranice",
"Pohronská Polhora",
"Pohronský Bukovec",
"Pohronský Ruskov",
"Pochabany",
"Pokryváč",
"Poliakovce",
"Polianka",
"Polichno",
"Polina",
"Poloma",
"Polomka",
"Poltár",
"Poluvsie",
"Poľanovce",
"Poľany",
"Poľný Kesov",
"Pongrácovce",
"Poniky",
"Poprad",
"Poproč",
"Poproč",
"Popudinské Močidľany",
"Poráč",
"Poriadie",
"Porostov",
"Poruba",
"Poruba pod Vihorlatom",
"Porúbka",
"Porúbka",
"Porúbka",
"Porúbka",
"Poša",
"Potok",
"Potok",
"Potoky",
"Potôčky",
"Potvorice",
"Považany",
"Považská Bystrica",
"Povina",
"Povoda",
"Povrazník",
"Pozba",
"Pozdišovce",
"Pôtor",
"Praha",
"Prakovce",
"Prašice",
"Prašník",
"Pravenec",
"Pravica",
"Pravotice",
"Práznovce",
"Prečín",
"Predajná",
"Predmier",
"Prenčov",
"Preseľany",
"Prestavlky",
"Prešov",
"Príbelce",
"Pribeník",
"Pribeta",
"Pribiš",
"Príbovce",
"Pribylina",
"Priechod",
"Priekopa",
"Priepasné",
"Prietrž",
"Prietržka",
"Prievaly",
"Prievidza",
"Prihradzany",
"Príkra",
"Príslop",
"Prituľany",
"Proč",
"Prochot",
"Prosačov",
"Prosiek",
"Prša",
"Pruské",
"Prusy",
"Pružina",
"Pstriná",
"Ptičie",
"Ptrukša",
"Pucov",
"Púchov",
"Pukanec",
"Pusté Čemerné",
"Pusté Pole",
"Pusté Sady",
"Pusté Úľany",
"Pušovce",
"Rabča",
"Rabčice",
"Rad",
"Radatice",
"Radava",
"Radimov",
"Radnovce",
"Radobica",
"Radoľa",
"Radoma",
"Radošina",
"Radošovce",
"Radošovce",
"Radôstka",
"Radvanovce",
"Radvaň nad Dunajom",
"Radvaň nad Laborcom",
"Radzovce",
"Rafajovce",
"Rajčany",
"Rajec",
"Rajecká Lesná",
"Rajecké Teplice",
"Rákoš",
"Rákoš",
"Raková",
"Rakovčík",
"Rakovec nad Ondavou",
"Rakovice",
"Rakovnica",
"Rakovo",
"Rakša",
"Rakúsy",
"Rakytník",
"Rankovce",
"Rapovce",
"Raslavice",
"Rastislavice",
"Rašice",
"Ratka",
"Ratková",
"Ratkovce",
"Ratkovo",
"Ratkovská Lehota",
"Ratkovská Suchá",
"Ratkovské Bystré",
"Ratnovce",
"Ratvaj",
"Ráztočno",
"Ráztoka",
"Ražňany",
"Reca",
"Regetovka",
"Rejdová",
"Reľov",
"Remeniny",
"Remetské Hámre",
"Renčišov",
"Repejov",
"Repište",
"Rešica",
"Rešov",
"Revúca",
"Revúcka Lehota",
"Riečka",
"Riečka",
"Richnava",
"Richvald",
"Rimavská Baňa",
"Rimavská Seč",
"Rimavská Sobota",
"Rimavské Brezovo",
"Rimavské Janovce",
"Rimavské Zalužany",
"Rohov",
"Rohovce",
"Rohožník",
"Rohožník",
"Rochovce",
"Rokycany",
"Rokytov",
"Rokytov pri Humennom",
"Rokytovce",
"Rosina",
"Roškovce",
"Roštár",
"Rovensko",
"Rovinka",
"Rovné",
"Rovné",
"Rovné",
"Rovňany",
"Rozhanovce",
"Rozložná",
"Roztoky",
"Rožkovany",
"Rožňava",
"Rožňavské Bystré",
"Rúbaň",
"Rudina",
"Rudinka",
"Rudinská",
"Rudlov",
"Rudná",
"Rudnianska Lehota",
"Rudník",
"Rudník",
"Rudno",
"Rudno nad Hronom",
"Rudňany",
"Rumanová",
"Rumince",
"Runina",
"Ruská",
"Ruská Bystrá",
"Ruská Kajňa",
"Ruská Nová Ves",
"Ruská Poruba",
"Ruská Volová",
"Ruská Voľa",
"Ruská Voľa nad Popradom",
"Ruskov",
"Ruskovce",
"Ruskovce",
"Ruský Hrabovec",
"Ruský Potok",
"Ružiná",
"Ružindol",
"Ružomberok",
"Rybany",
"Rybky",
"Rybník",
"Rybník",
"Rykynčice",
"Sabinov",
"Sačurov",
"Sádočné",
"Sady nad Torysou",
"Salka",
"Santovka",
"Sap",
"Sása",
"Sása",
"Sasinkovo",
"Sazdice",
"Sebedín - Bečov",
"Sebedražie",
"Sebechleby",
"Seč",
"Sečianky",
"Sečovce",
"Sečovská Polianka",
"Sedliacka Dubová",
"Sedliská",
"Sedmerovec",
"Sejkov",
"Sekule",
"Selce",
"Selce",
"Selce",
"Selec",
"Selice",
"Seľany",
"Semerovo",
"Senec",
"Seniakovce",
"Senica",
"Senné",
"Senné",
"Senohrad",
"Seňa",
"Sereď",
"Sielnica",
"Sihelné",
"Sihla",
"Sikenica",
"Sikenička",
"Siladice",
"Silica",
"Silická Brezová",
"Silická Jablonica",
"Sirk",
"Sirník",
"Skačany",
"Skalica",
"Skalité",
"Skalka nad Váhom",
"Skároš",
"Skerešovo",
"Sklabiná",
"Sklabinský Podzámok",
"Sklabiňa",
"Sklené",
"Sklené Teplice",
"Skrabské",
"Skýcov",
"Sládkovičovo",
"Slančík",
"Slanec",
"Slanská Huta",
"Slanské Nové Mesto",
"Slaská",
"Slatina",
"Slatina nad Bebravou",
"Slatinka nad Bebravou",
"Slatinské Lazy",
"Slatvina",
"Slavec",
"Slavkovce",
"Slavnica",
"Slavoška",
"Slavošovce",
"Slepčany",
"Sliač",
"Sliepkovce",
"Slizké",
"Slivník",
"Slopná",
"Slovany",
"Slovenská Kajňa",
"Slovenská Ľupča",
"Slovenská Nová Ves",
"Slovenská Ves",
"Slovenská Volová",
"Slovenské Ďarmoty",
"Slovenské Kľačany",
"Slovenské Krivé",
"Slovenské Nové Mesto",
"Slovenské Pravno",
"Slovenský Grob",
"Slovinky",
"Sľažany",
"Smilno",
"Smižany",
"Smolenice",
"Smolinské",
"Smolnícka Huta",
"Smolník",
"Smrdáky",
"Smrečany",
"Snakov",
"Snežnica",
"Snina",
"Socovce",
"Soblahov",
"Soboš",
"Sobotište",
"Sobrance",
"Sokolce",
"Sokolovce",
"Sokoľ",
"Sokoľany",
"Solčany",
"Solčianky",
"Sološnica",
"Soľ",
"Soľnička",
"Soľník",
"Somotor",
"Sopkovce",
"Spišská Belá",
"Spišská Nová Ves",
"Spišská Stará Ves",
"Spišská Teplica",
"Spišské Bystré",
"Spišské Hanušovce",
"Spišské Podhradie",
"Spišské Tomášovce",
"Spišské Vlachy",
"Spišský Hrhov",
"Spišský Hrušov",
"Spišský Štiavnik",
"Spišský Štvrtok",
"Stakčín",
"Stakčínska Roztoka",
"Stanča",
"Stankovany",
"Stankovce",
"Stará Bašta",
"Stará Bystrica",
"Stará Halič",
"Stará Huta",
"Stará Kremnička",
"Stará Lehota",
"Stará Lesná",
"Stará Ľubovňa",
"Stará Myjava",
"Stará Turá",
"Stará Voda",
"Staré",
"Staré Hory",
"Starina",
"Starý Hrádok",
"Starý Tekov",
"Staškov",
"Staškovce",
"Stebnícka Huta",
"Stebník",
"Stožok",
"Stráne pod Tatrami",
"Stránska",
"Stránske",
"Stráňany",
"Stráňavy",
"Stratená",
"Stráža",
"Strážne",
"Strážske",
"Strečno",
"Streda nad Bodrogom",
"Stredné Plachtince",
"Strekov",
"Strelníky",
"Stretava",
"Stretavka",
"Streženice",
"Strihovce",
"Stročín",
"Stropkov",
"Studená",
"Studenec",
"Studienka",
"Stuľany",
"Stupava",
"Stupné",
"Sučany",
"Sudince",
"Súdovce",
"Suchá Dolina",
"Suchá Hora",
"Suchá nad Parnou",
"Sucháň",
"Suché",
"Suché Brezovo",
"Suchohrad",
"Sukov",
"Sulín",
"Súlovce",
"Súľov - Hradná",
"Sušany",
"Sútor",
"Svätá Mária",
"Svätoplukovo",
"Svätuš",
"Svätuše",
"Svätý Anton",
"Svätý Jur",
"Svätý Kríž",
"Svätý Peter",
"Svederník",
"Sverepec",
"Sveržov",
"Svetlice",
"Svidnička",
"Svidník",
"Svinia",
"Svinica",
"Svinice",
"Svinná",
"Svit",
"Svodín",
"Svrbice",
"Svrčinovec",
"Šahy",
"Šajdíkove Humence",
"Šalgovce",
"Šalgočka",
"Šalov",
"Šaľa",
"Šambron",
"Šamorín",
"Šamudovce",
"Šandal",
"Šarbov",
"Šarišská Poruba",
"Šarišská Trstená",
"Šarišské Bohdanovce",
"Šarišské Čierne",
"Šarišské Dravce",
"Šarišské Jastrabie",
"Šarišské Michaľany",
"Šarišské Sokolovce",
"Šarišský Štiavnik",
"Šarkan",
"Šarovce",
"Šašová",
"Šaštín - Stráže",
"Šávoľ",
"Šelpice",
"Šemetkovce",
"Šemša",
"Šenkvice",
"Šiatorská Bukovinka",
"Šiba",
"Šíd",
"Šimonovce",
"Šindliar",
"Šintava",
"Šípkov",
"Šípkové",
"Širákov",
"Širkovce",
"Široké",
"Šišov",
"Šivetice",
"Šmigovec",
"Šoltýska",
"Šoporňa",
"Špačince",
"Špania Dolina",
"Španie Pole",
"Šrobárová",
"Štefanov",
"Štefanov nad Oravou",
"Štefanová",
"Štefanovce",
"Štefanovce",
"Štefanovičová",
"Štefurov",
"Šterusy",
"Štiavnické Bane",
"Štiavnička",
"Štiavnik",
"Štítnik",
"Štós",
"Štôla",
"Štrba",
"Štrkovec",
"Štúrovo",
"Štvrtok",
"Štvrtok na Ostrove",
"Šuľa",
"Šumiac",
"Šuňava",
"Šurany",
"Šurianky",
"Šurice",
"Šúrovce",
"Šútovo",
"Šútovce",
"Švábovce",
"Švedlár",
"Švošov",
"Tachty",
"Tajná",
"Tajov",
"Tarnov",
"Tatranská Javorina",
"Tašuľa",
"Tehla",
"Tekolďany",
"Tekovská Breznica",
"Tekovské Lužany",
"Tekovské Nemce",
"Tekovský Hrádok",
"Telgárt",
"Telince",
"Temeš",
"Teplička",
"Teplička nad Váhom",
"Tepličky",
"Teplý Vrch",
"Terany",
"Terchová",
"Teriakovce",
"Terňa",
"Tesáre",
"Tesárske Mlyňany",
"Tešedíkovo",
"Tibava",
"Tichý Potok",
"Timoradza",
"Tisinec",
"Tisovec",
"Tlmače",
"Točnica",
"Tokajík",
"Tomášikovo",
"Tomášov",
"Tomášovce",
"Tomášovce",
"Topoľa",
"Topoľčany",
"Topoľčianky",
"Topoľnica",
"Topoľníky",
"Topoľovka",
"Toporec",
"Tornaľa",
"Torysa",
"Torysky",
"Tovarné",
"Tovarnianska Polianka",
"Tovarníky",
"Tôň",
"Trakovice",
"Trávnica",
"Trávnik",
"Trebatice",
"Trebejov",
"Trebeľovce",
"Trebichava",
"Trebišov",
"Trebostovo",
"Trebušovce",
"Trenč",
"Trenčianska Teplá",
"Trenčianska Turná",
"Trenčianske Bohuslavice",
"Trenčianske Jastrabie",
"Trenčianske Mitice",
"Trenčianske Stankovce",
"Trenčianske Teplice",
"Trenčín",
"Trhová Hradská",
"Trhovište",
"Trnava",
"Trnavá Hora",
"Trnava pri Laborci",
"Trnávka",
"Trnávka",
"Trnkov",
"Trnovec",
"Trnovec nad Váhom",
"Trnovo",
"Tročany",
"Trpín",
"Trstená",
"Trstená na Ostrove",
"Trstené",
"Trstené pri Hornáde",
"Trstice",
"Trstín",
"Trsťany",
"Tŕnie",
"Tuhár",
"Tuhrina",
"Tuchyňa",
"Tulčík",
"Tupá",
"Turá",
"Turany",
"Turany nad Ondavou",
"Turcovce",
"Turček",
"Turčianky",
"Turčianska Štiavnička",
"Turčianske Jaseno",
"Turčianske Kľačany",
"Turčianske Teplice",
"Turčiansky Ďur",
"Turčiansky Peter",
"Turčok",
"Turecká",
"Tureň",
"Turie",
"Turík",
"Turnianska Nová Ves",
"Turňa nad Bodvou",
"Turová",
"Turzovka",
"Tušice",
"Tušická Nová Ves",
"Tužina",
"Tvarožná",
"Tvrdomestice",
"Tvrdošín",
"Tvrdošovce",
"Ťapešovo",
"Ubľa",
"Úbrež",
"Udavské",
"Udiča",
"Údol",
"Uhliská",
"Úhorná",
"Uhorská Ves",
"Uhorské",
"Uhrovec",
"Uhrovské Podhradie",
"Ulič",
"Uličské Krivé",
"Uloža",
"Úľany nad Žitavou",
"Unín",
"Uňatín",
"Urmince",
"Utekáč",
"Uzovce",
"Uzovská Panica",
"Uzovské Pekľany",
"Uzovský Šalgov",
"Vaďovce",
"Vagrinec",
"Váhovce",
"Vajkovce",
"Valaliky",
"Valaská",
"Valaská Belá",
"Valaská Dubová",
"Valaškovce (vojenský obvod)",
"Valča",
"Valentovce",
"Valice",
"Valkovce",
"Vaľkovňa",
"Vaniškovce",
"Vápeník",
"Varadka",
"Varechovce",
"Varhaňovce",
"Varín",
"Vasiľov",
"Vavrečka",
"Vavrinec",
"Vavrišovo",
"Važec",
"Vechec",
"Velčice",
"Veličná",
"Velušovce",
"Veľaty",
"Veľká Čausa",
"Veľká Čierna",
"Veľká Dolina",
"Veľká Franková",
"Veľká Hradná",
"Veľká Ida",
"Veľká Lesná",
"Veľká Lodina",
"Veľká Lomnica",
"Veľká Mača",
"Veľká Paka",
"Veľká Tŕňa",
"Veľké Bierovce",
"Veľké Blahovo",
"Veľké Borové",
"Veľké Držkovce",
"Veľké Dvorany",
"Veľké Dvorníky",
"Veľké Hoste",
"Veľké Chlievany",
"Veľké Chyndice",
"Veľké Kapušany",
"Veľké Kosihy",
"Veľké Kostoľany",
"Veľké Kozmálovce",
"Veľké Kršteňany",
"Veľké Leváre",
"Veľké Lovce",
"Veľké Ludince",
"Veľké Orvište",
"Veľké Ozorovce",
"Veľké Raškovce",
"Veľké Revištia",
"Veľké Ripňany",
"Veľké Rovné",
"Veľké Slemence",
"Veľké Trakany",
"Veľké Turovce",
"Veľké Uherce",
"Veľké Úľany",
"Veľké Vozokany",
"Veľké Zálužie",
"Veľkrop",
"Veľký Biel",
"Veľký Cetín",
"Veľký Čepčín",
"Veľký Ďur",
"Veľký Folkmar",
"Veľký Grob",
"Veľký Horeš",
"Veľký Kamenec",
"Veľký Klíž",
"Veľký Krtíš",
"Veľký Kýr",
"Veľký Lapáš",
"Veľký Lipník",
"Veľký Meder",
"Veľký Slavkov",
"Veľký Slivník",
"Veľký Šariš",
"Veľopolie",
"Vernár",
"Veselé",
"Veterná Poruba",
"Vieska",
"Vieska",
"Vieska nad Žitavou",
"Vikartovce",
"Vinica",
"Viničky",
"Viničné",
"Vinné",
"Vinodol",
"Vinohrady nad Váhom",
"Vinosady",
"Virt",
"Vislanka",
"Vislava",
"Visolaje",
"Višňov",
"Višňové",
"Višňové",
"Vištuk",
"Vitanová",
"Vítkovce",
"Víťaz",
"Víťazovce",
"Vlača",
"Vladiča",
"Vlachovo",
"Vlachy",
"Vlčany",
"Vlčkovce",
"Vlkas",
"Vlková",
"Vlkovce",
"Vlky",
"Voderady",
"Vojany",
"Vojčice",
"Vojka",
"Vojka nad Dunajom",
"Vojkovce",
"Vojnatina",
"Vojňany",
"Vojtovce",
"Volica",
"Volkovce",
"Voľa",
"Vozokany",
"Vozokany",
"Vráble",
"Vrádište",
"Vrakúň",
"Vranov nad Topľou",
"Vrbnica",
"Vrbov",
"Vrbovce",
"Vrbová nad Váhom",
"Vrbové",
"Vrchteplá",
"Vrícko",
"Vršatské Podhradie",
"Vrútky",
"Vtáčkovce",
"Výborná",
"Výčapy - Opatovce",
"Vydrany",
"Vydrná",
"Vydrník",
"Východná",
"Výrava",
"Vysočany",
"Vysoká",
"Vysoká",
"Vysoká nad Kysucou",
"Vysoká nad Uhom",
"Vysoká pri Morave",
"Vysoké Tatry",
"Vyškovce",
"Vyškovce nad Ipľom",
"Vyšná Boca",
"Vyšná Hutka",
"Vyšná Jablonka",
"Vyšná Jedľová",
"Vyšná Kamenica",
"Vyšná Myšľa",
"Vyšná Olšava",
"Vyšná Pisaná",
"Vyšná Polianka",
"Vyšná Rybnica",
"Vyšná Sitnica",
"Vyšná Slaná",
"Vyšná Šebastová",
"Vyšná Voľa",
"Vyšné Ladičkovce",
"Vyšné nad Hronom",
"Vyšné Nemecké",
"Vyšné Remety",
"Vyšné Repaše",
"Vyšné Ružbachy",
"Vyšný Čaj",
"Vyšný Hrabovec",
"Vyšný Hrušov",
"Vyšný Kazimír",
"Vyšný Klátov",
"Vyšný Komárnik",
"Vyšný Kručov",
"Vyšný Kubín",
"Vyšný Mirošov",
"Vyšný Orlík",
"Vyšný Slavkov",
"Vyšný Tvarožec",
"Vyšný Žipov",
"Zábiedovo",
"Záborie",
"Záborské",
"Zádiel",
"Záhor",
"Záhorie (vojenský obvod)",
"Záhorská Ves",
"Záhradné",
"Zákamenné",
"Zákopčie",
"Zalaba",
"Zálesie",
"Zálesie",
"Zalužice",
"Zamarovce",
"Zámutov",
"Záriečie",
"Záskalie",
"Zatín",
"Závada",
"Závada",
"Závadka",
"Závadka",
"Závadka",
"Zavar",
"Závažná Poruba",
"Závod",
"Zázrivá",
"Zbehňov",
"Zbehy",
"Zboj",
"Zbojné",
"Zborov",
"Zborov nad Bystricou",
"Zbrojníky",
"Zbudská Belá",
"Zbudské Dlhé",
"Zbudza",
"Zbyňov",
"Zeleneč",
"Zemianska Olča",
"Zemianske Kostoľany",
"Zemianske Podhradie",
"Zemianske Sady",
"Zemné",
"Zemplín",
"Zemplínska Nová Ves",
"Zemplínska Široká",
"Zemplínska Teplica",
"Zemplínske Hámre",
"Zemplínske Hradište",
"Zemplínske Jastrabie",
"Zemplínske Kopčany",
"Zemplínsky Branč",
"Zlatá Baňa",
"Zlatá Idka",
"Zlaté",
"Zlaté Klasy",
"Zlaté Moravce",
"Zlatná na Ostrove",
"Zlatník",
"Zlatníky",
"Zlatno",
"Zlatno",
"Zliechov",
"Zohor",
"Zubák",
"Zuberec",
"Zubné",
"Zubrohlava",
"Zvolen",
"Zvončín",
"Žabokreky",
"Žabokreky nad Nitrou",
"Žakarovce",
"Žakovce",
"Žalobín",
"Žarnov",
"Žarnovica",
"Žaškov",
"Žbince",
"Ždaňa",
"Ždiar",
"Žehňa",
"Žehra",
"Železník",
"Želiezovce",
"Želmanovce",
"Žemberovce",
"Žemliare",
"Žiar",
"Žiar",
"Žiar nad Hronom",
"Žihárec",
"Žikava",
"Žilina",
"Žipov",
"Žirany",
"Žitavany",
"Žitavce",
"Žitná - Radiša",
"Žlkovce",
"Župčany",
)
streets = (
"Adámiho",
"Agátová",
"Ahoj",
"Albánska",
"Albrechtova",
"Alejová",
"Alešova",
"Alstrova",
"Alžbetínska",
"Alžbety Gwerkovej",
"Amarelková",
"Ambroseho",
"Ambrova",
"Ambrušova",
"Americká",
"Americké námestie",
"Americké námestie",
"Amurská",
"Andreja Mráza",
"Andreja Plávku",
"Andrusovova",
"Anenská",
"Anenská",
"Anízová",
"Antická",
"Antolská",
"Arménska",
"Astronomická",
"Astrová",
"Avarská",
"Azalková",
"Azovská",
"Babuškova",
"Bagarova",
"Báger",
"Bahniatková",
"Bachova",
"Bajkalská",
"Bajkalská",
"Bajkalská",
"Bajkalská",
"Bajkalská",
"Bajkalská",
"Bajzova",
"Bakošova",
"Balkánska",
"Baltská",
"Bancíkovej",
"Banícka",
"Baničova",
"Baníkova",
"Banskobystrická",
"Banšelova",
"Bardejovská",
"Bárdošova",
"Barónka",
"Bartókova",
"Bartoňova",
"Bartoškova",
"Baštová",
"Batkova",
"Bazalková",
"Bazová",
"Bazovského",
"Bažantia",
"Beblavého",
"Bebravská",
"Beckovská",
"Bedľová",
"Begóniová",
"Belániková",
"Belehradská",
"Belianska",
"Belinského",
"Bellova",
"Belopotockého",
"Beňadická",
"Bencúrova",
"Benediktiho",
"Beniakova",
"Beňovského",
"Bernolákova",
"Beskydská",
"Betliarska",
"Bezekova",
"Bezručova",
"Biela",
"Bielkova",
"Bieloruská",
"Bilíkova",
"Biskupická",
"Björnsonova",
"Blagoevova",
"Blatnická",
"Blatúchová",
"Bleduľová",
"Blumentálska",
"Blyskáčová",
"Bočná",
"Bodliaková",
"Bodrocká",
"Bodvianska",
"Bohrova",
"Bohúňova",
"Bojnická",
"Boragová",
"Borekova",
"Borievková",
"Borinská",
"Borodáčova",
"Borovicová",
"Borská",
"Bosákova",
"Boskovičova",
"Bošániho",
"Botanická",
"Bottova",
"Boženy Němcovej",
"Bôrik",
"Bradáčova",
"Bradlianska",
"Brančská",
"Bratislava-Vinohrady",
"Bratislavská",
"Bratská",
"Brečtanová",
"Brestová",
"Brezová",
"Brezovská",
"Brežná",
"Bridlicová",
"Briežky",
"Brigádnická",
"Brižitská",
"Brnianska",
"Brodná",
"Brodská",
"Brokolicová",
"Bronzová",
"Broskyňová",
"Bršlenová",
"Brumovická",
"Brusnicová",
"Břeclavská",
"Bučinová",
"Budatínska",
"Budatínska",
"Budatínska",
"Búdkova cesta",
"Budovateľská",
"Budyšínska",
"Budyšínska",
"Bujnáková",
"Buková",
"Bukovinská",
"Bukureštská",
"Bulharská",
"Bulíkova",
"Bullova",
"Burgundská",
"Buzalkova",
"Bystrého",
"Bystrická",
"BzovIcka",
"Cabanova",
"Cablkova",
"Cádrova",
"Cesta mládeže",
"Cesta mládeže",
"Cesta na Červený most",
"Cesta na Červený most",
"Cesta na Kamzík",
"Cesta na Klanec",
"Cesta na Senec",
"Cígeľská",
"Cikkerova",
"Cintorínska",
"Cintulova",
"Colnícka",
"Cukrová",
"Cyklámenová",
"Cyprichova",
"Cyprichova",
"Cyrilova",
"Čachtická",
"Čajakova",
"Čajakova",
"Čajkovského",
"Čakanková",
"Čaklovská",
"Čalovská",
"Čapajevova",
"Čapkova",
"Čárskeho",
"Čavojského",
"Čečinová",
"Čelakovského",
"Čerešňová",
"Černicová",
"Černockého",
"Černockého",
"Černyševského",
"Červená",
"Červeňákova",
"Červeňova",
"Česká",
"Československých par",
"Československých tan",
"Čiernohorská",
"Čiernovodská",
"Čierny chodník",
"Čiližská",
"Čipkárska",
"Čmelíkova",
"Čmeľovec",
"Čremchová",
"Čučoriedková",
"Čulenova",
"Daliborovo námestie",
"Damborského",
"Dankovského",
"Dargovská",
"Ďatelinová",
"Daxnerovo námestie",
"Delená",
"Delená cesta",
"Demänovská",
"Desiata",
"Detvianska",
"Devätinová",
"Deviata",
"Devínska cesta",
"Devínska cesta - kam",
"Devínske jazero",
"Dlhá",
"Dlhé diely I.",
"Dlhé diely II.",
"Dlhé diely III.",
"Dneperská",
"Dobrovičova",
"Dobrovičova",
"Dobrovského",
"Dobšinského",
"Dohnalova",
"Dohnányho",
"Doležalova",
"Dolná",
"Dolné Koruny",
"Dolnokorunská",
"Dolnozemská cesta",
"Domašská",
"Domkárska",
"Domové role",
"Donnerova",
"Donovalova",
"Donská",
"Dopravná",
"Dorastenecká",
"Dostojevského rad",
"Dr. Vladimíra Clemen",
"Dražická",
"Drevená",
"Drieňová",
"Drieňová",
"Drieňová",
"Drobného",
"Drotárska cesta",
"Drotárska cesta",
"Drotárska cesta",
"Druhá",
"Druidská",
"Družicová",
"Družobná",
"Družstevná",
"Dubnická",
"Dubová",
"Dúbravčická",
"Dúbravská cesta",
"Dudova",
"Dudvážska",
"Dulovo námestie",
"Dulovo námestie",
"Ďumbierska",
"Dunajská",
"Ďurgalova",
"Dvanásta",
"Dvojkrížna",
"Dvojkrížna",
"Dvořákovo nábrežie",
"Edisonova",
"Egrešová",
"Einsteinova",
"Eisnerova",
"Elektrárenská",
"Estónska",
"Estónska",
"Exnárova",
"F. Kostku",
"Fadruszova",
"Fajnorovo nábrežie",
"Fándlyho",
"Farebná",
"Farská",
"Farského",
"Fazuľová",
"Fedákova",
"Fedinova",
"Ferienčíkova",
"Fialkové údolie",
"Fibichova",
"Fikusová",
"Filiálne nádražie",
"Fláviovská",
"Flöglova",
"Floriánske námestie",
"Fraňa Kráľa",
"Francisciho",
"Francúzskych partizá",
"Frankovská",
"Františkánska",
"Františkánske námest",
"Františka Schmuckera",
"Furdekova",
"Furdekova",
"Furmanská",
"Furmintská",
"Gabčíkova",
"Gagarinova",
"Gagarinova",
"Gagarinova",
"Gajarská",
"Gajc",
"Gajova",
"Galaktická",
"Galandova",
"Galbavého",
"Gallayova",
"Gallova",
"Galvaniho",
"Gašparíkova",
"Gaštanová",
"Gavlovičova",
"Gbelská",
"Gelnická",
"Gemerská",
"Geologická",
"Georgínová",
"Gercenova",
"Gerulatská",
"Gessayova",
"Gettingová",
"Glavica",
"Godrova",
"Gogoľova",
"Goláňova",
"Gondova",
"Goralská",
"Gorazdova",
"Gorkého",
"Gregorovej",
"Gronárska",
"Grösslingova",
"Gruzínska",
"Gunduličova",
"Guothova",
"Gusevova",
"Haanova",
"Haburská",
"Hadia cesta",
"Hadriánová",
"Hagarova",
"Hagarova",
"Hájová",
"Halašova",
"Hálkova",
"Hálova",
"Hamuliakova",
"Hanácka",
"Handlovská",
"Hanulova",
"Hanulova",
"Hany Meličkovej",
"Hargašova",
"Harmanecká",
"Harmincova",
"Hasičská",
"Hattalova",
"Havelkova",
"Havlíčkova",
"Havrania",
"Haydnova",
"Hečkova",
"Herlianska",
"Herlianska",
"Heydukova",
"Heyrovského",
"Hlaváčikova",
"Hlavatého",
"Hlavná",
"Hlavné námestie",
"Hlbinná",
"Hlboká cesta",
"Hlboká cesta",
"Hlinická",
"Hlinická",
"Hlivová",
"Hlohová",
"Hlučínska",
"Hnilecká",
"Hodálova",
"Hodonínska",
"Hodonínska",
"Hodonínska",
"Hodžovo námestie",
"Holekova",
"Holíčska",
"Hollého",
"Holubyho",
"Homolova",
"Hontianska",
"Horárska",
"Horcová",
"Horčičná",
"Horná",
"Horná Vančurová",
"Hornádska",
"Horné Židiny",
"Horská",
"Horská",
"Horská",
"Hospodárska",
"Hrabový chodník",
"Hrad",
"Hradištná",
"Hradná",
"Hradné údolie",
"Hradská",
"Hrachová",
"Hraničiarska",
"Hraničná",
"Hraničný priechod-Ču",
"Hrdličkova",
"Hrebendova",
"Hríbová",
"Hriňovská",
"Hrobákova",
"Hrobárska",
"Hroboňova",
"Hronska",
"Hroznová",
"Hrušková",
"Hrušovská",
"Hubeného",
"Hubeného",
"Hudecova",
"Humenské námestie",
"Hummelova",
"Hurbanovo námestie",
"Hurbanovo námestie",
"Husova",
"Húščavova",
"Hutnícka",
"Hviezdna",
"Hviezdicová",
"Hviezdoslavova",
"Hviezdoslavovo námes",
"Hyacintová",
"Hybešova",
"Hydinárska",
"Hýrošova",
"Chalupkova",
"Charkovská",
"Chemická",
"Chladná",
"Chlumeckého",
"Chmeľová",
"Chorvátska",
"Chorvátska",
"Chotárna",
"Chrasťová",
"Chrenová",
"Chrobákova",
"Ihličnatá",
"Ihrisková",
"Iľjušinova",
"Ilkovičova",
"Ílová",
"Ilýrska",
"Imelová",
"Inovecká",
"Inovecká",
"Ipeľská",
"Irisová",
"Irkutská",
"Iršajská",
"Iskerníková",
"Istrijská",
"Ivana Blazeviča",
"Ivana Bukovčana",
"Ivana Horvátha",
"Ivánska cesta",
"J.C.Hronského",
"Jabloňová",
"Jačmenná",
"Jadranská",
"Jadrová",
"Jahodová",
"Jakabova",
"Jakubíkova",
"Jakubovo námestie",
"Jakubská",
"Jalovcová",
"Jamnického",
"Jána Jonáša",
"Jána Poničana",
"Jána Raka",
"Jána Smreka",
"Jána Stanislava",
"Janáčkova",
"Jančova",
"Janíkove role",
"Janka Kráľa",
"Jankolova",
"Jánošíkova",
"Jánoškova",
"Janotova",
"Janšákova",
"Jantárová",
"Jantárová",
"Jantárová cesta",
"Jarabinková",
"Jarná",
"Jaroslavova",
"Jarošova",
"Jasencová",
"Jaseňová",
"Jaskový rad",
"Jasná",
"Jasovská",
"Jastrabia",
"Jašíkova",
"Javorinská",
"Javorová",
"Jazdecká",
"Jazerná",
"Jazmínová",
"Jedenásta",
"Jedlíkova",
"Jedľová",
"Jégého",
"Jegeneš",
"Jelačičova",
"Jelenia",
"Jelšová",
"Jeséniova",
"Jesenná",
"Jesenského",
"Jesienková",
"Jiráskova",
"Jiskrova",
"Jókaiho",
"Jozefa Mikisitsa",
"Jozefa Vachovského",
"Jozefská",
"Júlová",
"Junácka",
"Jungmannova",
"Júnová",
"Jurigovo námestie",
"Jurkovičova",
"Jurovského",
"Jurská",
"Justičná",
"K horárskej studni",
"K lomu",
"K pasienkom",
"K Železnej studienke",
"Kadnárova",
"Kadnárova",
"Kadnárova",
"Kadnárova",
"Kadnárova",
"Kafendova",
"Kalinčiakova",
"Kalinová",
"Kalištná",
"Kaméliová",
"Kamenárska",
"Kamenné námestie",
"Kamilková",
"Kamilková",
"Kamzík",
"Kapicova",
"Kapitulská",
"Kapitulský dvor",
"Kaplinská",
"Kapucínska",
"Kapušianska",
"Karadžičova",
"Karadžičova",
"Karadžičova",
"Karadžičova",
"Karloveská",
"Karloveské rameno",
"Karpatská",
"Karpatské námestie",
"Kašmírska",
"Kaštielska",
"Kataríny Brúderovej",
"Kaukazská",
"Kazanská",
"Kazanská",
"Kazanská",
"Keltská",
"Kempelenova",
"Ketelec",
"Kežmarské námestie",
"Kladnianska",
"Klariská",
"Klásková",
"Kláštorská",
"Klatovská",
"Klatovská",
"Klemensova",
"Klenová",
"Klimkovičova",
"Klincová",
"Klobučnícka",
"Klokočova",
"Kľukatá",
"Kĺzavá",
"Kmeťovo námestie",
"Knižková dolina",
"Koceľova",
"Kočánkova",
"Kohútova",
"Koľajná",
"Kolárska",
"Kolískova",
"Kollárova",
"Kollárovo námestie",
"Kollárovo námestie",
"Kolmá",
"Komárňanská",
"Komárnická",
"Komárnická",
"Komárovská",
"Komenského námestie",
"Kominárska",
"Komonicová",
"Koncová",
"Koniarkova",
"Konopná",
"Konvalinková",
"Konventná",
"Kopanice",
"Kopčianska",
"Koperníkova",
"Koprivnická",
"Koprivnická",
"Koprivnická",
"Korabinského",
"Kórejská",
"Koreničova",
"Koreňová",
"Korunská",
"Korytnická",
"Kosatcová",
"Kosodrevinová",
"Kostlivého",
"Kostolná",
"Košická",
"Košická",
"Košická",
"Kovácsova",
"Kováčska",
"Kovorobotnícka",
"Kovová",
"Kozia",
"Koziarka",
"Kozičova",
"Kozmonautická",
"Kožušnícka",
"Kôprová",
"Kôstková",
"Krahulčia",
"Krajinská",
"Krajinská cesta",
"Krajná",
"Krakovská",
"Kráľovské údolie",
"Krasinského",
"Kraskova",
"Krásna",
"Krásnohorská",
"Krasovského",
"Kratiny",
"Krátka",
"Krčméryho",
"Kremeľská",
"Kremencová",
"Kremnická",
"Kresánkova",
"Kríková",
"Krivá",
"Križkova",
"Krížna",
"Krížna",
"Krížna",
"Krížna",
"Krmanova",
"Krokusová",
"Krompašská",
"Krupinská",
"Kubačova",
"Kubániho",
"Kubínska",
"Kudlákova",
"Kuklovská",
"Kúkoľová",
"Kukučínova",
"Kukuričná",
"Kulíškova",
"Kultúrna",
"Kuneradská",
"Kupeckého",
"Kúpeľná",
"Kurucova",
"Kutlíkova",
"Kútska",
"Kutuzovova",
"Kuzmányho",
"Kvačalova",
"Kvetinárska",
"Kvetná",
"Kýčerského",
"Kyjevská",
"Kysucká",
"Laborecká",
"Lackova",
"Ladislava Batthyányh",
"Ladislava Dérera",
"Ladislava Sáru",
"Ľadová",
"Ladzianskeho",
"Lachova",
"Ľaliová",
"Lamačská cesta",
"Lamačská cesta",
"Lamačská cesta",
"Lamanského",
"Landauova",
"Landererova",
"Langsfeldova",
"Ľanová",
"Laskomerského",
"Laténská",
"Latorická",
"Laučekova",
"Laurinská",
"Lazaretská",
"Lazaretská",
"Leánska",
"Lediny",
"Legerského",
"Legionárska",
"Legionárska",
"Lehotského",
"Lehotského",
"Leknová",
"Lenardova",
"Lermontovova",
"Lesná",
"Lesnícka",
"Leškova",
"Letecká",
"Letisko M.R.Štefánik",
"Letná",
"Levanduľová",
"Levárska",
"Levická",
"Levočská",
"Lidická",
"Lieskovec",
"Lieskovcová",
"Lieskovská cesta",
"Lietavská",
"Lichardova",
"Likavská",
"Limbová",
"Linzbothova",
"Lipnicová",
"Lipová",
"Lipského",
"Liptovská",
"Lisovňa",
"Listová",
"Líščie nivy",
"Líščie údolie",
"Litovská",
"Lodná",
"Lombardiniho",
"Lomnická",
"Lomonosovova",
"Longobardská",
"Lónyaiová",
"Lopenícka",
"Lotyšská",
"Lovinského",
"Lozornianská",
"Ľubietovská",
"Ľubinská",
"Ľubľanská",
"Ľubochnianska",
"Ľubovnianska",
"Ľubovníková",
"Ľudové námestie",
"Ľudovíta Fullu",
"Luhačovická",
"Lužická",
"Lúčna",
"Lužná",
"Lýcejná",
"Lykovcová",
"Lysákova",
"M. Hella",
"Madáchova",
"Maďarská",
"Magnetová",
"Magnezitová",
"Magnóliová",
"Magurská",
"Macharova",
"Máchova",
"Majakovského",
"Majerníkova",
"Majerská",
"Májkova",
"Majoránová",
"Májová",
"Maková",
"Makovického",
"Malá",
"Malagová",
"Malé pálenisko",
"Malinová",
"Malodunajská",
"Malokarpatské námest",
"Malý Draždiak",
"Malý trh",
"Mamateyova",
"Mamateyova",
"Mandľová",
"Mandľovníková",
"Mánesovo námestie",
"Margarétková",
"Marhuľová",
"Mariánska",
"Marie Curie-Sklodows",
"Márie Medveďovej",
"Markova",
"Marótyho",
"Martákovej",
"Martinčekova",
"Martinčekova",
"Martinengova",
"Martinská",
"Mateja Bela",
"Matejkova",
"Matičná",
"Mätová",
"Matúškova",
"Matúšova",
"Mečíkova",
"Medená",
"Medová",
"Medovková",
"Medzierka",
"Medzilaborecká",
"Mesačná",
"Mestská",
"Meteorová",
"Metodova",
"Mickiewiczova",
"Mierová",
"Michalská",
"Mikovíniho",
"Mikulášska",
"Milana Marečka",
"Milana Pišúta",
"Miletičova",
"Miletičova",
"Mišíkova",
"Mišíkova",
"Mišíkova",
"Mládežnícka",
"Mliekárenská",
"Mlynarovičova",
"Mlynská",
"Mlynská dolina",
"Mlynská dolina",
"Mlynská dolina",
"Mlynské luhy",
"Mlynské nivy",
"Mlynské nivy",
"Mlynské nivy",
"Mlynské nivy",
"Mlynské nivy",
"Modranská",
"Modricová",
"Modrý chodník",
"Mojmírova",
"Mokráň záhon",
"Mokrohájska cesta",
"Moldavská",
"Molecova",
"Monardová",
"Morava",
"Moravská",
"Morušova",
"Moskovská",
"Most SNP",
"Mostná",
"Mostová",
"Mošovského",
"Motýlia",
"Moyšova",
"Moyzesova",
"Mozartova",
"Mramorová",
"Mraziarenská",
"Mrázova",
"Mudrochova",
"Mudroňova",
"Mudroňova",
"Mudroňova",
"Muchovo námestie",
"Muránska",
"Murgašova",
"Murnice",
"Muškátová",
"Muštová",
"Múzejná",
"Myjavská",
"Mýtna",
"Mýtna",
"Na Baránku",
"Na barine",
"Na Brezinách",
"Na doline",
"Na grbe",
"Na Grunte",
"Na Holom vrchu",
"Na hrádzi",
"Na Hrebienku",
"Na hriadkach",
"Na Kalvárii",
"Na kaštieli",
"Na kopci",
"Na križovatkách",
"Na lánoch",
"Na medzi",
"Na mýte",
"Na pántoch",
"Na pasekách",
"Na paši",
"Na pažiti",
"Na piesku",
"Na Revíne",
"Na Riviére",
"Na rozhliadke",
"Na Sitine",
"Na skale",
"Na Slanci",
"Na Slavíne",
"Na spojke",
"Na stráni",
"Na Štyridsiatku",
"Na úvrati",
"Na varte",
"Na Vlkovkách",
"Na vrátkach",
"Na vŕšku",
"Na vyhliadke",
"Na výslní",
"Na Zlatej nohe",
"Nábělkova",
"Nábrežie arm. gen. L",
"Nábrežná",
"Nad Dunajom",
"Nad Gronárom",
"Nad jazierkom",
"Nad kúriou",
"Nad lomom",
"Nad lúčkami",
"Nad lúčkami",
"Nad ostrovom",
"Nad Sihoťou",
"Nákovná",
"Nákupná",
"Námestie 1. mája",
"Námestie 6. apríla",
"Námestie Alexandra D",
"Námestie Andreja Hli",
"Námestie Biely kríž",
"Námestie Hraničiarov",
"Námestie Jána Kostru",
"Námestie Jána Pavla",
"Námestie Ľudovíta Št",
"Námestie Martina Ben",
"Námestie Rodiny",
"Námestie slobody",
"Námestie slobody",
"Námestie SNP",
"Námestie SNP",
"Námestie sv. Františ",
"Námestie sv. Petra a",
"Narcisová",
"Nedbalova",
"Nechtíková",
"Nejedlého",
"Nekrasovova",
"Nemčíkova",
"Nerudova",
"Nevädzová",
"Nevská",
"Nezábudková",
"Nezvalova",
"Niťová",
"Nitrianska",
"Nížinná",
"Nobelova",
"Nobelovo námestie",
"Nová",
"Nová Bellova",
"Nová hora",
"Novackého",
"Nové pálenisko",
"Nové záhrady I",
"Nové záhrady II",
"Nové záhrady III",
"Nové záhrady IV",
"Nové záhrady V",
"Nové záhrady VI",
"Nové záhrady VII",
"Novinárska",
"Novobanská",
"Novodvorská",
"Novohorská",
"Novohradská",
"Novosadná",
"Novosvetská",
"Novosvetská",
"Novosvetská",
"Novoveská",
"Nový záhon",
"Obežná",
"Obchodná",
"Oblačná",
"Oblúková",
"Očovská",
"Odbojárov",
"Odborárska",
"Odborárske námestie",
"Odborárske námestie",
"Odeská",
"Ohnicová",
"Okánikova",
"Okružná",
"Olbrachtova",
"Oleandrová",
"Olejkárska",
"Olivová",
"Olšová",
"Ondavská",
"Ondrejovova",
"Ondrejská",
"Opavská",
"Opletalova",
"Oráčska",
"Oravská",
"Orechová",
"Orechová cesta",
"Orechový rad",
"Orenburská",
"Orgovánová",
"Orchideová",
"Oriešková",
"Ormisova",
"Osadná",
"Osiková",
"Oskorušová",
"Osloboditeľská",
"Ostravská",
"Ostredková",
"Ostružinová",
"Osuského",
"Osvetová",
"Otonelská",
"Ovčiarska",
"Ovocná",
"Ovručská",
"Ovsená",
"Ovsištské námestie",
"Ožvoldíkova",
"Ôsma",
"Pajštúnska",
"Palackého",
"Palárikova",
"Palárikova",
"Palinová",
"Palisády",
"Palisády",
"Palisády",
"Palkovičova",
"Palmová",
"Panenská",
"Pankúchova",
"Panónska cesta",
"Panská",
"Papánkovo námestie",
"Papraďová",
"Parcelná",
"Páričkova",
"Parková",
"Partizánska",
"Pasienková",
"Pasienky",
"Pastierska",
"Paulínyho",
"Pave Vukoviča",
"Pavla Blaha",
"Pavla Horova",
"Pavlovičova",
"Pavlovova",
"Pavlovská",
"Pažického",
"Pažítková",
"Pečnianska",
"Pekná cesta",
"Pekná cesta",
"Pekná cesta",
"Pekná vyhliadka",
"Pekníkova",
"Pernecká",
"Perličková",
"Pestovateľská",
"Petara Pasicha",
"Peterská",
"Petöfiho",
"Petržalská",
"Petúniová",
"Pezinská",
"Piata",
"Pieskovcová",
"Piesočná",
"Piešťanská",
"Pifflova",
"Pilárikova",
"Pílová",
"Píniová",
"Pionierska",
"Pionierska",
"Pivoňková",
"Plachého",
"Plachého",
"Planckova",
"Planét",
"Plánky",
"Platanová",
"Plátenícka",
"Plavecká",
"Plickova",
"Pluhová",
"Plynárenská",
"Plzenská",
"Pobrežná",
"Pod agátmi",
"Pod Bôrikom",
"Pod brehmi",
"Pod gaštanmi",
"Pod Kalváriou",
"Pod Klepáčom",
"Pod Kobylou",
"Pod Krásnou hôrkou",
"Pod lesom",
"Pod lipami",
"Pod Lipovým",
"Pod násypom",
"Pod Rovnicami",
"Pod skalou",
"Pod srdcom",
"Pod Strážami",
"Pod Vachmajstrom",
"Pod Válkom",
"Pod vinicami",
"Pod záhradami",
"Pod záhradami",
"Pod Zečákom",
"Podbeľová",
"Podbrezovská",
"Podháj",
"Podhorská",
"Podhorského",
"Podjavorinskej",
"Podkarpatská",
"Podkerepušky",
"Podkolibská",
"Podkorunská",
"Podlesná",
"Podlučinského",
"Podniková",
"Podpriehradná",
"Podtatranského",
"Podunajská",
"Podunajská",
"Podzáhradná",
"Pohánková",
"Pohraničníkov",
"Pohronská",
"Polárna",
"Polianky",
"Poľná",
"Poľnohospodárska",
"Poľný mlyn",
"Poloreckého",
"Poľská",
"Poludníková",
"Poniklecová",
"Popolná",
"Popovova",
"Popradská",
"Porubského",
"Poštová",
"Potočná",
"Považanova",
"Považská",
"Povoznícka",
"Povraznícka",
"Povraznícka",
"Požiarnická",
"Pračanská",
"Prasličková",
"Pražská",
"Pražská",
"Predstaničné námesti",
"Prepoštská",
"Prešernova",
"Prešovská",
"Prešovská",
"Prešovská",
"Pri Bielom kríži",
"Pri dvore",
"Pri Dynamitke",
"Pri Habánskom mlyne",
"Pri hradnej studni",
"Pri hrádzi",
"Pri kolíske",
"Pri kríži",
"Pri mlyne",
"Pri Rochu",
"Pri seči",
"Pri Starej Prachárni",
"Pri Starom háji",
"Pri starom letisku",
"Pri Starom Mýte",
"Pri strelnici",
"Pri Struhe",
"Pri Suchom mlyne",
"Pri Šajbách",
"Pri tehelni",
"Pri trati",
"Pri vinohradoch",
"Pri zvonici",
"Priama cesta",
"Pribylinská",
"Pribinova",
"Pribinova",
"Pribinova",
"Pribišova",
"Prídanky",
"Prídavková",
"Priečna",
"Priehradná",
"Priekopnícka",
"Priekopy",
"Priemyselná",
"Priemyselná",
"Prievozská",
"Prievozská",
"Prievozská",
"Príjazdná",
"Príkopova",
"Primaciálne námestie",
"Prímoravská",
"Prípojná",
"Prístav",
"Prístavná",
"Prokofievova",
"Prokopa Veľkého",
"Prokopova",
"Prúdová",
"Prvá",
"Prvosienková",
"Pšeničná",
"Púchovská",
"Púpavová",
"Pustá",
"Puškinova",
"Pútnická",
"Pyrenejská",
"Rácova",
"Račianska",
"Račianska",
"Račianska",
"Račianska",
"Račianska",
"Račianska",
"Račianske mýto",
"Radarová",
"Rádiová",
"Radlinského",
"Radničná",
"Radničné námestie",
"Radvanská",
"Rajčianska",
"Rajecká",
"Rajská",
"Rajtákova",
"Raketová",
"Rákosová",
"Rascová",
"Rascová",
"Rastislavova",
"Rastlinná",
"Rašelinová",
"Ráztočná",
"Rázusovo nábrežie",
"Ražná",
"Rebarborová",
"Regrútska",
"Remeselnícka",
"Repašského",
"Repíková",
"Repná",
"Rešetkova",
"Revolučná",
"Révová",
"Revúcka",
"Rezedová",
"Riazanská",
"Riazanská",
"Ribayová",
"Ríbezľová",
"Riečna",
"Rigeleho",
"Rímska",
"Rízlingová",
"Riznerova",
"Robotnícka",
"Roľnícka",
"Romanova",
"Röntgenova",
"Rosná",
"Rostovská",
"Rošického",
"Rovná",
"Rovniankova",
"Rovníková",
"Royova",
"Rozálska",
"Rozmarínová",
"Rozvodná",
"Rožňavská",
"Rožňavská",
"Rožňavská",
"Rubínová",
"Rubinsteinova",
"Rudnayovo námestie",
"Rudnícka",
"Rulandská",
"Rumančeková",
"Rumunská",
"Rusovce",
"Rusovská cesta",
"Rustaveliho",
"Ružičková",
"Ružinovská",
"Ružinovská",
"Ružinovská",
"Ružomberská",
"Ružová dolina",
"Ružová dolina",
"Rybárska brána",
"Rybné námestie",
"Rybničná",
"Rybničná",
"Rybničná",
"Rýdziková",
"Rytierska",
"Sabinovská",
"Sabinovská",
"Sad Janka Kráľa",
"Sadmelijská",
"Sadová",
"Samova",
"Saratovská",
"Sartorisova",
"Sasanková",
"Sasinkova",
"Savignonská",
"Seberíniho",
"Sečovská",
"Sedlárska",
"Sedmokrásková",
"Segnáre",
"Segnerova",
"Sekulská",
"Sekurisova",
"Sekýľska",
"Semenárska",
"Semianova",
"Semilonská",
"Senická",
"Senná",
"Septimiova",
"Schengenská",
"Schillerova",
"Schneidera -Trnavské",
"Schody pri starej vo",
"Sibírska",
"Siedma",
"Sienkiewiczova",
"Silvánska",
"Sinokvetná",
"Skalická cesta",
"Skalná",
"Skerličova",
"Sklabinská",
"Sklenárova",
"Sklenárska",
"Skoroceľová",
"Skuteckého",
"Skýcovská",
"Sládkovičova",
"Sladová",
"Slatinská",
"Slávičie údolie",
"Slavín",
"Slepá",
"Sliačska",
"Sliezska",
"Slivková",
"Sĺňavská",
"Slnečná",
"Slnečnicová",
"Slovanské nábrežie",
"Slovienska",
"Slovinec",
"Slovinská",
"Slovnaftská",
"Slovnaftská",
"Slowackého",
"Smetanova",
"Smikova",
"Smolenická",
"Smolnícka",
"Smrečianska",
"Smrečianska",
"Snežienková",
"Soferove schody",
"Socháňova",
"Sochorova",
"Sokolíkova",
"Sokolská",
"Solivarská",
"Sološnická",
"Somolického",
"Somolického",
"Sosnová",
"Sovia",
"Spádová",
"Spätná cesta",
"Spišská",
"Spojná",
"Spoločenská",
"Sputniková",
"Sreznevského",
"Srnčia",
"Stachanovská",
"Stálicová",
"Stanekova",
"Staničná",
"Stará Černicová",
"Stará Ivánska cesta",
"Stará Klenová",
"Stará Prievozská",
"Stará Stupavská",
"Stará Vajnorská",
"Stará vinárska",
"Staré Grunty",
"Staré ihrisko",
"Staré záhrady",
"Starhradská",
"Starohájska",
"Staromestská",
"Staromlynská",
"Starorímska",
"Staroturský chodník",
"Stavbárska",
"Staviteľská",
"Stepná cesta",
"Stodolova",
"Stoklasová",
"Stolárska",
"Strakova",
"Stratená",
"Strážna",
"Strážnická",
"Strážny dom",
"Strečnianska",
"Stredná",
"Strelecká",
"Strelkova",
"Strmá cesta",
"Strmé sady",
"Strmý bok",
"Strmý vŕšok",
"Strojnícka",
"Stromová",
"Stropkovská",
"Struková",
"Studená",
"Studenohorská",
"Stuhová",
"Stupavská",
"Súbežná",
"Sudová",
"Súhvezdná",
"Suchá",
"Suché mýto",
"Suchohradská",
"Súkennícka",
"Súľovská",
"Sumbalova",
"Súmračná",
"Súťažná",
"Svätého Vincenta",
"Svätoplukova",
"Svätoplukova",
"Svätovojtešská",
"Svébska",
"Svetlá",
"Svíbová",
"Svidnícka",
"Svoradova",
"Svrčia",
"Syslia",
"Šafárikovo námestie",
"Šafárikovo námestie",
"Šafránová",
"Šagátova",
"Šachorová",
"Šalátová",
"Šaldova",
"Šalviová",
"Šamorínska",
"Šancová",
"Šancová",
"Šancová",
"Šancová",
"Šándorova",
"Šarišská",
"Šášovská",
"Šaštínska",
"Ševčenkova",
"Šiesta",
"Šikmá",
"Šinkovské",
"Šintavská",
"Šípková",
"Šípová",
"Šíravská",
"Široká",
"Škarniclova",
"Školská",
"Škovránčia",
"Škultétyho",
"Šoltésovej",
"Šošovicová",
"Špieszova",
"Špitálska",
"Športová",
"Šrobárovo námestie",
"Šťastná",
"Štedrá",
"Štefana Králika",
"Štefana Králika",
"Štefana Majera",
"Štefánikova",
"Štefánikova",
"Štefánikova",
"Štefanovičova",
"Štefunkova",
"Štepná",
"Štetinova",
"Štiavnická",
"Štítová",
"Štrbská",
"Štúrova",
"Štvrtá",
"Štyndlova",
"Šulekova",
"Šulekova",
"Šulekova",
"Šumavská",
"Šuňavcova",
"Šúrska",
"Šustekova",
"Šuty",
"Švabinského",
"Švantnerova",
"Tabaková",
"Tablicova",
"Táborská",
"Tajovského",
"Talichova",
"Tallerova",
"Tatranská",
"Tavaríkova osada",
"Tbiliská",
"Tehelná",
"Tehelňa",
"Tehliarska",
"Technická",
"Tekovská",
"Tekvicová",
"Telocvičná",
"Tematínska",
"Teplická",
"Terchovská",
"Teslova",
"Tešedíkova",
"Tetmayerova",
"Thurzova",
"Tibenského",
"Tibériová",
"Tichá",
"Tilgnerova",
"Timravina",
"Tobrucká",
"Tokajícka",
"Tolstého",
"Tománkova",
"Tomanova",
"Tomášikova",
"Tomášikova",
"Tomášikova",
"Tomášikova",
"Tomášikova",
"Toplianska",
"Topoľčianska",
"Topoľová",
"Toryská",
"Továrenská",
"Trajánova",
"Tramínová",
"Tranovského",
"Trávna",
"Trebišovská",
"Trebišovská",
"Trebišovská",
"Trenčianska",
"Treskoňova",
"Tretia",
"Trhová",
"Trinásta",
"Trnavská cesta",
"Trnavská cesta",
"Trnavská cesta",
"Trnavská cesta",
"Trnavská cesta",
"Trnavské mýto",
"Trnková",
"Tŕňová",
"Trojdomy",
"Trojičné námestie",
"Trstínska",
"Tučkova",
"Tuhovská",
"Tulipánová",
"Tupého",
"Tupolevova",
"Turbínova",
"Turčianska",
"Turistická",
"Turnianska",
"Tvarožkova",
"Tylova",
"Tymiánová",
"Tyršovo nábrežie",
"Učiteľská",
"Údernícka",
"Údolná",
"Uhliská",
"Uhorková",
"Uhrova",
"Uhrovecká",
"Ukrajinská",
"Ulica 1. mája",
"Ulica 29. augusta",
"Ulica 29. augusta",
"Ulica 29. augusta",
"Ulica 29. augusta",
"Ulica 8. mája",
"Ulica Alviano",
"Ulica Imricha Karvaš",
"Ulica J. Valašťana D",
"Ulica Janka Alexyho",
"Ulica Jozefa Krónera",
"Ulica Juraja Hronca",
"Ulica Karola Adlera",
"Ulica kpt. Rašu",
"Ulica Leopoldov maje",
"Ulica Ľuda Zúbka",
"Ulica Nad Válkom",
"Ulica padlých hrdino",
"Ulica Pri gaštanovej",
"Ulica Pri pastierni",
"Ulica Pri Vápeníckom",
"Ulica Pri vodnej nád",
"Ulica svornosti",
"Ulica Viktora Tegelh",
"Úprkova",
"Úradnícka",
"Uránová",
"Urbánkova",
"Urbárska",
"Ursínyho",
"Uršulínska",
"Ušiakova",
"Úvozná",
"Uzbecká",
"Úzka",
"Úžiny",
"V záhradách",
"Vajanského nábrežie",
"Vajnorská",
"Vajnorská",
"Vajnorská",
"Vajnorská",
"Vajnorská",
"Vajnorská",
"Vajnorská",
"Vajnorská",
"Vajnorská",
"Valachovej",
"Valašská",
"Valchárska",
"Vančurova",
"Vansovej",
"Vápencová",
"Vápenka",
"Vápenná",
"Varínska",
"Varšavská",
"Varšavská",
"Vavilovova",
"Vavrinecká",
"Vavrínova",
"Vazovova",
"Vážska",
"Včelárska",
"Velehradská",
"Veľké Štepnice",
"Veltlínska",
"Vendelínska",
"Ventúrska",
"Veterná",
"Veternicová",
"Vetvárska",
"Vetvová",
"Vidlicová",
"Viedenská cesta",
"Viedenská cesta",
"Viedenská cesta",
"Vietnamská",
"Vígľašská",
"Vihorlatská",
"Viktorínova",
"Vilová",
"Viničná",
"Vínna",
"Vinohradnícka",
"Višňová",
"Víťazná",
"Vlárska",
"Vlastenecké námestie",
"Vlčie hrdlo",
"Vlčkova",
"Vlčkova",
"Vlčkova",
"Vodné elektrárne",
"Vodný vrch",
"Vosková",
"Votrubova",
"Vrábeľská",
"Vrakunská",
"Vrakunská cesta",
"Vrakunská cesta",
"Vrančovičova",
"Vranovská",
"Vrbánska",
"Vrbenského",
"Vŕbová",
"Vresová",
"Vretenová",
"Vrchná",
"Vrútocká",
"Vtáčikova",
"Vtáčnik",
"Vyhliadka",
"Vyhnianska cesta",
"Výhonská",
"Východná",
"Vysoká",
"Vysokohorská",
"Vyšehradská",
"Vyšná",
"Výtvarná",
"Vývojová",
"Wattova",
"Wilsonova",
"Wolkrova",
"Za bránou",
"Za farou",
"Za Kasárňou",
"Za mlynom",
"Za sokolovňou",
"Za Stanicou",
"Za tehelňou",
"Záborského",
"Zadunajská cesta",
"Záhorácka",
"Záhorská",
"Záhradkárska",
"Záhradná",
"Záhradnícka",
"Záhradnícka",
"Záhradnícka",
"Záhradnícka",
"Záhrady",
"Záhrebská",
"Záhrebská",
"Záhumenná",
"Záhumenská",
"Zákutie",
"Zálužická",
"Zámocká",
"Zámocké schody",
"Zámočnícka",
"Západná",
"Západný rad",
"Záporožská",
"Záruby",
"Zátišie",
"Zátureckého",
"Zavadilová",
"Závadská",
"Záveterná",
"Závodná",
"Závodníkova",
"Zbrody",
"Zdravotnícka",
"Zelená",
"Zeleninová",
"Zelenohorská",
"Zelinárska",
"Zhorínska",
"Zidiny",
"Zimná",
"Zlatá",
"Zlaté piesky",
"Zlaté schody",
"Zlatohorská",
"Znievska",
"Zohorská",
"Zochova",
"Zrinského",
"Zvolenská",
"Zvončeková",
"Žabí majer",
"Žabotova",
"Žarnovická",
"Žatevná",
"Žehrianska",
"Železná",
"Železničiarska",
"Železničná",
"Želiarska",
"Žellova",
"Žiacka",
"Žiarska",
"Židovská",
"Žihľavová",
"Žilinská",
"Žilinská",
"Žitavská",
"Žitná",
"Živnostenská",
"Žižkova",
"Žulová",
"Župné námestie",
"Borágova",
"Parenicová",
"Loparová",
"Jegnešská",
"Jonatanová",
"Monardová",
"Perličková",
)
# The eight self-governing regions (kraje) of Slovakia; sampled by
# ``administrative_unit()`` / ``state()`` below.
states = (
    "Bratislavský kraj",
    "Trnavský kraj",
    "Trenčiansky kraj",
    "Nitriansky kraj",
    "Žilinský kraj",
    "Banskobystrický kraj",
    "Prešovský kraj",
    "Košický kraj",
)
# Country names in Slovak, mixing short forms ("Albánsko") with official long
# forms ("Albánska republika") and including historical states (ZSSR,
# Československo, NDR, ...).
# NOTE(review): some entries are duplicated (e.g. "Bolívijská republika") —
# presumably to weight them in random selection; confirm before deduplicating.
countries = (
    "Afganistan",
    "Afghanistanská islamská republika",
    "Ålandy",
    "Albánsko",
    "Albánska republika",
    "Alžírsko",
    "Alžírska demokratická ľudová republika",
    "Americká Samoa",
    "Andorra",
    "Andorrské kniežatstvo",
    "Angola",
    "Angolská republika",
    "Anguilla",
    "Antarktída",
    "Antigua a Barbuda",
    "Argentína",
    "Argentínska republika",
    "Arménsko",
    "Arménska republika",
    "Aruba",
    "Austrália",
    "Rakúsko",
    "Rakúska republika",
    "Azerbajdžan",
    "Azerbajdžanská republika",
    "Bahamy",
    "Bahamské spoločenstvo",
    "Bahrajn",
    "Bahrajnské kráľovstvo",
    "Bangladéš",
    "Bangladéšska ľudová republika",
    "Barbados",
    "Bielorusko",
    "Bieloruská republika",
    "Belgicko",
    "Belgické kráľovstvo",
    "Belize",
    "Benin",
    "Beninská republika",
    "Bermudy",
    "Bhután",
    "Bhutánske kráľovstvo",
    "Bolívijská republika",
    "Bolívijská republika",
    "Bolívia",
    "Bosna a Hercegovina",
    "Republika Bosny a Hercegoviny",
    "Botswana",
    "Botswanská republika",
    "Bouvetov ostrov",
    "Brazília",
    "Brazílska federatívna republika",
    "Britské indickooceánske územie",
    "Brunejsko-darussalamský štát",
    "Bulharsko",
    "Bulharská republika",
    "Burkina Faso",
    "Burundi",
    "Burundská republika",
    "Kambodža",
    "Kambodžské kráľovstvo",
    "Kamerun",
    "Kamerunská republika",
    "Kanada",
    "Kapverdy",
    "Kapverdská republika",
    "Kajmanie ostrovy",
    "Stredoafrická republika",
    "Čad",
    "Čadská republika",
    "Čile",
    "Čilská republika",
    "Čína",
    "Čínska ľudová republika",
    "Vianočný ostrov",
    "Kokosové ostrovy",
    "Kolumbia",
    "Kolumbijská republika",
    "Komory",
    "Komorský zväz",
    "Kongo",
    "Konžská republika",
    "Konžská demokratická republika",
    "Cookove ostrovy",
    "Kostarika",
    "Kostarická republika",
    "Pobrežie Slonoviny",
    "Republika Pobrežia Slonoviny",
    "Chorvátsko",
    "Chorvátska republika",
    "Kuba",
    "Kubánska republika",
    "Cyprus",
    "Cyperská republika",
    "Česká republika",
    "Dánsko",
    "Dánske kráľovstvo",
    "Džibutsko",
    "Džibutská republika",
    "Dominika",
    "Dominické spoločenstvo",
    "Dominikánska republika",
    "Ekvádor",
    "Ekvádorská republika",
    "Egypt",
    "Egyptská arabská republika",
    "Salvádor",
    "Salvádorská republika",
    "Rovníková Guinea",
    "Republika Rovníkovej Guiney",
    "Eritrea",
    "Estónsko",
    "Estónska republika",
    "Etiópia",
    "Etiópska federatívna demokratická republika",
    "Falklandy (Malvíny)",
    "Faerské ostrovy",
    "Fidži",
    "Fínsko",
    "Fínska republika",
    "Francúzsko",
    "Francúzska republika",
    "Francúzska Guyana",
    "Francúzska Polynézia",
    "Francúzske južné a antarktické územia",
    "Gabon",
    "Gabonská republika",
    "Gambia",
    "Gambijská republika",
    "Gruzínsko",
    "Nemecko",
    "Nemecká spolková republika",
    "Ghana",
    "Ghanská republika",
    "Gibraltár",
    "Grécko",
    "Grécka republika",
    "Grónsko",
    "Grenada",
    "Guadeloupe",
    "Guam",
    "Guatemala",
    "Guatemalská republika",
    "Guernsey",
    "Guinea",
    "Guinejská republika",
    "Guinea-Bissau",
    "Guinejsko-bissauská republika",
    "Guyana",
    "Guyanská kooperatívna republika",
    "Haiti",
    "Haitská republika",
    "Heardov ostrov",
    "Svätá stolica (Vatikánsky mestský štát)",
    "Honduras",
    "Honduraská republika",
    "Hongkong",
    "Osobitná administratívna oblasť Číny Hongkong",
    "Maďarsko",
    "Maďarská republika",
    "Island",
    "Islandská republika",
    "India",
    "Indická republika",
    "Indonézia",
    "Indonézska republika",
    "Iránska islamská republika",
    "Iránska islamská republika",
    "Irak",
    "Iracká republika",
    "Írsko",
    "Man",
    "Izrael",
    "Izraelský štát",
    "Taliansko",
    "Talianska republika",
    "Jamajka",
    "Japonsko",
    "Jersey",
    "Jordánsko",
    "Jordánske hášimovské kráľovstvo",
    "Kazachstan",
    "Kazašská republika",
    "Keňa",
    "Kenská republika",
    "Kiribati",
    "Kiribatská republika",
    "Kórejská ľudovodemokratická republika",
    "Kórejská ľudovodemokratická republika",
    "Kórejská republika",
    "Kuvajt",
    "Kuvajtský štát",
    "Kirgizsko",
    "Kirgizská republika",
    "Laoská ľudovodemokratická republika",
    "Lotyšsko",
    "Lotyšská republika",
    "Libanon",
    "Libanonská republika",
    "Lesotho",
    "Lesothské kráľovstvo",
    "Libéria",
    "Libérijská republika",
    "Líbya",
    "Lichtenštajnsko",
    "Lichtenštajnské kniežatstvo",
    "Litva",
    "Litovská republika",
    "Luxembursko",
    "Luxemburské veľkovojvodstvo",
    "Macao",
    "Osobitná administratívna oblasť Číny Macao",
    "Macedónska republika",
    "Bývalá juhoslovanská republika Macedónsko",
    "Madagaskar",
    "Madagaskarská republika",
    "Malawi",
    "Malawijská republika",
    "Malajzia",
    "Maldivy",
    "Maldivská republika",
    "Mali",
    "Malijská republika",
    "Malta",
    "Maltská republika",
    "Marshallove ostrovy",
    "Republika Marshallových ostrovov",
    "Martinik",
    "Mauritánia",
    "Mauritánska islamská republika",
    "Maurícius",
    "Maurícijská republika",
    "Mayotte",
    "Mexiko",
    "Spojené štáty mexické",
    "Mikronézske federatívne štáty",
    "Mikronézske federatívne štáty",
    "Moldavská republika",
    "Moldavská republika",
    "Moldavsko",
    "Monako",
    "Monacké kniežatstvo",
    "Mongolsko",
    "Čierna Hora",
    "Montserrat",
    "Maroko",
    "Marocké kráľovstvo",
    "Mozambik",
    "Mozambická republika",
    "Mjanmarsko",
    "Namíbia",
    "Namíbijská republika",
    "Nauru",
    "Nauruská republika",
    "Nepál",
    "Nepálska federatívna demokratická republika",
    "Holandsko",
    "Holandské kráľovstvo",
    "Nová Kaledónia",
    "Nový Zéland",
    "Nikaragua",
    "Nikaragujská republika",
    "Niger",
    "Nigerská republika",
    "Nigéria",
    "Nigérijská federatívna republika",
    "Niue",
    "Norfolk",
    "Severné Mariány",
    "Spoločenstvo Severných Marián",
    "Nórsko",
    "Nórske kráľovstvo",
    "Omán",
    "Ománsky sultanát",
    "Pakistan",
    "Pakistanská islamská republika",
    "Palau",
    "Palauská republika",
    "palestínske územie, Okupované",
    "Okupované palestínske územie",
    "Panama",
    "Panamská republika",
    "Papua - Nová Guinea",
    "Paraguaj",
    "Paraguajská republika",
    "Peru",
    "Peruánska republika",
    "Filipíny",
    "Filipínska republika",
    "Pitcairnove ostrovy",
    "Poľsko",
    "Poľská republika",
    "Portugalsko",
    "Portugalská republika",
    "Portoriko",
    "Katar",
    "Katarský štát",
    "Réunion",
    "Rumunsko",
    "Ruská federácia",
    "Rwanda",
    "Rwandská republika",
    "Svätý Bartolomej",
    "Svätá Helena, Ascension a Tristan da Cunha",
    "Svätý Krištof a Nevis",
    "Svätá Lucia",
    "Saint Martin",
    "Saint Pierre a Miquelon",
    "Svätý Vincent a Grenadíny",
    "Samoa",
    "Samojský nezávislý štát",
    "San Maríno",
    "Sanmarínska republika",
    "Svätý Tomáš a Princov ostrov",
    "Demokratická republika Svätého Tomáša a Princovho ostrova",
    "Saudská Arábia",
    "Saudskoarabské kráľovstvo",
    "Senegal",
    "Senegalská republika",
    "Srbsko",
    "Srbská republika",
    "Seychely",
    "Seychelská republika",
    "Sierra Leone",
    "Sierraleonská republika",
    "Singapur",
    "Singapurská republika",
    "Slovensko",
    "Slovenská republika",
    "Slovinsko",
    "Slovinská republika",
    "Šalamúnove ostrovy",
    "Somálsko",
    "Somálska republika",
    "Južná Afrika",
    "Juhoafrická republika",
    "Južná Georgia a Južné Sandwichove ostrovy",
    "Španielsko",
    "Španielske kráľovstvo",
    "Srí Lanka",
    "Srílanská demokratická socialistická republika",
    "Sudán",
    "Sudánska republika",
    "Surinam",
    "Surinamská republika",
    "Svalbard a Jan Mayen",
    "Svazijsko",
    "Svazijské kráľovstvo",
    "Švédsko",
    "Švédske kráľovstvo",
    "Švajčiarsko",
    "Švajčiarska konfederácia",
    "Sýrska arabská republika",
    "Taiwan, provincia Číny",
    "Taiwan",
    "Tadžikistan",
    "Tadžická republika",
    "Tanzánijská zjednotená republika",
    "Tanzánijská zjednotená republika",
    "Thajsko",
    "Thajské kráľovstvo",
    "Východný Timor",
    "Východotimorská demokratická republika",
    "Togo",
    "Togská republika",
    "Tokelau",
    "Tonga",
    "Tongské kráľovstvo",
    "Trinidad a Tobago",
    "Republika Trinidadu a Tobaga",
    "Tunisko",
    "Tuniská republika",
    "Turecko",
    "Turecká republika",
    "Turkménsko",
    "Ostrovy Turks a Caicos",
    "Tuvalu",
    "Uganda",
    "Ugandská republika",
    "Ukrajina",
    "Spojené arabské emiráty",
    "Spojené kráľovstvo",
    "Spojené kráľovstvo Veľkej Británie a Severného Írska",
    "Spojené štáty",
    "Spojené štáty americké",
    "Menšie odľahlé ostrovy Spojených štátov",
    "Uruguaj",
    "Uruguajská východná republika",
    "Uzbekistan",
    "Uzbecká republika",
    "Vanuatu",
    "Vanuatská republika",
    "Venezuelská bolívarovská republika",
    "Venezuela",
    "Vietnam",
    "Vietnamská socialistická republika",
    "Panenské ostrovy, Britské",
    "Britské Panenské ostrovy",
    "Panenské ostrovy, Americké",
    "Panenské ostrovy Spojených štátov",
    "Wallis a Futuna",
    "Západná Sahara",
    "Jemen",
    "Jemenská republika",
    "Zambia",
    "Zambijská republika",
    "Zimbabwe",
    "Zimbabwianska republika",
    "Britské antarktické územie",
    "Socialistická republika Barmský zväz",
    "Bieloruská sovietska socialistická republika",
    "ostrovy Canton a Enderbury",
    "Československo, Československá socialistická republika",
    "Dahome",
    "Zem kráľovnej Maud",
    "Východný Timor",
    "Metropolitné Francúzsko",
    "Francúzske pobrežie Afarov a Isasov",
    "Francúzske južné a antarktické územia",
    "Nemecká demokratická republika",
    "Nemecká spolková republika",
    "Gilbertove a lagúnové ostrovy",
    "Johnston",
    "Midwajské ostrovy",
    "Holandské Antily",
    "neutrálne pôdy",
    "Nové Hebridy",
    "Poručnícke územie tichomorských ostrovov",
    "Panamská republika",
    "Panamské prieplavové pásmo",
    "Rumunská socialistická republika",
    "Svätý Krištof",
    "Srbsko a Čierna Hora",
    "Sikkim",
    "Rodézia",
    "Španielska Sahara",
    "Tichomorské ostrovy pod správou USA",
    "ZSSR, Zväz sovietskych socialistických republík",
    "Republika Horná Volta",
    "Vatikánsky mestský štát (Svätá stolica)",
    "Vietnamská demokratická republika",
    "Wake",
    "Jemenská ľudovodemokratická republika",
    "Jemenská arabská republika",
    "Socialistická federatívna republika Juhoslávia",
    "Zairská republika",
)
def street_suffix_short(self) -> str:
    """Return a randomly chosen abbreviated street suffix."""
    suffixes = self.street_suffixes_short
    return self.random_element(suffixes)
def street_suffix_long(self) -> str:
    """Return a randomly chosen full (unabbreviated) street suffix."""
    suffixes = self.street_suffixes_long
    return self.random_element(suffixes)
def city_name(self) -> str:
    """Return a randomly chosen city name."""
    cities = self.cities
    return self.random_element(cities)
def street_name(self) -> str:
    """Return a randomly chosen street name."""
    streets = self.streets
    return self.random_element(streets)
def administrative_unit(self) -> str:
    """Return a randomly chosen entry from ``states``."""
    choices = self.states
    return self.random_element(choices)

# Backwards-compatible alias used by generic locale code.
state = administrative_unit
def city_with_postcode(self) -> str:
    """Return a random postcode followed by a random city name.

    The postcode is generated before the city is drawn, preserving the
    order in which the underlying random generator is consumed.
    """
    postcode = self.postcode()
    city = self.random_element(self.cities)
    return f"{postcode} {city}"
| mit | 43cac81a07acb2df43813b312d237649 | 21.062096 | 71 | 0.440183 | 2.484209 | false | false | false | false |
joke2k/faker | faker/providers/address/es_CL/__init__.py | 1 | 19951 | from collections import OrderedDict
from typing import Dict, Tuple
from ... import ElementsType
from ..es import Provider as AddressProvider
class Provider(AddressProvider):
# Source for regions, provinces and communes
# https://www.subdere.gov.cl/documentacion/c%C3%B3digos-%C3%BAnicos-
# territoriales-actualizados-al-06-de-septiembre-2018
regions: Dict[str, str] = {
"TA": "Región de Tarapacá",
"AN": "Región de Antofagasta",
"AT": "Región de Atacama",
"CO": "Región de Coquimbo",
"VA": "Región de Valparaíso",
"LI": "Región del Libertador General Bernardo O'Higgins",
"ML": "Región del Maule",
"BI": "Región del Biobío",
"AR": "Región de La Araucanía",
"LL": "Región de Los Lagos",
"AI": "Región de Aysén del General Carlos Ibáñez del Campo",
"MA": "Región de Magallanes y de la Antártica Chilena",
"RM": "Región Metropolitana",
"LR": "Región de Los Ríos",
"AP": "Región de Arica y Parinacota",
"NB": "Región de Ñuble",
}
provinces: Dict[str, str] = {
"011": "Iquique",
"014": "Tamarugal",
"021": "Antofagasta",
"022": "El Loa",
"023": "Tocopilla",
"031": "Copiapó",
"032": "Chañaral",
"033": "Huasco",
"041": "Elqui",
"042": "Choapa",
"043": "Limarí",
"051": "Valparaíso",
"052": "Isla de Pascua",
"053": "Los Andes",
"054": "Petorca",
"055": "Quillota",
"056": "San Antonio",
"057": "San Felipe de Aconcagua",
"058": "Marga Marga",
"061": "Cachapoal",
"062": "Cardenal Caro",
"063": "Colchagua",
"071": "Talca",
"072": "Cauquenes",
"073": "Curicó",
"074": "Linares",
"081": "Concepción",
"082": "Arauco",
"083": "Biobío",
"091": "Cautín",
"092": "Malleco",
"101": "Llanquihue",
"102": "Chiloé",
"103": "Osorno",
"104": "Palena",
"111": "Coyhaique",
"112": "Aysén",
"113": "Capitán Prat",
"114": "General Carrera",
"121": "Magallanes",
"122": "Antártica Chilena",
"123": "Tierra del Fuego",
"124": "Última Esperanza",
"131": "Santiago",
"132": "Cordillera",
"133": "Chacabuco",
"134": "Maipo",
"135": "Melipilla",
"136": "Talagante",
"141": "Valdivia",
"142": "Ranco",
"151": "Arica",
"152": "Parinacota",
"161": "Diguillín",
"162": "Itata",
"163": "Punilla",
}
communes: Dict[str, str] = {
"15101": "Arica",
"15102": "Camarones",
"15201": "Putre",
"15202": "General Lagos",
"01101": "Iquique",
"01402": "Camiña",
"01403": "Colchane",
"01404": "Huara",
"01405": "Pica",
"01401": "Pozo Almonte",
"01107": "Alto Hospicio",
"02101": "Antofagasta",
"02102": "Mejillones",
"02103": "Sierra Gorda",
"02104": "Taltal",
"02201": "Calama",
"02202": "Ollagüe",
"02203": "San Pedro de Atacama",
"02301": "Tocopilla",
"02302": "María Elena",
"03101": "Copiapó",
"03102": "Caldera",
"03103": "Tierra Amarilla",
"03201": "Chañaral",
"03202": "Diego de Almagro",
"03301": "Vallenar",
"03302": "Alto del Carmen",
"03303": "Freirina",
"03304": "Huasco",
"04101": "La Serena",
"04102": "Coquimbo",
"04103": "Andacollo",
"04104": "La Higuera",
"04105": "Paiguano",
"04106": "Vicuña",
"04201": "Illapel",
"04202": "Canela",
"04203": "Los Vilos",
"04204": "Salamanca",
"04301": "Ovalle",
"04302": "Combarbalá",
"04303": "Monte Patria",
"04304": "Punitaqui",
"04305": "Río Hurtado",
"05101": "Valparaíso",
"05102": "Casablanca",
"05103": "Concón",
"05104": "Juan Fernández",
"05105": "Puchuncaví",
"05801": "Quilpué",
"05107": "Quintero",
"05804": "Villa Alemana",
"05109": "Viña del Mar",
"05201": "Isla de Pascua",
"05301": "Los Andes",
"05302": "Calle Larga",
"05303": "Rinconada",
"05304": "San Esteban",
"05401": "La Ligua",
"05402": "Cabildo",
"05403": "Papudo",
"05404": "Petorca",
"05405": "Zapallar",
"05501": "Quillota",
"05502": "Calera",
"05503": "Hijuelas",
"05504": "La Cruz",
"05802": "Limache",
"05506": "Nogales",
"05803": "Olmué",
"05601": "San Antonio",
"05602": "Algarrobo",
"05603": "Cartagena",
"05604": "El Quisco",
"05605": "El Tabo",
"05606": "Santo Domingo",
"05701": "San Felipe",
"05702": "Catemu",
"05703": "Llaillay",
"05704": "Panquehue",
"05705": "Putaendo",
"05706": "Santa María",
"06101": "Rancagua",
"06102": "Codegua",
"06103": "Coinco",
"06104": "Coltauco",
"06105": "Doñihue",
"06106": "Graneros",
"06107": "Las Cabras",
"06108": "Machalí",
"06109": "Malloa",
"06110": "Mostazal",
"06111": "Olivar",
"06112": "Peumo",
"06113": "Pichidegua",
"06114": "Quinta de Tilcoco",
"06115": "Rengo",
"06116": "Requínoa",
"06117": "San Vicente",
"06201": "Pichilemu",
"06202": "La Estrella",
"06203": "Litueche",
"06204": "Marchihue",
"06205": "Navidad",
"06206": "Paredones",
"06301": "San Fernando",
"06302": "Chépica",
"06303": "Chimbarongo",
"06304": "Lolol",
"06305": "Nancagua",
"06306": "Palmilla",
"06307": "Peralillo",
"06308": "Placilla",
"06309": "Pumanque",
"06310": "Santa Cruz",
"07101": "Talca",
"07102": "Constitución",
"07103": "Curepto",
"07104": "Empedrado",
"07105": "Maule",
"07106": "Pelarco",
"07107": "Pencahue",
"07108": "Río Claro",
"07109": "San Clemente",
"07110": "San Rafael",
"07201": "Cauquenes",
"07202": "Chanco",
"07203": "Pelluhue",
"07301": "Curicó",
"07302": "Hualañé",
"07303": "Licantén",
"07304": "Molina",
"07305": "Rauco",
"07306": "Romeral",
"07307": "Sagrada Familia",
"07308": "Teno",
"07309": "Vichuquén",
"07401": "Linares",
"07402": "Colbún",
"07403": "Longaví",
"07404": "Parral",
"07405": "Retiro",
"07406": "San Javier",
"07407": "Villa Alegre",
"07408": "Yerbas Buenas",
"08101": "Concepción",
"08102": "Coronel",
"08103": "Chiguayante",
"08104": "Florida",
"08105": "Hualqui",
"08106": "Lota",
"08107": "Penco",
"08108": "San Pedro de la Paz",
"08109": "Santa Juana",
"08110": "Talcahuano",
"08111": "Tomé",
"08112": "Hualpén",
"08201": "Lebu",
"08202": "Arauco",
"08203": "Cañete",
"08204": "Contulmo",
"08205": "Curanilahue",
"08206": "Los Álamos",
"08207": "Tirúa",
"08301": "Los Ángeles",
"08302": "Antuco",
"08303": "Cabrero",
"08304": "Laja",
"08305": "Mulchén",
"08306": "Nacimiento",
"08307": "Negrete",
"08308": "Quilaco",
"08309": "Quilleco",
"08310": "San Rosendo",
"08311": "Santa Bárbara",
"08312": "Tucapel",
"08313": "Yumbel",
"08314": "Alto Biobío",
"16101": "Chillán",
"16102": "Bulnes",
"16202": "Cobquecura",
"16203": "Coelemu",
"16302": "Coihueco",
"16103": "Chillán Viejo",
"16104": "El Carmen",
"16204": "Ninhue",
"16303": "Ñiquén",
"16105": "Pemuco",
"16106": "Pinto",
"16205": "Portezuelo",
"16107": "Quillón",
"16201": "Quirihue",
"16206": "Ránquil",
"16301": "San Carlos",
"16304": "San Fabián",
"16108": "San Ignacio",
"16305": "San Nicolás",
"16207": "Treguaco",
"16109": "Yungay",
"09101": "Temuco",
"09102": "Carahue",
"09103": "Cunco",
"09104": "Curarrehue",
"09105": "Freire",
"09106": "Galvarino",
"09107": "Gorbea",
"09108": "Lautaro",
"09109": "Loncoche",
"09110": "Melipeuco",
"09111": "Nueva Imperial",
"09112": "Padre Las Casas",
"09113": "Perquenco",
"09114": "Pitrufquén",
"09115": "Pucón",
"09116": "Saavedra",
"09117": "Teodoro Schmidt",
"09118": "Toltén",
"09119": "Vilcún",
"09120": "Villarrica",
"09121": "Cholchol",
"09201": "Angol",
"09202": "Collipulli",
"09203": "Curacautín",
"09204": "Ercilla",
"09205": "Lonquimay",
"09206": "Los Sauces",
"09207": "Lumaco",
"09208": "Purén",
"09209": "Renaico",
"09210": "Traiguén",
"09211": "Victoria",
"14101": "Valdivia",
"14102": "Corral",
"14202": "Futrono",
"14201": "La Unión",
"14203": "Lago Ranco",
"14103": "Lanco",
"14104": "Los Lagos",
"14105": "Máfil",
"14106": "Mariquina",
"14107": "Paillaco",
"14108": "Panguipulli",
"14204": "Río Bueno",
"10101": "Puerto Montt",
"10102": "Calbuco",
"10103": "Cochamó",
"10104": "Fresia",
"10105": "Frutillar",
"10106": "Los Muermos",
"10107": "Llanquihue",
"10108": "Maullín",
"10109": "Puerto Varas",
"10201": "Castro",
"10202": "Ancud",
"10203": "Chonchi",
"10204": "Curaco de Vélez",
"10205": "Dalcahue",
"10206": "Puqueldón",
"10207": "Queilén",
"10208": "Quellón",
"10209": "Quemchi",
"10210": "Quinchao",
"10301": "Osorno",
"10302": "Puerto Octay",
"10303": "Purranque",
"10304": "Puyehue",
"10305": "Río Negro",
"10306": "San Juan de la Costa",
"10307": "San Pablo",
"10401": "Chaitén",
"10402": "Futaleufú",
"10403": "Hualaihué",
"10404": "Palena",
"11101": "Coihaique",
"11102": "Lago Verde",
"11201": "Aisén",
"11202": "Cisnes",
"11203": "Guaitecas",
"11301": "Cochrane",
"11302": "O'Higgins",
"11303": "Tortel",
"11401": "Chile Chico",
"11402": "Río Ibáñez",
"12101": "Punta Arenas",
"12102": "Laguna Blanca",
"12103": "Río Verde",
"12104": "San Gregorio",
"12201": "Cabo de Hornos",
"12202": "Antártica",
"12301": "Porvenir",
"12302": "Primavera",
"12303": "Timaukel",
"12401": "Natales",
"12402": "Torres del Paine",
"13101": "Santiago",
"13102": "Cerrillos",
"13103": "Cerro Navia",
"13104": "Conchalí",
"13105": "El Bosque",
"13106": "Estación Central",
"13107": "Huechuraba",
"13108": "Independencia",
"13109": "La Cisterna",
"13110": "La Florida",
"13111": "La Granja",
"13112": "La Pintana",
"13113": "La Reina",
"13114": "Las Condes",
"13115": "Lo Barnechea",
"13116": "Lo Espejo",
"13117": "Lo Prado",
"13118": "Macul",
"13119": "Maipú",
"13120": "Ñuñoa",
"13121": "Pedro Aguirre Cerda",
"13122": "Peñalolén",
"13123": "Providencia",
"13124": "Pudahuel",
"13125": "Quilicura",
"13126": "Quinta Normal",
"13127": "Recoleta",
"13128": "Renca",
"13129": "San Joaquín",
"13130": "San Miguel",
"13131": "San Ramón",
"13132": "Vitacura",
"13201": "Puente Alto",
"13202": "Pirque",
"13203": "San José de Maipo",
"13301": "Colina",
"13302": "Lampa",
"13303": "Tiltil",
"13401": "San Bernardo",
"13402": "Buin",
"13403": "Calera de Tango",
"13404": "Paine",
"13501": "Melipilla",
"13502": "Alhué",
"13503": "Curacaví",
"13504": "María Pinto",
"13505": "San Pedro",
"13601": "Talagante",
"13602": "El Monte",
"13603": "Isla de Maipo",
"13604": "Padre Hurtado",
"13605": "Peñaflor",
}
street_prefixes = OrderedDict(
[
("Calle", 0.6),
("Avenida", 0.1),
("Avda.", 0.1),
("Av.", 0.1),
("Pasaje", 0.04),
("Psje.", 0.04),
("Camino", 0.02),
]
)
street_suffixes = (
"Norte",
"Sur",
)
city_formats = ("{{city}}",)
street_name_formats = (
"{{street_prefix}} {{common_street_name}}",
"{{street_prefix}} {{historic_people_street_name}}",
"{{street_prefix}} {{first_name_male}} {{last_name}}",
"{{street_prefix}} {{first_name_female}} {{last_name}}",
"{{street_prefix}} {{plant_street_name}}",
"{{common_street_name}}",
"{{historic_people_street_name}}",
"{{plant_street_name}}",
"{{first_name_male}} {{last_name}}",
"{{first_name_female}} {{last_name}}",
)
building_number_formats = OrderedDict(
[
("%###", 0.35),
("%##", 0.35),
("%#", 0.25),
("%", 0.05),
]
)
street_address_formats = (
"{{street_name}} {{building_number}}",
"{{street_name}} {{building_number}} {{secondary_address}}",
)
address_formats = OrderedDict(
[
("{{street_address}}\n{{commune_and_region}}, {{postcode}}", 0.4),
("{{street_address}}\n{{commune_and_region}}", 0.4),
("{{highway_name}}, km {{random_int:big_kilometer}}", 0.1),
("{{road_name}}, km {{random_int:kilometer}}, {{region}}", 0.1),
]
)
secondary_address_formats = ("Dpto. @@##", "Piso @#", "Of. %##@")
common_street_names = OrderedDict(
[
("Arturo Prat", 0.118812),
("Esmeralda", 0.107261),
("Manuel Rodríguez", 0.105611),
("Gabriela Mistral", 0.104785),
("Los Aromos", 0.104785),
("Las Rosas", 0.098185),
("Caupolicán", 0.094884),
("Lautaro", 0.094059),
("Los Alerces", 0.086634),
("Los Copihues", 0.084983),
]
)
# Some chilean historic people. Full names come first, then its variants
historic_people_street_names = (
("Alonso de Ercilla",),
("Alonso de Ribera",),
("Álvaro Casanova", "Casanova"),
("Aníbal Pinto Garmendia", "Aníbal Pinto"),
("Antonio Varas",),
("Arturo Alessandri Palma", "Arturo Alessandri"),
("Benjamín Vicuña Mackenna", "Vicuña Mackenna", "Mackenna"),
("Bernardo O'Higgins", "O'Higgins"),
("Camilo Henríquez",),
("Caupolicán",),
("Colo Colo",),
("Diego Barros Arana", "Barros Arana"),
("Diego Portales", "Portales"),
("Domingo Santa María", "Santa María"),
("Eliodoro Yáñez",),
("Enrique Mac Iver", "Mac Iver"),
("Eusebio Lillo",),
("Francisco Bilbao", "Bilbao"),
("José de San Martín", "San Martín"),
("José Manuel Balmaceda", "Balmaceda"),
("José Miguel Carrera",),
("José Victorino Lastarria", "Lastarria"),
("Juan Mackenna",),
("Lord Thomas Cochrane", "Lord Cochrane", "Cochrane"),
("Los Carrera",),
("Manuel Antonio Matta", "Matta"),
("Manuel Bulnes", "Bulnes"),
("Manuel José Irarrázaval", "Irarrázabal"),
("Manuel Montt",),
("Manuel Rodríguez",),
("Manuel Baquedano", "Baquedano"),
("Michimalonco",),
("Padre Alberto Hurtado", "Alberto Hurtado"),
("Patricio Lynch", "Lynch"),
("Paula Jaraquemada",),
("Pedro Aguirre Cerda",),
("Pedro de Valdivia",),
("Pedro Montt",),
("Ramón Barros Luco", "Barros Luco"),
("Ramón Carnicer",),
("Ramón Freire", "Freire"),
("Ramón Picarte", "Picarte"),
("Salvador Allende Gossens", "Salvador Allende"),
("Santa Rosa",),
)
# Some streets are named by plants
plant_street_names: ElementsType[str] = (
"Los Cactus",
"Los Laureles",
"Los Piñones",
"Los Helechos",
"Los Higos",
"Los Abedules",
"Los Encinos",
"Los Palmitos",
"Los Naranjos",
"Los Robles",
"Los Pinos",
"Los Coihues",
"Los Calafates",
"Los Digitales",
"Los Lirios",
"Los Tilos",
"Los Girasoles",
"Las Azucenas",
"Las Lilas",
"Las Hortensias",
"Las Margaritas",
"Las Maravillas",
"Las Manzanillas",
"Las Mandarinas",
"Las Araucarias",
"Las Mosquetas",
"Las Malvas",
"Las Mosquetas",
)
road_names = ("Ruta T-%#", "Ruta U-%##", "Ruta %##-CH")
highway_names = ("Ruta 5 Norte", "Ruta 5 Sur")
def commune(self) -> str:
return self.random_element(self.communes.values())
def province(self) -> str:
return self.random_element(self.provinces.values())
def region(self) -> str:
return self.random_element(self.regions.values())
def commune_code(self) -> str:
return self.random_element(self.communes.keys())
def province_code(self) -> str:
return self.random_element(self.provinces.keys())
def region_code(self) -> str:
return self.random_element(self.regions.keys())
def common_street_name(self) -> str:
return self.random_element(self.common_street_names)
def plant_street_name(self) -> str:
return self.random_element(self.plant_street_names)
def historic_people_street_name(self) -> str:
person_names: Tuple[str, ...] = self.random_element(self.historic_people_street_names)
return self.random_element(person_names)
def street_prefix(self) -> str:
return self.random_element(self.street_prefixes)
def secondary_address(self) -> str:
return self.numerify(self.random_element(self.secondary_address_formats))
def commune_and_region(self) -> str:
commune_code = self.commune_code()
commune_name = self.communes[commune_code]
region_index = int(commune_code[0:2]) - 1
region_name = tuple(self.regions.values())[region_index]
return "{:s}, {:s}".format(commune_name, region_name)
def road_name(self) -> str:
self.generator.set_arguments("kilometer", {"min": 1, "max": 35})
return self.numerify(self.generator.parse(self.random_element(self.road_names)))
def highway_name(self) -> str:
self.generator.set_arguments("big_kilometer", {"min": 1, "max": 1000})
return self.numerify(self.generator.parse(self.random_element(self.highway_names)))
def postcode(self) -> str:
return self.numerify("######0")
administrative_unit = region
city = commune
| mit | 99ed30814bed79fb15bd3b3ade833d82 | 29.54784 | 94 | 0.483607 | 2.820202 | false | false | false | false |
joke2k/faker | faker/providers/company/ru_RU/__init__.py | 1 | 45104 | from datetime import datetime
from .. import Provider as CompanyProvider
def calculate_checksum(value: str) -> str:
    """Return the Russian tax-number (ИНН) check digit for ``value``.

    Each digit of ``value`` is multiplied by a positional weight taken from
    the tail of the official coefficient table — slicing with ``-len(value)``
    lets the same table serve 9-, 10- and 11-digit inputs (10- and 12-digit
    INNs). The weighted sum modulo 11, then modulo 10, is the single check
    digit, returned as a string.
    """
    weights = [3, 7, 2, 4, 10, 3, 5, 9, 4, 6, 8][-len(value) :]
    total = sum(int(digit) * weight for digit, weight in zip(value, weights))
    return str(total % 11 % 10)
class Provider(CompanyProvider):
formats = (
"{{company_prefix}} «{{last_name}}»",
"{{company_prefix}} «{{last_name}} {{last_name}}»",
"{{company_prefix}} «{{last_name}}-{{last_name}}»",
"{{company_prefix}} «{{last_name}}, {{last_name}} и {{last_name}}»",
"{{last_name}} {{company_suffix}}",
"{{large_company}}",
)
company_prefixes = (
"РАО",
"АО",
"ИП",
"НПО",
"ЗАО",
"ООО",
"ОАО",
)
company_suffixes = (
"Инк",
"Инкорпорэйтед",
"и партнеры",
"Групп",
"Лтд",
"Лимитед",
)
# Source: https://www.rbc.ru/rbc500/
large_companies = (
"Газпром",
"ЛУКОЙЛ",
"Роснефть",
"Сбербанк России",
"Российские железные дороги",
"Ростех",
"Сургутнефтегаз",
"X5 Retail Group",
"ВТБ",
"Магнит",
"САФМАР",
"Росатом",
"Российские сети",
"Интер РАО",
"Транснефть",
"Татнефть",
"НОВАТЭК",
"Евраз",
"АФК Система",
"En +",
"НЛМК",
"Норникель",
"ГК Мегаполис",
"Газпромбанк",
"Русал",
"Аэрофлот — Российские авиалинии",
"Сибур Холдинг",
"Северсталь",
"СУЭК",
"ММК",
"Группа УГМК",
"Мобильные телесистемы",
"Металлоинвест",
"Лента",
"Объединенная авиастроительная корпорация",
"РусГидро",
"Сахалин Энерджи",
"Т Плюс",
"Группа М.Видео-Эльдорадо",
"Еврохим",
"ВымпелКом",
"Банковский холдинг Альфа-банка",
"Объединенная судостроительная корпорация",
"МегаФон",
"Ростелеком",
"ТМК",
"Славнефть",
"Тойота Мотор (Toyota)",
"Мечел",
"Автотор холдинг",
"Стройгазмонтаж",
"Дж.Т.И. Россия (JTI)",
"Торговая сеть Красное и Белое",
"АК Алроса",
"Дикси Групп",
"ВЭБ.РФ",
"ФМСМ (PMI)",
"Фольксваген Груп Рус",
"АвтоВАЗ",
"Леруа Мерлен Восток (Leroi Merlin)",
"Ашан (Auchan)",
"Россельхозбанк",
"ДНС Групп",
"ГК ТНС энерго",
"Протек",
"Группа компаний ПИК",
"Объединенная двигателестроительная корпорация",
"Независимая нефтегазовая компания",
"Merlion",
"ФосАгро",
"КМР и СНГ (KIA)",
"Катрен",
"Банк ФК Открытие",
"Корпорация Тактическое ракетное вооружение",
"Группа Рольф",
"ТАИФ-НК",
"Трансмашхолдинг",
"Метро Кэш энд Керри (Metro Cash & Carry)",
"Мостотрест",
"СОГАЗ",
"Эппл Рус (Apple)",
"Арктикгаз",
"Нижнекамскнефтехим",
"«Томскнефть» ВНК",
"Зарубежнефть",
"ЕвроСибЭнерго",
"Вертолеты России",
"Группа ГАЗ",
"Почта России",
"МУМТ (BAT)",
"Стройтранснефтегаз",
"КамАЗ",
"ФК Пульс",
"Полюс",
"Хендэ Мотор СНГ (Hyundai)",
"S7 Group",
"Ямал СПГ",
"Группа Содружество",
"ЧТПЗ",
"Иркутская нефтяная компания",
"Русснефть",
"Национальная компьютерная корпорация",
"Мерседес-Бенц Рус (Mercedes-Benz)",
"Русэнергосбыт",
"ОМК",
"Уралкалий",
"ГК Ташир",
"Компания Газ-Альянс",
"ФортеИнвест",
"Группа Мэйджор",
"Российская электроника",
"ГК СНС",
"Сибирский антрацит",
"Группа О'кей",
"Мосинжпроект",
"UCL Holding",
"Группа Илим",
"Московский кредитный банк",
"Группа Синара",
"Нефтиса",
"Объединенная компания Связной — Евросеть",
"Группа ЛСР",
"Т2 РТК Холдинг",
"НЗНП",
"АльфаСтрахование",
"Ланит",
"НПК Уралвагонзавод",
"Рено Россия (Renault)",
"Удмуртнефть",
"Нестле Россия (Nestle)",
"Райффайзенбанк (Raiffeisen)",
"Техкомпания Хуавэй (Huawei)",
"КДВ Групп",
"Яндекс",
"Мессояханефтегаз",
"БМВ Русланд Трейдинг (BMW)",
"Салым Петролеум",
"Данон (Danone)",
"ЮниКредит Банк (UniCredit)",
"ТД Риф",
"Мираторг",
"Группа Волга-Днепр",
"Вайлдберриз",
"Московский метрополитен",
"Полиметалл",
"Группа РЕСО",
"Пепсико холдингс",
"ГК Эфко",
"СДС-Уголь",
"ЛокоТех",
"ГК Автомир",
"Совкомбанк",
"ФСК Лидер",
"Марс (Mars)",
"Детский мир",
"Группа НПФ Благосостояние",
"Госкорпорация по ОрВД",
"Трансойл",
"ОХК Уралхим",
"Каспийский трубопроводный консорциум-Р",
"Тинькофф Банк",
"Fix Price",
"Промсвязьбанк",
"Акрон",
"Спортмастер",
"Проктер Энд Гэмбл. Дистрибьюторская компания (Procter & Gamble)",
"Eurasia Drilling Company",
"Группа Черкизово",
"ИКЕА Дом (INGKA)",
"Славянск Эко",
"Корпорация ВСМПО-АВИСМА",
"Росбанк (Societe General)",
"Монетка",
"Стройсервис",
"ГК Транстехсервис",
"Совкомфлот",
"ВСК",
"СБСВ-Ключавто",
"Ингосстрах",
"Сэтл групп",
"Гиперглобус (Bruch-Beteiligungs)",
"Технониколь",
"Металлсервис",
"Нефтехимсервис",
"Промышленно-металлургический холдинг",
"Урало-Сибирская металлургическая компания",
"Мария-Ра",
"Globaltrans",
"Кубанская нефтегазовая компания",
"Авиакомпания ЮТэйр",
"НПФ Газфонд пенсионные накопления",
"Русагро",
"Л'Этуаль",
"ЛГ Электроникс Рус (LG)",
"Каргилл (Cargill)",
"ВАД",
"Астон",
"Уральские авиалинии",
"Сталепромышленная компания",
"НИПИ НГ Петон",
"Бристоль",
"Уралвтормет",
"Нефтетранссервис",
"Казаньоргсинтез",
"Газпром бурение",
"ГК Агро-Белогорье",
"Фортум (Fortum)",
"ПК Балтика (Carlsbergfondet)",
"Авилон АГ",
"Шелл Нефть (Shell)",
"Юнипро (Uniper)",
"Технологии машиностроения (Техмаш)",
"НПК Объединенная вагонная компания",
"Велесстрой",
"ТД Интерторг",
"Юнилевер Русь (Unilever)",
"Солид-товарные рынки",
"Вольво Восток (AB Volvo)",
"Энел Россия",
"Марвел КТ",
"ГК Эталон",
"Металлокомплект-М",
"Группа Ренессанс Страхование",
"Военторг",
"Nordgold",
"Сибуглемет",
"Акционерный банк Россия",
"ДОМ.РФ",
"Форд Соллерс Холдинг",
"ИКЕА Торг (INGKA)",
"Макдоналдc (McDonald`s)",
"Кузбасская топливная компания",
"Хенкель Рус (Henkel)",
"Дон-Строй Инвест",
"Главное управление обустройства войск (ГУОВ)",
"СК Росгосстрах",
"Кока-Кола Эйчбиси Евразия (Coca-Cola)",
"Хоум Кредит энд Финанс Банк (PPF)",
"Гленкор Агро Мзк (Firada)",
"Mail.Ru Group",
"Монди СЛПК (Mondi)",
"НПО Алмаз",
"ММС Рус (Mitsubishi Motors)",
"Объединенные кондитеры",
"Комацу СНГ (Komatsu)",
"Национальная медиа группа",
"Агентство по страхованию вкладов (АСВ)",
"Татэнергосбыт",
"Куйбышевазот",
"Азбука вкуса",
"Трансбункер",
"Башкирская содовая компания",
"Инвестнефтетрейд",
"Inventive Retail Group",
"Самсунг Электроникс Рус Калуга (Samsung)",
"Крокус",
"Гугл (Google)",
"АСЦ-Холдинг",
"Новороссийский морской торговый порт",
"Швабе",
"Русская медная компания",
"Евроцемент груп",
"Мосводоканал",
"Международный аэропорт Шереметьево",
"Сегежа",
"Р-Фарм",
"Фармстандарт",
"Ростсельмаш",
"Транспортная группа FESCO",
"Компания Адамас",
"Метафракс",
"Джонсон & Джонсон (Johnson & Johnson)",
"Softline",
"Ягуар ленд ровер",
"Байер",
"Эркафарм",
"Фармперспектива",
"Банк Уралсиб",
"ВО Машиноимпорт",
"Кордиант",
"Новосталь",
"ВкусВилл",
"Л'Ореаль (L'Oreal)",
"DDS",
"ТОАЗ",
"Банк Санкт-Петербург",
"Группа агропредприятий Ресурс",
"Ярче!",
"Ренейссанс Констракшн (Ronesans Holding Anonim Sirketi)",
"Санофи Россия (Sanofi)",
"Группа ГМС",
"Северный ветер",
"БСС",
"Скания-Русь (Scania)",
"ГК Фаворит Моторс",
"Группа РТК",
"Фармкомплект",
"Нокиан Шина (Nokian)",
"ДСК Автобан",
"Омега Групп",
"Квадра",
"Roust",
"ГК Невада (Самбери)",
"Восточный экспресс банк",
"Верисел-трейдинг",
"Гознак",
"Фирма Агрокомплекс им. Ткачева",
"Банк Русский стандарт",
"Мазда Мотор Рус (Mazda)",
"Группа Газфонд",
"СТД Петрович",
"Беркс",
"Кари",
"Арконик СМЗ",
"Мон Дэлис (Mondelez)",
"Комус",
"Группа Агат",
"Великолукский мясокомбинат",
"Верный",
"СДС Азот",
"М Фэшн",
"Белгранкорм-холдинг",
"Группа Нэфис",
"ФГ Будущее",
"Глория Джинс",
"Билла (Rewe)",
"Государственная транспортная лизинговая компания",
"ФК Гранд Капитал",
"ЭС",
"Компания Металл Профиль",
"ГК Орими Трэйд",
"ГСЛ",
"Интернешнл Пейпер (International Paper)",
"Лаборатория Касперского",
"ПСМА Рус",
"Аптечная сеть 36,6",
"Тетра Пак (Tetra Pak)",
"Центральная пригородная пассажирская компания",
"Самараэнерго",
"Азур Эйр",
"Командор-Холдинг",
"Белуга Групп",
"ТД БелАЗ",
"Мосгортранс",
"Спар Миддл Волга",
"Холдинг Транспортные компоненты",
"Московский аэропорт Домодедово",
"Рулог (Havi)",
"Эйч Энд Эм (H&M)",
"Концерн Автоматика",
"Татэнерго",
"Трубная грузовая компания",
"Комос Групп",
"Первая тяжеловесная компания",
"ОМПК",
"НК Дулисьма",
"Ачимгаз",
"Новосибирскэнергосбыт",
"Компания СИМ-Авто",
"Ситибанк",
"Остин",
"Адидас (Adidas)",
"Ферреро Руссия (Ferrero)",
"Пермэнергосбыт",
"РКК Энергия",
"Свеза",
"Росжелдорпроект",
"Мазда Соллерс Мануфэкчуринг Рус",
"БСХ Бытовые приборы (BSH Hausgerate)",
"Московская биржа ММВБ-РТС",
"Русэнергоресурс",
"Компания Луис Дрейфус Восток (Louis Dreyfus)",
"ЭР-Телеком Холдинг",
"Соллерс",
"Объединенная энергетическая компания",
"Уральские локомотивы",
"ТМК Чермет",
"Загорский трубный завод",
"Элко Рус (Elko)",
"Архангельский ЦБК",
"Мособлгаз",
"ДК Рус",
"Энергосбытовая компания Восток",
"ГКНПЦ им. М.В.Хруничева",
"Металлоторг",
"Агросила Групп",
"Ман Трак Энд Бас Рус (Volkswagen)",
"Петербургский метрополитен",
"ТГК-2",
"Концерн Титан-2",
"Ренейссанс Хэви Индастрис Ronesans Endustri",
"Бургер Рус (Burger King)",
"Ozon",
"Сони Электроникс (Sony)",
"Продо",
"Продимекс-Холдинг",
"АвтоГермес",
"Railgo",
"Новотранс",
"Новикомбанк",
"Рив Гош",
"Сибирская горно-металлургическая компания",
"Сименс (Siemens)",
"Лига ставок",
"Банк Ак Барс",
"Группа Полипластик",
"Водоканал Санкт-Петербурга",
"РэйлАльянс",
"Российская телевизионная и радиовещательная сеть",
"Зерно-трейд",
"Ренессанс Кредит",
"Роберт Бош (Robert Bosch)",
"ВО Промсырьеимпорт",
"САП СНГ (SAP)",
"А Групп",
"Приосколье",
"Зара СНГ (Zara)",
"Модум-транс",
"Эбботт лэбораториз (Abbott Laboratories)",
"Группа Магнезит",
"Газпром автоматизация",
"Газэнергосервис",
"Независимая энергосбытовая компания Краснодарского края",
"Группа ЭПМ",
"Минудобрения",
"Либхерр-Русланд (Liebherr)",
"Восточная техника (Vost-Tech)",
"Первый канал",
"ГМК Сплав",
"ГК Автодилерство",
"НМЖК",
"ВГТРК",
"Неофарм",
"Роскосмос",
"Вита Лайн",
"Краснодарзернопродукт-Экспо",
"Алкоторг",
"Красцветмет",
"Касторама Рус (Castorama)",
"Деловые линии",
"ГВСУ по специальным объектам",
"ПКФ ДиПОС",
"Восток-Запад",
"Амурская нефтебаза",
"Юг Руси",
"Шнейдер Электрик (Schneider Electric)",
"Сингента (Chemchina)",
"Титан",
"Петропавловск",
"Фармимэкс",
"АБ Инбев Эфес (Anheuser-Busch Inbev)",
"ABI Product",
"Профитмед",
"ТД Агроторг",
"ТЭК СПБ",
"ТД Ункомтех",
"ОПХ (Heineken)",
"ТГК-16",
"Уральский банк реконструкции и развития",
"QIWI",
"СК Согласие",
"Группа Эссен",
"Втормет",
"Эссити (Essity)",
"Hoff (Домашний интерьер)",
"Сиско Солюшенз (Cisco)",
"ВО ЖДТ России",
"Купишуз (Lamoda)",
"Делл (Dell)",
"ПСК",
"Каменск-Уральский металлургический завод",
"Аргос",
"А.П.Р.",
"ГК 1520",
"Артис-Агро Экспорт",
"Луидор",
"Порше Руссланд (Porsche)",
"Денцу Эйджис Си Эс (Dentsu)",
"Эйвон Бьюти Продактс Компани (Avon)",
"РКЦ Прогресс",
"Силовые машины",
"АНГК",
"Корпорация Гринн",
"Фаберлик",
"Сибирская сервисная компания",
"Банк Возрождение",
"Отисифарм",
"Боэс Констракшн (Boes Construction)",
"Саткинский чугуноплавильный завод",
"Алтайвагон",
"ПТК",
"Щекиноазот",
"Волгоградэнергосбыт",
"Русский уголь",
"Трест КХМ",
"РМ Рейл",
"Восточная горнорудная компания",
"Группа Стройтрансгаз",
"БАСФ (BASF)",
"Мерида",
"Брок-Инвест-Сервис и К",
"Вирлпул Рус (Whirlpool)",
"Карелия Палп",
"Тева (Teva)",
"Media Direction Group",
"Якобс Дау Эгбертс Рус (Jacobs Douwe Egberts)",
"ГК Великан",
"Август",
"Транслом",
"ОТП Банк",
"РусВинил",
"Системный оператор Единой энергетической системы",
"АСР-Углесбыт",
"ЦЭНКИ",
"Транстрейдойл",
"Росморпорт",
"Газнефтетрэйдинг",
"Сладковско-Заречное",
"Кроношпан (Kronoplus)",
"ТЦ Кунцево Лимитед",
"СНПХ",
"Кимберли-Кларк (Kimberly-Clark)",
"Катерпиллар Евразия (Caterpillar)",
"Крок инкорпорейтед",
"Ашинский металлургический завод",
"Автодом",
"Международный центр",
"Мишлен (Michelin)",
"Картли",
"БелАЗ-24",
"Первый завод",
"ГК ЕКС",
"Петролеум Трейдинг",
"Нижфарм (Nidda Midco)",
"Импэкснефтехим",
"Вольво Карс (Zhejiang Geely)",
"Мосметрострой",
"ТЭК Мосэнерго",
"Борисхоф 1 (Inchcape)",
"ГК Титан",
"ПТК Уголь",
"Авторусь",
"Юг-Авто",
"Нова",
"Метрострой",
"Ресурс",
"Сетевая компания",
"РЕ Трэйдинг (LPP)",
"Углетранс",
"ЭйчПи Инк (HP Inc.)",
"ТК Шлюмберже (Schlumberger)",
"ГК Мега-Авто",
"Корпорация Электросевкавмонтаж",
"ГК Российские коммунальные системы",
"Запсибгазпром",
"Нефтепродукттрейд",
"Сатурн-Р",
"Завод имени Дегтярева",
"Такеда Фармасьютикалс (Takeda Pharmaceutical)",
"Слата супермаркет",
"Emex",
"САМ-МБ",
"171 Меридиан",
"Армтек",
"Центр финансовых технологий",
"Группа компаний Пионер",
"АХ Степь",
"Таграс (ТНГ-Групп)",
"Fonbet",
"Сандоз (Sandoz)",
"Берлин-Хеми А. Менарини (Berlin Chemie)",
"ГК Агропромкомплектация",
"МАКС",
"Компания Трасса",
"Башкирэнерго",
"Охрана Росгвардии",
"Гала-Форм",
"КРКА Фарма (KRKA)",
"Максидом",
"Нефтехимремстрой",
"Нефтьмагистраль",
"Авеста Фармацевтика (Baby Dream)",
"Старттех",
"Конар",
"Нортгаз",
"УГС",
"АББ (ABB)",
"Металлстандарт",
"Балтийская топливная компания",
"Мострансавто",
"Аксель-Моторс",
"Группа компаний МИЦ",
"ПК Борец",
"Европа",
"Сибирская аграрная группа",
"РТИ",
"Ферронордик машины (Ferronordic)",
"Южуралзолото ГК",
"Прогресс",
"Юг-Нефтепродукт",
"Камский кабель",
"Familia",
"Транскапиталбанк",
"А-Ойл",
"Сибтрейд",
"МТС-банк",
"Московская инженерно-строительная компания",
"Курганмашзавод",
"Вектрум-К",
"Морской терминал Тамань",
"Таркетт Рус (Tarkett)",
"Несте Санкт-Петербург (Neste)",
"Ново-Уренгойская газовая компания",
"Национальная нерудная компания",
"Октоблу (Decathlon)",
"Снежная Королева",
"Новартис Фарма (Novartis)",
"Магнолия",
"Техинком",
"Дочки-Сыночки",
"Астеллас Фарма",
"General Fueller",
"Автозаправочные комплексы Atan",
"Псковвтормет",
"Авиакомпания Икар",
)
catch_phrase_adj = (
(
"Автоматизированный",
"Автономный",
"Адаптивный",
"Амортизированный",
"Ассимилированный",
"Безопасный",
"Бизнес-ориентированный",
"Взаимовыгодный",
"Виртуальный",
"Глубокий",
"Горизонтальный",
"Делегируемый",
"Децентрализованный",
"Дублируемый",
"Инверсный",
"Инновационный",
"Интегрированный",
"Интуитивный",
"Качественный",
"Клиент-ориентированный",
"Контролируемый",
"Концептуальный",
"Корпоративный",
"Кросс-платформенный",
"Межгрупповой",
"Многогранный",
"Многоканальный",
"Многослойный",
"Многоуровневый",
"Модернизируемый",
"Настраиваемый",
"Новый",
"Общедоступный",
"Объектный",
"Обязательный",
"Оперативный",
"Оптимизированный",
"Опциональный",
"Организованный",
"Органичный",
"Ориентированный",
"Открытый",
"Оцифрованный",
"Переключаемый",
"Переосмысленный",
"Переработанный",
"Перспективный",
"Полный",
"Поэтапный",
"Превентивный",
"Программируемый",
"Прогрессивный",
"Продвинутый",
"Прочный",
"Разнообразный",
"Распределённый",
"Расширенный",
"Реализованный",
"Реконструируемый",
"Самодостаточный",
"Сбалансированный",
"Сетевой",
"Синхронизированный",
"Совместимый",
"Сокращенный",
"Сосредоточенный",
"Стабильный",
"Стратегический",
"Увеличенный",
"Удобный",
"Улучшенный",
"Улучшенный",
"Уменьшенный",
"Универсальный",
"Управляемый",
"Устойчивый",
"Фундаментальный",
"Функциональный",
"Цельный",
"Централизованный",
"Эксклюзивный",
"Элегантный",
"Эргономичный",
),
(
"аналитический",
"асимметричный",
"асинхронный",
"бездефектный",
"бескомпромиссный",
"веб-ориентированный",
"встречный",
"вторичный",
"высокоуровневый",
"гибкий",
"гибридный",
"глобальный",
"двунаправленный",
"действенный",
"динамичный",
"единообразный",
"заметный",
"инструктивный",
"интерактивный",
"исполнительный",
"итернациональный",
"клиент-серверный",
"контекстуальный",
"круглосуточный",
"логистический",
"локальный",
"максимальный",
"масштабируемый",
"методичный",
"многозадачный",
"мобильный",
"модульный",
"мультимедийный",
"наглядный",
"направленный",
"национальный",
"нейтральный",
"нестандартный",
"объектно-ориентированный",
"однородный",
"оптимальный",
"основной",
"отказостойкий",
"переходный",
"последовательный",
"потенциальный",
"пошаговый",
"прибыльный",
"приоритетный",
"промежуточный",
"радикальный",
"раздвоенный",
"региональный",
"связный",
"систематический",
"системный",
"составной",
"социальный",
"специализированный",
"статический",
"третичный",
"ультрасовременный",
"целостный",
"широкий",
"широкопрофильный",
"эвристический",
"экоцентричный",
"энергонезависимый",
"яркий",
),
)
catch_phrase_nouns_masc = (
"адаптер",
"алгоритм",
"альянс",
"анализатор",
"архив",
"веб-сайт",
"вызов",
"графический интерфейс",
"графический интерфейс пользователя",
"доступ",
"инструментарий",
"интерфейс",
"инфопосредник",
"искусственный интеллект",
"массив",
"модератор",
"мониторинг",
"набор инструкций",
"параллелизм",
"подход",
"портал",
"прогноз",
"продукт",
"проект",
"протокол",
"ресурс",
"системный движок",
"успех",
"фреймворк",
"хаб",
"эталон",
)
catch_phrase_nouns_fem = (
"архитектура",
"база данных",
"база знаний",
"вероятность",
"возможность",
"гибкость",
"защищенная линия",
"иерархия",
"инициатива",
"инфраструктура",
"кодировка",
"конгломерация",
"концепция",
"координация",
"локальная сеть",
"матрица",
"методология",
"миграция",
"модель",
"нейронная сеть",
"парадигма",
"поддержка",
"политика",
"проекция",
"производительность",
"прошивка",
"рабочая группа",
"реализация",
"сеть Интранет",
"сеть Экстранет",
"служба поддержки",
"служба техподдержки",
"способность",
"стандартизация",
"стратегия",
"структура",
"суперструктура",
"установка",
"фокус-группа",
"функциональность",
"функция",
"ценовая структура",
"эмуляция",
)
catch_phrase_nouns_neu = (
"взаимодействие",
"групповое программное обеспечение",
"интернет-решение",
"использование",
"межплатформенное программное обеспечение",
"оборудование",
"определение",
"отношение",
"приложение",
"программное обеспечение",
"решение",
"совершенствование процесса",
"сотрудничество",
"управление бюджетом",
"хранилище данных",
"шифрование",
"ядро",
)
bsWords = (
(
"Адаптация",
"Визуализация",
"Включение",
"Внедрение",
"Генерация",
"Инновация",
"Интеграция",
"Использование",
"Итерация",
"Конструирование",
"Координация",
"Культивация",
"Максимизация",
"Модернизация",
"Монетизация",
"Мотивация",
"Обеспечение",
"Объединение",
"Оптимизация",
"Освоение",
"Охват",
"Оцифровка",
"Перезагрузка",
"Переопределение",
"Переосмысление",
"Перепрофилирование",
"Переход",
"Преображение",
"Приспособление",
"Продление",
"Производство",
"Развитие",
"Разворачивание",
"Разработка",
"Распределение",
"Реализация",
"Революция",
"Синтез",
"Синхронизация",
"Сравнение",
"Трансформация",
"Увеличение",
"Управление",
"Ускорение",
"Формирование",
"Шкалирование",
"Эксплуатация",
),
(
"B2B",
"B2C",
"активных",
"безотказных",
"беспроводных",
"богатых",
"веб-ориентированных",
"вертикальных",
"виртуальных",
"глобальных",
"действенных",
"динамичных",
"заказных",
"индивидуальных",
"инновационных",
"интегрированных",
"интерактивных",
"интуитивных",
"концептуальных",
"корпоративных",
"критически важных",
"кроссплатформенных",
"круглогодичных",
"круглосуточных",
"лучших в своём роде",
"масштабируемых",
"мультимедийных",
"наглядных",
"надежных",
"онлайн и офлайн",
"ориентированных на пользователя",
"открытых",
"передовых",
"подробных",
"популярных",
"престижных",
"прибыльных",
"притягательных",
"прозрачных",
"распределённых",
"распространенных",
"расширяемых",
"революционных",
"сенсационных",
"серверных",
"сетевых",
"соблазнительных",
"совместных",
"современных",
"стандартных",
"стратегических",
"ультрасовременных",
"фронт-энд",
"целостных",
"цельных",
"эффективных",
),
(
"архитектур",
"аудиторий",
"веб-сервисов",
"взаимодействий",
"действий",
"диапазонов",
"знаний",
"инициатив",
"интернет-компаний",
"интернет-магазинов",
"интернет-продавцоы",
"интернет-услуг",
"интерфейсов",
"инфопосредников",
"инфраструктур",
"каналов",
"методик",
"метрик",
"моделей",
"ниш",
"областей интереса",
"отношений",
"парадигм",
"партнерств",
"платформ",
"пользователей",
"порталов",
"приложений",
"результатов",
"решений",
"рынков",
"сетей",
"систем",
"систем снабжения",
"сообществ",
"схем",
"технологий",
"функций",
),
)
def catch_phrase(self) -> str:
"""
:example: 'Адаптивный и масштабируемый графический интерфейс'
"""
noun: str = self.random_element(
self.catch_phrase_nouns_masc + self.catch_phrase_nouns_fem + self.catch_phrase_nouns_neu
)
adj_first: str = self.random_element(self.catch_phrase_adj[0])
adj_second: str = self.random_element(self.catch_phrase_adj[1])
if noun in self.catch_phrase_nouns_fem:
adj_first = adj_first[:-2] + "ая"
adj_second = adj_second[:-2] + "ая"
elif noun in self.catch_phrase_nouns_neu:
adj_first = adj_first[:-2] + "ое"
adj_second = adj_second[:-2] + "ое"
return adj_first + " и " + adj_second + " " + noun
def large_company(self) -> str:
"""
:example: 'АвтоВАЗ'
"""
return self.random_element(self.large_companies)
def company_prefix(self) -> str:
"""
:example: 'ООО'
"""
return self.random_element(self.company_prefixes)
def businesses_inn(self) -> str:
"""
Returns tax identification number for businesses (ru. идентификационный номер налогоплательщика, ИНН).
"""
region: str = "%02d" % self.random_int(min=1, max=92)
inspection: str = "%02d" % self.random_int(min=1, max=99)
tail: str = "%05d" % self.random_int(min=1, max=99999)
result: str = region + inspection + tail
return result + calculate_checksum(result)
def individuals_inn(self) -> str:
"""
Returns tax identification number for individuals (ru. идентификационный номер налогоплательщика, ИНН).
"""
region: str = "%02d" % self.random_int(min=1, max=92)
inspection: str = "%02d" % self.random_int(min=1, max=99)
tail: str = "%06d" % self.random_int(min=1, max=999999)
result: str = region + inspection + tail
result += calculate_checksum(result)
return result + calculate_checksum(result)
def businesses_ogrn(self) -> str:
"""
Returns primary state registration number for businesses
(ru. основной государственный регистрационный номер, ОГРН).
"""
sign: str = self.random_element(("1", "5"))
year: str = "%02d" % self.random_int(min=1, max=datetime.now().year - 2000)
region: str = "%02d" % self.random_int(min=1, max=92)
tail: str = "%07d" % self.random_int(min=1, max=9999999)
result: str = sign + year + region + tail
return result + str((int(result) % 11) % 10)
def individuals_ogrn(self) -> str:
"""
Returns primary state registration number for individuals
(ru. основной государственный регистрационный номер, ОГРН).
"""
year: str = "%02d" % self.random_int(min=1, max=datetime.now().year - 2000)
region: str = "%02d" % self.random_int(min=1, max=92)
tail: str = "%09d" % self.random_int(min=1, max=999999999)
result: str = "3" + year + region + tail
return result + str((int(result) % 13) % 10)
def kpp(self) -> str:
"""
Returns tax registration reason code (ru. код причины постановки на учет, КПП).
"""
region: str = "%02d" % self.random_int(min=1, max=92)
inspection: str = "%02d" % self.random_int(min=1, max=99)
reason: str = self.random_element(("01", "43", "44", "45"))
tail: str = "%03d" % self.random_int(min=1, max=999)
return region + inspection + reason + tail
| mit | ed01985107a939e7737711b01e4ad032 | 26.823077 | 111 | 0.491445 | 1.794938 | false | false | false | false |
joke2k/faker | faker/providers/person/ar_AA/__init__.py | 1 | 24447 | from typing import Tuple
from .. import Provider as PersonProvider
class Provider(PersonProvider):
formats_female: Tuple[str, ...] = (
"{{first_name_female}} {{last_name}}",
"{{prefix_female}} {{first_name_female}} {{last_name}}",
)
formats_male: Tuple[str, ...] = (
"{{first_name_male}} {{last_name}}",
"{{prefix_male}} {{first_name_male}} {{last_name}}",
)
formats = formats_male + formats_female
first_names_female: Tuple[str, ...] = (
"آلاء",
"آيات",
"أجوان",
"أحلام",
"أروى",
"أريج",
"أزهار",
"أسرار",
"أسيل",
"أغاريد",
"أفراح",
"أفنان",
"ألين",
"أناهيد",
"إباء",
"إخلاص",
"إلينا",
"ابتسام",
"ابتكار",
"ابتهاج",
"ابتهال",
"اصيل",
"اعتكاف",
"اعتماد",
"افتكار",
"ايمان",
"بارعة",
"باسمة",
"باهرة",
"بتلاء",
"بتول",
"بثينة",
"بدرالدّجى",
"بشرى",
"بلسم",
"بلقيس",
"بلماء",
"بلند",
"بنان",
"بنفسج",
"بهاء",
"بهجة",
"بهية",
"بوران",
"بيسان",
"بيلسان",
"تالا",
"تاليا",
"ترانيم",
"ترف",
"تمام",
"تولين",
"جالا",
"جلنار",
"جمان",
"جميلة",
"جنى",
"جهراء",
"جوان",
"جوانا",
"جواهر",
"جود",
"جودي",
"جوريّة",
"جوليا",
"جوين",
"جيلان",
"حلا",
"حياة",
"خاشعة",
"دارين",
"دانة",
"دانية",
"دعاء",
"ديمه",
"راما",
"ربى",
"رواء",
"روبين",
"روعة",
"روفيدا",
"ريان",
"ريتاج",
"ريتال",
"ريف",
"ريفال",
"ريم",
"ريما",
"ريمان",
"ريناد",
"زكية",
"زهرة",
"سبأ",
"سجى",
"سديم",
"سلاف",
"سلسبيل",
"شادن",
"شهد",
"ضحى",
"ضياء",
"عالية",
"عتاب",
"غوى",
"غيداء",
"فداء",
"فرات",
"فردوس",
"كاملة",
"كرمة",
"كوثر",
"لارا",
"لاما",
"لتين",
"لوجين",
"لورا",
"لورين",
"لوليا",
"ليان",
"ليساء",
"ليم",
"لينا",
"مادلين",
"ماذى",
"مايا",
"مريم",
"ميار",
"ميرا",
"ميرال",
"ميسون",
"ميلاء",
"ناديه",
"ناردين",
"ناهد",
"نشوة",
"نغم",
"نوال",
"نوره",
"نوف",
"هاجر",
"هايدي",
"هدى",
"هناء",
"هنادي",
"هند",
"هيا",
"هيام",
"سجا",
"وصاف",
"وفاء",
"يارا",
"ياسمين",
"يسرى",
)
first_names_male: Tuple[str, ...] = (
"أحمد",
"تاج",
"تاج الدّين",
"تامر",
"تحسين",
"ترف",
"تقي",
"تقيّ الدّين",
"تميم",
"تمّام",
"توفيق",
"ثائر",
"ثابت",
"ثاقب",
"ثامر",
"ثروت",
"ثقيف",
"جابر",
"جاد",
"جاسم",
"جدير",
"جرير",
"جرّاح",
"جسور",
"جعفر",
"جلاء",
"جلال",
"جلال الدّين",
"جليل",
"جمال",
"جمال الدّين",
"جميل",
"جهاد",
"حاتم",
"حارث",
"حازم",
"حافظ",
"حامد",
"حبّاب",
"حسام",
"حسن",
"حسني",
"حسنين",
"حسيب",
"حسين",
"حفيظ",
"حقّي",
"حكيم",
"حليم",
"حمدان",
"حمدي",
"حمزة",
"حمود",
"حميد",
"حمّاد",
"حنبل",
"حنفي",
"حيدر",
"حيّان",
"خاطر",
"خافق",
"خالد",
"خالدي",
"خضر",
"خطيب",
"خلدون",
"خلف",
"خلوصي",
"خليفة",
"خليل",
"خميس",
"خيري",
"دؤوب",
"داني",
"داهي",
"داوود",
"دريد",
"دليل",
"دهمان",
"ديسم",
"ذريع",
"ذكي",
"ذيب",
"رؤوف",
"رئيس",
"رائد",
"رائف",
"رابح",
"راتب",
"راجح",
"راجي",
"رازي",
"راسم",
"راشد",
"راضي",
"راغب",
"رافع",
"رامح",
"رامز",
"رامي",
"راني",
"راوي",
"رباح",
"ربيع",
"رجاء",
"رجائي",
"رجب",
"رحيب",
"رخاء",
"رزين",
"رستم",
"رسمي",
"رشاد",
"رشدي",
"رشيد",
"رضوان",
"رضي",
"رفيق",
"رمحي",
"رمزي",
"رمضان",
"رهيف",
"روحي",
"ريّان",
"زاخر",
"زاكي",
"زاهر",
"زاهي",
"زايد",
"زبير",
"زغلول",
"زكريا",
"زكي",
"زهدي",
"زهران",
"زهير",
"زياد",
"زيد",
"زيدان",
"زين",
"سائد",
"ساجد",
"ساجي",
"ساطع",
"سالم",
"سامح",
"سامر",
"سامي",
"ساهد",
"ساهر",
"سخاء",
"سراج",
"سراج الدّين",
"سرحان",
"سرور",
"سعد",
"سعدون",
"سعدي",
"سعود",
"سعيد",
"سفيان",
"سفير",
"سلام",
"سلطان",
"سلمان",
"سليم",
"سليمان",
"سموح",
"سمير",
"سنام",
"سنان",
"سهل",
"سهوان",
"سهيل",
"سيف الدّين",
"سيّد",
"شادي",
"شاطر",
"شافع",
"شاكر",
"شامخ",
"شامل",
"شبلي",
"شبيب",
"شجاع",
"شدّاد",
"شريف",
"شعبان",
"شعلان",
"شعيب",
"شفيع",
"شكري",
"شكيب",
"شهاب",
"شهب",
"شهم",
"شهير",
"شوقي",
"شيّق",
"صائب",
"صابر",
"صاحب",
"صادح",
"صادق",
"صارم",
"صافي",
"صالح",
"صامد",
"صباح",
"صبحي",
"صبري",
"صبور",
"صبيح",
"صخر",
"صدر الدّين",
"صدقي",
"صدّاح",
"صدّام",
"صعب",
"صقر",
"صلاح",
"صلاح الدّين",
"صنديد",
"صهيب",
"ضاحك",
"ضاحي",
"ضحّاك",
"ضرغام",
"ضياء",
"ضياء الدّين",
"ضيائي",
"طائع",
"طائف",
"طائل",
"طارق",
"طالب",
"طامح",
"طاهر",
"طبّاع",
"طريف",
"طلال",
"طلعت",
"طموح",
"طه",
"طيّب",
"طيّع",
"ظاعن",
"ظافر",
"ظاهر",
"ظبي",
"ظريف",
"ظهير",
"عائد",
"عابد",
"عاتب",
"عادل",
"عارف",
"عاصم",
"عاطف",
"عاقل",
"عاكف",
"عالم",
"عامر",
"عبد الإله",
"عبد الباري",
"عبد الباقي",
"عبد التّواب",
"عبد الجبّار",
"عبد الجليل",
"عبد الحفيظ",
"عبد الحقّ",
"عبد الحكيم",
"عبد الحليم",
"عبد الحيّ",
"عبد الخالق",
"عبد الرّؤوف",
"عبد الرّحمن",
"عبد الرّحيم",
"عبد الرّزاق",
"عبد الرّشيد",
"عبد السّلام",
"عبد السّميع",
"عبد الشّكور",
"عبد الصّمد",
"عبد العزيز",
"عبد العليم",
"عبد الغفور",
"عبد الغفّار",
"عبد الغني",
"عبد القادر",
"عبد القدّوس",
"عبد القهّار",
"عبد الكريم",
"عبد اللطيف",
"عبد المجيد",
"عبد المحيي",
"عبد الملك",
"عبد المولى",
"عبد الواحد",
"عبدالرّحمن",
"عبدالله",
"عبّاس",
"عبّود",
"عتريس",
"عتيد",
"عتيق",
"عثمان",
"عدلي",
"عدنان",
"عدوي",
"عذب",
"عربي",
"عرفات",
"عرفان",
"عرفه",
"عزاز",
"عزمي",
"عزيز",
"عزّ الدّين",
"عزّت",
"عصام",
"عصمت",
"عطاء",
"عفيف",
"عقيل",
"علاء",
"علاء الدّين",
"علم الدّين",
"علوان",
"علي",
"علّام",
"عماد",
"عمر",
"عمران",
"عمرو",
"عمير",
"عمّار",
"غازي",
"غالب",
"غالي",
"غامد",
"غانم",
"غزوان",
"غزير",
"غسّان",
"غطفان",
"فؤاد",
"فائق",
"فاتح",
"فاخر",
"فادي",
"فارس",
"فارع",
"فاروق",
"فاضل",
"فالح",
"فايد",
"فتحي",
"فتوح",
"فخر",
"فخر الدّين",
"فخري",
"فداء",
"فدائي",
"فراس",
"فرج",
"فرحان",
"فرزدق",
"فضل",
"فطين",
"فكري",
"فلاح",
"فهد",
"فهمي",
"فوزي",
"فوّاز",
"فيصل",
"فيّاض",
"قائد",
"قاسم",
"قاصد",
"قانت",
"قبس",
"قحطان",
"قدري",
"قصي",
"قصيد",
"قطب",
"قطز",
"قنوع",
"قيس",
"كارم",
"كاسر",
"كاشف",
"كاظم",
"كافور",
"كامل",
"كايد",
"كبير",
"كتوم",
"كرم",
"كريم",
"كسّاب",
"كليم",
"كمال",
"كنار",
"كنان",
"كنعان",
"لؤي",
"لبيب",
"لبيد",
"لطفي",
"لطوف",
"لفيف",
"لقاء",
"لقمان",
"لمّاح",
"لهفان",
"ليث",
"مأمون",
"مؤمن",
"مؤنس",
"مؤيّد",
"ماجد",
"مازن",
"مالك",
"ماهر",
"مجاهد",
"مجد",
"محجوب",
"محسن",
"محفوظ",
"محمود",
"محمّد",
"محيي الدّين",
"مختار",
"مخلص",
"مدحت",
"مراد",
"مرادي",
"مرتجي",
"مرتضي",
"مرتقي",
"مرزوق",
"مرسال",
"مرشد",
"مرعي",
"مروان",
"مزهر",
"مسرور",
"مسعود",
"مسلم",
"مشاري",
"مشرف",
"مشرق",
"مشفق",
"مصباح",
"مصطفى",
"مصعب",
"مطاوع",
"مظهر",
"معارف",
"معتوق",
"معزّ",
"معمّر",
"معن",
"معين",
"مفيد",
"مقداد",
"مقدام",
"مكرّم",
"مكّي",
"ملهم",
"ممتاز",
"ممدوح",
"مناف",
"منتصر",
"منسي",
"منصور",
"منيب",
"منيع",
"منيف",
"مهدي",
"مهران",
"مهنّد",
"مهيب",
"موسى",
"موفّق",
"مياس",
"ميثاق",
"ميسور",
"ميمون",
"ميّاد",
"مَجدي",
"مَسعد",
"مُتعب",
"مُتوكّل",
"مُتولي",
"مُتيّم",
"مُخيمر",
"مُرسي",
"مُرضي",
"مُسعف",
"مُصلح",
"مُعتز",
"مُناضل",
"مُنجد",
"مُنذر",
"مُنير",
"نائل",
"ناجح",
"ناجي",
"نادر",
"نادي",
"ناصر",
"ناصر الدّين",
"ناصيف",
"ناضر",
"ناظم",
"ناعم",
"نافذ",
"نافع",
"نبراس",
"نبهان",
"نبيل",
"نبيه",
"نجدت",
"نجم الدّين",
"نجوان",
"نجيب",
"نديم",
"نذير",
"نزار",
"نزيه",
"نسيب",
"نشأت",
"نشوان",
"نصر",
"نصر الدّين",
"نصري",
"نصوح",
"نصور",
"نضال",
"نظام",
"نظمي",
"نعمان",
"نعيم",
"نمر",
"نوح",
"نور",
"نور الدّين",
"نور الحقّ",
"نورس",
"نوري",
"نوّار",
"نوّاف",
"نيازي",
"هادي",
"هاشم",
"هاني",
"هايل",
"هزار",
"هلال",
"هلالي",
"همام",
"هيثم",
"هيكل",
"هيمان",
"وائل",
"واثق",
"وادع",
"واصف",
"واصل",
"وثّاب",
"وجدي",
"وجيه",
"وحيد",
"ودود",
"وديع",
"وريد",
"وسام",
"وسيل",
"وسيم",
"وصفي",
"وضّاح",
"وفائي",
"وفيق",
"وليد",
"وليف",
"ياسر",
"يافع",
"ياقوت",
"يانع",
"يحيى",
"يزيد",
"يسار",
"يسري",
"يعرب",
"يعقوب",
"يقين",
"يمام",
"يوسف",
"يونس",
)
first_names = first_names_male + first_names_female
last_names: Tuple[str, ...] = (
"أشجع",
"أفغاني",
"أكلب",
"ألمع",
"أنمار",
"أولاد بوعزيز",
"أولاد زيان",
"إياد",
"ابو الحاج",
"ابو السعود",
"ابو عيد",
"ارناؤوط",
"ازحيمان",
"اسطمبولي",
"الأزد",
"الأشراف",
"الألجاوي",
"الأنصاري",
"الأوس",
"الأيوبي",
"الامام",
"البامية",
"البخاري",
"البديري",
"البشيتي",
"البغدادي",
"البقوم",
"البيسار القعقور",
"البيطار",
"الترجمان الصالح",
"الترهي",
"التوتنجي",
"الجاعوني",
"الجبشة",
"الجعليين",
"الحجر بن الهنوء بن الأزد",
"الحداء",
"الحسيني",
"الحكم بن سعد العشيرة",
"الحلاق",
"الحلواني",
"الحواش",
"الحويطات",
"الخالدي",
"الخزرج",
"الخطيب بني جماعة الكناني",
"الخلفاوي",
"الداودي",
"الدجاني",
"الدسوقي",
"الدقاق",
"الدليم",
"الدواسر",
"الديسي",
"الرباب",
"الرباطاب",
"الزرقان",
"الزماميري",
"الساحلي",
"السادة",
"السادة الراويون",
"السروري",
"السمان",
"السهول",
"السيفي",
"الشامي",
"الشاويش",
"الشايقية",
"الشحوح",
"الشرفاء",
"الشعباني",
"الشهابي",
"الطحان",
"الظفير",
"العارف",
"العجمان",
"العسلي",
"العفيفي",
"العقيدات",
"العلمي",
"العوازم",
"العوالق",
"الغوانمة",
"الفتياني",
"القاعي",
"القباني",
"القرجولي",
"القزاز",
"القضماني",
"القطب",
"القلموني",
"القواسم",
"الكالوتي",
"الكبابيش",
"الكثيري",
"الكلغاصي",
"الكواهلة",
"المؤقت",
"الماني",
"المتولي",
"المرازيق",
"المظفر",
"المغربي",
"المفتي",
"المملوك",
"المنتفق",
"المهرة",
"الموركة",
"الموسوس",
"النجار",
"النشاشيبي",
"النقيب",
"النمر",
"النمري",
"الهدمي",
"الوعري",
"اليوزباشي",
"اميوني",
"اهرام",
"بارق",
"باهلة",
"بتروني",
"بجيلة",
"بحمدوني",
"بدرية",
"بديرية",
"بعلبكي",
"بكر بن عبد مناة",
"بكر بن وائل",
"بكيل",
"بلغازي",
"بلقرن",
"بلي",
"بنو أسد",
"بنو أمية",
"بنو الأحمر",
"بنو الأحمر بن الحارث",
"بنو الأسمر",
"بنو الحارث بن كعب",
"بنو الدئل",
"بنو العريج",
"بنو النجار",
"بنو حنيفة",
"بنو خالد",
"بنو ذي أصبح",
"بنو زيد",
"بنو سعد بن بكر",
"بنو سعد بن ليث بن بكر",
"بنو شعبة",
"بنو شهر",
"بنو شيبان",
"بنو شيبة",
"بنو صخر",
"بنو ضمرة",
"بنو عبس",
"بنو عجل",
"بنو عدي",
"بنو عمرو",
"بنو فراس",
"بنو كلب",
"بنو كنز",
"بنو لام",
"بنو ليث",
"بنو مالك",
"بنو معقل",
"بنو مهدي",
"بنو هاشم",
"بنو هلال",
"بنو ياس",
"بنو يعلى",
"بني بيات",
"بني رشيد",
"بني عطية",
"بني هاجر",
"بو مدين",
"بيرقدار",
"بيروتي",
"ترابين",
"تغلب بن وائل",
"تميم",
"تنوخ",
"ثقيف",
"جار الله",
"جبيلي",
"جديس",
"جذام",
"جرهم",
"جزار",
"جزيني",
"جعفر",
"جهينة",
"جودة",
"حاشد",
"حب رمان",
"حجازي",
"حرب",
"حمير",
"حميضة",
"حوالة",
"خثعم",
"خزاعة",
"خندف",
"خولان",
"درويش",
"دوبلال",
"راجح",
"ربيعة",
"رصاص",
"زبيد",
"زحلاوي",
"زحيكة",
"زلاطيمو",
"زهران",
"سبيع",
"سرندح",
"سليم",
"سموم",
"سوميرة",
"شتية",
"شرف",
"شمر",
"شمران",
"شهران",
"شويفاتي",
"صيام",
"صيداني",
"صيداوي",
"ضبيعة",
"طرابلسي",
"طزيز",
"طسم",
"طقش",
"طه",
"طوطح",
"طيء",
"عامر بن صعصعة",
"عاملة",
"عبد القيس",
"عبد اللطيف",
"عبده",
"عتيبة",
"عجرمة (العجارمة)",
"عدوان",
"عذرة",
"عرموني",
"عسير",
"عضل",
"عكاوي",
"عليان",
"عنز بن وائل",
"عنزة",
"عنس",
"عويضة",
"غامد",
"غطفان",
"غنيم",
"غوشة",
"فراهيد",
"فهم",
"قبيلة هذيل البقوم",
"قحطان",
"قرش",
"قريش",
"قضاعة",
"قطينة",
"قليبو",
"قيس عيلان",
"كمال",
"كنانة",
"كندة",
"كهلان",
"لخم",
"متني",
"مذحج",
"مراد",
"مرازيق البقوم",
"مزرعاني",
"مزينة",
"مشعشع",
"مضر",
"مطير",
"معتوق",
"ميرفاب",
"نجم",
"نجيب",
"نسيبة",
"نهد",
"نور الدين",
"هذيل",
"همدان",
"هندية",
"هوازن",
"وهبة",
"يافع",
"يشكر",
)
prefixes_female: Tuple[str, ...] = (
"السيدة",
"الآنسة",
"الدكتورة",
"الأستاذة",
"المهندسة",
)
prefixes_male: Tuple[str, ...] = ("السيد", "المهندس", "الدكتور", "الأستاذ")
| mit | 6e255abc053c3e1441896fa1b2cdabeb | 16.475184 | 79 | 0.302162 | 2.086818 | false | false | false | false |
joke2k/faker | faker/providers/address/fa_IR/__init__.py | 1 | 8017 | from .. import Provider as AddressProvider
class Provider(AddressProvider):
city_prefixes = (
"شمال",
"غرب",
"شرق",
"جنوب",
"بندر",
"شهر",
"روستای",
"دهستان",
"شهرستان",
"باغات",
"استان",
)
building_number_formats = ("#####", "####", "###")
street_suffixes = (
"کوچه",
"خیابان",
"پل",
"دره",
"میدان",
"چهار راه",
"بن بست",
"بلوار",
"جنب",
"تقاطع",
"آزاد راه",
"بزرگ راه",
"جزیره",
"کوه",
"جاده",
"تونل",
)
postcode_formats = ("###", "####", "#####", "######", "##########")
states = (
"آذربایجان شرقی",
"آذربایجان غربی",
"اردبیل",
"خراسان",
"کردستان",
"گیلان",
"اصفهان",
"البرز",
"ایلام",
"بوشهر",
"تهران",
"چهارمحال و بختیاری",
"خراسان جنوبی",
"خراسان رضوی",
"خراسان شمالی",
"خوزستان",
"زنجان",
"سمنان",
"سیستان و بلوچستان",
"فارس",
"قزوین",
"قم",
"کرمان",
"کرمانشاه",
"کهگیلویه و بویراحمد",
"گلستان",
"لرستان",
"مازندران",
"مرکزی",
"هرمزگان",
"همدان",
"یزد",
)
countries = (
"جمهوری آذربایجان",
"آرژانتین",
"آفریقای جنوبی",
"جمهوری آفریقای مرکزی",
"آلبانی",
"آلمان",
"آنتیگوا و باربودا",
"آندورا",
"آنگولا",
"اتریش",
"اتیوپی",
"اردن",
"ارمنستان",
"اروگوئه",
"اریتره",
"ازبکستان",
"اسپانیا",
"استرالیا",
"استونی",
"اسرائیل",
"اسلواکی",
"اسلوونی",
"افغانستان",
"اکوادور",
"الجزایر",
"السالوادور",
"امارات متحده عربی",
"اندونزی",
"اوکراین",
"اوگاندا",
"ایالات متحده آمریکا",
"ایتالیا",
"ایران",
"جمهوری ایرلند",
"ایسلند",
"باربادوس",
"باهاما",
"بحرین",
"برزیل",
"برونئی",
"بریتانیا",
"بلاروس",
"بلژیک",
"بلغارستان",
"بلیز",
"بنگلادش",
"بنین",
"پادشاهی بوتان",
"بوتسوانا",
"بورکینافاسو",
"بوروندی",
"بوسنی و هرزگوین",
"بولیوی",
"پاپوآ گینه نو",
"پاراگوئه",
"پاناما",
"پاکستان",
"پرتغال",
"پرو",
"پورتوریکو",
"تاجیکستان",
"تانزانیا",
"تایلند",
"جمهوری چین",
"ترکمنستان",
"ترکیه",
"ترینیداد و توباگو",
"توگو",
"تونس",
"تونگا",
"تووالو",
"تیمور شرقی",
"جامائیکا",
"جزایر سلیمان",
"جزایر مارشال",
"جمهوری چک",
"جمهوری دومینیکن",
"جیبوتی",
"چاد",
"چین",
"دانمارک",
"دومینیکا",
"جمهوری دومینیکن",
"رواندا",
"روسیه",
"رومانی",
"زامبیا",
"نیوزیلند",
"زیمباوه",
"جمهوری دموکراتیک کنگو (زئیر)",
"ژاپن",
"سائوتومه و پرینسیپ",
"ساحل عاج",
"ساموآی غربی",
"سن مارینو",
"سریلانکا",
"سنت کیتس و نویس",
"سنت لوسیا",
"سنت وینسنت و گرنادینها",
"سنگاپور",
"سنگال",
"سوئد",
"سوئیس",
"سوازیلند",
"سودان",
"سودان جنوبی",
"سورینام",
"سوریه",
"سومالی",
"سیرالئون",
"سیشل",
"شیلی",
"صربستان",
"عراق",
"عربستان سعودی",
"عمان",
"غنا",
"فرانسه",
"فلسطین",
"فنلاند",
"فیجی",
"فیلیپین",
"قبرس",
"قرقیزستان",
"قزاقستان",
"قطر",
"کامبوج",
"کامرون",
"کانادا",
"کره جنوبی",
"کره شمالی",
"کرواسی",
"کاستاریکا",
"کلمبیا",
"جمهوری کنگو",
"جمهوری دموکراتیک کنگو",
"کنیا",
"کوبا",
"کوزوو",
"مجمعالجزایر قمر",
"کویت",
"کیپ ورد",
"کیریباتی",
"گابن",
"گامبیا",
"گرجستان",
"گرنادا",
"گرینلند(از مستعمرات دانمارک)",
"گواتمالا",
"گویان",
"گینه",
"گینه استوایی",
"گینه بیسائو",
"لائوس",
"لبنان",
"لتونی",
"لسوتو",
"لهستان",
"لوکزامبورگ",
"لیبریا",
"لیبی",
"لیتوانی",
"لیختناشتاین",
"ماداگاسکار",
"مالاوی",
"مالت",
"مالدیو",
"مالزی",
"مالی",
"مجارستان",
"مراکش",
"مصر",
"مغولستان",
"مقدونیه",
"مکزیک",
"موریتانی",
"موریس",
"موزامبیک",
"مولداوی",
"موناکو",
"مونتهنگرو",
"میانمار",
"ایالات فدرال میکرونزی",
"نائورو",
"نامیبیا",
"نپال",
"نروژ",
"نیجریه",
"نیکاراگوئه",
"نیوزیلند",
"واتیکان",
"وانواتو",
"ونزوئلا",
"ویتنام",
"هائیتی",
"هلند",
"هندوراس",
"هند",
"یمن",
"یونان",
)
city_formats = ("{{city_prefix}} {{first_name}}",)
street_name_formats = (
"{{first_name}} {{street_suffix}}",
"{{last_name}} {{street_suffix}}",
)
street_address_formats = (
"{{building_number}} {{street_name}}",
"{{building_number}} {{street_name}} {{secondary_address}}",
)
address_formats = ("{{street_address}}\n{{city}}, {{state}} {{postcode}}",)
secondary_address_formats = ("سوئیت ###", "واحد ###")
def city_prefix(self) -> str:
return self.random_element(self.city_prefixes)
def secondary_address(self) -> str:
return self.numerify(self.random_element(self.secondary_address_formats))
def administrative_unit(self) -> str:
return self.random_element(self.states)
state = administrative_unit
| mit | 3cb7cad930c7af333b296f9d1a25edc8 | 19.773649 | 81 | 0.401529 | 2.030713 | false | false | false | false |
joke2k/faker | faker/providers/profile/__init__.py | 1 | 2082 | import itertools
from datetime import date
from decimal import Decimal
from typing import Dict, List, Optional, Tuple, Union
from ...typing import SexLiteral
from .. import BaseProvider
class Provider(BaseProvider):
    """
    This provider is a collection of functions to generate personal profiles
    and identities.
    """

    def simple_profile(self, sex: Optional[SexLiteral] = None) -> Dict[str, Union[str, date, SexLiteral]]:
        """Generate a basic profile with personal information.

        Args:
            sex: ``"F"``, ``"M"``, or ``None`` to choose one at random.

        Raises:
            ValueError: If ``sex`` is not ``"F"``, ``"M"``, or ``None``.
        """
        sex_ = self.random_element(["F", "M"]) if sex is None else sex
        if sex_ == "F":
            name = self.generator.name_female()
        elif sex_ == "M":
            name = self.generator.name_male()
        else:
            # Previously an unexpected value fell through both branches and
            # caused an UnboundLocalError on `name`; fail fast instead.
            raise ValueError("`sex` must be 'F', 'M', or None.")
        return {
            "username": self.generator.user_name(),
            "name": name,
            "sex": sex_,
            "address": self.generator.address(),
            "mail": self.generator.free_email(),
            "birthdate": self.generator.date_of_birth(),
        }

    def profile(
        self, fields: Optional[List[str]] = None, sex: Optional[SexLiteral] = None
    ) -> Dict[str, Union[str, Tuple[Decimal, Decimal], List[str], date]]:
        """Generate a complete profile.

        Args:
            fields: If non-empty, only these keys are kept in the result.
            sex: Forwarded to :meth:`simple_profile`.
        """
        d = {
            "job": self.generator.job(),
            "company": self.generator.company(),
            "ssn": self.generator.ssn(),
            "residence": self.generator.address(),
            "current_location": (self.generator.latitude(), self.generator.longitude()),
            "blood_group": "".join(self.random_element(list(itertools.product(["A", "B", "AB", "O"], ["+", "-"])))),
            "website": [self.generator.url() for _ in range(1, self.random_int(2, 5))],
        }
        d.update(self.generator.simple_profile(sex))
        # Field selection: keep only the requested keys (insertion order is
        # preserved); a None or empty `fields` returns everything.
        if fields:
            d = {k: v for k, v in d.items() if k in fields}
        return d
| mit | 4e9c355298bf552a47e3945d91980ca8 | 33.7 | 116 | 0.563401 | 3.928302 | false | false | false | false |
joke2k/faker | faker/providers/internet/en_PH/__init__.py | 1 | 2097 | from collections import OrderedDict
from faker.utils.decorators import lowercase, slugify
from .. import Provider as InternetProvider
class Provider(InternetProvider):
"""
Provider for internet stuff for en_PH locale
Free email domains are based on anecdotal evidence and experience. Available TLDs are based on the listed sources.
Because of the local company naming scheme, a custom generator is needed to output convincing company domains.
Sources:
- https://en.wikipedia.org/wiki/.ph
"""
tlds = (
"com",
"net",
"org",
"ph",
"com.ph",
"net.ph",
"org.ph",
)
safe_email_tlds = tlds
free_email_domains = (
"gmail.com",
"yahoo.com",
"zohomail.com",
)
email_formats = OrderedDict(
[
("{{user_name}}@{{domain_name}}", 0.75),
("{{user_name}}@{{free_email_domain}}", 0.25),
]
)
@lowercase
@slugify
def domain_word(self) -> str:
check = self.random_int(0, 99)
if check % 100 < 40:
company_acronym = self.generator.format("random_company_acronym")
if len(company_acronym) == 2:
company_type = self.generator.format("company_type")
return company_acronym + company_type
else:
return company_acronym
else:
if check % 2 == 0:
name_part = self.generator.format("last_name")
else:
name_part = self.generator.format("random_company_adjective")
company_noun_chain = self.generator.format("random_company_noun_chain")
company_nouns = company_noun_chain.split(" ")
if len(company_nouns) == 1:
return name_part + company_noun_chain
else:
company_type = self.generator.format("company_type")
company_elements = [name_part] + company_nouns
acronym = "".join([word[0] for word in company_elements])
return acronym + company_type
| mit | 47adbc05873e3dff9296e945d0ba21a6 | 31.261538 | 118 | 0.562709 | 3.956604 | false | false | false | false |
joke2k/faker | faker/providers/credit_card/ru_RU/__init__.py | 1 | 3179 | from collections import OrderedDict
from typing import Optional
from faker.providers.person.ru_RU import translit
from .. import CardType, CreditCard
from .. import Provider as CreditCardProvider
class Provider(CreditCardProvider):
"""Implement credit card provider for ``ru_RU`` locale.
For all methods that take ``card_type`` as an argument, a random card type
will be used if the supplied value is ``None``. The list of valid card types
includes ``'amex'``, ``'maestro'``, ``'mastercard'``, ``'mir'``,
``'unionpay'``, and ``'visa'``.
Sources:
- https://en.wikipedia.org/wiki/Payment_card_number#Issuer_identification_number_(IIN)
"""
prefix_visa = ["4"]
prefix_mastercard = [
"51",
"52",
"53",
"54",
"55",
"222%",
"223",
"224",
"225",
"226",
"227",
"228",
"229",
"23",
"24",
"25",
"26",
"270",
"271",
"2720",
]
prefix_mir = ["2200", "2201", "2202", "2203", "2204"]
prefix_maestro = [
"50",
"56",
"57",
"58",
"59",
"60",
"61",
"62",
"63",
"64",
"65",
"66",
"67",
"68",
"69",
]
prefix_amex = ["34", "37"]
prefix_unionpay = ["62", "81"]
credit_card_types = OrderedDict(
(
("visa", CreditCard("Visa", prefix_visa, security_code="CVV2")),
(
"mastercard",
CreditCard("Mastercard", prefix_mastercard, security_code="CVC2"),
),
("mir", CreditCard("МИР", prefix_mir)),
("maestro", CreditCard("Maestro", prefix_maestro, security_code="CVV2")),
(
"amex",
CreditCard(
"American Express",
prefix_amex,
15,
security_code="CID",
security_code_length=4,
),
),
("unionpay", CreditCard("Union Pay", prefix_unionpay)),
)
)
def credit_card_full(self, card_type: Optional[CardType] = None) -> str:
"""Generate a set of credit card details."""
card = self._credit_card_type(card_type)
tpl = "{provider}\n" "{owner}\n" "{number} {expire_date}\n" "{security}: {security_nb}\n" "{issuer}"
tpl = tpl.format(
provider=card.name,
owner=translit(
self.generator.parse(
self.random_element(
[
"{{first_name_male}} {{last_name_male}}",
"{{first_name_female}} {{last_name_female}}",
]
)
)
),
number=self.credit_card_number(card),
expire_date=self.credit_card_expire(),
security=card.security_code,
security_nb=self.credit_card_security_code(card),
issuer=self.generator.parse("{{bank}}"),
)
return self.generator.parse(tpl)
| mit | 9c4bd1dd3ab934666e11f33f90fca2ee | 26.617391 | 108 | 0.460013 | 3.91133 | false | false | false | false |
jazzband/django-axes | axes/migrations/0004_auto_20181024_1538.py | 3 | 2197 | from django.db import migrations, models
class Migration(migrations.Migration):
    """Rename model/field verbose names of AccessAttempt and AccessLog.

    Auto-generated Django migration: no schema changes, only display
    metadata (``verbose_name``) and field option declarations are touched.
    """

    dependencies = [("axes", "0003_auto_20160322_0929")]
    operations = [
        # Human-readable names for the AccessAttempt model.
        migrations.AlterModelOptions(
            name="accessattempt",
            options={
                "verbose_name": "access attempt",
                "verbose_name_plural": "access attempts",
            },
        ),
        # Human-readable names for the AccessLog model.
        migrations.AlterModelOptions(
            name="accesslog",
            options={
                "verbose_name": "access log",
                "verbose_name_plural": "access logs",
            },
        ),
        migrations.AlterField(
            model_name="accessattempt",
            name="attempt_time",
            field=models.DateTimeField(auto_now_add=True, verbose_name="Attempt Time"),
        ),
        migrations.AlterField(
            model_name="accessattempt",
            name="user_agent",
            field=models.CharField(
                db_index=True, max_length=255, verbose_name="User Agent"
            ),
        ),
        migrations.AlterField(
            model_name="accessattempt",
            name="username",
            field=models.CharField(
                db_index=True, max_length=255, null=True, verbose_name="Username"
            ),
        ),
        migrations.AlterField(
            model_name="accesslog",
            name="attempt_time",
            field=models.DateTimeField(auto_now_add=True, verbose_name="Attempt Time"),
        ),
        migrations.AlterField(
            model_name="accesslog",
            name="logout_time",
            field=models.DateTimeField(
                blank=True, null=True, verbose_name="Logout Time"
            ),
        ),
        migrations.AlterField(
            model_name="accesslog",
            name="user_agent",
            field=models.CharField(
                db_index=True, max_length=255, verbose_name="User Agent"
            ),
        ),
        migrations.AlterField(
            model_name="accesslog",
            name="username",
            field=models.CharField(
                db_index=True, max_length=255, null=True, verbose_name="Username"
            ),
        ),
    ]
| mit | 16104e6e684b784ea4e3510f45603593 | 31.308824 | 87 | 0.515703 | 4.694444 | false | false | false | false |
atmtools/typhon | typhon/retrieval/mcmc/mcmc.py | 1 | 11539 | """
The mcmc submodule.
===================
Contains the MCMC class, which implements the Markov chain Monte Carlo method
to sample from the posterior distribution of a retrieval problem.
References
==========
[1] Andrew Gelman et al., Bayesian Data Analysis, 3rd Edition
"""
import logging
import numpy as np
logger = logging.getLogger(__name__)
def r_factor(stats):
    """
    This computes the R-factor as defined in 'Bayesian Data Analysis'
    (Gelman et al.), Chapter 11. If the simulations have converged,
    the result of `r_factor` should be close to one.

    Args:
        stats: A list of arrays of statistics (scalar summaries) computed from
            several MCMC runs.

    Returns:
        The potential scale reduction factor ``sqrt(var_p / W)``, where ``W``
        is the mean within-sequence variance and ``var_p`` the pooled
        posterior-variance estimate.
    """
    n = stats[0].size
    # Within-sequence variances and per-sequence means. (Renamed from `vars`,
    # which shadowed the builtin; the unused `m`/`mean` locals were dropped.)
    variances = np.array([np.var(s) for s in stats])
    means = np.array([np.mean(s) for s in stats])
    # Between-sequence variance B and mean within-sequence variance W.
    b = n * np.var(means)
    w = np.mean(variances)
    # Pooled estimate of the marginal posterior variance (formula 11.3 in [1]).
    var_p = (n - 1) / n * w + b / n
    return np.sqrt(var_p / w)
def variogram(stats, t):
    """
    Helper function that computes the variogram for a given lag t. The
    variogram is the mean squared difference between elements that are
    t samples apart, averaged over all sequences.

    Args:
        stats: A list of sequences.
        t: The lag (in samples) at which to evaluate the variogram.

    Returns:
        The variogram V_t as defined by formula (11.7) in [1].
    """
    m = len(stats)
    n = stats[0].size
    # Compare element i with element i - t, giving n - t terms per sequence
    # (formula 11.7 in [1]). The previous implementation differenced
    # s[t+1:] - s[:n-t-1], i.e. lag t + 1 over n - t - 1 terms while still
    # dividing by n - t, which also made rho[0] != 1 in `autocorrelation`.
    return sum(np.sum((s[t:] - s[:n - t]) ** 2) for s in stats) / (m * (n - t))
def split(stats):
    """
    Splits a list of sequences in halves.

    Sequences generated from MCMC runs should be split in half in order to be
    able to properly diagnose mixing.

    Args:
        stats: A list of sequences.

    Returns:
        A list with the first halves of all sequences followed by their
        second halves. For odd-length sequences the trailing element is
        dropped.
    """
    half = stats[0].size // 2
    first_halves = [s[:half] for s in stats]
    second_halves = [s[half:2 * half] for s in stats]
    return first_halves + second_halves
def autocorrelation(stats):
    """
    Estimates the autocorrelation of a list of sequences from a MCMC run.

    This uses formula (11.7) in [1] to approximate the autocorrelation
    function for lags [0, n // 2].

    Args:
        stats: A list of equally sized sequences of scalar statistics.

    Returns:
        An array of length ``n // 2`` with the estimated autocorrelation at
        each lag.
    """
    n = stats[0].size
    # Mean within-sequence variance W and between-sequence variance B.
    w = np.mean([np.var(s) for s in stats])
    b = n * np.var([np.mean(s) for s in stats])
    # Pooled posterior-variance estimate (formula 11.3 in [1]).
    var_p = (n - 1) / n * w + b / n
    rho = np.zeros(n // 2)
    for lag in range(n // 2):
        rho[lag] = 1.0 - 0.5 * variogram(stats, lag) / var_p
    return rho
def effective_sample_size(stats):
    """
    This estimates the effective sample size of independent samples from the
    posterior distribution using formula (11.8) in [1].

    Args:
        stats: A list of equally sized arrays of scalar statistics from
            several MCMC runs.

    Returns:
        The estimated number of effectively independent samples.
    """
    n = stats[0].size
    m = len(stats)
    # Within-sequence variances and per-sequence means.
    vars = np.array([np.var(s) for s in stats])
    means = np.array([np.mean(s) for s in stats])
    # Between-sequence variance B and mean within-sequence variance W.
    b = n * np.var(means)
    w = np.mean(vars)
    # Pooled posterior-variance estimate (formula 11.3 in [1]).
    var_p = (n - 1) / n * w + b / n
    rho = np.zeros(n // 2)
    for t in range(n // 2):
        vt = variogram(stats, t)
        rho[t] = 1.0 - 0.5 * vt / var_p
        # Truncate the sum once successive autocorrelation estimates turn
        # negative, cf. the stopping rule described with formula (11.8).
        # NOTE(review): the indices (t-1, t-2) and the slice rho[:t-2] below
        # look shifted by one or two relative to [1] — confirm. Also note
        # that `t` is reused after the loop, so n must be >= 2.
        if t > 2 and (rho[t-1] + rho[t-2]) < 0.0:
            break
    return m * n / (1.0 + 2.0 * sum(rho[:t-2]))
class MCMC:
"""
The MCMC class represents an ongoing MCMC simulation. An MCMC object can be
used to run a given number of MC steps, test the results for convergence
and perform further calculations if necessary.
"""
@staticmethod
def _check_input(vars, py, stats):
"""
Helper method that checks arguments provided to `__init__`.
"""
if not type(vars) == list and len(vars) > 0:
raise Exception("Argument vars must be of type list and have length"
+ "> 0.")
for v in vars:
if not type(v) == list and len(v) == 3:
raise Exception("Elements of argument vars must be tuples of "
+ " mutable variables, jump functions and prior"
+ " densities.")
if not callable(v[1]):
raise Exception("Non-callable object given as prior density.")
if not callable(v[2]):
raise Exception("Non-callable object given as jump function.")
if not callable(py):
raise Exception("Non-callable object given for conditional"
+ "probability p(y | x).")
for s in stats:
if not callable(s):
raise Exception("Non-callable object given as statistic.")
def __init__(self, vars, y, ly, stats = []):
"""
To construct an MCMC object, the user must provide a list of variables,
prior distributions and likelihood functions, the measurement vector,
a measurement likelihood and optionally a set of stats to evaluate at
each step.
Args:
vars: A list of triples (v,l,j) containing a triple of a variable
v, a prior likelihood function l so that `l(v)` yields a value
proportional to the logarithm of the prior probability of value
of v, and finally a jump function j, so that
`v_new = j(ws, v_old)` yields a new value for the variable v
and manipulates the :class:`~typhon.arts.workspace.Workspace`
object ws so that a subsequent call to the yCalc WSM will compute
the simulated measurement corresponding to the new value `v_new`
of the variable v.
y: The measured vector of brightness temperatures which must be
consistent with the ARTS WSV y
ly: The measurement likelihood such that `ly(y, yf)` gives
the log of the probability that deviations between `y` and
`yf` are due to measurement errors.
stats: This is a list of statstics such that for each element
s `s(ws)` is a scalar value computed on a given workspace.
"""
MCMC._check_input(vars, ly, stats)
self.y = y
self.vars = vars
self.ly = ly
self.stats = stats
def eval_l(self, ws):
"""
Evaluate the likelihood of the current state. This method
simply computes and sums up the measurement likelihood and
the prior likelihoods.
Args:
ws: A :class:`~typhon.arts.workspace.Workspace` object consistent
with the current state of the MCMC run.
"""
lxs = np.zeros(len(self.vars))
ly = self.ly(self.y, ws.y.value)
for i, (x, l, _) in enumerate(self.vars):
lxs[i] = l(x)
return ly, lxs
    def step(self, ws, ly_old, lxs_old):
        """
        Perform one Metropolis-within-Gibbs sweep over all retrieval
        variables. For each variable a candidate is generated using the
        corresponding jump function, `yCalc()` is called on the
        :class:`~typhon.arts.workspace.Workspace` object `ws` to simulate
        the measurement, and the candidate is accepted or rejected based on
        the Metropolis acceptance ratio.

        Args:
            ws: A :class:`~typhon.arts.workspace.Workspace` object consistent
                with the current state of the MCMC run.
            ly_old: The measurement likelihood before the execution of
                new step
            lxs_old: The prior likelihoods for each of the variables
                that are being retrieved.

        Returns:
            A tuple ``(accepted, ly, lxs)`` of a (1, n_vars) boolean array of
            acceptance flags, the updated measurement log-likelihood and the
            updated prior log-likelihoods.
        """
        accepted = np.zeros((1, len(self.vars)), dtype=bool)
        lxs = np.zeros(lxs_old.shape)
        for i, ((x,l,j), lx_old) in enumerate(zip(self.vars, lxs_old)):
            # Generate a candidate value and simulate the corresponding
            # measurement.
            x_new = j(ws, x)
            ws.yCalc()
            lx_new = l(x_new)
            ly_new = self.ly(self.y, ws.y.value)
            # Metropolis acceptance ratio: likelihoods are logs, so the
            # posterior ratio is the exp of their difference.
            r = np.exp(lx_new + ly_new - lx_old - ly_old)
            if r > 1.0 or np.random.random() < r:
                x[:] = x_new
                accepted[0, i] = True
                ly_old = ly_new
                lxs[i] = lx_new
            else:
                # Rejected: tell the jump function to undo its workspace
                # modification and keep the previous prior likelihood.
                j(ws, x, revert = True)
                accepted[0, i] = False
                lxs[i] = lxs_old[i]
        return accepted, ly_old, lxs
    def print_log(self, step, acceptance):
        """
        Prints log output to stdout.

        Args:
            step: The number of the current step
            acceptance: The array of bools tracking the acceptances for
                each simulation step.
        """
        if step > 0:
            # NOTE(review): `acceptance` is 2-D (steps x variables), so the
            # builtin sum() divides row-wise and yields a per-variable
            # acceptance-rate *array*, not a scalar -- confirm this is the
            # intended log output.
            ar = sum(acceptance / step)
        else:
            ar = 0.0
        logger.info("MCMC Step " + str(step) + ": " + "ar = " + str(ar))
    def warm_up(self, ws, x0s, n_steps):
        """
        Run a warm-up simulation of `n_steps` on a given workspace `ws`
        starting from start values `x0s`, and remember the final state
        (`self.ly_old`, `self.lxs_old`, ...) so that `run` can continue
        from it.

        Args:
            ws: A :class:`~typhon.arts.workspace.Workspace` object setup so that
                only a call to the `yCalc` WSM is necessary to perform a
                simulation
            x0s: A list of start values which is used to initialized the
                workspace by calling `j(x0)` for each `x0` in `x0s` and `j` is
                the jump function of the corresponding variable.
            n_steps: The number of warm-up steps to perform.

        Returns:
            Tuple ``(hist, stats, ls, acceptance)``: per-variable state
            histories, per-step statistics, the likelihood trace, and the
            acceptance bool array.
        """
        ls = np.zeros(n_steps + 1)
        stats = np.zeros((n_steps + 1, len(self.stats)))
        hist = [np.zeros((n_steps + 1,) + x.shape) for x,l,j in self.vars]
        acceptance = np.zeros((n_steps, len(self.vars)), dtype=bool)
        lxs = np.zeros(len(self.vars))
        ly = 0.0
        # Set initial state.
        for i,((x, l, j), x0) in enumerate(zip(self.vars, x0s)):
            x[:] = j(ws, x0)
            hist[i][0,:] = x[:]
        # Evaluate likelihood
        ws.yCalc()
        ly, lxs = self.eval_l(ws)
        ls[0] = ly + sum(lxs)
        # Evaluate statistics
        for i,s in enumerate(self.stats):
            stats[0, i] = s(ws)
        # NOTE(review): only ls[0] is ever written -- the likelihood trace
        # is not updated inside the loop below. TODO confirm.
        for i1 in range(n_steps):
            acceptance[i1, :], ly, lxs = self.step(ws, ly, lxs)
            for i2, h in enumerate(hist):
                hist[i2][i1+1, :] = self.vars[i2][0][:]
            for i2,s in enumerate(self.stats):
                stats[i1 + 1, i2] = s(ws)
            if (i1 % 10) == 0:
                self.print_log(i1, acceptance)
        # Persist the final state so run() can continue the chain.
        self.ly_old = ly
        self.lxs_old = lxs
        self.stats_old = stats[-1,:]
        self.hist_old = [h[-1,:] for h in hist]
        return hist, stats, ls, acceptance
    def run(self, ws, n_steps):
        """
        Continue the MCMC simulation for `n_steps` on a given workspace `ws`,
        starting from the state stored by a previous `warm_up` (or `run`)
        call in `self.ly_old` / `self.lxs_old`.

        Args:
            ws: A :class:`~typhon.arts.workspace.Workspace` object setup so that
                only a call to the `yCalc` WSM is necessary to perform a simulation
            n_steps: The number of simulation steps to perform.

        Returns:
            Tuple ``(hist, stats, ls, acceptance)``: per-variable state
            histories, per-step statistics, the likelihood trace, and the
            acceptance bool array.
        """
        ls = np.zeros(n_steps)
        stats = np.zeros((n_steps, len(self.stats)))
        hist = [np.zeros((n_steps,) + x.shape) for x,l,j in self.vars]
        acceptance = np.zeros((n_steps, len(self.vars)), dtype=bool)
        # Resume from the state left behind by warm_up()/run().
        ly = self.ly_old
        lxs = self.lxs_old
        # NOTE(review): `ls` is allocated but never filled in this loop --
        # the returned likelihood trace stays all zeros. TODO confirm.
        for i1 in range(n_steps):
            acceptance[i1, :], ly, lxs = self.step(ws, ly, lxs)
            for i2, h in enumerate(hist):
                hist[i2][i1, :] = self.vars[i2][0][:]
            for i2,s in enumerate(self.stats):
                stats[i1, i2] = s(ws)
            if (i1 % 10) == 0:
                self.print_log(i1, acceptance)
        # Persist the final state so a later run() call can continue.
        self.ly_old = ly
        self.lxs_old = lxs
        self.stats_old = stats[-1,:]
        self.hist_old = [h[-1,:] for h in hist]
        return hist, stats, ls, acceptance
| mit | 79accffb663b746d2505f694cae73871 | 34.072948 | 81 | 0.544328 | 3.637768 | false | false | false | false |
atmtools/typhon | typhon/files/handlers/ocean_rain.py | 1 | 3663 | from .common import NetCDF4, expects_file_info
__all__ = [
'OceanRAIN',
]
class OceanRAIN(NetCDF4):
    """File handler that can read data from OceanRAIN NetCDF4 files.

    This object handles OceanRAIN V1.0 NetCDF4 files such as they are
    compatible with :mod:`typhon.collocations`, i.e.:

    * rename *latitude* and *longitude* field to *lat* and *lon*.
    * always load the *time*, *latitude* and *longitude* fields.

    Examples:
        Draw a world map with all measurements of OceanRAIN:

        .. :code-block:: python3

            from typhon.files import FileSet, OceanRAIN
            from typhon.plots import worldmap

            # Create a Dataset object that points to the files:
            ocean_rain = FileSet(
                "OceanRAIN_1.0/OceanRAIN__W__{ship}_{ship_id}__UHAM-ICDC__v1_0.nc",
                handler=OceanRAIN(),
                info_via="both",
            )

            fig = plt.figure(figsize=(12, 12))
            ax = None
            for file, data in ocean_rain.icollect(return_info=True):
                label = f"{file.attr['ship']} ({data['lat'].size} mins)"
                # Only plot every 1000th point to draw the worldmap faster:
                ax, plot = worldmap(
                    data["lat"][::1000], data["lon"][::1000], ax=ax,
                    s=6, label=label, background=True
                )
            handles, labels = ax.get_legend_handles_labels()
            ax.legend(
                handles, labels, bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
                ncol=2, borderaxespad=0.,
            )
            fig.show()
    """

    def __init__(self, **kwargs):
        """Initializes a OceanRAIN file handler class.

        Args:
            **kwargs: Additional key word arguments that are allowed for the
                :class:`typhon.spareice.handlers.common.NetCDF4` class.
        """
        # Call the base class initializer
        super().__init__(**kwargs)

    @expects_file_info()
    def get_info(self, file, **kwargs):
        """Get information about an OceanRAIN dataset file

        Args:
            file: A string containing path and name or a :class:`FileInfo`
                object of the file of which to get the information about.
            **kwargs: Additional keyword arguments.

        Returns:
            A :class:`FileInfo` object with its ``times`` attribute set to
            the first and last timestamp found in the file.
        """
        data = super().read(file, fields=("time",))
        file.times = [data["time"].min().item(0), data["time"].max().item(0)]
        return file

    @expects_file_info()
    def read(self, filename, **kwargs):
        """Read and parse a NetCDF file and load it to a xarray.Dataset

        Args:
            filename: Path and name of the file as string or FileInfo object.
            **kwargs: Additional key word arguments that are allowed for the
                :class:`~typhon.files.handlers.common.NetCDF4` class.

        Returns:
            A xarray.Dataset object.
        """
        # Make sure that the standard fields are always gonna be imported:
        fields = kwargs.pop("fields", None)
        if fields is not None:
            fields = {"time", "latitude", "longitude"} | set(fields)

        # This renaming makes the data compatible for collocate routines.
        # Bug fix: the user mapping must be *popped* out of kwargs -- it was
        # previously only read with `get`, so passing `mapping=` raised
        # "got multiple values for keyword argument 'mapping'" in the
        # super().read() call below.
        mapping = {
            "latitude": "lat",
            "longitude": "lon",
            **kwargs.pop("mapping", {})
        }

        return super().read(filename, fields=fields, mapping=mapping, **kwargs)
| mit | 2b8fd8210bd99871aa34a710f78b496b | 32 | 83 | 0.56047 | 4.092737 | false | false | false | false |
atmtools/typhon | typhon/plots/maps.py | 1 | 5317 | """Functions related to plotting maps. """
from collections import namedtuple
from matplotlib import pyplot as plt
# Pre-built exception raised by the functions in this module when the
# optional `cartopy` dependency is not installed.
_CartopyError = ImportError(
    "You have to install `cartopy` to use functions located in `typhon.plots.maps`."
)
def worldmap(
    lat,
    lon,
    var=None,
    fig=None,
    ax=None,
    projection=None,
    bg=False,
    draw_grid=False,
    draw_coastlines=False,
    interpolation=False,
    **kwargs,
):
    """Plots the track of a variable on a worldmap.

    Args:
        lat: Array of latitudes.
        lon: Array of longitudes.
        var: Additional array for the variable to plot. If 1-dimensional
            (or None), the data is plotted as a scatter track colored
            according to a color map. If 2-dimensional, the variable is
            plotted as a filled contour plot (``interpolation=True``) or
            as a pcolormesh (default).
        fig: A matplotlib figure object. If not given, the current
            figure is used.
        ax: A matplotlib axis object. If not given, a new axis
            object will be created in the current figure.
        projection: If no axis is given, specify here the cartopy projection.
        bg: If true, a background image will be drawn.
        draw_grid: If true, gridlines with labels are drawn.
        draw_coastlines: If true, coastlines are drawn.
        interpolation: For 2-dimensional *var*, use ``contourf`` instead of
            ``pcolormesh``.
        **kwargs: Forwarded to the underlying matplotlib plotting call.

    Returns:
        The plot object (scatter, contour set or mesh).
    """
    try:
        import cartopy.crs as ccrs
    except ImportError:
        raise _CartopyError

    if fig is None:
        fig = plt.gcf()

    if projection is None:
        if ax is None:
            projection = ccrs.PlateCarree()
        else:
            projection = ax.projection

    if ax is None:
        ax = fig.add_subplot(111, projection=projection)

    if bg:
        ax.stock_img()

    if draw_grid:
        ax.gridlines(draw_labels=True)

    if draw_coastlines:
        ax.coastlines()

    # It is counter-intuitive but if we want to plot our data with normal
    # latitudes and longitudes, we always have to set the transform to
    # PlateCarree (see https://github.com/SciTools/cartopy/issues/911)
    if var is None or len(var.shape) == 1:
        # Default keyword arguments for the scatter track.  (The former
        # module-level duplicate of this dict was dead code -- every branch
        # rebuilt its own kwargs -- and has been removed.)
        kwargs_defaults = {
            "cmap": "qualitative1",
            "s": 1,
            # This accelerates the drawing of many points:
            "rasterized": lat.size > 100_000,
            **kwargs,
        }
        plot = ax.scatter(
            lon, lat, c=var, transform=ccrs.PlateCarree(), **kwargs_defaults
        )
    elif interpolation:
        plot = ax.contourf(
            lon, lat, var, transform=ccrs.PlateCarree(), **kwargs
        )
    else:
        plot = ax.pcolormesh(
            lon, lat, var, transform=ccrs.PlateCarree(), **kwargs
        )

    return plot
def get_cfeatures_at_scale(scale="110m"):
"""Return a collection of `NaturalEarthFeature` at given scale.
Parameters:
scale (str): The dataset scale, i.e. one of ‘10m’, ‘50m’,
or ‘110m’. Corresponding to 1:10,000,000, 1:50,000,000,
and 1:110,000,000 respectively.
Returns:
collections.namedtuple:
Collection of :class:`~cartopy.feature.NaturalEarthFeature`
Examples:
>>> features = get_cfeatures_at_scale('50m')
>>> print(features.COASTLINE.scale)
'50m'
"""
try:
from cartopy.feature import NaturalEarthFeature, COLORS
except ImportError:
raise _CartopyError
d = {}
d["BORDERS"] = NaturalEarthFeature(
category="cultural",
name="admin_0_boundary_lines_land",
scale=scale,
edgecolor="black",
facecolor="none",
)
d["STATES"] = NaturalEarthFeature(
category="cultural",
name="admin_1_states_provinces_lakes",
scale=scale,
edgecolor="black",
facecolor="none",
)
d["COASTLINE"] = NaturalEarthFeature(
category="physical",
name="coastline",
scale=scale,
edgecolor="black",
facecolor="none",
)
d["LAKES"] = NaturalEarthFeature(
category="physical",
name="lakes",
scale=scale,
edgecolor="face",
facecolor=COLORS["water"],
)
d["LAND"] = NaturalEarthFeature(
category="physical",
name="land",
scale=scale,
edgecolor="face",
facecolor=COLORS["land"],
zorder=-1,
)
d["OCEAN"] = NaturalEarthFeature(
category="physical",
name="ocean",
scale=scale,
edgecolor="face",
facecolor=COLORS["water"],
zorder=-1,
)
d["RIVERS"] = NaturalEarthFeature(
category="physical",
name="rivers_lake_centerlines",
scale=scale,
edgecolor=COLORS["water"],
facecolor="none",
)
NaturalEarthFeatures = namedtuple(
typename="NaturalEarthFeatures",
field_names=(
"BORDERS",
"STATES",
"COASTLINE",
"LAKES",
"LAND",
"OCEAN",
"RIVERS",
),
)
return NaturalEarthFeatures(**d)
| mit | cb558795887e9962fc38456d6918df56 | 24.752427 | 84 | 0.572102 | 3.833092 | false | false | false | false |
atmtools/typhon | typhon/files/handlers/tropomi.py | 1 | 2077 | from satpy import Scene
import xarray as xr
import pandas as pd
from .common import expects_file_info, FileHandler
__all__ = [
'TROPOMI',
]
class TROPOMI(FileHandler):
    """File handler for TROPOMI level-2 data using the Satpy 'tropomi_l2' reader."""

    # This file handler always wants to return at least time, lat and lon
    # fields. These fields are required for this:
    standard_fields = {
        'latitude',
        'longitude',
        'time_utc',
    }

    # Map the standard fields to standard names:
    mapping = {
        "latitude": "lat",
        "longitude": "lon",
        "time_utc": "time",
    }

    def __init__(self, **kwargs):
        # NOTE(review): "fields" is popped *after* kwargs has already been
        # forwarded to the base class, so FileHandler also receives it --
        # confirm that is intended before changing.
        super().__init__(**kwargs)
        self.satpy_reader = 'tropomi_l2'
        self.user_fields = kwargs.pop("fields", {})

    @expects_file_info()
    def read(self, filename, **kwargs):
        """Read a TROPOMI file into a xarray.Dataset via Satpy.

        Args:
            filename: Path and name of the file as FileInfo object.
            **kwargs: Forwarded to ``Scene.load``; a ``mapping`` dict with
                additional renamings is extracted and applied afterwards.

        Returns:
            A xarray.Dataset with ``lat``, ``lon`` and ``time`` fields.
        """
        scene = Scene(reader=self.satpy_reader, filenames=[filename.path])

        # We need to import at least the standard fields. Since
        # `standard_fields` is a set, this union is never None, so the old
        # dead "if fields is None: load all" fallback has been removed.
        fields = self.standard_fields | set(self.user_fields)

        # Load all selected fields
        scene.load(fields, **kwargs)

        # convert into dataset
        dataset = scene.to_xarray_dataset()

        # convert string array to datetime array
        dataset['time_utc'] = dataset['time_utc'].astype("datetime64[ns]")

        # delete useless coords
        dataset = dataset.drop_vars(['time', 'crs'])

        # rename standard variables
        dataset = dataset.rename(self.mapping)

        # We catch the user mapping here, since we do not want to deal with
        # user-defined names in the further processing. Instead, we use our own
        # mapping
        user_mapping = kwargs.pop("mapping", None)
        if user_mapping is not None:
            dataset = dataset.rename(user_mapping)

        # clean attributes
        for var in dataset.data_vars:
            dataset[var].attrs = []

        return dataset
| mit | 26ca3d66f64adf90e433ce0d01292958 | 27.847222 | 79 | 0.604718 | 4.088583 | false | false | false | false |
jaraco/keyring | keyring/backends/libsecret.py | 1 | 5973 | import logging
from .. import backend
from .._compat import properties
from ..backend import KeyringBackend
from ..credentials import SimpleCredential
from ..errors import (
PasswordDeleteError,
PasswordSetError,
KeyringLocked,
)
# Whether the libsecret GObject-introspection bindings could be imported;
# this gates the backend's availability (see Keyring.priority below).
available = False
try:
    import gi
    from gi.repository import Gio
    from gi.repository import GLib
    gi.require_version('Secret', '1')
    from gi.repository import Secret
    available = True
except (AttributeError, ImportError, ValueError):
    # Any of these can be raised by a missing/broken gi installation or by
    # an unsatisfiable Secret typelib version -- treat all as "unavailable".
    pass

log = logging.getLogger(__name__)
class Keyring(backend.SchemeSelectable, KeyringBackend):
    """libsecret Keyring

    Stores secrets in the freedesktop.org Secret Service via the libsecret
    GObject bindings.
    """

    # Value stored in each item's "application" attribute so this library's
    # items can be told apart from other applications' secrets.
    appid = 'Python keyring library'

    @property
    def schema(self):
        # Generic schema with string "service"/"username" attributes (names
        # come from SchemeSelectable._query) plus an "application" tag.
        return Secret.Schema.new(
            "org.freedesktop.Secret.Generic",
            Secret.SchemaFlags.NONE,
            self._query(
                Secret.SchemaAttributeType.STRING,
                Secret.SchemaAttributeType.STRING,
                application=Secret.SchemaAttributeType.STRING,
            ),
        )

    @properties.NonDataProperty
    def collection(self):
        # Collection items are stored in; instances may override this.
        return Secret.COLLECTION_DEFAULT

    @properties.classproperty
    def priority(cls):
        """Backend priority; raises RuntimeError when unusable."""
        if not available:
            raise RuntimeError("libsecret required")
        # Make sure there is actually a secret service running
        try:
            Secret.Service.get_sync(Secret.ServiceFlags.OPEN_SESSION, None)
        except GLib.Error as error:
            raise RuntimeError("Can't open a session to the secret service") from error
        return 4.8

    def get_password(self, service, username):
        """Get password of the username for the service"""
        attributes = self._query(service, username, application=self.appid)
        try:
            items = Secret.password_search_sync(
                self.schema, attributes, Secret.SearchFlags.UNLOCK, None
            )
        except GLib.Error as error:
            # GLib reports errors via (quark, code) pairs; translate the
            # generic I/O failure into keyring's KeyringLocked.
            quark = GLib.quark_try_string('g-io-error-quark')
            if error.matches(quark, Gio.IOErrorEnum.FAILED):
                raise KeyringLocked('Failed to unlock the item!') from error
            raise
        # Return the secret of the first matching item (None when no match).
        for item in items:
            try:
                return item.retrieve_secret_sync().get_text()
            except GLib.Error as error:
                quark = GLib.quark_try_string('secret-error')
                if error.matches(quark, Secret.Error.IS_LOCKED):
                    raise KeyringLocked('Failed to unlock the item!') from error
                raise

    def set_password(self, service, username, password):
        """Set password for the username of the service"""
        attributes = self._query(service, username, application=self.appid)
        label = "Password for '{}' on '{}'".format(username, service)
        try:
            stored = Secret.password_store_sync(
                self.schema, attributes, self.collection, label, password, None
            )
        except GLib.Error as error:
            # Both the libsecret IS_LOCKED error and the generic I/O failure
            # indicate an unlockable collection.
            quark = GLib.quark_try_string('secret-error')
            if error.matches(quark, Secret.Error.IS_LOCKED):
                raise KeyringLocked("Failed to unlock the collection!") from error
            quark = GLib.quark_try_string('g-io-error-quark')
            if error.matches(quark, Gio.IOErrorEnum.FAILED):
                raise KeyringLocked("Failed to unlock the collection!") from error
            raise
        if not stored:
            raise PasswordSetError("Failed to store password!")

    def delete_password(self, service, username):
        """Delete the stored password (only the first one)"""
        attributes = self._query(service, username, application=self.appid)
        try:
            items = Secret.password_search_sync(
                self.schema, attributes, Secret.SearchFlags.UNLOCK, None
            )
        except GLib.Error as error:
            quark = GLib.quark_try_string('g-io-error-quark')
            if error.matches(quark, Gio.IOErrorEnum.FAILED):
                raise KeyringLocked('Failed to unlock the item!') from error
            raise
        # Clear only the first matching item, then report its removal.
        for item in items:
            try:
                removed = Secret.password_clear_sync(
                    self.schema, item.get_attributes(), None
                )
            except GLib.Error as error:
                quark = GLib.quark_try_string('secret-error')
                if error.matches(quark, Secret.Error.IS_LOCKED):
                    raise KeyringLocked('Failed to unlock the item!') from error
                raise
            return removed
        raise PasswordDeleteError("No such password!")

    def get_credential(self, service, username):
        """Get the first username and password for a service.

        Return a Credential instance

        The username can be omitted, but if there is one, it will use get_password
        and return a SimpleCredential containing the username and password
        Otherwise, it will return the first username and password combo that it finds.
        """
        # Note: no "application" attribute here, so credentials stored by
        # other applications are also found.
        query = self._query(service, username)
        try:
            items = Secret.password_search_sync(
                self.schema, query, Secret.SearchFlags.UNLOCK, None
            )
        except GLib.Error as error:
            quark = GLib.quark_try_string('g-io-error-quark')
            if error.matches(quark, Gio.IOErrorEnum.FAILED):
                raise KeyringLocked('Failed to unlock the item!') from error
            raise
        for item in items:
            username = item.get_attributes().get("username")
            try:
                return SimpleCredential(
                    username, item.retrieve_secret_sync().get_text()
                )
            except GLib.Error as error:
                quark = GLib.quark_try_string('secret-error')
                if error.matches(quark, Secret.Error.IS_LOCKED):
                    raise KeyringLocked('Failed to unlock the item!') from error
                raise
| mit | 2afef7e919ebbd62ea0e1734025078d1 | 37.288462 | 87 | 0.603047 | 4.514739 | false | false | false | false |
jaraco/keyring | keyring/cli.py | 2 | 3924 | #!/usr/bin/env python
"""Simple command line interface to get/set password from a keyring"""
import getpass
import argparse
import sys
from . import core
from . import backend
from . import set_keyring, get_password, set_password, delete_password
class CommandLineTool:
    """Argument-parsing front end for the ``keyring`` command-line tool."""

    def __init__(self):
        self.parser = argparse.ArgumentParser()
        self.parser.add_argument(
            "-p",
            "--keyring-path",
            dest="keyring_path",
            default=None,
            help="Path to the keyring backend",
        )
        self.parser.add_argument(
            "-b",
            "--keyring-backend",
            dest="keyring_backend",
            default=None,
            help="Name of the keyring backend",
        )
        self.parser.add_argument(
            "--list-backends",
            action="store_true",
            help="List keyring backends and exit",
        )
        self.parser.add_argument(
            "--disable", action="store_true", help="Disable keyring and exit"
        )
        self.parser.add_argument(
            'operation',
            help="get|set|del",
            nargs="?",
        )
        self.parser.add_argument(
            'service',
            nargs="?",
        )
        self.parser.add_argument(
            'username',
            nargs="?",
        )

    def run(self, argv):
        """Parse *argv* and dispatch to the requested operation."""
        args = self.parser.parse_args(argv)
        # Expose parsed options as attributes (self.operation, self.service, ...).
        vars(self).update(vars(args))

        if args.list_backends:
            for k in backend.get_all_keyring():
                print(k)
            return

        if args.disable:
            core.disable()
            return

        self._check_args()
        self._load_spec_backend()
        method = getattr(self, f'do_{self.operation}', self.invalid_op)
        return method()

    def _check_args(self):
        """Error out via the parser if an operation lacks service/username."""
        if self.operation:
            if self.service is None or self.username is None:
                self.parser.error(f"{self.operation} requires service and username")

    def do_get(self):
        """Print the stored password; exit with status 1 if not found."""
        password = get_password(self.service, self.username)
        if password is None:
            raise SystemExit(1)
        print(password)

    def do_set(self):
        """Prompt for (or read piped) password and store it."""
        password = self.input_password(
            f"Password for '{self.username}' in '{self.service}': "
        )
        set_password(self.service, self.username, password)

    def do_del(self):
        """Delete the stored password."""
        delete_password(self.service, self.username)

    def invalid_op(self):
        """Report an unknown operation via the parser (exits)."""
        self.parser.error("Specify operation 'get', 'del', or 'set'.")

    def _load_spec_backend(self):
        """Load and activate the backend named on the command line, if any."""
        if self.keyring_backend is None:
            return

        try:
            if self.keyring_path:
                sys.path.insert(0, self.keyring_path)
            set_keyring(core.load_keyring(self.keyring_backend))
        except Exception as exc:
            # Tons of things can go wrong here:
            # ImportError when using "fjkljfljkl"
            # AttributeError when using "os.path.bar"
            # TypeError when using "__builtins__.str"
            # So, we play on the safe side, and catch everything.
            self.parser.error(f"Unable to load specified keyring: {exc}")

    def input_password(self, prompt):
        """Retrieve password from input."""
        return self.pass_from_pipe() or getpass.getpass(prompt)

    @classmethod
    def pass_from_pipe(cls):
        """Return password from pipe if not on TTY, else False."""
        is_pipe = not sys.stdin.isatty()
        return is_pipe and cls.strip_last_newline(sys.stdin.read())

    @staticmethod
    def strip_last_newline(str):
        """Strip one last newline, if present.

        Bug fix: the previous ``str[: -str.endswith('\\n')]`` evaluated to
        ``str[:0]`` (the empty string!) when there was no trailing newline,
        because ``-False == 0``.
        """
        return str[:-1] if str.endswith('\n') else str
def main(argv=None):
    """Entry point: run the keyring CLI with *argv* (defaults to sys.argv[1:])."""
    cli = CommandLineTool()
    return cli.run(sys.argv[1:] if argv is None else argv)


if __name__ == '__main__':
    sys.exit(main())
| mit | 6bb41ee2062599cac1ddb35cbeb750a3 | 27.852941 | 84 | 0.556575 | 4.152381 | false | false | false | false |
ceph/ceph-deploy | ceph_deploy/hosts/common.py | 1 | 7735 | from ceph_deploy.util import paths
from ceph_deploy import conf
from ceph_deploy.lib import remoto
from ceph_deploy.util import constants
from ceph_deploy.util import system
def ceph_version(conn):
    """
    Log the remote Ceph version by invoking ``ceph --version`` on *conn*.
    """
    command = ['ceph', '--version']
    return remoto.process.run(conn, command)
def mon_create(distro, args, monitor_keyring):
    """Create and start a new monitor on the remote host behind *distro*.

    Writes the cluster config, runs ``ceph-mon --mkfs`` with the supplied
    *monitor_keyring* (unless the "done" marker already exists), drops the
    done/init marker files and finally starts the mon service.
    """
    hostname = distro.conn.remote_module.shortname()
    logger = distro.conn.logger
    logger.debug('remote hostname: %s' % hostname)
    path = paths.mon.path(args.cluster, hostname)
    # Run ceph-mon as the owner of /etc/ceph (root pre-infernalis, ceph after).
    uid = distro.conn.remote_module.path_getuid(constants.base_path)
    gid = distro.conn.remote_module.path_getgid(constants.base_path)
    done_path = paths.mon.done(args.cluster, hostname)
    init_path = paths.mon.init(args.cluster, hostname, distro.init)
    conf_data = conf.ceph.load_raw(args)

    # write the configuration file
    distro.conn.remote_module.write_conf(
        args.cluster,
        conf_data,
        args.overwrite_conf,
    )

    # if the mon path does not exist, create it
    distro.conn.remote_module.create_mon_path(path, uid, gid)

    logger.debug('checking for done path: %s' % done_path)
    if not distro.conn.remote_module.path_exists(done_path):
        logger.debug('done path does not exist: %s' % done_path)
        if not distro.conn.remote_module.path_exists(paths.mon.constants.tmp_path):
            logger.info('creating tmp path: %s' % paths.mon.constants.tmp_path)
            distro.conn.remote_module.makedir(paths.mon.constants.tmp_path)
        keyring = paths.mon.keyring(args.cluster, hostname)

        logger.info('creating keyring file: %s' % keyring)
        distro.conn.remote_module.write_monitor_keyring(
            keyring,
            monitor_keyring,
            uid, gid,
        )

        user_args = []
        if uid != 0:
            user_args = user_args + [ '--setuser', str(uid) ]
        if gid != 0:
            user_args = user_args + [ '--setgroup', str(gid) ]

        # Initialize the monitor's data directory.
        remoto.process.run(
            distro.conn,
            [
                'ceph-mon',
                '--cluster', args.cluster,
                '--mkfs',
                '-i', hostname,
                '--keyring', keyring,
            ] + user_args
        )

        # The keyring is only needed for mkfs; remove it afterwards.
        logger.info('unlinking keyring file %s' % keyring)
        distro.conn.remote_module.unlink(keyring)

    # create the done file
    distro.conn.remote_module.create_done_path(done_path, uid, gid)

    # create init path
    distro.conn.remote_module.create_init_path(init_path, uid, gid)

    # start mon service
    start_mon_service(distro, args.cluster, hostname)
def mon_add(distro, args, monitor_keyring):
    """Add a monitor to an *existing* cluster on the remote host.

    Like :func:`mon_create`, but fetches the current monmap from the
    cluster first (``ceph mon getmap``) and feeds it to
    ``ceph-mon --mkfs`` so the new monitor joins the existing quorum.
    """
    hostname = distro.conn.remote_module.shortname()
    logger = distro.conn.logger
    path = paths.mon.path(args.cluster, hostname)
    # Run ceph-mon as the owner of /etc/ceph (root pre-infernalis, ceph after).
    uid = distro.conn.remote_module.path_getuid(constants.base_path)
    gid = distro.conn.remote_module.path_getgid(constants.base_path)
    monmap_path = paths.mon.monmap(args.cluster, hostname)
    done_path = paths.mon.done(args.cluster, hostname)
    init_path = paths.mon.init(args.cluster, hostname, distro.init)
    conf_data = conf.ceph.load_raw(args)

    # write the configuration file
    distro.conn.remote_module.write_conf(
        args.cluster,
        conf_data,
        args.overwrite_conf,
    )

    # if the mon path does not exist, create it
    distro.conn.remote_module.create_mon_path(path, uid, gid)

    logger.debug('checking for done path: %s' % done_path)
    if not distro.conn.remote_module.path_exists(done_path):
        logger.debug('done path does not exist: %s' % done_path)
        if not distro.conn.remote_module.path_exists(paths.mon.constants.tmp_path):
            logger.info('creating tmp path: %s' % paths.mon.constants.tmp_path)
            distro.conn.remote_module.makedir(paths.mon.constants.tmp_path)
        keyring = paths.mon.keyring(args.cluster, hostname)

        logger.info('creating keyring file: %s' % keyring)
        distro.conn.remote_module.write_monitor_keyring(
            keyring,
            monitor_keyring,
            uid, gid,
        )

        # get the monmap
        remoto.process.run(
            distro.conn,
            [
                'ceph',
                '--cluster', args.cluster,
                'mon',
                'getmap',
                '-o',
                monmap_path,
            ],
        )

        # now use it to prepare the monitor's data dir
        user_args = []
        if uid != 0:
            user_args = user_args + [ '--setuser', str(uid) ]
        if gid != 0:
            user_args = user_args + [ '--setgroup', str(gid) ]

        remoto.process.run(
            distro.conn,
            [
                'ceph-mon',
                '--cluster', args.cluster,
                '--mkfs',
                '-i', hostname,
                '--monmap',
                monmap_path,
                '--keyring', keyring,
            ] + user_args
        )

        # The keyring is only needed for mkfs; remove it afterwards.
        logger.info('unlinking keyring file %s' % keyring)
        distro.conn.remote_module.unlink(keyring)

    # create the done file
    distro.conn.remote_module.create_done_path(done_path, uid, gid)

    # create init path
    distro.conn.remote_module.create_init_path(init_path, uid, gid)

    # start mon service
    start_mon_service(distro, args.cluster, hostname)
def map_components(notsplit_packages, components):
    """
    Returns a list of packages to install based on component names

    This is done by checking if a component is in notsplit_packages,
    if it is, we know we need to install 'ceph' instead of the
    raw component name. Essentially, this component hasn't been
    'split' from the master 'ceph' package yet.
    """
    # Components still bundled in the master package resolve to 'ceph';
    # the set comprehension also removes duplicates.
    resolved = {
        'ceph' if component in notsplit_packages else component
        for component in components
    }
    return list(resolved)
def start_mon_service(distro, cluster, hostname):
    """
    start mon service depending on distro init

    Supports sysvinit, upstart and systemd; for systemd the ceph.target
    and the per-host ceph-mon@<hostname> unit are also enabled.
    """
    if distro.init == 'sysvinit':
        service = distro.conn.remote_module.which_service()
        remoto.process.run(
            distro.conn,
            [
                service,
                'ceph',
                '-c',
                '/etc/ceph/{cluster}.conf'.format(cluster=cluster),
                'start',
                'mon.{hostname}'.format(hostname=hostname)
            ],
            timeout=7,
        )
        system.enable_service(distro.conn)

    elif distro.init == 'upstart':
        # Upstart starts the mon via an event rather than a service name.
        remoto.process.run(
            distro.conn,
            [
                'initctl',
                'emit',
                'ceph-mon',
                'cluster={cluster}'.format(cluster=cluster),
                'id={hostname}'.format(hostname=hostname),
            ],
            timeout=7,
        )

    elif distro.init == 'systemd':
        # enable ceph target for this host (in case it isn't already enabled)
        remoto.process.run(
            distro.conn,
            [
                'systemctl',
                'enable',
                'ceph.target'
            ],
            timeout=7,
        )

        # enable and start this mon instance
        remoto.process.run(
            distro.conn,
            [
                'systemctl',
                'enable',
                'ceph-mon@{hostname}'.format(hostname=hostname),
            ],
            timeout=7,
        )

        remoto.process.run(
            distro.conn,
            [
                'systemctl',
                'start',
                'ceph-mon@{hostname}'.format(hostname=hostname),
            ],
            timeout=7,
        )
| mit | 228b4516a760ca3940d2396bd37fb947 | 30.189516 | 83 | 0.559793 | 3.966667 | false | false | false | false |
ceph/ceph-deploy | ceph_deploy/util/ssh.py | 1 | 1247 | import logging
from ceph_deploy.lib import remoto
from ceph_deploy.connection import get_local_connection
def can_connect_passwordless(hostname):
    """
    Ensure that current host can SSH remotely to the remote
    host using the ``BatchMode`` option to prevent a password prompt.

    That attempt will error with an exit status of 255 and a ``Permission
    denied`` message or a ``Host key verification failed`` message.
    """
    # Local "connections" never go through SSH at all.
    if not remoto.backends.needs_ssh(hostname):
        return True

    logger = logging.getLogger(hostname)
    with get_local_connection(logger) as conn:
        # Check to see if we can login, disabling password prompts
        command = ['ssh', '-CT', '-o', 'BatchMode=yes', hostname, 'true']
        out, err, retval = remoto.process.check(conn, command, stop_on_error=False)

        error_markers = ('Permission denied ', 'Host key verification failed.')
        found_key_error = any(
            marker in line for line in err for marker in error_markers
        )
        # Exit status 255 together with one of the marker messages means
        # passwordless login is not possible.
        return not (retval == 255 and found_key_error)
| mit | 2bc68f0df387e0f1e1ea4fe77cac6260 | 37.96875 | 83 | 0.667201 | 4.212838 | false | false | false | false |
ceph/ceph-deploy | ceph_deploy/hosts/arch/install.py | 1 | 1139 | from ceph_deploy.hosts.centos.install import repo_install, mirror_install # noqa
from ceph_deploy.hosts.common import map_components
from ceph_deploy.util.system import enable_service, start_service
# Components that have not been split into separate packages on this
# distro; requesting any of these installs the monolithic 'ceph' package
# instead (see ceph_deploy.hosts.common.map_components).
NON_SPLIT_PACKAGES = [
    'ceph-osd',
    'ceph-radosgw',
    'ceph-mds',
    'ceph-mon',
    'ceph-mgr',
    'ceph-common',
    'ceph-test'
]

# Systemd targets handled by install() below.
SYSTEMD_UNITS = [
    'ceph.target',
    'ceph-radosgw.target',
    'ceph-rbd-mirror.target',
    'ceph-fuse.target',
    'ceph-mds.target',
    'ceph-mon.target',
    'ceph-mgr.target',
    'ceph-osd.target',
]

# Units not started right after package install -- presumably because
# mon/mgr daemons are bootstrapped later by their own commands (TODO
# confirm).
SYSTEMD_UNITS_SKIP_START = [
    'ceph-mgr.target',
    'ceph-mon.target',
]

# Units not enabled at install time (currently none).
SYSTEMD_UNITS_SKIP_ENABLE = [
]
def install(distro, version_kind, version, adjust_repos, **kw):
    """Install the requested Ceph components, then start/enable their
    systemd units (honoring the skip lists above)."""
    components = kw.pop('components', [])
    packages = map_components(NON_SPLIT_PACKAGES, components)
    distro.packager.install(packages)

    # Start and enable services
    for unit in SYSTEMD_UNITS:
        if unit not in SYSTEMD_UNITS_SKIP_START:
            start_service(distro.conn, unit)
        if unit not in SYSTEMD_UNITS_SKIP_ENABLE:
            enable_service(distro.conn, unit)
| mit | 77126f3eb6ed20b819718b5b16097082 | 22.244898 | 81 | 0.632133 | 3.217514 | false | false | false | false |
liiight/notifiers | notifiers/providers/simplepush.py | 1 | 1091 | from ..core import Provider
from ..core import Response
from ..utils import requests
class SimplePush(Provider):
    """Send SimplePush notifications"""

    base_url = "https://api.simplepush.io/send"
    site_url = "https://simplepush.io/"
    name = "simplepush"

    _required = {"required": ["key", "message"]}
    _schema = {
        "type": "object",
        "properties": {
            "key": {"type": "string", "title": "your user key"},
            "message": {"type": "string", "title": "your message"},
            "title": {"type": "string", "title": "message title"},
            "event": {"type": "string", "title": "Event ID"},
        },
        "additionalProperties": False,
    }

    def _prepare_data(self, data: dict) -> dict:
        # The SimplePush API expects the message under the 'msg' key.
        message = data.pop("message")
        data["msg"] = message
        return data

    def _send_notification(self, data: dict) -> Response:
        # API errors are reported under the 'message' key of the response.
        response, errors = requests.post(
            self.base_url, data=data, path_to_errors=("message",)
        )
        return self.create_response(data, response, errors)
| mit | e4e3e86f5c7b492ef292383b86123f24 | 31.088235 | 67 | 0.562786 | 3.841549 | false | false | false | false |
abusesa/abusehelper | abusehelper/core/events.py | 1 | 22236 | import re
import hashlib
import inspect
import collections
from base64 import b64decode
import idiokit
from idiokit.xmlcore import Element, Elements
def _replace_non_xml_chars(unicode_obj, replacement=u"\ufffd"):
return _NON_XML.sub(replacement, unicode_obj)
_NON_XML = re.compile(u"[\x00-\x08\x0b\x0c\x0e-\x1f\ud800-\udfff\ufffe\uffff]", re.U)
def _normalize(value):
    """Coerce a string *value* to unicode.

    Accepts str and unicode instances (Python 2 semantics). For any other
    type a TypeError is raised whose message contains the offending value
    and its qualified type name (module-prefixed for non-builtin types).
    Note that converting a str with non-ASCII bytes may raise
    UnicodeDecodeError via the default codec.
    """
    if isinstance(value, basestring):
        return unicode(value)

    type_name = type(value).__name__
    module = inspect.getmodule(value)
    if module is not None and module.__name__ != "__builtin__":
        type_name = module.__name__ + "." + type_name
    raise TypeError(
        "expected a string value, got the value %r of type %s" % (value, type_name))
# XML namespace used for serialized AbuseHelper event elements.
EVENT_NS = "abusehelper#event"
def _unicode_quote(string):
r"""
>>> _unicode_quote(u"a")
u'a'
>>> _unicode_quote(u"=")
u'"="'
>>> _unicode_quote(u"\n")
u'"\n"'
"""
if _UNICODE_QUOTE_CHECK.search(string):
return u'"' + _UNICODE_QUOTE.sub(r'\\\g<0>', string) + u'"'
return string
_UNICODE_QUOTE_CHECK = re.compile(r'[\s"\\,=]', re.U)
_UNICODE_QUOTE = re.compile(r'["\\]', re.U)
def _unicode_parse_part(string, start):
match = _UNICODE_PART.match(string, start)
quoted, unquoted = match.groups()
end = match.end()
if quoted is not None:
return _UNICODE_UNQUOTE.sub("\\1", quoted), end
if unquoted is not None:
return unquoted, end
return u"", end
_UNICODE_UNQUOTE = re.compile(r'\\(.)', re.U)
_UNICODE_PART = re.compile(r'\s*(?:(?:"((?:\\.|[^"])*)")|([^\s"=,]+)|)\s*', re.U)
class Event(object):
__slots__ = ["_attrs"]
_UNDEFINED = object()
    @classmethod
    def _itemize(cls, *args, **keys):
        """Merge events, mappings and (key, values) iterables into one dict
        mapping each normalized unicode key to a set of unicode values.

        ``args`` may contain Event instances, dict-like objects or plain
        iterables of (key, value-or-values) pairs; ``keys`` contributes the
        keyword arguments as one more mapping.
        """
        result = dict()

        for obj in args + (keys,):
            # Events are merged straight from their internal attribute dict;
            # their values are already normalized unicode sets.
            if type(obj) == Event:
                for key, values in obj._attrs.iteritems():
                    if key not in result:
                        result[key] = values.copy()
                    else:
                        result[key].update(values)
                continue

            if hasattr(obj, "iteritems"):
                obj = obj.iteritems()
            elif hasattr(obj, "items"):
                obj = obj.items()

            for key, values in obj:
                # A bare string counts as a single value; any other iterable
                # is treated as a collection of values.
                if isinstance(values, basestring):
                    values = (_normalize(values),)
                else:
                    values = (_normalize(x) for x in values)

                key = _normalize(key)
                if key not in result:
                    result[key] = set(values)
                else:
                    result[key].update(values)

        return result
@classmethod
def from_unicode(cls, string):
    """Parse an event from its unicode serialization (the inverse of
    ``unicode(event)``). Raise ValueError on malformed input."""
    string = string.strip()
    if not string:
        return cls()
    attrs = collections.defaultdict(list)
    pos = 0
    end = len(string)
    while True:
        key, pos = _unicode_parse_part(string, pos)
        if pos >= end:
            raise ValueError("unexpected string end")
        if string[pos] != u"=":
            raise ValueError("unexpected character %r at index %d" %
                             (string[pos], pos))
        pos += 1
        value, pos = _unicode_parse_part(string, pos)
        attrs[key].append(value)
        if pos >= end:
            return cls(attrs)
        if string[pos] != u",":
            raise ValueError("unexpected character %r at index %d" %
                             (string[pos], pos))
        pos += 1
@classmethod
def from_elements(cls, elements):
    """Yield events parsed from XML element(s): new base64 format first,
    then the legacy plain-attribute format."""
    # Future event format: base64 encoded keys and values.
    for event_element in elements.children("e", EVENT_NS):
        attrs = collections.defaultdict(list)
        for key_element in event_element.children("k").with_attrs("a"):
            key = b64decode(key_element.get_attr("a")).decode("utf-8")
            for value_element in key_element.children("v").with_attrs("a"):
                attrs[key].append(
                    b64decode(value_element.get_attr("a")).decode("utf-8"))
        yield Event(attrs)
    # Legacy event format: plain "key"/"value" attributes.
    for event_element in elements.children("event", EVENT_NS):
        attrs = collections.defaultdict(list)
        for attr in event_element.children("attr").with_attrs("key", "value"):
            attrs[attr.get_attr("key")].append(attr.get_attr("value"))
        yield Event(attrs)
def __init__(self, *args, **keys):
    """Create an event from events, mappings and/or keyword arguments.

    _itemize normalizes every key and value to unicode, so copy/merge
    construction keeps the same internal encoding as the source event.
    """
    self._attrs = self._itemize(*args, **keys)
def union(self, *args, **keys):
    """Return a new event with every key-value pair from this event
    and/or ``Event(*args, **keys)``."""
    return type(self)(self, *args, **keys)

def difference(self, *args, **keys):
    """Return a new event with this event's pairs minus those appearing
    in ``Event(*args, **keys)``."""
    removed = self._itemize(*args, **keys)
    remaining = dict()
    for key, values in self._attrs.iteritems():
        kept = values.difference(removed.get(key, ()))
        if kept:
            remaining[key] = kept
    return type(self)(remaining)
def add(self, key, value, *values):
    """Add one or more values for a key.

    Key-value pairs already contained by the event are ignored
    (values are kept as a set).
    """
    self.update(key, (value,) + values)

def update(self, key, values):
    """Add every value in ``values`` for ``key``.

    The event is left untouched when ``values`` is empty. (Previously an
    empty update still registered the key with an empty value set, which
    made the event compare unequal to an otherwise identical event.)
    """
    normalized = set(_normalize(v) for v in values)
    if not normalized:
        # Nothing to add: do not create a phantom key.
        return
    key = _normalize(key)
    if key in self._attrs:
        self._attrs[key].update(normalized)
    else:
        self._attrs[key] = normalized
def discard(self, key, value, *values):
    """Remove the given value(s) of a key; values the key does not have
    are silently ignored."""
    key = _normalize(key)
    current = self._attrs.get(key)
    if current is None:
        return
    current.difference_update(_normalize(v) for v in (value,) + values)
    if not current:
        # Drop the key entirely once its last value is gone.
        del self._attrs[key]

def clear(self, key):
    """Remove all values of a key; unknown keys are silently ignored."""
    self._attrs.pop(_normalize(key), None)
def _unkeyed(self):
    # Yield every value of every key.
    for values in self._attrs.itervalues():
        for value in values:
            yield value

def _iter(self, key, parser, filter):
    # Shared lazy value iteration for values()/value(): optionally
    # restrict to one key, then apply the parser and/or filter.
    if key is self._UNDEFINED:
        values = set(self._unkeyed())
    else:
        values = self._attrs.get(_normalize(key), ())
    if parser is not None:
        parsed = (v for v in (parser(x) for x in values))
        if filter is None:
            # Default filtering drops values the parser rejected.
            return (v for v in parsed if v is not None)
        return (v for v in parsed if filter(v))
    if filter is not None:
        return (v for v in values if filter(v))
    return values
def pop(self, key, parser=None, filter=None):
    """Remove and return the (optionally parsed/filtered) values of a key.

    Only values accepted by the parser/filter are cleared from the
    event; non-matching values are preserved.
    """
    key = _normalize(key)
    snapshot = tuple(self._attrs.get(key, ()))
    results = []
    for raw in snapshot:
        parsed = raw if parser is None else parser(raw)
        if filter is not None:
            if not filter(parsed):
                continue
        elif parsed is None:
            # Default filtering drops values the parser rejected.
            continue
        self.discard(key, raw)
        results.append(parsed)
    return tuple(results)
def values(self, key=_UNDEFINED, parser=None, filter=None):
    """Return a tuple of event values (restricted to *key* when given),
    optionally run through *parser* and *filter*."""
    return tuple(self._iter(key, parser, filter))

def value(self, key=_UNDEFINED, default=_UNDEFINED,
          parser=None, filter=None):
    """Return one matching event value.

    Picks from the values of *key* when given, otherwise from all
    values. Returns *default* when supplied and nothing matches;
    otherwise raises KeyError.
    """
    for candidate in self._iter(key, parser, filter):
        return candidate
    if default is not self._UNDEFINED:
        return default
    if key is self._UNDEFINED:
        raise KeyError("no value available")
    raise KeyError(key)
def contains(self, key=_UNDEFINED, value=_UNDEFINED,
             parser=None, filter=None):
    """Return whether the event contains a matching key-value pair
    (for a specific key and/or value, when given)."""
    if key is self._UNDEFINED:
        candidates = set(self._unkeyed())
    else:
        candidates = self._attrs.get(_normalize(key), ())
    # Keep everything lazy so the parser runs no more often than needed.
    if parser is not None:
        candidates = (parser(v) for v in candidates)
        if filter is None:
            candidates = (v for v in candidates if v is not None)
    if filter is not None:
        candidates = (v for v in candidates if filter(v))
    for candidate in candidates:
        if value is self._UNDEFINED or value == candidate:
            return True
    return False
def items(self, parser=None, filter=None):
    """Return a tuple of (key, value) pairs contained by the event,
    optionally parsed/filtered. Pair order is undefined."""
    pairs = []
    for key, values in self._attrs.iteritems():
        for value in values:
            if parser is not None:
                value = parser(value)
            if filter is not None:
                if not filter(value):
                    continue
            elif value is None:
                # Default filtering drops values the parser rejected.
                continue
            pairs.append((key, value))
    return tuple(pairs)

def keys(self, parser=None, filter=None):
    """Return a tuple of keys that have at least one (matching) value."""
    matching = []
    for key in self._attrs:
        if self.contains(key, parser=parser, filter=filter):
            matching.append(key)
    return tuple(matching)
def to_elements(self, include_body=True):
    """Serialize the event into XML element(s) in the legacy "event"
    format; forbidden XML characters are replaced."""
    event_element = Element("event", xmlns=EVENT_NS)
    for key, value in self.items():
        event_element.add(Element("attr",
                                  key=_replace_non_xml_chars(key),
                                  value=_replace_non_xml_chars(value)))
    if not include_body:
        return event_element
    body = Element("body")
    body.text = _replace_non_xml_chars(unicode(self))
    return Elements(body, event_element)
def __reduce__(self):
    # Pickle through the attrs-mapping constructor path.
    return self.__class__, (self._attrs,)

def __eq__(self, other):
    if not isinstance(other, Event):
        return NotImplemented
    return self._attrs == other._attrs

def __ne__(self, other):
    result = self.__eq__(other)
    return result if result is NotImplemented else not result
def __unicode__(self):
    """Serialize the event as comma separated ``key=value`` pairs
    (pair order is undefined)."""
    parts = (_unicode_quote(key) + u"=" + _unicode_quote(value)
             for key, value in self.items())
    return u", ".join(parts)

def __repr__(self):
    grouped = dict()
    for key, value in self.items():
        grouped.setdefault(key, []).append(value)
    return "%s(%r)" % (self.__class__.__name__, grouped)
def hexdigest(event, func=hashlib.sha1):
    """Return a stable hex digest of the event's key-value pairs.

    Equal events always hash equal regardless of key-value insertion
    order; distinct events may (exceedingly rarely) collide. ``func``
    selects the algorithm (any hashlib-style constructor). The default
    is SHA1 but is NOT guaranteed to stay SHA1 forever — pass the
    function explicitly when the digest must stay reproducible.

    >>> import hashlib
    >>> hexdigest(Event(a="b"), hashlib.md5)
    '51a8ca876645d37e29419694f6396fbc'
    >>> hexdigest(Event(a="b"), hashlib.sha1)
    'edf6294fc1d3f9fe8be4a2d5626788bcfde05e62'
    """
    digest = func()
    # 0xC0 never appears in UTF-8 output, so it unambiguously delimits
    # the encoded keys and values. Fix: the separator is fed as bytes
    # (b"\xc0") to match the encoded updates; identical on Python 2.
    for key, value in sorted(event.items()):
        digest.update(key.encode("utf-8"))
        digest.update(b"\xc0")
        digest.update(value.encode("utf-8"))
        digest.update(b"\xc0")
    return digest.hexdigest()
def stanzas_to_events():
    # Pipeline stage mapping incoming XML stanzas to parsed Event objects.
    return idiokit.map(Event.from_elements)

def events_to_elements():
    # Pipeline stage mapping outgoing events to one-tuples of XML elements.
    return idiokit.map(lambda event: (event.to_elements(),))
| mit | cbfd28e3f370d8707d4625aabd0503d1 | 29.048649 | 85 | 0.519473 | 3.999281 | false | false | false | false |
class Classifier(object):
    """Maps rules to reference-counted class ids and classifies objects.

    Each (rule, class_id) pair carries a count, so the same pair can be
    registered several times; the pair disappears only when its count
    drops back to zero.
    """

    def __init__(self):
        self._rules = dict()

    def inc(self, rule, class_id):
        """Increase the reference count of (rule, class_id)."""
        classes = self._rules.setdefault(rule, dict())
        classes[class_id] = classes.get(class_id, 0) + 1

    def dec(self, rule, class_id):
        """Decrease the reference count of (rule, class_id), dropping the
        pair — and the rule itself — once nothing references it."""
        classes = self._rules.get(rule)
        if classes is None:
            return
        remaining = classes.get(class_id, 0) - 1
        if remaining > 0:
            classes[class_id] = remaining
        else:
            classes.pop(class_id, None)
            if not classes:
                self._rules.pop(rule, None)

    def classify(self, obj):
        """Return the set of class ids whose rules match *obj*."""
        matched = set()
        cache = dict()
        for rule, classes in self._rules.iteritems():
            # Skip matching when the rule cannot add any new class ids.
            if matched.issuperset(classes):
                continue
            if rule.match(obj, cache):
                matched.update(classes)
        return matched

    def is_empty(self):
        """Return True when no rules are registered."""
        return not self._rules
| mit | 679536d3571d5e898a2423dad1a7107f | 25.487179 | 56 | 0.516941 | 4.066929 | false | false | false | false |
erdc/proteus | proteus/tests/periodic/la_periodicGauss_p.py | 1 | 5749 | import numpy
from proteus import *
from proteus.default_p import *
from math import *
from proteus.mprans import VOF, NCLS, VOS3P
"""
Linear advection of a guassian with periodic bcs
"""
nd = 2#number of space dimensions
#constant velocity
velocity = numpy.array([1.0,1.0])
if nd == 3:
velocity = numpy.array([1.0,1.0,1.0])
#where gaussian starts
center = numpy.array([0.5,0.5])#numpy.array([0.25,0.25])#numpy.array([0.25,0.5])
if nd == 3:
velocity = numpy.array([1.0,1.0,1.0])
center = numpy.array([0.5,0.5,0.5])#numpy.array([0.25,0.25])#numpy.array([0.25,0.5])
#size
sigma = 1./16.
#quadrature
space_quad_order = 4#6 for dgp3
#form of level set equation
useHJ=False#True#False
#number nodes in each direction
nn=11#81#161#41
#number of meshes in multilevel mesh
nLevels = 1
#end time of simulation
T = 1.0
#number of output time steps, ignored for adaptive/CFL based runs
nDTout = 100
#max CFL
runCFL = 0.185#0.3,0.185,0.125 for dgp1,dgp2,dgpk(3)
name = 'la_periodicGauss_{0}d'.format(nd)
## \page Tests Test Problems
# \ref la_periodicGauss_2d_p.py "Linear advection of a Gaussian"
# \addtogroup test
#
# \file la_periodicGauss_2d_p.py
# @{
#
##\ingroup test
# \brief Conservative linear advection of a cone in a constant
# velocity field.
#
class ConstantVelocityGaussian2D:
    """Exact solution: a Gaussian of width *sigma* advected with constant
    velocity *b*, wrapping periodically on the unit square."""
    def __init__(self, sigma=1./8., b=[1.0, 0.0], xc=0.25, yc=0.5):
        self.sigma = sigma
        self.b = b
        self.xc = xc
        self.yc = yc
    def uOfXT(self, x, t):
        # Translate the center with the flow, wrapping into [0, 1).
        cx = (self.xc + self.b[0]*t) % 1.0
        cy = (self.yc + self.b[1]*t) % 1.0
        r2 = (x[0] - cx)**2 + (x[1] - cy)**2
        return exp(-0.5*r2/self.sigma**2)
class ConstantVelocityGaussian3D:
    """Exact solution: a Gaussian of width *sigma* advected with constant
    velocity *b*, wrapping periodically on the unit cube."""
    def __init__(self, sigma=1./8., b=[1.0, 0.0, 0.0], xc=0.25, yc=0.5, zc=0.5):
        self.sigma = sigma
        self.b = b
        self.xc = xc
        self.yc = yc
        self.zc = zc
    def uOfXT(self, x, t):
        # Translate the center with the flow, wrapping into [0, 1).
        cx = (self.xc + self.b[0]*t) % 1.0
        cy = (self.yc + self.b[1]*t) % 1.0
        cz = (self.zc + self.b[2]*t) % 1.0
        r2 = (x[0] - cx)**2 + (x[1] - cy)**2 + (x[2] - cz)**2
        return exp(-0.5*r2/self.sigma**2)
#
analyticalSolution = {0:ConstantVelocityGaussian2D(sigma=sigma,
b=velocity,
xc=center[0],
yc=center[1])}
if nd == 3:
analyticalSolution = {0:ConstantVelocityGaussian3D(sigma=sigma,
b=velocity,
xc=center[0],
yc=center[1],
zc=center[2])}
M = {0:1.0}
A = {0:numpy.zeros((nd,nd),'d')}
B = {0:velocity}
C = {0:0.0}
useVOS=True
useVOF=False
useNCLS=False
if useVOS:
LevelModelType = VOS3P.LevelModel
coefficients = VOS3P.Coefficients(LS_model=None,V_model=None,RD_model=None,ME_model=0,checkMass=False,
epsFact=0.0,useMetrics=1.0,
STABILIZATION_TYPE=2,
LUMPED_MASS_MATRIX=False,
ENTROPY_TYPE=2,
FCT=True,
num_fct_iter=1)
elif useNCLS:
LevelModelType = NCLS.LevelModel
coefficients = NCLS.Coefficients(V_model=None,RD_model=None,ME_model=0,checkMass=False,
epsFact=0.0,useMetrics=1.0)
elif useVOF:
LevelModelType = VOF.LevelModel
coefficients = VOF.Coefficients(LS_model=None,V_model=None,RD_model=None,ME_model=0,checkMass=False,
epsFact=0.0,useMetrics=1.0,FCT=False)
elif useHJ:
coefficients = ConstantVelocityLevelSet(b=velocity)
else:
coefficients = LinearVADR_ConstantCoefficients(nc=1,M=M,A=A,B=B,C=C)
coefficients.variableNames=['u']
#now define the Dirichlet boundary conditions
def getDBC(x, tag):
    # No Dirichlet boundary conditions: the problem is fully periodic.
    return None

# Tolerance reserved for boundary comparisons.
eps = 1.0e-8

def getPDBC(x, tag):
    # Map each periodic boundary point to its canonical image: corners
    # collapse to the origin, edge points to the matching coordinate
    # (rounded to avoid floating point mismatches between paired nodes).
    on_x = x[0] == 0.0 or x[0] == 1.0
    on_y = x[1] == 0.0 or x[1] == 1.0
    if on_x and on_y:
        return numpy.array([0.0, 0.0, 0.0])
    if on_x:
        return numpy.array([0.0, round(x[1], 5), 0.0])
    if on_y:
        return numpy.array([round(x[0], 5), 0.0, 0.0])

if nd == 3:
    def getPDBC(x, tag):
        # 3D analogue: corners -> origin, edges -> axis point, faces ->
        # plane point.
        on_x = x[0] == 0.0 or x[0] == 1.0
        on_y = x[1] == 0.0 or x[1] == 1.0
        on_z = x[2] == 0.0 or x[2] == 1.0
        if on_x and on_y and on_z:
            return numpy.array([0.0, 0.0, 0.0])
        if on_x and on_y:
            return numpy.array([0.0, 0.0, round(x[2], 5)])
        if on_x and on_z:
            return numpy.array([0.0, round(x[1], 5), 0.0])
        if on_y and on_z:
            return numpy.array([round(x[0], 5), 0.0, 0.0])
        if on_x:
            return numpy.array([0.0, round(x[1], 5), round(x[2], 5)])
        if on_y:
            return numpy.array([round(x[0], 5), 0.0, round(x[2], 5)])
        if on_z:
            return numpy.array([round(x[0], 5), round(x[1], 5), 0.0])
periodicDirichletConditions = {0:getPDBC}
parallelPeriodic=True
dirichletConditions = {0:getDBC}
initialConditions = {0:analyticalSolution[0]}
def getFluxBC(x, flag):
return lambda x,t: 0.0
advectiveFluxBoundaryConditions = {0:getFluxBC}
diffusiveFluxBoundaryConditions = {0:{}}
| mit | 39a0c895ce3a4dd5a4a07059f08f53b2 | 34.054878 | 106 | 0.539398 | 2.697794 | false | false | false | false |
erdc/proteus | proteus/tests/AddedMass/addedmass3D.py | 1 | 4673 | from __future__ import print_function
from __future__ import division
from past.utils import old_div
import numpy as np
from proteus import Domain
from proteus.mprans import SpatialTools as st
from proteus.mbd import CouplingFSI as fsi
import pychrono as chrono
from proteus.TwoPhaseFlow import TwoPhaseFlowProblem as tpf
from proteus.TwoPhaseFlow.utils import Parameters
import os
rho_0 = 1000.
nu_0 = 1.004e-6
rho_1 = 1.205
nu_1 = 1.500e-5
sigma_01 = 0.0
g = [0., 0., -9.81]
he = 2.5
water_level = 2.5
# GEOMETRY
domain = Domain.PiecewiseLinearComplexDomain()
nd=3
tank_dim = [5.,5.,5.]
tank = st.Tank3D(domain, dim=tank_dim)
rect = st.Cuboid(domain, dim=[1.,1.,1.], coords=[old_div(tank_dim[0],2.),
old_div(tank_dim[1],2.),
old_div(tank_dim[2],2.)])
rect.setHoles(holes=np.array([rect.coords]))
domain.MeshOptions.he = he
# BOUNDARY CONDITIONS
tank.BC['x+'].setNoSlip()
tank.BC['x-'].setNoSlip()
tank.BC['y-'].setNoSlip()
tank.BC['y+'].setNoSlip()
tank.BC['z-'].setNoSlip()
tank.BC['z+'].setAtmosphere()
rect.BC['x+'].setNoSlip()
rect.BC['x-'].setNoSlip()
rect.BC['y+'].setNoSlip()
rect.BC['y-'].setNoSlip()
rect.BC['z+'].setNoSlip()
rect.BC['z-'].setNoSlip()
# CHRONO
system = fsi.ProtChSystem()
system.ChSystem.Set_G_acc(chrono.ChVectorD(g[0], g[1], 0.))
body = fsi.ProtChBody(system=system)
body.attachShape(rect)
body.ChBody.SetMass(500.)
body.ChBody.SetBodyFixed(True) # fixing body
# OTHER PARAMS
st.assembleDomain(domain)
domain.polyfile=domain.polyfile=os.path.dirname(os.path.abspath(__file__))+"/"+"mesh3D"
domain.MeshOptions.he = he
domain.MeshOptions.genMesh=False
#domain.writePoly("mesh3D")
# ___ _ _ _ _ ____ _ _ _ _
# |_ _|_ __ (_) |_(_) __ _| | / ___|___ _ __ __| (_) |_(_) ___ _ __ ___
# | || '_ \| | __| |/ _` | | | | / _ \| '_ \ / _` | | __| |/ _ \| '_ \/ __|
# | || | | | | |_| | (_| | | | |__| (_) | | | | (_| | | |_| | (_) | | | \__ \
# |___|_| |_|_|\__|_|\__,_|_| \____\___/|_| |_|\__,_|_|\__|_|\___/|_| |_|___/
# Initial Conditions
nd = domain.nd
class PerturbedSurface_p:
    """Hydrostatic initial pressure relative to the tank top
    (module globals nd, tank_dim, g, rho_0 supply the configuration)."""
    def uOfXT(self, x, t):
        p_top = 0.0  # gauge pressure at the top of the tank
        depth = x[nd-1] - tank_dim[nd-1]  # signed offset below the top
        return p_top - g[nd-1]*(rho_0*depth)
class AtRest:
    """Zero initial condition: the fluid starts at rest."""
    def __init__(self):
        pass
    def uOfXT(self, x, t):
        # Field is identically zero everywhere, for all times.
        return 0.0
# _ _ _
# | \ | |_ _ _ __ ___ ___ _ __(_) ___ ___
# | \| | | | | '_ ` _ \ / _ \ '__| |/ __/ __|
# | |\ | |_| | | | | | | __/ | | | (__\__ \
# |_| \_|\__,_|_| |_| |_|\___|_| |_|\___|___/
# Numerics
myTpFlowProblem = tpf.TwoPhaseFlowProblem()
myTpFlowProblem.domain = domain
myTpFlowProblem.outputStepping.final_time = 0.002
myTpFlowProblem.outputStepping.dt_init = 0.001
myTpFlowProblem.outputStepping.dt_output = 0.001
myTpFlowProblem.outputStepping.dt_fixed = 0.001
myTpFlowProblem.SystemPhysics.setDefaults()
myTpFlowProblem.SystemNumerics.cfl = 0.4
myTpFlowProblem.SystemNumerics.useSuperlu=False
myTpFlowProblem.SystemPhysics.movingDomain = False
params = myTpFlowProblem.SystemPhysics
# PHYSICAL PARAMETERS
params.rho_0 = rho_0 # water
params.rho_1 = rho_1 # air
params.nu_0 = nu_0 # water
params.nu_1 = nu_1 # air
params.surf_tension_coeff = sigma_01
# MODEL PARAMETERS
m = params.modelDict
myTpFlowProblem.SystemPhysics.addModel(Parameters.ParametersModelRANS2P,'flow')
myTpFlowProblem.SystemPhysics.addModel(Parameters.ParametersModelAddedMass,'addedMass')
myTpFlowProblem.SystemPhysics.modelDict['flow'].p.initialConditions['p']=PerturbedSurface_p()
myTpFlowProblem.SystemPhysics.modelDict['flow'].p.initialConditions['u']=AtRest()
myTpFlowProblem.SystemPhysics.modelDict['flow'].p.initialConditions['v']=AtRest()
myTpFlowProblem.SystemPhysics.modelDict['flow'].p.initialConditions['w']=AtRest()
myTpFlowProblem.SystemPhysics.modelDict['addedMass'].p.initialConditions['addedMass']=AtRest()
m['flow'].p.coefficients.useVF = 1.0
m['flow'].p.coefficients.NONCONSERVATIVE_FORM = 0.0
# auxiliary variables
m['flow'].auxiliaryVariables += [system]
m['addedMass'].auxiliaryVariables += [system.ProtChAddedMass]
flags_rigidbody = np.zeros(20)
for key in rect.boundaryTags_global:
flags_rigidbody[rect.boundaryTags_global[key]] = 1.
max_flag = 0
max_flag = max(domain.vertexFlags)
max_flag = max(domain.segmentFlags+[max_flag])
max_flag = max(domain.facetFlags+[max_flag])
flags_rigidbody = np.zeros(max_flag+1, dtype='int32')
for s in system.subcomponents:
if type(s) is fsi.ProtChBody:
for flag in s.boundaryFlags:
flags_rigidbody[flag] = 1
m['addedMass'].p.coefficients.flags_rigidbody = flags_rigidbody
| mit | 4bd118b79da26841e90c094b77493dc2 | 29.542484 | 94 | 0.625294 | 2.604794 | false | false | false | false |
erdc/proteus | proteus/tests/levelset/rotation/ls_rotation_2d_p.py | 1 | 6625 | from __future__ import absolute_import
from builtins import object
from proteus import *
from proteus.default_p import *
from math import *
try:
from .rotation2D import *
except:
from rotation2D import *
from proteus.mprans import NCLS
#import Profiling
LevelModelType = NCLS.LevelModel
logEvent = Profiling.logEvent
name=soname+"_ls"
nd=2
## \page Tests Test Problems
# \ref ls_rotation_2d_p.py "Linear advection of a circular level set function in an oscillating rotation velocity field"
#
##\ingroup test
# \file la_rotation_2d_p.py
# @{
# \brief Conservative linear advection of a circle signed distance function
# in a oscillating rotation velocity field.
#
# \f{eqnarray*}
# \phi_t + \nabla \cdot (\vec u \phi) &=& 0 \\
# \Omega &=& [0,1] \times [0,1] \\
# u^{x} &=& \cos(\pi t/8)\sin(2\pi y)\sin^2(\pi x) \\
# u^{y} &=& -\cos(\pi t/8)\sin(2\pi x)\sin^{2}(\pi y) \\
# \phi^{0} &=& \left(x-\frac{1}{2}\right)^2 + \left(y-\frac{3}{4}\right)^2 - 0.15^2
# \f}
# The solution should return to the initial condition at \f$T=8\f$.
# Outflow boundaries are applied on \f$\partial \Omega\f$.
#
#
# \image html save_la_rotation_2d_dgp2_exact.jpg "exact solution, T=8.0"
# \image latex save_la_rotation_2d_dgp2_exact.eps "exact solution, T=8.0"
# \image html save_la_rotation_2d_dgp2_phi.jpg "RKDG P^2 solution, Cr=0.1, L^2 error= 7.84e-3"
# \image latex save_la_rotation_2d_dgp2_phi.eps "RKDG $P^2$ solution, Cr=0.1, $L^2$ error= 7.84e-3"
#
class OscillatingRotation2D(object):
    """Signed distance to a circle of radius 0.25 centered at (0, 0.5):
    positive inside, negative outside. The box dimensions L are accepted
    for interface compatibility but unused."""
    def __init__(self, L):
        self.radius = 0.25
        self.xc = 0.0
        self.yc = 0.5
    def uOfXT(self, x, t):
        dist = math.sqrt((x[0] - self.xc)**2 + (x[1] - self.yc)**2)
        return self.radius - dist
class OscillatingRotation2Dcylinder(object):
    """Signed distance to a circle scaled by the box dimensions L:
    radius 0.15*L[0], centered at (0.5*L[0], 0.75*L[1])."""
    def __init__(self, L):
        self.radius = 0.15*L[0]
        self.xc = 0.5*L[0]
        self.yc = 0.75*L[1]
    def uOfXT(self, x, t):
        dist = math.sqrt((x[0] - self.xc)**2 + (x[1] - self.yc)**2)
        return self.radius - dist
analyticalSolution = {0:OscillatingRotation2D(L)}
class UnitSquareRotation(NCLS.Coefficients):
    """NCLS coefficients for solid-body rotation of a level set function
    on the unit square; the velocity field is u = 2*pi*(-y, x)."""
    # C-level evaluation routines for the rotation velocity field.
    from proteus.ctransportCoefficients import unitSquareRotationEvaluate
    from proteus.ctransportCoefficients import unitSquareRotationLevelSetEvaluate
    def __init__(self,useHJ=False,epsFact=1.5,checkMass=False,
                 RD_model=None,
                 useMetrics=0.0,sc_uref=1.0,sc_beta=1.0):
        self.waterline_interval=-1
        self.epsFact=epsFact
        self.useHJ = useHJ
        self.RD_modelIndex=RD_model
        self.sc_uref=sc_uref
        self.sc_beta=sc_beta
        self.useMetrics=useMetrics
        # NOTE(review): these coefficient dictionaries are built but never
        # passed on — NCLS.Coefficients.__init__ below takes no arguments.
        mass={0:{0:'linear'}}
        advection={0:{0:'linear'}}
        diffusion={}
        potential={}
        reaction={}
        if self.useHJ:
            hamiltonian={0:{0:'linear'}}
        else:
            hamiltonian={}
        NCLS.Coefficients.__init__(self)
        self.checkMass=checkMass
        # NOTE(review): the constructor arguments useMetrics/sc_uref/sc_beta
        # assigned above are overwritten with fixed values here — confirm
        # whether this is intentional.
        self.useMetrics = 0.0
        self.sc_uref=1.0
        self.sc_beta=1.0
    def attachModels(self,modelList):
        # The level set transport model is first in the model list.
        self.model = modelList[0]
        self.u_old_dof = numpy.copy(self.model.u[0].dof)
        # Velocity storage shaped like the Hamiltonian derivative arrays.
        self.q_v = numpy.zeros(self.model.q[('dH',0,0)].shape,'d')
        self.ebqe_v = numpy.zeros(self.model.ebqe[('dH',0,0)].shape,'d')
        # Evaluate the rotation field at the last time level to initialize
        # the quadrature arrays.
        self.unitSquareRotationLevelSetEvaluate(self.model.timeIntegration.tLast,
                                                self.model.q['x'],
                                                self.model.q[('u',0)],self.model.q[('grad(u)',0)],
                                                self.model.q[('m',0)],self.model.q[('dm',0,0)],
                                                self.model.q[('dH',0,0)],self.model.q[('dH',0,0)],
                                                self.model.q[('H',0)],self.q_v)
        self.model.q[('velocity',0)]=self.q_v
        self.model.ebqe[('velocity',0)]=self.ebqe_v
        # Optional redistancing model; fall back to the transport model.
        if self.RD_modelIndex != None:
            #print self.RD_modelIndex,len(modelList)
            self.rdModel = modelList[self.RD_modelIndex]
        else:
            self.rdModel = self.model
        # (mass conservation bookkeeping kept for reference)
        # if self.checkMass:
        #     self.m_pre = Norms.scalarSmoothedHeavisideDomainIntegral(self.epsFact,
        #                                                              self.model.mesh.elementDiametersArray,
        #                                                              self.model.q['dV'],
        #                                                              self.model.q[('m',0)],
        #                                                              self.model.mesh.nElements_owned)
        #     logEvent("Attach Models UnitSquareRotation: Phase 0 mass before NCLS step = %12.5e" % (self.m_pre,),level=2)
        #     self.totalFluxGlobal=0.0
        #     self.lsGlobalMassArray = [self.m_pre]
        #     self.lsGlobalMassErrorArray = [0.0]
        #     self.fluxArray = [0.0]
        #     self.timeArray = [self.model.timeIntegration.t]
    def preStep(self,t,firstStep=False):
        # Refresh the rotation field u = 2*pi*(-y, x) before each step.
        self.q_v[...,0] = -2.0*math.pi*self.model.q['x'][...,1]
        self.q_v[...,1] = 2.0*math.pi*self.model.q['x'][...,0]
        copyInstructions = {}
        return copyInstructions
    def postStep(self,t,firstStep=False):
        # Keep a copy of the solution as the previous time level.
        self.u_old_dof = numpy.copy(self.model.u[0].dof)
        copyInstructions = {}
        return copyInstructions
    def evaluate(self,t,c):
        # Evaluation is handled via the C routines in attachModels/preStep.
        pass
if applyRedistancing:
RD_model=1
else:
RD_model=None
coefficients = UnitSquareRotation(useHJ=True,epsFact=epsFactHeaviside,checkMass=checkMass,RD_model=RD_model,useMetrics=useMetrics)
coefficients.variableNames=['u']
#now define the Dirichlet boundary conditions
def getDBC(x, flag):
    # No Dirichlet boundary conditions for the level set function.
    pass

def zeroInflow(x):
    # Homogeneous inflow boundary condition.
    return lambda x, t: 0.0
dirichletConditions = {0:getDBC}
initialConditions = {0:analyticalSolution[0]}
fluxBoundaryConditions = {0:'outFlow'}
def zeroadv(x):
    # Zero advective flux on the boundary.
    return lambda x, t: 0.0
advectiveFluxBoundaryConditions = {}
#advectiveFluxBoundaryConditions = {0:zeroadv}
diffusiveFluxBoundaryConditions = {0:{}}
## @}
| mit | c75d9c112ea8a6bcaea7a7a6cbb3a2a8 | 36.011173 | 130 | 0.569962 | 2.978867 | false | false | false | false |
erdc/proteus | proteus/tests/post_processing/import_modules/bdm_tests_3Dtemplate.py | 1 | 3679 | from builtins import range
from builtins import object
from proteus import iproteus as ip
from proteus import default_p as p
from proteus import default_n as n
from proteus import default_s,default_so
from proteus import Context
import numpy, os, inspect
import proteus as pr
from importlib import reload
reload(p)
reload(n)
p.nd = 3
p.name = "BDM2_Test_File_projection"
current_dir = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile( inspect.currentframe() )) [0]))
p.polyfile = os.path.join(current_dir,"reference_simplex_keep")
p.genMesh = False
p.nc = 1
class velEx(object):
    """Exact velocity v = -A(x) grad(u) for the Poisson problem."""
    def __init__(self, duex, aex):
        self.duex = duex  # object providing duOfX (exact gradient)
        self.aex = aex    # callable returning the (flattenable) 2x2 tensor A(x)
    def uOfX(self, X):
        grad = self.duex.duOfX(X)
        tensor = numpy.reshape(self.aex(X), (2, 2))
        return -numpy.dot(tensor, grad)
    def uOfXT(self, X, T):
        # Steady solution: time is ignored.
        return self.uOfX(X)
def A(x):
    # Constant identity diffusion tensor.
    return numpy.array([[1.0, 0.0], [0.0, 1.0]], 'd')

def f(x):
    # Constant source term.
    return 1.0
class uEx(object):
def __init__(self):
pass
def uOfX(self,x):
return x[0]**2 + x[1]**2
def uOfXT(self,x,T):
return self.uOfX(x)
def duOfX(self,X):
du = 2.0*numpy.reshape(X[0:2],(2,))
return du
def duOfXT(self,X,T):
return self.duOfX(X)
def getDBC(x,flag):
if x[0] in [0.0] or x[1] in [0.0,1.0]:
return lambda x,t: uEx().uOfXT(x,t)
def getAdvFluxBC(x,flag):
pass
def getDiffFluxBC(x, flag):
    """Diffusive flux BC on the x0 == 1 face: the exact flux dotted with the
    outward unit normal (1, 0, ...); None on all other faces."""
    if x[0] == 1.0:
        # renamed from 'n' to avoid shadowing the module-level numerics object
        normal = numpy.zeros((p.nd,), 'd')
        normal[0] = 1.0
        return lambda x, t: numpy.dot(velEx(uEx(), A).uOfXT(x, t), normal)
# Wire the exact solution, BCs and Poisson coefficients into the physics module.
p.analyticalSolution = {0:uEx()}
p.dirichletConditions = {0:getDBC}
aOfX = {0:A}; fOfX = {0:f}
p.advectiveFluxBoundaryConditions =  {0:getAdvFluxBC}
p.diffusiveFluxBoundaryConditions = {0:{0:getDiffFluxBC}}
p.periodicDirichletConditions = None
p.coefficients = pr.TransportCoefficients.PoissonEquationCoefficients(aOfX,fOfX,p.nc,p.nd)
############################
# Numerics module (n): steady solve, P2 elements, one Newton iteration.
n.timeIntegration = pr.TimeIntegration.NoIntegration
n.nDTout = 1
n.T = 1
n.parallel = False
n.femSpaces = dict((i,pr.FemTools.C0_AffineQuadraticOnSimplexWithNodalBasis) for i in range(p.nc))
n.elementQuadrature = pr.Quadrature.SimplexGaussQuadrature(p.nd,4)
n.elementBoundaryQuadrature = pr.Quadrature.SimplexGaussQuadrature(p.nd-1,4)
n.nn = 3
n.nLevels = 1
n.genMesh = False
n.subgridError = None
n.shockCapturing = None
n.multilevelNonlinearSolver = pr.NonlinearSolvers.Newton
n.levelNonlinearSolver = pr.NonlinearSolvers.Newton
n.maxNonlinearIts = 1
n.fullNewtonFlag = True
n.totFac = 1.0e-8
n.nl_atol_res = 1.0e-8
n.matrix = pr.LinearAlgebraTools.SparseMatrix
# Linear solver selection: PETSc KSP in parallel, direct LU in serial.
if n.parallel:
    n.multilevelLinearSolver = pr.KSP_petsc4py#PETSc#LU
    n.levelLinearSolver = pr.KSP_petsc4py#PETSc#LU#MGM#PETSc#
    n.nLayersOfOverlapForParallel = 1
    n.parallelPartitioningType = pr.MeshParallelPartitioningTypes.element
    n.numericalFluxType = pr.Advection_DiagonalUpwind_Diffusion_IIPG_exterior
    n.linearSmoother = None
else:
    n.multilevelLinearSolver = pr.LinearSolvers.LU
    n.levelLinearSolver = pr.LinearSolvers.LU#MGM#
    n.linearSolverConvergenceTest= 'r'#r-true'#'r'
    n.linearSmoother = pr.LinearSolvers.StarILU#GaussSeidel#Jacobi#StarILU
    n.linTolFac = 0.0
    n.l_atol_res = 1.0e-10
n.multigridCycles = 0
# BDM2 conservative flux post-processing -- the feature under test.
n.cfluxtag  = 'pwl-bdm2'
n.conservativeFlux =  {0:'pwl-bdm2'}
#########################################################################
so = default_so
so.name = p.name
so.sList=[default_s]
########################################################################
from proteus import *
opts = Context.Options([("hotStart", True, "Use prescriped reference simplex file.")])
ns = NumericalSolution.NS_base(so,[p],[n],so.sList,ip.opts)
| mit | 589c0b5f17150ddf591d46d1a8b65bd9 | 26.871212 | 109 | 0.665126 | 2.67175 | false | false | false | false |
erdc/proteus | proteus/NonlinearSolvers.py | 1 | 179593 | """
A hierarchy of classes for nonlinear algebraic system solvers.
.. inheritance-diagram:: proteus.NonlinearSolvers
:parts: 1
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from builtins import str
from builtins import zip
from builtins import range
from past.utils import old_div
from builtins import object
import numpy
import numpy as np
from math import *
import math #to disambiguate math.log and log
from .LinearAlgebraTools import *
from .Profiling import *
from . import csmoothers
class NonlinearEquation(object):
    """
    The base class for nonlinear equations F(u) = 0.

    Subclasses provide the residual and Jacobian evaluations; this base
    class only tracks the problem dimensions and evaluation counters.
    """
    def __init__(self,dim=0,dim_proc=None):
        # dim: global number of unknowns
        # dim_proc: unknowns owned by this process (defaults to dim, i.e. serial)
        self.dim=dim
        if dim_proc is None:
            self.dim_proc=self.dim
        else:
            self.dim_proc = dim_proc
        #mwf decide if we can keep solver statistics here
        self.nonlinear_function_evaluations = 0
        self.nonlinear_function_jacobian_evaluations = 0
    def getResidual(self,u,r):
        """Evaluate the residual r = F(u); subclasses must override.

        FIX: the stub previously lacked ``self``, so calling it on an
        instance raised TypeError instead of being a harmless no-op.
        """
        pass
    def getJacobian(self,jacobian,usePicard=False):
        """Evaluate the Jacobian F'(u); subclasses must override.

        FIX: the stub previously lacked ``self`` (same issue as getResidual).
        """
        pass
    def resetNonlinearFunctionStatistics(self):
        """Zero the residual/Jacobian evaluation counters."""
        self.nonlinear_function_evaluations = 0
        self.nonlinear_function_jacobian_evaluations = 0
class NonlinearSolver(object):
    """
    The base class for nonlinear solvers.

    Provides convergence testing ('r', 'u', 'its', 'rits'), convergence-rate
    bookkeeping, and reporting.  Subclasses (e.g. Newton) implement the
    actual update step in their solve() methods.

    FIX: the attribute previously spelled ``last_log_ratior_du`` in __init__,
    solveInitialize and the reset branches of computeConvergenceRates is the
    same quantity read as ``last_log_ratio_du`` when computing the du
    reduction order; the misspelling meant the resets never took effect.
    The name is now ``last_log_ratio_du`` everywhere.
    """
    def __init__(self,
                 F,J=None,du=None,
                 rtol_r  = 1.0e-4,
                 atol_r  = 1.0e-16,
                 rtol_du = 1.0e-4,
                 atol_du = 1.0e-16,
                 maxIts  = 100,
                 norm = l2Norm,
                 convergenceTest = 'r',
                 computeRates = True,
                 printInfo = True,
                 unorm = None,
                 tol_du=0.33):
        ## @var self.F
        #NonlinearEquation
        self.F = F
        self.J = J
        if du is None:
            self.du = Vec(F.dim)
        else:
            self.du = du
        # absolute/relative tolerances on the residual and on the update
        self.rtol_r=rtol_r
        self.atol_r=atol_r
        self.rtol_du=rtol_du
        self.atol_du=atol_du
        self.maxIts=maxIts
        self.its=0
        self.solveCalls = 0
        self.recordedIts=0
        self.solveCalls_failed = 0
        self.recordedIts_failed=0
        # running convergence-rate statistics (per solve and averaged)
        self.rReductionFactor=0.0
        self.duReductionFactor=100.0
        self.rReductionFactor_avg=0.0
        self.duReductionFactor_avg=100.0
        self.rReductionOrder=0.0
        self.rReductionOrder_avg=0.0
        self.duReductionOrder=0.0
        self.duReductionOrder_avg=0.0
        self.s=100.0
        self.ratio_r_current = 1.0
        self.ratio_r_solve = 1.0
        self.ratio_du_solve = 1.0
        self.last_log_ratio_r = 1.0
        self.last_log_ratio_du = 1.0
        #mwf begin hacks for conv. rate
        self.gustafsson_alpha = -12345.0
        self.gustafsson_norm_du_last = -12345.0
        #mwf end hacks for conv. rate
        # W normalizes norms by the number of unknowns
        self.W = old_div(1.0,float(self.F.dim))
        self.kappa_current = 0.0 #condition number
        self.kappa_max = 0.0
        self.norm_2_J_current = 0.0
        self.norm_2_dJ_current = 0.0
        self.betaK_current  = 0.0 #Norm of J=F'
        self.etaK_current   = 0.0 #norm of du where F' du = -F
        self.betaK_0  = 0.0 #Norm of J=F'
        self.etaK_0   = 0.0 #norm of du where F' du = -F
        self.betaK_1  = 0.0 #Norm of J=F'
        self.etaK_1   = 0.0 #norm of du where F' du = -F
        self.gammaK_current = 0.0 #Lipschitz constant of J(u) approximated as ||F'(u+du)+F'(u)||/||du||
        self.gammaK_max=0.0
        self.norm_r_hist=[]
        self.norm_du_hist=[]
        self.convergenceTest = convergenceTest
        self.computeRates = computeRates
        self.printInfo = printInfo
        self.norm_function = norm
        if unorm is not None:
            self.unorm_function = unorm
        else:
            self.unorm_function = self.norm_function
        self.tol_du = tol_du
        self.infoString=''
        self.convergenceHistoryIsCorrupt=False
        self.r=None
        self.fullResidual=True
        self.lineSearch = True
        #need some information for parallel assembly options?
        self.par_fullOverlap = True #whether or not partitioning has overlap or not
        self.linearSolverFailed = False
        self.failedFlag = False

    def norm(self,u):
        """Norm of a residual-like vector restricted to locally-owned dof."""
        try:
            return self.norm_function(u[self.F.owned_local])
        except AttributeError:
            logEvent("ERROR: F.owned local is not initialised in Transport.MultilevelTransport.initialize. Make sure that useSuperlu option is set to False")

    def unorm(self,u):
        """Norm of an update-like vector restricted to locally-owned dof."""
        try:
            return self.unorm_function(u[self.F.owned_local])
        except AttributeError:
            logEvent("ERROR!: F.owned local is not initialised in Transport.MultilevelTransport.initialize. Make sure that useSuperlu option is set to False")

    def fullNewtonOff(self):
        self.fullNewton=False

    def fullNewtonOn(self):
        self.fullNewton=True

    def fullResidualOff(self):
        self.fullResidual=False

    def fullResidualOn(self):
        self.fullResidual=True

    def computeResidual(self,u,r,b):
        """Compute r = F(u) - b, or the linearized residual r = J u - b
        when fullResidual is off."""
        if self.fullResidual:
            self.F.getResidual(u,r)
            if b is not None:
                r-=b
        else:
            if type(self.J).__name__ == 'ndarray':
                r[:] = numpy.dot(u,self.J)
            elif type(self.J).__name__ == 'SparseMatrix':
                self.J.matvec(u,r)
            if b is not None:
                r-=b

    def solveInitialize(self,u,r,b):
        """Initialize per-solve state (initial residual, counters, rate
        history) and return the residual vector to use."""
        if r is None:
            if self.r is None:
                self.r = Vec(self.F.dim)
            r=self.r
        else:
            self.r=r
        self.computeResidual(u,r,b)
        self.its = 0
        self.norm_r0 = self.norm(r)
        self.norm_r = self.norm_r0
        self.ratio_r_solve = 1.0
        self.ratio_du_solve = 1.0
        self.last_log_ratio_r = 1.0
        self.last_log_ratio_du = 1.0
        #self.convergenceHistoryIsCorrupt=False
        self.convergingIts = 0
        #mwf begin hack for conv. rate
        self.gustafsson_alpha = -12345.0
        self.gustafsson_norm_du_last = -12345.0
        #mwf end hack for conv. rate
        return r

    def computeConvergenceRates(self):
        """Update the residual/update reduction factors and orders.

        On any non-monotone step (residual or update grows, or a ratio
        underflows) the converging-iteration counter and per-solve ratios
        are reset and the method returns early.
        """
        if self.convergenceHistoryIsCorrupt:
            return
        else:
            #mwf begin hack for conv. rate
            #equation (5) in Gustafsson_Soderlind_97
            if self.gustafsson_norm_du_last >= 0.0:
                tmp = old_div(self.norm_du, (self.gustafsson_norm_du_last + 1.0e-16))
                self.gustafsson_alpha = max(self.gustafsson_alpha,tmp)
            #
            if self.its > 0:
                self.gustafsson_norm_du_last = self.norm_du
            #mwf end hack for conv. rate
            if self.convergingIts > 0:
                if self.norm_r < self.lastNorm_r:
                    self.ratio_r_current = old_div(self.norm_r,self.lastNorm_r)
                else:
                    logEvent("residual increase %s" % self.norm_r)
                    self.convergingIts=0
                    self.ratio_r_solve = 1.0
                    self.ratio_du_solve = 1.0
                    self.last_log_ratio_r = 1.0
                    self.last_log_ratio_du = 1.0
                    return
                if self.ratio_r_current > 1.0e-100:
                    log_ratio_r_current = math.log(self.ratio_r_current)
                else:
                    logEvent("log(ratio_r) too small ratio_r = %12.5e" % self.ratio_r_current)
                    self.convergingIts=0
                    self.ratio_r_solve = 1.0
                    self.ratio_du_solve = 1.0
                    self.last_log_ratio_r = 1.0
                    self.last_log_ratio_du = 1.0
                    return
                self.ratio_r_solve *= self.ratio_r_current
                self.rReductionFactor = pow(self.ratio_r_solve,old_div(1.0,self.convergingIts))
                if self.convergingIts > 1:
                    self.rReductionOrder = old_div(log_ratio_r_current, \
                                           self.last_log_ratio_r)
                if self.norm_du < self.lastNorm_du:
                    ratio_du_current = old_div(self.norm_du,self.lastNorm_du)
                else:
                    logEvent("du increase norm(du_last)=%12.5e, norm(du)=%12.5e, its=%d, convergingIts=%d" % (self.lastNorm_du,self.norm_du,self.its,self.convergingIts))
                    self.convergingIts=0
                    self.ratio_r_solve = 1.0
                    self.ratio_du_solve = 1.0
                    self.last_log_ratio_r = 1.0
                    self.last_log_ratio_du = 1.0
                    return
                if ratio_du_current > 1.0e-100:
                    log_ratio_du_current = math.log(ratio_du_current)
                else:
                    logEvent("log(du ratio) too small to calculate ratio_du=%12.5e" % ratio_du_current)
                    self.convergingIts=0
                    self.ratio_r_solve = 1.0
                    self.ratio_du_solve = 1.0
                    self.last_log_ratio_r = 1.0
                    self.last_log_ratio_du = 1.0
                    return
                self.ratio_du_solve *= ratio_du_current
                self.duReductionFactor = pow(self.ratio_du_solve,
                                             old_div(1.0,(self.convergingIts-1)))
                if self.duReductionFactor  < 1.0:
                    self.s = old_div(self.duReductionFactor,(1.0-self.duReductionFactor))
                else:
                    self.s=100.0
                if self.convergingIts > 2:
                    self.duReductionOrder = old_div(log_ratio_du_current, \
                                            self.last_log_ratio_du)
                self.last_log_ratio_du = log_ratio_du_current
                self.last_log_ratio_r = log_ratio_r_current
            self.lastNorm_du = self.norm_du
            self.lastNorm_r = self.norm_r

    def converged(self,r):
        """Return True when the configured convergence test is satisfied."""
        self.convergedFlag = False
        self.norm_r = self.norm(r)
        self.norm_du = self.unorm(self.du)
        if self.computeRates ==  True:
            self.computeConvergenceRates()
        if self.convergenceTest == 'its' or self.convergenceTest == 'rits':
            if self.its == self.maxIts:
                self.convergedFlag = True
        #print self.atol_r, self.rtol_r
        if self.convergenceTest == 'r' or self.convergenceTest == 'rits':
            if (self.its != 0 and
                self.norm_r < self.rtol_r*self.norm_r0 + self.atol_r):
                self.convergedFlag = True
        if self.convergenceTest == 'u':
            if (self.convergingIts != 0 and
                self.s * self.norm_du < self.tol_du):
                self.convergedFlag = True
        if self.convergedFlag == True and self.computeRates == True:
            self.computeAverages()
        if self.printInfo == True:
            print(self.info())
        #print self.convergedFlag
        return self.convergedFlag

    def failed(self):
        """Return True on linear-solver failure or when maxIts is reached
        without convergence; otherwise advance the iteration counters."""
        self.failedFlag = False
        if self.linearSolverFailed == True:
            self.failedFlag = True
            return self.failedFlag
        if self.its == self.maxIts and self.convergenceTest in ['r','u']:
            self.solveCalls_failed +=1
            self.recordedIts_failed +=self.its
            self.failedFlag = True
            logEvent("   Newton it %d == maxIts FAILED convergenceTest = %s" % (self.its,self.convergenceTest))
        else:
            self.its+=1
            self.convergingIts+=1
        return self.failedFlag

    def computeAverages(self):
        """Fold this solve's reduction statistics into the running averages."""
        self.recordedIts+=self.its
        if self.solveCalls == 0:
            self.rReductionFactor_avg = self.rReductionFactor
            self.duReductionFactor_avg = self.duReductionFactor
            self.rReductionOrder_avg = self.rReductionOrder
            self.duReductionOrder_avg = self.duReductionOrder
            self.solveCalls+=1
        else:
            self.rReductionFactor_avg*=self.solveCalls
            self.rReductionFactor_avg+=self.rReductionFactor
            self.duReductionFactor_avg*=self.solveCalls
            self.duReductionFactor_avg+=self.duReductionFactor
            self.rReductionOrder_avg*=self.solveCalls
            self.rReductionOrder_avg+=self.rReductionOrder
            self.duReductionOrder_avg*=self.solveCalls
            self.duReductionOrder_avg+=self.duReductionOrder
            self.solveCalls +=1
            self.rReductionFactor_avg/=self.solveCalls
            self.duReductionFactor_avg/=self.solveCalls
            self.rReductionOrder_avg/=self.solveCalls
            self.duReductionOrder_avg/=self.solveCalls

    def info(self):
        """Return a human-readable summary of the solver state/statistics."""
        self.infoString =  "************Start Nonlinear Solver Info ************ \n"
        self.infoString += "its                   = %i \n" % self.its
        self.infoString += "converging its        = %i \n" % self.convergingIts
        self.infoString += "r reduction factor    = %12.5e\n" % self.rReductionFactor
        self.infoString += "du reduction factor   = %12.5e\n" % self.duReductionFactor
        self.infoString += "r reduction order     = %12.5e\n" % self.rReductionOrder
        self.infoString += "du reduction order    = %12.5e\n" % self.duReductionOrder
        self.infoString += "<r reduction factor>  = %12.5e\n" % self.rReductionFactor_avg
        self.infoString += "<du reduction factor> = %12.5e\n" % self.duReductionFactor_avg
        self.infoString += "<r reduction order>   = %12.5e\n" % self.rReductionOrder_avg
        self.infoString += "<du reduction order>  = %12.5e\n" % self.duReductionOrder_avg
        self.infoString += "total its             = %i \n" % self.recordedIts
        self.infoString += "solver calls          = %i \n" % self.solveCalls
        self.infoString += "failures              = %i \n" % self.solveCalls_failed
        self.infoString += "failed its            = %i \n" % self.recordedIts_failed
        self.infoString += "maxIts                = %i \n" % self.maxIts
        self.infoString += "convergenceTest       = %s \n" % self.convergenceTest
        self.infoString += "atol_r                = %12.5e \n" % self.atol_r
        self.infoString += "rtol_r                = %12.5e \n" % self.rtol_r
        self.infoString += "norm(r0)              = %12.5e \n" % self.norm_r0
        self.infoString += "norm(r)               = %12.5e \n" % self.norm_r
        if self.convergenceHistoryIsCorrupt:
            self.infoString += "CONVERGENCE HISTORY IS CORRUPT!!!\n"
        self.infoString += "************End Nonlinear Solver Info ************\n"
        return self.infoString
class Newton(NonlinearSolver):
    """
    A simple iterative solver that is Newton's method
    if you give it the right Jacobian.

    Supports optional Eisenstat-Walker forcing terms for the inner linear
    solve, a damped line search, and (for debugging) eigenvalue estimates
    of J^T J used to monitor conditioning.

    FIX: the four bare ``except:`` clauses guarding eigenvalue-derived
    quantities are narrowed to ``except Exception:`` so KeyboardInterrupt
    and SystemExit are no longer swallowed; behavior is otherwise unchanged.
    """
    def __init__(self,
                 linearSolver,
                 F,J=None,du=None,par_du=None,
                 rtol_r  = 1.0e-4,
                 atol_r  = 1.0e-16,
                 rtol_du = 1.0e-4,
                 atol_du = 1.0e-16,
                 maxIts  = 100,
                 norm = l2Norm,
                 convergenceTest = 'r',
                 computeRates = True,
                 printInfo = True,
                 fullNewton=True,
                 directSolver=False,
                 EWtol=True,
                 maxLSits = 100):
        import copy
        self.par_du = par_du
        if par_du is not None:
            F.dim_proc = par_du.dim_proc
        NonlinearSolver.__init__(self,F,J,du,
                                 rtol_r,
                                 atol_r,
                                 rtol_du,
                                 atol_du,
                                 maxIts,
                                 norm,
                                 convergenceTest,
                                 computeRates,
                                 printInfo)
        self.updateJacobian=True
        self.fullNewton=fullNewton
        self.linearSolver = linearSolver
        self.directSolver = directSolver
        self.lineSearch = True
        self.EWtol=EWtol
        self.maxLSits = maxLSits
        if self.linearSolver.computeEigenvalues:
            # deep copies of J are needed to form J^T J and dJ^T dJ in place
            self.JLast = copy.deepcopy(self.J)
            self.J_t_J = copy.deepcopy(self.J)
            self.dJ_t_dJ = copy.deepcopy(self.J)
            self.JLsolver=LU(self.J_t_J,computeEigenvalues=True)
            self.dJLsolver=LU(self.dJ_t_dJ,computeEigenvalues=True)
            self.u0 = numpy.zeros(self.F.dim,'d')

    def setLinearSolverTolerance(self,r):
        """
        This function dynamically sets the relative tolerance
        of the linear solver associated with the non-linear iteration.

        Set useEistenstatWalker=True in a simulation's numerics file
        to ensure that this function is used.

        Parameters
        ----------
        r : vector
            non-linear residual vector

        Notes
        -----
        The size of the relative reduction assigned to the linear
        solver depends on two factors: (i) how far the non-linear
        solver is from satifying its residual reductions and
        (ii) how large the drop in the latest non-linear
        residual was.

        If the non-linear solver is both far from its residual
        reduction targets and the latest non-linear residual showed
        a big drop, then expect the algorithm to assign a large
        relative reduction to the linear solver.

        As the non-linear residual reduction targets get closer, or
        the non-linear solver stagnates, the linear solver will be
        assigned a smaller relative reduction up to a minimum of
        0.001.
        """
        self.norm_r = self.norm(r)
        gamma  = 0.0001
        etaMax = 0.001
        if self.norm_r == 0.0:
            etaMin = 0.0001
        else:
            etaMin = 0.0001*(self.rtol_r*self.norm_r0 + self.atol_r)/self.norm_r
        logEvent("etaMin "+repr(etaMin))
        if self.its > 1:
            etaA = gamma * self.norm_r**2/self.norm_r_last**2
            logEvent("etaA "+repr(etaA))
            logEvent("gamma*self.etaLast**2 "+ repr(gamma*self.etaLast**2))
            if gamma*self.etaLast**2 < 0.1:
                etaC = min(etaMax,etaA)
            else:
                etaC = min(etaMax,max(etaA,gamma*self.etaLast**2))
        else:
            etaC = etaMax
        logEvent("etaC "+repr(etaC))
        eta = min(etaMax,max(etaC,etaMin))
        self.etaLast = eta
        self.norm_r_last = self.norm_r
        self.linearSolver.setResTol(rtol=eta,atol=self.linearSolver.atol_r)

    def solve(self,u,r=None,b=None,par_u=None,par_r=None,linear=False):
        """ Solves the non-linear system :math:`F(u) = b`.

        Parameters
        ----------
        u : :class:`numpy.ndarray`
            Solution vector.
        r : :class:`numpy.ndarray`
            Residual vector, :math:`r = b - F(u)`
        b : :class:`numpy.ndarray` (ARB - not sure this is always true)
            Right hand side vector
        par_u : :class:`proteus.LinearAlgebraTools.ParVec_petsc4py`
            Parallel solution vector.
        par_r : :class:`proteus.LinearAlgebraTools.ParVec_petsc4py`
            Parallel residual vector, :math:`r = b - F(u)`
        """
        from . import Viewers
        memory()
        if self.linearSolver.computeEigenvalues:
            self.u0[:]=u
        r=self.solveInitialize(u,r,b)
        if par_u is not None:
            #allow linear solver to know what type of assembly to use
            self.linearSolver.par_fullOverlap = self.par_fullOverlap
            #no overlap
            if not self.par_fullOverlap:
                par_r.scatter_reverse_add()
            else:
                #no overlap or overlap (until we compute norms over only owned dof)
                par_r.scatter_forward_insert()
        self.norm_r0 = self.norm(r)
        self.norm_r_hist = []
        self.norm_du_hist = []
        self.gammaK_max=0.0
        self.linearSolverFailed = False
        while (not self.converged(r) and
               not self.failed()):
            logEvent("   NumericalAnalytics NewtonIteration: %d, NewtonNorm: %12.5e"
                     %(self.its-1, self.norm_r), level=7)
            logEvent("   Newton it %d norm(r) = %12.5e  \t\t norm(r)/(rtol*norm(r0)+atol) = %g test=%s"
                     % (self.its-1,self.norm_r,(old_div(self.norm_r,(self.rtol_r*self.norm_r0+self.atol_r))),self.convergenceTest),level=1)
            if self.updateJacobian or self.fullNewton:
                self.updateJacobian = False
                self.F.getJacobian(self.J)
                if self.linearSolver.computeEigenvalues:
                    logEvent("Calculating eigenvalues of J^t J")
                    self.JLast[:]=self.J
                    self.J_t_J[:]=self.J
                    self.J_t_J *= numpy.transpose(self.J)
                    self.JLsolver.prepare()#eigenvalue calc happens in prepare
                    self.norm_2_J_current = sqrt(max(self.JLsolver.eigenvalues_r))
                    try:
                        self.norm_2_Jinv_current = old_div(1.0,sqrt(min(self.JLsolver.eigenvalues_r)))
                    except Exception:
                        # singular to machine precision: report an infinite inverse norm
                        logEvent("Norm of J_inv_current is singular to machine prection 1/sqrt("+repr(min(self.JLsolver.eigenvalues_r))+")")
                        self.norm_2_Jinv_current = np.inf
                    self.kappa_current = self.norm_2_J_current*self.norm_2_Jinv_current
                    self.betaK_current = self.norm_2_Jinv_current
                self.linearSolver.prepare(b=r,newton_its=self.its-1)
            self.du[:]=0.0
            if not self.directSolver:
                if self.EWtol:
                    self.setLinearSolverTolerance(r)
            if not self.linearSolverFailed:
                self.linearSolver.solve(u=self.du,b=r,par_u=self.par_du,par_b=par_r)
                self.linearSolverFailed = self.linearSolver.failed()
            u-=self.du
            if par_u is not None:
                par_u.scatter_forward_insert()
            if linear:
                r[:]=0
                self.computeRates = False
            else:
                self.computeResidual(u,r,b)
                if par_r is not None:
                    #no overlap
                    if not self.par_fullOverlap:
                        par_r.scatter_reverse_add()
                    else:
                        par_r.scatter_forward_insert()
            #print "global r",r
            if self.linearSolver.computeEigenvalues:
                #approximate Lipschitz constant of J
                logEvent("Calculating eigenvalues of dJ^t dJ")
                self.F.getJacobian(self.dJ_t_dJ)
                self.dJ_t_dJ-=self.JLast
                self.dJ_t_dJ *= numpy.transpose(self.dJ_t_dJ)
                self.dJLsolver.prepare()
                self.norm_2_dJ_current = sqrt(max(self.dJLsolver.eigenvalues_r))
                self.etaK_current = self.W*self.norm(self.du)
                self.gammaK_current = old_div(self.norm_2_dJ_current,self.etaK_current)
                self.gammaK_max = max(self.gammaK_current,self.gammaK_max)
                self.norm_r_hist.append(self.W*self.norm(r))
                self.norm_du_hist.append(self.W*self.unorm(self.du))
                if self.its  == 1:
                    self.betaK_0 = self.betaK_current
                    self.etaK_0 = self.etaK_current
                if self.its  == 2:
                    self.betaK_1 = self.betaK_current
                    self.etaK_1 = self.etaK_current
                print("it = ",self.its)
                print("beta(|Jinv|)  ",self.betaK_current)
                print("eta(|du|)     ",self.etaK_current)
                print("gamma(Lip J') ",self.gammaK_current)
                print("gammaM(Lip J')",self.gammaK_max)
                print("kappa(cond(J))",self.kappa_current)
                if self.betaK_current*self.etaK_current*self.gammaK_current <= 0.5:
                    try:
                        print("r         ",old_div((1.0+sqrt(1.0-2.0*self.betaK_current*self.etaK_current*self.gammaK_current)),(self.betaK_current*self.gammaK_current)))
                    except Exception:
                        pass
                if self.betaK_current*self.etaK_current*self.gammaK_max <= 0.5:
                    try:
                        print("r_max     ",old_div((1.0+sqrt(1.0-2.0*self.betaK_current*self.etaK_current*self.gammaK_max)),(self.betaK_current*self.gammaK_max)))
                    except Exception:
                        pass
                print("lambda_max",max(self.linearSolver.eigenvalues_r))
                print("lambda_i_max",max(self.linearSolver.eigenvalues_i))
                print("norm_J",self.norm_2_J_current)
                print("lambda_min",min(self.linearSolver.eigenvalues_r))
                print("lambda_i_min",min(self.linearSolver.eigenvalues_i))
            if self.lineSearch:
                norm_r_cur = self.norm(r)
                ls_its = 0
                #print norm_r_cur,self.atol_r,self.rtol_r
                #            while ( (norm_r_cur >= 0.99 * self.norm_r + self.atol_r) and
                #                    (ls_its < self.maxLSits) and
                #                    norm_r_cur/norm_r_last < 1.0):
                if norm_r_cur > self.rtol_r*self.norm_r0 + self.atol_r:#make sure hasn't converged already
                    while ( (norm_r_cur >= 0.9999 * self.norm_r) and
                            (ls_its < self.maxLSits)):
                        self.convergingIts = 0
                        ls_its +=1
                        # halve the step and re-evaluate the residual
                        self.du *= 0.5
                        u += self.du
                        if par_u is not None:
                            par_u.scatter_forward_insert()
                        self.computeResidual(u,r,b)
                        #no overlap
                        if par_r is not None:
                            #no overlap
                            if not self.par_fullOverlap:
                                par_r.scatter_reverse_add()
                            else:
                                par_r.scatter_forward_insert()
                        norm_r_cur = self.norm(r)
                        logEvent("""ls #%d norm_r_cur=%s atol=%g rtol=%g""" % (ls_its,
                                                                               norm_r_cur,
                                                                               self.atol_r,
                                                                               self.rtol_r))
                if ls_its > 0:
                    logEvent("Linesearches = %i" % ls_its,level=3)
        else:
            # while/else: runs once the convergence/failure test stops the loop
            if self.linearSolver.computeEigenvalues:
                try:
                    if self.betaK_0*self.etaK_0*self.gammaK_max <= 0.5:
                        print("r_{-,0}     ",old_div((1.0+sqrt(1.0-2.0*self.betaK_0*self.etaK_0*self.gammaK_max)),(self.betaK_0*self.gammaK_max)))
                    if self.betaK_1*self.etaK_1*self.gammaK_max <= 0.5 and self.its > 1:
                        print("r_{-,1}     ",old_div((1.0+sqrt(1.0-2.0*self.betaK_1*self.etaK_1*self.gammaK_max)),(self.betaK_1*self.gammaK_max)))
                except Exception:
                    pass
                print("beta0*eta0*gamma ",self.betaK_0*self.etaK_0*self.gammaK_max)
                if Viewers.viewerType == 'gnuplot':
                    max_r = max(1.0,max(self.linearSolver.eigenvalues_r))
                    max_i = max(1.0,max(self.linearSolver.eigenvalues_i))
                    for lambda_r,lambda_i in zip(self.linearSolver.eigenvalues_r,self.linearSolver.eigenvalues_i):
                        Viewers.datFile.write("%12.5e %12.5e \n" % (old_div(lambda_r,max_r),old_div(lambda_i,max_i)))
                    Viewers.datFile.write("\n \n")
                    cmd = "set term x11 %i; plot \'%s\' index %i with points title \"%s\" \n" % (Viewers.windowNumber,
                                                                                                 Viewers.datFilename,
                                                                                                 Viewers.plotNumber,
                                                                                                 'scaled eigenvalues')
                    Viewers.cmdFile.write(cmd)
                    Viewers.viewerPipe.write(cmd)
                    Viewers.newPlot()
                    Viewers.newWindow()
                    for it,r in zip(list(range(len(self.norm_r_hist))),self.norm_r_hist):
                        Viewers.datFile.write("%12.5e %12.5e \n" % (it,math.log(old_div(r,self.norm_r_hist[0]))))
                    Viewers.datFile.write("\n \n")
                    cmd = "set term x11 %i; plot \'%s\' index %i with linespoints title \"%s\" \n" % (Viewers.windowNumber,
                                                                                                      Viewers.datFilename,
                                                                                                      Viewers.plotNumber,
                                                                                                      'log(r)/log(r0) history')
                    Viewers.cmdFile.write(cmd)
                    Viewers.viewerPipe.write(cmd)
                    Viewers.newPlot()
                    Viewers.newWindow()
                    for it,du in zip(list(range(len(self.norm_du_hist))),self.norm_du_hist):
                        Viewers.datFile.write("%12.5e %12.5e \n" % (it,math.log(old_div(du,self.norm_du_hist[0]))))
                    Viewers.datFile.write("\n \n")
                    cmd = "set term x11 %i; plot \'%s\' index %i with linespoints title \"%s\" \n" % (Viewers.windowNumber,
                                                                                                      Viewers.datFilename,
                                                                                                      Viewers.plotNumber,
                                                                                                      'log(du) history')
                    Viewers.cmdFile.write(cmd)
                    Viewers.viewerPipe.write(cmd)
                    Viewers.newPlot()
                    Viewers.newWindow()
                #raw_input("wait")
            logEvent("   NumericalAnalytics NewtonIteration: %d, NewtonNorm: %12.5e"
                     %(self.its-1, self.norm_r), level=7)
            logEvent("   Newton it %d norm(r) = %12.5e  \t\t norm(r)/(rtol*norm(r0)+atol) = %12.5e"
                     % (self.its,self.norm_r,(old_div(self.norm_r,(self.rtol_r*self.norm_r0+self.atol_r)))),level=1)
            logEvent(memory("Newton","Newton"),level=4)
            return self.failedFlag
        # NOTE(review): the while/else above always returns, so the code below
        # is unreachable; kept for fidelity with the original implementation.
        logEvent("   NumericalAnalytics NewtonIteration: %d, NewtonNorm: %12.5e"
                 %(self.its-1, self.norm_r), level=7)
        logEvent("   Newton it %d norm(r) = %12.5e  \t\t norm(r)/(rtol*norm(r0)+atol) = %12.5e"
                 % (self.its,self.norm_r,(old_div(self.norm_r,(self.rtol_r*self.norm_r0+self.atol_r)))),level=1)
        logEvent(memory("Newton","Newton"),level=4)
class AddedMassNewton(Newton):
    """Newton driver for the added-mass model: solves once per imposed unit
    acceleration mode (6 in 3D; surge, sway and yaw in 2D), but only when the
    current time has reached the next scheduled solve time."""
    def solve(self,u,r=None,b=None,par_u=None,par_r=None):
        if self.F.timeIntegration.t >= self.F.coefficients.next_solve:
            # schedule the following solve one solve_rate interval later
            self.F.coefficients.next_solve += self.F.coefficients.solve_rate
            self.F.coefficients.updated_global = True
            if self.F.coefficients.nd == 3:
                accelerations = list(range(6))
            elif self.F.coefficients.nd == 2:
                accelerations = [0,1,5]
            else:
                # unsupported spatial dimension; abort the run
                # NOTE(review): exit(1) kills the process -- consider raising instead
                exit(1)
            for i in accelerations:
                self.F.added_mass_i=i
                Newton.solve(self,u,r,b,par_u,par_r)
        else:
            self.F.coefficients.updated_global = False
            # FIX: report the next scheduled solve time (next_solve), not the
            # solve interval (solve_rate), which the message previously showed
            logEvent("Skipping  model AddedMass; next solve at t={t}".format(t=self.F.coefficients.next_solve))
class MoveMeshMonitorNewton(Newton):
    """Newton driver that repeats the mesh-monitor solve ``ntimes_solved``
    times per call, refreshing quadrature and coefficient state between
    passes."""
    def solve(self,u,r=None,b=None,par_u=None,par_r=None):
        for i in range(self.F.coefficients.ntimes_solved):
            # expose the current pass index to the coefficients object
            self.F.coefficients.ntimes_i = i
            if i > 0:
                # refresh geometry/quadrature from the previous pass before re-solving
                self.F.coefficients.model.tLast_mesh = self.F.coefficients.t_last
                self.F.coefficients.model.calculateQuadrature()
                self.F.coefficients.preStep(t=None)
            Newton.solve(self,u,r,b,par_u,par_r)
            if i < self.F.coefficients.ntimes_solved-1:
                # postStep is skipped on the final pass -- presumably handled
                # by the outer time stepper; TODO confirm
                self.F.coefficients.postStep(t=None)
class TwoStageNewton(Newton):
    """Solves a 2 Stage problem via Newton's solve.

    The model must expose a ``stage`` attribute and set
    ``useTwoStageNewton`` to True; otherwise a single standard Newton
    solve is performed (with a warning)."""
    def solve(self,u,r=None,b=None,par_u=None,par_r=None):
        r""" Solves a 2 Stage problem via Newton's solve"""
        #####################################
        # ********** FIRST STAGE ********** #
        #####################################
        logEvent("   FIRST STAGE",level=1)
        hasStage = hasattr(self.F,'stage') and hasattr(self.F,'useTwoStageNewton') and self.F.useTwoStageNewton==True
        if hasStage==False:
            logEvent("   WARNING: TwoStageNewton will consider a single stage",level=1)
        else:
            self.F.stage = 1
        Newton.solve(self,u,r,b,par_u,par_r)
        ######################################
        # ********** SECOND STAGE ********** #
        ######################################
        if hasStage==False:
            return self.failedFlag
        else:
            logEvent("   SECOND STAGE",level=1)
            self.F.stage = 2
            Newton.solve(self,u,r,b,par_u,par_r)
            # stage bookkeeping (resetting self.F.stage) is left to the model
            return self.failedFlag
class ExplicitLumpedMassMatrixShallowWaterEquationsSolver(Newton):
    """
    This is a fake solver meant to be used with optimized code
    A simple iterative solver that is Newton's method
    if you give it the right Jacobian

    With a lumped mass matrix the explicit update is the residual itself,
    so no linear solve is performed: the residual call produces the new
    solution, an FCT/convex-limiting step enforces bounds on the water
    height, and a second residual call distributes the limited solution
    back to the per-component dof arrays.
    """
    def solve(self,u,r=None,b=None,par_u=None,par_r=None):
        ######################
        # CALCULATE SOLUTION #
        ######################
        self.F.secondCallCalculateResidual = 0
        self.computeResidual(u,r,b)
        # explicit update: the residual IS the new solution
        u[:] = r
        if par_u is not None:
            par_u.scatter_forward_insert()
        ############################
        # FCT STEP ON WATER HEIGHT #
        ############################
        logEvent("   FCT Step/Convex Limiting", level=1)
        self.F.FCTStep()
        if par_u is not None:
            par_u.scatter_forward_insert()
        #############################################
        # UPDATE SOLUTION THROUGH calculateResidual #
        #############################################
        logEvent("   Udpating SWFlow solution", level=1)
        self.F.secondCallCalculateResidual = 1
        self.computeResidual(u,r,b)
        if par_u is not None:
            par_u.scatter_forward_insert()
        self.F.check_positivity_water_height = True
class ExplicitConsistentMassMatrixShallowWaterEquationsSolver(Newton):
    """
    This is a fake solver meant to be used with optimized code
    A simple iterative solver that is Newton's method
    if you give it the right Jacobian

    With a consistent mass matrix one linear solve (via Newton.solve with
    linear=True) is needed for the explicit update; an FCT/convex-limiting
    step then enforces bounds on the water height before the limited
    solution is distributed back to the per-component dof arrays.
    """
    def solve(self,u,r=None,b=None,par_u=None,par_r=None):
        ######################
        # CALCULATE SOLUTION #
        ######################
        self.F.secondCallCalculateResidual = 0
        logEvent("   Entropy viscosity solution with consistent mass matrix", level=1)
        # linear=True: a single linear solve, no nonlinear iteration
        Newton.solve(self,u,r,b,par_u,par_r,linear=True)
        if par_u is not None:
            par_u.scatter_forward_insert()
        ############################
        # FCT STEP ON WATER HEIGHT #
        ############################
        logEvent("   FCT Step/Convex Limiting", level=1)
        self.F.FCTStep()
        if par_u is not None:
            par_u.scatter_forward_insert()
        # DISTRIBUTE SOLUTION FROM u to u[ci].dof
        self.F.secondCallCalculateResidual = 1
        self.computeResidual(u,r,b)
        if par_u is not None:
            par_u.scatter_forward_insert()
        self.F.check_positivity_water_height = True
class ExplicitLumpedMassMatrix(Newton):
    """
    This is a fake solver meant to be used with optimized code
    A simple iterative solver that is Newton's method
    if you give it the right Jacobian

    Explicit update with a lumped mass matrix: compute fluxes, apply the
    model's FCT limiting step, then redistribute the limited solution via
    an auxiliary residual call.
    """
    def solve(self,u,r=None,b=None,par_u=None,par_r=None):
        # compute fluxes
        self.computeResidual(u,r,b)
        #u[:]=self.F.uLow
        ############
        # FCT STEP #
        ############
        self.F.kth_FCT_step()
        ###########################################
        # DISTRIBUTE SOLUTION FROM u to u[ci].dof #
        ###########################################
        self.F.auxiliaryCallCalculateResidual = True
        self.computeResidual(u,r,b)
        self.F.auxiliaryCallCalculateResidual = False

    def no_solve(self,u,r=None,b=None,par_u=None,par_r=None):
        # alternate (currently unused) variant: take the residual as the new
        # solution, then limit only when the coefficients request FCT
        self.computeResidual(u,r,b)
        u[:] = r
        ############
        # FCT STEP #
        ############
        if hasattr(self.F.coefficients,'FCT') and self.F.coefficients.FCT==True:
            self.F.FCTStep()
        ###########################################
        # DISTRIBUTE SOLUTION FROM u to u[ci].dof #
        ###########################################
        self.F.auxiliaryCallCalculateResidual = True
        self.computeResidual(u,r,b)
        self.F.auxiliaryCallCalculateResidual = False
class ExplicitConsistentMassMatrixWithRedistancing(Newton):
    """
    This is a fake solver meant to be used with optimized code
    A simple iterative solver that is Newton's method
    if you give it the right Jacobian

    Three phases per call:
      1. optional smoothing solve (lazily building and caching the
         smoothing matrix and its sparse factor),
      2. the main explicit solve with the consistent mass matrix,
      3. an iterative redistancing loop driven by the model's
         redistancing residual.

    FIX: ``self.F.SmoothingMatrix == None`` replaced by ``is None``
    (identity test; ``==`` on a matrix object is the wrong comparison).
    """
    def solve(self,u,r=None,b=None,par_u=None,par_r=None):
        if (self.F.coefficients.DO_SMOOTHING and self.F.coefficients.pure_redistancing==False):
            logEvent("***** Doing smoothing *****",2)
            self.F.getRhsSmoothing(u,r)
            if (self.F.SmoothingMatrix is None):
                # first call: build the smoothing matrix and its own sparse
                # factor, remembering the Jacobian's factor so the two can be
                # swapped in and out of the shared linear solver
                self.F.getSmoothingMatrix()
                self.linearSolver.L = self.F.SmoothingMatrix
                # Save sparse factor for Jacobian
                self.F.Jacobian_sparseFactor = self.linearSolver.sparseFactor
                # create a new sparse factor. For now use the same as the Jacobian
                self.F.SmoothingMatrix_sparseFactor = superluWrappers.SparseFactor(self.linearSolver.n)
                # reference the self.linearSolver.sparseFactor to use the new sparse Factor
                self.linearSolver.sparseFactor = self.F.SmoothingMatrix_sparseFactor
                # Compute the new sparse factor; i.e., self.F.SmoothingMatrix_sparseFactor
                self.linearSolver.prepare(b=r)
            self.du[:]=0
            # Set sparse factors for the smoothing solve
            self.linearSolver.L = self.F.SmoothingMatrix
            self.linearSolver.sparseFactor = self.F.SmoothingMatrix_sparseFactor
            if not self.directSolver:
                if self.EWtol:
                    self.setLinearSolverTolerance(r)
            if not self.linearSolverFailed:
                self.linearSolver.solve(u=self.du,b=r,par_u=self.par_du,par_b=par_r)
                self.linearSolverFailed = self.linearSolver.failed()
            # the smoothed field becomes both the working solution and uStar
            u[:]=self.du
            self.F.uStar_dof[:] = u
        else:
            self.F.uStar_dof[:] = self.F.u_dof_old[:]
        #############################
        ### COMPUTE MAIN SOLUTION ###
        #############################
        if (self.F.coefficients.pure_redistancing==False):
            self.computeResidual(u,r,b)
            if self.updateJacobian or self.fullNewton:
                self.F.getJacobian(self.J)
                # set linear solver to be the jacobian
                if (self.F.coefficients.DO_SMOOTHING):
                    self.linearSolver.L = self.J
                    # set sparse factors to be the jacobian factor
                    self.linearSolver.sparseFactor = self.F.Jacobian_sparseFactor
                self.linearSolver.prepare(b=r)
                self.updateJacobian = False
            self.du[:]=0.0
            if (self.F.coefficients.DO_SMOOTHING):
                # Set sparse factors back to the Jacobian's
                self.linearSolver.L = self.J
                self.linearSolver.sparseFactor = self.F.Jacobian_sparseFactor
            if not self.directSolver:
                if self.EWtol:
                    self.setLinearSolverTolerance(r)
            if not self.linearSolverFailed:
                self.linearSolver.solve(u=self.du,b=r,par_u=self.par_du,par_b=par_r)
                self.linearSolverFailed = self.linearSolver.failed()
            u-=self.du
            # DISTRIBUTE SOLUTION FROM u to u[ci].dof
            self.F.auxiliaryCallCalculateResidual = True
            self.computeResidual(u,r,b)
            self.F.auxiliaryCallCalculateResidual = False
            # self.F.setUnknowns(self.F.timeIntegration.u)
        ############################
        ##### Do re-distancing #####
        ############################
        logEvent("***** Starting re-distancing *****",2)
        numIter=0
        self.F.L2_norm_redistancing = self.F.getRedistancingResidual(u,r)
        if(self.F.coefficients.DO_REDISTANCING):
            if (self.F.coefficients.pure_redistancing==True):
                self.F.coefficients.maxIter_redistancing=1
            # iterate until the redistancing residual drops below the
            # mesh-scaled tolerance or the iteration cap is reached
            while (self.F.L2_norm_redistancing > self.F.coefficients.redistancing_tolerance*self.F.mesh.h
                   and numIter < self.F.coefficients.maxIter_redistancing):
                self.F.coefficients.u_dof_old = numpy.copy(self.F.u[0].dof)
                self.F.getRedistancingResidual(u,r)
                if self.updateJacobian or self.fullNewton:
                    self.updateJacobian = False
                    self.F.getJacobian(self.J)
                    self.linearSolver.prepare(b=r)
                self.du[:]=0.0
                if not self.directSolver:
                    if self.EWtol:
                        self.setLinearSolverTolerance(r)
                if not self.linearSolverFailed:
                    self.linearSolver.solve(u=self.du,b=r,par_u=self.par_du,par_b=par_r)
                    self.linearSolverFailed = self.linearSolver.failed()
                u-=self.du
                self.F.L2_norm_redistancing = self.F.getRedistancingResidual(u,r)
                numIter += 1
            #self.F.redistancing_L2_norm_history.append(
            #    (self.F.timeIntegration.t,
            #     numIter,
            #     self.F.L2_norm_redistancing,
            #     self.F.coefficients.redistancing_tolerance*self.F.mesh.h))
        logEvent("***** Re-distancing finished. Number of iterations = "+str(numIter)
                 + ". L2 norm of error: "+str(self.F.L2_norm_redistancing)
                 + ". Tolerance: "+str(self.F.coefficients.redistancing_tolerance*self.F.mesh.h)
                 ,2)
class ExplicitConsistentMassMatrixForVOF(Newton):
    """
    One-pass solver for an explicit VOF update with a consistent mass matrix.

    This is a fake solver meant to be used with optimized code:
    it performs a single linear solve (no Newton iteration loop),
    optionally applies an FCT limiting step, then re-evaluates the
    residual so the model distributes the solution to its DOF vectors.
    A simple iterative solver that is Newton's method
    if you give it the right Jacobian.
    """
    def solve(self,u,r=None,b=None,par_u=None,par_r=None):
        # Solve J*du = r once and update u -= du; J is the (constant)
        # consistent mass matrix, so it is assembled/factored only once.
        #########################
        # COMPUTE MAIN SOLUTION #
        #########################
        self.computeResidual(u,r,b)
        if self.updateJacobian or self.fullNewton:
            self.F.getJacobian(self.J)
            self.linearSolver.prepare(b=r)
            # reuse the factorization on subsequent calls
            self.updateJacobian = False
        self.du[:]=0.0
        # Set sparse factors
        if not self.directSolver:
            if self.EWtol:
                # Eisenstat-Walker style tolerance for the inner linear solve
                self.setLinearSolverTolerance(r)
        if not self.linearSolverFailed:
            self.linearSolver.solve(u=self.du,b=r,par_u=self.par_du,par_b=par_r)
            self.linearSolverFailed = self.linearSolver.failed()
        u-=self.du
        ############
        # FCT STEP #
        ############
        # Flux-corrected transport limiting of the high-order update.
        if self.F.coefficients.FCT==True:
            self.F.FCTStep()
        ###########################################
        # DISTRIBUTE SOLUTION FROM u to u[ci].dof #
        ###########################################
        # A residual call with this flag set pushes u into the model's
        # u[ci].dof vectors (no extra Newton work is done).
        self.F.auxiliaryCallCalculateResidual = True
        self.computeResidual(u,r,b)
        self.F.auxiliaryCallCalculateResidual = False
class NewtonWithL2ProjectionForMassCorrection(Newton):
    """
    Newton solver for the mass-correction model followed by an L2 projection
    of the mass-corrected VOF field back onto the VOF model's DOFs.

    This is a fake solver meant to be used with optimized code.
    A simple iterative solver that is Newton's method
    if you give it the right Jacobian.
    """
    def solve(self,u,r=None,b=None,par_u=None,par_r=None):
        """
        Solve F(u) = b
        b -- right hand side
        u -- solution
        r -- F(u) - b

        After the Newton loop finishes (and did not fail), solve one
        mass-matrix system to L2-project the corrected VOF, limit the
        projection with an FCT step, and copy the limited result into
        the VOF model's DOFs.
        """
        from . import Viewers
        memory()
        if self.linearSolver.computeEigenvalues:
            self.u0[:]=u
        r=self.solveInitialize(u,r,b)
        if par_u is not None:
            #allow linear solver to know what type of assembly to use
            self.linearSolver.par_fullOverlap = self.par_fullOverlap
            #no overlap
            if not self.par_fullOverlap:
                par_r.scatter_reverse_add()
            else:
                #no overlap or overlap (until we compute norms over only owned dof)
                par_r.scatter_forward_insert()
        self.norm_r0 = self.norm(r)
        self.norm_r_hist = []
        self.norm_du_hist = []
        self.gammaK_max=0.0
        self.linearSolverFailed = False
        while (not self.converged(r) and
               not self.failed()):
            logEvent("   Newton it %d norm(r) = %12.5e \t\t norm(r)/(rtol*norm(r0)+atol) = %g test=%s"
                     % (self.its-1,self.norm_r,(old_div(self.norm_r,(self.rtol_r*self.norm_r0+self.atol_r))),self.convergenceTest),level=1)
            if self.updateJacobian or self.fullNewton:
                self.updateJacobian = False
                self.F.getJacobian(self.J)
                # Set linear solver to be the jacobian
                self.linearSolver.L = self.J
                # Save sparse factor for Jacobian. Just for the first time
                if (self.F.Jacobian_sparseFactor is None):
                    self.F.Jacobian_sparseFactor = self.linearSolver.sparseFactor #(MQL)
                # Set sparse factor to be the jacobian sparse factor
                self.linearSolver.sparseFactor = self.F.Jacobian_sparseFactor
                if self.linearSolver.computeEigenvalues:
                    logEvent("Calculating eigenvalues of J^t J")
                    self.JLast[:]=self.J
                    self.J_t_J[:]=self.J
                    self.J_t_J *= numpy.transpose(self.J)
                    self.JLsolver.prepare()#eigenvalue calc happens in prepare
                    self.norm_2_J_current = sqrt(max(self.JLsolver.eigenvalues_r))
                    # sqrt/division can fail when the smallest eigenvalue is
                    # zero or negative; catch only real errors (a bare except
                    # here would also swallow KeyboardInterrupt/SystemExit).
                    try:
                        self.norm_2_Jinv_current = old_div(1.0,sqrt(min(self.JLsolver.eigenvalues_r)))
                    except Exception:
                        logEvent("Norm of J_inv_current is singular to machine precision 1/sqrt("+repr(min(self.JLsolver.eigenvalues_r))+")")
                        self.norm_2_Jinv_current = np.inf
                    self.kappa_current = self.norm_2_J_current*self.norm_2_Jinv_current
                    self.betaK_current = self.norm_2_Jinv_current
                self.linearSolver.prepare(b=r)
            self.du[:]=0.0
            # Set matrix of linear solver to be the Jacobian
            self.linearSolver.L = self.J
            # Set sparse factor
            self.linearSolver.sparseFactor = self.F.Jacobian_sparseFactor
            if not self.directSolver:
                if self.EWtol:
                    self.setLinearSolverTolerance(r)
            if not self.linearSolverFailed:
                self.linearSolver.solve(u=self.du,b=r,par_u=self.par_du,par_b=par_r)
                self.linearSolverFailed = self.linearSolver.failed()
            u-=self.du
            if par_u is not None:
                par_u.scatter_forward_insert()
            self.computeResidual(u,r,b)
            if par_r is not None:
                #no overlap
                if not self.par_fullOverlap:
                    par_r.scatter_reverse_add()
                else:
                    par_r.scatter_forward_insert()
        else:
            # while/else: runs when the loop exits normally (no break).
            logEvent("   Newton it %d norm(r) = %12.5e \t\t norm(r)/(rtol*norm(r0)+atol) = %12.5e"
                     % (self.its,self.norm_r,(old_div(self.norm_r,(self.rtol_r*self.norm_r0+self.atol_r)))),level=1)
            logEvent(memory("Newton","Newton"),level=4)
            if (self.failedFlag == True):
                return self.failedFlag
            else:
                logEvent("+++++ L2 projection of mass-corrected VOF +++++",level=2)
                if (self.F.MassMatrix is None):
                    self.F.getMassMatrix()
                # Set matrix of linear solver to be the Mass Matrix
                self.linearSolver.L = self.F.MassMatrix
                # Create a sparse factor for the Mass Matrix
                if (self.F.MassMatrix_sparseFactor is None):
                    self.F.MassMatrix_sparseFactor = superluWrappers.SparseFactor(self.linearSolver.n)
                # reference the self.linearSolver.sparseFactor to use the new sparse Factor
                self.linearSolver.sparseFactor = self.F.MassMatrix_sparseFactor
                # Compute the new sparse factor; i.e., self.F.MassMatrix_sparseFactor
                self.linearSolver.prepare(b=r)
                # Compute rhs for L2 projection and low (lumped) L2 projection
                self.F.setMassQuadratureEdgeBasedStabilizationMethods()
                r[:] = self.F.rhs_mass_correction
                # Solve mass matrix for L2 projection
                self.du[:]=0.0
                # Set linear matrix to be Mass Matrix
                self.linearSolver.L = self.F.MassMatrix
                # Set sparse factors to be the sparse factors of the mass matrix
                self.linearSolver.sparseFactor = self.F.MassMatrix_sparseFactor
                if not self.directSolver:
                    if self.EWtol:
                        self.setLinearSolverTolerance(r)
                if not self.linearSolverFailed:
                    self.linearSolver.solve(u=self.du,b=r,par_u=self.par_du,par_b=par_r)
                    self.linearSolverFailed = self.linearSolver.failed()
                # copy the solution to the L2p vector
                self.F.L2p_vof_mass_correction[:] = self.du
                # Perform limitation on L2 projection
                self.F.FCTStep()
                # Pass the solution to the DOFs of the VOF model
                #self.F.coefficients.vofModel.u[0].dof[:] = self.du
                self.F.coefficients.vofModel.u[0].dof[:] = self.F.limited_L2p_vof_mass_correction
                logEvent("   Newton it %d norm(r) = %12.5e \t\t norm(r)/(rtol*norm(r0)+atol) = %12.5e"
                         % (self.its,self.norm_r,(old_div(self.norm_r,(self.rtol_r*self.norm_r0+self.atol_r)))),level=1)
                logEvent(memory("Newton","Newton"),level=4)
        # Nonlinear solved finished.
        # L2 projection of corrected VOF solution at quad points
class CLSVOFNewton(Newton):
    """
    Newton solver for the CLSVOF (conservative level set / VOF) model.

    Wraps the base Newton iteration with model-specific stages: optional
    spin-up projection/redistancing of discontinuous initial conditions,
    interface-normal reconstruction, and a pre-redistancing solve before
    the main nonlinear CLSVOF solve.
    """
    def spinUpStep(self,u,r=None,b=None,par_u=None,par_r=None):
        """Solve the spin-up system once and seed the model's DOF vectors."""
        # Assemble residual and Jacobian for spin up step
        self.F.assembleSpinUpSystem(r,self.J)
        # For parallelization
        if par_u is not None:
            #allow linear solver to know what type of assembly to use
            self.linearSolver.par_fullOverlap = self.par_fullOverlap
            #no overlap
            if not self.par_fullOverlap:
                par_r.scatter_reverse_add()
            else:
                #no overlap or overlap (until we compute norms over only owned dof)
                par_r.scatter_forward_insert()
        #
        self.linearSolver.prepare(b=r)
        self.du[:]=0.0
        if not self.directSolver:
            if self.EWtol:
                self.setLinearSolverTolerance(r)
        if not self.linearSolverFailed:
            self.linearSolver.solve(u=self.du,b=r,par_u=self.par_du,par_b=par_r)
            self.linearSolverFailed = self.linearSolver.failed()
        u[:] = self.du
        # For parallelization
        if par_u is not None:
            par_u.scatter_forward_insert()
        #
        # Pass the solution to the corresponding vectors in the model
        self.F.u[0].dof[:] = u
        self.F.u_dof_old[:] = u
        self.F.u0_dof[:] = u #To compute metrics
    def getNormalReconstruction(self,u,r=None,b=None,par_u=None,par_r=None):
        """
        Reconstruct the interface normal components (qx,qy,qz) via a weighted
        L2 projection: lumped (the current default route) or consistent with
        FCT limiting.
        """
        # Assemble weighted matrix and rhs for consistent projection
        self.F.getNormalReconstruction(self.J)
        if self.F.consistentNormalReconstruction==False or True: # For the moment we make sure this is the only route
            logEvent("   ... Normal reconstruction via weighted lumped L2-projection ...",level=2)
            # TODO: After 1st paper is accepted we need to delete
            # timeStage, timeOrder and tStar vectors. Now we always use: CN+pre-stage
            if self.F.timeStage==1:
                self.F.projected_qx_tn[:] = old_div(self.F.rhs_qx,self.F.weighted_lumped_mass_matrix)
                self.F.projected_qy_tn[:] = old_div(self.F.rhs_qy,self.F.weighted_lumped_mass_matrix)
                self.F.projected_qz_tn[:] = old_div(self.F.rhs_qz,self.F.weighted_lumped_mass_matrix)
                # Update parallel vectors
                self.F.par_projected_qx_tn.scatter_forward_insert()
                self.F.par_projected_qy_tn.scatter_forward_insert()
                self.F.par_projected_qz_tn.scatter_forward_insert()
            else:
                self.F.projected_qx_tStar[:] = old_div(self.F.rhs_qx,self.F.weighted_lumped_mass_matrix)
                self.F.projected_qy_tStar[:] = old_div(self.F.rhs_qy,self.F.weighted_lumped_mass_matrix)
                self.F.projected_qz_tStar[:] = old_div(self.F.rhs_qz,self.F.weighted_lumped_mass_matrix)
                # Update parallel vectors
                self.F.par_projected_qx_tStar.scatter_forward_insert()
                self.F.par_projected_qy_tStar.scatter_forward_insert()
                self.F.par_projected_qz_tStar.scatter_forward_insert()
        else:
            # If the L2-projection is consistent we need to solve the linear systems
            logEvent("   ... Normal reconstruction via weighted consistent L2-projection ...",level=2)
            # allocate auxiliary vectors for FCT
            low_order_solution = numpy.zeros(r.size,'d')
            high_order_solution = numpy.zeros(r.size,'d')
            # solve for qx
            r[:] = self.F.rhs_qx[:]
            self.linearSolver.prepare(b=r)
            self.du[:]=0
            if not self.directSolver:
                if self.EWtol:
                    self.setLinearSolverTolerance(r)
            if not self.linearSolverFailed:
                self.linearSolver.solve(u=self.du,b=r,par_u=self.par_du,par_b=par_r)
                self.linearSolverFailed = self.linearSolver.failed()
            high_order_solution[:] = self.du
            low_order_solution[:] = old_div(self.F.rhs_qx,self.F.weighted_lumped_mass_matrix)
            # FCT STEP #
            if self.F.timeStage==1:
                self.F.FCTStep(self.F.projected_qx_tn,
                               self.F.u_dof_old,
                               low_order_solution,
                               high_order_solution,
                               self.J)
            else:
                self.F.FCTStep(self.F.projected_qx_tStar,
                               self.F.u_dof_old,
                               low_order_solution,
                               high_order_solution,
                               self.J)
            # solve for qy
            r[:] = self.F.rhs_qy[:]
            self.linearSolver.prepare(b=r)
            self.du[:]=0
            if not self.directSolver:
                if self.EWtol:
                    self.setLinearSolverTolerance(r)
            if not self.linearSolverFailed:
                self.linearSolver.solve(u=self.du,b=r,par_u=self.par_du,par_b=par_r)
                self.linearSolverFailed = self.linearSolver.failed()
            high_order_solution[:] = self.du
            low_order_solution[:] = old_div(self.F.rhs_qy,self.F.weighted_lumped_mass_matrix)
            # FCT STEP #
            if self.F.timeStage==1:
                self.F.FCTStep(self.F.projected_qy_tn,
                               self.F.u_dof_old,
                               low_order_solution,
                               high_order_solution,
                               self.J)
            else:
                self.F.FCTStep(self.F.projected_qy_tStar,
                               self.F.u_dof_old,
                               low_order_solution,
                               high_order_solution,
                               self.J)
            if self.F.nSpace_global==3:
                # solve for qz
                r[:] = self.F.rhs_qz[:]
                self.linearSolver.prepare(b=r)
                self.du[:]=0
                if not self.directSolver:
                    if self.EWtol:
                        self.setLinearSolverTolerance(r)
                if not self.linearSolverFailed:
                    self.linearSolver.solve(u=self.du,b=r,par_u=self.par_du,par_b=par_r)
                    self.linearSolverFailed = self.linearSolver.failed()
                high_order_solution[:] = self.du
                low_order_solution[:] = old_div(self.F.rhs_qz,self.F.weighted_lumped_mass_matrix)
                # FCT STEP #
                # BUGFIX: pass u_dof_old exactly as the qx/qy calls above do;
                # it was missing here, so the 3D branch called FCTStep with
                # one argument too few.
                if self.F.timeStage==1:
                    self.F.FCTStep(self.F.projected_qz_tn,
                                   self.F.u_dof_old,
                                   low_order_solution,
                                   high_order_solution,
                                   self.J)
                else:
                    self.F.FCTStep(self.F.projected_qz_tStar,
                                   self.F.u_dof_old,
                                   low_order_solution,
                                   high_order_solution,
                                   self.J)
            else:
                # 2D: no z-component of the normal
                if self.F.timeStage==1:
                    self.F.projected_qz_tn[:]=0
                else:
                    self.F.projected_qz_tStar[:]=0
    def project_disc_ICs(self,u,r=None,b=None,par_u=None,par_r=None):
        """Lumped L2 projection of discontinuous initial conditions into u."""
        self.F.getRhsL2Proj()
        self.F.projected_disc_ICs[:] = old_div(self.F.rhs_l2_proj,self.F.lumped_mass_matrix)
        self.F.par_projected_disc_ICs.scatter_forward_insert()
        # output of this function
        u[:] = self.F.projected_disc_ICs
        # pass u to self.F.u[0].dof and recompute interface locator
        self.F.preRedistancingStage = 0
        self.computeResidual(u,r,b)
        # save projected solution also in old DOFs
        self.F.u_dof_old[:] = self.F.projected_disc_ICs
    def redistance_disc_ICs(self,u,r=None,b=None,par_u=None,par_r=None,max_num_iters=100):
        """
        Iterate single-step Newton solves (with normal reconstruction) to
        redistance the projected initial condition until the residual norm
        drops below atol_r or max_num_iters is reached.
        """
        # save and set some variables
        self.F.preRedistancingStage = 1
        maxIts = self.maxIts
        self.maxIts=1
        # set tolerances for this spin up stage
        tol = self.atol_r
        norm_r0 = self.norm(r)
        norm_r = 1.0*norm_r0
        num_iters = 0
        # Loop
        while (norm_r > tol or num_iters==0):
            self.getNormalReconstruction(u,r,b,par_u,par_r)
            Newton.solve(self,u,r,b,par_u,par_r)
            self.F.u_dof_old[:] = self.F.u[0].dof
            # compute norm
            norm_r = self.norm(r)
            num_iters += 1
            # break if num of iterations is large
            if num_iters > max_num_iters:
                break
        # set back variables
        self.maxIts = maxIts
        self.F.preRedistancingStage = 0
        self.failedFlag=False
    def spinup_for_disc_ICs(self,u,r=None,b=None,par_u=None,par_r=None):
        """Project then redistance discontinuous ICs in staged sweeps."""
        logEvent("+++++ Spin up to start with disc ICs +++++",level=2)
        ########################
        # lumped L2 projection #
        ########################
        self.project_disc_ICs(u,r,b,par_u,par_r)
        ################################
        # redistance initial condition #
        ################################
        # save alpha and freeze_interface_...
        alpha = self.F.coefficients.alpha
        freeze_interface = self.F.coefficients.freeze_interface_during_preRedistancing
        # STEP 2: Do redistancing with interface frozen
        self.F.coefficients.alpha = 0
        self.F.coefficients.freeze_interface_during_preRedistancing = True
        self.redistance_disc_ICs(u,r,b,par_u,par_r,max_num_iters=100) # no more than 100 steps
        # STEP 3: Do 1 step of redistancing with all but the interface frozen
        self.F.coefficients.alpha = 0
        self.F.coefficients.freeze_interface_during_preRedistancing = True
        self.F.interface_locator[:] = np.logical_not(self.F.interface_locator)
        self.redistance_disc_ICs(u,r,b,par_u,par_r,max_num_iters=1)
        # STEP 4: Do 1 step of redistancing with nothing frozen
        self.F.coefficients.alpha = 1E9
        self.F.coefficients.freeze_interface_during_preRedistancing = False
        self.redistance_disc_ICs(u,r,b,par_u,par_r,max_num_iters=1)
        # to recompute interface locator
        self.F.preRedistancingStage = 0
        self.computeResidual(u,r,b)
        # set back alpha and freeze_interface_...
        self.F.coefficients.alpha = alpha
        self.F.coefficients.freeze_interface_during_preRedistancing = freeze_interface
        # save computed solution into old dofs
        self.F.u_dof_old[:] = self.F.u[0].dof
    def solve(self,u,r=None,b=None,par_u=None,par_r=None):
        """Spin-up (if needed), pre-redistance, then solve nonlinear CLSVOF."""
        if self.F.coefficients.disc_ICs:
            self.spinup_for_disc_ICs(u,r,b,par_u,par_r)
            # spin up only once
            self.F.coefficients.disc_ICs=False
        # ************************************************ #
        # *************** PRE REDISTANCING *************** #
        # ************************************************ #
        logEvent("+++++ Pre re-distancing +++++",level=2)
        self.getNormalReconstruction(u,r,b,par_u,par_r)
        # PRE-REDISTANCE #
        self.F.preRedistancingStage = 1
        maxIts = self.maxIts
        self.maxIts=5
        Newton.solve(self,u,r,b,par_u,par_r)
        # set back variables
        self.maxIts = maxIts
        self.F.preRedistancingStage = 0
        self.failedFlag=False
        # ********************************************* #
        # *************** CLSVOF Solver *************** #
        # ********************************************* #
        logEvent("+++++ Nonlinear CLSVOF +++++",level=2)
        # GET NORMAL RECONSTRUCTION #
        self.getNormalReconstruction(u,r,b,par_u,par_r)
        # NOTE: we can set u[0].dof back to be u_dof_old or used the pre-redistanced
        # SOLVE NON-LINEAR SYSTEM FOR FIRST STAGE #
        Newton.solve(self,u,r,b,par_u,par_r)
        # save number of newton iterations
        self.F.newton_iterations_stage1 = self.its
        # ******************************************** #
        # ***** UPDATE VECTORS FOR VISUALIZATION ***** #
        # ******************************************** #
        self.F.par_vofDOFs.scatter_forward_insert()
    def solveOldMethod(self,u,r=None,b=None,par_u=None,par_r=None):
        """Legacy two-stage solve (optional spin-up, one or two time stages)."""
        # ******************************************** #
        # *************** SPIN UP STEP *************** #
        # ******************************************** #
        if self.F.coefficients.doSpinUpStep==True and self.F.spinUpStepTaken==False:
            logEvent("***** Spin up step for CLSVOF model *****",level=2)
            self.F.spinUpStepTaken=True
            self.spinUpStep(u,r,b,par_u,par_r)
        # ******************************************* #
        # *************** FIRST STAGE *************** #
        # ******************************************* #
        logEvent("+++++ First stage of nonlinear solver +++++",level=2)
        # GET NORMAL RECONSTRUCTION #
        self.getNormalReconstruction(u,r,b,par_u,par_r)
        # SOLVE NON-LINEAR SYSTEM FOR FIRST STAGE #
        Newton.solve(self,u,r,b,par_u,par_r)
        # save number of newton iterations
        self.F.newton_iterations_stage1 = self.its
        # ******************************************** #
        # *************** SECOND STAGE *************** #
        # ******************************************** #
        if self.F.coefficients.timeOrder==2:
            logEvent("+++++ Second stage of nonlinear solver +++++",level=2)
            self.F.timeStage=2
            # GET NORMAL RECONSTRUCTION #
            self.getNormalReconstruction(u,r,b,par_u,par_r)
            # SOLVE NON-LINEAR SYSTEM FOR SECOND STAGE #
            Newton.solve(self,u,r,b,par_u,par_r)
            self.F.timeStage=1
            # save number of newton iterations
            self.F.newton_iterations_stage2 = self.its
        # ******************************************** #
        # ***** UPDATE VECTORS FOR VISUALIZATION ***** #
        # ******************************************** #
        self.F.par_H_dof.scatter_forward_insert()
        self.F.quantDOFs[:] = self.F.H_dof
from . import deim_utils
class POD_Newton(Newton):
    """Newton's method on the reduced order system based on POD.

    The full-order solution u is projected onto a truncated SVD basis U
    (loaded from the file 'SVD_basis'); the Newton iteration is carried out
    on the DB-dimensional reduced coordinates, with the reduced Jacobian
    formed as U^T J U from the full CSR Jacobian.
    """
    # NOTE(review): duplicates the module-level `from . import deim_utils`
    # just above this class; harmless but redundant.
    from . import deim_utils
    def __init__(self,
                 linearSolver,
                 F,J=None,du=None,par_du=None,
                 rtol_r = 1.0e-4,
                 atol_r = 1.0e-16,
                 rtol_du = 1.0e-4,
                 atol_du = 1.0e-16,
                 maxIts = 100,
                 norm = l2Norm,
                 convergenceTest = 'r',
                 computeRates = True,
                 printInfo = True,
                 fullNewton=True,
                 directSolver=False,
                 EWtol=True,
                 maxLSits = 100,
                 use_deim=False):
        """Build the reduced basis and a dense LU solver for the reduced Jacobian."""
        Newton.__init__(self,
                        linearSolver,
                        F,J,du,par_du,
                        rtol_r,
                        atol_r,
                        rtol_du,
                        atol_du,
                        maxIts,
                        norm,
                        convergenceTest,
                        computeRates,
                        printInfo,
                        fullNewton,
                        directSolver,
                        EWtol,
                        maxLSits)
        #setup reduced basis for solution
        self.DB = 11 #number of basis vectors for solution
        # columns of U span the reduced solution space
        U = np.loadtxt('SVD_basis')
        self.U = U[:,0:self.DB]
        self.U_transpose = self.U.conj().T
        self.pod_J = np.zeros((self.DB,self.DB),'d')
        # dense LU on the (small) reduced Jacobian
        self.pod_linearSolver = LU(self.pod_J)
        self.J_rowptr,self.J_colind,self.J_nzval = self.J.getCSRrepresentation()
        self.pod_du = np.zeros(self.DB)
    def computeResidual(self,u,r,b):
        """
        Use DEIM algorithm to compute residual if use_deim is turned on
        Right now splits the evaluation into two 'temporal' (mass) and spatial piece
        As first step for DEIM still does full evaluation
        """
        if self.fullResidual:
            self.F.getResidual(u,r)
            if b is not None:
                r-=b
        else:
            # linearized residual r = J u (- b), dense or sparse Jacobian
            if type(self.J).__name__ == 'ndarray':
                r[:] = numpy.dot(u,self.J)
            elif type(self.J).__name__ == 'SparseMatrix':
                self.J.matvec(u,r)
            if b is not None:
                r-=b
    def norm(self,u):
        # norm of the reduced residual/update
        return self.norm_function(u)
    def solve(self,u,r=None,b=None,par_u=None,par_r=None):
        """
        Solve F(u) = b
        b -- right hand side
        u -- solution
        r -- F(u) - b

        Newton iteration in the reduced coordinates pod_u = U^T u; u and r
        are kept consistent with their reduced representations throughout.
        """
        # project the initial guess onto the reduced space (and back)
        pod_u = np.dot(self.U_transpose,u)
        u[:] = np.dot(self.U,pod_u)
        r=self.solveInitialize(u,r,b)
        pod_r = np.dot(self.U_transpose,r)
        self.norm_r0 = self.norm(pod_r)
        self.norm_r_hist = []
        self.norm_du_hist = []
        self.gammaK_max=0.0
        self.linearSolverFailed = False
        while (not self.converged(pod_r) and
               not self.failed()):
            logEvent("   Newton it %d norm(r) = %12.5e \t\t norm(r)/(rtol*norm(r0)+atol) = %g test=%s"
                % (self.its-1,self.norm_r,(old_div(self.norm_r,(self.rtol_r*self.norm_r0+self.atol_r))),self.convergenceTest),level=1)
            if self.updateJacobian or self.fullNewton:
                self.updateJacobian = False
                self.pod_J[:] = 0.0
                self.F.getJacobian(self.J)
                # Galerkin projection of the CSR Jacobian: pod_J = U^T J U
                # (dense quadruple loop; O(DB^2 * nnz) per Newton step)
                for i in range(self.DB):
                    for j in range(self.DB):
                        for k in range(self.F.dim):
                            for m in range(self.J_rowptr[k],self.J_rowptr[k+1]):
                                self.pod_J[i,j] += self.U_transpose[i,k]*self.J_nzval[m]*self.U[self.J_colind[m],j]
            #self.linearSolver.prepare(b=r)
            self.pod_linearSolver.prepare(b=pod_r)
            self.du[:]=0.0
            self.pod_du[:]=0.0
            if not self.linearSolverFailed:
                #self.linearSolver.solve(u=self.du,b=r,par_u=self.par_du,par_b=par_r)
                #self.linearSolverFailed = self.linearSolver.failed()
                self.pod_linearSolver.solve(u=self.pod_du,b=pod_r)
                self.linearSolverFailed = self.pod_linearSolver.failed()
            #pod_u-=np.dot(self.U_transpose,self.du)
            pod_u-=self.pod_du
            u[:] = np.dot(self.U,pod_u)
            #mostly for convergence norms
            self.du = np.dot(self.U,self.pod_du)
            self.computeResidual(u,r,b)
            # keep r consistent with its reduced-space representation
            pod_r[:] = np.dot(self.U_transpose,r)
            r[:] = np.dot(self.U,pod_r)
        else:
            logEvent("   Newton it %d norm(r) = %12.5e \t\t norm(r)/(rtol*norm(r0)+atol) = %12.5e"
                % (self.its,self.norm_r,(old_div(self.norm_r,(self.rtol_r*self.norm_r0+self.atol_r)))),level=1)
            return self.failedFlag
        # NOTE(review): unreachable — the loop has no `break`, so the
        # while/else above always runs and returns.
        logEvent("   Newton it %d norm(r) = %12.5e \t\t norm(r)/(rtol*norm(r0)+atol) = %12.5e"
            % (self.its,self.norm_r,(old_div(self.norm_r,(self.rtol_r*self.norm_r0+self.atol_r)))),level=1)
class POD_DEIM_Newton(Newton):
    """Newton's method on the reduced order system based on POD.

    Extends the plain POD reduction with DEIM (discrete empirical
    interpolation) of the spatial residual: the residual is split into a
    'mass' (temporal) piece projected with U^T and a spatial piece sampled
    at the DEIM indices rho_deim and lifted with U^T Uf (P^T Uf)^{-1}.
    """
    def __init__(self,
                 linearSolver,
                 F,J=None,du=None,par_du=None,
                 rtol_r = 1.0e-4,
                 atol_r = 1.0e-16,
                 rtol_du = 1.0e-4,
                 atol_du = 1.0e-16,
                 maxIts = 100,
                 norm = l2Norm,
                 convergenceTest = 'r',
                 computeRates = True,
                 printInfo = True,
                 fullNewton=True,
                 directSolver=False,
                 EWtol=True,
                 maxLSits = 100,
                 use_deim=True):
        """Load solution/residual bases from disk and set up DEIM operators."""
        Newton.__init__(self,
                        linearSolver,
                        F,J,du,par_du,
                        rtol_r,
                        atol_r,
                        rtol_du,
                        atol_du,
                        maxIts,
                        norm,
                        convergenceTest,
                        computeRates,
                        printInfo,
                        fullNewton,
                        directSolver,
                        EWtol,
                        maxLSits)
        #setup reduced basis for solution
        self.DB = 43 #11 #number of basis vectors for solution
        U = np.loadtxt('SVD_basis')
        self.U = U[:,0:self.DB]
        self.U_transpose = self.U.conj().T
        #setup reduced basis for DEIM interpolants
        self.use_deim = use_deim
        self.DBf = None
        self.Uf = None;
        self.rho_deim = None; self.Ut_Uf_PtUf_inv=None
        self.rs = None; self.rt = None
        calculate_deim_internally = True
        if self.use_deim:
            #mwf this calculates things in the code. Switch for debugging to just reading
            if calculate_deim_internally:
                Uf = np.loadtxt('Fs_SVD_basis')
                self.DBf = min(73,Uf.shape[1],self.F.dim)#debug
                self.Uf = Uf[:,0:self.DBf]
                #returns rho --> deim indices and deim 'projection' matrix
                #U(P^TU)^{-1}
                self.rho_deim,Uf_PtUf_inv = deim_utils.deim_alg(self.Uf,self.DBf)
            else:
                # debugging path: read precomputed truncated basis and indices
                self.Uf = np.loadtxt('Fs_SVD_basis_truncated')
                self.DBf = self.Uf.shape[1]
                self.rho_deim = np.loadtxt('Fs_DEIM_indices_truncated',dtype='i')
                PtUf = self.Uf[self.rho_deim]
                assert PtUf.shape == (self.DBf,self.DBf)
                PtUfInv = np.linalg.inv(PtUf)
                Uf_PtUf_inv = np.dot(self.Uf,PtUfInv)
            #go ahead and left multiply projection matrix by solution basis
            #to get 'projection' from deim to coarse space
            self.Ut_Uf_PtUf_inv = np.dot(self.U_transpose,Uf_PtUf_inv)
        self.pod_J = np.zeros((self.DB,self.DB),'d')
        self.pod_Jt= np.zeros((self.DB,self.DB),'d')
        self.pod_Jtmp= np.zeros((self.DBf,self.DB),'d')
        self.pod_linearSolver = LU(self.pod_J)
        self.J_rowptr,self.J_colind,self.J_nzval = self.J.getCSRrepresentation()
        # the model must expose split spatial/mass Jacobians for DEIM
        assert 'getSpatialJacobian' in dir(self.F)
        assert 'getMassJacobian' in dir(self.F)
        self.Js = self.F.initializeSpatialJacobian()
        self.Js_rowptr,self.Js_colind,self.Js_nzval = self.Js.getCSRrepresentation()
        self.Jt = self.F.initializeMassJacobian()
        self.Jt_rowptr,self.Jt_colind,self.Jt_nzval = self.Jt.getCSRrepresentation()
        self.pod_du = np.zeros(self.DB)
        # when True, the reduced mass matrix is computed once and reused
        self.skip_mass_jacobian_eval = True
        self.linear_reduced_mass_matrix=None
    def norm(self,u):
        # norm of the reduced residual/update
        return self.norm_function(u)
    #mwf add for DEIM
    def computeDEIMresiduals(self,u,rs,rt):
        """
        wrapper for computing residuals separately for DEIM
        (rs -- spatial part, rt -- mass/temporal part)
        """
        assert 'getSpatialResidual' in dir(self.F)
        assert 'getMassResidual' in dir(self.F)
        self.F.getSpatialResidual(u,rs)
        self.F.getMassResidual(u,rt)
    def solveInitialize(self,u,r,b):
        """
        if using deim modifies base initialization by
        splitting up residual evaluation into separate pieces
        interpolated by deim (right now just does 'mass' and 'space')
        NOT FINISHED
        """
        if r is None:
            if self.r is None:
                self.r = Vec(self.F.dim)
            r=self.r
        else:
            self.r=r
        self.computeResidual(u,r,b)
        if self.use_deim:
            if self.rs is None:
                self.rs = Vec(self.F.dim)
            if self.rt is None:
                self.rt = Vec(self.F.dim)
            self.computeDEIMresiduals(u,self.rs,self.rt)
        self.its = 0
        self.norm_r0 = self.norm(r)
        self.norm_r = self.norm_r0
        self.ratio_r_solve = 1.0
        self.ratio_du_solve = 1.0
        self.last_log_ratio_r = 1.0
        self.last_log_ratior_du = 1.0
        #self.convergenceHistoryIsCorrupt=False
        self.convergingIts = 0
        #mwf begin hack for conv. rate
        self.gustafsson_alpha = -12345.0
        self.gustafsson_norm_du_last = -12345.0
        #mwf end hack for conv. rate
        return r
    def solve(self,u,r=None,b=None,par_u=None,par_r=None):
        """
        Solve F(u) = b
        b -- right hand side
        u -- solution
        r -- F(u) - b

        Dispatches to solveDEIM when use_deim is set; otherwise performs a
        plain Galerkin-POD Newton iteration (same as POD_Newton.solve).
        """
        if self.use_deim:
            return self.solveDEIM(u,r,b,par_u,par_r)
        pod_u = np.dot(self.U_transpose,u)
        u[:] = np.dot(self.U,pod_u)
        r=self.solveInitialize(u,r,b)
        pod_r = np.dot(self.U_transpose,r)
        self.norm_r0 = self.norm(pod_r)
        self.norm_r_hist = []
        self.norm_du_hist = []
        self.gammaK_max=0.0
        self.linearSolverFailed = False
        while (not self.converged(pod_r) and
               not self.failed()):
            logEvent("   Newton it %d norm(r) = %12.5e \t\t norm(r)/(rtol*norm(r0)+atol) = %g test=%s"
                % (self.its-1,self.norm_r,(old_div(self.norm_r,(self.rtol_r*self.norm_r0+self.atol_r))),self.convergenceTest),level=1)
            if self.updateJacobian or self.fullNewton:
                self.updateJacobian = False
                self.F.getJacobian(self.J)
                self.pod_J[:] = 0.0
                # Galerkin projection pod_J = U^T J U from the CSR Jacobian
                for i in range(self.DB):
                    for j in range(self.DB):
                        for k in range(self.F.dim):
                            for m in range(self.J_rowptr[k],self.J_rowptr[k+1]):
                                self.pod_J[i,j] += self.U_transpose[i,k]*self.J_nzval[m]*self.U[self.J_colind[m],j]
            #self.linearSolver.prepare(b=r)
            self.pod_linearSolver.prepare(b=pod_r)
            self.du[:]=0.0
            self.pod_du[:]=0.0
            if not self.linearSolverFailed:
                #self.linearSolver.solve(u=self.du,b=r,par_u=self.par_du,par_b=par_r)
                #self.linearSolverFailed = self.linearSolver.failed()
                self.pod_linearSolver.solve(u=self.pod_du,b=pod_r)
                self.linearSolverFailed = self.pod_linearSolver.failed()
            #pod_u-=np.dot(self.U_transpose,self.du)
            pod_u-=self.pod_du
            u[:] = np.dot(self.U,pod_u)
            #mostly for norm calculations
            self.du = np.dot(self.U,self.pod_du)
            self.computeResidual(u,r,b)
            pod_r[:] = np.dot(self.U_transpose,r)
            r[:] = np.dot(self.U,pod_r)
        else:
            logEvent("   Newton it %d norm(r) = %12.5e \t\t norm(r)/(rtol*norm(r0)+atol) = %12.5e"
                % (self.its,self.norm_r,(old_div(self.norm_r,(self.rtol_r*self.norm_r0+self.atol_r)))),level=1)
            return self.failedFlag
        # NOTE(review): unreachable — the loop has no `break`, so the
        # while/else above always runs and returns.
        logEvent("   Newton it %d norm(r) = %12.5e \t\t norm(r)/(rtol*norm(r0)+atol) = %12.5e"
            % (self.its,self.norm_r,(old_div(self.norm_r,(self.rtol_r*self.norm_r0+self.atol_r)))),level=1)
    def solveDEIM(self,u,r=None,b=None,par_u=None,par_r=None):
        """
        Solve F(u) = b
        b -- right hand side
        u -- solution
        r -- F(u) - b
        using DEIM
        Start with brute force just testing things
        """
        assert self.use_deim
        pod_u = np.dot(self.U_transpose,u)
        u[:] = np.dot(self.U,pod_u)
        #evaluate fine grid residuals directly
        r=self.solveInitialize(u,r,b)
        #mwf debug
        tmp = r-self.rt-self.rs
        # NOTE(review): `.all() < tol` compares a bool to the tolerance;
        # the intended check is presumably (np.absolute(tmp) < tol).all().
        # Left as-is since 'fixing' it could start raising at runtime.
        assert np.absolute(tmp).all() < 1.0e-12
        #r_deim = self.rt[self.rho_deim].copy()
        # sample the spatial residual at the DEIM indices only
        r_deim = self.rs[self.rho_deim]
        pod_rt = np.dot(self.U_transpose,self.rt)
        # lift DEIM samples to the coarse space and add the mass part
        pod_r = np.dot(self.Ut_Uf_PtUf_inv,r_deim)
        pod_r += pod_rt
        #mwf debug
        #import pdb
        #pdb.set_trace()
        assert not numpy.isnan(pod_r).any()
        #
        self.norm_r0 = self.norm(pod_r)
        self.norm_r_hist = []
        self.norm_du_hist = []
        self.gammaK_max=0.0
        self.linearSolverFailed = False
        while (not self.converged(pod_r) and
               not self.failed()):
            logEvent("   Newton it %d norm(r) = %12.5e \t\t norm(r)/(rtol*norm(r0)+atol) = %g test=%s"
                % (self.its-1,self.norm_r,(old_div(self.norm_r,(self.rtol_r*self.norm_r0+self.atol_r))),self.convergenceTest),level=1)
            if self.updateJacobian or self.fullNewton:
                self.updateJacobian = False
                #go ahead and evaluate spatial grid on fine grid for now
                self.F.getSpatialJacobian(self.Js)
                assert not numpy.isnan(self.Js_nzval).any()
                self.F.getMassJacobian(self.Jt)
                assert not numpy.isnan(self.Jt_nzval).any()
                #mwf hack, speed up mass matrix calculation
                if self.skip_mass_jacobian_eval and self.linear_reduced_mass_matrix is None:
                    # one-time Galerkin projection of the (constant) mass matrix
                    #now this holds U^T Jt U
                    self.pod_Jt[:] = 0.0
                    for i in range(self.DB):
                        for j in range(self.DB):
                            for k in range(self.F.dim):
                                for m in range(self.Jt_rowptr[k],self.Jt_rowptr[k+1]):
                                    self.pod_Jt[i,j] += self.U_transpose[i,k]*self.Jt_nzval[m]*self.U[self.Jt_colind[m],j]
                    #combined DEIM, coarse grid projection
                    #self.pod_Jt = np.dot(self.Ut_Uf_PtUf_inv,self.pod_Jtmp)
                    assert not numpy.isnan(self.pod_Jt).any()
                    self.linear_reduced_mass_matrix = self.pod_Jt.copy()
                    #have to scale by dt in general shouldn't affect constant mass matrix
                    self.Jt_nzval /= self.F.timeIntegration.dt
                    #mwf debug
                    self.F.getJacobian(self.J)
                    tmp = self.Jt_nzval+self.Js_nzval-self.J_nzval
                    # NOTE(review): same `.all() < tol` pattern as above.
                    assert numpy.absolute(tmp).all() < 1.0e-12
                #now this holds P^T J_s U
                # only the DEIM rows of the spatial Jacobian are needed
                self.pod_Jtmp[:] = 0.0
                for i in range(self.DBf):
                    deim_i = self.rho_deim[i]
                    for j in range(self.DB):
                        for m in range(self.Js_rowptr[deim_i],self.Js_rowptr[deim_i+1]):
                            self.pod_Jtmp[i,j] += self.Js_nzval[m]*self.U[self.Js_colind[m],j]
                #combined DEIM, coarse grid projection
                tmp = np.dot(self.Ut_Uf_PtUf_inv,self.pod_Jtmp)
                self.pod_J.flat[:] = tmp.flat[:]
                assert not numpy.isnan(self.pod_J).any()
                if not self.skip_mass_jacobian_eval:
                    #now this holds U^T Jt U
                    self.pod_Jt[:] = 0.0
                    for i in range(self.DB):
                        for j in range(self.DB):
                            for k in range(self.F.dim):
                                for m in range(self.Jt_rowptr[k],self.Jt_rowptr[k+1]):
                                    self.pod_Jt[i,j] += self.U_transpose[i,k]*self.Jt_nzval[m]*self.U[self.Jt_colind[m],j]
                    #combined DEIM, coarse grid projection
                    #self.pod_Jt = np.dot(self.Ut_Uf_PtUf_inv,self.pod_Jtmp)
                    assert not numpy.isnan(self.pod_Jt).any()
                else:
                    #mwf hack just copy over from precomputed mass matrix
                    self.pod_Jt.flat[:] = self.linear_reduced_mass_matrix.flat[:]
                    self.pod_Jt /= self.F.timeIntegration.dt
                # reduced Jacobian = reduced spatial part + scaled mass part
                self.pod_J += self.pod_Jt
            #self.linearSolver.prepare(b=r)
            self.pod_linearSolver.prepare(b=pod_r)
            self.du[:]=0.0
            self.pod_du[:]=0.0
            if not self.linearSolverFailed:
                #self.linearSolver.solve(u=self.du,b=r,par_u=self.par_du,par_b=par_r)
                #self.linearSolverFailed = self.linearSolver.failed()
                self.pod_linearSolver.solve(u=self.pod_du,b=pod_r)
                self.linearSolverFailed = self.pod_linearSolver.failed()
                assert not self.linearSolverFailed
            #pod_u-=np.dot(self.U_transpose,self.du)
            assert not numpy.isnan(self.pod_du).any()
            pod_u-=self.pod_du
            u[:] = np.dot(self.U,pod_u)
            #mostly for norm calculations
            self.du = np.dot(self.U,self.pod_du)
            #self.computeResidual(u,r,b)
            self.computeDEIMresiduals(u,self.rs,self.rt)
            #mwf debug
            self.F.getResidual(u,r)
            tmp = r-self.rt-self.rs
            # NOTE(review): same `.all() < tol` pattern as above.
            assert np.absolute(tmp).all() < 1.0e-12
            #mwf debug
            #r_deim = self.rt[self.rho_deim].copy()
            r_deim = self.rs[self.rho_deim]
            pod_rt = np.dot(self.U_transpose,self.rt)
            pod_r = np.dot(self.Ut_Uf_PtUf_inv,r_deim)
            pod_r += pod_rt
            assert not numpy.isnan(pod_r).any()
            r[:] = np.dot(self.U,pod_r)
        else:
            logEvent("   Newton it %d norm(r) = %12.5e \t\t norm(r)/(rtol*norm(r0)+atol) = %12.5e"
                % (self.its,self.norm_r,(old_div(self.norm_r,(self.rtol_r*self.norm_r0+self.atol_r)))),level=1)
            return self.failedFlag
        # NOTE(review): unreachable — the loop has no `break`, so the
        # while/else above always runs and returns.
        logEvent("   Newton it %d norm(r) = %12.5e \t\t norm(r)/(rtol*norm(r0)+atol) = %12.5e"
            % (self.its,self.norm_r,(old_div(self.norm_r,(self.rtol_r*self.norm_r0+self.atol_r)))),level=1)
class NewtonNS(NonlinearSolver):
"""
A simple iterative solver that is Newton's method
if you give it the right Jacobian
"""
    def __init__(self,
                 linearSolver,
                 F,J=None,du=None,par_du=None,
                 rtol_r = 1.0e-4,
                 atol_r = 1.0e-16,
                 rtol_du = 1.0e-4,
                 atol_du = 1.0e-16,
                 maxIts = 100,
                 norm = l2Norm,
                 convergenceTest = 'r',
                 computeRates = True,
                 printInfo = True,
                 fullNewton=True,
                 directSolver=False,
                 EWtol=True,
                 maxLSits = 100):
        """Set up a damped Newton solver for the Navier-Stokes system.

        linearSolver -- inner solver for the Jacobian systems
        F            -- the nonlinear operator (transport model)
        EWtol        -- use Eisenstat-Walker forcing terms for the inner solve
        maxLSits     -- maximum line-search iterations
        """
        import copy
        self.par_du = par_du
        if par_du is not None:
            # let the model know the number of locally-owned unknowns
            F.dim_proc = par_du.dim_proc
        NonlinearSolver.__init__(self,F,J,du,
                                 rtol_r,
                                 atol_r,
                                 rtol_du,
                                 atol_du,
                                 maxIts,
                                 norm,
                                 convergenceTest,
                                 computeRates,
                                 printInfo)
        self.updateJacobian=True
        self.fullNewton=fullNewton
        self.linearSolver = linearSolver
        self.directSolver = directSolver
        self.lineSearch = True
        #mwf turned back on self.lineSearch = False
        self.EWtol=EWtol
        #mwf added
        self.maxLSits = maxLSits
        if self.linearSolver.computeEigenvalues:
            # extra work arrays/solvers only needed for eigenvalue estimates
            self.JLast = copy.deepcopy(self.J)
            self.J_t_J = copy.deepcopy(self.J)
            self.dJ_t_dJ = copy.deepcopy(self.J)
            self.JLsolver=LU(self.J_t_J,computeEigenvalues=True)
            self.dJLsolver=LU(self.dJ_t_dJ,computeEigenvalues=True)
            self.u0 = numpy.zeros(self.F.dim,'d')
def setLinearSolverTolerance(self,r):
self.norm_r = self.norm(r)
gamma = 0.01
etaMax = 0.01
if self.norm_r == 0.0:
etaMin = 0.01*self.atol_r
else:
etaMin = 0.01*(self.rtol_r*self.norm_r0 + self.atol_r)/self.norm_r
if self.its > 1:
etaA = gamma * self.norm_r**2/self.norm_r_last**2
if self.its > 2:
if gamma*self.etaLast**2 < 0.01:
etaC = min(etaMax,etaA)
else:
etaC = min(etaMax,max(etaA,gamma*self.etaLast**2))
else:
etaC = min(etaMax,etaA)
else:
etaC = etaMax
eta = min(etaMax,max(etaC,etaMin))
self.etaLast = eta
self.norm_r_last = self.norm_r
self.linearSolver.setResTol(rtol=eta,atol=0.0)
def converged(self,r):
self.convergedFlag = False
self.norm_r = self.norm(r)
self.norm_cont_r = self.norm_function(r[:old_div(self.F.dim_proc,4)])
self.norm_mom_r = self.norm_function(r[old_div(self.F.dim_proc,4):self.F.dim_proc])
#self.norm_cont_r = self.norm(r[:r.shape[0]/4])
#self.norm_mom_r = self.norm(r[r.shape[0]/4:])
self.norm_du= old_div(1.0,float(self.its+2))
if self.computeRates == True:
self.computeConvergenceRates()
if self.convergenceTest == 'its' or self.convergenceTest == 'rits':
if self.its == self.maxIts:
self.convergedFlag = True
#print self.atol_r, self.rtol_r
if self.convergenceTest == 'r' or self.convergenceTest == 'rits':
if (self.its != 0 and
self.norm_cont_r < self.atol_r and #cek enforce mass conservation using atol_r only
self.norm_mom_r < self.rtol_r*self.norm_mom_r0 + self.atol_r):
self.convergedFlag = True
if self.convergedFlag == True and self.computeRates == True:
self.computeAverages()
if self.printInfo == True:
print(self.info())
#print self.convergedFlag
return self.convergedFlag
def solve(self,u,r=None,b=None,par_u=None,par_r=None):
"""
Solve F(u) = b
b -- right hand side
u -- solution
r -- F(u) - b
"""
from . import Viewers
memory()
if self.linearSolver.computeEigenvalues:
self.u0[:]=u
r=self.solveInitialize(u,r,b)
if par_u is not None:
#allow linear solver to know what type of assembly to use
self.linearSolver.par_fullOverlap = self.par_fullOverlap
#no overlap
if not self.par_fullOverlap:
par_r.scatter_reverse_add()
else:
#no overlap or overlap (until we compute norms over only owned dof)
par_r.scatter_forward_insert()
self.norm_cont_r0 = self.norm_function(r[:old_div(self.F.dim_proc,4)])
self.norm_mom_r0 = self.norm_function(r[old_div(self.F.dim_proc,4):self.F.dim_proc])
self.norm_r_hist = []
self.norm_du_hist = []
self.gammaK_max=0.0
self.linearSolverFailed = False
while (not self.converged(r) and
not self.failed()):
logEvent(" Newton it %d Mom. norm(r) = %12.5e tol = %12.5e" % (self.its-1,self.norm_mom_r,self.atol_r),level=1)
logEvent(" Newton it %d Cont. norm(r) = %12.5e tol = %12.5e" % (self.its-1,self.norm_cont_r,self.rtol_r*self.norm_mom_r0 + self.atol_r),level=1)
if self.updateJacobian or self.fullNewton:
self.updateJacobian = False
logEvent("Start assembling jacobian",level=4)
self.F.getJacobian(self.J)
logEvent("Done assembling jacobian",level=4)
if self.linearSolver.computeEigenvalues:
logEvent("Performing eigen analyses",level=4)
self.JLast[:]=self.J
self.J_t_J[:]=self.J
self.J_t_J *= numpy.transpose(self.J)
self.JLsolver.prepare()
self.JLsolver.calculateEigenvalues()
self.norm_2_J_current = sqrt(max(self.JLsolver.eigenvalues_r))
self.norm_2_Jinv_current = old_div(1.0,sqrt(min(self.JLsolver.eigenvalues_r)))
self.kappa_current = self.norm_2_J_current*self.norm_2_Jinv_current
self.betaK_current = self.norm_2_Jinv_current
self.linearSolver.prepare(b=r,newton_its=self.its-1)
self.du[:]=0.0
if not self.directSolver:
if self.EWtol:
self.setLinearSolverTolerance(r)
logEvent("Start linear solve",level=4)
if not self.linearSolverFailed:
self.linearSolver.solve(u=self.du,b=r,par_u=self.par_du,par_b=par_r)
self.linearSolverFailed = self.linearSolver.failed()
self.linearSolver.printPerformance()
#print self.du
#if par_du is not None:
# par_du.scatter_forward_insert()
u-=self.du
if par_u is not None:
par_u.scatter_forward_insert()
self.computeResidual(u,r,b)
#no overlap
#print "local r",r
if par_r is not None:
#no overlap
if not self.par_fullOverlap:
par_r.scatter_reverse_add()
else:
par_r.scatter_forward_insert()
#print "global r",r
if self.linearSolver.computeEigenvalues:
#approximate Lipschitz constant of J
self.F.getJacobian(self.dJ_t_dJ)
self.dJ_t_dJ-=self.JLast
self.dJ_t_dJ *= numpy.transpose(self.dJ_t_dJ)
self.dJLsolver.prepare()
self.dJLsolver.calculateEigenvalues()
self.norm_2_dJ_current = sqrt(max(self.dJLsolver.eigenvalues_r))
self.etaK_current = self.W*self.norm(self.du)
self.gammaK_current = old_div(self.norm_2_dJ_current,self.etaK_current)
self.gammaK_max = max(self.gammaK_current,self.gammaK_max)
self.norm_r_hist.append(self.W*self.norm(r))
self.norm_du_hist.append(self.W*self.unorm(self.du))
if self.its == 1:
# print "max(|du|) ",max(numpy.absolute(self.du))
# print self.du[0]
# print self.du[-1]
self.betaK_0 = self.betaK_current
self.etaK_0 = self.etaK_current
if self.its == 2:
self.betaK_1 = self.betaK_current
self.etaK_1 = self.etaK_current
print("it = ",self.its)
print("beta(|Jinv|) ",self.betaK_current)
print("eta(|du|) ",self.etaK_current)
print("gamma(Lip J') ",self.gammaK_current)
print("gammaM(Lip J')",self.gammaK_max)
print("kappa(cond(J))",self.kappa_current)
if self.betaK_current*self.etaK_current*self.gammaK_current <= 0.5:
print("r ",old_div((1.0+sqrt(1.0-2.0*self.betaK_current*self.etaK_current*self.gammaK_current)),(self.betaK_current*self.gammaK_current)))
if self.betaK_current*self.etaK_current*self.gammaK_max <= 0.5:
print("r_max ",old_div((1.0+sqrt(1.0-2.0*self.betaK_current*self.etaK_current*self.gammaK_max)),(self.betaK_current*self.gammaK_max)))
print("lambda_max",max(self.linearSolver.eigenvalues_r))
print("lambda_i_max",max(self.linearSolver.eigenvalues_i))
print("norm_J",self.norm_2_J_current)
print("lambda_min",min(self.linearSolver.eigenvalues_r))
print("lambda_i_min",min(self.linearSolver.eigenvalues_i))
if self.lineSearch:
norm_r_cur = self.norm(r)
norm_r_last = 2.0*norm_r_cur
ls_its = 0
#print norm_r_cur,self.atol_r,self.rtol_r
# while ( (norm_r_cur >= 0.99 * self.norm_r + self.atol_r) and
# (ls_its < self.maxLSits) and
# norm_r_cur/norm_r_last < 1.0):
while ( (norm_r_cur >= 0.9999 * self.norm_r) and
(ls_its < self.maxLSits)):
self.convergingIts = 0
ls_its +=1
self.du *= 0.5
u += self.du
if par_u is not None:
par_u.scatter_forward_insert()
self.computeResidual(u,r,b)
#no overlap
if par_r is not None:
#no overlap
if not self.par_fullOverlap:
par_r.scatter_reverse_add()
else:
par_r.scatter_forward_insert()
norm_r_last = norm_r_cur
norm_r_cur = self.norm(r)
logEvent("""ls #%d norm_r_cur=%s atol=%g rtol=%g""" % (ls_its,
norm_r_cur,
self.atol_r,
self.rtol_r))
if ls_its > 0:
logEvent("Linesearches = %i" % ls_its,level=3)
else:
if self.linearSolver.computeEigenvalues:
if self.betaK_0*self.etaK_0*self.gammaK_max <= 0.5:
print("r_{-,0} ",old_div((1.0+sqrt(1.0-2.0*self.betaK_0*self.etaK_0*self.gammaK_max)),(self.betaK_0*self.gammaK_max)))
if self.betaK_1*self.etaK_1*self.gammaK_max <= 0.5 and self.its > 1:
print("r_{-,1} ",old_div((1.0+sqrt(1.0-2.0*self.betaK_1*self.etaK_1*self.gammaK_max)),(self.betaK_1*self.gammaK_max)))
print("beta0*eta0*gamma ",self.betaK_0*self.etaK_0*self.gammaK_max)
if Viewers.viewerType == 'gnuplot':
max_r = max(1.0,max(self.linearSolver.eigenvalues_r))
max_i = max(1.0,max(self.linearSolver.eigenvalues_i))
for lambda_r,lambda_i in zip(self.linearSolver.eigenvalues_r,self.linearSolver.eigenvalues_i):
Viewers.datFile.write("%12.5e %12.5e \n" % (old_div(lambda_r,max_r),old_div(lambda_i,max_i)))
Viewers.datFile.write("\n \n")
cmd = "set term x11 %i; plot \'%s\' index %i with points title \"%s\" \n" % (Viewers.windowNumber,
Viewers.datFilename,
Viewers.plotNumber,
'scaled eigenvalues')
Viewers.cmdFile.write(cmd)
Viewers.viewerPipe.write(cmd)
Viewers.newPlot()
Viewers.newWindow()
for it,r in zip(list(range(len(self.norm_r_hist))),self.norm_r_hist):
Viewers.datFile.write("%12.5e %12.5e \n" % (it,math.log(old_div(r,self.norm_r_hist[0]))))
Viewers.datFile.write("\n \n")
cmd = "set term x11 %i; plot \'%s\' index %i with linespoints title \"%s\" \n" % (Viewers.windowNumber,
Viewers.datFilename,
Viewers.plotNumber,
'log(r)/log(r0) history')
Viewers.cmdFile.write(cmd)
Viewers.viewerPipe.write(cmd)
Viewers.newPlot()
Viewers.newWindow()
for it,du in zip(list(range(len(self.norm_du_hist))),self.norm_du_hist):
Viewers.datFile.write("%12.5e %12.5e \n" % (it,math.log(old_div(du,self.norm_du_hist[0]))))
Viewers.datFile.write("\n \n")
cmd = "set term x11 %i; plot \'%s\' index %i with linespoints title \"%s\" \n" % (Viewers.windowNumber,
Viewers.datFilename,
Viewers.plotNumber,
'log(du) history')
Viewers.cmdFile.write(cmd)
Viewers.viewerPipe.write(cmd)
Viewers.newPlot()
Viewers.newWindow()
logEvent(" Final Mom. norm(r) = %12.5e %12.5e" % (self.norm_mom_r,self.rtol_r*self.norm_mom_r0 + self.atol_r),level=1)
logEvent(" Final Cont. norm(r) = %12.5e %12.5e" % (self.norm_cont_r,self.rtol_r*self.norm_mom_r0 + self.atol_r),level=1)
logEvent(memory("NSNewton","NSNewton"),level=4)
class SSPRKNewton(Newton):
    """
    Version of Newton for SSPRK so doesn't refactor unnecessarily

    The Jacobian is assembled and factored once, then reused across
    subsequent solves (all SSPRK stages) until resetFactorization()
    is called.
    """
    def __init__(self,
                 linearSolver,
                 F,J=None,du=None,par_du=None,
                 rtol_r = 1.0e-4,
                 atol_r = 1.0e-16,
                 rtol_du = 1.0e-4,
                 atol_du = 1.0e-16,
                 maxIts = 100,
                 norm = l2Norm,
                 convergenceTest = 'r',
                 computeRates = True,
                 printInfo = True,
                 fullNewton=True,
                 directSolver=False,
                 EWtol=True,
                 maxLSits = 100):
        # Delegate everything to Newton; only the factorization-caching
        # flag below is new.
        self.par_du = par_du
        if par_du is not None:
            F.dim_proc = par_du.dim_proc
        Newton.__init__(self,
                        linearSolver,
                        F,J,du,par_du,
                        rtol_r,
                        atol_r,
                        rtol_du,
                        atol_du,
                        maxIts,
                        norm,
                        convergenceTest,
                        computeRates,
                        printInfo,
                        fullNewton,
                        directSolver,
                        EWtol,
                        maxLSits)
        # True once the Jacobian has been assembled+factored; prevents
        # refactoring on later solves until resetFactorization()
        self.isFactored = False
    def solve(self,u,r=None,b=None,par_u=None,par_r=None):
        """
        Solve F(u) = b
        b -- right hand side
        u -- solution
        r -- F(u) - b

        Returns self.failedFlag (False on success).
        """
        from . import Viewers
        if self.linearSolver.computeEigenvalues:
            self.u0[:]=u
        r=self.solveInitialize(u,r,b)
        if par_u is not None:
            #no overlap
            #par_r.scatter_reverse_add()
            #no overlap or overlap (until we compute norms over only owned dof)
            par_r.scatter_forward_insert()
        self.norm_r_hist = []
        self.norm_du_hist = []
        self.gammaK_max=0.0
        self.linearSolverFailed = False
        while (not self.converged(r) and
               not self.failed()):
            logEvent("SSPRKNewton it "+repr(self.its)+" norm(r) " + repr(self.norm_r),level=3)
            # NOTE(review): Python precedence parses this as
            # updateJacobian or (fullNewton and not isFactored); with the
            # default flags the net effect is "factor once, then reuse",
            # but confirm the intended grouping before changing it.
            if self.updateJacobian or self.fullNewton and not self.isFactored:
                self.updateJacobian = False
                self.F.getJacobian(self.J)
                #print numpy.transpose(self.J)
                if self.linearSolver.computeEigenvalues:
                    self.JLast[:]=self.J
                    self.J_t_J[:]=self.J
                    self.J_t_J *= numpy.transpose(self.J)
                    self.JLsolver.prepare()
                    self.JLsolver.calculateEigenvalues()
                    self.norm_2_J_current = sqrt(max(self.JLsolver.eigenvalues_r))
                    self.norm_2_Jinv_current = old_div(1.0,sqrt(min(self.JLsolver.eigenvalues_r)))
                    self.kappa_current = self.norm_2_J_current*self.norm_2_Jinv_current
                    self.betaK_current = self.norm_2_Jinv_current
                self.linearSolver.prepare(b=r)
                # remember the factorization so later solves skip this branch
                self.isFactored = True
            self.du[:]=0.0
            if not self.directSolver:
                if self.EWtol:
                    self.setLinearSolverTolerance(r)
            if not self.linearSolverFailed:
                self.linearSolver.solve(u=self.du,b=r,par_u=self.par_du,par_b=par_r)
                self.linearSolverFailed = self.linearSolver.failed()
            #print self.du
            u-=self.du
            if par_u is not None:
                par_u.scatter_forward_insert()
            self.computeResidual(u,r,b)
            #no overlap
            #print "local r",r
            if par_r is not None:
                #no overlap
                #par_r.scatter_reverse_add()
                par_r.scatter_forward_insert()
            #print "global r",r
            if self.linearSolver.computeEigenvalues:
                #approximate Lipschitz constant of J
                self.F.getJacobian(self.dJ_t_dJ)
                self.dJ_t_dJ-=self.JLast
                self.dJ_t_dJ *= numpy.transpose(self.dJ_t_dJ)
                self.dJLsolver.prepare()
                self.dJLsolver.calculateEigenvalues()
                self.norm_2_dJ_current = sqrt(max(self.dJLsolver.eigenvalues_r))
                self.etaK_current = self.W*self.norm(self.du)
                self.gammaK_current = old_div(self.norm_2_dJ_current,self.etaK_current)
                self.gammaK_max = max(self.gammaK_current,self.gammaK_max)
                self.norm_r_hist.append(self.W*self.norm(r))
                self.norm_du_hist.append(self.W*self.norm(self.du))
                if self.its  == 1:
#                     print "max(|du|) ",max(numpy.absolute(self.du))
#                     print self.du[0]
#                     print self.du[-1]
                    self.betaK_0 = self.betaK_current
                    self.etaK_0 = self.etaK_current
                if self.its  == 2:
                    self.betaK_1 = self.betaK_current
                    self.etaK_1 = self.etaK_current
                print("it = ",self.its)
                print("beta(|Jinv|)  ",self.betaK_current)
                print("eta(|du|)     ",self.etaK_current)
                print("gamma(Lip J') ",self.gammaK_current)
                print("gammaM(Lip J')",self.gammaK_max)
                print("kappa(cond(J))",self.kappa_current)
                # Kantorovich-style convergence radius estimates
                if self.betaK_current*self.etaK_current*self.gammaK_current <= 0.5:
                    print("r_k         ",old_div((1.0+sqrt(1.0-2.0*self.betaK_current*self.etaK_current*self.gammaK_current)),(self.betaK_current*self.gammaK_current)))
                if self.betaK_current*self.etaK_current*self.gammaK_max <= 0.5:
                    print("r_max       ",old_div((1.0+sqrt(1.0-2.0*self.betaK_current*self.etaK_current*self.gammaK_max)),(self.betaK_current*self.gammaK_max)))
                print("lambda_max",max(self.linearSolver.eigenvalues_r))
                print("lambda_i_max",max(self.linearSolver.eigenvalues_i))
                print("norm_J",self.norm_2_J_current)
                print("lambda_min",min(self.linearSolver.eigenvalues_r))
                print("lambda_i_min",min(self.linearSolver.eigenvalues_i))
            if self.lineSearch:
                # halving damping loop; note this variant also requires the
                # residual to be non-increasing (norm_r_cur/norm_r_last < 1)
                norm_r_cur = self.norm(r)
                norm_r_last = 2.0*norm_r_cur
                ls_its = 0
                #print norm_r_cur,self.atol_r,self.rtol_r
                while ( (norm_r_cur >= 0.99 * self.norm_r + self.atol_r) and
                        (ls_its < self.maxLSits) and
                        old_div(norm_r_cur,norm_r_last) < 1.0):
                    self.convergingIts = 0
                    ls_its +=1
                    self.du *= 0.5
                    u += self.du
                    if par_u is not None:
                        par_u.scatter_forward_insert()
                    self.computeResidual(u,r,b)
                    #no overlap
                    if par_r is not None:
                        #no overlap
                        #par_r.scatter_reverse_add()
                        par_r.scatter_forward_insert()
                    norm_r_last = norm_r_cur
                    norm_r_cur = self.norm(r)
                    print("""ls #%d norm_r_cur=%s atol=%g rtol=%g""" % (ls_its,
                                                                        norm_r_cur,
                                                                        self.atol_r,
                                                                        self.rtol_r))
                if ls_its > 0:
                    logEvent("Linesearches = %i" % ls_its,level=3)
        else:
            # while-else: runs on normal loop exit (no break above);
            # diagnostic output only
            if self.linearSolver.computeEigenvalues:
                if self.betaK_0*self.etaK_0*self.gammaK_max <= 0.5:
                    print("r_{-,0}     ",old_div((1.0+sqrt(1.0-2.0*self.betaK_0*self.etaK_0*self.gammaK_max)),(self.betaK_0*self.gammaK_max)))
                if self.betaK_1*self.etaK_1*self.gammaK_max <= 0.5 and self.its > 1:
                    print("r_{-,1}     ",old_div((1.0+sqrt(1.0-2.0*self.betaK_1*self.etaK_1*self.gammaK_max)),(self.betaK_1*self.gammaK_max)))
                print("beta0*eta0*gamma ",self.betaK_0*self.etaK_0*self.gammaK_max)
                if Viewers.viewerType == 'gnuplot':
                    max_r = max(1.0,max(self.linearSolver.eigenvalues_r))
                    max_i = max(1.0,max(self.linearSolver.eigenvalues_i))
                    for lambda_r,lambda_i in zip(self.linearSolver.eigenvalues_r,self.linearSolver.eigenvalues_i):
                        Viewers.datFile.write("%12.5e %12.5e \n" % (old_div(lambda_r,max_r),old_div(lambda_i,max_i)))
                    Viewers.datFile.write("\n \n")
                    cmd = "set term x11 %i; plot \'%s\' index %i with points title \"%s\" \n" % (Viewers.windowNumber,
                                                                                                 Viewers.datFilename,
                                                                                                 Viewers.plotNumber,
                                                                                                 'scaled eigenvalues')
                    Viewers.cmdFile.write(cmd)
                    Viewers.viewerPipe.write(cmd)
                    Viewers.newPlot()
                    Viewers.newWindow()
                    for it,r in zip(list(range(len(self.norm_r_hist))),self.norm_r_hist):
                        Viewers.datFile.write("%12.5e %12.5e \n" % (it,log(old_div(r,self.norm_r_hist[0]))))
                    Viewers.datFile.write("\n \n")
                    cmd = "set term x11 %i; plot \'%s\' index %i with linespoints title \"%s\" \n" % (Viewers.windowNumber,
                                                                                                      Viewers.datFilename,
                                                                                                      Viewers.plotNumber,
                                                                                                      'log(r)/log(r0) history')
                    Viewers.cmdFile.write(cmd)
                    Viewers.viewerPipe.write(cmd)
                    Viewers.newPlot()
                    Viewers.newWindow()
                    for it,du in zip(list(range(len(self.norm_du_hist))),self.norm_du_hist):
                        Viewers.datFile.write("%12.5e %12.5e \n" % (it,log(old_div(du,self.norm_du_hist[0]))))
                    Viewers.datFile.write("\n \n")
                    cmd = "set term x11 %i; plot \'%s\' index %i with linespoints title \"%s\" \n" % (Viewers.windowNumber,
                                                                                                      Viewers.datFilename,
                                                                                                      Viewers.plotNumber,
                                                                                                      'log(du) history')
                    Viewers.cmdFile.write(cmd)
                    Viewers.viewerPipe.write(cmd)
                    Viewers.newPlot()
                    Viewers.newWindow()
            return self.failedFlag
    def resetFactorization(self,needToRefactor=True):
        # Force (or suppress) re-assembly+re-factorization on the next solve.
        self.isFactored = not needToRefactor
class PicardNewton(Newton):
    """
    Newton iteration that starts with Picard (fixed-point linearization)
    steps: while the iteration count is below picardIts or the scaled
    residual exceeds picardTol, the Jacobian is assembled in Picard form
    (F.getJacobian(J, usePicard)); otherwise the full Newton Jacobian
    is used.
    """
    def __init__(self,
                 linearSolver,
                 F,J=None,du=None,par_du=None,
                 rtol_r = 1.0e-4,
                 atol_r = 1.0e-16,
                 rtol_du = 1.0e-4,
                 atol_du = 1.0e-16,
                 maxIts = 100,
                 norm = l2Norm,
                 convergenceTest = 'r',
                 computeRates = True,
                 printInfo = True,
                 fullNewton=True,
                 directSolver=False,
                 EWtol=True,
                 maxLSits = 100):
        Newton.__init__(self,
                        linearSolver,
                        F,J,du,par_du,
                        rtol_r,
                        atol_r,
                        rtol_du,
                        atol_du,
                        maxIts,
                        norm,
                        convergenceTest,
                        computeRates,
                        printInfo,
                        fullNewton,
                        directSolver,
                        EWtol,
                        maxLSits)
        # number of initial iterations forced to Picard form
        self.picardIts = 1
        # scaled-residual threshold above which Picard form is kept
        self.picardTol = 1000.0
        self.usePicard = True
    def solve(self,u,r=None,b=None,par_u=None,par_r=None):
        """
        Solve F(u) = b
        b -- right hand side
        u -- solution
        r -- F(u) - b

        Returns self.failedFlag (False on success).
        """
        from . import Viewers
        if self.linearSolver.computeEigenvalues:
            self.u0[:]=u
        r=self.solveInitialize(u,r,b)
        if par_u is not None:
            #allow linear solver to know what type of assembly to use
            self.linearSolver.par_fullOverlap = self.par_fullOverlap
            #no overlap
            if not self.par_fullOverlap:
                par_r.scatter_reverse_add()
            else:
                #no overlap or overlap (until we compute norms over only owned dof)
                par_r.scatter_forward_insert()
        self.norm_r0 = self.norm(r)
        self.norm_r_hist = []
        self.norm_du_hist = []
        self.gammaK_max=0.0
        self.linearSolverFailed = False
        while (not self.converged(r) and
               not self.failed()):
            if self.maxIts>1:
                logEvent("   Newton it %d norm(r) = %12.5e  %12.5g \t\t norm(r)/(rtol*norm(r0)+atol) = %g"
                         % (self.its-1,self.norm_r,100*(old_div(self.norm_r,self.norm_r0)),(old_div(self.norm_r,(self.rtol_r*self.norm_r0+self.atol_r)))),level=1)
            if self.updateJacobian or self.fullNewton:
                self.updateJacobian = False
                # Picard form early on or while the residual is still large;
                # full Newton Jacobian once the iterate is close enough
                if self.usePicard and (self.its < self.picardIts or old_div(self.norm_r,(self.rtol_r*self.norm_r0+self.atol_r)) > self.picardTol):
                    print("Picard iteration")
                    self.F.getJacobian(self.J,self.usePicard)
                else:
                    self.F.getJacobian(self.J)
                #mwf commented out print numpy.transpose(self.J)
                if self.linearSolver.computeEigenvalues:
                    self.JLast[:]=self.J
                    self.J_t_J[:]=self.J
                    self.J_t_J *= numpy.transpose(self.J)
                    self.JLsolver.prepare()
                    self.JLsolver.calculateEigenvalues()
                    self.norm_2_J_current = sqrt(max(self.JLsolver.eigenvalues_r))
                    self.norm_2_Jinv_current = old_div(1.0,sqrt(min(self.JLsolver.eigenvalues_r)))
                    self.kappa_current = self.norm_2_J_current*self.norm_2_Jinv_current
                    self.betaK_current = self.norm_2_Jinv_current
                self.linearSolver.prepare(b=r)
            self.du[:]=0.0
            if not self.directSolver:
                if self.EWtol:
                    self.setLinearSolverTolerance(r)
            if not self.linearSolverFailed:
                self.linearSolver.solve(u=self.du,b=r,par_u=self.par_du,par_b=par_r)
                self.linearSolverFailed = self.linearSolver.failed()
            #print self.du
            u-=self.du
            if par_u is not None:
                par_u.scatter_forward_insert()
            self.computeResidual(u,r,b)
            #no overlap
            #print "local r",r
            if par_r is not None:
                #no overlap
                if not self.par_fullOverlap:
                    par_r.scatter_reverse_add()
                else:
                    par_r.scatter_forward_insert()
            #print "global r",r
            if self.linearSolver.computeEigenvalues:
                #approximate Lipschitz constant of J
                self.F.getJacobian(self.dJ_t_dJ)
                self.dJ_t_dJ-=self.JLast
                self.dJ_t_dJ *= numpy.transpose(self.dJ_t_dJ)
                self.dJLsolver.prepare()
                self.dJLsolver.calculateEigenvalues()
                self.norm_2_dJ_current = sqrt(max(self.dJLsolver.eigenvalues_r))
                self.etaK_current = self.W*self.norm(self.du)
                self.gammaK_current = old_div(self.norm_2_dJ_current,self.etaK_current)
                self.gammaK_max = max(self.gammaK_current,self.gammaK_max)
                self.norm_r_hist.append(self.W*self.norm(r))
                self.norm_du_hist.append(self.W*self.unorm(self.du))
                if self.its  == 1:
#                     print "max(|du|) ",max(numpy.absolute(self.du))
#                     print self.du[0]
#                     print self.du[-1]
                    self.betaK_0 = self.betaK_current
                    self.etaK_0 = self.etaK_current
                if self.its  == 2:
                    self.betaK_1 = self.betaK_current
                    self.etaK_1 = self.etaK_current
                print("it = ",self.its)
                print("beta(|Jinv|)  ",self.betaK_current)
                print("eta(|du|)     ",self.etaK_current)
                print("gamma(Lip J') ",self.gammaK_current)
                print("gammaM(Lip J')",self.gammaK_max)
                print("kappa(cond(J))",self.kappa_current)
                # Kantorovich-style convergence radius estimates
                if self.betaK_current*self.etaK_current*self.gammaK_current <= 0.5:
                    print("r_k         ",old_div((1.0+sqrt(1.0-2.0*self.betaK_current*self.etaK_current*self.gammaK_current)),(self.betaK_current*self.gammaK_current)))
                if self.betaK_current*self.etaK_current*self.gammaK_max <= 0.5:
                    print("r_max       ",old_div((1.0+sqrt(1.0-2.0*self.betaK_current*self.etaK_current*self.gammaK_max)),(self.betaK_current*self.gammaK_max)))
                print("lambda_max",max(self.linearSolver.eigenvalues_r))
                print("lambda_i_max",max(self.linearSolver.eigenvalues_i))
                print("norm_J",self.norm_2_J_current)
                print("lambda_min",min(self.linearSolver.eigenvalues_r))
                print("lambda_i_min",min(self.linearSolver.eigenvalues_i))
            if self.lineSearch:
                # halving damping: shrink du until the residual stops growing
                norm_r_cur = self.norm(r)
                ls_its = 0
                #print norm_r_cur,self.atol_r,self.rtol_r
#                 while ( (norm_r_cur >= 0.99 * self.norm_r + self.atol_r) and
#                         (ls_its < self.maxLSits) and
#                         norm_r_cur/norm_r_last < 1.0):
                while ( (norm_r_cur >= 0.9999 * self.norm_r) and
                        (ls_its < self.maxLSits)):
                    self.convergingIts = 0
                    ls_its +=1
                    self.du *= 0.5
                    u += self.du
                    if par_u is not None:
                        par_u.scatter_forward_insert()
                    self.computeResidual(u,r,b)
                    #no overlap
                    if par_r is not None:
                        #no overlap
                        if not self.par_fullOverlap:
                            par_r.scatter_reverse_add()
                        else:
                            par_r.scatter_forward_insert()
                    norm_r_cur = self.norm(r)
                    logEvent("""ls #%d norm_r_cur=%s atol=%g rtol=%g""" % (ls_its,
                                                                           norm_r_cur,
                                                                           self.atol_r,
                                                                           self.rtol_r))
                if ls_its > 0:
                    logEvent("Linesearches = %i" % ls_its,level=3)
        else:
            # while-else: runs on normal loop exit (no break above);
            # diagnostic output only
            if self.linearSolver.computeEigenvalues:
                if self.betaK_0*self.etaK_0*self.gammaK_max <= 0.5:
                    print("r_{-,0}     ",old_div((1.0+sqrt(1.0-2.0*self.betaK_0*self.etaK_0*self.gammaK_max)),(self.betaK_0*self.gammaK_max)))
                if self.betaK_1*self.etaK_1*self.gammaK_max <= 0.5 and self.its > 1:
                    print("r_{-,1}     ",old_div((1.0+sqrt(1.0-2.0*self.betaK_1*self.etaK_1*self.gammaK_max)),(self.betaK_1*self.gammaK_max)))
                print("beta0*eta0*gamma ",self.betaK_0*self.etaK_0*self.gammaK_max)
                if Viewers.viewerType == 'gnuplot':
                    max_r = max(1.0,max(self.linearSolver.eigenvalues_r))
                    max_i = max(1.0,max(self.linearSolver.eigenvalues_i))
                    for lambda_r,lambda_i in zip(self.linearSolver.eigenvalues_r,self.linearSolver.eigenvalues_i):
                        Viewers.datFile.write("%12.5e %12.5e \n" % (old_div(lambda_r,max_r),old_div(lambda_i,max_i)))
                    Viewers.datFile.write("\n \n")
                    cmd = "set term x11 %i; plot \'%s\' index %i with points title \"%s\" \n" % (Viewers.windowNumber,
                                                                                                 Viewers.datFilename,
                                                                                                 Viewers.plotNumber,
                                                                                                 'scaled eigenvalues')
                    Viewers.cmdFile.write(cmd)
                    Viewers.viewerPipe.write(cmd)
                    Viewers.newPlot()
                    Viewers.newWindow()
                    for it,r in zip(list(range(len(self.norm_r_hist))),self.norm_r_hist):
                        Viewers.datFile.write("%12.5e %12.5e \n" % (it,math.log(old_div(r,self.norm_r_hist[0]))))
                    Viewers.datFile.write("\n \n")
                    cmd = "set term x11 %i; plot \'%s\' index %i with linespoints title \"%s\" \n" % (Viewers.windowNumber,
                                                                                                      Viewers.datFilename,
                                                                                                      Viewers.plotNumber,
                                                                                                      'log(r)/log(r0) history')
                    Viewers.cmdFile.write(cmd)
                    Viewers.viewerPipe.write(cmd)
                    Viewers.newPlot()
                    Viewers.newWindow()
                    for it,du in zip(list(range(len(self.norm_du_hist))),self.norm_du_hist):
                        Viewers.datFile.write("%12.5e %12.5e \n" % (it,math.log(old_div(du,self.norm_du_hist[0]))))
                    Viewers.datFile.write("\n \n")
                    cmd = "set term x11 %i; plot \'%s\' index %i with linespoints title \"%s\" \n" % (Viewers.windowNumber,
                                                                                                      Viewers.datFilename,
                                                                                                      Viewers.plotNumber,
                                                                                                      'log(du) history')
                    Viewers.cmdFile.write(cmd)
                    Viewers.viewerPipe.write(cmd)
                    Viewers.newPlot()
                    Viewers.newWindow()
            if self.maxIts>1:
                logEvent("   Newton it %d norm(r) = %12.5e  %12.5g \t\t norm(r)/(rtol*norm(r0)+atol) = %g"
                         % (self.its-1,self.norm_r,100*(old_div(self.norm_r,self.norm_r0)),(old_div(self.norm_r,(self.rtol_r*self.norm_r0+self.atol_r)))),level=1)
            return self.failedFlag
class NLJacobi(NonlinearSolver):
    """
    Nonlinear Jacobi iteration.

    Each outer iteration (re)assembles the Jacobian if needed, then takes
    a weighted Jacobi sweep: du = w * D^{-1} r for dense Jacobians, or the
    csmoothers kernel for SparseMatrix Jacobians.  The sweep visits the
    unknowns in reverse order (node_order).
    """
    def __init__(self,
                 F,J,du,
                 weight=old_div(4.0,5.0),
                 rtol_r = 1.0e-4,
                 atol_r = 1.0e-16,
                 rtol_du = 1.0e-4,
                 atol_du = 1.0e-16,
                 maxIts = 100,
                 norm = l2Norm,
                 convergenceTest = 'r',
                 computeRates = True,
                 printInfo = True,
                 fullNewton=True):
        """
        F      -- nonlinear operator with getResidual/getJacobian
        weight -- relaxation weight w (default 4/5)
        """
        NonlinearSolver.__init__(self,F,J,du,
                                 rtol_r,
                                 atol_r,
                                 rtol_du,
                                 atol_du,
                                 maxIts,
                                 norm,
                                 convergenceTest,
                                 computeRates,
                                 printInfo)
        self.linearSolver = LinearSolver(J)#dummy
        self.updateJacobian=True
        self.fullNewton=fullNewton
        self.M=Vec(self.F.dim)
        self.w=weight
        # visit unknowns in reverse order (the original also assigned the
        # forward ordering first, which was immediately overwritten -- the
        # dead assignment has been removed)
        self.node_order=numpy.arange(self.F.dim-1,-1,-1,dtype='i')
    def solve(self,u,r=None,b=None,par_u=None,par_r=None):
        """
        Iterate u <- u - w*D^{-1} r(u) until converged or failed.
        Returns self.failedFlag (False on success).
        """
        r=self.solveInitialize(u,r,b)
        while (not self.converged(r) and
               not self.failed()):
            if self.updateJacobian or self.fullNewton:
                self.updateJacobian = False
                self.F.getJacobian(self.J)
                if type(self.J).__name__ == 'ndarray':
                    # M holds w / diag(J)
                    self.M = old_div(self.w,numpy.diagonal(self.J))
                elif type(self.J).__name__ == 'SparseMatrix':
                    csmoothers.jacobi_NR_prepare(self.J,self.w,1.0e-16,self.M)
            if type(self.J).__name__ == 'ndarray':
                self.du[:]=r
                self.du*=self.M
            elif type(self.J).__name__ == "SparseMatrix":
                csmoothers.jacobi_NR_solve(self.J,self.M,r,self.node_order,self.du)
            u -= self.du
            self.computeResidual(u,r,b)
        # the loop has no break, so the original while/else was equivalent
        # to a plain return after the loop
        return self.failedFlag
class NLGaussSeidel(NonlinearSolver):
    """
    Nonlinear Gauss-Seidel.

    Dense Jacobians are swept explicitly using the sparsity pattern in
    connectionList; with sym=True a backward sweep follows the forward
    sweep (symmetric Gauss-Seidel).  SparseMatrix Jacobians use the
    csmoothers kernels with a reverse node ordering.
    """
    def __init__(self,
                 connectionList,
                 F,J,du,
                 weight=1.0,
                 sym=False,
                 rtol_r = 1.0e-4,
                 atol_r = 1.0e-16,
                 rtol_du = 1.0e-4,
                 atol_du = 1.0e-16,
                 maxIts = 100,
                 norm = l2Norm,
                 convergenceTest = 'r',
                 computeRates = True,
                 printInfo = True,
                 fullNewton=True):
        """
        connectionList -- connectionList[i] lists the dofs coupled to dof i
        weight         -- relaxation weight w (default 1.0)
        sym            -- add a backward sweep after the forward sweep
        """
        NonlinearSolver.__init__(self,F,J,du,
                                 rtol_r,
                                 atol_r,
                                 rtol_du,
                                 atol_du,
                                 maxIts,
                                 norm,
                                 convergenceTest,
                                 computeRates,
                                 printInfo)
        self.linearSolver = LinearSolver(J)#dummy
        self.updateJacobian=True
        self.fullNewton=fullNewton
        self.w=weight
        self.connectionList=connectionList
        self.M=Vec(self.F.dim)
        self.sym = sym
        # reverse visiting order for the sparse kernel (the original
        # assigned the forward ordering twice before overwriting it with
        # this one; the dead assignments have been removed)
        self.node_order=numpy.arange(self.F.dim-1,-1,-1,dtype='i')
    def solve(self,u,r=None,b=None,par_u=None,par_r=None):
        """
        Sweep until converged or failed.
        Returns self.failedFlag (False on success).
        """
        r=self.solveInitialize(u,r,b)
        while (not self.converged(r) and
               not self.failed()):
            if self.updateJacobian or self.fullNewton:
                self.updateJacobian = False
                self.F.getJacobian(self.J)
                if type(self.J).__name__ == 'ndarray':
                    self.M = old_div(self.w,numpy.diagonal(self.J))
                elif type(self.J).__name__ == 'SparseMatrix':
                    # drop tolerance scaled to the smallest residual entry
                    dtol = min(numpy.absolute(r))*1.0e-8
                    csmoothers.gauss_seidel_NR_prepare(self.J,self.w,dtol,self.M)
            if type(self.J).__name__ == 'ndarray':
                self.du[:]=0.0
                # forward sweep
                for i in range(self.F.dim):
                    rhat = r[i]
                    for j in self.connectionList[i]:
                        rhat -= self.J[j,i]*self.du[j]
                    self.du[i] = self.M[i]*rhat
                if self.sym == True:
                    u-= self.du
                    self.computeResidual(u,r,b)
                    self.du[:]=0.0
                    # backward sweep.  bugfix: the original referenced the
                    # undefined names self.n and self.r here; the loop bound
                    # and residual now mirror the forward sweep.
                    # NOTE(review): the forward sweep indexes J[j,i] while
                    # this sweep indexes J[i,j] -- kept as written; confirm
                    # whether the asymmetry is intentional.
                    for i in range(self.F.dim-1,-1,-1):
                        rhat = r[i]
                        for j in self.connectionList[i]:
                            rhat -= self.J[i,j]*self.du[j]
                        self.du[i] = self.M[i]*rhat
            elif type(self.J).__name__ == "SparseMatrix":
                csmoothers.gauss_seidel_NR_solve(self.J,self.M,r,self.node_order,self.du)
            u -= self.du
            self.computeResidual(u,r,b)
        # the loop has no break, so the original while/else was equivalent
        # to a plain return after the loop
        return self.failedFlag
class NLStarILU(NonlinearSolver):
    """
    Nonlinear alternating Schwarz on node stars.

    For each dof i a small subdomain problem is built from i and its
    connections (the "star"); subdomain corrections are solved with LU
    (dense Jacobian) or the csmoothers ASM kernels (SparseMatrix) and
    accumulated into du with relaxation weight w.
    """
    def __init__(self,
                 connectionList,
                 F,J,du,
                 weight=1.0,
                 sym=False,
                 rtol_r = 1.0e-4,
                 atol_r = 1.0e-16,
                 rtol_du = 1.0e-4,
                 atol_du = 1.0e-16,
                 maxIts = 100,
                 norm = l2Norm,
                 convergenceTest = 'r',
                 computeRates = True,
                 printInfo = True,
                 fullNewton=True):
        NonlinearSolver.__init__(self,
                                 F,J,du,
                                 rtol_r,
                                 atol_r,
                                 rtol_du,
                                 atol_du,
                                 maxIts,
                                 norm,
                                 convergenceTest,
                                 computeRates,
                                 printInfo)
        self.linearSolver = LinearSolver(J)#dummy
        self.updateJacobian=True
        self.fullNewton=fullNewton
        self.w=weight
        self.sym=sym
        if type(self.J).__name__ == 'ndarray':
            # Build per-star bookkeeping: subdomain i has local index 0 for
            # the star center i and 1..n for its neighbors (sorted).
            self.connectionList=connectionList
            self.subdomainIndecesList=[]
            self.subdomainSizeList=[]
            self.subdomainJ=[]
            self.subdomainR=[]
            self.subdomainDU=[]
            self.subdomainSolvers=[]
            self.globalToSubdomain=[]
            for i in range(self.F.dim):
                self.subdomainIndecesList.append([])
                connectionList[i].sort()
                # global dof j -> local index J+1 (center i -> 0)
                self.globalToSubdomain.append(dict([(j,J+1) for J,j in
                                                    enumerate(connectionList[i])]))
                self.globalToSubdomain[i][i]=0
                nSubdomain = len(connectionList[i])+1
                self.subdomainR.append(Vec(nSubdomain))
                self.subdomainDU.append(Vec(nSubdomain))
                self.subdomainSizeList.append(len(connectionList[i]))
                self.subdomainJ.append(Mat(nSubdomain,nSubdomain))
                for J,j in enumerate(connectionList[i]):
                    # dofs shared by the stars of i and j (the overlap whose
                    # Jacobian entries fill column J+1)
                    self.subdomainIndecesList[i].append(set(connectionList[i]) &
                                                        set(connectionList[j]))
                    self.subdomainIndecesList[i][J].update([i,j])
        elif type(self.J).__name__ == 'SparseMatrix':
            self.node_order=numpy.arange(self.F.dim-1,-1,-1,dtype='i')
            # NOTE(review): self.csmoothers is used here (and in
            # prepareSubdomains/solve) while NLJacobi/NLGaussSeidel use the
            # module-level csmoothers -- presumably set on the base class;
            # confirm it is bound before these calls.
            self.asmFactorObject = self.csmoothers.ASMFactor(self.J)
    def prepareSubdomains(self):
        """
        (Re)assemble and factor the per-star subdomain Jacobians from the
        current global Jacobian self.J.
        """
        if type(self.J).__name__ == 'ndarray':
            self.subdomainSolvers=[]
            for i in range(self.F.dim):
                self.subdomainJ[i][0,0] = self.J[i,i]
                for J,j in enumerate(self.connectionList[i]):
                    #first do row 0 (star center)
                    self.subdomainJ[i][J+1,0] = self.J[j,i]
                    #now do boundary rows
                    for k in self.subdomainIndecesList[i][J]:
                        K = self.globalToSubdomain[i][k]
                        self.subdomainJ[i][K,J+1]=self.J[k,j]
                self.subdomainSolvers.append(LU(self.subdomainJ[i]))
                self.subdomainSolvers[i].prepare()
        elif type(self.J).__name__ == 'SparseMatrix':
            self.csmoothers.asm_NR_prepare(self.J,self.asmFactorObject)
    def solve(self,u,r=None,b=None,par_u=None,par_r=None):
        """
        Sweep over all node stars until converged or failed.
        Returns self.failedFlag (False on success).
        """
        r=self.solveInitialize(u,r,b)
        while (not self.converged(r) and
               not self.failed()):
            if self.updateJacobian or self.fullNewton:
                self.updateJacobian = False
                self.F.getJacobian(self.J)
                self.prepareSubdomains()
            self.du[:]=0.0
            if type(self.J).__name__ == 'ndarray':
                for i in range(self.F.dim):
                    #load subdomain residual
                    # residual restricted to the star, corrected for the
                    # du contributions already accumulated this sweep
                    self.subdomainR[i][0] = r[i] - self.J[i,i]*self.du[i]
                    for j in self.connectionList[i]:
                        self.subdomainR[i][0] -= self.J[j,i]*self.du[j]
                    for J,j in enumerate(self.connectionList[i]):
                        self.subdomainR[i][J+1]=r[j] - self.J[j,j]*self.du[j]
                        for k in self.connectionList[j]:
                            self.subdomainR[i][J+1] -= self.J[k,j]*self.du[k]
                    #solve
                    self.subdomainSolvers[i].solve(u=self.subdomainDU[i],
                                                   b=self.subdomainR[i])
                    #update du
                    self.subdomainDU[i]*=self.w
                    self.du[i]+=self.subdomainDU[i][0]
                    for J,j in enumerate(self.connectionList[i]):
                        self.du[j] += self.subdomainDU[i][J+1]
            elif type(self.J).__name__ == 'SparseMatrix':
                self.csmoothers.asm_NR_solve(self.J,self.w,self.asmFactorObject,self.node_order,r,self.du)
            u -= self.du
            self.computeResidual(u,r,b)
        else:
            # while-else: runs on normal loop exit (no break above)
            return self.failedFlag
class FasTwoLevel(NonlinearSolver):
    """
    A generic nonlinear two-level solver based on the full approximation scheme. (FAS).

    One cycle is: pre-smooth, restrict the fine residual, compute a coarse
    correction (mode selected by self.cError), prolong it back, apply, and
    post-smooth.
    """
    def __init__(self,
                 prolong,
                 restrict,
                 restrictSum,
                 coarseF,
                 preSmoother,
                 postSmoother,
                 coarseSolver,
                 F,J=None,du=None,
                 rtol_r = 1.0e-4,
                 atol_r = 1.0e-16,
                 rtol_du = 1.0e-4,
                 atol_du = 1.0e-16,
                 maxIts = 100,
                 norm = l2Norm,
                 convergenceTest = 'r',
                 computeRates = True,
                 printInfo = True):
        """
        prolong/restrict -- inter-grid transfer operators (matvec interface)
        restrictSum      -- row sums of restrict, used to turn the restricted
                            solution into a weighted average
        coarseF          -- coarse-grid nonlinear operator
        """
        NonlinearSolver.__init__(self,F,J,du,
                                 rtol_r,
                                 atol_r,
                                 rtol_du,
                                 atol_du,
                                 maxIts,
                                 norm,
                                 convergenceTest,
                                 computeRates,
                                 printInfo)
        # placeholder operator with the right shape; this solver never uses
        # a real linear solve at the fine level
        class dummyL(object):
            def __init__(self):
                self.shape = [F.dim,F.dim]
        self.linearSolver = LinearSolver(dummyL())#dummy
        self.prolong = prolong
        self.restrict = restrict
        self.restrictSum = restrictSum
        self.cF = coarseF
        self.preSmoother = preSmoother
        self.postSmoother = postSmoother
        self.coarseSolver = coarseSolver
        # coarse-grid work vectors (coarse dimension = prolong.shape[1])
        self.cb = Vec(prolong.shape[1])
        self.cr = Vec(prolong.shape[1])
        self.crFAS = Vec(prolong.shape[1])
        self.cdu = Vec(prolong.shape[1])
        self.cu = Vec(prolong.shape[1])
        # coarse-error mode: 'linear', 'FAS-chord', or anything else = full FAS
        self.cError = 'FAS-chord'#'linear'#'FAS'#'FAS-chord'#'linear'
    def fullNewtonOff(self):
        # propagate chord-mode (frozen Jacobian) to all sub-solvers
        self.preSmoother.fullNewtonOff()
        self.postSmoother.fullNewtonOff()
        self.coarseSolver.fullNewtonOff()
    def fullNewtonOn(self):
        self.preSmoother.fullNewtonOn()
        self.postSmoother.fullNewtonOn()
        self.coarseSolver.fullNewtonOn()
    def fullResidualOff(self):
        self.preSmoother.fullResidualOff()
        self.postSmoother.fullResidualOff()
        self.coarseSolver.fullResidualOff()
    def fullResidualOn(self):
        self.preSmoother.fullResidualOn()
        self.postSmoother.fullResidualOn()
        self.coarseSolver.fullResidualOn()
    def solve(self,u,r=None,b=None,par_u=None,par_r=None):
        """
        Run two-level FAS cycles until converged or failed.
        Returns self.failedFlag (False on success).
        """
        r=self.solveInitialize(u,r,b)
        while (not self.converged(r) and
               not self.failed()):
            self.preSmoother.solve(u,r,b)
            self.restrict.matvec(r,self.cb)
            if self.cError == 'linear':
                # coarse-grid correction of the error equation with a frozen
                # (linearized) coarse operator and no line search
                self.coarseSolver.fullResidualOff()
                self.coarseSolver.fullNewtonOff()
                lsSave = self.coarseSolver.lineSearch
                self.coarseSolver.lineSearch = False
                self.coarseSolver.solve(u=self.cdu,r=self.crFAS,b=self.cb)
                self.coarseSolver.lineSearch = lsSave
                self.coarseSolver.fullNewtonOn()
                self.coarseSolver.fullResidualOn()
            elif self.cError == 'FAS-chord':
                # FAS right hand side: cb <- R r + F_c(R u); solve with a
                # chord (frozen-Jacobian) coarse Newton, correction = cdu - cu
                self.crFAS[:]= -self.cb
                self.restrict.matvec(u,self.cu)
                #change restriction to
                #be weighted average for u
                for i in range(self.cu.shape[0]):
                    self.cu[i]/=self.restrictSum[i]
                self.cF.getResidual(self.cu,self.cr)
                self.cb+=self.cr
                self.cdu[:]=self.cu
                self.coarseSolver.fullNewtonOff()
                self.coarseSolver.solve(u=self.cdu,r=self.crFAS,b=self.cb)
                self.coarseSolver.fullNewtonOn()
                self.cdu-=self.cu
            else:
                # full FAS: same as FAS-chord but with full coarse Newton
                self.crFAS[:]= -self.cb
                self.restrict.matvec(u,self.cu)
                #change restriction to
                #be weighted average for u
                for i in range(self.cu.shape[0]):
                    self.cu[i]/=self.restrictSum[i]
                self.cF.getResidual(self.cu,self.cr)
                self.cb+=self.cr
                self.cdu[:]=self.cu
                self.coarseSolver.solve(u=self.cdu,r=self.crFAS,b=self.cb)
                self.cdu-=self.cu
            # prolong the coarse correction and apply it on the fine grid
            self.prolong.matvec(self.cdu,self.du)
            u-=self.du
            self.postSmoother.solve(u,r,b)
        else:
            # while-else: runs on normal loop exit (no break above)
            return self.failedFlag
class FAS(object):
    """
    A generic nonlinear multigrid W cycle using the Full Approximation Scheme (FAS).

    Builds one :class:`FasTwoLevel` solver per pair of adjacent levels,
    chaining them so that each two-level solver uses the previously built
    (coarser) solver as its coarse-grid solver.  ``solve`` delegates to the
    finest-level solver.
    """
    def __init__(self,
                 prolongList,
                 restrictList,
                 restrictSumList,
                 FList,
                 preSmootherList,
                 postSmootherList,
                 coarseSolver,
                 mgItsList=None,
                 printInfo=True):
        """
        :param FList: nonlinear operators, coarsest first.
        :param mgItsList: per-level multigrid iteration counts; entry ``i``
            is used by level ``i`` (entry 0 is unused).  Defaults to one
            cycle per level.

        Bug fix: the original signature used the mutable default
        ``mgItsList=[]`` and appended to it, which (a) leaked state across
        calls and (b) appended only a single entry, so ``mgItsList[i]``
        raised IndexError for more than two levels.
        """
        if not mgItsList:
            mgItsList = [1]*len(FList)
        self.solverList=[coarseSolver]
        for i in range(1,len(FList)):
            self.solverList.append(
                FasTwoLevel(prolong = prolongList[i],
                            restrict = restrictList[i],
                            restrictSum = restrictSumList[i],
                            coarseF = FList[i-1],
                            preSmoother = preSmootherList[i],
                            postSmoother = postSmootherList[i],
                            coarseSolver = self.solverList[i-1],
                            F = FList[i],
                            maxIts = mgItsList[i],
                            convergenceTest = 'its',
                            printInfo=False))
        self.fasSolver = self.solverList[len(FList)-1]
    def solve(self,u,r=None,b=None,par_u=None,par_r=None):
        """Run the finest-level FAS solver; return its failure flag."""
        self.fasSolver.solve(u,r,b)
        return self.fasSolver.failedFlag
class MultilevelNonlinearSolver(object):
    """
    A generic multilevel solver.

    Holds one nonlinear solver per mesh level and drives them in order;
    the overall failure flag is that of the finest (last) level solver.
    """
    def __init__(self,
                 fList,
                 levelNonlinearSolverList,
                 computeRates=False,
                 printInfo=False):
        self.fList = fList
        self.solverList = levelNonlinearSolverList
        self.nLevels = len(self.solverList)
        # Push the reporting switches down to every level solver.
        for levelSolver in self.solverList:
            levelSolver.computeRates = computeRates
            levelSolver.printInfo = printInfo
    def solveMultilevel(self,uList,rList,bList=None,par_uList=None,par_rList=None):
        """Solve each level in turn; return the finest level's failure flag."""
        if bList is None:
            bList = [None]*len(rList)
        usePar = par_uList is not None and len(par_uList) > 0
        for lev in range(self.nLevels):
            par_u = par_uList[lev] if usePar else None
            par_r = par_rList[lev] if usePar else None
            logEvent(" NumericalAnalytics Newton iteration for level " + repr(lev), level = 7)
            self.solverList[lev].solve(u = uList[lev],
                                       r = rList[lev],
                                       b = bList[lev],
                                       par_u = par_u,
                                       par_r = par_r)
        return self.solverList[-1].failedFlag
    def updateJacobian(self):
        """Request a Jacobian rebuild on every level solver."""
        for levelSolver in self.solverList:
            levelSolver.updateJacobian = True
    def info(self):
        """Collect a formatted report from every level solver."""
        pieces = ["********************Start Multilevel Nonlinear Solver Info*********************\n"]
        for lev, levelSolver in enumerate(self.solverList):
            pieces.append("**************Start Level %i Info********************\n" % lev)
            pieces.append(levelSolver.info())
            pieces.append("**************End Level %i Info********************\n" % lev)
        pieces.append("********************End Multilevel Nonlinear Solver Info*********************\n")
        self.infoString = "".join(pieces)
        return self.infoString
class NLNI(MultilevelNonlinearSolver):
    """
    Nonlinear nested iteration.

    Solves the coarse levels first and prolongs each coarse solution as the
    initial guess for the next finer level, finishing with a solve on the
    finest level.  The preferred entry point is :meth:`solveMultilevel`;
    :meth:`solve` is marked broken by its author (see notes there).
    """
    def __init__(self,
                 fList=[],
                 solverList=[],
                 prolongList=[],
                 restrictList=[],
                 restrictSumList=[],
                 maxIts = None,
                 tolList=None,
                 atol=None,
                 computeRates=True,
                 printInfo=True):
        # NOTE(review): the mutable list defaults ([]) are shared across
        # calls; they appear safe only because they are never mutated here.
        MultilevelNonlinearSolver.__init__(self,fList,solverList,computeRates,printInfo)
        self.prolongList = prolongList
        self.restrictList = restrictList
        self.restrictSumList = restrictSumList
        self.fineLevel = self.nLevels - 1
        # Map "number of unknowns" -> level index; solve() uses this to
        # discover which level an incoming vector belongs to.
        self.levelDict={}
        # Internal per-level work copies of u, r, b.
        self.uList=[]
        self.rList=[]
        self.bList=[]
        for l in range(self.fineLevel+1):
            n = solverList[l].F.dim
            self.levelDict[n] = l
            self.uList.append(Vec(n))
            self.rList.append(Vec(n))
            self.bList.append(Vec(n))
        self.levelDict[solverList[-1].F.dim]=self.fineLevel
        self.maxIts = maxIts
        self.tolList = tolList
        self.atol = atol
        self.printInfo = printInfo
        self.infoString=''
    def solve(self,u,r=None,b=None,par_u=None,par_r=None):
        #\todo this is broken because prolong isn't right
        # NOTE(review): in addition to the todo above, this method reads
        # self.par_uList / self.par_rList, which are never set on NLNI, so
        # calling it with currentMesh > 0 would raise AttributeError.
        # Prefer solveMultilevel(); confirm before relying on this path.
        currentMesh = self.levelDict[u.shape[0]]
        if currentMesh > 0:
            self.uList[currentMesh][:] = u
            self.rList[currentMesh][:] = r
            self.bList[currentMesh][:] = b
            # Restrict u (as a weighted average) and b down the hierarchy.
            for l in range(currentMesh,1,-1):
                self.restrictList[l].matvec(self.uList[l],self.uList[l-1])
                if b is not None:
                    self.restrictList[l].matvec(self.bList[l],self.bList[l-1])
                for i in range(self.uList[l-1].shape[0]):
                    self.uList[l-1][i]/=self.restrictSumList[l][i]
            # Solve coarse-to-fine, prolonging each solution upward.
            for l in range(currentMesh):
                if self.tolList is not None:
                    self.switchToResidualConvergence(self.solverList[l],
                                                     self.tolList[l])
                self.solverList[l].solve(u=self.uList[l],r=self.rList[l],b=self.bList[l],par_u=self.par_uList[l],par_r=self.par_rList[l])
                if self.tolList is not None:
                    self.revertToFixedIteration(self.solverList[l])
                if l < currentMesh -1:
                    self.prolongList[l+1].matvec(self.uList[l],self.uList[l+1])
                else:
                    self.prolongList[l+1].matvec(self.uList[l],u)
        # Final solve on the requested level itself.
        if self.tolList is not None:
            self.switchToResidualConvergence(self.solverList[currentMesh],
                                             self.tolList[currentMesh])
        self.solverList[currentMesh].solve(u,r,b)
        if self.tolList is not None:
            self.revertToFixedIteration(self.solverList[currentMesh])
        return self.solverList[currentMesh].failedFlag
    def solveMultilevel(self,uList,rList,bList=None,par_uList=None,par_rList=None):
        """Nested iteration over the whole hierarchy using the callers' vectors.

        NOTE(review): unlike the base class, par_uList/par_rList are indexed
        without a None-check here, so parallel list arguments are effectively
        required by the finest-level solve below -- confirm against callers.
        """
        if bList is None:
            bList = [None for r in rList]
        self.infoString="********************Start Multilevel Nonlinear Solver Info*********************\n"
        for l in range(self.fineLevel):
            if self.tolList is not None:
                self.switchToResidualConvergence(self.solverList[l],self.tolList[l])
            self.solverList[l].solve(u=uList[l],r=rList[l],b=bList[l],par_u=par_uList[l],par_r=par_rList[l])
            self.infoString+="****************Start Level %i Info******************\n" %l
            self.infoString+=self.solverList[l].info()
            self.infoString+="****************End Level %i Info******************\n" %l
            if self.tolList is not None:
                self.revertToFixedIteration(self.solverList[l])
            #\todo see if there's a better way to do this
            #copy user u,r into internal
            #because fas will over write u and r with ones
            #that aren't solutions and we need to save
            #the true solutions on this level
            self.uList[l][:]=uList[l]
            self.rList[l][:]=rList[l]
            # NOTE(review): prolongList is iterated as a dict of per-component
            # operators here, but indexed as a flat list in solve() above --
            # the two methods appear to expect different container types.
            for ci,p in self.prolongList.items():
                p[l+1].matvec(self.solverList[l].F.u[ci].dof,self.solverList[l+1].F.u[ci].dof)
            self.solverList[l+1].F.setFreeDOF(uList[l+1])
        if self.tolList is not None:
            self.switchToResidualConvergence(self.solverList[self.fineLevel],self.tolList[self.fineLevel])
        self.solverList[self.fineLevel].solve(u=uList[self.fineLevel],
                                              r=rList[self.fineLevel],
                                              b=bList[self.fineLevel],
                                              par_u=par_uList[self.fineLevel],
                                              par_r=par_rList[self.fineLevel])
        self.infoString+="****************Start Level %i Info******************\n" %self.fineLevel
        self.infoString+=self.solverList[self.fineLevel].info()
        self.infoString+="****************End Level %i Info******************\n" %self.fineLevel
        if self.tolList is not None:
            self.revertToFixedIteration(self.solverList[self.fineLevel])
        #reset u and r on other levels:
        for l in range(self.fineLevel):
            self.fList[l].setUnknowns(self.uList[l])
            rList[l][:]=self.rList[l]
        self.infoString+="********************End Multilevel Nonlinear Solver Info*********************\n"
        return self.solverList[-1].failedFlag
    def info(self):
        """Return the report accumulated by the last solveMultilevel call."""
        return self.infoString
    def switchToResidualConvergence(self,solver,rtol):
        # Temporarily put 'solver' in residual-convergence mode, saving its
        # current settings so revertToFixedIteration can restore them.
        self.saved_ctest = solver.convergenceTest
        self.saved_rtol_r = solver.rtol_r
        self.saved_atol_r = solver.atol_r
        self.saved_maxIts = solver.maxIts
        self.saved_printInfo = solver.printInfo
        solver.convergenceTest = 'r'
        solver.rtol_r = rtol
        solver.atol_r = self.atol
        solver.maxIts = self.maxIts
        solver.printInfo = self.printInfo
    def revertToFixedIteration(self,solver):
        # Restore the settings saved by switchToResidualConvergence.
        solver.convergenceTest = self.saved_ctest
        solver.rtol_r = self.saved_rtol_r
        solver.atol_r = self.saved_atol_r
        solver.maxIts = self.saved_maxIts
        solver.printInfo = self.saved_printInfo
def multilevelNonlinearSolverChooser(nonlinearOperatorList,
                                     jacobianList,
                                     par_jacobianList,
                                     duList=None,
                                     par_duList=None,
                                     relativeToleranceList=None,
                                     absoluteTolerance=1.0e-8,
                                     multilevelNonlinearSolverType = NLNI,#Newton,NLJacobi,NLGaussSeidel,NLStarILU,
                                     computeSolverRates=False,
                                     printSolverInfo=False,
                                     linearSolverList=None,
                                     linearDirectSolverFlag=False,
                                     solverFullNewtonFlag=True,
                                     maxSolverIts=500,
                                     solverConvergenceTest='r',
                                     levelNonlinearSolverType=Newton,#NLJacobi,NLGaussSeidel,NLStarILU
                                     levelSolverFullNewtonFlag=True,
                                     levelSolverConvergenceTest='r',
                                     computeLevelSolverRates=False,
                                     printLevelSolverInfo=False,
                                     relaxationFactor=None,
                                     connectionListList=None,
                                     smootherType = 'Jacobi',
                                     prolong_bcList = None,
                                     restrict_bcList = None,
                                     restrict_bcSumList = None,
                                     prolongList = None,
                                     restrictList = None,
                                     restrictionRowSumList = None,
                                     preSmooths=3,
                                     postSmooths=3,
                                     cycles=3,
                                     smootherConvergenceTest='its',
                                     computeSmootherRates=False,
                                     printSmootherInfo=False,
                                     smootherFullNewtonFlag=True,
                                     computeCoarseSolverRates=False,
                                     printCoarseSolverInfo=False,
                                     EWtol=True,
                                     maxLSits=100,
                                     parallelUsesFullOverlap = True,
                                     nonlinearSolverNorm = l2Norm):
    """Construct a multilevel nonlinear solver from the given operators.

    Builds one nonlinear solver per level (FAS, a Newton variant, or a
    nonlinear relaxation) and wraps them in the requested multilevel driver
    (NLNI or MultilevelNonlinearSolver).  Returns the multilevel solver.

    Bug fixes relative to the original:
      * the NLJacobi post-smoother and coarse solver were constructed with
        ``convergenceTest=smootherFullNewtonFlag`` (a bool) instead of
        ``smootherConvergenceTest``;
      * both "unknown solver type" errors concatenated a class object onto a
        string, which raised TypeError and masked the intended message.
    """
    # --- Resolve the per-level solver type --------------------------------
    # These special solver types are always used as-is; otherwise, when the
    # multilevel type is itself a per-level iteration, the level solver
    # inherits that type (preserves the original if/elif chain's semantics).
    explicitLevelSolverTypes = (TwoStageNewton,
                                ExplicitLumpedMassMatrixShallowWaterEquationsSolver,
                                ExplicitConsistentMassMatrixShallowWaterEquationsSolver,
                                ExplicitLumpedMassMatrix,
                                ExplicitConsistentMassMatrixWithRedistancing,
                                ExplicitConsistentMassMatrixForVOF,
                                NewtonWithL2ProjectionForMassCorrection,
                                CLSVOFNewton)
    if (levelNonlinearSolverType not in explicitLevelSolverTypes and
        multilevelNonlinearSolverType in (Newton,
                                          NLJacobi,
                                          NLGaussSeidel,
                                          NLStarILU)):
        levelNonlinearSolverType = multilevelNonlinearSolverType
    nLevels = len(nonlinearOperatorList)
    multilevelNonlinearSolver = None
    levelNonlinearSolverList = []

    def _newtonLevelSolver(l):
        # Construct one Newton-type level solver; every Newton variant in
        # this chooser shares exactly this keyword-argument set.
        if par_duList is not None and len(par_duList) > 0:
            par_du = par_duList[l]
        else:
            par_du = None
        return levelNonlinearSolverType(linearSolver=linearSolverList[l],
                                        F=nonlinearOperatorList[l],
                                        J=jacobianList[l],
                                        du=duList[l],
                                        par_du=par_du,
                                        rtol_r=relativeToleranceList[l],
                                        atol_r=absoluteTolerance,
                                        maxIts=maxSolverIts,
                                        norm=nonlinearSolverNorm,
                                        convergenceTest=levelSolverConvergenceTest,
                                        computeRates=computeLevelSolverRates,
                                        printInfo=printLevelSolverInfo,
                                        fullNewton=levelSolverFullNewtonFlag,
                                        directSolver=linearDirectSolverFlag,
                                        EWtol=EWtol,
                                        maxLSits=maxLSits)

    if levelNonlinearSolverType == FAS:
        if smootherType not in (NLJacobi, NLGaussSeidel, NLStarILU):
            raise RuntimeError("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!smootherType unrecognized")
        # Default relaxation weights if none was supplied.  The coarse level
        # (l == 0) is constructed first, so its default wins for all levels,
        # matching the original control flow.
        smootherWeight = {NLJacobi: 2.0/5.0, NLGaussSeidel: 3.0/5.0, NLStarILU: 2.0/5.0}
        coarseWeight = {NLJacobi: 4.0/5.0, NLGaussSeidel: 3.0/5.0, NLStarILU: 2.0/5.0}
        # Historical quirk preserved: the coarse NLStarILU uses preSmooths
        # iterations while the other coarse solvers use postSmooths.
        coarseMaxIts = {NLJacobi: postSmooths, NLGaussSeidel: postSmooths, NLStarILU: preSmooths}

        def _smoother(l, maxIts, coarse):
            # Build one smoother (or, with coarse=True, the coarsest-grid
            # solver, which additionally receives the nonlinear solver norm).
            kwargs = dict(F=nonlinearOperatorList[l],
                          J=jacobianList[l],
                          du=duList[l],
                          weight=relaxationFactor,
                          maxIts=maxIts,
                          convergenceTest=smootherConvergenceTest,
                          computeRates=computeSmootherRates,
                          printInfo=printSmootherInfo,
                          fullNewton=smootherFullNewtonFlag)
            if smootherType is not NLJacobi:
                kwargs['connectionList'] = connectionListList[l]
            if coarse:
                kwargs['norm'] = nonlinearSolverNorm
            return smootherType(**kwargs)

        preSmootherList = []
        postSmootherList = []
        mgItsList = []
        for l in range(nLevels):
            mgItsList.append(cycles)
            if l > 0:
                if relaxationFactor is None:
                    relaxationFactor = smootherWeight[smootherType]
                preSmootherList.append(_smoother(l, preSmooths, coarse=False))
                postSmootherList.append(_smoother(l, postSmooths, coarse=False))
            else:
                # Coarsest level: no smoothing there, only the coarse solver.
                if relaxationFactor is None:
                    relaxationFactor = coarseWeight[smootherType]
                coarseSolver = _smoother(l, coarseMaxIts[smootherType], coarse=True)
                preSmootherList.append([])
                postSmootherList.append([])
        levelNonlinearSolver = FAS(prolongList = prolongList,
                                   restrictList = restrictList,
                                   restrictSumList = restrictionRowSumList,
                                   FList = nonlinearOperatorList,
                                   preSmootherList = preSmootherList,
                                   postSmootherList = postSmootherList,
                                   coarseSolver = coarseSolver,
                                   mgItsList = mgItsList,
                                   printInfo=printLevelSolverInfo)
        levelNonlinearSolverList = levelNonlinearSolver.solverList
    elif levelNonlinearSolverType in (Newton,
                                      NewtonNS,
                                      SSPRKNewton,
                                      AddedMassNewton,
                                      MoveMeshMonitorNewton,
                                      POD_Newton,
                                      POD_DEIM_Newton):
        for l in range(nLevels):
            levelNonlinearSolverList.append(_newtonLevelSolver(l))
    elif levelNonlinearSolverType == NLJacobi:
        if relaxationFactor is None:
            relaxationFactor = 4.0/5.0
        for l in range(nLevels):
            levelNonlinearSolverList.append(NLJacobi(F=nonlinearOperatorList[l],
                                                     J=jacobianList[l],
                                                     du=duList[l],
                                                     rtol_r=relativeToleranceList[l],
                                                     atol_r=absoluteTolerance,
                                                     norm = nonlinearSolverNorm,
                                                     maxIts=maxSolverIts,
                                                     convergenceTest = levelSolverConvergenceTest,
                                                     weight=relaxationFactor,
                                                     computeRates = computeLevelSolverRates,
                                                     printInfo=printLevelSolverInfo,
                                                     fullNewton=levelSolverFullNewtonFlag))
    elif levelNonlinearSolverType == NLGaussSeidel:
        if relaxationFactor is None:
            relaxationFactor = 4.0/5.0
        for l in range(nLevels):
            # Historical quirk preserved: NLGaussSeidel is not passed
            # norm=nonlinearSolverNorm here, unlike its siblings.
            levelNonlinearSolverList.append(NLGaussSeidel(F=nonlinearOperatorList[l],
                                                          J=jacobianList[l],
                                                          du=duList[l],
                                                          connectionList = connectionListList[l],
                                                          rtol_r=relativeToleranceList[l],
                                                          atol_r=absoluteTolerance,
                                                          maxIts=maxSolverIts,
                                                          convergenceTest = levelSolverConvergenceTest,
                                                          weight=relaxationFactor,
                                                          computeRates = computeLevelSolverRates,
                                                          printInfo=printLevelSolverInfo,
                                                          fullNewton=levelSolverFullNewtonFlag))
    elif levelNonlinearSolverType == NLStarILU:
        if relaxationFactor is None:
            relaxationFactor = 3.0/5.0
        for l in range(nLevels):
            levelNonlinearSolverList.append(NLStarILU(F=nonlinearOperatorList[l],
                                                      J=jacobianList[l],
                                                      du=duList[l],
                                                      connectionList = connectionListList[l],
                                                      rtol_r=relativeToleranceList[l],
                                                      atol_r=absoluteTolerance,
                                                      maxIts=maxSolverIts,
                                                      norm = nonlinearSolverNorm,
                                                      convergenceTest = levelSolverConvergenceTest,
                                                      weight=relaxationFactor,
                                                      computeRates = computeLevelSolverRates,
                                                      printInfo=printLevelSolverInfo,
                                                      fullNewton=levelSolverFullNewtonFlag))
    else:
        # Last resort: assume a Newton-like constructor signature and report
        # clearly if construction fails for any reason (broad catch kept for
        # backward compatibility with the original behavior).
        try:
            for l in range(nLevels):
                levelNonlinearSolverList.append(_newtonLevelSolver(l))
        except Exception:
            raise RuntimeError("Unknown level nonlinear solver %r" % (levelNonlinearSolverType,))
    # --- Assemble the multilevel driver -----------------------------------
    if multilevelNonlinearSolverType == NLNI:
        multilevelNonlinearSolver = NLNI(fList = nonlinearOperatorList,
                                         solverList = levelNonlinearSolverList,
                                         prolongList = prolong_bcList,
                                         restrictList = restrict_bcList,
                                         restrictSumList = restrict_bcSumList,
                                         maxIts = maxSolverIts,
                                         tolList = relativeToleranceList,
                                         atol=absoluteTolerance,
                                         computeRates = computeSolverRates,
                                         printInfo=printSolverInfo)
    elif multilevelNonlinearSolverType in (Newton,
                                           AddedMassNewton,
                                           MoveMeshMonitorNewton,
                                           POD_Newton,
                                           POD_DEIM_Newton,
                                           NewtonNS,
                                           NLJacobi,
                                           NLGaussSeidel,
                                           NLStarILU):
        multilevelNonlinearSolver = MultilevelNonlinearSolver(nonlinearOperatorList,
                                                              levelNonlinearSolverList,
                                                              computeRates = computeSolverRates,
                                                              printInfo = printSolverInfo)
    else:
        raise RuntimeError("!!!!!!!!!!!!!!!!!!!!!!!!!Unknown multilevelNonlinearSolverType %r"
                           % (multilevelNonlinearSolverType,))
    # Minimal parallel configuration: every level solver needs to know
    # whether parallel storage uses full overlap.
    for levelSolver in multilevelNonlinearSolver.solverList:
        levelSolver.par_fullOverlap = parallelUsesFullOverlap
    return multilevelNonlinearSolver
| mit | b364070158a8a7e0f44b7eda304a69c2 | 48.474656 | 173 | 0.480832 | 3.976376 | false | false | false | false |
erdc/proteus | scripts/UTEPblock.py | 1 | 1592 | #!/usr/bin/env python
from __future__ import division
from builtins import range
from past.utils import old_div
import numpy
Lx=1.0; Ly=1.0;
nx=128; ny=128;
#nx=16; ny=16;
dx=old_div(Lx,nx); dy = old_div(Ly,ny)
vertices = []
for j in range(ny+1):
for i in range(nx+1):
vertices.append((0.0+dx*i,0.0+dy*j))
#
#
nVertices = len(vertices)
assert nVertices == (nx+1)*(ny+1)
base = 1
polyfile = open('UTEPexample.poly','w')
polyfile.write('#poly file for UTEP example 128 x 128 \n')
polyfile.write('%d %d %d %d \n' % (nVertices,2,1,0))
polyfile.write('#vertices \n')
for iv in range(len(vertices)):
polyfile.write('%d %12.5e %12.5e %d \n' % (iv+base,vertices[iv][0],vertices[iv][1],1))
#
#write a segment for each edge
nSegments = ny*(nx+1) + nx*(ny+1)
polyfile.write('%d %d \n' % (nSegments,1))
polyfile.write('#segments \n')
curSeg = 0
for j in range(ny+1): #horizontal edges first
for i in range(nx):
vl = i + j*(ny+1); vr = i+1 + j*(ny+1)
polyfile.write('%d %d %d %d \n ' % (curSeg+base,vl+base,vr+base,1))
curSeg += 1
#
#
for i in range(nx+1): #vertical edges
for j in range(ny):
vb = i + j*(ny+1); vt = i + (j+1)*(ny+1)
polyfile.write('%d %d %d %d \n ' % (curSeg+base,vb+base,vt+base,1))
curSeg += 1
#
#
polyfile.write('#holes\n 0\n')
polyfile.write('#regions\n')
nRegions = nx*ny
polyfile.write('%d \n' % nRegions)
curRegion = 0
for j in range(ny):
for i in range(nx):
polyfile.write('%d %12.5e %12.5e %d \n' % (curRegion+base,0.+(i+0.5)*dx,0.+(j+0.5)*dy,curRegion))
curRegion +=1
#
| mit | 2e4b7dfc9515bfac0ff446079786fc8a | 26.929825 | 105 | 0.593593 | 2.441718 | false | false | false | false |
erdc/proteus | proteus/tests/POD/POD.py | 1 | 4974 | #!/usr/bin/env python
"""
Proper orthogonal decomposition for the heat equation solver
"""
from __future__ import print_function
from __future__ import division
from builtins import range
from past.utils import old_div
from heat_init import *
from read_hdf5 import *
#we construct this NS object again ONLY to have access to the load vector at each time step
physics.name = "heat_3d_reduction"
so.name = physics.name
ns = NumericalSolution.NS_base(so,[physics],[numerics],so.sList,opts)
save_projected_soln = True #do we save the projection of the reduced order solution on the fine grid in xmf?
#doing reduction in the remaining part of the code
import petsc4py
from petsc4py import PETSc
from petsc4py.PETSc import Mat
import numpy as np
DB = 4 #the number of basis vectors! Increase or decrease as you wish...
U = np.loadtxt('SVD_basis')
U = U[:,0:DB]
U_transpose = U.conj().T
#load mass matrix and reduce it
rp = np.loadtxt('iam',int)
rp = rp.astype(petsc4py.PETSc.IntType)
ci = np.loadtxt('jam',int)
ci = ci.astype(petsc4py.PETSc.IntType)
nz = np.loadtxt('am')
nr = rp.shape[0] - 1
nc = nr
M = Mat().createAIJ(size=(nr,nc),csr=(rp,ci,nz))
M_intermediate = np.zeros((nr,DB),'d')
for i in range(0,nr):
res = M.getRow(i)
M_intermediate[i,:] = np.dot(res[1],U[res[0],:])
M_pod = np.dot(U_transpose, M_intermediate)
np.savetxt('M_pod', M_pod, delimiter=' ')
#load stiffness matrix and reduce it
rp = np.loadtxt('ias',int)
rp = rp.astype(petsc4py.PETSc.IntType)
ci = np.loadtxt('jas',int)
ci = ci.astype(petsc4py.PETSc.IntType)
nz = np.loadtxt('as')
S = Mat().createAIJ(size=(nr,nc),csr=(rp,ci,nz))
S_intermediate = np.zeros((nr,DB),'d')
for i in range(0,nr):
res = S.getRow(i)
S_intermediate[i,:] = np.dot(res[1],U[res[0],:])
S_pod = np.dot(U_transpose, S_intermediate)
np.savetxt('S_pod', M_pod, delimiter=' ')
del M_intermediate
del S_intermediate
K = M_pod + DT*S_pod #generalized mass matrix
K = np.linalg.inv(K) #its inverse
archive = Archiver.XdmfArchive(".","heat_3d",readOnly=True)
#reading initial condition
u = read_from_hdf5(archive.hdfFile,'/u0')
u_pod = np.dot(U_transpose,u)
f = np.zeros((nr,),'d') #rhs vector
Model = ns.modelList[0].levelModelList[-1]
ns.tCount=0
if save_projected_soln:
Model.u[0].dof[:] = u
for index,model in enumerate(ns.modelList):
ns.archiveInitialSolution(model,index)
import time
start = time.time()
#Backward Euler time loop
for i in range(1,nDTout+1):
t = i*DT
Model.timeIntegration.t = t
Model.getLoadVector(f)
f_pod = np.dot(U_transpose,f)
rhs = np.dot(M_pod,u_pod)-DT*f_pod
u_pod = np.dot(K,rhs)
u_approx = np.dot(U,u_pod)
ns.tCount += 1
ns.tn_last = t
if save_projected_soln:
Model.u[0].dof[:] = u_approx
for index,model in enumerate(ns.modelList):
ns.archiveSolution(model,index,t)
if (i == 50):
U_approx = u_approx
label="/%s%d" % ('u',i)
print('trying to read from %s ' % label)
u = read_from_hdf5(archive.hdfFile,label)
err = u-u_approx
err *= err
err *= old_div(1.0,9261.0)
L2approx = np.sqrt(err.sum())
print('error is %s ' % L2approx)
end = time.time() # we measure time required to obtain the reduced solution
elapsed = end - start
print('time required was %s seconds' % (end-start))
#arrays for using matplotlib's unstructured plotting interface
u_range = U_approx[4410:4851]
u_range = u_range.reshape(21,21)
#x and y are needed for plotting only, z = 0.5
label="/%s%d" % ('nodesSpatial_Domain',0)
print('trying to read from %s ' % label)
coord = read_from_hdf5(archive.hdfFile,label)
x = coord[4410:4851,0]
y = coord[4410:4851,1]
x = x.reshape(21,21)
y = y.reshape(21,21)
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
fig = plt.figure()
ax = fig.gca(projection='3d')
surf=ax.plot_surface(x, y, u_range, rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0, antialiased=False)
plt.xlabel('x'); plt.ylabel('y')
plt.title('reduced solution at t = 0.5');
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.05f'))
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.savefig("solution_reduced_t0_5.png")
plt.show()
#also look at final time step
u_range = u_approx[4410:4851]
u_range = u_range.reshape(21,21)
fig.clf()
ax = fig.gca(projection='3d')
surf=ax.plot_surface(x, y, u_range, rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0, antialiased=False)
plt.xlabel('x'); plt.ylabel('y')
plt.title('reduced solution at t = %s' % (nDTout*DT));
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.05f'))
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.savefig("solution_reduced_final.png")
plt.show()
#cleanup numerical solution files since calculateSolution wasn't called
for index,model in enumerate(ns.modelList):
ns.closeArchive(model,index)
| mit | e630611ffbd97c8e8c78ab9ef0618577 | 26.94382 | 108 | 0.6924 | 2.738987 | false | false | false | false |
erdc/proteus | proteus/tests/periodic/test_periodic.py | 1 | 5314 | #!/usr/bin/env python
"""
Test module for periodic boundary conditions and null space class.
"""
import proteus.test_utils.TestTools as TestTools
from proteus.iproteus import *
Profiling.logLevel = 7
Profiling.verbose = True
import os
import sys
import inspect
import numpy as np
import tables
import pickle
import petsc4py
import pytest
from . import duct
@pytest.fixture()
def load_periodic_duct(request):
    """Load the duct split-operator system plus its physics/numerics modules.

    Yields ``(so, pList, nList, sList)`` ready to be handed to
    ``NumericalSolution.NS_base``.
    """
    import importlib
    nList = []
    pList = []
    sList = []
    # Fix: 'reload' is not a builtin in Python 3; use importlib.reload so the
    # duct module re-evaluates the current context options on every use.
    importlib.reload(duct)
    script_dir = os.path.dirname(__file__)
    so = proteus.defaults.load_system('duct',
                                      path = script_dir)
    for (pModule,nModule) in so.pnList:
        if not isinstance(pModule, proteus.defaults.Physics_base):
            pList.append(proteus.defaults.load_physics(pModule))
            if pList[-1].name is None:
                pList[-1].name = pModule
            nList.append(proteus.defaults.load_numerics(nModule))
        else:
            pList.append(pModule)
            nList.append(nModule)
    if so.sList == []:
        # No per-model simulation flags supplied: use the defaults.
        for i in range(len(so.pnList)):
            sList.append(default_s)
    else:
        sList = so.sList
    yield so, pList, nList, sList
@pytest.fixture()
def load_periodic_opts_2D(request):
    """Configure context and PETSc options for the 2D periodic duct run
    (Schur selfp_petsc preconditioner with SuperLU)."""
    opts.contextOptions = "periodic=True grid=True nd=2 nnx=42 triangles=False spaceOrder=1 weak=True coord=True pc_type='selfp_petsc'"
    proteus.Context.contextOptionsString = opts.contextOptions
    opts.petscOptionsFile = os.path.join(os.path.dirname(__file__),
                                         'petsc/petsc.options.schur.selfp_petsc.superlu')
    proteus.Comm.argv = TestTools.fixture_set_petsc_options_from_file(opts.petscOptionsFile)
    Comm.init()
@pytest.fixture()
def load_periodic_opts_3D(request):
    """Configure context and PETSc options for the 3D periodic duct run
    (Schur selfp_petsc preconditioner with SuperLU)."""
    opts.contextOptions = "periodic=True nd=3 coord=True pc_type='selfp_petsc'"
    proteus.Context.contextOptionsString = opts.contextOptions
    opts.petscOptionsFile = os.path.join(os.path.dirname(__file__),
                                         'petsc/petsc.options.schur.selfp_petsc.superlu')
    proteus.Comm.argv = TestTools.fixture_set_petsc_options_from_file(opts.petscOptionsFile)
    Comm.init()
@pytest.fixture()
def load_periodic_opts_3D_T2(request):
    """Configure context and PETSc options for the 3D periodic duct run with
    an AMG-based block preconditioner for the velocity block."""
    opts.contextOptions = "periodic=True nd=3 coord=True pc_type='selfp_petsc' A_block_AMG=True"
    proteus.Context.contextOptionsString = opts.contextOptions
    opts.petscOptionsFile = os.path.join(os.path.dirname(__file__),
                                         'petsc/petsc.options.schur.selfp_petsc.amg')
    proteus.Comm.argv = TestTools.fixture_set_petsc_options_from_file(opts.petscOptionsFile)
    Comm.init()
def test_periodic_2D(load_periodic_opts_2D,
                     load_periodic_duct):
    """Solve the 2D periodic duct problem and compare velocity_t25 to a stored reference."""
    solver = NumericalSolution.NS_base(*load_periodic_duct, opts)
    solver.calculateSolution('test_run')
    here = os.path.dirname(__file__)
    result_file = tables.open_file('ductq1t12dpghe0.0975609756097561.h5')
    reference = os.path.join(
        here,
        'comparison_files/comparison_ductq1t12dpghe0.0975609756097561_velocity_t25.csv')
    expected = np.fromfile(reference, sep=",")
    computed = np.array(result_file.root.velocity_t25).flatten()
    np.testing.assert_almost_equal(expected, computed, decimal=7)
    result_file.close()
def test_periodic_3D(load_periodic_opts_3D,
                     load_periodic_duct):
    """Solve the 3D periodic duct problem and compare velocity_t25 to a stored reference."""
    solver = NumericalSolution.NS_base(*load_periodic_duct, opts)
    solver.calculateSolution('test_run')
    here = os.path.dirname(__file__)
    result_file = tables.open_file('ductp1t13dpghe0.4.h5')
    reference = os.path.join(
        here,
        'comparison_files/comparison_ductp1t13dpghe0.4_velocity_t25.csv')
    expected = np.fromfile(reference, sep=",")
    computed = np.array(result_file.root.velocity_t25).flatten()
    np.testing.assert_almost_equal(expected, computed, decimal=10)
    result_file.close()
def test_periodic_3D_amg(load_periodic_opts_3D_T2,
                         load_periodic_duct):
    """3D periodic duct with the AMG preconditioner variant; compare velocity_t25."""
    solver = NumericalSolution.NS_base(*load_periodic_duct, opts)
    solver.calculateSolution('test_run')
    here = os.path.dirname(__file__)
    result_file = tables.open_file('ductp1t13dpghe0.4.h5')
    reference = os.path.join(
        here,
        'comparison_files/comparison_ductp1t13dpghe0.4_velocity_t25.csv')
    expected = np.fromfile(reference, sep=",")
    computed = np.array(result_file.root.velocity_t25).flatten()
    np.testing.assert_almost_equal(expected, computed, decimal=6)
    result_file.close()
| mit | c4e5802e5fc9457761cb3d6f1ad41b42 | 38.954887 | 152 | 0.633233 | 3.331661 | false | true | false | false |
erdc/proteus | proteus/SWFlow/models/GN_sw_n.py | 1 | 3195 | from __future__ import absolute_import
from proteus import *
from proteus.default_n import *
from GN_sw_p import *
from proteus.Transport import Comm
# *********************************************** #
# *********************************************** #
# ********** Read from mySWFlowProblem ********** #
# *********************************************** #
# READ FROM CONTEXT #
# mySWFlowProblem is provided by the physics module (GN_sw_p imported above);
# these module-level names form the numerics interface that proteus reads.
runCFL = mySWFlowProblem.cfl
FESpace = mySWFlowProblem.FESpace
he = mySWFlowProblem.he
useSuperlu = mySWFlowProblem.useSuperlu
domain = mySWFlowProblem.domain
SSPOrder = mySWFlowProblem.swe_parameters['SSPOrder']
LUMPED_MASS_MATRIX = mySWFlowProblem.swe_parameters['LUMPED_MASS_MATRIX']
auxiliaryVariables = mySWFlowProblem.auxiliaryVariables
# *************************************** #
# ********** MESH CONSTRUCTION ********** #
# *************************************** #
# Structured mesh parameters are only meaningful when a domain is given.
if domain is not None:
    triangleFlag = mySWFlowProblem.triangleFlag
    nnx = mySWFlowProblem.nnx
    nny = mySWFlowProblem.nny
    nnz = mySWFlowProblem.nnz
    triangleOptions = domain.MeshOptions.triangleOptions
# ************************************** #
# ********** TIME INTEGRATION ********** #
# ************************************** #
# Explicit SSP Runge-Kutta of order SSPOrder (RKEV defined in GN_SW2DCV).
timeIntegration = GN_SW2DCV.RKEV
timeOrder = SSPOrder
nStagesTime = SSPOrder
# ****************************************** #
# ********** TIME STEP CONTROLLER ********** #
# ****************************************** #
stepController = Min_dt_controller
# ******************************************* #
# ********** FINITE ELEMENT SAPCES ********** #
# ******************************************* #
elementQuadrature = FESpace['elementQuadrature']
elementBoundaryQuadrature = FESpace['elementBoundaryQuadrature']
# one FE space per each of the six solution components
femSpaces = {0: FESpace['basis'],
             1: FESpace['basis'],
             2: FESpace['basis'],
             3: FESpace['basis'],
             4: FESpace['basis'],
             5: FESpace['basis']}
# ************************************** #
# ********** NONLINEAR SOLVER ********** #
# ************************************** #
multilevelNonlinearSolver = Newton
fullNewtonFlag = False # NOTE: False just if the method is explicit
# if (LUMPED_MASS_MATRIX == 1):
levelNonlinearSolver = ExplicitLumpedMassMatrixShallowWaterEquationsSolver
# else:
#    levelNonlinearSolver = ExplicitConsistentMassMatrixShallowWaterEquationsSolver
# ************************************ #
# ********** NUMERICAL FLUX ********** #
# ************************************ #
# No SUPG-type stabilization or shock capturing for this explicit scheme.
try_supg_stabilization = False
subgridError = None
shockCapturing = None
numericalFluxType = GN_SW2DCV.NumericalFlux
# ************************************ #
# ********** LINEAR ALGEBRA ********** #
# ************************************ #
matrix = SparseMatrix
multilevelLinearSolver = LU
levelLinearSolver = LU
# change solver for parallel runs
comm = Comm.get()
if comm.size() > 1:
    levelLinearSolver = KSP_petsc4py
    multilevelLinearSolver = KSP_petsc4py
    levelNonlinearSolverConvergenceTest = 'r'
    linearSolverConvergenceTest = 'r-true'
# ******************************** #
# ********** TOLERANCES ********** #
# ******************************** #
# nl_*: nonlinear solver residual tolerances; l_*: linear solver tolerances.
nl_atol_res = 1.0e-7
nl_rtol_res = 0.0
l_atol_res = 1.0e-7
l_rtol_res = 0.0
tolFac = 0.0
maxLineSearches = 0
| mit | 23e6e59d5249b36759d9bb24f850afd7 | 32.989362 | 84 | 0.521127 | 3.643101 | false | false | false | false |
erdc/proteus | proteus/mprans/VOF.py | 1 | 72857 | # A type of -*- python -*- file
"""
An optimized volume-of-fluid transport module
"""
from __future__ import absolute_import
from __future__ import division
from builtins import range
from past.utils import old_div
import numpy as np
from math import fabs
import proteus
from proteus import cfemIntegrals, Quadrature, Norms, Comm
from proteus.NonlinearSolvers import NonlinearEquation
from proteus.FemTools import (DOFBoundaryConditions,
FluxBoundaryConditions,
C0_AffineLinearOnSimplexWithNodalBasis)
from proteus.Comm import globalMax
from proteus.Profiling import memory
from proteus.Profiling import logEvent
from proteus.Transport import OneLevelTransport
from proteus.TransportCoefficients import TC_base
from proteus.SubgridError import SGE_base
from proteus.ShockCapturing import ShockCapturing_base
from proteus.LinearAlgebraTools import SparseMat
from proteus.NonlinearSolvers import ExplicitLumpedMassMatrix,ExplicitConsistentMassMatrixForVOF,TwoStageNewton
from proteus import TimeIntegration
from proteus.mprans.cVOF import *
from proteus import *
from proteus.Transport import *
from proteus.Transport import OneLevelTransport
#from . import cVOF3P
from . import cArgumentsDict
class SubgridError(SGE_base):
    """No-op subgrid error estimator for VOF; every hook intentionally does nothing."""
    def __init__(self, coefficients, nd):
        # lag=False: no subgrid-error history is kept
        proteus.SubgridError.SGE_base.__init__(self, coefficients, nd, lag=False)
    def initializeElementQuadrature(self, mesh, t, cq):
        # nothing to precompute
        pass
    def updateSubgridErrorHistory(self, initializationPhase=False):
        # no history to update
        pass
    def calculateSubgridError(self, q):
        # intentionally a no-op
        pass
class ShockCapturing(ShockCapturing_base):
    """Shock capturing for VOF with optional lagged numerical diffusion.

    Lagging cannot start on the very first step (there is no history yet),
    so a requested ``lag=True`` is converted to "off, switch on after
    ``nStepsToDelay`` steps" in :meth:`updateShockCapturingHistory`.
    """
    def __init__(self,
                 coefficients,
                 nd,
                 shockCapturingFactor=0.25,
                 lag=True,
                 nStepsToDelay=None):
        proteus.ShockCapturing.ShockCapturing_base.__init__(self,
                                                            coefficients,
                                                            nd,
                                                            shockCapturingFactor,
                                                            lag)
        self.nStepsToDelay = nStepsToDelay
        self.nSteps = 0
        if self.lag:
            # defer lagging by one step instead of lagging immediately
            logEvent("VOF.ShockCapturing: lagging requested but must lag the first step; switching lagging off and delaying")
            self.nStepsToDelay = 1
            self.lag = False
    def initializeElementQuadrature(self, mesh, t, cq):
        """Bind the numerical-diffusion quadrature arrays for each component."""
        self.mesh = mesh
        self.numDiff = []
        self.numDiff_last = []
        for ci in range(self.nc):
            # NOTE: numDiff_last initially aliases the same array as numDiff;
            # independent copies are only made once lagging switches on below
            self.numDiff.append(cq[('numDiff', ci, ci)])
            self.numDiff_last.append(cq[('numDiff', ci, ci)])
    def updateShockCapturingHistory(self):
        """Advance the lag state; switch lagging on once the delay has passed."""
        self.nSteps += 1
        if self.lag:
            for ci in range(self.nc):
                self.numDiff_last[ci][:] = self.numDiff[ci]
        if self.lag == False and self.nStepsToDelay is not None and self.nSteps > self.nStepsToDelay:
            logEvent("VOF.ShockCapturing: switched to lagged shock capturing")
            self.lag = True
            # from here on numDiff_last holds true copies of the previous step
            self.numDiff_last = []
            for ci in range(self.nc):
                self.numDiff_last.append(self.numDiff[ci].copy())
        logEvent("VOF: max numDiff %e" % (globalMax(self.numDiff_last[0].max()),))
class NumericalFlux(proteus.NumericalFlux.Advection_DiagonalUpwind_Diffusion_IIPG_exterior):
    """Exterior numerical flux for VOF: diagonal-upwind advection with IIPG diffusion."""
    def __init__(self,
                 vt,
                 getPointwiseBoundaryConditions,
                 getAdvectiveFluxBoundaryConditions,
                 getDiffusiveFluxBoundaryConditions,
                 getPeriodicBoundaryConditions=None):
        # NOTE(review): getPeriodicBoundaryConditions is accepted for
        # interface compatibility but is NOT forwarded to the base class
        proteus.NumericalFlux.Advection_DiagonalUpwind_Diffusion_IIPG_exterior.__init__(
            self,
            vt,
            getPointwiseBoundaryConditions,
            getAdvectiveFluxBoundaryConditions,
            getDiffusiveFluxBoundaryConditions)
class RKEV(proteus.TimeIntegration.SSP):
    from proteus import TimeIntegration
    """
    Explicit SSP Runge-Kutta time integration (orders 1-3) for the
    edge-based entropy-viscosity VOF methods (STABILIZATION_TYPE > 1).
    The time step is chosen from the transport model's edge-based CFL.
    """
    # NOTE(review): the string above follows the class-level import, so it is
    # a bare string statement, not the bound class docstring.
    def __init__(self, transport, timeOrder=1, runCFL=0.1, integrateInterpolationPoints=False):
        TimeIntegration.SSP.__init__(self,
                                     transport,
                                     integrateInterpolationPoints=integrateInterpolationPoints)
        self.runCFL = runCFL
        self.dtLast = None
        self.isAdaptive = True
        assert transport.coefficients.STABILIZATION_TYPE>1, "SSP method just works for edge based EV methods; i.e., STABILIZATION_TYPE>1"
        assert hasattr(transport, 'edge_based_cfl'), "No edge based cfl defined"
        # About the cfl: shared array, updated in place by the transport model
        self.cfl = transport.edge_based_cfl
        # Stuff particular for SSP
        self.timeOrder = timeOrder  # order of approximation
        self.nStages = timeOrder  # number of stages total
        self.lstage = 0  # last stage completed
        # storage vectors
        self.u_dof_last = {}
        self.m_old = {}
        # per component stage values, list with array at each stage
        for ci in range(self.nc):
            self.m_last[ci] = transport.q[('u',ci)].copy()
            self.m_old[ci] = transport.q[('u',ci)].copy()
            self.u_dof_last[ci] = transport.u[ci].dof.copy()
    def choose_dt(self):
        # dt = runCFL / max(edge-based CFL), floored away from divide-by-zero
        maxCFL = 1.0e-6
        maxCFL = max(maxCFL, globalMax(self.cfl.max()))
        self.dt = old_div(self.runCFL, maxCFL)
        if self.dtLast is None:
            self.dtLast = self.dt
        self.t = self.tLast + self.dt
        self.substeps = [self.t for i in range(self.nStages)]  # Manuel is ignoring different time step levels for now
    def initialize_dt(self, t0, tOut, q):
        """
        Modify self.dt
        """
        self.tLast = t0
        self.choose_dt()
        self.t = t0 + self.dt
    def setCoefficients(self):
        """
        beta are all 1's here
        mwf not used right now
        """
        self.alpha = np.zeros((self.nStages, self.nStages), 'd')
        self.dcoefs = np.zeros((self.nStages), 'd')
    def updateStage(self):
        """
        Advance to the next RK stage, forming the convex SSP combinations of
        the current stage value and the stored step-start value.
        Need to switch to use coefficients
        """
        self.lstage += 1
        assert self.timeOrder in [1, 2, 3]
        assert self.lstage > 0 and self.lstage <= self.timeOrder
        # NOTE(review): the stage-3/stage-2 branches write u[0].dof inside the
        # loop over ci; equivalent to u[ci].dof only because VOF has nc == 1 —
        # confirm before reusing this class with nc > 1.
        if self.timeOrder == 3:
            if self.lstage == 1:
                logEvent("First stage of SSP33 method", level=4)
                for ci in range(self.nc):
                    # save stage at quad points
                    self.m_last[ci][:] = self.transport.q[('u',ci)]
                    # DOFs
                    self.transport.u_dof_old[:] = self.transport.u[ci].dof
            elif self.lstage == 2:
                logEvent("Second stage of SSP33 method", level=4)
                for ci in range(self.nc):
                    # Quad points
                    self.m_last[ci][:] = 1./4*self.transport.q[('u',ci)]
                    self.m_last[ci][:] += 3./4*self.m_old[ci]
                    # DOFs
                    self.transport.u_dof_old[:] = 1./4*self.transport.u[ci].dof
                    self.transport.u_dof_old[:] += 3./4* self.u_dof_last[ci]
            elif self.lstage == 3:
                logEvent("Third stage of SSP33 method", level=4)
                for ci in range(self.nc):
                    # Quad points
                    self.m_last[ci][:] = 2./3*self.transport.q[('u',ci)]
                    self.m_last[ci][:] += 1./3*self.m_old[ci]
                    # DOFs
                    self.transport.u[0].dof[:] = 2./3*self.transport.u[ci].dof
                    self.transport.u[0].dof[:] += 1./3* self.u_dof_last[ci]
                    # update u_dof_old
                    self.transport.u_dof_old[:] = self.u_dof_last[ci]
        elif self.timeOrder == 2:
            if self.lstage == 1:
                logEvent("First stage of SSP22 method", level=4)
                for ci in range(self.nc):
                    # save stage at quad points
                    self.m_last[ci][:] = self.transport.q[('u',ci)]
                    # DOFs
                    self.transport.u_dof_old[:] = self.transport.u[ci].dof
            elif self.lstage == 2:
                logEvent("Second stage of SSP22 method", level=4)
                for ci in range(self.nc):
                    # Quad points
                    self.m_last[ci][:] = 1./2*self.transport.q[('u',ci)]
                    self.m_last[ci][:] += 1./2*self.m_old[ci]
                    # DOFs
                    self.transport.u[0].dof[:] = 1./2*self.transport.u[ci].dof
                    self.transport.u[0].dof[:] += 1./2*self.u_dof_last[ci]
                    # update u_dof_old
                    self.transport.u_dof_old[:] = self.u_dof_last[ci]
        else:
            # forward Euler: single stage, just record the stage value
            assert self.timeOrder == 1
            for ci in range(self.nc):
                self.m_last[ci][:] = self.transport.q[('u',ci)]
    def initializeTimeHistory(self, resetFromDOF=True):
        """
        Push necessary information into time history arrays
        """
        for ci in range(self.nc):
            self.m_old[ci][:] = self.transport.q[('u',ci)]
            self.m_last[ci][:] = self.transport.q[('u',ci)]
            self.u_dof_last[ci][:] = self.transport.u[ci].dof[:]
    def updateTimeHistory(self, resetFromDOF=False):
        """
        assumes successful step has been taken
        """
        self.t = self.tLast + self.dt
        for ci in range(self.nc):
            self.m_old[ci][:] = self.m_last[ci][:]
            self.u_dof_last[ci][:] = self.transport.u[ci].dof[:]
        self.lstage = 0
        self.dtLast = self.dt
        self.tLast = self.t
    def generateSubsteps(self, tList):
        """
        create list of substeps over time values given in tList. These correspond to stages
        """
        self.substeps = []
        tLast = self.tLast
        for t in tList:
            dttmp = t - tLast
            self.substeps.extend([tLast + dttmp for i in range(self.nStages)])
            tLast = t
    def resetOrder(self, order):
        """
        initialize data structures for stage updges
        """
        self.timeOrder = order  # order of approximation
        self.nStages = order  # number of stages total
        self.lstage = 0  # last stage completed
        self.substeps = [self.t for i in range(self.nStages)]
    def setFromOptions(self, nOptions):
        """
        allow classes to set various numerical parameters
        """
        if 'runCFL' in dir(nOptions):
            self.runCFL = nOptions.runCFL
        flags = ['timeOrder']
        for flag in flags:
            if flag in dir(nOptions):
                val = getattr(nOptions, flag)
                setattr(self, flag, val)
                if flag == 'timeOrder':
                    self.resetOrder(self.timeOrder)
class Coefficients(proteus.TransportCoefficients.TC_base):
    """Transport coefficients for the (volume-averaged) VOF equation.

    The volume fraction is advected by the velocity of the flow model
    ``V_model``; an optional level-set model (``LS_model``) supplies the
    interface location and a VRANS flow model may supply porosity fields.
    """
    from proteus.ctransportCoefficients import VOFCoefficientsEvaluate
    from proteus.ctransportCoefficients import VolumeAveragedVOFCoefficientsEvaluate
    from proteus.cfemIntegrals import copyExteriorElementBoundaryValuesFromElementBoundaryValues

    def __init__(self,
                 LS_model=None,
                 V_model=0,
                 RD_model=None,
                 ME_model=1,
                 VOS_model=None,
                 checkMass=True,
                 epsFact=0.0,
                 useMetrics=0.0,
                 sc_uref=1.0,
                 sc_beta=1.0,
                 setParamsFunc=None,
                 movingDomain=False,
                 set_vos=None,
                 forceStrongConditions=False,
                 STABILIZATION_TYPE=0,
                 # 0: supg
                 # 1: Taylor Galerkin with EV
                 # 2: EV with FCT (with or without art comp)
                 # 3: Smoothness indicator (with or without art comp)
                 # 4: DK's with FCT
                 # FOR EDGE BASED EV
                 ENTROPY_TYPE=0,
                 # 0: quadratic
                 # 1: logarithmic
                 # FOR ENTROPY VISCOSITY
                 cE=1.0,
                 cMax=1.0,
                 uL=0.0,
                 uR=1.0,
                 # FOR ARTIFICIAL COMPRESSION
                 cK=0.0,
                 LUMPED_MASS_MATRIX=False,
                 FCT=True,
                 outputQuantDOFs=False,
                 # NULLSPACE INFO
                 nullSpace='NoNullSpace',
                 initialize=True):
        """Store model indices and numerical parameters.

        ``*_model`` arguments are indices into the ``modelList`` later
        handed to :meth:`attachModels`.
        """
        self.variableNames = ['vof']
        # model indices (the original assigned several of these two or
        # three times; each is now assigned exactly once)
        self.LS_modelIndex = LS_model
        self.V_model = V_model
        self.flowModelIndex = V_model
        self.RD_modelIndex = RD_model
        self.modelIndex = ME_model
        self.VOS_model = VOS_model
        self.checkMass = checkMass
        self.epsFact = epsFact
        self.useMetrics = useMetrics
        self.sc_uref = sc_uref
        self.sc_beta = sc_beta
        self.setParamsFunc = setParamsFunc
        self.movingDomain = movingDomain
        self.forceStrongConditions = forceStrongConditions
        self.STABILIZATION_TYPE = STABILIZATION_TYPE
        self.ENTROPY_TYPE = ENTROPY_TYPE
        self.cE = cE
        self.cMax = cMax
        self.uL = uL
        self.uR = uR
        self.cK = cK
        self.LUMPED_MASS_MATRIX = LUMPED_MASS_MATRIX
        self.FCT = FCT
        self.outputQuantDOFs = outputQuantDOFs
        self.nullSpace = nullSpace
        # VRANS porosity fields; filled in by attachModels/initialize* hooks
        self.q_porosity = None
        self.ebq_porosity = None
        self.ebqe_porosity = None
        self.porosity_dof = None
        self.flowCoefficients = None
        self.set_vos = set_vos
        # robustness: these may never be attached (no LS model / no ebq
        # velocity); default them so evaluate() can test for None safely
        self.ebq_v = None
        self.ebq_phi = None
        self.q_phi = None
        self.ebqe_phi = None
        if initialize:
            self.initialize()

    def initialize(self):
        """Register the PDE structure (linear mass + linear advection) with TC_base."""
        nc = 1
        mass = {0: {0: 'linear'}}
        advection = {0: {0: 'linear'}}
        hamiltonian = {}
        diffusion = {}
        potential = {}
        reaction = {}
        TC_base.__init__(self,
                         nc,
                         mass,
                         advection,
                         diffusion,
                         potential,
                         reaction,
                         hamiltonian,
                         self.variableNames,
                         movingDomain=self.movingDomain)

    def initializeMesh(self, mesh):
        # interface half-thickness scales with the mesh size
        self.eps = self.epsFact * mesh.h

    def attachModels(self, modelList):
        """Wire up references to the companion models (self, RD, LS, flow)."""
        # self
        self.model = modelList[self.modelIndex]
        # redistanced level set
        if self.RD_modelIndex is not None:
            self.rdModel = modelList[self.RD_modelIndex]
        # level set
        if self.LS_modelIndex is not None:
            self.lsModel = modelList[self.LS_modelIndex]
            self.q_phi = modelList[self.LS_modelIndex].q[('u', 0)]
            self.ebqe_phi = modelList[self.LS_modelIndex].ebqe[('u', 0)]
            if ('u', 0) in modelList[self.LS_modelIndex].ebq:
                self.ebq_phi = modelList[self.LS_modelIndex].ebq[('u', 0)]
        else:
            self.ebqe_phi = np.zeros(self.model.ebqe[('u', 0)].shape, 'd')  # cek hack, we don't need this
        # flow model: prefer postprocessed ('velocity',0), fall back to ('f',0)
        if self.V_model is not None:
            if ('velocity', 0) in modelList[self.V_model].q:
                self.q_v = modelList[self.V_model].q[('velocity', 0)]
                self.ebqe_v = modelList[self.V_model].ebqe[('velocity', 0)]
            else:
                self.q_v = modelList[self.V_model].q[('f', 0)]
                self.ebqe_v = modelList[self.V_model].ebqe[('f', 0)]
            if ('velocity', 0) in modelList[self.V_model].ebq:
                self.ebq_v = modelList[self.V_model].ebq[('velocity', 0)]
            else:
                if ('f', 0) in modelList[self.V_model].ebq:
                    self.ebq_v = modelList[self.V_model].ebq[('f', 0)]
        else:
            # no flow model: unit velocity field
            self.q_v = np.ones(self.model.q[('u', 0)].shape + (self.model.nSpace_global,), 'd')
            self.ebqe_v = np.ones(self.model.ebqe[('u', 0)].shape + (self.model.nSpace_global,), 'd')
        # VRANS: take porosity fields from the flow model when available,
        # otherwise default to 1 (pure fluid), optionally set via setParamsFunc
        if self.V_model is not None:
            self.flowCoefficients = modelList[self.V_model].coefficients
        else:
            self.flowCoefficients = None
        if hasattr(self.flowCoefficients, 'q_porosity'):
            self.q_porosity = self.flowCoefficients.q_porosity
            if self.STABILIZATION_TYPE > 1:  # edge based stabilization: EV or smoothness based
                assert hasattr(self.flowCoefficients, 'porosity_dof'), 'If STABILIZATION_TYPE>1, the flow model must have porosity_dof'
                self.porosity_dof = self.flowCoefficients.porosity_dof
            else:
                self.porosity_dof = np.ones(modelList[self.modelIndex].u[0].dof.shape, 'd')
        else:
            self.q_porosity = np.ones(modelList[self.modelIndex].q[('u', 0)].shape, 'd')
            self.porosity_dof = np.ones(modelList[self.modelIndex].u[0].dof.shape, 'd')
            if self.setParamsFunc is not None:
                self.setParamsFunc(modelList[self.modelIndex].q['x'], self.q_porosity)
        if hasattr(self.flowCoefficients, 'ebq_porosity'):
            self.ebq_porosity = self.flowCoefficients.ebq_porosity
        elif ('u', 0) in modelList[self.modelIndex].ebq:
            self.ebq_porosity = np.ones(modelList[self.modelIndex].ebq[('u', 0)].shape, 'd')
            if self.setParamsFunc is not None:
                self.setParamsFunc(modelList[self.modelIndex].ebq['x'], self.ebq_porosity)
        if hasattr(self.flowCoefficients, 'ebqe_porosity'):
            self.ebqe_porosity = self.flowCoefficients.ebqe_porosity
        else:
            self.ebqe_porosity = np.ones(self.model.ebqe[('u', 0)].shape, 'd')
            if self.setParamsFunc is not None:
                # bug fix: use this model's own boundary quadrature points;
                # the old code indexed modelList[self.LS_modelIndex], which
                # fails when no level-set model is attached
                self.setParamsFunc(self.model.ebqe['x'], self.ebqe_porosity)

    def initializeElementQuadrature(self, t, cq):
        # VRANS: default porosity of 1 until a flow model overrides it
        self.q_porosity = np.ones(cq[('u', 0)].shape, 'd')

    def initializeElementBoundaryQuadrature(self, t, cebq, cebq_global):
        # VRANS
        self.ebq_porosity = np.ones(cebq[('u', 0)].shape, 'd')

    def initializeGlobalExteriorElementBoundaryQuadrature(self, t, cebqe):
        # VRANS
        self.ebqe_porosity = np.ones(cebqe[('u', 0)].shape, 'd')

    def preStep(self, t, firstStep=False):
        """Save the old solution, reset Taylor-Galerkin stage flags, refresh
        the velocity field if user-supplied, and report pre-step mass."""
        # SAVE OLD SOLUTION #
        self.model.u_dof_old[:] = self.model.u[0].dof
        # Restart flags for stages of taylor galerkin
        self.model.stage = 1
        self.model.auxTaylorGalerkinFlag = 1
        # COMPUTE NEW VELOCITY (if given by user) #
        if self.model.hasVelocityFieldAsFunction:
            self.model.updateVelocityFieldAsFunction()
        if self.checkMass:
            self.m_pre = Norms.scalarDomainIntegral(self.model.q['dV_last'],
                                                    self.model.q[('m', 0)],
                                                    self.model.mesh.nElements_owned)
            logEvent("Phase 0 mass before VOF step = %12.5e" % (self.m_pre,), level=2)
        copyInstructions = {}
        return copyInstructions

    def postStep(self, t, firstStep=False):
        """Roll the quadrature weights forward and report post-step mass."""
        self.model.q['dV_last'][:] = self.model.q['dV']
        if self.checkMass:
            self.m_post = Norms.scalarDomainIntegral(self.model.q['dV'],
                                                     self.model.q[('m', 0)],
                                                     self.model.mesh.nElements_owned)
            logEvent("Phase 0 mass after VOF step = %12.5e" % (self.m_post,), level=2)
        copyInstructions = {}
        return copyInstructions

    def updateToMovingDomain(self, t, c):
        # in a moving domain simulation the velocity coming in is already for the moving domain
        pass

    def evaluate(self, t, c):
        """Evaluate m, f and their derivatives at the quadrature set ``c``.

        The quadrature set (element, exterior boundary, or interior
        boundary) is identified by matching the shape of ``c[('f', 0)]``.
        """
        if c[('f', 0)].shape == self.q_v.shape:
            v = self.q_v
            phi = self.q_phi
            porosity = self.q_porosity
        elif c[('f', 0)].shape == self.ebqe_v.shape:
            v = self.ebqe_v
            phi = self.ebqe_phi
            # bug fix: was self.ebq_porosity, whose shape does not match the
            # exterior-boundary quadrature arrays used in this branch
            porosity = self.ebqe_porosity
        elif ((self.ebq_v is not None and self.ebq_phi is not None) and c[('f', 0)].shape == self.ebq_v.shape):
            v = self.ebq_v
            phi = self.ebq_phi
            porosity = self.ebq_porosity
        else:
            v = None
            phi = None
            porosity = None
        if v is not None:
            self.VolumeAveragedVOFCoefficientsEvaluate(self.eps,
                                                       v,
                                                       phi,
                                                       porosity,
                                                       c[('u', 0)],
                                                       c[('m', 0)],
                                                       c[('dm', 0, 0)],
                                                       c[('f', 0)],
                                                       c[('df', 0, 0)])
class LevelModel(proteus.Transport.OneLevelTransport):
nCalls = 0
def __init__(self,
uDict,
phiDict,
testSpaceDict,
matType,
dofBoundaryConditionsDict,
dofBoundaryConditionsSetterDict,
coefficients,
elementQuadrature,
elementBoundaryQuadrature,
fluxBoundaryConditionsDict=None,
advectiveFluxBoundaryConditionsSetterDict=None,
diffusiveFluxBoundaryConditionsSetterDictDict=None,
stressTraceBoundaryConditionsSetterDict=None,
stabilization=None,
shockCapturing=None,
conservativeFluxDict=None,
numericalFluxType=None,
TimeIntegrationClass=None,
massLumping=False,
reactionLumping=False,
options=None,
name='defaultName',
reuse_trial_and_test_quadrature=True,
sd=True,
movingDomain=False,
bdyNullSpace=False):
self.auxiliaryCallCalculateResidual = False
#
# set the objects describing the method and boundary conditions
#
self.bdyNullSpace = bdyNullSpace
self.movingDomain = movingDomain
self.tLast_mesh = None
#
self.name = name
self.sd = sd
self.Hess = False
self.lowmem = True
self.timeTerm = True # allow turning off the time derivative
# self.lowmem=False
self.testIsTrial = True
self.phiTrialIsTrial = True
self.u = uDict
self.ua = {} # analytical solutions
self.phi = phiDict
self.dphi = {}
self.matType = matType
# mwf try to reuse test and trial information across components if spaces are the same
self.reuse_test_trial_quadrature = reuse_trial_and_test_quadrature # True#False
if self.reuse_test_trial_quadrature:
for ci in range(1, coefficients.nc):
assert self.u[ci].femSpace.__class__.__name__ == self.u[0].femSpace.__class__.__name__, "to reuse_test_trial_quad all femSpaces must be the same!"
self.u_dof_old = None
# Simplicial Mesh
self.mesh = self.u[0].femSpace.mesh # assume the same mesh for all components for now
self.testSpace = testSpaceDict
self.dirichletConditions = dofBoundaryConditionsDict
self.dirichletNodeSetList = None # explicit Dirichlet conditions for now, no Dirichlet BC constraints
self.coefficients = coefficients
self.coefficients.initializeMesh(self.mesh)
self.nc = self.coefficients.nc
self.stabilization = stabilization
self.shockCapturing = shockCapturing
self.conservativeFlux = conservativeFluxDict # no velocity post-processing for now
self.fluxBoundaryConditions = fluxBoundaryConditionsDict
self.advectiveFluxBoundaryConditionsSetterDict = advectiveFluxBoundaryConditionsSetterDict
self.diffusiveFluxBoundaryConditionsSetterDictDict = diffusiveFluxBoundaryConditionsSetterDictDict
# determine whether the stabilization term is nonlinear
self.stabilizationIsNonlinear = False
# cek come back
if self.stabilization is not None:
for ci in range(self.nc):
if ci in coefficients.mass:
for flag in list(coefficients.mass[ci].values()):
if flag == 'nonlinear':
self.stabilizationIsNonlinear = True
if ci in coefficients.advection:
for flag in list(coefficients.advection[ci].values()):
if flag == 'nonlinear':
self.stabilizationIsNonlinear = True
if ci in coefficients.diffusion:
for diffusionDict in list(coefficients.diffusion[ci].values()):
for flag in list(diffusionDict.values()):
if flag != 'constant':
self.stabilizationIsNonlinear = True
if ci in coefficients.potential:
for flag in list(coefficients.potential[ci].values()):
if flag == 'nonlinear':
self.stabilizationIsNonlinear = True
if ci in coefficients.reaction:
for flag in list(coefficients.reaction[ci].values()):
if flag == 'nonlinear':
self.stabilizationIsNonlinear = True
if ci in coefficients.hamiltonian:
for flag in list(coefficients.hamiltonian[ci].values()):
if flag == 'nonlinear':
self.stabilizationIsNonlinear = True
# determine if we need element boundary storage
self.elementBoundaryIntegrals = {}
for ci in range(self.nc):
self.elementBoundaryIntegrals[ci] = ((self.conservativeFlux is not None) or
(numericalFluxType is not None) or
(self.fluxBoundaryConditions[ci] == 'outFlow') or
(self.fluxBoundaryConditions[ci] == 'mixedFlow') or
(self.fluxBoundaryConditions[ci] == 'setFlow'))
#
# calculate some dimensions
#
self.nSpace_global = self.u[0].femSpace.nSpace_global # assume same space dim for all variables
self.nDOF_trial_element = [u_j.femSpace.max_nDOF_element for u_j in list(self.u.values())]
self.nDOF_phi_trial_element = [phi_k.femSpace.max_nDOF_element for phi_k in list(self.phi.values())]
self.n_phi_ip_element = [phi_k.femSpace.referenceFiniteElement.interpolationConditions.nQuadraturePoints for phi_k in list(self.phi.values())]
self.nDOF_test_element = [femSpace.max_nDOF_element for femSpace in list(self.testSpace.values())]
self.nFreeDOF_global = [dc.nFreeDOF_global for dc in list(self.dirichletConditions.values())]
self.nVDOF_element = sum(self.nDOF_trial_element)
self.nFreeVDOF_global = sum(self.nFreeDOF_global)
#
NonlinearEquation.__init__(self, self.nFreeVDOF_global)
#
# build the quadrature point dictionaries from the input (this
# is just for convenience so that the input doesn't have to be
# complete)
#
elementQuadratureDict = {}
elemQuadIsDict = isinstance(elementQuadrature, dict)
if elemQuadIsDict: # set terms manually
for I in self.coefficients.elementIntegralKeys:
if I in elementQuadrature:
elementQuadratureDict[I] = elementQuadrature[I]
else:
elementQuadratureDict[I] = elementQuadrature['default']
else:
for I in self.coefficients.elementIntegralKeys:
elementQuadratureDict[I] = elementQuadrature
if self.stabilization is not None:
for I in self.coefficients.elementIntegralKeys:
if elemQuadIsDict:
if I in elementQuadrature:
elementQuadratureDict[('stab',) + I[1:]] = elementQuadrature[I]
else:
elementQuadratureDict[('stab',) + I[1:]] = elementQuadrature['default']
else:
elementQuadratureDict[('stab',) + I[1:]] = elementQuadrature
if self.shockCapturing is not None:
for ci in self.shockCapturing.components:
if elemQuadIsDict:
if ('numDiff', ci, ci) in elementQuadrature:
elementQuadratureDict[('numDiff', ci, ci)] = elementQuadrature[('numDiff', ci, ci)]
else:
elementQuadratureDict[('numDiff', ci, ci)] = elementQuadrature['default']
else:
elementQuadratureDict[('numDiff', ci, ci)] = elementQuadrature
if massLumping:
for ci in list(self.coefficients.mass.keys()):
elementQuadratureDict[('m', ci)] = Quadrature.SimplexLobattoQuadrature(self.nSpace_global, 1)
for I in self.coefficients.elementIntegralKeys:
elementQuadratureDict[('stab',) + I[1:]] = Quadrature.SimplexLobattoQuadrature(self.nSpace_global, 1)
if reactionLumping:
for ci in list(self.coefficients.mass.keys()):
elementQuadratureDict[('r', ci)] = Quadrature.SimplexLobattoQuadrature(self.nSpace_global, 1)
for I in self.coefficients.elementIntegralKeys:
elementQuadratureDict[('stab',) + I[1:]] = Quadrature.SimplexLobattoQuadrature(self.nSpace_global, 1)
elementBoundaryQuadratureDict = {}
if isinstance(elementBoundaryQuadrature, dict): # set terms manually
for I in self.coefficients.elementBoundaryIntegralKeys:
if I in elementBoundaryQuadrature:
elementBoundaryQuadratureDict[I] = elementBoundaryQuadrature[I]
else:
elementBoundaryQuadratureDict[I] = elementBoundaryQuadrature['default']
else:
for I in self.coefficients.elementBoundaryIntegralKeys:
elementBoundaryQuadratureDict[I] = elementBoundaryQuadrature
#
# find the union of all element quadrature points and
# build a quadrature rule for each integral that has a
# weight at each point in the union
# mwf include tag telling me which indices are which quadrature rule?
(self.elementQuadraturePoints, self.elementQuadratureWeights,
self.elementQuadratureRuleIndeces) = Quadrature.buildUnion(elementQuadratureDict)
self.nQuadraturePoints_element = self.elementQuadraturePoints.shape[0]
self.nQuadraturePoints_global = self.nQuadraturePoints_element * self.mesh.nElements_global
#
# Repeat the same thing for the element boundary quadrature
#
(self.elementBoundaryQuadraturePoints,
self.elementBoundaryQuadratureWeights,
self.elementBoundaryQuadratureRuleIndeces) = Quadrature.buildUnion(elementBoundaryQuadratureDict)
self.nElementBoundaryQuadraturePoints_elementBoundary = self.elementBoundaryQuadraturePoints.shape[0]
self.nElementBoundaryQuadraturePoints_global = (self.mesh.nElements_global *
self.mesh.nElementBoundaries_element *
self.nElementBoundaryQuadraturePoints_elementBoundary)
#
# storage dictionaries
self.scalars_element = set()
#
# simplified allocations for test==trial and also check if space is mixed or not
#
self.q = {}
self.ebq = {}
self.ebq_global = {}
self.ebqe = {}
self.phi_ip = {}
self.edge_based_cfl = np.zeros(self.u[0].dof.shape)
# mesh
self.q['x'] = np.zeros((self.mesh.nElements_global, self.nQuadraturePoints_element, 3), 'd')
self.ebqe['x'] = np.zeros((self.mesh.nExteriorElementBoundaries_global, self.nElementBoundaryQuadraturePoints_elementBoundary, 3), 'd')
self.q[('u', 0)] = np.zeros((self.mesh.nElements_global, self.nQuadraturePoints_element), 'd')
self.q[('dV_u', 0)] = (old_div(1.0, self.mesh.nElements_global)) * np.ones((self.mesh.nElements_global, self.nQuadraturePoints_element), 'd')
self.q[('grad(u)', 0)] = np.zeros((self.mesh.nElements_global, self.nQuadraturePoints_element, self.nSpace_global), 'd')
self.q[('m_last', 0)] = np.zeros((self.mesh.nElements_global, self.nQuadraturePoints_element), 'd')
self.q[('mt', 0)] = np.zeros((self.mesh.nElements_global, self.nQuadraturePoints_element), 'd')
self.q['dV'] = np.zeros((self.mesh.nElements_global, self.nQuadraturePoints_element), 'd')
self.q['dV_last'] = -1000 * np.ones((self.mesh.nElements_global, self.nQuadraturePoints_element), 'd')
self.q[('m_tmp', 0)] = self.q[('u', 0)].copy()
self.q[('m', 0)] = self.q[('m_tmp', 0)]
self.q[('cfl', 0)] = np.zeros((self.mesh.nElements_global, self.nQuadraturePoints_element), 'd')
self.q[('numDiff', 0, 0)] = np.zeros((self.mesh.nElements_global, self.nQuadraturePoints_element), 'd')
self.ebqe[('u', 0)] = np.zeros((self.mesh.nExteriorElementBoundaries_global, self.nElementBoundaryQuadraturePoints_elementBoundary), 'd')
self.ebqe[('grad(u)', 0)] = np.zeros((self.mesh.nExteriorElementBoundaries_global,
self.nElementBoundaryQuadraturePoints_elementBoundary, self.nSpace_global), 'd')
self.ebqe[('advectiveFlux_bc_flag', 0)] = np.zeros(
(self.mesh.nExteriorElementBoundaries_global, self.nElementBoundaryQuadraturePoints_elementBoundary), 'i')
self.ebqe[('advectiveFlux_bc', 0)] = np.zeros((self.mesh.nExteriorElementBoundaries_global, self.nElementBoundaryQuadraturePoints_elementBoundary), 'd')
self.ebqe[('advectiveFlux', 0)] = np.zeros((self.mesh.nExteriorElementBoundaries_global, self.nElementBoundaryQuadraturePoints_elementBoundary), 'd')
self.points_elementBoundaryQuadrature = set()
self.scalars_elementBoundaryQuadrature = set([('u', ci) for ci in range(self.nc)])
self.vectors_elementBoundaryQuadrature = set()
self.tensors_elementBoundaryQuadrature = set()
self.inflowBoundaryBC = {}
self.inflowBoundaryBC_values = {}
self.inflowFlux = {}
for cj in range(self.nc):
self.inflowBoundaryBC[cj] = np.zeros((self.mesh.nExteriorElementBoundaries_global,), 'i')
self.inflowBoundaryBC_values[cj] = np.zeros((self.mesh.nExteriorElementBoundaries_global, self.nDOF_trial_element[cj]), 'd')
self.inflowFlux[cj] = np.zeros((self.mesh.nExteriorElementBoundaries_global, self.nElementBoundaryQuadraturePoints_elementBoundary), 'd')
self.internalNodes = set(range(self.mesh.nNodes_global))
# identify the internal nodes this is ought to be in mesh
# \todo move this to mesh
for ebNE in range(self.mesh.nExteriorElementBoundaries_global):
ebN = self.mesh.exteriorElementBoundariesArray[ebNE]
eN_global = self.mesh.elementBoundaryElementsArray[ebN, 0]
ebN_element = self.mesh.elementBoundaryLocalElementBoundariesArray[ebN, 0]
for i in range(self.mesh.nNodes_element):
if i != ebN_element:
I = self.mesh.elementNodesArray[eN_global, i]
self.internalNodes -= set([I])
self.nNodes_internal = len(self.internalNodes)
self.internalNodesArray = np.zeros((self.nNodes_internal,), 'i')
for nI, n in enumerate(self.internalNodes):
self.internalNodesArray[nI] = n
#
del self.internalNodes
self.internalNodes = None
logEvent("Updating local to global mappings", 2)
self.updateLocal2Global()
logEvent("Building time integration object", 2)
logEvent(memory("inflowBC, internalNodes,updateLocal2Global", "OneLevelTransport"), level=4)
# mwf for interpolating subgrid error for gradients etc
if self.stabilization and self.stabilization.usesGradientStabilization:
self.timeIntegration = TimeIntegrationClass(self, integrateInterpolationPoints=True)
else:
self.timeIntegration = TimeIntegrationClass(self)
if options is not None:
self.timeIntegration.setFromOptions(options)
logEvent(memory("TimeIntegration", "OneLevelTransport"), level=4)
logEvent("Calculating numerical quadrature formulas", 2)
self.calculateQuadrature()
self.setupFieldStrides()
comm = Comm.get()
self.comm = comm
if comm.size() > 1:
assert numericalFluxType is not None and numericalFluxType.useWeakDirichletConditions, "You must use a numerical flux to apply weak boundary conditions for parallel runs"
logEvent(memory("stride+offset", "OneLevelTransport"), level=4)
if numericalFluxType is not None:
if options is None or options.periodicDirichletConditions is None:
self.numericalFlux = numericalFluxType(self,
dofBoundaryConditionsSetterDict,
advectiveFluxBoundaryConditionsSetterDict,
diffusiveFluxBoundaryConditionsSetterDictDict)
else:
self.numericalFlux = numericalFluxType(self,
dofBoundaryConditionsSetterDict,
advectiveFluxBoundaryConditionsSetterDict,
diffusiveFluxBoundaryConditionsSetterDictDict,
options.periodicDirichletConditions)
else:
self.numericalFlux = None
# set penalty terms
# cek todo move into numerical flux initialization
if 'penalty' in self.ebq_global:
for ebN in range(self.mesh.nElementBoundaries_global):
for k in range(self.nElementBoundaryQuadraturePoints_elementBoundary):
self.ebq_global['penalty'][ebN, k] = old_div(self.numericalFlux.penalty_constant, \
(self.mesh.elementBoundaryDiametersArray[ebN]**self.numericalFlux.penalty_power))
# penalty term
# cek move to Numerical flux initialization
if 'penalty' in self.ebqe:
for ebNE in range(self.mesh.nExteriorElementBoundaries_global):
ebN = self.mesh.exteriorElementBoundariesArray[ebNE]
for k in range(self.nElementBoundaryQuadraturePoints_elementBoundary):
self.ebqe['penalty'][ebNE, k] = old_div(self.numericalFlux.penalty_constant, \
self.mesh.elementBoundaryDiametersArray[ebN]**self.numericalFlux.penalty_power)
logEvent(memory("numericalFlux", "OneLevelTransport"), level=4)
self.elementEffectiveDiametersArray = self.mesh.elementInnerDiametersArray
# use post processing tools to get conservative fluxes, None by default
from proteus import PostProcessingTools
self.velocityPostProcessor = PostProcessingTools.VelocityPostProcessingChooser(self)
logEvent(memory("velocity postprocessor", "OneLevelTransport"), level=4)
# helper for writing out data storage
from proteus import Archiver
self.elementQuadratureDictionaryWriter = Archiver.XdmfWriter()
self.elementBoundaryQuadratureDictionaryWriter = Archiver.XdmfWriter()
self.exteriorElementBoundaryQuadratureDictionaryWriter = Archiver.XdmfWriter()
# TODO get rid of this
for ci, fbcObject in list(self.fluxBoundaryConditionsObjectsDict.items()):
self.ebqe[('advectiveFlux_bc_flag', ci)] = np.zeros(self.ebqe[('advectiveFlux_bc', ci)].shape, 'i')
for t, g in list(fbcObject.advectiveFluxBoundaryConditionsDict.items()):
if ci in self.coefficients.advection:
self.ebqe[('advectiveFlux_bc', ci)][t[0], t[1]] = g(self.ebqe[('x')][t[0], t[1]], self.timeIntegration.t)
self.ebqe[('advectiveFlux_bc_flag', ci)][t[0], t[1]] = 1
if hasattr(self.numericalFlux, 'setDirichletValues'):
self.numericalFlux.setDirichletValues(self.ebqe)
if not hasattr(self.numericalFlux, 'isDOFBoundary'):
self.numericalFlux.isDOFBoundary = {0: np.zeros(self.ebqe[('u', 0)].shape, 'i')}
if not hasattr(self.numericalFlux, 'ebqe'):
self.numericalFlux.ebqe = {('u', 0): np.zeros(self.ebqe[('u', 0)].shape, 'd')}
# TODO how to handle redistancing calls for calculateCoefficients,calculateElementResidual etc
self.globalResidualDummy = None
compKernelFlag = 0
self.vof = cVOF_base(self.nSpace_global,
self.nQuadraturePoints_element,
self.u[0].femSpace.elementMaps.localFunctionSpace.dim,
self.u[0].femSpace.referenceFiniteElement.localFunctionSpace.dim,
self.testSpace[0].referenceFiniteElement.localFunctionSpace.dim,
self.nElementBoundaryQuadraturePoints_elementBoundary,
compKernelFlag)
self.forceStrongConditions = False
if self.forceStrongConditions:
self.dirichletConditionsForceDOF = DOFBoundaryConditions(self.u[0].femSpace, dofBoundaryConditionsSetterDict[0], weakDirichletConditions=False)
if self.movingDomain:
self.MOVING_DOMAIN = 1.0
else:
self.MOVING_DOMAIN = 0.0
if self.mesh.nodeVelocityArray is None:
self.mesh.nodeVelocityArray = np.zeros(self.mesh.nodeArray.shape, 'd')
# Stuff added by mql.
# Some ASSERTS to restrict the combination of the methods
if self.coefficients.STABILIZATION_TYPE > 1:
assert self.timeIntegration.isSSP == True, "If STABILIZATION_TYPE>1, use RKEV timeIntegration within VOF model"
cond = 'levelNonlinearSolver' in dir(options) and (options.levelNonlinearSolver ==
ExplicitLumpedMassMatrix or options.levelNonlinearSolver == ExplicitConsistentMassMatrixForVOF)
assert cond, "If STABILIZATION_TYPE>1, use levelNonlinearSolver=ExplicitLumpedMassMatrix or ExplicitConsistentMassMatrixForVOF"
if 'levelNonlinearSolver' in dir(options) and options.levelNonlinearSolver == ExplicitLumpedMassMatrix:
assert self.coefficients.LUMPED_MASS_MATRIX, "If levelNonlinearSolver=ExplicitLumpedMassMatrix, use LUMPED_MASS_MATRIX=True"
if self.coefficients.LUMPED_MASS_MATRIX == True:
cond = 'levelNonlinearSolver' in dir(options) and options.levelNonlinearSolver == ExplicitLumpedMassMatrix
assert cond, "Use levelNonlinearSolver=ExplicitLumpedMassMatrix when the mass matrix is lumped"
if self.coefficients.FCT == True:
cond = self.coefficients.STABILIZATION_TYPE > 1, "Use FCT just with STABILIZATION_TYPE>1; i.e., edge based stabilization"
if self.coefficients.STABILIZATION_TYPE==1:
cond = 'levelNonlinearSolver' in dir(options) and options.levelNonlinearSolver == TwoStageNewton
assert cond, "If STABILIZATION_TYPE==1, use levelNonlinearSolver=TwoStageNewton"
if self.coefficients.STABILIZATION_TYPE==1:
self.useTwoStageNewton = True
assert isinstance(self.timeIntegration, proteus.TimeIntegration.BackwardEuler_cfl), "If STABILIZATION_TYPE=1, use BackwardEuler_cfl"
assert options.levelNonlinearSolver == TwoStageNewton, "If STABILIZATION_TYPE=1, use levelNonlinearSolver=TwoStageNewton"
assert self.coefficients.ENTROPY_TYPE in [0,1], "Set ENTROPY_TYPE={0,1}"
assert self.coefficients.STABILIZATION_TYPE in [0,1,2,3,4]
if self.coefficients.STABILIZATION_TYPE==4:
assert self.coefficients.FCT==True, "If STABILIZATION_TYPE=4, use FCT=True"
# mql. Allow the user to provide functions to define the velocity field
self.hasVelocityFieldAsFunction = False
if ('velocityFieldAsFunction') in dir(options):
self.velocityFieldAsFunction = options.velocityFieldAsFunction
self.hasVelocityFieldAsFunction = True
# For edge based methods
self.ML = None # lumped mass matrix
self.MC_global = None # consistent mass matrix
self.uLow = None
self.dt_times_dC_minus_dL = None
self.dLow = None
self.min_u_bc = None
self.max_u_bc = None
self.quantDOFs = np.zeros(self.u[0].dof.shape, 'd')
# For Taylor Galerkin methods
self.stage = 1
self.auxTaylorGalerkinFlag = 1
self.uTilde_dof = np.zeros(self.u[0].dof.shape)
self.degree_polynomial = 1
try:
self.degree_polynomial = self.u[0].femSpace.order
except:
pass
self.calculateJacobian = self.vof.calculateJacobian
if (self.coefficients.STABILIZATION_TYPE <= 1): # SUPG or Taylor Galerkin
self.calculateResidual = self.vof.calculateResidualElementBased
else:
self.calculateResidual = self.vof.calculateResidualEdgeBased
    def FCTStep(self):
        """Apply one flux-corrected-transport (FCT) limiting step.

        Uses the consistent mass matrix (``MC_global``), lumped mass matrix
        (``ML``) and the FCT work vectors allocated in ``initVectors`` to
        compute a limited solution via the compiled VOF kernel, then copies
        the limited values back into the time-integration free-unknown
        vector.
        """
        # CSR structure + values of the consistent mass matrix; the limiter
        # works directly on the sparse entries.
        rowptr, colind, MassMatrix = self.MC_global.getCSRrepresentation()
        # output of the limiter (same layout as the solution DOF vector)
        limited_solution = np.zeros(self.u[0].dof.shape)
        argsDict = cArgumentsDict.ArgumentsDict()
        argsDict["dt"] = self.timeIntegration.dt
        argsDict["NNZ"] = self.nnz
        argsDict["numDOFs"] = len(rowptr) - 1
        argsDict["lumped_mass_matrix"] = self.ML
        argsDict["soln"] = self.u_dof_old
        argsDict["solH"] = self.timeIntegration.u
        argsDict["uLow"] = self.uLow
        argsDict["dLow"] = self.dLow
        argsDict["limited_solution"] = limited_solution
        argsDict["csrRowIndeces_DofLoops"] = rowptr
        argsDict["csrColumnOffsets_DofLoops"] = colind
        argsDict["MassMatrix"] = MassMatrix
        # NOTE: the kernel key is "dt_times_dH_minus_dL" while the attribute
        # is named dt_times_dC_minus_dL; they refer to the same vector.
        argsDict["dt_times_dH_minus_dL"] = self.dt_times_dC_minus_dL
        argsDict["min_u_bc"] = self.min_u_bc
        argsDict["max_u_bc"] = self.max_u_bc
        argsDict["LUMPED_MASS_MATRIX"] = self.coefficients.LUMPED_MASS_MATRIX
        argsDict["STABILIZATION_TYPE"] = self.coefficients.STABILIZATION_TYPE
        # delegate the actual limiting to the compiled VOF kernel
        self.vof.FCTStep(argsDict)
        #self.timeIntegration.u[:] = limited_solution
        fromFreeToGlobal=0 #direction copying
        # scatter the limited solution into the free-unknown vector
        cfemIntegrals.copyBetweenFreeUnknownsAndGlobalUnknowns(fromFreeToGlobal,
                                                               self.offset[0],
                                                               self.stride[0],
                                                               self.dirichletConditions[0].global2freeGlobal_global_dofs,
                                                               self.dirichletConditions[0].global2freeGlobal_free_dofs,
                                                               self.timeIntegration.u,
                                                               limited_solution)
def updateVelocityFieldAsFunction(self):
X = {0: self.q[('x')][:, :, 0],
1: self.q[('x')][:, :, 1],
2: self.q[('x')][:, :, 2]}
t = self.timeIntegration.t
self.coefficients.q_v[..., 0] = self.velocityFieldAsFunction[0](X, t)
self.coefficients.q_v[..., 1] = self.velocityFieldAsFunction[1](X, t)
if (self.nSpace_global == 3):
self.coefficients.q_v[..., 2] = self.velocityFieldAsFunction[2](X, t)
# BOUNDARY
ebqe_X = {0: self.ebqe['x'][:, :, 0],
1: self.ebqe['x'][:, :, 1],
2: self.ebqe['x'][:, :, 2]}
self.coefficients.ebqe_v[..., 0] = self.velocityFieldAsFunction[0](ebqe_X, t)
self.coefficients.ebqe_v[..., 1] = self.velocityFieldAsFunction[1](ebqe_X, t)
if (self.nSpace_global == 3):
self.coefficients.ebqe_v[..., 2] = self.velocityFieldAsFunction[2](ebqe_X, t)
def calculateCoefficients(self):
pass
def calculateElementResidual(self):
if self.globalResidualDummy is not None:
self.getResidual(self.u[0].dof, self.globalResidualDummy)
    def getMassMatrix(self):
        """Assemble the consistent global mass matrix ``MC_global`` (CSR) and
        the lumped mass matrix ``ML`` (row sums of the consistent matrix),
        assuming a linear, unit mass term.

        Finishes with a sanity check: the sum of the lumped entries must
        equal the mesh volume.
        """
        # JACOBIANS (FOR ELEMENT TRANSFORMATION)
        self.q[('J')] = np.zeros((self.mesh.nElements_global,
                                  self.nQuadraturePoints_element,
                                  self.nSpace_global,
                                  self.nSpace_global),
                                 'd')
        self.q[('inverse(J)')] = np.zeros((self.mesh.nElements_global,
                                           self.nQuadraturePoints_element,
                                           self.nSpace_global,
                                           self.nSpace_global),
                                          'd')
        self.q[('det(J)')] = np.zeros((self.mesh.nElements_global,
                                       self.nQuadraturePoints_element),
                                      'd')
        self.u[0].femSpace.elementMaps.getJacobianValues(self.elementQuadraturePoints,
                                                         self.q['J'],
                                                         self.q['inverse(J)'],
                                                         self.q['det(J)'])
        # integration uses |det(J)| as the volume scaling
        self.q['abs(det(J))'] = np.abs(self.q['det(J)'])
        # SHAPE FUNCTIONS
        self.q[('w',0)] = np.zeros((self.mesh.nElements_global,
                                    self.nQuadraturePoints_element,
                                    self.nDOF_test_element[0]),
                                   'd')
        self.q[('w*dV_m',0)] = self.q[('w',0)].copy()
        self.u[0].femSpace.getBasisValues(self.elementQuadraturePoints, self.q[('w',0)])
        # weight the shape functions by quadrature weight * |det(J)|
        cfemIntegrals.calculateWeightedShape(self.elementQuadratureWeights[('u',0)],
                                             self.q['abs(det(J))'],
                                             self.q[('w',0)],
                                             self.q[('w*dV_m',0)])
        # assume a linear mass term
        dm = np.ones(self.q[('u', 0)].shape, 'd')
        # per-element mass matrices, then scattered into the global CSR matrix
        elementMassMatrix = np.zeros((self.mesh.nElements_global,
                                      self.nDOF_test_element[0],
                                      self.nDOF_trial_element[0]), 'd')
        cfemIntegrals.updateMassJacobian_weak_lowmem(dm,
                                                     self.q[('w', 0)],
                                                     self.q[('w*dV_m', 0)],
                                                     elementMassMatrix)
        # reuse the Jacobian's CSR sparsity pattern for the mass matrix
        self.MC_a = self.nzval.copy()
        self.MC_global = SparseMat(self.nFreeDOF_global[0],
                                   self.nFreeDOF_global[0],
                                   self.nnz,
                                   self.MC_a,
                                   self.colind,
                                   self.rowptr)
        cfemIntegrals.zeroJacobian_CSR(self.nnz, self.MC_global)
        cfemIntegrals.updateGlobalJacobianFromElementJacobian_CSR(self.l2g[0]['nFreeDOF'],
                                                                  self.l2g[0]['freeLocal'],
                                                                  self.l2g[0]['nFreeDOF'],
                                                                  self.l2g[0]['freeLocal'],
                                                                  self.csrRowIndeces[(0, 0)],
                                                                  self.csrColumnOffsets[(0, 0)],
                                                                  elementMassMatrix,
                                                                  self.MC_global)
        # lumped mass matrix: row sums of the consistent matrix
        self.ML = np.zeros((self.nFreeDOF_global[0],), 'd')
        for i in range(self.nFreeDOF_global[0]):
            self.ML[i] = self.MC_a[self.rowptr[i]:self.rowptr[i + 1]].sum()
        np.testing.assert_almost_equal(self.ML.sum(),
                                       self.mesh.volume,
                                       err_msg="Trace of lumped mass matrix should be the domain volume", verbose=True)
def initVectors(self):
if self.coefficients.porosity_dof is None:
self.coefficients.porosity_dof = np.ones(self.u[0].dof.shape, 'd')
if self.u_dof_old is None:
# Pass initial condition to u_dof_old
self.u_dof_old = np.copy(self.u[0].dof)
rowptr, colind, MC = self.MC_global.getCSRrepresentation()
# This is dummy. I just care about the csr structure of the sparse matrix
self.dt_times_dC_minus_dL = np.zeros(MC.shape, 'd')
self.uLow = np.zeros(self.u[0].dof.shape, 'd')
self.dLow = np.zeros(MC.shape, 'd')
def getResidual(self, u, r):
import pdb
import copy
"""
Calculate the element residuals and add in to the global residual
"""
if self.MC_global is None:
self.getMassMatrix()
self.initVectors()
if self.coefficients.set_vos:
self.coefficients.set_vos(self.q['x'], self.coefficients.q_vos)
# Reset some vectors for FCT
self.min_u_bc = np.zeros(self.u[0].dof.shape, 'd') + 1E10
self.max_u_bc = np.zeros(self.u[0].dof.shape, 'd') - 1E10
self.dt_times_dC_minus_dL.fill(0.0)
self.uLow.fill(0.0)
self.dLow.fill(0.0)
r.fill(0.0)
# Load the unknowns into the finite element dof
self.timeIntegration.calculateCoefs()
self.timeIntegration.calculateU(u)
self.setUnknowns(self.timeIntegration.u)
# cek can put in logic to skip of BC's don't depend on t or u
# Dirichlet boundary conditions
# if hasattr(self.numericalFlux,'setDirichletValues'):
if (self.stage!=2):
self.numericalFlux.setDirichletValues(self.ebqe)
# flux boundary conditions
for t, g in list(self.fluxBoundaryConditionsObjectsDict[0].advectiveFluxBoundaryConditionsDict.items()):
self.ebqe[('advectiveFlux_bc', 0)][t[0], t[1]] = g(self.ebqe[('x')][t[0], t[1]], self.timeIntegration.t)
self.ebqe[('advectiveFlux_bc_flag', 0)][t[0], t[1]] = 1
if self.forceStrongConditions:
for dofN, g in list(self.dirichletConditionsForceDOF.DOFBoundaryConditionsDict.items()):
self.u[0].dof[dofN] = g(self.dirichletConditionsForceDOF.DOFBoundaryPointDict[dofN], self.timeIntegration.t)
if (self.stage==2 and self.auxTaylorGalerkinFlag==1):
self.uTilde_dof[:] = self.u[0].dof
self.auxTaylorGalerkinFlag=0
argsDict = cArgumentsDict.ArgumentsDict()
argsDict["dt"] = self.timeIntegration.dt
argsDict["mesh_trial_ref"] = self.u[0].femSpace.elementMaps.psi
argsDict["mesh_grad_trial_ref"] = self.u[0].femSpace.elementMaps.grad_psi
argsDict["mesh_dof"] = self.mesh.nodeArray
argsDict["mesh_velocity_dof"] = self.mesh.nodeVelocityArray
argsDict["MOVING_DOMAIN"] = self.MOVING_DOMAIN
argsDict["mesh_l2g"] = self.mesh.elementNodesArray
argsDict["dV_ref"] = self.elementQuadratureWeights[('u', 0)]
argsDict["u_trial_ref"] = self.u[0].femSpace.psi
argsDict["u_grad_trial_ref"] = self.u[0].femSpace.grad_psi
argsDict["u_test_ref"] = self.u[0].femSpace.psi
argsDict["u_grad_test_ref"] = self.u[0].femSpace.grad_psi
argsDict["mesh_trial_trace_ref"] = self.u[0].femSpace.elementMaps.psi_trace
argsDict["mesh_grad_trial_trace_ref"] = self.u[0].femSpace.elementMaps.grad_psi_trace
argsDict["dS_ref"] = self.elementBoundaryQuadratureWeights[('u', 0)]
argsDict["u_trial_trace_ref"] = self.u[0].femSpace.psi_trace
argsDict["u_grad_trial_trace_ref"] = self.u[0].femSpace.grad_psi_trace
argsDict["u_test_trace_ref"] = self.u[0].femSpace.psi_trace
argsDict["u_grad_test_trace_ref"] = self.u[0].femSpace.grad_psi_trace
argsDict["normal_ref"] = self.u[0].femSpace.elementMaps.boundaryNormals
argsDict["boundaryJac_ref"] = self.u[0].femSpace.elementMaps.boundaryJacobians
argsDict["nElements_global"] = self.mesh.nElements_global
argsDict["useMetrics"] = self.coefficients.useMetrics
argsDict["alphaBDF"] = self.timeIntegration.alpha_bdf
argsDict["lag_shockCapturing"] = self.shockCapturing.lag
argsDict["shockCapturingDiffusion"] = float(self.shockCapturing.shockCapturingFactor)
argsDict["sc_uref"] = self.coefficients.sc_uref
argsDict["sc_alpha"] = self.coefficients.sc_beta
argsDict["q_porosity"] = self.coefficients.q_porosity
argsDict["porosity_dof"] = self.coefficients.porosity_dof
argsDict["u_l2g"] = self.u[0].femSpace.dofMap.l2g
argsDict["r_l2g"] = self.l2g[0]['freeGlobal']
argsDict["elementDiameter"] = self.mesh.elementDiametersArray
argsDict["degree_polynomial"] = float(self.degree_polynomial)
argsDict["u_dof"] = self.u[0].dof
argsDict["u_dof_old"] = self.u_dof_old
argsDict["velocity"] = self.coefficients.q_v
argsDict["q_m"] = self.timeIntegration.m_tmp[0]
argsDict["q_u"] = self.q[('u', 0)]
argsDict["q_m_betaBDF"] = self.timeIntegration.beta_bdf[0]
argsDict["q_dV"] = self.q['dV']
argsDict["q_dV_last"] = self.q['dV_last']
argsDict["cfl"] = self.q[('cfl', 0)]
argsDict["edge_based_cfl"] = self.edge_based_cfl
argsDict["q_numDiff_u"] = self.shockCapturing.numDiff[0]
argsDict["q_numDiff_u_last"] = self.shockCapturing.numDiff_last[0]
argsDict["offset_u"] = self.offset[0]
argsDict["stride_u"] = self.stride[0]
argsDict["globalResidual"] = r
argsDict["nExteriorElementBoundaries_global"] = self.mesh.nExteriorElementBoundaries_global
argsDict["exteriorElementBoundariesArray"] = self.mesh.exteriorElementBoundariesArray
argsDict["elementBoundaryElementsArray"] = self.mesh.elementBoundaryElementsArray
argsDict["elementBoundaryLocalElementBoundariesArray"] = self.mesh.elementBoundaryLocalElementBoundariesArray
argsDict["ebqe_velocity_ext"] = self.coefficients.ebqe_v
argsDict["ebqe_porosity_ext"] = self.coefficients.ebqe_porosity
argsDict["isDOFBoundary_u"] = self.numericalFlux.isDOFBoundary[0]
argsDict["ebqe_bc_u_ext"] = self.numericalFlux.ebqe[('u', 0)]
argsDict["isFluxBoundary_u"] = self.ebqe[('advectiveFlux_bc_flag', 0)]
argsDict["ebqe_bc_flux_u_ext"] = self.ebqe[('advectiveFlux_bc', 0)]
argsDict["ebqe_phi"] = self.coefficients.ebqe_phi
argsDict["epsFact"] = self.coefficients.epsFact
argsDict["ebqe_u"] = self.ebqe[('u', 0)]
argsDict["ebqe_flux"] = self.ebqe[('advectiveFlux', 0)]
argsDict["stage"] = self.stage
argsDict["uTilde_dof"] = self.uTilde_dof
argsDict["cE"] = self.coefficients.cE
argsDict["cMax"] = self.coefficients.cMax
argsDict["cK"] = self.coefficients.cK
argsDict["uL"] = self.coefficients.uL
argsDict["uR"] = self.coefficients.uR
argsDict["numDOFs"] = len(self.rowptr) - 1
argsDict["NNZ"] = self.nnz
argsDict["csrRowIndeces_DofLoops"] = self.rowptr
argsDict["csrColumnOffsets_DofLoops"] = self.colind
argsDict["csrRowIndeces_CellLoops"] = self.csrRowIndeces[(0, 0)]
argsDict["csrColumnOffsets_CellLoops"] = self.csrColumnOffsets[(0, 0)]
argsDict["csrColumnOffsets_eb_CellLoops"] = self.csrColumnOffsets_eb[(0, 0)]
argsDict["ML"] = self.ML
argsDict["LUMPED_MASS_MATRIX"] = self.coefficients.LUMPED_MASS_MATRIX
argsDict["STABILIZATION_TYPE"] = self.coefficients.STABILIZATION_TYPE
argsDict["ENTROPY_TYPE"] = self.coefficients.ENTROPY_TYPE
argsDict["uLow"] = self.uLow
argsDict["dLow"] = self.dLow
argsDict["dt_times_dH_minus_dL"] = self.dt_times_dC_minus_dL
argsDict["min_u_bc"] = self.min_u_bc
argsDict["max_u_bc"] = self.max_u_bc
argsDict["quantDOFs"] = self.quantDOFs
self.calculateResidual(argsDict)
if self.forceStrongConditions:
for dofN, g in list(self.dirichletConditionsForceDOF.DOFBoundaryConditionsDict.items()):
r[dofN] = 0
if (self.auxiliaryCallCalculateResidual == False):
edge_based_cflMax = globalMax(self.edge_based_cfl.max()) * self.timeIntegration.dt
cell_based_cflMax = globalMax(self.q[('cfl', 0)].max()) * self.timeIntegration.dt
logEvent("... Current dt = " + str(self.timeIntegration.dt), level=4)
logEvent("... Maximum Cell Based CFL = " + str(cell_based_cflMax), level=2)
logEvent("... Maximum Edge Based CFL = " + str(edge_based_cflMax), level=2)
if self.stabilization:
self.stabilization.accumulateSubgridMassHistory(self.q)
logEvent("Global residual", level=9, data=r)
self.nonlinear_function_evaluations += 1
if self.globalResidualDummy is None:
self.globalResidualDummy = np.zeros(r.shape, 'd')
    def getJacobian(self, jacobian):
        """Assemble the global Jacobian in CSR form by delegating to the
        compiled VOF kernel, then optionally overwrite rows of strongly
        enforced Dirichlet DOFs with identity rows.

        :param jacobian: SparseMat to assemble into (zeroed first)
        :returns: the same ``jacobian`` object
        """
        cfemIntegrals.zeroJacobian_CSR(self.nNonzerosInJacobian,
                                       jacobian)
        # Pack all arguments for the compiled Jacobian kernel.
        argsDict = cArgumentsDict.ArgumentsDict()
        argsDict["mesh_trial_ref"] = self.u[0].femSpace.elementMaps.psi
        argsDict["mesh_grad_trial_ref"] = self.u[0].femSpace.elementMaps.grad_psi
        argsDict["mesh_dof"] = self.mesh.nodeArray
        argsDict["mesh_velocity_dof"] = self.mesh.nodeVelocityArray
        argsDict["MOVING_DOMAIN"] = self.MOVING_DOMAIN
        argsDict["mesh_l2g"] = self.mesh.elementNodesArray
        argsDict["dV_ref"] = self.elementQuadratureWeights[('u', 0)]
        argsDict["u_trial_ref"] = self.u[0].femSpace.psi
        argsDict["u_grad_trial_ref"] = self.u[0].femSpace.grad_psi
        argsDict["u_test_ref"] = self.u[0].femSpace.psi
        argsDict["u_grad_test_ref"] = self.u[0].femSpace.grad_psi
        argsDict["mesh_trial_trace_ref"] = self.u[0].femSpace.elementMaps.psi_trace
        argsDict["mesh_grad_trial_trace_ref"] = self.u[0].femSpace.elementMaps.grad_psi_trace
        argsDict["dS_ref"] = self.elementBoundaryQuadratureWeights[('u', 0)]
        argsDict["u_trial_trace_ref"] = self.u[0].femSpace.psi_trace
        argsDict["u_grad_trial_trace_ref"] = self.u[0].femSpace.grad_psi_trace
        argsDict["u_test_trace_ref"] = self.u[0].femSpace.psi_trace
        argsDict["u_grad_test_trace_ref"] = self.u[0].femSpace.grad_psi_trace
        argsDict["normal_ref"] = self.u[0].femSpace.elementMaps.boundaryNormals
        argsDict["boundaryJac_ref"] = self.u[0].femSpace.elementMaps.boundaryJacobians
        argsDict["nElements_global"] = self.mesh.nElements_global
        argsDict["useMetrics"] = self.coefficients.useMetrics
        argsDict["alphaBDF"] = self.timeIntegration.alpha_bdf
        argsDict["lag_shockCapturing"] = self.shockCapturing.lag
        argsDict["shockCapturingDiffusion"] = float(self.shockCapturing.shockCapturingFactor)
        argsDict["q_porosity"] = self.coefficients.q_porosity
        argsDict["u_l2g"] = self.u[0].femSpace.dofMap.l2g
        argsDict["r_l2g"] = self.l2g[0]['freeGlobal']
        argsDict["elementDiameter"] = self.mesh.elementDiametersArray
        argsDict["u_dof"] = self.u[0].dof
        argsDict["velocity"] = self.coefficients.q_v
        argsDict["q_m_betaBDF"] = self.timeIntegration.beta_bdf[0]
        argsDict["cfl"] = self.q[('cfl', 0)]
        argsDict["q_numDiff_u_last"] = self.shockCapturing.numDiff_last[0]
        argsDict["csrRowIndeces_u_u"] = self.csrRowIndeces[(0, 0)]
        argsDict["csrColumnOffsets_u_u"] = self.csrColumnOffsets[(0, 0)]
        # raw nzval array of the CSR matrix; the kernel writes into it in place
        argsDict["globalJacobian"] = jacobian.getCSRrepresentation()[2]
        argsDict["nExteriorElementBoundaries_global"] = self.mesh.nExteriorElementBoundaries_global
        argsDict["exteriorElementBoundariesArray"] = self.mesh.exteriorElementBoundariesArray
        argsDict["elementBoundaryElementsArray"] = self.mesh.elementBoundaryElementsArray
        argsDict["elementBoundaryLocalElementBoundariesArray"] = self.mesh.elementBoundaryLocalElementBoundariesArray
        argsDict["ebqe_velocity_ext"] = self.coefficients.ebqe_v
        argsDict["ebqe_porosity_ext"] = self.coefficients.ebqe_porosity
        argsDict["isDOFBoundary_u"] = self.numericalFlux.isDOFBoundary[0]
        argsDict["ebqe_bc_u_ext"] = self.numericalFlux.ebqe[('u', 0)]
        argsDict["isFluxBoundary_u"] = self.ebqe[('advectiveFlux_bc_flag', 0)]
        argsDict["ebqe_bc_flux_u_ext"] = self.ebqe[('advectiveFlux_bc', 0)]
        argsDict["csrColumnOffsets_eb_u_u"] = self.csrColumnOffsets_eb[(0, 0)]
        argsDict["STABILIZATION_TYPE"] = self.coefficients.STABILIZATION_TYPE
        self.calculateJacobian(argsDict)
        # Enforce strong Dirichlet conditions directly in the Jacobian:
        # diagonal entry set to `scaling`, off-diagonals of the row zeroed.
        if self.forceStrongConditions:
            scaling = 1.0  # probably want to add some scaling to match non-dirichlet diagonals in linear system
            for dofN in list(self.dirichletConditionsForceDOF.DOFBoundaryConditionsDict.keys()):
                global_dofN = dofN
                for i in range(self.rowptr[global_dofN],
                               self.rowptr[global_dofN + 1]):
                    if (self.colind[i] == global_dofN):
                        # print "RBLES forcing residual cj = %s dofN= %s
                        # global_dofN= %s was self.nzval[i]= %s now =%s " %
                        # (cj,dofN,global_dofN,self.nzval[i],scaling)
                        self.nzval[i] = scaling
                    else:
                        self.nzval[i] = 0.0
                        # print "RBLES zeroing residual cj = %s dofN= %s
                        # global_dofN= %s " % (cj,dofN,global_dofN)
        logEvent("Jacobian ", level=10, data=jacobian)
        # mwf decide if this is reasonable for solver statistics
        self.nonlinear_function_jacobian_evaluations += 1
        return jacobian
    def calculateElementQuadrature(self):
        """
        Calculate the physical location and weights of the quadrature rules
        and the shape information at the quadrature points.

        This function should be called only when the mesh changes.
        """
        # physical coordinates of the element quadrature points
        self.u[0].femSpace.elementMaps.getValues(self.elementQuadraturePoints,
                                                 self.q['x'])
        # reference-element basis values/gradients for the element maps
        self.u[0].femSpace.elementMaps.getBasisValuesRef(
            self.elementQuadraturePoints)
        self.u[0].femSpace.elementMaps.getBasisGradientValuesRef(
            self.elementQuadraturePoints)
        # reference-element basis values/gradients for the solution space
        self.u[0].femSpace.getBasisValuesRef(self.elementQuadraturePoints)
        self.u[0].femSpace.getBasisGradientValuesRef(self.elementQuadraturePoints)
        self.coefficients.initializeElementQuadrature(self.timeIntegration.t,
                                                      self.q)
        # stabilization / shock capturing need their own quadrature setup
        if self.stabilization is not None:
            self.stabilization.initializeElementQuadrature(
                self.mesh, self.timeIntegration.t, self.q)
            self.stabilization.initializeTimeIntegration(self.timeIntegration)
        if self.shockCapturing is not None:
            self.shockCapturing.initializeElementQuadrature(
                self.mesh, self.timeIntegration.t, self.q)
def calculateElementBoundaryQuadrature(self):
pass
    def calculateExteriorElementBoundaryQuadrature(self):
        """
        Calculate the physical location and weights of the quadrature rules
        and the shape information at the quadrature points on global element boundaries.

        This function should be called only when the mesh changes.
        """
        #
        # get physical locations of element boundary quadrature points
        #
        # assume all components live on the same mesh
        self.u[0].femSpace.elementMaps.getBasisValuesTraceRef(
            self.elementBoundaryQuadraturePoints)
        self.u[0].femSpace.elementMaps.getBasisGradientValuesTraceRef(
            self.elementBoundaryQuadraturePoints)
        self.u[0].femSpace.getBasisValuesTraceRef(
            self.elementBoundaryQuadraturePoints)
        self.u[0].femSpace.getBasisGradientValuesTraceRef(
            self.elementBoundaryQuadraturePoints)
        # physical coordinates of the exterior-boundary quadrature points
        self.u[0].femSpace.elementMaps.getValuesGlobalExteriorTrace(
            self.elementBoundaryQuadraturePoints, self.ebqe['x'])
        # (re)build flux boundary-condition objects, one per component
        self.fluxBoundaryConditionsObjectsDict = dict([(cj, FluxBoundaryConditions(self.mesh,
                                                                                   self.nElementBoundaryQuadraturePoints_elementBoundary,
                                                                                   self.ebqe[('x')],
                                                                                   self.advectiveFluxBoundaryConditionsSetterDict[cj],
                                                                                   self.diffusiveFluxBoundaryConditionsSetterDictDict[cj]))
                                                       for cj in list(self.advectiveFluxBoundaryConditionsSetterDict.keys())])
        self.coefficients.initializeGlobalExteriorElementBoundaryQuadrature(
            self.timeIntegration.t, self.ebqe)
def estimate_mt(self):
pass
def calculateSolutionAtQuadrature(self):
pass
def calculateAuxiliaryQuantitiesAfterStep(self):
pass
def updateAfterMeshMotion(self):
pass
| mit | d76fe172900896013b78953c6e4dd6a5 | 51.077913 | 182 | 0.578476 | 3.820904 | false | false | false | false |
erdc/proteus | proteus/tests/MeshAdaptPUMI/Sizefields/cylinder2D_ibm/test_cylinder2D_ibm_rans2p.py | 1 | 3900 | """Tests for 2d flow around an immersed boundary cylinder with rans2p"""
from proteus.iproteus import *
from proteus import Comm
from proteus import Context
from proteus import MeshTools
from proteus import Domain
from proteus.MeshAdaptPUMI import MeshAdapt
import tables
import importlib
import subprocess
import pytest
# MPI communicator handle and logging configuration shared by these tests
comm = Comm.get()
Profiling.logLevel = 7
Profiling.verbose = False
import numpy as np
class Test_Adapt_ibm():
    """Integration tests for mesh adaptation around an immersed-boundary
    cylinder with the RANS2P model, driven through ``parun`` subprocess
    invocations of ``cylinder_so.py``."""
    @classmethod
    def setup_class(cls):
        # make the test directory importable so cylinder_so.py can be found
        cls._scriptdir = os.path.dirname(os.path.abspath(__file__))
        sys.path.insert(0,cls._scriptdir)
    @classmethod
    def teardown_class(cls):
        sys.path.remove(cls._scriptdir)
        pass
    def setup_method(self, method):
        """Initialize the test problem. """
        self.aux_names = []
    def teardown_method(self, method):
        """Remove files produced by a test run, if they exist."""
        FileList = ['mesh.ele',
                    'mesh.edge',
                    'mesh.node',
                    'mesh.neigh',
                    'mesh.face',
                    'mesh.poly',
                    'mesh.1.neigh',
                    'mesh.1.poly',
                    'mesh.ply',
                    'mesh.asy',
                    'cylinder_so.log',
                    #'Reconstructed.dmg', #can't remove since teardown is called after each test
                    #'Reconstructed0.smb',
                    'proteus.log',
                    'cylinder.xmf',
                    'cylinder.h5',
                    'finalMesh0.smb',
                    'particle_forceHistory.txt',
                    'particle_momentHistory.txt',
                    'particle_vforceHistory.txt',
                    'particle_pforceHistory.txt',
                    'momentHistory.txt',
                    'TimeList.txt',
                    'timeHistory.txt',
                    'forceHistory_p.txt',
                    'forceHistory_v.txt',
                    'wettedAreaHistory.txt'
                    ]
        for file in FileList:
            if os.path.isfile(file):
                os.remove(file)
            else:
                pass
    def test_adaptIBM_genMesh(self):
        # run the short simulation with mesh generation enabled
        currentPath = os.path.dirname(os.path.abspath(__file__))
        runCommand = "cd "+currentPath+"; parun -l5 cylinder_so.py -C 'T=0.01 onlySaveFinalSolution=True genMesh=True usePUMI=True';"
        subprocess.check_call(runCommand,shell=True )
    @pytest.mark.skip
    def test_adaptIBM_adaptMesh(self):
        # rerun on the reconstructed mesh, then verify that adaptation
        # reduced the element count
        currentPath = os.path.dirname(os.path.abspath(__file__))
        runCommand = "cd "+currentPath+"; parun -l5 cylinder_so.py -C 'T=0.01 onlySaveFinalSolution=True genMesh=False usePUMI=True';"
        subprocess.check_call(runCommand,shell=True )
        #load initial mesh and extract element count
        domain = Domain.PUMIDomain(manager=MeshAdapt.AdaptManager()) #initialize the domain
        filePath=bytes(currentPath+'/','utf-8')
        domain.AdaptManager.PUMIAdapter.loadModelAndMesh(filePath+b"Reconstructed.dmg", filePath+b"Reconstructed.smb")
        # NOTE(review): TetrahedralMesh with dim=2 for a 2D case -- confirm intended
        mesh = MeshTools.TetrahedralMesh()
        mesh.convertFromPUMI(domain,domain.AdaptManager.PUMIAdapter,
                             [1],
                             [1],
                             parallel = comm.size() > 1,
                             dim=2)
        nElements_initial = mesh.nElements_global
        #load final mesh and extract element count
        domain.AdaptManager.PUMIAdapter.loadModelAndMesh(filePath+b"Reconstructed.dmg", filePath+b"finalMesh.smb")
        mesh2 = MeshTools.TetrahedralMesh()
        mesh2.convertFromPUMI(domain,domain.AdaptManager.PUMIAdapter,
                              [1],
                              [1],
                              parallel = comm.size() > 1,
                              dim=2)
        nElements_final = mesh2.nElements_global
        #adapted mesh should have less elements
        assert(nElements_final<nElements_initial)
| mit | d489367aafb5116d9fa6dd535d6f6adf | 36.142857 | 134 | 0.562564 | 3.939394 | false | true | false | false |
erdc/proteus | proteus/tests/POD/heat.py | 1 | 2171 | #!/usr/bin/env python
"""
Fine-scale heat equation solver
The equation is du/du - Laplace u + u + f(x,y,z,t) = 0
"""
from __future__ import print_function
from heat_init import *
physics.name = "heat_3d"
so.name = physics.name
ns = NumericalSolution.NS_base(so,[physics],[numerics],so.sList,opts)
import time
start = time.time()
failed=ns.calculateSolution('run1')
assert(not failed)
end = time.time() # we measure time required to obtain the fully resolved solution
print('Time required was %s seconds' % (end - start))
#arrays for using matplotlib's unstructured plotting interface
x = ns.modelList[0].levelModelList[-1].mesh.nodeArray[:,0]
y = ns.modelList[0].levelModelList[-1].mesh.nodeArray[:,1]
z = ns.modelList[0].levelModelList[-1].mesh.nodeArray[:,2]
u = ns.modelList[0].levelModelList[-1].u[0].dof
#we want to build a 3d plot of f(x,y,z0), when z0 = 0.5
u_range = u[4410:4851]
x_range = x[4410:4851]
y_range = y[4410:4851]
u_range = u_range.reshape(21,21)
x_range = x_range.reshape(21,21)
y_range = y_range.reshape(21,21)
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
fig = plt.figure()
ax = fig.gca(projection='3d')
surf=ax.plot_surface(x_range, y_range, u_range, rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0, antialiased=False)
plt.xlabel('x'); plt.ylabel('y')
plt.title('approximate solution at t = 1');
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.05f'))
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.savefig("solution.png")
plt.show()
#saving mass and stiffness matrices below
Model = ns.modelList[0].levelModelList[-1]
mj = Model.initializeMassJacobian()
Model.getMassJacobian(mj)
kj = Model.initializeSpatialJacobian()
Model.getSpatialJacobian(kj)
rowptr,colind,nzval = mj.getCSRrepresentation()
np.savetxt('iam',rowptr,'%d', ' ')
np.savetxt('jam',colind,'%d', ' ')
np.savetxt('am',nzval,delimiter=' ')
rowptr_s,colind_s,nzval_s = kj.getCSRrepresentation()
np.savetxt('ias',rowptr_s,'%d',' ')
np.savetxt('jas',colind_s,'%d',' ')
np.savetxt('as',nzval_s,delimiter=' ')
| mit | 19f62eeadd10b464b36e140a0f08fed2 | 31.402985 | 119 | 0.725933 | 2.690211 | false | false | false | false |
erdc/proteus | proteus/elastoplastic/ElastoPlastic.py | 1 | 68998 | from __future__ import print_function
from __future__ import division
from builtins import str
from builtins import range
from past.utils import old_div
import proteus
from proteus import FemTools, Archiver
from .cElastoPlastic import *
from math import atan, tan
import numpy as np
from proteus.Transport import OneLevelTransport, sqrt, TC_base
from proteus.Transport import NonlinearEquation, logEvent, memory
from proteus.Transport import FluxBoundaryConditions, Comm, cfemIntegrals
from proteus.Transport import DOFBoundaryConditions, Quadrature
from proteus.mprans import cArgumentsDict
class Coefficients(proteus.TransportCoefficients.TC_base):
    """Coefficients for 3D elasto-plastic soil mechanics with strength reduction.

    Drives a load-stepping sequence: a two-stage "gravity" step establishes
    the initial stress state, after which each converged solve increments the
    strength reduction factor (SRF) and divides the Mohr-Coulomb parameters
    (friction angle phi_mc, column 5; cohesion c_mc, column 6) of every
    material block by it — the classical strength-reduction method for slope
    stability analysis. Material block 7 is exempt from reduction (hack for
    vegetation on levees).
    """
    def __init__(self,
                 modelType_block,
                 modelParams_block,
                 g=[0.0,0.0,-9.8],#gravitational acceleration
                 rhow=998.2,#kg/m^3 water density (used if pore pressures specified)
                 pa=101325.0,#N/m^2 atmospheric pressure
                 nd=3,
                 meIndex=0,seepageIndex=-1,SRF=1.0,pore_pressure_file_base=None,pore_pressure_field_path=None):
        """
        :param modelType_block: material model type per material block
        :param modelParams_block: per-block material parameter array; column 5
            is phi_mc, column 6 is c_mc, column 13 is soil density
        :param g: gravitational acceleration vector (only read, never mutated)
        :param rhow: water density [kg/m^3]
        :param pa: atmospheric pressure [N/m^2]
        :param nd: spatial dimension; only nd == 3 is supported
        :param meIndex: index of this model in the model list
        :param seepageIndex: index of a seepage model supplying pore pressure
            heads, or negative for none
        :param SRF: initial strength reduction factor
        :param pore_pressure_file_base: base name of an HDF5 archive holding a
            pore pressure head field (alternative to a seepage model)
        :param pore_pressure_field_path: dataset path of that field in the
            archive
        """
        import copy
        self.modelType_block = modelType_block
        self.modelParams_block = modelParams_block
        self.materialProperties = self.modelParams_block
        # keep an unreduced copy so the SRF is always applied to the originals
        self.materialProperties_default = copy.deepcopy(self.modelParams_block)
        self.nMaterialProperties = len(self.materialProperties[-1])
        self.SRF = SRF
        self.g = np.array(g)
        self.gmag = sqrt(sum([gi**2 for gi in g]))
        self.rhow = rhow
        self.pore_fluid_unit_weight = self.gmag*self.rhow
        print("pore_fluid_unit_weight", self.pore_fluid_unit_weight)
        print("soil_unit_weight", self.materialProperties[0,13]*self.gmag)
        self.pa = pa
        self.nd = nd
        mass={}
        advection={}
        diffusion={}
        potential={}
        reaction={}
        hamiltonian={}
        stress={}
        assert(nd==3)
        variableNames=['hx','hy','hz']
        # linear mass and stress, constant reaction, per displacement component
        mass = {0:{0:'linear'},
                1:{1:'linear'},
                2:{2:'linear'}}
        reaction = {0:{0:'constant'},
                    1:{1:'constant'},
                    2:{2:'constant'}}
        stress= {0:{0:'linear',1:'linear',2:'linear'},
                 1:{0:'linear',1:'linear',2:'linear'},
                 2:{0:'linear',1:'linear',2:'linear'}}
        TC_base.__init__(self,
                         3,
                         mass,
                         advection,
                         diffusion,
                         potential,
                         reaction,
                         hamiltonian,
                         variableNames,
                         stress=stress)
        self.vectorComponents=[0,1,2]
        self.vectorName="displacement"
        self.firstCall=True
        self.gravityStep=1
        self.lastStepWasGravityStep=False
        self.meIndex = meIndex
        self.seepageIndex = seepageIndex
        self.copyInstructions = {'reset_uList':True}
        self.pore_pressure_file_base=pore_pressure_file_base
        self.pore_pressure_field_path=pore_pressure_field_path
    def attachModels(self,modelList):
        """Attach the displacement model and initialize the nodal pore
        pressure head from a seepage model, an HDF5 archive, or zeros."""
        self.model = modelList[self.meIndex]
        self.pore_pressure_head = np.zeros((self.model.mesh.nodeArray.shape[0],),'d')
        if self.seepageIndex > 0:
            self.seepageModel = modelList[self.seepageIndex]
            self.pore_pressure_head_save = self.seepageModel.u.dof[0]
            # bug fix: this previously assigned to self.pre_pressure_head,
            # an attribute that does not exist (AttributeError on this branch)
            self.pore_pressure_head[:] = self.pore_pressure_head_save
        elif self.pore_pressure_file_base is not None:
            import h5py
            archive = h5py.File(self.pore_pressure_file_base+".h5","r")
            # map the globally ordered archived field onto subdomain node order
            permute = np.argsort(self.mesh.globalMesh.nodeNumbering_subdomain2global)
            self.pore_pressure_head[permute] = archive[self.pore_pressure_field_path][self.mesh.globalMesh.nodeNumbering_subdomain2global[permute].tolist()]
            self.pore_pressure_head_save = self.pore_pressure_head.copy()
        else:
            self.pore_pressure_head_save = np.zeros((self.model.mesh.nodeArray.shape[0],),'d')
    def initializeElementQuadrature(self,t,cq):
        """
        Give the TC object access to the element quadrature storage
        """
        if self.firstCall:
            self.firstCall=False
            self.cq=cq
            cq['strain0'] = cq['strain'].copy()
            cq['strain_last'] = cq['strain'].copy()
            cq['plasticStrain'] = cq['strain'].copy()
            cq['plasticStrain_last'] = cq['strain'].copy()
            cq['strain_last'][:]=0.0
            cq['plasticStrain'][:]=0.0
            cq['plasticStrain_last'][:]=0.0
            self.bodyForce = cq['bodyForce']
            # body force = gravity scaled by each element's material density
            for eN in range(self.bodyForce.shape[0]):
                rhos = self.materialProperties[self.mesh.elementMaterialTypes[eN],13]
                for k in range(self.bodyForce.shape[1]):
                    self.bodyForce[eN,k,:] = self.g*rhos
    def initializeGlobalExteriorElementBoundaryQuadrature(self,t,cebqe):
        """
        Give the TC object access to the element quadrature storage
        """
        self.cebqe = cebqe
        cebqe['strain0'] = cebqe['strain'].copy()
        cebqe['strain_last'] = cebqe['strain'].copy()
        cebqe['plasticStrain'] = cebqe['strain'].copy()
        cebqe['plasticStrain_last'] = cebqe['strain'].copy()
        cebqe['strain_last'][:]=0.0
        cebqe['plasticStrain'][:]=0.0
        cebqe['plasticStrain_last'][:]=0.0
    def initializeMesh(self,mesh):
        """Store the mesh for later use (material types, node counts)."""
        self.mesh = mesh
    def postStep(self,t,firstStep=False):
        """Advance the load-stepping state after a converged solve.

        During the two-stage gravity step the converged strains become the
        reference state; afterwards each converged step raises the SRF by 0.05
        and re-reduces the Mohr-Coulomb parameters from the saved defaults.
        """
        if self.gravityStep:
            if self.gravityStep == 1:
                # first gravity stage: record the initial strain state
                self.cq['strain0'][:]=self.cq['strain']
                self.cebqe['strain0'][:] = self.cebqe['strain']
                self.gravityStep = 2
            else:
                self.gravityStep = 0
            self.cq['strain_last'][:] = self.cq['strain']
            self.cq['plasticStrain_last'][:] = 0.0#self.cq['plasticStrain']
            self.cebqe['strain_last'][:] = self.cebqe['strain']
            self.cebqe['plasticStrain_last'][:] = 0.0#self.cebqe['plasticStrain']
            self.pore_pressure_head[:] = self.pore_pressure_head_save
            self.lastStepWasGravityStep=True
            for it in range(self.materialProperties.shape[0]):
                #cek hack for veg on levees
                if it != 7:
                    self.materialProperties[it,5] = atan(tan(self.materialProperties_default[it,5])/self.SRF)#phi_mc
                    self.materialProperties[it,6] = self.materialProperties_default[it,6]/self.SRF#c_mc
            self.copyInstructions = {'reset_uList':True}
        else:
            print("Completed========================SRF = "+repr(self.SRF)+"===============================")
            self.lastStepWasGravityStep = False
            self.SRF += 0.05
            for it in range(self.materialProperties.shape[0]):
                if it != 7:
                    self.materialProperties[it,5] = atan(tan(self.materialProperties_default[it,5])/self.SRF)#phi_mc
                    self.materialProperties[it,6] = self.materialProperties_default[it,6]/self.SRF#c_mc
            self.copyInstructions = None
        print("=========Not Updating Mesh=================")
    def preStep(self,t,firstStep=False):
        """Reset the displacement DOFs after a gravity step and return the
        copy instructions for the solver."""
        print("Starting========================SRF = "+repr(self.SRF)+"===============================")
        if self.lastStepWasGravityStep:
            self.model.u[0].dof[:]=0.0
            self.model.u[1].dof[:]=0.0
            self.model.u[2].dof[:]=0.0
        return self.copyInstructions
    def evaluate(self,t,c):
        """No pointwise coefficient evaluation; all work is in the compiled
        kernel."""
        pass
class LevelModel(proteus.Transport.OneLevelTransport):
nCalls=0
def __init__(self,
             uDict,
             phiDict,
             testSpaceDict,
             matType,
             dofBoundaryConditionsDict,
             dofBoundaryConditionsSetterDict,
             coefficients,
             elementQuadrature,
             elementBoundaryQuadrature,
             fluxBoundaryConditionsDict=None,
             advectiveFluxBoundaryConditionsSetterDict=None,
             diffusiveFluxBoundaryConditionsSetterDictDict=None,
             stressFluxBoundaryConditionsSetterDict=None,
             stabilization=None,
             shockCapturing=None,
             conservativeFluxDict=None,
             numericalFluxType=None,
             TimeIntegrationClass=None,
             massLumping=False,
             reactionLumping=False,
             options=None,
             name='Plasticity',
             reuse_trial_and_test_quadrature=True,
             sd = True,
             movingDomain=False,
             bdyNullSpace=False):
    """Set up the single-level discretization of the elasto-plastic model.

    Follows the standard proteus OneLevelTransport construction sequence:
    store spaces/BCs/coefficients, build the quadrature rules, allocate
    quadrature-point storage (q, ebqe, ...), set up time integration, DOF
    layout, the numerical flux (penalty terms), stress-flux boundary
    conditions, and finally the compiled cElastoPlastic kernel.
    """
    self.bdyNullSpace=bdyNullSpace
    #
    #set the objects describing the method and boundary conditions
    #
    self.velocityPostProcessor = None
    self.movingDomain=movingDomain
    self.tLast_mesh=None
    #
    #cek todo clean up these flags in the optimized version
    self.bcsTimeDependent=options.bcsTimeDependent
    self.bcsSet=False
    self.name=name
    self.sd=sd
    self.lowmem=True
    self.timeTerm=True#allow turning off the time derivative
    self.testIsTrial=True
    self.phiTrialIsTrial=True
    self.u = uDict
    # quadratic displacement spaces need Hessian storage in the kernel
    self.Hess=False
    if isinstance(self.u[0].femSpace,FemTools.C0_AffineQuadraticOnSimplexWithNodalBasis):
        self.Hess=True
    self.ua = {}#analytical solutions
    self.phi = phiDict
    self.dphi={}
    self.matType = matType
    #mwf try to reuse test and trial information across components if spaces are the same
    self.reuse_test_trial_quadrature = reuse_trial_and_test_quadrature#True#False
    if self.reuse_test_trial_quadrature:
        for ci in range(1,coefficients.nc):
            assert self.u[ci].femSpace.__class__.__name__ == self.u[0].femSpace.__class__.__name__, "to reuse_test_trial_quad all femSpaces must be the same!"
    ## Simplicial Mesh
    self.mesh = self.u[0].femSpace.mesh #assume the same mesh for all components for now
    self.testSpace = testSpaceDict
    self.dirichletConditions = dofBoundaryConditionsDict
    self.dirichletNodeSetList=None #explicit Dirichlet conditions for now, no Dirichlet BC constraints
    self.coefficients = coefficients
    self.coefficients.initializeMesh(self.mesh)
    self.nc = self.coefficients.nc
    self.stabilization = stabilization
    self.shockCapturing = shockCapturing
    self.conservativeFlux = conservativeFluxDict #no velocity post-processing for now
    self.fluxBoundaryConditions=fluxBoundaryConditionsDict
    self.stressFluxBoundaryConditionsSetterDict=stressFluxBoundaryConditionsSetterDict
    #determine whether the stabilization term is nonlinear
    self.stabilizationIsNonlinear = False
    #cek come back
    if self.stabilization != None:
        for ci in range(self.nc):
            if ci in coefficients.mass:
                for flag in list(coefficients.mass[ci].values()):
                    if flag == 'nonlinear':
                        self.stabilizationIsNonlinear=True
            if ci in coefficients.advection:
                for flag in list(coefficients.advection[ci].values()):
                    if flag == 'nonlinear':
                        self.stabilizationIsNonlinear=True
            if ci in coefficients.diffusion:
                for diffusionDict in list(coefficients.diffusion[ci].values()):
                    for flag in list(diffusionDict.values()):
                        if flag != 'constant':
                            self.stabilizationIsNonlinear=True
            if ci in coefficients.potential:
                for flag in list(coefficients.potential[ci].values()):
                    if flag == 'nonlinear':
                        self.stabilizationIsNonlinear=True
            if ci in coefficients.reaction:
                for flag in list(coefficients.reaction[ci].values()):
                    if flag == 'nonlinear':
                        self.stabilizationIsNonlinear=True
            if ci in coefficients.hamiltonian:
                for flag in list(coefficients.hamiltonian[ci].values()):
                    if flag == 'nonlinear':
                        self.stabilizationIsNonlinear=True
    #determine if we need element boundary storage
    self.elementBoundaryIntegrals = {}
    for ci in range(self.nc):
        self.elementBoundaryIntegrals[ci] = ((self.conservativeFlux != None) or
                                             (numericalFluxType != None) or
                                             (self.fluxBoundaryConditions[ci] == 'outFlow') or
                                             (self.fluxBoundaryConditions[ci] == 'mixedFlow') or
                                             (self.fluxBoundaryConditions[ci] == 'setFlow'))
    #
    #calculate some dimensions
    #
    self.nSpace_global = self.u[0].femSpace.nSpace_global #assume same space dim for all variables
    self.nDOF_trial_element = [u_j.femSpace.max_nDOF_element for u_j in list(self.u.values())]
    self.nDOF_phi_trial_element = [phi_k.femSpace.max_nDOF_element for phi_k in list(self.phi.values())]
    self.n_phi_ip_element = [phi_k.femSpace.referenceFiniteElement.interpolationConditions.nQuadraturePoints for phi_k in list(self.phi.values())]
    self.nDOF_test_element = [femSpace.max_nDOF_element for femSpace in list(self.testSpace.values())]
    self.nFreeDOF_global = [dc.nFreeDOF_global for dc in list(self.dirichletConditions.values())]
    self.nVDOF_element = sum(self.nDOF_trial_element)
    self.nFreeVDOF_global = sum(self.nFreeDOF_global)
    #
    NonlinearEquation.__init__(self,self.nFreeVDOF_global)
    #
    #build the quadrature point dictionaries from the input (this
    #is just for convenience so that the input doesn't have to be
    #complete)
    #
    elementQuadratureDict={}
    elemQuadIsDict = isinstance(elementQuadrature,dict)
    if elemQuadIsDict: #set terms manually
        for I in self.coefficients.elementIntegralKeys:
            if I in elementQuadrature:
                elementQuadratureDict[I] = elementQuadrature[I]
            else:
                elementQuadratureDict[I] = elementQuadrature['default']
    else:
        for I in self.coefficients.elementIntegralKeys:
            elementQuadratureDict[I] = elementQuadrature
    if self.stabilization != None:
        for I in self.coefficients.elementIntegralKeys:
            if elemQuadIsDict:
                if I in elementQuadrature:
                    elementQuadratureDict[('stab',)+I[1:]] = elementQuadrature[I]
                else:
                    elementQuadratureDict[('stab',)+I[1:]] = elementQuadrature['default']
            else:
                elementQuadratureDict[('stab',)+I[1:]] = elementQuadrature
    if self.shockCapturing != None:
        for ci in self.shockCapturing.components:
            if elemQuadIsDict:
                if ('numDiff',ci,ci) in elementQuadrature:
                    elementQuadratureDict[('numDiff',ci,ci)] = elementQuadrature[('numDiff',ci,ci)]
                else:
                    elementQuadratureDict[('numDiff',ci,ci)] = elementQuadrature['default']
            else:
                elementQuadratureDict[('numDiff',ci,ci)] = elementQuadrature
    if massLumping:
        # lumping replaces the mass/stabilization rules with nodal (Lobatto) rules
        for ci in list(self.coefficients.mass.keys()):
            elementQuadratureDict[('m',ci)] = Quadrature.SimplexLobattoQuadrature(self.nSpace_global,1)
        for I in self.coefficients.elementIntegralKeys:
            elementQuadratureDict[('stab',)+I[1:]] = Quadrature.SimplexLobattoQuadrature(self.nSpace_global,1)
    if reactionLumping:
        for ci in list(self.coefficients.mass.keys()):
            elementQuadratureDict[('r',ci)] = Quadrature.SimplexLobattoQuadrature(self.nSpace_global,1)
        for I in self.coefficients.elementIntegralKeys:
            elementQuadratureDict[('stab',)+I[1:]] = Quadrature.SimplexLobattoQuadrature(self.nSpace_global,1)
    elementBoundaryQuadratureDict={}
    if isinstance(elementBoundaryQuadrature,dict): #set terms manually
        for I in self.coefficients.elementBoundaryIntegralKeys:
            if I in elementBoundaryQuadrature:
                elementBoundaryQuadratureDict[I] = elementBoundaryQuadrature[I]
            else:
                elementBoundaryQuadratureDict[I] = elementBoundaryQuadrature['default']
    else:
        for I in self.coefficients.elementBoundaryIntegralKeys:
            elementBoundaryQuadratureDict[I] = elementBoundaryQuadrature
    #
    # find the union of all element quadrature points and
    # build a quadrature rule for each integral that has a
    # weight at each point in the union
    #mwf include tag telling me which indices are which quadrature rule?
    (self.elementQuadraturePoints,self.elementQuadratureWeights,
     self.elementQuadratureRuleIndeces) = Quadrature.buildUnion(elementQuadratureDict)
    self.nQuadraturePoints_element = self.elementQuadraturePoints.shape[0]
    self.nQuadraturePoints_global = self.nQuadraturePoints_element*self.mesh.nElements_global
    #
    #Repeat the same thing for the element boundary quadrature
    #
    (self.elementBoundaryQuadraturePoints,
     self.elementBoundaryQuadratureWeights,
     self.elementBoundaryQuadratureRuleIndeces) = Quadrature.buildUnion(elementBoundaryQuadratureDict)
    self.nElementBoundaryQuadraturePoints_elementBoundary = self.elementBoundaryQuadraturePoints.shape[0]
    self.nElementBoundaryQuadraturePoints_global = (self.mesh.nElements_global*
                                                    self.mesh.nElementBoundaries_element*
                                                    self.nElementBoundaryQuadraturePoints_elementBoundary)
    #
    #storage dictionaries for quadrature-point values; only the arrays the
    #compiled kernel actually reads/writes are allocated (a large set of
    #commented-out legacy allocations was removed here for readability)
    #
    self.q={}
    self.ebq={}
    self.ebq_global={}
    self.ebqe={}
    self.phi_ip={}
    self.ebqe['x'] = np.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,3),'d')
    self.q['bodyForce'] = np.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nSpace_global),'d')
    # 6 components: symmetric 3x3 strain tensor in Voigt notation
    self.q['strain'] = np.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,6),'d')
    self.q[('u',0)] = np.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')
    self.ebqe[('u',0)] = np.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary),'d')
    self.ebqe['penalty'] = np.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary),'d')
    self.ebqe[('strain')] = np.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,6),'d')
    self.ebqe[('u',1)] = np.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary),'d')
    self.ebqe[('u',2)] = np.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary),'d')
    self.ebqe[('stressFlux_bc_flag',0)] = np.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary),'i')
    self.ebqe[('stressFlux_bc_flag',1)] = np.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary),'i')
    self.ebqe[('stressFlux_bc_flag',2)] = np.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary),'i')
    self.ebqe[('stressFlux_bc',0)] = np.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary),'d')
    self.ebqe[('stressFlux_bc',1)] = np.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary),'d')
    self.ebqe[('stressFlux_bc',2)] = np.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary),'d')
    self.points_elementBoundaryQuadrature= set()
    self.scalars_elementBoundaryQuadrature= set([('u',ci) for ci in range(self.nc)])
    self.vectors_elementBoundaryQuadrature= set()
    self.tensors_elementBoundaryQuadrature= set()
    #
    #show quadrature
    #
    logEvent("Dumping quadrature shapes for model %s" % self.name,level=9)
    logEvent("Element quadrature array (q)", level=9)
    for (k,v) in list(self.q.items()): logEvent(str((k,v.shape)),level=9)
    logEvent("Element boundary quadrature (ebq)",level=9)
    for (k,v) in list(self.ebq.items()): logEvent(str((k,v.shape)),level=9)
    logEvent("Global element boundary quadrature (ebq_global)",level=9)
    for (k,v) in list(self.ebq_global.items()): logEvent(str((k,v.shape)),level=9)
    logEvent("Exterior element boundary quadrature (ebqe)",level=9)
    for (k,v) in list(self.ebqe.items()): logEvent(str((k,v.shape)),level=9)
    logEvent("Interpolation points for nonlinear diffusion potential (phi_ip)",level=9)
    for (k,v) in list(self.phi_ip.items()): logEvent(str((k,v.shape)),level=9)
    #
    # allocate residual and Jacobian storage
    #
    self.elementResidual = [np.zeros(
        (self.mesh.nElements_global,
         self.nDOF_test_element[ci]),
        'd') for ci in range(self.nc)]
    self.elementSpatialResidual = [np.zeros(
        (self.mesh.nElements_global,
         self.nDOF_test_element[ci]),
        'd') for ci in range(self.nc)]
    self.inflowBoundaryBC = {}
    self.inflowBoundaryBC_values = {}
    self.inflowFlux = {}
    for cj in range(self.nc):
        self.inflowBoundaryBC[cj] = np.zeros((self.mesh.nExteriorElementBoundaries_global,),'i')
        self.inflowBoundaryBC_values[cj] = np.zeros((self.mesh.nExteriorElementBoundaries_global,self.nDOF_trial_element[cj]),'d')
        self.inflowFlux[cj] = np.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary),'d')
    self.internalNodes = set(range(self.mesh.nNodes_global))
    #identify the internal nodes this is ought to be in mesh
    ##\todo move this to mesh
    for ebNE in range(self.mesh.nExteriorElementBoundaries_global):
        ebN = self.mesh.exteriorElementBoundariesArray[ebNE]
        eN_global = self.mesh.elementBoundaryElementsArray[ebN,0]
        ebN_element = self.mesh.elementBoundaryLocalElementBoundariesArray[ebN,0]
        for i in range(self.mesh.nNodes_element):
            if i != ebN_element:
                I = self.mesh.elementNodesArray[eN_global,i]
                self.internalNodes -= set([I])
    self.nNodes_internal = len(self.internalNodes)
    self.internalNodesArray=np.zeros((self.nNodes_internal,),'i')
    for nI,n in enumerate(self.internalNodes):
        self.internalNodesArray[nI]=n
    #
    del self.internalNodes
    self.internalNodes = None
    logEvent("Updating local to global mappings",2)
    self.updateLocal2Global()
    logEvent("Building time integration object",2)
    logEvent(memory("inflowBC, internalNodes,updateLocal2Global","OneLevelTransport"),level=4)
    #mwf for interpolating subgrid error for gradients etc
    if self.stabilization and self.stabilization.usesGradientStabilization:
        self.timeIntegration = TimeIntegrationClass(self,integrateInterpolationPoints=True)
    else:
        self.timeIntegration = TimeIntegrationClass(self)
    if options != None:
        self.timeIntegration.setFromOptions(options)
    logEvent(memory("TimeIntegration","OneLevelTransport"),level=4)
    logEvent("Calculating numerical quadrature formulas",2)
    self.calculateQuadrature()
    #lay out components/equations contiguously for now
    self.offset = [0]
    for ci in range(1,self.nc):
        self.offset += [self.offset[ci-1]+self.nFreeDOF_global[ci-1]]
    self.stride = [1 for ci in range(self.nc)]
    #use contiguous layout of components for parallel, requires weak DBC's
    comm = Comm.get()
    self.comm=comm
    if comm.size() > 1:
        assert numericalFluxType != None and numericalFluxType.useWeakDirichletConditions,"You must use a numerical flux to apply weak boundary conditions for parallel runs"
        # parallel: interleave components (stride nc) instead of blocking them
        self.offset = [0]
        for ci in range(1,self.nc):
            self.offset += [ci]
        self.stride = [self.nc for ci in range(self.nc)]
    #
    logEvent(memory("stride+offset","OneLevelTransport"),level=4)
    if numericalFluxType != None:
        if options is None or options.periodicDirichletConditions is None:
            self.numericalFlux = numericalFluxType(self,
                                                   dofBoundaryConditionsSetterDict,
                                                   advectiveFluxBoundaryConditionsSetterDict,
                                                   diffusiveFluxBoundaryConditionsSetterDictDict)
        else:
            self.numericalFlux = numericalFluxType(self,
                                                   dofBoundaryConditionsSetterDict,
                                                   advectiveFluxBoundaryConditionsSetterDict,
                                                   diffusiveFluxBoundaryConditionsSetterDictDict,
                                                   options.periodicDirichletConditions)
    else:
        self.numericalFlux = None
    #set penalty terms
    #cek todo move into numerical flux initialization
    if 'penalty' in self.ebq_global:
        for ebN in range(self.mesh.nElementBoundaries_global):
            for k in range(self.nElementBoundaryQuadraturePoints_elementBoundary):
                self.ebq_global['penalty'][ebN,k] = old_div(self.numericalFlux.penalty_constant,(self.mesh.elementBoundaryDiametersArray[ebN]**self.numericalFlux.penalty_power))
    #penalty term
    #cek move to Numerical flux initialization
    if 'penalty' in self.ebqe:
        for ebNE in range(self.mesh.nExteriorElementBoundaries_global):
            ebN = self.mesh.exteriorElementBoundariesArray[ebNE]
            for k in range(self.nElementBoundaryQuadraturePoints_elementBoundary):
                self.ebqe['penalty'][ebNE,k] = old_div(self.numericalFlux.penalty_constant,self.mesh.elementBoundaryDiametersArray[ebN]**self.numericalFlux.penalty_power)
    logEvent(memory("numericalFlux","OneLevelTransport"),level=4)
    self.elementEffectiveDiametersArray = self.mesh.elementInnerDiametersArray
    #use post processing tools to get conservative fluxes, None by default
    #helper for writing out data storage
    self.elementQuadratureDictionaryWriter = Archiver.XdmfWriter()
    self.elementBoundaryQuadratureDictionaryWriter = Archiver.XdmfWriter()
    self.exteriorElementBoundaryQuadratureDictionaryWriter = Archiver.XdmfWriter()
    # NOTE(review): stressFluxBoundaryConditionsObjectsDict is never assigned
    # in this method; presumably it is created by base-class/setup machinery
    # before this point — confirm.
    for ci,sbcObject in list(self.stressFluxBoundaryConditionsObjectsDict.items()):
        self.ebqe[('stressFlux_bc_flag',ci)] = np.zeros(self.ebqe[('stressFlux_bc',ci)].shape,'i')
        for t,g in list(sbcObject.stressFluxBoundaryConditionsDict.items()):
            self.ebqe[('stressFlux_bc',ci)][t[0],t[1]] = g(self.ebqe[('x')][t[0],t[1]],self.timeIntegration.t)
            self.ebqe[('stressFlux_bc_flag',ci)][t[0],t[1]] = 1
    self.numericalFlux.setDirichletValues(self.ebqe)
    self.forceStrongConditions=False
    self.dirichletConditionsForceDOF = {}
    if self.forceStrongConditions:
        for cj in range(self.nc):
            self.dirichletConditionsForceDOF[cj] = DOFBoundaryConditions(self.u[cj].femSpace,dofBoundaryConditionsSetterDict[cj],weakDirichletConditions=False)
    compKernelFlag=0
    # instantiate the compiled (C++) elasto-plastic residual/Jacobian kernel
    self.elastoPlastic = cElastoPlastic_base(self.nSpace_global,
                                             self.nQuadraturePoints_element,
                                             self.u[0].femSpace.elementMaps.localFunctionSpace.dim,
                                             self.u[0].femSpace.referenceFiniteElement.localFunctionSpace.dim,
                                             self.testSpace[0].referenceFiniteElement.localFunctionSpace.dim,
                                             self.nElementBoundaryQuadraturePoints_elementBoundary,
                                             compKernelFlag)
    def getResidual(self,u,r):
        """
        Calculate the element residuals and add in to the global residual.

        :param u: global degree-of-freedom vector for all displacement
            components (layout determined by self.offset/self.stride)
        :param r: global residual vector; zeroed here and then accumulated
            into by the compiled elastoPlastic kernel
        """
        #Load the unknowns into the finite element dof
        self.timeIntegration.calculateCoefs()
        self.timeIntegration.calculateU(u)
        self.setUnknowns(self.timeIntegration.u)
        #hack
        # (re)evaluate boundary condition values when they are time dependent
        # or have not been set yet
        if self.bcsTimeDependent or not self.bcsSet:
            self.bcsSet=True
            #Dirichlet boundary conditions
            self.numericalFlux.setDirichletValues(self.ebqe)
            #Flux boundary conditions
            for ci,fbcObject in list(self.stressFluxBoundaryConditionsObjectsDict.items()):
                for t,g in list(fbcObject.stressFluxBoundaryConditionsDict.items()):
                    self.ebqe[('stressFlux_bc',ci)][t[0],t[1]] = g(self.ebqe[('x')][t[0],t[1]],self.timeIntegration.t)
                    self.ebqe[('stressFlux_bc_flag',ci)][t[0],t[1]] = 1
        # zero the global residual and per-component element residual storage
        # before the kernel accumulates into them
        r.fill(0.0)
        self.elementResidual[0].fill(0.0)
        self.elementResidual[1].fill(0.0)
        self.elementResidual[2].fill(0.0)
        #import pdb
        #print self.mesh.elementMaterialTypes,
        #print self.coefficients.nMaterialProperties,
        #print self.coefficients.materialProperties,
        # overwrite dofs with strongly enforced Dirichlet values before the
        # residual is evaluated
        if self.forceStrongConditions:
            for cj in range(len(self.dirichletConditionsForceDOF)):
                for dofN,g in list(self.dirichletConditionsForceDOF[cj].DOFBoundaryConditionsDict.items()):
                    self.u[cj].dof[dofN] = g(self.dirichletConditionsForceDOF[cj].DOFBoundaryPointDict[dofN],self.timeIntegration.t)
        # marshal every array/scalar the compiled kernel needs; the key
        # strings must match the argument names on the C++ side exactly
        argsDict = cArgumentsDict.ArgumentsDict()
        argsDict["mesh_trial_ref"] = self.u[0].femSpace.elementMaps.psi
        argsDict["mesh_grad_trial_ref"] = self.u[0].femSpace.elementMaps.grad_psi
        argsDict["mesh_dof"] = self.mesh.nodeArray
        argsDict["mesh_l2g"] = self.mesh.elementNodesArray
        argsDict["dV_ref"] = self.elementQuadratureWeights[('u',0)]
        argsDict["disp_trial_ref"] = self.u[0].femSpace.psi
        argsDict["disp_grad_trial_ref"] = self.u[0].femSpace.grad_psi
        argsDict["disp_test_ref"] = self.u[0].femSpace.psi
        argsDict["disp_grad_test_ref"] = self.u[0].femSpace.grad_psi
        argsDict["mesh_trial_trace_ref"] = self.u[0].femSpace.elementMaps.psi_trace
        argsDict["mesh_grad_trial_trace_ref"] = self.u[0].femSpace.elementMaps.grad_psi_trace
        argsDict["dS_ref"] = self.elementBoundaryQuadratureWeights[('u',0)]
        argsDict["disp_trial_trace_ref"] = self.u[0].femSpace.psi_trace
        argsDict["disp_grad_trial_trace_ref"] = self.u[0].femSpace.grad_psi_trace
        argsDict["disp_test_trace_ref"] = self.u[0].femSpace.psi_trace
        argsDict["disp_grad_test_trace_ref"] = self.u[0].femSpace.grad_psi_trace
        argsDict["normal_ref"] = self.u[0].femSpace.elementMaps.boundaryNormals
        argsDict["boundaryJac_ref"] = self.u[0].femSpace.elementMaps.boundaryJacobians
        argsDict["ebqe_penalty"] = self.ebqe['penalty']
        argsDict["gravityStep"] = int(self.coefficients.gravityStep)
        argsDict["nElements_global"] = self.mesh.nElements_global
        argsDict["materialTypes"] = self.mesh.elementMaterialTypes
        argsDict["nMaterialProperties"] = self.coefficients.nMaterialProperties
        argsDict["materialProperties"] = self.coefficients.materialProperties
        argsDict["pore_fluid_unit_weight"] = self.coefficients.pore_fluid_unit_weight
        argsDict["pore_pressure_head_dof"] = self.coefficients.pore_pressure_head
        argsDict["q_strain"] = self.q['strain']
        argsDict["q_strain0"] = self.q['strain0']
        argsDict["q_strain_last"] = self.q['strain_last']
        argsDict["q_plasticStrain"] = self.q['plasticStrain']
        argsDict["q_plasticStrain_last"] = self.q['plasticStrain_last']
        argsDict["ebqe_strain"] = self.ebqe['strain']
        argsDict["ebqe_strain0"] = self.ebqe['strain0']
        argsDict["ebqe_strain_last"] = self.ebqe['strain_last']
        argsDict["ebqe_plasticStrain"] = self.ebqe['plasticStrain']
        argsDict["ebqe_plasticStrain_last"] = self.ebqe['plasticStrain_last']
        argsDict["disp_l2g"] = self.u[0].femSpace.dofMap.l2g
        argsDict["u_dof"] = self.u[0].dof
        argsDict["v_dof"] = self.u[1].dof
        argsDict["w_dof"] = self.u[2].dof
        argsDict["bodyForce"] = self.coefficients.bodyForce
        argsDict["offset_u"] = self.offset[0]
        argsDict["offset_v"] = self.offset[1]
        argsDict["offset_w"] = self.offset[2]
        argsDict["stride_u"] = self.stride[0]
        argsDict["stride_v"] = self.stride[1]
        argsDict["stride_w"] = self.stride[2]
        argsDict["globalResidual"] = r
        argsDict["nExteriorElementBoundaries_global"] = self.mesh.nExteriorElementBoundaries_global
        argsDict["exteriorElementBoundariesArray"] = self.mesh.exteriorElementBoundariesArray
        argsDict["elementBoundaryElementsArray"] = self.mesh.elementBoundaryElementsArray
        argsDict["elementBoundaryLocalElementBoundariesArray"] = self.mesh.elementBoundaryLocalElementBoundariesArray
        argsDict["isDOFBoundary_u"] = self.numericalFlux.isDOFBoundary[0]
        argsDict["isDOFBoundary_v"] = self.numericalFlux.isDOFBoundary[1]
        argsDict["isDOFBoundary_w"] = self.numericalFlux.isDOFBoundary[2]
        argsDict["isStressFluxBoundary_u"] = self.ebqe[('stressFlux_bc_flag',0)]
        argsDict["isStressFluxBoundary_v"] = self.ebqe[('stressFlux_bc_flag',1)]
        argsDict["isStressFluxBoundary_w"] = self.ebqe[('stressFlux_bc_flag',2)]
        argsDict["ebqe_bc_u_ext"] = self.numericalFlux.ebqe[('u',0)]
        argsDict["ebqe_bc_v_ext"] = self.numericalFlux.ebqe[('u',1)]
        argsDict["ebqe_bc_w_ext"] = self.numericalFlux.ebqe[('u',2)]
        argsDict["ebqe_bc_stressFlux_u_ext"] = self.ebqe[('stressFlux_bc',0)]
        argsDict["ebqe_bc_stressFlux_v_ext"] = self.ebqe[('stressFlux_bc',1)]
        argsDict["ebqe_bc_stressFlux_w_ext"] = self.ebqe[('stressFlux_bc',2)]
        # hand off to the compiled kernel, which accumulates into r in place
        self.elastoPlastic.calculateResidual(argsDict)
        logEvent("Global residual",level=9,data=r)
        # for strongly enforced Dirichlet dofs the residual entry is zeroed so
        # the nonlinear solver leaves those dofs at their prescribed values
        if self.forceStrongConditions:#
            for cj in range(len(self.dirichletConditionsForceDOF)):#
                for dofN,g in list(self.dirichletConditionsForceDOF[cj].DOFBoundaryConditionsDict.items()):
                    r[self.offset[cj]+self.stride[cj]*dofN] = 0
        self.nonlinear_function_evaluations += 1
    def getJacobian(self,jacobian,usePicard=False):
        """
        Zero the CSR Jacobian, fill it via the compiled elastoPlastic kernel,
        and overwrite rows for strongly enforced Dirichlet dofs.

        :param jacobian: sparse (CSR) global Jacobian to fill in place
        :param usePicard: passed through to the kernel; presumably selects a
            Picard-type linearization instead of the full Newton Jacobian --
            confirm against the C++ kernel
        :return: the filled jacobian
        """
        cfemIntegrals.zeroJacobian_CSR(self.nNonzerosInJacobian,
                                       jacobian)
        # marshal every array/scalar the compiled kernel needs; the key
        # strings must match the argument names on the C++ side exactly
        argsDict = cArgumentsDict.ArgumentsDict()
        argsDict["usePicard"] = usePicard
        argsDict["mesh_trial_ref"] = self.u[0].femSpace.elementMaps.psi
        argsDict["mesh_grad_trial_ref"] = self.u[0].femSpace.elementMaps.grad_psi
        argsDict["mesh_dof"] = self.mesh.nodeArray
        argsDict["mesh_l2g"] = self.mesh.elementNodesArray
        argsDict["dV_ref"] = self.elementQuadratureWeights[('u',0)]
        argsDict["disp_trial_ref"] = self.u[0].femSpace.psi
        argsDict["disp_grad_trial_ref"] = self.u[0].femSpace.grad_psi
        argsDict["disp_test_ref"] = self.u[0].femSpace.psi
        argsDict["disp_grad_test_ref"] = self.u[0].femSpace.grad_psi
        argsDict["mesh_trial_trace_ref"] = self.u[0].femSpace.elementMaps.psi_trace
        argsDict["mesh_grad_trial_trace_ref"] = self.u[0].femSpace.elementMaps.grad_psi_trace
        argsDict["dS_ref"] = self.elementBoundaryQuadratureWeights[('u',0)]
        argsDict["disp_trial_trace_ref"] = self.u[0].femSpace.psi_trace
        argsDict["disp_grad_trial_trace_ref"] = self.u[0].femSpace.grad_psi_trace
        argsDict["disp_test_trace_ref"] = self.u[0].femSpace.psi_trace
        argsDict["disp_grad_test_trace_ref"] = self.u[0].femSpace.grad_psi_trace
        argsDict["normal_ref"] = self.u[0].femSpace.elementMaps.boundaryNormals
        argsDict["boundaryJac_ref"] = self.u[0].femSpace.elementMaps.boundaryJacobians
        argsDict["ebqe_penalty"] = self.ebqe['penalty']
        argsDict["gravityStep"] = int(self.coefficients.gravityStep)
        argsDict["nElements_global"] = self.mesh.nElements_global
        argsDict["materialTypes"] = self.mesh.elementMaterialTypes
        argsDict["nMaterialProperties"] = self.coefficients.nMaterialProperties
        argsDict["materialProperties"] = self.coefficients.materialProperties
        argsDict["pore_fluid_unit_weight"] = self.coefficients.pore_fluid_unit_weight
        argsDict["pore_pressure_head_dof"] = self.coefficients.pore_pressure_head
        argsDict["q_strain"] = self.q['strain']
        argsDict["q_strain0"] = self.q['strain0']
        argsDict["q_strain_last"] = self.q['strain_last']
        argsDict["q_plasticStrain"] = self.q['plasticStrain']
        argsDict["q_plasticStrain_last"] = self.q['plasticStrain_last']
        argsDict["ebqe_strain"] = self.ebqe['strain']
        argsDict["ebqe_strain0"] = self.ebqe['strain0']
        argsDict["ebqe_strain_last"] = self.ebqe['strain_last']
        argsDict["ebqe_plasticStrain"] = self.ebqe['plasticStrain']
        argsDict["ebqe_plasticStrain_last"] = self.ebqe['plasticStrain_last']
        argsDict["disp_l2g"] = self.u[0].femSpace.dofMap.l2g
        argsDict["u_dof"] = self.u[0].dof
        argsDict["v_dof"] = self.u[1].dof
        argsDict["w_dof"] = self.u[2].dof
        argsDict["bodyForce"] = self.coefficients.bodyForce
        # CSR sparsity bookkeeping for each (row-component, column-component)
        # coupling block of the 3x3 displacement system
        argsDict["csrRowIndeces_u_u"] = self.csrRowIndeces[(0,0)]
        argsDict["csrColumnOffsets_u_u"] = self.csrColumnOffsets[(0,0)]
        argsDict["csrRowIndeces_u_v"] = self.csrRowIndeces[(0,1)]
        argsDict["csrColumnOffsets_u_v"] = self.csrColumnOffsets[(0,1)]
        argsDict["csrRowIndeces_u_w"] = self.csrRowIndeces[(0,2)]
        argsDict["csrColumnOffsets_u_w"] = self.csrColumnOffsets[(0,2)]
        argsDict["csrRowIndeces_v_u"] = self.csrRowIndeces[(1,0)]
        argsDict["csrColumnOffsets_v_u"] = self.csrColumnOffsets[(1,0)]
        argsDict["csrRowIndeces_v_v"] = self.csrRowIndeces[(1,1)]
        argsDict["csrColumnOffsets_v_v"] = self.csrColumnOffsets[(1,1)]
        argsDict["csrRowIndeces_v_w"] = self.csrRowIndeces[(1,2)]
        argsDict["csrColumnOffsets_v_w"] = self.csrColumnOffsets[(1,2)]
        argsDict["csrRowIndeces_w_u"] = self.csrRowIndeces[(2,0)]
        argsDict["csrColumnOffsets_w_u"] = self.csrColumnOffsets[(2,0)]
        argsDict["csrRowIndeces_w_v"] = self.csrRowIndeces[(2,1)]
        argsDict["csrColumnOffsets_w_v"] = self.csrColumnOffsets[(2,1)]
        argsDict["csrRowIndeces_w_w"] = self.csrRowIndeces[(2,2)]
        argsDict["csrColumnOffsets_w_w"] = self.csrColumnOffsets[(2,2)]
        # element [2] of the CSR representation is the nonzero-values array
        argsDict["globalJacobian"] = jacobian.getCSRrepresentation()[2]
        argsDict["nExteriorElementBoundaries_global"] = self.mesh.nExteriorElementBoundaries_global
        argsDict["exteriorElementBoundariesArray"] = self.mesh.exteriorElementBoundariesArray
        argsDict["elementBoundaryElementsArray"] = self.mesh.elementBoundaryElementsArray
        argsDict["elementBoundaryLocalElementBoundariesArray"] = self.mesh.elementBoundaryLocalElementBoundariesArray
        argsDict["isDOFBoundary_u"] = self.numericalFlux.isDOFBoundary[0]
        argsDict["isDOFBoundary_v"] = self.numericalFlux.isDOFBoundary[1]
        argsDict["isDOFBoundary_w"] = self.numericalFlux.isDOFBoundary[2]
        argsDict["isStressFluxBoundary_u"] = self.ebqe[('stressFlux_bc_flag',0)]
        argsDict["isStressFluxBoundary_v"] = self.ebqe[('stressFlux_bc_flag',1)]
        argsDict["isStressFluxBoundary_w"] = self.ebqe[('stressFlux_bc_flag',2)]
        argsDict["csrColumnOffsets_eb_u_u"] = self.csrColumnOffsets_eb[(0,0)]
        argsDict["csrColumnOffsets_eb_u_v"] = self.csrColumnOffsets_eb[(0,1)]
        argsDict["csrColumnOffsets_eb_u_w"] = self.csrColumnOffsets_eb[(0,2)]
        argsDict["csrColumnOffsets_eb_v_u"] = self.csrColumnOffsets_eb[(1,0)]
        argsDict["csrColumnOffsets_eb_v_v"] = self.csrColumnOffsets_eb[(1,1)]
        argsDict["csrColumnOffsets_eb_v_w"] = self.csrColumnOffsets_eb[(1,2)]
        argsDict["csrColumnOffsets_eb_w_u"] = self.csrColumnOffsets_eb[(2,0)]
        argsDict["csrColumnOffsets_eb_w_v"] = self.csrColumnOffsets_eb[(2,1)]
        argsDict["csrColumnOffsets_eb_w_w"] = self.csrColumnOffsets_eb[(2,2)]
        self.elastoPlastic.calculateJacobian(argsDict)
        logEvent("Jacobian ",level=10,data=jacobian)
        # for strongly enforced Dirichlet dofs, replace the row with identity
        # (diagonal = scaling, off-diagonals = 0) so the solve keeps the
        # prescribed values
        if self.forceStrongConditions:
            scaling = 1.0#probably want to add some scaling to match non-dirichlet diagonals in linear system
            for cj in range(self.nc):
                for dofN in list(self.dirichletConditionsForceDOF[cj].DOFBoundaryConditionsDict.keys()):
                    global_dofN = self.offset[cj]+self.stride[cj]*dofN
                    for i in range(self.rowptr[global_dofN],self.rowptr[global_dofN+1]):
                        if (self.colind[i] == global_dofN):
                            self.nzval[i] = scaling
                        else:
                            self.nzval[i] = 0.0
        #mwf decide if this is reasonable for solver statistics
        self.nonlinear_function_jacobian_evaluations += 1
        #jacobian.fwrite("jacobian_p"+`self.nonlinear_function_jacobian_evaluations`)
        return jacobian
def calculateElementQuadrature(self):
"""
Calculate the physical location and weights of the quadrature rules
and the shape information at the quadrature points.
This function should be called only when the mesh changes.
"""
self.u[0].femSpace.elementMaps.getBasisValuesRef(self.elementQuadraturePoints)
self.u[0].femSpace.elementMaps.getBasisGradientValuesRef(self.elementQuadraturePoints)
self.u[0].femSpace.getBasisValuesRef(self.elementQuadraturePoints)
self.u[0].femSpace.getBasisGradientValuesRef(self.elementQuadraturePoints)
#
#get physical locations of quadrature points and jacobian information there
#assume all components live on the same mesh
#
# self.u[0].femSpace.elementMaps.getValues(self.elementQuadraturePoints,
# self.q['x'])
# if self.movingDomain:
# if self.tLast_mesh != None:
# self.q['xt'][:]=self.q['x']
# self.q['xt']-=self.q['x_last']
# alpha = 1.0/(self.t_mesh - self.tLast_mesh)
# self.q['xt']*=alpha
# else:
# self.q['xt'][:]=0.0
# self.q['x_last'][:]=self.q['x']
# self.u[0].femSpace.elementMaps.getJacobianValues(self.elementQuadraturePoints,
# self.q['J'],
# self.q['inverse(J)'],
# self.q['det(J)'])
# self.q['abs(det(J))']=np.absolute(self.q['det(J)'])
# #
# # get physical space integration weights
# #
# self.q['dV'] = np.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')
# cfemIntegrals.calculateIntegrationWeights(self.q['abs(det(J))'],
# self.elementQuadratureWeights[('u',0)],
# self.q['dV'])
# for ci in range(self.nc): self.q[('dV_u',ci)] = self.q['dV']
# #
# #get shape information at the quadrature points
# #
# self.testSpace[0].getBasisValues(self.elementQuadraturePoints,
# self.q[('w',0)])
# cfemIntegrals.calculateWeightedShape(self.elementQuadratureWeights[('r',1)],
# self.q['abs(det(J))'],
# self.q[('w',1)],
# self.q[('w*dV_r',1)])
# self.testSpace[1].getBasisGradientValues(self.elementQuadraturePoints,
# self.q['inverse(J)'],
# self.q[('grad(w)',1)])
# cfemIntegrals.calculateWeightedShapeGradients(self.elementQuadratureWeights[('f',0)],
# self.q['abs(det(J))'],
# self.q[('grad(w)',0)],
# self.q[('grad(w)*dV_f',0)])
# if self.Hess:
# self.testSpace[1].getBasisHessianValues(self.elementQuadraturePoints,
# self.q['inverse(J)'],
# self.q[('Hess(w)',1)])
# cfemIntegrals.calculateWeightedShapeGradients(self.elementQuadratureWeights[('a',1,1)],
# self.q['abs(det(J))'],
# self.q[('Hess(w)',1)],
# self.q[('Hess(w)*dV_a',1,1)])
self.coefficients.initializeElementQuadrature(self.timeIntegration.t,self.q)
def calculateElementBoundaryQuadrature(self):
"""
Calculate the physical location and weights of the quadrature rules
and the shape information at the quadrature points on element boundaries.
This function should be called only when the mesh changes.
"""
#cek todo, a local calculation of the element boundary quadrature permuations
#
#get physical locations of element boundary quadrature points
#
#assume all components live on the same mesh
# self.u[0].femSpace.elementMaps.getValuesTrace(self.elementBoundaryQuadraturePoints,
# self.ebq['x'])
#
#get metric tensor and unit normals
#
# if self.movingDomain:
# if self.tLast_mesh != None:
# self.ebq['xt'][:]=self.ebq['x']
# self.ebq['xt']-=self.ebq['x_last']
# alpha = 1.0/(self.t_mesh - self.tLast_mesh)
# self.ebq['xt']*=alpha
# else:
# self.ebq['xt'][:]=0.0
# self.ebq['x_last'][:]=self.ebq['x']
# self.u[0].femSpace.elementMaps.getJacobianValuesTrace_movingDomain(self.elementBoundaryQuadraturePoints,
# self.ebq['xt'],
# self.ebq['inverse(J)'],
# self.ebq['g'],
# self.ebq['sqrt(det(g))'],
# self.ebq['n'])
# else:
# self.u[0].femSpace.elementMaps.getJacobianValuesTrace(self.elementBoundaryQuadraturePoints,
# self.ebq['inverse(J)'],
# self.ebq['g'],
# self.ebq['sqrt(det(g))'],
# self.ebq['n'])
# cfemIntegrals.copyLeftElementBoundaryInfo(self.mesh.elementBoundaryElementsArray,
# self.mesh.elementBoundaryLocalElementBoundariesArray,
# self.mesh.exteriorElementBoundariesArray,
# self.mesh.interiorElementBoundariesArray,
# self.ebq['x'],
# self.ebq['n'],
# self.ebq_global['x'],
# self.ebq_global['n'])
# if self.movingDomain:
# cfemIntegrals.copyLeftElementBoundaryInfo_movingDomain(self.mesh.elementBoundaryElementsArray,
# self.mesh.elementBoundaryLocalElementBoundariesArray,
# self.mesh.exteriorElementBoundariesArray,
# self.mesh.interiorElementBoundariesArray,
# self.ebq['xt'])
# #now map the physical points back to the reference element
# #assume all components live on same mesh
# self.u[0].femSpace.elementMaps.getInverseValuesTrace(self.ebq['inverse(J)'],self.ebq['x'],self.ebq['hat(x)'])
# self.u[0].femSpace.elementMaps.getPermutations(self.ebq['hat(x)'])
#
#since the points on the reference boundary may be reordered on many right element boundaries, we
#have to use an array of reference boundary points on all element boundaries
#first copy the left reference element boundary quadrature points from the reference element boundary
#
#get the shape information at the reference element boundary quadrature points
#
# self.testSpace[0].getBasisValuesTrace(self.u[0].femSpace.elementMaps.permutations,
# self.ebq['hat(x)'],
# self.ebq[('w',0)])
# cfemIntegrals.calculateWeightedShapeTrace(self.elementBoundaryQuadratureWeights[('u',0)],
# self.ebq['sqrt(det(g))'],
# self.ebq[('w',0)],
# self.ebq[('w*dS_u',0)])
# self.u[0].femSpace.getBasisGradientValuesTrace(self.u[0].femSpace.elementMaps.permutations,
# self.ebq['hat(x)'],
# self.ebq['inverse(J)'],
# self.ebq[('grad(v)',0)])
# cfemIntegrals.calculateElementBoundaryIntegrationWeights(self.ebq['sqrt(det(g))'],
# self.elementBoundaryQuadratureWeights[('u',0)],
# self.ebq[('dS_u',0)])
def calculateExteriorElementBoundaryQuadrature(self):
"""
Calculate the physical location and weights of the quadrature rules
and the shape information at the quadrature points on global element boundaries.
This function should be called only when the mesh changes.
"""
#
#get physical locations of element boundary quadrature points
#
#assume all components live on the same mesh
self.u[0].femSpace.elementMaps.getBasisValuesTraceRef(self.elementBoundaryQuadraturePoints)
self.u[0].femSpace.elementMaps.getBasisGradientValuesTraceRef(self.elementBoundaryQuadraturePoints)
self.u[0].femSpace.getBasisValuesTraceRef(self.elementBoundaryQuadraturePoints)
self.u[0].femSpace.getBasisGradientValuesTraceRef(self.elementBoundaryQuadraturePoints)
self.u[0].femSpace.elementMaps.getValuesGlobalExteriorTrace(self.elementBoundaryQuadraturePoints,
self.ebqe['x'])
# #
# #get metric tensor and unit normals
# #
# if self.movingDomain:
# if self.tLast_mesh != None:
# self.ebqe['xt'][:]=self.ebqe['x']
# self.ebqe['xt']-=self.ebqe['x_last']
# alpha = 1.0/(self.t_mesh - self.tLast_mesh)
# self.ebqe['xt']*=alpha
# else:
# self.ebqe['xt'][:]=0.0
# self.ebqe['x_last'][:]=self.ebqe['x']
# self.u[0].femSpace.elementMaps.getJacobianValuesGlobalExteriorTrace_movingDomain(self.elementBoundaryQuadraturePoints,
# self.ebqe['xt'],
# self.ebqe['inverse(J)'],
# self.ebqe['g'],
# self.ebqe['sqrt(det(g))'],
# self.ebqe['n'])
# else:
# self.u[0].femSpace.elementMaps.getJacobianValuesGlobalExteriorTrace(self.elementBoundaryQuadraturePoints,
# self.ebqe['inverse(J)'],
# self.ebqe['g'],
# self.ebqe['sqrt(det(g))'],
# self.ebqe['n'])
# #now map the physical points back to the reference element
# #assume all components live on same mesh
# self.u[0].femSpace.elementMaps.getInverseValuesGlobalExteriorTrace(self.ebqe['inverse(J)'],self.ebqe['x'],self.ebqe['hat(x)'])
# #
# #since the points on the reference boundary may be reordered on many right element boundaries, we
# #have to use an array of reference boundary points on all element boundaries
# #first copy the left reference element boundary quadrature points from the reference element boundary
# self.testSpace[0].getBasisValuesGlobalExteriorTrace(self.elementBoundaryQuadraturePoints,
# self.ebqe[('w',0)])
# cfemIntegrals.calculateWeightedShapeGlobalExteriorTrace(self.mesh.exteriorElementBoundariesArray,
# self.mesh.elementBoundaryElementsArray,
# self.mesh.elementBoundaryLocalElementBoundariesArray,
# self.elementBoundaryQuadratureWeights[('f',0)],
# self.ebqe['sqrt(det(g))'],
# self.ebqe[('w',0)],
# self.ebqe[('w*dS_f',0)])
# self.u[0].femSpace.getBasisGradientValuesGlobalExteriorTrace(self.elementBoundaryQuadraturePoints,
# self.ebqe['inverse(J)'],
# self.ebqe[('grad(v)',0)])
# #setup flux boundary conditions
self.stressFluxBoundaryConditionsObjectsDict = dict([(cj,FluxBoundaryConditions(self.mesh,
self.nElementBoundaryQuadraturePoints_elementBoundary,
self.ebqe[('x')],
getStressFluxBoundaryConditions=self.stressFluxBoundaryConditionsSetterDict[cj]))
for cj in list(self.stressFluxBoundaryConditionsSetterDict.keys())])
# self.ebqe['dS'] = np.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary),'d')
# cfemIntegrals.calculateIntegrationWeights(self.ebqe['sqrt(det(g))'],
# self.elementBoundaryQuadratureWeights[('u',0)],
# self.ebqe['dS'])
# for ci in range(self.nc): self.ebqe[('dS_u',ci)] = self.ebqe['dS']
self.coefficients.initializeGlobalExteriorElementBoundaryQuadrature(self.timeIntegration.t,self.ebqe)
    def estimate_mt(self):
        """No-op: no time-derivative scale estimate is computed for this model."""
        pass
    def calculateSolutionAtQuadrature(self):
        """No-op: solution values at quadrature points are not needed here."""
        pass
    def calculateAuxiliaryQuantitiesAfterStep(self):
        """Delegate post-step auxiliary quantity updates to the base class."""
        OneLevelTransport.calculateAuxiliaryQuantitiesAfterStep(self)
| mit | 58604b8d9b96b62ec9630107cf52ede9 | 63.544434 | 199 | 0.587321 | 3.680875 | false | false | false | false |
erdc/proteus | proteus/tests/levelset/rotation/rotation2D.py | 1 | 3647 | from __future__ import absolute_import
from __future__ import division
from past.utils import old_div
from proteus import Domain
import os
#if True uses PETSc solvers
# ---------------------------------------------------------------------------
# Configuration module for the 2D solid-body rotation level-set test problem.
# Every name defined here is read by the companion *_p.py/*_n.py modules, so
# renaming or reordering these assignments would change the test setup.
# ---------------------------------------------------------------------------
parallel = False
linearSmoother = None
#compute mass balance statistics or not
checkMass=False#True
#number of space dimensions
nd=2
#time integration, not relevant if using BDF with cfl timestepping
rtol_u = {0:1.0e-4}
atol_u = {0:1.0e-4}
rtol_res = {0:1.0e-4}
atol_res = {0:1.0e-4}
#
timeIntegration_vof = "vbdf"#vbdf,be,flcbdf,rk
timeIntegration_ls = "vbdf"#vbdf,be,flcbdf,rk
timeOrder = 2
runCFL = 0.3#0.3,0.185,0.125 for dgp1,dgp2,dgpk(3)
#
#spatial approximation orders
cDegree_ls=0 #0 -- CG. -1 -- DG
cDegree_vof=0
pDegree_ls=1 #level set
pDegree_vof=pDegree_ls #volume of fluid should match ls for now
useHex=False#True
useMetrics=1.0
#
#spatial quadrature orders
#2*max(pDegree_vof,pDegree_ls)+1
if pDegree_ls == 2:
    rotation_quad_order = 5
else:
    rotation_quad_order = 3
#parallel partitioning info
from proteus import MeshTools
partitioningType = MeshTools.MeshParallelPartitioningTypes.node
#spatial mesh
lRefinement=0#1
#tag simulation name to level of refinement
#soname="rotationcgp2_bdf2_mc"+`lRefinement`
nn=nnx=nny=8#(2**lRefinement)*5+1
nnz=1
# characteristic mesh size from the structured-grid spacing
he=old_div(1.0,(nnx-1.0))
L=[1.0,1.0]
unstructured=True#True for tetgen, false for tet or hex from rectangular grid
# NOTE(review): the domain box is [-1,1]x[-1,1] (L=(2,2), lower-left at
# (-1,-1)) even though L above is [1,1]; confirm this asymmetry is intended.
box=Domain.RectangularDomain(L=(2.0,2.0),
                             x=(-1.0,-1.0),
                             name="box");
genMesh=False
#box.writePoly("box")
if unstructured:
    try:
        from .rotationDomain import *
    except:
        from rotationDomain import *
    domain=Domain.PlanarStraightLineGraphDomain(fileprefix="box")
    domain.boundaryTags = box.boundaryTags
    bt = domain.boundaryTags
    # triangle options: quality mesh with max area tied to he
    domain.MeshOptions.triangleOptions="pAq30Dena%8.8f" % (0.5*he**2,)
    domain.polyfile=os.path.dirname(os.path.abspath(__file__))+"/"+"box"
else:
    domain = box
domain.MeshOptions.nn = nn
domain.MeshOptions.nnx = nnx
domain.MeshOptions.nny = nny
domain.MeshOptions.nnz = nnz
domain.MeshOptions.triangleFlag=0
domain.MeshOptions.genMesh=genMesh
#end time of simulation, full problem is T=8.0
T = 1.0#8.0#
#number of output time steps
nDTout = 10
#mass correction
applyCorrection=True
applyRedistancing=True
redist_Newton=True
onlyVOF=False#True
#smoothing factors
#eps
epsFactHeaviside=epsFactDirac=epsFact_vof=1.5
epsFactRedistance=0.33
epsFactDiffusion=10.0
#
# shock capturing parameters; the useMetrics branch uses milder factors
if useMetrics:
    shockCapturingFactor_vof=0.5
    shockCapturingFactor_ls=0.5
    shockCapturingFactor_rd=0.5
    lag_shockCapturing_vof=True
    lag_shockCapturing_ls=True
    lag_shockCapturing_rd=False
else:
    shockCapturingFactor_vof=0.2
    shockCapturingFactor_ls=0.2
    shockCapturingFactor_rd=0.9
    lag_shockCapturing_vof=True
    lag_shockCapturing_ls=True
    lag_shockCapturing_rd=False
#use absolute tolerances on al models
# tolerances scale with the mesh size he, floored at 1e-12
atolRedistance = max(1.0e-12,0.1*he)
atolConservation = max(1.0e-12,0.001*he**2)
atolVolumeOfFluid= max(1.0e-12,0.001*he**2)
atolLevelSet = max(1.0e-12,0.001*he**2)
#controls
linearSolverConvergenceTest = 'r-true' #rits is do a set number of iterations, r-true uses true residual, PETSc default is preconditioned residual
#redist solver
fmmFlag=0
#
#correctionType = 'dg'
#correctionType = 'dgp0'
#correctionType = 'global'
correctionType = 'cg'
#correctionType = 'none'
# simulation tag encodes element type, orders, correction, and refinement
if useHex:
    hex=True
    soname="rotation_c0q"+repr(pDegree_ls)+correctionType+"_"+timeIntegration_vof+"_"+repr(timeOrder)+"_level_"+repr(lRefinement)
else:
    soname="rotation_c0p"+repr(pDegree_ls)+correctionType+"_"+timeIntegration_vof+"_"+repr(timeOrder)+"_level_"+repr(lRefinement)
| mit | b528b31e5f26d4e4f3c9e54702434d01 | 27.944444 | 146 | 0.733205 | 2.721642 | false | false | false | false |
erdc/proteus | proteus/tests/levelset/vortex2D/ls_consrv_vortex_2d_p.py | 1 | 1478 | from __future__ import absolute_import
from builtins import object
from proteus import *
from proteus.default_p import *
try:
from .vortex2D import *
except:
from vortex2D import *
# mass-correction ("phicor") model built on top of the vortex2D setup;
# soname comes from the vortex2D configuration imported above
name=soname+"_phicor"
from proteus.mprans import MCorr
LevelModelType = MCorr.LevelModel
# model indices below refer to positions in the split-operator model list:
# LS model 0, velocity model 0, this correction model 3, VOF model 2 --
# confirm against the *_so.py file if the model ordering changes
coefficients = MCorr.Coefficients(applyCorrection=applyCorrection,
                                  epsFactHeaviside=epsFactHeaviside,
                                  epsFactDirac=epsFactDirac,
                                  epsFactDiffusion=epsFactDiffusion,
                                  LSModel_index=0,
                                  V_model=0,
                                  me_model=3,
                                  VOFModel_index=2,
                                  checkMass=checkMass,
                                  nd=nd,
                                  useMetrics=useMetrics,
                                  useExact=useExact)
class zero_phi(object):
    """Initial condition for the correction unknown: identically zero."""
    def __init__(self):
        # stateless; nothing to initialize
        pass
    def uOfX(self, X):
        """Return the (zero) value at spatial point X."""
        zero_value = 0.0
        return zero_value
    def uOfXT(self, X, t):
        """Return the value at (X, t); time independent, so delegate to uOfX."""
        return self.uOfX(X)
analyticalSolutions = None
def getDBC_cnsrv(x,flag):
    """No Dirichlet conditions anywhere: returning None imposes no value at x."""
    return None
# component 0 is the only unknown of the correction model
dirichletConditions = {0:getDBC_cnsrv}
initialConditions = {0:zero_phi()}
fluxBoundaryConditions = {0:'noFlow'}
def getAFBC_cnsrv(x):
    """Advective flux boundary condition: zero flux at every boundary point."""
    def _zero_flux(x, t):
        return 0.0
    return _zero_flux
def getDFBC_cnsrv(x):
    """Diffusive flux boundary condition: zero flux at every boundary point."""
    def _zero_flux(x, t):
        return 0.0
    return _zero_flux
# no advective flux BCs; diffusive flux of component 0 w.r.t. itself is zero
advectiveFluxBoundaryConditions = {}
diffusiveFluxBoundaryConditions = {0:{0:getDFBC_cnsrv}}
| mit | 469f726cfa9cdce84711dbccc69f8c84 | 26.37037 | 68 | 0.550744 | 3.713568 | false | false | false | false |
erdc/proteus | proteus/tests/levelset/vortex2D/tank2dDomain.py | 2 | 1565 | import math
from proteus import Domain
def tank2d(L=(1.0,1.0,1.0),fileprefix=None):
    """Build a 2D rectangular "tank" planar straight line graph domain.

    Only L[0] (horizontal extent) and L[1] (vertical extent) are used; the
    third entry is kept for interface compatibility with the 3D generators.

    :param L: domain extents.  The default is now an immutable tuple rather
        than a mutable list; the function only reads L, so behavior is
        unchanged, but this avoids the shared mutable-default-argument
        pitfall.
    :param fileprefix: optional file prefix handed to the PSLG domain writer.
    :return: a Domain.PlanarStraightLineGraphDomain with a boundaryTags dict
        attached as an attribute.
    """
    # boundary tags are 1-based integers assigned in list order
    boundaries=['left','right','bottom','top','front','back','obstacle']
    boundaryTags=dict([(key,i+1) for (i,key) in enumerate(boundaries)])
    # rectangle corners, counter-clockwise from the origin
    vertices=[[0.0,0.0],#0
              [L[0],0.0],#1
              [L[0],L[1]],#2
              [0.0,L[1]]]#3
    vertexFlags=[boundaryTags['left'],
                 boundaryTags['right'],
                 boundaryTags['right'],
                 boundaryTags['left']]
    # edges joining consecutive vertices
    segments=[[0,1],
              [1,2],
              [2,3],
              [3,0]]
    segmentFlags=[boundaryTags['front'],
                  boundaryTags['right'],
                  boundaryTags['back'],
                  boundaryTags['left']]
    # one region marker at the tank center
    regions=[[0.5*L[0],0.5*L[1]]]
    regionFlags=[1.0]
    domain = Domain.PlanarStraightLineGraphDomain(fileprefix=fileprefix,
                                                  vertices=vertices,
                                                  vertexFlags=vertexFlags,
                                                  segments=segments,
                                                  segmentFlags=segmentFlags,
                                                  regions=regions,
                                                  regionFlags=regionFlags)
    #go ahead and add a boundary tags member
    domain.boundaryTags = boundaryTags
    return domain
if __name__=='__main__':
    # quick visual check: build the domain, write Triangle .poly and .ply
    # files, then render with asymptote (requires `asy` on the PATH)
    import os
    domain = tank2d()
    domain.writePoly("tank2d")
    domain.writePLY("tank2d")
    os.system("asy -V tank2d")
| mit | 1fe742716738057d5330dd77ccf3d395 | 36.261905 | 76 | 0.461342 | 4.264305 | false | false | false | false |
erdc/proteus | proteus/tests/ci/poisson_3d_tetgen_p.py | 1 | 13327 | from __future__ import division
from builtins import range
from builtins import object
from past.utils import old_div
from proteus import *
from proteus.default_p import *
"""
Heterogeneous Poisson's equation, -div(a(x)u) = f(x), on unit domain [0,1]x[0,1]x[0,1]
"""
##\page Tests Test Problems
# \ref poisson_3d_p.py "Heterogeneous Poisson's equation, -div(a(x)u) = f(x), on unit domain [0,1]x[0,1]x[0,1]"
#
##\ingroup test
#\file poisson_3d_p.py
#
#\brief Heterogenous Poisson's equations in 3D unit domain [0,1]x[0,1]x[0,1]
#----------------------------------------------------
# Domain - mesh - quadrature
#----------------------------------------------------
#space dimension
nd = 3
hull_length = 0.5
hull_beam = 0.5
hull_draft = 0.5
L=(2.0*hull_length,
2.0*hull_beam,
2.0*hull_draft)
x_ll = (0.0,
0.0,
0.0)
hull_center = (0.5*hull_length,
0.5*hull_beam,
0.5*hull_draft)
nLevels = 1
he = old_div(L[0],10.0)
#he = hull_draft/1.0
#he = hull_draft/6.0
genMesh=True#False
vessel = None
#vessel = 'cube'
#vessel = 'wigley'
boundaryTags = { 'bottom': 1, 'front':2, 'right':3, 'back': 4, 'left':5, 'top':6, 'obstacle':7}
if vessel is 'wigley-gmsh':
domain = Domain.MeshTetgenDomain(fileprefix="mesh")
domain.boundaryTags = boundaryTags
else:
vertices=[[x_ll[0],x_ll[1],x_ll[2]],#0
[x_ll[0]+L[0],x_ll[1],x_ll[2]],#1
[x_ll[0]+L[0],x_ll[1]+L[1],x_ll[2]],#2
[x_ll[0],x_ll[1]+L[1],x_ll[2]],#3
[x_ll[0],x_ll[1],x_ll[2]+L[2]],#4
[x_ll[0]+L[0],x_ll[1],x_ll[2]+L[2]],#5
[x_ll[0]+L[0],x_ll[1]+L[1],x_ll[2]+L[2]],#6
[x_ll[0],x_ll[1]+L[1],x_ll[2]+L[2]]]#7
vertexFlags=[boundaryTags['left'],
boundaryTags['right'],
boundaryTags['right'],
boundaryTags['left'],
boundaryTags['left'],
boundaryTags['right'],
boundaryTags['right'],
boundaryTags['left']]
facets=[[[0,1,2,3]],
[[0,1,5,4]],
[[1,2,6,5]],
[[2,3,7,6]],
[[3,0,4,7]],
[[4,5,6,7]]]
facetFlags=[boundaryTags['bottom'],
boundaryTags['front'],
boundaryTags['right'],
boundaryTags['back'],
boundaryTags['left'],
boundaryTags['top']]
regions=[[x_ll[0]+0.5*L[0],x_ll[1]+0.5*L[1],x_ll[2]+0.5*L[2]]]
regionFlags=[1.0]
holes=[]
if vessel is 'wigley':
from math import log
he_hull = old_div(log(64.0*he+1.0),64.0)
#print he,he_hull
#he_hull = he
n_points_length = int(ceil(old_div(hull_length,he_hull)))+1
n_points_draft = 2*int(ceil(old_div(hull_draft,he_hull)))+1
#print "points",n_points_length,n_points_draft
dx = old_div(hull_length,float(n_points_length-1))
dz = 2.0*hull_draft/float(n_points_draft-1)
#print "he",he,dx,dz
#grid on right half of hull
for i in range(n_points_length):
for j in range(n_points_draft):
x = i*dx - 0.5*hull_length
z = j*dz - hull_draft
zStar = min(0.0,z)
y = 0.5*hull_beam*(1.0 - 4.0*(old_div(x,hull_length))**2) * (1.0 - (old_div(zStar,hull_draft))**2)
vertices.append([x+hull_center[0],
y+hull_center[1],
z+hull_center[2]])
vertexFlags.append(boundaryTags['obstacle'])
def vN_right(i,j):
return 8 + i*n_points_draft+j
for i in range(n_points_length-1):
for j in range(n_points_draft-1):
if i < old_div(n_points_length,2):
facets.append([[vN_right(i,j),vN_right(i+1,j+1),vN_right(i+1,j)]])
facetFlags.append(boundaryTags['obstacle'])
facets.append([[vN_right(i,j),vN_right(i,j+1),vN_right(i+1,j+1)]])
facetFlags.append(boundaryTags['obstacle'])
else:
facets.append([[vN_right(i,j),vN_right(i,j+1),vN_right(i+1,j)]])
facetFlags.append(boundaryTags['obstacle'])
facets.append([[vN_right(i,j+1),vN_right(i+1,j+1),vN_right(i+1,j)]])
facetFlags.append(boundaryTags['obstacle'])
#grid on left half of hull
for i in range(1,n_points_length-1):
for j in range(1,n_points_draft):
x = i*dx - 0.5*hull_length
z = j*dz - hull_draft
zStar = min(0.0,z)
y = 0.5*hull_beam*(1.0 - 4.0*(old_div(x,hull_length))**2) * (1.0 - (old_div(zStar,hull_draft))**2)
vertices.append([x+hull_center[0],
hull_center[1] - y,
z+hull_center[2]])
vertexFlags.append(boundaryTags['obstacle'])
def vN_left(i,j):
if i== 0 or j==0:
return vN_right(i,j)
if i == (n_points_length-1):# or j==(n_points_draft-1):
return vN_right(i,j)
else:
return 8 + n_points_length*n_points_draft+(i-1)*(n_points_draft-1)+j-1
for i in range(n_points_length-1):
for j in range(n_points_draft-1):
if i < old_div(n_points_length,2):
facets.append([[vN_left(i,j),vN_left(i+1,j+1),vN_left(i+1,j)]])
facetFlags.append(boundaryTags['obstacle'])
facets.append([[vN_left(i,j),vN_left(i,j+1),vN_left(i+1,j+1)]])
facetFlags.append(boundaryTags['obstacle'])
else:
facets.append([[vN_left(i,j),vN_left(i,j+1),vN_left(i+1,j)]])
facetFlags.append(boundaryTags['obstacle'])
facets.append([[vN_left(i,j+1),vN_left(i+1,j+1),vN_left(i+1,j)]])
facetFlags.append(boundaryTags['obstacle'])
topFacet=[]
for i in range(n_points_length):
topFacet.append(vN_right(i,n_points_draft-1))
for i in range(n_points_length-2,0,-1):
topFacet.append(vN_left(i,n_points_draft-1))
facets.append([topFacet])
facetFlags.append(boundaryTags['obstacle'])
#for v in vertices: print v
#for f in facets: print f
holes.append(hull_center)
if vessel is 'cube':
nStart = len(vertices)
vertices.append([hull_center[0] - 0.5*hull_length,
hull_center[1] - 0.5*hull_beam,
hull_center[2] - 0.5*hull_draft])
vertexFlags.append(boundaryTags['obstacle'])
vertices.append([hull_center[0] - 0.5*hull_length,
hull_center[1] + 0.5*hull_beam,
hull_center[2] - 0.5*hull_draft])
vertexFlags.append(boundaryTags['obstacle'])
vertices.append([hull_center[0] + 0.5*hull_length,
hull_center[1] + 0.5*hull_beam,
hull_center[2] - 0.5*hull_draft])
vertexFlags.append(boundaryTags['obstacle'])
vertices.append([hull_center[0] + 0.5*hull_length,
hull_center[1] - 0.5*hull_beam,
hull_center[2] - 0.5*hull_draft])
vertexFlags.append(boundaryTags['obstacle'])
vertices.append([hull_center[0] - 0.5*hull_length,
hull_center[1] - 0.5*hull_beam,
hull_center[2] + 0.5*hull_draft])
vertexFlags.append(boundaryTags['obstacle'])
vertices.append([hull_center[0] - 0.5*hull_length,
hull_center[1] + 0.5*hull_beam,
hull_center[2] + 0.5*hull_draft])
vertexFlags.append(boundaryTags['obstacle'])
vertices.append([hull_center[0] + 0.5*hull_length,
hull_center[1] + 0.5*hull_beam,
hull_center[2] + 0.5*hull_draft])
vertexFlags.append(boundaryTags['obstacle'])
vertices.append([hull_center[0] + 0.5*hull_length,
hull_center[1] - 0.5*hull_beam,
hull_center[2] + 0.5*hull_draft])
vertexFlags.append(boundaryTags['obstacle'])
facets.append([[nStart,nStart+1,nStart+2,nStart+3]])#1
facetFlags.append(boundaryTags['obstacle'])
facets.append([[nStart,nStart+1,nStart+5,nStart+4]])#2
facetFlags.append(boundaryTags['obstacle'])
facets.append([[nStart+1,nStart+2,nStart+6,nStart+5]])#3
facetFlags.append(boundaryTags['obstacle'])
facets.append([[nStart+2,nStart+3,nStart+7,nStart+6]])#4
facetFlags.append(boundaryTags['obstacle'])
facets.append([[nStart+3,nStart,nStart+4,nStart+7]])#5
facetFlags.append(boundaryTags['obstacle'])
facets.append([[nStart+4,nStart+5,nStart+6,nStart+7]])#6
facetFlags.append(boundaryTags['obstacle'])
holes.append(hull_center)
domain = Domain.PiecewiseLinearComplexDomain(vertices=vertices,
vertexFlags=vertexFlags,
facets=facets,
facetFlags=facetFlags,
regions=regions,
regionFlags=regionFlags,
holes=holes)
#go ahead and add a boundary tags member
domain.boundaryTags = boundaryTags
from proteus import Comm
comm = Comm.get()
# pick a poly-file name that encodes which vessel (if any) is present
if vessel:
    polyfile="mesh_"+vessel
else:
    polyfile="meshNoVessel"
# only the master rank writes the .poly file; other ranks just record its
# name (presumably writePoly sets domain.polyfile on the master -- confirm)
if comm.isMaster():
    domain.writePoly(polyfile)
else:
    domain.polyfile=polyfile
domain.MeshOptions.genMesh=genMesh
restrictFineSolutionToAllMeshes=False
parallelPartitioningType = MeshTools.MeshParallelPartitioningTypes.node
nLayersOfOverlapForParallel = 0
#steady-state so no initial conditions
initialConditions = None
#use sparse diffusion representation
sd=True
#identity tensor for defining analytical heterogeneity functions
Ident = numpy.zeros((nd,nd),'d')
Ident[0,0]=1.0; Ident[1,1] = 1.0; Ident[2,2]=1.0
#for computing exact 'Darcy' velocity
class velEx(object):
    """Exact 'Darcy' velocity u = -a(x) * grad(u_exact)(x).

    ``duex`` must provide ``duOfX(X)`` (the exact solution gradient) and
    ``aex`` is a callable returning the conductivity tensor at ``X``.
    """
    def __init__(self, duex, aex):
        self.duex = duex
        self.aex = aex
    def uOfX(self, X):
        gradient = self.duex.duOfX(X)
        # conductivity tensor at X, flattened storage -> 3x3
        tensor = numpy.reshape(self.aex(X), (3, 3))
        return -numpy.dot(tensor, gradient)
    def uOfXT(self, X, T):
        # steady problem: the time argument is ignored
        return self.uOfX(X)
##################################################
#define coefficients a(x)=[a_{ij}] i,j=0,2, right hand side f(x) and analytical solution u(x)
#u = x*x + y*y + z*z, a_00 = x + 5, a_11 = y + 5.0 + a_22 = z + 10.0
#f = -2*x -2*(5+x) -2*y-2*(5+y) -2*z-2*(10+z)
#
def a5(x):
    """Diagonal heterogeneous conductivity: a_00=x+5, a_11=y+5, a_22=z+10."""
    return numpy.diag(numpy.array([x[0] + 5.0, x[1] + 5.0, x[2] + 10.0], 'd'))
def f5(x):
    """Right-hand side f = -div(a grad u) matching u5Ex and a5."""
    # accumulate term by term, left to right, exactly as the closed form
    total = -2.0*x[0]
    total -= 2*(5. + x[0])
    total -= 2.*x[1]
    total -= 2.*(5. + x[1])
    total -= 2.*x[2]
    total -= 2.*(10 + x[2])
    return total
#'manufactured' analytical solution
class u5Ex(object):
    """Manufactured solution u(x,y,z) = x^2 + y^2 + z^2 and its gradient."""
    def __init__(self):
        pass
    def uOfX(self, x):
        # u = x^2 + y^2 + z^2
        return sum(xi**2 for xi in x[:3])
    def uOfXT(self, X, T):
        # steady solution: independent of T
        return self.uOfX(X)
    def duOfX(self, X):
        # grad u = 2*(x, y, z)
        return 2.0*numpy.reshape(X[0:3], (3,))
    def duOfXT(self, X, T):
        return self.duOfX(X)
#dirichlet boundary condition functions on (x=0,y,z), (x,y=0,z), (x,y=1,z), (x,y,z=0), (x,y,z=1)
def getDBC5(x, flag):
    """Dirichlet BC: exact solution on every tagged face except 'right'."""
    dirichlet_flags = (boundaryTags['bottom'], boundaryTags['top'],
                       boundaryTags['front'], boundaryTags['back'],
                       boundaryTags['left'])
    if flag in dirichlet_flags:
        return lambda x, t: u5Ex().uOfXT(x, t)
def getAdvFluxBC5(x, flag):
    """No advective flux is prescribed anywhere (always returns None)."""
    return None
#specify flux on (x=1,y,z)
def getDiffFluxBC5(x,flag):
    """Diffusive flux BC: exact flux on the 'right' face, zero on untagged
    facets, and no condition (None) on the remaining tagged boundaries."""
    if flag == boundaryTags['right']:
        # outward unit normal of the x = x_max face
        n = numpy.zeros((nd,),'d'); n[0]=1.0
        # exact flux: dot the exact Darcy velocity with the face normal
        return lambda x,t: numpy.dot(velEx(u5Ex(),a5).uOfXT(x,t),n)
    elif flag == 0:
        # flag 0: untagged (interior/default) facets get zero flux
        return lambda x,t: 0.0
#dirichlet boundary condition functions on (x=0,y,z), (x,y=0,z), (x,y=1,z), (x,y,z=0), (x,y,z=1)
# def getDBC5(x,flag):
# if x[0] in [0.0] or x[1] in [0.0,1.0] or x[2] in [0.0,1.0]:
# return lambda x,t: u5Ex().uOfXT(x,t)
# def getAdvFluxBC5(x,flag):
# pass
# #specify flux on (x=1,y,z)
# def getDiffFluxBC5(x,flag):
# if x[0] == 1.0:
# n = numpy.zeros((nd,),'d'); n[0]=1.0
# return lambda x,t: numpy.dot(velEx(u5Ex(),a5).uOfXT(x,t),n)
# if not (x[0] in [0.0] or x[1] in [0.0,1.0] or x[2] in [0.0,1.0]):
# return lambda x,t: 0.0
#store a,f in dictionaries since coefficients class allows for one entry per component
aOfX = {0:a5}; fOfX = {0:f5}
#one component
nc = 1
#load analytical solution, dirichlet conditions, flux boundary conditions into the expected variables
analyticalSolution = {0:u5Ex()}
analyticalSolutionVelocity = {0:velEx(analyticalSolution[0],aOfX[0])}
# boundary conditions keyed by component index
dirichletConditions = {0:getDBC5}
advectiveFluxBoundaryConditions = {0:getAdvFluxBC5}
diffusiveFluxBoundaryConditions = {0:{0:getDiffFluxBC5}}
fluxBoundaryConditions = {0:'setFlow'} #options are 'setFlow','noFlow','mixedFlow'
#equation coefficient names
coefficients = TransportCoefficients.PoissonEquationCoefficients(aOfX,fOfX,nc,nd)
# label the single unknown for output
coefficients.variableNames=['u0']
| mit | f263689770cc958df8abd57df124d055 | 39.880368 | 124 | 0.536955 | 2.857419 | false | false | false | false |
erdc/proteus | proteus/TwoPhaseFlow/utils/Parameters.py | 1 | 85685 | from __future__ import division
from past.utils import old_div
from builtins import object
import numpy as np
from petsc4py import PETSc
from proteus.Profiling import logEvent
from proteus.MeshTools import MeshOptions
from proteus.defaults import (Physics_base,
Numerics_base,
System_base)
from proteus import Comm
comm = Comm.get()
# run in parallel mode when more than one MPI rank is available
parallel = comm.size() > 1
# models
from proteus.mprans import (RANS2P,
RANS3PF,
VOF,
RDLS,
NCLS,
MCorr,
CLSVOF,
AddedMass,
MoveMesh,
MoveMeshMonitor,
Pres,
PresInit,
PresInc,
Kappa,
Dissipation)
# numerical options
from proteus import (StepControl,
TimeIntegration,
NonlinearSolvers,
LinearSolvers,
LinearAlgebraTools,
NumericalFlux)
# default values shared by several of the model parameter classes below
epsFact = 1.5
sc_uref = 1.
sc_beta = 1.5
shockCapturingFactor = 0.5
minTol = 1e-8  # floor applied to nonlinear/linear solver tolerances
default_kappa_turbulence = 1e-3
default_dissipation_turbulence = 1e-3
class ParametersHolder:
    """Aggregates the per-model parameter objects of a two-phase flow
    problem and initializes/logs all of their settings."""
    def __init__(self, ProblemInstance=None):
        # gain access to problem class if necessary
        self._Problem = ProblemInstance
        # default options
        self.model_list = []
        self.physical = self._Problem.SystemPhysics
    def initializeParameters(self):
        """Initialize physics/numerics/PETSc options for every model and
        log all options; values that differ from the Physics_base or
        Numerics_base defaults are flagged with '(!)'."""
        logEvent('----------')
        logEvent('Mesh Options')
        # NOTE(review): self.mesh is not set in __init__; it is expected to
        # be attached externally before this method runs -- confirm.
        for key, value in self.mesh.__dict__.items():
            if key[0] != '_': # do not print hidden attributes
                logEvent('{key}: {value}'.format(key=key, value=value))
        logEvent('----------')
        logEvent('Physical Parameters')
        for key, value in self.physical.__dict__.items():
            if key[0] != '_': # do not print hidden attributes
                logEvent('{key}: {value}'.format(key=key, value=value))
        logEvent('----------')
        n_base = Numerics_base()
        p_base = Physics_base()
        # the model index is not needed, so iterate over the models directly
        for model in self.model_list:
            model.initializePhysics()
            model.initializeNumerics()
            model.initializePETScOptions()
            logEvent('TwoPhaseFlow parameters for model: {name}'.format(name=model['name']))
            logEvent('-----')
            logEvent('{name} PHYSICS'.format(name=model.name))
            logEvent('-----')
            logEvent('COEFFICIENTS OPTIONS')
            for key, value in sorted(model.p.coefficients.__dict__.items()):
                if key[0] != '_': # do not print hidden attributes
                    logEvent('{key}: {value}'.format(key=key, value=value))
            logEvent('END OF COEFFICIENTS OPTIONS')
            for key, value in sorted(model.p.__dict__.items()):
                if key[0] != '_': # do not print hidden attributes
                    if key in p_base.__dict__:
                        if value != p_base.__dict__[key]:
                            logEvent('(!) {key}: {value}'.format(key=key, value=value))
                        else:
                            logEvent('{key}: {value}'.format(key=key, value=value))
                    else:
                        logEvent('{key}: {value}'.format(key=key, value=value))
            logEvent('-----')
            logEvent('{name} NUMERICS'.format(name=model.name))
            logEvent('-----')
            for key, value in sorted(model.n.__dict__.items()):
                if key[0] != '_': # do not print hidden attributes
                    if key in n_base.__dict__:
                        if value != n_base.__dict__[key]:
                            logEvent('(!) {key}: {value}'.format(key=key, value=value))
                        else:
                            logEvent('{key}: {value}'.format(key=key, value=value))
                    else:
                        logEvent('{key}: {value}'.format(key=key, value=value))
            logEvent('----------')
        logEvent('-----')
        logEvent('PETSc OPTIONS')
        petsc_info = PETSc.Options().getAll()
        for key, val in sorted(petsc_info.items()):
            logEvent(str(key)+': '+str(val))
        logEvent('-----')
class FreezableClass(object):
    """Base class for all parameters class, enforces attribute freezing
    """
    __frozen = False

    def __init__(self, name=None):
        self.name = name

    def __getitem__(self, key):
        # dict-style read access restricted to instance attributes
        if key in self.__dict__:
            return self.__dict__[key]
        raise AttributeError("{key} is not an option for class {name}".format(key=key, name=self.__class__.__name__))

    def __setitem__(self, key, val):
        # dict-style write access routed through the freeze check
        self.__setattr__(key, val)

    def __setattr__(self, key, val):
        # once frozen, only attributes that already exist may be rebound
        if self.__frozen and not hasattr(self, key):
            raise AttributeError("{key} is not an option for class {name}".format(key=key, name=self.__class__.__name__))
        object.__setattr__(self, key, val)

    def _freeze(self):
        self.__frozen = True

    def addOption(self, name, value):
        # temporarily lift the freeze so exactly one new attribute is added
        self.__frozen = False
        self.__setattr__(name, value)
        self._freeze()
class ParametersModelBase(FreezableClass):
    """Base class for the per-model parameter containers.

    Owns a Physics_base/Numerics_base pair (``self.p`` / ``self.n``) and
    fills them in from the parent problem instance when
    ``initializePhysics`` / ``initializeNumerics`` run; subclasses
    customize behavior through the ``_initialize*`` hooks.
    """
    def __init__(self, name=None, Problem=None):
        super(ParametersModelBase, self).__init__(name=name)
        self.index = None
        self.auxiliaryVariables = []
        self._Problem = Problem
        self.OptDB = PETSc.Options()
        self.p = Physics_base()
        self.p.name = name
        # self.p.CoefficientsOptions = FreezableClass()
        self.p._freeze()
        self.n = Numerics_base()
        self.n.name = name
        self.n.ShockCapturingOptions = FreezableClass()
        self.n.SubgridErrorOptions = FreezableClass()
        self.n._freeze()
        # NON LINEAR SOLVERS
        # self.n.fullNewtonFlag = True
        # self.n.multilevelNonlinearSolver = NonlinearSolvers.Newton
        # self.n.levelNonlinearSolver = NonlinearSolvers.Newton
        # self.n.nonlinearSmoother = None
        # self.n.levelNonlinearSolverConvergenceTest = 'r'
        # self.n.nonlinearSolverConvergenceTest = 'r'
        # LINEAR ALGEBRA
        # self.n.matrix = LinearAlgebraTools.SparseMatrix
        # self.n.linearSmoother = None
        # NUMERICAL FLUX
        # self.n.massLumping = False
        self.n.conservativeFlux = None
        # TOLERANCES
        self.n.nl_atol_res = None
        self.n.l_atol_res = None
        # self.n.linTolFac = 0.001
        # self.n.useEisenstatWalker = False
        # self.n.tolFac = 0.
        # self.n.maxNonlinearIts = 50
        # self.n.maxLineSearches = 0
    def initializePhysics(self):
        """Fill self.p from the problem domain, then run the subclass hook."""
        self.p.domain = self._Problem.domain
        self.p.nd = self._Problem.domain.nd
        self.p.movingDomain = self._Problem.SystemPhysics.movingDomain
        self.p.genMesh = self._Problem.domain.MeshOptions.genMesh
        # initialize extra parameters
        self._initializePhysics()
        self.p._unfreeze()
    def _initializePhysics(self):
        # to overwrite for each models
        pass
    def _setPhysicsValues(self):
        """Copy the shared two-phase physical constants onto the coefficients."""
        coeffs = self.p.coefficients
        coeffs.movingDomain = self.p.movingDomain
        pparams = self._Problem.SystemPhysics
        coeffs.sigma = pparams.surf_tension_coeff
        coeffs.rho_0 = pparams.rho_0
        coeffs.rho_1 = pparams.rho_1
        coeffs.nu_0 = pparams.nu_0
        coeffs.nu_1 = pparams.nu_1
        coeffs.g = np.array(pparams.gravity)
    def initializeNumerics(self):
        """Fill self.n from mesh/FE-space/solver settings, then run the
        subclass hook."""
        # TIME INTEGRATION (was also set a second time further down; one
        # assignment is sufficient)
        self.n.runCFL = self._Problem.SystemNumerics.cfl
        # MESH
        meshOptions = self._Problem.domain.MeshOptions
        self.n.triangleFlag = meshOptions.triangleFlag
        self.n.nnx = meshOptions.nnx
        self.n.nny = meshOptions.nny
        self.n.nnz = meshOptions.nnz
        self.n.triangleOptions = meshOptions.triangleOptions
        self.n.parallelPartitioningType = meshOptions.parallelPartitioningType
        self.n.nLayersOfOverlapForParallel = meshOptions.nLayersOfOverlapForParallel
        self.n.restrictFineSolutionToAllMeshes = meshOptions.restrictFineSolutionToAllMeshes
        # FINITE ELEMENT SPACES
        FESpace = self._Problem.FESpace
        self.n.elementQuadrature = FESpace['elementQuadrature']
        self.n.elementBoundaryQuadrature = FESpace['elementBoundaryQuadrature']
        # SUPERLU (direct solver only makes sense in serial runs)
        if self._Problem.SystemNumerics.useSuperlu and not parallel:
            self.n.multilevelLinearSolver = LinearSolvers.LU
            self.n.levelLinearSolver = LinearSolvers.LU
        # AUXILIARY VARIABLES
        self.n.auxiliaryVariables = self.auxiliaryVariables
        # initialize extra parameters
        self._initializeNumerics()
        self.n._unfreeze()
    def _initializeNumerics(self):
        # to overwrite for each models
        pass
    def initializePETScOptions(self):
        """Apply this model's default PETSc options unless an external
        options file already configures its solver prefix."""
        if not self._Problem.SystemNumerics.usePETScOptionsFileExternal:
            # use default options if no file
            self._initializePETScOptions()
        else:
            # only fall back to the defaults when the external file has no
            # entry for this model's linear solver prefix
            prefix = self.n.linear_solver_options_prefix
            petsc_options = PETSc.Options().getAll()
            if not any(key.startswith(prefix) for key in petsc_options):
                self._initializePETScOptions()
    def _initializePETScOptions(self):
        pass
    def fetchIndex(self, idxDict, name):
        """Return ``idxDict[name]``, or None when the model is absent.

        Catches only the expected lookup failures (missing key, or
        ``idxDict`` being None) instead of a bare ``except`` that would
        also hide genuine programming errors.
        """
        try:
            return idxDict[name]
        except (KeyError, TypeError):
            return None
    def setInitialConditionStructure(self):
        """Create a frozen per-variable initial-condition container and a
        variable-name -> component-index map on the level model type."""
        self.p.initialConditions = FreezableClass()
        for name in self.p.coefficients.variableNames:
            setattr(self.p.initialConditions, name, None)
        self.p.initialConditions._freeze()
        self.p.LevelModelType.var2idxDict = {name: i for i, name in enumerate(self.p.coefficients.variableNames)}
class ParametersModelRANS2P(ParametersModelBase):
    """Parameter container for the RANS2P two-phase Navier-Stokes model
    (monolithic pressure-velocity formulation)."""
    def __init__(self,ProblemInstance):
        super(ParametersModelRANS2P, self).__init__(name='rans2p',Problem=ProblemInstance)
        # 'be' (backward Euler) or 'vbdf' time discretization
        self.timeDiscretization = 'be'
        self.p.coefficients = RANS2P.Coefficients(
            nd = self._Problem.domain.nd,
            initialize=False,
            useMetrics=1.,
            epsFact=epsFact,
            eb_penalty_constant=100.,
            particle_epsFact = 3.,
            useExact=False#Problem.useExact
        )
        scopts = self.n.ShockCapturingOptions
        scopts.shockCapturingFactor = shockCapturingFactor
        scopts.lag = True
        scopts._freeze()
        seopts = self.n.SubgridErrorOptions
        seopts.lag = True
        seopts._freeze()
        # LEVEL MODEL
        self.p.LevelModelType = RANS2P.LevelModel
        self.n.timeOrder = 1
        # NON LINEAR SOLVER
        self.n.multilevelNonlinearSolver = NonlinearSolvers.Newton
        # NUMERICAL FLUX
        self.n.numericalFluxType = RANS2P.NumericalFlux
        # LINEAR ALGEBRA
        self.n.multilevelLinearSolver = LinearSolvers.KSP_petsc4py
        self.n.levelLinearSolver = LinearSolvers.KSP_petsc4py
        self.n.linear_solver_options_prefix = 'rans2p_'
        self.n.linearSolverConvergenceTest = 'r-true'
        self.n.linearSmoother = LinearSolvers.SimpleNavierStokes3D
        self.n.conservativeFlux= {0:'pwl-bdm-opt'}
        # TOLERANCES
        self.n.linTolFac = 0.01
        self.n.tolFac = 0.
        self.n.maxNonlinearIts = 100
        self.n.maxLineSearches = 0
        self._freeze()
        #Initial Conditions
        self.setInitialConditionStructure()
    def _initializePhysics(self):
        """Resolve coupled-model indices, fill the coefficients and attach
        boundary conditions for pressure and velocity components."""
        pparams = self._Problem.SystemPhysics # physical parameters
        domain = self._Problem.domain
        nd = domain.nd
        # MODEL INDEX
        idxDict = self._Problem.SystemPhysics._modelIdxDict
        ME_model = self.fetchIndex(idxDict,self.name)
        assert ME_model is not None, 'rans2p model index was not set!'
        CLSVOF_model = self.fetchIndex(idxDict, 'clsvof')
        VF_model = self.fetchIndex(idxDict, 'vof')
        LS_model = self.fetchIndex(idxDict, 'ncls')
        K_model = self.fetchIndex(idxDict, 'kappa')
        DISS_model = self.fetchIndex(idxDict, 'dissipation')
        # POROSITY / RELAXATION
        if hasattr(domain, 'porosityTypes'):
            porosityTypes = domain.porosityTypes
            dragAlphaTypes = domain.dragAlphaTypes
            dragBetaTypes = domain.dragBetaTypes
            epsFact_porous = domain.epsFact_porous
        else:
            porosityTypes = None
            dragAlphaTypes = None
            dragBetaTypes = None
            # NOTE(review): epsFact_solid/epsFact_porous are set here but
            # never passed on to the coefficients -- confirm intent
            epsFact_solid = None
            epsFact_porous = None
        # COEFFICIENTS
        coeffs = self.p.coefficients
        self._setPhysicsValues()
        coeffs.nd = nd
        coeffs.ME_model = ME_model
        coeffs.CLSVOF_model = CLSVOF_model
        coeffs.VF_model = VF_model
        coeffs.LS_model = LS_model
        coeffs.Closure_0_model = K_model
        coeffs.Closure_1_model = DISS_model
        coeffs.porosityTypes = porosityTypes
        coeffs.dragAlphaTypes = dragAlphaTypes
        coeffs.dragBetaTypes = dragBetaTypes
        if coeffs.barycenters is None:
            coeffs.barycenters = domain.barycenters
        coeffs.initialize()
        # BOUNDARY CONDITIONS
        boundaryConditions = self._Problem.SystemPhysics.boundaryConditions
        if domain.useSpatialTools is False or self._Problem.SystemPhysics.useBoundaryConditionsModule is False:
            self.p.dirichletConditions = {0: boundaryConditions['pressure_DBC'],
                                          1: boundaryConditions['vel_u_DBC'],
                                          2: boundaryConditions['vel_v_DBC']}
            self.p.advectiveFluxBoundaryConditions = {0: boundaryConditions['pressure_AFBC'],
                                                      1: boundaryConditions['vel_u_AFBC'],
                                                      2: boundaryConditions['vel_v_AFBC']}
            self.p.diffusiveFluxBoundaryConditions = {0: {},
                                                      1: {1: boundaryConditions['vel_u_DFBC']},
                                                      2: {2: boundaryConditions['vel_v_DFBC']}}
            if nd == 3:
                self.p.dirichletConditions[3] = boundaryConditions['vel_w_DBC']
                self.p.advectiveFluxBoundaryConditions[3] = boundaryConditions['vel_w_AFBC']
                self.p.diffusiveFluxBoundaryConditions[3] = {3: boundaryConditions['vel_w_DFBC']}
        else:
            self.p.dirichletConditions = {0: lambda x, flag: domain.BCbyFlag[flag].p_dirichlet.uOfXT,
                                          1: lambda x, flag: domain.BCbyFlag[flag].u_dirichlet.uOfXT,
                                          2: lambda x, flag: domain.BCbyFlag[flag].v_dirichlet.uOfXT}
            self.p.advectiveFluxBoundaryConditions = {0: lambda x, flag: domain.BCbyFlag[flag].p_advective.uOfXT,
                                                      1: lambda x, flag: domain.BCbyFlag[flag].u_advective.uOfXT,
                                                      2: lambda x, flag: domain.BCbyFlag[flag].v_advective.uOfXT}
            self.p.diffusiveFluxBoundaryConditions = {0: {},
                                                      1: {1:lambda x, flag: domain.BCbyFlag[flag].u_diffusive.uOfXT},
                                                      2: {2:lambda x, flag: domain.BCbyFlag[flag].v_diffusive.uOfXT}}
            if nd == 3:
                self.p.dirichletConditions[3] = lambda x, flag: domain.BCbyFlag[flag].w_dirichlet.uOfXT
                self.p.advectiveFluxBoundaryConditions[3] = lambda x, flag: domain.BCbyFlag[flag].w_advective.uOfXT
                self.p.diffusiveFluxBoundaryConditions[3] = {3: lambda x, flag: domain.BCbyFlag[flag].w_diffusive.uOfXT}
    def _initializeNumerics(self):
        """Choose time integration, FE spaces, stabilization and tolerances."""
        nd = self._Problem.domain.nd
        # TIME
        if self.timeDiscretization=='vbdf':
            self.n.timeIntegration = TimeIntegration.VBDF
        elif self.timeDiscretization=='be': #backward euler
            self.n.timeIntegration = TimeIntegration.BackwardEuler_cfl
        else:
            raise ValueError("{scheme} scheme is not valid. Accepted schemes values are 'be' and 'vbdf'".format(scheme=self.timeDiscretization))
        self.n.stepController = StepControl.Min_dt_cfl_controller
        # FINITE ELEMENT SPACES
        FESpace = self._Problem.FESpace
        self.n.femSpaces = {0: FESpace['pBasis'],
                            1: FESpace['velBasis'],
                            2: FESpace['velBasis']}
        if nd == 3:
            self.n.femSpaces[3] = FESpace['velBasis']
        # NUMERICAL FLUX
        seopts = self.n.SubgridErrorOptions
        self.n.subgridError = RANS2P.SubgridError(coefficients=self.p.coefficients,
                                                  nd=nd,
                                                  lag=seopts.lag,
                                                  hFactor=FESpace['hFactor'])
        scopts = self.n.ShockCapturingOptions
        self.n.shockCapturing = RANS2P.ShockCapturing(coefficients=self.p.coefficients,
                                                      nd=nd,
                                                      shockCapturingFactor=scopts.shockCapturingFactor,
                                                      lag=scopts.lag)
        # TOLERANCES
        meshOptions = self._Problem.domain.MeshOptions
        if self.n.nl_atol_res is None:
            self.n.nl_atol_res = max(minTol, 0.001*meshOptions.he**2)
        if self.n.l_atol_res is None:
            self.n.l_atol_res = 0.01*self.n.nl_atol_res
    def _initializePETScOptions(self):
        """Set default PETSc solver options for this model's prefix
        (direct LU, ASM/GMRES, or two-phase PCD depending on the smoother)."""
        prefix = self.n.linear_solver_options_prefix
        if self._Problem.SystemNumerics.useSuperlu:
            self.OptDB.setValue(prefix+'ksp_type', 'preonly')
            self.OptDB.setValue(prefix+'pc_type', 'lu')
            self.OptDB.setValue(prefix+'pc_factor_mat_solver_type', 'superlu_dist')
        elif self.n.linearSmoother == LinearSolvers.SimpleNavierStokes3D:
            self.OptDB.setValue(prefix+'ksp_type', 'gmres')
            self.OptDB.setValue(prefix+'pc_type', 'asm')
            self.OptDB.setValue(prefix+'pc_asm_type', 'basic')
            self.OptDB.setValue(prefix+'ksp_max_it', 2000)
            self.OptDB.setValue(prefix+'ksp_gmres_modifiedgramschmidt', 1)
            self.OptDB.setValue(prefix+'ksp_gmres_restart', 300)
            self.OptDB.setValue(prefix+'sub_ksp_type', 'preonly')
            self.OptDB.setValue(prefix+'sub_pc_factor_mat_solver_type', 'superlu')
            self.OptDB.setValue(prefix+'ksp_knoll', 1)
            self.OptDB.setValue(prefix+'sub_pc_type', 'lu')
        elif self.n.linearSmoother == LinearSolvers.NavierStokes_TwoPhasePCD:
            # Options for PCD
            # Global KSP options
            self.OptDB.setValue(prefix+'ksp_type', 'fgmres')
            self.OptDB.setValue(prefix+'ksp_gmres_restart', 300)
            self.OptDB.setValue(prefix+'ksp_gmres_modifiedgramschmidt', 1)
            self.OptDB.setValue(prefix+'ksp_pc_side','right')
            self.OptDB.setValue(prefix+'pc_fieldsplit_type', 'schur')
            self.OptDB.setValue(prefix+'pc_fieldsplit_schur_fact_type', 'upper')
            self.OptDB.setValue(prefix+'pc_fieldsplit_schur_precondition', 'user')
            # Velocity block options
            self.OptDB.setValue(prefix+'fieldsplit_velocity_ksp_type', 'gmres')
            self.OptDB.setValue(prefix+'fieldsplit_velocity_ksp_gmres_modifiedgramschmidt', 1)
            self.OptDB.setValue(prefix+'fieldsplit_velocity_ksp_atol', 1e-5)
            self.OptDB.setValue(prefix+'fieldsplit_velocity_ksp_rtol', 1e-5)
            self.OptDB.setValue(prefix+'fieldsplit_velocity_ksp_pc_side', 'right')
            self.OptDB.setValue(prefix+'fieldsplit_velocity_fieldsplit_u_ksp_type', 'preonly')
            self.OptDB.setValue(prefix+'fieldsplit_velocity_fieldsplit_u_pc_type', 'hypre')
            self.OptDB.setValue(prefix+'fieldsplit_velocity_fieldsplit_u_pc_hypre_type', 'boomeramg')
            self.OptDB.setValue(prefix+'fieldsplit_velocity_fieldsplit_u_pc_hypre_boomeramg_coarsen_type', 'HMIS')
            self.OptDB.setValue(prefix+'fieldsplit_velocity_fieldsplit_v_ksp_type', 'preonly')
            self.OptDB.setValue(prefix+'fieldsplit_velocity_fieldsplit_v_pc_type', 'hypre')
            self.OptDB.setValue(prefix+'fieldsplit_velocity_fieldsplit_v_pc_hypre_type', 'boomeramg')
            self.OptDB.setValue(prefix+'fieldsplit_velocity_fieldsplit_v_pc_hypre_boomeramg_coarsen_type', 'HMIS')
            self.OptDB.setValue(prefix+'fieldsplit_velocity_fieldsplit_w_ksp_type', 'preonly')
            self.OptDB.setValue(prefix+'fieldsplit_velocity_fieldsplit_w_pc_type', 'hypre')
            self.OptDB.setValue(prefix+'fieldsplit_velocity_fieldsplit_w_pc_hypre_type', 'boomeramg')
            self.OptDB.setValue(prefix+'fieldsplit_velocity_fieldsplit_w_pc_hypre_boomeramg_coarsen_type', 'HMIS')
            #PCD Schur Complement options
            self.OptDB.setValue(prefix+'fieldsplit_pressure_ksp_type', 'preonly')
            self.OptDB.setValue('innerTPPCDsolver_Qp_visc_ksp_type', 'preonly')
            self.OptDB.setValue('innerTPPCDsolver_Qp_visc_pc_type', 'lu')
            self.OptDB.setValue('innerTPPCDsolver_Qp_visc_pc_factor_mat_solver_type', 'superlu_dist')
            self.OptDB.setValue('innerTPPCDsolver_Qp_dens_ksp_type', 'preonly')
            self.OptDB.setValue('innerTPPCDsolver_Qp_dens_pc_type', 'lu')
            self.OptDB.setValue('innerTPPCDsolver_Qp_dens_pc_factor_mat_solver_type', 'superlu_dist')
            self.OptDB.setValue('innerTPPCDsolver_Ap_rho_ksp_type', 'richardson')
            self.OptDB.setValue('innerTPPCDsolver_Ap_rho_ksp_max_it', 1)
            #self.OptDB.setValue('innerTPPCDsolver_Ap_rho_ksp_constant_null_space',1)
            self.OptDB.setValue('innerTPPCDsolver_Ap_rho_pc_type', 'hypre')
            self.OptDB.setValue('innerTPPCDsolver_Ap_rho_pc_hypre_type', 'boomeramg')
            self.OptDB.setValue('innerTPPCDsolver_Ap_rho_pc_hypre_boomeramg_strong_threshold', 0.5)
            self.OptDB.setValue('innerTPPCDsolver_Ap_rho_pc_hypre_boomeramg_interp_type', 'ext+i-cc')
            self.OptDB.setValue('innerTPPCDsolver_Ap_rho_pc_hypre_boomeramg_coarsen_type', 'HMIS')
            self.OptDB.setValue('innerTPPCDsolver_Ap_rho_pc_hypre_boomeramg_agg_nl', 2)
class ParametersModelRANS3PF(ParametersModelBase):
    """Parameter container for the RANS3PF fluid model (velocity solve of
    the projection scheme; pressure handled by separate models)."""
    def __init__(self,ProblemInstance):
        super(ParametersModelRANS3PF, self).__init__(name='rans3p',Problem=ProblemInstance)
        self.p.coefficients = RANS3PF.Coefficients(
            nd = self._Problem.domain.nd,
            initialize=False,
            useMetrics=1.,
            epsFact_density=epsFact,
            particle_epsFact=3.,
            eb_penalty_constant = 100.0,
            ARTIFICIAL_VISCOSITY=3,
            INT_BY_PARTS_PRESSURE=1,
            USE_SUPG=0,
        )
        scopts = self.n.ShockCapturingOptions
        scopts.shockCapturingFactor = shockCapturingFactor
        scopts.lag = True
        scopts._freeze()
        seopts = self.n.SubgridErrorOptions
        seopts.lag = True
        seopts._freeze()
        # LEVEL MODEL
        self.p.LevelModelType = RANS3PF.LevelModel
        # TIME DISCRETIZATION
        self.timeDiscretization = 'vbdf'
        # NUMERICAL FLUX
        self.n.numericalFluxType = RANS3PF.NumericalFlux
        # LINEAR ALGEBRA
        self.n.multilevelLinearSolver = LinearSolvers.KSP_petsc4py
        self.n.levelLinearSolver = LinearSolvers.KSP_petsc4py
        self.n.linear_solver_options_prefix = 'rans3p_'
        self.n.linearSolverConvergenceTest = 'r-true'
        # NON LINEAR SOLVER
        self.n.multilevelNonlinearSolver = NonlinearSolvers.Newton
        self.n.nonlinearSolverConvergenceTest = 'rits'
        self.n.levelNonlinearSolverConvergenceTest = 'rits'
        # TOLERANCES
        self.n.linTolFac = 0.
        self.n.tolFac = 0.
        self.n.maxNonlinearIts = 1 # This is a linear problem
        self.n.maxLineSearches = 0
        # OTHER
        self.n.addOption('forceTerms', None)
        self.p.addOption('forceTerms', None)
        # freeze attributes
        self._freeze()
        #Initial Conditions
        self.setInitialConditionStructure()
    def _initializePhysics(self):
        """Resolve coupled-model indices, fill the coefficients and attach
        boundary conditions for the velocity components."""
        domain = self._Problem.domain
        nd = domain.nd
        # MODEL INDEX (unused lookups for 'rdls', 'mcorr' and
        # 'pressureIncrement' removed; they had no effect here)
        idxDict = self._Problem.SystemPhysics._modelIdxDict
        VOF_model=self.fetchIndex(idxDict,'vof')
        LS_model=self.fetchIndex(idxDict,'ncls')
        SED_model=None
        VOS_model=None
        CLSVOF_model = self.fetchIndex(idxDict,'clsvof')
        V_model = self.fetchIndex(idxDict,self.name)
        PRESSURE_model = self.fetchIndex(idxDict,'pressure')
        K_model = self.fetchIndex(idxDict,'kappa')
        DISS_model = self.fetchIndex(idxDict,'dissipation')
        # COEFFICIENTS
        coeffs = self.p.coefficients
        if coeffs.forceTerms is not None:
            self.p.forceTerms = coeffs.forceTerms
            coeffs.MULTIPLY_EXTERNAL_FORCE_BY_DENSITY = 1
        self._setPhysicsValues()
        # set nd once (was assigned twice before and after _setPhysicsValues)
        coeffs.nd = nd
        coeffs.ME_model = V_model
        coeffs.VOF_model = VOF_model
        coeffs.CLSVOF_model = CLSVOF_model
        coeffs.LS_model = LS_model
        coeffs.SED_model = SED_model
        coeffs.VOS_model = VOS_model
        coeffs.PRESSURE_model = PRESSURE_model
        coeffs.Closure_0_model = K_model
        coeffs.Closure_1_model = DISS_model
        coeffs.initialize()
        # BOUNDARY CONDITIONS
        boundaryConditions = self._Problem.SystemPhysics.boundaryConditions
        if domain.useSpatialTools is False or self._Problem.SystemPhysics.useBoundaryConditionsModule is False:
            self.p.dirichletConditions = {0: boundaryConditions['vel_u_DBC'],
                                          1: boundaryConditions['vel_v_DBC']}
            self.p.advectiveFluxBoundaryConditions = {0: boundaryConditions['vel_u_AFBC'],
                                                      1: boundaryConditions['vel_v_AFBC']}
            self.p.diffusiveFluxBoundaryConditions = {0: {0: boundaryConditions['vel_u_DFBC']},
                                                      1: {1: boundaryConditions['vel_v_DFBC']}}
            if nd == 3:
                self.p.dirichletConditions[2] = boundaryConditions['vel_w_DBC']
                self.p.advectiveFluxBoundaryConditions[2] = boundaryConditions['vel_w_AFBC']
                self.p.diffusiveFluxBoundaryConditions[2] = {2: boundaryConditions['vel_w_DFBC']}
        else:
            self.p.dirichletConditions = {0: lambda x, flag: domain.BCbyFlag[flag].u_dirichlet.uOfXT,
                                          1: lambda x, flag: domain.BCbyFlag[flag].v_dirichlet.uOfXT}
            self.p.advectiveFluxBoundaryConditions = {0: lambda x, flag: domain.BCbyFlag[flag].u_advective.uOfXT,
                                                      1: lambda x, flag: domain.BCbyFlag[flag].v_advective.uOfXT}
            self.p.diffusiveFluxBoundaryConditions = {0: {0:lambda x, flag: domain.BCbyFlag[flag].u_diffusive.uOfXT},
                                                      1: {1:lambda x, flag: domain.BCbyFlag[flag].v_diffusive.uOfXT}}
            if nd == 3:
                self.p.dirichletConditions[2] = lambda x, flag: domain.BCbyFlag[flag].w_dirichlet.uOfXT
                self.p.advectiveFluxBoundaryConditions[2] = lambda x, flag: domain.BCbyFlag[flag].w_advective.uOfXT
                self.p.diffusiveFluxBoundaryConditions[2] = {2: lambda x, flag: domain.BCbyFlag[flag].w_diffusive.uOfXT}
    def _initializeNumerics(self):
        """Choose time integration, FE spaces, stabilization and tolerances."""
        self.n.forceTerms = self.p.forceTerms
        nd = self._Problem.domain.nd
        # TIME
        if self.timeDiscretization=='vbdf':
            self.n.timeIntegration = TimeIntegration.VBDF
            self.n.timeOrder = 2
        else: #backward euler
            self.n.timeIntegration = TimeIntegration.BackwardEuler_cfl
        self.n.stepController = StepControl.Min_dt_cfl_controller
        # FINITE ELEMENT SPACES
        FESpace = self._Problem.FESpace
        self.n.femSpaces = {0: FESpace['velBasis'],
                            1: FESpace['velBasis']}
        if nd == 3:
            self.n.femSpaces[2] = FESpace['velBasis']
        # NUMERICAL FLUX
        seopts = self.n.SubgridErrorOptions
        self.n.subgridError = RANS3PF.SubgridError(coefficients=self.p.coefficients,
                                                   nd=nd,
                                                   lag=seopts.lag,
                                                   hFactor=FESpace['hFactor'])
        scopts = self.n.ShockCapturingOptions
        self.n.shockCapturing = RANS3PF.ShockCapturing(coefficients=self.p.coefficients,
                                                       nd=nd,
                                                       shockCapturingFactor=scopts.shockCapturingFactor,
                                                       lag=scopts.lag)
        # TOLERANCES
        meshOptions = self._Problem.domain.MeshOptions
        if self.n.nl_atol_res is None:
            self.n.nl_atol_res = max(minTol, 0.01*meshOptions.he**2)
        if self.n.l_atol_res is None:
            self.n.l_atol_res = 0.1*self.n.nl_atol_res
class ParametersModelPressure(ParametersModelBase):
    """Parameter container for the pressure-update model of the
    projection scheme (used together with rans3p/pressureIncrement)."""
    def __init__(self,ProblemInstance):
        super(ParametersModelPressure, self).__init__(name='pressure',Problem=ProblemInstance)
        self.p.coefficients = Pres.Coefficients(
            initialize=False,
        )
        # LEVEL MODEL
        self.p.LevelModelType = Pres.LevelModel
        # NON LINEAR SOLVER
        self.n.multilevelNonlinearSolver = NonlinearSolvers.Newton
        # LINEAR ALGEBRA
        self.n.multilevelLinearSolver = LinearSolvers.KSP_petsc4py
        self.n.levelLinearSolver = LinearSolvers.KSP_petsc4py
        self.n.linear_solver_options_prefix = 'pressure_'
        self.n.linearSolverConvergenceTest = 'r-true'
        # NUMERICAL FLUX
        self.n.numericalFluxType = NumericalFlux.ConstantAdvection_exterior
        # TOLERANCES
        self.n.tolFac = 0.
        self.n.linTolFac = 0.
        self.n.maxLineSearches = 0
        # freeze attributes
        self._freeze()
        #Initial Conditions
        self.setInitialConditionStructure()
    def _initializePhysics(self):
        """Wire the pressure model to the fluid and pressure-increment
        models and attach the pressure boundary conditions."""
        domain = self._Problem.domain
        nd = domain.nd  # (unused here; kept for symmetry with other models)
        # MODEL INDEXING
        idxDict = self._Problem.SystemPhysics._modelIdxDict
        PRESSURE_model = self.fetchIndex(idxDict,'pressure')
        V_model = self.fetchIndex(idxDict,'rans3p')
        PINC_model = self.fetchIndex(idxDict,'pressureIncrement')
        # COEFFICIENTS
        coeffs = self.p.coefficients
        coeffs.modelIndex = PRESSURE_model
        coeffs.fluidModelIndex = V_model
        coeffs.pressureIncrementModelIndex = PINC_model
        coeffs.initialize()
        # BOUNDARY CONDITIONS
        BC = self._Problem.SystemPhysics.boundaryConditions
        if domain.useSpatialTools is False or self._Problem.SystemPhysics.useBoundaryConditionsModule is False:
            self.p.dirichletConditions = {0: BC['pressure_DBC']}
            self.p.advectiveFluxBoundaryConditions = {0: BC['pressure_AFBC']}
        else:
            self.p.dirichletConditions = {0: lambda x, flag: domain.BCbyFlag[flag].p_dirichlet.uOfXT}
            self.p.advectiveFluxBoundaryConditions = {0: lambda x, flag: domain.BCbyFlag[flag].p_advective.uOfXT}
    def _initializeNumerics(self):
        """Choose step control, FE space and tolerances for the pressure."""
        domain = self._Problem.domain
        nd = domain.nd  # (unused here; kept for symmetry with other models)
        # TIME
        self.n.stepController = StepControl.FixedStep
        # FINITE ELEMENT SPACES
        FESpace = self._Problem.FESpace
        self.n.femSpaces = {0: FESpace['pBasis']}
        # TOLERANCE
        meshOptions = self._Problem.domain.MeshOptions
        if self.n.nl_atol_res is None:
            self.n.nl_atol_res = max(minTol, 0.01*meshOptions.he**2)
        if self.n.l_atol_res is None:
            self.n.l_atol_res = 0.1*self.n.nl_atol_res
class ParametersModelPressureInitial(ParametersModelBase):
    """Parameters for the model computing an initial pressure field
    consistent with the initial velocity (projection-scheme startup).
    """
    def __init__(self,ProblemInstance):
        super(ParametersModelPressureInitial, self).__init__(name='pressureInitial',Problem=ProblemInstance)
        self.p.coefficients = PresInit.Coefficients(
            initialize=False,
        )
        # NUMERICAL FLUX
        self.n.numericalFluxType = NumericalFlux.ConstantAdvection_exterior
        # NON LINEAR SOLVER
        self.n.multilevelNonlinearSolver = NonlinearSolvers.Newton
        # LINEAR ALGEBRA
        self.n.multilevelLinearSolver = LinearSolvers.KSP_petsc4py
        self.n.levelLinearSolver = LinearSolvers.KSP_petsc4py
        self.n.linearSmoother = LinearSolvers.NavierStokesPressureCorrection # pure neumann laplacian solver
        self.n.linear_solver_options_prefix = 'pinit_'
        self.n.linearSolverConvergenceTest = 'r-true'
        # TOLERANCES
        self.n.tolFac = 0.
        self.n.linTolFac = 0.
        self.n.maxLineSearches = 0
        # freeze attributes
        self._freeze()
        #Initial Conditions
        self.setInitialConditionStructure()
    def _initializePhysics(self):
        """Wire model indices, finalize coefficients, and set BCs."""
        domain = self._Problem.domain
        nd = domain.nd
        # MODEL INDEXING
        idxDict = self._Problem.SystemPhysics._modelIdxDict
        PRESSURE_model = self.fetchIndex(idxDict,'pressure')
        V_model = self.fetchIndex(idxDict,'rans3p')
        PINIT_model = self.fetchIndex(idxDict,'pressureInitial')
        # COEFFICIENTS
        coeffs = self.p.coefficients
        coeffs.nd = nd
        coeffs.modelIndex = PINIT_model
        coeffs.fluidModelIndex = V_model
        coeffs.pressureModelIndex = PRESSURE_model
        coeffs.initialize()
        # BOUNDARY CONDITIONS
        BC = self._Problem.SystemPhysics.boundaryConditions
        if domain.useSpatialTools is False or self._Problem.SystemPhysics.useBoundaryConditionsModule is False:
            self.p.dirichletConditions = {0: BC['pressure_DBC']}
            self.p.advectiveFluxBoundaryConditions = {0: BC['pressure_AFBC']}
            self.p.diffusiveFluxBoundaryConditions = {0:{0: BC['pressure_increment_DFBC']}}
        else:
            self.p.dirichletConditions = {0: lambda x, flag: domain.BCbyFlag[flag].p_dirichlet.uOfXT}
            self.p.advectiveFluxBoundaryConditions = {0: lambda x, flag: domain.BCbyFlag[flag].p_advective.uOfXT}
            self.p.diffusiveFluxBoundaryConditions = {0: {0: lambda x, flag: domain.BCbyFlag[flag].pInc_diffusive.uOfXT}}
        # freeze attributes
        # NOTE(review): attributes were already frozen in __init__; this
        # second call looks redundant -- confirm before removing.
        self._freeze()
    def _initializeNumerics(self):
        """Set step control, FE space, smoother, and tolerances."""
        domain = self._Problem.domain
        nd = domain.nd
        # TIME
        self.n.stepController = StepControl.FixedStep
        # FINITE ELEMENT SPACES
        FESpace = self._Problem.FESpace
        self.n.femSpaces = {0: FESpace['pBasis']}
        # LINEAR ALGEBRA
        if self._Problem.SystemNumerics.useSuperlu:
            # direct solve: no pressure-correction smoother needed
            self.n.linearSmoother = None
        # TOLERANCE
        meshOptions = self._Problem.domain.MeshOptions
        if self.n.nl_atol_res is None:
            self.n.nl_atol_res = max(minTol, 0.01*meshOptions.he**2)
        if self.n.l_atol_res is None:
            self.n.l_atol_res = 0.1*self.n.nl_atol_res
class ParametersModelPressureIncrement(ParametersModelBase):
    """Parameters for the pressure-increment (Poisson) model of the
    incremental projection scheme.
    """
    def __init__(self,ProblemInstance):
        super(ParametersModelPressureIncrement, self).__init__(name='pressureIncrement',Problem=ProblemInstance)
        self.p.coefficients = PresInc.Coefficients(
            initialize=False,
        )
        # LEVEL MODEL
        self.p.LevelModelType = PresInc.LevelModel
        # NUMERICAL FLUX
        self.n.numericalFluxType = PresInc.NumericalFlux
        # NON LINEAR SOLVER
        # (fix: this assignment was duplicated on two consecutive lines)
        self.n.multilevelNonlinearSolver = NonlinearSolvers.Newton
        # LINEAR ALGEBRA
        self.n.multilevelLinearSolver = LinearSolvers.KSP_petsc4py
        self.n.levelLinearSolver = LinearSolvers.KSP_petsc4py
        self.n.linear_solver_options_prefix = 'phi_'
        self.n.linearSolverConvergenceTest = 'r-true'
        # TOLERANCES
        self.n.tolFac = 0.
        self.n.linTolFac = 0.
        self.n.maxNonlinearIts = 50
        self.n.maxLineSearches = 0
        # freeze attributes
        self._freeze()
        #Initial Conditions
        self.setInitialConditionStructure()
    def _initializePhysics(self):
        """Wire model indices, finalize coefficients, and set BCs."""
        domain = self._Problem.domain
        nd = domain.nd
        pparams = self._Problem.SystemPhysics
        # MODEL INDEXING
        idxDict = self._Problem.SystemPhysics._modelIdxDict
        V_model = self.fetchIndex(idxDict,'rans3p')
        PINC_model = self.fetchIndex(idxDict,'pressureIncrement')
        # COEFFICIENTS
        coeffs = self.p.coefficients
        self._setPhysicsValues()
        # slightly reduced densities used as lower bounds by the increment model
        coeffs.rho_f_min = (1.0-1.0e-8)*coeffs.rho_1
        coeffs.rho_s_min = (1.0-1.0e-8)*coeffs.rho_0
        coeffs.nd = nd
        coeffs.modelIndex = PINC_model
        coeffs.fluidModelIndex = V_model
        coeffs.initialize()
        # BOUNDARY CONDITIONS
        BC = self._Problem.SystemPhysics.boundaryConditions
        if domain.useSpatialTools is False or self._Problem.SystemPhysics.useBoundaryConditionsModule is False:
            self.p.dirichletConditions = {0: BC['pressure_increment_DBC']}
            self.p.advectiveFluxBoundaryConditions = {0: BC['pressure_increment_AFBC']}
            self.p.diffusiveFluxBoundaryConditions = {0:{0: BC['pressure_increment_DFBC']}}
        else:
            self.p.dirichletConditions = {0: lambda x, flag: domain.BCbyFlag[flag].pInc_dirichlet.uOfXT}
            self.p.advectiveFluxBoundaryConditions = {0: lambda x, flag: domain.BCbyFlag[flag].pInc_advective.uOfXT}
            self.p.diffusiveFluxBoundaryConditions = {0: {0: lambda x, flag: domain.BCbyFlag[flag].pInc_diffusive.uOfXT}}
        # freeze attributes
        # NOTE(review): attributes were already frozen in __init__; this
        # second call looks redundant -- confirm before removing.
        self._freeze()
    def _initializeNumerics(self):
        """Set step control, FE space, and tolerances."""
        domain = self._Problem.domain
        nd = domain.nd
        # TIME
        self.n.stepController = StepControl.FixedStep
        # FINITE ELEMENT SPACES
        FESpace = self._Problem.FESpace
        self.n.femSpaces = {0: FESpace['pBasis']}
        # LINEAR ALGEBRA
        if self._Problem.SystemNumerics.useSuperlu:
            # direct solve: no smoother needed
            self.n.linearSmoother = None
        # TOLERANCE
        meshOptions = self._Problem.domain.MeshOptions
        if self.n.nl_atol_res is None:
            self.n.nl_atol_res = max(minTol, 0.01*meshOptions.he**2)
        if self.n.l_atol_res is None:
            self.n.l_atol_res = 0.01*self.n.nl_atol_res
class ParametersModelKappa(ParametersModelBase):
    """Parameters for the turbulent kinetic energy (k) transport model of a
    two-equation RANS closure.
    """
    def __init__(self,ProblemInstance):
        super(ParametersModelKappa, self).__init__(name='kappa',Problem=ProblemInstance)
        # NOTE(review): these two attributes are not read by
        # _initializeNumerics below (which hardcodes VBDF with timeOrder=1);
        # confirm whether they are consumed elsewhere.
        self.timeOrder = 1
        self.timeDiscretization = 'be'
        self.p.coefficients = Kappa.Coefficients(
            initialize=False,
            useMetrics=1.,
            epsFact=epsFact,
            sc_uref=sc_uref,
            sc_beta=sc_beta,
        )
        scopts = self.n.ShockCapturingOptions
        scopts.shockCapturingFactor = shockCapturingFactor
        scopts.lag = True
        scopts._freeze()
        seopts = self.n.SubgridErrorOptions
        seopts.lag = True
        seopts._freeze()
        # LEVEL MODEL
        self.p.LevelModelType = Kappa.LevelModel
        # NUMERICAL FLUX
        self.n.numericalFluxType = Kappa.NumericalFlux
        self.n.conservativeFlux = None
        # LINEAR ALGEBRA
        self.n.multilevelLinearSolver = LinearSolvers.KSP_petsc4py
        self.n.levelLinearSolver = LinearSolvers.KSP_petsc4py
        self.n.linear_solver_options_prefix = 'kappa_'
        self.n.linearSolverConvergenceTest = 'r-true'
        # NON LINEAR SOLVER
        self.n.multilevelNonlinearSolver = NonlinearSolvers.Newton
        self.n.nonlinearSolverConvergenceTest = 'rits'
        self.n.levelNonlinearSolverConvergenceTest = 'rits'
        # TOLERANCES
        self.n.linTolFac = 0.
        self.n.tolFac = 0.
        self.n.maxNonlinearIts = 50
        self.n.maxLineSearches = 0
        # freeze attributes
        self._freeze()
        #Initial Conditions
        self.setInitialConditionStructure()
    def _initializePhysics(self):
        """Wire sibling model indices, finalize coefficients, and set BCs."""
        idxDict = self._Problem.SystemPhysics._modelIdxDict
        pparams = self._Problem.SystemPhysics # physical parameters
        domain = self._Problem.domain
        nd = domain.nd
        # MODEL INDEX
        VOF_model=self.fetchIndex(idxDict,'vof')
        LS_model=self.fetchIndex(idxDict,'ncls')
        RD_model=self.fetchIndex(idxDict,'rdls')
        MCORR_model=self.fetchIndex(idxDict,'mcorr')
        SED_model=None
        VOS_model=None
        CLSVOF_model = self.fetchIndex(idxDict,'clsvof')
        # exactly one flow model (RANS3P preferred, else RANS2P) must exist
        if(self.fetchIndex(idxDict,'rans3p') is not None):
            V_model = self.fetchIndex(idxDict,'rans3p')
        elif(self.fetchIndex(idxDict,'rans2p') is not None):
            V_model = self.fetchIndex(idxDict,'rans2p')
        else:
            raise ValueError("Kappa model: RANS2P or RANS3P model has not been defined. Please define either one (but not both)")
        PINC_model = self.fetchIndex(idxDict,'pressureIncrement')
        PRESSURE_model = self.fetchIndex(idxDict,'pressure')
        K_model = self.fetchIndex(idxDict,'kappa')
        DISS_model = self.fetchIndex(idxDict,'dissipation')
        # COEFFICIENTS
        coeffs = self.p.coefficients
        coeffs.VOS_model = VOS_model
        coeffs.flowModelIndex = V_model
        coeffs.LS_modelIndex = LS_model
        coeffs.RD_modelIndex = RD_model
        coeffs.dissipation_modelIndex = DISS_model
        coeffs.modelIndex = K_model
        coeffs.SED_modelIndex = SED_model
        coeffs.dissipation_model_flag = pparams.useRANS
        #coeffs.c_mu = pparams.c_mu
        #coeffs.sigma_k = pparams.sigma_k
        self._setPhysicsValues()
        coeffs.nd = nd
        coeffs.initialize()
        # BOUNDARY CONDITIONS
        boundaryConditions = self._Problem.SystemPhysics.boundaryConditions
        if domain.useSpatialTools is False or self._Problem.SystemPhysics.useBoundaryConditionsModule is False:
            self.p.dirichletConditions = {0: boundaryConditions['k_DBC']}
            self.p.advectiveFluxBoundaryConditions = {0: boundaryConditions['k_AFBC']}
            self.p.diffusiveFluxBoundaryConditions = {0: {0: boundaryConditions['k_DFBC']}}
        else:
            # NOTE(review): uses domain.bc[...] / init_cython() while other
            # models in this file use domain.BCbyFlag[...] / uOfXT --
            # confirm which accessor is current.
            self.p.dirichletConditions = {0: lambda x, flag: domain.bc[flag].k_dirichlet.init_cython()}
            self.p.advectiveFluxBoundaryConditions = {0: lambda x, flag: domain.bc[flag].k_advective.init_cython()}
            self.p.diffusiveFluxBoundaryConditions = {0: {0:lambda x, flag: domain.bc[flag].k_diffusive.init_cython()}}
    def _initializeNumerics(self):
        """Set time integration, FE space, stabilization, and tolerances."""
        nd = self._Problem.domain.nd
        # TIME
        self.n.timeOrder = 1
        self.n.timeIntegration = TimeIntegration.VBDF#BackwardEuler
        self.n.stepController = StepControl.Min_dt_controller
        # FINITE ELEMENT SPACES
        FESpace = self._Problem.FESpace
        self.n.femSpaces = {0: FESpace['lsBasis']}
        # NUMERICAL FLUX
        seopts=self.n.SubgridErrorOptions
        self.n.subgridError = Kappa.SubgridError(coefficients=self.p.coefficients,
                                                 nd=nd)
        scopts = self.n.ShockCapturingOptions
        self.n.shockCapturing = Kappa.ShockCapturing(coefficients=self.p.coefficients,
                                                     nd=nd,
                                                     shockCapturingFactor=scopts.shockCapturingFactor,
                                                     lag=scopts.lag)
        # TOLERANCES
        meshOptions = self._Problem.domain.MeshOptions
        if self.n.nl_atol_res is None:
            self.n.nl_atol_res = max(minTol, 0.01*meshOptions.he**2)
        if self.n.l_atol_res is None:
            self.n.l_atol_res = 0.1*self.n.nl_atol_res
class ParametersModelDissipation(ParametersModelBase):
    """Parameters for the turbulence dissipation (epsilon/omega) transport
    model of a two-equation RANS closure.
    """
    def __init__(self,ProblemInstance):
        super(ParametersModelDissipation, self).__init__(name='dissipation',Problem=ProblemInstance)
        self.p.coefficients = Dissipation.Coefficients(
            initialize=False,
            useMetrics=1.,
            epsFact=epsFact,
            sc_uref=sc_uref,
            sc_beta=sc_beta,
        )
        scopts = self.n.ShockCapturingOptions
        scopts.shockCapturingFactor = shockCapturingFactor
        scopts.lag = True
        scopts._freeze()
        seopts = self.n.SubgridErrorOptions
        seopts.lag = True
        seopts._freeze()
        # LEVEL MODEL
        self.p.LevelModelType = Dissipation.LevelModel
        # TIME DISCRETIZATION
        # self.n.timeOrder = 2
        # self.n.timeDiscretization = 'be'
        # NUMERICAL FLUX
        self.n.numericalFluxType = Dissipation.NumericalFlux
        self.n.conservativeFlux = None
        # LINEAR ALGEBRA
        self.n.multilevelLinearSolver = LinearSolvers.KSP_petsc4py
        self.n.levelLinearSolver = LinearSolvers.KSP_petsc4py
        self.n.linear_solver_options_prefix = 'dissipation_'
        self.n.linearSolverConvergenceTest = 'r-true'
        # NON LINEAR SOLVER
        self.n.multilevelNonlinearSolver = NonlinearSolvers.Newton
        self.n.nonlinearSolverConvergenceTest = 'rits'
        self.n.levelNonlinearSolverConvergenceTest = 'rits'
        # TOLERANCES
        self.n.linTolFac = 0.
        self.n.tolFac = 0.
        self.n.maxNonlinearIts = 50
        self.n.maxLineSearches = 0
        # freeze attributes
        self._freeze()
        #Initial Conditions
        self.setInitialConditionStructure()
    def _initializePhysics(self):
        """Wire sibling model indices, finalize coefficients, and set BCs."""
        idxDict = self._Problem.SystemPhysics._modelIdxDict
        # fix: previously read self._Problem.Parameters.physical, a stale
        # accessor inconsistent with the parallel Kappa model and the rest of
        # this file, which take physical parameters (e.g. useRANS) from
        # SystemPhysics.
        pparams = self._Problem.SystemPhysics # physical parameters
        domain = self._Problem.domain
        nd = domain.nd
        # MODEL INDEX
        VOF_model = self.fetchIndex(idxDict,'vof')
        LS_model = self.fetchIndex(idxDict,'ncls')
        RD_model = self.fetchIndex(idxDict,'rdls')
        MCORR_model = self.fetchIndex(idxDict,'mcorr')
        SED_model = None
        VOS_model = None
        CLSVOF_model = self.fetchIndex(idxDict,'clsvof')
        # exactly one flow model (RANS3P preferred, else RANS2P) must exist
        if(self.fetchIndex(idxDict,'rans3p') is not None):
            V_model = self.fetchIndex(idxDict,'rans3p')
        elif(self.fetchIndex(idxDict,'rans2p') is not None):
            V_model = self.fetchIndex(idxDict,'rans2p')
        else:
            raise ValueError("Dissipation model: RANS2P or RANS3P model has not been defined. Please define either one (but not both)")
        PINC_model = self.fetchIndex(idxDict,'pressureIncrement')
        PRESSURE_model = self.fetchIndex(idxDict,'pressure')
        K_model = self.fetchIndex(idxDict,'kappa')
        DISS_model = self.fetchIndex(idxDict,'dissipation')
        # COEFFICIENTS
        coeffs = self.p.coefficients
        coeffs.VOS_modelIndex = VOS_model
        coeffs.flowModelIndex = V_model
        coeffs.LS_modelIndex = LS_model
        coeffs.RD_modelIndex = RD_model
        coeffs.kappa_modelIndex = K_model
        coeffs.modelIndex = DISS_model
        coeffs.SED_modelIndex = SED_model
        #coeffs.c_mu = pparams.c_mu
        #coeffs.c_1 = pparams.c_1
        #coeffs.c_2 = pparams.c_2
        #coeffs.c_e = pparams.c_e
        #coeffs.sigma_e = pparams.sigma_e
        self._setPhysicsValues()
        coeffs.nd = nd
        # default K-Epsilon, 2 --> K-Omega, 1998, 3 --> K-Omega 1988
        coeffs.dissipation_model_flag = pparams.useRANS
        coeffs.initialize()
        # BOUNDARY CONDITIONS
        boundaryConditions = self._Problem.SystemPhysics.boundaryConditions
        if domain.useSpatialTools is False or self._Problem.SystemPhysics.useBoundaryConditionsModule is False:
            self.p.dirichletConditions = {0: boundaryConditions['dissipation_DBC']}
            self.p.advectiveFluxBoundaryConditions = {0: boundaryConditions['dissipation_AFBC']}
            self.p.diffusiveFluxBoundaryConditions = {0: {0: boundaryConditions['dissipation_DFBC']}}
        else:
            # NOTE(review): uses domain.bc[...] / init_cython() while other
            # models in this file use domain.BCbyFlag[...] / uOfXT --
            # confirm which accessor is current.
            self.p.dirichletConditions = {0: lambda x, flag: domain.bc[flag].dissipation_dirichlet.init_cython()}
            self.p.advectiveFluxBoundaryConditions = {0: lambda x, flag: domain.bc[flag].dissipation_advective.init_cython()}
            self.p.diffusiveFluxBoundaryConditions = {0: {0:lambda x, flag: domain.bc[flag].dissipation_diffusive.init_cython()}}
    def _initializeNumerics(self):
        """Set time integration, FE space, stabilization, and tolerances."""
        nd = self._Problem.domain.nd
        # TIME
        self.n.timeOrder = 1
        self.n.timeIntegration = TimeIntegration.BackwardEuler
        self.n.stepController = StepControl.Min_dt_controller
        # FINITE ELEMENT SPACES
        FESpace = self._Problem.FESpace
        self.n.femSpaces = {0: FESpace['lsBasis']}
        # NUMERICAL FLUX
        seopts = self.n.SubgridErrorOptions
        self.n.subgridError = Dissipation.SubgridError(coefficients=self.p.coefficients,
                                                       nd=nd)
        scopts = self.n.ShockCapturingOptions
        self.n.shockCapturing = Dissipation.ShockCapturing(coefficients=self.p.coefficients,
                                                           nd=nd,
                                                           shockCapturingFactor=scopts.shockCapturingFactor,
                                                           lag=scopts.lag)
        # TOLERANCES
        meshOptions = self._Problem.domain.MeshOptions
        if self.n.nl_atol_res is None:
            self.n.nl_atol_res = max(minTol, 0.01 * meshOptions.he ** 2)
        if self.n.l_atol_res is None:
            self.n.l_atol_res = 0.1 * self.n.nl_atol_res
class ParametersModelCLSVOF(ParametersModelBase):
    """Parameters for the coupled level-set / volume-of-fluid (CLSVOF)
    interface-tracking model.
    """
    def __init__(self,ProblemInstance):
        super(ParametersModelCLSVOF, self).__init__(name='clsvof',Problem=ProblemInstance)
        self.p.coefficients = CLSVOF.Coefficients(
            initialize=False,
            useMetrics=1,
            epsFactHeaviside=epsFact,
            epsFactDirac=epsFact,
            epsFactRedist=0.33,
            lambdaFact=10.,
            computeMetrics=1,
        )
        # LEVEL MODEL
        self.p.LevelModelType = CLSVOF.LevelModel
        # NUMERICAL FLUX
        self.n.numericalFluxType = CLSVOF.NumericalFlux
        # NON LINEAR SOLVER
        self.n.levelNonlinearSolver = NonlinearSolvers.CLSVOFNewton
        self.n.multilevelNonlinearSolver = NonlinearSolvers.Newton
        self.n.addOption('updateJacobian', True)
        # LINEAR ALGEBRA
        self.n.multilevelLinearSolver = LinearSolvers.KSP_petsc4py
        self.n.levelLinearSolver = LinearSolvers.KSP_petsc4py
        self.n.linear_solver_options_prefix = 'clsvof_'
        self.n.linearSolverConvergenceTest = 'r-true'
        # TOLERANCES
        self.n.tolFac = 0.
        self.n.maxNonlinearIts = 50
        # freeze attributes
        self._freeze()
        #Initial Conditions
        self.setInitialConditionStructure()
    def _initializePhysics(self):
        """Wire model indices, finalize coefficients, and set BCs."""
        domain = self._Problem.domain
        nd = domain.nd
        # MODEL INDEXING
        idxDict = self._Problem.SystemPhysics._modelIdxDict
        CLSVOF_model = self.fetchIndex(idxDict,'clsvof')
        # prefer RANS2P as the flow model, fall back to RANS3P
        V_model = self.fetchIndex(idxDict,'rans2p')
        if V_model is None:
            V_model = self.fetchIndex(idxDict,'rans3p')
        # COEFFICIENTS
        coeffs = self.p.coefficients
        coeffs.flowModelIndex = V_model
        coeffs.modelIndex = CLSVOF_model
        coeffs.movingDomain = self.p.movingDomain
        coeffs.variableNames = ['phi']
        coeffs.initialize()
        # BOUNDARY CONDITIONS
        BC = self._Problem.SystemPhysics.boundaryConditions
        if domain.useSpatialTools is False or self._Problem.SystemPhysics.useBoundaryConditionsModule is False:
            self.p.dirichletConditions = {0: BC['clsvof_DBC']}
            self.p.advectiveFluxBoundaryConditions = {0: BC['clsvof_AFBC']}
            self.p.diffusiveFluxBoundaryConditions = {0:{0: BC['clsvof_DFBC']}}
        else:
            # NOTE(review): Dirichlet/advective BCs reuse the vof_* entries of
            # BCbyFlag while the diffusive BC uses clsvof_* -- confirm intent.
            self.p.dirichletConditions = {0: lambda x, flag: domain.BCbyFlag[flag].vof_dirichlet.uOfXT}
            self.p.advectiveFluxBoundaryConditions = {0: lambda x, flag: domain.BCbyFlag[flag].vof_advective.uOfXT}
            self.p.diffusiveFluxBoundaryConditions = {0: {0: lambda x, flag: domain.BCbyFlag[flag].clsvof_diffusive.uOfXT}}
    def _initializeNumerics(self):
        """Set time integration, FE space, and tolerances."""
        domain = self._Problem.domain
        nd = domain.nd
        # TIME
        self.n.timeIntegration = TimeIntegration.BackwardEuler_cfl
        self.n.stepController = StepControl.Min_dt_controller
        # FINITE ELEMENT SPACES
        FESpace = self._Problem.FESpace
        self.n.femSpaces = {0: FESpace['lsBasis']}
        # TOLERANCE
        meshOptions = self._Problem.domain.MeshOptions
        if self.n.nl_atol_res is None:
            self.n.nl_atol_res = max(minTol, 0.01*meshOptions.he**2)
        if self.n.l_atol_res is None:
            self.n.l_atol_res = 0.1*self.n.nl_atol_res
class ParametersModelVOF(ParametersModelBase):
    """Parameters for the volume-of-fluid (VOF) transport model."""
    def __init__(self,ProblemInstance):
        super(ParametersModelVOF, self).__init__(name='vof',Problem=ProblemInstance)
        self.p.coefficients = VOF.Coefficients(
            initialize=False,
            useMetrics=1.,
            epsFact=epsFact,
            sc_uref=sc_uref,
            sc_beta=sc_beta,
        )
        # LEVEL MODEL
        self.p.LevelModelType = VOF.LevelModel
        # TIME INTEGRATION
        self.n.timeIntegration = TimeIntegration.BackwardEuler_cfl
        self.n.stepController = StepControl.Min_dt_cfl_controller
        # NUMERICS
        self.n.ShockCapturingOptions.shockCapturingFactor = shockCapturingFactor
        self.n.ShockCapturingOptions.lag = True
        # NUMERICAL FLUX
        self.n.numericalFluxType = VOF.NumericalFlux
        # NON LINEAR SOLVER
        self.n.multilevelNonlinearSolver = NonlinearSolvers.Newton
        # LINEAR ALGEBRA
        self.n.multilevelLinearSolver = LinearSolvers.KSP_petsc4py
        self.n.levelLinearSolver = LinearSolvers.KSP_petsc4py
        self.n.linear_solver_options_prefix = 'vof_'
        self.n.linearSolverConvergenceTest = 'r-true'
        # TOLERANCES
        self.n.maxNonlinearIts = 50
        self.n.maxLineSearches = 0
        self.n.tolFac = 0.
        # freeze attributes
        self._freeze()
        #Initial Conditions
        self.setInitialConditionStructure()
    def _initializePhysics(self):
        """Wire model indices, finalize coefficients, and set BCs."""
        domain = self._Problem.domain
        nd = domain.nd
        # MODEL INDEX
        idxDict = self._Problem.SystemPhysics._modelIdxDict
        ME_model = self.fetchIndex(idxDict,self.name)
        assert ME_model is not None, 'vof model index was not set!'
        # a flow model (RANS2P preferred, else RANS3P) is required
        if('rans2p' in idxDict):
            V_model = self.fetchIndex(idxDict, 'rans2p')
        elif ('rans3p' in idxDict):
            V_model = self.fetchIndex(idxDict, 'rans3p')
        else:
            assert False, 'RANS2P or RANS3PF must be used with VOF'
        RD_model = self.fetchIndex(idxDict,'rdls')
        # COEFFICIENTS
        coeffs = self.p.coefficients
        coeffs.V_model = V_model
        coeffs.RD_modelIndex = RD_model
        coeffs.modelIndex = ME_model
        coeffs.movingDomain = self.p.movingDomain
        coeffs.initialize()
        # BOUNDARY CONDITIONS
        BC = self._Problem.SystemPhysics.boundaryConditions
        if domain.useSpatialTools is False or self._Problem.SystemPhysics.useBoundaryConditionsModule is False:
            self.p.dirichletConditions = {0: BC['vof_DBC']}
            self.p.advectiveFluxBoundaryConditions = {0: BC['vof_AFBC']}
        else:
            self.p.dirichletConditions = {0: lambda x, flag: domain.BCbyFlag[flag].vof_dirichlet.uOfXT}
            self.p.advectiveFluxBoundaryConditions = {0: lambda x, flag: domain.BCbyFlag[flag].vof_advective.uOfXT}
        self.p.diffusiveFluxBoundaryConditions = {0: {}}
    def _initializeNumerics(self):
        """Set time integration, FE space, stabilization, and tolerances."""
        domain = self._Problem.domain
        nd = domain.nd
        # TIME
        self.n.timeIntegration = TimeIntegration.BackwardEuler_cfl
        self.n.stepController = StepControl.Min_dt_cfl_controller
        # FINITE ELEMENT SPACES
        FESpace = self._Problem.FESpace
        self.n.femSpaces = {0: FESpace['lsBasis']}
        # NUMERICAL FLUX
        self.n.subgridError = VOF.SubgridError(coefficients=self.p.coefficients,
                                               nd=nd)
        scopts = self.n.ShockCapturingOptions
        self.n.shockCapturing = VOF.ShockCapturing(coefficients=self.p.coefficients,
                                                   nd=nd,
                                                   shockCapturingFactor=scopts.shockCapturingFactor,
                                                   lag=scopts.lag)
        # TOLERANCE
        meshOptions = self._Problem.domain.MeshOptions
        if self.n.nl_atol_res is None:
            # tighter (0.001) scaling than most models in this file
            self.n.nl_atol_res = max(minTol, 0.001*meshOptions.he**2)
        if self.n.l_atol_res is None:
            self.n.l_atol_res = 0.001*self.n.nl_atol_res
    def _initializePETScOptions(self):
        """Push per-model PETSc solver options under this model's prefix."""
        prefix = self.n.linear_solver_options_prefix
        if self._Problem.SystemNumerics.useSuperlu:
            # direct solve via SuperLU_DIST
            self.OptDB.setValue(prefix+'ksp_type', 'preonly')
            self.OptDB.setValue(prefix+'pc_type', 'lu')
            self.OptDB.setValue(prefix+'pc_factor_mat_solver_type', 'superlu_dist')
        else:
            # GMRES preconditioned with hypre BoomerAMG
            self.OptDB.setValue(prefix+'ksp_type', 'gmres')
            self.OptDB.setValue(prefix+'pc_type', 'hypre')
            self.OptDB.setValue(prefix+'pc_pc_hypre_type', 'boomeramg')
            self.OptDB.setValue(prefix+'ksp_gmres_restart', 300)
            self.OptDB.setValue(prefix+'ksp_knoll', 1)
            self.OptDB.setValue(prefix+'ksp_max_it', 2000)
class ParametersModelNCLS(ParametersModelBase):
    """Parameters for the non-conservative level-set (NCLS) transport model."""
    def __init__(self,ProblemInstance):
        super(ParametersModelNCLS, self).__init__(name='ncls',Problem=ProblemInstance)
        # PHYSICS
        self.p.coefficients = NCLS.Coefficients(
            initialize=False,
            useMetrics=1.,
            checkMass=False,
            sc_uref=sc_uref,
            sc_beta=sc_beta,
            epsFact=epsFact,
        )
        # LEVEL MODEL
        self.p.LevelModelType = NCLS.LevelModel
        # TIME INTEGRATION
        self.n.timeIntegration = TimeIntegration.BackwardEuler_cfl
        self.n.stepController = StepControl.Min_dt_cfl_controller
        # NUMERICAL FLUX
        # (fix: this assignment was duplicated a few lines apart; one suffices)
        self.n.numericalFluxType = NCLS.NumericalFlux
        self.n.ShockCapturingOptions.shockCapturingFactor = shockCapturingFactor
        self.n.ShockCapturingOptions.lag = True
        # NON LINEAR SOLVER
        self.n.multilevelNonlinearSolver = NonlinearSolvers.Newton
        # LINEAR ALGEBRA
        self.n.multilevelLinearSolver = LinearSolvers.KSP_petsc4py
        self.n.levelLinearSolver = LinearSolvers.KSP_petsc4py
        self.n.linear_solver_options_prefix = 'ncls_'
        self.n.linearSolverConvergenceTest = 'r-true'
        # TOLERANCES
        self.n.maxNonlinearIts = 50
        self.n.maxLineSearches = 0
        self.n.tolFac = 0.
        # freeze attributes
        self._freeze()
        self.setInitialConditionStructure()
    def _initializePhysics(self):
        """Wire model indices, finalize coefficients, and set BCs."""
        domain = self._Problem.domain
        # MODEL INDEXING
        idxDict = self._Problem.SystemPhysics._modelIdxDict
        ME_model = self.fetchIndex(idxDict, self.name)
        assert ME_model is not None, 'ls model index was not set!'
        # a flow model (RANS2P preferred, else RANS3P) is required
        if('rans2p' in idxDict):
            V_model = self.fetchIndex(idxDict, 'rans2p')
        elif ('rans3p' in idxDict):
            V_model = self.fetchIndex(idxDict, 'rans3p')
        else:
            # fix: this branch previously asserted on an undefined name
            # (mparams) and would have died with a NameError instead of
            # reporting the missing flow model
            assert False, 'RANS2P or RANS3PF must be used with NCLS'
        RD_model=self.fetchIndex(idxDict,'rdls')
        # COEFFICIENTS
        coeffs = self.p.coefficients
        coeffs.flowModelIndex = V_model
        coeffs.RD_modelIndex = RD_model
        coeffs.modelIndex = ME_model
        coeffs.movingDomain = self.p.movingDomain
        coeffs.initialize()
        # BOUNDARY CONDITIONS
        BC = self._Problem.SystemPhysics.boundaryConditions
        # only fill in Dirichlet conditions if none were supplied
        # (fix: the emptiness check used 'is 0' -- identity comparison with an
        # int literal -- instead of '== 0')
        if self.p.dirichletConditions is None or len(self.p.dirichletConditions) == 0:
            if domain.useSpatialTools is False or self._Problem.SystemPhysics.useBoundaryConditionsModule is False:
                if 'ncls_DBC' in BC:
                    self.p.dirichletConditions = {0: BC['ncls_DBC']}
                else:
                    # fix: attribute name was misspelled 'dirichletCondtions',
                    # silently creating a stray attribute instead of setting
                    # the no-op Dirichlet condition
                    self.p.dirichletConditions = {0: lambda x,t: None}
            else:
                self.p.dirichletConditions = {0: lambda x, flag: domain.BCbyFlag[flag].phi_dirichlet.uOfXT}
        self.p.diffusiveFluxBoundaryConditions = {0: {}}
    def _initializeNumerics(self):
        """Set time integration, FE space, stabilization, and tolerances."""
        domain = self._Problem.domain
        nd = domain.nd
        # TIME
        self.n.timeIntegration = TimeIntegration.BackwardEuler_cfl
        self.n.stepController = StepControl.Min_dt_cfl_controller
        # FINITE ELEMENT SPACES
        FESpace = self._Problem.FESpace
        self.n.femSpaces = {0: FESpace['lsBasis']}
        # NUMERICAL FLUX
        self.n.subgridError = NCLS.SubgridError(coefficients=self.p.coefficients,
                                                nd=nd)
        scopts = self.n.ShockCapturingOptions
        self.n.shockCapturing = NCLS.ShockCapturing(coefficients=self.p.coefficients,
                                                    nd=nd,
                                                    shockCapturingFactor=scopts.shockCapturingFactor,
                                                    lag=scopts.lag)
        # TOLERANCES
        meshOptions = self._Problem.domain.MeshOptions
        if self.n.nl_atol_res is None:
            self.n.nl_atol_res = max(minTol, 0.001*meshOptions.he**2)
        if self.n.l_atol_res is None:
            self.n.l_atol_res = 0.001*self.n.nl_atol_res
    def _initializePETScOptions(self):
        """Push per-model PETSc solver options under this model's prefix."""
        prefix = self.n.linear_solver_options_prefix
        if self._Problem.SystemNumerics.useSuperlu:
            # direct solve via SuperLU_DIST
            self.OptDB.setValue(prefix+'ksp_type', 'preonly')
            self.OptDB.setValue(prefix+'pc_type', 'lu')
            self.OptDB.setValue(prefix+'pc_factor_mat_solver_type', 'superlu_dist')
        else:
            # GMRES preconditioned with hypre BoomerAMG
            self.OptDB.setValue(prefix+'ksp_type', 'gmres')
            self.OptDB.setValue(prefix+'pc_type', 'hypre')
            self.OptDB.setValue(prefix+'pc_pc_hypre_type', 'boomeramg')
            self.OptDB.setValue(prefix+'ksp_gmres_restart', 300)
            self.OptDB.setValue(prefix+'ksp_knoll', 1)
            self.OptDB.setValue(prefix+'ksp_max_it', 2000)
class ParametersModelRDLS(ParametersModelBase):
    """Parameters for the redistancing (RDLS) model that restores the
    signed-distance property of the level-set field.
    """
    def __init__(self,ProblemInstance):
        super(ParametersModelRDLS, self).__init__(name='rdls',Problem=ProblemInstance)
        self.p.coefficients = RDLS.Coefficients(
            initialize=False,
            useMetrics=1.,
            epsFact=0.75,
        )
        scopts = self.n.ShockCapturingOptions
        scopts.shockCapturingFactor = 0.9
        scopts.lag = False
        scopts._freeze()
        # LEVEL MODEL
        self.p.LevelModelType = RDLS.LevelModel
        # TIME INTEGRATION
        # pseudo-steady problem: solved by Newton without time integration
        self.n.timeIntegration = TimeIntegration.NoIntegration
        self.n.stepController = StepControl.Newton_controller
        # NONLINEAR SOLVERS
        self.n.multilevelNonlinearSolver = NonlinearSolvers.Newton
        self.n.nonlinearSmoother = NonlinearSolvers.NLGaussSeidel
        # NUMERICAL FLUX
        self.n.numericalFluxType = NumericalFlux.DoNothing
        # LINEAR ALGEBRA
        self.n.multilevelLinearSolver = LinearSolvers.KSP_petsc4py
        self.n.levelLinearSolver = LinearSolvers.KSP_petsc4py
        self.n.linear_solver_options_prefix = 'rdls_'
        self.n.linearSolverConvergenceTest = 'r-true'
        #self.n.nonlinearSolverConvergenceTest = 'rits'
        #self.n.levelNonlinearSolverConvergenceTest = 'rits'
        # TOLERANCES
        self.n.tolFac = 0.
        #self.n.maxNonlinearIts = 1
        self.n.maxNonlinearIts = 50
        self.n.maxLineSearches = 0
        # freeze attributes
        self._freeze()
        self.setInitialConditionStructure()
    def _initializePhysics(self):
        """Wire model indices, finalize coefficients, and set BCs."""
        # MODEL INDEXING
        idxDict = self._Problem.SystemPhysics._modelIdxDict
        nModelId = self.fetchIndex(idxDict, 'ncls')
        assert nModelId is not None, 'ncls model index was not set!'
        rdModelId = self.fetchIndex(idxDict, self.name)
        # COEFFICIENTS
        coeffs = self.p.coefficients
        coeffs.nModelId = nModelId
        coeffs.rdModelId = rdModelId
        coeffs.initialize()
        # BOUNDARY CONDITIONS
        # no strong Dirichlet BCs; the zero level set is enforced weakly
        self.p.dirichletConditions = {0: lambda x, flag: None}
        self.p.weakDirichletConditions = {0: RDLS.setZeroLSweakDirichletBCsSimple}
        self.p.advectiveFluxBoundaryConditions = {}
        self.p.diffusiveFluxBoundaryConditions = {0: {}}
    def _initializeNumerics(self):
        """Set FE space, stabilization, and tolerances."""
        domain = self._Problem.domain
        nd = domain.nd
        # FINITE ELEMENT SPACES
        FESpace = self._Problem.FESpace
        self.n.femSpaces = {0: FESpace['lsBasis']}
        # NON LINEAR SOLVER
        self.n.nonlinearSmoother = NonlinearSolvers.NLGaussSeidel
        # NUMERICAL FLUX
        self.n.subgridError = RDLS.SubgridError(coefficients=self.p.coefficients,
                                                nd=nd)
        scopts = self.n.ShockCapturingOptions
        self.n.shockCapturing = RDLS.ShockCapturing(coefficients=self.p.coefficients,
                                                    nd=nd,
                                                    shockCapturingFactor=scopts.shockCapturingFactor,
                                                    lag=scopts.lag)
        # TOLERANCES
        meshOptions = self._Problem.domain.MeshOptions
        if self.n.nl_atol_res is None:
            # tolerance scales linearly with mesh size for redistancing
            self.n.nl_atol_res = max(minTol, 0.1*meshOptions.he)
        if self.n.l_atol_res is None:
            self.n.l_atol_res = 0.001*self.n.nl_atol_res
    def _initializePETScOptions(self):
        """Push per-model PETSc solver options under this model's prefix."""
        prefix = self.n.linear_solver_options_prefix
        if self._Problem.SystemNumerics.useSuperlu:
            # direct solve via SuperLU_DIST
            self.OptDB.setValue(prefix+'ksp_type', 'preonly')
            self.OptDB.setValue(prefix+'pc_type', 'lu')
            self.OptDB.setValue(prefix+'pc_factor_mat_solver_type', 'superlu_dist')
        else:
            # GMRES with additive-Schwarz preconditioning; subdomain direct solves
            self.OptDB.setValue(prefix+'ksp_type', 'gmres')
            self.OptDB.setValue(prefix+'pc_type', 'asm')
            self.OptDB.setValue(prefix+'pc_pc_asm_type', 'basic')
            self.OptDB.setValue(prefix+'ksp_gmres_modifiedgramschmidt', 1)
            self.OptDB.setValue(prefix+'ksp_gmres_restart', 300)
            self.OptDB.setValue(prefix+'ksp_knoll', 1)
            self.OptDB.setValue(prefix+'sub_ksp_type', 'preonly')
            self.OptDB.setValue(prefix+'sub_pc_factor_mat_solver_type', 'superlu')
            self.OptDB.setValue(prefix+'sub_pc_type', 'lu')
            self.OptDB.setValue(prefix+'max_it', 2000)
class ParametersModelMCorr(ParametersModelBase):
    """Parameters for the mass-correction (MCorr) model that restores
    global mass conservation of the level-set/VOF pair.
    """
    def __init__(self, ProblemInstance):
        super(ParametersModelMCorr, self).__init__(name='mcorr',
                                                   Problem=ProblemInstance)
        self.p.coefficients = MCorr.Coefficients(initialize=False,
                                                 useMetrics=1.,
                                                 checkMass=False,
                                                 epsFactHeaviside=epsFact,
                                                 epsFactDirac=epsFact,
                                                 epsFactDiffusion=10.)
        # level model
        self.p.LevelModelType = MCorr.LevelModel
        # time stepping
        self.n.timeIntegration = TimeIntegration.BackwardEuler_cfl
        self.n.stepController = StepControl.Min_dt_cfl_controller
        # numerical flux
        self.n.numericalFluxType = NumericalFlux.DoNothing
        # nonlinear solver
        self.n.multilevelNonlinearSolver = NonlinearSolvers.Newton
        # linear algebra
        self.n.multilevelLinearSolver = LinearSolvers.KSP_petsc4py
        self.n.levelLinearSolver = LinearSolvers.KSP_petsc4py
        self.n.linear_solver_options_prefix = 'mcorr_'
        self.n.linearSolverConvergenceTest = 'r-true'
        #self.n.nonlinearSolverConvergenceTest = 'rits'
        #self.n.levelNonlinearSolverConvergenceTest = 'rits'
        # tolerances
        self.n.linTolFac = 0.
        self.n.tolFac = 0.
        self.n.maxNonlinearIts = 50
        self.n.maxLineSearches = 0
        self.n.useEisenstatWalker = True
        self._freeze()  # lock the attribute set
        self.setInitialConditionStructure()
    def _initializePhysics(self):
        """Attach sibling model indices and finalize the coefficients."""
        dom = self._Problem.domain
        space_dim = dom.nd
        # model indexing
        indices = self._Problem.SystemPhysics._modelIdxDict
        me_idx = self.fetchIndex(indices, self.name)
        assert me_idx is not None, 'mcorr model index was not set!'
        ls_idx = self.fetchIndex(indices, 'ncls')
        vof_idx = self.fetchIndex(indices, 'vof')
        # a flow model (RANS2P preferred, else RANS3P) is required
        if 'rans2p' in indices:
            flow_idx = self.fetchIndex(indices, 'rans2p')
        elif 'rans3p' in indices:
            flow_idx = self.fetchIndex(indices, 'rans3p')
        else:
            assert False, 'RANS2P or RANS3PF must be used with VOF'
        # coefficients
        c = self.p.coefficients
        c.flowModelIndex = flow_idx
        c.me_model = me_idx
        c.VOFModelIndex = vof_idx
        c.levelSetModelIndex = ls_idx
        c.nd = space_dim
        c.initialize()
        # boundary conditions: none required for the correction model
    def _initializeNumerics(self):
        """Set time stepping, FE space, and mesh-scaled tolerances."""
        # time stepping
        self.n.timeIntegration = TimeIntegration.BackwardEuler_cfl
        self.n.stepController = StepControl.Min_dt_cfl_controller
        # finite element space
        self.n.femSpaces = {0: self._Problem.FESpace['lsBasis']}
        # tolerances scale with mesh size, bounded below by minTol
        he = self._Problem.domain.MeshOptions.he
        if self.n.nl_atol_res is None:
            self.n.nl_atol_res = max(minTol, 0.0001 * he ** 2)
        if self.n.l_atol_res is None:
            self.n.l_atol_res = 0.001 * self.n.nl_atol_res
    def _initializePETScOptions(self):
        """Push per-model PETSc solver options under this model's prefix."""
        prefix = self.n.linear_solver_options_prefix
        if self._Problem.SystemNumerics.useSuperlu:
            # direct solve via SuperLU_DIST
            settings = [('ksp_type', 'preonly'),
                        ('pc_type', 'lu'),
                        ('pc_factor_mat_solver_type', 'superlu_dist')]
        else:
            # CG preconditioned with hypre BoomerAMG
            settings = [('ksp_type', 'cg'),
                        ('pc_type', 'hypre'),
                        ('pc_pc_hypre_type', 'boomeramg'),
                        ('ksp_max_it', 2000)]
        for key, value in settings:
            self.OptDB.setValue(prefix + key, value)
class ParametersModelAddedMass(ParametersModelBase):
    """Parameters for the added-mass model.

    Solves a potential-flow-like problem whose solution is used to compute
    added-mass forces on rigid bodies; coupled to a RANS2P or RANS3PF flow
    model.
    """

    def __init__(self, ProblemInstance):
        super(ParametersModelAddedMass, self).__init__(name='addedMass', Problem=ProblemInstance)
        self.p.coefficients = AddedMass.Coefficients(
            initialize=False,
        )
        # LEVEL MODEL
        self.p.LevelModelType = AddedMass.LevelModel
        # TIME
        self.n.timeIntegration = TimeIntegration.BackwardEuler_cfl
        self.n.stepController = StepControl.Min_dt_cfl_controller
        # NONLINEAR SOLVER
        self.n.multilevelNonlinearSolver = NonlinearSolvers.AddedMassNewton
        self.n.levelNonlinearSolver = NonlinearSolvers.AddedMassNewton
        self.n.nonlinearSmoother = NonlinearSolvers.AddedMassNewton
        # NUMERICAL FLUX
        self.n.numericalFluxType = AddedMass.NumericalFlux
        # LINEAR ALGEBRA
        self.n.linearSmoother = LinearSolvers.NavierStokesPressureCorrection
        self.n.multilevelLinearSolver = LinearSolvers.KSP_petsc4py
        self.n.levelLinearSolver = LinearSolvers.KSP_petsc4py
        self.n.linear_solver_options_prefix = 'am_'
        self.n.linearSolverConvergenceTest = 'r-true'
        # TOLERANCES
        self.n.linTolFac = 0.
        self.n.tolFac = 0.
        self.n.maxNonlinearIts = 1
        self.n.maxLineSearches = 0
        # freeze attributes
        self._freeze()
        # Initial Conditions
        self.setInitialConditionStructure()

    def _initializePhysics(self):
        """Resolve the coupled flow-model index, set coefficients, and BCs."""
        domain = self._Problem.domain
        nd = domain.nd
        # MODEL INDEXING
        idxDict = self._Problem.SystemPhysics._modelIdxDict
        # fetch each candidate flow model once (was fetched twice per branch)
        V_model = self.fetchIndex(idxDict, 'rans2p')
        if V_model is None:
            V_model = self.fetchIndex(idxDict, 'rans3p')
        if V_model is None:
            # raise unconditionally: the previous `assert <always-false>` in the
            # else branch is stripped under `python -O`, which would leave
            # V_model unbound below
            raise AssertionError('RANS2P or RANS3PF must be used with addedMass')
        # COEFFICIENTS
        coeffs = self.p.coefficients
        coeffs.flowModelIndex = V_model
        coeffs.barycenters = domain.barycenters
        coeffs.nd = nd
        coeffs.initialize()
        # BOUNDARY CONDITIONS
        self.p.dirichletConditions = {0: lambda x, flag: domain.BCbyFlag[flag].pAddedMass_dirichlet.uOfXT}
        self.p.advectiveFluxBoundaryConditions = {}

        def getFlux_am(x, flag):
            # the unit rigid motions will be applied internally;
            # leave this set to zero
            return lambda x, t: 0.0
        self.p.diffusiveFluxBoundaryConditions = {0: {0: getFlux_am}}

    def _initializeNumerics(self):
        """Set FE spaces and default solver tolerances."""
        # FINITE ELEMENT SPACES
        FESpace = self._Problem.FESpace
        self.n.femSpaces = {0: FESpace['lsBasis']}
        # TOLERANCES
        meshOptions = self._Problem.domain.MeshOptions
        if self.n.nl_atol_res is None:
            # scale the nonlinear tolerance with the mesh size, floored at minTol
            self.n.nl_atol_res = max(minTol, 0.0001*meshOptions.he**2)
        if self.n.l_atol_res is None:
            self.n.l_atol_res = 0.001*self.n.nl_atol_res
        # override LU selection, even in serial
        self.n.multilevelLinearSolver = LinearSolvers.KSP_petsc4py
        self.n.levelLinearSolver = LinearSolvers.KSP_petsc4py

    def _initializePETScOptions(self):
        """Configure CG + GAMG for the added-mass linear solves."""
        prefix = self.n.linear_solver_options_prefix
        self.OptDB.setValue(prefix+'ksp_type', 'cg')
        self.OptDB.setValue(prefix+'pc_type', 'gamg')
        self.OptDB.setValue(prefix+'ksp_max_it', 2000)
class ParametersModelMoveMeshMonitor(ParametersModelBase):
    """Parameters for the monitor-function mesh-motion model.

    Redistributes mesh nodes according to a user-supplied size function
    (`func`), optionally driven by the level-set solution (`useLS`).
    """

    def __init__(self, ProblemInstance):
        super(ParametersModelMoveMeshMonitor, self).__init__(name='moveMeshMonitor', Problem=ProblemInstance)
        self.p.coefficients = MoveMeshMonitor.Coefficients(
            initialize=False,
            ME_MODEL=None,
            func=lambda x, t: 1000.,
            he_min=0.,
            he_max=1000.,
            epsFact_density=epsFact,
            epsTimeStep=0.1,
            grading_type=2,
            useLS=True,
        )
        # TIME INTEGRATION
        self.n.timeIntegration = TimeIntegration.NoIntegration
        # NONLINEAR SOLVER
        self.n.multilevelNonlinearSolver = NonlinearSolvers.MoveMeshMonitorNewton
        self.n.levelNonlinearSolver = NonlinearSolvers.MoveMeshMonitorNewton
        self.n.nonlinearSmoother = NonlinearSolvers.MoveMeshMonitorNewton
        # NUMERICAL FLUX
        self.n.numericalFluxType = NumericalFlux.Diffusion_SIPG_exterior
        # LINEAR ALGEBRA
        self.n.linearSmoother = LinearSolvers.NavierStokesPressureCorrection
        self.n.multilevelLinearSolver = LinearSolvers.KSP_petsc4py
        self.n.levelLinearSolver = LinearSolvers.KSP_petsc4py
        self.n.linear_solver_options_prefix = 'mesh2_'
        self.n.linearSolverConvergenceTest = 'r-true'
        # TOLERANCES
        self.n.linTolFac = 0.
        self.n.tolFac = 0.
        self.n.maxNonlinearIts = 1
        self.n.maxLineSearches = 0
        # freeze attributes
        self._freeze()
        # Initial Conditions
        self.setInitialConditionStructure()

    def _initializePhysics(self):
        """Resolve model indices and initialize the coefficients."""
        domain = self._Problem.domain
        nd = domain.nd
        # MODEL INDEXING
        idxDict = self._Problem.SystemPhysics._modelIdxDict
        ME_MODEL = self.index
        assert ME_MODEL is not None, 'moveMeshMonitor model index was not set!'
        if self.p.coefficients.useLS is True:
            # fix: look the level-set model up through the model-index dict,
            # consistent with every other model lookup in this file (was
            # `mparams.ncls.index`, a stale reference to the old global
            # parameter structure)
            LS_MODEL = self.fetchIndex(idxDict, 'ncls')
        else:
            LS_MODEL = None
        # COEFFICIENTS
        coeffs = self.p.coefficients
        coeffs.LS_MODEL = LS_MODEL
        coeffs.ME_MODEL = ME_MODEL
        coeffs.nd = nd
        coeffs.initialize()
        # BOUNDARY CONDITIONS
        self.p.dirichletConditions = {0: lambda x, flag: None}
        # self.p.advectiveFluxBoundaryConditions = {}
        self.p.diffusiveFluxBoundaryConditions = {0: {0: lambda x, flag: lambda x, t: 0.}}

    def _initializeNumerics(self):
        """Set FE spaces and default solver tolerances."""
        # FINITE ELEMENT SPACES
        FESpace = self._Problem.FESpace
        self.n.femSpaces = {0: FESpace['lsBasis']}
        # TOLERANCES
        meshOptions = self._Problem.domain.MeshOptions
        if self.n.nl_atol_res is None:
            # scale the nonlinear tolerance with the mesh size, floored at minTol
            self.n.nl_atol_res = max(minTol, 0.0001*meshOptions.he**2)
        if self.n.l_atol_res is None:
            self.n.l_atol_res = 0.001*self.n.nl_atol_res

    def _initializePETScOptions(self):
        """Configure CG + hypre BoomerAMG for the mesh-monitor linear solves."""
        prefix = self.n.linear_solver_options_prefix
        self.OptDB.setValue(prefix+'ksp_type', 'cg')
        self.OptDB.setValue(prefix+'pc_type', 'hypre')
        self.OptDB.setValue(prefix+'pc_hypre_type', 'boomeramg')
        # self.OptDB.setValue(prefix+'ksp_constant_null_space', 1)
        # self.OptDB.setValue(prefix+'pc_factor_shift_type', 'NONZERO')
        # self.OptDB.setValue(prefix+'pc_factor_shift_amount', 1e-10)
        # self.OptDB.setValue(prefix+'ksp_max_it', 2000)
class ParametersModelMoveMeshElastic(ParametersModelBase):
    """Parameters for the linear-elastic mesh-motion model.

    Moves mesh nodes by solving a linear-elasticity problem per material
    block; coupled to a RANS2P or RANS3PF flow model.
    """

    def __init__(self, ProblemInstance):
        super(ParametersModelMoveMeshElastic, self).__init__(name='moveMeshElastic', Problem=ProblemInstance)
        self.p.coefficients = MoveMesh.Coefficients(
            nd=self._Problem.domain.nd,
            initialize=False,
            modelType_block=None,
            modelParams_block=None,
        )
        # LEVEL MODEL
        self.p.LevelModelType = MoveMesh.LevelModel
        # TIME INTEGRATION
        self.n.timeIntegration = TimeIntegration.NoIntegration
        # NONLINEAR SOLVER
        self.n.multilevelNonlinearSolver = NonlinearSolvers.Newton
        # NUMERICAL FLUX
        self.n.numericalFluxType = NumericalFlux.Stress_IIPG_exterior
        # LINEAR ALGEBRA
        self.n.multilevelLinearSolver = LinearSolvers.KSP_petsc4py
        self.n.levelLinearSolver = LinearSolvers.KSP_petsc4py
        self.n.linear_solver_options_prefix = 'mesh_'
        self.n.linearSolverConvergenceTest = 'r-true'
        # TOLERANCES
        self.n.tolFac = 0.
        self.n.linTolFac = 0.
        self.n.maxNonlinearIts = 4
        self.n.maxLineSearches = 0
        # freeze attributes
        self._freeze()
        # Initial Conditions
        self.setInitialConditionStructure()

    def _initializePhysics(self):
        """Set elastic material blocks, resolve model indices, and assign BCs."""
        domain = self._Problem.domain
        nd = domain.nd
        # NUM PARAMS
        nMediaTypes = len(domain.regionFlags)  # (!) should be region flags
        smTypes = np.zeros((nMediaTypes+1, 2), 'd')
        smFlags = np.zeros((nMediaTypes+1,), 'i')
        # per-block material parameters; presumably (modulus, Poisson-like
        # ratio) given the 0.3 default -- TODO confirm against MoveMesh.Coefficients
        smTypes[:, 0] = 1.
        smTypes[:, 1] = 0.3
        # MODEL INDEXING
        idxDict = self._Problem.SystemPhysics._modelIdxDict
        ME_model = self.fetchIndex(idxDict, 'moveMeshElastic')
        # fix: message previously said 'vof model index was not set!' (copy-paste)
        assert ME_model is not None, 'moveMeshElastic model index was not set!'
        # fetch each candidate flow model once (was fetched twice per branch)
        V_model = self.fetchIndex(idxDict, 'rans2p')
        if V_model is None:
            V_model = self.fetchIndex(idxDict, 'rans3p')
        if V_model is None:
            # raise unconditionally: the previous `assert <always-false>` in the
            # else branch is stripped under `python -O`, which would leave
            # V_model unbound below
            raise AssertionError('RANS2P or RANS3PF must be used with moveMeshElastic')
        # COEFFICIENTS
        coeffs = self.p.coefficients
        coeffs.flowModelIndex = V_model
        coeffs.meIndex = ME_model
        coeffs.modelType_block = smFlags
        coeffs.modelParams_block = smTypes
        coeffs.nd = nd
        coeffs.initialize()
        # BOUNDARY CONDITIONS
        BC = self._Problem.SystemPhysics.boundaryConditions
        if domain.useSpatialTools is False or self._Problem.SystemPhysics.useBoundaryConditionsModule is False:
            # BCs supplied directly as callables in the boundaryConditions dict
            self.p.dirichletConditions = {0: BC['hx'],
                                          1: BC['hy']}
            self.p.stressFluxBoundaryConditions = {0: BC['u_stress'],
                                                   1: BC['v_stress']}
            if nd == 3:
                self.p.dirichletConditions[2] = BC['hz']
                self.p.stressFluxBoundaryConditions[2] = BC['w_stress']
        else:
            # BCs resolved per boundary flag through the spatial-tools module
            self.p.dirichletConditions = {0: lambda x, flag: domain.BCbyFlag[flag].hx_dirichlet.uOfXT,
                                          1: lambda x, flag: domain.BCbyFlag[flag].hy_dirichlet.uOfXT}
            self.p.stressFluxBoundaryConditions = {0: lambda x, flag: domain.BCbyFlag[flag].u_stress.uOfXT,
                                                   1: lambda x, flag: domain.BCbyFlag[flag].v_stress.uOfXT}
            if nd == 3:
                self.p.dirichletConditions[2] = lambda x, flag: domain.BCbyFlag[flag].hz_dirichlet.uOfXT
                self.p.stressFluxBoundaryConditions[2] = lambda x, flag: domain.BCbyFlag[flag].w_stress.uOfXT
        self.p.fluxBoundaryConditions = {0: 'noFlow',
                                         1: 'noFlow'}
        self.p.advectiveFluxBoundaryConditions = {}
        self.p.diffusiveFluxBoundaryConditions = {0: {},
                                                  1: {}}
        if nd == 3:
            self.p.fluxBoundaryConditions[2] = 'noFlow'
            self.p.diffusiveFluxBoundaryConditions[2] = {}

    def _initializeNumerics(self):
        """Set FE spaces (one displacement component per dimension) and tolerances."""
        domain = self._Problem.domain
        nd = domain.nd
        # FINITE ELEMENT SPACES
        FESpace = self._Problem.FESpace
        self.n.femSpaces = {0: FESpace['velBasis'],
                            1: FESpace['velBasis']}
        if nd == 3:
            self.n.femSpaces[2] = FESpace['velBasis']
        # TOLERANCES
        meshOptions = self._Problem.domain.MeshOptions
        if self.n.nl_atol_res is None:
            # scale the nonlinear tolerance with the mesh size, floored at minTol
            self.n.nl_atol_res = max(minTol, 0.0001*meshOptions.he**2)
        if self.n.l_atol_res is None:
            self.n.l_atol_res = 0.1*self.n.nl_atol_res

    def _initializePETScOptions(self):
        """Direct SuperLU_DIST solve when requested; otherwise CG + ASM with
        per-subdomain LU (SuperLU) subsolves."""
        prefix = self.n.linear_solver_options_prefix
        if self._Problem.SystemNumerics.useSuperlu:
            self.OptDB.setValue(prefix+'ksp_type', 'preonly')
            self.OptDB.setValue(prefix+'pc_type', 'lu')
            self.OptDB.setValue(prefix+'pc_factor_mat_solver_type', 'superlu_dist')
        else:
            self.OptDB.setValue(prefix+'ksp_type', 'cg')
            self.OptDB.setValue(prefix+'pc_type', 'asm')
            self.OptDB.setValue(prefix+'pc_asm_type', 'basic')
            self.OptDB.setValue(prefix+'ksp_max_it', 2000)
            self.OptDB.setValue(prefix+'sub_ksp_type', 'preonly')
            self.OptDB.setValue(prefix+'sub_pc_factor_mat_solver_type', 'superlu')
            self.OptDB.setValue(prefix+'ksp_knoll', 1)
            self.OptDB.setValue(prefix+'sub_pc_type', 'lu')
| mit | b163bc450f9a717ad48bff5d40371fb9 | 43.122039 | 170 | 0.609768 | 3.624424 | false | false | false | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.