text_prompt stringlengths 157 13.1k | code_prompt stringlengths 7 19.8k ⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def private_key_to_address(private_key: Union[str, bytes]) -> ChecksumAddress: """ Converts a private key to an Ethereum address. """ |
if isinstance(private_key, str):
private_key_bytes = to_bytes(hexstr=private_key)
else:
private_key_bytes = private_key
pk = PrivateKey(private_key_bytes)
return public_key_to_address(pk.public_key) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def public_key_to_address(public_key: Union[PublicKey, bytes]) -> ChecksumAddress: """ Converts a public key to an Ethereum address. """ |
if isinstance(public_key, PublicKey):
public_key = public_key.format(compressed=False)
assert isinstance(public_key, bytes)
return to_checksum_address(sha3(public_key[1:])[-20:]) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _handle_waited_log(self, event: dict):
""" A subroutine of handle_log Increment self.event_count, forget about waiting, and call the callback if any. """ |
# Identify which registered waiter this log belongs to.
txn_hash = event['transactionHash']
event_name = event['event']
# handle_log only routes events that were registered, so both keys must exist.
assert event_name in self.event_waiting
assert txn_hash in self.event_waiting[event_name]
self.event_count[event_name][txn_hash] += 1
event_entry = self.event_waiting[event_name][txn_hash]
# Once the expected number of occurrences has been seen, stop waiting.
if event_entry.count == self.event_count[event_name][txn_hash]:
self.event_waiting[event_name].pop(txn_hash)
# Call callback function with event
if event_entry.callback:
event_entry.callback(event) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def assert_event(self, txn_hash, event_name, args, timeout=5):
""" Assert that `event_name` is emitted with the `args` For use in tests only. """ |
def assert_args(event):
assert event['args'] == args, f'{event["args"]} == {args}'
self.add(txn_hash=txn_hash, event_name=event_name, callback=assert_args)
self.check(timeout=timeout) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def join_sources(source_module: DeploymentModule, contract_name: str):
""" Use join-contracts.py to concatenate all imported Solidity files. Args: source_module: a module name to look up contracts_source_path() contract_name: 'TokenNetworkRegistry', 'SecretRegistry' etc. """ |
# Destination of the concatenated source; overwritten on every call.
joined_file = Path(__file__).parent.joinpath('joined.sol')
# Import remapping (module name -> absolute source path), passed as JSON.
remapping = {module: str(path) for module, path in contracts_source_path().items()}
command = [
'./utils/join-contracts.py',
'--import-map',
json.dumps(remapping),
str(contracts_source_path_of_deployment_module(
source_module,
).joinpath(contract_name + '.sol')),
str(joined_file),
]
# The helper script's relative path assumes the repository root as cwd.
working_dir = Path(__file__).parent.parent
try:
subprocess.check_call(command, cwd=working_dir)
except subprocess.CalledProcessError as ex:
# Print a copy-pastable reproduction command before re-raising.
print(f'cd {str(working_dir)}; {subprocess.list2cmdline(command)} failed.')
raise ex
return joined_file.read_text() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def etherscan_verify_contract( chain_id: int, apikey: str, source_module: DeploymentModule, contract_name: str, ):
""" Calls Etherscan API for verifying the Solidity source of a contract. Args: chain_id: EIP-155 chain id of the Ethereum chain apikey: key for calling Etherscan API source_module: a module name to look up contracts_source_path() contract_name: 'TokenNetworkRegistry', 'SecretRegistry' etc. """ |
# Resolve the chain-specific Etherscan API endpoint.
etherscan_api = api_of_chain_id[chain_id]
deployment_info = get_contracts_deployment_info(
chain_id=chain_id,
module=source_module,
)
if deployment_info is None:
raise FileNotFoundError(
f'Deployment file not found for chain_id={chain_id} and module={source_module}',
)
contract_manager = ContractManager(contracts_precompiled_path())
# Assemble the full verification payload: flattened source, compiler
# metadata and ABI-encoded constructor arguments.
data = post_data_for_etherscan_verification(
apikey=apikey,
deployment_info=deployment_info['contracts'][contract_name],
source=join_sources(source_module=source_module, contract_name=contract_name),
contract_name=contract_name,
metadata=json.loads(contract_manager.contracts[contract_name]['metadata']),
constructor_args=get_constructor_args(
deployment_info=deployment_info,
contract_name=contract_name,
contract_manager=contract_manager,
),
)
response = requests.post(etherscan_api, data=data)
content = json.loads(response.content.decode())
print(content)
print(f'Status: {content["status"]}; {content["message"]} ; GUID = {content["result"]}')
# Derive the human-facing site URL from the API URL for the manual fallback.
etherscan_url = etherscan_api.replace('api-', '').replace('api', '')
etherscan_url += '/verifyContract2?a=' + data['contractaddress']
manual_submission_guide = f"""Usually a manual submission to Etherscan works.
Visit {etherscan_url}
Use raiden_contracts/deploy/joined.sol."""
# status '1' means the submission itself was accepted (a GUID was issued).
if content['status'] != '1':
if content['result'] == 'Contract source code already verified':
return
else:
raise ValueError(
'Etherscan submission failed for an unknown reason\n' +
manual_submission_guide,
)
# submission succeeded, obtained GUID
guid = content['result']
status = '0'
retries = 10
# Poll the GUID status; verification on Etherscan's side is asynchronous.
while status == '0' and retries > 0:
retries -= 1
r = guid_status(etherscan_api=etherscan_api, guid=guid)
status = r['status']
if r['result'] == 'Fail - Unable to verify':
raise ValueError(manual_submission_guide)
if r['result'] == 'Pass - Verified':
return
print('Retrying...')
sleep(5)
# Ran out of retries without a definitive pass/fail answer.
raise TimeoutError(manual_submission_guide) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def error_removed_option(message: str):
""" Takes a message and returns a callback that raises NoSuchOption if the value is not None. The message is used as an argument to NoSuchOption. """ |
def f(_, param, value):
if value is not None:
raise click.NoSuchOption(
f'--{param.name.replace("_", "-")} is no longer a valid option. ' +
message,
)
return f |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def common_options(func):
"""A decorator that combines commonly appearing @click.option decorators.""" |
# Stack of @click.option decorators shared across CLI commands; the
# wrapper preserves the wrapped command's signature via functools.wraps.
@click.option(
'--private-key',
required=True,
help='Path to a private key store.',
)
@click.option(
'--rpc-provider',
default='http://127.0.0.1:8545',
help='Address of the Ethereum RPC provider',
)
@click.option(
'--wait',
default=300,
help='Max tx wait time in s.',
)
@click.option(
'--gas-price',
default=5,
type=int,
help='Gas price to use in gwei',
)
@click.option(
'--gas-limit',
default=5_500_000,
)
@click.option(
'--contracts-version',
default=None,
help='Contracts version to verify. Current version will be used by default.',
)
@functools.wraps(func)
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
return wrapper |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def contracts_source_path_with_stem(stem):
"""The directory remapping given to the Solidity compiler.""" |
return {
'lib': _BASE.joinpath(stem, 'lib'),
'raiden': _BASE.joinpath(stem, 'raiden'),
'test': _BASE.joinpath(stem, 'test'),
'services': _BASE.joinpath(stem, 'services'),
} |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def compile_contracts(self, target_path: Path) -> ContractManager: """ Store compiled contracts JSON at `target_path`. """ |
# Checksums are embedded in the output file, so compute them first.
self.checksum_contracts()
if self.overall_checksum is None:
raise ContractSourceManagerCompilationError('Checksumming failed.')
contracts_compiled = self._compile_all_contracts()
# Ensure the target directory exists before writing.
target_path.parent.mkdir(parents=True, exist_ok=True)
with target_path.open(mode='w') as target_file:
target_file.write(
json.dumps(
dict(
contracts=contracts_compiled,
contracts_checksums=self.contracts_checksums,
overall_checksum=self.overall_checksum,
contracts_version=None,
),
sort_keys=True,
indent=4,
),
)
# Hand back a manager reading from the freshly written file.
return ContractManager(target_path) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def verify_precompiled_checksums(self, precompiled_path: Path) -> None: """ Compare source code checksums with those from a precompiled file. """ |
# We get the precompiled file data
contracts_precompiled = ContractManager(precompiled_path)
# Silence mypy
assert self.contracts_checksums is not None
# Compare each contract source code checksum with the one from the precompiled file
for contract, checksum in self.contracts_checksums.items():
try:
# Silence mypy
assert contracts_precompiled.contracts_checksums is not None
precompiled_checksum = contracts_precompiled.contracts_checksums[contract]
except KeyError:
# The precompiled file does not know this contract at all.
raise ContractSourceManagerVerificationError(
f'No checksum for {contract}',
)
if precompiled_checksum != checksum:
raise ContractSourceManagerVerificationError(
f'checksum of {contract} does not match {precompiled_checksum} != {checksum}',
)
# Compare the overall source code checksum with the one from the precompiled file
if self.overall_checksum != contracts_precompiled.overall_checksum:
raise ContractSourceManagerVerificationError(
f'overall checksum does not match '
f'{self.overall_checksum} != {contracts_precompiled.overall_checksum}',
) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def checksum_contracts(self) -> None: """Remember the checksum of each source, and the overall checksum.""" |
checksums: Dict[str, str] = {}
for contracts_dir in self.contracts_source_dirs.values():
file: Path
for file in contracts_dir.glob('*.sol'):
checksums[file.name] = hashlib.sha256(file.read_bytes()).hexdigest()
self.overall_checksum = hashlib.sha256(
':'.join(checksums[key] for key in sorted(checksums)).encode(),
).hexdigest()
self.contracts_checksums = checksums |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _hash_pair(first: bytes, second: bytes) -> bytes: """ Computes the hash of the items in lexicographic order """ |
if first is None:
return second
if second is None:
return first
if first > second:
return keccak(second + first)
else:
return keccak(first + second) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def compute_merkle_tree(items: Iterable[bytes]) -> MerkleTree: """ Calculates the merkle root for a given list of items """ |
# Every leaf must be a 32-byte hash.
if not all(isinstance(l, bytes) and len(l) == 32 for l in items):
raise ValueError('Not all items are hashes')
# Leaves are sorted so the tree is deterministic regardless of input order.
leaves = sorted(items)
if len(leaves) == 0:
return MerkleTree(layers=[[EMPTY_MERKLE_ROOT]])
if not len(leaves) == len(set(leaves)):
raise ValueError('The leaves items must not contain duplicate items')
tree = [leaves]
layer = leaves
# Repeatedly pair-and-hash until a single root element remains.
while len(layer) > 1:
# [a, b, c, d, e] -> [(a, b), (c, d), (e, None)]
iterator = iter(layer)
paired_items = zip_longest(iterator, iterator)
layer = [_hash_pair(a, b) for a, b in paired_items]
tree.append(layer)
return MerkleTree(layers=tree) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_merkle_root(merkle_tree: MerkleTree) -> bytes: """ Returns the root element of the merkle tree. """ |
assert merkle_tree.layers, 'the merkle tree layers are empty'
assert merkle_tree.layers[-1], 'the root layer is empty'
return merkle_tree.layers[-1][0] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def contracts_version_expects_deposit_limits(contracts_version: Optional[str]) -> bool: """ Answers whether TokenNetworkRegistry of the contracts_version needs deposit limits """ |
if contracts_version is None:
return True
if contracts_version == '0.3._':
return False
return compare(contracts_version, '0.9.0') > -1 |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _verify_deployed_contract( self, deployment_data: DeployedContracts, contract_name: str, ) -> Contract: """ Verify deployment info against the chain Verifies: - the runtime bytecode - precompiled data against the chain - information stored in deployment_*.json against the chain, except for the constructor arguments, which have to be checked separately. Returns: (onchain_instance, constructor_arguments) """ |
contracts = deployment_data['contracts']
contract_address = contracts[contract_name]['address']
# Bind an ABI-aware contract proxy at the recorded address.
contract_instance = self.web3.eth.contract(
abi=self.contract_manager.get_contract_abi(contract_name),
address=contract_address,
)
# Check that the deployed bytecode matches the precompiled data
blockchain_bytecode = self.web3.eth.getCode(contract_address).hex()
compiled_bytecode = self.contract_manager.get_runtime_hexcode(contract_name)
assert blockchain_bytecode == compiled_bytecode
print(
f'{contract_name} at {contract_address} '
f'matches the compiled data from contracts.json',
)
# Check blockchain transaction hash & block information
receipt = self.web3.eth.getTransactionReceipt(
contracts[contract_name]['transaction_hash'],
)
assert receipt['blockNumber'] == contracts[contract_name]['block_number'], (
f'We have block_number {contracts[contract_name]["block_number"]} in the deployment '
f'info, but {receipt["blockNumber"]} in the transaction receipt from web3.'
)
assert receipt['gasUsed'] == contracts[contract_name]['gas_cost'], (
f'We have gasUsed {contracts[contract_name]["gas_cost"]} in the deployment info, '
f'but {receipt["gasUsed"]} in the transaction receipt from web3.'
)
assert receipt['contractAddress'] == contracts[contract_name]['address'], (
f'We have contractAddress {contracts[contract_name]["address"]} in the deployment info'
f' but {receipt["contractAddress"]} in the transaction receipt from web3.'
)
# Check the contract version
version = contract_instance.functions.contract_version().call()
assert version == deployment_data['contracts_version'], \
f'got {version} expected {deployment_data["contracts_version"]}.' \
f'contract_manager has contracts_version {self.contract_manager.contracts_version}'
# Constructor args are returned raw; the caller checks them separately.
return contract_instance, contracts[contract_name]['constructor_arguments'] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def contracts_data_path(version: Optional[str] = None):
"""Returns the deployment data directory for a version.""" |
if version is None:
return _BASE.joinpath('data')
return _BASE.joinpath(f'data_{version}') |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def contracts_precompiled_path(version: Optional[str] = None) -> Path: """Returns the path of JSON file where the bytecode can be found.""" |
data_path = contracts_data_path(version)
return data_path.joinpath('contracts.json') |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def contracts_deployed_path( chain_id: int, version: Optional[str] = None, services: bool = False, ):
"""Returns the path of the deplolyment data JSON file.""" |
data_path = contracts_data_path(version)
chain_name = ID_TO_NETWORKNAME[chain_id] if chain_id in ID_TO_NETWORKNAME else 'private_net'
return data_path.joinpath(f'deployment_{"services_" if services else ""}{chain_name}.json') |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def merge_deployment_data(dict1: DeployedContracts, dict2: DeployedContracts) -> DeployedContracts: """ Take contents of two deployment JSON files and merge them The dictionary under 'contracts' key will be merged. The 'contracts' contents from different JSON files must not overlap. The contents under other keys must be identical. """ |
if not dict1:
return dict2
if not dict2:
return dict1
common_contracts: Dict[str, DeployedContract] = deepcopy(dict1['contracts'])
assert not common_contracts.keys() & dict2['contracts'].keys()
common_contracts.update(dict2['contracts'])
assert dict2['chain_id'] == dict1['chain_id']
assert dict2['contracts_version'] == dict1['contracts_version']
return {
'contracts': common_contracts,
'chain_id': dict1['chain_id'],
'contracts_version': dict1['contracts_version'],
} |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_contracts_deployment_info( chain_id: int, version: Optional[str] = None, module: DeploymentModule = DeploymentModule.ALL, ) -> Optional[DeployedContracts]: """Reads the deployment data. Returns None if the file is not found. Parameter: module The name of the module. ALL means deployed contracts from all modules that are available for the version. """ |
if module not in DeploymentModule:
raise ValueError(f'Unknown module {module} given to get_contracts_deployment_info()')
# ALL implicitly selects every module.
def module_chosen(to_be_added: DeploymentModule):
return module == to_be_added or module == DeploymentModule.ALL
files: List[Path] = []
if module_chosen(DeploymentModule.RAIDEN):
files.append(contracts_deployed_path(
chain_id=chain_id,
version=version,
services=False,
))
# Explicitly asking for SERVICES on a version without them is an error;
# ALL silently skips them instead (checked below).
if module == DeploymentModule.SERVICES and not version_provides_services(version):
raise ValueError(
f'SERVICES module queried for version {version}, but {version} '
'does not provide service contracts.',
)
if module_chosen(DeploymentModule.SERVICES) and version_provides_services(version):
files.append(contracts_deployed_path(
chain_id=chain_id,
version=version,
services=True,
))
deployment_data: DeployedContracts = {} # type: ignore
# Merge all selected deployment files into a single structure.
for f in files:
deployment_data = merge_deployment_data(
deployment_data,
_load_json_from_path(f),
)
# An empty merge result means no file was found: signal with None.
if not deployment_data:
deployment_data = None
return deployment_data |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_contract(self, contract_name: str) -> Dict: """ Return ABI, BIN of the given contract. """ |
# Compilation must have happened before any contract can be looked up.
assert self.contracts, 'ContractManager should have contracts compiled'
return self.contracts[contract_name] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_contract_abi(self, contract_name: str) -> Dict: """ Returns the ABI for a given contract. """ |
# Compilation must have happened before the ABI can be looked up.
assert self.contracts, 'ContractManager should have contracts compiled'
return self.contracts[contract_name]['abi'] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_event_abi(self, contract_name: str, event_name: str) -> Dict: """ Returns the ABI for a given event. """ |
# Import locally to avoid web3 dependency during installation via `compile_contracts`
from web3.utils.contracts import find_matching_event_abi
assert self.contracts, 'ContractManager should have contracts compiled'
contract_abi = self.get_contract_abi(contract_name)
# Let web3 locate the single matching event entry within the ABI.
return find_matching_event_abi(
abi=contract_abi,
event_name=event_name,
) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def check_base_filename(self, record):
""" Determine if builder should occur. record is not used, as we are just comparing times, but it is needed so the method signatures are the same """ |
time_tuple = time.localtime()
if self.suffix_time != time.strftime(self.suffix, time_tuple) or not os.path.exists(
self.baseFilename + '.' + self.suffix_time):
return 1
else:
return 0 |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def build_base_filename(self):
""" do builder; in this case, old time stamp is removed from filename and a new time stamp is append to the filename """ |
# Close the current stream before renaming the target file.
if self.stream:
self.stream.close()
self.stream = None
# remove old suffix
if self.suffix_time != "":
index = self.baseFilename.find("." + self.suffix_time)
if index == -1:
# Fallback: suffix not found verbatim; strip from the last dot.
index = self.baseFilename.rfind(".")
self.baseFilename = self.baseFilename[:index]
# add new suffix
current_time_tuple = time.localtime()
self.suffix_time = time.strftime(self.suffix, current_time_tuple)
self.baseFilename = self.baseFilename + "." + self.suffix_time
# Always append to the (possibly pre-existing) new file.
self.mode = 'a'
if not self.delay:
self.stream = self._open() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _cycle_proceedings(self):
""" Finds next proceedings and clone them for cycling if it exists. """ |
# Approvals that are neither pending nor already cloned are candidates.
next_approvals = self._get_next_approvals().exclude(
status=PENDING).exclude(cloned=True)
for ta in next_approvals:
# get_or_create avoids duplicating an approval cloned in a prior cycle.
clone_transition_approval, c = TransitionApproval.objects.get_or_create(
source_state=ta.source_state,
destination_state=ta.destination_state,
content_type=ta.content_type,
object_id=ta.object_id,
field_name=ta.field_name,
skip=ta.skip,
priority=ta.priority,
enabled=ta.enabled,
status=PENDING,
meta=ta.meta
)
# M2M relations must be copied separately, and only on fresh clones.
if c:
clone_transition_approval.permissions.add(*ta.permissions.all())
clone_transition_approval.groups.add(*ta.groups.all())
# Mark the originals so they are not cloned again next cycle.
next_approvals.update(cloned=True)
return True if next_approvals.count() else False |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def process_subscription_data(post_data):
"""Process the subscription data according to out model""" |
subscription_data = post_data.pop("subscription", {})
# As our database saves the auth and p256dh key in separate field,
# we need to refactor it and insert the auth and p256dh keys in the same dictionary
keys = subscription_data.pop("keys", {})
subscription_data.update(keys)
# Insert the browser name
subscription_data["browser"] = post_data.pop("browser")
return subscription_data |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def save_avatar(strategy, details, user=None, *args, **kwargs):
"""Get user avatar from social provider.""" |
if user:
backend_name = kwargs['backend'].__class__.__name__.lower()
response = kwargs.get('response', {})
social_thumb = None
# Pick the avatar URL scheme supported by the social backend in use.
if 'facebook' in backend_name:
if 'id' in response:
social_thumb = (
'http://graph.facebook.com/{0}/picture?type=normal'
).format(response['id'])
elif 'twitter' in backend_name and response.get('profile_image_url'):
social_thumb = response['profile_image_url']
elif 'googleoauth2' in backend_name and response.get('image', {}).get('url'):
social_thumb = response['image']['url'].split('?')[0]
else:
# Fallback: derive a Gravatar URL from the md5 of the email address.
social_thumb = 'http://www.gravatar.com/avatar/'
social_thumb += hashlib.md5(user.email.lower().encode('utf8')).hexdigest()
social_thumb += '?size=100'
# Persist only when the avatar actually changed.
if social_thumb and user.social_thumb != social_thumb:
user.social_thumb = social_thumb
strategy.storage.user.changed(user) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def default_image_loader(filename, flags, **kwargs):
""" This default image loader just returns filename, rect, and any flags """ |
# No real loading happens; the closure simply echoes back the inputs.
def load(rect=None, flags=None):
return filename, rect, flags
return load |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def decode_gid(raw_gid):
""" Decode a GID from TMX data as of 0.7.0 it determines if the tile should be flipped when rendered as of 0.8.0 bit 30 determines if GID is rotated :param raw_gid: 32-bit number from TMX layer data :return: gid, flags """ |
# The top three bits of a raw GID encode horizontal flip, vertical flip,
# and rotation respectively.
flags = TileFlags(
raw_gid & GID_TRANS_FLIPX == GID_TRANS_FLIPX,
raw_gid & GID_TRANS_FLIPY == GID_TRANS_FLIPY,
raw_gid & GID_TRANS_ROT == GID_TRANS_ROT)
# Clearing the flag bits leaves the plain tile id.
gid = raw_gid & ~(GID_TRANS_FLIPX | GID_TRANS_FLIPY | GID_TRANS_ROT)
return gid, flags |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def convert_to_bool(text):
""" Convert a few common variations of "true" and "false" to boolean :param text: string to test :return: boolean :raises: ValueError """ |
# handle "1" and "0"
try:
return bool(int(text))
except:
pass
text = str(text).lower()
if text == "true":
return True
if text == "yes":
return True
if text == "false":
return False
if text == "no":
return False
raise ValueError |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_properties(node):
""" Parse a Tiled xml node and return a dict that represents a tiled "property" :param node: etree element :return: dict """ |
d = dict()
for child in node.findall('properties'):
for subnode in child.findall('property'):
cls = None
try:
if "type" in subnode.keys():
module = importlib.import_module('builtins')
cls = getattr(module, subnode.get("type"))
except AttributeError:
logger.info("Type [} Not a built-in type. Defaulting to string-cast.")
d[subnode.get('name')] = cls(subnode.get('value')) if cls is not None else subnode.get('value')
return d |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _set_properties(self, node):
""" Create dict containing Tiled object attributes from xml data read the xml attributes and tiled "properties" from a xml node and fill in the values into the object's dictionary. Names will be checked to make sure that they do not conflict with reserved names. :param node: etree element :return: dict """ |
# XML attributes become (cast) object attributes ...
self._cast_and_set_attributes_from_node_items(node.items())
# ... while Tiled <property> children become the properties dict.
properties = parse_properties(node)
# Reject property names that collide with reserved attribute names,
# unless the map explicitly allows duplicates.
if (not self.allow_duplicate_names and
self._contains_invalid_property_name(properties.items())):
self._log_property_error_message()
raise ValueError("Reserved names and duplicate names are not allowed. Please rename your property inside the .tmx-file")
self.properties = properties |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_xml(self, node):
""" Parse a map from ElementTree xml node :param node: ElementTree xml node :return: self """ |
self._set_properties(node)
self.background_color = node.get('backgroundcolor',
self.background_color)
# *** do not change this load order! *** #
# *** gid mapping errors will occur if changed *** #
for subnode in node.findall('layer'):
self.add_layer(TiledTileLayer(self, subnode))
for subnode in node.findall('imagelayer'):
self.add_layer(TiledImageLayer(self, subnode))
for subnode in node.findall('objectgroup'):
self.add_layer(TiledObjectGroup(self, subnode))
for subnode in node.findall('tileset'):
self.add_tileset(TiledTileset(self, subnode))
# "tile objects", objects with a GID, have need to have their attributes
# set after the tileset is loaded, so this step must be performed last
# also, this step is performed for objects to load their tiles.
# tiled stores the origin of GID objects by the lower right corner
# this is different for all other types, so i just adjust it here
# so all types loaded with pytmx are uniform.
# iterate through tile objects and handle the image
for o in [o for o in self.objects if o.gid]:
# gids might also have properties assigned to them
# in that case, assign the gid properties to the object as well
p = self.get_tile_properties_by_gid(o.gid)
if p:
for key in p:
# setdefault: an object's own property wins over the tile's.
o.properties.setdefault(key, p[key])
if self.invert_y:
o.y -= o.height
# Load images last, once all tilesets and layers are known.
self.reload_images()
return self |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def reload_images(self):
""" Load the map images from disk This method will use the image loader passed in the constructor to do the loading or will use a generic default, in which case no images will be loaded. :return: None """ |
# Pre-size the image table: one slot per registered gid.
self.images = [None] * self.maxgid
# iterate through tilesets to get source images
for ts in self.tilesets:
# skip tilesets without a source
if ts.source is None:
continue
# Tileset paths are relative to the map file.
path = os.path.join(os.path.dirname(self.filename), ts.source)
colorkey = getattr(ts, 'trans', None)
loader = self.image_loader(path, colorkey, tileset=ts)
# Enumerate tile rectangles row-by-row, honoring margin and spacing.
p = product(range(ts.margin,
ts.height + ts.margin - ts.tileheight + 1,
ts.tileheight + ts.spacing),
range(ts.margin,
ts.width + ts.margin - ts.tilewidth + 1,
ts.tilewidth + ts.spacing))
# iterate through the tiles
for real_gid, (y, x) in enumerate(p, ts.firstgid):
rect = (x, y, ts.tilewidth, ts.tileheight)
gids = self.map_gid(real_gid)
# gids is None if the tile is never used
# but give another chance to load the gid anyway
if gids is None:
if self.load_all_tiles or real_gid in self.optional_gids:
# TODO: handle flags? - might never be an issue, though
gids = [self.register_gid(real_gid, flags=0)]
if gids:
# flags might rotate/flip the image, so let the loader
# handle that here
for gid, flags in gids:
self.images[gid] = loader(rect, flags)
# load image layer images
for layer in (i for i in self.layers if isinstance(i, TiledImageLayer)):
source = getattr(layer, 'source', None)
if source:
colorkey = getattr(layer, 'trans', None)
# Image layers get a brand-new gid appended past the tile gids.
real_gid = len(self.images)
gid = self.register_gid(real_gid)
layer.gid = gid
path = os.path.join(os.path.dirname(self.filename), source)
loader = self.image_loader(path, colorkey)
image = loader()
self.images.append(image)
# load images in tiles.
# instead of making a new gid, replace the reference to the tile that
# was loaded from the tileset
for real_gid, props in self.tile_properties.items():
source = props.get('source', None)
if source:
colorkey = props.get('trans', None)
path = os.path.join(os.path.dirname(self.filename), source)
loader = self.image_loader(path, colorkey)
image = loader()
self.images[real_gid] = image |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_tile_locations_by_gid(self, gid):
""" Search map for tile locations by the GID Return (int, int, int) tuples, where the layer is index of the visible tile layers. Note: Not a fast operation. Cache results if used often. :param gid: GID to be searched for :rtype: generator of tile locations """ |
for l in self.visible_tile_layers:
for x, y, _gid in [i for i in self.layers[l].iter_data() if i[2] == gid]:
yield x, y, l |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_tile_properties_by_layer(self, layer):
""" Get the tile properties of each GID in layer :param layer: layer number :rtype: iterator of (gid, properties) tuples """ |
try:
assert (int(layer) >= 0)
layer = int(layer)
except (TypeError, AssertionError):
msg = "Layer must be a positive integer. Got {0} instead."
logger.debug(msg.format(type(layer)))
raise ValueError
p = product(range(self.width), range(self.height))
layergids = set(self.layers[layer].data[y][x] for x, y in p)
for gid in layergids:
try:
yield gid, self.tile_properties[gid]
except KeyError:
continue |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_tileset(self, tileset):
""" Add a tileset to the map :param tileset: TiledTileset """ |
# Only genuine TiledTileset instances may be registered on the map.
assert isinstance(tileset, TiledTileset)
self.tilesets.append(tileset)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_layer_by_name(self, name):
"""Return a layer by name :param name: Name of layer. Case-sensitive. :rtype: Layer object if found, otherwise ValueError """ |
try:
return self.layernames[name]
except KeyError:
msg = 'Layer "{0}" not found.'
logger.debug(msg.format(name))
raise ValueError |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_object_by_name(self, name):
"""Find an object :param name: Name of object. Case-sensitive. :rtype: Object if found, otherwise ValueError """ |
for obj in self.objects:
if obj.name == name:
return obj
raise ValueError |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_tileset_from_gid(self, gid):
""" Return tileset that owns the gid Note: this is a slow operation, so if you are expecting to do this often, it would be worthwhile to cache the results of this. :param gid: gid of tile image :rtype: TiledTileset if found, otherwise ValueError """ |
try:
tiled_gid = self.tiledgidmap[gid]
except KeyError:
raise ValueError
for tileset in sorted(self.tilesets, key=attrgetter('firstgid'),
reverse=True):
if tiled_gid >= tileset.firstgid:
return tileset
raise ValueError |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def visible_tile_layers(self):
"""Return iterator of layer indexes that are set 'visible' :rtype: Iterator """ |
return (i for (i, l) in enumerate(self.layers)
if l.visible and isinstance(l, TiledTileLayer)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def visible_object_groups(self):
"""Return iterator of object group indexes that are set 'visible' :rtype: Iterator """ |
return (i for (i, l) in enumerate(self.layers)
if l.visible and isinstance(l, TiledObjectGroup)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def register_gid(self, tiled_gid, flags=None):
""" Used to manage the mapping of GIDs between the tmx and pytmx :param tiled_gid: GID that is found in TMX data :rtype: GID that pytmx uses for the the GID passed """ |
if flags is None:
flags = TileFlags(0, 0, 0)
if tiled_gid:
try:
return self.imagemap[(tiled_gid, flags)][0]
except KeyError:
gid = self.maxgid
self.maxgid += 1
self.imagemap[(tiled_gid, flags)] = (gid, flags)
self.gidmap[tiled_gid].append((gid, flags))
self.tiledgidmap[gid] = tiled_gid
return gid
else:
return 0 |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def map_gid(self, tiled_gid):
""" Used to lookup a GID read from a TMX file's data :param tiled_gid: GID that is found in TMX data :rtype: (GID, flags) for the the GID passed, None if not found """ |
try:
return self.gidmap[int(tiled_gid)]
except KeyError:
return None
except TypeError:
msg = "GIDs must be an integer"
logger.debug(msg)
raise TypeError |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def map_gid2(self, tiled_gid):
""" WIP. need to refactor the gid code :param tiled_gid: :return: """ |
tiled_gid = int(tiled_gid)
# gidmap is a default dict, so cannot trust to raise KeyError
if tiled_gid in self.gidmap:
return self.gidmap[tiled_gid]
else:
gid = self.register_gid(tiled_gid)
return [(gid, None)] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def iter_data(self):
""" Iterate over layer data Yields X, Y, GID tuples for each tile in the layer :return: Generator """ |
for y, row in enumerate(self.data):
for x, gid in enumerate(row):
yield x, y, gid |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def tiles(self):
""" Iterate over tile images of this layer This is an optimised generator function that returns (tile_x, tile_y, tile_image) tuples, :rtype: Generator :return: (x, y, image) tuples """ |
images = self.parent.images
for x, y, gid in [i for i in self.iter_data() if i[2]]:
yield x, y, images[gid] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_xml(self, node):
""" Parse a Tile Layer from ElementTree xml node :param node: ElementTree xml node :return: self """ |
# struct unpacks little-endian 32-bit GIDs; array gives compact storage.
import struct
import array
self._set_properties(node)
data = None
next_gid = None
data_node = node.find('data')
encoding = data_node.get('encoding', None)
if encoding == 'base64':
    from base64 import b64decode
    data = b64decode(data_node.text.strip())
elif encoding == 'csv':
    # NOTE(review): the inner generator iterates the text character by
    # character ("line" is really one char), so stripping each element
    # removes all whitespace before the comma split — confirm intended.
    next_gid = map(int, "".join(
        line.strip() for line in data_node.text.strip()).split(","))
elif encoding:
    msg = 'TMX encoding type: {0} is not supported.'
    logger.error(msg.format(encoding))
    raise Exception
compression = data_node.get('compression', None)
if compression == 'gzip':
    import gzip
    with gzip.GzipFile(fileobj=six.BytesIO(data)) as fh:
        data = fh.read()
elif compression == 'zlib':
    import zlib
    data = zlib.decompress(data)
elif compression:
    msg = 'TMX compression type: {0} is not supported.'
    logger.error(msg.format(compression))
    raise Exception
# if data is None, then it was not decoded or decompressed, so
# we assume here that it is going to be a bunch of tile elements
# TODO: this will/should raise an exception if there are no tiles
# NOTE: chained comparison — true only when BOTH encoding and next_gid
# are None (reads as: encoding == next_gid and next_gid is None).
if encoding == next_gid is None:
    def get_children(parent):
        # uncompressed XML form: one <tile gid="..."> element per cell
        for child in parent.findall('tile'):
            yield int(child.get('gid'))
    next_gid = get_children(data_node)
elif data:
    if type(data) == bytes:
        # binary form: consecutive little-endian unsigned 32-bit GIDs
        fmt = struct.Struct('<L')
        iterator = (data[i:i + 4] for i in range(0, len(data), 4))
        next_gid = (fmt.unpack(i)[0] for i in iterator)
    else:
        msg = 'layer data not in expected format ({})'
        logger.error(msg.format(type(data)))
        raise Exception
init = lambda: [0] * self.width
reg = self.parent.register_gid
# H (16-bit) may be a limitation for very detailed maps
self.data = tuple(array.array('H', init()) for i in range(self.height))
# consume the GID stream row by row, remapping each Tiled GID (plus its
# flip flags) to the internal GID space
for (y, x) in product(range(self.height), range(self.width)):
    self.data[y][x] = reg(*decode_gid(next(next_gid)))
return self
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_xml(self, node):
""" Parse an Object Group from ElementTree xml node :param node: ElementTree xml node :return: self """ |
self._set_properties(node)
self.extend(TiledObject(self.parent, child)
for child in node.findall('object'))
return self |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_xml(self, node):
""" Parse an Object from ElementTree xml node :param node: ElementTree xml node :return: self """ |
def read_points(text):
    """parse a text string of float tuples and return [(x,...),...]
    """
    return tuple(tuple(map(float, i.split(','))) for i in text.split())
self._set_properties(node)
# correctly handle "tile objects" (object with gid set)
if self.gid:
    self.gid = self.parent.register_gid(self.gid)
points = None
polygon = node.find('polygon')
if polygon is not None:
    points = read_points(polygon.get('points'))
    self.closed = True
polyline = node.find('polyline')
if polyline is not None:
    points = read_points(polyline.get('points'))
    self.closed = False
if points:
    # Running min/max extents start at 0, so the object's local origin
    # is always inside the computed bounding box.
    x1 = x2 = y1 = y2 = 0
    for x, y in points:
        if x < x1: x1 = x
        if x > x2: x2 = x
        if y < y1: y1 = y
        if y > y2: y2 = y
    # NOTE(review): width/height span from the most negative to the
    # most positive coordinate relative to the origin — confirm this
    # matches the TMX spec for polygon/polyline objects.
    self.width = abs(x1) + abs(x2)
    self.height = abs(y1) + abs(y2)
    # translate the point list into map coordinates
    self.points = tuple(
        [(i[0] + self.x, i[1] + self.y) for i in points])
return self
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_xml(self, node):
""" Parse an Image Layer from ElementTree xml node :param node: ElementTree xml node :return: self """ |
self._set_properties(node)
self.name = node.get('name', None)
self.opacity = node.get('opacity', self.opacity)
self.visible = node.get('visible', self.visible)
image_node = node.find('image')
self.source = image_node.get('source', None)
self.trans = image_node.get('trans', None)
return self |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def smart_convert(original, colorkey, pixelalpha):
""" this method does several tests on a surface to determine the optimal flags and pixel format for each tile surface. this is done for the best rendering speeds and removes the need to convert() the images on your own """ |
tile_size = original.get_size()
threshold = 127 # the default
try:
# count the number of pixels in the tile that are not transparent
px = pygame.mask.from_surface(original, threshold).count()
except:
# pygame_sdl2 will fail because the mask module is not included
# in this case, just convert_alpha and return it
return original.convert_alpha()
# there are no transparent pixels in the image
if px == tile_size[0] * tile_size[1]:
tile = original.convert()
# there are transparent pixels, and tiled set a colorkey
elif colorkey:
tile = original.convert()
tile.set_colorkey(colorkey, pygame.RLEACCEL)
# there are transparent pixels, and set for perpixel alpha
elif pixelalpha:
tile = original.convert_alpha()
# there are transparent pixels, and we won't handle them
else:
tile = original.convert()
return tile |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pygame_image_loader(filename, colorkey, **kwargs):
""" pytmx image loader for pygame :param filename: :param colorkey: :param kwargs: :return: """ |
if colorkey:
colorkey = pygame.Color('#{0}'.format(colorkey))
pixelalpha = kwargs.get('pixelalpha', True)
image = pygame.image.load(filename)
def load_image(rect=None, flags=None):
if rect:
try:
tile = image.subsurface(rect)
except ValueError:
logger.error('Tile bounds outside bounds of tileset image')
raise
else:
tile = image.copy()
if flags:
tile = handle_transformation(tile, flags)
tile = smart_convert(tile, colorkey, pixelalpha)
return tile
return load_image |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_pygame(filename, *args, **kwargs):
""" Load a TMX file, images, and return a TiledMap class PYGAME USERS: Use me. this utility has 'smart' tile loading. by default any tile without transparent pixels will be loaded for quick blitting. if the tile has transparent pixels, then it will be loaded with per-pixel alpha. this is a per-tile, per-image check. if a color key is specified as an argument, or in the tmx data, the per-pixel alpha will not be used at all. if the tileset's image has colorkey transparency set in Tiled, the util_pygam will return images that have their transparency already set. TL;DR: Don't attempt to convert() or convert_alpha() the individual tiles. It is already done for you. """ |
kwargs['image_loader'] = pygame_image_loader
return pytmx.TiledMap(filename, *args, **kwargs) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def build_rects(tmxmap, layer, tileset=None, real_gid=None):
"""generate a set of non-overlapping rects that represents the distribution of the specified gid. useful for generating rects for use in collision detection GID Note: You will need to add 1 to the GID reported by Tiled. :param tmxmap: TiledMap object :param layer: int or string name of layer :param tileset: int or string name of tileset :param real_gid: Tiled GID of the tile + 1 (see note) :return: List of pygame Rect objects """ |
# Resolve the tileset argument: it may be an index, a name, or None.
if isinstance(tileset, int):
    try:
        tileset = tmxmap.tilesets[tileset]
    except IndexError:
        msg = "Tileset #{0} not found in map {1}."
        logger.debug(msg.format(tileset, tmxmap))
        raise IndexError
elif isinstance(tileset, str):
    try:
        tileset = [t for t in tmxmap.tilesets if t.name == tileset].pop()
    except IndexError:
        msg = "Tileset \"{0}\" not found in map {1}."
        logger.debug(msg.format(tileset, tmxmap))
        raise ValueError
elif tileset:
    msg = "Tileset must be either a int or string. got: {0}"
    logger.debug(msg.format(type(tileset)))
    raise TypeError
gid = None
if real_gid:
    try:
        # NOTE(review): map_gid returns None for unknown GIDs, which
        # would raise TypeError here rather than IndexError — confirm.
        gid, flags = tmxmap.map_gid(real_gid)[0]
    except IndexError:
        msg = "GID #{0} not found"
        logger.debug(msg.format(real_gid))
        raise ValueError
# Resolve the layer argument: index or name.
if isinstance(layer, int):
    layer_data = tmxmap.get_layer_data(layer)
elif isinstance(layer, str):
    try:
        layer = [l for l in tmxmap.layers if l.name == layer].pop()
        layer_data = layer.data
    except IndexError:
        msg = "Layer \"{0}\" not found in map {1}."
        logger.debug(msg.format(layer, tmxmap))
        raise ValueError
# Collect the coordinates of every matching tile (or of any non-empty
# tile when no specific GID was requested)...
p = itertools.product(range(tmxmap.width), range(tmxmap.height))
if gid:
    points = [(x, y) for (x, y) in p if layer_data[y][x] == gid]
else:
    points = [(x, y) for (x, y) in p if layer_data[y][x]]
# ...then merge adjacent tiles into as few rects as possible.
rects = simplify(points, tmxmap.tilewidth, tmxmap.tileheight)
return rects
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pyglet_image_loader(filename, colorkey, **kwargs):
"""basic image loading with pyglet returns pyglet Images, not textures This is a basic proof-of-concept and is likely to fail in some situations. Missing: Transparency Tile Rotation This is slow as well. """ |
if colorkey:
logger.debug('colorkey not implemented')
image = pyglet.image.load(filename)
def load_image(rect=None, flags=None):
if rect:
try:
x, y, w, h = rect
y = image.height - y - h
tile = image.get_region(x, y, w, h)
except:
logger.error('cannot get region %s of image', rect)
raise
else:
tile = image
if flags:
logger.error('tile flags are not implemented')
return tile
return load_image |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def reduce_wind_speed(wind_speed, wind_efficiency_curve_name='dena_mean'):
r""" Reduces wind speed by a wind efficiency curve. The wind efficiency curves are provided in the windpowerlib and were calculated in the dena-Netzstudie II and in the work of Knorr (see [1]_ and [2]_). Parameters wind_speed : pandas.Series or numpy.array Wind speed time series. wind_efficiency_curve_name : string Name of the wind efficiency curve. Use :py:func:`~.get_wind_efficiency_curve` to get all provided wind efficiency curves. Default: 'dena_mean'. Returns ------- reduced_wind_speed : pd.Series or np.array `wind_speed` reduced by wind efficiency curve. References .. [1] Kohler et.al.: "dena-Netzstudie II. Integration erneuerbarer Energien in die deutsche Stromversorgung im Zeitraum 2015 – 2020 mit Ausblick 2025.", Deutsche Energie-Agentur GmbH (dena), Tech. rept., 2010, p. 101 .. [2] Knorr, K.: "Modellierung von raum-zeitlichen Eigenschaften der Windenergieeinspeisung für wetterdatenbasierte Windleistungssimulationen". Universität Kassel, Diss., 2016, p. 124 """ |
# Get wind efficiency curve
wind_efficiency_curve = get_wind_efficiency_curve(
curve_name=wind_efficiency_curve_name)
# Reduce wind speed by wind efficiency
reduced_wind_speed = wind_speed * np.interp(
wind_speed, wind_efficiency_curve['wind_speed'],
wind_efficiency_curve['efficiency'])
return reduced_wind_speed |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_weather_data(filename='weather.csv', **kwargs):
r""" Imports weather data from a file. The data include wind speed at two different heights in m/s, air temperature in two different heights in K, surface roughness length in m and air pressure in Pa. The file is located in the example folder of the windpowerlib. The height in m for which the data applies is specified in the second row. Parameters filename : string Filename of the weather data file. Default: 'weather.csv'. Other Parameters datapath : string, optional Path where the weather data file is stored. Default: 'windpowerlib/example'. Returns ------- weather_df : pandas.DataFrame DataFrame with time series for wind speed `wind_speed` in m/s, temperature `temperature` in K, roughness length `roughness_length` in m, and pressure `pressure` in Pa. The columns of the DataFrame are a MultiIndex where the first level contains the variable name as string (e.g. 'wind_speed') and the second level contains the height as integer at which it applies (e.g. 10, if it was measured at a height of 10 m). """ |
if 'datapath' not in kwargs:
    # default to the `example` directory shipped next to the package
    kwargs['datapath'] = os.path.join(os.path.split(
        os.path.dirname(__file__))[0], 'example')
file = os.path.join(kwargs['datapath'], filename)
# read csv file; the two header rows become a (variable, height)
# MultiIndex on the columns
weather_df = pd.read_csv(
    file, index_col=0, header=[0, 1],
    date_parser=lambda idx: pd.to_datetime(idx, utc=True))
# change type of index to datetime and set time zone
weather_df.index = pd.to_datetime(weather_df.index).tz_convert(
    'Europe/Berlin')
# change type of height from str to int by resetting columns
# NOTE(review): MultiIndex `.codes` requires pandas >= 0.24 (older
# releases exposed this as `.labels`) — confirm the pinned version.
weather_df.columns = [weather_df.axes[1].levels[0][
                          weather_df.axes[1].codes[0]],
                      weather_df.axes[1].levels[1][
                          weather_df.axes[1].codes[1]].astype(int)]
return weather_df
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run_example():
r""" Runs the basic example. """ |
# Load the example weather data, set up the three turbines, then run the
# power-output calculation and present the results.
weather_data = get_weather_data('weather.csv')
turbines = initialize_wind_turbines()
calculate_power_output(weather_data, *turbines)
plot_or_print(*turbines)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mean_hub_height(self):
r""" Calculates the mean hub height of the wind farm. The mean hub height of a wind farm is necessary for power output calculations with an aggregated wind farm power curve containing wind turbines with different hub heights. Hub heights of wind turbines with higher nominal power weigh more than others. Assigns the hub height to the wind farm object. Returns ------- self Notes ----- The following equation is used [1]_: .. math:: h_{WF} = e^{\sum\limits_{k}{ln(h_{WT,k})} \frac{P_{N,k}}{\sum\limits_{k}{P_{N,k}}}} with: :math:`h_{WF}`: mean hub height of wind farm, :math:`h_{WT,k}`: hub height of the k-th wind turbine of a wind farm, :math:`P_{N,k}`: nominal power of the k-th wind turbine References .. [1] Knorr, K.: "Modellierung von raum-zeitlichen Eigenschaften der Windenergieeinspeisung für wetterdatenbasierte Windleistungssimulationen". Universität Kassel, Diss., 2016, p. 35 """ |
self.hub_height = np.exp(
sum(np.log(wind_dict['wind_turbine'].hub_height) *
wind_dict['wind_turbine'].nominal_power *
wind_dict['number_of_turbines']
for wind_dict in self.wind_turbine_fleet) /
self.get_installed_power())
return self |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def logarithmic_profile(wind_speed, wind_speed_height, hub_height, roughness_length, obstacle_height=0.0):
r""" Calculates the wind speed at hub height using a logarithmic wind profile. The logarithmic height equation is used. There is the possibility of including the height of the surrounding obstacles in the calculation. This function is carried out when the parameter `wind_speed_model` of an instance of the :class:`~.modelchain.ModelChain` class is 'logarithmic'. Parameters wind_speed : pandas.Series or numpy.array Wind speed time series. wind_speed_height : float Height for which the parameter `wind_speed` applies. hub_height : float Hub height of wind turbine. roughness_length : pandas.Series or numpy.array or float Roughness length. obstacle_height : float Height of obstacles in the surrounding area of the wind turbine. Set `obstacle_height` to zero for wide spread obstacles. Default: 0. Returns ------- pandas.Series or numpy.array Wind speed at hub height. Data type depends on type of `wind_speed`. Notes ----- The following equation is used [1]_, [2]_, [3]_: .. math:: v_{wind,hub}=v_{wind,data}\cdot \frac{\ln\left(\frac{h_{hub}-d}{z_{0}}\right)}{\ln\left( \frac{h_{data}-d}{z_{0}}\right)} with: v: wind speed, h: height, :math:`z_{0}`: roughness length, d: boundary layer offset (estimated by d = 0.7 * `obstacle_height`) For d = 0 it results in the following equation [2]_, [3]_: .. math:: v_{wind,hub}=v_{wind,data}\cdot\frac{\ln\left(\frac{h_{hub}} {z_{0}}\right)}{\ln\left(\frac{h_{data}}{z_{0}}\right)} :math:`h_{data}` is the height at which the wind speed :math:`v_{wind,data}` is measured and :math:`v_{wind,hub}` is the wind speed at hub height :math:`h_{hub}` of the wind turbine. Parameters `wind_speed_height`, `roughness_length`, `hub_height` and `obstacle_height` have to be of the same unit. References .. [1] Quaschning V.: "Regenerative Energiesysteme". München, Hanser Verlag, 2011, p. 278 .. [2] Gasch, R., Twele, J.: "Windkraftanlagen". 6. Auflage, Wiesbaden, Vieweg + Teubner, 2010, p. 129 .. 
[3] Hau, E.: "Windkraftanlagen - Grundlagen, Technik, Einsatz, Wirtschaftlichkeit". 4. Auflage, Springer-Verlag, 2008, p. 515 """ |
if 0.7 * obstacle_height > wind_speed_height:
raise ValueError("To take an obstacle height of {0} m ".format(
obstacle_height) + "into consideration, wind " +
"speed data of a greater height is needed.")
# Return np.array if wind_speed is np.array
if (isinstance(wind_speed, np.ndarray) and
isinstance(roughness_length, pd.Series)):
roughness_length = np.array(roughness_length)
return (wind_speed * np.log((hub_height - 0.7 * obstacle_height) /
roughness_length) /
np.log((wind_speed_height - 0.7 * obstacle_height) /
roughness_length)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def hellman(wind_speed, wind_speed_height, hub_height, roughness_length=None, hellman_exponent=None):
r""" Calculates the wind speed at hub height using the hellman equation. It is assumed that the wind profile follows a power law. This function is carried out when the parameter `wind_speed_model` of an instance of the :class:`~.modelchain.ModelChain` class is 'hellman'. Parameters wind_speed : pandas.Series or numpy.array Wind speed time series. wind_speed_height : float Height for which the parameter `wind_speed` applies. hub_height : float Hub height of wind turbine. roughness_length : pandas.Series or numpy.array or float Roughness length. If given and `hellman_exponent` is None: `hellman_exponent` = 1 / ln(hub_height/roughness_length), otherwise `hellman_exponent` = 1/7. Default: None. hellman_exponent : None or float The Hellman exponent, which combines the increase in wind speed due to stability of atmospheric conditions and surface roughness into one constant. If None and roughness length is given `hellman_exponent` = 1 / ln(hub_height/roughness_length), otherwise `hellman_exponent` = 1/7. Default: None. Returns ------- pandas.Series or numpy.array Wind speed at hub height. Data type depends on type of `wind_speed`. Notes ----- The following equation is used [1]_, [2]_, [3]_: .. math:: v_{wind,hub}=v_{wind,data}\cdot \left(\frac{h_{hub}}{h_{data}} \right)^\alpha with: v: wind speed, h: height, :math:`\alpha`: Hellman exponent :math:`h_{data}` is the height in which the wind speed :math:`v_{wind,data}` is measured and :math:`v_{wind,hub}` is the wind speed at hub height :math:`h_{hub}` of the wind turbine. For the Hellman exponent :math:`\alpha` many studies use a value of 1/7 for onshore and a value of 1/9 for offshore. The Hellman exponent can also be calulated by the following equation [2]_, [3]_: .. math:: \alpha = \frac{1}{\ln\left(\frac{h_{hub}}{z_0} \right)} with: :math:`z_{0}`: roughness length Parameters `wind_speed_height`, `roughness_length`, `hub_height` and `obstacle_height` have to be of the same unit. References .. 
[1] Sharp, E.: "Spatiotemporal disaggregation of GB scenarios depicting increased wind capacity and electrified heat demand in dwellings". UCL, Energy Institute, 2015, p. 83 .. [2] Hau, E.: "Windkraftanlagen - Grundlagen, Technik, Einsatz, Wirtschaftlichkeit". 4. Auflage, Springer-Verlag, 2008, p. 517 .. [3] Quaschning V.: "Regenerative Energiesysteme". München, Hanser Verlag, 2011, p. 279 """ |
if hellman_exponent is None:
if roughness_length is not None:
# Return np.array if wind_speed is np.array
if (isinstance(wind_speed, np.ndarray) and
isinstance(roughness_length, pd.Series)):
roughness_length = np.array(roughness_length)
hellman_exponent = 1 / np.log(hub_height / roughness_length)
else:
hellman_exponent = 1/7
return wind_speed * (hub_height / wind_speed_height) ** hellman_exponent |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def smooth_power_curve(power_curve_wind_speeds, power_curve_values, block_width=0.5, wind_speed_range=15.0, standard_deviation_method='turbulence_intensity', mean_gauss=0, **kwargs):
r""" Smooths the input power curve values by using a Gauss distribution. The smoothing serves for taking the distribution of wind speeds over space into account. Parameters power_curve_wind_speeds : pandas.Series or numpy.array Wind speeds in m/s for which the power curve values are provided in `power_curve_values`. power_curve_values : pandas.Series or numpy.array Power curve values corresponding to wind speeds in `power_curve_wind_speeds`. block_width : float Width between the wind speeds in the sum of equation :eq:`power`. Default: 0.5. wind_speed_range : float The sum in the equation below is taken for this wind speed range below and above the power curve wind speed. Default: 15.0. standard_deviation_method : string Method for calculating the standard deviation for the Gauss distribution. Options: 'turbulence_intensity', 'Staffell_Pfenninger'. Default: 'turbulence_intensity'. mean_gauss : float Mean of the Gauss distribution in :py:func:`~.tools.gauss_distribution`. Default: 0. Other Parameters turbulence intensity : float, optional Turbulence intensity at hub height of the wind turbine, wind farm or wind turbine cluster the power curve is smoothed for. Returns ------- smoothed_power_curve_df : pd.DataFrame Smoothed power curve. DataFrame has 'wind_speed' and 'value' columns with wind speeds in m/s and the corresponding power curve value in W. Notes ----- The following equation is used to calculated the power curves values of the smoothed power curve [1]_: .. 
math:: P_{smoothed}(v_{std}) = \sum\limits_{v_i} \Delta v_i \cdot P(v_i) \cdot \frac{1}{\sigma \sqrt{2 \pi}} \exp \left[-\frac{(v_{std} - v_i -\mu)^2}{2 \sigma^2} \right] :label: power with: P: power [W], v: wind speed [m/s], :math:`\sigma`: standard deviation (Gauss), :math:`\mu`: mean (Gauss) :math:`P_{smoothed}` is the smoothed power curve value, :math:`v_{std}` is the standard wind speed in the power curve, :math:`\Delta v_i` is the interval length between :math:`v_\text{i}` and :math:`v_\text{i+1}` Power curve smoothing is applied to take account for the spatial distribution of wind speed. This way of smoothing power curves is also used in [2]_ and [3]_. The standard deviation :math:`\sigma` of the above equation can be calculated by the following methods. 'turbulence_intensity' [2]_: .. math:: \sigma = v_\text{std} \cdot \sigma_\text{n} = v_\text{std} \cdot TI with: TI: turbulence intensity 'Staffell_Pfenninger' [4]_: .. math:: \sigma = 0.6 \cdot 0.2 \cdot v_\text{std} References .. [1] Knorr, K.: "Modellierung von raum-zeitlichen Eigenschaften der Windenergieeinspeisung für wetterdatenbasierte Windleistungssimulationen". Universität Kassel, Diss., 2016, p. 106 .. [2] Nørgaard, P. and Holttinen, H.: "A Multi-Turbine and Power Curve Approach". Nordic Wind Power Conference, 1.–2.3.2004, 2000, p. 5 .. [3] Kohler, S. and Agricola, A.-Cl. and Seidl, H.: "dena-Netzstudie II. Integration erneuerbarer Energien in die deutsche Stromversorgung im Zeitraum 2015 – 2020 mit Ausblick 2025". Technical report, 2010. .. [4] Staffell, I. and Pfenninger, S.: "Using Bias-Corrected Reanalysis to Simulate Current and Future Wind Power Output". 2005, p. 11 """ |
# Specify normalized standard deviation
if standard_deviation_method == 'turbulence_intensity':
if ('turbulence_intensity' in kwargs and
kwargs['turbulence_intensity'] is not np.nan):
normalized_standard_deviation = kwargs['turbulence_intensity']
else:
raise ValueError("Turbulence intensity must be defined for " +
"using 'turbulence_intensity' as " +
"`standard_deviation_method`")
elif standard_deviation_method == 'Staffell_Pfenninger':
normalized_standard_deviation = 0.2
else:
raise ValueError("{} is no valid `standard_deviation_method`. Valid "
+ "options are 'turbulence_intensity', or "
+ "'Staffell_Pfenninger'".format(
standard_deviation_method))
# Initialize list for power curve values
smoothed_power_curve_values = []
# Append wind speeds to `power_curve_wind_speeds`
maximum_value = power_curve_wind_speeds.values[-1] + wind_speed_range
while power_curve_wind_speeds.values[-1] < maximum_value:
power_curve_wind_speeds = power_curve_wind_speeds.append(
pd.Series(power_curve_wind_speeds.iloc[-1] + 0.5,
index=[power_curve_wind_speeds.index[-1] + 1]))
power_curve_values = power_curve_values.append(
pd.Series(0.0, index=[power_curve_values.index[-1] + 1]))
for power_curve_wind_speed in power_curve_wind_speeds:
# Create array of wind speeds for the sum
wind_speeds_block = (np.arange(
-wind_speed_range, wind_speed_range + block_width, block_width) +
power_curve_wind_speed)
# Get standard deviation for Gauss function
standard_deviation = (
(power_curve_wind_speed * normalized_standard_deviation + 0.6)
if standard_deviation_method is 'Staffell_Pfenninger'
else power_curve_wind_speed * normalized_standard_deviation)
# Get the smoothed value of the power output
if standard_deviation == 0.0:
# The gaussian distribution is not defined for a standard deviation
# of zero. Smoothed power curve value is set to zero.
smoothed_value = 0.0
else:
smoothed_value = sum(
block_width * np.interp(wind_speed, power_curve_wind_speeds,
power_curve_values, left=0, right=0) *
tools.gauss_distribution(
power_curve_wind_speed - wind_speed,
standard_deviation, mean_gauss)
for wind_speed in wind_speeds_block)
# Add value to list - add zero if `smoothed_value` is nan as Gauss
# distribution for a standard deviation of zero.
smoothed_power_curve_values.append(smoothed_value)
# Create smoothed power curve data frame
smoothed_power_curve_df = pd.DataFrame(
data=[list(power_curve_wind_speeds.values),
smoothed_power_curve_values]).transpose()
# Rename columns of the data frame
smoothed_power_curve_df.columns = ['wind_speed', 'value']
return smoothed_power_curve_df |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def temperature_hub(self, weather_df):
r""" Calculates the temperature of air at hub height. The temperature is calculated using the method specified by the parameter `temperature_model`. Parameters weather_df : pandas.DataFrame DataFrame with time series for temperature `temperature` in K. The columns of the DataFrame are a MultiIndex where the first level contains the variable name (e.g. temperature) and the second level contains the height at which it applies (e.g. 10, if it was measured at a height of 10 m). See documentation of :func:`ModelChain.run_model` for an example on how to create the weather_df DataFrame. Returns ------- temperature_hub : pandas.Series or numpy.array Temperature of air in K at hub height. Notes ----- If `weather_df` contains temperatures at different heights the given temperature(s) closest to the hub height are used. """ |
if self.power_plant.hub_height in weather_df['temperature']:
temperature_hub = weather_df['temperature'][
self.power_plant.hub_height]
elif self.temperature_model == 'linear_gradient':
logging.debug('Calculating temperature using temperature '
'gradient.')
closest_height = weather_df['temperature'].columns[
min(range(len(weather_df['temperature'].columns)),
key=lambda i: abs(weather_df['temperature'].columns[i] -
self.power_plant.hub_height))]
temperature_hub = temperature.linear_gradient(
weather_df['temperature'][closest_height], closest_height,
self.power_plant.hub_height)
elif self.temperature_model == 'interpolation_extrapolation':
logging.debug('Calculating temperature using linear inter- or '
'extrapolation.')
temperature_hub = tools.linear_interpolation_extrapolation(
weather_df['temperature'], self.power_plant.hub_height)
else:
raise ValueError("'{0}' is an invalid value. ".format(
self.temperature_model) + "`temperature_model` must be "
"'linear_gradient' or 'interpolation_extrapolation'.")
return temperature_hub |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def density_hub(self, weather_df):
r""" Calculates the density of air at hub height. The density is calculated using the method specified by the parameter `density_model`. Previous to the calculation of the density the temperature at hub height is calculated using the method specified by the parameter `temperature_model`. Parameters weather_df : pandas.DataFrame DataFrame with time series for temperature `temperature` in K, pressure `pressure` in Pa and/or density `density` in kg/m³, depending on the `density_model` used. The columns of the DataFrame are a MultiIndex where the first level contains the variable name (e.g. temperature) and the second level contains the height at which it applies (e.g. 10, if it was measured at a height of 10 m). See documentation of :func:`ModelChain.run_model` for an example on how to create the weather_df DataFrame. Returns ------- density_hub : pandas.Series or numpy.array Density of air in kg/m³ at hub height. Notes ----- If `weather_df` contains data at different heights the data closest to the hub height are used. If `interpolation_extrapolation` is used to calculate the density at hub height, the `weather_df` must contain at least two time series for density. """ |
if self.density_model != 'interpolation_extrapolation':
temperature_hub = self.temperature_hub(weather_df)
# Calculation of density in kg/m³ at hub height
if self.density_model == 'barometric':
logging.debug('Calculating density using barometric height '
'equation.')
closest_height = weather_df['pressure'].columns[
min(range(len(weather_df['pressure'].columns)),
key=lambda i: abs(weather_df['pressure'].columns[i] -
self.power_plant.hub_height))]
density_hub = density.barometric(
weather_df['pressure'][closest_height], closest_height,
self.power_plant.hub_height, temperature_hub)
elif self.density_model == 'ideal_gas':
logging.debug('Calculating density using ideal gas equation.')
closest_height = weather_df['pressure'].columns[
min(range(len(weather_df['pressure'].columns)),
key=lambda i: abs(weather_df['pressure'].columns[i] -
self.power_plant.hub_height))]
density_hub = density.ideal_gas(
weather_df['pressure'][closest_height], closest_height,
self.power_plant.hub_height, temperature_hub)
elif self.density_model == 'interpolation_extrapolation':
logging.debug('Calculating density using linear inter- or '
'extrapolation.')
density_hub = tools.linear_interpolation_extrapolation(
weather_df['density'], self.power_plant.hub_height)
else:
raise ValueError("'{0}' is an invalid value. ".format(
self.density_model) + "`density_model` " +
"must be 'barometric', 'ideal_gas' or " +
"'interpolation_extrapolation'.")
return density_hub |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def wind_speed_hub(self, weather_df):
r""" Calculates the wind speed at hub height. The method specified by the parameter `wind_speed_model` is used. Parameters weather_df : pandas.DataFrame DataFrame with time series for wind speed `wind_speed` in m/s and roughness length `roughness_length` in m. The columns of the DataFrame are a MultiIndex where the first level contains the variable name (e.g. wind_speed) and the second level contains the height at which it applies (e.g. 10, if it was measured at a height of 10 m). See documentation of :func:`ModelChain.run_model` for an example on how to create the weather_df DataFrame. Returns ------- wind_speed_hub : pandas.Series or numpy.array Wind speed in m/s at hub height. Notes ----- If `weather_df` contains wind speeds at different heights the given wind speed(s) closest to the hub height are used. """ |
if self.power_plant.hub_height in weather_df['wind_speed']:
wind_speed_hub = weather_df['wind_speed'][
self.power_plant.hub_height]
elif self.wind_speed_model == 'logarithmic':
logging.debug('Calculating wind speed using logarithmic wind '
'profile.')
closest_height = weather_df['wind_speed'].columns[
min(range(len(weather_df['wind_speed'].columns)),
key=lambda i: abs(weather_df['wind_speed'].columns[i] -
self.power_plant.hub_height))]
wind_speed_hub = wind_speed.logarithmic_profile(
weather_df['wind_speed'][closest_height], closest_height,
self.power_plant.hub_height,
weather_df['roughness_length'].iloc[:, 0],
self.obstacle_height)
elif self.wind_speed_model == 'hellman':
logging.debug('Calculating wind speed using hellman equation.')
closest_height = weather_df['wind_speed'].columns[
min(range(len(weather_df['wind_speed'].columns)),
key=lambda i: abs(weather_df['wind_speed'].columns[i] -
self.power_plant.hub_height))]
wind_speed_hub = wind_speed.hellman(
weather_df['wind_speed'][closest_height], closest_height,
self.power_plant.hub_height,
weather_df['roughness_length'].iloc[:, 0],
self.hellman_exp)
elif self.wind_speed_model == 'interpolation_extrapolation':
logging.debug('Calculating wind speed using linear inter- or '
'extrapolation.')
wind_speed_hub = tools.linear_interpolation_extrapolation(
weather_df['wind_speed'], self.power_plant.hub_height)
elif self.wind_speed_model == 'log_interpolation_extrapolation':
logging.debug('Calculating wind speed using logarithmic inter- or '
'extrapolation.')
wind_speed_hub = tools.logarithmic_interpolation_extrapolation(
weather_df['wind_speed'], self.power_plant.hub_height)
else:
raise ValueError("'{0}' is an invalid value. ".format(
self.wind_speed_model) + "`wind_speed_model` must be "
"'logarithmic', 'hellman', 'interpolation_extrapolation' " +
"or 'log_interpolation_extrapolation'.")
return wind_speed_hub |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def calculate_power_output(self, wind_speed_hub, density_hub):
r""" Calculates the power output of the wind power plant. The method specified by the parameter `power_output_model` is used. Parameters wind_speed_hub : pandas.Series or numpy.array Wind speed at hub height in m/s. density_hub : pandas.Series or numpy.array Density of air at hub height in kg/m³. Returns ------- pandas.Series Electrical power output of the wind turbine in W. """ |
if self.power_output_model == 'power_curve':
if self.power_plant.power_curve is None:
raise TypeError("Power curve values of " +
self.power_plant.name +
" are missing.")
logging.debug('Calculating power output using power curve.')
return (power_output.power_curve(
wind_speed_hub,
self.power_plant.power_curve['wind_speed'],
self.power_plant.power_curve['value'],
density_hub, self.density_correction))
elif self.power_output_model == 'power_coefficient_curve':
if self.power_plant.power_coefficient_curve is None:
raise TypeError("Power coefficient curve values of " +
self.power_plant.name +
" are missing.")
logging.debug('Calculating power output using power coefficient '
'curve.')
return (power_output.power_coefficient_curve(
wind_speed_hub,
self.power_plant.power_coefficient_curve[
'wind_speed'],
self.power_plant.power_coefficient_curve[
'value'],
self.power_plant.rotor_diameter, density_hub))
else:
raise ValueError("'{0}' is an invalid value. ".format(
self.power_output_model) +
"`power_output_model` must be " +
"'power_curve' or 'power_coefficient_curve'.") |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def linear_interpolation_extrapolation(df, target_height):
r""" Linear inter- or extrapolates between the values of a data frame. This function can be used for the inter-/extrapolation of a parameter (e.g wind speed) available at two or more different heights, to approximate the value at hub height. The function is carried out when the parameter `wind_speed_model`, `density_model` or `temperature_model` of an instance of the :class:`~.modelchain.ModelChain` class is 'interpolation_extrapolation'. Parameters df : pandas.DataFrame DataFrame with time series for parameter that is to be interpolated or extrapolated. The columns of the DataFrame are the different heights for which the parameter is available. If more than two heights are given, the two closest heights are used. See example below on how the DataFrame should look like and how the function can be used. target_height : float Height for which the parameter is approximated (e.g. hub height). Returns ------- pandas.Series Result of the inter-/extrapolation (e.g. wind speed at hub height). Notes ----- For the inter- and extrapolation the following equation is used: .. math:: f(x) = \frac{(f(x_2) - f(x_1))}{(x_2 - x_1)} \cdot (x - x_1) + f(x_1) Examples --------- """ |
# find closest heights
heights_sorted = df.columns[
sorted(range(len(df.columns)),
key=lambda i: abs(df.columns[i] - target_height))]
return ((df[heights_sorted[1]] - df[heights_sorted[0]]) /
(heights_sorted[1] - heights_sorted[0]) *
(target_height - heights_sorted[0]) + df[heights_sorted[0]]) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def logarithmic_interpolation_extrapolation(df, target_height):
r""" Logarithmic inter- or extrapolates between the values of a data frame. This function can be used for the inter-/extrapolation of the wind speed if it is available at two or more different heights, to approximate the value at hub height. The function is carried out when the parameter `wind_speed_model` :class:`~.modelchain.ModelChain` class is 'log_interpolation_extrapolation'. Parameters df : pandas.DataFrame DataFrame with time series for parameter that is to be interpolated or extrapolated. The columns of the DataFrame are the different heights for which the parameter is available. If more than two heights are given, the two closest heights are used. See example in :py:func:`~.linear_interpolation_extrapolation` on how the DataFrame should look like and how the function can be used. target_height : float Height for which the parameter is approximated (e.g. hub height). Returns ------- pandas.Series Result of the inter-/extrapolation (e.g. wind speed at hub height). Notes ----- For the logarithmic inter- and extrapolation the following equation is used [1]_: .. math:: f(x) = \frac{\ln(x) \cdot (f(x_2) - f(x_1)) - f(x_2) \cdot \ln(x_1) + f(x_1) \cdot \ln(x_2)}{\ln(x_2) - \ln(x_1)} References .. [1] Knorr, K.: "Modellierung von raum-zeitlichen Eigenschaften der Windenergieeinspeisung für wetterdatenbasierte Windleistungssimulationen". Universität Kassel, Diss., 2016, p. 83 """ |
# find closest heights
heights_sorted = df.columns[
sorted(range(len(df.columns)),
key=lambda i: abs(df.columns[i] - target_height))]
return ((np.log(target_height) *
(df[heights_sorted[1]] - df[heights_sorted[0]]) -
df[heights_sorted[1]] * np.log(heights_sorted[0]) +
df[heights_sorted[0]] * np.log(heights_sorted[1])) /
(np.log(heights_sorted[1]) - np.log(heights_sorted[0]))) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def gauss_distribution(function_variable, standard_deviation, mean=0):
r""" Gauss distribution. The Gauss distribution is used in the function :py:func:`~.power_curves.smooth_power_curve` for power curve smoothing. Parameters function_variable : float Variable of the gaussian distribution. standard_deviation : float Standard deviation of the Gauss distribution. mean : Float Defines the offset of the Gauss distribution. Default: 0. Returns ------- pandas.Series or numpy.array Wind speed at hub height. Data type depends on the type of `wind_speed`. Notes ----- The following equation is used [1]_: .. math:: f(x) = \frac{1}{\sigma \sqrt{2 \pi}} \exp \left[-\frac{(x-\mu)^2}{2 \sigma^2}\right] with: :math:`\sigma`: standard deviation, :math:`\mu`: mean References .. [1] Berendsen, H.: "A Student's Guide to Data and Error Analysis". New York, Cambridge University Press, 2011, p. 37 """ |
return (1 / (standard_deviation * np.sqrt(2 * np.pi)) *
np.exp(-(function_variable - mean)**2 /
(2 * standard_deviation**2))) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def assign_power_curve(self, weather_df):
r""" Calculates the power curve of the wind turbine cluster. The power curve is aggregated from the wind farms' and wind turbines' power curves by using :func:`power_plant.assign_power_curve`. Depending on the parameters of the WindTurbineCluster power curves are smoothed and/or wake losses are taken into account. Parameters weather_df : pandas.DataFrame DataFrame with time series for wind speed `wind_speed` in m/s, and roughness length `roughness_length` in m, as well as optionally temperature `temperature` in K, pressure `pressure` in Pa, density `density` in kg/m³ and turbulence intensity `turbulence_intensity` depending on `power_output_model`, `density_model` and `standard_deviation_model` chosen. The columns of the DataFrame are a MultiIndex where the first level contains the variable name (e.g. wind_speed) and the second level contains the height at which it applies (e.g. 10, if it was measured at a height of 10 m). See documentation of :func:`TurbineClusterModelChain.run_model` for an example on how to create the weather_df DataFrame. Returns ------- self """ |
# Set turbulence intensity for assigning power curve
turbulence_intensity = (
weather_df['turbulence_intensity'].values.mean() if
'turbulence_intensity' in
weather_df.columns.get_level_values(0) else None)
# Assign power curve
if (self.wake_losses_model == 'power_efficiency_curve' or
self.wake_losses_model == 'constant_efficiency' or
self.wake_losses_model is None):
wake_losses_model_to_power_curve = self.wake_losses_model
if self.wake_losses_model is None:
logging.debug('Wake losses in wind farms not considered.')
else:
logging.debug('Wake losses considered with {}.'.format(
self.wake_losses_model))
else:
logging.debug('Wake losses considered by {} wind '.format(
self.wake_losses_model) + 'efficiency curve.')
wake_losses_model_to_power_curve = None
self.power_plant.assign_power_curve(
wake_losses_model=wake_losses_model_to_power_curve,
smoothing=self.smoothing, block_width=self.block_width,
standard_deviation_method=self.standard_deviation_method,
smoothing_order=self.smoothing_order,
roughness_length=weather_df['roughness_length'][0].mean(),
turbulence_intensity=turbulence_intensity)
# Further logging messages
if self.smoothing is None:
logging.debug('Aggregated power curve not smoothed.')
else:
logging.debug('Aggregated power curve smoothed by method: ' +
self.standard_deviation_method)
return self |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def power_coefficient_curve(wind_speed, power_coefficient_curve_wind_speeds, power_coefficient_curve_values, rotor_diameter, density):
r""" Calculates the turbine power output using a power coefficient curve. This function is carried out when the parameter `power_output_model` of an instance of the :class:`~.modelchain.ModelChain` class is 'power_coefficient_curve'. Parameters wind_speed : pandas.Series or numpy.array Wind speed at hub height in m/s. power_coefficient_curve_wind_speeds : pandas.Series or numpy.array Wind speeds in m/s for which the power coefficients are provided in `power_coefficient_curve_values`. power_coefficient_curve_values : pandas.Series or numpy.array Power coefficients corresponding to wind speeds in `power_coefficient_curve_wind_speeds`. rotor_diameter : float Rotor diameter in m. density : pandas.Series or numpy.array Density of air at hub height in kg/m³. Returns ------- pandas.Series or numpy.array Electrical power output of the wind turbine in W. Data type depends on type of `wind_speed`. Notes ----- The following equation is used if the parameter `density_corr` is False [1]_, [2]_: .. math:: P=\frac{1}{8}\cdot\rho_{hub}\cdot d_{rotor}^{2} \cdot\pi\cdot v_{wind}^{3}\cdot cp\left(v_{wind}\right) with: P: power [W], :math:`\rho`: density [kg/m³], d: diameter [m], v: wind speed [m/s], cp: power coefficient It is assumed that the power output for wind speeds above the maximum and below the minimum wind speed given in the power coefficient curve is zero. References .. [1] Gasch, R., Twele, J.: "Windkraftanlagen". 6. Auflage, Wiesbaden, Vieweg + Teubner, 2010, pages 35ff, 208 .. [2] Hau, E.: "Windkraftanlagen - Grundlagen, Technik, Einsatz, Wirtschaftlichkeit". 4. Auflage, Springer-Verlag, 2008, p. 542 """ |
power_coefficient_time_series = np.interp(
wind_speed, power_coefficient_curve_wind_speeds,
power_coefficient_curve_values, left=0, right=0)
power_output = (1 / 8 * density * rotor_diameter ** 2 * np.pi *
np.power(wind_speed, 3) *
power_coefficient_time_series)
# Power_output as pd.Series if wind_speed is pd.Series (else: np.array)
if isinstance(wind_speed, pd.Series):
power_output = pd.Series(data=power_output, index=wind_speed.index,
name='feedin_power_plant')
else:
power_output = np.array(power_output)
return power_output |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def power_curve(wind_speed, power_curve_wind_speeds, power_curve_values, density=None, density_correction=False):
r""" Calculates the turbine power output using a power curve. This function is carried out when the parameter `power_output_model` of an instance of the :class:`~.modelchain.ModelChain` class is 'power_curve'. If the parameter `density_correction` is True the density corrected power curve (See :py:func:`~.power_curve_density_correction`) is used. Parameters wind_speed : pandas.Series or numpy.array Wind speed at hub height in m/s. power_curve_wind_speeds : pandas.Series or numpy.array Wind speeds in m/s for which the power curve values are provided in `power_curve_values`. power_curve_values : pandas.Series or numpy.array Power curve values corresponding to wind speeds in `power_curve_wind_speeds`. density : pandas.Series or numpy.array Density of air at hub height in kg/m³. This parameter is needed if `density_correction` is True. Default: None. density_correction : boolean If the parameter is True the density corrected power curve is used for the calculation of the turbine power output. In this case `density` cannot be None. Default: False. Returns ------- pandas.Series or numpy.array Electrical power output of the wind turbine in W. Data type depends on type of `wind_speed`. Notes ------- It is assumed that the power output for wind speeds above the maximum and below the minimum wind speed given in the power curve is zero. """ |
if density_correction is False:
power_output = np.interp(wind_speed, power_curve_wind_speeds,
power_curve_values, left=0, right=0)
# Power_output as pd.Series if wind_speed is pd.Series (else: np.array)
if isinstance(wind_speed, pd.Series):
power_output = pd.Series(data=power_output, index=wind_speed.index,
name='feedin_power_plant')
else:
power_output = np.array(power_output)
elif density_correction is True:
power_output = power_curve_density_correction(
wind_speed, power_curve_wind_speeds, power_curve_values, density)
else:
raise TypeError("'{0}' is an invalid type. ".format(type(
density_correction)) + "`density_correction` must " +
"be Boolean (True or False).")
return power_output |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def power_curve_density_correction(wind_speed, power_curve_wind_speeds, power_curve_values, density):
r""" Calculates the turbine power output using a density corrected power curve. This function is carried out when the parameter `density_correction` of an instance of the :class:`~.modelchain.ModelChain` class is True. Parameters wind_speed : pandas.Series or numpy.array Wind speed at hub height in m/s. power_curve_wind_speeds : pandas.Series or numpy.array Wind speeds in m/s for which the power curve values are provided in `power_curve_values`. power_curve_values : pandas.Series or numpy.array Power curve values corresponding to wind speeds in `power_curve_wind_speeds`. density : pandas.Series or numpy.array Density of air at hub height in kg/m³. Returns ------- pandas.Series or numpy.array Electrical power output of the wind turbine in W. Data type depends on type of `wind_speed`. Notes ----- The following equation is used for the site specific power curve wind speeds [1]_, [2]_, [3]_: .. math:: v_{site}=v_{std}\cdot\left(\frac{\rho_0} {\rho_{site}}\right)^{p(v)} with: .. math:: p=\begin{cases} \frac{1}{3} & v_{std} \leq 7.5\text{ m/s}\\ \frac{1}{15}\cdot v_{std}-\frac{1}{6} & 7.5 \text{ m/s}<v_{std}<12.5\text{ m/s}\\ \frac{2}{3} & \geq 12.5 \text{ m/s} \end{cases}, v: wind speed [m/s], :math:`\rho`: density [kg/m³] :math:`v_{std}` is the standard wind speed in the power curve (:math:`v_{std}`, :math:`P_{std}`), :math:`v_{site}` is the density corrected wind speed for the power curve (:math:`v_{site}`, :math:`P_{std}`), :math:`\rho_0` is the ambient density (1.225 kg/m³) and :math:`\rho_{site}` the density at site conditions (and hub height). It is assumed that the power output for wind speeds above the maximum and below the minimum wind speed given in the power curve is zero. References .. [1] Svenningsen, L.: "Power Curve Air Density Correction And Other Power Curve Options in WindPRO". 1st edition, Aalborg, EMD International A/S , 2010, p. 4 .. [2] Svenningsen, L.: "Proposal of an Improved Power Curve Correction". EMD International A/S , 2010 .. 
[3] Biank, M.: "Methodology, Implementation and Validation of a Variable Scale Simulation Model for Windpower based on the Georeferenced Installation Register of Germany". Master's Thesis at Reiner Lemoine Institute, 2014, p. 13 """ |
if density is None:
raise TypeError("`density` is None. For the calculation with a " +
"density corrected power curve density at hub " +
"height is needed.")
power_output = [(np.interp(
wind_speed[i], power_curve_wind_speeds * (1.225 / density[i]) ** (
np.interp(power_curve_wind_speeds, [7.5, 12.5], [1/3, 2/3])),
power_curve_values, left=0, right=0)) for i in range(len(wind_speed))]
# Power_output as pd.Series if wind_speed is pd.Series (else: np.array)
if isinstance(wind_speed, pd.Series):
power_output = pd.Series(data=power_output, index=wind_speed.index,
name='feedin_power_plant')
else:
power_output = np.array(power_output)
return power_output |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mean_hub_height(self):
r""" Calculates the mean hub height of the wind turbine cluster. The mean hub height of a wind turbine cluster is necessary for power output calculations with an aggregated wind turbine cluster power curve. Hub heights of wind farms with higher nominal power weigh more than others. Assigns the hub height to the turbine cluster object. Returns ------- self Notes ----- The following equation is used [1]_: .. math:: h_{WTC} = e^{\sum\limits_{k}{ln(h_{WF,k})} \frac{P_{N,k}}{\sum\limits_{k}{P_{N,k}}}} with: :math:`h_{WTC}`: mean hub height of wind turbine cluster, :math:`h_{WF,k}`: hub height of the k-th wind farm of the cluster, :math:`P_{N,k}`: installed power of the k-th wind farm References .. [1] Knorr, K.: "Modellierung von raum-zeitlichen Eigenschaften der Windenergieeinspeisung für wetterdatenbasierte Windleistungssimulationen". Universität Kassel, Diss., 2016, p. 35 """ |
self.hub_height = np.exp(sum(
np.log(wind_farm.hub_height) * wind_farm.get_installed_power() for
wind_farm in self.wind_farms) / self.get_installed_power())
return self |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_installed_power(self):
r""" Calculates the installed power of a wind turbine cluster. Returns ------- float Installed power of the wind turbine cluster. """ |
for wind_farm in self.wind_farms:
wind_farm.installed_power = wind_farm.get_installed_power()
return sum(wind_farm.installed_power for wind_farm in self.wind_farms) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def assign_power_curve(self, wake_losses_model='power_efficiency_curve', smoothing=False, block_width=0.5, standard_deviation_method='turbulence_intensity', smoothing_order='wind_farm_power_curves', turbulence_intensity=None, **kwargs):
r""" Calculates the power curve of a wind turbine cluster. The turbine cluster power curve is calculated by aggregating the wind farm power curves of wind farms within the turbine cluster. Depending on the parameters the power curves are smoothed (before or after the aggregation) and/or a wind farm efficiency is applied before the aggregation. After the calculations the power curve is assigned to the attribute `power_curve`. Parameters wake_losses_model : string Defines the method for taking wake losses within the farm into consideration. Options: 'power_efficiency_curve', 'constant_efficiency' or None. Default: 'power_efficiency_curve'. smoothing : boolean If True the power curves will be smoothed before or after the aggregation of power curves depending on `smoothing_order`. Default: False. block_width : float Width between the wind speeds in the sum of the equation in :py:func:`~.power_curves.smooth_power_curve`. Default: 0.5. standard_deviation_method : string Method for calculating the standard deviation for the Gauss distribution. Options: 'turbulence_intensity', 'Staffell_Pfenninger'. Default: 'turbulence_intensity'. smoothing_order : string Defines when the smoothing takes place if `smoothing` is True. Options: 'turbine_power_curves' (to the single turbine power curves), 'wind_farm_power_curves'. Default: 'wind_farm_power_curves'. turbulence_intensity : float Turbulence intensity at hub height of the wind farm or wind turbine cluster for power curve smoothing with 'turbulence_intensity' method. Can be calculated from `roughness_length` instead. Default: None. Other Parameters roughness_length : float, optional. Roughness length. If `standard_deviation_method` is 'turbulence_intensity' and `turbulence_intensity` is not given the turbulence intensity is calculated via the roughness length. Returns ------- self """ |
# Assign wind farm power curves to wind farms of wind turbine cluster
for farm in self.wind_farms:
# Assign hub heights (needed for power curve and later for
# hub height of turbine cluster)
farm.mean_hub_height()
# Assign wind farm power curve
farm.assign_power_curve(
wake_losses_model=wake_losses_model,
smoothing=smoothing, block_width=block_width,
standard_deviation_method=standard_deviation_method,
smoothing_order=smoothing_order,
turbulence_intensity=turbulence_intensity, **kwargs)
# Create data frame from power curves of all wind farms
df = pd.concat([farm.power_curve.set_index(['wind_speed']).rename(
columns={'value': farm.name}) for
farm in self.wind_farms], axis=1)
# Sum up power curves
cluster_power_curve = pd.DataFrame(
df.interpolate(method='index').sum(axis=1))
cluster_power_curve.columns = ['value']
# Return wind speed (index) to a column of the data frame
cluster_power_curve.reset_index('wind_speed', inplace=True)
self.power_curve = cluster_power_curve
return self |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run_example():
r""" Runs the example. """ |
weather = mc_e.get_weather_data('weather.csv')
my_turbine, e126, dummy_turbine = mc_e.initialize_wind_turbines()
example_farm, example_farm_2 = initialize_wind_farms(my_turbine, e126)
example_cluster = initialize_wind_turbine_cluster(example_farm,
example_farm_2)
calculate_power_output(weather, example_farm, example_cluster)
plot_or_print(example_farm, example_cluster) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fetch_turbine_data(self, fetch_curve, data_source):
r""" Fetches data of the requested wind turbine. Method fetches nominal power as well as power coefficient curve or power curve from a data set provided in the OpenEnergy Database (oedb). You can also import your own power (coefficient) curves from a file. For that the wind speeds in m/s have to be in the first row and the corresponding power coefficient curve values or power curve values in W in a row where the first column contains the turbine name. See `example_power_curves.csv' and `example_power_coefficient_curves.csv` in example/data for the required form of a csv file (more columns can be added). See :py:func:`~.get_turbine_data_from_file` for an example reading data from a csv file. Parameters fetch_curve : string Parameter to specify whether a power or power coefficient curve should be retrieved from the provided turbine data. Valid options are 'power_curve' and 'power_coefficient_curve'. Default: None. data_source : string Specifies whether turbine data (f.e. nominal power, power curve, power coefficient curve) is loaded from the OpenEnergy Database ('oedb') or from a csv file ('<path including file name>'). Default: 'oedb'. Returns ------- self Examples -------- 0.44 4200000.0 """ |
        # Fetch curve data and nominal power from the oedb or from a file.
        if data_source == 'oedb':
            curve_df, nominal_power = get_turbine_data_from_oedb(
                turbine_type=self.name, fetch_curve=fetch_curve)
        else:
            # Any other value of `data_source` is treated as a csv file path.
            curve_df, nominal_power = get_turbine_data_from_file(
                turbine_type=self.name, file_=data_source)
        # Store the curve on the attribute matching `fetch_curve`.
        if fetch_curve == 'power_curve':
            self.power_curve = curve_df
        elif fetch_curve == 'power_coefficient_curve':
            self.power_coefficient_curve = curve_df
        else:
            raise ValueError("'{0}' is an invalid value. ".format(
                fetch_curve) + "`fetch_curve` must be " +
                "'power_curve' or 'power_coefficient_curve'.")
        # Only fill in nominal power if the user did not set it explicitly.
        if self.nominal_power is None:
            self.nominal_power = nominal_power
        return self
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def distance(latitude_1, longitude_1, latitude_2, longitude_2):
""" Distance between two points. """ |
    # Equirectangular approximation: shrink the longitude difference by
    # cos(latitude) so east-west degrees have the correct metric length.
    coef = mod_math.cos(latitude_1 / 180. * mod_math.pi)
    x = latitude_1 - latitude_2
    y = (longitude_1 - longitude_2) * coef
    # ONE_DEGREE converts the degree-space distance to meters.
    return mod_math.sqrt(x * x + y * y) * ONE_DEGREE
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_color_between(color1, color2, i):
""" Return the color linearly interpolated between color1 and color2 by fraction i, clamped to [0, 1]. """ |
    # Clamp the interpolation fraction to [0, 1].
    if i <= 0:
        return color1
    if i >= 1:
        return color2
    # Linear interpolation per RGB channel.
    return (int(color1[0] + (color2[0] - color1[0]) * i),
            int(color1[1] + (color2[1] - color1[1]) * i),
            int(color1[2] + (color2[2] - color1[2]) * i))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _IDW(self, latitude, longitude, radius=1):
""" Return the interpolated elevation at a point. Load the correct tile for latitude and longitude given. If the tile doesn't exist, return None. Otherwise, call the tile's Inverse Distance Weighted function and return the elevation. Args: latitude: float with the latitude in decimal degrees longitude: float with the longitude in decimal degrees radius: int of 1 or 2 indicating the approximate radius of adjacent cells to include Returns: a float of the interpolated elevation with the same unit as the .hgt file (meters) """ |
        # Locate the tile containing the point; None means no data available.
        tile = self.get_file(latitude, longitude)
        if tile is None:
            return None
        # Delegate interpolation to the tile itself.
        return tile._InverseDistanceWeighted(latitude, longitude, radius)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_image(self, size, latitude_interval, longitude_interval, max_elevation, min_elevation=0, unknown_color = (255, 255, 255, 255), zero_color = (0, 0, 255, 255), min_color = (0, 0, 0, 255), max_color = (0, 255, 0, 255), mode='image'):
""" Returns a numpy array or PIL image. """ |
if not size or len(size) != 2:
raise Exception('Invalid size %s' % size)
if not latitude_interval or len(latitude_interval) != 2:
raise Exception('Invalid latitude interval %s' % latitude_interval)
if not longitude_interval or len(longitude_interval) != 2:
raise Exception('Invalid longitude interval %s' % longitude_interval)
width, height = size
width, height = int(width), int(height)
latitude_from, latitude_to = latitude_interval
longitude_from, longitude_to = longitude_interval
if mode == 'array':
import numpy as np
array = np.empty((height,width))
for row in range(height):
for column in range(width):
latitude = latitude_from + float(row) / height * (latitude_to - latitude_from)
longitude = longitude_from + float(column) / width * (longitude_to - longitude_from)
elevation = self.get_elevation(latitude, longitude)
array[row,column] = elevation
return array
elif mode == 'image':
try: import Image as mod_image
except: from PIL import Image as mod_image
try: import ImageDraw as mod_imagedraw
except: from PIL import ImageDraw as mod_imagedraw
image = mod_image.new('RGBA', (width, height),
(255, 255, 255, 255))
draw = mod_imagedraw.Draw(image)
max_elevation -= min_elevation
for row in range(height):
for column in range(width):
latitude = latitude_from + float(row) / height * (latitude_to - latitude_from)
longitude = longitude_from + float(column) / width * (longitude_to - longitude_from)
elevation = self.get_elevation(latitude, longitude)
if elevation == None:
color = unknown_color
else:
elevation_coef = (elevation - min_elevation) / float(max_elevation)
if elevation_coef < 0: elevation_coef = 0
if elevation_coef > 1: elevation_coef = 1
color = mod_utils.get_color_between(min_color, max_color, elevation_coef)
if elevation <= 0:
color = zero_color
draw.point((column, height - row), color)
return image
else:
raise Exception('Invalid mode ' + mode) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _add_interval_elevations(self, gpx, min_interval_length=100):
""" Adds elevation on points every min_interval_length and add missing elevation between """ |
        for track in gpx.tracks:
            for segment in track.segments:
                # Accumulated 2D distance since the segment start, and the
                # threshold at which the next elevation sample is taken.
                last_interval_changed = 0
                previous_point = None
                length = 0
                for no, point in enumerate(segment.points):
                    if previous_point:
                        length += point.distance_2d(previous_point)
                    # Sample a real elevation on the first/last point and on
                    # every point past the next interval boundary; clear the
                    # rest so add_missing_elevations() interpolates them.
                    if no == 0 or no == len(segment.points) - 1 or length > last_interval_changed:
                        last_interval_changed += min_interval_length
                        point.elevation = self.get_elevation(point.latitude, point.longitude)
                    else:
                        point.elevation = None
                    previous_point = point
        gpx.add_missing_elevations()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_elevation(self, latitude, longitude, approximate=None):
""" If approximate is True then only the points from SRTM grid will be used, otherwise a basic aproximation of nearby points will be calculated. """ |
        # Sanity-check that the point actually falls inside this tile; the
        # `resolution` margin tolerates points on the tile edge.
        if not (self.latitude - self.resolution <= latitude < self.latitude + 1):
            raise Exception('Invalid latitude %s for file %s' % (latitude, self.file_name))
        if not (self.longitude <= longitude < self.longitude + 1 + self.resolution):
            raise Exception('Invalid longitude %s for file %s' % (longitude, self.file_name))
        row, column = self.get_row_and_column(latitude, longitude)
        if approximate:
            return self.approximation(latitude, longitude)
        else:
            # Plain nearest-grid-point lookup.
            return self.get_elevation_from_row_and_column(int(row), int(column))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def approximation(self, latitude, longitude):
""" Dummy approximation with nearest points. The nearest the neighbour the more important will be its elevation. """ |
        # Grid spacing in degrees and its metric equivalent.
        d = 1. / self.square_side
        d_meters = d * mod_utils.ONE_DEGREE
        # Since the less the distance => the more important should be the
        # distance of the point, we'll use d-distance as importance coef
        # here:
        importance_1 = d_meters - mod_utils.distance(latitude + d, longitude, latitude, longitude)
        elevation_1 = self.geo_elevation_data.get_elevation(latitude + d, longitude, approximate=False)
        importance_2 = d_meters - mod_utils.distance(latitude - d, longitude, latitude, longitude)
        elevation_2 = self.geo_elevation_data.get_elevation(latitude - d, longitude, approximate=False)
        importance_3 = d_meters - mod_utils.distance(latitude, longitude + d, latitude, longitude)
        elevation_3 = self.geo_elevation_data.get_elevation(latitude, longitude + d, approximate=False)
        importance_4 = d_meters - mod_utils.distance(latitude, longitude - d, latitude, longitude)
        elevation_4 = self.geo_elevation_data.get_elevation(latitude, longitude - d, approximate=False)
        # TODO(TK) Check if coordinates inside the same file, and only then decide if to call
        # self.geo_elevation_data.get_elevation or just self.get_elevation
        # Fall back to this point's own elevation for any missing neighbour.
        if elevation_1 == None or elevation_2 == None or elevation_3 == None or elevation_4 == None:
            elevation = self.get_elevation(latitude, longitude, approximate=False)
            if not elevation:
                return None
            elevation_1 = elevation_1 or elevation
            elevation_2 = elevation_2 or elevation
            elevation_3 = elevation_3 or elevation
            elevation_4 = elevation_4 or elevation
        # Normalize importance:
        sum_importances = float(importance_1 + importance_2 + importance_3 + importance_4)
        # Check normalization:
        assert abs(importance_1 / sum_importances + \
                   importance_2 / sum_importances + \
                   importance_3 / sum_importances + \
                   importance_4 / sum_importances - 1 ) < 0.000001
        # Weighted average of the four neighbour elevations.
        result = importance_1 / sum_importances * elevation_1 + \
                 importance_2 / sum_importances * elevation_2 + \
                 importance_3 / sum_importances * elevation_3 + \
                 importance_4 / sum_importances * elevation_4
        return result
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _InverseDistanceWeighted(self, latitude, longitude, radius=1):
""" Return the Inverse Distance Weighted Elevation. Interpolate the elevation of the given point using the inverse distance weigthing algorithm (exp of 1) in the form: sum((1/distance) * elevation)/sum(1/distance) for each point in the matrix. The matrix size is determined by the radius. A radius of 1 uses 5 points and a radius of 2 uses 13 points. The matrices are set up to use cells adjacent to and including the one that contains the given point. Any cells referenced by the matrix that are on neighboring tiles are ignored. Args: latitude: float of the latitude in decimal degrees longitude: float of the longitude in decimal degrees radius: int of 1 or 2 indicating the size of the matrix Returns: a float of the interpolated elevation in the same units as the underlying .hgt file (meters) Exceptions: raises a ValueError if an invalid radius is supplied """ |
        # (row, col) offsets of the cells used for interpolation; None
        # entries pad the matrix to a square for readability.
        if radius == 1:
            offsetmatrix = (None, (0, 1), None,
                            (-1, 0), (0, 0), (1, 0),
                            None, (0, -1), None)
        elif radius == 2:
            offsetmatrix = (None, None, (0, 2), None, None,
                            None, (-1, 1), (0, 1), (1, 1), None,
                            (-2, 0), (-1, 0), (0, 0), (1, 0), (2, 0),
                            None, (-1, -1), (0, -1), (1, -1), None,
                            None, None, (0, -2), None, None)
        else:
            raise ValueError("Radius {} invalid, "
                             "expected 1 or 2".format(radius))
        row, column = self.get_row_and_column(latitude, longitude)
        center_lat, center_long = self.get_lat_and_long(row, column)
        if latitude == center_lat and longitude == center_long:
            # return direct elev at point (infinite weight)
            return self.get_elevation_from_row_and_column(int(row), int(column))
        weights = 0
        elevation = 0
        for offset in offsetmatrix:
            # Skip padding cells and cells that fall outside this tile.
            if (offset is not None and
                    0 <= row + offset[0] < self.square_side and
                    0 <= column + offset[1] < self.square_side):
                cell = self.get_elevation_from_row_and_column(int(row + offset[0]),
                                                              int(column + offset[1]))
                if cell is not None:
                    # does not need to be meters, anything proportional
                    distance = mod_utils.distance(latitude, longitude,
                                                  center_lat + float(offset[0])/(self.square_side-1),
                                                  center_long + float(offset[1])/(self.square_side-1))
                    weights += 1/distance
                    elevation += cell/distance
        # Inverse-distance-weighted mean over the contributing cells.
        return elevation/weights
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_data(srtm1=True, srtm3=True, leave_zipped=False, file_handler=None, use_included_urls=True, batch_mode=False):
""" Get the utility object for querying elevation data. All data files will be stored in localy (note that it may be gigabytes of data so clean it from time to time). On first run -- all files needed url will be stored and for every next elevation query if the SRTM file is not found it will be retrieved and saved. If you need to change the way the files are saved locally (for example if you need to save them locally) -- change the file_handler. See srtm.main.FileHandler. If leave_zipped is True then files will be stored locally as compressed zip files. That means less disk space but more computing space for every file loaded. If use_included_urls is True urls to SRTM files included in the library will be used. Set to false if you need to reload them on first run. If batch_mode is True, only the most recent file will be stored. This is ideal for situations where you want to use this function to enrich a very large dataset. If your data are spread over a wide geographic area, this setting will make this function slower but will greatly reduce the risk of out-of-memory errors. Default is False. With srtm1 or srtm3 params you can decide which SRTM format to use. Srtm3 has a resolution of three arc-seconds (cca 90 meters between points). Srtm1 has a resolution of one arc-second (cca 30 meters). Srtm1 is available only for the United states. If both srtm1 ans srtm3 are True and both files are present for a location -- the srtm1 will be used. """ |
    if not file_handler:
        file_handler = FileHandler()
    if not srtm1 and not srtm3:
        raise Exception('At least one of srtm1 and srtm3 must be True')
    # URL maps for every available tile (possibly the bundled lists).
    srtm1_files, srtm3_files = _get_urls(use_included_urls, file_handler)
    assert srtm1_files
    assert srtm3_files
    # Drop the variant(s) the caller disabled.
    if not srtm1:
        srtm1_files = {}
    if not srtm3:
        srtm3_files = {}
    assert srtm1_files or srtm3_files
    return mod_data.GeoElevationData(srtm1_files, srtm3_files, file_handler=file_handler,
                                     leave_zipped=leave_zipped, batch_mode=batch_mode)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_srtm_dir(self):
""" The default path to store files. """ |
        # Local cache path:
        result = ""
        if 'HOME' in mod_os.environ:
            result = mod_os.sep.join([mod_os.environ['HOME'], '.cache', 'srtm'])
        elif 'HOMEPATH' in mod_os.environ:
            # Windows fallback when HOME is not set.
            result = mod_os.sep.join([mod_os.environ['HOMEPATH'], '.cache', 'srtm'])
        else:
            raise Exception('No default HOME directory found, please specify a path where to store files')
        # Create the cache directory on first use.
        if not mod_path.exists(result):
            mod_os.makedirs(result)
        return result
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def unsubscribe(self, topic: str):
"""Unsubscribe from a topic :param topic: required :returns: None Sample ws response .. code-block:: python { "id": "1545910840805", "type": "ack" } """ |
        # 'response': True asks the server to acknowledge the unsubscribe
        # with an ack message.
        req_msg = {
            'type': 'unsubscribe',
            'topic': topic,
            'response': True
        }
        await self._conn.send_message(req_msg)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _generate_signature(self, nonce, method, path, data):
"""Generate the call signature :param path: :param data: :param nonce: :return: signature string """ |
data_json = ""
endpoint = path
if method == "get":
if data:
query_string = self._get_params_for_sig(data)
endpoint = "{}?{}".format(path, query_string)
elif data:
data_json = compact_json_dict(data)
sig_str = ("{}{}{}{}".format(nonce, method.upper(), endpoint, data_json)).encode('utf-8')
m = hmac.new(self.API_SECRET.encode('utf-8'), sig_str, hashlib.sha256)
return base64.b64encode(m.digest()) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _handle_response(response):
"""Internal helper for handling API responses from the Quoine server. Raises the appropriate exceptions when necessary; otherwise, returns the response. """ |
        # Any non-2xx HTTP status is an API error.
        if not str(response.status_code).startswith('2'):
            raise KucoinAPIException(response)
        try:
            res = response.json()
            # "200000" is the API's success code; anything else is an error.
            if 'code' in res and res['code'] != "200000":
                raise KucoinAPIException(response)
            if 'success' in res and not res['success']:
                raise KucoinAPIException(response)
            # by default return full response
            # if it's a normal response we have a data attribute, return that
            if 'data' in res:
                res = res['data']
            return res
        except ValueError:
            # Body was not valid JSON.
            raise KucoinRequestException('Invalid Response: %s' % response.text)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_account(self, account_type, currency):
"""Create an account https://docs.kucoin.com/#create-an-account :param account_type: Account type - main or trade :type account_type: string :param currency: Currency code :type currency: string .. code:: python account = client.create_account('trade', 'BTC') :returns: API Response .. code-block:: python { "id": "5bd6e9286d99522a52e458de" } :raises: KucoinResponseException, KucoinAPIException """ |
        data = {
            'type': account_type,
            'currency': currency
        }
        # Second argument presumably marks the request as signed — see _post.
        return self._post('accounts', True, data=data)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_account_activity(self, account_id, start=None, end=None, page=None, limit=None):
"""Get list of account activity https://docs.kucoin.com/#get-account-history :param account_id: ID for account - from list_accounts() :type account_id: string :param start: (optional) Start time as unix timestamp :type start: string :param end: (optional) End time as unix timestamp :type end: string :param page: (optional) Current page - default 1 :type page: int :param limit: (optional) Number of results to return - default 50 :type limit: int .. code:: python history = client.get_account_activity('5bd6e9216d99522a52e458d6') history = client.get_account_activity('5bd6e9216d99522a52e458d6', start='1540296039000') history = client.get_account_activity('5bd6e9216d99522a52e458d6', page=2, page_size=10) :returns: API Response .. code-block:: python { "currentPage": 1, "pageSize": 10, "totalNum": 2, "totalPage": 1, "items": [ { "currency": "KCS", "amount": "0.0998", "fee": "0", "balance": "1994.040596", "bizType": "withdraw", "direction": "in", "createdAt": 1540296039000, "context": { "orderId": "5bc7f080b39c5c03286eef8a", "currency": "BTC" } }, { "currency": "KCS", "amount": "0.0998", "fee": "0", "balance": "1994.140396", "bizType": "trade exchange", "direction": "in", "createdAt": 1540296039000, "context": { "orderId": "5bc7f080b39c5c03286eef8e", "tradeId": "5bc7f080b3949c03286eef8a", "symbol": "BTC-USD" } } ] } :raises: KucoinResponseException, KucoinAPIException """ |
        # Only include filters the caller actually supplied.
        data = {}
        if start:
            data['startAt'] = start
        if end:
            data['endAt'] = end
        if page:
            data['currentPage'] = page
        if limit:
            data['pageSize'] = limit
        return self._get('accounts/{}/ledgers'.format(account_id), True, data=data)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_deposit_address(self, currency):
"""Create deposit address of currency for deposit. You can just create one deposit address. https://docs.kucoin.com/#create-deposit-address :param currency: Name of currency :type currency: string .. code:: python address = client.create_deposit_address('NEO') :returns: ApiResponse .. code:: python { "address": "0x78d3ad1c0aa1bf068e19c94a2d7b16c9c0fcd8b1", "memo": "5c247c8a03aa677cea2a251d" } :raises: KucoinResponseException, KucoinAPIException """ |
        data = {
            'currency': currency
        }
        # Signed POST; the API allows only one deposit address per currency.
        return self._post('deposit-addresses', True, data=data)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_deposit_address(self, currency):
"""Get deposit address for a currency https://docs.kucoin.com/#get-deposit-address :param currency: Name of currency :type currency: string .. code:: python address = client.get_deposit_address('NEO') :returns: ApiResponse .. code:: python { "address": "0x78d3ad1c0aa1bf068e19c94a2d7b16c9c0fcd8b1", "memo": "5c247c8a03aa677cea2a251d" } :raises: KucoinResponseException, KucoinAPIException """ |
        data = {
            'currency': currency
        }
        # Signed GET for the currency's existing deposit address.
        return self._get('deposit-addresses', True, data=data)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_withdrawals(self, currency=None, status=None, start=None, end=None, page=None, limit=None):
"""Get deposit records for a currency https://docs.kucoin.com/#get-withdrawals-list :param currency: Name of currency (optional) :type currency: string :param status: optional - Status of deposit (PROCESSING, SUCCESS, FAILURE) :type status: string :param start: (optional) Start time as unix timestamp :type start: string :param end: (optional) End time as unix timestamp :type end: string :param page: (optional) Page to fetch :type page: int :param limit: (optional) Number of transactions :type limit: int .. code:: python withdrawals = client.get_withdrawals('NEO') :returns: ApiResponse .. code:: python { "currentPage": 1, "pageSize": 10, "totalNum": 1, "totalPage": 1, "items": [ { "id": "5c2dc64e03aa675aa263f1ac", "address": "0x5bedb060b8eb8d823e2414d82acce78d38be7fe9", "memo": "", "currency": "ETH", "amount": 1.0000000, "fee": 0.0100000, "walletTxId": "3e2414d82acce78d38be7fe9", "isInner": false, "status": "FAILURE", "createdAt": 1546503758000, "updatedAt": 1546504603000 } ] } :raises: KucoinResponseException, KucoinAPIException """ |
        # Only include filters the caller actually supplied.
        data = {}
        if currency:
            data['currency'] = currency
        if status:
            data['status'] = status
        if start:
            data['startAt'] = start
        if end:
            data['endAt'] = end
        if limit:
            data['pageSize'] = limit
        if page:
            # NOTE(review): sibling endpoints use 'currentPage' for
            # pagination — confirm 'page' is the correct key here.
            data['page'] = page
        return self._get('withdrawals', True, data=data)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.