repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
jleclanche/fireplace | fireplace/card.py | PlayableCard.powered_up | def powered_up(self):
"""
Returns True whether the card is "powered up".
"""
if not self.data.scripts.powered_up:
return False
for script in self.data.scripts.powered_up:
if not script.check(self):
return False
return True | python | def powered_up(self):
"""
Returns True whether the card is "powered up".
"""
if not self.data.scripts.powered_up:
return False
for script in self.data.scripts.powered_up:
if not script.check(self):
return False
return True | [
"def",
"powered_up",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"data",
".",
"scripts",
".",
"powered_up",
":",
"return",
"False",
"for",
"script",
"in",
"self",
".",
"data",
".",
"scripts",
".",
"powered_up",
":",
"if",
"not",
"script",
".",
"... | Returns True whether the card is "powered up". | [
"Returns",
"True",
"whether",
"the",
"card",
"is",
"powered",
"up",
"."
] | d0fc0e97e185c0210de86631be20638659c0609e | https://github.com/jleclanche/fireplace/blob/d0fc0e97e185c0210de86631be20638659c0609e/fireplace/card.py#L177-L186 | train | 202,000 |
jleclanche/fireplace | fireplace/card.py | PlayableCard.play | def play(self, target=None, index=None, choose=None):
"""
Queue a Play action on the card.
"""
if choose:
if self.must_choose_one:
choose = card = self.choose_cards.filter(id=choose)[0]
self.log("%r: choosing %r", self, choose)
else:
raise InvalidAction("%r cannot be played with choice %r" % (self, choose))
else:
if self.must_choose_one:
raise InvalidAction("%r requires a choice (one of %r)" % (self, self.choose_cards))
card = self
if not self.is_playable():
raise InvalidAction("%r isn't playable." % (self))
if card.requires_target():
if not target:
raise InvalidAction("%r requires a target to play." % (self))
elif target not in self.play_targets:
raise InvalidAction("%r is not a valid target for %r." % (target, self))
elif target:
self.logger.warning("%r does not require a target, ignoring target %r", self, target)
self.game.play_card(self, target, index, choose)
return self | python | def play(self, target=None, index=None, choose=None):
"""
Queue a Play action on the card.
"""
if choose:
if self.must_choose_one:
choose = card = self.choose_cards.filter(id=choose)[0]
self.log("%r: choosing %r", self, choose)
else:
raise InvalidAction("%r cannot be played with choice %r" % (self, choose))
else:
if self.must_choose_one:
raise InvalidAction("%r requires a choice (one of %r)" % (self, self.choose_cards))
card = self
if not self.is_playable():
raise InvalidAction("%r isn't playable." % (self))
if card.requires_target():
if not target:
raise InvalidAction("%r requires a target to play." % (self))
elif target not in self.play_targets:
raise InvalidAction("%r is not a valid target for %r." % (target, self))
elif target:
self.logger.warning("%r does not require a target, ignoring target %r", self, target)
self.game.play_card(self, target, index, choose)
return self | [
"def",
"play",
"(",
"self",
",",
"target",
"=",
"None",
",",
"index",
"=",
"None",
",",
"choose",
"=",
"None",
")",
":",
"if",
"choose",
":",
"if",
"self",
".",
"must_choose_one",
":",
"choose",
"=",
"card",
"=",
"self",
".",
"choose_cards",
".",
"... | Queue a Play action on the card. | [
"Queue",
"a",
"Play",
"action",
"on",
"the",
"card",
"."
] | d0fc0e97e185c0210de86631be20638659c0609e | https://github.com/jleclanche/fireplace/blob/d0fc0e97e185c0210de86631be20638659c0609e/fireplace/card.py#L283-L307 | train | 202,001 |
jleclanche/fireplace | fireplace/card.py | PlayableCard.morph | def morph(self, into):
"""
Morph the card into another card
"""
return self.game.cheat_action(self, [actions.Morph(self, into)]) | python | def morph(self, into):
"""
Morph the card into another card
"""
return self.game.cheat_action(self, [actions.Morph(self, into)]) | [
"def",
"morph",
"(",
"self",
",",
"into",
")",
":",
"return",
"self",
".",
"game",
".",
"cheat_action",
"(",
"self",
",",
"[",
"actions",
".",
"Morph",
"(",
"self",
",",
"into",
")",
"]",
")"
] | Morph the card into another card | [
"Morph",
"the",
"card",
"into",
"another",
"card"
] | d0fc0e97e185c0210de86631be20638659c0609e | https://github.com/jleclanche/fireplace/blob/d0fc0e97e185c0210de86631be20638659c0609e/fireplace/card.py#L316-L320 | train | 202,002 |
jleclanche/fireplace | fireplace/card.py | PlayableCard.shuffle_into_deck | def shuffle_into_deck(self):
"""
Shuffle the card into the controller's deck
"""
return self.game.cheat_action(self, [actions.Shuffle(self.controller, self)]) | python | def shuffle_into_deck(self):
"""
Shuffle the card into the controller's deck
"""
return self.game.cheat_action(self, [actions.Shuffle(self.controller, self)]) | [
"def",
"shuffle_into_deck",
"(",
"self",
")",
":",
"return",
"self",
".",
"game",
".",
"cheat_action",
"(",
"self",
",",
"[",
"actions",
".",
"Shuffle",
"(",
"self",
".",
"controller",
",",
"self",
")",
"]",
")"
] | Shuffle the card into the controller's deck | [
"Shuffle",
"the",
"card",
"into",
"the",
"controller",
"s",
"deck"
] | d0fc0e97e185c0210de86631be20638659c0609e | https://github.com/jleclanche/fireplace/blob/d0fc0e97e185c0210de86631be20638659c0609e/fireplace/card.py#L322-L326 | train | 202,003 |
jleclanche/fireplace | fireplace/card.py | PlayableCard.battlecry_requires_target | def battlecry_requires_target(self):
"""
True if the play action of the card requires a target
"""
if self.has_combo and self.controller.combo:
if PlayReq.REQ_TARGET_FOR_COMBO in self.requirements:
return True
for req in TARGETING_PREREQUISITES:
if req in self.requirements:
return True
return False | python | def battlecry_requires_target(self):
"""
True if the play action of the card requires a target
"""
if self.has_combo and self.controller.combo:
if PlayReq.REQ_TARGET_FOR_COMBO in self.requirements:
return True
for req in TARGETING_PREREQUISITES:
if req in self.requirements:
return True
return False | [
"def",
"battlecry_requires_target",
"(",
"self",
")",
":",
"if",
"self",
".",
"has_combo",
"and",
"self",
".",
"controller",
".",
"combo",
":",
"if",
"PlayReq",
".",
"REQ_TARGET_FOR_COMBO",
"in",
"self",
".",
"requirements",
":",
"return",
"True",
"for",
"re... | True if the play action of the card requires a target | [
"True",
"if",
"the",
"play",
"action",
"of",
"the",
"card",
"requires",
"a",
"target"
] | d0fc0e97e185c0210de86631be20638659c0609e | https://github.com/jleclanche/fireplace/blob/d0fc0e97e185c0210de86631be20638659c0609e/fireplace/card.py#L328-L339 | train | 202,004 |
jleclanche/fireplace | fireplace/card.py | PlayableCard.requires_target | def requires_target(self):
"""
True if the card currently requires a target
"""
if self.has_combo and PlayReq.REQ_TARGET_FOR_COMBO in self.requirements:
if self.controller.combo:
return True
if PlayReq.REQ_TARGET_IF_AVAILABLE in self.requirements:
return bool(self.play_targets)
if PlayReq.REQ_TARGET_IF_AVAILABLE_AND_DRAGON_IN_HAND in self.requirements:
if self.controller.hand.filter(race=Race.DRAGON):
return bool(self.play_targets)
req = self.requirements.get(PlayReq.REQ_TARGET_IF_AVAILABLE_AND_MINIMUM_FRIENDLY_MINIONS)
if req is not None:
if len(self.controller.field) >= req:
return bool(self.play_targets)
req = self.requirements.get(PlayReq.REQ_TARGET_IF_AVAILABLE_AND_MINIMUM_FRIENDLY_SECRETS)
if req is not None:
if len(self.controller.secrets) >= req:
return bool(self.play_targets)
return PlayReq.REQ_TARGET_TO_PLAY in self.requirements | python | def requires_target(self):
"""
True if the card currently requires a target
"""
if self.has_combo and PlayReq.REQ_TARGET_FOR_COMBO in self.requirements:
if self.controller.combo:
return True
if PlayReq.REQ_TARGET_IF_AVAILABLE in self.requirements:
return bool(self.play_targets)
if PlayReq.REQ_TARGET_IF_AVAILABLE_AND_DRAGON_IN_HAND in self.requirements:
if self.controller.hand.filter(race=Race.DRAGON):
return bool(self.play_targets)
req = self.requirements.get(PlayReq.REQ_TARGET_IF_AVAILABLE_AND_MINIMUM_FRIENDLY_MINIONS)
if req is not None:
if len(self.controller.field) >= req:
return bool(self.play_targets)
req = self.requirements.get(PlayReq.REQ_TARGET_IF_AVAILABLE_AND_MINIMUM_FRIENDLY_SECRETS)
if req is not None:
if len(self.controller.secrets) >= req:
return bool(self.play_targets)
return PlayReq.REQ_TARGET_TO_PLAY in self.requirements | [
"def",
"requires_target",
"(",
"self",
")",
":",
"if",
"self",
".",
"has_combo",
"and",
"PlayReq",
".",
"REQ_TARGET_FOR_COMBO",
"in",
"self",
".",
"requirements",
":",
"if",
"self",
".",
"controller",
".",
"combo",
":",
"return",
"True",
"if",
"PlayReq",
"... | True if the card currently requires a target | [
"True",
"if",
"the",
"card",
"currently",
"requires",
"a",
"target"
] | d0fc0e97e185c0210de86631be20638659c0609e | https://github.com/jleclanche/fireplace/blob/d0fc0e97e185c0210de86631be20638659c0609e/fireplace/card.py#L341-L361 | train | 202,005 |
jleclanche/fireplace | fireplace/cards/__init__.py | CardDB.merge | def merge(id, card, cardscript=None):
"""
Find the xmlcard and the card definition of \a id
Then return a merged class of the two
"""
if card is None:
card = cardxml.CardXML(id)
if cardscript is None:
cardscript = get_script_definition(id)
if cardscript:
card.scripts = type(id, (cardscript, ), {})
else:
card.scripts = type(id, (), {})
scriptnames = (
"activate", "combo", "deathrattle", "draw", "inspire", "play",
"enrage", "update", "powered_up"
)
for script in scriptnames:
actions = getattr(card.scripts, script, None)
if actions is None:
# Set the action by default to avoid runtime hasattr() calls
setattr(card.scripts, script, [])
elif not callable(actions):
if not hasattr(actions, "__iter__"):
# Ensure the actions are always iterable
setattr(card.scripts, script, (actions, ))
for script in ("events", "secret"):
events = getattr(card.scripts, script, None)
if events is None:
setattr(card.scripts, script, [])
elif not hasattr(events, "__iter__"):
setattr(card.scripts, script, [events])
if not hasattr(card.scripts, "cost_mod"):
card.scripts.cost_mod = None
if not hasattr(card.scripts, "Hand"):
card.scripts.Hand = type("Hand", (), {})
if not hasattr(card.scripts.Hand, "events"):
card.scripts.Hand.events = []
if not hasattr(card.scripts.Hand.events, "__iter__"):
card.scripts.Hand.events = [card.scripts.Hand.events]
if not hasattr(card.scripts.Hand, "update"):
card.scripts.Hand.update = ()
if not hasattr(card.scripts.Hand.update, "__iter__"):
card.scripts.Hand.update = (card.scripts.Hand.update, )
# Set choose one cards
if hasattr(cardscript, "choose"):
card.choose_cards = cardscript.choose[:]
else:
card.choose_cards = []
if hasattr(cardscript, "tags"):
for tag, value in cardscript.tags.items():
card.tags[tag] = value
# Set some additional events based on the base tags...
if card.poisonous:
card.scripts.events.append(POISONOUS)
return card | python | def merge(id, card, cardscript=None):
"""
Find the xmlcard and the card definition of \a id
Then return a merged class of the two
"""
if card is None:
card = cardxml.CardXML(id)
if cardscript is None:
cardscript = get_script_definition(id)
if cardscript:
card.scripts = type(id, (cardscript, ), {})
else:
card.scripts = type(id, (), {})
scriptnames = (
"activate", "combo", "deathrattle", "draw", "inspire", "play",
"enrage", "update", "powered_up"
)
for script in scriptnames:
actions = getattr(card.scripts, script, None)
if actions is None:
# Set the action by default to avoid runtime hasattr() calls
setattr(card.scripts, script, [])
elif not callable(actions):
if not hasattr(actions, "__iter__"):
# Ensure the actions are always iterable
setattr(card.scripts, script, (actions, ))
for script in ("events", "secret"):
events = getattr(card.scripts, script, None)
if events is None:
setattr(card.scripts, script, [])
elif not hasattr(events, "__iter__"):
setattr(card.scripts, script, [events])
if not hasattr(card.scripts, "cost_mod"):
card.scripts.cost_mod = None
if not hasattr(card.scripts, "Hand"):
card.scripts.Hand = type("Hand", (), {})
if not hasattr(card.scripts.Hand, "events"):
card.scripts.Hand.events = []
if not hasattr(card.scripts.Hand.events, "__iter__"):
card.scripts.Hand.events = [card.scripts.Hand.events]
if not hasattr(card.scripts.Hand, "update"):
card.scripts.Hand.update = ()
if not hasattr(card.scripts.Hand.update, "__iter__"):
card.scripts.Hand.update = (card.scripts.Hand.update, )
# Set choose one cards
if hasattr(cardscript, "choose"):
card.choose_cards = cardscript.choose[:]
else:
card.choose_cards = []
if hasattr(cardscript, "tags"):
for tag, value in cardscript.tags.items():
card.tags[tag] = value
# Set some additional events based on the base tags...
if card.poisonous:
card.scripts.events.append(POISONOUS)
return card | [
"def",
"merge",
"(",
"id",
",",
"card",
",",
"cardscript",
"=",
"None",
")",
":",
"if",
"card",
"is",
"None",
":",
"card",
"=",
"cardxml",
".",
"CardXML",
"(",
"id",
")",
"if",
"cardscript",
"is",
"None",
":",
"cardscript",
"=",
"get_script_definition"... | Find the xmlcard and the card definition of \a id
Then return a merged class of the two | [
"Find",
"the",
"xmlcard",
"and",
"the",
"card",
"definition",
"of",
"\\",
"a",
"id",
"Then",
"return",
"a",
"merged",
"class",
"of",
"the",
"two"
] | d0fc0e97e185c0210de86631be20638659c0609e | https://github.com/jleclanche/fireplace/blob/d0fc0e97e185c0210de86631be20638659c0609e/fireplace/cards/__init__.py#L15-L85 | train | 202,006 |
greyli/flask-dropzone | flask_dropzone/__init__.py | _Dropzone.load_css | def load_css(css_url=None, version='5.2.0'):
"""Load Dropzone's css resources with given version.
.. versionadded:: 1.4.4
:param css_url: The CSS url for Dropzone.js.
:param version: The version of Dropzone.js.
"""
css_filename = 'dropzone.min.css'
serve_local = current_app.config['DROPZONE_SERVE_LOCAL']
if serve_local:
css = '<link rel="stylesheet" href="%s" type="text/css">\n' % \
url_for('dropzone.static', filename=css_filename)
else:
css = '<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/dropzone@%s/dist/min/%s"' \
' type="text/css">\n' % (version, css_filename)
if css_url:
css = '<link rel="stylesheet" href="%s" type="text/css">\n' % css_url
return Markup(css) | python | def load_css(css_url=None, version='5.2.0'):
"""Load Dropzone's css resources with given version.
.. versionadded:: 1.4.4
:param css_url: The CSS url for Dropzone.js.
:param version: The version of Dropzone.js.
"""
css_filename = 'dropzone.min.css'
serve_local = current_app.config['DROPZONE_SERVE_LOCAL']
if serve_local:
css = '<link rel="stylesheet" href="%s" type="text/css">\n' % \
url_for('dropzone.static', filename=css_filename)
else:
css = '<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/dropzone@%s/dist/min/%s"' \
' type="text/css">\n' % (version, css_filename)
if css_url:
css = '<link rel="stylesheet" href="%s" type="text/css">\n' % css_url
return Markup(css) | [
"def",
"load_css",
"(",
"css_url",
"=",
"None",
",",
"version",
"=",
"'5.2.0'",
")",
":",
"css_filename",
"=",
"'dropzone.min.css'",
"serve_local",
"=",
"current_app",
".",
"config",
"[",
"'DROPZONE_SERVE_LOCAL'",
"]",
"if",
"serve_local",
":",
"css",
"=",
"'<... | Load Dropzone's css resources with given version.
.. versionadded:: 1.4.4
:param css_url: The CSS url for Dropzone.js.
:param version: The version of Dropzone.js. | [
"Load",
"Dropzone",
"s",
"css",
"resources",
"with",
"given",
"version",
"."
] | eb1d5ef16d8f83a12e6fed1bb9412a0c12c6d584 | https://github.com/greyli/flask-dropzone/blob/eb1d5ef16d8f83a12e6fed1bb9412a0c12c6d584/flask_dropzone/__init__.py#L137-L157 | train | 202,007 |
greyli/flask-dropzone | flask_dropzone/__init__.py | _Dropzone.load_js | def load_js(js_url=None, version='5.2.0'):
"""Load Dropzone's js resources with given version.
.. versionadded:: 1.4.4
:param js_url: The JS url for Dropzone.js.
:param version: The version of Dropzone.js.
"""
js_filename = 'dropzone.min.js'
serve_local = current_app.config['DROPZONE_SERVE_LOCAL']
if serve_local:
js = '<script src="%s"></script>\n' % url_for('dropzone.static', filename=js_filename)
else:
js = '<script src="https://cdn.jsdelivr.net/npm/dropzone@%s/dist/%s"></script>\n' % (version, js_filename)
if js_url:
js = '<script src="%s"></script>\n' % js_url
return Markup(js) | python | def load_js(js_url=None, version='5.2.0'):
"""Load Dropzone's js resources with given version.
.. versionadded:: 1.4.4
:param js_url: The JS url for Dropzone.js.
:param version: The version of Dropzone.js.
"""
js_filename = 'dropzone.min.js'
serve_local = current_app.config['DROPZONE_SERVE_LOCAL']
if serve_local:
js = '<script src="%s"></script>\n' % url_for('dropzone.static', filename=js_filename)
else:
js = '<script src="https://cdn.jsdelivr.net/npm/dropzone@%s/dist/%s"></script>\n' % (version, js_filename)
if js_url:
js = '<script src="%s"></script>\n' % js_url
return Markup(js) | [
"def",
"load_js",
"(",
"js_url",
"=",
"None",
",",
"version",
"=",
"'5.2.0'",
")",
":",
"js_filename",
"=",
"'dropzone.min.js'",
"serve_local",
"=",
"current_app",
".",
"config",
"[",
"'DROPZONE_SERVE_LOCAL'",
"]",
"if",
"serve_local",
":",
"js",
"=",
"'<scrip... | Load Dropzone's js resources with given version.
.. versionadded:: 1.4.4
:param js_url: The JS url for Dropzone.js.
:param version: The version of Dropzone.js. | [
"Load",
"Dropzone",
"s",
"js",
"resources",
"with",
"given",
"version",
"."
] | eb1d5ef16d8f83a12e6fed1bb9412a0c12c6d584 | https://github.com/greyli/flask-dropzone/blob/eb1d5ef16d8f83a12e6fed1bb9412a0c12c6d584/flask_dropzone/__init__.py#L160-L178 | train | 202,008 |
bjodah/chempy | examples/demo_kinetics.py | main | def main():
"""
This example demonstrates how to generate pretty equations from the analytic
expressions found in ``chempy.kinetics.integrated``.
"""
t, kf, t0, major, minor, prod, beta = sympy.symbols(
't k_f t0 Y Z X beta', negative=False)
for f in funcs:
args = [t, kf, prod, major, minor]
if f in (pseudo_rev, binary_rev):
args.insert(2, kf/beta)
expr = f(*args, backend='sympy')
with open(f.__name__ + '.png', 'wb') as ofh:
sympy.printing.preview(expr, output='png', filename='out.png',
viewer='BytesIO', outputbuffer=ofh)
with open(f.__name__ + '_diff.png', 'wb') as ofh:
sympy.printing.preview(expr.diff(t).subs({t0: 0}).simplify(),
output='png', filename='out.png',
viewer='BytesIO', outputbuffer=ofh) | python | def main():
"""
This example demonstrates how to generate pretty equations from the analytic
expressions found in ``chempy.kinetics.integrated``.
"""
t, kf, t0, major, minor, prod, beta = sympy.symbols(
't k_f t0 Y Z X beta', negative=False)
for f in funcs:
args = [t, kf, prod, major, minor]
if f in (pseudo_rev, binary_rev):
args.insert(2, kf/beta)
expr = f(*args, backend='sympy')
with open(f.__name__ + '.png', 'wb') as ofh:
sympy.printing.preview(expr, output='png', filename='out.png',
viewer='BytesIO', outputbuffer=ofh)
with open(f.__name__ + '_diff.png', 'wb') as ofh:
sympy.printing.preview(expr.diff(t).subs({t0: 0}).simplify(),
output='png', filename='out.png',
viewer='BytesIO', outputbuffer=ofh) | [
"def",
"main",
"(",
")",
":",
"t",
",",
"kf",
",",
"t0",
",",
"major",
",",
"minor",
",",
"prod",
",",
"beta",
"=",
"sympy",
".",
"symbols",
"(",
"'t k_f t0 Y Z X beta'",
",",
"negative",
"=",
"False",
")",
"for",
"f",
"in",
"funcs",
":",
"args",
... | This example demonstrates how to generate pretty equations from the analytic
expressions found in ``chempy.kinetics.integrated``. | [
"This",
"example",
"demonstrates",
"how",
"to",
"generate",
"pretty",
"equations",
"from",
"the",
"analytic",
"expressions",
"found",
"in",
"chempy",
".",
"kinetics",
".",
"integrated",
"."
] | bd62c3e1f7cb797782471203acd3bcf23b21c47e | https://github.com/bjodah/chempy/blob/bd62c3e1f7cb797782471203acd3bcf23b21c47e/examples/demo_kinetics.py#L15-L33 | train | 202,009 |
bjodah/chempy | chempy/equilibria.py | EqSystem.dissolved | def dissolved(self, concs):
""" Return dissolved concentrations """
new_concs = concs.copy()
for r in self.rxns:
if r.has_precipitates(self.substances):
net_stoich = np.asarray(r.net_stoich(self.substances))
s_net, s_stoich, s_idx = r.precipitate_stoich(self.substances)
new_concs -= new_concs[s_idx]/s_stoich * net_stoich
return new_concs | python | def dissolved(self, concs):
""" Return dissolved concentrations """
new_concs = concs.copy()
for r in self.rxns:
if r.has_precipitates(self.substances):
net_stoich = np.asarray(r.net_stoich(self.substances))
s_net, s_stoich, s_idx = r.precipitate_stoich(self.substances)
new_concs -= new_concs[s_idx]/s_stoich * net_stoich
return new_concs | [
"def",
"dissolved",
"(",
"self",
",",
"concs",
")",
":",
"new_concs",
"=",
"concs",
".",
"copy",
"(",
")",
"for",
"r",
"in",
"self",
".",
"rxns",
":",
"if",
"r",
".",
"has_precipitates",
"(",
"self",
".",
"substances",
")",
":",
"net_stoich",
"=",
... | Return dissolved concentrations | [
"Return",
"dissolved",
"concentrations"
] | bd62c3e1f7cb797782471203acd3bcf23b21c47e | https://github.com/bjodah/chempy/blob/bd62c3e1f7cb797782471203acd3bcf23b21c47e/chempy/equilibria.py#L93-L101 | train | 202,010 |
bjodah/chempy | chempy/properties/gas_sol_electrolytes_schumpe_1993.py | lg_solubility_ratio | def lg_solubility_ratio(electrolytes, gas, units=None, warn=True):
""" Returns the log10 value of the solubilty ratio
Implements equation 16, p 156. from Schumpe (1993)
Parameters
----------
electrolytes : dict
Mapping substance key (one in ``p_ion_rM``) to concentration.
gas : str
Substance key for the gas (one in ``p_gas_rM``).
units : object (optional)
object with attribute: molar
warn : bool (default: True)
Emit UserWarning when 'F-' among electrolytes.
"""
if units is None:
M = 1
else:
M = units.molar
if warn and 'F-' in electrolytes:
warnings.warn("In Schumpe 1993: data for fluoride uncertain.")
return sum([(p_gas_rM[gas]/M+p_ion_rM[k]/M)*v for k, v in electrolytes.items()]) | python | def lg_solubility_ratio(electrolytes, gas, units=None, warn=True):
""" Returns the log10 value of the solubilty ratio
Implements equation 16, p 156. from Schumpe (1993)
Parameters
----------
electrolytes : dict
Mapping substance key (one in ``p_ion_rM``) to concentration.
gas : str
Substance key for the gas (one in ``p_gas_rM``).
units : object (optional)
object with attribute: molar
warn : bool (default: True)
Emit UserWarning when 'F-' among electrolytes.
"""
if units is None:
M = 1
else:
M = units.molar
if warn and 'F-' in electrolytes:
warnings.warn("In Schumpe 1993: data for fluoride uncertain.")
return sum([(p_gas_rM[gas]/M+p_ion_rM[k]/M)*v for k, v in electrolytes.items()]) | [
"def",
"lg_solubility_ratio",
"(",
"electrolytes",
",",
"gas",
",",
"units",
"=",
"None",
",",
"warn",
"=",
"True",
")",
":",
"if",
"units",
"is",
"None",
":",
"M",
"=",
"1",
"else",
":",
"M",
"=",
"units",
".",
"molar",
"if",
"warn",
"and",
"'F-'"... | Returns the log10 value of the solubilty ratio
Implements equation 16, p 156. from Schumpe (1993)
Parameters
----------
electrolytes : dict
Mapping substance key (one in ``p_ion_rM``) to concentration.
gas : str
Substance key for the gas (one in ``p_gas_rM``).
units : object (optional)
object with attribute: molar
warn : bool (default: True)
Emit UserWarning when 'F-' among electrolytes. | [
"Returns",
"the",
"log10",
"value",
"of",
"the",
"solubilty",
"ratio"
] | bd62c3e1f7cb797782471203acd3bcf23b21c47e | https://github.com/bjodah/chempy/blob/bd62c3e1f7cb797782471203acd3bcf23b21c47e/chempy/properties/gas_sol_electrolytes_schumpe_1993.py#L72-L95 | train | 202,011 |
bjodah/chempy | chempy/henry.py | Henry_H_at_T | def Henry_H_at_T(T, H, Tderiv, T0=None, units=None, backend=None):
""" Evaluate Henry's constant H at temperature T
Parameters
----------
T: float
Temperature (with units), assumed to be in Kelvin if ``units == None``
H: float
Henry's constant
Tderiv: float (optional)
dln(H)/d(1/T), assumed to be in Kelvin if ``units == None``.
T0: float
Reference temperature, assumed to be in Kelvin if ``units == None``
default: 298.15 K
units: object (optional)
object with attributes: kelvin (e.g. chempy.units.default_units)
backend : module (optional)
module with "exp", default: numpy, math
"""
be = get_backend(backend)
if units is None:
K = 1
else:
K = units.Kelvin
if T0 is None:
T0 = 298.15*K
return H * be.exp(Tderiv*(1/T - 1/T0)) | python | def Henry_H_at_T(T, H, Tderiv, T0=None, units=None, backend=None):
""" Evaluate Henry's constant H at temperature T
Parameters
----------
T: float
Temperature (with units), assumed to be in Kelvin if ``units == None``
H: float
Henry's constant
Tderiv: float (optional)
dln(H)/d(1/T), assumed to be in Kelvin if ``units == None``.
T0: float
Reference temperature, assumed to be in Kelvin if ``units == None``
default: 298.15 K
units: object (optional)
object with attributes: kelvin (e.g. chempy.units.default_units)
backend : module (optional)
module with "exp", default: numpy, math
"""
be = get_backend(backend)
if units is None:
K = 1
else:
K = units.Kelvin
if T0 is None:
T0 = 298.15*K
return H * be.exp(Tderiv*(1/T - 1/T0)) | [
"def",
"Henry_H_at_T",
"(",
"T",
",",
"H",
",",
"Tderiv",
",",
"T0",
"=",
"None",
",",
"units",
"=",
"None",
",",
"backend",
"=",
"None",
")",
":",
"be",
"=",
"get_backend",
"(",
"backend",
")",
"if",
"units",
"is",
"None",
":",
"K",
"=",
"1",
... | Evaluate Henry's constant H at temperature T
Parameters
----------
T: float
Temperature (with units), assumed to be in Kelvin if ``units == None``
H: float
Henry's constant
Tderiv: float (optional)
dln(H)/d(1/T), assumed to be in Kelvin if ``units == None``.
T0: float
Reference temperature, assumed to be in Kelvin if ``units == None``
default: 298.15 K
units: object (optional)
object with attributes: kelvin (e.g. chempy.units.default_units)
backend : module (optional)
module with "exp", default: numpy, math | [
"Evaluate",
"Henry",
"s",
"constant",
"H",
"at",
"temperature",
"T"
] | bd62c3e1f7cb797782471203acd3bcf23b21c47e | https://github.com/bjodah/chempy/blob/bd62c3e1f7cb797782471203acd3bcf23b21c47e/chempy/henry.py#L13-L40 | train | 202,012 |
bjodah/chempy | chempy/util/periodic.py | mass_from_composition | def mass_from_composition(composition):
""" Calculates molecular mass from atomic weights
Parameters
----------
composition: dict
Dictionary mapping int (atomic number) to int (coefficient)
Returns
-------
float
molecular weight in atomic mass units
Notes
-----
Atomic number 0 denotes charge or "net electron defficiency"
Examples
--------
>>> '%.2f' % mass_from_composition({0: -1, 1: 1, 8: 1})
'17.01'
"""
mass = 0.0
for k, v in composition.items():
if k == 0: # electron
mass -= v*5.489e-4
else:
mass += v*relative_atomic_masses[k-1]
return mass | python | def mass_from_composition(composition):
""" Calculates molecular mass from atomic weights
Parameters
----------
composition: dict
Dictionary mapping int (atomic number) to int (coefficient)
Returns
-------
float
molecular weight in atomic mass units
Notes
-----
Atomic number 0 denotes charge or "net electron defficiency"
Examples
--------
>>> '%.2f' % mass_from_composition({0: -1, 1: 1, 8: 1})
'17.01'
"""
mass = 0.0
for k, v in composition.items():
if k == 0: # electron
mass -= v*5.489e-4
else:
mass += v*relative_atomic_masses[k-1]
return mass | [
"def",
"mass_from_composition",
"(",
"composition",
")",
":",
"mass",
"=",
"0.0",
"for",
"k",
",",
"v",
"in",
"composition",
".",
"items",
"(",
")",
":",
"if",
"k",
"==",
"0",
":",
"# electron",
"mass",
"-=",
"v",
"*",
"5.489e-4",
"else",
":",
"mass"... | Calculates molecular mass from atomic weights
Parameters
----------
composition: dict
Dictionary mapping int (atomic number) to int (coefficient)
Returns
-------
float
molecular weight in atomic mass units
Notes
-----
Atomic number 0 denotes charge or "net electron defficiency"
Examples
--------
>>> '%.2f' % mass_from_composition({0: -1, 1: 1, 8: 1})
'17.01' | [
"Calculates",
"molecular",
"mass",
"from",
"atomic",
"weights"
] | bd62c3e1f7cb797782471203acd3bcf23b21c47e | https://github.com/bjodah/chempy/blob/bd62c3e1f7cb797782471203acd3bcf23b21c47e/chempy/util/periodic.py#L94-L123 | train | 202,013 |
bjodah/chempy | chempy/properties/water_diffusivity_holz_2000.py | water_self_diffusion_coefficient | def water_self_diffusion_coefficient(T=None, units=None, warn=True,
err_mult=None):
"""
Temperature-dependent self-diffusion coefficient of water.
Parameters
----------
T : float
Temperature (default: in Kelvin)
units : object (optional)
object with attributes: Kelvin, meter, kilogram
warn : bool (default: True)
Emit UserWarning when outside temperature range.
err_mult : length 2 array_like (default: None)
Perturb paramaters D0 and TS with err_mult[0]*dD0 and
err_mult[1]*dTS respectively, where dD0 and dTS are the
reported uncertainties in the fitted paramters. Useful
for estimating error in diffusion coefficient.
References
----------
Temperature-dependent self-diffusion coefficients of water and six selected
molecular liquids for calibration in accurate 1H NMR PFG measurements
Manfred Holz, Stefan R. Heila, Antonio Saccob;
Phys. Chem. Chem. Phys., 2000,2, 4740-4742
http://pubs.rsc.org/en/Content/ArticleLanding/2000/CP/b005319h
DOI: 10.1039/B005319H
"""
if units is None:
K = 1
m = 1
s = 1
else:
K = units.Kelvin
m = units.meter
s = units.second
if T is None:
T = 298.15*K
_D0 = D0 * m**2 * s**-1
_TS = TS * K
if err_mult is not None:
_dD0 = dD0 * m**2 * s**-1
_dTS = dTS * K
_D0 += err_mult[0]*_dD0
_TS += err_mult[1]*_dTS
if warn and (_any(T < low_t_bound*K) or _any(T > high_t_bound*K)):
warnings.warn("Temperature is outside range (0-100 degC)")
return _D0*((T/_TS) - 1)**gamma | python | def water_self_diffusion_coefficient(T=None, units=None, warn=True,
err_mult=None):
"""
Temperature-dependent self-diffusion coefficient of water.
Parameters
----------
T : float
Temperature (default: in Kelvin)
units : object (optional)
object with attributes: Kelvin, meter, kilogram
warn : bool (default: True)
Emit UserWarning when outside temperature range.
err_mult : length 2 array_like (default: None)
Perturb paramaters D0 and TS with err_mult[0]*dD0 and
err_mult[1]*dTS respectively, where dD0 and dTS are the
reported uncertainties in the fitted paramters. Useful
for estimating error in diffusion coefficient.
References
----------
Temperature-dependent self-diffusion coefficients of water and six selected
molecular liquids for calibration in accurate 1H NMR PFG measurements
Manfred Holz, Stefan R. Heila, Antonio Saccob;
Phys. Chem. Chem. Phys., 2000,2, 4740-4742
http://pubs.rsc.org/en/Content/ArticleLanding/2000/CP/b005319h
DOI: 10.1039/B005319H
"""
if units is None:
K = 1
m = 1
s = 1
else:
K = units.Kelvin
m = units.meter
s = units.second
if T is None:
T = 298.15*K
_D0 = D0 * m**2 * s**-1
_TS = TS * K
if err_mult is not None:
_dD0 = dD0 * m**2 * s**-1
_dTS = dTS * K
_D0 += err_mult[0]*_dD0
_TS += err_mult[1]*_dTS
if warn and (_any(T < low_t_bound*K) or _any(T > high_t_bound*K)):
warnings.warn("Temperature is outside range (0-100 degC)")
return _D0*((T/_TS) - 1)**gamma | [
"def",
"water_self_diffusion_coefficient",
"(",
"T",
"=",
"None",
",",
"units",
"=",
"None",
",",
"warn",
"=",
"True",
",",
"err_mult",
"=",
"None",
")",
":",
"if",
"units",
"is",
"None",
":",
"K",
"=",
"1",
"m",
"=",
"1",
"s",
"=",
"1",
"else",
... | Temperature-dependent self-diffusion coefficient of water.
Parameters
----------
T : float
Temperature (default: in Kelvin)
units : object (optional)
object with attributes: Kelvin, meter, kilogram
warn : bool (default: True)
Emit UserWarning when outside temperature range.
err_mult : length 2 array_like (default: None)
Perturb paramaters D0 and TS with err_mult[0]*dD0 and
err_mult[1]*dTS respectively, where dD0 and dTS are the
reported uncertainties in the fitted paramters. Useful
for estimating error in diffusion coefficient.
References
----------
Temperature-dependent self-diffusion coefficients of water and six selected
molecular liquids for calibration in accurate 1H NMR PFG measurements
Manfred Holz, Stefan R. Heila, Antonio Saccob;
Phys. Chem. Chem. Phys., 2000,2, 4740-4742
http://pubs.rsc.org/en/Content/ArticleLanding/2000/CP/b005319h
DOI: 10.1039/B005319H | [
"Temperature",
"-",
"dependent",
"self",
"-",
"diffusion",
"coefficient",
"of",
"water",
"."
] | bd62c3e1f7cb797782471203acd3bcf23b21c47e | https://github.com/bjodah/chempy/blob/bd62c3e1f7cb797782471203acd3bcf23b21c47e/chempy/properties/water_diffusivity_holz_2000.py#L27-L74 | train | 202,014 |
bjodah/chempy | chempy/util/graph.py | rsys2graph | def rsys2graph(rsys, fname, output_dir=None, prog=None, save=False, **kwargs):
"""
Convenience function to call `rsys2dot` and write output to file
and render the graph
Parameters
----------
rsys : ReactionSystem
fname : str
filename
output_dir : str (optional)
path to directory (default: temporary directory)
prog : str (optional)
default: 'dot'
save : bool
removes temporary directory if False, default: False
\\*\\*kwargs :
Keyword arguments passed along to py:func:`rsys2dot`.
Returns
-------
str
Outpath
Examples
--------
>>> rsys2graph(rsys, sbstncs, '/tmp/out.png') # doctest: +SKIP
"""
lines = rsys2dot(rsys, **kwargs)
created_tempdir = False
try:
if output_dir is None:
output_dir = tempfile.mkdtemp()
created_tempdir = True
basename, ext = os.path.splitext(os.path.basename(fname))
outpath = os.path.join(output_dir, fname)
dotpath = os.path.join(output_dir, basename + '.dot')
with open(dotpath, 'wt') as ofh:
ofh.writelines(lines)
if ext == '.tex':
cmds = [prog or 'dot2tex']
else:
cmds = [prog or 'dot', '-T'+outpath.split('.')[-1]]
p = subprocess.Popen(cmds + [dotpath, '-o', outpath])
retcode = p.wait()
if retcode:
fmtstr = "{}\n returned with exit status {}"
raise RuntimeError(fmtstr.format(' '.join(cmds), retcode))
return outpath
finally:
if save is True or save == 'True':
pass
else:
if save is False or save == 'False':
if created_tempdir:
shutil.rmtree(output_dir)
else:
# interpret save as path to copy pdf to.
shutil.copy(outpath, save) | python | def rsys2graph(rsys, fname, output_dir=None, prog=None, save=False, **kwargs):
"""
Convenience function to call `rsys2dot` and write output to file
and render the graph
Parameters
----------
rsys : ReactionSystem
fname : str
filename
output_dir : str (optional)
path to directory (default: temporary directory)
prog : str (optional)
default: 'dot'
save : bool
removes temporary directory if False, default: False
\\*\\*kwargs :
Keyword arguments passed along to py:func:`rsys2dot`.
Returns
-------
str
Outpath
Examples
--------
>>> rsys2graph(rsys, sbstncs, '/tmp/out.png') # doctest: +SKIP
"""
lines = rsys2dot(rsys, **kwargs)
created_tempdir = False
try:
if output_dir is None:
output_dir = tempfile.mkdtemp()
created_tempdir = True
basename, ext = os.path.splitext(os.path.basename(fname))
outpath = os.path.join(output_dir, fname)
dotpath = os.path.join(output_dir, basename + '.dot')
with open(dotpath, 'wt') as ofh:
ofh.writelines(lines)
if ext == '.tex':
cmds = [prog or 'dot2tex']
else:
cmds = [prog or 'dot', '-T'+outpath.split('.')[-1]]
p = subprocess.Popen(cmds + [dotpath, '-o', outpath])
retcode = p.wait()
if retcode:
fmtstr = "{}\n returned with exit status {}"
raise RuntimeError(fmtstr.format(' '.join(cmds), retcode))
return outpath
finally:
if save is True or save == 'True':
pass
else:
if save is False or save == 'False':
if created_tempdir:
shutil.rmtree(output_dir)
else:
# interpret save as path to copy pdf to.
shutil.copy(outpath, save) | [
"def",
"rsys2graph",
"(",
"rsys",
",",
"fname",
",",
"output_dir",
"=",
"None",
",",
"prog",
"=",
"None",
",",
"save",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"lines",
"=",
"rsys2dot",
"(",
"rsys",
",",
"*",
"*",
"kwargs",
")",
"created_te... | Convenience function to call `rsys2dot` and write output to file
and render the graph
Parameters
----------
rsys : ReactionSystem
fname : str
filename
output_dir : str (optional)
path to directory (default: temporary directory)
prog : str (optional)
default: 'dot'
save : bool
removes temporary directory if False, default: False
\\*\\*kwargs :
Keyword arguments passed along to py:func:`rsys2dot`.
Returns
-------
str
Outpath
Examples
--------
>>> rsys2graph(rsys, sbstncs, '/tmp/out.png') # doctest: +SKIP | [
"Convenience",
"function",
"to",
"call",
"rsys2dot",
"and",
"write",
"output",
"to",
"file",
"and",
"render",
"the",
"graph"
] | bd62c3e1f7cb797782471203acd3bcf23b21c47e | https://github.com/bjodah/chempy/blob/bd62c3e1f7cb797782471203acd3bcf23b21c47e/chempy/util/graph.py#L92-L152 | train | 202,015 |
raiden-network/raiden-contracts | raiden_contracts/utils/private_key.py | check_permission_safety | def check_permission_safety(path):
"""Check if the file at the given path is safe to use as a state file.
This checks that group and others have no permissions on the file and that the current user is
the owner.
"""
f_stats = os.stat(path)
return (f_stats.st_mode & (stat.S_IRWXG | stat.S_IRWXO)) == 0 and f_stats.st_uid == os.getuid() | python | def check_permission_safety(path):
"""Check if the file at the given path is safe to use as a state file.
This checks that group and others have no permissions on the file and that the current user is
the owner.
"""
f_stats = os.stat(path)
return (f_stats.st_mode & (stat.S_IRWXG | stat.S_IRWXO)) == 0 and f_stats.st_uid == os.getuid() | [
"def",
"check_permission_safety",
"(",
"path",
")",
":",
"f_stats",
"=",
"os",
".",
"stat",
"(",
"path",
")",
"return",
"(",
"f_stats",
".",
"st_mode",
"&",
"(",
"stat",
".",
"S_IRWXG",
"|",
"stat",
".",
"S_IRWXO",
")",
")",
"==",
"0",
"and",
"f_stat... | Check if the file at the given path is safe to use as a state file.
This checks that group and others have no permissions on the file and that the current user is
the owner. | [
"Check",
"if",
"the",
"file",
"at",
"the",
"given",
"path",
"is",
"safe",
"to",
"use",
"as",
"a",
"state",
"file",
"."
] | a7e72a9477f2204b03f3706360ea8d9c0a8e7063 | https://github.com/raiden-network/raiden-contracts/blob/a7e72a9477f2204b03f3706360ea8d9c0a8e7063/raiden_contracts/utils/private_key.py#L17-L24 | train | 202,016 |
raiden-network/raiden-contracts | raiden_contracts/utils/private_key.py | get_private_key | def get_private_key(key_path, password_path=None):
"""Open a JSON-encoded private key and return it
If a password file is provided, uses it to decrypt the key. If not, the
password is asked interactively. Raw hex-encoded private keys are supported,
but deprecated."""
assert key_path, key_path
if not os.path.exists(key_path):
log.fatal('%s: no such file', key_path)
return None
if not check_permission_safety(key_path):
log.fatal('Private key file %s must be readable only by its owner.', key_path)
return None
if password_path and not check_permission_safety(password_path):
log.fatal('Password file %s must be readable only by its owner.', password_path)
return None
with open(key_path) as keyfile:
private_key = keyfile.readline().strip()
if is_hex(private_key) and len(decode_hex(private_key)) == 32:
log.warning('Private key in raw format. Consider switching to JSON-encoded')
else:
keyfile.seek(0)
try:
json_data = json.load(keyfile)
if password_path:
with open(password_path) as password_file:
password = password_file.readline().strip()
else:
password = getpass.getpass('Enter the private key password: ')
if json_data['crypto']['kdf'] == 'pbkdf2':
password = password.encode() # type: ignore
private_key = encode_hex(decode_keyfile_json(json_data, password))
except ValueError:
log.fatal('Invalid private key format or password!')
return None
return private_key | python | def get_private_key(key_path, password_path=None):
"""Open a JSON-encoded private key and return it
If a password file is provided, uses it to decrypt the key. If not, the
password is asked interactively. Raw hex-encoded private keys are supported,
but deprecated."""
assert key_path, key_path
if not os.path.exists(key_path):
log.fatal('%s: no such file', key_path)
return None
if not check_permission_safety(key_path):
log.fatal('Private key file %s must be readable only by its owner.', key_path)
return None
if password_path and not check_permission_safety(password_path):
log.fatal('Password file %s must be readable only by its owner.', password_path)
return None
with open(key_path) as keyfile:
private_key = keyfile.readline().strip()
if is_hex(private_key) and len(decode_hex(private_key)) == 32:
log.warning('Private key in raw format. Consider switching to JSON-encoded')
else:
keyfile.seek(0)
try:
json_data = json.load(keyfile)
if password_path:
with open(password_path) as password_file:
password = password_file.readline().strip()
else:
password = getpass.getpass('Enter the private key password: ')
if json_data['crypto']['kdf'] == 'pbkdf2':
password = password.encode() # type: ignore
private_key = encode_hex(decode_keyfile_json(json_data, password))
except ValueError:
log.fatal('Invalid private key format or password!')
return None
return private_key | [
"def",
"get_private_key",
"(",
"key_path",
",",
"password_path",
"=",
"None",
")",
":",
"assert",
"key_path",
",",
"key_path",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"key_path",
")",
":",
"log",
".",
"fatal",
"(",
"'%s: no such file'",
",",
... | Open a JSON-encoded private key and return it
If a password file is provided, uses it to decrypt the key. If not, the
password is asked interactively. Raw hex-encoded private keys are supported,
but deprecated. | [
"Open",
"a",
"JSON",
"-",
"encoded",
"private",
"key",
"and",
"return",
"it"
] | a7e72a9477f2204b03f3706360ea8d9c0a8e7063 | https://github.com/raiden-network/raiden-contracts/blob/a7e72a9477f2204b03f3706360ea8d9c0a8e7063/raiden_contracts/utils/private_key.py#L27-L68 | train | 202,017 |
raiden-network/raiden-contracts | raiden_contracts/deploy/contract_deployer.py | ContractDeployer.deploy_token_contract | def deploy_token_contract(
self,
token_supply: int,
token_decimals: int,
token_name: str,
token_symbol: str,
token_type: str = 'CustomToken',
):
"""Deploy a token contract."""
receipt = self.deploy(
contract_name=token_type,
args=[token_supply, token_decimals, token_name, token_symbol],
)
token_address = receipt['contractAddress']
assert token_address and is_address(token_address)
token_address = to_checksum_address(token_address)
return {token_type: token_address} | python | def deploy_token_contract(
self,
token_supply: int,
token_decimals: int,
token_name: str,
token_symbol: str,
token_type: str = 'CustomToken',
):
"""Deploy a token contract."""
receipt = self.deploy(
contract_name=token_type,
args=[token_supply, token_decimals, token_name, token_symbol],
)
token_address = receipt['contractAddress']
assert token_address and is_address(token_address)
token_address = to_checksum_address(token_address)
return {token_type: token_address} | [
"def",
"deploy_token_contract",
"(",
"self",
",",
"token_supply",
":",
"int",
",",
"token_decimals",
":",
"int",
",",
"token_name",
":",
"str",
",",
"token_symbol",
":",
"str",
",",
"token_type",
":",
"str",
"=",
"'CustomToken'",
",",
")",
":",
"receipt",
... | Deploy a token contract. | [
"Deploy",
"a",
"token",
"contract",
"."
] | a7e72a9477f2204b03f3706360ea8d9c0a8e7063 | https://github.com/raiden-network/raiden-contracts/blob/a7e72a9477f2204b03f3706360ea8d9c0a8e7063/raiden_contracts/deploy/contract_deployer.py#L141-L157 | train | 202,018 |
raiden-network/raiden-contracts | raiden_contracts/deploy/contract_deployer.py | ContractDeployer._deploy_and_remember | def _deploy_and_remember(
self,
contract_name: str,
arguments: List,
deployed_contracts: 'DeployedContracts',
) -> Contract:
""" Deploys contract_name with arguments and store the result in deployed_contracts. """
receipt = self.deploy(contract_name, arguments)
deployed_contracts['contracts'][contract_name] = _deployed_data_from_receipt(
receipt=receipt,
constructor_arguments=arguments,
)
return self.web3.eth.contract(
abi=self.contract_manager.get_contract_abi(contract_name),
address=deployed_contracts['contracts'][contract_name]['address'],
) | python | def _deploy_and_remember(
self,
contract_name: str,
arguments: List,
deployed_contracts: 'DeployedContracts',
) -> Contract:
""" Deploys contract_name with arguments and store the result in deployed_contracts. """
receipt = self.deploy(contract_name, arguments)
deployed_contracts['contracts'][contract_name] = _deployed_data_from_receipt(
receipt=receipt,
constructor_arguments=arguments,
)
return self.web3.eth.contract(
abi=self.contract_manager.get_contract_abi(contract_name),
address=deployed_contracts['contracts'][contract_name]['address'],
) | [
"def",
"_deploy_and_remember",
"(",
"self",
",",
"contract_name",
":",
"str",
",",
"arguments",
":",
"List",
",",
"deployed_contracts",
":",
"'DeployedContracts'",
",",
")",
"->",
"Contract",
":",
"receipt",
"=",
"self",
".",
"deploy",
"(",
"contract_name",
",... | Deploys contract_name with arguments and store the result in deployed_contracts. | [
"Deploys",
"contract_name",
"with",
"arguments",
"and",
"store",
"the",
"result",
"in",
"deployed_contracts",
"."
] | a7e72a9477f2204b03f3706360ea8d9c0a8e7063 | https://github.com/raiden-network/raiden-contracts/blob/a7e72a9477f2204b03f3706360ea8d9c0a8e7063/raiden_contracts/deploy/contract_deployer.py#L199-L214 | train | 202,019 |
raiden-network/raiden-contracts | raiden_contracts/deploy/contract_deployer.py | ContractDeployer.register_token_network | def register_token_network(
self,
token_registry_abi: Dict,
token_registry_address: str,
token_address: str,
channel_participant_deposit_limit: Optional[int],
token_network_deposit_limit: Optional[int],
):
"""Register token with a TokenNetworkRegistry contract."""
with_limits = contracts_version_expects_deposit_limits(self.contracts_version)
if with_limits:
return self._register_token_network_with_limits(
token_registry_abi,
token_registry_address,
token_address,
channel_participant_deposit_limit,
token_network_deposit_limit,
)
else:
return self._register_token_network_without_limits(
token_registry_abi,
token_registry_address,
token_address,
channel_participant_deposit_limit,
token_network_deposit_limit,
) | python | def register_token_network(
self,
token_registry_abi: Dict,
token_registry_address: str,
token_address: str,
channel_participant_deposit_limit: Optional[int],
token_network_deposit_limit: Optional[int],
):
"""Register token with a TokenNetworkRegistry contract."""
with_limits = contracts_version_expects_deposit_limits(self.contracts_version)
if with_limits:
return self._register_token_network_with_limits(
token_registry_abi,
token_registry_address,
token_address,
channel_participant_deposit_limit,
token_network_deposit_limit,
)
else:
return self._register_token_network_without_limits(
token_registry_abi,
token_registry_address,
token_address,
channel_participant_deposit_limit,
token_network_deposit_limit,
) | [
"def",
"register_token_network",
"(",
"self",
",",
"token_registry_abi",
":",
"Dict",
",",
"token_registry_address",
":",
"str",
",",
"token_address",
":",
"str",
",",
"channel_participant_deposit_limit",
":",
"Optional",
"[",
"int",
"]",
",",
"token_network_deposit_l... | Register token with a TokenNetworkRegistry contract. | [
"Register",
"token",
"with",
"a",
"TokenNetworkRegistry",
"contract",
"."
] | a7e72a9477f2204b03f3706360ea8d9c0a8e7063 | https://github.com/raiden-network/raiden-contracts/blob/a7e72a9477f2204b03f3706360ea8d9c0a8e7063/raiden_contracts/deploy/contract_deployer.py#L216-L241 | train | 202,020 |
raiden-network/raiden-contracts | raiden_contracts/deploy/contract_deployer.py | ContractDeployer._register_token_network_without_limits | def _register_token_network_without_limits(
self,
token_registry_abi: Dict,
token_registry_address: str,
token_address: str,
channel_participant_deposit_limit: Optional[int],
token_network_deposit_limit: Optional[int],
):
"""Register token with a TokenNetworkRegistry contract
with a contracts-version that doesn't require deposit limits in the TokenNetwork
constructor.
"""
if channel_participant_deposit_limit:
raise ValueError(
'contracts_version below 0.9.0 does not expect '
'channel_participant_deposit_limit',
)
if token_network_deposit_limit:
raise ValueError(
'contracts_version below 0.9.0 does not expect token_network_deposit_limit',
)
token_network_registry = self.web3.eth.contract(
abi=token_registry_abi,
address=token_registry_address,
)
version_from_onchain = token_network_registry.functions.contract_version().call()
if version_from_onchain != self.contract_manager.version_string:
raise RuntimeError(
f'got {version_from_onchain} from the chain, expected '
f'{self.contract_manager.version_string} in the deployment data',
)
command = token_network_registry.functions.createERC20TokenNetwork(
token_address,
)
self.transact(command)
token_network_address = token_network_registry.functions.token_to_token_networks(
token_address,
).call()
token_network_address = to_checksum_address(token_network_address)
LOG.debug(f'TokenNetwork address: {token_network_address}')
return token_network_address | python | def _register_token_network_without_limits(
self,
token_registry_abi: Dict,
token_registry_address: str,
token_address: str,
channel_participant_deposit_limit: Optional[int],
token_network_deposit_limit: Optional[int],
):
"""Register token with a TokenNetworkRegistry contract
with a contracts-version that doesn't require deposit limits in the TokenNetwork
constructor.
"""
if channel_participant_deposit_limit:
raise ValueError(
'contracts_version below 0.9.0 does not expect '
'channel_participant_deposit_limit',
)
if token_network_deposit_limit:
raise ValueError(
'contracts_version below 0.9.0 does not expect token_network_deposit_limit',
)
token_network_registry = self.web3.eth.contract(
abi=token_registry_abi,
address=token_registry_address,
)
version_from_onchain = token_network_registry.functions.contract_version().call()
if version_from_onchain != self.contract_manager.version_string:
raise RuntimeError(
f'got {version_from_onchain} from the chain, expected '
f'{self.contract_manager.version_string} in the deployment data',
)
command = token_network_registry.functions.createERC20TokenNetwork(
token_address,
)
self.transact(command)
token_network_address = token_network_registry.functions.token_to_token_networks(
token_address,
).call()
token_network_address = to_checksum_address(token_network_address)
LOG.debug(f'TokenNetwork address: {token_network_address}')
return token_network_address | [
"def",
"_register_token_network_without_limits",
"(",
"self",
",",
"token_registry_abi",
":",
"Dict",
",",
"token_registry_address",
":",
"str",
",",
"token_address",
":",
"str",
",",
"channel_participant_deposit_limit",
":",
"Optional",
"[",
"int",
"]",
",",
"token_n... | Register token with a TokenNetworkRegistry contract
with a contracts-version that doesn't require deposit limits in the TokenNetwork
constructor. | [
"Register",
"token",
"with",
"a",
"TokenNetworkRegistry",
"contract"
] | a7e72a9477f2204b03f3706360ea8d9c0a8e7063 | https://github.com/raiden-network/raiden-contracts/blob/a7e72a9477f2204b03f3706360ea8d9c0a8e7063/raiden_contracts/deploy/contract_deployer.py#L243-L287 | train | 202,021 |
raiden-network/raiden-contracts | raiden_contracts/deploy/contract_deployer.py | ContractDeployer.deploy_service_contracts | def deploy_service_contracts(
self,
token_address: str,
user_deposit_whole_balance_limit: int,
):
"""Deploy 3rd party service contracts"""
chain_id = int(self.web3.version.network)
deployed_contracts: DeployedContracts = {
'contracts_version': self.contract_version_string(),
'chain_id': chain_id,
'contracts': {},
}
self._deploy_and_remember(CONTRACT_SERVICE_REGISTRY, [token_address], deployed_contracts)
user_deposit = self._deploy_and_remember(
contract_name=CONTRACT_USER_DEPOSIT,
arguments=[token_address, user_deposit_whole_balance_limit],
deployed_contracts=deployed_contracts,
)
monitoring_service_constructor_args = [
token_address,
deployed_contracts['contracts'][CONTRACT_SERVICE_REGISTRY]['address'],
deployed_contracts['contracts'][CONTRACT_USER_DEPOSIT]['address'],
]
msc = self._deploy_and_remember(
contract_name=CONTRACT_MONITORING_SERVICE,
arguments=monitoring_service_constructor_args,
deployed_contracts=deployed_contracts,
)
one_to_n = self._deploy_and_remember(
contract_name=CONTRACT_ONE_TO_N,
arguments=[user_deposit.address, chain_id],
deployed_contracts=deployed_contracts,
)
# Tell the UserDeposit instance about other contracts.
LOG.debug(
'Calling UserDeposit.init() with '
f'msc_address={msc.address} '
f'one_to_n_address={one_to_n.address}',
)
self.transact(user_deposit.functions.init(
_msc_address=msc.address,
_one_to_n_address=one_to_n.address,
))
return deployed_contracts | python | def deploy_service_contracts(
self,
token_address: str,
user_deposit_whole_balance_limit: int,
):
"""Deploy 3rd party service contracts"""
chain_id = int(self.web3.version.network)
deployed_contracts: DeployedContracts = {
'contracts_version': self.contract_version_string(),
'chain_id': chain_id,
'contracts': {},
}
self._deploy_and_remember(CONTRACT_SERVICE_REGISTRY, [token_address], deployed_contracts)
user_deposit = self._deploy_and_remember(
contract_name=CONTRACT_USER_DEPOSIT,
arguments=[token_address, user_deposit_whole_balance_limit],
deployed_contracts=deployed_contracts,
)
monitoring_service_constructor_args = [
token_address,
deployed_contracts['contracts'][CONTRACT_SERVICE_REGISTRY]['address'],
deployed_contracts['contracts'][CONTRACT_USER_DEPOSIT]['address'],
]
msc = self._deploy_and_remember(
contract_name=CONTRACT_MONITORING_SERVICE,
arguments=monitoring_service_constructor_args,
deployed_contracts=deployed_contracts,
)
one_to_n = self._deploy_and_remember(
contract_name=CONTRACT_ONE_TO_N,
arguments=[user_deposit.address, chain_id],
deployed_contracts=deployed_contracts,
)
# Tell the UserDeposit instance about other contracts.
LOG.debug(
'Calling UserDeposit.init() with '
f'msc_address={msc.address} '
f'one_to_n_address={one_to_n.address}',
)
self.transact(user_deposit.functions.init(
_msc_address=msc.address,
_one_to_n_address=one_to_n.address,
))
return deployed_contracts | [
"def",
"deploy_service_contracts",
"(",
"self",
",",
"token_address",
":",
"str",
",",
"user_deposit_whole_balance_limit",
":",
"int",
",",
")",
":",
"chain_id",
"=",
"int",
"(",
"self",
".",
"web3",
".",
"version",
".",
"network",
")",
"deployed_contracts",
"... | Deploy 3rd party service contracts | [
"Deploy",
"3rd",
"party",
"service",
"contracts"
] | a7e72a9477f2204b03f3706360ea8d9c0a8e7063 | https://github.com/raiden-network/raiden-contracts/blob/a7e72a9477f2204b03f3706360ea8d9c0a8e7063/raiden_contracts/deploy/contract_deployer.py#L338-L386 | train | 202,022 |
raiden-network/raiden-contracts | raiden_contracts/utils/signature.py | private_key_to_address | def private_key_to_address(private_key: Union[str, bytes]) -> ChecksumAddress:
""" Converts a private key to an Ethereum address. """
if isinstance(private_key, str):
private_key_bytes = to_bytes(hexstr=private_key)
else:
private_key_bytes = private_key
pk = PrivateKey(private_key_bytes)
return public_key_to_address(pk.public_key) | python | def private_key_to_address(private_key: Union[str, bytes]) -> ChecksumAddress:
""" Converts a private key to an Ethereum address. """
if isinstance(private_key, str):
private_key_bytes = to_bytes(hexstr=private_key)
else:
private_key_bytes = private_key
pk = PrivateKey(private_key_bytes)
return public_key_to_address(pk.public_key) | [
"def",
"private_key_to_address",
"(",
"private_key",
":",
"Union",
"[",
"str",
",",
"bytes",
"]",
")",
"->",
"ChecksumAddress",
":",
"if",
"isinstance",
"(",
"private_key",
",",
"str",
")",
":",
"private_key_bytes",
"=",
"to_bytes",
"(",
"hexstr",
"=",
"priv... | Converts a private key to an Ethereum address. | [
"Converts",
"a",
"private",
"key",
"to",
"an",
"Ethereum",
"address",
"."
] | a7e72a9477f2204b03f3706360ea8d9c0a8e7063 | https://github.com/raiden-network/raiden-contracts/blob/a7e72a9477f2204b03f3706360ea8d9c0a8e7063/raiden_contracts/utils/signature.py#L25-L32 | train | 202,023 |
raiden-network/raiden-contracts | raiden_contracts/utils/signature.py | public_key_to_address | def public_key_to_address(public_key: Union[PublicKey, bytes]) -> ChecksumAddress:
""" Converts a public key to an Ethereum address. """
if isinstance(public_key, PublicKey):
public_key = public_key.format(compressed=False)
assert isinstance(public_key, bytes)
return to_checksum_address(sha3(public_key[1:])[-20:]) | python | def public_key_to_address(public_key: Union[PublicKey, bytes]) -> ChecksumAddress:
""" Converts a public key to an Ethereum address. """
if isinstance(public_key, PublicKey):
public_key = public_key.format(compressed=False)
assert isinstance(public_key, bytes)
return to_checksum_address(sha3(public_key[1:])[-20:]) | [
"def",
"public_key_to_address",
"(",
"public_key",
":",
"Union",
"[",
"PublicKey",
",",
"bytes",
"]",
")",
"->",
"ChecksumAddress",
":",
"if",
"isinstance",
"(",
"public_key",
",",
"PublicKey",
")",
":",
"public_key",
"=",
"public_key",
".",
"format",
"(",
"... | Converts a public key to an Ethereum address. | [
"Converts",
"a",
"public",
"key",
"to",
"an",
"Ethereum",
"address",
"."
] | a7e72a9477f2204b03f3706360ea8d9c0a8e7063 | https://github.com/raiden-network/raiden-contracts/blob/a7e72a9477f2204b03f3706360ea8d9c0a8e7063/raiden_contracts/utils/signature.py#L35-L40 | train | 202,024 |
raiden-network/raiden-contracts | raiden_contracts/utils/logs.py | LogHandler._handle_waited_log | def _handle_waited_log(self, event: dict):
""" A subroutine of handle_log
Increment self.event_count, forget about waiting, and call the callback if any.
"""
txn_hash = event['transactionHash']
event_name = event['event']
assert event_name in self.event_waiting
assert txn_hash in self.event_waiting[event_name]
self.event_count[event_name][txn_hash] += 1
event_entry = self.event_waiting[event_name][txn_hash]
if event_entry.count == self.event_count[event_name][txn_hash]:
self.event_waiting[event_name].pop(txn_hash)
# Call callback function with event
if event_entry.callback:
event_entry.callback(event) | python | def _handle_waited_log(self, event: dict):
""" A subroutine of handle_log
Increment self.event_count, forget about waiting, and call the callback if any.
"""
txn_hash = event['transactionHash']
event_name = event['event']
assert event_name in self.event_waiting
assert txn_hash in self.event_waiting[event_name]
self.event_count[event_name][txn_hash] += 1
event_entry = self.event_waiting[event_name][txn_hash]
if event_entry.count == self.event_count[event_name][txn_hash]:
self.event_waiting[event_name].pop(txn_hash)
# Call callback function with event
if event_entry.callback:
event_entry.callback(event) | [
"def",
"_handle_waited_log",
"(",
"self",
",",
"event",
":",
"dict",
")",
":",
"txn_hash",
"=",
"event",
"[",
"'transactionHash'",
"]",
"event_name",
"=",
"event",
"[",
"'event'",
"]",
"assert",
"event_name",
"in",
"self",
".",
"event_waiting",
"assert",
"tx... | A subroutine of handle_log
Increment self.event_count, forget about waiting, and call the callback if any. | [
"A",
"subroutine",
"of",
"handle_log",
"Increment",
"self",
".",
"event_count",
"forget",
"about",
"waiting",
"and",
"call",
"the",
"callback",
"if",
"any",
"."
] | a7e72a9477f2204b03f3706360ea8d9c0a8e7063 | https://github.com/raiden-network/raiden-contracts/blob/a7e72a9477f2204b03f3706360ea8d9c0a8e7063/raiden_contracts/utils/logs.py#L50-L67 | train | 202,025 |
raiden-network/raiden-contracts | raiden_contracts/utils/logs.py | LogHandler.assert_event | def assert_event(self, txn_hash, event_name, args, timeout=5):
""" Assert that `event_name` is emitted with the `args`
For use in tests only.
"""
def assert_args(event):
assert event['args'] == args, f'{event["args"]} == {args}'
self.add(txn_hash=txn_hash, event_name=event_name, callback=assert_args)
self.check(timeout=timeout) | python | def assert_event(self, txn_hash, event_name, args, timeout=5):
""" Assert that `event_name` is emitted with the `args`
For use in tests only.
"""
def assert_args(event):
assert event['args'] == args, f'{event["args"]} == {args}'
self.add(txn_hash=txn_hash, event_name=event_name, callback=assert_args)
self.check(timeout=timeout) | [
"def",
"assert_event",
"(",
"self",
",",
"txn_hash",
",",
"event_name",
",",
"args",
",",
"timeout",
"=",
"5",
")",
":",
"def",
"assert_args",
"(",
"event",
")",
":",
"assert",
"event",
"[",
"'args'",
"]",
"==",
"args",
",",
"f'{event[\"args\"]} == {args}'... | Assert that `event_name` is emitted with the `args`
For use in tests only. | [
"Assert",
"that",
"event_name",
"is",
"emitted",
"with",
"the",
"args"
] | a7e72a9477f2204b03f3706360ea8d9c0a8e7063 | https://github.com/raiden-network/raiden-contracts/blob/a7e72a9477f2204b03f3706360ea8d9c0a8e7063/raiden_contracts/utils/logs.py#L105-L113 | train | 202,026 |
raiden-network/raiden-contracts | raiden_contracts/deploy/etherscan_verify.py | join_sources | def join_sources(source_module: DeploymentModule, contract_name: str):
""" Use join-contracts.py to concatenate all imported Solidity files.
Args:
source_module: a module name to look up contracts_source_path()
contract_name: 'TokenNetworkRegistry', 'SecretRegistry' etc.
"""
joined_file = Path(__file__).parent.joinpath('joined.sol')
remapping = {module: str(path) for module, path in contracts_source_path().items()}
command = [
'./utils/join-contracts.py',
'--import-map',
json.dumps(remapping),
str(contracts_source_path_of_deployment_module(
source_module,
).joinpath(contract_name + '.sol')),
str(joined_file),
]
working_dir = Path(__file__).parent.parent
try:
subprocess.check_call(command, cwd=working_dir)
except subprocess.CalledProcessError as ex:
print(f'cd {str(working_dir)}; {subprocess.list2cmdline(command)} failed.')
raise ex
return joined_file.read_text() | python | def join_sources(source_module: DeploymentModule, contract_name: str):
""" Use join-contracts.py to concatenate all imported Solidity files.
Args:
source_module: a module name to look up contracts_source_path()
contract_name: 'TokenNetworkRegistry', 'SecretRegistry' etc.
"""
joined_file = Path(__file__).parent.joinpath('joined.sol')
remapping = {module: str(path) for module, path in contracts_source_path().items()}
command = [
'./utils/join-contracts.py',
'--import-map',
json.dumps(remapping),
str(contracts_source_path_of_deployment_module(
source_module,
).joinpath(contract_name + '.sol')),
str(joined_file),
]
working_dir = Path(__file__).parent.parent
try:
subprocess.check_call(command, cwd=working_dir)
except subprocess.CalledProcessError as ex:
print(f'cd {str(working_dir)}; {subprocess.list2cmdline(command)} failed.')
raise ex
return joined_file.read_text() | [
"def",
"join_sources",
"(",
"source_module",
":",
"DeploymentModule",
",",
"contract_name",
":",
"str",
")",
":",
"joined_file",
"=",
"Path",
"(",
"__file__",
")",
".",
"parent",
".",
"joinpath",
"(",
"'joined.sol'",
")",
"remapping",
"=",
"{",
"module",
":"... | Use join-contracts.py to concatenate all imported Solidity files.
Args:
source_module: a module name to look up contracts_source_path()
contract_name: 'TokenNetworkRegistry', 'SecretRegistry' etc. | [
"Use",
"join",
"-",
"contracts",
".",
"py",
"to",
"concatenate",
"all",
"imported",
"Solidity",
"files",
"."
] | a7e72a9477f2204b03f3706360ea8d9c0a8e7063 | https://github.com/raiden-network/raiden-contracts/blob/a7e72a9477f2204b03f3706360ea8d9c0a8e7063/raiden_contracts/deploy/etherscan_verify.py#L101-L126 | train | 202,027 |
raiden-network/raiden-contracts | raiden_contracts/deploy/etherscan_verify.py | etherscan_verify_contract | def etherscan_verify_contract(
chain_id: int,
apikey: str,
source_module: DeploymentModule,
contract_name: str,
):
""" Calls Etherscan API for verifying the Solidity source of a contract.
Args:
chain_id: EIP-155 chain id of the Ethereum chain
apikey: key for calling Etherscan API
source_module: a module name to look up contracts_source_path()
contract_name: 'TokenNetworkRegistry', 'SecretRegistry' etc.
"""
etherscan_api = api_of_chain_id[chain_id]
deployment_info = get_contracts_deployment_info(
chain_id=chain_id,
module=source_module,
)
if deployment_info is None:
raise FileNotFoundError(
f'Deployment file not found for chain_id={chain_id} and module={source_module}',
)
contract_manager = ContractManager(contracts_precompiled_path())
data = post_data_for_etherscan_verification(
apikey=apikey,
deployment_info=deployment_info['contracts'][contract_name],
source=join_sources(source_module=source_module, contract_name=contract_name),
contract_name=contract_name,
metadata=json.loads(contract_manager.contracts[contract_name]['metadata']),
constructor_args=get_constructor_args(
deployment_info=deployment_info,
contract_name=contract_name,
contract_manager=contract_manager,
),
)
response = requests.post(etherscan_api, data=data)
content = json.loads(response.content.decode())
print(content)
print(f'Status: {content["status"]}; {content["message"]} ; GUID = {content["result"]}')
etherscan_url = etherscan_api.replace('api-', '').replace('api', '')
etherscan_url += '/verifyContract2?a=' + data['contractaddress']
manual_submission_guide = f"""Usually a manual submission to Etherscan works.
Visit {etherscan_url}
Use raiden_contracts/deploy/joined.sol."""
if content['status'] != '1':
if content['result'] == 'Contract source code already verified':
return
else:
raise ValueError(
'Etherscan submission failed for an unknown reason\n' +
manual_submission_guide,
)
# submission succeeded, obtained GUID
guid = content['result']
status = '0'
retries = 10
while status == '0' and retries > 0:
retries -= 1
r = guid_status(etherscan_api=etherscan_api, guid=guid)
status = r['status']
if r['result'] == 'Fail - Unable to verify':
raise ValueError(manual_submission_guide)
if r['result'] == 'Pass - Verified':
return
print('Retrying...')
sleep(5)
raise TimeoutError(manual_submission_guide) | python | def etherscan_verify_contract(
chain_id: int,
apikey: str,
source_module: DeploymentModule,
contract_name: str,
):
""" Calls Etherscan API for verifying the Solidity source of a contract.
Args:
chain_id: EIP-155 chain id of the Ethereum chain
apikey: key for calling Etherscan API
source_module: a module name to look up contracts_source_path()
contract_name: 'TokenNetworkRegistry', 'SecretRegistry' etc.
"""
etherscan_api = api_of_chain_id[chain_id]
deployment_info = get_contracts_deployment_info(
chain_id=chain_id,
module=source_module,
)
if deployment_info is None:
raise FileNotFoundError(
f'Deployment file not found for chain_id={chain_id} and module={source_module}',
)
contract_manager = ContractManager(contracts_precompiled_path())
data = post_data_for_etherscan_verification(
apikey=apikey,
deployment_info=deployment_info['contracts'][contract_name],
source=join_sources(source_module=source_module, contract_name=contract_name),
contract_name=contract_name,
metadata=json.loads(contract_manager.contracts[contract_name]['metadata']),
constructor_args=get_constructor_args(
deployment_info=deployment_info,
contract_name=contract_name,
contract_manager=contract_manager,
),
)
response = requests.post(etherscan_api, data=data)
content = json.loads(response.content.decode())
print(content)
print(f'Status: {content["status"]}; {content["message"]} ; GUID = {content["result"]}')
etherscan_url = etherscan_api.replace('api-', '').replace('api', '')
etherscan_url += '/verifyContract2?a=' + data['contractaddress']
manual_submission_guide = f"""Usually a manual submission to Etherscan works.
Visit {etherscan_url}
Use raiden_contracts/deploy/joined.sol."""
if content['status'] != '1':
if content['result'] == 'Contract source code already verified':
return
else:
raise ValueError(
'Etherscan submission failed for an unknown reason\n' +
manual_submission_guide,
)
# submission succeeded, obtained GUID
guid = content['result']
status = '0'
retries = 10
while status == '0' and retries > 0:
retries -= 1
r = guid_status(etherscan_api=etherscan_api, guid=guid)
status = r['status']
if r['result'] == 'Fail - Unable to verify':
raise ValueError(manual_submission_guide)
if r['result'] == 'Pass - Verified':
return
print('Retrying...')
sleep(5)
raise TimeoutError(manual_submission_guide) | [
"def",
"etherscan_verify_contract",
"(",
"chain_id",
":",
"int",
",",
"apikey",
":",
"str",
",",
"source_module",
":",
"DeploymentModule",
",",
"contract_name",
":",
"str",
",",
")",
":",
"etherscan_api",
"=",
"api_of_chain_id",
"[",
"chain_id",
"]",
"deployment... | Calls Etherscan API for verifying the Solidity source of a contract.
Args:
chain_id: EIP-155 chain id of the Ethereum chain
apikey: key for calling Etherscan API
source_module: a module name to look up contracts_source_path()
contract_name: 'TokenNetworkRegistry', 'SecretRegistry' etc. | [
"Calls",
"Etherscan",
"API",
"for",
"verifying",
"the",
"Solidity",
"source",
"of",
"a",
"contract",
"."
] | a7e72a9477f2204b03f3706360ea8d9c0a8e7063 | https://github.com/raiden-network/raiden-contracts/blob/a7e72a9477f2204b03f3706360ea8d9c0a8e7063/raiden_contracts/deploy/etherscan_verify.py#L174-L245 | train | 202,028 |
raiden-network/raiden-contracts | raiden_contracts/deploy/__main__.py | error_removed_option | def error_removed_option(message: str):
""" Takes a message and returns a callback that raises NoSuchOption
if the value is not None. The message is used as an argument to NoSuchOption. """
def f(_, param, value):
if value is not None:
raise click.NoSuchOption(
f'--{param.name.replace("_", "-")} is no longer a valid option. ' +
message,
)
return f | python | def error_removed_option(message: str):
""" Takes a message and returns a callback that raises NoSuchOption
if the value is not None. The message is used as an argument to NoSuchOption. """
def f(_, param, value):
if value is not None:
raise click.NoSuchOption(
f'--{param.name.replace("_", "-")} is no longer a valid option. ' +
message,
)
return f | [
"def",
"error_removed_option",
"(",
"message",
":",
"str",
")",
":",
"def",
"f",
"(",
"_",
",",
"param",
",",
"value",
")",
":",
"if",
"value",
"is",
"not",
"None",
":",
"raise",
"click",
".",
"NoSuchOption",
"(",
"f'--{param.name.replace(\"_\", \"-\")} is n... | Takes a message and returns a callback that raises NoSuchOption
if the value is not None. The message is used as an argument to NoSuchOption. | [
"Takes",
"a",
"message",
"and",
"returns",
"a",
"callback",
"that",
"raises",
"NoSuchOption"
] | a7e72a9477f2204b03f3706360ea8d9c0a8e7063 | https://github.com/raiden-network/raiden-contracts/blob/a7e72a9477f2204b03f3706360ea8d9c0a8e7063/raiden_contracts/deploy/__main__.py#L36-L46 | train | 202,029 |
raiden-network/raiden-contracts | raiden_contracts/deploy/__main__.py | common_options | def common_options(func):
"""A decorator that combines commonly appearing @click.option decorators."""
@click.option(
'--private-key',
required=True,
help='Path to a private key store.',
)
@click.option(
'--rpc-provider',
default='http://127.0.0.1:8545',
help='Address of the Ethereum RPC provider',
)
@click.option(
'--wait',
default=300,
help='Max tx wait time in s.',
)
@click.option(
'--gas-price',
default=5,
type=int,
help='Gas price to use in gwei',
)
@click.option(
'--gas-limit',
default=5_500_000,
)
@click.option(
'--contracts-version',
default=None,
help='Contracts version to verify. Current version will be used by default.',
)
@functools.wraps(func)
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
return wrapper | python | def common_options(func):
"""A decorator that combines commonly appearing @click.option decorators."""
@click.option(
'--private-key',
required=True,
help='Path to a private key store.',
)
@click.option(
'--rpc-provider',
default='http://127.0.0.1:8545',
help='Address of the Ethereum RPC provider',
)
@click.option(
'--wait',
default=300,
help='Max tx wait time in s.',
)
@click.option(
'--gas-price',
default=5,
type=int,
help='Gas price to use in gwei',
)
@click.option(
'--gas-limit',
default=5_500_000,
)
@click.option(
'--contracts-version',
default=None,
help='Contracts version to verify. Current version will be used by default.',
)
@functools.wraps(func)
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
return wrapper | [
"def",
"common_options",
"(",
"func",
")",
":",
"@",
"click",
".",
"option",
"(",
"'--private-key'",
",",
"required",
"=",
"True",
",",
"help",
"=",
"'Path to a private key store.'",
",",
")",
"@",
"click",
".",
"option",
"(",
"'--rpc-provider'",
",",
"defau... | A decorator that combines commonly appearing @click.option decorators. | [
"A",
"decorator",
"that",
"combines",
"commonly",
"appearing"
] | a7e72a9477f2204b03f3706360ea8d9c0a8e7063 | https://github.com/raiden-network/raiden-contracts/blob/a7e72a9477f2204b03f3706360ea8d9c0a8e7063/raiden_contracts/deploy/__main__.py#L49-L84 | train | 202,030 |
raiden-network/raiden-contracts | raiden_contracts/contract_source_manager.py | contracts_source_path_with_stem | def contracts_source_path_with_stem(stem):
"""The directory remapping given to the Solidity compiler."""
return {
'lib': _BASE.joinpath(stem, 'lib'),
'raiden': _BASE.joinpath(stem, 'raiden'),
'test': _BASE.joinpath(stem, 'test'),
'services': _BASE.joinpath(stem, 'services'),
} | python | def contracts_source_path_with_stem(stem):
"""The directory remapping given to the Solidity compiler."""
return {
'lib': _BASE.joinpath(stem, 'lib'),
'raiden': _BASE.joinpath(stem, 'raiden'),
'test': _BASE.joinpath(stem, 'test'),
'services': _BASE.joinpath(stem, 'services'),
} | [
"def",
"contracts_source_path_with_stem",
"(",
"stem",
")",
":",
"return",
"{",
"'lib'",
":",
"_BASE",
".",
"joinpath",
"(",
"stem",
",",
"'lib'",
")",
",",
"'raiden'",
":",
"_BASE",
".",
"joinpath",
"(",
"stem",
",",
"'raiden'",
")",
",",
"'test'",
":",... | The directory remapping given to the Solidity compiler. | [
"The",
"directory",
"remapping",
"given",
"to",
"the",
"Solidity",
"compiler",
"."
] | a7e72a9477f2204b03f3706360ea8d9c0a8e7063 | https://github.com/raiden-network/raiden-contracts/blob/a7e72a9477f2204b03f3706360ea8d9c0a8e7063/raiden_contracts/contract_source_manager.py#L162-L169 | train | 202,031 |
raiden-network/raiden-contracts | raiden_contracts/contract_source_manager.py | ContractSourceManager.compile_contracts | def compile_contracts(self, target_path: Path) -> ContractManager:
""" Store compiled contracts JSON at `target_path`. """
self.checksum_contracts()
if self.overall_checksum is None:
raise ContractSourceManagerCompilationError('Checksumming failed.')
contracts_compiled = self._compile_all_contracts()
target_path.parent.mkdir(parents=True, exist_ok=True)
with target_path.open(mode='w') as target_file:
target_file.write(
json.dumps(
dict(
contracts=contracts_compiled,
contracts_checksums=self.contracts_checksums,
overall_checksum=self.overall_checksum,
contracts_version=None,
),
sort_keys=True,
indent=4,
),
)
return ContractManager(target_path) | python | def compile_contracts(self, target_path: Path) -> ContractManager:
""" Store compiled contracts JSON at `target_path`. """
self.checksum_contracts()
if self.overall_checksum is None:
raise ContractSourceManagerCompilationError('Checksumming failed.')
contracts_compiled = self._compile_all_contracts()
target_path.parent.mkdir(parents=True, exist_ok=True)
with target_path.open(mode='w') as target_file:
target_file.write(
json.dumps(
dict(
contracts=contracts_compiled,
contracts_checksums=self.contracts_checksums,
overall_checksum=self.overall_checksum,
contracts_version=None,
),
sort_keys=True,
indent=4,
),
)
return ContractManager(target_path) | [
"def",
"compile_contracts",
"(",
"self",
",",
"target_path",
":",
"Path",
")",
"->",
"ContractManager",
":",
"self",
".",
"checksum_contracts",
"(",
")",
"if",
"self",
".",
"overall_checksum",
"is",
"None",
":",
"raise",
"ContractSourceManagerCompilationError",
"(... | Store compiled contracts JSON at `target_path`. | [
"Store",
"compiled",
"contracts",
"JSON",
"at",
"target_path",
"."
] | a7e72a9477f2204b03f3706360ea8d9c0a8e7063 | https://github.com/raiden-network/raiden-contracts/blob/a7e72a9477f2204b03f3706360ea8d9c0a8e7063/raiden_contracts/contract_source_manager.py#L78-L102 | train | 202,032 |
raiden-network/raiden-contracts | raiden_contracts/contract_source_manager.py | ContractSourceManager.verify_precompiled_checksums | def verify_precompiled_checksums(self, precompiled_path: Path) -> None:
""" Compare source code checksums with those from a precompiled file. """
# We get the precompiled file data
contracts_precompiled = ContractManager(precompiled_path)
# Silence mypy
assert self.contracts_checksums is not None
# Compare each contract source code checksum with the one from the precompiled file
for contract, checksum in self.contracts_checksums.items():
try:
# Silence mypy
assert contracts_precompiled.contracts_checksums is not None
precompiled_checksum = contracts_precompiled.contracts_checksums[contract]
except KeyError:
raise ContractSourceManagerVerificationError(
f'No checksum for {contract}',
)
if precompiled_checksum != checksum:
raise ContractSourceManagerVerificationError(
f'checksum of {contract} does not match {precompiled_checksum} != {checksum}',
)
# Compare the overall source code checksum with the one from the precompiled file
if self.overall_checksum != contracts_precompiled.overall_checksum:
raise ContractSourceManagerVerificationError(
f'overall checksum does not match '
f'{self.overall_checksum} != {contracts_precompiled.overall_checksum}',
) | python | def verify_precompiled_checksums(self, precompiled_path: Path) -> None:
""" Compare source code checksums with those from a precompiled file. """
# We get the precompiled file data
contracts_precompiled = ContractManager(precompiled_path)
# Silence mypy
assert self.contracts_checksums is not None
# Compare each contract source code checksum with the one from the precompiled file
for contract, checksum in self.contracts_checksums.items():
try:
# Silence mypy
assert contracts_precompiled.contracts_checksums is not None
precompiled_checksum = contracts_precompiled.contracts_checksums[contract]
except KeyError:
raise ContractSourceManagerVerificationError(
f'No checksum for {contract}',
)
if precompiled_checksum != checksum:
raise ContractSourceManagerVerificationError(
f'checksum of {contract} does not match {precompiled_checksum} != {checksum}',
)
# Compare the overall source code checksum with the one from the precompiled file
if self.overall_checksum != contracts_precompiled.overall_checksum:
raise ContractSourceManagerVerificationError(
f'overall checksum does not match '
f'{self.overall_checksum} != {contracts_precompiled.overall_checksum}',
) | [
"def",
"verify_precompiled_checksums",
"(",
"self",
",",
"precompiled_path",
":",
"Path",
")",
"->",
"None",
":",
"# We get the precompiled file data",
"contracts_precompiled",
"=",
"ContractManager",
"(",
"precompiled_path",
")",
"# Silence mypy",
"assert",
"self",
".",
... | Compare source code checksums with those from a precompiled file. | [
"Compare",
"source",
"code",
"checksums",
"with",
"those",
"from",
"a",
"precompiled",
"file",
"."
] | a7e72a9477f2204b03f3706360ea8d9c0a8e7063 | https://github.com/raiden-network/raiden-contracts/blob/a7e72a9477f2204b03f3706360ea8d9c0a8e7063/raiden_contracts/contract_source_manager.py#L104-L133 | train | 202,033 |
raiden-network/raiden-contracts | raiden_contracts/contract_source_manager.py | ContractSourceManager.checksum_contracts | def checksum_contracts(self) -> None:
"""Remember the checksum of each source, and the overall checksum."""
checksums: Dict[str, str] = {}
for contracts_dir in self.contracts_source_dirs.values():
file: Path
for file in contracts_dir.glob('*.sol'):
checksums[file.name] = hashlib.sha256(file.read_bytes()).hexdigest()
self.overall_checksum = hashlib.sha256(
':'.join(checksums[key] for key in sorted(checksums)).encode(),
).hexdigest()
self.contracts_checksums = checksums | python | def checksum_contracts(self) -> None:
"""Remember the checksum of each source, and the overall checksum."""
checksums: Dict[str, str] = {}
for contracts_dir in self.contracts_source_dirs.values():
file: Path
for file in contracts_dir.glob('*.sol'):
checksums[file.name] = hashlib.sha256(file.read_bytes()).hexdigest()
self.overall_checksum = hashlib.sha256(
':'.join(checksums[key] for key in sorted(checksums)).encode(),
).hexdigest()
self.contracts_checksums = checksums | [
"def",
"checksum_contracts",
"(",
"self",
")",
"->",
"None",
":",
"checksums",
":",
"Dict",
"[",
"str",
",",
"str",
"]",
"=",
"{",
"}",
"for",
"contracts_dir",
"in",
"self",
".",
"contracts_source_dirs",
".",
"values",
"(",
")",
":",
"file",
":",
"Path... | Remember the checksum of each source, and the overall checksum. | [
"Remember",
"the",
"checksum",
"of",
"each",
"source",
"and",
"the",
"overall",
"checksum",
"."
] | a7e72a9477f2204b03f3706360ea8d9c0a8e7063 | https://github.com/raiden-network/raiden-contracts/blob/a7e72a9477f2204b03f3706360ea8d9c0a8e7063/raiden_contracts/contract_source_manager.py#L135-L146 | train | 202,034 |
raiden-network/raiden-contracts | raiden_contracts/utils/merkle.py | _hash_pair | def _hash_pair(first: bytes, second: bytes) -> bytes:
""" Computes the hash of the items in lexicographic order """
if first is None:
return second
if second is None:
return first
if first > second:
return keccak(second + first)
else:
return keccak(first + second) | python | def _hash_pair(first: bytes, second: bytes) -> bytes:
""" Computes the hash of the items in lexicographic order """
if first is None:
return second
if second is None:
return first
if first > second:
return keccak(second + first)
else:
return keccak(first + second) | [
"def",
"_hash_pair",
"(",
"first",
":",
"bytes",
",",
"second",
":",
"bytes",
")",
"->",
"bytes",
":",
"if",
"first",
"is",
"None",
":",
"return",
"second",
"if",
"second",
"is",
"None",
":",
"return",
"first",
"if",
"first",
">",
"second",
":",
"ret... | Computes the hash of the items in lexicographic order | [
"Computes",
"the",
"hash",
"of",
"the",
"items",
"in",
"lexicographic",
"order"
] | a7e72a9477f2204b03f3706360ea8d9c0a8e7063 | https://github.com/raiden-network/raiden-contracts/blob/a7e72a9477f2204b03f3706360ea8d9c0a8e7063/raiden_contracts/utils/merkle.py#L12-L23 | train | 202,035 |
raiden-network/raiden-contracts | raiden_contracts/utils/merkle.py | compute_merkle_tree | def compute_merkle_tree(items: Iterable[bytes]) -> MerkleTree:
""" Calculates the merkle root for a given list of items """
if not all(isinstance(l, bytes) and len(l) == 32 for l in items):
raise ValueError('Not all items are hashes')
leaves = sorted(items)
if len(leaves) == 0:
return MerkleTree(layers=[[EMPTY_MERKLE_ROOT]])
if not len(leaves) == len(set(leaves)):
raise ValueError('The leaves items must not contain duplicate items')
tree = [leaves]
layer = leaves
while len(layer) > 1:
# [a, b, c, d, e] -> [(a, b), (c, d), (e, None)]
iterator = iter(layer)
paired_items = zip_longest(iterator, iterator)
layer = [_hash_pair(a, b) for a, b in paired_items]
tree.append(layer)
return MerkleTree(layers=tree) | python | def compute_merkle_tree(items: Iterable[bytes]) -> MerkleTree:
""" Calculates the merkle root for a given list of items """
if not all(isinstance(l, bytes) and len(l) == 32 for l in items):
raise ValueError('Not all items are hashes')
leaves = sorted(items)
if len(leaves) == 0:
return MerkleTree(layers=[[EMPTY_MERKLE_ROOT]])
if not len(leaves) == len(set(leaves)):
raise ValueError('The leaves items must not contain duplicate items')
tree = [leaves]
layer = leaves
while len(layer) > 1:
# [a, b, c, d, e] -> [(a, b), (c, d), (e, None)]
iterator = iter(layer)
paired_items = zip_longest(iterator, iterator)
layer = [_hash_pair(a, b) for a, b in paired_items]
tree.append(layer)
return MerkleTree(layers=tree) | [
"def",
"compute_merkle_tree",
"(",
"items",
":",
"Iterable",
"[",
"bytes",
"]",
")",
"->",
"MerkleTree",
":",
"if",
"not",
"all",
"(",
"isinstance",
"(",
"l",
",",
"bytes",
")",
"and",
"len",
"(",
"l",
")",
"==",
"32",
"for",
"l",
"in",
"items",
")... | Calculates the merkle root for a given list of items | [
"Calculates",
"the",
"merkle",
"root",
"for",
"a",
"given",
"list",
"of",
"items"
] | a7e72a9477f2204b03f3706360ea8d9c0a8e7063 | https://github.com/raiden-network/raiden-contracts/blob/a7e72a9477f2204b03f3706360ea8d9c0a8e7063/raiden_contracts/utils/merkle.py#L26-L49 | train | 202,036 |
raiden-network/raiden-contracts | raiden_contracts/utils/merkle.py | get_merkle_root | def get_merkle_root(merkle_tree: MerkleTree) -> bytes:
""" Returns the root element of the merkle tree. """
assert merkle_tree.layers, 'the merkle tree layers are empty'
assert merkle_tree.layers[-1], 'the root layer is empty'
return merkle_tree.layers[-1][0] | python | def get_merkle_root(merkle_tree: MerkleTree) -> bytes:
""" Returns the root element of the merkle tree. """
assert merkle_tree.layers, 'the merkle tree layers are empty'
assert merkle_tree.layers[-1], 'the root layer is empty'
return merkle_tree.layers[-1][0] | [
"def",
"get_merkle_root",
"(",
"merkle_tree",
":",
"MerkleTree",
")",
"->",
"bytes",
":",
"assert",
"merkle_tree",
".",
"layers",
",",
"'the merkle tree layers are empty'",
"assert",
"merkle_tree",
".",
"layers",
"[",
"-",
"1",
"]",
",",
"'the root layer is empty'",... | Returns the root element of the merkle tree. | [
"Returns",
"the",
"root",
"element",
"of",
"the",
"merkle",
"tree",
"."
] | a7e72a9477f2204b03f3706360ea8d9c0a8e7063 | https://github.com/raiden-network/raiden-contracts/blob/a7e72a9477f2204b03f3706360ea8d9c0a8e7063/raiden_contracts/utils/merkle.py#L52-L57 | train | 202,037 |
raiden-network/raiden-contracts | raiden_contracts/utils/versions.py | contracts_version_expects_deposit_limits | def contracts_version_expects_deposit_limits(contracts_version: Optional[str]) -> bool:
""" Answers whether TokenNetworkRegistry of the contracts_vesion needs deposit limits """
if contracts_version is None:
return True
if contracts_version == '0.3._':
return False
return compare(contracts_version, '0.9.0') > -1 | python | def contracts_version_expects_deposit_limits(contracts_version: Optional[str]) -> bool:
""" Answers whether TokenNetworkRegistry of the contracts_vesion needs deposit limits """
if contracts_version is None:
return True
if contracts_version == '0.3._':
return False
return compare(contracts_version, '0.9.0') > -1 | [
"def",
"contracts_version_expects_deposit_limits",
"(",
"contracts_version",
":",
"Optional",
"[",
"str",
"]",
")",
"->",
"bool",
":",
"if",
"contracts_version",
"is",
"None",
":",
"return",
"True",
"if",
"contracts_version",
"==",
"'0.3._'",
":",
"return",
"False... | Answers whether TokenNetworkRegistry of the contracts_vesion needs deposit limits | [
"Answers",
"whether",
"TokenNetworkRegistry",
"of",
"the",
"contracts_vesion",
"needs",
"deposit",
"limits"
] | a7e72a9477f2204b03f3706360ea8d9c0a8e7063 | https://github.com/raiden-network/raiden-contracts/blob/a7e72a9477f2204b03f3706360ea8d9c0a8e7063/raiden_contracts/utils/versions.py#L9-L15 | train | 202,038 |
raiden-network/raiden-contracts | raiden_contracts/deploy/contract_verifier.py | ContractVerifier._verify_deployed_contract | def _verify_deployed_contract(
self,
deployment_data: DeployedContracts,
contract_name: str,
) -> Contract:
""" Verify deployment info against the chain
Verifies:
- the runtime bytecode - precompiled data against the chain
- information stored in deployment_*.json against the chain,
except for the constructor arguments, which have to be checked
separately.
Returns: (onchain_instance, constructor_arguments)
"""
contracts = deployment_data['contracts']
contract_address = contracts[contract_name]['address']
contract_instance = self.web3.eth.contract(
abi=self.contract_manager.get_contract_abi(contract_name),
address=contract_address,
)
# Check that the deployed bytecode matches the precompiled data
blockchain_bytecode = self.web3.eth.getCode(contract_address).hex()
compiled_bytecode = self.contract_manager.get_runtime_hexcode(contract_name)
assert blockchain_bytecode == compiled_bytecode
print(
f'{contract_name} at {contract_address} '
f'matches the compiled data from contracts.json',
)
# Check blockchain transaction hash & block information
receipt = self.web3.eth.getTransactionReceipt(
contracts[contract_name]['transaction_hash'],
)
assert receipt['blockNumber'] == contracts[contract_name]['block_number'], (
f'We have block_number {contracts[contract_name]["block_number"]} in the deployment '
f'info, but {receipt["blockNumber"]} in the transaction receipt from web3.'
)
assert receipt['gasUsed'] == contracts[contract_name]['gas_cost'], (
f'We have gasUsed {contracts[contract_name]["gas_cost"]} in the deployment info, '
f'but {receipt["gasUsed"]} in the transaction receipt from web3.'
)
assert receipt['contractAddress'] == contracts[contract_name]['address'], (
f'We have contractAddress {contracts[contract_name]["address"]} in the deployment info'
f' but {receipt["contractAddress"]} in the transaction receipt from web3.'
)
# Check the contract version
version = contract_instance.functions.contract_version().call()
assert version == deployment_data['contracts_version'], \
f'got {version} expected {deployment_data["contracts_version"]}.' \
f'contract_manager has contracts_version {self.contract_manager.contracts_version}'
return contract_instance, contracts[contract_name]['constructor_arguments'] | python | def _verify_deployed_contract(
self,
deployment_data: DeployedContracts,
contract_name: str,
) -> Contract:
""" Verify deployment info against the chain
Verifies:
- the runtime bytecode - precompiled data against the chain
- information stored in deployment_*.json against the chain,
except for the constructor arguments, which have to be checked
separately.
Returns: (onchain_instance, constructor_arguments)
"""
contracts = deployment_data['contracts']
contract_address = contracts[contract_name]['address']
contract_instance = self.web3.eth.contract(
abi=self.contract_manager.get_contract_abi(contract_name),
address=contract_address,
)
# Check that the deployed bytecode matches the precompiled data
blockchain_bytecode = self.web3.eth.getCode(contract_address).hex()
compiled_bytecode = self.contract_manager.get_runtime_hexcode(contract_name)
assert blockchain_bytecode == compiled_bytecode
print(
f'{contract_name} at {contract_address} '
f'matches the compiled data from contracts.json',
)
# Check blockchain transaction hash & block information
receipt = self.web3.eth.getTransactionReceipt(
contracts[contract_name]['transaction_hash'],
)
assert receipt['blockNumber'] == contracts[contract_name]['block_number'], (
f'We have block_number {contracts[contract_name]["block_number"]} in the deployment '
f'info, but {receipt["blockNumber"]} in the transaction receipt from web3.'
)
assert receipt['gasUsed'] == contracts[contract_name]['gas_cost'], (
f'We have gasUsed {contracts[contract_name]["gas_cost"]} in the deployment info, '
f'but {receipt["gasUsed"]} in the transaction receipt from web3.'
)
assert receipt['contractAddress'] == contracts[contract_name]['address'], (
f'We have contractAddress {contracts[contract_name]["address"]} in the deployment info'
f' but {receipt["contractAddress"]} in the transaction receipt from web3.'
)
# Check the contract version
version = contract_instance.functions.contract_version().call()
assert version == deployment_data['contracts_version'], \
f'got {version} expected {deployment_data["contracts_version"]}.' \
f'contract_manager has contracts_version {self.contract_manager.contracts_version}'
return contract_instance, contracts[contract_name]['constructor_arguments'] | [
"def",
"_verify_deployed_contract",
"(",
"self",
",",
"deployment_data",
":",
"DeployedContracts",
",",
"contract_name",
":",
"str",
",",
")",
"->",
"Contract",
":",
"contracts",
"=",
"deployment_data",
"[",
"'contracts'",
"]",
"contract_address",
"=",
"contracts",
... | Verify deployment info against the chain
Verifies:
- the runtime bytecode - precompiled data against the chain
- information stored in deployment_*.json against the chain,
except for the constructor arguments, which have to be checked
separately.
Returns: (onchain_instance, constructor_arguments) | [
"Verify",
"deployment",
"info",
"against",
"the",
"chain"
] | a7e72a9477f2204b03f3706360ea8d9c0a8e7063 | https://github.com/raiden-network/raiden-contracts/blob/a7e72a9477f2204b03f3706360ea8d9c0a8e7063/raiden_contracts/deploy/contract_verifier.py#L184-L240 | train | 202,039 |
raiden-network/raiden-contracts | raiden_contracts/contract_manager.py | contracts_data_path | def contracts_data_path(version: Optional[str] = None):
"""Returns the deployment data directory for a version."""
if version is None:
return _BASE.joinpath('data')
return _BASE.joinpath(f'data_{version}') | python | def contracts_data_path(version: Optional[str] = None):
"""Returns the deployment data directory for a version."""
if version is None:
return _BASE.joinpath('data')
return _BASE.joinpath(f'data_{version}') | [
"def",
"contracts_data_path",
"(",
"version",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
")",
":",
"if",
"version",
"is",
"None",
":",
"return",
"_BASE",
".",
"joinpath",
"(",
"'data'",
")",
"return",
"_BASE",
".",
"joinpath",
"(",
"f'data_{version}'",... | Returns the deployment data directory for a version. | [
"Returns",
"the",
"deployment",
"data",
"directory",
"for",
"a",
"version",
"."
] | a7e72a9477f2204b03f3706360ea8d9c0a8e7063 | https://github.com/raiden-network/raiden-contracts/blob/a7e72a9477f2204b03f3706360ea8d9c0a8e7063/raiden_contracts/contract_manager.py#L114-L118 | train | 202,040 |
raiden-network/raiden-contracts | raiden_contracts/contract_manager.py | contracts_precompiled_path | def contracts_precompiled_path(version: Optional[str] = None) -> Path:
"""Returns the path of JSON file where the bytecode can be found."""
data_path = contracts_data_path(version)
return data_path.joinpath('contracts.json') | python | def contracts_precompiled_path(version: Optional[str] = None) -> Path:
"""Returns the path of JSON file where the bytecode can be found."""
data_path = contracts_data_path(version)
return data_path.joinpath('contracts.json') | [
"def",
"contracts_precompiled_path",
"(",
"version",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
")",
"->",
"Path",
":",
"data_path",
"=",
"contracts_data_path",
"(",
"version",
")",
"return",
"data_path",
".",
"joinpath",
"(",
"'contracts.json'",
")"
] | Returns the path of JSON file where the bytecode can be found. | [
"Returns",
"the",
"path",
"of",
"JSON",
"file",
"where",
"the",
"bytecode",
"can",
"be",
"found",
"."
] | a7e72a9477f2204b03f3706360ea8d9c0a8e7063 | https://github.com/raiden-network/raiden-contracts/blob/a7e72a9477f2204b03f3706360ea8d9c0a8e7063/raiden_contracts/contract_manager.py#L121-L124 | train | 202,041 |
raiden-network/raiden-contracts | raiden_contracts/contract_manager.py | contracts_deployed_path | def contracts_deployed_path(
chain_id: int,
version: Optional[str] = None,
services: bool = False,
):
"""Returns the path of the deplolyment data JSON file."""
data_path = contracts_data_path(version)
chain_name = ID_TO_NETWORKNAME[chain_id] if chain_id in ID_TO_NETWORKNAME else 'private_net'
return data_path.joinpath(f'deployment_{"services_" if services else ""}{chain_name}.json') | python | def contracts_deployed_path(
chain_id: int,
version: Optional[str] = None,
services: bool = False,
):
"""Returns the path of the deplolyment data JSON file."""
data_path = contracts_data_path(version)
chain_name = ID_TO_NETWORKNAME[chain_id] if chain_id in ID_TO_NETWORKNAME else 'private_net'
return data_path.joinpath(f'deployment_{"services_" if services else ""}{chain_name}.json') | [
"def",
"contracts_deployed_path",
"(",
"chain_id",
":",
"int",
",",
"version",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"services",
":",
"bool",
"=",
"False",
",",
")",
":",
"data_path",
"=",
"contracts_data_path",
"(",
"version",
")",
"chain_na... | Returns the path of the deplolyment data JSON file. | [
"Returns",
"the",
"path",
"of",
"the",
"deplolyment",
"data",
"JSON",
"file",
"."
] | a7e72a9477f2204b03f3706360ea8d9c0a8e7063 | https://github.com/raiden-network/raiden-contracts/blob/a7e72a9477f2204b03f3706360ea8d9c0a8e7063/raiden_contracts/contract_manager.py#L133-L142 | train | 202,042 |
raiden-network/raiden-contracts | raiden_contracts/contract_manager.py | merge_deployment_data | def merge_deployment_data(dict1: DeployedContracts, dict2: DeployedContracts) -> DeployedContracts:
""" Take contents of two deployment JSON files and merge them
The dictionary under 'contracts' key will be merged. The 'contracts'
contents from different JSON files must not overlap. The contents
under other keys must be identical.
"""
if not dict1:
return dict2
if not dict2:
return dict1
common_contracts: Dict[str, DeployedContract] = deepcopy(dict1['contracts'])
assert not common_contracts.keys() & dict2['contracts'].keys()
common_contracts.update(dict2['contracts'])
assert dict2['chain_id'] == dict1['chain_id']
assert dict2['contracts_version'] == dict1['contracts_version']
return {
'contracts': common_contracts,
'chain_id': dict1['chain_id'],
'contracts_version': dict1['contracts_version'],
} | python | def merge_deployment_data(dict1: DeployedContracts, dict2: DeployedContracts) -> DeployedContracts:
""" Take contents of two deployment JSON files and merge them
The dictionary under 'contracts' key will be merged. The 'contracts'
contents from different JSON files must not overlap. The contents
under other keys must be identical.
"""
if not dict1:
return dict2
if not dict2:
return dict1
common_contracts: Dict[str, DeployedContract] = deepcopy(dict1['contracts'])
assert not common_contracts.keys() & dict2['contracts'].keys()
common_contracts.update(dict2['contracts'])
assert dict2['chain_id'] == dict1['chain_id']
assert dict2['contracts_version'] == dict1['contracts_version']
return {
'contracts': common_contracts,
'chain_id': dict1['chain_id'],
'contracts_version': dict1['contracts_version'],
} | [
"def",
"merge_deployment_data",
"(",
"dict1",
":",
"DeployedContracts",
",",
"dict2",
":",
"DeployedContracts",
")",
"->",
"DeployedContracts",
":",
"if",
"not",
"dict1",
":",
"return",
"dict2",
"if",
"not",
"dict2",
":",
"return",
"dict1",
"common_contracts",
"... | Take contents of two deployment JSON files and merge them
The dictionary under 'contracts' key will be merged. The 'contracts'
contents from different JSON files must not overlap. The contents
under other keys must be identical. | [
"Take",
"contents",
"of",
"two",
"deployment",
"JSON",
"files",
"and",
"merge",
"them"
] | a7e72a9477f2204b03f3706360ea8d9c0a8e7063 | https://github.com/raiden-network/raiden-contracts/blob/a7e72a9477f2204b03f3706360ea8d9c0a8e7063/raiden_contracts/contract_manager.py#L145-L167 | train | 202,043 |
raiden-network/raiden-contracts | raiden_contracts/contract_manager.py | get_contracts_deployment_info | def get_contracts_deployment_info(
chain_id: int,
version: Optional[str] = None,
module: DeploymentModule = DeploymentModule.ALL,
) -> Optional[DeployedContracts]:
"""Reads the deployment data. Returns None if the file is not found.
Parameter:
module The name of the module. ALL means deployed contracts from all modules that are
available for the version.
"""
if module not in DeploymentModule:
raise ValueError(f'Unknown module {module} given to get_contracts_deployment_info()')
def module_chosen(to_be_added: DeploymentModule):
return module == to_be_added or module == DeploymentModule.ALL
files: List[Path] = []
if module_chosen(DeploymentModule.RAIDEN):
files.append(contracts_deployed_path(
chain_id=chain_id,
version=version,
services=False,
))
if module == DeploymentModule.SERVICES and not version_provides_services(version):
raise ValueError(
f'SERVICES module queried for version {version}, but {version} '
'does not provide service contracts.',
)
if module_chosen(DeploymentModule.SERVICES) and version_provides_services(version):
files.append(contracts_deployed_path(
chain_id=chain_id,
version=version,
services=True,
))
deployment_data: DeployedContracts = {} # type: ignore
for f in files:
deployment_data = merge_deployment_data(
deployment_data,
_load_json_from_path(f),
)
if not deployment_data:
deployment_data = None
return deployment_data | python | def get_contracts_deployment_info(
chain_id: int,
version: Optional[str] = None,
module: DeploymentModule = DeploymentModule.ALL,
) -> Optional[DeployedContracts]:
"""Reads the deployment data. Returns None if the file is not found.
Parameter:
module The name of the module. ALL means deployed contracts from all modules that are
available for the version.
"""
if module not in DeploymentModule:
raise ValueError(f'Unknown module {module} given to get_contracts_deployment_info()')
def module_chosen(to_be_added: DeploymentModule):
return module == to_be_added or module == DeploymentModule.ALL
files: List[Path] = []
if module_chosen(DeploymentModule.RAIDEN):
files.append(contracts_deployed_path(
chain_id=chain_id,
version=version,
services=False,
))
if module == DeploymentModule.SERVICES and not version_provides_services(version):
raise ValueError(
f'SERVICES module queried for version {version}, but {version} '
'does not provide service contracts.',
)
if module_chosen(DeploymentModule.SERVICES) and version_provides_services(version):
files.append(contracts_deployed_path(
chain_id=chain_id,
version=version,
services=True,
))
deployment_data: DeployedContracts = {} # type: ignore
for f in files:
deployment_data = merge_deployment_data(
deployment_data,
_load_json_from_path(f),
)
if not deployment_data:
deployment_data = None
return deployment_data | [
"def",
"get_contracts_deployment_info",
"(",
"chain_id",
":",
"int",
",",
"version",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"module",
":",
"DeploymentModule",
"=",
"DeploymentModule",
".",
"ALL",
",",
")",
"->",
"Optional",
"[",
"DeployedContracts... | Reads the deployment data. Returns None if the file is not found.
Parameter:
module The name of the module. ALL means deployed contracts from all modules that are
available for the version. | [
"Reads",
"the",
"deployment",
"data",
".",
"Returns",
"None",
"if",
"the",
"file",
"is",
"not",
"found",
"."
] | a7e72a9477f2204b03f3706360ea8d9c0a8e7063 | https://github.com/raiden-network/raiden-contracts/blob/a7e72a9477f2204b03f3706360ea8d9c0a8e7063/raiden_contracts/contract_manager.py#L180-L229 | train | 202,044 |
raiden-network/raiden-contracts | raiden_contracts/contract_manager.py | ContractManager.get_contract | def get_contract(self, contract_name: str) -> Dict:
""" Return ABI, BIN of the given contract. """
assert self.contracts, 'ContractManager should have contracts compiled'
return self.contracts[contract_name] | python | def get_contract(self, contract_name: str) -> Dict:
""" Return ABI, BIN of the given contract. """
assert self.contracts, 'ContractManager should have contracts compiled'
return self.contracts[contract_name] | [
"def",
"get_contract",
"(",
"self",
",",
"contract_name",
":",
"str",
")",
"->",
"Dict",
":",
"assert",
"self",
".",
"contracts",
",",
"'ContractManager should have contracts compiled'",
"return",
"self",
".",
"contracts",
"[",
"contract_name",
"]"
] | Return ABI, BIN of the given contract. | [
"Return",
"ABI",
"BIN",
"of",
"the",
"given",
"contract",
"."
] | a7e72a9477f2204b03f3706360ea8d9c0a8e7063 | https://github.com/raiden-network/raiden-contracts/blob/a7e72a9477f2204b03f3706360ea8d9c0a8e7063/raiden_contracts/contract_manager.py#L66-L69 | train | 202,045 |
raiden-network/raiden-contracts | raiden_contracts/contract_manager.py | ContractManager.get_contract_abi | def get_contract_abi(self, contract_name: str) -> Dict:
""" Returns the ABI for a given contract. """
assert self.contracts, 'ContractManager should have contracts compiled'
return self.contracts[contract_name]['abi'] | python | def get_contract_abi(self, contract_name: str) -> Dict:
""" Returns the ABI for a given contract. """
assert self.contracts, 'ContractManager should have contracts compiled'
return self.contracts[contract_name]['abi'] | [
"def",
"get_contract_abi",
"(",
"self",
",",
"contract_name",
":",
"str",
")",
"->",
"Dict",
":",
"assert",
"self",
".",
"contracts",
",",
"'ContractManager should have contracts compiled'",
"return",
"self",
".",
"contracts",
"[",
"contract_name",
"]",
"[",
"'abi... | Returns the ABI for a given contract. | [
"Returns",
"the",
"ABI",
"for",
"a",
"given",
"contract",
"."
] | a7e72a9477f2204b03f3706360ea8d9c0a8e7063 | https://github.com/raiden-network/raiden-contracts/blob/a7e72a9477f2204b03f3706360ea8d9c0a8e7063/raiden_contracts/contract_manager.py#L71-L74 | train | 202,046 |
raiden-network/raiden-contracts | raiden_contracts/contract_manager.py | ContractManager.get_event_abi | def get_event_abi(self, contract_name: str, event_name: str) -> Dict:
""" Returns the ABI for a given event. """
# Import locally to avoid web3 dependency during installation via `compile_contracts`
from web3.utils.contracts import find_matching_event_abi
assert self.contracts, 'ContractManager should have contracts compiled'
contract_abi = self.get_contract_abi(contract_name)
return find_matching_event_abi(
abi=contract_abi,
event_name=event_name,
) | python | def get_event_abi(self, contract_name: str, event_name: str) -> Dict:
""" Returns the ABI for a given event. """
# Import locally to avoid web3 dependency during installation via `compile_contracts`
from web3.utils.contracts import find_matching_event_abi
assert self.contracts, 'ContractManager should have contracts compiled'
contract_abi = self.get_contract_abi(contract_name)
return find_matching_event_abi(
abi=contract_abi,
event_name=event_name,
) | [
"def",
"get_event_abi",
"(",
"self",
",",
"contract_name",
":",
"str",
",",
"event_name",
":",
"str",
")",
"->",
"Dict",
":",
"# Import locally to avoid web3 dependency during installation via `compile_contracts`",
"from",
"web3",
".",
"utils",
".",
"contracts",
"import... | Returns the ABI for a given event. | [
"Returns",
"the",
"ABI",
"for",
"a",
"given",
"event",
"."
] | a7e72a9477f2204b03f3706360ea8d9c0a8e7063 | https://github.com/raiden-network/raiden-contracts/blob/a7e72a9477f2204b03f3706360ea8d9c0a8e7063/raiden_contracts/contract_manager.py#L76-L86 | train | 202,047 |
chinapnr/fishbase | fishbase/fish_logger.py | SafeFileHandler.check_base_filename | def check_base_filename(self, record):
"""
Determine if builder should occur.
record is not used, as we are just comparing times,
but it is needed so the method signatures are the same
"""
time_tuple = time.localtime()
if self.suffix_time != time.strftime(self.suffix, time_tuple) or not os.path.exists(
self.baseFilename + '.' + self.suffix_time):
return 1
else:
return 0 | python | def check_base_filename(self, record):
"""
Determine if builder should occur.
record is not used, as we are just comparing times,
but it is needed so the method signatures are the same
"""
time_tuple = time.localtime()
if self.suffix_time != time.strftime(self.suffix, time_tuple) or not os.path.exists(
self.baseFilename + '.' + self.suffix_time):
return 1
else:
return 0 | [
"def",
"check_base_filename",
"(",
"self",
",",
"record",
")",
":",
"time_tuple",
"=",
"time",
".",
"localtime",
"(",
")",
"if",
"self",
".",
"suffix_time",
"!=",
"time",
".",
"strftime",
"(",
"self",
".",
"suffix",
",",
"time_tuple",
")",
"or",
"not",
... | Determine if builder should occur.
record is not used, as we are just comparing times,
but it is needed so the method signatures are the same | [
"Determine",
"if",
"builder",
"should",
"occur",
"."
] | 23c5147a6bc0d8ed36409e55352ffb2c5b0edc82 | https://github.com/chinapnr/fishbase/blob/23c5147a6bc0d8ed36409e55352ffb2c5b0edc82/fishbase/fish_logger.py#L56-L69 | train | 202,048 |
chinapnr/fishbase | fishbase/fish_logger.py | SafeFileHandler.build_base_filename | def build_base_filename(self):
"""
do builder; in this case,
old time stamp is removed from filename and
a new time stamp is append to the filename
"""
if self.stream:
self.stream.close()
self.stream = None
# remove old suffix
if self.suffix_time != "":
index = self.baseFilename.find("." + self.suffix_time)
if index == -1:
index = self.baseFilename.rfind(".")
self.baseFilename = self.baseFilename[:index]
# add new suffix
current_time_tuple = time.localtime()
self.suffix_time = time.strftime(self.suffix, current_time_tuple)
self.baseFilename = self.baseFilename + "." + self.suffix_time
self.mode = 'a'
if not self.delay:
self.stream = self._open() | python | def build_base_filename(self):
"""
do builder; in this case,
old time stamp is removed from filename and
a new time stamp is append to the filename
"""
if self.stream:
self.stream.close()
self.stream = None
# remove old suffix
if self.suffix_time != "":
index = self.baseFilename.find("." + self.suffix_time)
if index == -1:
index = self.baseFilename.rfind(".")
self.baseFilename = self.baseFilename[:index]
# add new suffix
current_time_tuple = time.localtime()
self.suffix_time = time.strftime(self.suffix, current_time_tuple)
self.baseFilename = self.baseFilename + "." + self.suffix_time
self.mode = 'a'
if not self.delay:
self.stream = self._open() | [
"def",
"build_base_filename",
"(",
"self",
")",
":",
"if",
"self",
".",
"stream",
":",
"self",
".",
"stream",
".",
"close",
"(",
")",
"self",
".",
"stream",
"=",
"None",
"# remove old suffix",
"if",
"self",
".",
"suffix_time",
"!=",
"\"\"",
":",
"index",... | do builder; in this case,
old time stamp is removed from filename and
a new time stamp is append to the filename | [
"do",
"builder",
";",
"in",
"this",
"case",
"old",
"time",
"stamp",
"is",
"removed",
"from",
"filename",
"and",
"a",
"new",
"time",
"stamp",
"is",
"append",
"to",
"the",
"filename"
] | 23c5147a6bc0d8ed36409e55352ffb2c5b0edc82 | https://github.com/chinapnr/fishbase/blob/23c5147a6bc0d8ed36409e55352ffb2c5b0edc82/fishbase/fish_logger.py#L71-L95 | train | 202,049 |
javrasya/django-river | river/core/instanceworkflowobject.py | InstanceWorkflowObject._cycle_proceedings | def _cycle_proceedings(self):
"""
Finds next proceedings and clone them for cycling if it exists.
"""
next_approvals = self._get_next_approvals().exclude(
status=PENDING).exclude(cloned=True)
for ta in next_approvals:
clone_transition_approval, c = TransitionApproval.objects.get_or_create(
source_state=ta.source_state,
destination_state=ta.destination_state,
content_type=ta.content_type,
object_id=ta.object_id,
field_name=ta.field_name,
skip=ta.skip,
priority=ta.priority,
enabled=ta.enabled,
status=PENDING,
meta=ta.meta
)
if c:
clone_transition_approval.permissions.add(*ta.permissions.all())
clone_transition_approval.groups.add(*ta.groups.all())
next_approvals.update(cloned=True)
return True if next_approvals.count() else False | python | def _cycle_proceedings(self):
"""
Finds next proceedings and clone them for cycling if it exists.
"""
next_approvals = self._get_next_approvals().exclude(
status=PENDING).exclude(cloned=True)
for ta in next_approvals:
clone_transition_approval, c = TransitionApproval.objects.get_or_create(
source_state=ta.source_state,
destination_state=ta.destination_state,
content_type=ta.content_type,
object_id=ta.object_id,
field_name=ta.field_name,
skip=ta.skip,
priority=ta.priority,
enabled=ta.enabled,
status=PENDING,
meta=ta.meta
)
if c:
clone_transition_approval.permissions.add(*ta.permissions.all())
clone_transition_approval.groups.add(*ta.groups.all())
next_approvals.update(cloned=True)
return True if next_approvals.count() else False | [
"def",
"_cycle_proceedings",
"(",
"self",
")",
":",
"next_approvals",
"=",
"self",
".",
"_get_next_approvals",
"(",
")",
".",
"exclude",
"(",
"status",
"=",
"PENDING",
")",
".",
"exclude",
"(",
"cloned",
"=",
"True",
")",
"for",
"ta",
"in",
"next_approvals... | Finds next proceedings and clone them for cycling if it exists. | [
"Finds",
"next",
"proceedings",
"and",
"clone",
"them",
"for",
"cycling",
"if",
"it",
"exists",
"."
] | c6a9442617d8f24eeaa8fc52954e2007f6af7c88 | https://github.com/javrasya/django-river/blob/c6a9442617d8f24eeaa8fc52954e2007f6af7c88/river/core/instanceworkflowobject.py#L229-L254 | train | 202,050 |
safwanrahman/django-webpush | webpush/views.py | process_subscription_data | def process_subscription_data(post_data):
"""Process the subscription data according to out model"""
subscription_data = post_data.pop("subscription", {})
# As our database saves the auth and p256dh key in separate field,
# we need to refactor it and insert the auth and p256dh keys in the same dictionary
keys = subscription_data.pop("keys", {})
subscription_data.update(keys)
# Insert the browser name
subscription_data["browser"] = post_data.pop("browser")
return subscription_data | python | def process_subscription_data(post_data):
"""Process the subscription data according to out model"""
subscription_data = post_data.pop("subscription", {})
# As our database saves the auth and p256dh key in separate field,
# we need to refactor it and insert the auth and p256dh keys in the same dictionary
keys = subscription_data.pop("keys", {})
subscription_data.update(keys)
# Insert the browser name
subscription_data["browser"] = post_data.pop("browser")
return subscription_data | [
"def",
"process_subscription_data",
"(",
"post_data",
")",
":",
"subscription_data",
"=",
"post_data",
".",
"pop",
"(",
"\"subscription\"",
",",
"{",
"}",
")",
"# As our database saves the auth and p256dh key in separate field,",
"# we need to refactor it and insert the auth and ... | Process the subscription data according to out model | [
"Process",
"the",
"subscription",
"data",
"according",
"to",
"out",
"model"
] | c2d523436de961f44702fd1bf4e3824604101f96 | https://github.com/safwanrahman/django-webpush/blob/c2d523436de961f44702fd1bf4e3824604101f96/webpush/views.py#L51-L60 | train | 202,051 |
st4lk/django-rest-social-auth | example_project/users/social_pipeline.py | save_avatar | def save_avatar(strategy, details, user=None, *args, **kwargs):
"""Get user avatar from social provider."""
if user:
backend_name = kwargs['backend'].__class__.__name__.lower()
response = kwargs.get('response', {})
social_thumb = None
if 'facebook' in backend_name:
if 'id' in response:
social_thumb = (
'http://graph.facebook.com/{0}/picture?type=normal'
).format(response['id'])
elif 'twitter' in backend_name and response.get('profile_image_url'):
social_thumb = response['profile_image_url']
elif 'googleoauth2' in backend_name and response.get('image', {}).get('url'):
social_thumb = response['image']['url'].split('?')[0]
else:
social_thumb = 'http://www.gravatar.com/avatar/'
social_thumb += hashlib.md5(user.email.lower().encode('utf8')).hexdigest()
social_thumb += '?size=100'
if social_thumb and user.social_thumb != social_thumb:
user.social_thumb = social_thumb
strategy.storage.user.changed(user) | python | def save_avatar(strategy, details, user=None, *args, **kwargs):
"""Get user avatar from social provider."""
if user:
backend_name = kwargs['backend'].__class__.__name__.lower()
response = kwargs.get('response', {})
social_thumb = None
if 'facebook' in backend_name:
if 'id' in response:
social_thumb = (
'http://graph.facebook.com/{0}/picture?type=normal'
).format(response['id'])
elif 'twitter' in backend_name and response.get('profile_image_url'):
social_thumb = response['profile_image_url']
elif 'googleoauth2' in backend_name and response.get('image', {}).get('url'):
social_thumb = response['image']['url'].split('?')[0]
else:
social_thumb = 'http://www.gravatar.com/avatar/'
social_thumb += hashlib.md5(user.email.lower().encode('utf8')).hexdigest()
social_thumb += '?size=100'
if social_thumb and user.social_thumb != social_thumb:
user.social_thumb = social_thumb
strategy.storage.user.changed(user) | [
"def",
"save_avatar",
"(",
"strategy",
",",
"details",
",",
"user",
"=",
"None",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"user",
":",
"backend_name",
"=",
"kwargs",
"[",
"'backend'",
"]",
".",
"__class__",
".",
"__name__",
".",
"lo... | Get user avatar from social provider. | [
"Get",
"user",
"avatar",
"from",
"social",
"provider",
"."
] | 6ba9a67659e8c981ddfa75e7e51e80a842183285 | https://github.com/st4lk/django-rest-social-auth/blob/6ba9a67659e8c981ddfa75e7e51e80a842183285/example_project/users/social_pipeline.py#L10-L31 | train | 202,052 |
bitcraft/PyTMX | pytmx/pytmx.py | default_image_loader | def default_image_loader(filename, flags, **kwargs):
""" This default image loader just returns filename, rect, and any flags
"""
def load(rect=None, flags=None):
return filename, rect, flags
return load | python | def default_image_loader(filename, flags, **kwargs):
""" This default image loader just returns filename, rect, and any flags
"""
def load(rect=None, flags=None):
return filename, rect, flags
return load | [
"def",
"default_image_loader",
"(",
"filename",
",",
"flags",
",",
"*",
"*",
"kwargs",
")",
":",
"def",
"load",
"(",
"rect",
"=",
"None",
",",
"flags",
"=",
"None",
")",
":",
"return",
"filename",
",",
"rect",
",",
"flags",
"return",
"load"
] | This default image loader just returns filename, rect, and any flags | [
"This",
"default",
"image",
"loader",
"just",
"returns",
"filename",
"rect",
"and",
"any",
"flags"
] | 3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9 | https://github.com/bitcraft/PyTMX/blob/3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9/pytmx/pytmx.py#L71-L78 | train | 202,053 |
bitcraft/PyTMX | pytmx/pytmx.py | decode_gid | def decode_gid(raw_gid):
""" Decode a GID from TMX data
as of 0.7.0 it determines if the tile should be flipped when rendered
as of 0.8.0 bit 30 determines if GID is rotated
:param raw_gid: 32-bit number from TMX layer data
:return: gid, flags
"""
flags = TileFlags(
raw_gid & GID_TRANS_FLIPX == GID_TRANS_FLIPX,
raw_gid & GID_TRANS_FLIPY == GID_TRANS_FLIPY,
raw_gid & GID_TRANS_ROT == GID_TRANS_ROT)
gid = raw_gid & ~(GID_TRANS_FLIPX | GID_TRANS_FLIPY | GID_TRANS_ROT)
return gid, flags | python | def decode_gid(raw_gid):
""" Decode a GID from TMX data
as of 0.7.0 it determines if the tile should be flipped when rendered
as of 0.8.0 bit 30 determines if GID is rotated
:param raw_gid: 32-bit number from TMX layer data
:return: gid, flags
"""
flags = TileFlags(
raw_gid & GID_TRANS_FLIPX == GID_TRANS_FLIPX,
raw_gid & GID_TRANS_FLIPY == GID_TRANS_FLIPY,
raw_gid & GID_TRANS_ROT == GID_TRANS_ROT)
gid = raw_gid & ~(GID_TRANS_FLIPX | GID_TRANS_FLIPY | GID_TRANS_ROT)
return gid, flags | [
"def",
"decode_gid",
"(",
"raw_gid",
")",
":",
"flags",
"=",
"TileFlags",
"(",
"raw_gid",
"&",
"GID_TRANS_FLIPX",
"==",
"GID_TRANS_FLIPX",
",",
"raw_gid",
"&",
"GID_TRANS_FLIPY",
"==",
"GID_TRANS_FLIPY",
",",
"raw_gid",
"&",
"GID_TRANS_ROT",
"==",
"GID_TRANS_ROT",... | Decode a GID from TMX data
as of 0.7.0 it determines if the tile should be flipped when rendered
as of 0.8.0 bit 30 determines if GID is rotated
:param raw_gid: 32-bit number from TMX layer data
:return: gid, flags | [
"Decode",
"a",
"GID",
"from",
"TMX",
"data"
] | 3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9 | https://github.com/bitcraft/PyTMX/blob/3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9/pytmx/pytmx.py#L81-L95 | train | 202,054 |
bitcraft/PyTMX | pytmx/pytmx.py | convert_to_bool | def convert_to_bool(text):
""" Convert a few common variations of "true" and "false" to boolean
:param text: string to test
:return: boolean
:raises: ValueError
"""
# handle "1" and "0"
try:
return bool(int(text))
except:
pass
text = str(text).lower()
if text == "true":
return True
if text == "yes":
return True
if text == "false":
return False
if text == "no":
return False
raise ValueError | python | def convert_to_bool(text):
""" Convert a few common variations of "true" and "false" to boolean
:param text: string to test
:return: boolean
:raises: ValueError
"""
# handle "1" and "0"
try:
return bool(int(text))
except:
pass
text = str(text).lower()
if text == "true":
return True
if text == "yes":
return True
if text == "false":
return False
if text == "no":
return False
raise ValueError | [
"def",
"convert_to_bool",
"(",
"text",
")",
":",
"# handle \"1\" and \"0\"",
"try",
":",
"return",
"bool",
"(",
"int",
"(",
"text",
")",
")",
"except",
":",
"pass",
"text",
"=",
"str",
"(",
"text",
")",
".",
"lower",
"(",
")",
"if",
"text",
"==",
"\"... | Convert a few common variations of "true" and "false" to boolean
:param text: string to test
:return: boolean
:raises: ValueError | [
"Convert",
"a",
"few",
"common",
"variations",
"of",
"true",
"and",
"false",
"to",
"boolean"
] | 3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9 | https://github.com/bitcraft/PyTMX/blob/3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9/pytmx/pytmx.py#L98-L121 | train | 202,055 |
bitcraft/PyTMX | pytmx/pytmx.py | parse_properties | def parse_properties(node):
""" Parse a Tiled xml node and return a dict that represents a tiled "property"
:param node: etree element
:return: dict
"""
d = dict()
for child in node.findall('properties'):
for subnode in child.findall('property'):
cls = None
try:
if "type" in subnode.keys():
module = importlib.import_module('builtins')
cls = getattr(module, subnode.get("type"))
except AttributeError:
logger.info("Type [} Not a built-in type. Defaulting to string-cast.")
d[subnode.get('name')] = cls(subnode.get('value')) if cls is not None else subnode.get('value')
return d | python | def parse_properties(node):
""" Parse a Tiled xml node and return a dict that represents a tiled "property"
:param node: etree element
:return: dict
"""
d = dict()
for child in node.findall('properties'):
for subnode in child.findall('property'):
cls = None
try:
if "type" in subnode.keys():
module = importlib.import_module('builtins')
cls = getattr(module, subnode.get("type"))
except AttributeError:
logger.info("Type [} Not a built-in type. Defaulting to string-cast.")
d[subnode.get('name')] = cls(subnode.get('value')) if cls is not None else subnode.get('value')
return d | [
"def",
"parse_properties",
"(",
"node",
")",
":",
"d",
"=",
"dict",
"(",
")",
"for",
"child",
"in",
"node",
".",
"findall",
"(",
"'properties'",
")",
":",
"for",
"subnode",
"in",
"child",
".",
"findall",
"(",
"'property'",
")",
":",
"cls",
"=",
"None... | Parse a Tiled xml node and return a dict that represents a tiled "property"
:param node: etree element
:return: dict | [
"Parse",
"a",
"Tiled",
"xml",
"node",
"and",
"return",
"a",
"dict",
"that",
"represents",
"a",
"tiled",
"property"
] | 3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9 | https://github.com/bitcraft/PyTMX/blob/3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9/pytmx/pytmx.py#L196-L213 | train | 202,056 |
bitcraft/PyTMX | pytmx/pytmx.py | TiledElement._set_properties | def _set_properties(self, node):
""" Create dict containing Tiled object attributes from xml data
read the xml attributes and tiled "properties" from a xml node and fill
in the values into the object's dictionary. Names will be checked to
make sure that they do not conflict with reserved names.
:param node: etree element
:return: dict
"""
self._cast_and_set_attributes_from_node_items(node.items())
properties = parse_properties(node)
if (not self.allow_duplicate_names and
self._contains_invalid_property_name(properties.items())):
self._log_property_error_message()
raise ValueError("Reserved names and duplicate names are not allowed. Please rename your property inside the .tmx-file")
self.properties = properties | python | def _set_properties(self, node):
""" Create dict containing Tiled object attributes from xml data
read the xml attributes and tiled "properties" from a xml node and fill
in the values into the object's dictionary. Names will be checked to
make sure that they do not conflict with reserved names.
:param node: etree element
:return: dict
"""
self._cast_and_set_attributes_from_node_items(node.items())
properties = parse_properties(node)
if (not self.allow_duplicate_names and
self._contains_invalid_property_name(properties.items())):
self._log_property_error_message()
raise ValueError("Reserved names and duplicate names are not allowed. Please rename your property inside the .tmx-file")
self.properties = properties | [
"def",
"_set_properties",
"(",
"self",
",",
"node",
")",
":",
"self",
".",
"_cast_and_set_attributes_from_node_items",
"(",
"node",
".",
"items",
"(",
")",
")",
"properties",
"=",
"parse_properties",
"(",
"node",
")",
"if",
"(",
"not",
"self",
".",
"allow_du... | Create dict containing Tiled object attributes from xml data
read the xml attributes and tiled "properties" from a xml node and fill
in the values into the object's dictionary. Names will be checked to
make sure that they do not conflict with reserved names.
:param node: etree element
:return: dict | [
"Create",
"dict",
"containing",
"Tiled",
"object",
"attributes",
"from",
"xml",
"data"
] | 3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9 | https://github.com/bitcraft/PyTMX/blob/3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9/pytmx/pytmx.py#L262-L279 | train | 202,057 |
bitcraft/PyTMX | pytmx/pytmx.py | TiledMap.parse_xml | def parse_xml(self, node):
""" Parse a map from ElementTree xml node
:param node: ElementTree xml node
:return: self
"""
self._set_properties(node)
self.background_color = node.get('backgroundcolor',
self.background_color)
# *** do not change this load order! *** #
# *** gid mapping errors will occur if changed *** #
for subnode in node.findall('layer'):
self.add_layer(TiledTileLayer(self, subnode))
for subnode in node.findall('imagelayer'):
self.add_layer(TiledImageLayer(self, subnode))
for subnode in node.findall('objectgroup'):
self.add_layer(TiledObjectGroup(self, subnode))
for subnode in node.findall('tileset'):
self.add_tileset(TiledTileset(self, subnode))
# "tile objects", objects with a GID, have need to have their attributes
# set after the tileset is loaded, so this step must be performed last
# also, this step is performed for objects to load their tiles.
# tiled stores the origin of GID objects by the lower right corner
# this is different for all other types, so i just adjust it here
# so all types loaded with pytmx are uniform.
# iterate through tile objects and handle the image
for o in [o for o in self.objects if o.gid]:
# gids might also have properties assigned to them
# in that case, assign the gid properties to the object as well
p = self.get_tile_properties_by_gid(o.gid)
if p:
for key in p:
o.properties.setdefault(key, p[key])
if self.invert_y:
o.y -= o.height
self.reload_images()
return self | python | def parse_xml(self, node):
""" Parse a map from ElementTree xml node
:param node: ElementTree xml node
:return: self
"""
self._set_properties(node)
self.background_color = node.get('backgroundcolor',
self.background_color)
# *** do not change this load order! *** #
# *** gid mapping errors will occur if changed *** #
for subnode in node.findall('layer'):
self.add_layer(TiledTileLayer(self, subnode))
for subnode in node.findall('imagelayer'):
self.add_layer(TiledImageLayer(self, subnode))
for subnode in node.findall('objectgroup'):
self.add_layer(TiledObjectGroup(self, subnode))
for subnode in node.findall('tileset'):
self.add_tileset(TiledTileset(self, subnode))
# "tile objects", objects with a GID, have need to have their attributes
# set after the tileset is loaded, so this step must be performed last
# also, this step is performed for objects to load their tiles.
# tiled stores the origin of GID objects by the lower right corner
# this is different for all other types, so i just adjust it here
# so all types loaded with pytmx are uniform.
# iterate through tile objects and handle the image
for o in [o for o in self.objects if o.gid]:
# gids might also have properties assigned to them
# in that case, assign the gid properties to the object as well
p = self.get_tile_properties_by_gid(o.gid)
if p:
for key in p:
o.properties.setdefault(key, p[key])
if self.invert_y:
o.y -= o.height
self.reload_images()
return self | [
"def",
"parse_xml",
"(",
"self",
",",
"node",
")",
":",
"self",
".",
"_set_properties",
"(",
"node",
")",
"self",
".",
"background_color",
"=",
"node",
".",
"get",
"(",
"'backgroundcolor'",
",",
"self",
".",
"background_color",
")",
"# *** do not chang... | Parse a map from ElementTree xml node
:param node: ElementTree xml node
:return: self | [
"Parse",
"a",
"map",
"from",
"ElementTree",
"xml",
"node"
] | 3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9 | https://github.com/bitcraft/PyTMX/blob/3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9/pytmx/pytmx.py#L378-L424 | train | 202,058 |
bitcraft/PyTMX | pytmx/pytmx.py | TiledMap.reload_images | def reload_images(self):
""" Load the map images from disk
This method will use the image loader passed in the constructor
to do the loading or will use a generic default, in which case no
images will be loaded.
:return: None
"""
self.images = [None] * self.maxgid
# iterate through tilesets to get source images
for ts in self.tilesets:
# skip tilesets without a source
if ts.source is None:
continue
path = os.path.join(os.path.dirname(self.filename), ts.source)
colorkey = getattr(ts, 'trans', None)
loader = self.image_loader(path, colorkey, tileset=ts)
p = product(range(ts.margin,
ts.height + ts.margin - ts.tileheight + 1,
ts.tileheight + ts.spacing),
range(ts.margin,
ts.width + ts.margin - ts.tilewidth + 1,
ts.tilewidth + ts.spacing))
# iterate through the tiles
for real_gid, (y, x) in enumerate(p, ts.firstgid):
rect = (x, y, ts.tilewidth, ts.tileheight)
gids = self.map_gid(real_gid)
# gids is None if the tile is never used
# but give another chance to load the gid anyway
if gids is None:
if self.load_all_tiles or real_gid in self.optional_gids:
# TODO: handle flags? - might never be an issue, though
gids = [self.register_gid(real_gid, flags=0)]
if gids:
# flags might rotate/flip the image, so let the loader
# handle that here
for gid, flags in gids:
self.images[gid] = loader(rect, flags)
# load image layer images
for layer in (i for i in self.layers if isinstance(i, TiledImageLayer)):
source = getattr(layer, 'source', None)
if source:
colorkey = getattr(layer, 'trans', None)
real_gid = len(self.images)
gid = self.register_gid(real_gid)
layer.gid = gid
path = os.path.join(os.path.dirname(self.filename), source)
loader = self.image_loader(path, colorkey)
image = loader()
self.images.append(image)
# load images in tiles.
# instead of making a new gid, replace the reference to the tile that
# was loaded from the tileset
for real_gid, props in self.tile_properties.items():
source = props.get('source', None)
if source:
colorkey = props.get('trans', None)
path = os.path.join(os.path.dirname(self.filename), source)
loader = self.image_loader(path, colorkey)
image = loader()
self.images[real_gid] = image | python | def reload_images(self):
""" Load the map images from disk
This method will use the image loader passed in the constructor
to do the loading or will use a generic default, in which case no
images will be loaded.
:return: None
"""
self.images = [None] * self.maxgid
# iterate through tilesets to get source images
for ts in self.tilesets:
# skip tilesets without a source
if ts.source is None:
continue
path = os.path.join(os.path.dirname(self.filename), ts.source)
colorkey = getattr(ts, 'trans', None)
loader = self.image_loader(path, colorkey, tileset=ts)
p = product(range(ts.margin,
ts.height + ts.margin - ts.tileheight + 1,
ts.tileheight + ts.spacing),
range(ts.margin,
ts.width + ts.margin - ts.tilewidth + 1,
ts.tilewidth + ts.spacing))
# iterate through the tiles
for real_gid, (y, x) in enumerate(p, ts.firstgid):
rect = (x, y, ts.tilewidth, ts.tileheight)
gids = self.map_gid(real_gid)
# gids is None if the tile is never used
# but give another chance to load the gid anyway
if gids is None:
if self.load_all_tiles or real_gid in self.optional_gids:
# TODO: handle flags? - might never be an issue, though
gids = [self.register_gid(real_gid, flags=0)]
if gids:
# flags might rotate/flip the image, so let the loader
# handle that here
for gid, flags in gids:
self.images[gid] = loader(rect, flags)
# load image layer images
for layer in (i for i in self.layers if isinstance(i, TiledImageLayer)):
source = getattr(layer, 'source', None)
if source:
colorkey = getattr(layer, 'trans', None)
real_gid = len(self.images)
gid = self.register_gid(real_gid)
layer.gid = gid
path = os.path.join(os.path.dirname(self.filename), source)
loader = self.image_loader(path, colorkey)
image = loader()
self.images.append(image)
# load images in tiles.
# instead of making a new gid, replace the reference to the tile that
# was loaded from the tileset
for real_gid, props in self.tile_properties.items():
source = props.get('source', None)
if source:
colorkey = props.get('trans', None)
path = os.path.join(os.path.dirname(self.filename), source)
loader = self.image_loader(path, colorkey)
image = loader()
self.images[real_gid] = image | [
"def",
"reload_images",
"(",
"self",
")",
":",
"self",
".",
"images",
"=",
"[",
"None",
"]",
"*",
"self",
".",
"maxgid",
"# iterate through tilesets to get source images",
"for",
"ts",
"in",
"self",
".",
"tilesets",
":",
"# skip tilesets without a source",
"if",
... | Load the map images from disk
This method will use the image loader passed in the constructor
to do the loading or will use a generic default, in which case no
images will be loaded.
:return: None | [
"Load",
"the",
"map",
"images",
"from",
"disk"
] | 3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9 | https://github.com/bitcraft/PyTMX/blob/3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9/pytmx/pytmx.py#L426-L496 | train | 202,059 |
bitcraft/PyTMX | pytmx/pytmx.py | TiledMap.get_tile_locations_by_gid | def get_tile_locations_by_gid(self, gid):
""" Search map for tile locations by the GID
Return (int, int, int) tuples, where the layer is index of
the visible tile layers.
Note: Not a fast operation. Cache results if used often.
:param gid: GID to be searched for
:rtype: generator of tile locations
"""
for l in self.visible_tile_layers:
for x, y, _gid in [i for i in self.layers[l].iter_data() if i[2] == gid]:
yield x, y, l | python | def get_tile_locations_by_gid(self, gid):
""" Search map for tile locations by the GID
Return (int, int, int) tuples, where the layer is index of
the visible tile layers.
Note: Not a fast operation. Cache results if used often.
:param gid: GID to be searched for
:rtype: generator of tile locations
"""
for l in self.visible_tile_layers:
for x, y, _gid in [i for i in self.layers[l].iter_data() if i[2] == gid]:
yield x, y, l | [
"def",
"get_tile_locations_by_gid",
"(",
"self",
",",
"gid",
")",
":",
"for",
"l",
"in",
"self",
".",
"visible_tile_layers",
":",
"for",
"x",
",",
"y",
",",
"_gid",
"in",
"[",
"i",
"for",
"i",
"in",
"self",
".",
"layers",
"[",
"l",
"]",
".",
"iter_... | Search map for tile locations by the GID
Return (int, int, int) tuples, where the layer is index of
the visible tile layers.
Note: Not a fast operation. Cache results if used often.
:param gid: GID to be searched for
:rtype: generator of tile locations | [
"Search",
"map",
"for",
"tile",
"locations",
"by",
"the",
"GID"
] | 3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9 | https://github.com/bitcraft/PyTMX/blob/3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9/pytmx/pytmx.py#L598-L611 | train | 202,060 |
bitcraft/PyTMX | pytmx/pytmx.py | TiledMap.get_tile_properties_by_layer | def get_tile_properties_by_layer(self, layer):
""" Get the tile properties of each GID in layer
:param layer: layer number
:rtype: iterator of (gid, properties) tuples
"""
try:
assert (int(layer) >= 0)
layer = int(layer)
except (TypeError, AssertionError):
msg = "Layer must be a positive integer. Got {0} instead."
logger.debug(msg.format(type(layer)))
raise ValueError
p = product(range(self.width), range(self.height))
layergids = set(self.layers[layer].data[y][x] for x, y in p)
for gid in layergids:
try:
yield gid, self.tile_properties[gid]
except KeyError:
continue | python | def get_tile_properties_by_layer(self, layer):
""" Get the tile properties of each GID in layer
:param layer: layer number
:rtype: iterator of (gid, properties) tuples
"""
try:
assert (int(layer) >= 0)
layer = int(layer)
except (TypeError, AssertionError):
msg = "Layer must be a positive integer. Got {0} instead."
logger.debug(msg.format(type(layer)))
raise ValueError
p = product(range(self.width), range(self.height))
layergids = set(self.layers[layer].data[y][x] for x, y in p)
for gid in layergids:
try:
yield gid, self.tile_properties[gid]
except KeyError:
continue | [
"def",
"get_tile_properties_by_layer",
"(",
"self",
",",
"layer",
")",
":",
"try",
":",
"assert",
"(",
"int",
"(",
"layer",
")",
">=",
"0",
")",
"layer",
"=",
"int",
"(",
"layer",
")",
"except",
"(",
"TypeError",
",",
"AssertionError",
")",
":",
"msg",... | Get the tile properties of each GID in layer
:param layer: layer number
:rtype: iterator of (gid, properties) tuples | [
"Get",
"the",
"tile",
"properties",
"of",
"each",
"GID",
"in",
"layer"
] | 3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9 | https://github.com/bitcraft/PyTMX/blob/3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9/pytmx/pytmx.py#L632-L653 | train | 202,061 |
bitcraft/PyTMX | pytmx/pytmx.py | TiledMap.add_tileset | def add_tileset(self, tileset):
""" Add a tileset to the map
:param tileset: TiledTileset
"""
assert (isinstance(tileset, TiledTileset))
self.tilesets.append(tileset) | python | def add_tileset(self, tileset):
""" Add a tileset to the map
:param tileset: TiledTileset
"""
assert (isinstance(tileset, TiledTileset))
self.tilesets.append(tileset) | [
"def",
"add_tileset",
"(",
"self",
",",
"tileset",
")",
":",
"assert",
"(",
"isinstance",
"(",
"tileset",
",",
"TiledTileset",
")",
")",
"self",
".",
"tilesets",
".",
"append",
"(",
"tileset",
")"
] | Add a tileset to the map
:param tileset: TiledTileset | [
"Add",
"a",
"tileset",
"to",
"the",
"map"
] | 3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9 | https://github.com/bitcraft/PyTMX/blob/3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9/pytmx/pytmx.py#L667-L673 | train | 202,062 |
bitcraft/PyTMX | pytmx/pytmx.py | TiledMap.get_layer_by_name | def get_layer_by_name(self, name):
"""Return a layer by name
:param name: Name of layer. Case-sensitive.
:rtype: Layer object if found, otherwise ValueError
"""
try:
return self.layernames[name]
except KeyError:
msg = 'Layer "{0}" not found.'
logger.debug(msg.format(name))
raise ValueError | python | def get_layer_by_name(self, name):
"""Return a layer by name
:param name: Name of layer. Case-sensitive.
:rtype: Layer object if found, otherwise ValueError
"""
try:
return self.layernames[name]
except KeyError:
msg = 'Layer "{0}" not found.'
logger.debug(msg.format(name))
raise ValueError | [
"def",
"get_layer_by_name",
"(",
"self",
",",
"name",
")",
":",
"try",
":",
"return",
"self",
".",
"layernames",
"[",
"name",
"]",
"except",
"KeyError",
":",
"msg",
"=",
"'Layer \"{0}\" not found.'",
"logger",
".",
"debug",
"(",
"msg",
".",
"format",
"(",
... | Return a layer by name
:param name: Name of layer. Case-sensitive.
:rtype: Layer object if found, otherwise ValueError | [
"Return",
"a",
"layer",
"by",
"name"
] | 3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9 | https://github.com/bitcraft/PyTMX/blob/3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9/pytmx/pytmx.py#L675-L686 | train | 202,063 |
bitcraft/PyTMX | pytmx/pytmx.py | TiledMap.get_object_by_name | def get_object_by_name(self, name):
"""Find an object
:param name: Name of object. Case-sensitive.
:rtype: Object if found, otherwise ValueError
"""
for obj in self.objects:
if obj.name == name:
return obj
raise ValueError | python | def get_object_by_name(self, name):
"""Find an object
:param name: Name of object. Case-sensitive.
:rtype: Object if found, otherwise ValueError
"""
for obj in self.objects:
if obj.name == name:
return obj
raise ValueError | [
"def",
"get_object_by_name",
"(",
"self",
",",
"name",
")",
":",
"for",
"obj",
"in",
"self",
".",
"objects",
":",
"if",
"obj",
".",
"name",
"==",
"name",
":",
"return",
"obj",
"raise",
"ValueError"
] | Find an object
:param name: Name of object. Case-sensitive.
:rtype: Object if found, otherwise ValueError | [
"Find",
"an",
"object"
] | 3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9 | https://github.com/bitcraft/PyTMX/blob/3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9/pytmx/pytmx.py#L688-L697 | train | 202,064 |
bitcraft/PyTMX | pytmx/pytmx.py | TiledMap.get_tileset_from_gid | def get_tileset_from_gid(self, gid):
""" Return tileset that owns the gid
Note: this is a slow operation, so if you are expecting to do this
often, it would be worthwhile to cache the results of this.
:param gid: gid of tile image
:rtype: TiledTileset if found, otherwise ValueError
"""
try:
tiled_gid = self.tiledgidmap[gid]
except KeyError:
raise ValueError
for tileset in sorted(self.tilesets, key=attrgetter('firstgid'),
reverse=True):
if tiled_gid >= tileset.firstgid:
return tileset
raise ValueError | python | def get_tileset_from_gid(self, gid):
""" Return tileset that owns the gid
Note: this is a slow operation, so if you are expecting to do this
often, it would be worthwhile to cache the results of this.
:param gid: gid of tile image
:rtype: TiledTileset if found, otherwise ValueError
"""
try:
tiled_gid = self.tiledgidmap[gid]
except KeyError:
raise ValueError
for tileset in sorted(self.tilesets, key=attrgetter('firstgid'),
reverse=True):
if tiled_gid >= tileset.firstgid:
return tileset
raise ValueError | [
"def",
"get_tileset_from_gid",
"(",
"self",
",",
"gid",
")",
":",
"try",
":",
"tiled_gid",
"=",
"self",
".",
"tiledgidmap",
"[",
"gid",
"]",
"except",
"KeyError",
":",
"raise",
"ValueError",
"for",
"tileset",
"in",
"sorted",
"(",
"self",
".",
"tilesets",
... | Return tileset that owns the gid
Note: this is a slow operation, so if you are expecting to do this
often, it would be worthwhile to cache the results of this.
:param gid: gid of tile image
:rtype: TiledTileset if found, otherwise ValueError | [
"Return",
"tileset",
"that",
"owns",
"the",
"gid"
] | 3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9 | https://github.com/bitcraft/PyTMX/blob/3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9/pytmx/pytmx.py#L699-L718 | train | 202,065 |
bitcraft/PyTMX | pytmx/pytmx.py | TiledMap.visible_tile_layers | def visible_tile_layers(self):
"""Return iterator of layer indexes that are set 'visible'
:rtype: Iterator
"""
return (i for (i, l) in enumerate(self.layers)
if l.visible and isinstance(l, TiledTileLayer)) | python | def visible_tile_layers(self):
"""Return iterator of layer indexes that are set 'visible'
:rtype: Iterator
"""
return (i for (i, l) in enumerate(self.layers)
if l.visible and isinstance(l, TiledTileLayer)) | [
"def",
"visible_tile_layers",
"(",
"self",
")",
":",
"return",
"(",
"i",
"for",
"(",
"i",
",",
"l",
")",
"in",
"enumerate",
"(",
"self",
".",
"layers",
")",
"if",
"l",
".",
"visible",
"and",
"isinstance",
"(",
"l",
",",
"TiledTileLayer",
")",
")"
] | Return iterator of layer indexes that are set 'visible'
:rtype: Iterator | [
"Return",
"iterator",
"of",
"layer",
"indexes",
"that",
"are",
"set",
"visible"
] | 3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9 | https://github.com/bitcraft/PyTMX/blob/3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9/pytmx/pytmx.py#L746-L752 | train | 202,066 |
bitcraft/PyTMX | pytmx/pytmx.py | TiledMap.visible_object_groups | def visible_object_groups(self):
"""Return iterator of object group indexes that are set 'visible'
:rtype: Iterator
"""
return (i for (i, l) in enumerate(self.layers)
if l.visible and isinstance(l, TiledObjectGroup)) | python | def visible_object_groups(self):
"""Return iterator of object group indexes that are set 'visible'
:rtype: Iterator
"""
return (i for (i, l) in enumerate(self.layers)
if l.visible and isinstance(l, TiledObjectGroup)) | [
"def",
"visible_object_groups",
"(",
"self",
")",
":",
"return",
"(",
"i",
"for",
"(",
"i",
",",
"l",
")",
"in",
"enumerate",
"(",
"self",
".",
"layers",
")",
"if",
"l",
".",
"visible",
"and",
"isinstance",
"(",
"l",
",",
"TiledObjectGroup",
")",
")"... | Return iterator of object group indexes that are set 'visible'
:rtype: Iterator | [
"Return",
"iterator",
"of",
"object",
"group",
"indexes",
"that",
"are",
"set",
"visible"
] | 3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9 | https://github.com/bitcraft/PyTMX/blob/3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9/pytmx/pytmx.py#L755-L761 | train | 202,067 |
bitcraft/PyTMX | pytmx/pytmx.py | TiledMap.register_gid | def register_gid(self, tiled_gid, flags=None):
""" Used to manage the mapping of GIDs between the tmx and pytmx
:param tiled_gid: GID that is found in TMX data
:rtype: GID that pytmx uses for the the GID passed
"""
if flags is None:
flags = TileFlags(0, 0, 0)
if tiled_gid:
try:
return self.imagemap[(tiled_gid, flags)][0]
except KeyError:
gid = self.maxgid
self.maxgid += 1
self.imagemap[(tiled_gid, flags)] = (gid, flags)
self.gidmap[tiled_gid].append((gid, flags))
self.tiledgidmap[gid] = tiled_gid
return gid
else:
return 0 | python | def register_gid(self, tiled_gid, flags=None):
""" Used to manage the mapping of GIDs between the tmx and pytmx
:param tiled_gid: GID that is found in TMX data
:rtype: GID that pytmx uses for the the GID passed
"""
if flags is None:
flags = TileFlags(0, 0, 0)
if tiled_gid:
try:
return self.imagemap[(tiled_gid, flags)][0]
except KeyError:
gid = self.maxgid
self.maxgid += 1
self.imagemap[(tiled_gid, flags)] = (gid, flags)
self.gidmap[tiled_gid].append((gid, flags))
self.tiledgidmap[gid] = tiled_gid
return gid
else:
return 0 | [
"def",
"register_gid",
"(",
"self",
",",
"tiled_gid",
",",
"flags",
"=",
"None",
")",
":",
"if",
"flags",
"is",
"None",
":",
"flags",
"=",
"TileFlags",
"(",
"0",
",",
"0",
",",
"0",
")",
"if",
"tiled_gid",
":",
"try",
":",
"return",
"self",
".",
... | Used to manage the mapping of GIDs between the tmx and pytmx
:param tiled_gid: GID that is found in TMX data
:rtype: GID that pytmx uses for the the GID passed | [
"Used",
"to",
"manage",
"the",
"mapping",
"of",
"GIDs",
"between",
"the",
"tmx",
"and",
"pytmx"
] | 3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9 | https://github.com/bitcraft/PyTMX/blob/3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9/pytmx/pytmx.py#L763-L784 | train | 202,068 |
bitcraft/PyTMX | pytmx/pytmx.py | TiledMap.map_gid | def map_gid(self, tiled_gid):
""" Used to lookup a GID read from a TMX file's data
:param tiled_gid: GID that is found in TMX data
:rtype: (GID, flags) for the the GID passed, None if not found
"""
try:
return self.gidmap[int(tiled_gid)]
except KeyError:
return None
except TypeError:
msg = "GIDs must be an integer"
logger.debug(msg)
raise TypeError | python | def map_gid(self, tiled_gid):
""" Used to lookup a GID read from a TMX file's data
:param tiled_gid: GID that is found in TMX data
:rtype: (GID, flags) for the the GID passed, None if not found
"""
try:
return self.gidmap[int(tiled_gid)]
except KeyError:
return None
except TypeError:
msg = "GIDs must be an integer"
logger.debug(msg)
raise TypeError | [
"def",
"map_gid",
"(",
"self",
",",
"tiled_gid",
")",
":",
"try",
":",
"return",
"self",
".",
"gidmap",
"[",
"int",
"(",
"tiled_gid",
")",
"]",
"except",
"KeyError",
":",
"return",
"None",
"except",
"TypeError",
":",
"msg",
"=",
"\"GIDs must be an integer\... | Used to lookup a GID read from a TMX file's data
:param tiled_gid: GID that is found in TMX data
:rtype: (GID, flags) for the the GID passed, None if not found | [
"Used",
"to",
"lookup",
"a",
"GID",
"read",
"from",
"a",
"TMX",
"file",
"s",
"data"
] | 3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9 | https://github.com/bitcraft/PyTMX/blob/3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9/pytmx/pytmx.py#L786-L799 | train | 202,069 |
bitcraft/PyTMX | pytmx/pytmx.py | TiledMap.map_gid2 | def map_gid2(self, tiled_gid):
""" WIP. need to refactor the gid code
:param tiled_gid:
:return:
"""
tiled_gid = int(tiled_gid)
# gidmap is a default dict, so cannot trust to raise KeyError
if tiled_gid in self.gidmap:
return self.gidmap[tiled_gid]
else:
gid = self.register_gid(tiled_gid)
return [(gid, None)] | python | def map_gid2(self, tiled_gid):
""" WIP. need to refactor the gid code
:param tiled_gid:
:return:
"""
tiled_gid = int(tiled_gid)
# gidmap is a default dict, so cannot trust to raise KeyError
if tiled_gid in self.gidmap:
return self.gidmap[tiled_gid]
else:
gid = self.register_gid(tiled_gid)
return [(gid, None)] | [
"def",
"map_gid2",
"(",
"self",
",",
"tiled_gid",
")",
":",
"tiled_gid",
"=",
"int",
"(",
"tiled_gid",
")",
"# gidmap is a default dict, so cannot trust to raise KeyError",
"if",
"tiled_gid",
"in",
"self",
".",
"gidmap",
":",
"return",
"self",
".",
"gidmap",
"[",
... | WIP. need to refactor the gid code
:param tiled_gid:
:return: | [
"WIP",
".",
"need",
"to",
"refactor",
"the",
"gid",
"code"
] | 3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9 | https://github.com/bitcraft/PyTMX/blob/3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9/pytmx/pytmx.py#L801-L814 | train | 202,070 |
bitcraft/PyTMX | pytmx/pytmx.py | TiledTileLayer.iter_data | def iter_data(self):
""" Iterate over layer data
Yields X, Y, GID tuples for each tile in the layer
:return: Generator
"""
for y, row in enumerate(self.data):
for x, gid in enumerate(row):
yield x, y, gid | python | def iter_data(self):
""" Iterate over layer data
Yields X, Y, GID tuples for each tile in the layer
:return: Generator
"""
for y, row in enumerate(self.data):
for x, gid in enumerate(row):
yield x, y, gid | [
"def",
"iter_data",
"(",
"self",
")",
":",
"for",
"y",
",",
"row",
"in",
"enumerate",
"(",
"self",
".",
"data",
")",
":",
"for",
"x",
",",
"gid",
"in",
"enumerate",
"(",
"row",
")",
":",
"yield",
"x",
",",
"y",
",",
"gid"
] | Iterate over layer data
Yields X, Y, GID tuples for each tile in the layer
:return: Generator | [
"Iterate",
"over",
"layer",
"data"
] | 3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9 | https://github.com/bitcraft/PyTMX/blob/3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9/pytmx/pytmx.py#L971-L980 | train | 202,071 |
bitcraft/PyTMX | pytmx/pytmx.py | TiledTileLayer.tiles | def tiles(self):
""" Iterate over tile images of this layer
This is an optimised generator function that returns
(tile_x, tile_y, tile_image) tuples,
:rtype: Generator
:return: (x, y, image) tuples
"""
images = self.parent.images
for x, y, gid in [i for i in self.iter_data() if i[2]]:
yield x, y, images[gid] | python | def tiles(self):
""" Iterate over tile images of this layer
This is an optimised generator function that returns
(tile_x, tile_y, tile_image) tuples,
:rtype: Generator
:return: (x, y, image) tuples
"""
images = self.parent.images
for x, y, gid in [i for i in self.iter_data() if i[2]]:
yield x, y, images[gid] | [
"def",
"tiles",
"(",
"self",
")",
":",
"images",
"=",
"self",
".",
"parent",
".",
"images",
"for",
"x",
",",
"y",
",",
"gid",
"in",
"[",
"i",
"for",
"i",
"in",
"self",
".",
"iter_data",
"(",
")",
"if",
"i",
"[",
"2",
"]",
"]",
":",
"yield",
... | Iterate over tile images of this layer
This is an optimised generator function that returns
(tile_x, tile_y, tile_image) tuples,
:rtype: Generator
:return: (x, y, image) tuples | [
"Iterate",
"over",
"tile",
"images",
"of",
"this",
"layer"
] | 3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9 | https://github.com/bitcraft/PyTMX/blob/3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9/pytmx/pytmx.py#L982-L993 | train | 202,072 |
bitcraft/PyTMX | pytmx/pytmx.py | TiledTileLayer.parse_xml | def parse_xml(self, node):
""" Parse a Tile Layer from ElementTree xml node
:param node: ElementTree xml node
:return: self
"""
import struct
import array
self._set_properties(node)
data = None
next_gid = None
data_node = node.find('data')
encoding = data_node.get('encoding', None)
if encoding == 'base64':
from base64 import b64decode
data = b64decode(data_node.text.strip())
elif encoding == 'csv':
next_gid = map(int, "".join(
line.strip() for line in data_node.text.strip()).split(","))
elif encoding:
msg = 'TMX encoding type: {0} is not supported.'
logger.error(msg.format(encoding))
raise Exception
compression = data_node.get('compression', None)
if compression == 'gzip':
import gzip
with gzip.GzipFile(fileobj=six.BytesIO(data)) as fh:
data = fh.read()
elif compression == 'zlib':
import zlib
data = zlib.decompress(data)
elif compression:
msg = 'TMX compression type: {0} is not supported.'
logger.error(msg.format(compression))
raise Exception
# if data is None, then it was not decoded or decompressed, so
# we assume here that it is going to be a bunch of tile elements
# TODO: this will/should raise an exception if there are no tiles
if encoding == next_gid is None:
def get_children(parent):
for child in parent.findall('tile'):
yield int(child.get('gid'))
next_gid = get_children(data_node)
elif data:
if type(data) == bytes:
fmt = struct.Struct('<L')
iterator = (data[i:i + 4] for i in range(0, len(data), 4))
next_gid = (fmt.unpack(i)[0] for i in iterator)
else:
msg = 'layer data not in expected format ({})'
logger.error(msg.format(type(data)))
raise Exception
init = lambda: [0] * self.width
reg = self.parent.register_gid
# H (16-bit) may be a limitation for very detailed maps
self.data = tuple(array.array('H', init()) for i in range(self.height))
for (y, x) in product(range(self.height), range(self.width)):
self.data[y][x] = reg(*decode_gid(next(next_gid)))
return self | python | def parse_xml(self, node):
""" Parse a Tile Layer from ElementTree xml node
:param node: ElementTree xml node
:return: self
"""
import struct
import array
self._set_properties(node)
data = None
next_gid = None
data_node = node.find('data')
encoding = data_node.get('encoding', None)
if encoding == 'base64':
from base64 import b64decode
data = b64decode(data_node.text.strip())
elif encoding == 'csv':
next_gid = map(int, "".join(
line.strip() for line in data_node.text.strip()).split(","))
elif encoding:
msg = 'TMX encoding type: {0} is not supported.'
logger.error(msg.format(encoding))
raise Exception
compression = data_node.get('compression', None)
if compression == 'gzip':
import gzip
with gzip.GzipFile(fileobj=six.BytesIO(data)) as fh:
data = fh.read()
elif compression == 'zlib':
import zlib
data = zlib.decompress(data)
elif compression:
msg = 'TMX compression type: {0} is not supported.'
logger.error(msg.format(compression))
raise Exception
# if data is None, then it was not decoded or decompressed, so
# we assume here that it is going to be a bunch of tile elements
# TODO: this will/should raise an exception if there are no tiles
if encoding == next_gid is None:
def get_children(parent):
for child in parent.findall('tile'):
yield int(child.get('gid'))
next_gid = get_children(data_node)
elif data:
if type(data) == bytes:
fmt = struct.Struct('<L')
iterator = (data[i:i + 4] for i in range(0, len(data), 4))
next_gid = (fmt.unpack(i)[0] for i in iterator)
else:
msg = 'layer data not in expected format ({})'
logger.error(msg.format(type(data)))
raise Exception
init = lambda: [0] * self.width
reg = self.parent.register_gid
# H (16-bit) may be a limitation for very detailed maps
self.data = tuple(array.array('H', init()) for i in range(self.height))
for (y, x) in product(range(self.height), range(self.width)):
self.data[y][x] = reg(*decode_gid(next(next_gid)))
return self | [
"def",
"parse_xml",
"(",
"self",
",",
"node",
")",
":",
"import",
"struct",
"import",
"array",
"self",
".",
"_set_properties",
"(",
"node",
")",
"data",
"=",
"None",
"next_gid",
"=",
"None",
"data_node",
"=",
"node",
".",
"find",
"(",
"'data'",
")",
"e... | Parse a Tile Layer from ElementTree xml node
:param node: ElementTree xml node
:return: self | [
"Parse",
"a",
"Tile",
"Layer",
"from",
"ElementTree",
"xml",
"node"
] | 3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9 | https://github.com/bitcraft/PyTMX/blob/3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9/pytmx/pytmx.py#L1004-L1078 | train | 202,073 |
bitcraft/PyTMX | pytmx/pytmx.py | TiledObjectGroup.parse_xml | def parse_xml(self, node):
""" Parse an Object Group from ElementTree xml node
:param node: ElementTree xml node
:return: self
"""
self._set_properties(node)
self.extend(TiledObject(self.parent, child)
for child in node.findall('object'))
return self | python | def parse_xml(self, node):
""" Parse an Object Group from ElementTree xml node
:param node: ElementTree xml node
:return: self
"""
self._set_properties(node)
self.extend(TiledObject(self.parent, child)
for child in node.findall('object'))
return self | [
"def",
"parse_xml",
"(",
"self",
",",
"node",
")",
":",
"self",
".",
"_set_properties",
"(",
"node",
")",
"self",
".",
"extend",
"(",
"TiledObject",
"(",
"self",
".",
"parent",
",",
"child",
")",
"for",
"child",
"in",
"node",
".",
"findall",
"(",
"'o... | Parse an Object Group from ElementTree xml node
:param node: ElementTree xml node
:return: self | [
"Parse",
"an",
"Object",
"Group",
"from",
"ElementTree",
"xml",
"node"
] | 3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9 | https://github.com/bitcraft/PyTMX/blob/3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9/pytmx/pytmx.py#L1102-L1112 | train | 202,074 |
bitcraft/PyTMX | pytmx/pytmx.py | TiledObject.parse_xml | def parse_xml(self, node):
""" Parse an Object from ElementTree xml node
:param node: ElementTree xml node
:return: self
"""
def read_points(text):
"""parse a text string of float tuples and return [(x,...),...]
"""
return tuple(tuple(map(float, i.split(','))) for i in text.split())
self._set_properties(node)
# correctly handle "tile objects" (object with gid set)
if self.gid:
self.gid = self.parent.register_gid(self.gid)
points = None
polygon = node.find('polygon')
if polygon is not None:
points = read_points(polygon.get('points'))
self.closed = True
polyline = node.find('polyline')
if polyline is not None:
points = read_points(polyline.get('points'))
self.closed = False
if points:
x1 = x2 = y1 = y2 = 0
for x, y in points:
if x < x1: x1 = x
if x > x2: x2 = x
if y < y1: y1 = y
if y > y2: y2 = y
self.width = abs(x1) + abs(x2)
self.height = abs(y1) + abs(y2)
self.points = tuple(
[(i[0] + self.x, i[1] + self.y) for i in points])
return self | python | def parse_xml(self, node):
""" Parse an Object from ElementTree xml node
:param node: ElementTree xml node
:return: self
"""
def read_points(text):
"""parse a text string of float tuples and return [(x,...),...]
"""
return tuple(tuple(map(float, i.split(','))) for i in text.split())
self._set_properties(node)
# correctly handle "tile objects" (object with gid set)
if self.gid:
self.gid = self.parent.register_gid(self.gid)
points = None
polygon = node.find('polygon')
if polygon is not None:
points = read_points(polygon.get('points'))
self.closed = True
polyline = node.find('polyline')
if polyline is not None:
points = read_points(polyline.get('points'))
self.closed = False
if points:
x1 = x2 = y1 = y2 = 0
for x, y in points:
if x < x1: x1 = x
if x > x2: x2 = x
if y < y1: y1 = y
if y > y2: y2 = y
self.width = abs(x1) + abs(x2)
self.height = abs(y1) + abs(y2)
self.points = tuple(
[(i[0] + self.x, i[1] + self.y) for i in points])
return self | [
"def",
"parse_xml",
"(",
"self",
",",
"node",
")",
":",
"def",
"read_points",
"(",
"text",
")",
":",
"\"\"\"parse a text string of float tuples and return [(x,...),...]\n \"\"\"",
"return",
"tuple",
"(",
"tuple",
"(",
"map",
"(",
"float",
",",
"i",
".",
... | Parse an Object from ElementTree xml node
:param node: ElementTree xml node
:return: self | [
"Parse",
"an",
"Object",
"from",
"ElementTree",
"xml",
"node"
] | 3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9 | https://github.com/bitcraft/PyTMX/blob/3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9/pytmx/pytmx.py#L1146-L1187 | train | 202,075 |
bitcraft/PyTMX | pytmx/pytmx.py | TiledImageLayer.parse_xml | def parse_xml(self, node):
""" Parse an Image Layer from ElementTree xml node
:param node: ElementTree xml node
:return: self
"""
self._set_properties(node)
self.name = node.get('name', None)
self.opacity = node.get('opacity', self.opacity)
self.visible = node.get('visible', self.visible)
image_node = node.find('image')
self.source = image_node.get('source', None)
self.trans = image_node.get('trans', None)
return self | python | def parse_xml(self, node):
""" Parse an Image Layer from ElementTree xml node
:param node: ElementTree xml node
:return: self
"""
self._set_properties(node)
self.name = node.get('name', None)
self.opacity = node.get('opacity', self.opacity)
self.visible = node.get('visible', self.visible)
image_node = node.find('image')
self.source = image_node.get('source', None)
self.trans = image_node.get('trans', None)
return self | [
"def",
"parse_xml",
"(",
"self",
",",
"node",
")",
":",
"self",
".",
"_set_properties",
"(",
"node",
")",
"self",
".",
"name",
"=",
"node",
".",
"get",
"(",
"'name'",
",",
"None",
")",
"self",
".",
"opacity",
"=",
"node",
".",
"get",
"(",
"'opacity... | Parse an Image Layer from ElementTree xml node
:param node: ElementTree xml node
:return: self | [
"Parse",
"an",
"Image",
"Layer",
"from",
"ElementTree",
"xml",
"node"
] | 3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9 | https://github.com/bitcraft/PyTMX/blob/3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9/pytmx/pytmx.py#L1216-L1229 | train | 202,076 |
bitcraft/PyTMX | pytmx/util_pygame.py | smart_convert | def smart_convert(original, colorkey, pixelalpha):
"""
this method does several tests on a surface to determine the optimal
flags and pixel format for each tile surface.
this is done for the best rendering speeds and removes the need to
convert() the images on your own
"""
tile_size = original.get_size()
threshold = 127 # the default
try:
# count the number of pixels in the tile that are not transparent
px = pygame.mask.from_surface(original, threshold).count()
except:
# pygame_sdl2 will fail because the mask module is not included
# in this case, just convert_alpha and return it
return original.convert_alpha()
# there are no transparent pixels in the image
if px == tile_size[0] * tile_size[1]:
tile = original.convert()
# there are transparent pixels, and tiled set a colorkey
elif colorkey:
tile = original.convert()
tile.set_colorkey(colorkey, pygame.RLEACCEL)
# there are transparent pixels, and set for perpixel alpha
elif pixelalpha:
tile = original.convert_alpha()
# there are transparent pixels, and we won't handle them
else:
tile = original.convert()
return tile | python | def smart_convert(original, colorkey, pixelalpha):
"""
this method does several tests on a surface to determine the optimal
flags and pixel format for each tile surface.
this is done for the best rendering speeds and removes the need to
convert() the images on your own
"""
tile_size = original.get_size()
threshold = 127 # the default
try:
# count the number of pixels in the tile that are not transparent
px = pygame.mask.from_surface(original, threshold).count()
except:
# pygame_sdl2 will fail because the mask module is not included
# in this case, just convert_alpha and return it
return original.convert_alpha()
# there are no transparent pixels in the image
if px == tile_size[0] * tile_size[1]:
tile = original.convert()
# there are transparent pixels, and tiled set a colorkey
elif colorkey:
tile = original.convert()
tile.set_colorkey(colorkey, pygame.RLEACCEL)
# there are transparent pixels, and set for perpixel alpha
elif pixelalpha:
tile = original.convert_alpha()
# there are transparent pixels, and we won't handle them
else:
tile = original.convert()
return tile | [
"def",
"smart_convert",
"(",
"original",
",",
"colorkey",
",",
"pixelalpha",
")",
":",
"tile_size",
"=",
"original",
".",
"get_size",
"(",
")",
"threshold",
"=",
"127",
"# the default",
"try",
":",
"# count the number of pixels in the tile that are not transparent",
"... | this method does several tests on a surface to determine the optimal
flags and pixel format for each tile surface.
this is done for the best rendering speeds and removes the need to
convert() the images on your own | [
"this",
"method",
"does",
"several",
"tests",
"on",
"a",
"surface",
"to",
"determine",
"the",
"optimal",
"flags",
"and",
"pixel",
"format",
"for",
"each",
"tile",
"surface",
"."
] | 3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9 | https://github.com/bitcraft/PyTMX/blob/3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9/pytmx/util_pygame.py#L49-L85 | train | 202,077 |
bitcraft/PyTMX | pytmx/util_pygame.py | pygame_image_loader | def pygame_image_loader(filename, colorkey, **kwargs):
""" pytmx image loader for pygame
:param filename:
:param colorkey:
:param kwargs:
:return:
"""
if colorkey:
colorkey = pygame.Color('#{0}'.format(colorkey))
pixelalpha = kwargs.get('pixelalpha', True)
image = pygame.image.load(filename)
def load_image(rect=None, flags=None):
if rect:
try:
tile = image.subsurface(rect)
except ValueError:
logger.error('Tile bounds outside bounds of tileset image')
raise
else:
tile = image.copy()
if flags:
tile = handle_transformation(tile, flags)
tile = smart_convert(tile, colorkey, pixelalpha)
return tile
return load_image | python | def pygame_image_loader(filename, colorkey, **kwargs):
""" pytmx image loader for pygame
:param filename:
:param colorkey:
:param kwargs:
:return:
"""
if colorkey:
colorkey = pygame.Color('#{0}'.format(colorkey))
pixelalpha = kwargs.get('pixelalpha', True)
image = pygame.image.load(filename)
def load_image(rect=None, flags=None):
if rect:
try:
tile = image.subsurface(rect)
except ValueError:
logger.error('Tile bounds outside bounds of tileset image')
raise
else:
tile = image.copy()
if flags:
tile = handle_transformation(tile, flags)
tile = smart_convert(tile, colorkey, pixelalpha)
return tile
return load_image | [
"def",
"pygame_image_loader",
"(",
"filename",
",",
"colorkey",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"colorkey",
":",
"colorkey",
"=",
"pygame",
".",
"Color",
"(",
"'#{0}'",
".",
"format",
"(",
"colorkey",
")",
")",
"pixelalpha",
"=",
"kwargs",
".",
... | pytmx image loader for pygame
:param filename:
:param colorkey:
:param kwargs:
:return: | [
"pytmx",
"image",
"loader",
"for",
"pygame"
] | 3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9 | https://github.com/bitcraft/PyTMX/blob/3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9/pytmx/util_pygame.py#L88-L118 | train | 202,078 |
bitcraft/PyTMX | pytmx/util_pygame.py | load_pygame | def load_pygame(filename, *args, **kwargs):
""" Load a TMX file, images, and return a TiledMap class
PYGAME USERS: Use me.
this utility has 'smart' tile loading. by default any tile without
transparent pixels will be loaded for quick blitting. if the tile has
transparent pixels, then it will be loaded with per-pixel alpha. this is
a per-tile, per-image check.
if a color key is specified as an argument, or in the tmx data, the
per-pixel alpha will not be used at all. if the tileset's image has colorkey
transparency set in Tiled, the util_pygam will return images that have their
transparency already set.
TL;DR:
Don't attempt to convert() or convert_alpha() the individual tiles. It is
already done for you.
"""
kwargs['image_loader'] = pygame_image_loader
return pytmx.TiledMap(filename, *args, **kwargs) | python | def load_pygame(filename, *args, **kwargs):
""" Load a TMX file, images, and return a TiledMap class
PYGAME USERS: Use me.
this utility has 'smart' tile loading. by default any tile without
transparent pixels will be loaded for quick blitting. if the tile has
transparent pixels, then it will be loaded with per-pixel alpha. this is
a per-tile, per-image check.
if a color key is specified as an argument, or in the tmx data, the
per-pixel alpha will not be used at all. if the tileset's image has colorkey
transparency set in Tiled, the util_pygam will return images that have their
transparency already set.
TL;DR:
Don't attempt to convert() or convert_alpha() the individual tiles. It is
already done for you.
"""
kwargs['image_loader'] = pygame_image_loader
return pytmx.TiledMap(filename, *args, **kwargs) | [
"def",
"load_pygame",
"(",
"filename",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'image_loader'",
"]",
"=",
"pygame_image_loader",
"return",
"pytmx",
".",
"TiledMap",
"(",
"filename",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
... | Load a TMX file, images, and return a TiledMap class
PYGAME USERS: Use me.
this utility has 'smart' tile loading. by default any tile without
transparent pixels will be loaded for quick blitting. if the tile has
transparent pixels, then it will be loaded with per-pixel alpha. this is
a per-tile, per-image check.
if a color key is specified as an argument, or in the tmx data, the
per-pixel alpha will not be used at all. if the tileset's image has colorkey
transparency set in Tiled, the util_pygam will return images that have their
transparency already set.
TL;DR:
Don't attempt to convert() or convert_alpha() the individual tiles. It is
already done for you. | [
"Load",
"a",
"TMX",
"file",
"images",
"and",
"return",
"a",
"TiledMap",
"class"
] | 3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9 | https://github.com/bitcraft/PyTMX/blob/3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9/pytmx/util_pygame.py#L121-L141 | train | 202,079 |
bitcraft/PyTMX | pytmx/util_pygame.py | build_rects | def build_rects(tmxmap, layer, tileset=None, real_gid=None):
"""generate a set of non-overlapping rects that represents the distribution
of the specified gid.
useful for generating rects for use in collision detection
Use at your own risk: this is experimental...will change in future
GID Note: You will need to add 1 to the GID reported by Tiled.
:param tmxmap: TiledMap object
:param layer: int or string name of layer
:param tileset: int or string name of tileset
:param real_gid: Tiled GID of the tile + 1 (see note)
:return: List of pygame Rect objects
"""
if isinstance(tileset, int):
try:
tileset = tmxmap.tilesets[tileset]
except IndexError:
msg = "Tileset #{0} not found in map {1}."
logger.debug(msg.format(tileset, tmxmap))
raise IndexError
elif isinstance(tileset, str):
try:
tileset = [t for t in tmxmap.tilesets if t.name == tileset].pop()
except IndexError:
msg = "Tileset \"{0}\" not found in map {1}."
logger.debug(msg.format(tileset, tmxmap))
raise ValueError
elif tileset:
msg = "Tileset must be either a int or string. got: {0}"
logger.debug(msg.format(type(tileset)))
raise TypeError
gid = None
if real_gid:
try:
gid, flags = tmxmap.map_gid(real_gid)[0]
except IndexError:
msg = "GID #{0} not found"
logger.debug(msg.format(real_gid))
raise ValueError
if isinstance(layer, int):
layer_data = tmxmap.get_layer_data(layer)
elif isinstance(layer, str):
try:
layer = [l for l in tmxmap.layers if l.name == layer].pop()
layer_data = layer.data
except IndexError:
msg = "Layer \"{0}\" not found in map {1}."
logger.debug(msg.format(layer, tmxmap))
raise ValueError
p = itertools.product(range(tmxmap.width), range(tmxmap.height))
if gid:
points = [(x, y) for (x, y) in p if layer_data[y][x] == gid]
else:
points = [(x, y) for (x, y) in p if layer_data[y][x]]
rects = simplify(points, tmxmap.tilewidth, tmxmap.tileheight)
return rects | python | def build_rects(tmxmap, layer, tileset=None, real_gid=None):
"""generate a set of non-overlapping rects that represents the distribution
of the specified gid.
useful for generating rects for use in collision detection
Use at your own risk: this is experimental...will change in future
GID Note: You will need to add 1 to the GID reported by Tiled.
:param tmxmap: TiledMap object
:param layer: int or string name of layer
:param tileset: int or string name of tileset
:param real_gid: Tiled GID of the tile + 1 (see note)
:return: List of pygame Rect objects
"""
if isinstance(tileset, int):
try:
tileset = tmxmap.tilesets[tileset]
except IndexError:
msg = "Tileset #{0} not found in map {1}."
logger.debug(msg.format(tileset, tmxmap))
raise IndexError
elif isinstance(tileset, str):
try:
tileset = [t for t in tmxmap.tilesets if t.name == tileset].pop()
except IndexError:
msg = "Tileset \"{0}\" not found in map {1}."
logger.debug(msg.format(tileset, tmxmap))
raise ValueError
elif tileset:
msg = "Tileset must be either a int or string. got: {0}"
logger.debug(msg.format(type(tileset)))
raise TypeError
gid = None
if real_gid:
try:
gid, flags = tmxmap.map_gid(real_gid)[0]
except IndexError:
msg = "GID #{0} not found"
logger.debug(msg.format(real_gid))
raise ValueError
if isinstance(layer, int):
layer_data = tmxmap.get_layer_data(layer)
elif isinstance(layer, str):
try:
layer = [l for l in tmxmap.layers if l.name == layer].pop()
layer_data = layer.data
except IndexError:
msg = "Layer \"{0}\" not found in map {1}."
logger.debug(msg.format(layer, tmxmap))
raise ValueError
p = itertools.product(range(tmxmap.width), range(tmxmap.height))
if gid:
points = [(x, y) for (x, y) in p if layer_data[y][x] == gid]
else:
points = [(x, y) for (x, y) in p if layer_data[y][x]]
rects = simplify(points, tmxmap.tilewidth, tmxmap.tileheight)
return rects | [
"def",
"build_rects",
"(",
"tmxmap",
",",
"layer",
",",
"tileset",
"=",
"None",
",",
"real_gid",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"tileset",
",",
"int",
")",
":",
"try",
":",
"tileset",
"=",
"tmxmap",
".",
"tilesets",
"[",
"tileset",
"... | generate a set of non-overlapping rects that represents the distribution
of the specified gid.
useful for generating rects for use in collision detection
Use at your own risk: this is experimental...will change in future
GID Note: You will need to add 1 to the GID reported by Tiled.
:param tmxmap: TiledMap object
:param layer: int or string name of layer
:param tileset: int or string name of tileset
:param real_gid: Tiled GID of the tile + 1 (see note)
:return: List of pygame Rect objects | [
"generate",
"a",
"set",
"of",
"non",
"-",
"overlapping",
"rects",
"that",
"represents",
"the",
"distribution",
"of",
"the",
"specified",
"gid",
"."
] | 3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9 | https://github.com/bitcraft/PyTMX/blob/3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9/pytmx/util_pygame.py#L144-L208 | train | 202,080 |
bitcraft/PyTMX | pytmx/util_pyglet.py | pyglet_image_loader | def pyglet_image_loader(filename, colorkey, **kwargs):
"""basic image loading with pyglet
returns pyglet Images, not textures
This is a basic proof-of-concept and is likely to fail in some situations.
Missing:
Transparency
Tile Rotation
This is slow as well.
"""
if colorkey:
logger.debug('colorkey not implemented')
image = pyglet.image.load(filename)
def load_image(rect=None, flags=None):
if rect:
try:
x, y, w, h = rect
y = image.height - y - h
tile = image.get_region(x, y, w, h)
except:
logger.error('cannot get region %s of image', rect)
raise
else:
tile = image
if flags:
logger.error('tile flags are not implemented')
return tile
return load_image | python | def pyglet_image_loader(filename, colorkey, **kwargs):
"""basic image loading with pyglet
returns pyglet Images, not textures
This is a basic proof-of-concept and is likely to fail in some situations.
Missing:
Transparency
Tile Rotation
This is slow as well.
"""
if colorkey:
logger.debug('colorkey not implemented')
image = pyglet.image.load(filename)
def load_image(rect=None, flags=None):
if rect:
try:
x, y, w, h = rect
y = image.height - y - h
tile = image.get_region(x, y, w, h)
except:
logger.error('cannot get region %s of image', rect)
raise
else:
tile = image
if flags:
logger.error('tile flags are not implemented')
return tile
return load_image | [
"def",
"pyglet_image_loader",
"(",
"filename",
",",
"colorkey",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"colorkey",
":",
"logger",
".",
"debug",
"(",
"'colorkey not implemented'",
")",
"image",
"=",
"pyglet",
".",
"image",
".",
"load",
"(",
"filename",
")... | basic image loading with pyglet
returns pyglet Images, not textures
This is a basic proof-of-concept and is likely to fail in some situations.
Missing:
Transparency
Tile Rotation
This is slow as well. | [
"basic",
"image",
"loading",
"with",
"pyglet"
] | 3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9 | https://github.com/bitcraft/PyTMX/blob/3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9/pytmx/util_pyglet.py#L37-L72 | train | 202,081 |
wind-python/windpowerlib | windpowerlib/wake_losses.py | reduce_wind_speed | def reduce_wind_speed(wind_speed, wind_efficiency_curve_name='dena_mean'):
r"""
Reduces wind speed by a wind efficiency curve.
The wind efficiency curves are provided in the windpowerlib and were
calculated in the dena-Netzstudie II and in the work of Knorr
(see [1]_ and [2]_).
Parameters
----------
wind_speed : pandas.Series or numpy.array
Wind speed time series.
wind_efficiency_curve_name : string
Name of the wind efficiency curve. Use
:py:func:`~.get_wind_efficiency_curve` to get all provided wind
efficiency curves. Default: 'dena_mean'.
Returns
-------
reduced_wind_speed : pd.Series or np.array
`wind_speed` reduced by wind efficiency curve.
References
----------
.. [1] Kohler et.al.: "dena-Netzstudie II. Integration erneuerbarer
Energien in die deutsche Stromversorgung im Zeitraum 2015 – 2020
mit Ausblick 2025.", Deutsche Energie-Agentur GmbH (dena),
Tech. rept., 2010, p. 101
.. [2] Knorr, K.: "Modellierung von raum-zeitlichen Eigenschaften der
Windenergieeinspeisung für wetterdatenbasierte
Windleistungssimulationen". Universität Kassel, Diss., 2016,
p. 124
"""
# Get wind efficiency curve
wind_efficiency_curve = get_wind_efficiency_curve(
curve_name=wind_efficiency_curve_name)
# Reduce wind speed by wind efficiency
reduced_wind_speed = wind_speed * np.interp(
wind_speed, wind_efficiency_curve['wind_speed'],
wind_efficiency_curve['efficiency'])
return reduced_wind_speed | python | def reduce_wind_speed(wind_speed, wind_efficiency_curve_name='dena_mean'):
r"""
Reduces wind speed by a wind efficiency curve.
The wind efficiency curves are provided in the windpowerlib and were
calculated in the dena-Netzstudie II and in the work of Knorr
(see [1]_ and [2]_).
Parameters
----------
wind_speed : pandas.Series or numpy.array
Wind speed time series.
wind_efficiency_curve_name : string
Name of the wind efficiency curve. Use
:py:func:`~.get_wind_efficiency_curve` to get all provided wind
efficiency curves. Default: 'dena_mean'.
Returns
-------
reduced_wind_speed : pd.Series or np.array
`wind_speed` reduced by wind efficiency curve.
References
----------
.. [1] Kohler et.al.: "dena-Netzstudie II. Integration erneuerbarer
Energien in die deutsche Stromversorgung im Zeitraum 2015 – 2020
mit Ausblick 2025.", Deutsche Energie-Agentur GmbH (dena),
Tech. rept., 2010, p. 101
.. [2] Knorr, K.: "Modellierung von raum-zeitlichen Eigenschaften der
Windenergieeinspeisung für wetterdatenbasierte
Windleistungssimulationen". Universität Kassel, Diss., 2016,
p. 124
"""
# Get wind efficiency curve
wind_efficiency_curve = get_wind_efficiency_curve(
curve_name=wind_efficiency_curve_name)
# Reduce wind speed by wind efficiency
reduced_wind_speed = wind_speed * np.interp(
wind_speed, wind_efficiency_curve['wind_speed'],
wind_efficiency_curve['efficiency'])
return reduced_wind_speed | [
"def",
"reduce_wind_speed",
"(",
"wind_speed",
",",
"wind_efficiency_curve_name",
"=",
"'dena_mean'",
")",
":",
"# Get wind efficiency curve",
"wind_efficiency_curve",
"=",
"get_wind_efficiency_curve",
"(",
"curve_name",
"=",
"wind_efficiency_curve_name",
")",
"# Reduce wind sp... | r"""
Reduces wind speed by a wind efficiency curve.
The wind efficiency curves are provided in the windpowerlib and were
calculated in the dena-Netzstudie II and in the work of Knorr
(see [1]_ and [2]_).
Parameters
----------
wind_speed : pandas.Series or numpy.array
Wind speed time series.
wind_efficiency_curve_name : string
Name of the wind efficiency curve. Use
:py:func:`~.get_wind_efficiency_curve` to get all provided wind
efficiency curves. Default: 'dena_mean'.
Returns
-------
reduced_wind_speed : pd.Series or np.array
`wind_speed` reduced by wind efficiency curve.
References
----------
.. [1] Kohler et.al.: "dena-Netzstudie II. Integration erneuerbarer
Energien in die deutsche Stromversorgung im Zeitraum 2015 – 2020
mit Ausblick 2025.", Deutsche Energie-Agentur GmbH (dena),
Tech. rept., 2010, p. 101
.. [2] Knorr, K.: "Modellierung von raum-zeitlichen Eigenschaften der
Windenergieeinspeisung für wetterdatenbasierte
Windleistungssimulationen". Universität Kassel, Diss., 2016,
p. 124 | [
"r",
"Reduces",
"wind",
"speed",
"by",
"a",
"wind",
"efficiency",
"curve",
"."
] | 421b316139743311b7cb68a69f6b53d2665f7e23 | https://github.com/wind-python/windpowerlib/blob/421b316139743311b7cb68a69f6b53d2665f7e23/windpowerlib/wake_losses.py#L15-L56 | train | 202,082 |
wind-python/windpowerlib | example/modelchain_example.py | get_weather_data | def get_weather_data(filename='weather.csv', **kwargs):
r"""
Imports weather data from a file.
The data include wind speed at two different heights in m/s, air
temperature in two different heights in K, surface roughness length in m
and air pressure in Pa. The file is located in the example folder of the
windpowerlib. The height in m for which the data applies is specified in
the second row.
Parameters
----------
filename : string
Filename of the weather data file. Default: 'weather.csv'.
Other Parameters
----------------
datapath : string, optional
Path where the weather data file is stored.
Default: 'windpowerlib/example'.
Returns
-------
weather_df : pandas.DataFrame
DataFrame with time series for wind speed `wind_speed` in m/s,
temperature `temperature` in K, roughness length `roughness_length`
in m, and pressure `pressure` in Pa.
The columns of the DataFrame are a MultiIndex where the first level
contains the variable name as string (e.g. 'wind_speed') and the
second level contains the height as integer at which it applies
(e.g. 10, if it was measured at a height of 10 m).
"""
if 'datapath' not in kwargs:
kwargs['datapath'] = os.path.join(os.path.split(
os.path.dirname(__file__))[0], 'example')
file = os.path.join(kwargs['datapath'], filename)
# read csv file
weather_df = pd.read_csv(
file, index_col=0, header=[0, 1],
date_parser=lambda idx: pd.to_datetime(idx, utc=True))
# change type of index to datetime and set time zone
weather_df.index = pd.to_datetime(weather_df.index).tz_convert(
'Europe/Berlin')
# change type of height from str to int by resetting columns
weather_df.columns = [weather_df.axes[1].levels[0][
weather_df.axes[1].codes[0]],
weather_df.axes[1].levels[1][
weather_df.axes[1].codes[1]].astype(int)]
return weather_df | python | def get_weather_data(filename='weather.csv', **kwargs):
r"""
Imports weather data from a file.
The data include wind speed at two different heights in m/s, air
temperature in two different heights in K, surface roughness length in m
and air pressure in Pa. The file is located in the example folder of the
windpowerlib. The height in m for which the data applies is specified in
the second row.
Parameters
----------
filename : string
Filename of the weather data file. Default: 'weather.csv'.
Other Parameters
----------------
datapath : string, optional
Path where the weather data file is stored.
Default: 'windpowerlib/example'.
Returns
-------
weather_df : pandas.DataFrame
DataFrame with time series for wind speed `wind_speed` in m/s,
temperature `temperature` in K, roughness length `roughness_length`
in m, and pressure `pressure` in Pa.
The columns of the DataFrame are a MultiIndex where the first level
contains the variable name as string (e.g. 'wind_speed') and the
second level contains the height as integer at which it applies
(e.g. 10, if it was measured at a height of 10 m).
"""
if 'datapath' not in kwargs:
kwargs['datapath'] = os.path.join(os.path.split(
os.path.dirname(__file__))[0], 'example')
file = os.path.join(kwargs['datapath'], filename)
# read csv file
weather_df = pd.read_csv(
file, index_col=0, header=[0, 1],
date_parser=lambda idx: pd.to_datetime(idx, utc=True))
# change type of index to datetime and set time zone
weather_df.index = pd.to_datetime(weather_df.index).tz_convert(
'Europe/Berlin')
# change type of height from str to int by resetting columns
weather_df.columns = [weather_df.axes[1].levels[0][
weather_df.axes[1].codes[0]],
weather_df.axes[1].levels[1][
weather_df.axes[1].codes[1]].astype(int)]
return weather_df | [
"def",
"get_weather_data",
"(",
"filename",
"=",
"'weather.csv'",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"'datapath'",
"not",
"in",
"kwargs",
":",
"kwargs",
"[",
"'datapath'",
"]",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"... | r"""
Imports weather data from a file.
The data include wind speed at two different heights in m/s, air
temperature in two different heights in K, surface roughness length in m
and air pressure in Pa. The file is located in the example folder of the
windpowerlib. The height in m for which the data applies is specified in
the second row.
Parameters
----------
filename : string
Filename of the weather data file. Default: 'weather.csv'.
Other Parameters
----------------
datapath : string, optional
Path where the weather data file is stored.
Default: 'windpowerlib/example'.
Returns
-------
weather_df : pandas.DataFrame
DataFrame with time series for wind speed `wind_speed` in m/s,
temperature `temperature` in K, roughness length `roughness_length`
in m, and pressure `pressure` in Pa.
The columns of the DataFrame are a MultiIndex where the first level
contains the variable name as string (e.g. 'wind_speed') and the
second level contains the height as integer at which it applies
(e.g. 10, if it was measured at a height of 10 m). | [
"r",
"Imports",
"weather",
"data",
"from",
"a",
"file",
"."
] | 421b316139743311b7cb68a69f6b53d2665f7e23 | https://github.com/wind-python/windpowerlib/blob/421b316139743311b7cb68a69f6b53d2665f7e23/example/modelchain_example.py#L36-L86 | train | 202,083 |
wind-python/windpowerlib | example/modelchain_example.py | run_example | def run_example():
r"""
Runs the basic example.
"""
weather = get_weather_data('weather.csv')
my_turbine, e126, dummy_turbine = initialize_wind_turbines()
calculate_power_output(weather, my_turbine, e126, dummy_turbine)
plot_or_print(my_turbine, e126, dummy_turbine) | python | def run_example():
r"""
Runs the basic example.
"""
weather = get_weather_data('weather.csv')
my_turbine, e126, dummy_turbine = initialize_wind_turbines()
calculate_power_output(weather, my_turbine, e126, dummy_turbine)
plot_or_print(my_turbine, e126, dummy_turbine) | [
"def",
"run_example",
"(",
")",
":",
"weather",
"=",
"get_weather_data",
"(",
"'weather.csv'",
")",
"my_turbine",
",",
"e126",
",",
"dummy_turbine",
"=",
"initialize_wind_turbines",
"(",
")",
"calculate_power_output",
"(",
"weather",
",",
"my_turbine",
",",
"e126"... | r"""
Runs the basic example. | [
"r",
"Runs",
"the",
"basic",
"example",
"."
] | 421b316139743311b7cb68a69f6b53d2665f7e23 | https://github.com/wind-python/windpowerlib/blob/421b316139743311b7cb68a69f6b53d2665f7e23/example/modelchain_example.py#L265-L273 | train | 202,084 |
wind-python/windpowerlib | windpowerlib/wind_farm.py | WindFarm.mean_hub_height | def mean_hub_height(self):
r"""
Calculates the mean hub height of the wind farm.
The mean hub height of a wind farm is necessary for power output
calculations with an aggregated wind farm power curve containing wind
turbines with different hub heights. Hub heights of wind turbines with
higher nominal power weigh more than others. Assigns the hub height to
the wind farm object.
Returns
-------
self
Notes
-----
The following equation is used [1]_:
.. math:: h_{WF} = e^{\sum\limits_{k}{ln(h_{WT,k})}
\frac{P_{N,k}}{\sum\limits_{k}{P_{N,k}}}}
with:
:math:`h_{WF}`: mean hub height of wind farm,
:math:`h_{WT,k}`: hub height of the k-th wind turbine of a wind
farm, :math:`P_{N,k}`: nominal power of the k-th wind turbine
References
----------
.. [1] Knorr, K.: "Modellierung von raum-zeitlichen Eigenschaften der
Windenergieeinspeisung für wetterdatenbasierte
Windleistungssimulationen". Universität Kassel, Diss., 2016,
p. 35
"""
self.hub_height = np.exp(
sum(np.log(wind_dict['wind_turbine'].hub_height) *
wind_dict['wind_turbine'].nominal_power *
wind_dict['number_of_turbines']
for wind_dict in self.wind_turbine_fleet) /
self.get_installed_power())
return self | python | def mean_hub_height(self):
r"""
Calculates the mean hub height of the wind farm.
The mean hub height of a wind farm is necessary for power output
calculations with an aggregated wind farm power curve containing wind
turbines with different hub heights. Hub heights of wind turbines with
higher nominal power weigh more than others. Assigns the hub height to
the wind farm object.
Returns
-------
self
Notes
-----
The following equation is used [1]_:
.. math:: h_{WF} = e^{\sum\limits_{k}{ln(h_{WT,k})}
\frac{P_{N,k}}{\sum\limits_{k}{P_{N,k}}}}
with:
:math:`h_{WF}`: mean hub height of wind farm,
:math:`h_{WT,k}`: hub height of the k-th wind turbine of a wind
farm, :math:`P_{N,k}`: nominal power of the k-th wind turbine
References
----------
.. [1] Knorr, K.: "Modellierung von raum-zeitlichen Eigenschaften der
Windenergieeinspeisung für wetterdatenbasierte
Windleistungssimulationen". Universität Kassel, Diss., 2016,
p. 35
"""
self.hub_height = np.exp(
sum(np.log(wind_dict['wind_turbine'].hub_height) *
wind_dict['wind_turbine'].nominal_power *
wind_dict['number_of_turbines']
for wind_dict in self.wind_turbine_fleet) /
self.get_installed_power())
return self | [
"def",
"mean_hub_height",
"(",
"self",
")",
":",
"self",
".",
"hub_height",
"=",
"np",
".",
"exp",
"(",
"sum",
"(",
"np",
".",
"log",
"(",
"wind_dict",
"[",
"'wind_turbine'",
"]",
".",
"hub_height",
")",
"*",
"wind_dict",
"[",
"'wind_turbine'",
"]",
".... | r"""
Calculates the mean hub height of the wind farm.
The mean hub height of a wind farm is necessary for power output
calculations with an aggregated wind farm power curve containing wind
turbines with different hub heights. Hub heights of wind turbines with
higher nominal power weigh more than others. Assigns the hub height to
the wind farm object.
Returns
-------
self
Notes
-----
The following equation is used [1]_:
.. math:: h_{WF} = e^{\sum\limits_{k}{ln(h_{WT,k})}
\frac{P_{N,k}}{\sum\limits_{k}{P_{N,k}}}}
with:
:math:`h_{WF}`: mean hub height of wind farm,
:math:`h_{WT,k}`: hub height of the k-th wind turbine of a wind
farm, :math:`P_{N,k}`: nominal power of the k-th wind turbine
References
----------
.. [1] Knorr, K.: "Modellierung von raum-zeitlichen Eigenschaften der
Windenergieeinspeisung für wetterdatenbasierte
Windleistungssimulationen". Universität Kassel, Diss., 2016,
p. 35 | [
"r",
"Calculates",
"the",
"mean",
"hub",
"height",
"of",
"the",
"wind",
"farm",
"."
] | 421b316139743311b7cb68a69f6b53d2665f7e23 | https://github.com/wind-python/windpowerlib/blob/421b316139743311b7cb68a69f6b53d2665f7e23/windpowerlib/wind_farm.py#L98-L138 | train | 202,085 |
wind-python/windpowerlib | windpowerlib/wind_speed.py | logarithmic_profile | def logarithmic_profile(wind_speed, wind_speed_height, hub_height,
roughness_length, obstacle_height=0.0):
r"""
Calculates the wind speed at hub height using a logarithmic wind profile.
The logarithmic height equation is used. There is the possibility of
including the height of the surrounding obstacles in the calculation. This
function is carried out when the parameter `wind_speed_model` of an
instance of the :class:`~.modelchain.ModelChain` class is 'logarithmic'.
Parameters
----------
wind_speed : pandas.Series or numpy.array
Wind speed time series.
wind_speed_height : float
Height for which the parameter `wind_speed` applies.
hub_height : float
Hub height of wind turbine.
roughness_length : pandas.Series or numpy.array or float
Roughness length.
obstacle_height : float
Height of obstacles in the surrounding area of the wind turbine. Set
`obstacle_height` to zero for wide spread obstacles. Default: 0.
Returns
-------
pandas.Series or numpy.array
Wind speed at hub height. Data type depends on type of `wind_speed`.
Notes
-----
The following equation is used [1]_, [2]_, [3]_:
.. math:: v_{wind,hub}=v_{wind,data}\cdot
\frac{\ln\left(\frac{h_{hub}-d}{z_{0}}\right)}{\ln\left(
\frac{h_{data}-d}{z_{0}}\right)}
with:
v: wind speed, h: height, :math:`z_{0}`: roughness length,
d: boundary layer offset (estimated by d = 0.7 * `obstacle_height`)
For d = 0 it results in the following equation [2]_, [3]_:
.. math:: v_{wind,hub}=v_{wind,data}\cdot\frac{\ln\left(\frac{h_{hub}}
{z_{0}}\right)}{\ln\left(\frac{h_{data}}{z_{0}}\right)}
:math:`h_{data}` is the height at which the wind speed
:math:`v_{wind,data}` is measured and :math:`v_{wind,hub}` is the wind
speed at hub height :math:`h_{hub}` of the wind turbine.
Parameters `wind_speed_height`, `roughness_length`, `hub_height` and
`obstacle_height` have to be of the same unit.
References
----------
.. [1] Quaschning V.: "Regenerative Energiesysteme". München, Hanser
Verlag, 2011, p. 278
.. [2] Gasch, R., Twele, J.: "Windkraftanlagen". 6. Auflage, Wiesbaden,
Vieweg + Teubner, 2010, p. 129
.. [3] Hau, E.: "Windkraftanlagen - Grundlagen, Technik, Einsatz,
Wirtschaftlichkeit". 4. Auflage, Springer-Verlag, 2008, p. 515
"""
if 0.7 * obstacle_height > wind_speed_height:
raise ValueError("To take an obstacle height of {0} m ".format(
obstacle_height) + "into consideration, wind " +
"speed data of a greater height is needed.")
# Return np.array if wind_speed is np.array
if (isinstance(wind_speed, np.ndarray) and
isinstance(roughness_length, pd.Series)):
roughness_length = np.array(roughness_length)
return (wind_speed * np.log((hub_height - 0.7 * obstacle_height) /
roughness_length) /
np.log((wind_speed_height - 0.7 * obstacle_height) /
roughness_length)) | python | def logarithmic_profile(wind_speed, wind_speed_height, hub_height,
roughness_length, obstacle_height=0.0):
r"""
Calculates the wind speed at hub height using a logarithmic wind profile.
The logarithmic height equation is used. There is the possibility of
including the height of the surrounding obstacles in the calculation. This
function is carried out when the parameter `wind_speed_model` of an
instance of the :class:`~.modelchain.ModelChain` class is 'logarithmic'.
Parameters
----------
wind_speed : pandas.Series or numpy.array
Wind speed time series.
wind_speed_height : float
Height for which the parameter `wind_speed` applies.
hub_height : float
Hub height of wind turbine.
roughness_length : pandas.Series or numpy.array or float
Roughness length.
obstacle_height : float
Height of obstacles in the surrounding area of the wind turbine. Set
`obstacle_height` to zero for wide spread obstacles. Default: 0.
Returns
-------
pandas.Series or numpy.array
Wind speed at hub height. Data type depends on type of `wind_speed`.
Notes
-----
The following equation is used [1]_, [2]_, [3]_:
.. math:: v_{wind,hub}=v_{wind,data}\cdot
\frac{\ln\left(\frac{h_{hub}-d}{z_{0}}\right)}{\ln\left(
\frac{h_{data}-d}{z_{0}}\right)}
with:
v: wind speed, h: height, :math:`z_{0}`: roughness length,
d: boundary layer offset (estimated by d = 0.7 * `obstacle_height`)
For d = 0 it results in the following equation [2]_, [3]_:
.. math:: v_{wind,hub}=v_{wind,data}\cdot\frac{\ln\left(\frac{h_{hub}}
{z_{0}}\right)}{\ln\left(\frac{h_{data}}{z_{0}}\right)}
:math:`h_{data}` is the height at which the wind speed
:math:`v_{wind,data}` is measured and :math:`v_{wind,hub}` is the wind
speed at hub height :math:`h_{hub}` of the wind turbine.
Parameters `wind_speed_height`, `roughness_length`, `hub_height` and
`obstacle_height` have to be of the same unit.
References
----------
.. [1] Quaschning V.: "Regenerative Energiesysteme". München, Hanser
Verlag, 2011, p. 278
.. [2] Gasch, R., Twele, J.: "Windkraftanlagen". 6. Auflage, Wiesbaden,
Vieweg + Teubner, 2010, p. 129
.. [3] Hau, E.: "Windkraftanlagen - Grundlagen, Technik, Einsatz,
Wirtschaftlichkeit". 4. Auflage, Springer-Verlag, 2008, p. 515
"""
if 0.7 * obstacle_height > wind_speed_height:
raise ValueError("To take an obstacle height of {0} m ".format(
obstacle_height) + "into consideration, wind " +
"speed data of a greater height is needed.")
# Return np.array if wind_speed is np.array
if (isinstance(wind_speed, np.ndarray) and
isinstance(roughness_length, pd.Series)):
roughness_length = np.array(roughness_length)
return (wind_speed * np.log((hub_height - 0.7 * obstacle_height) /
roughness_length) /
np.log((wind_speed_height - 0.7 * obstacle_height) /
roughness_length)) | [
"def",
"logarithmic_profile",
"(",
"wind_speed",
",",
"wind_speed_height",
",",
"hub_height",
",",
"roughness_length",
",",
"obstacle_height",
"=",
"0.0",
")",
":",
"if",
"0.7",
"*",
"obstacle_height",
">",
"wind_speed_height",
":",
"raise",
"ValueError",
"(",
"\"... | r"""
Calculates the wind speed at hub height using a logarithmic wind profile.
The logarithmic height equation is used. There is the possibility of
including the height of the surrounding obstacles in the calculation. This
function is carried out when the parameter `wind_speed_model` of an
instance of the :class:`~.modelchain.ModelChain` class is 'logarithmic'.
Parameters
----------
wind_speed : pandas.Series or numpy.array
Wind speed time series.
wind_speed_height : float
Height for which the parameter `wind_speed` applies.
hub_height : float
Hub height of wind turbine.
roughness_length : pandas.Series or numpy.array or float
Roughness length.
obstacle_height : float
Height of obstacles in the surrounding area of the wind turbine. Set
`obstacle_height` to zero for wide spread obstacles. Default: 0.
Returns
-------
pandas.Series or numpy.array
Wind speed at hub height. Data type depends on type of `wind_speed`.
Notes
-----
The following equation is used [1]_, [2]_, [3]_:
.. math:: v_{wind,hub}=v_{wind,data}\cdot
\frac{\ln\left(\frac{h_{hub}-d}{z_{0}}\right)}{\ln\left(
\frac{h_{data}-d}{z_{0}}\right)}
with:
v: wind speed, h: height, :math:`z_{0}`: roughness length,
d: boundary layer offset (estimated by d = 0.7 * `obstacle_height`)
For d = 0 it results in the following equation [2]_, [3]_:
.. math:: v_{wind,hub}=v_{wind,data}\cdot\frac{\ln\left(\frac{h_{hub}}
{z_{0}}\right)}{\ln\left(\frac{h_{data}}{z_{0}}\right)}
:math:`h_{data}` is the height at which the wind speed
:math:`v_{wind,data}` is measured and :math:`v_{wind,hub}` is the wind
speed at hub height :math:`h_{hub}` of the wind turbine.
Parameters `wind_speed_height`, `roughness_length`, `hub_height` and
`obstacle_height` have to be of the same unit.
References
----------
.. [1] Quaschning V.: "Regenerative Energiesysteme". München, Hanser
Verlag, 2011, p. 278
.. [2] Gasch, R., Twele, J.: "Windkraftanlagen". 6. Auflage, Wiesbaden,
Vieweg + Teubner, 2010, p. 129
.. [3] Hau, E.: "Windkraftanlagen - Grundlagen, Technik, Einsatz,
Wirtschaftlichkeit". 4. Auflage, Springer-Verlag, 2008, p. 515 | [
"r",
"Calculates",
"the",
"wind",
"speed",
"at",
"hub",
"height",
"using",
"a",
"logarithmic",
"wind",
"profile",
"."
] | 421b316139743311b7cb68a69f6b53d2665f7e23 | https://github.com/wind-python/windpowerlib/blob/421b316139743311b7cb68a69f6b53d2665f7e23/windpowerlib/wind_speed.py#L14-L89 | train | 202,086 |
wind-python/windpowerlib | windpowerlib/wind_speed.py | hellman | def hellman(wind_speed, wind_speed_height, hub_height,
roughness_length=None, hellman_exponent=None):
r"""
Calculates the wind speed at hub height using the hellman equation.
It is assumed that the wind profile follows a power law. This function is
carried out when the parameter `wind_speed_model` of an instance of
the :class:`~.modelchain.ModelChain` class is 'hellman'.
Parameters
----------
wind_speed : pandas.Series or numpy.array
Wind speed time series.
wind_speed_height : float
Height for which the parameter `wind_speed` applies.
hub_height : float
Hub height of wind turbine.
roughness_length : pandas.Series or numpy.array or float
Roughness length. If given and `hellman_exponent` is None:
`hellman_exponent` = 1 / ln(hub_height/roughness_length),
otherwise `hellman_exponent` = 1/7. Default: None.
hellman_exponent : None or float
The Hellman exponent, which combines the increase in wind speed due to
stability of atmospheric conditions and surface roughness into one
constant. If None and roughness length is given
`hellman_exponent` = 1 / ln(hub_height/roughness_length),
otherwise `hellman_exponent` = 1/7. Default: None.
Returns
-------
pandas.Series or numpy.array
Wind speed at hub height. Data type depends on type of `wind_speed`.
Notes
-----
The following equation is used [1]_, [2]_, [3]_:
.. math:: v_{wind,hub}=v_{wind,data}\cdot \left(\frac{h_{hub}}{h_{data}}
\right)^\alpha
with:
v: wind speed, h: height, :math:`\alpha`: Hellman exponent
:math:`h_{data}` is the height in which the wind speed
:math:`v_{wind,data}` is measured and :math:`v_{wind,hub}` is the wind
speed at hub height :math:`h_{hub}` of the wind turbine.
For the Hellman exponent :math:`\alpha` many studies use a value of 1/7 for
onshore and a value of 1/9 for offshore. The Hellman exponent can also
be calulated by the following equation [2]_, [3]_:
.. math:: \alpha = \frac{1}{\ln\left(\frac{h_{hub}}{z_0} \right)}
with:
:math:`z_{0}`: roughness length
Parameters `wind_speed_height`, `roughness_length`, `hub_height` and
`obstacle_height` have to be of the same unit.
References
----------
.. [1] Sharp, E.: "Spatiotemporal disaggregation of GB scenarios depicting
increased wind capacity and electrified heat demand in dwellings".
UCL, Energy Institute, 2015, p. 83
.. [2] Hau, E.: "Windkraftanlagen - Grundlagen, Technik, Einsatz,
Wirtschaftlichkeit". 4. Auflage, Springer-Verlag, 2008, p. 517
.. [3] Quaschning V.: "Regenerative Energiesysteme". München, Hanser
Verlag, 2011, p. 279
"""
if hellman_exponent is None:
if roughness_length is not None:
# Return np.array if wind_speed is np.array
if (isinstance(wind_speed, np.ndarray) and
isinstance(roughness_length, pd.Series)):
roughness_length = np.array(roughness_length)
hellman_exponent = 1 / np.log(hub_height / roughness_length)
else:
hellman_exponent = 1/7
return wind_speed * (hub_height / wind_speed_height) ** hellman_exponent | python | def hellman(wind_speed, wind_speed_height, hub_height,
roughness_length=None, hellman_exponent=None):
r"""
Calculates the wind speed at hub height using the hellman equation.
It is assumed that the wind profile follows a power law. This function is
carried out when the parameter `wind_speed_model` of an instance of
the :class:`~.modelchain.ModelChain` class is 'hellman'.
Parameters
----------
wind_speed : pandas.Series or numpy.array
Wind speed time series.
wind_speed_height : float
Height for which the parameter `wind_speed` applies.
hub_height : float
Hub height of wind turbine.
roughness_length : pandas.Series or numpy.array or float
Roughness length. If given and `hellman_exponent` is None:
`hellman_exponent` = 1 / ln(hub_height/roughness_length),
otherwise `hellman_exponent` = 1/7. Default: None.
hellman_exponent : None or float
The Hellman exponent, which combines the increase in wind speed due to
stability of atmospheric conditions and surface roughness into one
constant. If None and roughness length is given
`hellman_exponent` = 1 / ln(hub_height/roughness_length),
otherwise `hellman_exponent` = 1/7. Default: None.
Returns
-------
pandas.Series or numpy.array
Wind speed at hub height. Data type depends on type of `wind_speed`.
Notes
-----
The following equation is used [1]_, [2]_, [3]_:
.. math:: v_{wind,hub}=v_{wind,data}\cdot \left(\frac{h_{hub}}{h_{data}}
\right)^\alpha
with:
v: wind speed, h: height, :math:`\alpha`: Hellman exponent
:math:`h_{data}` is the height in which the wind speed
:math:`v_{wind,data}` is measured and :math:`v_{wind,hub}` is the wind
speed at hub height :math:`h_{hub}` of the wind turbine.
For the Hellman exponent :math:`\alpha` many studies use a value of 1/7 for
onshore and a value of 1/9 for offshore. The Hellman exponent can also
be calulated by the following equation [2]_, [3]_:
.. math:: \alpha = \frac{1}{\ln\left(\frac{h_{hub}}{z_0} \right)}
with:
:math:`z_{0}`: roughness length
Parameters `wind_speed_height`, `roughness_length`, `hub_height` and
`obstacle_height` have to be of the same unit.
References
----------
.. [1] Sharp, E.: "Spatiotemporal disaggregation of GB scenarios depicting
increased wind capacity and electrified heat demand in dwellings".
UCL, Energy Institute, 2015, p. 83
.. [2] Hau, E.: "Windkraftanlagen - Grundlagen, Technik, Einsatz,
Wirtschaftlichkeit". 4. Auflage, Springer-Verlag, 2008, p. 517
.. [3] Quaschning V.: "Regenerative Energiesysteme". München, Hanser
Verlag, 2011, p. 279
"""
if hellman_exponent is None:
if roughness_length is not None:
# Return np.array if wind_speed is np.array
if (isinstance(wind_speed, np.ndarray) and
isinstance(roughness_length, pd.Series)):
roughness_length = np.array(roughness_length)
hellman_exponent = 1 / np.log(hub_height / roughness_length)
else:
hellman_exponent = 1/7
return wind_speed * (hub_height / wind_speed_height) ** hellman_exponent | [
"def",
"hellman",
"(",
"wind_speed",
",",
"wind_speed_height",
",",
"hub_height",
",",
"roughness_length",
"=",
"None",
",",
"hellman_exponent",
"=",
"None",
")",
":",
"if",
"hellman_exponent",
"is",
"None",
":",
"if",
"roughness_length",
"is",
"not",
"None",
... | r"""
Calculates the wind speed at hub height using the hellman equation.
It is assumed that the wind profile follows a power law. This function is
carried out when the parameter `wind_speed_model` of an instance of
the :class:`~.modelchain.ModelChain` class is 'hellman'.
Parameters
----------
wind_speed : pandas.Series or numpy.array
Wind speed time series.
wind_speed_height : float
Height for which the parameter `wind_speed` applies.
hub_height : float
Hub height of wind turbine.
roughness_length : pandas.Series or numpy.array or float
Roughness length. If given and `hellman_exponent` is None:
`hellman_exponent` = 1 / ln(hub_height/roughness_length),
otherwise `hellman_exponent` = 1/7. Default: None.
hellman_exponent : None or float
The Hellman exponent, which combines the increase in wind speed due to
stability of atmospheric conditions and surface roughness into one
constant. If None and roughness length is given
`hellman_exponent` = 1 / ln(hub_height/roughness_length),
otherwise `hellman_exponent` = 1/7. Default: None.
Returns
-------
pandas.Series or numpy.array
Wind speed at hub height. Data type depends on type of `wind_speed`.
Notes
-----
The following equation is used [1]_, [2]_, [3]_:
.. math:: v_{wind,hub}=v_{wind,data}\cdot \left(\frac{h_{hub}}{h_{data}}
\right)^\alpha
with:
v: wind speed, h: height, :math:`\alpha`: Hellman exponent
:math:`h_{data}` is the height in which the wind speed
:math:`v_{wind,data}` is measured and :math:`v_{wind,hub}` is the wind
speed at hub height :math:`h_{hub}` of the wind turbine.
For the Hellman exponent :math:`\alpha` many studies use a value of 1/7 for
onshore and a value of 1/9 for offshore. The Hellman exponent can also
be calulated by the following equation [2]_, [3]_:
.. math:: \alpha = \frac{1}{\ln\left(\frac{h_{hub}}{z_0} \right)}
with:
:math:`z_{0}`: roughness length
Parameters `wind_speed_height`, `roughness_length`, `hub_height` and
`obstacle_height` have to be of the same unit.
References
----------
.. [1] Sharp, E.: "Spatiotemporal disaggregation of GB scenarios depicting
increased wind capacity and electrified heat demand in dwellings".
UCL, Energy Institute, 2015, p. 83
.. [2] Hau, E.: "Windkraftanlagen - Grundlagen, Technik, Einsatz,
Wirtschaftlichkeit". 4. Auflage, Springer-Verlag, 2008, p. 517
.. [3] Quaschning V.: "Regenerative Energiesysteme". München, Hanser
Verlag, 2011, p. 279 | [
"r",
"Calculates",
"the",
"wind",
"speed",
"at",
"hub",
"height",
"using",
"the",
"hellman",
"equation",
"."
] | 421b316139743311b7cb68a69f6b53d2665f7e23 | https://github.com/wind-python/windpowerlib/blob/421b316139743311b7cb68a69f6b53d2665f7e23/windpowerlib/wind_speed.py#L92-L171 | train | 202,087 |
wind-python/windpowerlib | windpowerlib/power_curves.py | smooth_power_curve | def smooth_power_curve(power_curve_wind_speeds, power_curve_values,
block_width=0.5, wind_speed_range=15.0,
standard_deviation_method='turbulence_intensity',
mean_gauss=0, **kwargs):
r"""
Smooths the input power curve values by using a Gauss distribution.
The smoothing serves for taking the distribution of wind speeds over space
into account.
Parameters
----------
power_curve_wind_speeds : pandas.Series or numpy.array
Wind speeds in m/s for which the power curve values are provided in
`power_curve_values`.
power_curve_values : pandas.Series or numpy.array
Power curve values corresponding to wind speeds in
`power_curve_wind_speeds`.
block_width : float
Width between the wind speeds in the sum of equation :eq:`power`.
Default: 0.5.
wind_speed_range : float
The sum in the equation below is taken for this wind speed range below
and above the power curve wind speed. Default: 15.0.
standard_deviation_method : string
Method for calculating the standard deviation for the Gauss
distribution. Options: 'turbulence_intensity', 'Staffell_Pfenninger'.
Default: 'turbulence_intensity'.
mean_gauss : float
Mean of the Gauss distribution in
:py:func:`~.tools.gauss_distribution`. Default: 0.
Other Parameters
----------------
turbulence intensity : float, optional
Turbulence intensity at hub height of the wind turbine, wind farm or
wind turbine cluster the power curve is smoothed for.
Returns
-------
smoothed_power_curve_df : pd.DataFrame
Smoothed power curve. DataFrame has 'wind_speed' and 'value' columns
with wind speeds in m/s and the corresponding power curve value in W.
Notes
-----
The following equation is used to calculated the power curves values of the
smoothed power curve [1]_:
.. math:: P_{smoothed}(v_{std}) = \sum\limits_{v_i} \Delta v_i \cdot P(v_i)
\cdot \frac{1}{\sigma \sqrt{2 \pi}}
\exp \left[-\frac{(v_{std} - v_i -\mu)^2}{2 \sigma^2} \right]
:label: power
with:
P: power [W], v: wind speed [m/s],
:math:`\sigma`: standard deviation (Gauss), :math:`\mu`: mean (Gauss)
:math:`P_{smoothed}` is the smoothed power curve value,
:math:`v_{std}` is the standard wind speed in the power curve,
:math:`\Delta v_i` is the interval length between
:math:`v_\text{i}` and :math:`v_\text{i+1}`
Power curve smoothing is applied to take account for the spatial
distribution of wind speed. This way of smoothing power curves is also used
in [2]_ and [3]_.
The standard deviation :math:`\sigma` of the above equation can be
calculated by the following methods.
'turbulence_intensity' [2]_:
.. math:: \sigma = v_\text{std} \cdot \sigma_\text{n} = v_\text{std}
\cdot TI
with:
TI: turbulence intensity
'Staffell_Pfenninger' [4]_:
.. math:: \sigma = 0.6 \cdot 0.2 \cdot v_\text{std}
References
----------
.. [1] Knorr, K.: "Modellierung von raum-zeitlichen Eigenschaften der
Windenergieeinspeisung für wetterdatenbasierte
Windleistungssimulationen". Universität Kassel, Diss., 2016,
p. 106
.. [2] Nørgaard, P. and Holttinen, H.: "A Multi-Turbine and Power Curve
Approach". Nordic Wind Power Conference, 1.–2.3.2004, 2000, p. 5
.. [3] Kohler, S. and Agricola, A.-Cl. and Seidl, H.:
"dena-Netzstudie II. Integration erneuerbarer Energien in die
deutsche Stromversorgung im Zeitraum 2015 – 2020 mit Ausblick
2025". Technical report, 2010.
.. [4] Staffell, I. and Pfenninger, S.: "Using Bias-Corrected Reanalysis
to Simulate Current and Future Wind Power Output". 2005, p. 11
"""
# Specify normalized standard deviation
if standard_deviation_method == 'turbulence_intensity':
if ('turbulence_intensity' in kwargs and
kwargs['turbulence_intensity'] is not np.nan):
normalized_standard_deviation = kwargs['turbulence_intensity']
else:
raise ValueError("Turbulence intensity must be defined for " +
"using 'turbulence_intensity' as " +
"`standard_deviation_method`")
elif standard_deviation_method == 'Staffell_Pfenninger':
normalized_standard_deviation = 0.2
else:
raise ValueError("{} is no valid `standard_deviation_method`. Valid "
+ "options are 'turbulence_intensity', or "
+ "'Staffell_Pfenninger'".format(
standard_deviation_method))
# Initialize list for power curve values
smoothed_power_curve_values = []
# Append wind speeds to `power_curve_wind_speeds`
maximum_value = power_curve_wind_speeds.values[-1] + wind_speed_range
while power_curve_wind_speeds.values[-1] < maximum_value:
power_curve_wind_speeds = power_curve_wind_speeds.append(
pd.Series(power_curve_wind_speeds.iloc[-1] + 0.5,
index=[power_curve_wind_speeds.index[-1] + 1]))
power_curve_values = power_curve_values.append(
pd.Series(0.0, index=[power_curve_values.index[-1] + 1]))
for power_curve_wind_speed in power_curve_wind_speeds:
# Create array of wind speeds for the sum
wind_speeds_block = (np.arange(
-wind_speed_range, wind_speed_range + block_width, block_width) +
power_curve_wind_speed)
# Get standard deviation for Gauss function
standard_deviation = (
(power_curve_wind_speed * normalized_standard_deviation + 0.6)
if standard_deviation_method is 'Staffell_Pfenninger'
else power_curve_wind_speed * normalized_standard_deviation)
# Get the smoothed value of the power output
if standard_deviation == 0.0:
# The gaussian distribution is not defined for a standard deviation
# of zero. Smoothed power curve value is set to zero.
smoothed_value = 0.0
else:
smoothed_value = sum(
block_width * np.interp(wind_speed, power_curve_wind_speeds,
power_curve_values, left=0, right=0) *
tools.gauss_distribution(
power_curve_wind_speed - wind_speed,
standard_deviation, mean_gauss)
for wind_speed in wind_speeds_block)
# Add value to list - add zero if `smoothed_value` is nan as Gauss
# distribution for a standard deviation of zero.
smoothed_power_curve_values.append(smoothed_value)
# Create smoothed power curve data frame
smoothed_power_curve_df = pd.DataFrame(
data=[list(power_curve_wind_speeds.values),
smoothed_power_curve_values]).transpose()
# Rename columns of the data frame
smoothed_power_curve_df.columns = ['wind_speed', 'value']
return smoothed_power_curve_df | python | def smooth_power_curve(power_curve_wind_speeds, power_curve_values,
block_width=0.5, wind_speed_range=15.0,
standard_deviation_method='turbulence_intensity',
mean_gauss=0, **kwargs):
r"""
Smooths the input power curve values by using a Gauss distribution.
The smoothing serves for taking the distribution of wind speeds over space
into account.
Parameters
----------
power_curve_wind_speeds : pandas.Series or numpy.array
Wind speeds in m/s for which the power curve values are provided in
`power_curve_values`.
power_curve_values : pandas.Series or numpy.array
Power curve values corresponding to wind speeds in
`power_curve_wind_speeds`.
block_width : float
Width between the wind speeds in the sum of equation :eq:`power`.
Default: 0.5.
wind_speed_range : float
The sum in the equation below is taken for this wind speed range below
and above the power curve wind speed. Default: 15.0.
standard_deviation_method : string
Method for calculating the standard deviation for the Gauss
distribution. Options: 'turbulence_intensity', 'Staffell_Pfenninger'.
Default: 'turbulence_intensity'.
mean_gauss : float
Mean of the Gauss distribution in
:py:func:`~.tools.gauss_distribution`. Default: 0.
Other Parameters
----------------
turbulence intensity : float, optional
Turbulence intensity at hub height of the wind turbine, wind farm or
wind turbine cluster the power curve is smoothed for.
Returns
-------
smoothed_power_curve_df : pd.DataFrame
Smoothed power curve. DataFrame has 'wind_speed' and 'value' columns
with wind speeds in m/s and the corresponding power curve value in W.
Notes
-----
The following equation is used to calculated the power curves values of the
smoothed power curve [1]_:
.. math:: P_{smoothed}(v_{std}) = \sum\limits_{v_i} \Delta v_i \cdot P(v_i)
\cdot \frac{1}{\sigma \sqrt{2 \pi}}
\exp \left[-\frac{(v_{std} - v_i -\mu)^2}{2 \sigma^2} \right]
:label: power
with:
P: power [W], v: wind speed [m/s],
:math:`\sigma`: standard deviation (Gauss), :math:`\mu`: mean (Gauss)
:math:`P_{smoothed}` is the smoothed power curve value,
:math:`v_{std}` is the standard wind speed in the power curve,
:math:`\Delta v_i` is the interval length between
:math:`v_\text{i}` and :math:`v_\text{i+1}`
Power curve smoothing is applied to take account for the spatial
distribution of wind speed. This way of smoothing power curves is also used
in [2]_ and [3]_.
The standard deviation :math:`\sigma` of the above equation can be
calculated by the following methods.
'turbulence_intensity' [2]_:
.. math:: \sigma = v_\text{std} \cdot \sigma_\text{n} = v_\text{std}
\cdot TI
with:
TI: turbulence intensity
'Staffell_Pfenninger' [4]_:
.. math:: \sigma = 0.6 \cdot 0.2 \cdot v_\text{std}
References
----------
.. [1] Knorr, K.: "Modellierung von raum-zeitlichen Eigenschaften der
Windenergieeinspeisung für wetterdatenbasierte
Windleistungssimulationen". Universität Kassel, Diss., 2016,
p. 106
.. [2] Nørgaard, P. and Holttinen, H.: "A Multi-Turbine and Power Curve
Approach". Nordic Wind Power Conference, 1.–2.3.2004, 2000, p. 5
.. [3] Kohler, S. and Agricola, A.-Cl. and Seidl, H.:
"dena-Netzstudie II. Integration erneuerbarer Energien in die
deutsche Stromversorgung im Zeitraum 2015 – 2020 mit Ausblick
2025". Technical report, 2010.
.. [4] Staffell, I. and Pfenninger, S.: "Using Bias-Corrected Reanalysis
to Simulate Current and Future Wind Power Output". 2005, p. 11
"""
# Specify normalized standard deviation
if standard_deviation_method == 'turbulence_intensity':
if ('turbulence_intensity' in kwargs and
kwargs['turbulence_intensity'] is not np.nan):
normalized_standard_deviation = kwargs['turbulence_intensity']
else:
raise ValueError("Turbulence intensity must be defined for " +
"using 'turbulence_intensity' as " +
"`standard_deviation_method`")
elif standard_deviation_method == 'Staffell_Pfenninger':
normalized_standard_deviation = 0.2
else:
raise ValueError("{} is no valid `standard_deviation_method`. Valid "
+ "options are 'turbulence_intensity', or "
+ "'Staffell_Pfenninger'".format(
standard_deviation_method))
# Initialize list for power curve values
smoothed_power_curve_values = []
# Append wind speeds to `power_curve_wind_speeds`
maximum_value = power_curve_wind_speeds.values[-1] + wind_speed_range
while power_curve_wind_speeds.values[-1] < maximum_value:
power_curve_wind_speeds = power_curve_wind_speeds.append(
pd.Series(power_curve_wind_speeds.iloc[-1] + 0.5,
index=[power_curve_wind_speeds.index[-1] + 1]))
power_curve_values = power_curve_values.append(
pd.Series(0.0, index=[power_curve_values.index[-1] + 1]))
for power_curve_wind_speed in power_curve_wind_speeds:
# Create array of wind speeds for the sum
wind_speeds_block = (np.arange(
-wind_speed_range, wind_speed_range + block_width, block_width) +
power_curve_wind_speed)
# Get standard deviation for Gauss function
standard_deviation = (
(power_curve_wind_speed * normalized_standard_deviation + 0.6)
if standard_deviation_method is 'Staffell_Pfenninger'
else power_curve_wind_speed * normalized_standard_deviation)
# Get the smoothed value of the power output
if standard_deviation == 0.0:
# The gaussian distribution is not defined for a standard deviation
# of zero. Smoothed power curve value is set to zero.
smoothed_value = 0.0
else:
smoothed_value = sum(
block_width * np.interp(wind_speed, power_curve_wind_speeds,
power_curve_values, left=0, right=0) *
tools.gauss_distribution(
power_curve_wind_speed - wind_speed,
standard_deviation, mean_gauss)
for wind_speed in wind_speeds_block)
# Add value to list - add zero if `smoothed_value` is nan as Gauss
# distribution for a standard deviation of zero.
smoothed_power_curve_values.append(smoothed_value)
# Create smoothed power curve data frame
smoothed_power_curve_df = pd.DataFrame(
data=[list(power_curve_wind_speeds.values),
smoothed_power_curve_values]).transpose()
# Rename columns of the data frame
smoothed_power_curve_df.columns = ['wind_speed', 'value']
return smoothed_power_curve_df | [
"def",
"smooth_power_curve",
"(",
"power_curve_wind_speeds",
",",
"power_curve_values",
",",
"block_width",
"=",
"0.5",
",",
"wind_speed_range",
"=",
"15.0",
",",
"standard_deviation_method",
"=",
"'turbulence_intensity'",
",",
"mean_gauss",
"=",
"0",
",",
"*",
"*",
... | r"""
Smooths the input power curve values by using a Gauss distribution.
The smoothing serves for taking the distribution of wind speeds over space
into account.
Parameters
----------
power_curve_wind_speeds : pandas.Series or numpy.array
Wind speeds in m/s for which the power curve values are provided in
`power_curve_values`.
power_curve_values : pandas.Series or numpy.array
Power curve values corresponding to wind speeds in
`power_curve_wind_speeds`.
block_width : float
Width between the wind speeds in the sum of equation :eq:`power`.
Default: 0.5.
wind_speed_range : float
The sum in the equation below is taken for this wind speed range below
and above the power curve wind speed. Default: 15.0.
standard_deviation_method : string
Method for calculating the standard deviation for the Gauss
distribution. Options: 'turbulence_intensity', 'Staffell_Pfenninger'.
Default: 'turbulence_intensity'.
mean_gauss : float
Mean of the Gauss distribution in
:py:func:`~.tools.gauss_distribution`. Default: 0.
Other Parameters
----------------
turbulence intensity : float, optional
Turbulence intensity at hub height of the wind turbine, wind farm or
wind turbine cluster the power curve is smoothed for.
Returns
-------
smoothed_power_curve_df : pd.DataFrame
Smoothed power curve. DataFrame has 'wind_speed' and 'value' columns
with wind speeds in m/s and the corresponding power curve value in W.
Notes
-----
The following equation is used to calculated the power curves values of the
smoothed power curve [1]_:
.. math:: P_{smoothed}(v_{std}) = \sum\limits_{v_i} \Delta v_i \cdot P(v_i)
\cdot \frac{1}{\sigma \sqrt{2 \pi}}
\exp \left[-\frac{(v_{std} - v_i -\mu)^2}{2 \sigma^2} \right]
:label: power
with:
P: power [W], v: wind speed [m/s],
:math:`\sigma`: standard deviation (Gauss), :math:`\mu`: mean (Gauss)
:math:`P_{smoothed}` is the smoothed power curve value,
:math:`v_{std}` is the standard wind speed in the power curve,
:math:`\Delta v_i` is the interval length between
:math:`v_\text{i}` and :math:`v_\text{i+1}`
Power curve smoothing is applied to take account for the spatial
distribution of wind speed. This way of smoothing power curves is also used
in [2]_ and [3]_.
The standard deviation :math:`\sigma` of the above equation can be
calculated by the following methods.
'turbulence_intensity' [2]_:
.. math:: \sigma = v_\text{std} \cdot \sigma_\text{n} = v_\text{std}
\cdot TI
with:
TI: turbulence intensity
'Staffell_Pfenninger' [4]_:
.. math:: \sigma = 0.6 \cdot 0.2 \cdot v_\text{std}
References
----------
.. [1] Knorr, K.: "Modellierung von raum-zeitlichen Eigenschaften der
Windenergieeinspeisung für wetterdatenbasierte
Windleistungssimulationen". Universität Kassel, Diss., 2016,
p. 106
.. [2] Nørgaard, P. and Holttinen, H.: "A Multi-Turbine and Power Curve
Approach". Nordic Wind Power Conference, 1.–2.3.2004, 2000, p. 5
.. [3] Kohler, S. and Agricola, A.-Cl. and Seidl, H.:
"dena-Netzstudie II. Integration erneuerbarer Energien in die
deutsche Stromversorgung im Zeitraum 2015 – 2020 mit Ausblick
2025". Technical report, 2010.
.. [4] Staffell, I. and Pfenninger, S.: "Using Bias-Corrected Reanalysis
to Simulate Current and Future Wind Power Output". 2005, p. 11 | [
"r",
"Smooths",
"the",
"input",
"power",
"curve",
"values",
"by",
"using",
"a",
"Gauss",
"distribution",
"."
] | 421b316139743311b7cb68a69f6b53d2665f7e23 | https://github.com/wind-python/windpowerlib/blob/421b316139743311b7cb68a69f6b53d2665f7e23/windpowerlib/power_curves.py#L16-L172 | train | 202,088 |
def temperature_hub(self, weather_df):
    r"""
    Calculate the temperature of air at hub height.

    If a temperature series measured at hub height is available it is
    returned directly; otherwise the method specified by
    `temperature_model` is used.

    Parameters
    ----------
    weather_df : pandas.DataFrame
        DataFrame with time series for temperature `temperature` in K.
        The columns of the DataFrame are a MultiIndex where the first
        level contains the variable name (e.g. temperature) and the
        second level contains the height in m at which it applies
        (e.g. 10, if it was measured at a height of 10 m). See
        documentation of :func:`ModelChain.run_model` for an example on
        how to create the weather_df DataFrame.

    Returns
    -------
    temperature_hub : pandas.Series or numpy.array
        Temperature of air in K at hub height.

    Raises
    ------
    ValueError
        If `temperature_model` is neither 'linear_gradient' nor
        'interpolation_extrapolation'.

    Notes
    -----
    If `weather_df` contains temperatures at different heights the
    given temperature(s) closest to the hub height are used.
    """
    temperature_df = weather_df['temperature']
    hub_height = self.power_plant.hub_height
    if hub_height in temperature_df:
        # Temperature was measured at hub height; no model needed.
        temperature_hub = temperature_df[hub_height]
    elif self.temperature_model == 'linear_gradient':
        logging.debug('Calculating temperature using temperature '
                      'gradient.')
        # Use the measurement height closest to the hub height.
        closest_height = min(temperature_df.columns,
                             key=lambda height: abs(height - hub_height))
        temperature_hub = temperature.linear_gradient(
            temperature_df[closest_height], closest_height, hub_height)
    elif self.temperature_model == 'interpolation_extrapolation':
        logging.debug('Calculating temperature using linear inter- or '
                      'extrapolation.')
        temperature_hub = tools.linear_interpolation_extrapolation(
            temperature_df, hub_height)
    else:
        raise ValueError("'{0}' is an invalid value. ".format(
            self.temperature_model) + "`temperature_model` must be "
            "'linear_gradient' or 'interpolation_extrapolation'.")
    return temperature_hub
r"""
Calculates the temperature of air at hub height.
The temperature is calculated using the method specified by
the parameter `temperature_model`.
Parameters
----------
weather_df : pandas.DataFrame
DataFrame with time series for temperature `temperature` in K.
The columns of the DataFrame are a MultiIndex where the first level
contains the variable name (e.g. temperature) and the second level
contains the height at which it applies (e.g. 10, if it was
measured at a height of 10 m). See documentation of
:func:`ModelChain.run_model` for an example on how to create the
weather_df DataFrame.
Returns
-------
temperature_hub : pandas.Series or numpy.array
Temperature of air in K at hub height.
Notes
-----
If `weather_df` contains temperatures at different heights the given
temperature(s) closest to the hub height are used.
"""
if self.power_plant.hub_height in weather_df['temperature']:
temperature_hub = weather_df['temperature'][
self.power_plant.hub_height]
elif self.temperature_model == 'linear_gradient':
logging.debug('Calculating temperature using temperature '
'gradient.')
closest_height = weather_df['temperature'].columns[
min(range(len(weather_df['temperature'].columns)),
key=lambda i: abs(weather_df['temperature'].columns[i] -
self.power_plant.hub_height))]
temperature_hub = temperature.linear_gradient(
weather_df['temperature'][closest_height], closest_height,
self.power_plant.hub_height)
elif self.temperature_model == 'interpolation_extrapolation':
logging.debug('Calculating temperature using linear inter- or '
'extrapolation.')
temperature_hub = tools.linear_interpolation_extrapolation(
weather_df['temperature'], self.power_plant.hub_height)
else:
raise ValueError("'{0}' is an invalid value. ".format(
self.temperature_model) + "`temperature_model` must be "
"'linear_gradient' or 'interpolation_extrapolation'.")
return temperature_hub | [
"def",
"temperature_hub",
"(",
"self",
",",
"weather_df",
")",
":",
"if",
"self",
".",
"power_plant",
".",
"hub_height",
"in",
"weather_df",
"[",
"'temperature'",
"]",
":",
"temperature_hub",
"=",
"weather_df",
"[",
"'temperature'",
"]",
"[",
"self",
".",
"p... | r"""
Calculates the temperature of air at hub height.
The temperature is calculated using the method specified by
the parameter `temperature_model`.
Parameters
----------
weather_df : pandas.DataFrame
DataFrame with time series for temperature `temperature` in K.
The columns of the DataFrame are a MultiIndex where the first level
contains the variable name (e.g. temperature) and the second level
contains the height at which it applies (e.g. 10, if it was
measured at a height of 10 m). See documentation of
:func:`ModelChain.run_model` for an example on how to create the
weather_df DataFrame.
Returns
-------
temperature_hub : pandas.Series or numpy.array
Temperature of air in K at hub height.
Notes
-----
If `weather_df` contains temperatures at different heights the given
temperature(s) closest to the hub height are used. | [
"r",
"Calculates",
"the",
"temperature",
"of",
"air",
"at",
"hub",
"height",
"."
] | 421b316139743311b7cb68a69f6b53d2665f7e23 | https://github.com/wind-python/windpowerlib/blob/421b316139743311b7cb68a69f6b53d2665f7e23/windpowerlib/modelchain.py#L124-L175 | train | 202,089 |
def density_hub(self, weather_df):
    r"""
    Calculate the density of air at hub height.

    The density is calculated using the method specified by the
    parameter `density_model`. For 'barometric' and 'ideal_gas' the
    temperature at hub height is determined first with the method
    specified by `temperature_model`.

    Parameters
    ----------
    weather_df : pandas.DataFrame
        DataFrame with time series for temperature `temperature` in K,
        pressure `pressure` in Pa and/or density `density` in kg/m³,
        depending on the `density_model` used. The columns of the
        DataFrame are a MultiIndex where the first level contains the
        variable name and the second level the height in m at which it
        applies. See documentation of :func:`ModelChain.run_model` for
        an example on how to create the weather_df DataFrame.

    Returns
    -------
    density_hub : pandas.Series or numpy.array
        Density of air in kg/m³ at hub height.

    Raises
    ------
    ValueError
        If `density_model` is not one of 'barometric', 'ideal_gas' or
        'interpolation_extrapolation'.

    Notes
    -----
    If `weather_df` contains data at different heights the data closest
    to the hub height are used. For 'interpolation_extrapolation' at
    least two density time series must be provided.
    """
    if self.density_model == 'interpolation_extrapolation':
        logging.debug('Calculating density using linear inter- or '
                      'extrapolation.')
        return tools.linear_interpolation_extrapolation(
            weather_df['density'], self.power_plant.hub_height)
    # Both remaining models need the air temperature at hub height.
    temperature_hub = self.temperature_hub(weather_df)
    if self.density_model in ('barometric', 'ideal_gas'):
        hub_height = self.power_plant.hub_height
        pressure_df = weather_df['pressure']
        # Use the pressure series measured closest to the hub height.
        closest_height = min(pressure_df.columns,
                             key=lambda height: abs(height - hub_height))
        if self.density_model == 'barometric':
            logging.debug('Calculating density using barometric height '
                          'equation.')
            calculate = density.barometric
        else:
            logging.debug('Calculating density using ideal gas equation.')
            calculate = density.ideal_gas
        return calculate(pressure_df[closest_height], closest_height,
                         hub_height, temperature_hub)
    raise ValueError("'{0}' is an invalid value. ".format(
        self.density_model) + "`density_model` " +
        "must be 'barometric', 'ideal_gas' or " +
        "'interpolation_extrapolation'.")
r"""
Calculates the density of air at hub height.
The density is calculated using the method specified by the parameter
`density_model`. Previous to the calculation of the density the
temperature at hub height is calculated using the method specified by
the parameter `temperature_model`.
Parameters
----------
weather_df : pandas.DataFrame
DataFrame with time series for temperature `temperature` in K,
pressure `pressure` in Pa and/or density `density` in kg/m³,
depending on the `density_model` used.
The columns of the DataFrame are a MultiIndex where the first level
contains the variable name (e.g. temperature) and the second level
contains the height at which it applies (e.g. 10, if it was
measured at a height of 10 m). See documentation of
:func:`ModelChain.run_model` for an example on how to create the
weather_df DataFrame.
Returns
-------
density_hub : pandas.Series or numpy.array
Density of air in kg/m³ at hub height.
Notes
-----
If `weather_df` contains data at different heights the data closest to
the hub height are used.
If `interpolation_extrapolation` is used to calculate the density at
hub height, the `weather_df` must contain at least two time series for
density.
"""
if self.density_model != 'interpolation_extrapolation':
temperature_hub = self.temperature_hub(weather_df)
# Calculation of density in kg/m³ at hub height
if self.density_model == 'barometric':
logging.debug('Calculating density using barometric height '
'equation.')
closest_height = weather_df['pressure'].columns[
min(range(len(weather_df['pressure'].columns)),
key=lambda i: abs(weather_df['pressure'].columns[i] -
self.power_plant.hub_height))]
density_hub = density.barometric(
weather_df['pressure'][closest_height], closest_height,
self.power_plant.hub_height, temperature_hub)
elif self.density_model == 'ideal_gas':
logging.debug('Calculating density using ideal gas equation.')
closest_height = weather_df['pressure'].columns[
min(range(len(weather_df['pressure'].columns)),
key=lambda i: abs(weather_df['pressure'].columns[i] -
self.power_plant.hub_height))]
density_hub = density.ideal_gas(
weather_df['pressure'][closest_height], closest_height,
self.power_plant.hub_height, temperature_hub)
elif self.density_model == 'interpolation_extrapolation':
logging.debug('Calculating density using linear inter- or '
'extrapolation.')
density_hub = tools.linear_interpolation_extrapolation(
weather_df['density'], self.power_plant.hub_height)
else:
raise ValueError("'{0}' is an invalid value. ".format(
self.density_model) + "`density_model` " +
"must be 'barometric', 'ideal_gas' or " +
"'interpolation_extrapolation'.")
return density_hub | [
"def",
"density_hub",
"(",
"self",
",",
"weather_df",
")",
":",
"if",
"self",
".",
"density_model",
"!=",
"'interpolation_extrapolation'",
":",
"temperature_hub",
"=",
"self",
".",
"temperature_hub",
"(",
"weather_df",
")",
"# Calculation of density in kg/m³ at hub heig... | r"""
Calculates the density of air at hub height.
The density is calculated using the method specified by the parameter
`density_model`. Previous to the calculation of the density the
temperature at hub height is calculated using the method specified by
the parameter `temperature_model`.
Parameters
----------
weather_df : pandas.DataFrame
DataFrame with time series for temperature `temperature` in K,
pressure `pressure` in Pa and/or density `density` in kg/m³,
depending on the `density_model` used.
The columns of the DataFrame are a MultiIndex where the first level
contains the variable name (e.g. temperature) and the second level
contains the height at which it applies (e.g. 10, if it was
measured at a height of 10 m). See documentation of
:func:`ModelChain.run_model` for an example on how to create the
weather_df DataFrame.
Returns
-------
density_hub : pandas.Series or numpy.array
Density of air in kg/m³ at hub height.
Notes
-----
If `weather_df` contains data at different heights the data closest to
the hub height are used.
If `interpolation_extrapolation` is used to calculate the density at
hub height, the `weather_df` must contain at least two time series for
density. | [
"r",
"Calculates",
"the",
"density",
"of",
"air",
"at",
"hub",
"height",
"."
] | 421b316139743311b7cb68a69f6b53d2665f7e23 | https://github.com/wind-python/windpowerlib/blob/421b316139743311b7cb68a69f6b53d2665f7e23/windpowerlib/modelchain.py#L177-L245 | train | 202,090 |
def wind_speed_hub(self, weather_df):
    r"""
    Calculate the wind speed at hub height.

    If a wind speed series measured at hub height is available it is
    returned directly; otherwise the method specified by the parameter
    `wind_speed_model` is used.

    Parameters
    ----------
    weather_df : pandas.DataFrame
        DataFrame with time series for wind speed `wind_speed` in m/s
        and roughness length `roughness_length` in m. The columns of
        the DataFrame are a MultiIndex where the first level contains
        the variable name and the second level the height in m at which
        it applies. See documentation of :func:`ModelChain.run_model`
        for an example on how to create the weather_df DataFrame.

    Returns
    -------
    wind_speed_hub : pandas.Series or numpy.array
        Wind speed in m/s at hub height.

    Raises
    ------
    ValueError
        If `wind_speed_model` is not one of the supported options.

    Notes
    -----
    If `weather_df` contains wind speeds at different heights the given
    wind speed(s) closest to the hub height are used.
    """
    wind_speed_df = weather_df['wind_speed']
    hub_height = self.power_plant.hub_height
    if hub_height in wind_speed_df:
        # Wind speed was measured at hub height; no model needed.
        return wind_speed_df[hub_height]
    if self.wind_speed_model == 'logarithmic':
        logging.debug('Calculating wind speed using logarithmic wind '
                      'profile.')
        # Use the measurement height closest to the hub height.
        closest_height = min(wind_speed_df.columns,
                             key=lambda height: abs(height - hub_height))
        return wind_speed.logarithmic_profile(
            wind_speed_df[closest_height], closest_height, hub_height,
            weather_df['roughness_length'].iloc[:, 0],
            self.obstacle_height)
    if self.wind_speed_model == 'hellman':
        logging.debug('Calculating wind speed using hellman equation.')
        closest_height = min(wind_speed_df.columns,
                             key=lambda height: abs(height - hub_height))
        return wind_speed.hellman(
            wind_speed_df[closest_height], closest_height, hub_height,
            weather_df['roughness_length'].iloc[:, 0],
            self.hellman_exp)
    if self.wind_speed_model == 'interpolation_extrapolation':
        logging.debug('Calculating wind speed using linear inter- or '
                      'extrapolation.')
        return tools.linear_interpolation_extrapolation(
            wind_speed_df, hub_height)
    if self.wind_speed_model == 'log_interpolation_extrapolation':
        logging.debug('Calculating wind speed using logarithmic inter- or '
                      'extrapolation.')
        return tools.logarithmic_interpolation_extrapolation(
            wind_speed_df, hub_height)
    raise ValueError("'{0}' is an invalid value. ".format(
        self.wind_speed_model) + "`wind_speed_model` must be "
        "'logarithmic', 'hellman', 'interpolation_extrapolation' " +
        "or 'log_interpolation_extrapolation'.")
r"""
Calculates the wind speed at hub height.
The method specified by the parameter `wind_speed_model` is used.
Parameters
----------
weather_df : pandas.DataFrame
DataFrame with time series for wind speed `wind_speed` in m/s and
roughness length `roughness_length` in m.
The columns of the DataFrame are a MultiIndex where the first level
contains the variable name (e.g. wind_speed) and the second level
contains the height at which it applies (e.g. 10, if it was
measured at a height of 10 m). See documentation of
:func:`ModelChain.run_model` for an example on how to create the
weather_df DataFrame.
Returns
-------
wind_speed_hub : pandas.Series or numpy.array
Wind speed in m/s at hub height.
Notes
-----
If `weather_df` contains wind speeds at different heights the given
wind speed(s) closest to the hub height are used.
"""
if self.power_plant.hub_height in weather_df['wind_speed']:
wind_speed_hub = weather_df['wind_speed'][
self.power_plant.hub_height]
elif self.wind_speed_model == 'logarithmic':
logging.debug('Calculating wind speed using logarithmic wind '
'profile.')
closest_height = weather_df['wind_speed'].columns[
min(range(len(weather_df['wind_speed'].columns)),
key=lambda i: abs(weather_df['wind_speed'].columns[i] -
self.power_plant.hub_height))]
wind_speed_hub = wind_speed.logarithmic_profile(
weather_df['wind_speed'][closest_height], closest_height,
self.power_plant.hub_height,
weather_df['roughness_length'].iloc[:, 0],
self.obstacle_height)
elif self.wind_speed_model == 'hellman':
logging.debug('Calculating wind speed using hellman equation.')
closest_height = weather_df['wind_speed'].columns[
min(range(len(weather_df['wind_speed'].columns)),
key=lambda i: abs(weather_df['wind_speed'].columns[i] -
self.power_plant.hub_height))]
wind_speed_hub = wind_speed.hellman(
weather_df['wind_speed'][closest_height], closest_height,
self.power_plant.hub_height,
weather_df['roughness_length'].iloc[:, 0],
self.hellman_exp)
elif self.wind_speed_model == 'interpolation_extrapolation':
logging.debug('Calculating wind speed using linear inter- or '
'extrapolation.')
wind_speed_hub = tools.linear_interpolation_extrapolation(
weather_df['wind_speed'], self.power_plant.hub_height)
elif self.wind_speed_model == 'log_interpolation_extrapolation':
logging.debug('Calculating wind speed using logarithmic inter- or '
'extrapolation.')
wind_speed_hub = tools.logarithmic_interpolation_extrapolation(
weather_df['wind_speed'], self.power_plant.hub_height)
else:
raise ValueError("'{0}' is an invalid value. ".format(
self.wind_speed_model) + "`wind_speed_model` must be "
"'logarithmic', 'hellman', 'interpolation_extrapolation' " +
"or 'log_interpolation_extrapolation'.")
return wind_speed_hub | [
"def",
"wind_speed_hub",
"(",
"self",
",",
"weather_df",
")",
":",
"if",
"self",
".",
"power_plant",
".",
"hub_height",
"in",
"weather_df",
"[",
"'wind_speed'",
"]",
":",
"wind_speed_hub",
"=",
"weather_df",
"[",
"'wind_speed'",
"]",
"[",
"self",
".",
"power... | r"""
Calculates the wind speed at hub height.
The method specified by the parameter `wind_speed_model` is used.
Parameters
----------
weather_df : pandas.DataFrame
DataFrame with time series for wind speed `wind_speed` in m/s and
roughness length `roughness_length` in m.
The columns of the DataFrame are a MultiIndex where the first level
contains the variable name (e.g. wind_speed) and the second level
contains the height at which it applies (e.g. 10, if it was
measured at a height of 10 m). See documentation of
:func:`ModelChain.run_model` for an example on how to create the
weather_df DataFrame.
Returns
-------
wind_speed_hub : pandas.Series or numpy.array
Wind speed in m/s at hub height.
Notes
-----
If `weather_df` contains wind speeds at different heights the given
wind speed(s) closest to the hub height are used. | [
"r",
"Calculates",
"the",
"wind",
"speed",
"at",
"hub",
"height",
"."
] | 421b316139743311b7cb68a69f6b53d2665f7e23 | https://github.com/wind-python/windpowerlib/blob/421b316139743311b7cb68a69f6b53d2665f7e23/windpowerlib/modelchain.py#L247-L317 | train | 202,091 |
def calculate_power_output(self, wind_speed_hub, density_hub):
    r"""
    Calculate the power output of the wind power plant.

    The method specified by the parameter `power_output_model` is used
    ('power_curve' or 'power_coefficient_curve').

    Parameters
    ----------
    wind_speed_hub : pandas.Series or numpy.array
        Wind speed at hub height in m/s.
    density_hub : pandas.Series or numpy.array
        Density of air at hub height in kg/m³.

    Returns
    -------
    pandas.Series
        Electrical power output of the wind turbine in W.

    Raises
    ------
    TypeError
        If the curve required by `power_output_model` is not set on the
        power plant.
    ValueError
        If `power_output_model` is not one of the supported options.
    """
    model = self.power_output_model
    if model == 'power_curve':
        curve = self.power_plant.power_curve
        if curve is None:
            raise TypeError("Power curve values of " +
                            self.power_plant.name +
                            " are missing.")
        logging.debug('Calculating power output using power curve.')
        return power_output.power_curve(
            wind_speed_hub, curve['wind_speed'], curve['value'],
            density_hub, self.density_correction)
    if model == 'power_coefficient_curve':
        curve = self.power_plant.power_coefficient_curve
        if curve is None:
            raise TypeError("Power coefficient curve values of " +
                            self.power_plant.name +
                            " are missing.")
        logging.debug('Calculating power output using power coefficient '
                      'curve.')
        return power_output.power_coefficient_curve(
            wind_speed_hub, curve['wind_speed'], curve['value'],
            self.power_plant.rotor_diameter, density_hub)
    raise ValueError("'{0}' is an invalid value. ".format(model) +
                     "`power_output_model` must be " +
                     "'power_curve' or 'power_coefficient_curve'.")
r"""
Calculates the power output of the wind power plant.
The method specified by the parameter `power_output_model` is used.
Parameters
----------
wind_speed_hub : pandas.Series or numpy.array
Wind speed at hub height in m/s.
density_hub : pandas.Series or numpy.array
Density of air at hub height in kg/m³.
Returns
-------
pandas.Series
Electrical power output of the wind turbine in W.
"""
if self.power_output_model == 'power_curve':
if self.power_plant.power_curve is None:
raise TypeError("Power curve values of " +
self.power_plant.name +
" are missing.")
logging.debug('Calculating power output using power curve.')
return (power_output.power_curve(
wind_speed_hub,
self.power_plant.power_curve['wind_speed'],
self.power_plant.power_curve['value'],
density_hub, self.density_correction))
elif self.power_output_model == 'power_coefficient_curve':
if self.power_plant.power_coefficient_curve is None:
raise TypeError("Power coefficient curve values of " +
self.power_plant.name +
" are missing.")
logging.debug('Calculating power output using power coefficient '
'curve.')
return (power_output.power_coefficient_curve(
wind_speed_hub,
self.power_plant.power_coefficient_curve[
'wind_speed'],
self.power_plant.power_coefficient_curve[
'value'],
self.power_plant.rotor_diameter, density_hub))
else:
raise ValueError("'{0}' is an invalid value. ".format(
self.power_output_model) +
"`power_output_model` must be " +
"'power_curve' or 'power_coefficient_curve'.") | [
"def",
"calculate_power_output",
"(",
"self",
",",
"wind_speed_hub",
",",
"density_hub",
")",
":",
"if",
"self",
".",
"power_output_model",
"==",
"'power_curve'",
":",
"if",
"self",
".",
"power_plant",
".",
"power_curve",
"is",
"None",
":",
"raise",
"TypeError",... | r"""
Calculates the power output of the wind power plant.
The method specified by the parameter `power_output_model` is used.
Parameters
----------
wind_speed_hub : pandas.Series or numpy.array
Wind speed at hub height in m/s.
density_hub : pandas.Series or numpy.array
Density of air at hub height in kg/m³.
Returns
-------
pandas.Series
Electrical power output of the wind turbine in W. | [
"r",
"Calculates",
"the",
"power",
"output",
"of",
"the",
"wind",
"power",
"plant",
"."
] | 421b316139743311b7cb68a69f6b53d2665f7e23 | https://github.com/wind-python/windpowerlib/blob/421b316139743311b7cb68a69f6b53d2665f7e23/windpowerlib/modelchain.py#L319-L367 | train | 202,092 |
def linear_interpolation_extrapolation(df, target_height):
    r"""
    Linearly inter- or extrapolate between the values of a data frame.

    Approximates a parameter (e.g. wind speed) available at two or more
    heights at `target_height` (e.g. hub height). This function is
    applied when the parameter `wind_speed_model`, `density_model` or
    `temperature_model` of a :class:`~.modelchain.ModelChain` instance
    is 'interpolation_extrapolation'.

    Parameters
    ----------
    df : pandas.DataFrame
        DataFrame with time series for the parameter that is to be
        inter- or extrapolated. The columns of the DataFrame are the
        heights at which the parameter is available. If more than two
        heights are given, the two closest to `target_height` are used.
    target_height : float
        Height for which the parameter is approximated (e.g. hub
        height).

    Returns
    -------
    pandas.Series
        Result of the inter-/extrapolation (e.g. wind speed at hub
        height).

    Notes
    -----
    For the inter- and extrapolation the following equation is used:

    .. math:: f(x) = \frac{(f(x_2) - f(x_1))}{(x_2 - x_1)} \cdot
        (x - x_1) + f(x_1)
    """
    # Order the available heights by their distance to the target
    # height; the first two entries are the ones used for the fit.
    ordered_heights = sorted(
        df.columns, key=lambda height: abs(height - target_height))
    closest, second = ordered_heights[0], ordered_heights[1]
    slope = (df[second] - df[closest]) / (second - closest)
    return slope * (target_height - closest) + df[closest]
r"""
Linear inter- or extrapolates between the values of a data frame.
This function can be used for the inter-/extrapolation of a parameter
(e.g wind speed) available at two or more different heights, to approximate
the value at hub height. The function is carried out when the parameter
`wind_speed_model`, `density_model` or `temperature_model` of an
instance of the :class:`~.modelchain.ModelChain` class is
'interpolation_extrapolation'.
Parameters
----------
df : pandas.DataFrame
DataFrame with time series for parameter that is to be interpolated or
extrapolated. The columns of the DataFrame are the different heights
for which the parameter is available. If more than two heights are
given, the two closest heights are used. See example below on how the
DataFrame should look like and how the function can be used.
target_height : float
Height for which the parameter is approximated (e.g. hub height).
Returns
-------
pandas.Series
Result of the inter-/extrapolation (e.g. wind speed at hub height).
Notes
-----
For the inter- and extrapolation the following equation is used:
.. math:: f(x) = \frac{(f(x_2) - f(x_1))}{(x_2 - x_1)} \cdot
(x - x_1) + f(x_1)
Examples
---------
>>> import numpy as np
>>> import pandas as pd
>>> wind_speed_10m = np.array([[3], [4]])
>>> wind_speed_80m = np.array([[6], [6]])
>>> weather_df = pd.DataFrame(np.hstack((wind_speed_10m,
... wind_speed_80m)),
... index=pd.date_range('1/1/2012',
... periods=2,
... freq='H'),
... columns=[np.array(['wind_speed',
... 'wind_speed']),
... np.array([10, 80])])
>>> value = linear_interpolation_extrapolation(
... weather_df['wind_speed'], 100)[0]
"""
# find closest heights
heights_sorted = df.columns[
sorted(range(len(df.columns)),
key=lambda i: abs(df.columns[i] - target_height))]
return ((df[heights_sorted[1]] - df[heights_sorted[0]]) /
(heights_sorted[1] - heights_sorted[0]) *
(target_height - heights_sorted[0]) + df[heights_sorted[0]]) | [
"def",
"linear_interpolation_extrapolation",
"(",
"df",
",",
"target_height",
")",
":",
"# find closest heights",
"heights_sorted",
"=",
"df",
".",
"columns",
"[",
"sorted",
"(",
"range",
"(",
"len",
"(",
"df",
".",
"columns",
")",
")",
",",
"key",
"=",
"lam... | r"""
Linear inter- or extrapolates between the values of a data frame.
This function can be used for the inter-/extrapolation of a parameter
(e.g wind speed) available at two or more different heights, to approximate
the value at hub height. The function is carried out when the parameter
`wind_speed_model`, `density_model` or `temperature_model` of an
instance of the :class:`~.modelchain.ModelChain` class is
'interpolation_extrapolation'.
Parameters
----------
df : pandas.DataFrame
DataFrame with time series for parameter that is to be interpolated or
extrapolated. The columns of the DataFrame are the different heights
for which the parameter is available. If more than two heights are
given, the two closest heights are used. See example below on how the
DataFrame should look like and how the function can be used.
target_height : float
Height for which the parameter is approximated (e.g. hub height).
Returns
-------
pandas.Series
Result of the inter-/extrapolation (e.g. wind speed at hub height).
Notes
-----
For the inter- and extrapolation the following equation is used:
.. math:: f(x) = \frac{(f(x_2) - f(x_1))}{(x_2 - x_1)} \cdot
(x - x_1) + f(x_1)
Examples
---------
>>> import numpy as np
>>> import pandas as pd
>>> wind_speed_10m = np.array([[3], [4]])
>>> wind_speed_80m = np.array([[6], [6]])
>>> weather_df = pd.DataFrame(np.hstack((wind_speed_10m,
... wind_speed_80m)),
... index=pd.date_range('1/1/2012',
... periods=2,
... freq='H'),
... columns=[np.array(['wind_speed',
... 'wind_speed']),
... np.array([10, 80])])
>>> value = linear_interpolation_extrapolation(
... weather_df['wind_speed'], 100)[0] | [
"r",
"Linear",
"inter",
"-",
"or",
"extrapolates",
"between",
"the",
"values",
"of",
"a",
"data",
"frame",
"."
] | 421b316139743311b7cb68a69f6b53d2665f7e23 | https://github.com/wind-python/windpowerlib/blob/421b316139743311b7cb68a69f6b53d2665f7e23/windpowerlib/tools.py#L14-L73 | train | 202,093 |
wind-python/windpowerlib | windpowerlib/tools.py | logarithmic_interpolation_extrapolation | def logarithmic_interpolation_extrapolation(df, target_height):
r"""
Logarithmic inter- or extrapolates between the values of a data frame.
This function can be used for the inter-/extrapolation of the wind speed if
it is available at two or more different heights, to approximate
the value at hub height. The function is carried out when the parameter
`wind_speed_model` :class:`~.modelchain.ModelChain` class is
'log_interpolation_extrapolation'.
Parameters
----------
df : pandas.DataFrame
DataFrame with time series for parameter that is to be interpolated or
extrapolated. The columns of the DataFrame are the different heights
for which the parameter is available. If more than two heights are
given, the two closest heights are used. See example in
:py:func:`~.linear_interpolation_extrapolation` on how the
DataFrame should look like and how the function can be used.
target_height : float
Height for which the parameter is approximated (e.g. hub height).
Returns
-------
pandas.Series
Result of the inter-/extrapolation (e.g. wind speed at hub height).
Notes
-----
For the logarithmic inter- and extrapolation the following equation is
used [1]_:
.. math:: f(x) = \frac{\ln(x) \cdot (f(x_2) - f(x_1)) - f(x_2) \cdot
\ln(x_1) + f(x_1) \cdot \ln(x_2)}{\ln(x_2) - \ln(x_1)}
References
----------
.. [1] Knorr, K.: "Modellierung von raum-zeitlichen Eigenschaften der
Windenergieeinspeisung für wetterdatenbasierte
Windleistungssimulationen". Universität Kassel, Diss., 2016,
p. 83
"""
# find closest heights
heights_sorted = df.columns[
sorted(range(len(df.columns)),
key=lambda i: abs(df.columns[i] - target_height))]
return ((np.log(target_height) *
(df[heights_sorted[1]] - df[heights_sorted[0]]) -
df[heights_sorted[1]] * np.log(heights_sorted[0]) +
df[heights_sorted[0]] * np.log(heights_sorted[1])) /
(np.log(heights_sorted[1]) - np.log(heights_sorted[0]))) | python | def logarithmic_interpolation_extrapolation(df, target_height):
r"""
Logarithmic inter- or extrapolates between the values of a data frame.
This function can be used for the inter-/extrapolation of the wind speed if
it is available at two or more different heights, to approximate
the value at hub height. The function is carried out when the parameter
`wind_speed_model` :class:`~.modelchain.ModelChain` class is
'log_interpolation_extrapolation'.
Parameters
----------
df : pandas.DataFrame
DataFrame with time series for parameter that is to be interpolated or
extrapolated. The columns of the DataFrame are the different heights
for which the parameter is available. If more than two heights are
given, the two closest heights are used. See example in
:py:func:`~.linear_interpolation_extrapolation` on how the
DataFrame should look like and how the function can be used.
target_height : float
Height for which the parameter is approximated (e.g. hub height).
Returns
-------
pandas.Series
Result of the inter-/extrapolation (e.g. wind speed at hub height).
Notes
-----
For the logarithmic inter- and extrapolation the following equation is
used [1]_:
.. math:: f(x) = \frac{\ln(x) \cdot (f(x_2) - f(x_1)) - f(x_2) \cdot
\ln(x_1) + f(x_1) \cdot \ln(x_2)}{\ln(x_2) - \ln(x_1)}
References
----------
.. [1] Knorr, K.: "Modellierung von raum-zeitlichen Eigenschaften der
Windenergieeinspeisung für wetterdatenbasierte
Windleistungssimulationen". Universität Kassel, Diss., 2016,
p. 83
"""
# find closest heights
heights_sorted = df.columns[
sorted(range(len(df.columns)),
key=lambda i: abs(df.columns[i] - target_height))]
return ((np.log(target_height) *
(df[heights_sorted[1]] - df[heights_sorted[0]]) -
df[heights_sorted[1]] * np.log(heights_sorted[0]) +
df[heights_sorted[0]] * np.log(heights_sorted[1])) /
(np.log(heights_sorted[1]) - np.log(heights_sorted[0]))) | [
"def",
"logarithmic_interpolation_extrapolation",
"(",
"df",
",",
"target_height",
")",
":",
"# find closest heights",
"heights_sorted",
"=",
"df",
".",
"columns",
"[",
"sorted",
"(",
"range",
"(",
"len",
"(",
"df",
".",
"columns",
")",
")",
",",
"key",
"=",
... | r"""
Logarithmic inter- or extrapolates between the values of a data frame.
This function can be used for the inter-/extrapolation of the wind speed if
it is available at two or more different heights, to approximate
the value at hub height. The function is carried out when the parameter
`wind_speed_model` :class:`~.modelchain.ModelChain` class is
'log_interpolation_extrapolation'.
Parameters
----------
df : pandas.DataFrame
DataFrame with time series for parameter that is to be interpolated or
extrapolated. The columns of the DataFrame are the different heights
for which the parameter is available. If more than two heights are
given, the two closest heights are used. See example in
:py:func:`~.linear_interpolation_extrapolation` on how the
DataFrame should look like and how the function can be used.
target_height : float
Height for which the parameter is approximated (e.g. hub height).
Returns
-------
pandas.Series
Result of the inter-/extrapolation (e.g. wind speed at hub height).
Notes
-----
For the logarithmic inter- and extrapolation the following equation is
used [1]_:
.. math:: f(x) = \frac{\ln(x) \cdot (f(x_2) - f(x_1)) - f(x_2) \cdot
\ln(x_1) + f(x_1) \cdot \ln(x_2)}{\ln(x_2) - \ln(x_1)}
References
----------
.. [1] Knorr, K.: "Modellierung von raum-zeitlichen Eigenschaften der
Windenergieeinspeisung für wetterdatenbasierte
Windleistungssimulationen". Universität Kassel, Diss., 2016,
p. 83 | [
"r",
"Logarithmic",
"inter",
"-",
"or",
"extrapolates",
"between",
"the",
"values",
"of",
"a",
"data",
"frame",
"."
] | 421b316139743311b7cb68a69f6b53d2665f7e23 | https://github.com/wind-python/windpowerlib/blob/421b316139743311b7cb68a69f6b53d2665f7e23/windpowerlib/tools.py#L76-L128 | train | 202,094 |
wind-python/windpowerlib | windpowerlib/tools.py | gauss_distribution | def gauss_distribution(function_variable, standard_deviation, mean=0):
r"""
Gauss distribution.
The Gauss distribution is used in the function
:py:func:`~.power_curves.smooth_power_curve` for power curve smoothing.
Parameters
----------
function_variable : float
Variable of the gaussian distribution.
standard_deviation : float
Standard deviation of the Gauss distribution.
mean : Float
Defines the offset of the Gauss distribution. Default: 0.
Returns
-------
pandas.Series or numpy.array
Wind speed at hub height. Data type depends on the type of
`wind_speed`.
Notes
-----
The following equation is used [1]_:
.. math:: f(x) = \frac{1}{\sigma \sqrt{2 \pi}} \exp
\left[-\frac{(x-\mu)^2}{2 \sigma^2}\right]
with:
:math:`\sigma`: standard deviation, :math:`\mu`: mean
References
----------
.. [1] Berendsen, H.: "A Student's Guide to Data and Error Analysis".
New York, Cambridge University Press, 2011, p. 37
"""
return (1 / (standard_deviation * np.sqrt(2 * np.pi)) *
np.exp(-(function_variable - mean)**2 /
(2 * standard_deviation**2))) | python | def gauss_distribution(function_variable, standard_deviation, mean=0):
r"""
Gauss distribution.
The Gauss distribution is used in the function
:py:func:`~.power_curves.smooth_power_curve` for power curve smoothing.
Parameters
----------
function_variable : float
Variable of the gaussian distribution.
standard_deviation : float
Standard deviation of the Gauss distribution.
mean : Float
Defines the offset of the Gauss distribution. Default: 0.
Returns
-------
pandas.Series or numpy.array
Wind speed at hub height. Data type depends on the type of
`wind_speed`.
Notes
-----
The following equation is used [1]_:
.. math:: f(x) = \frac{1}{\sigma \sqrt{2 \pi}} \exp
\left[-\frac{(x-\mu)^2}{2 \sigma^2}\right]
with:
:math:`\sigma`: standard deviation, :math:`\mu`: mean
References
----------
.. [1] Berendsen, H.: "A Student's Guide to Data and Error Analysis".
New York, Cambridge University Press, 2011, p. 37
"""
return (1 / (standard_deviation * np.sqrt(2 * np.pi)) *
np.exp(-(function_variable - mean)**2 /
(2 * standard_deviation**2))) | [
"def",
"gauss_distribution",
"(",
"function_variable",
",",
"standard_deviation",
",",
"mean",
"=",
"0",
")",
":",
"return",
"(",
"1",
"/",
"(",
"standard_deviation",
"*",
"np",
".",
"sqrt",
"(",
"2",
"*",
"np",
".",
"pi",
")",
")",
"*",
"np",
".",
"... | r"""
Gauss distribution.
The Gauss distribution is used in the function
:py:func:`~.power_curves.smooth_power_curve` for power curve smoothing.
Parameters
----------
function_variable : float
Variable of the gaussian distribution.
standard_deviation : float
Standard deviation of the Gauss distribution.
mean : Float
Defines the offset of the Gauss distribution. Default: 0.
Returns
-------
pandas.Series or numpy.array
Wind speed at hub height. Data type depends on the type of
`wind_speed`.
Notes
-----
The following equation is used [1]_:
.. math:: f(x) = \frac{1}{\sigma \sqrt{2 \pi}} \exp
\left[-\frac{(x-\mu)^2}{2 \sigma^2}\right]
with:
:math:`\sigma`: standard deviation, :math:`\mu`: mean
References
----------
.. [1] Berendsen, H.: "A Student's Guide to Data and Error Analysis".
New York, Cambridge University Press, 2011, p. 37 | [
"r",
"Gauss",
"distribution",
"."
] | 421b316139743311b7cb68a69f6b53d2665f7e23 | https://github.com/wind-python/windpowerlib/blob/421b316139743311b7cb68a69f6b53d2665f7e23/windpowerlib/tools.py#L131-L171 | train | 202,095 |
wind-python/windpowerlib | windpowerlib/turbine_cluster_modelchain.py | TurbineClusterModelChain.assign_power_curve | def assign_power_curve(self, weather_df):
r"""
Calculates the power curve of the wind turbine cluster.
The power curve is aggregated from the wind farms' and wind turbines'
power curves by using :func:`power_plant.assign_power_curve`. Depending
on the parameters of the WindTurbineCluster power curves are smoothed
and/or wake losses are taken into account.
Parameters
----------
weather_df : pandas.DataFrame
DataFrame with time series for wind speed `wind_speed` in m/s, and
roughness length `roughness_length` in m, as well as optionally
temperature `temperature` in K, pressure `pressure` in Pa,
density `density` in kg/m³ and turbulence intensity
`turbulence_intensity` depending on `power_output_model`,
`density_model` and `standard_deviation_model` chosen.
The columns of the DataFrame are a MultiIndex where the first level
contains the variable name (e.g. wind_speed) and the second level
contains the height at which it applies (e.g. 10, if it was
measured at a height of 10 m). See documentation of
:func:`TurbineClusterModelChain.run_model` for an example on how
to create the weather_df DataFrame.
Returns
-------
self
"""
# Set turbulence intensity for assigning power curve
turbulence_intensity = (
weather_df['turbulence_intensity'].values.mean() if
'turbulence_intensity' in
weather_df.columns.get_level_values(0) else None)
# Assign power curve
if (self.wake_losses_model == 'power_efficiency_curve' or
self.wake_losses_model == 'constant_efficiency' or
self.wake_losses_model is None):
wake_losses_model_to_power_curve = self.wake_losses_model
if self.wake_losses_model is None:
logging.debug('Wake losses in wind farms not considered.')
else:
logging.debug('Wake losses considered with {}.'.format(
self.wake_losses_model))
else:
logging.debug('Wake losses considered by {} wind '.format(
self.wake_losses_model) + 'efficiency curve.')
wake_losses_model_to_power_curve = None
self.power_plant.assign_power_curve(
wake_losses_model=wake_losses_model_to_power_curve,
smoothing=self.smoothing, block_width=self.block_width,
standard_deviation_method=self.standard_deviation_method,
smoothing_order=self.smoothing_order,
roughness_length=weather_df['roughness_length'][0].mean(),
turbulence_intensity=turbulence_intensity)
# Further logging messages
if self.smoothing is None:
logging.debug('Aggregated power curve not smoothed.')
else:
logging.debug('Aggregated power curve smoothed by method: ' +
self.standard_deviation_method)
return self | python | def assign_power_curve(self, weather_df):
r"""
Calculates the power curve of the wind turbine cluster.
The power curve is aggregated from the wind farms' and wind turbines'
power curves by using :func:`power_plant.assign_power_curve`. Depending
on the parameters of the WindTurbineCluster power curves are smoothed
and/or wake losses are taken into account.
Parameters
----------
weather_df : pandas.DataFrame
DataFrame with time series for wind speed `wind_speed` in m/s, and
roughness length `roughness_length` in m, as well as optionally
temperature `temperature` in K, pressure `pressure` in Pa,
density `density` in kg/m³ and turbulence intensity
`turbulence_intensity` depending on `power_output_model`,
`density_model` and `standard_deviation_model` chosen.
The columns of the DataFrame are a MultiIndex where the first level
contains the variable name (e.g. wind_speed) and the second level
contains the height at which it applies (e.g. 10, if it was
measured at a height of 10 m). See documentation of
:func:`TurbineClusterModelChain.run_model` for an example on how
to create the weather_df DataFrame.
Returns
-------
self
"""
# Set turbulence intensity for assigning power curve
turbulence_intensity = (
weather_df['turbulence_intensity'].values.mean() if
'turbulence_intensity' in
weather_df.columns.get_level_values(0) else None)
# Assign power curve
if (self.wake_losses_model == 'power_efficiency_curve' or
self.wake_losses_model == 'constant_efficiency' or
self.wake_losses_model is None):
wake_losses_model_to_power_curve = self.wake_losses_model
if self.wake_losses_model is None:
logging.debug('Wake losses in wind farms not considered.')
else:
logging.debug('Wake losses considered with {}.'.format(
self.wake_losses_model))
else:
logging.debug('Wake losses considered by {} wind '.format(
self.wake_losses_model) + 'efficiency curve.')
wake_losses_model_to_power_curve = None
self.power_plant.assign_power_curve(
wake_losses_model=wake_losses_model_to_power_curve,
smoothing=self.smoothing, block_width=self.block_width,
standard_deviation_method=self.standard_deviation_method,
smoothing_order=self.smoothing_order,
roughness_length=weather_df['roughness_length'][0].mean(),
turbulence_intensity=turbulence_intensity)
# Further logging messages
if self.smoothing is None:
logging.debug('Aggregated power curve not smoothed.')
else:
logging.debug('Aggregated power curve smoothed by method: ' +
self.standard_deviation_method)
return self | [
"def",
"assign_power_curve",
"(",
"self",
",",
"weather_df",
")",
":",
"# Set turbulence intensity for assigning power curve",
"turbulence_intensity",
"=",
"(",
"weather_df",
"[",
"'turbulence_intensity'",
"]",
".",
"values",
".",
"mean",
"(",
")",
"if",
"'turbulence_in... | r"""
Calculates the power curve of the wind turbine cluster.
The power curve is aggregated from the wind farms' and wind turbines'
power curves by using :func:`power_plant.assign_power_curve`. Depending
on the parameters of the WindTurbineCluster power curves are smoothed
and/or wake losses are taken into account.
Parameters
----------
weather_df : pandas.DataFrame
DataFrame with time series for wind speed `wind_speed` in m/s, and
roughness length `roughness_length` in m, as well as optionally
temperature `temperature` in K, pressure `pressure` in Pa,
density `density` in kg/m³ and turbulence intensity
`turbulence_intensity` depending on `power_output_model`,
`density_model` and `standard_deviation_model` chosen.
The columns of the DataFrame are a MultiIndex where the first level
contains the variable name (e.g. wind_speed) and the second level
contains the height at which it applies (e.g. 10, if it was
measured at a height of 10 m). See documentation of
:func:`TurbineClusterModelChain.run_model` for an example on how
to create the weather_df DataFrame.
Returns
-------
self | [
"r",
"Calculates",
"the",
"power",
"curve",
"of",
"the",
"wind",
"turbine",
"cluster",
"."
] | 421b316139743311b7cb68a69f6b53d2665f7e23 | https://github.com/wind-python/windpowerlib/blob/421b316139743311b7cb68a69f6b53d2665f7e23/windpowerlib/turbine_cluster_modelchain.py#L155-L219 | train | 202,096 |
wind-python/windpowerlib | windpowerlib/power_output.py | power_coefficient_curve | def power_coefficient_curve(wind_speed, power_coefficient_curve_wind_speeds,
power_coefficient_curve_values, rotor_diameter,
density):
r"""
Calculates the turbine power output using a power coefficient curve.
This function is carried out when the parameter `power_output_model` of an
instance of the :class:`~.modelchain.ModelChain` class is
'power_coefficient_curve'.
Parameters
----------
wind_speed : pandas.Series or numpy.array
Wind speed at hub height in m/s.
power_coefficient_curve_wind_speeds : pandas.Series or numpy.array
Wind speeds in m/s for which the power coefficients are provided in
`power_coefficient_curve_values`.
power_coefficient_curve_values : pandas.Series or numpy.array
Power coefficients corresponding to wind speeds in
`power_coefficient_curve_wind_speeds`.
rotor_diameter : float
Rotor diameter in m.
density : pandas.Series or numpy.array
Density of air at hub height in kg/m³.
Returns
-------
pandas.Series or numpy.array
Electrical power output of the wind turbine in W.
Data type depends on type of `wind_speed`.
Notes
-----
The following equation is used if the parameter `density_corr` is False
[1]_, [2]_:
.. math:: P=\frac{1}{8}\cdot\rho_{hub}\cdot d_{rotor}^{2}
\cdot\pi\cdot v_{wind}^{3}\cdot cp\left(v_{wind}\right)
with:
P: power [W], :math:`\rho`: density [kg/m³], d: diameter [m],
v: wind speed [m/s], cp: power coefficient
It is assumed that the power output for wind speeds above the maximum
and below the minimum wind speed given in the power coefficient curve is
zero.
References
----------
.. [1] Gasch, R., Twele, J.: "Windkraftanlagen". 6. Auflage, Wiesbaden,
Vieweg + Teubner, 2010, pages 35ff, 208
.. [2] Hau, E.: "Windkraftanlagen - Grundlagen, Technik, Einsatz,
Wirtschaftlichkeit". 4. Auflage, Springer-Verlag, 2008, p. 542
"""
power_coefficient_time_series = np.interp(
wind_speed, power_coefficient_curve_wind_speeds,
power_coefficient_curve_values, left=0, right=0)
power_output = (1 / 8 * density * rotor_diameter ** 2 * np.pi *
np.power(wind_speed, 3) *
power_coefficient_time_series)
# Power_output as pd.Series if wind_speed is pd.Series (else: np.array)
if isinstance(wind_speed, pd.Series):
power_output = pd.Series(data=power_output, index=wind_speed.index,
name='feedin_power_plant')
else:
power_output = np.array(power_output)
return power_output | python | def power_coefficient_curve(wind_speed, power_coefficient_curve_wind_speeds,
power_coefficient_curve_values, rotor_diameter,
density):
r"""
Calculates the turbine power output using a power coefficient curve.
This function is carried out when the parameter `power_output_model` of an
instance of the :class:`~.modelchain.ModelChain` class is
'power_coefficient_curve'.
Parameters
----------
wind_speed : pandas.Series or numpy.array
Wind speed at hub height in m/s.
power_coefficient_curve_wind_speeds : pandas.Series or numpy.array
Wind speeds in m/s for which the power coefficients are provided in
`power_coefficient_curve_values`.
power_coefficient_curve_values : pandas.Series or numpy.array
Power coefficients corresponding to wind speeds in
`power_coefficient_curve_wind_speeds`.
rotor_diameter : float
Rotor diameter in m.
density : pandas.Series or numpy.array
Density of air at hub height in kg/m³.
Returns
-------
pandas.Series or numpy.array
Electrical power output of the wind turbine in W.
Data type depends on type of `wind_speed`.
Notes
-----
The following equation is used if the parameter `density_corr` is False
[1]_, [2]_:
.. math:: P=\frac{1}{8}\cdot\rho_{hub}\cdot d_{rotor}^{2}
\cdot\pi\cdot v_{wind}^{3}\cdot cp\left(v_{wind}\right)
with:
P: power [W], :math:`\rho`: density [kg/m³], d: diameter [m],
v: wind speed [m/s], cp: power coefficient
It is assumed that the power output for wind speeds above the maximum
and below the minimum wind speed given in the power coefficient curve is
zero.
References
----------
.. [1] Gasch, R., Twele, J.: "Windkraftanlagen". 6. Auflage, Wiesbaden,
Vieweg + Teubner, 2010, pages 35ff, 208
.. [2] Hau, E.: "Windkraftanlagen - Grundlagen, Technik, Einsatz,
Wirtschaftlichkeit". 4. Auflage, Springer-Verlag, 2008, p. 542
"""
power_coefficient_time_series = np.interp(
wind_speed, power_coefficient_curve_wind_speeds,
power_coefficient_curve_values, left=0, right=0)
power_output = (1 / 8 * density * rotor_diameter ** 2 * np.pi *
np.power(wind_speed, 3) *
power_coefficient_time_series)
# Power_output as pd.Series if wind_speed is pd.Series (else: np.array)
if isinstance(wind_speed, pd.Series):
power_output = pd.Series(data=power_output, index=wind_speed.index,
name='feedin_power_plant')
else:
power_output = np.array(power_output)
return power_output | [
"def",
"power_coefficient_curve",
"(",
"wind_speed",
",",
"power_coefficient_curve_wind_speeds",
",",
"power_coefficient_curve_values",
",",
"rotor_diameter",
",",
"density",
")",
":",
"power_coefficient_time_series",
"=",
"np",
".",
"interp",
"(",
"wind_speed",
",",
"pow... | r"""
Calculates the turbine power output using a power coefficient curve.
This function is carried out when the parameter `power_output_model` of an
instance of the :class:`~.modelchain.ModelChain` class is
'power_coefficient_curve'.
Parameters
----------
wind_speed : pandas.Series or numpy.array
Wind speed at hub height in m/s.
power_coefficient_curve_wind_speeds : pandas.Series or numpy.array
Wind speeds in m/s for which the power coefficients are provided in
`power_coefficient_curve_values`.
power_coefficient_curve_values : pandas.Series or numpy.array
Power coefficients corresponding to wind speeds in
`power_coefficient_curve_wind_speeds`.
rotor_diameter : float
Rotor diameter in m.
density : pandas.Series or numpy.array
Density of air at hub height in kg/m³.
Returns
-------
pandas.Series or numpy.array
Electrical power output of the wind turbine in W.
Data type depends on type of `wind_speed`.
Notes
-----
The following equation is used if the parameter `density_corr` is False
[1]_, [2]_:
.. math:: P=\frac{1}{8}\cdot\rho_{hub}\cdot d_{rotor}^{2}
\cdot\pi\cdot v_{wind}^{3}\cdot cp\left(v_{wind}\right)
with:
P: power [W], :math:`\rho`: density [kg/m³], d: diameter [m],
v: wind speed [m/s], cp: power coefficient
It is assumed that the power output for wind speeds above the maximum
and below the minimum wind speed given in the power coefficient curve is
zero.
References
----------
.. [1] Gasch, R., Twele, J.: "Windkraftanlagen". 6. Auflage, Wiesbaden,
Vieweg + Teubner, 2010, pages 35ff, 208
.. [2] Hau, E.: "Windkraftanlagen - Grundlagen, Technik, Einsatz,
Wirtschaftlichkeit". 4. Auflage, Springer-Verlag, 2008, p. 542 | [
"r",
"Calculates",
"the",
"turbine",
"power",
"output",
"using",
"a",
"power",
"coefficient",
"curve",
"."
] | 421b316139743311b7cb68a69f6b53d2665f7e23 | https://github.com/wind-python/windpowerlib/blob/421b316139743311b7cb68a69f6b53d2665f7e23/windpowerlib/power_output.py#L14-L81 | train | 202,097 |
wind-python/windpowerlib | windpowerlib/power_output.py | power_curve | def power_curve(wind_speed, power_curve_wind_speeds, power_curve_values,
density=None, density_correction=False):
r"""
Calculates the turbine power output using a power curve.
This function is carried out when the parameter `power_output_model` of an
instance of the :class:`~.modelchain.ModelChain` class is 'power_curve'. If
the parameter `density_correction` is True the density corrected power
curve (See :py:func:`~.power_curve_density_correction`) is used.
Parameters
----------
wind_speed : pandas.Series or numpy.array
Wind speed at hub height in m/s.
power_curve_wind_speeds : pandas.Series or numpy.array
Wind speeds in m/s for which the power curve values are provided in
`power_curve_values`.
power_curve_values : pandas.Series or numpy.array
Power curve values corresponding to wind speeds in
`power_curve_wind_speeds`.
density : pandas.Series or numpy.array
Density of air at hub height in kg/m³. This parameter is needed
if `density_correction` is True. Default: None.
density_correction : boolean
If the parameter is True the density corrected power curve is used for
the calculation of the turbine power output. In this case `density`
cannot be None. Default: False.
Returns
-------
pandas.Series or numpy.array
Electrical power output of the wind turbine in W.
Data type depends on type of `wind_speed`.
Notes
-------
It is assumed that the power output for wind speeds above the maximum
and below the minimum wind speed given in the power curve is zero.
"""
if density_correction is False:
power_output = np.interp(wind_speed, power_curve_wind_speeds,
power_curve_values, left=0, right=0)
# Power_output as pd.Series if wind_speed is pd.Series (else: np.array)
if isinstance(wind_speed, pd.Series):
power_output = pd.Series(data=power_output, index=wind_speed.index,
name='feedin_power_plant')
else:
power_output = np.array(power_output)
elif density_correction is True:
power_output = power_curve_density_correction(
wind_speed, power_curve_wind_speeds, power_curve_values, density)
else:
raise TypeError("'{0}' is an invalid type. ".format(type(
density_correction)) + "`density_correction` must " +
"be Boolean (True or False).")
return power_output | python | def power_curve(wind_speed, power_curve_wind_speeds, power_curve_values,
density=None, density_correction=False):
r"""
Calculates the turbine power output using a power curve.
This function is carried out when the parameter `power_output_model` of an
instance of the :class:`~.modelchain.ModelChain` class is 'power_curve'. If
the parameter `density_correction` is True the density corrected power
curve (See :py:func:`~.power_curve_density_correction`) is used.
Parameters
----------
wind_speed : pandas.Series or numpy.array
Wind speed at hub height in m/s.
power_curve_wind_speeds : pandas.Series or numpy.array
Wind speeds in m/s for which the power curve values are provided in
`power_curve_values`.
power_curve_values : pandas.Series or numpy.array
Power curve values corresponding to wind speeds in
`power_curve_wind_speeds`.
density : pandas.Series or numpy.array
Density of air at hub height in kg/m³. This parameter is needed
if `density_correction` is True. Default: None.
density_correction : boolean
If the parameter is True the density corrected power curve is used for
the calculation of the turbine power output. In this case `density`
cannot be None. Default: False.
Returns
-------
pandas.Series or numpy.array
Electrical power output of the wind turbine in W.
Data type depends on type of `wind_speed`.
Notes
-------
It is assumed that the power output for wind speeds above the maximum
and below the minimum wind speed given in the power curve is zero.
"""
if density_correction is False:
power_output = np.interp(wind_speed, power_curve_wind_speeds,
power_curve_values, left=0, right=0)
# Power_output as pd.Series if wind_speed is pd.Series (else: np.array)
if isinstance(wind_speed, pd.Series):
power_output = pd.Series(data=power_output, index=wind_speed.index,
name='feedin_power_plant')
else:
power_output = np.array(power_output)
elif density_correction is True:
power_output = power_curve_density_correction(
wind_speed, power_curve_wind_speeds, power_curve_values, density)
else:
raise TypeError("'{0}' is an invalid type. ".format(type(
density_correction)) + "`density_correction` must " +
"be Boolean (True or False).")
return power_output | [
"def",
"power_curve",
"(",
"wind_speed",
",",
"power_curve_wind_speeds",
",",
"power_curve_values",
",",
"density",
"=",
"None",
",",
"density_correction",
"=",
"False",
")",
":",
"if",
"density_correction",
"is",
"False",
":",
"power_output",
"=",
"np",
".",
"i... | r"""
Calculates the turbine power output using a power curve.
This function is carried out when the parameter `power_output_model` of an
instance of the :class:`~.modelchain.ModelChain` class is 'power_curve'. If
the parameter `density_correction` is True the density corrected power
curve (See :py:func:`~.power_curve_density_correction`) is used.
Parameters
----------
wind_speed : pandas.Series or numpy.array
Wind speed at hub height in m/s.
power_curve_wind_speeds : pandas.Series or numpy.array
Wind speeds in m/s for which the power curve values are provided in
`power_curve_values`.
power_curve_values : pandas.Series or numpy.array
Power curve values corresponding to wind speeds in
`power_curve_wind_speeds`.
density : pandas.Series or numpy.array
Density of air at hub height in kg/m³. This parameter is needed
if `density_correction` is True. Default: None.
density_correction : boolean
If the parameter is True the density corrected power curve is used for
the calculation of the turbine power output. In this case `density`
cannot be None. Default: False.
Returns
-------
pandas.Series or numpy.array
Electrical power output of the wind turbine in W.
Data type depends on type of `wind_speed`.
Notes
-------
It is assumed that the power output for wind speeds above the maximum
and below the minimum wind speed given in the power curve is zero. | [
"r",
"Calculates",
"the",
"turbine",
"power",
"output",
"using",
"a",
"power",
"curve",
"."
] | 421b316139743311b7cb68a69f6b53d2665f7e23 | https://github.com/wind-python/windpowerlib/blob/421b316139743311b7cb68a69f6b53d2665f7e23/windpowerlib/power_output.py#L84-L140 | train | 202,098 |
wind-python/windpowerlib | windpowerlib/power_output.py | power_curve_density_correction | def power_curve_density_correction(wind_speed, power_curve_wind_speeds,
power_curve_values, density):
r"""
Calculates the turbine power output using a density corrected power curve.
This function is carried out when the parameter `density_correction` of an
instance of the :class:`~.modelchain.ModelChain` class is True.
Parameters
----------
wind_speed : pandas.Series or numpy.array
Wind speed at hub height in m/s.
power_curve_wind_speeds : pandas.Series or numpy.array
Wind speeds in m/s for which the power curve values are provided in
`power_curve_values`.
power_curve_values : pandas.Series or numpy.array
Power curve values corresponding to wind speeds in
`power_curve_wind_speeds`.
density : pandas.Series or numpy.array
Density of air at hub height in kg/m³.
Returns
-------
pandas.Series or numpy.array
Electrical power output of the wind turbine in W.
Data type depends on type of `wind_speed`.
Notes
-----
The following equation is used for the site specific power curve wind
speeds [1]_, [2]_, [3]_:
.. math:: v_{site}=v_{std}\cdot\left(\frac{\rho_0}
{\rho_{site}}\right)^{p(v)}
with:
.. math:: p=\begin{cases}
\frac{1}{3} & v_{std} \leq 7.5\text{ m/s}\\
\frac{1}{15}\cdot v_{std}-\frac{1}{6} & 7.5
\text{ m/s}<v_{std}<12.5\text{ m/s}\\
\frac{2}{3} & \geq 12.5 \text{ m/s}
\end{cases},
v: wind speed [m/s], :math:`\rho`: density [kg/m³]
:math:`v_{std}` is the standard wind speed in the power curve
(:math:`v_{std}`, :math:`P_{std}`),
:math:`v_{site}` is the density corrected wind speed for the power curve
(:math:`v_{site}`, :math:`P_{std}`),
:math:`\rho_0` is the ambient density (1.225 kg/m³)
and :math:`\rho_{site}` the density at site conditions (and hub height).
It is assumed that the power output for wind speeds above the maximum
and below the minimum wind speed given in the power curve is zero.
References
----------
.. [1] Svenningsen, L.: "Power Curve Air Density Correction And Other
Power Curve Options in WindPRO". 1st edition, Aalborg,
EMD International A/S , 2010, p. 4
.. [2] Svenningsen, L.: "Proposal of an Improved Power Curve Correction".
EMD International A/S , 2010
.. [3] Biank, M.: "Methodology, Implementation and Validation of a
Variable Scale Simulation Model for Windpower based on the
Georeferenced Installation Register of Germany". Master's Thesis
at Reiner Lemoine Institute, 2014, p. 13
"""
if density is None:
raise TypeError("`density` is None. For the calculation with a " +
"density corrected power curve density at hub " +
"height is needed.")
power_output = [(np.interp(
wind_speed[i], power_curve_wind_speeds * (1.225 / density[i]) ** (
np.interp(power_curve_wind_speeds, [7.5, 12.5], [1/3, 2/3])),
power_curve_values, left=0, right=0)) for i in range(len(wind_speed))]
# Power_output as pd.Series if wind_speed is pd.Series (else: np.array)
if isinstance(wind_speed, pd.Series):
power_output = pd.Series(data=power_output, index=wind_speed.index,
name='feedin_power_plant')
else:
power_output = np.array(power_output)
return power_output | python | def power_curve_density_correction(wind_speed, power_curve_wind_speeds,
power_curve_values, density):
r"""
Calculates the turbine power output using a density corrected power curve.
This function is carried out when the parameter `density_correction` of an
instance of the :class:`~.modelchain.ModelChain` class is True.
Parameters
----------
wind_speed : pandas.Series or numpy.array
Wind speed at hub height in m/s.
power_curve_wind_speeds : pandas.Series or numpy.array
Wind speeds in m/s for which the power curve values are provided in
`power_curve_values`.
power_curve_values : pandas.Series or numpy.array
Power curve values corresponding to wind speeds in
`power_curve_wind_speeds`.
density : pandas.Series or numpy.array
Density of air at hub height in kg/m³.
Returns
-------
pandas.Series or numpy.array
Electrical power output of the wind turbine in W.
Data type depends on type of `wind_speed`.
Notes
-----
The following equation is used for the site specific power curve wind
speeds [1]_, [2]_, [3]_:
.. math:: v_{site}=v_{std}\cdot\left(\frac{\rho_0}
{\rho_{site}}\right)^{p(v)}
with:
.. math:: p=\begin{cases}
\frac{1}{3} & v_{std} \leq 7.5\text{ m/s}\\
\frac{1}{15}\cdot v_{std}-\frac{1}{6} & 7.5
\text{ m/s}<v_{std}<12.5\text{ m/s}\\
\frac{2}{3} & \geq 12.5 \text{ m/s}
\end{cases},
v: wind speed [m/s], :math:`\rho`: density [kg/m³]
:math:`v_{std}` is the standard wind speed in the power curve
(:math:`v_{std}`, :math:`P_{std}`),
:math:`v_{site}` is the density corrected wind speed for the power curve
(:math:`v_{site}`, :math:`P_{std}`),
:math:`\rho_0` is the ambient density (1.225 kg/m³)
and :math:`\rho_{site}` the density at site conditions (and hub height).
It is assumed that the power output for wind speeds above the maximum
and below the minimum wind speed given in the power curve is zero.
References
----------
.. [1] Svenningsen, L.: "Power Curve Air Density Correction And Other
Power Curve Options in WindPRO". 1st edition, Aalborg,
EMD International A/S , 2010, p. 4
.. [2] Svenningsen, L.: "Proposal of an Improved Power Curve Correction".
EMD International A/S , 2010
.. [3] Biank, M.: "Methodology, Implementation and Validation of a
Variable Scale Simulation Model for Windpower based on the
Georeferenced Installation Register of Germany". Master's Thesis
at Reiner Lemoine Institute, 2014, p. 13
"""
if density is None:
raise TypeError("`density` is None. For the calculation with a " +
"density corrected power curve density at hub " +
"height is needed.")
power_output = [(np.interp(
wind_speed[i], power_curve_wind_speeds * (1.225 / density[i]) ** (
np.interp(power_curve_wind_speeds, [7.5, 12.5], [1/3, 2/3])),
power_curve_values, left=0, right=0)) for i in range(len(wind_speed))]
# Power_output as pd.Series if wind_speed is pd.Series (else: np.array)
if isinstance(wind_speed, pd.Series):
power_output = pd.Series(data=power_output, index=wind_speed.index,
name='feedin_power_plant')
else:
power_output = np.array(power_output)
return power_output | [
"def",
"power_curve_density_correction",
"(",
"wind_speed",
",",
"power_curve_wind_speeds",
",",
"power_curve_values",
",",
"density",
")",
":",
"if",
"density",
"is",
"None",
":",
"raise",
"TypeError",
"(",
"\"`density` is None. For the calculation with a \"",
"+",
"\"de... | r"""
Calculates the turbine power output using a density corrected power curve.
This function is carried out when the parameter `density_correction` of an
instance of the :class:`~.modelchain.ModelChain` class is True.
Parameters
----------
wind_speed : pandas.Series or numpy.array
Wind speed at hub height in m/s.
power_curve_wind_speeds : pandas.Series or numpy.array
Wind speeds in m/s for which the power curve values are provided in
`power_curve_values`.
power_curve_values : pandas.Series or numpy.array
Power curve values corresponding to wind speeds in
`power_curve_wind_speeds`.
density : pandas.Series or numpy.array
Density of air at hub height in kg/m³.
Returns
-------
pandas.Series or numpy.array
Electrical power output of the wind turbine in W.
Data type depends on type of `wind_speed`.
Notes
-----
The following equation is used for the site specific power curve wind
speeds [1]_, [2]_, [3]_:
.. math:: v_{site}=v_{std}\cdot\left(\frac{\rho_0}
{\rho_{site}}\right)^{p(v)}
with:
.. math:: p=\begin{cases}
\frac{1}{3} & v_{std} \leq 7.5\text{ m/s}\\
\frac{1}{15}\cdot v_{std}-\frac{1}{6} & 7.5
\text{ m/s}<v_{std}<12.5\text{ m/s}\\
\frac{2}{3} & \geq 12.5 \text{ m/s}
\end{cases},
v: wind speed [m/s], :math:`\rho`: density [kg/m³]
:math:`v_{std}` is the standard wind speed in the power curve
(:math:`v_{std}`, :math:`P_{std}`),
:math:`v_{site}` is the density corrected wind speed for the power curve
(:math:`v_{site}`, :math:`P_{std}`),
:math:`\rho_0` is the ambient density (1.225 kg/m³)
and :math:`\rho_{site}` the density at site conditions (and hub height).
It is assumed that the power output for wind speeds above the maximum
and below the minimum wind speed given in the power curve is zero.
References
----------
.. [1] Svenningsen, L.: "Power Curve Air Density Correction And Other
Power Curve Options in WindPRO". 1st edition, Aalborg,
EMD International A/S , 2010, p. 4
.. [2] Svenningsen, L.: "Proposal of an Improved Power Curve Correction".
EMD International A/S , 2010
.. [3] Biank, M.: "Methodology, Implementation and Validation of a
Variable Scale Simulation Model for Windpower based on the
Georeferenced Installation Register of Germany". Master's Thesis
at Reiner Lemoine Institute, 2014, p. 13 | [
"r",
"Calculates",
"the",
"turbine",
"power",
"output",
"using",
"a",
"density",
"corrected",
"power",
"curve",
"."
] | 421b316139743311b7cb68a69f6b53d2665f7e23 | https://github.com/wind-python/windpowerlib/blob/421b316139743311b7cb68a69f6b53d2665f7e23/windpowerlib/power_output.py#L143-L226 | train | 202,099 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.