body_hash stringlengths 64 64 | body stringlengths 23 109k | docstring stringlengths 1 57k | path stringlengths 4 198 | name stringlengths 1 115 | repository_name stringlengths 7 111 | repository_stars float64 0 191k | lang stringclasses 1 value | body_without_docstring stringlengths 14 108k | unified stringlengths 45 133k |
|---|---|---|---|---|---|---|---|---|---|
befcb4ff100850de765f74e9ece6e44a686a0b3e0599583f2900772fc2835c48 | @answers.setter
def answers(self, values: Union[(Iterable[Answer], Iterable[TrueFalseAnswer])]) -> None:
'Set answers given a sequence of them, overriding any\n previous data.\n '
self._answers = []
self._correct_answer = None
list(map(self.add_answer, values)) | Set answers given a sequence of them, overriding any
previous data. | exam2pdf/question.py | answers | agossino/exam2pdf | 0 | python | @answers.setter
def answers(self, values: Union[(Iterable[Answer], Iterable[TrueFalseAnswer])]) -> None:
'Set answers given a sequence of them, overriding any\n previous data.\n '
self._answers = []
self._correct_answer = None
list(map(self.add_answer, values)) | @answers.setter
def answers(self, values: Union[(Iterable[Answer], Iterable[TrueFalseAnswer])]) -> None:
'Set answers given a sequence of them, overriding any\n previous data.\n '
self._answers = []
self._correct_answer = None
list(map(self.add_answer, values))<|docstring|>Set answers given a sequence of them, overriding any
previous data.<|endoftext|> |
39fe85541ac6cdccbd9171ef67d30af4ef84c9262d9ad8b12ed4f66e0ef79996 | def add_answer(self, answer: Answer, is_correct: bool=False) -> None:
'Add an Answer. Correct answer is set.\n The first answer is the correct one: successive answers\n are set accordingly to is_correct argument.\n '
self._answers.append(answer)
if (is_correct or (self._correct_answer is None)):
self.correct_answer = answer | Add an Answer. Correct answer is set.
The first answer is the correct one: successive answers
are set accordingly to is_correct argument. | exam2pdf/question.py | add_answer | agossino/exam2pdf | 0 | python | def add_answer(self, answer: Answer, is_correct: bool=False) -> None:
'Add an Answer. Correct answer is set.\n The first answer is the correct one: successive answers\n are set accordingly to is_correct argument.\n '
self._answers.append(answer)
if (is_correct or (self._correct_answer is None)):
self.correct_answer = answer | def add_answer(self, answer: Answer, is_correct: bool=False) -> None:
'Add an Answer. Correct answer is set.\n The first answer is the correct one: successive answers\n are set accordingly to is_correct argument.\n '
self._answers.append(answer)
if (is_correct or (self._correct_answer is None)):
self.correct_answer = answer<|docstring|>Add an Answer. Correct answer is set.
The first answer is the correct one: successive answers
are set accordingly to is_correct argument.<|endoftext|> |
66e1170004af5d1522994402486d2938df922799b550cbf4408bf4c07f5c8aa4 | @correct_answer.setter
def correct_answer(self, value) -> None:
'Set the given answer as the correct one.\n '
if (value in self._answers):
self._correct_answer = value
else:
raise ValueError(f'correct_answer argument has never been added')
pointer = self._answers.index(self._correct_answer)
self._correct_index = pointer
self._correct_option = chr((ord(LETTER_A) + self.correct_index)) | Set the given answer as the correct one. | exam2pdf/question.py | correct_answer | agossino/exam2pdf | 0 | python | @correct_answer.setter
def correct_answer(self, value) -> None:
'\n '
if (value in self._answers):
self._correct_answer = value
else:
raise ValueError(f'correct_answer argument has never been added')
pointer = self._answers.index(self._correct_answer)
self._correct_index = pointer
self._correct_option = chr((ord(LETTER_A) + self.correct_index)) | @correct_answer.setter
def correct_answer(self, value) -> None:
'\n '
if (value in self._answers):
self._correct_answer = value
else:
raise ValueError(f'correct_answer argument has never been added')
pointer = self._answers.index(self._correct_answer)
self._correct_index = pointer
self._correct_option = chr((ord(LETTER_A) + self.correct_index))<|docstring|>Set the given answer as the correct one.<|endoftext|> |
a81663213faeb293597d6d67dd773d5b9d11bf58506ce94462bcbe96ac326db7 | @correct_index.setter
def correct_index(self, value: int) -> None:
'Set the correct answer given its index.\n '
try:
self.correct_answer = self._answers[value]
except IndexError as index_error:
raise ValueError(f'no answer with index {value}') from index_error | Set the correct answer given its index. | exam2pdf/question.py | correct_index | agossino/exam2pdf | 0 | python | @correct_index.setter
def correct_index(self, value: int) -> None:
'\n '
try:
self.correct_answer = self._answers[value]
except IndexError as index_error:
raise ValueError(f'no answer with index {value}') from index_error | @correct_index.setter
def correct_index(self, value: int) -> None:
'\n '
try:
self.correct_answer = self._answers[value]
except IndexError as index_error:
raise ValueError(f'no answer with index {value}') from index_error<|docstring|>Set the correct answer given its index.<|endoftext|> |
7c2e37442228f218a9ff8b93eb0ac84998d61921b33865773a875b4dfaa1b5a6 | @correct_option.setter
def correct_option(self, value: str) -> None:
'Set the correct answer according to the given letter,\n where the first answer added is labeled A'
try:
pointer = (ord(value) - ord(LETTER_A))
self.correct_answer = self._answers[pointer]
except IndexError as index_error:
raise ValueError(f'no answer with letter {value}') from index_error | Set the correct answer according to the given letter,
where the first answer added is labeled A | exam2pdf/question.py | correct_option | agossino/exam2pdf | 0 | python | @correct_option.setter
def correct_option(self, value: str) -> None:
'Set the correct answer according to the given letter,\n where the first answer added is labeled A'
try:
pointer = (ord(value) - ord(LETTER_A))
self.correct_answer = self._answers[pointer]
except IndexError as index_error:
raise ValueError(f'no answer with letter {value}') from index_error | @correct_option.setter
def correct_option(self, value: str) -> None:
'Set the correct answer according to the given letter,\n where the first answer added is labeled A'
try:
pointer = (ord(value) - ord(LETTER_A))
self.correct_answer = self._answers[pointer]
except IndexError as index_error:
raise ValueError(f'no answer with letter {value}') from index_error<|docstring|>Set the correct answer according to the given letter,
where the first answer added is labeled A<|endoftext|> |
19a6d30c50bdcc7d361b010074db53cc2c3abaf08040014d0853a0c201a06829 | def shuffle(self) -> None:
'Shuffle the answers.\n '
if self._correct_answer:
shuffle(self._answers)
pointer = self._answers.index(self._correct_answer)
self._correct_index = pointer
self._correct_option = chr((ord(LETTER_A) + pointer)) | Shuffle the answers. | exam2pdf/question.py | shuffle | agossino/exam2pdf | 0 | python | def shuffle(self) -> None:
'\n '
if self._correct_answer:
shuffle(self._answers)
pointer = self._answers.index(self._correct_answer)
self._correct_index = pointer
self._correct_option = chr((ord(LETTER_A) + pointer)) | def shuffle(self) -> None:
'\n '
if self._correct_answer:
shuffle(self._answers)
pointer = self._answers.index(self._correct_answer)
self._correct_index = pointer
self._correct_option = chr((ord(LETTER_A) + pointer))<|docstring|>Shuffle the answers.<|endoftext|> |
ac213536bb694034bc1e6912e9ad03b02b8baf4dc8bddc7f23ebb0a62a1f8170 | def add_parent_path(self, file_path: Path) -> None:
'Add the given path to all images. If the given path is not a\n directory, it is supposed to be a file.\n '
parent: Path = (file_path if file_path.is_dir() else file_path.parent)
if (self.image != Path()):
self.image = (parent / self.image)
for answer in self.answers:
if (answer.image != Path()):
answer.image = (parent / answer.image) | Add the given path to all images. If the given path is not a
directory, it is supposed to be a file. | exam2pdf/question.py | add_parent_path | agossino/exam2pdf | 0 | python | def add_parent_path(self, file_path: Path) -> None:
'Add the given path to all images. If the given path is not a\n directory, it is supposed to be a file.\n '
parent: Path = (file_path if file_path.is_dir() else file_path.parent)
if (self.image != Path()):
self.image = (parent / self.image)
for answer in self.answers:
if (answer.image != Path()):
answer.image = (parent / answer.image) | def add_parent_path(self, file_path: Path) -> None:
'Add the given path to all images. If the given path is not a\n directory, it is supposed to be a file.\n '
parent: Path = (file_path if file_path.is_dir() else file_path.parent)
if (self.image != Path()):
self.image = (parent / self.image)
for answer in self.answers:
if (answer.image != Path()):
answer.image = (parent / answer.image)<|docstring|>Add the given path to all images. If the given path is not a
directory, it is supposed to be a file.<|endoftext|> |
f0b4a6fd7f3558a525825f5d7e6a4b0eb03bc1fb0a30c0089b63876254f87327 | def load_sequentially(self, iterator: Iterator[Any]) -> None:
'Load all the attribute sequentially from iterator, according to\n attr_load_sequence and type_caster_sequence. Empty answers are skipped.\n Empty means without text and image. Returns when\n iterator is exhausted and StopIteration is caught.\n '
attribute_iterator: Iterator[str] = iter(self.attr_load_sequence)
caster_iterator: Iterator[CasterType] = iter(self._type_caster_sequence)
attribute: Optional[str] = next(attribute_iterator, None)
caster: Optional[CasterType] = next(caster_iterator, None)
try:
while ((attribute is not None) and (caster is not None)):
try:
setattr(self, attribute, caster(next(iterator)))
except TypeError:
raise Exam2pdfException('Invalid type in cvs file')
attribute = next(attribute_iterator, None)
caster = next(caster_iterator, None)
self._load_answers(iterator)
except StopIteration:
pass | Load all the attribute sequentially from iterator, according to
attr_load_sequence and type_caster_sequence. Empty answers are skipped.
Empty means without text and image. Returns when
iterator is exhausted and StopIteration is caught. | exam2pdf/question.py | load_sequentially | agossino/exam2pdf | 0 | python | def load_sequentially(self, iterator: Iterator[Any]) -> None:
'Load all the attribute sequentially from iterator, according to\n attr_load_sequence and type_caster_sequence. Empty answers are skipped.\n Empty means without text and image. Returns when\n iterator is exhausted and StopIteration is caught.\n '
attribute_iterator: Iterator[str] = iter(self.attr_load_sequence)
caster_iterator: Iterator[CasterType] = iter(self._type_caster_sequence)
attribute: Optional[str] = next(attribute_iterator, None)
caster: Optional[CasterType] = next(caster_iterator, None)
try:
while ((attribute is not None) and (caster is not None)):
try:
setattr(self, attribute, caster(next(iterator)))
except TypeError:
raise Exam2pdfException('Invalid type in cvs file')
attribute = next(attribute_iterator, None)
caster = next(caster_iterator, None)
self._load_answers(iterator)
except StopIteration:
pass | def load_sequentially(self, iterator: Iterator[Any]) -> None:
'Load all the attribute sequentially from iterator, according to\n attr_load_sequence and type_caster_sequence. Empty answers are skipped.\n Empty means without text and image. Returns when\n iterator is exhausted and StopIteration is caught.\n '
attribute_iterator: Iterator[str] = iter(self.attr_load_sequence)
caster_iterator: Iterator[CasterType] = iter(self._type_caster_sequence)
attribute: Optional[str] = next(attribute_iterator, None)
caster: Optional[CasterType] = next(caster_iterator, None)
try:
while ((attribute is not None) and (caster is not None)):
try:
setattr(self, attribute, caster(next(iterator)))
except TypeError:
raise Exam2pdfException('Invalid type in cvs file')
attribute = next(attribute_iterator, None)
caster = next(caster_iterator, None)
self._load_answers(iterator)
except StopIteration:
pass<|docstring|>Load all the attribute sequentially from iterator, according to
attr_load_sequence and type_caster_sequence. Empty answers are skipped.
Empty means without text and image. Returns when
iterator is exhausted and StopIteration is caught.<|endoftext|> |
1e0913bd741814ebaa5fbacafd2bb29b0ac503f9b65a9f89b479559b6d9b10d9 | def _load_answers(self, iterator: Iterator[Any]) -> None:
'Load answers. An answer is filled even if there are not enough elements\n in the iterator. Empty answers are not loaded.\n Returns when iterator is exhausted and StopIteration is caught.\n '
wrote_attr = 1
while wrote_attr:
answer = self._answer_type()
wrote_attr = self._load_1_answer(answer, iterator) | Load answers. An answer is filled even if there are not enough elements
in the iterator. Empty answers are not loaded.
Returns when iterator is exhausted and StopIteration is caught. | exam2pdf/question.py | _load_answers | agossino/exam2pdf | 0 | python | def _load_answers(self, iterator: Iterator[Any]) -> None:
'Load answers. An answer is filled even if there are not enough elements\n in the iterator. Empty answers are not loaded.\n Returns when iterator is exhausted and StopIteration is caught.\n '
wrote_attr = 1
while wrote_attr:
answer = self._answer_type()
wrote_attr = self._load_1_answer(answer, iterator) | def _load_answers(self, iterator: Iterator[Any]) -> None:
'Load answers. An answer is filled even if there are not enough elements\n in the iterator. Empty answers are not loaded.\n Returns when iterator is exhausted and StopIteration is caught.\n '
wrote_attr = 1
while wrote_attr:
answer = self._answer_type()
wrote_attr = self._load_1_answer(answer, iterator)<|docstring|>Load answers. An answer is filled even if there are not enough elements
in the iterator. Empty answers are not loaded.
Returns when iterator is exhausted and StopIteration is caught.<|endoftext|> |
4089c014b0414ec0afb5a00f3b7b430daaa50a5074ff6f2fb0425a833f06291e | @correct_answer.setter
def correct_answer(self, value) -> None:
'Set the given answer as the correct one.\n '
if (value in self._answers):
self._correct_answer = value
else:
raise ValueError(f'correct_answer argument has never been added')
pointer = self._answers.index(self._correct_answer)
self._correct_index = pointer
self._correct_option = self.correct_answer.text | Set the given answer as the correct one. | exam2pdf/question.py | correct_answer | agossino/exam2pdf | 0 | python | @correct_answer.setter
def correct_answer(self, value) -> None:
'\n '
if (value in self._answers):
self._correct_answer = value
else:
raise ValueError(f'correct_answer argument has never been added')
pointer = self._answers.index(self._correct_answer)
self._correct_index = pointer
self._correct_option = self.correct_answer.text | @correct_answer.setter
def correct_answer(self, value) -> None:
'\n '
if (value in self._answers):
self._correct_answer = value
else:
raise ValueError(f'correct_answer argument has never been added')
pointer = self._answers.index(self._correct_answer)
self._correct_index = pointer
self._correct_option = self.correct_answer.text<|docstring|>Set the given answer as the correct one.<|endoftext|> |
7fa3bcfbc65e8b9a78a34c07028d61209d6c6bdf029b6e1c30c54a8d78f3e25b | @correct_option.setter
def correct_option(self, value: bool) -> None:
'Set the correct answer according to the boolean\n '
for answer in self.answers:
if (answer.boolean == value):
self.correct_answer = answer | Set the correct answer according to the boolean | exam2pdf/question.py | correct_option | agossino/exam2pdf | 0 | python | @correct_option.setter
def correct_option(self, value: bool) -> None:
'\n '
for answer in self.answers:
if (answer.boolean == value):
self.correct_answer = answer | @correct_option.setter
def correct_option(self, value: bool) -> None:
'\n '
for answer in self.answers:
if (answer.boolean == value):
self.correct_answer = answer<|docstring|>Set the correct answer according to the boolean<|endoftext|> |
833fffd6bbac8085923ec7c532056cad21c9b96aaa9ee1ebff04003ffd0cb9d4 | def add_answer(self, answer: TrueFalseAnswer, is_correct: bool=False) -> None:
'Add an Answer. Correct answer is set.\n The first answer is the correct one: successive answers\n are set accordingly to is_correct argument.\n '
if (len(self._answers) == 0):
self._answers.append(answer)
self.correct_answer = answer
elif (len(self._answers) == 1):
if (answer.boolean == self._correct_answer.boolean):
raise ValueError('Only two alternative answers are allowed')
self._answers.append(answer)
if is_correct:
self.correct_answer = answer
else:
raise ValueError('Only two alternative answers are allowed') | Add an Answer. Correct answer is set.
The first answer is the correct one: successive answers
are set accordingly to is_correct argument. | exam2pdf/question.py | add_answer | agossino/exam2pdf | 0 | python | def add_answer(self, answer: TrueFalseAnswer, is_correct: bool=False) -> None:
'Add an Answer. Correct answer is set.\n The first answer is the correct one: successive answers\n are set accordingly to is_correct argument.\n '
if (len(self._answers) == 0):
self._answers.append(answer)
self.correct_answer = answer
elif (len(self._answers) == 1):
if (answer.boolean == self._correct_answer.boolean):
raise ValueError('Only two alternative answers are allowed')
self._answers.append(answer)
if is_correct:
self.correct_answer = answer
else:
raise ValueError('Only two alternative answers are allowed') | def add_answer(self, answer: TrueFalseAnswer, is_correct: bool=False) -> None:
'Add an Answer. Correct answer is set.\n The first answer is the correct one: successive answers\n are set accordingly to is_correct argument.\n '
if (len(self._answers) == 0):
self._answers.append(answer)
self.correct_answer = answer
elif (len(self._answers) == 1):
if (answer.boolean == self._correct_answer.boolean):
raise ValueError('Only two alternative answers are allowed')
self._answers.append(answer)
if is_correct:
self.correct_answer = answer
else:
raise ValueError('Only two alternative answers are allowed')<|docstring|>Add an Answer. Correct answer is set.
The first answer is the correct one: successive answers
are set accordingly to is_correct argument.<|endoftext|> |
81a90e7ce09444bf95c1602752eb3cedd96c260871e3bfccdae8a7bc00e1dd6a | def _source() -> Source:
'\n checks HTTP header source is present and points to an available resource\n\n :return: parsed HTTP headers, transformed into a Source object\n '
source_header = request.headers.get('source')
if (not source_header):
abort(Response("HTTP header 'source' not found", 400))
if re.match('^http[s]?://', source_header):
abort(Response('HTTP source not supported', 400))
if (not source_header.startswith('s3://')):
if (not os.path.exists(source_header)):
abort(Response("couldn't find source [{}]".format(source_header), 400))
sim_year = request.headers.get('sim_year')
if (not sim_year):
sim_year = int(os.getenv('SIM_YEAR', datetime.now().year))
logging.info('Using pinned simulation year: {}'.format(sim_year))
return Source.of(source_header, int(sim_year)) | checks HTTP header source is present and points to an available resource
:return: parsed HTTP headers, transformed into a Source object | grafener/backend.py | _source | airboxlab/grafener | 6 | python | def _source() -> Source:
'\n checks HTTP header source is present and points to an available resource\n\n :return: parsed HTTP headers, transformed into a Source object\n '
source_header = request.headers.get('source')
if (not source_header):
abort(Response("HTTP header 'source' not found", 400))
if re.match('^http[s]?://', source_header):
abort(Response('HTTP source not supported', 400))
if (not source_header.startswith('s3://')):
if (not os.path.exists(source_header)):
abort(Response("couldn't find source [{}]".format(source_header), 400))
sim_year = request.headers.get('sim_year')
if (not sim_year):
sim_year = int(os.getenv('SIM_YEAR', datetime.now().year))
logging.info('Using pinned simulation year: {}'.format(sim_year))
return Source.of(source_header, int(sim_year)) | def _source() -> Source:
'\n checks HTTP header source is present and points to an available resource\n\n :return: parsed HTTP headers, transformed into a Source object\n '
source_header = request.headers.get('source')
if (not source_header):
abort(Response("HTTP header 'source' not found", 400))
if re.match('^http[s]?://', source_header):
abort(Response('HTTP source not supported', 400))
if (not source_header.startswith('s3://')):
if (not os.path.exists(source_header)):
abort(Response("couldn't find source [{}]".format(source_header), 400))
sim_year = request.headers.get('sim_year')
if (not sim_year):
sim_year = int(os.getenv('SIM_YEAR', datetime.now().year))
logging.info('Using pinned simulation year: {}'.format(sim_year))
return Source.of(source_header, int(sim_year))<|docstring|>checks HTTP header source is present and points to an available resource
:return: parsed HTTP headers, transformed into a Source object<|endoftext|> |
16c46ccc6a93385e42d3c9c97e770cf7930c1a0d40d06db9252902f3c8d77782 | def __init__(self, title: str, enable_exit=True):
'\n :param title: Title of the image window\n :param enable_exit: Allows exit by pressing escape\n '
super().__init__([])
self.title = title
self.enable_exit = enable_exit | :param title: Title of the image window
:param enable_exit: Allows exit by pressing escape | ialab/pipeline/output.py | __init__ | hselvaggi/ialab-core | 0 | python | def __init__(self, title: str, enable_exit=True):
'\n :param title: Title of the image window\n :param enable_exit: Allows exit by pressing escape\n '
super().__init__([])
self.title = title
self.enable_exit = enable_exit | def __init__(self, title: str, enable_exit=True):
'\n :param title: Title of the image window\n :param enable_exit: Allows exit by pressing escape\n '
super().__init__([])
self.title = title
self.enable_exit = enable_exit<|docstring|>:param title: Title of the image window
:param enable_exit: Allows exit by pressing escape<|endoftext|> |
6d366a3c221b6c1280f2b67e10098388adbce4236264b961a617a6f2f61c6919 | def __init__(self, destination: str):
'\n :param destination: Folder where to write the image\n '
super().__init__([])
self.destination = destination | :param destination: Folder where to write the image | ialab/pipeline/output.py | __init__ | hselvaggi/ialab-core | 0 | python | def __init__(self, destination: str):
'\n \n '
super().__init__([])
self.destination = destination | def __init__(self, destination: str):
'\n \n '
super().__init__([])
self.destination = destination<|docstring|>:param destination: Folder where to write the image<|endoftext|> |
9490395b662763fc3030ca35863257fd26c99938bebcb001eea8b892d2f45611 | def cree_fenetre(largeur, hauteur):
'\n Crée une fenêtre de dimensions ``largeur`` x ``hauteur`` pixels.\n '
global __canevas
if (__canevas is not None):
raise FenetreDejaCree('La fenêtre a déjà été crée avec la fonction "cree_fenetre".')
__canevas = CustomCanvas(largeur, hauteur) | Crée une fenêtre de dimensions ``largeur`` x ``hauteur`` pixels. | upemtk.py | cree_fenetre | Berachem/Azul-Game | 3 | python | def cree_fenetre(largeur, hauteur):
'\n \n '
global __canevas
if (__canevas is not None):
raise FenetreDejaCree('La fenêtre a déjà été crée avec la fonction "cree_fenetre".')
__canevas = CustomCanvas(largeur, hauteur) | def cree_fenetre(largeur, hauteur):
'\n \n '
global __canevas
if (__canevas is not None):
raise FenetreDejaCree('La fenêtre a déjà été crée avec la fonction "cree_fenetre".')
__canevas = CustomCanvas(largeur, hauteur)<|docstring|>Crée une fenêtre de dimensions ``largeur`` x ``hauteur`` pixels.<|endoftext|> |
fa0425dc6cfa23820e112608f89dfca9a8cb35d795b531ae53b675066cf9f95c | def ferme_fenetre():
'\n Détruit la fenêtre.\n '
global __canevas
if (__canevas is None):
raise FenetreNonCree('La fenêtre n\'a pas été crée avec la fonction "cree_fenetre".')
__canevas.root.destroy()
__canevas = None | Détruit la fenêtre. | upemtk.py | ferme_fenetre | Berachem/Azul-Game | 3 | python | def ferme_fenetre():
'\n \n '
global __canevas
if (__canevas is None):
raise FenetreNonCree('La fenêtre n\'a pas été crée avec la fonction "cree_fenetre".')
__canevas.root.destroy()
__canevas = None | def ferme_fenetre():
'\n \n '
global __canevas
if (__canevas is None):
raise FenetreNonCree('La fenêtre n\'a pas été crée avec la fonction "cree_fenetre".')
__canevas.root.destroy()
__canevas = None<|docstring|>Détruit la fenêtre.<|endoftext|> |
ce33b4325b4e68a5d66131fa2852680503de55d8bf7fb01be48886a26277ea32 | def mise_a_jour():
"\n Met à jour la fenêtre. Les dessins ne sont affichés qu'après \n l'appel à cette fonction.\n "
global __canevas
if (__canevas is None):
raise FenetreNonCree('La fenêtre n\'a pas été crée avec la fonction "cree_fenetre".')
__canevas.update() | Met à jour la fenêtre. Les dessins ne sont affichés qu'après
l'appel à cette fonction. | upemtk.py | mise_a_jour | Berachem/Azul-Game | 3 | python | def mise_a_jour():
"\n Met à jour la fenêtre. Les dessins ne sont affichés qu'après \n l'appel à cette fonction.\n "
global __canevas
if (__canevas is None):
raise FenetreNonCree('La fenêtre n\'a pas été crée avec la fonction "cree_fenetre".')
__canevas.update() | def mise_a_jour():
"\n Met à jour la fenêtre. Les dessins ne sont affichés qu'après \n l'appel à cette fonction.\n "
global __canevas
if (__canevas is None):
raise FenetreNonCree('La fenêtre n\'a pas été crée avec la fonction "cree_fenetre".')
__canevas.update()<|docstring|>Met à jour la fenêtre. Les dessins ne sont affichés qu'après
l'appel à cette fonction.<|endoftext|> |
64c995e24d0accb9301337b871f8e87d6dd0963bd80bb7d1422651fa9ff3239d | def ligne(ax, ay, bx, by, couleur='black', epaisseur=1, tag=''):
"\n Trace un segment reliant le point ``(ax, ay)`` au point ``(bx, by)``.\n\n :param float ax: abscisse du premier point\n :param float ay: ordonnée du premier point\n :param float bx: abscisse du second point\n :param float by: ordonnée du second point\n :param str couleur: couleur de trait (défaut 'black')\n :param float epaisseur: épaisseur de trait en pixels (défaut 1)\n :param str tag: étiquette d'objet (défaut : pas d'étiquette)\n :return: identificateur d'objet\n "
global __canevas
return __canevas.canvas.create_line(ax, ay, bx, by, fill=couleur, width=epaisseur, tag=tag) | Trace un segment reliant le point ``(ax, ay)`` au point ``(bx, by)``.
:param float ax: abscisse du premier point
:param float ay: ordonnée du premier point
:param float bx: abscisse du second point
:param float by: ordonnée du second point
:param str couleur: couleur de trait (défaut 'black')
:param float epaisseur: épaisseur de trait en pixels (défaut 1)
:param str tag: étiquette d'objet (défaut : pas d'étiquette)
:return: identificateur d'objet | upemtk.py | ligne | Berachem/Azul-Game | 3 | python | def ligne(ax, ay, bx, by, couleur='black', epaisseur=1, tag=):
"\n Trace un segment reliant le point ``(ax, ay)`` au point ``(bx, by)``.\n\n :param float ax: abscisse du premier point\n :param float ay: ordonnée du premier point\n :param float bx: abscisse du second point\n :param float by: ordonnée du second point\n :param str couleur: couleur de trait (défaut 'black')\n :param float epaisseur: épaisseur de trait en pixels (défaut 1)\n :param str tag: étiquette d'objet (défaut : pas d'étiquette)\n :return: identificateur d'objet\n "
global __canevas
return __canevas.canvas.create_line(ax, ay, bx, by, fill=couleur, width=epaisseur, tag=tag) | def ligne(ax, ay, bx, by, couleur='black', epaisseur=1, tag=):
"\n Trace un segment reliant le point ``(ax, ay)`` au point ``(bx, by)``.\n\n :param float ax: abscisse du premier point\n :param float ay: ordonnée du premier point\n :param float bx: abscisse du second point\n :param float by: ordonnée du second point\n :param str couleur: couleur de trait (défaut 'black')\n :param float epaisseur: épaisseur de trait en pixels (défaut 1)\n :param str tag: étiquette d'objet (défaut : pas d'étiquette)\n :return: identificateur d'objet\n "
global __canevas
return __canevas.canvas.create_line(ax, ay, bx, by, fill=couleur, width=epaisseur, tag=tag)<|docstring|>Trace un segment reliant le point ``(ax, ay)`` au point ``(bx, by)``.
:param float ax: abscisse du premier point
:param float ay: ordonnée du premier point
:param float bx: abscisse du second point
:param float by: ordonnée du second point
:param str couleur: couleur de trait (défaut 'black')
:param float epaisseur: épaisseur de trait en pixels (défaut 1)
:param str tag: étiquette d'objet (défaut : pas d'étiquette)
:return: identificateur d'objet<|endoftext|> |
9cdb541f65c7aadcebe42b6cfd913ecd7bd667a1c255bc380ef31745beab28e0 | def fleche(ax, ay, bx, by, couleur='black', epaisseur=1, tag=''):
"\n Trace une flèche du point ``(ax, ay)`` au point ``(bx, by)``.\n\n :param float ax: abscisse du premier point\n :param float ay: ordonnée du premier point\n :param float bx: abscisse du second point\n :param float by: ordonnée du second point\n :param str couleur: couleur de trait (défaut 'black')\n :param float epaisseur: épaisseur de trait en pixels (défaut 1)\n :param str tag: étiquette d'objet (défaut : pas d'étiquette)\n :return: identificateur d'objet\n "
global __canevas
(x, y) = ((bx - ax), (by - ay))
n = (((x ** 2) + (y ** 2)) ** 0.5)
(x, y) = ((x / n), (y / n))
points = [bx, by, ((bx - (x * 5)) - (2 * y)), ((by - (5 * y)) + (2 * x)), ((bx - (x * 5)) + (2 * y)), ((by - (5 * y)) - (2 * x))]
return __canevas.canvas.create_polygon(points, fill=couleur, outline=couleur, width=epaisseur, tag=tag) | Trace une flèche du point ``(ax, ay)`` au point ``(bx, by)``.
:param float ax: abscisse du premier point
:param float ay: ordonnée du premier point
:param float bx: abscisse du second point
:param float by: ordonnée du second point
:param str couleur: couleur de trait (défaut 'black')
:param float epaisseur: épaisseur de trait en pixels (défaut 1)
:param str tag: étiquette d'objet (défaut : pas d'étiquette)
:return: identificateur d'objet | upemtk.py | fleche | Berachem/Azul-Game | 3 | python | def fleche(ax, ay, bx, by, couleur='black', epaisseur=1, tag=):
"\n Trace une flèche du point ``(ax, ay)`` au point ``(bx, by)``.\n\n :param float ax: abscisse du premier point\n :param float ay: ordonnée du premier point\n :param float bx: abscisse du second point\n :param float by: ordonnée du second point\n :param str couleur: couleur de trait (défaut 'black')\n :param float epaisseur: épaisseur de trait en pixels (défaut 1)\n :param str tag: étiquette d'objet (défaut : pas d'étiquette)\n :return: identificateur d'objet\n "
global __canevas
(x, y) = ((bx - ax), (by - ay))
n = (((x ** 2) + (y ** 2)) ** 0.5)
(x, y) = ((x / n), (y / n))
points = [bx, by, ((bx - (x * 5)) - (2 * y)), ((by - (5 * y)) + (2 * x)), ((bx - (x * 5)) + (2 * y)), ((by - (5 * y)) - (2 * x))]
return __canevas.canvas.create_polygon(points, fill=couleur, outline=couleur, width=epaisseur, tag=tag) | def fleche(ax, ay, bx, by, couleur='black', epaisseur=1, tag=):
"\n Trace une flèche du point ``(ax, ay)`` au point ``(bx, by)``.\n\n :param float ax: abscisse du premier point\n :param float ay: ordonnée du premier point\n :param float bx: abscisse du second point\n :param float by: ordonnée du second point\n :param str couleur: couleur de trait (défaut 'black')\n :param float epaisseur: épaisseur de trait en pixels (défaut 1)\n :param str tag: étiquette d'objet (défaut : pas d'étiquette)\n :return: identificateur d'objet\n "
global __canevas
(x, y) = ((bx - ax), (by - ay))
n = (((x ** 2) + (y ** 2)) ** 0.5)
(x, y) = ((x / n), (y / n))
points = [bx, by, ((bx - (x * 5)) - (2 * y)), ((by - (5 * y)) + (2 * x)), ((bx - (x * 5)) + (2 * y)), ((by - (5 * y)) - (2 * x))]
return __canevas.canvas.create_polygon(points, fill=couleur, outline=couleur, width=epaisseur, tag=tag)<|docstring|>Trace une flèche du point ``(ax, ay)`` au point ``(bx, by)``.
:param float ax: abscisse du premier point
:param float ay: ordonnée du premier point
:param float bx: abscisse du second point
:param float by: ordonnée du second point
:param str couleur: couleur de trait (défaut 'black')
:param float epaisseur: épaisseur de trait en pixels (défaut 1)
:param str tag: étiquette d'objet (défaut : pas d'étiquette)
:return: identificateur d'objet<|endoftext|> |
18c7355f65c85886202a1ad68a80876c60d7de48444f2f399c0832195cd3af62 | def polygone(points, couleur='black', remplissage='', epaisseur=1, tag=''):
"\n Trace un polygone dont la liste de points est fournie.\n\n :param list points: liste de couples (abscisse, ordonnee) de points\n :param str couleur: couleur de trait (défaut 'black')\n :param float epaisseur: épaisseur de trait en pixels (défaut 1)\n :param str tag: étiquette d'objet (défaut : pas d'étiquette)\n :return: identificateur d'objet\n "
global __canevas
return __canevas.canvas.create_polygon(points, fill=remplissage, outline=couleur, width=epaisseur, tag=tag) | Trace un polygone dont la liste de points est fournie.
:param list points: liste de couples (abscisse, ordonnee) de points
:param str couleur: couleur de trait (défaut 'black')
:param float epaisseur: épaisseur de trait en pixels (défaut 1)
:param str tag: étiquette d'objet (défaut : pas d'étiquette)
:return: identificateur d'objet | upemtk.py | polygone | Berachem/Azul-Game | 3 | python | def polygone(points, couleur='black', remplissage=, epaisseur=1, tag=):
"\n Trace un polygone dont la liste de points est fournie.\n\n :param list points: liste de couples (abscisse, ordonnee) de points\n :param str couleur: couleur de trait (défaut 'black')\n :param float epaisseur: épaisseur de trait en pixels (défaut 1)\n :param str tag: étiquette d'objet (défaut : pas d'étiquette)\n :return: identificateur d'objet\n "
global __canevas
return __canevas.canvas.create_polygon(points, fill=remplissage, outline=couleur, width=epaisseur, tag=tag) | def polygone(points, couleur='black', remplissage=, epaisseur=1, tag=):
"\n Trace un polygone dont la liste de points est fournie.\n\n :param list points: liste de couples (abscisse, ordonnee) de points\n :param str couleur: couleur de trait (défaut 'black')\n :param float epaisseur: épaisseur de trait en pixels (défaut 1)\n :param str tag: étiquette d'objet (défaut : pas d'étiquette)\n :return: identificateur d'objet\n "
global __canevas
return __canevas.canvas.create_polygon(points, fill=remplissage, outline=couleur, width=epaisseur, tag=tag)<|docstring|>Trace un polygone dont la liste de points est fournie.
:param list points: liste de couples (abscisse, ordonnee) de points
:param str couleur: couleur de trait (défaut 'black')
:param float epaisseur: épaisseur de trait en pixels (défaut 1)
:param str tag: étiquette d'objet (défaut : pas d'étiquette)
:return: identificateur d'objet<|endoftext|> |
d15cb4078265b933c49ca16d61708b5f06148d60d09527c09b07223ee80cd0fb | def rectangle(ax, ay, bx, by, couleur='black', remplissage='', epaisseur=1, tag=''):
"\n Trace un rectangle noir ayant les point ``(ax, ay)`` et ``(bx, by)``\n comme coins opposés.\n\n :param float ax: abscisse du premier coin\n :param float ay: ordonnée du premier coin\n :param float bx: abscisse du second coin\n :param float by: ordonnée du second coin\n :param str couleur: couleur de trait (défaut 'black')\n :param str remplissage: couleur de fond (défaut transparent)\n :param float epaisseur: épaisseur de trait en pixels (défaut 1)\n :param str tag: étiquette d'objet (défaut : pas d'étiquette)\n :return: identificateur d'objet\n "
global __canevas
return __canevas.canvas.create_rectangle(ax, ay, bx, by, outline=couleur, fill=remplissage, width=epaisseur, tag=tag) | Trace un rectangle noir ayant les point ``(ax, ay)`` et ``(bx, by)``
comme coins opposés.
:param float ax: abscisse du premier coin
:param float ay: ordonnée du premier coin
:param float bx: abscisse du second coin
:param float by: ordonnée du second coin
:param str couleur: couleur de trait (défaut 'black')
:param str remplissage: couleur de fond (défaut transparent)
:param float epaisseur: épaisseur de trait en pixels (défaut 1)
:param str tag: étiquette d'objet (défaut : pas d'étiquette)
:return: identificateur d'objet | upemtk.py | rectangle | Berachem/Azul-Game | 3 | python | def rectangle(ax, ay, bx, by, couleur='black', remplissage=, epaisseur=1, tag=):
"\n Trace un rectangle noir ayant les point ``(ax, ay)`` et ``(bx, by)``\n comme coins opposés.\n\n :param float ax: abscisse du premier coin\n :param float ay: ordonnée du premier coin\n :param float bx: abscisse du second coin\n :param float by: ordonnée du second coin\n :param str couleur: couleur de trait (défaut 'black')\n :param str remplissage: couleur de fond (défaut transparent)\n :param float epaisseur: épaisseur de trait en pixels (défaut 1)\n :param str tag: étiquette d'objet (défaut : pas d'étiquette)\n :return: identificateur d'objet\n "
global __canevas
return __canevas.canvas.create_rectangle(ax, ay, bx, by, outline=couleur, fill=remplissage, width=epaisseur, tag=tag) | def rectangle(ax, ay, bx, by, couleur='black', remplissage=, epaisseur=1, tag=):
"\n Trace un rectangle noir ayant les point ``(ax, ay)`` et ``(bx, by)``\n comme coins opposés.\n\n :param float ax: abscisse du premier coin\n :param float ay: ordonnée du premier coin\n :param float bx: abscisse du second coin\n :param float by: ordonnée du second coin\n :param str couleur: couleur de trait (défaut 'black')\n :param str remplissage: couleur de fond (défaut transparent)\n :param float epaisseur: épaisseur de trait en pixels (défaut 1)\n :param str tag: étiquette d'objet (défaut : pas d'étiquette)\n :return: identificateur d'objet\n "
global __canevas
return __canevas.canvas.create_rectangle(ax, ay, bx, by, outline=couleur, fill=remplissage, width=epaisseur, tag=tag)<|docstring|>Trace un rectangle noir ayant les point ``(ax, ay)`` et ``(bx, by)``
comme coins opposés.
:param float ax: abscisse du premier coin
:param float ay: ordonnée du premier coin
:param float bx: abscisse du second coin
:param float by: ordonnée du second coin
:param str couleur: couleur de trait (défaut 'black')
:param str remplissage: couleur de fond (défaut transparent)
:param float epaisseur: épaisseur de trait en pixels (défaut 1)
:param str tag: étiquette d'objet (défaut : pas d'étiquette)
:return: identificateur d'objet<|endoftext|> |
4b2202d1ddff54f4084f1e07c5715e8aaf785e2cf61574fa4d116b32664d63dc | def cercle(x, y, r, couleur='black', remplissage='', epaisseur=1, tag=''):
" \n Trace un cercle de centre ``(x, y)`` et de rayon ``r`` en noir.\n\n :param float x: abscisse du centre\n :param float y: ordonnée du centre\n :param float r: rayon\n :param str couleur: couleur de trait (défaut 'black')\n :param str remplissage: couleur de fond (défaut transparent)\n :param float epaisseur: épaisseur de trait en pixels (défaut 1)\n :param str tag: étiquette d'objet (défaut : pas d'étiquette)\n :return: identificateur d'objet\n "
global __canevas
return __canevas.canvas.create_oval((x - r), (y - r), (x + r), (y + r), outline=couleur, fill=remplissage, width=epaisseur, tag=tag) | Trace un cercle de centre ``(x, y)`` et de rayon ``r`` en noir.
:param float x: abscisse du centre
:param float y: ordonnée du centre
:param float r: rayon
:param str couleur: couleur de trait (défaut 'black')
:param str remplissage: couleur de fond (défaut transparent)
:param float epaisseur: épaisseur de trait en pixels (défaut 1)
:param str tag: étiquette d'objet (défaut : pas d'étiquette)
:return: identificateur d'objet | upemtk.py | cercle | Berachem/Azul-Game | 3 | python | def cercle(x, y, r, couleur='black', remplissage=, epaisseur=1, tag=):
" \n Trace un cercle de centre ``(x, y)`` et de rayon ``r`` en noir.\n\n :param float x: abscisse du centre\n :param float y: ordonnée du centre\n :param float r: rayon\n :param str couleur: couleur de trait (défaut 'black')\n :param str remplissage: couleur de fond (défaut transparent)\n :param float epaisseur: épaisseur de trait en pixels (défaut 1)\n :param str tag: étiquette d'objet (défaut : pas d'étiquette)\n :return: identificateur d'objet\n "
global __canevas
return __canevas.canvas.create_oval((x - r), (y - r), (x + r), (y + r), outline=couleur, fill=remplissage, width=epaisseur, tag=tag) | def cercle(x, y, r, couleur='black', remplissage=, epaisseur=1, tag=):
" \n Trace un cercle de centre ``(x, y)`` et de rayon ``r`` en noir.\n\n :param float x: abscisse du centre\n :param float y: ordonnée du centre\n :param float r: rayon\n :param str couleur: couleur de trait (défaut 'black')\n :param str remplissage: couleur de fond (défaut transparent)\n :param float epaisseur: épaisseur de trait en pixels (défaut 1)\n :param str tag: étiquette d'objet (défaut : pas d'étiquette)\n :return: identificateur d'objet\n "
global __canevas
return __canevas.canvas.create_oval((x - r), (y - r), (x + r), (y + r), outline=couleur, fill=remplissage, width=epaisseur, tag=tag)<|docstring|>Trace un cercle de centre ``(x, y)`` et de rayon ``r`` en noir.
:param float x: abscisse du centre
:param float y: ordonnée du centre
:param float r: rayon
:param str couleur: couleur de trait (défaut 'black')
:param str remplissage: couleur de fond (défaut transparent)
:param float epaisseur: épaisseur de trait en pixels (défaut 1)
:param str tag: étiquette d'objet (défaut : pas d'étiquette)
:return: identificateur d'objet<|endoftext|> |
7e4deed747e0831bd682774cfc1547b0998e00acf541e9eb7d6aa9e6daa61281 | def arc(x, y, r, ouverture=90, depart=0, couleur='black', remplissage='', epaisseur=1, tag=''):
"\n Trace un arc de cercle de centre ``(x, y)``, de rayon ``r`` et\n d'angle d'ouverture ``ouverture`` (défaut : 90 degrés, dans le sens\n contraire des aiguilles d'une montre) depuis l'angle initial ``depart``\n (défaut : direction 'est').\n\n :param float x: abscisse du centre\n :param float y: ordonnée du centre\n :param float r: rayon\n :param float ouverture: abscisse du centre\n :param float depart: ordonnée du centre\n :param str couleur: couleur de trait (défaut 'black')\n :param str remplissage: couleur de fond (défaut transparent)\n :param float epaisseur: épaisseur de trait en pixels (défaut 1)\n :param str tag: étiquette d'objet (défaut : pas d'étiquette)\n :return: identificateur d'objet\n "
global __canevas
return __canevas.canvas.create_arc((x - r), (y - r), (x + r), (y + r), extent=ouverture, start=init, style=ARC, outline=couleur, fill=remplissage, width=epaisseur, tag=tag) | Trace un arc de cercle de centre ``(x, y)``, de rayon ``r`` et
d'angle d'ouverture ``ouverture`` (défaut : 90 degrés, dans le sens
contraire des aiguilles d'une montre) depuis l'angle initial ``depart``
(défaut : direction 'est').
:param float x: abscisse du centre
:param float y: ordonnée du centre
:param float r: rayon
:param float ouverture: abscisse du centre
:param float depart: ordonnée du centre
:param str couleur: couleur de trait (défaut 'black')
:param str remplissage: couleur de fond (défaut transparent)
:param float epaisseur: épaisseur de trait en pixels (défaut 1)
:param str tag: étiquette d'objet (défaut : pas d'étiquette)
:return: identificateur d'objet | upemtk.py | arc | Berachem/Azul-Game | 3 | python | def arc(x, y, r, ouverture=90, depart=0, couleur='black', remplissage=, epaisseur=1, tag=):
"\n Trace un arc de cercle de centre ``(x, y)``, de rayon ``r`` et\n d'angle d'ouverture ``ouverture`` (défaut : 90 degrés, dans le sens\n contraire des aiguilles d'une montre) depuis l'angle initial ``depart``\n (défaut : direction 'est').\n\n :param float x: abscisse du centre\n :param float y: ordonnée du centre\n :param float r: rayon\n :param float ouverture: abscisse du centre\n :param float depart: ordonnée du centre\n :param str couleur: couleur de trait (défaut 'black')\n :param str remplissage: couleur de fond (défaut transparent)\n :param float epaisseur: épaisseur de trait en pixels (défaut 1)\n :param str tag: étiquette d'objet (défaut : pas d'étiquette)\n :return: identificateur d'objet\n "
global __canevas
return __canevas.canvas.create_arc((x - r), (y - r), (x + r), (y + r), extent=ouverture, start=init, style=ARC, outline=couleur, fill=remplissage, width=epaisseur, tag=tag) | def arc(x, y, r, ouverture=90, depart=0, couleur='black', remplissage=, epaisseur=1, tag=):
"\n Trace un arc de cercle de centre ``(x, y)``, de rayon ``r`` et\n d'angle d'ouverture ``ouverture`` (défaut : 90 degrés, dans le sens\n contraire des aiguilles d'une montre) depuis l'angle initial ``depart``\n (défaut : direction 'est').\n\n :param float x: abscisse du centre\n :param float y: ordonnée du centre\n :param float r: rayon\n :param float ouverture: abscisse du centre\n :param float depart: ordonnée du centre\n :param str couleur: couleur de trait (défaut 'black')\n :param str remplissage: couleur de fond (défaut transparent)\n :param float epaisseur: épaisseur de trait en pixels (défaut 1)\n :param str tag: étiquette d'objet (défaut : pas d'étiquette)\n :return: identificateur d'objet\n "
global __canevas
return __canevas.canvas.create_arc((x - r), (y - r), (x + r), (y + r), extent=ouverture, start=init, style=ARC, outline=couleur, fill=remplissage, width=epaisseur, tag=tag)<|docstring|>Trace un arc de cercle de centre ``(x, y)``, de rayon ``r`` et
d'angle d'ouverture ``ouverture`` (défaut : 90 degrés, dans le sens
contraire des aiguilles d'une montre) depuis l'angle initial ``depart``
(défaut : direction 'est').
:param float x: abscisse du centre
:param float y: ordonnée du centre
:param float r: rayon
:param float ouverture: abscisse du centre
:param float depart: ordonnée du centre
:param str couleur: couleur de trait (défaut 'black')
:param str remplissage: couleur de fond (défaut transparent)
:param float epaisseur: épaisseur de trait en pixels (défaut 1)
:param str tag: étiquette d'objet (défaut : pas d'étiquette)
:return: identificateur d'objet<|endoftext|> |
cbf233f227970d6fc010dca818d2a9344fa1a8fea96c054f684309a16818a12e | def point(x, y, couleur='black', epaisseur=1, tag=''):
"\n Trace un point aux coordonnées ``(x, y)`` en noir.\n\n :param float x: abscisse\n :param float y: ordonnée\n :param str couleur: couleur du point (défaut 'black')\n :param float epaisseur: épaisseur de trait en pixels (défaut 1)\n :param str tag: étiquette d'objet (défaut : pas d'étiquette)\n :return: identificateur d'objet\n "
return ligne(x, y, (x + epaisseur), (y + epaisseur), couleur, epaisseur, tag) | Trace un point aux coordonnées ``(x, y)`` en noir.
:param float x: abscisse
:param float y: ordonnée
:param str couleur: couleur du point (défaut 'black')
:param float epaisseur: épaisseur de trait en pixels (défaut 1)
:param str tag: étiquette d'objet (défaut : pas d'étiquette)
:return: identificateur d'objet | upemtk.py | point | Berachem/Azul-Game | 3 | python | def point(x, y, couleur='black', epaisseur=1, tag=):
"\n Trace un point aux coordonnées ``(x, y)`` en noir.\n\n :param float x: abscisse\n :param float y: ordonnée\n :param str couleur: couleur du point (défaut 'black')\n :param float epaisseur: épaisseur de trait en pixels (défaut 1)\n :param str tag: étiquette d'objet (défaut : pas d'étiquette)\n :return: identificateur d'objet\n "
return ligne(x, y, (x + epaisseur), (y + epaisseur), couleur, epaisseur, tag) | def point(x, y, couleur='black', epaisseur=1, tag=):
"\n Trace un point aux coordonnées ``(x, y)`` en noir.\n\n :param float x: abscisse\n :param float y: ordonnée\n :param str couleur: couleur du point (défaut 'black')\n :param float epaisseur: épaisseur de trait en pixels (défaut 1)\n :param str tag: étiquette d'objet (défaut : pas d'étiquette)\n :return: identificateur d'objet\n "
return ligne(x, y, (x + epaisseur), (y + epaisseur), couleur, epaisseur, tag)<|docstring|>Trace un point aux coordonnées ``(x, y)`` en noir.
:param float x: abscisse
:param float y: ordonnée
:param str couleur: couleur du point (défaut 'black')
:param float epaisseur: épaisseur de trait en pixels (défaut 1)
:param str tag: étiquette d'objet (défaut : pas d'étiquette)
:return: identificateur d'objet<|endoftext|> |
314efbe36dc2311e70c4841f5e2820009c03065e92de8c2ffadc9bcda215fef4 | def marque(x, y, couleur='red'):
"\n Place la marque sur la position (x, y) qui peut être effacé en appelant\n ``efface_marque()`` ou ``efface('marque')``. Une seule marque peut être\n présente simultanément.\n\n :param float x: abscisse\n :param float y: ordonnée\n :param str couleur: couleur de trait (défaut 'black')\n :return: ``None``\n "
global __canevas
efface_marque()
__canevas.marqueh = ligne((x - __canevas.tailleMarque), y, (x + __canevas.tailleMarque), y, couleur, tag='marque')
__canevas.marquev = ligne(x, (y - __canevas.tailleMarque), x, (y + __canevas.tailleMarque), couleur, tag='marque') | Place la marque sur la position (x, y) qui peut être effacé en appelant
``efface_marque()`` ou ``efface('marque')``. Une seule marque peut être
présente simultanément.
:param float x: abscisse
:param float y: ordonnée
:param str couleur: couleur de trait (défaut 'black')
:return: ``None`` | upemtk.py | marque | Berachem/Azul-Game | 3 | python | def marque(x, y, couleur='red'):
"\n Place la marque sur la position (x, y) qui peut être effacé en appelant\n ``efface_marque()`` ou ``efface('marque')``. Une seule marque peut être\n présente simultanément.\n\n :param float x: abscisse\n :param float y: ordonnée\n :param str couleur: couleur de trait (défaut 'black')\n :return: ``None``\n "
global __canevas
efface_marque()
__canevas.marqueh = ligne((x - __canevas.tailleMarque), y, (x + __canevas.tailleMarque), y, couleur, tag='marque')
__canevas.marquev = ligne(x, (y - __canevas.tailleMarque), x, (y + __canevas.tailleMarque), couleur, tag='marque') | def marque(x, y, couleur='red'):
"\n Place la marque sur la position (x, y) qui peut être effacé en appelant\n ``efface_marque()`` ou ``efface('marque')``. Une seule marque peut être\n présente simultanément.\n\n :param float x: abscisse\n :param float y: ordonnée\n :param str couleur: couleur de trait (défaut 'black')\n :return: ``None``\n "
global __canevas
efface_marque()
__canevas.marqueh = ligne((x - __canevas.tailleMarque), y, (x + __canevas.tailleMarque), y, couleur, tag='marque')
__canevas.marquev = ligne(x, (y - __canevas.tailleMarque), x, (y + __canevas.tailleMarque), couleur, tag='marque')<|docstring|>Place la marque sur la position (x, y) qui peut être effacé en appelant
``efface_marque()`` ou ``efface('marque')``. Une seule marque peut être
présente simultanément.
:param float x: abscisse
:param float y: ordonnée
:param str couleur: couleur de trait (défaut 'black')
:return: ``None``<|endoftext|> |
cfd0bf8b7fb54714b2ebf58a80225faa0b87c41d8dd9e9831fb1e6999a78cb47 | def image(x, y, fichier, ancrage='center', tag=''):
"\n Affiche l'image contenue dans ``fichier`` avec ``(x, y)`` comme centre. Les\n valeurs possibles du point d'ancrage sont ``'center'``, ``'nw'``, etc.\n\n :param float x: abscisse du point d'ancrage\n :param float y: ordonnée du point d'ancrage\n :param str fichier: nom du fichier contenant l'image\n :param ancrage: position du point d'ancrage par rapport à l'image\n :param str tag: étiquette d'objet (défaut : pas d'étiquette)\n :return: identificateur d'objet\n "
global __canevas
global __img
img = PhotoImage(file=fichier)
img_object = __canevas.canvas.create_image(x, y, anchor=ancrage, image=img, tag=tag)
__img[img_object] = img
return img_object | Affiche l'image contenue dans ``fichier`` avec ``(x, y)`` comme centre. Les
valeurs possibles du point d'ancrage sont ``'center'``, ``'nw'``, etc.
:param float x: abscisse du point d'ancrage
:param float y: ordonnée du point d'ancrage
:param str fichier: nom du fichier contenant l'image
:param ancrage: position du point d'ancrage par rapport à l'image
:param str tag: étiquette d'objet (défaut : pas d'étiquette)
:return: identificateur d'objet | upemtk.py | image | Berachem/Azul-Game | 3 | python | def image(x, y, fichier, ancrage='center', tag=):
"\n Affiche l'image contenue dans ``fichier`` avec ``(x, y)`` comme centre. Les\n valeurs possibles du point d'ancrage sont ``'center'``, ``'nw'``, etc.\n\n :param float x: abscisse du point d'ancrage\n :param float y: ordonnée du point d'ancrage\n :param str fichier: nom du fichier contenant l'image\n :param ancrage: position du point d'ancrage par rapport à l'image\n :param str tag: étiquette d'objet (défaut : pas d'étiquette)\n :return: identificateur d'objet\n "
global __canevas
global __img
img = PhotoImage(file=fichier)
img_object = __canevas.canvas.create_image(x, y, anchor=ancrage, image=img, tag=tag)
__img[img_object] = img
return img_object | def image(x, y, fichier, ancrage='center', tag=):
"\n Affiche l'image contenue dans ``fichier`` avec ``(x, y)`` comme centre. Les\n valeurs possibles du point d'ancrage sont ``'center'``, ``'nw'``, etc.\n\n :param float x: abscisse du point d'ancrage\n :param float y: ordonnée du point d'ancrage\n :param str fichier: nom du fichier contenant l'image\n :param ancrage: position du point d'ancrage par rapport à l'image\n :param str tag: étiquette d'objet (défaut : pas d'étiquette)\n :return: identificateur d'objet\n "
global __canevas
global __img
img = PhotoImage(file=fichier)
img_object = __canevas.canvas.create_image(x, y, anchor=ancrage, image=img, tag=tag)
__img[img_object] = img
return img_object<|docstring|>Affiche l'image contenue dans ``fichier`` avec ``(x, y)`` comme centre. Les
valeurs possibles du point d'ancrage sont ``'center'``, ``'nw'``, etc.
:param float x: abscisse du point d'ancrage
:param float y: ordonnée du point d'ancrage
:param str fichier: nom du fichier contenant l'image
:param ancrage: position du point d'ancrage par rapport à l'image
:param str tag: étiquette d'objet (défaut : pas d'étiquette)
:return: identificateur d'objet<|endoftext|> |
6d195e095dc56876aa497040eed96d40117fe979db2e9221536042b30ac94e5a | def texte(x, y, chaine, couleur='black', ancrage='nw', police='Purisa', taille=24, tag=''):
"\n Affiche la chaîne ``chaine`` avec ``(x, y)`` comme point d'ancrage (par\n défaut le coin supérieur gauche).\n\n :param float x: abscisse du point d'ancrage\n :param float y: ordonnée du point d'ancrage\n :param str couleur: couleur de trait (défaut 'black')\n :param ancrage: position du point d'ancrage (défaut 'nw')\n :param police: police de caractères (défaut : 'Purisa')\n :param taille: taille de police (défaut 24)\n :param tag: étiquette d'objet (défaut : pas d'étiquette\n :return: identificateur d'objet\n "
global __canevas
__canevas.set_font(police, taille)
return __canevas.canvas.create_text(x, y, text=chaine, font=__canevas.tkfont, tag=tag, fill=couleur, anchor=ancrage) | Affiche la chaîne ``chaine`` avec ``(x, y)`` comme point d'ancrage (par
défaut le coin supérieur gauche).
:param float x: abscisse du point d'ancrage
:param float y: ordonnée du point d'ancrage
:param str couleur: couleur de trait (défaut 'black')
:param ancrage: position du point d'ancrage (défaut 'nw')
:param police: police de caractères (défaut : 'Purisa')
:param taille: taille de police (défaut 24)
:param tag: étiquette d'objet (défaut : pas d'étiquette
:return: identificateur d'objet | upemtk.py | texte | Berachem/Azul-Game | 3 | python | def texte(x, y, chaine, couleur='black', ancrage='nw', police='Purisa', taille=24, tag=):
"\n Affiche la chaîne ``chaine`` avec ``(x, y)`` comme point d'ancrage (par\n défaut le coin supérieur gauche).\n\n :param float x: abscisse du point d'ancrage\n :param float y: ordonnée du point d'ancrage\n :param str couleur: couleur de trait (défaut 'black')\n :param ancrage: position du point d'ancrage (défaut 'nw')\n :param police: police de caractères (défaut : 'Purisa')\n :param taille: taille de police (défaut 24)\n :param tag: étiquette d'objet (défaut : pas d'étiquette\n :return: identificateur d'objet\n "
global __canevas
__canevas.set_font(police, taille)
return __canevas.canvas.create_text(x, y, text=chaine, font=__canevas.tkfont, tag=tag, fill=couleur, anchor=ancrage) | def texte(x, y, chaine, couleur='black', ancrage='nw', police='Purisa', taille=24, tag=):
"\n Affiche la chaîne ``chaine`` avec ``(x, y)`` comme point d'ancrage (par\n défaut le coin supérieur gauche).\n\n :param float x: abscisse du point d'ancrage\n :param float y: ordonnée du point d'ancrage\n :param str couleur: couleur de trait (défaut 'black')\n :param ancrage: position du point d'ancrage (défaut 'nw')\n :param police: police de caractères (défaut : 'Purisa')\n :param taille: taille de police (défaut 24)\n :param tag: étiquette d'objet (défaut : pas d'étiquette\n :return: identificateur d'objet\n "
global __canevas
__canevas.set_font(police, taille)
return __canevas.canvas.create_text(x, y, text=chaine, font=__canevas.tkfont, tag=tag, fill=couleur, anchor=ancrage)<|docstring|>Affiche la chaîne ``chaine`` avec ``(x, y)`` comme point d'ancrage (par
défaut le coin supérieur gauche).
:param float x: abscisse du point d'ancrage
:param float y: ordonnée du point d'ancrage
:param str couleur: couleur de trait (défaut 'black')
:param ancrage: position du point d'ancrage (défaut 'nw')
:param police: police de caractères (défaut : 'Purisa')
:param taille: taille de police (défaut 24)
:param tag: étiquette d'objet (défaut : pas d'étiquette
:return: identificateur d'objet<|endoftext|> |
bf4530fde094da45777558289098a79929a0dd2f4e88762f5e9bf42d0190fbe8 | def longueur_texte(chaine):
'\n Donne la longueur en pixel nécessaire pour afficher ``chaine``.\n\n :param str chaine: chaîne à mesurer\n :return: longueur de la chaîne en pixels (int)\n '
global __canevas
return __canevas.tkfont.measure(chaine) | Donne la longueur en pixel nécessaire pour afficher ``chaine``.
:param str chaine: chaîne à mesurer
:return: longueur de la chaîne en pixels (int) | upemtk.py | longueur_texte | Berachem/Azul-Game | 3 | python | def longueur_texte(chaine):
'\n Donne la longueur en pixel nécessaire pour afficher ``chaine``.\n\n :param str chaine: chaîne à mesurer\n :return: longueur de la chaîne en pixels (int)\n '
global __canevas
return __canevas.tkfont.measure(chaine) | def longueur_texte(chaine):
'\n Donne la longueur en pixel nécessaire pour afficher ``chaine``.\n\n :param str chaine: chaîne à mesurer\n :return: longueur de la chaîne en pixels (int)\n '
global __canevas
return __canevas.tkfont.measure(chaine)<|docstring|>Donne la longueur en pixel nécessaire pour afficher ``chaine``.
:param str chaine: chaîne à mesurer
:return: longueur de la chaîne en pixels (int)<|endoftext|> |
749a0a05eec772a9db8b00a6560a3dff6787f92d9e0554229137575c70b30b30 | def hauteur_texte():
'\n Donne la hauteur en pixel nécessaire pour afficher du texte.\n\n :return: hauteur en pixels (int)\n '
global __canevas
return __canevas.tkfont.height | Donne la hauteur en pixel nécessaire pour afficher du texte.
:return: hauteur en pixels (int) | upemtk.py | hauteur_texte | Berachem/Azul-Game | 3 | python | def hauteur_texte():
'\n Donne la hauteur en pixel nécessaire pour afficher du texte.\n\n :return: hauteur en pixels (int)\n '
global __canevas
return __canevas.tkfont.height | def hauteur_texte():
'\n Donne la hauteur en pixel nécessaire pour afficher du texte.\n\n :return: hauteur en pixels (int)\n '
global __canevas
return __canevas.tkfont.height<|docstring|>Donne la hauteur en pixel nécessaire pour afficher du texte.
:return: hauteur en pixels (int)<|endoftext|> |
2bbdf667442945a709323836e90d8fa8300504a7ab266cad7f1f963a92dd1f79 | def efface_tout():
'\n Efface la fenêtre.\n '
global __canevas
global __img
__img.clear()
__canevas.canvas.delete('all') | Efface la fenêtre. | upemtk.py | efface_tout | Berachem/Azul-Game | 3 | python | def efface_tout():
'\n \n '
global __canevas
global __img
__img.clear()
__canevas.canvas.delete('all') | def efface_tout():
'\n \n '
global __canevas
global __img
__img.clear()
__canevas.canvas.delete('all')<|docstring|>Efface la fenêtre.<|endoftext|> |
580e585676b546f34b12f165b96ad7bbda83d2aaf4d6f20e5792665e1f50e85b | def efface(objet):
"\n Efface ``objet`` de la fenêtre.\n\n :param: objet ou étiquette d'objet à supprimer\n :type: ``int`` ou ``str``\n "
global __canevas
if (objet in __img):
del __img[objet]
__canevas.canvas.delete(objet) | Efface ``objet`` de la fenêtre.
:param: objet ou étiquette d'objet à supprimer
:type: ``int`` ou ``str`` | upemtk.py | efface | Berachem/Azul-Game | 3 | python | def efface(objet):
"\n Efface ``objet`` de la fenêtre.\n\n :param: objet ou étiquette d'objet à supprimer\n :type: ``int`` ou ``str``\n "
global __canevas
if (objet in __img):
del __img[objet]
__canevas.canvas.delete(objet) | def efface(objet):
"\n Efface ``objet`` de la fenêtre.\n\n :param: objet ou étiquette d'objet à supprimer\n :type: ``int`` ou ``str``\n "
global __canevas
if (objet in __img):
del __img[objet]
__canevas.canvas.delete(objet)<|docstring|>Efface ``objet`` de la fenêtre.
:param: objet ou étiquette d'objet à supprimer
:type: ``int`` ou ``str``<|endoftext|> |
edde05eda8699cea0f43f526ee20424de5dbc78a5c2991bcfc8ca473ecc0d666 | def efface_marque():
'\n Efface la marque créée par la fonction :py:func:``marque``.\n '
efface('marque') | Efface la marque créée par la fonction :py:func:``marque``. | upemtk.py | efface_marque | Berachem/Azul-Game | 3 | python | def efface_marque():
'\n \n '
efface('marque') | def efface_marque():
'\n \n '
efface('marque')<|docstring|>Efface la marque créée par la fonction :py:func:``marque``.<|endoftext|> |
b7fc99239be171f44dc03d4d9c840e27216c3138dc1e866cb1a2f90ff61d798c | def attente_clic():
"Attend que l'utilisateur clique sur la fenêtre et renvoie un triplet ``(\n x, y, type_clic)``, où ``x`` et ``y`` sont l'abscisse et l'ordonnée du\n point cliqué, et type_clic une chaîne valant ``'ClicGauche'`` ou\n ``'ClicDroit'`` selon le type de clic effectué.\n\n :return: un triplet ``(x, y, 'ClicDroit')``, ``(x, y,\n 'ClicGauche')``\n "
while True:
ev = donne_evenement()
type_ev = type_evenement(ev)
if ((type_ev == 'ClicDroit') or (type_ev == 'ClicGauche')):
return (clic_x(ev), clic_y(ev), type_ev)
mise_a_jour() | Attend que l'utilisateur clique sur la fenêtre et renvoie un triplet ``(
x, y, type_clic)``, où ``x`` et ``y`` sont l'abscisse et l'ordonnée du
point cliqué, et type_clic une chaîne valant ``'ClicGauche'`` ou
``'ClicDroit'`` selon le type de clic effectué.
:return: un triplet ``(x, y, 'ClicDroit')``, ``(x, y,
'ClicGauche')`` | upemtk.py | attente_clic | Berachem/Azul-Game | 3 | python | def attente_clic():
"Attend que l'utilisateur clique sur la fenêtre et renvoie un triplet ``(\n x, y, type_clic)``, où ``x`` et ``y`` sont l'abscisse et l'ordonnée du\n point cliqué, et type_clic une chaîne valant ``'ClicGauche'`` ou\n ``'ClicDroit'`` selon le type de clic effectué.\n\n :return: un triplet ``(x, y, 'ClicDroit')``, ``(x, y,\n 'ClicGauche')``\n "
while True:
ev = donne_evenement()
type_ev = type_evenement(ev)
if ((type_ev == 'ClicDroit') or (type_ev == 'ClicGauche')):
return (clic_x(ev), clic_y(ev), type_ev)
mise_a_jour() | def attente_clic():
"Attend que l'utilisateur clique sur la fenêtre et renvoie un triplet ``(\n x, y, type_clic)``, où ``x`` et ``y`` sont l'abscisse et l'ordonnée du\n point cliqué, et type_clic une chaîne valant ``'ClicGauche'`` ou\n ``'ClicDroit'`` selon le type de clic effectué.\n\n :return: un triplet ``(x, y, 'ClicDroit')``, ``(x, y,\n 'ClicGauche')``\n "
while True:
ev = donne_evenement()
type_ev = type_evenement(ev)
if ((type_ev == 'ClicDroit') or (type_ev == 'ClicGauche')):
return (clic_x(ev), clic_y(ev), type_ev)
mise_a_jour()<|docstring|>Attend que l'utilisateur clique sur la fenêtre et renvoie un triplet ``(
x, y, type_clic)``, où ``x`` et ``y`` sont l'abscisse et l'ordonnée du
point cliqué, et type_clic une chaîne valant ``'ClicGauche'`` ou
``'ClicDroit'`` selon le type de clic effectué.
:return: un triplet ``(x, y, 'ClicDroit')``, ``(x, y,
'ClicGauche')``<|endoftext|> |
1ec2b0f942d92a4cb8918a4d1b09443f04a5cfa2bad41d3ea6790377f93989b6 | def attente_touche():
"\n Attend que l'utilisateur appuie sur une touche.\n "
while True:
ev = donne_evenement()
type_ev = type_evenement(ev)
if (type_ev == 'Touche'):
return touche(ev)
mise_a_jour() | Attend que l'utilisateur appuie sur une touche. | upemtk.py | attente_touche | Berachem/Azul-Game | 3 | python | def attente_touche():
"\n \n "
while True:
ev = donne_evenement()
type_ev = type_evenement(ev)
if (type_ev == 'Touche'):
return touche(ev)
mise_a_jour() | def attente_touche():
"\n \n "
while True:
ev = donne_evenement()
type_ev = type_evenement(ev)
if (type_ev == 'Touche'):
return touche(ev)
mise_a_jour()<|docstring|>Attend que l'utilisateur appuie sur une touche.<|endoftext|> |
ec295c02d8d618e651069b0603c4b3e4719b0520fc8d8d7dba8f2831e29473e1 | def attente_touche_jusqua(milliseconds):
"\n Attend que l'utilisateur clique sur la fenêtre pendant le temps indiqué\n "
t1 = (time() + (milliseconds / 1000))
while (time() < t1):
ev = donne_evenement()
typeEv = type_evenement(ev)
if (typeEv == 'Touche'):
return touche(ev)
mise_a_jour()
return None | Attend que l'utilisateur clique sur la fenêtre pendant le temps indiqué | upemtk.py | attente_touche_jusqua | Berachem/Azul-Game | 3 | python | def attente_touche_jusqua(milliseconds):
"\n \n "
t1 = (time() + (milliseconds / 1000))
while (time() < t1):
ev = donne_evenement()
typeEv = type_evenement(ev)
if (typeEv == 'Touche'):
return touche(ev)
mise_a_jour()
return None | def attente_touche_jusqua(milliseconds):
"\n \n "
t1 = (time() + (milliseconds / 1000))
while (time() < t1):
ev = donne_evenement()
typeEv = type_evenement(ev)
if (typeEv == 'Touche'):
return touche(ev)
mise_a_jour()
return None<|docstring|>Attend que l'utilisateur clique sur la fenêtre pendant le temps indiqué<|endoftext|> |
40311d945a07b40563b6ba86f0370d2de8f3887c380ac1df18308c8f915fc8d3 | def attente_clic_ou_touche():
"\n Attend que l'utilisateur clique sur la fenêtre ou appuie sur une touche.\n La fonction renvoie un triplet de la forme ``(x, y, type)`` si l'événement\n est un clic de souris de type ``type`` et de coordonnées ``(x, y)``, ou (-1,\n touche, type) si l'événement est un appui sur la touche ``val``.\n\n :return: ``(x, y, 'ClicDroit')``, ``(x, y, 'ClicGauche')`` ou\n ``(-1, val,\\ 'Touche')``\n\n "
while True:
ev = donne_evenement()
type_ev = type_evenement(ev)
if ('Clic' in type_ev):
return (clic_x(ev), clic_y(ev), type_ev)
elif (type_ev == 'Touche'):
return ((- 1), touche(ev), type_ev)
mise_a_jour() | Attend que l'utilisateur clique sur la fenêtre ou appuie sur une touche.
La fonction renvoie un triplet de la forme ``(x, y, type)`` si l'événement
est un clic de souris de type ``type`` et de coordonnées ``(x, y)``, ou (-1,
touche, type) si l'événement est un appui sur la touche ``val``.
:return: ``(x, y, 'ClicDroit')``, ``(x, y, 'ClicGauche')`` ou
``(-1, val,\ 'Touche')`` | upemtk.py | attente_clic_ou_touche | Berachem/Azul-Game | 3 | python | def attente_clic_ou_touche():
"\n Attend que l'utilisateur clique sur la fenêtre ou appuie sur une touche.\n La fonction renvoie un triplet de la forme ``(x, y, type)`` si l'événement\n est un clic de souris de type ``type`` et de coordonnées ``(x, y)``, ou (-1,\n touche, type) si l'événement est un appui sur la touche ``val``.\n\n :return: ``(x, y, 'ClicDroit')``, ``(x, y, 'ClicGauche')`` ou\n ``(-1, val,\\ 'Touche')``\n\n "
while True:
ev = donne_evenement()
type_ev = type_evenement(ev)
if ('Clic' in type_ev):
return (clic_x(ev), clic_y(ev), type_ev)
elif (type_ev == 'Touche'):
return ((- 1), touche(ev), type_ev)
mise_a_jour() | def attente_clic_ou_touche():
"\n Attend que l'utilisateur clique sur la fenêtre ou appuie sur une touche.\n La fonction renvoie un triplet de la forme ``(x, y, type)`` si l'événement\n est un clic de souris de type ``type`` et de coordonnées ``(x, y)``, ou (-1,\n touche, type) si l'événement est un appui sur la touche ``val``.\n\n :return: ``(x, y, 'ClicDroit')``, ``(x, y, 'ClicGauche')`` ou\n ``(-1, val,\\ 'Touche')``\n\n "
while True:
ev = donne_evenement()
type_ev = type_evenement(ev)
if ('Clic' in type_ev):
return (clic_x(ev), clic_y(ev), type_ev)
elif (type_ev == 'Touche'):
return ((- 1), touche(ev), type_ev)
mise_a_jour()<|docstring|>Attend que l'utilisateur clique sur la fenêtre ou appuie sur une touche.
La fonction renvoie un triplet de la forme ``(x, y, type)`` si l'événement
est un clic de souris de type ``type`` et de coordonnées ``(x, y)``, ou (-1,
touche, type) si l'événement est un appui sur la touche ``val``.
:return: ``(x, y, 'ClicDroit')``, ``(x, y, 'ClicGauche')`` ou
``(-1, val,\ 'Touche')``<|endoftext|> |
b1499e52e5ccf5e766c12f001591913a26f818f66b6f3b5e8d1be75a4387a03b | def clic():
"\n Attend que l'utilisateur clique sur la fenêtre, sans récupérer les\n détails de l'événement.\n "
attente_clic() | Attend que l'utilisateur clique sur la fenêtre, sans récupérer les
détails de l'événement. | upemtk.py | clic | Berachem/Azul-Game | 3 | python | def clic():
"\n Attend que l'utilisateur clique sur la fenêtre, sans récupérer les\n détails de l'événement.\n "
attente_clic() | def clic():
"\n Attend que l'utilisateur clique sur la fenêtre, sans récupérer les\n détails de l'événement.\n "
attente_clic()<|docstring|>Attend que l'utilisateur clique sur la fenêtre, sans récupérer les
détails de l'événement.<|endoftext|> |
8b529185c7fc7cf8556ff340b9a61725fcf2df30f1a96b4227295921964f17e7 | def capture_ecran(file):
"\n Fait une capture d'écran sauvegardée dans ``file.png``.\n "
global __canevas
__canevas.canvas.postscript(file=(file + '.ps'), height=__canevas.height, width=__canevas.width, colormode='color')
subprocess.call('convert -density 150 -geometry 100% -background white -flatten', (file + '.ps'), (file + '.png'), shell=True)
subprocess.call('rm', (file + '.ps'), shell=True) | Fait une capture d'écran sauvegardée dans ``file.png``. | upemtk.py | capture_ecran | Berachem/Azul-Game | 3 | python | def capture_ecran(file):
"\n \n "
global __canevas
__canevas.canvas.postscript(file=(file + '.ps'), height=__canevas.height, width=__canevas.width, colormode='color')
subprocess.call('convert -density 150 -geometry 100% -background white -flatten', (file + '.ps'), (file + '.png'), shell=True)
subprocess.call('rm', (file + '.ps'), shell=True) | def capture_ecran(file):
"\n \n "
global __canevas
__canevas.canvas.postscript(file=(file + '.ps'), height=__canevas.height, width=__canevas.width, colormode='color')
subprocess.call('convert -density 150 -geometry 100% -background white -flatten', (file + '.ps'), (file + '.png'), shell=True)
subprocess.call('rm', (file + '.ps'), shell=True)<|docstring|>Fait une capture d'écran sauvegardée dans ``file.png``.<|endoftext|> |
ff99c8682544fdd71e41c7f2d37b35cd470282c1e617727603918f667869235a | def donne_evenement():
" \n Renvoie l'événement associé à la fenêtre.\n "
global __canevas
if (__canevas is None):
raise FenetreNonCree('La fenêtre n\'a pas été crée avec la fonction "cree_fenetre".')
if (len(__canevas.eventQueue) == 0):
return ('RAS', '')
else:
return __canevas.eventQueue.pop() | Renvoie l'événement associé à la fenêtre. | upemtk.py | donne_evenement | Berachem/Azul-Game | 3 | python | def donne_evenement():
" \n \n "
global __canevas
if (__canevas is None):
raise FenetreNonCree('La fenêtre n\'a pas été crée avec la fonction "cree_fenetre".')
if (len(__canevas.eventQueue) == 0):
return ('RAS', )
else:
return __canevas.eventQueue.pop() | def donne_evenement():
" \n \n "
global __canevas
if (__canevas is None):
raise FenetreNonCree('La fenêtre n\'a pas été crée avec la fonction "cree_fenetre".')
if (len(__canevas.eventQueue) == 0):
return ('RAS', )
else:
return __canevas.eventQueue.pop()<|docstring|>Renvoie l'événement associé à la fenêtre.<|endoftext|> |
11b616ed8cb5939f7616e359f42108b39e9d5a2f9603394718dcb6e8e9171e25 | def type_evenement(evenement):
" \n Renvoie une chaîne donnant le type de ``evenement``. Les types\n possibles sont 'ClicDroit', 'ClicGauche', 'Deplacement', 'Touche' ou 'RAS'.\n "
(nom, ev) = evenement
return nom | Renvoie une chaîne donnant le type de ``evenement``. Les types
possibles sont 'ClicDroit', 'ClicGauche', 'Deplacement', 'Touche' ou 'RAS'. | upemtk.py | type_evenement | Berachem/Azul-Game | 3 | python | def type_evenement(evenement):
" \n Renvoie une chaîne donnant le type de ``evenement``. Les types\n possibles sont 'ClicDroit', 'ClicGauche', 'Deplacement', 'Touche' ou 'RAS'.\n "
(nom, ev) = evenement
return nom | def type_evenement(evenement):
" \n Renvoie une chaîne donnant le type de ``evenement``. Les types\n possibles sont 'ClicDroit', 'ClicGauche', 'Deplacement', 'Touche' ou 'RAS'.\n "
(nom, ev) = evenement
return nom<|docstring|>Renvoie une chaîne donnant le type de ``evenement``. Les types
possibles sont 'ClicDroit', 'ClicGauche', 'Deplacement', 'Touche' ou 'RAS'.<|endoftext|> |
650545594ff42b18f6605d230186c018b96813d04f920d0f0c24fed332f4a45f | def clic_x(evenement):
" \n Renvoie la coordonnée X associé à ``evenement`` qui doit être de type\n 'ClicDroit' ou 'ClicGauche' ou 'Deplacement'\n "
(nom, ev) = evenement
if (not ((nom == 'ClicDroit') or (nom == 'ClicGauche') or (nom == 'Deplacement'))):
raise TypeEvenementNonValide('On ne peut pas utiliser "clic_x" sur un évènement de type', nom)
return ev.x | Renvoie la coordonnée X associé à ``evenement`` qui doit être de type
'ClicDroit' ou 'ClicGauche' ou 'Deplacement' | upemtk.py | clic_x | Berachem/Azul-Game | 3 | python | def clic_x(evenement):
" \n Renvoie la coordonnée X associé à ``evenement`` qui doit être de type\n 'ClicDroit' ou 'ClicGauche' ou 'Deplacement'\n "
(nom, ev) = evenement
if (not ((nom == 'ClicDroit') or (nom == 'ClicGauche') or (nom == 'Deplacement'))):
raise TypeEvenementNonValide('On ne peut pas utiliser "clic_x" sur un évènement de type', nom)
return ev.x | def clic_x(evenement):
" \n Renvoie la coordonnée X associé à ``evenement`` qui doit être de type\n 'ClicDroit' ou 'ClicGauche' ou 'Deplacement'\n "
(nom, ev) = evenement
if (not ((nom == 'ClicDroit') or (nom == 'ClicGauche') or (nom == 'Deplacement'))):
raise TypeEvenementNonValide('On ne peut pas utiliser "clic_x" sur un évènement de type', nom)
return ev.x<|docstring|>Renvoie la coordonnée X associé à ``evenement`` qui doit être de type
'ClicDroit' ou 'ClicGauche' ou 'Deplacement'<|endoftext|> |
c0d7a0ce6c50da6639906a0eeed7d5426960cf6a89f5e1172fb524c70efaa91e | def clic_y(evenement):
" \n Renvoie la coordonnée Y associé à ``evenement``, qui doit être de type\n 'ClicDroit' ou 'ClicGauche' ou 'Deplacement'.\n "
(nom, ev) = evenement
if (not ((nom == 'ClicDroit') or (nom == 'ClicGauche') or (nom == 'Deplacement'))):
raise TypeEvenementNonValide('On ne peut pas utiliser "clic_y" sur un évènement de type', nom)
return ev.y | Renvoie la coordonnée Y associé à ``evenement``, qui doit être de type
'ClicDroit' ou 'ClicGauche' ou 'Deplacement'. | upemtk.py | clic_y | Berachem/Azul-Game | 3 | python | def clic_y(evenement):
" \n Renvoie la coordonnée Y associé à ``evenement``, qui doit être de type\n 'ClicDroit' ou 'ClicGauche' ou 'Deplacement'.\n "
(nom, ev) = evenement
if (not ((nom == 'ClicDroit') or (nom == 'ClicGauche') or (nom == 'Deplacement'))):
raise TypeEvenementNonValide('On ne peut pas utiliser "clic_y" sur un évènement de type', nom)
return ev.y | def clic_y(evenement):
" \n Renvoie la coordonnée Y associé à ``evenement``, qui doit être de type\n 'ClicDroit' ou 'ClicGauche' ou 'Deplacement'.\n "
(nom, ev) = evenement
if (not ((nom == 'ClicDroit') or (nom == 'ClicGauche') or (nom == 'Deplacement'))):
raise TypeEvenementNonValide('On ne peut pas utiliser "clic_y" sur un évènement de type', nom)
return ev.y<|docstring|>Renvoie la coordonnée Y associé à ``evenement``, qui doit être de type
'ClicDroit' ou 'ClicGauche' ou 'Deplacement'.<|endoftext|> |
4937e914a1596fe74227980c3027737059ae91c7cba74079fa8c827a56f9a025 | def touche(evenement):
" \n Renvoie une string correspondant à la touche associé à ``evenement``\n qui doit être de type 'Touche'.\n "
(nom, ev) = evenement
if (not (nom == 'Touche')):
raise TypeEvenementNonValide('On peut pas utiliser "touche" sur un évènement de type', nom)
return ev.keysym | Renvoie une string correspondant à la touche associé à ``evenement``
qui doit être de type 'Touche'. | upemtk.py | touche | Berachem/Azul-Game | 3 | python | def touche(evenement):
" \n Renvoie une string correspondant à la touche associé à ``evenement``\n qui doit être de type 'Touche'.\n "
(nom, ev) = evenement
if (not (nom == 'Touche')):
raise TypeEvenementNonValide('On peut pas utiliser "touche" sur un évènement de type', nom)
return ev.keysym | def touche(evenement):
" \n Renvoie une string correspondant à la touche associé à ``evenement``\n qui doit être de type 'Touche'.\n "
(nom, ev) = evenement
if (not (nom == 'Touche')):
raise TypeEvenementNonValide('On peut pas utiliser "touche" sur un évènement de type', nom)
return ev.keysym<|docstring|>Renvoie une string correspondant à la touche associé à ``evenement``
qui doit être de type 'Touche'.<|endoftext|> |
7c5e349d03e3b703315094844d5734b272d6c8e25b08aab2a531e858ec101251 | def findAnnotationGroupByName(annotationGroups: list, name: str):
'\n Find existing annotation group for name.\n :param annotationGroups: list(AnnotationGroup)\n :param name: Name of group.\n :return: AnnotationGroup or None if not found.\n '
for annotationGroup in annotationGroups:
if (annotationGroup._name == name):
return annotationGroup
return None | Find existing annotation group for name.
:param annotationGroups: list(AnnotationGroup)
:param name: Name of group.
:return: AnnotationGroup or None if not found. | src/scaffoldmaker/annotation/annotationgroup.py | findAnnotationGroupByName | mahyar-osn/scaffoldmaker | 0 | python | def findAnnotationGroupByName(annotationGroups: list, name: str):
'\n Find existing annotation group for name.\n :param annotationGroups: list(AnnotationGroup)\n :param name: Name of group.\n :return: AnnotationGroup or None if not found.\n '
for annotationGroup in annotationGroups:
if (annotationGroup._name == name):
return annotationGroup
return None | def findAnnotationGroupByName(annotationGroups: list, name: str):
'\n Find existing annotation group for name.\n :param annotationGroups: list(AnnotationGroup)\n :param name: Name of group.\n :return: AnnotationGroup or None if not found.\n '
for annotationGroup in annotationGroups:
if (annotationGroup._name == name):
return annotationGroup
return None<|docstring|>Find existing annotation group for name.
:param annotationGroups: list(AnnotationGroup)
:param name: Name of group.
:return: AnnotationGroup or None if not found.<|endoftext|> |
e7a1b5a75393e2c2ea29f87f93caf48684dd5247f4c7e98bf5aae033d2e030ca | def findOrCreateAnnotationGroupForTerm(annotationGroups: list, region, term) -> AnnotationGroup:
'\n Find existing annotation group for term, or create it for region if not found.\n If annotation group created here, append it to annotationGroups.\n :param annotationGroups: list(AnnotationGroup)\n :param region: Zinc region to create group for.\n :param term: Identifier for anatomical term, currently a tuple of name, id.\n :return: AnnotationGroup.\n '
name = term[0]
annotationGroup = findAnnotationGroupByName(annotationGroups, name)
if annotationGroup:
assert (annotationGroup._id == term[1]), (((((("Annotation group '" + name) + "' id '") + term[1]) + "' does not match existing id '") + annotationGroup._id) + "'")
else:
annotationGroup = AnnotationGroup(region, term)
annotationGroups.append(annotationGroup)
return annotationGroup | Find existing annotation group for term, or create it for region if not found.
If annotation group created here, append it to annotationGroups.
:param annotationGroups: list(AnnotationGroup)
:param region: Zinc region to create group for.
:param term: Identifier for anatomical term, currently a tuple of name, id.
:return: AnnotationGroup. | src/scaffoldmaker/annotation/annotationgroup.py | findOrCreateAnnotationGroupForTerm | mahyar-osn/scaffoldmaker | 0 | python | def findOrCreateAnnotationGroupForTerm(annotationGroups: list, region, term) -> AnnotationGroup:
'\n Find existing annotation group for term, or create it for region if not found.\n If annotation group created here, append it to annotationGroups.\n :param annotationGroups: list(AnnotationGroup)\n :param region: Zinc region to create group for.\n :param term: Identifier for anatomical term, currently a tuple of name, id.\n :return: AnnotationGroup.\n '
name = term[0]
annotationGroup = findAnnotationGroupByName(annotationGroups, name)
if annotationGroup:
assert (annotationGroup._id == term[1]), (((((("Annotation group '" + name) + "' id '") + term[1]) + "' does not match existing id '") + annotationGroup._id) + "'")
else:
annotationGroup = AnnotationGroup(region, term)
annotationGroups.append(annotationGroup)
return annotationGroup | def findOrCreateAnnotationGroupForTerm(annotationGroups: list, region, term) -> AnnotationGroup:
'\n Find existing annotation group for term, or create it for region if not found.\n If annotation group created here, append it to annotationGroups.\n :param annotationGroups: list(AnnotationGroup)\n :param region: Zinc region to create group for.\n :param term: Identifier for anatomical term, currently a tuple of name, id.\n :return: AnnotationGroup.\n '
name = term[0]
annotationGroup = findAnnotationGroupByName(annotationGroups, name)
if annotationGroup:
assert (annotationGroup._id == term[1]), (((((("Annotation group '" + name) + "' id '") + term[1]) + "' does not match existing id '") + annotationGroup._id) + "'")
else:
annotationGroup = AnnotationGroup(region, term)
annotationGroups.append(annotationGroup)
return annotationGroup<|docstring|>Find existing annotation group for term, or create it for region if not found.
If annotation group created here, append it to annotationGroups.
:param annotationGroups: list(AnnotationGroup)
:param region: Zinc region to create group for.
:param term: Identifier for anatomical term, currently a tuple of name, id.
:return: AnnotationGroup.<|endoftext|> |
69d778735583bf4e5afa436cb445fdbd0657262f0e4d3912922a10d3bff9625f | def getAnnotationGroupForTerm(annotationGroups: list, term) -> AnnotationGroup:
'\n Get existing annotation group for term. Raise exception if not found.\n :param annotationGroups: list(AnnotationGroup)\n :param term: Identifier for anatomical term, currently a tuple of name, id.\n :return: AnnotationGroup.\n '
name = term[0]
annotationGroup = findAnnotationGroupByName(annotationGroups, name)
if annotationGroup:
assert (annotationGroup._id == term[1]), (((((("Annotation group '" + name) + "' id '") + term[1]) + "' does not match existing id '") + annotationGroup._id) + "'")
return annotationGroup
raise NameError((("Annotation group '" + name) + "' not found.")) | Get existing annotation group for term. Raise exception if not found.
:param annotationGroups: list(AnnotationGroup)
:param term: Identifier for anatomical term, currently a tuple of name, id.
:return: AnnotationGroup. | src/scaffoldmaker/annotation/annotationgroup.py | getAnnotationGroupForTerm | mahyar-osn/scaffoldmaker | 0 | python | def getAnnotationGroupForTerm(annotationGroups: list, term) -> AnnotationGroup:
'\n Get existing annotation group for term. Raise exception if not found.\n :param annotationGroups: list(AnnotationGroup)\n :param term: Identifier for anatomical term, currently a tuple of name, id.\n :return: AnnotationGroup.\n '
name = term[0]
annotationGroup = findAnnotationGroupByName(annotationGroups, name)
if annotationGroup:
assert (annotationGroup._id == term[1]), (((((("Annotation group '" + name) + "' id '") + term[1]) + "' does not match existing id '") + annotationGroup._id) + "'")
return annotationGroup
raise NameError((("Annotation group '" + name) + "' not found.")) | def getAnnotationGroupForTerm(annotationGroups: list, term) -> AnnotationGroup:
'\n Get existing annotation group for term. Raise exception if not found.\n :param annotationGroups: list(AnnotationGroup)\n :param term: Identifier for anatomical term, currently a tuple of name, id.\n :return: AnnotationGroup.\n '
name = term[0]
annotationGroup = findAnnotationGroupByName(annotationGroups, name)
if annotationGroup:
assert (annotationGroup._id == term[1]), (((((("Annotation group '" + name) + "' id '") + term[1]) + "' does not match existing id '") + annotationGroup._id) + "'")
return annotationGroup
raise NameError((("Annotation group '" + name) + "' not found."))<|docstring|>Get existing annotation group for term. Raise exception if not found.
:param annotationGroups: list(AnnotationGroup)
:param term: Identifier for anatomical term, currently a tuple of name, id.
:return: AnnotationGroup.<|endoftext|> |
ed5c80e08a745e98cfe5f7d3e6fd4329ba93fc92678a21a8161db30e0c761b78 | def mergeAnnotationGroups(*annotationGroupsIn):
'\n Merge the supplied sequence of list(annotationGroups) to a single list,\n without duplicates.\n :param annotationGroupsIn: Variable number of list(AnnotationGroup) to merge.\n Groups must be for the same region.\n :return: Merged list(AnnotationGroup)\n '
annotationGroups = []
for agroups in annotationGroupsIn:
for agroup in agroups:
if (not findAnnotationGroupByName(annotationGroups, agroup._name)):
annotationGroups.append(agroup)
return annotationGroups | Merge the supplied sequence of list(annotationGroups) to a single list,
without duplicates.
:param annotationGroupsIn: Variable number of list(AnnotationGroup) to merge.
Groups must be for the same region.
:return: Merged list(AnnotationGroup) | src/scaffoldmaker/annotation/annotationgroup.py | mergeAnnotationGroups | mahyar-osn/scaffoldmaker | 0 | python | def mergeAnnotationGroups(*annotationGroupsIn):
'\n Merge the supplied sequence of list(annotationGroups) to a single list,\n without duplicates.\n :param annotationGroupsIn: Variable number of list(AnnotationGroup) to merge.\n Groups must be for the same region.\n :return: Merged list(AnnotationGroup)\n '
annotationGroups = []
for agroups in annotationGroupsIn:
for agroup in agroups:
if (not findAnnotationGroupByName(annotationGroups, agroup._name)):
annotationGroups.append(agroup)
return annotationGroups | def mergeAnnotationGroups(*annotationGroupsIn):
'\n Merge the supplied sequence of list(annotationGroups) to a single list,\n without duplicates.\n :param annotationGroupsIn: Variable number of list(AnnotationGroup) to merge.\n Groups must be for the same region.\n :return: Merged list(AnnotationGroup)\n '
annotationGroups = []
for agroups in annotationGroupsIn:
for agroup in agroups:
if (not findAnnotationGroupByName(annotationGroups, agroup._name)):
annotationGroups.append(agroup)
return annotationGroups<|docstring|>Merge the supplied sequence of list(annotationGroups) to a single list,
without duplicates.
:param annotationGroupsIn: Variable number of list(AnnotationGroup) to merge.
Groups must be for the same region.
:return: Merged list(AnnotationGroup)<|endoftext|> |
5f8e6944e5d3c8d60352f144996fda912f89e4bfb35f7746a9f250fa6dc90bdf | def __init__(self, region, term):
'\n :param region: The Zinc region the AnnotationGroup is to be made for.\n :param term: Identifier for anatomical term, currently a tuple of name, id.\n e.g. ("heart", "FMA:7088")\n '
self._name = term[0]
self._id = term[1]
fm = region.getFieldmodule()
field = fm.findFieldByName(self._name)
if field.isValid():
self._group = field.castGroup()
assert self._group.isValid(), ('AnnotationGroup found existing non-group field called ' + self._name)
else:
self._group = fm.createFieldGroup()
self._group.setName(self._name)
self._group.setManaged(True) | :param region: The Zinc region the AnnotationGroup is to be made for.
:param term: Identifier for anatomical term, currently a tuple of name, id.
e.g. ("heart", "FMA:7088") | src/scaffoldmaker/annotation/annotationgroup.py | __init__ | mahyar-osn/scaffoldmaker | 0 | python | def __init__(self, region, term):
'\n :param region: The Zinc region the AnnotationGroup is to be made for.\n :param term: Identifier for anatomical term, currently a tuple of name, id.\n e.g. ("heart", "FMA:7088")\n '
self._name = term[0]
self._id = term[1]
fm = region.getFieldmodule()
field = fm.findFieldByName(self._name)
if field.isValid():
self._group = field.castGroup()
assert self._group.isValid(), ('AnnotationGroup found existing non-group field called ' + self._name)
else:
self._group = fm.createFieldGroup()
self._group.setName(self._name)
self._group.setManaged(True) | def __init__(self, region, term):
'\n :param region: The Zinc region the AnnotationGroup is to be made for.\n :param term: Identifier for anatomical term, currently a tuple of name, id.\n e.g. ("heart", "FMA:7088")\n '
self._name = term[0]
self._id = term[1]
fm = region.getFieldmodule()
field = fm.findFieldByName(self._name)
if field.isValid():
self._group = field.castGroup()
assert self._group.isValid(), ('AnnotationGroup found existing non-group field called ' + self._name)
else:
self._group = fm.createFieldGroup()
self._group.setName(self._name)
self._group.setManaged(True)<|docstring|>:param region: The Zinc region the AnnotationGroup is to be made for.
:param term: Identifier for anatomical term, currently a tuple of name, id.
e.g. ("heart", "FMA:7088")<|endoftext|> |
deecfa4e6aab5a5f9aa7f43a51a6d30308d86e1c3e456c64c2ac0327a38bcbf3 | def getFMANumber(self):
'\n :return: Integer FMA number or None.\n '
if (self._id and (self.id[:4] == 'FMA:')):
return int(self._id[4:])
return None | :return: Integer FMA number or None. | src/scaffoldmaker/annotation/annotationgroup.py | getFMANumber | mahyar-osn/scaffoldmaker | 0 | python | def getFMANumber(self):
'\n \n '
if (self._id and (self.id[:4] == 'FMA:')):
return int(self._id[4:])
return None | def getFMANumber(self):
'\n \n '
if (self._id and (self.id[:4] == 'FMA:')):
return int(self._id[4:])
return None<|docstring|>:return: Integer FMA number or None.<|endoftext|> |
44438f9209b784246960dc404c3362b821b731e619aa7eb05ac518dd9e4e5cfd | def getFieldElementGroup(self, mesh):
'\n :param mesh: The Zinc mesh to manage a sub group of.\n :return: The Zinc element group field for mesh in this AnnotationGroup.\n '
elementGroup = self._group.getFieldElementGroup(mesh)
if (not elementGroup.isValid()):
elementGroup = self._group.createFieldElementGroup(mesh)
return elementGroup | :param mesh: The Zinc mesh to manage a sub group of.
:return: The Zinc element group field for mesh in this AnnotationGroup. | src/scaffoldmaker/annotation/annotationgroup.py | getFieldElementGroup | mahyar-osn/scaffoldmaker | 0 | python | def getFieldElementGroup(self, mesh):
'\n :param mesh: The Zinc mesh to manage a sub group of.\n :return: The Zinc element group field for mesh in this AnnotationGroup.\n '
elementGroup = self._group.getFieldElementGroup(mesh)
if (not elementGroup.isValid()):
elementGroup = self._group.createFieldElementGroup(mesh)
return elementGroup | def getFieldElementGroup(self, mesh):
'\n :param mesh: The Zinc mesh to manage a sub group of.\n :return: The Zinc element group field for mesh in this AnnotationGroup.\n '
elementGroup = self._group.getFieldElementGroup(mesh)
if (not elementGroup.isValid()):
elementGroup = self._group.createFieldElementGroup(mesh)
return elementGroup<|docstring|>:param mesh: The Zinc mesh to manage a sub group of.
:return: The Zinc element group field for mesh in this AnnotationGroup.<|endoftext|> |
4323710bee2b621c44ecb395d64db972550b6a99aa1f441907a7056a78dca99a | def getFieldNodeGroup(self, nodeset):
'\n :param nodeset: The Zinc nodeset to manage a sub group of.\n :return: The Zinc node group field for nodeset in this AnnotationGroup.\n '
nodeGroup = self._group.getFieldNodeGroup(nodeset)
if (not nodeGroup.isValid()):
nodeGroup = self._group.createFieldNodeGroup(nodeset)
return nodeGroup | :param nodeset: The Zinc nodeset to manage a sub group of.
:return: The Zinc node group field for nodeset in this AnnotationGroup. | src/scaffoldmaker/annotation/annotationgroup.py | getFieldNodeGroup | mahyar-osn/scaffoldmaker | 0 | python | def getFieldNodeGroup(self, nodeset):
'\n :param nodeset: The Zinc nodeset to manage a sub group of.\n :return: The Zinc node group field for nodeset in this AnnotationGroup.\n '
nodeGroup = self._group.getFieldNodeGroup(nodeset)
if (not nodeGroup.isValid()):
nodeGroup = self._group.createFieldNodeGroup(nodeset)
return nodeGroup | def getFieldNodeGroup(self, nodeset):
'\n :param nodeset: The Zinc nodeset to manage a sub group of.\n :return: The Zinc node group field for nodeset in this AnnotationGroup.\n '
nodeGroup = self._group.getFieldNodeGroup(nodeset)
if (not nodeGroup.isValid()):
nodeGroup = self._group.createFieldNodeGroup(nodeset)
return nodeGroup<|docstring|>:param nodeset: The Zinc nodeset to manage a sub group of.
:return: The Zinc node group field for nodeset in this AnnotationGroup.<|endoftext|> |
fded775292f1daf3b5e95c1322fd4cbb1b49e8b7af7bd46fed1b5bd9609deac5 | def getMeshGroup(self, mesh):
'\n :param mesh: The Zinc mesh to manage a sub group of.\n :return: The Zinc meshGroup for adding elements of mesh in this AnnotationGroup.\n '
return self.getFieldElementGroup(mesh).getMeshGroup() | :param mesh: The Zinc mesh to manage a sub group of.
:return: The Zinc meshGroup for adding elements of mesh in this AnnotationGroup. | src/scaffoldmaker/annotation/annotationgroup.py | getMeshGroup | mahyar-osn/scaffoldmaker | 0 | python | def getMeshGroup(self, mesh):
'\n :param mesh: The Zinc mesh to manage a sub group of.\n :return: The Zinc meshGroup for adding elements of mesh in this AnnotationGroup.\n '
return self.getFieldElementGroup(mesh).getMeshGroup() | def getMeshGroup(self, mesh):
'\n :param mesh: The Zinc mesh to manage a sub group of.\n :return: The Zinc meshGroup for adding elements of mesh in this AnnotationGroup.\n '
return self.getFieldElementGroup(mesh).getMeshGroup()<|docstring|>:param mesh: The Zinc mesh to manage a sub group of.
:return: The Zinc meshGroup for adding elements of mesh in this AnnotationGroup.<|endoftext|> |
f2c3b4fd09bee2cf65804e718c4cc9481d9d2756bdf757d83d7e9e786571fbe6 | def hasMeshGroup(self, mesh):
'\n :param mesh: The Zinc mesh to query a sub group of.\n :return: True if MeshGroup for mesh exists and is not empty, otherwise False.\n '
elementGroup = self._group.getFieldElementGroup(mesh)
return (elementGroup.isValid() and (elementGroup.getMeshGroup().getSize() > 0)) | :param mesh: The Zinc mesh to query a sub group of.
:return: True if MeshGroup for mesh exists and is not empty, otherwise False. | src/scaffoldmaker/annotation/annotationgroup.py | hasMeshGroup | mahyar-osn/scaffoldmaker | 0 | python | def hasMeshGroup(self, mesh):
'\n :param mesh: The Zinc mesh to query a sub group of.\n :return: True if MeshGroup for mesh exists and is not empty, otherwise False.\n '
elementGroup = self._group.getFieldElementGroup(mesh)
return (elementGroup.isValid() and (elementGroup.getMeshGroup().getSize() > 0)) | def hasMeshGroup(self, mesh):
'\n :param mesh: The Zinc mesh to query a sub group of.\n :return: True if MeshGroup for mesh exists and is not empty, otherwise False.\n '
elementGroup = self._group.getFieldElementGroup(mesh)
return (elementGroup.isValid() and (elementGroup.getMeshGroup().getSize() > 0))<|docstring|>:param mesh: The Zinc mesh to query a sub group of.
:return: True if MeshGroup for mesh exists and is not empty, otherwise False.<|endoftext|> |
9cd5369ac195b8a21f8bbc913be3c1f7a6f7100d41ad581e06658693d7de2b47 | def getNodesetGroup(self, nodeset):
'\n :param nodeset: The Zinc nodeset to manage a sub group of.\n :return: The Zinc nodesetGroup for adding nodes from nodeset in this AnnotationGroup.\n '
return self.getFieldNodeGroup(nodeset).getNodesetGroup() | :param nodeset: The Zinc nodeset to manage a sub group of.
:return: The Zinc nodesetGroup for adding nodes from nodeset in this AnnotationGroup. | src/scaffoldmaker/annotation/annotationgroup.py | getNodesetGroup | mahyar-osn/scaffoldmaker | 0 | python | def getNodesetGroup(self, nodeset):
'\n :param nodeset: The Zinc nodeset to manage a sub group of.\n :return: The Zinc nodesetGroup for adding nodes from nodeset in this AnnotationGroup.\n '
return self.getFieldNodeGroup(nodeset).getNodesetGroup() | def getNodesetGroup(self, nodeset):
'\n :param nodeset: The Zinc nodeset to manage a sub group of.\n :return: The Zinc nodesetGroup for adding nodes from nodeset in this AnnotationGroup.\n '
return self.getFieldNodeGroup(nodeset).getNodesetGroup()<|docstring|>:param nodeset: The Zinc nodeset to manage a sub group of.
:return: The Zinc nodesetGroup for adding nodes from nodeset in this AnnotationGroup.<|endoftext|> |
91986d8f41e9626605b813e6f1c825716ea135a07cd9a1c6250e9d630730b59d | def hasNodesetGroup(self, nodeset):
'\n :param nodeset: The Zinc nodeset to query a sub group of.\n :return: True if NodesetGroup for nodeset exists and is not empty, otherwise False.\n '
nodeGroup = self._group.getFieldNodeGroup(nodeset)
return (nodeGroup.isValid() and (nodeGroup.getNodesetGroup().getSize() > 0)) | :param nodeset: The Zinc nodeset to query a sub group of.
:return: True if NodesetGroup for nodeset exists and is not empty, otherwise False. | src/scaffoldmaker/annotation/annotationgroup.py | hasNodesetGroup | mahyar-osn/scaffoldmaker | 0 | python | def hasNodesetGroup(self, nodeset):
'\n :param nodeset: The Zinc nodeset to query a sub group of.\n :return: True if NodesetGroup for nodeset exists and is not empty, otherwise False.\n '
nodeGroup = self._group.getFieldNodeGroup(nodeset)
return (nodeGroup.isValid() and (nodeGroup.getNodesetGroup().getSize() > 0)) | def hasNodesetGroup(self, nodeset):
'\n :param nodeset: The Zinc nodeset to query a sub group of.\n :return: True if NodesetGroup for nodeset exists and is not empty, otherwise False.\n '
nodeGroup = self._group.getFieldNodeGroup(nodeset)
return (nodeGroup.isValid() and (nodeGroup.getNodesetGroup().getSize() > 0))<|docstring|>:param nodeset: The Zinc nodeset to query a sub group of.
:return: True if NodesetGroup for nodeset exists and is not empty, otherwise False.<|endoftext|> |
a1d2db882e61b8cb2a0c41cdd8ba0c5711f575742e4a45cf61871cb19b3282cd | def addSubelements(self):
'\n Call after group is complete and faces have been defined to add faces and\n nodes for elements in group to related subgroups.\n '
self._group.setSubelementHandlingMode(FieldGroup.SUBELEMENT_HANDLING_MODE_FULL)
fm = self._group.getFieldmodule()
for dimension in range(1, 4):
mesh = fm.findMeshByDimension(dimension)
elementGroup = self._group.getFieldElementGroup(mesh)
if elementGroup.isValid():
meshGroup = elementGroup.getMeshGroup()
meshGroup.addElementsConditional(elementGroup) | Call after group is complete and faces have been defined to add faces and
nodes for elements in group to related subgroups. | src/scaffoldmaker/annotation/annotationgroup.py | addSubelements | mahyar-osn/scaffoldmaker | 0 | python | def addSubelements(self):
'\n Call after group is complete and faces have been defined to add faces and\n nodes for elements in group to related subgroups.\n '
self._group.setSubelementHandlingMode(FieldGroup.SUBELEMENT_HANDLING_MODE_FULL)
fm = self._group.getFieldmodule()
for dimension in range(1, 4):
mesh = fm.findMeshByDimension(dimension)
elementGroup = self._group.getFieldElementGroup(mesh)
if elementGroup.isValid():
meshGroup = elementGroup.getMeshGroup()
meshGroup.addElementsConditional(elementGroup) | def addSubelements(self):
'\n Call after group is complete and faces have been defined to add faces and\n nodes for elements in group to related subgroups.\n '
self._group.setSubelementHandlingMode(FieldGroup.SUBELEMENT_HANDLING_MODE_FULL)
fm = self._group.getFieldmodule()
for dimension in range(1, 4):
mesh = fm.findMeshByDimension(dimension)
elementGroup = self._group.getFieldElementGroup(mesh)
if elementGroup.isValid():
meshGroup = elementGroup.getMeshGroup()
meshGroup.addElementsConditional(elementGroup)<|docstring|>Call after group is complete and faces have been defined to add faces and
nodes for elements in group to related subgroups.<|endoftext|> |
1a396f2ff86f0cb09ac5cd3da93f9882adbd218615c2664925023625f768a6f3 | def encode_erc20_asset_data(token_address: str) -> str:
"Encode an ERC20 token address into an asset data string.\n\n :param token_address: the ERC20 token's contract address.\n :returns: hex encoded asset data string, usable in the makerAssetData or\n takerAssetData fields in a 0x order.\n\n >>> encode_erc20_asset_data('0x1dc4c1cefef38a777b15aa20260a54e584b16c48')\n '0xf47261b00000000000000000000000001dc4c1cefef38a777b15aa20260a54e584b16c48'\n "
assert_is_string(token_address, 'token_address')
return ('0x' + abi_utils.simple_encode('ERC20Token(address)', token_address).hex()) | Encode an ERC20 token address into an asset data string.
:param token_address: the ERC20 token's contract address.
:returns: hex encoded asset data string, usable in the makerAssetData or
takerAssetData fields in a 0x order.
>>> encode_erc20_asset_data('0x1dc4c1cefef38a777b15aa20260a54e584b16c48')
'0xf47261b00000000000000000000000001dc4c1cefef38a777b15aa20260a54e584b16c48' | python-packages/order_utils/src/zero_ex/order_utils/asset_data_utils.py | encode_erc20_asset_data | dave4506/0x-monorepo | 2 | python | def encode_erc20_asset_data(token_address: str) -> str:
"Encode an ERC20 token address into an asset data string.\n\n :param token_address: the ERC20 token's contract address.\n :returns: hex encoded asset data string, usable in the makerAssetData or\n takerAssetData fields in a 0x order.\n\n >>> encode_erc20_asset_data('0x1dc4c1cefef38a777b15aa20260a54e584b16c48')\n '0xf47261b00000000000000000000000001dc4c1cefef38a777b15aa20260a54e584b16c48'\n "
assert_is_string(token_address, 'token_address')
return ('0x' + abi_utils.simple_encode('ERC20Token(address)', token_address).hex()) | def encode_erc20_asset_data(token_address: str) -> str:
"Encode an ERC20 token address into an asset data string.\n\n :param token_address: the ERC20 token's contract address.\n :returns: hex encoded asset data string, usable in the makerAssetData or\n takerAssetData fields in a 0x order.\n\n >>> encode_erc20_asset_data('0x1dc4c1cefef38a777b15aa20260a54e584b16c48')\n '0xf47261b00000000000000000000000001dc4c1cefef38a777b15aa20260a54e584b16c48'\n "
assert_is_string(token_address, 'token_address')
return ('0x' + abi_utils.simple_encode('ERC20Token(address)', token_address).hex())<|docstring|>Encode an ERC20 token address into an asset data string.
:param token_address: the ERC20 token's contract address.
:returns: hex encoded asset data string, usable in the makerAssetData or
takerAssetData fields in a 0x order.
>>> encode_erc20_asset_data('0x1dc4c1cefef38a777b15aa20260a54e584b16c48')
'0xf47261b00000000000000000000000001dc4c1cefef38a777b15aa20260a54e584b16c48'<|endoftext|> |
89b26e123852fef71f720e97cb6890ab3551211e191cbbd4a299e214c52d1c7b | def decode_erc20_asset_data(asset_data: str) -> ERC20AssetData:
'Decode an ERC20 asset data hex string.\n\n :param asset_data: String produced by prior call to encode_erc20_asset_data()\n\n >>> decode_erc20_asset_data("0xf47261b00000000000000000000000001dc4c1cefef38a777b15aa20260a54e584b16c48")\n {\'asset_proxy_id\': \'0xf47261b0\', \'token_address\': \'0x1dc4c1cefef38a777b15aa20260a54e584b16c48\'}\n '
assert_is_string(asset_data, 'asset_data')
if (len(asset_data) < ERC20_ASSET_DATA_BYTE_LENGTH):
raise ValueError((('Could not decode ERC20 Proxy Data. Expected length of encoded' + f' data to be at least {str(ERC20_ASSET_DATA_BYTE_LENGTH)}.') + f' Got {str(len(asset_data))}.'))
asset_proxy_id: str = asset_data[0:SELECTOR_LENGTH]
if (asset_proxy_id != abi_utils.method_id('ERC20Token', ['address'])):
raise ValueError((('Could not decode ERC20 Proxy Data. Expected Asset Proxy Id to be' + f" ERC20 ({abi_utils.method_id('ERC20Token', ['address'])})") + f' but got {asset_proxy_id}.'))
token_address = eth_abi.decode_abi(['address'], bytes.fromhex(asset_data[SELECTOR_LENGTH:]))[0]
return {'asset_proxy_id': asset_proxy_id, 'token_address': token_address} | Decode an ERC20 asset data hex string.
:param asset_data: String produced by prior call to encode_erc20_asset_data()
>>> decode_erc20_asset_data("0xf47261b00000000000000000000000001dc4c1cefef38a777b15aa20260a54e584b16c48")
{'asset_proxy_id': '0xf47261b0', 'token_address': '0x1dc4c1cefef38a777b15aa20260a54e584b16c48'} | python-packages/order_utils/src/zero_ex/order_utils/asset_data_utils.py | decode_erc20_asset_data | dave4506/0x-monorepo | 2 | python | def decode_erc20_asset_data(asset_data: str) -> ERC20AssetData:
'Decode an ERC20 asset data hex string.\n\n :param asset_data: String produced by prior call to encode_erc20_asset_data()\n\n >>> decode_erc20_asset_data("0xf47261b00000000000000000000000001dc4c1cefef38a777b15aa20260a54e584b16c48")\n {\'asset_proxy_id\': \'0xf47261b0\', \'token_address\': \'0x1dc4c1cefef38a777b15aa20260a54e584b16c48\'}\n '
assert_is_string(asset_data, 'asset_data')
if (len(asset_data) < ERC20_ASSET_DATA_BYTE_LENGTH):
raise ValueError((('Could not decode ERC20 Proxy Data. Expected length of encoded' + f' data to be at least {str(ERC20_ASSET_DATA_BYTE_LENGTH)}.') + f' Got {str(len(asset_data))}.'))
asset_proxy_id: str = asset_data[0:SELECTOR_LENGTH]
if (asset_proxy_id != abi_utils.method_id('ERC20Token', ['address'])):
raise ValueError((('Could not decode ERC20 Proxy Data. Expected Asset Proxy Id to be' + f" ERC20 ({abi_utils.method_id('ERC20Token', ['address'])})") + f' but got {asset_proxy_id}.'))
token_address = eth_abi.decode_abi(['address'], bytes.fromhex(asset_data[SELECTOR_LENGTH:]))[0]
return {'asset_proxy_id': asset_proxy_id, 'token_address': token_address} | def decode_erc20_asset_data(asset_data: str) -> ERC20AssetData:
'Decode an ERC20 asset data hex string.\n\n :param asset_data: String produced by prior call to encode_erc20_asset_data()\n\n >>> decode_erc20_asset_data("0xf47261b00000000000000000000000001dc4c1cefef38a777b15aa20260a54e584b16c48")\n {\'asset_proxy_id\': \'0xf47261b0\', \'token_address\': \'0x1dc4c1cefef38a777b15aa20260a54e584b16c48\'}\n '
assert_is_string(asset_data, 'asset_data')
if (len(asset_data) < ERC20_ASSET_DATA_BYTE_LENGTH):
raise ValueError((('Could not decode ERC20 Proxy Data. Expected length of encoded' + f' data to be at least {str(ERC20_ASSET_DATA_BYTE_LENGTH)}.') + f' Got {str(len(asset_data))}.'))
asset_proxy_id: str = asset_data[0:SELECTOR_LENGTH]
if (asset_proxy_id != abi_utils.method_id('ERC20Token', ['address'])):
raise ValueError((('Could not decode ERC20 Proxy Data. Expected Asset Proxy Id to be' + f" ERC20 ({abi_utils.method_id('ERC20Token', ['address'])})") + f' but got {asset_proxy_id}.'))
token_address = eth_abi.decode_abi(['address'], bytes.fromhex(asset_data[SELECTOR_LENGTH:]))[0]
return {'asset_proxy_id': asset_proxy_id, 'token_address': token_address}<|docstring|>Decode an ERC20 asset data hex string.
:param asset_data: String produced by prior call to encode_erc20_asset_data()
>>> decode_erc20_asset_data("0xf47261b00000000000000000000000001dc4c1cefef38a777b15aa20260a54e584b16c48")
{'asset_proxy_id': '0xf47261b0', 'token_address': '0x1dc4c1cefef38a777b15aa20260a54e584b16c48'}<|endoftext|> |
3fea14202de89236f7ba598fe56b4331dd66af0bec3922703f7be0f07c666efe | def encode_erc721_asset_data(token_address: str, token_id: int) -> str:
"Encode an ERC721 asset data hex string.\n\n :param token_address: the ERC721 token's contract address.\n :param token_id: the identifier of the asset's instance of the token.\n :returns: hex encoded asset data string, usable in the makerAssetData or\n takerAssetData fields in a 0x order.\n\n >>> encode_erc721_asset_data('0x1dc4c1cefef38a777b15aa20260a54e584b16c48', 1)\n '0x025717920000000000000000000000001dc4c1cefef38a777b15aa20260a54e584b16c480000000000000000000000000000000000000000000000000000000000000001'\n "
assert_is_string(token_address, 'token_address')
assert_is_int(token_id, 'token_id')
return ('0x' + abi_utils.simple_encode('ERC721Token(address,uint256)', token_address, token_id).hex()) | Encode an ERC721 asset data hex string.
:param token_address: the ERC721 token's contract address.
:param token_id: the identifier of the asset's instance of the token.
:returns: hex encoded asset data string, usable in the makerAssetData or
takerAssetData fields in a 0x order.
>>> encode_erc721_asset_data('0x1dc4c1cefef38a777b15aa20260a54e584b16c48', 1)
'0x025717920000000000000000000000001dc4c1cefef38a777b15aa20260a54e584b16c480000000000000000000000000000000000000000000000000000000000000001' | python-packages/order_utils/src/zero_ex/order_utils/asset_data_utils.py | encode_erc721_asset_data | dave4506/0x-monorepo | 2 | python | def encode_erc721_asset_data(token_address: str, token_id: int) -> str:
"Encode an ERC721 asset data hex string.\n\n :param token_address: the ERC721 token's contract address.\n :param token_id: the identifier of the asset's instance of the token.\n :returns: hex encoded asset data string, usable in the makerAssetData or\n takerAssetData fields in a 0x order.\n\n >>> encode_erc721_asset_data('0x1dc4c1cefef38a777b15aa20260a54e584b16c48', 1)\n '0x025717920000000000000000000000001dc4c1cefef38a777b15aa20260a54e584b16c480000000000000000000000000000000000000000000000000000000000000001'\n "
assert_is_string(token_address, 'token_address')
assert_is_int(token_id, 'token_id')
return ('0x' + abi_utils.simple_encode('ERC721Token(address,uint256)', token_address, token_id).hex()) | def encode_erc721_asset_data(token_address: str, token_id: int) -> str:
"Encode an ERC721 asset data hex string.\n\n :param token_address: the ERC721 token's contract address.\n :param token_id: the identifier of the asset's instance of the token.\n :returns: hex encoded asset data string, usable in the makerAssetData or\n takerAssetData fields in a 0x order.\n\n >>> encode_erc721_asset_data('0x1dc4c1cefef38a777b15aa20260a54e584b16c48', 1)\n '0x025717920000000000000000000000001dc4c1cefef38a777b15aa20260a54e584b16c480000000000000000000000000000000000000000000000000000000000000001'\n "
assert_is_string(token_address, 'token_address')
assert_is_int(token_id, 'token_id')
return ('0x' + abi_utils.simple_encode('ERC721Token(address,uint256)', token_address, token_id).hex())<|docstring|>Encode an ERC721 asset data hex string.
:param token_address: the ERC721 token's contract address.
:param token_id: the identifier of the asset's instance of the token.
:returns: hex encoded asset data string, usable in the makerAssetData or
takerAssetData fields in a 0x order.
>>> encode_erc721_asset_data('0x1dc4c1cefef38a777b15aa20260a54e584b16c48', 1)
'0x025717920000000000000000000000001dc4c1cefef38a777b15aa20260a54e584b16c480000000000000000000000000000000000000000000000000000000000000001'<|endoftext|> |
e552e20ccb9a28f045a81043ac549a066cb22e357de3fe39c04e682c7ac54b2c | def decode_erc721_asset_data(asset_data: str) -> ERC721AssetData:
"Decode an ERC721 asset data hex string.\n\n >>> decode_erc721_asset_data('0x025717920000000000000000000000001dc4c1cefef38a777b15aa20260a54e584b16c480000000000000000000000000000000000000000000000000000000000000001')\n {'asset_proxy_id': '0x02571792', 'token_address': '0x1dc4c1cefef38a777b15aa20260a54e584b16c48', 'token_id': 1}\n "
assert_is_string(asset_data, 'asset_data')
if (len(asset_data) < ERC721_ASSET_DATA_MINIMUM_BYTE_LENGTH):
raise ValueError((('Could not decode ERC721 Asset Data. Expected length of encoded' + f'data to be at least {ERC721_ASSET_DATA_MINIMUM_BYTE_LENGTH}. ') + f'Got {len(asset_data)}.'))
asset_proxy_id: str = asset_data[0:SELECTOR_LENGTH]
if (asset_proxy_id != abi_utils.method_id('ERC721Token', ['address', 'uint256'])):
raise ValueError(((('Could not decode ERC721 Asset Data. Expected Asset Proxy Id to be' + f' ERC721 (') + f"{abi_utils.method_id('ERC721Token', ['address', 'uint256'])}") + f'), but got {asset_proxy_id}'))
(token_address, token_id) = eth_abi.decode_abi(['address', 'uint256'], bytes.fromhex(asset_data[SELECTOR_LENGTH:]))
return {'asset_proxy_id': asset_proxy_id, 'token_address': token_address, 'token_id': token_id} | Decode an ERC721 asset data hex string.
>>> decode_erc721_asset_data('0x025717920000000000000000000000001dc4c1cefef38a777b15aa20260a54e584b16c480000000000000000000000000000000000000000000000000000000000000001')
{'asset_proxy_id': '0x02571792', 'token_address': '0x1dc4c1cefef38a777b15aa20260a54e584b16c48', 'token_id': 1} | python-packages/order_utils/src/zero_ex/order_utils/asset_data_utils.py | decode_erc721_asset_data | dave4506/0x-monorepo | 2 | python | def decode_erc721_asset_data(asset_data: str) -> ERC721AssetData:
"Decode an ERC721 asset data hex string.\n\n >>> decode_erc721_asset_data('0x025717920000000000000000000000001dc4c1cefef38a777b15aa20260a54e584b16c480000000000000000000000000000000000000000000000000000000000000001')\n {'asset_proxy_id': '0x02571792', 'token_address': '0x1dc4c1cefef38a777b15aa20260a54e584b16c48', 'token_id': 1}\n "
assert_is_string(asset_data, 'asset_data')
if (len(asset_data) < ERC721_ASSET_DATA_MINIMUM_BYTE_LENGTH):
raise ValueError((('Could not decode ERC721 Asset Data. Expected length of encoded' + f'data to be at least {ERC721_ASSET_DATA_MINIMUM_BYTE_LENGTH}. ') + f'Got {len(asset_data)}.'))
asset_proxy_id: str = asset_data[0:SELECTOR_LENGTH]
if (asset_proxy_id != abi_utils.method_id('ERC721Token', ['address', 'uint256'])):
raise ValueError(((('Could not decode ERC721 Asset Data. Expected Asset Proxy Id to be' + f' ERC721 (') + f"{abi_utils.method_id('ERC721Token', ['address', 'uint256'])}") + f'), but got {asset_proxy_id}'))
(token_address, token_id) = eth_abi.decode_abi(['address', 'uint256'], bytes.fromhex(asset_data[SELECTOR_LENGTH:]))
return {'asset_proxy_id': asset_proxy_id, 'token_address': token_address, 'token_id': token_id} | def decode_erc721_asset_data(asset_data: str) -> ERC721AssetData:
"Decode an ERC721 asset data hex string.\n\n >>> decode_erc721_asset_data('0x025717920000000000000000000000001dc4c1cefef38a777b15aa20260a54e584b16c480000000000000000000000000000000000000000000000000000000000000001')\n {'asset_proxy_id': '0x02571792', 'token_address': '0x1dc4c1cefef38a777b15aa20260a54e584b16c48', 'token_id': 1}\n "
assert_is_string(asset_data, 'asset_data')
if (len(asset_data) < ERC721_ASSET_DATA_MINIMUM_BYTE_LENGTH):
raise ValueError((('Could not decode ERC721 Asset Data. Expected length of encoded' + f'data to be at least {ERC721_ASSET_DATA_MINIMUM_BYTE_LENGTH}. ') + f'Got {len(asset_data)}.'))
asset_proxy_id: str = asset_data[0:SELECTOR_LENGTH]
if (asset_proxy_id != abi_utils.method_id('ERC721Token', ['address', 'uint256'])):
raise ValueError(((('Could not decode ERC721 Asset Data. Expected Asset Proxy Id to be' + f' ERC721 (') + f"{abi_utils.method_id('ERC721Token', ['address', 'uint256'])}") + f'), but got {asset_proxy_id}'))
(token_address, token_id) = eth_abi.decode_abi(['address', 'uint256'], bytes.fromhex(asset_data[SELECTOR_LENGTH:]))
return {'asset_proxy_id': asset_proxy_id, 'token_address': token_address, 'token_id': token_id}<|docstring|>Decode an ERC721 asset data hex string.
>>> decode_erc721_asset_data('0x025717920000000000000000000000001dc4c1cefef38a777b15aa20260a54e584b16c480000000000000000000000000000000000000000000000000000000000000001')
{'asset_proxy_id': '0x02571792', 'token_address': '0x1dc4c1cefef38a777b15aa20260a54e584b16c48', 'token_id': 1}<|endoftext|> |
e2b23cbc559514a09ab088ee01a78664d0a1332a4cb033e2fecf2a8d761217fa | def group_by_cluster(data, clusters, no_clusters):
' returns a nested list of data, sorted by cluster '
ret = [[] for i in range(no_clusters)]
for i in range(len(clusters)):
index = int(clusters[i])
d = data[i]
ret[index].append(d)
return ret | returns a nested list of data, sorted by cluster | serverless/flask-server/matching/utility.py | group_by_cluster | adrianaarcia/YPool | 0 | python | def group_by_cluster(data, clusters, no_clusters):
' '
ret = [[] for i in range(no_clusters)]
for i in range(len(clusters)):
index = int(clusters[i])
d = data[i]
ret[index].append(d)
return ret | def group_by_cluster(data, clusters, no_clusters):
' '
ret = [[] for i in range(no_clusters)]
for i in range(len(clusters)):
index = int(clusters[i])
d = data[i]
ret[index].append(d)
return ret<|docstring|>returns a nested list of data, sorted by cluster<|endoftext|> |
0585ac39d6ea784ee09a1929347afd85755b22e2bad221457806dadd44f8e240 | def sort_by_group(requests):
' given a list of requests, returns a list of lists, where the sublists contain netids of each group '
groups = defaultdict(list)
for req in requests:
if req['matched']:
groups[req['groupId']].append(req)
return list(groups.values()) | given a list of requests, returns a list of lists, where the sublists contain netids of each group | serverless/flask-server/matching/utility.py | sort_by_group | adrianaarcia/YPool | 0 | python | def sort_by_group(requests):
' '
groups = defaultdict(list)
for req in requests:
if req['matched']:
groups[req['groupId']].append(req)
return list(groups.values()) | def sort_by_group(requests):
' '
groups = defaultdict(list)
for req in requests:
if req['matched']:
groups[req['groupId']].append(req)
return list(groups.values())<|docstring|>given a list of requests, returns a list of lists, where the sublists contain netids of each group<|endoftext|> |
b1c35c578d52adf8b82de1d556bba2be95ba8d1bcb2940e13ae0b0ae44263a51 | def match_score(req1, req2):
'Utility tool to evaluate the feasibility of two requests given our evaluation heuristics'
d1 = combine_dt(req1['date'], req1['time'])
d2 = combine_dt(req2['date'], req2['time'])
d_hours = (abs((d1 - d2)).total_seconds() / 3600.0)
if (d_hours > 168):
dt_score = 0
else:
dt_score = (((3.1001984127e-05 * (d_hours ** 2)) - (0.0111607142857 * d_hours)) + 1)
g = abs((int(req1['preferred_group_size']) - int(req2['preferred_group_size'])))
group_sz_score = ((((- 1) / 3) * g) + 1)
if (req1['preferred_car_type'] == req2['preferred_car_type']):
car_sz_score = 1
else:
car_sz_score = 0
if (req1['origin'] == req2['origin']):
origin_score = 1
else:
origin_score = 0
kg = ['Airport-JFK', 'Airport-LGA']
if (req1['destination'] == req2['destination']):
dest_score = 1
elif ((req1['destination'] in kg) and (req2['destination'] in kg)):
dest_score = 0.9
elif (((req1['destination'] in kg) and (req2['destination'] == 'Airport-EWR')) or ((req2['destination'] in kg) and (req1['destination'] == 'Airport-EWR'))):
dest_score = 0.6
elif (((req1['destination'] in kg) and (req2['destination'] == 'Airport-BDL')) or ((req2['destination'] in kg) and (req1['destination'] == 'Airport-BDL'))):
dest_score = 0.2
else:
dest_score = 0
return (((((0.5 * dt_score) + (0.05 * group_sz_score)) + (0.05 * car_sz_score)) + (0.2 * origin_score)) + (0.2 * dest_score)) | Utility tool to evaluate the feasibility of two requests given our evaluation heuristics | serverless/flask-server/matching/utility.py | match_score | adrianaarcia/YPool | 0 | python | def match_score(req1, req2):
d1 = combine_dt(req1['date'], req1['time'])
d2 = combine_dt(req2['date'], req2['time'])
d_hours = (abs((d1 - d2)).total_seconds() / 3600.0)
if (d_hours > 168):
dt_score = 0
else:
dt_score = (((3.1001984127e-05 * (d_hours ** 2)) - (0.0111607142857 * d_hours)) + 1)
g = abs((int(req1['preferred_group_size']) - int(req2['preferred_group_size'])))
group_sz_score = ((((- 1) / 3) * g) + 1)
if (req1['preferred_car_type'] == req2['preferred_car_type']):
car_sz_score = 1
else:
car_sz_score = 0
if (req1['origin'] == req2['origin']):
origin_score = 1
else:
origin_score = 0
kg = ['Airport-JFK', 'Airport-LGA']
if (req1['destination'] == req2['destination']):
dest_score = 1
elif ((req1['destination'] in kg) and (req2['destination'] in kg)):
dest_score = 0.9
elif (((req1['destination'] in kg) and (req2['destination'] == 'Airport-EWR')) or ((req2['destination'] in kg) and (req1['destination'] == 'Airport-EWR'))):
dest_score = 0.6
elif (((req1['destination'] in kg) and (req2['destination'] == 'Airport-BDL')) or ((req2['destination'] in kg) and (req1['destination'] == 'Airport-BDL'))):
dest_score = 0.2
else:
dest_score = 0
return (((((0.5 * dt_score) + (0.05 * group_sz_score)) + (0.05 * car_sz_score)) + (0.2 * origin_score)) + (0.2 * dest_score)) | def match_score(req1, req2):
d1 = combine_dt(req1['date'], req1['time'])
d2 = combine_dt(req2['date'], req2['time'])
d_hours = (abs((d1 - d2)).total_seconds() / 3600.0)
if (d_hours > 168):
dt_score = 0
else:
dt_score = (((3.1001984127e-05 * (d_hours ** 2)) - (0.0111607142857 * d_hours)) + 1)
g = abs((int(req1['preferred_group_size']) - int(req2['preferred_group_size'])))
group_sz_score = ((((- 1) / 3) * g) + 1)
if (req1['preferred_car_type'] == req2['preferred_car_type']):
car_sz_score = 1
else:
car_sz_score = 0
if (req1['origin'] == req2['origin']):
origin_score = 1
else:
origin_score = 0
kg = ['Airport-JFK', 'Airport-LGA']
if (req1['destination'] == req2['destination']):
dest_score = 1
elif ((req1['destination'] in kg) and (req2['destination'] in kg)):
dest_score = 0.9
elif (((req1['destination'] in kg) and (req2['destination'] == 'Airport-EWR')) or ((req2['destination'] in kg) and (req1['destination'] == 'Airport-EWR'))):
dest_score = 0.6
elif (((req1['destination'] in kg) and (req2['destination'] == 'Airport-BDL')) or ((req2['destination'] in kg) and (req1['destination'] == 'Airport-BDL'))):
dest_score = 0.2
else:
dest_score = 0
return (((((0.5 * dt_score) + (0.05 * group_sz_score)) + (0.05 * car_sz_score)) + (0.2 * origin_score)) + (0.2 * dest_score))<|docstring|>Utility tool to evaluate the feasibility of two requests given our evaluation heuristics<|endoftext|> |
b60a9e92961968dc09f5c337458d1bf03dbae625644b1c05ba780f8a6a7852f0 | def group_score(cluster):
'For each group made, finds average value of the feasiblity of the matched group using each member as the head'
if (len(cluster) < 2):
return 0
dat = {}
for req in cluster:
count = 0
for r in cluster:
if (r != req):
v = match_score(req, r)
count += v
dat[req['netId']] = (count / (len(cluster) - 1))
avg = 0
cnt = 0
for item in dat.values():
avg += item
cnt += 1
return (avg / cnt) | For each group made, finds average value of the feasiblity of the matched group using each member as the head | serverless/flask-server/matching/utility.py | group_score | adrianaarcia/YPool | 0 | python | def group_score(cluster):
if (len(cluster) < 2):
return 0
dat = {}
for req in cluster:
count = 0
for r in cluster:
if (r != req):
v = match_score(req, r)
count += v
dat[req['netId']] = (count / (len(cluster) - 1))
avg = 0
cnt = 0
for item in dat.values():
avg += item
cnt += 1
return (avg / cnt) | def group_score(cluster):
if (len(cluster) < 2):
return 0
dat = {}
for req in cluster:
count = 0
for r in cluster:
if (r != req):
v = match_score(req, r)
count += v
dat[req['netId']] = (count / (len(cluster) - 1))
avg = 0
cnt = 0
for item in dat.values():
avg += item
cnt += 1
return (avg / cnt)<|docstring|>For each group made, finds average value of the feasiblity of the matched group using each member as the head<|endoftext|> |
546864d956c495c12c7ef9597e317fbc5b7ff943e7ea459e84a33f85bb8f99a0 | def get_stats(requests):
'Returns a dictionary of relevant stats'
groups = sort_by_group(requests)
no_matched = 0
avg_g_score = 0
for group in groups:
no_matched += len(group)
avg_g_score += group_score(group)
no_groups = len(groups)
if (no_groups > 0):
return {'No. matched': no_matched, 'No. groups': no_groups, 'Avg. group score': (str(round(((100 * avg_g_score) / no_groups), 2)) + '%'), 'Avg. group size': (no_matched / no_groups), 'Match rate': ((no_matched / len(requests)) * 100)}
else:
return {'No. matched': no_matched, 'No. groups': no_groups, 'Avg. group score': '0%', 'Avg. group size': 0, 'Match rate': 0} | Returns a dictionary of relevant stats | serverless/flask-server/matching/utility.py | get_stats | adrianaarcia/YPool | 0 | python | def get_stats(requests):
groups = sort_by_group(requests)
no_matched = 0
avg_g_score = 0
for group in groups:
no_matched += len(group)
avg_g_score += group_score(group)
no_groups = len(groups)
if (no_groups > 0):
return {'No. matched': no_matched, 'No. groups': no_groups, 'Avg. group score': (str(round(((100 * avg_g_score) / no_groups), 2)) + '%'), 'Avg. group size': (no_matched / no_groups), 'Match rate': ((no_matched / len(requests)) * 100)}
else:
return {'No. matched': no_matched, 'No. groups': no_groups, 'Avg. group score': '0%', 'Avg. group size': 0, 'Match rate': 0} | def get_stats(requests):
groups = sort_by_group(requests)
no_matched = 0
avg_g_score = 0
for group in groups:
no_matched += len(group)
avg_g_score += group_score(group)
no_groups = len(groups)
if (no_groups > 0):
return {'No. matched': no_matched, 'No. groups': no_groups, 'Avg. group score': (str(round(((100 * avg_g_score) / no_groups), 2)) + '%'), 'Avg. group size': (no_matched / no_groups), 'Match rate': ((no_matched / len(requests)) * 100)}
else:
return {'No. matched': no_matched, 'No. groups': no_groups, 'Avg. group score': '0%', 'Avg. group size': 0, 'Match rate': 0}<|docstring|>Returns a dictionary of relevant stats<|endoftext|> |
ee0f7598cb05dcafb473bb07c12b22371de177e71fcacf6e2066cc902ed27d47 | def print_stats(stats):
'Prints relevant stats'
for item in stats:
print(f'''
{item}: {stats[item]}''', end='')
print('%') | Prints relevant stats | serverless/flask-server/matching/utility.py | print_stats | adrianaarcia/YPool | 0 | python | def print_stats(stats):
for item in stats:
print(f'
{item}: {stats[item]}', end=)
print('%') | def print_stats(stats):
for item in stats:
print(f'
{item}: {stats[item]}', end=)
print('%')<|docstring|>Prints relevant stats<|endoftext|> |
e8387aedb473371940769c7167b97e06960ede22a6c6d663a90b5ac9734d5134 | def print_groups(requests):
'Prints groups formed from our matching'
groups = sort_by_group(requests)
for i in groups:
print(f"Group ID: {i[0]['groupId']}")
print(f'Group score: {round((group_score(i) * 100), 2)}%')
for request in i:
print(f"net ID: {request['netId']}, date/time: {request['date']} {request['time']}, origin: {request['origin']}, destination: {request['destination']}, pref. group size: {request['preferred_group_size']}, pref. car type: {request['preferred_car_type']}")
print('\n') | Prints groups formed from our matching | serverless/flask-server/matching/utility.py | print_groups | adrianaarcia/YPool | 0 | python | def print_groups(requests):
groups = sort_by_group(requests)
for i in groups:
print(f"Group ID: {i[0]['groupId']}")
print(f'Group score: {round((group_score(i) * 100), 2)}%')
for request in i:
print(f"net ID: {request['netId']}, date/time: {request['date']} {request['time']}, origin: {request['origin']}, destination: {request['destination']}, pref. group size: {request['preferred_group_size']}, pref. car type: {request['preferred_car_type']}")
print('\n') | def print_groups(requests):
groups = sort_by_group(requests)
for i in groups:
print(f"Group ID: {i[0]['groupId']}")
print(f'Group score: {round((group_score(i) * 100), 2)}%')
for request in i:
print(f"net ID: {request['netId']}, date/time: {request['date']} {request['time']}, origin: {request['origin']}, destination: {request['destination']}, pref. group size: {request['preferred_group_size']}, pref. car type: {request['preferred_car_type']}")
print('\n')<|docstring|>Prints groups formed from our matching<|endoftext|> |
e3ec08abafea432221cffc8da1e4bce6c5c24df5891b3d28de894e392a65a760 | def combine_dt(date_str, time_str):
'Utility tool to create datetime object from separate date and time'
date = dt.datetime.strptime(date_str, '%Y-%m-%d').date()
time = dt.datetime.strptime(time_str, '%H:%M').time()
datetime = dt.datetime.combine(date, time)
return datetime | Utility tool to create datetime object from separate date and time | serverless/flask-server/matching/utility.py | combine_dt | adrianaarcia/YPool | 0 | python | def combine_dt(date_str, time_str):
date = dt.datetime.strptime(date_str, '%Y-%m-%d').date()
time = dt.datetime.strptime(time_str, '%H:%M').time()
datetime = dt.datetime.combine(date, time)
return datetime | def combine_dt(date_str, time_str):
date = dt.datetime.strptime(date_str, '%Y-%m-%d').date()
time = dt.datetime.strptime(time_str, '%H:%M').time()
datetime = dt.datetime.combine(date, time)
return datetime<|docstring|>Utility tool to create datetime object from separate date and time<|endoftext|> |
918ebdbd5de6820613bec51292c29d558dac292e0f2fdc1518857ec41ed76fa1 | def mode(lst):
'Returns modal score from a list'
return max(set(lst), key=lst.count) | Returns modal score from a list | serverless/flask-server/matching/utility.py | mode | adrianaarcia/YPool | 0 | python | def mode(lst):
return max(set(lst), key=lst.count) | def mode(lst):
return max(set(lst), key=lst.count)<|docstring|>Returns modal score from a list<|endoftext|> |
dc783266f65187d361fd9029b1cc457c792dfd59e02f2d3e12f80daca660a7d4 | def get_centre_gaussian(xdata, ydata):
'Return the centre of a guassian\n :param xdata and ydata are the x and y that define the guassian\n they can be the x and y of an histogram\n return double that is the centre of the guassian\n '
from lmfit.models import GaussianModel
gmodel = GaussianModel()
params = gmodel.make_params(amplitude=ydata.max(), center=xdata.mean(), sigma=xdata.std())
result = gmodel.fit(ydata, params, x=xdata)
peak = result.values['center']
return peak | Return the centre of a guassian
:param xdata and ydata are the x and y that define the guassian
they can be the x and y of an histogram
return double that is the centre of the guassian | CaRM_HD189733/scripts/emcee_tools.py | get_centre_gaussian | EduardoCristo/CaRM | 0 | python | def get_centre_gaussian(xdata, ydata):
'Return the centre of a guassian\n :param xdata and ydata are the x and y that define the guassian\n they can be the x and y of an histogram\n return double that is the centre of the guassian\n '
from lmfit.models import GaussianModel
gmodel = GaussianModel()
params = gmodel.make_params(amplitude=ydata.max(), center=xdata.mean(), sigma=xdata.std())
result = gmodel.fit(ydata, params, x=xdata)
peak = result.values['center']
return peak | def get_centre_gaussian(xdata, ydata):
'Return the centre of a guassian\n :param xdata and ydata are the x and y that define the guassian\n they can be the x and y of an histogram\n return double that is the centre of the guassian\n '
from lmfit.models import GaussianModel
gmodel = GaussianModel()
params = gmodel.make_params(amplitude=ydata.max(), center=xdata.mean(), sigma=xdata.std())
result = gmodel.fit(ydata, params, x=xdata)
peak = result.values['center']
return peak<|docstring|>Return the centre of a guassian
:param xdata and ydata are the x and y that define the guassian
they can be the x and y of an histogram
return double that is the centre of the guassian<|endoftext|> |
3aa3124c470e727fd9840a2624e403c47469b6be348530fb3d8163fa0d1d769f | def gauspeak(values, nbins):
'Return the centre of a guassian fit to an histrogram of values\n :param values np.array with values for which we will calculate the histogram and the centre of a gaussianfit\n if np.array has more than one parameter it will do each one seperatly\n :param nbins int number of bins used in the histrogram\n return list of the guassian centre fit for each parameter\n '
number_fitted = len(values[(0, :)])
peak = []
for i in range(0, number_fitted):
ydata = np.histogram(values[(:, i)], nbins)[0]
bin_edges = np.histogram(values[(:, i)], nbins)[1]
delta = (bin_edges[2] - bin_edges[1])
xdata = (bin_edges[0:(len(bin_edges) - 1)] + (delta / 2.0))
centre_gaussian = get_centre_gaussian(xdata, ydata)
centre_gaussian = max(centre_gaussian, xdata[0])
centre_gaussian = min(centre_gaussian, xdata[(nbins - 1)])
peak.append(centre_gaussian)
return peak | Return the centre of a guassian fit to an histrogram of values
:param values np.array with values for which we will calculate the histogram and the centre of a gaussianfit
if np.array has more than one parameter it will do each one seperatly
:param nbins int number of bins used in the histrogram
return list of the guassian centre fit for each parameter | CaRM_HD189733/scripts/emcee_tools.py | gauspeak | EduardoCristo/CaRM | 0 | python | def gauspeak(values, nbins):
'Return the centre of a guassian fit to an histrogram of values\n :param values np.array with values for which we will calculate the histogram and the centre of a gaussianfit\n if np.array has more than one parameter it will do each one seperatly\n :param nbins int number of bins used in the histrogram\n return list of the guassian centre fit for each parameter\n '
number_fitted = len(values[(0, :)])
peak = []
for i in range(0, number_fitted):
ydata = np.histogram(values[(:, i)], nbins)[0]
bin_edges = np.histogram(values[(:, i)], nbins)[1]
delta = (bin_edges[2] - bin_edges[1])
xdata = (bin_edges[0:(len(bin_edges) - 1)] + (delta / 2.0))
centre_gaussian = get_centre_gaussian(xdata, ydata)
centre_gaussian = max(centre_gaussian, xdata[0])
centre_gaussian = min(centre_gaussian, xdata[(nbins - 1)])
peak.append(centre_gaussian)
return peak | def gauspeak(values, nbins):
'Return the centre of a guassian fit to an histrogram of values\n :param values np.array with values for which we will calculate the histogram and the centre of a gaussianfit\n if np.array has more than one parameter it will do each one seperatly\n :param nbins int number of bins used in the histrogram\n return list of the guassian centre fit for each parameter\n '
number_fitted = len(values[(0, :)])
peak = []
for i in range(0, number_fitted):
ydata = np.histogram(values[(:, i)], nbins)[0]
bin_edges = np.histogram(values[(:, i)], nbins)[1]
delta = (bin_edges[2] - bin_edges[1])
xdata = (bin_edges[0:(len(bin_edges) - 1)] + (delta / 2.0))
centre_gaussian = get_centre_gaussian(xdata, ydata)
centre_gaussian = max(centre_gaussian, xdata[0])
centre_gaussian = min(centre_gaussian, xdata[(nbins - 1)])
peak.append(centre_gaussian)
return peak<|docstring|>Return the centre of a guassian fit to an histrogram of values
:param values np.array with values for which we will calculate the histogram and the centre of a gaussianfit
if np.array has more than one parameter it will do each one seperatly
:param nbins int number of bins used in the histrogram
return list of the guassian centre fit for each parameter<|endoftext|> |
436f0917a64ef881778c57a42ad0bc6eee6433ff3a6a9316d42295f390835065 | def modepeak(values, nbins):
'Return the mode a distribution by cumputing the histogram\n :param values np.array with values for which we will calculate the mode of the distribution\n if np.array has more than one parameter it will do each one seperatly\n :param nbins int number of bins used in the histrogram\n return list of the mode fit for each parameter\n '
number_fitted = len(values[(0, :)])
peak = []
for i in range(0, number_fitted):
ydata = np.histogram(values[(:, i)], nbins)[0]
bin_edges = np.histogram(values[(:, i)], nbins)[1]
delta = (bin_edges[2] - bin_edges[1])
xdata = (bin_edges[0:(len(bin_edges) - 1)] + (delta / 2.0))
indice = np.argmax(ydata)
peak.append(xdata[indice])
return peak | Return the mode a distribution by cumputing the histogram
:param values np.array with values for which we will calculate the mode of the distribution
if np.array has more than one parameter it will do each one seperatly
:param nbins int number of bins used in the histrogram
return list of the mode fit for each parameter | CaRM_HD189733/scripts/emcee_tools.py | modepeak | EduardoCristo/CaRM | 0 | python | def modepeak(values, nbins):
'Return the mode a distribution by cumputing the histogram\n :param values np.array with values for which we will calculate the mode of the distribution\n if np.array has more than one parameter it will do each one seperatly\n :param nbins int number of bins used in the histrogram\n return list of the mode fit for each parameter\n '
number_fitted = len(values[(0, :)])
peak = []
for i in range(0, number_fitted):
ydata = np.histogram(values[(:, i)], nbins)[0]
bin_edges = np.histogram(values[(:, i)], nbins)[1]
delta = (bin_edges[2] - bin_edges[1])
xdata = (bin_edges[0:(len(bin_edges) - 1)] + (delta / 2.0))
indice = np.argmax(ydata)
peak.append(xdata[indice])
return peak | def modepeak(values, nbins):
'Return the mode a distribution by cumputing the histogram\n :param values np.array with values for which we will calculate the mode of the distribution\n if np.array has more than one parameter it will do each one seperatly\n :param nbins int number of bins used in the histrogram\n return list of the mode fit for each parameter\n '
number_fitted = len(values[(0, :)])
peak = []
for i in range(0, number_fitted):
ydata = np.histogram(values[(:, i)], nbins)[0]
bin_edges = np.histogram(values[(:, i)], nbins)[1]
delta = (bin_edges[2] - bin_edges[1])
xdata = (bin_edges[0:(len(bin_edges) - 1)] + (delta / 2.0))
indice = np.argmax(ydata)
peak.append(xdata[indice])
return peak<|docstring|>Return the mode a distribution by cumputing the histogram
:param values np.array with values for which we will calculate the mode of the distribution
if np.array has more than one parameter it will do each one seperatly
:param nbins int number of bins used in the histrogram
return list of the mode fit for each parameter<|endoftext|> |
512eb1ab9fb894a825fd90bc585ff0d153a186c3dbac5d2332d4c61eac618019 | def get_init_distrib_from_fitvalues(fitted_values):
'Generate the init_distrib dictionary for generate_random_init_pos from fitted_values.\n :param pd.DataFrame fitted_values: Fitted values from a previous run rows are parameter names\n columns are value, sigma+, sigma-\n\n :return dict init_distrib: dictionary of dictionary specifying the parameters "mu" and "sigma"\n of the normal distribution to use for each parameter. First level keys are parameter full\n name. Second is "sigma" and "mu".\n '
init_distrib = {}
for (param, row) in fitted_values.iterrows():
init_distrib[param] = {'mu': row['value'], 'sigma': np.mean([row['sigma+'], row['sigma-']])}
return init_distrib | Generate the init_distrib dictionary for generate_random_init_pos from fitted_values.
:param pd.DataFrame fitted_values: Fitted values from a previous run rows are parameter names
columns are value, sigma+, sigma-
:return dict init_distrib: dictionary of dictionary specifying the parameters "mu" and "sigma"
of the normal distribution to use for each parameter. First level keys are parameter full
name. Second is "sigma" and "mu". | CaRM_HD189733/scripts/emcee_tools.py | get_init_distrib_from_fitvalues | EduardoCristo/CaRM | 0 | python | def get_init_distrib_from_fitvalues(fitted_values):
'Generate the init_distrib dictionary for generate_random_init_pos from fitted_values.\n :param pd.DataFrame fitted_values: Fitted values from a previous run rows are parameter names\n columns are value, sigma+, sigma-\n\n :return dict init_distrib: dictionary of dictionary specifying the parameters "mu" and "sigma"\n of the normal distribution to use for each parameter. First level keys are parameter full\n name. Second is "sigma" and "mu".\n '
init_distrib = {}
for (param, row) in fitted_values.iterrows():
init_distrib[param] = {'mu': row['value'], 'sigma': np.mean([row['sigma+'], row['sigma-']])}
return init_distrib | def get_init_distrib_from_fitvalues(fitted_values):
'Generate the init_distrib dictionary for generate_random_init_pos from fitted_values.\n :param pd.DataFrame fitted_values: Fitted values from a previous run rows are parameter names\n columns are value, sigma+, sigma-\n\n :return dict init_distrib: dictionary of dictionary specifying the parameters "mu" and "sigma"\n of the normal distribution to use for each parameter. First level keys are parameter full\n name. Second is "sigma" and "mu".\n '
init_distrib = {}
for (param, row) in fitted_values.iterrows():
init_distrib[param] = {'mu': row['value'], 'sigma': np.mean([row['sigma+'], row['sigma-']])}
return init_distrib<|docstring|>Generate the init_distrib dictionary for generate_random_init_pos from fitted_values.
:param pd.DataFrame fitted_values: Fitted values from a previous run rows are parameter names
columns are value, sigma+, sigma-
:return dict init_distrib: dictionary of dictionary specifying the parameters "mu" and "sigma"
of the normal distribution to use for each parameter. First level keys are parameter full
name. Second is "sigma" and "mu".<|endoftext|> |
e0905e6891fe6b372c2c8ef6f112da4865d9898fa6f5a75285bcaf8ed1bd9294 | def generate_random_init_pos(nwalker, post_instance, init_distrib=None):
'Generate initial position from for the walkers.\n\n :param int nwalker: number of walkers\n :param Posterior post_instance: Instance of the posterior class\n :param dict init_distrib: dictionary of dictionary specifying the parameters "mu" and "sigma" of\n the normal distribution to use for each parameter. First level keys are parameter full name.\n Second is "sigma" and "mu".\n\n :return np.ndarray p0: Ndarray containing the initial positions for all the walkers\n '
l_param_name = post_instance.lnposteriors.dataset_db['all'].arg_list['param']
p0 = []
if (init_distrib is None):
return post_instance.model.get_initial_values(list_paramnames=l_param_name, nb_values=nwalker).transpose()
else:
for param in l_param_name:
if (param in init_distrib):
p0.append(np.random.normal(loc=init_distrib[param]['mu'], scale=init_distrib[param]['sigma'], size=nwalker))
else:
p0.append(np.squeeze(np.asarray([post_instance.model.get_initial_values(list_paramnames=[param]) for i in range(nwalker)])))
return np.asarray(p0).transpose() | Generate initial position from for the walkers.
:param int nwalker: number of walkers
:param Posterior post_instance: Instance of the posterior class
:param dict init_distrib: dictionary of dictionary specifying the parameters "mu" and "sigma" of
the normal distribution to use for each parameter. First level keys are parameter full name.
Second is "sigma" and "mu".
:return np.ndarray p0: Ndarray containing the initial positions for all the walkers | CaRM_HD189733/scripts/emcee_tools.py | generate_random_init_pos | EduardoCristo/CaRM | 0 | python | def generate_random_init_pos(nwalker, post_instance, init_distrib=None):
'Generate initial position from for the walkers.\n\n :param int nwalker: number of walkers\n :param Posterior post_instance: Instance of the posterior class\n :param dict init_distrib: dictionary of dictionary specifying the parameters "mu" and "sigma" of\n the normal distribution to use for each parameter. First level keys are parameter full name.\n Second is "sigma" and "mu".\n\n :return np.ndarray p0: Ndarray containing the initial positions for all the walkers\n '
l_param_name = post_instance.lnposteriors.dataset_db['all'].arg_list['param']
p0 = []
if (init_distrib is None):
return post_instance.model.get_initial_values(list_paramnames=l_param_name, nb_values=nwalker).transpose()
else:
for param in l_param_name:
if (param in init_distrib):
p0.append(np.random.normal(loc=init_distrib[param]['mu'], scale=init_distrib[param]['sigma'], size=nwalker))
else:
p0.append(np.squeeze(np.asarray([post_instance.model.get_initial_values(list_paramnames=[param]) for i in range(nwalker)])))
return np.asarray(p0).transpose() | def generate_random_init_pos(nwalker, post_instance, init_distrib=None):
'Generate initial position from for the walkers.\n\n :param int nwalker: number of walkers\n :param Posterior post_instance: Instance of the posterior class\n :param dict init_distrib: dictionary of dictionary specifying the parameters "mu" and "sigma" of\n the normal distribution to use for each parameter. First level keys are parameter full name.\n Second is "sigma" and "mu".\n\n :return np.ndarray p0: Ndarray containing the initial positions for all the walkers\n '
l_param_name = post_instance.lnposteriors.dataset_db['all'].arg_list['param']
p0 = []
if (init_distrib is None):
return post_instance.model.get_initial_values(list_paramnames=l_param_name, nb_values=nwalker).transpose()
else:
for param in l_param_name:
if (param in init_distrib):
p0.append(np.random.normal(loc=init_distrib[param]['mu'], scale=init_distrib[param]['sigma'], size=nwalker))
else:
p0.append(np.squeeze(np.asarray([post_instance.model.get_initial_values(list_paramnames=[param]) for i in range(nwalker)])))
return np.asarray(p0).transpose()<|docstring|>Generate initial position from for the walkers.
:param int nwalker: number of walkers
:param Posterior post_instance: Instance of the posterior class
:param dict init_distrib: dictionary of dictionary specifying the parameters "mu" and "sigma" of
the normal distribution to use for each parameter. First level keys are parameter full name.
Second is "sigma" and "mu".
:return np.ndarray p0: Ndarray containing the initial positions for all the walkers<|endoftext|> |
de5b04f366455c02ebe15911dbb9c064cc276a5d3d8a9d897cc3477a58a3ab29 | def explore(sampler, p0, nsteps, save_to_file=False, filename_chain='chain.dat', filename_acceptfrac='acceptfrac.dat', dat_folder=None, overwrite=None, l_param_name=None, logger=None):
'Perform an emcee exploration.\n\n :param emcee.EnsembleSampler sampler: EnsembleSampler instance\n :param array p0: Initial position for each walker and each parameter\n :param bool save_to_file: If True the status of the chains are stored at each iteration in .dat files\n :param str filename_chain: File name to use to save the chains (if save_to_file is True)\n :param str filename_acceptfrac: File name to use to save the acceptance fraction of the chains (if save_to_file is True)\n :param str dat_folder: Folder where the chain and acceptance fraction dat file will be (if save_to_file is True)\n :param bool overwrite: If True already existing .dat files with the same names are automatically overwritten\n :param list_of_str l_param_name: List of the parameter names\n '
if save_to_file:
makedirs(dat_folder, exist_ok=True)
file_chain = join(dat_folder, filename_chain)
file_acceptfrac = join(dat_folder, filename_acceptfrac)
for (filename, cat) in [(file_chain, 'chain'), (file_acceptfrac, 'acceptfrac')]:
if isfile(filename):
if (overwrite is None):
l_reponses_possibles = ['y', 'n']
question = 'File {} already exists. Do you want to continue and overwrite it ? {}\n'.format(filename, l_reponses_possibles)
rep = QCM_utilisateur(question, l_reponses_possibles)
overwrite = (rep == 'y')
else:
overwrite = True
if overwrite:
if (cat == 'chain'):
with open(filename, 'w') as f:
f.write('i_walker\t{:s}\n'.format('\t'.join((l_param_name + ['lnposterior']))))
else:
raise ValueError('{} correspond to an existing file.'.format(filename))
if (logger is None):
tqdm_out = None
else:
tqdm_out = TqdmToLogger(logger, level=INFO)
with tqdm(total=nsteps, file=tqdm_out) as pbar:
previous_i = (- 1)
for (i, result) in enumerate(sampler.sample(p0, iterations=nsteps, storechain=True)):
position = result[0]
lnprob = result[1]
if save_to_file:
with open(filename_chain, 'a') as f:
for k in range(position.shape[0]):
f.write('{:4d} {:s} {:>16.14g}\n'.format(k, ' '.join(['{:>16.14g}'.format(xx) for xx in position[k]]), lnprob[k]))
acceptance_fraction = sampler.acceptance_fraction
with open(filename_acceptfrac, 'w') as f:
for (k, acceptfrac) in enumerate(acceptance_fraction):
f.write('{:4d} {:>15f}\n'.format(k, acceptfrac))
pbar.update((i - previous_i))
previous_i = i
return result | Perform an emcee exploration.
:param emcee.EnsembleSampler sampler: EnsembleSampler instance
:param array p0: Initial position for each walker and each parameter
:param bool save_to_file: If True the status of the chains are stored at each iteration in .dat files
:param str filename_chain: File name to use to save the chains (if save_to_file is True)
:param str filename_acceptfrac: File name to use to save the acceptance fraction of the chains (if save_to_file is True)
:param str dat_folder: Folder where the chain and acceptance fraction dat file will be (if save_to_file is True)
:param bool overwrite: If True already existing .dat files with the same names are automatically overwritten
:param list_of_str l_param_name: List of the parameter names | CaRM_HD189733/scripts/emcee_tools.py | explore | EduardoCristo/CaRM | 0 | python | def explore(sampler, p0, nsteps, save_to_file=False, filename_chain='chain.dat', filename_acceptfrac='acceptfrac.dat', dat_folder=None, overwrite=None, l_param_name=None, logger=None):
'Perform an emcee exploration.\n\n :param emcee.EnsembleSampler sampler: EnsembleSampler instance\n :param array p0: Initial position for each walker and each parameter\n :param bool save_to_file: If True the status of the chains are stored at each iteration in .dat files\n :param str filename_chain: File name to use to save the chains (if save_to_file is True)\n :param str filename_acceptfrac: File name to use to save the acceptance fraction of the chains (if save_to_file is True)\n :param str dat_folder: Folder where the chain and acceptance fraction dat file will be (if save_to_file is True)\n :param bool overwrite: If True already existing .dat files with the same names are automatically overwritten\n :param list_of_str l_param_name: List of the parameter names\n '
if save_to_file:
makedirs(dat_folder, exist_ok=True)
file_chain = join(dat_folder, filename_chain)
file_acceptfrac = join(dat_folder, filename_acceptfrac)
for (filename, cat) in [(file_chain, 'chain'), (file_acceptfrac, 'acceptfrac')]:
if isfile(filename):
if (overwrite is None):
l_reponses_possibles = ['y', 'n']
question = 'File {} already exists. Do you want to continue and overwrite it ? {}\n'.format(filename, l_reponses_possibles)
rep = QCM_utilisateur(question, l_reponses_possibles)
overwrite = (rep == 'y')
else:
overwrite = True
if overwrite:
if (cat == 'chain'):
with open(filename, 'w') as f:
f.write('i_walker\t{:s}\n'.format('\t'.join((l_param_name + ['lnposterior']))))
else:
raise ValueError('{} correspond to an existing file.'.format(filename))
if (logger is None):
tqdm_out = None
else:
tqdm_out = TqdmToLogger(logger, level=INFO)
with tqdm(total=nsteps, file=tqdm_out) as pbar:
previous_i = (- 1)
for (i, result) in enumerate(sampler.sample(p0, iterations=nsteps, storechain=True)):
position = result[0]
lnprob = result[1]
if save_to_file:
with open(filename_chain, 'a') as f:
for k in range(position.shape[0]):
f.write('{:4d} {:s} {:>16.14g}\n'.format(k, ' '.join(['{:>16.14g}'.format(xx) for xx in position[k]]), lnprob[k]))
acceptance_fraction = sampler.acceptance_fraction
with open(filename_acceptfrac, 'w') as f:
for (k, acceptfrac) in enumerate(acceptance_fraction):
f.write('{:4d} {:>15f}\n'.format(k, acceptfrac))
pbar.update((i - previous_i))
previous_i = i
return result | def explore(sampler, p0, nsteps, save_to_file=False, filename_chain='chain.dat', filename_acceptfrac='acceptfrac.dat', dat_folder=None, overwrite=None, l_param_name=None, logger=None):
'Perform an emcee exploration.\n\n :param emcee.EnsembleSampler sampler: EnsembleSampler instance\n :param array p0: Initial position for each walker and each parameter\n :param bool save_to_file: If True the status of the chains are stored at each iteration in .dat files\n :param str filename_chain: File name to use to save the chains (if save_to_file is True)\n :param str filename_acceptfrac: File name to use to save the acceptance fraction of the chains (if save_to_file is True)\n :param str dat_folder: Folder where the chain and acceptance fraction dat file will be (if save_to_file is True)\n :param bool overwrite: If True already existing .dat files with the same names are automatically overwritten\n :param list_of_str l_param_name: List of the parameter names\n '
if save_to_file:
makedirs(dat_folder, exist_ok=True)
file_chain = join(dat_folder, filename_chain)
file_acceptfrac = join(dat_folder, filename_acceptfrac)
for (filename, cat) in [(file_chain, 'chain'), (file_acceptfrac, 'acceptfrac')]:
if isfile(filename):
if (overwrite is None):
l_reponses_possibles = ['y', 'n']
question = 'File {} already exists. Do you want to continue and overwrite it ? {}\n'.format(filename, l_reponses_possibles)
rep = QCM_utilisateur(question, l_reponses_possibles)
overwrite = (rep == 'y')
else:
overwrite = True
if overwrite:
if (cat == 'chain'):
with open(filename, 'w') as f:
f.write('i_walker\t{:s}\n'.format('\t'.join((l_param_name + ['lnposterior']))))
else:
raise ValueError('{} correspond to an existing file.'.format(filename))
if (logger is None):
tqdm_out = None
else:
tqdm_out = TqdmToLogger(logger, level=INFO)
with tqdm(total=nsteps, file=tqdm_out) as pbar:
previous_i = (- 1)
for (i, result) in enumerate(sampler.sample(p0, iterations=nsteps, storechain=True)):
position = result[0]
lnprob = result[1]
if save_to_file:
with open(filename_chain, 'a') as f:
for k in range(position.shape[0]):
f.write('{:4d} {:s} {:>16.14g}\n'.format(k, ' '.join(['{:>16.14g}'.format(xx) for xx in position[k]]), lnprob[k]))
acceptance_fraction = sampler.acceptance_fraction
with open(filename_acceptfrac, 'w') as f:
for (k, acceptfrac) in enumerate(acceptance_fraction):
f.write('{:4d} {:>15f}\n'.format(k, acceptfrac))
pbar.update((i - previous_i))
previous_i = i
return result<|docstring|>Perform an emcee exploration.
:param emcee.EnsembleSampler sampler: EnsembleSampler instance
:param array p0: Initial position for each walker and each parameter
:param bool save_to_file: If True the status of the chains are stored at each iteration in .dat files
:param str filename_chain: File name to use to save the chains (if save_to_file is True)
:param str filename_acceptfrac: File name to use to save the acceptance fraction of the chains (if save_to_file is True)
:param str dat_folder: Folder where the chain and acceptance fraction dat file will be (if save_to_file is True)
:param bool overwrite: If True already existing .dat files with the same names are automatically overwritten
:param list_of_str l_param_name: List of the parameter names<|endoftext|> |
20f1d20c4e696ce8a77cef91db7b76abf208ccb4d1d039c4244ebf1551fd3430 | def read_chaindatfile(chaindatfile, walker_col='i_walker', lnpost_col='lnposterior'):
'Read .dat file created by the explore function (save_to_file=True)\n\n The .dat file needs to have a header. The fist column has to be i_walker giving the index of the\n walker. The last column has to be lnposterior giving the log posterior probability\n\n :param str chaindatfile: Path to .dat file\n :param str walker_col: Name of the column containing the index of the walkers\n :param str lnpost_col: Name of the column containing the log posterior probability values\n :return array chains: Array containing the chains formatted as the EnsembleSampler object\n :return array lnpost: Array containing the lnposterior values formatted as the EnsembleSampler\n object\n :return list_of_str l_param: Array containing the lnposterior values formatted as the EnsembleSampler\n object\n '
df = read_table(chaindatfile, sep='\\s+', header=0)
nb_walker = ((df[walker_col].max() - df[walker_col].min()) + 1)
df['iteration'] = (np.array(df.index) // 88)
df.set_index([walker_col, 'iteration'], inplace=True)
l_param = list(df.columns)
l_param.remove(lnpost_col)
return (concatenate([df.loc[(walker, :)][df.columns[:(- 1)]].values[(newaxis, ...)] for walker in range(nb_walker)]), concatenate([df.loc[(walker, :)][lnpost_col].values[(newaxis, ...)] for walker in range(nb_walker)]), l_param) | Read .dat file created by the explore function (save_to_file=True)
The .dat file needs to have a header. The fist column has to be i_walker giving the index of the
walker. The last column has to be lnposterior giving the log posterior probability
:param str chaindatfile: Path to .dat file
:param str walker_col: Name of the column containing the index of the walkers
:param str lnpost_col: Name of the column containing the log posterior probability values
:return array chains: Array containing the chains formatted as the EnsembleSampler object
:return array lnpost: Array containing the lnposterior values formatted as the EnsembleSampler
object
:return list_of_str l_param: Array containing the lnposterior values formatted as the EnsembleSampler
object | CaRM_HD189733/scripts/emcee_tools.py | read_chaindatfile | EduardoCristo/CaRM | 0 | python | def read_chaindatfile(chaindatfile, walker_col='i_walker', lnpost_col='lnposterior'):
'Read .dat file created by the explore function (save_to_file=True)\n\n The .dat file needs to have a header. The fist column has to be i_walker giving the index of the\n walker. The last column has to be lnposterior giving the log posterior probability\n\n :param str chaindatfile: Path to .dat file\n :param str walker_col: Name of the column containing the index of the walkers\n :param str lnpost_col: Name of the column containing the log posterior probability values\n :return array chains: Array containing the chains formatted as the EnsembleSampler object\n :return array lnpost: Array containing the lnposterior values formatted as the EnsembleSampler\n object\n :return list_of_str l_param: Array containing the lnposterior values formatted as the EnsembleSampler\n object\n '
df = read_table(chaindatfile, sep='\\s+', header=0)
nb_walker = ((df[walker_col].max() - df[walker_col].min()) + 1)
df['iteration'] = (np.array(df.index) // 88)
df.set_index([walker_col, 'iteration'], inplace=True)
l_param = list(df.columns)
l_param.remove(lnpost_col)
return (concatenate([df.loc[(walker, :)][df.columns[:(- 1)]].values[(newaxis, ...)] for walker in range(nb_walker)]), concatenate([df.loc[(walker, :)][lnpost_col].values[(newaxis, ...)] for walker in range(nb_walker)]), l_param) | def read_chaindatfile(chaindatfile, walker_col='i_walker', lnpost_col='lnposterior'):
'Read .dat file created by the explore function (save_to_file=True)\n\n The .dat file needs to have a header. The fist column has to be i_walker giving the index of the\n walker. The last column has to be lnposterior giving the log posterior probability\n\n :param str chaindatfile: Path to .dat file\n :param str walker_col: Name of the column containing the index of the walkers\n :param str lnpost_col: Name of the column containing the log posterior probability values\n :return array chains: Array containing the chains formatted as the EnsembleSampler object\n :return array lnpost: Array containing the lnposterior values formatted as the EnsembleSampler\n object\n :return list_of_str l_param: Array containing the lnposterior values formatted as the EnsembleSampler\n object\n '
df = read_table(chaindatfile, sep='\\s+', header=0)
nb_walker = ((df[walker_col].max() - df[walker_col].min()) + 1)
df['iteration'] = (np.array(df.index) // 88)
df.set_index([walker_col, 'iteration'], inplace=True)
l_param = list(df.columns)
l_param.remove(lnpost_col)
return (concatenate([df.loc[(walker, :)][df.columns[:(- 1)]].values[(newaxis, ...)] for walker in range(nb_walker)]), concatenate([df.loc[(walker, :)][lnpost_col].values[(newaxis, ...)] for walker in range(nb_walker)]), l_param)<|docstring|>Read .dat file created by the explore function (save_to_file=True)
The .dat file needs to have a header. The fist column has to be i_walker giving the index of the
walker. The last column has to be lnposterior giving the log posterior probability
:param str chaindatfile: Path to .dat file
:param str walker_col: Name of the column containing the index of the walkers
:param str lnpost_col: Name of the column containing the log posterior probability values
:return array chains: Array containing the chains formatted as the EnsembleSampler object
:return array lnpost: Array containing the lnposterior values formatted as the EnsembleSampler
object
:return list_of_str l_param: Array containing the lnposterior values formatted as the EnsembleSampler
object<|endoftext|> |
77328b0c63c3f295402d7c1feb6b26c6346db5e157b18bbbe5c2fb7067c9b643 | def read_acceptfracdatfile(acceptfracdatfile, walker_col='i_walker', lnpost_col='lnposterior'):
'Read .dat file created by the explore function (save_to_file=True)\n\n The .dat file needs to have a header. The fist column has to be i_walker giving the index of the\n walker. The last column has to be lnposterior giving the log posterior probability\n\n :param str acceptfracdatfile: Path to .dat file\n :return array acceptance_fraction: Array containing the acceptance fraction for each walker.\n '
df = read_table(acceptfracdatfile, sep='\\s+', header=0)
return df[df.columns[(- 1)]].values | Read .dat file created by the explore function (save_to_file=True)
The .dat file needs to have a header. The fist column has to be i_walker giving the index of the
walker. The last column has to be lnposterior giving the log posterior probability
:param str acceptfracdatfile: Path to .dat file
:return array acceptance_fraction: Array containing the acceptance fraction for each walker. | CaRM_HD189733/scripts/emcee_tools.py | read_acceptfracdatfile | EduardoCristo/CaRM | 0 | python | def read_acceptfracdatfile(acceptfracdatfile, walker_col='i_walker', lnpost_col='lnposterior'):
'Read .dat file created by the explore function (save_to_file=True)\n\n The .dat file needs to have a header. The fist column has to be i_walker giving the index of the\n walker. The last column has to be lnposterior giving the log posterior probability\n\n :param str acceptfracdatfile: Path to .dat file\n :return array acceptance_fraction: Array containing the acceptance fraction for each walker.\n '
df = read_table(acceptfracdatfile, sep='\\s+', header=0)
return df[df.columns[(- 1)]].values | def read_acceptfracdatfile(acceptfracdatfile, walker_col='i_walker', lnpost_col='lnposterior'):
'Read .dat file created by the explore function (save_to_file=True)\n\n The .dat file needs to have a header. The fist column has to be i_walker giving the index of the\n walker. The last column has to be lnposterior giving the log posterior probability\n\n :param str acceptfracdatfile: Path to .dat file\n :return array acceptance_fraction: Array containing the acceptance fraction for each walker.\n '
df = read_table(acceptfracdatfile, sep='\\s+', header=0)
return df[df.columns[(- 1)]].values<|docstring|>Read .dat file created by the explore function (save_to_file=True)
The .dat file needs to have a header. The fist column has to be i_walker giving the index of the
walker. The last column has to be lnposterior giving the log posterior probability
:param str acceptfracdatfile: Path to .dat file
:return array acceptance_fraction: Array containing the acceptance fraction for each walker.<|endoftext|> |
def overplot_one_data_model(param, l_param_name, datasim, dataset, datasim_kwargs={}, model_instance=None, oversamp=10, supersamp_model=1, exptime=exptime_Kepler, phasefold=False, phasefold_kwargs=None, datasim_dbf_instmod=None, zoom=None, show_title=True, show_legend=True, ax_data=None, ax_resi=None):
    """Overplot data and model (plus residuals) for one dataset, optionally zoomed/phase-folded.

    :param np.array param: Vector of parameter values for the model.
    :param list_of_string l_param_name: Parameter names matching the values in param.
    :param DatasimDocFunc datasim: Datasimulator for the dataset.
    :param Dataset dataset: Dataset to plot.
    :param dict datasim_kwargs: Extra kwargs merged into the dataset kwargs passed to the simulator.
    :param Core_Model model_instance: Core_Model instance.
    :param int oversamp: The model is computed on oversamp times more points than the data.
    :param int supersamp_model: Number of sub-exposure points averaged per model point.
    :param float exptime: Exposure time used for the supersampling.
    :param bool phasefold: If True, plot phase-folded data/model using phasefold_kwargs ephemeris.
    :param dict phasefold_kwargs: Keys "planet" (str), "P" (float, orbital period),
        "tc" (float, time of inferior conjunction).
    :param dict datasim_dbf_instmod: Per-planet datasim database for this instrument model; used in
        the folded case to isolate the contribution of the folded planet.
    :param None/list_of_float zoom: One [min, max] window, or a list of such windows (one subplot
        pair per window). Interpreted as orbital phases when phasefold is True, else as times.
    :param bool show_title: If True, show the title giving the dataset name.
    :param bool show_legend: If True, show the legend.
    :param ~matplotlib.axes._axes.Axes ax_data: Axes (or list of Axes) for the data+model plot.
    :param ~matplotlib.axes._axes.Axes ax_resi: Axes (or list of Axes) for the residuals plot.
    """
    # Normalize zoom to a list of [min, max] windows; one (data, resi) axes pair per window.
    if (zoom is None):
        zoom = [[None, None]]
        nb_plots = 1
    elif isinstance(zoom[0], Number):
        # A single flat [min, max] pair was given.
        zoom = [zoom]
        nb_plots = 1
    else:
        nb_plots = np.shape(zoom)[0]
    # Create any missing axes; below, ax_data and ax_resi are always handled as lists.
    if ((ax_data is None) and (ax_resi is None)):
        (fig, axes) = subplots(nrows=2, ncols=nb_plots, squeeze=False)
        ax_data = axes[0]
        ax_resi = axes[1]
    elif (ax_data is None):
        (fig, ax_data) = subplots(ncols=nb_plots, squeeze=False)
        ax_data = ax_data[0]
    elif (ax_resi is None):
        (fig, ax_resi) = subplots(ncols=nb_plots, squeeze=False)
        ax_resi = ax_resi[0]
    else:
        # Caller provided both; wrap bare Axes into single-element lists.
        if isinstance(ax_data, Axes):
            ax_data = [ax_data]
        if isinstance(ax_resi, Axes):
            ax_resi = [ax_resi]
    title = '{}'.format(dataset.dataset_name)
    inst_mod = model_instance.get_instmod(dataset.dataset_name)
    noise_mod = mgr_noisemodel.get_noisemodel_subclass(inst_mod.noise_model)
    # Pull t/data/data_err out of the dataset kwargs; the remainder is forwarded to the simulator.
    kwargs = dataset.get_kwargs()
    t = kwargs.pop('t')
    nt = len(t)
    data = kwargs.pop('data')
    data_err = kwargs.pop('data_err')
    kwargs.update(datasim_kwargs)
    # Recover the jitter value (free parameter from param, otherwise the fixed model value)
    # so the plotted error bars include the noise-model jitter term.
    if noise_mod.has_jitter:
        jitter_param_fullname = inst_mod.parameters[jitter_name].get_name(include_prefix=True, recursive=True)
        if inst_mod.parameters[jitter_name].free:
            idx_jitter = l_param_name.index(jitter_param_fullname)
            jitter = param[idx_jitter]
        else:
            jitter = inst_mod.parameters[jitter_name].value
        jitter_type = noise_mod.jitter_type
    else:
        jitter = None
        jitter_type = None
    data_err_new = (data_err if (jitter is None) else apply_jitter(data_err, jitter, jitter_type))
    if phasefold:
        planet_name = phasefold_kwargs['planet']
        P = phasefold_kwargs['P']
        tc = phasefold_kwargs['tc']
        # NOTE(review): no separator before 'pl' — title reads '<dataset>pl <planet>'; confirm intended.
        title += 'pl {}'.format(planet_name)
        datasim_docfunc_pl = datasim_dbf_instmod[planet_name]
        l_datasim_db_docfunc_others = []
        for pl in list(model_instance.planets.keys()):
            if (pl == planet_name):
                continue
            else:
                l_datasim_db_docfunc_others.append(datasim_dbf_instmod[pl])
        # Subtract the modelled contribution of every other planet so the folded data
        # show only the planet whose ephemeris is used for the fold.
        data_pl = data.copy()
        for datasim_db in l_datasim_db_docfunc_others:
            (model, modelwGP, _) = compute_model(t, datasim_db, param, l_param_name, datasim_kwargs=kwargs, supersamp=supersamp_model, exptime=exptime, noise_model=noise_mod, model_instance=model_instance)
            data_pl = (data_pl - model)
        pl_kwargs = {'color': 'b', 'fmt': '.'}
        for (zoom_i, ax_data_i, ax_resi_i) in zip(zoom, ax_data, ax_resi):
            # Errors already include jitter (data_err_new), hence jitter=None here.
            (_, phases) = plot_phase_folded_timeserie(t=t, data=data_pl, P=P, tc=tc, data_err=data_err_new, jitter=None, jitter_type=None, zoom=zoom_i, ax=ax_data_i, pl_kwargs=pl_kwargs)
            # Clip the model phase range to the intersection of the data and the zoom window,
            # then convert back to times for the model computation.
            phasemin = (phases.min() if (zoom_i[0] is None) else max([phases.min(), zoom_i[0]]))
            phasemax = (phases.max() if (zoom_i[1] is None) else min([phases.max(), zoom_i[1]]))
            tmin = (tc + (P * phasemin))
            tmax = (tc + (P * phasemax))
            plot_model(tmin, tmax, (nt * oversamp), datasim_docfunc_pl, param, l_param_name, supersamp=supersamp_model, exptime=exptime, datasim_kwargs={'tref': tmin}, plot_phase=True, P=P, tc=tc, noise_model=noise_mod, model_instance=model_instance, ax=ax_data_i)
            plot_residuals(t, data, datasim_docfunc_pl, param, l_param_name, data_err=data_err_new, jitter=None, jitter_type=None, supersamp=supersamp_model, exptime=exptime, datasim_kwargs=kwargs, plot_phase=True, P=P, tc=tc, noise_model=noise_mod, model_instance=model_instance, ax=ax_resi_i)
    else:
        for (zoom_i, ax_data_i, ax_resi_i) in zip(zoom, ax_data, ax_resi):
            if ((zoom_i[0] is not None) and (zoom_i[1] is not None)):
                # apply_zoom presumably returns [base_array] + arrays restricted to the window,
                # hence indices 0..2 below — TODO confirm against apply_zoom's definition.
                (zoomed_arrays, idx_zoom) = apply_zoom(zoom=zoom_i, base_array=t, arrays=[data, data_err_new])
                t_i = zoomed_arrays[0]
                data_i = zoomed_arrays[1]
                data_err_new_i = zoomed_arrays[2]
            else:
                t_i = t
                data_i = data
                data_err_new_i = data_err_new
            ax_data_i.errorbar(t_i, data_i, data_err_new_i, fmt='.', color='b')
            tmin = t_i.min()
            tmax = t_i.max()
            plot_model(tmin, tmax, (nt * oversamp), datasim, param, l_param_name, datasim_kwargs=kwargs, supersamp=supersamp_model, exptime=exptime, plot_phase=False, noise_model=noise_mod, model_instance=model_instance, ax=ax_data_i)
            plot_residuals(t_i, data_i, datasim, param, l_param_name, data_err=data_err_new_i, jitter=None, jitter_type=None, datasim_kwargs=kwargs, supersamp=supersamp_model, exptime=exptime, plot_phase=False, noise_model=noise_mod, model_instance=model_instance, ax=ax_resi_i)
    # Title/legend go on the first (leftmost) data axes only.
    if show_title:
        ax_data[0].set_title(title)
    if show_legend:
        ax_data[0].legend(loc='upper right', shadow=True)
:param np.array param: Vector of parameter values for the model
:param list_of_string l_param_name: List of parameter name corresponding to the parameter values
provided in param
:param DatasimDocFunc datasim: Datasimulator for the dataset.
:param Dataset dataset: Dataset
:param Core_Model model_instance: Core_Model instance
:param int oversamp: The model will be computed in oversamp times more points than the data
:param int supersamp_model: Each point in which the model is compute will be supersampled by the number
of points provided, meaning that we will actually compute the model at
supersamp_model points spread over the exposure time (exptime) and then
average over this points.
:param float exptime: exposure time for the supersampling
:param bool phasefold: If true the phase folded data and model are plotted accord to the ephemeris
provided in phasefold_kwargs.
:param dict phasefold_kwargs: Kwargs for the phase folded plot with 3 parameters
"planet" giving the planet name (string)
"P" giving the planet orbital period (float)
"tc" giving the time of inferior conjunction for the planet (float)
:param dict datasim_dbf_instmod: Database containing the datasim function per planet. For the folded
plot, we use the models for each planet contribution to be able to display the model and data
correspond only to the planet whose ephemeris is used to phase fold
(datasim_dbf.instrument_db[inst_mod_fullname]).
:param None/list_of_float zoom: If provided the plot will be zoomed. Meaning that the model and data
will only be plotted between two abscisse values. It should be a list-like object with two elements.
zoom[0] give the minimum abscisse value for the zoom and zoom[1] give the maximum. If phasefold
is true the abscissa values are interpreted as orbital phases, if not as times.
You also have the possibility to produce several zooms. In this case, zoom should be an array
or list of list object where zoom[i][0] is the min abscisse value and zoom[i][1] the max.
:param bool show_title: If True, show the title giving the dataset name.
:param bool show_legend: If True, show the legend.
:param ~matplotlib.axes._axes.Axes ax_data: Axes instance where the data and model will be ploted
:param ~matplotlib.axes._axes.Axes ax_resi: Axes instance where the residuals will be ploted | CaRM_HD189733/scripts/emcee_tools.py | overplot_one_data_model | EduardoCristo/CaRM | 0 | python | def overplot_one_data_model(param, l_param_name, datasim, dataset, datasim_kwargs={}, model_instance=None, oversamp=10, supersamp_model=1, exptime=exptime_Kepler, phasefold=False, phasefold_kwargs=None, datasim_dbf_instmod=None, zoom=None, show_title=True, show_legend=True, ax_data=None, ax_resi=None):
'Zoom on the data model overplot for one datasetself.\n\n :param np.array param: Vector of parameter values for the model\n :param list_of_string l_param_name: List of parameter name corresponding to the parameter values\n provided in param\n :param DatasimDocFunc datasim: Datasimulator for the dataset.\n :param Dataset dataset: Dataset\n :param Core_Model model_instance: Core_Model instance\n :param int oversamp: The model will be computed in oversamp times more points than the data\n :param int supersamp_model: Each point in which the model is compute will be supersampled by the number\n of points provided, meaning that we will actually compute the model at\n supersamp_model points spread over the exposure time (exptime) and then\n average over this points.\n :param float exptime: exposure time for the supersampling\n :param bool phasefold: If true the phase folded data and model are plotted accord to the ephemeris\n provided in phasefold_kwargs.\n :param dict phasefold_kwargs: Kwargs for the phase folded plot with 3 parameters\n "planet" giving the planet name (string)\n "P" giving the planet orbital period (float)\n "tc" giving the time of inferior conjunction for the planet (float)\n :param dict datasim_dbf_instmod: Database containing the datasim function per planet. For the folded\n plot, we use the models for each planet contribution to be able to display the model and data\n correspond only to the planet whose ephemeris is used to phase fold\n (datasim_dbf.instrument_db[inst_mod_fullname]).\n :param None/list_of_float zoom: If provided the plot will be zoomed. Meaning that the model and data\n will only be plotted between two abscisse values. It should be a list-like object with two elements.\n zoom[0] give the minimum abscisse value for the zoom and zoom[1] give the maximum. If phasefold\n is true the abscisse values are interpreted ass orbital phases, if not as times.\n You also have the possibility to produce several zooms. 
In this case, zoom should be an array\n or list of list object where zoom[i][0] is the min abscisse value and zoom[i][1] the max.\n :param bool show_title: If True, show the title giving the dataset name.\n :param bool show_legend: If True, show the legend.\n :param ~matplotlib.axes._axes.Axes ax_data: Axes instance where the data and model will be ploted\n :param ~matplotlib.axes._axes.Axes ax_resi: Axes instance where the residuals will be ploted\n '
if (zoom is None):
zoom = [[None, None]]
nb_plots = 1
elif isinstance(zoom[0], Number):
zoom = [zoom]
nb_plots = 1
else:
nb_plots = np.shape(zoom)[0]
if ((ax_data is None) and (ax_resi is None)):
(fig, axes) = subplots(nrows=2, ncols=nb_plots, squeeze=False)
ax_data = axes[0]
ax_resi = axes[1]
elif (ax_data is None):
(fig, ax_data) = subplots(ncols=nb_plots, squeeze=False)
ax_data = ax_data[0]
elif (ax_resi is None):
(fig, ax_resi) = subplots(ncols=nb_plots, squeeze=False)
ax_resi = ax_resi[0]
else:
if isinstance(ax_data, Axes):
ax_data = [ax_data]
if isinstance(ax_resi, Axes):
ax_resi = [ax_resi]
title = '{}'.format(dataset.dataset_name)
inst_mod = model_instance.get_instmod(dataset.dataset_name)
noise_mod = mgr_noisemodel.get_noisemodel_subclass(inst_mod.noise_model)
kwargs = dataset.get_kwargs()
t = kwargs.pop('t')
nt = len(t)
data = kwargs.pop('data')
data_err = kwargs.pop('data_err')
kwargs.update(datasim_kwargs)
if noise_mod.has_jitter:
jitter_param_fullname = inst_mod.parameters[jitter_name].get_name(include_prefix=True, recursive=True)
if inst_mod.parameters[jitter_name].free:
idx_jitter = l_param_name.index(jitter_param_fullname)
jitter = param[idx_jitter]
else:
jitter = inst_mod.parameters[jitter_name].value
jitter_type = noise_mod.jitter_type
else:
jitter = None
jitter_type = None
data_err_new = (data_err if (jitter is None) else apply_jitter(data_err, jitter, jitter_type))
if phasefold:
planet_name = phasefold_kwargs['planet']
P = phasefold_kwargs['P']
tc = phasefold_kwargs['tc']
title += 'pl {}'.format(planet_name)
datasim_docfunc_pl = datasim_dbf_instmod[planet_name]
l_datasim_db_docfunc_others = []
for pl in list(model_instance.planets.keys()):
if (pl == planet_name):
continue
else:
l_datasim_db_docfunc_others.append(datasim_dbf_instmod[pl])
data_pl = data.copy()
for datasim_db in l_datasim_db_docfunc_others:
(model, modelwGP, _) = compute_model(t, datasim_db, param, l_param_name, datasim_kwargs=kwargs, supersamp=supersamp_model, exptime=exptime, noise_model=noise_mod, model_instance=model_instance)
data_pl = (data_pl - model)
pl_kwargs = {'color': 'b', 'fmt': '.'}
for (zoom_i, ax_data_i, ax_resi_i) in zip(zoom, ax_data, ax_resi):
(_, phases) = plot_phase_folded_timeserie(t=t, data=data_pl, P=P, tc=tc, data_err=data_err_new, jitter=None, jitter_type=None, zoom=zoom_i, ax=ax_data_i, pl_kwargs=pl_kwargs)
phasemin = (phases.min() if (zoom_i[0] is None) else max([phases.min(), zoom_i[0]]))
phasemax = (phases.max() if (zoom_i[1] is None) else min([phases.max(), zoom_i[1]]))
tmin = (tc + (P * phasemin))
tmax = (tc + (P * phasemax))
plot_model(tmin, tmax, (nt * oversamp), datasim_docfunc_pl, param, l_param_name, supersamp=supersamp_model, exptime=exptime, datasim_kwargs={'tref': tmin}, plot_phase=True, P=P, tc=tc, noise_model=noise_mod, model_instance=model_instance, ax=ax_data_i)
plot_residuals(t, data, datasim_docfunc_pl, param, l_param_name, data_err=data_err_new, jitter=None, jitter_type=None, supersamp=supersamp_model, exptime=exptime, datasim_kwargs=kwargs, plot_phase=True, P=P, tc=tc, noise_model=noise_mod, model_instance=model_instance, ax=ax_resi_i)
else:
for (zoom_i, ax_data_i, ax_resi_i) in zip(zoom, ax_data, ax_resi):
if ((zoom_i[0] is not None) and (zoom_i[1] is not None)):
(zoomed_arrays, idx_zoom) = apply_zoom(zoom=zoom_i, base_array=t, arrays=[data, data_err_new])
t_i = zoomed_arrays[0]
data_i = zoomed_arrays[1]
data_err_new_i = zoomed_arrays[2]
else:
t_i = t
data_i = data
data_err_new_i = data_err_new
ax_data_i.errorbar(t_i, data_i, data_err_new_i, fmt='.', color='b')
tmin = t_i.min()
tmax = t_i.max()
plot_model(tmin, tmax, (nt * oversamp), datasim, param, l_param_name, datasim_kwargs=kwargs, supersamp=supersamp_model, exptime=exptime, plot_phase=False, noise_model=noise_mod, model_instance=model_instance, ax=ax_data_i)
plot_residuals(t_i, data_i, datasim, param, l_param_name, data_err=data_err_new_i, jitter=None, jitter_type=None, datasim_kwargs=kwargs, supersamp=supersamp_model, exptime=exptime, plot_phase=False, noise_model=noise_mod, model_instance=model_instance, ax=ax_resi_i)
if show_title:
ax_data[0].set_title(title)
if show_legend:
ax_data[0].legend(loc='upper right', shadow=True) | def overplot_one_data_model(param, l_param_name, datasim, dataset, datasim_kwargs={}, model_instance=None, oversamp=10, supersamp_model=1, exptime=exptime_Kepler, phasefold=False, phasefold_kwargs=None, datasim_dbf_instmod=None, zoom=None, show_title=True, show_legend=True, ax_data=None, ax_resi=None):
'Zoom on the data model overplot for one datasetself.\n\n :param np.array param: Vector of parameter values for the model\n :param list_of_string l_param_name: List of parameter name corresponding to the parameter values\n provided in param\n :param DatasimDocFunc datasim: Datasimulator for the dataset.\n :param Dataset dataset: Dataset\n :param Core_Model model_instance: Core_Model instance\n :param int oversamp: The model will be computed in oversamp times more points than the data\n :param int supersamp_model: Each point in which the model is compute will be supersampled by the number\n of points provided, meaning that we will actually compute the model at\n supersamp_model points spread over the exposure time (exptime) and then\n average over this points.\n :param float exptime: exposure time for the supersampling\n :param bool phasefold: If true the phase folded data and model are plotted accord to the ephemeris\n provided in phasefold_kwargs.\n :param dict phasefold_kwargs: Kwargs for the phase folded plot with 3 parameters\n "planet" giving the planet name (string)\n "P" giving the planet orbital period (float)\n "tc" giving the time of inferior conjunction for the planet (float)\n :param dict datasim_dbf_instmod: Database containing the datasim function per planet. For the folded\n plot, we use the models for each planet contribution to be able to display the model and data\n correspond only to the planet whose ephemeris is used to phase fold\n (datasim_dbf.instrument_db[inst_mod_fullname]).\n :param None/list_of_float zoom: If provided the plot will be zoomed. Meaning that the model and data\n will only be plotted between two abscisse values. It should be a list-like object with two elements.\n zoom[0] give the minimum abscisse value for the zoom and zoom[1] give the maximum. If phasefold\n is true the abscisse values are interpreted ass orbital phases, if not as times.\n You also have the possibility to produce several zooms. 
In this case, zoom should be an array\n or list of list object where zoom[i][0] is the min abscisse value and zoom[i][1] the max.\n :param bool show_title: If True, show the title giving the dataset name.\n :param bool show_legend: If True, show the legend.\n :param ~matplotlib.axes._axes.Axes ax_data: Axes instance where the data and model will be ploted\n :param ~matplotlib.axes._axes.Axes ax_resi: Axes instance where the residuals will be ploted\n '
if (zoom is None):
zoom = [[None, None]]
nb_plots = 1
elif isinstance(zoom[0], Number):
zoom = [zoom]
nb_plots = 1
else:
nb_plots = np.shape(zoom)[0]
if ((ax_data is None) and (ax_resi is None)):
(fig, axes) = subplots(nrows=2, ncols=nb_plots, squeeze=False)
ax_data = axes[0]
ax_resi = axes[1]
elif (ax_data is None):
(fig, ax_data) = subplots(ncols=nb_plots, squeeze=False)
ax_data = ax_data[0]
elif (ax_resi is None):
(fig, ax_resi) = subplots(ncols=nb_plots, squeeze=False)
ax_resi = ax_resi[0]
else:
if isinstance(ax_data, Axes):
ax_data = [ax_data]
if isinstance(ax_resi, Axes):
ax_resi = [ax_resi]
title = '{}'.format(dataset.dataset_name)
inst_mod = model_instance.get_instmod(dataset.dataset_name)
noise_mod = mgr_noisemodel.get_noisemodel_subclass(inst_mod.noise_model)
kwargs = dataset.get_kwargs()
t = kwargs.pop('t')
nt = len(t)
data = kwargs.pop('data')
data_err = kwargs.pop('data_err')
kwargs.update(datasim_kwargs)
if noise_mod.has_jitter:
jitter_param_fullname = inst_mod.parameters[jitter_name].get_name(include_prefix=True, recursive=True)
if inst_mod.parameters[jitter_name].free:
idx_jitter = l_param_name.index(jitter_param_fullname)
jitter = param[idx_jitter]
else:
jitter = inst_mod.parameters[jitter_name].value
jitter_type = noise_mod.jitter_type
else:
jitter = None
jitter_type = None
data_err_new = (data_err if (jitter is None) else apply_jitter(data_err, jitter, jitter_type))
if phasefold:
planet_name = phasefold_kwargs['planet']
P = phasefold_kwargs['P']
tc = phasefold_kwargs['tc']
title += 'pl {}'.format(planet_name)
datasim_docfunc_pl = datasim_dbf_instmod[planet_name]
l_datasim_db_docfunc_others = []
for pl in list(model_instance.planets.keys()):
if (pl == planet_name):
continue
else:
l_datasim_db_docfunc_others.append(datasim_dbf_instmod[pl])
data_pl = data.copy()
for datasim_db in l_datasim_db_docfunc_others:
(model, modelwGP, _) = compute_model(t, datasim_db, param, l_param_name, datasim_kwargs=kwargs, supersamp=supersamp_model, exptime=exptime, noise_model=noise_mod, model_instance=model_instance)
data_pl = (data_pl - model)
pl_kwargs = {'color': 'b', 'fmt': '.'}
for (zoom_i, ax_data_i, ax_resi_i) in zip(zoom, ax_data, ax_resi):
(_, phases) = plot_phase_folded_timeserie(t=t, data=data_pl, P=P, tc=tc, data_err=data_err_new, jitter=None, jitter_type=None, zoom=zoom_i, ax=ax_data_i, pl_kwargs=pl_kwargs)
phasemin = (phases.min() if (zoom_i[0] is None) else max([phases.min(), zoom_i[0]]))
phasemax = (phases.max() if (zoom_i[1] is None) else min([phases.max(), zoom_i[1]]))
tmin = (tc + (P * phasemin))
tmax = (tc + (P * phasemax))
plot_model(tmin, tmax, (nt * oversamp), datasim_docfunc_pl, param, l_param_name, supersamp=supersamp_model, exptime=exptime, datasim_kwargs={'tref': tmin}, plot_phase=True, P=P, tc=tc, noise_model=noise_mod, model_instance=model_instance, ax=ax_data_i)
plot_residuals(t, data, datasim_docfunc_pl, param, l_param_name, data_err=data_err_new, jitter=None, jitter_type=None, supersamp=supersamp_model, exptime=exptime, datasim_kwargs=kwargs, plot_phase=True, P=P, tc=tc, noise_model=noise_mod, model_instance=model_instance, ax=ax_resi_i)
else:
for (zoom_i, ax_data_i, ax_resi_i) in zip(zoom, ax_data, ax_resi):
if ((zoom_i[0] is not None) and (zoom_i[1] is not None)):
(zoomed_arrays, idx_zoom) = apply_zoom(zoom=zoom_i, base_array=t, arrays=[data, data_err_new])
t_i = zoomed_arrays[0]
data_i = zoomed_arrays[1]
data_err_new_i = zoomed_arrays[2]
else:
t_i = t
data_i = data
data_err_new_i = data_err_new
ax_data_i.errorbar(t_i, data_i, data_err_new_i, fmt='.', color='b')
tmin = t_i.min()
tmax = t_i.max()
plot_model(tmin, tmax, (nt * oversamp), datasim, param, l_param_name, datasim_kwargs=kwargs, supersamp=supersamp_model, exptime=exptime, plot_phase=False, noise_model=noise_mod, model_instance=model_instance, ax=ax_data_i)
plot_residuals(t_i, data_i, datasim, param, l_param_name, data_err=data_err_new_i, jitter=None, jitter_type=None, datasim_kwargs=kwargs, supersamp=supersamp_model, exptime=exptime, plot_phase=False, noise_model=noise_mod, model_instance=model_instance, ax=ax_resi_i)
if show_title:
ax_data[0].set_title(title)
if show_legend:
ax_data[0].legend(loc='upper right', shadow=True)<|docstring|>Zoom on the data model overplot for one datasetself.
:param np.array param: Vector of parameter values for the model
:param list_of_string l_param_name: List of parameter name corresponding to the parameter values
provided in param
:param DatasimDocFunc datasim: Datasimulator for the dataset.
:param Dataset dataset: Dataset
:param Core_Model model_instance: Core_Model instance
:param int oversamp: The model will be computed in oversamp times more points than the data
:param int supersamp_model: Each point in which the model is compute will be supersampled by the number
of points provided, meaning that we will actually compute the model at
supersamp_model points spread over the exposure time (exptime) and then
average over this points.
:param float exptime: exposure time for the supersampling
:param bool phasefold: If true the phase folded data and model are plotted accord to the ephemeris
provided in phasefold_kwargs.
:param dict phasefold_kwargs: Kwargs for the phase folded plot with 3 parameters
"planet" giving the planet name (string)
"P" giving the planet orbital period (float)
"tc" giving the time of inferior conjunction for the planet (float)
:param dict datasim_dbf_instmod: Database containing the datasim function per planet. For the folded
plot, we use the models for each planet contribution to be able to display the model and data
correspond only to the planet whose ephemeris is used to phase fold
(datasim_dbf.instrument_db[inst_mod_fullname]).
:param None/list_of_float zoom: If provided the plot will be zoomed. Meaning that the model and data
will only be plotted between two abscisse values. It should be a list-like object with two elements.
zoom[0] give the minimum abscisse value for the zoom and zoom[1] give the maximum. If phasefold
is true the abscisse values are interpreted ass orbital phases, if not as times.
You also have the possibility to produce several zooms. In this case, zoom should be an array
or list of list object where zoom[i][0] is the min abscisse value and zoom[i][1] the max.
:param bool show_title: If True, show the title giving the dataset name.
:param bool show_legend: If True, show the legend.
:param ~matplotlib.axes._axes.Axes ax_data: Axes instance where the data and model will be ploted
:param ~matplotlib.axes._axes.Axes ax_resi: Axes instance where the residuals will be ploted<|endoftext|> |
def overplot_data_model(param, l_param_name, datasim_dbf, dataset_db, datasim_kwargs=None, model_instance=None, oversamp=10, supersamp_model=1, exptime=exptime_Kepler, phasefold=False, phasefold_kwargs=None, plot_height=2, plot_width=8, kwargs_tl=None):
    """Overplot datasets and model for each dataset and provide the residuals.

    WARNING: In its current status (using the new overplot_one_data_model function),
    this function has not been tested with phasefold=True.

    :param np.array param: Vector of parameter values for the model.
    :param list_of_string l_param_name: Parameter names matching the values in param.
    :param datasim_dbf: Datasimulator database.
    :param DatasetDatabase dataset_db: Database of the datasets to plot.
    :param dict datasim_kwargs: Extra kwargs forwarded to overplot_one_data_model (default: {}).
    :param Core_Model model_instance: Core_Model instance.
    :param int oversamp: The model is computed on oversamp times more points than the data.
    :param int supersamp_model: Number of sub-exposure points averaged per model point.
    :param float exptime: Exposure time used for the supersampling.
    :param bool phasefold: If True, plot the phase-folded data and model.
    :param dict phasefold_kwargs: Keys "planets" (list of names), "P" (list of periods),
        "tc" (list of times of inferior conjunction), one entry per planet.
    :param float plot_height: Height (inches) allotted to each dataset row.
    :param float plot_width: Width (inches) of the figure.
    :param dict kwargs_tl: Kwargs forwarded to Figure.tight_layout (default: {}).
    :raises ValueError: If phasefold is True but phasefold_kwargs is not provided.
    """
    # Avoid the mutable-default-argument pitfall: normalize None sentinels to fresh dicts.
    if datasim_kwargs is None:
        datasim_kwargs = {}
    if kwargs_tl is None:
        kwargs_tl = {}
    if (phasefold and (phasefold_kwargs is None)):
        raise ValueError('If you want to phase fold, you have to provide the phasefold_kwargs')
    l_datasets = dataset_db.get_datasets()
    ndataset = len(l_datasets)
    # One grid row per dataset; the row height scales the overall figure height.
    fig = figure(figsize=(plot_width, (ndataset * plot_height)))
    gs = GridSpec(nrows=ndataset, ncols=1)
    for (ii, dataset) in enumerate(l_datasets):
        inst_mod_fullname = model_instance.get_instmod_fullname(dataset.dataset_name)
        # 'whole' is the full-model simulator; the per-planet entries live alongside it.
        datasim = datasim_dbf.instrument_db[inst_mod_fullname]['whole']
        datasim_dbf_instmod = datasim_dbf.instrument_db[inst_mod_fullname]
        if phasefold:
            nplanet = (1 if (phasefold_kwargs is None) else len(phasefold_kwargs['planets']))
            (axes_data, axes_resi) = add_twoaxeswithsharex_perplanet(gs[ii], nplanet=nplanet, fig=fig, gs_from_sps_kw={'height_ratios': (3, 1)})
            # One (data, residuals) axes pair per planet, folded on that planet's ephemeris.
            for (planet_name, P, tc, ax_data, ax_resi) in zip(phasefold_kwargs['planets'], phasefold_kwargs['P'], phasefold_kwargs['tc'], axes_data, axes_resi):
                overplot_one_data_model(param=param, l_param_name=l_param_name, datasim=datasim, dataset=dataset, datasim_kwargs=datasim_kwargs, model_instance=model_instance, oversamp=oversamp, supersamp_model=supersamp_model, exptime=exptime, phasefold=phasefold, phasefold_kwargs={'planet': planet_name, 'P': P, 'tc': tc}, datasim_dbf_instmod=datasim_dbf_instmod, zoom=None, show_title=True, show_legend=True, ax_data=ax_data, ax_resi=ax_resi)
        else:
            (ax_data, ax_resi) = add_twoaxeswithsharex(gs[ii], fig=fig, gs_from_sps_kw={'height_ratios': (3, 1)})
            overplot_one_data_model(param=param, l_param_name=l_param_name, datasim=datasim, dataset=dataset, datasim_kwargs=datasim_kwargs, model_instance=model_instance, oversamp=oversamp, supersamp_model=supersamp_model, exptime=exptime, phasefold=phasefold, datasim_dbf_instmod=datasim_dbf_instmod, zoom=None, show_title=True, show_legend=True, ax_data=ax_data, ax_resi=ax_resi)
    fig.tight_layout(**kwargs_tl)
WARNING: In its current status (using the new overplot_one_data_model function). This function has
not been tested with phasefold=True.
:param np.array param: Vector of parameter values for the model
:param list_of_string l_param_name: List of parameter name corresponding to the parameter values
provided in param
:param datasim_dbf: Datasimulator database
:param DatasetDatabase dataset_db:
:param Core_Model model_instance: Core_Model instance
:param int oversamp: The model will be computed in oversamp times more points than the data
:param int supersamp_model: Each point in which the model is compute will be supersampled by the number
of points provided, meaning that we will actually compute the model at
supersamp_model points spread over the exposure time (exptime) and then
average over this points.
:param float exptime: exposure time for the supersampling
:param bool phasefold: If true the phase folded data and model are plotted.
:param dict phasefold_kwargs: Kwargs for the phase folded plot with 3 parameters:
"planets" giving the planet names (list)
"P" giving the planet periods (list)
"tc" giving the times of inferior conjunction for each planet (list)
:param plot_height:
:param plot_width:
:param kwargs_tl: | CaRM_HD189733/scripts/emcee_tools.py | overplot_data_model | EduardoCristo/CaRM | 0 | python | def overplot_data_model(param, l_param_name, datasim_dbf, dataset_db, datasim_kwargs={}, model_instance=None, oversamp=10, supersamp_model=1, exptime=exptime_Kepler, phasefold=False, phasefold_kwargs=None, plot_height=2, plot_width=8, kwargs_tl={}):
'Overplot datasets and model for each dataset and provide the residuals.\n\n WARNING: In its current status (using the new overplot_one_data_model function). This function has\n not been tested with phasefold=True.\n\n :param np.array param: Vector of parameter values for the model\n :param list_of_string l_param_name: List of parameter name corresponding to the parameter values\n provided in param\n :param datasim_dbf: Datasimulator database\n :param DatasetDatabase dataset_db:\n :param Core_Model model_instance: Core_Model instance\n :param int oversamp: The model will be computed in oversamp times more points than the data\n :param int supersamp_model: Each point in which the model is compute will be supersampled by the number\n of points provided, meaning that we will actually compute the model at\n supersamp_model points spread over the exposure time (exptime) and then\n average over this points.\n :param float exptime: exposure time for the supersampling\n :param bool phasefold: If true the phase folded data and model are plotted.\n :param dict phasefold_kwargs: Kwargs for the phase folded plot with 3 parameters:\n "planets" giving the planet names (list)\n "P" giving the planet periods (list)\n "tc" giving the times of inferior conjunction for each planet (list)\n :param plot_height:\n :param plot_width:\n :param kwargs_tl:\n '
if (phasefold and (phasefold_kwargs is None)):
raise ValueError('If you want to phase fold, you have to provide the phasefold_kwargs')
l_datasets = dataset_db.get_datasets()
ndataset = len(l_datasets)
fig = figure(figsize=(plot_width, (ndataset * plot_height)))
gs = GridSpec(nrows=ndataset, ncols=1)
for (ii, dataset) in enumerate(l_datasets):
inst_mod_fullname = model_instance.get_instmod_fullname(dataset.dataset_name)
datasim = datasim_dbf.instrument_db[inst_mod_fullname]['whole']
datasim_dbf_instmod = datasim_dbf.instrument_db[inst_mod_fullname]
if phasefold:
nplanet = (1 if (phasefold_kwargs is None) else len(phasefold_kwargs['planets']))
(axes_data, axes_resi) = add_twoaxeswithsharex_perplanet(gs[ii], nplanet=nplanet, fig=fig, gs_from_sps_kw={'height_ratios': (3, 1)})
for (planet_name, P, tc, ax_data, ax_resi) in zip(phasefold_kwargs['planets'], phasefold_kwargs['P'], phasefold_kwargs['tc'], axes_data, axes_resi):
overplot_one_data_model(param=param, l_param_name=l_param_name, datasim=datasim, dataset=dataset, datasim_kwargs=datasim_kwargs, model_instance=model_instance, oversamp=oversamp, supersamp_model=supersamp_model, exptime=exptime, phasefold=phasefold, phasefold_kwargs={'planet': planet_name, 'P': P, 'tc': tc}, datasim_dbf_instmod=datasim_dbf_instmod, zoom=None, show_title=True, show_legend=True, ax_data=ax_data, ax_resi=ax_resi)
else:
(ax_data, ax_resi) = add_twoaxeswithsharex(gs[ii], fig=fig, gs_from_sps_kw={'height_ratios': (3, 1)})
overplot_one_data_model(param=param, l_param_name=l_param_name, datasim=datasim, dataset=dataset, datasim_kwargs=datasim_kwargs, model_instance=model_instance, oversamp=oversamp, supersamp_model=supersamp_model, exptime=exptime, phasefold=phasefold, datasim_dbf_instmod=datasim_dbf_instmod, zoom=None, show_title=True, show_legend=True, ax_data=ax_data, ax_resi=ax_resi)
fig.tight_layout(**kwargs_tl) | def overplot_data_model(param, l_param_name, datasim_dbf, dataset_db, datasim_kwargs={}, model_instance=None, oversamp=10, supersamp_model=1, exptime=exptime_Kepler, phasefold=False, phasefold_kwargs=None, plot_height=2, plot_width=8, kwargs_tl={}):
'Overplot datasets and model for each dataset and provide the residuals.\n\n WARNING: In its current status (using the new overplot_one_data_model function). This function has\n not been tested with phasefold=True.\n\n :param np.array param: Vector of parameter values for the model\n :param list_of_string l_param_name: List of parameter name corresponding to the parameter values\n provided in param\n :param datasim_dbf: Datasimulator database\n :param DatasetDatabase dataset_db:\n :param Core_Model model_instance: Core_Model instance\n :param int oversamp: The model will be computed in oversamp times more points than the data\n :param int supersamp_model: Each point in which the model is compute will be supersampled by the number\n of points provided, meaning that we will actually compute the model at\n supersamp_model points spread over the exposure time (exptime) and then\n average over this points.\n :param float exptime: exposure time for the supersampling\n :param bool phasefold: If true the phase folded data and model are plotted.\n :param dict phasefold_kwargs: Kwargs for the phase folded plot with 3 parameters:\n "planets" giving the planet names (list)\n "P" giving the planet periods (list)\n "tc" giving the times of inferior conjunction for each planet (list)\n :param plot_height:\n :param plot_width:\n :param kwargs_tl:\n '
if (phasefold and (phasefold_kwargs is None)):
raise ValueError('If you want to phase fold, you have to provide the phasefold_kwargs')
l_datasets = dataset_db.get_datasets()
ndataset = len(l_datasets)
fig = figure(figsize=(plot_width, (ndataset * plot_height)))
gs = GridSpec(nrows=ndataset, ncols=1)
for (ii, dataset) in enumerate(l_datasets):
inst_mod_fullname = model_instance.get_instmod_fullname(dataset.dataset_name)
datasim = datasim_dbf.instrument_db[inst_mod_fullname]['whole']
datasim_dbf_instmod = datasim_dbf.instrument_db[inst_mod_fullname]
if phasefold:
nplanet = (1 if (phasefold_kwargs is None) else len(phasefold_kwargs['planets']))
(axes_data, axes_resi) = add_twoaxeswithsharex_perplanet(gs[ii], nplanet=nplanet, fig=fig, gs_from_sps_kw={'height_ratios': (3, 1)})
for (planet_name, P, tc, ax_data, ax_resi) in zip(phasefold_kwargs['planets'], phasefold_kwargs['P'], phasefold_kwargs['tc'], axes_data, axes_resi):
overplot_one_data_model(param=param, l_param_name=l_param_name, datasim=datasim, dataset=dataset, datasim_kwargs=datasim_kwargs, model_instance=model_instance, oversamp=oversamp, supersamp_model=supersamp_model, exptime=exptime, phasefold=phasefold, phasefold_kwargs={'planet': planet_name, 'P': P, 'tc': tc}, datasim_dbf_instmod=datasim_dbf_instmod, zoom=None, show_title=True, show_legend=True, ax_data=ax_data, ax_resi=ax_resi)
else:
(ax_data, ax_resi) = add_twoaxeswithsharex(gs[ii], fig=fig, gs_from_sps_kw={'height_ratios': (3, 1)})
overplot_one_data_model(param=param, l_param_name=l_param_name, datasim=datasim, dataset=dataset, datasim_kwargs=datasim_kwargs, model_instance=model_instance, oversamp=oversamp, supersamp_model=supersamp_model, exptime=exptime, phasefold=phasefold, datasim_dbf_instmod=datasim_dbf_instmod, zoom=None, show_title=True, show_legend=True, ax_data=ax_data, ax_resi=ax_resi)
fig.tight_layout(**kwargs_tl)<|docstring|>Overplot datasets and model for each dataset and provide the residuals.
WARNING: In its current status (using the new overplot_one_data_model function). This function has
not been tested with phasefold=True.
:param np.array param: Vector of parameter values for the model
:param list_of_string l_param_name: List of parameter name corresponding to the parameter values
provided in param
:param datasim_dbf: Datasimulator database
:param DatasetDatabase dataset_db:
:param Core_Model model_instance: Core_Model instance
:param int oversamp: The model will be computed in oversamp times more points than the data
:param int supersamp_model: Each point in which the model is compute will be supersampled by the number
of points provided, meaning that we will actually compute the model at
supersamp_model points spread over the exposure time (exptime) and then
average over this points.
:param float exptime: exposure time for the supersampling
:param bool phasefold: If true the phase folded data and model are plotted.
:param dict phasefold_kwargs: Kwargs for the phase folded plot with 3 parameters:
"planets" giving the planet names (list)
"P" giving the planet periods (list)
"tc" giving the times of inferior conjunction for each planet (list)
:param plot_height:
:param plot_width:
:param kwargs_tl:<|endoftext|> |
783a40c294db397645853ab11bad9f7d7df345ae549888439830109913ebd37e | def overplot_onedata_model_pertransits(P, t_tr, planet_name, param, l_param_name, datasim, dataset, datasim_kwargs={}, model_instance=None, oversamp=10, supersamp_model=1, exptime=exptime_Kepler, zoom_width=0.25, show_title=True, show_legend=True, plot_height=2, plot_width=2, kwargs_tl={}):
'Zoom on the data model overplot for one datasetself.\n\n :param float/list_of_float P: Orbital period(s)\n :param float/list_of_float t_tr: Transit time(s), t0(s) of the ephemeris\n :param string/list_of_string planet_name: Planet name(s)\n :param np.array param: Vector of parameter values for the model\n :param list_of_string l_param_name: List of parameter name corresponding to the parameter values\n provided in param\n :param DatasimDocFunc datasim: Datasimulator for the dataset.\n :param Dataset dataset: Dataset\n :param Core_Model model_instance: Core_Model instance\n :param int oversamp: The model will be computed in oversamp times more points than the data\n :param int supersamp_model: Each point in which the model is compute will be supersampled by the number\n of points provided, meaning that we will actually compute the model at\n supersamp_model points spread over the exposure time (exptime) and then\n average over this points.\n :param float exptime: exposure time for the supersampling\n :param float zoom_width: Width of the zoom.\n :param bool show_title: If True, show the title giving the dataset name.\n :param bool show_legend: If True, show the legend.\n '
t = dataset.get_time()
t_max = max(t)
t_min = min(t)
zoom_planets = []
P = ([P] if isinstance(P, Number) else P)
t_tr = ([t_tr] if isinstance(t_tr, Number) else t_tr)
planet_name = ([planet_name] if isinstance(planet_name, str) else planet_name)
nb_pl = len(P)
for (jj, P_pl, t_tr_pl) in zip(list(range(nb_pl)), P, t_tr):
zoom_planets.append([])
nb_per_min = int(((t_min - t_tr_pl) // P_pl))
nb_per_min = (nb_per_min if (nb_per_min > 0) else (nb_per_min - 1))
nb_per_max = int(((t_max - t_tr_pl) // P_pl))
nb_per_max = (nb_per_max if (nb_per_max < 0) else (nb_per_max + 1))
for ii in range(nb_per_min, (nb_per_max + 1)):
t_tr_ii = (t_tr_pl + (P_pl * ii))
zoom_min_ii = (t_tr_ii - (zoom_width / 2.0))
zoom_max_ii = (t_tr_ii + (zoom_width / 2.0))
if ((zoom_max_ii < t_min) or (zoom_min_ii > t_max)):
continue
else:
zoom_planets[jj].append([zoom_min_ii, zoom_max_ii])
nb_max_zoom = max([len(zoom_pl) for zoom_pl in zoom_planets])
(fig, axes) = subplots(nrows=(2 * nb_pl), ncols=nb_max_zoom, figsize=((plot_width * nb_max_zoom), (nb_pl * plot_height)))
for jj in range(nb_pl):
overplot_one_data_model(param=param, l_param_name=l_param_name, datasim=datasim, dataset=dataset, datasim_kwargs=datasim_kwargs, model_instance=model_instance, oversamp=oversamp, supersamp_model=supersamp_model, exptime=exptime_Kepler, phasefold=False, zoom=zoom_planets[jj], show_title=False, show_legend=False, ax_data=axes[(jj * 2)], ax_resi=axes[((jj * 2) + 1)])
fig.tight_layout(**kwargs_tl) | Zoom on the data model overplot for one datasetself.
:param float/list_of_float P: Orbital period(s)
:param float/list_of_float t_tr: Transit time(s), t0(s) of the ephemeris
:param string/list_of_string planet_name: Planet name(s)
:param np.array param: Vector of parameter values for the model
:param list_of_string l_param_name: List of parameter name corresponding to the parameter values
provided in param
:param DatasimDocFunc datasim: Datasimulator for the dataset.
:param Dataset dataset: Dataset
:param Core_Model model_instance: Core_Model instance
:param int oversamp: The model will be computed in oversamp times more points than the data
:param int supersamp_model: Each point in which the model is compute will be supersampled by the number
of points provided, meaning that we will actually compute the model at
supersamp_model points spread over the exposure time (exptime) and then
average over this points.
:param float exptime: exposure time for the supersampling
:param float zoom_width: Width of the zoom.
:param bool show_title: If True, show the title giving the dataset name.
:param bool show_legend: If True, show the legend. | CaRM_HD189733/scripts/emcee_tools.py | overplot_onedata_model_pertransits | EduardoCristo/CaRM | 0 | python | def overplot_onedata_model_pertransits(P, t_tr, planet_name, param, l_param_name, datasim, dataset, datasim_kwargs={}, model_instance=None, oversamp=10, supersamp_model=1, exptime=exptime_Kepler, zoom_width=0.25, show_title=True, show_legend=True, plot_height=2, plot_width=2, kwargs_tl={}):
'Zoom on the data model overplot for one datasetself.\n\n :param float/list_of_float P: Orbital period(s)\n :param float/list_of_float t_tr: Transit time(s), t0(s) of the ephemeris\n :param string/list_of_string planet_name: Planet name(s)\n :param np.array param: Vector of parameter values for the model\n :param list_of_string l_param_name: List of parameter name corresponding to the parameter values\n provided in param\n :param DatasimDocFunc datasim: Datasimulator for the dataset.\n :param Dataset dataset: Dataset\n :param Core_Model model_instance: Core_Model instance\n :param int oversamp: The model will be computed in oversamp times more points than the data\n :param int supersamp_model: Each point in which the model is compute will be supersampled by the number\n of points provided, meaning that we will actually compute the model at\n supersamp_model points spread over the exposure time (exptime) and then\n average over this points.\n :param float exptime: exposure time for the supersampling\n :param float zoom_width: Width of the zoom.\n :param bool show_title: If True, show the title giving the dataset name.\n :param bool show_legend: If True, show the legend.\n '
t = dataset.get_time()
t_max = max(t)
t_min = min(t)
zoom_planets = []
P = ([P] if isinstance(P, Number) else P)
t_tr = ([t_tr] if isinstance(t_tr, Number) else t_tr)
planet_name = ([planet_name] if isinstance(planet_name, str) else planet_name)
nb_pl = len(P)
for (jj, P_pl, t_tr_pl) in zip(list(range(nb_pl)), P, t_tr):
zoom_planets.append([])
nb_per_min = int(((t_min - t_tr_pl) // P_pl))
nb_per_min = (nb_per_min if (nb_per_min > 0) else (nb_per_min - 1))
nb_per_max = int(((t_max - t_tr_pl) // P_pl))
nb_per_max = (nb_per_max if (nb_per_max < 0) else (nb_per_max + 1))
for ii in range(nb_per_min, (nb_per_max + 1)):
t_tr_ii = (t_tr_pl + (P_pl * ii))
zoom_min_ii = (t_tr_ii - (zoom_width / 2.0))
zoom_max_ii = (t_tr_ii + (zoom_width / 2.0))
if ((zoom_max_ii < t_min) or (zoom_min_ii > t_max)):
continue
else:
zoom_planets[jj].append([zoom_min_ii, zoom_max_ii])
nb_max_zoom = max([len(zoom_pl) for zoom_pl in zoom_planets])
(fig, axes) = subplots(nrows=(2 * nb_pl), ncols=nb_max_zoom, figsize=((plot_width * nb_max_zoom), (nb_pl * plot_height)))
for jj in range(nb_pl):
overplot_one_data_model(param=param, l_param_name=l_param_name, datasim=datasim, dataset=dataset, datasim_kwargs=datasim_kwargs, model_instance=model_instance, oversamp=oversamp, supersamp_model=supersamp_model, exptime=exptime_Kepler, phasefold=False, zoom=zoom_planets[jj], show_title=False, show_legend=False, ax_data=axes[(jj * 2)], ax_resi=axes[((jj * 2) + 1)])
fig.tight_layout(**kwargs_tl) | def overplot_onedata_model_pertransits(P, t_tr, planet_name, param, l_param_name, datasim, dataset, datasim_kwargs={}, model_instance=None, oversamp=10, supersamp_model=1, exptime=exptime_Kepler, zoom_width=0.25, show_title=True, show_legend=True, plot_height=2, plot_width=2, kwargs_tl={}):
'Zoom on the data model overplot for one datasetself.\n\n :param float/list_of_float P: Orbital period(s)\n :param float/list_of_float t_tr: Transit time(s), t0(s) of the ephemeris\n :param string/list_of_string planet_name: Planet name(s)\n :param np.array param: Vector of parameter values for the model\n :param list_of_string l_param_name: List of parameter name corresponding to the parameter values\n provided in param\n :param DatasimDocFunc datasim: Datasimulator for the dataset.\n :param Dataset dataset: Dataset\n :param Core_Model model_instance: Core_Model instance\n :param int oversamp: The model will be computed in oversamp times more points than the data\n :param int supersamp_model: Each point in which the model is compute will be supersampled by the number\n of points provided, meaning that we will actually compute the model at\n supersamp_model points spread over the exposure time (exptime) and then\n average over this points.\n :param float exptime: exposure time for the supersampling\n :param float zoom_width: Width of the zoom.\n :param bool show_title: If True, show the title giving the dataset name.\n :param bool show_legend: If True, show the legend.\n '
t = dataset.get_time()
t_max = max(t)
t_min = min(t)
zoom_planets = []
P = ([P] if isinstance(P, Number) else P)
t_tr = ([t_tr] if isinstance(t_tr, Number) else t_tr)
planet_name = ([planet_name] if isinstance(planet_name, str) else planet_name)
nb_pl = len(P)
for (jj, P_pl, t_tr_pl) in zip(list(range(nb_pl)), P, t_tr):
zoom_planets.append([])
nb_per_min = int(((t_min - t_tr_pl) // P_pl))
nb_per_min = (nb_per_min if (nb_per_min > 0) else (nb_per_min - 1))
nb_per_max = int(((t_max - t_tr_pl) // P_pl))
nb_per_max = (nb_per_max if (nb_per_max < 0) else (nb_per_max + 1))
for ii in range(nb_per_min, (nb_per_max + 1)):
t_tr_ii = (t_tr_pl + (P_pl * ii))
zoom_min_ii = (t_tr_ii - (zoom_width / 2.0))
zoom_max_ii = (t_tr_ii + (zoom_width / 2.0))
if ((zoom_max_ii < t_min) or (zoom_min_ii > t_max)):
continue
else:
zoom_planets[jj].append([zoom_min_ii, zoom_max_ii])
nb_max_zoom = max([len(zoom_pl) for zoom_pl in zoom_planets])
(fig, axes) = subplots(nrows=(2 * nb_pl), ncols=nb_max_zoom, figsize=((plot_width * nb_max_zoom), (nb_pl * plot_height)))
for jj in range(nb_pl):
overplot_one_data_model(param=param, l_param_name=l_param_name, datasim=datasim, dataset=dataset, datasim_kwargs=datasim_kwargs, model_instance=model_instance, oversamp=oversamp, supersamp_model=supersamp_model, exptime=exptime_Kepler, phasefold=False, zoom=zoom_planets[jj], show_title=False, show_legend=False, ax_data=axes[(jj * 2)], ax_resi=axes[((jj * 2) + 1)])
fig.tight_layout(**kwargs_tl)<|docstring|>Zoom on the data model overplot for one datasetself.
:param float/list_of_float P: Orbital period(s)
:param float/list_of_float t_tr: Transit time(s), t0(s) of the ephemeris
:param string/list_of_string planet_name: Planet name(s)
:param np.array param: Vector of parameter values for the model
:param list_of_string l_param_name: List of parameter name corresponding to the parameter values
provided in param
:param DatasimDocFunc datasim: Datasimulator for the dataset.
:param Dataset dataset: Dataset
:param Core_Model model_instance: Core_Model instance
:param int oversamp: The model will be computed in oversamp times more points than the data
:param int supersamp_model: Each point in which the model is compute will be supersampled by the number
of points provided, meaning that we will actually compute the model at
supersamp_model points spread over the exposure time (exptime) and then
average over this points.
:param float exptime: exposure time for the supersampling
:param float zoom_width: Width of the zoom.
:param bool show_title: If True, show the title giving the dataset name.
:param bool show_legend: If True, show the legend.<|endoftext|> |
d3d4806c9d0c70df5f142f98464b6b5bafa82eb5c479d3874ceb54347d375358 | def plot_residuals(t, data, datasim_db_docfunc, param, l_param_name, datasim_kwargs=None, data_err=None, jitter=None, jitter_type=None, supersamp=1, exptime=exptime_Kepler, plot_phase=False, P=None, tc=None, noise_model=None, model_instance=None, zoom=None, pl_kwargs_model=None, show_model=True, pl_kwargs_modelandGP=None, show_modelandGP=True, ax=None):
'\n :param array t:\n :param array data:\n :param datasim_db_docfunc:\n :param param:\n :param l_param_name:\n :param datasim_kwargs:\n :param data_err:\n :param jitter:\n :param jitter_type:\n :param supersamp:\n :param exptime:\n :param plot_phase: If True, plot phase folded residuals\n :param P:\n :param tc:\n :param noise_model:\n :param model_instance:\n :param zoom:\n :param pl_kwargs_model:\n :param bool show_model: To show the residuals of the model only when the noise model is a GP.\n :param pl_kwargs_modelandGP:\n :param bool show_modelandGP:\n :param ax:\n '
ax = __get_default_ax(ax=ax)
(model, model_wGP, _) = compute_model(t, datasim_db_docfunc, param, l_param_name, datasim_kwargs=datasim_kwargs, supersamp=supersamp, exptime=exptime, noise_model=noise_model, model_instance=model_instance)
residual = (data - model)
data_err_new = (data_err if (jitter is None) else apply_jitter(data_err, jitter, jitter_type))
if (noise_model is None):
noise_modelGP = False
else:
noise_modelGP = noise_model.has_GP
if ((zoom is not None) and (not plot_phase)):
extra_arrays_to_zoom = ([data, model, residual] if (data_err is None) else [data, model, residual, data_err_new])
if noise_modelGP:
extra_arrays_to_zoom.append(model_wGP)
(zoomed_arrays, idx_zoom) = apply_zoom(zoom=zoom, base_array=t, arrays=extra_arrays_to_zoom)
t_zoom = zoomed_arrays[0]
data_zoom = zoomed_arrays[1]
residual_zoom = zoomed_arrays[2]
if (data_err is None):
data_err_new_zoom = None
model_wGP_zoom = (None if (not noise_modelGP) else zoomed_arrays[3])
else:
data_err_new_zoom = zoomed_arrays[3]
model_wGP_zoom = (None if (not noise_modelGP) else zoomed_arrays[4])
if (show_model or (not noise_modelGP)):
kwarg_model = {'label': 'model', 'color': 'g', 'fmt': '.'}
if (pl_kwargs_model is not None):
kwarg_model.update(pl_kwargs_model)
if plot_phase:
plot_phase_folded_timeserie(t, residual, P, tc, data_err=data_err_new, zoom=zoom, ax=ax, pl_kwargs=kwarg_model)
elif (zoom is not None):
ax.errorbar(t_zoom, residual_zoom, data_err_new_zoom, **kwarg_model)
else:
ax.errorbar(t, residual, data_err_new, **kwarg_model)
if ((noise_model is not None) and noise_modelGP and show_modelandGP):
residual_wGP = ((data - model_wGP) if (zoom is None) else (data_zoom - model_wGP_zoom))
kwarg_GP = {'label': 'model+GP', 'color': 'r', 'fmt': '.', 'alpha': 0.6}
if (pl_kwargs_modelandGP is not None):
kwarg_GP.update(pl_kwargs_modelandGP)
if plot_phase:
plot_phase_folded_timeserie(t, residual_wGP, P, tc, data_err=data_err_new, zoom=zoom, ax=ax, pl_kwargs=kwarg_GP)
elif (zoom is not None):
ax.errorbar(t_zoom, residual_wGP, data_err_new_zoom, **kwarg_GP)
else:
ax.errorbar(t, residual_wGP, data_err_new, **kwarg_GP)
(xmin, xmax) = ax.get_xlim()
ax.hlines(y=0.0, xmin=xmin, xmax=xmax, linestyles='dashed', linewidth=1)
ax.set_xlim(xmin, xmax) | :param array t:
:param array data:
:param datasim_db_docfunc:
:param param:
:param l_param_name:
:param datasim_kwargs:
:param data_err:
:param jitter:
:param jitter_type:
:param supersamp:
:param exptime:
:param plot_phase: If True, plot phase folded residuals
:param P:
:param tc:
:param noise_model:
:param model_instance:
:param zoom:
:param pl_kwargs_model:
:param bool show_model: To show the residuals of the model only when the noise model is a GP.
:param pl_kwargs_modelandGP:
:param bool show_modelandGP:
:param ax: | CaRM_HD189733/scripts/emcee_tools.py | plot_residuals | EduardoCristo/CaRM | 0 | python | def plot_residuals(t, data, datasim_db_docfunc, param, l_param_name, datasim_kwargs=None, data_err=None, jitter=None, jitter_type=None, supersamp=1, exptime=exptime_Kepler, plot_phase=False, P=None, tc=None, noise_model=None, model_instance=None, zoom=None, pl_kwargs_model=None, show_model=True, pl_kwargs_modelandGP=None, show_modelandGP=True, ax=None):
'\n :param array t:\n :param array data:\n :param datasim_db_docfunc:\n :param param:\n :param l_param_name:\n :param datasim_kwargs:\n :param data_err:\n :param jitter:\n :param jitter_type:\n :param supersamp:\n :param exptime:\n :param plot_phase: If True, plot phase folded residuals\n :param P:\n :param tc:\n :param noise_model:\n :param model_instance:\n :param zoom:\n :param pl_kwargs_model:\n :param bool show_model: To show the residuals of the model only when the noise model is a GP.\n :param pl_kwargs_modelandGP:\n :param bool show_modelandGP:\n :param ax:\n '
ax = __get_default_ax(ax=ax)
(model, model_wGP, _) = compute_model(t, datasim_db_docfunc, param, l_param_name, datasim_kwargs=datasim_kwargs, supersamp=supersamp, exptime=exptime, noise_model=noise_model, model_instance=model_instance)
residual = (data - model)
data_err_new = (data_err if (jitter is None) else apply_jitter(data_err, jitter, jitter_type))
if (noise_model is None):
noise_modelGP = False
else:
noise_modelGP = noise_model.has_GP
if ((zoom is not None) and (not plot_phase)):
extra_arrays_to_zoom = ([data, model, residual] if (data_err is None) else [data, model, residual, data_err_new])
if noise_modelGP:
extra_arrays_to_zoom.append(model_wGP)
(zoomed_arrays, idx_zoom) = apply_zoom(zoom=zoom, base_array=t, arrays=extra_arrays_to_zoom)
t_zoom = zoomed_arrays[0]
data_zoom = zoomed_arrays[1]
residual_zoom = zoomed_arrays[2]
if (data_err is None):
data_err_new_zoom = None
model_wGP_zoom = (None if (not noise_modelGP) else zoomed_arrays[3])
else:
data_err_new_zoom = zoomed_arrays[3]
model_wGP_zoom = (None if (not noise_modelGP) else zoomed_arrays[4])
if (show_model or (not noise_modelGP)):
kwarg_model = {'label': 'model', 'color': 'g', 'fmt': '.'}
if (pl_kwargs_model is not None):
kwarg_model.update(pl_kwargs_model)
if plot_phase:
plot_phase_folded_timeserie(t, residual, P, tc, data_err=data_err_new, zoom=zoom, ax=ax, pl_kwargs=kwarg_model)
elif (zoom is not None):
ax.errorbar(t_zoom, residual_zoom, data_err_new_zoom, **kwarg_model)
else:
ax.errorbar(t, residual, data_err_new, **kwarg_model)
if ((noise_model is not None) and noise_modelGP and show_modelandGP):
residual_wGP = ((data - model_wGP) if (zoom is None) else (data_zoom - model_wGP_zoom))
kwarg_GP = {'label': 'model+GP', 'color': 'r', 'fmt': '.', 'alpha': 0.6}
if (pl_kwargs_modelandGP is not None):
kwarg_GP.update(pl_kwargs_modelandGP)
if plot_phase:
plot_phase_folded_timeserie(t, residual_wGP, P, tc, data_err=data_err_new, zoom=zoom, ax=ax, pl_kwargs=kwarg_GP)
elif (zoom is not None):
ax.errorbar(t_zoom, residual_wGP, data_err_new_zoom, **kwarg_GP)
else:
ax.errorbar(t, residual_wGP, data_err_new, **kwarg_GP)
(xmin, xmax) = ax.get_xlim()
ax.hlines(y=0.0, xmin=xmin, xmax=xmax, linestyles='dashed', linewidth=1)
ax.set_xlim(xmin, xmax) | def plot_residuals(t, data, datasim_db_docfunc, param, l_param_name, datasim_kwargs=None, data_err=None, jitter=None, jitter_type=None, supersamp=1, exptime=exptime_Kepler, plot_phase=False, P=None, tc=None, noise_model=None, model_instance=None, zoom=None, pl_kwargs_model=None, show_model=True, pl_kwargs_modelandGP=None, show_modelandGP=True, ax=None):
'\n :param array t:\n :param array data:\n :param datasim_db_docfunc:\n :param param:\n :param l_param_name:\n :param datasim_kwargs:\n :param data_err:\n :param jitter:\n :param jitter_type:\n :param supersamp:\n :param exptime:\n :param plot_phase: If True, plot phase folded residuals\n :param P:\n :param tc:\n :param noise_model:\n :param model_instance:\n :param zoom:\n :param pl_kwargs_model:\n :param bool show_model: To show the residuals of the model only when the noise model is a GP.\n :param pl_kwargs_modelandGP:\n :param bool show_modelandGP:\n :param ax:\n '
ax = __get_default_ax(ax=ax)
(model, model_wGP, _) = compute_model(t, datasim_db_docfunc, param, l_param_name, datasim_kwargs=datasim_kwargs, supersamp=supersamp, exptime=exptime, noise_model=noise_model, model_instance=model_instance)
residual = (data - model)
data_err_new = (data_err if (jitter is None) else apply_jitter(data_err, jitter, jitter_type))
if (noise_model is None):
noise_modelGP = False
else:
noise_modelGP = noise_model.has_GP
if ((zoom is not None) and (not plot_phase)):
extra_arrays_to_zoom = ([data, model, residual] if (data_err is None) else [data, model, residual, data_err_new])
if noise_modelGP:
extra_arrays_to_zoom.append(model_wGP)
(zoomed_arrays, idx_zoom) = apply_zoom(zoom=zoom, base_array=t, arrays=extra_arrays_to_zoom)
t_zoom = zoomed_arrays[0]
data_zoom = zoomed_arrays[1]
residual_zoom = zoomed_arrays[2]
if (data_err is None):
data_err_new_zoom = None
model_wGP_zoom = (None if (not noise_modelGP) else zoomed_arrays[3])
else:
data_err_new_zoom = zoomed_arrays[3]
model_wGP_zoom = (None if (not noise_modelGP) else zoomed_arrays[4])
if (show_model or (not noise_modelGP)):
kwarg_model = {'label': 'model', 'color': 'g', 'fmt': '.'}
if (pl_kwargs_model is not None):
kwarg_model.update(pl_kwargs_model)
if plot_phase:
plot_phase_folded_timeserie(t, residual, P, tc, data_err=data_err_new, zoom=zoom, ax=ax, pl_kwargs=kwarg_model)
elif (zoom is not None):
ax.errorbar(t_zoom, residual_zoom, data_err_new_zoom, **kwarg_model)
else:
ax.errorbar(t, residual, data_err_new, **kwarg_model)
if ((noise_model is not None) and noise_modelGP and show_modelandGP):
residual_wGP = ((data - model_wGP) if (zoom is None) else (data_zoom - model_wGP_zoom))
kwarg_GP = {'label': 'model+GP', 'color': 'r', 'fmt': '.', 'alpha': 0.6}
if (pl_kwargs_modelandGP is not None):
kwarg_GP.update(pl_kwargs_modelandGP)
if plot_phase:
plot_phase_folded_timeserie(t, residual_wGP, P, tc, data_err=data_err_new, zoom=zoom, ax=ax, pl_kwargs=kwarg_GP)
elif (zoom is not None):
ax.errorbar(t_zoom, residual_wGP, data_err_new_zoom, **kwarg_GP)
else:
ax.errorbar(t, residual_wGP, data_err_new, **kwarg_GP)
(xmin, xmax) = ax.get_xlim()
ax.hlines(y=0.0, xmin=xmin, xmax=xmax, linestyles='dashed', linewidth=1)
ax.set_xlim(xmin, xmax)<|docstring|>:param array t:
:param array data:
:param datasim_db_docfunc:
:param param:
:param l_param_name:
:param datasim_kwargs:
:param data_err:
:param jitter:
:param jitter_type:
:param supersamp:
:param exptime:
:param plot_phase: If True, plot phase folded residuals
:param P:
:param tc:
:param noise_model:
:param model_instance:
:param zoom:
:param pl_kwargs_model:
:param bool show_model: To show the residuals of the model only when the noise model is a GP.
:param pl_kwargs_modelandGP:
:param bool show_modelandGP:
:param ax:<|endoftext|> |
ffe41ec76478a42e7dda8a69c8d0be38ccd373bd27b27ec6e5ce84b611b4669a | def apply_jitter(data_err, jitter, jitter_type):
'Apply jitter to the data error bar\n\n :param array_float data_err: data error array\n :param float jitter: jitter value\n :param str jitter_type: jitter_type ("multi" or "add")\n '
if (jitter_type == 'multi'):
data_err_new = (data_err * exp(jitter))
elif (jitter_type == 'add'):
data_err_new = sqrt(((data_err ** 2) * (1 + exp((2 * jitter)))))
else:
raise ValueError("jitter_type should be in ['multi', 'add']")
return data_err_new | Apply jitter to the data error bar
:param array_float data_err: data error array
:param float jitter: jitter value
:param str jitter_type: jitter_type ("multi" or "add") | CaRM_HD189733/scripts/emcee_tools.py | apply_jitter | EduardoCristo/CaRM | 0 | python | def apply_jitter(data_err, jitter, jitter_type):
'Apply jitter to the data error bar\n\n :param array_float data_err: data error array\n :param float jitter: jitter value\n :param str jitter_type: jitter_type ("multi" or "add")\n '
if (jitter_type == 'multi'):
data_err_new = (data_err * exp(jitter))
elif (jitter_type == 'add'):
data_err_new = sqrt(((data_err ** 2) * (1 + exp((2 * jitter)))))
else:
raise ValueError("jitter_type should be in ['multi', 'add']")
return data_err_new | def apply_jitter(data_err, jitter, jitter_type):
'Apply jitter to the data error bar\n\n :param array_float data_err: data error array\n :param float jitter: jitter value\n :param str jitter_type: jitter_type ("multi" or "add")\n '
if (jitter_type == 'multi'):
data_err_new = (data_err * exp(jitter))
elif (jitter_type == 'add'):
data_err_new = sqrt(((data_err ** 2) * (1 + exp((2 * jitter)))))
else:
raise ValueError("jitter_type should be in ['multi', 'add']")
return data_err_new<|docstring|>Apply jitter to the data error bar
:param array_float data_err: data error array
:param float jitter: jitter value
:param str jitter_type: jitter_type ("multi" or "add")<|endoftext|> |
fd27ea27af5d6974951cd8988a748afe6aeeb78e42e4ccd6f5c8b0d1ccf16133 | def apply_zoom(zoom, base_array, arrays=None):
'Apply jitter to the data error bar\n\n :param list_of_float zoom: It should be a list-like object with two elements.\n zoom[0] give the minimum value in zoom_base_array for the zoom and zoom[1] give the maximum.\n :param array_of_float base_array: Array on which the zoom in based. The idx of the elements\n which satisfy zoom[0] < zoom_base_array < zoom[1], will be used to cut both zoom_base_array\n and zoomed_arrays\n :param None/list_of_array arrays: List of array to zoom.\n :return list_of_array zoomed_arrays: List of zoomed arrays, the first one is the zoomed based array\n :return array idx_zoom: array of indexes which satisfy the zoom\n '
idx_zoom = where(((base_array > zoom[0]) & (base_array < zoom[1])))[0]
zoomed_arrays = []
zoomed_arrays.append(base_array[idx_zoom])
if (arrays is not None):
for arr in arrays:
zoomed_arrays.append(arr[idx_zoom])
return (zoomed_arrays, idx_zoom) | Apply jitter to the data error bar
:param list_of_float zoom: It should be a list-like object with two elements.
zoom[0] give the minimum value in zoom_base_array for the zoom and zoom[1] give the maximum.
:param array_of_float base_array: Array on which the zoom in based. The idx of the elements
which satisfy zoom[0] < zoom_base_array < zoom[1], will be used to cut both zoom_base_array
and zoomed_arrays
:param None/list_of_array arrays: List of array to zoom.
:return list_of_array zoomed_arrays: List of zoomed arrays, the first one is the zoomed based array
:return array idx_zoom: array of indexes which satisfy the zoom | CaRM_HD189733/scripts/emcee_tools.py | apply_zoom | EduardoCristo/CaRM | 0 | python | def apply_zoom(zoom, base_array, arrays=None):
'Apply jitter to the data error bar\n\n :param list_of_float zoom: It should be a list-like object with two elements.\n zoom[0] give the minimum value in zoom_base_array for the zoom and zoom[1] give the maximum.\n :param array_of_float base_array: Array on which the zoom in based. The idx of the elements\n which satisfy zoom[0] < zoom_base_array < zoom[1], will be used to cut both zoom_base_array\n and zoomed_arrays\n :param None/list_of_array arrays: List of array to zoom.\n :return list_of_array zoomed_arrays: List of zoomed arrays, the first one is the zoomed based array\n :return array idx_zoom: array of indexes which satisfy the zoom\n '
idx_zoom = where(((base_array > zoom[0]) & (base_array < zoom[1])))[0]
zoomed_arrays = []
zoomed_arrays.append(base_array[idx_zoom])
if (arrays is not None):
for arr in arrays:
zoomed_arrays.append(arr[idx_zoom])
return (zoomed_arrays, idx_zoom) | def apply_zoom(zoom, base_array, arrays=None):
'Apply jitter to the data error bar\n\n :param list_of_float zoom: It should be a list-like object with two elements.\n zoom[0] give the minimum value in zoom_base_array for the zoom and zoom[1] give the maximum.\n :param array_of_float base_array: Array on which the zoom in based. The idx of the elements\n which satisfy zoom[0] < zoom_base_array < zoom[1], will be used to cut both zoom_base_array\n and zoomed_arrays\n :param None/list_of_array arrays: List of array to zoom.\n :return list_of_array zoomed_arrays: List of zoomed arrays, the first one is the zoomed based array\n :return array idx_zoom: array of indexes which satisfy the zoom\n '
idx_zoom = where(((base_array > zoom[0]) & (base_array < zoom[1])))[0]
zoomed_arrays = []
zoomed_arrays.append(base_array[idx_zoom])
if (arrays is not None):
for arr in arrays:
zoomed_arrays.append(arr[idx_zoom])
return (zoomed_arrays, idx_zoom)<|docstring|>Apply jitter to the data error bar
:param list_of_float zoom: It should be a list-like object with two elements.
zoom[0] give the minimum value in zoom_base_array for the zoom and zoom[1] give the maximum.
:param array_of_float base_array: Array on which the zoom in based. The idx of the elements
which satisfy zoom[0] < zoom_base_array < zoom[1], will be used to cut both zoom_base_array
and zoomed_arrays
:param None/list_of_array arrays: List of array to zoom.
:return list_of_array zoomed_arrays: List of zoomed arrays, the first one is the zoomed based array
:return array idx_zoom: array of indexes which satisfy the zoom<|endoftext|> |
c894b6e7df24c80359a2090ea11479cb344a82397bad49dde86b4a01fed6f909 | def plot_phase_folded_timeserie(t, data, P, tc, data_err=None, jitter=None, jitter_type=None, zoom=None, ax=None, pl_kwargs=None):
'Plot a phase folded representation of a lc\n\n :param array_float t: time array\n :param array_float data: data array\n :param float P: Period of the planet\n :param float tc: Time of inferior conjuction of the planet\n :param array_float data_err: data error array\n :param float jitter: jitter value\n :param str jitter_type: jitter_type ("multi" or "add")\n :param None/list_of_float zoom: If provided the plot will be zoom. Meaning that the model and data\n will only be plotted between two phase values. It should be a list-like object with two elements.\n zoom[0] give the minimum phase for the zoom and zoom[1] give the maximum.\n :param ~matplotlib.axes._axes.Axes ax: Axes instance where the data and model will be ploted\n :param dict pl_kwargs: Keyword argument passed to pl.errorbar function\n\n P and tc needs to have the same unit than the t\n '
ax = __get_default_ax(ax=ax)
phases = (foldAt(t, P, T0=(tc + (P / 2))) - 0.5)
sortIndi = argsort(phases)
if (data_err is not None):
data_err_new = (data_err if (jitter is None) else apply_jitter(data_err, jitter, jitter_type))
data_err_new_sort = data_err_new[sortIndi]
phase_sort = phases[sortIndi]
data_sort = data[sortIndi]
if (zoom is not None):
if ((zoom[0] is not None) and (zoom[1] is not None)):
extra_arrays_to_zoom = ([data_sort] if (data_err is None) else [data_sort, data_err_new_sort])
(zoomed_arrays, idx_zoom) = apply_zoom(zoom=zoom, base_array=phase_sort, arrays=extra_arrays_to_zoom)
phase_sort = zoomed_arrays[0]
data_sort = zoomed_arrays[1]
data_err_new_sort = (None if (data_err is None) else zoomed_arrays[2])
kw = (dict() if (pl_kwargs is None) else pl_kwargs.copy())
if ('fmt' not in kw):
kw['fmt'] = '-'
if ('color' not in kw):
kw['color'] = 'r'
if (data_err is not None):
line = ax.errorbar(phase_sort, data_sort, data_err_new_sort, **kw)
else:
line = ax.errorbar(phase_sort, data_sort, **kw)
return (line, phases) | Plot a phase folded representation of a lc
:param array_float t: time array
:param array_float data: data array
:param float P: Period of the planet
:param float tc: Time of inferior conjuction of the planet
:param array_float data_err: data error array
:param float jitter: jitter value
:param str jitter_type: jitter_type ("multi" or "add")
:param None/list_of_float zoom: If provided the plot will be zoom. Meaning that the model and data
will only be plotted between two phase values. It should be a list-like object with two elements.
zoom[0] give the minimum phase for the zoom and zoom[1] give the maximum.
:param ~matplotlib.axes._axes.Axes ax: Axes instance where the data and model will be ploted
:param dict pl_kwargs: Keyword argument passed to pl.errorbar function
P and tc needs to have the same unit than the t | CaRM_HD189733/scripts/emcee_tools.py | plot_phase_folded_timeserie | EduardoCristo/CaRM | 0 | python | def plot_phase_folded_timeserie(t, data, P, tc, data_err=None, jitter=None, jitter_type=None, zoom=None, ax=None, pl_kwargs=None):
'Plot a phase folded representation of a lc\n\n :param array_float t: time array\n :param array_float data: data array\n :param float P: Period of the planet\n :param float tc: Time of inferior conjuction of the planet\n :param array_float data_err: data error array\n :param float jitter: jitter value\n :param str jitter_type: jitter_type ("multi" or "add")\n :param None/list_of_float zoom: If provided the plot will be zoom. Meaning that the model and data\n will only be plotted between two phase values. It should be a list-like object with two elements.\n zoom[0] give the minimum phase for the zoom and zoom[1] give the maximum.\n :param ~matplotlib.axes._axes.Axes ax: Axes instance where the data and model will be ploted\n :param dict pl_kwargs: Keyword argument passed to pl.errorbar function\n\n P and tc needs to have the same unit than the t\n '
ax = __get_default_ax(ax=ax)
phases = (foldAt(t, P, T0=(tc + (P / 2))) - 0.5)
sortIndi = argsort(phases)
if (data_err is not None):
data_err_new = (data_err if (jitter is None) else apply_jitter(data_err, jitter, jitter_type))
data_err_new_sort = data_err_new[sortIndi]
phase_sort = phases[sortIndi]
data_sort = data[sortIndi]
if (zoom is not None):
if ((zoom[0] is not None) and (zoom[1] is not None)):
extra_arrays_to_zoom = ([data_sort] if (data_err is None) else [data_sort, data_err_new_sort])
(zoomed_arrays, idx_zoom) = apply_zoom(zoom=zoom, base_array=phase_sort, arrays=extra_arrays_to_zoom)
phase_sort = zoomed_arrays[0]
data_sort = zoomed_arrays[1]
data_err_new_sort = (None if (data_err is None) else zoomed_arrays[2])
kw = (dict() if (pl_kwargs is None) else pl_kwargs.copy())
if ('fmt' not in kw):
kw['fmt'] = '-'
if ('color' not in kw):
kw['color'] = 'r'
if (data_err is not None):
line = ax.errorbar(phase_sort, data_sort, data_err_new_sort, **kw)
else:
line = ax.errorbar(phase_sort, data_sort, **kw)
return (line, phases) | def plot_phase_folded_timeserie(t, data, P, tc, data_err=None, jitter=None, jitter_type=None, zoom=None, ax=None, pl_kwargs=None):
'Plot a phase folded representation of a lc\n\n :param array_float t: time array\n :param array_float data: data array\n :param float P: Period of the planet\n :param float tc: Time of inferior conjuction of the planet\n :param array_float data_err: data error array\n :param float jitter: jitter value\n :param str jitter_type: jitter_type ("multi" or "add")\n :param None/list_of_float zoom: If provided the plot will be zoom. Meaning that the model and data\n will only be plotted between two phase values. It should be a list-like object with two elements.\n zoom[0] give the minimum phase for the zoom and zoom[1] give the maximum.\n :param ~matplotlib.axes._axes.Axes ax: Axes instance where the data and model will be ploted\n :param dict pl_kwargs: Keyword argument passed to pl.errorbar function\n\n P and tc needs to have the same unit than the t\n '
ax = __get_default_ax(ax=ax)
phases = (foldAt(t, P, T0=(tc + (P / 2))) - 0.5)
sortIndi = argsort(phases)
if (data_err is not None):
data_err_new = (data_err if (jitter is None) else apply_jitter(data_err, jitter, jitter_type))
data_err_new_sort = data_err_new[sortIndi]
phase_sort = phases[sortIndi]
data_sort = data[sortIndi]
if (zoom is not None):
if ((zoom[0] is not None) and (zoom[1] is not None)):
extra_arrays_to_zoom = ([data_sort] if (data_err is None) else [data_sort, data_err_new_sort])
(zoomed_arrays, idx_zoom) = apply_zoom(zoom=zoom, base_array=phase_sort, arrays=extra_arrays_to_zoom)
phase_sort = zoomed_arrays[0]
data_sort = zoomed_arrays[1]
data_err_new_sort = (None if (data_err is None) else zoomed_arrays[2])
kw = (dict() if (pl_kwargs is None) else pl_kwargs.copy())
if ('fmt' not in kw):
kw['fmt'] = '-'
if ('color' not in kw):
kw['color'] = 'r'
if (data_err is not None):
line = ax.errorbar(phase_sort, data_sort, data_err_new_sort, **kw)
else:
line = ax.errorbar(phase_sort, data_sort, **kw)
return (line, phases)<|docstring|>Plot a phase folded representation of a lc
:param array_float t: time array
:param array_float data: data array
:param float P: Period of the planet
:param float tc: Time of inferior conjuction of the planet
:param array_float data_err: data error array
:param float jitter: jitter value
:param str jitter_type: jitter_type ("multi" or "add")
:param None/list_of_float zoom: If provided the plot will be zoom. Meaning that the model and data
will only be plotted between two phase values. It should be a list-like object with two elements.
zoom[0] give the minimum phase for the zoom and zoom[1] give the maximum.
:param ~matplotlib.axes._axes.Axes ax: Axes instance where the data and model will be ploted
:param dict pl_kwargs: Keyword argument passed to pl.errorbar function
P and tc needs to have the same unit than the t<|endoftext|> |
02ba3ccaf669fde8d0fd4f734bd5c3e3df97f84b812c472f3623867a5ac3622f | def add_twoaxeswithsharex(subplotspec, fig, gs_from_sps_kw=None):
'Add two axes to a subplotspec (created with gridspec) for data and residual plot. '
kw = (dict() if (gs_from_sps_kw is None) else gs_from_sps_kw.copy())
if ('hspace' not in kw):
kw['hspace'] = 0.1
if ('height_ratios' not in kw):
kw['height_ratios'] = (4, 1)
gs = gridspec.GridSpecFromSubplotSpec(2, 1, subplot_spec=subplotspec, **kw)
ax0 = Subplot(fig, gs[0])
ax0.locator_params(axis='y', tight=True, nbins=4)
ax0.tick_params(labelbottom='off')
fig.add_subplot(ax0)
ax1 = Subplot(fig, gs[1], sharex=ax0)
fig.add_subplot(ax1)
ax1.locator_params(axis='y', tight=True, nbins=4)
return (ax0, ax1) | Add two axes to a subplotspec (created with gridspec) for data and residual plot. | CaRM_HD189733/scripts/emcee_tools.py | add_twoaxeswithsharex | EduardoCristo/CaRM | 0 | python | def add_twoaxeswithsharex(subplotspec, fig, gs_from_sps_kw=None):
' '
kw = (dict() if (gs_from_sps_kw is None) else gs_from_sps_kw.copy())
if ('hspace' not in kw):
kw['hspace'] = 0.1
if ('height_ratios' not in kw):
kw['height_ratios'] = (4, 1)
gs = gridspec.GridSpecFromSubplotSpec(2, 1, subplot_spec=subplotspec, **kw)
ax0 = Subplot(fig, gs[0])
ax0.locator_params(axis='y', tight=True, nbins=4)
ax0.tick_params(labelbottom='off')
fig.add_subplot(ax0)
ax1 = Subplot(fig, gs[1], sharex=ax0)
fig.add_subplot(ax1)
ax1.locator_params(axis='y', tight=True, nbins=4)
return (ax0, ax1) | def add_twoaxeswithsharex(subplotspec, fig, gs_from_sps_kw=None):
' '
kw = (dict() if (gs_from_sps_kw is None) else gs_from_sps_kw.copy())
if ('hspace' not in kw):
kw['hspace'] = 0.1
if ('height_ratios' not in kw):
kw['height_ratios'] = (4, 1)
gs = gridspec.GridSpecFromSubplotSpec(2, 1, subplot_spec=subplotspec, **kw)
ax0 = Subplot(fig, gs[0])
ax0.locator_params(axis='y', tight=True, nbins=4)
ax0.tick_params(labelbottom='off')
fig.add_subplot(ax0)
ax1 = Subplot(fig, gs[1], sharex=ax0)
fig.add_subplot(ax1)
ax1.locator_params(axis='y', tight=True, nbins=4)
return (ax0, ax1)<|docstring|>Add two axes to a subplotspec (created with gridspec) for data and residual plot.<|endoftext|> |
61d1ca1da1d0726702528dae8b195a095a7fe1fb2072a80c7e5dd5679efc4c1c | def add_twoaxeswithsharex_perplanet(subplotspec, nplanet, fig, gs_from_sps_kw=None):
'Add two axes per planet to a subplotspec (created with gridspec) for data and residual plot.\n '
gs = gridspec.GridSpecFromSubplotSpec(1, nplanet, subplot_spec=subplotspec)
axes_data = []
axes_resi = []
for gs_elem in gs:
(ax_data, ax_resi) = add_twoaxeswithsharex(gs_elem, fig, gs_from_sps_kw=gs_from_sps_kw)
axes_data.append(ax_data)
axes_resi.append(ax_resi)
return (axes_data, axes_resi) | Add two axes per planet to a subplotspec (created with gridspec) for data and residual plot. | CaRM_HD189733/scripts/emcee_tools.py | add_twoaxeswithsharex_perplanet | EduardoCristo/CaRM | 0 | python | def add_twoaxeswithsharex_perplanet(subplotspec, nplanet, fig, gs_from_sps_kw=None):
'\n '
gs = gridspec.GridSpecFromSubplotSpec(1, nplanet, subplot_spec=subplotspec)
axes_data = []
axes_resi = []
for gs_elem in gs:
(ax_data, ax_resi) = add_twoaxeswithsharex(gs_elem, fig, gs_from_sps_kw=gs_from_sps_kw)
axes_data.append(ax_data)
axes_resi.append(ax_resi)
return (axes_data, axes_resi) | def add_twoaxeswithsharex_perplanet(subplotspec, nplanet, fig, gs_from_sps_kw=None):
'\n '
gs = gridspec.GridSpecFromSubplotSpec(1, nplanet, subplot_spec=subplotspec)
axes_data = []
axes_resi = []
for gs_elem in gs:
(ax_data, ax_resi) = add_twoaxeswithsharex(gs_elem, fig, gs_from_sps_kw=gs_from_sps_kw)
axes_data.append(ax_data)
axes_resi.append(ax_resi)
return (axes_data, axes_resi)<|docstring|>Add two axes per planet to a subplotspec (created with gridspec) for data and residual plot.<|endoftext|> |
99f24803a5b0877e110f061d5917a05c30255c8211d188a04bcf3b6a3075bc1e | def acceptancefraction_selection(acceptance_fraction, sig_fact=3.0, quantile=75, verbose=1):
'Return selected walker based on the acceptance fraction.\n\n :param np.array acceptance_fraction: Value of the acceptance fraction for each walker.\n :param float sig_fact: acceptance fraction below mean - sig_fact * sigma will be rejected\n :param int verbose: if 1 speaks otherwise not\n '
percentile_acceptance_frac = percentile(acceptance_fraction, quantile)
mad_acceptance_frac = mad(acceptance_fraction)
if (verbose == 1):
logger.info('Acceptance fraction of the walkers: {}\nquantile {}%: {}, MAD:{}'.format(acceptance_fraction, quantile, percentile_acceptance_frac, mad_acceptance_frac))
l_selected_walker = where((acceptance_fraction > (percentile_acceptance_frac - (sig_fact * mad_acceptance_frac))))[0]
nb_rejected = (acceptance_fraction.shape[0] - len(l_selected_walker))
if (verbose == 1):
logger.info('Number of rejected walkers: {}/{}'.format(nb_rejected, acceptance_fraction.shape[0]))
return (l_selected_walker, nb_rejected) | Return selected walker based on the acceptance fraction.
:param np.array acceptance_fraction: Value of the acceptance fraction for each walker.
:param float sig_fact: acceptance fraction below mean - sig_fact * sigma will be rejected
:param int verbose: if 1 speaks otherwise not | CaRM_HD189733/scripts/emcee_tools.py | acceptancefraction_selection | EduardoCristo/CaRM | 0 | python | def acceptancefraction_selection(acceptance_fraction, sig_fact=3.0, quantile=75, verbose=1):
'Return selected walker based on the acceptance fraction.\n\n :param np.array acceptance_fraction: Value of the acceptance fraction for each walker.\n :param float sig_fact: acceptance fraction below mean - sig_fact * sigma will be rejected\n :param int verbose: if 1 speaks otherwise not\n '
percentile_acceptance_frac = percentile(acceptance_fraction, quantile)
mad_acceptance_frac = mad(acceptance_fraction)
if (verbose == 1):
logger.info('Acceptance fraction of the walkers: {}\nquantile {}%: {}, MAD:{}'.format(acceptance_fraction, quantile, percentile_acceptance_frac, mad_acceptance_frac))
l_selected_walker = where((acceptance_fraction > (percentile_acceptance_frac - (sig_fact * mad_acceptance_frac))))[0]
nb_rejected = (acceptance_fraction.shape[0] - len(l_selected_walker))
if (verbose == 1):
logger.info('Number of rejected walkers: {}/{}'.format(nb_rejected, acceptance_fraction.shape[0]))
return (l_selected_walker, nb_rejected) | def acceptancefraction_selection(acceptance_fraction, sig_fact=3.0, quantile=75, verbose=1):
'Return selected walker based on the acceptance fraction.\n\n :param np.array acceptance_fraction: Value of the acceptance fraction for each walker.\n :param float sig_fact: acceptance fraction below mean - sig_fact * sigma will be rejected\n :param int verbose: if 1 speaks otherwise not\n '
percentile_acceptance_frac = percentile(acceptance_fraction, quantile)
mad_acceptance_frac = mad(acceptance_fraction)
if (verbose == 1):
logger.info('Acceptance fraction of the walkers: {}\nquantile {}%: {}, MAD:{}'.format(acceptance_fraction, quantile, percentile_acceptance_frac, mad_acceptance_frac))
l_selected_walker = where((acceptance_fraction > (percentile_acceptance_frac - (sig_fact * mad_acceptance_frac))))[0]
nb_rejected = (acceptance_fraction.shape[0] - len(l_selected_walker))
if (verbose == 1):
logger.info('Number of rejected walkers: {}/{}'.format(nb_rejected, acceptance_fraction.shape[0]))
return (l_selected_walker, nb_rejected)<|docstring|>Return selected walker based on the acceptance fraction.
:param np.array acceptance_fraction: Value of the acceptance fraction for each walker.
:param float sig_fact: acceptance fraction below mean - sig_fact * sigma will be rejected
:param int verbose: if 1 speaks otherwise not<|endoftext|> |
dfac924792e50ea43c1265a613334298fe2c04525aa60ee301ae535f367909a4 | def lnposterior_selection(lnprobability, sig_fact=3.0, quantile=75, quantile_walker=50, verbose=1):
'Return selected walker based on the acceptance fraction.\n\n :param np.array lnprobability: Values of the lnprobability taken by each walker at each iteration\n :param float sig_fact: acceptance fraction below quantile - sig_fact * sigma will be rejected\n :param float quantile: Quantile to use as reference lnprobability value.\n :param float quantile_walker: Quantile used to assert the lnprobability for each walker. 50 is\n the meadian, 100 is the highest lnprobability.\n :param int verbose: if 1 speaks otherwise not\n :return list_of_int l_selected_walker: list of selected walker\n :return int nb_rejected: number of rejected walker\n '
walkers_percentile_lnposterior = percentile(lnprobability, quantile_walker, axis=1)
percentile_lnposterior = percentile(walkers_percentile_lnposterior, quantile)
mad_lnposterior = mad(walkers_percentile_lnposterior)
if (verbose == 1):
logger.info('lnposterior of the walkers: {}\nquantile {}%: {}, MAD:{}'.format(walkers_percentile_lnposterior, quantile, percentile_lnposterior, mad_lnposterior))
l_selected_walker = where((walkers_percentile_lnposterior > (percentile_lnposterior - (sig_fact * mad_lnposterior))))[0]
nb_rejected = (lnprobability.shape[0] - len(l_selected_walker))
if (verbose == 1):
logger.info('Number of rejected walkers: {}/{}'.format(nb_rejected, lnprobability.shape[0]))
return (l_selected_walker, nb_rejected) | Return selected walker based on the acceptance fraction.
:param np.array lnprobability: Values of the lnprobability taken by each walker at each iteration
:param float sig_fact: acceptance fraction below quantile - sig_fact * sigma will be rejected
:param float quantile: Quantile to use as reference lnprobability value.
:param float quantile_walker: Quantile used to assert the lnprobability for each walker. 50 is
the meadian, 100 is the highest lnprobability.
:param int verbose: if 1 speaks otherwise not
:return list_of_int l_selected_walker: list of selected walker
:return int nb_rejected: number of rejected walker | CaRM_HD189733/scripts/emcee_tools.py | lnposterior_selection | EduardoCristo/CaRM | 0 | python | def lnposterior_selection(lnprobability, sig_fact=3.0, quantile=75, quantile_walker=50, verbose=1):
'Return selected walker based on the acceptance fraction.\n\n :param np.array lnprobability: Values of the lnprobability taken by each walker at each iteration\n :param float sig_fact: acceptance fraction below quantile - sig_fact * sigma will be rejected\n :param float quantile: Quantile to use as reference lnprobability value.\n :param float quantile_walker: Quantile used to assert the lnprobability for each walker. 50 is\n the meadian, 100 is the highest lnprobability.\n :param int verbose: if 1 speaks otherwise not\n :return list_of_int l_selected_walker: list of selected walker\n :return int nb_rejected: number of rejected walker\n '
walkers_percentile_lnposterior = percentile(lnprobability, quantile_walker, axis=1)
percentile_lnposterior = percentile(walkers_percentile_lnposterior, quantile)
mad_lnposterior = mad(walkers_percentile_lnposterior)
if (verbose == 1):
logger.info('lnposterior of the walkers: {}\nquantile {}%: {}, MAD:{}'.format(walkers_percentile_lnposterior, quantile, percentile_lnposterior, mad_lnposterior))
l_selected_walker = where((walkers_percentile_lnposterior > (percentile_lnposterior - (sig_fact * mad_lnposterior))))[0]
nb_rejected = (lnprobability.shape[0] - len(l_selected_walker))
if (verbose == 1):
logger.info('Number of rejected walkers: {}/{}'.format(nb_rejected, lnprobability.shape[0]))
return (l_selected_walker, nb_rejected) | def lnposterior_selection(lnprobability, sig_fact=3.0, quantile=75, quantile_walker=50, verbose=1):
'Return selected walker based on the acceptance fraction.\n\n :param np.array lnprobability: Values of the lnprobability taken by each walker at each iteration\n :param float sig_fact: acceptance fraction below quantile - sig_fact * sigma will be rejected\n :param float quantile: Quantile to use as reference lnprobability value.\n :param float quantile_walker: Quantile used to assert the lnprobability for each walker. 50 is\n the meadian, 100 is the highest lnprobability.\n :param int verbose: if 1 speaks otherwise not\n :return list_of_int l_selected_walker: list of selected walker\n :return int nb_rejected: number of rejected walker\n '
walkers_percentile_lnposterior = percentile(lnprobability, quantile_walker, axis=1)
percentile_lnposterior = percentile(walkers_percentile_lnposterior, quantile)
mad_lnposterior = mad(walkers_percentile_lnposterior)
if (verbose == 1):
logger.info('lnposterior of the walkers: {}\nquantile {}%: {}, MAD:{}'.format(walkers_percentile_lnposterior, quantile, percentile_lnposterior, mad_lnposterior))
l_selected_walker = where((walkers_percentile_lnposterior > (percentile_lnposterior - (sig_fact * mad_lnposterior))))[0]
nb_rejected = (lnprobability.shape[0] - len(l_selected_walker))
if (verbose == 1):
logger.info('Number of rejected walkers: {}/{}'.format(nb_rejected, lnprobability.shape[0]))
return (l_selected_walker, nb_rejected)<|docstring|>Return selected walker based on the acceptance fraction.
:param np.array lnprobability: Values of the lnprobability taken by each walker at each iteration
:param float sig_fact: acceptance fraction below quantile - sig_fact * sigma will be rejected
:param float quantile: Quantile to use as reference lnprobability value.
:param float quantile_walker: Quantile used to assert the lnprobability for each walker. 50 is
the meadian, 100 is the highest lnprobability.
:param int verbose: if 1 speaks otherwise not
:return list_of_int l_selected_walker: list of selected walker
:return int nb_rejected: number of rejected walker<|endoftext|> |
73293e057abab6420417d2440de6807f9e2a5504443c36361bc507f6db83e6f8 | def get_fitted_values(chainI, method='MAP', l_param_name=None, l_walker=None, l_burnin=None, lnprobability=None, verbose=1):
'Return the fitted values from the sampler.\n\n :param ChainInterpret chainI:\n :param string method: method used to extract the fitted values ["MAP", "median", "gausfit", "mode"]\n :param int_iteratable l_walkers: list of valid walkers\n :param int burnin: index of the first iteration to consider.\n :param int verbose: if 1 speaks otherwise not\n '
ndim = chainI.dim
if (method == 'median'):
res = np.nanmedian(get_clean_flatchain(chainI, l_walker=l_walker, l_burnin=l_burnin), axis=0)
elif (method == 'MAP'):
if ((l_walker is not None) or (l_burnin is not None)):
logger.warning('With method MAP the l_walker and l_burnin arguments are ignored.')
(walker, it) = unravel_index(argmax(lnprobability), dims=lnprobability.shape)
res = array([chainI[(walker, it, dim)] for dim in range(ndim)])
elif (method == 'gaussfit'):
res = gauspeak(get_clean_flatchain(chainI, l_walker=l_walker, l_burnin=l_burnin), nbins=100)
elif (method == 'mode'):
res = modepeak(get_clean_flatchain(chainI, l_walker=l_walker, l_burnin=l_burnin), nbins=100)
else:
raise ValueError('Method {} is not recognised'.format(method))
if (verbose == 1):
l_param_names = __get_default_l_param_name(l_param_name, ndim)
text = '\n'
for (i, param_name) in enumerate(l_param_names):
text += '{} = {}\n'.format(param_name, res[i])
logger.info(text)
return res | Return the fitted values from the sampler.
:param ChainInterpret chainI:
:param string method: method used to extract the fitted values ["MAP", "median", "gausfit", "mode"]
:param int_iteratable l_walkers: list of valid walkers
:param int burnin: index of the first iteration to consider.
:param int verbose: if 1 speaks otherwise not | CaRM_HD189733/scripts/emcee_tools.py | get_fitted_values | EduardoCristo/CaRM | 0 | python | def get_fitted_values(chainI, method='MAP', l_param_name=None, l_walker=None, l_burnin=None, lnprobability=None, verbose=1):
'Return the fitted values from the sampler.\n\n :param ChainInterpret chainI:\n :param string method: method used to extract the fitted values ["MAP", "median", "gausfit", "mode"]\n :param int_iteratable l_walkers: list of valid walkers\n :param int burnin: index of the first iteration to consider.\n :param int verbose: if 1 speaks otherwise not\n '
ndim = chainI.dim
if (method == 'median'):
res = np.nanmedian(get_clean_flatchain(chainI, l_walker=l_walker, l_burnin=l_burnin), axis=0)
elif (method == 'MAP'):
if ((l_walker is not None) or (l_burnin is not None)):
logger.warning('With method MAP the l_walker and l_burnin arguments are ignored.')
(walker, it) = unravel_index(argmax(lnprobability), dims=lnprobability.shape)
res = array([chainI[(walker, it, dim)] for dim in range(ndim)])
elif (method == 'gaussfit'):
res = gauspeak(get_clean_flatchain(chainI, l_walker=l_walker, l_burnin=l_burnin), nbins=100)
elif (method == 'mode'):
res = modepeak(get_clean_flatchain(chainI, l_walker=l_walker, l_burnin=l_burnin), nbins=100)
else:
raise ValueError('Method {} is not recognised'.format(method))
if (verbose == 1):
l_param_names = __get_default_l_param_name(l_param_name, ndim)
text = '\n'
for (i, param_name) in enumerate(l_param_names):
text += '{} = {}\n'.format(param_name, res[i])
logger.info(text)
return res | def get_fitted_values(chainI, method='MAP', l_param_name=None, l_walker=None, l_burnin=None, lnprobability=None, verbose=1):
'Return the fitted values from the sampler.\n\n :param ChainInterpret chainI:\n :param string method: method used to extract the fitted values ["MAP", "median", "gausfit", "mode"]\n :param int_iteratable l_walkers: list of valid walkers\n :param int burnin: index of the first iteration to consider.\n :param int verbose: if 1 speaks otherwise not\n '
ndim = chainI.dim
if (method == 'median'):
res = np.nanmedian(get_clean_flatchain(chainI, l_walker=l_walker, l_burnin=l_burnin), axis=0)
elif (method == 'MAP'):
if ((l_walker is not None) or (l_burnin is not None)):
logger.warning('With method MAP the l_walker and l_burnin arguments are ignored.')
(walker, it) = unravel_index(argmax(lnprobability), dims=lnprobability.shape)
res = array([chainI[(walker, it, dim)] for dim in range(ndim)])
elif (method == 'gaussfit'):
res = gauspeak(get_clean_flatchain(chainI, l_walker=l_walker, l_burnin=l_burnin), nbins=100)
elif (method == 'mode'):
res = modepeak(get_clean_flatchain(chainI, l_walker=l_walker, l_burnin=l_burnin), nbins=100)
else:
raise ValueError('Method {} is not recognised'.format(method))
if (verbose == 1):
l_param_names = __get_default_l_param_name(l_param_name, ndim)
text = '\n'
for (i, param_name) in enumerate(l_param_names):
text += '{} = {}\n'.format(param_name, res[i])
logger.info(text)
return res<|docstring|>Return the fitted values from the sampler.
:param ChainInterpret chainI:
:param string method: method used to extract the fitted values ["MAP", "median", "gausfit", "mode"]
:param int_iteratable l_walkers: list of valid walkers
:param int burnin: index of the first iteration to consider.
:param int verbose: if 1 speaks otherwise not<|endoftext|> |
364e8e90216f6dbc4ebeeeda0fa2a3686d9a43c021d4b02e2e4afd1fab607516 | def get_clean_flatchain(chainI, l_walker=None, l_burnin=None, force_finite=True):
'Return a flatchain with only the selected walkers and iteration after the burnin.\n\n :param ChainInterpret chainI:\n :param int_iteratable l_walkers: list of valid walkers\n :param int_iteratable l_burnin: list of burnin iterations for each valid walker\n :param bool force_finite: If True the function will suppress every iteration for which one of the\n parameter values provided is not finite.\n :return np.array res: cleaned flat chain\n '
res = None
if ((l_walker is None) and (l_burnin is None)):
res = chainI.flatchain
elif (l_burnin is None):
sh = chainI[(l_walker, ...)].shape
res = chainI[(l_walker, ...)].reshape((sh[0] * sh[1]), sh[2])
elif (l_walker is None):
l_walker = __get_default_l_walker(nwalker=chainI.shape[0])
if (res is None):
ndim = chainI.dim
res = []
if (ndim == 1):
for (walker, burnin) in zip(l_walker, l_burnin):
res.extend(chainI[(walker, burnin:)])
res = array(res).transpose()
else:
for dim in range(ndim):
res.append([])
for (walker, burnin) in zip(l_walker, l_burnin):
res[dim].extend(chainI[(walker, burnin:, dim)])
res = array(res).transpose()
if force_finite:
return np.delete(res, np.where(np.logical_not(np.isfinite(res)))[0], axis=0)
else:
return res | Return a flatchain with only the selected walkers and iteration after the burnin.
:param ChainInterpret chainI:
:param int_iteratable l_walkers: list of valid walkers
:param int_iteratable l_burnin: list of burnin iterations for each valid walker
:param bool force_finite: If True the function will suppress every iteration for which one of the
parameter values provided is not finite.
:return np.array res: cleaned flat chain | CaRM_HD189733/scripts/emcee_tools.py | get_clean_flatchain | EduardoCristo/CaRM | 0 | python | def get_clean_flatchain(chainI, l_walker=None, l_burnin=None, force_finite=True):
'Return a flatchain with only the selected walkers and iteration after the burnin.\n\n :param ChainInterpret chainI:\n :param int_iteratable l_walkers: list of valid walkers\n :param int_iteratable l_burnin: list of burnin iterations for each valid walker\n :param bool force_finite: If True the function will suppress every iteration for which one of the\n parameter values provided is not finite.\n :return np.array res: cleaned flat chain\n '
res = None
if ((l_walker is None) and (l_burnin is None)):
res = chainI.flatchain
elif (l_burnin is None):
sh = chainI[(l_walker, ...)].shape
res = chainI[(l_walker, ...)].reshape((sh[0] * sh[1]), sh[2])
elif (l_walker is None):
l_walker = __get_default_l_walker(nwalker=chainI.shape[0])
if (res is None):
ndim = chainI.dim
res = []
if (ndim == 1):
for (walker, burnin) in zip(l_walker, l_burnin):
res.extend(chainI[(walker, burnin:)])
res = array(res).transpose()
else:
for dim in range(ndim):
res.append([])
for (walker, burnin) in zip(l_walker, l_burnin):
res[dim].extend(chainI[(walker, burnin:, dim)])
res = array(res).transpose()
if force_finite:
return np.delete(res, np.where(np.logical_not(np.isfinite(res)))[0], axis=0)
else:
return res | def get_clean_flatchain(chainI, l_walker=None, l_burnin=None, force_finite=True):
'Return a flatchain with only the selected walkers and iteration after the burnin.\n\n :param ChainInterpret chainI:\n :param int_iteratable l_walkers: list of valid walkers\n :param int_iteratable l_burnin: list of burnin iterations for each valid walker\n :param bool force_finite: If True the function will suppress every iteration for which one of the\n parameter values provided is not finite.\n :return np.array res: cleaned flat chain\n '
res = None
if ((l_walker is None) and (l_burnin is None)):
res = chainI.flatchain
elif (l_burnin is None):
sh = chainI[(l_walker, ...)].shape
res = chainI[(l_walker, ...)].reshape((sh[0] * sh[1]), sh[2])
elif (l_walker is None):
l_walker = __get_default_l_walker(nwalker=chainI.shape[0])
if (res is None):
ndim = chainI.dim
res = []
if (ndim == 1):
for (walker, burnin) in zip(l_walker, l_burnin):
res.extend(chainI[(walker, burnin:)])
res = array(res).transpose()
else:
for dim in range(ndim):
res.append([])
for (walker, burnin) in zip(l_walker, l_burnin):
res[dim].extend(chainI[(walker, burnin:, dim)])
res = array(res).transpose()
if force_finite:
return np.delete(res, np.where(np.logical_not(np.isfinite(res)))[0], axis=0)
else:
return res<|docstring|>Return a flatchain with only the selected walkers and iteration after the burnin.
:param ChainInterpret chainI:
:param int_iteratable l_walkers: list of valid walkers
:param int_iteratable l_burnin: list of burnin iterations for each valid walker
:param bool force_finite: If True the function will suppress every iteration for which one of the
parameter values provided is not finite.
:return np.array res: cleaned flat chain<|endoftext|> |
a9bbf6b33bdc719b99068e33d2876f2b425d813fe8797790e9e33d7957f88062 | def geweke_multi(chains, first=0.1, last=0.5, intervals=20, l_walker=None):
'Adapted the geweke test for multiple wlaker exploration.\n\n :param emcee.EnsembleSampler sampler:\n :param float first: first portion of the chain to be used in the Geweke diagnostic.\n Default to 0.1 (i.e. first 10 % of the chain)\n :param float last: last portion of the chain to be used in the Geweke diagnostic.\n Default to 0.5 (i.e. last 50 % of the chain)\n :param int intervals: Number of sub-chains to analyze. Defaults to 20.\n :param int_iterable l_walker: list of valid walkers\n '
nwalker = chains.shape[0]
ndim = chains.shape[(- 1)]
l_walker = __get_default_l_walker(l_walker=l_walker, nwalker=nwalker)
nwalker = len(l_walker)
nsteps = chains.shape[1]
nb_step_last = int((nsteps * last))
last_start_step = (nsteps - nb_step_last)
logger.info('Number of steps in last portion of the chains for geweke convergence estimate: {}'.format(nb_step_last))
l_med_last = [median(chains[(l_walker, last_start_step:, dim)]) for dim in range(ndim)]
logger.info('Median value for each parameter (over all specified walkers) in the last portion of the chains: {}'.format(l_med_last))
l_mad_last = [mad(chains[(l_walker, last_start_step:, dim)]) for dim in range(ndim)]
l_mad_last_is0 = [(mad_dim == 0.0) for mad_dim in l_mad_last]
if any(l_mad_last_is0):
for dim in np.where(l_mad_last_is0)[0]:
logger.debug('MAD returned 0.0 for parameter number: {}. Compute std.'.format(dim))
l_mad_last[dim] = std(chains[(l_walker, last_start_step:, dim)])
if (l_mad_last[dim] == 0.0):
raise ValueError('MAD and std returned zero for parameter number: {}.'.format(dim))
logger.info('MAD value for each parameter (over all specified walkers) in the last portion of the chains: {}'.format(l_mad_last))
first_length = int((nsteps * first))
logger.info('Number of steps in each interval of first portion of the chains: {}'.format(first_length))
first_start_steps = [int((i * (last_start_step / intervals))) for i in range(intervals)]
logger.debug('First step of each interval in the first portion of the chains: {}'.format(first_start_steps))
zscores = (ones((nwalker, intervals, ndim)) * nan)
for (dim, med_last, mad_last) in zip(list(range(ndim)), l_med_last, l_mad_last):
for (i, walker) in enumerate(l_walker):
for (j, first_start) in enumerate(first_start_steps):
med_first = median(chains[(walker, first_start:(first_start + first_length), dim)])
mad_first = mad(chains[(walker, first_start:(first_start + first_length), dim)])
if (mad_first < (5 * mad_last)):
zscores[(i, j, dim)] = ((med_first - med_last) / sqrt(((mad_first ** 2) + (mad_last ** 2))))
else:
zscores[(i, j, dim)] = np.inf
return (zscores, first_start_steps) | Adapted the geweke test for multiple wlaker exploration.
:param emcee.EnsembleSampler sampler:
:param float first: first portion of the chain to be used in the Geweke diagnostic.
Default to 0.1 (i.e. first 10 % of the chain)
:param float last: last portion of the chain to be used in the Geweke diagnostic.
Default to 0.5 (i.e. last 50 % of the chain)
:param int intervals: Number of sub-chains to analyze. Defaults to 20.
:param int_iterable l_walker: list of valid walkers | CaRM_HD189733/scripts/emcee_tools.py | geweke_multi | EduardoCristo/CaRM | 0 | python | def geweke_multi(chains, first=0.1, last=0.5, intervals=20, l_walker=None):
'Adapted the geweke test for multiple wlaker exploration.\n\n :param emcee.EnsembleSampler sampler:\n :param float first: first portion of the chain to be used in the Geweke diagnostic.\n Default to 0.1 (i.e. first 10 % of the chain)\n :param float last: last portion of the chain to be used in the Geweke diagnostic.\n Default to 0.5 (i.e. last 50 % of the chain)\n :param int intervals: Number of sub-chains to analyze. Defaults to 20.\n :param int_iterable l_walker: list of valid walkers\n '
nwalker = chains.shape[0]
ndim = chains.shape[(- 1)]
l_walker = __get_default_l_walker(l_walker=l_walker, nwalker=nwalker)
nwalker = len(l_walker)
nsteps = chains.shape[1]
nb_step_last = int((nsteps * last))
last_start_step = (nsteps - nb_step_last)
logger.info('Number of steps in last portion of the chains for geweke convergence estimate: {}'.format(nb_step_last))
l_med_last = [median(chains[(l_walker, last_start_step:, dim)]) for dim in range(ndim)]
logger.info('Median value for each parameter (over all specified walkers) in the last portion of the chains: {}'.format(l_med_last))
l_mad_last = [mad(chains[(l_walker, last_start_step:, dim)]) for dim in range(ndim)]
l_mad_last_is0 = [(mad_dim == 0.0) for mad_dim in l_mad_last]
if any(l_mad_last_is0):
for dim in np.where(l_mad_last_is0)[0]:
logger.debug('MAD returned 0.0 for parameter number: {}. Compute std.'.format(dim))
l_mad_last[dim] = std(chains[(l_walker, last_start_step:, dim)])
if (l_mad_last[dim] == 0.0):
raise ValueError('MAD and std returned zero for parameter number: {}.'.format(dim))
logger.info('MAD value for each parameter (over all specified walkers) in the last portion of the chains: {}'.format(l_mad_last))
first_length = int((nsteps * first))
logger.info('Number of steps in each interval of first portion of the chains: {}'.format(first_length))
first_start_steps = [int((i * (last_start_step / intervals))) for i in range(intervals)]
logger.debug('First step of each interval in the first portion of the chains: {}'.format(first_start_steps))
zscores = (ones((nwalker, intervals, ndim)) * nan)
for (dim, med_last, mad_last) in zip(list(range(ndim)), l_med_last, l_mad_last):
for (i, walker) in enumerate(l_walker):
for (j, first_start) in enumerate(first_start_steps):
med_first = median(chains[(walker, first_start:(first_start + first_length), dim)])
mad_first = mad(chains[(walker, first_start:(first_start + first_length), dim)])
if (mad_first < (5 * mad_last)):
zscores[(i, j, dim)] = ((med_first - med_last) / sqrt(((mad_first ** 2) + (mad_last ** 2))))
else:
zscores[(i, j, dim)] = np.inf
return (zscores, first_start_steps) | def geweke_multi(chains, first=0.1, last=0.5, intervals=20, l_walker=None):
'Adapted the geweke test for multiple wlaker exploration.\n\n :param emcee.EnsembleSampler sampler:\n :param float first: first portion of the chain to be used in the Geweke diagnostic.\n Default to 0.1 (i.e. first 10 % of the chain)\n :param float last: last portion of the chain to be used in the Geweke diagnostic.\n Default to 0.5 (i.e. last 50 % of the chain)\n :param int intervals: Number of sub-chains to analyze. Defaults to 20.\n :param int_iterable l_walker: list of valid walkers\n '
nwalker = chains.shape[0]
ndim = chains.shape[(- 1)]
l_walker = __get_default_l_walker(l_walker=l_walker, nwalker=nwalker)
nwalker = len(l_walker)
nsteps = chains.shape[1]
nb_step_last = int((nsteps * last))
last_start_step = (nsteps - nb_step_last)
logger.info('Number of steps in last portion of the chains for geweke convergence estimate: {}'.format(nb_step_last))
l_med_last = [median(chains[(l_walker, last_start_step:, dim)]) for dim in range(ndim)]
logger.info('Median value for each parameter (over all specified walkers) in the last portion of the chains: {}'.format(l_med_last))
l_mad_last = [mad(chains[(l_walker, last_start_step:, dim)]) for dim in range(ndim)]
l_mad_last_is0 = [(mad_dim == 0.0) for mad_dim in l_mad_last]
if any(l_mad_last_is0):
for dim in np.where(l_mad_last_is0)[0]:
logger.debug('MAD returned 0.0 for parameter number: {}. Compute std.'.format(dim))
l_mad_last[dim] = std(chains[(l_walker, last_start_step:, dim)])
if (l_mad_last[dim] == 0.0):
raise ValueError('MAD and std returned zero for parameter number: {}.'.format(dim))
logger.info('MAD value for each parameter (over all specified walkers) in the last portion of the chains: {}'.format(l_mad_last))
first_length = int((nsteps * first))
logger.info('Number of steps in each interval of first portion of the chains: {}'.format(first_length))
first_start_steps = [int((i * (last_start_step / intervals))) for i in range(intervals)]
logger.debug('First step of each interval in the first portion of the chains: {}'.format(first_start_steps))
zscores = (ones((nwalker, intervals, ndim)) * nan)
for (dim, med_last, mad_last) in zip(list(range(ndim)), l_med_last, l_mad_last):
for (i, walker) in enumerate(l_walker):
for (j, first_start) in enumerate(first_start_steps):
med_first = median(chains[(walker, first_start:(first_start + first_length), dim)])
mad_first = mad(chains[(walker, first_start:(first_start + first_length), dim)])
if (mad_first < (5 * mad_last)):
zscores[(i, j, dim)] = ((med_first - med_last) / sqrt(((mad_first ** 2) + (mad_last ** 2))))
else:
zscores[(i, j, dim)] = np.inf
return (zscores, first_start_steps)<|docstring|>Adapted the geweke test for multiple wlaker exploration.
:param emcee.EnsembleSampler sampler:
:param float first: first portion of the chain to be used in the Geweke diagnostic.
Default to 0.1 (i.e. first 10 % of the chain)
:param float last: last portion of the chain to be used in the Geweke diagnostic.
Default to 0.5 (i.e. last 50 % of the chain)
:param int intervals: Number of sub-chains to analyze. Defaults to 20.
:param int_iterable l_walker: list of valid walkers<|endoftext|> |
4e95549fd6586b6f72a77305c076be4a19ed7a9cd84f74981fedb99f807d1b93 | def geweke_selection(zscores, first_steps=None, geweke_thres=2.0, l_walker=None, verbose=1):
'Compute the burnin for each valid walker based on their zscores.\n\n :param numpy.ndarray zscores:\n :param int_iteratable l_walker: list of valid walkers\n '
res = (abs(zscores) <= geweke_thres)
nwalker = zscores.shape[0]
intervals = zscores.shape[1]
first_steps = __get_default_first_steps(first_steps=first_steps, intervals=intervals)
l_walker = __get_default_l_walker(l_walker=l_walker, nwalker=nwalker)
l_burnin = []
l_walker_new = []
for i in range(nwalker):
for j in range(intervals):
if res[(i, j:, :)].all():
l_burnin.append(first_steps[j])
l_walker_new.append(l_walker[i])
break
if (verbose == 1):
logger.info('List of burnin for valid walker: {}'.format(dict(list(zip(l_walker, l_burnin)))))
logger.info('Number of walkers invalid walkers found: {}/{}'.format((len(l_walker) - len(l_walker_new)), len(l_walker)))
return (l_burnin, l_walker_new) | Compute the burnin for each valid walker based on their zscores.
:param numpy.ndarray zscores:
:param int_iteratable l_walker: list of valid walkers | CaRM_HD189733/scripts/emcee_tools.py | geweke_selection | EduardoCristo/CaRM | 0 | python | def geweke_selection(zscores, first_steps=None, geweke_thres=2.0, l_walker=None, verbose=1):
'Compute the burnin for each valid walker based on their zscores.\n\n :param numpy.ndarray zscores:\n :param int_iteratable l_walker: list of valid walkers\n '
res = (abs(zscores) <= geweke_thres)
nwalker = zscores.shape[0]
intervals = zscores.shape[1]
first_steps = __get_default_first_steps(first_steps=first_steps, intervals=intervals)
l_walker = __get_default_l_walker(l_walker=l_walker, nwalker=nwalker)
l_burnin = []
l_walker_new = []
for i in range(nwalker):
for j in range(intervals):
if res[(i, j:, :)].all():
l_burnin.append(first_steps[j])
l_walker_new.append(l_walker[i])
break
if (verbose == 1):
logger.info('List of burnin for valid walker: {}'.format(dict(list(zip(l_walker, l_burnin)))))
logger.info('Number of walkers invalid walkers found: {}/{}'.format((len(l_walker) - len(l_walker_new)), len(l_walker)))
return (l_burnin, l_walker_new) | def geweke_selection(zscores, first_steps=None, geweke_thres=2.0, l_walker=None, verbose=1):
'Compute the burnin for each valid walker based on their zscores.\n\n :param numpy.ndarray zscores:\n :param int_iteratable l_walker: list of valid walkers\n '
res = (abs(zscores) <= geweke_thres)
nwalker = zscores.shape[0]
intervals = zscores.shape[1]
first_steps = __get_default_first_steps(first_steps=first_steps, intervals=intervals)
l_walker = __get_default_l_walker(l_walker=l_walker, nwalker=nwalker)
l_burnin = []
l_walker_new = []
for i in range(nwalker):
for j in range(intervals):
if res[(i, j:, :)].all():
l_burnin.append(first_steps[j])
l_walker_new.append(l_walker[i])
break
if (verbose == 1):
logger.info('List of burnin for valid walker: {}'.format(dict(list(zip(l_walker, l_burnin)))))
logger.info('Number of walkers invalid walkers found: {}/{}'.format((len(l_walker) - len(l_walker_new)), len(l_walker)))
return (l_burnin, l_walker_new)<|docstring|>Compute the burnin for each valid walker based on their zscores.
:param numpy.ndarray zscores:
:param int_iteratable l_walker: list of valid walkers<|endoftext|> |
3f283f1319a9078f50d06bdd9a0e503cd16c6dc96b59a7c015d12ff80ded5714 | def write_latex_table(filename, df_fitval, obj_name=None):
'Write a TeX file with a table giving the fitted values.'
if (obj_name is None):
obj_name = ''
with open(filename, 'w') as f:
f.write('\\begin{table}\n\\caption{\\label{}}\n\\begin{tabular}{lc}\\hline\n')
f.write('Parameters & {}\\\\ \\hline\n'.format(obj_name))
for par in df_fitval.index:
f.write('{} & ${}_{{-{}}}^{{+{}}}$\\\\\n'.format(par.replace('_', '\\_'), df_fitval.loc[(par, 'value')], df_fitval.loc[(par, 'sigma-')], df_fitval.loc[(par, 'sigma+')]))
f.write('\\hline\n\\\\')
f.write('\\end{tabular}\n')
f.write('\\end{table}\n') | Write a TeX file with a table giving the fitted values. | CaRM_HD189733/scripts/emcee_tools.py | write_latex_table | EduardoCristo/CaRM | 0 | python | def write_latex_table(filename, df_fitval, obj_name=None):
if (obj_name is None):
obj_name =
with open(filename, 'w') as f:
f.write('\\begin{table}\n\\caption{\\label{}}\n\\begin{tabular}{lc}\\hline\n')
f.write('Parameters & {}\\\\ \\hline\n'.format(obj_name))
for par in df_fitval.index:
f.write('{} & ${}_{{-{}}}^{{+{}}}$\\\\\n'.format(par.replace('_', '\\_'), df_fitval.loc[(par, 'value')], df_fitval.loc[(par, 'sigma-')], df_fitval.loc[(par, 'sigma+')]))
f.write('\\hline\n\\\\')
f.write('\\end{tabular}\n')
f.write('\\end{table}\n') | def write_latex_table(filename, df_fitval, obj_name=None):
if (obj_name is None):
obj_name =
with open(filename, 'w') as f:
f.write('\\begin{table}\n\\caption{\\label{}}\n\\begin{tabular}{lc}\\hline\n')
f.write('Parameters & {}\\\\ \\hline\n'.format(obj_name))
for par in df_fitval.index:
f.write('{} & ${}_{{-{}}}^{{+{}}}$\\\\\n'.format(par.replace('_', '\\_'), df_fitval.loc[(par, 'value')], df_fitval.loc[(par, 'sigma-')], df_fitval.loc[(par, 'sigma+')]))
f.write('\\hline\n\\\\')
f.write('\\end{tabular}\n')
f.write('\\end{table}\n')<|docstring|>Write a TeX file with a table giving the fitted values.<|endoftext|> |
f62a469a5b66b8af2ed3c599ec241cfd3d30e763cd14fb1391075a8ba1a3b05a | def pickle_stuff(stuff, filename):
'Save stuff in a pickle file.\n\n The pickle file name is defined by the object_name and the extension\n "{}{}".format(obj_name, extension)\n\n :param stuff: Stuff to pickle\n :param str filename: Name of the pickle file\n '
with open(filename, 'wb') as fpickle:
dump(stuff, fpickle) | Save stuff in a pickle file.
The pickle file name is defined by the object_name and the extension
"{}{}".format(obj_name, extension)
:param stuff: Stuff to pickle
:param str filename: Name of the pickle file | CaRM_HD189733/scripts/emcee_tools.py | pickle_stuff | EduardoCristo/CaRM | 0 | python | def pickle_stuff(stuff, filename):
'Save stuff in a pickle file.\n\n The pickle file name is defined by the object_name and the extension\n "{}{}".format(obj_name, extension)\n\n :param stuff: Stuff to pickle\n :param str filename: Name of the pickle file\n '
with open(filename, 'wb') as fpickle:
dump(stuff, fpickle) | def pickle_stuff(stuff, filename):
'Save stuff in a pickle file.\n\n The pickle file name is defined by the object_name and the extension\n "{}{}".format(obj_name, extension)\n\n :param stuff: Stuff to pickle\n :param str filename: Name of the pickle file\n '
with open(filename, 'wb') as fpickle:
dump(stuff, fpickle)<|docstring|>Save stuff in a pickle file.
The pickle file name is defined by the object_name and the extension
"{}{}".format(obj_name, extension)
:param stuff: Stuff to pickle
:param str filename: Name of the pickle file<|endoftext|> |
b2de62a087f5dd5e130a48aa7562e594999f30b2d6ddb0df3ca1a504c65b16ed | def save_emceesampler(sampler, l_param_name=None, obj_name='', folder=None):
'Save Emcee EnsembleSampler instance elements into pickle files.\n\n :param emcee.EnsembleSampler sampler: EnsembleSampler instance to save\n :param list_of_str l_param_name: list of the parameter names\n :param str obj_name: Object name\n :param str folder: Folder where to put the pickle files\n '
if (folder is None):
folder = getcwd()
else:
makedirs(folder, exist_ok=True)
pickle_stuff(sampler.chain, join(folder, '{}{}'.format(obj_name, extension_pickle['chain'])))
pickle_stuff(sampler.lnprobability, join(folder, '{}{}'.format(obj_name, extension_pickle['lnpost'])))
pickle_stuff(sampler.acceptance_fraction, join(folder, '{}{}'.format(obj_name, extension_pickle['acceptfrac'])))
if (l_param_name is not None):
pickle_stuff(l_param_name, join(folder, '{}{}'.format(obj_name, extension_pickle['l_param_name']))) | Save Emcee EnsembleSampler instance elements into pickle files.
:param emcee.EnsembleSampler sampler: EnsembleSampler instance to save
:param list_of_str l_param_name: list of the parameter names
:param str obj_name: Object name
:param str folder: Folder where to put the pickle files | CaRM_HD189733/scripts/emcee_tools.py | save_emceesampler | EduardoCristo/CaRM | 0 | python | def save_emceesampler(sampler, l_param_name=None, obj_name=, folder=None):
'Save Emcee EnsembleSampler instance elements into pickle files.\n\n :param emcee.EnsembleSampler sampler: EnsembleSampler instance to save\n :param list_of_str l_param_name: list of the parameter names\n :param str obj_name: Object name\n :param str folder: Folder where to put the pickle files\n '
if (folder is None):
folder = getcwd()
else:
makedirs(folder, exist_ok=True)
pickle_stuff(sampler.chain, join(folder, '{}{}'.format(obj_name, extension_pickle['chain'])))
pickle_stuff(sampler.lnprobability, join(folder, '{}{}'.format(obj_name, extension_pickle['lnpost'])))
pickle_stuff(sampler.acceptance_fraction, join(folder, '{}{}'.format(obj_name, extension_pickle['acceptfrac'])))
if (l_param_name is not None):
pickle_stuff(l_param_name, join(folder, '{}{}'.format(obj_name, extension_pickle['l_param_name']))) | def save_emceesampler(sampler, l_param_name=None, obj_name=, folder=None):
'Save Emcee EnsembleSampler instance elements into pickle files.\n\n :param emcee.EnsembleSampler sampler: EnsembleSampler instance to save\n :param list_of_str l_param_name: list of the parameter names\n :param str obj_name: Object name\n :param str folder: Folder where to put the pickle files\n '
if (folder is None):
folder = getcwd()
else:
makedirs(folder, exist_ok=True)
pickle_stuff(sampler.chain, join(folder, '{}{}'.format(obj_name, extension_pickle['chain'])))
pickle_stuff(sampler.lnprobability, join(folder, '{}{}'.format(obj_name, extension_pickle['lnpost'])))
pickle_stuff(sampler.acceptance_fraction, join(folder, '{}{}'.format(obj_name, extension_pickle['acceptfrac'])))
if (l_param_name is not None):
pickle_stuff(l_param_name, join(folder, '{}{}'.format(obj_name, extension_pickle['l_param_name'])))<|docstring|>Save Emcee EnsembleSampler instance elements into pickle files.
:param emcee.EnsembleSampler sampler: EnsembleSampler instance to save
:param list_of_str l_param_name: list of the parameter names
:param str obj_name: Object name
:param str folder: Folder where to put the pickle files<|endoftext|> |
8e5254542929a4f430fa07e895462b080541b77ca5b5145e3c6fe62a8f72e70a | def save_chain_analysis(obj_name, fitted_values=None, fitted_values_sec=None, df_fittedval=None, folder=None):
'Save chain analysis results.\n\n TODO: Update to use pickle_stuff\n '
if (folder is None):
folder = getcwd()
else:
makedirs(folder, exist_ok=True)
if (df_fittedval is not None):
with open(join(folder, '{}{}'.format(obj_name, extension_pickle['df_fittedval'])), 'wb') as fdffitval:
dump(df_fittedval, fdffitval)
if (fitted_values is not None):
if (('array' not in fitted_values) or ('l_param' not in fitted_values)):
raise ValueError("fitted_values should be a dictionary with 2 keys 'array' and 'l_param'")
with open(join(folder, '{}{}'.format(obj_name, extension_pickle['fitted_values'])), 'wb') as ffitval:
dump(fitted_values, ffitval)
if (fitted_values_sec is not None):
if (('array' not in fitted_values) or ('l_param' not in fitted_values)):
raise ValueError("fitted_values should be a dictionary with 2 keys 'array' and 'l_param'")
with open(join(folder, '{}{}'.format(obj_name, extension_pickle['fitted_values_sec'])), 'wb') as ffitvals:
dump(fitted_values_sec, ffitvals) | Save chain analysis results.
TODO: Update to use pickle_stuff | CaRM_HD189733/scripts/emcee_tools.py | save_chain_analysis | EduardoCristo/CaRM | 0 | python | def save_chain_analysis(obj_name, fitted_values=None, fitted_values_sec=None, df_fittedval=None, folder=None):
'Save chain analysis results.\n\n TODO: Update to use pickle_stuff\n '
if (folder is None):
folder = getcwd()
else:
makedirs(folder, exist_ok=True)
if (df_fittedval is not None):
with open(join(folder, '{}{}'.format(obj_name, extension_pickle['df_fittedval'])), 'wb') as fdffitval:
dump(df_fittedval, fdffitval)
if (fitted_values is not None):
if (('array' not in fitted_values) or ('l_param' not in fitted_values)):
raise ValueError("fitted_values should be a dictionary with 2 keys 'array' and 'l_param'")
with open(join(folder, '{}{}'.format(obj_name, extension_pickle['fitted_values'])), 'wb') as ffitval:
dump(fitted_values, ffitval)
if (fitted_values_sec is not None):
if (('array' not in fitted_values) or ('l_param' not in fitted_values)):
raise ValueError("fitted_values should be a dictionary with 2 keys 'array' and 'l_param'")
with open(join(folder, '{}{}'.format(obj_name, extension_pickle['fitted_values_sec'])), 'wb') as ffitvals:
dump(fitted_values_sec, ffitvals) | def save_chain_analysis(obj_name, fitted_values=None, fitted_values_sec=None, df_fittedval=None, folder=None):
'Save chain analysis results.\n\n TODO: Update to use pickle_stuff\n '
if (folder is None):
folder = getcwd()
else:
makedirs(folder, exist_ok=True)
if (df_fittedval is not None):
with open(join(folder, '{}{}'.format(obj_name, extension_pickle['df_fittedval'])), 'wb') as fdffitval:
dump(df_fittedval, fdffitval)
if (fitted_values is not None):
if (('array' not in fitted_values) or ('l_param' not in fitted_values)):
raise ValueError("fitted_values should be a dictionary with 2 keys 'array' and 'l_param'")
with open(join(folder, '{}{}'.format(obj_name, extension_pickle['fitted_values'])), 'wb') as ffitval:
dump(fitted_values, ffitval)
if (fitted_values_sec is not None):
if (('array' not in fitted_values) or ('l_param' not in fitted_values)):
raise ValueError("fitted_values should be a dictionary with 2 keys 'array' and 'l_param'")
with open(join(folder, '{}{}'.format(obj_name, extension_pickle['fitted_values_sec'])), 'wb') as ffitvals:
dump(fitted_values_sec, ffitvals)<|docstring|>Save chain analysis results.
TODO: Update to use pickle_stuff<|endoftext|> |
2fb009e0e595551e436b73c0bf95b97fae6fee752c7da1ab80a8aa8b7b6320e3 | def load_emceesampler(obj_name, folder='.'):
'Save Emcee sampler elements.\n\n :param str obj_name: Name of the object for which you want to load the chain analysis results.\n This is used to infer the names of the pickle files\n :param str folder:\n '
if (folder is None):
folder = getcwd()
with open(join(folder, '{}{}'.format(obj_name, extension_pickle['chain'])), 'rb') as fchain:
chain = load(fchain)
with open(join(folder, '{}{}'.format(obj_name, extension_pickle['lnpost'])), 'rb') as flnprob:
lnprobability = load(flnprob)
with open(join(folder, '{}{}'.format(obj_name, extension_pickle['acceptfrac'])), 'rb') as faccfrac:
acceptance_fraction = load(faccfrac)
with open(join(folder, '{}{}'.format(obj_name, extension_pickle['l_param_name'])), 'rb') as flparam:
l_param_name = load(flparam)
return (chain, lnprobability, acceptance_fraction, l_param_name) | Save Emcee sampler elements.
:param str obj_name: Name of the object for which you want to load the chain analysis results.
This is used to infer the names of the pickle files
:param str folder: | CaRM_HD189733/scripts/emcee_tools.py | load_emceesampler | EduardoCristo/CaRM | 0 | python | def load_emceesampler(obj_name, folder='.'):
'Save Emcee sampler elements.\n\n :param str obj_name: Name of the object for which you want to load the chain analysis results.\n This is used to infer the names of the pickle files\n :param str folder:\n '
if (folder is None):
folder = getcwd()
with open(join(folder, '{}{}'.format(obj_name, extension_pickle['chain'])), 'rb') as fchain:
chain = load(fchain)
with open(join(folder, '{}{}'.format(obj_name, extension_pickle['lnpost'])), 'rb') as flnprob:
lnprobability = load(flnprob)
with open(join(folder, '{}{}'.format(obj_name, extension_pickle['acceptfrac'])), 'rb') as faccfrac:
acceptance_fraction = load(faccfrac)
with open(join(folder, '{}{}'.format(obj_name, extension_pickle['l_param_name'])), 'rb') as flparam:
l_param_name = load(flparam)
return (chain, lnprobability, acceptance_fraction, l_param_name) | def load_emceesampler(obj_name, folder='.'):
'Save Emcee sampler elements.\n\n :param str obj_name: Name of the object for which you want to load the chain analysis results.\n This is used to infer the names of the pickle files\n :param str folder:\n '
if (folder is None):
folder = getcwd()
with open(join(folder, '{}{}'.format(obj_name, extension_pickle['chain'])), 'rb') as fchain:
chain = load(fchain)
with open(join(folder, '{}{}'.format(obj_name, extension_pickle['lnpost'])), 'rb') as flnprob:
lnprobability = load(flnprob)
with open(join(folder, '{}{}'.format(obj_name, extension_pickle['acceptfrac'])), 'rb') as faccfrac:
acceptance_fraction = load(faccfrac)
with open(join(folder, '{}{}'.format(obj_name, extension_pickle['l_param_name'])), 'rb') as flparam:
l_param_name = load(flparam)
return (chain, lnprobability, acceptance_fraction, l_param_name)<|docstring|>Save Emcee sampler elements.
:param str obj_name: Name of the object for which you want to load the chain analysis results.
This is used to infer the names of the pickle files
:param str folder:<|endoftext|> |
da2615ff8792d9323e020e55323774ec3892a180e6c3bb241db8ce6323c770be | def load_chain_analysis(obj_name, folder=None):
'Save Emcee sampler elements.\n\n :param str obj_name: Name of the object for which you want to load the chain analysis results.\n This is used to infer the names of the pickle files\n :param str folder:\n '
if (folder is None):
folder = getcwd()
file_df_fittedval = '{}{}'.format(obj_name, extension_pickle['df_fittedval'])
if isfile(join(folder, file_df_fittedval)):
with open(join(folder, file_df_fittedval), 'rb') as fdffitval:
df_fittedval = load(fdffitval)
else:
df_fittedval = None
file_fitted_values = '{}{}'.format(obj_name, extension_pickle['fitted_values'])
if isfile(join(folder, file_fitted_values)):
with open(join(folder, file_fitted_values), 'rb') as ffitval:
fitted_values = load(ffitval)
else:
fitted_values = None
file_fitted_values_sec = '{}{}'.format(obj_name, extension_pickle['fitted_values_sec'])
if isfile(join(folder, file_fitted_values_sec)):
with open(join(folder, file_fitted_values_sec), 'rb') as ffitvals:
fitted_values_sec = load(ffitvals)
else:
fitted_values_sec = None
return (fitted_values, fitted_values_sec, df_fittedval) | Save Emcee sampler elements.
:param str obj_name: Name of the object for which you want to load the chain analysis results.
This is used to infer the names of the pickle files
:param str folder: | CaRM_HD189733/scripts/emcee_tools.py | load_chain_analysis | EduardoCristo/CaRM | 0 | python | def load_chain_analysis(obj_name, folder=None):
'Save Emcee sampler elements.\n\n :param str obj_name: Name of the object for which you want to load the chain analysis results.\n This is used to infer the names of the pickle files\n :param str folder:\n '
if (folder is None):
folder = getcwd()
file_df_fittedval = '{}{}'.format(obj_name, extension_pickle['df_fittedval'])
if isfile(join(folder, file_df_fittedval)):
with open(join(folder, file_df_fittedval), 'rb') as fdffitval:
df_fittedval = load(fdffitval)
else:
df_fittedval = None
file_fitted_values = '{}{}'.format(obj_name, extension_pickle['fitted_values'])
if isfile(join(folder, file_fitted_values)):
with open(join(folder, file_fitted_values), 'rb') as ffitval:
fitted_values = load(ffitval)
else:
fitted_values = None
file_fitted_values_sec = '{}{}'.format(obj_name, extension_pickle['fitted_values_sec'])
if isfile(join(folder, file_fitted_values_sec)):
with open(join(folder, file_fitted_values_sec), 'rb') as ffitvals:
fitted_values_sec = load(ffitvals)
else:
fitted_values_sec = None
return (fitted_values, fitted_values_sec, df_fittedval) | def load_chain_analysis(obj_name, folder=None):
'Save Emcee sampler elements.\n\n :param str obj_name: Name of the object for which you want to load the chain analysis results.\n This is used to infer the names of the pickle files\n :param str folder:\n '
if (folder is None):
folder = getcwd()
file_df_fittedval = '{}{}'.format(obj_name, extension_pickle['df_fittedval'])
if isfile(join(folder, file_df_fittedval)):
with open(join(folder, file_df_fittedval), 'rb') as fdffitval:
df_fittedval = load(fdffitval)
else:
df_fittedval = None
file_fitted_values = '{}{}'.format(obj_name, extension_pickle['fitted_values'])
if isfile(join(folder, file_fitted_values)):
with open(join(folder, file_fitted_values), 'rb') as ffitval:
fitted_values = load(ffitval)
else:
fitted_values = None
file_fitted_values_sec = '{}{}'.format(obj_name, extension_pickle['fitted_values_sec'])
if isfile(join(folder, file_fitted_values_sec)):
with open(join(folder, file_fitted_values_sec), 'rb') as ffitvals:
fitted_values_sec = load(ffitvals)
else:
fitted_values_sec = None
return (fitted_values, fitted_values_sec, df_fittedval)<|docstring|>Save Emcee sampler elements.
:param str obj_name: Name of the object for which you want to load the chain analysis results.
This is used to infer the names of the pickle files
:param str folder:<|endoftext|> |
b44f0cb94185a3cdf46c9e8bdf1190c68e20a2e6e13277449d3b139a17676f9f | def get_param_value_OrderedDict(values, l_param_names):
'Return an Orderedictwith associate the parameter name to its value.\n '
res = OrderedDict()
for (val, name) in zip(values, l_param_names):
res[name] = val
return res | Return an Orderedictwith associate the parameter name to its value. | CaRM_HD189733/scripts/emcee_tools.py | get_param_value_OrderedDict | EduardoCristo/CaRM | 0 | python | def get_param_value_OrderedDict(values, l_param_names):
'\n '
res = OrderedDict()
for (val, name) in zip(values, l_param_names):
res[name] = val
return res | def get_param_value_OrderedDict(values, l_param_names):
'\n '
res = OrderedDict()
for (val, name) in zip(values, l_param_names):
res[name] = val
return res<|docstring|>Return an Orderedictwith associate the parameter name to its value.<|endoftext|> |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.