content
stringlengths
0
1.05M
origin
stringclasses
2 values
type
stringclasses
2 values
from typing import List
from datetime import datetime

from numpy import datetime64
from pandas import DataFrame
from dolphindb import (
    session,
    DBConnectionPool,
    PartitionedTableAppender,
    Table
)

from vnpy.trader.constant import Exchange, Interval
from vnpy.trader.object import BarData, TickData
from vnpy.trader.database import (
    BaseDatabase,
    BarOverview,
    DB_TZ,
    convert_tz
)
from vnpy.trader.setting import SETTINGS

from .dolphindb_script import (
    CREATE_DATABASE_SCRIPT,
    CREATE_BAR_TABLE_SCRIPT,
    CREATE_TICK_TABLE_SCRIPT,
    CREATE_OVERVIEW_TABLE_SCRIPT
)


class DolphindbDatabase(BaseDatabase):
    """DolphinDB database interface (vn.py ``BaseDatabase`` implementation)."""

    def __init__(self) -> None:
        """
        Read connection settings, connect to the DolphinDB server and,
        on first use, create the "dfs://vnpy" database and its tables.
        """
        self.user: str = SETTINGS["database.user"]
        self.password: str = SETTINGS["database.password"]
        self.host: str = SETTINGS["database.host"]
        self.port: int = SETTINGS["database.port"]
        self.db_path: str = "dfs://vnpy"

        # Connect to the database
        self.session: session = session()
        self.session.connect(self.host, self.port, self.user, self.password)

        # Create a connection pool (used for writing data)
        self.pool: DBConnectionPool = DBConnectionPool(self.host, self.port, 1, self.user, self.password)

        # Initialize the database and its tables on first run
        if not self.session.existsDatabase(self.db_path):
            self.session.run(CREATE_DATABASE_SCRIPT)
            self.session.run(CREATE_BAR_TABLE_SCRIPT)
            self.session.run(CREATE_TICK_TABLE_SCRIPT)
            self.session.run(CREATE_OVERVIEW_TABLE_SCRIPT)

    def save_bar_data(self, bars: List[BarData]) -> bool:
        """
        Save bar (candlestick) data and refresh the matching overview row.

        :param bars: bars to store; assumes every bar shares the symbol,
            exchange and interval of ``bars[0]`` — TODO confirm with callers.
        :return: True on completion.
        """
        # Read the key fields from the first bar
        bar: BarData = bars[0]
        symbol: str = bar.symbol
        exchange: Exchange = bar.exchange
        interval: Interval = bar.interval

        # Convert to a DataFrame and write it into the database
        data: List[dict] = []

        for bar in bars:
            dt: datetime64 = datetime64(convert_tz(bar.datetime))

            d: dict = {
                "symbol": symbol,
                "exchange": exchange.value,
                "datetime": dt,
                "interval": interval.value,
                "volume": float(bar.volume),
                "turnover": float(bar.turnover),
                "open_interest": float(bar.open_interest),
                "open_price": float(bar.open_price),
                "high_price": float(bar.high_price),
                "low_price": float(bar.low_price),
                "close_price": float(bar.close_price)
            }

            data.append(d)

        df: DataFrame = DataFrame.from_records(data)

        appender: PartitionedTableAppender = PartitionedTableAppender(self.db_path, "bar", "datetime", self.pool)
        appender.append(df)

        # Compute the summary of the bar data now stored for this key
        table: Table = self.session.loadTable(tableName="bar", dbPath=self.db_path)

        # Earliest bar for this symbol/exchange/interval
        df_start: DataFrame = (
            table.select('*')
            .where(f'symbol="{symbol}"')
            .where(f'exchange="{exchange.value}"')
            .where(f'interval="{interval.value}"')
            .sort(bys=["datetime"]).top(1)
            .toDF()
        )

        # Latest bar for this symbol/exchange/interval
        df_end: DataFrame = (
            table.select('*')
            .where(f'symbol="{symbol}"')
            .where(f'exchange="{exchange.value}"')
            .where(f'interval="{interval.value}"')
            .sort(bys=["datetime desc"]).top(1)
            .toDF()
        )

        # Total row count for this symbol/exchange/interval
        df_count: DataFrame = (
            table.select('count(*)')
            .where(f'symbol="{symbol}"')
            .where(f'exchange="{exchange.value}"')
            .where(f'interval="{interval.value}"')
            .toDF()
        )

        count: int = df_count["count"][0]
        start: datetime = df_start["datetime"][0]
        end: datetime = df_end["datetime"][0]

        # Update the bar overview data
        data: List[dict] = []

        dt: datetime64 = datetime64(datetime(2022, 1, 1))    # This timestamp is only used for partitioning

        d: dict = {
            "symbol": symbol,
            "exchange": exchange.value,
            "interval": interval.value,
            "count": count,
            "start": start,
            "end": end,
            "datetime": dt,
        }
        data.append(d)

        df: DataFrame = DataFrame.from_records(data)

        appender: PartitionedTableAppender = PartitionedTableAppender(self.db_path, "overview", "datetime", self.pool)
        appender.append(df)

        return True

    def save_tick_data(self, ticks: List[TickData]) -> bool:
        """
        Save tick data.

        :param ticks: ticks to store (each row keeps its own symbol/exchange).
        :return: True on completion.
        """
        data: List[dict] = []

        for tick in ticks:
            dt: datetime64 = datetime64(convert_tz(tick.datetime))

            d: dict = {
                "symbol": tick.symbol,
                "exchange": tick.exchange.value,
                "datetime": dt,
                "name": tick.name,
                "volume": float(tick.volume),
                "turnover": float(tick.turnover),
                "open_interest": float(tick.open_interest),
                "last_price": float(tick.last_price),
                "last_volume": float(tick.last_volume),
                "limit_up": float(tick.limit_up),
                "limit_down": float(tick.limit_down),
                "open_price": float(tick.open_price),
                "high_price": float(tick.high_price),
                "low_price": float(tick.low_price),
                "pre_close": float(tick.pre_close),
                "bid_price_1": float(tick.bid_price_1),
                "bid_price_2": float(tick.bid_price_2),
                "bid_price_3": float(tick.bid_price_3),
                "bid_price_4": float(tick.bid_price_4),
                "bid_price_5": float(tick.bid_price_5),
                "ask_price_1": float(tick.ask_price_1),
                "ask_price_2": float(tick.ask_price_2),
                "ask_price_3": float(tick.ask_price_3),
                "ask_price_4": float(tick.ask_price_4),
                "ask_price_5": float(tick.ask_price_5),
                "bid_volume_1": float(tick.bid_volume_1),
                "bid_volume_2": float(tick.bid_volume_2),
                "bid_volume_3": float(tick.bid_volume_3),
                "bid_volume_4": float(tick.bid_volume_4),
                "bid_volume_5": float(tick.bid_volume_5),
                "ask_volume_1": float(tick.ask_volume_1),
                "ask_volume_2": float(tick.ask_volume_2),
                "ask_volume_3": float(tick.ask_volume_3),
                "ask_volume_4": float(tick.ask_volume_4),
                "ask_volume_5": float(tick.ask_volume_5),
                "localtime": datetime64(tick.localtime),
            }

            data.append(d)

        df: DataFrame = DataFrame.from_records(data)

        appender: PartitionedTableAppender = PartitionedTableAppender(self.db_path, "tick", "datetime", self.pool)
        appender.append(df)

        return True

    def load_bar_data(
        self,
        symbol: str,
        exchange: Exchange,
        interval: Interval,
        start: datetime,
        end: datetime
    ) -> List[BarData]:
        """
        Load bar data in the inclusive range [start, end].

        :return: list of ``BarData`` (empty when nothing matches), with
            datetimes localized to ``DB_TZ``.
        """
        # Convert the time format (dot-separated dates for the DolphinDB query)
        start: datetime64 = datetime64(start)
        start: str = str(start).replace("-", ".")

        end: datetime64 = datetime64(end)
        end: str = str(end).replace("-", ".")

        table: Table = self.session.loadTable(tableName="bar", dbPath=self.db_path)

        df: DataFrame = (
            table.select('*')
            .where(f'symbol="{symbol}"')
            .where(f'exchange="{exchange.value}"')
            .where(f'interval="{interval.value}"')
            .where(f'datetime>={start}')
            .where(f'datetime<={end}')
            .toDF()
        )

        if df.empty:
            return []

        df.set_index("datetime", inplace=True)
        df: DataFrame = df.tz_localize(DB_TZ)

        # Convert to BarData format
        bars: List[BarData] = []

        for tp in df.itertuples():
            bar: BarData = BarData(
                symbol=symbol,
                exchange=exchange,
                datetime=tp.Index.to_pydatetime(),
                interval=interval,
                volume=tp.volume,
                turnover=tp.turnover,
                open_interest=tp.open_interest,
                open_price=tp.open_price,
                high_price=tp.high_price,
                low_price=tp.low_price,
                close_price=tp.close_price,
                gateway_name="DB"
            )
            bars.append(bar)

        return bars

    def load_tick_data(
        self,
        symbol: str,
        exchange: Exchange,
        start: datetime,
        end: datetime
    ) -> List[TickData]:
        """
        Load tick data in the inclusive range [start, end].

        :return: list of ``TickData`` (empty when nothing matches), with
            datetimes localized to ``DB_TZ``.
        """
        # Convert the time format (dot-separated dates for the DolphinDB query)
        start: datetime64 = datetime64(start)
        start: str = str(start).replace("-", ".")

        end: datetime64 = datetime64(end)
        end: str = str(end).replace("-", ".")

        # Read the data into a DataFrame
        table: Table = self.session.loadTable(tableName="tick", dbPath=self.db_path)

        df: DataFrame = (
            table.select('*')
            .where(f'symbol="{symbol}"')
            .where(f'exchange="{exchange.value}"')
            .where(f'datetime>={start}')
            .where(f'datetime<={end}')
            .toDF()
        )

        if df.empty:
            return []

        df.set_index("datetime", inplace=True)
        df: DataFrame = df.tz_localize(DB_TZ)

        # Convert to TickData format
        ticks: List[TickData] = []

        for tp in df.itertuples():
            tick: TickData = TickData(
                symbol=symbol,
                exchange=exchange,
                datetime=tp.Index.to_pydatetime(),
                name=tp.name,
                volume=tp.volume,
                turnover=tp.turnover,
                open_interest=tp.open_interest,
                last_price=tp.last_price,
                last_volume=tp.last_volume,
                limit_up=tp.limit_up,
                limit_down=tp.limit_down,
                open_price=tp.open_price,
                high_price=tp.high_price,
                low_price=tp.low_price,
                pre_close=tp.pre_close,
                bid_price_1=tp.bid_price_1,
                bid_price_2=tp.bid_price_2,
                bid_price_3=tp.bid_price_3,
                bid_price_4=tp.bid_price_4,
                bid_price_5=tp.bid_price_5,
                ask_price_1=tp.ask_price_1,
                ask_price_2=tp.ask_price_2,
                ask_price_3=tp.ask_price_3,
                ask_price_4=tp.ask_price_4,
                ask_price_5=tp.ask_price_5,
                bid_volume_1=tp.bid_volume_1,
                bid_volume_2=tp.bid_volume_2,
                bid_volume_3=tp.bid_volume_3,
                bid_volume_4=tp.bid_volume_4,
                bid_volume_5=tp.bid_volume_5,
                ask_volume_1=tp.ask_volume_1,
                ask_volume_2=tp.ask_volume_2,
                ask_volume_3=tp.ask_volume_3,
                ask_volume_4=tp.ask_volume_4,
                ask_volume_5=tp.ask_volume_5,
                localtime=tp.localtime,
                gateway_name="DB"
            )
            ticks.append(tick)

        return ticks

    def delete_bar_data(
        self,
        symbol: str,
        exchange: Exchange,
        interval: Interval
    ) -> int:
        """
        Delete all bar data for the given symbol/exchange/interval,
        along with its overview row.

        :return: number of bar rows that were deleted.
        """
        # Load the data table
        table: Table = self.session.loadTable(tableName="bar", dbPath=self.db_path)

        # Count the rows about to be removed
        df: DataFrame = (
            table.select('count(*)')
            .where(f'symbol="{symbol}"')
            .where(f'exchange="{exchange.value}"')
            .where(f'interval="{interval.value}"')
            .toDF()
        )
        count: int = df["count"][0]

        # Delete the bar data
        (
            table.delete()
            .where(f'symbol="{symbol}"')
            .where(f'exchange="{exchange.value}"')
            .where(f'interval="{interval.value}"')
            .execute()
        )

        # Delete the bar overview
        table: Table = self.session.loadTable(tableName="overview", dbPath=self.db_path)

        (
            table.delete()
            .where(f'symbol="{symbol}"')
            .where(f'exchange="{exchange.value}"')
            .where(f'interval="{interval.value}"')
            .execute()
        )

        return count

    def delete_tick_data(
        self,
        symbol: str,
        exchange: Exchange
    ) -> int:
        """
        Delete all tick data for the given symbol/exchange.

        :return: number of tick rows that were deleted.
        """
        # Load the data table
        table: Table = self.session.loadTable(tableName="tick", dbPath=self.db_path)

        # Count the rows about to be removed
        df: DataFrame = (
            table.select('count(*)')
            .where(f'symbol="{symbol}"')
            .where(f'exchange="{exchange.value}"')
            .toDF()
        )
        count: int = df["count"][0]

        # Delete the tick data
        (
            table.delete()
            .where(f'symbol="{symbol}"')
            .where(f'exchange="{exchange.value}"')
            .execute()
        )

        return count

    def get_bar_overview(self) -> List[BarOverview]:
        """Query the bar-data summary information stored in the database."""
        table: Table = self.session.loadTable(tableName="overview", dbPath=self.db_path)
        df: DataFrame = table.select('*').toDF()

        overviews: List[BarOverview] = []

        for tp in df.itertuples():
            overview: BarOverview = BarOverview(
                symbol=tp.symbol,
                exchange=Exchange(tp.exchange),
                interval=Interval(tp.interval),
                count=tp.count,
                # Round-trip through a POSIX timestamp to attach DB_TZ
                start=datetime.fromtimestamp(tp.start.to_pydatetime().timestamp(), DB_TZ),
                end=datetime.fromtimestamp(tp.end.to_pydatetime().timestamp(), DB_TZ),
            )
            overviews.append(overview)

        return overviews
nilq/baby-python
python
from __future__ import absolute_import

# Module author tag kept as-is from the original source.
__author__ = "akniazev"

from collections import OrderedDict
nilq/baby-python
python
""" File: similarity.py Name: Po Kai Feng ---------------------------- This program compares short dna sequence, s2, with sub sequences of a long dna sequence, s1 The way of approaching this task is the same as what people are doing in the bio industry. """ def main(): """ User will types a long DNA sequence. Then user will type a short DNA sequence to match the long DNA sequence. Finally the code will calculate and return the homology of two DNA sequences. """ long_seq = input('Please give me a DNA sequence to search: ') short_seq = input('What DNA sequence would you like to match? ') print('The best match is '+find_homology(long_seq.upper(), short_seq.upper())) def find_homology(long_seq, short_seq): """ :param long_seq: str, the base DNA sequence user wants to search in with all upper case characters :param short_seq: str, the DNA sequence user wants to match with all upper case characters :return: the homology in long_seq """ homology = '' similarity = 0 for i in range(len(long_seq) - len(short_seq) + 1): # Search from [0] to [long_seq - short_seq] in long_seq new_homology = '' new_similarity = 0 for j in range(i, i + len(short_seq)): # Get the similarity of short_seq and the string from long_seq[i] to long_seq[i+len(short_seq)-1] if long_seq[j] == short_seq[j - i]: # The two DNA match and should add up similarity new_similarity += 1 else: pass if new_similarity > similarity: # The new DNA section in long_seq has more similarity and should replace the homology similarity = new_similarity for k in range(i, i + len(short_seq)): # Assign new homology new_homology += long_seq[k] homology = new_homology return homology ###### DO NOT EDIT CODE BELOW THIS LINE ###### if __name__ == '__main__': main()
nilq/baby-python
python
# Generated by Django 3.2 on 2021-04-15 17:40 from django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='Member', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=255, verbose_name='Name')), ('phone', models.CharField(blank=True, max_length=255, verbose_name='Phone')), ('mobile_phone', models.CharField(blank=True, max_length=255, verbose_name='Mobile phone')), ('address', models.CharField(max_length=255, verbose_name='Address')), ('baptize_date', models.DateField(blank=True, null=True, verbose_name='Baptize date')), ('birth_date', models.DateField(blank=True, null=True, verbose_name='Birth date')), ('note', models.TextField(blank=True, verbose_name='Note')), ], ), ]
nilq/baby-python
python
import abjad import collections import importlib import itertools import os from abjad.tools import abctools from abjad.tools import indicatortools from abjad.tools import instrumenttools from abjad.tools import lilypondfiletools from abjad.tools import markuptools from abjad.tools import mathtools from abjad.tools import metertools from abjad.tools import rhythmmakertools from abjad.tools import selectiontools from abjad.tools import spannertools from abjad.tools import systemtools class SegmentMaker(abctools.AbjadObject): r'''A Consort segment-maker. :: >>> score_template = abjad.templatetools.StringOrchestraScoreTemplate( ... violin_count=2, ... viola_count=1, ... cello_count=1, ... contrabass_count=0, ... ) :: >>> segment_maker = consort.SegmentMaker( ... score_template=score_template, ... settings=( ... consort.MusicSetting( ... timespan_maker=consort.TaleaTimespanMaker(), ... violin_1_bowing_voice=consort.MusicSpecifier(), ... violin_2_bowing_voice=consort.MusicSpecifier(), ... ), ... ), ... desired_duration_in_seconds=2, ... tempo=abjad.MetronomeMark((1, 4), 72), ... permitted_time_signatures=( ... (5, 8), ... (7, 16), ... ), ... 
) >>> print(format(segment_maker)) consort.tools.SegmentMaker( desired_duration_in_seconds=abjad.Duration(2, 1), permitted_time_signatures=abjad.TimeSignatureList( [ abjad.TimeSignature((5, 8)), abjad.TimeSignature((7, 16)), ] ), score_template=templatetools.StringOrchestraScoreTemplate( violin_count=2, viola_count=1, cello_count=1, contrabass_count=0, split_hands=True, use_percussion_clefs=False, ), settings=( consort.tools.MusicSetting( timespan_maker=consort.tools.TaleaTimespanMaker( playing_talea=rhythmmakertools.Talea( counts=[4], denominator=16, ), playing_groupings=(1,), repeat=True, silence_talea=rhythmmakertools.Talea( counts=[4], denominator=16, ), step_anchor=Right, synchronize_groupings=False, synchronize_step=False, ), violin_1_bowing_voice=consort.tools.MusicSpecifier(), violin_2_bowing_voice=consort.tools.MusicSpecifier(), ), ), tempo=abjad.MetronomeMark( reference_duration=abjad.Duration(1, 4), units_per_minute=72, ), ) :: >>> lilypond_file = segment_maker() # doctest: +SKIP Performing rhythmic interpretation: populating independent timespans: populated timespans: ... found meters: ... demultiplexed timespans: ... split timespans: ... pruned malformed timespans: ... consolidated timespans: ... inscribed timespans: ... multiplexed timespans: ... pruned short timespans: ... pruned meters: ... total: ... populating dependent timespans: populated timespans: ... demultiplexed timespans: ... split timespans: ... pruned short timespans: ... pruned malformed timespans: ... consolidated timespans: ... inscribed timespans: ... total: ... populated silent timespans: ... validated timespans: ... rewriting meters: rewriting Cello Bowing Voice: 2 rewriting Cello Fingering Voice: 2 rewriting Viola Bowing Voice: 2 rewriting Viola Fingering Voice: 2 rewriting Violin 1 Bowing Voice: 3 rewriting Violin 1 Fingering Voice: 2 rewriting Violin 2 Bowing Voice: 3 rewriting Violin 2 Fingering Voice: 2 total: 0.169489145279 populated score: ... total: ... 
Performing non-rhythmic interpretation: collected attack points: ... handled graces: ... handled pitches: ... handled attachments: ... total: ... Checking for well-formedness violations: [] 24 check_beamed_quarter_notes [] 18 check_discontiguous_spanners [] 80 check_duplicate_ids [] 0 check_intermarked_hairpins [] 2 check_misdurated_measures [] 2 check_misfilled_measures [] 4 check_mispitched_ties [] 24 check_misrepresented_flags [] 80 check_missing_parents [] 2 check_nested_measures [] 0 check_overlapping_beams [] 0 check_overlapping_glissandi [] 0 check_overlapping_octavation_spanners [] 0 check_short_hairpins total: ... ''' ### CLASS VARIABLES ### __slots__ = ( '_annotate_colors', '_annotate_phrasing', '_annotate_timespans', '_attack_point_map', '_desired_duration_in_seconds', '_discard_final_silence', '_lilypond_file', '_maximum_meter_run_length', '_meters', '_name', '_omit_stylesheets', '_permitted_time_signatures', '_previous_segment_metadata', '_repeat', '_score', '_score_template', '_segment_metadata', '_settings', '_tempo', '_timespan_quantization', '_voice_names', '_voicewise_timespans', ) ### INITIALIZER ### def __init__( self, annotate_colors=None, annotate_phrasing=None, annotate_timespans=None, desired_duration_in_seconds=None, discard_final_silence=None, maximum_meter_run_length=None, name=None, omit_stylesheets=None, permitted_time_signatures=None, repeat=None, score_template=None, settings=None, tempo=None, timespan_quantization=None, ): self.name = name self.annotate_colors = annotate_colors self.annotate_phrasing = annotate_phrasing self.annotate_timespans = annotate_timespans self.discard_final_silence = discard_final_silence self.desired_duration_in_seconds = desired_duration_in_seconds self.maximum_meter_run_length = maximum_meter_run_length self.omit_stylesheets = omit_stylesheets self.permitted_time_signatures = permitted_time_signatures self.score_template = score_template self.tempo = tempo self.timespan_quantization = 
timespan_quantization self.settings = settings self.repeat = repeat self._reset() ### SPECIAL METHODS ### def __call__( self, annotate=None, verbose=True, segment_metadata=None, previous_segment_metadata=None, ): import consort self._reset() self._annotate_phrasing = self._annotate_phrasing or annotate self._segment_metadata = segment_metadata or \ collections.OrderedDict() self._previous_segment_metadata = previous_segment_metadata or \ collections.OrderedDict() self._score = self.score_template() self._voice_names = tuple( voice.name for voice in abjad.iterate(self.score).by_class(abjad.Voice) ) with systemtools.Timer( ' total:', 'Performing rhythmic interpretation:', verbose=verbose, ): self.interpret_rhythms(verbose=verbose) self.add_time_signature_context() self.score._update_logical_measure_numbers() with systemtools.Timer( ' total:', 'Performing non-rhythmic interpretation:', verbose=verbose, ): with systemtools.Timer( ' collected attack points:', verbose=verbose, ): attack_point_map = self.collect_attack_points(self.score) self._attack_point_map = attack_point_map with systemtools.ForbidUpdate(self.score, update_on_exit=True): with systemtools.Timer( ' handled instruments:', verbose=verbose, ): self.apply_instruments() with systemtools.ForbidUpdate(self.score, update_on_exit=True): with systemtools.Timer( ' handled graces:', verbose=verbose, ): consort.GraceHandler._process_session(self) with systemtools.ForbidUpdate(self.score, update_on_exit=True): with systemtools.Timer( ' total:', ' handling pitches:', verbose=verbose, ): consort.PitchHandler._process_session(self) with systemtools.ForbidUpdate(self.score, update_on_exit=True): with systemtools.Timer( ' total:', ' handling registers:', verbose=verbose, ): consort.RegisterHandler._process_session(self) with systemtools.ForbidUpdate(self.score, update_on_exit=True): with systemtools.Timer( ' total:', ' handling attachments:', verbose=verbose, ): consort.AttachmentHandler._process_session( self, 
verbose=verbose, ) self.configure_score() self.configure_lilypond_file() with systemtools.Timer( enter_message='Checking for well-formedness violations:', exit_message=' total:', verbose=verbose, ): self.validate_score(self.score, verbose=verbose) self.update_segment_metadata() return self.lilypond_file, self._segment_metadata def __illustrate__(self, **kwargs): r'''Illustrates segment-maker. Returns LilyPond file. ''' lilypond_file, metadata = self(**kwargs) return lilypond_file ### PRIVATE METHODS ### def _reset(self): self._attack_point_map = None self._lilypond_file = None self._meters = None self._score = None self._voice_names = None self._voicewise_timespans = None self._segment_metadata = None self._previous_segment_metadata = None def _get_format_specification(self): agent = systemtools.StorageFormatAgent(self) names = list(agent.signature_keyword_names) if not self.settings: names.remove('settings') return systemtools.FormatSpecification( client=self, storage_format_kwargs_names=names, ) ### PUBLIC METHODS ### def get_end_instruments(self): result = collections.OrderedDict() staves = abjad.iterate(self._score).by_class(abjad.Staff) staves = list(staves) staves.sort(key=lambda x: x.name) prototype = (instrumenttools.Instrument,) for staff in staves: last_leaf = abjad.inspect(staff).get_leaf(-1) instrument = abjad.inspect(last_leaf).get_effective(prototype) if instrument: formatted = format(instrument) formatted = formatted.replace('\n', ' ') formatted = formatted.replace(' ', '') formatted = formatted.replace(' )', ')') formatted = formatted.replace(' ]', ']') formatted = formatted.replace('( ', '(') formatted = formatted.replace('[ ', '[') result[staff.name] = formatted else: result[staff.name] = None return result def get_end_tempo_indication(self): prototype = abjad.MetronomeMark context = self._score['Time Signature Context'] last_leaf = abjad.inspect(context).get_leaf(-1) effective_tempo = abjad.inspect(last_leaf).get_effective(prototype) if 
effective_tempo is not None: duration = effective_tempo.reference_duration.pair units_per_minute = effective_tempo.units_per_minute effective_tempo = (duration, units_per_minute) return effective_tempo def get_end_time_signature(self): prototype = abjad.TimeSignature context = self._score['Time Signature Context'] last_measure = context[-1] time_signature = abjad.inspect(last_measure).get_effective(prototype) if not time_signature: return pair = time_signature.pair return pair def add_time_signature_context(self): import consort if 'Time Signature Context' not in self.score: time_signature_context = \ consort.ScoreTemplateManager.make_time_signature_context() self.score.insert(0, time_signature_context) context = self.score['Time Signature Context'] time_signatures = [_.implied_time_signature for _ in self.meters] iterator = itertools.groupby(time_signatures, lambda x: x) measures = [] for time_signature, group in iterator: count = len(tuple(group)) skip = abjad.Skip(1) multiplier = abjad.Multiplier(time_signature) * count abjad.attach(multiplier, skip) abjad.attach(time_signature, skip, scope=abjad.Score) measure = abjad.Container([skip]) measures.append(measure) context.extend(measures) def add_setting( self, silenced_contexts=None, timespan_identifier=None, timespan_maker=None, color=None, **music_specifiers ): import consort setting = consort.MusicSetting( silenced_contexts=silenced_contexts, timespan_identifier=timespan_identifier, timespan_maker=timespan_maker, color=color, **music_specifiers ) self._settings.append(setting) def attach_initial_bar_line(self): segment_number = self._segment_metadata.get('segment_number', 1) or 1 if self.repeat: if segment_number != 1: command = indicatortools.LilyPondCommand('break', 'opening') abjad.attach(command, self.score['Time Signature Context']) return elif self._previous_segment_metadata.get('is_repeated'): return elif segment_number == 1: return bar_line = indicatortools.LilyPondCommand('bar "||"', 'opening') for 
staff in abjad.iterate(self.score).by_class(abjad.Staff): abjad.attach(bar_line, staff) def attach_final_bar_line(self): segment_number = int(self._segment_metadata.get('segment_number', 1) or 1) segment_count = int(self._segment_metadata.get('segment_count', 1) or 1) if self.repeat: repeat = indicatortools.Repeat() for staff in abjad.iterate(self.score).by_class(abjad.Staff): abjad.attach(repeat, staff) abjad.attach(repeat, self.score['Time Signature Context']) elif segment_number == segment_count: self.score.add_final_bar_line( abbreviation='|.', to_each_voice=True, ) if segment_number == segment_count and self.final_markup is not None: self.score.add_final_markup(self.final_markup) def get_rehearsal_letter(self): segment_number = self._segment_metadata.get('segment_number', 1) or 1 if segment_number == 1: return '' segment_index = segment_number - 1 rehearsal_ordinal = ord('A') - 1 + segment_index rehearsal_letter = chr(rehearsal_ordinal) return rehearsal_letter def attach_rehearsal_mark(self): markup_a, markup_b = None, None first_leaf = next(abjad.iterate( self.score['Time Signature Context']).by_leaf()) rehearsal_letter = self.get_rehearsal_letter() if rehearsal_letter: markup_a = markuptools.Markup(rehearsal_letter) markup_a = markup_a.caps().pad_around(0.5).box() if self.name: markup_b = markuptools.Markup('"{}"'.format(self.name or ' ')) markup_b = markup_b.fontsize(-3) if markup_a and markup_b: markup = markuptools.Markup.concat([markup_a, ' ', markup_b]) else: markup = markup_a or markup_b if markup: rehearsal_mark = indicatortools.RehearsalMark(markup=markup) abjad.attach(rehearsal_mark, first_leaf) def attach_tempo(self): first_leaf = next(abjad.iterate( self.score['Time Signature Context']).by_leaf()) if self.tempo is not None: abjad.attach(self.tempo, first_leaf) def configure_lilypond_file(self): import consort comments = [] includes = [] if self.score_package_name != 'consort': comments.extend([ consort.PackageGitCommitToken('abjad'), 
consort.PackageGitCommitToken('consort'), consort.PackageGitCommitToken(self.score_package_name), ]) if not self.omit_stylesheets: path = os.path.join( '..', '..', 'stylesheets', 'stylesheet.ily', ) includes.append(path) if 1 < (self._segment_metadata.get('segment_number', 1) or 1): path = os.path.join( '..', '..', 'stylesheets', 'nonfirst-segment.ily', ) includes.append(path) score_block = lilypondfiletools.Block(name='score') score_block.items.append(self.score) items = [score_block] lilypond_file = lilypondfiletools.LilyPondFile( comments=comments, date_time_token=False, includes=includes, items=items, use_relative_includes=True, ) self._lilypond_file = lilypond_file def configure_score(self): self.set_bar_number() self.postprocess_grace_containers() self.postprocess_ties() self.postprocess_staff_lines_spanners() self.postprocess_multimeasure_rests() self.attach_bar_number_comments() self.attach_tempo() self.attach_rehearsal_mark() self.attach_initial_bar_line() self.attach_final_bar_line() self.apply_annotations() def apply_annotations(self): import consort if self.annotate_phrasing: consort.annotate(self.score, nonsilence=True) if self.annotate_timespans: context = self.score['Time Signature Context'] for leaf in abjad.iterate(context).by_class(abjad.Leaf): timespan = abjad.inspect(leaf).get_timespan() start_fraction = markuptools.Markup.fraction( timespan.start_offset) stop_fraction = markuptools.Markup.fraction( timespan.stop_offset) markup_contents = [start_fraction, ' : ', stop_fraction] markup = markuptools.Markup.concat(markup_contents) markup = markuptools.Markup(markup, Up) markup = markup.pad_around(0.5).box() abjad.attach(markup, leaf) if self.annotate_colors: for voice in abjad.iterate(self.score).by_class(abjad.Voice): for phrase in voice: music_specifier = abjad.inspect(phrase).get_indicator( consort.MusicSpecifier) if music_specifier is None: continue color = music_specifier.color if color is None: continue spanner = consort.ColorBracket(color) 
abjad.attach(spanner, phrase) for voice in abjad.iterate(self.score).by_class(abjad.Voice): for phrase in voice: music_specifier = abjad.inspect(phrase).get_indicator( consort.MusicSpecifier) if music_specifier is None: continue comment = music_specifier.comment if comment is None: continue comment = '[{}] Material: "{}"'.format(voice.name, comment) comment = indicatortools.LilyPondComment(comment) abjad.attach(comment, phrase) def apply_instruments(self): import abjad import consort end_instruments = self._previous_segment_metadata.get( 'end_instruments_by_staff', {}) for voice in abjad.iterate(self.score).by_class(abjad.Voice): for i, phrase in enumerate(voice): staff = voice._parent music_specifier = abjad.inspect(phrase).get_indicator( consort.MusicSpecifier) first_leaf = next(abjad.iterate(phrase).by_leaf()) previous_instrument = None if i == 0 and end_instruments.get(staff.name): for parent in phrase._get_parentage(include_self=False): abjad.detach(consort.Instrument, parent) string = 'instrument = {}'.format( end_instruments[staff.name]) namespace = abjad.__dict__.copy() namespace.update(abjad=abjad, consort=consort) try: exec(string, namespace) except: print(string) raise previous_instrument = namespace['instrument'] abjad.attach(previous_instrument, first_leaf) if music_specifier is None: continue instrument = music_specifier.instrument if instrument is None: continue if i == 0: for parent in first_leaf._get_parentage(include_self=True): abjad.detach(consort.Instrument, parent) abjad.attach( previous_instrument, first_leaf, synthetic_offset=-1, ) abjad.attach(instrument, first_leaf) def postprocess_multimeasure_rests(self): def division_to_meter(division): offset = abjad.inspect(division).get_timespan().start_offset timespan = meter_timespans.find_timespans_starting_at(offset)[0] meter = timespan.annotation return meter import consort silent_specifier = consort.MusicSpecifier() meter_timespans = self.meters_to_timespans(self.meters) with 
systemtools.ForbidUpdate(self.score): for voice in abjad.iterate(self.score).by_class(abjad.Voice): for phrase in voice: music_specifier = abjad.inspect(phrase).get_indicator( consort.MusicSpecifier) if music_specifier != silent_specifier: continue divisions = [ _ for _ in phrase if isinstance(_[0], abjad.MultimeasureRest) ] iterator = itertools.groupby(divisions, division_to_meter) for meter, grouped_divisions in iterator: grouped_divisions = list(grouped_divisions) count = len(grouped_divisions) if count == 1: continue for division in grouped_divisions[1:]: phrase.remove(division) rest = grouped_divisions[0][0] multiplier = abjad.inspect(rest).get_indicator( abjad.Multiplier) abjad.detach(multiplier, rest) multiplier = multiplier * count abjad.attach(multiplier, rest) def postprocess_staff_lines_spanners(self): segment_number = self._segment_metadata.get('segment_number', 1) or 1 segment_count = self._segment_metadata.get('segment_count', 1) or 1 if segment_number != segment_count: return for voice in abjad.iterate(self.score).by_class(abjad.Voice): for leaf in abjad.iterate(voice).by_class(abjad.Leaf, reverse=True): if not isinstance(leaf, abjad.MultimeasureRest): break prototype = spannertools.StaffLinesSpanner if not abjad.inspect(leaf).has_spanner(prototype): continue staff_lines_spanner = abjad.inspect(leaf).get_spanner(prototype) components = staff_lines_spanner.components abjad.detach(staff_lines_spanner) staff_lines_spanner = abjad.new( staff_lines_spanner, forbid_restarting=True, ) abjad.attach( staff_lines_spanner, components, name='staff_lines_spanner', ) break def attach_bar_number_comments(self): first_bar_number = self._segment_metadata.get('first_bar_number', 1) or 1 measure_offsets = self.measure_offsets for voice in abjad.iterate(self.score).by_class(abjad.Voice): voice_name = voice.name for phrase in voice: for division in phrase: timespan = abjad.inspect(division).get_timespan() start_offset = timespan.start_offset matched = False for 
bar_number, measure_offset in enumerate( measure_offsets, first_bar_number): if measure_offset == start_offset: matched = True break if not matched: continue string = '[{}] Measure {}'.format( voice_name, bar_number, ) comment = indicatortools.LilyPondComment(string) abjad.attach(comment, division) # for leaf in abjad.iterate(phrase).by_leaf(): # string = '[{}] Logical Measure {}'.format( # voice_name, # leaf._logical_measure_number # ) # comment = indicatortools.LilyPondComment(string) # abjad.attach(comment, leaf) def postprocess_ties(self): for component in abjad.iterate(self.score).depth_first(): if not abjad.inspect(component).has_spanner(spannertools.Tie): continue tie = abjad.inspect(component).get_spanner(spannertools.Tie) if component != tie[0]: continue components = tie.components abjad.detach(tie) tie = spannertools.Tie(use_messiaen_style_ties=True) abjad.attach(tie, components) def set_bar_number(self): first_bar_number = self._segment_metadata.get('first_bar_number') if first_bar_number is not None: abjad.setting(self.score).current_bar_number = first_bar_number #else: # override(self.score).bar_number.transparent = True def copy_voice( self, voice, attachment_names=None, new_voice_name=None, new_context_name=None, remove_grace_containers=False, remove_ties=False, replace_rests_with_skips=False, ): new_voice = abjad.mutate(voice).copy() if new_voice_name: new_voice.name = new_voice_name if new_context_name: new_voice.context_name = new_context_name rests = [] for component in abjad.iterate(new_voice).depth_first(capped=True): agent = abjad.inspect(component) indicators = agent.get_indicators(unwrap=False) spanners = agent.get_spanners() for x in indicators: if not x.name: continue if attachment_names and \ not any(x.name.startswith(_) for _ in attachment_names): x._detach() for x in spanners: if remove_ties and isinstance(x, spannertools.Tie): x._detach() if not x.name: continue elif attachment_names and \ not any(x.name.startswith(_) for _ in 
attachment_names): x._detach() if replace_rests_with_skips and \ isinstance(component, abjad.Rest): rests.append(component) after_grace = agent.get_after_grace_container() if after_grace is not None and remove_grace_containers: after_grace._detach() if replace_rests_with_skips: for rest in rests: indicators = abjad.inspect(rest).get_indicators( abjad.Multiplier, ) skip = abjad.Skip(rest) if indicators: abjad.attach(indicators[0], skip) abjad.mutate(rest).replace(skip) return new_voice @staticmethod def logical_tie_to_music_specifier(logical_tie): import consort parentage = abjad.inspect(logical_tie.head).get_parentage() music_specifier = None prototype = consort.MusicSpecifier for parent in parentage: if not abjad.inspect(parent).has_indicator(prototype): continue music_specifier = abjad.inspect(parent).get_indicator(prototype) return music_specifier @staticmethod def logical_tie_to_division(logical_tie): import consort parentage = abjad.inspect(logical_tie.head).get_parentage() prototype = consort.MusicSpecifier for i, parent in enumerate(parentage): if abjad.inspect(parent).has_indicator(prototype): break return parentage[i - 1] @staticmethod def logical_tie_to_phrase(logical_tie): import consort parentage = abjad.inspect(logical_tie.head).get_parentage() prototype = consort.MusicSpecifier for parent in parentage: if abjad.inspect(parent).has_indicator(prototype): return parent @staticmethod def logical_tie_to_voice(logical_tie): parentage = abjad.inspect(logical_tie.head).get_parentage() voice = None for parent in parentage: if isinstance(parent, abjad.Voice): voice = parent break return voice @staticmethod def logical_tie_to_staff(logical_tie): parentage = abjad.inspect(logical_tie.head).get_parentage() staff = None for parent in parentage: if isinstance(parent, abjad.Staff): staff = parent break return staff def postprocess_grace_containers(self): import consort score = self.score stop_trill_span = consort.StopTrillSpan() for leaf in 
abjad.iterate(score).by_class(abjad.Leaf): agent = abjad.inspect(leaf) spanners = agent.get_spanners(consort.ConsortTrillSpanner) if not spanners: continue after_grace = agent.get_after_grace_container() if after_grace is None: continue leaf = after_grace[0] abjad.attach(stop_trill_span, leaf) @staticmethod def validate_score(score, verbose=True): import consort manager = systemtools.WellformednessManager( allow_percussion_clef=True) triples = manager(score) for current_violators, current_total, current_check in triples: if verbose: print(' {} {} {}'.format( current_violators, current_total, current_check, )) if current_violators: raise AssertionError if not verbose: return for voice in abjad.iterate(score).by_class(abjad.Voice): #print(voice.name) voice_name = voice.name for phrase in voice: #print('PHRASE:', phrase) music_specifier = abjad.inspect(phrase).get_indicator( consort.MusicSpecifier) if music_specifier is None: #print('\tNO MUSIC SPECIFIER') continue pitch_handler = music_specifier.pitch_handler if pitch_handler is not None: if pitch_handler.pitches_are_nonsemantic: #print('\tPITCHES ARE NONSEMANTIC') continue instrument = music_specifier.instrument if instrument is None: instrument = abjad.inspect(phrase).get_effective( instrumenttools.Instrument) if instrument is None: #print('\tNO INSTRUMENT') continue pitch_range = instrument.pitch_range for leaf in abjad.iterate(phrase).by_class(( abjad.Note, abjad.Chord, )): timespan = abjad.inspect(leaf).get_timespan() #print('\t{!r}'.format(leaf)) if isinstance(leaf, abjad.Note): note_head = leaf.note_head #print('\t\t', note_head) if note_head.written_pitch not in pitch_range: abjad.override(leaf).note_head.color = 'red' message = ' {}Out of range: {} {!r} {!s} {!s}{}' message = message.format( '\033[91m', voice_name, timespan, pitch_range, leaf, '\033[0m', ) print(message) elif isinstance(leaf, abjad.Chord): for note_head in leaf.note_heads: #print('\t\t', note_head) if note_head.written_pitch not in 
pitch_range:
                                # Flag out-of-range chord note heads in red
                                # and report them on stdout with ANSI color.
                                note_head.tweak.color = 'red'
                                message = ' {}Out of range: {} {!r} {!s} {!s} {!s}{}'
                                message = message.format(
                                    '\033[91m',
                                    voice_name,
                                    timespan,
                                    pitch_range,
                                    leaf,
                                    note_head,
                                    '\033[0m',
                                    )
                                print(message)

    @staticmethod
    def can_rewrite_meter(inscribed_timespan):
        r'''Is true if containers to be inscribed into `inscribed_timespan`
        can undergo meter rewriting. Otherwise false.

        Returns boolean.
        '''
        import consort
        # With no music specifier or no rhythm-maker there is nothing to
        # forbid rewriting, so default to True.
        music_specifier = inscribed_timespan.music_specifier
        if music_specifier is None:
            return True
        rhythm_maker = music_specifier.rhythm_maker
        if rhythm_maker is None:
            return True
        # Composite rhythm-makers keep the spelling specifier on their
        # `default` member; plain makers carry it directly.
        if isinstance(rhythm_maker, consort.CompositeRhythmMaker):
            specifier = rhythm_maker.default.duration_spelling_specifier
        else:
            specifier = rhythm_maker.duration_spelling_specifier
        if specifier is None:
            return True
        if specifier.forbid_meter_rewriting:
            return False
        return True

    @staticmethod
    def cleanup_logical_ties(music):
        # Replace two-leaf tied groups totalling a written 1/8 or 1/16
        # with a single untied note of the same duration.
        # NOTE(review): the replacements are written as c' pitches --
        # presumably this runs before pitch handling; confirm pipeline order.
        for logical_tie in abjad.iterate(music).by_logical_tie(
            nontrivial=True,
            pitched=True,
            reverse=True,
            ):
            if len(logical_tie) != 2:
                continue
            # Only rewrite ties whose leaves share one parent container,
            # so the replacement cannot cross a container boundary.
            if not logical_tie._all_leaves_are_in_same_parent:
                continue
            if logical_tie.written_duration == \
                abjad.Duration(1, 8):
                abjad.mutate(logical_tie).replace([abjad.Note("c'8")])
            elif logical_tie.written_duration == \
                abjad.Duration(1, 16):
                abjad.mutate(logical_tie).replace([abjad.Note("c'16")])

    @staticmethod
    def collect_attack_points(score):
        # Map each logical tie (in score timeline order) to its
        # AttackPointSignature; only tie heads count as attack points.
        import consort
        attack_point_map = collections.OrderedDict()
        iterator = abjad.iterate(score).by_timeline(prototype=abjad.Note)
        for note in iterator:
            logical_tie = abjad.inspect(note).get_logical_tie()
            if note is not logical_tie.head:
                continue
            attack_point_signature = \
                consort.AttackPointSignature.from_logical_tie(logical_tie)
            attack_point_map[logical_tie] = attack_point_signature
        return attack_point_map

    @staticmethod
    def consolidate_demultiplexed_timespans(demultiplexed_maquette):
        # Consolidate every voice's timespan list in place, voice by voice.
        for voice_name in demultiplexed_maquette:
            timespans = demultiplexed_maquette[voice_name]
            consolidated_timespans = SegmentMaker.consolidate_timespans(
                timespans)
            demultiplexed_maquette[voice_name] = consolidated_timespans

    @staticmethod
    def consolidate_rests(music):
        r"""Consolidates non-tupleted rests into separate containers in
        `music`.

        ::

            >>> music = abjad.Container(r'''
            ... { r4 c'8 }
            ... \times 2/3 { d'4 r8 }
            ... { r4 e'4 f'4 r4 }
            ... { r4 g8 r8 }
            ... { r4 }
            ... { r4 }
            ... { a'4 \times 2/3 { b'4 r8 } }
            ... { c''4 r8 }
            ... ''')
            >>> print(format(music))
            {
                { r4 c'8 }
                \times 2/3 { d'4 r8 }
                { r4 e'4 f'4 r4 }
                { r4 g8 r8 }
                { r4 }
                { r4 }
                { a'4 \times 2/3 { b'4 r8 } }
                { c''4 r8 }
            }

        ::

            >>> music = consort.SegmentMaker.consolidate_rests(music)
            >>> print(format(music))
            {
                { r4 }
                { c'8 }
                \times 2/3 { d'4 r8 }
                { r4 }
                { e'4 f'4 }
                { r4 r4 }
                { g8 }
                { r8 r4 r4 }
                { a'4 \times 2/3 { b'4 r8 } }
                { c''4 }
                { r8 }
            }

        Returns `music`.
        """
        prototype = (
            abjad.Rest,
            abjad.MultimeasureRest,
            )
        # Duration and leaf order must be preserved exactly; both are
        # re-asserted at the end of this function (in the following chunk).
        initial_music_duration = abjad.inspect(music).get_duration()
        initial_leaves = list(abjad.iterate(music).by_leaf())
        # Peel leading rests of the first division into their own
        # container (tuplets are never split).
        if not isinstance(music[0], abjad.Tuplet):
            leading_silence = abjad.Container()
            while music[0] and isinstance(music[0][0], prototype):
                leading_silence.append(music[0].pop(0))
            if leading_silence:
                music.insert(0, leading_silence)
        # Likewise peel trailing rests of the last division.
        if not isinstance(music[-1], abjad.Tuplet):
            tailing_silence = abjad.Container()
            while music[-1] and isinstance(music[-1][-1], prototype):
                tailing_silence.insert(0, music[-1].pop())
            if tailing_silence:
                music.append(tailing_silence)
        if len(music) < 2:
            return music
        # Walk adjacent division pairs back-to-front, pulling the trailing
        # rests of one and the leading rests of the next into a shared
        # silence container inserted between them.
        indices = reversed(range(len(music) - 1))
        for index in indices:
            division = music[index]
            next_division = music[index + 1]
            silence = abjad.Container()
            if not isinstance(division, abjad.Tuplet):
                while division and isinstance(division[-1], prototype):
                    silence.insert(0, division.pop())
            if not isinstance(next_division, abjad.Tuplet):
                while next_division and \
                    isinstance(next_division[0], prototype):
                    silence.append(next_division.pop(0))
            if silence:
                music.insert(index + 1, silence)
            # Drop divisions emptied by the rest extraction.
            if not division:
                music.remove(division)
            if not 
next_division: music.remove(next_division) for division in music[:]: if not division: music.remove(division) assert abjad.inspect(music).get_duration() == initial_music_duration assert list(abjad.iterate(music).by_leaf()) == initial_leaves return music @staticmethod def consolidate_timespans(timespans, allow_silences=False): r'''Consolidates contiguous performed timespans by music specifier. :: >>> timespans = abjad.TimespanList([ ... consort.PerformedTimespan( ... start_offset=0, ... stop_offset=10, ... music_specifier='foo', ... ), ... consort.PerformedTimespan( ... start_offset=10, ... stop_offset=20, ... music_specifier='foo', ... ), ... consort.PerformedTimespan( ... start_offset=20, ... stop_offset=25, ... music_specifier='bar', ... ), ... consort.PerformedTimespan( ... start_offset=40, ... stop_offset=50, ... music_specifier='bar', ... ), ... consort.PerformedTimespan( ... start_offset=50, ... stop_offset=58, ... music_specifier='bar', ... ), ... ]) >>> print(format(timespans)) abjad.TimespanList( [ consort.tools.PerformedTimespan( start_offset=abjad.Offset(0, 1), stop_offset=abjad.Offset(10, 1), music_specifier='foo', ), consort.tools.PerformedTimespan( start_offset=abjad.Offset(10, 1), stop_offset=abjad.Offset(20, 1), music_specifier='foo', ), consort.tools.PerformedTimespan( start_offset=abjad.Offset(20, 1), stop_offset=abjad.Offset(25, 1), music_specifier='bar', ), consort.tools.PerformedTimespan( start_offset=abjad.Offset(40, 1), stop_offset=abjad.Offset(50, 1), music_specifier='bar', ), consort.tools.PerformedTimespan( start_offset=abjad.Offset(50, 1), stop_offset=abjad.Offset(58, 1), music_specifier='bar', ), ] ) :: >>> timespans = consort.SegmentMaker.consolidate_timespans( ... 
timespans) >>> print(format(timespans)) abjad.TimespanList( [ consort.tools.PerformedTimespan( start_offset=abjad.Offset(0, 1), stop_offset=abjad.Offset(20, 1), divisions=( abjad.Duration(10, 1), abjad.Duration(10, 1), ), music_specifier='foo', ), consort.tools.PerformedTimespan( start_offset=abjad.Offset(20, 1), stop_offset=abjad.Offset(25, 1), divisions=( abjad.Duration(5, 1), ), music_specifier='bar', ), consort.tools.PerformedTimespan( start_offset=abjad.Offset(40, 1), stop_offset=abjad.Offset(58, 1), divisions=( abjad.Duration(10, 1), abjad.Duration(8, 1), ), music_specifier='bar', ), ] ) Returns new timespan inventory. ''' consolidated_timespans = abjad.TimespanList() for music_specifier, grouped_timespans in \ SegmentMaker.group_timespans(timespans): if music_specifier is None and not allow_silences: continue if hasattr(music_specifier, 'minimum_phrase_duration'): duration = music_specifier.minimum_phrase_duration if duration and grouped_timespans.duration < duration: continue divisions = tuple(_.duration for _ in grouped_timespans) first_timespan = grouped_timespans[0] last_timespan = grouped_timespans[-1] consolidated_timespan = abjad.new( first_timespan, divisions=divisions, stop_offset=last_timespan.stop_offset, original_stop_offset=last_timespan.original_stop_offset, ) consolidated_timespans.append(consolidated_timespan) consolidated_timespans.sort() return consolidated_timespans @staticmethod def debug_timespans(timespans): import consort if not timespans: consort.debug('No timespans found.') else: consort.debug('DEBUG: Dumping timespans:') if isinstance(timespans, dict): for voice_name in timespans: consort.debug('\t' + voice_name) for timespan in timespans[voice_name]: divisions = timespan.divisions or [] divisions = ' '.join(str(_) for _ in divisions) consort.debug('\t\t{}: [{!s} ... 
{!s}] [{!s}] [{}] {}'.format( type(timespan).__name__, timespan.start_offset, timespan.stop_offset, timespan.duration, divisions, timespan.music, )) else: for timespan in timespans: consort.debug('\t({}) {}: [{!s} to {!s}]'.format( timespan.voice_name, type(timespan).__name__, timespan.start_offset, timespan.stop_offset, )) @staticmethod def resolve_maquette(multiplexed_timespans): import consort demultiplexed_maquette = consort.TimespanListMapping() for timespan in multiplexed_timespans: voice_name, layer = timespan.voice_name, timespan.layer if voice_name not in demultiplexed_maquette: demultiplexed_maquette[voice_name] = {} if layer not in demultiplexed_maquette[voice_name]: demultiplexed_maquette[voice_name][layer] = \ abjad.TimespanList() demultiplexed_maquette[voice_name][layer].append( timespan) demultiplexed_maquette[voice_name][layer] for voice_name in demultiplexed_maquette: for layer, timespans in demultiplexed_maquette[voice_name].items(): cleaned_layer = SegmentMaker.cleanup_maquette_layer(timespans) demultiplexed_maquette[voice_name][layer] = cleaned_layer for voice_name in demultiplexed_maquette: timespan_inventories = demultiplexed_maquette[voice_name] timespan_inventory = \ SegmentMaker.resolve_timespan_inventories( timespan_inventories) demultiplexed_maquette[voice_name] = timespan_inventory return demultiplexed_maquette @staticmethod def cleanup_maquette_layer(timespans): import consort performed_timespans = abjad.TimespanList() silent_timespans = abjad.TimespanList() for timespan in timespans: if isinstance(timespan, consort.PerformedTimespan): performed_timespans.append(timespan) elif isinstance(timespan, consort.SilentTimespan): silent_timespans.append(timespan) else: raise ValueError(timespan) silent_timespans.compute_logical_or() for performed_timespan in performed_timespans: silent_timespans - performed_timespan performed_timespans.extend(silent_timespans) performed_timespans.sort() return performed_timespans @staticmethod def 
division_is_silent(division): r'''Is true when division only contains rests, at any depth. :: >>> division = abjad.Container("c'4 d'4 e'4 f'4") >>> consort.SegmentMaker.division_is_silent(division) False :: >>> division = abjad.Container('r4 r8 r16 r32') >>> consort.SegmentMaker.division_is_silent(division) True :: >>> division = abjad.Container( ... r"c'4 \times 2/3 { d'8 r8 e'8 } f'4") >>> consort.SegmentMaker.division_is_silent(division) False :: >>> division = abjad.Container( ... r'\times 2/3 { r4 \times 2/3 { r8. } }') >>> consort.SegmentMaker.division_is_silent(division) True Returns boolean. ''' rest_prototype = ( abjad.Rest, abjad.MultimeasureRest, ) leaves = list(abjad.iterate(division).by_leaf()) return all(isinstance(leaf, rest_prototype) for leaf in leaves) def interpret_rhythms( self, verbose=True, ): multiplexed_timespans = abjad.TimespanList() with systemtools.Timer( enter_message=' populating independent timespans:', exit_message=' total:', verbose=verbose, ): meters, measure_offsets, multiplexed_timespans = \ self.populate_independent_timespans( self.discard_final_silence, multiplexed_timespans, self.permitted_time_signatures, self.score, self.score_template, self.settings or (), self.desired_duration, self.timespan_quantization, verbose=verbose, ) self._meters = meters with systemtools.Timer( enter_message=' populating dependent timespans:', exit_message=' total:', verbose=verbose, ): demultiplexed_maquette = \ self.populate_dependent_timespans( self.measure_offsets, multiplexed_timespans, self.score, self.score_template, self.settings or (), self.desired_duration, verbose=verbose, ) with systemtools.Timer( ' populated silent timespans:', verbose=verbose, ): demultiplexed_maquette = self.populate_silent_timespans( demultiplexed_maquette, self.measure_offsets, self.voice_names, ) with systemtools.Timer( ' validated timespans:', verbose=verbose, ): self.validate_timespans(demultiplexed_maquette) with systemtools.Timer( enter_message=' rewriting 
meters:',
            exit_message=' total:',
            verbose=verbose,
            ):
            #expr = 'self.rewrite_meters(demultiplexed_maquette, self.meters)'
            #systemtools.IOManager.profile_expr(
            #    expr,
            #    global_context=globals(),
            #    local_context=locals(),
            #    )
            # Rewrite every voice's durations against the fitted meters.
            self.rewrite_meters(
                demultiplexed_maquette,
                self.meters,
                self.score,
                verbose=verbose,
                )
        with systemtools.Timer(
            ' populated score:',
            verbose=verbose,
            ):
            self.populate_score(
                demultiplexed_maquette,
                self.score,
                )
        # Keep the finished per-voice timespans for later queries.
        self._voicewise_timespans = demultiplexed_maquette

    def find_meters(
        self,
        permitted_time_signatures=None,
        desired_duration=None,
        timespan_inventory=None,
        ):
        """Fit a meter sequence to the timespans' offsets.

        Silent timespans are ignored.  Start offsets are weighted double
        relative to stop offsets so meters prefer aligning with attacks
        over releases; the desired total duration is weighted heaviest so
        the sequence ends there.  Returns a tuple of meters.
        """
        import consort
        offset_counter = metertools.OffsetCounter()
        for timespan in timespan_inventory:
            if isinstance(timespan, consort.SilentTimespan):
                continue
            offset_counter[timespan.start_offset] += 2
            offset_counter[timespan.stop_offset] += 1
        maximum = 1
        if offset_counter:
            maximum = int(max(offset_counter.values()))
        offset_counter[desired_duration] = maximum * 2
        maximum_meter_run_length = self.maximum_meter_run_length
        meters = abjad.Meter.fit_meters(
            argument=offset_counter,
            meters=permitted_time_signatures,
            maximum_run_length=maximum_meter_run_length,
            )
        return tuple(meters)

    @staticmethod
    def get_rhythm_maker(music_specifier):
        """Resolve the rhythm-maker implied by `music_specifier`.

        - None: an all-silence note maker (used for silent timespans);
        - specifier without a maker: all notes, tied across divisions;
        - composite maker: rebuilt with division-level beaming disabled;
        - otherwise: the specifier's own maker, with beaming disabled.
        """
        import consort
        beam_specifier = rhythmmakertools.BeamSpecifier(
            beam_each_division=False,
            beam_divisions_together=False,
            )
        if music_specifier is None:
            rhythm_maker = rhythmmakertools.NoteRhythmMaker(
                beam_specifier=beam_specifier,
                division_masks=[abjad.silence_all()],
                )
        elif music_specifier.rhythm_maker is None:
            rhythm_maker = rhythmmakertools.NoteRhythmMaker(
                beam_specifier=beam_specifier,
                tie_specifier=rhythmmakertools.TieSpecifier(
                    tie_across_divisions=True,
                    ),
                )
        elif isinstance(music_specifier.rhythm_maker,
            consort.CompositeRhythmMaker):
            rhythm_maker = music_specifier.rhythm_maker.new(
                beam_specifier=beam_specifier,
                )
        else:
            rhythm_maker = music_specifier.rhythm_maker
            # Also force division-level beaming off on the maker's own
            # beam specifier before rebuilding the maker.
            beam_specifier = rhythm_maker.beam_specifier or beam_specifier
            beam_specifier = abjad.new(
                beam_specifier,
                beam_each_division=False,
                beam_divisions_together=False,
                )
            rhythm_maker = abjad.new(
                rhythm_maker,
                beam_specifier=beam_specifier,
                )
        assert rhythm_maker is not None
        return rhythm_maker

    @staticmethod
    def group_nonsilent_divisions(music):
        r'''Groups non-silent divisions together.

        Yields groups in reverse order.

        ::

            >>> divisions = []
            >>> divisions.append(abjad.Container('r4'))
            >>> divisions.append(abjad.Container("c'4"))
            >>> divisions.append(abjad.Container('r4 r4'))
            >>> divisions.append(abjad.Container("d'4 d'4"))
            >>> divisions.append(abjad.Container("e'4 e'4 e'4"))
            >>> divisions.append(abjad.Container('r4 r4 r4'))
            >>> divisions.append(abjad.Container("f'4 f'4 f'4 f'4"))

        ::

            >>> for group in consort.SegmentMaker.group_nonsilent_divisions(
            ...     divisions):
            ...     print(group)
            (Container("f'4 f'4 f'4 f'4"),)
            (Container("d'4 d'4"), Container("e'4 e'4 e'4"))
            (Container("c'4"),)

        Returns generator.
        '''
        group = []
        # Walk the divisions back-to-front; a silent division closes the
        # current run of non-silent ones.
        for division in tuple(reversed(music)):
            if SegmentMaker.division_is_silent(division):
                if group:
                    yield tuple(reversed(group))
                group = []
            else:
                group.append(division)
        if group:
            yield tuple(reversed(group))

    @staticmethod
    def group_timespans(timespans):
        # Group partitioned timespans by (music specifier, forbid_fusing);
        # silent timespans fall back to a fresh default MusicSpecifier.
        def grouper(timespan):
            music_specifier = None
            if isinstance(timespan, consort.PerformedTimespan):
                music_specifier = timespan.music_specifier
                if music_specifier is None:
                    music_specifier = consort.MusicSpecifier()
            forbid_fusing = timespan.forbid_fusing
            return music_specifier, forbid_fusing
        import consort
        for partitioned_timespans in timespans.partition(
            include_tangent_timespans=True):
            for key, grouped_timespans in itertools.groupby(
                partitioned_timespans, grouper):
                music_specifier, forbid_fusing = key
                if forbid_fusing:
                    # Un-fusable timespans are yielded one at a time so
                    # they can never be consolidated together.
                    for timespan in grouped_timespans:
                        group = abjad.TimespanList([timespan])
                        yield music_specifier, group
                else:
                    group = abjad.TimespanList(
                        grouped_timespans)
                    yield music_specifier, group

    @staticmethod
    def inscribe_demultiplexed_timespans(
        demultiplexed_maquette,
        score,
        ):
        counter = 
collections.Counter() voice_names = demultiplexed_maquette.keys() voice_names = SegmentMaker.sort_voice_names(score, voice_names) for voice_name in voice_names: inscribed_timespans = abjad.TimespanList() uninscribed_timespans = demultiplexed_maquette[voice_name] for timespan in uninscribed_timespans: if timespan.music is None: music_specifier = timespan.music_specifier if music_specifier not in counter: if music_specifier is None: seed = 0 else: seed = music_specifier.seed or 0 counter[music_specifier] = seed seed = counter[music_specifier] result = SegmentMaker.inscribe_timespan( timespan, seed=seed, ) inscribed_timespans.extend(result) # Negative rotation mimics advancing through a series. counter[music_specifier] -= 1 else: inscribed_timespans.append(timespan) demultiplexed_maquette[voice_name] = inscribed_timespans @staticmethod def inscribe_timespan(timespan, seed=None): r'''Inscribes `timespan`. :: >>> music_specifier = consort.MusicSpecifier( ... rhythm_maker=rhythmmakertools.NoteRhythmMaker( ... division_masks=[ ... rhythmmakertools.SilenceMask( ... pattern=abjad.Pattern( ... indices=[0], ... period=3, ... ), ... ), ... ], ... ), ... ) :: >>> timespan = consort.PerformedTimespan( ... divisions=[abjad.Duration(1, 4)] * 7, ... start_offset=0, ... stop_offset=(7, 4), ... music_specifier=music_specifier, ... 
) >>> print(format(timespan)) consort.tools.PerformedTimespan( start_offset=abjad.Offset(0, 1), stop_offset=abjad.Offset(7, 4), divisions=( abjad.Duration(1, 4), abjad.Duration(1, 4), abjad.Duration(1, 4), abjad.Duration(1, 4), abjad.Duration(1, 4), abjad.Duration(1, 4), abjad.Duration(1, 4), ), music_specifier=consort.tools.MusicSpecifier( rhythm_maker=rhythmmakertools.NoteRhythmMaker( division_masks=abjad.PatternList( ( rhythmmakertools.SilenceMask( pattern=abjad.Pattern( indices=[0], period=3, ), ), ) ), ), ), ) :: >>> result = consort.SegmentMaker.inscribe_timespan(timespan) >>> print(format(result)) abjad.TimespanList( [ consort.tools.PerformedTimespan( start_offset=abjad.Offset(1, 4), stop_offset=abjad.Offset(3, 4), music=abjad.Container( "{ c'4 } { c'4 }" ), music_specifier=consort.tools.MusicSpecifier( rhythm_maker=rhythmmakertools.NoteRhythmMaker( division_masks=abjad.PatternList( ( rhythmmakertools.SilenceMask( pattern=abjad.Pattern( indices=[0], period=3, ), ), ) ), ), ), original_start_offset=abjad.Offset(0, 1), original_stop_offset=abjad.Offset(7, 4), ), consort.tools.PerformedTimespan( start_offset=abjad.Offset(1, 1), stop_offset=abjad.Offset(3, 2), music=abjad.Container( "{ c'4 } { c'4 }" ), music_specifier=consort.tools.MusicSpecifier( rhythm_maker=rhythmmakertools.NoteRhythmMaker( division_masks=abjad.PatternList( ( rhythmmakertools.SilenceMask( pattern=abjad.Pattern( indices=[0], period=3, ), ), ) ), ), ), original_start_offset=abjad.Offset(0, 1), original_stop_offset=abjad.Offset(7, 4), ), ] ) Returns timespan inventory. 
''' inscribed_timespans = abjad.TimespanList() rhythm_maker = SegmentMaker.get_rhythm_maker(timespan.music_specifier) durations = timespan.divisions[:] music = SegmentMaker.make_music( rhythm_maker, durations, seed, ) assert abjad.inspect(music).get_duration() == timespan.duration for container, duration in zip(music, durations): assert abjad.inspect(container).get_duration() == duration music = SegmentMaker.consolidate_rests(music) assert abjad.inspect(music).get_duration() == timespan.duration for group in SegmentMaker.group_nonsilent_divisions(music): start_offset = abjad.inspect(group[0]).get_timespan().start_offset stop_offset = abjad.inspect(group[-1]).get_timespan().stop_offset start_offset += timespan.start_offset stop_offset += timespan.start_offset container = abjad.Container() container.extend(group) # beam = spannertools.GeneralizedBeam( # durations=[division._get_duration() for division in music], # include_long_duration_notes=False, # include_long_duration_rests=False, # isolated_nib_direction=None, # use_stemlets=False, # ) # abjad.attach(beam, container, name='beam') for division in container: durations = [division._get_duration()] beam = spannertools.GeneralizedBeam( durations=durations, include_long_duration_notes=False, include_long_duration_rests=False, isolated_nib_direction=None, use_stemlets=True, ) abjad.attach(beam, division) abjad.attach(timespan.music_specifier, container, scope=abjad.Voice) inscribed_timespan = abjad.new( timespan, divisions=None, music=container, start_offset=start_offset, stop_offset=stop_offset, ) assert abjad.inspect(container).get_duration() == \ inscribed_timespan.duration assert abjad.inspect(container).get_timespan().start_offset == 0 assert abjad.inspect(container[0]).get_timespan().start_offset == 0 inscribed_timespans.append(inscribed_timespan) inscribed_timespans.sort() return inscribed_timespans @staticmethod def leaf_is_tied(leaf): prototype = spannertools.Tie leaf_tie = None if 
abjad.inspect(leaf).get_spanners(prototype): leaf_tie = abjad.inspect(leaf).get_spanner(prototype) else: return False next_leaf = abjad.inspect(leaf).get_leaf(1) if next_leaf is not None: if abjad.inspect(next_leaf).get_spanners(prototype): next_leaf_tie = abjad.inspect(next_leaf).get_spanner(prototype) if leaf_tie is next_leaf_tie: return True return False @staticmethod def make_music(rhythm_maker, durations, seed=0): music = rhythm_maker(durations, rotation=seed) for i, division in enumerate(music): if ( len(division) == 1 and isinstance(division[0], abjad.Tuplet) ): music[i] = division[0] else: music[i] = abjad.Container(division) music = abjad.Container(music) prototype = rhythmmakertools.AccelerandoRhythmMaker if not isinstance(rhythm_maker, prototype): for division in music[:]: if ( isinstance(division, abjad.Tuplet) and division.multiplier == 1 ): abjad.mutate(division).swap(abjad.Container()) return music @staticmethod def meters_to_offsets(meters): r'''Converts `meters` to offsets. :: >>> meters = [ ... abjad.Meter((3, 4)), ... abjad.Meter((2, 4)), ... abjad.Meter((6, 8)), ... abjad.Meter((5, 16)), ... ] :: >>> offsets = consort.SegmentMaker.meters_to_offsets(meters) >>> for x in offsets: ... x ... Offset(0, 1) Offset(3, 4) Offset(5, 4) Offset(2, 1) Offset(37, 16) Returns tuple of offsets. ''' durations = [_.duration for _ in meters] offsets = mathtools.cumulative_sums(durations) offsets = [abjad.Offset(_) for _ in offsets] return tuple(offsets) @staticmethod def meters_to_timespans(meters): r'''Convert `meters` into a collection of annotated timespans. :: >>> meters = [ ... abjad.Meter((3, 4)), ... abjad.Meter((2, 4)), ... abjad.Meter((6, 8)), ... abjad.Meter((5, 16)), ... 
] :: >>> timespans = consort.SegmentMaker.meters_to_timespans(meters) >>> print(format(timespans)) consort.tools.TimespanCollection( [ abjad.AnnotatedTimespan( start_offset=abjad.Offset(0, 1), stop_offset=abjad.Offset(3, 4), annotation=abjad.Meter( '(3/4 (1/4 1/4 1/4))' ), ), abjad.AnnotatedTimespan( start_offset=abjad.Offset(3, 4), stop_offset=abjad.Offset(5, 4), annotation=abjad.Meter( '(2/4 (1/4 1/4))' ), ), abjad.AnnotatedTimespan( start_offset=abjad.Offset(5, 4), stop_offset=abjad.Offset(2, 1), annotation=abjad.Meter( '(6/8 ((3/8 (1/8 1/8 1/8)) (3/8 (1/8 1/8 1/8))))' ), ), abjad.AnnotatedTimespan( start_offset=abjad.Offset(2, 1), stop_offset=abjad.Offset(37, 16), annotation=abjad.Meter( '(5/16 ((3/16 (1/16 1/16 1/16)) (2/16 (1/16 1/16))))' ), ), ] ) Returns timespan collections. ''' import consort timespans = consort.TimespanCollection() offsets = SegmentMaker.meters_to_offsets(meters) for i, meter in enumerate(meters): start_offset = offsets[i] stop_offset = offsets[i + 1] timespan = abjad.AnnotatedTimespan( annotation=meter, start_offset=start_offset, stop_offset=stop_offset, ) timespans.insert(timespan) return timespans @staticmethod def multiplex_timespans(demultiplexed_maquette): r'''Multiplexes `demultiplexed_maquette` into a single timespan inventory. :: >>> demultiplexed = {} >>> demultiplexed['foo'] = abjad.TimespanList([ ... abjad.Timespan(0, 10), ... abjad.Timespan(15, 30), ... ]) >>> demultiplexed['bar'] = abjad.TimespanList([ ... abjad.Timespan(5, 15), ... abjad.Timespan(20, 35), ... ]) >>> demultiplexed['baz'] = abjad.TimespanList([ ... abjad.Timespan(5, 40), ... ]) :: >>> multiplexed = consort.SegmentMaker.multiplex_timespans( ... 
demultiplexed) >>> print(format(multiplexed)) abjad.TimespanList( [ abjad.Timespan( start_offset=abjad.Offset(0, 1), stop_offset=abjad.Offset(10, 1), ), abjad.Timespan( start_offset=abjad.Offset(5, 1), stop_offset=abjad.Offset(15, 1), ), abjad.Timespan( start_offset=abjad.Offset(5, 1), stop_offset=abjad.Offset(40, 1), ), abjad.Timespan( start_offset=abjad.Offset(15, 1), stop_offset=abjad.Offset(30, 1), ), abjad.Timespan( start_offset=abjad.Offset(20, 1), stop_offset=abjad.Offset(35, 1), ), ] ) Returns timespan inventory. ''' multiplexed_timespans = abjad.TimespanList() for timespans in demultiplexed_maquette.values(): multiplexed_timespans.extend(timespans) multiplexed_timespans.sort() return multiplexed_timespans def populate_dependent_timespans( self, meter_offsets, multiplexed_timespans, score, score_template, settings, desired_duration, verbose=True, ): with systemtools.Timer( ' populated timespans:', verbose=verbose, ): self.populate_multiplexed_maquette( dependent=True, score=score, score_template=score_template, settings=settings, desired_duration=desired_duration, timespan_inventory=multiplexed_timespans, ) with systemtools.Timer( ' demultiplexed timespans:', verbose=verbose, ): demultiplexed_maquette = self.resolve_maquette( multiplexed_timespans) self.debug_timespans(demultiplexed_maquette) with systemtools.Timer( ' split timespans:', verbose=verbose, ): self.split_demultiplexed_timespans( meter_offsets, demultiplexed_maquette, ) with systemtools.Timer( ' pruned short timespans:', verbose=verbose, ): for voice_name, timespans in demultiplexed_maquette.items(): self.prune_short_timespans(timespans) with systemtools.Timer( ' pruned malformed timespans:', verbose=verbose, ): for voice_name, timespans in demultiplexed_maquette.items(): self.prune_malformed_timespans(timespans) with systemtools.Timer( ' consolidated timespans:', verbose=verbose, ): self.consolidate_demultiplexed_timespans( demultiplexed_maquette, ) with systemtools.Timer( ' inscribed 
timespans:', verbose=verbose, ): self.inscribe_demultiplexed_timespans( demultiplexed_maquette, score, ) return demultiplexed_maquette def populate_independent_timespans( self, discard_final_silence, multiplexed_timespans, permitted_time_signatures, score, score_template, settings, desired_duration, timespan_quantization, verbose=True, ): with systemtools.Timer( ' populated timespans:', verbose=verbose, ): SegmentMaker.populate_multiplexed_maquette( dependent=False, score=score, score_template=score_template, settings=settings, desired_duration=desired_duration, timespan_inventory=multiplexed_timespans, timespan_quantization=timespan_quantization, ) with systemtools.Timer( ' found meters:', verbose=verbose, ): meters = self.find_meters( permitted_time_signatures=permitted_time_signatures, desired_duration=desired_duration, timespan_inventory=multiplexed_timespans, ) meter_offsets = SegmentMaker.meters_to_offsets(meters) with systemtools.Timer( ' demultiplexed timespans:', verbose=verbose, ): demultiplexed_maquette = SegmentMaker.resolve_maquette( multiplexed_timespans) with systemtools.Timer( ' split timespans:', verbose=verbose, ): SegmentMaker.split_demultiplexed_timespans( meter_offsets, demultiplexed_maquette, ) # TODO: Determine best place for malformed timespan pruning. 
with systemtools.Timer( ' pruned short timespans:', verbose=verbose, ): SegmentMaker.prune_short_timespans(multiplexed_timespans) with systemtools.Timer( ' pruned malformed timespans:', verbose=verbose, ): for voice_name, timespans in demultiplexed_maquette.items(): SegmentMaker.prune_malformed_timespans(timespans) with systemtools.Timer( ' consolidated timespans:', verbose=verbose, ): SegmentMaker.consolidate_demultiplexed_timespans( demultiplexed_maquette, ) with systemtools.Timer( ' inscribed timespans:', verbose=verbose, ): SegmentMaker.inscribe_demultiplexed_timespans( demultiplexed_maquette, score, ) with systemtools.Timer( ' multiplexed timespans:', verbose=verbose, ): multiplexed_timespans = SegmentMaker.multiplex_timespans( demultiplexed_maquette) # TODO: Why prune after consolidation? with systemtools.Timer( ' pruned meters:', verbose=verbose, ): meters = SegmentMaker.prune_meters( discard_final_silence, meters, multiplexed_timespans.stop_offset, ) meter_offsets = SegmentMaker.meters_to_offsets(meters) return meters, meter_offsets, multiplexed_timespans @staticmethod def populate_multiplexed_maquette( dependent=False, score=None, score_template=None, settings=None, desired_duration=None, timespan_inventory=None, timespan_quantization=None, ): import consort segment_timespan = abjad.Timespan(0, desired_duration) if timespan_quantization is None: timespan_quantization = abjad.Duration(1, 16) if timespan_inventory is None: timespan_inventory = abjad.TimespanList() independent_settings = [ setting for setting in settings if not setting.timespan_maker.is_dependent ] dependent_settings = [ setting for setting in settings if setting.timespan_maker.is_dependent ] if dependent: settings = dependent_settings start_index = len(independent_settings) else: settings = independent_settings start_index = 0 if not settings: return False for layer, music_setting in enumerate(settings, start_index): content, silence = 0, 0 for timespan in timespan_inventory: if 
isinstance(timespan, consort.SilentTimespan): silence += 1 else: content += 1 music_setting( layer=layer, score=score, score_template=score_template, segment_timespan=segment_timespan, timespan_inventory=timespan_inventory, timespan_quantization=timespan_quantization, ) SegmentMaker.debug_timespans(timespan_inventory) return True @staticmethod def populate_score( demultiplexed_maquette, score, ): for voice_name, timespans in demultiplexed_maquette.items(): voice = score[voice_name] for timespan in timespans: assert timespan.duration == \ abjad.inspect(timespan.music).get_duration() voice.append(timespan.music) return score @staticmethod def populate_silent_timespans( demultiplexed_maquette, meter_offsets, voice_names=None, ): import consort silent_music_specifier = consort.MusicSpecifier() rhythm_maker = SegmentMaker.get_rhythm_maker(None) if voice_names is None: voice_names = demultiplexed_maquette.keys() else: voice_names = set(voice_names) voice_names.update(demultiplexed_maquette.keys()) for voice_name in voice_names: if voice_name not in demultiplexed_maquette: demultiplexed_maquette[voice_name] = \ abjad.TimespanList() timespans = demultiplexed_maquette[voice_name] silences = abjad.TimespanList([ consort.SilentTimespan( start_offset=0, stop_offset=meter_offsets[-1], voice_name=voice_name, ) ]) silences = SegmentMaker.subtract_timespan_inventories( silences, timespans) silences = SegmentMaker.split_timespans(meter_offsets, silences) for group in silences.partition(include_tangent_timespans=True): start_offset = group.start_offset stop_offset = group.stop_offset durations = [_.duration for _ in group] silence = SegmentMaker.make_music( rhythm_maker, durations, ) abjad.attach(silent_music_specifier, silence, scope=abjad.Voice) silent_timespan = consort.PerformedTimespan( music=silence, start_offset=start_offset, stop_offset=stop_offset, voice_name=voice_name, ) timespans.append(silent_timespan) timespans.sort() return demultiplexed_maquette @staticmethod def 
prune_meters( discard_final_silence, meters, stop_offset, ): discard_final_silence = bool(discard_final_silence) if discard_final_silence and stop_offset: meters = list(meters) total_meter_durations = sum(_.duration for _ in meters[:-1]) while stop_offset <= total_meter_durations: meters.pop() total_meter_durations = sum(_.duration for _ in meters[:-1]) return tuple(meters) @staticmethod def prune_short_timespans(timespans): for timespan in timespans[:]: if timespan.minimum_duration and \ timespan.duration < timespan.minimum_duration and \ timespan.music is None: timespans.remove(timespan) @staticmethod def prune_malformed_timespans(timespans): for timespan in timespans[:]: if not timespan.is_well_formed: assert timespan.music is None timespans.remove(timespan) @staticmethod def report(timespan_inventory): print('REPORTING') for timespan in timespan_inventory: print( '\t', '{}:'.format(timespan.voice_name), '[{}]'.format(timespan.layer), type(timespan).__name__, float(timespan.start_offset), float(timespan.stop_offset), ) print() @staticmethod def resolve_timespan_inventories( timespan_inventories=None, ): import consort timespan_inventories = [ x[1] for x in sorted(timespan_inventories.items(), key=lambda item: item[0]) ] for timespan_inventory in timespan_inventories: assert timespan_inventory.all_are_nonoverlapping resolved_inventory = consort.TimespanCollection() for timespan in timespan_inventories[0]: if isinstance(timespan, consort.SilentTimespan): continue resolved_inventory.insert(timespan) for timespan_inventory in timespan_inventories[1:]: resolved_inventory = SegmentMaker.subtract_timespan_inventories( resolved_inventory, timespan_inventory, ) for timespan in resolved_inventory[:]: if timespan.minimum_duration and \ timespan.duration < timespan.minimum_duration: resolved_inventory.remove(timespan) for timespan in timespan_inventory: if isinstance(timespan, consort.SilentTimespan): continue resolved_inventory.append(timespan) resolved_inventory.sort() 
resolved_inventory = abjad.TimespanList( resolved_inventory[:], ) return resolved_inventory @staticmethod def rewrite_container_meter( container, meter_timespans, forbid_staff_lines_spanner=None, ): assert meter_timespans assert meter_timespans[0].start_offset <= \ abjad.inspect(container).get_timespan().start_offset #last_leaf = container.select_leaves()[-1] last_leaf = next(abjad.iterate(container).by_leaf(reverse=True)) is_tied = SegmentMaker.leaf_is_tied(last_leaf) container_timespan = abjad.inspect(container).get_timespan() if isinstance(container, abjad.Tuplet): contents_duration = container._get_contents_duration() meter = abjad.Meter(contents_duration) boundary_depth = 1 if meter.numerator in (3, 4): boundary_depth = None abjad.mutate(container[:]).rewrite_meter( meter, boundary_depth=boundary_depth, maximum_dot_count=2, ) elif len(meter_timespans) == 1: container_timespan = abjad.inspect(container).get_timespan() container_start_offset = container_timespan.start_offset container_stop_offset = container_timespan.stop_offset meter_timespan = meter_timespans[0] relative_meter_start_offset = meter_timespan.start_offset assert relative_meter_start_offset <= container_start_offset absolute_meter_stop_offset = ( relative_meter_start_offset + container_start_offset + meter_timespan.duration ) assert container_stop_offset <= absolute_meter_stop_offset if meter_timespan.is_congruent_to_timespan(container_timespan) \ and SegmentMaker.division_is_silent(container): multimeasure_rest = abjad.MultimeasureRest(1) duration = abjad.inspect(container).get_duration() multiplier = abjad.Multiplier(duration) abjad.attach(multiplier, multimeasure_rest) container[:] = [multimeasure_rest] if not forbid_staff_lines_spanner: previous_leaf = multimeasure_rest._get_leaf(-1) if isinstance(previous_leaf, abjad.MultimeasureRest): staff_lines_spanner = \ abjad.inspect(previous_leaf).get_spanner( spannertools.StaffLinesSpanner) components = staff_lines_spanner.components components = 
components + [multimeasure_rest] abjad.detach(staff_lines_spanner) else: staff_lines_spanner = spannertools.StaffLinesSpanner([0]) components = [multimeasure_rest] components = abjad.select(components) abjad.attach( staff_lines_spanner, components, name='staff_lines_spanner', ) else: meter = meter_timespan.annotation meter_offset = meter_timespan.start_offset initial_offset = container_start_offset - meter_offset boundary_depth = 1 if meter.numerator in (3, 4): boundary_depth = None abjad.mutate(container[:]).rewrite_meter( meter, boundary_depth=boundary_depth, initial_offset=initial_offset, maximum_dot_count=2, ) else: # TODO: handle bar-line-crossing containers raise AssertionError('Bar-line-crossing containers not permitted.') if is_tied: last_leaf = next(abjad.iterate(container).by_leaf(reverse=True)) next_leaf = abjad.inspect(last_leaf).get_leaf(1) selection = selectiontools.Selection(( last_leaf, next_leaf)) selection._attach_tie_spanner_to_leaf_pair() @staticmethod def rewrite_meters( demultiplexed_maquette, meters, score, verbose=True, ): import consort meter_timespans = SegmentMaker.meters_to_timespans(meters) cache = {} template = ' rewriting {}: {}' for context_name in sorted(demultiplexed_maquette): inscribed_timespans = demultiplexed_maquette[context_name] consort.debug('CONTEXT: {}'.format(context_name)) context = score[context_name] forbid_staff_lines_spanner = context.context_name == 'Dynamics' count = 0 for inscribed_timespan in inscribed_timespans: consort.debug('\t{!s} {!s} {!r}'.format( inscribed_timespan.start_offset, inscribed_timespan.stop_offset, inscribed_timespan.music, )) if not SegmentMaker.can_rewrite_meter(inscribed_timespan): continue for i, container in enumerate(inscribed_timespan.music): container_timespan = abjad.inspect(container).get_timespan() container_timespan = container_timespan.translate( inscribed_timespan.start_offset) if i == 0: assert container_timespan.start_offset == \ inscribed_timespan.start_offset if i == 
(len(inscribed_timespan.music) - 1): assert container_timespan.stop_offset == \ inscribed_timespan.stop_offset if container_timespan in cache: intersecting_meters = cache[container_timespan] else: intersecting_meters = \ meter_timespans.find_timespans_intersecting_timespan( container_timespan) cache[container_timespan] = intersecting_meters shifted_intersecting_meters = [ _.translate(-1 * inscribed_timespan.start_offset) for _ in intersecting_meters ] consort.debug('\t\t{!r} {!r}'.format( container, container_timespan, )) for intersecting_meter in intersecting_meters: consort.debug('\t\t\t' + repr(intersecting_meter)) SegmentMaker.rewrite_container_meter( container, shifted_intersecting_meters, forbid_staff_lines_spanner, ) SegmentMaker.cleanup_logical_ties(container) count += 1 if verbose: message = template.format(context_name, count) print(message) @staticmethod def sort_voice_names(score, voice_names): result = [] for voice in abjad.iterate(score).by_class(abjad.Voice): if voice.name in voice_names: result.append(voice.name) return tuple(result) @staticmethod def split_demultiplexed_timespans( meter_offsets=None, demultiplexed_maquette=None, ): for voice_name in demultiplexed_maquette: timespan_inventory = demultiplexed_maquette[voice_name] split_inventory = SegmentMaker.split_timespans( meter_offsets, timespan_inventory, ) demultiplexed_maquette[voice_name] = split_inventory @staticmethod def split_timespans(offsets, timespan_inventory): offsets = list(offsets) timespan_inventory.sort() split_inventory = abjad.TimespanList() for timespan in sorted(timespan_inventory): current_offsets = [] while offsets and offsets[0] <= timespan.start_offset: offsets.pop(0) while offsets and offsets[0] < timespan.stop_offset: current_offsets.append(offsets.pop(0)) if hasattr(timespan, 'music') and timespan.music: # We don't need to split already-inscribed timespans split_inventory.append(timespan) continue elif timespan.forbid_splitting: continue if current_offsets: 
#print(current_offsets, timespan.start_offset, # timespan.stop_offset, type(timespan), # timespan.divisions) shards = timespan.split_at_offsets(current_offsets) for shard in shards: if shard.minimum_duration: if shard.minimum_duration <= shard.duration: split_inventory.append(shard) else: split_inventory.append(shard) else: if timespan.minimum_duration: if timespan.minimum_duration <= timespan.duration: split_inventory.append(timespan) else: split_inventory.append(timespan) split_inventory.sort() return split_inventory @staticmethod def subtract_timespan_inventories(inventory_one, inventory_two): r'''Subtracts `inventory_two` from `inventory_one`. :: >>> inventory_one = abjad.TimespanList([ ... abjad.Timespan(0, 10), ... abjad.Timespan(10, 20), ... abjad.Timespan(40, 80), ... ]) :: >>> inventory_two = abjad.TimespanList([ ... abjad.Timespan(5, 15), ... abjad.Timespan(25, 35), ... abjad.Timespan(35, 45), ... abjad.Timespan(55, 65), ... abjad.Timespan(85, 95), ... ]) :: >>> manager = consort.SegmentMaker >>> result = manager.subtract_timespan_inventories( ... inventory_one, ... inventory_two, ... ) >>> print(format(result)) abjad.TimespanList( [ abjad.Timespan( start_offset=abjad.Offset(0, 1), stop_offset=abjad.Offset(5, 1), ), abjad.Timespan( start_offset=abjad.Offset(15, 1), stop_offset=abjad.Offset(20, 1), ), abjad.Timespan( start_offset=abjad.Offset(45, 1), stop_offset=abjad.Offset(55, 1), ), abjad.Timespan( start_offset=abjad.Offset(65, 1), stop_offset=abjad.Offset(80, 1), ), ] ) :: >>> result = manager.subtract_timespan_inventories( ... inventory_two, ... inventory_one, ... 
) >>> print(format(result)) abjad.TimespanList( [ abjad.Timespan( start_offset=abjad.Offset(25, 1), stop_offset=abjad.Offset(35, 1), ), abjad.Timespan( start_offset=abjad.Offset(35, 1), stop_offset=abjad.Offset(40, 1), ), abjad.Timespan( start_offset=abjad.Offset(85, 1), stop_offset=abjad.Offset(95, 1), ), ] ) ''' import consort resulting_timespans = consort.TimespanCollection() if not inventory_two: return abjad.TimespanList(inventory_one) elif not inventory_one: return abjad.TimespanList() subtractee_index = 0 subtractor_index = 0 subtractee = None subtractor = None subtractee_is_modified = False while subtractee_index < len(inventory_one) and \ subtractor_index < len(inventory_two): if subtractee is None: subtractee = inventory_one[subtractee_index] subtractee_is_modified = False if subtractor is None: subtractor = inventory_two[subtractor_index] if subtractee.intersects_timespan(subtractor): subtraction = subtractee - subtractor if len(subtraction) == 1: subtractee = subtraction[0] subtractee_is_modified = True elif len(subtraction) == 2: resulting_timespans.insert(subtraction[0]) subtractee = subtraction[1] subtractee_is_modified = True else: subtractee = None subtractee_index += 1 else: if subtractee.stops_before_or_at_offset( subtractor.start_offset): resulting_timespans.insert(subtractee) subtractee = None subtractee_index += 1 else: subtractor = None subtractor_index += 1 if subtractee_is_modified: if subtractee: resulting_timespans.insert(subtractee) resulting_timespans.insert(inventory_one[subtractee_index + 1:]) else: resulting_timespans.insert(inventory_one[subtractee_index:]) resulting_timespans = abjad.TimespanList( resulting_timespans[:]) return resulting_timespans @staticmethod def validate_timespans(demultiplexed_maquette): durations = set() for voice_name, timespans in demultiplexed_maquette.items(): timespans.sort() assert timespans.start_offset == 0 assert timespans.all_are_contiguous assert timespans.all_are_well_formed assert 
timespans.all_are_nonoverlapping durations.add(timespans.stop_offset) assert len(tuple(durations)) == 1 def update_segment_metadata(self): self._segment_metadata.update( end_instruments_by_staff=self.get_end_instruments(), end_tempo=self.get_end_tempo_indication(), end_time_signature=self.get_end_time_signature(), is_repeated=self.repeat, measure_count=len(self.meters), ) def get_previous_segment_metadata(self, current_segment_directory): current_segment_name = os.path.basename(current_segment_directory) segments_directory = os.path.abspath( os.path.join(current_segment_directory, '..')) all_segment_names = [ entry for entry in sorted(os.listdir(segments_directory)) if os.path.exists( os.path.join(segments_directory, entry, '__init__.py'), ) ] current_segment_index = all_segment_names.index(current_segment_name) previous_segment_index = current_segment_index - 1 if previous_segment_index < 0: return None previous_segment_name = all_segment_names[previous_segment_index] metadata_path = '{}.segments.{}.__metadata__'.format( self.score_package_name, previous_segment_name, ) try: metadata_module = importlib.import_module(metadata_path) except ImportError: return None return getattr(metadata_module, 'metadata', None) ### PUBLIC PROPERTIES ### @property def attack_point_map(self): return self._attack_point_map @property def meters(self): return self._meters @property def score(self): return self._score @property def voicewise_timespans(self): return self._voicewise_timespans @property def desired_duration(self): tempo = self.tempo if tempo is None: tempo = abjad.MetronomeMark((1, 4), 60) tempo_desired_duration_in_seconds = abjad.Duration( tempo.duration_to_milliseconds(tempo.reference_duration), 1000, ) desired_duration = abjad.Duration(( self.desired_duration_in_seconds / tempo_desired_duration_in_seconds ).limit_denominator(8)) desired_duration *= tempo.reference_duration count = desired_duration // abjad.Duration(1, 8) desired_duration = abjad.Duration(count, 8) 
assert 0 < desired_duration return desired_duration @property def desired_duration_in_seconds(self): return self._desired_duration_in_seconds @desired_duration_in_seconds.setter def desired_duration_in_seconds(self, desired_duration_in_seconds): if desired_duration_in_seconds is not None: desired_duration_in_seconds = abjad.Duration( desired_duration_in_seconds, ) self._desired_duration_in_seconds = desired_duration_in_seconds @property def discard_final_silence(self): return self._discard_final_silence @discard_final_silence.setter def discard_final_silence(self, discard_final_silence): if discard_final_silence is not None: discard_final_silence = bool(discard_final_silence) self._discard_final_silence = discard_final_silence @property def final_markup(self): return None @property def annotate_colors(self): return self._annotate_colors @annotate_colors.setter def annotate_colors(self, expr): if expr is not None: expr = bool(expr) self._annotate_colors = expr @property def annotate_phrasing(self): return self._annotate_phrasing @annotate_phrasing.setter def annotate_phrasing(self, expr): if expr is not None: expr = bool(expr) self._annotate_phrasing = expr @property def annotate_timespans(self): return self._annotate_timespans @annotate_timespans.setter def annotate_timespans(self, expr): if expr is not None: expr = bool(expr) self._annotate_timespans = expr @property def lilypond_file(self): return self._lilypond_file @property def maximum_meter_run_length(self): return self._maximum_meter_run_length @maximum_meter_run_length.setter def maximum_meter_run_length(self, expr): self._maximum_meter_run_length = expr @property def measure_offsets(self): measure_durations = [x.duration for x in self.time_signatures] measure_offsets = mathtools.cumulative_sums(measure_durations) return measure_offsets @property def name(self): return self._name @name.setter def name(self, expr): if expr is not None: expr = str(expr) self._name = expr @property def omit_stylesheets(self): 
return self._omit_stylesheets @omit_stylesheets.setter def omit_stylesheets(self, omit_stylesheets): if omit_stylesheets is not None: omit_stylesheets = bool(omit_stylesheets) self._omit_stylesheets = omit_stylesheets @property def permitted_time_signatures(self): r'''Gets and sets segment maker's permitted time signatures. :: >>> segment_maker = consort.SegmentMaker() >>> time_signatures = [(3, 4), (2, 4), (5, 8)] >>> segment_maker.permitted_time_signatures = time_signatures >>> print(format(segment_maker)) consort.tools.SegmentMaker( permitted_time_signatures=abjad.TimeSignatureList( [ abjad.TimeSignature((3, 4)), abjad.TimeSignature((2, 4)), abjad.TimeSignature((5, 8)), ] ), ) ''' return self._permitted_time_signatures @permitted_time_signatures.setter def permitted_time_signatures(self, permitted_time_signatures): if permitted_time_signatures is not None: permitted_time_signatures = abjad.TimeSignatureList( items=permitted_time_signatures, ) self._permitted_time_signatures = permitted_time_signatures @property def score_package_metadata(self): module_name = '{}.__metadata__'.format(self.score_package_name) try: module = importlib.import_module(module_name) metadata = getattr(module, 'metadata') except ImportError: metadata = {} return metadata @property def score_package_module(self): module = importlib.import_module(self.score_package_name) return module @property def score_package_name(self): return 'consort' @property def score_package_path(self): return self.score_package_module.__path__[0] @property def score_template(self): r'''Gets and sets segment maker's score template. :: >>> segment_maker = consort.SegmentMaker() >>> score_template = abjad.templatetools.StringOrchestraScoreTemplate( ... violin_count=2, ... viola_count=1, ... cello_count=1, ... contrabass_count=0, ... 
) >>> segment_maker.score_template = score_template >>> print(format(segment_maker)) consort.tools.SegmentMaker( score_template=templatetools.StringOrchestraScoreTemplate( violin_count=2, viola_count=1, cello_count=1, contrabass_count=0, split_hands=True, use_percussion_clefs=False, ), ) ''' return self._score_template @score_template.setter def score_template(self, score_template): self._score_template = score_template @property def segment_duration(self): return sum(x.duration for x in self.time_signatures) @property def settings(self): return tuple(self._settings) @settings.setter def settings(self, settings): import consort if settings is not None: if not isinstance(settings, collections.Sequence): settings = (settings,) assert all(isinstance(_, consort.MusicSetting) for _ in settings) settings = list(settings) self._settings = settings or [] @property def tempo(self): r'''Gets and sets segment maker tempo. :: >>> segment_maker = consort.SegmentMaker() >>> tempo = abjad.MetronomeMark((1, 4), 52) >>> segment_maker.tempo = tempo >>> print(format(segment_maker)) consort.tools.SegmentMaker( tempo=abjad.MetronomeMark( reference_duration=abjad.Duration(1, 4), units_per_minute=52, ), ) ''' tempo = self._tempo if tempo is not None: return tempo elif self._previous_segment_metadata is not None: tempo = self._previous_segment_metadata.get('end_tempo') if tempo: tempo = abjad.MetronomeMark(*tempo) return tempo @tempo.setter def tempo(self, tempo): if tempo is not None: if not isinstance(tempo, abjad.MetronomeMark): tempo = abjad.MetronomeMark(tempo) self._tempo = tempo @property def time_signatures(self): return tuple( meter.implied_time_signature for meter in self.meters ) @property def timespan_quantization(self): r'''Gets and sets segment maker timespan quantization. 
:: >>> segment_maker = consort.SegmentMaker() >>> timespan_quantization = (1, 8) >>> segment_maker.timespan_quantization = timespan_quantization >>> print(format(segment_maker)) consort.tools.SegmentMaker( timespan_quantization=abjad.Duration(1, 8), ) ''' return self._timespan_quantization @timespan_quantization.setter def timespan_quantization(self, timespan_quantization): if timespan_quantization is not None: timespan_quantization = \ abjad.Duration(timespan_quantization) self._timespan_quantization = timespan_quantization @property def voice_names(self): return self._voice_names @property def repeat(self): return self._repeat @repeat.setter def repeat(self, repeat): if repeat is not None: repeat = bool(repeat) self._repeat = repeat
nilq/baby-python
python
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# -----------------------------------------------------------------------------
#
#   P A G E B O T   N A N O
#
#   Copyright (c) 2020+ Buro Petr van Blokland + Claudia Mens
#   www.pagebot.io
#   Licensed under MIT conditions
#
#   Supporting DrawBot, www.drawbot.com
# -----------------------------------------------------------------------------
#
#   context.py
#
#   InDesign JavaScript file specifications here:
#   https://www.adobe.com/content/dam/acom/en/devnet/indesign/sdk/cs6/scripting/InDesign_ScriptingGuide_JS.pdf
#
import sys
sys.path.insert(0, "../../..") # So we can import pagebotnano without installing.

from pagebotnano.contexts.basecontext import BaseContext
from pagebotnano.constants import *

# NOTE(review): InDesignBuilder and pt() are used below but have no explicit
# import in this file — presumably they come from the star-import above or a
# missing sibling-module import; confirm, otherwise instantiating
# InDesignContext raises NameError.


class InDesignContext(BaseContext):
    """Context that delegates PageBot-nano drawing calls to an
    InDesignBuilder instance (self.b), targeting Adobe InDesign scripting
    (see the InDesign JavaScript reference linked in the file header).
    Several methods are deliberate no-ops for features this context does
    not support yet.
    """

    # Used by the generic BaseContext.newString( )
    #EXPORT_TYPES = (FILETYPE_IDML,)

    def __init__(self):
        """Constructor of InDesignContext.

        >>> from pagebotnano.document import Document
        >>> from pagebotnano.toolbox.color import color
        >>> from pagebotnano.contexts.indesigncontext.context import InDesignContext
        >>> context = InDesignContext()
        """
        # The triple-quoted string below is a *disabled* doctest: it is a bare
        # string statement (a runtime no-op), not the docstring. Kept verbatim
        # for future reference.
        """
        >>> font = 'Georgia' # Is available in Adobe
        >>> styles = {}
        >>> styles['h0'] = dict(name='h0', font=font, fontSize=pt(48), leading=em(0.9), textFill=color(1, 0, 0))
        >>> styles['h1'] = dict(name='h1', font=font, fontSize=pt(24), leading=em(0.9), textFill=color(1, 0, 0))
        >>> doc = Document(w=510, h=720, context=context, autoPages=8, padding=p(4), originTop=False)
        >>> doc.styles = styles # Overwrite all default styles.
        >>> page = doc[2]
        >>> scaleType = None #SCALE_TYPE_FITWH # for non-proportional
        >>> e = Image('resources/images/cookbot10.jpg', parent=page, x=page.pl, y=page.pt, w=page.pw, h=page.pw, scaleImage=False, fill=color(0.5), scaleType=scaleType)
        >>> page = doc[3]
        >>> e = Image('resources/images/cookbot10.jpg', parent=page, x=page.pl, y=page.pt, w=page.pw, h=page.pw, scaleImage=False, fill=color(0.2), scaleType=scaleType)
        >>> e = newRect(parent=page, w=p(16), h=p(16), x=p(20), y=p(41), stroke=color(1, 0, 0), strokeWidth=p(2), fill=color(c=1, m=0.5, y=0, k=0, a=0.8))
        >>> e = newRect(parent=page, w=p(16), h=p(16), x=page.pl, y=page.pt, fill=color(1, 0, 0))
        >>> e = newRect(parent=page, w=p(16), h=p(16), x=page.pl+p(2), y=p(50), fill=color(c=0.5, m=1, y=0, k=0, a=0.5))
        >>> e = newOval(parent=page, w=p(16), h=p(16), x=p(24), y=p(42), fill=color(c=0.5, m=0, y=1, k=0, a=0.5))
        >>> e = newTextBox('ABCD EFGH IJKL MNOP', style=doc.styles['h1'], parent=page, w=p(16), h=p(8), x=p(34), y=p(22), padding=p(1), fill=color(c=0, m=0.5, y=1, k=0, a=0.5))
        >>> page = page.next
        >>> e = Image('resources/images/cookbot10.jpg', parent=page, x=page.pl, y=page.pt, w=page.pw, h=page.pw, scaleImage=False, fill=color(0.5), scaleType=scaleType)
        >>> e = newOval(parent=page, w=p(16), h=p(16), x=p(24), y=p(22), fill=color(c=0.5, m=0, y=1, k=0, a=0.5))
        >>> e = newTextBox('@XYZ', style=doc.styles['h0'], parent=page, w=p(26), h=p(8), x=p(14), y=p(22), padding=p(1), fill=color(c=0, m=0.5, y=1, k=0, a=0.5))
        >>> page = page.next
        >>> e = Image('resources/images/cookbot10.jpg', parent=page, x=page.pl, y=page.pt, w=page.pw, h=page.pw, scaleImage=False, fill=color(0, 0, 1), scaleType=scaleType)
        >>> e = newRect(parent=page, w=p(16), h=p(16), x=p(24), y=p(22), fill=color(c=0.5, m=1, y=1, k=0, a=0.5))
        >>> e = newTextBox('@EEE', style=doc.styles['h0'], parent=page, w=p(26), h=p(8), x=p(14), y=p(22), padding=p(1), fill=color(c=0, m=0.5, y=1, k=0, a=0.5))
        >>> page = page.next
        >>> e = Image('resources/images/cookbot10.jpg', parent=page, x=page.pl, y=page.pt, w=page.pw, h=page.pw, scaleImage=False, fill=color(1, 0, 0), scaleType=scaleType)
        >>> e = newRect(parent=page, w=p(16), h=p(16), x=p(24), y=p(22), fill=color(c=0.5, m=1, y=1, k=0, a=0.5))
        >>> e = newTextBox('@EEE', style=doc.styles['h0'], parent=page, w=p(26), h=p(8), x=p(14), y=p(22), padding=p(1), fill=color(c=0, m=0.5, y=1, k=0, a=0.5))
        >>> doc.export('Image.js')
        """
        super().__init__()
        self.b = InDesignBuilder() # cls.b builder for this context.
        self.name = self.__class__.__name__

    def newDocument(self, w=None, h=None, doc=None):
        """Have the builder create a new document of size (w, h)."""
        self.b.newDocument(w, h, doc)

    def newDrawing(self):
        """No-op placeholder in this context."""
        pass

    def newPage(self, w=None, h=None, e=None):
        """Have the builder create a new page in the document."""
        self.b.newPage(w, h, e)

    def frameDuration(self, frameDuration, e=None):
        """Ignore for now in this context."""
        pass

    # Basic shapes.

    def rect(self, x, y, w=None, h=None, e=None):
        """New rectangle by the builder"""
        self.b.rect(x, y, w=w, h=h, e=e)

    def oval(self, x, y, w=None, h=None, e=None):
        """New oval by the builder (delegates to self.b.oval)."""
        self.b.oval(x, y, w=w, h=h, e=e)

    def textBox(self, sOrBs, p, w=None, h=None, clipPath=None, e=None):
        """Have the builder place a text box with string sOrBs at point p."""
        self.b.textBox(sOrBs, p, w=w, h=h, clipPath=clipPath, e=e)

    def scaleImage(self, path, w, h, index=0, showImageLoresMarker=False,
            exportExtension=None):
        """No-op placeholder in this context."""
        pass

    def image(self, path, p, alpha=1, pageNumber=None, w=None, h=None,
            scaleType=None, e=None):
        """Have the builder place the image at path on position p."""
        self.b.image(path, p, alpha=alpha, pageNumber=pageNumber, w=w, h=h,
            scaleType=scaleType, e=e)

    def newString(self, s, e=None, style=None, w=None, h=None, pixelFit=True):
        """Creates a new styles BabelString instance of self.STRING_CLASS from
        `s` (converted to plain unicode string), using e or style as
        typographic parameters. Ignore and just answer `s` if it is already a
        self.STRING_CLASS instance and no style is forced. PageBot function.
        """
        # NOTE(review): w, h, e and pixelFit are accepted but unused here;
        # STRING_CLASS is presumably defined by BaseContext — confirm.
        return self.STRING_CLASS(s, context=self, style=style)

    def text(self, sOrBs, p):
        """Ignore for now in this context."""
        pass

    def imageSize(self, path):
        """Answers the (w, h) image size of the image file at path. If the path is an
        SVG image, then determine by parsing the SVG-XML.

        if path.lower().endswith('.'+FILETYPE_SVG):
            import xml.etree.ElementTree as ET
            svgTree = ET.parse(path)
            print(svgTree)
            return pt(1000, 1000)

        return pt(self.b.imageSize(path))
        """
        # Placeholder: always answers a fixed 1000 x 1000 size; the real
        # implementation is parked in the docstring above.
        return pt(1000, 1000)

    def saveDocument(self, path, multiPage=True):
        """Have the builder write the document to path; multiPage is
        currently ignored."""
        self.b.saveDocument(path)

    # DrawBot-compatible alias for saveDocument.
    saveImage = saveDocument

    def getFlattenedPath(self, path=None):
        """No-op placeholder in this context."""
        pass

    def getFlattenedContours(self, path=None):
        """No-op placeholder in this context."""
        pass

    def getGlyphPath(self, glyph, p=None, path=None):
        """No-op placeholder in this context."""
        pass

if __name__ == '__main__':
    import doctest
    import sys
    sys.exit(doctest.testmod()[0])
nilq/baby-python
python
""" Streaming newline delimited JSON I/O. Calling `newlinejson.open()` returns a loaded instance of `NLJReader()`, or `NLJWriter()` that acts as a file-like object. See `help()` on each for more information. Example: import newlinejson as nlj with nlj.open('sample-data/dictionaries.json') as src, \\ with nlj.open('out.json', 'w') as dst: for line in src: dst.write(line) with open('out.json') as f: print(f.read())) {'field2': 'l1f2', 'field3': 'l1f3', 'field1': 'l1f1'} {'field2': 'l2f2', 'field3': 'l2f3', 'field1': 'l2f1'} {'field2': 'l3f2', 'field3': 'l3f3', 'field1': 'l3f1'} {'field2': 'l4f2', 'field3': 'l4f3', 'field1': 'l4f1'} {'field2': 'l5f2', 'field3': 'l5f3', 'field1': 'l5f1'} """ from newlinejson.core import dump from newlinejson.core import dumps from newlinejson.core import load from newlinejson.core import loads from newlinejson.core import open from newlinejson.core import NLJBaseStream from newlinejson.core import NLJReader from newlinejson.core import NLJWriter __version__ = '1.0' __author__ = 'Kevin Wurster' __email__ = 'wursterk@gmail.com' __source__ = 'https://github.com/geowurster/NewlineJSON' __license__ = ''' New BSD License Copyright (c) 2014-2015, Kevin D. Wurster All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * The names of NewlineJSON its contributors may not be used to endorse or promote products derived from this software without specific prior written permission. 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. '''
nilq/baby-python
python
'''
Product detail page (Selenium page object).
'''
from common.base import Base

# URL of the product detail page under test.
good_url ='http://ecshop.itsoso.cn/goods.php?id=304'

class Buy_Good(Base):
    '''Page object for the product detail page ("buy now" flow).'''
    # Product name label.
    good_name_loc=('class name','goods_style_name')
    # Product brand link.
    good_brand_loc=('css selector','a[href="brand.php?id=20"]')
    # Purchase quantity input box.
    number_loc=('id','number')
    # "Buy now" button.
    libuy_loc=('css selector','img[src="themes/default/images/buybtn1.png"]')
    # "Add to favorites" (collect) button.
    collect_loc=('css selector','img[src="themes/default/images/bnt_colles.gif"]')
    # Share button.
    share_loc =('css selector','img[src="themes/default/images/bnt_recommend.gif"]')
    # Price label.
    price_loc=('id','ECS_RANKPRICE_6')
    # Product SKU ("商品货号") shown on the front-end detail page.
    front_good_no_loc=('css selector','li.clearfix:nth-child(1)>dd:nth-child(1)')

    # Click the product brand link.
    def click_brand(self):
        self.click(self.good_brand_loc)

    # Type the purchase quantity into the input box.
    def send_number(self,num):
        self.double_click(self.number_loc)
        self.send_keys(self.number_loc,num)
        # Click elsewhere (the price label) to commit the input.
        self.click(self.price_loc)

    # Click the "buy now" button.
    def click_libuy(self):
        self.click(self.libuy_loc)

    # Click the favorites button.
    def click_collect(self):
        self.click(self.collect_loc)

    # Click the share button.
    def click_share(self):
        self.click(self.share_loc)

    # Return the product name text for the given locator.
    def get_good_name(self,locator):
        element =self.find_element(locator)
        text = element.text
        return text

    # Read the product SKU from the front-end detail page.
    def get_front_good_no(self):
        element=self.find_element(self.front_good_no_loc)
        # Element text looks like "商品货号:ECS000304"; split off the label
        # and return the SKU part.
        # NOTE(review): the label in the page appears to use a full-width
        # colon (:) — confirm split(':') matches the rendered character.
        content =element.text.split(':')
        text =content[1]
        # e.g. returns 'ECS000304'
        return text

if __name__ == '__main__':
    from common.base import open_browser
    from time import sleep
    driver = open_browser('chrome')
    libuy = Buy_Good(driver)  # Instantiate Buy_Good.
    libuy.open_url(good_url)
    good_name_loc = ('class name', 'goods_style_name')
    print(libuy.get_good_name(good_name_loc))
    # Front-end product SKU.
    front_good_no_loc = ('css selector', 'li.clearfix:nth-child(1)>dd:nth-child(1)')
    num =libuy.get_front_good_no()
    print(num)
    # sleep(2)
    # libuy.send_number(3)
    # sleep(3)
    #
    #
    # libuy.click_libuy()
nilq/baby-python
python
import requests
import re
import threading

from bs4 import BeautifulSoup as bs


class Crawler():
    """Fetch each seed URL concurrently, save the HTML to
    ./data/<domain>.html and print a few elements extracted from the page."""

    def __init__(self, seed):
        """
        :param seed: iterable of URL strings to crawl
        """
        self.seed = seed
        self.data_path = './data/'

    def make_filename(self, url):
        """
        Extracts domain from a url. Prepend data_path and append '.html'

        :param url: string
        return <domain>.html string
        """
        # NOTE: '.' after www is escaped (was unescaped, so e.g. 'wwwX' matched).
        rx = re.compile(r'^https?:\/\/(?:www\.)?([^\/]+)\/?')
        m = rx.search(url)
        if m:
            return self.data_path + m[1] + '.html'
        print(f'Can not get domain from {url}')
        exit(-1)

    def write_to_file(self, filename, content):
        """
        Write string to given filename

        :param filename: string
        :param content: string
        """
        try:
            with open(filename, 'w') as f:
                f.write(content)
        except FileNotFoundError:
            # BUG fix: the message had lost its {filename} placeholder.
            print(f'File {filename} does not exist!')
        except Exception as e:
            print(f'Can not write to file {filename}: {str(e)}')
            exit(-1)

    def get_html(self, url):
        """GET the url, save the body to disk and run link extraction."""
        try:
            r = requests.get(url)
        except requests.RequestException:
            # try with SSL verification disabled.
            # this is just a dirty workaround
            # check https://levelup.gitconnected.com/solve-the-dreadful-certificate-issues-in-python-requests-module-2020d922c72f
            r = requests.get(url, verify=False)
        except Exception as e:
            # BUG fix: the f-string prefix was missing, so {url} and {str(e)}
            # were printed literally.
            print(f'Can not get url: {url}: {str(e)}!')
            exit(-1)

        # set content encoding explicitly
        r.encoding = "utf-8"

        # if we have the html => save it into file
        if r.ok:
            content = r.text
            filename = self.make_filename(url)
            self.write_to_file(filename, content)
            # BUG fix: extract_links() takes a single html argument; it was
            # previously called with two args, raising TypeError on every
            # successful fetch.
            self.extract_links(content)

    def extract_links(self, html):
        """Parse html and print the title and the 'module_1_1' div."""
        # create BeautifulSoup object, which represents the document as a nested data structure:
        soup = bs(html, 'html.parser')
        # get HTML <title> element:
        print(soup.title)
        # get HTML <title> element's content as string:
        print(soup.title.string)
        articles = soup.find("div", id="module_1_1")
        print(articles)

    def run(self):
        """
        run the crawler for each url in seed
        Use multithreading for each GET request
        """
        threads = []
        for url in self.seed:
            # BUG fix: target=self.get_html(url) CALLED the method in the
            # current thread and handed its return value (None) to Thread.
            # Pass the callable plus args so the request runs in the thread.
            tr = threading.Thread(target=self.get_html, args=(url,))
            tr.start()
            threads.append(tr)
        # wait for all fetches to finish before returning
        for tr in threads:
            tr.join()


if __name__ == '__main__':
    seed = [
        'https://www.autokelly.bg/',
        'https://www.imdb.com/chart/moviemeter/?ref_=nv_mv_mpm',
        'https://bnr.bg/hristobotev/radioteatre/list',
        'https://bnr.bg/lyubopitno/list',
        'https://www.jobs.bg/front_job_search.php?add_sh=1&from_hp=1&keywords%5B%5D=python',
        'https://bnr.bg/lyubopitno/list'
    ]
    crawler = Crawler(seed)
    crawler.run()
nilq/baby-python
python
# vim: tabstop=4 shiftwidth=4 softtabstop=4

#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

"""
Controller that returns information on the heat API versions
"""

import httplib   # Python 2 stdlib; replaced by http.client in Python 3
import json

import webob.dec


class Controller(object):

    """
    WSGI controller that answers every request with the list of heat API
    versions (300 Multiple Choices, JSON body).
    """

    def __init__(self, conf):
        # conf: application configuration object; kept for parity with other
        # controllers (not read anywhere in this class).
        self.conf = conf

    @webob.dec.wsgify
    def __call__(self, req):
        """Respond to a request for all OpenStack API versions."""
        href = self.get_href(req)

        def _version(ver_id, status):
            # One version descriptor with a single self-link.
            return {"id": ver_id,
                    "status": status,
                    "links": [{"rel": "self", "href": href}]}

        version_objs = [_version("v1.1", "CURRENT"),
                        _version("v1.0", "SUPPORTED")]

        response = webob.Response(request=req,
                                  status=httplib.MULTIPLE_CHOICES,
                                  content_type='application/json')
        response.body = json.dumps(dict(versions=version_objs))
        return response

    def get_href(self, req):
        # Base URL for the v1 API, derived from the incoming request's host.
        return "%s/v1/" % req.host_url
nilq/baby-python
python
# Train an LBPH face recognizer from a folder of labelled face images and
# save the trained model to 'lbph_trainer.yml'.
#
# Training filenames are expected to follow the pattern
# <name>.<label>.<frame_num>.<ext> — TODO confirm against the trainer folders.

import os
import numpy as np
import cv2 as cv

# Set up path to OpenCV's Haar Cascades for face detection.
cascade_path = "C:/Python372/Lib/site-packages/cv2/data/"
face_detector = cv.CascadeClassifier(cascade_path + 'haarcascade_frontalface_default.xml')

# Set up path to training images and prepare names/labels.
# For pre-loaded Demming images use demming_trainer folder.
# To use your own face use the trainer folder.
#train_path = './trainer'
train_path = './demming_trainer'
image_paths = [os.path.join(train_path, f) for f in os.listdir(train_path)]
images, labels = [], []  # face crops and their integer labels, in lockstep

# Extract face rectangles and assign numerical labels.
for image in image_paths:
    # Read greyscale: both the Haar detector and LBPH work on grey images.
    train_image = cv.imread(image, cv.IMREAD_GRAYSCALE)
    # Parse "<name>.<label>.<frame_num>" out of the file name.
    label = int(os.path.split(image)[-1].split('.')[1])
    name = os.path.split(image)[-1].split('.')[0]
    frame_num = os.path.split(image)[-1].split('.')[2]
    faces = face_detector.detectMultiScale(train_image)
    for (x, y, w, h) in faces:
        # Keep only the face rectangle, not the whole frame.
        images.append(train_image[y:y + h, x:x + w])
        labels.append(label)
        print(f"Preparing training images for {name}.{label}.{frame_num}")
        # Briefly show each crop so the operator can eyeball the data.
        cv.imshow("Training Image", train_image[y:y + h, x:x + w])
        cv.waitKey(50)  # 50 ms per image; also lets the GUI event loop run
cv.destroyAllWindows()

# Perform the training
recognizer = cv.face.LBPHFaceRecognizer_create()
recognizer.train(images, np.array(labels))
recognizer.write('lbph_trainer.yml')
print("Training complete. Exiting...")
nilq/baby-python
python
from django.contrib.auth import get_user_model
from django.test import TestCase  # an extension of Python's TestCase
from django.urls import reverse, resolve
from .models import (
    PostJobModel,
    ApplicationModel
)
from .views import (
    createJobView,
    JobListView,
    JobsDetailView,
    SearchResultsListView,
    applicantCreateView,
    ApplicantList,
)
#from .forms import


class CustomUserTests(TestCase):
    """Tests for the custom user model (regular user and superuser)."""

    def test_create_user(self):
        User = get_user_model()
        user = User.objects.create_user(
            username='partho',
            email='partho007@gmail.com',
            password='testpass123',
            first_name='Partho',
            last_name='Bhattacharjee',
            country='Bangladesh',
            city_or_district='Sylhet'
        )
        self.assertEqual(user.email, 'partho007@gmail.com')
        self.assertEqual(user.country, 'Bangladesh')
        self.assertEqual(user.city_or_district, 'Sylhet')
        self.assertTrue(user.is_active)
        self.assertFalse(user.is_staff)
        self.assertFalse(user.is_superuser)

    def test_create_superuser(self):
        User = get_user_model()
        admin_user = User.objects.create_superuser(
            username='superadmin',
            email='superadmin@email.com',
            password='testpass123'
        )
        self.assertEqual(admin_user.email, 'superadmin@email.com')
        self.assertTrue(admin_user.is_active)
        self.assertTrue(admin_user.is_staff)
        self.assertTrue(admin_user.is_superuser)


class JobsTests(TestCase):  # new
    """Tests for job posting, the job list page and the job detail page."""

    def setUp(self):
        url = reverse('job_list')
        self.response = self.client.get(url)
        # NOTE(review): no user with these credentials exists at setUp time,
        # so this login silently returns False — confirm whether it is needed.
        self.client.login(email='partho007@gmail.com', password='testpass123')

    def test_job_post(self):
        User = get_user_model()
        user = User.objects.create_user(
            username='partho',
            email='partho007@gmail.com',
            password='testpass123',
            first_name='Partho',
            last_name='Bhattacharjee',
            country='Bangladesh',
            city_or_district='Sylhet'
        )
        post = PostJobModel.objects.create(
            Job_author_id=user.id,
            Job_title='Sales Executive',
            Company='Unique Trading Company',
            Job_location='Dhaka, Bangladesh',
            Employee_type='Full-time',
            Description='szdfg lzsiuUS DhfkSJDHfiuSHDfLIDbfgysDgfbKSDGfiAeir AfIUGDlsf.',
            Add_skills='aroshA OgoSHDfguHAS DfiDHfiADF'
        )
        self.assertEqual(post.Job_title, 'Sales Executive')
        self.assertEqual(post.Company, 'Unique Trading Company')
        self.assertEqual(post.Job_location, 'Dhaka, Bangladesh')
        # BUG fix: these used assertTrue(value, 'literal') — the second
        # argument of assertTrue is the failure *message*, so the assertions
        # passed for any truthy value.  Use assertEqual to actually compare.
        self.assertEqual(post.Employee_type, 'Full-time')
        self.assertEqual(post.Description,
                         'szdfg lzsiuUS DhfkSJDHfiuSHDfLIDbfgysDgfbKSDGfiAeir AfIUGDlsf.')
        self.assertEqual(post.Add_skills, 'aroshA OgoSHDfguHAS DfiDHfiADF')
        # BUG fix: assertFalse(x, 'True') also treated 'True' as the message.
        self.assertFalse(post.Is_approved)

    def test_job_list_template(self):
        self.assertEqual(self.response.status_code, 200)
        self.assertTemplateUsed(self.response, 'jobs/job_list.html')
        self.assertContains(self.response, 'Search for your next job')
        self.assertNotContains(
            self.response, 'Hi there! I should not be on the page.')

    # BUG fix: this method was named job_detail_view, so the test runner
    # never collected or ran it.  Renamed with the test_ prefix, and given a
    # Job_author (like test_job_post) so the create() succeeds.
    def test_job_detail_view(self):
        User = get_user_model()
        user = User.objects.create_user(
            username='partho',
            email='partho007@gmail.com',
            password='testpass123',
            first_name='Partho',
            last_name='Bhattacharjee',
            country='Bangladesh',
            city_or_district='Sylhet'
        )
        post = PostJobModel.objects.create(
            Job_author_id=user.id,
            Job_title='Sales Executive',
            Company='Unique Trading Company',
            Job_location='Dhaka, Bangladesh',
            Employee_type='Full-time',
            Description='szdfg lzsiuUS DhfkSJDHfiuSHDfLIDbfgysDgfbKSDGfiAeir AfIUGDlsf.',
            Add_skills='aroshA OgoSHDfguHAS DfiDHfiADF'
        )
        response = self.client.get(post.get_absolute_url())
        no_response = self.client.get('/jobs/12345/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(no_response.status_code, 404)
        self.assertContains(response, 'Sales Executive')
        self.assertTemplateUsed(response, 'jobs/job_detail.html')
nilq/baby-python
python
import logging
import time

import celery
from django.core.exceptions import ObjectDoesNotExist
from django.db.models.query_utils import Q

from genes.canonical_transcripts.canonical_transcript_manager import CanonicalTranscriptManager
from genes.gene_matching import GeneSymbolMatcher
from genes.models import GeneCoverageCollection, GeneCoverageCanonicalTranscript, TranscriptVersion
from seqauto.models import EnrichmentKit
from snpdb.models import DataState


@celery.shared_task
def reload_gene_coverage_collection(gene_coverage_collection_id):
    """Wipe and re-load a GeneCoverageCollection from its source file.

    Deletes the collection's existing coverage records, re-parses the file via
    load_from_file(), and tracks progress through DataState RUNNING/COMPLETE.
    """
    logging.info("reload_gene_coverage_collection(%s) START", gene_coverage_collection_id)
    start = time.time()
    gene_coverage_collection = GeneCoverageCollection.objects.get(pk=gene_coverage_collection_id)
    # Remove all previously loaded coverage rows before re-importing.
    gene_coverage_collection.genecoverage_set.all().delete()
    gene_coverage_collection.genecoveragecanonicaltranscript_set.all().delete()
    gene_coverage_collection.data_state = DataState.RUNNING
    gene_coverage_collection.save()
    genome_build = gene_coverage_collection.genome_build
    # Helpers shared across the whole load (matching symbols / transcripts).
    gene_matcher = GeneSymbolMatcher()
    canonical_transcript_manager = CanonicalTranscriptManager()
    transcript_versions_by_id = TranscriptVersion.transcript_versions_by_id(genome_build,
                                                                            genome_build.annotation_consortium)
    try:
        # Walk the relation chain to the kit; any missing link raises
        # ObjectDoesNotExist, in which case we load without a kit.
        enrichment_kit = gene_coverage_collection.qcgenecoverage.qc.sequencing_sample.enrichment_kit
    except ObjectDoesNotExist:
        enrichment_kit = None
    gene_coverage_collection.load_from_file(enrichment_kit,
                                            gene_matcher=gene_matcher,
                                            canonical_transcript_manager=canonical_transcript_manager,
                                            transcript_versions_by_id=transcript_versions_by_id)
    gene_coverage_collection.data_state = DataState.COMPLETE
    gene_coverage_collection.save()
    end = time.time()
    logging.info("reload_gene_coverage_collection(%s) DONE in %.1f seconds", gene_coverage_collection_id, (end - start))


# TODO: This is only needed to migrate existing data - it just takes hours so want to spread across celery tasks
# Once all environments https://github.com/SACGF/variantgrid/wiki/Upgrade_Notes have this applied:
# https://github.com/SACGF/variantgrid/issues/1216#issuecomment-440561628 delete this task etc.
@celery.shared_task
def create_canonical_gene_coverage_for_enrichment_kit(enrichment_kit_id):
    """Back-fill GeneCoverageCanonicalTranscript rows for one enrichment kit.

    With a falsy enrichment_kit_id, processes collections that have no QC
    (qc__isnull=True) using the default canonical collection instead.
    Collections that already have canonical-transcript rows are skipped.
    """
    #logging.info("create_canonical_gene_coverage_for_enrichment_kit %s", enrichment_kit_id)
    canonical_transcript_manager = CanonicalTranscriptManager()
    if enrichment_kit_id:
        enrichment_kit = EnrichmentKit.objects.get(pk=enrichment_kit_id)
        canonical_collection = canonical_transcript_manager.get_canonical_collection_for_enrichment_kit(enrichment_kit)
        coverage_collection_qs = GeneCoverageCollection.objects.filter(qc__bam_file__unaligned_reads__sequencing_sample__enrichment_kit=enrichment_kit)
    else:
        canonical_collection = canonical_transcript_manager.get_default_canonical_collection()
        coverage_collection_qs = GeneCoverageCollection.objects.filter(qc__isnull=True)
    canonical_transcripts = canonical_transcript_manager.get_canonical_transcripts(canonical_collection)

    # Skip ones that have already been calculated
    already_calculated_q = Q(genecoveragecanonicaltranscript__isnull=False)
    #num_already_calculated = coverage_collection_qs.filter(already_calculated_q).distinct().count()
    #if num_already_calculated:
    #    logging.info("Skipping %d already calculated", num_already_calculated)

    for cc in coverage_collection_qs.exclude(already_calculated_q):
        # canonical_transcripts is a (transcript_ids, original_transcript) pair.
        transcript_ids, original_transcript = canonical_transcripts
        # Match either by transcript id or by the original (RefSeq) name.
        qt = Q(transcript_id__in=transcript_ids)
        qrefseq = Q(original_transcript__in=original_transcript)
        qs = cc.genecoverage_set.filter(qt | qrefseq)
        if qs.exists():
            #logging.info("Getting GeneCoverage records for %s", cc)
            canonical_transcripts_list = []
            for gc_dict in qs.values():
                # GeneCoverageCanonicalTranscript has all of GeneCoverage's fields
                del gc_dict['id']  # let the new row get its own primary key
                gc_dict["canonical_transcript_collection"] = canonical_collection
                canonical_coverage = GeneCoverageCanonicalTranscript(**gc_dict)
                canonical_transcripts_list.append(canonical_coverage)

            if canonical_transcripts_list:
                #logging.info("Bulk inserting %d GeneCoverageCanonicalTranscript records", len(canonical_transcripts_list))
                GeneCoverageCanonicalTranscript.objects.bulk_create(canonical_transcripts_list)
nilq/baby-python
python
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础计算平台 available.
Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _


class ErrorCode(object):
    # Platform / web module prefixes for composed error codes.
    BKDATA_PLAT_CODE = "15"
    BKDATA_WEB_CODE = "20"


class DataError(Exception):
    """Base exception for the module.

    Positional args: args[0] overrides MESSAGE, args[1] is optional payload
    data.  Keyword args: code overrides ERROR_CODE, errors carries details.
    """

    MESSAGE = _("系统异常")
    ERROR_CODE = "500"

    def __init__(self, *args, **kwargs):
        """
        @param {String} code  optional override for the exception status code
        """
        super(DataError, self).__init__(*args)
        # A truthy 'code' kwarg wins over the class-level default.
        code = kwargs.get("code")
        self.code = str(code) if code else self.ERROR_CODE
        # args[0], when present, replaces the class-level MESSAGE.
        self.message = force_text(args[0]) if args else force_text(self.MESSAGE)
        # args[1], when present, is extra data to return with the error.
        self.data = args[1] if len(args) >= 2 else None
        self.errors = kwargs.get("errors")


class FormError(DataError):
    # Parameter validation failed.
    MESSAGE = _("参数验证失败")
    ERROR_CODE = "001"


class ApiResultError(DataError):
    # Remote service returned an abnormal result.
    MESSAGE = _("远程服务请求结果异常")
    ERROR_CODE = "002"


class ComponentCallError(DataError):
    # Component invocation failed.
    MESSAGE = _("组件调用异常")
    ERROR_CODE = "003"


class PermissionError(DataError):
    # NOTE: shadows the builtin PermissionError; kept for backward
    # compatibility with existing callers.
    MESSAGE = _("权限不足")
    ERROR_CODE = "403"


class ApiRequestError(DataError):
    # 属于严重的场景,一般为第三方服务挂了,ESB调用超时
    # (serious case: third-party service down or ESB call timed out)
    MESSAGE = _("服务不稳定,请检查组件健康状况")
    ERROR_CODE = "015"


class StorageNodeNotFound(DataError):
    # Associated storage node could not be found.
    MESSAGE = _("找不到关联的存储节点")
    ERROR_CODE = "018"


class ETLCheckError(DataError):
    # ETL configuration error.
    MESSAGE = _("ETL配置错误")
    ERROR_CODE = "019"


class ETLAnalyseError(DataError):
    # ETL parsing failure.
    MESSAGE = _("ETL解析异常")
    ERROR_CODE = "021"


class CacheKeyError(DataError):
    # Failed to fetch cached content.
    MESSAGE = _("获取缓存内容失败")
    ERROR_CODE = "022"
nilq/baby-python
python
from typing import Any

import numpy
from scipy.stats import poisson

from .PropertyGenerator import PropertyGenerator


class PoissonNumberGenerator(PropertyGenerator):
    """PropertyGenerator that draws numbers from a Poisson distribution."""

    def __init__(self, mu: float, return_int: bool = False):
        """
        Init a NumberGenerator which will output numbers taken from a Poisson
        distribution.  (The previous docstring wrongly said "skewed normal".)

        :param mu: average number of events per interval (Poisson rate).
        :param return_int: return number as integer instead of float.
        """
        self.mu = mu
        self.return_int = return_int

    def generate(self) -> Any:
        """Draw one sample; returns a native Python scalar.

        BUG fix: numpy.asscalar() was deprecated in NumPy 1.16 and removed in
        NumPy 1.23, so the old implementation raises AttributeError on modern
        NumPy.  int()/.item() are the supported replacements and preserve the
        original return values (poisson.rvs yields an integer scalar, so both
        branches returned a Python int, as before).
        """
        generated = poisson.rvs(self.mu)
        if self.return_int:
            # rounding an integer sample is a no-op, so int() suffices
            return int(generated)
        else:
            return generated.item()
nilq/baby-python
python
#!/usr/bin/env python3
# Glen-Stokes momentum solver for a 2D dome-shaped ice sheet (Firedrake).
# Flat script: parse args, build an extruded mesh hierarchy shaped by the
# exact SIA profile, solve on each level (grid sequencing), save diagnostics.

import sys
import argparse
import numpy as np
from firedrake import *

# recover stage3/:
#   ./solve.py -refine 0 -mz 8 -marginheight 0.0
# performance demo (1 min run time on my thelio)
#   tmpg -n 12 ./solve.py -s_snes_converged_reason -mx 4000 -refine 2 -s_snes_monitor -s_snes_atol 1.0e-2

parser = argparse.ArgumentParser(description=
'''stage4/  Solve the Glen-Stokes momentum equations for a 2D ice sheet using an extruded mesh, rescaled equations, vertical grid sequencing, and physical diagnostics.''',
    add_help=False)
parser.add_argument('-eps', type=float, metavar='X', default=1.0e-4,
                    help='regularization used in viscosity (default=10^{-4})')
parser.add_argument('-marginheight', type=float, metavar='X', default=1.0,
                    help='height of degeneration point at margin (default=1 m)')
parser.add_argument('-mx', type=int, metavar='MX', default=50,
                    help='subintervals in coarse mesh (default=50)')
parser.add_argument('-mz', type=int, metavar='MZ', default=2,
                    help='vertical layers in coarse mesh (default=2)')
parser.add_argument('-o', metavar='FILE.pvd', type=str, default='dome.pvd',
                    help='output filename (default=dome.pvd)')
parser.add_argument('-refine', type=int, metavar='X', default=1,
                    help='refinements when generating mesh hierarchy (default=1)')
parser.add_argument('-refinefactor', type=int, metavar='X', default=4,
                    help='refinement factor when generating mesh hierarchy (default=4)')
parser.add_argument('-single', action='store_true', default=False,
                    help='solve only on the finest level, without grid sequencing')
parser.add_argument('-solvehelp', action='store_true', default=False,
                    help='print help for solve.py options and stop')
args, unknown = parser.parse_known_args()
if args.solvehelp:
    parser.print_help()
    sys.exit(0)

def profile(x, R, H):
    '''Exact SIA solution with half-length (radius) R and maximum height H, on
    interval [0,L] = [0,2R], centered at x=R.
    See van der Veen (2013) equation (5.50).'''
    n = 3.0                       # glen exponent
    p1 = n / (2.0 * n + 2.0)      # = 3/8
    q1 = 1.0 + 1.0 / n            # = 4/3
    Z = H / (n - 1.0)**p1         # outer constant
    X = (x - R) / R               # rescaled coord
    Xin = abs(X[abs(X) < 1.0])    # rescaled distance from center
    Yin = 1.0 - Xin
    s = np.zeros(np.shape(x))
    # surface height only inside the ice extent |X| < 1; zero outside
    s[abs(X) < 1.0] = Z * ( (n + 1.0) * Xin - 1.0 \
                            + n * Yin**q1 - n * Xin**q1 )**p1
    s[s < 1.0] = args.marginheight  # needed so that prolong() can find nodes
    return s

# level-independent information (physical constants and solver options)
secpera = 31556926.0    # seconds per year
g = 9.81                # m s-2
rho = 910.0             # kg m-3
n = 3.0
A3 = 3.1689e-24         # Pa-3 s-1; EISMINT I value of ice softness
B3 = A3**(-1.0/3.0)     # Pa s(1/3);  ice hardness
Dtyp = 1.0 / secpera    # s-1
sc = 1.0e-7             # velocity scale constant for symmetric equation scaling
fbody = Constant((0.0, - rho * g))   # gravity body force per unit volume
par = {'snes_linesearch_type': 'bt',
       'ksp_type': 'preonly',
       'pc_type': 'lu',
       'pc_factor_shift_type': 'inblocks'}
printpar = PETSc.Sys.Print   # print once even in parallel

def D(w):  # strain-rate tensor (symmetric gradient)
    return 0.5 * (grad(w) + grad(w).T)

printpar('generating %d-level mesh hierarchy ...' % (args.refine + 1))
R = 10000.0   # half-length (radius) of the dome, m
H = 1000.0    # maximum dome height, m
basemesh = IntervalMesh(args.mx, length_or_left=0.0, right=2.0*R)
xbase = basemesh.coordinates.dat.data_ro
P1base = FunctionSpace(basemesh,'P',1)
sbase = Function(P1base)
sbase.dat.data[:] = profile(xbase, R, H)
# extruded hierarchy: refined only in the vertical ("semi-coarsened")
hierarchy = SemiCoarsenedExtrudedHierarchy( \
                basemesh, 1.0, base_layer=args.mz,
                refinement_ratio=args.refinefactor, nref=args.refine)
for j in range(args.refine + 1):
    # stretch the unit-height extruded mesh so its top follows the SIA surface
    Q1R = FunctionSpace(hierarchy[j], 'P', 1, vfamily='R', vdegree=0)
    s = Function(Q1R)
    s.dat.data[:] = sbase.dat.data_ro[:]
    Vcoord = hierarchy[j].coordinates.function_space()
    x, z = SpatialCoordinate(hierarchy[j])
    XZ = Function(Vcoord).interpolate(as_vector([x, s * z]))
    hierarchy[j].coordinates.assign(XZ)

# solve the problem for each level in the hierarchy
upcoarse = None
levels = args.refine + 1
jrange = [levels - 1,] if args.single else range(levels)
for j in jrange:
    mesh = hierarchy[j]
    # Taylor-Hood P2/P1 mixed space for (velocity, pressure)
    V = VectorFunctionSpace(mesh, 'Lagrange', 2)
    W = FunctionSpace(mesh, 'Lagrange', 1)
    Z = V * W
    up = Function(Z)
    scu, p = split(up)       # scaled velocity, unscaled pressure
    v, q = TestFunctions(Z)

    # use a more generous eps except when we get to the finest level
    if args.single or j == levels - 1:
        eps = args.eps
    else:
        eps = 100.0 * args.eps

    # symmetrically rescale the equations for better conditioning
    Du2 = 0.5 * inner(D(scu * sc), D(scu * sc)) + (eps * Dtyp)**2.0
    nu = 0.5 * B3 * Du2**((1.0 / n - 1.0)/2.0)   # Glen-law effective viscosity
    F = ( sc*sc * inner(2.0 * nu * D(scu), D(v)) \
          - sc * p * div(v) - sc * q * div(scu) \
          - sc * inner(fbody, v) ) * dx

    # different boundary conditions relative to stage2/:
    #   base label is 'bottom', and we add noslip condition on degenerate ends
    bcs = [ DirichletBC(Z.sub(0), Constant((0.0, 0.0)), 'bottom'),
            DirichletBC(Z.sub(0), Constant((0.0, 0.0)), (1,2)) ]

    # get initial condition by coarsening previous level
    if upcoarse is not None:
        prolong(upcoarse, up)

    printpar('solving on level %d (%d x %d mesh) ...' \
             % (j, args.mx, args.mz * (args.refinefactor)**j))
    n_u, n_p = V.dim(), W.dim()
    printpar('  sizes: n_u = %d, n_p = %d' % (n_u,n_p))
    solve(F == 0, up, bcs=bcs, options_prefix='s',
          solver_parameters=par)
    if upcoarse is None:
        upcoarse = up.copy()

    # print average and maximum velocity
    scu, _ = up.split()
    u = scu * sc   # undo the symmetric scaling to get physical velocity
    P1 = FunctionSpace(mesh, 'CG', 1)
    one = Constant(1.0, domain=mesh)
    area = assemble(dot(one,one) * dx)
    umagav = assemble(sqrt(dot(u, u)) * dx) / area
    umag = interpolate(sqrt(dot(u, u)), P1)
    with umag.dat.vec_ro as vumag:
        umagmax = vumag.max()[1]
    printpar('  ice speed (m a-1): av = %.3f, max = %.3f' \
             % (umagav * secpera, umagmax * secpera))

# generate tensor-valued deviatoric stress tau, and effective viscosity nu,
# from the velocity solution
def stresses(mesh, u):
    '''Return (tau, nu): deviatoric stress (bar) and effective viscosity
    (Pa s) interpolated from velocity u on the given mesh.'''
    Du2 = 0.5 * inner(D(u), D(u)) + (args.eps * Dtyp)**2.0
    Q1 = FunctionSpace(mesh,'Q',1)
    TQ1 = TensorFunctionSpace(mesh, 'Q', 1)
    nu = Function(Q1).interpolate(0.5 * B3 * Du2**((1.0 / n - 1.0)/2.0))
    nu.rename('effective viscosity (Pa s)')
    tau = Function(TQ1).interpolate(2.0 * nu * D(u))
    tau /= 1.0e5   # convert Pa -> bar
    tau.rename('tau (bar)')
    return tau, nu

printpar('saving u,p,tau,nu,rank to %s ...' % args.o)
u, p = up.split()
u *= sc          # physical velocity (m s-1)
tau, nu = stresses(hierarchy[-1], u)
u *= secpera     # convert to m/a for output
p /= 1.0e5       # convert Pa -> bar
u.rename('velocity (m/a)')
p.rename('pressure (bar)')
# integer-valued element-wise process rank
rank = Function(FunctionSpace(mesh,'DG',0))
rank.dat.data[:] = mesh.comm.rank
rank.rename('rank')
# NOTE(review): this writes scu (from the loop) rather than u; they appear to
# alias the same split subfunction, but confirm the intended field is saved.
File(args.o).write(scu, p, tau, nu, rank)
nilq/baby-python
python
# Copyright (c) 2015, Dataent Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt

from __future__ import unicode_literals
import dataent


def execute():
    """Patch: prefix a leading "/" onto legacy 'files/...' values stored in
    every Attach / Attach Image field, for both standard and custom fields."""
    # Gather (doctype, fieldname) pairs from the standard and custom schemas.
    standard_fields = dataent.db.sql("""select parent, fieldname from `tabDocField` where fieldtype in ('Attach', 'Attach Image')""")
    custom_fields = dataent.db.sql("""select dt, fieldname from `tabCustom Field` where fieldtype in ('Attach', 'Attach Image')""")

    for doctype, fieldname in standard_fields + custom_fields:
        # Rewrite only values still using the un-prefixed legacy form.
        dataent.db.sql("""update `tab{doctype}` set `{fieldname}`=concat("/", `{fieldname}`) where `{fieldname}` like 'files/%'""".format(doctype=doctype, fieldname=fieldname))
nilq/baby-python
python
###############################################################
#
# ADIABATIC_FLAME - A freely-propagating, premixed flat flame
#
###############################################################
# Sequential Cantera script: set up a C2H4/air mixture, solve the free flame
# with progressively tighter grid refinement, save a CSV, then (optionally)
# plot results.  NOTE: matplotlib.pylab's star-import provides array/zeros
# and the plotting helpers used below.

#import :
from cantera import *
from matplotlib.pylab import *
import numpy

#Functions :

#################################################################
# Prepare your run
#################################################################
#Parameter values :
#General
p = 101325            # pressure [Pa]
tin = 300.0           # unburned gas temperature [K]
phi = 1.0             # equivalence ratio
pref = 'T298-P1_SK'   # tag used in the output CSV filename

#Initial grids, chosen to be 0.02cm long :
# - Refined grid at inlet and outlet, 6 points in x-direction :
initial_grid = 2*array([0.0, 0.001, 0.01, 0.02, 0.029, 0.03],'d')/3 # m
# - Uniform grid, 6 points in x-direction (import numpy):
#initial_grid = 0.02*array([0.0, 0.2, 0.4, 0.6, 0.8, 1.0],'d') # m
# - Uniform grid of 300 points using numpy :
#initial_grid = numpy.linspace(0,0.02 , 300)

#Set tolerance properties
tol_ss = [1.0e-5, 1.0e-8]  # [rtol atol] for steady-state problem
tol_ts = [1.0e-5, 1.0e-8]  # [rtol atol] for time stepping

loglevel = 1               # amount of diagnostic output (0 to 5)
refine_grid = True         # True to enable refinement, False to disable

#Import gas phases with mixture transport model
gas = Solution('skeletal.cti','gas')

#################
#Stoechiometry :
fuel_species = 'C2H4'
m = gas.n_species
# moles of O2 needed per mole of fuel: C + H/4
stoich_O2 = gas.n_atoms(fuel_species,'C') + 0.25*gas.n_atoms(fuel_species,'H')
air_N2_O2_ratio = 3.76
ifuel = gas.species_index(fuel_species)
io2 = gas.species_index('O2')
in2 = gas.species_index('N2')

# mole-fraction vector: fuel scaled by phi against stoichiometric air
x = zeros(m,'d')
x[ifuel] = phi
x[io2] = stoich_O2
x[in2] = stoich_O2*air_N2_O2_ratio

#################
#Assembling objects :
#Set gas state to that of the unburned gas
gas.TPX = tin, p, x

#Create the free laminar premixed flame
f = FreeFlame(gas, initial_grid)
#f.set_fixed_temperature(650)
f.flame.set_steady_tolerances(default=tol_ss)
f.flame.set_transient_tolerances(default=tol_ts)
f.inlet.X = x
f.inlet.T = tin

#################################################################
# Program starts here
#################################################################
#First flame:

#No energy for starters
f.energy_enabled = False

#Refinement criteria
f.set_refine_criteria(ratio = 7.0, slope = 1, curve = 1)

#Max number of times the Jacobian will be used before it must be re-evaluated
f.set_max_jac_age(50, 50)

#Set time steps whenever Newton convergence fails
f.set_time_step(5.e-06, [10, 20, 80]) #s

#Calculation
f.solve(loglevel, refine_grid)

#################
#Second flame:

#Energy equation enabled
f.energy_enabled = True

#Refinement criteria when energy equation is enabled
f.set_refine_criteria(ratio = 5.0, slope = 0.5, curve = 0.5)

#Calculation and save of the results
f.solve(loglevel, refine_grid)

#################
#Third flame and so on ...:

#Refinement criteria should be changed ...
f.set_refine_criteria(ratio = 5.0, slope = 0.3, curve = 0.3)
f.solve(loglevel, refine_grid)

#################
#Third flame and so on ...:

#Refinement criteria should be changed ...
f.set_refine_criteria(ratio = 3.0, slope = 0.1, curve = 0.1)
f.solve(loglevel, refine_grid)

#################
f.set_refine_criteria(ratio = 2.0, slope = 0.05, curve = 0.05, prune = 0.01)
f.solve(loglevel, refine_grid)

#Fourth flame and so on ...
f.set_refine_criteria(ratio = 2.0, slope = 0.02, curve = 0.02, prune = 0.01)
f.solve(loglevel, refine_grid)

print('mixture averaged flamespeed = ',f.u[0])

#################################################################
# Save your results if needed
#################################################################
#Write the velocity, temperature, density, and mole fractions to a CSV file
f.write_csv('c2h4-'+str(pref)+'.csv', species='Y', quiet=False)
#f.save('restore.xml','ch4_adiabatic')
#f.write_avbp('Sol-CAN2AV_P-'+str(p)+'-T-'+str(tin)+'-Phi-'+str(phi)+'.csv', quiet=False)
# NOTE(review): the bare name below raises NameError on purpose(?) to halt the
# script before the plotting section — confirm, or replace with sys.exit().
stop

#################################################################
# Plot your results
#################################################################
#Plot the velocity, temperature, density
z = f.flame.grid
T = f.T
u = f.u

fig=figure(1)

# create first subplot - adiabatic flame temperature
a=fig.add_subplot(221)
a.plot(z,T)
title(r'$T_{adiabatic}$ vs. Position')
xlabel(r'Position [m]', fontsize=15)
ylabel("Adiabatic Flame Temperature [K]")
a.xaxis.set_major_locator(MaxNLocator(10)) # this controls the number of tick marks on the axis

# create second subplot - velocity
b=fig.add_subplot(222)
b.plot(z,u)
title(r'Velocity vs. Position')
xlabel(r'Position [m]', fontsize=15)
ylabel("velocity [m/s]")
b.xaxis.set_major_locator(MaxNLocator(10))

# create third subplot - rho
c=fig.add_subplot(223)
p = zeros(f.flame.n_points,'d')
for n in range(f.flame.n_points):
    f.set_gas_state(n)
    p[n]= gas.density_mass
c.plot(z,p)
title(r'Rho vs. Position')
xlabel(r'Position [m]', fontsize=15)
ylabel("Rho [kg/m^3]")
c.xaxis.set_major_locator(MaxNLocator(10))

# create fourth subplot - specie CH4
d=fig.add_subplot(224)
ch4 = zeros(f.flame.n_points,'d')
for n in range(f.flame.n_points):
    f.set_gas_state(n)
    ch4[n]= gas.Y[ifuel]
d.plot(z,ch4)
title(r'CH4 vs. Position')
xlabel(r'Position [m]', fontsize=15)
ylabel("CH4 Mole Fraction")
d.xaxis.set_major_locator(MaxNLocator(10))

# Set title
fig.text(0.5,0.95,r'Adiabatic $CH_{4}$ + Air Free Flame at Phi = 1 Ti = 300K and P = 1atm',fontsize=22,horizontalalignment='center')

subplots_adjust(left=0.08, right=0.96, wspace=0.25)
show()

# NOTE(review): missing call parentheses — this accesses the method object
# without invoking it; presumably f.show_stats() was intended.
f.show_stats
nilq/baby-python
python
# Ad-hoc script: fetch the reminders of one hard-coded user and print them.
from mysqlhelper import DBConnection

# NOTE(review): credentials and host are hard-coded; consider moving them to
# configuration or environment variables before sharing this script.
link_bd = DBConnection(user="dacrover_user", password="dacrover_pass",
                       host="itsuki.e", port=3306, database="dacrover")

# Select rows from `reminders` for this user; json=True presumably makes the
# helper return dict-like rows (keyed by column name) — TODO confirm against
# the mysqlhelper API.
reminder_target = link_bd.select('reminders', where="`ReminderUser` = 'Тагир'", json=True)

if (len(reminder_target) > 0):
    # Only the first matching row is inspected.
    reminder_target = reminder_target[0]
    print(reminder_target)
    print(reminder_target['ReminderDisc'])
    # ReminderList stores several entries joined with the '[DEL]' separator.
    print(reminder_target['ReminderList'].split('[DEL]'))
else:
    print('Заметок нет')
nilq/baby-python
python
# coding: utf-8

"""
    flyteidl/service/admin.proto

    No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)  # noqa: E501

    OpenAPI spec version: version not set

    Generated by: https://github.com/swagger-api/swagger-codegen.git
"""

import pprint
import re  # noqa: F401

import six

from flyteadmin.models.core_blob_type import CoreBlobType  # noqa: F401,E501
from flyteadmin.models.core_enum_type import CoreEnumType  # noqa: F401,E501
from flyteadmin.models.core_literal_type import CoreLiteralType  # noqa: F401,E501
from flyteadmin.models.core_schema_type import CoreSchemaType  # noqa: F401,E501
from flyteadmin.models.core_simple_type import CoreSimpleType  # noqa: F401,E501
from flyteadmin.models.core_structured_dataset_type import CoreStructuredDatasetType  # noqa: F401,E501
from flyteadmin.models.core_type_annotation import CoreTypeAnnotation  # noqa: F401,E501
from flyteadmin.models.protobuf_struct import ProtobufStruct  # noqa: F401,E501


class CoreLiteralType(object):
    """Swagger model for a Flyte literal type (flyteidl/service/admin.proto).

    Auto-generated model class.  Every attribute listed in ``swagger_types``
    is exposed as a plain get/set property backed by an underscore-prefixed
    slot; the properties are attached in a loop right after the class body.
    """

    # attribute name -> swagger type name of the attribute's value
    swagger_types = {
        'simple': 'CoreSimpleType',
        'schema': 'CoreSchemaType',
        'collection_type': 'CoreLiteralType',
        'map_value_type': 'CoreLiteralType',
        'blob': 'CoreBlobType',
        'enum_type': 'CoreEnumType',
        'structured_dataset_type': 'CoreStructuredDatasetType',
        'metadata': 'ProtobufStruct',
        'annotation': 'CoreTypeAnnotation'
    }

    # attribute name -> JSON key used on the wire
    attribute_map = {
        'simple': 'simple',
        'schema': 'schema',
        'collection_type': 'collection_type',
        'map_value_type': 'map_value_type',
        'blob': 'blob',
        'enum_type': 'enum_type',
        'structured_dataset_type': 'structured_dataset_type',
        'metadata': 'metadata',
        'annotation': 'annotation'
    }

    # constructor-argument order; also the order used by __init__ below
    _FIELD_NAMES = ('simple', 'schema', 'collection_type', 'map_value_type',
                    'blob', 'enum_type', 'structured_dataset_type',
                    'metadata', 'annotation')

    def __init__(self, simple=None, schema=None, collection_type=None, map_value_type=None, blob=None, enum_type=None, structured_dataset_type=None, metadata=None, annotation=None):  # noqa: E501
        """CoreLiteralType - a model defined in Swagger"""  # noqa: E501
        self.discriminator = None
        supplied = (simple, schema, collection_type, map_value_type, blob,
                    enum_type, structured_dataset_type, metadata, annotation)
        # Initialise every backing slot to None first, then route each
        # non-None constructor argument through its public property setter.
        for field_name, value in zip(self._FIELD_NAMES, supplied):
            setattr(self, '_' + field_name, None)
            if value is not None:
                setattr(self, field_name, value)

    def to_dict(self):
        """Returns the model properties as a dict"""
        def _convert(value):
            # Recursively serialise nested models, lists and dicts.
            if isinstance(value, list):
                return [item.to_dict() if hasattr(item, "to_dict") else item
                        for item in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {key: (val.to_dict() if hasattr(val, "to_dict") else val)
                        for key, val in value.items()}
            return value

        result = {}
        for attr in six.iterkeys(self.swagger_types):
            result[attr] = _convert(getattr(self, attr))
        if issubclass(CoreLiteralType, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return (isinstance(other, CoreLiteralType) and
                self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other


def _literal_type_property(field_name, doc):
    """Build a pass-through property backed by the '_<field_name>' slot."""
    def _get(self):
        return getattr(self, '_' + field_name)

    def _set(self, value):
        setattr(self, '_' + field_name, value)

    return property(_get, _set, doc=doc)


# Per-field documentation attached to the generated properties.
_FIELD_DOCS = {
    'simple': 'A simple type that can be compared one-to-one with another.',
    'schema': 'A complex type that requires matching of inner fields.',
    'collection_type': 'Defines the type of the value of a collection. '
                       'Only homogeneous collections are allowed.',
    'map_value_type': 'Defines the type of the value of a map type. '
                      'The type of the key is always a string.',
    'blob': 'A blob might have specialized implementation details depending '
            'on associated metadata.',
    'enum_type': 'Defines an enum with pre-defined string values.',
    'structured_dataset_type': 'The structured dataset type of this literal type.',
    'metadata': 'This field contains type metadata that is descriptive of the '
                'type, but is NOT considered in type-checking.',
    'annotation': 'This field contains arbitrary data that might have special '
                  'semantic meaning for the client but does not effect '
                  'internal flyte behavior.',
}

for _field in CoreLiteralType._FIELD_NAMES:
    setattr(CoreLiteralType, _field,
            _literal_type_property(_field, _FIELD_DOCS[_field]))
del _field
nilq/baby-python
python
# -*- coding: utf-8 -*- from __future__ import unicode_literals from __future__ import absolute_import from django.contrib import admin from croisee import models class WordInline(admin.TabularInline): model = models.Word class DictionaryAdmin(admin.ModelAdmin): list_display = ('name','language','description','public','owner') list_display_links = ['name',] list_filter = ('owner','language',) list_editable = ('language','description','public') search_fields = ('description','name') #inlines = (WordInline,) # too much ordering = ('name','language') exclude = ('owner',) def save_model(self, request, obj, form, change): obj.owner = request.user obj.save() class WordAdmin(admin.ModelAdmin): list_display = ('word','description','dictionary','priority') list_display_links = ['word',] list_filter = ('dictionary',) list_editable = ('description','priority') search_fields = ('word','description') ordering = ('word',) class WordlistUploadAdmin(admin.ModelAdmin): exclude = ('owner',) def has_change_permission(self, request, obj=None): return False # To remove the 'Save and continue editing' button def save_model(self, request, obj, form, change): obj.owner = request.user obj.save() class PuzzleAdmin(admin.ModelAdmin): list_display = ('code', 'title', 'language', 'owner', 'public', 'width', 'height') list_display_links = ('code',) list_filter = ('public', 'owner', 'language', ) list_editable = ('title', 'public', 'language',) search_fields = ('title', 'text', 'questions') def save_model(self, request, obj, form, change): obj.owner = request.user obj.save() admin.site.register(models.Word, WordAdmin) admin.site.register(models.Dictionary, DictionaryAdmin) admin.site.register(models.WordlistUpload, WordlistUploadAdmin) admin.site.register(models.Puzzle, PuzzleAdmin)
nilq/baby-python
python
###############################################################################
# Caleydo - Visualization for Molecular Biology - http://caleydo.org
# Copyright (c) The Caleydo Team. All rights reserved.
# Licensed under the new BSD license, available at http://caleydo.org/license
###############################################################################
from builtins import range
from builtins import object


class ApplicationProxy(object):
  """
  Lazily-initialized WSGI application bound to a URL namespace.

  The real application is produced by `loader` the first time it is needed.
  """

  def __init__(self, namespace, loader):
    self.namespace = namespace
    # number of suburls to pop before delegating to the application
    self.peeks = namespace.count('/')
    self._loader = loader
    self.app = None

  def init(self):
    """Create the wrapped application on first use; returns self."""
    if self.app is None:
      self.app = self._loader()
    return self

  def match(self, path):
    """True when `path` is exactly the namespace or a suburl beneath it."""
    if path == self.namespace:
      return True
    return path.startswith(self.namespace + '/')


class PathDispatcher(object):
  """
  WSGI middleware that selects one of several applications by request path,
  falling back to `default_app` when no namespace matches.
  """

  def __init__(self, default_app, applications):
    from threading import Lock
    self.default_app = default_app
    self.applications = [
        ApplicationProxy(namespace, loader)
        for namespace, loader in applications.items()
    ]
    # guards lazy initialization of the proxies
    self.lock = Lock()

  def get_application(self, path):
    """Return the initialized proxy matching `path`, or None."""
    with self.lock:
      candidates = (proxy for proxy in self.applications if proxy.match(path))
      found = next(candidates, None)
      return found.init() if found is not None else None

  def __call__(self, environ, start_response):
    from werkzeug.wsgi import pop_path_info, get_path_info
    proxy = self.get_application(get_path_info(environ))
    if proxy is None:
      # no namespace matched: delegate to the default application
      wsgi_app = self.default_app
    else:
      # strip the namespace segments before delegating
      for _ in range(proxy.peeks):
        pop_path_info(environ)
      wsgi_app = proxy.app
    return wsgi_app(environ, start_response)
nilq/baby-python
python
import CIM2Matpower # from scipy.io import savemat cim_to_matpower_filename = 'CIM_to_Matpower_import' cimfiles = ['./UCTE10_20090319_modified_EQ.xml', './UCTE10_20090319_modified_TP.xml', './UCTE10_20090319_modified_SV.xml'] boundary_profiles = [] mpc = CIM2Matpower.cim_to_mpc(cimfiles, boundary_profiles) #, 'imported_CIM.log') # savemat(cim_to_matpower_filename+'.mat', mpc)
nilq/baby-python
python
""" Parameterized models of the stellar mass - halo mass relation (SMHM). """ from __future__ import division, print_function from __future__ import absolute_import, unicode_literals import os import numpy as np from astropy.table import Table __all__ = ['behroozi10_ms_to_mh', 'behroozi10_evolution', 'leauthaud12_ms_to_mh', 'moster13_mh_to_ms', 'moster13_ms_mh_ratio', 'moster13_evolution', 'behroozi13_f', 'behroozi13_mh_to_ms', 'behroozi13_evolution', 'behroozi_mh_to_ms_icl', 'puebla15_mh_to_ms', 'vanuitert16_mh_to_ms', 'puebla17_p', 'puebla17_q', 'puebla17_g', 'puebla17_evolution', 'puebla17_mh_to_ms', 'puebla17_ms_to_mh', 'shan17_ms_to_mh', 'tinker17_shmr', 'kravtsov18_m500_to_mbcg', 'kravtsov18_mh_to_ms', 'moster18_mh_to_ms', 'moster18_ms_mh_ratio', 'moster18_evolution', 'small_h_corr', 'imf_corr_to_chab', 'sps_corr_to_bc03', 'm500c_to_m200c'] DATA_DIR = '/Users/song/Dropbox/work/project/hsc_massive/hsc_massive/data/shmr' def behroozi10_ms_to_mh(logms, mh_1=12.35, ms_0=10.72, beta=0.44, delta=0.57, gamma=1.56, redshift=None, **kwargs): """Halo mass from stellar mass based on Behroozi+10. Parameters: mh_1: Characteristic halo mass (log10) ms_0: Characteristic stellar mass (log10) beta: Faint-end power law delta: Massive-end power law gamma: Transition width between faint- and massive-end relations Redshift evolution: When `redshift` is not `None`, will use `behroozi10_evolution` function to get the best-fit parameter at desired redshift. """ if redshift is not None: mh_1, ms_0, beta, delta, gamma = behroozi10_evolution(redshift, **kwargs) mass_ratio = (10.0 ** logms) / (10.0 ** ms_0) term_1 = np.log10(mass_ratio) * beta term_2 = mass_ratio ** delta term_3 = (mass_ratio ** -gamma) + 1.0 return mh_1 + term_1 + (term_2 / term_3) - 0.50 def behroozi10_evolution(redshift, free_mu_kappa=True): """Parameterize the evolution in term of scale factor. Using the best-fit parameters in Behroozi10. 
The default parameter works for 0 < z < 1, and assume free (mu, kappa) parameters about the shifting of SMF at different redshifts. """ scale_minus_one = -redshift / (1.0 + redshift) if free_mu_kappa: if redshift <= 1.0: """ Free mu, kappa; 0<z<1 mh_1_0=12.35+0.07-0.16, mh_1_a=0.28+0.19-0.97 ms_0_0=10.72+0.22-0.29, ms_0_a=0.55+0.18-0.79 beta_0=0.44+0.04-0.06, beta_a=0.18+0.08-0.34 delta_0=0.57+0.15-0.06, delta_a=0.17+0.42-0.41 gamma_0=1.56+0.12-0.38, gamma_a=2.51+0.15-1.83 """ mh_1_0, mh_1_a = 12.35, 0.28 ms_0_0, ms_0_a = 10.72, 0.55 beta_0, beta_a = 0.44, 0.18 delta_0, delta_a = 0.57, 0.17 gamma_0, gamma_a = 1.56, 2.51 elif redshift > 4.0: raise Exception("# Only works for z < 4.0") else: """ Free mu, kappa; 0.8<z<4.0 mh_1_0=12.27+0.59-0.27, mh_1_a=-0.84+0.87-0.58 ms_0_0=11.09+0.54-0.31, ms_0_a=0.56+0.89-0.44 beta_0=0.65+0.26-0.20, beta_a=0.31+0.38-0.47 delta_0=0.56+1.33-0.29, delta_a=-0.12+0.76-0.50 gamma_0=1.12+7.47-0.36, gamma_a=-0.53+7.87-2.50 """ mh_1_0, mh_1_a = 12.27, -0.84 ms_0_0, ms_0_a = 11.09, 0.56 beta_0, beta_a = 0.65, 0.31 delta_0, delta_a = 0.56, -0.12 gamma_0, gamma_a = 1.12, -0.53 else: if redshift > 1: raise Exception("# Only works for z < 1.0") else: """ mu = kappa = 0; 0<z<1 mh_1_0=12.35+0.02-0.15, mh_1_a=0.30+0.14-1.02 ms_0_0=10.72+0.02-0.12, ms_0_a=0.59+0.15-0.85 beta_0=0.43+0.01-0.05, beta_a=0.18+0.06-0.34 delta_0=0.56+0.14-0.05, delta_a=0.18+0.41-0.42 gamma_0=1.54+0.03-0.40, gamma_a=2.52+0.03-1.89 """ mh_1_0, mh_1_a = 12.35, 0.30 ms_0_0, ms_0_a = 10.72, 0.59 beta_0, beta_a = 0.43, 0.18 delta_0, delta_a = 0.56, 0.18 gamma_0, gamma_a = 1.54, 2.52 mh_1 = mh_1_0 + mh_1_a * scale_minus_one ms_0 = ms_0_0 + ms_0_a * scale_minus_one beta = beta_0 + beta_a * scale_minus_one delta = delta_0 + delta_a * scale_minus_one gamma = gamma_0 + gamma_a * scale_minus_one return mh_1, ms_0, beta, delta, gamma def leauthaud12_ms_to_mh(logms, mh_1=12.520, ms_0=10.916, beta=0.457, delta=0.566, gamma=1.53, redshift=None, sigmod=1): """Halo mass from stellar mass 
based on Leauthaud+2012.""" if redshift is not None: if 0.22 <= redshift < 0.48: if sigmod == 1: mh_1, ms_0, beta, delta, gamma = 12.520, 10.916, 0.457, 0.566, 1.53 elif sigmod == 2: mh_1, ms_0, beta, delta, gamma = 12.518, 10.917, 0.456, 0.582, 1.48 else: raise Exception("# Wrong sig_mod ! Options are [1, 2]") elif 0.48 <= redshift < 0.74: if sigmod == 1: mh_1, ms_0, beta, delta, gamma = 12.725, 11.038, 0.466, 0.610, 1.95 elif sigmod == 2: mh_1, ms_0, beta, delta, gamma = 12.724, 11.038, 0.466, 0.620, 1.93 else: raise Exception("# Wrong sig_mod ! Options are [1, 2]") elif 0.74 <= redshift < 1.0: if sigmod == 1: mh_1, ms_0, beta, delta, gamma = 12.722, 11.100, 0.470, 0.393, 2.51 elif sigmod == 2: mh_1, ms_0, beta, delta, gamma = 12.726, 11.100, 0.470, 0.470, 2.38 else: raise Exception("# Wrong sig_mod ! Options are [1, 2]") else: raise Exception("# Wrong redshift range ! Should be between [0, 1]") return behroozi10_ms_to_mh(logms, mh_1=mh_1, ms_0=ms_0, beta=beta, delta=delta, gamma=gamma) def moster13_mh_to_ms(logmh, mh_1=11.59, n=0.0351, beta=1.376, gamma=0.608, redshift=None): """Stellar mass from halo mass based on Moster et al. 2013.""" ms_ratio = moster13_ms_mh_ratio(logmh, mh_1=mh_1, n=n, beta=beta, gamma=gamma, redshift=redshift) return logmh + np.log10(ms_ratio) def moster13_ms_mh_ratio(logmh, mh_1=11.59, n=0.0351, beta=1.376, gamma=0.608, redshift=None): """Stellar-to-halo mass ratio based on Moster et al. 2013.""" if redshift is not None: mh_1, n, beta, gamma = moster13_evolution(redshift) print(mh_1, n, beta, gamma) mass_ratio = 10.0 ** logmh / 10.0 ** mh_1 term1 = 2.0 * n term2 = mass_ratio ** -beta term3 = mass_ratio ** gamma return term1 / (term2 + term3) def moster13_evolution(redshift, m10=11.59, m11=1.195, n10=0.0351, n11=-0.0247, beta10=1.376, beta11=-0.826, gamma10=0.608, gamma11=0.329): """Redshift dependent of parameters in Moster et al. 2013 model. 
Best-fit parameter: M10, M11: 11.59+/-0.236, 1.195+/-0.353 N10, N11: 0.0351+/- 0.0058, -0.0247+/-0.0069 beta10, beta11: 1.376+/-0.153, -0.826+/-0.225 gamma10, gamma11: 0.608+/-0.059, 0.329+/-0.173 """ z_factor = redshift / (1.0 + redshift) mh_1 = m10 + m11 * z_factor n = n10 + n11 * z_factor beta = beta10 + beta11 * z_factor gamma = gamma10 + gamma11 * z_factor return mh_1, n, beta, gamma def behroozi13_f(x, alpha, delta, gamma): """The f(x) function used in Behroozi+13.""" term_1 = -1.0 * np.log10(10.0 ** (alpha * x) + 1.0) term_2 = delta * (np.log10(1.0 + np.exp(x)) ** gamma) / (1.0 + np.exp(10.0 ** -x)) return term_1 + term_2 def behroozi13_mh_to_ms(logmh, mh_1=11.514, epsilon=-1.777, alpha=-1.412, delta=3.508, gamma=0.316, redshift=None, **kwargs): """Stellar mass from halo mass based on Behroozi et al. 2013. Parameters: mh_1: Characteristic halo mass (log10) epsilon: Characteristic stellar mass to halo mass ratio (log10) alpha: Faint-end slope of SMHM relation delta: Strength of subpower law at massive end of SMHM relation gamma: Index of subpower law at massive end of SMHM relation Redshift evolution: When `redshift` is not `None`, will use `behroozi13_evolution` function to get the best-fit parameter at desired redshift. """ if redshift is not None: mh_1, epsilon, alpha, delta, gamma = behroozi15_evolution(redshift, **kwargs) mhalo_ratio = logmh - mh_1 return mh_1 + epsilon + (behroozi13_f(mhalo_ratio, alpha, delta, gamma) - behroozi13_f(0.0, alpha, delta, gamma)) def behroozi13_evolution(redshift): """Parameterize the evolution in term of scale factor. Using the best-fit parameters in Behroozi15. The default parameter works for 0 < z < 1, and assume free (mu, kappa) parameters about the shifting of SMF at different redshifts. 
""" scale = 1.0 / (1.0 + redshift) scale_minus_one = -redshift / (1.0 + redshift) # mh_1_0 = 11.514 + 0.053 - 0.009 # mh_1_a = -1.793 + 0.315 - 0.330 # mh_1_z = -0.251 + 0.012 - 0.125 mh_1_0, mh_1_a, mh_1_z = 11.514, -1.793, -0.251 # epsilon_0 = -1.777 + 0.133 - 0.146 # epsilon_a = -0.006 + 0.113 - 0.361 # epsilon_z = -0.000 + 0.003 - 0.104 # epsilon_a_2 = -0.119 + 0.061 - 0.012 epsilon_0, epsilon_a = -1.777, -0.006 epsilon_z, epsilon_a_2 = -0.000, -0.119 # alpha_0 = -1.412 + 0.020 - 0.105 # alpha_a = 0.731 + 0.344 - 0.296 alpha_0, alpha_a = -1.412, 0.731 # delta_0 = 3.508 + 0.087 - 0.369 # delta_a = 2.608 + 2.446 - 1.261 # delta_z = -0.043 + 0.958 - 0.071 delta_0, delta_a, delta_z = 3.508, 2.608, -0.043 # gamma_0 = 0.316 + 0.076 - 0.012 # gamma_a = 1.319 + 0.584 - 0.505 # gamma_z = 0.279 + 0.256 - 0.081 gamma_0, gamma_a, gamma_z = 0.316, 1.319, 0.279 nu_a = np.exp(-4.0 * (scale ** 2.0)) mh_1 = mh_1_0 + ((mh_1_a * scale_minus_one) + mh_1_z * redshift) * nu_a epsilon = epsilon_0 + ((epsilon_a * scale_minus_one) + epsilon_z * redshift) + epsilon_a_2 * scale_minus_one alpha = alpha_0 + (alpha_a * scale_minus_one) * nu_a delta = delta_0 + (delta_a * scale_minus_one + delta_z * redshift) * nu_a gamma = gamma_0 + (gamma_a * scale_minus_one + gamma_z * redshift) * nu_a return mh_1, epsilon, alpha, delta, gamma def behroozi_mh_to_ms_icl(loc=DATA_DIR): """SHMR with ICL included. Only for redshift at 0.2, 0.3, 0.4, 0.5. 
""" b13_icl_z1= Table.read( os.path.join(loc, 'behroozi_2013/smhm_z0.2.dat'), format='ascii') b13_icl_z2= Table.read( os.path.join(loc, 'behroozi_2013/smhm_z0.3.dat'), format='ascii') b13_icl_z3= Table.read( os.path.join(loc, 'behroozi_2013/smhm_z0.4.dat'), format='ascii') b13_icl_z4= Table.read( os.path.join(loc, 'behroozi_2013/smhm_z0.5.dat'), format='ascii') return b13_icl_z1, b13_icl_z2, b13_icl_z3, b13_icl_z4 def puebla15_mh_to_ms(logmh, mh_1=11.367, epsilon=-2.143, alpha=-2.858, delta=6.026, gamma=0.303, kind=None): """Stellar mass from halo mass based on Rodriguez-Puebla et al. 2015. Default results are for red central galaxy. """ if kind == 'red': """ mh_1 = 11.361 +/- 0.100 epsilon = -2.143 +/- 0.086 alpha = -2.858 +/- 0.479 delta = 6.026 +/- 0.544 gamma = 0.303 +/- 0.023 """ mh_1, epsilon, alpha, delta, gamma = 11.361, -2.143, -2.858, 6.026, 0.303 elif kind == 'blue': """ mh_1 = 11.581 +/- 0.034 epsilon = -1.593 +/- 0.042 alpha = -1.500 +/- 0.148 delta = 4.293 +/- 0.271 gamma = 0.396 +/- 0.035 """ mh_1, epsilon, alpha, delta, gamma = 11.581, -1.593, -1.500, 4.293, 0.396 else: raise Exception("# Wrong kind: [red / blue]") return behroozi13_mh_to_ms(logmh, mh_1=mh_1, epsilon=epsilon, alpha=alpha, delta=delta, gamma=gamma) def vanuitert16_mh_to_ms(logmh, mh_1=12.06, ms_0=11.16, beta1=5.4, beta2=0.15, all=False, sat=False): """Stellar mass based on halo mass from van Uitert et al. 2016. 
logmh_1 logms_0 beta_1 beta_2 All 10.97+0.34-0.25 10.58+0.22-0.15 7.5+3.8-2.7 0.25+0.04-0.06 Cen 12.06+0.72-0.80 11.16+0.40-0.62 5.4+5.3-3.4 0.15+0.31-0.14 Sat 11.70+0.70-0.84 11.22+0.12-0.22 4.5+4.6-2.9 0.05+0.07-0.04 """ if all: mh_1, ms_0, beta1, beta2 = 10.97, 10.58, 7.5, 0.25 if sat: mh_1, ms_0, beta1, beta2 = 11.70, 11.22, 4.5, 0.05 mass_ratio = 10.0 ** logmh / 10.0 ** mh_1 term1 = 10.0 ** ms_0 term2 = mass_ratio ** beta1 / (1.0 + mass_ratio) ** (beta1 - beta2) return np.log10(term1 * term2) def puebla17_p(x, y, z): """The P(x, y, z) function used in Rodriguez-Puebla+17.""" return y * z - (x * z) / (1.0 + z) def puebla17_q(z): """The Q(z) function used in Rodriguez-Puebla+17.""" return np.exp(-4.0 / (1.0 + z) ** 2.0) def puebla17_g(x, alpha, delta, gamma): """The g(x) function used in Behroozi+13.""" term_1 = -np.log10(10.0 ** (-alpha * x) + 1.0) term_2 = delta * (np.log10(1.0 + np.exp(x)) ** gamma) / (1.0 + np.exp(10.0 ** -x)) return term_1 + term_2 def puebla17_evolution(redshift): """Parameterize the evolution in term of scale factor. Using the best-fit parameters in Rodriguez-Puebla+17. 
""" # mh_1_0 = 11.548 +/- 0.049 # mh_1_1 = -1.297 +/- 0.225 # mh_1_2 = -0.026 +/- 0.043 mh_1_0, mh_1_1, mh_1_2 = 11.548, -1.297, -0.026 # epsilon_0 = -1.758 +/- 0.040 # epsilon_1 = 0.110 +/- 0.166 # epsilon_2 = -0.061 +/- 0.029 # epsilon_3 = -0.023 +/- 0.009 epsilon_0, epsilon_1 = -1.758, 0.110 epsilon_2, epsilon_3 = -0.061, -0.023 # alpha_0 = 1.975 +/- 0.074 # alpha_1 = 0.714 +/- 0.165 # alpha_2 = 0.042 +/- 0.017 alpha_0, alpha_1, alpha_2 = 1.975, 0.714, 0.042 # delta_0 = 3.390 +/- 0.281 # delta_1 = -0.472 +/- 0.899 # detla_2 = -0.931 +/- 0.147 delta_0, delta_1, delta_2 = 3.390, -0.472, -0.931 # gamma_0 = 0.498 +/- 0.044 # gamma_1 = -0.157 +/- 0.122 gamma_0, gamma_1 = 0.498, -0.157 mh_1 = mh_1_0 + puebla17_p(mh_1_1, mh_1_2, redshift) * puebla17_q(redshift) epsilon = epsilon_0 + (puebla17_p(epsilon_1, epsilon_2, redshift) * puebla17_q(redshift) + puebla17_p(epsilon_3, 0.0, redshift)) alpha = alpha_0 + puebla17_p(alpha_1, alpha_2, redshift) * puebla17_q(redshift) delta = delta_0 + puebla17_p(delta_1, delta_2, redshift) * puebla17_q(redshift) gamma = gamma_0 + puebla17_p(gamma_1, 0.0, redshift) * puebla17_q(redshift) return mh_1, epsilon, alpha, delta, gamma def puebla17_mh_to_ms(logmh, mh_1=11.514, epsilon=-1.777, alpha=-1.412, delta=3.508, gamma=0.316, redshift=None, **kwargs): """Stellar mass from halo mass based on Puebla et al. 2017. Parameters: mh_1: Characteristic halo mass (log10) epsilon: Characteristic stellar mass to halo mass ratio (log10) alpha: Faint-end slope of SMHM relation delta: Strength of subpower law at massive end of SMHM relation gamma: Index of subpower law at massive end of SMHM relation Redshift evolution: When `redshift` is not `None`, will use `puebla17_evolution` function to get the best-fit parameter at desired redshift. 
""" if redshift is not None: mh_1, epsilon, alpha, delta, gamma = puebla17_evolution(redshift, **kwargs) mhalo_ratio = logmh - mh_1 print(mh_1, epsilon, alpha, delta, gamma) return mh_1 + epsilon + (puebla17_g(mhalo_ratio, alpha, delta, gamma) - puebla17_g(0.0, alpha, delta, gamma)) def puebla17_ms_to_mh(logms, mh_1=12.58, ms_0=10.90, beta=0.48, delta=0.29, gamma=1.52, redshift=None): """Halo mass from stellar mass based on Rodriguez-Puebla et al. 2017.""" if redshift is not None: if 0.0 < redshift <= 0.20: mh_1, ms_0, beta, delta, gamma = 12.58, 10.90, 0.48, 0.29, 1.52 elif 0.20 < redshift <= 0.40: mh_1, ms_0, beta, delta, gamma = 12.61, 10.93, 0.48, 0.27, 1.46 elif 0.40 < redshift <= 0.60: mh_1, ms_0, beta, delta, gamma = 12.68, 10.99, 0.48, 0.23, 1.39 elif 0.60 < redshift <= 0.90: mh_1, ms_0, beta, delta, gamma = 12.77, 11.08, 0.50, 0.18, 1.33 elif 0.90 < redshift <= 1.20: mh_1, ms_0, beta, delta, gamma = 12.89, 11.19, 0.51, 0.12, 1.27 elif 1.20 < redshift <= 1.40: mh_1, ms_0, beta, delta, gamma = 13.01, 11.31, 0.53, 0.03, 1.22 elif 1.40 < redshift <= 1.60: mh_1, ms_0, beta, delta, gamma = 13.15, 11.47, 0.54, -0.10, 1.17 elif 1.60 < redshift <= 1.80: mh_1, ms_0, beta, delta, gamma = 13.33, 11.73, 0.55, -0.34, 1.16 else: raise Exception("# Wrong redshift range: [0.0, 1.8]") mass_ratio = (10.0 ** logms) / (10.0 ** ms_0) term_1 = np.log10(mass_ratio) * beta term_2 = mass_ratio ** delta term_3 = (mass_ratio ** -gamma) + 1.0 return mh_1 + term_1 + (term_2 / term_3) - 0.50 def shan17_ms_to_mh(logms, mh_1=12.52, ms_0=10.98, beta=0.47, delta=0.55, gamma=1.43, redshift=None): """Halo mass from stellar mass based on Shan+2017.""" if redshift is not None: if 0.2 <= redshift < 0.4: mh_1, ms_0, beta, delta, gamma = 12.52, 10.98, 0.47, 0.55, 1.43 elif 0.4 <= redshift < 0.6: mh_1, ms_0, beta, delta, gamma = 12.70, 11.11, 0.50, 0.54, 1.72 else: raise Exception("# Wrong redshift range ! 
Should be between [0, 1]") return behroozi10_ms_to_mh(logms, mh_1=mh_1, ms_0=ms_0, beta=beta, delta=delta, gamma=gamma) def tinker17_shmr(loc=DATA_DIR): """SHMR from Tinker+2017.""" tinker17_mh_to_ms = Table.read( os.path.join(loc, 'tinker_2017/tinker2017_mh_to_ms.txt'), format='ascii') tinker17_ms_to_mh = Table.read( os.path.join(loc, 'tinker_2017/tinker2017_ms_to_mh.txt'), format='ascii') return tinker17_mh_to_ms, tinker17_ms_to_mh def kravtsov18_m500_to_mbcg(m500, a=0.39, b=12.15, with_g13=False, tot=False, sat=False): """BCG stellar mass from halo mass based on Kravtsov+2018. * 9 clusters: Relation Slope Normalization Scatter M*_BCG - M500 0.39+/-0.17 12.15+/-0.08 0.21+/-0.09 M*_Sat - M500 0.87+/-0.15 12.42+/-0.07 0.10+/-0.12 M*_Tot - M500 0.69+/-0.09 12.63+/-0.04 0.09+/-0.05 * 21 clusters (+ Gonzalaz et al. 2013) Relation Slope Normalization Scatter M*_BCG - M500 0.33+/-0.11 12.24+/-0.04 0.17+/-0.03 M*_Sat - M500 0.75+/-0.09 12.52+/-0.03 0.10+/-0.03 M*_Tot - M500 0.59+/-0.08 12.71+/-0.03 0.11+/-0.03 """ m_norm = m500 - 14.5 if not with_g13: if tot: a, b = 0.69, 12.63 elif sat: a, b = 0.87, 12.42 else: if tot: a, b = 0.59, 12.71 elif sat: a, b = 0.75, 12.52 else: a, b = 0.33, 12.24 return a * m_norm + b def kravtsov18_mh_to_ms(logmh, mh_1=11.35, epsilon=-1.642, alpha=-1.779, delta=4.394, gamma=0.547, kind=None, scatter=False): """Central stellar mass from halo mass based on Kravtsov et al. 
2018.""" if kind is not None: if kind == '200c': if not scatter: mh_1, epsilon, alpha, delta, gamma = 11.39, -1.618, -1.795, 4.345, 0.619 else: mh_1, epsilon, alpha, delta, gamma = 11.35, -1.642, -1.779, 4.394, 0.547 elif kind == '500c': if not scatter: mh_1, epsilon, alpha, delta, gamma = 11.32, -1.527, -1.856, 4.376, 0.644 else: mh_1, epsilon, alpha, delta, gamma = 11.28, -1.566, -1.835, 4.437, 0.567 elif kind == '200m': if not scatter: mh_1, epsilon, alpha, delta, gamma = 11.45, -1.702, -1.736, 4.273, 0.613 else: mh_1, epsilon, alpha, delta, gamma = 11.41, -1.720, -1.727, 4.305, 0.544 elif kind == 'vir': if not scatter: mh_1, epsilon, alpha, delta, gamma = 11.43, -1.663, -1.750, 4.290, 0.595 else: mh_1, epsilon, alpha, delta, gamma = 11.39, -1.685, -1.740, 4.335, 0.531 else: raise Exception("# Wrong definition of mass: [200c, 500c, 200m, vir]") mhalo_ratio = logmh - mh_1 return mh_1 + epsilon + (behroozi13_f(mhalo_ratio, alpha, delta, gamma) - behroozi13_f(0.0, alpha, delta, gamma)) def moster18_mh_to_ms(logmh, mh_1=11.339, n=0.005, beta=3.344, gamma=0.966, fb=0.156, redshift=None): """Stellar mass from halo mass based on Moster et al. 2018.""" ms_ratio = moster18_ms_mh_ratio(logmh, mh_1=mh_1, n=n, beta=beta, gamma=gamma, redshift=redshift) return logmh + np.log10(fb) + np.log10(ms_ratio) def moster18_ms_mh_ratio(logmh, mh_1=11.339, n=0.005, beta=3.344, gamma=0.966, redshift=None): """Stellar-to-halo mass ratio based on Moster et al. 2013.""" if redshift is not None: mh_1, n, beta, gamma = moster18_evolution(redshift) mass_ratio = 10.0 ** logmh / 10.0 ** mh_1 term1 = 2.0 * n term2 = mass_ratio ** -beta term3 = mass_ratio ** gamma return term1 / (term2 + term3) def moster18_evolution(z, kind='cen'): """Redshift dependent of parameters in Moster et al. 2018 model. Based on the best-fit parameters in Table 8 of Moster et al. 
2018: """ if 0.0 <= z < 0.3: if kind == 'cen': mh_1, n, beta, gamma = 11.80, 0.14, 1.75, 0.57 elif kind == 'qe': mh_1, n, beta, gamma = 11.65, 0.17, 1.80, 0.57 elif kind == 'sf': mh_1, n, beta, gamma = 11.75, 0.12, 1.75, 0.57 elif kind == 'all': mh_1, n, beta, gamma = 11.78, 0.15, 1.78, 0.57 else: raise Exception("# Wrong kind: [cen, qe, sf, all]") elif 0.3 <= z < 0.8: if kind == 'cen': mh_1, n, beta, gamma = 11.85, 0.16, 1.70, 0.58 elif kind == 'qe': mh_1, n, beta, gamma = 11.75, 0.19, 1.75, 0.58 elif kind == 'sf': mh_1, n, beta, gamma = 11.80, 0.14, 1.70, 0.58 elif kind == 'all': mh_1, n, beta, gamma = 11.86, 0.18, 1.67, 0.58 else: raise Exception("# Wrong kind: [cen, qe, sf, all]") elif 0.8 <= z < 1.5: if kind == 'cen': mh_1, n, beta, gamma = 11.95, 0.18, 1.60, 0.60 elif kind == 'qe': mh_1, n, beta, gamma = 11.85, 0.21, 1.65, 0.60 elif kind == 'sf': mh_1, n, beta, gamma = 11.90, 0.15, 1.60, 0.60 elif kind == 'all': mh_1, n, beta, gamma = 11.98, 0.19, 1.53, 0.59 else: raise Exception("# Wrong kind: [cen, qe, sf, all]") elif 1.5 <= z < 2.5: if kind == 'cen': mh_1, n, beta, gamma = 12.00, 0.18, 1.55, 0.62 elif kind == 'qe': mh_1, n, beta, gamma = 11.90, 0.21, 1.60, 0.60 elif kind == 'sf': mh_1, n, beta, gamma = 11.95, 0.16, 1.55, 0.62 elif kind == 'all': mh_1, n, beta, gamma = 11.99, 0.19, 1.46, 0.59 else: raise Exception("# Wrong kind: [cen, qe, sf, all]") elif 2.5 <= z < 5.5: if kind == 'cen': mh_1, n, beta, gamma = 12.05, 0.19, 1.50, 0.64 elif kind == 'qe': mh_1, n, beta, gamma = 12.00, 0.21, 1.55, 0.64 elif kind == 'sf': mh_1, n, beta, gamma = 12.05, 0.18, 1.50, 0.64 elif kind == 'all': mh_1, n, beta, gamma = 12.07, 0.20, 1.36, 0.60 else: raise Exception("# Wrong kind: [cen, qe, sf, all]") elif 5.5 <= z <= 8.0: if kind == 'cen': mh_1, n, beta, gamma = 12.10, 0.24, 1.30, 0.64 elif kind == 'qe': mh_1, n, beta, gamma = 12.10, 0.28, 1.30, 0.64 elif kind == 'sf': mh_1, n, beta, gamma = 12.10, 0.24, 1.30, 0.64 elif kind == 'all': mh_1, n, beta, gamma = 12.10, 0.24, 
1.30, 0.60 else: raise Exception("# Wrong kind: [cen, qe, sf, all]") else: raise Exception("# Wrong redshift range: 0 < z < 8") return mh_1, n, beta, gamma def small_h_corr(h, h_ref=0.7, mh=False): """Correction factor for small h on stellar or halo mass.""" if mh: return h / h_ref else: return (h / h_ref) ** 2.0 def imf_corr_to_chab(kind='kroupa'): """Correct the stellar mass to Chabrier IMF.""" if kind == 'kroupa': return -0.05 elif kind == 'salpeter': return -0.25 elif kind == 'diet-salpeter': return -0.1 else: raise Exception("# Wrong IMF type: [kroupa, salpeter, diet-salpeter]") def sps_corr_to_bc03(kind='fsps'): """Correct the stellar mass to BC03 SPS model.""" if kind == 'fsps': return -0.05 elif kind == 'bc07' or kind == 'cb07': return 0.13 elif kind == 'pegase': return -0.05 elif kind == 'm05': return 0.2 else: raise Exception('# Wrong SPS type: [fsps, bc07, pegase, m05]') def m500c_to_m200c(): """Convert M500c to M200c based on White 2001.""" return -np.log10(0.72)
nilq/baby-python
python
__all__ = ["Dog", "test1", "name"] class Animal(object): pass class Dog(Animal): pass class Cat(Animal): pass def test1(): print("test1") def test2(): print("test2") def test3(): print("test3") name = "小明" age = "22"
nilq/baby-python
python
from flickr.flickr import search
nilq/baby-python
python
# Copyright 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Stable UUID-style identifier constants.

Names such as ``RAISE_ID``, ``INVALID_ID`` and ``WILL_NOT_BE_FOUND_ID``
suggest these are shared fixtures for tests — confirm against callers.
Values are fixed so that IDs appearing in logs/assertions are predictable.
"""

# Action / generic-object IDs.
ACTION_FAILED_ID = 'f26f181d-7891-4720-b022-b074ec1733ef'
ACTION2_FAILED_ID = '02f53bd8-3514-485b-ba60-2722ef09c016'
ALREADY_EXISTS_ID = '8f7495fe-5e44-4f33-81af-4b28e9b2952f'
ATTACHMENT_ID = '4dc3bb12-ad75-41b9-ab2c-7609e743e600'
ATTACHMENT2_ID = 'ac2439fe-c071-468f-94e3-547bedb95de0'
BACKUP_ID = '707844eb-6d8a-4ac1-8b98-618e1c0b3a3a'
BACKUP2_ID = '40e8462a-c9d8-462f-a810-b732a1790535'
BACKUP3_ID = '30ae7641-017e-4221-a642-855687c8bd71'
# Consistency-group snapshot IDs; *_NAME values embed the matching ID.
CGSNAPSHOT_ID = '5e34cce3-bc97-46b7-a127-5cfb95ef445d'
CGSNAPSHOT_NAME = 'cgsnapshot-5e34cce3-bc97-46b7-a127-5cfb95ef445d'
CGSNAPSHOT2_ID = '5c36d762-d6ba-4f04-bd07-88a298cc410a'
CGSNAPSHOT3_ID = '5f392156-fc03-492a-9cb8-e46a7eedaf33'
CONSISTENCY_GROUP_ID = 'f18abf73-79ee-4f2b-8d4f-1c044148f117'
CONSISTENCY_GROUP2_ID = '8afc8952-9dce-4228-9f8a-706c5cb5fc82'
ENCRYPTION_KEY_ID = 'e8387001-745d-45d0-9e4e-0473815ef09a'
IMAGE_ID = 'e79161cd-5f9d-4007-8823-81a807a64332'
INSTANCE_ID = 'fa617131-cdbc-45dc-afff-f21f17ae054e'
IN_USE_ID = '8ee42073-4ac2-4099-8c7a-d416630e6aee'
INVALID_ID = 'f45dcab0-ff2a-46ec-b3b7-74d6f4bb0027'
KEY_ID = '9112ecec-fb9d-4299-a948-ffb52650a5b5'
OBJECT_ID = 'd7c5b12f-d57d-4762-99ab-db5f62ae3569'
OBJECT2_ID = '51f5b8fa-c13c-48ba-8c9d-b470466cbc9c'
OBJECT3_ID = '7bf5ffa9-18a2-4b64-aab4-0798b53ee4e7'
PROJECT_ID = '89afd400-b646-4bbc-b12b-c0a4d63e5bd3'
PROJECT2_ID = '452ebfbc-55d9-402a-87af-65061916c24b'
PROJECT3_ID = 'f6c912d7-bf30-4b12-af81-a9e0b2f85f85'
PROVIDER_ID = '60087173-e899-470a-9e3a-ba4cffa3e3e3'
PROVIDER2_ID = '1060eccd-64bb-4ed2-86ce-aeaf135a97b8'
PROVIDER3_ID = '63736819-1c95-440e-a873-b9d685afede5'
PROVIDER4_ID = '7db06e02-26b6-4282-945d-7f6c9347a7b0'
QOS_SPEC_ID = 'fc0f7527-79d7-44be-a4f6-3b24db8e11ac'
QOS_SPEC2_ID = 'c561b69d-98d9-478c-815b-6de11f5a09c9'
QOS_SPEC3_ID = '6034720b-f586-4302-a1eb-fe30672069f6'
RAISE_ID = 'a56762e1-4a30-4008-b997-5a438ec9c457'
SNAPSHOT_ID = '253b2878-ec60-4793-ad19-e65496ec7aab'
SNAPSHOT_NAME = 'snapshot-253b2878-ec60-4793-ad19-e65496ec7aab'
SNAPSHOT2_ID = 'c02c44fa-5665-4a26-9e66-2ebaf25e5d2d'
SNAPSHOT3_ID = '454f9970-1e05-4193-a3ed-5c390c3faa18'
UPDATE_FAILED_ID = '110b29df-5e0f-4dbb-840c-ef5963d06933'
USER_ID = 'c853ca26-e8ea-4797-8a52-ee124a013d0e'
USER2_ID = '95f7b7ed-bd7f-426e-b05f-f1ffeb4f09df'
# Volume IDs; *_NAME values embed the matching ID.
VOLUME_ID = '1e5177e7-95e5-4a0f-b170-e45f4b469f6a'
VOLUME_NAME = 'volume-1e5177e7-95e5-4a0f-b170-e45f4b469f6a'
VOLUME2_ID = '43a09914-e495-475f-b862-0bda3c8918e4'
VOLUME2_NAME = 'volume-43a09914-e495-475f-b862-0bda3c8918e4'
VOLUME3_ID = '1b1cf149-219c-44ac-aee3-13121a7f86a7'
VOLUME3_NAME = 'volume-1b1cf149-219c-44ac-aee3-13121a7f86a7'
VOLUME4_ID = '904d4602-4301-4e9b-8df1-8133b51904e6'
VOLUME4_NAME = 'volume-904d4602-4301-4e9b-8df1-8133b51904e6'
VOLUME5_ID = '17b0e01d-3d2d-4c31-a1aa-c962420bc3dc'
VOLUME5_NAME = 'volume-17b0e01d-3d2d-4c31-a1aa-c962420bc3dc'
VOLUME_NAME_ID = 'ee73d33c-52ed-4cb7-a8a9-2687c1205c22'
VOLUME2_NAME_ID = '63fbdd21-03bc-4309-b867-2893848f86af'
VOLUME_TYPE_ID = '4e9e6d23-eed0-426d-b90a-28f87a94b6fe'
VOLUME_TYPE2_ID = 'c4daaf47-c530-4901-b28e-f5f0a359c4e6'
VOLUME_TYPE3_ID = 'a3d55d15-eeb1-4816-ada9-bf82decc09b3'
VOLUME_TYPE4_ID = '69943076-754d-4da8-8718-0b0117e9cab1'
VOLUME_TYPE5_ID = '1c450d81-8aab-459e-b338-a6569139b835'
WILL_NOT_BE_FOUND_ID = 'ce816f65-c5aa-46d6-bd62-5272752d584a'
nilq/baby-python
python
import time

from config import appconfig


def uniqueName(base_name):
    """Return *base_name* prefixed with a second-resolution local timestamp.

    Format is '%Y%m%d%H%M%S' + base_name, e.g. '20240101120000photo.jpg'.
    """
    # BUG FIX: ``time`` was used here without ever being imported, so the
    # first call raised NameError. ``import time`` added at module top.
    return time.strftime('%Y%m%d%H%M%S', time.localtime()) + base_name


def isImageByExtension(image_name):
    """Return True if *image_name* has an allowed image file extension.

    The extension is the text after the last '.', compared case-insensitively
    against appconfig.IMAGE_EXTENSIONS. Names without a '.' return False.
    """
    return '.' in image_name and image_name.rsplit('.', 1)[1].lower() in appconfig.IMAGE_EXTENSIONS
nilq/baby-python
python
import parse
import logging
import click
from render import Render

logging.basicConfig(level=logging.INFO)


@click.command()
@click.option('--default', '-d', is_flag=True, help='Generate the default blog template')
@click.option('--resume', '-r', is_flag=True, help='Generate a resume template')
def build(default, resume):
    """Render the site: either the default blog template or a resume.

    Exactly one of --default / --resume should be given; --default wins
    if both are passed, and nothing happens if neither is.
    """
    # BUG FIX: both options were declared as value-taking options but are
    # consumed as booleans below; without ``is_flag=True`` click required
    # an argument after -d / -r. Declared as flags instead.
    renderer = Render()
    if default:
        try:
            posts = parse.Post_parser()
            renderer.Render_posts(posts)
            logging.info(" Build successful. Check your output folder.")
        # Narrowed from a bare ``except:`` so Ctrl-C / SystemExit still exit.
        except Exception:
            logging.exception(" Build failed :(")
    elif resume:
        try:
            details = parse.Resume_parser()
            renderer.Render_resume(details)
            logging.info(" Build successful. Check your output folder.")
        except Exception:
            logging.exception(" Build failed :(")


if __name__ == "__main__":
    build()
nilq/baby-python
python
import asyncio
import itertools
from decimal import Decimal
from typing import Tuple, Union

from hq2redis.exceptions import SecurityNotFoundError
from hq2redis.reader import get_security_price
from motor.motor_asyncio import AsyncIOMotorDatabase
from pydantic import ValidationError
from pymongo import DeleteOne, UpdateOne

from app import state
from app.db.cache.position import PositionCache
from app.db.cache.user import UserCache
from app.db.repositories.position import PositionRepository
from app.db.repositories.statement import StatementRepository
from app.db.repositories.user import UserRepository
from app.exceptions.db import EntityDoesNotExist
from app.exceptions.service import (
    InsufficientFunds,
    NoPositionsAvailable,
    NotEnoughAvailablePositions,
)
from app.models.base import get_utc_now
from app.models.domain.orders import OrderInDB
from app.models.domain.statement import Costs
from app.models.enums import OrderTypeEnum, TradeTypeEnum
from app.models.schemas.orders import OrderInCreate
from app.models.schemas.position import PositionInCache
from app.models.schemas.users import UserInCache
from app.models.types import PyDecimal, PyObjectId
from app.services.engines.base import BaseEngine
from app.services.engines.event_constants import (
    MARKET_CLOSE_EVENT,
    POSITION_CREATE_EVENT,
    POSITION_UPDATE_EVENT,
    UNFREEZE_EVENT,
    USER_UPDATE_ASSETS_EVENT,
    USER_UPDATE_AVAILABLE_CASH_EVENT,
    USER_UPDATE_EVENT,
)
from app.services.engines.event_engine import Event, EventEngine


class UserEngine(BaseEngine):
    """User engine.

    Keeps user accounts and stock positions consistent across the Redis
    cache (hot path) and MongoDB (persistence), reacting to events posted
    on the shared EventEngine.

    Raises
    ------
    InsufficientFunds
        Raised when the user's available cash cannot cover a buy order.
    NoPositionsAvailable
        Raised when the user holds no position in the stock named by a
        sell order.
    NotEnoughAvailablePositions
        Raised when the position's available volume cannot cover the
        volume specified by the order.
    """

    def __init__(
        self,
        event_engine: EventEngine,
        db: AsyncIOMotorDatabase,
    ) -> None:
        super().__init__()
        self.event_engine = event_engine
        # MongoDB repositories (durable storage).
        self.user_repo = UserRepository(db)
        self.position_repo = PositionRepository(db)
        # Redis caches (fast in-memory view used during trading).
        self.user_cache = UserCache(state.user_redis_pool)
        self.position_cache = PositionCache(state.position_redis_pool)
        self.statement_repo = StatementRepository(db)

    async def startup(self) -> None:
        # Warm the cache from MongoDB, then subscribe to events.
        await self.load_db_data_to_redis()
        await self.register_event()

    async def shutdown(self) -> None:
        pass

    async def load_db_data_to_redis(self) -> None:
        """Load user and position data from MongoDB into Redis."""
        # Only reload when the cache flags itself as stale/empty.
        if await self.user_cache.is_reload:
            user_list = await self.user_repo.get_user_list_to_cache()
            await self.user_cache.set_user_many(user_list)
            # Fetch every user's positions concurrently.
            position_list = await asyncio.gather(
                *(
                    self.position_repo.get_positions_by_user_id_to_cache(user.id)
                    for user in user_list
                )
            )
            position_in_cache_list = list(itertools.chain.from_iterable(position_list))
            if position_in_cache_list:
                await self.position_cache.set_position_many(position_in_cache_list)

    async def load_redis_data_to_db(self) -> None:
        """Flush cached user and position data from Redis back to MongoDB."""
        user_list = await self.user_cache.get_all_user()
        update_user_list = []
        update_position_list = []
        delete_position_list = []
        for user in user_list:
            update_user_list.append(
                UpdateOne({"_id": user.id}, {"$set": user.dict(exclude={"id"})})
            )
            position_list = await self.position_repo.get_positions_by_user_id(user.id)
            for position in position_list:
                try:
                    position_in_cache = await self.position_cache.get_position(
                        user.id, position.symbol, position.exchange
                    )
                except EntityDoesNotExist:
                    # Present in DB but gone from cache -> remove from DB.
                    delete_position_list.append(DeleteOne({"_id": position.id}))
                else:
                    update_position_list.append(
                        UpdateOne(
                            {
                                "user": position.user,
                                "symbol": position.symbol,
                                "exchange": position.exchange,
                            },
                            {"$set": position_in_cache.dict()},
                        )
                    )
        if update_user_list:
            await self.user_repo.bulk_update(update_user_list)
        if update_position_list:
            await self.position_repo.bulk_update(update_position_list)
        if delete_position_list:
            await self.position_repo.bulk_delete(delete_position_list)

    async def register_event(self) -> None:
        """Subscribe this engine's handlers on the event engine."""
        await self.event_engine.register(
            POSITION_CREATE_EVENT, self.process_position_create
        )
        await self.event_engine.register(USER_UPDATE_EVENT, self.process_user_update)
        await self.event_engine.register(
            POSITION_UPDATE_EVENT, self.process_position_update
        )
        await self.event_engine.register(MARKET_CLOSE_EVENT, self.process_market_close)
        await self.event_engine.register(UNFREEZE_EVENT, self.process_unfreeze)
        await self.event_engine.register(
            USER_UPDATE_AVAILABLE_CASH_EVENT, self.process_user_update_available_cash
        )
        await self.event_engine.register(
            USER_UPDATE_ASSETS_EVENT, self.process_user_update_assets
        )

    async def process_user_update(self, payload: UserInCache) -> None:
        # Full user refresh in the cache.
        await self.user_cache.update_user(payload)

    async def process_user_update_available_cash(self, payload: UserInCache) -> None:
        # Only the available_cash field is written back.
        await self.user_cache.update_user(payload, include={"available_cash"})

    async def process_user_update_assets(self, payload: UserInCache) -> None:
        # Write back the full asset picture after a fill.
        await self.user_cache.update_user(
            payload, include={"cash", "securities", "assets", "available_cash"}
        )

    async def process_position_create(self, payload: PositionInCache) -> None:
        await self.position_cache.set_position(payload)

    async def process_position_update(self, payload: PositionInCache) -> None:
        await self.position_cache.update_position(payload)

    async def process_market_close(self, *args) -> None:
        """End-of-day settlement: refresh all users, then persist to MongoDB."""
        await self.write_log("收盘清算开始...")
        users = await self.user_cache.get_all_user()
        for user in users:
            await self.write_log(f"正在清算用户`{user.id}`的数据.", level="DEBUG")
            await self.liquidate_user_position(user.id, is_update_volume=True)
            await self.liquidate_user_profit(user.id, is_refresh_frozen_amount=True)
        await self.write_log("收盘清算结束.")
        await self.load_redis_data_to_db()

    async def process_unfreeze(self, payload: OrderInDB) -> None:
        """Release the cash or stock volume that was frozen for an order."""
        if payload.frozen_amount:
            # Return frozen cash to the user's available balance.
            user = await self.user_cache.get_user_by_id(payload.user)
            user.available_cash = PyDecimal(
                payload.frozen_amount.to_decimal() + user.available_cash.to_decimal()
            )
            await self.user_cache.update_user(user, include={"available_cash"})
        if payload.frozen_stock_volume:
            # Return frozen shares to the position's available volume.
            position = await self.position_cache.get_position(
                user_id=payload.user, symbol=payload.symbol, exchange=payload.exchange
            )
            position.available_volume += payload.frozen_stock_volume
            await self.position_cache.update_position(
                position, include={"available_volume"}
            )

    async def pre_trade_validation(
        self,
        order: OrderInCreate,
        user: UserInCache,
    ) -> Union[PyDecimal, int]:
        """Validate a new order against the user's cash or holdings.

        Returns the cash frozen (buy) or the volume frozen (sell).
        """
        if order.order_type == OrderTypeEnum.BUY:
            return await self.__capital_validation(order, user)
        else:
            return await self.__position_validation(order, user)

    async def __capital_validation(
        self,
        order: OrderInCreate,
        user: UserInCache,
    ) -> PyDecimal:
        """Check and freeze the cash a buy order needs.

        Raises InsufficientFunds when available cash is too low.
        """
        # Required cash = volume * price * (1 + commission rate).
        cash_needs = (
            Decimal(order.volume)
            * order.price.to_decimal()
            * (1 + user.commission.to_decimal())
        )
        # If the user's available cash covers the order, freeze it.
        if user.available_cash.to_decimal() >= cash_needs:
            user.available_cash = PyDecimal(
                user.available_cash.to_decimal() - cash_needs
            )
            await self.user_cache.update_user(user, include={"available_cash"})
            return cash_needs
        else:
            raise InsufficientFunds

    async def __position_validation(
        self,
        order: OrderInCreate,
        user: UserInCache,
    ) -> int:
        """Check and freeze the shares a sell order needs.

        Raises NoPositionsAvailable / NotEnoughAvailablePositions.
        """
        try:
            position = await self.position_cache.get_position(
                user.id, order.symbol, order.exchange
            )
        except EntityDoesNotExist:
            raise NoPositionsAvailable
        else:
            if position.available_volume >= order.volume:
                position.available_volume -= order.volume
                await self.position_cache.update_position(
                    position, include={"available_volume"}
                )
                return order.volume
            raise NotEnoughAvailablePositions

    async def create_position(self, order: OrderInDB) -> Tuple[Decimal, Costs]:
        """Open or enlarge a position after a buy order is filled.

        Returns (order market value, trading costs).
        """
        user = await self.user_cache.get_user_by_id(order.user)
        # Shares bought under T+0 are sellable immediately; otherwise 0.
        order_available_volume = (
            order.traded_volume if order.trade_type == TradeTypeEnum.T0 else 0
        )
        # Current quote for the traded security.
        quotes = await get_security_price(order.stock_code)
        # Market value of the order at the fill price.
        securities_order = Decimal(order.traded_volume) * order.sold_price.to_decimal()
        # Change in securities assets, valued at the current price.
        securities_diff = Decimal(order.traded_volume) * quotes.current
        # Trading commission.
        commission = securities_order * user.commission.to_decimal()
        # Total amount spent on the order (fill value + commission).
        amount = commission + securities_order
        # Unrealized profit at the current price, net of commission.
        order_profit = (quotes.current - order.sold_price.to_decimal()) * Decimal(
            order.traded_volume
        ) - commission
        try:
            position = await self.position_cache.get_position(
                order.user, order.symbol, order.exchange
            )
        except EntityDoesNotExist:
            # No existing position: cost basis = total spent / traded volume.
            cost = amount / order.traded_volume
            # Open a new position.
            new_position = PositionInCache(
                user=order.user,
                symbol=order.symbol,
                exchange=order.exchange,
                volume=order.traded_volume,
                available_volume=order_available_volume,
                cost=PyDecimal(cost),
                current_price=PyDecimal(quotes.current),
                profit=order_profit,
                first_buy_date=get_utc_now(),
            )
            await self.event_engine.put(Event(POSITION_CREATE_EVENT, new_position))
        else:
            volume = position.volume + order.traded_volume
            # New cost basis = (old volume * old cost + total spent) / total volume.
            cost = (
                Decimal(position.volume) * position.cost.to_decimal() + amount
            ) / volume
            available_volume = position.available_volume + order_available_volume
            # Profit = (current price - cost basis) * total volume.
            profit = (quotes.current - cost) * Decimal(volume)
            position.volume = volume
            position.available_volume = available_volume
            # NOTE(review): current_price is assigned unwrapped here, while
            # other paths wrap it in PyDecimal — confirm intent.
            position.current_price = quotes.current
            position.cost = PyDecimal(cost)
            position.profit = PyDecimal(profit)
            event = Event(POSITION_UPDATE_EVENT, position)
            await self.event_engine.put(event)
        # Buy side carries commission only; no tax.
        costs = Costs(commission=commission, total=commission, tax="0")
        await self.update_user(order, amount, securities_diff)
        return securities_order, costs

    async def reduce_position(self, order: OrderInDB) -> Tuple[Decimal, Costs]:
        """Reduce or fully close a position after a sell order is filled.

        Returns (order market value, trading costs).
        """
        position = await self.position_cache.get_position(
            order.user, order.symbol, order.exchange
        )
        user = await self.user_cache.get_user_by_id(order.user)
        commission = (
            Decimal(order.traded_volume)
            * order.sold_price.to_decimal()
            * user.commission.to_decimal()
        )
        tax = (
            Decimal(order.traded_volume)
            * order.sold_price.to_decimal()
            * user.tax_rate.to_decimal()
        )
        volume = position.volume - order.traded_volume
        # Current quote.
        quotes = await get_security_price(order.stock_code)
        # Total cost of the position before this sale.
        old_spent = Decimal(position.volume) * position.cost.to_decimal()
        # Position fully closed.
        if volume == 0:
            # Cost basis = (old total cost + commission + tax) / traded volume.
            cost = (old_spent + commission + tax) / order.traded_volume
            # Profit = (current price - cost basis) * traded volume.
            profit = (quotes.current - cost) * order.traded_volume
            position.volume = 0
            position.available_volume = 0
            position.current_price = PyDecimal(quotes.current)
            position.cost = PyDecimal(cost)
            position.profit = PyDecimal(profit)
            event = Event(POSITION_UPDATE_EVENT, position)
            await self.event_engine.put(event)
        # Partial reduction.
        else:
            # Available = old available + frozen-for-order - actually traded.
            available_volume = (
                position.available_volume
                + order.frozen_stock_volume
                - order.traded_volume
            )
            # Cost basis = ((old total cost + commission + tax)
            #               - (fill price * traded volume)) / remaining volume.
            cost = (
                (old_spent + commission + tax)
                - (order.sold_price.to_decimal() * Decimal(order.traded_volume))
            ) / volume
            # Profit = (current price - cost basis) * remaining volume.
            profit = (quotes.current - cost) * Decimal(volume)
            position.volume = volume
            position.available_volume = available_volume
            position.current_price = PyDecimal(quotes.current)
            position.cost = PyDecimal(cost)
            position.profit = PyDecimal(profit)
            event = Event(POSITION_UPDATE_EVENT, position)
            await self.event_engine.put(event)
        costs = Costs(commission=commission, tax=tax, total=commission + tax)
        # Change in securities assets = market value of the order at fill price.
        securities_diff = Decimal(order.traded_volume) * order.sold_price.to_decimal()
        amount = securities_diff - commission - tax
        await self.update_user(order, amount, securities_diff)
        return securities_diff, costs

    async def update_user(
        self, order: OrderInDB, amount: Decimal, securities_diff: Decimal
    ) -> None:
        """Update the user's cash/securities/assets after an order fills."""
        user = await self.user_cache.get_user_by_id(order.user)
        if order.order_type == OrderTypeEnum.BUY:
            # Cash = old cash - amount spent on the order.
            cash = user.cash.to_decimal() - amount
            # Available cash: unfreeze the reserved amount, then pay.
            available_cash = (
                user.available_cash.to_decimal()
                + order.frozen_amount.to_decimal()
                - amount
            )
            # Securities = old securities + change in securities value.
            securities = user.securities.to_decimal() + securities_diff
        else:
            # Cash = old cash + net proceeds of the sale.
            cash = user.cash.to_decimal() + amount
            available_cash = user.available_cash.to_decimal() + amount
            # Securities = old securities - change in securities value.
            securities = user.securities.to_decimal() - securities_diff
        # Total assets = cash + securities.
        assets = cash + securities
        user.cash = PyDecimal(cash)
        # Falls back to "0" when securities is zero — presumably PyDecimal
        # needs a non-falsy input; confirm against PyDecimal's validator.
        user.securities = PyDecimal(securities or "0")
        user.assets = PyDecimal(assets)
        user.available_cash = PyDecimal(available_cash)
        await self.event_engine.put(Event(USER_UPDATE_ASSETS_EVENT, user))

    async def liquidate_user_position(
        self, user_id: PyObjectId, is_update_volume: bool = False
    ) -> None:
        """Mark a user's positions to market; optionally reset available volume."""
        position_list = await self.position_cache.get_position_by_user_id(
            user_id=user_id
        )
        new_position_list = []
        for position in position_list:
            # Drop fully-closed positions from the cache during settlement.
            if is_update_volume and position.volume == 0:
                await self.position_cache.delete_position(position)
                continue
            try:
                security = await get_security_price(position.stock_code)
            except (SecurityNotFoundError, ValidationError):
                # No quote available: skip this position, keep going.
                await self.write_log(f"未找到股票{position.stock_code}的行情信息.", level="DEBUG")
                continue
            current_price = security.current
            position.current_price = PyDecimal(current_price)
            # At settlement, every held share becomes sellable again.
            if is_update_volume:
                position.available_volume = position.volume
            # Profit = (current price - cost basis) * volume held.
            profit = (current_price - position.cost.to_decimal()) * Decimal(
                position.volume
            )
            position.profit = PyDecimal(profit)
            new_position_list.append(position)
        include = {"current_price", "profit"}
        if is_update_volume:
            include.add("available_volume")
        await self.position_cache.update_position_many(
            new_position_list, include=include
        )

    async def liquidate_user_profit(
        self, user_id: PyObjectId, is_refresh_frozen_amount: bool = False
    ) -> None:
        """Recompute a user's total assets from cash plus marked positions."""
        user = await self.user_cache.get_user_by_id(user_id)
        position_list = await self.position_cache.get_position_by_user_id(
            user_id=user_id
        )
        # Securities value = sum of (current price * volume) over positions.
        securities = sum(
            [
                position.current_price.to_decimal() * Decimal(position.volume)
                for position in position_list
            ]
        )
        user.assets = PyDecimal(user.cash.to_decimal() + securities)
        if securities != Decimal(0):
            user.securities = PyDecimal(securities)
        include = {"assets", "securities"}
        # At settlement, frozen cash is released back to available cash.
        if is_refresh_frozen_amount:
            user.available_cash = user.cash
            include.add("available_cash")
        await self.user_cache.update_user(user, include=include)
nilq/baby-python
python
import logging
import inspect


def logger(filename: str, name: str) -> logging.Logger:
    """Configure and return a task logger writing DEBUG records to *filename*.

    Parameters
    ----------
    filename : path of the log file to append to.
    name : logger name (loggers are process-wide singletons per name).

    Returns
    -------
    logging.Logger with level DEBUG and a single FileHandler attached.
    """
    task_logger = logging.getLogger(name)
    task_logger.setLevel(logging.DEBUG)
    # BUG FIX: getLogger returns the same object for the same name, so the
    # original code attached a new FileHandler on every call, duplicating
    # every record. Only attach a handler if none is present yet.
    if not any(isinstance(h, logging.FileHandler) for h in task_logger.handlers):
        fh = logging.FileHandler(filename)
        formatter = logging.Formatter(
            '%(asctime)s %(name)s %(levelname)s: %(message)s')
        fh.setLevel(logging.DEBUG)
        fh.setFormatter(formatter)
        task_logger.addHandler(fh)
    return task_logger


def ctx_message(message: str) -> str:
    """Create an info message tagged with the calling function's name."""
    # stack()[1] is the caller's frame record; index 3 is its function name.
    name = inspect.stack()[1][3]
    return f"fn: {name}, msg: '{message}'"
nilq/baby-python
python
from random import uniform

import lepy
from PySide2Wrapper.PySide2Wrapper.window import MainWindow
from PySide2Wrapper.PySide2Wrapper.widget import OpenGLWidget
from PySide2Wrapper.PySide2Wrapper.app import Application


class SimpleScene:
    """Demo scene showing one lepy builtin primitive per grid cell."""

    # Demo grid layout: 3 columns x 2 rows of unit cells centered on origin.
    cells_num = [3, 2]

    def __init__(self):
        # Both are created lazily in init() once a GL context exists.
        self.engine = None
        self.user_camera = None

    def init(self):
        """Build the engine, camera and one demo object group per cell.

        Called by the OpenGLWidget once the GL context is ready.
        """
        self.engine = lepy.Engine()
        frame = lepy.Frame()
        scene = lepy.Scene()
        frame.add_scene(scene)
        self.engine.add_frame(frame)
        # Mouse-driven camera wrapping the scene camera.
        self.user_camera = lepy.UserMouseCamera(scene.get_camera())
        frame.set_background_color(lepy.Vec3(0, 0, 0))
        scene.get_camera().pos(lepy.Vec3(0, 0, -3))
        scene.get_camera().look_at(lepy.Vec3(0, 0, 0))
        # Cell (0, 0): 10 random green points.
        min_coord = self.__calc_cur_cell_min_coord(0, 0)
        for i in range(10):
            scene.add_object(lepy.BuiltinObjects.point(
                lepy.Vec3(uniform(0, 1) + min_coord[0],
                          uniform(0, 1) + min_coord[1],
                          uniform(0, 1) - 0.5),
                lepy.Vec3(0, 1, 0)))
        # Cell (1, 0): 2 random red triangles.
        min_coord = self.__calc_cur_cell_min_coord(1, 0)
        for i in range(2):
            triangle_vertices = []
            for j in range(3):
                triangle_vertices.append(lepy.Vec3(uniform(0, 1) + min_coord[0],
                                                   uniform(0, 1) + min_coord[1],
                                                   uniform(0, 1) - 0.5))
            scene.add_object(lepy.BuiltinObjects.triangle(triangle_vertices,
                                                          lepy.Vec3(1, 0, 0)))
        # Cell (0, 1): white sphere of radius 0.5 centered in the cell.
        min_coord = self.__calc_cur_cell_min_coord(0, 1)
        scene.add_object(lepy.BuiltinObjects.sphere(
            lepy.Vec3(min_coord[0] + 0.5, min_coord[1] + 0.5, 0.5),
            0.5, lepy.Vec3(1, 1, 1), 2))
        # Cell (1, 1): blue unit quad filling the cell.
        min_coord = self.__calc_cur_cell_min_coord(1, 1)
        quad_vertices = [
            lepy.Vec3(min_coord[0], min_coord[1], 0),
            lepy.Vec3(min_coord[0], min_coord[1] + 1, 0),
            lepy.Vec3(min_coord[0] + 1, min_coord[1] + 1, 0),
            lepy.Vec3(min_coord[0] + 1, min_coord[1], 0)
        ]
        scene.add_object(lepy.BuiltinObjects.quad(quad_vertices, lepy.Vec3(0, 0, 1)))
        # Cell (2, 0): magenta box given by center and three half-axis vectors.
        min_coord = self.__calc_cur_cell_min_coord(2, 0)
        scene.add_object(lepy.BuiltinObjects.box(
            lepy.Vec3(min_coord[0] + 0.5, min_coord[1] + 0.5, 0),
            lepy.Vec3(0, 0.5, 0),
            lepy.Vec3(0.5, 0, 0),
            lepy.Vec3(0, 0, 0.5),
            lepy.Vec3(1, 0, 1)))
        # Cell (2, 1): 5 random yellow line segments.
        min_coord = self.__calc_cur_cell_min_coord(2, 1)
        for i in range(5):
            scene.add_object(lepy.BuiltinObjects.line(
                lepy.Vec3(uniform(0, 1) + min_coord[0],
                          uniform(0, 1) + min_coord[1],
                          uniform(0, 1) - 0.5),
                lepy.Vec3(uniform(0, 1) + min_coord[0],
                          uniform(0, 1) + min_coord[1],
                          uniform(0, 1) - 0.5),
                lepy.Vec3(1, 1, 0)))

    def resize(self, w, h):
        # Forward viewport resizes to the engine.
        self.engine.resize(w, h)

    def draw(self):
        self.engine.redraw()

    def process_mouse_press(self, x, y, is_left, is_pressed):
        # Delegate all input to the lepy mouse camera.
        self.user_camera.process_mouse_press(x, y, is_left, is_pressed)

    def process_cursore_movement(self, x, y):
        self.user_camera.process_cursore_movement(x, y)

    def process_wheel(self, scrolls_count):
        self.user_camera.process_wheel(scrolls_count)

    def __calc_cur_cell_min_coord(self, cur_cell_x: int, cur_cell_y: int):
        """Return the world-space minimum corner [x, y] of a grid cell.

        Cells are unit-sized and the whole grid is centered on the origin.
        """
        return [cur_cell_x - self.cells_num[0] / 2, cur_cell_y - self.cells_num[1] / 2]


if __name__ == "__main__":
    # Wire the scene callbacks into a Qt OpenGL widget and run the app.
    simple_scene = SimpleScene()
    app = Application()
    gl_widget = OpenGLWidget(simple_scene.init, simple_scene.resize, simple_scene.draw)
    gl_widget.set_mouse_press_callback(simple_scene.process_mouse_press)
    gl_widget.set_mouse_move_callback(simple_scene.process_cursore_movement)
    gl_widget.set_wheel_scroll_event(simple_scene.process_wheel)
    main_window = MainWindow("Simple Scene Example")
    main_window.add_widget(gl_widget, need_stretch=False)
    main_window.resize(800, 700)
    main_window.move(100, 100)
    main_window.show()
    app.run()
nilq/baby-python
python
import unittest
import socket
import tcp
from multiprocessing import Process
from time import sleep
import os
import subprocess
import signal

'''
test_tcp.py can be run on command line by inputting the following
    sudo python3 tcp/test_tcp.py

NOTE: THE TCP THREAD NEVER TERMINATES BECAUSE THE TCP THREAD IS IN A WHILE
LOOP. YOU WILL HAVE TO CTRL-C TO BREAK OUT OF IT.
* Because the TCP thread never terminates, it is likely that you can only
run 1 test at a time. The top test, test_tcp, is the most important one,
so that one will run first. If you want to test a specific test case,
comment out the other test cases.

test_tcp tests if rootfs file is transferred when requested. If the host
machine's rootfs.tgz file is the same size (or close) to the size of the
/install/boot/rootfs file, then it passes. Else it fails.

the other 3 tests send a message to the TCP thread (IS_UNINSTALLED,
IS_INSTALLED, IS_HELPFUL). The first 2 should pass, the last one should
fail.
'''

# Expected protocol replies from the server under test.
SEND_BOOT = b"boot\n" + b"EOM\n"
SEND_FORMAT = b"format\n" + b"EOM\n"


class tcp_tests(unittest.TestCase):

    def test_tcp(self):
        # Integration test: ask the server to format, then receive the
        # rootfs image over a second connection and compare file sizes.
        sleep(3)
        port = 3345
        # Server runs in a child process; it never exits on its own (see
        # module note), so it is terminate()d explicitly below.
        tcp_thread = Process(target=tcp.do_tcp, args=["./install/boot", port, "localhost"], name="tcp")
        tcp_thread.start()
        try:
            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sd:
                sd.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                # Give the server time to bind before connecting.
                sleep(3)
                sd.connect(('localhost', port))
                sd.sendall(b'IS_FORMATTED\n')
                # Second connection (port 4444) carries the file transfer.
                with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sd2:
                    sd2.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                    sd2.connect(('localhost', 4444))
                    file = open('rootfs.tgz', 'wb')
                    data = sd2.recv(1024)
                    # Stream until the peer closes (recv returns b'').
                    while data:
                        file.write(data)
                        data = sd2.recv(1024)
                    rec_size = os.path.getsize("rootfs.tgz")
                    orig_size = os.path.getsize("./install/boot/rootfs.tgz")
                    difference = abs(orig_size - rec_size)
                    file.close()
                    os.remove("rootfs.tgz")
                    # this was originally used instead of the assertLessEqual commented out here
                    # there seems to be a bug with tcp where the exact file is not transferred
                    # self.assertLessEqual(difference, 5000)
                    tcp_thread.terminate()
                    self.assertEqual(rec_size, orig_size)
        except KeyboardInterrupt as e:
            # NOTE(review): sd2/file may be unbound if the interrupt lands
            # before they are created — these cleanups can raise NameError.
            sd2.close()
            sd.close()
            file.close()
            os.remove("rootfs.tgz")
            tcp_thread.terminate()
        except OSError as e:
            sd2.close()
            sd.close()
            tcp_thread.terminate()
        sd2.close()
        sd.close()

    def test_installed(self):
        # Protocol test: IS_INSTALLED -> boot reply, IS_UNINSTALLED -> format.
        port = 5001
        tcp_thread = Process(target=tcp.do_tcp, args=["./install/boot", port, "localhost"], name="tcpThread1")
        tcp_thread.daemon = True
        tcp_thread.start()
        try:
            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sd:
                sd.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                sleep(2)
                sd.connect(('localhost', port))
                sd.sendall(b'IS_INSTALLED\n')
                response = sd.recv(1024)
                self.assertEqual(response, SEND_BOOT)
                sd.sendall(b'IS_UNINSTALLED\n')
                response = sd.recv(1024)
                self.assertEqual(response, SEND_FORMAT)
                sd.close()
        except KeyboardInterrupt as e:
            sd.close()
            self.fail("Cancelled before completed")
        except Exception as e:
            print(e)
            sd.close()
            self.fail("Unexpected exception")
        tcp_thread.terminate()

    # def test_uninstalled(self):
    #     sleep(13)
    #     port = 5002
    #     tcp_thread = Process(target=tcp.do_tcp, args=["./install/boot", port, "localhost"], name="tcpThread2")
    #     tcp_thread.daemon = True
    #     tcp_thread.start()
    #     try:
    #         with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sd:
    #             sd.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    #             sleep(2)
    #             sd.connect(('localhost', port))
    #             sd.sendall(b'IS_UNINSTALLED\n')
    #             response = sd.recv(1024)
    #             self.assertEqual(response, SEND_FORMAT)
    #             sd.close()
    #     except KeyboardInterrupt as e:
    #         sd.close()
    #         self.fail("User canceled test before completion")
    #     except Exception as e:
    #         print(e)
    #         sd.close()
    #         self.fail("Unexpected exception")
    #     tcp_thread.terminate()


if __name__ == '__main__':
    unittest.main()
nilq/baby-python
python
""" The MIT License (MIT) Copyright (c) 2015 Zagaran, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. @author: Zags (Benjamin Zagorsky) """ import collections import json from logging import log, WARN from past.builtins import basestring from mongolia.constants import (ID_KEY, CHILD_TEMPLATE, UPDATE, SET, REQUIRED_VALUES, REQUIRED_TYPES, TYPES_TO_CHECK, TEST_DATABASE_NAME) from mongolia.errors import (TemplateDatabaseError, MalformedObjectError, RequiredKeyError, DatabaseConflictError, InvalidKeyError, InvalidTypeError, NonexistentObjectError) from mongolia.json_codecs import MongoliaJSONEncoder, MongoliaJSONDecoder from mongolia.mongo_connection import CONNECTION, AlertLevel class DatabaseObject(dict): """ Represent a MongoDB object as a Python dictionary. PATH is the database path in the form "database.collection"; children classes of DatabaseObject should override this attribute. PATH is what specifies which collection in mongo an item is stored in. 
PATH SHOULD BE UNIQUE FOR EACH CHILD OF DatabaseObject. IF TWO DatabaseObjects ARE CREATED WITH THE SAME PATH, THEIR DATA WILL BE STORED IN THE SAME COLLECTION. DEFAULTS is a dictionary of default values for keys of the dict; defaults can be functions; REQUIRED is a special value for a key that raises a MalformedObjectError if that key isn't in the dict at save time; ; children classes of DatabaseObject can optionally override this attribute Child Class Example: class User(DatabaseObject): PATH = "application.users" DEFAULTS = { "email": REQUIRED, "password": REQUIRED, "time_created": datetime.now, "name": "anonymous" } __getattr__, __setattr__, and __delattr__ have been overridden to behave as item accessors. This means that you can access elements in the DatabaseObject by either database_object["key"] or database_object.key; database_object["key"] syntax is preferable for use in production code since there is no chance of conflicting with any of the methods attached to the DatabaseObject. For example, if your entry is named "copy", you can only access it by means of database_object["copy"], as database_object.copy gives lookup preference to the .copy() method. Mostly, the ability to use the attribute access is for convenience when interacting with DatabaseObjects in an interactive python shell. """ PATH = None DEFAULTS = {} _exists = True def __init__(self, query=None, path=None, defaults=None, _new_object=None, **kwargs): """ Loads a single database object from path matching query. If nothing matches the query (possibly because there is nothing in the specified mongo collection), the created DatabaseObject will be an empty dictionary and have bool(returned object) == False. If more than one database object matches the query, a DatabaseConflictError is thrown. NOTE: The path and defaults parameters to this function are to allow use of the DatabaseObject class directly. 
However, this class is intended for subclassing and children of it should override the PATH and DEFAULTS attributes rather than passing them as parameters here. NOTE: if you pass in a single argument to __init__, this will match against ID_KEY. @param query: a dictionary specifying key-value pairs that the result must match. If query is None, use kwargs in it's place @param path: the path of the database to query, in the form "database.colletion"; pass None to use the value of the PATH property of the object @param defaults: the defaults dictionary to use for this object; pass None to use the DEFAULTS property of the object @param _new_object: internal use only @param **kwargs: used as query parameters if query is None @raise Exception: if path and self.PATH are None; the database path must be defined in at least one of these @raise TemplateDatabaseError: if PATH is CHILD_TEMPLATE; this constant is for children classes that are not meant to be used as database accessors themselves, but rather extract common functionality used by DatabaseObjects of various collections @raise DatabaseConflictError: if PATH is CHILD_TEMPLATE; this constant is for children classes that are not meant to be used as database accessors themselves, but rather extract common functionality used by DatabaseObjects of various collections """ if path: dict.__setattr__(self, "PATH", path) if defaults: dict.__setattr__(self, "DEFAULTS", defaults) if self.PATH == CHILD_TEMPLATE: raise TemplateDatabaseError() if _new_object is not None: dict.__init__(self, _new_object) return if query is None and len(kwargs) > 0: query = kwargs if query is not None: if not isinstance(query, collections.Mapping): query = {ID_KEY: query} cursor = self.db(path).find(query) if cursor.count() > 1: raise DatabaseConflictError(('More than one database object ' + 'was found for query "%s"') % (query, )) for result in cursor.limit(-1): dict.__init__(self, result) return dict.__setattr__(self, "_exists", False) @classmethod 
    def exists(cls, query=None, path=None, **kwargs):
        """
        Like __init__ but simply returns a boolean as to whether or not the
        object exists, rather than returning the whole object.

        NOTE: if you pass in a single argument to exists, this will match
        against ID_KEY.

        @param query: a dictionary specifying key-value pairs that the result
            must match.  If query is None, use kwargs in it's place
        @param path: the path of the database to query, in the form
            "database.colletion"; pass None to use the value of the PATH
            property of the object
        @param **kwargs: used as query parameters if query is None

        @raise Exception: if path and self.PATH are None; the database path
            must be defined in at least one of these
        """
        if query is None and len(kwargs) > 0:
            query = kwargs
        if query is None:
            return False
        return cls.db(path).find_one(query) is not None

    @classmethod
    def create(cls, data, path=None, defaults=None, overwrite=False,
               random_id=False, **kwargs):
        """
        Creates a new database object and stores it in the database.

        NOTE: The path and defaults parameters to this function are to allow
        use of the DatabaseObject class directly.  However, this class is
        intended for subclassing and children of it should override the PATH
        and DEFAULTS attributes rather than passing them as parameters here.

        @param data: dictionary of data that the object should be created
            with; this must follow all mongo rules, as well as have an entry
            for ID_KEY unless random_id == True
        @param path: the path of the database to use, in the form
            "database.collection"
        @param defaults: the defaults dictionary to use for this object
        @param overwrite: if set to true, will overwrite any object in the
            database with the same ID_KEY; if set to false will raise an
            exception if there is another object with the same ID_KEY
        @param random_id: stores the new object with a random value for
            ID_KEY; overwrites data[ID_KEY]
        @param **kwargs: ignored

        @raise Exception: if path and self.PATH are None; the database path
            must be defined in at least one of these
        @raise DatabaseConflictError: if there is already an object with that
            ID_KEY and overwrite == False
        @raise MalformedObjectError: if a REQUIRED key of defaults is missing,
            or if the ID_KEY of the object is None and random_id is False
        """
        # Wrap `data` without hitting the database (_new_object fast path).
        self = cls(path=path, defaults=defaults, _new_object=data)
        # Validate every supplied key against DEFAULTS before touching mongo.
        for key, value in self.items():
            if key == ID_KEY:
                continue
            if self.DEFAULTS and key not in self.DEFAULTS:
                self._handle_non_default_key(key, value)
            self._check_type(key, value)
        if random_id and ID_KEY in self:
            dict.__delitem__(self, ID_KEY)
        if not random_id and ID_KEY not in self:
            raise MalformedObjectError("No " + ID_KEY + " key in item")
        if not random_id and not overwrite and self._collection.find_one({ID_KEY: data[ID_KEY]}):
            raise DatabaseConflictError('ID_KEY "%s" already exists in collection %s' %
                                        (data[ID_KEY], self.PATH))
        # Fill in missing defaults (may raise for REQUIRED keys).
        self._pre_save()
        if ID_KEY in self and overwrite:
            self._collection.replace_one({ID_KEY: self[ID_KEY]}, dict(self), upsert=True)
        else:
            insert_result = self._collection.insert_one(dict(self))
            # Adopt the server-assigned id so the in-memory object matches.
            dict.__setitem__(self, ID_KEY, insert_result.inserted_id)
        return self

    @classmethod
    def create_from_json(cls, json_str, ignore_non_defaults=True):
        """
        Creates a database object from a json object.

        The intent of this method is to allow creating a database object
        directly from json.  Mongolia will also automatically convert any
        json values that are formatted using the MongoliaJSONEncoder (for
        ObjectIds and datetime objects) back to their native python data
        types.

        Note: if using AngularJS, make sure to pass json back using
        `angular.toJson(obj)` instead of `JSON.stringify(obj)` since angular
        sometimes adds `$$hashkey` to javascript objects and this will cause
        a mongo error due to the "$" prefix in keys.

        @param json_str: the json string containing the new object to use
            for creating the new object
        @param ignore_non_defaults: if this is True and the database object
            has non-empty DEFAULTS, then any top-level keys of the create
            json that do not appear in DEFAULTS will also be excluded in
            creation
        """
        # NOTE(review): the `encoding` kwarg of json.loads was removed in
        # Python 3.9 — confirm the supported interpreter version.
        create_dict = json.loads(json_str, cls=MongoliaJSONDecoder, encoding="utf-8")
        # Remove all keys not in DEFAULTS if ignore_non_defaults is True
        if cls.DEFAULTS and ignore_non_defaults:
            for key in frozenset(create_dict).difference(frozenset(cls.DEFAULTS)):
                del create_dict[key]
        cls.create(create_dict, random_id=True)

    @classmethod
    def db(cls, path=None):
        """
        Returns a pymongo Collection object from the current database
        connection.  If the database connection is in test mode, collection
        will be in the test database.

        @param path: if is None, the PATH attribute of the current class is
            used; if is not None, this is used instead

        @raise Exception: if neither cls.PATH or path are valid
        """
        if cls.PATH is None and path is None:
            raise Exception("No database specified")
        if path is None:
            path = cls.PATH
        if "." not in path:
            raise Exception(('invalid path "%s"; database paths must be ' +
                             'of the form "database.collection"') % (path,))
        # In test mode everything is routed into a single test database,
        # keyed by the full "db.collection" path to avoid collisions.
        if CONNECTION.test_mode:
            return CONNECTION.get_connection()[TEST_DATABASE_NAME][path]
        (db, coll) = path.split('.', 1)
        return CONNECTION.get_connection()[db][coll]

    def __getitem__(self, key):
        # Item access: stored value first, then a (copied) default.
        if not self._exists:
            raise NonexistentObjectError("The object does not exist")
        if key == ID_KEY or key == "ID_KEY":
            return dict.__getitem__(self, ID_KEY)
        elif key in self:
            value = dict.__getitem__(self, key)
            # Reads only warn on type mismatch; writes may raise.
            self._check_type(key, value, warning_only=True)
            return value
        try:
            new = self._get_from_defaults(key)
        except RequiredKeyError:
            raise MalformedObjectError("'%s' is a required key of %s" %
                                       (key, type(self).__name__))
        # Cache the materialized default so later mutation is persisted.
        dict.__setitem__(self, key, new)
        return new

    def __setitem__(self, key, value):
        if not self._exists:
            raise NonexistentObjectError("The object does not exist")
        if key == ID_KEY or key == "ID_KEY":
            # Do not allow setting ID_KEY directly
            raise KeyError("Do not modify '%s' directly; use rename() instead" % ID_KEY)
        # NOTE(review): `basestring` exists only on Python 2 — this module
        # appears to target py2; confirm before running under py3.
        if not isinstance(key, basestring):
            raise InvalidKeyError("documents must have only string keys, key was %s" % key)
        if self.DEFAULTS and key not in self.DEFAULTS:
            self._handle_non_default_key(key, value)
        self._check_type(key, value)
        dict.__setitem__(self, key, value)

    def __delitem__(self, key):
        if not self._exists:
            raise NonexistentObjectError("The object does not exist")
        if key == ID_KEY or key == "ID_KEY":
            # Do not allow deleting ID_KEY
            raise KeyError("Do not delete '%s' directly; use rename() instead" % ID_KEY)
        if key in self:
            dict.__delitem__(self, key)

    # Attribute access delegates to item access (see class docstring).
    def __getattr__(self, key):
        return self[key]

    def __setattr__(self, key, val):
        self[key] = val

    def __delattr__(self, key):
        del self[key]

    def __dir__(self):
        # Expose stored keys alongside regular attributes for tab-completion.
        return sorted(set(dir(type(self)) + self.keys()))

    # Py2-style alias for dict.items.
    iteritems = dict.items

    @property
    def _collection(self):
        # The pymongo Collection backing this object.
        return self.db(self.PATH)

    def _pre_save(self):
        # Shared pre-flight for save()/create(): materialize every default
        # (raises MalformedObjectError via __getitem__ for REQUIRED keys).
        if not self._exists:
            raise NonexistentObjectError("The object does not exist")
        # Fill in missing defaults by invoking __getitem__ for each key in DEFAULTS
        for key in self.DEFAULTS:
            try:
                self[key]
            except KeyError:
                pass

    def save(self):
        """
        Saves the current state of the DatabaseObject to the database.
        Fills in missing values from defaults before saving.

        NOTE: The actual operation here is to overwrite the entry in the
        database with the same ID_KEY.

        WARNING: While the save operation itself is atomic, it is not atomic
        with loads and modifications to the object.  You must provide your
        own synchronization if you have multiple threads or processes
        possibly modifying the same database object.  The update method is
        better from a concurrency perspective.

        @raise MalformedObjectError: if the object does not provide a value
            for a REQUIRED default
        """
        self._pre_save()
        self._collection.replace_one({ID_KEY: self[ID_KEY]}, dict(self))

    def rename(self, new_id):
        """
        Renames the DatabaseObject to have ID_KEY new_id.  This is the only
        way allowed by DatabaseObject to change the ID_KEY of an object.
        Trying to modify ID_KEY in the dictionary will raise an exception.

        @param new_id: the new value for ID_KEY

        NOTE: This is actually a create and delete.

        WARNING: If the system fails during a rename, data may be duplicated.
        """
        old_id = dict.__getitem__(self, ID_KEY)
        dict.__setitem__(self, ID_KEY, new_id)
        # NOTE(review): Collection.save()/remove() were removed in PyMongo 4
        # (replace_one/delete_many are the modern equivalents) — verify the
        # pinned pymongo version.
        self._collection.save(self)
        self._collection.remove({ID_KEY: old_id})

    def remove(self):
        """
        Deletes the object from the database.

        WARNING: This cannot be undone.  Be really careful when deleting
        programatically.  It is recommended to backup your database before
        applying specific deletes.  If your application uses deletes
        regularly, it is strongly recommended that you have a recurring
        backup system.
        """
        self._collection.remove({ID_KEY: self[ID_KEY]})
        # Empty the in-memory mapping so the object reads as deleted.
        dict.clear(self)

    def copy(self, new_id=None, attribute_overrides={}):
        """
        Copies the DatabaseObject under the ID_KEY new_id.

        @param new_id: the value for ID_KEY of the copy; if this is none,
            creates the new object with a random ID_KEY
        @param attribute_overrides: dictionary of attribute names -> values
            that you would like to override with.
        """
        data = dict(self)
        data.update(attribute_overrides)
        if new_id is not None:
            data[ID_KEY] = new_id
            return self.create(data, path=self.PATH)
        else:
            # Let mongo pick a fresh id for the copy.
            del data[ID_KEY]
            return self.create(data, random_id=True, path=self.PATH)

    def update(self, update_dict=None, raw=False, **kwargs):
        """
        Applies updates both to the database object and to the database via
        the mongo update method with the $set argument.  Use the `raw`
        keyword to perform an arbitrary mongo update query.

        WARNING: Raw updates do not perform type checking.

        WARNING: While the update operation itself is atomic, it is not
        atomic with loads and modifications to the object.  You must provide
        your own synchronization if you have multiple threads or processes
        possibly modifying the same database object.  While this is safer
        from a concurrency perspective than the access pattern
        load -> modify -> save as it only updates keys specified in the
        update_dict, it will still overwrite updates to those same keys that
        were made while the object was held in memory.

        @param update_dict: dictionary of updates to apply
        @param raw: if set to True, uses the contents of update_dict directly
            to perform the update rather than wrapping them in $set.
        @param **kwargs: used as update_dict if no update_dict is None
        """
        if update_dict is None:
            update_dict = kwargs
        if raw:
            self._collection.update_one({ID_KEY: self[ID_KEY]}, update_dict)
            # A raw update can change anything, so re-read the whole document
            # to keep the in-memory copy in sync.
            new_data = self._collection.find_one({ID_KEY: self[ID_KEY]})
            dict.clear(self)
            dict.update(self, new_data)
        else:
            for key, value in update_dict.items():
                self._check_type(key, value)
            dict.update(self, update_dict)
            self._collection.update_one({ID_KEY: self[ID_KEY]}, {SET: update_dict})

    def to_json(self):
        """
        Returns the json string of the database object in utf-8.

        Note: ObjectId and datetime.datetime objects are custom-serialized
        using the MongoliaJSONEncoder because they are not natively json-
        serializable.
        """
        return json.dumps(self, cls=MongoliaJSONEncoder, encoding="utf-8")

    def json_update(self, json_str, exclude=[], ignore_non_defaults=True):
        """
        Updates a database object based on a json object.

        The intent of this method is to allow passing json to an interface
        which then subsequently manipulates the object and then sends back an
        update.  Mongolia will also automatically convert any json values
        that were initially converted from ObjectId and datetime.datetime
        objects back to their native python object types.

        Note: if using AngularJS, make sure to pass json back using
        `angular.toJson(obj)` instead of `JSON.stringify(obj)` since angular
        sometimes adds `$$hashkey` to javascript objects and this will cause
        a mongo error due to the "$" prefix in keys.

        @param json_str: the json string containing the new object to use
            for the update
        @param exclude: a list of top-level keys to exclude from the update
            (ID_KEY need not be included in this list; it is automatically
            deleted since it can't be part of a mongo update operation)
        @param ignore_non_defaults: if this is True and the database object
            has non-empty DEFAULTS, then any top-level keys in the update
            json that do not appear in DEFAULTS will also be excluded from
            the update
        """
        update_dict = json.loads(json_str, cls=MongoliaJSONDecoder, encoding="utf-8")
        # Remove ID_KEY since it can't be part of a mongo update operation
        if ID_KEY in update_dict:
            del update_dict[ID_KEY]
        # Remove all keys in the exclude list from the update
        for key in frozenset(exclude).intersection(frozenset(update_dict)):
            del update_dict[key]
        # Remove all keys not in DEFAULTS if ignore_non_defaults is True
        if self.DEFAULTS and ignore_non_defaults:
            for key in frozenset(update_dict).difference(frozenset(self.DEFAULTS)):
                del update_dict[key]
        self.update(update_dict)

    def json_update_fields(self, json_str, fields_to_update):
        """
        Updates the specified fields of a database object based on a json
        object.

        The intent of this method is to allow passing json to an interface
        which then subsequently manipulates the object and then sends back an
        update for specific fields of the object.  Mongolia will also
        automatically convert any json values that were initially converted
        from ObjectId and datetime.datetime objects back to their native
        python object types.

        Note: if using AngularJS, make sure to pass json back using
        `angular.toJson(obj)` instead of `JSON.stringify(obj)` since angular
        sometimes adds `$$hashkey` to javascript objects and this will cause
        a mongo error due to the "$" prefix in keys.

        @param json_str: the json string containing the new object to use
            for the update
        @param fields_to_update: a list of the top-level keys to update; only
            keys included in this list will be update.  Do not include
            ID_KEY in this list since it can't be part of a mongo update
            operation
        """
        update_dict = json.loads(json_str, cls=MongoliaJSONDecoder, encoding="utf-8")
        # Whitelist the requested fields (never ID_KEY).
        update_dict = dict((k, v) for k, v in update_dict.items()
                           if k in fields_to_update and k != ID_KEY)
        self.update(update_dict)

    def _get_from_defaults(self, key):
        # Materialize a value for `key` from DEFAULTS.
        # If a KeyError is raised here, it is because the key is found in
        # neither the database object nor the DEFAULTS
        if self.DEFAULTS[key] in REQUIRED_VALUES:
            raise RequiredKeyError(key)
        if self.DEFAULTS[key] == UPDATE:
            raise KeyError(key)
        try:
            # Try DEFAULTS as a function
            default = self.DEFAULTS[key]()
        except TypeError:
            # If it fails, treat DEFAULTS entry as a value
            default = self.DEFAULTS[key]
        # If default is a dict or a list, make a copy to avoid passing by reference
        if isinstance(default, list):
            default = list(default)
        if isinstance(default, dict):
            default = dict(default)
        return default

    def _handle_non_default_key(self, key, value):
        # There is an attempt to set a key not in DEFAULTS
        if CONNECTION.defaults_handling == AlertLevel.error:
            raise InvalidKeyError("%s not in DEFAULTS for %s" % (key, type(self).__name__))
        elif CONNECTION.defaults_handling == AlertLevel.warning:
            log(WARN, "%s not in DEFAULTS for %s" % (key, type(self).__name__))

    def _check_type(self, key, value, warning_only=False):
        # Check the type of the object against the type in DEFAULTS
        if not self.DEFAULTS or key not in self.DEFAULTS:
            # If the key is not in defaults, there is nothing to compare to
            return
        default = self.DEFAULTS[key]
        if default in list(REQUIRED_TYPES.keys()) and not isinstance(value, REQUIRED_TYPES[default]):
            # Check types of required fields regardless of alert settings
            message = ("value '%s' for key '%s' must be of type %s" %
                       (value, key, REQUIRED_TYPES[default]))
            if warning_only:
                log(WARN, message)
                return
            raise InvalidTypeError(message)
        if default in REQUIRED_VALUES or default == UPDATE:
            # Handle special keys, including a REQUIRED_TYPE default
            # (which was checked above)
            return
        if CONNECTION.type_checking == AlertLevel.none:
            # Shortcut return if type checking is disabled
            return
        type_ = DatabaseObject._get_type(default)
        if type_ is None or isinstance(value, type_):
            # The key either matches the type of the default or the default is
            # not one of the types we check; everything is good
            return
        # If we've gotten here, there is a type mismatch: warn or error
        message = ("value '%s' for key '%s' must be of type %s" %
                   (value, key, type_))
        if CONNECTION.type_checking == AlertLevel.error:
            if warning_only:
                log(WARN, message)
                return
            raise InvalidTypeError(message)
        elif CONNECTION.type_checking == AlertLevel.warning:
            log(WARN, message)

    @staticmethod
    def _get_type(default):
        # Map a default value to the first TYPES_TO_CHECK entry it is an
        # instance of, or None if it is not a checked type.
        for type_ in TYPES_TO_CHECK:
            if isinstance(default, type_):
                return type_
        return None
nilq/baby-python
python
from __future__ import print_function
import os
import random
import sys
import time
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from torch.nn.parameter import Parameter
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
matplotlib.style.use('ggplot')
import pandas as pd


class VBDBase(nn.Module):
    """Base module for variational Bernoulli dropout style feature selection.

    Holds a (dim_input, dim_output) matrix of dropout logits (`logit_p`) and a
    generic training loop (`fit`).  Subclasses must implement `sgvloss`,
    `eval_reg`, `eval_criteria`, and a `forward` accepting the keyword
    arguments used below.

    NOTE(review): this code uses pre-0.4 PyTorch idioms (`Variable`,
    `.data[0]`, `.cuda(async=True)`).  `async` is a reserved keyword from
    Python 3.7 on, so `.cuda(async=True)` is a SyntaxError there — confirm
    the pinned torch/python versions before reuse.
    """

    def __init__(self, dim_input, dim_output, thresh=0, ard_init=1,
                 anneal=1.05, anneal_max=100, rw_max=20, name=None):
        # thresh: logit cutoff below which a weight counts as "dropped".
        # anneal/anneal_max: exponential annealing schedule with a cap.
        # rw_max: epochs over which the regularizer weight ramps 0 -> 1.
        super(VBDBase, self).__init__()
        self.dim_input = dim_input
        self.dim_output = dim_output
        self.logit_p = Parameter(torch.Tensor(dim_input, dim_output))
        self.logit_p.data.fill_(ard_init)
        self.thresh = thresh
        self.ard_init = ard_init
        self.anneal = anneal
        self.anneal_max = anneal_max
        self.rw_max = rw_max
        self.reached_max = False
        self.optimizer = None
        if name is None:
            # Random 3-letter tag used to name checkpoints/results folders.
            self.hash = ''.join([chr(random.randint(97, 122)) for _ in range(3)])
        else:
            self.hash = name

    @staticmethod
    def clip(mtx, to=5):
        # In-place clamp of a tensor's data to [-to, to]; returns the tensor.
        mtx.data[mtx.data > to] = to
        mtx.data[mtx.data < -to] = -to
        return mtx

    def anneal_policy(self, epoch):
        # Exponential annealing, permanently capped at anneal_max once reached.
        if self.reached_max:
            return self.anneal_max
        anneal_val = self.anneal ** epoch
        if anneal_val > self.anneal_max:
            self.reached_max = True
            return self.anneal_max
        return anneal_val

    def sgvloss(self, outputs, targets, rw, num_samples):
        # Subclass hook: return (total_loss, prediction_loss, regularizer_loss).
        raise NotImplementedError

    def eval_reg(self):
        # Subclass hook: evaluate the regularization term.
        raise NotImplementedError

    def rw_policy(self, epoch):
        # Linear warm-up of the regularizer weight over the first rw_max epochs.
        if epoch > self.rw_max:
            return 1.
        return epoch * 1.0 / self.rw_max

    def get_sparsity(self, **kwargs):
        # Fraction of logits below the drop threshold, as a display string.
        return '%.3f(threshold %.1f)' % ((self.logit_p.data < self.thresh).sum() * 1.0 / torch.numel(self.logit_p.data), self.thresh)

    def get_alpha_range(self):
        # Display string of the (clipped) logit range; clip() mutates in place.
        logit_p = self.clip(self.logit_p)
        return '%.2f, %.2f' % (logit_p.data.min(), logit_p.data.max())

    def eval_criteria(self, outputs, targets):
        # Subclass hook: per-batch evaluation metric (summed over samples).
        raise NotImplementedError

    def get_val_criteria(self, loader, cuda=False):
        """Average eval_criteria over a loader (deterministic forward pass)."""
        print_statistics = [0.]
        for i, data in enumerate(loader):
            # get the inputs
            inputs, targets = data
            inputs = Variable(inputs)
            targets = Variable(targets)
            if cuda:
                inputs = inputs.cuda(async=True)
                targets = targets.cuda(async=True)
            outputs = self.forward(inputs, testing=True)
            acc = self.eval_criteria(outputs, targets)
            print_statistics[0] += acc
        # NOTE(review): assumes a TensorDataset-style loader exposing
        # `.dataset.data_tensor` — confirm against the caller's loaders.
        return print_statistics[0] * 1.0 / loader.dataset.data_tensor.size(0)

    def fit(self, data_loader, valloader, testloader=None, stochastic=False,
            max_iter=1000, batch_print=10, epoch_print=1, weight_lr=1e-3,
            logitp_lr=1e-3, pretrain=False, train_clip=False, lookahead=10,
            time_budget=None, lr_patience=10, save_freq=None, cuda=False,
            decrease_logitp_lr=True):
        """Train with early stopping (`lookahead`), LR decay on plateau
        (`lr_patience`), optional pretraining (regularizer disabled), a wall
        clock `time_budget`, and periodic checkpointing (`save_freq`).
        """
        if cuda:
            self.cuda()
        if pretrain:
            # Pretraining freezes the dropout logits (lr 0) and optimizes
            # prediction loss only.
            logitp_lr = 0.
        other_params = [p for name, p in self.named_parameters() if name != 'logit_p']
        # Two param groups: [0] = weights, [1] = dropout logits.
        if self.optimizer is None:
            self.optimizer = optim.Adam([{'params': other_params},
                                         {'params': [self.logit_p], 'lr': logitp_lr}],
                                        lr=weight_lr)
        else:
            self.optimizer.param_groups[0]['lr'] = weight_lr
            self.optimizer.param_groups[1]['lr'] = logitp_lr

        def reduce_lr(ratio=3., min_lr=5E-6):
            # Divide each group's LR by `ratio`, floored at min_lr; group 1
            # (logits) is skipped unless decrease_logitp_lr is set.
            for i, param_group in enumerate(self.optimizer.param_groups):
                if not decrease_logitp_lr and i == 1:
                    continue
                old_lr = float(param_group['lr'])
                new_lr = old_lr / ratio
                if new_lr < min_lr:
                    new_lr = min_lr
                param_group['lr'] = new_lr

        start_time = time.time()
        min_val_loss = np.inf
        min_epoch = 0
        lr_counter = lr_patience
        N = data_loader.dataset.data_tensor.size(0)
        val_loss = []
        train_pred_loss = []
        train_reg_loss = []
        for epoch in range(max_iter):
            # [sgv_loss_sum, metric_sum, pred_loss_sum, reg_loss_sum]
            print_statistics = [0., 0., 0., 0.]
            epoch_st_time = time.time()
            total_batch = len(data_loader)
            num = 0
            for batch_idx, data in enumerate(data_loader):
                # get the inputs
                inputs, targets = data
                inputs = Variable(inputs)
                targets = Variable(targets)
                if cuda:
                    inputs = inputs.cuda(async=True)
                    targets = targets.cuda(async=True)
                # zero the parameter gradients
                self.optimizer.zero_grad()
                outputs = self.forward(inputs, epoch=epoch, stochastic=stochastic,
                                       testing=pretrain, train_clip=train_clip)
                sgv_loss, pred_loss, reg_loss = self.sgvloss(
                    outputs, targets, rw=self.rw_policy(epoch), num_samples=N)
                # During pretraining only the prediction term is optimized.
                the_loss = pred_loss if pretrain else sgv_loss
                the_loss.backward()
                self.optimizer.step()
                acc = self.eval_criteria(outputs, targets)
                print_statistics[0] += sgv_loss.data[0]
                print_statistics[2] += pred_loss.data[0]
                print_statistics[3] += reg_loss.data[0]
                print_statistics[1] += acc
                num += inputs.size(0)
                if batch_idx % batch_print == (batch_print - 1):
                    print('epoch %d [%d / %d]: loss %.5f (%.5f, %.5f)' % \
                          (epoch, batch_idx, total_batch, print_statistics[0] / num,
                           print_statistics[2] / num, print_statistics[3] / num))
            val_criteria = self.get_val_criteria(valloader, cuda=cuda)
            if epoch % epoch_print == (epoch_print - 1):
                print('epoch: %d, val: %.3f, train: %.3f, loss: %.5f (%.5f, %.5f), ' \
                      'sparsity: %s, range: %s, (%.1f secs)' % \
                      (epoch, val_criteria, print_statistics[1] / N,
                       print_statistics[0] / N, print_statistics[2] / N,
                       print_statistics[3] / N, self.get_sparsity(),
                       self.get_alpha_range(), time.time() - epoch_st_time))
            val_loss.append(val_criteria)
            train_pred_loss.append(print_statistics[2] / N)
            train_reg_loss.append(print_statistics[3] / N)
            if min_val_loss > val_criteria:
                # New best: reset both the early-stop and LR-decay counters.
                min_val_loss = val_criteria
                min_epoch = epoch
                lr_counter = lr_patience
            else:
                # Early stop after `lookahead` epochs without improvement.
                if epoch - min_epoch > lookahead:
                    break
                lr_counter -= 1
                if lr_counter == 0:
                    print('reduce learning rate!')
                    reduce_lr()
                    lr_counter = lr_patience
            if save_freq is not None and epoch % save_freq == (save_freq - 1):
                self.save_net(epoch)
            if time_budget is not None and time.time() - start_time > time_budget:
                print('Exceeds time budget %d seconds! Exit training.' % time_budget)
                break
        print('Finished Training')
        if pretrain:
            return
        test_loss = None
        if testloader is not None:
            print('Evaluating the test log likelihood...')
            test_loss = self.get_val_criteria(testloader, cuda=cuda)
            print('test llk: %.3f, sparsity: %s' % (test_loss, self.get_sparsity()))
        if save_freq is not None:
            self.save_net(epoch)
        # Pass the whole local namespace to the results logger (it picks out
        # the hyperparameters and loss histories it needs by name).
        self.record_final_result(**locals())
        return

    def save_net(self, epoch=1):
        """Checkpoint the whole module to model/<Class>-<hash>-<script>/<epoch>."""
        if not os.path.exists('model'):
            os.mkdir('model')
        # Name folders after the entry-point script (without ".py").
        fname = sys.argv[0].split('/')[-1][:-3]
        folder_name = 'model/%s-%s-%s' % (self.__class__.__name__, self.hash, fname)
        if not os.path.exists(folder_name):
            os.mkdir(folder_name)
        name = '%s/%s' % (folder_name, epoch)
        print(('save model: ' + name))
        torch.save(self, name)

    def record_final_result(real_self, **kwargs):
        """Dump a TSV of settings/results and a loss-curve PNG under results/.

        The receiver is named `real_self` (not `self`) because fit() passes
        **locals(), which already contains a 'self' key.
        """
        if not os.path.exists('results'):
            os.mkdir('results')
        fname = sys.argv[0].split('/')[-1][:-3]
        folder = 'results/%s-%s-%s' % (real_self.__class__.__name__,
                                       real_self.hash, fname)
        if not os.path.exists(folder):
            os.mkdir(folder)
        # Save Excel files
        filename = '%s/exp.tsv' % folder
        # Exp settings
        run_headers = ['lookahead', 'lr_patience', 'weight_lr', 'logitp_lr',
                       'decrease_logitp_lr', 'stochastic']
        run_values = [str(kwargs[h]) for h in run_headers]
        net_headers = ['ard_init', 'anneal', 'anneal_max', 'rw_max']
        net_values = [str(getattr(real_self, h)) for h in net_headers]
        # Exp result
        exp_headers = ['name', 'range', 'sparsity', 'test_loss', 'min_val_loss',
                       'min_train_pred', 'epoch']
        exp_vals = [str(real_self.hash), real_self.get_alpha_range(),
                    real_self.get_sparsity(), str(kwargs['test_loss'])] + \
                   [str(min(kwargs[h])) for h in ['val_loss', 'train_pred_loss']] + \
                   [str(kwargs['epoch'])]
        # Custom settings
        custum_header, custom_vals = [], []
        if hasattr(real_self, 'get_custom_settings'):
            custum_header, custom_vals = real_self.get_custom_settings()
        with open(filename, 'w') as op:
            print('\t'.join(exp_headers + run_headers + custum_header + net_headers),
                  file=op)
            print('\t'.join(exp_vals + run_values + custom_vals + net_values),
                  file=op)
        print('save exp:', filename)
        # Save Figs:
        filename = '%s/loss.png' % folder
        data = {
            'val_loss': kwargs['val_loss'],
            'train_pred_loss': kwargs['train_pred_loss'],
            'train_reg_loss': kwargs['train_reg_loss'],
        }
        df = pd.DataFrame.from_dict(data)
        ax = df.plot()
        ax.set_xlabel('epochs')
        ax.set_ylabel('NLL Loss (nat)')
        plt.savefig(filename)
        print('save figure:', filename)
nilq/baby-python
python
# coding: utf-8

"""
    Xero Payroll UK

    This is the Xero Payroll API for orgs in the UK region.  # noqa: E501

    OpenAPI spec version: 2.4.0
    Contact: api@xero.com
    Generated by: https://openapi-generator.tech
"""


import re  # noqa: F401

from xero_python.models import BaseModel


class LeavePeriod(BaseModel):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> OpenAPI type
    openapi_types = {
        "period_start_date": "date",
        "period_end_date": "date",
        "number_of_units": "float",
        "period_status": "str",
    }

    # Python attribute name -> JSON key in the API payload
    attribute_map = {
        "period_start_date": "periodStartDate",
        "period_end_date": "periodEndDate",
        "number_of_units": "numberOfUnits",
        "period_status": "periodStatus",
    }

    def __init__(
        self,
        period_start_date=None,
        period_end_date=None,
        number_of_units=None,
        period_status=None,
    ):  # noqa: E501
        """LeavePeriod - a model defined in OpenAPI"""  # noqa: E501
        self._period_start_date = None
        self._period_end_date = None
        self._number_of_units = None
        self._period_status = None
        self.discriminator = None

        # Only route non-None values through the property setters (the
        # period_status setter rejects None via its allowed-values check).
        if period_start_date is not None:
            self.period_start_date = period_start_date
        if period_end_date is not None:
            self.period_end_date = period_end_date
        if number_of_units is not None:
            self.number_of_units = number_of_units
        if period_status is not None:
            self.period_status = period_status

    @property
    def period_start_date(self):
        """Gets the period_start_date of this LeavePeriod.  # noqa: E501

        The Pay Period Start Date (YYYY-MM-DD)  # noqa: E501

        :return: The period_start_date of this LeavePeriod.  # noqa: E501
        :rtype: date
        """
        return self._period_start_date

    @period_start_date.setter
    def period_start_date(self, period_start_date):
        """Sets the period_start_date of this LeavePeriod.

        The Pay Period Start Date (YYYY-MM-DD)  # noqa: E501

        :param period_start_date: The period_start_date of this LeavePeriod.  # noqa: E501
        :type: date
        """

        self._period_start_date = period_start_date

    @property
    def period_end_date(self):
        """Gets the period_end_date of this LeavePeriod.  # noqa: E501

        The Pay Period End Date (YYYY-MM-DD)  # noqa: E501

        :return: The period_end_date of this LeavePeriod.  # noqa: E501
        :rtype: date
        """
        return self._period_end_date

    @period_end_date.setter
    def period_end_date(self, period_end_date):
        """Sets the period_end_date of this LeavePeriod.

        The Pay Period End Date (YYYY-MM-DD)  # noqa: E501

        :param period_end_date: The period_end_date of this LeavePeriod.  # noqa: E501
        :type: date
        """

        self._period_end_date = period_end_date

    @property
    def number_of_units(self):
        """Gets the number_of_units of this LeavePeriod.  # noqa: E501

        The Number of Units for the leave  # noqa: E501

        :return: The number_of_units of this LeavePeriod.  # noqa: E501
        :rtype: float
        """
        return self._number_of_units

    @number_of_units.setter
    def number_of_units(self, number_of_units):
        """Sets the number_of_units of this LeavePeriod.

        The Number of Units for the leave  # noqa: E501

        :param number_of_units: The number_of_units of this LeavePeriod.  # noqa: E501
        :type: float
        """

        self._number_of_units = number_of_units

    @property
    def period_status(self):
        """Gets the period_status of this LeavePeriod.  # noqa: E501

        Period Status  # noqa: E501

        :return: The period_status of this LeavePeriod.  # noqa: E501
        :rtype: str
        """
        return self._period_status

    @period_status.setter
    def period_status(self, period_status):
        """Sets the period_status of this LeavePeriod.

        Period Status  # noqa: E501

        :param period_status: The period_status of this LeavePeriod.  # noqa: E501
        :type: str
        """
        # Enum validation generated from the OpenAPI spec; note "None" here
        # is the literal string, not Python None.
        allowed_values = ["Approved", "Completed", "None"]  # noqa: E501
        if period_status not in allowed_values:
            raise ValueError(
                "Invalid value for `period_status` ({0}), must be one of {1}".format(  # noqa: E501
                    period_status, allowed_values
                )
            )

        self._period_status = period_status
nilq/baby-python
python
import time
import os
import sys
import getopt
import pytz
import re
import time

from datetime import datetime
from dotenv import load_dotenv

from monitoring import Monitoring
from untils import toBytes

load_dotenv()


class DockerMonitoring(Monitoring):
    """Snapshot per-container CPU/memory/network usage via `docker stats`.

    Each call to get_measurements() parses one `docker stats --no-stream`
    snapshot for containers matching `settings.targets`, stores the snapshot
    in the database and appends a CSV row to `settings.log_monitor_file`.
    """

    def __init__(self, database, settings):
        """
        @param database: passed through to the Monitoring base class
        @param settings: object providing `targets` (list of name patterns),
            `timezone` (tzinfo) and `log_monitor_file` (path)
        """
        super().__init__(database)
        self.settings = settings
        self.nb_containers = 0
        # Per-snapshot buffers; reset at the start of get_measurements().
        self.net_os = []
        self.net_is = []
        self.cpus = []
        self.names = []
        self.memories = []
        self.avg_cpu = 0.00
        self.avg_mem = 0.00
        self.avg_net_i = 0.00
        self.avg_net_o = 0.00

    def check_pattern(self, container):
        """Return True iff `container` matches any target pattern.

        Targets are treated as regex fragments wrapped in `.*...*`
        (i.e. substring-style matching).
        """
        for target in self.settings.targets:
            pattern = re.compile(r".*%s.*" % target)
            if pattern.match(container):
                return True
        # BUG FIX: the original ended with a bare `False` expression (no
        # `return`), so the method returned None on no match.
        return False

    def get_names(self):
        """Return the (suffix-stripped) names of the monitored containers."""
        names = []
        with os.popen("sudo docker stats --no-stream") as f:
            # Skip the `docker stats` header row (the original parsed it too).
            for s in f.readlines()[1:]:
                ss = s.split()
                if self.check_pattern(ss[1]):
                    names.append(ss[1].replace("example.com", ""))
        return names

    def get_measurements(self):
        """Take one stats snapshot, persist it and append it to the log file."""
        # BUG FIX: reset the per-snapshot buffers.  The original appended to
        # the instance lists on every call, so repeated polling accumulated
        # stale rows into the averages, the DB document and the log line.
        self.names = []
        self.cpus = []
        self.memories = []
        self.net_is = []
        self.net_os = []
        with os.popen("docker stats --no-stream") as f:
            for s in f.readlines()[1:]:
                ss = s.split()
                # Columns: 0=id 1=name 2=cpu% 3/4/5=mem usage 6=mem% 7=netI
                # 8='/' 9=netO ...; require enough fields for index 9
                # (original checked only len >= 3 and could IndexError).
                if len(ss) >= 10 and self.check_pattern(ss[1]):
                    name = ss[1].replace("example.com", "")
                    self.names.append(name)
                    cu = float(ss[2].replace("%", ""))
                    self.cpus.append(cu)
                    mem = float(ss[6].replace("%", ""))
                    self.memories.append(mem)
                    net_i = toBytes(ss[7])
                    net_o = toBytes(ss[9])
                    # toBytes returns None on unparseable values.
                    if net_o is None:
                        net_o = 0
                    if net_i is None:
                        net_i = 0
                    self.net_is.append(net_i)
                    self.net_os.append(net_o)
                    print("INFO: container %s: cpu %.2f%%, mem %.2f%%, net_i %d B, net_o %d B" % (
                        name, cu, mem, net_i, net_o))
        num = len(self.cpus)
        # -1 sentinels signal "no matching container" downstream.
        self.avg_cpu = sum(self.cpus) / num if num > 0 else -1
        self.avg_mem = sum(self.memories) / num if num > 0 else -1
        self.avg_net_i = sum(self.net_is) / num if num > 0 else -1
        self.avg_net_o = sum(self.net_os) / num if num > 0 else -1
        data = {
            "time": datetime.now(self.settings.timezone),
            "avgCPU": self.avg_cpu,
            "avgMEM": self.avg_mem,
            "avgNetI": self.avg_net_i,
            "avgNetO": self.avg_net_o,
            "containers": []
        }
        for i in range(len(self.names)):
            data["containers"].append({
                "name": self.names[i],
                "cpu": self.cpus[i],
                "mem": self.memories[i],
                "netI": self.net_is[i],
                "netO": self.net_os[i]
            })
        self.database_insertion(data)
        self.writeToFile(num, self.avg_cpu, self.avg_mem, self.avg_net_i,
                         self.avg_net_o, self.cpus, self.memories,
                         self.net_is, self.net_os)

    def writeToFile(self, num, avg_cpu, avg_mem, avg_net_i, avg_net_o,
                    cpus, memories, net_is, net_os):
        """Append one CSV row for the snapshot to the monitor log."""
        # BUG FIX: the original opened the file without ever closing it
        # (handle leak on every snapshot); use a context manager.
        with open(self.settings.log_monitor_file, "a") as log_file:
            log_file.write("%s,%d,%.2f,%.2f,%d,%d,%s\n" % (
                datetime.now().strftime("%H:%M:%S"), num, avg_cpu, avg_mem,
                avg_net_i, avg_net_o,
                ",".join("%.2f,%.2f,%.3f,%.3f" % (
                    cpus[i], memories[i], net_is[i], net_os[i])
                    for i in range(num))))

    def writeNamesToFile(self):
        """(Re)create the monitor log and write its CSV header line."""
        with open(self.settings.log_monitor_file, "w") as log_file:
            names = self.get_names()
            headline = "Time,Num,AvgCPU,AvgMEM,AvgNetI,AvgNetO,"
            for name in names:
                headline += name + "-CPU" + "," + name + "-mem" + "," + name + "-netI" + "," + name + "-netO" + ","
            headline = headline[:-1]
            headline += "\n"
            log_file.write(headline)
nilq/baby-python
python
# -*- coding: utf-8 -*- """Example 0 (no style, no lint, no documentation). First version of the example code (slide 10a), prior to applying any tool. """ def Calculate(A, B= {}, print = True): if A == None: if print: print('error: A is not valid') return elif A != None: if print: print('calculating ...', \ "Using ", A) C = {} C['orig'] = A #C['comp'] = A*2?????? C['comp'] = A *3.21868 return C
nilq/baby-python
python
import numpy as np

cimport numpy as np
cimport cython

from .utils import fillna, to_ndarray
from .c_utils cimport c_min, c_sum, c_sum_axis_0, c_sum_axis_1


cpdef ChiMerge(feature, target, n_bins = None, min_samples = None, min_threshold = None, nan = -1, balance = True):
    """Chi-Merge

    Iteratively merges adjacent feature-value groups whose pairwise
    chi-square statistic is minimal, until one of the stop conditions
    (n_bins / min_samples / min_threshold) is met.

    Args:
        feature (array-like): feature to be merged
        target (array-like): a array of target classes
        n_bins (int): n bins will be merged into
        min_samples (number): min sample in each group, if float, it will be the percentage of samples
        min_threshold (number): min threshold of chi-square
        nan: value used to fill missing entries in `feature`
        balance (bool): when True, weight each chi-square by its group total

    Returns:
        array: array of split points
    """
    # set default break condition
    if n_bins is None and min_samples is None and min_threshold is None:
        # NOTE(review): DEFAULT_BINS is not defined in this chunk — it is
        # presumably a module-level constant defined elsewhere; confirm.
        n_bins = DEFAULT_BINS

    # a float min_samples (< 1) is interpreted as a fraction of the samples
    if min_samples and min_samples < 1:
        min_samples = len(feature) * min_samples

    feature = fillna(feature, by = nan)

    target = to_ndarray(target)

    target_unique = np.unique(target)
    feature_unique = np.unique(feature)
    len_f = len(feature_unique)
    len_t = len(target_unique)

    # contingency table: one row per unique feature value, one column per class.
    # FIX: `np.float` (deprecated in NumPy 1.20, removed in 1.24) replaced by
    # the equivalent `np.float64` — required for the double[:,:] memoryview.
    cdef double [:,:] grouped = np.zeros((len_f, len_t), dtype=np.float64)
    for r in range(len_f):
        tmp = target[feature == feature_unique[r]]
        for c in range(len_t):
            grouped[r, c] = (tmp == target_unique[c]).sum()

    cdef double [:,:] couple
    cdef double [:] cols, rows, chi_list
    # cdef long [:] min_ix, drop_ix
    # cdef long[:] chi_ix
    cdef double chi, chi_min, total, e
    cdef int l, retain_ix, ix
    cdef Py_ssize_t i, j, k, p

    while(True):
        # break loop when reach n_bins
        if n_bins and len(grouped) <= n_bins:
            break

        # break loop if min samples of groups is greater than threshold
        if min_samples and c_min(c_sum_axis_1(grouped)) > min_samples:
            break

        # Calc chi square for each pair of adjacent groups
        l = len(grouped) - 1
        # FIX: same np.float -> np.float64 replacement as above.
        chi_list = np.zeros(l, dtype=np.float64)
        chi_min = np.inf
        # chi_ix = []
        for i in range(l):
            chi = 0
            couple = grouped[i:i+2,:]
            total = c_sum(couple)
            cols = c_sum_axis_0(couple)
            rows = c_sum_axis_1(couple)

            for j in range(couple.shape[0]):
                for k in range(couple.shape[1]):
                    # expected count under independence; skip zero expectations
                    e = rows[j] * cols[k] / total
                    if e != 0:
                        chi += (couple[j, k] - e) ** 2 / e

            # balance weight of chi
            if balance:
                chi *= total

            chi_list[i] = chi

            # track every index attaining the current minimum chi
            if chi == chi_min:
                chi_ix.append(i)
                continue

            if chi < chi_min:
                chi_min = chi
                chi_ix = [i]

        # break loop when the minimun chi greater the threshold
        if min_threshold and chi_min > min_threshold:
            break

        # get indexes of the groups who has the minimun chi
        min_ix = np.array(chi_ix)
        # min_ix = np.where(chi_list == chi_min)[0]

        # get the indexes witch needs to drop
        drop_ix = min_ix + 1

        # combine groups by indexes
        retain_ix = min_ix[0]
        last_ix = retain_ix
        for ix in min_ix:
            # set a new group
            if ix - last_ix > 1:
                retain_ix = ix

            # combine all contiguous indexes into one group
            for p in range(grouped.shape[1]):
                grouped[retain_ix, p] = grouped[retain_ix, p] + grouped[ix + 1, p]

            last_ix = ix

        # drop binned groups
        grouped = np.delete(grouped, drop_ix, axis = 0)
        feature_unique = np.delete(feature_unique, drop_ix)

    # first unique value is the implicit lower bound, not a split point
    return feature_unique[1:]
nilq/baby-python
python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json

from alipay.aop.api.constant.ParamConstants import *


class AlipayCommerceTransportEtcInfoModifyModel(object):
    """Request model for the ETC info-modify API.

    A plain field bag: every name in FIELD_NAMES is exposed as a get/set
    property backed by a ``_``-prefixed slot, serialized by to_alipay_dict()
    and deserialized by from_alipay_dict().
    """

    # Every serializable field, in the order the original accessors declared them.
    FIELD_NAMES = (
        "biz_agreement_no",
        "card_no",
        "device_no",
        "order_id",
        "out_biz_no",
        "plate_color",
        "plate_no",
        "user_id",
        "vi_ac",
        "vi_gross_mass",
        "vi_height",
        "vi_length",
        "vi_owner_name",
        "vi_width",
    )

    def __init__(self):
        # Initialize every backing slot to None.
        for field in self.FIELD_NAMES:
            setattr(self, "_" + field, None)

    def to_alipay_dict(self):
        """Serialize all truthy fields; nested models are expanded via their
        own to_alipay_dict(). Falsy values (None, "", 0, ...) are omitted,
        matching the upstream SDK convention."""
        params = dict()
        for field in self.FIELD_NAMES:
            value = getattr(self, field)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                params[field] = value.to_alipay_dict()
            else:
                params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a plain dict; returns None for a falsy input."""
        if not d:
            return None
        o = AlipayCommerceTransportEtcInfoModifyModel()
        for field in AlipayCommerceTransportEtcInfoModifyModel.FIELD_NAMES:
            if field in d:
                setattr(o, field, d[field])
        return o


def _attach_field_properties(cls):
    """Install a simple get/set property for every name in cls.FIELD_NAMES."""
    for field in cls.FIELD_NAMES:
        slot = "_" + field

        def _getter(self, _slot=slot):
            return getattr(self, _slot)

        def _setter(self, value, _slot=slot):
            setattr(self, _slot, value)

        setattr(cls, field, property(_getter, _setter))
    return cls


_attach_field_properties(AlipayCommerceTransportEtcInfoModifyModel)
nilq/baby-python
python
# Copyright (c) Microsoft Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Tests for the Chromium-only browser.start_tracing()/stop_tracing() API.
# `browser`, `page`, `server` and `tmpdir` are pytest fixtures supplied by
# the surrounding test harness.

import json
import os
from pathlib import Path

import pytest

from playwright.async_api import Browser, Page


@pytest.mark.only_browser("chromium")
async def test_should_output_a_trace(
    browser: Browser, page: Page, server, tmpdir: Path
):
    # Tracing to a file should yield a non-empty trace after one navigation.
    output_file = tmpdir / "trace.json"
    await browser.start_tracing(page=page, screenshots=True, path=output_file)
    await page.goto(server.PREFIX + "/grid.html")
    await browser.stop_tracing()
    assert os.path.getsize(output_file) > 0


@pytest.mark.only_browser("chromium")
async def test_should_create_directories_as_needed(
    browser: Browser, page: Page, server, tmpdir
):
    # Intermediate directories in the trace path must be created implicitly.
    output_file = tmpdir / "these" / "are" / "directories" / "trace.json"
    await browser.start_tracing(page=page, screenshots=True, path=output_file)
    await page.goto(server.PREFIX + "/grid.html")
    await browser.stop_tracing()
    assert os.path.getsize(output_file) > 0


@pytest.mark.only_browser("chromium")
async def test_should_run_with_custom_categories_if_provided(
    browser: Browser, page: Page, tmpdir: Path
):
    # Custom trace categories must be echoed into the trace metadata.
    output_file = tmpdir / "trace.json"
    await browser.start_tracing(
        page=page,
        screenshots=True,
        path=output_file,
        categories=["disabled-by-default-v8.cpu_profiler.hires"],
    )
    await browser.stop_tracing()
    with open(output_file, mode="r") as of:
        trace_json = json.load(of)
        assert (
            "disabled-by-default-v8.cpu_profiler.hires"
            in trace_json["metadata"]["trace-config"]
        )


@pytest.mark.only_browser("chromium")
async def test_should_throw_if_tracing_on_two_pages(
    browser: Browser, page: Page, tmpdir: Path
):
    # Only one tracing session per browser is allowed; a second start must raise.
    output_file_1 = tmpdir / "trace1.json"
    await browser.start_tracing(page=page, screenshots=True, path=output_file_1)
    output_file_2 = tmpdir / "trace2.json"
    with pytest.raises(Exception):
        await browser.start_tracing(page=page, screenshots=True, path=output_file_2)
    await browser.stop_tracing()


@pytest.mark.only_browser("chromium")
async def test_should_return_a_buffer(
    browser: Browser, page: Page, server, tmpdir: Path
):
    # stop_tracing() returns the trace bytes; they must match the file content.
    output_file = tmpdir / "trace.json"
    await browser.start_tracing(page=page, path=output_file, screenshots=True)
    await page.goto(server.PREFIX + "/grid.html")
    value = await browser.stop_tracing()
    with open(output_file, mode="r") as trace_file:
        assert trace_file.read() == value.decode()


@pytest.mark.only_browser("chromium")
async def test_should_work_without_options(browser: Browser, page: Page, server):
    # All start_tracing() arguments are optional.
    await browser.start_tracing()
    await page.goto(server.PREFIX + "/grid.html")
    trace = await browser.stop_tracing()
    assert trace


@pytest.mark.only_browser("chromium")
async def test_should_support_a_buffer_without_a_path(
    browser: Browser, page: Page, server
):
    # Without a path, the trace is only returned in memory.
    await browser.start_tracing(page=page, screenshots=True)
    await page.goto(server.PREFIX + "/grid.html")
    trace = await browser.stop_tracing()
    assert "screenshot" in trace.decode()
nilq/baby-python
python
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.

# pylint: disable=invalid-name

"""Test curve fitting base class."""
from test.base import QiskitExperimentsTestCase
from test.fake_experiment import FakeExperiment
from typing import List
import numpy as np
from qiskit.qobj.utils import MeasLevel
from uncertainties import correlated_values

from qiskit_experiments.curve_analysis import CurveAnalysis, fit_function
from qiskit_experiments.curve_analysis.curve_data import (
    SeriesDef,
    FitData,
    ParameterRepr,
    FitOptions,
)
from qiskit_experiments.data_processing import DataProcessor, Probability
from qiskit_experiments.exceptions import AnalysisError
from qiskit_experiments.framework import ExperimentData


def simulate_output_data(func, xvals, param_dict, **metadata):
    """Generate arbitrary fit data.

    Evaluates ``func`` at ``xvals`` to get ideal probabilities, converts them
    to two-outcome count dictionaries at a fixed shot number, and wraps the
    result in an ExperimentData with CLASSIFIED measurement level.
    """
    __shots = 100000

    expected_probs = func(xvals, **param_dict)
    counts = np.asarray(expected_probs * __shots, dtype=int)

    data = [
        {
            "counts": {"0": __shots - count, "1": count},
            # xval and filter metadata travel with each datum so the analysis
            # can allocate it to the right series.
            "metadata": dict(xval=xi, qubits=(0,), experiment_type="fake_experiment", **metadata),
        }
        for xi, count in zip(xvals, counts)
    ]

    expdata = ExperimentData(experiment=FakeExperiment())
    for datum in data:
        expdata.add_data(datum)

    expdata.metadata["meas_level"] = MeasLevel.CLASSIFIED

    return expdata


def create_new_analysis(series: List[SeriesDef], fixed_params: List[str] = None) -> CurveAnalysis:
    """A helper function to create a mock analysis class instance."""

    class TestAnalysis(CurveAnalysis):
        """A mock analysis class to test."""

        __series__ = series

        @classmethod
        def _default_options(cls):
            opts = super()._default_options()
            if fixed_params:
                # Values are filled in later via set_options(fixed_parameters=...).
                opts.fixed_parameters = {p: None for p in fixed_params}

            return opts

    return TestAnalysis()


class TestCurveAnalysisUnit(QiskitExperimentsTestCase):
    """Unittest for curve fit analysis."""

    class TestAnalysis(CurveAnalysis):
        """Fake analysis class for unittest."""

        # Three series sharing par0/par4 but each decaying with its own rate
        # parameter, selected from the data by the op1 filter value.
        __series__ = [
            SeriesDef(
                name="curve1",
                fit_func=lambda x, par0, par1, par2, par3, par4: fit_function.exponential_decay(
                    x, amp=par0, lamb=par1, baseline=par4
                ),
                filter_kwargs={"op1": 1, "op2": True},
                model_description=r"p_0 * \exp(p_1 x) + p4",
            ),
            SeriesDef(
                name="curve2",
                fit_func=lambda x, par0, par1, par2, par3, par4: fit_function.exponential_decay(
                    x, amp=par0, lamb=par2, baseline=par4
                ),
                filter_kwargs={"op1": 2, "op2": True},
                model_description=r"p_0 * \exp(p_2 x) + p4",
            ),
            SeriesDef(
                name="curve3",
                fit_func=lambda x, par0, par1, par2, par3, par4: fit_function.exponential_decay(
                    x, amp=par0, lamb=par3, baseline=par4
                ),
                filter_kwargs={"op1": 3, "op2": True},
                model_description=r"p_0 * \exp(p_3 x) + p4",
            ),
        ]

    def test_parsed_fit_params(self):
        """Test parsed fit params."""
        analysis = self.TestAnalysis()
        self.assertSetEqual(set(analysis.parameters), {"par0", "par1", "par2", "par3", "par4"})

    def test_cannot_create_invalid_series_fit(self):
        """Test we cannot create invalid analysis instance."""
        # Series with disjoint parameter lists are inconsistent by definition.
        invalid_series = [
            SeriesDef(
                name="fit1",
                fit_func=lambda x, par0: fit_function.exponential_decay(x, amp=par0),
            ),
            SeriesDef(
                name="fit2",
                fit_func=lambda x, par1: fit_function.exponential_decay(x, amp=par1),
            ),
        ]
        instance = create_new_analysis(series=invalid_series)
        with self.assertRaises(AnalysisError):
            # pylint: disable=pointless-statement
            instance.parameters  # fit1 has param par0 while fit2 has par1

    def test_data_extraction(self):
        """Test data extraction method."""
        xvalues = np.linspace(1.0, 5.0, 10)

        analysis = self.TestAnalysis()
        analysis.set_options(data_processor=DataProcessor("counts", [Probability("1")]))

        # data to analyze
        test_data0 = simulate_output_data(
            func=fit_function.exponential_decay,
            xvals=xvalues,
            param_dict={"amp": 1.0},
            op1=1,
            op2=True,
        )

        curve_data = analysis._run_data_processing(
            raw_data=test_data0.data(),
            series=analysis.__series__,
        )

        # check x values
        ref_x = xvalues
        np.testing.assert_array_almost_equal(curve_data.x, ref_x)

        # check y values
        ref_y = fit_function.exponential_decay(xvalues, amp=1.0)
        np.testing.assert_array_almost_equal(curve_data.y, ref_y, decimal=3)

        # check data allocation
        ref_alloc = np.zeros(10, dtype=int)
        self.assertListEqual(list(curve_data.data_allocation), list(ref_alloc))

    def test_data_extraction_with_subset(self):
        """Test data extraction method with multiple series."""
        xvalues = np.linspace(1.0, 5.0, 10)

        analysis = self.TestAnalysis()
        analysis.set_options(data_processor=DataProcessor("counts", [Probability("1")]))

        # data to analyze: two data sets distinguished by the op1 filter value
        test_data0 = simulate_output_data(
            func=fit_function.exponential_decay,
            xvals=xvalues,
            param_dict={"amp": 1.0},
            op1=1,
            op2=True,
        )
        test_data1 = simulate_output_data(
            func=fit_function.exponential_decay,
            xvals=xvalues,
            param_dict={"amp": 0.5},
            op1=2,
            op2=True,
        )

        # get subset belonging to curve1 only
        curve_data_of_1 = analysis._run_data_processing(
            raw_data=test_data0.data() + test_data1.data(),
            series=analysis.__series__,
        ).get_subset_of("curve1")

        # check x values
        ref_x = xvalues
        np.testing.assert_array_almost_equal(curve_data_of_1.x, ref_x)

        # check y values
        ref_y = fit_function.exponential_decay(xvalues, amp=1.0)
        np.testing.assert_array_almost_equal(curve_data_of_1.y, ref_y, decimal=3)

        # check data allocation
        ref_alloc = np.zeros(10, dtype=int)
        self.assertListEqual(list(curve_data_of_1.data_allocation), list(ref_alloc))

    def test_create_results(self):
        """Test creating analysis results."""
        analysis = self.TestAnalysis()
        analysis.set_options(
            result_parameters=["par0", ParameterRepr("par1", "Param1", "SomeUnit")],
        )

        pcov = np.diag(np.ones(5))
        popt = np.asarray([1.0, 2.0, 3.0, 4.0, 5.0])
        fit_params = correlated_values(popt, pcov)

        # NOTE(review): popt_keys lists six names for five popt values
        # ("par5" has no matching entry) — confirm this is intentional.
        fit_data = FitData(
            popt=fit_params,
            popt_keys=["par0", "par1", "par2", "par3", "par4", "par5"],
            pcov=pcov,
            reduced_chisq=2.0,
            dof=0,
            x_data=np.arange(5),
            y_data=np.arange(5),
        )

        outcomes = analysis._create_analysis_results(fit_data, quality="good", test_val=1)

        # entry name
        self.assertEqual(outcomes[0].name, "@Parameters_TestAnalysis")
        self.assertEqual(outcomes[1].name, "par0")
        self.assertEqual(outcomes[2].name, "Param1")

        # entry value
        self.assertEqual(outcomes[1].value, fit_params[0])
        self.assertEqual(outcomes[2].value, fit_params[1])

        # other metadata
        self.assertEqual(outcomes[2].quality, "good")
        self.assertEqual(outcomes[2].chisq, 2.0)
        ref_meta = {
            "test_val": 1,
            "unit": "SomeUnit",
        }
        self.assertDictEqual(outcomes[2].extra, ref_meta)

    def test_invalid_options(self):
        """Test setting invalid options."""
        analysis = self.TestAnalysis()

        class InvalidClass:
            """Dummy class."""

            pass

        with self.assertRaises(TypeError):
            analysis.set_options(data_processor=InvalidClass())

        with self.assertRaises(TypeError):
            analysis.set_options(curve_drawer=InvalidClass())


class TestCurveAnalysisIntegration(QiskitExperimentsTestCase):
    """Integration test for curve fit analysis through entire analysis.run function."""

    def setUp(self):
        super().setUp()
        self.xvalues = np.linspace(0.1, 1, 50)
        self.err_decimal = 2

    def test_run_single_curve_analysis(self):
        """Test analysis for single curve."""
        analysis = create_new_analysis(
            series=[
                SeriesDef(
                    name="curve1",
                    fit_func=lambda x, par0, par1, par2, par3: fit_function.exponential_decay(
                        x, amp=par0, lamb=par1, x0=par2, baseline=par3
                    ),
                    model_description=r"p_0 \exp(p_1 x + p_2) + p_3",
                )
            ],
        )
        ref_p0 = 0.9
        ref_p1 = 2.5
        ref_p2 = 0.0
        ref_p3 = 0.1

        test_data = simulate_output_data(
            func=fit_function.exponential_decay,
            xvals=self.xvalues,
            param_dict={"amp": ref_p0, "lamb": ref_p1, "x0": ref_p2, "baseline": ref_p3},
        )
        analysis.set_options(
            p0={"par0": ref_p0, "par1": ref_p1, "par2": ref_p2, "par3": ref_p3},
            result_parameters=[ParameterRepr("par1", "parameter_name", "unit")],
        )
        results, _ = analysis._run_analysis(test_data)
        result = results[0]

        ref_popt = np.asarray([ref_p0, ref_p1, ref_p2, ref_p3])

        # check result data
        np.testing.assert_array_almost_equal(result.value, ref_popt, decimal=self.err_decimal)
        self.assertEqual(result.extra["dof"], 46)
        self.assertListEqual(result.extra["popt_keys"], ["par0", "par1", "par2", "par3"])
        self.assertDictEqual(result.extra["fit_models"], {"curve1": r"p_0 \exp(p_1 x + p_2) + p_3"})

        # special entry formatted for database
        result = results[1]
        self.assertEqual(result.name, "parameter_name")
        self.assertEqual(result.extra["unit"], "unit")
        self.assertAlmostEqual(result.value.nominal_value, ref_p1, places=self.err_decimal)

    def test_run_single_curve_fail(self):
        """Test analysis returns status when it fails."""
        analysis = create_new_analysis(
            series=[
                SeriesDef(
                    name="curve1",
                    fit_func=lambda x, par0, par1, par2, par3: fit_function.exponential_decay(
                        x, amp=par0, lamb=par1, x0=par2, baseline=par3
                    ),
                )
            ],
        )
        ref_p0 = 0.9
        ref_p1 = 2.5
        ref_p2 = 0.0
        ref_p3 = 0.1

        test_data = simulate_output_data(
            func=fit_function.exponential_decay,
            xvals=self.xvalues,
            param_dict={"amp": ref_p0, "lamb": ref_p1, "x0": ref_p2, "baseline": ref_p3},
        )
        analysis.set_options(
            p0={"par0": ref_p0, "par1": ref_p1, "par2": ref_p2, "par3": ref_p3},
            bounds={"par0": [-10, 0], "par1": [-10, 0], "par2": [-10, 0], "par3": [-10, 0]},
            return_data_points=True,
        )

        # Try to fit with infeasible parameter boundary. This should fail.
        results, _ = analysis._run_analysis(test_data)

        # This returns only data point entry
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0].name, "@Data_TestAnalysis")

    def test_run_two_curves_with_same_fitfunc(self):
        """Test analysis for two curves. Curves shares fit model."""
        # NOTE(review): both series below are named "curve1" — the second is
        # likely intended to be "curve2"; confirm against upstream.
        analysis = create_new_analysis(
            series=[
                SeriesDef(
                    name="curve1",
                    fit_func=lambda x, par0, par1, par2, par3, par4: fit_function.exponential_decay(
                        x, amp=par0, lamb=par1, x0=par3, baseline=par4
                    ),
                    filter_kwargs={"exp": 0},
                ),
                SeriesDef(
                    name="curve1",
                    fit_func=lambda x, par0, par1, par2, par3, par4: fit_function.exponential_decay(
                        x, amp=par0, lamb=par2, x0=par3, baseline=par4
                    ),
                    filter_kwargs={"exp": 1},
                ),
            ],
        )
        ref_p0 = 0.9
        ref_p1 = 7.0
        ref_p2 = 5.0
        ref_p3 = 0.0
        ref_p4 = 0.1

        test_data0 = simulate_output_data(
            func=fit_function.exponential_decay,
            xvals=self.xvalues,
            param_dict={"amp": ref_p0, "lamb": ref_p1, "x0": ref_p3, "baseline": ref_p4},
            exp=0,
        )

        test_data1 = simulate_output_data(
            func=fit_function.exponential_decay,
            xvals=self.xvalues,
            param_dict={"amp": ref_p0, "lamb": ref_p2, "x0": ref_p3, "baseline": ref_p4},
            exp=1,
        )

        # merge two experiment data
        for datum in test_data1.data():
            test_data0.add_data(datum)

        analysis.set_options(
            p0={"par0": ref_p0, "par1": ref_p1, "par2": ref_p2, "par3": ref_p3, "par4": ref_p4}
        )
        results, _ = analysis._run_analysis(test_data0)
        result = results[0]

        ref_popt = np.asarray([ref_p0, ref_p1, ref_p2, ref_p3, ref_p4])

        # check result data
        np.testing.assert_array_almost_equal(result.value, ref_popt, decimal=self.err_decimal)

    def test_run_two_curves_with_two_fitfuncs(self):
        """Test analysis for two curves. Curves shares fit parameters."""
        analysis = create_new_analysis(
            series=[
                SeriesDef(
                    name="curve1",
                    fit_func=lambda x, par0, par1, par2, par3: fit_function.cos(
                        x, amp=par0, freq=par1, phase=par2, baseline=par3
                    ),
                    filter_kwargs={"exp": 0},
                ),
                SeriesDef(
                    name="curve2",
                    fit_func=lambda x, par0, par1, par2, par3: fit_function.sin(
                        x, amp=par0, freq=par1, phase=par2, baseline=par3
                    ),
                    filter_kwargs={"exp": 1},
                ),
            ],
        )
        ref_p0 = 0.1
        ref_p1 = 2
        ref_p2 = -0.3
        ref_p3 = 0.5

        test_data0 = simulate_output_data(
            func=fit_function.cos,
            xvals=self.xvalues,
            param_dict={"amp": ref_p0, "freq": ref_p1, "phase": ref_p2, "baseline": ref_p3},
            exp=0,
        )

        test_data1 = simulate_output_data(
            func=fit_function.sin,
            xvals=self.xvalues,
            param_dict={"amp": ref_p0, "freq": ref_p1, "phase": ref_p2, "baseline": ref_p3},
            exp=1,
        )

        # merge two experiment data
        for datum in test_data1.data():
            test_data0.add_data(datum)

        analysis.set_options(p0={"par0": ref_p0, "par1": ref_p1, "par2": ref_p2, "par3": ref_p3})
        results, _ = analysis._run_analysis(test_data0)
        result = results[0]

        ref_popt = np.asarray([ref_p0, ref_p1, ref_p2, ref_p3])

        # check result data
        np.testing.assert_array_almost_equal(result.value, ref_popt, decimal=self.err_decimal)

    def test_run_fixed_parameters(self):
        """Test analysis when some of parameters are fixed."""
        analysis = create_new_analysis(
            series=[
                SeriesDef(
                    name="curve1",
                    fit_func=lambda x, par0, par1, fixed_par2, par3: fit_function.cos(
                        x, amp=par0, freq=par1, phase=fixed_par2, baseline=par3
                    ),
                ),
            ],
            fixed_params=["fixed_par2"],
        )

        ref_p0 = 0.1
        ref_p1 = 2
        ref_p2 = -0.3
        ref_p3 = 0.5

        test_data = simulate_output_data(
            func=fit_function.cos,
            xvals=self.xvalues,
            param_dict={"amp": ref_p0, "freq": ref_p1, "phase": ref_p2, "baseline": ref_p3},
        )

        analysis.set_options(
            p0={"par0": ref_p0, "par1": ref_p1, "par3": ref_p3},
            fixed_parameters={"fixed_par2": ref_p2},
        )

        results, _ = analysis._run_analysis(test_data)
        result = results[0]

        # fixed_par2 is not fit, so only the three free parameters come back
        ref_popt = np.asarray([ref_p0, ref_p1, ref_p3])

        # check result data
        np.testing.assert_array_almost_equal(result.value, ref_popt, decimal=self.err_decimal)

    def test_fixed_param_is_missing(self):
        """Test raising an analysis error when fixed parameter is missing."""
        analysis = create_new_analysis(
            series=[
                SeriesDef(
                    name="curve1",
                    fit_func=lambda x, par0, par1, fixed_par2, par3: fit_function.cos(
                        x, amp=par0, freq=par1, phase=fixed_par2, baseline=par3
                    ),
                ),
            ],
            fixed_params=["fixed_p2"],
        )

        ref_p0 = 0.1
        ref_p1 = 2
        ref_p2 = -0.3
        ref_p3 = 0.5

        test_data = simulate_output_data(
            func=fit_function.cos,
            xvals=self.xvalues,
            param_dict={"amp": ref_p0, "freq": ref_p1, "phase": ref_p2, "baseline": ref_p3},
        )

        # do not define fixed_p2 here
        analysis.set_options(p0={"par0": ref_p0, "par1": ref_p1, "par3": ref_p3})

        with self.assertRaises(AnalysisError):
            analysis._run_analysis(test_data)


class TestFitOptions(QiskitExperimentsTestCase):
    """Unittest for fit option object."""

    def test_empty(self):
        """Test if default value is automatically filled."""
        opt = FitOptions(["par0", "par1", "par2"])

        # bounds should be default to inf tuple. otherwise crashes the scipy fitter.
        ref_opts = {
            "p0": {"par0": None, "par1": None, "par2": None},
            "bounds": {
                "par0": (-np.inf, np.inf),
                "par1": (-np.inf, np.inf),
                "par2": (-np.inf, np.inf),
            },
        }

        self.assertDictEqual(opt.options, ref_opts)

    def test_create_option_with_dict(self):
        """Create option and fill with dictionary."""
        opt = FitOptions(
            ["par0", "par1", "par2"],
            default_p0={"par0": 0, "par1": 1, "par2": 2},
            default_bounds={"par0": (0, 1), "par1": (1, 2), "par2": (2, 3)},
        )

        ref_opts = {
            "p0": {"par0": 0.0, "par1": 1.0, "par2": 2.0},
            "bounds": {"par0": (0.0, 1.0), "par1": (1.0, 2.0), "par2": (2.0, 3.0)},
        }

        self.assertDictEqual(opt.options, ref_opts)

    def test_create_option_with_array(self):
        """Create option and fill with array."""
        opt = FitOptions(
            ["par0", "par1", "par2"],
            default_p0=[0, 1, 2],
            default_bounds=[(0, 1), (1, 2), (2, 3)],
        )

        ref_opts = {
            "p0": {"par0": 0.0, "par1": 1.0, "par2": 2.0},
            "bounds": {"par0": (0.0, 1.0), "par1": (1.0, 2.0), "par2": (2.0, 3.0)},
        }

        self.assertDictEqual(opt.options, ref_opts)

    def test_override_partial_dict(self):
        """Create option and override value with partial dictionary."""
        opt = FitOptions(["par0", "par1", "par2"])
        opt.p0.set_if_empty(par1=3)

        ref_opts = {
            "p0": {"par0": None, "par1": 3.0, "par2": None},
            "bounds": {
                "par0": (-np.inf, np.inf),
                "par1": (-np.inf, np.inf),
                "par2": (-np.inf, np.inf),
            },
        }

        self.assertDictEqual(opt.options, ref_opts)

    def test_cannot_override_assigned_value(self):
        """Test cannot override already assigned value."""
        opt = FitOptions(["par0", "par1", "par2"])
        opt.p0.set_if_empty(par1=3)
        # second call is a no-op: the slot is no longer empty
        opt.p0.set_if_empty(par1=5)

        ref_opts = {
            "p0": {"par0": None, "par1": 3.0, "par2": None},
            "bounds": {
                "par0": (-np.inf, np.inf),
                "par1": (-np.inf, np.inf),
                "par2": (-np.inf, np.inf),
            },
        }

        self.assertDictEqual(opt.options, ref_opts)

    def test_can_override_assigned_value_with_dict_access(self):
        """Test override already assigned value with direct dict access."""
        opt = FitOptions(["par0", "par1", "par2"])
        opt.p0["par1"] = 3
        opt.p0["par1"] = 5

        ref_opts = {
            "p0": {"par0": None, "par1": 5.0, "par2": None},
            "bounds": {
                "par0": (-np.inf, np.inf),
                "par1": (-np.inf, np.inf),
                "par2": (-np.inf, np.inf),
            },
        }

        self.assertDictEqual(opt.options, ref_opts)

    def test_cannot_override_user_option(self):
        """Test cannot override already assigned value."""
        opt = FitOptions(["par0", "par1", "par2"], default_p0={"par1": 3})
        opt.p0.set_if_empty(par1=5)

        ref_opts = {
            "p0": {"par0": None, "par1": 3, "par2": None},
            "bounds": {
                "par0": (-np.inf, np.inf),
                "par1": (-np.inf, np.inf),
                "par2": (-np.inf, np.inf),
            },
        }

        self.assertDictEqual(opt.options, ref_opts)

    def test_set_operation(self):
        """Test if set works and duplicated entry is removed."""
        opt1 = FitOptions(["par0", "par1"], default_p0=[0, 1])
        opt2 = FitOptions(["par0", "par1"], default_p0=[0, 1])
        opt3 = FitOptions(["par0", "par1"], default_p0=[0, 2])

        opts = set()
        opts.add(opt1)
        opts.add(opt2)
        opts.add(opt3)

        # opt1 and opt2 are equal, so only two distinct options remain
        self.assertEqual(len(opts), 2)

    def test_detect_invalid_p0(self):
        """Test if invalid p0 raises Error."""
        with self.assertRaises(AnalysisError):
            # less element
            FitOptions(["par0", "par1", "par2"], default_p0=[0, 1])

    def test_detect_invalid_bounds(self):
        """Test if invalid bounds raises Error."""
        with self.assertRaises(AnalysisError):
            # less element
            FitOptions(["par0", "par1", "par2"], default_bounds=[(0, 1), (1, 2)])

        with self.assertRaises(AnalysisError):
            # not min-max tuple
            FitOptions(["par0", "par1", "par2"], default_bounds=[0, 1, 2])

        with self.assertRaises(AnalysisError):
            # max-min tuple
            FitOptions(["par0", "par1", "par2"], default_bounds=[(1, 0), (2, 1), (3, 2)])

    def test_detect_invalid_key(self):
        """Test if invalid key raises Error."""
        opt = FitOptions(["par0", "par1", "par2"])

        with self.assertRaises(AnalysisError):
            opt.p0.set_if_empty(par3=3)

    def test_set_extra_options(self):
        """Add extra fitter options."""
        opt = FitOptions(
            ["par0", "par1", "par2"], default_p0=[0, 1, 2], default_bounds=[(0, 1), (1, 2), (2, 3)]
        )
        opt.add_extra_options(ex1=0, ex2=1)

        ref_opts = {
            "p0": {"par0": 0.0, "par1": 1.0, "par2": 2.0},
            "bounds": {"par0": (0.0, 1.0), "par1": (1.0, 2.0), "par2": (2.0, 3.0)},
            "ex1": 0,
            "ex2": 1,
        }

        self.assertDictEqual(opt.options, ref_opts)

    def test_complicated(self):
        """Test for realistic operations for algorithmic guess with user options."""
        user_p0 = {"par0": 1, "par1": None}
        user_bounds = {"par0": None, "par1": (-100, 100)}

        opt = FitOptions(
            ["par0", "par1", "par2"],
            default_p0=user_p0,
            default_bounds=user_bounds,
        )

        # similar computation in algorithmic guess

        opt.p0.set_if_empty(par0=5)  # this is ignored because user already provided initial guess
        opt.p0.set_if_empty(par1=opt.p0["par0"] * 2 + 3)  # user provided guess propagates

        opt.bounds.set_if_empty(par0=(0, 10))  # this will be set
        opt.add_extra_options(fitter="algo1")

        opt1 = opt.copy()  # copy options while keeping previous values
        opt1.p0.set_if_empty(par2=opt1.p0["par0"] + opt1.p0["par1"])

        opt2 = opt.copy()
        opt2.p0.set_if_empty(par2=opt2.p0["par0"] * 2)  # add another p2 value

        ref_opt1 = {
            "p0": {"par0": 1.0, "par1": 5.0, "par2": 6.0},
            "bounds": {"par0": (0.0, 10.0), "par1": (-100.0, 100.0), "par2": (-np.inf, np.inf)},
            "fitter": "algo1",
        }

        ref_opt2 = {
            "p0": {"par0": 1.0, "par1": 5.0, "par2": 2.0},
            "bounds": {"par0": (0.0, 10.0), "par1": (-100.0, 100.0), "par2": (-np.inf, np.inf)},
            "fitter": "algo1",
        }

        self.assertDictEqual(opt1.options, ref_opt1)
        self.assertDictEqual(opt2.options, ref_opt2)


class TestBackwardCompatibility(QiskitExperimentsTestCase):
    """Test case for backward compatibility."""

    def test_old_fixed_param_attributes(self):
        """Test if old class structure for fixed param is still supported."""

        class _DeprecatedAnalysis(CurveAnalysis):
            __series__ = [
                SeriesDef(
                    fit_func=lambda x, par0, par1, par2, par3: fit_function.exponential_decay(
                        x, amp=par0, lamb=par1, x0=par2, baseline=par3
                    ),
                )
            ]

            # legacy mechanism: fixed parameters listed on the class itself
            __fixed_parameters__ = ["par1"]

            @classmethod
            def _default_options(cls):
                opts = super()._default_options()
                opts.par1 = 2

                return opts

        with self.assertWarns(DeprecationWarning):
            instance = _DeprecatedAnalysis()

        self.assertDictEqual(instance.options.fixed_parameters, {"par1": 2})

    def test_loading_data_with_deprecated_fixed_param(self):
        """Test loading old data with fixed parameters as standalone options."""

        class _DeprecatedAnalysis(CurveAnalysis):
            __series__ = [
                SeriesDef(
                    fit_func=lambda x, par0, par1, par2, par3: fit_function.exponential_decay(
                        x, amp=par0, lamb=par1, x0=par2, baseline=par3
                    ),
                )
            ]

        with self.assertWarns(DeprecationWarning):
            # old option data structure, i.e. fixed param as a standalone option
            # the analysis instance fixed parameters might be set via the experiment instance
            instance = _DeprecatedAnalysis.from_config({"options": {"par1": 2}})

        self.assertDictEqual(instance.options.fixed_parameters, {"par1": 2})
nilq/baby-python
python
from pathlib import Path
from typing import NamedTuple, List, Dict, Any
from os import fsync, rename

from .instrumentation_id import InstrumentationId
from util.atomic_file import atomic_write


def get_intrumentation_ids(config: Dict[str, Any]) -> List[InstrumentationId]:
    """Derive the list of enabled instrumentation ids from *config*.

    NOTE(review): the historical 'intrumentation' typo in the name is kept
    because callers import it under this name.

    :param config: mapping with the keys ``instrumentation_occupancy`` and
        ``instrumentation_code_injection`` (the latter is ``"none"`` when
        code injection is disabled).
    :returns: list of InstrumentationId values, possibly empty.
    """
    intrumentation_ids: List[InstrumentationId] = []
    if config["instrumentation_occupancy"]:
        intrumentation_ids.append(InstrumentationId.OCCUPANCY)
    # Any value other than the literal "none" names a metric to inject.
    if config["instrumentation_code_injection"] != "none":
        intrumentation_ids.append(
            InstrumentationId.get_metric_id(config["instrumentation_code_injection"])
        )
    return intrumentation_ids


class ModuleConfiguration(NamedTuple):
    """Runtime configuration handed to the NVBit module."""

    # Process ids whose GPU activity should be instrumented.
    pids_to_instrument: List[int]
    # Instrumentation functions applied to every listed pid.
    instrumentation_functions: List[InstrumentationId]


class ModuleConfigurationWriter:
    """Writes the NVBit module runtime configuration file atomically."""

    # FIX: was annotated ``Path`` while holding a str literal; the annotation
    # now matches the value (atomic_write/open accept a str path).
    __confFilePath: str = "/var/lib/dynatrace/oneagent/agent/runtime/nvbit-module-runtime.conf"
    __instrumentation_enabled: bool = False

    def __init__(self, instrumentation_enabled: bool):
        self.__instrumentation_enabled = instrumentation_enabled

    def write(self, config: ModuleConfiguration) -> None:
        """Write one ``<pid>:<id,id,...>`` line per instrumented pid.

        When instrumentation is disabled the file is still rewritten, but
        left empty — presumably to clear stale configuration; confirm that
        this truncation is what the NVBit module expects.
        """
        with atomic_write(self.__confFilePath) as confFile:
            if not self.__instrumentation_enabled:
                return
            # FIX: the id list does not depend on the pid, so it is now
            # computed once instead of once per pid.
            instrument_with = ','.join(
                str(id.value) for id in config.instrumentation_functions
            )
            for pid in config.pids_to_instrument:
                confFile.write(f"{pid}:{instrument_with}\n")
nilq/baby-python
python
"""Randomly wired network (RandWire) assembled from DAG layers.

Original header: Version 2.0, author CHEN JIE, 2020-10-12/2020-10-17.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F

from .node import NodeOp
from .dag_layer import DAGLayer
from .sep_conv import SeparableConv2d


class RandWire(nn.Module):
    """RandWire classifier.

    Two strided convolution stems, three DAG stages, a 1x1 projection to
    1280 channels, global average pooling and a linear classifier head.
    """

    def __init__(self, hp, graphs):
        super(RandWire, self).__init__()
        self.chn = hp.model.channel
        self.cls = hp.model.classes
        self.im = hp.model.input_maps
        # Layers are individual attributes (not nn.Sequential) so that
        # intermediate activations are easy to inspect while debugging.
        self.conv1 = nn.Conv2d(self.im, self.chn // 2, kernel_size=3, padding=1, stride=2)
        self.bn1 = nn.BatchNorm2d(self.chn // 2)
        self.conv2 = nn.Conv2d(self.chn // 2, self.chn, kernel_size=3, padding=1, stride=2)
        self.bn2 = nn.BatchNorm2d(self.chn)
        self.dagly3 = DAGLayer(self.chn, self.chn, graphs[0]['num_nodes'], graphs[0]['edges'])
        self.dagly4 = DAGLayer(self.chn, 2 * self.chn, graphs[1]['num_nodes'], graphs[1]['edges'])
        self.dagly5 = DAGLayer(2 * self.chn, 4 * self.chn, graphs[2]['num_nodes'], graphs[2]['edges'])
        self.convlast = nn.Conv2d(4 * self.chn, 1280, kernel_size=1)
        self.bnlast = nn.BatchNorm2d(1280)
        self.fc = nn.Linear(1280, self.cls)

    def forward(self, y):
        """Map a [B, im, 224, 224] batch to log-probabilities [B, cls]."""
        out = self.bn1(self.conv1(y))                 # [B, chn//2, 112, 112]
        out = self.bn2(self.conv2(F.relu(out)))       # [B, chn, 56, 56]
        out = self.dagly3(out)                        # [B, chn, 28, 28]
        out = self.dagly4(out)                        # [B, 2*chn, 14, 14]
        out = self.dagly5(out)                        # [B, 4*chn, 7, 7]
        out = self.bnlast(self.convlast(F.relu(out))) # [B, 1280, 7, 7]
        out = F.adaptive_avg_pool2d(out, (1, 1))      # [B, 1280, 1, 1]
        out = out.view(out.size(0), -1)               # [B, 1280]
        return F.log_softmax(self.fc(out), dim=1)     # [B, cls]
nilq/baby-python
python
from django.http import HttpResponse, HttpResponseRedirect
from django.conf import settings
from django.contrib.auth import login
from django.contrib.sites.shortcuts import get_current_site
from django.utils.encoding import force_bytes, force_text
from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode
from django.template.loader import render_to_string
from django.core.mail import EmailMessage

from datachimp.models.user import User
from datachimp.models.invitation import Invitation
from datachimp.models.membership import Membership
from datachimp.serializers.invitation import InvitationSerializer
from rest_framework import status, generics
from rest_framework.response import Response
from datachimp.api_permissions import HasProjectMembership
from rest_framework.permissions import IsAuthenticated


class SendInviteAPI(generics.CreateAPIView):
    """Create an Invitation and email the invite link to the recipient."""

    serializer_class = InvitationSerializer
    queryset = Invitation.objects.all()
    permission_classes = (IsAuthenticated, HasProjectMembership)

    def create(self, request, *args, **kwargs):
        """Validate the invitation, persist it, then send the invite email.

        Returns 201 only when the email was actually sent; the saved
        invitation is rolled back if sending fails.
        """
        data = request.data.copy()
        # The inviter is always the authenticated requester.
        data['from_user'] = request.user.id
        serializer = self.serializer_class(data=data)
        if serializer.is_valid():
            saved_instance = serializer.save()
            project = serializer.validated_data['project']
            # Build the email body with a base64-encoded invitation id.
            current_site = request.META['HTTP_HOST']
            mail_subject = 'datachimp: You have been invited to join %s' % (project.name,)
            message = render_to_string('email/invitation_email.html', {
                'domain': current_site,
                'invite_id': urlsafe_base64_encode(force_bytes(saved_instance.id)).decode('utf-8'),
                'project_name': project.name
            })
            to_email = serializer.validated_data['to_email']
            email = EmailMessage(
                mail_subject, message, to=[to_email]
            )
            try:
                email.send()
            except Exception as e:
                # BUG FIX: previously the invitation was deleted here but the
                # view still fell through to HTTP 201, reporting success for
                # an invite that no longer existed. Now report failure.
                print(e)
                saved_instance.delete()
                return Response(status=status.HTTP_500_INTERNAL_SERVER_ERROR)
            return Response(status=status.HTTP_201_CREATED)
        # NOTE(review): a validation failure conventionally maps to 400, but
        # 500 is kept here to preserve the existing API contract.
        return Response(status=status.HTTP_500_INTERNAL_SERVER_ERROR)


def invite_clicked(request, invite_id, *args, **kwargs):
    """Handle an invite-link click: record the click, then route the user.

    Existing users are added to the project (if needed), logged in and
    redirected to the project page; unknown users go to signup/invite.
    """
    try:
        iid = force_text(urlsafe_base64_decode(invite_id))
    except (TypeError, ValueError, OverflowError, User.DoesNotExist):
        return HttpResponse('This invitation link is no longer valid!')

    # Save the clicked event.
    # NOTE(review): Invitation.DoesNotExist is not caught here, so a stale id
    # that decodes cleanly raises — confirm whether that is intended.
    invite_object = Invitation.objects.get(pk=iid)
    invite_object.invite_clicked = True
    invite_object.save()

    # Existing user: ensure membership, log them in, go to the project.
    try:
        user = User.objects.get(email=invite_object.to_email)
        try:
            Membership.objects.get(project=invite_object.project, user=user)
        except Membership.DoesNotExist:
            Membership.objects.create(project=invite_object.project, user=user)
        login(request, user, settings.AUTHENTICATION_BACKENDS[0])
        return HttpResponseRedirect('/project/' + str(invite_object.project.id))
    except User.DoesNotExist:
        pass

    # Unknown user: enterprise builds get the invite page, others signup.
    if settings.ENTERPRISE_FLAG:
        return HttpResponseRedirect('/invite/' + invite_id)
    return HttpResponseRedirect('/signup/' + invite_id)
nilq/baby-python
python
import numpy as np
import scipy.sparse as ss
import pandas as pd
import anndata as ad


def filter_genecounts_percent(adata, cell_fraction, median_count_above_zero):
    """Filter genes by their total count across all cells.

    The cutoff is ``round(n_cells * cell_fraction * thresh)`` where
    ``thresh`` is the median of all non-zero counts plus
    ``median_count_above_zero``; genes whose summed counts do not exceed
    the cutoff are dropped.

    FIX: the previous docstring documented parameters (``pheno``,
    ``percent_cells``, ``small_pheno_frac``) that are not in the signature.

    :param adata: anndata object to be filtered
    :param cell_fraction: fraction of cells expected to contain the gene
    :param median_count_above_zero: count added to the median non-zero count
    :returns: filtered anndata object; the cutoff is stored in
        ``adata.uns["total_gene_thresh"]``
    """
    # Densify sparse matrices so the same numpy ops apply to both layouts.
    if ss.issparse(adata.X):
        matdense = adata.X.toarray()
    else:
        matdense = adata.X
    abovezero = matdense[matdense > 0]
    thresh = np.median(abovezero) + median_count_above_zero
    total_gene_count_thresh = np.round(matdense.shape[0] * cell_fraction * thresh)
    adata.uns["total_gene_thresh"] = total_gene_count_thresh
    adata = adata[:, (matdense.sum(0) > total_gene_count_thresh)]
    return adata


def filter_genecounts_numcells(adata, count_threshold, min_expressed_cells):
    """Keep genes expressed above ``count_threshold`` in more than
    ``min_expressed_cells`` cells.

    :param adata: anndata object to be filtered
    :param count_threshold: per-cell count a gene must exceed to count as
        expressed in that cell
    :param min_expressed_cells: minimum number of expressing cells
    :returns: filtered anndata object; the cell cutoff is stored in
        ``adata.uns["num_cells_thresh"]``
    """
    num_cells_thresh = min_expressed_cells
    if ss.issparse(adata.X):
        matdense = adata.X.toarray()
    else:
        matdense = adata.X
    num_cells_filter_indices = (
        np.greater(matdense, count_threshold).sum(0) > num_cells_thresh
    )
    adata = adata[:, num_cells_filter_indices]
    adata.uns["num_cells_thresh"] = num_cells_thresh
    return adata


def get_top_genes(adata, i):
    """Return a DataFrame of genes for factor ``i`` sorted by z-score.

    Rows are indexed by gene name (``adata.var.index``); columns are
    ``z_score``, ``Fs_diff`` and ``pval``, read from the matching arrays in
    ``adata.uns``.
    """
    # FIX: dropped the redundant function-local ``import pandas as pd``;
    # pandas is already imported at module level.
    sigs = adata.var.index.to_list()
    zscore = adata.uns["zscores"][:, i].tolist()
    floadings = adata.uns["Fs_diff"][:, i].tolist()
    pvals = adata.uns["pval_mat"][:, i].tolist()
    hum = pd.DataFrame([zscore, floadings, pvals]).T
    hum.index = sigs
    hum.columns = ["z_score", "Fs_diff", "pval"]
    return hum.sort_values("z_score", ascending=False)
nilq/baby-python
python
#! /data/sever/python/bin/python # -*- coding:utf-8 -*- """ @author: 'root' @date: '9/30/16' """ __author__ = 'root' import time import datetime from lib.utils import format_list from lib.mongo import MongoClient from lib.crawler import Crawler from lib.excel import Excel M = MongoClient() def f(): with open('/home/abc/Projects/bias_classification/data/new_export_data2.txt', 'r') as f: datas = f.readlines() print M.db.tbk_disp.remove() for item in datas: cate, title = item.split(' ', 1) title = title.replace("\n", "") if M.db.tbk_disp.find({"title": title}).count(): print "continue", title continue M.db.tbk_disp.insert({"category": cate, "title": title}) if __name__ == "__main__": f()
nilq/baby-python
python
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 25 18:54:45 2022

@author: balas

Scrape job postings from jobindex.dk for a chosen region and search term,
then dump the results to a CSV file.
"""

import requests
from bs4 import BeautifulSoup
import pandas as pd


def extract(location, tag, page):
    """Fetch one jobindex.dk result page and return it as BeautifulSoup.

    :param location: region slug used in the URL path
    :param tag: search query string
    :param page: 1-based result page number
    """
    # Send a browser User-Agent: some webservers block requests that
    # identify themselves as bots/scrapers.
    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.82 Safari/537.36"}
    url = f"https://www.jobindex.dk/jobsoegning/{location}?page={page}&q={tag}"
    # BUG FIX: headers must be passed by keyword — the second positional
    # argument of requests.get() is ``params``, so the User-Agent header
    # was never actually sent.
    r = requests.get(url, headers=headers)
    soup = BeautifulSoup(r.content.decode("utf-8"), "html.parser")
    return soup


# Accumulates one dict per job post across all scraped pages.
joblist = []


def transform(soup):
    """Append a dict per job post found in *soup* to the global joblist."""
    # Each job post lives in a div with this class.
    divs = soup.find_all("div", class_="jobsearch-result")
    for item in divs:
        title = item.find_all("b")[0].text.strip()
        company = item.find_all("b")[1].text.strip()
        published_date = item.find("time").text.strip()
        summary = item.find_all("p")[1].text.strip()
        job_location = item.find_all("p")[0].text.strip()
        job_url = item.select_one('[data-click*="u="]:has(> b)')['href']
        joblist.append({
            "title": title,
            "company": company,
            "published_date": published_date,
            "summary": summary,
            "job_location": job_location,
            "Job_url": job_url,
        })


keywords2 = input("Hvad søger du?: ")

område = ["storkoebenhavn", "nordsjaelland", "region-sjaelland"]
print("Vælg det ønsket jobområde: ")
for nummer, navn in enumerate(område, start=1):
    print("Mulighed: ", nummer, navn)

valg = int(input("Vælg det ønsket nummer: "))
# BUG FIX: validate before indexing — an out-of-range choice previously
# raised IndexError (or silently wrapped for negative numbers) and an
# invalid choice fell through with an integer region.
if 1 <= valg <= len(område):
    keywords1 = område[valg - 1]
    print("Du har valgt ", keywords1)
else:
    print("område ikke på liste")
    raise SystemExit

# BUG FIX: the loop previously ignored its counter and always requested
# page 0, scraping the same first page nine times.
for page in range(1, 10):
    c = extract(keywords1, keywords2, page)
    transform(c)

# Convert the collected posts to a DataFrame and write them out.
df = pd.DataFrame(joblist)
df.to_csv('Jobpost_ '+str(keywords2)+'.csv', index=False, encoding='utf-8-sig')
print("Finished")
nilq/baby-python
python
"""Packaging script for the hyperfit distribution."""
import pathlib

from setuptools import setup

# NOTE(review): importing the package to read __version__ requires src/ to
# be importable at build time — confirm this survives isolated (PEP 517)
# builds where only setup dependencies are installed.
from src.hyperfit import __version__

here = pathlib.Path(__file__).parent.resolve()

# Get the long description from the README file
long_description = (here / "README.md").read_text(encoding="utf-8")

setup(
    name="hyperfit",
    version=__version__,
    description="Properly fit data with x and y errors",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/CullanHowlett/HyperFit",
    author="Cullan Howlett",
    author_email="cullan.howlett@gmail.com",
    # NOTE(review): classifiers advertise only Python 3.9 while
    # python_requires allows >=3.7 — confirm the intended support range.
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Science/Research",
        "Topic :: Scientific/Engineering",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 3.9",
    ],
    # The package lives under src/ (src-layout).
    package_dir={"": "src"},
    packages=["hyperfit"],
    python_requires=">=3.7, <4",
    install_requires=[
        "numpy>=1.20.0",
        "scipy>=1.6.0",
        "zeus-mcmc>=2.3.0",
        "pandas>=1.2.0",
        "emcee>=3.0.0",
        "snowline>=0.5.0",
    ],
    # Ship the bundled example data files with the package.
    package_data={"hyperfit": ["data/*.txt"]},
    project_urls={
        "Bug Reports": "https://github.com/CullanHowlett/HyperFit/issues",
    },
)
nilq/baby-python
python
"""Pytest fixtures providing canned CI environments for DeploymentDirector."""
from __future__ import absolute_import

import pytest

from DeploymentDirector.director import Context
from DeploymentDirector.rules import Match

# def pytest_generate_tests(metafunc):
#   if 'context' in metafunc.fixturenames:
#     metafunc.parametrize("context", envs.keys(), indirect=True)

# Canned Codeship-style environment variable sets, keyed by scenario name.
# Each entry is fed to Context() via the parametrized `context` fixture.
envs = {
    'complete': {
        'CI_BRANCH': 'master',
        'CI_BUILD_ID': '1313-313131-3183818-3131',
        'CI_COMMITTER_EMAIL': 'boss@company.com',
        'CI_COMMITTER_NAME': 'Your Boss',
        'CI_COMMITTER_USERNAME': 'da_b0ss',
        'CI_COMMIT_DESCRIPTION': 'Revise Salaries',
        'CI_COMMIT_ID': 'd6cd1e2bd19e03a81132a23b2025920577f84e37',
        'CI_COMMIT_MESSAGE': 'just yours',
        'CI_NAME': 'codeship',
        'CI_PROJECT_ID': 'abcdef-aabbcc-ffffff-ababab',
        'CI_REPO_NAME': 'company-salaries',
        'CI_STRING_TIME': '2017-10-13T10:14:23+00:00',
        'CI_TIMESTAMP': '1507889663',
        'EXTRA_KEY': 'extra_value',
    }
}


@pytest.fixture(params=list(envs.keys()), ids=list(envs.keys()))
def context(request):
    # One Context per entry in `envs`; the param id names the scenario.
    return Context(options={'ci_name': 'codeship'}, env=envs[request.param])


@pytest.fixture
def match(context):
    # A Match with no matcher clause over the parametrized context.
    return Match(matcher_clause=None, context=context)


@pytest.fixture
def match_branch_as_DEFAULT(context):
    # Same as `match`, but with the branch pre-matched as 'DEFAULT'.
    return Match(matcher_clause=None, context=context, matched_as={'branch': 'DEFAULT'})
nilq/baby-python
python
from django.db import models


class ShortenedUrl(models.Model):
    """Mapping from a numeric short-URL id to the original long URL."""

    # Primary key is supplied by the application rather than auto-generated
    # (BigIntegerField with primary_key=True, no AutoField).
    id = models.BigIntegerField(primary_key=True)
    # The full destination URL; blank=False/null=False are the Django
    # defaults, kept explicit here to state the intent.
    long_url = models.TextField(blank=False, null=False)
nilq/baby-python
python
#!/usr/bin/env python3 ''' Converted to Python 6/00 by Jason Petrone /* * Copyright (c) 1993-1997, Silicon Graphics, Inc. * ALL RIGHTS RESERVED * Permission to use, copy, modify, and distribute this software for * any purpose and without fee is hereby granted, provided that the above * copyright notice appear in all copies and that both the copyright notice * and this permission notice appear in supporting documentation, and that * the name of Silicon Graphics, Inc. not be used in advertising * or publicity pertaining to distribution of the software without specific, * written prior permission. * * THE MATERIAL EMBODIED ON THIS SOFTWARE IS PROVIDED TO YOU "AS-IS" * AND WITHOUT WARRANTY OF ANY KIND, EXPRESS, IMPLIED OR OTHERWISE, * INCLUDING WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY OR * FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT SHALL SILICON * GRAPHICS, INC. BE LIABLE TO YOU OR ANYONE ELSE FOR ANY DIRECT, * SPECIAL, INCIDENTAL, INDIRECT OR CONSEQUENTIAL DAMAGES OF ANY * KIND, OR ANY DAMAGES WHATSOEVER, INCLUDING WITHOUT LIMITATION, * LOSS OF PROFIT, LOSS OF USE, SAVINGS OR REVENUE, OR THE CLAIMS OF * THIRD PARTIES, WHETHER OR NOT SILICON GRAPHICS, INC. HAS BEEN * ADVISED OF THE POSSIBILITY OF SUCH LOSS, HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, ARISING OUT OF OR IN CONNECTION WITH THE * POSSESSION, USE OR PERFORMANCE OF THIS SOFTWARE. * * US Government Users Restricted Rights * Use, duplication, or disclosure by the Government is subject to * restrictions set forth in FAR 52.227.19(c)(2) or subparagraph * (c)(1)(ii) of the Rights in Technical Data and Computer Software * clause at DFARS 252.227-7013 and/or in similar or successor * clauses in the FAR or the DOD or NASA FAR Supplement. * Unpublished-- rights reserved under the copyright laws of the * United States. Contractor/manufacturer is Silicon Graphics, * Inc., 2011 N. Shoreline Blvd., Mountain View, CA 94039-7311. * * OpenGL(R) is a registered trademark of Silicon Graphics, Inc. 
*/ ''' # hello.c # This is a simple, introductory OpenGL program. import sys from OpenGL.GLUT import * from OpenGL.GL import * from OpenGL.GLU import * def display(): # clear all pixels glClear(GL_COLOR_BUFFER_BIT) # draw white polygon (rectangle) with corners at # (0.25, 0.25, 0.0) and (0.75, 0.75, 0.0) glColor3f(1.0, 1.0, 1.0) glBegin(GL_POLYGON) glVertex3f(0.25, 0.25, 0.0) glVertex3f(0.75, 0.25, 0.0) glVertex3f(0.75, 0.75, 0.0) glVertex3f(0.25, 0.75, 0.0) glEnd() # don't wait! # start processing buffered OpenGL routines glFlush() def init(): # select clearing color glClearColor(0.0, 0.0, 0.0, 0.0) # initialize viewing values glMatrixMode(GL_PROJECTION) glLoadIdentity() glOrtho(0.0, 1.0, 0.0, 1.0, -1.0, 1.0) # Declare initial window size, position, and display mode # (single buffer and RGBA). Open window with "hello" # in its title bar. Call initialization routines. # Register callback function to display graphics. # Enter main loop and process events. def main(): global window glutInit(sys.argv) glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB) glutInitWindowSize(250, 250) glutInitWindowPosition(100, 100) glutCreateWindow('Hello') init() glutDisplayFunc(display) glutMainLoop() if __name__ == '__main__': main()
nilq/baby-python
python
"""Support for GitHub.""" from datetime import timedelta import logging import github import voluptuous as vol from homeassistant.components.sensor import PLATFORM_SCHEMA from homeassistant.const import ( ATTR_NAME, CONF_ACCESS_TOKEN, CONF_NAME, CONF_PATH, CONF_URL, ) import homeassistant.helpers.config_validation as cv from homeassistant.helpers.entity import Entity _LOGGER = logging.getLogger(__name__) CONF_REPOS = "repositories" ATTR_LATEST_COMMIT_MESSAGE = "latest_commit_message" ATTR_LATEST_COMMIT_SHA = "latest_commit_sha" ATTR_LATEST_RELEASE_URL = "latest_release_url" ATTR_LATEST_OPEN_ISSUE_URL = "latest_open_issue_url" ATTR_OPEN_ISSUES = "open_issues" ATTR_LATEST_OPEN_PULL_REQUEST_URL = "latest_open_pull_request_url" ATTR_OPEN_PULL_REQUESTS = "open_pull_requests" ATTR_PATH = "path" ATTR_STARGAZERS = "stargazers" DEFAULT_NAME = "GitHub" SCAN_INTERVAL = timedelta(seconds=300) REPO_SCHEMA = vol.Schema( {vol.Required(CONF_PATH): cv.string, vol.Optional(CONF_NAME): cv.string} ) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Required(CONF_ACCESS_TOKEN): cv.string, vol.Optional(CONF_URL): cv.url, vol.Required(CONF_REPOS): vol.All(cv.ensure_list, [REPO_SCHEMA]), } ) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the GitHub sensor platform.""" sensors = [] for repository in config[CONF_REPOS]: data = GitHubData( repository=repository, access_token=config.get(CONF_ACCESS_TOKEN), server_url=config.get(CONF_URL), ) if data.setup_error is True: _LOGGER.error( "Error setting up GitHub platform. 
%s", "Check previous errors for details", ) return sensors.append(GitHubSensor(data)) add_entities(sensors, True) class GitHubSensor(Entity): """Representation of a GitHub sensor.""" def __init__(self, github_data): """Initialize the GitHub sensor.""" self._unique_id = github_data.repository_path self._name = None self._state = None self._available = False self._repository_path = None self._latest_commit_message = None self._latest_commit_sha = None self._latest_release_url = None self._open_issue_count = None self._latest_open_issue_url = None self._pull_request_count = None self._latest_open_pr_url = None self._stargazers = None self._github_data = github_data @property def name(self): """Return the name of the sensor.""" return self._name @property def unique_id(self): """Return unique ID for the sensor.""" return self._unique_id @property def state(self): """Return the state of the sensor.""" return self._state @property def available(self): """Return True if entity is available.""" return self._available @property def device_state_attributes(self): """Return the state attributes.""" return { ATTR_PATH: self._repository_path, ATTR_NAME: self._name, ATTR_LATEST_COMMIT_MESSAGE: self._latest_commit_message, ATTR_LATEST_COMMIT_SHA: self._latest_commit_sha, ATTR_LATEST_RELEASE_URL: self._latest_release_url, ATTR_LATEST_OPEN_ISSUE_URL: self._latest_open_issue_url, ATTR_OPEN_ISSUES: self._open_issue_count, ATTR_LATEST_OPEN_PULL_REQUEST_URL: self._latest_open_pr_url, ATTR_OPEN_PULL_REQUESTS: self._pull_request_count, ATTR_STARGAZERS: self._stargazers, } @property def icon(self): """Return the icon to use in the frontend.""" return "mdi:github-circle" def update(self): """Collect updated data from GitHub API.""" self._github_data.update() self._name = self._github_data.name self._repository_path = self._github_data.repository_path self._available = self._github_data.available self._latest_commit_message = self._github_data.latest_commit_message self._latest_commit_sha = 
self._github_data.latest_commit_sha self._latest_release_url = self._github_data.latest_release_url self._state = self._github_data.latest_commit_sha[0:7] self._open_issue_count = self._github_data.open_issue_count self._latest_open_issue_url = self._github_data.latest_open_issue_url self._pull_request_count = self._github_data.pull_request_count self._latest_open_pr_url = self._github_data.latest_open_pr_url self._stargazers = self._github_data.stargazers class GitHubData: """GitHub Data object.""" def __init__(self, repository, access_token=None, server_url=None): """Set up GitHub.""" self._github = github self.setup_error = False try: if server_url is not None: server_url += "/api/v3" self._github_obj = github.Github(access_token, base_url=server_url) else: self._github_obj = github.Github(access_token) self.repository_path = repository[CONF_PATH] repo = self._github_obj.get_repo(self.repository_path) except self._github.GithubException as err: _LOGGER.error("GitHub error for %s: %s", self.repository_path, err) self.setup_error = True return self.name = repository.get(CONF_NAME, repo.name) self.available = False self.latest_commit_message = None self.latest_commit_sha = None self.latest_release_url = None self.open_issue_count = None self.latest_open_issue_url = None self.pull_request_count = None self.latest_open_pr_url = None self.stargazers = None def update(self): """Update GitHub Sensor.""" try: repo = self._github_obj.get_repo(self.repository_path) self.stargazers = repo.stargazers_count open_issues = repo.get_issues(state="open", sort="created") if open_issues is not None: self.open_issue_count = open_issues.totalCount if open_issues.totalCount > 0: self.latest_open_issue_url = open_issues[0].html_url open_pull_requests = repo.get_pulls(state="open", sort="created") if open_pull_requests is not None: self.pull_request_count = open_pull_requests.totalCount if open_pull_requests.totalCount > 0: self.latest_open_pr_url = open_pull_requests[0].html_url 
latest_commit = repo.get_commits()[0] self.latest_commit_sha = latest_commit.sha self.latest_commit_message = latest_commit.commit.message releases = repo.get_releases() if releases and releases.totalCount > 0: self.latest_release_url = releases[0].html_url self.available = True except self._github.GithubException as err: _LOGGER.error("GitHub error for %s: %s", self.repository_path, err) self.available = False
nilq/baby-python
python
# RL4RS RLlib driver script.
#
# Usage: python <script> ALGO STAGE [EXTRA_CONFIG_DICT]
#   ALGO   — e.g. DQN, PPO, A2C, PG, IMPALA, DDPG, optionally suffixed with
#            "_rawstate" / containing "conti" to toggle observation/action modes.
#   STAGE  — 'train', 'eval', or 'ope' (off-policy evaluation).
#   EXTRA_CONFIG_DICT — Python-literal dict merged over the default config.
import os
import numpy as np
import gym
import ray
from ray.rllib.models import ModelCatalog
from ray.tune.registry import register_env
from rl4rs.env.slate import SlateRecEnv, SlateState
from rl4rs.env.seqslate import SeqSlateRecEnv, SeqSlateState
from rl4rs.utils.rllib_print import pretty_print
from rl4rs.nets.rllib.rllib_rawstate_model import getTFModelWithRawState
from rl4rs.nets.rllib.rllib_mask_model import getMaskActionsModel, \
    getMaskActionsModelWithRawState
from rl4rs.utils.rllib_vector_env import MyVectorEnvWrapper
from script.modelfree_trainer import get_rl_model
from rl4rs.policy.behavior_model import behavior_model
from script.offline_evaluation import ope_eval
from rl4rs.utils.fileutil import find_newest_files
import http.client

# Force HTTP/1.0 so the remote env server connection is closed per request.
http.client.HTTPConnection._http_vsn = 10
http.client.HTTPConnection._http_vsn_str = 'HTTP/1.0'

import sys

algo = sys.argv[1]
stage = sys.argv[2]
# SECURITY NOTE(review): eval() on a CLI argument executes arbitrary code —
# acceptable only for a trusted research script; consider ast.literal_eval.
extra_config = eval(sys.argv[3]) if len(sys.argv) >= 4 else {}
ray.init()

# Default experiment configuration; any key can be overridden via argv[3].
config = {"epoch": 10000, "maxlen": 64, "batch_size": 64, "action_size": 284,
          "class_num": 2, "dense_feature_num": 432, "category_feature_num": 21,
          "category_hash_size": 100000, "seq_num": 2, "emb_size": 128,
          "is_eval": False, "hidden_units": 128, "max_steps": 9,
          "action_emb_size": 32,
          "sample_file": '../output/rl4rs_dataset_a_shuf.csv',
          "model_file": "../output/rl4rs_dataset_a_dnn/model",
          "iteminfo_file": '../dataset/item_info.csv',
          "support_rllib_mask": True,
          "remote_base": 'http://127.0.0.1:16773',
          'env': "SlateRecEnv-v0"}

config = dict(config, **extra_config)

# Sequential slate env plays 4 pages of 9 steps -> 36 steps; shrink the batch
# so batch_size * max_steps stays comparable.
if config['env'] == 'SeqSlateRecEnv-v0':
    config['max_steps'] = 36
    config['batch_size'] = config['batch_size'] // 4

# Continuous-action algorithms cannot use the discrete action mask.
if algo == "DDPG" or 'conti' in algo:
    config['support_conti_env'] = True
    config['support_rllib_mask'] = False

if 'rawstate' in algo:
    config['rawstate_as_obs'] = True

print(extra_config, config)

# Register the three custom model variants with RLlib's model catalog.
mask_model = getMaskActionsModel(true_obs_shape=(256,), action_size=config['action_size'])
ModelCatalog.register_custom_model("mask_model", mask_model)
mask_model_rawstate = getMaskActionsModelWithRawState(config=config, action_size=config['action_size'])
ModelCatalog.register_custom_model("mask_model_rawstate", mask_model_rawstate)
model_rawstate = getTFModelWithRawState(config=config)
ModelCatalog.register_custom_model("model_rawstate", model_rawstate)

# The env is served over HTTP; wrap it so RLlib sees a vectorized env.
register_env('rllibEnv-v0',
             lambda _: MyVectorEnvWrapper(
                 gym.make('HttpEnv-v0', env_id=config['env'], config=config),
                 config['batch_size']))

# NOTE(review): 'trial_name' has no default above — it must arrive via
# extra_config or this line raises KeyError. Confirm callers always pass it.
modelfile = algo + '_' + config['env'] + '_' + config['trial_name']
output_dir = os.environ['rl4rs_output_dir']
checkpoint_dir = '%s/ray_results/%s/' % (output_dir, modelfile)
# Locate the newest checkpoint dir, then the newest checkpoint file inside it;
# strip a trailing extension (e.g. ".tune_metadata") if present.
restore_dir = find_newest_files('checkpoint*', checkpoint_dir)
restore_file = find_newest_files('checkpoint*', restore_dir)
restore_file = restore_file[:restore_file.rfind('.')] \
    if '.' in restore_file.split('/')[-1] \
    else restore_file

# algo = "DQN"
# algo = "PPO"
# Per-algorithm RLlib config overrides. Each branch additionally swaps in the
# appropriate custom model for the rawstate / continuous / masked variants.
if algo == "DDPG" or algo == "DDPG_rawstate":
    assert config['support_conti_env'] == True
    cfg = {
        "exploration_config": {
            "type": "OrnsteinUhlenbeckNoise",
        },
    }
    if 'rawstate' in algo or config.get('rawstate_as_obs', False):
        cfg = dict({
            **cfg,
            "model": {
                "custom_model": "model_rawstate",
            }})
elif algo == "DQN" or algo == "DQN_rawstate":
    cfg = {
        # TODO(ekl) we need to set these to prevent the masked values
        # from being further processed in DistributionalQModel, which
        # would mess up the masking. It is possible to support these if we
        # defined a custom DistributionalQModel that is aware of masking.
        "hiddens": [],
        "dueling": False,
        # Whether to use double dqn
        "double_q": True,
        # N-step Q learning
        "n_step": 1,
        "target_network_update_freq": 200,
        # === Replay buffer ===
        # Size of the replay buffer in batches (not timesteps!).
        "buffer_size": 100000,
        # 'rollout_fragment_length': 200,
        # "num_workers": 0,
        "model": {
            "custom_model": "mask_model",
        },
    }
    if 'rawstate' in algo or config.get('rawstate_as_obs', False):
        cfg = dict({
            **cfg,
            "model": {
                "custom_model": "mask_model_rawstate",
            }})
elif "PPO" in algo:
    cfg = {
        "num_workers": 2,
        "use_critic": True,
        # If true, use the Generalized Advantage Estimator (GAE)
        # with a value function, see https://arxiv.org/pdf/1506.02438.pdf.
        "use_gae": True,
        # The GAE (lambda) parameter.
        "lambda": 1.0,
        # Initial coefficient for KL divergence.
        "kl_coeff": 0.2,
        # # Size of batches collected from each worker.
        # "rollout_fragment_length": 256,
        # # Number of timesteps collected for each SGD round. This defines the size
        # # of each SGD epoch.
        # "train_batch_size": 2048,
        # Total SGD batch size across all devices for SGD. This defines the
        # minibatch size within each epoch.
        "sgd_minibatch_size": 256,
        # Whether to shuffle sequences in the batch when training (recommended).
        "shuffle_sequences": True,
        # Number of SGD iterations in each outer loop (i.e., number of epochs to
        # execute per train batch).
        "num_sgd_iter": 1,
        # Stepsize of SGD.
        "lr": 0.0001,
        # Coefficient of the value function loss. IMPORTANT: you must tune this if
        # you set vf_share_layers=True inside your model's config.
        "vf_loss_coeff": 0.5,
        # PPO clip parameter.
        "clip_param": 0.3,
        # Clip param for the value function. Note that this is sensitive to the
        # scale of the rewards. If your expected V is large, increase this.
        "vf_clip_param": 500.0,
        # If specified, clip the global norm of gradients by this amount.
        # "grad_clip": 10.0,
        # Target value for KL divergence.
        "kl_target": 0.01,
    }
    is_rawstate = 'rawstate' in algo or config.get('rawstate_as_obs', False)
    is_conti = 'conti' in algo or config.get('support_conti_env', False)
    if is_conti:
        assert config['support_conti_env'] == True
        cfg = dict({
            **cfg,
            "exploration_config": {
                "type": "StochasticSampling",
            }})
    if is_rawstate and is_conti:
        cfg = dict({
            **cfg,
            "model": {
                "custom_model": "model_rawstate",
            }})
    elif is_conti:
        pass
    elif is_rawstate:
        cfg = dict({
            **cfg,
            "model": {
                "custom_model": "mask_model_rawstate",
            }})
    else:
        cfg = dict({
            **cfg,
            "model": {
                "vf_share_layers": False,
                "custom_model": "mask_model",
            }})
elif "A2C" in algo:
    cfg = {
        # Should use a critic as a baseline (otherwise don't use value baseline;
        # required for using GAE).
        "use_critic": True,
        # If true, use the Generalized Advantage Estimator (GAE)
        # with a value function, see https://arxiv.org/pdf/1506.02438.pdf.
        "use_gae": True,
        # GAE(gamma) parameter
        "lambda": 1.0,
        # Max global norm for each gradient calculated by worker
        "grad_clip": 10.0,
        # Learning rate
        "lr": 0.0001,
        # Value Function Loss coefficient
        "vf_loss_coeff": 0.5,
        # Entropy coefficient
        "entropy_coeff": 0.01,
        # Min time per iteration
        "min_iter_time_s": 5,
        # "num_workers": 0,
    }
    is_rawstate = 'rawstate' in algo or config.get('rawstate_as_obs', False)
    is_conti = 'conti' in algo or config.get('support_conti_env', False)
    if is_conti:
        assert config['support_conti_env'] == True
        cfg = dict({
            **cfg,
            "exploration_config": {
                "type": "StochasticSampling",
            }})
    if is_rawstate and is_conti:
        cfg = dict({
            **cfg,
            "model": {
                "custom_model": "model_rawstate",
            }})
    elif is_conti:
        pass
    elif is_rawstate:
        # NOTE(review): unlike the other branches, the A2C rawstate variant
        # also disables GAE and switches to epsilon-greedy exploration.
        cfg = dict({
            **cfg,
            "use_gae": False,
            "exploration_config": {
                "type": "EpsilonGreedy",
                "final_epsilon": 0.1,
                "epsilon_timesteps": 100000,
            },
            "model": {
                "custom_model": "mask_model_rawstate",
            }})
    else:
        cfg = dict({
            **cfg,
            "model": {
                "custom_model": "mask_model",
            }})
elif "PG" in algo:
    cfg = {
        # "num_workers": 0,
        "lr": 0.0004,
        # "exploration_config": {
        #     "type": "EpsilonGreedy",
        #     "final_epsilon": 0.15,
        # }
    }
    is_rawstate = 'rawstate' in algo or config.get('rawstate_as_obs', False)
    is_conti = 'conti' in algo or config.get('support_conti_env', False)
    if is_conti:
        assert config['support_conti_env'] == True
        cfg = dict({
            **cfg,
            "exploration_config": {
                "type": "StochasticSampling",
            }})
    if is_rawstate and is_conti:
        cfg = dict({
            **cfg,
            "model": {
                "custom_model": "model_rawstate",
            }})
    elif is_conti:
        pass
    elif is_rawstate:
        cfg = dict({
            **cfg,
            "model": {
                "custom_model": "mask_model_rawstate",
            }})
    else:
        cfg = dict({
            **cfg,
            "model": {
                "custom_model": "mask_model",
            }})
elif "IMPALA" in algo:
    cfg = {
        # "rollout_fragment_length": 9,
        "min_iter_time_s": 10,
        "num_workers": 2,
        # Learning params.
        "grad_clip": 10.0,
        # Either "adam" or "rmsprop".
        "opt_type": "adam",
        "lr": 0.0001,
        # Balancing the three losses.
        "vf_loss_coeff": 0.5,
        "entropy_coeff": 0.01,
        "batch_mode": "truncate_episodes",
        # "_separate_vf_optimizer": True,
        # "_lr_vf": 0.0001,
    }
    is_rawstate = 'rawstate' in algo or config.get('rawstate_as_obs', False)
    is_conti = 'conti' in algo or config.get('support_conti_env', False)
    if is_conti:
        assert config['support_conti_env'] == True
        cfg = dict({
            **cfg,
            "exploration_config": {
                "type": "StochasticSampling",
            }})
    if is_rawstate and is_conti:
        cfg = dict({
            **cfg,
            "model": {
                "custom_model": "model_rawstate",
            }})
    elif is_conti:
        pass
    elif is_rawstate:
        cfg = dict({
            **cfg,
            "model": {
                "custom_model": "mask_model_rawstate",
            }})
    else:
        cfg = dict({
            **cfg,
            "model": {
                "custom_model": "mask_model",
            }})
else:
    raise Exception

# Base RLlib config shared by all algorithms; per-algo cfg overrides it.
rllib_config = dict(
    {
        "env": "rllibEnv-v0",
        "gamma": 1,
        "explore": True,
        "exploration_config": {
            "type": "SoftQ",
            # "temperature": 1.0,
        },
        "num_gpus": 1 if config.get('gpu', True) else 0,
        "num_workers": 0,
        "framework": 'tf',
        # "framework": 'tfe',
        "rollout_fragment_length": config['max_steps'],
        "batch_mode": "complete_episodes",
        "train_batch_size": min(config["batch_size"] * config['max_steps'], 1024),
        "evaluation_interval": 500,
        "evaluation_num_episodes": 2048 * 4,
        "evaluation_config": {
            "explore": False
        },
        "log_level": "INFO",
    },
    **cfg)

print('rllib_config', rllib_config)
trainer = get_rl_model(algo.split('_')[0], rllib_config)

if stage == 'train':
    # trainer.restore(restore_file)
    # print('model restore from %s' % (restore_file))
    for i in range(config["epoch"]):
        result = trainer.train()
        # Log on the first iteration and every 500th thereafter.
        if (i + 1) % 500 == 0 or i == 0:
            print(pretty_print(result))
        if (i + 1) % 500 == 0:
            checkpoint = trainer.save(checkpoint_dir=checkpoint_dir)
            print("checkpoint saved at", checkpoint)

if stage == 'eval':
    # Online evaluation against the HTTP env with a large batch.
    eval_config = config.copy()
    eval_config['is_eval'] = True
    eval_config['batch_size'] = 2048
    eval_env = gym.make('HttpEnv-v0', env_id=eval_config['env'], config=eval_config)
    # trainer.restore(checkpoint_dir + '/checkpoint_010000/checkpoint-10000')
    trainer.restore(restore_file)
    print('model restore from %s' % (restore_file))
    episode_reward = 0
    done = False
    epoch = 4
    actions = []
    for i in range(epoch):
        obs = eval_env.reset()
        # (i + 0.0001) avoids division by zero on the first pass.
        print('test batch at ', i, 'avg reward',
              episode_reward / eval_config['batch_size'] / (i + 0.0001))
        for _ in range(config["max_steps"]):
            # compute_actions expects a dict of per-agent observations.
            obs = dict(enumerate(obs))
            action = trainer.compute_actions(obs, explore=False)
            action = np.array(list(action.values()))
            obs, reward, done, info = eval_env.step(action)
            episode_reward += sum(reward)
            actions.append(action)
    print('avg reward', episode_reward / eval_config['batch_size'] / epoch)
    eval_env.close()

if stage == 'ope':
    # Off-policy evaluation against a local simulator plus a logged policy.
    dataset_dir = os.environ['rl4rs_dataset_dir']
    sample_model = behavior_model(config, modelfile=dataset_dir + '/logged_policy.h5')
    trainer.restore(restore_file)
    print('model restore from %s' % (restore_file))
    eval_config = config.copy()
    eval_config["epoch"] = 1
    eval_config['is_eval'] = True
    eval_config["batch_size"] = 2048
    if config['env'] == 'SeqSlateRecEnv-v0':
        config['max_steps'] = 36
        sim = SeqSlateRecEnv(eval_config, state_cls=SeqSlateState)
        eval_env = gym.make('SeqSlateRecEnv-v0', recsim=sim)
    else:
        sim = SlateRecEnv(eval_config, state_cls=SlateState)
        eval_env = gym.make('SlateRecEnv-v0', recsim=sim)
    ope_eval(eval_config, eval_env, trainer, sample_model=sample_model)

ray.shutdown()
nilq/baby-python
python
"""Tasks module All tasks run via external message queue (via celery) are defined within. NB: a celery worker must be started for these to ever return. See `celery_worker.py` """ from datetime import datetime from functools import wraps import json from traceback import format_exc from celery.utils.log import get_task_logger from flask import current_app from requests import Request, Session from requests.exceptions import RequestException from sqlalchemy import and_ from .database import db from .dogpile_cache import dogpile_cache from .factories.app import create_app from .factories.celery import create_celery from .models.assessment_status import ( invalidate_assessment_status_cache, overall_assessment_status, ) from .models.communication import Communication from .models.communication_request import queue_outstanding_messages from .models.questionnaire_bank import QuestionnaireBank from .models.reporting import generate_and_send_summaries, get_reporting_stats from .models.role import ROLE, Role from .models.scheduled_job import check_active, update_job_status from .models.tou import update_tous from .models.user import User, UserRoles # To debug, stop the celeryd running out of /etc/init, start in console: # celery worker -A portal.celery_worker.celery --loglevel=debug # Import rdb and use like pdb: # from celery.contrib import rdb # rdb.set_trace() # Follow instructions from celery console, i.e. 
telnet 127.0.0.1 6900 logger = get_task_logger(__name__) celery = create_celery(create_app()) def scheduled_task(func): @wraps(func) def call_and_update(*args, **kwargs): job_id = kwargs.get('job_id') manual_run = kwargs.get('manual_run') if not manual_run and job_id and not check_active(job_id): message = "Job id `{}` inactive.".format(job_id) logger.debug(message) return message try: before = datetime.now() output = func(*args, **kwargs) duration = datetime.now() - before message = ('{} ran in {} ' 'seconds.'.format(func.__name__, duration.seconds)) if output: message += " {}".format(output) current_app.logger.debug(message) except Exception as exc: message = ("Unexpected exception in `{}` " "on {} : {}".format(func.__name__, job_id, exc)) logger.error(message) logger.error(format_exc()) if job_id: update_job_status(job_id, status=message) return message return call_and_update @celery.task(name="tasks.add") def add(x, y): return x + y @celery.task(name="tasks.info") def info(): return "BROKER_URL: {} <br/> SERVER_NAME: {}".format( current_app.config.get('BROKER_URL'), current_app.config.get('SERVER_NAME')) @celery.task(name="tasks.post_request", bind=True) def post_request(self, url, data, timeout=10, retries=3): """Wrap requests.post for asyncronous posts - includes timeout & retry""" logger.debug("task: %s retries:%s", self.request.id, self.request.retries) s = Session() req = Request('POST', url, data=data) prepped = req.prepare() try: resp = s.send(prepped, timeout=timeout) if resp.status_code < 400: logger.info("{} received from {}".format(resp.status_code, url)) else: logger.error("{} received from {}".format(resp.status_code, url)) except RequestException as exc: """Typically raised on timeout or connection error retry after countdown seconds unless retry threshold has been exceeded """ logger.warn("{} on {}".format(exc.message, url)) if self.request.retries < retries: raise self.retry(exc=exc, countdown=20) else: logger.error( "max retries exceeded for 
{}, last failure: {}".format( url, exc)) except Exception as exc: logger.error("Unexpected exception on {} : {}".format(url, exc)) @celery.task @scheduled_task def test(**kwargs): return "Test" @celery.task @scheduled_task def test_args(*args, **kwargs): alist = ",".join(args) klist = json.dumps(kwargs) return "{}|{}".format(",".join(args), json.dumps(kwargs)) @celery.task @scheduled_task def cache_reporting_stats(**kwargs): """Populate reporting dashboard stats cache Reporting stats can be a VERY expensive lookup - cached for an hour at a time. This task is responsible for renewing the potentially stale cache. Expected to be called as a scheduled job. """ dogpile_cache.invalidate(get_reporting_stats) dogpile_cache.refresh(get_reporting_stats) @celery.task @scheduled_task def cache_assessment_status(**kwargs): """Populate assessment status cache Assessment status is an expensive lookup - cached for an hour at a time. This task is responsible for renewing the potentially stale cache. Expected to be called as a scheduled job. 
""" update_patient_loop(update_cache=True, queue_messages=False) @celery.task @scheduled_task def prepare_communications(**kwargs): """Move any ready communications into prepared state """ update_patient_loop(update_cache=False, queue_messages=True) def update_patient_loop(update_cache=True, queue_messages=True): """Function to loop over valid patients and update as per settings Typically called as a scheduled_job - also directly from tests """ patient_role_id = Role.query.filter( Role.name == ROLE.PATIENT.value).with_entities(Role.id).first()[0] valid_patients = User.query.join( UserRoles).filter( and_(User.id == UserRoles.user_id, User.deleted_id.is_(None), UserRoles.role_id == patient_role_id)) now = datetime.utcnow() for user in valid_patients: if update_cache: dogpile_cache.invalidate(overall_assessment_status, user.id) dogpile_cache.refresh(overall_assessment_status, user.id) if queue_messages: qbd = QuestionnaireBank.most_current_qb(user=user, as_of_date=now) if qbd.questionnaire_bank: queue_outstanding_messages( user=user, questionnaire_bank=qbd.questionnaire_bank, iteration_count=qbd.iteration) db.session.commit() @celery.task @scheduled_task def send_queued_communications(**kwargs): "Look for communication objects ready to send" send_messages() def send_messages(): """Function to send all queued messages Typically called as a scheduled_job - also directly from tests """ ready = Communication.query.filter(Communication.status == 'preparation') for communication in ready: current_app.logger.debug("Collate ready communication {}".format( communication)) communication.generate_and_send() db.session.commit() def send_user_messages(user, force_update=False): """Send queued messages to only given user (if found) @param user: to email @param force_update: set True to force reprocessing of cached data and queue any messages previously overlooked. Triggers a send for any messages found in a prepared state ready for transmission. 
""" ready, reason = user.email_ready() if not ready: raise ValueError("Cannot send messages to {user}; {reason}".format( user=user, reason=reason)) if force_update: invalidate_assessment_status_cache(user_id=user.id) qbd = QuestionnaireBank.most_current_qb( user=user, as_of_date=datetime.utcnow()) if qbd.questionnaire_bank: queue_outstanding_messages( user=user, questionnaire_bank=qbd.questionnaire_bank, iteration_count=qbd.iteration) count = 0 ready = Communication.query.join(User).filter( Communication.status == 'preparation').filter(User == user) for communication in ready: current_app.logger.debug("Collate ready communication {}".format( communication)) communication.generate_and_send() db.session.commit() count += 1 message = "Sent {} messages to {}".format(count, user.email) if force_update: message += " after forced update" return message @celery.task @scheduled_task def send_questionnaire_summary(**kwargs): "Generate and send a summary of questionnaire counts to all Staff in org" cutoff_days = kwargs['cutoff_days'] org_id = kwargs['org_id'] error_emails = generate_and_send_summaries(cutoff_days, org_id) if error_emails: return ('\nUnable to reach recipient(s): ' '{}'.format(', '.join(error_emails))) @celery.task @scheduled_task def update_tous_task(**kwargs): """Job to manage updates for various ToUs Scheduled task, see docs in ``tou.update_tous()`` """ return update_tous(**kwargs) @celery.task @scheduled_task def token_watchdog(**kwargs): """Clean up stale tokens and alert service sponsors if nearly expired""" from .models.auth import token_janitor error_emails = token_janitor() if error_emails: return '\nUnable to reach recipient(s): {}'.format( ', '.join(error_emails))
nilq/baby-python
python
if x == 3: print("bye")
nilq/baby-python
python
from django.apps import AppConfig class SteveConfig(AppConfig): name = 'steve'
nilq/baby-python
python
name = input("What is the name of the gift giver?") present = input("What is the present they gave you?") print() age = input("How old were you on your birthday?") yourName = input("What is your name?") print("Dear " + name + ", ") print("") print("Thank you for the " + present + ". ") print("I really like it. I can't believe ") print("I am already " + age + " years old, but ") print("it does not feel much different than being ") print(str(int(age)-1) + ".") print("") print("Sincerely,") print("") print(yourName)
nilq/baby-python
python
from enum import Enum class IndType(Enum): CONFIRMED = 'Confirmed' DECEASED = 'Deceased' RECOVERED = 'Recovered'
nilq/baby-python
python
""" An exceptionally lousy site spider Ken Kinder <ken@kenkinder.com> This module gives an example of how the TaskClient interface to the IPython controller works. Before running this script start the IPython controller and some engines using something like:: ipcluster -n 4 """ from twisted.python.failure import Failure from IPython.kernel import client import time fetchParse = """ from twisted.web import microdom import urllib2 import urlparse def fetchAndParse(url, data=None): links = [] try: page = urllib2.urlopen(url, data=data) except Exception: return links else: if page.headers.type == 'text/html': doc = microdom.parseString(page.read(), beExtremelyLenient=True) for node in doc.getElementsByTagName('a'): if node.getAttribute('href'): links.append(urlparse.urljoin(url, node.getAttribute('href'))) return links """ class DistributedSpider(object): # Time to wait between polling for task results. pollingDelay = 0.5 def __init__(self, site): self.tc = client.TaskClient() self.rc = client.MultiEngineClient() self.rc.execute(fetchParse) self.allLinks = [] self.linksWorking = {} self.linksDone = {} self.site = site def visitLink(self, url): if url not in self.allLinks: self.allLinks.append(url) if url.startswith(self.site): print ' ', url self.linksWorking[url] = self.tc.run(client.StringTask('links = fetchAndParse(url)', pull=['links'], push={'url': url})) def onVisitDone(self, result, url): print url, ':' self.linksDone[url] = None del self.linksWorking[url] if isinstance(result.failure, Failure): txt = result.failure.getTraceback() for line in txt.split('\n'): print ' ', line else: for link in result.ns.links: self.visitLink(link) def run(self): self.visitLink(self.site) while self.linksWorking: print len(self.linksWorking), 'pending...' self.synchronize() time.sleep(self.pollingDelay) def synchronize(self): for url, taskId in self.linksWorking.items(): # Calling get_task_result with block=False will return None if the # task is not done yet. 
This provides a simple way of polling. result = self.tc.get_task_result(taskId, block=False) if result is not None: self.onVisitDone(result, url) def main(): distributedSpider = DistributedSpider(raw_input('Enter site to crawl: ')) distributedSpider.run() if __name__ == '__main__': main()
nilq/baby-python
python
# -*- coding: utf-8 -*- import sys from sqlalchemy import create_engine from sqlalchemy import Column, Integer, String from sqlalchemy.ext.declarative import declarative_base import os SQLALCHEMY_DATABASE_URI = None if 'DATABASE_URI' in os.environ: SQLALCHEMY_DATABASE_URI = os.getenv("DATABASE_URI") else: SQLALCHEMY_DATABASE_URI = "postgresql+psycopg2://testuser:testpassword@localhost:5432/postgres" Base = declarative_base() def db_connect(): """ Performs database connection using database settings from settings.py. Returns sqlalchemy engine instance """ return create_engine(SQLALCHEMY_DATABASE_URI) # Note order of events matters here # we need to create the Base before importing User from models.user_model import User from models.transaction_model import Transaction # def bind_engine(): engine = db_connect() # Connect to database Base.metadata.create_all(engine) # Create models
nilq/baby-python
python
""" Storage containers for durable queues and (planned) durable topics. """ import abc import logging import threading from coilmq.util.concurrency import synchronized __authors__ = ['"Hans Lellelid" <hans@xmpl.org>'] __copyright__ = "Copyright 2009 Hans Lellelid" __license__ = """Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.""" lock = threading.RLock() class QueueStore(object): """ Abstract base class for queue storage. Extensions/implementations of this class must be thread-safe. @ivar log: A logger for this class. @type log: C{logging.Logger} """ __metaclass__ = abc.ABCMeta def __init__(self): """ A base constructor that sets up logging. If you extend this class, you should either call this method or at minimum make sure these values get set. """ self.log = logging.getLogger('%s.%s' % ( self.__module__, self.__class__.__name__)) @abc.abstractmethod @synchronized(lock) def enqueue(self, destination, frame): """ Store message (frame) for specified destinationination. @param destination: The destinationination queue name for this message (frame). @type destination: C{str} @param frame: The message (frame) to send to specified destinationination. @type frame: C{stompclient.frame.Frame} """ @abc.abstractmethod @synchronized(lock) def dequeue(self, destination): """ Removes and returns an item from the queue (or C{None} if no items in queue). @param destination: The queue name (destinationination). @type destination: C{str} @return: The first frame in the specified queue, or C{None} if there are none. 
@rtype: C{stompclient.frame.Frame} """ @synchronized(lock) def requeue(self, destination, frame): """ Requeue a message (frame) for storing at specified destinationination. @param destination: The destinationination queue name for this message (frame). @type destination: C{str} @param frame: The message (frame) to send to specified destinationination. @type frame: C{stompclient.frame.Frame} """ self.enqueue(destination, frame) @synchronized(lock) def size(self, destination): """ Size of the queue for specified destination. @param destination: The queue destination (e.g. /queue/foo) @type destination: C{str} @return: The number of frames in specified queue. @rtype: C{int} """ raise NotImplementedError() @synchronized(lock) def has_frames(self, destination): """ Whether specified destination has any frames. Default implementation uses L{QueueStore.size} to determine if there are any frames in queue. Subclasses may choose to optimize this. @param destination: The queue destination (e.g. /queue/foo) @type destination: C{str} @return: The number of frames in specified queue. @rtype: C{int} """ return self.size(destination) > 0 @synchronized(lock) def destinations(self): """ Provides a set of destinations (queue "addresses") available. @return: A list of the detinations available. @rtype: C{set} """ raise NotImplementedError @synchronized(lock) def close(self): """ May be implemented to perform any necessary cleanup operations when store is closed. """ pass # This is intentionally not synchronized, since it does not directly # expose any shared data. def frames(self, destination): """ Returns an iterator for frames in specified queue. The iterator simply wraps calls to L{dequeue} method, so the order of the frames from the iterator will be the reverse of the order in which the frames were enqueued. @param destination: The queue destination (e.g. 
/queue/foo) @type destination: C{str} """ return QueueFrameIterator(self, destination) class QueueFrameIterator(object): """ Provides an C{iterable} over the frames for a specified destination in a queue. @ivar store: The queue store. @type store: L{coilmq.store.QueueStore} @ivar destination: The destination for this iterator. @type destination: C{str} """ def __init__(self, store, destination): self.store = store self.destination = destination def __iter__(self): return self def next(self): return self.__next__() def __next__(self): frame = self.store.dequeue(self.destination) if not frame: raise StopIteration() return frame def __len__(self): return self.store.size(self.destination) class TopicStore(object): """ Abstract base class for non-durable topic storage. """ class DurableTopicStore(TopicStore): """ Abstract base class for durable topic storage. """
nilq/baby-python
python
#!/usr/bin/env python3
# Copyright (C) 2015-2016 Ben Klein. All rights reserved.
#
# This application is licensed under the GNU GPLv3 License, included with
# this application source.

# OpenAirplay settings GUI: a PyQt5 widget that lives in the system tray,
# polls the local `discovery` module for AirPlay receivers, and exposes a
# few tray-icon / notification preferences backed by QSettings.

import sys

global DEBUG
DEBUG = True
if DEBUG:
    print("Debugging enabled.")
    print("Called with system args: " + str(sys.argv))
    print("Python version: " + sys.version)

# Qt GUI stuff
try:
    from PyQt5 import QtCore, QtGui, QtWidgets
    from PyQt5.QtCore import QSettings
except ImportError:
    print("There was an error importing the Qt python3 libraries,")
    print("These are required by to operate this program.")
    print("If you are on Ubuntu/Debian, they should be available via APT.")
    sys.exit("Could not import Python3 Qt Libraries.")

# Airplay Things:
try:
    import discovery
    import airplay
except:
    # NOTE(review): bare except hides the real import error — consider
    # catching ImportError and logging the cause.
    sys.exit("Could not import own classes.")


class Window(QtWidgets.QWidget):
    """Main settings window; hides to the tray rather than quitting."""

    def __init__(self):
        super(Window, self).__init__()

        # Persistent preferences store.
        # http://pyqt.sourceforge.net/Docs/PyQt4/pyqt_qsettings.html
        self.settings = QSettings('open-airplay')

        # Place items in our window.
        self.createIconGroupBox()       # Tray Icon Settings
        self.createMessageGroupBox()    # Test notification group
        self.createDeviceListGroupBox() # Airplay server selection

        # Set the icon label to its minimum width without scrollbarring.
        self.iconLabel.setMinimumWidth(self.durationLabel.sizeHint().width())

        # Create action groups to put actionable items into.
        self.createActions()
        self.createTrayIcon()

        # Attach clicks on things to actual functions
        self.showMessageButton.clicked.connect(self.showMessage)
        self.showIconCheckBox.toggled.connect(self.trayIconVisible)
        self.systrayClosePromptCheckBox.toggled.connect(self.setSystrayClosePrompt)
        self.iconComboBox.currentIndexChanged.connect(self.setIcon)
        self.trayIcon.messageClicked.connect(self.messageClicked)
        self.trayIcon.activated.connect(self.iconActivated)

        # Finally add the GUI item groupings we made to the layout and init it.
        mainLayout = QtWidgets.QVBoxLayout()
        mainLayout.addWidget(self.iconGroupBox)
        mainLayout.addWidget(self.deviceListGroupBox)
        mainLayout.addWidget(self.messageGroupBox)
        self.setLayout(mainLayout)

        # Set our System Tray Presence
        self.iconComboBox.setCurrentIndex(1)
        self.trayIcon.show()
        self.trayIcon.setToolTip("OpenAirplay")

        # Set our basic window things.
        self.setWindowTitle("OpenAirplay Settings")
        self.resize(400, 300)

        # If the user chose not to show the system tray icon:
        if self.settings.value('systrayicon', type=bool) is False:
            print("The user chose not to show the system tray icon.")
            self.trayIconVisible(False)

        # Setup stuff to poll available receivers every 3 seconds.
        self.oldReceiverList = []
        self.timer = QtCore.QTimer()
        self.timer.start(3000)
        self.timer.timeout.connect(self.updateReceivers)

        # Start discovery of airplay receivers:
        if DEBUG:
            print("Starting discovery service...")
        discovery.start()

    def setVisible(self, visible):
        # When we want to 'disappear' into the system tray, keep the tray
        # menu actions consistent with the window state.
        self.minimizeAction.setEnabled(visible)
        #self.maximizeAction.setEnabled(not self.isMaximized())
        self.restoreAction.setEnabled(self.isMaximized() or not visible)
        super(Window, self).setVisible(visible)

    def closeEvent(self, event):
        # When someone clicks to close the window, not the tray icon:
        # hide to the tray (optionally notifying) instead of quitting.
        if self.trayIcon.isVisible():
            if self.settings.value('promptOnClose_systray', type=bool):
                print("The program is returning to the system tray, user notified.")
                QtWidgets.QMessageBox.information(self, "Systray",
                    "The program will keep running in the system tray. \
To terminate the program, choose <b>Quit</b> in \
the menu of the system tray airplay icon.")
            else:
                print("Program returned to system tray, user chose not to be notified.")
            self.hide()
            event.ignore()
            print("Closing to System Tray")
        else:
            print("Tray Icon not visible, quitting.")
            self.quit("Exit: No system tray instance to close to.")

    def setIcon(self, index):
        # Sets the selected icon in the tray and taskbar.
        icon = self.iconComboBox.itemIcon(index)
        self.trayIcon.setIcon(icon)
        self.setWindowIcon(icon)

    def setSystrayClosePrompt(self, preference):
        # Persist the "warn when closing to tray" preference.
        print("Prompt on close is now " + str(preference))
        self.settings.setValue('promptOnClose_systray', preference)

    def trayIconVisible(self, preference):
        # Show/hide the tray icon and persist the choice.
        self.trayIcon.setVisible(preference)
        self.settings.setValue('systrayicon', preference)

    def iconActivated(self, reason):
        # Left click / double click cycles the icon; middle click shows
        # the test notification.
        if reason in (QtWidgets.QSystemTrayIcon.Trigger, QtWidgets.QSystemTrayIcon.DoubleClick):
            self.iconComboBox.setCurrentIndex(
                (self.iconComboBox.currentIndex() + 1) % self.iconComboBox.count())
        elif reason == QtWidgets.QSystemTrayIcon.MiddleClick:
            self.showMessage()

    def showMessage(self):
        # Show the message that was typed in the boxes
        icon = QtWidgets.QSystemTrayIcon.MessageIcon(
            self.typeComboBox.itemData(self.typeComboBox.currentIndex()))
        self.trayIcon.showMessage(self.titleEdit.text(),
                                  self.bodyEdit.toPlainText(), icon,
                                  self.durationSpinBox.value() * 1000)

    def messageClicked(self):
        # In the case that someone clicks on the notification popup
        # (impossible on Ubuntu Unity).
        QtWidgets.QMessageBox.information(None, "OpenAirplay Help",
            "If you need help with OpenAirplay, "
            "see the Github page to file bug reports or see further documentation and help.")

    def updateReceivers(self):
        # Diff the discovery module's receiver list against our cached copy
        # and mirror additions/removals into the device list widget.
        if list(set(discovery.airplayReceivers) - set(self.oldReceiverList)) != []:
            # The new list has items oldReceiverList doesn't!
            for item in list(set(discovery.airplayReceivers) - set(self.oldReceiverList)):
                self.oldReceiverList.append(item)
                print("Adding device: " + item)
                # Convert item to string to remove the excess info
                item = QtWidgets.QListWidgetItem(str(item).replace("._airplay._tcp.local.", ""))
                self.deviceSelectList.addItem(item)
        if list(set(self.oldReceiverList) - set(discovery.airplayReceivers)) != []:
            # Items have been removed from the list!
            for item in list(set(self.oldReceiverList) - set(discovery.airplayReceivers)):
                self.oldReceiverList.remove(item)
                print("Removed device: " + item)
                items = self.deviceSelectList.findItems(item, QtCore.Qt.MatchExactly)
                for x in items:
                    self.deviceSelectList.takeItem(self.deviceSelectList.row(x))

    def createIconGroupBox(self):
        # Add the SysTray preferences window grouping
        self.iconGroupBox = QtWidgets.QGroupBox("Tray Icon")

        self.iconLabel = QtWidgets.QLabel("Icon:")

        self.iconComboBox = QtWidgets.QComboBox()
        self.iconComboBox.addItem(QtGui.QIcon('images/Airplay-Light'), "Black Icon")
        self.iconComboBox.addItem(QtGui.QIcon('images/Airplay-Dark'), "White Icon")

        self.showIconCheckBox = QtWidgets.QCheckBox("Show tray icon")
        self.showIconCheckBox.setChecked(self.settings.value('systrayicon', type=bool))
        print("Got systrayicon from settings:" + str(self.settings.value('systrayicon', type=bool)))

        self.systrayClosePromptCheckBox = QtWidgets.QCheckBox("Systray Close warning")
        self.systrayClosePromptCheckBox.setChecked(self.settings.value('promptOnClose_systray', type=bool))
        print("Got promptOnClose_systray from settings:" + str(self.settings.value('promptOnClose_systray', type=bool)))

        iconLayout = QtWidgets.QHBoxLayout()
        iconLayout.addWidget(self.iconLabel)
        iconLayout.addWidget(self.iconComboBox)
        iconLayout.addStretch()
        iconLayout.addWidget(self.showIconCheckBox)
        iconLayout.addWidget(self.systrayClosePromptCheckBox)
        self.iconGroupBox.setLayout(iconLayout)

    # Creates the device selection list.
    def createDeviceListGroupBox(self):
        self.deviceListGroupBox = QtWidgets.QGroupBox("Airplay to")

        self.deviceSelectList = QtWidgets.QListWidget()
        deviceSelectListNoDisplayItem = QtWidgets.QListWidgetItem("No display.")
        self.deviceSelectList.addItem(deviceSelectListNoDisplayItem)

        # layout
        deviceListLayout = QtWidgets.QHBoxLayout()
        deviceListLayout.addWidget(self.deviceSelectList)
        self.deviceListGroupBox.setLayout(deviceListLayout)

    def createMessageGroupBox(self):
        # Add the message test GUI window grouping.
        self.messageGroupBox = QtWidgets.QGroupBox("Balloon Message Test:")

        typeLabel = QtWidgets.QLabel("Type:")

        self.typeComboBox = QtWidgets.QComboBox()
        self.typeComboBox.addItem("None", QtWidgets.QSystemTrayIcon.NoIcon)
        #self.typeComboBox.addItem(self.style().standardIcon(
        #        QtWidgets.QStyle.SP_MessageBoxInformation), "Information",
        #        QtWidgets.QSystemTrayIcon.Information)
        #self.typeComboBox.addItem(self.style().standardIcon(
        #        QtWidgets.QStyle.SP_MessageBoxWarning), "Warning",
        #        QtWidgets.QSystemTrayIcon.Warning)
        #self.typeComboBox.addItem(self.style().standardIcon(
        #        QtWidgets.QStyle.SP_MessageBoxCritical), "Critical",
        #        QtWidgets.QSystemTrayIcon.Critical)
        # NOTE(review): all three entries below carry the Information icon
        # as their item data — Warning/Critical presumably intended their
        # matching enum values; confirm before relying on the icon shown.
        self.typeComboBox.addItem("Information", QtWidgets.QSystemTrayIcon.Information)
        self.typeComboBox.addItem("Warning", QtWidgets.QSystemTrayIcon.Information)
        self.typeComboBox.addItem("Critical", QtWidgets.QSystemTrayIcon.Information)
        self.typeComboBox.setCurrentIndex(1)

        self.durationLabel = QtWidgets.QLabel("Duration:")

        self.durationSpinBox = QtWidgets.QSpinBox()
        self.durationSpinBox.setRange(2, 15)
        self.durationSpinBox.setSuffix("s")
        self.durationSpinBox.setValue(5)

        durationWarningLabel = QtWidgets.QLabel("(some systems might ignore this hint)")
        durationWarningLabel.setIndent(10)

        titleLabel = QtWidgets.QLabel("Title:")

        self.titleEdit = QtWidgets.QLineEdit("Cannot connect to network")

        bodyLabel = QtWidgets.QLabel("Body:")

        self.bodyEdit = QtWidgets.QTextEdit()
        self.bodyEdit.setPlainText("Don't believe me. Honestly, I don't have a clue.")

        self.showMessageButton = QtWidgets.QPushButton("Show Message")
        self.showMessageButton.setDefault(True)

        messageLayout = QtWidgets.QGridLayout()
        messageLayout.addWidget(typeLabel, 0, 0)
        messageLayout.addWidget(self.typeComboBox, 0, 1, 1, 2)
        messageLayout.addWidget(self.durationLabel, 1, 0)
        messageLayout.addWidget(self.durationSpinBox, 1, 1)
        messageLayout.addWidget(durationWarningLabel, 1, 2, 1, 3)
        messageLayout.addWidget(titleLabel, 2, 0)
        messageLayout.addWidget(self.titleEdit, 2, 1, 1, 4)
        messageLayout.addWidget(bodyLabel, 3, 0)
        messageLayout.addWidget(self.bodyEdit, 3, 1, 2, 4)
        messageLayout.addWidget(self.showMessageButton, 5, 4)
        messageLayout.setColumnStretch(3, 1)
        messageLayout.setRowStretch(4, 1)
        self.messageGroupBox.setLayout(messageLayout)

    def createActions(self):
        # Create Actions that can be taken from the System Tray Icon
        self.minimizeAction = QtWidgets.QAction("Mi&nimize", self, triggered=self.hide)
        # Application is not the kind to be maximized
        #self.maximizeAction = QtWidgets.QAction("Ma&ximize", self, triggered=self.showMaximized)
        self.restoreAction = QtWidgets.QAction("&Restore", self, triggered=self.showNormal)
        self.quitAction = QtWidgets.QAction("&Quit", self, triggered=QtWidgets.qApp.quit)

    def createTrayIcon(self):
        self.trayIconMenu = QtWidgets.QMenu()
        self.trayIconMenu.addAction(self.minimizeAction)
        #self.trayIconMenu.addAction(self.maximizeAction)
        self.trayIconMenu.addAction(self.restoreAction)
        self.trayIconMenu.addSeparator()
        self.trayIconMenu.addAction(self.quitAction)

        self.trayIcon = QtWidgets.QSystemTrayIcon(self)
        self.trayIcon.setContextMenu(self.trayIconMenu)

    def quit(self, reason):
        # Tear down settings handle and exit with the supplied reason.
        del self.settings
        #discovery.stop()
        sys.exit(reason)


# NOTE(review): this entry-point block is truncated in the visible chunk —
# the code that constructs and shows Window presumably follows.
if __name__ == '__main__':
    app = QtWidgets.QApplication(['Open Airplay'])

    if not QtWidgets.QSystemTrayIcon.isSystemTrayAvailable():
        QtWidgets.QMessageBox.critical(None, "Systray",
            "I couldn't detect any system tray on this system.")
        sys.exit(1)
QtWidgets.QApplication.setQuitOnLastWindowClosed(False) window = Window() window.show() # After teh progreem endz: sys.exit(app.exec_()) # Goodbye World
nilq/baby-python
python
import pyqtgraph as pg
from pyqtgraph import QtCore, QtGui

from .. import definitions as defs
from .. import functions


class FinWindow(pg.GraphicsLayoutWidget):
    """Top-level chart window.

    Positions itself in a cascading fashion (each new window offset by 40px)
    and persists window data via ``functions._savewindata`` when the window
    loses focus or is closed.
    """

    def __init__(self, title, **kwargs):
        self.title = title
        pg.mkQApp()  # ensure a QApplication exists before creating widgets
        super().__init__(**kwargs)
        self.setWindowTitle(title)
        self.setGeometry(defs.winx, defs.winy, defs.winw, defs.winh)
        # Cascade subsequent windows so they do not stack exactly on top of
        # each other (module-level counters shared by all windows).
        defs.winx += 40
        defs.winy += 40
        self.centralWidget.installEventFilter(self)
        self.ci.setContentsMargins(0, 0, 0, 0)
        self.ci.setSpacing(-1)
        # Set while close() is running so leaveEvent can skip torn-down widgets.
        self.closing = False

    @property
    def axs(self):
        """All PlotItem axes currently laid out in this window."""
        return [ax for ax in self.ci.items if isinstance(ax, pg.PlotItem)]

    def close(self):
        # Persist window data and stop module timers before the Qt close.
        self.closing = True
        functions._savewindata(self)
        functions._clear_timers()
        return super().close()

    def eventFilter(self, obj, ev):
        # Save window data whenever the window is deactivated (loses focus).
        # Always returns False so the event is never consumed here.
        if ev.type() == QtCore.QEvent.WindowDeactivate:
            functions._savewindata(self)
        return False

    def leaveEvent(self, ev):
        # Skip leave handling during close (child widgets may be gone already).
        if not self.closing:
            super().leaveEvent(ev)
nilq/baby-python
python
import os
import re

from setuptools import setup

# Directory containing this setup.py.
PWD = os.path.dirname(__file__)

# Extract __version__ from the package source without importing the package.
with open(os.path.join(PWD, 'sshtunnel_requests', '__init__.py')) as f:
    VERSION = (re.compile(r""".*__version__ = ["'](.*?)['"]""",
                          re.S).match(f.read()).group(1))


def parse_requirements_file(filename):
    """Return the requirement strings listed in *filename*.

    Skips comment lines and blank lines. (Blank lines previously produced
    empty '' entries in the requirement list, which setuptools rejects.)
    """
    with open(filename) as fid:
        requires = [line.strip() for line in fid.readlines()
                    if line.strip() and not line.lstrip().startswith("#")]
    return requires


# base requirements
install_requires = parse_requirements_file('requirements.txt')

test_requires = parse_requirements_file('requirements_test.txt')
docs_requires = parse_requirements_file('requirements_doc.txt')

extras = {
    "test": test_requires,
    "docs": docs_requires,
}
extras["all"] = sum(extras.values(), [])

setup(
    name='sshtunnel_requests',
    version=VERSION,
    url='https://github.com/featureoverload/sshtunnel-requests',
    project_urls={
        "Documentation": "https://sshtunnel-requests.readthedocs.io/en/latest/",
        "Source": "https://github.com/featureoverload/sshtunnel-requests",
        "Tracker": "https://github.com/featureoverload/sshtunnel-requests/issues",
    },
    author='Feature Overload',
    author_email='featureoverload@gmail.com',
    maintainer='Feature Overload',
    maintainer_email='featureoverload@gmail.com',
    packages=['sshtunnel_requests'],
    package_data={'': ['LICENSE', ]},
    package_dir={'sshtunnel_requests': 'sshtunnel_requests'},
    # Fixed typo in metadata: 'remove server' -> 'remote server'.
    description='a simple HTTP library to port forward requests on SSH tunnels to remote server',
    long_description=open("README.rst").read(),
    long_description_content_type="text/x-rst",
    include_package_data=True,
    python_requires='>=3.8',
    install_requires=install_requires,
    extras_require=extras,
)
nilq/baby-python
python
# Generated by Django 3.1.5 on 2021-03-28 18:03 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('challenge', '0004_match_log_file_token'), ] operations = [ migrations.AlterField( model_name='match', name='status', field=models.CharField(choices=[('failed', 'Failed'), ('successful', 'Successful'), ('running', 'Running'), ('freeze', 'Freeze'), ('pending', 'pending')], default='pending', max_length=50), ), ]
nilq/baby-python
python
# -*- coding: utf-8 -*-
# Copyright (c) 2006-2018, Alexis Royer, http://alexis.royer.free.fr/CLI
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
#     * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation
#       and/or other materials provided with the distribution.
#     * Neither the name of the CLI library project nor the names of its contributors may be used to endorse or promote products derived from this software
#       without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

""" Module logger package. """


class LogLevel(object):
    """ Log level object. """

    def __init__(self, string, depth):
        """ Constructor.

        @param string (str) String representation of the log class.
        @param depth (int) Visibility depth: the lowest the most visible.
        """
        self._string = string
        self._depth = depth

    def string(self):
        """ String representation accessor.

        @return (str) String representation.
        """
        return self._string

    def depth(self):
        """ Visibility depth accessor: the lowest the most visible.

        @return (int) Visibility depth.
        """
        return self._depth


# Regular log levels.
FATAL_ERROR = LogLevel("FATAL ERROR", 10)
ERROR = LogLevel("ERROR", 100)
WARNING = LogLevel("WARNING", 1000)
INFO = LogLevel("INFO", 10000)
TRACE = LogLevel("TRACE", 100000)
DEBUG = LogLevel("DEBUG", 1000000)


class Engine(object):
    """ Log engine: receives logs and filter them out depending on a filter. """

    def __init__(self):
        """ Constructor. """
        # Maps module name -> maximum LogLevel to display.
        self._filter = {}

    def set_filter(self, module, level):
        """ Configure the filter for the current module.

        @param module (str) Log module name.
        @param level (LogLevel) Maximum log level to display.
        """
        self._filter[module] = level

    def remove_filter(self, module):
        """ Filter removal.

        @param module (str) Log module name.
        @raise KeyError When no filter is set for the module.
        """
        del self._filter[module]

    def is_enabled(self, module, level):
        """ Checks whether logging is enabled for the given module and level.

        @param module (str) Log module name.
        @param level (LogLevel) Log level being tested.
        @return (bool) True if logging is enabled, False otherwise.
        """
        # Fixed: dict.has_key() does not exist on Python 3; the 'in' operator
        # behaves identically on both Python 2 and Python 3.
        return (module in self._filter) and (level.depth() <= self._filter[module].depth())

    def log(self, module, level, message):
        """ Prints out a log message.

        If the level is an error, a warning or a regular info, it is directly printed out.

        @param module (str) Log class.
        @param level (LogLevel) Log level: FATAL_ERROR, ERROR, WARNING, INFO, TRACE, DEBUG
        @param message (str|Exception) Log message or exception.
        """
        if isinstance(message, Exception):
            # In case message is actually an exception, print out the stack trace.
            if self.is_enabled(module, level):
                import traceback
                # Fixed: traceback.print_exc() takes an optional 'limit' argument,
                # not an exception object; it always prints the exception
                # currently being handled.
                traceback.print_exc()
        else:
            # Unicode processing before output: on Python 2, unicode strings
            # raise UnicodeEncodeError when output through file redirection.
            # 'str is bytes' is only True on Python 2, so the 'unicode' name
            # is never evaluated on Python 3 (where it does not exist).
            if str is bytes and isinstance(message, unicode):
                # Automatically encode in utf-8.
                import codecs
                message = codecs.lookup("utf-8").encode(message)[0]

            # Direct printing for the most visible levels.
            import sys
            if level.depth() < ERROR.depth():
                sys.stderr.write("Fatal error! %s\n" % message)
            elif level.depth() < WARNING.depth():
                sys.stderr.write("Error! %s\n" % message)
            elif level.depth() < INFO.depth():
                sys.stderr.write("Warning! %s\n" % message)
            elif level.depth() < TRACE.depth():
                sys.stderr.write("Info! %s\n" % message)

            # Log filtering for the less visible levels.
            if self.is_enabled(module, level):
                if level.depth() > INFO.depth():
                    sys.stderr.write("<%s|%s> %s\n" % (module, level.string(), message))


_ENGINE = Engine()


def engine():
    """ Log engine singleton.

    @return (Engine) Log engine instance.
    """
    return _ENGINE


def log(module, level, message):
    """ Prints out a log message.

    @param module (str) Log class.
    @param level (LogLevel) Log level: FATAL_ERROR, ERROR, WARNING, INFO, TRACE, DEBUG
    @param message (str) Log message.
    """
    engine().log(module, level, message)
nilq/baby-python
python
# -*- coding: utf-8 -*- import os from jsonschema import ValidationError from app_factory.base import AppZero from gluon import current class Table(AppZero): _definition_path = os.path.join( current.request.folder, "static", "json", "model", "dal", "table.json" ) _schema_name = "table.schema.json" _schema_path = os.path.join( current.request.folder, "static", "json_schema", "model_schema", "dal_schema" ) def _construction_method(self, param): self._validate() self._table_validate() def _table_validate(self): existed_table = [] for table in self.data: if table["table_name"] in existed_table: raise ValidationError( "duplicate table: '{table_name}'".format( table_name=table["table_name"] ) ) existed_table.append(table["table_name"]) existed_field = [] for field in table["table_fields"]: if field["field_name"] in existed_field: raise ValidationError( "duplicate field in '{table_name}': '{field_name}'".format( table_name=table["table_name"], field_name=field["field_name"], ) ) existed_field.append(field["field_name"]) if field["field_type"] == "string" and "field_length" not in field: raise ValidationError( """ unspecified 'field_length' for string typed field in '{table_name}': '{field_name}' """.format( table_name=table["table_name"], field_name=field["field_name"], ) ) if field["field_type"] not in ["string"] and "field_length" in field: raise ValidationError( "invalid property 'field_length' to type: '{field_type}'".format( field_type=field["field_type"] ) )
nilq/baby-python
python
import os
from codecs import open

from setuptools import setup, find_packages

here = os.path.abspath(os.path.dirname(__file__))


def _parse_requirements(filename):
    """Return the requirement strings from *filename*, skipping comments/blanks.

    Replaces the former use of pip's private ``parse_requirements`` helper:
    ``pip.req`` moved to ``pip._internal.req`` in pip 10 and the
    ``InstallRequirement.req`` attribute was removed in pip 20.1, so importing
    pip internals broke this script on modern pip versions. Reading the file
    directly is stable and yields the same requirement strings.
    """
    with open(filename, encoding='utf-8') as fid:
        return [line.strip() for line in fid
                if line.strip() and not line.lstrip().startswith('#')]


requirements = _parse_requirements('requirements.txt')

setup(
    name='sacred_helper',
    version='0.0.1',
    description='Small helper to retrieve past Sacred experiments',
    author="Romain Sabathe",
    keywords='sacred experiment machine learning',
    packages=find_packages(exclude=('tests', 'docs')),
    package_data={},
    include_package_data=True,
    install_requires=requirements,
)
nilq/baby-python
python
import os
import glob
import re

from setup_app import paths
from setup_app.utils import base
from setup_app.static import AppType, InstallOption
from setup_app.config import Config
from setup_app.utils.setup_utils import SetupUtils
from setup_app.installers.base import BaseInstaller


class JythonInstaller(BaseInstaller, SetupUtils):
    """Installs Jython from the bundled jython-installer-*.jar and links
    the versioned install directory to Config.jython_home."""

    def __init__(self):
        # Register this installer instance on the running application object
        # so other installers can reach it by class name.
        setattr(base.current_app, self.__class__.__name__, self)
        self.service_name = 'jython'
        self.install_var = 'installJython'
        self.app_type = AppType.APPLICATION
        self.install_type = InstallOption.MONDATORY  # [sic] project-wide spelling
        if not base.snap:
            self.register_progess()  # [sic] method name comes from the base class

        self.needdb = False  # we don't need backend connection in this class

    def install(self):
        """Run the Jython installer jar and point Config.jython_home at the result.

        logIt(..., True, True) is fatal: it aborts the setup when no
        installer jar is found or no version can be parsed from its name.
        """
        # Pick the newest installer jar shipped in distAppFolder.
        jython_installer_list = glob.glob(os.path.join(Config.distAppFolder, 'jython-installer-*'))

        if not jython_installer_list:
            self.logIt("Jython installer not found in. Exiting...", True, True)

        jython_installer = max(jython_installer_list)
        jython_version_regex = re.search('jython-installer-(.*)\.jar', jython_installer)
        if not jython_version_regex:
            self.logIt("Jython installer not found in. Exiting...", True, True)

        jython_version = jython_version_regex.groups()[0]

        try:
            # NOTE(review): 'rm' is invoked without a shell here, so the '*'
            # in '/opt*-<version>' is NOT glob-expanded — this removes nothing
            # unless a path is literally named '/opt*-<version>'. It looks
            # like '/opt/jython-<version>' was intended; confirm before changing.
            self.run(['rm', '-rf', '/opt*-%s' % jython_version])
            self.run([Config.cmd_java, '-jar', jython_installer, '-v', '-s', '-d', '/opt/jython-%s' % jython_version, '-t', 'standard', '-e', 'ensurepip'])
        except:
            self.logIt("Error installing jython-installer-%s.jar" % jython_version)

        # Stable symlink so the rest of setup can refer to Config.jython_home.
        self.run([paths.cmd_ln, '-sf', '/opt/jython-%s' % jython_version, Config.jython_home])
        self.run([paths.cmd_chown, '-R', 'root:root', '/opt/jython-%s' % jython_version])
        self.run([paths.cmd_chown, '-h', 'root:root', Config.jython_home])
nilq/baby-python
python
# Copyright 2016-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
#     http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.

"""Unit tests for "awshelpers" module."""
import unittest

from moto import mock_ec2
import boto3
import botocore
import mock
import requests
import responses

import ec2rlcore.awshelpers


@mock_ec2
class TestAwshelpers(unittest.TestCase):
    """Testing class for "awshelpers" unit tests.

    EC2 API calls are faked by moto (@mock_ec2); instance-metadata HTTP
    calls to 169.254.169.254 are faked per-test with the 'responses' library.
    """

    # Canned instance-identity document served by the fake IMDS endpoint.
    IMDS_DOCUMENT = {'privateIp': '172.16.1.128',
                     'devpayProductCodes': None,
                     'marketplaceProductCodes': None,
                     'version': '2017-09-30',
                     'availabilityZone': 'us-east-1c',
                     'instanceId': 'i-deadbeef',
                     'billingProducts': None,
                     'instanceType': 'm5.4xlarge',
                     'kernelId': None,
                     'ramdiskId': None,
                     'accountId': '1234567890',
                     'architecture': 'x86_64',
                     'imageId': 'ami-deadbeef',
                     'pendingTime': '2018-09-14T01:58:16Z',
                     'region': 'us-east-1'}

    def setup_ec2(self):
        """Setup for usage, including moto environment.

        Launches one fake instance and returns its instance id.
        """
        ec2 = boto3.client("ec2", region_name="us-east-1")
        response = ec2.run_instances(
            ImageId="ami-deadbeef",
            MinCount=1,
            MaxCount=1,
            KeyName="deadbeef",
            InstanceType="m4.16xlarge",
        )
        instance = response["Instances"][0]
        instanceid = instance["InstanceId"]
        return instanceid

    @responses.activate
    def test_awshelpers_get_volume_ids(self):
        """Test that retrieving the volume ids for the instance works as expected."""
        instanceid = self.setup_ec2()
        responses.add(responses.GET,
                      "http://169.254.169.254/latest/dynamic/instance-identity/document",
                      json=self.IMDS_DOCUMENT, status=200)
        responses.add(responses.GET,
                      "http://169.254.169.254/latest/meta-data/instance-id",
                      body=instanceid, status=200)

        self.assertTrue(ec2rlcore.awshelpers.get_volume_ids())

    @responses.activate
    def test_awshelpers_get_volume_mappings(self):
        """Test that retrieving the volume mappings for the instance works as expected."""
        instanceid = self.setup_ec2()
        responses.add(responses.GET,
                      "http://169.254.169.254/latest/dynamic/instance-identity/document",
                      json=self.IMDS_DOCUMENT, status=200)
        responses.add(responses.GET,
                      "http://169.254.169.254/latest/meta-data/instance-id",
                      body=instanceid, status=200)

        self.assertTrue(ec2rlcore.awshelpers.get_volume_mappings())

    @responses.activate
    def test_awshelpers_get_instance_region(self):
        """Test that attempting to retrieve the instance region works as expected."""
        responses.add(responses.GET,
                      "http://169.254.169.254/latest/dynamic/instance-identity/document",
                      json=self.IMDS_DOCUMENT, status=200)

        resp = ec2rlcore.awshelpers.get_instance_region()
        self.assertEqual(resp, "us-east-1")

    @responses.activate
    def test_awshelpers_get_instance_id(self):
        """Test that attempting to retrieve the instance id works as expected."""
        responses.add(responses.GET,
                      "http://169.254.169.254/latest/meta-data/instance-id",
                      body="i-deadbeef", status=200)

        resp = ec2rlcore.awshelpers.get_instance_id()
        self.assertEqual(resp, "i-deadbeef")

    @mock.patch("ec2rlcore.awshelpers.requests.get", side_effect=requests.exceptions.Timeout())
    def test_awshelpers_get_instance_region_timeout(self, mock_get):
        """Test that timeout exception raises as expected."""
        with self.assertRaises(ec2rlcore.awshelpers.AWSHelperMetadataTimeout):
            ec2rlcore.awshelpers.get_instance_region()
        self.assertTrue(mock_get.called)

    @mock.patch("ec2rlcore.awshelpers.requests.get", side_effect=requests.exceptions.Timeout())
    def test_awshelpers_get_instance_id_timeout(self, mock_get):
        """Test that timeout exception raises as expected."""
        with self.assertRaises(ec2rlcore.awshelpers.AWSHelperMetadataTimeout):
            ec2rlcore.awshelpers.get_instance_id()
        self.assertTrue(mock_get.called)

    @responses.activate
    def test_awshelpers_get_instance_region_httperror(self):
        """Test that get_instance_region raises AWSHelperMetadataHTTPError."""
        # 404 from the fake IMDS endpoint should surface as an HTTP error.
        responses.add(responses.GET,
                      "http://169.254.169.254/latest/dynamic/instance-identity/document",
                      json=self.IMDS_DOCUMENT, status=404)

        with self.assertRaises(ec2rlcore.awshelpers.AWSHelperMetadataHTTPError):
            ec2rlcore.awshelpers.get_instance_region()

    @responses.activate
    def test_awshelpers_get_instance_id_httperror(self):
        """Test that get_instance_id raises AWSHelperMetadataHTTPError."""
        responses.add(responses.GET,
                      "http://169.254.169.254/latest/meta-data/instance-id",
                      body="i-deadbeef", status=404)

        with self.assertRaises(ec2rlcore.awshelpers.AWSHelperMetadataHTTPError):
            ec2rlcore.awshelpers.get_instance_id()

    @mock.patch("ec2rlcore.awshelpers.requests.get", side_effect=requests.exceptions.RequestException())
    def test_awshelpers_get_instance_region_exception(self, mock_get):
        # Any other requests failure should surface as AWSHelperRequestsException.
        with self.assertRaises(ec2rlcore.awshelpers.AWSHelperRequestsException):
            ec2rlcore.awshelpers.get_instance_region()
        self.assertTrue(mock_get.called)

    @mock.patch("ec2rlcore.awshelpers.requests.get", side_effect=requests.exceptions.RequestException())
    def test_awshelpers_get_instance_id_exception(self, mock_get):
        with self.assertRaises(ec2rlcore.awshelpers.AWSHelperRequestsException):
            ec2rlcore.awshelpers.get_instance_id()
        self.assertTrue(mock_get.called)

    @mock.patch("ec2rlcore.awshelpers.boto3.client", side_effect=botocore.exceptions.NoCredentialsError())
    @responses.activate
    def test_awshelpers_no_creds_get_volume_mappings(self, mock_client):
        # Missing AWS credentials should surface as AWSHelperNoCredsError.
        responses.add(responses.GET,
                      "http://169.254.169.254/latest/dynamic/instance-identity/document",
                      json=self.IMDS_DOCUMENT, status=200)
        responses.add(responses.GET,
                      "http://169.254.169.254/latest/meta-data/instance-id",
                      body="i-deadbeef", status=200)

        with self.assertRaises(ec2rlcore.awshelpers.AWSHelperNoCredsError):
            ec2rlcore.awshelpers.get_volume_mappings()
        self.assertTrue(mock_client.called)

    @mock.patch("ec2rlcore.awshelpers.boto3.client", side_effect=botocore.exceptions.NoCredentialsError())
    @responses.activate
    def test_awshelpers_no_creds_get_volume_id(self, mock_client):
        responses.add(responses.GET,
                      "http://169.254.169.254/latest/dynamic/instance-identity/document",
                      json=self.IMDS_DOCUMENT, status=200)
        responses.add(responses.GET,
                      "http://169.254.169.254/latest/meta-data/instance-id",
                      body="i-deadbeef", status=200)

        with self.assertRaises(ec2rlcore.awshelpers.AWSHelperNoCredsError):
            ec2rlcore.awshelpers.get_volume_ids()
        self.assertTrue(mock_client.called)
nilq/baby-python
python
"""Threaded chat server (Python 2) with a self-resizing worker pool.

One listener thread accepts TCP clients into a shared queue; Worker threads
pull clients from the queue and speak a line-based chat protocol
(JOIN_CHATROOM / LEAVE_CHATROOM / CHAT / DISCONNECT). Messages are relayed
through shared per-room mailboxes guarded by Pool.lockState.
"""
import socket, time, threading, sys, signal, errno
from threading import Thread

if (len(sys.argv) < 2):
    print "Server usage: python server.py PORT"
    sys.exit(0)

MIN_THREADS = 2 # Minimum number of workers at start and at any point
MAX_THREADS = 32 # Maximum number of workers
TOLERANCE = 2 # Minimum difference before reducing the pool or minimum step for increasing (inertia)

# Protocol message prefixes (used both to detect and to slice incoming lines).
J_MSG = "JOIN_CHATROOM: "
L_MSG = "LEAVE_CHATROOM: "
IP_MSG = "CLIENT_IP: "
P_MSG = "PORT: "
JID_MSG = "JOIN_ID: "
NAME_MSG = "CLIENT_NAME: "
DIS_MSG = "DISCONNECT: "
CHAT_MSG = "CHAT: "
MSG = "MESSAGE: "

PORT = int(sys.argv[1])


class Room():
    """A chatroom: its pending messages and the ids of its members."""

    def __init__(self):
        # This will contain [CLIENT_NAME, MESSAGE, set(ID)] entries; the set
        # holds the ids of members that have not yet read the message.
        self.messages = []
        self.clients = []


class ChatState():
    """Shared chat state; every access must hold Pool.lockState."""

    def __init__(self):
        self.idCounter = 0   # next client join id
        self.refCounter = 0  # next room reference number
        # Associating a name with a ref
        self.roomRefs = {}
        # Associating a ref with a Room object
        self.rooms = {}


class Pool():
    """Worker-thread pool that grows/shrinks with client demand."""

    def __init__(self):
        self.lockClients = threading.Lock()  # guards self.clients / resizing
        self.lockState = threading.Lock()    # guards self.state
        self.clients = []   # accepted sockets waiting for a worker
        self.workers = []
        self.state = ChatState()
        self.threadCounter = 0
        self.killRequested = False
        for counter in range(MIN_THREADS):
            self.workers.append(Worker(self, self.threadCounter))
            self.workers[counter].start()
            self.threadCounter += 1

    def killWorker(self, worker):
        """Mark one idle worker for death; True if it was marked.

        Only called from requestResize (which holds lockClients and set
        self.maxKill / self.killedSoFar beforehand).
        """
        if (len(self.workers) - self.killedSoFar) <= MIN_THREADS:
            return False
        if self.killedSoFar >= self.maxKill:
            return False
        if worker.conn is None:
            worker.useless = True # This thread will eventually die now
            self.killedSoFar += 1
            return True
        return False

    def requestResize(self):
        """Grow or shrink the pool to roughly match current demand."""
        self.lockClients.acquire()
        activeWorkers = len([w for w in self.workers if w.conn])
        # Positive difference: more demand than workers; negative: idle excess.
        difference = len(self.clients) + activeWorkers - len(self.workers)
        if difference > 0 and len(self.workers) < MAX_THREADS:
            nbThreads = min(max(difference, TOLERANCE), MAX_THREADS-len(self.workers))
            print "Spawning {0} workers to handle more clients!".format(nbThreads)
            for counter in range(nbThreads):
                self.workers.append(Worker(self, self.threadCounter))
                self.workers[-1].start()
                self.threadCounter += 1
        elif abs(difference) >= TOLERANCE and len(self.workers) > MIN_THREADS:
            nbKills = min(abs(difference), len(self.workers)-MIN_THREADS)
            print "Killing {0} workers because activity dropped!".format(nbKills)
            self.maxKill = abs(difference)
            self.killedSoFar = 0
            self.workers = [w for w in self.workers if not self.killWorker(w)]
        self.lockClients.release()

    def assignClient(self, conn):
        """Queue a newly accepted (non-blocking) socket for a worker."""
        conn.setblocking(0)
        self.lockClients.acquire()
        self.clients.append(conn)
        self.lockClients.release()
        self.requestResize()

    def kill(self):
        # Cooperative shutdown flag polled by every worker loop.
        self.killRequested = True


class Server(Thread):
    """Listener thread: accepts TCP connections and hands them to the pool."""

    def __init__(self, pool):
        Thread.__init__(self)
        self.daemon = True # This thread may die while waiting for a client
        self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.server.bind(("0.0.0.0", PORT))
        self.pool = pool

    def run(self):
        while True:
            # At most 5 queued clients on most OS
            self.server.listen(5)
            (conn, (ip,port)) = self.server.accept()
            # If the server is already overloaded, reject this client
            if len(self.pool.clients) > MAX_THREADS:
                print "Burnout! Server rejected client"
                conn.close()
            else:
                print "Server received client connection and added it to queue"
                self.pool.assignClient(conn)


class Worker(Thread):
    """Serves one client at a time: parses protocol lines, relays messages."""

    def __init__(self, pool, id):
        Thread.__init__(self)
        self.pool = pool
        self.conn = None      # currently served socket, None when idle
        self.id = id
        self.useless = False  # set by the pool to retire this worker
        self.myRooms = []     # (roomRef, clientId) pairs this client joined

    def constructReply(self, data):
        """Build the HELO identification reply."""
        reply = "HELO {0}\nIP:{1}\nPort:{2}\nStudentID:{3}\n".format(data,
            socket.gethostbyname(socket.gethostname()),
            PORT,
            16336617)
        return reply

    def constructJoinReply(self, roomName, roomRef, clientId):
        """Build the JOINED_CHATROOM acknowledgement."""
        reply = ("JOINED_CHATROOM: {0}\n"
                 "SERVER_IP: {1}\n"
                 "PORT: {2}\n"
                 "ROOM_REF: {3}\n"
                 "JOIN_ID: {4}\n"
                 ).format(roomName,
                          socket.gethostbyname(socket.gethostname()),
                          PORT,
                          roomRef,
                          clientId)
        return reply

    def constructLeaveReply(self, roomRef, clientId):
        """Build the LEFT_CHATROOM acknowledgement."""
        reply = ("LEFT_CHATROOM: {0}\n"
                 "JOIN_ID: {1}\n"
                 ).format(roomRef, clientId)
        return reply

    def constructMessage(self, roomRef, clientName, message):
        """Build a CHAT relay line for delivery to a room member."""
        reply = ("CHAT: {0}\n"
                 "CLIENT_NAME: {1}\n"
                 "MESSAGE: {2}\n\n"
                 ).format(roomRef, clientName, message)
        return reply

    def sendClient(self, content):
        """Send to the current client, retrying until sent, killed or retired."""
        while not (self.pool.killRequested or self.useless):
            try:
                self.conn.send(content)
                print "Thread {0} sent this to client: {1}".format(self.id, content)
                break
            except socket.error as e:
                # Give up silently if the peer reset the connection.
                if e.errno == errno.ECONNRESET:
                    break

    def handleResponse(self, data):
        """Dispatch one protocol message; return True when the client is done."""
        # Thread pool protocol
        if data == "KILL_SERVICE\n":
            self.pool.kill()
            return True
        elif data.startswith("HELO "):
            self.sendClient(self.constructReply(data[5:].rstrip()))
            return False
        # Chat protocol
        elif data.startswith(J_MSG):
            roomName = data.splitlines()[0][len(J_MSG):]
            clientName = data.splitlines()[3][len(NAME_MSG):]
            # Get client ID, room ref, broadcast and append client to users
            self.pool.lockState.acquire()
            clientId = self.associatedId
            if roomName in self.pool.state.roomRefs:
                roomRef = self.pool.state.roomRefs[roomName]
            else:
                # First join for this room name: create it.
                roomRef = self.pool.state.refCounter
                self.pool.state.roomRefs[roomName] = roomRef
                self.pool.state.rooms[roomRef] = Room()
                self.pool.state.refCounter += 1
            room = self.pool.state.rooms[roomRef]
            room.clients.append(clientId)
            if (len(room.clients) > 0):
                joinMessage = "{0} has joined the chatroom".format(clientName)
                room.messages.append([clientName, joinMessage, set(room.clients)])
            self.pool.lockState.release()
            self.myRooms.append((roomRef, clientId))
            self.sendClient(self.constructJoinReply(roomName, roomRef, clientId))
            return False
        elif data.startswith(L_MSG):
            roomRef = int(data.splitlines()[0][len(L_MSG):])
            clientId = int(data.splitlines()[1][len(JID_MSG):])
            clientName = data.splitlines()[2][len(NAME_MSG):]
            # Discard any messages left for us, and leave chatroom
            if (roomRef, clientId) in self.myRooms:
                self.pool.lockState.acquire()
                room = self.pool.state.rooms[roomRef]
                for index in range(len(room.messages)):
                    if clientId in room.messages[index][2]:
                        room.messages[index][2].remove(clientId)
                # Drop messages that every remaining member has now read.
                room.messages[:] = [m for m in room.messages if m[2]]
                room.clients.remove(clientId)
                leaveMessage = "{0} has left the chatroom".format(clientName)
                if (len(room.clients) > 0):
                    room.messages.append([clientName, leaveMessage, set(room.clients)])
                self.pool.lockState.release()
            self.sendClient(self.constructLeaveReply(roomRef, clientId))
            if (roomRef, clientId) in self.myRooms:
                self.sendClient(self.constructMessage(roomRef, clientName, leaveMessage))
                self.myRooms.remove((roomRef, clientId))
            return False
        elif data.startswith(CHAT_MSG):
            roomRef = int(data.splitlines()[0][len(CHAT_MSG):])
            clientId = int(data.splitlines()[1][len(JID_MSG):])
            clientName = data.splitlines()[2][len(NAME_MSG):]
            message = data.splitlines()[3][len(MSG):]
            # Append message so that all threads can read it (including this one)
            self.pool.lockState.acquire()
            room = self.pool.state.rooms[roomRef]
            if (len(room.clients) > 0):
                room.messages.append([clientName, message, set(room.clients)])
            self.pool.lockState.release()
            return False
        elif data.startswith(DIS_MSG):
            clientName = data.splitlines()[2][len(NAME_MSG):]
            # Discard any messages left for us, and leave all chatrooms
            for t in self.myRooms:
                roomRef = t[0]
                clientId = t[1]
                self.pool.lockState.acquire()
                room = self.pool.state.rooms[roomRef]
                for index in range(len(room.messages)):
                    if clientId in room.messages[index][2]:
                        room.messages[index][2].remove(clientId)
                room.messages[:] = [m for m in room.messages if m[2]]
                room.clients.remove(clientId)
                discMessage = "{0} was disconnected".format(clientName)
                if (len(room.clients) > 0):
                    room.messages.append([clientName, discMessage, set(room.clients)])
                self.sendClient(self.constructMessage(roomRef, clientName, discMessage))
                self.pool.lockState.release()
            self.myRooms = []
            return True

    def readMessages(self):
        """Deliver and mark read every pending message addressed to this client."""
        self.pool.lockState.acquire()
        for t in self.myRooms:
            roomRef = t[0]
            clientId = t[1]
            room = self.pool.state.rooms[roomRef]
            for index in range(len(room.messages)):
                if clientId in room.messages[index][2]:
                    room.messages[index][2].remove(clientId)
                    self.sendClient(self.constructMessage(roomRef,
                                                          room.messages[index][0],
                                                          room.messages[index][1]))
            # Drop messages that have now been read by everyone.
            room.messages[:] = [m for m in room.messages if m[2]]
        self.pool.lockState.release()

    def run(self):
        while not (self.pool.killRequested or self.useless):
            # Try to get a client
            self.pool.lockClients.acquire()
            if (len(self.pool.clients) > 0 and not (self.pool.killRequested or self.useless)):
                self.conn = self.pool.clients.pop(0)
            self.pool.lockClients.release()
            # If we didn't get a client, try again
            if self.conn is None:
                continue
            print "Thread {0} fetched a client".format(self.id)
            self.pool.lockState.acquire()
            self.associatedId = self.pool.state.idCounter
            self.pool.state.idCounter += 1
            self.pool.lockState.release()
            # Serve client
            while not (self.pool.killRequested or self.useless):
                self.readMessages()
                try:
                    # Clients may escape newlines; restore them before parsing.
                    data = self.conn.recv(2048).replace("\\n", '\n')
                    print "Thread {0} received data {1}".format(self.id, data.rstrip())
                    if data == "":
                        break
                    if self.handleResponse(data):
                        break
                except socket.error as e2:
                    if e2.errno == errno.ECONNRESET:
                        break
            print "Thread {0} closing client socket".format(self.id)
            self.conn.close()
            self.conn = None
            # Maybe the pool needs to be resized
            self.pool.requestResize()
        print "Thread {0} dying".format(self.id)


print "--- Preparing thread pool..."
workerPool = Pool()

print "--- Creating CTRL-C signal handler..."
def signalHandler(signal, frame):
    # CTRL-C triggers the same cooperative shutdown as KILL_SERVICE.
    print "Server received CTRL-C, nuking all threads"
    workerPool.kill()
signal.signal(signal.SIGINT, signalHandler)

print "--- TCP server starting..."
serverThread = Server(workerPool)
serverThread.start()

print "--- Server is ready!"
# Main thread idles until a shutdown is requested, then joins every worker.
while True:
    if workerPool.killRequested:
        for worker in workerPool.workers:
            worker.join()
        break
nilq/baby-python
python
import os
import sys
import json
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *


class WindowObj6CornernetLiteModelParam(QtWidgets.QWidget):
    """Wizard page for picking the CornerNet-Lite model variant.

    Loads/saves its state in ``obj_6_cornernet_lite.json`` and emits one of
    two signals so the surrounding wizard can navigate between pages.
    """

    # Emitted by backward() — previous wizard page (validation-data params).
    backward_6_cornernet_lite_valdata_param = QtCore.pyqtSignal();
    # Emitted by forward() — next wizard page (hyper-parameters).
    forward_hyper_param = QtCore.pyqtSignal();

    def __init__(self):
        super().__init__()
        self.title = 'Cornernet Lite - Model Param'
        self.left = 100
        self.top = 100
        self.width = 500
        self.height = 400
        # Load persisted settings before building widgets so initUI can
        # preselect the saved model in the combo box.
        self.load_cfg();
        self.initUI()

    def load_cfg(self):
        # Restore previously saved wizard state, if any.
        # NOTE(review): if the file is missing, self.system is never set and
        # initUI/forward/backward will raise AttributeError — verify callers
        # guarantee the file exists.
        if(os.path.isfile("obj_6_cornernet_lite.json")):
            with open('obj_6_cornernet_lite.json') as json_file:
                self.system = json.load(json_file)

    def initUI(self):
        """Build the static widget layout (buttons, label, model combo box)."""
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height);

        # Forward
        self.b1 = QPushButton('Next', self)
        self.b1.move(300,350)
        self.b1.clicked.connect(self.forward)

        # Backward
        self.b2 = QPushButton('Back', self)
        self.b2.move(200,350)
        self.b2.clicked.connect(self.backward)

        # Quit
        self.b3 = QPushButton('Quit', self)
        self.b3.move(400,350)
        self.b3.clicked.connect(self.close)

        self.l1 = QLabel(self);
        self.l1.setText("1. Model :");
        self.l1.move(20, 20);

        self.cb1 = QComboBox(self);
        self.models = ["CornerNet_Saccade", "CornerNet_Squeeze"];
        self.cb1.addItems(self.models);
        # Preselect the model stored in the config, when it is one of ours.
        index = self.cb1.findText(self.system["model"], QtCore.Qt.MatchFixedString)
        if index >= 0:
            self.cb1.setCurrentIndex(index)
        self.cb1.move(120, 20);

    def forward(self):
        """Persist the chosen model and advance to the hyper-parameter page."""
        self.system["model"] = str(self.cb1.currentText())
        with open('obj_6_cornernet_lite.json', 'w') as outfile:
            json.dump(self.system, outfile)
        self.forward_hyper_param.emit();

    def backward(self):
        """Persist the chosen model and return to the validation-data page."""
        self.system["model"] = str(self.cb1.currentText())
        with open('obj_6_cornernet_lite.json', 'w') as outfile:
            json.dump(self.system, outfile)
        self.backward_6_cornernet_lite_valdata_param.emit();


'''
app = QApplication(sys.argv)
screen = WindowObj6CornernetLiteModelParam()
screen.show()
sys.exit(app.exec_())
'''
nilq/baby-python
python
"""FreeBSD Ports Collection module. This module provides an interface to interact with the FreeBSD Ports Collection, and means of discovering ports therein. """ from os import environ from typing import Callable, ClassVar, List, Optional from pathlib import Path from .make import make, make_var from .port import Port, PortError, PortStub __all__ = ['Ports'] class Ports: """Representation of the FreeBSD Ports Collection.""" _factories: ClassVar[List[Callable[[PortStub], Optional[Port]]]] = [] _ports: ClassVar[List[PortStub]] = [] dir: ClassVar[Path] = Path(environ.get('PORTSDIR', '/usr/ports')) categories = make_var(dir, 'SUBDIR') distdir = Path(environ.get('DISTDIR') or make(dir / 'Mk', '-VDISTDIR', '-fbsd.port.mk').strip()) @staticmethod def _get_port(selector: Callable[[PortStub], bool]) -> Port: if not Ports._ports: Ports._load_ports() ports = [i for i in Ports._ports if selector(i)] if not ports: raise PortError('Ports: no port matches requirement') if len(ports) > 1: raise PortError('Ports: multiple ports match requirement') if isinstance(ports[0], PortStub): portstub = ports[0] for factory in reversed(Ports._factories): port = factory(portstub) if port is not None: Ports._ports[Ports._ports.index(ports[0])] = port break else: raise PortError('Ports: unable to create port from origin \'%s\'' % ports[0].origin) else: assert isinstance(ports[0], Port) port = ports[0] return port @staticmethod def _load_ports() -> None: print('Loading ports collection:') for category in Ports.categories: print('\tLoading category: %s' % category) for name in make_var(Ports.dir / category, 'SUBDIR'): Ports._ports.append(PortStub(category, name)) @staticmethod def get_port_by_name(name: str) -> Port: """Get a port by the specified name.""" return Ports._get_port(lambda i: i.name == name) @staticmethod def get_port_by_origin(origin: str) -> Port: """Get a port by the specified port origin.""" return Ports._get_port(lambda i: i.origin == origin) @staticmethod def factory(factory: 
Callable[[PortStub], Optional[Port]]) -> Callable[[PortStub], Optional[Port]]: """ Decorate a function to register it as being able to load a Port. The factory function will be passed a PortStub instance and, if the factory function can, return a Port instance. If the factory function cannot load the given PortStub then None must be returned. """ Ports._factories.append(factory) return factory
nilq/baby-python
python
from mock import patch
from twisted.trial.unittest import TestCase

from apns.errorresponse import (
    ErrorResponse,
    ErrorResponseInvalidCodeError,
    ErrorResponseInvalidCommandError
)

# Dotted module path used to build patch targets.
MODULE = 'apns.errorresponse.'


class ErrorResponseTestCase(TestCase):
    """Unit tests for ErrorResponse binary (de)serialisation."""

    # Dotted path of the class under test, for patching class attributes.
    CLASS = MODULE + 'ErrorResponse.'

    def test_str(self):
        # str() should render the human-readable error name.
        resp = ErrorResponse()
        resp.name = 'name'
        self.assertEqual(str(resp), '<ErrorResponse: name>')

    @patch(CLASS + 'CODES', {0: 'invalid token'})
    @patch(MODULE + 'struct.unpack')
    def test_properties_set(self, unpack_mock):
        # A valid command/code pair populates code, name and identifier.
        unpack_mock.return_value = ErrorResponse.COMMAND, 0, 'identifier'
        resp = ErrorResponse()
        resp.from_binary_string('stream')
        self.assertEqual(resp.code, 0)
        self.assertEqual(resp.name, 'invalid token')
        self.assertEqual(resp.identifier, 'identifier')

    @patch(MODULE + 'struct.unpack')
    def test_from_binary_string_invalid_command(self, unpack_mock):
        # An unknown command byte must raise the command error.
        unpack_mock.return_value = ErrorResponse.COMMAND + 1, None, None
        resp = ErrorResponse()
        with self.assertRaises(ErrorResponseInvalidCommandError):
            resp.from_binary_string('stream')

    @patch(CLASS + 'CODES', {0: 'invalid token'})
    @patch(MODULE + 'struct.unpack')
    def test_from_binary_string_invalid_code(self, unpack_mock):
        # A code missing from CODES must raise the code error.
        unpack_mock.return_value = ErrorResponse.COMMAND, 1, None
        resp = ErrorResponse()
        with self.assertRaises(ErrorResponseInvalidCodeError):
            resp.from_binary_string('stream')

    @patch(CLASS + 'CODES', {0: 'invalid token'})
    def test_from_binary_string_valid_input(self):
        # Round-trip: to_binary_string followed by from_binary_string.
        resp = ErrorResponse()
        resp.from_binary_string(resp.to_binary_string(0, 123))
        self.assertEqual(resp.code, 0)
        self.assertEqual(resp.name, 'invalid token')
        self.assertEqual(resp.identifier, 123)
nilq/baby-python
python
from typing import List, Optional, Tuple


def warmUp(nums: List[int], target: int) -> Optional[Tuple[int, int]]:
    """Classic two-sum: find two entries of *nums* that add up to *target*.

    Returns the pair of indices ``(i, j)`` with ``i < j`` such that
    ``nums[i] + nums[j] == target``, or ``None`` when no such pair exists.
    Runs in O(n) time using a value -> index map.

    Note: the original annotation claimed ``List[int]`` but the function has
    always returned a tuple (or fell off the end); the annotation is fixed
    here and the no-match case made explicit, with no runtime change.
    """
    numsDict = {}  # value seen so far -> its index
    for index, item in enumerate(nums):
        diff = target - item
        if diff in numsDict:
            return numsDict[diff], index
        numsDict[item] = index
    return None  # no pair sums to target
nilq/baby-python
python
#!/usr/bin/env python # -*- coding: utf-8 -*- try: from setuptools import setup except ImportError: from distutils.core import setup with open('README.rst') as readme_file: readme = readme_file.read() requirements = [ 'pika', 'twisted', 'checkoutmanager', # The 'collectors' branch of chintal's fork # 'tendril', # Install this manually ] test_requirements = [ # TODO: put package test requirements here ] setup( name='tendril-monitor-vcs', version='0.1.0', description="VCS monitoring and documentation generation server using " "Twisted for Tendril", long_description=readme, author="Chintalagiri Shashank", author_email='shashank@chintal.in', url='https://github.com/chintal/tendril-monitor-vcs', packages=[ 'vcs_monitor', ], package_dir={'vcs_monitor': 'vcs_monitor'}, include_package_data=True, install_requires=requirements, license="MIT", zip_safe=False, keywords='tendril-monitor-vcs', classifiers=[ 'Development Status :: 4 - Beta', "License :: OSI Approved :: MIT License", 'Natural Language :: English', 'Programming Language :: Python', ], test_suite='tests', tests_require=test_requirements )
nilq/baby-python
python
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Add function to convert string to bucket Revision ID: 4ec0adada10 Revises: 9177113533 Create Date: 2015-09-06 19:32:50.438462 """ from alembic import op revision = "4ec0adada10" down_revision = "9177113533" def upgrade(): op.execute( """ CREATE FUNCTION sitemap_bucket(text) RETURNS text AS $$ SELECT substring( encode(digest($1, 'sha512'), 'hex') from 1 for 1 ) $$ LANGUAGE SQL IMMUTABLE RETURNS NULL ON NULL INPUT; """ ) def downgrade(): op.execute("DROP FUNCTION sitemap_bucket(text)")
nilq/baby-python
python
from fastapi import APIRouter
from kairon.api.auth import Authentication
from kairon.api.processor import AccountProcessor
from kairon.api.models import Response, User
from fastapi import Depends

router = APIRouter()
auth = Authentication()


@router.get("/details", response_model=Response)
async def get_users_details(current_user: User = Depends(auth.get_current_user)):
    """
    Returns the details of the currently logged-in user.

    The user is resolved from the request's auth credentials by the
    ``get_current_user`` dependency; the full profile is looked up by email.
    """
    return {
        "data": {"user": AccountProcessor.get_complete_user_details(current_user.email)}
    }
nilq/baby-python
python
import time

from slacker import Slacker

from script.util.BaseFSM import BaseFSM
from script.util.misc_util import error_trace


class SlackBotFsm(BaseFSM):
    """Finite state machine modelling a job's slack-notification lifecycle:
    pending -> on going -> finish, with an error state reachable anywhere.
    """

    def __init__(self):
        super().__init__()
        self.add_state('pending', initial_state=True)
        self.add_state('on going')
        self.add_state('finish')
        self.add_state('error')

        self.add_event('raise_error', 'pending', 'error')
        self.add_event('start', 'pending', 'on going')
        self.add_event('raise_error', 'on going', 'error')
        self.add_event('finish', 'on going', 'finish')
        self.add_event('raise_error', 'finish', 'error')

    # NOTE(review): the three methods below call themselves and will recurse
    # infinitely UNLESS BaseFSM.add_event installs same-named instance
    # attributes that shadow these class methods — verify BaseFSM's behavior.
    def start(self):
        self.start()

    def raise_error(self):
        self.raise_error()

    def finish(self):
        self.finish()


def test_slack_bot_fsm():
    # Manual smoke test: walk the FSM through its happy path and print states.
    fsm = SlackBotFsm()
    print(fsm.state)

    fsm.start()
    print(fsm.state)

    fsm.raise_error()
    print(fsm.state)

    fsm.finish()
    print(fsm.state)


class SlackBot:
    """Thin wrapper around slacker for posting messages to one channel."""

    def __init__(self, token_path=None, channel=None):
        self.token_path = token_path
        self.channel = channel
        self.slacker = Slacker(self._get_token(self.token_path))

    def _get_token(self, token_path):
        # NOTE(review): readlines() returns a *list* of lines; Slacker most
        # likely expects a single token string — confirm and strip newline.
        with open(token_path, 'r') as f:
            token = f.readlines()
        return token

    def post_message(self, msg, attachments=None):
        # TODO to make usable
        # When attachments are requested, a placeholder attachment dict is
        # built (demo values) and posted with fixed text; otherwise msg is
        # posted verbatim.
        if attachments:
            attachments_dict = dict()
            attachments_dict['pretext'] = "pretext attachments 블록 전에 나타나는 text"
            attachments_dict['title'] = "title 다른 텍스트 보다 크고 볼드되어서 보이는 title"
            attachments_dict['title_link'] = "https://corikachu.github.io"
            attachments_dict['fallback'] = "클라이언트에서 노티피케이션에 보이는 텍스트 입니다. attachment 블록에는 나타나지 않습니다"
            attachments_dict['text'] = "본문 텍스트! 5줄이 넘어가면 *show more*로 보이게 됩니다."
            attachments_dict['mrkdwn_in'] = ["text", "pretext"]  # fields to render as markdown
            attachments = [attachments_dict]

            self.slacker.chat.post_message(channel=self.channel, text='tetsetseetsetset', attachments=attachments)
        else:
            self.slacker.chat.post_message(self.channel, msg)


def test_SlackBot():
    # Manual smoke test; requires a valid token file and channel.
    bot = SlackBot()
    bot.post_message('hello world')


def deco_slackbot(token_path, channel):
    """Decorator factory: wrap a function so its elapsed wall time is posted
    to slack after each call.  Exceptions from the wrapped function and from
    slack posting are printed, never propagated (best-effort reporting).
    """

    def _deco_slack_bot(func):
        def wrapper(*args, **kwargs):
            start = time.time()
            try:
                ret = func(*args, **kwargs)
            except BaseException as e:
                print(error_trace(e))
                ret = None
            elapse_time = time.time() - start

            try:
                bot = SlackBot(token_path, channel)
                msg = f"in {func.__name__}(), time {elapse_time:.4f}'s elapsed"
                bot.post_message(msg)
            except BaseException as e:
                print(error_trace(e))
                print('slackbot fail to post message')

            return ret

        wrapper.__name__ = func.__name__
        return wrapper

    return _deco_slack_bot
nilq/baby-python
python
#!/usr/bin/env python # -*- coding: utf-8 -*- # # rom.py # # Part of MARK II project. For informations about license, please # see file /LICENSE . # # author: Vladislav Mlejnecký # email: v.mlejnecky@seznam.cz from memitem import memitem import sys import mif class rom(memitem): def __init__(self, baseAddress, size, rom0mif, name): memitem.__init__(self, baseAddress, size, name) self.loadmif(rom0mif) def loadmif(self, fileName): miffile = mif.mif(mif.READ, fileName) if miffile.read() == mif.OK: for item in miffile.outBuff: self.mem[item.address] = item.value else: print "Error in " + self.__name__ + "! Can't can't read input file <" + fileName + ">!" print miffile.errmsg sys.exit(1)
nilq/baby-python
python
#! /usr/bin/env python # -*- coding:utf-8; mode:python -*- from ilcli import Command class FirstDemoCommand(Command): ignore_arguments = ['-b'] def _init_arguments(self): super()._init_arguments() self.add_argument('--foo') class SecondDemoCommand(FirstDemoCommand): ignore_arguments = ['--bar', '--foo'] class ThirdDemoCommand(FirstDemoCommand): ignore_arguments = ['bat'] class Parent(Command): subcommands = [FirstDemoCommand, SecondDemoCommand, ThirdDemoCommand] def _init_arguments(self): self.add_argument('-b', '--bar') self.add_argument('bat') if __name__ == '__main__': exit(Parent().run())
nilq/baby-python
python
r"""UTF-8 sanitizer. Python's UTF-8 parser is quite relaxed, this creates problems when talking with other software that uses stricter parsers. >>> _norm(safe_utf8_decode(b"foobar")) (True, ['f', 'o', 'o', 'b', 'a', 'r']) >>> _norm(safe_utf8_decode(b'X\0Z')) (False, ['X', 65533, 'Z']) >>> _norm(safe_utf8_decode(b'OK')) (True, ['O', 'K']) >>> _norm(safe_utf8_decode(b'X\xF1Y')) (False, ['X', 65533, 'Y']) >>> _norm_str(sanitize_unicode(u'\uD801\uDC01')) [66561] >>> sanitize_unicode(b'qwe') Traceback (most recent call last): ... TypeError: Need unicode string """ ## these give different results in py27 and py35 # >>> _norm(safe_utf8_decode(b'X\xed\xa0\x80Y\xed\xb0\x89Z')) # (False, ['X', 65533, 65533, 65533, 'Y', 65533, 65533, 65533, 'Z']) # >>> _norm(safe_utf8_decode(b'X\xed\xa0\x80\xed\xb0\x89Z')) # (False, ['X', 65533, 65533, 65533, 65533, 65533, 65533, 'Z']) # from __future__ import division, absolute_import, print_function import re import codecs try: unichr except NameError: unichr = chr # noqa unicode = str # noqa def _norm_char(uchr): code = ord(uchr) if code >= 0x20 and code < 0x7f: return chr(code) return code def _norm_str(ustr): return [_norm_char(c) for c in ustr] def _norm(tup): flg, ustr = tup return (flg, _norm_str(ustr)) __all__ = ['safe_utf8_decode'] # by default, use same symbol as 'replace' REPLACEMENT_SYMBOL = unichr(0xFFFD) # 65533 def _fix_utf8(m): """Merge UTF16 surrogates, replace others""" u = m.group() if len(u) == 2: # merge into single symbol c1 = ord(u[0]) c2 = ord(u[1]) c = 0x10000 + ((c1 & 0x3FF) << 10) + (c2 & 0x3FF) return unichr(c) else: # use replacement symbol return REPLACEMENT_SYMBOL _urc = None def sanitize_unicode(u): """Fix invalid symbols in unicode string.""" global _urc if not isinstance(u, unicode): raise TypeError('Need unicode string') # regex for finding invalid chars, works on unicode string if not _urc: rx = u"[\uD800-\uDBFF] [\uDC00-\uDFFF]? 
| [\0\uDC00-\uDFFF]" _urc = re.compile(rx, re.X) # now find and fix UTF16 surrogates m = _urc.search(u) if m: u = _urc.sub(_fix_utf8, u) return u def safe_replace(exc): """Replace only one symbol at a time. Builtin .decode('xxx', 'replace') replaces several symbols together, which is unsafe. """ c2 = REPLACEMENT_SYMBOL # we could assume latin1 #if 0: # c1 = exc.object[exc.start] # c2 = unichr(ord(c1)) return c2, exc.start + 1 # register, it will be globally available codecs.register_error("safe_replace", safe_replace) def safe_utf8_decode(s): """Decode UTF-8 safely. Acts like str.decode('utf8', 'replace') but also fixes UTF16 surrogates and NUL bytes, which Python's default decoder does not do. @param s: utf8-encoded byte string @return: tuple of (was_valid_utf8, unicode_string) """ # decode with error detection ok = True try: # expect no errors by default u = s.decode('utf8') except UnicodeDecodeError: u = s.decode('utf8', 'safe_replace') ok = False u2 = sanitize_unicode(u) if u is not u2: ok = False return (ok, u2)
nilq/baby-python
python
#! /usr/local/bin/python3
# Advent of Code 2020, day 25: recover the card/door loop sizes by brute
# force, then derive the shared encryption key from each side.
import operator
import sys
from collections import deque
from math import prod

# First two lines of stdin are the two public keys.
pubKeys = [int(x) for x in sys.stdin.read().split("\n")[:2]]
subject = [1 for i in pubKeys]
print(pubKeys)
handDivisor = 20201227  # the protocol's prime modulus

# Brute-force each loop size: count how many times 7 must be multiplied
# (mod handDivisor) to reach the corresponding public key.
acc = [0 for i in pubKeys]
for i, k in enumerate(pubKeys):
    while k != subject[i]:
        acc[i] += 1
        subject[i] = (subject[i] * 7) % handDivisor
print(acc)
# The shared key computed from either side must agree (Diffie-Hellman).
# BUGFIX: the second expression previously duplicated the first
# (pow(pubKeys[0], acc[1], ...)) which made the cross-check meaningless;
# it now transforms the *other* public key with the *other* loop size.
print(pow(pubKeys[0], acc[1], handDivisor), pow(pubKeys[1], acc[0], handDivisor))
nilq/baby-python
python
import sys
import os
sys.path.append(os.path.join('..','utils'))
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from utilsRobust import *

######################
### VARIOUS TESTS FOR UTILS ROBUST
######################
# NOTE: Python 2 code (xrange); helpers such as sampleMedian, mestimate and
# the *Model regressions come from the utilsRobust star import.


def test_mestimate():
    # Visual check: compare mean/std, median/MAD and the M-estimates of
    # location/scale on normal data polluted with large positive outliers.
    mean = 0
    std = 5
    x = np.arange(1000)
    y = np.random.normal(mean, std, x.size)
    ones = np.ones(shape=(x.size))
    # add large outliers
    numOutliers = 450
    for i in xrange(0, numOutliers):
        index = np.random.randint(0, x.size)
        y[index] = np.random.randint(std*4, std*20)
    # compute mean
    mean = np.average(y)
    standardDev = np.std(y)
    # compute mad
    med = sampleMedian(y)
    mad = sampleMAD(y)
    # mestimates
    mestLocation, mestScale = mestimate(y)
    # plot
    plt.figure()
    plt.scatter(x, y, color='y')
    plt.plot(x, ones*mean, lw = 2, color="b", label="mean")
    plt.plot(x, ones*standardDev, lw = 2, color="b", ls="dashed")
    plt.plot(x, ones*med, lw = 2, color="g", label="median")
    plt.plot(x, ones*mad, lw = 2, color="g", ls="dashed")
    plt.plot(x, ones*mestLocation, lw = 2, color="r", label="mest")
    plt.plot(x, ones*mestScale, lw = 2, color="r", ls="dashed")
    plt.legend()
    plt.show()


def test_mestimateModel():
    # Fit a 2-regressor linear model with M-estimation on outlier-laden data
    # and print the recovered coefficients.
    # let's generate some data
    x = np.arange(1000)
    y = np.arange(-50, 50, 0.1)
    # create a linear function of this
    z = 2.5*x + y
    # let's add some noise
    mean = 0
    std = 3
    noise = np.random.normal(0, 3, x.size)
    # print noise.shape
    z = z + noise
    # now add some outliers
    numOutliers = 80
    for i in xrange(0, numOutliers):
        index = np.random.randint(0, x.size)
        z[index] = np.random.randint(std*4, std*20)
    A = np.transpose(np.vstack((x, y)))
    # now try and do a robust regression
    components = mestimateModel(A, z)
    print components
    # plt.figure()
    # plt.plot()


def testRobustRegression():
    # Compare several robust 1-D regressions (M, MM, Chatterjee-Machler and
    # its modification) on data with outliers in both x and y.
    # random seed
    np.random.seed(0)
    # the function
    x = np.arange(150)
    y = 12 + 0.5*x
    # noise
    mean = 0
    std = 3
    noise = np.random.normal(mean, 3*std, x.size)
    # add noise
    yNoise = y + noise
    # now add some outliers
    numOutliers = 30
    for i in xrange(0, numOutliers):
        index = np.random.randint(0, x.size)
        yNoise[index] = yNoise[index] + np.random.randint(-1000, 1000)
    # now add some outliers
    xNoise = np.array(x)
    numOutliers = 30
    for i in xrange(0, numOutliers):
        index = np.random.randint(0, x.size)
        xNoise[index] = x[index] + np.random.randint(-5000, 5000)
    xNoise = xNoise.reshape((x.size,1))
    # lets use m estimate
    paramsM, residsM, scaleM, weightsM = mestimateModel(xNoise, yNoise, intercept=True)
    # lets use mm estimate
    paramsMM, residsMM, scaleMM, weightsMM = mmestimateModel(xNoise, yNoise, intercept=True)
    # lets test chatterjee machler
    paramsCM, residsCM, weightsCM = chatterjeeMachler(xNoise, yNoise, intercept=True)
    # lets test chatterjee machler mod
    paramsModCM, residsModCM, weightsModCM = chatterjeeMachlerMod(xNoise, yNoise, intercept=True)

    # let's plot Pdiag
    # NOTE(review): Pdiag is not defined anywhere in this function — this
    # histogram call will raise NameError; verify what was meant here.
    plt.figure()
    n, bins, patches = plt.hist(Pdiag, 50, normed=0, facecolor='green', alpha=0.75)

    # try and predict
    yM = paramsM[0] + paramsM[1]*x
    yMM = paramsMM[0] + paramsMM[1]*x
    yCM = paramsCM[0] + paramsCM[1]*x
    yCM_mod = paramsModCM[0] + paramsModCM[1]*x

    plt.figure()
    plt.scatter(x, y, marker="s", color="black")
    plt.scatter(xNoise, yNoise)
    plt.plot(x, yM)
    plt.plot(x, yMM)
    plt.plot(x, yCM)
    plt.plot(x, yCM_mod)
    plt.legend(["M estimate", "MM estimate", "chatterjeeMachler", "chatterjeeMachlerMod"], loc="lower left")
    plt.show()


def testRobustRegression2D():
    # Same comparison in 2 regressors (a trend plus a cosine), plotted in 3D.
    # random seed
    np.random.seed(0)
    numPts = 300
    # the function
    x1 = np.arange(numPts, dtype="float")
    x2 = 10*np.cos(2.0*np.pi*10*x1/np.max(x1))
    y = 12 + 0.5*x1 + 3*x2
    # noise
    mean = 0
    std = 3
    noise = np.random.normal(mean, 3*std, numPts)
    # add noise
    yNoise = y + noise
    # now add some outliers
    numOutliers = 140
    for i in xrange(0, numOutliers):
        index = np.random.randint(0, numPts)
        yNoise[index] = yNoise[index] + np.random.randint(-100, 100)
    # now add some outliers
    x1Noise = np.array(x1)
    x2Noise = np.array(x2)
    numOutliers = 5
    for i in xrange(0, numOutliers):
        index = np.random.randint(0, numPts)
        x1Noise[index] = x1[index] + np.random.randint(-500, 500)
        index = np.random.randint(0, numPts)
        x2Noise[index] = x2[index] + np.random.randint(-500, 500)

    x1Noise = x1Noise.reshape((x1.size,1))
    x2Noise = x2Noise.reshape((x2.size,1))
    X = np.hstack((x1Noise, x2Noise))
    # lets use m estimate
    paramsM, residsM, scaleM, weightsM = mestimateModel(X, yNoise, intercept=True)
    # lets use mm estimate
    paramsMM, residsMM, scaleMM, weightsMM = mmestimateModel(X, yNoise, intercept=True)
    # lets test chatterjee machler
    paramsCM, residsCM, weightsCM = chatterjeeMachler(X, yNoise, intercept=True)
    # lets test chatterjee machler mod
    paramsModCM, residsModCM, weightsModCM = chatterjeeMachlerMod(X, yNoise, intercept=True)
    # lets test chatterjee machler hadi
    paramsCMHadi, residsCMHadi, weightsCMHadi = chatterjeeMachlerHadi(X, yNoise, intercept=True)

    # try and predict
    yM = paramsM[0] + paramsM[1]*x1 + paramsM[2]*x2
    yMM = paramsMM[0] + paramsMM[1]*x1 + paramsMM[2]*x2
    yCM = paramsCM[0] + paramsCM[1]*x1 + paramsCM[2]*x2
    yCM_mod = paramsModCM[0] + paramsModCM[1]*x1 + paramsModCM[2]*x2
    yCM_Hadi = paramsCMHadi[0] + paramsCMHadi[1]*x1 + paramsCMHadi[2]*x2

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(x1, x2, y, marker="s", color="black")
    ax.scatter(x1Noise, x2Noise, yNoise, marker="*", s=50, color="goldenrod")
    # plt.plot(x1, x2, zs=yM)
    plt.plot(x1, x2, zs=yMM)
    # plt.plot(x1, x2, zs=yCM)
    plt.plot(x1, x2, zs=yCM_mod)
    # plt.plot(x1, x2, zs=yCM_Hadi)
    # plt.legend(["M estimate", "MM estimate", "chatterjeeMachler", "chatterjeeMachlerMod", "chatterjeeMachlerHadi"], loc="lower left")
    plt.legend(["MM estimate", "chatterjeeMachlerMod"], loc="lower left")
    plt.show()


#test_mestimate()
# test_mestimateModel()
# testRobustRegression()
testRobustRegression2D()
nilq/baby-python
python
import numpy as np

from scratch.abstract import AbstractModel


class PCA(AbstractModel):
    """Principal component analysis via the eigendecomposition of the
    covariance matrix of column-standardized data.

    fit() stores the scaled data (``Xscale``), the eigenvalues sorted in
    decreasing order (``eigenvalues``) and the matching row-wise
    eigenvectors (``eigenvectors``); predict() projects the training data
    onto those components.
    """

    def __init__(self):
        pass

    @staticmethod
    def normalizing(v):
        """Standardize a single column to zero mean and unit variance."""
        return (v - np.mean(v)) / np.std(v)

    def fit(self, X):
        """Compute the principal components of DataFrame *X*."""
        raw = X.to_numpy()
        # standardize each feature column independently
        self.Xscale = np.apply_along_axis(self.normalizing, 0, raw)
        # covariance matrix of the standardized features
        covariance = np.cov(self.Xscale.T)
        # eigendecomposition; transpose so each *row* is an eigenvector
        eig_vals, eig_vecs = np.linalg.eig(covariance)
        eig_vecs = eig_vecs.T
        # order the components by decreasing explained variance
        order = np.flip(np.argsort(eig_vals))
        self.eigenvalues = eig_vals[order]
        self.eigenvectors = eig_vecs[order]

    def predict(self):
        """Project the (scaled) training data onto the principal axes."""
        return np.dot(self.Xscale, self.eigenvectors.T)
nilq/baby-python
python
from django.db import models


class Like(models.Model):
    """A 'like' record tying a display name to a numeric user id."""

    # BUGFIX: base class was `model.Models` — a NameError, since the module
    # is imported as `models` and the base class is `models.Model`.
    uid = models.IntegerField()
    # BUGFIX: CharField requires max_length; without it Django's system
    # checks fail (fields.E120).
    name = models.CharField(max_length=255)

# Create your models here.
nilq/baby-python
python
import numpy as np
import pytest
import tensorflow as tf
from tensorflow.keras.layers import Dense, Input, InputLayer

from alibi_detect.cd.preprocess import UAE, HiddenOutput, pca

# Shared fixture data: n samples of n_features random floats.
n, n_features, n_classes, latent_dim, n_hidden = 100, 10, 5, 2, 7
shape = (n_features,)
X = np.random.rand(n * n_features).reshape(n, n_features).astype('float32')

# A minimal encoder mapping n_features -> latent_dim, used by the UAE tests.
encoder_net = tf.keras.Sequential(
    [
        InputLayer(input_shape=(n_features,)),
        Dense(latent_dim)
    ]
)

# UAE is exercised both with an explicit encoder and with just an
# encoding dimension (letting UAE build its own encoder).
tests_uae = [encoder_net, latent_dim]
n_tests_uae = len(tests_uae)


@pytest.fixture
def uae_params(request):
    return tests_uae[request.param]


@pytest.mark.parametrize('uae_params', list(range(n_tests_uae)), indirect=True)
def test_uae(uae_params):
    # The untrained autoencoder must project X down to latent_dim features.
    enc = uae_params
    if isinstance(enc, tf.keras.Sequential):
        encoder_net, enc_dim = enc, None
    elif isinstance(enc, int):
        encoder_net, enc_dim = None, enc
    X_enc = UAE(encoder_net=encoder_net, shape=X.shape[1:], enc_dim=enc_dim)(X)
    assert X_enc.shape == (n, latent_dim)


class Model1(tf.keras.Model):
    # Subclassed two-layer softmax classifier (no static input signature).
    def __init__(self):
        super(Model1, self).__init__()
        self.dense1 = Dense(n_hidden)
        self.dense2 = Dense(n_classes, activation='softmax')

    def call(self, x: np.ndarray) -> tf.Tensor:
        x = self.dense1(x)
        return self.dense2(x)


def model2():
    # Same architecture built with the functional API (static input shape).
    x_in = Input(shape=shape)
    x = Dense(n_hidden)(x_in)
    x_out = Dense(n_classes, activation='softmax')(x)
    return tf.keras.models.Model(inputs=x_in, outputs=x_out)


# (model id, layer index, input_shape) combinations for HiddenOutput;
# the subclassed model (1) needs an explicit input_shape.
tests_hidden_output = [
    (1, -2, shape),
    (1, -1, shape),
    (2, -2, None),
    (2, -1, None),
    (2, -1, shape)
]
n_tests_hidden_output = len(tests_hidden_output)


@pytest.fixture
def hidden_output_params(request):
    return tests_hidden_output[request.param]


@pytest.mark.parametrize('hidden_output_params', list(range(n_tests_hidden_output)), indirect=True)
def test_hidden_output(hidden_output_params):
    # Extracting layer -2 yields the hidden activations, -1 the softmax.
    model, layer, input_shape = hidden_output_params
    model = Model1() if model == 1 else model2()
    X_hidden = HiddenOutput(model=model, layer=layer, input_shape=input_shape)(X)
    if layer == -2:
        assert X_hidden.shape == (n, n_hidden)
    elif layer == -1:
        assert X_hidden.shape == (n, n_classes)


# Requested numbers of principal components for the pca preprocessor.
tests_pca = [2, 4]
n_tests_pca = len(tests_pca)


@pytest.fixture
def pca_params(request):
    return tests_pca[request.param]


@pytest.mark.parametrize('pca_params', list(range(n_tests_pca)), indirect=True)
def test_pca(pca_params):
    # pca must reduce the trailing dimension to n_components.
    n_components = pca_params
    X_pca = pca(X, n_components)
    assert X_pca.shape[-1] == n_components
nilq/baby-python
python
# Generated by Django 2.1.3 on 2019-02-27 15:31 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('queueapp', '0007_auto_20190220_1642'), ] operations = [ migrations.AddField( model_name='queue', name='pause_and_clear', field=models.BooleanField(default=False), ), ]
nilq/baby-python
python
import re
import sys
import unittest
from line import *
from canonicalLine import *
from degenerateLine import *
from lineClassifier import *
import importlib

# Script: render the daylight_1_4 ELAN (.eaf) test file as an IJAL-styled
# HTML page and open it in the default browser.
# NOTE(review): pd, etree, Doc, indent and os are presumably re-exported by
# the star imports above (pandas, lxml.etree, yattag) — verify.

pd.set_option('display.width', 1000)

filename = "../testData/daylight_1_4.eaf"
xmlDoc = etree.parse(filename)
# Each ALIGNABLE_ANNOTATION in a TIER is one spoken line.
lineCount = len(xmlDoc.findall("TIER/ANNOTATION/ALIGNABLE_ANNOTATION"))
assert(lineCount == 4)

htmlDoc = Doc()
htmlDoc.asis('<!DOCTYPE html>')
with htmlDoc.tag('html', lang="en"):
    with htmlDoc.tag('head'):
        htmlDoc.asis('<meta charset="UTF-8">')
        htmlDoc.asis('<link rel="stylesheet" href="ijal.css">')
    with htmlDoc.tag('body'):
        for i in range(lineCount):
            x = Line(xmlDoc, i)
            with htmlDoc.tag("div", klass="line-wrapper"):
                # Sidebar: line number plus a speaker icon.
                with htmlDoc.tag("div", klass="line-sidebar"):
                    htmlDoc.text("%d)" % (i + 1))
                    htmlDoc.asis('<img src="https://www.americanlinguistics.org/wp-content/uploads/speaker.png"></img>')
                # Pick the renderer according to the line's structure.
                classifier = LineClassifier(x.getTable())
                classification = classifier.run()
                print("%d: %s" % (i, classification))
                if(classification == "CanonicalLine"):
                    xc = CanonicalLine(xmlDoc, i)
                    xc.toHtml(htmlDoc)
                elif(classification == "DegenerateLine"):
                    xd = DegenerateLine(xmlDoc, i)
                    xd.toHtml(htmlDoc)
            htmlDoc.asis("<p><hr><p>")

htmlText = htmlDoc.getvalue()
filename = "daylight.html"
f = open(filename, "w")
f.write(indent(htmlText))
f.close()
# macOS-specific: open the rendered page in the default browser.
os.system("open %s" % filename)
nilq/baby-python
python
# pylint:disable=missing-module-docstring,missing-class-docstring,missing-function-docstring from .base import compare_template, SimpleTestCase class CopyButtonTest(SimpleTestCase): maxDiff = None def test_rendered(self): template = """ {% load carbondesign %} {% CopyButton %} """ expected = """ <button data-copy-btn class="bx--copy-btn" type="button" tabindex="0"> <span class="bx--assistive-text bx--copy-btn__feedback">Copied!</span> <svg focusable="false" preserveAspectRatio="xMidYMid meet" xmlns="http://www.w3.org/2000/svg" fill="currentColor" class="bx--snippet__icon" width="16" height="16" viewBox="0 0 32 32" aria-hidden="true"> <path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z"></path> <path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z"></path> </svg> </button> """ rendered = compare_template(template, expected) self.assertEqual(*rendered)
nilq/baby-python
python
# These should probably all live in separate files from ..tensorboard_writer import TensorboardWriter from allennlp.training.callbacks.events import Events from allennlp.training.callbacks.callback import Callback, handle_event from allennlp.common.params import Params import logging from typing import Set, Dict, TYPE_CHECKING if TYPE_CHECKING: from allennlp.training.callback_trainer import CallbackTrainer logger = logging.getLogger(__name__) @Callback.register('tensorboard_logging') class TensorboardLogger(Callback): def __init__(self, tensorboard: TensorboardWriter): self.tensorboard = tensorboard @handle_event(Events.TRAINING_START) def training_start(self, trainer: "CallbackTrainer") -> None: # This is an ugly hack to get the tensorboard instance to know about the trainer, because # the callbacks are defined before the trainer. self.tensorboard._get_batch_num_total = lambda: trainer.batch_num_total @handle_event(Events.BATCH_END) def batch_end_logging(self, trainer: "CallbackTrainer"): if self.tensorboard.should_log_this_batch(): self.tensorboard.log_histograms(trainer.model) self.tensorboard.log_scalars(trainer.model) @classmethod def from_params( # type: ignore cls, serialization_dir: str, params: Params) -> "TensorboardLogger": tensorboard = TensorboardWriter.from_params( params=params, serialization_dir=serialization_dir, get_batch_num_total=lambda: None) return cls(tensorboard)
nilq/baby-python
python
# SQLAlchemy models for an event/stream store with polymorphic ("vertical")
# trait storage: each trait row stores its value in one of several typed
# columns, selected by a small type tag.
# NOTE(review): this module is Python 2 code (`unicode`, `str.decode`) --
# confirm the target interpreter before modernizing.
from datetime import datetime
from decimal import Decimal
import calendar
from enum import IntEnum
import timex
from sqlalchemy import event
from sqlalchemy import and_, or_
from sqlalchemy import literal_column
from sqlalchemy import Column, Table, ForeignKey, Index, UniqueConstraint
from sqlalchemy import Float, Boolean, Text, DateTime, Integer, String
from sqlalchemy import cast, null, case
from sqlalchemy.orm.interfaces import PropComparator
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.dialects.mysql import DECIMAL
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.orm import composite
from sqlalchemy.orm import backref
from sqlalchemy.orm import relationship
from sqlalchemy.orm.collections import attribute_mapped_collection
from sqlalchemy.types import TypeDecorator, DATETIME


class Datatype(IntEnum):
    # Tag stored in the `type` column selecting which value column is live.
    none = 0
    string = 1
    int = 2
    float = 3
    datetime = 4
    timerange = 5


class StreamState(IntEnum):
    # Lifecycle states for a Stream row.
    active = 1
    firing = 2
    expiring = 3
    error = 4
    expire_error = 5
    completed = 6
    retry_fire = 7
    retry_expire = 8


class DBException(Exception):
    pass


class InvalidTraitType(DBException):
    pass


def dt_to_decimal(dt):
    """Convert a (UTC) datetime to a Decimal of epoch seconds with microseconds."""
    t_sec = calendar.timegm(dt.utctimetuple()) + (dt.microsecond/1e6)
    return Decimal("%.6f" % t_sec)


def decimal_to_dt(decimal_timestamp):
    """Inverse of dt_to_decimal: Decimal epoch seconds -> naive UTC datetime."""
    return datetime.utcfromtimestamp(float(decimal_timestamp))


class PreciseTimestamp(TypeDecorator):
    """Represents a timestamp precise to the microsecond."""

    impl = DATETIME

    def load_dialect_impl(self, dialect):
        # MySQL DATETIME has no sub-second precision (historically), so store
        # a DECIMAL of epoch seconds there instead.
        if dialect.name == 'mysql':
            return dialect.type_descriptor(DECIMAL(precision=20,
                                                   scale=6,
                                                   asdecimal=True))
        return dialect.type_descriptor(DATETIME())

    def process_bind_param(self, value, dialect):
        if value is None:
            return value
        elif dialect.name == 'mysql':
            return dt_to_decimal(value)
        return value

    def process_result_value(self, value, dialect):
        if value is None:
            return value
        elif dialect.name == 'mysql':
            return decimal_to_dt(value)
        return value


class DBTimeRange(object):
    """Plain begin/end pair used as a SQLAlchemy composite value."""

    def __init__(self, begin, end):
        self.begin = begin
        self.end = end

    def __composite_values__(self):
        return self.begin, self.end

    def __repr__(self):
        return "DBTimeRange(begin=%r, end=%r)" % (self.begin, self.end)

    def __eq__(self, other):
        return isinstance(other, DBTimeRange) and \
            other.begin == self.begin and \
            other.end == self.end

    def __ne__(self, other):
        return not self.__eq__(other)


class ProxiedDictMixin(object):
    """Adds obj[name] access to a mapped class.

    This class basically proxies dictionary access to an attribute
    called ``_proxied``. The class which inherits this class
    should have an attribute called ``_proxied`` which points to a dictionary.
    """

    def __len__(self):
        return len(self._proxied)

    def __iter__(self):
        return iter(self._proxied)

    def __getitem__(self, name):
        return self._proxied[name]

    def __contains__(self, name):
        return name in self._proxied

    def __setitem__(self, name, value):
        self._proxied[name] = value

    def __delitem__(self, name):
        del self._proxied[name]


class PolymorphicVerticalProperty(object):
    """A name/value pair with polymorphic value storage."""

    # Subclasses map each Datatype to the column attribute that stores it.
    ATTRIBUTE_MAP = {Datatype.none: None}
    # Python type -> Datatype tag (Python 2: `unicode` is the text type).
    PY_TYPE_MAP = {unicode: Datatype.string,
                   int: Datatype.int,
                   float: Datatype.float,
                   datetime: Datatype.datetime,
                   DBTimeRange: Datatype.timerange}

    def __init__(self, name, value=None):
        self.name = name
        self.value = value

    @classmethod
    def get_type_value(cls, value):
        """Normalize a raw value to (Datatype tag, storable value).

        Returns (None, value) for unsupported types so callers can reject.
        """
        if value is None:
            return Datatype.none, None
        if isinstance(value, str):
            value = value.decode('utf8', 'ignore')
        if isinstance(value, timex.Timestamp):
            value = value.timestamp
        if isinstance(value, timex.TimeRange):
            value = DBTimeRange(value.begin, value.end)
        if type(value) in cls.PY_TYPE_MAP:
            return cls.PY_TYPE_MAP[type(value)], value
        return None, value

    @hybrid_property
    def value(self):
        # Read the value back out of whichever typed column the tag selects.
        if self.type not in self.ATTRIBUTE_MAP:
            raise InvalidTraitType("Invalid trait type in db for %s: %s"
                                   % (self.name, self.type))
        attribute = self.ATTRIBUTE_MAP[self.type]
        if attribute is None:
            return None
        if self.type == Datatype.timerange:
            val = getattr(self, attribute)
            return timex.TimeRange(val.begin, val.end)
        else:
            return getattr(self, attribute)

    @value.setter
    def value(self, value):
        datatype, value = self.get_type_value(value)
        if datatype not in self.ATTRIBUTE_MAP:
            raise InvalidTraitType("Invalid trait type for %s: %s"
                                   % (self.name, datatype))
        attribute = self.ATTRIBUTE_MAP[datatype]
        self.type = int(datatype)
        if attribute is not None:
            setattr(self, attribute, value)

    @value.deleter
    def value(self):
        # NOTE(review): no `_set_value` is defined in this module -- this
        # deleter looks like it would raise AttributeError; confirm intent.
        self._set_value(None)

    @value.comparator
    class value(PropComparator):
        """A comparator for .value, builds a polymorphic comparison.
        """
        def __init__(self, cls):
            self.cls = cls

        def __eq__(self, other):
            dtype, value = self.cls.get_type_value(other)
            if dtype is None:
                dtype = Datatype.string
            if dtype == Datatype.none:
                return self.cls.type == int(Datatype.none)
            attr = getattr(self.cls, self.cls.ATTRIBUTE_MAP[dtype])
            return and_(attr == value, self.cls.type == int(dtype))

        def __ne__(self, other):
            dtype, value = self.cls.get_type_value(other)
            if dtype is None:
                dtype = Datatype.string
            if dtype == Datatype.none:
                return self.cls.type != int(Datatype.none)
            attr = getattr(self.cls, self.cls.ATTRIBUTE_MAP[dtype])
            return and_(attr != value, self.cls.type == int(dtype))

    def __repr__(self):
        return '<%s %r=%r>' % (self.__class__.__name__, self.name, self.value)


Base = declarative_base()


class Trait(PolymorphicVerticalProperty, Base):
    """Per-event key/value attribute, stored vertically by type."""

    __tablename__ = 'trait'
    __table_args__ = (
        Index('ix_trait_t_int', 't_int'),
        Index('ix_trait_t_string', 't_string'),
        Index('ix_trait_t_datetime', 't_datetime'),
        Index('ix_trait_t_float', 't_float'),
    )
    event_id = Column(Integer, ForeignKey('event.id'), primary_key=True)
    name = Column(String(100), primary_key=True)
    type = Column(Integer)

    ATTRIBUTE_MAP = {Datatype.none: None,
                     Datatype.string: 't_string',
                     Datatype.int: 't_int',
                     Datatype.float: 't_float',
                     Datatype.datetime: 't_datetime',}

    t_string = Column(String(255), nullable=True, default=None)
    t_float = Column(Float, nullable=True, default=None)
    t_int = Column(Integer, nullable=True, default=None)
    t_datetime = Column(PreciseTimestamp(), nullable=True, default=None)

    def __repr__(self):
        return "<Trait(%s) %s=%s/%s/%s/%s on %s>" % (self.name,
                                                     self.type,
                                                     self.t_string,
                                                     self.t_float,
                                                     self.t_int,
                                                     self.t_datetime,
                                                     self.event_id)


class EventType(Base):
    """Types of event records."""
    __tablename__ = 'event_type'

    id = Column(Integer, primary_key=True)
    desc = Column(String(255), unique=True)

    def __init__(self, event_type):
        self.desc = event_type

    def __repr__(self):
        return "<EventType: %s>" % self.desc


class Event(ProxiedDictMixin, Base):
    """An event row; traits are exposed dict-style via the association proxy."""

    __tablename__ = 'event'
    __table_args__ = (
        Index('ix_event_message_id', 'message_id'),
        Index('ix_event_type_id', 'event_type_id'),
        Index('ix_event_generated', 'generated')
    )
    id = Column(Integer, primary_key=True)
    message_id = Column(String(50), unique=True)
    generated = Column(PreciseTimestamp())

    event_type_id = Column(Integer, ForeignKey('event_type.id'))
    event_type = relationship("EventType", backref=backref('event_type'))

    traits = relationship("Trait",
                          collection_class=attribute_mapped_collection('name'))
    _proxied = association_proxy("traits", "value",
                                 creator=lambda name, value: Trait(name=name,
                                                                   value=value))

    @property
    def event_type_string(self):
        return self.event_type.desc

    @property
    def as_dict(self):
        # Flatten traits plus the core columns into a plain dict.
        d = dict(self._proxied)
        d['message_id'] = self.message_id
        d['event_type'] = self.event_type_string
        d['timestamp'] = self.generated
        return d

    def __init__(self, message_id, event_type, generated):
        self.message_id = message_id
        self.event_type = event_type
        self.generated = generated

    def __repr__(self):
        return "<Event %s ('Event : %s %s, Generated: %s')>" % (self.id,
                                                                self.message_id,
                                                                self.event_type,
                                                                self.generated)


# Association table: many-to-many between streams and events.
stream_event_table = Table('streamevent', Base.metadata,
                           Column('stream_id', Integer,
                                  ForeignKey('stream.id'),
                                  primary_key=True),
                           Column('event_id', Integer,
                                  ForeignKey('event.id'),
                                  primary_key=True)
)


class Stream(ProxiedDictMixin, Base):
    """A named group of events with fire/expire timers and a state machine."""

    __tablename__ = 'stream'
    __table_args__ = (
        Index('ix_stream_name', 'name'),
        Index('ix_stream_state', 'state'),
        Index('ix_stream_expire_timestamp', 'expire_timestamp'),
        Index('ix_stream_fire_timestamp', 'fire_timestamp')
    )
    id = Column(Integer, primary_key=True)
    first_event = Column(PreciseTimestamp(), nullable=False)
    last_event = Column(PreciseTimestamp(), nullable=False)
    expire_timestamp = Column(PreciseTimestamp())
    fire_timestamp = Column(PreciseTimestamp())
    name = Column(String(255), nullable=False)
    state = Column(Integer, default=StreamState.active, nullable=False)
    state_serial_no = Column(Integer, default=0, nullable=False)

    distinguished_by = relationship("DistinguishingTrait",
                                    cascade="save-update, merge, delete, delete-orphan",
                                    collection_class=attribute_mapped_collection('name'))
    _proxied = association_proxy("distinguished_by", "value",
                                 creator=lambda name, value: DistinguishingTrait(
                                     name=name,
                                     value=value))

    events = relationship(Event, secondary=stream_event_table,
                          order_by=Event.generated)

    @property
    def distinguished_by_dict(self):
        return dict(self._proxied)

    def __init__(self, name, first_event, last_event=None,
                 expire_timestamp=None,
                 fire_timestamp=None, state=None, state_serial_no=None):
        self.name = name
        self.first_event = first_event
        # A new stream's last event defaults to its first.
        if last_event is None:
            last_event = first_event
        self.last_event = last_event
        self.expire_timestamp = expire_timestamp
        self.fire_timestamp = fire_timestamp
        if state is None:
            state = StreamState.active
        self.state = int(state)
        if state_serial_no is None:
            state_serial_no = 0
        self.state_serial_no = state_serial_no


class DistinguishingTrait(PolymorphicVerticalProperty, Base):
    """Per-stream key/value attribute; also supports time-range values."""

    __tablename__ = 'dist_trait'
    __table_args__ = (
        Index('ix_dist_trait_dt_int', 'dt_int'),
        Index('ix_dist_trait_dt_float', 'dt_float'),
        Index('ix_dist_trait_dt_string', 'dt_string'),
        Index('ix_dist_trait_dt_datetime', 'dt_datetime'),
        Index('ix_dist_trait_dt_timerange_begin', 'dt_timerange_begin'),
        Index('ix_dist_trait_dt_timerange_end', 'dt_timerange_end'),
    )
    stream_id = Column(Integer, ForeignKey('stream.id'), primary_key=True)
    name = Column(String(100), primary_key=True)
    type = Column(Integer)

    ATTRIBUTE_MAP = {Datatype.none: None,
                     Datatype.string: 'dt_string',
                     Datatype.int: 'dt_int',
                     Datatype.float: 'dt_float',
                     Datatype.datetime: 'dt_datetime',
                     Datatype.timerange:'dt_timerange', }

    dt_string = Column(String(255), nullable=True, default=None)
    dt_float = Column(Float, nullable=True, default=None)
    dt_int = Column(Integer, nullable=True, default=None)
    dt_datetime = Column(PreciseTimestamp(), nullable=True, default=None)
    dt_timerange_begin = Column(PreciseTimestamp(), nullable=True, default=None)
    dt_timerange_end = Column(PreciseTimestamp(), nullable=True, default=None)

    # The timerange value is a composite of the two timestamp columns.
    dt_timerange = composite(DBTimeRange, dt_timerange_begin, dt_timerange_end)

    @property
    def as_dict(self):
        return {self.name: self.value}

    def __repr__(self):
        return "<DistinguishingTrait(%s) %s=%s/%s/%s/%s/(%s to %s) on %s>" % (self.name,
                                                                              self.type,
                                                                              self.dt_string,
                                                                              self.dt_float,
                                                                              self.dt_int,
                                                                              self.dt_datetime,
                                                                              self.dt_timerange_begin,
                                                                              self.dt_timerange_end,
                                                                              self.stream_id)
nilq/baby-python
python
from Step_5.A3C import A3Cagent
from Step_5.Parameter import PARA
from Step_5.A3C_NETWORK import A3C_shared_network


class MainModel:
    """Builds a pool of A3C worker agents sharing one network and starts them.

    Each worker gets its own remote/CNS port offset so multiple simulator
    connections can run concurrently.
    """

    def __init__(self):
        # One shared network instance used by every worker (A3C parameter sharing).
        shared_model = A3C_shared_network().model

        # Create one agent per port offset.
        self.worker = [
            A3Cagent(Remote_ip=PARA.Remote_ip,
                     Remote_port=PARA.Remote_port + i,
                     CNS_ip=PARA.CNS_ip,
                     CNS_port=PARA.CNS_port + i,
                     Shared_net=shared_model)
            for i in range(0, 2)
        ]

        # Start the worker processes.  The original declared a `jobs` list but
        # never filled it; track the started workers in it explicitly.
        self.jobs = []
        for worker in self.worker:
            worker.start()
            self.jobs.append(worker)


if __name__ == '__main__':
    test = MainModel()
nilq/baby-python
python
import profig

from gogetmarvel.comic import Comic
from gogetmarvel.engine import Engine

cfg = profig.Config('gogetmarvel/config.cfg')
cfg.sync()


class Marvel(object):
    """
    Main marvel object connects the engine to its children.
    """

    def __init__(self, private_key=None, public_key=None):
        """
        Entry point of the marvel class.

        Requires the API key and secret provided by marvel
        developer.  Each key independently falls back to the config
        file when not supplied.  (Fix: the original fell back to the
        config for BOTH keys whenever EITHER was missing, silently
        discarding a key the caller did pass.)
        """
        # Per-key fallback to the config file.
        self.public_key = public_key if public_key else cfg['auth.public_key']
        self.private_key = private_key if private_key else cfg['auth.private_key']

        self.engine = Engine(self.public_key, self.private_key)
        self.query_comic = Comic(self.engine)
nilq/baby-python
python
#!/usr/bin/env python #coding:utf-8 import requests import re #下面三行是编码转换的功能,大家现在不用关心。 import sys reload(sys) sys.setdefaultencoding("utf-8") #header是我们自己构造的一个字典,里面保存了user-agent header = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36'} #部分网站对爬虫程序进行过滤,因此爬虫需包含浏览器头部伪装成浏览器 # html = requests.get('http://jp.tingroom.com/yuedu/yd300p/') html = requests.get('http://jp.tingroom.com/yuedu/yd300p/',headers = header) html.encoding = 'utf-8' #这一行是将编码转为utf-8否则中文会显示乱码。 # print html.text # title = re.findall('color:#666666;">(.*?)</span>',html.text,re.S) # for each in title: # print each # chinese = re.findall('color: #039;">(.*?)</a>',html.text,re.S) for each in chinese: print each
nilq/baby-python
python
import repetition

# Homework 3 menu driver: shows the menu and dispatches to the factorial /
# sum-of-odd-numbers helpers in the `repetition` module.
#
# Fix vs. original: out-of-range numbers are now re-validated in a loop.  The
# original re-prompted only once and then DISCARDED the second entry, looping
# back to the first prompt; it also misspelled "Number" in one error message.
menu_selex = 'y'
while menu_selex == 'y':  # This is so that HW Menu is generated
    print('\nHomework 3 Menu\n1-Factorial\n2-Sum odd numbers\n3-Exit')
    selex = int(input('Please select menu item 1, 2 or 3: '))
    if selex == 1:  # This is the factorial part of the assignment
        keep_factoring = 'y'
        while keep_factoring == 'y':
            num = int(input('To calculate the factorial, please enter a whole number greater than 0 but less than 10: '))
            # Keep prompting until the value is in (0, 10).
            while num <= 0 or num >= 10:
                print('Number entered is outside of specified range. Please enter another number in the correct range.')
                num = int(input('To calculate the factorial, please enter a whole number greater than 0 but less than 10: '))
            factorial = repetition.get_factorial(num)
            print('The factorial for', num, 'is', format(factorial, ','))
            keep_factoring = input('Do you want to calculate another factorial (Enter y for yes, n for no): ')
    elif selex == 2:  # This is the sum of odd numbers part of the assignment
        keep_summing = 'y'
        while keep_summing == 'y':
            num2 = int(input('To get sum of odd numbers, please enter a whole number greater than zero but less than 100: '))
            # Keep prompting until the value is in (0, 100).
            while num2 <= 0 or num2 >= 100:
                print('Number entered is outside of specified range. Please enter another number in the correct range.')
                num2 = int(input('To get sum of odd numbers, please enter a whole number greater than zero but less than 100: '))
            total_odds = repetition.sum_odd_numbers(num2)
            print('The sum of all odd numbers up to', num2, 'is', format(total_odds, ','))
            keep_summing = input('Do you want to calculate the sum of odd numbers again (Enter y for yes, n for no): ')
    elif selex == 3:  # This is the exit from the menu part of the assignment
        print('You have chosen to exit.')
    else:
        print("Invalid entry")
    menu_selex = input('Do you want to continue with another menu selection (Enter y for yes, n to exit from Homework 3 Menu): ')
nilq/baby-python
python
# Mission Bit donation site: Flask app wrapping Stripe Checkout for one-time
# and monthly donations, SendGrid receipt/failure emails, and Stripe webhooks.
import os
import re
import sys
import time
import traceback
import logging
import hashlib
from urllib.parse import urlsplit, urlunsplit
from datetime import datetime
from dateutil import tz
from flask import (
    Flask,
    render_template,
    request,
    redirect,
    url_for,
    send_from_directory,
    jsonify,
    abort,
)
from werkzeug.middleware.proxy_fix import ProxyFix
import stripe
import sendgrid
from jsonschema import validate
from parse_cents import parse_cents
from python_http_client import exceptions
from applicationinsights.flask.ext import AppInsights

# On Azure App Service, expose the deployed git revision for telemetry.
try:
    if "WEBSITE_SITE_NAME" in os.environ:
        os.environ["GIT_VERSION"] = open(
            "../repository/.git/refs/heads/master", "r"
        ).read()
except OSError:
    pass

TEST_ENVIRONMENT = os.path.basename(sys.argv[0]) == "pytest"
REDIRECT_TO_WWW = os.environ.get("REDIRECT_TO_WWW") != "false"


def require_env(k: str) -> str:
    """Return environment variable `k`, a TEST_ placeholder under pytest,
    or raise KeyError when missing in a real deployment."""
    v = os.environ.get(k)
    if v is None:
        if TEST_ENVIRONMENT:
            return f"TEST_{k}"
        else:
            raise KeyError(f"Missing required environment variable {k}")
    return v


RECEIPT_TEMPLATE_ID = "d-7e5e6a89f9284d2ab01d6c1e27a180f8"
FAILURE_TEMPLATE_ID = "d-570b4b8b20e74ec5a9c55be7e07e2665"
SENDGRID_API_KEY = require_env("SENDGRID_API_KEY")
DONATE_EMAIL = "donate@missionbit.org"
MONTHLY_PLAN_ID = "mb-monthly-001"
LOCAL_TZ = tz.gettz("America/Los_Angeles")

stripe_keys = {
    "secret_key": require_env("SECRET_KEY"),
    "publishable_key": require_env("PUBLISHABLE_KEY"),
    "endpoint_secret": require_env("WEBHOOK_SIGNING_SECRET"),
}

stripe.api_key = stripe_keys["secret_key"]

CANONICAL_HOSTS = os.environ.get("CANONICAL_HOST", "").split()

# JSON Schema for POST /checkout bodies.
# NOTE(review): the handler also reads body["frequency"], which is neither
# required nor declared here -- a body without it raises KeyError; confirm.
CHECKOUT_SCHEMA = {
    "type": "object",
    "description": "Start the Stripe checkout flow",
    "required": ["amount"],
    "properties": {
        "amount": {
            "type": "integer",
            "description": "USD cents of donation",
            "minimum": 100,
        },
        "metadata": {"type": "object"},
    },
}


def verizonProxyHostFixer(app):
    """Azure's Verizon Premium CDN uses the header X-Host instead of X-Forwarded-Host
    """

    def proxy_fixed_app(environ, start_response):
        # Only trust X-Host when it names one of our canonical hosts.
        x_host = environ.get("HTTP_X_HOST")
        if x_host in CANONICAL_HOSTS:
            environ["HTTP_X_FORWARDED_HOST"] = x_host
        return app(environ, start_response)

    return proxy_fixed_app


app = Flask(__name__)
appinsights = AppInsights(app)
if CANONICAL_HOSTS:
    # Azure's Verizon Premium CDN uses the header X-Host instead of X-Forwarded-Host
    app.wsgi_app = verizonProxyHostFixer(ProxyFix(app.wsgi_app, x_host=1))
streamHandler = logging.StreamHandler()
app.logger.addHandler(streamHandler)
app.logger.setLevel(logging.DEBUG)


def get_telemetry_client():
    """Return the App Insights telemetry client, or None when not configured."""
    requests_middleware = appinsights._requests_middleware
    return requests_middleware.client if requests_middleware else None


def set_default_app_context():
    # Attach site name and git revision to every telemetry record.
    requests_middleware = appinsights._requests_middleware
    if requests_middleware:
        envs = ["WEBSITE_SITE_NAME", "GIT_VERSION"]
        for k in envs:
            v = os.environ.get(k)
            if v:
                requests_middleware._common_properties[k] = v


set_default_app_context()


def merge_dicts(*dicts):
    """Shallow-merge dicts left to right, skipping falsy arguments."""
    rval = {}
    for d in dicts:
        if d:
            rval.update(d)
    return rval


@app.template_filter("asset_url")
def asset_url(path, CACHE={}):
    # Cache-busting filter: appends ?v=<sha1 prefix> to static asset URLs.
    # The mutable CACHE default is intentional -- it memoizes across requests.
    abspath = os.path.abspath(app.root_path + path)
    # Avoid directory traversal mistakes
    if not abspath.startswith(app.static_folder):
        return path
    try:
        # Check that the file exists and use its
        # size and creation time as a cache key to avoid
        # computing a digest on every request
        stat = os.stat(abspath)
        key = stat.st_size, stat.st_mtime
        cached = CACHE.get(path)
        if cached is not None and cached[0] == key:
            return cached[1]
        # Get a SHA1 digest of the file contents
        h = hashlib.sha1()
        with open(abspath, "rb") as f:
            for chunk in iter(lambda: f.read(4096), b""):
                h.update(chunk)
        # Use the prefix of the digest in the URL to ensure
        # the browser will receive the latest version
        rval = "{}?v={}".format(path, h.hexdigest()[:8])
        CACHE[path] = (key, rval)
        return rval
    except OSError:
        # This will catch any FileNotFoundError or similar
        # issues with stat, open, or read.
        return path


@app.after_request
def add_cache_control_header(response):
    """Disable caching for non-static endpoints
    """
    if "Cache-Control" not in response.headers:
        response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
    return response


@app.route("/favicon.ico")
def favicon():
    return send_from_directory(
        os.path.join(app.root_path, "static"),
        "favicon.ico",
        mimetype="image/vnd.microsoft.icon",
    )


@app.route("/robots.txt")
def robots():
    return send_from_directory(
        os.path.join(app.root_path, "static"), "robots.txt", mimetype="text/plain"
    )


@app.route("/.well-known/apple-developer-merchantid-domain-association")
def apple_pay_domain_association():
    # Static verification file required by Apple Pay domain registration.
    return send_from_directory(
        os.path.join(app.root_path, "static"),
        "apple-developer-merchantid-domain-association",
        mimetype="text/plain",
    )


def format_identifier(s):
    """
    >>> format_identifier('apple_pay')
    'Apple Pay'
    """
    return " ".join(map(lambda s: s.capitalize(), s.split("_")))


CARD_BRANDS = {
    "amex": "American Express",
    "diners": "Diners Club",
    "discover": "Discover",
    "jcb": "JCB",
    "mastercard": "Mastercard",
    "unionpay": "UnionPay",
    "visa": "Visa",
}


def format_payment_method_details_source(payment_method_details):
    """Human-readable payment method, e.g. 'Visa credit card (Apple Pay)'."""
    payment_type = payment_method_details.type
    if payment_type in ("card", "card_present"):
        details = payment_method_details[payment_type]
        parts = []
        brand = CARD_BRANDS.get(details.brand)
        if brand:
            parts.append(brand)
        if details.funding != "unknown":
            parts.append(details.funding)
        parts.append("card")
        if details.wallet:
            parts.append("({})".format(format_identifier(details.wallet.type)))
        return " ".join(parts)
    else:
        return format_identifier(payment_type)


def sendgrid_safe_name(name):
    """The to.name, cc.name, and bcc.name personalizations cannot include
    either the ; or , characters.
    """
    return re.sub(r"([,;]\s*)+", " ", name)


@app.route("/cancel")
def cancel():
    return render_template("cancel.html", donate_email=DONATE_EMAIL)


@app.route("/success")
def success():
    session_id = request.args.get("session_id")
    if not session_id:
        return redirect("/")
    session = stripe.checkout.Session.retrieve(
        session_id, expand=["payment_intent", "subscription.default_payment_method"]
    )
    return render_template(
        "success.html", donate_email=DONATE_EMAIL, **session_info(session)
    )


def session_info(session):
    """Normalize a checkout session (subscription or one-time payment)
    into the template context used by success.html."""
    if session.mode == "subscription":
        subscription = session.subscription
        pm = subscription.default_payment_method
        return merge_dicts(
            {
                "id": subscription.id,
                "frequency": "monthly",
                "amount": subscription.plan.amount * subscription.quantity,
                "payment_method": format_payment_method_details_source(pm),
            },
            billing_details_to(pm.billing_details),
        )
    elif session.mode == "payment":
        charge = session.payment_intent.charges.data[0]
        return merge_dicts(
            {
                "id": charge.id,
                "frequency": "one-time",
                "amount": charge.amount,
                "payment_method": format_payment_method_details_source(
                    charge.payment_method_details
                ),
            },
            billing_details_to(charge.billing_details),
        )
    else:
        raise NotImplementedError


def session_kw(amount, frequency, metadata):
    """Extra kwargs for stripe.checkout.Session.create, per donation frequency."""
    if frequency == "monthly":
        # The monthly plan is priced at 1 cent; quantity carries the amount.
        return {
            "mode": "subscription",
            "subscription_data": {
                "items": [{"plan": MONTHLY_PLAN_ID, "quantity": amount}],
                "metadata": metadata,
            },
        }
    else:
        return {
            "mode": "payment",
            "line_items": [
                {
                    "amount": amount,
                    "currency": "USD",
                    "name": "One-time donation",
                    "quantity": 1,
                }
            ],
            "submit_type": "donate",
            "payment_intent_data": {"description": "Donation", "metadata": metadata},
        }


@app.route("/checkout", methods=["POST"])
def checkout():
    body = request.json
    validate(body, CHECKOUT_SCHEMA)
    amount = body["amount"]
    # NOTE(review): "frequency" is not required by CHECKOUT_SCHEMA -- a body
    # missing it raises KeyError here; confirm clients always send it.
    frequency = body["frequency"]
    o = urlsplit(request.url)
    metadata = merge_dicts(
        body.get("metadata", {}),
        {"origin": urlunsplit((o.scheme, o.netloc, "", "", ""))},
    )
    session = stripe.checkout.Session.create(
        payment_method_types=["card"],
        success_url=urlunsplit(
            (o.scheme, o.netloc, "/success", "session_id={CHECKOUT_SESSION_ID}", "")
        ),
        cancel_url=urlunsplit((o.scheme, o.netloc, "/cancel", "", "")),
        **session_kw(amount=amount, frequency=frequency, metadata=metadata),
    )
    return jsonify(sessionId=session.id)


def billing_details_to(billing_details):
    """SendGrid 'to' personalization (name sanitized for , and ;)."""
    return {
        "name": sendgrid_safe_name(billing_details.name),
        "email": billing_details.email,
    }


def donor_name(billing_details):
    if billing_details.name:
        return f"{billing_details.name} <{billing_details.email}>"
    else:
        return billing_details.email


def stripe_checkout_session_completed(session):
    # Subscription receipts are handled by invoice payments
    if session.mode == "payment":
        return stripe_checkout_session_completed_payment(
            stripe.checkout.Session.retrieve(session.id, expand=["payment_intent"])
        )


def get_origin(metadata):
    """Origin recorded at checkout time, falling back to the canonical host."""
    return metadata.get(
        "origin",
        f"https://{CANONICAL_HOSTS[0]}" if CANONICAL_HOSTS else "http://localhost:5000",
    )


def stripe_invoice_payment_succeeded(invoice):
    """Webhook: send a receipt email for a successful subscription invoice."""
    invoice = stripe.Invoice.retrieve(
        invoice.id, expand=["subscription", "payment_intent"]
    )
    subscription = invoice.subscription
    charge = invoice.payment_intent.charges.data[0]
    if is_from_new_app(subscription.metadata):
        print(f"Skipping subscription email from new app: {charge.id}")
        return
    next_dt = datetime.fromtimestamp(subscription.current_period_end, LOCAL_TZ)
    sg = sendgrid.SendGridAPIClient(SENDGRID_API_KEY)
    try:
        response = sg.send(
            email_template_data(
                template_id=RECEIPT_TEMPLATE_ID,
                charge=charge,
                frequency="monthly",
                monthly={
                    "next": f"{next_dt.strftime('%b')} {next_dt.day}, {next_dt.year}",
                    "url": f"{get_origin(subscription.metadata)}/subscriptions/{subscription.id}",
                },
            )
        )
        if not (200 <= response.status_code < 300):
            return abort(400)
    except exceptions.BadRequestsError:
        return abort(400)
    track_donation(metadata=subscription.metadata, frequency="monthly", charge=charge)


def email_template_data(template_id, charge, frequency, **kw):
    """Build the SendGrid dynamic-template message body for a charge."""
    payment_method = format_payment_method_details_source(charge.payment_method_details)
    return {
        "template_id": template_id,
        "from": {"name": "Mission Bit", "email": DONATE_EMAIL},
        "personalizations": [
            {
                "to": [billing_details_to(charge.billing_details)],
                "dynamic_template_data": merge_dicts(
                    {
                        "transaction_id": charge.id,
                        "frequency": frequency,
                        "total": "${:,.2f}".format(charge.amount * 0.01),
                        "date": datetime.fromtimestamp(
                            charge.created, LOCAL_TZ
                        ).strftime("%x"),
                        "payment_method": payment_method,
                        "donor": donor_name(charge.billing_details),
                    },
                    kw,
                ),
            }
        ],
    }


def track_invoice_failure(metadata, frequency, charge):
    """Record a DonationFailed telemetry event (no-op without App Insights)."""
    client = get_telemetry_client()
    if client is None:
        return
    payment_method = format_payment_method_details_source(charge.payment_method_details)
    client.track_event(
        "DonationFailed",
        merge_dicts(
            metadata,
            billing_details_to(charge.billing_details),
            {"id": charge.id, "frequency": frequency, "payment_method": payment_method},
        ),
        {"amount": charge.amount},
    )


def track_donation(metadata, frequency, charge):
    """Record a Donation telemetry event (no-op without App Insights)."""
    client = get_telemetry_client()
    if client is None:
        return
    payment_method = format_payment_method_details_source(charge.payment_method_details)
    client.track_event(
        "Donation",
        merge_dicts(
            metadata,
            billing_details_to(charge.billing_details),
            {"id": charge.id, "frequency": frequency, "payment_method": payment_method},
        ),
        {"amount": charge.amount},
    )


def stripe_checkout_session_completed_payment(session):
    """Send the one-time donation receipt email for a completed session."""
    payment_intent = session.payment_intent
    charge = payment_intent.charges.data[0]
    payment_method = format_payment_method_details_source(charge.payment_method_details)
    if is_from_new_app(payment_intent.metadata):
        print(f"Skipping charge email from new app: {charge.id}")
        return
    sg = sendgrid.SendGridAPIClient(SENDGRID_API_KEY)
    try:
        response = sg.send(
            email_template_data(
                template_id=RECEIPT_TEMPLATE_ID, charge=charge, frequency="one-time"
            )
        )
        if not (200 <= response.status_code < 300):
            print(repr(response))
            return abort(400)
    except exceptions.BadRequestsError:
        # NOTE(review): sys.last_traceback is only set by the interactive
        # interpreter -- this likely raises AttributeError here; confirm
        # whether traceback.print_exc() was intended.
        traceback.print_tb(sys.last_traceback)
        return abort(400)
    track_donation(
        metadata=payment_intent.metadata, frequency="one-time", charge=charge
    )


def stripe_invoice_payment_failed(invoice):
    """Webhook: email the donor about a failed renewal and cancel the plan."""
    invoice = stripe.Invoice.retrieve(
        invoice.id, expand=["subscription", "payment_intent"]
    )
    if invoice.billing_reason != "subscription_cycle":
        # No email unless it's a renewal, they got an error in the
        # Stripe Checkout UX for new subscriptions.
        return
    subscription = invoice.subscription
    charge = invoice.payment_intent.charges.data[0]
    if is_from_new_app(subscription.metadata):
        print(f"Skipping subscription failure email from new app: {charge.id}")
        return
    sg = sendgrid.SendGridAPIClient(SENDGRID_API_KEY)
    origin = get_origin(subscription.metadata)
    try:
        response = sg.send(
            email_template_data(
                template_id=FAILURE_TEMPLATE_ID,
                charge=charge,
                frequency="monthly",
                failure_message=charge.failure_message,
                renew_url=f"{origin}/{'${:,.2f}'.format(charge.amount * 0.01)}/?frequency=monthly",
                subscription_id=subscription.id,
                subscription_url=f"{origin}/subscriptions/{subscription.id}",
            )
        )
        if not (200 <= response.status_code < 300):
            return abort(400)
    except exceptions.BadRequestsError:
        return abort(400)
    # Cancel the subscription to avoid future charges
    if subscription.status != "canceled":
        stripe.Subscription.delete(subscription.id)
    track_invoice_failure(
        metadata=subscription.metadata, frequency="monthly", charge=charge
    )


def is_from_new_app(metadata):
    """Events created by the new www.missionbit.org donation portal
    should be ignored
    """
    return metadata.get("app") == "www.missionbit.org"


@app.route("/hooks", methods=["POST"])
def stripe_webhook():
    """Verify and dispatch Stripe webhook events to their handlers."""
    payload = request.data.decode("utf-8")
    sig_header = request.headers.get("Stripe-Signature", None)
    event = None
    try:
        event = stripe.Webhook.construct_event(
            payload=payload,
            sig_header=sig_header,
            secret=stripe_keys["endpoint_secret"],
        )
    except ValueError as e:
        # Invalid payload
        print("Invalid hook payload")
        return "Invalid payload", 400
    except stripe.error.SignatureVerificationError as e:
        # Invalid signature
        print("Invalid hook signature")
        return "Invalid signature", 400
    handlers = {
        "checkout.session.completed": stripe_checkout_session_completed,
        "invoice.payment_succeeded": stripe_invoice_payment_succeeded,
        "invoice.payment_failed": stripe_invoice_payment_failed,
    }
    handler = handlers.get(event["type"])
    if handler is not None:
        obj = event["data"]["object"]
        print(f"handling {event['type']} id: {obj.id}")
        handler(obj)
    else:
        print(f"{event['type']} not handled")
    return jsonify({"status": "success"})


def host_default_amount(host):
    """Default suggested donation by host (gala subdomain suggests more)."""
    if host.startswith("gala."):
        return "$250"
    else:
        return "$50"


@app.route("/subscriptions/<subscription_id>")
def subscription(subscription_id):
    """Self-service page for viewing a monthly donation subscription."""
    if REDIRECT_TO_WWW:
        return redirect(f"https://www.missionbit.org/donate/subscriptions/{subscription_id}")
    try:
        subscription = stripe.Subscription.retrieve(
            subscription_id, expand=["default_payment_method"]
        )
    except stripe.error.InvalidRequestError:
        return redirect("/")
    pm = subscription.default_payment_method
    next_dt = datetime.fromtimestamp(subscription.current_period_end, LOCAL_TZ)
    return render_template(
        "subscription.html",
        donate_email=DONATE_EMAIL,
        subscription=subscription,
        id=subscription.id,
        frequency="monthly",
        amount=subscription.plan.amount * subscription.quantity,
        payment_method=format_payment_method_details_source(pm),
        next_cycle=f"{next_dt.strftime('%b')} {next_dt.day}, {next_dt.year}",
        **billing_details_to(pm.billing_details),
    )


@app.route("/subscriptions/<subscription_id>", methods=["POST"])
def delete_subscription(subscription_id):
    """Cancel a subscription, then re-render its page either way."""
    try:
        stripe.Subscription.delete(subscription_id)
    except stripe.error.InvalidRequestError:
        return redirect(f"/subscriptions/{subscription_id}")
    return redirect(f"/subscriptions/{subscription_id}")


@app.route("/")
@app.route("/<dollars>")
@app.route("/<dollars>/")
def index(dollars=""):
    """Donation landing page; the path may carry a preset dollar amount."""
    if REDIRECT_TO_WWW:
        return redirect("https://www.missionbit.org/donate")
    host = urlsplit(request.url).netloc
    frequency = (
        "monthly" if request.args.get("frequency", "once") == "monthly" else "once"
    )
    amount = parse_cents(dollars) or parse_cents(host_default_amount(host))
    return render_template(
        "index.html",
        key=stripe_keys["publishable_key"],
        metadata=merge_dicts(request.args, {"host": host}),
        frequency=frequency,
        formatted_dollar_amount="{:.2f}".format(amount * 0.01)
        if amount % 100
        else f"{amount // 100}",
    )


if CANONICAL_HOSTS:

    @app.before_request
    def redirect_to_cdn():
        # Force https on canonical hosts; redirect everything else to the
        # primary canonical host.
        o = urlsplit(request.url)
        redirect_host = CANONICAL_HOSTS[0]
        if o.netloc in CANONICAL_HOSTS:
            if o.scheme == "https":
                return None
            else:
                redirect_host = o.netloc
        url = urlunsplit(("https", redirect_host, o[2], o[3], o[4]))
        return redirect(url, code=302)


if __name__ == "__main__":
    app.run(debug=True)
nilq/baby-python
python
# Assessing placement bias of the global river gauge network
# Nature Sustainability
# Authors: Corey A. Krabbenhoft, George H. Allen, Peirong Lin, Sarah E. Godsey, Daniel C. Allen, Ryan M. Burrows, Amanda G. DelVecchia, Ken M. Fritz, Margaret Shanafield
# Amy J. Burgin, Margaret Zimmer, Thibault Datry, Walter K. Dodds, C. Nathan Jones, Meryl C. Mims, Catherin Franklin, John C. Hammond, Samuel C. Zipper, Adam S. Ward,
# Katie H. Costigan, Hylke E. Beck, and Julian D. Olden
# Date: 2/7/2022

# This code all gauge locations, and spatially joins them with GRADES river segments
# output is the joined table of gauge ID (stationid) with GRADES river ID (COMID)

#required library
import geopandas as gpd
import pandas as pd
from shapely.geometry import Point


def find_nearest_river(dfpp,dfll,buffersize):
    '''
    This function finds the nearest river reach ID for each gauge
    input: dfpp: point shapefile of the gauges; dfll: line shapefile of GRADES
    buffersize: search radius around each gauge, in the data's coordinate
    units (degrees here -- 0.05 deg is roughly 5 km at the equator)
    returns: DataFrame with stationid, matched COMID, distance, lon, lat
    '''
    #create buffer
    print('   create buffer... wait ...')
    # Buffer each gauge point, keeping the station attributes alongside.
    poly = dfpp.buffer(buffersize)
    polygpd = gpd.GeoDataFrame(dfpp[['stationid', 'lon', 'lat']],geometry=poly)
    #spatial join
    print('   spatial join with flowlines.. wait ...')
    # Candidate reaches = all flowlines intersecting the buffer; then pull
    # the full line geometry back in for distance computation.
    join = gpd.sjoin(polygpd,dfll,how='inner',op='intersects')
    merge=join.merge(dfll,on='COMID',how='left')
    print('   calculating distance.. wait ...')
    # Point-to-line distance for every candidate pair.
    merge['distance']=[Point(merge['lon'][i],merge['lat'][i]).distance(merge['geometry_y'][i]) for i in range(0,len(merge))]
    # Keep, per gauge, the candidate reach with the minimum distance.
    join11 = merge.groupby(['stationid']).agg({'distance':'min'}).reset_index() #min dist: width and MERIT
    merge11 = join11.merge(merge,on=['stationid','distance'],how='left')
    final = merge11[['stationid','COMID','distance','lon','lat']]
    return final


if __name__ == '__main__':
    #read latlon of all gauges (this is a combined gauge location database of GSIM and Beck at al)
    df = pd.read_csv('New_gauge_list_revisions.csv')[['stationid','lat','lon','source']]
    points = [Point(df.lon[j],df.lat[j]) for j in range(len(df))]
    #create GeoDataFrame
    dfpp = gpd.GeoDataFrame(df,geometry=points)

    #read GRADES river segments and perform spatial join
    buffersize = 0.05 #~5km
    allpoints = []
    # Process one Pfafstetter level-1 basin region at a time (1..8).
    for pfaf in range(1,9):
        #GRADES river segment downloadable from http://hydrology.princeton.edu/data/mpan/MERIT_Basins/MERIT_Hydro_v07_Basins_v01/pfaf_level_01/
        fin = '~/XXX/riv_pfaf_%01d_MERIT_Hydro_v07_Basins_v01.shp'%pfaf
        print('... intersecting with %s ...'%fin)
        dfll = gpd.read_file(fin)
        allpoints.append(find_nearest_river(dfpp,dfll,buffersize))
    allpoints = pd.concat(allpoints)

    #save to file
    fon = 'stationid_GRADES_v07_join.csv'
    print('... writing to %s ...'%fon)
    allpoints.to_csv(fon,index=False)
nilq/baby-python
python
from __future__ import annotations

import logging
import random
from collections import defaultdict
from dataclasses import dataclass
from typing import Callable, Dict, List, Optional, Tuple, Union

from nuplan.common.actor_state.vehicle_parameters import VehicleParameters
from nuplan.database.nuplan_db.lidar_pc import LidarPc
from nuplan.database.nuplan_db.nuplandb import NuPlanDB
from nuplan.database.nuplan_db.nuplandb_wrapper import NuPlanDBWrapper
from nuplan.database.nuplan_db.scene import Scene
from nuplan.planning.scenario_builder.nuplan_db.nuplan_scenario import NuPlanScenario
from nuplan.planning.scenario_builder.nuplan_db.nuplan_scenario_utils import (
    DEFAULT_SCENARIO_NAME,
    ScenarioExtractionInfo,
    ScenarioMapping,
)
from nuplan.planning.simulation.trajectory.trajectory_sampling import TrajectorySampling
from nuplan.planning.utils.multithreading.worker_utils import WorkerPool, worker_map

logger = logging.getLogger(__name__)

# Dictionary that holds a list of scenarios for each scenario type
ScenarioDict = Dict[str, List[NuPlanScenario]]

# Scene indices smaller that the first valid index or larger than the last valid index are dropped during filtering.
# This is done to ensure that all selected scenes have at least 20s of history/future samples.
FIRST_VALID_SCENE_IDX = 2  # First scene in a log that is considered valid for training/simulation
LAST_VALID_SCENE_IDX = -2  # Last scene in a log that is considered valid for training/simulation


@dataclass(frozen=True)
class FilterWrapper:
    """
    Generic filter wrapper that encapsulates the filter's function and metadata.
    """

    fn: Callable[[ScenarioDict], ScenarioDict]  # function that filters the scenario dictionary
    enable: bool  # whether to run this filter
    name: str  # name of the filter

    def run(self, scenario_dict: ScenarioDict) -> ScenarioDict:
        """
        Run the filter if enabled, passing the dictionary through unchanged otherwise.
        :param scenario_dict: Input scenario dictionary.
        :return: Output scenario dictionary.
        """
        if not self.enable:
            return scenario_dict

        logger.debug(f'Running scenario filter {self.name}...')
        scenario_dict = self.fn(scenario_dict)  # type: ignore
        logger.debug(f'Running scenario filter {self.name}...DONE')

        return scenario_dict


def is_scene_valid(
    scene: Scene, first_valid_idx: int = FIRST_VALID_SCENE_IDX, last_valid_idx: int = LAST_VALID_SCENE_IDX
) -> bool:
    """
    Check whether the scene has enough history/future buffer and is valid for training/simulation.
    :param scene: Candidate scene.
    :param first_valid_idx: Index of first valid scene.
    :param last_valid_idx: Index of last valid scene (negative, relative to the end of the log).
    :return: Whether the scene is valid or not.
    """
    # last_valid_idx is negative, so the upper bound is len(scenes) + last_valid_idx.
    scenes = scene.log.scenes
    scene_idx = int(scenes.index(scene))
    return first_valid_idx <= scene_idx < len(scenes) + last_valid_idx


def extract_scenes_from_log_db(
    db: NuPlanDB, first_valid_idx: int = FIRST_VALID_SCENE_IDX, last_valid_idx: int = LAST_VALID_SCENE_IDX
) -> List[Scene]:
    """
    Retrieve all valid scenes from a log database, trimming the first/last scenes
    that lack sufficient history/future samples.
    :param db: Log database to retrieve scenes from.
    :param first_valid_idx: Index of first valid scene.
    :param last_valid_idx: Index of last valid scene (negative, relative to the end of the log).
    :return: Retrieved scenes.
    """
    return list(db.scene)[first_valid_idx:last_valid_idx]


def create_scenarios_by_tokens(
    scenario_tokens: List[Tuple[str, str]],
    db: NuPlanDBWrapper,
    log_names: Optional[List[str]],
    expand_scenarios: bool,
    vehicle_parameters: VehicleParameters,
    ground_truth_predictions: Optional[TrajectorySampling],
) -> ScenarioDict:
    """
    Create initial scenario dictionary based on desired tokens.
    :param scenario_tokens: List of (log_name, lidarpc_tokens) used to initialize the scenario dict.
    :param db: Object for accessing the available databases.
    :param log_names: List of log names to include in the scenario dictionary.
    :param expand_scenarios: Whether to expand multi-sample scenarios to multiple single-sample scenarios.
    :param vehicle_parameters: Vehicle parameters for this db.
    :param ground_truth_predictions: If None, no GT predictions will be extracted based on its future setting.
    :return: Dictionary that holds a list of scenarios for each scenario type.
    """
    logger.debug("Creating scenarios by tokens...")

    # Whether to expand scenarios from multi-sample to single-sample scenarios
    # (None extraction info means the scenario is kept as a single sample).
    extraction_info = None if expand_scenarios else ScenarioExtractionInfo()

    # Find all tokens that match the desired log names
    if log_names:
        candidate_log_names = set(log_names)
        scenario_tokens = [(log_name, token) for log_name, token in scenario_tokens if log_name in candidate_log_names]

    # Construct nuplan scenario objects for each (log_name, lidarpc token) pair
    args = [DEFAULT_SCENARIO_NAME, extraction_info, vehicle_parameters, ground_truth_predictions]
    scenarios = [NuPlanScenario(db.get_log_db(log_name), log_name, token, *args) for log_name, token in scenario_tokens]

    return {DEFAULT_SCENARIO_NAME: scenarios}


def create_scenarios_by_types(
    scenario_types: List[str],
    db: NuPlanDBWrapper,
    log_names: Optional[List[str]],
    expand_scenarios: bool,
    scenario_mapping: ScenarioMapping,
    vehicle_parameters: VehicleParameters,
    ground_truth_predictions: Optional[TrajectorySampling],
) -> ScenarioDict:
    """
    Create initial scenario dictionary based on desired scenario types.
    :param scenario_types: List of scenario types used to filter the pool of scenarios.
    :param db: Object for accessing the available databases.
    :param log_names: List of log names to include in the scenario dictionary.
    :param expand_scenarios: Whether to expand multi-sample scenarios to multiple single-sample scenarios.
    :param scenario_mapping: Mapping from scenario type to its extraction info.
    :param vehicle_parameters: Vehicle parameters for this db.
    :param ground_truth_predictions: If None, no GT predictions will be extracted based on its future setting.
    :return: Dictionary that holds a list of scenarios for each scenario type.
    """
    logger.debug(f"Creating scenarios by types {scenario_types}...")

    # Dictionary that holds a list of scenarios for each scenario type
    scenario_dict: ScenarioDict = dict()

    # Find all candidate scenario types: only types present in the db can be extracted.
    available_types = db.get_all_scenario_types()
    candidate_types = set(scenario_types).intersection(available_types)

    # Find all log dbs that match the desired log names
    log_dbs = db.log_dbs
    if log_names:
        candidate_log_names = set(log_names)
        log_dbs = [log_db for log_db in log_dbs if log_db.name in candidate_log_names]

    # Populate scenario dictionary with list of scenarios for each type
    for scenario_type in candidate_types:
        extraction_info = None if expand_scenarios else scenario_mapping.get_extraction_info(scenario_type)

        # TODO: Make scenario_tag.select_many method in DB
        args = [scenario_type, extraction_info, vehicle_parameters, ground_truth_predictions]
        # Only tags whose scene passes the history/future-buffer check are kept.
        scenario_dict[scenario_type] = [
            NuPlanScenario(log_db, log_db.log_name, tag.lidar_pc_token, *args)
            for log_db in log_dbs
            for tag in log_db.scenario_tag.select_many(type=scenario_type)
            if is_scene_valid(tag.lidar_pc.scene)
        ]

    return scenario_dict


def create_all_scenarios(
    db: NuPlanDBWrapper,
    log_names: Optional[List[str]],
    expand_scenarios: bool,
    vehicle_parameters: VehicleParameters,
    worker: WorkerPool,
    ground_truth_predictions: Optional[TrajectorySampling],
) -> ScenarioDict:
    """
    Create initial scenario dictionary containing all available scenarios in the scenario pool.
    :param db: Object for accessing the available databases.
    :param log_names: List of log names to include in the scenario dictionary.
    :param expand_scenarios: Whether to expand multi-sample scenarios to multiple single-sample scenarios.
    :param vehicle_parameters: Vehicle parameters for this db.
    :param worker: Worker pool for concurrent scenario processing.
    :param ground_truth_predictions: If None, no GT predictions will be extracted based on its future setting
    :return: Dictionary that holds a list of scenarios for each scenario type.
    """
    logger.debug('Creating all scenarios...')

    # Whether to expand scenarios from multi-sample to single-sample scenarios
    extraction_info = None if expand_scenarios else ScenarioExtractionInfo()

    def get_scenarios_from_log_dbs(log_dbs: List[NuPlanDB]) -> List[NuPlanScenario]:
        """
        Retrieve a list of nuplan scenario objects from a list of nuplan log databases.
        :param log_dbs: List of nuplan log databases.
        :return: List of nuplan scenarios.
        """

        def get_scenarios_from_log_db(log_db: NuPlanDB) -> List[NuPlanScenario]:
            """
            Retrieve a list of nuplan scenario objects from a single nuplan log database.
            Note: This method uses variables from the outer scope to avoid transferring unnecessary load across workers.
            :param log_db: Nuplan log database.
            :return: List of nuplan scenarios.
            """
            # Total list of scene tokens in the database (already trimmed to valid scenes)
            scene_tokens = [scene.token for scene in extract_scenes_from_log_db(log_db)]

            # All lidar pcs belonging to those scenes, in temporal order
            query = (
                log_db.session.query(LidarPc.token)
                .filter(LidarPc.scene_token.in_(scene_tokens))
                .order_by(LidarPc.timestamp.asc())
                .all()
            )

            # Construct nuplan scenario objects for this log
            # (each query row is a 1-tuple, hence the `token,` unpacking below)
            args = [DEFAULT_SCENARIO_NAME, extraction_info, vehicle_parameters, ground_truth_predictions]
            scenarios = [NuPlanScenario(log_db, log_db.log_name, token, *args) for token, in query]

            return scenarios

        return [scenario for log_db in log_dbs for scenario in get_scenarios_from_log_db(log_db)]

    # Find all log dbs that match the desired log names
    log_dbs = db.log_dbs
    if log_names:
        candidate_log_names = set(log_names)
        log_dbs = [log_db for log_db in log_dbs if log_db.name in candidate_log_names]

    # Retrieve all scenarios for the total list of scenes concurrently
    scenarios = worker_map(worker, get_scenarios_from_log_dbs, log_dbs)

    return {DEFAULT_SCENARIO_NAME: scenarios}


def filter_by_log_names(scenario_dict: ScenarioDict, log_names: List[str]) -> ScenarioDict:
    """
    Filter a scenario dictionary by log names.
    :param scenario_dict: Dictionary that holds a list of scenarios for each scenario type.
    :param log_names: List of log names to include in the scenario dictionary.
    :return: Filtered scenario dictionary.
    """
    scenario_dict = {
        scenario_type: [scenario for scenario in scenarios if scenario.log_name in log_names]
        for scenario_type, scenarios in scenario_dict.items()
    }

    return scenario_dict


def filter_by_map_names(scenario_dict: ScenarioDict, map_names: List[str], db: NuPlanDBWrapper) -> ScenarioDict:
    """
    Filter a scenario dictionary by map names.
    :param scenario_dict: Dictionary that holds a list of scenarios for each scenario type.
    :param map_names: List of map names to include in the scenario dictionary.
    :param db: Object for accessing the available log databases.
    :return: Filtered scenario dictionary.
    """
    # Mapping from log name to map version, used to resolve each scenario's map
    # TODO: Pass map name in scenario
    log_maps = {log_db.log_name: log_db.map_name for log_db in db.log_dbs}

    scenario_dict = {
        scenario_type: [scenario for scenario in scenarios if log_maps[scenario.log_name] in map_names]
        for scenario_type, scenarios in scenario_dict.items()
    }

    return scenario_dict


def filter_num_scenarios_per_type(
    scenario_dict: ScenarioDict, num_scenarios_per_type: int, randomize: bool
) -> ScenarioDict:
    """
    Filter the number of scenarios in a scenario dictionary per scenario type.
    :param scenario_dict: Dictionary that holds a list of scenarios for each scenario type.
    :param num_scenarios_per_type: Number of scenarios per type to keep.
    :param randomize: Whether to randomly sample the scenarios.
    :return: Filtered scenario dictionary.
    """
    for scenario_type in scenario_dict:
        if randomize and num_scenarios_per_type < len(scenario_dict[scenario_type]):
            # Sample scenarios randomly (without replacement)
            scenario_dict[scenario_type] = random.sample(scenario_dict[scenario_type], num_scenarios_per_type)
        else:
            # Sample the top k number of scenarios per type
            scenario_dict[scenario_type] = scenario_dict[scenario_type][:num_scenarios_per_type]

    return scenario_dict


def filter_total_num_scenarios(
    scenario_dict: ScenarioDict, limit_total_scenarios: Union[int, float], randomize: bool
) -> ScenarioDict:
    """
    Filter the total number of scenarios in a scenario dictionary.
    :param scenario_dict: Dictionary that holds a list of scenarios for each scenario type.
    :param limit_total_scenarios: Number of total scenarios to keep (int) or
        fraction of scenarios to keep in (0, 1) (float).
    :param randomize: Whether to randomly sample the scenarios (int limit only).
    :return: Filtered scenario dictionary.
    """
    scenario_list = scenario_dict_to_list(scenario_dict)

    if isinstance(limit_total_scenarios, int):  # Exact number of scenarios to keep
        max_scenarios = limit_total_scenarios
        scenario_list = (
            random.sample(scenario_list, max_scenarios)
            if randomize and max_scenarios < len(scenario_list)
            else scenario_list[:max_scenarios]
        )
    elif isinstance(limit_total_scenarios, float):  # Percentage of scenarios to keep
        sample_ratio = limit_total_scenarios
        assert 0.0 < sample_ratio < 1.0, f'Sample ratio has to be between 0 and 1, got {sample_ratio}'
        # Keep every `step`-th scenario to approximate the requested ratio;
        # lists shorter than `step` are left unchanged.
        step = int(1.0 / sample_ratio)
        if step < len(scenario_list):
            scenario_list = scenario_list[::step]
    else:
        raise TypeError('Scenario filter "limit_total_scenarios" must be of type int or float')

    return scenario_list_to_dict(scenario_list)


def filter_invalid_goals(scenario_dict: ScenarioDict, worker: WorkerPool) -> ScenarioDict:
    """
    Filter the scenarios with invalid mission goals in a scenario dictionary.
    :param scenario_dict: Dictionary that holds a list of scenarios for each scenario type.
    :param worker: Worker pool for concurrent scenario processing.
    :return: Filtered scenario dictionary.
    """

    def _filter_goals(scenarios: List[NuPlanScenario]) -> List[NuPlanScenario]:
        """
        Filter scenarios that contain invalid (missing) mission goals.
        :param scenarios: List of scenarios to filter.
        :return: List of filtered scenarios.
        """
        return [scenario for scenario in scenarios if scenario.get_mission_goal()]

    for scenario_type in scenario_dict:
        scenario_dict[scenario_type] = worker_map(worker, _filter_goals, scenario_dict[scenario_type])

    return scenario_dict


def scenario_dict_to_list(scenario_dict: ScenarioDict, shuffle: Optional[bool] = None) -> List[NuPlanScenario]:
    """
    Unravel a scenario dictionary to a list of scenarios.
    :param scenario_dict: Dictionary that holds a list of scenarios for each scenario type.
    :param shuffle: Whether to shuffle the scenario list.
    :return: List of scenarios.
    """
    scenarios = [scenario for scenario_list in scenario_dict.values() for scenario in scenario_list]
    # Sort by token first so the output is deterministic when shuffle is falsy.
    scenarios = sorted(scenarios, key=lambda scenario: scenario.token)  # type: ignore

    if shuffle:
        random.shuffle(scenarios)

    return scenarios


def scenario_list_to_dict(scenario_list: List[NuPlanScenario]) -> ScenarioDict:
    """
    Convert a list of scenarios to a dictionary, grouping by scenario type.
    :param scenario_list: List of input scenarios.
    :return: Dictionary that holds a list of scenarios for each scenario type.
    """
    scenario_dict: ScenarioDict = defaultdict(list)

    for scenario in scenario_list:
        scenario_dict[scenario.scenario_type].append(scenario)

    return scenario_dict
nilq/baby-python
python