hexsha
string
size
int64
ext
string
lang
string
max_stars_repo_path
string
max_stars_repo_name
string
max_stars_repo_head_hexsha
string
max_stars_repo_licenses
list
max_stars_count
int64
max_stars_repo_stars_event_min_datetime
string
max_stars_repo_stars_event_max_datetime
string
max_issues_repo_path
string
max_issues_repo_name
string
max_issues_repo_head_hexsha
string
max_issues_repo_licenses
list
max_issues_count
int64
max_issues_repo_issues_event_min_datetime
string
max_issues_repo_issues_event_max_datetime
string
max_forks_repo_path
string
max_forks_repo_name
string
max_forks_repo_head_hexsha
string
max_forks_repo_licenses
list
max_forks_count
int64
max_forks_repo_forks_event_min_datetime
string
max_forks_repo_forks_event_max_datetime
string
content
string
avg_line_length
float64
max_line_length
int64
alphanum_fraction
float64
qsc_code_num_words_quality_signal
int64
qsc_code_num_chars_quality_signal
float64
qsc_code_mean_word_length_quality_signal
float64
qsc_code_frac_words_unique_quality_signal
float64
qsc_code_frac_chars_top_2grams_quality_signal
float64
qsc_code_frac_chars_top_3grams_quality_signal
float64
qsc_code_frac_chars_top_4grams_quality_signal
float64
qsc_code_frac_chars_dupe_5grams_quality_signal
float64
qsc_code_frac_chars_dupe_6grams_quality_signal
float64
qsc_code_frac_chars_dupe_7grams_quality_signal
float64
qsc_code_frac_chars_dupe_8grams_quality_signal
float64
qsc_code_frac_chars_dupe_9grams_quality_signal
float64
qsc_code_frac_chars_dupe_10grams_quality_signal
float64
qsc_code_frac_chars_replacement_symbols_quality_signal
float64
qsc_code_frac_chars_digital_quality_signal
float64
qsc_code_frac_chars_whitespace_quality_signal
float64
qsc_code_size_file_byte_quality_signal
float64
qsc_code_num_lines_quality_signal
float64
qsc_code_num_chars_line_max_quality_signal
float64
qsc_code_num_chars_line_mean_quality_signal
float64
qsc_code_frac_chars_alphabet_quality_signal
float64
qsc_code_frac_chars_comments_quality_signal
float64
qsc_code_cate_xml_start_quality_signal
float64
qsc_code_frac_lines_dupe_lines_quality_signal
float64
qsc_code_cate_autogen_quality_signal
float64
qsc_code_frac_lines_long_string_quality_signal
float64
qsc_code_frac_chars_string_length_quality_signal
float64
qsc_code_frac_chars_long_word_length_quality_signal
float64
qsc_code_frac_lines_string_concat_quality_signal
float64
qsc_code_cate_encoded_data_quality_signal
float64
qsc_code_frac_chars_hex_words_quality_signal
float64
qsc_code_frac_lines_prompt_comments_quality_signal
float64
qsc_code_frac_lines_assert_quality_signal
float64
qsc_codepython_cate_ast_quality_signal
float64
qsc_codepython_frac_lines_func_ratio_quality_signal
float64
qsc_codepython_cate_var_zero_quality_signal
bool
qsc_codepython_frac_lines_pass_quality_signal
float64
qsc_codepython_frac_lines_import_quality_signal
float64
qsc_codepython_frac_lines_simplefunc_quality_signal
float64
qsc_codepython_score_lines_no_logic_quality_signal
float64
qsc_codepython_frac_lines_print_quality_signal
float64
qsc_code_num_words
int64
qsc_code_num_chars
int64
qsc_code_mean_word_length
int64
qsc_code_frac_words_unique
null
qsc_code_frac_chars_top_2grams
int64
qsc_code_frac_chars_top_3grams
int64
qsc_code_frac_chars_top_4grams
int64
qsc_code_frac_chars_dupe_5grams
int64
qsc_code_frac_chars_dupe_6grams
int64
qsc_code_frac_chars_dupe_7grams
int64
qsc_code_frac_chars_dupe_8grams
int64
qsc_code_frac_chars_dupe_9grams
int64
qsc_code_frac_chars_dupe_10grams
int64
qsc_code_frac_chars_replacement_symbols
int64
qsc_code_frac_chars_digital
int64
qsc_code_frac_chars_whitespace
int64
qsc_code_size_file_byte
int64
qsc_code_num_lines
int64
qsc_code_num_chars_line_max
int64
qsc_code_num_chars_line_mean
int64
qsc_code_frac_chars_alphabet
int64
qsc_code_frac_chars_comments
int64
qsc_code_cate_xml_start
int64
qsc_code_frac_lines_dupe_lines
int64
qsc_code_cate_autogen
int64
qsc_code_frac_lines_long_string
int64
qsc_code_frac_chars_string_length
int64
qsc_code_frac_chars_long_word_length
int64
qsc_code_frac_lines_string_concat
null
qsc_code_cate_encoded_data
int64
qsc_code_frac_chars_hex_words
int64
qsc_code_frac_lines_prompt_comments
int64
qsc_code_frac_lines_assert
int64
qsc_codepython_cate_ast
int64
qsc_codepython_frac_lines_func_ratio
int64
qsc_codepython_cate_var_zero
int64
qsc_codepython_frac_lines_pass
int64
qsc_codepython_frac_lines_import
int64
qsc_codepython_frac_lines_simplefunc
int64
qsc_codepython_score_lines_no_logic
int64
qsc_codepython_frac_lines_print
int64
effective
string
hits
int64
77cdd4cfa1ba951c628b06ba3c433937e7e39f69
2,017
py
Python
web/posts/tests.py
arturgafizov/webtron_social_network
38dc166ea8d099ca3a0967f378d751f758eae649
[ "MIT" ]
null
null
null
web/posts/tests.py
arturgafizov/webtron_social_network
38dc166ea8d099ca3a0967f378d751f758eae649
[ "MIT" ]
null
null
null
web/posts/tests.py
arturgafizov/webtron_social_network
38dc166ea8d099ca3a0967f378d751f758eae649
[ "MIT" ]
null
null
null
from django.contrib.auth import get_user_model from rest_framework.reverse import reverse_lazy from rest_framework.test import APITestCase from rest_framework import status from django.contrib.auth.hashers import make_password User = get_user_model() class PostApiTestCase(APITestCase): @classmethod def setUpTestData(cls): print('setUpTestData') data = { 'email': 'test22@test.com', 'password': make_password('tester26'), } cls.user = User.objects.create(**data, is_active=True) cls.user.emailaddress_set.create(email=cls.user.email, primary=True, verified=True) def setUp(self): url = reverse_lazy('auth_app:api_login') data = { 'email': self.user.email, 'password': 'tester26', } response = self.client.post(url, data, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) def test_create_post(self): url = reverse_lazy('posts:post-list') data = { 'title': 'Sport', 'content': 'Test content', } response = self.client.post(url, data, format='json') self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) print(response.data) response = self.client.get(url, data, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) print(response.data) def test_create_post_forbidden(self): url = reverse_lazy('auth_app:logout') response = self.client.post(url) self.assertEqual(response.status_code, status.HTTP_200_OK) url = reverse_lazy('posts:post-list') data = { 'title': 'Sport', 'content': 'Test content', } response = self.client.post(url, data, format='json') self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) print("Create article forbidden", response.data)
35.385965
91
0.646505
236
2,017
5.360169
0.283898
0.056917
0.071146
0.114625
0.52253
0.475889
0.4
0.4
0.4
0.362055
0
0.013645
0.236986
2,017
56
92
36.017857
0.808317
0
0
0.354167
0
0
0.114527
0
0
0
0
0
0.104167
1
0.083333
false
0.0625
0.104167
0
0.208333
0.083333
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
77ce28391d9e87410ba9c6fe9f7e9101b1ee66d1
16,521
py
Python
04-data-lake/etl.py
Ceridan/data-engineering-projects
c608ea76e6db0069f1b8dc24b16c367cf243f657
[ "MIT" ]
null
null
null
04-data-lake/etl.py
Ceridan/data-engineering-projects
c608ea76e6db0069f1b8dc24b16c367cf243f657
[ "MIT" ]
null
null
null
04-data-lake/etl.py
Ceridan/data-engineering-projects
c608ea76e6db0069f1b8dc24b16c367cf243f657
[ "MIT" ]
null
null
null
import configparser import os from pyspark.sql import SparkSession, Window from pyspark.sql.functions import col, asc, desc from pyspark.sql.functions import date_format, row_number, monotonically_increasing_id from pyspark.sql.functions import year, month, dayofmonth, hour, weekofyear, date_format from pyspark.sql.types import StructType, StructField, StringType, IntegerType, LongType, DoubleType, TimestampType config = configparser.ConfigParser() config.read('dl.cfg') os.environ['AWS_ACCESS_KEY_ID'] = config.get('S3', 'AWS_ACCESS_KEY_ID') os.environ['AWS_SECRET_ACCESS_KEY'] = config.get('S3', 'AWS_SECRET_ACCESS_KEY') def create_spark_session(): """Create session on the AWS EMR Spark cluster. Required to processing data using Spark""" spark = SparkSession \ .builder \ .appName('Sparkify Data Lake') \ .config("spark.jars.packages", "org.apache.hadoop:hadoop-aws:2.7.0") \ .getOrCreate() return spark def process_song_data(spark, input_data, output_data): """Process raw songs dataset using Spark and create Songs and Artists dimensional tables stored in S3""" print('Start processing song data...') # Read song data file song_data_path = input_data + 'song_data/*/*/*/*' df = spark.read.json(song_data_path) # Process Data Frame with raw songs data and create Songs dimensional table stored in S3 process_songs(spark, df, output_data) # Process Data Frame with raw songs data and create Artists dimensional table stored in S3 process_artists(spark, df, output_data) print('Finish processing song data.') def process_log_data(spark, input_data, output_data): """ 1. Process raw logs dataset using Spark and create Users and Time dimensional tables stored in S3. 2. Process both raw logs and songs dataset and create Songplays fact table stored in S3. 
""" print('Start processing log data...') # Read log data file log_data_path = input_data + 'log_data/*' log_df = spark.read.json(log_data_path) # Process Data Frame with raw logs data and create Users dimensional table stored in S3 process_users(spark, log_df, output_data) # Process Data Frame with raw logs data and create Time dimensional table stored in S3 process_time(spark, log_df, output_data) # Read song data file song_data_path = input_data + 'song_data/*/*/*/*' song_df = spark.read.json(song_data_path) # Process both Data Frames with raw logs and songs data and create Songplays fact table stored in S3 process_songplays(spark, song_df, log_df, output_data) print('Finish processing log data.') def process_songs(spark, df, output_data): """Process Data Frame with raw songs data using Spark and create Songs dimensional table stored in S3""" print('Processing songs...') # Define schema for the Songs table. Schema also could be inferred implicitly # but defining it manually protects us from wrong type conversions songs_schema = StructType([ StructField('song_id', StringType(), nullable=False), StructField('title', StringType(), nullable=False), StructField('artist_id', StringType(), nullable=True), StructField('year', LongType(), nullable=True), StructField('duration', DoubleType(), nullable=True) ]) # Cleanup data. Remove rows with empty song_id or title and select required fields for Songs table. # We also use dropDuplicates by song_id here to avoid the same song row appears twice in the table. songs_rdd = df \ .filter(col('song_id').isNotNull()) \ .filter(col('title').isNotNull()) \ .dropDuplicates(['song_id']) \ .select('song_id', 'title', 'artist_id', 'year', 'duration') \ .rdd # Create Songs table using clean data and schema. 
songs_table = spark.createDataFrame(songs_rdd, songs_schema) print('Writing songs_table data frame to parquet to S3') # Write Songs table to parquet files partitioned by year and artist to S3 songs_table_path = output_data + 'tables/songs/songs.parquet' songs_table \ .write \ .partitionBy('year', 'artist_id') \ .mode('overwrite') \ .parquet(songs_table_path) print('Songs table has been created.') def process_artists(spark, df, output_data): """Process Data Frame with raw songs data using Spark and create Artists dimensional table stored in S3""" print('Processing artists...') # Define schema for the Artists table. Schema also could be inferred implicitly # but defining it manually protects us from wrong type conversions artists_schema = StructType([ StructField('artist_id', StringType(), nullable=False), StructField('name', StringType(), nullable=False), StructField('location', StringType(), nullable=True), StructField('latitude', DoubleType(), nullable=True), StructField('longitude', DoubleType(), nullable=True) ]) # Cleanup data. Remove rows with empty artist_id or artist_name and select required fields for Artists table. # We also use dropDuplicates by artist_id here to avoid the same artist row appears twice in the table. artists_rdd = df \ .filter(col('artist_id').isNotNull()) \ .filter(col('artist_name').isNotNull()) \ .dropDuplicates(['artist_id']) \ .select('artist_id', 'artist_name', 'artist_location', 'artist_latitude', 'artist_longitude') \ .rdd # Create Artists table using clean data and schema. 
artists_table = spark.createDataFrame(artists_rdd, artists_schema) print('Writing artists_table data frame to parquet to S3') # Write Artists table to parquet files to S3 artists_table_path = output_data + 'tables/artists/artists.parquet' artists_table \ .write \ .mode('overwrite') \ .parquet(artists_table_path) print('Artists table has been created.') def process_users(spark, df, output_data): """ Process Data Frame with raw logs data using Spark and create Users dimensional table stored in S3. To process Users data properly we need to make two decisions: 1. Log file have different actions, ex. NextSong, Home, Login etc. Should we filter logs data by action or not? Because we want to store information about all of our users thus we do not want to filter data by action and we will write all users to the Users table even if them never perform NextSong action. 2. The same user can occurs multiple times in the log file. There are two approaches to deal with it: - We can create historical Users dimension table where each row will have extra fields EffectiveDateFrom and EffectiveDateTo. It allows us to analyze all changes that was made by the user, ex. he/she may change name, switch from free to paid subscription and vice versa. - Also we may store only the latest state of our users. It means that we will write to the Users dimension table only latest occurrence in the log file for each user (ordered by timestamp). For the current processing task we will use the second approach: write only the latest state of our users. """ print('Processing users...') # Define schema for the Users table. 
Schema also could be inferred implicitly # but defining it manually protects us from wrong type conversions users_schema = StructType([ StructField('user_id', LongType(), nullable=False), StructField('first_name', StringType(), nullable=True), StructField('last_name', StringType(), nullable=True), StructField('gender', StringType(), nullable=True), StructField('level', StringType(), nullable=True) ]) # Use Window function to enumerate all occurrences of the single user in the log file. # When it is done, we just can select each row with the value 1 for the number of occurrences (also see next # code statement). users_window = Window \ .partitionBy('userId') \ .orderBy(col('ts').desc()) \ .rowsBetween(Window.unboundedPreceding, Window.currentRow) # Cleanup data. Remove rows with empty userId, apply Window function to find latest occurrences for each user # and select required fields for Users table. # We also use dropDuplicates by userId here to avoid the same artist row appears twice in the table. # We also can avoid using dropDuplicates method here because final data already will be unique because of our logic # with Window function and getting only the latest row for each userId. But we add dropDuplicates here to make # our solution more robust. users_rdd = df \ .filter(col('userId').isNotNull()) \ .dropDuplicates('userId') \ .withColumn('num', row_number().over(users_window)) \ .withColumn('user_id', col('userId').cast(LongType())) \ .filter(col('num') == 1) \ .select('user_id', 'firstName', 'lastName', 'gender', 'level') \ .rdd # Create Users table using clean data and schema. 
users_table = spark.createDataFrame(users_rdd, users_schema) print('Writing users_table data frame to parquet to S3') # Write Users table to parquet files to S3 users_table_path = output_data + 'tables/users/users.parquet' users_table \ .write \ .mode('overwrite') \ .parquet(users_table_path) print('Users table has been created.') def process_time(spark, df, output_data): """ Process Data Frame with raw logs data using Spark and create Time dimensional table stored in S3. To properly create Time table we need to convert timestamp field in the logs. There are two approaches how to deal with it: - Use Spark udf() function and write processing code as normal Python code. - Use power of Spark and its predefined functions to work with timestamp. For the current processing task we will use the second approach: rely on Spark predefined functions. """ print('Processing time...') # Define schema for the Time table. Schema also could be inferred implicitly # but defining it manually protects us from wrong type conversions time_schema = StructType([ StructField('start_time', TimestampType(), nullable=False), StructField('hour', IntegerType(), nullable=False), StructField('day', IntegerType(), nullable=False), StructField('week', IntegerType(), nullable=False), StructField('month', IntegerType(), nullable=False), StructField('year', IntegerType(), nullable=False), StructField('weekday', IntegerType(), nullable=False) ]) # Take unique timestamps from the log data and apply various functions to extract different parts of datetime # on the select stage to get all required fields for the Time table. # We also use dropDuplicates by timestamp here to avoid the same timestamp row appears twice in the table. 
time_rdd = df \ .select('ts') \ .withColumn('timestamp', (col('ts') / 1000).cast(TimestampType())) \ .dropDuplicates(['timestamp']) \ .select( col('timestamp').alias('start_time'), hour('timestamp').alias('hour'), dayofmonth('timestamp').alias('day'), weekofyear('timestamp').alias('week'), month('timestamp').alias('month'), year('timestamp').alias('year'), date_format(col('timestamp'), 'F').cast(IntegerType()).alias('weekday') ) \ .rdd # Create Time table using clean data and schema. time_table = spark.createDataFrame(time_rdd, time_schema) print('Writing time_table data frame to parquet to S3') # Write Time table to parquet files partitioned by year and month to S3 time_table_path = output_data + 'tables/time/time.parquet' time_table \ .write \ .partitionBy('year', 'month') \ .mode('overwrite') \ .parquet(time_table_path) print('Time table has been created.') def process_songplays(spark, song_df, log_df, output_data): """ Process Data Frame with raw logs and songs data using Spark and create Songplays fact table stored in S3. To create Songplays table we need raw data from both logs and songs files. Here we will join both tables and the tricky part is to choose proper key for the joining. Joining also helps us to cleanup data, because we do not want to include rows to the Songplays table where logs data do not match songs data, ex. some song name appears in the log but it doesn't exist in the song data. Thus for current processing task we will choose joining by several conditions: - Songs data `title` should match logs data `song`. - Songs data `artist_name` should match logs data `artist`. - Songs data `duration` should match logs data `length`. """ print('Processing songplays...') # Define schema for the Songplays table. Schema also could be inferred implicitly # but defining it manually protects us from wrong type conversions. # Songplays schema contains two additional columns: "year" and "month" for partitioning. 
songplays_schema = StructType([ StructField('songplay_id', LongType(), nullable=False), StructField('start_time', TimestampType(), nullable=False), StructField('user_id', LongType(), nullable=False), StructField('level', StringType(), nullable=True), StructField('song_id', StringType(), nullable=False), StructField('artist_id', StringType(), nullable=False), StructField('session_id', LongType(), nullable=True), StructField('location', StringType(), nullable=True), StructField('user_agent', StringType(), nullable=True), StructField('year', IntegerType(), nullable=False), StructField('month', IntegerType(), nullable=False) ]) # Cleanup data. Remove rows with empty song_id or artist_id from Songs data. clean_song_df = song_df \ .filter(col('song_id').isNotNull()) \ .filter(col('artist_id').isNotNull()) # Cleanup data. Choose only NextSong actions from Log data. clean_log_df = log_df \ .filter(col('page') == 'NextSong') # Join songs and logs data frames, enrich with missing columns and select required columns # to create Songplays table. # Also we use Spark function `monotonically_increasing_id` to create unique identifiers for Songplays table rows. songplays_rdd = clean_song_df \ .join(clean_log_df, (clean_song_df.title == clean_log_df.song) & (clean_song_df.artist_name == clean_log_df.artist) & (clean_song_df.duration == clean_log_df.length) , 'inner') \ .withColumn('id', monotonically_increasing_id() + 1) \ .withColumn('start_time', (col('ts') / 1000).cast(TimestampType())) \ .withColumn('user_id', col('userId').cast(LongType())) \ .withColumn('year', year('start_time')) \ .withColumn('month', month('start_time')) \ .select('id', 'start_time', 'user_id', 'level', 'song_id', 'artist_id', 'sessionId', 'location', 'userAgent', 'year', 'month') \ .repartition('year', 'month') \ .rdd # Create Songplays table using clean data and schema. 
songplays_table = spark.createDataFrame(songplays_rdd, songplays_schema) print('Writing songplays_table data frame to parquet to S3') # Write Songplays table to parquet files partitioned by year and month to S3 songplays_table_path = output_data + 'tables/songplays/songplays.parquet' songplays_table \ .write \ .partitionBy('year', 'month') \ .mode('overwrite') \ .parquet(songplays_table_path) print('Songplays table has been created.') def main(): """Create Spark session and call functions to process raw logs and songs datasets""" # Create Spark session for application "Sparkify Data Lake" spark = create_spark_session() input_data = "s3a://udacity-dend/" output_data = "s3n://ceri-sparkify" process_song_data(spark, input_data, output_data) process_log_data(spark, input_data, output_data) # Stops Spark session for the job spark.stop() # Entrypoint for the Python program if __name__ == "__main__": main()
44.056
120
0.685794
2,149
16,521
5.164262
0.161005
0.018021
0.036763
0.014868
0.460443
0.380699
0.301496
0.255902
0.156064
0.119121
0
0.00349
0.219539
16,521
374
121
44.173797
0.85722
0.409721
0
0.20202
0
0
0.192913
0.022984
0
0
0
0
0
1
0.045455
false
0
0.035354
0
0.085859
0.09596
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
77ced5789b62f0ec5d0ecee1f28883d50d318adb
882
py
Python
SystemProgramming/models/parsers.py
Savital/BMSTU
30b6b1f3e79e1286a8cf5b3da7a4199a215aebe1
[ "MIT" ]
null
null
null
SystemProgramming/models/parsers.py
Savital/BMSTU
30b6b1f3e79e1286a8cf5b3da7a4199a215aebe1
[ "MIT" ]
null
null
null
SystemProgramming/models/parsers.py
Savital/BMSTU
30b6b1f3e79e1286a8cf5b3da7a4199a215aebe1
[ "MIT" ]
null
null
null
# Savital https://github.com/Savital # Reads data from proc import os class ProcReader(): def __init__(self): super(ProcReader, self).__init__() self.construct() def __del__(self): pass def construct(self): pass def get(self, path): if not os.path.exists(path): return False results = [] f = open(path, 'r') for line in f: list = [] i = 0 elm = "" while i < len(line): if line[i] == ' ': list.append(elm.strip()) elm = "" if line[i] == '\n': list.append(elm.strip()) break elm += line[i] i += 1 results.append(list) f.close() print(results) return results
22.05
44
0.422902
90
882
4.011111
0.511111
0.041551
0.060942
0.099723
0
0
0
0
0
0
0
0.004211
0.461451
882
39
45
22.615385
0.755789
0.062358
0
0.193548
0
0
0.004854
0
0
0
0
0
0
1
0.129032
false
0.064516
0.032258
0
0.258065
0.032258
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
77d0551b120b7e247faec73e3785aed4c6572d10
1,575
py
Python
blog/tests.py
kkampardi/DjangoTesting
1092c41d9d4930f0512fac79b4d95836c70e5f3a
[ "MIT" ]
null
null
null
blog/tests.py
kkampardi/DjangoTesting
1092c41d9d4930f0512fac79b4d95836c70e5f3a
[ "MIT" ]
3
2017-05-05T09:47:40.000Z
2017-05-09T07:28:36.000Z
blog/tests.py
kkampardi/DjangoTesting
1092c41d9d4930f0512fac79b4d95836c70e5f3a
[ "MIT" ]
null
null
null
from django.test import TestCase from django.contrib.auth import get_user_model from .models import Entry class EntryModelTest(TestCase): """Ensure that the a blog's entry string representation is qual to its title""" def test_string_representation(self): entry = Entry(title="This is a test title") self.assertEqual(str(entry), entry.title) def test_verbose_name_plural(self): self.assertEqual(str(Entry._meta.verbose_name_plural), "entries") class HomePageTest(TestCase): """Test whether our blog entries show up on the homepage""" def setUp(self): self.user = get_user_model().objects.create(username="some_user") def test_homepage(self): response = self.client.get('/blog/list') self.assertEqual(response.status_code, 200) def test_one_entry(self): Entry.objects.create(title='1-title', body='1-body', author=self.user) response = self.client.get('/blog/list') self.assertContains(response, '1-title') self.assertContains(response, '1-body') def test_no_entries(self): response = self.client.get('/blog') self.assertContains(response, 'No blog entries yet') def test_two_entries(self): Entry.objects.create(title='1-title', body='1-body', author=self.user) Entry.objects.create(title='2-title', body='2-body', author=self.user) response = self.client.get('/') self.assertContains(response, '1-title') self.assertContains(response, '1-body') self.assertContains(response, '2-title')
37.5
83
0.68254
208
1,575
5.067308
0.307692
0.039848
0.148008
0.079696
0.352941
0.352941
0.321632
0.282732
0.225806
0.225806
0
0.010929
0.186667
1,575
42
84
37.5
0.811866
0.080635
0
0.266667
0
0
0.106398
0
0
0
0
0
0.3
1
0.233333
false
0
0.1
0
0.4
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
1
77d18b76003c4bfc898530ed0582298875b31542
29
py
Python
trader/trading/executioner.py
9600dev/mmr
b08e63b7044f2b2061d8679b216822c82d309c86
[ "Apache-2.0" ]
12
2021-09-22T21:19:23.000Z
2022-01-03T21:38:47.000Z
trader/trading/executioner.py
webclinic017/mmr
9c385c027fcf8cf365e41726c7c5d2064f33c202
[ "Apache-2.0" ]
null
null
null
trader/trading/executioner.py
webclinic017/mmr
9c385c027fcf8cf365e41726c7c5d2064f33c202
[ "Apache-2.0" ]
3
2021-09-05T23:26:13.000Z
2022-03-25T01:01:22.000Z
class Executioner(): pass
14.5
20
0.689655
3
29
6.666667
1
0
0
0
0
0
0
0
0
0
0
0
0.206897
29
2
21
14.5
0.869565
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.5
0
0
0.5
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
5
77d27a48425b7756b0ea323bb46686f3b1ceed5d
1,196
py
Python
DataStructure and algorithms/Overallfibnocci.py
Rajatkhatri7/Project-Milap
0bb5dfc05064a8727760755fa55e53fd7bb8d8d3
[ "Apache-2.0" ]
null
null
null
DataStructure and algorithms/Overallfibnocci.py
Rajatkhatri7/Project-Milap
0bb5dfc05064a8727760755fa55e53fd7bb8d8d3
[ "Apache-2.0" ]
null
null
null
DataStructure and algorithms/Overallfibnocci.py
Rajatkhatri7/Project-Milap
0bb5dfc05064a8727760755fa55e53fd7bb8d8d3
[ "Apache-2.0" ]
null
null
null
# grows via 2^n/2 # fn= { 0 ; n=0 # 1 ; n=1 # f(n-1)+f(n-2) ; n>1} def fib(n): if n<=1: return n else: return fib(n-1)+fib(n-2) # for f(n)>n it will take very long time to compute # bad algo """ F(n) / \ f(n-1) f(n-2) / \ / \ f(n-2) f(n-3) f(n-3) f(n-4) / \ / \ / \ / \ / \ / \ / \ f(n-5) f(n-6) / \ / \ f(n-4) f(n-5) f(n-3) f(n-4) f(n-4) f(n-5) Above we can see that we are computing same thing again(look on (f-3)) which is not required """ def Fastfib(n): f = [] f.append(int(0)) f.append(int(1)) print(f) f[1]=1 for i in range(2,n+1): f.append((f[i-1]+f[i-2])) print(f) return f[n] """it takes less time it is more powerful""" if __name__ == "__main__": n=int(input("enter the no: ")) res=fib(n) res2=Fastfib(n) print(f"sum of first {n} fibnocci no is with recursion : ",res) print(f"sum of first {n} fibnocci no is with fastalgo : ",res2)
23.92
93
0.405518
197
1,196
2.42132
0.35533
0.079665
0.025157
0.033543
0.236897
0.224319
0.190776
0.190776
0.138365
0.138365
0
0.053779
0.424749
1,196
50
94
23.92
0.639535
0.129599
0
0.095238
0
0
0.228407
0
0
0
0
0
0
1
0.095238
false
0
0
0
0.238095
0.190476
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
77d4c896f4135bf9076e4a4fc25159dd59f2bc8a
5,765
py
Python
unit-2/tower.py
GalvinGao/2019-ProgrammingCourse
b668bc9bab902959a574aa3db73ae481131c0c27
[ "MIT" ]
null
null
null
unit-2/tower.py
GalvinGao/2019-ProgrammingCourse
b668bc9bab902959a574aa3db73ae481131c0c27
[ "MIT" ]
null
null
null
unit-2/tower.py
GalvinGao/2019-ProgrammingCourse
b668bc9bab902959a574aa3db73ae481131c0c27
[ "MIT" ]
null
null
null
import time import turtle as t t.mode('standard') t.speed(8) DISTANCE = 8 RESIZE_RATIO = 6 t.pensize(RESIZE_RATIO) class Restorer: def __init__(self): self.last_pos = t.pos() def restore(self): t.goto(self.last_pos[0], self.last_pos[1]) class Draw: @staticmethod def goto(x, y, heading=0): t.penup() t.goto(x * RESIZE_RATIO, y * RESIZE_RATIO) t.setheading(heading) t.pendown() @staticmethod def line(first_line=(), second_line=()): restorer = Restorer() assert len(first_line) == 2 and type( first_line) == tuple, "'first_line' must be a Tuple object with 2 positional parameters." assert len(second_line) == 2 and type( second_line) == tuple, "'second_line' must be a Tuple object with 2 positional parameters." t.penup() t.goto(first_line[0] * RESIZE_RATIO, first_line[1] * RESIZE_RATIO) t.pendown() t.goto(second_line[0] * RESIZE_RATIO, second_line[1] * RESIZE_RATIO) t.penup() restorer.restore() def rectangle_absolute(self, top_left_corner: tuple, bottom_right_corner: tuple, fill_color: str = "black"): t.fillcolor(fill_color) self.goto(top_left_corner[0], top_left_corner[1]) t.begin_fill() for _ in range(2): t.forward((bottom_right_corner[0] - top_left_corner[0]) * RESIZE_RATIO) t.left(90) t.forward((bottom_right_corner[1] - top_left_corner[1]) * RESIZE_RATIO) t.left(90) t.end_fill() @staticmethod def circle(distance: float = DISTANCE): t.circle(RESIZE_RATIO * distance) def function(self, _function, trace_size: float = 0.1, x_range: tuple = (), y_range: tuple = ()): restorer = Restorer() for index in range(trace_size, trace_size): y = _function(index) restorer.restore() def square(self, stroke="black"): t.color(stroke) for _ in range(4): t.forward(DISTANCE * RESIZE_RATIO) self.turn_left() def rectangle_relative(self, x_side, y_side): for _ in range(2): t.forward(x_side * RESIZE_RATIO) self.turn_left() t.forward(y_side * RESIZE_RATIO) self.turn_left() def triangle(self): self.polygon(sides=3, fill="white", stroke="black") def circle(self, distance=DISTANCE): 
t.circle(RESIZE_RATIO * distance) def polygon(self, sides=5, fill="red", stroke="black"): assert sides >= 3, "Side amount of a polygon should be greater or equals to 3." t.color(stroke, fill) turnAngle = 360 / sides t.begin_fill() for i in range(sides): t.forward(RESIZE_RATIO * DISTANCE / sides * 5) t.left(turnAngle) t.end_fill() def car(self): self.goto(-4, -3) self.rectangle_relative(8, 3) t.fillcolor("black") self.goto(-2, -4) t.begin_fill() self.circle(1) t.end_fill() self.goto(2, -4) t.begin_fill() self.circle(1) t.end_fill() self.goto(-4, -2.5) t.begin_fill() self.circle(0.5) t.end_fill() def house(self): self.rectangle_relative(8, 6) self.goto(8, 6, heading=-150) triangle_sides = 4.6 t.forward(triangle_sides * RESIZE_RATIO) t.left(60) t.forward(triangle_sides * RESIZE_RATIO) def tower(self): self.line((0, 0), (24, 0)) self.line((5, 10), (0, 0)) self.line((19, 10), (24, 0)) self.rectangle_absolute((5, 10), (19, 12), "black") self.line((7, 12), (10, 22)) self.line((17, 12), (14, 22)) self.rectangle_absolute((8, 22), (16, 23), "black") self.line((10, 23), (11, 35)) self.line((13, 35), (14, 23)) self.rectangle_absolute((10, 35), (14, 36), "black") self.line((11, 36), (11, 48)) self.line((13, 36), (13, 48)) self.line((19, 10), (0, 0)) self.line((5, 10), (24, 0)) self.line((14, 21), (7, 13)) self.line((17, 13), (10, 21)) self.line((13, 34), (10, 24)) self.line((11, 34), (14, 24)) self.line((13, 47), (11, 37)) self.line((11, 47), (13, 37)) self.goto(12, 48) t.fillcolor("black") t.begin_fill() self.circle(2) t.end_fill() def turn_left(self): t.left(90) def turn_right(self): t.right(90) def turn_around(self): t.left(180) def start_section(self, text): t.penup() self.turn_right() t.forward(DISTANCE * RESIZE_RATIO) t.pendown() t.write(text) t.penup() self.goto(0, 0, 0) t.pendown() t.pensize(2) def end_section(self): time.sleep(.5) t.reset() draw = Draw() # 1. Square draw.start_section("1. Square") draw.square() draw.end_section() # 2. Rectangle draw.start_section("2. 
Rectangle") draw.rectangle_relative(8, 4) draw.end_section() # 3. Triangle draw.start_section("3. Triangle") draw.triangle() draw.end_section() # 4. Circle draw.start_section("4. Circle") draw.circle() draw.end_section() # 5. Blue Square draw.start_section("5. Blue Square") draw.square("blue") draw.end_section() # 6. Hexagon with red background and yellow border draw.start_section("6. Hexagon with red background and yellow border") draw.polygon(sides=6, fill="red", stroke="yellow") draw.end_section() # 7. Car draw.start_section("7. Car") draw.car() draw.end_section() # 8. House draw.start_section("8. House") draw.house() draw.end_section() # 10. Tower draw.start_section("10. Tower") draw.tower() draw.end_section()
27.716346
112
0.580399
815
5,765
3.958282
0.164417
0.064786
0.044637
0.017359
0.269374
0.179479
0.110353
0.087415
0.087415
0.087415
0
0.058879
0.269384
5,765
207
113
27.850242
0.707028
0.023244
0
0.323529
0
0
0.069205
0
0
0
0
0
0.017647
1
0.117647
false
0
0.011765
0
0.141176
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
77d4f48663c89f32c4be3c6b61e022e849edb12a
1,304
py
Python
build_user_list.py
BennettDixon/sql_to_contact_list
23497119628622b4bf9e660ec2c4b12efba8b081
[ "MIT" ]
null
null
null
build_user_list.py
BennettDixon/sql_to_contact_list
23497119628622b4bf9e660ec2c4b12efba8b081
[ "MIT" ]
null
null
null
build_user_list.py
BennettDixon/sql_to_contact_list
23497119628622b4bf9e660ec2c4b12efba8b081
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 """script for testing connection to database""" import pyodbc import sys import os from models.user import User import models driver = os.environ.get('CONTACT_SQL_DRIVER') server = os.environ.get('CONTACT_SQL_SERVER') database = os.environ.get('CONTACT_SQL_DB') username = os.environ.get('CONTACT_SQL_USER') password = os.environ.get('CONTACT_SQL_PASS') try: statement = "SELECT * FROM {}".format(sys.argv[1]) except: print("please provide a table as an argument") print("usage: ./build_list.py user_table_name") exit(1) needed = [driver, server, database, username, password, statement] for req in needed: if req is None: print('Failed to get variable from env settings') exit(1) # build the connection string after verifying attributes were provided conn_str = 'Driver={};Server={};Database={};Uid={};Pwd={};Encrypt=yes;TrustServerCertificate=no;Connection Timeout=30;'.format( driver, server, database, username, password) cnxn = pyodbc.connect(conn_str) cursor = cnxn.cursor() cursor.execute(statement) row = cursor.fetchall() print('got rows') for r in row: u = User(first_name=r[1], last_name=r[2], email=r[3], phone=r[4]) u.save() print(u) models.storage.save()
28.977778
127
0.68635
183
1,304
4.797814
0.508197
0.051253
0.068337
0.1082
0.207289
0
0
0
0
0
0
0.009346
0.179448
1,304
44
128
29.636364
0.811215
0.101227
0
0.054054
0
0.027027
0.280687
0.080687
0
0
0
0
0
1
0
false
0.081081
0.135135
0
0.135135
0.135135
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
77d58ca19b534449cc89976ecbaf5bc5474e985e
14,960
py
Python
Project_03_Books/src/game.py
moniqklimek/training
51504ab839ed4b5ccc5731662a5077d5db334b93
[ "MIT" ]
null
null
null
Project_03_Books/src/game.py
moniqklimek/training
51504ab839ed4b5ccc5731662a5077d5db334b93
[ "MIT" ]
null
null
null
Project_03_Books/src/game.py
moniqklimek/training
51504ab839ed4b5ccc5731662a5077d5db334b93
[ "MIT" ]
null
null
null
import json import pprint """ TITLE: imagine buy in bookshoop - interaktive fun with User :) ISSUE : help you choose the right item, get to know the User's preferences, i.e. - the thematic category that interests him, the results improved for him, a detailed description of the selected item assumptions: no method has been developed to protect the program against entering incorrect answers by the User established: - that the categories will be written as displayed on the console with uppercase letters (no spaces, etc.) - that the user will copy the entire title of the book as it is displayed on the console logic 100. Ask the user what category of prince interests him(show him the sorted results) 101. Enter the selected category and ask if User wants to sort them by: - increasing price, - decreasing price, - the highest number of stars, - the lowest number of stars, - availability, and present the results 102.The user has chosen a given book - show him a short description and product description logika - PL 100. spytaj Kupujacego jaka kategoria ksiazego go intresuje (pokaz mu posortowane wyniki) 101. wejdz do wybranej kategori i spytaj czy Kupujacy chce posortowac je po: - cenie rosnacej, - cenie malejacej, - najwyzszej ilosci gwiazdek, - najnizszej ilosci gwiazdek, - dostepnosci, i zaprezentuj wyniki do dalszego wyboru w postaci listy 102. 
user wybral dana ksiazke - pokaz mu do niej szczegolowy opis i opis produktu """ # open and read the content of files from part 01 this issue (scraping results) f1 = open('resources/01_category_first_link.json') scrap1 = json.load(f1) f1.close() f2 = open('resources/02_single_books.json') scrap2 = json.load(f2) f2.close() f3 = open('resources/03_details_single_books.json') scrap3 = json.load(f3) f3.close() class Game: def __init__(self): pass # I am using a file called --> "01_category_first_link.json" # important because each file has different keys to access the content of the dictionaries def sorted_thematica_category(self,s1): category_list = [letter['Book_Category'] for letter in s1] sorted_category_list = sorted(category_list) return sorted_category_list # I am using a file called --> "02_single_books.json" def show_all_books_ctagory(self, s2, choosen_category): list_all_books_this_cat=[] for el in s2: if el['Book_Category'] == choosen_category: list_all_books_this_cat.append(el['Book_Title']) how_many_books = len(list_all_books_this_cat) return how_many_books, list_all_books_this_cat def printing_long_questions(self): print('--------') print('Please tell me how to sort the results for YOU. Write 1 or 2 or 3 or 4 or 5.') print(' \t\t 1 - sort by price - DESC.') print(' \t\t 2 - sort by price - ASC.') print(' \t\t 3 - sort by popularity ranking - DESC.') print(' \t\t 4 - sort by popularity ranking - ASC.') print(' \t\t 5 - sort by Title alphabetically. 
') def user_choose_filter_method(self, nr, list_title): if nr==1 or nr==2: list_dict_title_and_price=self.generate_tab_title_price(scrap2, list_title) if nr == 1: result_method = self.sort_method_1(list_dict_title_and_price) else: #nr 2 result_method = self.sort_method_2(list_dict_title_and_price) if nr == 3: # create dict only with key like stars and title list_dict_title_and_stars = self.generate_tab_title_stars(scrap2, list_title) # sorted by stars result_method = self.sort_method_3(list_dict_title_and_stars) if nr == 4: # create dict only with key like stars and title list_dict_title_and_stars = self.generate_tab_title_stars(scrap2, list_title) # sorted by stars result_method = self.sort_method_4(list_dict_title_and_stars) if nr == 5: result_method = self.sort_method_5(list_title) return result_method # building a new DICTIONARY - cutting the content from existing DICTIONARIES # idea from https://stackoverflow.com/questions/3420122/filter-dict-to-contain-only-certain-keys def remove_key_from_existing_dict(self, existing_dict, *key_to_delete_from_existing_dict): """ input -{'Book_Price': 10.97, 'Book_Stars': 1, 'Book_Title': 'The Long Shadow', 'Book_total_category_amouth': 1} key_to_delete_from_existing_dict='Book_Stars' output--> {'Book_Price': 10.97,'Book_Title': 'The Long Shadow', , 'Book_total_category_amouth': 1} """ new_dict = dict((key, value) for key, value in existing_dict.items() if key not in key_to_delete_from_existing_dict) return new_dict def leave_only_selected_keys_in_existing_dict(self,existing_dict, *key_to_stay): """ input -{'Book_Price': 10.97, 'Book_Stars': 1, 'Book_Title': 'The Long Shadow', 'Book_total_category_amouth': 1} key_to_stay='Book_Stars', 'Book_Title' output--> {'Book_Stars': 1, 'Book_Title': 'The Long Shadow'} """ new_dict = dict((key, value) for key, value in existing_dict.items() if key in key_to_stay) return new_dict # building a new list of dictionaries - cutting the content from skraping 2 (list - dictionaries) def 
generate_tab_title_price(self, scrap2, list_title): # scrap2= big list dics # i want filter and catch only interesting me title --list_title # and return only key --'Book_Price', 'Book_Title' list_dict_only_title_price=[] for small_dict in scrap2: for title in list_title: if small_dict['Book_Title'] in title: new_short_dict = self.leave_only_selected_keys_in_existing_dict(small_dict, 'Book_Price', 'Book_Title') list_dict_only_title_price.append(new_short_dict) return list_dict_only_title_price def generate_tab_title_stars(self, scrap2, list_title): # scrap2= big list dics # i want filter and catch only interesting me title --list_title # and return only key --'Book_Title', 'Book_Stars' list_dict_only_title_stars = [] for small_dict in scrap2: for title in list_title: if small_dict['Book_Title'] in title: new_short_dict = self.leave_only_selected_keys_in_existing_dict( small_dict, 'Book_Title', 'Book_Stars') list_dict_only_title_stars.append(new_short_dict) return list_dict_only_title_stars def sort_method_1(self,list_dict_title_and_price): #Press 1 - sort by price descending (malejaco) # return list with dict price and title # inspiration - -> https: // stackoverflow.com/questions/1143671/how-to-sort-objects-by-multiple-keys-in-python sorted_by_price_DESC= sorted(list_dict_title_and_price, key=lambda d: (-d['Book_Price'], d['Book_Title'])) return sorted_by_price_DESC def sort_method_2(self, list_dict_title_and_price): # Press 2 - sorted by price in ascending order (rosnaco) # return list with dict price and title sorted_by_price_DESC = sorted(list_dict_title_and_price, key=lambda d: (-d['Book_Price'], d['Book_Title'])) sorted_by_price_ASC = sorted_by_price_DESC[::-1] return sorted_by_price_ASC def sort_method_3(self, list_dict_only_title_AND_stars): sorted_by_stars_DESC = sorted(list_dict_only_title_AND_stars, key=lambda d: (-d['Book_Stars'], d['Book_Title'])) return sorted_by_stars_DESC def sort_method_4(self, list_dict_only_title_AND_stars): # catch list dict 
with stars and title and return sorted by stars #Press 3 - sorted by popularity ranking - Max stars to min sorted_by_stars_DESC = sorted(list_dict_only_title_AND_stars, key=lambda d: (-d['Book_Stars'], d['Book_Title'])) sorted_by_stars_ASC = sorted_by_stars_DESC[::-1] return sorted_by_stars_ASC def sort_method_5(self, list_title): # Press 5 - sort by title alphabetically """ ["It's Only the Himalayas", 'Full Moon over Noah’s Ark: An Odyssey to Mount Ararat and Beyond', 'See America: A Celebration of Our National Parks & Treasured Sites', 'Vagabonding: An Uncommon Guide to the Art of Long-Term World Travel', 'Under the Tuscan Sun', 'A Summer In Europe', 'The Great Railway Bazaar', 'A Year in Provence (Provence #1)', 'The Road to Little Dribbling: Adventures of an American in Britain (Notes From a Small Island #2)', 'Neither Here nor There: Travels in Europe', '1,000 Places to See Before You Die'] """ # mamy kategorie wybrana, mamy liste ksiazek - sort by price descending. sorted_title = sorted(list_title) return sorted_title # choose inf detail from scrap 3 # I am using a file called --> "03_details_single_books.json" def catch_index_if_have_title(self,title_choosen, scrap3): # output: list dicts # purpose: catch only index - for concret - value :title_choosen # which help to link another parts this dict with information like counter_index_in_list_dicts = 0 for el in scrap3: if el['title_book'] == title_choosen: break else: counter_index_in_list_dicts += 1 return counter_index_in_list_dicts def return_details(self,title_choosen, scrap3): # i need index link with this title index_list_with_dicts = self.catch_index_if_have_title(title_choosen, scrap3) tab_details=[] title_book = scrap3[index_list_with_dicts]["title_book"] tab_details.append(title_book) category = scrap3[index_list_with_dicts]["category"] tab_details.append(category) price = scrap3[index_list_with_dicts]["price"] tab_details.append(price) productDescription = 
scrap3[index_list_with_dicts]["productDescription"] tab_details.append(productDescription) how_many = scrap3[index_list_with_dicts]["in_stock_how_many_available"] tab_details.append(how_many) about = scrap3[index_list_with_dicts]['detals_link_to_book'] tab_details.append(about) upc = scrap3[index_list_with_dicts]["productInformation_UPC"] tab_details.append(upc) return tab_details def printing_final_result(self, tab_details): title_book = tab_details[0] category = tab_details[1] category = tab_details[1] price = tab_details[2] productDescription = tab_details[3] in_stock_how_many_available = tab_details[4] detals_link_to_book = tab_details[5] productInformation_UPC = tab_details[6] print('\n\t The book has a title: {}.Category is {}'.format(title_book, category)) print('\n\t Book Price:', price) print('\n\t Content Description:', productDescription) print('\n\t We still have {} item/s in stock'.format(in_stock_how_many_available)) print('\n\t If you want to know more about the book, please open the link:', detals_link_to_book) print('\n\t UPC number:', productInformation_UPC) # logic for conversation with User through Terminal def logic(self): answer1_user_if_play = input("Do you want to buy some interesting book? :) . Choose (n/y) \n") if answer1_user_if_play == 'y': print('--------') print("\t Lets game :) ..... \n\t Below thematical book's Category for Your choose. \n") #step one - choose category sorted_category = self.sorted_thematica_category(scrap1) print(sorted_category) print('--------') customer_choose_category_book = input( '\t Please choose one and copy Your choice here ...\n\t (EXAMPLE:... Academic)\n\t (EXAMPLE:... Add a comment)\n\t YOUR TURN - Chose one Category from list : ...') """ while customer_choose_category_book not in sorted_category_list: print('Please once again choose category. 
This one not exist in own base and list at top') """ if customer_choose_category_book in sorted_category: how_books, title_books_this_choosen_category = self.show_all_books_ctagory(scrap2, customer_choose_category_book) print('We have for You in shop {} book/books title for category {}'.format(how_books, customer_choose_category_book)) print(title_books_this_choosen_category) else: print('Please once again choose category. This one not exist in own base and list at top') # step two - choose how user want to sort results and what want to see self.printing_long_questions() nr_choosen_method=int(input()) print(title_books_this_choosen_category) print('--------') lista_books_filter_by_user_mean=self.user_choose_filter_method(nr_choosen_method, title_books_this_choosen_category) if len(lista_books_filter_by_user_mean)==1: print('\t It is exactly one book in this category') print('--------') # any sens to choose book , if exist only one # for example for catgeory crime - [{'Book_Stars': 1, 'Book_Title': 'The Long Shadow of Small Ghosts: Murder and Memory in an American City'}] user_choose_single_title = lista_books_filter_by_user_mean[0]['Book_Title'] tab_inf = self.return_details(user_choose_single_title, scrap3) #print(tab_inf) self.printing_final_result(tab_inf) else: print('\t Also this is list for You') print(lista_books_filter_by_user_mean) # choose single title book from User input- purpose--> show for this book all details user_choose_single_title = input('\t\n Now please, copy and paste the entire Title of the book here:...(EXAMPLE:... Feathers: Displays of Brilliant Plumage) ') # use the scrap nr 3 with details tab_inf=self.return_details(user_choose_single_title,scrap3) print(tab_inf) self.printing_final_result(tab_inf) if answer1_user_if_play in ('n','n ','n ', 'NO', 'nie', 'N'): print('Nice day any way.') if __name__ == "__main__": game = Game() game.logic()
43.74269
281
0.657754
2,070
14,960
4.481159
0.204348
0.018974
0.015416
0.018974
0.407288
0.313389
0.268327
0.212807
0.205908
0.181544
0
0.014616
0.259091
14,960
341
282
43.870968
0.822266
0.215642
0
0.183432
0
0.023669
0.161118
0.015537
0
0
0
0
0
1
0.106509
false
0.005917
0.011834
0
0.207101
0.201183
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
77d754abc0dd2b42226712db869734718758f2ec
1,592
py
Python
books_and_chapters/urls.py
assulthoni/Django-Bookworm
d816c099b1eaceff05958ed3bf9e7dd611e9b5fd
[ "MIT" ]
null
null
null
books_and_chapters/urls.py
assulthoni/Django-Bookworm
d816c099b1eaceff05958ed3bf9e7dd611e9b5fd
[ "MIT" ]
null
null
null
books_and_chapters/urls.py
assulthoni/Django-Bookworm
d816c099b1eaceff05958ed3bf9e7dd611e9b5fd
[ "MIT" ]
null
null
null
"""django_bookworm.books_and_chapters URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/2.1/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) """ from django.urls import path from . import views from django.views.generic.base import RedirectView from django.contrib.auth.decorators import login_required urlpatterns = [ path('books/', login_required(views.homepage), name='books'), # for adding a new book path('books/search/', views.search_book, name='search_book'), path('books/<slug:slug>/', login_required(views.get_book_details), name='book_detail'), path('books/<int:pk>/delete/', login_required(views.delete_book), name='delete_single_book'), path('books/<int:pk>/edit/', login_required(views.edit_book_details), name='book_details_edit'), path('chapters/add/', views.add_chapter, name='add_chapter'), path('chapters/<int:pk>/delete/', views.delete_chapter, name='delete_chapter'), path('chapters/<int:pk>/edit/', views.edit_chapter, name='edit_chapter'), path('', RedirectView.as_view(url='/accounts/login/', permanent=False)) ]
49.75
100
0.723618
230
1,592
4.878261
0.330435
0.057932
0.064171
0.02139
0.147059
0.104278
0.066845
0
0
0
0
0.005776
0.130025
1,592
31
101
51.354839
0.804332
0.422739
0
0
0
0
0.279605
0.076754
0
0
0
0
0
1
0
false
0
0.266667
0
0.266667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
77d8b870eb29ec01acb07c7f782c1ca8e13356c3
573
py
Python
manage.py
mwaiyusuf/news_highlight
dfae21f67c3bbe20521a5c3c96a0a6a759fbd8fb
[ "MIT" ]
null
null
null
manage.py
mwaiyusuf/news_highlight
dfae21f67c3bbe20521a5c3c96a0a6a759fbd8fb
[ "MIT" ]
null
null
null
manage.py
mwaiyusuf/news_highlight
dfae21f67c3bbe20521a5c3c96a0a6a759fbd8fb
[ "MIT" ]
null
null
null
from app import create_app from flask_script import Manager,Server #initialise our extensions and server class that aid in launching of our server # Creating app instance app = create_app('development') manager = Manager(app) manager.add_command('server',Server) #launch app server @manager.command def test(): """Run the ubit tests""" import unittest tests = unittest tests = unittest.TestLoader().discover('tests') unittest.TextTestRunner(verbosity=2).run(tests) if __name__ == '__main__': #checks if the script is run directly manager.run()
30.157895
119
0.743455
77
573
5.376623
0.545455
0.094203
0.101449
0
0
0
0
0
0
0
0
0.002079
0.160558
573
18
120
31.833333
0.858628
0.300175
0
0
0
0
0.076531
0
0
0
0
0
0
1
0.076923
false
0
0.230769
0
0.307692
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
77d998bd3b17f3da023e7ce99e510488aaf3a564
9,384
py
Python
src/robotrunner.py
bbokser/hopper-mpc-simple
51fd3c0cb515d32b2a9cce93a0db45dddf95174c
[ "MIT" ]
null
null
null
src/robotrunner.py
bbokser/hopper-mpc-simple
51fd3c0cb515d32b2a9cce93a0db45dddf95174c
[ "MIT" ]
null
null
null
src/robotrunner.py
bbokser/hopper-mpc-simple
51fd3c0cb515d32b2a9cce93a0db45dddf95174c
[ "MIT" ]
null
null
null
""" Copyright (C) 2020-2022 Benjamin Bokser """ import plots import mpc_cvx # import time # import sys import numpy as np import copy from scipy.linalg import expm import itertools np.set_printoptions(suppress=True, linewidth=np.nan) def projection(p0, v): # find point p projected onto ground plane from point p0 by vector v z = 0 t = (z - p0[2]) / v[2] x = p0[0] + t * v[0] y = p0[1] + t * v[1] p = np.array([x, y, z]) return p class Runner: def __init__(self, dims=2, ctrl='mpc', dt=1e-3): self.dims = dims self.ctrl = ctrl self.dt = dt self.total_run = 5000 self.tol = 1e-3 # desired mpc tolerance self.m = 7.5 # mass of the robot, kg self.N = 10 # mpc horizon length self.g = 9.81 # gravitational acceleration, m/s2 self.t_p = 1 # gait period, seconds self.phi_switch = 0.5 # switching phase, must be between 0 and 1. Percentage of gait spent in contact. # for now, mpc sampling time is equal to gait period self.mpc_dt = self.t_p * self.phi_switch # mpc sampling time self.N_time = self.N*self.mpc_dt # mpc horizon time if dims == 2: self.n_x = 5 # number of states self.n_u = 2 # number of controls self.A = np.array([[0, 0, 1, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, -1], [0, 0, 0, 0, 0]]) self.B = np.array([[0, 0], [0, 0], [1 / self.m, 0], [0, 1 / self.m], [0, 0]]) self.X_0 = np.zeros(self.n_x) self.X_0[1] = 0.7 self.X_0[-1] = self.g # initial conditions self.X_f = np.array([2, 0.5, 0, 0, self.g]) elif dims == 3: self.n_x = 7 # number of states self.n_u = 3 # number of controls self.A = np.array([[0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, -1], [0, 0, 0, 0, 0, 0, 0]]) self.B = np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0], [1 / self.m, 0, 0], [0, 1 / self.m, 0], [0, 0, 1 / self.m], [0, 0, 0]]) self.X_0 = np.zeros(self.n_x) self.X_0[2] = 0.7 self.X_0[-1] = self.g # initial conditions self.X_f = np.hstack([2, 2, 0.5, 0, 0, 0, self.g]).T # desired final state mu = 0.3 # coeff of 
friction self.mpc = mpc_cvx.Mpc(t=self.mpc_dt, A=self.A, B=self.B, N=self.N, m=self.m, g=self.g, mu=mu) self.mpc_factor = self.mpc_dt * 2 / self.dt # repeat mpc every x seconds def run(self): total = self.total_run + 1 # number of timesteps to plot t = 0 # time t0 = t # starting time mpc_factor = self.mpc_factor # repeat mpc every x seconds mpc_counter = copy.copy(mpc_factor) X_traj = np.zeros((total, self.n_x)) X_traj[0, :] = self.X_0 # initial conditions f_hist = np.zeros((total, self.n_u)) s_hist = np.zeros(total) U_pred = np.zeros((self.N, self.n_u)) X_pred = np.zeros((self.N, self.n_x)) pf_ref = np.zeros(self.n_u) j = int(self.mpc_factor) X_pred_hist = np.zeros((self.N+1, self.n_u)) f_pred_hist = np.zeros((total, self.n_u)) p_pred_hist = np.zeros((total, self.n_u)) for k in range(0, self.total_run): t = t + self.dt s = self.gait_scheduler(t, t0) if self.ctrl == 'mpc': if mpc_counter == mpc_factor: # check if it's time to restart the mpc mpc_counter = 0 # restart the mpc counter X_ref = self.path_plan(X_in=X_traj[k, :]) X_refN = X_ref[::int(self.mpc_dt / self.dt)] U_pred, X_pred = self.mpc.mpcontrol(X_in=X_traj[k, :], X_ref=X_refN) p_pred = (X_pred[2, 0:3]+(X_pred[2, 0:3]+X_pred[3, 0:3])/2)/2 # next pred body pos over next ftstep f_pred = U_pred[2, :] # next predicted foot force vector p_pred_hist = np.vstack((p_pred_hist, p_pred)) f_pred_hist = np.vstack((f_pred_hist, 0.5*f_pred/np.sqrt(np.sum(f_pred**2)))) pf_ref = np.vstack((pf_ref, projection(p_pred, f_pred))) X_pred_hist = np.dstack((X_pred_hist, X_pred[:, 0:self.n_u])) mpc_counter += 1 f_hist[k, :] = U_pred[0, :]*s # take first timestep else: # Open loop traj opt, this will fail if total != mpc_factor if int(total/self.N) != mpc_factor: print("ERROR: Incorrect settings", total/self.N, mpc_factor) if k == 0: X_ref = self.path_plan(X_in=X_traj[k, :]) X_refN = X_ref[::int(self.mpc_factor)] # self.traj_N(X_ref) force_f, X_pred = self.mpc.mpcontrol(X_in=X_traj[k, :], X_ref=X_refN) for i in range(0, self.N): 
f_hist[int(i*j):int(i*j+j), :] = list(itertools.repeat(force_f[i, :], j)) s_hist[k] = s X_traj[k+1, :] = self.rk4(xk=X_traj[k, :], uk=f_hist[k, :]) # X_traj[k + 1, :] = self.dynamics_dt(X=X_traj[k, :], U=f_hist[k, :], t=self.dt) # print(X_traj[-1, :]) # print(f_hist[4500, :]) plots.fplot(total, p_hist=X_traj[:, 0:self.n_u], f_hist=f_hist, s_hist=s_hist, dims=self.dims) plots.posplot(p_ref=self.X_f[0:self.n_u], p_hist=X_traj[:, 0:self.n_u], dims=self.dims) plots.posfplot(p_ref=self.X_f[0:self.n_u], p_hist=X_traj[:, 0:self.n_u], p_pred_hist=p_pred_hist, f_pred_hist=f_pred_hist, pf_hist=pf_ref, dims=self.dims) # plots.posplot(p_ref=self.X_f[0:self.n_u], p_hist=X_pred_hist[:, 0:self.n_u, 1], dims=self.dims) # plots.posplot_t(p_ref=self.X_ref[0:self.n_u], p_hist=X_traj[:, 0:2], total=total) return None def dynamics_ct(self, X, U): # CT dynamics X -> dX A = self.A B = self.B X_next = A @ X + B @ U return X_next def dynamics_dt(self, X, U, t): n_x = self.n_x # number of states n_u = self.n_u # number of controls A = self.A B = self.B AB = np.vstack((np.hstack((A, B)), np.zeros((n_u, n_x + n_u)))) M = expm(AB * t) Ad = M[0:n_x, 0:n_x] Bd = M[0:n_x, n_x:n_x + n_u] X_next = Ad @ X + Bd @ U return X_next def rk4(self, xk, uk): # RK4 integrator solves for new X dynamics = self.dynamics_ct h = self.dt f1 = dynamics(xk, uk) f2 = dynamics(xk + 0.5 * h * f1, uk) f3 = dynamics(xk + 0.5 * h * f2, uk) f4 = dynamics(xk + h * f3, uk) return xk + (h / 6.0) * (f1 + 2 * f2 + 2 * f3 + f4) def gait_scheduler(self, t, t0): phi = np.mod((t - t0) / self.t_p, 1) if phi > self.phi_switch: s = 0 # scheduled swing else: s = 1 # scheduled stance return s def path_plan(self, X_in): # Path planner--generate reference trajectory dt = self.dt size_mpc = int(self.mpc_factor*self.N) # length of MPC horizon in s TODO: Perhaps N should vary wrt time? 
t_ref = 0 # timesteps given to get to target, either mpc length or based on distance (whichever is smaller) X_ref = None if self.dims == 2: t_ref = int(np.minimum(size_mpc, abs(self.X_f[0] - X_in[0])*1000)) # ignore z distance due to bouncing X_ref = np.linspace(start=X_in, stop=self.X_f, num=t_ref) # interpolate positions # interpolate velocities X_ref[:-1, 2] = [(X_ref[i + 1, 0] - X_ref[i, 0]) / dt for i in range(0, np.shape(X_ref)[0] - 1)] X_ref[:-1, 3] = [(X_ref[i + 1, 1] - X_ref[i, 1]) / dt for i in range(0, np.shape(X_ref)[0] - 1)] elif self.dims == 3: t_ref = int(np.minimum(size_mpc, np.linalg.norm(self.X_f[0:2] - X_in[0:2]) * 1000)) X_ref = np.linspace(start=X_in, stop=self.X_f, num=t_ref) # interpolate positions # interpolate velocities X_ref[:-1, 3] = [(X_ref[i + 1, 0] - X_ref[i, 0]) / dt for i in range(0, np.shape(X_ref)[0] - 1)] X_ref[:-1, 4] = [(X_ref[i + 1, 1] - X_ref[i, 1]) / dt for i in range(0, np.shape(X_ref)[0] - 1)] X_ref[:-1, 5] = [(X_ref[i + 1, 2] - X_ref[i, 2]) / dt for i in range(0, np.shape(X_ref)[0] - 1)] if (size_mpc - t_ref) == 0: pass elif t_ref == 0: X_ref = np.array(list(itertools.repeat(self.X_f, int(size_mpc)))) else: X_ref = np.vstack((X_ref, list(itertools.repeat(self.X_f, int(size_mpc - t_ref))))) return X_ref
43.646512
120
0.494991
1,551
9,384
2.829142
0.154094
0.036463
0.042388
0.043756
0.38742
0.317001
0.297402
0.26618
0.249772
0.214676
0
0.053106
0.361893
9,384
214
121
43.850467
0.679693
0.17679
0
0.121387
0
0
0.004044
0
0
0
0
0.004673
0
1
0.046243
false
0.00578
0.034682
0
0.127168
0.011561
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
77ddff0fe98781dd316cac4d51b90e21e33f27de
6,122
py
Python
toy_example.py
chuchienshu/Pytorch-color-work
d4a6d052bd39e815f2303bc1583e2bc9bdb6cce3
[ "MIT" ]
null
null
null
toy_example.py
chuchienshu/Pytorch-color-work
d4a6d052bd39e815f2303bc1583e2bc9bdb6cce3
[ "MIT" ]
null
null
null
toy_example.py
chuchienshu/Pytorch-color-work
d4a6d052bd39e815f2303bc1583e2bc9bdb6cce3
[ "MIT" ]
null
null
null
from sklearn.neighbors import NearestNeighbors
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable


def check_value(inds, val):
    ''' Check to see if an array is a single element equaling a particular value
    for pre-processing inputs in a function '''
    if(np.array(inds).size==1):
        if(inds==val):
            return True
    return False


def na(): # shorthand for new axis
    return np.newaxis


def flatten_nd_array(pts_nd,axis=1):
    ''' Flatten an nd array into a 2d array with a certain axis
    INPUTS
        pts_nd       N0xN1x...xNd array
        axis         integer
    OUTPUTS
        pts_flt     prod(N \ N_axis) x N_axis array '''
    NDIM = pts_nd.ndim
    SHP = np.array(pts_nd.shape)
    nax = np.setdiff1d(np.arange(0,NDIM),np.array((axis))) # non axis indices
    NPTS = np.prod(SHP[nax])
    # permute so `axis` is last, then collapse every other axis into one
    axorder = np.concatenate((nax,np.array(axis).flatten()),axis=0)
    pts_flt = pts_nd.transpose((axorder))
    pts_flt = pts_flt.reshape(NPTS,SHP[axis])
    return pts_flt


def unflatten_2d_array(pts_flt,pts_nd,axis=1,squeeze=False):
    ''' Unflatten a 2d array with a certain axis
    INPUTS
        pts_flt     prod(N \ N_axis) x M array
        pts_nd      N0xN1x...xNd array
        axis        integer
        squeeze     bool     if true, M=1, squeeze it out
    OUTPUTS
        pts_out     N0xN1x...xNd array '''
    NDIM = pts_nd.ndim
    SHP = np.array(pts_nd.shape)
    nax = np.setdiff1d(np.arange(0,NDIM),np.array((axis))) # non axis indices
    NPTS = np.prod(SHP[nax])
    if(squeeze):
        axorder = nax
        axorder_rev = np.argsort(axorder)
        M = pts_flt.shape[1]
        NEW_SHP = SHP[nax].tolist()
        pts_out = pts_flt.reshape(NEW_SHP)
        pts_out = pts_out.transpose(axorder_rev)
    else:
        # keep M as a trailing axis, then restore the original axis order
        axorder = np.concatenate((nax,np.array(axis).flatten()),axis=0)
        axorder_rev = np.argsort(axorder)
        M = pts_flt.shape[1]
        NEW_SHP = SHP[nax].tolist()
        NEW_SHP.append(M)
        pts_out = pts_flt.reshape(NEW_SHP)
        pts_out = pts_out.transpose(axorder_rev)
    return pts_out


class NNEncode():
    ''' Encode points using NearestNeighbors search and Gaussian kernel '''
    def __init__(self,NN,sigma,km_filepath='',cc=-1):
        # cc == -1 (the default sentinel) means "load cluster centers from file"
        if(check_value(cc,-1)):
            self.cc = np.load(km_filepath)
        else:
            self.cc = cc
        self.K = self.cc.shape[0]
        self.NN = int(NN)
        self.sigma = sigma
        self.nbrs = NearestNeighbors(n_neighbors=NN, algorithm='ball_tree').fit(self.cc)
        # flag so the encoding buffer below is allocated once and reused
        self.alreadyUsed = False

    def encode_points_mtx_nd(self,pts_nd,axis=1,returnSparse=False,sameBlock=True):
        # NOTE(review): `returnSparse` is accepted but never used in this method.
        pts_flt = flatten_nd_array(pts_nd,axis=axis) #pts_flt ---> [N*H*W, 2]
        P = pts_flt.shape[0] #P ---> N*H*W
        if(sameBlock and self.alreadyUsed):
            self.pts_enc_flt[...] = 0 # already pre-allocated
            print('alreadyUsed')
            print(self.p_inds)
        else:
            print('notUsed')
            # print(self.p_inds)
            self.alreadyUsed = True
            self.pts_enc_flt = np.zeros((P,self.K)) #self.pts_enc_flt.shape ---> [N*H*W, 313]
            self.p_inds = np.arange(0,P,dtype='int')[:,na()] #self.p_inds.shape ---> [N*H*W, 1]
        (dists,inds) = self.nbrs.kneighbors(pts_flt) #inds.shape ---> [N*H*W, NN]
        # Gaussian-weighted, row-normalized soft assignment to the NN nearest bins
        wts = np.exp(-dists**2/(2*self.sigma**2))
        wts = wts/np.sum(wts,axis=1)[:,na()] #wts.shape ---> [N*H*W, NN]
        # Assign each input ab value the Gaussian weights of its NN (here 10)
        # nearest palette bins in pts_enc_flt, then unflatten back to 4d and return.
        self.pts_enc_flt[self.p_inds,inds] = wts
        pts_enc_nd = unflatten_2d_array(self.pts_enc_flt,pts_nd,axis=axis) #pts_enc_nd.shape -----> [N, 313, H, W]
        return pts_enc_nd

    def decode_points_mtx_nd(self,pts_enc_nd,axis=1):
        # inverse of encode: weighted sum of cluster centers along `axis`
        pts_enc_flt = flatten_nd_array(pts_enc_nd,axis=axis)
        pts_dec_flt = np.dot(pts_enc_flt,self.cc)
        pts_dec_nd = unflatten_2d_array(pts_dec_flt,pts_enc_nd,axis=axis)
        return pts_dec_nd

    def decode_1hot_mtx_nd(self,pts_enc_nd,axis=1,returnEncode=False):
        # NOTE(review): nd_argmax_1hot is not defined anywhere in this file —
        # calling this method raises NameError as written.
        pts_1hot_nd = nd_argmax_1hot(pts_enc_nd,axis=axis)
        pts_dec_nd = self.decode_points_mtx_nd(pts_1hot_nd,axis=axis)
        if(returnEncode):
            return (pts_dec_nd,pts_1hot_nd)
        else:
            return pts_dec_nd


# ---- ad-hoc experiment script below; it exits early via exit() calls ----
# self.cretion( output, torch.max(target, 1)[1] )
nnenc = NNEncode(10,5,km_filepath='/home/chuchienshu/Documents/propagation_classification/models/custom_layers/pts_in_hull.npy')
# NOTE(review): hard-coded absolute path above and .cuda() below tie this
# script to one specific machine with a GPU.
bottom = np.random.randint(0,10,(2,3,3,3)).astype('float32')
# print(bottom)
bt = Variable(torch.from_numpy(bottom).cuda())
fac = np.array([[1,2],[3,4],[5,6]])
# broadcastable (1,3,1,1) views of the two factor columns
fac_a = fac[:,0][np.newaxis,:,np.newaxis,np.newaxis]
fac_b = fac[:,1][np.newaxis,:,np.newaxis,np.newaxis]
pred_ab = np.concatenate((np.sum(bottom * fac_a, axis=1, keepdims=True), np.sum(bottom * fac_b, axis=1, keepdims=True)), axis=1)
# print(fac_a,fac_a.shape)
# print(fac_b,fac_b.shape)
# print(bottom * fac_a, ' jfdis')
# print(bottom * fac_b, ' fac_b')
# print(np.sum(bottom * fac_a, axis=1, keepdims=True), ' 44')
# print(np.sum(bottom * fac_b, axis=1, keepdims=True), ' 66')
print(pred_ab, pred_ab.shape)
for i, im in enumerate(pred_ab):
    print(im)
    print(i)
exit()
# NOTE(review): everything below this exit() is unreachable dead code.
# bt = flatten_nd_array(bt.data.numpy())
##bt = bt.permute(0,2,3,1).contiguous().view(50, -1)
#/////////////////////////////////////////////////////////
bottom = np.random.randint(0,10,(8,2,5,5)).astype('float32')
print(bottom)
nnenc.encode_points_mtx_nd(bottom,axis=1)
for _ in range(6):
    print('fjkfd')
    print(nnenc.cc )
    print('############')
exit()
#/////////////////////////////////////////////////////////
import matplotlib.pyplot as plt

n = 1024
# x = np.random.normal(0, 1, n)  # mean 0, variance 1, generate 1024 numbers
# y = np.random.normal(0, 1, n)
# NOTE(review): `X` is undefined in this file — this would raise NameError if
# the exit() calls above were removed.
x = X[:,0]
y = X[:,1]
t = np.arctan2(x, y) # for color value, fed to the cmap
plt.scatter(x, y, s=65, c=t, alpha=0.5) # s = marker size; one point per coordinate; alpha = transparency
plt.xlim(-5, 5)
plt.ylim(-5, 5)
plt.xticks([])
plt.yticks([])
plt.show()
32.913978
128
0.611728
960
6,122
3.725
0.229167
0.026846
0.020134
0.018177
0.338647
0.302573
0.251678
0.214765
0.202461
0.14821
0
0.026526
0.22411
6,122
186
129
32.913978
0.726316
0.267723
0
0.243478
0
0
0.034727
0.020791
0
0
0
0
0
1
0.069565
false
0
0.052174
0.008696
0.208696
0.086957
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
77de70d6830250aef1c075b8acbbc068d1dc4029
348
py
Python
setup.py
SimonLarsen/python-endktheme
2c942a682eb258f1d8b5fee80b93b9565a7ebde7
[ "MIT" ]
null
null
null
setup.py
SimonLarsen/python-endktheme
2c942a682eb258f1d8b5fee80b93b9565a7ebde7
[ "MIT" ]
null
null
null
setup.py
SimonLarsen/python-endktheme
2c942a682eb258f1d8b5fee80b93b9565a7ebde7
[ "MIT" ]
null
null
null
from setuptools import setup, find_packages
import endktheme

# Package metadata, with the version read from the package itself so it is
# maintained in exactly one place.
_metadata = dict(
    name="endktheme",
    description="Visualization themes following Energinet's design guide.",
    version=endktheme.__version__,
    author="Simon J. Larsen",
    author_email="simonhffh@gmail.com",
    license="MIT",
    packages=find_packages(),
    install_requires=[],
)

setup(**_metadata)
24.857143
75
0.724138
38
348
6.421053
0.763158
0.098361
0
0
0
0
0
0
0
0
0
0
0.163793
348
13
76
26.769231
0.838488
0
0
0
0
0
0.293103
0
0
0
0
0
0
1
0
true
0
0.166667
0
0.166667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
1
77e1bd1a557254805b1a9b3ba5eb391f9c2baece
1,630
py
Python
async_downloads/views.py
QuickRelease/django-async-downloads
7a0090e94fe8b5ee78f6175ed877a9b04aa475d7
[ "MIT" ]
1
2021-08-19T23:56:23.000Z
2021-08-19T23:56:23.000Z
async_downloads/views.py
QuickRelease/django-async-downloads
7a0090e94fe8b5ee78f6175ed877a9b04aa475d7
[ "MIT" ]
11
2020-07-22T15:21:14.000Z
2022-03-22T13:02:37.000Z
async_downloads/views.py
QuickRelease/django-async-downloads
7a0090e94fe8b5ee78f6175ed877a9b04aa475d7
[ "MIT" ]
1
2022-01-05T07:45:42.000Z
2022-01-05T07:45:42.000Z
import os

from django.contrib.auth.decorators import login_required
from django.core.cache import cache
from django.core.files.storage import default_storage
from django.http import JsonResponse, HttpResponse
from django.template.loader import render_to_string

from async_downloads.cache import get_collection_key
from async_downloads.settings import DOWNLOAD_TEMPLATE


@login_required
def ajax_update(request):
    """Return the current user's downloads as JSON.

    Reads the per-user collection of download cache keys, resolves each
    entry from the cache, attaches a storage URL to completed downloads,
    and renders DOWNLOAD_TEMPLATE with the collected entries.
    """
    # TODO: can we make `request.user.pk` more generic to allow other
    # things to be used as keys?
    download_keys = cache.get(get_collection_key(request.user.pk), [])
    downloads = []
    in_progress = False
    for i, download_key in enumerate(download_keys):
        dl = cache.get(download_key)
        if not dl:
            # Entry expired or was cleared from the cache; skip it.
            continue
        if dl["complete"]:
            dl["url"] = default_storage.url(dl["filepath"])
        else:
            in_progress = True
        downloads.append(dl)
    # TODO: split up complete and in progress async_downloads?
    return JsonResponse(
        {
            "html": render_to_string(DOWNLOAD_TEMPLATE, {"downloads": downloads}),
            "downloads": downloads,
            "in_progress": in_progress,
        }
    )


@login_required
def ajax_clear_download(request):
    """Remove one download: its cache entry, its file, and its directory.

    The cache key is taken to be the name of the directory containing the
    file (the second-to-last path component of ``filepath``).
    """
    # TODO: consider just clearing the key without deleting,
    # so that all deletion is done by one function
    # NOTE(review): `filepath` comes straight from POST data and is passed to
    # storage deletes without validation — confirm the storage backend
    # sandboxes paths, otherwise this lets a user delete arbitrary entries.
    filepath = request.POST.get("filepath")
    directory = os.path.split(filepath)[0]
    download_key = os.path.split(directory)[1]
    cache.delete(download_key)
    default_storage.delete(filepath)
    default_storage.delete(directory)
    return HttpResponse("")
32.6
82
0.698773
206
1,630
5.368932
0.441748
0.045208
0.025316
0.036166
0
0
0
0
0
0
0
0.001563
0.214724
1,630
49
83
33.265306
0.8625
0.152761
0
0.052632
0
0
0.043636
0
0
0
0
0.020408
0
1
0.052632
false
0
0.210526
0
0.315789
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
77e1cf7fad88dd83f35688487683b7260d719151
517
py
Python
dltranz/data_load/augmentations/drop_day.py
KirillVladimirov/pytorch-lifestream
83005b950d41de8afc11711fc955ffafb5ff7a9e
[ "Apache-2.0" ]
null
null
null
dltranz/data_load/augmentations/drop_day.py
KirillVladimirov/pytorch-lifestream
83005b950d41de8afc11711fc955ffafb5ff7a9e
[ "Apache-2.0" ]
null
null
null
dltranz/data_load/augmentations/drop_day.py
KirillVladimirov/pytorch-lifestream
83005b950d41de8afc11711fc955ffafb5ff7a9e
[ "Apache-2.0" ]
1
2022-02-05T15:06:48.000Z
2022-02-05T15:06:48.000Z
import numpy as np
import torch


class DropDay:
    """Augmentation that removes every event falling on one randomly chosen day."""

    def __init__(self, event_time_name='event_time'):
        # Key in the feature dict that holds the event timestamps.
        self.event_time_name = event_time_name

    def __call__(self, x):
        """Filter every feature in `x` by the day-drop mask."""
        keep = self.get_perm_ix(x[self.event_time_name])
        filtered = {}
        for key, values in x.items():
            filtered[key] = values[keep]
        return filtered

    @staticmethod
    def get_perm_ix(event_time):
        """Boolean mask keeping all events except those on one random day."""
        days = torch.unique(event_time, sorted=True)
        dropped = days[np.random.choice(len(days), 1)[0]]
        return event_time != dropped
25.85
56
0.634429
80
517
3.775
0.45
0.238411
0.172185
0.168874
0.172185
0.172185
0
0
0
0
0
0.005208
0.257253
517
19
57
27.210526
0.78125
0
0
0
0
0
0.019342
0
0
0
0
0
0
1
0.2
false
0
0.133333
0
0.533333
0
0
0
0
null
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
3
77e1ec7bc5d891f1b7dd17ef8b326f42e0381896
371
py
Python
app/api_1_0/books.py
zheng-zy/flask_app
f9a720d76906bfbd48e2b28cf10c0923b9314649
[ "Apache-2.0" ]
null
null
null
app/api_1_0/books.py
zheng-zy/flask_app
f9a720d76906bfbd48e2b28cf10c0923b9314649
[ "Apache-2.0" ]
null
null
null
app/api_1_0/books.py
zheng-zy/flask_app
f9a720d76906bfbd48e2b28cf10c0923b9314649
[ "Apache-2.0" ]
null
null
null
#!usr/bin/env python # coding=utf-8 # Created by zhezhiyong@163.com on 2016/11/17. from flask import jsonify from . import api from .. import mongodb @api.route('/books/') def get_books(): # id = mongodb.db.book.insert({'test':1}) books = mongodb.db.Book.find({}) for book in books: print book return jsonify({ 'code': str(id) })
18.55
46
0.619946
55
371
4.163636
0.709091
0.087336
0.113537
0
0
0
0
0
0
0
0
0.045296
0.226415
371
19
47
19.526316
0.752613
0.318059
0
0
0
0
0.044177
0
0
0
0
0
0
0
null
null
0
0.272727
null
null
0.090909
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
77e475b891c5acd960168cb4f57e50fe78d26b08
270
py
Python
distrib/tests_platane.py
BonJovi1/Bullet-the-Blue-Sky
9ebf41fc85ccf9f8e2880acdafc6dfffd8a0268f
[ "WTFPL" ]
2
2019-10-13T02:56:35.000Z
2020-04-04T08:57:13.000Z
distrib/tests_platane.py
BonJovi1/Bullet-the-Blue-Sky
9ebf41fc85ccf9f8e2880acdafc6dfffd8a0268f
[ "WTFPL" ]
null
null
null
distrib/tests_platane.py
BonJovi1/Bullet-the-Blue-Sky
9ebf41fc85ccf9f8e2880acdafc6dfffd8a0268f
[ "WTFPL" ]
null
null
null
from utils import *

# Build-and-run script for the "platane" test machine. All helpers come from
# `utils`; from their names, each pass cleans the tree, updates to a revision,
# applies patches, builds with VC11 Express (64-bit), then runs the suite
# under a forced Optimus GPU setting.

# First pass (HgUpdate21): build once, run on both Intel and NVIDIA GPUs.
Clean()
HgUpdate21()
PatchAll()
Build_VC11Express_64()
OptimusForceIntel()
RunAll()
OptimusForceNVIDIA()
RunAll()

# Second pass (HgUpdate33): build and run on the NVIDIA GPU only.
Clean()
HgUpdate33()
PatchAll()
Build_VC11Express_64()
OptimusForceNVIDIA()
RunAll()

# Final cleanup: leave a clean working copy updated via HgUpdate33.
Clean()
HgUpdate33()
11.25
23
0.692593
23
270
7.956522
0.565217
0.142077
0.262295
0.284153
0
0
0
0
0
0
0
0.063636
0.185185
270
23
24
11.73913
0.768182
0
0
0.823529
0
0
0
0
0
0
0
0
0
1
0
true
0
0.058824
0
0.058824
0
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
77e5180d6ab5330f71187b669f22ec6397e5429c
514
py
Python
custom/_legacy/pact/reports/admin_reports.py
dslowikowski/commcare-hq
ad8885cf8dab69dc85cb64f37aeaf06106124797
[ "BSD-3-Clause" ]
1
2015-02-10T23:26:39.000Z
2015-02-10T23:26:39.000Z
custom/_legacy/pact/reports/admin_reports.py
SEL-Columbia/commcare-hq
992ee34a679c37f063f86200e6df5a197d5e3ff6
[ "BSD-3-Clause" ]
1
2022-03-12T01:03:25.000Z
2022-03-12T01:03:25.000Z
custom/_legacy/pact/reports/admin_reports.py
johan--/commcare-hq
86ee99c54f55ee94e4c8f2f6f30fc44e10e69ebd
[ "BSD-3-Clause" ]
null
null
null
from corehq.apps.reports.generic import GenericTabularReport
from corehq.apps.reports.standard import CustomProjectReport


class PactAdminReport(GenericTabularReport, CustomProjectReport):
    """PACT admin report: an emailable, exportable tabular custom report
    filtered by a date-span filter."""
    fields = ['corehq.apps.reports.filters.dates.DatespanFilter']
    name = "PACT Admin Reports"
    slug = "pactadmin"
    emailable = True
    exportable = True
    report_template_path = "pact/admin/pactadmin_reports.html"

    @property
    def report_context(self):
        # NOTE(review): {"foo": "bar"} looks like placeholder/stub context —
        # confirm whether the template actually consumes these keys.
        ret = {"foo": "bar"}
        return ret
24.47619
65
0.725681
53
514
6.962264
0.660377
0.081301
0.138211
0.113821
0
0
0
0
0
0
0
0
0.184825
514
20
66
25.7
0.880668
0
0
0
0
0
0.223092
0.158513
0
0
0
0
0
1
0.076923
false
0
0.153846
0
0.846154
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
77e99bc3b885eac60af8099d66d105d9eea0d121
4,807
py
Python
tests/test_helper.py
INM-6/correlation-toolbox
26b9e999069990a8b756d8a4d880bd152f95149f
[ "MIT" ]
1
2018-10-12T22:54:16.000Z
2018-10-12T22:54:16.000Z
tests/test_helper.py
INM-6/correlation-toolbox
26b9e999069990a8b756d8a4d880bd152f95149f
[ "MIT" ]
6
2017-04-03T07:44:17.000Z
2018-06-08T08:37:47.000Z
tests/test_helper.py
INM-6/correlation-toolbox
26b9e999069990a8b756d8a4d880bd152f95149f
[ "MIT" ]
2
2017-04-05T04:42:12.000Z
2018-07-17T11:43:24.000Z
# global imports
import unittest
import numpy as np
from future.builtins import range

# local imports
import correlation_toolbox.helper as cthlp


class TestHelper(unittest.TestCase):
    """Statistical unit tests for correlation_toolbox.helper spike-train utilities."""

    def setUp(self):
        # Fixed seed so the rate/correlation estimates below are reproducible.
        np.random.seed(12345)
        self.rate = 30.  # (Hz)
        self.T = 3e4  # (ms)
        self.N = 100
        self.p = 0.6  # percentage of neurons active
        self.Neff = int(self.p * self.N)
        self.cc = 0.3
        self.tbin = 1.  # (ms)

    def test_create_poisson_spiketrains(self):
        """Trains cover all N ids, stay within T, and match the target rate."""
        sp = cthlp.create_poisson_spiketrains(self.rate, self.T, self.N)
        self.assertEqual(self.N, len(np.unique(sp[:, 0])))  # N
        self.assertTrue(self.T >= np.max(sp[:, 1]))  # T
        emp_rate = 1. * len(sp) / self.T * 1e3 / self.N
        self.assertTrue(abs(self.rate - emp_rate) < 1e0)  # rate

    def test_sort_gdf_by_id(self):
        """Sorting pads missing ids with empty trains; spike times stay ordered."""
        # create N-5 poisson instead of N, creates empty arrays in sp_srt
        sp = cthlp.create_poisson_spiketrains(self.rate, self.T, self.Neff)
        sp_ids, sp_srt = cthlp.sort_gdf_by_id(sp, 0, self.N - 1)
        self.assertEqual(self.N, len(sp_ids))  # N
        self.assertTrue(self.T >= np.max([np.max(x) for x in sp_srt if len(x) > 0]))  # T
        for i in range(self.N):
            emp_rate = 1. * len(sp_srt[i]) / self.T * 1e3
            assert(emp_rate >= 0.)
            if emp_rate > 0.:
                self.assertTrue(abs(self.rate - emp_rate) < 1e1)  # rate
                self.assertTrue(min(np.diff(sp_srt[i])) > 0.)  # time ordering

    def test_instantaneous_spike_count(self):
        """Binning handles empty trains and reproduces a known histogram."""
        # create N-5 poisson instead of N, creates empty arrays in sp_srt
        # to test binning for empty spiketrains
        sp = cthlp.create_poisson_spiketrains(self.rate, self.T, self.Neff)
        sp_ids, sp_srt = cthlp.sort_gdf_by_id(sp, 0, self.N - 1)
        bins, bsp = cthlp.instantaneous_spike_count(sp_srt, self.tbin)
        # test whether binning produces correct results
        sp_srt = np.array([[1., 2., 5., 7.], [4., 6., 9.]])
        # ground truth
        bsp_true = np.array(
            [[1, 1, 0, 0, 1, 0, 1, 0],
             [0, 0, 0, 1, 0, 1, 0, 1]])
        bins, bsp = cthlp.instantaneous_spike_count(sp_srt, self.tbin)
        self.assertTrue(len(bins) == len(bsp[0]))  # number of bins
        self.assertEqual(2, len(bsp))  # number of binned spike trains
        self.assertEqual(np.sum(bsp_true - bsp), 0.)  # histogram

    def test_create_correlated_spiketrains_sip(self):
        """SIP trains match the scaled rate and mean pairwise correlation p**2*cc."""
        # create N-5 poisson instead of N, changes correlation
        sp = cthlp.create_correlated_spiketrains_sip(
            self.rate, self.T, self.Neff, self.cc)
        sp_ids, sp_srt = cthlp.sort_gdf_by_id(sp, 0, self.N - 1)
        bins, bsp = cthlp.instantaneous_spike_count(sp_srt, self.tbin)
        emp_rate = 1. * np.sum(bsp) / self.T * 1e3 / self.N
        self.assertTrue(abs(self.p * self.rate - emp_rate) < 5e-1)  # rate
        self.assertEqual(self.N, len(bsp))  # N
        self.assertTrue(self.T >= np.max(bins))  # T
        emp_cc = np.corrcoef(cthlp.strip_binned_spiketrains(bsp))
        emp_a_cc = []
        for i in range(self.Neff):
            for j in range(self.Neff):
                if i != j:
                    emp_a_cc.append(emp_cc[i, j])
        # correlation coefficient (mean over all ordered pairs of the full N)
        emp_mu_cc = 1. / (self.N * (self.N - 1.)) * np.sum(emp_a_cc)
        self.assertTrue(abs(self.p ** 2 * self.cc - emp_mu_cc) < 2e-2)

    def test_centralize(self):
        """Centralizing removes the mean over time, over units, or over both."""
        v1 = np.random.normal(-50, 2, int(self.T * 1e1))
        v2 = np.random.normal(-30, 2, int(self.T * 1e1))
        v_cen_time = cthlp.centralize([v1, v2], time=True)
        for v in v_cen_time:
            self.assertTrue(abs(np.mean(v)) < 1e-12)
        v_cen_units = cthlp.centralize([v1, v2], units=True)
        for v in v_cen_units.T:
            self.assertTrue(abs(np.mean(v)) < 1e-12)
        v_cen_timeunits = cthlp.centralize([v1, v2], time=True, units=True)
        self.assertTrue(abs(np.mean(v_cen_timeunits)) < 1e-12)

    def test_strip_sorted_spiketrains(self):
        """Stripping removes empty trains, leaving Neff of the padded N."""
        sp = cthlp.create_poisson_spiketrains(self.rate, self.T, self.Neff)
        sp_ids, sp_srt = cthlp.sort_gdf_by_id(sp, 0, self.N - 1)
        self.assertEqual(self.N, len(sp_srt))
        sp_srt = cthlp.strip_sorted_spiketrains(sp_srt)
        self.assertEqual(self.Neff, len(sp_srt))

    def test_strip_binned_spiketrains(self):
        """Stripping removes all-zero binned trains, leaving Neff of N."""
        sp = cthlp.create_poisson_spiketrains(self.rate, self.T, self.Neff)
        sp_ids, sp_srt = cthlp.sort_gdf_by_id(sp, 0, self.N - 1)
        bins, bsp = cthlp.instantaneous_spike_count(sp_srt, self.tbin)
        self.assertEqual(self.N, len(bsp))
        bsp = cthlp.strip_binned_spiketrains(bsp)
        self.assertEqual(self.Neff, len(bsp))


if __name__ == '__main__':
    unittest.main()
44.100917
78
0.607239
740
4,807
3.767568
0.17973
0.034075
0.047704
0.060258
0.572095
0.465208
0.39957
0.357245
0.346844
0.322453
0
0.030687
0.261078
4,807
108
79
44.509259
0.754223
0.097774
0
0.197674
0
0
0.001856
0
0
0
0
0
0.255814
1
0.093023
false
0
0.046512
0
0.151163
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
77ea483e2f5d12c57ce4e04b7312ab2402473ea4
47
py
Python
input.py
rooch001/RaspberryPi-DTU
baea2ad7a2eb8bdc45b56d6bde5d5bd3e3c30cfd
[ "MIT" ]
null
null
null
input.py
rooch001/RaspberryPi-DTU
baea2ad7a2eb8bdc45b56d6bde5d5bd3e3c30cfd
[ "MIT" ]
null
null
null
input.py
rooch001/RaspberryPi-DTU
baea2ad7a2eb8bdc45b56d6bde5d5bd3e3c30cfd
[ "MIT" ]
null
null
null
import os
import shutil

# Print the entries directly under the filesystem root.
root_entries = os.listdir('/')
print(root_entries)
11.75
22
0.723404
7
47
4.857143
0.714286
0
0
0
0
0
0
0
0
0
0
0
0.106383
47
4
22
11.75
0.809524
0
0
0
0
0
0.020833
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0.333333
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
77eb25a04371bec64faee1671d09a1d02f6b7e59
3,783
py
Python
Tareas/Tarea_4.py
JESUS-2120/Python_2
b1854b4118215684eb1adb5acdbc3313c2a15f20
[ "Apache-2.0" ]
null
null
null
Tareas/Tarea_4.py
JESUS-2120/Python_2
b1854b4118215684eb1adb5acdbc3313c2a15f20
[ "Apache-2.0" ]
null
null
null
Tareas/Tarea_4.py
JESUS-2120/Python_2
b1854b4118215684eb1adb5acdbc3313c2a15f20
[ "Apache-2.0" ]
null
null
null
'''
NAME
    Tarea_4.py

VERSION
    1.0

AUTHOR
    Victor Jesus Enriquez Castro <victorec@lcg.unam.mx>

DESCRIPTION
    Using Entrez.einfo and Entrez.read, the program prints the description of
    the requested FieldList and LinkList entries of the "protein" database.
    Then, given search keywords supplied by the user, it retrieves the IDs of
    the articles matching those criteria in the pubmed database.

CATEGORY
    Data Base

INPUT
    This program only takes as input the keywords used to search for
    articles in the pubmed database.

EXAMPLES
    Input:
        Ingrese el termino con el que desea realizar su busqueda: ludosky ma
        Ingrese el campo del termino ingresado: AUTH
        Ingrese el termino con el que desea realizar su busqueda: electrocyte
        Ingrese el campo del termino ingresado: Title
        Ingrese el termino con el que desea realizar su busqueda: Baumannii
        Ingrese el campo del termino ingresado: Title

    Output:
        ECNO -> Description: EC number for enzyme or CAS registry number
        protein_protein_small_genome -> Description: All proteins from this genome
        El archivo con los IDs de su busqueda se encuentra en: ../files/ bajo el nombre IDs.txt

GITHUB
    https://github.com/JESUS-2120/Python_2/blob/main/Tareas/Tarea_4.py
'''

# Import the required libraries
from Bio import Entrez
from pprint import pprint

# Register an e-mail address (required by NCBI Entrez)
Entrez.email = "victorec@lcg.unam.mx"

# TASK 1
# Select the database of interest
handle = Entrez.einfo(db = "protein")
record = Entrez.read(handle)

# Print the description of each of the requested fields
for i in range(0,len(record["DbInfo"]["FieldList"])):
    if record["DbInfo"]["FieldList"][i]["Name"] == "ECNO":
        print(record["DbInfo"]["FieldList"][i]["Name"],"->","Description:")
        print(record["DbInfo"]["FieldList"][i]["Description"])
        print("\n")

for i in range(0,len(record["DbInfo"]["LinkList"])):
    if record["DbInfo"]["LinkList"][i]["Name"] == "protein_protein_small_genome":
        print(record["DbInfo"]["LinkList"][i]["Name"],"->","Description:")
        print(record["DbInfo"]["LinkList"][i]["Description"])
        print("\n")

# TASK 2
print("Bienvenido al buscador automatico\nSi desea usar el formato ya existente ingrese (1) si desea ingresar su propio string ingrese (2): ")
opc = int(input())

if (opc < 1 or opc > 2):
    opc = int(input("Ingrese un numero valido: "))

if opc == 1:
    print("Considerando como ejemplo\ntermino = 'ludosky ma[AUTH] AND (electrocyte[Title] OR Baumannii[Title])\ningrese los campos con los que desea realizar su busqueda")
    # Create the list used to store the search keywords
    palabras = ["","","","","",""]
    # Ask the user for the search keywords
    for i in range(3):
        palabras[i] = str(input("Ingrese el termino con el que desea realizar su busqueda: "))
        palabras[i + 3] = str(input("Ingrese el campo del termino ingresado: "))
    # Concatenate everything into a single string used to perform the search
    termino = palabras[0] + "[" + palabras[3] + "]" + " AND (" + palabras[1] + "[" + palabras[4] + "] OR " + palabras[2] + "[" + palabras[5] + "])"

if opc == 2:
    termino = input("Ingrese su string de busqueda: ")

# Search the database
handle = Entrez.esearch(db="pubmed", term= termino)
record = Entrez.read(handle)

# Create the IDs file
# NOTE(review): the file handle is never closed — consider a with-block.
IDS = open("../files/IDs.txt","w")
IDS.write("Los IDs de su busqueda son: \n")

# Write the IDs into the file we created
for rec in record["IdList"]:
    IDS.write(">" + rec + "\n")

print("El archivo con los IDs de su busqueda se encuentra en: ../files/ bajo el nombre IDs.txt")
32.333333
168
0.68755
542
3,783
4.782288
0.333948
0.027778
0.015432
0.025077
0.31983
0.244599
0.177469
0.148148
0.127315
0.127315
0
0.008873
0.195612
3,783
116
169
32.612069
0.842918
0.539783
0
0.114286
0
0.085714
0.455125
0.028678
0
0
0
0.008621
0
1
0
false
0
0.057143
0
0.057143
0.285714
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
77ebdf9ca9d4616e429b8e92977f8018b29b5675
3,464
py
Python
HDPython/v_enum.py
HardwareDesignWithPython/HDPython
aade03aaa092b1684fa12bffd17674cf1c45f5ac
[ "MIT" ]
null
null
null
HDPython/v_enum.py
HardwareDesignWithPython/HDPython
aade03aaa092b1684fa12bffd17674cf1c45f5ac
[ "MIT" ]
null
null
null
HDPython/v_enum.py
HardwareDesignWithPython/HDPython
aade03aaa092b1684fa12bffd17674cf1c45f5ac
[ "MIT" ]
1
2021-10-20T20:08:16.000Z
2021-10-20T20:08:16.000Z
import os,sys,inspect
from HDPython.base import *
from HDPython.v_symbol import v_symbol
from HDPython.primitive_type_converter import get_primitive_hdl_converter
from HDPython.lib_enums import varSig, InOut_t


class v_enum(HDPython_base):
    """HDPython wrapper exposing a Python Enum as an HDL enum-valued object.

    The enum's integer value is stored in an underlying ``v_symbol``; most
    operations delegate to that symbol.
    """

    def __init__(self, EnumIn, EnumVal=None, name=None, Inout=InOut_t.Internal_t,
                 includes="", value=None, varSigConst=varSig.variable_t):
        # EnumIn may be either the Enum *class* or an Enum *member*; in the
        # latter case the member also provides the initial value.
        super().__init__()
        self.__hdl_converter__ = get_primitive_hdl_converter("v_enum")()
        if type(EnumIn).__name__ == "EnumMeta":
            Enumtype = EnumIn
        elif type(type(EnumIn)).__name__ == "EnumMeta":
            Enumtype = type(EnumIn)
            EnumVal = EnumIn
        if EnumVal is None:
            EnumVal = Enumtype(0)  # default to the member with value 0
        if name is None:
            name = Enumtype.__name__
        self.symbol = v_symbol(name, EnumVal.value, Inout=Inout, includes=includes,
                               value=EnumVal.value, varSigConst=varSigConst)
        self._type = Enumtype
        self.name = name
        self.__hdl_name__ = None
        self._Inout = Inout
        self._varSigConst = varSigConst

    def __lshift__(self, rhs):
        """Connect/assign from another v_enum of the same type or a raw enum member."""
        if isinstance(rhs, type(self)):
            self.symbol << rhs.symbol
            return
        if isinstance(rhs, self._type):
            self.symbol << value(rhs)
            return
        # Fixed typo in the original message ("tp" -> "to").
        raise Exception("[ENUM] Unable to connect different types", self, rhs)

    def _sim_get_new_storage(self):
        self.symbol._sim_get_new_storage()

    def set_simulation_param(self, module, name, writer):
        self.symbol.set_simulation_param(module, name, writer)

    def __repr__(self):
        # "MEMBER_NAME: value", e.g. "idle: 0"
        ret = str(self._type(value(self.symbol)).name) + ": " + str(value(self.symbol))
        return ret

    def setInout(self, Inout):
        self.symbol.setInout(Inout)

    def set_varSigConst(self, varSigConst):
        self._varSigConst = varSigConst
        self.symbol.set_varSigConst(varSigConst)

    def isVarSigType(self, varSigType):
        return self.symbol.isVarSigType(varSigType)

    def _sim_get_value(self):
        return value(self.symbol)

    def __eq__(self, rhs):
        return value(self) == value(rhs)

    def set_vhdl_name(self, name, Overwrite=False):
        """Record the HDL name; re-assignment without Overwrite is an error."""
        if self.__hdl_name__ and self.__hdl_name__ != name and Overwrite == False:
            raise Exception("double Conversion to vhdl")
        else:
            self.__hdl_name__ = name

    def isInOutType(self, Inout):
        """Return whether this object matches the requested direction.

        For a Slave the requested direction is mirrored (Master<->Slave,
        input<->output) before checking members. `getMember` is presumably
        provided by HDPython_base — TODO confirm.
        """
        if Inout is None or self._Inout == Inout:
            return True
        elif self._Inout == InOut_t.Master_t:
            mem = self.getMember(Inout)
            return len(mem) > 0
        elif self._Inout == InOut_t.Slave_t:
            if Inout == InOut_t.Master_t:
                Inout = InOut_t.Slave_t
            elif Inout == InOut_t.Slave_t:
                Inout = InOut_t.Master_t
            elif Inout == InOut_t.input_t:
                Inout = InOut_t.output_t
            elif Inout == InOut_t.output_t:
                Inout = InOut_t.input_t
            mem = self.getMember(Inout)
            return len(mem) > 0

    def __str__(self):
        # Prefer the converted HDL name once one has been assigned.
        if self.__hdl_name__:
            return self.__hdl_name__
        return self._type(value(self.symbol)).name

    def _issubclass_(self, test):
        if super()._issubclass_(test):
            return True
        return "v_enum" == test
28.162602
134
0.602483
408
3,464
4.762255
0.208333
0.072054
0.062275
0.024704
0.215646
0.063819
0.036027
0.036027
0.036027
0
0
0.001244
0.303984
3,464
122
135
28.393443
0.804645
0
0
0.125
0
0
0.027425
0
0
0
0
0
0
1
0.175
false
0
0.0625
0.0375
0.4125
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
77ecf310ae779764d67ed3eb67a498968e7015a3
2,146
py
Python
setup.py
Frojd/Fabrik
9f2edbba97a7fd236b72a9b3010f6e912ab5c001
[ "MIT" ]
12
2015-11-03T20:41:29.000Z
2019-02-15T17:13:27.000Z
setup.py
Frojd/Fabrik
9f2edbba97a7fd236b72a9b3010f6e912ab5c001
[ "MIT" ]
35
2015-08-23T17:10:00.000Z
2017-05-10T12:08:57.000Z
setup.py
Frojd/Fabrik
9f2edbba97a7fd236b72a9b3010f6e912ab5c001
[ "MIT" ]
null
null
null
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""Packaging script for fabrik.

``python setup.py publish`` builds an sdist, uploads it, and exits.
"""

import os
import sys

import pip

from setuptools import setup, find_packages

import fabrik

# Shortcut: `python setup.py publish` uploads to PyPI and exits.
if sys.argv[-1] == "publish":
    os.system("python setup.py sdist upload")
    sys.exit()

package_exclude = ("tests*", "examples*")
packages = find_packages(exclude=package_exclude)

# Convert markdown to rst for the PyPI long description; fall back to an empty
# description when pypandoc (or the pandoc binary) is unavailable.
try:
    from pypandoc import convert
    long_description = convert("README.md", "rst")
except Exception:
    # Was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt.
    long_description = ""

setup(
    name="fabrik",
    version=fabrik.__version__,
    description="A simple to use deployment toolkit built on top of Fabric",
    long_description=long_description,
    author="Fröjd",
    author_email="martin.sandstrom@frojd.se",
    url="https://github.com/frojd/fabrik",
    packages=packages,
    include_package_data=True,
    install_requires=[
        'Fabric==1.12.0',
        'Unipath==1.1',
        'PyCrypto==2.6.1',
        'jinja2==2.8',
        'click>=5.0',
        'GitPython==1.0.1',
    ],
    tests_require=[
        'Fabric==1.12.0',
        'Unipath==1.1',
        'PyCrypto==2.6.1',
        'jinja2==2.8',
        'click>=5.0',
        'GitPython==1.0.1',
    ],
    entry_points={
        "console_scripts": [
            "fabrik = fabrik.scripts.fabrik:main",
            "fabrik_start = fabrik.cli.scripts.init:main",
            "fabrik_cleanup = fabrik.cli.scripts.cleanup:main",
        ]
    },
    license="MIT",
    zip_safe=False,
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Environment :: Console",
        "Environment :: Web Environment",
        "Intended Audience :: Developers",
        "Intended Audience :: System Administrators",
        "Natural Language :: English",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.7",
        "Topic :: Software Development :: Build Tools",
        "Topic :: Software Development :: Libraries",
        "Topic :: System :: Software Distribution",
        "Topic :: System :: Systems Administration",
    ],
)
26.825
76
0.596925
234
2,146
5.380342
0.512821
0.047657
0.059571
0.015886
0.090548
0.090548
0.090548
0.090548
0.090548
0.090548
0
0.025031
0.255359
2,146
79
77
27.164557
0.762829
0.030755
0
0.223881
0
0
0.475686
0.052961
0
0
0
0
0
1
0
false
0
0.089552
0
0.089552
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
77ed35b54ba191d21237cd5df4a01654fd2c195e
14
py
Python
utokenize/testdata/comment_intra.py
MaxTurchin/pycopy-lib
d7a69fc2a28031e2ca475c29239f715c1809d8cc
[ "PSF-2.0" ]
126
2019-07-19T14:42:41.000Z
2022-03-21T22:22:19.000Z
utokenize/testdata/comment_intra.py
MaxTurchin/pycopy-lib
d7a69fc2a28031e2ca475c29239f715c1809d8cc
[ "PSF-2.0" ]
38
2019-08-28T01:46:31.000Z
2022-03-17T05:46:51.000Z
utokenize/testdata/comment_intra.py
MaxTurchin/pycopy-lib
d7a69fc2a28031e2ca475c29239f715c1809d8cc
[ "PSF-2.0" ]
55
2019-08-02T09:32:33.000Z
2021-12-22T11:25:51.000Z
foo # comment
7
13
0.714286
2
14
5
1
0
0
0
0
0
0
0
0
0
0
0
0.214286
14
1
14
14
0.909091
0.5
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
77efdc6a298f42286268601e0029f556def6374b
12,607
py
Python
tool.py
Illidanz/MonoPriTranslation
2cff8fe3133d76e1311273ef3b54dde428151390
[ "MIT" ]
3
2021-10-31T04:20:16.000Z
2022-02-16T04:12:57.000Z
tool.py
Illidanz/MonoPriTranslation
2cff8fe3133d76e1311273ef3b54dde428151390
[ "MIT" ]
1
2021-11-01T10:27:59.000Z
2021-11-01T10:27:59.000Z
tool.py
Illidanz/MonoPriTranslation
2cff8fe3133d76e1311273ef3b54dde428151390
[ "MIT" ]
null
null
null
import codecs import csv import filecmp import os import time import click import pyimgur import requests from zipfile import ZipFile, ZIP_DEFLATED from hacktools import common, wii version = "1.5.5" isofile = "data/disc.iso" infolder = "data/extract/" outfolder = "data/repack/" replacefolder = "data/replace/" fontin = "data/font_input.txt" fontout = "data/font_output.txt" fontimgout = "data/extract_FNT/font_jp.png" fontimgin = "data/work_FNT/font_jp.png" fontfile = "data/extract/DATA/files/resfont/font_jp.brfnt" dolin = "data/extract/DATA/sys/main.dol" dolout = "data/repack/DATA/sys/main.dol" patchin = "data/extract/DATA/files/" patchout = "data/repack/DATA/files/" patchfolder = "data/patch/monopri/" xmlfile = "data/patch/riivolution/monopri.xml" @common.cli.command() @click.option("--iso", is_flag=True, default=False) @click.option("--msbe", is_flag=True, default=False) @click.option("--movie", is_flag=True, default=False) @click.option("--tpl", is_flag=True, default=False) @click.option("--fnt", is_flag=True, default=False) @click.option("--speaker", is_flag=True, default=False) @click.option("--merge", is_flag=True, default=False) def extract(iso, msbe, movie, tpl, fnt, speaker, merge): all = not iso and not msbe and not movie and not fnt and not tpl if all or iso: wii.extractIso(isofile, infolder, outfolder) if all or msbe: import extract_msbe extract_msbe.run(speaker, merge) if all or movie: import extract_movie extract_movie.run() if all or fnt: wii.extractFontData(fontfile, fontout) common.makeFolder("data/extract_FNT/") wii.extractBRFNT(fontfile, fontimgout) if all or tpl: wii.extractARC("data/extract/DATA/files/3d/map/", "data/extract_3D/") wii.extractARC("data/extract/DATA/files/effect/", "data/extract_EFF/") wii.extractBREFT("data/extract_EFF", "data/extract_BREFT", "data/out_EFF") wii.extractARC("data/extract/DATA/files/lytdemo/exp_data/", "data/extract_TPL/") common.copyFolder("data/extract/DATA/files/textures/", "data/extract_TPL/textures/") 
wii.extractTPL("data/extract_TPL/", "data/out_TPL/") @common.cli.command() @click.option("--no-patch", is_flag=True, default=False) @click.option("--msbe", is_flag=True, default=False) @click.option("--onlyquest", is_flag=True, default=False) @click.option("--movie", is_flag=True, default=False) @click.option("--tpl", is_flag=True, default=False) @click.option("--fnt", is_flag=True, default=False) def repack(no_patch, msbe, onlyquest, movie, tpl, fnt): all = not msbe and not movie and not tpl and not fnt if all or fnt: common.logMessage("Repacking FNT from", "data/work_FNT", "...") fontfilein = fontfile if os.path.isfile(fontfile.replace("/extract/", "/replace/")): fontfilein = fontfilein.replace("/extract/", "/replace/") fontfileout = fontfile.replace("/extract/", "/repack/") wii.repackFontData(fontfilein, fontfileout, fontin) wii.repackBRFNT(fontfileout, fontimgin) common.logMessage("Done!") if all or fnt or msbe: import repack_msbe repack_msbe.run(onlyquest) if all or fnt or movie: import repack_movie repack_movie.run() if all or tpl: import repack_tpl repack_tpl.run() if os.path.isdir(replacefolder): common.mergeFolder(replacefolder, outfolder) # Patch the main.dol file common.copyFile(dolin, dolout) with common.Stream(dolout, "rb+", False) as f: # Set the movie subtitles X position to 0 since we're doing some manual centering # Change "fsubs f28,f7,f8 to fsubs f28,f8,f8" f.seek(0x8CF4) # 0x8000cfb4 f.writeUInt(0xef884028) if not no_patch: common.makeFolders(patchfolder) common.makeFolder(patchfolder.replace("monopri/", "riivolution/")) common.logMessage("Creating patch folder in", patchfolder, "...") files = common.getFiles(patchin) for file in common.showProgress(files): if patchout + file == dolout: continue if not filecmp.cmp(patchin + file, patchout + file): common.makeFolders(patchfolder + os.path.dirname(file)) common.copyFile(patchout + file, patchfolder + file) with common.Stream(xmlfile, "w") as f: f.writeLine('<wiidisc version="1">') 
f.writeLine('\t<id game="RSEJGD"/>') f.writeLine('\t<options>') f.writeLine('\t\t<section name="Translation">') f.writeLine('\t\t\t<option name="Translation Patch">') f.writeLine('\t\t\t\t<choice name="Enabled">') f.writeLine('\t\t\t\t\t<patch id="monoprifolder"/>') f.writeLine('\t\t\t\t</choice>') f.writeLine('\t\t\t</option>') f.writeLine('\t\t</section>') f.writeLine('\t</options>') f.writeLine('\t<patch id="monoprifolder">') f.writeLine('\t\t<folder external="/monopri" recursive="false"/>') f.writeLine('\t\t<folder external="/monopri" disc="/"/>') f.writeLine('\t\t<memory offset="0x8000cfb4" value="ef884028" original="ef874028" />') f.writeLine('\t</patch>') f.writeLine('</wiidisc>') common.logMessage("Creating ZIP file ...") with common.Stream("patcher.bat", "w") as f: f.writeLine('del monopri_patched.iso') f.writeLine('rmdir /s/q patch_temp') f.writeLine('wit EXTRACT -o %1 patch_temp') f.writeLine('xcopy patch\\monopri patch_temp\\DATA\\files /s/e/y/q') f.writeLine('xcopy main.dol patch_temp\\DATA\\sys\\main.dol /y/q') f.writeLine('wit COPY patch_temp monopri_patched.iso') f.writeLine('rmdir /s/q patch_temp') common.copyFile(dolout, "main.dol") with ZipFile("data/patch.zip", "w", ZIP_DEFLATED) as zip: for foldername, _, filenames in os.walk("data/patch"): for filename in filenames: filepath = os.path.join(foldername, filename) zip.write(filepath, filepath[5:]) zip.write("main.dol") zip.write("patcher.bat") os.remove("patcher.bat") os.remove("main.dol") common.logMessage("Done!") @common.cli.command() @click.argument("clientid") def generatepo(clientid): tplfolder = "data/work_TPL" tploriginal = "data/out_TPL" files = common.getFiles(tplfolder) im = pyimgur.Imgur(clientid) with common.Stream("data/tpl.po", "w") as f: for file in common.showProgress(files): uploaded = False while not uploaded: try: image = im.upload_image(tploriginal + file, title="file") f.writeLine("#. 
" + image.link) f.writeLine("msgid \"" + file.split("/")[2] + "\"") f.writeLine("msgstr \"\"") f.writeLine("") uploaded = True time.sleep(30) except requests.HTTPError: time.sleep(300) common.logMessage("Done!") @common.cli.command() def dupe(): seen = {} sections = common.getSections("data/msbe_input.txt") for section in sections: if section == "quest.bin": continue for line in sections[section]: translation = sections[section][line][0] if line not in seen: seen[line] = [translation, section, 1] else: seen[line][2] += 1 if translation != seen[line][0]: common.logMessage("{}: {}={} ({} @{})".format(section, line, translation, seen[line][0], seen[line][1])) for line in seen: if seen[line][2] > 2: common.logMessage("Dupe", seen[line][2], line + "=") def cleanSection(section): for str in section: newlist = [] for trans in section[str]: if trans != "": newlist.append(trans) if len(newlist) == 0: section[str] = [""] else: section[str] = newlist return section @common.cli.command() def smartcat(): click.confirm("Importing Smartcat CSV will override the msbe_input.txt and movie_input.txt files, are you sure?", abort=True) common.logMessage("Importing Smartcat CSV ...") # Read the lines from the CSV files infiles = ["data/msbe_output_rearranged.csv", "data/msbe_events.csv", "data/msbe_system.csv", "data/movie.csv"] section = {} commons = {} current = "" for file in infiles: with open(file, newline="", encoding="utf-8") as csvfile: rows = csv.reader(csvfile, delimiter=",", quotechar="\"") for row in rows: orig = row[0] trans = row[1] if orig == "ja" or ".png" in orig or "youtube.com" in orig or orig == "Table of Contents:" or orig == "!Images": continue if orig.startswith("("): orig = orig.split(") ", 1)[1] if orig != "": if orig.startswith("!FILE:"): current = orig.split(",")[0].replace("!FILE:", "") section[current] = {} elif current != "": if orig in section[current]: section[current][orig].append(trans) else: section[current][orig] = [trans] if orig in commons: 
commons[orig].append(trans) else: commons[orig] = [trans] # Clean up empty lines that have translations somewhere else commons = cleanSection(commons) for name in section: section[name] = cleanSection(section[name]) # Export everything to msbe_input following msbe_output for ordering outputfiles = ["data/msbe_output.txt", "data/movie_output.txt"] inputfiles = ["data/msbe_input.txt", "data/movie_input.txt"] for i in range(len(outputfiles)): with codecs.open(outputfiles[i], "r", "utf-8") as fin: with codecs.open(inputfiles[i], "w", "utf-8") as f: current = "" for line in fin: line = line.rstrip("\r\n").replace("\ufeff", "") if line.startswith("!FILE:"): current = line.replace("!FILE:", "") if current not in section: common.logWarning("Section", current, "not found") current = "" else: f.write("!FILE:" + current + "\n") elif current != "": line = line.replace("=", "") linestart = "" if i == 1: linesplit = line.split(":", 2) linestart = linesplit[0] + ":" + linesplit[1] + ":" line = linesplit[2] sectionline = line if line not in section[current]: if line.strip("  ") in section[current] or line.strip("  ") in commons: sectionline = line.strip("  ") elif line.replace("<3D>", "=") in section[current] or line.replace("<3D>", "=") in commons: sectionline = line.replace("<3D>", "=") if sectionline not in section[current] and sectionline in commons: section[current][sectionline] = commons[sectionline] if sectionline in section[current]: f.write(linestart + line + "=" + section[current][sectionline][0] + "\n") if len(section[current][sectionline]) > 1: section[current][sectionline].pop() else: f.write(linestart + line + "=\n") common.logWarning("Line \"" + sectionline + "\" in section", current, "not found") common.logMessage("Done!") if __name__ == "__main__": click.echo("MonoPriTranslation version " + version) if not os.path.isdir("data"): common.logError("data folder not found.") quit() common.runCLI(common.cli)
43.622837
129
0.560482
1,435
12,607
4.86899
0.21115
0.040074
0.023615
0.03163
0.182625
0.152998
0.107915
0.074853
0.065407
0.065407
0
0.009718
0.298009
12,607
288
130
43.774306
0.779774
0.025224
0
0.146617
0
0
0.194528
0.041202
0
0
0.002117
0
0
1
0.022556
false
0
0.06391
0
0.090226
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
77f166eca841bd7df6212b114048257be00c424b
50,437
py
Python
sequana/bedtools.py
brwnj/sequana
58f6ca47815bf7253f27e4631d971a0a479c3a63
[ "BSD-3-Clause" ]
null
null
null
sequana/bedtools.py
brwnj/sequana
58f6ca47815bf7253f27e4631d971a0a479c3a63
[ "BSD-3-Clause" ]
null
null
null
sequana/bedtools.py
brwnj/sequana
58f6ca47815bf7253f27e4631d971a0a479c3a63
[ "BSD-3-Clause" ]
1
2019-10-11T18:21:05.000Z
2019-10-11T18:21:05.000Z
# -*- coding: utf-8 -*- # # This file is part of Sequana software # # Copyright (c) 2016 - Sequana Development Team # # File author(s): # Thomas Cokelaer <thomas.cokelaer@pasteur.fr> # Dimitri Desvillechabrol <dimitri.desvillechabrol@pasteur.fr>, # <d.desvillechabrol@gmail.com> # # Distributed under the terms of the 3-clause BSD license. # The full license is in the LICENSE file, distributed with this software. # # website: https://github.com/sequana/sequana # documentation: http://sequana.readthedocs.io # ############################################################################## """Utilities for the genome coverage""" import re import ast import os import sys from biokit.stats import mixture from sequana.lazy import pandas as pd from sequana.lazy import numpy as np from sequana.lazy import pylab from sequana import logger from sequana.tools import gc_content, genbank_features_parser from sequana.errors import SequanaException from easydev import do_profile __all__ = ["GenomeCov", "ChromosomeCov", "DoubleThresholds"] class DoubleThresholds(object): """Simple structure to handle the double threshold for negative and positive sides Used yb GenomeCov and related classes. :: dt = DoubleThresholds(-3,4,0.5,0.5) This means the low threshold is -3 while the high threshold is 4. The two following values must be between 0 and 1 and are used to define the value of the double threshold set to half the value of the main threshold. Internally, the main thresholds are stored in the low and high attributes. The secondary thresholds are derived from the main thresholds and the two ratios. The ratios are named ldtr and hdtr for low double threshold ratio and high double threshold ration. The secondary thresholds are denoted low2 and high2 are are update automatically if low, high, ldtr or hdtr are changed. """ def __init__(self, low=-3, high=3, ldtr=0.5, hdtr=0.5): assert ldtr>=0. and ldtr<=1.,\ "ldrt parameter (low double threshold ratio) must be in [0,1]" assert hdtr>=0. 
and hdtr<=1.,\ "hdrt parameter (high double threshold ratio) must be in [0,1]" assert low < 0, "low threshold must be negative" assert high > 0, "high threshold must be positive" self._ldtr = ldtr self._hdtr = hdtr self._high = high self._low = low def _get_ldtr(self): return self._ldtr def _set_ldtr(self, ldtr): self._ldtr = ldtr self._low2 = self._low * self._ldtr ldtr = property(_get_ldtr, _set_ldtr) def _get_hdtr(self): return self._hdtr def _set_hdtr(self, hdtr): self._hdtr = hdtr self._high2 = self._high * self._hdtr hdtr = property(_get_hdtr, _set_hdtr) def _get_low(self): return self._low def _set_low(self, value): assert value < 0. self._low = value self._low2 = self._low * self._ldtr low = property(_get_low, _set_low) def _get_high(self): return self._high def _set_high(self, value): assert value > 0. self._high = value self._high2 = self._high * self._ldtr high = property(_get_high, _set_high) def _get_low2(self): return self._low * self._ldtr low2 = property(_get_low2) def _get_high2(self): return self._high * self._hdtr high2 = property(_get_high2) def get_args(self): return "%.2f,%.2f,%.2f,%.2f" % (self.low, self.high, self.ldtr, self.hdtr) def copy(self): thresholds = DoubleThresholds(self.low, self.high, self.ldtr, self.hdtr) return thresholds def __str__(self): txt = "Low threshold: %s\n" % self.low txt += "High threshold: %s\n" % self.high txt += "double-low threshold: %s\n" % self.low2 txt += "double-high threshold: %s" % self.high2 return txt class GenomeCov(object): """Create a list of dataframe to hold data from a BED file generated with samtools depth. This class can be used to plot the coverage resulting from a mapping, which is stored in BED format. The BED file may contain several chromosomes. There are handled independently and accessible as a list of :class:`ChromosomeCov` instances. Example: .. 
plot:: :include-source: from sequana import GenomeCov, sequana_data filename = sequana_data('JB409847.bed') reference = sequana_data("JB409847.fasta") gencov = GenomeCov(filename) gencov.compute_gc_content(reference) gencov = GenomeCov(filename) for chrom in gencov: chrom.running_median(n=3001, circular=True) chrom.compute_zscore() chrom.plot_coverage() gencov[0].plot_coverage() Results are stored in a list of :class:`ChromosomeCov` named :attr:`chr_list`. """ def __init__(self, input_filename, genbank_file=None, low_threshold=-3, high_threshold=3, ldtr=0.5, hdtr=0.5): """.. rubric:: constructor :param str input_filename: the input data with results of a bedtools genomecov run. This is just a 3-column file. The first column is a string (chromosome), second column is the base postion and third is the coverage. :param str genbank_file: annotation file of your referenve. :param float low_threshold: threshold used to identify under-covered genomic region of interest (ROI). Must be negative :param float high_threshold: threshold used to identify over-covered genomic region of interest (ROI). Must be positive :param float ldtr: fraction of the low_threshold to be used to define the intermediate threshold in the double threshold method. Must be between 0 and 1. :param float rdtr: fraction of the low_threshold to be used to define the intermediate threshold in the double threshold method. Must be between 0 and 1. 
""" # Keep information if the genome is circular and the window size used self._circular = None self._feature_dict = None self._gc_window_size = None self._genbank_filename = None self._window_size = None # the user choice have the priorities over csv file if genbank_file: self.genbank_filename = genbank_file # check is the input is a csv of a previous analysis try: self.chr_list = self._read_csv(input_filename) except FileNotFoundError as e: print("FileNotFound error({0}): {1}".format(e.errno, e.strerror)) sys.exit(1) if not self.chr_list: # read bed file self.thresholds = DoubleThresholds(low_threshold, high_threshold, ldtr, hdtr) self.chr_list = self._read_bed(input_filename) def __getitem__(self, index): return self.chr_list[index] def __iter__(self): return self.chr_list.__iter__() def __len__(self): return len(self.chr_list) def __eq__(self, other): if len(self.chr_list) != len(other.chr_list): return False for a,b in zip(self.chr_list, other.chr_list): if all(a.df['cov'] == b.df['cov']) is False: return False return True def compute_coverage(self, window, circular=False, reference=None): """Compute GC content (if reference provided), running_median/zscore for each chromosome. """ if reference: self.compute_gc_content(reference) for c in self.chr_list: c.running_median(window, circular) c.compute_zscore() @property def circular(self): """ Get the circularity of chromosome(s). It must be a boolean. """ return self._circular @circular.setter def circular(self, circular): if isinstance(circular, bool): self._circular = circular else: logger.error("TypeError: Circular must be a boolean. True if your " "genome is circular and False if not.") sys.exit(1) @property def feature_dict(self): """ Get the features dictionary of the genbank. 
""" return self._feature_dict @feature_dict.setter def feature_dict(self, anything): logger.error("AttributeError: You can't set attribute.\n" "GenomeCov.feature_dict is set when" "GenomeCov.genbank_filename is set.") sys.exit(1) @property def gc_window_size(self): """ Get or set the window size to compute the GC content. """ return self._gc_window_size @gc_window_size.setter def gc_window_size(self, n): if n % 2 == 0: logger.warning("Window size must be an odd number.") self._gc_window_size = n + 1 logger.warning("{0} is incremented by 1".format(n)) else: self._gc_window_size = n @property def genbank_filename(self): """ Get or set the genbank filename to annotate ROI detected with :meth:`ChromosomeCov.get_roi`. Changing the genbank filename will configure the :attr:`GenomeCov.feature_dict`. """ return self._genbank_filename @genbank_filename.setter def genbank_filename(self, genbank_filename): if os.path.isfile(genbank_filename): self._genbank_filename = os.path.realpath(genbank_filename) self._feature_dict = genbank_features_parser( genbank_filename) else: logger.error("FileNotFoundError: The genbank file doesn't exist.") sys.exit(1) @property def window_size(self): """ Get or set the window size to compute the running median. Size must be an interger. """ return self._window_size @window_size.setter def window_size(self, n): if n % 2 == 0: logger.warning("Window size must be an odd number.") self._window_size = n + 1 logger.warning("{0} is incremented to {1}".format( n, self._window_size)) else: self._window_size = n def _read_bed(self, input_filename): """ Read bed generated by samtools depth tools and create :class:'ChromosomeCov' list. 
""" df = pd.read_table(input_filename, header=None) df = df.rename(columns={0: "chr", 1: "pos", 2: "cov", 3: "mapq0"}) chr_list = self._set_chr_list(df) # Set the link to this instance in each chromosome # useful if one wants to recompute GC content with different window return chr_list def _read_csv(self, input_filename): """ Read csv generated by :class:'GenomeCov' and create :class:'ChromosomeCov' list. """ # set regex to get important information about previous analysis re_threshold = re.compile("thresholds:([\d,\.-]+)") re_window_size = re.compile("\swindow_size:(\d+)") re_circular = re.compile("circular:(\w+)") re_gc_window_size = re.compile("gc_window_size:(\d+)") re_genbank = re.compile("genbank:([\{0}\w\.\-]+)".format(os.sep)) re_chrom = re.compile("^# ([\w\-\.]+):") re_gaussian = re.compile("(\[\{.+\}\])") with open(input_filename, "r") as fp: line = fp.readline() # check if file was generated by sequana_coverage if not line.startswith("# sequana_coverage"): return None # get thresholds thresholds = re_threshold.findall(line)[0] thresholds = [float(f) for f in thresholds.split(',')] self.thresholds = DoubleThresholds(*thresholds) # get window size self.window_size = int(re_window_size.search(line).group(1)) # get circular circular = re_circular.search(line).group(1) self.circular = False if circular == "False" else True # get gc_window_size gc = re_gc_window_size.search(line) if gc: self.gc_window_size = int(gc.group(1)) # get genbank gb = re_genbank.search(line) if gb and not self.genbank_filename: self.genbank_filename = gb.group(1) # get gaussians for each chromosome gaussians_dict = dict() for line in fp: chrom = re_chrom.search(line) if chrom: gaussians = re_gaussian.search(line) gaussians = ast.literal_eval(gaussians.group(1)) gaussians_dict[chrom.group(1)] = gaussians else: break df = pd.read_csv(fp, header=None, names=line.strip().split(",")) chr_list = self._set_chr_list(df) # Add gaussians and range informations for chrom in chr_list: 
chrom.set_gaussians(gaussians_dict[chrom.chrom_name]) if self.circular: chrom.range = [None, None] else: mid = int(self.window_size/2) chrom.range = [mid, -mid] chrom.mixture_fitting = mixture.EM( chrom.df['scale'][chrom.range[0]:chrom.range[1]]) return chr_list def _set_chr_list(self, df): df = df.set_index("chr", drop=False) return [ChromosomeCov(df.loc[key], self, self.thresholds) for key in df.index.unique()] def compute_gc_content(self, fasta_file, window_size=101, circular=False, letters=['G', 'C', 'c', 'g']): """ Compute GC content of genome sequence. :param str fasta_file: fasta file name. :param int window_size: size of the sliding window. :param bool circular: if the genome is circular (like bacteria chromosome) Store the results in the :attr:`ChromosomeCov.df` attribute (dataframe) with a column named *gc*. """ self.gc_window_size = window_size self.circular = circular gc_dict = gc_content(fasta_file, self.gc_window_size, circular, letters=letters) for chrom in self.chr_list: if chrom.chrom_name in gc_dict.keys(): chrom.df["gc"] = gc_dict[chrom.chrom_name] else: msg = ("The chromosome (or contig) %s in your" " BED/BAM file was not found in the reference provided." " Make sure your input reference file is the same" " as the one used to perform the mapping or just" " remove the --reference parameter.") raise SequanaException(msg % chrom.chrom_name) def get_stats(self, output="json"): """Return basic statistics for each chromosome :return: dictionary with chromosome names as keys and statistics as values. .. seealso:: :class:`ChromosomeCov`. """ stats = {} for chrom in self.chr_list: stats[chrom.chrom_name] = chrom.get_stats(output=output) return stats def hist(self, logx=True, logy=True, fignum=1, N=20, lw=2, **kwargs): for chrom in self.chr_list: chrom.plot_hist_coverage(logx=logx, logy=logy, fignum=fignum, N=N, histtype='step', hold=True, lw=lw, **kwargs) pylab.legend() def to_csv(self, output_filename, **kwargs): """ Write all data in a csv. 
:param str output_filename: csv output file name. :param **dict kwargs: parameters of :meth:`pandas.DataFrame.to_csv`. """ # Concatenate all df df_list = [chrom.get_df() for chrom in self.chr_list] df = pd.concat(df_list) header = ("# sequana_coverage thresholds:{0} window_size:{1} circular:" "{2}".format(self.thresholds.get_args(), self.window_size, self.circular)) if self.genbank_filename: header += ' genbank:' + self.genbank_filename if self.gc_window_size: header += ' gc_window_size:{0}'.format(self.gc_window_size) with open(output_filename, "w") as fp: print(header, file=fp) for chrom in self.chr_list: print("# {0}".format(chrom.get_gaussians()), file=fp) df.to_csv(fp, **kwargs) class ChromosomeCov(object): """Factory to manipulate coverage and extract region of interests. Example: .. plot:: :include-source: from sequana import GenomeCov, sequana_data filename = sequana_data("virus.bed") gencov = GenomeCov(filename) chrcov = gencov[0] chrcov.running_median(n=3001) chrcov.compute_zscore() chrcov.plot_coverage() df = chrcov.get_roi().get_high_roi() The *df* variable contains a dataframe with high region of interests (over covered) .. seealso:: sequana_coverage standalone application """ def __init__(self, df, genomecov, thresholds=None): """.. rubric:: constructor :param df: dataframe with position for a chromosome used within :class:`GenomeCov`. Must contain the following columns: ["chr", "pos", "cov"] :param thresholds: a data structure :class:`DoubleThresholds` that holds the double threshold values. 
""" self._bed = genomecov self.df = df.set_index("pos", drop=False) self.chrom_name = str(df["chr"].iloc[0]) try: self.thresholds = thresholds.copy() except: self.thresholds = DoubleThresholds() def __str__(self): stats = self.get_stats(output="dataframe") stats.set_index("name", inplace=True) def _getter(data, key): return data.ix[key].Value txt = "\nGenome length: %s" % int(len(self.df)) txt += "\nSequencing depth (DOC): %8.2f " % _getter(stats,'DOC') txt += "\nSequencing depth (median): %8.2f " % _getter(stats, 'Median') txt += "\nBreadth of coverage (BOC) (percent): %.2f " % _getter( stats, 'BOC') txt += "\nGenome coverage standard deviation : %8.2f " % _getter( stats,'STD') txt += "\nGenome coverage coefficient variation : %8.2f " % _getter( stats,'CV') return txt def __len__(self): return self.df.__len__() @property def bed(self): return self._bed @bed.setter def bed(self): logger.error("AttributeError: You can't set the ChromosomeCov.bed. " "Setting is done automatically when the class is " "created.") def columns(self): """ Return immutable ndarray implementing an ordered, sliceable set. """ return self.df.columns def get_df(self): return self.df.set_index("chr", drop=True) def get_size(self): return self.__len__() def get_mean_cov(self): return self.df["cov"].mean() def get_var_coef(self): return np.sqrt(self.df["cov"].var()) / self.get_mean_cov() def get_gaussians(self): return "{0}: {1}".format(self.chrom_name, self.gaussians_params) def set_gaussians(self, gaussians): """ Set gaussians predicted if you read a csv file generated by :class:`GenomeCov`. """ self.gaussians_params = gaussians self.best_gaussian = self._get_best_gaussian() def moving_average(self, n, circular=False): """Compute moving average of the genome coverage :param n: window's size. Must be odd :param bool circular: is the chromosome circular or not Store the results in the :attr:`df` attribute (dataframe) with a column named *ma*. 
""" N = len(self.df['cov']) assert n < N/2 from sequana.stats import moving_average ret = np.cumsum(np.array(self.df["cov"]), dtype=float) ret[n:] = ret[n:] - ret[:-n] ma = ret[n - 1:] / n mid = int(n / 2) self.df["ma"] = pd.Series(ma, index=np.arange(start=mid, stop=(len(ma) + mid))) if circular: # FIXME: shift of +-1 as compared to non circular case... # shift the data and compute the moving average self.data = list(self.df['cov'].values[N-n:]) +\ list(self.df['cov'].values) + \ list(self.df['cov'].values[0:n]) ma = moving_average(self.data, n) self.ma = ma[n//2+1:-n//2] self.df["ma"] = pd.Series(self.ma, index=self.df['cov'].index) def running_median(self, n, circular=False): """Compute running median of genome coverage :param int n: window's size. :param bool circular: if a mapping is circular (e.g. bacteria whole genome sequencing), set to True Store the results in the :attr:`df` attribute (dataframe) with a column named *rm*. .. versionchanged:: 0.1.21 Use Pandas rolling function to speed up computation. """ self.bed.window_size = n self.bed.circular = circular # in py2/py3 the division (integer or not) has no impact mid = int(n / 2) self.range = [None, None] try: if circular: # BASED on running_median pure implementation, could be much # slower than pure pandas rolling function. Keep those 4 lines # for book keeping though. 
#cover = list(self.df["cov"]) #cover = cover[-mid:] + cover + cover[:mid] #rm = running_median.RunningMedian(cover, n).run() #self.df["rm"] = rm[mid:-mid] rm = pd.concat([self.df['cov'][-mid:], self.df['cov'], self.df['cov'][:mid]]).rolling( n, center=True).median() self.df["rm"] = rm[mid:-mid] else: rm = self.df['cov'].rolling(n, center=True).median() # Like in RunningMedian, we copy the NAN with real data rm[0:mid] = self.df['cov'][0:mid] rm[-mid:] = self.df['cov'][-mid:] #rm = running_median.RunningMedian(cover, n).run() self.df["rm"] = rm # set up slice for gaussian prediction self.range = [mid, -mid] except: self.df["rm"] = self.df["cov"] def get_evenness(self): """Return Evenness of the coverage :Reference: Konrad Oexle, Journal of Human Genetics 2016, Evaulation of the evenness score in NGS. work before or after normalisation but lead to different results. """ from sequana.stats import evenness return evenness(self.df['cov']) def get_cv(self): """Return the coefficient variation The coefficient of variation (CV) is defined as sigma / mu To get percentage, you must multiply by 100. """ sigma = self.df['cov'].std() mu = self.df['cov'].mean() return sigma/mu def _coverage_scaling(self): """Normalize data with moving average of coverage Store the results in the :attr:`df` attribute (dataframe) with a column named *scale*. .. note:: Needs to call :meth:`running_median` """ if "rm" not in self.df.columns: txt = "Column rm (running median) is missing.\n" + self.__doc__ print(txt) raise KeyError else: self.df["scale"] = self.df["cov"] / self.df["rm"] self.df = self.df.replace(np.inf, np.nan) self.df = self.df.replace(-np.inf, np.nan) def _get_best_gaussian(self): results_pis = [model["pi"] for model in self.gaussians_params] indice = np.argmax(results_pis) return self.gaussians_params[indice] def compute_zscore(self, k=2, step=10, use_em=True, verbose=True): """ Compute zscore of coverage and normalized coverage. 
:param int k: Number gaussian predicted in mixture (default = 2) :param int step: (default = 10). This parameter is used to speed up computation and is ignored if the length of the coverage/sequence is below 100,000 Store the results in the :attr:`df` attribute (dataframe) with a column named *zscore*. .. note:: needs to call :meth:`running_median` before hand. """ # here for lazy import from biokit.stats import mixture # normalize coverage self._coverage_scaling() data = self.df['scale'][self.range[0]:self.range[1]] if len(data) < 100000: step = 1 # remove nan and inf values data = data.replace(0, np.nan) data = data.dropna() if data.empty: data = np.full(len(self.df), 1, dtype=int) self.df['scale'] = data if use_em: self.mixture_fitting = mixture.EM( data[::step]) self.mixture_fitting.estimate(k=k) else: self.mixture_fitting = mixture.GaussianMixtureFitting( data[::step],k=k) self.mixture_fitting.estimate() # keep gaussians informations self.gaussians = self.mixture_fitting.results params_key = ("mus", "sigmas", "pis") self.gaussians_params = [{key[:-1]: self.gaussians[key][i] for key in params_key} for i in range(k)] self.best_gaussian = self._get_best_gaussian() # warning when sigma is equal to 0 if self.best_gaussian["sigma"] == 0: logger.warning("A problem related to gaussian prediction is " "detected. Be careful, Sigma is equal to 0.") self.df["zscore"] = np.zeros(len(self.df), dtype=int) else: self.df["zscore"] = (self.df["scale"] - self.best_gaussian["mu"]) / \ self.best_gaussian["sigma"] # Naive checking that the if k == 2: mus = self.gaussians['mus'] sigmas = self.gaussians["sigmas"] index0 = mus.index(self.best_gaussian["mu"]) if index0 == 0: mu1 = mus[1] s0 = sigmas[0] mu0 = mus[0] else: mu1 = mus[0] s0 = sigmas[1] mu0 = mus[1] if abs(mu0-mu1) < s0: logger.warning(("Warning: k=2 but note that |mu0-mu1| < sigma0. 
" "k=1 could be a better choice")) def get_centralness(self): """Proportion of central (normal) genome coverage This is 1 - (number of non normal data) / (total length) .. note:: depends on the thresholds attribute being used. .. note:: depends slightly on :math:`W` the running median window """ filtered = self.get_roi() Cplus = sum(filtered.get_high_roi()['size']) Cminus = sum(filtered.get_low_roi()['size']) return 1 - (Cplus+Cminus) / float(len(self)) def get_roi(self): """Keep positions with zscore outside of the thresholds range. :return: a dataframe from :class:`FilteredGenomeCov` .. note:: depends on the :attr:`thresholds` low and high values. """ features = self.bed.feature_dict try: second_high = self.thresholds.high2 second_low = self.thresholds.low2 query = "zscore > @second_high or zscore < @second_low" # in the genbank, the names appears as e.g. JB12345 # but in the fasta or BED files, it may be something like # gi|269939526|emb|FN433596.1| # so they do not match. We can try to guess it alternative = None if features: if self.chrom_name not in features.keys(): msg = """Chromosome name (%s) not found in the genbank. Make sure the chromosome names in the BAM/BED files are compatible with the genbank content. 
Genbank files contains the following keys """ for this in features.keys(): msg += "\n - %s" % this alternative = [x for x in self.chrom_name.split("|") if x] alternative = alternative[-1] # assume the accession is last alternative = alternative.split('.')[0] # remove version if alternative in features.keys(): msg += "\n Guessed the chromosome name to be: %s" % alternative else: features = None logger.warning(msg % self.chrom_name) if features: if alternative: return FilteredGenomeCov(self.df.query(query), self.thresholds, features[alternative]) else: return FilteredGenomeCov(self.df.query(query), self.thresholds, features[self.chrom_name]) else: return FilteredGenomeCov(self.df.query(query), self.thresholds) except KeyError: logger.error("Column zscore is missing in data frame.\n" "You must run compute_zscore before get low coverage." "\n\n", self.__doc__) sys.exit(1) def plot_coverage(self, filename=None, fontsize=16, rm_lw=1, rm_color="#0099cc", rm_label="Running median", th_lw=1, th_color="r", th_ls="--", main_color="k", main_lw=1, main_kwargs={}, sample=True, set_ylimits=True): """ Plot coverage as a function of base position. :param filename: :param rm_lw: line width of the running median :param rm_color: line color of the running median :param rm_color: label for the running median :param th_lw: line width of the thresholds :param th_color: line color of the thresholds :param main_color: line color of the coverage :param main_lw: line width of the coverage :param sample: if there are more than 1 000 000 points, we use an integer step to skip data points. We can still plot all points at your own risk by setting this option to False :param set_ylimits: we want to focus on the "normal" coverage ignoring unsual excess. To do so, we set the yaxis range between 0 and a maximum value. This maximum value is set to the minimum between the 6 times the mean coverage and 1.5 the maximum of the high coverage threshold curve. 
If you want to let the ylimits free, set this argument to False .. note:: if there are more than 1,000,000 points, we show only 1,000,000 by points. For instance for 5,000,000 points, In addition to the coverage, the running median and coverage confidence corresponding to the lower and upper zscore thresholds are shown. .. note:: uses the thresholds attribute. """ # z = (X/rm - \mu ) / sigma high_zcov = (self.thresholds.high * self.best_gaussian["sigma"] + self.best_gaussian["mu"]) * self.df["rm"] low_zcov = (self.thresholds.low * self.best_gaussian["sigma"] + self.best_gaussian["mu"]) * self.df["rm"] pylab.clf() ax = pylab.gca() ax.set_facecolor('#eeeeee') pylab.xlim(0,self.df["pos"].iloc[-1]) axes = [] labels = [] # 1,000,000 points is a lot for matplotlib. Let us restrict ourself to 1 # million points for now. if len(self.df) > 1000000 and sample is True: NN = int(len(self.df)/1000000) else: NN = 1 # the main coverage plot p1, = pylab.plot(self.df["cov"][::NN], color=main_color, label="Coverage", linewidth=main_lw, **main_kwargs) axes.append(p1) labels.append("Coverage") # The running median plot if rm_lw > 0: p2, = pylab.plot(self.df["rm"][::NN], color=rm_color, linewidth=rm_lw, label=rm_label) axes.append(p2) labels.append(rm_label) # The threshold curves if th_lw > 0: p3, = pylab.plot(high_zcov[::NN], linewidth=th_lw, color=th_color, ls=th_ls, label="Thresholds") p4, = pylab.plot(low_zcov[::NN], linewidth=th_lw, color=th_color, ls=th_ls, label="_nolegend_") axes.append(p3) labels.append("Thresholds") pylab.legend(axes, labels, loc="best") pylab.xlabel("Position", fontsize=fontsize) pylab.ylabel("Per-base coverage", fontsize=fontsize) pylab.grid(True) # sometimes there are large coverage value that squeeze the plot. 
# Let us restrict it if set_ylimits is True: pylab.ylim([0, min([ high_zcov.max() * 1.5, self.df["cov"].mean()*6])]) else: pylab.ylim([0, pylab.ylim()[1]]) try: pylab.tight_layout() except: pass if filename: pylab.savefig(filename) def _set_bins(self, df, binwidth): try: bins = np.arange(min(df), max(df) + binwidth, binwidth) except ValueError: return 100 if bins.any(): return bins return 100 def plot_hist_zscore(self, fontsize=16, filename=None, max_z=6, binwidth=0.5, **hist_kargs): """ Barplot of the zscore values """ pylab.clf() bins = self._set_bins(self.df["zscore"][self.range[0]:self.range[1]], binwidth) self.df["zscore"][self.range[0]:self.range[1]].hist( grid=True, bins=bins, **hist_kargs) pylab.xlabel("Z-Score", fontsize=fontsize) try: pylab.tight_layout() except: pass if filename: pylab.savefig(filename) def plot_hist_normalized_coverage(self, filename=None, binwidth=0.1, max_z=4): """ Barplot of the normalized coverage with gaussian fitting """ pylab.clf() # if there are a NaN -> can't set up binning d = self.df["scale"][self.range[0]:self.range[1]].dropna() # remove outlier -> plot crash if range between min and max is too high d = d[np.abs(d - d.mean()) <= (4 * d.std())] bins = self._set_bins(d, binwidth) self.mixture_fitting.data = d try: self.mixture_fitting.plot(self.gaussians_params, bins=bins, Xmin=0, Xmax=max_z) except ZeroDivisionError: pass pylab.grid(True) pylab.xlim([0,max_z]) pylab.xlabel("Normalised per-base coverage") try: pylab.tight_layout() except: pass if filename: pylab.savefig(filename) def plot_hist_coverage(self, logx=True, logy=True, fontsize=16, N=20, fignum=1, hold=False, alpha=0.5, filename=None, **kw_hist): """ """ if hold is False: pylab.figure(fignum) pylab.clf() ax = pylab.gca() ax.set_facecolor('#eeeeee') data = self.df['cov'].dropna().values maxcov = data.max() if logx is True and logy is True: bins = pylab.logspace(0, pylab.log10(maxcov), N) pylab.hist(data, bins=bins, log=True, label=self.chrom_name, alpha=alpha, 
**kw_hist) pylab.semilogx() pylab.xlabel("Coverage (log scale)", fontsize=fontsize) pylab.ylabel("Count (log scale)", fontsize=fontsize) elif logx is False and logy is True: pylab.hist(data, bins=N, log=True, label=self.chrom_name, alpha=alpha, **kw_hist) pylab.xlabel("Coverage", fontsize=fontsize) pylab.ylabel("Count (log scale)", fontsize=fontsize) elif logx is True and logy is False: bins = pylab.logspace(0, pylab.log10(maxcov), N) pylab.hist(data, bins=N, label=self.chrom_name, alpha=alpha, **kw_hist) pylab.xlabel("Coverage (log scale)", fontsize=fontsize) pylab.ylabel("Count", fontsize=fontsize) pylab.semilogx() else: pylab.hist(data, bins=N, label=self.chrom_name, alpha=alpha, **kw_hist) pylab.xlabel("Coverage", fontsize=fontsize) pylab.ylabel("Count", fontsize=fontsize) pylab.grid(True) if filename: pylab.savefig(filename) def to_csv(self, filename=None, start=None, stop=None, **kwargs): """ Write CSV file of the dataframe. :param str filename: csv output filename. If None, return string. :param int start: start row index. :param int stop: stop row index. Params of :meth:`pandas.DataFrame.to_csv`: :param list columns: columns you want to write. :param bool header: determine if the header is written. :param bool index: determine if the index is written. :param str float_format: determine the float format. 
""" # Create directory to avoid errno 2 if filename: directory = os.path.dirname(os.path.realpath(filename)) try: os.makedirs(directory) except FileExistsError: if os.path.isdir(directory): pass else: msg = "{0} exist and it is not a directory".format( directory) logger.error(msg) raise FileExistsError return self.df[start:stop].to_csv(filename, **kwargs) def plot_gc_vs_coverage(self, filename=None, bins=None, Nlevels=6, fontsize=20, norm="log", ymin=0, ymax=100, contour=True, **kwargs): if Nlevels is None or Nlevels==0: contour = False data = self.df[['cov','gc']].copy() data['gc'] *= 100 data = data.dropna() if bins is None: bins = [100, min(int(data['gc'].max()-data['gc'].min()+1), max(5,self.bed.gc_window_size - 4))] bins[0] = max(10, min(bins[0], self.df['cov'].max())) from biokit import Hist2D h2 = Hist2D(data) try: h2.plot(bins=bins, xlabel="Per-base coverage", ylabel=r'GC content (%)', Nlevels=Nlevels, contour=contour, norm=norm, fontsize=fontsize, **kwargs) except: h2.plot(bins=bins, xlabel="Per-base coverage", ylabel=r'GC content (%)' , Nlevels=Nlevels, contour=False, norm=norm, fontsize=fontsize, **kwargs) pylab.ylim([ymin, ymax]) try: pylab.tight_layout() except: pass if filename: pylab.savefig(filename) def get_gc_correlation(self): """Return the correlation between the coverage and GC content The GC content is the one computed in :meth:`GenomeCov.compute_gc_content` (default window size is 101) """ return self.df[['cov', 'gc']].corr().iloc[0, 1] def get_max_gc_correlation(self, reference, guess=100): """Plot correlation between coverage and GC content by varying the GC window The GC content uses a moving window of size W. This parameter affects the correlation bewteen coverage and GC. This function find the *optimal* window length. 
""" pylab.clf() corrs = [] wss = [] def func(params): ws = int(round(params[0])) if ws < 10: return 0 self.bed.compute_gc_content(reference, ws) corr = self.get_gc_correlation() corrs.append(corr) wss.append(ws) return corr from scipy.optimize import fmin res = fmin(func, guess, xtol=1, disp=False) # guess is 200 pylab.plot(wss, corrs, "o") pylab.xlabel("GC window size") pylab.ylabel("Correlation") pylab.grid() return res[0] def get_stats(self, output="json"): """Return basic stats about the coverage data""" data = self.df stats = { 'DOC': self.df['cov'].mean(), 'STD': self.df['cov'].std(), 'Median': self.df['cov'].median(), 'BOC': 100 * sum(self.df['cov'] > 0) / float(len(self.df))} try: stats['CV'] = stats['STD'] / stats['DOC'] except: stats['CV'] = np.nan stats['MAD'] = np.median(abs(data['cov'].median() - data['cov']).dropna()) names = ['BOC', 'CV', 'DOC', 'MAD', 'Median', 'STD'] descriptions = [ "breadth of coverage: the proportion (in %s) of the " "genome covered by at least one read.", "the coefficient of variation.", "the sequencing depth (Depth of Coverage), that is the average of " "the genome coverage.", "median of the absolute median deviation defined as median(|X-median(X)|).", "Median of the coverage.", "standard deviation." ] if 'gc' in self.df.columns: stats['GC'] = self.df['gc'].mean() * 100 names.append('GC') descriptions.append("GC content in %") df = pd.DataFrame({ "name": names, "Value": [stats[x] for x in names], "Description": descriptions}) if output == "json": return df.to_json() else: return df class FilteredGenomeCov(object): """Class used within :class:`ChromosomeCov` to select a subset of the original GenomeCov :target: developers only """ _feature_not_wanted = {"gene", "regulatory", "source"} def __init__(self, df, threshold, feature_list=None): """ .. rubric:: constructor :param df: dataframe with filtered position used within :class:`GenomeCov`. 
Must contain the following columns: ["pos", "cov", "rm", "zscore"] :param int threshold: a :class:`~sequana.bedtools.DoubleThresholds` instance. """ if isinstance(feature_list, list) and len(feature_list) == 0: feature_list = None region_list = self._merge_region(df, threshold=threshold) if feature_list: region_list = self._add_annotation(region_list, feature_list) self.df = self._dict_to_df(region_list, feature_list) def func(x): try: return x.split(".")[0] except: return x for column in ['gene_end', 'gene_start']: if column in self.df.columns: self.df[column] = self.df[column].astype(str) self.df[column] = self.df[column].apply(func) def __str__(self): return self.df.__str__() def __len__(self): return self.df.__len__() def _merge_row(self, df, start, stop): chrom = df["chr"][start] cov = np.mean(df["cov"].loc[start:stop]) max_cov = np.max(df["cov"].loc[start:stop]) rm = np.mean(df["rm"].loc[start:stop]) zscore = np.mean(df["zscore"].loc[start:stop]) if zscore >= 0: max_zscore = df["zscore"].loc[start:stop].max() else: max_zscore = df["zscore"].loc[start:stop].min() size = stop - start + 1 return {"chr": chrom, "start": start, "end": stop + 1, "size": size, "mean_cov": cov, "mean_rm": rm, "mean_zscore": zscore, "max_zscore": max_zscore, "max_cov": max_cov} def _merge_region(self, df, threshold, zscore_label="zscore"): """Merge position side by side of a data frame. Uses a double threshold method. :param threshold: the high threshold (standard one), not the low one. .. todo:: to be documented """ region_start = None region_stop = None start = 1 stop = 1 prev = 1 # handle case where for example position n-1 have a zscore of -5 and n # have a zscore of 5. It is two different regions. 
region_zscore = 0 merge_df = [] for pos, zscore in zip(df["pos"], df[zscore_label]): stop = pos if stop - 1 == prev and zscore * region_zscore >= 0: prev = stop else: if region_start: merge_df.append(self._merge_row(df, region_start, region_stop)) region_start = None start = stop prev = stop region_zscore = zscore if zscore > 0 and zscore > threshold.high: if not region_start: region_start = pos region_stop = pos else: region_stop = pos elif zscore < 0 and zscore < threshold.low: if not region_start: region_start = pos region_stop = pos else: region_stop = pos if start < stop and region_start: merge_df.append(self._merge_row(df, region_start, region_stop)) return merge_df def _add_annotation(self, region_list, feature_list): """ Add annotation from a dictionary generated by parsers in sequana.tools. """ region_ann = [] # an iterator of features iter_feature = iter(feature_list) feature = next(iter_feature) # pass "source" feature while feature["type"] in FilteredGenomeCov._feature_not_wanted: try: feature = next(iter_feature) except StopIteration: print("Features types ({0}) are not present in the annotation" " file. Please change what types you want".format( feature['type'])) return region_ann # merge regions and annotations for region in region_list: feature_exist = False while feature["gene_end"] <= region["start"]: try: feature = next(iter_feature) except: break while feature["gene_start"] < region["end"]: # A feature exist for detected ROI feature_exist = True # put locus_tag in gene field if gene doesn't exist try: feature["gene"] except KeyError: try: feature["gene"] = feature["locus_tag"] except: feature["gene"] = "None" # put note field in product if product doesn't exist try: feature["product"] except KeyError: try: feature["product"] = feature["note"] except: feature["product"] = "None" # FIXME what that ? 
#if region["start"] == 237433: # print(dict(region, **feature)) region_ann.append(dict(region, **feature)) try: feature = next(iter_feature) except StopIteration: break if feature_exist is False: region_ann.append(dict(region, **{"gene_start": None, "gene_end": None, "type": None, "gene": None, "strand": None, "product": None})) return region_ann def _dict_to_df(self, region_list, annotation): """ Convert dictionary as dataframe. """ merge_df = pd.DataFrame(region_list) colnames = ["chr", "start", "end", "size", "mean_cov", "max_cov", "mean_rm", "mean_zscore", "max_zscore", "gene_start", "gene_end", "type", "gene", "strand", "product"] if not annotation: colnames = colnames[:9] merge_df = pd.DataFrame(region_list, columns=colnames) int_column = ["start", "end", "size"] merge_df[int_column] = merge_df[int_column].astype(int) if annotation: merge_df.rename(columns={"gene": "gene_name"}, inplace=True) # maybe let the user set what he wants return merge_df.loc[~merge_df["type"].isin( FilteredGenomeCov._feature_not_wanted)] return merge_df def _get_sub_range(self, seq_range): try: return self.df[(self.df["end"] > seq_range[0]) & (self.df["start"] < seq_range[1])] except TypeError: return self.df def get_low_roi(self, seq_range=None): df = self._get_sub_range(seq_range) return df.loc[df["max_zscore"] < 0] def get_high_roi(self, seq_range=None): df = self._get_sub_range(seq_range) return df.loc[df["max_zscore"] >= 0]
36.761662
97
0.5606
6,124
50,437
4.497714
0.129327
0.019605
0.009803
0.005228
0.225094
0.1702
0.15143
0.126525
0.117303
0.091853
0
0.013141
0.333128
50,437
1,371
98
36.788476
0.805768
0.24948
0
0.25
0
0
0.105558
0.003184
0
0
0
0.002188
0.008413
1
0.097356
false
0.007212
0.020433
0.024038
0.210337
0.00601
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
77f172359238ee4f9aa64dd9f0ca2c3d158a0bf1
1,989
py
Python
tests/feed/test_refresh_user_feeds.py
sslavov93/rss_scraper
4621e01b295de341d0921cade026b406f2b389e3
[ "MIT" ]
1
2020-09-17T12:40:34.000Z
2020-09-17T12:40:34.000Z
tests/feed/test_refresh_user_feeds.py
sslavov93/rss_scraper
4621e01b295de341d0921cade026b406f2b389e3
[ "MIT" ]
null
null
null
tests/feed/test_refresh_user_feeds.py
sslavov93/rss_scraper
4621e01b295de341d0921cade026b406f2b389e3
[ "MIT" ]
null
null
null
from unittest.mock import patch

from feed.models import FeedItem
from tests import BaseTestFixture, basic_auth_headers


class TestRefreshUserFeeds(BaseTestFixture):
    """Endpoint tests for refreshing a single user feed or all user feeds.

    Bug fix: the original used ``self.assertTrue(mock.persist.called_with(...))``.
    ``called_with`` is NOT a Mock assertion method -- accessing it auto-creates a
    child Mock, which is always truthy, so those assertions could never fail.
    Replaced with ``assert_called_with``, which actually verifies the call.
    """

    def test_refresh_single_feed_when_not_authenticated(self):
        # No Authorization header -> 401.
        response = self.client.post('/api/my-feeds/5/update')
        self.assertEqual(401, response.status_code)

    def test_refresh_all_user_feeds_when_not_authenticated(self):
        # No Authorization header -> 401.
        response = self.client.post('/api/my-feeds/update')
        self.assertEqual(401, response.status_code)

    def test_refresh_single_feed_when_feed_not_exist(self):
        # Authenticated, but feed id 5 does not exist -> 404.
        response = self.client.post(
            '/api/my-feeds/5/update', headers=basic_auth_headers("user", "pass")
        )
        self.assertEqual(404, response.status_code)

    @patch("feed.routes.Scraper")
    @patch("feed.routes.scrape_single")
    def test_refresh_single_feed_no_error(self, scrape_single_task, scraper):
        scraper.parse.return_value = [FeedItem(id=5), FeedItem(id=6)]
        scrape_single_task.return_value = {}
        response = self.client.post(
            '/api/my-feeds/1/update', headers=basic_auth_headers("user", "pass")
        )
        self.assertEqual(200, response.status_code)
        # NOTE(review): asserting on the class mock; if the route calls persist on
        # an instance, this should be scraper.return_value.persist -- confirm
        # against feed.routes.
        scraper.persist.assert_called_with([FeedItem(id=5), FeedItem(id=6)])
        self.assertTrue(scrape_single_task.delay.called)

    @patch("feed.routes.Scraper")
    @patch("feed.routes.scrape_single")
    def test_refresh_all_user_feeds_no_error(self, scrape_single_task, scraper):
        scraper.parse.return_value = [FeedItem(id=5), FeedItem(id=6)]
        scrape_single_task.return_value = {}
        response = self.client.post(
            '/api/my-feeds/update', headers=basic_auth_headers("user", "pass")
        )
        self.assertEqual(200, response.status_code)
        # NOTE(review): see remark in the single-feed test above.
        scraper.persist.assert_called_with([FeedItem(id=5), FeedItem(id=6)])
        self.assertTrue(scrape_single_task.delay.called)
38.25
86
0.691302
251
1,989
5.223108
0.231076
0.073227
0.073227
0.083905
0.866514
0.858886
0.811594
0.811594
0.811594
0.776506
0
0.016129
0.189542
1,989
51
87
39
0.797146
0
0
0.55
0
0
0.109603
0.058321
0
0
0
0
0.225
1
0.125
false
0.075
0.075
0
0.225
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
7
77f23de07c25946522c768f1ef7e8ffca7391f96
2,272
py
Python
qsrlib/src/qsrlib_qsrs/qsr_tpcc.py
alexiatoumpa/QSR_Detector
ff92a128dddb613690a49a7b4130afeac0dd4381
[ "MIT" ]
15
2015-06-15T16:50:37.000Z
2022-03-27T09:25:56.000Z
qsrlib/src/qsrlib_qsrs/qsr_tpcc.py
alexiatoumpa/QSR_Detector
ff92a128dddb613690a49a7b4130afeac0dd4381
[ "MIT" ]
205
2015-01-22T12:02:59.000Z
2022-03-29T11:59:55.000Z
qsrlib/src/qsrlib_qsrs/qsr_tpcc.py
alexiatoumpa/QSR_Detector
ff92a128dddb613690a49a7b4130afeac0dd4381
[ "MIT" ]
16
2015-02-04T23:13:18.000Z
2022-03-08T13:45:53.000Z
# -*- coding: utf-8 -*- from __future__ import print_function, division from qsrlib_qsrs.qsr_triadic_abstractclass import QSR_Triadic_1t_Abstractclass import math class QSR_TPCC(QSR_Triadic_1t_Abstractclass): """TPCC QSRs. .. seealso:: For further details about TPCC, refer to its :doc:`description. <../handwritten/qsrs/tpcc>` """ _unique_id = "tpcc" _all_possible_relations = ('dlf', 'dfl', 'dsl', 'dbl', 'dlb', 'dsb', 'drb', 'dbr', 'dsr', 'dfr', 'drf', 'dsf', 'clf', 'cfl', 'csl', 'cbl', 'clb', 'csb', 'crb', 'cbr', 'csr', 'cfr', 'crf', 'csf', 'sam') _dtype = "points" __partition_names = ['lb','bl','fl','lf','rf','fr','br','rb'] __partition_size = 2 * math.pi / len(__partition_names) def __init__(self): """Constructor.""" super(QSR_TPCC, self).__init__() def _compute_qsr(self, origin, relatum, objct, qsr_params, **kwargs): base_distance = math.sqrt((origin.x-relatum.x)**2 + (origin.y-relatum.y)**2) object_distance = math.sqrt((objct.x-relatum.x)**2 + (objct.y-relatum.y)**2) if object_distance == 0: return "sam" relation = "d" if object_distance > base_distance else "c" # is it far or close: first letter angle = self._relative_angle(origin, relatum, objct) partition = int(angle / self.__partition_size) relation += self.__partition_names[partition] sin_angle = math.fabs(math.sin(angle)) if sin_angle < 0.00001 or sin_angle > 0.99999: relation = relation[0]+'s'+relation[2] return relation @staticmethod def _relative_angle(a, b, c): """Compute relative angle used to select the (left/right/straight/front/back/straight) relationship""" angle_BA = math.atan2((b.y - a.y),(b.x - a.x)) if angle_BA < 0: angle_BA += 2 * math.pi angle_CB = math.atan2((c.y - b.y), (c.x - b.x)) if angle_CB < 0: angle_CB += 2 * math.pi angle_rel = angle_CB - angle_BA if angle_rel < 0: angle_rel += 2 * math.pi return angle_rel
37.245902
108
0.559859
289
2,272
4.152249
0.449827
0.016667
0.023333
0.041667
0
0
0
0
0
0
0
0.019279
0.292254
2,272
60
109
37.866667
0.72699
0.12456
0
0
0
0
0.054731
0
0
0
0
0
0
1
0.076923
false
0
0.076923
0
0.384615
0.025641
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
77f3d071cf3be67ef3b31b8f16f8ea3ddb7f75dc
9,434
py
Python
baobab/configs/tdlmc_diagonal_config.py
aymgal/baobab
960ddbd55fc4391f2b857f2232af38c45c809ae8
[ "MIT" ]
null
null
null
baobab/configs/tdlmc_diagonal_config.py
aymgal/baobab
960ddbd55fc4391f2b857f2232af38c45c809ae8
[ "MIT" ]
null
null
null
baobab/configs/tdlmc_diagonal_config.py
aymgal/baobab
960ddbd55fc4391f2b857f2232af38c45c809ae8
[ "MIT" ]
null
null
null
import numpy as np from addict import Dict cfg = Dict() cfg.name = 'tdlmc' cfg.seed = 1113 # random seed cfg.bnn_prior_class = 'DiagonalBNNPrior' cfg.n_data = 200 # number of images to generate cfg.train_vs_val = 'train' cfg.components = ['lens_mass', 'external_shear', 'src_light', 'lens_light', 'agn_light'] cfg.checkpoint_interval = 2 cfg.selection = dict( magnification=dict( min=2.0 ), initial=["lambda x: x['lens_mass']['theta_E'] > 0.5",] ) cfg.instrument = dict( pixel_scale=0.08, # scale (in arcseonds) of pixels ccd_gain=4.5, # electrons/ADU (analog-to-digital unit). A gain of 8 means that the camera digitizes the CCD signal so that each ADU corresponds to 8 photoelectrons. ) cfg.bandpass = dict( magnitude_zero_point=25.9463, # (effectively, the throuput) magnitude in which 1 count per second per arcsecond square is registered (in ADUs) ) cfg.observation = dict( exposure_time=100.0, # exposure time per image (in seconds) ) cfg.psf = dict( type='PIXEL', # string, type of PSF ('GAUSSIAN' and 'PIXEL' supported) kernel_size=91, # dimension of provided PSF kernel, only valid when profile='PIXEL' which_psf_maps=None, # None if rotate among all available PSF maps, else seed number of the map to generate all images with that map ) cfg.numerics = dict( supersampling_factor=1) cfg.image = dict( num_pix=100, # cutout pixel size inverse=False, # if True, coord sys is ra to the left, if False, to the right ) cfg.bnn_omega = dict( lens_mass = dict( profile='SPEMD', # only available type now # Normal(mu, sigma^2) center_x = dict( dist='normal', # one of ['normal', 'beta'] mu=0.0, sigma=1.e-6, ), center_y = dict( dist='normal', mu=0.0, sigma=1.e-6, ), # Lognormal(mu, sigma^2) gamma = dict( dist='normal', mu=1.935, sigma=0.001, ), theta_E = dict( dist='normal', mu=1.082, sigma=0.001, ), # Beta(a, b) q = dict( dist='normal', mu=0.869, sigma=0.001, ), phi = dict( dist='normal', mu= 0.708, sigma=0.001, ), ), external_shear = dict( profile='SHEAR_GAMMA_PSI', gamma_ext = dict( 
dist='normal', mu=0.008, # See overleaf doc sigma=0.001, ), psi_ext = dict( dist='normal', mu=0.7853, sigma=0.001, lower=0, upper=np.pi, ) ), lens_light = dict( profile='SERSIC_ELLIPSE', # only available type now # Centered at lens mass # Lognormal(mu, sigma^2) magnitude = dict( dist='normal', mu=17.325, sigma=0.001, ), n_sersic = dict( dist='normal', mu=2.683, sigma=0.001, ), R_sersic = dict( dist='normal', mu=0.949, sigma=0.001, ), # Beta(a, b) q = dict( dist='normal', mu= 0.5, sigma=0.5, lower=0.0, ), phi = dict( dist='normal', mu= 0.658, sigma=0.001, ), ), src_light = dict( profile='SERSIC_ELLIPSE', # only available type now # Lognormal(mu, sigma^2) magnitude = dict( dist='normal', mu=20.407, sigma=0.001, ), n_sersic = dict( dist='lognormal', mu=0.7, sigma=0.4, ), R_sersic = dict( dist='normal', mu=0.4, sigma=0.01, ), # Normal(mu, sigma^2) center_x = dict( dist='normal', mu=0.035, sigma=0.001, ), center_y = dict( dist='normal', mu=-0.025, sigma=0.001, ), q = dict( dist='normal', mu=0.869, sigma=0.001, ), phi = dict( dist='normal', mu= 0.708, sigma=0.001, ), ), agn_light = dict( profile='LENSED_POSITION', # contains one of 'LENSED_POSITION' or 'SOURCE_POSITION' # Centered at host # Pre-magnification, image-plane amplitudes if 'LENSED_POSITION' # Lognormal(mu, sigma^2) magnitude = dict( dist='normal', mu=21, sigma=1, lower=0.0, ), ), )
49.39267
178
0.246555
571
9,434
3.989492
0.345009
0.073749
0.122915
0.13345
0.32309
0.303336
0.276997
0.204565
0.204565
0.075505
0
0.067012
0.693131
9,434
191
179
49.39267
0.719862
0.11713
0
0.552147
1
0
0.037947
0.003012
0
0
0
0
0
1
0
false
0.006135
0.01227
0
0.01227
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
77f68b4f89363b8438a93d741379bb05a3a09d63
1,053
py
Python
arrandmatrix/q17.py
pengfei-chen/algorithm_qa
c2ccdcb77004e88279d61e4e433ee49527fc34d6
[ "MIT" ]
79
2018-03-27T12:37:49.000Z
2022-01-21T10:18:17.000Z
arrandmatrix/q17.py
pengfei-chen/algorithm_qa
c2ccdcb77004e88279d61e4e433ee49527fc34d6
[ "MIT" ]
null
null
null
arrandmatrix/q17.py
pengfei-chen/algorithm_qa
c2ccdcb77004e88279d61e4e433ee49527fc34d6
[ "MIT" ]
27
2018-04-08T03:07:06.000Z
2021-10-30T00:01:50.000Z
""" 问题描述:给定一个矩阵matrix,其中的值有正、负和0,返回子矩阵的最大累加和. 例如,矩阵matrix为 -90 48 78 64 -40 64 -81 -7 66 其中,最大累加和的子矩阵为: 48 78 -40 64 -7 66 所以返回累加和209. 例如,matrix为: -1 -1 -1 -1 2 2 -1 -1 -1 其中,最大累加和的子矩阵为: 2 2 所以返回累加和为4. """ import sys from arrandmatrix.q16 import MaxSum class MaxMatrixSum: @classmethod def get_max_sum(cls, matrix): if not matrix: return 0 max_value = -sys.maxsize for i in range(len(matrix)): j = i pre_arr = [0 for _ in range(len(matrix[0]))] while j < len(matrix): arr = cls.arr_add(matrix[j], pre_arr) max_value = max([MaxSum.get_max_sum(arr), max_value]) j += 1 pre_arr = arr return max_value @classmethod def arr_add(cls, arr1, arr2): return [arr1[i]+arr2[i] for i in range(len(arr1))] if __name__ == '__main__': my_matrix = [ [-90, 48, 78], [64, -40, 64], [-81, -7, 66] ] print(MaxMatrixSum.get_max_sum(my_matrix))
18.155172
69
0.545109
152
1,053
3.605263
0.388158
0.018248
0.016423
0.029197
0.113139
0.062044
0.062044
0.062044
0.062044
0
0
0.102273
0.331434
1,053
58
70
18.155172
0.676136
0.205128
0
0.074074
0
0
0.009639
0
0
0
0
0
0
1
0.074074
false
0
0.074074
0.037037
0.296296
0.037037
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
77f69a445572611cb4e740401ece731d7139aa76
842
py
Python
registro/migrations/0001_initial.py
lokcito/salute
655a9407ca725785527f486b60162e406d140bfa
[ "bzip2-1.0.6" ]
null
null
null
registro/migrations/0001_initial.py
lokcito/salute
655a9407ca725785527f486b60162e406d140bfa
[ "bzip2-1.0.6" ]
null
null
null
registro/migrations/0001_initial.py
lokcito/salute
655a9407ca725785527f486b60162e406d140bfa
[ "bzip2-1.0.6" ]
null
null
null
# Generated by Django 2.2.6 on 2019-10-22 00:26 from django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='Persona', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('dato', models.CharField(max_length=100, verbose_name='Datos del R.N.')), ('fecha', models.DateTimeField(auto_now_add=True, verbose_name='Fecha y Hora Nacimiento')), ('sexo', models.CharField(max_length=20)), ('dni', models.CharField(max_length=20, verbose_name='DNI - Madre o Titular')), ('reg', models.BooleanField(default=False)), ], ), ]
32.384615
114
0.589074
93
842
5.215054
0.645161
0.090722
0.11134
0.148454
0.107216
0
0
0
0
0
0
0.036066
0.275534
842
25
115
33.68
0.759016
0.053444
0
0
1
0
0.110692
0
0
0
0
0
0
1
0
false
0
0.055556
0
0.277778
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
77f7e72e1c1010465e3c08a2178c53ca02ebd59a
362
py
Python
Beginner/Easy Math/easy_math.py
agnisain123/CodeChef-1
c6316b179e4b055eb17ead9df8f93505d8fc1166
[ "Apache-2.0" ]
null
null
null
Beginner/Easy Math/easy_math.py
agnisain123/CodeChef-1
c6316b179e4b055eb17ead9df8f93505d8fc1166
[ "Apache-2.0" ]
null
null
null
Beginner/Easy Math/easy_math.py
agnisain123/CodeChef-1
c6316b179e4b055eb17ead9df8f93505d8fc1166
[ "Apache-2.0" ]
null
null
null
t=int(input()) for _ in range(t): n=int(input()) a=list(map(int, input().split())) max_sum=0 for j in range(n): for k in range(j+1, n): num=a[j]*a[k] add=0 while(num!=0): add+=num%10 num=num//10 if max_sum<add: max_sum=add print(max_sum)
22.625
37
0.428177
57
362
2.631579
0.403509
0.16
0.12
0
0
0
0
0
0
0
0
0.037915
0.417127
362
15
38
24.133333
0.672986
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0.066667
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
77f8db530c6e11a50fc91ef02feb1663a2228e2e
25
py
Python
utokenize/testdata/func_varargs.py
MaxTurchin/pycopy-lib
d7a69fc2a28031e2ca475c29239f715c1809d8cc
[ "PSF-2.0" ]
126
2019-07-19T14:42:41.000Z
2022-03-21T22:22:19.000Z
utokenize/testdata/func_varargs.py
MaxTurchin/pycopy-lib
d7a69fc2a28031e2ca475c29239f715c1809d8cc
[ "PSF-2.0" ]
38
2019-08-28T01:46:31.000Z
2022-03-17T05:46:51.000Z
utokenize/testdata/func_varargs.py
MaxTurchin/pycopy-lib
d7a69fc2a28031e2ca475c29239f715c1809d8cc
[ "PSF-2.0" ]
55
2019-08-02T09:32:33.000Z
2021-12-22T11:25:51.000Z
def foo(a, *b): pass
8.333333
15
0.48
5
25
2.4
1
0
0
0
0
0
0
0
0
0
0
0
0.32
25
2
16
12.5
0.705882
0
0
0
0
0
0
0
0
0
0
0
0
1
0.5
false
0.5
0
0
0.5
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
0
0
0
5
77fb88e214d8327d25cb3dbd91a4d85fb6136d9e
5,686
py
Python
openapi_client/models/validation_error.py
brighthive/jdx-client-api-python
ed94c578a6c9a5e9aadf8764439c22783ac1d9d5
[ "Apache-2.0" ]
null
null
null
openapi_client/models/validation_error.py
brighthive/jdx-client-api-python
ed94c578a6c9a5e9aadf8764439c22783ac1d9d5
[ "Apache-2.0" ]
null
null
null
openapi_client/models/validation_error.py
brighthive/jdx-client-api-python
ed94c578a6c9a5e9aadf8764439c22783ac1d9d5
[ "Apache-2.0" ]
null
null
null
# coding: utf-8 """ JDX reference application API This is a collection of schemas and endpoints for the various JDX, Concentric Sky facing REST endpoints, the schemas define an API contract of sorts between the request and response expectations of the JDX reference application. This API is to be mutually developed by Concentric Sky and BrightHive. # noqa: E501 The version of the OpenAPI document: 0.0.17 Contact: engineering@brighthive.io Generated by: https://openapi-generator.tech """ import pprint import re # noqa: F401 import six class ValidationError(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'message': 'str', 'status_code': 'int', 'validation_errors': 'list[ValidationErrorValidationErrors]' } attribute_map = { 'message': 'message', 'status_code': 'statusCode', 'validation_errors': 'validationErrors' } def __init__(self, message=None, status_code=None, validation_errors=None): # noqa: E501 """ValidationError - a model defined in OpenAPI""" # noqa: E501 self._message = None self._status_code = None self._validation_errors = None self.discriminator = None if message is not None: self.message = message if status_code is not None: self.status_code = status_code if validation_errors is not None: self.validation_errors = validation_errors @property def message(self): """Gets the message of this ValidationError. # noqa: E501 :return: The message of this ValidationError. # noqa: E501 :rtype: str """ return self._message @message.setter def message(self, message): """Sets the message of this ValidationError. :param message: The message of this ValidationError. 
# noqa: E501 :type: str """ if message is not None and len(message) > 1024: raise ValueError("Invalid value for `message`, length must be less than or equal to `1024`") # noqa: E501 self._message = message @property def status_code(self): """Gets the status_code of this ValidationError. # noqa: E501 A code identifying the message response. A code of `1` indicates success. # noqa: E501 :return: The status_code of this ValidationError. # noqa: E501 :rtype: int """ return self._status_code @status_code.setter def status_code(self, status_code): """Sets the status_code of this ValidationError. A code identifying the message response. A code of `1` indicates success. # noqa: E501 :param status_code: The status_code of this ValidationError. # noqa: E501 :type: int """ if status_code is not None and status_code > 9999: # noqa: E501 raise ValueError("Invalid value for `status_code`, must be a value less than or equal to `9999`") # noqa: E501 if status_code is not None and status_code < -1: # noqa: E501 raise ValueError("Invalid value for `status_code`, must be a value greater than or equal to `-1`") # noqa: E501 self._status_code = status_code @property def validation_errors(self): """Gets the validation_errors of this ValidationError. # noqa: E501 :return: The validation_errors of this ValidationError. # noqa: E501 :rtype: list[ValidationErrorValidationErrors] """ return self._validation_errors @validation_errors.setter def validation_errors(self, validation_errors): """Sets the validation_errors of this ValidationError. :param validation_errors: The validation_errors of this ValidationError. 
# noqa: E501 :type: list[ValidationErrorValidationErrors] """ self._validation_errors = validation_errors def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, ValidationError): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other
32.678161
317
0.615195
673
5,686
5.059435
0.219911
0.073421
0.074009
0.066079
0.381791
0.290455
0.254626
0.21674
0.1163
0.096329
0
0.022088
0.299332
5,686
173
318
32.867052
0.83258
0.352796
0
0.063291
0
0
0.125678
0.011802
0
0
0
0
0
1
0.151899
false
0
0.037975
0
0.341772
0.025316
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
77fd511d1f33029866da7ab3ca7352cff624cfec
435
py
Python
exercises/play_ground/pg_008.py
EngineerToBe/python-labs
dbedcf1f8ebb4bdf756c732ad65c3b737df62cdf
[ "Apache-2.0" ]
null
null
null
exercises/play_ground/pg_008.py
EngineerToBe/python-labs
dbedcf1f8ebb4bdf756c732ad65c3b737df62cdf
[ "Apache-2.0" ]
null
null
null
exercises/play_ground/pg_008.py
EngineerToBe/python-labs
dbedcf1f8ebb4bdf756c732ad65c3b737df62cdf
[ "Apache-2.0" ]
null
null
null
# Some say that every one year of a human’s life is equivalent to seven years of a dog’s life. # Write a function named dog_years() that has two parameters named name and age. # The function should compute the age in dog years and return the following string: # "{name}, you are {age} years old in dog years" def dog_years(name, age): result = name + "," + " you are " + str(7*age) + " years old in dog years" return result
43.5
95
0.698851
77
435
3.922078
0.519481
0.13245
0.099338
0.086093
0.139073
0.139073
0
0
0
0
0
0.002933
0.216092
435
9
96
48.333333
0.882698
0.691954
0
0
0
0
0.257813
0
0
0
0
0
0
1
0.333333
false
0
0
0
0.666667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
2
77ff924bd9452e672bd6d921994120546cb53305
102
py
Python
custom_components/matts_gadgets_ceiling_fan/const.py
mattyway/matts_gadgets_ceiling_fan
f7b353fb9752f87569ec1925d08804a83e6715cf
[ "MIT" ]
null
null
null
custom_components/matts_gadgets_ceiling_fan/const.py
mattyway/matts_gadgets_ceiling_fan
f7b353fb9752f87569ec1925d08804a83e6715cf
[ "MIT" ]
null
null
null
custom_components/matts_gadgets_ceiling_fan/const.py
mattyway/matts_gadgets_ceiling_fan
f7b353fb9752f87569ec1925d08804a83e6715cf
[ "MIT" ]
null
null
null
"""Constants for the Matt's Gadgets Ceiling Fan integration.""" DOMAIN = "matts_gadgets_ceiling_fan"
25.5
63
0.77451
14
102
5.428571
0.785714
0.368421
0.447368
0
0
0
0
0
0
0
0
0
0.117647
102
3
64
34
0.844444
0.558824
0
0
0
0
0.641026
0.641026
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
ae0070be3b3f3ac59a97d47087604c2b0da1ad24
393
py
Python
src/spaceone/repository/manager/repository_manager/local_repository_manager.py
choonho/repository
9522ecad06dd4e36c718e203864d1d58574cdbfc
[ "Apache-2.0" ]
null
null
null
src/spaceone/repository/manager/repository_manager/local_repository_manager.py
choonho/repository
9522ecad06dd4e36c718e203864d1d58574cdbfc
[ "Apache-2.0" ]
null
null
null
src/spaceone/repository/manager/repository_manager/local_repository_manager.py
choonho/repository
9522ecad06dd4e36c718e203864d1d58574cdbfc
[ "Apache-2.0" ]
null
null
null
from spaceone.repository.manager.repository_manager import RepositoryManager class LocalRepositoryManager(RepositoryManager): def register_repository(self, params): """ Args: params: - name - repository_type: local """ # Assume there is only one local repository return self.repo_model.create(params)
24.5625
76
0.628499
35
393
6.942857
0.714286
0.139918
0
0
0
0
0
0
0
0
0
0
0.305344
393
15
77
26.2
0.89011
0.274809
0
0
0
0
0
0
0
0
0
0
0
1
0.25
false
0
0.25
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
2
ae03795c6c2f7e8cd5507cfab2b5d4572af1aac3
1,522
py
Python
src/python/WMCore/WMBS/MySQL/Files/AddToFileset.py
khurtado/WMCore
f74e252412e49189a92962945a94f93bec81cd1e
[ "Apache-2.0" ]
21
2015-11-19T16:18:45.000Z
2021-12-02T18:20:39.000Z
src/python/WMCore/WMBS/MySQL/Files/AddToFileset.py
khurtado/WMCore
f74e252412e49189a92962945a94f93bec81cd1e
[ "Apache-2.0" ]
5,671
2015-01-06T14:38:52.000Z
2022-03-31T22:11:14.000Z
src/python/WMCore/WMBS/MySQL/Files/AddToFileset.py
khurtado/WMCore
f74e252412e49189a92962945a94f93bec81cd1e
[ "Apache-2.0" ]
67
2015-01-21T15:55:38.000Z
2022-02-03T19:53:13.000Z
#!/usr/bin/env python """ _AddToFileset_ MySQL implementation of Files.AddToFileset """ import time from WMCore.Database.DBFormatter import DBFormatter class AddToFileset(DBFormatter): sql = """INSERT IGNORE INTO wmbs_fileset_files (fileid, fileset, insert_time) SELECT wmbs_file_details.id, :fileset, :insert_time FROM wmbs_file_details WHERE wmbs_file_details.lfn = :lfn """ sqlAvail = """INSERT IGNORE INTO wmbs_sub_files_available (subscription, fileid) SELECT wmbs_subscription.id AS subscription, wmbs_file_details.id AS fileid FROM wmbs_subscription INNER JOIN wmbs_file_details ON wmbs_file_details.lfn = :lfn WHERE wmbs_subscription.fileset = :fileset """ def execute(self, file = None, fileset = None, conn = None, transaction = False): binds = [] availBinds = [] timestamp = int(time.time()) for fileLFN in file: binds.append({"lfn": fileLFN, "fileset": fileset, "insert_time": timestamp}) availBinds.append({"lfn": fileLFN, "fileset": fileset}) self.dbi.processData(self.sql, binds, conn = conn, transaction = transaction) self.dbi.processData(self.sqlAvail, availBinds, conn = conn, transaction = transaction) return
36.238095
84
0.580158
148
1,522
5.797297
0.371622
0.055944
0.104895
0.04662
0.118881
0
0
0
0
0
0
0
0.336399
1,522
41
85
37.121951
0.849505
0.051905
0
0.137931
0
0
0.46899
0.078049
0
0
0
0
0
1
0.034483
false
0
0.068966
0
0.241379
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7ac3121e87d675bdde978afec9d75c5cc0beed90
537
py
Python
geo/models/keras/evaluation.py
stefantaubert/life
14fe8c0d631eb22f6c3f9e41430dbcfcd1718012
[ "MIT" ]
2
2018-06-28T13:02:35.000Z
2018-12-25T08:58:26.000Z
geo/models/keras/evaluation.py
stefantaubert/life
14fe8c0d631eb22f6c3f9e41430dbcfcd1718012
[ "MIT" ]
null
null
null
geo/models/keras/evaluation.py
stefantaubert/life
14fe8c0d631eb22f6c3f9e41430dbcfcd1718012
[ "MIT" ]
null
null
null
import module_support_main import pandas as pd import mrr import numpy as np import settings_main as settings import get_ranks def evaluate_results_from_files(submission_path, gt_path, species_map_path): print("Evaluate submission...") print("Load data...") df = pd.read_csv(submission_path, sep=';') y = np.load(gt_path) print("Calculate MRR-Score...") ranks = get_ranks.get_ranks_df(df, y, settings.TOP_N_SUBMISSION_RANKS) mrr_score = mrr.mrr_score(ranks) print("MRR-Score:", mrr_score * 100,"%")
29.833333
76
0.724395
81
537
4.518519
0.444444
0.10929
0.071038
0
0
0
0
0
0
0
0
0.006637
0.158287
537
17
77
31.588235
0.803097
0
0
0
0
0
0.126629
0
0
0
0
0
0
1
0.066667
false
0
0.4
0
0.466667
0.266667
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
1
7ac3b18e2b2f5eb9e5b456e6eaeeeea667be53a4
2,210
py
Python
backend/src/Blueprints/Posts.py
mechaadi/OnlineLawyerSuite
a5debc337afe3f3693978177bf53d7646ae3536b
[ "MIT" ]
3
2020-05-31T12:31:16.000Z
2021-08-29T00:00:01.000Z
backend/src/Blueprints/Posts.py
mechaadi/OnlineLawyerSuite
a5debc337afe3f3693978177bf53d7646ae3536b
[ "MIT" ]
21
2020-05-28T16:16:03.000Z
2022-02-27T06:51:53.000Z
backend/src/Blueprints/Posts.py
mechaadi/OnlineLawyerSuite
a5debc337afe3f3693978177bf53d7646ae3536b
[ "MIT" ]
null
null
null
from flask import Flask, Blueprint, g, request from src.model import Post, File from src.db import db from src.Middlewares.AuthMiddleware import * from werkzeug.utils import secure_filename import os import dateutil.parser as dt post_bp = Blueprint('post', __name__, url_prefix='/posts') def respond(data, code): responder = Responder() return responder.respond(data, code) def respond_error(msg, code): responder = Responder() return responder.respond_error(msg, code) @post_bp.route('/test') def test(): access_token = token_urlsafe(40) return 'post ok!' @post_bp.route('/create', methods=['POST']) @check_auth def _create_post(): body = request.json print(body) title = body['title'] content = body['content'] pub_at = dt.parse(body['pub_at']) # tags = json.loads(json.dumps(body['tags'])) #images = json.loads(json.dumps(body['images'])) #print(body['images']) p = Post(title=title, content=content, images=body['images'],pub_at=pub_at, user=g.user.id) with db.atomic() as tx: try: p.save() return respond(p.to_dict(), 201) except Exception as e: print(e) return respond_error(str(e), 500) @post_bp.route('/', methods=['GET']) def _get_all_posts(): post = Post.select() post = [p.to_dict() for p in post] return respond(post, 201) @post_bp.route('/<id>', methods=['GET']) @check_auth def _get_by_id(id): post = Post.get_by_id(id) return respond(post.to_dict(), 201) @post_bp.route('/<id>', methods=['delete']) @check_auth def delete_post(id): post = Post.get_or_none(Post.id == id) if post is not None: if post.user.id == g.user.id: with db.atomic() as tx: try: deleted_post = post q = Post.delete().where(Post.id == post.id) q.execute() return respond(deleted_post.to_dict(), 201) except Exception as e: return respond_error(str(e), 500) else: return respond_error("UNAUTHORIZED USER", 404) else: return respond_error("POST NOT FOUND", 404)
24.555556
95
0.609502
303
2,210
4.293729
0.310231
0.079939
0.042275
0.043044
0.257494
0.223674
0.120676
0.039969
0.039969
0
0
0.015729
0.252036
2,210
89
96
24.831461
0.771325
0.050226
0
0.241935
0
0
0.05364
0
0
0
0
0
0
1
0.112903
false
0
0.112903
0
0.403226
0.064516
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7ac6dc3dc3bb8ad5ce9781cd7166fbd71e9510f9
9,349
py
Python
felpy/analysis/optics/complex/coherence.py
twguest/FELpy
0ac9dd965b0d8e04dddbf2c9aef5ac137d1f0dfd
[ "Apache-2.0" ]
1
2021-03-15T14:04:19.000Z
2021-03-15T14:04:19.000Z
felpy/analysis/optics/complex/coherence.py
twguest/FELpy
0ac9dd965b0d8e04dddbf2c9aef5ac137d1f0dfd
[ "Apache-2.0" ]
2
2021-11-27T11:55:48.000Z
2021-11-27T11:56:26.000Z
felpy/analysis/optics/complex/coherence.py
twguest/FELpy
0ac9dd965b0d8e04dddbf2c9aef5ac137d1f0dfd
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Mon Jun 15 14:24:28 2020 @author: twguest """ import os import numpy as np from time import time from wpg import srwlib from felpy.model.tools import radial_profile #from wpg.wpg_uti_wf import get_axis from tqdm import tqdm from felpy.utils.job_utils import JobScheduler #import wpg.srwlib as srwl from wpg.srw import srwlpy as srwl from felpy.utils.np_utils import memory_map, readMap import multiprocessing as mp from functools import partial from scipy.sparse import csr_matrix from felpy.model.materials.mirror_surface import binArray def get_longitudinal_coherence(slice_no, cfr, map_loc = None, bins = 1, VERBOSE = True): """ Calculate the longitudinal correlation of each slice of a complex wavefront of shape [nx, ny, nz] against a single slice of shape [nx,ny] at longitudinal interval defined by the slice_no :param cfr: complex wavefield :param slice_no: longitudinal index [int] :returns g: complex degree of coherence """ A = np.roll(cfr, -slice_no, axis = 2) B = np.repeat(cfr[:,:,slice_no][:, :, np.newaxis], cfr.shape[-1], axis=-1) ## DEGUB print(A[:,:,0] == wfr[:,:,i]) ## DEBUG print([B[:,:,k] == wfr[:,:,i] for k in range(wfr.shape[-1])]) if map_loc is not None: mmap = memory_map(map_loc, shape = cfr.shape, dtype = 'complex64') mmap[:,:,slice_no] = ((A*B.conjugate()).mean(axis = -1))/np.sqrt( (abs(A)**2).mean(axis=-1)*(abs(B)**2).mean(axis = -1)) else: return ((A*B.conjugate()).mean(axis = -1))/np.sqrt( (abs(A)**2).mean(axis=-1)*(abs(B)**2).mean(axis = -1)) def get_longitudinal_coherence_new(slice_no, cfr, map_loc = None, bins = 1, VERBOSE = True): """ Calculate the longitudinal correlation of each slice of a complex wavefront of shape [nx, ny, nz] against a single slice of shape [nx,ny] at longitudinal interval defined by the slice_no :param cfr: complex wavefield :param slice_no: longitudinal index [int] :returns g: complex degree of coherence """ A = np.roll(cfr, -slice_no, axis = 2) B = 
np.repeat(cfr[:,:,slice_no][:, :, np.newaxis], cfr.shape[-1], axis=-1) ## DEGUB print(A[:,:,0] == wfr[:,:,i]) ## DEBUG print([B[:,:,k] == wfr[:,:,i] for k in range(wfr.shape[-1])]) if map_loc is not None: mmap = memory_map(map_loc, shape = cfr.shape, dtype = 'complex64') mmap[:,:,slice_no] = ((A*B.conjugate()).mean(axis = -1))/np.sqrt( (abs(A)**2).mean(axis=-1)*(abs(B)**2).mean(axis = -1)) else: return ((A*B.conjugate()).mean(axis = -1))/np.sqrt( (abs(A)**2).mean(axis=-1)*(abs(B)**2).mean(axis = -1)) def get_coherence_time_new(cfr, tStep, mpi = False, map_loc = "/tmp/coherence_map", bins = 1, VERBOSE = True): """ Calculate the coherence time of complex wavefield of shape [nx, ny, nt]. Relevant for statistically stationary sources. ref: Coherence properties of the radiation from X-ray free electron laser :param cfr: complex wavefield :param tstep: temporal step between slices :returns tau: coherence time [s] """ mmap = memory_map(map_loc = map_loc, shape = cfr.shape, dtype = 'complex64') nz0 = cfr.shape[-1] if bins == 1: nz1 = nz0 else: cfr = binArray(cfr, axis = -1, binstep = nz0//bins, binsize = 1 ) nz1 = cfr.shape[-1] tStep *= (nz0/nz1) g = np.zeros([*cfr.shape], dtype = 'complex64') if VERBOSE: print("Calculating Coherence Time") if mpi: processes = mp.cpu_count()//2 pool = mp.Pool(processes) pool.map(partial(get_longitudinal_coherence, cfr = cfr, map_loc = map_loc), range(cfr.shape[-1])) g = readMap(map_loc, cfr.shape, dtype = 'complex64') else: for i in tqdm(range(cfr.shape[-1])): g[:,:,i] = get_longitudinal_coherence(slice_no = i, cfr = cfr) tau = (abs(g)**2).sum(axis = -1)[0,0] if VERBOSE: print("\n") print(tau) print("Time Step: {} fs".format(tStep*1e15)) print("Coherence Time: {:.2e} fs".format(tau*1e15*tStep)) del mmap os.remove(map_loc) return tau*tStep def get_coherence_time(cfr, tStep, mpi = False, map_loc = "/tmp/coherence_map", bins = 1, VERBOSE = True): """ Calculate the coherence time of complex wavefield of shape [nx, ny, nt]. 
Relevant for statistically stationary sources. ref: Coherence properties of the radiation from X-ray free electron laser :param cfr: complex wavefield :param tstep: temporal step between slices :returns tau: coherence time [s] """ mmap = memory_map(map_loc = map_loc, shape = cfr.shape, dtype = 'complex64') nz0 = cfr.shape[-1] if bins == 1: nz1 = nz0 else: cfr = binArray(cfr, axis = -1, binstep = nz0//bins, binsize = 1 ) nz1 = cfr.shape[-1] tStep *= (nz0/nz1) g = np.zeros([*cfr.shape], dtype = 'complex64') if VERBOSE: print("Calculating Coherence Time") if mpi: processes = mp.cpu_count()//2 pool = mp.Pool(processes) pool.map(partial(get_longitudinal_coherence, cfr = cfr, map_loc = map_loc), range(cfr.shape[-1])) g = readMap(map_loc, cfr.shape, dtype = 'complex64') else: for i in tqdm(range(cfr.shape[-1])): g[:,:,i] = get_longitudinal_coherence(slice_no = i, cfr = cfr) tau = (abs(g)**2).sum(axis = -1)[0,0] print("g", np.max(g)) if VERBOSE: print("\n") print(tau) print("Time Step: {} fs".format(tStep*1e15)) print("Coherence Time: {:.2e} fs".format(tau*1e15*tStep)) del mmap os.remove(map_loc) return tau*tStep, g def get_coherence_time_wpg(wfr, mpi = False, VERBOSE = True): srwl.SetRepresElecField(wfr._srwl_wf, 't') time_step = (wfr.params.Mesh.sliceMax - wfr.params.Mesh.sliceMin)/wfr.params.Mesh.nSlices return get_coherence_time(wfr.as_complex_array(), time_step, mpi = mpi) def get_coherence_len(wfr, dx, dy, VERBOSE = True): """ Calculate coherence length of a complex wavefield of shape [nx, ny. 
nz] :param wfr: complex wavefield :param dx: horizontal pixel size :param dy: vertical pixel size :returns Jd: complex degree of coherence :returns clen: coherence length [m] """ profile, r = get_complex_radial_profile(wfr) nt = wfr.shape[-1] J = np.dot(profile, profile.T.conjugate())/ nt II = np.abs(np.diag(J)) # intensity as the main diagonal print(J.shape) J /= II**0.5 * II[:, np.newaxis]**0.5 Jd = np.abs(np.diag(np.fliplr(J))) # DoC as the cross-diagonal lm = np.arange(Jd.shape[0]) lm = lm[(lm >= Jd.shape[0]//2) & (Jd[lm] < 0.5)] rstep = np.sqrt((dx)**2 + (dy)**2) try: lm = lm[0] - Jd.shape[0]//2 except(IndexError): lm = np.inf clen = lm*rstep if VERBOSE: print("Radial Coherence Length: {:.2f} um".format(clen*1e6)) return clen def get_transverse_doc(wfr, VERBOSE = True): """ get transverse degree of coherence of the wavefront across each of the transverse dimensions slices """ p, r = get_complex_radial_profile(wfr) nt = wfr.shape[-1] J = np.dot(p, p.T.conjugate())/nt tdoc = np.diag(np.dot(J, J)).sum() / np.diag(J).sum()**2 if VERBOSE: print("Transverse Degree of Coherence: {:.4f}".format(tdoc.real)) return tdoc def get_complex_radial_profile(wfr): """ Calculate the radial profile of a complex array by azimuthal averaging: I_{radial}(R) = \int_0^R \frac{I(r)2\pi r}{\pi R^2} dr :param wfr: complex wavefield [np array] :returns prof: radial profile """ r = radial_profile(wfr[:,:,0].real, [wfr.shape[0]//2,wfr.shape[1]//2])[1] r = np.diag(r).copy() r[:r.shape[0]//2] *= -1 rp = np.stack([radial_profile(wfr[:,:,i].real, [wfr.shape[0]//2,wfr.shape[1]//2])[0] + radial_profile(wfr[:,:,i].imag, [wfr.shape[0]//2,wfr.shape[1]//2])[0]*1j for i in range(wfr.shape[-1])]) prof = np.moveaxis(rp, 0, -1) return prof, r def coherent_test(wfr): tstep = get_axis(wfr, axis = 't') tstep = wfr.get_temporal_resolution() xstep, ystep = wfr.get_spatial_resolution() wfr = wfr.as_complex_array() tau = get_coherence_time(wfr, tstep, VERBOSE=True) clen = get_coherence_len(wfr, xstep, ystep, 
VERBOSE=True) tdoc = get_transverse_doc(wfr, VERBOSE=True) return tau, clen, tdoc if __name__ == "__main__": pass
27.578171
93
0.568403
1,287
9,349
4.035742
0.181041
0.023104
0.020793
0.033885
0.621679
0.608587
0.591837
0.591837
0.591837
0.57836
0
0.02453
0.289229
9,349
339
94
27.578171
0.757111
0.233501
0
0.56051
0
0
0.048071
0
0
0
0
0
0
1
0.057325
false
0.006369
0.082803
0
0.197452
0.089172
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7ac87bf72cc8a3c3039638b11a435af514a8fc27
911
py
Python
training_program_2019/core_python/basic/threading_join.py
jeffrey-zhang/learn
7d9d53f955f0424ad4c68f69d5538867e5b7fa98
[ "Apache-2.0" ]
1
2020-10-12T01:23:51.000Z
2020-10-12T01:23:51.000Z
training_program_2019/core_python/basic/threading_join.py
jeffrey-zhang/learn
7d9d53f955f0424ad4c68f69d5538867e5b7fa98
[ "Apache-2.0" ]
1
2020-10-11T10:38:21.000Z
2020-10-11T10:38:21.000Z
training_program_2019/core_python/basic/threading_join.py
jeffrey-zhang/learn
7d9d53f955f0424ad4c68f69d5538867e5b7fa98
[ "Apache-2.0" ]
1
2020-09-07T07:22:54.000Z
2020-09-07T07:22:54.000Z
import threading import time products = [] condition = threading.Condition() class consumer(threading.Thread): def consume(self): global condition global products condition.acquire() if len(products) == 0: condition.wait() print('consumer is notified: no product to consume') products.pop() print("consumer notification: consume 1 product") print('consumer notification: there are ' + len(products) +" left that can be consume") condition.notify() condition.release() def run(self): for i in range(0,20): time.sleep(4) self.consume() class Producer(threading.Thread): def produce(self): global condition global products condition.acquire() if len(products) == 10: condition.wait() print('consumer notified')
24.621622
95
0.594951
95
911
5.705263
0.473684
0.095941
0.066421
0.092251
0.228782
0.228782
0.228782
0.228782
0.228782
0.228782
0
0.012658
0.306257
911
37
96
24.621622
0.844937
0
0
0.275862
0
0
0.173246
0
0
0
0
0
0
1
0.103448
false
0
0.068966
0
0.241379
0.137931
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7ac89552408a022b01f6bab28396624e4b438a5a
2,168
py
Python
tests/meltano/core/test_utils.py
Mu-L/meltano
7bf8f370608ee9a8833b33ea94112c6e219c8161
[ "MIT" ]
null
null
null
tests/meltano/core/test_utils.py
Mu-L/meltano
7bf8f370608ee9a8833b33ea94112c6e219c8161
[ "MIT" ]
null
null
null
tests/meltano/core/test_utils.py
Mu-L/meltano
7bf8f370608ee9a8833b33ea94112c6e219c8161
[ "MIT" ]
null
null
null
import pytest  # noqa: F401

from meltano.core.utils import flatten, nest, pop_at_path, set_at_path


def test_nest():
    """`nest` lazily creates nested dicts and returns the innermost node."""
    config = {}

    first = nest(config, "a.b")
    first["val"] = 1
    assert first == {"val": 1}

    second = nest(config, "a.b.c")
    second["val"] = 2
    assert first == {"val": 1, "c": {"val": 2}}

    arr = nest(config, "a.list", value=[])

    seed = {"value": 1}
    stored = nest(config, "a.value", value=seed)

    # the created nodes stay reachable through the root dict
    assert config["a"]["b"] is first
    assert config["a"]["b"]["c"] is second
    assert isinstance(arr, list)

    # make sure it is a copy
    assert stored == seed and stored is not seed


def test_pop_at_path():
    """`pop_at_path` removes a leaf and prunes emptied parents."""
    config = {}
    pop_at_path(config, "a.b.c")
    assert not config

    config = {"a": {"b": {"c": "value"}}}
    pop_at_path(config, "a.b.c")
    assert not config

    # a list path treats dotted keys as literal keys
    config = {"a": {"b.c": "value"}}
    pop_at_path(config, ["a", "b.c"])
    assert not config

    config = {"a": {"b": {"c": "value", "d": "value"}, "e": "value"}}
    pop_at_path(config, "a.b.c")
    assert config == {"a": {"b": {"d": "value"}, "e": "value"}}

    pop_at_path(config, "a.b.d")
    assert config == {"a": {"e": "value"}}

    pop_at_path(config, "a.e")
    assert not config


def test_set_at_path():
    """`set_at_path` writes leaves, replacing whole subtrees when needed."""
    config = {}

    set_at_path(config, "a.b.c", "value")
    assert config == {"a": {"b": {"c": "value"}}}

    set_at_path(config, "a.b.d", "value")
    assert config == {"a": {"b": {"c": "value", "d": "value"}}}

    # writing to an intermediate node overwrites its subtree
    set_at_path(config, "a.b", "value")
    assert config == {"a": {"b": "value"}}

    set_at_path(config, "a.b", "newvalue")
    assert config == {"a": {"b": "newvalue"}}

    set_at_path(config, "a.b.c", "value")
    assert config == {"a": {"b": {"c": "value"}}}

    set_at_path(config, ["a", "d.e"], "value")
    assert config == {"a": {"b": {"c": "value"}, "d.e": "value"}}


def test_flatten():
    """`flatten` joins nested keys with dots in "dot" mode."""
    nested_config = {"_update": {"orchestrate/dags/meltano.py": False}}
    expected = {"_update.orchestrate/dags/meltano.py": False}

    assert flatten(nested_config, "dot") == expected
27.1
72
0.565959
318
2,168
3.688679
0.166667
0.197783
0.184143
0.127877
0.571185
0.457801
0.396419
0.336743
0.290708
0.28474
0
0.005257
0.210332
2,168
79
73
27.443038
0.679907
0.015221
0
0.264151
0
0
0.152439
0.029081
0
0
0
0
0.358491
1
0.075472
false
0
0.037736
0
0.113208
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
7ac9a5104e19b66025a9a8ecc99a6c8ec491db7d
927
py
Python
phc/easy/diagnostic_report.py
taylordeatri/phc-sdk-py
8f3ec6ac44e50c7194f174fd0098de390886693d
[ "MIT" ]
1
2020-07-22T12:46:58.000Z
2020-07-22T12:46:58.000Z
phc/easy/diagnostic_report.py
taylordeatri/phc-sdk-py
8f3ec6ac44e50c7194f174fd0098de390886693d
[ "MIT" ]
54
2019-10-09T16:19:04.000Z
2022-01-19T20:28:59.000Z
phc/easy/diagnostic_report.py
taylordeatri/phc-sdk-py
8f3ec6ac44e50c7194f174fd0098de390886693d
[ "MIT" ]
2
2019-10-30T19:54:43.000Z
2020-12-03T18:57:15.000Z
import pandas as pd

from phc.easy.frame import Frame
from phc.easy.abstract.fhir_service_patient_item import FhirServicePatientItem


class DiagnosticReport(FhirServicePatientItem):
    """Patient-scoped accessor for the FHIR ``diagnostic_report`` table."""

    @staticmethod
    def table_name():
        # backing table for this entity
        return "diagnostic_report"

    @staticmethod
    def patient_id_prefixes():
        return ["Patient/", "urn:uuid:"]

    @staticmethod
    def patient_key():
        return "subject.reference"

    @staticmethod
    def code_fields():
        return ["meta.tag"]

    @staticmethod
    def transform_results(df: pd.DataFrame, **expand_args):
        """Expand codeable-like columns after any caller-supplied custom columns."""
        caller_columns = expand_args.get("custom_columns", [])
        expanders = [
            Frame.codeable_like_column_expander(column)
            for column in ("subject", "presentedForm", "result")
        ]
        return Frame.expand(df, custom_columns=[*caller_columns, *expanders])
26.485714
78
0.638619
91
927
6.252747
0.538462
0.13181
0.089631
0.121265
0.163445
0
0
0
0
0
0
0
0.265372
927
34
79
27.264706
0.835536
0
0
0.185185
0
0
0.106796
0
0
0
0
0
0
1
0.185185
false
0
0.111111
0.185185
0.518519
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
1
0
0
0
1
7ac9c5a18f03b6f72c0a1607e900467a67c17aa0
1,048
py
Python
tests/performance/test_mm_lazy_eval.py
varun19299/FeatherMap
a3991ce48eed98584bc12d6ddcb6409ef3db5d60
[ "MIT" ]
14
2020-11-05T00:24:40.000Z
2022-03-30T15:22:31.000Z
tests/performance/test_mm_lazy_eval.py
varun19299/FeatherMap
a3991ce48eed98584bc12d6ddcb6409ef3db5d60
[ "MIT" ]
4
2020-10-01T00:46:39.000Z
2021-02-26T23:38:09.000Z
tests/performance/test_mm_lazy_eval.py
varun19299/FeatherMap
a3991ce48eed98584bc12d6ddcb6409ef3db5d60
[ "MIT" ]
2
2020-11-09T13:09:10.000Z
2021-02-18T11:09:32.000Z
import torch
from feathermap.utils import timed
from math import sqrt

# Benchmark fixtures: a tall/wide pair, two large squares, and small variants.
dim_in = 2 ** 14
dim_out = 2 ** 4

A = torch.randn(dim_in, dim_out)
B = torch.randn(dim_out, dim_in)
C = torch.rand(dim_in, dim_in)  # NOTE(review): only used by the commented-out runs below
D = torch.rand(dim_in, dim_in)  # NOTE(review): appears unused — kept to preserve behavior
E = torch.rand(1, dim_out)
F = torch.rand(dim_out, dim_in)
G = torch.rand(int(sqrt(dim_in)), int(sqrt(dim_in)))
H = torch.rand(int(sqrt(dim_in)), int(sqrt(dim_in)))


@timed
def mam(a, b):
    """Time 10000 eager torch.mm calls; return the last product."""
    for _ in range(10000):
        product = torch.mm(a, b)
    return product


def loop(a, b):
    """Yield each entry of a @ b, one dot product at a time (lazy)."""
    for row in range(a.size(0)):
        for col in range(b.size(1)):
            yield a[row, :] @ b[:, col]


def loop2(a, b):
    """Yield a constant per output entry — measures pure loop overhead."""
    for row in range(a.size(0)):
        for col in range(b.size(1)):
            yield 1


def tmm(a, b):
    """Materialize a @ b eagerly, then hand back an iterator over entries."""
    flat = torch.mm(a, b).view(-1, 1)
    return iter(flat)


@timed
def run(c, dim_in):
    """Drain iterator `c` into a flat buffer of dim_in**2 elements."""
    sink = torch.empty(dim_in ** 2)
    for i in range(sink.numel()):
        sink[i] = next(c)


mam(E, F)  # about 23% faster
mam(G, H)
# run(loop(A, B), dim_in)   # 739
# run(loop2(A, B), dim_in)  # 254
# run(tmm(A, B), dim_in)    # 289
18.714286
52
0.578244
208
1,048
2.802885
0.254808
0.145798
0.077187
0.082333
0.319039
0.319039
0.253859
0.253859
0.253859
0.253859
0
0.040353
0.243321
1,048
55
53
19.054545
0.69483
0.101145
0
0.166667
0
0
0
0
0
0
0
0
0
1
0.138889
false
0
0.083333
0
0.277778
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7ad04a1e5c739e7d4690a2753dc470af7567beec
785
py
Python
PythonWeb/example/App/urls.py
JimouChen/python-application
b7b16506a17e2c304d1c5fabd6385e96be211c56
[ "Apache-2.0" ]
1
2020-08-09T12:47:27.000Z
2020-08-09T12:47:27.000Z
PythonWeb/example/App/urls.py
JimouChen/Python_Application
b7b16506a17e2c304d1c5fabd6385e96be211c56
[ "Apache-2.0" ]
null
null
null
PythonWeb/example/App/urls.py
JimouChen/Python_Application
b7b16506a17e2c304d1c5fabd6385e96be211c56
[ "Apache-2.0" ]
null
null
null
from django.contrib import admin
from django.urls import path

from App import views

app_name = 'App'

# URL routes for the App application; order matters for resolution.
urlpatterns = [
    path('', views.home, name='home'),
    # create / update / delete
    path('cud/', views.handle_data, name='handle_data'),
    # query
    path('search/', views.find_data, name='search'),
    # raw SQL access
    path('rawsql/', views.raw_sql, name='raw_sql'),
    # custom model manager (use only if needed)
    path('manager/', views.my_manager, name='my_manager'),
    # registration page
    path('register/', views.handle_register, name='register'),
    # login page
    path('login/', views.handle_login, name='login'),
    # show user info
    path('show/', views.show_msg, name='show'),
    # movie listing
    path('movie/', views.show_movie, name='movie'),
    # pagination via django's built-in paginator
    path('movie_page/', views.show_movie_page, name='movie_page'),
]
27.068966
66
0.63949
102
785
4.764706
0.372549
0.067901
0.057613
0
0
0
0
0
0
0
0
0
0.185987
785
28
67
28.035714
0.760563
0.081529
0
0
0
0
0.19128
0
0
0
0
0
0
1
0
false
0
0.1875
0
0.1875
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7ad2462093f3bfe8307d27ce00eeee4b819dafcf
5,098
py
Python
carla_walker_agent/src/carla_walker_agent/carla_walker_agent.py
umateusz/ros-bridge
e1a99d94eca9fa82c7bfb8417d2282ef6939d8fa
[ "MIT" ]
null
null
null
carla_walker_agent/src/carla_walker_agent/carla_walker_agent.py
umateusz/ros-bridge
e1a99d94eca9fa82c7bfb8417d2282ef6939d8fa
[ "MIT" ]
null
null
null
carla_walker_agent/src/carla_walker_agent/carla_walker_agent.py
umateusz/ros-bridge
e1a99d94eca9fa82c7bfb8417d2282ef6939d8fa
[ "MIT" ]
null
null
null
#!/usr/bin/env python
#
# Copyright (c) 2020 Intel Corporation
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
#
"""
Agent for Walker
"""
import math

from nav_msgs.msg import Path, Odometry
from std_msgs.msg import Float64
from geometry_msgs.msg import Pose, Vector3
from carla_msgs.msg import CarlaWalkerControl
from ros_compatibility import (
    CompatibleNode, QoSProfile, ros_ok, ROSInterruptException, ros_init)

import os

ROS_VERSION = int(os.environ['ROS_VERSION'])

if ROS_VERSION == 1:
    import rospy
elif ROS_VERSION == 2:
    import time
    import threading


class CarlaWalkerAgent(CompatibleNode):
    """
    Walker agent: follows a published waypoint route at a configurable
    target speed, publishing CarlaWalkerControl commands.
    """

    # minimum distance to target waypoint before switching to next
    MIN_DISTANCE = 0.5

    def __init__(self):
        """
        Constructor: read parameters, wait for the ros bridge, and wire up
        all subscribers/publishers.
        """
        super(CarlaWalkerAgent, self).__init__('carla_walker_agent')

        role_name = self.get_param("role_name", "ego_vehicle")
        self._target_speed = self.get_param("target_speed", 2.0)

        self._route_assigned = False
        self._waypoints = []        # remaining Pose targets, consumed front-first
        self._current_pose = Pose() # latest pose from odometry
        self.on_shutdown(self._on_shutdown)

        # wait for ros bridge to create relevant topics
        try:
            self.wait_for_one_message(
                "/carla/{}/odometry".format(role_name), Odometry)
        except ROSInterruptException as e:
            # BUG FIX: `ros_ok` is a function; testing the function object is
            # always truthy, so the original `if not ros_ok:` never re-raised.
            # Call it, matching `while ros_ok():` in run().
            if not ros_ok():
                raise e

        self._odometry_subscriber = self.create_subscriber(
            Odometry, "/carla/{}/odometry".format(role_name), self.odometry_updated)

        self.control_publisher = self.new_publisher(
            CarlaWalkerControl, "/carla/{}/walker_control_cmd".format(role_name),
            QoSProfile(depth=1, durability=False))

        self._route_subscriber = self.create_subscriber(
            Path, "/carla/{}/waypoints".format(role_name), self.path_updated)

        self._target_speed_subscriber = self.create_subscriber(
            Float64, "/carla/{}/target_speed".format(role_name),
            self.target_speed_updated)

    def _on_shutdown(self):
        """
        callback on shutdown: command the walker to stop
        """
        self.loginfo("Shutting down, stopping walker...")
        self.control_publisher.publish(CarlaWalkerControl())  # stop

    def target_speed_updated(self, target_speed):
        """
        callback on new target speed
        """
        self.loginfo("New target speed received: {}".format(target_speed.data))
        self._target_speed = target_speed.data

    def path_updated(self, path):
        """
        callback on new route: stop, then replace the waypoint queue
        """
        self.loginfo("New plan with {} waypoints received. Assigning plan...".format(
            len(path.poses)))
        self.control_publisher.publish(CarlaWalkerControl())  # stop
        self._waypoints = []
        for elem in path.poses:
            self._waypoints.append(elem.pose)

    def odometry_updated(self, odo):
        """
        callback on new odometry
        """
        self._current_pose = odo.pose.pose

    def run(self):
        """
        Control loop: steer toward the front waypoint, popping waypoints
        once within MIN_DISTANCE, until ROS shuts down.

        :return:
        """
        loop_frequency = 20
        if ROS_VERSION == 1:
            r = rospy.Rate(loop_frequency)
        self.loginfo("Starting run loop")
        while ros_ok():
            if self._waypoints:
                control = CarlaWalkerControl()
                direction = Vector3()
                direction.x = self._waypoints[0].position.x - self._current_pose.position.x
                direction.y = self._waypoints[0].position.y - self._current_pose.position.y
                direction_norm = math.sqrt(direction.x**2 + direction.y**2)
                if direction_norm > CarlaWalkerAgent.MIN_DISTANCE:
                    # still far away: walk toward the waypoint at target speed
                    control.speed = self._target_speed
                    control.direction.x = direction.x / direction_norm
                    control.direction.y = direction.y / direction_norm
                else:
                    # close enough: advance to the next waypoint
                    self._waypoints = self._waypoints[1:]
                    if self._waypoints:
                        self.loginfo("next waypoint: {} {}".format(
                            self._waypoints[0].position.x, self._waypoints[0].position.y))
                    else:
                        self.loginfo("Route finished.")
                self.control_publisher.publish(control)
            try:
                if ROS_VERSION == 1:
                    r.sleep()
                elif ROS_VERSION == 2:
                    # TODO: use rclpy.Rate, not working yet
                    time.sleep(1 / loop_frequency)
            except ROSInterruptException:
                pass


def main(args=None):
    """
    main function

    :return:
    """
    ros_init(args)

    controller = CarlaWalkerAgent()
    if ROS_VERSION == 2:
        spin_thread = threading.Thread(target=controller.spin, daemon=True)
        spin_thread.start()
    try:
        controller.run()
    finally:
        del controller
        print("Done")


if __name__ == "__main__":
    main()
29.988235
92
0.594547
549
5,098
5.298725
0.311475
0.049158
0.030938
0.030251
0.095222
0.052252
0
0
0
0
0
0.008745
0.304629
5,098
169
93
30.16568
0.811848
0.100235
0
0.156863
0
0
0.078887
0.0114
0
0
0
0.005917
0
1
0.068627
false
0.009804
0.098039
0
0.186275
0.009804
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7ad29cf27964c995c953363ee94f815f643e4137
44
py
Python
sdk/exception/exceed_limit_exception.py
CLG0125/elemesdk
344466398bad7cf026e082e47c77d3ca98621ef3
[ "MIT" ]
1
2021-04-03T05:11:29.000Z
2021-04-03T05:11:29.000Z
sdk/exception/exceed_limit_exception.py
CLG0125/elemesdk
344466398bad7cf026e082e47c77d3ca98621ef3
[ "MIT" ]
null
null
null
sdk/exception/exceed_limit_exception.py
CLG0125/elemesdk
344466398bad7cf026e082e47c77d3ca98621ef3
[ "MIT" ]
null
null
null
class ExceedLimitException(Exception):
    """Raised when a configured limit is exceeded."""
14.666667
42
0.863636
4
44
9.5
1
0
0
0
0
0
0
0
0
0
0
0
0.068182
44
2
43
22
0.926829
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
1
0
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
1
0
0
6
7ad5b9de70a9e9b65388bd37817ad835c2ba9f1f
5,125
py
Python
megyr/config_validation.py
ExcaliburZero/megyr
f5543cebe0562c78b5d3c710bd6f11c0efbff25b
[ "MIT" ]
1
2020-11-17T20:35:09.000Z
2020-11-17T20:35:09.000Z
megyr/config_validation.py
ExcaliburZero/megyr
f5543cebe0562c78b5d3c710bd6f11c0efbff25b
[ "MIT" ]
17
2018-09-25T18:37:17.000Z
2020-03-31T03:54:36.000Z
megyr/config_validation.py
ExcaliburZero/megyr
f5543cebe0562c78b5d3c710bd6f11c0efbff25b
[ "MIT" ]
null
null
null
from typing import Any, Dict, List


def validate_config(config: Dict[str, Any]) -> List[str]:
    """Validate a run config, returning a list of human-readable errors.

    An empty list means every check passed. Each error string starts with
    a bracketed code such as ``[no_input]``.
    """
    errors: List[str] = []

    assert_to_list(
        errors,
        "input" in config,
        '[no_input] Could not find "input" section in config. "input" section must be present in order to run MESA or GYRE.',
    )
    assert_to_list(
        errors,
        "stages" in config,
        '[no_stages] Could not find "stages" section in config. "stages" section must be present in order to run MESA or GYRE.',
    )
    assert_to_list(
        errors,
        nested_in(config, ["input", "mesa_configs"]),
        '[no_mesa_configs] Could not find "mesa_configs" setting in "input" section in config. The "mesa_configs" setting must be present in order to run MESA.',
    )

    if should_run_gyre(config):
        assert_to_list(
            errors,
            nested_in(config, ["stages", "gyre_params"]),
            '[no_gyre_params] Could not find "gyre_params" setting in "stages" section of config. GYRE is set to run, but needs this setting to know what value combinations to try.',
        )
    else:
        # GYRE is disabled: flag any GYRE-specific settings that are present.
        gyre_missing_msg = '[gyre_not_enabled] Found "{}" setting in "{}" section of config even though GYRE is not enabled. "gyre_config" in the "input" section must be specified in order to run GYRE.'

        for setting, section in (
            ("gyre_oscillations_summary_file", "output"),
            ("gyre_location", "settings"),
            ("gyre_mp_threads", "settings"),
            ("gyre_params", "stages"),
            ("gyre_derived", "stages"),
        ):
            assert_to_list(
                errors,
                not nested_in(config, [section, setting]),
                gyre_missing_msg.format(setting, section),
            )

    return errors


def set_defaults(config: Dict[str, Any]) -> None:
    """Fill in default output/settings values in place."""
    ### Output / Settings defaults that apply unconditionally when missing
    plain_defaults = (
        (["output", "output_dir"], "out"),
        (["output", "mesa_profile_summary_file"], "mesa_profile_attributes.csv"),
        (["settings", "mesa_star_location"], "star"),
        (["settings", "gyre_location"], "$GYRE_DIR/bin/gyre"),
    )
    for keys, default in plain_defaults:
        if not nested_in(config, keys):
            nested_put(config, keys, default)

    # The GYRE thread count falls back to the MESA thread count when only
    # the latter is configured.
    if not nested_in(config, ["settings", "gyre_mp_threads"]) and nested_in(
        config, ["settings", "mesa_mp_threads"]
    ):
        nested_put(
            config,
            ["settings", "gyre_mp_threads"],
            config["settings"]["mesa_mp_threads"],
        )


def assert_to_list(errors: List[str], condition: bool, message: str) -> None:
    """Append `message` to `errors` when `condition` does not hold."""
    if not condition:
        errors.append(message)


def should_run_gyre(config: Dict[str, Any]) -> bool:
    """GYRE runs iff the config names a GYRE config file."""
    return nested_in(config, ["input", "gyre_config"])


def nested_in(config: Dict[str, Any], nested_keys: List[str]) -> bool:
    """
    Check whether every key along `nested_keys` exists in the given dict.

    Returns False if any intermediate key or the final key is missing.

    >>> nested_in({}, ["settings", "gyre_mp_threads"])
    False
    >>> nested_in({"settings": {}}, ["settings", "gyre_mp_threads"])
    False
    >>> nested_in({"settings": {"gyre_mp_threads": 4}}, ["settings", "gyre_mp_threads"])
    True
    """
    node = config
    for key in nested_keys:
        if key not in node:
            return False
        node = node[key]
    return True


def nested_put(config: Dict[str, Any], nested_keys: List[str], value: Any) -> None:
    """
    Store `value` under the nested key path, creating intermediate dicts
    as needed.

    >>> config = {}
    >>> nested_put(config, ["key"], "value")
    >>> config["key"]
    'value'
    >>> config = {}
    >>> nested_put(config, ["settings", "gyre_mp_threads"], 2)
    >>> config["settings"]["gyre_mp_threads"]
    2
    """
    if len(nested_keys) == 0:
        raise Exception("Invalid number of nested keys.")

    *parents, leaf = nested_keys
    node = config
    for key in parents:
        node = node.setdefault(key, {})
    node[leaf] = value
33.717105
202
0.606244
649
5,125
4.563945
0.181818
0.067522
0.085078
0.06077
0.435517
0.334571
0.272451
0.204254
0.144497
0.139095
0
0.002125
0.265366
5,125
151
203
33.940397
0.784595
0.169561
0
0.263158
0
0.052632
0.331964
0.0331
0
0
0
0
0.105263
1
0.063158
false
0
0.010526
0.010526
0.115789
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7ad6399647944ee35628a46c260ef1d398254324
81
py
Python
src/polytropon/__init__.py
xhlulu/polytropon
fd4df35a479f17ca1e3f568d96a95c1b8b5b518f
[ "MIT" ]
14
2022-03-02T10:25:22.000Z
2022-03-13T01:22:08.000Z
src/polytropon/__init__.py
xhlulu/polytropon
fd4df35a479f17ca1e3f568d96a95c1b8b5b518f
[ "MIT" ]
2
2022-03-02T15:56:37.000Z
2022-03-29T17:40:34.000Z
src/polytropon/__init__.py
xhlulu/polytropon
fd4df35a479f17ca1e3f568d96a95c1b8b5b518f
[ "MIT" ]
2
2022-03-02T15:59:47.000Z
2022-03-03T01:14:45.000Z
from . import adapters, utils from .polytropon import VARIANT2CLASS, SkilledMixin
40.5
51
0.839506
9
81
7.555556
0.777778
0
0
0
0
0
0
0
0
0
0
0.013889
0.111111
81
2
51
40.5
0.930556
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
7ad705963a4ae17d0227c03011bc22494dcbdf66
674
py
Python
cdhweb/pages/tests/test_context_processors.py
bwhicks/cdh-web
d6002dc1933a4d6e97f5459aafc9ab92cb1f8050
[ "Apache-2.0" ]
1
2017-11-21T16:02:33.000Z
2017-11-21T16:02:33.000Z
cdhweb/pages/tests/test_context_processors.py
bwhicks/cdh-web
d6002dc1933a4d6e97f5459aafc9ab92cb1f8050
[ "Apache-2.0" ]
367
2017-08-14T16:05:41.000Z
2021-11-03T15:29:18.000Z
cdhweb/pages/tests/test_context_processors.py
bwhicks/cdh-web
d6002dc1933a4d6e97f5459aafc9ab92cb1f8050
[ "Apache-2.0" ]
5
2017-09-08T21:08:49.000Z
2020-10-02T04:39:37.000Z
import pytest
from wagtail.core.models import Page

from cdhweb.pages.context_processors import page_intro
from cdhweb.pages.models import LinkPage, PageIntro


@pytest.mark.django_db
def test_page_intro(rf):
    """`page_intro` exposes the PageIntro matching the requested URL."""
    root = Page.objects.get(title="Root")
    students = LinkPage(title="Students", link_url="people/students")
    root.add_child(instance=students)
    intro = PageIntro.objects.create(
        page=students, paragraph="<p>We have great students</p>"
    )

    # should find a page intro for students
    assert page_intro(rf.get("/people/students/")) == {"page_intro": intro}
    # but not not for staff
    assert page_intro(rf.get("/people/staff/")) == {}
32.095238
75
0.718101
96
674
4.90625
0.46875
0.133758
0.070064
0.072187
0.110403
0.110403
0
0
0
0
0
0
0.15727
674
20
76
33.7
0.829225
0.087537
0
0
0
0
0.158497
0
0
0
0
0
0.142857
1
0.071429
false
0
0.285714
0
0.357143
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7ad8ae63fb76ddfd06eb6578ef0cef5a87d9b13a
641
py
Python
tri_selection.py
Erwanexyz/Python
1197c0a27530b60e6cbe048758bfe86f0e159e95
[ "MIT" ]
1
2017-09-07T09:14:55.000Z
2017-09-07T09:14:55.000Z
tri_selection.py
Erwanexyz/Python
1197c0a27530b60e6cbe048758bfe86f0e159e95
[ "MIT" ]
null
null
null
tri_selection.py
Erwanexyz/Python
1197c0a27530b60e6cbe048758bfe86f0e159e95
[ "MIT" ]
null
null
null
def tri_selection(tableau):
    """Sort `tableau` in place by repeated exchange and return it.

    For every anchor position, any later element that compares smaller is
    swapped forward, so after pass ``i`` the prefix ``tableau[:i+1]`` holds
    its final sorted values (exchange-style selection sort, O(n**2)).
    """
    size = len(tableau)
    for anchor in range(size):
        # pull the smallest remaining element into `anchor` via swaps
        for candidate in range(anchor, size):
            if tableau[candidate] < tableau[anchor]:
                tableau[anchor], tableau[candidate] = (
                    tableau[candidate],
                    tableau[anchor],
                )
    return tableau


print(tri_selection([3, 2, 1, 4, 8, 4, 10, 9, 8, 32, 91]))
37.705882
79
0.609984
96
641
4.041667
0.53125
0.103093
0.115979
0.082474
0
0
0
0
0
0
0
0.029536
0.26053
641
17
80
37.705882
0.78903
0.393136
0
0
0
0
0
0
0
0
0
0
0
1
0.125
false
0
0
0
0.25
0.125
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7ad8d19aeb7a5e334f966fd5933774700a903b04
5,297
py
Python
main.py
AKBakshay/DCNet
94d30ae44c95a8db7f7474fcf8cf63347271c1cb
[ "MIT" ]
3
2021-04-18T07:56:18.000Z
2021-08-15T11:30:25.000Z
main.py
AKBakshay/DCNet
94d30ae44c95a8db7f7474fcf8cf63347271c1cb
[ "MIT" ]
null
null
null
main.py
AKBakshay/DCNet
94d30ae44c95a8db7f7474fcf8cf63347271c1cb
[ "MIT" ]
null
null
null
from argparse import ArgumentParser

import torch
import torch.utils.data
import yaml
from torchvision import transforms

import src.config.config as config
from src.model.nn.dcnet import DCNet
from src.test.predictor import Predictor
from src.test.tester import Tester
from src.train.trainer import Trainer


def _build_transform(cfg):
    """Shared eval-time transform: ToTensor plus an optional max-size resize."""
    steps = [transforms.ToTensor()]
    if cfg["image"]["size_reduction"]:
        steps.append(transforms.Resize(size=cfg["image"]["max_size"]))
    return transforms.Compose(steps)


def _build_model(cfg):
    """Construct DCNet and initialize weights/device placement per config."""
    model = DCNet()
    model.initialize(cfg["basic"]["load_weight"], cfg["basic"]["cuda"])
    return model


def train(cfg):
    """Train DCNet with the data, optimizer and schedule described by `cfg`."""
    # -------------------- data ------------------------
    # training data is augmented with random crops and horizontal flips
    training_data_transform = transforms.Compose(
        [
            transforms.RandomCrop(cfg["train"]["crop_size"]),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
        ]
    )
    validation_data_transform = _build_transform(cfg)

    # -------------------- model -----------------------
    model = _build_model(cfg)

    # ------------------ training setup ----------------
    criterion = torch.nn.MSELoss(reduction="mean")
    optimizer = torch.optim.RMSprop(
        model.parameters(),
        lr=cfg["train"]["learning_rate"],
        alpha=cfg["train"]["alpha"],
        momentum=cfg["train"]["momentum"],
    )
    exp_lr_scheduler = torch.optim.lr_scheduler.StepLR(
        optimizer,
        step_size=cfg["train"]["scheduler_steps"],
        gamma=cfg["train"]["gamma"],
    )
    model.train()

    trainer = Trainer(
        model=model,
        cuda=cfg["basic"]["cuda"],
        criterion=criterion,
        optimizer=optimizer,
        lr_scheduler=exp_lr_scheduler,
        train_crops=cfg["train"]["crops"],
        crop_size=cfg["train"]["crop_size"],
        epochs=cfg["train"]["epochs"],
        training_dataset_path=cfg["train"]["data_path"],
        validation_dataset_path=cfg["validate"]["data_path"],
        train_transform=training_data_transform,
        valid_transform=validation_data_transform,
        batch_size=cfg["train"]["batch_size"],
        t_low=cfg["env"]["transmission_map"]["low"],
        t_high=cfg["env"]["transmission_map"]["high"],
        atm_light=cfg["env"]["atm_light"],
        t_map_random_sampler=cfg["env"]["transmission_map"]["random_sampler"],
        uint8_transform=cfg["basic"]["uint8_transform"],
        save_path=cfg["output"]["weight_dir"],
    )
    trainer.train()


def test(cfg):
    """Evaluate DCNet on the test dataset described by `cfg`."""
    model = _build_model(cfg)
    criterion = torch.nn.MSELoss(reduction="mean")
    model.eval()

    tester = Tester(
        model=model,
        cuda=cfg["basic"]["cuda"],
        criterion=criterion,
        test_dataset_path=cfg["test"]["data_path"],
        test_transform=_build_transform(cfg),
        t_low=cfg["env"]["transmission_map"]["low"],
        t_high=cfg["env"]["transmission_map"]["high"],
        atm_light=cfg["env"]["atm_light"],
        random_sampler=cfg["env"]["transmission_map"]["random_sampler"],
        uint8_transform=cfg["basic"]["uint8_transform"],
    )
    tester.test()


def predict(cfg):
    """Run dehazing predictions, writing results to the configured directory."""
    model = _build_model(cfg)
    model.eval()

    predictor = Predictor(
        model=model,
        transform=_build_transform(cfg),
        dataset=cfg["predict"]["data_path"],
        atm_light=cfg["env"]["atm_light"],
        add_ext_haze=cfg["predict"]["add_ext_haze"],
        t_low=cfg["env"]["transmission_map"]["low"],
        t_high=cfg["env"]["transmission_map"]["high"],
        t_map_random_sampler=cfg["env"]["transmission_map"]["random_sampler"],
        uint8_transform=cfg["predict"]["uint8_transform"],
        cuda=cfg["basic"]["cuda"],
        prediction_dir=cfg["predict"]["save_dir"],
    )
    predictor.predict()


if __name__ == "__main__":
    parser = ArgumentParser()
    parser.add_argument("--train", action="store_true")
    parser.add_argument("--test", action="store_true")
    parser.add_argument("--predict", action="store_true")
    args = parser.parse_args()

    with open(config.path["CONFIG_PATH"], "r") as ymlfile:
        # safe_load avoids arbitrary object construction; bare yaml.load
        # without an explicit Loader is deprecated and unsafe.
        cfg = yaml.safe_load(ymlfile)

    if args.train:
        train(cfg)
    if args.test:
        test(cfg)
    if args.predict:
        predict(cfg)
32.496933
92
0.570512
532
5,297
5.468045
0.195489
0.024751
0.055689
0.064971
0.465796
0.465796
0.411482
0.411482
0.36198
0.342729
0
0.00144
0.21314
5,297
162
93
32.697531
0.696497
0.092316
0
0.347826
0
0
0.184546
0
0
0
0
0
0
1
0.026087
false
0
0.086957
0
0.113043
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7ada8d12418618cb46c91bcc7ce0df76b2c106a3
1,645
py
Python
metagenscope_cli/cli/run_cli.py
LongTailBio/python-metagenscope
0c29042774a09e832fcecd0662657ac6e4895dea
[ "MIT" ]
null
null
null
metagenscope_cli/cli/run_cli.py
LongTailBio/python-metagenscope
0c29042774a09e832fcecd0662657ac6e4895dea
[ "MIT" ]
6
2018-10-30T23:51:55.000Z
2018-11-01T20:41:23.000Z
metagenscope_cli/cli/run_cli.py
LongTailBio/python-metagenscope
0c29042774a09e832fcecd0662657ac6e4895dea
[ "MIT" ]
null
null
null
"""CLI to run commands on MGS server."""
from sys import stderr

import click

from .utils import add_authorization


@click.group()
def run():
    """Run actions on the server."""
    pass


@run.group()
def middleware():
    """Run middleware."""
    pass


@middleware.command(name='group')
@add_authorization()
@click.option('-u/-n', '--uuid/--name', default=False)
@click.argument('group_name')
def group_middleware(uploader, uuid, group_name):
    """Run middleware for a group."""
    if uuid:
        # the argument already is a uuid
        group_uuid = group_name
    else:
        # resolve the human-readable group name to its uuid
        reply = uploader.knex.get(f'/api/v1/sample_groups/getid/{group_name}')
        group_uuid = reply['data']['sample_group_uuid']
    print(f'{group_name} :: {group_uuid}', file=stderr)
    reply = uploader.knex.post(f'/api/v1/sample_groups/{group_uuid}/middleware', {})
    click.echo(reply)


@middleware.command(name='samples')
@add_authorization()
@click.option('-u/-n', '--uuid/--name', default=False)
@click.argument('sample_names', nargs=-1)
def sample_middleware(uploader, uuid, sample_names):
    """Run middleware for a sample."""
    for sample_name in sample_names:
        if uuid:
            sample_uuid = sample_name
        else:
            # resolve the sample name to its uuid
            reply = uploader.knex.get(f'/api/v1/samples/getid/{sample_name}')
            sample_uuid = reply['data']['sample_uuid']
        print(f'{sample_name} :: {sample_uuid}', file=stderr)
        try:
            reply = uploader.knex.post(f'/api/v1/samples/{sample_uuid}/middleware', {})
            click.echo(reply)
        except Exception:  # pylint: disable=broad-except
            # best-effort: keep processing the remaining samples
            click.echo('Failed.')
29.909091
90
0.646809
208
1,645
4.971154
0.293269
0.04352
0.077369
0.052224
0.332689
0.249516
0.249516
0.191489
0.191489
0.119923
0
0.003771
0.193921
1,645
54
91
30.462963
0.776018
0.099696
0
0.307692
0
0
0.227491
0.109966
0
0
0
0
0
1
0.102564
false
0.051282
0.076923
0
0.179487
0.051282
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
7adab11f81fe9641b079dc6d9cfecadb798ccba4
165
py
Python
ergo/contrib/utils/utils.py
bmillwood/ergo
34be736f1979ad7f1f130bb90728270cb58dbfe8
[ "MIT" ]
93
2020-04-16T03:49:55.000Z
2022-03-26T14:56:29.000Z
ergo/contrib/utils/utils.py
bmillwood/ergo
34be736f1979ad7f1f130bb90728270cb58dbfe8
[ "MIT" ]
326
2020-03-25T17:49:11.000Z
2021-03-25T03:19:51.000Z
ergo/contrib/utils/utils.py
bmillwood/ergo
34be736f1979ad7f1f130bb90728270cb58dbfe8
[ "MIT" ]
26
2020-03-25T03:18:58.000Z
2022-03-18T21:19:11.000Z
from datetime import timedelta


def daterange(start_date, end_date):
    """Yield each date from `start_date` (inclusive) to `end_date` (exclusive).

    Yields nothing when `end_date` is not after `start_date`.
    """
    total_days = int((end_date - start_date).days)
    for offset in range(total_days):
        yield start_date + timedelta(offset)
23.571429
54
0.715152
25
165
4.52
0.64
0.238938
0
0
0
0
0
0
0
0
0
0
0.187879
165
6
55
27.5
0.843284
0
0
0
0
0
0
0
0
0
0
0
0
1
0.25
false
0
0.25
0
0.5
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
4
7add913572dab6aa965813722b95547e25bae2fd
248
py
Python
uts/session_py/5.py
viad00/code_olymp
90f20f9fd075e8967d02baf7554fcf24f4ae089c
[ "MIT" ]
null
null
null
uts/session_py/5.py
viad00/code_olymp
90f20f9fd075e8967d02baf7554fcf24f4ae089c
[ "MIT" ]
null
null
null
uts/session_py/5.py
viad00/code_olymp
90f20f9fd075e8967d02baf7554fcf24f4ae089c
[ "MIT" ]
null
null
null
import sys sys.stdin = open("input.txt", "r") sys.stdout = open("output.txt", "w") n, m = map(int, input().split()) n_mas = [0 for i in range(n)] for i in range(m): for j in list(map(int, input().split())): n_mas[j-1] += 1 print(*n_mas)
27.555556
45
0.584677
49
248
2.897959
0.510204
0.084507
0.15493
0.225352
0.28169
0.28169
0
0
0
0
0
0.014851
0.185484
248
9
46
27.555556
0.688119
0
0
0
0
0
0.084337
0
0
0
0
0
0
1
0
false
0
0.111111
0
0.111111
0.111111
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
7addb1ac9be726107051f2594c1af6bf1d661239
9,183
py
Python
nitroml/automl/ensemble_selection/subpipeline.py
google/nitroml
5eabdbe6de85ff7fdae4fefda7547c0c031f9431
[ "Apache-2.0" ]
43
2020-09-13T18:07:15.000Z
2022-01-05T19:05:28.000Z
nitroml/automl/ensemble_selection/subpipeline.py
google/nitroml
5eabdbe6de85ff7fdae4fefda7547c0c031f9431
[ "Apache-2.0" ]
4
2020-09-14T13:15:09.000Z
2021-11-21T11:21:13.000Z
nitroml/automl/ensemble_selection/subpipeline.py
google/nitroml
5eabdbe6de85ff7fdae4fefda7547c0c031f9431
[ "Apache-2.0" ]
5
2020-09-14T13:03:04.000Z
2021-10-21T01:55:48.000Z
# Copyright 2021 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= # Lint as: python3 """An Ensemble Selection subpipeline for tabular datasets.""" import json import os from typing import List, Optional, Tuple from absl import logging from nitroml import subpipeline from nitroml.automl.ensemble_selection.lib import ensemble_selection as es_lib import tensorflow as tf from tfx import types from tfx.dsl.component.experimental.annotations import InputArtifact from tfx.dsl.component.experimental.annotations import OutputArtifact from tfx.dsl.component.experimental.annotations import Parameter from tfx.dsl.component.experimental.decorators import component from tfx.dsl.components.base import base_component from tfx.types import standard_artifacts from tfx.utils import path_utils from google.protobuf import text_format from nitroml.protos import problem_statement_pb2 as ps_pb2 class EnsembleSelection(subpipeline.Subpipeline): """An Ensemble Selection subpipeline for tabular datasets.""" def __init__(self, problem_statement: ps_pb2.ProblemStatement, examples: types.Channel, models: List[types.Channel], evaluation_split_name: str, ensemble_size: int, metric: Optional[tf.keras.metrics.Metric] = None, goal: Optional[str] = None, instance_name: Optional[str] = None): """Constructs an AutoTrainer subpipeline. Args: problem_statement: ProblemStatement proto identifying the task. 
examples: A Channel of 'Example' artifact type produced from an upstream The source of examples that are used in evaluation (required). models: A List of Channels of 'standard_artifact.Model' type to use as the library of base models in the ensemble selection algorithm. evaluation_split_name: String name of the evaluation split in the `examples` artifact to use for evaluation. For examples, 'eval'. ensemble_size: Maximum number of models (with replacement) to select. This is the number of rounds (iterations) for which the ensemble selection algorithm will run. The number of models in the final ensemble will be at most ensemble_size. metric: Optional TF Keras Metric to optimize for during ensemble selection. When `None`, the `problem_statement` is used to determine the metric and goal. goal: Optional string 'maximize' or 'minimize' depending on the goal of the metric. When `None`, the `problem_statement` is used to determine the metric and goal. instance_name: Optional unique instance name. Necessary iff multiple EnsembleSelection subpipelines are declared in the same pipeline. Raises: ValueError: When a required param is not supplied. 
""" if not metric and not goal: metric, goal = self._determine_metric_and_goal(problem_statement) input_models = {f'input_model{i}': model for i, model in enumerate(models)} self._instance_name = instance_name self._ensemble_selection = ensemble_selection( problem_statement=text_format.MessageToString( message=problem_statement, as_utf8=True), examples=examples, evaluation_split_name=evaluation_split_name, ensemble_size=ensemble_size, metric=json.dumps(tf.keras.metrics.serialize(metric)), goal=goal, instance_name=instance_name, **input_models, ) @property def id(self) -> str: """Returns the AutoTrainer sub-pipeline's unique ID.""" autotrainer_instance_name = 'EnsembleSelection' if self._instance_name: autotrainer_instance_name = f'{autotrainer_instance_name}.{self._instance_name}' return autotrainer_instance_name @property def components(self) -> List[base_component.BaseComponent]: """Returns the AutoTrainer sub-pipeline's constituent components.""" return [self._ensemble_selection] @property def outputs(self) -> subpipeline.SubpipelineOutputs: """Return the AutoTrainer sub-pipeline's outputs.""" return subpipeline.SubpipelineOutputs( {'model': self._ensemble_selection.outputs.model}) def _determine_metric_and_goal( self, problem_statement: ps_pb2.ProblemStatement ) -> Tuple[tf.keras.metrics.Metric, str]: task_type = problem_statement.tasks[0].type if task_type.HasField('multi_class_classification'): return tf.keras.metrics.SparseCategoricalAccuracy( name='accuracy'), 'maximize' if task_type.HasField('binary_classification'): return tf.keras.metrics.AUC(name='auc_roc', curve='ROC'), 'maximize' if task_type.HasField('one_dimensional_regression'): return tf.keras.metrics.MeanSquaredError(name='mse'), 'minimize' raise ValueError('Invalid task type: {}'.format(task_type)) # pytype: disable=wrong-arg-types @component def ensemble_selection( problem_statement: Parameter[str], examples: InputArtifact[standard_artifacts.Examples], evaluation_split_name: Parameter[str], 
ensemble_size: Parameter[int], metric: Parameter[str], goal: Parameter[str], model: OutputArtifact[standard_artifacts.Model], input_model0: InputArtifact[standard_artifacts.Model] = None, input_model1: InputArtifact[standard_artifacts.Model] = None, input_model2: InputArtifact[standard_artifacts.Model] = None, input_model3: InputArtifact[standard_artifacts.Model] = None, input_model4: InputArtifact[standard_artifacts.Model] = None, input_model5: InputArtifact[standard_artifacts.Model] = None, input_model6: InputArtifact[standard_artifacts.Model] = None, input_model7: InputArtifact[standard_artifacts.Model] = None, input_model8: InputArtifact[standard_artifacts.Model] = None, input_model9: InputArtifact[standard_artifacts.Model] = None, ) -> None: # pytype: disable=invalid-annotation,wrong-arg-types """Runs the SimpleML trainer as a separate component.""" problem_statement = text_format.Parse(problem_statement, ps_pb2.ProblemStatement()) input_models = [ input_model0, input_model1, input_model2, input_model3, input_model4, input_model5, input_model6, input_model7, input_model8, input_model9 ] saved_model_paths = { str(i): path_utils.serving_model_path(model.uri) for i, model in enumerate(input_models) if model } logging.info('Saved model paths: %s', saved_model_paths) label_key = _label_key(problem_statement) es = es_lib.EnsembleSelection( problem_statement=problem_statement, saved_model_paths=saved_model_paths, ensemble_size=ensemble_size, metric=tf.keras.metrics.deserialize(json.loads(metric)), goal=goal) es.fit(*_data_from_examples( examples_path=os.path.join(examples.uri, evaluation_split_name), label_key=label_key)) logging.info('Selected ensemble weights: %s', es.weights) es.save( export_path=os.path.join( path_utils.serving_model_dir(model.uri), 'export', 'serving')) # pytype: enable=wrong-arg-types def _data_from_examples(examples_path: str, label_key: str): """Returns a tuple of ndarrays of examples and label values.""" # Load all the examples. 
filenames = tf.io.gfile.listdir(examples_path) files = [ os.path.join(examples_path, filename) for filename in sorted(filenames) ] dataset = tf.data.TFRecordDataset(files, compression_type='GZIP') x, y = [], [] for serialized_example in dataset.take(10000).as_numpy_iterator(): x.append(serialized_example) example = tf.train.Example() example.ParseFromString(serialized_example) y.append(_label_value(example, label_key)) return x, y def _label_key(problem_statement: ps_pb2.ProblemStatement) -> str: """Returns the label key from the problem statement.""" task_type = problem_statement.tasks[0].type if task_type.HasField('multi_class_classification'): return task_type.multi_class_classification.label if task_type.HasField('binary_classification'): return task_type.binary_classification.label if task_type.HasField('one_dimensional_regression'): return task_type.one_dimensional_regression.label raise ValueError('Invalid task type: {}'.format(task_type)) def _label_value(example: tf.train.Example, label_key: str): feature = example.features.feature[label_key] if feature.HasField('int64_list'): return feature.int64_list.value if feature.HasField('float_list'): return feature.float_list.value return feature.bytes_list.value
40.453744
86
0.732658
1,145
9,183
5.689956
0.253275
0.046662
0.050652
0.053722
0.268304
0.211512
0.12218
0.071834
0.043592
0.043592
0
0.006194
0.173691
9,183
226
87
40.632743
0.852399
0.27638
0
0.105634
0
0
0.062423
0.030055
0
0
0
0
0
1
0.06338
false
0
0.119718
0
0.28169
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7ade72c37577df6464ca8dc5d4f8c8a62f6533b2
246
py
Python
ndn_python_repo/handle/__init__.py
susmit85/ndn-python-repo
45a76bd3e78b0157e364413d62d3aaa51cd20353
[ "Apache-2.0" ]
5
2020-03-23T01:32:12.000Z
2020-10-03T14:36:19.000Z
ndn_python_repo/handle/__init__.py
susmit85/ndn-python-repo
45a76bd3e78b0157e364413d62d3aaa51cd20353
[ "Apache-2.0" ]
48
2020-01-24T04:29:14.000Z
2020-12-11T09:08:50.000Z
ndn_python_repo/handle/__init__.py
susmit85/ndn-python-repo
45a76bd3e78b0157e364413d62d3aaa51cd20353
[ "Apache-2.0" ]
6
2020-02-20T00:05:33.000Z
2020-11-27T05:27:14.000Z
from .read_handle import ReadHandle from .command_handle_base import CommandHandle from .write_command_handle import WriteCommandHandle from .delete_command_handle import DeleteCommandHandle from .tcp_bulk_insert_handle import TcpBulkInsertHandle
49.2
55
0.902439
30
246
7.066667
0.533333
0.226415
0.179245
0
0
0
0
0
0
0
0
0
0.077236
246
5
55
49.2
0.933921
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
7ade9e743940d11d6af70b1681562c89779125eb
23,545
py
Python
main/academy/views.py
UsamaKashif/studentutor
7aa5407ac81134a49e474726220e48beaadc9390
[ "MIT" ]
7
2021-01-17T23:10:15.000Z
2021-02-01T21:35:36.000Z
main/academy/views.py
DiveshTheReal/studentutor
0d3ef57887bde4dd2ee40d68015598f9c8052ffd
[ "MIT" ]
7
2021-01-17T15:10:47.000Z
2022-03-12T00:53:49.000Z
main/academy/views.py
DiveshTheReal/studentutor
0d3ef57887bde4dd2ee40d68015598f9c8052ffd
[ "MIT" ]
3
2021-01-18T09:36:16.000Z
2021-01-20T16:29:40.000Z
from django.shortcuts import render,redirect from django.contrib.auth.models import Group from .forms import AcademySignUpForm, AcademyProfile, ProfilePicture, PostAnAdForm, AboutAcademyForm from django.contrib.auth.models import User from django.views.generic import RedirectView from .decorators import unauthenticated_user, allowed_users, admin_only from django.contrib.auth.decorators import login_required from .models import Academy, PostAnAd, Invitations from tutors.models import PostAnAd as PostAnAd_tutor from tutors.models import PostAnAd as PostAnAd_tutor from tutors.models import Invitaions,WishList_tut from django.contrib import messages from django.core.mail import EmailMessage from django.conf import settings from django.template.loader import render_to_string from django.contrib.sites.shortcuts import get_current_site from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode from django.utils.encoding import force_bytes, force_text, DjangoUnicodeDecodeError from .utils import generate_token from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger import threading def academyRegister(request): form = AcademySignUpForm() if request.method == "POST": form = AcademySignUpForm(request.POST) if form.is_valid(): academy = form.save() username = form.cleaned_data.get('username') email = form.cleaned_data.get('email') city = form.cleaned_data.get('city') phone = form.cleaned_data.get("phone") name = form.cleaned_data.get("name") address = form.cleaned_data.get("address") group = Group.objects.get(name="academy") academy.groups.add(group) Academy.objects.create( academy=academy, username= username, name=name, email = email, city = city, phone = phone, address= address ) academy.is_active = False academy.save() current_site = get_current_site(request) template = render_to_string("academy/activate.html", { "name": name, "domain": current_site, "uid": urlsafe_base64_encode(force_bytes(academy.pk)), "token": 
generate_token.make_token(academy) }) registerEmail = EmailMessage( 'Account Activation', template, settings.EMAIL_HOST_USER, [email] ) registerEmail.fail_silently = False registerEmail.send() return render(request,"students/activation_sent.html",{}) context = { "form": form } return render(request, 'academy/academy_sign_up.html', context) def activate_view(request, uidb64, token): try: uid = force_text(urlsafe_base64_decode(uidb64)) academy = User.objects.get(pk = uid) except: academy = None if academy is not None and generate_token.check_token(academy, token): academy.is_active = True academy.save() template = render_to_string("academy/registerEmail.html", { "name": academy.name }) registerEmail = EmailMessage( 'Registration Successful', template, settings.EMAIL_HOST_USER, [academy.email] ) registerEmail.fail_silently = False registerEmail.send() messages.success(request,'account was created for ' + academy.username) return redirect("sign_in") return render(request, 'students/activate_failed.html', status=401) @login_required(login_url="sign_in") @allowed_users(allowed_roles=["academy"]) def academyDashboard(request): academy = request.user.academy form = AcademyProfile(instance=academy) user = Academy.objects.get(username = request.user.username) active_ads = PostAnAd.objects.filter(academyUser = request.user.academy).count() p_form = ProfilePicture() if request.method=="POST": form = AcademyProfile(request.POST,request.FILES, instance=academy) p_form = ProfilePicture(request.POST, request.FILES) if p_form.is_valid(): image = p_form.cleaned_data["image"] std_image = Academy.objects.get(username = request.user.username) std_image.user_image = image std_image.save() return redirect("academy_dashboard") else: messages.warning(request, 'Supported File Extensions are .jpg And .png, Max Image Size Is 1MB') return redirect("academy_dashboard") if form.is_valid(): form.save() context = { "form": form, "p_form": p_form, "totalAds": user.total_ads, "adsDel": 
user.ads_deleted, "activeAds": active_ads, # needs to be updated "invitations_sent": user.invitations_sent, "invitations_sent_accepted": user.invitations_sent_accepted, "invitations_sent_rejected": user.invitations_sent_rejected, "invitations_recieved": user.invitations_recieved, "invitations_recieved_accepted": user.invitations_recieved_accepted, "invitations_recieved_rejected": user.invitations_recieved_rejected, } return render(request, 'academy/academy_dashboard.html', context) def post_ad(subject,tuition_level,hours_per_day,days_per_week,estimated_fees,user,tutor_gender): myad = PostAnAd( academyUser = user, subject = subject, tuition_level = tuition_level, hours_per_day = hours_per_day, days_per_week = days_per_week, estimated_salary = estimated_fees, tutor_gender = tutor_gender ) myad.save() user.total_ads += 1 user.ad_post_count += 1 user.save() def email_send(user,my_ad,emails): if emails: template = render_to_string("home/stdAD.html", { "firstname": user.first_name, "lastname": user.last_name, "ad":my_ad }) ADEmail = EmailMessage( subject = f'{user.first_name} {user.last_name} posted an AD', body = template, from_email = settings.EMAIL_HOST_USER, bcc = emails ) ADEmail.fail_silently = False ADEmail.send() @login_required(login_url="sign_in") @allowed_users(allowed_roles=["academy"]) def postAd(request, pk): postform = PostAnAdForm() user = Academy.objects.get(username = request.user.username) academyAds = PostAnAd.objects.filter(academyUser__username = request.user.username) # wishlist,created = WishList_tut.objects.get_or_create(student=request.user.student) emails = [] # tutors = wishlist.tutors.all() # for t in tutors: # emails.append(t.email) if request.method == "POST": postform = PostAnAdForm(request.POST) if postform.is_valid(): subject = postform.cleaned_data["subject"] tuition_level = postform.cleaned_data["tuition_level"] tutor_gender = postform.cleaned_data["tutor_gender"] hours_per_day = postform.cleaned_data["hours_per_day"] days_per_week = 
postform.cleaned_data["days_per_week"] estimated_salary = postform.cleaned_data["estimated_salary"] adAvailabel = False for ad in academyAds: if ad.subject == subject and ad.tuition_level == tuition_level: adAvailabel = True if adAvailabel == False: currentad = { "subject" : subject, "tuition_level" : tuition_level, "hours_per_day" : hours_per_day, "days_per_week" : days_per_week, "estimated_salary" : estimated_salary, "tutor_gender":tutor_gender } my_ad = threading.Thread(target=post_ad, args=[subject,tuition_level,hours_per_day,days_per_week,estimated_salary,user,tutor_gender]) # t2 = threading.Thread(target=email_send, args=[user,currentad,emails]) my_ad.start() # t2.start() messages.info(request, "Your post is Successfully Created") return redirect("academy_dashboard") else: messages.info(request, "This AD Already Exists") return redirect("academy_dashboard") context = { "form": postform } return render(request, 'academy/post_ad.html', context) @login_required(login_url="sign_in") @allowed_users(allowed_roles=["academy"]) def Ads(request): try: studentAbout = AboutStudent.objects.get(student__username = request.user.username).order_by("-id") except: studentAbout = None ads = PostAnAd.objects.filter(academyUser=request.user.academy).order_by("-id") context = { "ads":ads, "about": studentAbout } return render(request, 'academy/ads.html', context) @login_required(login_url="sign_in") @allowed_users(allowed_roles=["academy"]) def AdsDelete(request, pk): ad = PostAnAd.objects.get(id=pk) user = Academy.objects.get(username=request.user.username) if request.method == "POST": ad.delete() user.ads_deleted += 1 user.ad_post_count -= 1 user.save() return redirect("ads_academy") context = { 'ad':ad } return render(request, 'academy/delete_ad.html', context) @login_required(login_url="sign_in") @allowed_users(allowed_roles=["academy"]) def allTutors(request): tutors = PostAnAd_tutor.objects.all().order_by("-id") tuition_level_contains_query = request.GET.get('TuitionLevel') 
subject_contains_query = request.GET.get('Subject') city_contains_query = request.GET.get('City') tuition_gender_query = request.GET.get('tuition_gender') number = tutors.count() if tutors: if tuition_level_contains_query != "" and tuition_level_contains_query is not None and tuition_level_contains_query != "All": tutors = tutors.filter(tuition_level = tuition_level_contains_query).order_by("-id") number = tutors.count() if subject_contains_query != "" and subject_contains_query is not None: tutors = tutors.filter(subject__icontains = subject_contains_query).order_by("-id") number = tutors.count() if city_contains_query != "" and city_contains_query is not None: tutors = tutors.filter(tutorUser__city__icontains = city_contains_query).order_by("-id") number = tutors.count() if tuition_gender_query != "" and tuition_gender_query is not None and tuition_gender_query != "Both": tutors = tutors.filter(tutorUser__gender__startswith = tuition_gender_query.lower()) number = tutors.count() tuts = [] if tutors: for t in tutors: tuts.append(t) paginator = Paginator(tuts,8) page = request.GET.get('page') try: items = paginator.page(page) except PageNotAnInteger: items = paginator.page(1) except EmptyPage: items = paginator.page(paginator.num_pages) index = items.number - 1 max_index = len(paginator.page_range) start_index = index - 5 if index >= 5 else 0 end_index = index + 5 if index <= max_index - 5 else max_index page_range = paginator.page_range[start_index:end_index] context = { # "tutors":items, "items":items, "number": number, "academy": request.user.academy, "page_range": page_range, } return render(request, 'academy/all_tutors.html', context) from tutors.models import AboutAndQualifications @login_required(login_url="sign_in") @allowed_users(allowed_roles=["academy"]) def SpecificTutor(request, id): tutor = PostAnAd_tutor.objects.get(id = id) qual = AboutAndQualifications.objects.get(tutor__username = tutor.tutorUser.username) tutor.views += 1 tutor.save() tutors = 
PostAnAd_tutor.objects.filter(tutorUser__username = tutor.tutorUser.username).order_by("-id") # try: # wishList = WishList.objects.get(student = request.user.student) # except: # wishList = None # added = False # if wishList is not None: # if tutor.tutorUser in wishList.tutors.all(): # added = True context = { "tutor_id": tutor.tutorUser, "tutor": tutor, "qual": qual, "tutors": tutors.exclude(id = id), "student": request.user.academy, "added":False, # needs to be updated } return render (request, "academy/specific_tutor.html", context) from tutors.models import Tutor, Invitaions_by_academy @login_required(login_url="sign_in") @allowed_users(allowed_roles=["academy"]) def inviteFordemo(request, id): ad = PostAnAd_tutor.objects.get(id = id) tutor = Tutor.objects.get ( username = ad.tutorUser.username) user = Academy.objects.get(username = request.user.username) std = Academy.objects.get(username = request.user.username) try: invites_sent_by_std = Invitaions_by_academy.objects.get(tutor_ad = ad) except: invites_sent_by_std = None if request.method == "POST": if invites_sent_by_std != None: if invites_sent_by_std.invitation_sent == True and invites_sent_by_std.inivitaion_by_academy.username == request.user.username: messages.info(request, f'Invitation request already sent to {ad.tutorUser.first_name} {ad.tutorUser.last_name}') return redirect("all_tutors_academy") else: Invitaions_by_academy.objects.create( inivitaion_by_academy = std, tutor_ad = ad, invitation_sent = True, accepted = False, rejected = False ) user.invitations_sent += 1 user.save() tutor.invitations_recieved += 1 tutor.save() template = render_to_string("home/inviteEmail.html", { "firstname": ad.tutorUser.first_name, "lastname": ad.tutorUser.last_name, "ad": ad, "invited_to": "Tutor", "area":ad.address, "city":ad.tutorUser.city }) registerEmail = EmailMessage( 'Invite For Demo', template, settings.EMAIL_HOST_USER, [request.user.email] ) registerEmail.fail_silently = False registerEmail.send() 
intemplate = render_to_string("academy/inviteEmail.html", { "firstname": request.user.academy.name, "ad": ad, "invited_to": "Tutor", "area":ad.address, "city":ad.tutorUser.city }) email = EmailMessage( 'Invitation', intemplate, settings.EMAIL_HOST_USER, [ad.tutorUser.email] ) email.fail_silently = False email.send() messages.info(request, f'Invited {tutor.first_name} {tutor.last_name} For A Demo') return redirect("academy_dashboard") # needs to be changes to invited page. else: Invitaions_by_academy.objects.create( inivitaion_by_academy = std, tutor_ad = ad, invitation_sent = True, accepted = False, rejected = False ) user.invitations_sent += 1 user.save() tutor.invitations_recieved += 1 tutor.save() template = render_to_string("home/inviteEmail.html", { "firstname": ad.tutorUser.first_name, "lastname": ad.tutorUser.last_name, "ad": ad, "invited_to": "Tutor", "area":ad.address, "city":ad.tutorUser.city }) registerEmail = EmailMessage( 'Invite For Demo', template, settings.EMAIL_HOST_USER, [request.user.email] ) registerEmail.fail_silently = False registerEmail.send() intemplate = render_to_string("academy/inviteEmail.html", { "firstname": request.user.academy.name, "ad": ad, "invited_to": "Tutor", "area":ad.address, "city":ad.tutorUser.city }) email = EmailMessage( 'Invitation', intemplate, settings.EMAIL_HOST_USER, [ad.tutorUser.email] ) email.fail_silently = False email.send() messages.info(request, f'Invited {tutor.first_name} {tutor.last_name} For A Demo') return redirect("academy_dashboard") # needs to be changed to invited page context = { "ad":ad } return render(request, 'academy/invite_for_demo.html', context) @login_required(login_url="sign_in") @allowed_users(allowed_roles=["academy"]) def invited(request): student = Academy.objects.get(username = request.user.username) invited = Invitaions_by_academy.objects.filter(inivitaion_by_academy = student).order_by("-id") context={ "invited": invited, } return render(request, "academy/invited.html", context) 
@login_required(login_url="sign_in") @allowed_users(allowed_roles=["academy"]) def invitationsAcademy(request): invites = Invitations.objects.filter(academy_ad__academyUser = request.user.academy).order_by("-id") context = { "invites":invites } return render(request, 'academy/invitations.html', context) @login_required(login_url="sign_in") @allowed_users(allowed_roles=["academy"]) def view_your_ad_acad(request, id): student_ad = Invitations.objects.get(id = id) try: tutors = PostAnAd_tutor.objects.filter(subject = student_ad.academy_ad.subject)[4] except: tutors = PostAnAd_tutor.objects.filter(subject = student_ad.academy_ad.subject) context = { "invite":student_ad, "tutors": tutors.exclude(tutorUser__username = student_ad.inivitaion_by_tutor.username) } return render(request,'academy/view_your_ad.html', context) @login_required(login_url="sign_in") @allowed_users(allowed_roles=["academy"]) def acceptInvitationAcademy(request, id): invite = Invitations.objects.get(id = id) student = Academy.objects.get(username = request.user.username) tutor = Tutor.objects.get(username = invite.inivitaion_by_tutor.username) if request.method == "POST": invite.accepted = True invite.rejected = False invite.save() student.invitations_recieved_accepted += 1 student.save() tutor.invitations_sent_accepted += 1 tutor.save() template = render_to_string("academy/acceptEmail.html", { "name": request.user.academy.name, "email": request.user.email, "register_as": "Academy", "phone": request.user.academy.phone }) registerEmail = EmailMessage( 'Invitation Accepted', template, settings.EMAIL_HOST_USER, [invite.inivitaion_by_tutor.email] ) registerEmail.fail_silently = False registerEmail.send() recieve_temp = render_to_string("academy/accept_recieve_Email.html", { "request_from" :tutor, "request": "Tutor" }) Email = EmailMessage( 'Invitation Accepted', recieve_temp, settings.EMAIL_HOST_USER, [request.user.email] ) Email.fail_silently = False Email.send() messages.info(request, f'Accepted 
Invitation Request from {tutor.first_name} {tutor.last_name}') return redirect("invitations_academy") context = { "invite":invite } return render(request, "academy/accept_invitation.html", context) @login_required(login_url="sign_in") @allowed_users(allowed_roles=["academy"]) def rejectInviteAcademy(request, id): invite = Invitations.objects.get(id = id) student = Academy.objects.get(username = request.user.username) tutor = Tutor.objects.get(username = invite.inivitaion_by_tutor.username) if request.method == "POST": invite.delete() student.invitations_recieved_rejected += 1 student.save() tutor.invitations_sent_rejected += 1 tutor.save() template = render_to_string("home/rejectEmail.html", { "firstname": request.user.academy.name, "student_email": request.user.email }) registerEmail = EmailMessage( 'Invitation Rejected', template, settings.EMAIL_HOST_USER, [invite.inivitaion_by_tutor.email] ) registerEmail.fail_silently = False registerEmail.send() messages.warning(request, f'Rejected Invite From {tutor.first_name} {tutor.last_name}') return redirect("invitations_academy") context = { "invite": invite } return render(request,'academy/reject_invitation.html', context) @login_required(login_url="sign_in") @allowed_users(allowed_roles=["academy"]) def del_account_acad(request): student = User.objects.get(username = request.user.username) # print(request.user.student.first_name) if request.method == "POST": student.is_active = False student.save() template = render_to_string("home/delEmail.html", { "register_as": "Academy", "email": request.user.email, }) registerEmail = EmailMessage( 'Account Deletion', template, settings.EMAIL_HOST_USER, [request.user.email] ) registerEmail.fail_silently = False registerEmail.send() return redirect("academy_dashboard") context = {} return render(request, "academy/del_account.html", context) @login_required(login_url="sign_in") @allowed_users(allowed_roles=["academy"]) def aboutAcademy(request): aboutForm = AboutAcademyForm() if 
request.method == "POST": aboutForm = AboutAcademyForm(request.POST) if aboutForm.is_valid(): # try: # AboutStudent.objects.get(student__username = request.user.username).delete() # except: # pass about = aboutForm.cleaned_data["textArea"] std = Academy.objects.get(username=request.user.username) std.profile_complete = True std.textArea = about std.save() return redirect("academy_dashboard") context = { "form": aboutForm } return render(request, "academy/student_about.html", context)
33.302687
149
0.615715
2,477
23,545
5.641502
0.115462
0.028338
0.023114
0.027909
0.475383
0.431015
0.376342
0.366466
0.302061
0.288393
0
0.00237
0.283075
23,545
706
150
33.349858
0.825474
0.030622
0
0.432532
0
0
0.11711
0.032765
0
0
0
0
0
1
0.033272
false
0
0.042514
0
0.131238
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7adffba1ca679eea400ec7eeac7d960a50759561
1,019
py
Python
exercises/Desafio042.py
zThiago15/Curso-em-Video
ef25e0497edb79bdfbe71fde485f4dafc0d2a0e6
[ "MIT" ]
null
null
null
exercises/Desafio042.py
zThiago15/Curso-em-Video
ef25e0497edb79bdfbe71fde485f4dafc0d2a0e6
[ "MIT" ]
null
null
null
exercises/Desafio042.py
zThiago15/Curso-em-Video
ef25e0497edb79bdfbe71fde485f4dafc0d2a0e6
[ "MIT" ]
1
2021-07-24T21:39:26.000Z
2021-07-24T21:39:26.000Z
cores = {'limpar':'\033[m', 'vermelho':'\033[1;31m','ciano':'\033[1;36m','azul':'\033[1;34m','amarelo':'\033[1;33m','verde':'\033[1;32m'} print('===== Tipo de Triângulo -> Aprimoramento do Desafio 35 =====') print() s1 = float(input('Primeiro segmento: ')) s2 = float(input('Segundo segmento: ')) s3 = float(input('Terceiro segmento: ')) if s1 < s2+s3 and s2 < s1+s3 and s3 < s1+s2: print(f'{(cores["verde"])}PODE {(cores["limpar"])}se formar um triângulo! Do tipo:') if s1 == s2 == s3: #Se forem os 3 segmentos iguais print(f'{(cores["amarelo"])}EQUILÁTERO! {(cores["limpar"])}(Todos os lados são iguais).') elif s1 != s2 != s3 != s1: #Se forem 3 segmentos diferentes print(f'{(cores["azul"])}ESCALENO! {(cores["limpar"])}(Todos os lados diferentes).') else: #Se forem 2 segmentos iguais e 1 diferente print(f'{(cores["ciano"])}ISÓSCELES! {(cores["limpar"])}(Dois lados iguais).') else: print(f'{(cores["vermelho"])}NÃO PODE {(cores["limpar"])}se formar um triângulo!')
48.52381
137
0.618253
148
1,019
4.256757
0.391892
0.104762
0.087302
0.025397
0.180952
0.107937
0.107937
0
0
0
0
0.066975
0.150147
1,019
21
138
48.52381
0.660508
0.100098
0
0.125
0
0
0.627322
0.269945
0
0
0
0
0
1
0
false
0
0
0
0
0.4375
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
2
7ae0cbb3da1cad9d593dd273b1430c763adce93f
2,628
py
Python
bin/Lib/tkinter/test/support.py
yousafsyed/casperjs
ed077ae9e42cf8fb9e023e9b6840d3cea11bac40
[ "MIT" ]
36
2015-02-04T10:43:31.000Z
2022-03-30T13:01:12.000Z
bin/Lib/tkinter/test/support.py
yousafsyed/casperjs
ed077ae9e42cf8fb9e023e9b6840d3cea11bac40
[ "MIT" ]
9
2015-03-17T05:56:16.000Z
2021-11-17T09:31:50.000Z
bin/Lib/tkinter/test/support.py
yousafsyed/casperjs
ed077ae9e42cf8fb9e023e9b6840d3cea11bac40
[ "MIT" ]
22
2015-05-13T17:37:35.000Z
2022-01-25T06:24:42.000Z
import sys import tkinter import unittest from test.support import requires def get_tk_root(): requires('gui') # raise exception if tk unavailable try: root = tkinter._default_root except AttributeError: # it is possible to disable default root in Tkinter, although # I haven't seen people doing it (but apparently someone did it # here). root = None if root is None: # create a new master only if there isn't one already root = tkinter.Tk() return root def root_deiconify(): root = get_tk_root() root.deiconify() def root_withdraw(): root = get_tk_root() root.withdraw() def simulate_mouse_click(widget, x, y): """Generate proper events to click at the x, y position (tries to act like an X server).""" widget.event_generate('<Enter>', x=0, y=0) widget.event_generate('<Motion>', x=x, y=y) widget.event_generate('<ButtonPress-1>', x=x, y=y) widget.event_generate('<ButtonRelease-1>', x=x, y=y) import _tkinter tcl_version = tuple(map(int, _tkinter.TCL_VERSION.split('.'))) def requires_tcl(*version): return unittest.skipUnless(tcl_version >= version, 'requires Tcl version >= ' + '.'.join(map(str, version))) _tk_patchlevel = None def get_tk_patchlevel(): global _tk_patchlevel if _tk_patchlevel is None: tcl = tkinter.Tcl() patchlevel = [] for x in tcl.call('info', 'patchlevel').split('.'): try: x = int(x, 10) except ValueError: x = -1 patchlevel.append(x) _tk_patchlevel = tuple(patchlevel) return _tk_patchlevel units = { 'c': 72 / 2.54, # centimeters 'i': 72, # inches 'm': 72 / 25.4, # millimeters 'p': 1, # points } def pixels_conv(value): return float(value[:-1]) * units[value[-1:]] def tcl_obj_eq(actual, expected): if actual == expected: return True if isinstance(actual, _tkinter.Tcl_Obj): if isinstance(expected, str): return str(actual) == expected if isinstance(actual, tuple): if isinstance(expected, tuple): return (len(actual) == len(expected) and all(tcl_obj_eq(act, exp) for act, exp in zip(actual, expected))) return False def widget_eq(actual, expected): if actual == 
expected: return True if isinstance(actual, (str, tkinter.Widget)): if isinstance(expected, (str, tkinter.Widget)): return str(actual) == str(expected) return False
28.565217
73
0.603501
335
2,628
4.608955
0.364179
0.046632
0.049223
0.007772
0.13342
0.107513
0.107513
0.07772
0.07772
0.07772
0
0.011646
0.281202
2,628
91
74
28.879121
0.805717
0.128995
0
0.142857
0
0
0.041832
0
0
0
0
0
0
1
0.128571
false
0
0.071429
0.028571
0.357143
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7ae53592b868323acd8715b4da2629ff4da665fd
301
py
Python
module08-mongodb.programming.using.pymongo/hr/domain.py
deepcloudlabs/dcl162-2020-sep-09
dc05aa6e644610fe0cdb8f9de1074763fc77a3bb
[ "MIT" ]
null
null
null
module08-mongodb.programming.using.pymongo/hr/domain.py
deepcloudlabs/dcl162-2020-sep-09
dc05aa6e644610fe0cdb8f9de1074763fc77a3bb
[ "MIT" ]
null
null
null
module08-mongodb.programming.using.pymongo/hr/domain.py
deepcloudlabs/dcl162-2020-sep-09
dc05aa6e644610fe0cdb8f9de1074763fc77a3bb
[ "MIT" ]
1
2020-10-04T10:58:31.000Z
2020-10-04T10:58:31.000Z
class Employee(): def __init__(self,identity, fullname, salary, email, birth_year,iban): self.identity = identity self.fullname = fullname self.salary = salary self.email = email self.birth_year = birth_year self.iban = iban self.version = 1
33.444444
74
0.621262
35
301
5.142857
0.4
0.15
0
0
0
0
0
0
0
0
0
0.004695
0.292359
301
9
75
33.444444
0.840376
0
0
0
0
0
0
0
0
0
0
0
0
1
0.111111
false
0
0
0
0.222222
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
7ae5c9480c9d4736f4023646d3b8790e5fc94459
924
py
Python
setup.py
impergator493/PySeus
faa7e5741acea9c3b8e0acad066905fa3b1c301b
[ "X11" ]
2
2020-02-17T09:20:50.000Z
2022-03-22T13:05:22.000Z
setup.py
impergator493/PySeus
faa7e5741acea9c3b8e0acad066905fa3b1c301b
[ "X11" ]
null
null
null
setup.py
impergator493/PySeus
faa7e5741acea9c3b8e0acad066905fa3b1c301b
[ "X11" ]
1
2021-05-26T08:14:58.000Z
2021-05-26T08:14:58.000Z
from io import open from setuptools import setup, find_packages setup( name='pyseus', version='0.1', description='PySeus is a minimal viewer for medical imaging data.', long_description=open('README.md', encoding='utf-8').read(), long_description_content_type='text/markdown', url='http://github.com/calmer/pyseus', author='Christoph Almer', author_email='christoph.almer@gmail.com', license='GNU', packages=find_packages(), package_data={'pyseus': [ 'settings.ini', 'ui/style_dark.qss', 'ui/icon.png', 'settings.ini' ]}, include_package_data=True, install_requires=[ 'pyside2==5.13', 'numpy', 'opencv-python', 'h5py', 'pydicom', 'nibabel', 'natsort' ], entry_points={ 'console_scripts': [ 'pyseus=pyseus:load', ], }, zip_safe=False )
24.315789
71
0.58658
101
924
5.217822
0.752475
0.045541
0
0
0
0
0
0
0
0
0
0.011765
0.264069
924
37
72
24.972973
0.763235
0
0
0.055556
0
0
0.334416
0.027056
0
0
0
0
0
1
0
true
0
0.055556
0
0.055556
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
1
7ae6eee9c017c38595289696d2aff8e25f24cb29
1,753
py
Python
databricks/pixels/path_extractor.py
dmoore247/pixels
cf78ab016530c42bc36538a9b812fe23107f87d7
[ "Apache-2.0" ]
2
2021-03-28T18:50:50.000Z
2021-08-09T02:06:15.000Z
databricks/pixels/path_extractor.py
dmoore247/pixels
cf78ab016530c42bc36538a9b812fe23107f87d7
[ "Apache-2.0" ]
null
null
null
databricks/pixels/path_extractor.py
dmoore247/pixels
cf78ab016530c42bc36538a9b812fe23107f87d7
[ "Apache-2.0" ]
null
null
null
from pyspark.ml.pipeline import Transformer import pyspark.sql.functions as f import pyspark.sql.types as t from pyspark.sql import DataFrame class PathExtractor(Transformer): # Day extractor inherit of property of Transformer def __init__(self, inputCol='path', tagsCol = "tags", basePath='dbfs:/'): self.inputCol = inputCol #the name of your columns self.basePath = basePath def this(): #define an unique ID this(Identifiable.randomUID("PathExtractor")) def copy(extra): defaultCopy(extra) def check_input_type(self, schema): field = schema[self.inputCol] #assert that field is a datetype if (field.dataType != t.StringType()): raise Exception('PathExtractor input type %s did not match input type StringType' % field.dataType) def _transform(self, df): self.check_input_type(df.schema) return self._transform_impl(df, self.basePath, self.inputCol) @staticmethod def _transform_impl(df:DataFrame, basePath:str, inputCol:str): """ User overridable """ return (df .withColumn("relative_path", f.regexp_replace(inputCol, basePath+"(.*)$",r"$1")) .withColumn("local_path", f.regexp_replace(inputCol,"^dbfs:(.*$)",r"/dbfs$1")) .withColumn("extension",f.regexp_replace("relative_path", ".*\.(\w+)$", r"$1")) .withColumn("path_tags", f.split( f.regexp_replace( "relative_path", "([0-9a-zA-Z]+)([\_\.\/\:])", r"$1,"), ",") ) )
42.756098
111
0.557901
183
1,753
5.218579
0.459016
0.050262
0.058639
0.037696
0.108901
0
0
0
0
0
0
0.004992
0.314318
1,753
41
112
42.756098
0.789517
0.081004
0
0
0
0
0.139913
0.01624
0
0
0
0
0
1
0.176471
false
0
0.117647
0
0.382353
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7ae87b35274b2c9c47cbad11cf4cc8f53a9dfec9
9,371
py
Python
microbenchmarks/sort2_20.py
jmgc/pyston
9f672c1bbb75710ac17dd3d9107da05c8e9e8e8f
[ "BSD-2-Clause", "Apache-2.0" ]
1
2020-02-06T14:28:45.000Z
2020-02-06T14:28:45.000Z
microbenchmarks/sort2_20.py
jmgc/pyston
9f672c1bbb75710ac17dd3d9107da05c8e9e8e8f
[ "BSD-2-Clause", "Apache-2.0" ]
null
null
null
microbenchmarks/sort2_20.py
jmgc/pyston
9f672c1bbb75710ac17dd3d9107da05c8e9e8e8f
[ "BSD-2-Clause", "Apache-2.0" ]
1
2020-02-06T14:29:00.000Z
2020-02-06T14:29:00.000Z
def f(l): if l[0] > l[1]: l[0], l[1] = l[1], l[0] if l[0] > l[2]: l[0], l[2] = l[2], l[0] if l[0] > l[3]: l[0], l[3] = l[3], l[0] if l[0] > l[4]: l[0], l[4] = l[4], l[0] if l[0] > l[5]: l[0], l[5] = l[5], l[0] if l[0] > l[6]: l[0], l[6] = l[6], l[0] if l[0] > l[7]: l[0], l[7] = l[7], l[0] if l[0] > l[8]: l[0], l[8] = l[8], l[0] if l[0] > l[9]: l[0], l[9] = l[9], l[0] if l[0] > l[10]: l[0], l[10] = l[10], l[0] if l[0] > l[11]: l[0], l[11] = l[11], l[0] if l[0] > l[12]: l[0], l[12] = l[12], l[0] if l[0] > l[13]: l[0], l[13] = l[13], l[0] if l[0] > l[14]: l[0], l[14] = l[14], l[0] if l[0] > l[15]: l[0], l[15] = l[15], l[0] if l[0] > l[16]: l[0], l[16] = l[16], l[0] if l[0] > l[17]: l[0], l[17] = l[17], l[0] if l[0] > l[18]: l[0], l[18] = l[18], l[0] if l[0] > l[19]: l[0], l[19] = l[19], l[0] if l[1] > l[2]: l[1], l[2] = l[2], l[1] if l[1] > l[3]: l[1], l[3] = l[3], l[1] if l[1] > l[4]: l[1], l[4] = l[4], l[1] if l[1] > l[5]: l[1], l[5] = l[5], l[1] if l[1] > l[6]: l[1], l[6] = l[6], l[1] if l[1] > l[7]: l[1], l[7] = l[7], l[1] if l[1] > l[8]: l[1], l[8] = l[8], l[1] if l[1] > l[9]: l[1], l[9] = l[9], l[1] if l[1] > l[10]: l[1], l[10] = l[10], l[1] if l[1] > l[11]: l[1], l[11] = l[11], l[1] if l[1] > l[12]: l[1], l[12] = l[12], l[1] if l[1] > l[13]: l[1], l[13] = l[13], l[1] if l[1] > l[14]: l[1], l[14] = l[14], l[1] if l[1] > l[15]: l[1], l[15] = l[15], l[1] if l[1] > l[16]: l[1], l[16] = l[16], l[1] if l[1] > l[17]: l[1], l[17] = l[17], l[1] if l[1] > l[18]: l[1], l[18] = l[18], l[1] if l[1] > l[19]: l[1], l[19] = l[19], l[1] if l[2] > l[3]: l[2], l[3] = l[3], l[2] if l[2] > l[4]: l[2], l[4] = l[4], l[2] if l[2] > l[5]: l[2], l[5] = l[5], l[2] if l[2] > l[6]: l[2], l[6] = l[6], l[2] if l[2] > l[7]: l[2], l[7] = l[7], l[2] if l[2] > l[8]: l[2], l[8] = l[8], l[2] if l[2] > l[9]: l[2], l[9] = l[9], l[2] if l[2] > l[10]: l[2], l[10] = l[10], l[2] if l[2] > l[11]: l[2], l[11] = l[11], l[2] if l[2] > l[12]: l[2], l[12] = l[12], l[2] if l[2] > l[13]: l[2], l[13] = l[13], 
l[2] if l[2] > l[14]: l[2], l[14] = l[14], l[2] if l[2] > l[15]: l[2], l[15] = l[15], l[2] if l[2] > l[16]: l[2], l[16] = l[16], l[2] if l[2] > l[17]: l[2], l[17] = l[17], l[2] if l[2] > l[18]: l[2], l[18] = l[18], l[2] if l[2] > l[19]: l[2], l[19] = l[19], l[2] if l[3] > l[4]: l[3], l[4] = l[4], l[3] if l[3] > l[5]: l[3], l[5] = l[5], l[3] if l[3] > l[6]: l[3], l[6] = l[6], l[3] if l[3] > l[7]: l[3], l[7] = l[7], l[3] if l[3] > l[8]: l[3], l[8] = l[8], l[3] if l[3] > l[9]: l[3], l[9] = l[9], l[3] if l[3] > l[10]: l[3], l[10] = l[10], l[3] if l[3] > l[11]: l[3], l[11] = l[11], l[3] if l[3] > l[12]: l[3], l[12] = l[12], l[3] if l[3] > l[13]: l[3], l[13] = l[13], l[3] if l[3] > l[14]: l[3], l[14] = l[14], l[3] if l[3] > l[15]: l[3], l[15] = l[15], l[3] if l[3] > l[16]: l[3], l[16] = l[16], l[3] if l[3] > l[17]: l[3], l[17] = l[17], l[3] if l[3] > l[18]: l[3], l[18] = l[18], l[3] if l[3] > l[19]: l[3], l[19] = l[19], l[3] if l[4] > l[5]: l[4], l[5] = l[5], l[4] if l[4] > l[6]: l[4], l[6] = l[6], l[4] if l[4] > l[7]: l[4], l[7] = l[7], l[4] if l[4] > l[8]: l[4], l[8] = l[8], l[4] if l[4] > l[9]: l[4], l[9] = l[9], l[4] if l[4] > l[10]: l[4], l[10] = l[10], l[4] if l[4] > l[11]: l[4], l[11] = l[11], l[4] if l[4] > l[12]: l[4], l[12] = l[12], l[4] if l[4] > l[13]: l[4], l[13] = l[13], l[4] if l[4] > l[14]: l[4], l[14] = l[14], l[4] if l[4] > l[15]: l[4], l[15] = l[15], l[4] if l[4] > l[16]: l[4], l[16] = l[16], l[4] if l[4] > l[17]: l[4], l[17] = l[17], l[4] if l[4] > l[18]: l[4], l[18] = l[18], l[4] if l[4] > l[19]: l[4], l[19] = l[19], l[4] if l[5] > l[6]: l[5], l[6] = l[6], l[5] if l[5] > l[7]: l[5], l[7] = l[7], l[5] if l[5] > l[8]: l[5], l[8] = l[8], l[5] if l[5] > l[9]: l[5], l[9] = l[9], l[5] if l[5] > l[10]: l[5], l[10] = l[10], l[5] if l[5] > l[11]: l[5], l[11] = l[11], l[5] if l[5] > l[12]: l[5], l[12] = l[12], l[5] if l[5] > l[13]: l[5], l[13] = l[13], l[5] if l[5] > l[14]: l[5], l[14] = l[14], l[5] if l[5] > l[15]: l[5], l[15] = l[15], l[5] if l[5] > l[16]: 
l[5], l[16] = l[16], l[5] if l[5] > l[17]: l[5], l[17] = l[17], l[5] if l[5] > l[18]: l[5], l[18] = l[18], l[5] if l[5] > l[19]: l[5], l[19] = l[19], l[5] if l[6] > l[7]: l[6], l[7] = l[7], l[6] if l[6] > l[8]: l[6], l[8] = l[8], l[6] if l[6] > l[9]: l[6], l[9] = l[9], l[6] if l[6] > l[10]: l[6], l[10] = l[10], l[6] if l[6] > l[11]: l[6], l[11] = l[11], l[6] if l[6] > l[12]: l[6], l[12] = l[12], l[6] if l[6] > l[13]: l[6], l[13] = l[13], l[6] if l[6] > l[14]: l[6], l[14] = l[14], l[6] if l[6] > l[15]: l[6], l[15] = l[15], l[6] if l[6] > l[16]: l[6], l[16] = l[16], l[6] if l[6] > l[17]: l[6], l[17] = l[17], l[6] if l[6] > l[18]: l[6], l[18] = l[18], l[6] if l[6] > l[19]: l[6], l[19] = l[19], l[6] if l[7] > l[8]: l[7], l[8] = l[8], l[7] if l[7] > l[9]: l[7], l[9] = l[9], l[7] if l[7] > l[10]: l[7], l[10] = l[10], l[7] if l[7] > l[11]: l[7], l[11] = l[11], l[7] if l[7] > l[12]: l[7], l[12] = l[12], l[7] if l[7] > l[13]: l[7], l[13] = l[13], l[7] if l[7] > l[14]: l[7], l[14] = l[14], l[7] if l[7] > l[15]: l[7], l[15] = l[15], l[7] if l[7] > l[16]: l[7], l[16] = l[16], l[7] if l[7] > l[17]: l[7], l[17] = l[17], l[7] if l[7] > l[18]: l[7], l[18] = l[18], l[7] if l[7] > l[19]: l[7], l[19] = l[19], l[7] if l[8] > l[9]: l[8], l[9] = l[9], l[8] if l[8] > l[10]: l[8], l[10] = l[10], l[8] if l[8] > l[11]: l[8], l[11] = l[11], l[8] if l[8] > l[12]: l[8], l[12] = l[12], l[8] if l[8] > l[13]: l[8], l[13] = l[13], l[8] if l[8] > l[14]: l[8], l[14] = l[14], l[8] if l[8] > l[15]: l[8], l[15] = l[15], l[8] if l[8] > l[16]: l[8], l[16] = l[16], l[8] if l[8] > l[17]: l[8], l[17] = l[17], l[8] if l[8] > l[18]: l[8], l[18] = l[18], l[8] if l[8] > l[19]: l[8], l[19] = l[19], l[8] if l[9] > l[10]: l[9], l[10] = l[10], l[9] if l[9] > l[11]: l[9], l[11] = l[11], l[9] if l[9] > l[12]: l[9], l[12] = l[12], l[9] if l[9] > l[13]: l[9], l[13] = l[13], l[9] if l[9] > l[14]: l[9], l[14] = l[14], l[9] if l[9] > l[15]: l[9], l[15] = l[15], l[9] if l[9] > l[16]: l[9], l[16] = l[16], l[9] if l[9] > 
l[17]: l[9], l[17] = l[17], l[9] if l[9] > l[18]: l[9], l[18] = l[18], l[9] if l[9] > l[19]: l[9], l[19] = l[19], l[9] if l[10] > l[11]: l[10], l[11] = l[11], l[10] if l[10] > l[12]: l[10], l[12] = l[12], l[10] if l[10] > l[13]: l[10], l[13] = l[13], l[10] if l[10] > l[14]: l[10], l[14] = l[14], l[10] if l[10] > l[15]: l[10], l[15] = l[15], l[10] if l[10] > l[16]: l[10], l[16] = l[16], l[10] if l[10] > l[17]: l[10], l[17] = l[17], l[10] if l[10] > l[18]: l[10], l[18] = l[18], l[10] if l[10] > l[19]: l[10], l[19] = l[19], l[10] if l[11] > l[12]: l[11], l[12] = l[12], l[11] if l[11] > l[13]: l[11], l[13] = l[13], l[11] if l[11] > l[14]: l[11], l[14] = l[14], l[11] if l[11] > l[15]: l[11], l[15] = l[15], l[11] if l[11] > l[16]: l[11], l[16] = l[16], l[11] if l[11] > l[17]: l[11], l[17] = l[17], l[11] if l[11] > l[18]: l[11], l[18] = l[18], l[11] if l[11] > l[19]: l[11], l[19] = l[19], l[11] if l[12] > l[13]: l[12], l[13] = l[13], l[12] if l[12] > l[14]: l[12], l[14] = l[14], l[12] if l[12] > l[15]: l[12], l[15] = l[15], l[12] if l[12] > l[16]: l[12], l[16] = l[16], l[12] if l[12] > l[17]: l[12], l[17] = l[17], l[12] if l[12] > l[18]: l[12], l[18] = l[18], l[12] if l[12] > l[19]: l[12], l[19] = l[19], l[12] if l[13] > l[14]: l[13], l[14] = l[14], l[13] if l[13] > l[15]: l[13], l[15] = l[15], l[13] if l[13] > l[16]: l[13], l[16] = l[16], l[13] if l[13] > l[17]: l[13], l[17] = l[17], l[13] if l[13] > l[18]: l[13], l[18] = l[18], l[13] if l[13] > l[19]: l[13], l[19] = l[19], l[13] if l[14] > l[15]: l[14], l[15] = l[15], l[14] if l[14] > l[16]: l[14], l[16] = l[16], l[14] if l[14] > l[17]: l[14], l[17] = l[17], l[14] if l[14] > l[18]: l[14], l[18] = l[18], l[14] if l[14] > l[19]: l[14], l[19] = l[19], l[14] if l[15] > l[16]: l[15], l[16] = l[16], l[15] if l[15] > l[17]: l[15], l[17] = l[17], l[15] if l[15] > l[18]: l[15], l[18] = l[18], l[15] if l[15] > l[19]: l[15], l[19] = l[19], l[15] if l[16] > l[17]: l[16], l[17] = l[17], l[16] if l[16] > l[18]: l[16], l[18] = l[18], 
l[16] if l[16] > l[19]: l[16], l[19] = l[19], l[16] if l[17] > l[18]: l[17], l[18] = l[18], l[17] if l[17] > l[19]: l[17], l[19] = l[19], l[17] if l[18] > l[19]: l[18], l[19] = l[19], l[18] return l print f([3, 12, 16, 8, 17, 6, 13, 0, 4, 15, 1, 14, 11, 18, 10, 5, 9, 7, 2, 19]) print f([2, 6, 11, 4, 7, 18, 19, 10, 15, 13, 3, 0, 17, 5, 8, 1, 14, 9, 16, 12]) print f([6, 12, 10, 7, 19, 15, 14, 5, 16, 1, 4, 11, 13, 2, 18, 9, 0, 3, 17, 8]) print f([1, 17, 13, 8, 9, 19, 18, 6, 5, 10, 12, 14, 2, 15, 0, 4, 11, 16, 7, 3]) print f([4, 7, 8, 6, 16, 10, 0, 5, 1, 3, 19, 2, 15, 12, 17, 11, 13, 18, 14, 9]) print f([14, 16, 11, 12, 5, 0, 10, 3, 1, 8, 17, 13, 4, 19, 9, 15, 6, 2, 7, 18]) print f([0, 3, 14, 9, 19, 13, 1, 7, 4, 17, 8, 16, 10, 5, 12, 6, 15, 11, 2, 18]) print f([17, 19, 3, 13, 15, 6, 16, 4, 0, 18, 8, 1, 9, 11, 2, 12, 7, 10, 5, 14]) print f([19, 5, 15, 1, 8, 2, 3, 12, 6, 14, 17, 7, 13, 10, 4, 18, 11, 9, 16, 0]) print f([13, 10, 11, 17, 19, 12, 14, 7, 5, 9, 2, 4, 18, 8, 6, 3, 16, 15, 0, 1])
46.162562
79
0.384377
2,695
9,371
1.336549
0.009648
0.158245
0.063298
0.026374
0.877013
0.349806
0
0
0
0
0
0.284501
0.246078
9,371
202
80
46.391089
0.225336
0
0
0
0
0
0
0
0
0
0
0
0
0
null
null
0
0
null
null
0.049505
0
0
1
null
0
0
0
1
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
5
7ae91d54f6551b3200a7ad1c7b4b024eb589bb18
91
py
Python
BackendBaggie/cartview/apps.py
Baggie-App/Updateapi
80f200d7ffd4695e6348ce6bb9a7a31a6b821e77
[ "MIT" ]
null
null
null
BackendBaggie/cartview/apps.py
Baggie-App/Updateapi
80f200d7ffd4695e6348ce6bb9a7a31a6b821e77
[ "MIT" ]
null
null
null
BackendBaggie/cartview/apps.py
Baggie-App/Updateapi
80f200d7ffd4695e6348ce6bb9a7a31a6b821e77
[ "MIT" ]
null
null
null
from django.apps import AppConfig class CartviewConfig(AppConfig): name = 'cartview'
15.166667
33
0.758242
10
91
6.9
0.9
0
0
0
0
0
0
0
0
0
0
0
0.164835
91
5
34
18.2
0.907895
0
0
0
0
0
0.087912
0
0
0
0
0
0
1
0
false
0
0.333333
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
7aeb3ca58882967e00e1a0f51dac95fb61f266aa
384
py
Python
s04_variaveis_tipos_de_dados/s04_exercicios/s04_exercicio_07.py
adeogliari/GeekUniversity_Python
1b6badc45ca1dfbaa2f42196fb2dedac417b866e
[ "MIT" ]
null
null
null
s04_variaveis_tipos_de_dados/s04_exercicios/s04_exercicio_07.py
adeogliari/GeekUniversity_Python
1b6badc45ca1dfbaa2f42196fb2dedac417b866e
[ "MIT" ]
null
null
null
s04_variaveis_tipos_de_dados/s04_exercicios/s04_exercicio_07.py
adeogliari/GeekUniversity_Python
1b6badc45ca1dfbaa2f42196fb2dedac417b866e
[ "MIT" ]
null
null
null
""" 7) Leia uma temperatura em graus Fahrenheit e apresente-a convertida em graus Celsius. A fórmula de conversão é: C = (F-32.0)*5.0/9.0, sendo C a temperatura em Celsius e F a temperatura em Fahrenheit. """ fahrenheit = float(input('Digite a temperatura em graus Fahrenheit F: \n')) print(f'A temperatura em graus Celsius de {fahrenheit}F é: {(fahrenheit - 32.0) * 5.0 / 9.0}ºC')
48
200
0.716146
69
384
3.985507
0.42029
0.236364
0.203636
0.203636
0.050909
0.050909
0
0
0
0
0
0.046584
0.161458
384
7
201
54.857143
0.807453
0.520833
0
0
0
0.5
0.75
0
0
0
0
0
0
1
0
false
0
0
0
0
0.5
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
7
7aec73e5242fe246bd53368966dcb8b97fdec44b
862
py
Python
software/openvisualizer/openvisualizer/openType/typeRssi.py
pedrohenriquegomes/openwsn-sw
24761e01dc1e8dbc351e61bd927c57facc64c155
[ "BSD-3-Clause" ]
26
2015-01-31T23:56:36.000Z
2021-04-10T20:26:11.000Z
software/openvisualizer/openvisualizer/openType/typeRssi.py
pedrohenriquegomes/openwsn-sw
24761e01dc1e8dbc351e61bd927c57facc64c155
[ "BSD-3-Clause" ]
33
2015-03-18T15:54:01.000Z
2018-03-08T14:28:50.000Z
software/openvisualizer/openvisualizer/openType/typeRssi.py
pedrohenriquegomes/openwsn-sw
24761e01dc1e8dbc351e61bd927c57facc64c155
[ "BSD-3-Clause" ]
80
2015-01-07T08:54:58.000Z
2020-05-06T21:33:41.000Z
# Copyright (c) 2010-2013, Regents of the University of California. # All rights reserved. # # Released under the BSD 3-Clause license as published at the link below. # https://openwsn.atlassian.net/wiki/display/OW/License import logging log = logging.getLogger('typeRssi') log.setLevel(logging.ERROR) log.addHandler(logging.NullHandler()) import openType class typeRssi(openType.openType): def __init__(self): # log log.info("creating object") # initialize parent class openType.openType.__init__(self) def __str__(self): return '{0} dBm'.format(self.rssi) #======================== public ========================================== def update(self,rssi): self.rssi = rssi #======================== private =========================================
27.806452
79
0.552204
87
862
5.333333
0.666667
0.051724
0
0
0
0
0
0
0
0
0
0.014663
0.208817
862
31
80
27.806452
0.665689
0.454756
0
0
0
0
0.065076
0
0
0
0
0
0
1
0.230769
false
0
0.153846
0.076923
0.538462
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
2
7aec991a251b8b5ba72c6f603e780ca170fd3d9c
16,092
py
Python
SMI/pycopia/SMI/Compile.py
kdart/pycopia
1446fabaedf8c6bdd4ab1fc3f0ea731e0ef8da9d
[ "Apache-2.0" ]
89
2015-03-26T11:25:20.000Z
2022-01-12T06:25:14.000Z
SMI/pycopia/SMI/Compile.py
kdart/pycopia
1446fabaedf8c6bdd4ab1fc3f0ea731e0ef8da9d
[ "Apache-2.0" ]
1
2015-07-05T03:27:43.000Z
2015-07-11T06:21:20.000Z
SMI/pycopia/SMI/Compile.py
kdart/pycopia
1446fabaedf8c6bdd4ab1fc3f0ea731e0ef8da9d
[ "Apache-2.0" ]
30
2015-04-30T01:35:54.000Z
2022-01-12T06:19:49.000Z
#!/usr/bin/python2.7 # -*- coding: utf-8 -*- # vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Compile module compiles SMI data into Python objects for use by the SNMP module. This started out clean, but now it's ugly. But at least it spits out something useful. """ from __future__ import print_function from __future__ import division import os import py_compile from pycopia import textutils from pycopia.SMI import SMI, Basetypes, Objects USERMIBPATH = os.environ.get("USERMIBPATH", os.path.join("/", "var", "tmp", "mibs")) # global name translation table # Since we convert MIB modules to Python modules, we can't have a dash in # the name. These are translated to underscores. TRANSTABLE = textutils.maketrans("-", "_") def convert_name(name): return name.translate(TRANSTABLE) # These are some of the attributes that the SNMP module needs, and are # exported "as-is". Other attributes are special-cased in the appropriate # generator method. 
EXPORTS = { "Type": ["status", "format", "units", "ranges", "enumerations"], "Node": ["access", "create", "status", "units"], "Macro": ["name", "status"], "Module": ["name", "path", "conformance", "language", "description"], "Group": ["name", "status"], "Value": ["val"], } # objects directly imported from SMI.Objects in the mib modules IMPORTED_OBJECTS = ["ColumnObject", "MacroObject", "NotificationObject", "RowObject", "ScalarObject", "NodeObject", "ModuleObject", "GroupObject"] def _classstr(tup): def _cstr(c): if type(c) is str: return c else: if c.__name__ in IMPORTED_OBJECTS: return c.__name__ else: return "%s.%s" % (c.__module__, c.__name__) return ", ".join(map(_cstr, tup)) # generic class producer. Returns source code string def genClass(sminode, baseclass, attrdict=None, doc=None): if not attrdict: attrdict = {} for attrname in EXPORTS[sminode.__class__.__name__]: val = getattr(sminode, attrname) if val is None: continue if type(val) is str: attrdict[attrname] = repr(val) else: attrdict[attrname] = val klassname = convert_name(sminode.name) parents = (baseclass,) s = [] if parents: s.append( "class %s(%s):" % (klassname, _classstr(parents)) ) else: s.append( "class %s(object):" % (klassname) ) if doc: s.append('\t"""%s"""' % doc) for key, val in attrdict.items(): if val: s.append( "\t%s = %s" % (key, val) ) if len(s) == 1: s.append("\tpass") s.append("\n") return "\n".join(s) # generates a repr for SMI.Objects.IndexObjects class IndexGenerator(list): def __init__(self, init=None, implied=False): super(IndexGenerator, self).__init__(init or []) self.implied = bool(implied) def __repr__(self): lv = ", ".join(self) return "pycopia.SMI.Objects.IndexObjects([%s], %r)" % (lv, self.implied) class ListGenerator(list): def __init__(self, init=None): super(ListGenerator, self).__init__(init or []) def __repr__(self): return "[%s]" % (", ".join(self), ) class ObjectSourceGenerator(object): """ Usage: ObjectSourceGenerator(fileobject, modulename) Parameters: 
fileobject = A file-type object. modulename = An SMI module name. """ def __init__(self, fo, oidfo, smimodule): self.smimodule = smimodule self.fo = fo self.oidfo = oidfo self.pymodname = convert_name(smimodule.name) #self.tempmodule = new.module(self.pymodname) self.imports = {} self.fo.write("""# python # This file is generated by a program (mib2py). Any edits will be lost. from pycopia.aid import Enum import pycopia.SMI.Basetypes Range = pycopia.SMI.Basetypes.Range Ranges = pycopia.SMI.Basetypes.Ranges from pycopia.SMI.Objects import %s """ % (", ".join(IMPORTED_OBJECTS),)) self.oidfo.write("""# python # This file is generated by a program (mib2py). import sys {modname} = sys.modules["pycopia.mibs.{modname}"] OIDMAP = {{ """.format(modname=self.pymodname)) def finalize(self): self.oidfo.write("}\n") handle_specials(self.fo, self.smimodule) self.fo.write(""" # Add to master OIDMAP. from pycopia import SMI SMI.update_oidmap(__name__) """) def add_comment(self, text): self.fo.write("# %s\n" % text) def genImports(self): self.fo.write("# imports \n") for node in self.smimodule.get_imports(): if node.module not in self.imports: self.imports[node.module] = [] self.imports[node.module].append(node.name) for modname, implist in self.imports.items(): impnames = [convert_name(s) for s in implist] self.fo.write("from pycopia.mibs.%s import %s\n" % (convert_name(modname), ", ".join(impnames))) self.fo.write("\n") def genModule(self): self.fo.write(genClass(self.smimodule, Objects.ModuleObject)) def genTypes(self): self.fo.write("# types \n") for smi_type in self.smimodule.get_types(): name = convert_name(smi_type.name) if hasattr(Basetypes, name ): self.fo.write("%s = pycopia.SMI.Basetypes.%s\n" % (name, name)) else: self.fo.write("\n") if smi_type.snmptype: baseclass = getattr(Basetypes, smi_type.snmptype) self.fo.write(genClass(smi_type, baseclass)) def genNodes(self): self.fo.write("# nodes\n") for node in self.smimodule.get_nodes(SMI.SMI_NODEKIND_NODE): if node.name: 
initdict = {} initdict["name"] = repr(node.name) initdict["OID"] = repr(Basetypes.ObjectIdentifier(node.OID)) self.fo.write(genClass(node, Objects.NodeObject, initdict)) self._genOIDItem(node.OID, node.name) self.fo.write("\n") def genScalars(self): self.fo.write("# scalars \n") for scalar in self.smimodule.get_scalars(): if scalar.status not in \ (SMI.SMI_STATUS_DEPRECATED, SMI.SMI_STATUS_CURRENT, SMI.SMI_STATUS_MANDATORY): continue # do not expose optional or obsolete objects initdict = {} initdict["syntaxobject"] = so = self._getSyntax(scalar) if so.find("Enumeration") >= 0: initdict["enumerations"] = scalar.syntax.enumerations initdict["OID"] = repr(Basetypes.ObjectIdentifier(scalar.OID)) self.fo.write(genClass(scalar, Objects.ScalarObject, initdict)) self.fo.write("\n") self._genOIDItem(scalar.OID, scalar.name) def genColumns(self): self.fo.write("# columns\n") for col in self.smimodule.get_columns(): initdict = {} initdict["syntaxobject"] = so = self._getSyntax(col) if so.find("Enumeration") >= 0: initdict["enumerations"] = col.syntax.enumerations initdict["OID"] = repr(Basetypes.ObjectIdentifier(col.OID)) self.fo.write(genClass(col, Objects.ColumnObject, initdict)) self.fo.write("\n") self._genOIDItem(col.OID, col.name) def genRows(self): self.fo.write("# rows \n") for row in self.smimodule.get_rows(): if row.status not in (SMI.SMI_STATUS_DEPRECATED, SMI.SMI_STATUS_CURRENT, SMI.SMI_STATUS_MANDATORY): continue initdict = {} columns = "{%s}" % ", ".join(["%r: %s" % (s, s) for s in self._get_colnames(row)]) initdict["columns"] = columns initdict["index"] = self._genIndexObjects(row) rowstatus = row.rowstatus if rowstatus: initdict["rowstatus"] = row.rowstatus.name initdict["OID"] = repr(Basetypes.ObjectIdentifier(row.OID)) self.fo.write(genClass(row, Objects.RowObject, initdict)) self.fo.write("\n") def genMacros(self): self.fo.write("# macros\n") for node in self.smimodule.get_macros(): self.fo.write(genClass(node, Objects.MacroObject)) self.fo.write("\n") def 
genNotifications(self): self.fo.write("# notifications (traps) \n") for notif in self.smimodule.get_notifications(): initdict = {"OID": repr(Basetypes.ObjectIdentifier(notif.OID))} self.fo.write(genClass(notif, Objects.NotificationObject, initdict)) self._genOIDItem(notif.OID, notif.name) def genGroups(self): self.fo.write("# groups \n") for group in self.smimodule.get_groups(): if group.status not in (SMI.SMI_STATUS_CURRENT, SMI.SMI_STATUS_DEPRECATED, SMI.SMI_STATUS_MANDATORY): continue initdict = {} initdict["OID"] = repr(Basetypes.ObjectIdentifier(group.OID)) grouplist = [] for el in group.get_elements(): n = el.get_node() grouplist.append(n.name) initdict["group"] = "[%s]" % ", ".join(grouplist) self.fo.write(genClass(group, Objects.GroupObject, initdict)) self._genOIDItem(group.OID, group.name) def genCompliances(self): self.fo.write("# compliances \n") for comp in self.smimodule.get_compliances(): if comp.status not in (SMI.SMI_STATUS_CURRENT, SMI.SMI_STATUS_DEPRECATED, SMI.SMI_STATUS_MANDATORY): continue initdict = {} mandlist = ListGenerator() for el in comp.get_elements(): mandlist.append(el.get_node().name) initdict["mandatory_group"] = mandlist refs = ListGenerator() for ref in comp.get_refinements(): # XXX if ref.syntax: n = ref.get_node() refs.append(self._getSyntax(ref)) # XXX initdict["refinements"] = repr(refs) self.fo.write(genClass(comp, Objects.Compliance, initdict)) self.fo.write("\n") def genCapabilities(self): self.fo.write("# capabilities \n") for cap in self.smimodule.get_capabilities(): if cap.status not in (SMI.SMI_STATUS_CURRENT, SMI.SMI_STATUS_DEPRECATED, SMI.SMI_STATUS_MANDATORY): continue initdict = {} # XXX self.fo.write(genClass(cap, Objects.Capability, initdict)) self.fo.write("\n") # utility methods def _get_colnames(self, row): rv = [] for c in row.get_children(): if c.nodekind == SMI.SMI_NODEKIND_COLUMN: rv.append(c.name) return rv def _genOIDItem(self, OID, classname): self.oidfo.write('%r: %s.%s,\n' % (str(OID), self.pymodname, 
convert_name(classname))) def _genIndexObjects(self, smirow): index = smirow.get_index() if index is None: # old, old v1 MIBS with no index return gen = IndexGenerator(implied=index.implied) for n in index: gen.append(n.name) if smirow.indexkind == SMI.SMI_INDEX_AUGMENT: for node in index: mod = node.get_module() self.fo.write("from %s import %s\n" % (convert_name(mod.name), node.name)) return repr(gen) def _getSyntax(self, node): syntax = node.syntax if syntax is None: print ("***** unable to get SYNTAX for node %s" % (node.name)) return "UNKNOWN" if not syntax.name: syntax = syntax.get_parent() syntaxname = syntax.name if not syntaxname: syntaxname = syntax.snmptype if hasattr(Objects, syntaxname): cl = getattr(Objects, syntaxname) return "%s.%s" % (cl.__module__, cl.__name__) elif hasattr(Basetypes, syntaxname): cl = getattr(Basetypes, syntaxname) return "%s.%s" % (cl.__module__, cl.__name__) # else must be a locally defined type. return syntaxname def genAll(self): self.genImports() self.genModule() self.genNodes() self.genMacros() self.genTypes() self.genScalars() self.genColumns() self.genRows() self.genNotifications() self.genGroups() #self.genCompliances() self.genCapabilities() self.finalize() # some modules require special handling. Crude, hopefully temporary, hack def handle_specials(fo, smimodule): fo.write("\n# special additions\n") handler = {'SNMPv2-SMI': _handle_smi, 'SNMPv2-TC': _handle_tc}.get(smimodule.name, _handle_default) handler(fo, smimodule) def _handle_smi(fo, mod): fo.write("\n") for name in ("ObjectSyntax", "SimpleSyntax", "ApplicationSyntax"): fo.write("%s = pycopia.SMI.Basetypes.%s\n" % (name, name)) def _handle_tc(fo, mod): fo.write("\n") for name in ("Bits", "BITS"): fo.write("%s = pycopia.SMI.Basetypes.%s\n" % (name, name)) def _handle_default(fo, mod): pass def _compile_module(smimodule): if not smimodule.name: return # unnamed from where? 
fname = os.path.join(USERMIBPATH, convert_name(smimodule.name)+".py") oidfname = os.path.join(USERMIBPATH, convert_name(smimodule.name)+"_OID.py") if not os.path.exists(fname): print ("Compiling module", smimodule.name) fd = open(fname, "w") oidfd = open(oidfname, "w") generator = ObjectSourceGenerator(fd, oidfd, smimodule) generator.genAll() fd.close() try: py_compile.compile(fname) except Exception as err: print ("***", err) else: print (" +++ file %r exists, skipping." % (fname, )) def compile_module(modname, preload=None, all=False): if preload: for pm in preload: SMI.load_module(pm) smimodule = SMI.get_module(modname) if not smimodule: print ("Could not load module", modname) return if all: for dep in _get_dependents(smimodule): _compile_module(SMI.get_module(dep)) _compile_module(smimodule) def _get_dependents(module, hash=None): h = hash or {} for imp in module.get_imports(): h[imp.module] = True _get_dependents(SMI.get_module(imp.module), h) return h.keys() def compile_everything(all=False): count = 0 paths = SMI.get_path().split(":") for dir in paths: print ("Looking in", dir) for modname in os.listdir(dir): modpath = os.path.join(dir, modname) if os.path.isfile(modpath): print ("Found module", modname, "compiling...") try: compile_module(modname, None, all) except SMI.SmiError as err: print ("***[", err, "]***") count += 1 SMI.clear() # clear out mememory SMI.init() print ("Found and compiled %d MIBS." % (count, )) if __name__ == "__main__": from pycopia import autodebug compile_everything(True)
35.135371
108
0.59222
1,881
16,092
4.945774
0.213184
0.031603
0.043749
0.019349
0.193486
0.168118
0.127916
0.091153
0.06987
0.06987
0
0.001731
0.282066
16,092
457
109
35.212254
0.803514
0.106078
0
0.149296
0
0
0.114955
0.017857
0
0
0
0
0
1
0.101408
false
0.005634
0.076056
0.005634
0.23662
0.028169
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7aed93f1001f0b20c9235af1ae0433ce9cdab764
960
py
Python
burotel/indico_burotel/migrations/20211029_1202_564d660d4ddb_create_count_weekdays_function.py
bpedersen2/indico-plugins-cern
c4f06d11d981c316fc8de2892758484deb58e2f5
[ "MIT" ]
null
null
null
burotel/indico_burotel/migrations/20211029_1202_564d660d4ddb_create_count_weekdays_function.py
bpedersen2/indico-plugins-cern
c4f06d11d981c316fc8de2892758484deb58e2f5
[ "MIT" ]
null
null
null
burotel/indico_burotel/migrations/20211029_1202_564d660d4ddb_create_count_weekdays_function.py
bpedersen2/indico-plugins-cern
c4f06d11d981c316fc8de2892758484deb58e2f5
[ "MIT" ]
null
null
null
"""Create count_weekdays function Revision ID: 564d660d4ddb Revises: Create Date: 2021-10-29 12:02:59.409012 """ import textwrap from alembic import op from sqlalchemy.sql.ddl import CreateSchema, DropSchema # revision identifiers, used by Alembic. revision = '564d660d4ddb' down_revision = None branch_labels = None depends_on = None SQL_FUNCTION_COUNT_WEEKDAYS = textwrap.dedent(''' CREATE FUNCTION plugin_burotel.count_weekdays(from_date date, to_date date) RETURNS bigint AS $$ SELECT COUNT(*) FROM generate_series(from_date, to_date, '1 day'::interval) d WHERE extract('dow' FROM d) NOT IN (0, 6) $$ LANGUAGE SQL IMMUTABLE STRICT; ''') def upgrade(): op.execute(CreateSchema('plugin_burotel')) op.execute(SQL_FUNCTION_COUNT_WEEKDAYS) def downgrade(): op.execute('DROP FUNCTION plugin_burotel.count_weekdays(from_date date, to_date date)') op.execute(DropSchema('plugin_burotel'))
23.414634
91
0.730208
127
960
5.338583
0.503937
0.09587
0.044248
0.070796
0.165192
0.165192
0.165192
0.165192
0.165192
0.165192
0
0.046482
0.170833
960
40
92
24
0.805276
0.152083
0
0
0
0
0.511772
0.128872
0
0
0
0
0
1
0.086957
false
0
0.130435
0
0.217391
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7aef5d12e97ce42126f8f1a2e3852a9ec848f12a
354
py
Python
pywinrt/winsdk/windows/graphics/capture/interop/__init__.py
pywinrt/python-winsdk
1e2958a712949579f5e84d38220062b2cec12511
[ "MIT" ]
3
2022-02-14T14:53:08.000Z
2022-03-29T20:48:54.000Z
pywinrt/winsdk/windows/graphics/capture/interop/__init__.py
pywinrt/python-winsdk
1e2958a712949579f5e84d38220062b2cec12511
[ "MIT" ]
4
2022-01-28T02:53:52.000Z
2022-02-26T18:10:05.000Z
pywinrt/winsdk/windows/graphics/capture/interop/__init__.py
pywinrt/python-winsdk
1e2958a712949579f5e84d38220062b2cec12511
[ "MIT" ]
null
null
null
# WARNING: Please don't edit this file. It was generated by Python/WinRT v1.0.0-beta.4 import winsdk _ns_module = winsdk._import_ns_module("Windows.Graphics.Capture.Interop") try: import winsdk.windows.graphics.capture except Exception: pass create_for_monitor = _ns_module.create_for_monitor create_for_window = _ns_module.create_for_window
25.285714
86
0.805085
55
354
4.872727
0.6
0.119403
0.164179
0.126866
0
0
0
0
0
0
0
0.012821
0.118644
354
13
87
27.230769
0.846154
0.237288
0
0
1
0
0.119403
0.119403
0
0
0
0
0
1
0
false
0.125
0.375
0
0.375
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
1
1
0
0
0
0
3
7aef5d40168535dab3efb9c32c0b60a29e7ae107
2,327
py
Python
models/utils.py
ZephyrII/competitive_colaboration
a557d1e23ef2c0b8e3794f085a79bfffb860f9df
[ "MIT" ]
357
2019-03-12T07:17:32.000Z
2022-03-24T14:13:24.000Z
models/utils.py
DevLooptt/SJTU-CS386-2021Fall-DIP-Project
2167e089be80ca01911ba55c07b83c9f26f147e7
[ "MIT" ]
27
2019-03-11T19:16:11.000Z
2021-05-30T13:30:19.000Z
models/utils.py
DevLooptt/SJTU-CS386-2021Fall-DIP-Project
2167e089be80ca01911ba55c07b83c9f26f147e7
[ "MIT" ]
66
2019-03-27T14:16:22.000Z
2021-11-11T12:40:33.000Z
# NOTE: the original `from __future__ import division` was dropped — this is
# Python-3-only PyTorch code, where true division is already the default.
import math

import torch
import torch.nn as nn


def conv(in_planes, out_planes, stride=1, batch_norm=False):
    """3x3 convolution + ReLU block, optionally with batch normalization.

    When ``batch_norm`` is True the conv bias is disabled: BatchNorm's
    affine shift makes a preceding bias redundant.
    """
    if batch_norm:
        return nn.Sequential(
            nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False),
            nn.BatchNorm2d(out_planes, eps=1e-3),
            nn.ReLU(inplace=True)
        )
    return nn.Sequential(
        nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=True),
        nn.ReLU(inplace=True)
    )


def deconv(in_planes, out_planes, batch_norm=False):
    """2x upsampling block: stride-2 ConvTranspose2d followed by a 3x3 conv + ReLU."""
    if batch_norm:
        return nn.Sequential(
            nn.ConvTranspose2d(in_planes, out_planes, kernel_size=4, stride=2, padding=1, bias=True),
            nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(out_planes, eps=1e-3),
            nn.ReLU(inplace=True)
        )
    return nn.Sequential(
        nn.ConvTranspose2d(in_planes, out_planes, kernel_size=4, stride=2, padding=1, bias=True),
        nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1, padding=1, bias=True),
        nn.ReLU(inplace=True)
    )


def predict_depth(in_planes, with_confidence):
    """Prediction head: 1 output channel for depth, plus 1 for confidence if requested."""
    return nn.Conv2d(in_planes, 2 if with_confidence else 1, kernel_size=3, stride=1, padding=1, bias=True)


def post_process_depth(depth, activation_function=None, clamp=False, clamp_range=(10, 80)):
    """Apply an optional activation and optional clamping, then drop the channel dim.

    Generalized: the previously hard-coded clamp bounds (10, 80) are now the
    ``clamp_range`` parameter; the default preserves the original behavior.

    Returns ``depth[:, 0]`` — the first channel (the depth map; channel 1,
    when present, is the confidence from ``predict_depth``).
    """
    if activation_function is not None:
        depth = activation_function(depth)
    if clamp:
        depth = depth.clamp(*clamp_range)
    return depth[:, 0]


def adaptative_cat(out_conv, out_deconv, out_depth_up):
    """Concatenate three feature maps along channels, cropping the last two
    to ``out_conv``'s spatial size (deconv output can be one pixel larger)."""
    out_deconv = out_deconv[:, :, :out_conv.size(2), :out_conv.size(3)]
    out_depth_up = out_depth_up[:, :, :out_conv.size(2), :out_conv.size(3)]
    return torch.cat((out_conv, out_deconv, out_depth_up), 1)


def init_modules(net):
    """Initialize all submodules of ``net`` in place.

    Conv layers get He-style init (normal with std sqrt(2/fan_out), where
    fan_out = k*k*out_channels); BatchNorm weights are set to 1; all biases
    are zeroed.
    """
    for m in net.modules():
        if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
            n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            m.weight.data.normal_(0, math.sqrt(2 / n))
            if m.bias is not None:
                m.bias.data.zero_()
        elif isinstance(m, nn.BatchNorm2d):
            m.weight.data.fill_(1)
            m.bias.data.zero_()
34.220588
107
0.644177
341
2,327
4.190616
0.211144
0.075577
0.083975
0.071379
0.550035
0.550035
0.550035
0.550035
0.475857
0.418474
0
0.029809
0.235926
2,327
67
108
34.731343
0.773903
0
0
0.346154
0
0
0
0
0
0
0
0
0
1
0.115385
false
0
0.076923
0.019231
0.326923
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7af03651a5bc87d941460e143a24cf2c0a69bf1a
516
py
Python
seminar-roulette/backend/migrations/0017_auto_20201029_2151.py
olliegardner/seminar-roulette
c8330258778dd7f71b1289c5dfe611e5637cf71d
[ "MIT" ]
null
null
null
seminar-roulette/backend/migrations/0017_auto_20201029_2151.py
olliegardner/seminar-roulette
c8330258778dd7f71b1289c5dfe611e5637cf71d
[ "MIT" ]
null
null
null
seminar-roulette/backend/migrations/0017_auto_20201029_2151.py
olliegardner/seminar-roulette
c8330258778dd7f71b1289c5dfe611e5637cf71d
[ "MIT" ]
1
2020-10-07T16:21:59.000Z
2020-10-07T16:21:59.000Z
# Generated by Django 3.1.2 on 2020-10-29 21:51 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('backend', '0016_auto_20201029_2133'), ] operations = [ migrations.AlterModelOptions( name='seminarhistory', options={'verbose_name_plural': 'seminar histories'}, ), migrations.RemoveConstraint( model_name='seminarhistory', name='Rating must be between 1 and 10', ), ]
23.454545
65
0.606589
51
516
6.019608
0.784314
0.117264
0
0
0
0
0
0
0
0
0
0.092643
0.28876
516
21
66
24.571429
0.743869
0.087209
0
0.133333
1
0
0.266525
0.049041
0
0
0
0
0
1
0
false
0
0.066667
0
0.266667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
7af0730950718ad5e375da7065398646acf80fd5
1,005
py
Python
saleor/graphql/shipping/resolvers.py
victor-abz/saleor
f8e2b49703d995d4304d5a690dbe9c83631419d0
[ "CC-BY-4.0" ]
1
2022-03-25T00:21:11.000Z
2022-03-25T00:21:11.000Z
saleor/graphql/shipping/resolvers.py
victor-abz/saleor
f8e2b49703d995d4304d5a690dbe9c83631419d0
[ "CC-BY-4.0" ]
86
2021-11-01T04:51:55.000Z
2022-03-30T16:30:16.000Z
saleor/graphql/shipping/resolvers.py
victor-abz/saleor
f8e2b49703d995d4304d5a690dbe9c83631419d0
[ "CC-BY-4.0" ]
1
2021-12-28T18:02:49.000Z
2021-12-28T18:02:49.000Z
from prices import MoneyRange

from ...shipping import models
from ...shipping.interface import ShippingMethodData
from ..channel import ChannelQsContext
from ..translations.resolvers import resolve_translation


def resolve_shipping_zones(channel_slug):
    """Return all shipping zones, scoped to a channel when a slug is given."""
    qs = (
        models.ShippingZone.objects.filter(channels__slug=channel_slug)
        if channel_slug
        else models.ShippingZone.objects.all()
    )
    return ChannelQsContext(qs=qs, channel_slug=channel_slug)


def resolve_price_range(channel_slug):
    """Return the (min, max) shipping price range for a channel, or None when
    the channel has no shipping method listings."""
    # TODO: Add dataloader.
    listings = models.ShippingMethodChannelListing.objects.filter(
        channel__slug=str(channel_slug)
    )
    prices = [listing.get_total() for listing in listings]
    if not prices:
        return None
    return MoneyRange(min(prices), max(prices))


def resolve_shipping_translation(root: ShippingMethodData, info, language_code):
    """Resolve a translation for a shipping method; external methods have none."""
    return None if root.is_external else resolve_translation(root, info, language_code)
32.419355
83
0.774129
116
1,005
6.491379
0.413793
0.116866
0.047809
0.090305
0
0
0
0
0
0
0
0
0.151244
1,005
30
84
33.5
0.882767
0.020896
0
0
0
0
0
0
0
0
0
0.033333
0
1
0.142857
false
0
0.238095
0
0.571429
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
1
0
0
2
7af19d3043c8b6728ea0912a528bf1eada7c558c
2,806
py
Python
scripts/simulator_experiments/real_government_strategy_experiments.py
alfred100p/PandemicSimulator
2cb22c4b5c55d54a420fd104c74918d76189feb9
[ "Apache-2.0" ]
null
null
null
scripts/simulator_experiments/real_government_strategy_experiments.py
alfred100p/PandemicSimulator
2cb22c4b5c55d54a420fd104c74918d76189feb9
[ "Apache-2.0" ]
null
null
null
scripts/simulator_experiments/real_government_strategy_experiments.py
alfred100p/PandemicSimulator
2cb22c4b5c55d54a420fd104c74918d76189feb9
[ "Apache-2.0" ]
null
null
null
# Confidential, Copyright 2020, Sony Corporation of America, All rights reserved. from matplotlib import pyplot as plt import pandemic_simulator as ps def eval_government_strategies(experiment_name: str, opts: ps.sh.EvaluationOpts) -> None: data_saver = ps.data.H5DataSaver(experiment_name, path=opts.data_saver_path) print('Running Swedish strategy') ps.sh.experiment_main(sim_config=opts.default_sim_config, sim_opts=ps.env.PandemicSimOpts(), data_saver=data_saver, pandemic_regulations=ps.sh.swedish_regulations, stages_to_execute=swedish_strategy, num_random_seeds=opts.num_seeds, max_episode_length=opts.max_episode_length, exp_id=0) print('Running Italian strategy') ps.sh.experiment_main(sim_config=opts.default_sim_config, sim_opts=ps.env.PandemicSimOpts(), data_saver=data_saver, pandemic_regulations=ps.sh.italian_regulations, stages_to_execute=italian_strategy, num_random_seeds=opts.num_seeds, max_episode_length=opts.max_episode_length, exp_id=1) if __name__ == '__main__': swedish_strategy = [ps.data.StageSchedule(stage=0, end_day=3), ps.data.StageSchedule(stage=1, end_day=None)] italian_strategy = [ps.data.StageSchedule(stage=0, end_day=3), ps.data.StageSchedule(stage=1, end_day=8), ps.data.StageSchedule(stage=2, end_day=13), ps.data.StageSchedule(stage=3, end_day=25), ps.data.StageSchedule(stage=4, end_day=59), ps.data.StageSchedule(stage=3, end_day=79), ps.data.StageSchedule(stage=2, end_day=None)] opts = ps.sh.EvaluationOpts( num_seeds=30, max_episode_length=180, enable_warm_up=False ) exp_name = 'swedish_italian_strategies' try: eval_government_strategies(exp_name, opts) except ValueError: # Expect a value error because we are reusing the same directory. pass ps.sh.make_evaluation_plots(exp_name=exp_name, data_saver_path=opts.data_saver_path, param_labels=['SWE', 'ITA'], bar_plot_xlabel='Real Government Strategies', annotate_stages=True, show_cumulative_reward=False, show_time_to_peak=False, show_pandemic_duration=True) plt.show()
45.258065
89
0.580542
309
2,806
4.957929
0.36246
0.039164
0.111619
0.140992
0.45953
0.432115
0.432115
0.351175
0.351175
0.351175
0
0.017307
0.341055
2,806
61
90
46
0.811249
0.050962
0
0.2
0
0
0.042857
0.009774
0
0
0
0
0
1
0.02
false
0.02
0.04
0
0.06
0.04
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7af2b9d69eec342a533a8a7c40f6317b22a86767
3,285
py
Python
EGGS_labrad/clients/ARTIQ_client/DAC_client.py
EGGS-Experiment/EGGS_Control
c29b3ab0e30dcb6e01d1ca3212ac64ad1506143b
[ "MIT" ]
2
2021-12-26T05:00:54.000Z
2021-12-30T17:15:49.000Z
EGGS_labrad/clients/ARTIQ_client/DAC_client.py
EGGS-Experiment/EGGS_Control
c29b3ab0e30dcb6e01d1ca3212ac64ad1506143b
[ "MIT" ]
null
null
null
EGGS_labrad/clients/ARTIQ_client/DAC_client.py
EGGS-Experiment/EGGS_Control
c29b3ab0e30dcb6e01d1ca3212ac64ad1506143b
[ "MIT" ]
null
null
null
from twisted.internet.defer import inlineCallbacks

from EGGS_labrad.clients import GUIClient
from EGGS_labrad.config.device_db import device_db
from EGGS_labrad.clients.ARTIQ_client.DAC_gui import DAC_gui

from copy import deepcopy  # NOTE(review): unused in this file — confirm before removing.

# LabRAD signal ID used to subscribe to dac_changed notifications.
DACID = 659312


class DAC_client(GUIClient):
    """
    Client for an ARTIQ Fastino/Zotino board.
    """

    name = "Fastino Client"
    # Maps attribute name -> LabRAD server name; GUIClient exposes the
    # connection as self.aq.
    servers = {'aq': 'ARTIQ Server'}

    def getgui(self):
        # Lazily construct the GUI the first time it is requested.
        if self.gui is None:
            self.gui = DAC_gui(self.dac_list)
        return self.gui

    @inlineCallbacks
    def initClient(self):
        """Discover DAC devices and subscribe to dac_changed signals."""
        # device dictionary
        self.dac_list = {}
        # get devices
        self._getDevices(device_db)
        # connect to signals
        yield self.aq.signal__dac_changed(DACID)
        yield self.aq.addListener(listener=self.updateChannel, source=None, ID=DACID)

    def _getDevices(self, device_db):
        """Populate self.dac_list with every Zotino/Fastino entry in device_db."""
        # get devices
        for name, params in device_db.items():
            if 'class' not in params:
                continue
            elif params['class'] in ('Zotino', 'Fastino'):
                self.dac_list[name] = {}

    #@inlineCallbacks
    def initData(self):
        # todo: read DAC register values and update
        pass

    def initGUI(self):
        """Wire GUI widget signals to ARTIQ server calls.

        Lambdas bind the loop variable via a default argument
        (``_channel_num=channel_num``) to avoid the late-binding pitfall.
        """
        # todo: fix global ofs
        self.gui.zotino_global_ofs.valueChanged.connect(lambda voltage_mu: self.aq.dac_ofs(voltage_mu, 'mu'))
        for dac_name, dac_channels in self.dac_list.items():
            for channel_num, channel_gui in dac_channels.items():
                channel_gui.dac.valueChanged.connect(lambda voltage_mu, _channel_num=channel_num: self.aq.dac_set(_channel_num, voltage_mu, 'mu'))
                channel_gui.resetswitch.clicked.connect(lambda _channel_num=channel_num: self.aq.dac_set(_channel_num, 0, 'mu'))
                if "ZOTINO" in dac_name.upper():
                    channel_gui.off.valueChanged.connect(lambda voltage_mu, _channel_num=channel_num: self.aq.dac_offset(_channel_num, voltage_mu, 'mu'))
                    # NOTE(review): the gain widget also calls dac_offset —
                    # looks like a copy-paste of the line above; confirm
                    # whether this should call a gain-setting method instead.
                    channel_gui.gain.valueChanged.connect(lambda gain_mu, _channel_num=channel_num: self.aq.dac_offset(_channel_num, gain_mu, 'mu'))
                    # NOTE(review): this lambda returns self.calibrate without
                    # calling it — the click is effectively a no-op; confirm.
                    channel_gui.calibrateswitch.clicked.connect(lambda: self.calibrate)
                # lock widgets on startup
                channel_gui.lock(False)

    def updateChannel(self, c, signal):
        """Handle a dac_changed signal.

        ``signal`` is a (channel_number, parameter_name, value) tuple.
        NOTE(review): 'gain' and 'off' both map to the dac widget here, and an
        unknown parameter name leaves gui_element as None (AttributeError on
        setEnabled) — confirm both are intended.
        """
        num, param, val = signal
        channel_gui = self.gui.channel_widgets[num]
        gui_element = None
        if param == 'dac':
            gui_element = channel_gui.dac
        elif param == 'gain':
            gui_element = channel_gui.dac
        elif param == 'off':
            gui_element = channel_gui.dac
        elif param == 'ofs':
            gui_element = channel_gui.zotino_global_ofs
        # adjust value without causing the signal to trigger
        gui_element.setEnabled(False)
        gui_element.setValue(val)
        gui_element.setEnabled(True)


if __name__ == "__main__":
    from EGGS_labrad.clients import runClient
    runClient(DAC_client)
37.758621
109
0.60274
383
3,285
4.921671
0.287206
0.068966
0.023873
0.04244
0.249337
0.202653
0.202653
0.129443
0.129443
0.129443
0
0.0031
0.312633
3,285
86
110
38.197674
0.831709
0.078234
0
0.05
0
0
0.029294
0
0
0
0
0.011628
0
1
0.1
false
0.016667
0.1
0
0.266667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
7af6e15055517b76f8b5a8e5e342b76b70bd47f6
6,156
py
Python
apps/profiles/views.py
ezl/hnofficehours
3729eca064998bd2d0a9ba1b4fe7e56ccc57324b
[ "MIT" ]
2
2015-11-05T13:47:44.000Z
2020-07-20T19:57:45.000Z
apps/profiles/views.py
ezl/hnofficehours
3729eca064998bd2d0a9ba1b4fe7e56ccc57324b
[ "MIT" ]
null
null
null
apps/profiles/views.py
ezl/hnofficehours
3729eca064998bd2d0a9ba1b4fe7e56ccc57324b
[ "MIT" ]
null
null
null
# NOTE(review): legacy Python 2 / old Django code — `except Exception, e`
# syntax, django.utils.simplejson, function-based views with locals().
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render_to_response
from datetime import datetime, timedelta
from django.contrib.auth.models import User
from django.contrib import messages
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.views.generic.simple import direct_to_template

from schedule.models import Event
from schedule.periods import Period

from profiles.models import *
from profiles.forms import ProfileForm, ProfileSkillsForm
from profiles.controllers import tag_clean

from django.contrib.auth.decorators import login_required
from django.utils import simplejson


def userprofile(request, username=None, template_name='profiles/profile.html'):
    # Renders the profile template with an empty ProfileForm; note the
    # template sees every local via locals().
    form = ProfileForm()
    return render_to_response(template_name, locals(), context_instance=RequestContext(request))


def ajax_view(request, profile_id, skill_id, verb):
    """AJAX endpoint: apply `verb` ('remove') to a profile/skill pair and
    return a JSON status payload."""
    datadict = {'profile_id':profile_id, 'skill_id':skill_id, 'verb':verb}
    profile = Profile.objects.get(id = profile_id)
    skill = Skill.objects.get(id = skill_id)
    if verb == "remove":
        try:
            profile.skills.remove(skill)
        except Exception, e:
            datadict['status'] = "failure"
        else:
            datadict['status'] = "success"
        return HttpResponse(simplejson.dumps(datadict))
    else:
        return HttpResponse("verb unrecognized")


def ajax_toggle_availability(request):
    """Flip the logged-in user's is_available flag; JSON status response."""
    datadict = dict()
    datadict['status'] = "failure"
    if request.user.is_authenticated():
        try:
            user = request.user
            profile = user.get_profile()
            profile.is_available = not profile.is_available
            profile.save()
        except:
            # best-effort: any failure leaves status as "failure"
            pass
        else:
            datadict['status'] = "success"
            datadict['availability'] = profile.is_available
    return HttpResponse(simplejson.dumps(datadict))


def set_availability(request,set_status):
    """Set the logged-in user's availability to bool(int(set_status))."""
    datadict = dict()
    datadict['status'] = "failure"
    if request.user.is_authenticated():
        try:
            user = request.user
            profile = user.get_profile()
            profile.is_available = bool(int(set_status)) # someone fix this casting for me please
            profile.save()
        except:
            pass
        else:
            datadict['status'] = "success"
            datadict['availability'] = profile.is_available
    if request.is_ajax():
        return HttpResponse(simplejson.dumps(datadict))
    else:
        return HttpResponse("GOOBER!, ENABLE JAVASCRIPT! "+simplejson.dumps(datadict))


def _can_view_full_profile(user):
    # for now just check if user is logged in, later there may be karma and/or
    # other requirements.
    return user.is_authenticated()


def list_profiles_by_skill(request, skill):
    # Substring match on skill name, then all users holding any matched skill.
    skills = Skill.objects.filter(name__contains=skill)
    qs = User.objects.filter(profile__skills__in=skills).distinct()
    return list_profiles(request, qs=qs)


def list_profiles(request, qs=None, template_name='profiles/list_profiles.html'):
    """Display a list of Users

    If qs == None, return list of all Users
    Optionally pass a qs of users
    """
    if qs == None:
        users = User.objects.all()
    else:
        users = qs
    return render_to_response(template_name, locals(), context_instance=RequestContext(request))


def view_profile(request, username, template_name='profiles/view_profile.html'):
    """Public profile page with the user's office hours for the next 30 days."""
    user = get_object_or_404(User, username=username)
    display_full_profile = _can_view_full_profile(request.user)
    events = Event.objects.filter(creator=user)
    start = datetime.now()
    end = start + timedelta(days=30)
    period = Period(events=events, start=start, end=end)
    office_hours = period.get_occurrences()
    return render_to_response(template_name, locals(), context_instance=RequestContext(request))


@login_required
def profile(request, template_name="profiles/edit_profile.html"):
    """Edit view for the logged-in user's own profile and skills.

    POST dispatches on the 'origin' field: "profile" updates profile fields,
    anything else updates skills.
    """
    user = request.user
    profile = user.profile

    def update_profile():
        # Re-bind the posted data to the user's existing profile instance.
        profile_form = ProfileForm(data=request.POST, instance=request.user.get_profile())
        if profile_form.is_valid():
            profile_form.save()
            messages.success(request, 'Profile updated.')

    def update_skills():
        # normalize to lowercase before tagging
        tag_list = request.POST.get('skills_text').lower().split(',')
        for tag in tag_list:
            if tag and tag != '':
                # strip excess whitespace from the tag
                tag = tag_clean(tag)
                skill, created = Skill.objects.get_or_create(name=tag)
                profile.skills.add(skill)
        psf = ProfileSkillsForm(request.POST)
        if psf.is_valid():
            skills_list = Skill.objects.filter(id__in = psf.cleaned_data.get('skills'))
            for skill in skills_list:
                profile.skills.add(skill)
        profile.save()
        messages.success(request, 'Skills updated.')

    if request.method == "POST":
        origin = request.POST.get('origin')
        if origin == "profile":
            update_profile()
        else: #origin == "skill":
            update_skills()

    profile_form = ProfileForm(instance=request.user.get_profile())
    skill_form = ProfileSkillsForm()
    skills = profile.skills.all()
    events = Event.objects.filter(creator=user)
    start = datetime.now()
    end = start + timedelta(days=30)
    # Flatten each event's occurrences in the next 30 days into one list.
    office_hours = reduce(lambda x,y: x+y, [e.get_occurrences(start, end) for e in events]) if events else []
    return direct_to_template(request, template_name, {'skill_form':skill_form, 'profile_form':profile_form, 'profile':profile, 'skills':skills, 'editable':True, 'office_hours':office_hours})
39.210191
98
0.63564
688
6,156
5.515988
0.235465
0.023715
0.02108
0.017391
0.302767
0.261924
0.261924
0.240316
0.207115
0.207115
0
0.002221
0.268519
6,156
156
99
39.461538
0.840551
0.031027
0
0.380597
0
0
0.06782
0.017126
0
0
0
0
0
0
null
null
0.014925
0.11194
null
null
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
7af7571d71981a894e8403a4dbebd261f79a20cf
486
py
Python
code/gits_version.py
ineelshah/GITS2.1-I.R.I.S
12c4455ea55920e0de94a76f45b26e3e43cbcced
[ "MIT" ]
1
2021-11-28T12:18:43.000Z
2021-11-28T12:18:43.000Z
code/gits_version.py
ineelshah/GITS2.1-I.R.I.S
12c4455ea55920e0de94a76f45b26e3e43cbcced
[ "MIT" ]
20
2021-11-26T17:59:00.000Z
2022-01-29T10:44:15.000Z
code/gits_version.py
jayrshah98/GITS2.1-I.R.I.S
2891ba27b3309bbc7e8ff25ed221d3f1c78fb9d3
[ "MIT" ]
3
2021-11-28T21:48:50.000Z
2022-01-05T15:44:06.000Z
import subprocess
from subprocess import PIPE


def gits_version(args):
    """Print the installed git version by running ``git --version``.

    Parameters:
        args: unused; kept for CLI-dispatch signature compatibility.

    Returns:
        True when the command was launched and its output printed,
        False when running git raised (e.g. git not installed).
    """
    try:
        # Build argv as a literal instead of three list.append calls, and use
        # subprocess.run, which launches, waits, and captures in one call
        # (replacing the manual Popen + communicate pair).
        completed = subprocess.run(["git", "--version"], stdout=PIPE, stderr=PIPE)
        print(completed.stdout.decode("UTF-8"))
    except Exception as e:
        print("ERROR: gits version command caught an exception")
        print("ERROR: {}".format(str(e)))
        return False
    return True
24.3
66
0.615226
57
486
5.22807
0.614035
0.073826
0
0
0
0
0
0
0
0
0
0.008403
0.265432
486
19
67
25.578947
0.826331
0
0
0
0
0
0.150206
0
0
0
0
0
0
1
0.066667
false
0
0.133333
0
0.333333
0.2
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7af766090d703b356c60d610ebbb2f0858967ab4
5,271
py
Python
tests/data/rings_ringspecs.py
ArdanaCLM/swiftlm
a251c7fa4524bf35c95c15fcc21ec608780b07f4
[ "Apache-2.0" ]
1
2020-09-01T12:34:24.000Z
2020-09-01T12:34:24.000Z
tests/data/rings_ringspecs.py
ArdanaCLM/swiftlm
a251c7fa4524bf35c95c15fcc21ec608780b07f4
[ "Apache-2.0" ]
null
null
null
tests/data/rings_ringspecs.py
ArdanaCLM/swiftlm
a251c7fa4524bf35c95c15fcc21ec608780b07f4
[ "Apache-2.0" ]
null
null
null
# (c) Copyright 2015, 2016 Hewlett Packard Enterprise Development LP # (c) Copyright 2017 SUSE LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # ringspec_simple = ''' global: all_ring_specifications: - region_name_not_used: region1 rings: - display_name: Account Ring min_part_hours: 12 name: account partition_power: 17 replication_policy: replica_count: 1 - display_name: Container Ring min_part_hours: 12 name: container partition_power: 17 replication_policy: replica_count: 2 - default: true display_name: General min_part_hours: 12 name: object-0 partition_power: 17 replication_policy: replica_count: 3 - default: false display_name: EC min_part_hours: 12 name: object-1 partition_power: 17 erasure_coding_policy: ec_num_data_fragments: 4 ec_num_parity_fragments: 10 ec_type: jerasure_rs_vand ec_object_segment_size: 1000000 ''' ringspec_region_zones = ''' global: all_ring_specifications: - region_name_not_used: region1 swift_regions: - id: 2 server_groups: - sg21 - sgtwotwo - sgtwo3 - id: 3 server_groups: - sg31 - sgthreetwo - sgthree3 rings: - display_name: Account Ring min_part_hours: 12 name: account partition_power: 17 replication_policy: replica_count: 3 - display_name: Container Ring min_part_hours: 12 name: container partition_power: 17 replication_policy: replica_count: 3 - default: true display_name: General min_part_hours: 12 name: object-0 partition_power: 17 replication_policy: replica_count: 3 ''' ringspec_null_zones = ''' global: all_ring_specifications: - 
region_name_not_used: region1 swift_regions: [] swift_zones: [] rings: - display_name: Account Ring min_part_hours: 12 name: account partition_power: 17 replication_policy: replica_count: 3 swift_zones: - id: 2 server_groups_omitted: on-purpose - display_name: Container Ring min_part_hours: 12 name: container partition_power: 17 replication_policy: replica_count: 3 - default: true display_name: General min_part_hours: 12 name: object-0 partition_power: 17 replication_policy: replica_count: 3 ''' ringspec_zones_not_speced = ''' global: all_ring_specifications: - region_name_not_used: region1 rings: - display_name: Account Ring min_part_hours: 12 name: account partition_power: 17 replication_policy: replica_count: 3 - display_name: Container Ring min_part_hours: 12 name: container partition_power: 17 replication_policy: replica_count: 3 - default: true display_name: General min_part_hours: 12 name: object-0 partition_power: 17 replication_policy: replica_count: 3 ''' ringspec_zones_duplicate_in_ring = ''' global: all_ring_specifications: - region_name_not_used: region1 rings: - display_name: Account Ring min_part_hours: 12 name: account partition_power: 17 replication_policy: replica_count: 3 swift_zones: - id: 1 server_groups: - ONE - SAME - id: 2 server_groups: - TWO - SAME - display_name: Container Ring min_part_hours: 12 name: container partition_power: 17 replication_policy: replica_count: 3 - default: true display_name: General min_part_hours: 12 name: object-0 partition_power: 17 replication_policy: replica_count: 3 '''
28.803279
75
0.540505
528
5,271
5.100379
0.263258
0.065355
0.071296
0.083179
0.687338
0.687338
0.678426
0.678426
0.678426
0.675826
0
0.04077
0.409031
5,271
182
76
28.961538
0.823756
0.116107
0
0.81875
0
0
0.96037
0.072798
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
7af83d85dcf1ded1b08cade5086590e8e4f45487
254
py
Python
Maze.py
rcpsilva/BCC740_2021_2_Artificial_Intelligence
fad7c02d155ae377e1f8c2a6c941ea45ec3c2fe6
[ "MIT" ]
null
null
null
Maze.py
rcpsilva/BCC740_2021_2_Artificial_Intelligence
fad7c02d155ae377e1f8c2a6c941ea45ec3c2fe6
[ "MIT" ]
null
null
null
Maze.py
rcpsilva/BCC740_2021_2_Artificial_Intelligence
fad7c02d155ae377e1f8c2a6c941ea45ec3c2fe6
[ "MIT" ]
5
2022-03-23T12:21:31.000Z
2022-03-28T16:47:25.000Z
from pyamaze import maze,COLOR,agent
from environments import Maze
from agents import MazeAgentDFS, MazeAgentBranchAndBound, MazeAgentAStar

# Build an 8x8 maze environment and solve it with the A* agent.
maze_env = Maze(8,8)

# Alternative solvers with the same interface:
#   MazeAgentBranchAndBound(maze_env, 40)
#   MazeAgentDFS(maze_env)
solver = MazeAgentAStar(maze_env)

solver.act()
23.090909
72
0.795276
32
254
6.3125
0.5
0.09901
0
0
0
0
0
0
0
0
0
0.017778
0.114173
254
10
73
25.4
0.88
0.228346
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
1
7af95ca39bf7740aabcc3f766de8108b97e29b27
921
py
Python
sourcecode/src/vx/spix/Util.py
ivarvb/SPIX
6c757b69c266f738d66164fa643a09f77721880d
[ "MIT" ]
null
null
null
sourcecode/src/vx/spix/Util.py
ivarvb/SPIX
6c757b69c266f738d66164fa643a09f77721880d
[ "MIT" ]
null
null
null
sourcecode/src/vx/spix/Util.py
ivarvb/SPIX
6c757b69c266f738d66164fa643a09f77721880d
[ "MIT" ]
null
null
null
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

import os
import ujson
from datetime import datetime


class Util:
    """Stateless helpers for JSON file I/O, timestamps, and path handling."""

    @staticmethod
    def write(file, obj):
        """Serialize ``obj`` as JSON into ``file`` (overwriting it)."""
        with open(file, "w") as fh:
            fh.write(ujson.dumps(obj))

    @staticmethod
    def read(file):
        """Load and return the JSON content of ``file``."""
        with open(file, "r") as fh:
            return ujson.load(fh)

    @staticmethod
    def now():
        """Current local time as a compact YYYYMMDDHHMMSS string."""
        return datetime.now().strftime("%Y%m%d%H%M%S")

    @staticmethod
    def makedir(ndir):
        """Create directory ``ndir`` (including parents) unless it exists."""
        if not os.path.exists(ndir):
            os.makedirs(ndir)

    @staticmethod
    def splitname(filef):
        """Return (stem, extension) of the basename of ``filef``."""
        return os.path.splitext(os.path.basename(filef))

    @staticmethod
    def split(filef, separator):
        """Split ``filef`` on ``separator`` and return the parts."""
        return filef.split(separator)
21.418605
54
0.591748
117
921
4.65812
0.504274
0.165138
0.044037
0.055046
0
0
0
0
0
0
0
0.004594
0.290988
921
43
55
21.418605
0.830015
0.061889
0
0.1875
0
0
0.016241
0
0
0
0
0
0
1
0.1875
false
0
0.1875
0.0625
0.53125
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7af9ce8ba99ab0024b0d79ef7600bd2202d6b9e4
41
py
Python
packages/plugins/model-define/tensorflow-cycle-gan-model-define/CycleGAN/image_loader/__init__.py
CandyQiu/pipcook
12d482d6dcfb828bf80fcf908aee2c8ba5e9bd8a
[ "Apache-2.0" ]
2
2020-04-21T05:49:02.000Z
2021-03-01T15:14:29.000Z
packages/plugins/model-define/tensorflow-cycle-gan-model-define/CycleGAN/image_loader/__init__.py
CandyQiu/pipcook
12d482d6dcfb828bf80fcf908aee2c8ba5e9bd8a
[ "Apache-2.0" ]
null
null
null
packages/plugins/model-define/tensorflow-cycle-gan-model-define/CycleGAN/image_loader/__init__.py
CandyQiu/pipcook
12d482d6dcfb828bf80fcf908aee2c8ba5e9bd8a
[ "Apache-2.0" ]
null
null
null
from .image_loader import ImageGenerator
20.5
40
0.878049
5
41
7
1
0
0
0
0
0
0
0
0
0
0
0
0.097561
41
1
41
41
0.945946
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
7afb55eccc74a4c6a98c9ac3dd757cd060175686
1,005
py
Python
nmea_date_fix.py
hsur/NmeaDateFix
4585dc9541bb2ae87dfc0160cc5006960813a0d3
[ "BSD-2-Clause" ]
null
null
null
nmea_date_fix.py
hsur/NmeaDateFix
4585dc9541bb2ae87dfc0160cc5006960813a0d3
[ "BSD-2-Clause" ]
null
null
null
nmea_date_fix.py
hsur/NmeaDateFix
4585dc9541bb2ae87dfc0160cc5006960813a0d3
[ "BSD-2-Clause" ]
null
null
null
#!/usr/bin/env python3 # vim:fenc=utf-8 ff=unix ft=python ts=4 sw=4 sts=4 noet : import os import sys import datetime from pynmea2 import NMEASentence, ParseError if len(sys.argv) != 2: print("Usage: %s nmeafile.nmea" % sys.argv[0] ) sys.exit(1) file_path = os.path.abspath(sys.argv[1]) tmp_path = os.path.dirname(file_path) + "/updated_" + os.path.basename(file_path) start_datetime = None with open(file_path) as f: with open(tmp_path, mode='w',newline="\r\n") as t: for line in f: try: nmea = NMEASentence.parse(line) if hasattr(nmea, 'datestamp'): nmea.datestamp = (nmea.datestamp + datetime.timedelta(weeks=1024)).strftime("%d%m%y") if start_datetime == None: start_datetime = nmea.datetime.strftime("%Y%m%d%H%M%S") t.write(str(nmea)) t.write("\n") #print(str(nmea)) except ParseError as e: t.write(e.args[0][1]) #print(str(nmea)) os.rename(tmp_path, os.path.dirname(tmp_path) + "/%s.nma" % start_datetime)
29.558824
91
0.649751
163
1,005
3.92638
0.478528
0.05
0.046875
0.040625
0.0625
0
0
0
0
0
0
0.019536
0.185075
1,005
33
92
30.454545
0.761905
0.108458
0
0
0
0
0.084983
0
0
0
0
0
0
1
0
false
0
0.166667
0
0.166667
0.041667
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7afc056cd047372b424519a16da1dae8715cba34
1,103
py
Python
coinkit/keyspace.py
mflaxman/coink
8ce28ac4ff56e2320bf452d0559b83baf40b2b51
[ "MIT" ]
5
2017-09-06T11:59:50.000Z
2019-02-17T21:02:47.000Z
coinkit/keyspace.py
shea256/coinkit
81e86f4ea3dbf6622953c085016445fb4121fb44
[ "MIT" ]
null
null
null
coinkit/keyspace.py
shea256/coinkit
81e86f4ea3dbf6622953c085016445fb4121fb44
[ "MIT" ]
2
2015-01-23T03:10:25.000Z
2021-11-18T01:58:31.000Z
# -*- coding: utf-8 -*- """ Coinkit ~~~~~ :copyright: (c) 2013 by Halfmoon Labs :license: MIT, see LICENSE for more details. """ import re def int_to_hex(i): return re.sub(r'^0x|L$', '', hex(i)) def int_to_string(integer, keyspace_chars): """ Turn a positive integer into a string. """ if not integer > 0: raise ValueError('integer must be > 0') output = "" while integer > 0: integer, digit = divmod(integer, len(keyspace_chars)) output += keyspace_chars[digit] return output[::-1] def string_to_int(string, keyspace_chars): """ Turn a string into a positive integer. """ output = 0 for char in string: output = output * len(keyspace_chars) + keyspace_chars.index(char) return output def change_keyspace(string, original_keyspace, target_keyspace): """ Convert a string from one keyspace to another. """ assert isinstance(string, str) intermediate_integer = string_to_int(string, original_keyspace) output_string = int_to_string(intermediate_integer, target_keyspace) return output_string
29.810811
74
0.668178
145
1,103
4.910345
0.434483
0.109551
0.022472
0.050562
0
0
0
0
0
0
0
0.012717
0.215775
1,103
37
75
29.810811
0.810405
0.224841
0
0
0
0
0.030713
0
0
0
0
0
0.047619
1
0.190476
false
0
0.047619
0.047619
0.428571
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7afcbfcd3c3962e9a985188562c99110c3140f93
18,907
py
Python
cinder/zonemanager/drivers/brocade/brcd_rest_fc_zone_client.py
lightsey/cinder
e03d68e42e57a63f8d0f3e177fb4287290612b24
[ "Apache-2.0" ]
571
2015-01-01T17:47:26.000Z
2022-03-23T07:46:36.000Z
cinder/zonemanager/drivers/brocade/brcd_rest_fc_zone_client.py
lightsey/cinder
e03d68e42e57a63f8d0f3e177fb4287290612b24
[ "Apache-2.0" ]
37
2015-01-22T23:27:04.000Z
2021-02-05T16:38:48.000Z
cinder/zonemanager/drivers/brocade/brcd_rest_fc_zone_client.py
lightsey/cinder
e03d68e42e57a63f8d0f3e177fb4287290612b24
[ "Apache-2.0" ]
841
2015-01-04T17:17:11.000Z
2022-03-31T12:06:51.000Z
# (c) Copyright 2019 Brocade, a Broadcom Company # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Brocade south bound connector to communicate with switch using REST over HTTP or HTTPS protocol. """ import json from oslo_log import log as logging from oslo_serialization import base64 import requests import six from cinder.i18n import _ from cinder.zonemanager.drivers.brocade import exception from cinder.zonemanager.drivers.brocade import fc_zone_constants from cinder.zonemanager.drivers.brocade import rest_constants LOG = logging.getLogger(__name__) class BrcdRestFCZoneClient(object): def __init__(self, ipaddress, username, password, port, vfid, protocol): """Initializing the client with the parameters passed. :param ipaddress: IP Address of the device. :param username: User id to login. :param password: User password. :param port: Device Communication port :param vfid: Virtual Fabric ID. :param protocol: Communication Protocol. 
""" self.sw_ip = ipaddress self.sw_user = username self.sw_pwd = password self.protocol = protocol self.vfid = vfid self.status_code = '' self.session = None self._login() def is_supported_firmware(self): is_supported_firmware = False fw_version = self._get_firmware_version() ver = fw_version.split(".") if len(ver[0]) > 1: major_ver = ver[0] ver[0] = major_ver[1] if len(ver[2]) > 1: patch_ver = ver[2] ver[2] = patch_ver[0] LOG.debug("Firmware version: %(version)s.", {'version': ver}) if int(ver[0] + ver[1] + ver[2]) > 820: is_supported_firmware = True return is_supported_firmware def get_active_zone_set(self): active_zone_set, checksum = self._get_effective_zone_set() return active_zone_set def get_nameserver_info(self): return self._get_name_server() def add_zones(self, add_zone_map, activate, active_zone_set=None): self._add_zones(add_zone_map, activate) def update_zones(self, update_zone_map, activate, operation, active_zone_set=None): self._update_zones(update_zone_map, activate, operation) def delete_zones(self, zone_names_to_delete, activate, active_zone_set=None): self._delete_zones(zone_names_to_delete, activate) def cleanup(self): self._logout() def _login(self): if self.protocol == fc_zone_constants.REST_HTTPS: self.protocol = fc_zone_constants.HTTPS else: self.protocol = fc_zone_constants.HTTP if self.session is None: self.session = requests.Session() adapter = requests.adapters.HTTPAdapter(pool_connections=1, pool_maxsize=1) self.session.mount(self.protocol + '://', adapter) credentials = base64.encode_as_text('%s:%s' % (self.sw_user, self.sw_pwd)).replace('\n', '') self.session.headers = {rest_constants.USER_AGENT: rest_constants.ZONE_DRIVER, rest_constants.ACCEPT: rest_constants.YANG, rest_constants.AUTHORIZATION: "Basic %s" % credentials} response = self.session.post(self._build_url(rest_constants.LOGIN)) if response.status_code == 200: auth = response.headers.get('Authorization') LOG.info("REST login success, setting auth: %s", auth) 
self.session.headers = {rest_constants.USER_AGENT: rest_constants.ZONE_DRIVER, rest_constants.ACCEPT: rest_constants.YANG, rest_constants.CONTENT_TYPE: rest_constants.YANG, rest_constants.AUTHORIZATION: auth} else: msg = (_("REST login failed: %s") % six.text_type(response.text)) LOG.error(msg) raise exception.BrocadeZoningRestException(reason=msg) return response.status_code def _logout(self): response = self.session.post(self._build_url(rest_constants.LOGOUT)) if response.status_code == 204: LOG.info("REST logout success") else: msg = (_("REST logout failed: %s") % six.text_type(response.text)) LOG.error(msg) raise exception.BrocadeZoningRestException(reason=msg) def _get_firmware_version(self): response = self.session.get(self._build_url(rest_constants.GET_SWITCH)) firmware_version = '' if response.status_code == 200: data = response.json() json_response = data[rest_constants.RESPONSE] switch = json_response[rest_constants.SWITCH] firmware_version = switch[rest_constants.FIRMWARE_VERSION] LOG.info("REST firmware version: %s", firmware_version) else: msg = (_("REST get switch fw version failed: %s") % six.text_type(response.text)) LOG.error(msg) raise exception.BrocadeZoningRestException(reason=msg) return firmware_version def _get_name_server(self): port_names = [] url = self._build_url(rest_constants.GET_NAMESERVER) response = self.session.get(url) if response.status_code == 200: data = response.json() json_response = data[rest_constants.RESPONSE] nsinfos = json_response[rest_constants.FC_NAME_SERVER] i = 0 for nsinfo in nsinfos: port_names.append(nsinfos[i][rest_constants.PORT_NAME]) i = i + 1 else: msg = (_("REST get NS info failed: %s") % six.text_type(response.text)) LOG.error(msg) raise exception.BrocadeZoningRestException(reason=msg) return port_names def _get_effective_zone_set(self): active_zone_set = {} zones_map = {} url = self._build_url(rest_constants.GET_ACTIVE_ZONE_CFG) response = self.session.get(url) checksum = '' active_cfg_name = '' if 
response.status_code == 200: data = response.json() json_response = data[rest_constants.RESPONSE] effective_cfg = json_response[rest_constants.EFFECTIVE_CFG] checksum = effective_cfg[rest_constants.CHECKSUM] try: active_cfg_name = effective_cfg[rest_constants.CFG_NAME] zones = effective_cfg[rest_constants.ENABLED_ZONE] if type(zones) is list: for i, zone in enumerate(zones): zones_map.update({zones[i][rest_constants.ZONE_NAME]: zones[i][rest_constants.MEMBER_ENTRY] [rest_constants.ENTRY_NAME]}) else: zones_map.update({zones[rest_constants.ZONE_NAME]: zones[rest_constants.MEMBER_ENTRY] [rest_constants.ENTRY_NAME]}) except Exception: active_cfg_name = '' LOG.info("REST get effective zoneset success: " "active cfg: %(cfg_name)s, checksum: %(chksum)s", {'cfg_name': active_cfg_name, 'chksum': checksum}) else: msg = (_("REST get effective zoneset failed: %s") % six.text_type(response.text)) LOG.error(msg) raise exception.BrocadeZoningRestException(reason=msg) active_zone_set = {"active_zone_config": active_cfg_name, "zones": zones_map} return active_zone_set, checksum def _add_zones(self, add_zone_map, activate): active_zone_set, checksum = self._get_effective_zone_set() # if activate, get the zones already configured in the active cfg if activate: zones_in_active_cfg = active_zone_set.get("zones") # for each new zone, create a zone entry in defined zone db for zone_name, members in add_zone_map.items(): if zone_name not in zones_in_active_cfg: body = {rest_constants.MEMBER_ENTRY: {rest_constants.ENTRY_NAME: add_zone_map.get(zone_name)}} json_str = json.dumps(body) url = self._build_url(rest_constants.POST_ZONE + zone_name) response = self.session.post(url, data=json_str) if response.status_code == 201: LOG.info("REST create zone success: %s", zone_name) else: msg = (_("REST create zone failed: %s") % six.text_type(response.text)) LOG.error(msg) raise exception.BrocadeZoningRestException(reason=msg) # update the cfg with the new zones active_cfg_name = 
active_zone_set.get("active_zone_config") active_zones = active_zone_set.get("zones") active_zone_names = active_zones.keys() active_zone_names.extend(add_zone_map.keys()) body = {rest_constants.MEMBER_ZONE: {rest_constants.ZONE_NAME: active_zone_names}} json_str = json.dumps(body) if active_cfg_name == '': active_cfg_name = fc_zone_constants.CFG_NAME url = self._build_url(rest_constants.POST_CFG + active_cfg_name) response = self.session.post(url, data=json_str) if response.status_code == 201: LOG.info("REST cfg create success: %s", active_cfg_name) self._save_and_activate_cfg(checksum, activate, active_cfg_name) else: msg = (_("REST cfg create failed: %s") % six.text_type(response.text)) LOG.error(msg) raise exception.BrocadeZoningRestException(reason=msg) else: url = self._build_url(rest_constants.PATCH_CFG + active_cfg_name) response = self.session.patch(url, data=json_str) # if update successful, save the configuration changes if response.status_code == 204: LOG.info("REST cfg update success: %s", active_cfg_name) self._save_and_activate_cfg(checksum, activate, active_cfg_name) else: msg = (_("REST cfg update failed: %s") % six.text_type(response.text)) LOG.error(msg) raise exception.BrocadeZoningRestException(reason=msg) def _update_zones(self, update_zone_map, activate, operation): active_zone_set, checksum = self._get_effective_zone_set() active_cfg_name = active_zone_set.get("active_zone_config") active_zones = active_zone_set.get("zones") # for each zone, update the zone members in defined zone db for zone_name, members in update_zone_map.items(): current_members = active_zones.get(zone_name) if operation == "ADD": new_members = set(members).difference(set(current_members)) if new_members: update_zone_map.update({zone_name: new_members}) elif operation == "REMOVE": new_members = set(current_members).difference(set(members)) if new_members: update_zone_map.update({zone_name: new_members}) # for each zone to be updated, make REST PATCH call to update for 
zone in update_zone_map.keys(): body = {rest_constants.MEMBER_ENTRY: {rest_constants.ENTRY_NAME: update_zone_map.get(zone)}} json_str = json.dumps(body) url = self._build_url(rest_constants.POST_ZONE + zone) response = self.session.patch(url, data=json_str) if response.status_code == 204: LOG.info("REST zone update success: %s", zone) else: msg = (_("REST zone update failed: %s") % six.text_type(response.text)) LOG.error(msg) raise exception.BrocadeZoningRestException(reason=msg) # save and activate the config changes self._save_and_activate_cfg(checksum, activate, active_cfg_name) def _delete_zones(self, zone_names_to_delete, activate): zone_names_to_delete = zone_names_to_delete.split(";") active_zone_set, checksum = self._get_effective_zone_set() # for each zone name, make REST DELETE call for zone in zone_names_to_delete: url = self._build_url(rest_constants.DELETE_ZONE + zone) response = self.session.delete(url) if response.status_code == 204: LOG.info("REST delete zone success: %s", zone) else: msg = (_("REST delete zone failed: %s") % six.text_type(response.text)) LOG.error(msg) raise exception.BrocadeZoningRestException(reason=msg) # update the cfg removing the deleted zones active_cfg_name = active_zone_set.get("active_zone_config") active_zones = active_zone_set.get("zones") active_zone_names = active_zones.keys() if len(active_zone_names) == len(zone_names_to_delete): # disable the cfg url = self._build_url(rest_constants.PATCH_CFG_DISABLE) body = {"checksum": checksum} json_str = json.dumps(body) response = self.session.patch(url, data=json_str) if response.status_code == 204: LOG.info("REST cfg disable success") else: msg = (_("REST cfg disable failed: %s") % six.text_type(response.text)) LOG.error(msg) raise exception.BrocadeZoningRestException(reason=msg) # delete the cfg url = self._build_url(rest_constants.DELETE_CFG + active_cfg_name) response = self.session.delete(url) if response.status_code == 204: LOG.info("REST cfg delete success: %s", 
active_cfg_name) else: msg = (_("REST cfg delete failed: %s") % six.text_type(response.text)) LOG.error(msg) raise exception.BrocadeZoningRestException(reason=msg) checksum = self._get_checksum() self._save_and_activate_cfg(checksum, False, active_cfg_name) else: # update the cfg by removing the deleted zones zone_names_in_cfg = list(set(active_zone_names) .difference(set(zone_names_to_delete))) body = {rest_constants.MEMBER_ZONE: {rest_constants.ZONE_NAME: zone_names_in_cfg}} json_str = json.dumps(body) url = self._build_url(rest_constants.PATCH_CFG + active_cfg_name) response = self.session.patch(url, data=json_str) # if update successful, save the configuration changes if response.status_code == 204: LOG.info("REST cfg update success: %s", active_cfg_name) self._save_and_activate_cfg(checksum, activate, active_cfg_name) else: msg = (_("REST cfg update failed: %s") % six.text_type(response.text)) LOG.error(msg) raise exception.BrocadeZoningRestException(reason=msg) def _save_and_activate_cfg(self, checksum, activate, active_cfg_name): body = {"checksum": checksum} json_str = json.dumps(body) url = self._build_url(rest_constants.PATCH_CFG_SAVE) response = self.session.patch(url, data=json_str) if response.status_code == 204: LOG.info("REST cfg save success") else: msg = (_("REST cfg save failed: %s") % six.text_type(response.text)) LOG.error(msg) raise exception.BrocadeZoningRestException(reason=msg) # if activate=true, then enable the cfg changes to effective cfg if activate: checksum = self._get_checksum() body = {"checksum": checksum} json_str = json.dumps(body) url = self._build_url(rest_constants.PATCH_CFG_ENABLE + active_cfg_name) response = self.session.patch(url, data=json_str) if response.status_code == 204: LOG.info("REST cfg activate success: %s", active_cfg_name) else: msg = (_("REST cfg activate failed: %s") % six.text_type(response.text)) LOG.error(msg) raise exception.BrocadeZoningRestException(reason=msg) def _get_checksum(self): url = 
self._build_url(rest_constants.GET_CHECKSUM) response = self.session.get(url) checksum = '' if response.status_code == 200: data = response.json() json_response = data[rest_constants.RESPONSE] effective_cfg = json_response[rest_constants.EFFECTIVE_CFG] checksum = effective_cfg[rest_constants.CHECKSUM] LOG.info("REST get checksum success: %s", checksum) else: msg = (_("REST get checksum failed: %s") % six.text_type(response.text)) LOG.error(msg) raise exception.BrocadeZoningRestException(reason=msg) return checksum def _build_url(self, path): url = '%s://%s%s' % (self.protocol, self.sw_ip, path) if self.vfid is not None: url = '%s?vf-id=%s' % (url, self.vfid) return url
45.779661
79
0.587825
2,140
18,907
4.93271
0.11729
0.070197
0.03202
0.024252
0.60648
0.566029
0.526904
0.503221
0.480296
0.41673
0
0.006436
0.326123
18,907
412
80
45.890777
0.82207
0.086582
0
0.491279
0
0
0.065634
0
0
0
0
0
0
1
0.055233
false
0.005814
0.026163
0.002907
0.110465
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7afd789557f9eb88e7608f59a30820ee3bb43296
4,233
py
Python
main/models.py
mnecas/bpc_bds_web
3c5d4289d74c053e1225e19cdcbb048994271f57
[ "Apache-2.0" ]
null
null
null
main/models.py
mnecas/bpc_bds_web
3c5d4289d74c053e1225e19cdcbb048994271f57
[ "Apache-2.0" ]
null
null
null
main/models.py
mnecas/bpc_bds_web
3c5d4289d74c053e1225e19cdcbb048994271f57
[ "Apache-2.0" ]
null
null
null
from django.db import models class PersonType(models.Model): type = models.CharField(max_length=45) def __str__(self): return f"{self.type}" class Address(models.Model): city = models.CharField(max_length=45) street = models.CharField(max_length=45) street_number = models.IntegerField() zip = models.CharField(max_length=45) def __str__(self): return f"{self.city} {self.street} {self.street_number}" class Person(models.Model): USER_TYPES = ( ('user', 'USER'), ('admin', 'ADMIN'), ) date_of_birth = models.DateField() password = models.BinaryField() first_name = models.CharField(max_length=40) last_name = models.CharField(max_length=40) username = models.CharField(max_length=60) type = models.CharField(max_length=50, choices=USER_TYPES) address = models.ManyToManyField(Address) def __str__(self): return f"{self.type} - {self.username}" # TODO: Add list of params to the type class Contact(models.Model): person = models.ForeignKey(Person, on_delete=models.CASCADE) type = models.CharField(max_length=45) value = models.CharField(max_length=45) def __str__(self): return f"{self.person} {self.type}:{self.value}" class Car(models.Model): person = models.OneToOneField(Person, on_delete=models.CASCADE) plate = models.CharField(max_length=45) type = models.CharField(max_length=45) brand = models.CharField(max_length=45) def __str__(self): return f"{self.person} {self.plate}" class Shift(models.Model): drivers = models.ManyToManyField(Person) start_time = models.DateTimeField() end_time = models.DateTimeField() def __str__(self): return f"{self.start_time} - {self.end_time}" class Cuisine(models.Model): name = models.CharField(max_length=45) def __str__(self): return f"{self.name}" class Dish(models.Model): name = models.CharField(max_length=45) def __str__(self): return f"{self.name}" class Restaurant(models.Model): name = models.CharField(max_length=45) address = models.ForeignKey(Address, on_delete=models.CASCADE) dishes = models.ManyToManyField(Dish, through='RestaurantDish') 
cuisine = models.ForeignKey(Cuisine, on_delete=models.CASCADE) def __str__(self): return f"{self.name}" class RestaurantDish(models.Model): dish = models.ForeignKey(Dish, on_delete=models.CASCADE) restaurant = models.ForeignKey( Restaurant, on_delete=models.CASCADE, related_name="restaurant_dish") price = models.IntegerField() description = models.TextField(max_length=200) def __str__(self): return f"{self.restaurant} - {self.dish} - {self.price}" class Delivery(models.Model): person = models.ForeignKey( Person, on_delete=models.CASCADE) driver = models.ForeignKey( Person, on_delete=models.CASCADE, related_name='driver') arival = models.DateTimeField(blank=True, auto_now_add=True) delivery_fee = models.IntegerField(blank=True, default=0) dishes = models.ManyToManyField(RestaurantDish, through='DeliveryDish') def __str__(self): return f"Order: {self.person} - Driver: {self.driver} - {self.arival}" class DeliveryDish(models.Model): dish = models.ForeignKey(RestaurantDish, on_delete=models.CASCADE) delivery = models.ForeignKey( Delivery, on_delete=models.CASCADE, related_name="delivery_dish") requirements = models.TextField(max_length=200) def __str__(self): return f"{self.dish} | {self.delivery} | {self.requirements}" class Review(models.Model): reviewer = models.ForeignKey( Person, on_delete=models.CASCADE) driver = models.ForeignKey( Person, blank=True, null=True, on_delete=models.CASCADE, related_name='review_driver') restaurant_dish = models.ForeignKey( RestaurantDish, blank=True, null=True, on_delete=models.CASCADE) rating = models.SmallIntegerField() text = models.TextField(max_length=500) def __str__(self): return f"{self.reviewer} - {self.rating} - {self.text}" class CustomDropTable(models.Model): text = models.TextField(max_length=500)
30.235714
94
0.697378
517
4,233
5.500967
0.187621
0.063291
0.101266
0.135021
0.492264
0.436006
0.303094
0.285513
0.233826
0.233826
0
0.01295
0.179069
4,233
139
95
30.453237
0.805468
0.008505
0
0.285714
0
0.010204
0.121812
0.005721
0
0
0
0.007194
0
1
0.132653
false
0.010204
0.010204
0.132653
0.897959
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
1
1
0
0
2
bb00617057f78b6193060f769f38f73ce61320ea
1,232
py
Python
eas_lookup/__init__.py
SFDigitalServices/address-microservice-fn-py
37bba144df2cc5a95822ba79c90de48c9dc7beb1
[ "MIT" ]
null
null
null
eas_lookup/__init__.py
SFDigitalServices/address-microservice-fn-py
37bba144df2cc5a95822ba79c90de48c9dc7beb1
[ "MIT" ]
null
null
null
eas_lookup/__init__.py
SFDigitalServices/address-microservice-fn-py
37bba144df2cc5a95822ba79c90de48c9dc7beb1
[ "MIT" ]
null
null
null
""" eas/lookup init file """ import os import json import logging import requests import azure.functions as func from shared_code.common import func_json_response def main(req: func.HttpRequest) -> func.HttpResponse: """ main function for eas/lookup """ logging.info('EAS Lookup processed a request.') try: params = req.params.copy() if params['search'] : params['$where'] = \ "address like upper('{}%') AND parcel_number IS NOT NULL"\ .format(params['search']) del params['search'] response = requests.get( os.getenv('EAS_API_URL'), params=params, headers={'X-App-Token': os.getenv('EAS_APP_TOKEN')} ) headers = { "Cache-Control": "s-maxage=1, stale-while-revalidate, max-age={}"\ .format(os.getenv('EAS_CACHE_MAX_AGE')), "Access-Control-Allow-Origin": "*" } return func_json_response(response, headers) #pylint: disable=broad-except except Exception as err: logging.error("EAS Lookup error occurred: %s", err) return func.HttpResponse(f"This endpoint encountered an error. {err}", status_code=500)
30.8
95
0.602273
145
1,232
5.02069
0.572414
0.049451
0.04533
0
0
0
0
0
0
0
0
0.004444
0.269481
1,232
39
96
31.589744
0.804444
0.064935
0
0
0
0
0.28007
0.043898
0
0
0
0
0
1
0.034483
false
0
0.206897
0
0.310345
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
bb006a911bddb51da85b674ee0ef5ef33b604be7
2,228
py
Python
recipes-bsp/polyos-setup/polyos-setup/polyaudio.py
PolyVection/meta-polyvection
a24fb91c4144e4d6e5fcaa73f456f805e30b751b
[ "MIT" ]
null
null
null
recipes-bsp/polyos-setup/polyos-setup/polyaudio.py
PolyVection/meta-polyvection
a24fb91c4144e4d6e5fcaa73f456f805e30b751b
[ "MIT" ]
null
null
null
recipes-bsp/polyos-setup/polyos-setup/polyaudio.py
PolyVection/meta-polyvection
a24fb91c4144e4d6e5fcaa73f456f805e30b751b
[ "MIT" ]
1
2018-04-13T22:32:38.000Z
2018-04-13T22:32:38.000Z
#!/usr/bin/python3 # Copyright (c) 2017, PolyVection UG. # # Based on configure-edison, Intel Corporation. # # This program is free software; you can redistribute it and/or modify it # under the terms and conditions of the GNU Lesser General Public License, # version 2.1, as published by the Free Software Foundation. # # This program is distributed in the hope it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for # more details. # # aplay -l | awk -F \: '/,/{print $2}' | awk '{print $1}' | uniq import os import sys from sys import stdout import time import termios import fcntl import subprocess import polyterminal def selectSPDIF(): f = open("/mnt/data/settings/audio/alsa/asound.conf", 'w') f.write("ctl.!default {\n") f.write("type hw\n") f.write("card pcm5121\n") f.write("}\n") f.write("pcm.!default {\n") f.write("type hw\n") f.write("card imxspdif\n") f.write("}\n") f.close() def selectLINE(): f = open("/mnt/data/settings/audio/alsa/asound.conf", 'w') f.write("ctl.!default {\n") f.write("type hw\n") f.write("card pcm5121\n") f.write("}\n") f.write("pcm.!default {\n") f.write("type hw\n") f.write("card pcm5121\n") f.write("}\n") f.close() def selectAMP1(): f = open("/mnt/data/settings/audio/alsa/asound.conf", 'w') f.write("ctl.!default {\n") f.write("type hw\n") f.write("card is31ap2121\n") f.write("}\n") f.write("pcm.!default {\n") f.write("type hw\n") f.write("card is31ap2121\n") f.write("}\n") f.close() def chooseFTS(): polyterminal.reset("PolyOS - Audio Setup") print("") print("Please select the audio output:") print("-----------------------------------------") print("") print("0 -\t TOSLINK \t(ZERO)") print("1 -\t ANALOG \t(ZERO)") print("2 -\t AMPLIFIER\t(AMP1)") print("") user = input("Enter either 0 or 1 to configure audio output: ") if user == "0": selectSPDIF() if user == "1": selectLINE() if user == "2": selectAMP1() else: 
selectSPDIF()
27.170732
76
0.606373
325
2,228
4.156923
0.387692
0.106588
0.108808
0.062176
0.399704
0.399704
0.352332
0.327905
0.327905
0.327905
0
0.024915
0.207361
2,228
81
77
27.506173
0.740091
0.270197
0
0.566667
0
0
0.368715
0.1018
0
0
0
0
0
1
0.066667
false
0
0.133333
0
0.2
0.133333
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
bb015fa5e2c4d8430f9e89af39073bf40a1f5fc0
6,168
py
Python
3dcdrl/create_rollout_videos.py
NicholasSperryGrandhomme/Improving-RL-Navigation-using-TTA
8e0a405589a9b0bd3bd543dda72bf2325ebc9126
[ "MIT" ]
null
null
null
3dcdrl/create_rollout_videos.py
NicholasSperryGrandhomme/Improving-RL-Navigation-using-TTA
8e0a405589a9b0bd3bd543dda72bf2325ebc9126
[ "MIT" ]
null
null
null
3dcdrl/create_rollout_videos.py
NicholasSperryGrandhomme/Improving-RL-Navigation-using-TTA
8e0a405589a9b0bd3bd543dda72bf2325ebc9126
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Wed Nov 21 09:19:33 2018 @author: anonymous """ import os import torch import numpy as np from arguments import parse_a2c_args from multi_env import MultiEnv from models import CNNPolicy from a2c_agent import * from utils import initialize_logging from doom_environment import DoomEnvironment import cv2 import pickle from moviepy.editor import ImageSequenceClip from PIL import Image def batch_from_obs(obs, batch_size=32): """Converts an obs (C,H,W) to a batch (B,C,H,W) of given size""" if isinstance(obs, torch.Tensor): if len(obs.shape)==3: obs = obs.unsqueeze(0) return obs.repeat(batch_size, 1, 1, 1) if len(obs.shape)==3: obs = np.expand_dims(obs, axis=0) return np.repeat(obs, repeats=batch_size, axis=0) def make_movie(policy, env, filename, args, n_runs=50, use_tta=False, use_rot=False, use_gray=False, name='', view=None, txt_pos=None): device = torch.device("cuda" if torch.cuda.is_available() else "cpu") time_taken = [] losses = [] for i in range(n_runs): if use_tta: if use_rot: path = 'policy.pth.tar' else: path='policy_TTA_GRAY.pth.tar' checkpoint = torch.load('tta_models/'+path, map_location=device) policy = CNNPolicy((3,64,112), args).to(device) policy.load_state_dict(checkpoint['model']) policy.eval() if use_rot or use_gray: tta_agent = TTAAgent(use_rot=use_rot,obs_shape=(3,64,112), hidden_size=128) tta_agent.load() tta_agent.copy_conv_weights(policy.conv_head) state = torch.zeros(1, args.hidden_size) mask = torch.ones(1,1) obss = [] pos_list = [] obs = env.reset().astype(np.float32) done = False while not done: #Gamma correction obs = 255*np.power(obs/255.0, args.gamma_val) #Inverse image if args.inverse: obs = 255 - obs obss.append(obs) with torch.no_grad(): result = policy(torch.from_numpy(obs).unsqueeze(0), state, mask) action = result['actions'] state = result['states'] obs, reward, done, _ = env.step(action.item()) if view != None and txt_pos != None: x, y, _ = env.get_player_position() 
pos_list.append([x, y]) if use_tta and (use_rot or use_gray): batch_next_obs = batch_from_obs(torch.Tensor(obs).to(device), batch_size=16) # Adapt using rotation prediction losses.append(tta_agent.update_tta(batch_next_obs)) obs = obs.astype(np.float32) time_taken.append(len(obss)/int(30/args.frame_skip)) if use_tta: if use_rot: tta_type='rotation' elif use_gray: tta_type='grayscale' else: tta_type = 'tta_OFF' else: tta_type='baseline' pickle.dump(time_taken, open(f'TTA_videos/{tta_type}/{name}.pkl', 'wb')) print(len(obss)) print(f'Average time taken: {np.mean(time_taken):.2f}s') print(f'TTA mean loss: {np.mean(losses):.3f}') observations = [o.transpose(1,2,0) for o in obss] clip = ImageSequenceClip(observations, fps=int(30/args.frame_skip)) clip.write_videofile(filename) if view != None and txt_pos != None: # saving the view of the agent and the position # of the last run pos_txt = open(txt_pos, "w+") for p in pos_list: pos_txt.write("%d,%d\r\n" % (p[0], p[1])) pos_txt.close() for c, o in enumerate(observations): im = Image.fromarray(o.astype(np.uint8)) fig_name = str(c) + ".png" im.save(view + fig_name) def evaluate_saved_model(): args = parse_a2c_args() USE_TTA = args.use_tta USE_ROT = args.use_rot USE_GRAY = args.use_gray exp_name = args.experiment_name SV_VW_POS = args.save_view_position device = torch.device("cuda" if torch.cuda.is_available() else "cpu") obs_shape = (3, args.screen_height, args.screen_width) policy = CNNPolicy(obs_shape, args).to(device) #Load Agent if USE_TTA: if USE_ROT: path = 'policy.pth.tar' else: path='policy_TTA_GRAY.pth.tar' checkpoint = torch.load('tta_models/'+path, map_location=device) else: path = 'saved_models/labyrinth_9_checkpoint_0198658048.pth.tar' checkpoint = torch.load(path, map_location=device) policy.load_state_dict(checkpoint['model']) policy.eval() assert args.model_checkpoint, 'No model checkpoint found' assert os.path.isfile(args.model_checkpoint), 'The model could not be loaded' for i in range(args.num_mazes_test): 
#env = MultiEnv(args.simulator, args.num_environments, args, is_train=True) env = DoomEnvironment(args, idx=i, is_train=True, use_shaping=args.use_shaping, fixed_scenario=True) name='False' if USE_TTA: if USE_ROT: tta_type='rotation' elif USE_GRAY: tta_type='grayscale' else: tta_type = 'tta_OFF' else: tta_type = 'baseline' print(tta_type) if SV_VW_POS: view_name = f'map_creation/TTA_view/{tta_type}/' txt_pos_track_name = f'map_creation/TTA_position/{tta_type}/{exp_name}.txt' print('Saving view and positions of the agent.') else: view_name = None txt_pos_track_name = None movie_name = f'TTA_videos/{tta_type}/{exp_name}.mp4' print('Creating movie {}'.format(movie_name)) make_movie(policy, env, movie_name, args, n_runs=100, use_tta=USE_TTA, use_rot=USE_ROT, use_gray=USE_GRAY, name=exp_name, view=view_name, txt_pos=txt_pos_track_name) if __name__ == '__main__': evaluate_saved_model()
33.521739
130
0.600681
850
6,168
4.137647
0.296471
0.022178
0.011373
0.011373
0.246801
0.194484
0.184817
0.171737
0.171737
0.143304
0
0.02116
0.287451
6,168
183
131
33.704918
0.779067
0.059176
0
0.289855
0
0
0.108094
0.051712
0
0
0
0
0.014493
1
0.021739
false
0
0.094203
0
0.130435
0.043478
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
bb021a30b50060269f34e2949685cc9a7971c175
6,706
py
Python
napi/tests.py
abakan-zz/napi
314da65bd78e2c716b7efb6deaf3816d8f38f7fd
[ "MIT" ]
null
null
null
napi/tests.py
abakan-zz/napi
314da65bd78e2c716b7efb6deaf3816d8f38f7fd
[ "MIT" ]
1
2015-08-03T00:41:59.000Z
2015-08-07T06:37:26.000Z
napi/tests.py
abakan/napi
314da65bd78e2c716b7efb6deaf3816d8f38f7fd
[ "MIT" ]
null
null
null
from nose.tools import raises import numpy as np from napi import neval from napi.transformers import NapiTransformer, LazyTransformer from napi.transformers import short_circuit_and TRANSFORMERS = [NapiTransformer]#, LazyTransformer] randbools = lambda *n: np.random.randn(*n) < 0 def short_circuit_and_(arrays, shape): assert np.all(short_circuit_and(list(arrays), shape) == np.all(arrays, 0)) def short_circuit_or_(arrays, shape): assert np.all(short_circuit_and(list(arrays), shape) == np.all(arrays, 0)) def test_short_circuit_and(): for func in [short_circuit_and_, short_circuit_or_]: for shape in [(10,), (10, 3), (1, 10, 1, 4)]: yield func, [randbools(*shape), randbools(*shape), randbools(*shape)], shape def check_napi_magic_configuration(func, line): assert func(line) is None def test_napi_magic_configuration(): from napi.magics import NapiMagics magic = NapiMagics(None) magic._remove = magic._append = lambda: None func = magic.napi for line in ['', '', 'on', 'off', '1', '0', 'sq', 'sq', 'sc', 'sc', 'sq on', 'sq off', 'sq 1', 'sq 0', 'sc 0', 'sc 10000']: yield check_napi_magic_configuration, func, line def check_logicops_of_python_types(source, debug=False, trans=None): result, expect = neval(source, debug=debug, transformer=trans), eval(source) assert result == expect, '{} != {}'.format(result, expect) def test_logicops_of_python_types(debug=False): for t in TRANSFORMERS: for src in [ '1 and True', 'True and 1', '[] and True', 'True and []', '1 and [1]', '0 or True', 'False or 1', '[] or True', 'True or []', 'False or [1]', 'True and [1] and 1 and {1: 1}', 'True and [1] and 1 and {1: 1} and {}', 'True and [1] and 0 or {} and {1: 1}',]: yield check_logicops_of_python_types, src, debug, t def check_logicops_of_arrays(source, expect, ns, debug=False, sc=10000): result = neval(source, ns, debug=debug) assert np.all(result == expect), '{} != {}'.format(result, expect) def test_logicops_of_arrays(debug=False): a = np.arange(10) b = randbools(10) bo = np.ones(10, bool) 
bz = np.zeros(10, bool) ns = locals() for src, res in [ ('a and a', np.logical_and(a, a)), ('b and b', np.logical_and(b, b)), ('b and b and b', np.logical_and(b, b)), ('a and b', np.logical_and(a, b)), ('a or a', np.logical_or(a, a)), ('b or b', np.logical_or(b, b)), ('a or b', np.logical_or(a, b)), ('a or b or b', np.logical_or(a, b)), ('not a', np.logical_not(a)), ('not b', np.logical_not(b)), ('a and not a', bz), ('b and not b', bz), ('b and True', b), ('(a or b) and False', bz),]: yield check_logicops_of_arrays, src, res, ns, debug def test_array_squeezing(debug=False): b = randbools(10) b2d = randbools(1, 10) b3d = randbools(1, 10, 1) b5d = randbools(2, 1, 5, 1, 10) b6d = randbools(1, 2, 1, 5, 1, 10, 1) ns = locals() for src, res in [ ('b or b2d', np.logical_or(b, b2d.squeeze())), ('b or b2d and b3d', np.logical_or(b, np.logical_and(b2d.squeeze(), b3d.squeeze()))), ('b5d and b6d', np.logical_and(b5d.squeeze(), b6d.squeeze())), ]: yield check_logicops_of_arrays, src, res, ns, debug def test_logicops_with_arithmetics_and_comparisons(debug=False): a = np.arange(10) b = randbools(10) ns = locals() for src, res in [ ('a >= 0 and a + 1', np.logical_and(a >= 0, a + 1)), ('-a <= 0 and a**2 + 1', np.logical_and(-a <= 0, a**2 + 1)), ('---a - 1 <= 0 and b', np.logical_and(---a - 1<= 0, b)), ]: yield check_logicops_of_arrays, src, res, ns, debug def test_short_circuiting(debug=False): arr = [randbools(10000) for i in range(5)] a, b, c, d, e = arr ns = locals() for sc in (False, 10000): for src, res in [ ('a and a', np.logical_and(a, a)), ('b and b', np.logical_and(b, b)), ('a and b', np.logical_and(a, b)), ('a or a', np.logical_or(a, a)), ('b or b', np.logical_or(b, b)), ('a or b', np.logical_or(a, b)), ('a and b and c and d and e', np.all(arr, 0)), ('a or b or c or d or e', np.any(arr, 0)), ('a and b or c or d and e', np.any([np.logical_and(a, b), c, np.logical_and(d, e)], 0)), ]: yield check_logicops_of_arrays, src, res, ns, debug, sc def 
test_multidim_short_circuiting(debug=False): arr = [randbools(10, 100, 10) for i in range(5)] a, b, c, d, e = arr ns = locals() for sc in (False, 10000): for src, res in [ ('a and a', np.logical_and(a, a)), ('b and b', np.logical_and(b, b)), ('a and b', np.logical_and(a, b)), ('a or a', np.logical_or(a, a)), ('b or b', np.logical_or(b, b)), ('a or b', np.logical_or(a, b)), ('a and b and c and d and e', np.all(arr, 0)), ('a or b or c or d or e', np.any(arr, 0)), ('a and b or c or d and e', np.any([np.logical_and(a, b), c, np.logical_and(d, e)], 0)), ]: yield check_logicops_of_arrays, src, res, ns, debug, sc def test_comparison_chaining(debug=False): """`a < b < c < d`""" a = np.arange(10) - 4 b, c, d = a * 2, a * 3, a * 4 ns = locals() for src, res in [ ('a < b < c < d', np.all([a < b, b < c, c < d], 0)), ('a == b == c == d', np.all([a == b, b == c, c == d], 0)), ('0 == a == 0 == b', np.all([a == 0, b == 0,], 0)), ]: yield check_logicops_of_arrays, src, res, ns, debug @raises(ValueError) def check_array_problems(source, ns, debug=False): neval(source, ns, debug=debug) def test_array_problems(debug=False): a5 = randbools(5) a9 = randbools(9) a9by5 = randbools(9, 5) ns = locals() for src in [ 'a5 and a9', 'a9 or a5', 'a9 or a9by5', ]: yield check_array_problems, src, ns, debug @raises(NameError) def test_name_problem(debug=False): neval('a and b', {}, debug=debug) ''' def test_or_not(debug=False): a = booleans(10) assert all(eval('a or not a', locals(), debug=debug) == any([a, invert(a)], 0)) def test_equal(debug=False): a = arange(10) assert all(eval('a == 1 and a', locals(), debug=debug) == all([a == 1, a], 0)) '''
27.710744
82
0.53892
1,061
6,706
3.287465
0.111216
0.085149
0.065367
0.040998
0.502007
0.461009
0.409117
0.372133
0.372133
0.316227
0
0.036967
0.290039
6,706
242
83
27.710744
0.695652
0.00507
0
0.423841
0
0
0.121598
0
0
0
0
0
0.033113
1
0.112583
false
0
0.039735
0
0.152318
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
bb037b1a6312e7d97680adcc0f7b31cd176631f1
8,621
py
Python
src/api/impl/review.py
fekblom/critic
a6b60c9053e13d4c878d50531860d7389568626d
[ "Apache-2.0" ]
null
null
null
src/api/impl/review.py
fekblom/critic
a6b60c9053e13d4c878d50531860d7389568626d
[ "Apache-2.0" ]
null
null
null
src/api/impl/review.py
fekblom/critic
a6b60c9053e13d4c878d50531860d7389568626d
[ "Apache-2.0" ]
null
null
null
# -*- mode: python; encoding: utf-8 -*- # # Copyright 2014 the Critic contributors, Opera Software ASA # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy of # the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. import api import api.impl.filters class Review(object): def __init__(self, review_id, repository_id, branch_id, state, summary, description): self.id = review_id self.__repository_id = repository_id self.__branch_id = branch_id self.state = state self.summary = summary self.description = description self.__owners_ids = None self.__reviewers_ids = None self.__watchers_ids = None self.__filters = None self.__commits = None self.__rebases = None def getRepository(self, critic): return api.repository.fetch(critic, repository_id=self.__repository_id) def getBranch(self, critic): return api.branch.fetch(critic, branch_id=self.__branch_id) def __fetchOwners(self, critic): if self.__owners_ids is None: cursor = critic.getDatabaseCursor() cursor.execute("""SELECT uid FROM reviewusers WHERE review=%s AND owner""", (self.id,)) self.__owners_ids = frozenset(user_id for (user_id,) in cursor) def getOwners(self, critic): self.__fetchOwners(critic) return frozenset(api.user.fetch(critic, user_id=user_id) for user_id in self.__owners_ids) def __fetchReviewers(self, critic): if self.__reviewers_ids is None: cursor = critic.getDatabaseCursor() cursor.execute("""SELECT DISTINCT uid FROM reviewuserfiles JOIN reviewfiles ON (reviewfiles.id=reviewuserfiles.file) WHERE reviewfiles.review=%s""", (self.id,)) assigned_reviewers = frozenset(user_id for 
(user_id,) in cursor) cursor.execute("""SELECT DISTINCT uid FROM reviewfilechanges JOIN reviewfiles ON (reviewfiles.id=reviewfilechanges.file) WHERE reviewfiles.review=%s""", (self.id,)) actual_reviewers = frozenset(user_id for (user_id,) in cursor) self.__reviewers_ids = assigned_reviewers | actual_reviewers def getReviewers(self, critic): self.__fetchReviewers(critic) return frozenset(api.user.fetch(critic, user_id=user_id) for user_id in self.__reviewers_ids) def __fetchWatchers(self, critic): if self.__watchers_ids is None: cursor = critic.getDatabaseCursor() cursor.execute("""SELECT uid FROM reviewusers WHERE review=%s""", (self.id,)) associated_users = frozenset(user_id for (user_id,) in cursor) self.__fetchOwners(critic) self.__fetchReviewers(critic) non_watchers = self.__owners_ids | self.__reviewers_ids self.__watchers_ids = associated_users - non_watchers def getWatchers(self, critic): self.__fetchWatchers(critic) return frozenset(api.user.fetch(critic, user_id=user_id) for user_id in self.__watchers_ids) def getFilters(self, critic): if self.__filters is None: cursor = critic.getDatabaseCursor() cursor.execute("""SELECT uid, type, path, id, review, creator FROM reviewfilters WHERE review=%s""", (self.id,)) impls = [api.impl.filters.ReviewFilter(*row) for row in cursor] self.__filters = [api.filters.ReviewFilter(critic, impl) for impl in impls] return self.__filters def getCommits(self, critic): if self.__commits is None: cursor = critic.getDatabaseCursor() # Direct changesets: no merges, no rebase changes. cursor.execute( """SELECT DISTINCT commits.id, commits.sha1 FROM commits JOIN changesets ON (changesets.child=commits.id) JOIN reviewchangesets ON (reviewchangesets.changeset=changesets.id) WHERE reviewchangesets.review=%s AND changesets.type='direct'""", (self.id,)) commit_ids_sha1s = set(cursor) # Merge changesets, excluding those added by move rebases. 
cursor.execute( """SELECT DISTINCT commits.id, commits.sha1 FROM commits JOIN changesets ON (changesets.child=commits.id) JOIN reviewchangesets ON (reviewchangesets.changeset=changesets.id) LEFT OUTER JOIN reviewrebases ON (reviewrebases.review=%s AND reviewrebases.equivalent_merge=commits.id) WHERE reviewchangesets.review=%s AND changesets.type='merge' AND reviewrebases.id IS NULL""", (self.id, self.id)) commit_ids_sha1s.update(cursor) repository = self.getRepository(critic) commits = [api.commit.fetch(repository, commit_id, sha1) for commit_id, sha1 in commit_ids_sha1s] self.__commits = api.commitset.create(critic, commits) return self.__commits def getRebases(self, wrapper): return api.log.rebase.fetchAll(wrapper.critic, wrapper) def wrap(self, critic): return api.review.Review(critic, self) def make(critic, args): for (review_id, repository_id, branch_id, state, summary, description) in args: def callback(): return Review(review_id, repository_id, branch_id, state, summary, description).wrap(critic) yield critic._impl.cached(api.review.Review, review_id, callback) def fetch(critic, review_id, branch): cursor = critic.getDatabaseCursor() if review_id is not None: cursor.execute("""SELECT reviews.id, branches.repository, branches.id, state, summary, description FROM reviews JOIN branches ON (branches.id=reviews.branch) WHERE reviews.id=%s""", (review_id,)) else: cursor.execute("""SELECT reviews.id, branches.repository, branches.id, state, summary, description FROM reviews JOIN branches ON (branches.id=reviews.branch) WHERE branches.id=%s""", (int(branch),)) row = cursor.fetchone() if not row: if review_id is not None: raise api.review.InvalidReviewId(review_id) else: raise api.review.InvalidReviewBranch(branch) return next(make(critic, [row])) def fetchAll(critic, repository, state): cursor = critic.getDatabaseCursor() conditions = ["TRUE"] values = [] if repository is not None: conditions.append("branches.repository=%s") values.append(repository.id) if state is 
not None: conditions.append("reviews.state IN (%s)" % ", ".join(["%s"] * len(state))) values.extend(state) cursor.execute("""SELECT reviews.id, branches.repository, branches.id, state, summary, description FROM reviews JOIN branches ON (branches.id=reviews.branch) WHERE """ + " AND ".join(conditions) + """ ORDER BY reviews.id""", values) return list(make(critic, cursor))
43.540404
91
0.570931
884
8,621
5.401584
0.211538
0.021361
0.039791
0.019058
0.409843
0.366073
0.347853
0.334031
0.308063
0.226597
0
0.002834
0.345088
8,621
197
92
43.761421
0.842898
0.083981
0
0.32
0
0
0.248283
0.032055
0
0
0
0
0
1
0.113333
false
0
0.013333
0.033333
0.213333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
bb037e64dca2ed5aefee607b5004f9b5b50431d4
1,080
py
Python
image_example.py
devonps/DearPyGui-Examples
15093d53f90ec6a2f6a53b7dfc9e0e511e93e27a
[ "MIT" ]
null
null
null
image_example.py
devonps/DearPyGui-Examples
15093d53f90ec6a2f6a53b7dfc9e0e511e93e27a
[ "MIT" ]
null
null
null
image_example.py
devonps/DearPyGui-Examples
15093d53f90ec6a2f6a53b7dfc9e0e511e93e27a
[ "MIT" ]
null
null
null
from dearpygui.core import * from dearpygui.simple import * # callback def update(sender, data): uvmin = get_value("uv_min") uvmax = get_value("uv_max") uvminx = uvmin[0] uvminy = uvmin[1] uvmaxx = uvmax[0] uvmaxy = uvmax[1] add_data("TextureCoordinates", [uvminx, uvminy, uvmaxx, uvmaxy]) configure_item("image_1", uv_min=[get_data("TextureCoordinates")[0], get_data("TextureCoordinates")[1]], uv_max=[get_data("TextureCoordinates")[2], get_data("TextureCoordinates")[3]]) print(get_data("TextureCoordinates")) add_slider_float2("uv_min", default_value=[0, 0], callback=update, min_value=-2, max_value=2) add_slider_float2("uv_max", default_value=[1, 1], callback=update, min_value=-2, max_value=2) add_data("TextureCoordinates", [0, 0, 1, 1]) add_image("image_1", "SpriteMapExample.png", uv_min=[get_data("TextureCoordinates")[0], get_data("TextureCoordinates")[1]], uv_max=[get_data("TextureCoordinates")[2], get_data("TextureCoordinates")[3]]) show_logger() start_dearpygui()
36
97
0.685185
141
1,080
4.985816
0.276596
0.344239
0.320057
0.034139
0.423898
0.423898
0.423898
0.423898
0.423898
0.324324
0
0.030668
0.15463
1,080
29
98
37.241379
0.739321
0.007407
0
0.181818
0
0
0.239252
0
0
0
0
0
0
1
0.045455
false
0
0.090909
0
0.136364
0.045455
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
bb03c2e16d10a68817b5577a1dbdeba0377e167e
8,833
py
Python
runner/runner/main.py
BinalModi/reproserver
2c1f86b67ba57473b507217a3289d92697a09665
[ "BSD-3-Clause" ]
null
null
null
runner/runner/main.py
BinalModi/reproserver
2c1f86b67ba57473b507217a3289d92697a09665
[ "BSD-3-Clause" ]
null
null
null
runner/runner/main.py
BinalModi/reproserver
2c1f86b67ba57473b507217a3289d92697a09665
[ "BSD-3-Clause" ]
null
null
null
from common import database from common import TaskQueues, get_object_store from common.utils import setup_logging, shell_escape from hashlib import sha256 import logging import os import shutil from sqlalchemy.orm import joinedload from sqlalchemy.sql import functions import subprocess import tempfile SQLSession = None object_store = None # IP as understood by Docker daemon, not this container DOCKER_REGISTRY = os.environ.get('REGISTRY', 'localhost:5000') def run_cmd_and_log(session, run_id, cmd): proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) proc.stdin.close() for line in iter(proc.stdout.readline, ''): logging.info("> %s", line) session.add(database.RunLogLine( run_id=run_id, line=line.rstrip())) session.commit() return proc.wait() def run_request(channel, method, _properties, body): """Process a run task. Lookup a run in the database, get the input files from S3, then do the run from the Docker image, upload the log and the output files. 
""" logging.info("Run request received: %r", body) # Look up the run in the database session = SQLSession() exp = joinedload(database.Run.experiment) run = (session.query(database.Run) .options(joinedload(database.Run.parameter_values), joinedload(database.Run.input_files), exp.joinedload(database.Experiment.parameters), exp.joinedload(database.Experiment.paths)) .get(int(body))) if not run: logging.error("Got a run request but couldn't get the run from the " "database (body=%r)", body) # ACK anyway channel.basic_ack(delivery_tag=method.delivery_tag) return # Update status in database if run.started: logging.warning("Starting run which has already been started") else: run.started = functions.now() session.commit() # Remove previous info run.log[:] = [] run.output_files[:] = [] def set_error(msg): logging.warning("Got error: %s", msg) run.done = functions.now() session.add(database.RunLogLine(run_id=run.id, line=msg)) session.commit() channel.basic_ack(delivery_tag=method.delivery_tag) if run.experiment.status != database.Status.BUILT: return set_error("Experiment to run is not BUILT") # Make build directory directory = tempfile.mkdtemp('build_%s' % run.experiment_hash) container = None fq_image_name = '%s/%s' % (DOCKER_REGISTRY, run.experiment.docker_image) try: # Get list of parameters params = {} params_unset = set() for param in run.experiment.parameters: if not param.optional: params_unset.add(param.name) params[param.name] = param.default # Get parameter values for param in run.parameter_values: if param.name in params: logging.info("Param: %s=%r", param.name, param.value) params[param.name] = param.value params_unset.discard(param.name) else: return set_error("Got parameter value for parameter %s which " "does not exist" % param.name) if params_unset: return set_error("Missing value for parameters: %s" % ", ".join(params_unset)) # Get paths paths = {} for path in run.experiment.paths: paths[path.name] = path.path # Get input files inputs = [] for input_file 
in run.input_files: if input_file.name not in paths: return set_error("Got an unknown input file %s" % input_file.name) inputs.append((input_file, paths[input_file.name])) logging.info("Using %d input files: %s", len(inputs), ", ".join(f.name for f, p in inputs)) # Create container container = 'run_%s' % body logging.info("Creating container %s with image %s", container, run.experiment.docker_image) # Turn parameters into a command-line cmdline = [] for k, v in params.iteritems(): if k.startswith('cmdline_'): i = k[8:] cmdline.extend(['cmd', v, 'run', i]) cmdline = ['docker', 'create', '-i', '--name', container, '--', fq_image_name] + cmdline logging.info('$ %s', ' '.join(shell_escape(a) for a in cmdline)) subprocess.check_call(cmdline) for input_file, path in inputs: local_path = os.path.join(directory, 'input_%s' % input_file.hash) # Download file from S3 logging.info("Downloading input file: %s, %s, %d bytes", input_file.name, input_file.hash, input_file.size) object_store.download_file('inputs', input_file.hash, local_path) # Put file in container logging.info("Copying file to container") subprocess.check_call(['docker', 'cp', '--', local_path, '%s:%s' % (container, path)]) # Remove local file os.remove(local_path) # Start container using parameters logging.info("Starting container") try: ret = run_cmd_and_log(session, run.id, ['docker', 'start', '-ai', '--', container]) except IOError: return set_error("Got IOError running experiment") if ret != 0: return set_error("Error: Docker returned %d" % ret) run.done = functions.now() # Get output files for path in run.experiment.paths: if path.is_output: local_path = os.path.join(directory, 'output_%s' % path.name) # Copy file out of container logging.info("Getting output file %s", path.name) ret = subprocess.call(['docker', 'cp', '--', '%s:%s' % (container, path.path), local_path]) if ret != 0: logging.warning("Couldn't get output %s", path.name) session.add(database.RunLogLine( run_id=run.id, line="Couldn't get 
output %s" % path.name)) continue with open(local_path, 'rb') as fp: # Hash it hasher = sha256() chunk = fp.read(4096) while chunk: hasher.update(chunk) chunk = fp.read(4096) filehash = hasher.hexdigest() # Rewind it filesize = fp.tell() fp.seek(0, 0) # Upload file to S3 logging.info("Uploading file, size: %d bytes" % filesize) object_store.upload_fileobj('outputs', filehash, fp) # Add OutputFile to database run.output_files.append( database.OutputFile(hash=filehash, name=path.name, size=filesize)) # Remove local file os.remove(local_path) # ACK session.commit() channel.basic_ack(delivery_tag=method.delivery_tag) logging.info("Done!") except Exception: logging.exception("Error processing run!") if True: set_error("Internal error!") else: # Set database status back to QUEUED run.status = database.Status.QUEUED session.commit() # NACK the task in RabbitMQ channel.basic_nack(delivery_tag=method.delivery_tag) finally: # Remove container if created if container is not None: subprocess.call(['docker', 'rm', '-f', '--', container]) # Remove image subprocess.call(['docker', 'rmi', '--', fq_image_name]) # Remove build directory shutil.rmtree(directory) def main(): setup_logging('REPROSERVER-RUNNER') # SQL database global SQLSession engine, SQLSession = database.connect() # AMQP tasks = TaskQueues() # Object storage global object_store object_store = get_object_store() logging.info("Ready, listening for requests") tasks.consume_run_tasks(run_request)
35.051587
78
0.551681
975
8,833
4.895385
0.246154
0.02996
0.017599
0.020951
0.132202
0.11586
0.092814
0.058873
0.049864
0.023465
0
0.004518
0.348466
8,833
251
79
35.191235
0.824848
0.093513
0
0.143678
0
0
0.11072
0
0
0
0
0
0
1
0.022989
false
0
0.063218
0
0.132184
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
bb06b45e3723a9ef4facef11872ccca58b2eadb8
184
py
Python
applications/ip2country/models/db_iptable.py
blazejkocon/csc438
b06555a965d914ea599d3e6fc85780168486d90c
[ "BSD-3-Clause" ]
null
null
null
applications/ip2country/models/db_iptable.py
blazejkocon/csc438
b06555a965d914ea599d3e6fc85780168486d90c
[ "BSD-3-Clause" ]
null
null
null
applications/ip2country/models/db_iptable.py
blazejkocon/csc438
b06555a965d914ea599d3e6fc85780168486d90c
[ "BSD-3-Clause" ]
null
null
null
# -*- coding: utf-8 -*- db.define_table('ip2nation', Field('code',type='integer', requires=IS_NOT_EMPTY()), Field('country', requires= IS_NOT_EMPTY()))
36.8
70
0.570652
21
184
4.761905
0.761905
0.2
0.26
0.36
0
0
0
0
0
0
0
0.014184
0.233696
184
4
71
46
0.695035
0.11413
0
0
0
0
0.167702
0
0
0
0
0
0
1
0
true
0
0
0
0
0
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
bb085c85e7782e1a789b1bec7a2c6a71e68ad8d9
4,574
py
Python
tests/parser_test.py
OtavioHenrique/yalul
ce99e32365ed5607527b9f2f39705ad5d9e20ba2
[ "MIT" ]
1
2021-04-01T20:22:36.000Z
2021-04-01T20:22:36.000Z
tests/parser_test.py
OtavioHenrique/yalul
ce99e32365ed5607527b9f2f39705ad5d9e20ba2
[ "MIT" ]
1
2020-11-20T22:24:38.000Z
2020-11-20T22:24:38.000Z
tests/parser_test.py
OtavioHenrique/yalul
ce99e32365ed5607527b9f2f39705ad5d9e20ba2
[ "MIT" ]
null
null
null
from yalul.parser import Parser from yalul.parsers.ast.nodes.statements.expressions.binary import Binary from yalul.lex.token import Token from yalul.lex.token_type import TokenType from yalul.parsers.ast.nodes.statements.expressions.values.integer import Integer class TestParserBinary: """Test parser generating binary operations expressions""" def test_parser_run_generates_correct_ast_complex_binary_expression_with_multi_precedence(self): """ Validates if parser is generating a correct AST to a binary expressions with multi precedence, like 39 * 2 + 42 """ tokens = [ Token(TokenType.INTEGER, 39), Token(TokenType.MULTIPLY, "*"), Token(TokenType.INTEGER, 2), Token(TokenType.SUM, '+'), Token(TokenType.INTEGER, 42), Token(TokenType.END_STATEMENT, 'End of Statement'), Token(TokenType.EOF, 'End of File') ] parser_response = Parser(tokens).parse() asts = parser_response.ast.statements ast = asts[0] assert type(ast) is Binary assert ast.operator.type is TokenType.SUM assert type(ast.left) is Binary assert ast.left.operator.type is TokenType.MULTIPLY assert type(ast.left.left) is Integer assert ast.left.left.value == 39 assert type(ast.left.right) is Integer assert ast.left.right.value == 2 assert type(ast.right) is Integer assert ast.right.value == 42 class TestParserGenerateErrors: """Test parser generating correct parser errors""" def test_parser_run_generates_correct_parser_errors(self): """ Validates if parser is generating a correct parser errors """ tokens = [ Token(TokenType.INTEGER, 39), Token(TokenType.MULTIPLY, '*'), Token(TokenType.LEFT_PAREN, 'Left Paren'), Token(TokenType.INTEGER, 41), Token(TokenType.SUM, '+'), Token(TokenType.INTEGER, 1), Token(TokenType.END_STATEMENT, 'End of Statement'), Token(TokenType.EOF, 'End of File') ] parser_response = Parser(tokens).parse() errors = parser_response.errors() assert errors[0] == 'Expected a RIGHT PAREN ) after expression' class TestParserGenerateUnfinishedExpressionErrors: """Test parser generating correct parser 
errors""" def test_parse_run_generates_correct_error_unfinished_expression(self): """ Validates if parser if generating correct error to unfinished expressions """ tokens = [ Token(TokenType.INTEGER, 39), Token(TokenType.MULTIPLY, '*'), Token(TokenType.INTEGER, 41), Token(TokenType.SUM, '+'), Token(TokenType.END_STATEMENT, 'End of Statement'), Token(TokenType.EOF, 'End of File') ] parser_response = Parser(tokens).parse() errors = parser_response.errors() assert errors[0] == 'Expect Expression after TokenType.SUM, Value: +' class TestParserGenerateUnopenedOperatorError: """Test parser generating correct parser errors""" def test_parse_run_generates_correct_error_unopened_operators_right_paren(self): """ Validates if parser if generating correct error to unopened operators """ tokens = [ Token(TokenType.INTEGER, 39), Token(TokenType.MULTIPLY, '*'), Token(TokenType.RIGHT_PAREN, ')'), Token(TokenType.INTEGER, 41), Token(TokenType.END_STATEMENT, 'End of Statement'), Token(TokenType.EOF, 'End of File') ] parser_response = Parser(tokens).parse() errors = parser_response.errors() assert errors[0] == 'Expect a open operator for TokenType.RIGHT_PAREN, Value: )' def test_parse_run_generates_correct_error_unopened_operators_right_brace(self): """ Validates if parser if generating correct error to unopened operators """ tokens = [ Token(TokenType.INTEGER, 39), Token(TokenType.MULTIPLY, '*'), Token(TokenType.RIGHT_BRACE, '}'), Token(TokenType.INTEGER, 41), Token(TokenType.END_STATEMENT, 'End of Statement'), Token(TokenType.EOF, 'End of File') ] parser_response = Parser(tokens).parse() errors = parser_response.errors() assert errors[0] == 'Expect a open operator for TokenType.RIGHT_BRACE, Value: }'
34.134328
119
0.636205
496
4,574
5.739919
0.149194
0.162276
0.088514
0.036881
0.700386
0.675097
0.639269
0.604145
0.559185
0.517035
0
0.011022
0.266069
4,574
133
120
34.390977
0.837057
0.124836
0
0.487805
0
0
0.092837
0.011378
0
0
0
0
0.170732
1
0.060976
false
0
0.060976
0
0.170732
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
bb0a2166fba41078615dd96cf7ca319832633515
4,036
py
Python
tests/core/daemon/test_daemon_alerts.py
akubera/chia-blockchain
91f038e2193755e2a6ca22e2160e2c8f547c23fe
[ "Apache-2.0" ]
1
2021-05-28T01:38:23.000Z
2021-05-28T01:38:23.000Z
tests/core/daemon/test_daemon_alerts.py
hoffmang9/chia-blockchain
edc5f1dfe57ecd81d00b5ba4477024309b1231de
[ "Apache-2.0" ]
null
null
null
tests/core/daemon/test_daemon_alerts.py
hoffmang9/chia-blockchain
edc5f1dfe57ecd81d00b5ba4477024309b1231de
[ "Apache-2.0" ]
null
null
null
# Integration test: verifies that two daemons pick up a signed network alert
# served over HTTP and that both full nodes re-initialize with the genesis
# challenge derived from the alert preimage.

import dataclasses
import pytest
from blspy import PrivateKey
from src.server.outbound_message import NodeType
from src.types.peer_info import PeerInfo
from src.util.block_tools import BlockTools
from src.util.hash import std_hash
from src.util.ints import uint16
from src.util.validate_alert import create_alert_file, create_not_ready_alert_file
from tests.core.full_node.test_full_sync import node_height_at_least
from tests.setup_nodes import self_hostname, setup_daemon, setup_full_system
from tests.simulation.test_simulation import test_constants_modified
from tests.time_out_assert import time_out_assert, time_out_assert_custom_interval
from tests.util.alert_server import AlertServer

# Start with no genesis challenge: the nodes should only learn it from the alert.
no_genesis = dataclasses.replace(test_constants_modified, GENESIS_CHALLENGE=None)
b_tools = BlockTools(constants=no_genesis)
b_tools_1 = BlockTools(constants=no_genesis)

# Deterministic master key; the alert file is signed with it and the daemons
# verify it against the matching public key placed in their configs.
master_int = 5399117110774477986698372024995405256382522670366369834617409486544348441851
master_sk: PrivateKey = PrivateKey.from_bytes(master_int.to_bytes(32, "big"))
pubkey_alert = bytes(master_sk.get_g1()).hex()
alert_url = "http://127.0.0.1:59000/status"  # served by the local AlertServer started in the test

# Point the first BlockTools config at the local alert server, give its daemon
# a dedicated port, and clear the genesis challenge in the network overrides.
new_config = b_tools._config
new_config["CHIA_ALERTS_PUBKEY"] = pubkey_alert
new_config["ALERTS_URL"] = alert_url
new_config["daemon_port"] = 55401
new_config["network_overrides"]["constants"][new_config["selected_network"]]["GENESIS_CHALLENGE"] = None
b_tools.change_config(new_config)

# Same setup for the second daemon, on a different port.
new_config_1 = b_tools_1._config
new_config_1["CHIA_ALERTS_PUBKEY"] = pubkey_alert
new_config_1["ALERTS_URL"] = alert_url
new_config_1["daemon_port"] = 55402
new_config_1["network_overrides"]["constants"][new_config_1["selected_network"]]["GENESIS_CHALLENGE"] = None
b_tools_1.change_config(new_config_1)


class TestDaemonAlerts:
    @pytest.fixture(scope="function")
    async def get_daemon(self):
        # Daemon configured via b_tools (port 55401).
        async for _ in setup_daemon(btools=b_tools):
            yield _

    @pytest.fixture(scope="function")
    async def get_daemon_1(self):
        # Daemon configured via b_tools_1 (port 55402).
        async for _ in setup_daemon(btools=b_tools_1):
            yield _

    @pytest.fixture(scope="function")
    async def simulation(self):
        # Full system built from both BlockTools configs; yields the tuple
        # unpacked at the top of the test below.
        async for _ in setup_full_system(b_tools_1.constants, b_tools=b_tools, b_tools_1=b_tools_1):
            yield _

    @pytest.mark.asyncio
    async def test_daemon_alert_simulation(self, simulation, get_daemon, get_daemon_1):
        node1, node2, _, _, _, _, _, _, _, server1 = simulation
        await server1.start_client(PeerInfo(self_hostname, uint16(21238)))
        daemon = get_daemon
        daemon_1 = get_daemon_1

        # Serve a "not ready" alert first; the real signed alert replaces it below.
        alert_file_path = daemon.root_path / "alert.txt"
        alert_server = await AlertServer.create_alert_server(alert_file_path, 59000)
        create_not_ready_alert_file(alert_file_path, master_sk)
        await alert_server.run()
        selected = daemon.net_config["selected_network"]

        async def num_connections():
            # Number of full-node peers node2 currently has.
            count = len(node2.server.connection_by_type[NodeType.FULL_NODE].items())
            return count

        # Wait (up to 60s, polling every 1s) until node2 has one full-node peer.
        await time_out_assert_custom_interval(60, 1, num_connections, 1)

        # The genesis challenge the daemons should adopt is the hash of the
        # alert's preimage string.
        preimage = "This is test preimage!"
        expected_genesis = std_hash(bytes(preimage, "utf-8")).hex()

        # Swap the not-ready alert for a signed alert carrying the preimage.
        alert_file_path.unlink()
        create_alert_file(alert_file_path, master_sk, "This is test preimage!")

        def check_genesis(expected):
            # Both daemons must have written the new genesis challenge into
            # their network-override config.
            deamon_updated = (
                daemon.net_config["network_overrides"]["constants"][selected]["GENESIS_CHALLENGE"] == expected
            )
            deamon_1_updated = (
                daemon_1.net_config["network_overrides"]["constants"][selected]["GENESIS_CHALLENGE"] == expected
            )
            return deamon_updated and deamon_1_updated

        await time_out_assert(15, check_genesis, True, expected_genesis)

        def check_initialized():
            # Both full nodes re-initialize once a genesis challenge exists.
            return node1.full_node.initialized is True and node2.full_node.initialized is True

        await time_out_assert(15, check_initialized, True)

        # Finally the chain should make progress under the new genesis.
        await time_out_assert(1500, node_height_at_least, True, node2, 7)
39.960396
112
0.745045
544
4,036
5.143382
0.257353
0.045032
0.032523
0.025733
0.338456
0.233024
0.196569
0.10436
0.073624
0
0
0.046657
0.166254
4,036
100
113
40.36
0.784844
0
0
0.076923
0
0
0.102081
0
0
0
0
0
0.064103
1
0.025641
false
0
0.179487
0.012821
0.25641
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
bb0b659ef2cc14f0a9cf674d122a6ce8c529a3af
8,196
py
Python
words.py
GuyKabiri/language-models
63b368aaee74ec077abcc8f66f0c8fbd4c712a07
[ "MIT" ]
41
2018-08-25T14:00:46.000Z
2022-01-20T05:21:03.000Z
words.py
GuyKabiri/language-models
63b368aaee74ec077abcc8f66f0c8fbd4c712a07
[ "MIT" ]
5
2018-07-06T11:34:22.000Z
2021-11-15T20:32:34.000Z
words.py
GuyKabiri/language-models
63b368aaee74ec077abcc8f66f0c8fbd4c712a07
[ "MIT" ]
17
2018-07-24T19:40:19.000Z
2022-01-25T09:14:11.000Z
import keras
import keras.backend as K
from keras.datasets import imdb
from keras.layers import LSTM, Embedding, TimeDistributed, Input, Dense
from keras.models import Model
from tensorflow.python.client import device_lib
from tqdm import tqdm
import os, random
from argparse import ArgumentParser
import numpy as np
from tensorboardX import SummaryWriter

import util

# Number of sample sentences to generate per temperature after each epoch.
CHECK = 5


def generate_seq(model: Model, seed, size, temperature=1.0):
    """
    Sample a sentence from the language model, continuing from a seed.

    :param model: The complete RNN language model.
    :param seed: The first few words of the sequence to start generating from
        (1-D integer array of token ids).
    :param size: The total size of the sequence to generate.
    :param temperature: Controls how closely sampling follows the model's
        probabilities. t=1.0 samples directly from the predicted distribution;
        lower temperatures favor high-probability words (more likely but
        slightly boring sentences), higher temperatures favor low-probability
        words (weirder sentences). t=0.0 is greedy: the highest-probability
        word is always chosen.
    :return: A list of integers representing a sampled sentence.
    """
    ls = seed.shape[0]

    # Due to the way Keras RNNs work, we feed the model a complete sequence
    # each time: the seed zero-padded to the target length, filling in one
    # sampled token per iteration.
    tokens = np.concatenate([seed, np.zeros(size - ls)])

    for i in range(ls, size):
        probs = model.predict(tokens[None, :])
        # Extract the (i-1)-th logit vector and sample the next token id from it.
        next_token = util.sample_logits(probs[0, i - 1, :], temperature=temperature)
        tokens[i] = next_token

    return [int(t) for t in tokens]


def sparse_loss(y_true, y_pred):
    """Sparse categorical cross-entropy on raw logits (the output layer is linear)."""
    return K.sparse_categorical_crossentropy(y_true, y_pred, from_logits=True)


def _load_data(options):
    """Load, report on, and batch the corpus for the selected task.

    :return: (batch-padded sentences, word->index mapping, index->word list).
    :raises Exception: if ``options.task`` is not recognized.
    """
    if options.task == 'wikisimple':
        data_path = util.DIR + '/datasets/wikisimple.txt'
    elif options.task == 'file':
        data_path = options.data_dir
    else:
        raise Exception('Task {} not recognized.'.format(options.task))

    x, w2i, i2w = util.load_words(data_path, vocab_size=options.top_words, limit=options.limit)

    # Report corpus statistics: longest sentence and vocabulary size.
    x_max_len = max(len(sentence) for sentence in x)
    numwords = len(i2w)
    print('max sequence length ', x_max_len)
    print(numwords, 'distinct words')

    return util.batch_pad(x, options.batch, add_eos=True), w2i, i2w


def go(options):
    """Train an LSTM language model and print sampled sentences after each epoch."""
    tbw = SummaryWriter(log_dir=options.tb_dir)

    if options.seed < 0:
        seed = random.randint(0, 1000000)
        print('random seed: ', seed)
        np.random.seed(seed)
    else:
        np.random.seed(options.seed)

    x, w2i, i2w = _load_data(options)
    numwords = len(i2w)

    def decode(seq):
        return ' '.join(i2w[id] for id in seq)

    print('Finished data loading. ', sum([b.shape[0] for b in x]), ' sentences loaded')

    ## Define model
    inp = Input(shape=(None,))
    embedding = Embedding(numwords, options.lstm_capacity, input_length=None)
    embedded = embedding(inp)
    decoder_lstm = LSTM(options.lstm_capacity, return_sequences=True)
    h = decoder_lstm(embedded)

    # Optional extra stacked LSTM layers.
    if options.extra is not None:
        for _ in range(options.extra):
            h = LSTM(options.lstm_capacity, return_sequences=True)(h)

    # Linear (logit) projection back to the vocabulary; softmax happens in the loss.
    fromhidden = Dense(numwords, activation='linear')
    out = TimeDistributed(fromhidden)(h)

    model = Model(inp, out)
    opt = keras.optimizers.Adam(lr=options.lr)
    model.compile(opt, sparse_loss)
    model.summary()

    ## Training
    # Since we have a variable batch size, we make our own training loop and
    # train with model.train_on_batch(...). A little more verbose, but it
    # gives us more control.
    epoch = 0
    instances_seen = 0
    while epoch < options.epochs:
        for batch in tqdm(x):
            n, l = batch.shape
            batch_shifted = np.concatenate([np.ones((n, 1)), batch], axis=1)  # prepend start symbol
            batch_out = np.concatenate([batch, np.zeros((n, 1))], axis=1)  # append pad symbol
            loss = model.train_on_batch(batch_shifted, batch_out[:, :, None])
            instances_seen += n
            tbw.add_scalar('lm/batch-loss', float(loss), instances_seen)
        epoch += 1

        # Show samples for some sentences from random batches.
        for temp in [0.0, 0.9, 1, 1.1, 1.2]:
            print('### TEMP ', temp)
            for i in range(CHECK):
                b = random.choice(x)
                seed = b[0, :20] if b.shape[1] > 20 else b[0, :]
                seed = np.insert(seed, 0, 1)  # prepend the start symbol
                gen = generate_seq(model, seed, 60, temperature=temp)
                print('*** [', decode(seed), '] ', decode(gen[len(seed):]))


if __name__ == "__main__":
    ## Parse the command line options
    parser = ArgumentParser()
    parser.add_argument("-e", "--epochs", dest="epochs",
                        help="Number of epochs.", default=20, type=int)
    parser.add_argument("-E", "--embedding-size", dest="embedding_size",
                        help="Size of the word embeddings on the input layer.", default=300, type=int)
    parser.add_argument("-o", "--output-every", dest="out_every",
                        help="Output every n epochs.", default=1, type=int)
    parser.add_argument("-l", "--learn-rate", dest="lr",
                        help="Learning rate", default=0.001, type=float)
    parser.add_argument("-b", "--batch-size", dest="batch",
                        help="Batch size", default=128, type=int)
    parser.add_argument("-t", "--task", dest="task",
                        help="Task", default='wikisimple', type=str)
    # BUGFIX: dest was "data", but go() reads options.data_dir — --task file
    # previously crashed with AttributeError.
    parser.add_argument("-D", "--data-directory", dest="data_dir",
                        help="Data file. Should contain one sentence per line.", default='./data', type=str)
    parser.add_argument("-L", "--lstm-hidden-size", dest="lstm_capacity",
                        help="LSTM capacity", default=256, type=int)
    parser.add_argument("-m", "--max_length", dest="max_length",
                        help="Max length", default=None, type=int)
    parser.add_argument("-w", "--top_words", dest="top_words",
                        help="Top words", default=10000, type=int)
    parser.add_argument("-I", "--limit", dest="limit",
                        help="Character cap for the corpus", default=None, type=int)
    parser.add_argument("-T", "--tb-directory", dest="tb_dir",
                        help="Tensorboard directory", default='./runs/words', type=str)
    parser.add_argument("-r", "--random-seed", dest="seed",
                        help="RNG seed. Negative for random (seed is printed for reproducability).",
                        default=-1, type=int)
    parser.add_argument("-x", "--extra-layers", dest="extra",
                        help="Number of extra LSTM layers.", default=None, type=int)

    options = parser.parse_args()
    print('OPTIONS', options)
    go(options)
33.453061
120
0.571986
1,001
8,196
4.593407
0.298701
0.027403
0.051762
0.031318
0.193127
0.15659
0.142236
0.113093
0.094389
0.094389
0
0.013934
0.316984
8,196
245
121
33.453061
0.807431
0.171669
0
0.12
0
0
0.133195
0.003568
0
0
0
0
0
1
0.026667
false
0
0.08
0.013333
0.126667
0.066667
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
bb0cafc42dfe07c8f9f0ade59b4efb5599bbec9d
979
py
Python
synthdnm/backend.py
james-guevara/synthdnm
8510cfd91438452da553d35894b63c5d75cdd47e
[ "MIT" ]
6
2021-02-22T08:29:49.000Z
2021-11-26T21:24:49.000Z
build/lib/synthdnm/backend.py
james-guevara/synthdnm
8510cfd91438452da553d35894b63c5d75cdd47e
[ "MIT" ]
1
2021-10-04T19:22:34.000Z
2021-11-16T21:22:12.000Z
synthdnm/backend.py
james-guevara/synthdnm
8510cfd91438452da553d35894b63c5d75cdd47e
[ "MIT" ]
1
2020-11-06T18:57:57.000Z
2020-11-06T18:57:57.000Z
import re


def tokenize(line):
    """Split a line into fields on tabs, falling back to single spaces.

    The trailing newline/whitespace is stripped before splitting.

    :param line: One raw line of text.
    :return: List of field strings.
    """
    linesplit = line.rstrip().split("\t")
    if len(linesplit) == 1:
        # No tabs found: assume the fields are space-separated instead.
        # BUGFIX: the original only returned in the tab-split branch, so
        # space-separated lines fell through and the function returned None.
        linesplit = line.rstrip().split(" ")
    return linesplit


def process_ped(fam_filepath):
    """Parse a pedigree (.fam) file into a dictionary keyed by individual id.

    Each value is the tuple (fid, iid, father_iid, mother_iid, sex, phen).
    Rows are skipped when the sex code is not "1" or "2", or when both
    parent ids are "0" (i.e. founders without listed parents).

    :param fam_filepath: Path to the pedigree file.
    :return: dict mapping iid -> (fid, iid, father_iid, mother_iid, sex, phen).
    """
    ped_dictionary = {}
    with open(fam_filepath) as f:
        for line in f:
            linesplit = tokenize(line)
            fid, iid, father_iid, mother_iid, sex, phen = linesplit[:6]
            if sex != "1" and sex != "2":
                continue
            if father_iid == "0" and mother_iid == "0":
                continue
            ped_dictionary[iid] = (fid, iid, father_iid, mother_iid, sex, phen)
    return ped_dictionary


def tabbit(line):
    """Re-join a whitespace-split line with tabs, dropping the first field.

    NOTE(review): the first token is deliberately discarded ([1:]) —
    presumably a leading index/id column; confirm against callers.
    """
    # Raw string for the regex (avoids the invalid-escape warning for "\s").
    linesplit = re.split(r"\s+", line.rstrip())
    return "\t".join(linesplit[1:])
40.791667
130
0.657814
138
979
4.557971
0.413043
0.082671
0.057234
0.071542
0.198728
0.198728
0.198728
0.198728
0.149444
0.149444
0
0.015484
0.208376
979
23
131
42.565217
0.796129
0.208376
0
0
0
0
0.015584
0
0
0
0
0
0
1
0.166667
false
0
0.055556
0
0.333333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
bb0cd1bc006214dc6eb22b166f68a1596ea8baca
1,925
py
Python
ws/RLAgents/B_ValueBased/Bootstrapping/OnPolicy/sarsa/impl_mgt.py
dattaray-basab/RLGames
b12263fe7a4a246be02fc20ed20cfb9fda40d29b
[ "MIT" ]
null
null
null
ws/RLAgents/B_ValueBased/Bootstrapping/OnPolicy/sarsa/impl_mgt.py
dattaray-basab/RLGames
b12263fe7a4a246be02fc20ed20cfb9fda40d29b
[ "MIT" ]
null
null
null
ws/RLAgents/B_ValueBased/Bootstrapping/OnPolicy/sarsa/impl_mgt.py
dattaray-basab/RLGames
b12263fe7a4a246be02fc20ed20cfb9fda40d29b
[ "MIT" ]
null
null
null
from ws.RLAgents.B_ValueBased.Bootstrapping.qtable_mgt import qtable_mgt


def impl_mgt(app_info):
    """Build and return a SARSA training loop bound to the given app_info.

    app_info supplies the environment (ENV, with its Display), and the
    hyperparameters LEARNING_RATE, DISCOUNT_FACTOR and EPSILON that the
    closures below read. Returns fn_run_sarsa, the episode-loop entry point.
    """
    _env = app_info.ENV
    Display = app_info.ENV.Display
    # Q-table accessors: get/set a single Q value, get all action values for a
    # state, and epsilon-greedy action selection.
    fn_get_qval, fn_set_qval, fn_get_q_actions, fn_get_max_q_actions = qtable_mgt()

    def _fn_update_knowledge(state, action, reward, next_state, next_action):
        # SARSA update: Q(s,a) <- Q(s,a) + alpha * (r + gamma * Q(s',a') - Q(s,a)).
        current_q = fn_get_qval(state, action)
        next_state_q = fn_get_qval(next_state, next_action)
        new_q = (current_q + app_info.LEARNING_RATE * (reward + app_info.DISCOUNT_FACTOR * next_state_q - current_q))
        fn_set_qval(state, action, new_q)

    def fn_run_sarsa():
        # Run episodes forever (or a single one when TEST_MODE is set).
        episode_num = 0
        while True:
            episode_num += 1
            episode_status = _fn_run_episode(Display.fn_move_cursor)
            print('episode number: {} status = {}'.format(episode_num, episode_status))
            if 'TEST_MODE' in app_info:
                if app_info.TEST_MODE:
                    # ONLY 1 episode needed
                    break
            pass

    def _fn_run_episode(fn_move_cursor):
        """Run one episode; returns the final continue_running flag.

        NOTE(review): continue_running is set from `reward == 0`, so a
        non-zero reward ends the episode — confirm this matches the
        environment's terminal-reward convention.
        """
        new_state = None
        state = _env.fn_reset_env()
        action = fn_get_max_q_actions(state, app_info.EPSILON)
        Display.fn_update_qvalue(state, fn_get_q_actions(state))
        continue_running = True
        while continue_running:
            new_state, reward, done, _ = _env.fn_take_step(action)
            continue_running = reward == 0
            if fn_move_cursor is not None:
                fn_move_cursor(state, new_state)
            # Choose a' first, then update Q(s,a) using (s, a, r, s', a') —
            # the on-policy SARSA ordering.
            new_action = fn_get_max_q_actions(new_state, app_info.EPSILON)
            _fn_update_knowledge(state, action, reward, new_state, new_action)
            Display.fn_update_qvalue(state, fn_get_q_actions(state))
            action = new_action
            state = new_state
        if fn_move_cursor is not None:
            fn_move_cursor(new_state)
        return continue_running

    return fn_run_sarsa
37.019231
89
0.652987
266
1,925
4.270677
0.255639
0.055458
0.06338
0.034331
0.276408
0.237676
0.139085
0.139085
0.139085
0.139085
0
0.002869
0.275844
1,925
51
90
37.745098
0.812052
0.010909
0
0.097561
0
0
0.021556
0
0
0
0
0
0
1
0.097561
false
0.02439
0.02439
0
0.170732
0.02439
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0