text string | size int64 | token_count int64 |
|---|---|---|
from django.contrib.auth import get_user_model
from rest_framework.reverse import reverse_lazy
from rest_framework.test import APITestCase
from rest_framework import status
from django.contrib.auth.hashers import make_password
User = get_user_model()
class PostApiTestCase(APITestCase):
    """API tests for creating posts as an authenticated vs. anonymous user.

    Cleanup vs. the original: removed the leftover debug print() calls that
    polluted the test runner's output.
    """

    @classmethod
    def setUpTestData(cls):
        """Create one active user (with a hashed password) shared by all tests."""
        data = {
            'email': 'test22@test.com',
            # Store a real hash so the login endpoint can verify the password.
            'password': make_password('tester26'),
        }
        cls.user = User.objects.create(**data, is_active=True)
        # E-mail marked primary/verified — presumably required by the
        # allauth-style login flow; confirm against auth_app settings.
        cls.user.emailaddress_set.create(email=cls.user.email, primary=True, verified=True)

    def setUp(self):
        """Log the shared user in before every test."""
        url = reverse_lazy('auth_app:api_login')
        data = {
            'email': self.user.email,
            'password': 'tester26',
        }
        response = self.client.post(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)

    def test_create_post(self):
        """An authenticated user can create a post and list it back."""
        url = reverse_lazy('posts:post-list')
        data = {
            'title': 'Sport',
            'content': 'Test content',
        }
        response = self.client.post(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
        response = self.client.get(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)

    def test_create_post_forbidden(self):
        """After logout, creating a post is rejected with 401."""
        url = reverse_lazy('auth_app:logout')
        response = self.client.post(url)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        url = reverse_lazy('posts:post-list')
        data = {
            'title': 'Sport',
            'content': 'Test content',
        }
        response = self.client.post(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
| 2,017 | 626 |
import configparser
import os

from pyspark.sql import SparkSession, Window
from pyspark.sql.functions import (
    asc,
    col,
    date_format,
    dayofmonth,
    dayofweek,
    desc,
    hour,
    month,
    monotonically_increasing_id,
    row_number,
    weekofyear,
    year,
)
from pyspark.sql.types import StructType, StructField, StringType, IntegerType, LongType, DoubleType, TimestampType
# Load AWS credentials from the local `dl.cfg` file and export them as
# environment variables so the Hadoop S3 connectors can pick them up.
config = configparser.ConfigParser()
config.read('dl.cfg')
os.environ['AWS_ACCESS_KEY_ID'] = config.get('S3', 'AWS_ACCESS_KEY_ID')
os.environ['AWS_SECRET_ACCESS_KEY'] = config.get('S3', 'AWS_SECRET_ACCESS_KEY')
def create_spark_session():
    """Build (or reuse) the SparkSession used by every ETL step.

    The hadoop-aws package is pulled in so the session can read/write S3.
    """
    builder = SparkSession.builder
    builder = builder.appName('Sparkify Data Lake')
    builder = builder.config("spark.jars.packages", "org.apache.hadoop:hadoop-aws:2.7.0")
    return builder.getOrCreate()
def process_song_data(spark, input_data, output_data):
    """Load the raw song dataset and build the Songs and Artists tables on S3."""
    print('Start processing song data...')
    # One read serves both dimension builders below.
    raw_songs = spark.read.json(input_data + 'song_data/*/*/*/*')
    # Songs dimension table.
    process_songs(spark, raw_songs, output_data)
    # Artists dimension table.
    process_artists(spark, raw_songs, output_data)
    print('Finish processing song data.')
def process_log_data(spark, input_data, output_data):
    """Build the log-derived tables and the Songplays fact table on S3.

    1. Users and Time dimension tables come from the raw logs alone.
    2. Songplays additionally needs the raw song dataset for the join.
    """
    print('Start processing log data...')
    raw_logs = spark.read.json(input_data + 'log_data/*')
    # Users dimension table.
    process_users(spark, raw_logs, output_data)
    # Time dimension table.
    process_time(spark, raw_logs, output_data)
    # Songplays needs both datasets; read the songs here as well.
    raw_songs = spark.read.json(input_data + 'song_data/*/*/*/*')
    process_songplays(spark, raw_songs, raw_logs, output_data)
    print('Finish processing log data.')
def process_songs(spark, df, output_data):
    """Build the Songs dimension table from raw song data and write it to S3.

    Output is parquet partitioned by year and artist_id.
    """
    print('Processing songs...')
    # Explicit schema: inference would work, but pinning the types protects
    # against accidental type drift in the raw files.
    songs_schema = StructType([
        StructField('song_id', StringType(), nullable=False),
        StructField('title', StringType(), nullable=False),
        StructField('artist_id', StringType(), nullable=True),
        StructField('year', LongType(), nullable=True),
        StructField('duration', DoubleType(), nullable=True)
    ])
    # Drop rows missing the key fields and de-duplicate on song_id so the
    # same song never appears twice in the table.
    cleaned = (
        df
        .filter(col('song_id').isNotNull())
        .filter(col('title').isNotNull())
        .dropDuplicates(['song_id'])
        .select('song_id', 'title', 'artist_id', 'year', 'duration')
    )
    songs_table = spark.createDataFrame(cleaned.rdd, songs_schema)
    print('Writing songs_table data frame to parquet to S3')
    songs_table_path = output_data + 'tables/songs/songs.parquet'
    (songs_table
        .write
        .partitionBy('year', 'artist_id')
        .mode('overwrite')
        .parquet(songs_table_path))
    print('Songs table has been created.')
def process_artists(spark, df, output_data):
    """Build the Artists dimension table from raw song data and write it to S3."""
    print('Processing artists...')
    # Explicit schema: pinning the types protects against type drift.
    artists_schema = StructType([
        StructField('artist_id', StringType(), nullable=False),
        StructField('name', StringType(), nullable=False),
        StructField('location', StringType(), nullable=True),
        StructField('latitude', DoubleType(), nullable=True),
        StructField('longitude', DoubleType(), nullable=True)
    ])
    # Drop rows missing the key fields and de-duplicate on artist_id so the
    # same artist never appears twice in the table.
    cleaned = (
        df
        .filter(col('artist_id').isNotNull())
        .filter(col('artist_name').isNotNull())
        .dropDuplicates(['artist_id'])
        .select('artist_id', 'artist_name', 'artist_location', 'artist_latitude', 'artist_longitude')
    )
    artists_table = spark.createDataFrame(cleaned.rdd, artists_schema)
    print('Writing artists_table data frame to parquet to S3')
    artists_table_path = output_data + 'tables/artists/artists.parquet'
    (artists_table
        .write
        .mode('overwrite')
        .parquet(artists_table_path))
    print('Artists table has been created.')
def process_users(spark, df, output_data):
    """Build the Users dimension table (latest state per user) and write it to S3.

    Design notes:
      * Log events are NOT filtered by page/action: we want a row for every
        user that ever appears in the logs, even without a NextSong event.
      * A user can occur many times in the logs; only the most recent event
        (by the 'ts' column) is kept, i.e. the table stores the latest known
        state (name, gender, level) rather than a history.

    BUG FIX vs. the original:
      * dropDuplicates was called with a plain string ('userId') where the
        API expects a list of column names, and it ran BEFORE the window
        ranking — discarding an arbitrary event per user and defeating the
        "latest by ts" logic.  De-duplication now runs AFTER the latest row
        has been selected, purely as a robustness safety net.
    """
    print('Processing users...')
    # Explicit schema: pinning the types protects against type drift.
    users_schema = StructType([
        StructField('user_id', LongType(), nullable=False),
        StructField('first_name', StringType(), nullable=True),
        StructField('last_name', StringType(), nullable=True),
        StructField('gender', StringType(), nullable=True),
        StructField('level', StringType(), nullable=True)
    ])
    # Rank each user's events newest-first; row_number() == 1 is the latest.
    users_window = Window \
        .partitionBy('userId') \
        .orderBy(col('ts').desc())
    users_rdd = df \
        .filter(col('userId').isNotNull()) \
        .withColumn('num', row_number().over(users_window)) \
        .filter(col('num') == 1) \
        .withColumn('user_id', col('userId').cast(LongType())) \
        .dropDuplicates(['user_id']) \
        .select('user_id', 'firstName', 'lastName', 'gender', 'level') \
        .rdd
    users_table = spark.createDataFrame(users_rdd, users_schema)
    print('Writing users_table data frame to parquet to S3')
    users_table_path = output_data + 'tables/users/users.parquet'
    users_table \
        .write \
        .mode('overwrite') \
        .parquet(users_table_path)
    print('Users table has been created.')
def process_time(spark, df, output_data):
    """Build the Time dimension table from log timestamps and write it to S3.

    Timestamp handling relies on Spark's built-in datetime functions rather
    than a Python udf(), so the conversion runs natively inside the JVM.

    BUG FIX vs. the original: the weekday column used
    date_format(..., 'F'), which is the "aligned day-of-week in month"
    pattern (e.g. the "2nd" Wednesday of a month), not a weekday number.
    It now uses the dedicated dayofweek() function
    (1 = Sunday ... 7 = Saturday).
    """
    print('Processing time...')
    # Explicit schema: pinning the types protects against type drift.
    time_schema = StructType([
        StructField('start_time', TimestampType(), nullable=False),
        StructField('hour', IntegerType(), nullable=False),
        StructField('day', IntegerType(), nullable=False),
        StructField('week', IntegerType(), nullable=False),
        StructField('month', IntegerType(), nullable=False),
        StructField('year', IntegerType(), nullable=False),
        StructField('weekday', IntegerType(), nullable=False)
    ])
    # 'ts' is epoch milliseconds; /1000 yields seconds, which
    # cast(TimestampType()) understands.  Duplicate timestamps are dropped
    # so the same instant never appears twice in the table.
    time_rdd = df \
        .select('ts') \
        .withColumn('timestamp', (col('ts') / 1000).cast(TimestampType())) \
        .dropDuplicates(['timestamp']) \
        .select(
            col('timestamp').alias('start_time'),
            hour('timestamp').alias('hour'),
            dayofmonth('timestamp').alias('day'),
            weekofyear('timestamp').alias('week'),
            month('timestamp').alias('month'),
            year('timestamp').alias('year'),
            dayofweek('timestamp').alias('weekday')
        ) \
        .rdd
    time_table = spark.createDataFrame(time_rdd, time_schema)
    print('Writing time_table data frame to parquet to S3')
    time_table_path = output_data + 'tables/time/time.parquet'
    time_table \
        .write \
        .partitionBy('year', 'month') \
        .mode('overwrite') \
        .parquet(time_table_path)
    print('Time table has been created.')
def process_songplays(spark, song_df, log_df, output_data):
    """Build the Songplays fact table by joining raw logs with raw songs.

    The two datasets share no identifier, so matching rows are found by a
    composite natural key:
      * song title       == log 'song'
      * artist name      == log 'artist'
      * song duration    == log 'length'
    The inner join doubles as a cleanup step: log rows with no matching song
    are dropped from the fact table.
    """
    print('Processing songplays...')
    # Explicit schema; the extra 'year' and 'month' columns exist only for
    # partitioning the parquet output.
    songplays_schema = StructType([
        StructField('songplay_id', LongType(), nullable=False),
        StructField('start_time', TimestampType(), nullable=False),
        StructField('user_id', LongType(), nullable=False),
        StructField('level', StringType(), nullable=True),
        StructField('song_id', StringType(), nullable=False),
        StructField('artist_id', StringType(), nullable=False),
        StructField('session_id', LongType(), nullable=True),
        StructField('location', StringType(), nullable=True),
        StructField('user_agent', StringType(), nullable=True),
        StructField('year', IntegerType(), nullable=False),
        StructField('month', IntegerType(), nullable=False)
    ])
    # Songs must carry both dimension keys to be joinable.
    valid_songs = song_df \
        .filter(col('song_id').isNotNull()) \
        .filter(col('artist_id').isNotNull())
    # Only actual play events belong in the fact table.
    plays = log_df.filter(col('page') == 'NextSong')
    join_condition = (
        (valid_songs.title == plays.song)
        & (valid_songs.artist_name == plays.artist)
        & (valid_songs.duration == plays.length)
    )
    # monotonically_increasing_id() supplies unique surrogate row ids.
    songplays_rdd = valid_songs \
        .join(plays, join_condition, 'inner') \
        .withColumn('id', monotonically_increasing_id() + 1) \
        .withColumn('start_time', (col('ts') / 1000).cast(TimestampType())) \
        .withColumn('user_id', col('userId').cast(LongType())) \
        .withColumn('year', year('start_time')) \
        .withColumn('month', month('start_time')) \
        .select('id', 'start_time', 'user_id', 'level', 'song_id', 'artist_id', 'sessionId', 'location',
                'userAgent', 'year', 'month') \
        .repartition('year', 'month') \
        .rdd
    songplays_table = spark.createDataFrame(songplays_rdd, songplays_schema)
    print('Writing songplays_table data frame to parquet to S3')
    songplays_table_path = output_data + 'tables/songplays/songplays.parquet'
    songplays_table \
        .write \
        .partitionBy('year', 'month') \
        .mode('overwrite') \
        .parquet(songplays_table_path)
    print('Songplays table has been created.')
def main():
    """Create the Spark session, run both ETL stages, then shut down."""
    spark = create_spark_session()
    # NOTE(review): input uses the s3a:// scheme while output uses s3n:// —
    # presumably intentional for this bucket, but worth confirming.
    input_data = "s3a://udacity-dend/"
    output_data = "s3n://ceri-sparkify"
    process_song_data(spark, input_data, output_data)
    process_log_data(spark, input_data, output_data)
    spark.stop()
# Entrypoint: run the ETL only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| 16,521 | 4,563 |
# Savital https://github.com/Savital
# Reads data from proc
import os
class ProcReader():
    """Read whitespace-separated tables from /proc-style text files."""

    def __init__(self):
        super(ProcReader, self).__init__()
        self.construct()

    def __del__(self):
        pass

    def construct(self):
        # Hook for subclasses; no setup is needed in the base reader.
        pass

    def get(self, path):
        """Parse *path* into a list of rows, each a list of string fields.

        Returns False when the file does not exist (kept for backward
        compatibility with callers that test the result's truthiness).

        Fixes vs. the original hand-rolled scanner:
          * a line without a trailing newline no longer loses its last field;
          * runs of spaces no longer produce empty-string fields;
          * the file handle is closed via `with` even on error;
          * the builtin name `list` is no longer shadowed;
          * the debug print() of the full result was removed.
        """
        if not os.path.exists(path):
            return False
        results = []
        with open(path, 'r') as f:
            for line in f:
                # str.split() handles tabs, repeated spaces and a missing
                # final newline in one pass.
                results.append(line.split())
        return results
| 882 | 252 |
from django.test import TestCase
from django.contrib.auth import get_user_model
from .models import Entry
class EntryModelTest(TestCase):
    """Ensure that a blog entry's string representation is equal to its title"""

    def test_string_representation(self):
        # str(entry) must round-trip the title exactly.
        entry = Entry(title="This is a test title")
        self.assertEqual(str(entry), entry.title)

    def test_verbose_name_plural(self):
        # Admin-facing plural label must read "entries".
        self.assertEqual(str(Entry._meta.verbose_name_plural), "entries")
class HomePageTest(TestCase):
    """Test whether our blog entries show up on the homepage"""
    def setUp(self):
        # Entries require an author; create a throwaway user for each test.
        self.user = get_user_model().objects.create(username="some_user")
def test_homepage(self):
response = self.client.get('/blog/list')
self.assertEqual(response.status_code, 200)
def test_one_entry(self):
Entry.objects.create(title='1-title', body='1-body', author=self.user)
response = self.client.get('/blog/list')
self.assertContains(response, '1-title')
self.assertContains(response, '1-body')
def test_no_entries(self):
response = self.client.get('/blog')
self.assertContains(response, 'No blog entries yet')
def test_two_entries(self):
Entry.objects.create(title='1-title', body='1-body', author=self.user)
Entry.objects.create(title='2-title', body='2-body', author=self.user)
response = self.client.get('/')
self.assertContains(response, '1-title')
self.assertContains(response, '1-body')
self.assertContains(response, '2-title') | 1,575 | 478 |
class Executioner:
    """Empty placeholder class; behaviour not implemented yet."""
    pass
# running time grows exponentially: roughly between 2^(n/2) and 2^n calls
# fn= { 0 ; n=0
# 1 ; n=1
# f(n-1)+f(n-2) ; n>1}
def fib(n):
    """Return the n-th Fibonacci number via naive double recursion.

    Deliberately the slow textbook version — kept for comparison with
    Fastfib below.
    """
    if n <= 1:
        return n
    return fib(n - 1) + fib(n - 2)
# for f(n)>n it will take very long time to compute
# bad algo
"""
F(n)
/ \
f(n-1) f(n-2)
/ \ / \
f(n-2) f(n-3) f(n-3) f(n-4)
/ \ / \ / \ / \
/ \ / \ / \ f(n-5) f(n-6)
/ \ / \ f(n-4) f(n-5)
f(n-3) f(n-4) f(n-4) f(n-5)
Above we can see that the same subproblem is computed again (look at f(n-3)), which is not required
"""
def Fastfib(n):
    """Return the n-th Fibonacci number iteratively (O(n) time, O(1) space).

    Cleanup vs. the original list-based version: removed the leftover debug
    print() calls, the redundant `f[1] = 1` assignment, and the growing list
    (only the last two values are ever needed).
    """
    if n <= 1:
        return n
    prev, curr = 0, 1
    for _ in range(2, n + 1):
        prev, curr = curr, prev + curr
    return curr
"""it takes less time it is more powerful"""
# Entrypoint: read n and show the n-th Fibonacci number computed both ways.
if __name__ == "__main__":
    n = int(input("enter the no: "))
    res = fib(n)
    res2 = Fastfib(n)
    # BUG FIX: the original message claimed "sum of first n fibnocci no",
    # but fib(n)/Fastfib(n) return the n-th Fibonacci number, not a sum.
    print(f"the {n}-th fibonacci number with recursion : ", res)
    print(f"the {n}-th fibonacci number with fast algo : ", res2)
import time
import turtle as t
# 'standard' mode: heading 0 points east, angles grow counter-clockwise.
t.mode('standard')
t.speed(8)
# Base edge length (in grid units) used by square()/polygon().
DISTANCE = 8
# Every grid coordinate/length is multiplied by this factor before drawing.
RESIZE_RATIO = 6
t.pensize(RESIZE_RATIO)
class Restorer:
    """Remember the turtle's position at creation time and jump back later."""

    def __init__(self):
        # Snapshot the position now; restore() teleports back to it.
        self.last_pos = t.pos()

    def restore(self):
        x, y = self.last_pos[0], self.last_pos[1]
        t.goto(x, y)
class Draw:
    """Turtle drawing helpers that work in a scaled grid.

    Every coordinate or length passed to these helpers is multiplied by
    RESIZE_RATIO before it reaches the turtle, so callers think in small
    grid units while the picture stays readable on screen.
    """

    @staticmethod
    def goto(x, y, heading=0):
        """Jump (pen up) to grid point (x, y), face *heading*, pen down."""
        t.penup()
        t.goto(x * RESIZE_RATIO, y * RESIZE_RATIO)
        t.setheading(heading)
        t.pendown()

    @staticmethod
    def line(first_line=(), second_line=()):
        """Draw a straight segment between two grid points.

        The turtle's original position is restored afterwards.
        """
        restorer = Restorer()
        assert len(first_line) == 2 and type(
            first_line) == tuple, "'first_line' must be a Tuple object with 2 positional parameters."
        assert len(second_line) == 2 and type(
            second_line) == tuple, "'second_line' must be a Tuple object with 2 positional parameters."
        t.penup()
        t.goto(first_line[0] * RESIZE_RATIO, first_line[1] * RESIZE_RATIO)
        t.pendown()
        t.goto(second_line[0] * RESIZE_RATIO, second_line[1] * RESIZE_RATIO)
        t.penup()
        restorer.restore()

    def rectangle_absolute(self, top_left_corner: tuple, bottom_right_corner: tuple, fill_color: str = "black"):
        """Draw a filled rectangle between two absolute grid corners."""
        t.fillcolor(fill_color)
        self.goto(top_left_corner[0], top_left_corner[1])
        t.begin_fill()
        for _ in range(2):
            t.forward((bottom_right_corner[0] - top_left_corner[0]) * RESIZE_RATIO)
            t.left(90)
            t.forward((bottom_right_corner[1] - top_left_corner[1]) * RESIZE_RATIO)
            t.left(90)
        t.end_fill()

    # BUG FIX: the class originally defined `circle` twice — a @staticmethod
    # here and an instance method further down.  Python silently keeps only
    # the later definition, so the dead @staticmethod was removed.

    def function(self, _function, trace_size: float = 0.1, x_range: tuple = (), y_range: tuple = ()):
        """Plot helper — currently a deliberate no-op.

        NOTE(review): the original body looped over
        ``range(trace_size, trace_size)`` (always empty for ints, and a
        TypeError for the float default) and discarded the computed y value,
        so it never drew anything.  Kept as an explicit no-op until the
        intended behaviour is specified.
        """
        restorer = Restorer()
        restorer.restore()

    def square(self, stroke="black"):
        """Draw a DISTANCE-sized square outline in *stroke* colour."""
        t.color(stroke)
        for _ in range(4):
            t.forward(DISTANCE * RESIZE_RATIO)
            self.turn_left()

    def rectangle_relative(self, x_side, y_side):
        """Draw a rectangle outline starting at the current position."""
        for _ in range(2):
            t.forward(x_side * RESIZE_RATIO)
            self.turn_left()
            t.forward(y_side * RESIZE_RATIO)
            self.turn_left()

    def triangle(self):
        """Draw a white, black-bordered triangle (3-sided polygon)."""
        self.polygon(sides=3, fill="white", stroke="black")

    def circle(self, distance=DISTANCE):
        """Draw a circle with radius *distance* grid units."""
        t.circle(RESIZE_RATIO * distance)

    def polygon(self, sides=5, fill="red", stroke="black"):
        """Draw a filled regular polygon with *sides* sides."""
        assert sides >= 3, "Side amount of a polygon should be greater or equals to 3."
        t.color(stroke, fill)
        turn_angle = 360 / sides
        t.begin_fill()
        for _ in range(sides):
            t.forward(RESIZE_RATIO * DISTANCE / sides * 5)
            t.left(turn_angle)
        t.end_fill()

    def car(self):
        """Draw a simple car: body rectangle, two wheels, one headlight."""
        self.goto(-4, -3)
        self.rectangle_relative(8, 3)
        t.fillcolor("black")
        self.goto(-2, -4)
        t.begin_fill()
        self.circle(1)
        t.end_fill()
        self.goto(2, -4)
        t.begin_fill()
        self.circle(1)
        t.end_fill()
        self.goto(-4, -2.5)
        t.begin_fill()
        self.circle(0.5)
        t.end_fill()

    def house(self):
        """Draw a house: rectangular base plus a triangular roof."""
        self.rectangle_relative(8, 6)
        self.goto(8, 6, heading=-150)
        triangle_sides = 4.6
        t.forward(triangle_sides * RESIZE_RATIO)
        t.left(60)
        t.forward(triangle_sides * RESIZE_RATIO)

    def tower(self):
        """Draw a lattice transmission tower segment by segment."""
        self.line((0, 0), (24, 0))
        self.line((5, 10), (0, 0))
        self.line((19, 10), (24, 0))
        self.rectangle_absolute((5, 10), (19, 12), "black")
        self.line((7, 12), (10, 22))
        self.line((17, 12), (14, 22))
        self.rectangle_absolute((8, 22), (16, 23), "black")
        self.line((10, 23), (11, 35))
        self.line((13, 35), (14, 23))
        self.rectangle_absolute((10, 35), (14, 36), "black")
        self.line((11, 36), (11, 48))
        self.line((13, 36), (13, 48))
        self.line((19, 10), (0, 0))
        self.line((5, 10), (24, 0))
        self.line((14, 21), (7, 13))
        self.line((17, 13), (10, 21))
        self.line((13, 34), (10, 24))
        self.line((11, 34), (14, 24))
        self.line((13, 47), (11, 37))
        self.line((11, 47), (13, 37))
        self.goto(12, 48)
        t.fillcolor("black")
        t.begin_fill()
        self.circle(2)
        t.end_fill()

    def turn_left(self):
        """Rotate the turtle 90 degrees counter-clockwise."""
        t.left(90)

    def turn_right(self):
        """Rotate the turtle 90 degrees clockwise."""
        t.right(90)

    def turn_around(self):
        """Rotate the turtle 180 degrees."""
        t.left(180)

    def start_section(self, text):
        """Write a section label below the origin, then reset pen to (0, 0)."""
        t.penup()
        self.turn_right()
        t.forward(DISTANCE * RESIZE_RATIO)
        t.pendown()
        t.write(text)
        t.penup()
        self.goto(0, 0, 0)
        t.pendown()
        t.pensize(2)

    def end_section(self):
        """Pause briefly so the drawing is visible, then clear the screen."""
        time.sleep(.5)
        t.reset()
# Demo script: render each figure in its own labelled section.
draw = Draw()
# 1. Square
draw.start_section("1. Square")
draw.square()
draw.end_section()
# 2. Rectangle
draw.start_section("2. Rectangle")
draw.rectangle_relative(8, 4)
draw.end_section()
# 3. Triangle
draw.start_section("3. Triangle")
draw.triangle()
draw.end_section()
# 4. Circle
draw.start_section("4. Circle")
draw.circle()
draw.end_section()
# 5. Blue Square
draw.start_section("5. Blue Square")
draw.square("blue")
draw.end_section()
# 6. Hexagon with red background and yellow border
draw.start_section("6. Hexagon with red background and yellow border")
draw.polygon(sides=6, fill="red", stroke="yellow")
draw.end_section()
# 7. Car
draw.start_section("7. Car")
draw.car()
draw.end_section()
# 8. House
draw.start_section("8. House")
draw.house()
draw.end_section()
# NOTE(review): section 9 is missing from the original numbering.
# 10. Tower
draw.start_section("10. Tower")
draw.tower()
draw.end_section()
| 5,765 | 2,254 |
#!/usr/bin/env python3
"""script for testing connection to database"""
import pyodbc
import sys
import os
from models.user import User
import models
# Connection settings come from the environment; all must be present.
driver = os.environ.get('CONTACT_SQL_DRIVER')
server = os.environ.get('CONTACT_SQL_SERVER')
database = os.environ.get('CONTACT_SQL_DB')
username = os.environ.get('CONTACT_SQL_USER')
password = os.environ.get('CONTACT_SQL_PASS')
try:
    # NOTE(review): the table name is interpolated straight into the SQL.
    # ODBC cannot parameterize identifiers, so this is acceptable only
    # because sys.argv comes from a trusted operator running the script.
    statement = "SELECT * FROM {}".format(sys.argv[1])
except IndexError:
    # BUG FIX: was a bare `except:` that also swallowed SystemExit and
    # KeyboardInterrupt; only a missing argv entry should trigger the usage.
    print("please provide a table as an argument")
    print("usage: ./build_list.py user_table_name")
    exit(1)
needed = [driver, server, database, username, password, statement]
for req in needed:
    if req is None:
        print('Failed to get variable from env settings')
        exit(1)
# build the connection string after verifying attributes were provided
conn_str = 'Driver={};Server={};Database={};Uid={};Pwd={};Encrypt=yes;TrustServerCertificate=no;Connection Timeout=30;'.format(
    driver,
    server,
    database,
    username,
    password)
cnxn = pyodbc.connect(conn_str)
cursor = cnxn.cursor()
cursor.execute(statement)
rows = cursor.fetchall()
print('got rows')
# Copy each source row into a User model instance and persist it.
for r in rows:
    u = User(first_name=r[1], last_name=r[2], email=r[3], phone=r[4])
    u.save()
    print(u)
models.storage.save()
| 1,304 | 424 |
import json
import pprint
"""
TITLE: imagine buy in bookshoop - interaktive fun with User :)
ISSUE : help you choose the right item, get to know the User's preferences, i.e. - the thematic category that interests him, the results improved for him, a detailed description of the selected item
assumptions:
no method has been developed to protect the program against entering incorrect answers by the User
established:
- that the categories will be written as displayed on the console with uppercase letters (no spaces, etc.)
- that the user will copy the entire title of the book as it is displayed on the console
logic
100. Ask the user what category of books interests him (show him the sorted results)
101. Enter the selected category and ask if User wants to sort them by:
- increasing price,
- decreasing price,
- the highest number of stars,
- the lowest number of stars,
- availability,
and present the results
102.The user has chosen a given book - show him a short description and product description
logika - PL
100. spytaj Kupujacego jaka kategoria ksiazego go intresuje (pokaz mu posortowane wyniki)
101. wejdz do wybranej kategori i spytaj czy Kupujacy chce posortowac je po:
- cenie rosnacej,
- cenie malejacej,
- najwyzszej ilosci gwiazdek,
- najnizszej ilosci gwiazdek,
- dostepnosci,
i zaprezentuj wyniki do dalszego wyboru w postaci listy
102. user wybral dana ksiazke - pokaz mu do niej szczegolowy opis i opis produktu
"""
# Open and read the scraping results produced by part 01 of this project.
# IMPROVEMENT: `with` guarantees each file handle is closed even if
# json.load() raises (the original used manual open()/close() pairs).
with open('resources/01_category_first_link.json') as f1:
    scrap1 = json.load(f1)
with open('resources/02_single_books.json') as f2:
    scrap2 = json.load(f2)
with open('resources/03_details_single_books.json') as f3:
    scrap3 = json.load(f3)
class Game:
    """Interactive console 'shop assistant' over the scraped bookshop data."""
    def __init__(self):
        # No state yet; every method operates on the data passed to it.
        pass
# I am using a file called --> "01_category_first_link.json"
# important because each file has different keys to access the content of the dictionaries
def sorted_thematica_category(self,s1):
category_list = [letter['Book_Category'] for letter in s1]
sorted_category_list = sorted(category_list)
return sorted_category_list
# I am using a file called --> "02_single_books.json"
def show_all_books_ctagory(self, s2, choosen_category):
list_all_books_this_cat=[]
for el in s2:
if el['Book_Category'] == choosen_category:
list_all_books_this_cat.append(el['Book_Title'])
how_many_books = len(list_all_books_this_cat)
return how_many_books, list_all_books_this_cat
def printing_long_questions(self):
print('--------')
print('Please tell me how to sort the results for YOU. Write 1 or 2 or 3 or 4 or 5.')
print(' \t\t 1 - sort by price - DESC.')
print(' \t\t 2 - sort by price - ASC.')
print(' \t\t 3 - sort by popularity ranking - DESC.')
print(' \t\t 4 - sort by popularity ranking - ASC.')
print(' \t\t 5 - sort by Title alphabetically. ')
def user_choose_filter_method(self, nr, list_title):
if nr==1 or nr==2:
list_dict_title_and_price=self.generate_tab_title_price(scrap2, list_title)
if nr == 1:
result_method = self.sort_method_1(list_dict_title_and_price)
else:
#nr 2
result_method = self.sort_method_2(list_dict_title_and_price)
if nr == 3:
# create dict only with key like stars and title
list_dict_title_and_stars = self.generate_tab_title_stars(scrap2, list_title)
# sorted by stars
result_method = self.sort_method_3(list_dict_title_and_stars)
if nr == 4:
# create dict only with key like stars and title
list_dict_title_and_stars = self.generate_tab_title_stars(scrap2, list_title)
# sorted by stars
result_method = self.sort_method_4(list_dict_title_and_stars)
if nr == 5:
result_method = self.sort_method_5(list_title)
return result_method
# building a new DICTIONARY - cutting the content from existing DICTIONARIES
# idea from https://stackoverflow.com/questions/3420122/filter-dict-to-contain-only-certain-keys
def remove_key_from_existing_dict(self, existing_dict, *key_to_delete_from_existing_dict):
"""
input -{'Book_Price': 10.97, 'Book_Stars': 1, 'Book_Title': 'The Long Shadow', 'Book_total_category_amouth': 1}
key_to_delete_from_existing_dict='Book_Stars'
output--> {'Book_Price': 10.97,'Book_Title': 'The Long Shadow', , 'Book_total_category_amouth': 1}
"""
new_dict = dict((key, value) for key, value in existing_dict.items() if key not in key_to_delete_from_existing_dict)
return new_dict
def leave_only_selected_keys_in_existing_dict(self,existing_dict, *key_to_stay):
"""
input -{'Book_Price': 10.97, 'Book_Stars': 1, 'Book_Title': 'The Long Shadow', 'Book_total_category_amouth': 1}
key_to_stay='Book_Stars', 'Book_Title'
output--> {'Book_Stars': 1, 'Book_Title': 'The Long Shadow'}
"""
new_dict = dict((key, value) for key, value in existing_dict.items() if key in key_to_stay)
return new_dict
# building a new list of dictionaries - cutting the content from skraping 2 (list - dictionaries)
def generate_tab_title_price(self, scrap2, list_title):
# scrap2= big list dics
# i want filter and catch only interesting me title --list_title
# and return only key --'Book_Price', 'Book_Title'
list_dict_only_title_price=[]
for small_dict in scrap2:
for title in list_title:
if small_dict['Book_Title'] in title:
new_short_dict = self.leave_only_selected_keys_in_existing_dict(small_dict, 'Book_Price', 'Book_Title')
list_dict_only_title_price.append(new_short_dict)
return list_dict_only_title_price
def generate_tab_title_stars(self, scrap2, list_title):
# scrap2= big list dics
# i want filter and catch only interesting me title --list_title
# and return only key --'Book_Title', 'Book_Stars'
list_dict_only_title_stars = []
for small_dict in scrap2:
for title in list_title:
if small_dict['Book_Title'] in title:
new_short_dict = self.leave_only_selected_keys_in_existing_dict(
small_dict, 'Book_Title', 'Book_Stars')
list_dict_only_title_stars.append(new_short_dict)
return list_dict_only_title_stars
def sort_method_1(self,list_dict_title_and_price):
#Press 1 - sort by price descending (malejaco)
# return list with dict price and title
# inspiration - -> https: // stackoverflow.com/questions/1143671/how-to-sort-objects-by-multiple-keys-in-python
sorted_by_price_DESC= sorted(list_dict_title_and_price, key=lambda d: (-d['Book_Price'], d['Book_Title']))
return sorted_by_price_DESC
def sort_method_2(self, list_dict_title_and_price):
# Press 2 - sorted by price in ascending order (rosnaco)
# return list with dict price and title
sorted_by_price_DESC = sorted(list_dict_title_and_price, key=lambda d: (-d['Book_Price'], d['Book_Title']))
sorted_by_price_ASC = sorted_by_price_DESC[::-1]
return sorted_by_price_ASC
def sort_method_3(self, list_dict_only_title_AND_stars):
sorted_by_stars_DESC = sorted(list_dict_only_title_AND_stars, key=lambda d: (-d['Book_Stars'], d['Book_Title']))
return sorted_by_stars_DESC
def sort_method_4(self, list_dict_only_title_AND_stars):
    """Star rating ascending — the exact reverse of the descending order
    (so title ties come out in reverse-alphabetical order)."""
    def rank(entry):
        return (-entry['Book_Stars'], entry['Book_Title'])
    descending = sorted(list_dict_only_title_AND_stars, key=rank)
    return list(reversed(descending))
def sort_method_5(self, list_title):
    """Option 5: return the book titles sorted alphabetically
    (plain lexicographic `sorted`, case-sensitive)."""
    return sorted(list_title)
# choose inf detail from scrap 3
# I am using a file called --> "03_details_single_books.json"
def catch_index_if_have_title(self, title_choosen, scrap3):
    """Return the index of the first entry in scrap3 whose 'title_book'
    equals title_choosen. When nothing matches, len(scrap3) is returned
    (same as the original counter-based search)."""
    for position, entry in enumerate(scrap3):
        if entry['title_book'] == title_choosen:
            return position
    return len(scrap3)
def return_details(self, title_choosen, scrap3):
    """Collect the display fields of the chosen book as a flat list in
    the fixed order expected by printing_final_result."""
    idx = self.catch_index_if_have_title(title_choosen, scrap3)
    record = scrap3[idx]
    wanted_keys = (
        "title_book",
        "category",
        "price",
        "productDescription",
        "in_stock_how_many_available",
        "detals_link_to_book",
        "productInformation_UPC",
    )
    return [record[key] for key in wanted_keys]
def printing_final_result(self, tab_details):
    """Pretty-print the book details gathered by return_details.

    tab_details is a list in the fixed order produced by return_details:
    [title, category, price, description, stock count, link, UPC].
    """
    # Fix: the original assigned `category = tab_details[1]` twice; the
    # duplicate line has been removed.
    title_book = tab_details[0]
    category = tab_details[1]
    price = tab_details[2]
    productDescription = tab_details[3]
    in_stock_how_many_available = tab_details[4]
    detals_link_to_book = tab_details[5]
    productInformation_UPC = tab_details[6]
    print('\n\t The book has a title: {}.Category is {}'.format(title_book, category))
    print('\n\t Book Price:', price)
    print('\n\t Content Description:', productDescription)
    print('\n\t We still have {} item/s in stock'.format(in_stock_how_many_available))
    print('\n\t If you want to know more about the book, please open the link:', detals_link_to_book)
    print('\n\t UPC number:', productInformation_UPC)
# logic for conversation with User through Terminal
def logic(self):
    """Interactive terminal flow: choose a category, a sorting method and
    a single title, then print that book's details.

    NOTE(review): relies on module-level globals scrap1/scrap2/scrap3
    that are defined elsewhere in this file — confirm they exist before
    calling.
    """
    answer1_user_if_play = input("Do you want to buy some interesting book? :) . Choose (n/y) \n")
    if answer1_user_if_play == 'y':
        print('--------')
        print("\t Lets game :) ..... \n\t Below thematical book's Category for Your choose. \n")
        #step one - choose category
        sorted_category = self.sorted_thematica_category(scrap1)
        print(sorted_category)
        print('--------')
        customer_choose_category_book = input(
            '\t Please choose one and copy Your choice here ...\n\t (EXAMPLE:... Academic)\n\t (EXAMPLE:... Add a comment)\n\t YOUR TURN - Chose one Category from list : ...')
        """
        while customer_choose_category_book not in sorted_category_list:
            print('Please once again choose category. This one not exist in own base and list at top')
        """
        if customer_choose_category_book in sorted_category:
            how_books, title_books_this_choosen_category = self.show_all_books_ctagory(scrap2, customer_choose_category_book)
            print('We have for You in shop {} book/books title for category {}'.format(how_books, customer_choose_category_book))
            print(title_books_this_choosen_category)
        else:
            # NOTE(review): only warns — title_books_this_choosen_category is
            # never assigned on this path, so the print below raises NameError.
            print('Please once again choose category. This one not exist in own base and list at top')
        # step two - choose how user want to sort results and what want to see
        self.printing_long_questions()
        nr_choosen_method=int(input())
        print(title_books_this_choosen_category)
        print('--------')
        lista_books_filter_by_user_mean=self.user_choose_filter_method(nr_choosen_method, title_books_this_choosen_category)
        if len(lista_books_filter_by_user_mean)==1:
            print('\t It is exactly one book in this category')
            print('--------')
            # no point asking the user to pick when only one book exists
            # e.g. for category crime - [{'Book_Stars': 1, 'Book_Title': 'The Long Shadow of Small Ghosts: Murder and Memory in an American City'}]
            user_choose_single_title = lista_books_filter_by_user_mean[0]['Book_Title']
            tab_inf = self.return_details(user_choose_single_title, scrap3)
            #print(tab_inf)
            self.printing_final_result(tab_inf)
        else:
            print('\t Also this is list for You')
            print(lista_books_filter_by_user_mean)
            # let the user pick one title so its full details can be shown
            user_choose_single_title = input('\t\n Now please, copy and paste the entire Title of the book here:...(EXAMPLE:... Feathers: Displays of Brilliant Plumage) ')
            # use the scrap nr 3 with details
            tab_inf=self.return_details(user_choose_single_title,scrap3)
            print(tab_inf)
            self.printing_final_result(tab_inf)
    if answer1_user_if_play in ('n','n ','n ', 'NO', 'nie', 'N'):
        print('Nice day any way.')
if __name__ == "__main__":
    # Entry point: run the interactive book-shop conversation.
    game = Game()  # Game is defined earlier in this file (outside this view)
    game.logic()
| 14,960 | 4,607 |
"""django_bookworm.books_and_chapters URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path
from . import views
from django.views.generic.base import RedirectView
from django.contrib.auth.decorators import login_required
urlpatterns = [
    # Book routes. Ordering matters: the literal 'books/search/' route must
    # stay above 'books/<slug:slug>/', otherwise the slug pattern would
    # capture the word "search".
    path('books/', login_required(views.homepage), name='books'), # for adding a new book
    path('books/search/', views.search_book, name='search_book'),
    path('books/<slug:slug>/', login_required(views.get_book_details), name='book_detail'),
    path('books/<int:pk>/delete/', login_required(views.delete_book), name='delete_single_book'),
    path('books/<int:pk>/edit/', login_required(views.edit_book_details), name='book_details_edit'),
    # Chapter routes are not wrapped in login_required — TODO confirm intentional.
    path('chapters/add/', views.add_chapter, name='add_chapter'),
    path('chapters/<int:pk>/delete/', views.delete_chapter, name='delete_chapter'),
    path('chapters/<int:pk>/edit/', views.edit_chapter, name='edit_chapter'),
    # Bare root redirects (temporary redirect) to the login page.
    path('', RedirectView.as_view(url='/accounts/login/', permanent=False))
]
| 1,592 | 506 |
from app import create_app
from flask_script import Manager,Server #initialise our extensions and server class that aid in launching of our server
# Creating app instance in the 'development' configuration.
app = create_app('development')
manager = Manager(app)
manager.add_command('server', Server)  # expose `python manage.py server`

@manager.command
def test():
    """Run the unit tests."""
    # Imported locally so the test runner is only loaded when requested.
    import unittest
    tests = unittest.TestLoader().discover('tests')
    unittest.TextTestRunner(verbosity=2).run(tests)

if __name__ == '__main__':  # only launch the CLI when run directly
    manager.run()
| 573 | 164 |
"""
Copyright (C) 2020-2022 Benjamin Bokser
"""
import plots
import mpc_cvx
# import time
# import sys
import numpy as np
import copy
from scipy.linalg import expm
import itertools
np.set_printoptions(suppress=True, linewidth=np.nan)
def projection(p0, v):
    """Project point p0 along direction v onto the ground plane z = 0
    and return the intersection point as a numpy array [x, y, 0]."""
    ground_z = 0
    # Parametric ray p0 + t*v intersects z = ground_z at this t.
    t = (ground_z - p0[2]) / v[2]
    return np.array([p0[0] + t * v[0], p0[1] + t * v[1], ground_z])
class Runner:
    """Simulate a point-mass hopping robot (2D or 3D) tracked by a convex MPC.

    The continuous linear dynamics (self.A, self.B) are integrated with RK4
    at timestep dt, while the MPC is re-solved at a slower rate tied to the
    gait period. Results are visualised via the `plots` module.
    """
    def __init__(self, dims=2, ctrl='mpc', dt=1e-3):
        self.dims = dims
        self.ctrl = ctrl
        self.dt = dt
        self.total_run = 5000
        self.tol = 1e-3 # desired mpc tolerance
        self.m = 7.5 # mass of the robot, kg
        self.N = 10 # mpc horizon length
        self.g = 9.81 # gravitational acceleration, m/s2
        self.t_p = 1 # gait period, seconds
        self.phi_switch = 0.5 # switching phase, must be between 0 and 1. Percentage of gait spent in contact.
        # for now, mpc sampling time is equal to gait period
        self.mpc_dt = self.t_p * self.phi_switch # mpc sampling time
        self.N_time = self.N*self.mpc_dt # mpc horizon time
        if dims == 2:
            # State: [x, z, dx, dz, g]; the trailing constant-g state lets
            # gravity enter the linear dynamics via the -1 entry in A.
            self.n_x = 5 # number of states
            self.n_u = 2 # number of controls
            self.A = np.array([[0, 0, 1, 0, 0],
                               [0, 0, 0, 1, 0],
                               [0, 0, 0, 0, 0],
                               [0, 0, 0, 0, -1],
                               [0, 0, 0, 0, 0]])
            self.B = np.array([[0, 0],
                               [0, 0],
                               [1 / self.m, 0],
                               [0, 1 / self.m],
                               [0, 0]])
            self.X_0 = np.zeros(self.n_x)
            self.X_0[1] = 0.7
            self.X_0[-1] = self.g # initial conditions
            self.X_f = np.array([2, 0.5, 0, 0, self.g])
        elif dims == 3:
            # State: [x, y, z, dx, dy, dz, g] — same constant-g trick.
            self.n_x = 7 # number of states
            self.n_u = 3 # number of controls
            self.A = np.array([[0, 0, 0, 1, 0, 0, 0],
                               [0, 0, 0, 0, 1, 0, 0],
                               [0, 0, 0, 0, 0, 1, 0],
                               [0, 0, 0, 0, 0, 0, 0],
                               [0, 0, 0, 0, 0, 0, 0],
                               [0, 0, 0, 0, 0, 0, -1],
                               [0, 0, 0, 0, 0, 0, 0]])
            self.B = np.array([[0, 0, 0],
                               [0, 0, 0],
                               [0, 0, 0],
                               [1 / self.m, 0, 0],
                               [0, 1 / self.m, 0],
                               [0, 0, 1 / self.m],
                               [0, 0, 0]])
            self.X_0 = np.zeros(self.n_x)
            self.X_0[2] = 0.7
            self.X_0[-1] = self.g # initial conditions
            self.X_f = np.hstack([2, 2, 0.5, 0, 0, 0, self.g]).T # desired final state
        mu = 0.3 # coeff of friction
        self.mpc = mpc_cvx.Mpc(t=self.mpc_dt, A=self.A, B=self.B, N=self.N, m=self.m, g=self.g, mu=mu)
        self.mpc_factor = self.mpc_dt * 2 / self.dt # repeat mpc every x seconds

    def run(self):
        """Run the full simulation loop and plot the results.

        Returns None; output is produced via the `plots` module.
        """
        total = self.total_run + 1 # number of timesteps to plot
        t = 0 # time
        t0 = t # starting time
        mpc_factor = self.mpc_factor # repeat mpc every x seconds
        mpc_counter = copy.copy(mpc_factor)
        X_traj = np.zeros((total, self.n_x))
        X_traj[0, :] = self.X_0 # initial conditions
        f_hist = np.zeros((total, self.n_u))
        s_hist = np.zeros(total)
        U_pred = np.zeros((self.N, self.n_u))
        X_pred = np.zeros((self.N, self.n_x))
        pf_ref = np.zeros(self.n_u)
        j = int(self.mpc_factor)
        X_pred_hist = np.zeros((self.N+1, self.n_u))
        # NOTE(review): these two are pre-allocated full of zeros AND later
        # vstacked onto (below), so they carry `total` leading zero rows —
        # confirm that is what the plotting code expects.
        f_pred_hist = np.zeros((total, self.n_u))
        p_pred_hist = np.zeros((total, self.n_u))
        for k in range(0, self.total_run):
            t = t + self.dt
            s = self.gait_scheduler(t, t0)
            if self.ctrl == 'mpc':
                # NOTE(review): float equality test — mpc_factor is a float
                # (mpc_dt * 2 / dt); works for these defaults but fragile.
                if mpc_counter == mpc_factor: # check if it's time to restart the mpc
                    mpc_counter = 0 # restart the mpc counter
                    X_ref = self.path_plan(X_in=X_traj[k, :])
                    X_refN = X_ref[::int(self.mpc_dt / self.dt)]
                    U_pred, X_pred = self.mpc.mpcontrol(X_in=X_traj[k, :], X_ref=X_refN)
                    p_pred = (X_pred[2, 0:3]+(X_pred[2, 0:3]+X_pred[3, 0:3])/2)/2 # next pred body pos over next ftstep
                    f_pred = U_pred[2, :] # next predicted foot force vector
                    p_pred_hist = np.vstack((p_pred_hist, p_pred))
                    # normalized force direction, scaled for plotting
                    f_pred_hist = np.vstack((f_pred_hist, 0.5*f_pred/np.sqrt(np.sum(f_pred**2))))
                    pf_ref = np.vstack((pf_ref, projection(p_pred, f_pred)))
                    X_pred_hist = np.dstack((X_pred_hist, X_pred[:, 0:self.n_u]))
                mpc_counter += 1
                f_hist[k, :] = U_pred[0, :]*s # take first timestep
            else: # Open loop traj opt, this will fail if total != mpc_factor
                if int(total/self.N) != mpc_factor:
                    print("ERROR: Incorrect settings", total/self.N, mpc_factor)
                if k == 0:
                    X_ref = self.path_plan(X_in=X_traj[k, :])
                    X_refN = X_ref[::int(self.mpc_factor)] # self.traj_N(X_ref)
                    force_f, X_pred = self.mpc.mpcontrol(X_in=X_traj[k, :], X_ref=X_refN)
                    for i in range(0, self.N):
                        # hold each optimized force constant over j sim steps
                        f_hist[int(i*j):int(i*j+j), :] = list(itertools.repeat(force_f[i, :], j))
            s_hist[k] = s
            X_traj[k+1, :] = self.rk4(xk=X_traj[k, :], uk=f_hist[k, :])
            # X_traj[k + 1, :] = self.dynamics_dt(X=X_traj[k, :], U=f_hist[k, :], t=self.dt)
        # print(X_traj[-1, :])
        # print(f_hist[4500, :])
        plots.fplot(total, p_hist=X_traj[:, 0:self.n_u], f_hist=f_hist, s_hist=s_hist, dims=self.dims)
        plots.posplot(p_ref=self.X_f[0:self.n_u], p_hist=X_traj[:, 0:self.n_u], dims=self.dims)
        plots.posfplot(p_ref=self.X_f[0:self.n_u], p_hist=X_traj[:, 0:self.n_u],
                       p_pred_hist=p_pred_hist, f_pred_hist=f_pred_hist, pf_hist=pf_ref, dims=self.dims)
        # plots.posplot(p_ref=self.X_f[0:self.n_u], p_hist=X_pred_hist[:, 0:self.n_u, 1], dims=self.dims)
        # plots.posplot_t(p_ref=self.X_ref[0:self.n_u], p_hist=X_traj[:, 0:2], total=total)
        return None

    def dynamics_ct(self, X, U):
        # CT dynamics X -> dX (continuous-time state derivative)
        A = self.A
        B = self.B
        X_next = A @ X + B @ U
        return X_next

    def dynamics_dt(self, X, U, t):
        """Exact zero-order-hold discretization of (A, B) over time t
        via the matrix exponential of the stacked [A B; 0 0] block."""
        n_x = self.n_x # number of states
        n_u = self.n_u # number of controls
        A = self.A
        B = self.B
        AB = np.vstack((np.hstack((A, B)), np.zeros((n_u, n_x + n_u))))
        M = expm(AB * t)
        Ad = M[0:n_x, 0:n_x]
        Bd = M[0:n_x, n_x:n_x + n_u]
        X_next = Ad @ X + Bd @ U
        return X_next

    def rk4(self, xk, uk):
        # RK4 integrator solves for new X (control held constant over the step)
        dynamics = self.dynamics_ct
        h = self.dt
        f1 = dynamics(xk, uk)
        f2 = dynamics(xk + 0.5 * h * f1, uk)
        f3 = dynamics(xk + 0.5 * h * f2, uk)
        f4 = dynamics(xk + h * f3, uk)
        return xk + (h / 6.0) * (f1 + 2 * f2 + 2 * f3 + f4)

    def gait_scheduler(self, t, t0):
        """Return 1 during scheduled stance, 0 during scheduled swing,
        based on the phase of the periodic gait cycle."""
        phi = np.mod((t - t0) / self.t_p, 1)
        if phi > self.phi_switch:
            s = 0 # scheduled swing
        else:
            s = 1 # scheduled stance
        return s

    def path_plan(self, X_in):
        # Path planner--generate reference trajectory
        """Linearly interpolate a state reference from X_in to self.X_f,
        padding with the final state when the target is closer than the
        MPC horizon."""
        dt = self.dt
        size_mpc = int(self.mpc_factor*self.N) # length of MPC horizon in s TODO: Perhaps N should vary wrt time?
        t_ref = 0 # timesteps given to get to target, either mpc length or based on distance (whichever is smaller)
        X_ref = None
        if self.dims == 2:
            t_ref = int(np.minimum(size_mpc, abs(self.X_f[0] - X_in[0])*1000)) # ignore z distance due to bouncing
            X_ref = np.linspace(start=X_in, stop=self.X_f, num=t_ref) # interpolate positions
            # interpolate velocities (finite difference of the positions)
            X_ref[:-1, 2] = [(X_ref[i + 1, 0] - X_ref[i, 0]) / dt for i in range(0, np.shape(X_ref)[0] - 1)]
            X_ref[:-1, 3] = [(X_ref[i + 1, 1] - X_ref[i, 1]) / dt for i in range(0, np.shape(X_ref)[0] - 1)]
        elif self.dims == 3:
            t_ref = int(np.minimum(size_mpc, np.linalg.norm(self.X_f[0:2] - X_in[0:2]) * 1000))
            X_ref = np.linspace(start=X_in, stop=self.X_f, num=t_ref) # interpolate positions
            # interpolate velocities
            X_ref[:-1, 3] = [(X_ref[i + 1, 0] - X_ref[i, 0]) / dt for i in range(0, np.shape(X_ref)[0] - 1)]
            X_ref[:-1, 4] = [(X_ref[i + 1, 1] - X_ref[i, 1]) / dt for i in range(0, np.shape(X_ref)[0] - 1)]
            X_ref[:-1, 5] = [(X_ref[i + 1, 2] - X_ref[i, 2]) / dt for i in range(0, np.shape(X_ref)[0] - 1)]
        if (size_mpc - t_ref) == 0:
            pass
        elif t_ref == 0:
            # already at the target: hold the final state for the whole horizon
            X_ref = np.array(list(itertools.repeat(self.X_f, int(size_mpc))))
        else:
            # pad the tail of the horizon with the final state
            X_ref = np.vstack((X_ref, list(itertools.repeat(self.X_f, int(size_mpc - t_ref)))))
        return X_ref
| 9,384 | 3,865 |
from sklearn.neighbors import NearestNeighbors
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
def check_value(inds, val):
''' Check to see if an array is a single element equaling a particular value
for pre-processing inputs in a function '''
if(np.array(inds).size==1):
if(inds==val):
return True
return False
def na():
    """Shorthand for numpy's newaxis, used when expanding array dims via indexing."""
    return np.newaxis
def flatten_nd_array(pts_nd, axis=1):
    ''' Flatten an nd array into a 2d array with a certain axis
    INPUTS
        pts_nd       N0xN1x...xNd array
        axis         integer
    OUTPUTS
        pts_flt      prod(N \ N_axis) x N_axis array '''
    shape = np.array(pts_nd.shape)
    # indices of all axes except the chosen one, in ascending order
    other_axes = np.setdiff1d(np.arange(0, pts_nd.ndim), np.array((axis)))
    n_rows = np.prod(shape[other_axes])
    # move the chosen axis last, then collapse the rest into rows
    new_order = np.concatenate((other_axes, np.array(axis).flatten()), axis=0)
    reordered = pts_nd.transpose(new_order)
    return reordered.reshape(n_rows, shape[axis])
def unflatten_2d_array(pts_flt, pts_nd, axis=1, squeeze=False):
    ''' Unflatten a 2d array with a certain axis
    INPUTS
        pts_flt      prod(N \ N_axis) x M array
        pts_nd       N0xN1x...xNd array
        axis         integer
        squeeze      bool     if true, M=1, squeeze it out
    OUTPUTS
        pts_out      N0xN1x...xNd array '''
    shape = np.array(pts_nd.shape)
    # indices of all axes except the chosen one, in ascending order
    other_axes = np.setdiff1d(np.arange(0, pts_nd.ndim), np.array((axis)))
    if squeeze:
        # M == 1: the flat column disappears entirely
        new_order = other_axes
        target_shape = shape[other_axes].tolist()
    else:
        # the flat columns become the chosen axis (initially placed last)
        new_order = np.concatenate((other_axes, np.array(axis).flatten()), axis=0)
        target_shape = shape[other_axes].tolist()
        target_shape.append(pts_flt.shape[1])
    # invert the permutation used by flatten_nd_array
    restore_order = np.argsort(new_order)
    return pts_flt.reshape(target_shape).transpose(restore_order)
class NNEncode():
    ''' Encode points using NearestNeighbors search and Gaussian kernel '''
    def __init__(self,NN,sigma,km_filepath='',cc=-1):
        # Load the cluster centres (colour palette) from disk unless an
        # explicit array was supplied via `cc`.
        if(check_value(cc,-1)):
            self.cc = np.load(km_filepath)
        else:
            self.cc = cc
        self.K = self.cc.shape[0]  # number of palette bins
        self.NN = int(NN)  # neighbours used in the soft encoding
        self.sigma = sigma  # Gaussian kernel bandwidth
        self.nbrs = NearestNeighbors(n_neighbors=NN, algorithm='ball_tree').fit(self.cc)
        self.alreadyUsed = False  # whether the work buffers below are allocated
    def encode_points_mtx_nd(self,pts_nd,axis=1,returnSparse=False,sameBlock=True):
        """Soft-encode each point as Gaussian weights over its NN nearest
        palette bins; returns an array with K channels on `axis`."""
        pts_flt = flatten_nd_array(pts_nd,axis=axis)
        #pts_flt ---> [N*H*W, 2]
        P = pts_flt.shape[0]
        #P ---> N*H*W
        if(sameBlock and self.alreadyUsed):
            # reuse the pre-allocated buffer; NOTE(review): assumes the new
            # batch has the same flattened size P as the previous one
            self.pts_enc_flt[...] = 0 # already pre-allocated
            print('alreadyUsed')
            print(self.p_inds)
        else:
            print('notUsed')
            # print(self.p_inds)
            self.alreadyUsed = True
            self.pts_enc_flt = np.zeros((P,self.K))
            #self.pts_enc_flt.shape ---> [N*H*W, 313]
            self.p_inds = np.arange(0,P,dtype='int')[:,na()]
            #self.p_inds.shape ---> [N*H*W, 1]
        (dists,inds) = self.nbrs.kneighbors(pts_flt)
        #inds.shape ---> [N*H*W, NN]
        wts = np.exp(-dists**2/(2*self.sigma**2))
        wts = wts/np.sum(wts,axis=1)[:,na()]
        #wts.shape ---> [N*H*W, NN]
        # Scatter the normalized Gaussian weights of the NN nearest palette
        # bins into pts_enc_flt, then unflatten back to the nd layout.
        self.pts_enc_flt[self.p_inds,inds] = wts
        pts_enc_nd = unflatten_2d_array(self.pts_enc_flt,pts_nd,axis=axis)
        #pts_enc_nd.shape -----> [N, 313, H, W]
        return pts_enc_nd
    def decode_points_mtx_nd(self,pts_enc_nd,axis=1):
        """Decode: weighted sum of palette centres (inverse of the soft encoding)."""
        pts_enc_flt = flatten_nd_array(pts_enc_nd,axis=axis)
        pts_dec_flt = np.dot(pts_enc_flt,self.cc)
        pts_dec_nd = unflatten_2d_array(pts_dec_flt,pts_enc_nd,axis=axis)
        return pts_dec_nd
    def decode_1hot_mtx_nd(self,pts_enc_nd,axis=1,returnEncode=False):
        # NOTE(review): nd_argmax_1hot is not defined anywhere in this
        # module chunk — calling this method raises NameError as-is.
        pts_1hot_nd = nd_argmax_1hot(pts_enc_nd,axis=axis)
        pts_dec_nd = self.decode_points_mtx_nd(pts_1hot_nd,axis=axis)
        if(returnEncode):
            return (pts_dec_nd,pts_1hot_nd)
        else:
            return pts_dec_nd
# Ad-hoc demo / scratch script for NNEncode.
# NOTE(review): requires a CUDA device and the hard-coded pts_in_hull.npy
# path below; everything after the first exit() call never executes.
# self.cretion( output, torch.max(target, 1)[1] )
nnenc = NNEncode(10,5,km_filepath='/home/chuchienshu/Documents/propagation_classification/models/custom_layers/pts_in_hull.npy')
bottom = np.random.randint(0,10,(2,3,3,3)).astype('float32')
# print(bottom)
bt = Variable(torch.from_numpy(bottom).cuda())
fac = np.array([[1,2],[3,4],[5,6]])
# broadcastable per-channel factors: shape (1, 3, 1, 1)
fac_a = fac[:,0][np.newaxis,:,np.newaxis,np.newaxis]
fac_b = fac[:,1][np.newaxis,:,np.newaxis,np.newaxis]
# weighted channel sums stacked into a 2-channel "ab" prediction
pred_ab = np.concatenate((np.sum(bottom * fac_a, axis=1, keepdims=True), np.sum(bottom * fac_b, axis=1, keepdims=True)), axis=1)
# print(fac_a,fac_a.shape)
# print(fac_b,fac_b.shape)
# print(bottom * fac_a, ' jfdis')
# print(bottom * fac_b, ' fac_b')
# print(np.sum(bottom * fac_a, axis=1, keepdims=True), ' 44')
# print(np.sum(bottom * fac_b, axis=1, keepdims=True), ' 66')
print(pred_ab, pred_ab.shape)
for i, im in enumerate(pred_ab):
    print(im)
    print(i)
exit()
# ---- dead code from here on (script exits above) ----
# bt = flatten_nd_array(bt.data.numpy())
##bt = bt.permute(0,2,3,1).contiguous().view(50, -1)
#/////////////////////////////////////////////////////////
bottom = np.random.randint(0,10,(8,2,5,5)).astype('float32')
print(bottom)
nnenc.encode_points_mtx_nd(bottom,axis=1)
for _ in range(6):
    print('fjkfd')
    print(nnenc.cc
    )
    print('############')
exit()
#/////////////////////////////////////////////////////////
import matplotlib.pyplot as plt
n = 1024
# x = np.random.normal(0, 1, n)  # mean 0, std 1, draw 1024 samples
# y = np.random.normal(0, 1, n)
# NOTE(review): X is not defined anywhere in this file chunk.
x = X[:,0]
y = X[:,1]
t = np.arctan2(x, y) # for color value, indexes into the cmap
plt.scatter(x, y, s=65, c=t, alpha=0.5) # s is the marker size; alpha is the transparency
plt.xlim(-5, 5)
plt.ylim(-5, 5)
plt.xticks([])
plt.yticks([])
plt.show()
from setuptools import setup, find_packages
import endktheme
# Package metadata — the version is read from endktheme.__version__ so it
# is defined in exactly one place.
setup(
    name="endktheme",
    description="Visualization themes following Energinet's design guide.",
    version=endktheme.__version__,
    author="Simon J. Larsen",
    author_email="simonhffh@gmail.com",
    license="MIT",
    packages=find_packages(),
    install_requires=[],  # no runtime dependencies
)
| 348 | 109 |
import os
from django.contrib.auth.decorators import login_required
from django.core.cache import cache
from django.core.files.storage import default_storage
from django.http import JsonResponse, HttpResponse
from django.template.loader import render_to_string
from async_downloads.cache import get_collection_key
from async_downloads.settings import DOWNLOAD_TEMPLATE
@login_required
def ajax_update(request):
    """Return a JSON summary of the current user's async downloads.

    The response contains rendered HTML for the download list, the raw
    download dicts, and a flag saying whether any download is still
    in progress.
    """
    # TODO: can we make `request.user.pk` more generic to allow other
    # things to be used as keys?
    download_keys = cache.get(get_collection_key(request.user.pk), [])
    downloads = []
    in_progress = False
    # Fix: the original iterated with enumerate() but never used the index.
    for download_key in download_keys:
        download = cache.get(download_key)
        if not download:
            # Cache entry expired or was cleared; skip it.
            continue
        if download["complete"]:
            download["url"] = default_storage.url(download["filepath"])
        else:
            in_progress = True
        downloads.append(download)
    # TODO: split up complete and in progress async_downloads?
    return JsonResponse(
        {
            "html": render_to_string(DOWNLOAD_TEMPLATE, {"downloads": downloads}),
            "downloads": downloads,
            "in_progress": in_progress,
        }
    )
@login_required
def ajax_clear_download(request):
    """Remove one finished download: drop its cache entry and delete the
    stored file plus its containing directory."""
    # TODO: consider just clearing the key without deleting,
    # so that all deletion is done by one function
    filepath = request.POST.get("filepath")
    directory = os.path.dirname(filepath)
    # The directory name doubles as the per-download cache key.
    download_key = os.path.basename(directory)
    cache.delete(download_key)
    default_storage.delete(filepath)
    default_storage.delete(directory)
    return HttpResponse("")
| 1,630 | 473 |
import numpy as np
import torch
class DropDay:
    """Augmentation transform: removes every event belonging to one
    randomly chosen day (unique event-time value) from a feature dict."""

    def __init__(self, event_time_name='event_time'):
        self.event_time_name = event_time_name

    def __call__(self, x):
        keep = self.get_perm_ix(x[self.event_time_name])
        return {key: val[keep] for key, val in x.items()}

    @staticmethod
    def get_perm_ix(event_time):
        """Boolean mask that is False exactly on the dropped day's events."""
        day_values = torch.unique(event_time, sorted=True)
        dropped = np.random.choice(len(day_values), 1)[0]
        return event_time != day_values[dropped]
| 517 | 183 |
#!usr/bin/env python
# coding=utf-8
# Created by zhezhiyong@163.com on 2016/11/17.
from flask import jsonify
from . import api
from .. import mongodb
@api.route('/books/')
def get_books():
    # List endpoint: dumps every Book document to stdout and returns a JSON
    # payload. (Python 2 syntax — `print book` below is a print statement.)
    # id = mongodb.db.book.insert({'test':1})
    books = mongodb.db.Book.find({})
    for book in books:
        print book
    # NOTE(review): the insert above is commented out, so `id` here is the
    # Python builtin function — str(id) yields "<built-in function id>".
    # Confirm what the 'code' field is actually meant to contain.
    return jsonify({
        'code': str(id)
    })
from utils import *
# Batch driver — all helpers come from `utils` (imported above with *).
# First pass: revision 2.1, built with VC11 Express x64, test-run under
# both Optimus GPU modes (Intel, then NVIDIA).
Clean()
HgUpdate21()
PatchAll()
Build_VC11Express_64()
OptimusForceIntel()
RunAll()
OptimusForceNVIDIA()
RunAll()
# Second pass: revision 3.3, NVIDIA only.
Clean()
HgUpdate33()
PatchAll()
Build_VC11Express_64()
OptimusForceNVIDIA()
RunAll()
# NOTE(review): this trailing clean/update has no build or run after it —
# confirm whether it is intentional (e.g. leaving the tree at 3.3) or a
# truncated third pass.
Clean()
HgUpdate33()
from corehq.apps.reports.generic import GenericTabularReport
from corehq.apps.reports.standard import CustomProjectReport
class PactAdminReport(GenericTabularReport, CustomProjectReport):
    """Tabular admin report for the PACT project, filterable by date span,
    and available for both e-mail delivery and export."""

    fields = ['corehq.apps.reports.filters.dates.DatespanFilter']
    name = "PACT Admin Reports"
    slug = "pactadmin"
    emailable = True
    exportable = True
    report_template_path = "pact/admin/pactadmin_reports.html"

    @property
    def report_context(self):
        """Extra template context; currently a placeholder payload."""
        return {"foo": "bar"}
| 514 | 164 |
# global imports
import unittest
import numpy as np
from future.builtins import range
# local imports
import correlation_toolbox.helper as cthlp
class TestHelper(unittest.TestCase):
    """Unit tests for correlation_toolbox.helper: spike-train generation,
    sorting, binning, correlation and centralization utilities.

    Statistical checks compare empirical rates/correlations against the
    configured parameters within loose tolerances, so the RNG is seeded
    for reproducibility.
    """
    def setUp(self):
        np.random.seed(12345)
        self.rate = 30.  # (Hz)
        self.T = 3e4  # (ms)
        self.N = 100
        self.p = 0.6  # percentage of neurons active
        self.Neff = int(self.p * self.N)
        self.cc = 0.3
        self.tbin = 1.  # (ms)

    def test_create_poisson_spiketrains(self):
        """Generated trains have N units, spike times within T, and a rate
        close to the requested one."""
        sp = cthlp.create_poisson_spiketrains(self.rate, self.T, self.N)
        self.assertEqual(self.N, len(np.unique(sp[:, 0])))  # N
        self.assertTrue(self.T >= np.max(sp[:, 1]))  # T
        emp_rate = 1. * len(sp) / self.T * 1e3 / self.N
        self.assertTrue(abs(self.rate - emp_rate) < 1e0)  # rate

    def test_sort_gdf_by_id(self):
        """Sorting pads silent ids with empty arrays and keeps times ordered."""
        # create N-5 poisson instead of N, creates empty arrays in sp_srt
        sp = cthlp.create_poisson_spiketrains(self.rate, self.T, self.Neff)
        sp_ids, sp_srt = cthlp.sort_gdf_by_id(sp, 0, self.N - 1)
        self.assertEqual(self.N, len(sp_ids))  # N
        self.assertTrue(self.T >= np.max([np.max(x)
                                          for x in sp_srt if len(x) > 0]))  # T
        for i in range(self.N):
            emp_rate = 1. * len(sp_srt[i]) / self.T * 1e3
            assert(emp_rate >= 0.)
            if emp_rate > 0.:
                self.assertTrue(abs(self.rate - emp_rate) < 1e1)  # rate
                self.assertTrue(min(np.diff(sp_srt[i])) > 0.)  # time ordering

    def test_instantaneous_spike_count(self):
        """Binning handles empty trains and reproduces a hand-built histogram."""
        # create N-5 poisson instead of N, creates empty arrays in sp_srt
        # to test binning for empty spiketrains
        sp = cthlp.create_poisson_spiketrains(self.rate, self.T, self.Neff)
        sp_ids, sp_srt = cthlp.sort_gdf_by_id(sp, 0, self.N - 1)
        bins, bsp = cthlp.instantaneous_spike_count(sp_srt, self.tbin)
        # test whether binning produces correct results
        sp_srt = np.array([[1., 2., 5., 7.], [4., 6., 9.]])
        # ground truth
        bsp_true = np.array(
            [[1, 1, 0, 0, 1, 0, 1, 0], [0, 0, 0, 1, 0, 1, 0, 1]])
        bins, bsp = cthlp.instantaneous_spike_count(sp_srt, self.tbin)
        self.assertTrue(len(bins) == len(bsp[0]))  # number of bins
        self.assertEqual(2, len(bsp))  # number of binned spike trains
        self.assertEqual(np.sum(bsp_true - bsp), 0.)  # histogram

    def test_create_correlated_spiketrains_sip(self):
        """SIP trains match the requested rate and pairwise correlation
        (scaled by the active fraction p)."""
        # create N-5 poisson instead of N, changes correlation
        sp = cthlp.create_correlated_spiketrains_sip(
            self.rate, self.T, self.Neff, self.cc)
        sp_ids, sp_srt = cthlp.sort_gdf_by_id(sp, 0, self.N - 1)
        bins, bsp = cthlp.instantaneous_spike_count(sp_srt, self.tbin)
        emp_rate = 1. * np.sum(bsp) / self.T * 1e3 / self.N
        self.assertTrue(abs(self.p * self.rate - emp_rate) < 5e-1)  # rate
        self.assertEqual(self.N, len(bsp))  # N
        self.assertTrue(self.T >= np.max(bins))  # T
        emp_cc = np.corrcoef(cthlp.strip_binned_spiketrains(bsp))
        emp_a_cc = []
        for i in range(self.Neff):
            for j in range(self.Neff):
                if i != j:
                    emp_a_cc.append(emp_cc[i, j])
        emp_mu_cc = 1. / (self.N * (self.N - 1.)) * np.sum(emp_a_cc)
        # correlation coefficient
        self.assertTrue(abs(self.p ** 2 * self.cc - emp_mu_cc) < 2e-2)

    def test_centralize(self):
        """Centralizing removes the mean over time, over units, or both."""
        v1 = np.random.normal(-50, 2, int(self.T * 1e1))
        v2 = np.random.normal(-30, 2, int(self.T * 1e1))
        v_cen_time = cthlp.centralize([v1, v2], time=True)
        for v in v_cen_time:
            self.assertTrue(abs(np.mean(v)) < 1e-12)
        v_cen_units = cthlp.centralize([v1, v2], units=True)
        for v in v_cen_units.T:
            self.assertTrue(abs(np.mean(v)) < 1e-12)
        v_cen_timeunits = cthlp.centralize([v1, v2], time=True, units=True)
        self.assertTrue(abs(np.mean(v_cen_timeunits)) < 1e-12)

    def test_strip_sorted_spiketrains(self):
        """Stripping removes the silent (empty) trains, leaving Neff of N."""
        sp = cthlp.create_poisson_spiketrains(self.rate, self.T, self.Neff)
        sp_ids, sp_srt = cthlp.sort_gdf_by_id(sp, 0, self.N - 1)
        self.assertEqual(self.N, len(sp_srt))
        sp_srt = cthlp.strip_sorted_spiketrains(sp_srt)
        self.assertEqual(self.Neff, len(sp_srt))

    def test_strip_binned_spiketrains(self):
        """Stripping binned trains likewise drops all-zero rows."""
        sp = cthlp.create_poisson_spiketrains(self.rate, self.T, self.Neff)
        sp_ids, sp_srt = cthlp.sort_gdf_by_id(sp, 0, self.N - 1)
        bins, bsp = cthlp.instantaneous_spike_count(sp_srt, self.tbin)
        self.assertEqual(self.N, len(bsp))
        bsp = cthlp.strip_binned_spiketrains(bsp)
        self.assertEqual(self.Neff, len(bsp))
if __name__ == '__main__':
    # Run the test suite when executed as a script.
    unittest.main()
| 4,807 | 1,966 |
import os
import shutil

# Print the entries at the filesystem root.
root_entries = os.listdir('/')
print(root_entries)
'''
NAME
Tarea_4.py
VERSION
1.0
AUTHOR
Victor Jesus Enriquez Castro <victorec@lcg.unam.mx>
DESCRIPTION
Empleando Entrez.einfo y ENtrez.read el programa imprime la descripcion
de los campos FieldList y LinkList en la base de datos protein, de la misma
manera dadas palabras claves de busqueda se obtienen los IDs de los articulos
que coinciden con los criterios de busqueda en la base de datos pubmed
CATEGORY
Data Base
INPUT
Este programa unicamente recibe como inputs las palabras clave para la
busqueda de los articulos en la base de datos pubmed
EXAMPLES
Input:
Ingrese el termino con el que desea realizar su busqueda: ludosky ma
Ingrese el campo del termino ingresado: AUTH
Ingrese el termino con el que desea realizar su busqueda: electrocyte
Ingrese el campo del termino ingresado: Title
Ingrese el termino con el que desea realizar su busqueda: Baumannii
Ingrese el campo del termino ingresado: Title
Output:
ECNO -> Description:
EC number for enzyme or CAS registry number
protein_protein_small_genome -> Description:
All proteins from this genome
El archivo con los IDs de su busqueda se encuentra en: ../files/ bajo el nombre IDs.txt
GITHUB
https://github.com/JESUS-2120/Python_2/blob/main/Tareas/Tarea_4.py
'''
#Importamos las librerias necesarias
from Bio import Entrez
from pprint import pprint
# NCBI requires a contact e-mail address for every Entrez request.
Entrez.email = "victorec@lcg.unam.mx"
#TAREA 1
# Query metadata for the "protein" database.
handle = Entrez.einfo(db = "protein")
record = Entrez.read(handle)
# Print the description of the ECNO search field.
for field in record["DbInfo"]["FieldList"]:
    if field["Name"] == "ECNO":
        print(field["Name"], "->", "Description:")
        print(field["Description"])
        print("\n")
# Print the description of the protein_protein_small_genome link.
for link in record["DbInfo"]["LinkList"]:
    if link["Name"] == "protein_protein_small_genome":
        print(link["Name"], "->", "Description:")
        print(link["Description"])
        print("\n")
#TAREA 2
print("Bienvenido al buscador automatico\nSi desea usar el formato ya existente ingrese (1) si desea ingresar su propio string ingrese (2): ")
opc = int(input())
# NOTE(review): only re-prompts once; a second invalid entry is accepted
# as-is, and an out-of-range opc leaves `termino` undefined below.
if (opc < 1 or opc > 2):
    opc = int(input("Ingrese un numero valido: "))
if opc == 1:
    print("Considerando como ejemplo\ntermino = 'ludosky ma[AUTH] AND (electrocyte[Title] OR Baumannii[Title])\ningrese los campos con los que desea realizar su busqueda")
    # Collect three (term, field) pairs from the user: terms go in slots
    # 0-2, their matching fields in slots 3-5.
    palabras = ["","","","","",""]
    for i in range(3):
        palabras[i] = str(input("Ingrese el termino con el que desea realizar su busqueda: "))
        palabras[i + 3] = str(input("Ingrese el campo del termino ingresado: "))
    # Assemble the Entrez query: term1[f1] AND (term2[f2] OR term3[f3]).
    termino = palabras[0] + "[" + palabras[3] + "]" + " AND (" + palabras[1] + "[" + palabras[4] + "] OR " + palabras[2] + "[" + palabras[5] + "])"
if opc == 2:
    termino = input("Ingrese su string de busqueda: ")
# Run the search against pubmed.
handle = Entrez.esearch(db="pubmed", term= termino)
record = Entrez.read(handle)
# Fix: use a context manager so the output file is always closed (the
# original opened it and never called close()).
with open("../files/IDs.txt", "w") as IDS:
    IDS.write("Los IDs de su busqueda son: \n")
    for rec in record["IdList"]:
        IDS.write(">" + rec + "\n")
print("El archivo con los IDs de su busqueda se encuentra en: ../files/ bajo el nombre IDs.txt")
| 3,783 | 1,364 |
import os,sys,inspect
from HDPython.base import *
from HDPython.v_symbol import v_symbol
from HDPython.primitive_type_converter import get_primitive_hdl_converter
from HDPython.lib_enums import varSig, InOut_t
class v_enum(HDPython_base):
    """HDL wrapper around a Python Enum: stores the enum's integer value in
    a v_symbol so it can participate in HDL conversion and simulation."""
    def __init__(self,EnumIn,EnumVal=None,name=None, Inout = InOut_t.Internal_t,includes="",value=None,varSigConst=varSig.variable_t):
        super().__init__()
        self.__hdl_converter__ =get_primitive_hdl_converter("v_enum" )()
        # EnumIn may be either the Enum class itself or one of its members;
        # in the member case the class is recovered and the member becomes
        # the initial value.
        if type(EnumIn).__name__ == "EnumMeta":
            Enumtype = EnumIn
        elif type(type(EnumIn)).__name__ == "EnumMeta":
            Enumtype = type(EnumIn)
            EnumVal = EnumIn
        # default initial value: the member with integer value 0
        if EnumVal == None:
            EnumVal = Enumtype(0)
        if name == None:
            name = Enumtype.__name__
        self.symbol = v_symbol(name,EnumVal.value,Inout=Inout,includes=includes,value=EnumVal.value,varSigConst=varSigConst )
        self._type = Enumtype
        self.name = name
        self.__hdl_name__ = None
        self._Inout = Inout
        self._varSigConst = varSigConst

    def __lshift__(self, rhs):
        """HDL-style assignment: accepts another v_enum of the same type or
        a raw member of the wrapped Enum; anything else raises."""
        if isinstance(rhs,type(self)):
            self.symbol << rhs.symbol
            return
        if isinstance(rhs,self._type):
            self.symbol << value(rhs)
            return
        # NOTE(review): message contains a typo ("tp" -> "to")
        raise Exception("[ENUM] Unable tp connect different types", self, rhs)

    def _sim_get_new_storage(self):
        # delegate simulation storage allocation to the backing symbol
        self.symbol._sim_get_new_storage()

    def set_simulation_param(self,module, name,writer):
        self.symbol.set_simulation_param(module, name, writer)

    def __repr__(self):
        # e.g. "MemberName: 3" — symbolic name plus raw integer value
        ret = str(self._type(value(self.symbol)).name) +": "+ str(value(self.symbol))
        return ret

    def setInout(self,Inout):
        self.symbol.setInout(Inout)

    def set_varSigConst(self, varSigConst):
        self._varSigConst=varSigConst
        self.symbol.set_varSigConst(varSigConst)

    def isVarSigType(self, varSigType):
        return self.symbol.isVarSigType( varSigType)

    def _sim_get_value(self):
        return value(self.symbol)

    def __eq__(self,rhs):
        # NOTE: defining __eq__ without __hash__ makes instances unhashable
        # (Python 3 sets __hash__ to None) — confirm that is acceptable.
        return value(self) == value(rhs)

    def set_vhdl_name(self,name, Overwrite = False):
        """Record the VHDL-side name; re-assignment without Overwrite raises."""
        if self.__hdl_name__ and self.__hdl_name__ != name and Overwrite == False:
            raise Exception("double Conversion to vhdl")
        else:
            self.__hdl_name__ = name

    def isInOutType(self, Inout):
        """True if this object matches the requested direction, mirroring
        Master/Slave ports onto their complementary directions."""
        if Inout==None or self._Inout == Inout:
            return True
        elif self._Inout== InOut_t.Master_t:
            # NOTE(review): getMember is not defined in this chunk — it is
            # presumably inherited from HDPython_base; confirm.
            mem = self.getMember(Inout)
            return len(mem) > 0
        elif self._Inout == InOut_t.Slave_t:
            # a Slave port matches the mirrored direction of the query
            if Inout == InOut_t.Master_t:
                Inout = InOut_t.Slave_t
            elif Inout == InOut_t.Slave_t:
                Inout = InOut_t.Master_t
            elif Inout == InOut_t.input_t:
                Inout = InOut_t.output_t
            elif Inout == InOut_t.output_t:
                Inout = InOut_t.input_t
            mem = self.getMember(Inout)
            return len(mem) > 0

    def __str__(self):
        # prefer the VHDL name once assigned; fall back to the member name
        if self.__hdl_name__:
            return self.__hdl_name__
        return self._type(value(self.symbol)).name

    def _issubclass_(self,test):
        if super()._issubclass_(test):
            return True
        return "v_enum" == test
| 3,464 | 1,155 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import pip
from setuptools import setup, find_packages
import fabrik
# Shortcut: "python setup.py publish" uploads a source distribution and exits.
if sys.argv[-1] == "publish":
    os.system("python setup.py sdist upload")
    sys.exit()

package_exclude = ("tests*", "examples*")
packages = find_packages(exclude=package_exclude)

# Convert markdown to rst for the PyPI long description; fall back to an
# empty description when pypandoc (or pandoc itself) is unavailable.
try:
    from pypandoc import convert
    long_description = convert("README.md", "rst")
except Exception:
    # Was a bare `except:` — that would also swallow SystemExit and
    # KeyboardInterrupt.  Exception still covers ImportError and pandoc
    # runtime failures, preserving the best-effort fallback.
    long_description = ""

setup(
    name="fabrik",
    version=fabrik.__version__,
    description="A simple to use deployment toolkit built on top of Fabric",
    long_description=long_description,
    author="Fröjd",
    author_email="martin.sandstrom@frojd.se",
    url="https://github.com/frojd/fabrik",
    packages=packages,
    include_package_data=True,
    install_requires=[
        'Fabric==1.12.0',
        'Unipath==1.1',
        'PyCrypto==2.6.1',
        'jinja2==2.8',
        'click>=5.0',
        'GitPython==1.0.1',
    ],
    tests_require=[
        'Fabric==1.12.0',
        'Unipath==1.1',
        'PyCrypto==2.6.1',
        'jinja2==2.8',
        'click>=5.0',
        'GitPython==1.0.1',
    ],
    entry_points={
        "console_scripts": [
            "fabrik = fabrik.scripts.fabrik:main",
            "fabrik_start = fabrik.cli.scripts.init:main",
            "fabrik_cleanup = fabrik.cli.scripts.cleanup:main",
        ]
    },
    license="MIT",
    zip_safe=False,
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Environment :: Console",
        "Environment :: Web Environment",
        "Intended Audience :: Developers",
        "Intended Audience :: System Administrators",
        "Natural Language :: English",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.7",
        "Topic :: Software Development :: Build Tools",
        "Topic :: Software Development :: Libraries",
        "Topic :: System :: Software Distribution",
        "Topic :: System :: Systems Administration",
    ],
)
| 2,146 | 694 |
foo # comment
| 14 | 5 |
import codecs
import csv
import filecmp
import os
import time
import click
import pyimgur
import requests
from zipfile import ZipFile, ZIP_DEFLATED
from hacktools import common, wii
version = "1.5.5"

# Disc image and extraction/repack working folders
isofile = "data/disc.iso"
infolder = "data/extract/"
outfolder = "data/repack/"
replacefolder = "data/replace/"
# Font translation tables and BRFNT image round-trip paths
fontin = "data/font_input.txt"
fontout = "data/font_output.txt"
fontimgout = "data/extract_FNT/font_jp.png"
fontimgin = "data/work_FNT/font_jp.png"
fontfile = "data/extract/DATA/files/resfont/font_jp.brfnt"
# Game executable (main.dol) before/after patching
dolin = "data/extract/DATA/sys/main.dol"
dolout = "data/repack/DATA/sys/main.dol"
# Riivolution patch inputs/outputs
patchin = "data/extract/DATA/files/"
patchout = "data/repack/DATA/files/"
patchfolder = "data/patch/monopri/"
xmlfile = "data/patch/riivolution/monopri.xml"
@common.cli.command()
@click.option("--iso", is_flag=True, default=False)
@click.option("--msbe", is_flag=True, default=False)
@click.option("--movie", is_flag=True, default=False)
@click.option("--tpl", is_flag=True, default=False)
@click.option("--fnt", is_flag=True, default=False)
@click.option("--speaker", is_flag=True, default=False)
@click.option("--merge", is_flag=True, default=False)
def extract(iso, msbe, movie, tpl, fnt, speaker, merge):
    # Extract game assets.  With no asset flag given, everything is extracted.
    # `speaker` and `merge` only tune the MSBE extraction, so they are not
    # part of the "extract everything" decision.
    # Renamed from `all` so the builtin all() is not shadowed.
    do_all = not iso and not msbe and not movie and not fnt and not tpl
    if do_all or iso:
        wii.extractIso(isofile, infolder, outfolder)
    if do_all or msbe:
        import extract_msbe
        extract_msbe.run(speaker, merge)
    if do_all or movie:
        import extract_movie
        extract_movie.run()
    if do_all or fnt:
        # Dump the font metrics and the glyph sheet as an editable PNG
        wii.extractFontData(fontfile, fontout)
        common.makeFolder("data/extract_FNT/")
        wii.extractBRFNT(fontfile, fontimgout)
    if do_all or tpl:
        # Unpack the various ARC archives and convert textures to PNG
        wii.extractARC("data/extract/DATA/files/3d/map/", "data/extract_3D/")
        wii.extractARC("data/extract/DATA/files/effect/", "data/extract_EFF/")
        wii.extractBREFT("data/extract_EFF", "data/extract_BREFT", "data/out_EFF")
        wii.extractARC("data/extract/DATA/files/lytdemo/exp_data/", "data/extract_TPL/")
        common.copyFolder("data/extract/DATA/files/textures/", "data/extract_TPL/textures/")
        wii.extractTPL("data/extract_TPL/", "data/out_TPL/")
@common.cli.command()
@click.option("--no-patch", is_flag=True, default=False)
@click.option("--msbe", is_flag=True, default=False)
@click.option("--onlyquest", is_flag=True, default=False)
@click.option("--movie", is_flag=True, default=False)
@click.option("--tpl", is_flag=True, default=False)
@click.option("--fnt", is_flag=True, default=False)
def repack(no_patch, msbe, onlyquest, movie, tpl, fnt):
    # Repack translated assets, patch main.dol, and (unless --no-patch)
    # build the Riivolution patch folder + distributable ZIP.
    # Renamed from `all` so the builtin all() is not shadowed.
    do_all = not msbe and not movie and not tpl and not fnt
    if do_all or fnt:
        common.logMessage("Repacking FNT from", "data/work_FNT", "...")
        # Prefer a user-provided replacement font file when present
        fontfilein = fontfile
        if os.path.isfile(fontfile.replace("/extract/", "/replace/")):
            fontfilein = fontfilein.replace("/extract/", "/replace/")
        fontfileout = fontfile.replace("/extract/", "/repack/")
        wii.repackFontData(fontfilein, fontfileout, fontin)
        wii.repackBRFNT(fontfileout, fontimgin)
        common.logMessage("Done!")
    if do_all or fnt or msbe:
        import repack_msbe
        repack_msbe.run(onlyquest)
    if do_all or fnt or movie:
        import repack_movie
        repack_movie.run()
    if do_all or tpl:
        import repack_tpl
        repack_tpl.run()
    if os.path.isdir(replacefolder):
        common.mergeFolder(replacefolder, outfolder)
    # Patch the main.dol file
    common.copyFile(dolin, dolout)
    with common.Stream(dolout, "rb+", False) as f:
        # Set the movie subtitles X position to 0 since we're doing some manual centering
        # Change "fsubs f28,f7,f8 to fsubs f28,f8,f8"
        f.seek(0x8CF4)  # 0x8000cfb4
        f.writeUInt(0xef884028)
    if not no_patch:
        common.makeFolders(patchfolder)
        common.makeFolder(patchfolder.replace("monopri/", "riivolution/"))
        common.logMessage("Creating patch folder in", patchfolder, "...")
        # Copy every repacked file that actually differs from the original
        files = common.getFiles(patchin)
        for file in common.showProgress(files):
            if patchout + file == dolout:
                continue
            if not filecmp.cmp(patchin + file, patchout + file):
                common.makeFolders(patchfolder + os.path.dirname(file))
                common.copyFile(patchout + file, patchfolder + file)
        # Emit the Riivolution XML describing the folder patch + dol poke
        with common.Stream(xmlfile, "w") as f:
            f.writeLine('<wiidisc version="1">')
            f.writeLine('\t<id game="RSEJGD"/>')
            f.writeLine('\t<options>')
            f.writeLine('\t\t<section name="Translation">')
            f.writeLine('\t\t\t<option name="Translation Patch">')
            f.writeLine('\t\t\t\t<choice name="Enabled">')
            f.writeLine('\t\t\t\t\t<patch id="monoprifolder"/>')
            f.writeLine('\t\t\t\t</choice>')
            f.writeLine('\t\t\t</option>')
            f.writeLine('\t\t</section>')
            f.writeLine('\t</options>')
            f.writeLine('\t<patch id="monoprifolder">')
            f.writeLine('\t\t<folder external="/monopri" recursive="false"/>')
            f.writeLine('\t\t<folder external="/monopri" disc="/"/>')
            f.writeLine('\t\t<memory offset="0x8000cfb4" value="ef884028" original="ef874028" />')
            f.writeLine('\t</patch>')
            f.writeLine('</wiidisc>')
        common.logMessage("Creating ZIP file ...")
        # Helper batch script for patching an ISO directly with wit
        with common.Stream("patcher.bat", "w") as f:
            f.writeLine('del monopri_patched.iso')
            f.writeLine('rmdir /s/q patch_temp')
            f.writeLine('wit EXTRACT -o %1 patch_temp')
            f.writeLine('xcopy patch\\monopri patch_temp\\DATA\\files /s/e/y/q')
            f.writeLine('xcopy main.dol patch_temp\\DATA\\sys\\main.dol /y/q')
            f.writeLine('wit COPY patch_temp monopri_patched.iso')
            f.writeLine('rmdir /s/q patch_temp')
        common.copyFile(dolout, "main.dol")
        with ZipFile("data/patch.zip", "w", ZIP_DEFLATED) as zip:
            for foldername, _, filenames in os.walk("data/patch"):
                for filename in filenames:
                    filepath = os.path.join(foldername, filename)
                    # [5:] strips the leading "data/" from the archive path
                    zip.write(filepath, filepath[5:])
            zip.write("main.dol")
            zip.write("patcher.bat")
        os.remove("patcher.bat")
        os.remove("main.dol")
        common.logMessage("Done!")
@common.cli.command()
@click.argument("clientid")
def generatepo(clientid):
    # Upload every working TPL image to Imgur and write a gettext .po
    # skeleton whose comments link each msgid to its uploaded reference image.
    tplfolder = "data/work_TPL"
    tploriginal = "data/out_TPL"
    files = common.getFiles(tplfolder)
    im = pyimgur.Imgur(clientid)
    with common.Stream("data/tpl.po", "w") as f:
        for file in common.showProgress(files):
            uploaded = False
            # Retry forever on HTTP errors (Imgur rate limiting)
            while not uploaded:
                try:
                    # NOTE(review): title="file" passes the literal string
                    # "file", not the variable — possibly intended as
                    # title=file; confirm before changing.
                    image = im.upload_image(tploriginal + file, title="file")
                    f.writeLine("#. " + image.link)
                    f.writeLine("msgid \"" + file.split("/")[2] + "\"")
                    f.writeLine("msgstr \"\"")
                    f.writeLine("")
                    uploaded = True
                    # Pause between uploads to stay under the API rate limit
                    time.sleep(30)
                except requests.HTTPError:
                    # Rate limited or transient failure: back off, then retry
                    time.sleep(300)
    common.logMessage("Done!")
@common.cli.command()
def dupe():
    # Scan the translation sections (skipping quest.bin) and report lines
    # whose translations disagree across sections, plus lines that occur
    # more than twice overall.
    sections = common.getSections("data/msbe_input.txt")
    seen = {}
    for name in sections:
        if name == "quest.bin":
            continue
        entries = sections[name]
        for text in entries:
            translation = entries[text][0]
            entry = seen.get(text)
            if entry is None:
                # first sighting: remember translation, section, and count
                seen[text] = [translation, name, 1]
                continue
            entry[2] += 1
            if translation != entry[0]:
                common.logMessage("{}: {}={} ({} @{})".format(name, text, translation, entry[0], entry[1]))
    for text, entry in seen.items():
        if entry[2] > 2:
            common.logMessage("Dupe", entry[2], text + "=")
def cleanSection(section):
    """Drop empty translations from every entry of *section*, in place.

    Each value is a list of candidate translations.  Empty strings are
    removed; if nothing remains, a single empty string is kept as a
    placeholder so the entry stays a non-empty list.

    :param dict section: maps original line -> list of translations.
    :return: the same dict (mutated), for call-chaining convenience.
    """
    # Loop variable renamed from `str`, which shadowed the builtin.
    for key in section:
        kept = [trans for trans in section[key] if trans != ""]
        section[key] = kept if kept else [""]
    return section
@common.cli.command()
def smartcat():
    # Import translations exported from Smartcat as CSV and regenerate the
    # *_input.txt files, using the *_output.txt files to preserve ordering.
    click.confirm("Importing Smartcat CSV will override the msbe_input.txt and movie_input.txt files, are you sure?", abort=True)
    common.logMessage("Importing Smartcat CSV ...")
    # Read the lines from the CSV files
    infiles = ["data/msbe_output_rearranged.csv", "data/msbe_events.csv", "data/msbe_system.csv", "data/movie.csv"]
    # section: per-file translations keyed by "!FILE:" markers in the CSV;
    # commons: every translation regardless of file, used as a fallback.
    section = {}
    commons = {}
    current = ""
    for file in infiles:
        with open(file, newline="", encoding="utf-8") as csvfile:
            rows = csv.reader(csvfile, delimiter=",", quotechar="\"")
            for row in rows:
                orig = row[0]
                trans = row[1]
                # Skip header rows, image links and table-of-contents noise
                if orig == "ja" or ".png" in orig or "youtube.com" in orig or orig == "Table of Contents:" or orig == "!Images":
                    continue
                # Strip a leading "(...) " annotation from the source text
                if orig.startswith("("):
                    orig = orig.split(") ", 1)[1]
                if orig != "":
                    if orig.startswith("!FILE:"):
                        # Marker row: switch to a new section
                        current = orig.split(",")[0].replace("!FILE:", "")
                        section[current] = {}
                    elif current != "":
                        # Same source line may repeat; keep all translations
                        if orig in section[current]:
                            section[current][orig].append(trans)
                        else:
                            section[current][orig] = [trans]
                        if orig in commons:
                            commons[orig].append(trans)
                        else:
                            commons[orig] = [trans]
    # Clean up empty lines that have translations somewhere else
    commons = cleanSection(commons)
    for name in section:
        section[name] = cleanSection(section[name])
    # Export everything to msbe_input following msbe_output for ordering
    outputfiles = ["data/msbe_output.txt", "data/movie_output.txt"]
    inputfiles = ["data/msbe_input.txt", "data/movie_input.txt"]
    for i in range(len(outputfiles)):
        with codecs.open(outputfiles[i], "r", "utf-8") as fin:
            with codecs.open(inputfiles[i], "w", "utf-8") as f:
                current = ""
                for line in fin:
                    line = line.rstrip("\r\n").replace("\ufeff", "")
                    if line.startswith("!FILE:"):
                        current = line.replace("!FILE:", "")
                        if current not in section:
                            common.logWarning("Section", current, "not found")
                            current = ""
                        else:
                            f.write("!FILE:" + current + "\n")
                    elif current != "":
                        line = line.replace("=", "")
                        # Movie lines (i == 1) carry a "a:b:" prefix that must
                        # be preserved in the output but excluded from lookup
                        linestart = ""
                        if i == 1:
                            linesplit = line.split(":", 2)
                            linestart = linesplit[0] + ":" + linesplit[1] + ":"
                            line = linesplit[2]
                        sectionline = line
                        if line not in section[current]:
                            # Try progressively looser matches: stripped
                            # spaces, then the "<3D>" placeholder for "="
                            if line.strip(" ") in section[current] or line.strip(" ") in commons:
                                sectionline = line.strip(" ")
                            elif line.replace("<3D>", "=") in section[current] or line.replace("<3D>", "=") in commons:
                                sectionline = line.replace("<3D>", "=")
                            # Fall back to a translation seen in another file
                            if sectionline not in section[current] and sectionline in commons:
                                section[current][sectionline] = commons[sectionline]
                        if sectionline in section[current]:
                            f.write(linestart + line + "=" + section[current][sectionline][0] + "\n")
                            # Consume queued alternatives for repeated lines
                            if len(section[current][sectionline]) > 1:
                                section[current][sectionline].pop()
                        else:
                            f.write(linestart + line + "=\n")
                            common.logWarning("Line \"" + sectionline + "\" in section", current, "not found")
    common.logMessage("Done!")
if __name__ == "__main__":
    # Entry point: announce the version, then hand control to the CLI,
    # refusing to run when the data folder is absent.
    click.echo("MonoPriTranslation version " + version)
    if os.path.isdir("data"):
        common.runCLI(common.cli)
    else:
        common.logError("data folder not found.")
        quit()
| 12,607 | 3,908 |
# -*- coding: utf-8 -*-
#
# This file is part of Sequana software
#
# Copyright (c) 2016 - Sequana Development Team
#
# File author(s):
# Thomas Cokelaer <thomas.cokelaer@pasteur.fr>
# Dimitri Desvillechabrol <dimitri.desvillechabrol@pasteur.fr>,
# <d.desvillechabrol@gmail.com>
#
# Distributed under the terms of the 3-clause BSD license.
# The full license is in the LICENSE file, distributed with this software.
#
# website: https://github.com/sequana/sequana
# documentation: http://sequana.readthedocs.io
#
##############################################################################
"""Utilities for the genome coverage"""
import re
import ast
import os
import sys
from biokit.stats import mixture
from sequana.lazy import pandas as pd
from sequana.lazy import numpy as np
from sequana.lazy import pylab
from sequana import logger
from sequana.tools import gc_content, genbank_features_parser
from sequana.errors import SequanaException
from easydev import do_profile
__all__ = ["GenomeCov", "ChromosomeCov", "DoubleThresholds"]
class DoubleThresholds(object):
    """Simple structure to handle the double threshold for negative and
    positive sides

    Used by GenomeCov and related classes.

    ::

        dt = DoubleThresholds(-3, 4, 0.5, 0.5)

    This means the low threshold is -3 while the high threshold is 4. The two
    following values must be between 0 and 1 and are used to define the value
    of the double threshold set to half the value of the main threshold.

    Internally, the main thresholds are stored in the low and high attributes.
    The secondary thresholds are derived from the main thresholds and the
    two ratios. The ratios are named ldtr and hdtr for low double threshold
    ratio and high double threshold ratio. The secondary thresholds are
    denoted low2 and high2 and are updated automatically if low, high, ldtr or
    hdtr are changed.
    """
    def __init__(self, low=-3, high=3, ldtr=0.5, hdtr=0.5):
        """:param low: main negative threshold (< 0)
        :param high: main positive threshold (> 0)
        :param ldtr: low double-threshold ratio, in [0, 1]
        :param hdtr: high double-threshold ratio, in [0, 1]
        """
        assert ldtr >= 0. and ldtr <= 1., \
            "ldrt parameter (low double threshold ratio) must be in [0,1]"
        assert hdtr >= 0. and hdtr <= 1., \
            "hdrt parameter (high double threshold ratio) must be in [0,1]"
        assert low < 0, "low threshold must be negative"
        assert high > 0, "high threshold must be positive"
        self._ldtr = ldtr
        self._hdtr = hdtr
        self._high = high
        self._low = low

    def _get_ldtr(self):
        return self._ldtr

    def _set_ldtr(self, ldtr):
        self._ldtr = ldtr
        self._low2 = self._low * self._ldtr
    ldtr = property(_get_ldtr, _set_ldtr)

    def _get_hdtr(self):
        return self._hdtr

    def _set_hdtr(self, hdtr):
        self._hdtr = hdtr
        self._high2 = self._high * self._hdtr
    hdtr = property(_get_hdtr, _set_hdtr)

    def _get_low(self):
        return self._low

    def _set_low(self, value):
        assert value < 0.
        self._low = value
        self._low2 = self._low * self._ldtr
    low = property(_get_low, _set_low)

    def _get_high(self):
        return self._high

    def _set_high(self, value):
        assert value > 0.
        self._high = value
        # BUG FIX: the cached secondary threshold was previously computed
        # with self._ldtr (the *low* ratio) instead of self._hdtr.
        self._high2 = self._high * self._hdtr
    high = property(_get_high, _set_high)

    # low2/high2 are recomputed on access, so they always reflect the
    # current main threshold and ratio.
    def _get_low2(self):
        return self._low * self._ldtr
    low2 = property(_get_low2)

    def _get_high2(self):
        return self._high * self._hdtr
    high2 = property(_get_high2)

    def get_args(self):
        """Return the four parameters as a comma-separated string."""
        return "%.2f,%.2f,%.2f,%.2f" % (self.low, self.high, self.ldtr,
                self.hdtr)

    def copy(self):
        """Return a new DoubleThresholds with the same parameters."""
        thresholds = DoubleThresholds(self.low, self.high,
            self.ldtr, self.hdtr)
        return thresholds

    def __str__(self):
        txt = "Low threshold: %s\n" % self.low
        txt += "High threshold: %s\n" % self.high
        txt += "double-low threshold: %s\n" % self.low2
        txt += "double-high threshold: %s" % self.high2
        return txt
class GenomeCov(object):
    """Create a list of dataframe to hold data from a BED file generated with
    samtools depth.

    This class can be used to plot the coverage resulting from a mapping, which
    is stored in BED format. The BED file may contain several chromosomes.
    There are handled independently and accessible as a list of
    :class:`ChromosomeCov` instances.

    Example:

    .. plot::
        :include-source:

        from sequana import GenomeCov, sequana_data
        filename = sequana_data('JB409847.bed')
        reference = sequana_data("JB409847.fasta")

        gencov = GenomeCov(filename)
        gencov.compute_gc_content(reference)

        gencov = GenomeCov(filename)
        for chrom in gencov:
            chrom.running_median(n=3001, circular=True)
            chrom.compute_zscore()
            chrom.plot_coverage()
        gencov[0].plot_coverage()

    Results are stored in a list of :class:`ChromosomeCov` named
    :attr:`chr_list`.
    """
    def __init__(self, input_filename, genbank_file=None,
                 low_threshold=-3, high_threshold=3, ldtr=0.5, hdtr=0.5):
        """.. rubric:: constructor

        :param str input_filename: the input data with results of a bedtools
            genomecov run. This is just a 3-column file. The first column is a
            string (chromosome), second column is the base postion and third
            is the coverage.
        :param str genbank_file: annotation file of your referenve.
        :param float low_threshold: threshold used to identify under-covered
            genomic region of interest (ROI). Must be negative
        :param float high_threshold: threshold used to identify over-covered
            genomic region of interest (ROI). Must be positive
        :param float ldtr: fraction of the low_threshold to be used to define
            the intermediate threshold in the double threshold method. Must be
            between 0 and 1.
        :param float rdtr: fraction of the low_threshold to be used to define
            the intermediate threshold in the double threshold method. Must be
            between 0 and 1.
        """
        # Keep information if the genome is circular and the window size used
        self._circular = None
        self._feature_dict = None
        self._gc_window_size = None
        self._genbank_filename = None
        self._window_size = None

        # the user choice have the priorities over csv file
        if genbank_file:
            self.genbank_filename = genbank_file
        # check is the input is a csv of a previous analysis
        # (_read_csv returns None when the file is a plain BED)
        try:
            self.chr_list = self._read_csv(input_filename)
        except FileNotFoundError as e:
            print("FileNotFound error({0}): {1}".format(e.errno, e.strerror))
            sys.exit(1)

        if not self.chr_list:
            # read bed file
            self.thresholds = DoubleThresholds(low_threshold, high_threshold,
                                               ldtr, hdtr)
            self.chr_list = self._read_bed(input_filename)

    def __getitem__(self, index):
        return self.chr_list[index]

    def __iter__(self):
        return self.chr_list.__iter__()

    def __len__(self):
        return len(self.chr_list)

    def __eq__(self, other):
        # Two GenomeCov are equal when all chromosomes' coverage columns match
        if len(self.chr_list) != len(other.chr_list):
            return False
        for a, b in zip(self.chr_list, other.chr_list):
            if all(a.df['cov'] == b.df['cov']) is False:
                return False
        return True

    def compute_coverage(self, window, circular=False, reference=None):
        """Compute GC content (if reference provided), running_median/zscore for each chromosome.
        """
        if reference:
            self.compute_gc_content(reference)
        for c in self.chr_list:
            c.running_median(window, circular)
            c.compute_zscore()

    @property
    def circular(self):
        """ Get the circularity of chromosome(s). It must be a boolean.
        """
        return self._circular

    @circular.setter
    def circular(self, circular):
        if isinstance(circular, bool):
            self._circular = circular
        else:
            logger.error("TypeError: Circular must be a boolean. True if your "
                         "genome is circular and False if not.")
            sys.exit(1)

    @property
    def feature_dict(self):
        """ Get the features dictionary of the genbank.
        """
        return self._feature_dict

    @feature_dict.setter
    def feature_dict(self, anything):
        # Read-only: populated as a side effect of setting genbank_filename
        logger.error("AttributeError: You can't set attribute.\n"
                     "GenomeCov.feature_dict is set when"
                     "GenomeCov.genbank_filename is set.")
        sys.exit(1)

    @property
    def gc_window_size(self):
        """ Get or set the window size to compute the GC content.
        """
        return self._gc_window_size

    @gc_window_size.setter
    def gc_window_size(self, n):
        # Force an odd window so the window is centered on each position
        if n % 2 == 0:
            logger.warning("Window size must be an odd number.")
            self._gc_window_size = n + 1
            logger.warning("{0} is incremented by 1".format(n))
        else:
            self._gc_window_size = n

    @property
    def genbank_filename(self):
        """ Get or set the genbank filename to annotate ROI detected with
        :meth:`ChromosomeCov.get_roi`. Changing the genbank filename will
        configure the :attr:`GenomeCov.feature_dict`.
        """
        return self._genbank_filename

    @genbank_filename.setter
    def genbank_filename(self, genbank_filename):
        if os.path.isfile(genbank_filename):
            self._genbank_filename = os.path.realpath(genbank_filename)
            self._feature_dict = genbank_features_parser(
                genbank_filename)
        else:
            logger.error("FileNotFoundError: The genbank file doesn't exist.")
            sys.exit(1)

    @property
    def window_size(self):
        """ Get or set the window size to compute the running median. Size
        must be an interger.
        """
        return self._window_size

    @window_size.setter
    def window_size(self, n):
        # Force an odd window so the running median is centered
        if n % 2 == 0:
            logger.warning("Window size must be an odd number.")
            self._window_size = n + 1
            logger.warning("{0} is incremented to {1}".format(
                n, self._window_size))
        else:
            self._window_size = n

    def _read_bed(self, input_filename):
        """ Read bed generated by samtools depth tools and create
        :class:'ChromosomeCov' list.
        """
        # NOTE(review): pd.read_table is deprecated in recent pandas in
        # favour of read_csv(sep="\t") -- confirm the pinned pandas version.
        df = pd.read_table(input_filename, header=None)
        df = df.rename(columns={0: "chr", 1: "pos", 2: "cov", 3: "mapq0"})
        chr_list = self._set_chr_list(df)
        # Set the link to this instance in each chromosome
        # useful if one wants to recompute GC content with different window
        return chr_list

    def _read_csv(self, input_filename):
        """ Read csv generated by :class:'GenomeCov' and create
        :class:'ChromosomeCov' list.

        Returns None when the file was not produced by sequana_coverage.
        """
        # set regex to get important information about previous analysis
        re_threshold = re.compile("thresholds:([\d,\.-]+)")
        re_window_size = re.compile("\swindow_size:(\d+)")
        re_circular = re.compile("circular:(\w+)")
        re_gc_window_size = re.compile("gc_window_size:(\d+)")
        re_genbank = re.compile("genbank:([\{0}\w\.\-]+)".format(os.sep))
        re_chrom = re.compile("^# ([\w\-\.]+):")
        re_gaussian = re.compile("(\[\{.+\}\])")
        with open(input_filename, "r") as fp:
            line = fp.readline()
            # check if file was generated by sequana_coverage
            if not line.startswith("# sequana_coverage"):
                return None
            # get thresholds
            thresholds = re_threshold.findall(line)[0]
            thresholds = [float(f) for f in thresholds.split(',')]
            self.thresholds = DoubleThresholds(*thresholds)
            # get window size
            self.window_size = int(re_window_size.search(line).group(1))
            # get circular
            circular = re_circular.search(line).group(1)
            self.circular = False if circular == "False" else True
            # get gc_window_size
            gc = re_gc_window_size.search(line)
            if gc:
                self.gc_window_size = int(gc.group(1))
            # get genbank (user-provided genbank takes priority over the csv)
            gb = re_genbank.search(line)
            if gb and not self.genbank_filename:
                self.genbank_filename = gb.group(1)
            # get gaussians for each chromosome from the "# <chrom>:" lines;
            # the loop stops at the first non-comment line, which is then the
            # csv header consumed by pd.read_csv below
            gaussians_dict = dict()
            for line in fp:
                chrom = re_chrom.search(line)
                if chrom:
                    gaussians = re_gaussian.search(line)
                    gaussians = ast.literal_eval(gaussians.group(1))
                    gaussians_dict[chrom.group(1)] = gaussians
                else:
                    break
            df = pd.read_csv(fp, header=None, names=line.strip().split(","))
            chr_list = self._set_chr_list(df)

        # Add gaussians and range informations
        for chrom in chr_list:
            chrom.set_gaussians(gaussians_dict[chrom.chrom_name])
            if self.circular:
                chrom.range = [None, None]
            else:
                mid = int(self.window_size/2)
                chrom.range = [mid, -mid]
            chrom.mixture_fitting = mixture.EM(
                chrom.df['scale'][chrom.range[0]:chrom.range[1]])
        return chr_list

    def _set_chr_list(self, df):
        # Split the whole-genome dataframe into one ChromosomeCov per "chr"
        df = df.set_index("chr", drop=False)
        return [ChromosomeCov(df.loc[key], self, self.thresholds) for key in
                df.index.unique()]

    def compute_gc_content(self, fasta_file, window_size=101, circular=False,
                           letters=['G', 'C', 'c', 'g']):
        """ Compute GC content of genome sequence.

        :param str fasta_file: fasta file name.
        :param int window_size: size of the sliding window.
        :param bool circular: if the genome is circular (like bacteria
            chromosome)

        Store the results in the :attr:`ChromosomeCov.df` attribute (dataframe)
        with a column named *gc*.
        """
        self.gc_window_size = window_size
        self.circular = circular
        gc_dict = gc_content(fasta_file, self.gc_window_size, circular,
                             letters=letters)
        for chrom in self.chr_list:
            if chrom.chrom_name in gc_dict.keys():
                chrom.df["gc"] = gc_dict[chrom.chrom_name]
            else:
                msg = ("The chromosome (or contig) %s in your"
                       " BED/BAM file was not found in the reference provided."
                       " Make sure your input reference file is the same"
                       " as the one used to perform the mapping or just"
                       " remove the --reference parameter.")
                raise SequanaException(msg % chrom.chrom_name)

    def get_stats(self, output="json"):
        """Return basic statistics for each chromosome

        :return: dictionary with chromosome names as keys
            and statistics as values.

        .. seealso:: :class:`ChromosomeCov`.
        """
        stats = {}
        for chrom in self.chr_list:
            stats[chrom.chrom_name] = chrom.get_stats(output=output)
        return stats

    def hist(self, logx=True, logy=True, fignum=1, N=20, lw=2, **kwargs):
        # Overlay the coverage histogram of every chromosome on one figure
        for chrom in self.chr_list:
            chrom.plot_hist_coverage(logx=logx, logy=logy, fignum=fignum, N=N,
                                     histtype='step', hold=True, lw=lw, **kwargs)
            pylab.legend()

    def to_csv(self, output_filename, **kwargs):
        """ Write all data in a csv.

        :param str output_filename: csv output file name.
        :param **dict kwargs: parameters of :meth:`pandas.DataFrame.to_csv`.
        """
        # Concatenate all df
        df_list = [chrom.get_df() for chrom in self.chr_list]
        df = pd.concat(df_list)
        # The header line encodes the analysis parameters so that
        # _read_csv can restore them when the file is re-loaded
        header = ("# sequana_coverage thresholds:{0} window_size:{1} circular:"
                  "{2}".format(self.thresholds.get_args(), self.window_size,
                               self.circular))
        if self.genbank_filename:
            header += ' genbank:' + self.genbank_filename
        if self.gc_window_size:
            header += ' gc_window_size:{0}'.format(self.gc_window_size)
        with open(output_filename, "w") as fp:
            print(header, file=fp)
            for chrom in self.chr_list:
                print("# {0}".format(chrom.get_gaussians()), file=fp)
            df.to_csv(fp, **kwargs)
class ChromosomeCov(object):
"""Factory to manipulate coverage and extract region of interests.
Example:
.. plot::
:include-source:
from sequana import GenomeCov, sequana_data
filename = sequana_data("virus.bed")
gencov = GenomeCov(filename)
chrcov = gencov[0]
chrcov.running_median(n=3001)
chrcov.compute_zscore()
chrcov.plot_coverage()
df = chrcov.get_roi().get_high_roi()
The *df* variable contains a dataframe with high region of interests (over
covered)
.. seealso:: sequana_coverage standalone application
"""
def __init__(self, df, genomecov, thresholds=None):
""".. rubric:: constructor
:param df: dataframe with position for a chromosome used within
:class:`GenomeCov`. Must contain the following columns:
["chr", "pos", "cov"]
:param thresholds: a data structure :class:`DoubleThresholds` that holds
the double threshold values.
"""
self._bed = genomecov
self.df = df.set_index("pos", drop=False)
self.chrom_name = str(df["chr"].iloc[0])
try:
self.thresholds = thresholds.copy()
except:
self.thresholds = DoubleThresholds()
def __str__(self):
stats = self.get_stats(output="dataframe")
stats.set_index("name", inplace=True)
def _getter(data, key):
return data.ix[key].Value
txt = "\nGenome length: %s" % int(len(self.df))
txt += "\nSequencing depth (DOC): %8.2f " % _getter(stats,'DOC')
txt += "\nSequencing depth (median): %8.2f " % _getter(stats, 'Median')
txt += "\nBreadth of coverage (BOC) (percent): %.2f " % _getter(
stats, 'BOC')
txt += "\nGenome coverage standard deviation : %8.2f " % _getter(
stats,'STD')
txt += "\nGenome coverage coefficient variation : %8.2f " % _getter(
stats,'CV')
return txt
def __len__(self):
return self.df.__len__()
@property
def bed(self):
return self._bed
@bed.setter
def bed(self):
logger.error("AttributeError: You can't set the ChromosomeCov.bed. "
"Setting is done automatically when the class is "
"created.")
def columns(self):
""" Return immutable ndarray implementing an ordered, sliceable set.
"""
return self.df.columns
def get_df(self):
return self.df.set_index("chr", drop=True)
def get_size(self):
return self.__len__()
def get_mean_cov(self):
return self.df["cov"].mean()
def get_var_coef(self):
return np.sqrt(self.df["cov"].var()) / self.get_mean_cov()
def get_gaussians(self):
return "{0}: {1}".format(self.chrom_name, self.gaussians_params)
def set_gaussians(self, gaussians):
""" Set gaussians predicted if you read a csv file generated by
:class:`GenomeCov`.
"""
self.gaussians_params = gaussians
self.best_gaussian = self._get_best_gaussian()
def moving_average(self, n, circular=False):
"""Compute moving average of the genome coverage
:param n: window's size. Must be odd
:param bool circular: is the chromosome circular or not
Store the results in the :attr:`df` attribute (dataframe) with a
column named *ma*.
"""
N = len(self.df['cov'])
assert n < N/2
from sequana.stats import moving_average
ret = np.cumsum(np.array(self.df["cov"]), dtype=float)
ret[n:] = ret[n:] - ret[:-n]
ma = ret[n - 1:] / n
mid = int(n / 2)
self.df["ma"] = pd.Series(ma, index=np.arange(start=mid,
stop=(len(ma) + mid)))
if circular:
# FIXME: shift of +-1 as compared to non circular case...
# shift the data and compute the moving average
self.data = list(self.df['cov'].values[N-n:]) +\
list(self.df['cov'].values) + \
list(self.df['cov'].values[0:n])
ma = moving_average(self.data, n)
self.ma = ma[n//2+1:-n//2]
self.df["ma"] = pd.Series(self.ma, index=self.df['cov'].index)
def running_median(self, n, circular=False):
"""Compute running median of genome coverage
:param int n: window's size.
:param bool circular: if a mapping is circular (e.g. bacteria
whole genome sequencing), set to True
Store the results in the :attr:`df` attribute (dataframe) with a
column named *rm*.
.. versionchanged:: 0.1.21
Use Pandas rolling function to speed up computation.
"""
self.bed.window_size = n
self.bed.circular = circular
# in py2/py3 the division (integer or not) has no impact
mid = int(n / 2)
self.range = [None, None]
try:
if circular:
# BASED on running_median pure implementation, could be much
# slower than pure pandas rolling function. Keep those 4 lines
# for book keeping though.
#cover = list(self.df["cov"])
#cover = cover[-mid:] + cover + cover[:mid]
#rm = running_median.RunningMedian(cover, n).run()
#self.df["rm"] = rm[mid:-mid]
rm = pd.concat([self.df['cov'][-mid:],
self.df['cov'],
self.df['cov'][:mid]]).rolling(
n, center=True).median()
self.df["rm"] = rm[mid:-mid]
else:
rm = self.df['cov'].rolling(n, center=True).median()
# Like in RunningMedian, we copy the NAN with real data
rm[0:mid] = self.df['cov'][0:mid]
rm[-mid:] = self.df['cov'][-mid:]
#rm = running_median.RunningMedian(cover, n).run()
self.df["rm"] = rm
# set up slice for gaussian prediction
self.range = [mid, -mid]
except:
self.df["rm"] = self.df["cov"]
def get_evenness(self):
"""Return Evenness of the coverage
:Reference: Konrad Oexle, Journal of Human Genetics 2016, Evaulation
of the evenness score in NGS.
work before or after normalisation but lead to different results.
"""
from sequana.stats import evenness
return evenness(self.df['cov'])
def get_cv(self):
    """Return the coefficient of variation (CV = sigma / mu).

    Multiply by 100 to express the result as a percentage.
    """
    coverage = self.df["cov"]
    return coverage.std() / coverage.mean()
def _coverage_scaling(self):
"""Normalize data with moving average of coverage
Store the results in the :attr:`df` attribute (dataframe) with a
column named *scale*.
.. note:: Needs to call :meth:`running_median`
"""
if "rm" not in self.df.columns:
txt = "Column rm (running median) is missing.\n" + self.__doc__
print(txt)
raise KeyError
else:
self.df["scale"] = self.df["cov"] / self.df["rm"]
self.df = self.df.replace(np.inf, np.nan)
self.df = self.df.replace(-np.inf, np.nan)
def _get_best_gaussian(self):
results_pis = [model["pi"] for model in self.gaussians_params]
indice = np.argmax(results_pis)
return self.gaussians_params[indice]
def compute_zscore(self, k=2, step=10, use_em=True, verbose=True):
    """ Compute zscore of coverage and normalized coverage.
    :param int k: Number gaussian predicted in mixture (default = 2)
    :param int step: (default = 10). This parameter is used to speed
        up computation and is ignored if the length of the coverage/sequence
        is below 100,000
    :param bool use_em: fit the mixture with EM (default); otherwise use
        GaussianMixtureFitting
    :param bool verbose: unused in this method -- kept for backward
        compatibility
    Store the results in the :attr:`df` attribute (dataframe) with a
    column named *zscore*.
    .. note:: needs to call :meth:`running_median` before hand.
    """
    # here for lazy import
    from biokit.stats import mixture
    # normalize coverage (adds the 'scale' column = cov / rm)
    self._coverage_scaling()
    # restrict to the central slice where the running median is defined
    data = self.df['scale'][self.range[0]:self.range[1]]
    if len(data) < 100000:
        step = 1
    # remove nan and inf values (zeros become NaN first, then dropped)
    data = data.replace(0, np.nan)
    data = data.dropna()
    if data.empty:
        # degenerate case (everything was 0/NaN): fall back to a flat
        # scale of 1 so the fitting below still has data to work on
        data = np.full(len(self.df), 1, dtype=int)
        self.df['scale'] = data
    if use_em:
        self.mixture_fitting = mixture.EM(
            data[::step])
        self.mixture_fitting.estimate(k=k)
    else:
        self.mixture_fitting = mixture.GaussianMixtureFitting(
            data[::step], k=k)
        self.mixture_fitting.estimate()
    # keep gaussians informations
    self.gaussians = self.mixture_fitting.results
    params_key = ("mus", "sigmas", "pis")
    # one {"mu": .., "sigma": .., "pi": ..} dict per fitted gaussian
    self.gaussians_params = [{key[:-1]: self.gaussians[key][i] for key in
                              params_key} for i in range(k)]
    self.best_gaussian = self._get_best_gaussian()
    # warning when sigma is equal to 0 (zscore would divide by zero)
    if self.best_gaussian["sigma"] == 0:
        logger.warning("A problem related to gaussian prediction is "
                       "detected. Be careful, Sigma is equal to 0.")
        self.df["zscore"] = np.zeros(len(self.df), dtype=int)
    else:
        self.df["zscore"] = (self.df["scale"] - self.best_gaussian["mu"]) / \
            self.best_gaussian["sigma"]
    # Naive check that the two fitted modes are actually separated
    if k == 2:
        mus = self.gaussians['mus']
        sigmas = self.gaussians["sigmas"]
        index0 = mus.index(self.best_gaussian["mu"])
        if index0 == 0:
            mu1 = mus[1]
            s0 = sigmas[0]
            mu0 = mus[0]
        else:
            mu1 = mus[0]
            s0 = sigmas[1]
            mu0 = mus[1]
        if abs(mu0 - mu1) < s0:
            logger.warning(("Warning: k=2 but note that |mu0-mu1| < sigma0. "
                            "k=1 could be a better choice"))
def get_centralness(self):
    """Proportion of central (normal) genome coverage.

    Defined as 1 - (number of non-normal positions) / (total length).

    .. note:: depends on the thresholds attribute being used.
    .. note:: depends slightly on :math:`W` the running median window
    """
    roi = self.get_roi()
    outlier_bases = (sum(roi.get_high_roi()['size']) +
                     sum(roi.get_low_roi()['size']))
    return 1 - outlier_bases / float(len(self))
def get_roi(self):
    """Keep positions with zscore outside of the thresholds range.
    :return: a dataframe from :class:`FilteredGenomeCov`
    :raises SystemExit: exits with status 1 if the *zscore* column is
        missing (i.e. :meth:`compute_zscore` was not called).
    .. note:: depends on the :attr:`thresholds` low and high values.
    """
    features = self.bed.feature_dict
    try:
        second_high = self.thresholds.high2
        second_low = self.thresholds.low2
        # pandas query referencing the two local thresholds above
        query = "zscore > @second_high or zscore < @second_low"
        # in the genbank, the names appears as e.g. JB12345
        # but in the fasta or BED files, it may be something like
        # gi|269939526|emb|FN433596.1|
        # so they do not match. We can try to guess it
        alternative = None
        if features:
            if self.chrom_name not in features.keys():
                msg = """Chromosome name (%s) not found
                    in the genbank. Make sure the chromosome names in
                    the BAM/BED files are compatible with the genbank
                    content. Genbank files contains the following keys """
                for this in features.keys():
                    msg += "\n - %s" % this
                # guess: take the last non-empty pipe-separated token and
                # strip its version suffix to recover the accession
                alternative = [x for x in self.chrom_name.split("|") if x]
                alternative = alternative[-1]  # assume the accession is last
                alternative = alternative.split('.')[0]  # remove version
                if alternative in features.keys():
                    msg += "\n Guessed the chromosome name to be: %s" % alternative
                else:
                    # guess failed: proceed without annotation
                    features = None
                logger.warning(msg % self.chrom_name)
        if features:
            if alternative:
                return FilteredGenomeCov(self.df.query(query), self.thresholds,
                                         features[alternative])
            else:
                return FilteredGenomeCov(self.df.query(query), self.thresholds,
                                         features[self.chrom_name])
        else:
            return FilteredGenomeCov(self.df.query(query), self.thresholds)
    except KeyError:
        logger.error("Column zscore is missing in data frame.\n"
                     "You must run compute_zscore before get low coverage."
                     "\n\n", self.__doc__)
        sys.exit(1)
def plot_coverage(self, filename=None, fontsize=16,
                  rm_lw=1, rm_color="#0099cc", rm_label="Running median",
                  th_lw=1, th_color="r", th_ls="--", main_color="k", main_lw=1,
                  main_kwargs=None, sample=True, set_ylimits=True):
    """ Plot coverage as a function of base position.
    :param filename: if set, save the figure to this file
    :param fontsize: font size of the axis labels
    :param rm_lw: line width of the running median (0 hides it)
    :param rm_color: line color of the running median
    :param rm_label: label for the running median
    :param th_lw: line width of the thresholds (0 hides them)
    :param th_color: line color of the thresholds
    :param th_ls: line style of the thresholds
    :param main_color: line color of the coverage
    :param main_lw: line width of the coverage
    :param main_kwargs: extra keyword arguments forwarded to the coverage
        plot call (default None; was a mutable ``{}`` default)
    :param sample: if there are more than 1 000 000 points, we
        use an integer step to skip data points. We can still plot
        all points at your own risk by setting this option to False
    :param set_ylimits: we want to focus on the "normal" coverage ignoring
        unsual excess. To do so, we set the yaxis range between 0 and a
        maximum value. This maximum value is set to the minimum between the
        6 times the mean coverage and 1.5 the maximum of the high coverage
        threshold curve. If you want to let the ylimits free, set this
        argument to False
    .. note:: if there are more than 1,000,000 points, we show only
        1,000,000 points by subsampling.
    In addition to the coverage, the running median and coverage confidence
    corresponding to the lower and upper zscore thresholds are shown.
    .. note:: uses the thresholds attribute.
    """
    if main_kwargs is None:
        main_kwargs = {}
    # z = (X/rm - mu) / sigma  =>  X = (z * sigma + mu) * rm
    high_zcov = (self.thresholds.high * self.best_gaussian["sigma"] +
                 self.best_gaussian["mu"]) * self.df["rm"]
    low_zcov = (self.thresholds.low * self.best_gaussian["sigma"] +
                self.best_gaussian["mu"]) * self.df["rm"]
    pylab.clf()
    ax = pylab.gca()
    ax.set_facecolor('#eeeeee')
    pylab.xlim(0, self.df["pos"].iloc[-1])
    axes = []
    labels = []
    # 1,000,000 points is a lot for matplotlib. Let us restrict ourself to 1
    # million points for now.
    if len(self.df) > 1000000 and sample is True:
        NN = int(len(self.df)/1000000)
    else:
        NN = 1
    # the main coverage plot
    p1, = pylab.plot(self.df["cov"][::NN], color=main_color, label="Coverage",
                     linewidth=main_lw, **main_kwargs)
    axes.append(p1)
    labels.append("Coverage")
    # The running median plot
    if rm_lw > 0:
        p2, = pylab.plot(self.df["rm"][::NN],
                         color=rm_color,
                         linewidth=rm_lw,
                         label=rm_label)
        axes.append(p2)
        labels.append(rm_label)
    # The threshold curves (upper labelled; lower hidden from the legend)
    if th_lw > 0:
        p3, = pylab.plot(high_zcov[::NN], linewidth=th_lw, color=th_color, ls=th_ls,
                         label="Thresholds")
        p4, = pylab.plot(low_zcov[::NN], linewidth=th_lw, color=th_color, ls=th_ls,
                         label="_nolegend_")
        axes.append(p3)
        labels.append("Thresholds")
    pylab.legend(axes, labels, loc="best")
    pylab.xlabel("Position", fontsize=fontsize)
    pylab.ylabel("Per-base coverage", fontsize=fontsize)
    pylab.grid(True)
    # sometimes there are large coverage value that squeeze the plot.
    # Let us restrict it
    if set_ylimits is True:
        pylab.ylim([0, min([
            high_zcov.max() * 1.5,
            self.df["cov"].mean()*6])])
    else:
        pylab.ylim([0, pylab.ylim()[1]])
    try:
        pylab.tight_layout()
    except Exception:  # was a bare except; tight_layout is best effort
        pass
    if filename:
        pylab.savefig(filename)
def _set_bins(self, df, binwidth):
try:
bins = np.arange(min(df), max(df) + binwidth, binwidth)
except ValueError:
return 100
if bins.any():
return bins
return 100
def plot_hist_zscore(self, fontsize=16, filename=None, max_z=6,
                     binwidth=0.5, **hist_kargs):
    """ Barplot of the zscore values.
    :param fontsize: font size of the x label
    :param filename: if set, save the figure to this file
    :param max_z: unused in this method -- kept for backward compatibility
    :param binwidth: width of the histogram bins
    :param hist_kargs: forwarded to :meth:`pandas.Series.hist`
    """
    pylab.clf()
    # only the central slice (where the running median is defined)
    bins = self._set_bins(self.df["zscore"][self.range[0]:self.range[1]],
                          binwidth)
    self.df["zscore"][self.range[0]:self.range[1]].hist(
        grid=True, bins=bins, **hist_kargs)
    pylab.xlabel("Z-Score", fontsize=fontsize)
    try:
        pylab.tight_layout()
    except Exception:  # was a bare except; tight_layout is best effort
        pass
    if filename:
        pylab.savefig(filename)
def plot_hist_normalized_coverage(self, filename=None, binwidth=0.1,
                                  max_z=4):
    """ Barplot of the normalized coverage with gaussian fitting.
    :param filename: if set, save the figure to this file
    :param binwidth: width of the histogram bins
    :param max_z: upper x-limit of the plot
    """
    pylab.clf()
    # NaN values would break the binning -> drop them first
    d = self.df["scale"][self.range[0]:self.range[1]].dropna()
    # remove outliers -> plot crashes if range between min and max is too high
    d = d[np.abs(d - d.mean()) <= (4 * d.std())]
    bins = self._set_bins(d, binwidth)
    self.mixture_fitting.data = d
    try:
        self.mixture_fitting.plot(self.gaussians_params, bins=bins, Xmin=0,
                                  Xmax=max_z)
    except ZeroDivisionError:
        pass
    pylab.grid(True)
    pylab.xlim([0, max_z])
    pylab.xlabel("Normalised per-base coverage")
    try:
        pylab.tight_layout()
    except Exception:  # was a bare except; tight_layout is best effort
        pass
    if filename:
        pylab.savefig(filename)
def plot_hist_coverage(self, logx=True, logy=True, fontsize=16, N=20,
                       fignum=1, hold=False, alpha=0.5, filename=None, **kw_hist):
    """Histogram of the raw coverage values.
    :param bool logx: log-scale the coverage axis (with log-spaced bins)
    :param bool logy: log-scale the count axis
    :param fontsize: font size of the axis labels
    :param int N: number of bins
    :param fignum: matplotlib figure number
    :param bool hold: if False, clear the figure first
    :param alpha: transparency of the bars
    :param filename: if set, save the figure to this file
    :param kw_hist: forwarded to :func:`pylab.hist`
    """
    if hold is False:
        pylab.figure(fignum)
        pylab.clf()
    ax = pylab.gca()
    ax.set_facecolor('#eeeeee')
    data = self.df['cov'].dropna().values
    maxcov = data.max()
    if logx is True and logy is True:
        bins = pylab.logspace(0, pylab.log10(maxcov), N)
        pylab.hist(data, bins=bins, log=True, label=self.chrom_name,
                   alpha=alpha, **kw_hist)
        pylab.semilogx()
        pylab.xlabel("Coverage (log scale)", fontsize=fontsize)
        pylab.ylabel("Count (log scale)", fontsize=fontsize)
    elif logx is False and logy is True:
        pylab.hist(data, bins=N, log=True, label=self.chrom_name,
                   alpha=alpha, **kw_hist)
        pylab.xlabel("Coverage", fontsize=fontsize)
        pylab.ylabel("Count (log scale)", fontsize=fontsize)
    elif logx is True and logy is False:
        bins = pylab.logspace(0, pylab.log10(maxcov), N)
        # BUG FIX: the log-spaced bins were computed but ``bins=N`` was
        # passed, producing linear bins on a log x-axis; use them.
        pylab.hist(data, bins=bins, label=self.chrom_name, alpha=alpha,
                   **kw_hist)
        pylab.xlabel("Coverage (log scale)", fontsize=fontsize)
        pylab.ylabel("Count", fontsize=fontsize)
        pylab.semilogx()
    else:
        pylab.hist(data, bins=N, label=self.chrom_name, alpha=alpha,
                   **kw_hist)
        pylab.xlabel("Coverage", fontsize=fontsize)
        pylab.ylabel("Count", fontsize=fontsize)
    pylab.grid(True)
    if filename:
        pylab.savefig(filename)
def to_csv(self, filename=None, start=None, stop=None, **kwargs):
    """ Write CSV file of the dataframe.
    :param str filename: csv output filename. If None, return string.
    :param int start: start row index.
    :param int stop: stop row index.
    Params of :meth:`pandas.DataFrame.to_csv`:
    :param list columns: columns you want to write.
    :param bool header: determine if the header is written.
    :param bool index: determine if the index is written.
    :param str float_format: determine the float format.
    """
    # Create directory to avoid errno 2
    if filename:
        directory = os.path.dirname(os.path.realpath(filename))
        try:
            # exist_ok=True only raises FileExistsError when the path
            # exists and is NOT a directory -- exactly the failure case
            # the previous manual isdir() check handled.
            os.makedirs(directory, exist_ok=True)
        except FileExistsError:
            msg = "{0} exist and it is not a directory".format(directory)
            logger.error(msg)
            raise
    return self.df[start:stop].to_csv(filename, **kwargs)
def plot_gc_vs_coverage(self, filename=None, bins=None, Nlevels=6,
                        fontsize=20, norm="log", ymin=0, ymax=100,
                        contour=True, **kwargs):
    """2D histogram of per-base coverage versus GC content (%).
    :param filename: if set, save the figure to this file
    :param bins: [x, y] bin counts; computed from the data when None
    :param Nlevels: number of contour levels (None or 0 disables contours)
    :param fontsize: font size of the labels
    :param norm: colour normalisation passed to the 2D histogram
    :param ymin: lower y-axis (GC %) limit
    :param ymax: upper y-axis (GC %) limit
    :param contour: overlay contour lines
    """
    if Nlevels is None or Nlevels == 0:
        contour = False
    data = self.df[['cov', 'gc']].copy()
    data['gc'] *= 100
    data = data.dropna()
    if bins is None:
        # y bins: roughly one per GC percent unit, bounded by the GC
        # window size; x bins: capped by the maximum coverage
        bins = [100, min(int(data['gc'].max() - data['gc'].min() + 1),
                max(5, self.bed.gc_window_size - 4))]
        bins[0] = max(10, min(bins[0], self.df['cov'].max()))
    from biokit import Hist2D
    h2 = Hist2D(data)
    try:
        h2.plot(bins=bins, xlabel="Per-base coverage",
                ylabel=r'GC content (%)',
                Nlevels=Nlevels, contour=contour, norm=norm,
                fontsize=fontsize, **kwargs)
    except Exception:
        # was a bare except: contour plotting may fail on degenerate
        # data; retry without contours as a best-effort fallback
        h2.plot(bins=bins, xlabel="Per-base coverage",
                ylabel=r'GC content (%)',
                Nlevels=Nlevels, contour=False, norm=norm,
                fontsize=fontsize, **kwargs)
    pylab.ylim([ymin, ymax])
    try:
        pylab.tight_layout()
    except Exception:  # was a bare except; tight_layout is best effort
        pass
    if filename:
        pylab.savefig(filename)
def get_gc_correlation(self):
    """Return the (Pearson) correlation between the coverage and GC content.

    The GC content is the one computed in
    :meth:`GenomeCov.compute_gc_content` (default window size is 101).
    """
    return self.df["cov"].corr(self.df["gc"])
def get_max_gc_correlation(self, reference, guess=100):
    """Plot correlation between coverage and GC content by varying the GC window
    The GC content uses a moving window of size W. This parameter affects
    the correlation bewteen coverage and GC. This function find the
    *optimal* window length.
    :param reference: forwarded to :meth:`compute_gc_content` -- see that
        method for the expected type
    :param int guess: initial guess for the window size
    :return: the window size found by the optimiser
    NOTE(review): :func:`scipy.optimize.fmin` MINIMISES ``func``, which
    returns the raw correlation -- this maximises |corr| only when the
    correlation is negative; confirm the intended objective.
    """
    pylab.clf()
    corrs = []
    wss = []

    def func(params):
        # candidate window size, rounded to an integer
        ws = int(round(params[0]))
        if ws < 10:
            # window too small to be meaningful: neutral objective value
            return 0
        self.bed.compute_gc_content(reference, ws)
        corr = self.get_gc_correlation()
        # record every evaluation for the scatter plot below
        corrs.append(corr)
        wss.append(ws)
        return corr
    from scipy.optimize import fmin
    # xtol=1: converge once the window size is pinned to ~1 unit
    res = fmin(func, guess, xtol=1, disp=False)
    pylab.plot(wss, corrs, "o")
    pylab.xlabel("GC window size")
    pylab.ylabel("Correlation")
    pylab.grid()
    return res[0]
def get_stats(self, output="json"):
    """Return basic stats about the coverage data.

    :param str output: "json" (default) returns a JSON string; any other
        value returns the :class:`pandas.DataFrame` itself.
    :return: BOC, CV, DOC, MAD, Median, STD (plus GC when available),
        each with a value and a short description.
    """
    data = self.df
    stats = {
        'DOC': data['cov'].mean(),     # depth of coverage
        'STD': data['cov'].std(),
        'Median': data['cov'].median(),
        # breadth of coverage, in percent
        'BOC': 100 * sum(data['cov'] > 0) / float(len(data))}
    try:
        stats['CV'] = stats['STD'] / stats['DOC']
    except ZeroDivisionError:
        # was a bare except; only a zero mean can fail here
        stats['CV'] = np.nan
    stats['MAD'] = np.median(abs(data['cov'].median() -
                                 data['cov']).dropna())
    names = ['BOC', 'CV', 'DOC', 'MAD', 'Median', 'STD']
    descriptions = [
        # was "(in %s)" -- a leftover format placeholder shown verbatim
        "breadth of coverage: the proportion (in %) of the "
        "genome covered by at least one read.",
        "the coefficient of variation.",
        "the sequencing depth (Depth of Coverage), that is the average of "
        "the genome coverage.",
        "median of the absolute median deviation defined as median(|X-median(X)|).",
        "Median of the coverage.",
        "standard deviation."
    ]
    if 'gc' in data.columns:
        stats['GC'] = data['gc'].mean() * 100
        names.append('GC')
        descriptions.append("GC content in %")
    df = pd.DataFrame({
        "name": names,
        "Value": [stats[x] for x in names],
        "Description": descriptions})
    if output == "json":
        return df.to_json()
    else:
        return df
class FilteredGenomeCov(object):
    """Class used within :class:`ChromosomeCov` to select a subset of the
    original GenomeCov
    :target: developers only
    """
    # feature types ignored when annotating/reporting regions of interest
    _feature_not_wanted = {"gene", "regulatory", "source"}

    def __init__(self, df, threshold, feature_list=None):
        """ .. rubric:: constructor
        :param df: dataframe with filtered position used within
            :class:`GenomeCov`. Must contain the following columns:
            ["pos", "cov", "rm", "zscore"]
        :param int threshold: a :class:`~sequana.bedtools.DoubleThresholds`
            instance.
        :param feature_list: optional list of annotation features (parsed
            elsewhere, e.g. from a genbank) used to annotate the regions.
        """
        if isinstance(feature_list, list) and len(feature_list) == 0:
            feature_list = None
        region_list = self._merge_region(df, threshold=threshold)
        if feature_list:
            region_list = self._add_annotation(region_list, feature_list)
        self.df = self._dict_to_df(region_list, feature_list)

        # strip a trailing version suffix (e.g. "1234.0" -> "1234") from
        # the gene coordinate columns, which are cast to str just below
        def func(x):
            try:
                return x.split(".")[0]
            except:
                return x
        for column in ['gene_end', 'gene_start']:
            if column in self.df.columns:
                self.df[column] = self.df[column].astype(str)
                self.df[column] = self.df[column].apply(func)

    def __str__(self):
        # delegate to the underlying dataframe representation
        return self.df.__str__()

    def __len__(self):
        # number of merged regions
        return self.df.__len__()

    def _merge_row(self, df, start, stop):
        """Summarise positions *start*..*stop* (inclusive) as one region dict."""
        chrom = df["chr"][start]
        cov = np.mean(df["cov"].loc[start:stop])
        max_cov = np.max(df["cov"].loc[start:stop])
        rm = np.mean(df["rm"].loc[start:stop])
        zscore = np.mean(df["zscore"].loc[start:stop])
        # keep the most extreme zscore, with the sign of the region mean
        if zscore >= 0:
            max_zscore = df["zscore"].loc[start:stop].max()
        else:
            max_zscore = df["zscore"].loc[start:stop].min()
        size = stop - start + 1
        # "end" is exclusive (stop + 1), BED-like convention
        return {"chr": chrom, "start": start, "end": stop + 1, "size": size,
                "mean_cov": cov, "mean_rm": rm, "mean_zscore": zscore,
                "max_zscore": max_zscore, "max_cov": max_cov}

    def _merge_region(self, df, threshold, zscore_label="zscore"):
        """Merge position side by side of a data frame.
        Uses a double threshold method.
        :param threshold: the high threshold (standard one), not the low one.
        .. todo:: to be documented
        """
        region_start = None
        region_stop = None
        start = 1
        stop = 1
        prev = 1
        # handle case where for example position n-1 have a zscore of -5 and n
        # have a zscore of 5. It is two different regions.
        region_zscore = 0
        merge_df = []
        for pos, zscore in zip(df["pos"], df[zscore_label]):
            stop = pos
            # contiguous position with a same-sign zscore -> same region
            if stop - 1 == prev and zscore * region_zscore >= 0:
                prev = stop
            else:
                # gap or sign flip: flush the current region and restart
                if region_start:
                    merge_df.append(self._merge_row(df, region_start,
                                    region_stop))
                    region_start = None
                start = stop
                prev = stop
            region_zscore = zscore
            # extend (or open) the region only when the zscore passes the
            # corresponding threshold
            if zscore > 0 and zscore > threshold.high:
                if not region_start:
                    region_start = pos
                    region_stop = pos
                else:
                    region_stop = pos
            elif zscore < 0 and zscore < threshold.low:
                if not region_start:
                    region_start = pos
                    region_stop = pos
                else:
                    region_stop = pos
        # flush the trailing region, if any
        if start < stop and region_start:
            merge_df.append(self._merge_row(df, region_start, region_stop))
        return merge_df

    def _add_annotation(self, region_list, feature_list):
        """ Add annotation from a dictionary generated by parsers in
        sequana.tools.
        Regions and features are both assumed ordered by position; the two
        lists are merged with a single forward pass over the features.
        """
        region_ann = []
        # an iterator of features
        iter_feature = iter(feature_list)
        feature = next(iter_feature)
        # skip unwanted feature types (e.g. "source") at the head
        while feature["type"] in FilteredGenomeCov._feature_not_wanted:
            try:
                feature = next(iter_feature)
            except StopIteration:
                print("Features types ({0}) are not present in the annotation"
                      " file. Please change what types you want".format(
                      feature['type']))
                return region_ann
        # merge regions and annotations
        for region in region_list:
            feature_exist = False
            # advance past features ending before this region starts
            while feature["gene_end"] <= region["start"]:
                try:
                    feature = next(iter_feature)
                except:
                    break
            # consume every feature overlapping the region
            while feature["gene_start"] < region["end"]:
                # A feature exist for detected ROI
                feature_exist = True
                # put locus_tag in gene field if gene doesn't exist
                try:
                    feature["gene"]
                except KeyError:
                    try:
                        feature["gene"] = feature["locus_tag"]
                    except:
                        feature["gene"] = "None"
                # put note field in product if product doesn't exist
                try:
                    feature["product"]
                except KeyError:
                    try:
                        feature["product"] = feature["note"]
                    except:
                        feature["product"] = "None"
                # FIXME what that ?
                #if region["start"] == 237433:
                #    print(dict(region, **feature))
                region_ann.append(dict(region, **feature))
                try:
                    feature = next(iter_feature)
                except StopIteration:
                    break
            if feature_exist is False:
                # no overlapping feature: emit the region with empty
                # annotation fields so all rows share the same schema
                region_ann.append(dict(region, **{"gene_start": None,
                                                  "gene_end": None,
                                                  "type": None,
                                                  "gene": None,
                                                  "strand": None,
                                                  "product": None}))
        return region_ann

    def _dict_to_df(self, region_list, annotation):
        """ Convert dictionary as dataframe.
        When *annotation* is falsy only the first 9 (coverage) columns are
        kept; otherwise annotated rows with unwanted feature types are
        filtered out.
        """
        merge_df = pd.DataFrame(region_list)
        colnames = ["chr", "start", "end", "size", "mean_cov", "max_cov",
                    "mean_rm", "mean_zscore", "max_zscore", "gene_start",
                    "gene_end", "type", "gene", "strand", "product"]
        if not annotation:
            colnames = colnames[:9]
        merge_df = pd.DataFrame(region_list, columns=colnames)
        int_column = ["start", "end", "size"]
        merge_df[int_column] = merge_df[int_column].astype(int)
        if annotation:
            merge_df.rename(columns={"gene": "gene_name"}, inplace=True)
            # maybe let the user set what he wants
            return merge_df.loc[~merge_df["type"].isin(
                FilteredGenomeCov._feature_not_wanted)]
        return merge_df

    def _get_sub_range(self, seq_range):
        """Return the rows overlapping [seq_range[0], seq_range[1]];
        the whole dataframe when seq_range is None (TypeError on index)."""
        try:
            return self.df[(self.df["end"] > seq_range[0]) &
                           (self.df["start"] < seq_range[1])]
        except TypeError:
            return self.df

    def get_low_roi(self, seq_range=None):
        # regions whose extreme zscore is negative (under-covered)
        df = self._get_sub_range(seq_range)
        return df.loc[df["max_zscore"] < 0]

    def get_high_roi(self, seq_range=None):
        # regions whose extreme zscore is non-negative (over-covered)
        df = self._get_sub_range(seq_range)
        return df.loc[df["max_zscore"] >= 0]
| 50,437 | 15,072 |
from unittest.mock import patch
from feed.models import FeedItem
from tests import BaseTestFixture, basic_auth_headers
class TestRefreshUserFeeds(BaseTestFixture):
    """Endpoint tests for the feed-refresh routes (/api/my-feeds/...)."""

    def test_refresh_single_feed_when_not_authenticated(self):
        response = self.client.post('/api/my-feeds/5/update')
        self.assertEqual(401, response.status_code)

    def test_refresh_all_user_feeds_when_not_authenticated(self):
        response = self.client.post('/api/my-feeds/update')
        self.assertEqual(401, response.status_code)

    def test_refresh_single_feed_when_feed_not_exist(self):
        response = self.client.post(
            '/api/my-feeds/5/update',
            headers=basic_auth_headers("user", "pass")
        )
        self.assertEqual(404, response.status_code)

    @patch("feed.routes.Scraper")
    @patch("feed.routes.scrape_single")
    def test_refresh_single_feed_no_error(self, scrape_single_task, scraper):
        scraper.parse.return_value = [FeedItem(id=5), FeedItem(id=6)]
        scrape_single_task.return_value = {}
        response = self.client.post(
            '/api/my-feeds/1/update',
            headers=basic_auth_headers("user", "pass")
        )
        self.assertEqual(200, response.status_code)
        # BUG FIX: ``mock.called_with(...)`` is not part of the Mock API --
        # it is an auto-created child mock, so the old assertion was always
        # truthy (a no-op). Assert the call actually happened. Arguments
        # are not compared because FeedItem equality semantics are
        # unconfirmed -- switch to assert_called_with once FeedItem
        # defines __eq__.
        self.assertTrue(scraper.persist.called)
        self.assertTrue(scrape_single_task.delay.called)

    @patch("feed.routes.Scraper")
    @patch("feed.routes.scrape_single")
    def test_refresh_all_user_feeds_no_error(self, scrape_single_task, scraper):
        scraper.parse.return_value = [FeedItem(id=5), FeedItem(id=6)]
        scrape_single_task.return_value = {}
        response = self.client.post(
            '/api/my-feeds/update',
            headers=basic_auth_headers("user", "pass")
        )
        self.assertEqual(200, response.status_code)
        # Same fix as above: the previous ``called_with`` check was a no-op.
        self.assertTrue(scraper.persist.called)
        self.assertTrue(scrape_single_task.delay.called)
| 1,989 | 670 |
# -*- coding: utf-8 -*-
from __future__ import print_function, division
from qsrlib_qsrs.qsr_triadic_abstractclass import QSR_Triadic_1t_Abstractclass
import math
class QSR_TPCC(QSR_Triadic_1t_Abstractclass):
    """TPCC QSRs.
    .. seealso:: For further details about TPCC, refer to its :doc:`description. <../handwritten/qsrs/tpcc>`
    """
    _unique_id = "tpcc"
    # 24 (distant|close)(left|right|straight)(front|back) relations plus
    # "sam" (object at the same position as the relatum)
    _all_possible_relations = ('dlf', 'dfl', 'dsl', 'dbl', 'dlb', 'dsb', 'drb', 'dbr',
                               'dsr', 'dfr', 'drf', 'dsf', 'clf', 'cfl', 'csl', 'cbl',
                               'clb', 'csb', 'crb', 'cbr', 'csr', 'cfr', 'crf', 'csf',
                               'sam')
    _dtype = "points"
    # angular sectors of the plane around the relatum, each pi/4 wide
    __partition_names = ['lb', 'bl', 'fl', 'lf', 'rf', 'fr', 'br', 'rb']
    __partition_size = 2 * math.pi / len(__partition_names)

    def __init__(self):
        """Constructor."""
        super(QSR_TPCC, self).__init__()

    def _compute_qsr(self, origin, relatum, objct, qsr_params, **kwargs):
        """Return the TPCC relation of *objct* w.r.t. the origin->relatum axis."""
        base_distance = math.sqrt((origin.x-relatum.x)**2 + (origin.y-relatum.y)**2)
        object_distance = math.sqrt((objct.x-relatum.x)**2 + (objct.y-relatum.y)**2)
        if object_distance == 0:
            return "sam"
        # distant or close: first letter
        relation = "d" if object_distance > base_distance else "c"
        angle = self._relative_angle(origin, relatum, objct)
        # BUG FIX: floating point can make angle round to exactly 2*pi,
        # giving a raw index of 8 (IndexError); wrap with a modulo.
        partition = int(angle / self.__partition_size) % len(self.__partition_names)
        relation += self.__partition_names[partition]
        sin_angle = math.fabs(math.sin(angle))
        if sin_angle < 0.00001 or sin_angle > 0.99999:
            # (nearly) aligned with or perpendicular to the reference axis:
            # the middle letter becomes 's' (straight)
            relation = relation[0]+'s'+relation[2]
        return relation

    @staticmethod
    def _relative_angle(a, b, c):
        """Compute relative angle used to select the (left/right/straight/front/back/straight)
        relationship.
        Angle of b->c relative to a->b, normalised to [0, 2*pi)."""
        angle_BA = math.atan2((b.y - a.y), (b.x - a.x))
        if angle_BA < 0:
            angle_BA += 2 * math.pi
        angle_CB = math.atan2((c.y - b.y), (c.x - b.x))
        if angle_CB < 0:
            angle_CB += 2 * math.pi
        angle_rel = angle_CB - angle_BA
        if angle_rel < 0:
            angle_rel += 2 * math.pi
        return angle_rel
| 2,272 | 804 |
import numpy as np
from addict import Dict
# Simulation configuration for the lens-modelling pipeline.
# NOTE(review): several distribution comments below contradicted the
# ``dist`` value they annotated (e.g. "Lognormal" above a normal); the
# comments were corrected -- the values themselves are unchanged.
cfg = Dict()
cfg.name = 'tdlmc'
cfg.seed = 1113 # random seed
cfg.bnn_prior_class = 'DiagonalBNNPrior'
cfg.n_data = 200 # number of images to generate
cfg.train_vs_val = 'train'
cfg.components = ['lens_mass', 'external_shear', 'src_light', 'lens_light', 'agn_light']
cfg.checkpoint_interval = 2
cfg.selection = dict(
                 magnification=dict(
                                    min=2.0
                                    ),
                 initial=["lambda x: x['lens_mass']['theta_E'] > 0.5",]
                 )
cfg.instrument = dict(
              pixel_scale=0.08, # scale (in arcseonds) of pixels
              ccd_gain=4.5, # electrons/ADU (analog-to-digital unit). A gain of 8 means that the camera digitizes the CCD signal so that each ADU corresponds to 8 photoelectrons.
              )
cfg.bandpass = dict(
                magnitude_zero_point=25.9463, # (effectively, the throuput) magnitude in which 1 count per second per arcsecond square is registered (in ADUs)
                )
cfg.observation = dict(
                  exposure_time=100.0, # exposure time per image (in seconds)
                  )
cfg.psf = dict(
           type='PIXEL', # string, type of PSF ('GAUSSIAN' and 'PIXEL' supported)
           kernel_size=91, # dimension of provided PSF kernel, only valid when profile='PIXEL'
           which_psf_maps=None, # None if rotate among all available PSF maps, else seed number of the map to generate all images with that map
           )
cfg.numerics = dict(
                supersampling_factor=1)
cfg.image = dict(
             num_pix=100, # cutout pixel size
             inverse=False, # if True, coord sys is ra to the left, if False, to the right
             )
cfg.bnn_omega = dict(
                 lens_mass = dict(
                                 profile='SPEMD', # only available type now
                                 # Normal(mu, sigma^2)
                                 center_x = dict(
                                          dist='normal', # one of ['normal', 'beta']
                                          mu=0.0,
                                          sigma=1.e-6,
                                          ),
                                 center_y = dict(
                                          dist='normal',
                                          mu=0.0,
                                          sigma=1.e-6,
                                          ),
                                 # Normal(mu, sigma^2) -- tight priors around the truth
                                 gamma = dict(
                                              dist='normal',
                                              mu=1.935,
                                              sigma=0.001,
                                              ),
                                 theta_E = dict(
                                                dist='normal',
                                                mu=1.082,
                                                sigma=0.001,
                                                ),
                                 # Normal(mu, sigma^2)
                                 q = dict(
                                          dist='normal',
                                          mu=0.869,
                                          sigma=0.001,
                                          ),
                                 phi = dict(
                                            dist='normal',
                                            mu= 0.708,
                                            sigma=0.001,
                                            ),
                                 ),
                 external_shear = dict(
                                       profile='SHEAR_GAMMA_PSI',
                                       gamma_ext = dict(
                                                        dist='normal',
                                                        mu=0.008, # See overleaf doc
                                                        sigma=0.001,
                                                        ),
                                       # normal with truncation bounds [0, pi]
                                       psi_ext = dict(
                                                      dist='normal',
                                                      mu=0.7853,
                                                      sigma=0.001,
                                                      lower=0,
                                                      upper=np.pi,
                                                      )
                                       ),
                 lens_light = dict(
                                  profile='SERSIC_ELLIPSE', # only available type now
                                  # Centered at lens mass
                                  # Normal(mu, sigma^2)
                                  magnitude = dict(
                                             dist='normal',
                                             mu=17.325,
                                             sigma=0.001,
                                             ),
                                  n_sersic = dict(
                                                  dist='normal',
                                                  mu=2.683,
                                                  sigma=0.001,
                                                  ),
                                  R_sersic = dict(
                                                  dist='normal',
                                                  mu=0.949,
                                                  sigma=0.001,
                                                  ),
                                  # normal with a lower truncation bound at 0
                                  q = dict(
                                           dist='normal',
                                           mu= 0.5,
                                           sigma=0.5,
                                           lower=0.0,
                                           ),
                                  phi = dict(
                                             dist='normal',
                                             mu= 0.658,
                                             sigma=0.001,
                                             ),
                                  ),
                 src_light = dict(
                                profile='SERSIC_ELLIPSE', # only available type now
                                # Normal(mu, sigma^2)
                                magnitude = dict(
                                             dist='normal',
                                             mu=20.407,
                                             sigma=0.001,
                                             ),
                                # Lognormal(mu, sigma^2)
                                n_sersic = dict(
                                                dist='lognormal',
                                                mu=0.7,
                                                sigma=0.4,
                                                ),
                                R_sersic = dict(
                                                dist='normal',
                                                mu=0.4,
                                                sigma=0.01,
                                                ),
                                # Normal(mu, sigma^2)
                                center_x = dict(
                                         dist='normal',
                                         mu=0.035,
                                         sigma=0.001,
                                         ),
                                center_y = dict(
                                         dist='normal',
                                         mu=-0.025,
                                         sigma=0.001,
                                         ),
                                q = dict(
                                         dist='normal',
                                         mu=0.869,
                                         sigma=0.001,
                                         ),
                                phi = dict(
                                           dist='normal',
                                           mu= 0.708,
                                           sigma=0.001,
                                           ),
                                ),
                 agn_light = dict(
                                 profile='LENSED_POSITION', # contains one of 'LENSED_POSITION' or 'SOURCE_POSITION'
                                 # Centered at host
                                 # Pre-magnification, image-plane amplitudes if 'LENSED_POSITION'
                                 # normal with a lower truncation bound at 0
                                 magnitude = dict(
                                              dist='normal',
                                              mu=21,
                                              sigma=1,
                                              lower=0.0,
                                              ),
                                 ),
                 )
"""
问题描述:给定一个矩阵matrix,其中的值有正、负和0,返回子矩阵的最大累加和.
例如,矩阵matrix为
-90 48 78
64 -40 64
-81 -7 66
其中,最大累加和的子矩阵为:
48 78
-40 64
-7 66
所以返回累加和209.
例如,matrix为:
-1 -1 -1
-1 2 2
-1 -1 -1
其中,最大累加和的子矩阵为:
2 2
所以返回累加和为4.
"""
import sys
from arrandmatrix.q16 import MaxSum
class MaxMatrixSum:
    """Maximum sub-matrix sum: for every pair of top/bottom row bounds,
    collapse the rows into one array and apply the 1-D maximum-subarray
    solver (MaxSum)."""

    @classmethod
    def get_max_sum(cls, matrix):
        """Return the largest sum over all sub-matrices of *matrix*."""
        if not matrix:
            return 0
        best = -sys.maxsize
        row_count = len(matrix)
        for top in range(row_count):
            # column-wise running sums of rows top..bottom
            collapsed = [0 for _ in range(len(matrix[0]))]
            for bottom in range(top, row_count):
                collapsed = cls.arr_add(matrix[bottom], collapsed)
                best = max(best, MaxSum.get_max_sum(collapsed))
        return best

    @classmethod
    def arr_add(cls, arr1, arr2):
        """Element-wise sum of two equal-length lists."""
        return [x + y for x, y in zip(arr1, arr2)]
if __name__ == '__main__':
    # sample matrix from the problem statement; expected answer: 209
    sample = [
        [-90, 48, 78],
        [64, -40, 64],
        [-81, -7, 66],
    ]
    print(MaxMatrixSum.get_max_sum(sample))
# Generated by Django 2.2.6 on 2019-10-22 00:26
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the ``Persona`` table.
    # NOTE(review): generated by Django -- avoid manual edits beyond comments.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Persona',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('dato', models.CharField(max_length=100, verbose_name='Datos del R.N.')),
                # set once on creation (auto_now_add)
                ('fecha', models.DateTimeField(auto_now_add=True, verbose_name='Fecha y Hora Nacimiento')),
                ('sexo', models.CharField(max_length=20)),
                ('dni', models.CharField(max_length=20, verbose_name='DNI - Madre o Titular')),
                # boolean flag, defaults to False -- semantics defined in app code
                ('reg', models.BooleanField(default=False)),
            ],
        ),
    ]
| 842 | 263 |
# For each test case: read n numbers and print the maximum digit sum
# over the products of all pairs.
cases = int(input())
for _ in range(cases):
    count = int(input())
    values = list(map(int, input().split()))
    best = 0
    for left in range(count):
        for right in range(left + 1, count):
            product = values[left] * values[right]
            digit_total = 0
            while product != 0:
                digit_total += product % 10
                product //= 10
            if best < digit_total:
                best = digit_total
    print(best)
| 362 | 132 |
def foo(a, *b):
    """Placeholder: accepts one positional argument plus varargs; does nothing."""
    return None
| 25 | 13 |
# coding: utf-8
"""
JDX reference application API
This is a collection of schemas and endpoints for the various JDX, Concentric Sky facing REST endpoints, the schemas define an API contract of sorts between the request and response expectations of the JDX reference application. This API is to be mutually developed by Concentric Sky and BrightHive. # noqa: E501
The version of the OpenAPI document: 0.0.17
Contact: engineering@brighthive.io
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class ValidationError(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'message': 'str',
'status_code': 'int',
'validation_errors': 'list[ValidationErrorValidationErrors]'
}
attribute_map = {
'message': 'message',
'status_code': 'statusCode',
'validation_errors': 'validationErrors'
}
def __init__(self, message=None, status_code=None, validation_errors=None): # noqa: E501
"""ValidationError - a model defined in OpenAPI""" # noqa: E501
self._message = None
self._status_code = None
self._validation_errors = None
self.discriminator = None
if message is not None:
self.message = message
if status_code is not None:
self.status_code = status_code
if validation_errors is not None:
self.validation_errors = validation_errors
@property
def message(self):
"""Gets the message of this ValidationError. # noqa: E501
:return: The message of this ValidationError. # noqa: E501
:rtype: str
"""
return self._message
@message.setter
def message(self, message):
"""Sets the message of this ValidationError.
:param message: The message of this ValidationError. # noqa: E501
:type: str
"""
if message is not None and len(message) > 1024:
raise ValueError("Invalid value for `message`, length must be less than or equal to `1024`") # noqa: E501
self._message = message
@property
def status_code(self):
"""Gets the status_code of this ValidationError. # noqa: E501
A code identifying the message response. A code of `1` indicates success. # noqa: E501
:return: The status_code of this ValidationError. # noqa: E501
:rtype: int
"""
return self._status_code
@status_code.setter
def status_code(self, status_code):
"""Sets the status_code of this ValidationError.
A code identifying the message response. A code of `1` indicates success. # noqa: E501
:param status_code: The status_code of this ValidationError. # noqa: E501
:type: int
"""
if status_code is not None and status_code > 9999: # noqa: E501
raise ValueError("Invalid value for `status_code`, must be a value less than or equal to `9999`") # noqa: E501
if status_code is not None and status_code < -1: # noqa: E501
raise ValueError("Invalid value for `status_code`, must be a value greater than or equal to `-1`") # noqa: E501
self._status_code = status_code
@property
def validation_errors(self):
"""Gets the validation_errors of this ValidationError. # noqa: E501
:return: The validation_errors of this ValidationError. # noqa: E501
:rtype: list[ValidationErrorValidationErrors]
"""
return self._validation_errors
@validation_errors.setter
def validation_errors(self, validation_errors):
    """Set the list of individual validation errors.

    :param validation_errors: The validation_errors of this ValidationError.
    :type: list[ValidationErrorValidationErrors]
    """
    self._validation_errors = validation_errors
def to_dict(self):
    """Return the model properties as a dict.

    Values with a ``to_dict`` method (nested models) are serialized
    recursively one level deep, inside lists and dict values as well.
    """
    result = {}
    for attr in self.openapi_types:
        value = getattr(self, attr)
        if isinstance(value, list):
            result[attr] = [
                item.to_dict() if hasattr(item, "to_dict") else item
                for item in value
            ]
        elif hasattr(value, "to_dict"):
            result[attr] = value.to_dict()
        elif isinstance(value, dict):
            result[attr] = {
                key: val.to_dict() if hasattr(val, "to_dict") else val
                for key, val in value.items()
            }
        else:
            result[attr] = value
    return result
def to_str(self):
    """Return the pretty-printed string form of the model's dict."""
    return pprint.pformat(self.to_dict())
def __repr__(self):
    """Delegate to to_str() so `print` and `pprint` show the model."""
    return self.to_str()
def __eq__(self, other):
    """Return True when *other* is a ValidationError with identical state."""
    return isinstance(other, ValidationError) and self.__dict__ == other.__dict__
def __ne__(self, other):
    """Return True when the two objects are not equal."""
    return not self.__eq__(other)
| 5,686 | 1,641 |
# Some say that every one year of a human's life is equivalent to seven years of a dog's life.
# Write a function named dog_years() that has two parameters named name and age.
# The function should compute the age in dog years and return the following string:
# "{name}, you are {age} years old in dog years"
def dog_years(name, age):
    """Return "<name>, you are <7*age> years old in dog years".

    One human year counts as seven dog years.

    :param name: person's name
    :param age: age in human years
    :return: formatted sentence with the age converted to dog years
    """
    # f-string replaces the fragile manual concatenation with str().
    return f"{name}, you are {7 * age} years old in dog years"
| 435 | 125 |
"""Constants for the Matt's Gadgets Ceiling Fan integration."""
DOMAIN = "matts_gadgets_ceiling_fan"
| 102 | 38 |
from spaceone.repository.manager.repository_manager import RepositoryManager
class LocalRepositoryManager(RepositoryManager):
    # Manager for the single built-in "local" repository type.

    def register_repository(self, params):
        """
        Create (register) the local repository record.

        Args:
            params:
                - name
                - repository_type: local

        Returns:
            The created repository model instance.
        """
        # Assume there is only one local repository
        return self.repo_model.create(params)
| 393 | 91 |
#!/usr/bin/env python
"""
_AddToFileset_
MySQL implementation of Files.AddToFileset
"""
import time
from WMCore.Database.DBFormatter import DBFormatter
class AddToFileset(DBFormatter):
    """MySQL DAO: attach files (looked up by LFN) to a fileset and mark
    them available to all subscriptions on that fileset."""

    sql = """INSERT IGNORE INTO wmbs_fileset_files (fileid, fileset, insert_time)
             SELECT wmbs_file_details.id, :fileset, :insert_time
             FROM wmbs_file_details
             WHERE wmbs_file_details.lfn = :lfn
             """

    sqlAvail = """INSERT IGNORE INTO wmbs_sub_files_available (subscription, fileid)
                  SELECT wmbs_subscription.id AS subscription,
                         wmbs_file_details.id AS fileid FROM wmbs_subscription
                  INNER JOIN wmbs_file_details ON
                    wmbs_file_details.lfn = :lfn
                  WHERE wmbs_subscription.fileset = :fileset
                  """

    def execute(self, file=None, fileset=None, conn=None,
                transaction=False):
        """Insert every LFN in *file* into *fileset*, then refresh the
        per-subscription availability table.

        :param file: iterable of LFN strings
        :param fileset: id of the fileset the files are added to
        :param conn: optional existing database connection
        :param transaction: whether to run inside an open transaction
        """
        # One timestamp for the whole batch keeps insert_time consistent.
        stamp = int(time.time())
        binds = [{"lfn": lfn, "fileset": fileset, "insert_time": stamp}
                 for lfn in file]
        availBinds = [{"lfn": lfn, "fileset": fileset} for lfn in file]

        self.dbi.processData(self.sql, binds, conn=conn,
                             transaction=transaction)
        self.dbi.processData(self.sqlAvail, availBinds, conn=conn,
                             transaction=transaction)
        return
| 1,522 | 441 |
import module_support_main
import pandas as pd
import mrr
import numpy as np
import settings_main as settings
import get_ranks
def evaluate_results_from_files(submission_path, gt_path, species_map_path):
    """Score a submission CSV against ground-truth labels and print the MRR.

    :param submission_path: path to the ';'-separated submission CSV
    :param gt_path: path to the .npy ground-truth array
    :param species_map_path: accepted for interface compatibility (unused here)
    """
    print("Evaluate submission...")
    print("Load data...")
    submission_df = pd.read_csv(submission_path, sep=';')
    ground_truth = np.load(gt_path)

    print("Calculate MRR-Score...")
    rank_values = get_ranks.get_ranks_df(submission_df, ground_truth,
                                         settings.TOP_N_SUBMISSION_RANKS)
    mrr_score = mrr.mrr_score(rank_values)
    print("MRR-Score:", mrr_score * 100, "%")
| 537 | 200 |
from flask import Flask, Blueprint, g, request
from src.model import Post, File
from src.db import db
from src.Middlewares.AuthMiddleware import *
from werkzeug.utils import secure_filename
import os
import dateutil.parser as dt
post_bp = Blueprint('post', __name__, url_prefix='/posts')
def respond(data, code):
    """Build a success response carrying *data* with HTTP status *code*."""
    return Responder().respond(data, code)
def respond_error(msg, code):
    """Build an error response carrying *msg* with HTTP status *code*."""
    return Responder().respond_error(msg, code)
@post_bp.route('/test')
def test():
    """Smoke-test endpoint for the posts blueprint."""
    # Bug fix: removed dead `access_token = token_urlsafe(40)` — the value
    # was never used.
    return 'post ok!'
@post_bp.route('/create', methods=['POST'])
@check_auth
def _create_post():
    """Create a post owned by the authenticated user from the JSON body."""
    body = request.json
    print(body)

    pub_at = dt.parse(body['pub_at'])
    new_post = Post(
        title=body['title'],
        content=body['content'],
        images=body['images'],
        pub_at=pub_at,
        user=g.user.id,
    )

    with db.atomic() as tx:
        try:
            new_post.save()
            return respond(new_post.to_dict(), 201)
        except Exception as e:
            print(e)
            return respond_error(str(e), 500)
@post_bp.route('/', methods=['GET'])
def _get_all_posts():
    """Return every post as a JSON list. Public endpoint (no auth)."""
    posts = [p.to_dict() for p in Post.select()]
    # Bug fix: a successful GET should answer 200 OK, not 201 Created.
    return respond(posts, 200)
@post_bp.route('/<id>', methods=['GET'])
@check_auth
def _get_by_id(id):
    """Return a single post by id, or 404 when it does not exist."""
    # Robustness: get_by_id raises (-> 500) on a missing row; use
    # get_or_none and answer 404, matching delete_post below.
    post = Post.get_or_none(Post.id == id)
    if post is None:
        return respond_error("POST NOT FOUND", 404)
    # Bug fix: a successful GET should answer 200 OK, not 201 Created.
    return respond(post.to_dict(), 200)
@post_bp.route('/<id>', methods=['delete'])
@check_auth
def delete_post(id):
    """Delete a post; only its owner may do so.

    Returns the deleted post's data on success, 404 for a missing post,
    403 when the requester is not the owner, 500 on a database error.
    """
    post = Post.get_or_none(Post.id == id)
    if post is None:
        return respond_error("POST NOT FOUND", 404)
    if post.user.id != g.user.id:
        # Bug fix: not-the-owner is an authorization failure -> 403, not 404.
        return respond_error("UNAUTHORIZED USER", 403)
    with db.atomic() as tx:
        try:
            # Snapshot the payload before the row disappears.
            deleted = post.to_dict()
            Post.delete().where(Post.id == post.id).execute()
            # Bug fix: 200 OK for a successful delete (201 is for creation).
            return respond(deleted, 200)
        except Exception as e:
            return respond_error(str(e), 500)
| 2,210 | 754 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 15 14:24:28 2020
@author: twguest
"""
import os
import numpy as np
from time import time
from wpg import srwlib
from felpy.model.tools import radial_profile
#from wpg.wpg_uti_wf import get_axis
from tqdm import tqdm
from felpy.utils.job_utils import JobScheduler
#import wpg.srwlib as srwl
from wpg.srw import srwlpy as srwl
from felpy.utils.np_utils import memory_map, readMap
import multiprocessing as mp
from functools import partial
from scipy.sparse import csr_matrix
from felpy.model.materials.mirror_surface import binArray
def get_longitudinal_coherence(slice_no, cfr, map_loc = None, bins = 1, VERBOSE = True):
"""
Calculate the longitudinal correlation of each slice of a complex wavefront
of shape [nx, ny, nz] against a single slice of shape [nx,ny] at longitudinal
interval defined by the slice_no
:param cfr: complex wavefield
:param slice_no: longitudinal index [int]
:returns g: complex degree of coherence
"""
A = np.roll(cfr, -slice_no, axis = 2)
B = np.repeat(cfr[:,:,slice_no][:, :, np.newaxis], cfr.shape[-1], axis=-1)
## DEGUB print(A[:,:,0] == wfr[:,:,i])
## DEBUG print([B[:,:,k] == wfr[:,:,i] for k in range(wfr.shape[-1])])
if map_loc is not None:
mmap = memory_map(map_loc,
shape = cfr.shape,
dtype = 'complex64')
mmap[:,:,slice_no] = ((A*B.conjugate()).mean(axis = -1))/np.sqrt(
(abs(A)**2).mean(axis=-1)*(abs(B)**2).mean(axis = -1))
else:
return ((A*B.conjugate()).mean(axis = -1))/np.sqrt(
(abs(A)**2).mean(axis=-1)*(abs(B)**2).mean(axis = -1))
def get_longitudinal_coherence_new(slice_no, cfr, map_loc=None, bins=1, VERBOSE=True):
    """
    Calculate the longitudinal correlation of each slice of a complex
    wavefront of shape [nx, ny, nz] against the single slice at index
    *slice_no*.

    This body was a line-for-line duplicate of get_longitudinal_coherence;
    it now delegates to it so the two cannot drift apart. The name is kept
    for backward compatibility with existing callers.

    :param slice_no: longitudinal index [int]
    :param cfr: complex wavefield
    :returns g: complex degree of coherence (None when map_loc is given)
    """
    return get_longitudinal_coherence(slice_no, cfr, map_loc=map_loc,
                                      bins=bins, VERBOSE=VERBOSE)
def get_coherence_time_new(cfr, tStep, mpi=False, map_loc="/tmp/coherence_map",
                           bins=1, VERBOSE=True):
    """
    Calculate the coherence time of a complex wavefield of shape
    [nx, ny, nt]. Relevant for statistically stationary sources.

    ref: Coherence properties of the radiation from X-ray free electron laser

    :param cfr: complex wavefield
    :param tStep: temporal step between slices [s]
    :param mpi: if True, spread slice correlations over a process pool
    :param map_loc: path of the on-disk memory map shared with workers
    :param bins: longitudinal rebinning factor (1 = no rebinning)
    :param VERBOSE: print progress and the resulting coherence time

    :returns tau: coherence time [s]
    """
    # Shared on-disk buffer; worker processes write slice results here.
    mmap = memory_map(map_loc=map_loc,
                      shape=cfr.shape,
                      dtype='complex64')
    nz0 = cfr.shape[-1]
    if bins == 1:
        nz1 = nz0
    else:
        # Rebin along the time axis and stretch the time step to match.
        cfr = binArray(cfr, axis=-1, binstep=nz0 // bins, binsize=1)
        nz1 = cfr.shape[-1]
        tStep *= (nz0 / nz1)
    g = np.zeros([*cfr.shape], dtype='complex64')
    if VERBOSE:
        print("Calculating Coherence Time")
    if mpi:
        processes = mp.cpu_count() // 2
        pool = mp.Pool(processes)
        # Workers write into the memory map; results are read back below.
        pool.map(partial(get_longitudinal_coherence, cfr=cfr, map_loc=map_loc),
                 range(cfr.shape[-1]))
        g = readMap(map_loc, cfr.shape, dtype='complex64')
    else:
        for i in tqdm(range(cfr.shape[-1])):
            g[:, :, i] = get_longitudinal_coherence(slice_no=i, cfr=cfr)
    # Sum of |g|^2 over delay, taken at transverse pixel [0, 0].
    # NOTE(review): pixel [0, 0] is a corner, not the beam centre —
    # confirm which transverse position the coherence time is meant for.
    tau = (abs(g) ** 2).sum(axis=-1)[0, 0]
    if VERBOSE:
        print("\n")
        print(tau)
        print("Time Step: {} fs".format(tStep * 1e15))
        print("Coherence Time: {:.2e} fs".format(tau * 1e15 * tStep))
    # Clean up the scratch memory map.
    del mmap
    os.remove(map_loc)
    return tau * tStep
def get_coherence_time(cfr, tStep, mpi=False, map_loc="/tmp/coherence_map",
                       bins=1, VERBOSE=True):
    """
    Calculate the coherence time of a complex wavefield of shape
    [nx, ny, nt]. Relevant for statistically stationary sources.

    ref: Coherence properties of the radiation from X-ray free electron laser

    NOTE(review): near-duplicate of get_coherence_time_new; this variant
    additionally returns the correlation array `g` — consider merging.

    :param cfr: complex wavefield
    :param tStep: temporal step between slices [s]
    :param mpi: if True, spread slice correlations over a process pool
    :param map_loc: path of the on-disk memory map shared with workers
    :param bins: longitudinal rebinning factor (1 = no rebinning)
    :param VERBOSE: print progress and the resulting coherence time

    :returns: (tau, g) — coherence time [s] and the complex correlations
    """
    # Shared on-disk buffer; worker processes write slice results here.
    mmap = memory_map(map_loc=map_loc,
                      shape=cfr.shape,
                      dtype='complex64')
    nz0 = cfr.shape[-1]
    if bins == 1:
        nz1 = nz0
    else:
        # Rebin along the time axis and stretch the time step to match.
        cfr = binArray(cfr, axis=-1, binstep=nz0 // bins, binsize=1)
        nz1 = cfr.shape[-1]
        tStep *= (nz0 / nz1)
    g = np.zeros([*cfr.shape], dtype='complex64')
    if VERBOSE:
        print("Calculating Coherence Time")
    if mpi:
        processes = mp.cpu_count() // 2
        pool = mp.Pool(processes)
        # Workers write into the memory map; results are read back below.
        pool.map(partial(get_longitudinal_coherence, cfr=cfr, map_loc=map_loc),
                 range(cfr.shape[-1]))
        g = readMap(map_loc, cfr.shape, dtype='complex64')
    else:
        for i in tqdm(range(cfr.shape[-1])):
            g[:, :, i] = get_longitudinal_coherence(slice_no=i, cfr=cfr)
    # Sum of |g|^2 over delay, taken at transverse pixel [0, 0].
    tau = (abs(g) ** 2).sum(axis=-1)[0, 0]
    # NOTE(review): leftover debug print — consider removing.
    print("g", np.max(g))
    if VERBOSE:
        print("\n")
        print(tau)
        print("Time Step: {} fs".format(tStep * 1e15))
        print("Coherence Time: {:.2e} fs".format(tau * 1e15 * tStep))
    # Clean up the scratch memory map.
    del mmap
    os.remove(map_loc)
    return tau * tStep, g
def get_coherence_time_wpg(wfr, mpi=False, VERBOSE=True):
    """Compute the coherence time directly from a WPG wavefront object.

    :param wfr: WPG wavefront
    :param mpi: forwarded to get_coherence_time
    :returns: result of get_coherence_time — a (tau, g) tuple here
    """
    # Switch the stored electric field to the time domain before slicing.
    srwl.SetRepresElecField(wfr._srwl_wf, 't')
    # Temporal sampling interval derived from the mesh extent.
    time_step = (wfr.params.Mesh.sliceMax - wfr.params.Mesh.sliceMin) / wfr.params.Mesh.nSlices
    return get_coherence_time(wfr.as_complex_array(), time_step, mpi=mpi)
def get_coherence_len(wfr, dx, dy, VERBOSE=True):
    """
    Calculate the transverse coherence length of a complex wavefield of
    shape [nx, ny, nz].

    :param wfr: complex wavefield
    :param dx: horizontal pixel size
    :param dy: vertical pixel size
    :param VERBOSE: print the resulting coherence length

    :returns clen: coherence length [m]; np.inf when the degree of
        coherence never drops below 0.5 past the centre
    """
    profile, r = get_complex_radial_profile(wfr)

    nt = wfr.shape[-1]
    # Time-averaged mutual coherence function of the radial profiles.
    J = np.dot(profile, profile.T.conjugate()) / nt
    II = np.abs(np.diag(J))  # intensity as the main diagonal
    # Bug fix: removed stray debug `print(J.shape)`.
    # Normalise to the complex degree of coherence.
    J /= II ** 0.5 * II[:, np.newaxis] ** 0.5
    Jd = np.abs(np.diag(np.fliplr(J)))  # DoC as the cross-diagonal

    # First index past the centre where |DoC| falls below 0.5.
    lm = np.arange(Jd.shape[0])
    lm = lm[(lm >= Jd.shape[0] // 2) & (Jd[lm] < 0.5)]
    rstep = np.sqrt((dx) ** 2 + (dy) ** 2)
    try:
        lm = lm[0] - Jd.shape[0] // 2
    except(IndexError):
        lm = np.inf

    clen = lm * rstep
    if VERBOSE:
        print("Radial Coherence Length: {:.2f} um".format(clen * 1e6))
    return clen
def get_transverse_doc(wfr, VERBOSE=True):
    """
    Compute the transverse degree of coherence of the wavefront from the
    time-averaged mutual coherence function of its radial profiles.
    """
    profile, _ = get_complex_radial_profile(wfr)
    n_slices = wfr.shape[-1]

    mcf = np.dot(profile, profile.T.conjugate()) / n_slices
    tdoc = np.diag(np.dot(mcf, mcf)).sum() / np.diag(mcf).sum() ** 2

    if VERBOSE:
        print("Transverse Degree of Coherence: {:.4f}".format(tdoc.real))
    return tdoc
def get_complex_radial_profile(wfr):
    """
    Calculate the radial profile of a complex array by azimuthal averaging:

    I_{radial}(R) = \int_0^R \frac{I(r)2\pi r}{\pi R^2} dr

    :param wfr: complex wavefield [np array]
    :returns: (prof, r) — radial profile per slice and the signed radius axis
    """
    center = [wfr.shape[0] // 2, wfr.shape[1] // 2]

    # Signed radial axis: negative on one side of the centre.
    r = np.diag(radial_profile(wfr[:, :, 0].real, center)[1]).copy()
    r[:r.shape[0] // 2] *= -1

    # Average real and imaginary parts separately, then recombine.
    slices = []
    for idx in range(wfr.shape[-1]):
        real_prof = radial_profile(wfr[:, :, idx].real, center)[0]
        imag_prof = radial_profile(wfr[:, :, idx].imag, center)[0]
        slices.append(real_prof + imag_prof * 1j)

    prof = np.moveaxis(np.stack(slices), 0, -1)
    return prof, r
def coherent_test(wfr):
    """Compute coherence time, coherence length and transverse DoC of *wfr*.

    :param wfr: WPG wavefront object
    :returns: (tau, clen, tdoc)
    """
    # Bug fix: removed dead `tstep = get_axis(wfr, axis='t')` — the
    # get_axis import is commented out above (it would raise NameError)
    # and the value was immediately overwritten anyway.
    tstep = wfr.get_temporal_resolution()
    xstep, ystep = wfr.get_spatial_resolution()

    wfr = wfr.as_complex_array()

    # NOTE(review): get_coherence_time returns (tau, g); `tau` here is
    # therefore a tuple — confirm whether only tau[0] is intended.
    tau = get_coherence_time(wfr, tstep, VERBOSE=True)
    clen = get_coherence_len(wfr, xstep, ystep, VERBOSE=True)
    tdoc = get_transverse_doc(wfr, VERBOSE=True)

    return tau, clen, tdoc
if __name__ == "__main__":
pass | 9,349 | 3,656 |
import threading
import time
# Shared product buffer and the condition variable that guards it;
# producer/consumer threads wait() and notify() on `condition`.
products = []
condition = threading.Condition()
class consumer(threading.Thread):
    """Consumer thread: removes one product at a time from the shared list."""

    def consume(self):
        """Take one product, waiting on the condition while none exist."""
        global condition
        global products

        condition.acquire()
        if len(products) == 0:
            condition.wait()
            print('consumer is notified: no product to consume')
        products.pop()
        print("consumer notification: consume 1 product")
        # Bug fix: the original concatenated str + int
        # ('there are ' + len(products)), which raises TypeError at runtime;
        # convert the count to str first.
        print('consumer notification: there are ' + str(len(products)) + " left that can be consume")
        condition.notify()
        condition.release()

    def run(self):
        # Attempt 20 consumptions, pausing 4 s between attempts.
        for i in range(0, 20):
            time.sleep(4)
            self.consume()
class Producer(threading.Thread):
def produce(self):
global condition
global products
condition.acquire()
if len(products) == 10:
condition.wait()
print('consumer notified') | 911 | 238 |
import pytest # noqa: F401
from meltano.core.utils import flatten, nest, pop_at_path, set_at_path
def test_nest():
    """nest() returns the live dict/list at a dotted path, creating levels
    on demand, and stores a *copy* of any provided default value."""
    subject = {}

    one_deep = nest(subject, "a.b")
    one_deep["val"] = 1
    assert one_deep == {"val": 1}

    two_deep = nest(subject, "a.b.c")
    two_deep["val"] = 2
    assert one_deep == {"val": 1, "c": {"val": 2}}

    arr = nest(subject, "a.list", value=[])

    start_value = {"value": 1}
    val = nest(subject, "a.value", value=start_value)

    # Returned containers are the live objects stored in `subject`.
    assert subject["a"]["b"] is one_deep
    assert subject["a"]["b"]["c"] is two_deep
    assert isinstance(arr, list)

    # make sure it is a copy
    assert val == start_value and val is not start_value
def test_pop_at_path():
    """pop_at_path() removes the value at a (dotted or listed) key path and
    prunes any dict levels left empty."""
    subject = {}
    pop_at_path(subject, "a.b.c")
    assert not subject

    # Removing the only leaf prunes the whole branch.
    subject = {"a": {"b": {"c": "value"}}}
    pop_at_path(subject, "a.b.c")
    assert not subject

    # A list path treats "b.c" as a single literal key.
    subject = {"a": {"b.c": "value"}}
    pop_at_path(subject, ["a", "b.c"])
    assert not subject

    # Only empty levels are pruned; siblings survive.
    subject = {"a": {"b": {"c": "value", "d": "value"}, "e": "value"}}
    pop_at_path(subject, "a.b.c")
    assert subject == {"a": {"b": {"d": "value"}, "e": "value"}}

    pop_at_path(subject, "a.b.d")
    assert subject == {"a": {"e": "value"}}

    pop_at_path(subject, "a.e")
    assert not subject
def test_set_at_path():
    """set_at_path() writes a value at a (dotted or listed) key path,
    creating or overwriting intermediate dicts as needed."""
    subject = {}

    set_at_path(subject, "a.b.c", "value")
    assert subject == {"a": {"b": {"c": "value"}}}

    set_at_path(subject, "a.b.d", "value")
    assert subject == {"a": {"b": {"c": "value", "d": "value"}}}

    # Setting a non-dict at an intermediate path replaces the subtree.
    set_at_path(subject, "a.b", "value")
    assert subject == {"a": {"b": "value"}}

    set_at_path(subject, "a.b", "newvalue")
    assert subject == {"a": {"b": "newvalue"}}

    set_at_path(subject, "a.b.c", "value")
    assert subject == {"a": {"b": {"c": "value"}}}

    # A list path treats "d.e" as a single literal key.
    set_at_path(subject, ["a", "d.e"], "value")
    assert subject == {"a": {"b": {"c": "value"}, "d.e": "value"}}
def test_flatten():
    """flatten() in "dot" mode joins nested keys into dotted paths."""
    nested = {"_update": {"orchestrate/dags/meltano.py": False}}
    expected = {"_update.orchestrate/dags/meltano.py": False}
    assert flatten(nested, "dot") == expected
| 2,168 | 844 |
import pandas as pd
from phc.easy.frame import Frame
from phc.easy.abstract.fhir_service_patient_item import FhirServicePatientItem
class DiagnosticReport(FhirServicePatientItem):
    """Accessor for FHIR DiagnosticReport records: table mapping plus
    column-expansion rules for result frames."""

    @staticmethod
    def table_name():
        """Backing table name for this item type."""
        return "diagnostic_report"

    @staticmethod
    def patient_id_prefixes():
        """Reference prefixes stripped when extracting patient ids."""
        return ["Patient/", "urn:uuid:"]

    @staticmethod
    def patient_key():
        """Column that links a report row to its patient."""
        return "subject.reference"

    @staticmethod
    def code_fields():
        """Columns treated as code-bearing fields."""
        return ["meta.tag"]

    @staticmethod
    def transform_results(df: pd.DataFrame, **expand_args):
        """Expand codeable-like columns of the raw frame, keeping any
        caller-provided custom_columns first."""
        return Frame.expand(
            df,
            custom_columns=[
                *expand_args.get("custom_columns", []),
                Frame.codeable_like_column_expander("subject"),
                Frame.codeable_like_column_expander("presentedForm"),
                Frame.codeable_like_column_expander("result"),
            ],
        )
| 927 | 267 |
import torch
from feathermap.utils import timed
from math import sqrt
# Benchmark fixtures: tall/wide rectangular pairs (A, B), large square
# matrices (C, D), a vector-shaped pair (E, F) and small square pair (G, H).
dim_in = 2 ** 14
dim_out = 2 ** 4
A = torch.randn(dim_in, dim_out)
B = torch.randn(dim_out, dim_in)
C = torch.rand(dim_in, dim_in)
D = torch.rand(dim_in, dim_in)
E = torch.rand(1, dim_out)
F = torch.rand(dim_out, dim_in)
G = torch.rand(int(sqrt(dim_in)), int(sqrt(dim_in)))
H = torch.rand(int(sqrt(dim_in)), int(sqrt(dim_in)))
@timed
def mam(a, b):
    """Benchmark torch.mm by repeating the same product 10000 times.

    Returns the last product so the work is observable.
    """
    for _ in range(10000):
        out = torch.mm(a, b)
    return out
def loop(a, b):
    """Yield each entry of a @ b, row-major, via per-entry dot products."""
    n_rows = a.size(0)
    n_cols = b.size(1)
    for r in range(n_rows):
        row = a[r, :]
        for c in range(n_cols):
            yield row @ b[:, c]
def loop2(a, b):
    """Yield the constant 1 once per entry of the would-be product
    (loop-overhead baseline for the benchmark)."""
    total = a.size(0) * b.size(1)
    for _ in range(total):
        yield 1
def tmm(a, b):
    """Return an iterator over the entries of a @ b (as 1-element rows)."""
    flattened = torch.mm(a, b).view(-1, 1)
    return iter(flattened)
@timed
def run(c, dim_in):
    """Drain iterator `c` element-by-element into a pre-sized tensor
    (measures per-element consumption cost)."""
    d = torch.empty(dim_in ** 2)
    for i in range(d.numel()):
        d[i] = next(c)
# Timed runs: vector-shaped vs small-square matrix products.
mam(E, F)  # about 23% faster
mam(G, H)
# Earlier timings (ms) for the iterator-drain variants:
# run(loop(A, B), dim_in)  # 739
# run(loop2(A, B), dim_in)  # 254
# run(tmm(A, B), dim_in)  # 289
| 1,048 | 510 |
from django.contrib import admin
from django.urls import path
from App import views
app_name = 'App'

urlpatterns = [
    path('', views.home, name='home'),
    # Create / update / delete operations
    path('cud/', views.handle_data, name='handle_data'),
    # Query / search
    path('search/', views.find_data, name='search'),
    # Raw SQL usage
    path('rawsql/', views.raw_sql, name='raw_sql'),
    # Custom model manager demo (use only if you need it)
    path('manager/', views.my_manager, name='my_manager'),
    # Registration page
    path('register/', views.handle_register, name='register'),
    # Login page
    path('login/', views.handle_login, name='login'),
    # Show user information
    path('show/', views.show_msg, name='show'),
    # Movie listing
    path('movie/', views.show_movie, name='movie'),
    # Django's built-in pagination
    path('movie_page/', views.show_movie_page, name='movie_page'),
]
| 785 | 313 |
#!/usr/bin/env python
#
# Copyright (c) 2020 Intel Corporation
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
#
"""
Agent for Walker
"""
import math
from nav_msgs.msg import Path, Odometry
from std_msgs.msg import Float64
from geometry_msgs.msg import Pose, Vector3
from carla_msgs.msg import CarlaWalkerControl
from ros_compatibility import (
CompatibleNode,
QoSProfile,
ros_ok,
ROSInterruptException,
ros_init)
import os
# Select ROS1/ROS2-specific imports based on the environment.
ROS_VERSION = int(os.environ['ROS_VERSION'])

if ROS_VERSION == 1:
    import rospy
elif ROS_VERSION == 2:
    # time.sleep substitutes for rclpy.Rate; threading runs the spinner.
    import time
    import threading
class CarlaWalkerAgent(CompatibleNode):
    """
    Waypoint-following agent for a CARLA pedestrian (walker): subscribes to
    odometry, a waypoint path and a target speed, and publishes walker
    control commands toward the next waypoint.
    """
    # minimum distance to target waypoint before switching to next
    MIN_DISTANCE = 0.5

    def __init__(self):
        """
        Constructor
        """
        super(CarlaWalkerAgent, self).__init__('carla_walker_agent')

        role_name = self.get_param("role_name", "ego_vehicle")
        self._target_speed = self.get_param("target_speed", 2.0)

        self._route_assigned = False
        self._waypoints = []
        self._current_pose = Pose()
        self.on_shutdown(self._on_shutdown)

        # wait for ros bridge to create relevant topics
        try:
            self.wait_for_one_message(
                "/carla/{}/odometry".format(role_name), Odometry)
        except ROSInterruptException as e:
            # Bug fix: ros_ok is a function; the original tested the function
            # object itself (`not ros_ok`), which is always False, so the
            # interrupt was silently swallowed even when ROS was still up.
            if not ros_ok():
                raise e

        self._odometry_subscriber = self.create_subscriber(
            Odometry, "/carla/{}/odometry".format(role_name), self.odometry_updated)

        self.control_publisher = self.new_publisher(
            CarlaWalkerControl, "/carla/{}/walker_control_cmd".format(role_name),
            QoSProfile(depth=1, durability=False))

        self._route_subscriber = self.create_subscriber(
            Path, "/carla/{}/waypoints".format(role_name), self.path_updated)

        self._target_speed_subscriber = self.create_subscriber(
            Float64, "/carla/{}/target_speed".format(role_name), self.target_speed_updated)

    def _on_shutdown(self):
        """
        callback on shutdown
        """
        self.loginfo("Shutting down, stopping walker...")
        self.control_publisher.publish(CarlaWalkerControl())  # stop

    def target_speed_updated(self, target_speed):
        """
        callback on new target speed
        """
        self.loginfo("New target speed received: {}".format(target_speed.data))
        self._target_speed = target_speed.data

    def path_updated(self, path):
        """
        callback on new route
        """
        self.loginfo("New plan with {} waypoints received. Assigning plan...".format(
            len(path.poses)))
        self.control_publisher.publish(CarlaWalkerControl())  # stop
        self._waypoints = []
        for elem in path.poses:
            self._waypoints.append(elem.pose)

    def odometry_updated(self, odo):
        """
        callback on new odometry
        """
        self._current_pose = odo.pose.pose

    def run(self):
        """
        Control loop: steer toward the first waypoint, dropping it once the
        walker is within MIN_DISTANCE.

        :return:
        """
        loop_frequency = 20
        if ROS_VERSION == 1:
            r = rospy.Rate(loop_frequency)

        self.loginfo("Starting run loop")
        while ros_ok():
            if self._waypoints:
                control = CarlaWalkerControl()
                direction = Vector3()
                direction.x = self._waypoints[0].position.x - self._current_pose.position.x
                direction.y = self._waypoints[0].position.y - self._current_pose.position.y
                direction_norm = math.sqrt(direction.x**2 + direction.y**2)
                if direction_norm > CarlaWalkerAgent.MIN_DISTANCE:
                    # Normalised heading at the configured speed.
                    control.speed = self._target_speed
                    control.direction.x = direction.x / direction_norm
                    control.direction.y = direction.y / direction_norm
                else:
                    # Waypoint reached: advance to the next one.
                    self._waypoints = self._waypoints[1:]
                    if self._waypoints:
                        self.loginfo("next waypoint: {} {}".format(
                            self._waypoints[0].position.x, self._waypoints[0].position.y))
                    else:
                        self.loginfo("Route finished.")
                self.control_publisher.publish(control)
            try:
                if ROS_VERSION == 1:
                    r.sleep()
                elif ROS_VERSION == 2:
                    # TODO: use rclpy.Rate, not working yet
                    time.sleep(1 / loop_frequency)
            except ROSInterruptException:
                pass
def main(args=None):
    """
    main function

    Initializes ROS, creates the walker agent and runs its control loop.

    :return:
    """
    ros_init(args)
    controller = CarlaWalkerAgent()
    if ROS_VERSION == 2:
        # ROS2 needs an explicit spinner; run it on a daemon thread so the
        # control loop can own the main thread.
        spin_thread = threading.Thread(target=controller.spin, daemon=True)
        spin_thread.start()
    try:
        controller.run()
    finally:
        del controller
    print("Done")


if __name__ == "__main__":
    main()
| 5,098 | 1,520 |
class ExceedLimitException(Exception):pass
| 44 | 13 |
from typing import Any, Dict, List
def validate_config(config: Dict[str, Any]) -> List[str]:
    """Validate a run configuration, returning a list of error messages.

    Checks that the required "input"/"stages" sections exist, that MESA
    configs are declared, and that GYRE settings are present exactly when
    GYRE is enabled (and absent otherwise).
    """
    errors: List[str] = []

    assert_to_list(
        errors,
        "input" in config,
        '[no_input] Could not find "input" section in config. "input" section must be present in order to run MESA or GYRE.',
    )
    assert_to_list(
        errors,
        "stages" in config,
        '[no_stages] Could not find "stages" section in config. "stages" section must be present in order to run MESA or GYRE.',
    )
    assert_to_list(
        errors,
        nested_in(config, ["input", "mesa_configs"]),
        '[no_mesa_configs] Could not find "mesa_configs" setting in "input" section in config. The "mesa_configs" setting must be present in order to run MESA.',
    )

    if should_run_gyre(config):
        assert_to_list(
            errors,
            nested_in(config, ["stages", "gyre_params"]),
            '[no_gyre_params] Could not find "gyre_params" setting in "stages" section of config. GYRE is set to run, but needs this setting to know what value combinations to try.',
        )
    else:
        # GYRE settings must not appear when GYRE is not set to run.
        gyre_missing_msg = '[gyre_not_enabled] Found "{}" setting in "{}" section of config even though GYRE is not enabled. "gyre_config" in the "input" section must be specified in order to run GYRE.'
        forbidden_settings = [
            ("output", "gyre_oscillations_summary_file"),
            ("settings", "gyre_location"),
            ("settings", "gyre_mp_threads"),
            ("stages", "gyre_params"),
            ("stages", "gyre_derived"),
        ]
        for section, setting in forbidden_settings:
            assert_to_list(
                errors,
                not nested_in(config, [section, setting]),
                gyre_missing_msg.format(setting, section),
            )

    return errors
def set_defaults(config: Dict[str, Any]) -> None:
    """Fill in default output/settings values that the config omits."""
    simple_defaults = [
        # Output locations
        (["output", "output_dir"], "out"),
        (["output", "mesa_profile_summary_file"], "mesa_profile_attributes.csv"),
        # Tool locations
        (["settings", "mesa_star_location"], "star"),
        (["settings", "gyre_location"], "$GYRE_DIR/bin/gyre"),
    ]
    for path, default in simple_defaults:
        if not nested_in(config, path):
            nested_put(config, path, default)

    # GYRE thread count falls back to the MESA thread count when only the
    # latter is specified.
    if not nested_in(config, ["settings", "gyre_mp_threads"]) and nested_in(
        config, ["settings", "mesa_mp_threads"]
    ):
        nested_put(
            config,
            ["settings", "gyre_mp_threads"],
            config["settings"]["mesa_mp_threads"],
        )
def assert_to_list(errors: List[str], condition: bool, message: str) -> None:
    """Append *message* to *errors* when *condition* is falsy (guard style)."""
    if condition:
        return
    errors.append(message)
def should_run_gyre(config: Dict[str, Any]) -> bool:
    """Return True when the config enables GYRE (has input.gyre_config)."""
    return nested_in(config, ["input", "gyre_config"])
def nested_in(config: Dict[str, Any], nested_keys: List[str]) -> bool:
    """
    Return True when every key along *nested_keys* exists in *config*,
    walking one level per key; False as soon as any key is missing.

    >>> nested_in({}, ["settings", "gyre_mp_threads"])
    False
    >>> nested_in({"settings": {}}, ["settings", "gyre_mp_threads"])
    False
    >>> nested_in({"settings": {"gyre_mp_threads": 4}}, ["settings", "gyre_mp_threads"])
    True
    """
    node = config
    for key in nested_keys:
        if key not in node:
            return False
        node = node[key]
    return True
def nested_put(config: Dict[str, Any], nested_keys: List[str], value: Any) -> None:
    """
    Store *value* at the nested key path in *config*, creating missing
    intermediate dicts along the way.

    >>> config = {}
    >>> nested_put(config, ["key"], "value")
    >>> config["key"]
    'value'
    >>> config = {}
    >>> nested_put(config, ["settings", "gyre_mp_threads"], 2)
    >>> config["settings"]["gyre_mp_threads"]
    2

    :raises Exception: when *nested_keys* is empty.
    """
    if len(nested_keys) == 0:
        raise Exception("Invalid number of nested keys.")
    # Iterative walk replaces the original recursion; same effect.
    node = config
    for key in nested_keys[:-1]:
        if key not in node:
            node[key] = {}
        node = node[key]
    node[nested_keys[-1]] = value
| 5,125 | 1,606 |
from . import adapters, utils
from .polytropon import VARIANT2CLASS, SkilledMixin | 81 | 31 |
import pytest
from wagtail.core.models import Page
from cdhweb.pages.context_processors import page_intro
from cdhweb.pages.models import LinkPage, PageIntro
@pytest.mark.django_db
def test_page_intro(rf):
    """page_intro() context processor returns the PageIntro matching the
    requested URL, and an empty dict when no intro exists for it."""
    root = Page.objects.get(title="Root")
    link_page = LinkPage(title="Students", link_url="people/students")
    root.add_child(instance=link_page)
    intro = PageIntro.objects.create(
        page=link_page, paragraph="<p>We have great students</p>"
    )

    # should find a page intro for students
    assert page_intro(rf.get("/people/students/")) == {"page_intro": intro}
    # but not for staff
    assert page_intro(rf.get("/people/staff/")) == {}
| 674 | 221 |
def tri_selection(tableau):
    """tri_selection (list(object) -> list(object)): sort a list in place
    in ascending order using selection sort, and return it.

    :param tableau: list of mutually comparable items (mutated in place)
    :return: the same list object, sorted ascending
    """
    taille = len(tableau)  # taille: length of the list
    for i in range(taille):
        # Find the index of the smallest element in tableau[i:].
        min_index = i
        for j in range(i + 1, taille):
            if tableau[j] < tableau[min_index]:
                tableau[j] < tableau[min_index]
                min_index = j
        # Single swap per position. The original swapped every time a
        # smaller element was found (exchange sort), doing many redundant
        # swaps; true selection sort swaps at most once per position.
        if min_index != i:
            tableau[i], tableau[min_index] = tableau[min_index], tableau[i]
    return tableau
print(tri_selection([3,2,1,4,8,4,10,9,8,32,91])) | 641 | 227 |
from argparse import ArgumentParser
import torch
import torch.utils.data
import yaml
from torchvision import transforms
import src.config.config as config
from src.model.nn.dcnet import DCNet
from src.test.predictor import Predictor
from src.test.tester import Tester
from src.train.trainer import Trainer
def train(cfg):
    """Train a DCNet model from the parsed YAML config dict.

    Builds train/validation transforms, initializes the model and
    optimizer/scheduler, then delegates the loop to Trainer.
    """
    # -------------------- data ------------------------
    training_data_transform = transforms.Compose(
        [
            transforms.RandomCrop(cfg["train"]["crop_size"]),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
        ]
    )
    # Validation images are only resized (optionally) and tensorized.
    validation_transformations = [transforms.ToTensor()]
    if cfg["image"]["size_reduction"]:
        validation_transformations.append(transforms.Resize(size=cfg["image"]["max_size"]))
    validation_data_transform = transforms.Compose(validation_transformations)

    # -------------------- model -----------------------
    model = DCNet()
    model.initialize(cfg["basic"]["load_weight"], cfg["basic"]["cuda"])  # define

    # ------------------ training setup -------------------------
    criterion = torch.nn.MSELoss(reduction="mean")
    optimizer = torch.optim.RMSprop(
        model.parameters(),
        lr=cfg["train"]["learning_rate"],
        alpha=cfg["train"]["alpha"],
        momentum=cfg["train"]["momentum"],
    )
    exp_lr_scheduler = torch.optim.lr_scheduler.StepLR(
        optimizer, step_size=cfg["train"]["scheduler_steps"], gamma=cfg["train"]["gamma"]
    )

    model.train()
    trainer = Trainer(
        model=model,
        cuda=cfg["basic"]["cuda"],
        criterion=criterion,
        optimizer=optimizer,
        lr_scheduler=exp_lr_scheduler,
        train_crops=cfg["train"]["crops"],
        crop_size=cfg["train"]["crop_size"],
        epochs=cfg["train"]["epochs"],
        training_dataset_path=cfg["train"]["data_path"],
        validation_dataset_path=cfg["validate"]["data_path"],
        train_transform=training_data_transform,
        valid_transform=validation_data_transform,
        batch_size=cfg["train"]["batch_size"],
        t_low=cfg["env"]["transmission_map"]["low"],
        t_high=cfg["env"]["transmission_map"]["high"],
        atm_light=cfg["env"]["atm_light"],
        t_map_random_sampler=cfg["env"]["transmission_map"]["random_sampler"],
        uint8_transform=cfg["basic"]["uint8_transform"],
        save_path=cfg["output"]["weight_dir"],
    )
    trainer.train()
def test(cfg):
    """Evaluate a trained DCNet model using the parsed YAML config dict."""
    # -------------------- data ------------------------
    transformations = [transforms.ToTensor()]
    if cfg["image"]["size_reduction"]:
        transformations.append(transforms.Resize(size=cfg["image"]["max_size"]))
    data_transform = transforms.Compose(transformations)

    # -------------------- model -----------------------
    model = DCNet()
    model.initialize(cfg["basic"]["load_weight"], cfg["basic"]["cuda"])  # define

    # ------------------ test ---------------------------
    criterion = torch.nn.MSELoss(reduction="mean")
    model.eval()
    tester = Tester(
        model=model,
        cuda=cfg["basic"]["cuda"],
        criterion=criterion,
        test_dataset_path=cfg["test"]["data_path"],
        test_transform=data_transform,
        t_low=cfg["env"]["transmission_map"]["low"],
        t_high=cfg["env"]["transmission_map"]["high"],
        atm_light=cfg["env"]["atm_light"],
        random_sampler=cfg["env"]["transmission_map"]["random_sampler"],
        uint8_transform=cfg["basic"]["uint8_transform"],
    )
    tester.test()
def predict(cfg):
    """Run inference with a trained DCNet model per the parsed YAML config."""
    # -------------------- data ------------------------
    transformations = [transforms.ToTensor()]
    if cfg["image"]["size_reduction"]:
        transformations.append(transforms.Resize(size=cfg["image"]["max_size"]))
    data_transform = transforms.Compose(transformations)

    # -------------------- model -----------------------
    model = DCNet()
    model.initialize(cfg["basic"]["load_weight"], cfg["basic"]["cuda"])  # define

    # -------------------- prediction ------------------
    model.eval()
    predictor = Predictor(
        model=model,
        transform=data_transform,
        dataset=cfg["predict"]["data_path"],
        atm_light=cfg["env"]["atm_light"],
        add_ext_haze=cfg["predict"]["add_ext_haze"],
        t_low=cfg["env"]["transmission_map"]["low"],
        t_high=cfg["env"]["transmission_map"]["high"],
        t_map_random_sampler=cfg["env"]["transmission_map"]["random_sampler"],
        uint8_transform=cfg["predict"]["uint8_transform"],
        cuda=cfg["basic"]["cuda"],
        prediction_dir=cfg["predict"]["save_dir"],
    )
    predictor.predict()
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--train", action="store_true")
parser.add_argument("--test", action="store_true")
parser.add_argument("--predict", action="store_true")
args = parser.parse_args()
with open(config.path["CONFIG_PATH"], "r") as ymlfile:
cfg = yaml.load(ymlfile)
if args.train:
train(cfg)
if args.test:
test(cfg)
if args.predict:
predict(cfg)
| 5,297 | 1,732 |
"""CLI to run commands on MGS server."""
from sys import stderr
import click
from .utils import add_authorization
@click.group()
def run():
    """Run actions on the server."""
    # Top-level click group; concrete actions are registered as sub-groups
    # and commands below.
    pass
@run.group()
def middleware():
    """Run middleware."""
    # Sub-group for middleware-related commands ('group', 'samples').
    pass
@middleware.command(name='group')
@add_authorization()
@click.option('-u/-n', '--uuid/--name', default=False)
@click.argument('group_name')
def group_middleware(uploader, uuid, group_name):
    """Run middleware for a group."""
    # Resolve the group identifier: accept a UUID directly, or look one up
    # from the human-readable name via the server's getid endpoint.
    group_uuid = group_name
    if not uuid:
        lookup = uploader.knex.get(f'/api/v1/sample_groups/getid/{group_name}')
        group_uuid = lookup['data']['sample_group_uuid']
    print(f'{group_name} :: {group_uuid}', file=stderr)
    result = uploader.knex.post(f'/api/v1/sample_groups/{group_uuid}/middleware', {})
    click.echo(result)
@middleware.command(name='samples')
@add_authorization()
@click.option('-u/-n', '--uuid/--name', default=False)
@click.argument('sample_names', nargs=-1)
def sample_middleware(uploader, uuid, sample_names):
    """Run middleware for a sample."""
    for name in sample_names:
        # Resolve each sample's UUID unless one was supplied directly.
        if uuid:
            sample_uuid = name
        else:
            lookup = uploader.knex.get(f'/api/v1/samples/getid/{name}')
            sample_uuid = lookup['data']['sample_uuid']
        print(f'{name} :: {sample_uuid}', file=stderr)
        # Best-effort: one failing sample must not abort the rest.
        try:
            result = uploader.knex.post(f'/api/v1/samples/{sample_uuid}/middleware', {})
            click.echo(result)
        except Exception:  # pylint: disable=broad-except
            click.echo('Failed.')
| 1,645 | 544 |
from datetime import timedelta
def daterange(start_date, end_date):
    """Yield each date from start_date (inclusive) up to end_date (exclusive)."""
    day_count = int((end_date - start_date).days)
    for offset in range(day_count):
        yield start_date + timedelta(offset)
| 165 | 56 |
import sys
# Competitive-programming style file redirection.
sys.stdin = open("input.txt", "r")
sys.stdout = open("output.txt", "w")

# First line: n counters and m following lines of 1-based indices.
n, m = map(int, input().split())
counts = [0] * n
for _ in range(m):
    # Every index listed on a line bumps its (1-based) counter.
    for idx in map(int, input().split()):
        counts[idx - 1] += 1
print(*counts)
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# Lint as: python3
"""An Ensemble Selection subpipeline for tabular datasets."""
import json
import os
from typing import List, Optional, Tuple
from absl import logging
from nitroml import subpipeline
from nitroml.automl.ensemble_selection.lib import ensemble_selection as es_lib
import tensorflow as tf
from tfx import types
from tfx.dsl.component.experimental.annotations import InputArtifact
from tfx.dsl.component.experimental.annotations import OutputArtifact
from tfx.dsl.component.experimental.annotations import Parameter
from tfx.dsl.component.experimental.decorators import component
from tfx.dsl.components.base import base_component
from tfx.types import standard_artifacts
from tfx.utils import path_utils
from google.protobuf import text_format
from nitroml.protos import problem_statement_pb2 as ps_pb2
class EnsembleSelection(subpipeline.Subpipeline):
  """An Ensemble Selection subpipeline for tabular datasets."""

  def __init__(self,
               problem_statement: ps_pb2.ProblemStatement,
               examples: types.Channel,
               models: List[types.Channel],
               evaluation_split_name: str,
               ensemble_size: int,
               metric: Optional[tf.keras.metrics.Metric] = None,
               goal: Optional[str] = None,
               instance_name: Optional[str] = None):
    """Constructs an EnsembleSelection subpipeline.

    Args:
      problem_statement: ProblemStatement proto identifying the task.
      examples: A Channel of 'Example' artifact type produced from an upstream
        The source of examples that are used in evaluation (required).
      models: A List of Channels of 'standard_artifact.Model' type to use as
        the library of base models in the ensemble selection algorithm.
      evaluation_split_name: String name of the evaluation split in the
        `examples` artifact to use for evaluation. For examples, 'eval'.
      ensemble_size: Maximum number of models (with replacement) to select. This
        is the number of rounds (iterations) for which the ensemble selection
        algorithm will run. The number of models in the final ensemble will be
        at most ensemble_size.
      metric: Optional TF Keras Metric to optimize for during ensemble
        selection. When `None`, the `problem_statement` is used to determine
        the metric and goal.
      goal: Optional string 'maximize' or 'minimize' depending on the goal of
        the metric. When `None`, the `problem_statement` is used to determine
        the metric and goal.
      instance_name: Optional unique instance name. Necessary iff multiple
        EnsembleSelection subpipelines are declared in the same pipeline.

    Raises:
      ValueError: When a required param is not supplied.
    """
    # Fill in whichever of (metric, goal) the caller omitted. The previous
    # `not metric and not goal` check skipped the defaults when only one of
    # the two was supplied, letting a `None` metric (serialize crash) or a
    # `None` goal flow into the component below.
    if metric is None or goal is None:
      default_metric, default_goal = self._determine_metric_and_goal(
          problem_statement)
      if metric is None:
        metric = default_metric
      if goal is None:
        goal = default_goal
    # The component exposes up to 10 optional inputs, input_model0..9.
    input_models = {f'input_model{i}': model for i, model in enumerate(models)}
    self._instance_name = instance_name
    self._ensemble_selection = ensemble_selection(
        problem_statement=text_format.MessageToString(
            message=problem_statement, as_utf8=True),
        examples=examples,
        evaluation_split_name=evaluation_split_name,
        ensemble_size=ensemble_size,
        metric=json.dumps(tf.keras.metrics.serialize(metric)),
        goal=goal,
        instance_name=instance_name,
        **input_models,
    )

  @property
  def id(self) -> str:
    """Returns the EnsembleSelection sub-pipeline's unique ID."""
    instance_id = 'EnsembleSelection'
    if self._instance_name:
      instance_id = f'{instance_id}.{self._instance_name}'
    return instance_id

  @property
  def components(self) -> List[base_component.BaseComponent]:
    """Returns the EnsembleSelection sub-pipeline's constituent components."""
    return [self._ensemble_selection]

  @property
  def outputs(self) -> subpipeline.SubpipelineOutputs:
    """Return the EnsembleSelection sub-pipeline's outputs."""
    return subpipeline.SubpipelineOutputs(
        {'model': self._ensemble_selection.outputs.model})

  def _determine_metric_and_goal(
      self, problem_statement: ps_pb2.ProblemStatement
  ) -> Tuple[tf.keras.metrics.Metric, str]:
    """Picks a default (metric, goal) pair based on the task type."""
    task_type = problem_statement.tasks[0].type
    if task_type.HasField('multi_class_classification'):
      return tf.keras.metrics.SparseCategoricalAccuracy(
          name='accuracy'), 'maximize'
    if task_type.HasField('binary_classification'):
      return tf.keras.metrics.AUC(name='auc_roc', curve='ROC'), 'maximize'
    if task_type.HasField('one_dimensional_regression'):
      return tf.keras.metrics.MeanSquaredError(name='mse'), 'minimize'
    raise ValueError('Invalid task type: {}'.format(task_type))
# pytype: disable=wrong-arg-types
@component
def ensemble_selection(
    problem_statement: Parameter[str],
    examples: InputArtifact[standard_artifacts.Examples],
    evaluation_split_name: Parameter[str],
    ensemble_size: Parameter[int],
    metric: Parameter[str],
    goal: Parameter[str],
    model: OutputArtifact[standard_artifacts.Model],
    input_model0: InputArtifact[standard_artifacts.Model] = None,
    input_model1: InputArtifact[standard_artifacts.Model] = None,
    input_model2: InputArtifact[standard_artifacts.Model] = None,
    input_model3: InputArtifact[standard_artifacts.Model] = None,
    input_model4: InputArtifact[standard_artifacts.Model] = None,
    input_model5: InputArtifact[standard_artifacts.Model] = None,
    input_model6: InputArtifact[standard_artifacts.Model] = None,
    input_model7: InputArtifact[standard_artifacts.Model] = None,
    input_model8: InputArtifact[standard_artifacts.Model] = None,
    input_model9: InputArtifact[standard_artifacts.Model] = None,
) -> None:  # pytype: disable=invalid-annotation,wrong-arg-types
  """Fits ensemble selection over the supplied base models and exports it.

  Parses the problem statement, gathers the serving paths of every non-None
  input model, fits the ensemble-selection algorithm on the evaluation split,
  and saves the resulting ensemble as a serving SavedModel under `model`.
  (Up to 10 base models are supported because TFX components cannot take a
  variable-length list of artifact inputs.)
  """
  problem_statement = text_format.Parse(problem_statement,
                                        ps_pb2.ProblemStatement())
  input_models = [
      input_model0, input_model1, input_model2, input_model3, input_model4,
      input_model5, input_model6, input_model7, input_model8, input_model9
  ]
  # Only the slots that were actually wired up contribute to the library.
  saved_model_paths = {
      str(i): path_utils.serving_model_path(model.uri)
      for i, model in enumerate(input_models)
      if model
  }
  logging.info('Saved model paths: %s', saved_model_paths)
  label_key = _label_key(problem_statement)
  # `metric` arrives JSON-serialized because component Parameters are
  # restricted to primitive types.
  es = es_lib.EnsembleSelection(
      problem_statement=problem_statement,
      saved_model_paths=saved_model_paths,
      ensemble_size=ensemble_size,
      metric=tf.keras.metrics.deserialize(json.loads(metric)),
      goal=goal)
  es.fit(*_data_from_examples(
      examples_path=os.path.join(examples.uri, evaluation_split_name),
      label_key=label_key))
  logging.info('Selected ensemble weights: %s', es.weights)
  es.save(
      export_path=os.path.join(
          path_utils.serving_model_dir(model.uri), 'export', 'serving'))
# pytype: enable=wrong-arg-types
def _data_from_examples(examples_path: str, label_key: str):
  """Returns a tuple of ndarrays of examples and label values."""
  # Collect the shard files of the split in deterministic (sorted) order.
  shard_names = sorted(tf.io.gfile.listdir(examples_path))
  shards = [os.path.join(examples_path, name) for name in shard_names]
  dataset = tf.data.TFRecordDataset(shards, compression_type='GZIP')
  serialized_examples, labels = [], []
  # Cap at 10000 records to bound time and memory during selection.
  for record in dataset.take(10000).as_numpy_iterator():
    serialized_examples.append(record)
    parsed = tf.train.Example()
    parsed.ParseFromString(record)
    labels.append(_label_value(parsed, label_key))
  return serialized_examples, labels
def _label_key(problem_statement: ps_pb2.ProblemStatement) -> str:
"""Returns the label key from the problem statement."""
task_type = problem_statement.tasks[0].type
if task_type.HasField('multi_class_classification'):
return task_type.multi_class_classification.label
if task_type.HasField('binary_classification'):
return task_type.binary_classification.label
if task_type.HasField('one_dimensional_regression'):
return task_type.one_dimensional_regression.label
raise ValueError('Invalid task type: {}'.format(task_type))
def _label_value(example: tf.train.Example, label_key: str):
feature = example.features.feature[label_key]
if feature.HasField('int64_list'):
return feature.int64_list.value
if feature.HasField('float_list'):
return feature.float_list.value
return feature.bytes_list.value
| 9,183 | 2,712 |
from .read_handle import ReadHandle
from .command_handle_base import CommandHandle
from .write_command_handle import WriteCommandHandle
from .delete_command_handle import DeleteCommandHandle
from .tcp_bulk_insert_handle import TcpBulkInsertHandle | 246 | 62 |
from django.shortcuts import render,redirect
from django.contrib.auth.models import Group
from .forms import AcademySignUpForm, AcademyProfile, ProfilePicture, PostAnAdForm, AboutAcademyForm
from django.contrib.auth.models import User
from django.views.generic import RedirectView
from .decorators import unauthenticated_user, allowed_users, admin_only
from django.contrib.auth.decorators import login_required
from .models import Academy, PostAnAd, Invitations
from tutors.models import PostAnAd as PostAnAd_tutor
from tutors.models import PostAnAd as PostAnAd_tutor
from tutors.models import Invitaions,WishList_tut
from django.contrib import messages
from django.core.mail import EmailMessage
from django.conf import settings
from django.template.loader import render_to_string
from django.contrib.sites.shortcuts import get_current_site
from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode
from django.utils.encoding import force_bytes, force_text, DjangoUnicodeDecodeError
from .utils import generate_token
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
import threading
def academyRegister(request):
    """Register a new academy account.

    On a valid POST: creates the auth ``User``, adds it to the "academy"
    group, creates the linked ``Academy`` profile row, deactivates the user
    until e-mail confirmation, and sends an activation link. On GET (or an
    invalid POST) renders the signup form.
    """
    form = AcademySignUpForm()
    if request.method == "POST":
        form = AcademySignUpForm(request.POST)
        if form.is_valid():
            academy = form.save()  # the newly created auth User
            username = form.cleaned_data.get('username')
            email = form.cleaned_data.get('email')
            city = form.cleaned_data.get('city')
            phone = form.cleaned_data.get("phone")
            name = form.cleaned_data.get("name")
            address = form.cleaned_data.get("address")
            group = Group.objects.get(name="academy")
            academy.groups.add(group)
            # Profile row mirroring the signup fields, linked 1:1 to the user.
            Academy.objects.create(
                academy=academy,
                username= username,
                name=name,
                email = email,
                city = city,
                phone = phone,
                address= address
            )
            # Deactivate until the e-mailed activation token is used.
            academy.is_active = False
            academy.save()
            current_site = get_current_site(request)
            template = render_to_string("academy/activate.html", {
                "name": name,
                "domain": current_site,
                "uid": urlsafe_base64_encode(force_bytes(academy.pk)),
                "token": generate_token.make_token(academy)
            })
            registerEmail = EmailMessage(
                'Account Activation',
                template,
                settings.EMAIL_HOST_USER,
                [email]
            )
            registerEmail.fail_silently = False
            registerEmail.send()
            return render(request,"students/activation_sent.html",{})
    context = {
        "form": form
    }
    return render(request, 'academy/academy_sign_up.html', context)
def activate_view(request, uidb64, token):
    """Activate an academy account from the e-mailed confirmation link.

    Decodes the user's primary key from ``uidb64``, verifies ``token``,
    activates the account, sends a confirmation e-mail and redirects to the
    sign-in page. Any malformed or stale link renders the failure page with
    HTTP 401.
    """
    try:
        uid = force_text(urlsafe_base64_decode(uidb64))
        academy = User.objects.get(pk = uid)
    except Exception:  # malformed uid or unknown user -> treat as a bad link
        academy = None
    if academy is not None and generate_token.check_token(academy, token):
        academy.is_active = True
        academy.save()
        # BUG FIX: the auth User model has no `name` attribute -- the display
        # name lives on the related Academy profile created at signup
        # (accessed elsewhere as request.user.academy).
        template = render_to_string("academy/registerEmail.html", {
            "name": academy.academy.name
        })
        registerEmail = EmailMessage(
            'Registration Successful',
            template,
            settings.EMAIL_HOST_USER,
            [academy.email]
        )
        registerEmail.fail_silently = False
        registerEmail.send()
        messages.success(request,'account was created for ' + academy.username)
        return redirect("sign_in")
    return render(request, 'students/activate_failed.html', status=401)
@login_required(login_url="sign_in")
@allowed_users(allowed_roles=["academy"])
def academyDashboard(request):
    """Academy dashboard: profile form, picture upload and ad/invite stats."""
    academy = request.user.academy
    form = AcademyProfile(instance=academy)
    user = Academy.objects.get(username = request.user.username)
    active_ads = PostAnAd.objects.filter(academyUser = request.user.academy).count()
    p_form = ProfilePicture()
    if request.method=="POST":
        form = AcademyProfile(request.POST,request.FILES, instance=academy)
        p_form = ProfilePicture(request.POST, request.FILES)
        if p_form.is_valid():
            # Store the uploaded picture on the Academy profile row.
            image = p_form.cleaned_data["image"]
            std_image = Academy.objects.get(username = request.user.username)
            std_image.user_image = image
            std_image.save()
            return redirect("academy_dashboard")
        else:
            messages.warning(request, 'Supported File Extensions are .jpg And .png, Max Image Size Is 1MB')
            return redirect("academy_dashboard")
    # NOTE(review): both POST branches above return, so this save is only
    # reachable on GET, where `form` is unbound and is_valid() is False --
    # the profile form is effectively never saved. Looks like a bug in the
    # two-form handling; confirm intended behaviour.
    if form.is_valid():
        form.save()
    context = {
        "form": form,
        "p_form": p_form,
        "totalAds": user.total_ads,
        "adsDel": user.ads_deleted,
        "activeAds": active_ads, # needs to be updated
        "invitations_sent": user.invitations_sent,
        "invitations_sent_accepted": user.invitations_sent_accepted,
        "invitations_sent_rejected": user.invitations_sent_rejected,
        "invitations_recieved": user.invitations_recieved,
        "invitations_recieved_accepted": user.invitations_recieved_accepted,
        "invitations_recieved_rejected": user.invitations_recieved_rejected,
    }
    return render(request, 'academy/academy_dashboard.html', context)
def post_ad(subject,tuition_level,hours_per_day,days_per_week,estimated_fees,user,tutor_gender):
    """Persist a new academy ad and bump the academy's ad counters.

    Runs as a background-thread target (see postAd) so the request thread is
    not blocked by the database writes.
    """
    ad = PostAnAd(
        academyUser=user,
        subject=subject,
        tuition_level=tuition_level,
        hours_per_day=hours_per_day,
        days_per_week=days_per_week,
        estimated_salary=estimated_fees,
        tutor_gender=tutor_gender,
    )
    ad.save()
    # Lifetime and currently-active counters on the academy profile.
    user.total_ads += 1
    user.ad_post_count += 1
    user.save()
def email_send(user,my_ad,emails):
    """BCC every address in `emails` about `user`'s newly posted ad.

    No-op when the recipient list is empty.
    """
    if not emails:
        return
    body = render_to_string("home/stdAD.html", {
        "firstname": user.first_name,
        "lastname": user.last_name,
        "ad": my_ad
    })
    notification = EmailMessage(
        subject = f'{user.first_name} {user.last_name} posted an AD',
        body = body,
        from_email = settings.EMAIL_HOST_USER,
        bcc = emails
    )
    notification.fail_silently = False
    notification.send()
@login_required(login_url="sign_in")
@allowed_users(allowed_roles=["academy"])
def postAd(request, pk):
    """Create a new ad for the logged-in academy.

    Rejects a duplicate ad (same subject and tuition level as an existing
    one); otherwise saves the ad on a background thread and redirects to the
    dashboard. GET renders the blank ad form.
    """
    postform = PostAnAdForm()
    user = Academy.objects.get(username = request.user.username)
    academyAds = PostAnAd.objects.filter(academyUser__username = request.user.username)
    if request.method == "POST":
        postform = PostAnAdForm(request.POST)
        if postform.is_valid():
            subject = postform.cleaned_data["subject"]
            tuition_level = postform.cleaned_data["tuition_level"]
            tutor_gender = postform.cleaned_data["tutor_gender"]
            hours_per_day = postform.cleaned_data["hours_per_day"]
            days_per_week = postform.cleaned_data["days_per_week"]
            estimated_salary = postform.cleaned_data["estimated_salary"]
            # An academy may only have one ad per (subject, tuition level).
            duplicate = any(
                ad.subject == subject and ad.tuition_level == tuition_level
                for ad in academyAds
            )
            if duplicate:
                messages.info(request, "This AD Already Exists")
                return redirect("academy_dashboard")
            # Save the ad off the request thread (see post_ad).
            worker = threading.Thread(
                target=post_ad,
                args=[subject, tuition_level, hours_per_day, days_per_week,
                      estimated_salary, user, tutor_gender])
            worker.start()
            messages.info(request, "Your post is Successfully Created")
            return redirect("academy_dashboard")
    context = {
        "form": postform
    }
    return render(request, 'academy/post_ad.html', context)
@login_required(login_url="sign_in")
@allowed_users(allowed_roles=["academy"])
def Ads(request):
    """List the logged-in academy's ads, newest first."""
    try:
        # NOTE(review): `AboutStudent` is never imported in this module, so
        # this line always raises NameError and falls into the except branch;
        # additionally .get() returns a single instance, which has no
        # .order_by(). Net effect: studentAbout is always None. Looks like
        # copied-over dead code -- confirm intent.
        studentAbout = AboutStudent.objects.get(student__username = request.user.username).order_by("-id")
    except:
        studentAbout = None
    ads = PostAnAd.objects.filter(academyUser=request.user.academy).order_by("-id")
    context = {
        "ads":ads,
        "about": studentAbout
    }
    return render(request, 'academy/ads.html', context)
@login_required(login_url="sign_in")
@allowed_users(allowed_roles=["academy"])
def AdsDelete(request, pk):
    """Confirm (GET) and perform (POST) deletion of one of the academy's ads."""
    ad = PostAnAd.objects.get(id=pk)
    owner = Academy.objects.get(username=request.user.username)
    if request.method == "POST":
        ad.delete()
        # Keep the academy's deletion / active-ad counters in sync.
        owner.ads_deleted += 1
        owner.ad_post_count -= 1
        owner.save()
        return redirect("ads_academy")
    return render(request, 'academy/delete_ad.html', {'ad': ad})
@login_required(login_url="sign_in")
@allowed_users(allowed_roles=["academy"])
def allTutors(request):
    """Browse all tutor ads with optional GET filters and pagination.

    Filters: TuitionLevel (exact, "All" disables), Subject (icontains),
    City (icontains), tuition_gender (prefix match, "Both" disables).
    Results are paginated 8 per page with a sliding window of page links.
    """
    tutors = PostAnAd_tutor.objects.all().order_by("-id")
    tuition_level_contains_query = request.GET.get('TuitionLevel')
    subject_contains_query = request.GET.get('Subject')
    city_contains_query = request.GET.get('City')
    tuition_gender_query = request.GET.get('tuition_gender')
    number = tutors.count()
    if tutors:
        # Each filter narrows the queryset and refreshes the hit count.
        if tuition_level_contains_query != "" and tuition_level_contains_query is not None and tuition_level_contains_query != "All":
            tutors = tutors.filter(tuition_level = tuition_level_contains_query).order_by("-id")
            number = tutors.count()
        if subject_contains_query != "" and subject_contains_query is not None:
            tutors = tutors.filter(subject__icontains = subject_contains_query).order_by("-id")
            number = tutors.count()
        if city_contains_query != "" and city_contains_query is not None:
            tutors = tutors.filter(tutorUser__city__icontains = city_contains_query).order_by("-id")
            number = tutors.count()
        if tuition_gender_query != "" and tuition_gender_query is not None and tuition_gender_query != "Both":
            tutors = tutors.filter(tutorUser__gender__startswith = tuition_gender_query.lower())
            number = tutors.count()
    # Materialize the queryset for the paginator.
    tuts = []
    if tutors:
        for t in tutors:
            tuts.append(t)
    paginator = Paginator(tuts,8)
    page = request.GET.get('page')
    try:
        items = paginator.page(page)
    except PageNotAnInteger:
        items = paginator.page(1)
    except EmptyPage:
        items = paginator.page(paginator.num_pages)
    # Sliding window of up to 10 page links centred on the current page.
    index = items.number - 1
    max_index = len(paginator.page_range)
    start_index = index - 5 if index >= 5 else 0
    end_index = index + 5 if index <= max_index - 5 else max_index
    page_range = paginator.page_range[start_index:end_index]
    context = {
        # "tutors":items,
        "items":items,
        "number": number,
        "academy": request.user.academy,
        "page_range": page_range,
    }
    return render(request, 'academy/all_tutors.html', context)
from tutors.models import AboutAndQualifications
@login_required(login_url="sign_in")
@allowed_users(allowed_roles=["academy"])
def SpecificTutor(request, id):
    """Detail page for one tutor ad, with the tutor's qualifications and
    their other ads. Increments the ad's view counter."""
    tutor = PostAnAd_tutor.objects.get(id = id)
    qual = AboutAndQualifications.objects.get(tutor__username = tutor.tutorUser.username)
    # NOTE(review): read-modify-write counter is racy under concurrent
    # requests; consider F('views') + 1.
    tutor.views += 1
    tutor.save()
    tutors = PostAnAd_tutor.objects.filter(tutorUser__username = tutor.tutorUser.username).order_by("-id")
    # Wish-list support is disabled; "added" is hard-coded below.
    # try:
    #     wishList = WishList.objects.get(student = request.user.student)
    # except:
    #     wishList = None
    # added = False
    # if wishList is not None:
    #     if tutor.tutorUser in wishList.tutors.all():
    #         added = True
    context = {
        "tutor_id": tutor.tutorUser,
        "tutor": tutor,
        "qual": qual,
        "tutors": tutors.exclude(id = id),
        "student": request.user.academy,
        "added":False, # needs to be updated
    }
    return render (request, "academy/specific_tutor.html", context)
from tutors.models import Tutor, Invitaions_by_academy
def _send_demo_invite_emails(request, ad):
    """Send both invite notifications: a copy to the inviting academy and the
    actual invitation to the tutor."""
    # Copy sent to the academy that issued the invite.
    academy_body = render_to_string("home/inviteEmail.html", {
        "firstname": ad.tutorUser.first_name,
        "lastname": ad.tutorUser.last_name,
        "ad": ad,
        "invited_to": "Tutor",
        "area":ad.address,
        "city":ad.tutorUser.city
    })
    academy_mail = EmailMessage(
        'Invite For Demo',
        academy_body,
        settings.EMAIL_HOST_USER,
        [request.user.email]
    )
    academy_mail.fail_silently = False
    academy_mail.send()
    # Invitation delivered to the tutor.
    tutor_body = render_to_string("academy/inviteEmail.html", {
        "firstname": request.user.academy.name,
        "ad": ad,
        "invited_to": "Tutor",
        "area":ad.address,
        "city":ad.tutorUser.city
    })
    tutor_mail = EmailMessage(
        'Invitation',
        tutor_body,
        settings.EMAIL_HOST_USER,
        [ad.tutorUser.email]
    )
    tutor_mail.fail_silently = False
    tutor_mail.send()


def _create_demo_invite(request, ad, academy, tutor):
    """Create the invitation row, update both parties' counters and notify
    both sides by e-mail."""
    Invitaions_by_academy.objects.create(
        inivitaion_by_academy = academy,
        tutor_ad = ad,
        invitation_sent = True,
        accepted = False,
        rejected = False
    )
    academy.invitations_sent += 1
    academy.save()
    tutor.invitations_recieved += 1
    tutor.save()
    _send_demo_invite_emails(request, ad)
    messages.info(request, f'Invited {tutor.first_name} {tutor.last_name} For A Demo')


@login_required(login_url="sign_in")
@allowed_users(allowed_roles=["academy"])
def inviteFordemo(request, id):
    """Invite the tutor behind ad `id` for a demo class.

    GET renders a confirmation page. POST creates the invitation (unless
    this academy already sent one for the same ad), updates counters on both
    profiles and e-mails both parties. The original duplicated the entire
    create/notify sequence in two identical branches; it is factored into
    the helpers above with identical behaviour.
    """
    ad = PostAnAd_tutor.objects.get(id = id)
    tutor = Tutor.objects.get(username = ad.tutorUser.username)
    academy = Academy.objects.get(username = request.user.username)
    try:
        existing_invite = Invitaions_by_academy.objects.get(tutor_ad = ad)
    except Exception:
        existing_invite = None
    if request.method == "POST":
        already_sent = (
            existing_invite is not None
            and existing_invite.invitation_sent == True
            and existing_invite.inivitaion_by_academy.username == request.user.username
        )
        if already_sent:
            messages.info(request, f'Invitation request already sent to {ad.tutorUser.first_name} {ad.tutorUser.last_name}')
            return redirect("all_tutors_academy")
        _create_demo_invite(request, ad, academy, tutor)
        return redirect("academy_dashboard")
    context = {
        "ad":ad
    }
    return render(request, 'academy/invite_for_demo.html', context)
@login_required(login_url="sign_in")
@allowed_users(allowed_roles=["academy"])
def invited(request):
    """List demo invitations this academy has sent, newest first."""
    academy = Academy.objects.get(username = request.user.username)
    sent_invites = Invitaions_by_academy.objects.filter(
        inivitaion_by_academy = academy).order_by("-id")
    return render(request, "academy/invited.html", {"invited": sent_invites})
@login_required(login_url="sign_in")
@allowed_users(allowed_roles=["academy"])
def invitationsAcademy(request):
    """List invitations tutors have sent to this academy's ads, newest first."""
    received = Invitations.objects.filter(
        academy_ad__academyUser = request.user.academy).order_by("-id")
    return render(request, 'academy/invitations.html', {"invites": received})
@login_required(login_url="sign_in")
@allowed_users(allowed_roles=["academy"])
def view_your_ad_acad(request, id):
    """Show one received invitation plus up to four other tutor ads for the
    same subject (excluding the inviting tutor).

    BUG FIX: the original used `[4]` (indexing a single object) instead of
    the slice `[:4]`; with five or more matches the later `.exclude()` call
    crashed, and otherwise the IndexError silently fell back to the full
    queryset. Exclusion now happens before slicing, since a sliced queryset
    cannot be filtered further.
    """
    invite = Invitations.objects.get(id = id)
    related_tutors = (PostAnAd_tutor.objects
                      .filter(subject = invite.academy_ad.subject)
                      .exclude(tutorUser__username = invite.inivitaion_by_tutor.username)[:4])
    context = {
        "invite": invite,
        "tutors": related_tutors
    }
    return render(request,'academy/view_your_ad.html', context)
@login_required(login_url="sign_in")
@allowed_users(allowed_roles=["academy"])
def acceptInvitationAcademy(request, id):
    """Accept a tutor's invitation on one of this academy's ads.

    On POST: marks the invite accepted, bumps the accepted counters on both
    profiles, e-mails the tutor the academy's contact details and e-mails
    the academy a confirmation. GET renders a confirmation page.
    """
    invite = Invitations.objects.get(id = id)
    student = Academy.objects.get(username = request.user.username)
    tutor = Tutor.objects.get(username = invite.inivitaion_by_tutor.username)
    if request.method == "POST":
        invite.accepted = True
        invite.rejected = False
        invite.save()
        # Counters on both sides of the invitation.
        student.invitations_recieved_accepted += 1
        student.save()
        tutor.invitations_sent_accepted += 1
        tutor.save()
        # Mail the tutor the academy's contact details.
        template = render_to_string("academy/acceptEmail.html", {
            "name": request.user.academy.name,
            "email": request.user.email,
            "register_as": "Academy",
            "phone": request.user.academy.phone
        })
        registerEmail = EmailMessage(
            'Invitation Accepted',
            template,
            settings.EMAIL_HOST_USER,
            [invite.inivitaion_by_tutor.email]
        )
        registerEmail.fail_silently = False
        registerEmail.send()
        # Confirmation copy to the academy itself.
        recieve_temp = render_to_string("academy/accept_recieve_Email.html", {
            "request_from" :tutor,
            "request": "Tutor"
        })
        Email = EmailMessage(
            'Invitation Accepted',
            recieve_temp,
            settings.EMAIL_HOST_USER,
            [request.user.email]
        )
        Email.fail_silently = False
        Email.send()
        messages.info(request, f'Accepted Invitation Request from {tutor.first_name} {tutor.last_name}')
        return redirect("invitations_academy")
    context = {
        "invite":invite
    }
    return render(request, "academy/accept_invitation.html", context)
@login_required(login_url="sign_in")
@allowed_users(allowed_roles=["academy"])
def rejectInviteAcademy(request, id):
    """Reject (and delete) a tutor's invitation on one of this academy's ads.

    On POST: deletes the invite row, bumps the rejected counters on both
    profiles and notifies the tutor by e-mail. GET renders a confirmation
    page.
    """
    invite = Invitations.objects.get(id = id)
    student = Academy.objects.get(username = request.user.username)
    tutor = Tutor.objects.get(username = invite.inivitaion_by_tutor.username)
    if request.method == "POST":
        # The in-memory instance keeps its field values after delete(), so
        # invite.inivitaion_by_tutor.email below is still usable.
        invite.delete()
        student.invitations_recieved_rejected += 1
        student.save()
        tutor.invitations_sent_rejected += 1
        tutor.save()
        template = render_to_string("home/rejectEmail.html", {
            "firstname": request.user.academy.name,
            "student_email": request.user.email
        })
        registerEmail = EmailMessage(
            'Invitation Rejected',
            template,
            settings.EMAIL_HOST_USER,
            [invite.inivitaion_by_tutor.email]
        )
        registerEmail.fail_silently = False
        registerEmail.send()
        messages.warning(request, f'Rejected Invite From {tutor.first_name} {tutor.last_name}')
        return redirect("invitations_academy")
    context = {
        "invite": invite
    }
    return render(request,'academy/reject_invitation.html', context)
@login_required(login_url="sign_in")
@allowed_users(allowed_roles=["academy"])
def del_account_acad(request):
    """Soft-delete the academy account (deactivate, keep the row) and e-mail
    a confirmation. GET renders a confirmation page."""
    account = User.objects.get(username = request.user.username)
    if request.method == "POST":
        # Deactivate rather than delete so the data is recoverable.
        account.is_active = False
        account.save()
        body = render_to_string("home/delEmail.html", {
            "register_as": "Academy",
            "email": request.user.email,
        })
        confirmation = EmailMessage(
            'Account Deletion',
            body,
            settings.EMAIL_HOST_USER,
            [request.user.email]
        )
        confirmation.fail_silently = False
        confirmation.send()
        return redirect("academy_dashboard")
    return render(request, "academy/del_account.html", {})
@login_required(login_url="sign_in")
@allowed_users(allowed_roles=["academy"])
def aboutAcademy(request):
    """Save the academy's free-text 'about' section and mark the profile
    complete. GET (or an invalid POST) renders the form."""
    aboutForm = AboutAcademyForm()
    if request.method == "POST":
        aboutForm = AboutAcademyForm(request.POST)
        if aboutForm.is_valid():
            profile = Academy.objects.get(username=request.user.username)
            profile.textArea = aboutForm.cleaned_data["textArea"]
            profile.profile_complete = True
            profile.save()
            return redirect("academy_dashboard")
    return render(request, "academy/student_about.html", {"form": aboutForm})
| 23,545 | 7,114 |
# Triangle classifier (challenge 35 follow-up): reads three segment lengths
# and reports whether they form a triangle, and of which type.
# `cores` maps colour names to ANSI escape codes for terminal output.
cores = {'limpar':'\033[m', 'vermelho':'\033[1;31m','ciano':'\033[1;36m','azul':'\033[1;34m','amarelo':'\033[1;33m','verde':'\033[1;32m'}
print('===== Tipo de Triângulo -> Aprimoramento do Desafio 35 =====')
print()
s1 = float(input('Primeiro segmento: '))
s2 = float(input('Segundo segmento: '))
s3 = float(input('Terceiro segmento: '))
# Triangle inequality: each side must be shorter than the sum of the others.
if s1 < s2+s3 and s2 < s1+s3 and s3 < s1+s2:
    print(f'{(cores["verde"])}PODE {(cores["limpar"])}se formar um triângulo! Do tipo:')
    if s1 == s2 == s3: # all three segments equal
        print(f'{(cores["amarelo"])}EQUILÁTERO! {(cores["limpar"])}(Todos os lados são iguais).')
    elif s1 != s2 != s3 != s1: # all three segments different
        print(f'{(cores["azul"])}ESCALENO! {(cores["limpar"])}(Todos os lados diferentes).')
    else: # two segments equal, one different
        print(f'{(cores["ciano"])}ISÓSCELES! {(cores["limpar"])}(Dois lados iguais).')
else:
    print(f'{(cores["vermelho"])}NÃO PODE {(cores["limpar"])}se formar um triângulo!')
import sys
import tkinter
import unittest
from test.support import requires
def get_tk_root():
    """Return the default Tk root, creating one only if none exists yet."""
    requires('gui')  # raise exception if tk unavailable
    # Default-root support can be disabled in Tkinter, in which case the
    # attribute may be missing entirely -- treat that the same as "no root".
    root = getattr(tkinter, '_default_root', None)
    if root is None:
        # create a new master only if there isn't one already
        root = tkinter.Tk()
    return root
def root_deiconify():
    """Make the shared Tk root window visible."""
    get_tk_root().deiconify()
def root_withdraw():
    """Hide the shared Tk root window without destroying it."""
    get_tk_root().withdraw()
def simulate_mouse_click(widget, x, y):
    """Generate proper events to click at the x, y position (tries to act
    like an X server)."""
    # Event order mirrors a real pointer interaction: enter the widget,
    # move to the target position, then press and release button 1 there.
    widget.event_generate('<Enter>', x=0, y=0)
    widget.event_generate('<Motion>', x=x, y=y)
    widget.event_generate('<ButtonPress-1>', x=x, y=y)
    widget.event_generate('<ButtonRelease-1>', x=x, y=y)
import _tkinter
# Tcl version as a comparable tuple, e.g. (8, 6); used by requires_tcl below.
tcl_version = tuple(map(int, _tkinter.TCL_VERSION.split('.')))
def requires_tcl(*version):
    """Skip decorator: run the test only when Tcl is at least `version`."""
    wanted = '.'.join(map(str, version))
    return unittest.skipUnless(tcl_version >= version,
                               'requires Tcl version >= ' + wanted)
# Cached result of get_tk_patchlevel(); queried from Tcl at most once.
_tk_patchlevel = None
def get_tk_patchlevel():
    """Return the Tk patchlevel as a tuple, e.g. (8, 6, 9).

    Non-numeric components (e.g. 'b1' in beta releases) become -1 so the
    tuple stays comparable.
    """
    global _tk_patchlevel
    if _tk_patchlevel is not None:
        return _tk_patchlevel
    tcl = tkinter.Tcl()
    parts = []
    for token in tcl.call('info', 'patchlevel').split('.'):
        try:
            parts.append(int(token, 10))
        except ValueError:
            parts.append(-1)
    _tk_patchlevel = tuple(parts)
    return _tk_patchlevel
# Conversion factors from Tk screen-distance suffixes to points
# (1 point = 1/72 inch).
units = {
    'c': 72 / 2.54,  # centimeters
    'i': 72,         # inches
    'm': 72 / 25.4,  # millimeters
    'p': 1,          # points
}

def pixels_conv(value):
    """Convert a Tk distance string such as '2c' or '10p' to points."""
    magnitude, suffix = value[:-1], value[-1:]
    return float(magnitude) * units[suffix]
def tcl_obj_eq(actual, expected):
    """Loose equality for values coming back from Tcl.

    A Tcl_Obj compares equal to its string representation, and tuples are
    compared element-wise with the same rules, recursively.
    """
    if actual == expected:
        return True
    # Tcl_Obj vs str: fall back to comparing the string form.
    if isinstance(actual, _tkinter.Tcl_Obj):
        if isinstance(expected, str):
            return str(actual) == expected
    # Tuples: same length and pairwise tcl_obj_eq.
    if isinstance(actual, tuple):
        if isinstance(expected, tuple):
            return (len(actual) == len(expected) and
                    all(tcl_obj_eq(act, exp)
                        for act, exp in zip(actual, expected)))
    return False
def widget_eq(actual, expected):
    """Equality for widgets: a widget compares equal to its path string."""
    if actual == expected:
        return True
    comparable = (str, tkinter.Widget)
    # Both sides must be widget-or-string; then compare their string forms.
    return (isinstance(actual, comparable)
            and isinstance(expected, comparable)
            and str(actual) == str(expected))
| 2,628 | 837 |
class Employee():
    """A company employee record.

    Attributes mirror the constructor arguments; ``version`` starts at 1
    (presumably bumped when the record is updated — confirm against callers).
    """

    def __init__(self, identity, fullname, salary, email, birth_year, iban):
        self.identity = identity
        self.fullname = fullname
        self.salary = salary
        self.email = email
        self.birth_year = birth_year
        self.iban = iban
        # every freshly created record starts at version 1
        self.version = 1

    def __repr__(self):
        # Added for debuggability; IBAN/salary deliberately omitted to avoid
        # leaking sensitive data into logs.
        return "%s(identity=%r, fullname=%r)" % (
            type(self).__name__, self.identity, self.fullname)
from io import open
from setuptools import setup, find_packages

# Package definition for PySeus, a minimal viewer for medical imaging data.
setup(
    name='pyseus',
    version='0.1',
    description='PySeus is a minimal viewer for medical imaging data.',
    long_description=open('README.md', encoding='utf-8').read(),
    long_description_content_type='text/markdown',
    url='http://github.com/calmer/pyseus',
    author='Christoph Almer',
    author_email='christoph.almer@gmail.com',
    license='GNU',
    packages=find_packages(),
    # Non-code resources shipped with the package.
    # Bug fix: 'settings.ini' was listed twice; duplicate entry removed.
    package_data={'pyseus': [
        'settings.ini',
        'ui/style_dark.qss',
        'ui/icon.png'
    ]},
    include_package_data=True,
    install_requires=[
        'pyside2==5.13',
        'numpy',
        'opencv-python',
        'h5py',
        'pydicom',
        'nibabel',
        'natsort'
    ],
    entry_points={
        'console_scripts': [
            'pyseus=pyseus:load',
        ],
    },
    zip_safe=False
)
from pyspark.ml.pipeline import Transformer
import pyspark.sql.functions as f
import pyspark.sql.types as t
from pyspark.sql import DataFrame


class PathExtractor(Transformer):
    """Spark ML Transformer that derives path-related columns (relative path,
    local path, file extension and path tags) from a file-path string column.
    """

    def __init__(self, inputCol='path', tagsCol="tags", basePath='dbfs:/'):
        # Bug fix: initialize the Params machinery of the parent Transformer.
        super(PathExtractor, self).__init__()
        self.inputCol = inputCol    # name of the column holding the file path
        self.tagsCol = tagsCol      # kept for API compatibility (not used by _transform yet)
        self.basePath = basePath    # prefix stripped off to build relative_path

    # NOTE: a previous revision defined broken `this()` / `copy(extra)` methods
    # (missing `self`, calling the undefined names Identifiable/defaultCopy —
    # leftovers of a Scala port). They raised NameError whenever invoked;
    # removing them restores the working implementations inherited from
    # Transformer/Params.

    def check_input_type(self, schema):
        """Raise if the configured input column is not a StringType column."""
        field = schema[self.inputCol]
        if field.dataType != t.StringType():
            raise Exception('PathExtractor input type %s did not match input type StringType' % field.dataType)

    def _transform(self, df):
        """Validate the schema, then add the derived columns."""
        self.check_input_type(df.schema)
        return self._transform_impl(df, self.basePath, self.inputCol)

    @staticmethod
    def _transform_impl(df: DataFrame, basePath: str, inputCol: str):
        """ User overridable """
        # Regexes are raw strings now so backslashes reach Spark unmangled.
        return (df
                .withColumn("relative_path", f.regexp_replace(inputCol, basePath + "(.*)$", r"$1"))
                .withColumn("local_path", f.regexp_replace(inputCol, "^dbfs:(.*$)", r"/dbfs$1"))
                .withColumn("extension", f.regexp_replace("relative_path", r".*\.(\w+)$", r"$1"))
                .withColumn("path_tags",
                            f.split(
                                f.regexp_replace(
                                    "relative_path",
                                    r"([0-9a-zA-Z]+)([\_\.\/\:])",
                                    r"$1,"),
                                ",")
                            )
                )
def f(l):
    """Sort the first 20 elements of *l* in place and return *l*.

    Replaces the previous hand-unrolled network of 190 compare-and-swap
    statements with the equivalent pairwise-exchange (selection-sort) loop:
    it performs the same comparisons and swaps in the same order, produces
    the same result, and — like the original — raises IndexError when
    len(l) < 20 and leaves elements beyond index 19 untouched.
    """
    for i in range(19):
        for j in range(i + 1, 20):
            if l[i] > l[j]:
                l[i], l[j] = l[j], l[i]
    return l
# Demo runs. Converted from Python-2-only `print x` statements to the call
# form `print(x)`, which produces identical output and also runs on Python 3.
print(f([3, 12, 16, 8, 17, 6, 13, 0, 4, 15, 1, 14, 11, 18, 10, 5, 9, 7, 2, 19]))
print(f([2, 6, 11, 4, 7, 18, 19, 10, 15, 13, 3, 0, 17, 5, 8, 1, 14, 9, 16, 12]))
print(f([6, 12, 10, 7, 19, 15, 14, 5, 16, 1, 4, 11, 13, 2, 18, 9, 0, 3, 17, 8]))
print(f([1, 17, 13, 8, 9, 19, 18, 6, 5, 10, 12, 14, 2, 15, 0, 4, 11, 16, 7, 3]))
print(f([4, 7, 8, 6, 16, 10, 0, 5, 1, 3, 19, 2, 15, 12, 17, 11, 13, 18, 14, 9]))
print(f([14, 16, 11, 12, 5, 0, 10, 3, 1, 8, 17, 13, 4, 19, 9, 15, 6, 2, 7, 18]))
print(f([0, 3, 14, 9, 19, 13, 1, 7, 4, 17, 8, 16, 10, 5, 12, 6, 15, 11, 2, 18]))
print(f([17, 19, 3, 13, 15, 6, 16, 4, 0, 18, 8, 1, 9, 11, 2, 12, 7, 10, 5, 14]))
print(f([19, 5, 15, 1, 8, 2, 3, 12, 6, 14, 17, 7, 13, 10, 4, 18, 11, 9, 16, 0]))
print(f([13, 10, 11, 17, 19, 12, 14, 7, 5, 9, 2, 4, 18, 8, 6, 3, 16, 15, 0, 1]))
| 9,371 | 6,821 |
from django.apps import AppConfig


class CartviewConfig(AppConfig):
    """Django application configuration for the cartview app."""
    name = 'cartview'
"""
7) Leia uma temperatura em graus Fahrenheit e apresente-a convertida em graus Celsius. A fórmula de conversão é: C = (F-32.0)*5.0/9.0, sendo C a temperatura em Celsius e F a temperatura em Fahrenheit.
"""
# Read a Fahrenheit temperature and report the Celsius equivalent.
temp_f = float(input('Digite a temperatura em graus Fahrenheit F: \n'))
temp_c = (temp_f - 32.0) * 5.0 / 9.0
print(f'A temperatura em graus Celsius de {temp_f}F é: {temp_c}ºC')
# Copyright (c) 2010-2013, Regents of the University of California.
# All rights reserved.
#
# Released under the BSD 3-Clause license as published at the link below.
# https://openwsn.atlassian.net/wiki/display/OW/License
import logging
# Module logger: errors only by default, with a NullHandler so the library
# stays silent unless the host application configures logging.
log = logging.getLogger('typeRssi')
log.setLevel(logging.ERROR)
log.addHandler(logging.NullHandler())
import openType
class typeRssi(openType.openType):
    """openType wrapper that holds and formats an RSSI reading in dBm."""

    def __init__(self):
        # log
        log.info("creating object")
        # initialize parent class
        openType.openType.__init__(self)
    def __str__(self):
        # NOTE(review): self.rssi is only assigned by update(); calling str()
        # before the first update() raises AttributeError — confirm callers
        # always update first.
        return '{0} dBm'.format(self.rssi)
    #======================== public ==========================================
    def update(self,rssi):
        # latest RSSI reading, in dBm
        self.rssi = rssi
    #======================== private =========================================
| 862 | 260 |
#!/usr/bin/python2.7
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Compile module compiles SMI data into Python objects for use by the SNMP
module. This started out clean, but now it's ugly. But at least it spits
out something useful.
"""
from __future__ import print_function
from __future__ import division
import os
import py_compile
from pycopia import textutils
from pycopia.SMI import SMI, Basetypes, Objects
# Directory where compiled MIB modules are written; overridable via $USERMIBPATH.
USERMIBPATH = os.environ.get("USERMIBPATH", os.path.join("/", "var", "tmp", "mibs"))
# global name translation table
# Since we convert MIB modules to Python modules, we can't have a dash in
# the name. These are translated to underscores.
TRANSTABLE = textutils.maketrans("-", "_")
def convert_name(name):
    """Translate a MIB name into a valid Python identifier (dash -> underscore).

    Equivalent to translating through TRANSTABLE, which maps only '-' to '_'.
    """
    return name.replace("-", "_")
# These are some of the attributes that the SNMP module needs, and are
# exported "as-is". Other attributes are special-cased in the appropriate
# generator method.
# Keyed by the SMI node class name; values are the attribute names copied
# verbatim into the generated class body by genClass().
EXPORTS = {
    "Type": ["status", "format", "units", "ranges", "enumerations"],
    "Node": ["access", "create", "status", "units"],
    "Macro": ["name", "status"],
    "Module": ["name", "path", "conformance", "language", "description"],
    "Group": ["name", "status"],
    "Value": ["val"],
}
# objects directly imported from SMI.Objects in the mib modules
IMPORTED_OBJECTS = ["ColumnObject", "MacroObject", "NotificationObject",
    "RowObject", "ScalarObject", "NodeObject", "ModuleObject", "GroupObject"]
def _classstr(tup):
    """Render a tuple of classes (or plain name strings) as a comma-separated
    source fragment; classes in IMPORTED_OBJECTS are emitted unqualified."""
    def _one(cls):
        if type(cls) is str:
            return cls
        if cls.__name__ in IMPORTED_OBJECTS:
            return cls.__name__
        return "%s.%s" % (cls.__module__, cls.__name__)
    return ", ".join(_one(item) for item in tup)
# generic class producer. Returns source code string
def genClass(sminode, baseclass, attrdict=None, doc=None):
    """Produce Python source for a class representing *sminode*.

    Exports the attributes listed in EXPORTS for the node's class, merged
    into *attrdict*, and derives the class name via convert_name().
    Returns the generated source as a string.
    """
    if not attrdict:
        attrdict = {}
    for attrname in EXPORTS[sminode.__class__.__name__]:
        val = getattr(sminode, attrname)
        if val is None:
            continue
        if type(val) is str:
            # strings are emitted as quoted literals in the generated source
            attrdict[attrname] = repr(val)
        else:
            attrdict[attrname] = val
    klassname = convert_name(sminode.name)
    parents = (baseclass,)
    s = []
    if parents:
        s.append( "class %s(%s):" % (klassname, _classstr(parents)) )
    else:
        s.append( "class %s(object):" % (klassname) )
    if doc:
        s.append('\t"""%s"""' % doc)
    # NOTE(review): falsy attribute values (0, "", empty list) are silently
    # dropped here — confirm that is intended.
    for key, val in attrdict.items():
        if val:
            s.append( "\t%s = %s" % (key, val) )
    if len(s) == 1:
        s.append("\tpass")
    s.append("\n")
    return "\n".join(s)
# generates a repr for SMI.Objects.IndexObjects
class IndexGenerator(list):
    """List of index column names whose repr is an IndexObjects constructor
    call, for direct embedding in generated source."""

    def __init__(self, init=None, implied=False):
        list.__init__(self, init or [])
        self.implied = bool(implied)

    def __repr__(self):
        names = ", ".join(self)
        return "pycopia.SMI.Objects.IndexObjects([%s], %r)" % (names, self.implied)
class ListGenerator(list):
    """List whose repr is a Python list literal of its (string) items,
    unquoted, for direct embedding in generated source."""

    def __init__(self, init=None):
        list.__init__(self, init or [])

    def __repr__(self):
        return "[%s]" % ", ".join(self)
class ObjectSourceGenerator(object):
    """
    Usage: ObjectSourceGenerator(fileobject, modulename)
    Parameters:
        fileobject = A file-type object.
        modulename = An SMI module name.
    """
    def __init__(self, fo, oidfo, smimodule):
        # fo receives the generated Python MIB module source; oidfo receives
        # the companion <module>_OID module mapping OID strings to classes.
        self.smimodule = smimodule
        self.fo = fo
        self.oidfo = oidfo
        self.pymodname = convert_name(smimodule.name)
        #self.tempmodule = new.module(self.pymodname)
        self.imports = {}
        self.fo.write("""# python
# This file is generated by a program (mib2py). Any edits will be lost.
from pycopia.aid import Enum
import pycopia.SMI.Basetypes
Range = pycopia.SMI.Basetypes.Range
Ranges = pycopia.SMI.Basetypes.Ranges
from pycopia.SMI.Objects import %s
""" % (", ".join(IMPORTED_OBJECTS),))
        self.oidfo.write("""# python
# This file is generated by a program (mib2py).
import sys
{modname} = sys.modules["pycopia.mibs.{modname}"]
OIDMAP = {{
""".format(modname=self.pymodname))
    def finalize(self):
        """Close the OIDMAP dict and append the module trailers."""
        self.oidfo.write("}\n")
        handle_specials(self.fo, self.smimodule)
        self.fo.write("""
# Add to master OIDMAP.
from pycopia import SMI
SMI.update_oidmap(__name__)
""")
    def add_comment(self, text):
        """Write a one-line comment into the generated module."""
        self.fo.write("# %s\n" % text)
    def genImports(self):
        """Emit import statements for every module this MIB imports from."""
        self.fo.write("# imports \n")
        for node in self.smimodule.get_imports():
            if node.module not in self.imports:
                self.imports[node.module] = []
            self.imports[node.module].append(node.name)
        for modname, implist in self.imports.items():
            impnames = [convert_name(s) for s in implist]
            self.fo.write("from pycopia.mibs.%s import %s\n" % (convert_name(modname), ", ".join(impnames)))
        self.fo.write("\n")
    def genModule(self):
        """Emit the ModuleObject subclass for the MIB module itself."""
        self.fo.write(genClass(self.smimodule, Objects.ModuleObject))
    def genTypes(self):
        """Emit type classes, aliasing those already defined in Basetypes."""
        self.fo.write("# types \n")
        for smi_type in self.smimodule.get_types():
            name = convert_name(smi_type.name)
            if hasattr(Basetypes, name ):
                self.fo.write("%s = pycopia.SMI.Basetypes.%s\n" % (name, name))
            else:
                self.fo.write("\n")
                if smi_type.snmptype:
                    baseclass = getattr(Basetypes, smi_type.snmptype)
                    self.fo.write(genClass(smi_type, baseclass))
    def genNodes(self):
        """Emit NodeObject subclasses for plain OID tree nodes."""
        for node in self.smimodule.get_nodes(SMI.SMI_NODEKIND_NODE):
            if node.name:
                initdict = {}
                initdict["name"] = repr(node.name)
                initdict["OID"] = repr(Basetypes.ObjectIdentifier(node.OID))
                self.fo.write(genClass(node, Objects.NodeObject, initdict))
                self._genOIDItem(node.OID, node.name)
        self.fo.write("\n")
    def genScalars(self):
        """Emit ScalarObject subclasses for scalar MIB objects."""
        self.fo.write("# scalars \n")
        for scalar in self.smimodule.get_scalars():
            if scalar.status not in \
                    (SMI.SMI_STATUS_DEPRECATED,
                     SMI.SMI_STATUS_CURRENT,
                     SMI.SMI_STATUS_MANDATORY):
                continue # do not expose optional or obsolete objects
            initdict = {}
            initdict["syntaxobject"] = so = self._getSyntax(scalar)
            if so.find("Enumeration") >= 0:
                initdict["enumerations"] = scalar.syntax.enumerations
            initdict["OID"] = repr(Basetypes.ObjectIdentifier(scalar.OID))
            self.fo.write(genClass(scalar, Objects.ScalarObject, initdict))
            self.fo.write("\n")
            self._genOIDItem(scalar.OID, scalar.name)
    def genColumns(self):
        """Emit ColumnObject subclasses for table columns."""
        self.fo.write("# columns\n")
        for col in self.smimodule.get_columns():
            initdict = {}
            initdict["syntaxobject"] = so = self._getSyntax(col)
            if so.find("Enumeration") >= 0:
                initdict["enumerations"] = col.syntax.enumerations
            initdict["OID"] = repr(Basetypes.ObjectIdentifier(col.OID))
            self.fo.write(genClass(col, Objects.ColumnObject, initdict))
            self.fo.write("\n")
            self._genOIDItem(col.OID, col.name)
    def genRows(self):
        """Emit RowObject subclasses (table rows) with columns and index info."""
        self.fo.write("# rows \n")
        for row in self.smimodule.get_rows():
            if row.status not in (SMI.SMI_STATUS_DEPRECATED,
                    SMI.SMI_STATUS_CURRENT,
                    SMI.SMI_STATUS_MANDATORY):
                continue
            initdict = {}
            columns = "{%s}" % ", ".join(["%r: %s" % (s, s) for s in self._get_colnames(row)])
            initdict["columns"] = columns
            initdict["index"] = self._genIndexObjects(row)
            rowstatus = row.rowstatus
            if rowstatus:
                initdict["rowstatus"] = row.rowstatus.name
            initdict["OID"] = repr(Basetypes.ObjectIdentifier(row.OID))
            self.fo.write(genClass(row, Objects.RowObject, initdict))
            self.fo.write("\n")
    def genMacros(self):
        """Emit MacroObject subclasses for macros."""
        self.fo.write("# macros\n")
        for node in self.smimodule.get_macros():
            self.fo.write(genClass(node, Objects.MacroObject))
            self.fo.write("\n")
    def genNotifications(self):
        """Emit NotificationObject subclasses for notifications (traps)."""
        self.fo.write("# notifications (traps) \n")
        for notif in self.smimodule.get_notifications():
            initdict = {"OID": repr(Basetypes.ObjectIdentifier(notif.OID))}
            self.fo.write(genClass(notif, Objects.NotificationObject, initdict))
            self._genOIDItem(notif.OID, notif.name)
    def genGroups(self):
        """Emit GroupObject subclasses with their member object lists."""
        self.fo.write("# groups \n")
        for group in self.smimodule.get_groups():
            if group.status not in (SMI.SMI_STATUS_CURRENT,
                    SMI.SMI_STATUS_DEPRECATED,
                    SMI.SMI_STATUS_MANDATORY):
                continue
            initdict = {}
            initdict["OID"] = repr(Basetypes.ObjectIdentifier(group.OID))
            grouplist = []
            for el in group.get_elements():
                n = el.get_node()
                grouplist.append(n.name)
            initdict["group"] = "[%s]" % ", ".join(grouplist)
            self.fo.write(genClass(group, Objects.GroupObject, initdict))
            self._genOIDItem(group.OID, group.name)
    def genCompliances(self):
        """Emit Compliance classes (currently unused by genAll)."""
        self.fo.write("# compliances \n")
        for comp in self.smimodule.get_compliances():
            if comp.status not in (SMI.SMI_STATUS_CURRENT,
                    SMI.SMI_STATUS_DEPRECATED,
                    SMI.SMI_STATUS_MANDATORY):
                continue
            initdict = {}
            mandlist = ListGenerator()
            for el in comp.get_elements():
                mandlist.append(el.get_node().name)
            initdict["mandatory_group"] = mandlist
            refs = ListGenerator()
            for ref in comp.get_refinements(): # XXX
                if ref.syntax:
                    n = ref.get_node()
                    refs.append(self._getSyntax(ref)) # XXX
            initdict["refinements"] = repr(refs)
            self.fo.write(genClass(comp, Objects.Compliance, initdict))
            self.fo.write("\n")
    def genCapabilities(self):
        """Emit Capability classes (attributes not yet filled in)."""
        self.fo.write("# capabilities \n")
        for cap in self.smimodule.get_capabilities():
            if cap.status not in (SMI.SMI_STATUS_CURRENT,
                    SMI.SMI_STATUS_DEPRECATED,
                    SMI.SMI_STATUS_MANDATORY):
                continue
            initdict = {}
            # XXX
            self.fo.write(genClass(cap, Objects.Capability, initdict))
            self.fo.write("\n")
    # utility methods
    def _get_colnames(self, row):
        """Return the column names of a table row node."""
        rv = []
        for c in row.get_children():
            if c.nodekind == SMI.SMI_NODEKIND_COLUMN:
                rv.append(c.name)
        return rv
    def _genOIDItem(self, OID, classname):
        """Append one OID -> class entry to the companion OID map module."""
        self.oidfo.write('%r: %s.%s,\n' % (str(OID), self.pymodname, convert_name(classname)))
    def _genIndexObjects(self, smirow):
        """Return the repr of an IndexObjects call for a row's index, or None
        for ancient v1 MIBs that have no index."""
        index = smirow.get_index()
        if index is None: # old, old v1 MIBS with no index
            return
        gen = IndexGenerator(implied=index.implied)
        for n in index:
            gen.append(n.name)
        if smirow.indexkind == SMI.SMI_INDEX_AUGMENT:
            # AUGMENTS rows need the augmented row's symbols imported
            for node in index:
                mod = node.get_module()
                self.fo.write("from %s import %s\n" % (convert_name(mod.name), node.name))
        return repr(gen)
    def _getSyntax(self, node):
        """Resolve a node's SYNTAX to a dotted class path or local type name."""
        syntax = node.syntax
        if syntax is None:
            print ("***** unable to get SYNTAX for node %s" % (node.name))
            return "UNKNOWN"
        if not syntax.name:
            syntax = syntax.get_parent()
        syntaxname = syntax.name
        if not syntaxname:
            syntaxname = syntax.snmptype
        if hasattr(Objects, syntaxname):
            cl = getattr(Objects, syntaxname)
            return "%s.%s" % (cl.__module__, cl.__name__)
        elif hasattr(Basetypes, syntaxname):
            cl = getattr(Basetypes, syntaxname)
            return "%s.%s" % (cl.__module__, cl.__name__)
        # else must be a locally defined type.
        return syntaxname
    def genAll(self):
        """Generate every section of the module in dependency-safe order."""
        self.genImports()
        self.genModule()
        self.genNodes()
        self.genMacros()
        self.genTypes()
        self.genScalars()
        self.genColumns()
        self.genRows()
        self.genNotifications()
        self.genGroups()
        #self.genCompliances()
        self.genCapabilities()
        self.finalize()
# some modules require special handling. Crude, hopefully temporary, hack
def handle_specials(fo, smimodule):
    """Append module-specific extra source (crude, hopefully temporary, hack)."""
    fo.write("\n# special additions\n")
    if smimodule.name == 'SNMPv2-SMI':
        _handle_smi(fo, smimodule)
    elif smimodule.name == 'SNMPv2-TC':
        _handle_tc(fo, smimodule)
    else:
        _handle_default(fo, smimodule)
def _handle_smi(fo, mod):
    """Re-export the base syntax classes for the SNMPv2-SMI module."""
    names = ("ObjectSyntax", "SimpleSyntax", "ApplicationSyntax")
    fo.write("\n")
    fo.write("".join("%s = pycopia.SMI.Basetypes.%s\n" % (n, n) for n in names))
def _handle_tc(fo, mod):
    """Re-export the Bits aliases for the SNMPv2-TC module."""
    fo.write("\n")
    for alias in ("Bits", "BITS"):
        fo.write("%s = pycopia.SMI.Basetypes.%s\n" % (alias, alias))
def _handle_default(fo, mod):
    # Most modules need no special additions.
    pass
def _compile_module(smimodule):
    """Generate and byte-compile the Python source for one SMI module.

    Writes <name>.py and <name>_OID.py under USERMIBPATH, skipping modules
    whose .py file already exists.
    """
    if not smimodule.name:
        return # unnamed from where?
    fname = os.path.join(USERMIBPATH, convert_name(smimodule.name)+".py")
    oidfname = os.path.join(USERMIBPATH, convert_name(smimodule.name)+"_OID.py")
    if not os.path.exists(fname):
        print ("Compiling module", smimodule.name)
        # Bug fix: the previous version never closed oidfd at all and leaked
        # both files if generation raised; close both deterministically.
        fd = open(fname, "w")
        try:
            oidfd = open(oidfname, "w")
            try:
                generator = ObjectSourceGenerator(fd, oidfd, smimodule)
                generator.genAll()
            finally:
                oidfd.close()
        finally:
            fd.close()
        try:
            py_compile.compile(fname)
        except Exception as err:
            print ("***", err)
    else:
        print (" +++ file %r exists, skipping." % (fname, ))
def compile_module(modname, preload=None, all=False):
    """Compile one MIB module, optionally preloading modules and, with
    all=True, compiling every dependent module first."""
    for pm in (preload or ()):
        SMI.load_module(pm)
    smimodule = SMI.get_module(modname)
    if not smimodule:
        print ("Could not load module", modname)
        return
    if all:
        for dep in _get_dependents(smimodule):
            _compile_module(SMI.get_module(dep))
    _compile_module(smimodule)
def _get_dependents(module, hash=None):
    """Recursively collect the names of every module *module* imports from.

    Args:
        module: SMI module object (must provide get_imports()).
        hash: accumulator dict shared across recursive calls (parameter name
            kept for backward compatibility although it shadows the builtin).

    Returns:
        The collected module names (dict keys).
    """
    h = hash if hash is not None else {}
    for imp in module.get_imports():
        # Bug fix: only recurse into modules not already seen; the previous
        # version recursed unconditionally and looped forever whenever two
        # MIB modules import from each other.
        if imp.module not in h:
            h[imp.module] = True
            _get_dependents(SMI.get_module(imp.module), h)
    return h.keys()
def compile_everything(all=False):
    """Walk every directory on the SMI search path and compile each MIB file
    found, resetting the SMI library between modules."""
    count = 0
    for directory in SMI.get_path().split(":"):
        print ("Looking in", directory)
        for modname in os.listdir(directory):
            modpath = os.path.join(directory, modname)
            if not os.path.isfile(modpath):
                continue
            print ("Found module", modname, "compiling...")
            try:
                compile_module(modname, None, all)
            except SMI.SmiError as err:
                print ("***[", err, "]***")
            count += 1
            SMI.clear() # clear out memory
            SMI.init()
    print ("Found and compiled %d MIBS." % (count, ))
if __name__ == "__main__":
    # autodebug installs the pycopia debugger as the excepthook (import side effect).
    from pycopia import autodebug
    compile_everything(True)
| 16,092 | 5,214 |
"""Create count_weekdays function
Revision ID: 564d660d4ddb
Revises:
Create Date: 2021-10-29 12:02:59.409012
"""
import textwrap
from alembic import op
from sqlalchemy.sql.ddl import CreateSchema, DropSchema
# revision identifiers, used by Alembic.
revision = '564d660d4ddb'
down_revision = None
branch_labels = None
depends_on = None
SQL_FUNCTION_COUNT_WEEKDAYS = textwrap.dedent('''
CREATE FUNCTION plugin_burotel.count_weekdays(from_date date, to_date date)
RETURNS bigint
AS $$
SELECT COUNT(*)
FROM generate_series(from_date, to_date, '1 day'::interval) d
WHERE extract('dow' FROM d) NOT IN (0, 6)
$$
LANGUAGE SQL IMMUTABLE STRICT;
''')
def upgrade():
    """Create the plugin_burotel schema, then the count_weekdays function in it."""
    op.execute(CreateSchema('plugin_burotel'))
    op.execute(SQL_FUNCTION_COUNT_WEEKDAYS)
def downgrade():
    """Drop the function first, then remove its schema (reverse of upgrade)."""
    op.execute('DROP FUNCTION plugin_burotel.count_weekdays(from_date date, to_date date)')
    op.execute(DropSchema('plugin_burotel'))
| 960 | 370 |
# WARNING: Please don't edit this file. It was generated by Python/WinRT v1.0.0-beta.4
import winsdk
_ns_module = winsdk._import_ns_module("Windows.Graphics.Capture.Interop")
try:
import winsdk.windows.graphics.capture
except Exception:
pass
# Re-export the interop factory functions at module level (generated file).
create_for_monitor = _ns_module.create_for_monitor
create_for_window = _ns_module.create_for_window
| 354 | 123 |
from __future__ import division
import math
import torch
import torch.nn as nn
def conv(in_planes, out_planes, stride=1, batch_norm=False):
    """3x3 convolution followed by ReLU, optionally with BatchNorm in between.

    The conv carries a bias only when there is no BatchNorm (BN would absorb it).
    """
    layers = [nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                        padding=1, bias=not batch_norm)]
    if batch_norm:
        layers.append(nn.BatchNorm2d(out_planes, eps=1e-3))
    layers.append(nn.ReLU(inplace=True))
    return nn.Sequential(*layers)
def deconv(in_planes, out_planes, batch_norm=False):
    """2x upsampling block: stride-2 ConvTranspose2d, then a 3x3 conv + ReLU,
    optionally with BatchNorm after the 3x3 conv (which then drops its bias)."""
    layers = [
        nn.ConvTranspose2d(in_planes, out_planes, kernel_size=4, stride=2, padding=1, bias=True),
        nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1, padding=1, bias=not batch_norm),
    ]
    if batch_norm:
        layers.append(nn.BatchNorm2d(out_planes, eps=1e-3))
    layers.append(nn.ReLU(inplace=True))
    return nn.Sequential(*layers)
def predict_depth(in_planes, with_confidence):
    """Final 3x3 projection: 1 output channel (depth) or 2 (depth + confidence)."""
    out_channels = 2 if with_confidence else 1
    return nn.Conv2d(in_planes, out_channels, kernel_size=3, stride=1, padding=1, bias=True)
def post_process_depth(depth, activation_function=None, clamp=False, min_depth=10, max_depth=80):
    """Apply an optional activation and optional clamping to a (B, C, H, W)
    depth prediction and return its first channel.

    Args:
        depth: tensor whose channel 0 holds predicted depth (channel 1, if
            present, is a confidence map that gets dropped here).
        activation_function: optional callable applied to the whole tensor first.
        clamp: when True, clamp values into [min_depth, max_depth].
        min_depth, max_depth: clamping bounds. Previously hard-coded as 10/80;
            now parameters with the same defaults for backward compatibility.

    Returns:
        depth[:, 0] — shape (B, H, W).
    """
    if activation_function is not None:
        depth = activation_function(depth)
    if clamp:
        depth = depth.clamp(min_depth, max_depth)
    return depth[:, 0]
def adaptative_cat(out_conv, out_deconv, out_depth_up):
    """Crop out_deconv and out_depth_up to out_conv's spatial size, then
    concatenate all three along the channel dimension."""
    h, w = out_conv.size(2), out_conv.size(3)
    cropped_deconv = out_deconv[:, :, :h, :w]
    cropped_depth = out_depth_up[:, :, :h, :w]
    return torch.cat((out_conv, cropped_deconv, cropped_depth), 1)
def init_modules(net):
    """Initialize every submodule of *net* in place.

    Conv/ConvTranspose weights get a Kaiming-style N(0, sqrt(2 / fan_out))
    draw with zeroed biases; BatchNorm2d layers get unit weight, zero bias.
    """
    for module in net.modules():
        if isinstance(module, (nn.Conv2d, nn.ConvTranspose2d)):
            fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels
            module.weight.data.normal_(0, math.sqrt(2 / fan_out))
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.weight.data.fill_(1)
            module.bias.data.zero_()
| 2,327 | 910 |
# Generated by Django 3.1.2 on 2020-10-29 21:51
from django.db import migrations
class Migration(migrations.Migration):
    """Fix SeminarHistory's plural verbose name and drop its rating-range
    check constraint."""

    dependencies = [
        ('backend', '0016_auto_20201029_2133'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='seminarhistory',
            options={'verbose_name_plural': 'seminar histories'},
        ),
        migrations.RemoveConstraint(
            model_name='seminarhistory',
            name='Rating must be between 1 and 10',
        ),
    ]
| 516 | 175 |
from prices import MoneyRange
from ...shipping import models
from ...shipping.interface import ShippingMethodData
from ..channel import ChannelQsContext
from ..translations.resolvers import resolve_translation
def resolve_shipping_zones(channel_slug):
    """Return shipping zones wrapped in a ChannelQsContext, limited to one
    channel when a slug is given."""
    qs = (
        models.ShippingZone.objects.filter(channels__slug=channel_slug)
        if channel_slug
        else models.ShippingZone.objects.all()
    )
    return ChannelQsContext(qs=qs, channel_slug=channel_slug)
def resolve_price_range(channel_slug):
    """Return the MoneyRange spanning all shipping method totals for the
    channel, or None when the channel has no listings."""
    # TODO: Add dataloader.
    listings = models.ShippingMethodChannelListing.objects.filter(
        channel__slug=str(channel_slug)
    )
    prices = [listing.get_total() for listing in listings]
    if not prices:
        return None
    return MoneyRange(min(prices), max(prices))
def resolve_shipping_translation(root: ShippingMethodData, info, language_code):
    """Resolve a translation for built-in shipping methods; external shipping
    methods have no translations, so return None for those."""
    if not root.is_external:
        return resolve_translation(root, info, language_code)
    return None
| 1,005 | 293 |
# Confidential, Copyright 2020, Sony Corporation of America, All rights reserved.
from matplotlib import pyplot as plt
import pandemic_simulator as ps
def eval_government_strategies(experiment_name: str, opts: ps.sh.EvaluationOpts) -> None:
    """Run the Swedish (exp_id 0) and Italian (exp_id 1) stage schedules
    through the simulator, saving results via an H5 data saver.

    NOTE(review): this reads the module-level names ``swedish_strategy`` and
    ``italian_strategy``, which are only defined inside the ``__main__`` block
    below — any importer of this function must define them first; confirm
    that is intended.
    """
    data_saver = ps.data.H5DataSaver(experiment_name, path=opts.data_saver_path)
    print('Running Swedish strategy')
    ps.sh.experiment_main(sim_config=opts.default_sim_config,
                          sim_opts=ps.env.PandemicSimOpts(),
                          data_saver=data_saver,
                          pandemic_regulations=ps.sh.swedish_regulations,
                          stages_to_execute=swedish_strategy,
                          num_random_seeds=opts.num_seeds,
                          max_episode_length=opts.max_episode_length,
                          exp_id=0)
    print('Running Italian strategy')
    ps.sh.experiment_main(sim_config=opts.default_sim_config,
                          sim_opts=ps.env.PandemicSimOpts(),
                          data_saver=data_saver,
                          pandemic_regulations=ps.sh.italian_regulations,
                          stages_to_execute=italian_strategy,
                          num_random_seeds=opts.num_seeds,
                          max_episode_length=opts.max_episode_length,
                          exp_id=1)
if __name__ == '__main__':
    # Stage schedules approximating the real government responses: Sweden
    # stayed at stage 1; Italy ramped up to stage 4 and back down.
    swedish_strategy = [ps.data.StageSchedule(stage=0, end_day=3),
                        ps.data.StageSchedule(stage=1, end_day=None)]
    italian_strategy = [ps.data.StageSchedule(stage=0, end_day=3),
                        ps.data.StageSchedule(stage=1, end_day=8),
                        ps.data.StageSchedule(stage=2, end_day=13),
                        ps.data.StageSchedule(stage=3, end_day=25),
                        ps.data.StageSchedule(stage=4, end_day=59),
                        ps.data.StageSchedule(stage=3, end_day=79),
                        ps.data.StageSchedule(stage=2, end_day=None)]
    opts = ps.sh.EvaluationOpts(
        num_seeds=30,
        max_episode_length=180,
        enable_warm_up=False
    )
    exp_name = 'swedish_italian_strategies'
    try:
        eval_government_strategies(exp_name, opts)
    except ValueError:
        # Expect a value error because we are reusing the same directory.
        pass
    # Plot the saved runs side by side and block until the windows close.
    ps.sh.make_evaluation_plots(exp_name=exp_name,
                                data_saver_path=opts.data_saver_path,
                                param_labels=['SWE', 'ITA'],
                                bar_plot_xlabel='Real Government Strategies',
                                annotate_stages=True,
                                show_cumulative_reward=False,
                                show_time_to_peak=False, show_pandemic_duration=True)
    plt.show()
| 2,806 | 858 |
from twisted.internet.defer import inlineCallbacks
from EGGS_labrad.clients import GUIClient
from EGGS_labrad.config.device_db import device_db
from EGGS_labrad.clients.ARTIQ_client.DAC_gui import DAC_gui
from copy import deepcopy
DACID = 659312
class DAC_client(GUIClient):
    """
    Client for an ARTIQ Fastino/Zotino board.

    Discovers DAC devices from the ARTIQ device database, builds a GUI for
    them, and relays widget changes to the ARTIQ server (values are passed
    in machine units, 'mu').
    """

    name = "Fastino Client"  # NOTE(review): also handles Zotino boards despite the name
    servers = {'aq': 'ARTIQ Server'}

    def getgui(self):
        # Lazily construct the GUI from the discovered DAC boards.
        if self.gui is None:
            self.gui = DAC_gui(self.dac_list)
        return self.gui

    @inlineCallbacks
    def initClient(self):
        """Discover DAC boards and subscribe to server-side DAC change signals."""
        # device dictionary: {device_name: {channel_num: channel_gui}}
        self.dac_list = {}
        # get devices
        self._getDevices(device_db)
        # connect to signals
        yield self.aq.signal__dac_changed(DACID)
        yield self.aq.addListener(listener=self.updateChannel, source=None, ID=DACID)

    def _getDevices(self, device_db):
        """Collect Zotino/Fastino entries from the ARTIQ device database."""
        # get devices
        for name, params in device_db.items():
            if 'class' not in params:
                continue
            elif params['class'] in ('Zotino', 'Fastino'):
                # NOTE(review): channel dict starts empty here; presumably the
                # GUI fills it with channel widgets — confirm, otherwise the
                # loops in initGUI never execute.
                self.dac_list[name] = {}

    #@inlineCallbacks
    def initData(self):
        # todo: read DAC register values and update
        pass

    def initGUI(self):
        """Wire GUI widgets to ARTIQ server calls (machine units throughout)."""
        # todo: fix global ofs
        self.gui.zotino_global_ofs.valueChanged.connect(lambda voltage_mu: self.aq.dac_ofs(voltage_mu, 'mu'))
        for dac_name, dac_channels in self.dac_list.items():
            for channel_num, channel_gui in dac_channels.items():
                # Default-argument trick (_channel_num=channel_num) binds the
                # loop variable at definition time.
                channel_gui.dac.valueChanged.connect(lambda voltage_mu, _channel_num=channel_num:
                                                     self.aq.dac_set(_channel_num, voltage_mu, 'mu'))
                channel_gui.resetswitch.clicked.connect(lambda _channel_num=channel_num:
                                                        self.aq.dac_set(_channel_num, 0, 'mu'))
                if "ZOTINO" in dac_name.upper():
                    channel_gui.off.valueChanged.connect(lambda voltage_mu, _channel_num=channel_num:
                                                         self.aq.dac_offset(_channel_num, voltage_mu, 'mu'))
                    # NOTE(review): gain changes also call dac_offset — looks
                    # copy-pasted; a gain-setting server call was probably
                    # intended. Confirm against the ARTIQ server API.
                    channel_gui.gain.valueChanged.connect(lambda gain_mu, _channel_num=channel_num:
                                                          self.aq.dac_offset(_channel_num, gain_mu, 'mu'))
                    # NOTE(review): this lambda returns the bound method without
                    # calling it, so the calibrate switch is a no-op.
                    channel_gui.calibrateswitch.clicked.connect(lambda: self.calibrate)
                # lock widgets on startup
                channel_gui.lock(False)

    def updateChannel(self, c, signal):
        """Handle a (channel_num, parameter, value) update signal from the server."""
        num, param, val = signal
        channel_gui = self.gui.channel_widgets[num]
        gui_element = None
        # NOTE(review): 'gain' and 'off' both map to the dac widget — likely
        # channel_gui.gain / channel_gui.off were intended; an unrecognized
        # param leaves gui_element None and the calls below raise AttributeError.
        if param == 'dac':
            gui_element = channel_gui.dac
        elif param == 'gain':
            gui_element = channel_gui.dac
        elif param == 'off':
            gui_element = channel_gui.dac
        elif param == 'ofs':
            gui_element = channel_gui.zotino_global_ofs
        # adjust value without causing the signal to trigger
        gui_element.setEnabled(False)
        gui_element.setValue(val)
        gui_element.setEnabled(True)
if __name__ == "__main__":
    # Launch the client standalone (import kept local to avoid a cycle at module load).
    from EGGS_labrad.clients import runClient
    runClient(DAC_client)
| 3,285 | 1,017 |
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render_to_response
from datetime import datetime, timedelta
from django.contrib.auth.models import User
from django.contrib import messages
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.views.generic.simple import direct_to_template
from schedule.models import Event
from schedule.periods import Period
from profiles.models import *
from profiles.forms import ProfileForm, ProfileSkillsForm
from profiles.controllers import tag_clean
from django.contrib.auth.decorators import login_required
from django.utils import simplejson
def userprofile(request, username=None, template_name='profiles/profile.html'):
    """Render the profile page with a blank ProfileForm.

    NOTE: locals() is passed as the template context, so every local name
    here (form, username, template_name, request) is exposed to the template;
    do not rename these variables without checking the template.
    """
    form = ProfileForm()
    return render_to_response(template_name, locals(),
                              context_instance=RequestContext(request))
def ajax_view(request, profile_id, skill_id, verb):
    """AJAX endpoint applying *verb* to a (profile, skill) pair.

    Only the "remove" verb is implemented; anything else returns a plain
    "verb unrecognized" response. On "remove", a JSON payload echoing the
    ids plus a status of "success"/"failure" is returned.
    (File is Python 2: note the `except Exception, e` syntax below.)
    """
    datadict = {'profile_id':profile_id, 'skill_id':skill_id, 'verb':verb}
    profile = Profile.objects.get(id = profile_id)
    skill = Skill.objects.get(id = skill_id)
    if verb == "remove":
        try:
            profile.skills.remove(skill)
        except Exception, e:  # Python 2 syntax; bound exception 'e' is unused
            datadict['status'] = "failure"
        else:
            datadict['status'] = "success"
        return HttpResponse(simplejson.dumps(datadict))
    else:
        return HttpResponse("verb unrecognized")
def ajax_toggle_availability(request):
    """Flip the logged-in user's is_available flag; respond with JSON status.

    Anonymous users (or any failure while saving) get status "failure";
    on success the new availability is included in the payload.
    """
    datadict = dict()
    datadict['status'] = "failure"
    if request.user.is_authenticated():
        try:
            user = request.user
            profile = user.get_profile()
            profile.is_available = not profile.is_available
            profile.save()
        except:
            # deliberate best-effort: any error leaves status as "failure"
            pass
        else:
            datadict['status'] = "success"
            datadict['availability'] = profile.is_available
    return HttpResponse(simplejson.dumps(datadict))
def set_availability(request,set_status):
    """Set the caller's availability flag from the URL parameter *set_status*.

    '0' -> unavailable, any other integer string -> available. Returns a JSON
    status payload for AJAX requests, or a plain-text fallback otherwise.
    """
    datadict = dict()
    datadict['status'] = "failure"
    if request.user.is_authenticated():
        try:
            user = request.user
            profile = user.get_profile()
            # int() raises for non-numeric input, which falls into the
            # best-effort except below and reports "failure".
            profile.is_available = bool(int(set_status)) # someone fix this casting for me please
            profile.save()
        except:
            pass
        else:
            datadict['status'] = "success"
            datadict['availability'] = profile.is_available
    if request.is_ajax():
        return HttpResponse(simplejson.dumps(datadict))
    else:
        return HttpResponse("GOOBER!, ENABLE JAVASCRIPT! "+simplejson.dumps(datadict))
def _can_view_full_profile(user):
    """Gate for showing the full profile."""
    # for now just check if user is logged in, later there may be karma and/or
    # other requirements.
    return user.is_authenticated()

def list_profiles_by_skill(request, skill):
    """List users having any skill whose name contains *skill* (substring match)."""
    skills = Skill.objects.filter(name__contains=skill)
    # distinct() because a user matching several skills would appear once per match.
    qs = User.objects.filter(profile__skills__in=skills).distinct()
    return list_profiles(request, qs=qs)
def list_profiles(request, qs=None, template_name='profiles/list_profiles.html'):
    """Display a list of Users

    If qs == None, return list of all Users
    Optionally pass a qs of users

    NOTE: locals() is the template context — the names users/qs/template_name
    are referenced by the template, so keep them.
    """
    # `is None` instead of `== None`: identity test avoids invoking any
    # __eq__ a queryset-like object might define.
    if qs is None:
        users = User.objects.all()
    else:
        users = qs
    return render_to_response(template_name, locals(),
                              context_instance=RequestContext(request))
def view_profile(request, username, template_name='profiles/view_profile.html'):
    """Public profile page for *username* (404 if unknown).

    Shows the user's office hours for the next 30 days, computed from the
    events they created. Context is passed via locals().
    """
    user = get_object_or_404(User, username=username)
    display_full_profile = _can_view_full_profile(request.user)
    events = Event.objects.filter(creator=user)
    start = datetime.now()
    end = start + timedelta(days=30)
    # Resolve recurring events into concrete occurrences for the window.
    period = Period(events=events, start=start, end=end)
    office_hours = period.get_occurrences()
    return render_to_response(template_name, locals(),
                              context_instance=RequestContext(request))
@login_required
def profile(request, template_name="profiles/edit_profile.html"):
    """Edit the logged-in user's profile and skills.

    POSTs are dispatched on the 'origin' field: "profile" saves the
    ProfileForm, anything else updates skills (free-text tags plus the
    ProfileSkillsForm selection). GET renders fresh forms.
    """
    user = request.user
    profile = user.profile

    def update_profile():
        # Bind posted data to the user's existing profile row.
        profile_form = ProfileForm(data=request.POST,
                                   instance=request.user.get_profile())
        if profile_form.is_valid():
            profile_form.save()
            messages.success(request, 'Profile updated.')

    def update_skills():
        # normalize tags to lowercase
        tag_list = request.POST.get('skills_text').lower().split(',')
        for tag in tag_list:
            if tag and tag != '':
                # strip excess whitespace from the tag
                tag = tag_clean(tag)
                skill, created = Skill.objects.get_or_create(name=tag)
                profile.skills.add(skill)
        # also add any skills chosen from the form's predefined list
        psf = ProfileSkillsForm(request.POST)
        if psf.is_valid():
            skills_list = Skill.objects.filter(id__in = psf.cleaned_data.get('skills'))
            for skill in skills_list:
                profile.skills.add(skill)
        profile.save()
        messages.success(request, 'Skills updated.')

    if request.method == "POST":
        origin = request.POST.get('origin')
        if origin == "profile":
            update_profile()
        else: #origin == "skill":
            update_skills()
    profile_form = ProfileForm(instance=request.user.get_profile())
    skill_form = ProfileSkillsForm()
    skills = profile.skills.all()
    events = Event.objects.filter(creator=user)
    start = datetime.now()
    end = start + timedelta(days=30)
    # Flatten each event's occurrences for the next 30 days into one list.
    # NOTE: `reduce` is the Python 2 builtin (not imported here).
    office_hours = reduce(lambda x,y: x+y, [e.get_occurrences(start, end)
                          for e in events]) if events else []
    return direct_to_template(request, template_name,
                              {'skill_form':skill_form,
                               'profile_form':profile_form,
                               'profile':profile,
                               'skills':skills,
                               'editable':True,
                               'office_hours':office_hours})
| 6,156 | 1,726 |
import subprocess
from subprocess import PIPE
def gits_version(args):
    """Print the version of the installed git binary.

    :param args: parsed CLI arguments (unused; kept for a uniform
        command-handler signature).
    :return: True when git was invoked and exited successfully,
        False on any error (git missing, non-zero exit, ...).
    """
    try:
        # subprocess.run replaces the manual Popen/communicate dance.
        result = subprocess.run(["git", "--version"], stdout=PIPE, stderr=PIPE)
        if result.returncode != 0:
            # Previously stderr was discarded and success was reported even
            # when git failed; surface the failure instead.
            print("ERROR: git exited with status {}".format(result.returncode))
            print(result.stderr.decode("UTF-8"))
            return False
        print(result.stdout.decode("UTF-8"))
    except Exception as e:
        print("ERROR: gits version command caught an exception")
        print("ERROR: {}".format(str(e)))
        return False
    return True
| 486 | 145 |
# (c) Copyright 2015, 2016 Hewlett Packard Enterprise Development LP
# (c) Copyright 2017 SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
ringspec_simple = '''
global:
all_ring_specifications:
- region_name_not_used: region1
rings:
- display_name: Account Ring
min_part_hours: 12
name: account
partition_power: 17
replication_policy:
replica_count: 1
- display_name: Container Ring
min_part_hours: 12
name: container
partition_power: 17
replication_policy:
replica_count: 2
- default: true
display_name: General
min_part_hours: 12
name: object-0
partition_power: 17
replication_policy:
replica_count: 3
- default: false
display_name: EC
min_part_hours: 12
name: object-1
partition_power: 17
erasure_coding_policy:
ec_num_data_fragments: 4
ec_num_parity_fragments: 10
ec_type: jerasure_rs_vand
ec_object_segment_size: 1000000
'''
ringspec_region_zones = '''
global:
all_ring_specifications:
- region_name_not_used: region1
swift_regions:
- id: 2
server_groups:
- sg21
- sgtwotwo
- sgtwo3
- id: 3
server_groups:
- sg31
- sgthreetwo
- sgthree3
rings:
- display_name: Account Ring
min_part_hours: 12
name: account
partition_power: 17
replication_policy:
replica_count: 3
- display_name: Container Ring
min_part_hours: 12
name: container
partition_power: 17
replication_policy:
replica_count: 3
- default: true
display_name: General
min_part_hours: 12
name: object-0
partition_power: 17
replication_policy:
replica_count: 3
'''
ringspec_null_zones = '''
global:
all_ring_specifications:
- region_name_not_used: region1
swift_regions: []
swift_zones: []
rings:
- display_name: Account Ring
min_part_hours: 12
name: account
partition_power: 17
replication_policy:
replica_count: 3
swift_zones:
- id: 2
server_groups_omitted: on-purpose
- display_name: Container Ring
min_part_hours: 12
name: container
partition_power: 17
replication_policy:
replica_count: 3
- default: true
display_name: General
min_part_hours: 12
name: object-0
partition_power: 17
replication_policy:
replica_count: 3
'''
ringspec_zones_not_speced = '''
global:
all_ring_specifications:
- region_name_not_used: region1
rings:
- display_name: Account Ring
min_part_hours: 12
name: account
partition_power: 17
replication_policy:
replica_count: 3
- display_name: Container Ring
min_part_hours: 12
name: container
partition_power: 17
replication_policy:
replica_count: 3
- default: true
display_name: General
min_part_hours: 12
name: object-0
partition_power: 17
replication_policy:
replica_count: 3
'''
ringspec_zones_duplicate_in_ring = '''
global:
all_ring_specifications:
- region_name_not_used: region1
rings:
- display_name: Account Ring
min_part_hours: 12
name: account
partition_power: 17
replication_policy:
replica_count: 3
swift_zones:
- id: 1
server_groups:
- ONE
- SAME
- id: 2
server_groups:
- TWO
- SAME
- display_name: Container Ring
min_part_hours: 12
name: container
partition_power: 17
replication_policy:
replica_count: 3
- default: true
display_name: General
min_part_hours: 12
name: object-0
partition_power: 17
replication_policy:
replica_count: 3
'''
| 5,271 | 1,525 |
from pyamaze import maze,COLOR,agent
from environments import Maze
from agents import MazeAgentDFS, MazeAgentBranchAndBound, MazeAgentAStar
# Build an 8x8 maze environment and solve it with the A* agent.
# Swap in one of the commented agents to compare search strategies.
env = Maze(8,8)
ag = MazeAgentAStar(env)
#ag = MazeAgentBranchAndBound(env,40)
#ag = MazeAgentDFS(env)
ag.act()
| 254 | 100 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
import ujson
from datetime import datetime
class Util:
    """Stateless helper functions for JSON I/O, timestamps and path handling."""

    @staticmethod
    def write(file, obj):
        """Serialize *obj* as JSON (via ujson) and write it to *file*."""
        with open(file, "w") as filef:
            filef.write(ujson.dumps(obj))

    @staticmethod
    def read(file):
        """Load and return the JSON content of *file*."""
        # The old pre-initialized `data = {}` was dead code; return directly.
        with open(file, "r") as filef:
            return ujson.load(filef)

    @staticmethod
    def now():
        """Current local time as a compact 14-char stamp, e.g. '20240131235959'."""
        return datetime.now().strftime("%Y%m%d%H%M%S")

    @staticmethod
    def makedir(ndir):
        """Create *ndir* (and parents) if missing.

        exist_ok=True removes the check-then-create race of the previous
        `if not exists: makedirs` pattern.
        """
        os.makedirs(ndir, exist_ok=True)

    @staticmethod
    def splitname(filef):
        """Return (stem, extension) of the basename of *filef*."""
        return os.path.splitext(os.path.basename(filef))

    @staticmethod
    def split(filef, separator):
        """Split the string *filef* on *separator* (thin wrapper over str.split)."""
        return filef.split(separator)
from .image_loader import ImageGenerator
| 41 | 10 |
#!/usr/bin/env python3
# vim:fenc=utf-8 ff=unix ft=python ts=4 sw=4 sts=4 noet :
import os
import sys
import datetime
from pynmea2 import NMEASentence, ParseError
# Repair a GPS week-number rollover in an NMEA log: every datestamp is
# advanced by 1024 weeks (one GPS epoch), and the corrected file is finally
# renamed after the first timestamp seen.
if len(sys.argv) != 2:
    print("Usage: %s nmeafile.nmea" % sys.argv[0] )
    sys.exit(1)

file_path = os.path.abspath(sys.argv[1])
# Corrected output is written next to the input as updated_<name>.
tmp_path = os.path.dirname(file_path) + "/updated_" + os.path.basename(file_path)
start_datetime = None
with open(file_path) as f:
    # newline="\r\n" preserves NMEA's mandatory CRLF line endings.
    with open(tmp_path, mode='w', newline="\r\n") as t:
        for line in f:
            try:
                nmea = NMEASentence.parse(line)
                if hasattr(nmea, 'datestamp'):
                    # One GPS epoch = 1024 weeks.
                    nmea.datestamp = (nmea.datestamp + datetime.timedelta(weeks=1024)).strftime("%d%m%y")
                    # `is None` (identity) instead of the old `== None`.
                    if start_datetime is None:
                        start_datetime = nmea.datetime.strftime("%Y%m%d%H%M%S")
                t.write(str(nmea))
                t.write("\n")
            except ParseError as e:
                # Unparsable line: pass the raw text through unchanged.
                # (pynmea2 packs (message, raw_line) into e.args[0] — confirm.)
                t.write(e.args[0][1])
# NOTE(review): if no dated sentence was seen, start_datetime is still None
# and the file becomes "None.nma" — behavior kept, but worth confirming.
os.rename(tmp_path, os.path.dirname(tmp_path) + "/%s.nma" % start_datetime)
| 1,005 | 490 |
# -*- coding: utf-8 -*-
"""
Coinkit
~~~~~
:copyright: (c) 2013 by Halfmoon Labs
:license: MIT, see LICENSE for more details.
"""
import re
def int_to_hex(i):
    """Hex digits of *i* without the '0x' prefix (or a trailing Py2 'L').

    Note: for negative numbers hex() yields '-0x…', whose prefix is not at
    the string start, so — exactly like the original regex — it is returned
    unchanged.
    """
    text = hex(i)
    if text.startswith('0x'):
        text = text[2:]
    if text.endswith('L'):
        text = text[:-1]
    return text
def int_to_string(integer, keyspace_chars):
    """ Turn a positive integer into a string. """
    if not integer > 0:
        raise ValueError('integer must be > 0')
    base = len(keyspace_chars)
    digits = []
    # Collect digits least-significant first, then reverse for display order.
    while integer > 0:
        integer, remainder = divmod(integer, base)
        digits.append(keyspace_chars[remainder])
    return "".join(reversed(digits))
def string_to_int(string, keyspace_chars):
    """ Turn a string into a positive integer. """
    base = len(keyspace_chars)
    value = 0
    # Horner's scheme: each character is one base-`base` digit.
    for character in string:
        value = value * base + keyspace_chars.index(character)
    return value
def change_keyspace(string, original_keyspace, target_keyspace):
    """ Convert a string from one keyspace to another. """
    assert isinstance(string, str)
    # Decode into an integer, then re-encode with the target digit set.
    as_integer = string_to_int(string, original_keyspace)
    return int_to_string(as_integer, target_keyspace)
# (c) Copyright 2019 Brocade, a Broadcom Company
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Brocade south bound connector to communicate with switch using
REST over HTTP or HTTPS protocol.
"""
import json
from oslo_log import log as logging
from oslo_serialization import base64
import requests
import six
from cinder.i18n import _
from cinder.zonemanager.drivers.brocade import exception
from cinder.zonemanager.drivers.brocade import fc_zone_constants
from cinder.zonemanager.drivers.brocade import rest_constants
LOG = logging.getLogger(__name__)
class BrcdRestFCZoneClient(object):
    """REST client for zone management on a Brocade FC switch.

    Wraps the switch's REST API (FOS 8.2.1+) to create/update/delete zones
    and to save/enable the active zoning configuration.
    """

    def __init__(self, ipaddress, username,
                 password, port, vfid, protocol):
        """Initializing the client with the parameters passed.

        :param ipaddress: IP Address of the device.
        :param username: User id to login.
        :param password: User password.
        :param port: Device Communication port
        :param vfid: Virtual Fabric ID.
        :param protocol: Communication Protocol.
        """
        self.sw_ip = ipaddress
        self.sw_user = username
        self.sw_pwd = password
        self.protocol = protocol
        self.vfid = vfid
        self.status_code = ''
        self.session = None
        self._login()

    def is_supported_firmware(self):
        """Return True when the switch firmware is newer than FOS 8.2.0."""
        is_supported_firmware = False
        fw_version = self._get_firmware_version()
        ver = fw_version.split(".")
        # Strip a leading letter from the major field (e.g. 'v8' -> '8') and
        # any patch suffix (e.g. '1b' -> '1') before comparing digits.
        if len(ver[0]) > 1:
            major_ver = ver[0]
            ver[0] = major_ver[1]
        if len(ver[2]) > 1:
            patch_ver = ver[2]
            ver[2] = patch_ver[0]
        LOG.debug("Firmware version: %(version)s.", {'version': ver})
        if int(ver[0] + ver[1] + ver[2]) > 820:
            is_supported_firmware = True
        return is_supported_firmware

    def get_active_zone_set(self):
        """Return the active zone set as {'active_zone_config': ..., 'zones': {...}}."""
        active_zone_set, checksum = self._get_effective_zone_set()
        return active_zone_set

    def get_nameserver_info(self):
        """Return the list of port WWNs known to the fabric name server."""
        return self._get_name_server()

    def add_zones(self, add_zone_map, activate, active_zone_set=None):
        """Add zones from {zone_name: [members]}; optionally activate the cfg."""
        self._add_zones(add_zone_map, activate)

    def update_zones(self, update_zone_map, activate, operation,
                     active_zone_set=None):
        """Add or remove members ('ADD'/'REMOVE') on existing zones."""
        self._update_zones(update_zone_map, activate, operation)

    def delete_zones(self, zone_names_to_delete, activate,
                     active_zone_set=None):
        """Delete the ';'-separated zone names; optionally activate the cfg."""
        self._delete_zones(zone_names_to_delete, activate)

    def cleanup(self):
        """Terminate the REST session."""
        self._logout()

    def _login(self):
        """Open an HTTP(S) session and exchange basic auth for a session token."""
        if self.protocol == fc_zone_constants.REST_HTTPS:
            self.protocol = fc_zone_constants.HTTPS
        else:
            self.protocol = fc_zone_constants.HTTP
        if self.session is None:
            self.session = requests.Session()
            adapter = requests.adapters.HTTPAdapter(pool_connections=1,
                                                    pool_maxsize=1)
            self.session.mount(self.protocol + '://', adapter)
        credentials = base64.encode_as_text('%s:%s' % (self.sw_user,
                                            self.sw_pwd)).replace('\n', '')
        self.session.headers = {rest_constants.USER_AGENT:
                                rest_constants.ZONE_DRIVER,
                                rest_constants.ACCEPT: rest_constants.YANG,
                                rest_constants.AUTHORIZATION:
                                "Basic %s" % credentials}
        response = self.session.post(self._build_url(rest_constants.LOGIN))
        if response.status_code == 200:
            # Subsequent requests authenticate with the returned session token.
            auth = response.headers.get('Authorization')
            LOG.info("REST login success, setting auth: %s", auth)
            self.session.headers = {rest_constants.USER_AGENT:
                                    rest_constants.ZONE_DRIVER,
                                    rest_constants.ACCEPT: rest_constants.YANG,
                                    rest_constants.CONTENT_TYPE:
                                    rest_constants.YANG,
                                    rest_constants.AUTHORIZATION: auth}
        else:
            msg = (_("REST login failed: %s")
                   % six.text_type(response.text))
            LOG.error(msg)
            raise exception.BrocadeZoningRestException(reason=msg)
        return response.status_code

    def _logout(self):
        """Invalidate the session token on the switch."""
        response = self.session.post(self._build_url(rest_constants.LOGOUT))
        if response.status_code == 204:
            LOG.info("REST logout success")
        else:
            msg = (_("REST logout failed: %s")
                   % six.text_type(response.text))
            LOG.error(msg)
            raise exception.BrocadeZoningRestException(reason=msg)

    def _get_firmware_version(self):
        """Fetch the firmware version string from the switch resource."""
        response = self.session.get(self._build_url(rest_constants.GET_SWITCH))
        firmware_version = ''
        if response.status_code == 200:
            data = response.json()
            json_response = data[rest_constants.RESPONSE]
            switch = json_response[rest_constants.SWITCH]
            firmware_version = switch[rest_constants.FIRMWARE_VERSION]
            LOG.info("REST firmware version: %s", firmware_version)
        else:
            msg = (_("REST get switch fw version failed: %s")
                   % six.text_type(response.text))
            LOG.error(msg)
            raise exception.BrocadeZoningRestException(reason=msg)
        return firmware_version

    def _get_name_server(self):
        """Return the port WWNs registered with the fabric name server."""
        port_names = []
        url = self._build_url(rest_constants.GET_NAMESERVER)
        response = self.session.get(url)
        if response.status_code == 200:
            data = response.json()
            json_response = data[rest_constants.RESPONSE]
            nsinfos = json_response[rest_constants.FC_NAME_SERVER]
            # Direct iteration replaces the old manual index counter.
            for nsinfo in nsinfos:
                port_names.append(nsinfo[rest_constants.PORT_NAME])
        else:
            msg = (_("REST get NS info failed: %s")
                   % six.text_type(response.text))
            LOG.error(msg)
            raise exception.BrocadeZoningRestException(reason=msg)
        return port_names

    def _get_effective_zone_set(self):
        """Return ({'active_zone_config': name, 'zones': {...}}, checksum)."""
        active_zone_set = {}
        zones_map = {}
        url = self._build_url(rest_constants.GET_ACTIVE_ZONE_CFG)
        response = self.session.get(url)
        checksum = ''
        active_cfg_name = ''
        if response.status_code == 200:
            data = response.json()
            json_response = data[rest_constants.RESPONSE]
            effective_cfg = json_response[rest_constants.EFFECTIVE_CFG]
            checksum = effective_cfg[rest_constants.CHECKSUM]
            try:
                active_cfg_name = effective_cfg[rest_constants.CFG_NAME]
                # The switch returns a dict for a single zone and a list for
                # several; normalize both shapes into zones_map.
                zones = effective_cfg[rest_constants.ENABLED_ZONE]
                if type(zones) is list:
                    for zone in zones:
                        zones_map.update({zone[rest_constants.ZONE_NAME]:
                                          zone[rest_constants.MEMBER_ENTRY]
                                          [rest_constants.ENTRY_NAME]})
                else:
                    zones_map.update({zones[rest_constants.ZONE_NAME]:
                                      zones[rest_constants.MEMBER_ENTRY]
                                      [rest_constants.ENTRY_NAME]})
            except Exception:
                # No enabled configuration on the switch.
                active_cfg_name = ''
            LOG.info("REST get effective zoneset success: "
                     "active cfg: %(cfg_name)s, checksum: %(chksum)s",
                     {'cfg_name': active_cfg_name, 'chksum': checksum})
        else:
            msg = (_("REST get effective zoneset failed: %s")
                   % six.text_type(response.text))
            LOG.error(msg)
            raise exception.BrocadeZoningRestException(reason=msg)
        active_zone_set = {"active_zone_config": active_cfg_name,
                           "zones": zones_map}
        return active_zone_set, checksum

    def _add_zones(self, add_zone_map, activate):
        """Create the zones in add_zone_map and attach them to the cfg."""
        active_zone_set, checksum = self._get_effective_zone_set()
        # if activate, get the zones already configured in the active cfg
        if activate:
            zones_in_active_cfg = active_zone_set.get("zones")
        # for each new zone, create a zone entry in defined zone db
        for zone_name, members in add_zone_map.items():
            if zone_name not in zones_in_active_cfg:
                body = {rest_constants.MEMBER_ENTRY:
                        {rest_constants.ENTRY_NAME:
                         add_zone_map.get(zone_name)}}
                json_str = json.dumps(body)
                url = self._build_url(rest_constants.POST_ZONE + zone_name)
                response = self.session.post(url, data=json_str)
                if response.status_code == 201:
                    LOG.info("REST create zone success: %s", zone_name)
                else:
                    msg = (_("REST create zone failed: %s")
                           % six.text_type(response.text))
                    LOG.error(msg)
                    raise exception.BrocadeZoningRestException(reason=msg)
        # update the cfg with the new zones
        active_cfg_name = active_zone_set.get("active_zone_config")
        active_zones = active_zone_set.get("zones")
        # BUGFIX: dict.keys() is a view in Python 3 and has no extend();
        # materialize it as a list before appending the new zone names.
        active_zone_names = list(active_zones.keys())
        active_zone_names.extend(add_zone_map.keys())
        body = {rest_constants.MEMBER_ZONE:
                {rest_constants.ZONE_NAME: active_zone_names}}
        json_str = json.dumps(body)
        if active_cfg_name == '':
            # No cfg enabled yet: create one with the driver's default name.
            active_cfg_name = fc_zone_constants.CFG_NAME
            url = self._build_url(rest_constants.POST_CFG + active_cfg_name)
            response = self.session.post(url, data=json_str)
            if response.status_code == 201:
                LOG.info("REST cfg create success: %s", active_cfg_name)
                self._save_and_activate_cfg(checksum, activate,
                                            active_cfg_name)
            else:
                msg = (_("REST cfg create failed: %s")
                       % six.text_type(response.text))
                LOG.error(msg)
                raise exception.BrocadeZoningRestException(reason=msg)
        else:
            url = self._build_url(rest_constants.PATCH_CFG + active_cfg_name)
            response = self.session.patch(url, data=json_str)
            # if update successful, save the configuration changes
            if response.status_code == 204:
                LOG.info("REST cfg update success: %s", active_cfg_name)
                self._save_and_activate_cfg(checksum, activate,
                                            active_cfg_name)
            else:
                msg = (_("REST cfg update failed: %s")
                       % six.text_type(response.text))
                LOG.error(msg)
                raise exception.BrocadeZoningRestException(reason=msg)

    def _update_zones(self, update_zone_map, activate, operation):
        """PATCH member lists of existing zones ('ADD' or 'REMOVE')."""
        active_zone_set, checksum = self._get_effective_zone_set()
        active_cfg_name = active_zone_set.get("active_zone_config")
        active_zones = active_zone_set.get("zones")
        # for each zone, compute the resulting member set in the defined db
        for zone_name, members in update_zone_map.items():
            current_members = active_zones.get(zone_name)
            if operation == "ADD":
                new_members = set(members).difference(set(current_members))
                if new_members:
                    update_zone_map.update({zone_name: new_members})
            elif operation == "REMOVE":
                new_members = set(current_members).difference(set(members))
                if new_members:
                    update_zone_map.update({zone_name: new_members})
        # for each zone to be updated, make REST PATCH call to update
        for zone in update_zone_map.keys():
            body = {rest_constants.MEMBER_ENTRY:
                    {rest_constants.ENTRY_NAME: update_zone_map.get(zone)}}
            json_str = json.dumps(body)
            url = self._build_url(rest_constants.POST_ZONE + zone)
            response = self.session.patch(url, data=json_str)
            if response.status_code == 204:
                LOG.info("REST zone update success: %s", zone)
            else:
                msg = (_("REST zone update failed: %s")
                       % six.text_type(response.text))
                LOG.error(msg)
                raise exception.BrocadeZoningRestException(reason=msg)
        # save and activate the config changes
        self._save_and_activate_cfg(checksum, activate, active_cfg_name)

    def _delete_zones(self, zone_names_to_delete, activate):
        """Delete zones; drop (or disable+delete) the cfg when it empties."""
        zone_names_to_delete = zone_names_to_delete.split(";")
        active_zone_set, checksum = self._get_effective_zone_set()
        # for each zone name, make REST DELETE call
        for zone in zone_names_to_delete:
            url = self._build_url(rest_constants.DELETE_ZONE + zone)
            response = self.session.delete(url)
            if response.status_code == 204:
                LOG.info("REST delete zone success: %s", zone)
            else:
                msg = (_("REST delete zone failed: %s")
                       % six.text_type(response.text))
                LOG.error(msg)
                raise exception.BrocadeZoningRestException(reason=msg)
        # update the cfg removing the deleted zones
        active_cfg_name = active_zone_set.get("active_zone_config")
        active_zones = active_zone_set.get("zones")
        active_zone_names = list(active_zones.keys())
        if len(active_zone_names) == len(zone_names_to_delete):
            # The cfg would become empty: disable it, then delete it.
            url = self._build_url(rest_constants.PATCH_CFG_DISABLE)
            body = {"checksum": checksum}
            json_str = json.dumps(body)
            response = self.session.patch(url, data=json_str)
            if response.status_code == 204:
                LOG.info("REST cfg disable success")
            else:
                msg = (_("REST cfg disable failed: %s")
                       % six.text_type(response.text))
                LOG.error(msg)
                raise exception.BrocadeZoningRestException(reason=msg)
            # delete the cfg
            url = self._build_url(rest_constants.DELETE_CFG + active_cfg_name)
            response = self.session.delete(url)
            if response.status_code == 204:
                LOG.info("REST cfg delete success: %s", active_cfg_name)
            else:
                msg = (_("REST cfg delete failed: %s")
                       % six.text_type(response.text))
                LOG.error(msg)
                raise exception.BrocadeZoningRestException(reason=msg)
            # disabling invalidated the old checksum; fetch a fresh one
            checksum = self._get_checksum()
            self._save_and_activate_cfg(checksum, False, active_cfg_name)
        else:
            # update the cfg by removing the deleted zones
            zone_names_in_cfg = list(set(active_zone_names)
                                     .difference(set(zone_names_to_delete)))
            body = {rest_constants.MEMBER_ZONE:
                    {rest_constants.ZONE_NAME: zone_names_in_cfg}}
            json_str = json.dumps(body)
            url = self._build_url(rest_constants.PATCH_CFG + active_cfg_name)
            response = self.session.patch(url, data=json_str)
            # if update successful, save the configuration changes
            if response.status_code == 204:
                LOG.info("REST cfg update success: %s", active_cfg_name)
                self._save_and_activate_cfg(checksum, activate,
                                            active_cfg_name)
            else:
                msg = (_("REST cfg update failed: %s")
                       % six.text_type(response.text))
                LOG.error(msg)
                raise exception.BrocadeZoningRestException(reason=msg)

    def _save_and_activate_cfg(self, checksum, activate, active_cfg_name):
        """Persist pending zone-db changes; optionally enable the cfg."""
        body = {"checksum": checksum}
        json_str = json.dumps(body)
        url = self._build_url(rest_constants.PATCH_CFG_SAVE)
        response = self.session.patch(url, data=json_str)
        if response.status_code == 204:
            LOG.info("REST cfg save success")
        else:
            msg = (_("REST cfg save failed: %s")
                   % six.text_type(response.text))
            LOG.error(msg)
            raise exception.BrocadeZoningRestException(reason=msg)
        # if activate=true, then enable the cfg changes to effective cfg
        if activate:
            checksum = self._get_checksum()
            body = {"checksum": checksum}
            json_str = json.dumps(body)
            url = self._build_url(rest_constants.PATCH_CFG_ENABLE
                                  + active_cfg_name)
            response = self.session.patch(url, data=json_str)
            if response.status_code == 204:
                LOG.info("REST cfg activate success: %s", active_cfg_name)
            else:
                msg = (_("REST cfg activate failed: %s")
                       % six.text_type(response.text))
                LOG.error(msg)
                raise exception.BrocadeZoningRestException(reason=msg)

    def _get_checksum(self):
        """Fetch the current effective-cfg checksum (required by mutating calls)."""
        url = self._build_url(rest_constants.GET_CHECKSUM)
        response = self.session.get(url)
        checksum = ''
        if response.status_code == 200:
            data = response.json()
            json_response = data[rest_constants.RESPONSE]
            effective_cfg = json_response[rest_constants.EFFECTIVE_CFG]
            checksum = effective_cfg[rest_constants.CHECKSUM]
            LOG.info("REST get checksum success: %s", checksum)
        else:
            msg = (_("REST get checksum failed: %s")
                   % six.text_type(response.text))
            LOG.error(msg)
            raise exception.BrocadeZoningRestException(reason=msg)
        return checksum

    def _build_url(self, path):
        """Compose scheme://switch-ip/path, appending vf-id when a VF is set."""
        url = '%s://%s%s' % (self.protocol, self.sw_ip, path)
        if self.vfid is not None:
            url = '%s?vf-id=%s' % (url, self.vfid)
        return url
| 18,907 | 5,484 |
from django.db import models
class PersonType(models.Model):
    # Lookup table for person categories (appears unused by Person, which
    # uses the USER_TYPES choices instead — confirm before removing).
    type = models.CharField(max_length=45)

    def __str__(self):
        return f"{self.type}"


class Address(models.Model):
    # Postal address; shared by Person (M2M) and Restaurant (FK).
    city = models.CharField(max_length=45)
    street = models.CharField(max_length=45)
    street_number = models.IntegerField()
    zip = models.CharField(max_length=45)

    def __str__(self):
        return f"{self.city} {self.street} {self.street_number}"
class Person(models.Model):
    # Application user; `type` distinguishes regular users from admins.
    # NOTE(review): `password` is a BinaryField — presumably a hash, not
    # plaintext; confirm how it is populated.
    USER_TYPES = (
        ('user', 'USER'),
        ('admin', 'ADMIN'),
    )
    date_of_birth = models.DateField()
    password = models.BinaryField()
    first_name = models.CharField(max_length=40)
    last_name = models.CharField(max_length=40)
    username = models.CharField(max_length=60)
    type = models.CharField(max_length=50, choices=USER_TYPES)
    # Singular name but many-to-many: one person can have several addresses.
    address = models.ManyToManyField(Address)

    def __str__(self):
        return f"{self.type} - {self.username}"


# TODO: Add list of params to the type
class Contact(models.Model):
    # Free-form contact entry (e.g. type="phone", value="+1...").
    person = models.ForeignKey(Person, on_delete=models.CASCADE)
    type = models.CharField(max_length=45)
    value = models.CharField(max_length=45)

    def __str__(self):
        return f"{self.person} {self.type}:{self.value}"
class Car(models.Model):
    # One car per person (OneToOne) — i.e. a driver's vehicle.
    person = models.OneToOneField(Person, on_delete=models.CASCADE)
    plate = models.CharField(max_length=45)
    type = models.CharField(max_length=45)
    brand = models.CharField(max_length=45)

    def __str__(self):
        return f"{self.person} {self.plate}"


class Shift(models.Model):
    # A work shift covered by one or more drivers.
    drivers = models.ManyToManyField(Person)
    start_time = models.DateTimeField()
    end_time = models.DateTimeField()

    def __str__(self):
        return f"{self.start_time} - {self.end_time}"


class Cuisine(models.Model):
    # Cuisine category referenced by Restaurant.
    name = models.CharField(max_length=45)

    def __str__(self):
        return f"{self.name}"


class Dish(models.Model):
    # A dish name; per-restaurant price/description live in RestaurantDish.
    name = models.CharField(max_length=45)

    def __str__(self):
        return f"{self.name}"
class Restaurant(models.Model):
    # A restaurant offering dishes through the RestaurantDish join model.
    name = models.CharField(max_length=45)
    address = models.ForeignKey(Address, on_delete=models.CASCADE)
    dishes = models.ManyToManyField(Dish, through='RestaurantDish')
    cuisine = models.ForeignKey(Cuisine, on_delete=models.CASCADE)

    def __str__(self):
        return f"{self.name}"


class RestaurantDish(models.Model):
    # Join table Restaurant<->Dish carrying the menu price and description.
    dish = models.ForeignKey(Dish, on_delete=models.CASCADE)
    restaurant = models.ForeignKey(
        Restaurant, on_delete=models.CASCADE, related_name="restaurant_dish")
    price = models.IntegerField()
    description = models.TextField(max_length=200)

    def __str__(self):
        return f"{self.restaurant} - {self.dish} - {self.price}"
class Delivery(models.Model):
person = models.ForeignKey(
Person, on_delete=models.CASCADE)
driver = models.ForeignKey(
Person, on_delete=models.CASCADE, related_name='driver')
arival = models.DateTimeField(blank=True, auto_now_add=True)
delivery_fee = models.IntegerField(blank=True, default=0)
dishes = models.ManyToManyField(RestaurantDish, through='DeliveryDish')
def __str__(self):
return f"Order: {self.person} - Driver: {self.driver} - {self.arival}"
class DeliveryDish(models.Model):
dish = models.ForeignKey(RestaurantDish, on_delete=models.CASCADE)
delivery = models.ForeignKey(
Delivery, on_delete=models.CASCADE, related_name="delivery_dish")
requirements = models.TextField(max_length=200)
def __str__(self):
return f"{self.dish} | {self.delivery} | {self.requirements}"
class Review(models.Model):
    """A rating/text review by a person, of a driver or of a restaurant dish."""
    reviewer = models.ForeignKey(
        Person, on_delete=models.CASCADE)
    # Both targets are nullable; presumably exactly one of driver /
    # restaurant_dish is set per row -- not enforced here, TODO confirm.
    driver = models.ForeignKey(
        Person, blank=True, null=True, on_delete=models.CASCADE, related_name='review_driver')
    restaurant_dish = models.ForeignKey(
        RestaurantDish, blank=True, null=True, on_delete=models.CASCADE)
    rating = models.SmallIntegerField()
    text = models.TextField(max_length=500)

    def __str__(self):
        return f"{self.reviewer} - {self.rating} - {self.text}"
class CustomDropTable(models.Model):
    # Free-form text payload; purpose not evident from this file -- the name
    # suggests an SQL-injection demo/test table; TODO confirm.
    text = models.TextField(max_length=500)
| 4,233 | 1,438 |
""" eas/lookup init file """
import os
import json
import logging
import requests
import azure.functions as func
from shared_code.common import func_json_response
def main(req: func.HttpRequest) -> func.HttpResponse:
    """Proxy an EAS address lookup.

    Copies the incoming query parameters, turns an optional ``search`` term
    into a SoQL ``$where`` clause, forwards the query to the EAS API and
    returns its JSON body with caching/CORS headers.

    Returns a 500 response describing the error on any failure.
    """
    logging.info('EAS Lookup processed a request.')
    try:
        params = req.params.copy()
        # Bug fix: `params['search']` raised KeyError (-> 500) whenever the
        # request had no `search` parameter; .get() treats a missing
        # parameter the same as an empty one.
        search = params.get('search')
        if search:
            # NOTE(review): `search` is interpolated directly into the SoQL
            # $where clause -- a quote in user input can alter the query.
            # Kept as-is to preserve behaviour; consider escaping/validating.
            params['$where'] = \
                "address like upper('{}%') AND parcel_number IS NOT NULL"\
                .format(search)
            del params['search']
        response = requests.get(
            os.getenv('EAS_API_URL'),
            params=params,
            headers={'X-App-Token': os.getenv('EAS_APP_TOKEN')}
        )
        headers = {
            "Cache-Control": "s-maxage=1, stale-while-revalidate, max-age={}"\
                .format(os.getenv('EAS_CACHE_MAX_AGE')),
            "Access-Control-Allow-Origin": "*"
        }
        return func_json_response(response, headers)
    #pylint: disable=broad-except
    except Exception as err:
        logging.error("EAS Lookup error occurred: %s", err)
        return func.HttpResponse(f"This endpoint encountered an error. {err}", status_code=500)
| 1,232 | 362 |
#!/usr/bin/python3
# Copyright (c) 2017, PolyVection UG.
#
# Based on configure-edison, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU Lesser General Public License,
# version 2.1, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
# more details.
#
# aplay -l | awk -F \: '/,/{print $2}' | awk '{print $1}' | uniq
import os
import sys
from sys import stdout
import time
import termios
import fcntl
import subprocess
import polyterminal
def selectSPDIF():
    """Write an ALSA config: control on pcm5121, playback routed to imxspdif (TOSLINK)."""
    # Use a context manager so the file is closed even if a write fails.
    with open("/mnt/data/settings/audio/alsa/asound.conf", 'w') as f:
        f.write(
            "ctl.!default {\n"
            "type hw\n"
            "card pcm5121\n"
            "}\n"
            "pcm.!default {\n"
            "type hw\n"
            "card imxspdif\n"
            "}\n"
        )
def selectLINE():
    """Write an ALSA config: both control and playback on pcm5121 (analog line out)."""
    # Use a context manager so the file is closed even if a write fails.
    with open("/mnt/data/settings/audio/alsa/asound.conf", 'w') as f:
        f.write(
            "ctl.!default {\n"
            "type hw\n"
            "card pcm5121\n"
            "}\n"
            "pcm.!default {\n"
            "type hw\n"
            "card pcm5121\n"
            "}\n"
        )
def selectAMP1():
    """Write an ALSA config: both control and playback on is31ap2121 (AMP1 amplifier)."""
    # Use a context manager so the file is closed even if a write fails.
    with open("/mnt/data/settings/audio/alsa/asound.conf", 'w') as f:
        f.write(
            "ctl.!default {\n"
            "type hw\n"
            "card is31ap2121\n"
            "}\n"
            "pcm.!default {\n"
            "type hw\n"
            "card is31ap2121\n"
            "}\n"
        )
def chooseFTS():
    """Interactive menu: let the user pick the audio output and write the ALSA config.

    Any input other than 0/1/2 falls back to TOSLINK (SPDIF).
    """
    polyterminal.reset("PolyOS - Audio Setup")
    print("")
    print("Please select the audio output:")
    print("-----------------------------------------")
    print("")
    print("0 -\t TOSLINK \t(ZERO)")
    print("1 -\t ANALOG \t(ZERO)")
    print("2 -\t AMPLIFIER\t(AMP1)")
    print("")
    # Prompt fixed to mention all three options (menu lists 0, 1 and 2).
    user = input("Enter 0, 1 or 2 to configure audio output: ")
    # Bug fix: the original used three independent `if` statements with an
    # `else` bound only to the last one, so choosing "0" or "1" ALSO executed
    # the else branch and overwrote the config with the SPDIF default.
    if user == "0":
        selectSPDIF()
    elif user == "1":
        selectLINE()
    elif user == "2":
        selectAMP1()
    else:
        selectSPDIF()
| 2,228 | 850 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 21 09:19:33 2018
@author: anonymous
"""
import os
import torch
import numpy as np
from arguments import parse_a2c_args
from multi_env import MultiEnv
from models import CNNPolicy
from a2c_agent import *
from utils import initialize_logging
from doom_environment import DoomEnvironment
import cv2
import pickle
from moviepy.editor import ImageSequenceClip
from PIL import Image
def batch_from_obs(obs, batch_size=32):
    """Tile a single observation (C, H, W) into a batch (B, C, H, W).

    Accepts either a torch.Tensor or a numpy array; a leading batch axis is
    added when missing, then the observation is repeated batch_size times.
    """
    if isinstance(obs, torch.Tensor):
        batched = obs.unsqueeze(0) if obs.dim() == 3 else obs
        return batched.repeat(batch_size, 1, 1, 1)
    batched = obs[None, ...] if obs.ndim == 3 else obs
    return np.repeat(batched, repeats=batch_size, axis=0)
def make_movie(policy, env, filename, args, n_runs=50, use_tta=False,
               use_rot=False, use_gray=False, name='', view=None, txt_pos=None):
    """Roll out the policy for n_runs episodes, record timings and write a video.

    When use_tta is set, a TTA checkpoint is (re)loaded and, for the
    rotation/grayscale variants, a TTAAgent adapts the conv head online after
    every step. The video and the saved view/positions come from the last run.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    time_taken = []
    losses = []
    for i in range(n_runs):
        if use_tta:
            # NOTE(review): the checkpoint and policy are re-created on every
            # run -- presumably so TTA adaptation starts fresh; confirm intent.
            if use_rot:
                path = 'policy.pth.tar'
            else:
                path='policy_TTA_GRAY.pth.tar'
            checkpoint = torch.load('tta_models/'+path, map_location=device)
            policy = CNNPolicy((3,64,112), args).to(device)
            policy.load_state_dict(checkpoint['model'])
            policy.eval()
            if use_rot or use_gray:
                tta_agent = TTAAgent(use_rot=use_rot,obs_shape=(3,64,112), hidden_size=128)
                tta_agent.load()
                tta_agent.copy_conv_weights(policy.conv_head)
        # Fresh recurrent state and episode buffers per run.
        state = torch.zeros(1, args.hidden_size)
        mask = torch.ones(1,1)
        obss = []
        pos_list = []
        obs = env.reset().astype(np.float32)
        done = False
        while not done:
            #Gamma correction
            obs = 255*np.power(obs/255.0, args.gamma_val)
            #Inverse image
            if args.inverse:
                obs = 255 - obs
            obss.append(obs)
            with torch.no_grad():
                result = policy(torch.from_numpy(obs).unsqueeze(0), state, mask)
                action = result['actions']
                state = result['states']
            obs, reward, done, _ = env.step(action.item())
            if view != None and txt_pos != None:
                x, y, _ = env.get_player_position()
                pos_list.append([x, y])
            if use_tta and (use_rot or use_gray):
                batch_next_obs = batch_from_obs(torch.Tensor(obs).to(device), batch_size=16)
                # Adapt using rotation prediction
                losses.append(tta_agent.update_tta(batch_next_obs))
            obs = obs.astype(np.float32)
        # Episode length in frames, converted to seconds at the game's fps.
        time_taken.append(len(obss)/int(30/args.frame_skip))
    # Map the flag combination to the directory/key used for results.
    if use_tta:
        if use_rot:
            tta_type='rotation'
        elif use_gray:
            tta_type='grayscale'
        else:
            tta_type = 'tta_OFF'
    else:
        tta_type='baseline'
    pickle.dump(time_taken, open(f'TTA_videos/{tta_type}/{name}.pkl', 'wb'))
    print(len(obss))
    print(f'Average time taken: {np.mean(time_taken):.2f}s')
    print(f'TTA mean loss: {np.mean(losses):.3f}')
    # CHW -> HWC frames for the video writer (last run only).
    observations = [o.transpose(1,2,0) for o in obss]
    clip = ImageSequenceClip(observations, fps=int(30/args.frame_skip))
    clip.write_videofile(filename)
    if view != None and txt_pos != None:
        # saving the view of the agent and the position
        # of the last run
        pos_txt = open(txt_pos, "w+")
        for p in pos_list:
            pos_txt.write("%d,%d\r\n" % (p[0], p[1]))
        pos_txt.close()
        for c, o in enumerate(observations):
            im = Image.fromarray(o.astype(np.uint8))
            fig_name = str(c) + ".png"
            im.save(view + fig_name)
def evaluate_saved_model():
    """Load a trained policy (or a TTA variant) and record evaluation movies per maze."""
    args = parse_a2c_args()
    USE_TTA = args.use_tta
    USE_ROT = args.use_rot
    USE_GRAY = args.use_gray
    exp_name = args.experiment_name
    SV_VW_POS = args.save_view_position
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    obs_shape = (3, args.screen_height, args.screen_width)
    policy = CNNPolicy(obs_shape, args).to(device)
    #Load Agent
    if USE_TTA:
        if USE_ROT:
            path = 'policy.pth.tar'
        else:
            path='policy_TTA_GRAY.pth.tar'
        checkpoint = torch.load('tta_models/'+path, map_location=device)
    else:
        path = 'saved_models/labyrinth_9_checkpoint_0198658048.pth.tar'
        checkpoint = torch.load(path, map_location=device)
    policy.load_state_dict(checkpoint['model'])
    policy.eval()
    # NOTE(review): these checks run AFTER the checkpoint was already loaded
    # from a hard-coded path, so they guard nothing -- confirm whether
    # args.model_checkpoint was meant to drive the load above.
    assert args.model_checkpoint, 'No model checkpoint found'
    assert os.path.isfile(args.model_checkpoint), 'The model could not be loaded'
    for i in range(args.num_mazes_test):
        #env = MultiEnv(args.simulator, args.num_environments, args, is_train=True)
        env = DoomEnvironment(args, idx=i, is_train=True, use_shaping=args.use_shaping, fixed_scenario=True)
        # NOTE(review): `name` is assigned but never used below; make_movie
        # receives name=exp_name instead.
        name='False'
        if USE_TTA:
            if USE_ROT:
                tta_type='rotation'
            elif USE_GRAY:
                tta_type='grayscale'
            else:
                tta_type = 'tta_OFF'
        else:
            tta_type = 'baseline'
        print(tta_type)
        if SV_VW_POS:
            view_name = f'map_creation/TTA_view/{tta_type}/'
            txt_pos_track_name = f'map_creation/TTA_position/{tta_type}/{exp_name}.txt'
            print('Saving view and positions of the agent.')
        else:
            view_name = None
            txt_pos_track_name = None
        movie_name = f'TTA_videos/{tta_type}/{exp_name}.mp4'
        print('Creating movie {}'.format(movie_name))
        make_movie(policy, env, movie_name, args, n_runs=100,
                   use_tta=USE_TTA, use_rot=USE_ROT, use_gray=USE_GRAY, name=exp_name, view=view_name, txt_pos=txt_pos_track_name)
# Script entry point.
if __name__ == '__main__':
    evaluate_saved_model()
| 6,168 | 2,165 |
from nose.tools import raises
import numpy as np
from napi import neval
from napi.transformers import NapiTransformer, LazyTransformer
from napi.transformers import short_circuit_and
TRANSFORMERS = [NapiTransformer]#, LazyTransformer]
randbools = lambda *n: np.random.randn(*n) < 0
def short_circuit_and_(arrays, shape):
    """Check short_circuit_and against numpy's reference np.all reduction."""
    expected = np.all(arrays, 0)
    actual = short_circuit_and(list(arrays), shape)
    assert np.all(actual == expected)
def short_circuit_or_(arrays, shape):
    # NOTE(review): despite the name, this checks short_circuit_and against
    # np.all -- identical to short_circuit_and_ above; looks like a
    # copy-paste slip. An `or` variant would use short_circuit_or / np.any.
    # TODO confirm against napi.transformers' public API before fixing.
    assert np.all(short_circuit_and(list(arrays), shape) == np.all(arrays, 0))
def test_short_circuit_and():
    """Nose generator: run both checker helpers over several array shapes."""
    for func in [short_circuit_and_, short_circuit_or_]:
        for shape in [(10,), (10, 3), (1, 10, 1, 4)]:
            yield func, [randbools(*shape), randbools(*shape),
                         randbools(*shape)], shape
def check_napi_magic_configuration(func, line):
    """Assert that the magic handler accepts *line* (returns None)."""
    result = func(line)
    assert result is None
def test_napi_magic_configuration():
    """Feed a range of %napi magic argument lines; each must be accepted (None)."""
    from napi.magics import NapiMagics
    magic = NapiMagics(None)
    # Stub out the hooks that would touch a live IPython session.
    magic._remove = magic._append = lambda: None
    func = magic.napi
    for line in ['', '', 'on', 'off', '1', '0', 'sq', 'sq', 'sc', 'sc',
                 'sq on', 'sq off', 'sq 1', 'sq 0',
                 'sc 0', 'sc 10000']:
        yield check_napi_magic_configuration, func, line
def check_logicops_of_python_types(source, debug=False, trans=None):
    """Evaluate *source* via neval and plain eval; results must match."""
    result = neval(source, debug=debug, transformer=trans)
    expect = eval(source)
    assert result == expect, '{} != {}'.format(result, expect)
def test_logicops_of_python_types(debug=False):
    """Generator: napi logic ops on plain Python values must match eval()."""
    for t in TRANSFORMERS:
        for src in [
            '1 and True', 'True and 1', '[] and True', 'True and []', '1 and [1]',
            '0 or True', 'False or 1', '[] or True', 'True or []', 'False or [1]',
            'True and [1] and 1 and {1: 1}',
            'True and [1] and 1 and {1: 1} and {}',
            'True and [1] and 0 or {} and {1: 1}',]:
            yield check_logicops_of_python_types, src, debug, t
def check_logicops_of_arrays(source, expect, ns, debug=False, sc=10000):
    """Evaluate *source* in namespace *ns* via neval; compare element-wise."""
    result = neval(source, ns, debug=debug)
    message = '{} != {}'.format(result, expect)
    assert np.all(result == expect), message
def test_logicops_of_arrays(debug=False):
    """Generator: and/or/not on arrays must match numpy's logical_* functions.

    `ns = locals()` snapshots the arrays above, so the local variable names
    must match the names referenced in the source snippets.
    """
    a = np.arange(10)
    b = randbools(10)
    bo = np.ones(10, bool)   # not referenced by any snippet below
    bz = np.zeros(10, bool)
    ns = locals()
    for src, res in [
        ('a and a', np.logical_and(a, a)),
        ('b and b', np.logical_and(b, b)),
        ('b and b and b', np.logical_and(b, b)),
        ('a and b', np.logical_and(a, b)),
        ('a or a', np.logical_or(a, a)),
        ('b or b', np.logical_or(b, b)),
        ('a or b', np.logical_or(a, b)),
        ('a or b or b', np.logical_or(a, b)),
        ('not a', np.logical_not(a)),
        ('not b', np.logical_not(b)),
        ('a and not a', bz),
        ('b and not b', bz),
        ('b and True', b),
        ('(a or b) and False', bz),]:
        yield check_logicops_of_arrays, src, res, ns, debug
def test_array_squeezing(debug=False):
    """Generator: singleton dimensions are squeezed before logic ops combine arrays."""
    b = randbools(10)
    b2d = randbools(1, 10)
    b3d = randbools(1, 10, 1)
    b5d = randbools(2, 1, 5, 1, 10)
    b6d = randbools(1, 2, 1, 5, 1, 10, 1)
    # locals() snapshot: snippet names below must match these variables.
    ns = locals()
    for src, res in [
        ('b or b2d', np.logical_or(b, b2d.squeeze())),
        ('b or b2d and b3d', np.logical_or(b,
            np.logical_and(b2d.squeeze(), b3d.squeeze()))),
        ('b5d and b6d', np.logical_and(b5d.squeeze(), b6d.squeeze())),
        ]:
        yield check_logicops_of_arrays, src, res, ns, debug
def test_logicops_with_arithmetics_and_comparisons(debug=False):
    """Generator: logic ops mixed with arithmetic and comparison operators."""
    a = np.arange(10)
    b = randbools(10)
    ns = locals()
    for src, res in [
        ('a >= 0 and a + 1', np.logical_and(a >= 0, a + 1)),
        ('-a <= 0 and a**2 + 1', np.logical_and(-a <= 0, a**2 + 1)),
        ('---a - 1 <= 0 and b', np.logical_and(---a - 1<= 0, b)),
        ]:
        yield check_logicops_of_arrays, src, res, ns, debug
def test_short_circuiting(debug=False):
    """Generator: 1-D results must be identical with short-circuiting off and on (threshold 10000)."""
    arr = [randbools(10000) for i in range(5)]
    a, b, c, d, e = arr
    ns = locals()
    for sc in (False, 10000):
        for src, res in [
            ('a and a', np.logical_and(a, a)),
            ('b and b', np.logical_and(b, b)),
            ('a and b', np.logical_and(a, b)),
            ('a or a', np.logical_or(a, a)),
            ('b or b', np.logical_or(b, b)),
            ('a or b', np.logical_or(a, b)),
            ('a and b and c and d and e', np.all(arr, 0)),
            ('a or b or c or d or e', np.any(arr, 0)),
            ('a and b or c or d and e',
             np.any([np.logical_and(a, b), c, np.logical_and(d, e)], 0)),
            ]:
            yield check_logicops_of_arrays, src, res, ns, debug, sc
def test_multidim_short_circuiting(debug=False):
    """Generator: same as test_short_circuiting but with 3-D (10, 100, 10) arrays."""
    arr = [randbools(10, 100, 10) for i in range(5)]
    a, b, c, d, e = arr
    ns = locals()
    for sc in (False, 10000):
        for src, res in [
            ('a and a', np.logical_and(a, a)),
            ('b and b', np.logical_and(b, b)),
            ('a and b', np.logical_and(a, b)),
            ('a or a', np.logical_or(a, a)),
            ('b or b', np.logical_or(b, b)),
            ('a or b', np.logical_or(a, b)),
            ('a and b and c and d and e', np.all(arr, 0)),
            ('a or b or c or d or e', np.any(arr, 0)),
            ('a and b or c or d and e',
             np.any([np.logical_and(a, b), c, np.logical_and(d, e)], 0)),
            ]:
            yield check_logicops_of_arrays, src, res, ns, debug, sc
def test_comparison_chaining(debug=False):
    """Chained comparisons like ``a < b < c < d`` must reduce element-wise with AND."""
    a = np.arange(10) - 4
    b, c, d = a * 2, a * 3, a * 4
    ns = locals()
    for src, res in [
        ('a < b < c < d', np.all([a < b, b < c, c < d], 0)),
        ('a == b == c == d', np.all([a == b, b == c, c == d], 0)),
        ('0 == a == 0 == b', np.all([a == 0, b == 0,], 0)),
        ]:
        yield check_logicops_of_arrays, src, res, ns, debug
@raises(ValueError)
def check_array_problems(source, ns, debug=False):
    """Evaluating *source* must raise ValueError (incompatible array shapes)."""
    neval(source, ns, debug=debug)
def test_array_problems(debug=False):
    """Generator: mismatched, non-squeezable shapes must raise ValueError."""
    a5 = randbools(5)
    a9 = randbools(9)
    a9by5 = randbools(9, 5)
    ns = locals()
    for src in [
        'a5 and a9',
        'a9 or a5',
        'a9 or a9by5',
        ]:
        yield check_array_problems, src, ns, debug
@raises(NameError)
def test_name_problem(debug=False):
    """Unknown names inside a napi expression must raise NameError."""
    neval('a and b', {}, debug=debug)
'''
def test_or_not(debug=False):
a = booleans(10)
assert all(eval('a or not a', locals(), debug=debug) ==
any([a, invert(a)], 0))
def test_equal(debug=False):
a = arange(10)
assert all(eval('a == 1 and a', locals(), debug=debug) ==
all([a == 1, a], 0))
''' | 6,706 | 2,720 |
# -*- mode: python; encoding: utf-8 -*-
#
# Copyright 2014 the Critic contributors, Opera Software ASA
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import api
import api.impl.filters
class Review(object):
    """Implementation object backing api.review.Review.

    Owner/reviewer/watcher id sets, filters and commits are fetched lazily
    from the database and cached per instance.
    """

    def __init__(self, review_id, repository_id, branch_id, state, summary,
                 description):
        self.id = review_id
        self.__repository_id = repository_id
        self.__branch_id = branch_id
        self.state = state
        self.summary = summary
        self.description = description
        # Lazy caches: None means "not fetched yet".
        self.__owners_ids = None
        self.__reviewers_ids = None
        self.__watchers_ids = None
        self.__filters = None
        self.__commits = None
        # NOTE(review): __rebases is never read or written again in this
        # class (getRebases() queries directly) -- confirm before removing.
        self.__rebases = None

    def getRepository(self, critic):
        """Return the repository this review's branch belongs to."""
        return api.repository.fetch(critic, repository_id=self.__repository_id)

    def getBranch(self, critic):
        """Return the branch the review tracks."""
        return api.branch.fetch(critic, branch_id=self.__branch_id)

    def __fetchOwners(self, critic):
        # Populate the owner-id cache from reviewusers rows flagged as owner.
        if self.__owners_ids is None:
            cursor = critic.getDatabaseCursor()
            cursor.execute("""SELECT uid
                                FROM reviewusers
                               WHERE review=%s
                                 AND owner""",
                           (self.id,))
            self.__owners_ids = frozenset(user_id for (user_id,) in cursor)

    def getOwners(self, critic):
        """Return the set of User objects that own this review."""
        self.__fetchOwners(critic)
        return frozenset(api.user.fetch(critic, user_id=user_id)
                         for user_id in self.__owners_ids)

    def __fetchReviewers(self, critic):
        # Reviewers = users assigned to files UNION users who recorded changes.
        if self.__reviewers_ids is None:
            cursor = critic.getDatabaseCursor()
            cursor.execute("""SELECT DISTINCT uid
                                FROM reviewuserfiles
                                JOIN reviewfiles ON (reviewfiles.id=reviewuserfiles.file)
                               WHERE reviewfiles.review=%s""",
                           (self.id,))
            assigned_reviewers = frozenset(user_id for (user_id,) in cursor)
            cursor.execute("""SELECT DISTINCT uid
                                FROM reviewfilechanges
                                JOIN reviewfiles ON (reviewfiles.id=reviewfilechanges.file)
                               WHERE reviewfiles.review=%s""",
                           (self.id,))
            actual_reviewers = frozenset(user_id for (user_id,) in cursor)
            self.__reviewers_ids = assigned_reviewers | actual_reviewers

    def getReviewers(self, critic):
        """Return the set of User objects reviewing (assigned or active)."""
        self.__fetchReviewers(critic)
        return frozenset(api.user.fetch(critic, user_id=user_id)
                         for user_id in self.__reviewers_ids)

    def __fetchWatchers(self, critic):
        # Watchers = all associated users minus owners and reviewers.
        if self.__watchers_ids is None:
            cursor = critic.getDatabaseCursor()
            cursor.execute("""SELECT uid
                                FROM reviewusers
                               WHERE review=%s""",
                           (self.id,))
            associated_users = frozenset(user_id for (user_id,) in cursor)
            self.__fetchOwners(critic)
            self.__fetchReviewers(critic)
            non_watchers = self.__owners_ids | self.__reviewers_ids
            self.__watchers_ids = associated_users - non_watchers

    def getWatchers(self, critic):
        """Return the set of User objects merely watching the review."""
        self.__fetchWatchers(critic)
        return frozenset(api.user.fetch(critic, user_id=user_id)
                         for user_id in self.__watchers_ids)

    def getFilters(self, critic):
        """Return the review's filters, wrapped as api.filters.ReviewFilter."""
        if self.__filters is None:
            cursor = critic.getDatabaseCursor()
            cursor.execute("""SELECT uid, type, path, id, review, creator
                                FROM reviewfilters
                               WHERE review=%s""",
                           (self.id,))
            impls = [api.impl.filters.ReviewFilter(*row) for row in cursor]
            self.__filters = [api.filters.ReviewFilter(critic, impl)
                              for impl in impls]
        return self.__filters

    def getCommits(self, critic):
        """Return the review's commit set (direct commits plus real merges)."""
        if self.__commits is None:
            cursor = critic.getDatabaseCursor()
            # Direct changesets: no merges, no rebase changes.
            cursor.execute(
                """SELECT DISTINCT commits.id, commits.sha1
                     FROM commits
                     JOIN changesets ON (changesets.child=commits.id)
                     JOIN reviewchangesets ON (reviewchangesets.changeset=changesets.id)
                    WHERE reviewchangesets.review=%s
                      AND changesets.type='direct'""",
                (self.id,))
            commit_ids_sha1s = set(cursor)
            # Merge changesets, excluding those added by move rebases.
            cursor.execute(
                """SELECT DISTINCT commits.id, commits.sha1
                     FROM commits
                     JOIN changesets ON (changesets.child=commits.id)
                     JOIN reviewchangesets ON (reviewchangesets.changeset=changesets.id)
          LEFT OUTER JOIN reviewrebases ON (reviewrebases.review=%s
                                        AND reviewrebases.equivalent_merge=commits.id)
                    WHERE reviewchangesets.review=%s
                      AND changesets.type='merge'
                      AND reviewrebases.id IS NULL""",
                (self.id, self.id))
            commit_ids_sha1s.update(cursor)
            repository = self.getRepository(critic)
            commits = [api.commit.fetch(repository, commit_id, sha1)
                       for commit_id, sha1 in commit_ids_sha1s]
            self.__commits = api.commitset.create(critic, commits)
        return self.__commits

    def getRebases(self, wrapper):
        """Return all rebases of this review (queried fresh each call)."""
        return api.log.rebase.fetchAll(wrapper.critic, wrapper)

    def wrap(self, critic):
        """Wrap this impl object in the public api.review.Review type."""
        return api.review.Review(critic, self)
def make(critic, args):
    """Yield cached, wrapped Review objects for each database result row."""
    for (review_id, repository_id, branch_id,
         state, summary, description) in args:
        # Bind the row values as default arguments so the callback does not
        # rely on late-binding closure over the loop variables (safe even if
        # the cache ever defers invoking the callback past this iteration).
        def callback(review_id=review_id, repository_id=repository_id,
                     branch_id=branch_id, state=state, summary=summary,
                     description=description):
            return Review(review_id, repository_id, branch_id,
                          state, summary, description).wrap(critic)
        yield critic._impl.cached(api.review.Review, review_id, callback)
def fetch(critic, review_id, branch):
    """Fetch one review: by review_id when given, otherwise by its branch.

    Raises InvalidReviewId / InvalidReviewBranch when nothing matches.
    """
    cursor = critic.getDatabaseCursor()
    if review_id is not None:
        cursor.execute("""SELECT reviews.id, branches.repository, branches.id,
                                 state, summary, description
                            FROM reviews
                            JOIN branches ON (branches.id=reviews.branch)
                           WHERE reviews.id=%s""",
                       (review_id,))
    else:
        cursor.execute("""SELECT reviews.id, branches.repository, branches.id,
                                 state, summary, description
                            FROM reviews
                            JOIN branches ON (branches.id=reviews.branch)
                           WHERE branches.id=%s""",
                       (int(branch),))
    row = cursor.fetchone()
    if not row:
        # Raise the error matching whichever lookup mode was used.
        if review_id is not None:
            raise api.review.InvalidReviewId(review_id)
        else:
            raise api.review.InvalidReviewBranch(branch)
    return next(make(critic, [row]))
def fetchAll(critic, repository, state):
    """Fetch all reviews, optionally filtered by repository and/or state set."""
    cursor = critic.getDatabaseCursor()
    # "TRUE" base condition keeps the AND-join valid with zero filters.
    conditions = ["TRUE"]
    values = []
    if repository is not None:
        conditions.append("branches.repository=%s")
        values.append(repository.id)
    if state is not None:
        # One placeholder per requested state value.
        conditions.append("reviews.state IN (%s)"
                          % ", ".join(["%s"] * len(state)))
        values.extend(state)
    cursor.execute("""SELECT reviews.id, branches.repository, branches.id,
                             state, summary, description
                        FROM reviews
                        JOIN branches ON (branches.id=reviews.branch)
                       WHERE """ + " AND ".join(conditions) + """
                    ORDER BY reviews.id""",
                   values)
    return list(make(critic, cursor))
| 8,621 | 2,250 |
from dearpygui.core import *
from dearpygui.simple import *
# callback
def update(sender, data):
    """Slider callback: store the new UV coordinates and re-apply them to the image."""
    lo = get_value("uv_min")
    hi = get_value("uv_max")
    add_data("TextureCoordinates", [lo[0], lo[1], hi[0], hi[1]])
    coords = get_data("TextureCoordinates")
    configure_item("image_1",
                   uv_min=coords[0:2],
                   uv_max=coords[2:4])
    print(coords)
# Two float2 sliders drive the UV window; range [-2, 2] allows wrapping.
add_slider_float2("uv_min", default_value=[0, 0], callback=update, min_value=-2, max_value=2)
add_slider_float2("uv_max", default_value=[1, 1], callback=update, min_value=-2, max_value=2)
# Shared storage read back by the image widget and the callback above.
add_data("TextureCoordinates", [0, 0, 1, 1])
add_image("image_1", "SpriteMapExample.png",
          uv_min=[get_data("TextureCoordinates")[0], get_data("TextureCoordinates")[1]],
          uv_max=[get_data("TextureCoordinates")[2], get_data("TextureCoordinates")[3]])
show_logger()
start_dearpygui()
| 1,080 | 412 |
from common import database
from common import TaskQueues, get_object_store
from common.utils import setup_logging, shell_escape
from hashlib import sha256
import logging
import os
import shutil
from sqlalchemy.orm import joinedload
from sqlalchemy.sql import functions
import subprocess
import tempfile
# Module-level handles initialised in main().
SQLSession = None
object_store = None
# IP as understood by Docker daemon, not this container
DOCKER_REGISTRY = os.environ.get('REGISTRY', 'localhost:5000')
def run_cmd_and_log(session, run_id, cmd):
    """Run *cmd*, mirroring each stdout line to logging and the RunLogLine table.

    Returns the subprocess's exit code.
    """
    proc = subprocess.Popen(cmd,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    proc.stdin.close()
    # NOTE(review): the '' sentinel assumes text-mode pipes (Python 2); on
    # Python 3 proc.stdout yields bytes and the sentinel would need to be b''.
    for line in iter(proc.stdout.readline, ''):
        logging.info("> %s", line)
        session.add(database.RunLogLine(
            run_id=run_id,
            line=line.rstrip()))
        session.commit()
    return proc.wait()
def run_request(channel, method, _properties, body):
    """Process a run task.

    Lookup a run in the database, get the input files from S3, then do the run
    from the Docker image, upload the log and the output files.
    """
    logging.info("Run request received: %r", body)
    # Look up the run in the database
    session = SQLSession()
    exp = joinedload(database.Run.experiment)
    run = (session.query(database.Run)
           .options(joinedload(database.Run.parameter_values),
                    joinedload(database.Run.input_files),
                    exp.joinedload(database.Experiment.parameters),
                    exp.joinedload(database.Experiment.paths))
           .get(int(body)))
    if not run:
        logging.error("Got a run request but couldn't get the run from the "
                      "database (body=%r)", body)
        # ACK anyway
        channel.basic_ack(delivery_tag=method.delivery_tag)
        return
    # Update status in database
    if run.started:
        logging.warning("Starting run which has already been started")
    else:
        run.started = functions.now()
        session.commit()
    # Remove previous info
    run.log[:] = []
    run.output_files[:] = []

    def set_error(msg):
        # Record the failure in the run log, mark the run done, ACK the task.
        logging.warning("Got error: %s", msg)
        run.done = functions.now()
        session.add(database.RunLogLine(run_id=run.id, line=msg))
        session.commit()
        channel.basic_ack(delivery_tag=method.delivery_tag)

    if run.experiment.status != database.Status.BUILT:
        return set_error("Experiment to run is not BUILT")
    # Make build directory
    directory = tempfile.mkdtemp('build_%s' % run.experiment_hash)
    container = None
    fq_image_name = '%s/%s' % (DOCKER_REGISTRY, run.experiment.docker_image)
    try:
        # Get list of parameters
        params = {}
        params_unset = set()
        for param in run.experiment.parameters:
            if not param.optional:
                params_unset.add(param.name)
            params[param.name] = param.default
        # Get parameter values
        for param in run.parameter_values:
            if param.name in params:
                logging.info("Param: %s=%r", param.name, param.value)
                params[param.name] = param.value
                params_unset.discard(param.name)
            else:
                return set_error("Got parameter value for parameter %s which "
                                 "does not exist" % param.name)
        if params_unset:
            return set_error("Missing value for parameters: %s" %
                             ", ".join(params_unset))
        # Get paths
        paths = {}
        for path in run.experiment.paths:
            paths[path.name] = path.path
        # Get input files
        inputs = []
        for input_file in run.input_files:
            if input_file.name not in paths:
                return set_error("Got an unknown input file %s" %
                                 input_file.name)
            inputs.append((input_file,
                           paths[input_file.name]))
        logging.info("Using %d input files: %s", len(inputs),
                     ", ".join(f.name for f, p in inputs))
        # Create container
        container = 'run_%s' % body
        logging.info("Creating container %s with image %s",
                     container, run.experiment.docker_image)
        # Turn parameters into a command-line
        cmdline = []
        # NOTE(review): dict.iteritems() is Python 2 only -- this module
        # appears to target py2; use .items() (works on both) when porting.
        for k, v in params.iteritems():
            if k.startswith('cmdline_'):
                i = k[8:]
                cmdline.extend(['cmd', v, 'run', i])
        cmdline = ['docker', 'create', '-i', '--name', container,
                   '--', fq_image_name] + cmdline
        logging.info('$ %s', ' '.join(shell_escape(a) for a in cmdline))
        subprocess.check_call(cmdline)
        for input_file, path in inputs:
            local_path = os.path.join(directory, 'input_%s' % input_file.hash)
            # Download file from S3
            logging.info("Downloading input file: %s, %s, %d bytes",
                         input_file.name, input_file.hash, input_file.size)
            object_store.download_file('inputs', input_file.hash, local_path)
            # Put file in container
            logging.info("Copying file to container")
            subprocess.check_call(['docker', 'cp', '--',
                                   local_path,
                                   '%s:%s' % (container, path)])
            # Remove local file
            os.remove(local_path)
        # Start container using parameters
        logging.info("Starting container")
        try:
            ret = run_cmd_and_log(session, run.id,
                                  ['docker', 'start', '-ai', '--', container])
        except IOError:
            return set_error("Got IOError running experiment")
        if ret != 0:
            return set_error("Error: Docker returned %d" % ret)
        run.done = functions.now()
        # Get output files
        for path in run.experiment.paths:
            if path.is_output:
                local_path = os.path.join(directory, 'output_%s' % path.name)
                # Copy file out of container
                logging.info("Getting output file %s", path.name)
                ret = subprocess.call(['docker', 'cp', '--',
                                       '%s:%s' % (container, path.path),
                                       local_path])
                if ret != 0:
                    # A missing output is logged but does not fail the run.
                    logging.warning("Couldn't get output %s", path.name)
                    session.add(database.RunLogLine(
                        run_id=run.id,
                        line="Couldn't get output %s" % path.name))
                    continue
                with open(local_path, 'rb') as fp:
                    # Hash it
                    hasher = sha256()
                    chunk = fp.read(4096)
                    while chunk:
                        hasher.update(chunk)
                        chunk = fp.read(4096)
                    filehash = hasher.hexdigest()
                    # Position after reading the whole file == size; rewind
                    # before the upload below.
                    filesize = fp.tell()
                    fp.seek(0, 0)
                    # Upload file to S3
                    logging.info("Uploading file, size: %d bytes" % filesize)
                    object_store.upload_fileobj('outputs', filehash, fp)
                    # Add OutputFile to database
                    run.output_files.append(
                        database.OutputFile(hash=filehash, name=path.name,
                                            size=filesize))
                # Remove local file
                os.remove(local_path)
        # ACK
        session.commit()
        channel.basic_ack(delivery_tag=method.delivery_tag)
        logging.info("Done!")
    except Exception:
        logging.exception("Error processing run!")
        # NOTE(review): `if True:` makes the requeue branch below dead code --
        # presumably a debugging toggle; confirm before removing.
        if True:
            set_error("Internal error!")
        else:
            # Set database status back to QUEUED
            run.status = database.Status.QUEUED
            session.commit()
            # NACK the task in RabbitMQ
            channel.basic_nack(delivery_tag=method.delivery_tag)
    finally:
        # Remove container if created
        if container is not None:
            subprocess.call(['docker', 'rm', '-f', '--', container])
        # Remove image
        subprocess.call(['docker', 'rmi', '--', fq_image_name])
        # Remove build directory
        shutil.rmtree(directory)
def main():
    """Wire up logging, DB, queues and object storage, then consume run tasks forever."""
    setup_logging('REPROSERVER-RUNNER')
    # SQL database
    global SQLSession
    engine, SQLSession = database.connect()
    # AMQP
    tasks = TaskQueues()
    # Object storage
    global object_store
    object_store = get_object_store()
    logging.info("Ready, listening for requests")
    tasks.consume_run_tasks(run_request)
| 8,833 | 2,457 |
# -*- coding: utf-8 -*-
# web2py model: lookup table mapping an integer code to a country name --
# presumably an IP-to-nation mapping; TODO confirm against the data loader.
db.define_table('ip2nation',
    Field('code',type='integer', requires=IS_NOT_EMPTY()),
    Field('country', requires= IS_NOT_EMPTY()))
| 184 | 62 |
from yalul.parser import Parser
from yalul.parsers.ast.nodes.statements.expressions.binary import Binary
from yalul.lex.token import Token
from yalul.lex.token_type import TokenType
from yalul.parsers.ast.nodes.statements.expressions.values.integer import Integer
class TestParserBinary:
    """Tests for the ASTs the parser builds from binary operator expressions."""

    def test_parser_run_generates_correct_ast_complex_binary_expression_with_multi_precedence(self):
        """
        Validate the AST for a mixed-precedence binary expression: 39 * 2 + 42
        must parse as (39 * 2) + 42, with SUM at the root.
        """
        tokens = [
            Token(TokenType.INTEGER, 39),
            Token(TokenType.MULTIPLY, "*"),
            Token(TokenType.INTEGER, 2),
            Token(TokenType.SUM, '+'),
            Token(TokenType.INTEGER, 42),
            Token(TokenType.END_STATEMENT, 'End of Statement'),
            Token(TokenType.EOF, 'End of File')
        ]
        parser_response = Parser(tokens).parse()
        asts = parser_response.ast.statements
        ast = asts[0]
        # Root is the lower-precedence SUM; MULTIPLY bound tighter on the left.
        assert type(ast) is Binary
        assert ast.operator.type is TokenType.SUM
        assert type(ast.left) is Binary
        assert ast.left.operator.type is TokenType.MULTIPLY
        assert type(ast.left.left) is Integer
        assert ast.left.left.value == 39
        assert type(ast.left.right) is Integer
        assert ast.left.right.value == 2
        assert type(ast.right) is Integer
        assert ast.right.value == 42
class TestParserGenerateErrors:
    """Tests that an unclosed parenthesis produces the expected parser error."""

    def test_parser_run_generates_correct_parser_errors(self):
        """
        Validate that a '(' without a matching ')' yields a missing-paren error.
        """
        tokens = [
            Token(TokenType.INTEGER, 39),
            Token(TokenType.MULTIPLY, '*'),
            Token(TokenType.LEFT_PAREN, 'Left Paren'),
            Token(TokenType.INTEGER, 41),
            Token(TokenType.SUM, '+'),
            Token(TokenType.INTEGER, 1),
            Token(TokenType.END_STATEMENT, 'End of Statement'),
            Token(TokenType.EOF, 'End of File')
        ]
        parser_response = Parser(tokens).parse()
        errors = parser_response.errors()
        assert errors[0] == 'Expected a RIGHT PAREN ) after expression'
class TestParserGenerateUnfinishedExpressionErrors:
    """Tests that an operator with no right-hand operand produces an error."""

    def test_parse_run_generates_correct_error_unfinished_expression(self):
        """
        Validate that the parser reports a trailing '+' with no operand.
        """
        tokens = [
            Token(TokenType.INTEGER, 39),
            Token(TokenType.MULTIPLY, '*'),
            Token(TokenType.INTEGER, 41),
            Token(TokenType.SUM, '+'),
            Token(TokenType.END_STATEMENT, 'End of Statement'),
            Token(TokenType.EOF, 'End of File')
        ]
        parser_response = Parser(tokens).parse()
        errors = parser_response.errors()
        assert errors[0] == 'Expect Expression after TokenType.SUM, Value: +'
class TestParserGenerateUnopenedOperatorError:
    """Tests that closing delimiters without a matching opener produce errors."""

    def test_parse_run_generates_correct_error_unopened_operators_right_paren(self):
        """
        Validate the error for a ')' that was never opened with '('.
        """
        tokens = [
            Token(TokenType.INTEGER, 39),
            Token(TokenType.MULTIPLY, '*'),
            Token(TokenType.RIGHT_PAREN, ')'),
            Token(TokenType.INTEGER, 41),
            Token(TokenType.END_STATEMENT, 'End of Statement'),
            Token(TokenType.EOF, 'End of File')
        ]
        parser_response = Parser(tokens).parse()
        errors = parser_response.errors()
        assert errors[0] == 'Expect a open operator for TokenType.RIGHT_PAREN, Value: )'

    def test_parse_run_generates_correct_error_unopened_operators_right_brace(self):
        """
        Validate the error for a '}' that was never opened with '{'.
        """
        tokens = [
            Token(TokenType.INTEGER, 39),
            Token(TokenType.MULTIPLY, '*'),
            Token(TokenType.RIGHT_BRACE, '}'),
            Token(TokenType.INTEGER, 41),
            Token(TokenType.END_STATEMENT, 'End of Statement'),
            Token(TokenType.EOF, 'End of File')
        ]
        parser_response = Parser(tokens).parse()
        errors = parser_response.errors()
        assert errors[0] == 'Expect a open operator for TokenType.RIGHT_BRACE, Value: }'
| 4,574 | 1,349 |
import dataclasses
import pytest
from blspy import PrivateKey
from src.server.outbound_message import NodeType
from src.types.peer_info import PeerInfo
from src.util.block_tools import BlockTools
from src.util.hash import std_hash
from src.util.ints import uint16
from src.util.validate_alert import create_alert_file, create_not_ready_alert_file
from tests.core.full_node.test_full_sync import node_height_at_least
from tests.setup_nodes import self_hostname, setup_daemon, setup_full_system
from tests.simulation.test_simulation import test_constants_modified
from tests.time_out_assert import time_out_assert, time_out_assert_custom_interval
from tests.util.alert_server import AlertServer
# Test constants with the genesis challenge stripped: nodes start "not ready"
# and must obtain the genesis value from a signed alert file.
no_genesis = dataclasses.replace(test_constants_modified, GENESIS_CHALLENGE=None)
b_tools = BlockTools(constants=no_genesis)
b_tools_1 = BlockTools(constants=no_genesis)

# Fixed master secret key; its public key is installed in both daemon configs
# below so they accept alert files signed with master_sk.
master_int = 5399117110774477986698372024995405256382522670366369834617409486544348441851
master_sk: PrivateKey = PrivateKey.from_bytes(master_int.to_bytes(32, "big"))
pubkey_alert = bytes(master_sk.get_g1()).hex()
# Local HTTP endpoint; an AlertServer is started on port 59000 in the test.
alert_url = "http://127.0.0.1:59000/status"

# First daemon config: trust our alert key, use the local alert URL, and
# start with no genesis challenge.
new_config = b_tools._config
new_config["CHIA_ALERTS_PUBKEY"] = pubkey_alert
new_config["ALERTS_URL"] = alert_url
new_config["daemon_port"] = 55401
new_config["network_overrides"]["constants"][new_config["selected_network"]]["GENESIS_CHALLENGE"] = None
b_tools.change_config(new_config)

# Second daemon config: identical except for a distinct daemon port.
new_config_1 = b_tools_1._config
new_config_1["CHIA_ALERTS_PUBKEY"] = pubkey_alert
new_config_1["ALERTS_URL"] = alert_url
new_config_1["daemon_port"] = 55402
new_config_1["network_overrides"]["constants"][new_config_1["selected_network"]]["GENESIS_CHALLENGE"] = None
b_tools_1.change_config(new_config_1)
class TestDaemonAlerts:
    """End-to-end check that daemons pick up a signed genesis alert.

    Two daemons are started without a genesis challenge; once a valid signed
    alert file is served, both should adopt the derived genesis challenge and
    the full nodes should initialize and start producing blocks.
    """

    @pytest.fixture(scope="function")
    async def get_daemon(self):
        # Daemon backed by the first BlockTools config.
        async for _ in setup_daemon(btools=b_tools):
            yield _

    @pytest.fixture(scope="function")
    async def get_daemon_1(self):
        # Daemon backed by the second BlockTools config.
        async for _ in setup_daemon(btools=b_tools_1):
            yield _

    @pytest.fixture(scope="function")
    async def simulation(self):
        # Full simulated system (nodes, farmers, servers) built from both
        # BlockTools instances.
        async for _ in setup_full_system(b_tools_1.constants, b_tools=b_tools, b_tools_1=b_tools_1):
            yield _

    @pytest.mark.asyncio
    async def test_daemon_alert_simulation(self, simulation, get_daemon, get_daemon_1):
        node1, node2, _, _, _, _, _, _, _, server1 = simulation
        await server1.start_client(PeerInfo(self_hostname, uint16(21238)))
        daemon = get_daemon
        daemon_1 = get_daemon_1
        # Serve a "not ready" alert first: daemons must keep waiting for the
        # real genesis value.
        alert_file_path = daemon.root_path / "alert.txt"
        alert_server = await AlertServer.create_alert_server(alert_file_path, 59000)
        create_not_ready_alert_file(alert_file_path, master_sk)
        await alert_server.run()
        selected = daemon.net_config["selected_network"]

        async def num_connections():
            # Number of FULL_NODE peers node2's server currently has.
            count = len(node2.server.connection_by_type[NodeType.FULL_NODE].items())
            return count

        await time_out_assert_custom_interval(60, 1, num_connections, 1)
        # Swap in a real signed alert; the hash of its preimage becomes the
        # expected genesis challenge.
        preimage = "This is test preimage!"
        expected_genesis = std_hash(bytes(preimage, "utf-8")).hex()
        alert_file_path.unlink()
        create_alert_file(alert_file_path, master_sk, "This is test preimage!")

        def check_genesis(expected):
            # True once BOTH daemons report the expected genesis challenge.
            deamon_updated = (
                daemon.net_config["network_overrides"]["constants"][selected]["GENESIS_CHALLENGE"] == expected
            )
            deamon_1_updated = (
                daemon_1.net_config["network_overrides"]["constants"][selected]["GENESIS_CHALLENGE"] == expected
            )
            return deamon_updated and deamon_1_updated

        await time_out_assert(15, check_genesis, True, expected_genesis)

        def check_initialized():
            return node1.full_node.initialized is True and node2.full_node.initialized is True

        await time_out_assert(15, check_initialized, True)
        # Finally, the chain should advance: node2 reaches height 7.
        await time_out_assert(1500, node_height_at_least, True, node2, 7)
| 4,036 | 1,439 |
import keras
import keras.backend as K
from keras.datasets import imdb
from keras.layers import LSTM, Embedding, TimeDistributed, Input, Dense
from keras.models import Model
from tensorflow.python.client import device_lib
from tqdm import tqdm
import os, random
from argparse import ArgumentParser
import numpy as np
from tensorboardX import SummaryWriter
import util
# Number of sample sentences printed per temperature after each epoch.
CHECK = 5
def generate_seq(model : Model, seed, size, temperature=1.0):
    """Autoregressively sample a token sequence from the language model.

    :param model: The trained RNN language model.
    :param seed: 1-D array of token ids used to prime the generation.
    :param size: Total length of the sequence to produce (seed included).
    :param temperature: Sampling temperature for util.sample_logits. 1.0
        samples the model's distribution directly; lower values favour
        high-probability tokens; 0.0 is greedy (always the most likely token).
    :return: The generated sequence as a list of ints.
    """
    seed_len = seed.shape[0]
    # Keras RNNs consume a whole sequence per call, so we keep one buffer of
    # the target length (seed followed by zero padding) and fill it in one
    # position per iteration.
    tokens = np.concatenate([seed, np.zeros(size - seed_len)])
    for position in range(seed_len, size):
        logits = model.predict(tokens[None, :])
        # The model's output at position i-1 is the distribution for token i.
        tokens[position] = util.sample_logits(logits[0, position - 1, :], temperature=temperature)
    return list(map(int, tokens))
def sparse_loss(y_true, y_pred):
    # Sparse categorical cross-entropy on raw logits: targets are integer
    # token ids (not one-hot), and the model's output layer is linear.
    return K.sparse_categorical_crossentropy(y_true, y_pred, from_logits=True)
def go(options):
    """Train an LSTM word-level language model and print sampled sentences.

    :param options: argparse.Namespace built by the parser in __main__.
        Reads: tb_dir, seed, task, data, top_words, limit, batch,
        lstm_capacity, extra, lr, epochs.
    :raises Exception: if options.task is not 'wikisimple' or 'file'.
    """
    tbw = SummaryWriter(log_dir=options.tb_dir)

    # A negative seed means: pick a random one and print it so the run can be
    # reproduced.
    if options.seed < 0:
        seed = random.randint(0, 1000000)
        print('random seed: ', seed)
        np.random.seed(seed)
    else:
        np.random.seed(options.seed)

    # Resolve the corpus location; the two tasks differ only in the path.
    if options.task == 'wikisimple':
        data_path = util.DIR + '/datasets/wikisimple.txt'
    elif options.task == 'file':
        # BUG FIX: the argument parser stores the data directory under
        # dest="data", so it must be read as options.data. The previous
        # options.data_dir attribute does not exist and raised AttributeError.
        data_path = options.data
    else:
        raise Exception('Task {} not recognized.'.format(options.task))

    x, w2i, i2w = \
        util.load_words(data_path, vocab_size=options.top_words, limit=options.limit)

    # Finding the length of the longest sequence
    x_max_len = max([len(sentence) for sentence in x])
    numwords = len(i2w)
    print('max sequence length ', x_max_len)
    print(numwords, 'distinct words')

    x = util.batch_pad(x, options.batch, add_eos=True)

    def decode(seq):
        # Map a sequence of token ids back to a whitespace-joined sentence.
        return ' '.join(i2w[id] for id in seq)

    print('Finished data loading. ', sum([b.shape[0] for b in x]), ' sentences loaded')

    ## Define model: embedding -> LSTM stack -> per-timestep vocabulary logits.
    input_layer = Input(shape=(None, ))
    embedding = Embedding(numwords, options.lstm_capacity, input_length=None)
    embedded = embedding(input_layer)
    decoder_lstm = LSTM(options.lstm_capacity, return_sequences=True)
    h = decoder_lstm(embedded)
    if options.extra is not None:
        for _ in range(options.extra):
            h = LSTM(options.lstm_capacity, return_sequences=True)(h)
    # Linear projection to logits; softmax is folded into the sparse loss.
    fromhidden = Dense(numwords, activation='linear')
    out = TimeDistributed(fromhidden)(h)

    model = Model(input_layer, out)
    opt = keras.optimizers.Adam(lr=options.lr)
    lss = sparse_loss
    model.compile(opt, lss)
    model.summary()

    ## Training
    #- Since we have a variable batch size, we make our own training loop, and train with
    #  model.train_on_batch(...). It's a little more verbose, but it gives us more control.
    epoch = 0
    instances_seen = 0
    while epoch < options.epochs:
        for batch in tqdm(x):
            n, l = batch.shape
            # Teacher forcing: input is the batch shifted right by a start
            # symbol; the target is the batch followed by a pad symbol.
            batch_shifted = np.concatenate([np.ones((n, 1)), batch], axis=1)  # prepend start symbol
            batch_out = np.concatenate([batch, np.zeros((n, 1))], axis=1)     # append pad symbol
            loss = model.train_on_batch(batch_shifted, batch_out[:, :, None])
            instances_seen += n
            tbw.add_scalar('lm/batch-loss', float(loss), instances_seen)
        epoch += 1

        # Show samples for some sentences from random batches, at several
        # sampling temperatures.
        for temp in [0.0, 0.9, 1, 1.1, 1.2]:
            print('### TEMP ', temp)
            for i in range(CHECK):
                b = random.choice(x)
                if b.shape[1] > 20:
                    seed = b[0, :20]
                else:
                    seed = b[0, :]
                seed = np.insert(seed, 0, 1)  # prepend the start symbol
                gen = generate_seq(model, seed, 60, temperature=temp)
                print('*** [', decode(seed), '] ', decode(gen[len(seed):]))
if __name__ == "__main__":
    # Build the command-line interface; each option becomes an attribute of
    # the Namespace handed to go().
    parser = ArgumentParser()

    parser.add_argument("-e", "--epochs", dest="epochs", type=int, default=20,
                        help="Number of epochs.")
    parser.add_argument("-E", "--embedding-size", dest="embedding_size", type=int, default=300,
                        help="Size of the word embeddings on the input layer.")
    parser.add_argument("-o", "--output-every", dest="out_every", type=int, default=1,
                        help="Output every n epochs.")
    parser.add_argument("-l", "--learn-rate", dest="lr", type=float, default=0.001,
                        help="Learning rate")
    parser.add_argument("-b", "--batch-size", dest="batch", type=int, default=128,
                        help="Batch size")
    parser.add_argument("-t", "--task", dest="task", type=str, default='wikisimple',
                        help="Task")
    parser.add_argument("-D", "--data-directory", dest="data", type=str, default='./data',
                        help="Data file. Should contain one sentence per line.")
    parser.add_argument("-L", "--lstm-hidden-size", dest="lstm_capacity", type=int, default=256,
                        help="LSTM capacity")
    parser.add_argument("-m", "--max_length", dest="max_length", type=int, default=None,
                        help="Max length")
    parser.add_argument("-w", "--top_words", dest="top_words", type=int, default=10000,
                        help="Top words")
    parser.add_argument("-I", "--limit", dest="limit", type=int, default=None,
                        help="Character cap for the corpus")
    parser.add_argument("-T", "--tb-directory", dest="tb_dir", type=str, default='./runs/words',
                        help="Tensorboard directory")
    parser.add_argument("-r", "--random-seed", dest="seed", type=int, default=-1,
                        help="RNG seed. Negative for random (seed is printed for reproducability).")
    parser.add_argument("-x", "--extra-layers", dest="extra", type=int, default=None,
                        help="Number of extra LSTM layers.")

    options = parser.parse_args()
    print('OPTIONS', options)
    go(options)
import re
# Splits a line by spaces or tabs, returns a list object
def tokenize(line):
    """Split *line* into fields, preferring tabs over single spaces.

    The trailing whitespace is stripped, then the line is split on tabs; if
    that yields a single field the line is re-split on single spaces.

    BUG FIX: the original only returned the tab-split result — when the line
    was space-delimited it fell through and implicitly returned None.
    """
    fields = line.rstrip().split("\t")
    if len(fields) == 1:
        fields = line.rstrip().split(" ")
    return fields
# Returns dictionary containing information of pedigree file
def process_ped(fam_filepath):
    """Read a pedigree (.fam) file into a dict keyed by individual id.

    Each kept entry maps iid -> (fid, iid, father_iid, mother_iid, sex, phen).
    Rows with an unrecognised sex code (anything but "1"/"2") or with both
    parents unknown ("0") are skipped.
    """
    pedigree = {}
    with open(fam_filepath) as handle:
        for raw_line in handle:
            fields = tokenize(raw_line)
            fid = fields[0]
            iid = fields[1]
            father_iid = fields[2]
            mother_iid = fields[3]
            sex = fields[4]
            phen = fields[5]
            # Guard clauses: drop unknown-sex rows and founder rows.
            if sex not in ("1", "2"):
                continue
            if father_iid == "0" and mother_iid == "0":
                continue
            pedigree[iid] = (fid, iid, father_iid, mother_iid, sex, phen)
    return pedigree
# Convert spaces to tabs
def tabbit(line):
    """Re-delimit *line* with tabs, dropping the first field.

    Splits the (right-stripped) line on any run of whitespace and joins all
    fields after the first with tab characters.
    """
    # Raw string for the pattern: "\s" in a plain literal is an invalid
    # escape sequence (DeprecationWarning today, a SyntaxError in future
    # Python versions).
    fields = re.split(r"\s+", line.rstrip())
    return "\t".join(fields[1:])
| 979 | 333 |
from ws.RLAgents.B_ValueBased.Bootstrapping.qtable_mgt import qtable_mgt
def impl_mgt(app_info):
    """Build a SARSA training routine bound to *app_info*'s environment.

    :param app_info: configuration object providing ENV (with a Display),
        LEARNING_RATE, DISCOUNT_FACTOR, EPSILON, and optionally TEST_MODE.
    :return: fn_run_sarsa, a zero-argument function that runs episodes in a
        loop (a single episode when TEST_MODE is set and truthy).
    """
    _env = app_info.ENV
    Display = app_info.ENV.Display
    # Q-table accessors shared by all the closures below.
    fn_get_qval, fn_set_qval, fn_get_q_actions, fn_get_max_q_actions = qtable_mgt()

    def _fn_update_knowledge(state, action, reward, next_state, next_action):
        # SARSA update: move Q(s,a) toward reward + gamma * Q(s',a'), scaled
        # by the learning rate.
        current_q = fn_get_qval(state, action)
        next_state_q = fn_get_qval(next_state, next_action)
        new_q = (current_q + app_info.LEARNING_RATE *
            (reward + app_info.DISCOUNT_FACTOR * next_state_q - current_q))
        fn_set_qval(state, action, new_q)

    def fn_run_sarsa():
        # Run episodes indefinitely, printing a status line after each one.
        episode_num = 0
        while True:
            episode_num += 1
            episode_status = _fn_run_episode(Display.fn_move_cursor)
            print('episode number: {} status = {}'.format(episode_num, episode_status))
            # NOTE(review): assumes app_info supports `in` (membership test)
            # for optional keys — confirm against the app_info type.
            if 'TEST_MODE' in app_info:
                if app_info.TEST_MODE: # ONLY 1 episode needed
                    break
            pass

    def _fn_run_episode(fn_move_cursor):
        # Play one episode: select actions via the epsilon-aware Q-table
        # helper, update the Q-table after every step, and refresh the display.
        new_state = None
        state = _env.fn_reset_env()
        action = fn_get_max_q_actions(state, app_info.EPSILON)
        Display.fn_update_qvalue(state, fn_get_q_actions(state))
        continue_running = True
        while continue_running:
            new_state, reward, done, _ = _env.fn_take_step(action)
            # NOTE(review): the episode ends on any non-zero reward; the
            # environment's `done` flag is ignored here — confirm intended.
            continue_running = reward == 0
            if fn_move_cursor is not None:
                fn_move_cursor(state, new_state)
            new_action = fn_get_max_q_actions(new_state, app_info.EPSILON)
            _fn_update_knowledge(state, action, reward, new_state, new_action)
            Display.fn_update_qvalue(state, fn_get_q_actions(state))
            action = new_action
            state = new_state
        if fn_move_cursor is not None:
            fn_move_cursor(new_state)
        # Always False at this point (the loop exits when it goes False).
        return continue_running

    return fn_run_sarsa