content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
import logging
from django.conf import settings
from django.contrib.auth import get_user_model
from apps.emailutil.tasks import Email
from apps.org.models import Org
from apps.physicaldevice.models import Device
from apps.project.models import Project
from apps.sqsworker.action import Action
from apps.sqsworker.exceptions import WorkerActionHardError, WorkerActionSoftError
from apps.stream.models import StreamId, StreamVariable
from apps.streamnote.models import StreamNote
from apps.utils.data_helpers.manager import DataManager
logger = logging.getLogger(__name__)
user_model = get_user_model()
class ProjectDeleteAction(Action):
"""
This action will delete the project as well as its associated variables, streams, and data streams
Actions:
- Deleting all StreamVariable points of the project
- Deleting all StreamId points of the project
- Deleting all StreamData points of the project
"""
_project = None
_user = None
_logs = []
@classmethod
@classmethod
def schedule(cls, args, queue_name=getattr(settings, 'SQS_WORKER_QUEUE_NAME'), delay_seconds=None):
"""
schedule function should always have at least args and queue_name as arguments
:param args:
:param queue_name:
:param delay_seconds: optional
:return:
"""
module_name = cls.__module__
class_name = cls.__name__
if ProjectDeleteAction._arguments_ok(args):
super(ProjectDeleteAction, cls)._schedule(queue_name, module_name, class_name, args, delay_seconds)
| [
11748,
18931,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
651,
62,
7220,
62,
19849,
198,
198,
6738,
6725,
13,
12888,
22602,
13,
83,
6791,
1330,
9570,
198,
6738,
6725,
1... | 3.001908 | 524 |
"""
Copyright 2018 Johns Hopkins University (Author: Jesus Villalba)
Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from six.moves import xrange
import numpy as np
import h5py
from copy import copy
from scipy.cluster.hierarchy import linkage
from sklearn.metrics import homogeneity_score, completeness_score
from ..hyp_defs import float_cpu
from ..hyp_model import HypModel
| [
37811,
198,
15069,
2864,
25824,
21183,
2059,
220,
357,
13838,
25,
5803,
9757,
282,
7012,
8,
198,
24843,
362,
13,
15,
220,
357,
4023,
1378,
2503,
13,
43073,
13,
2398,
14,
677,
4541,
14,
43,
2149,
24290,
12,
17,
13,
15,
8,
198,
3781... | 3.028249 | 177 |
import sys
"""
Version has unique source in pyproject.toml.
importlib fetches version from distribution metadata files
(in dist-info or egg-info dirs).
From Python 3.8, importlib_metadata is in standard library as importlib.metadata.
"""
if sys.version_info >= (3, 8):
from importlib import metadata
else:
import importlib_metadata as metadata
__version__ = metadata.version("snakefmt")
DEFAULT_LINE_LENGTH = 88
| [
11748,
25064,
198,
198,
37811,
198,
14815,
468,
3748,
2723,
287,
12972,
16302,
13,
39532,
75,
13,
198,
11748,
8019,
11351,
2052,
2196,
422,
6082,
20150,
3696,
198,
7,
259,
1233,
12,
10951,
393,
5935,
12,
10951,
288,
17062,
737,
198,
4... | 3.357143 | 126 |
"""Enums used by pipelinewise-target-snowflake"""
from enum import Enum, unique
from types import ModuleType
from typing import Callable
import target_snowflake.file_formats
from target_snowflake.exceptions import FileFormatNotFoundException, InvalidFileFormatException
# Supported types for file formats.
@unique
class FileFormatTypes(str, Enum):
"""Enum of supported file format types"""
CSV = 'csv'
PARQUET = 'parquet'
@staticmethod
def list():
"""List of supported file type values"""
return list(map(lambda c: c.value, FileFormatTypes))
# pylint: disable=too-few-public-methods
class FileFormat:
"""File Format class"""
def __init__(self, file_format: str, query_fn: Callable, file_format_type: FileFormatTypes=None):
"""Find the file format in Snowflake, detect its type and
initialise file format specific functions"""
if file_format_type:
self.file_format_type = file_format_type
else:
# Detect file format type by querying it from Snowflake
self.file_format_type = self._detect_file_format_type(file_format, query_fn)
self.formatter = self._get_formatter(self.file_format_type)
@classmethod
def _get_formatter(cls, file_format_type: FileFormatTypes) -> ModuleType:
"""Get the corresponding file formatter implementation based
on the FileFormatType parameter
Params:
file_format_type: FileFormatTypes enum item
Returns:
ModuleType implementation of the file ormatter
"""
formatter = None
if file_format_type == FileFormatTypes.CSV:
formatter = target_snowflake.file_formats.csv
elif file_format_type == FileFormatTypes.PARQUET:
formatter = target_snowflake.file_formats.parquet
else:
raise InvalidFileFormatException(f"Not supported file format: '{file_format_type}")
return formatter
@classmethod
def _detect_file_format_type(cls, file_format: str, query_fn: Callable) -> FileFormatTypes:
"""Detect the type of an existing snowflake file format object
Params:
file_format: File format object
query_fn: A callable function that can run SQL queries in an active Snowflake session
Returns:
FileFormatTypes enum item
"""
if 'format_name' not in file_format:
return FileFormatTypes(file_format['type'].replace("'", "").lower())
file_format_name = file_format['format_name'].split('.')[-1]
file_formats_in_sf = query_fn(f"SHOW FILE FORMATS LIKE '{file_format_name}'")
if len(file_formats_in_sf) == 1:
file_format = file_formats_in_sf[0]
try:
file_format_type = FileFormatTypes(file_format['type'].lower())
except ValueError as ex:
raise InvalidFileFormatException(
f"Not supported named file format {file_format_name}. Supported file formats: {FileFormatTypes}") \
from ex
else:
raise FileFormatNotFoundException(
f"Named file format not found: {file_format}")
return file_format_type
| [
37811,
4834,
5700,
973,
416,
7347,
27176,
413,
786,
12,
16793,
12,
82,
2197,
47597,
37811,
198,
6738,
33829,
1330,
2039,
388,
11,
3748,
198,
6738,
3858,
1330,
19937,
6030,
198,
6738,
19720,
1330,
4889,
540,
198,
198,
11748,
2496,
62,
... | 2.509274 | 1,294 |
# This function is not intended to be invoked directly. Instead it will be
# triggered by an orchestrator function.
import logging
import os
from azure.mgmt.containerservice import ContainerServiceClient
from azure.identity import ClientSecretCredential
from azure.mgmt.subscription import SubscriptionClient
from azure.mgmt.resource import ResourceManagementClient
from azure.mgmt.containerservice.models import (ManagedClusterAgentPoolProfile, ManagedCluster)
from azure.data.tables import TableClient
from azure.data.tables import UpdateMode
| [
2,
770,
2163,
318,
407,
5292,
284,
307,
24399,
3264,
13,
5455,
340,
481,
307,
198,
2,
13973,
416,
281,
28127,
1352,
2163,
13,
198,
198,
11748,
18931,
220,
220,
198,
11748,
28686,
198,
198,
6738,
35560,
495,
13,
11296,
16762,
13,
364... | 3.793103 | 145 |
from commaqa.configs.step_config import StepConfig
from commaqa.dataset.utils import get_predicate_args
| [
6738,
39650,
20402,
13,
11250,
82,
13,
9662,
62,
11250,
1330,
5012,
16934,
198,
6738,
39650,
20402,
13,
19608,
292,
316,
13,
26791,
1330,
651,
62,
28764,
5344,
62,
22046,
628,
198
] | 3.3125 | 32 |
#!/usr/bin/env python
"""
This requires the python docutils library.
"""
from docutils.core import publish_cmdline
publish_cmdline(writer_name='html')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
37811,
198,
1212,
4433,
262,
21015,
2205,
26791,
5888,
13,
198,
37811,
198,
198,
6738,
2205,
26791,
13,
7295,
1330,
7715,
62,
28758,
1370,
198,
198,
12984,
1836,
62,
28758,
1370,
... | 3.142857 | 49 |
import glob
import json
import os
import platform
import re
import subprocess
from typing import Any
import webbrowser
import shlex
import locale
import numpy as np
import bpy
import arm.lib.armpack
from arm.lib.lz4 import LZ4
import arm.log as log
import arm.make_state as state
import arm.props_renderpath
from enum import Enum, unique
class WorkingDir:
"""Context manager for safely changing the current working directory."""
# Passed by load_post handler when armsdk is found in project folder
use_local_sdk = False
def get_relative_paths():
"""Whether to convert absolute paths to relative"""
addon_prefs = get_arm_preferences()
return False if not hasattr(addon_prefs, 'relative_paths') else addon_prefs.relative_paths
def get_pref_or_default(prop_name: str, default: Any) -> Any:
"""Return the preference setting for prop_name, or the value given as default if the property does not exist."""
addon_prefs = get_arm_preferences()
return getattr(addon_prefs, prop_name, default)
script_props = {}
script_props_defaults = {}
script_warnings = {}
def get_prop_type_from_value(value: str):
"""
Returns the property type based on its representation in the code.
If the type is not supported, `None` is returned.
"""
# Maybe ast.literal_eval() is better here?
try:
int(value)
return "Int"
except ValueError:
try:
float(value)
return "Float"
except ValueError:
# "" is required, " alone will not work
if len(value) > 1 and value.startswith(("\"", "'")) and value.endswith(("\"", "'")):
return "String"
if value in ("true", "false"):
return "Bool"
if value.startswith("new "):
value = value.split()[1].split("(")[0]
if value.startswith("Vec"):
return value
if value.startswith("iron.math.Vec"):
return value[10:]
return None
def get_type_default_value(prop_type: str):
"""
Returns the default value of the given Haxe type.
If the type is not supported, `None` is returned:
"""
if prop_type == "Int":
return 0
if prop_type == "Float":
return 0.0
if prop_type == "String" or prop_type in (
"Object", "CameraObject", "LightObject", "MeshObject", "SpeakerObject"):
return ""
if prop_type == "Bool":
return False
if prop_type == "Vec2":
return [0.0, 0.0]
if prop_type == "Vec3":
return [0.0, 0.0, 0.0]
if prop_type == "Vec4":
return [0.0, 0.0, 0.0, 0.0]
return None
def safestr(s: str) -> str:
"""Outputs a string where special characters have been replaced with
'_', which can be safely used in file and path names."""
for c in r'[]/\;,><&*:%=+@!#^()|?^':
s = s.replace(c, '_')
return ''.join([i if ord(i) < 128 else '_' for i in s])
def asset_path(s):
"""Remove leading '//'"""
return s[2:] if s[:2] == '//' else s
def export_bone_data(bobject: bpy.types.Object) -> bool:
"""Returns whether the bone data of the given object should be exported."""
return bobject.find_armature() and is_bone_animation_enabled(bobject) and get_rp().arm_skin == 'On'
def generate_sublime_project(subl_project_path):
"""Generates a [project_name].sublime-project file."""
print('Generating Sublime Text project file')
project_data = {
"folders": [
{
"path": ".",
"file_exclude_patterns": ["*.blend*", "*.arm"]
},
],
}
with open(subl_project_path, 'w', encoding='utf-8') as project_file:
json.dump(project_data, project_file, ensure_ascii=False, indent=4)
# Enum Permissions Name
# Add permission for target android
def type_name_to_type(name: str) -> bpy.types.bpy_struct:
"""Return the Blender type given by its name, if registered."""
return bpy.types.bpy_struct.bl_rna_get_subclass_py(name) | [
11748,
15095,
198,
11748,
33918,
198,
11748,
28686,
198,
11748,
3859,
198,
11748,
302,
198,
11748,
850,
14681,
198,
6738,
19720,
1330,
4377,
198,
11748,
3992,
40259,
198,
11748,
427,
2588,
198,
11748,
36693,
198,
198,
11748,
299,
32152,
3... | 2.429688 | 1,664 |
#
# Sample Todo module
#
"""
Your license message ...
"""
import os, bottle, json, datetime
from appmodule import AppModule
from .modeldb import setupDB, Todo, MyS
app = MyAppModule()
@app.get('/static/<path:path>')
@app.route('/')
@app.auth('access module')
@app.view('index.tpl')
def _():
'''
Default view
'''
bs = app.get_beaker_session()
user = bs.get('username')
if user:
title = 'Todo for {}'.format(user)
else:
title = 'Todo for Anonymous'
return dict(user=user, title=title)
@app.post('/list')
@app.auth('access module')
@app.post('/add')
@app.auth('access module')
@app.post('/delete')
@app.auth('access module')
@app.post('/update')
@app.auth('access module')
| [
2,
198,
2,
27565,
309,
24313,
8265,
198,
2,
198,
198,
37811,
198,
198,
7120,
5964,
3275,
2644,
198,
198,
37811,
198,
198,
11748,
28686,
11,
9294,
11,
33918,
11,
4818,
8079,
198,
6738,
598,
21412,
1330,
2034,
26796,
198,
6738,
764,
1... | 2.432787 | 305 |
import feedparser
import pandas
import django
django.setup()
from activitypub.models import Account, Note
| [
11748,
3745,
48610,
198,
11748,
19798,
292,
198,
11748,
42625,
14208,
198,
28241,
14208,
13,
40406,
3419,
198,
6738,
3842,
12984,
13,
27530,
1330,
10781,
11,
5740,
198
] | 3.785714 | 28 |
from textwrap import dedent
from tests import check_as_expected
ROOT = 'superhelp.helpers.for_help.'
# test_misc()
| [
6738,
2420,
37150,
1330,
4648,
298,
198,
198,
6738,
5254,
1330,
2198,
62,
292,
62,
40319,
198,
198,
13252,
2394,
796,
705,
16668,
16794,
13,
16794,
364,
13,
1640,
62,
16794,
2637,
198,
198,
2,
1332,
62,
44374,
3419,
198
] | 2.95 | 40 |
from blitzcrank import Blitzcrank
import responses
import json
| [
6738,
32528,
6098,
962,
1330,
29299,
6098,
962,
198,
11748,
9109,
198,
11748,
33918,
628
] | 4.266667 | 15 |
if __name__ == '__main__':
main() | [
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1388,
3419
] | 2.3125 | 16 |
from getpass import getpass
from mysql.connector import connect, Error
try:
with connect(
host="localhost",
#user=input("Enter username: "),
user = 'acm',
password=getpass("Enter password: "),
database = 'online_movie_rating'
) as connection:
show_db_query = "SHOW DATABASES"
create_movies_table_query = """
CREATE TABLE movies(
id INT AUTO_INCREMENT PRIMARY KEY,
title VARCHAR(100),
release_year YEAR(4),
genre VARCHAR(100),
collection_in_mil INT
)
"""
create_reviewers_table_query = """
CREATE TABLE reviewers (
id INT AUTO_INCREMENT PRIMARY KEY,
first_name VARCHAR(100),
last_name VARCHAR(100)
)
"""
create_ratings_table_query = """
CREATE TABLE ratings (
movie_id INT,
reviewer_id INT,
rating DECIMAL(2,1),
FOREIGN KEY(movie_id) REFERENCES movies(id),
FOREIGN KEY(reviewer_id) REFERENCES reviewers(id),
PRIMARY KEY(movie_id, reviewer_id)
)
"""
with connection.cursor() as cursor:
cursor.execute(show_db_query)
for db in cursor:
print(db)
#cursor.execute(create_movies_table_query)
# cursor.execute(create_reviewers_table_query)
#cursor.execute(create_ratings_table_query)
#connection.commit()
create_db_query = "CREATE DATABASE online_movie_rating"
# with connection.cursor() as cursor:
# cursor.execute(create_db_query)
print(connection)
show_table_query_1 = "DESCRIBE movies"
show_table_query_2 = "DESCRIBE reviewers"
show_table_query_3 = "DESCRIBE ratings"
with connection.cursor() as cursor:
cursor.execute(show_table_query_1)
# Fetch rows from last executed query
result = cursor.fetchall()
for row in result:
print(row)
cursor.execute(show_table_query_2)
# Fetch rows from last executed query
result = cursor.fetchall()
for row in result:
print(row)
cursor.execute(show_table_query_3)
# Fetch rows from last executed query
result = cursor.fetchall()
for row in result:
print(row)
# Back after a long while to this project. Clear need to comment my code for better understanding
# Starting code to alter tables in SQL with Py
#SQL Code ALTER TABLE movies MODIFY COLUMN collection_in_mil DECIMAL(4,1)
alter_table_query = """
ALTER TABLE movies
MODIFY COLUMN collection_in_mil DECIMAL(4,1)
"""
with connection.cursor() as cursor:
cursor.execute(alter_table_query)
cursor.execute(show_table_query_1) #Show movies again
#Fetch rows from last executed query
result = cursor.fetchall()
print('Movie Table Schema after alteration:')
for row in result:
print(row)
# Outcome: Type of collection_in_mil changed from DECIMAL(2,1) to DECIMAL(4,1)
#Now, Dropping tables in SQL with Python
# Warning! This is an irreversible process
drop_table_query = 'DROP TABLE ratings'
# with connection.cursor() as cursor:
# cursor.execute(drop_table_query)
# cursor.execute(show_table_query_3)
# result = cursor.fetchall()
# print('The Schema ratings shouldn\'t exist anymore')
# print(result)
#Outcome: 1146 (42S02): Table 'online_movie_rating.ratings' doesn't exist
#Now, populating table schemas with data using .execute() for small amout of data that can be hard coded
# and .executemany() for large datas
#Becareful of repeat population
insert_movies_query = '''
INSERT INTO movies (title, release_year, genre, collection_in_mil)
VALUES
("Forrest Gump", 1994, "Drama", 330.2),
("3 Idiots", 2009, "Drama", 2.4),
("Eternal Sunshine of the Spotless Mind", 2004, "Drama", 34.5),
("Good Will Hunting", 1997, "Drama", 138.1),
("Skyfall", 2012, "Action", 304.6),
("Gladiator", 2000, "Action", 188.7),
("Black", 2005, "Drama", 3.0),
("Titanic", 1997, "Romance", 659.2),
("The Shawshank Redemption", 1994, "Drama",28.4),
("Udaan", 2010, "Drama", 1.5),
("Home Alone", 1990, "Comedy", 286.9),
("Casablanca", 1942, "Romance", 1.0),
("Avengers: Endgame", 2019, "Action", 858.8),
("Night of the Living Dead", 1968, "Horror", 2.5),
("The Godfather", 1972, "Crime", 135.6),
("Haider", 2014, "Action", 4.2),
("Inception", 2010, "Adventure", 293.7),
("Evil", 2003, "Horror", 1.3),
("Toy Story 4", 2019, "Animation", 434.9),
("Air Force One", 1997, "Drama", 138.1),
("The Dark Knight", 2008, "Action",535.4),
("Bhaag Milkha Bhaag", 2013, "Sport", 4.1),
("The Lion King", 1994, "Animation", 423.6),
("Pulp Fiction", 1994, "Crime", 108.8),
("Kai Po Che", 2013, "Sport", 6.0),
("Beasts of No Nation", 2015, "War", 1.4),
("Andadhun", 2018, "Thriller", 2.9),
("The Silence of the Lambs", 1991, "Crime", 68.2),
("Deadpool", 2016, "Action", 363.6),
("Drishyam", 2015, "Mystery", 3.0)
'''
with connection.cursor() as cursor:
#cursor.execute(insert_movies_query)
#connection.commit() #important to commit after any modification to table
cursor.execute(show_table_query_1)
result = cursor.fetchall()
for row in result:
print(row)
# outcome: this only shows data cols and their types
# not the data themselves
#Now, to modify bzw. insert with .executemany()
#this accepts two parameters; one for query with placeholders,
#one for list
insert_reviewers_query = """
INSERT INTO reviewers
(first_name, last_name)
VALUES ( %s, %s )
"""
reviewers_records = [
("Chaitanya", "Baweja"),
("Mary", "Cooper"),
("John", "Wayne"),
("Thomas", "Stoneman"),
("Penny", "Hofstadter"),
("Mitchell", "Marsh"),
("Wyatt", "Skaggs"),
("Andre", "Veiga"),
("Sheldon", "Cooper"),
("Kimbra", "Masters"),
("Kat", "Dennings"),
("Bruce", "Wayne"),
("Domingo", "Cortes"),
("Rajesh", "Koothrappali"),
("Ben", "Glocker"),
("Mahinder", "Dhoni"),
("Akbar", "Khan"),
("Howard", "Wolowitz"),
("Pinkie", "Petit"),
("Gurkaran", "Singh"),
("Amy", "Farah Fowler"),
("Marlon", "Crafford"),
]
#with connection.cursor() as cursor:
#cursor.executemany(insert_reviewers_query, reviewers_records)
#connection.commit()
#Now to populate the ratings table
insert_ratings_query = '''INSERT INTO ratings
(rating, movie_id, reviewer_id)
VALUES ( %s, %s, %s)
'''
ratings_records = [
(6.4, 17, 5), (5.6, 19, 1), (6.3, 22, 14), (5.1, 21, 17),
(5.0, 5, 5), (6.5, 21, 5), (8.5, 30, 13), (9.7, 6, 4),
(8.5, 24, 12), (9.9, 14, 9), (8.7, 26, 14), (9.9, 6, 10),
(5.1, 30, 6), (5.4, 18, 16), (6.2, 6, 20), (7.3, 21, 19),
(8.1, 17, 18), (5.0, 7, 2), (9.8, 23, 3), (8.0, 22, 9),
(8.5, 11, 13), (5.0, 5, 11), (5.7, 8, 2), (7.6, 25, 19),
(5.2, 18, 15), (9.7, 13, 3), (5.8, 18, 8), (5.8, 30, 15),
(8.4, 21, 18), (6.2, 23, 16), (7.0, 10, 18), (9.5, 30, 20),
(8.9, 3, 19), (6.4, 12, 2), (7.8, 12, 22), (9.9, 15, 13),
(7.5, 20, 17), (9.0, 25, 6), (8.5, 23, 2), (5.3, 30, 17),
(6.4, 5, 10), (8.1, 5, 21), (5.7, 22, 1), (6.3, 28, 4),
(9.8, 13, 1)
]
#with connection.cursor() as cursor:
#cursor.executemany(insert_ratings_query, ratings_records)
#connection.commit()
# Now to read data off of tables using SELECT
select_movies_query = ''' SELECT title,release_year
FROM movies
LIMIT 2,5'''
with connection.cursor() as cursor:
cursor.execute(select_movies_query)
for row in cursor.fetchall():
print(row)
select_movies_query = '''
SELECT *
FROM movies
LIMIT 1,5'''
with connection.cursor() as cursor:
cursor.execute(select_movies_query)
for movie in cursor.fetchall():
print(movie)
select_movies_query = '''
SELECT CONCAT (title, "(" , release_year, ")" ), collection_in_mil
FROM movies
'''
with connection.cursor() as cursor:
cursor.execute(select_movies_query)
for movie in cursor.fetchmany(size=10):
print(movie)
cursor.fetchall() #Cleans up remaining results, else Error raised
#Had repeats of Avengers Endgame showing up because of repeats in DB entry
# Basically everytime, the insert into Movies code above ran as well
#show movies with best ratings
select_movies_query = '''
SELECT title, AVG(rating) as average_rating
FROM ratings
INNER JOIN movies
ON movies.id = ratings.movie_id
GROUP BY movie_id
ORDER BY average_rating DESC
LIMIT 5'''
with connection.cursor() as cursor:
cursor.execute(show_table_query_3)
print('The ratings table has:\n')
result = cursor.fetchall() # I think every execution has to be fetched and read maybe
for row in result:
print(row)
cursor.execute(select_movies_query)
for movie in cursor.fetchall():
print(movie)
#Show reviwer with most reviews
select_movies_query = '''
SELECT CONCAT(first_name, ' ', last_name), COUNT(*) as num
FROM reviewers
INNER JOIN ratings
ON reviewers.id = ratings.reviewer_id
GROUP BY reviewer_id
ORDER BY num DESC
LIMIT 1'''
with connection.cursor() as cursor:
cursor.execute(select_movies_query)
for movie in cursor.fetchall():
print(movie)
#What does Inner Join do exactly in both these cases? How is movies involved in the query?
#Update Amys lastname
update_queryy = '''
UPDATE reviewers
SET last_name= 'Cooper'
WHERE first_name = 'Amy'
'''
with connection.cursor() as cursor:
cursor.execute(update_queryy)
connection.commit()
#update a rating
update_query = '''
UPDATE ratings
SET rating= 5
WHERE movie_id = 18 AND reviewer_id=15'''
show_update= '''
SELECT *
FROM ratings
WHERE movie_id=18 AND reviewer_id=15
'''
with connection.cursor() as cursor:
cursor.execute(update_query)
connection.commit()
cursor.execute(show_update)
result = cursor.fetchall()
print(result)
#Deleting Entries
#First make sure with SELECT
select_movies_query = '''
SELECT reviewer_id, movie_id FROM ratings
WHERE reviewer_id = 2'''
with connection.cursor() as cursor:
cursor.execute(select_movies_query)
for movie in cursor.fetchall():
print(movie)
#Is this what you expected? IF yes, then proceed to delete
confirm = input('Is this what you expected? Proceed with delete:\n')
if confirm:
delete_query= '''
DELETE FROM ratings WHERE reviewer_id=2'''
with connection.cursor() as cursor:
cursor.execute(delete_query)
connection.commit()
except Error as e:
print(e) | [
6738,
651,
6603,
1330,
651,
6603,
198,
6738,
48761,
13,
8443,
273,
1330,
2018,
11,
13047,
198,
198,
28311,
25,
198,
220,
220,
220,
351,
2018,
7,
198,
220,
220,
220,
220,
220,
220,
220,
2583,
2625,
36750,
1600,
198,
220,
220,
220,
... | 2.113178 | 5,699 |
# -*- coding: utf-8 -*-
"""
General description:
---------------------
Example that illustrates how to use custom component `GenericCHP` can be used.
In this case it is used to model a combined cycle extraction turbine.
Installation requirements:
---------------------------
This example requires the latest version of oemof. Install by:
pip install oemof
"""
import os
import pandas as pd
import oemof.solph as solph
from oemof.network import Node
from oemof.outputlib import processing, views
try:
import matplotlib.pyplot as plt
except ImportError:
plt = None
# read sequence data
full_filename = os.path.join(os.path.dirname(__file__), 'generic_chp.csv')
data = pd.read_csv(full_filename, sep=",")
# select periods
periods = len(data)-1
# create an energy system
idx = pd.date_range('1/1/2017', periods=periods, freq='H')
es = solph.EnergySystem(timeindex=idx)
Node.registry = es
# resources
bgas = solph.Bus(label='bgas')
rgas = solph.Source(label='rgas', outputs={bgas: solph.Flow()})
# heat
bth = solph.Bus(label='bth')
# dummy source at high costs that serves the residual load
source_th = solph.Source(label='source_th',
outputs={bth: solph.Flow(variable_costs=1000)})
demand_th = solph.Sink(label='demand_th', inputs={bth: solph.Flow(fixed=True,
actual_value=data['demand_th'], nominal_value=200)})
# power
bel = solph.Bus(label='bel')
demand_el = solph.Sink(label='demand_el', inputs={bel: solph.Flow(
variable_costs=data['price_el'])})
# combined cycle extraction turbine
ccet = solph.components.GenericCHP(
label='combined_cycle_extraction_turbine',
fuel_input={bgas: solph.Flow(
H_L_FG_share_max=[0.19 for p in range(0, periods)])},
electrical_output={bel: solph.Flow(
P_max_woDH=[200 for p in range(0, periods)],
P_min_woDH=[80 for p in range(0, periods)],
Eta_el_max_woDH=[0.53 for p in range(0, periods)],
Eta_el_min_woDH=[0.43 for p in range(0, periods)])},
heat_output={bth: solph.Flow(
Q_CW_min=[30 for p in range(0, periods)])},
Beta=[0.19 for p in range(0, periods)],
back_pressure=False)
# create an optimization problem and solve it
om = solph.Model(es)
# debugging
# om.write('generic_chp.lp', io_options={'symbolic_solver_labels': True})
# solve model
om.solve(solver='cbc', solve_kwargs={'tee': True})
# create result object
results = processing.results(om)
# plot data
if plt is not None:
# plot PQ diagram from component results
data = results[(ccet, None)]['sequences']
ax = data.plot(kind='scatter', x='Q', y='P', grid=True)
ax.set_xlabel('Q (MW)')
ax.set_ylabel('P (MW)')
plt.show()
# plot thermal bus
data = views.node(results, 'bth')['sequences']
ax = data.plot(kind='line', drawstyle='steps-post', grid=True)
ax.set_xlabel('Time (h)')
ax.set_ylabel('Q (MW)')
plt.show()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
12218,
6764,
25,
198,
19351,
12,
198,
16281,
326,
21290,
703,
284,
779,
2183,
7515,
4600,
46189,
3398,
47,
63,
460,
307,
973,
13,
198,
818,
428,
1339,
340,... | 2.515021 | 1,165 |
#!/usr/bin/env python
from __future__ import print_function
import subprocess
import re
from getters import *
DEVEL_VM = "Development VM"
APPLIANCE = "Appliance (Full deployment on single machine)"
CLUSTER = "Cluster (High volume production deployment)"
DEPLOYMENT_TYPES = [DEVEL_VM, APPLIANCE, CLUSTER]
# noinspection PyBroadException
# noinspection PyBroadException
DEPLOYMENT_MAP = {
DEVEL_VM: devel_vm,
APPLIANCE: appliance,
CLUSTER: cluster
}
if __name__ == "__main__":
import sys
if len(sys.argv) == 2:
seed_path = sys.argv[1]
else:
seed_path = None
start(os.path.dirname(os.path.realpath(__file__)), seed_path)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
11748,
850,
14681,
198,
11748,
302,
198,
6738,
651,
1010,
1330,
1635,
198,
198,
7206,
18697,
62,
15996,
796,
366,
41206,
16990,
... | 2.544776 | 268 |
# Copyright 2021 Google, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for pubsub.py"""
import base64
import binascii
import json
import unittest
from unittest.mock import Mock
from utilities import pubsub
| [
2,
15069,
33448,
3012,
11,
11419,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
... | 3.62069 | 203 |
from PyQt5.QtWidgets import QScrollArea
class SMSScrollArea(QScrollArea):
'''
自定义滚动控件
'''
| [
6738,
9485,
48,
83,
20,
13,
48,
83,
54,
312,
11407,
1330,
1195,
29261,
30547,
198,
198,
4871,
29287,
29261,
30547,
7,
48,
29261,
30547,
2599,
198,
220,
220,
220,
705,
7061,
198,
220,
220,
220,
5525,
229,
103,
22522,
248,
20046,
231,... | 1.716667 | 60 |
import json
import os
from Crypto.Cipher import AES
from Crypto.Hash import SHA256
from Crypto.Random import get_random_bytes
from Crypto.Util.Padding import pad, unpad
from .config import DEFAULT_VAULT_FILE, DEFAULT_GROUP_KEY_BITS, DEFAULT_PAD_BYTES, DEFAULT_IV_BITS, \
DEFAULT_KEY_ENCODING, HASH_SECRETS
from .helpers import makeBytesOf, makeStringOf, encodeToBase64, decodeFromBase64
from .identity import Identity
PUBLIC_KEY_HIVE = 'public_keys'
GROUP_KEY_HIVE = 'group_keys'
SECRETS_HIVE = 'secrets'
| [
11748,
33918,
198,
11748,
28686,
198,
198,
6738,
36579,
13,
34,
10803,
1330,
34329,
198,
6738,
36579,
13,
26257,
1330,
25630,
11645,
198,
6738,
36579,
13,
29531,
1330,
651,
62,
25120,
62,
33661,
198,
6738,
36579,
13,
18274,
346,
13,
47,... | 2.822581 | 186 |
# Generated by Django 2.2.21 on 2021-05-28 00:44
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
13,
2481,
319,
33448,
12,
2713,
12,
2078,
3571,
25,
2598,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.875 | 32 |
# coding=utf-8
"""scikit-surgerytorch"""
| [
2,
19617,
28,
40477,
12,
23,
198,
198,
37811,
36216,
15813,
12,
11793,
7076,
13165,
354,
37811,
198
] | 2.333333 | 18 |
# site, moving dimension, direction, index of site representation
| [
198,
198,
2,
2524,
11,
3867,
15793,
11,
4571,
11,
6376,
286,
2524,
10552,
628
] | 4.6 | 15 |
import tensorflow as tf
import pickle
from LstmUnit import LstmUnit
from OutputUnit import OutputUnit
model = Model()
import numpy as np
with tf.Session() as sess:
for i in range(10):
sess.run(tf.global_variables_initializer())
_1, _2 = sess.run([model.train_op, model.mean_loss], feed_dict={
model.encoder_input: np.random.randint(0, 100, [10, 20]),
model.decoder_input: np.random.randint(0, 100, [10, 20]),
model.encoder_time_step: [18]*10,
model.decoder_time_step: [18]*9+[19],
model.decoder_output: np.random.randint(0, 100, [10, 20])})
print(_2) | [
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
2298,
293,
198,
6738,
406,
301,
76,
26453,
1330,
406,
301,
76,
26453,
198,
6738,
25235,
26453,
1330,
25235,
26453,
628,
628,
198,
19849,
796,
9104,
3419,
198,
11748,
299,
32152,
355,
459... | 2.176271 | 295 |
"""
Input/Output routines for XDS and file conversion utilities
"""
import os
import numpy
import multiprocessing
from autoprocess.utils import misc
DEFAULT_DELPHI = 8
if os.environ.get('DPS_NODES'):
HOSTS = {
x.split(':')[0]: int(x.split(':')[1]) for x in os.environ['DPS_NODES'].split()
}
else:
HOSTS = {
'localhost': multiprocessing.cpu_count()
}
def write_xds_input(jobs, parameters):
"""
Create XDS.INP file using parameters in the dictionary params
jobs = XYCORR INIT COLSPOT IDXREF DEFPIX INTEGRATE CORRECT
params = {
'wavelength': float
'distance': float
'start_angle': float
'first_frame': int
'delta_angle': float
'space_group': int
'unit_cell' : tuple of 6 floats
'reindex_matrix': tuple of 12 ints OR None
'file_template': str (full/relative path including directory)
'file_format': str (TIFF)
'data_range' : tuple of 2 ints
'spot_range' : list of (tuple of 2 ints)'s
'skip_range' : list of (tuple of 2 ints)'s
'detector_size': tuple of 2 ints
'pixel_size' : float
'two_theta': float
'saturated_value': float
'beam_center': tuple of 2 floats
'min_spot_size': int or None
'min_spot_seperation': int or None
'cluster_radius': int or None
'sigma': float or None default (6)
'reference_data': filename OR None
'shells': list of numbers or None
'anomalous': True or False default False
'strict_absorption': True or False default False
}
"""
# defaults
params = {
'refine_index': 'CELL BEAM ORIENTATION AXIS',
'refine_integrate': 'DISTANCE POSITION BEAM ORIENTATION'
}
params.update(parameters)
params['min_valid_value'] = 1
params['profile_grid_size'] = 13
if params.get('detector_type').lower() in ['q4', 'q210', 'q4-2x', 'q210-2x', 'q315', 'q315-2x']:
detector = 'ADSC'
elif params.get('detector_type').lower() in ['mar165', 'mx300', 'mx300he', 'mar225', 'mar325']:
detector = 'CCDCHESS'
elif 'pilatus' in params.get('detector_type').lower():
detector = 'PILATUS'
params['min_spot_size'] = 3
params['fixed_scale_factor'] = True
params['min_valid_value'] = 0
params['saturated_value'] = 1048500
params['sensor_thickness'] = 1.0
elif 'eiger' in params.get('detector_type').lower():
detector = 'EIGER'
params['min_spot_separation'] = 4
params['cluster_radius'] = 2
params['min_valid_value'] = 0
# params['untrusted'] = [
# (0, 4150, 513, 553),
# (0, 4150, 1064, 1104),
# (0, 4150, 1615, 1655),
# (0, 4150, 2166, 2206),
# (0, 4150, 2717, 2757),
# (0, 4150, 3268, 3308),
# (0, 4150, 3819, 3859),
# (1029, 1042, 0, 4371),
# (2069, 2082, 0, 4371),
# (3109, 3122, 0, 4371),
# ]
else:
detector = 'CCDCHESS'
num_frames = params['data_range'][1] - params['data_range'][0] + 1
num_jobs, batch_size, delphi = get_job_params(
num_frames, params['delta_angle'], min(params.get('max_delphi', 8), DEFAULT_DELPHI)
)
params['jobs'] = jobs
params['detector'] = detector
params['sensor_thickness'] = params.get('sensor_thickness', 0.0)
params['num_jobs'] = num_jobs
params['batch_size'] = batch_size
params['delphi'] = delphi
params['cluster_nodes'] = ' '.join(list(HOSTS.keys()))
params['sigma'] = params.get('sigma', 4)
params['friedel'] = str(not params.get('anomalous', False)).upper()
params['space_group'] = params.get('reference_spacegroup', params.get('space_group', 0))
params['resolution'] = params.get('resolution', 1.0)
params['detector_yaxis'] = (
0.0,
numpy.cos(numpy.radians(params['two_theta'])),
-1 * numpy.sin(numpy.radians(params['two_theta']))
)
job_text = (
"!- XDS.INP ----------- Generated by AutoProcess\n"
"JOB= {jobs}\n"
"CLUSTER_NODES= {cluster_nodes}\n"
).format(**params)
dataset_text = (
"!------------------- Dataset parameters\n"
"X-RAY_WAVELENGTH= {wavelength:7.5f}\n"
"DETECTOR_DISTANCE= {distance:5.1f}\n"
"STARTING_ANGLE= {start_angle:5.1f}\n"
"STARTING_FRAME= {start_angle}\n"
"OSCILLATION_RANGE= {delta_angle:4.2f}\n"
"NAME_TEMPLATE_OF_DATA_FRAMES={file_template}\n"
"FRIEDEL'S_LAW= {friedel}\n"
"DATA_RANGE= {data_range[0]} {data_range[1]}\n"
"DELPHI= {delphi:4.2f} \n"
).format(**params)
# Allow injecting an external library for reading dataset files
if os.environ.get('XDS_DATALIB'):
dataset_text += 'LIB= {}\n'.format(os.environ['XDS_DATALIB'])
for r_s, r_e in params.get('skip_range', []):
dataset_text += "EXCLUDE_DATA_RANGE= {} {}\n".format(r_s, r_e)
for r_s, r_e in params['spot_range']:
dataset_text += "SPOT_RANGE= {} {}\n".format(r_s, r_e)
if params.get('background_range'):
dataset_text += "BACKGROUND_RANGE= {background_range[0]} {background_range[1]}\n".format(**params)
if params.get('space_group'):
# space group and cell parameters
dataset_text += (
"SPACE_GROUP_NUMBER= {space_group}\n"
"UNIT_CELL_CONSTANTS= {unit_cell[0]:0.3f} {unit_cell[1]:0.3f} {unit_cell[2]:0.3f} "
"{unit_cell[3]:0.3f} {unit_cell[4]:0.3f} {unit_cell[5]:0.3f}\n"
).format(space_group=params['space_group'], unit_cell=params['unit_cell'])
# reindexing matrix
if params.get('reindex_matrix'):
dataset_text += "REIDX={} {} {} {} {} {} {} {} {} {} {} {}\n".format(*params['reindex_matrix'])
# reference data
if params.get('reference_data'):
dataset_text += "REFERENCE_DATA_SET= {reference_data}\n".format(**params)
beamline_text = (
"!----------------- Beamline parameters\n"
"DETECTOR= {detector}\n"
"NX={detector_size[0]} NY= {detector_size[1]}\n"
"QX={pixel_size:7.5f} QY={pixel_size:7.5f}\n"
"ORGX={beam_center[0]:5.0f} ORGY={beam_center[1]:5.0f}\n"
"SENSOR_THICKNESS= {sensor_thickness:0.3f}\n"
"MINIMUM_VALID_PIXEL_VALUE= {min_valid_value}\n"
"OVERLOAD= {saturated_value}\n"
"STRONG_PIXEL= {sigma:5.0f}\n"
"MINIMUM_ZETA= 0.05\n"
"TRUSTED_REGION=0.00 1.2\n"
"TEST_RESOLUTION_RANGE= 50.0 1.0\n"
"RESOLUTION_SHELLS= {resolution:5.2f}\n"
"TOTAL_SPINDLE_ROTATION_RANGES= 90 360 30\n"
"STARTING_ANGLES_OF_SPINDLE_ROTATION= 0 180 15\n"
"VALUE_RANGE_FOR_TRUSTED_DETECTOR_PIXELS= 6000 30000\n"
"INCLUDE_RESOLUTION_RANGE=50.0 0.0\n"
"ROTATION_AXIS= 1.0 0.0 0.0\n"
"INCIDENT_BEAM_DIRECTION=0.0 0.0 1.0\n"
"FRACTION_OF_POLARIZATION=0.99\n"
"POLARIZATION_PLANE_NORMAL= 0.0 1.0 0.0\n"
"DIRECTION_OF_DETECTOR_X-AXIS= 1.000 0.000 0.000\n"
"DIRECTION_OF_DETECTOR_Y-AXIS= {detector_yaxis[0]:0.3f} {detector_yaxis[1]:0.3f} {detector_yaxis[2]:0.3f}\n"
).format(**params)
extra_text = "!----------------- Extra parameters\n"
if params.get('min_spot_separation'):
extra_text += 'SEPMIN= {min_spot_separation}\n'
if params.get('min_spot_size'):
extra_text += 'MINIMUM_NUMBER_OF_PIXELS_IN_A_SPOT= {min_spot_size}\n'
if params.get('cluster_radius'):
extra_text += 'CLUSTER_RADIUS= {cluster_radius}\n'
if params.get('shells'):
extra_text += 'RESOLUTION_SHELLS= {}\n'.format(' '.join(['{:0.2f}'.format(x) for x in params['shells']]))
if params.get('strict_correction'):
extra_text += 'STRICT_ABSORPTION_CORRECTION= {}\n'.format(str(params.get('strict_absorption', False)).upper())
if params.get('refine_index'):
extra_text += 'REFINE(IDXREF)= {refine_index}\n'
if params.get('refine_integrate'):
extra_text += 'REFINE(INTEGRATE)= {refine_integrate}\n'
if params.get('profile_grid_size'):
extra_text += 'NUMBER_OF_PROFILE_GRID_POINTS_ALONG_ALPHA/BETA= {profile_grid_size}\n'
if params.get('fixed_scale_factor'):
extra_text += 'DATA_RANGE_FIXED_SCALE_FACTOR= {data_range[0]} {data_range[1]} 1.0\n'
for rectangle in params.get('untrusted', []):
extra_text += 'UNTRUSTED_RECTANGLE= {} {} {} {}\n'.format(*rectangle)
extra_text = extra_text.format(**params)
with open('XDS.INP', 'w') as outfile:
outfile.write(job_text)
outfile.write(dataset_text)
outfile.write(beamline_text)
outfile.write(extra_text)
def write_xscale_input(params):
"""
Create XSCALE.INP file using parameters in the dictionary params
params = {
'strict_absorption': True or False default False
'sections' : list of [ {
'reindex_matrix': tuple of 12 ints, optional
'space_group': int, optional
'unit_cell': tuple of 6 floats, optional
'anomalous': bool
'output_file': str
'crystal': str
'inputs': list of [{'input_file': str, 'resolution': float, 'reference':bool}]
}]
}
"""
header = "!-XSCALE.INP--------File generated by auto.process \n"
header += "MAXIMUM_NUMBER_OF_PROCESSORS=%d \n" % misc.get_cpu_count()
body = ""
shells = []
friedel = 'FALSE' if params['sections'][0].get('anomalous') else 'TRUE'
strict_abs = 'TRUE' if params.get('strict_absorption', False) else 'FALSE'
for i, section in enumerate(params['sections']):
body += "OUTPUT_FILE={}\n".format(section['output_file'])
if section.get('reindex_matrix'):
body += "REIDX={:2d} {:2d} {:2d} {:2d} {:2d} {:2d} {:2d} {:2d} {:2d} {:2d} {:2d} {:2d}\n".format(*section['reindex_matrix'])
if section.get('space_group') and section.get('unit_cell'):
body += "SPACE_GROUP_NUMBER={:d} \n".format(section['space_group'])
body += "UNIT_CELL_CONSTANTS={:6.2f} {:6.2f} {:6.2f} {:4.2f} {:4.2f} {:4.2f} \n".format(*section['unit_cell'])
body += f"FRIEDEL'S_LAW={friedel}\n"
body += f"STRICT_ABSORPTION_CORRECTION={strict_abs}\n"
if i == 0:
shells = section.get('shells')
elif section.get('shells')[-1] < shells[-1]:
shells = section.get('shells')
for _input in section['inputs']:
star = '*' if _input.get('reference', False) else ' '
body += "INPUT_FILE={}{} \n".format(star, _input['input_file'])
body += "INCLUDE_RESOLUTION_RANGE= 50 {:5.2f}\n".format(_input.get('resolution', 0))
if section.get('crystal'):
body += "CRYSTAL_NAME={}\n".format(section['crystal'])
if shells:
header += 'RESOLUTION_SHELLS= {}\n'.format(' '.join([f'{x:0.2f}' for x in shells]))
file_text = header + body + "!-------------------File generated by auto.process \n"
outfile = open('XSCALE.INP', 'w')
outfile.write(file_text)
outfile.close()
def write_xdsconv_input(params):
"""
Create XDSCONV.INP file using parameters in the dictionary params
params = {
'space_group': int
'unit_cell': tuple of 6 floats
'anomalous': bool
'format' : str
'input_file': str
'output_file' : str
'freeR_fraction': float 0.0 < x < 1.0
}
"""
friedel = {True: 'FALSE', False: 'TRUE'}
file_text = "!-XDSCONV.INP--------File generated by auto.process \n"
file_text += "INPUT_FILE= %s XDS_ASCII\n" % params['input_file']
file_text += "OUTPUT_FILE=%s %s\n" % (params['output_file'], params['format'])
file_text += "FRIEDEL'S_LAW=%s\n" % (friedel[params['anomalous']])
file_text += "MERGE=FALSE\n"
if params['freeR_fraction'] > 0.0:
file_text += "GENERATE_FRACTION_OF_TEST_REFLECTIONS=%0.2f\n" % params['freeR_fraction']
file_text += "!-------------------File generated by auto.process \n"
outfile = open('XDSCONV.INP', 'w')
outfile.write(file_text)
outfile.close()
| [
37811,
198,
20560,
14,
26410,
31878,
329,
1395,
5258,
290,
2393,
11315,
20081,
198,
198,
37811,
198,
198,
11748,
28686,
198,
198,
11748,
299,
32152,
198,
11748,
18540,
305,
919,
278,
198,
6738,
22320,
305,
919,
13,
26791,
1330,
12747,
1... | 2.08741 | 5,846 |
import os
from pyabf import ABF
from analyze_abf import VCTestData
import numpy as np
ABF_LOCATION = r'C:\Users\mattisj\Desktop\9-Patching\GC juvenile Scn1a\VC test'
VC_TEST_OUTPUT_FILE = r'C:\Users\mattisj\Desktop\9-Patching\GC juvenile Scn1a\VC test GC juvenile Scn1a.csv'
if os.path.isdir(ABF_LOCATION):
abf_files = [os.path.join(ABF_LOCATION, f) for f in os.listdir(ABF_LOCATION) if f.endswith('.abf')]
else:
abf_files = [ABF_LOCATION]
# Print the files we're analyzing as a sanity check
print('Analyzing the following files:\n{}'.format(abf_files))
# Gathering data from the abf files
input_resistance_output = {}
for filepath in abf_files:
abf = ABF(filepath)
experiment = VCTestData(abf)
filename = os.path.basename(filepath)
print('Analyzing {}'.format(filename))
input_resistance_output[os.path.basename(filename)] = []
input_resistance_output[filename] = experiment.get_input_resistance()
# Writing the additional analysis to output file
with open(VC_TEST_OUTPUT_FILE, 'w') as f:
f.write("filename, input resistance\n")
for filename in input_resistance_output:
f.write('{}, {}\n'.format(filename, 1000*np.mean(input_resistance_output[filename])))
| [
11748,
28686,
198,
6738,
12972,
397,
69,
1330,
9564,
37,
198,
6738,
16602,
62,
397,
69,
1330,
569,
4177,
395,
6601,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6242,
37,
62,
29701,
6234,
796,
374,
6,
34,
7479,
14490,
59,
76,
1078,... | 2.634783 | 460 |
"""Map Filter Zip """ | [
37811,
13912,
25853,
38636,
37227
] | 4.2 | 5 |
# coding: utf-8
from Arknights.click_location.click_location import *
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
6738,
9128,
77,
2337,
13,
12976,
62,
24886,
13,
12976,
62,
24886,
1330,
1635,
198
] | 2.958333 | 24 |
import argparse
import json
import os
import time
import numpy as np
import jax.numpy as jnp
from jax import jit
from jax.lax import cond, fori_loop
if __name__ == "__main__":
main()
| [
11748,
1822,
29572,
198,
11748,
33918,
198,
11748,
28686,
198,
11748,
640,
198,
11748,
299,
32152,
355,
45941,
198,
198,
11748,
474,
897,
13,
77,
32152,
355,
474,
37659,
198,
6738,
474,
897,
1330,
474,
270,
198,
6738,
474,
897,
13,
75... | 2.742857 | 70 |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import sys
import signal
import subprocess
import argparse
import time
import math
import random
from multiprocessing import Process
from functools import reduce
import numpy as np
import pickle
import unittest
import six
import paddle
import paddle.fluid as fluid
from paddle.fluid import core
from paddle.fluid import io
from test_dist_base import TestDistRunnerBase, runtime_main, RUN_STEP
from dist_simnet_bow import TestDistSimnetBow2x2, DATA_URL, DATA_MD5
if __name__ == "__main__":
paddle.dataset.common.download(DATA_URL, 'simnet', DATA_MD5, "train")
runtime_main(TestDistSaveLoad2x2)
| [
2,
220,
220,
15069,
357,
66,
8,
2864,
350,
37382,
47,
37382,
46665,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
... | 3.467213 | 366 |
"""
Developed by : Adem Boussetha
Email : ademboussetha@gmail.com
"""
#!/usr/bin/python
# -*- coding: utf-8 -*-
import cv2
from color_recognition_api import color_histogram_feature_extraction
from color_recognition_api import knn_classifier
import os
import os.path
import sys
global color | [
37811,
220,
198,
19246,
276,
416,
1058,
1215,
368,
347,
516,
2617,
3099,
198,
15333,
1058,
512,
24419,
516,
2617,
3099,
31,
14816,
13,
785,
198,
37811,
198,
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
3384,
... | 2.862745 | 102 |
# Generated by Django 3.0.3 on 2020-05-19 15:34
import datetime
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
15,
13,
18,
319,
12131,
12,
2713,
12,
1129,
1315,
25,
2682,
198,
198,
11748,
4818,
8079,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.972222 | 36 |
from setuptools import setup, find_packages
from os import path
from cli import __version__
# https://packaging.python.org/guides/making-a-pypi-friendly-readme/
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md')) as f:
long_description = f.read()
setup(
name='databricks-workspace-tool',
version=__version__,
description='Tool to manage notebooks and clean output cells.',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/frogrammer/databricks-workspace-tool',
author='Luke Vinton',
author_email='luke0vinton@gmail.com',
license='Apache 2.0',
packages=find_packages(),
install_requires=['fire', 'databricks-cli', 'fire-cli-helper', 'gitpython'],
tests_require=[],
classifiers=[],
test_suite='',
entry_points={
'console_scripts': [
'dwt = cli.__main__:main',
],
},
)
| [
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
6738,
28686,
1330,
3108,
198,
198,
6738,
537,
72,
1330,
11593,
9641,
834,
198,
198,
2,
3740,
1378,
8002,
3039,
13,
29412,
13,
2398,
14,
5162,
1460,
14,
8601,
12,
64,
12,... | 2.583554 | 377 |
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
from .load import get_beer2vec
from .access_ext import beer_emb, word_weighter
if __name__ == "__main__":
run_demo()
| [
11748,
299,
32152,
355,
45941,
198,
6738,
1341,
35720,
13,
4164,
10466,
13,
24874,
3083,
1330,
8615,
500,
62,
38610,
414,
198,
198,
6738,
764,
2220,
1330,
651,
62,
42428,
17,
35138,
198,
6738,
764,
15526,
62,
2302,
1330,
6099,
62,
244... | 2.911765 | 68 |
import pytest
from k8s_snapshots.logconf import configure_logging
@pytest.fixture(scope='session', autouse=True)
from .fixtures import * # noqa
from .fixtures.kube import * # noqa
| [
11748,
12972,
9288,
198,
198,
6738,
479,
23,
82,
62,
45380,
20910,
13,
6404,
10414,
1330,
17425,
62,
6404,
2667,
628,
198,
31,
9078,
9288,
13,
69,
9602,
7,
29982,
11639,
29891,
3256,
1960,
1076,
28,
17821,
8,
198,
198,
6738,
764,
69... | 2.861538 | 65 |
"""Arch's package."""
from .arch import Arch
| [
37811,
19895,
338,
5301,
526,
15931,
198,
6738,
764,
998,
1330,
5579,
198
] | 3.461538 | 13 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-30 20:11
from __future__ import unicode_literals
import django.contrib.gis.db.models.fields
import django.contrib.sites.managers
from django.db import migrations, models
import django.db.models.deletion
import django.db.models.manager
import filebrowser.fields
import taggit.managers
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
940,
13,
19,
319,
1584,
12,
1065,
12,
1270,
1160,
25,
1157,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
... | 2.916667 | 120 |
#!/usr/bin/env python
"""Tests for `evolutionary_forest` package."""
from numpy.testing import assert_almost_equal
from sklearn.datasets import make_regression
from sklearn.metrics import mean_squared_error
from evolutionary_forest.forest import EvolutionaryForestRegressor
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
37811,
51,
3558,
329,
4600,
1990,
2122,
560,
62,
29623,
63,
5301,
526,
15931,
198,
198,
6738,
299,
32152,
13,
33407,
1330,
6818,
62,
28177,
62,
40496,
198,
6738,
1341,
35720,
13,
... | 3.518987 | 79 |
#!/usr/bin/env python3
import numpy as np
np.random.seed(1)
import random
random.seed(1)
import os
import glob
import re
import skimage.io as io
import tensorflow as tf
from tensorflow.keras.losses import binary_crossentropy
tf.set_random_seed(1)
from tensorflow.keras.callbacks import ReduceLROnPlateau, ModelCheckpoint, EarlyStopping, TensorBoard
from data import build_train_val, trainvalGenerator, testGenerator, save_result
from model import unet, unet_dilated
from losses import dice_loss
from mask_to_submission import make_submission
NUM_EPOCH = 100
NUM_TRAINING_STEP = 1000
NUM_VALIDATION_STEP = 180
TEST_SIZE = 94
# paths
train_path = os.path.join("data", "training")
val_path = os.path.join("data", "validation")
test_path = os.path.join("data", "test_set_images")
predict_path = "predict_images"
submission_path = "submission"
weight_path = "weights"
if not os.path.exists(val_path):
print("Build training and validation data set...")
build_train_val(train_path, val_path, val_size=0.2)
else:
print("Have found training and validation data set...")
print("Create generator for training and validation...")
# Arguments for data augmentation
data_gen_args = dict(rotation_range=45,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=True,
vertical_flip=True,
fill_mode='reflect')
# # Data generator for training dataset. Define the arguments.
# data_gen_args = dict(rotation_range = 40,
# width_shift_range = 0.2,
# height_shift_range = 0.2,
# shear_range = 0.2,
# zoom_range = 0.2,
# horizontal_flip = True,
# vertical_flip = True)
# Build generator for training and validation set
trainGen, valGen = trainvalGenerator(batch_size=8, aug_dict=data_gen_args,
train_path=train_path, val_path=val_path,
image_folder='images2', mask_folder='groundtruth2',
train_dir = None, # Set it to None if you don't want to save
val_dir = None, # Set it to None if you don't want to save
target_size = (256, 256), seed = 1)
print("Build model and training...")
print("...Build & train the modified U-Net with 32 filters...")
# Build model
model_32 = unet(n_filter=32, activation='elu', dropout_rate=0.2, loss=dice_loss)
reset_weights(model_32)
# Callback functions
callbacks = [
EarlyStopping(monitor='val_loss', patience=9, verbose=1, min_delta=1e-4),
ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, verbose=1, min_delta=1e-4),
ModelCheckpoint(os.path.join(weight_path, 'weights_32_dice.h5'), monitor='val_loss', save_best_only=True, verbose=1)
]
# Training
history_32 = model_32.fit_generator(generator=trainGen, steps_per_epoch=NUM_TRAINING_STEP,
validation_data=valGen, validation_steps=NUM_VALIDATION_STEP,
epochs=NUM_EPOCH, callbacks=callbacks)
print("...Build & train the modified U-Net with 64 filters...")
# Build model
model_64 = unet(n_filter=64, activation='elu', dropout_rate=0.2, loss=dice_loss)
reset_weights(model_64)
# Callback functions
callbacks = [
EarlyStopping(monitor='val_loss', patience=10, verbose=1, min_delta=2e-4),
ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=5, verbose=1, min_delta=1e-4),
ModelCheckpoint(os.path.join(weight_path, 'weights_64_dice.h5'), monitor='val_loss', save_best_only=True, verbose=1)
]
# Training
history_64 = model_64.fit_generator(generator=trainGen, steps_per_epoch=NUM_TRAINING_STEP,
validation_data=valGen, validation_steps=NUM_VALIDATION_STEP,
epochs=NUM_EPOCH, callbacks=callbacks)
print("...Build & train the U-Net with dilated convolution...")
# Build model
model_dilated = unet_dilated(n_filter=32, activation='elu', loss=dice_loss, dropout=False, batchnorm=False)
reset_weights(model_dilated)
# Callback functions
callbacks = [
EarlyStopping(monitor='val_loss', patience=10, verbose=1, min_delta=2e-4),
ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=5, verbose=1, min_delta=1e-4),
ModelCheckpoint(os.path.join(weight_path, 'weights_dilated_dice.h5'), monitor='val_loss', save_best_only=True, verbose=1)
]
# Training
history_dilated = model_dilated.fit_generator(generator=trainGen, steps_per_epoch=NUM_TRAINING_STEP,
validation_data=valGen, validation_steps=NUM_VALIDATION_STEP,
epochs=NUM_EPOCH, callbacks=callbacks)
print("Predict and save results...")
test_imgs = []
test_index = []
filelist = glob.glob(os.path.join("data", "test", "images")+'/*.png')
for filename in filelist:
if os.path.isfile(filename):
img = io.imread(filename)
img = img / 255
img = np.reshape(img,(1,)+img.shape)
img_number = int(re.search(r"\d+", filename).group(0))
test_imgs.append(img)
test_index.append(img_number)
else:
print('File {} does not exists'.format(filename))
print(len(test_imgs))
print("...For U-Net with 32 filters...")
testGene = testGenerator(test_imgs)
result_1 = model_32.predict_generator(testGene, TEST_SIZE, verbose=1)
print("...For U-Net with 64 filters...")
testGene = testGenerator(test_imgs)
result_2 = model_64.predict_generator(testGene, TEST_SIZE, verbose=1)
print("...For U-Net with dilated convolution...")
testGene = testGenerator(test_imgs)
result_3 = model_dilated.predict_generator(testGene, TEST_SIZE, verbose=1)
print("...Averaging the prediction results...")
result = (result_1 + result_2 + result_3)/3
save_result(predict_path, result, test_index)
print("Make submission...")
make_submission(predict_path, test_size=TEST_SIZE, indices=test_index, submission_filename=os.path.join(submission_path, "submission.csv"))
print("Done!") | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
299,
32152,
355,
45941,
198,
37659,
13,
25120,
13,
28826,
7,
16,
8,
198,
11748,
4738,
198,
25120,
13,
28826,
7,
16,
8,
198,
11748,
28686,
198,
11748,
15095,
198,
1174... | 2.318748 | 2,651 |
"""Support for Ombi."""
import logging
import pyombi
import voluptuous as vol
from homeassistant.const import (
CONF_API_KEY,
CONF_HOST,
CONF_PORT,
CONF_SSL,
CONF_USERNAME,
)
import homeassistant.helpers.config_validation as cv
from .const import (
ATTR_NAME,
ATTR_SEASON,
CONF_URLBASE,
DEFAULT_PORT,
DEFAULT_SEASON,
DEFAULT_SSL,
DEFAULT_URLBASE,
DOMAIN,
SERVICE_MOVIE_REQUEST,
SERVICE_MUSIC_REQUEST,
SERVICE_TV_REQUEST,
)
_LOGGER = logging.getLogger(__name__)
def urlbase(value) -> str:
"""Validate and transform urlbase."""
if value is None:
raise vol.Invalid("string value is None")
value = str(value).strip("/")
if not value:
return value
return value + "/"
SUBMIT_MOVIE_REQUEST_SERVICE_SCHEMA = vol.Schema({vol.Required(ATTR_NAME): cv.string})
SUBMIT_MUSIC_REQUEST_SERVICE_SCHEMA = vol.Schema({vol.Required(ATTR_NAME): cv.string})
SUBMIT_TV_REQUEST_SERVICE_SCHEMA = vol.Schema(
{
vol.Required(ATTR_NAME): cv.string,
vol.Optional(ATTR_SEASON, default=DEFAULT_SEASON): vol.In(
["first", "latest", "all"]
),
}
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_URLBASE, default=DEFAULT_URLBASE): urlbase,
vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean,
}
)
},
extra=vol.ALLOW_EXTRA,
)
def setup(hass, config):
"""Set up the Ombi component platform."""
ombi = pyombi.Ombi(
ssl=config[DOMAIN][CONF_SSL],
host=config[DOMAIN][CONF_HOST],
port=config[DOMAIN][CONF_PORT],
api_key=config[DOMAIN][CONF_API_KEY],
username=config[DOMAIN][CONF_USERNAME],
urlbase=config[DOMAIN][CONF_URLBASE],
)
try:
ombi.test_connection()
except pyombi.OmbiError as err:
_LOGGER.warning("Unable to setup Ombi: %s", err)
return False
hass.data[DOMAIN] = {"instance": ombi}
def submit_movie_request(call):
"""Submit request for movie."""
name = call.data[ATTR_NAME]
movies = ombi.search_movie(name)
if movies:
movie = movies[0]
ombi.request_movie(movie["theMovieDbId"])
else:
raise Warning("No movie found.")
def submit_tv_request(call):
"""Submit request for TV show."""
name = call.data[ATTR_NAME]
tv_shows = ombi.search_tv(name)
if tv_shows:
season = call.data[ATTR_SEASON]
show = tv_shows[0]["id"]
if season == "first":
ombi.request_tv(show, request_first=True)
elif season == "latest":
ombi.request_tv(show, request_latest=True)
elif season == "all":
ombi.request_tv(show, request_all=True)
else:
raise Warning("No TV show found.")
def submit_music_request(call):
"""Submit request for music album."""
name = call.data[ATTR_NAME]
music = ombi.search_music_album(name)
if music:
ombi.request_music(music[0]["foreignAlbumId"])
else:
raise Warning("No music album found.")
hass.services.register(
DOMAIN,
SERVICE_MOVIE_REQUEST,
submit_movie_request,
schema=SUBMIT_MOVIE_REQUEST_SERVICE_SCHEMA,
)
hass.services.register(
DOMAIN,
SERVICE_MUSIC_REQUEST,
submit_music_request,
schema=SUBMIT_MUSIC_REQUEST_SERVICE_SCHEMA,
)
hass.services.register(
DOMAIN,
SERVICE_TV_REQUEST,
submit_tv_request,
schema=SUBMIT_TV_REQUEST_SERVICE_SCHEMA,
)
hass.helpers.discovery.load_platform("sensor", DOMAIN, {}, config)
return True
| [
37811,
15514,
329,
440,
2022,
72,
526,
15931,
198,
11748,
18931,
198,
198,
11748,
12972,
2381,
72,
198,
11748,
2322,
37623,
5623,
355,
2322,
198,
198,
6738,
1363,
562,
10167,
13,
9979,
1330,
357,
198,
220,
220,
220,
7102,
37,
62,
1761... | 2.028127 | 1,991 |
from graphs.synonyms import *
| [
6738,
28770,
13,
28869,
43612,
1330,
1635,
201,
198,
201,
198
] | 3 | 11 |
# Copyright (c) 2020 Aiven, Helsinki, Finland. https://aiven.io/
from aiven_mysql_migrate import config
from aiven_mysql_migrate.migration import MySQLMigrateMethod, MySQLMigration
import logging
LOGGER = logging.getLogger(__name__)
def main(args=None, *, app="mysql_migrate"):
"""Migrate MySQL database from source to target, take configuration from CONFIG"""
import argparse
parser = argparse.ArgumentParser(description="MySQL migration tool.", prog=app)
parser.add_argument("-d", "--debug", action="store_true", help="Enable debug logging.")
parser.add_argument(
"-f", "--filter-dbs", help="Comma separated list of databases to filter out during migration", required=False
)
parser.add_argument("--validate-only", action="store_true", help="Run migration pre-checks only")
parser.add_argument(
"--seconds-behind-master",
type=int,
default=-1,
help="Max replication lag in seconds to wait for, by default no wait"
)
parser.add_argument(
"--stop-replication", action="store_true", help="Stop replication, by default replication is left running"
)
parser.add_argument(
"--privilege-check-user",
type=str,
required=False,
help="User to be used when replicating for privileges check "
"(e.g. 'checker@%%', must have REPLICATION_APPLIER grant)"
)
args = parser.parse_args(args)
setup_logging(debug=args.debug)
assert config.SOURCE_SERVICE_URI, "SOURCE_SERVICE_URI is not specified"
assert config.TARGET_SERVICE_URI, "TARGET_SERVICE_URI is not specified"
migration = MySQLMigration(
source_uri=config.SOURCE_SERVICE_URI,
target_uri=config.TARGET_SERVICE_URI,
target_master_uri=config.TARGET_MASTER_SERVICE_URI,
filter_dbs=args.filter_dbs,
privilege_check_user=args.privilege_check_user,
)
migration.setup_signal_handlers()
LOGGER.info("MySQL migration from %s to %s", migration.source.hostname, migration.target.hostname)
LOGGER.info("Starting pre-checks")
migration_method = migration.run_checks()
if migration_method == MySQLMigrateMethod.replication:
LOGGER.info("All pre-checks passed successfully.")
else:
LOGGER.info("Not all pre-checks passed successfully. Replication method is not available.")
if args.validate_only:
return
LOGGER.info("Starting migration using method: %s", migration_method)
migration.start(
migration_method=migration_method,
seconds_behind_master=args.seconds_behind_master,
stop_replication=args.stop_replication,
)
LOGGER.info("Migration finished.")
if migration_method == MySQLMigrateMethod.replication and not args.stop_replication:
LOGGER.info("IMPORTANT: Replication is still running, make sure to stop it after switching to the target DB")
if __name__ == "__main__":
import sys
sys.exit(main())
| [
2,
15069,
357,
66,
8,
12131,
317,
1469,
11,
47688,
11,
17837,
13,
3740,
1378,
64,
1469,
13,
952,
14,
198,
6738,
257,
1469,
62,
28744,
13976,
62,
76,
42175,
1330,
4566,
198,
6738,
257,
1469,
62,
28744,
13976,
62,
76,
42175,
13,
76,... | 2.793201 | 1,059 |
##################################################
# Basic HTML fetching and parsing for SMH.com.au #
##################################################
import urllib2
from bs4 import BeautifulSoup
| [
29113,
14468,
2235,
198,
2,
14392,
11532,
21207,
278,
290,
32096,
329,
9447,
39,
13,
785,
13,
559,
1303,
198,
29113,
14468,
2235,
198,
198,
11748,
2956,
297,
571,
17,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
628,
628
] | 4.926829 | 41 |
# -*- coding: utf-8 -*-
if __name__ == '__main__':
main()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
201,
198,
201,
198,
201,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
201,
198,
220,
220,
220,
1388,
3419,
201,
198
] | 1.775 | 40 |
#Crie um programa que leia varios numerios inteiros
#so ira parar quando digitar o valor 999, que é a condição final
#mostre quantos numeros foram digitados, e qual foi a soma entre eles.
soma = quantidade = 0
while True:
n1 = int(input('Digite um número: [999] PARA PARAR'))
if n1 != 999:
soma = soma + n1
quantidade = quantidade + 1
else:
break
print(f'Você digitou ao todo {quantidade}, somando todos os numeros chega a {soma}') | [
2,
34,
5034,
23781,
1430,
64,
8358,
443,
544,
1401,
4267,
5470,
4267,
493,
20295,
4951,
198,
2,
568,
4173,
64,
1582,
283,
627,
25440,
3100,
7940,
267,
1188,
273,
36006,
11,
8358,
38251,
257,
1779,
72,
16175,
28749,
2457,
198,
2,
171... | 2.358586 | 198 |
#!/usr/bin/env python
# Packaging script for the "objection" AndroidAPS preference editor.
# NOTE: distutils is deprecated since Python 3.10 (removed in 3.12);
# consider migrating to setuptools when the project's toolchain allows.

from distutils.core import setup

setup(
    name="objection",
    version="1.0",
    description="AndroidAPS preference editor",
    author="Dave Carlson",
    author_email="thecubic@thecubic.net",
    url="https://github.com/thecubic/objection/",
    packages=["objection"],
    # Fixed: "objection-reset" was listed twice, which made distutils try to
    # install the same script twice.
    scripts=["objection-dump", "objection-pass", "objection-reset", "objection-pwchg"],
)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
6738,
1233,
26791,
13,
7295,
1330,
9058,
198,
198,
40406,
7,
198,
220,
220,
220,
1438,
2625,
15252,
295,
1600,
198,
220,
220,
220,
2196,
2625,
16,
13,
15,
1600,
198,
220,
220,
... | 2.673203 | 153 |
#!/bin/python3

import math
import os
import random
import re
import sys

# Complete the countingValleys function below.
def countingValleys(n, s):
    """Return the number of valleys walked through on a hike.

    The hiker starts at sea level (altitude 0); each 'U' step raises the
    altitude by one and each 'D' lowers it by one.  A valley is a maximal
    stretch of steps spent below sea level, so one valley ends every time
    a 'U' step brings the altitude back up to exactly 0.

    :param n: number of steps taken, i.e. len(s) (kept for the original
        call signature; the string itself is authoritative).
    :param s: path string consisting of 'U' and 'D' characters.
    :returns: count of valleys traversed.
    """
    level = 0
    valleys = 0
    for step in s:
        level += 1 if step == 'U' else -1
        # Rising back to sea level closes exactly one valley.
        if step == 'U' and level == 0:
            valleys += 1
    return valleys

if __name__ == '__main__':
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    n = int(input())
    s = input()
    result = countingValleys(n, s)
    fptr.write(str(result) + '\n')
    fptr.close()
| [
2,
48443,
8800,
14,
29412,
18,
198,
198,
11748,
10688,
198,
11748,
28686,
198,
11748,
4738,
198,
11748,
302,
198,
11748,
25064,
198,
198,
2,
13248,
262,
14143,
53,
6765,
893,
2163,
2174,
13,
628,
198,
361,
11593,
3672,
834,
6624,
705,... | 2.496183 | 131 |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import plotly
import plotly.offline as offline
from plotly.graph_objs import *
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
| [
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
7110,
306,
198,
11748,
7110,
306,
13,
2364,
1370,
355,
18043,
198,
6738,
7110,
306,
13,
349... | 3.123288 | 73 |
import os
import sys
# Make the shared "common" test helpers importable relative to this file.
sys.path.append(os.path.normpath(os.path.join(os.path.abspath(__file__), '..', '..', '..', "common")))
from env_indigo import *

indigo = Indigo()

# Load the same chiral SMILES with different CXSMILES stereo annotations
# (|&1:0| = racemic group, |o1:0| = or-group, |a:3| = absolute — presumably
# exercising enhanced-stereo parsing; confirm against env_indigo's loadSMILES).
loadSMILES('O[C@H](N)C |&1:0|')
loadSMILES('O[C@H](N)C |o1:0|')
loadSMILES('O[C@H](N)C |a:3|')
| [
11748,
28686,
198,
11748,
25064,
198,
17597,
13,
6978,
13,
33295,
7,
418,
13,
6978,
13,
27237,
6978,
7,
418,
13,
6978,
13,
22179,
7,
418,
13,
6978,
13,
397,
2777,
776,
7,
834,
7753,
834,
828,
705,
492,
3256,
705,
492,
3256,
705,
... | 1.929577 | 142 |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# NOTE(review): AutoStructify is imported but never used below (it is usually
# registered in a setup(app) hook) — confirm whether it can be dropped.
from recommonmark.transform import AutoStructify
from smalldict import __version__ as release
# -- Project information -----------------------------------------------------
project = "SmallDict"
copyright = "2021, Yusuke Minami"
author = "Yusuke Minami"
# Keep the displayed version in lockstep with the installed package version.
version = release
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.autosummary",
    "sphinx.ext.napoleon",
    "sphinx_autodoc_typehints",
    "sphinx.ext.doctest",
    "sphinx.ext.todo",
    "sphinx.ext.coverage",
    "sphinx.ext.ifconfig",
    "sphinx.ext.viewcode",
    "sphinx.ext.mathjax",
    "recommonmark",
    "sphinx_copybutton",
]
# enable autosummary plugin (table of contents for modules/classes/class
# methods)
autosummary_generate = True
# Accept both reStructuredText and Markdown source documents.
source_suffix = {
    ".rst": "restructuredtext",
    ".md": "markdown",
}
# The master toctree document.
master_doc = "index"
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {"collapse_navigation": False, "style_external_links": True}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "smalldictdoc"
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "smalldict", "SmallDict Documentation", [author], 1)]
| [
2,
28373,
2393,
329,
262,
45368,
28413,
10314,
27098,
13,
198,
2,
198,
2,
770,
2393,
691,
4909,
257,
6356,
286,
262,
749,
2219,
3689,
13,
1114,
257,
1336,
198,
2,
1351,
766,
262,
10314,
25,
198,
2,
3740,
1378,
2503,
13,
82,
746,
... | 3.455032 | 934 |
from __future__ import absolute_import, division, print_function
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import (Imputer, LabelEncoder,StandardScaler)
from sklearn import metrics
from sklearn.feature_selection import RFE
from sklearn.ensemble import RandomForestClassifier
import warnings
warnings.simplefilter(action = "ignore", category = FutureWarning)
# Importing Data
dataset = pd.read_csv('Data.csv')
# Removing some columns which aren't useful for our calculation
df = dataset.drop([col for col in ['movie_title', 'color', 'plot_keywords', 'movie_imdb_link',
'aspect_ratio', 'genres','actor_2_name','actor_3_name','actor_1_name',
'director_name']
if col in dataset], axis=1)
#get the positions of the columns which are strings
language_pos = df.columns.get_loc("language")
country_pos = df.columns.get_loc("country")
content_rating_pos = df.columns.get_loc("content_rating")
#create a exclude list of these excluded attributes
categorical_fts = []
categorical_fts.append(language_pos)
categorical_fts.append(country_pos)
categorical_fts.append(content_rating_pos)
#Array of features, exludes the last column
X = df.iloc[:, :-1].values
# Last column array, string of length 6 (dtype)
Ystr = np.asarray(df.iloc[:, df.shape[1]-1], dtype="|S6") #numpy is moody
#Convert it to float
Y = Ystr.astype(np.float)
#Keeps throwing the error msg
label_language = LabelEncoder()
X[0:, language_pos] = label_language.fit_transform(X[0:, language_pos])
label_country = LabelEncoder()
X[0:, country_pos] = label_country.fit_transform(X[0:, country_pos])
label_content_rating = LabelEncoder()
X[0:, content_rating_pos] = label_content_rating.fit_transform(X[0:, content_rating_pos])
imp = Imputer(missing_values='NaN', strategy='mean', axis=0)
X = imp.fit_transform(X)
#Lets pick the important features
model = RandomForestClassifier()
rfe = RFE(model,15)
rfe = rfe.fit(X,Ystr)
#print(rfe.support_) #These are the coloumns we're keeping
#print(rfe.ranking_) #These are ranking the coloumns
#Drop the unimportant features
drop_list = []
for i in range(0,len(rfe.support_)):
if rfe.support_[i]:
print(df.columns.values[i]) #TODO Remove this later
else:
drop_list.append(i)
X = np.delete(X,drop_list,axis=1)
#Scaling X
"""
We don't use OneHotEncoder as we are already using LabelEncoder.
Whoever suggested we use OneHotEncoder was an idiot.
"""
scaler = StandardScaler().fit(X)
X = scaler.transform(X)
X_train, X_test, Y_train, Y_test = train_test_split(X, Ystr, test_size = 0.15, random_state = 0)
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
7297,
11,
3601,
62,
8818,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
1341,
35720,
13,
19849,
62,
49283,
1330,
4512,
62,
9288,
62,
35312... | 2.837109 | 927 |
#!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Non-relativistic nuclear spin-rotation tensors for RKS
'''
from pyscf.prop.nmr import rks as rks_nmr
from pyscf.prop.nsr import rhf as rhf_nsr
class NSR(rhf_nsr.NSR):
    '''Nuclear-spin rotation tensors for RKS.

    Reuses the RHF NSR machinery, swapping in the RKS NMR Fock builder and
    CPHF/CPKS solver so the DFT exchange-correlation response is included.
    '''
    # Borrow Fock construction and first-order MO response from the RKS NMR code.
    get_fock = rks_nmr.get_fock
    solve_mo1 = rks_nmr.solve_mo1

from pyscf import lib
from pyscf import dft
# Attach NSR as a method on (symmetry-adapted) RKS mean-field objects so
# users can call mf.NSR() directly.
dft.rks.RKS.NSR = dft.rks_symm.RKS.NSR = lib.class_as_method(NSR)
# Smoke tests: each print should emit a value near zero — the constants are
# presumably previously validated reference results (regression anchors).
if __name__ == '__main__':
    from pyscf import gto
    from pyscf import dft
    from pyscf import lib
    # HF molecule with an off-origin H to exercise gauge handling.
    mol = gto.Mole()
    mol.verbose = 7
    mol.output = '/dev/null'
    mol.atom = '''h  ,  0.   0.   0.917
                  f  ,  0.   0.   0.
                  '''
    mol.basis = 'dzp'
    mol.build()

    mf = dft.RKS(mol).run(xc='b3lyp')
    rotg = mf.NSR()
    m = rotg.kernel()
    print(m[1,0,0] - -301.49652448221707)
    print(lib.finger(m) - 28.57893850199683)

    # Same molecule with an explicit gauge origin (in Bohr) instead of GIAOs.
    rotg.gauge_orig = (0,0,.917/lib.param.BOHR)
    m = rotg.kernel()
    print(m[0,0,0] - 277.173892536396)
    print(lib.finger(m) - 96.92616726791988)

    # CO with a non-default isotope mass for carbon (13C).
    mol.atom = '''C  ,  0.   0.   0.
                  O  ,  0.   0.   1.1283
                  '''
    mol.basis = 'ccpvdz'
    mol.nucprop = {'C': {'mass': 13}}
    mol.build()
    mf = dft.RKS(mol).run(xc='bp86')
    rotg = NSR(mf)
    m = rotg.kernel()
    print(m[0,0,0] - -32.23298865237305)
    print(lib.finger(m) - -11.278686427378966)

    # Water with the default functional.
    mol.atom='''O      0.   0.       0.
                H      0.  -0.757    0.587
                H      0.   0.757    0.587'''
    mol.basis = 'ccpvdz'
    mol.build()
    mf = dft.RKS(mol).run()
    rotg = NSR(mf)
    m = rotg.kernel()
    print(lib.finger(m) - -66.94250282318671)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
15069,
1946,
12,
23344,
383,
9485,
6173,
37,
34152,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,... | 2.054148 | 1,145 |
###
# Copyright 2019 Hewlett Packard Enterprise, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
# -*- coding: utf-8 -*-
"""RMC implementation """
#---------Imports---------
import os
import sys
import time
import copy
import shutil
import logging
from collections import OrderedDict
import six
import jsonpatch
import jsonpointer
import redfish.ris.gen_compat
import redfish.ris.validation
from redfish.rest.v1 import RestClient
from redfish.ris.ris import SessionExpired, RisMonolith
from redfish.ris.validation import ValidationManager, Typepathforval
from redfish.ris.resp_handler import ResponseHandler
from redfish.ris.utils import merge_dict, getattributeregistry, diffdict, \
navigatejson, iterateandclear, skipnonsettingsinst, warning_handler, \
validate_headers, checkallowablevalues
from redfish.ris.rmc_helper import (UndefinedClientError, InstanceNotFoundError, \
NothingSelectedError, ValidationError, RmcConfig, RmcFileCacheManager, \
NothingSelectedSetError, LoadSkipSettingError, ValueChangedError, \
IloResponseError, EmptyRaiseForEAFP)
#---------End of imports---------
#---------Debug logger---------
LOGGER = logging.getLogger(__name__)
#---------End of debug logger---------
class RmcApp(object):
"""Application level implementation of RMC"""
def __init__(self, Args=[]):
"""Initialize RmcApp
:param Args: arguments to be passed to RmcApp
:type Args: str
"""
self.logger = LOGGER
self.redfishinst = None
self._config = RmcConfig()
self._cm = RmcFileCacheManager(self)
self._monolith = None
self._iloversion = None
self._validationmanager = None
self._selector = None
self.verbose = False
self.typepath = redfish.ris.gen_compat.Typesandpathdefines()
Typepathforval(typepathobj=self.typepath)
if "--showwarnings" not in Args:
self.logger.setLevel(logging.WARNING)
if self.logger.handlers and self.logger.handlers[0].name == 'lerr':
self.logger.handlers.remove(self.logger.handlers[0])
configfile = [Args[ind+1] for ind, arg in enumerate(Args) if arg in ('--config', '-c')]
configfile = [arg.split("=", 1)[1] for arg in Args if \
arg.startswith("--config=")] if not configfile else configfile
self.config_file = [os.path.join(os.path.dirname(sys.executable), \
'redfish.conf') if os.name == 'nt' else '/etc/ilorest/redfish.conf'][0] \
if not configfile else configfile[0]
if not os.path.isfile(self.config_file):
LOGGER.warning("Config file '%s' not found\n\n", self.config_file)
    @property
    def monolith(self):
        """Get the monolith from the current client"""
        return self._monolith
    @monolith.setter
    def monolith(self, monolith):
        """Set the monolith"""
        self._monolith = monolith
    @property
    def current_client(self):
        """Get the current client

        :raises UndefinedClientError: if no client has logged in yet.
        """
        if self.redfishinst:
            return self.redfishinst
        raise UndefinedClientError()
    @property
    def validationmanager(self):
        """Get the validation manager (lazily created; None without an iLO version)"""
        if self.getiloversion():
            if self._validationmanager:
                # Reuse the cached manager but clear stale errors/warnings.
                self._validationmanager.reset_errors_warnings()
            else:
                monolith = self.monolith
                self._validationmanager = ValidationManager(monolith, defines=self.typepath)
            self._validationmanager.updatevalidationdata()
        else:
            self._validationmanager = None
        return self._validationmanager
    @property
    def selector(self):
        """Get the selector"""
        return self._selector
    @selector.setter
    def selector(self, sel):
        """Set the selector"""
        self._selector = sel
    @property
    def config(self):
        """Return config"""
        return self._config
    @property
    def cache(self):
        """Return the cache setting from the config"""
        return self._config.get_cache()
    def restore(self, creds=None, enc=False):
        """Restore monolith from cache

        :param creds: credentials used to revive the cached client session.
        :param enc: flag indicating the cached credentials are encoded.
        """
        self._cm.uncache_rmc(creds=creds, enc=enc)
    def deletelogoutfunction(self, url=None):
        """Wrapper function for logout helper function

        :param url: The URL to perform a logout request on.
        :type url: str.
        """
        return self._cm.logout_del_function(url)
    def set_encode_funct(self, funct):
        """ set the encoding function for cache to use

        :param funct: The function to use for encoding data
        :type funct: function.
        """
        self._cm.encodefunct = funct
    def set_decode_funct(self, funct):
        """ set the decoding function for cache to use

        :param funct: The function to use for decoding data
        :type funct: function.
        """
        self._cm.decodefunct = funct
    def save(self):
        """Cache current monolith build"""
        self._cm.cache_rmc()
    def config_from_file(self, filename):
        """Get config from file

        :param filename: The config file name.
        :type filename: str.
        """
        self._config = RmcConfig(filename=filename)
        self._config.load()
    def login(self, username=None, password=None, base_url='blobstore://.', \
              path=None, skipbuild=False, includelogs=False, \
              biospassword=None, is_redfish=False, proxy=None, ssl_cert=None):
        """Main worker function for login command

        :param username: user name required to login to server.
        :type username: str.
        :param password: password credentials required to login.
        :type password: str.
        :param base_url: redfish host name or ip address.
        :type base_url: str.
        :param path: path to initiate login to.
        :type path: str.
        :param proxy: any proxy required for connection.
        :type proxy: str.
        :param ssl_cert: path to the CA bundle or SSL certificate to use with connection.
        :type ssl_cert: str.
        :param skipbuild: flag to determine whether to start monolith download.
        :type skipbuild: boolean.
        :param includelogs: flag to determine if logs should be downloaded.
        :type includelogs: boolean.
        :param biospassword: BIOS password for the server if set.
        :type biospassword: str.
        :param is_redfish: If True, a Redfish specific header (OData) will be
            added to every request.
        :type is_redfish: boolean.
        """
        # Probe the target first so typepath reflects its Redfish/REST generation.
        self.typepath.getgen(url=base_url, username=username, password=password, \
                proxy=proxy, isredfish=is_redfish, ca_certs=ssl_cert)
        is_redfish = self.typepath.updatedefinesflag(redfishflag=is_redfish)
        # A live session must be closed before opening a new one.
        if self.redfishinst and self.redfishinst.session_key:
            self.logout()
        self.redfishinst = RestClient(base_url=base_url, username=username, password=password, \
                    default_prefix=self.typepath.defs.startpath, biospassword=biospassword, \
                            is_redfish=is_redfish, proxy=proxy, ca_certs=ssl_cert)
        self.current_client.login()
        inittime = time.time()
        self._build_monolith(path=path, includelogs=includelogs, skipbuild=skipbuild)
        endtime = time.time()
        if self.verbose:
            sys.stdout.write("Monolith build process time: %s\n" % (endtime - inittime))
        self.save()
        # NOTE(review): this condition looks inverted — calling update_member on
        # a falsy monolith only works if it is an empty-but-constructed object;
        # confirm it should not read ``if self.monolith``.
        if not self.monolith:
            self.monolith.update_member(resp=self.current_client.root, \
                                        path=self.typepath.defs.startpath, init=False)
    def logout(self, url=None):
        """Main function for logout command

        :param url: the URL for the logout request.
        :type url: str.
        """
        sessionlocs = []
        # Drop cached validation/version state tied to the old session.
        self._validationmanager = None
        self._iloversion = None
        try:
            self.current_client.logout()
        except Exception:
            # Client-side logout failed; fall back to deleting tracked sessions.
            sessionlocs = self.deletelogoutfunction(url)
        else:
            self.deletelogoutfunction(url)
        for session in sessionlocs:
            try:
                self.delete_handler(session[0], silent=True, service=True)
            # Deliberately broad: session teardown is best-effort cleanup.
            except:
                pass
        self.redfishinst = None
        # Remove the on-disk cache directory, again best-effort.
        cachedir = self.config.get_cachedir()
        if cachedir:
            try:
                shutil.rmtree(cachedir)
            except Exception:
                pass
    def select(self, selector=None, fltrvals=(None, None), rel=False):
        """Selects instances based on selector and filter values

        :param selector: the type selection for the get operation.
        :type selector: str.
        :param fltrvals: the filter values for the select operation (Key,Val).
        :type fltrvals: tuple.
        :param rel: flag to reload the selected instances.
        :type rel: boolean.
        :returns: returns a list of selected items
        :raises InstanceNotFoundError: when nothing matches the selector/filter.
        """
        if selector:
            # Normalize the selector for the server's Redfish/REST generation.
            selector = self.typepath.modifyselectorforgen(selector)
            instances = self._getinstances(selector=selector, rel=rel)
            # Strip surrounding quotes only when the filter value is a string.
            val = fltrvals[1].strip('\'\"') if isinstance(fltrvals[1], \
                                six.string_types) else fltrvals[1]
            # Keep instances whose data matches the filter key path (if given).
            instances = [inst for inst in instances if not fltrvals[0] or \
                    navigatejson(fltrvals[0].split('/'), copy.deepcopy(inst.dict), val)]
            if any(instances):
                # Remember the successful selector and persist it to cache.
                self.selector = selector
                self.save()
                return instances
        errmsg = "Unable to locate instance for '{0}' and filter '{1}={2}'". \
                            format(selector, fltrvals[0], fltrvals[1]) if fltrvals[0] \
                            and fltrvals[1] else "Unable to locate instance for {}".format(selector)
        raise InstanceNotFoundError(errmsg)
    def types(self, fulltypes=False):
        """Main function for types command

        :param fulltypes: flag to determine if types return full name.
        :type fulltypes: boolean.
        :returns: returns a list of type strings
        """
        instances = list()
        monolith = self.monolith
        # Prefer the resource directory when the server exposes one.
        rdirtype = next(monolith.gettypename(self.typepath.defs.\
                                         resourcedirectorytype), None)
        if not rdirtype:
            # No resource directory: walk the whole monolith, skipping
            # generic/error entries.
            for inst in monolith.iter():
                if not any([x for x in ['ExtendedError', 'object', 'string']\
                                                        if x in inst.type]):
                    instances.append(inst.type)
        else:
            for instance in monolith.iter(rdirtype):
                for item in instance.resp.dict["Instances"]:
                    if item and instance._typestring in list(item.keys()) and \
                        not 'ExtendedError' in item[instance._typestring]:
                        if not fulltypes and instance._typestring == '@odata.type':
                            # Shorten '#Vendor.vX_Y_Z.Type' to 'Vendor.vX_Y_Z'.
                            tval = item["@odata.type"].split('#')
                            tval = tval[-1].split('.')[:-1]
                            tval = '.'.join(tval)
                            instances.append(tval)
                        elif item:
                            instances.append(item[instance._typestring])
        return instances
    def getprops(self, selector=None, props=None, nocontent=None, \
                                skipnonsetting=True, remread=False, insts=None):
        """Gets properties from a specified selector

        :param selector: the type selection for the get operation.
        :type selector: str.
        :param skipnonsetting: flag to remove non settings path.
        :type skipnonsetting: boolean.
        :param nocontent: props not found are added to this list.
        :type nocontent: set.
        :param remread: flag to remove readonly properties.
        :type remread: boolean.
        :param props: provide the required property within current selection.
        :type props: list.
        :param insts: instances to be searched for specific props
        :type insts: list
        :returns: returns a list from the get command
        :raises NothingSelectedError: when no instances match the selection.
        """
        results = list()
        nocontent = set() if nocontent is None else nocontent
        if props:
            # Track which requested props were found in at least one instance.
            noprop = {prop:False for prop in props} if props else {}
        instances = insts if insts else self._getinstances(selector=selector)
        instances = skipnonsettingsinst(instances) if skipnonsetting else instances
        if not instances or len(instances) == 0:
            raise NothingSelectedError()
        for instance in instances:
            currdict = instance.dict
            # Apply pending (uncommitted) patches so edits are reflected.
            for patch in instance.patches:
                currdict = jsonpatch.apply_patch(currdict, patch)
            _ = self.removereadonlyprops(currdict, emptyraise=True) if remread else None
            temp_dict = dict()
            if props:
                # NOTE: rebinding props here mutates the loop's view of the
                # caller-supplied argument on the first iteration.
                if isinstance(props, six.string_types):
                    props = [props]
                for prop in props:
                    copydict = copy.deepcopy(currdict)
                    propsdict = navigatejson(prop.split('/'), copydict)
                    if propsdict is None:
                        continue
                    noprop[prop] = True
                    merge_dict(temp_dict, propsdict)
                if temp_dict:
                    results.append(temp_dict)
            else:
                results.append(currdict)
        if props:
            # Report requested properties that were never found anywhere.
            _ = [nocontent.add(prop) for prop in props if not noprop[prop]]
        return results
    def info(self, selector=None, props=None, ignorelist=None, dumpjson=False, \
                                                            latestschema=False):
        """Main function for info command

        :param selector: the type selection for the get operation.
        :type selector: str.
        :param props: the property path (or list of path segments) to describe.
        :type props: str or list.
        :param ignorelist: list that contains keys to be removed from output.
        :type ignorelist: list.
        :param dumpjson: flag to determine if output should be printed out.
        :type dumpjson: boolean.
        :param latestschema: flag to determine if we should use smart schema.
        :type latestschema: boolean.
        :returns: returns a list of keys from current dict that are not ignored
        """
        model = None
        outdata = ''
        nokey = False
        results = set()
        typestring = self.typepath.defs.typestring
        iloversion = self.getiloversion()
        if not iloversion:
            # Without a known iLO version no registry model can be resolved.
            return results
        instances = self._getinstances(selector)
        attributeregistry = getattributeregistry(instances)
        instances = skipnonsettingsinst(instances)
        if not instances or len(instances) == 0:
            raise NothingSelectedError()
        for inst in instances:
            bsmodel = None
            currdict = inst.resp.dict
            # The Link header (if present) points at the schema for this resource.
            proppath = inst.resp.getheader('Link').split(';')[0].strip('<>') \
                            if inst.resp.getheader('Link') else None
            seldict = {}
            if not props:
                # No property path: just list available top-level keys.
                currdict = currdict['Attributes'] if inst.maj_type.\
                    startswith(self.typepath.defs.biostype) and currdict.get('Attributes'\
                                                        , None) else currdict
                results.update([key for key in currdict if key not in \
                                            ignorelist and not '@odata' in key.lower()])
                continue
            if isinstance(props, six.string_types):
                props = props.split('/') if '/' in props else props
                props = [props] if not isinstance(props, (list, \
                                                            tuple)) else props
            seldict = navigatejson(props, copy.deepcopy(currdict))
            if seldict is None:
                nokey = True
                continue
            if self.typepath.defs.typestring in currdict:
                seldict[typestring] = currdict[typestring]
                model, bsmodel = self.get_model(currdict, \
                                attributeregistry, latestschema, newarg= \
                                props[:-1], proppath=proppath)
            if not model and not bsmodel:
                errmsg = "/".join(props)
                warning_handler("Unable to locate registry model or "\
                                "No data available for entry: {}\n".format(errmsg))
                continue
            # Prefer the registry model validator, else the bios/schema model's.
            found = model.get_validator(props[-1]) if model else None
            found = bsmodel.get_validator(props[-1]) if not found and \
                                                            bsmodel else found
            outdata = found if found and dumpjson else \
                                found.print_help(props[-1]) if found else outdata
        if outdata or results:
            return outdata if outdata else results
        # NOTE(review): both branches of this conditional build the identical
        # message, so the ``if nokey`` test is dead — confirm intended wording.
        errmsg = "Entry {} not found in current selection\n".format("/".\
                     join(props)) if nokey else "Entry {} not found in current"\
                                            " selection\n".format("/".join(props))
        warning_handler(errmsg)
    def loadset(self, seldict=None, fltrvals=(None, None), diffonly=False,\
                latestschema=False, uniqueoverride=False, selector=None):
        """Optimized version of the old style of set properties

        :param selector: the type selection for the get operation.
        :type selector: str.
        :param seldict: current selection dictionary with required changes.
        :type seldict: dict.
        :param fltrvals: the filter values of selection for the set operation
                         (Key,Val).
        :type fltrvals: tuple.
        :param latestschema: flag to determine if we should use smart schema.
        :type latestschema: boolean.
        :param diffonly: flag to differentiate only existing properties.
                         NOTE(review): currently unused in this body — confirm.
        :type diffonly: bool.
        :param uniqueoverride: flag to determine override for unique properties.
        :type uniqueoverride: str.
        :returns: returns a status or a list of set properties
        """
        results = list()
        nochangesmade = False
        # List so the helper can flag skipped settings by mutation.
        settingskipped = [False]
        selector = self.selector if not selector else selector
        instances = self.select(selector=selector, fltrvals=fltrvals)
        attributeregistry = getattributeregistry(instances=instances)
        instances = skipnonsettingsinst(instances=instances)
        if not instances or len(instances) == 0:
            raise NothingSelectedSetError()
        for instance in instances:
            if validate_headers(instance, verbose=self.verbose):
                continue
            else:
                nochangesmade = True
            currdict = instance.resp.dict
            # Diff the requested changes against the live resource data.
            diff_resp = diffdict(newdict=copy.deepcopy(seldict),\
                oridict=copy.deepcopy(currdict), settingskipped=settingskipped)
            iloversion = self.getiloversion()
            if iloversion:
                proppath = instance.resp.getheader('Link').split(';')[0].\
                                strip('<>') if instance.resp.getheader('Link') \
                                else None
                self._validatechanges(instance=instance, attributeregistry=attributeregistry,\
                        newdict=diff_resp, oridict=currdict, \
                        unique=uniqueoverride, latestschema=latestschema, \
                        proppath=proppath)
            patches = jsonpatch.make_patch(currdict, diff_resp)
            if patches:
                # Strip 'remove' operations: absent keys should not be deleted.
                torem = []
                _ = [torem.append(patch) for patch in patches.patch if patch["op"] == "remove"]
                _ = [patches.patch.remove(patch) for patch in torem]
            # Drop older pending patches superseded by this request.
            # NOTE(review): deleting by index while enumerating can skip
            # entries when more than one patch matches — confirm.
            for ind, item in enumerate(instance.patches):
                ppath = item.patch[0]["path"] if hasattr(item, "patch") else item[0]["path"]
                jpath = jsonpointer.JsonPointer(ppath.lower())
                jval = jpath.resolve(seldict, default='kasjdk?!')
                if not jval == 'kasjdk?!':
                    del instance.patches[ind]
            if patches:
                for patch in patches.patch:
                    forprint = patch["value"] if "value" in patch\
                                    else (patch["op"] + " " + patch["from"])
                    results.append({patch["path"][1:]:forprint})
                instance.patches.append(patches)
        if not nochangesmade:
            return results
        elif settingskipped[0] is True:
            raise LoadSkipSettingError()
        else:
            return results
    def status(self):
        """Returns all pending changes that have not been committed yet

        :returns: list of {type(path): [patch operations]} entries, with
            password values masked.
        """
        iloversion = self.getiloversion()
        finalresults = list()
        monolith = self.monolith
        (_, _) = self.get_selection(setenable=True)
        attrreg = getattributeregistry([ele for ele in monolith.iter() if ele])
        for instance in monolith.iter():
            results = list()
            if not(instance.patches and len(instance.patches) > 0):
                continue
            # Flatten raw lists and JsonPatch objects into one operation list.
            for item in instance.patches:
                if isinstance(item, list):
                    results.extend(jsonpatch.JsonPatch(item))
                else:
                    results.extend(item)
            currdict = instance.resp.dict
            itemholder = list()
            for mainitem in results:
                item = copy.deepcopy(mainitem)
                if iloversion:
                    _, bsmodel = self.get_model(currdict, attrreg)
                    if bsmodel:
                        prop = item["path"][1:].split('/')[-1]
                        validator = bsmodel.get_validator(prop)
                        if validator:
                            # Mask password-typed properties in the output.
                            if isinstance(validator, redfish.ris.\
                                          validation.PasswordValidator):
                                item["value"] = "******"
                itemholder.append(item)
            if itemholder:
                finalresults.append({instance.maj_type+'('+instance.path+')': itemholder})
        return finalresults
    def commit(self):
        """Applies all pending patches to the server

        :yields: Two strings: 1. Path being PATCHed 2. Result of the PATCH
                 True:Success, False:Fail
        """
        instances = [inst for inst in self.monolith.iter() if inst.patches]
        if not instances or len(instances) == 0:
            raise NothingSelectedError()
        for instance in instances:
            if validate_headers(instance, verbose=self.verbose):
                continue
            currdict = dict()
            oridict = instance.resp.dict
            totpayload = dict()
            # apply patches to represent current edits
            for patches in instance.patches:
                # Older iLO firmware needs an explicit etag-change check.
                if self._iloversion < 5.130:
                    self._checkforetagchange(instance=instance)
                fulldict = jsonpatch.apply_patch(oridict, patches)
                for patch in patches:
                    currdict = copy.deepcopy(fulldict)
                    patchpath = patch["path"]
                    pobj = jsonpointer.JsonPointer(patchpath)
                    indpayloadcount = 0
                    # Walk down the pointer until hitting a list or a leaf.
                    for item in pobj.parts:
                        payload = pobj.walk(currdict, item)
                        indpayloadcount = indpayloadcount+1
                        if isinstance(payload, list):
                            break
                        else:
                            if not isinstance(payload, dict):
                                break
                            currdict = copy.deepcopy(payload)
                    # Rebuild the nested dict shell around the changed value.
                    indices = pobj.parts[:indpayloadcount]
                    createdict = lambda x, y: {x:y}
                    while len(indices):
                        payload = createdict(indices.pop(), payload)
                    merge_dict(totpayload, payload)
                currdict = copy.deepcopy(totpayload)
            if currdict:
                yield instance.resp.request.path
                put_path = instance.resp.request.path
                etag = self.monolith.paths[put_path].etag
                # Newer firmware enforces optimistic concurrency via If-Match.
                headers = dict([('If-Match', etag)]) if self._iloversion > 5.130 else None
                try:
                    self.patch_handler(put_path, currdict, optionalpassword=\
                            self.current_client.bios_password, headers=headers)
                except IloResponseError:
                    yield True #Failure
                else:
                    yield False #Success
    def patch_handler(self, put_path, body, headers=None, response=False, silent=False, \
                                            optionalpassword=None, service=False):
        """Main worker function for raw patch command

        :param put_path: the URL path.
        :type put_path: str.
        :param body: the body to the sent.
        :type body: str.
        :param headers: additional headers to be added to the request.
        :type headers: str.
        :param response: flag to return the response.
        :type response: str.
        :param silent: flag to suppress response output.
        :type silent: boolean.
        :param optionalpassword: provide password for authentication.
        :type optionalpassword: str.
        :param service: flag to determine if minimum calls should be done.
        :type service: boolean.
        :returns: returns RestResponse object containing response data
        :raises SessionExpired: on a 401 response from the server.
        """
        (put_path, body) = self._checkpostpatch(body=body, path=put_path, patch=True)
        if optionalpassword:
            self.current_client.bios_password = optionalpassword
        results = self.current_client.patch(put_path, body=body, headers=headers)
        if results and getattr(results, "status", None) and results.status == 401:
            raise SessionExpired()
        self._modifiedpath(results, replace=True)
        # 412 Precondition Failed: our cached etag is stale — refresh the path.
        if results and hasattr(results, "status") and results.status == 412:
            self._updatemono(path=put_path, rel=True)
        if not silent:
            ResponseHandler(self.validationmanager, self.typepath.defs.messageregistrytype).\
                output_resp(results, dl_reg=service, print_code=self.verbose)
        if response:
            return results
    def get_handler(self, put_path, silent=False, uncache=False, headers=None, \
                                                response=False, service=False):
        """main worker function for raw get command

        :param put_path: the URL path.
        :type put_path: str.
        :param silent: flag to determine if no output should be done.
        :type silent: boolean.
        :param uncache: flag to not store the data downloaded into cache.
        :type uncache: boolean.
        :param headers: additional headers to be added to the request.
        :type headers: dict.
        :param response: flag to return the response even when not a 200.
        :type response: boolean.
        :param service: flag to determine if minimum calls should be done.
        :type service: boolean.
        :returns: a RestResponse object on HTTP 200 (or whenever response is
            True); None otherwise
        """
        results = self.current_client.get(put_path, headers=headers)

        # Cache successful responses into the monolith unless told not to.
        if not uncache and results.status == 200:
            self.monolith.update_member(resp=results, path=put_path, init=False)

        if results and getattr(results, "status", None) and results.status == 401:
            raise SessionExpired()

        # A 200 needs no registry download when rendering the message output.
        if results.status == 200:
            service = True

        if not silent:
            ResponseHandler(self.validationmanager, self.typepath.defs.messageregistrytype).\
                output_resp(results, dl_reg=service, print_code=self.verbose)

        if results.status == 200 or response:
            return results
        return None
def post_handler(self, put_path, body, headers=None, response=False, \
silent=False, service=False):
"""Main worker function for raw post command
:param put_path: the URL path.
:type put_path: str.
:param body: the body to the sent.
:type body: str.
:param url: originating URL.
:type url: str.
:param sessionid: session id to be used instead of credentials.
:type sessionid: str.
:param headers: additional headers to be added to the request.
:type headers: str.
:param response: flag to return the response.
:type response: str.
:param service: flag to determine if minimum calls should be done.
:type service: boolean.
:returns: returns a RestResponse from client's Post command
"""
(put_path, body) = self._checkpostpatch(body=body, path=put_path)
results = self.current_client.post(put_path, body=body, headers=headers)
if results and getattr(results, "status", None) and results.status == 401:
raise SessionExpired()
self._modifiedpath(results)
if not silent:
ResponseHandler(self.validationmanager, self.typepath.defs.messageregistrytype).\
output_resp(results, dl_reg=service, print_code=self.verbose)
if response:
return results
    def put_handler(self, put_path, body, headers=None, response=False, silent=False, \
                                                optionalpassword=None, service=False):
        """Main worker function for raw put command

        :param put_path: the URL path.
        :type put_path: str.
        :param body: the body to be sent with the PUT.
        :type body: dict or str.
        :param headers: additional headers to be added to the request.
        :type headers: dict.
        :param response: flag to return the response object.
        :type response: boolean.
        :param silent: flag to suppress console output of the response.
        :type silent: boolean.
        :param optionalpassword: provide password for authentication.
        :type optionalpassword: str.
        :param service: flag to determine if minimum calls should be done.
        :type service: boolean.
        :returns: the RestResponse object from the client's Put command when
            response is True, otherwise None
        """
        if optionalpassword:
            self.current_client.bios_password = optionalpassword
        results = self.current_client.put(put_path, body=body, headers=headers)

        if results and getattr(results, "status", None) and results.status == 401:
            raise SessionExpired()

        # A successful PUT replaces the resource: flag it and drop cached patches.
        self._modifiedpath(results, replace=True)

        if not silent:
            ResponseHandler(self.validationmanager, self.typepath.defs.messageregistrytype).\
                output_resp(results, dl_reg=service, print_code=self.verbose)

        if response:
            return results
def delete_handler(self, put_path, headers=None, silent=False, service=False):
"""Main worker function for raw delete command
:param put_path: the URL path.
:type put_path: str.
:param url: originating URL.
:type url: str.
:param sessionid: session id to be used instead of credentials.
:type sessionid: str.
:param headers: additional headers to be added to the request.
:type headers: str.
:param silent: flag to disable output.
:type silent: boolean.
:param service: flag to determine if minimum calls should be done.
:type service: boolean.
:returns: returns a RestResponse object from client's Delete command
"""
results = self.current_client.delete(put_path, headers=headers)
if results and getattr(results, "status", None) and results.status == 401:
raise SessionExpired()
self._modifiedpath(results, delete=True)
if not silent:
ResponseHandler(self.validationmanager, self.typepath.defs.messageregistrytype).\
output_resp(results, dl_reg=service, print_code=self.verbose)
return results
def head_handler(self, put_path, silent=False, service=False):
"""Main worker function for raw head command
:param put_path: the URL path.
:type put_path: str.
:param url: originating URL.
:type url: str.
:param sessionid: session id to be used instead of credentials.
:type sessionid: str.
:param service: flag to determine if minimum calls should be done.
:type service: boolean.
:returns: returns a RestResponse object from client's Head command
:param is_redfish: If True, a Redfish specific header (OData) will be
added to every request.
:type is_redfish: boolean.
"""
results = self.current_client.head(put_path)
if results and getattr(results, "status", None) and results.status == 401:
raise SessionExpired()
if not silent:
ResponseHandler(self.validationmanager, self.typepath.defs.messageregistrytype).\
output_resp(results, dl_reg=service, print_code=self.verbose)
if results.status == 200:
return results
return None
def removereadonlyprops(self, currdict, emptyraise=False, \
removeunique=True, specify_props=None):
"""Remove readonly properties from dictionary
:param currdict: dictionary to be filtered
:type currdict: dictionary
:param emptyraise: Raise empty error
:type emptyraise: boolean
:type removeunique: flag to remove unique values
:type removeunique: boolean
:parm specify_props: modify list of properties to be removed
:type specify_props: list
"""
try:
type_str = self.typepath.defs.typestring
currtype = currdict.get(type_str, None)
oridict = copy.deepcopy(currdict)
if specify_props:
templist = specify_props
else:
templist = ["Modified", "Type", "Description", "Status",\
"links", "SettingsResult", "Attributes", \
"@odata.context", "@odata.type", "@odata.id",\
"@odata.etag", "Links", "Actions", \
"AvailableActions", "BiosVersion"]
#Attributes removed and readded later as a validation workaround
currdict = iterateandclear(currdict, templist)
iloversion = self.getiloversion()
if not iloversion:
return currdict
self.validationmanager.validatedict(currdict, currtype=currtype, \
monolith=self.monolith, unique=removeunique, searchtype=None)
if oridict.get("Attributes", None):
currdict["Attributes"] = oridict["Attributes"]
return currdict
except:
if emptyraise is True:
raise EmptyRaiseForEAFP()
elif emptyraise == 'pass':
pass
else:
raise
def getidbytype(self, tpe):
""" Return a list of URIs that correspond to the supplied type string
:param tpe: type string to search for.
:type tpe: string.
"""
urls = list()
val = next(self.monolith.gettypename(tpe), None)
urls.extend(self.monolith.typesadded[val] if val else [])
return urls
def getcollectionmembers(self, path, fullresp=False):
"""Returns collection/item lists of the provided path
:param path: path to return.
:type path: string.
:param fullresp: Return full json data instead of only members.
:type path: bool.
:returns: returns collection list
"""
if self.typepath.defs.isgen10 and self.typepath.gencompany \
and '?$expand=.' not in path:
path += '?$expand=.' if path.endswith('/') else '/?$expand=.'
members = self.get_handler(path, service=True, silent=True)
if members and not fullresp:
try:
members = members.dict['Members'] if self.typepath.defs.\
isgen10 else members.dict['Items']
except KeyError:
members = []
elif fullresp:
members = [members.dict]
return members
def getbiosfamilyandversion(self):
"""Function that returns the current BIOS family"""
self._updatemono(currtype="ComputerSystem.", crawl=False)
try:
for inst in self.monolith.iter("ComputerSystem."):
if "Current" in inst.resp.obj["Bios"]:
oemjson = inst.resp.obj["Bios"]["Current"]
parts = oemjson["VersionString"].split(" ")
return (parts[0], parts[1][1:])
else:
parts = inst.resp.obj["BiosVersion"].split(" ")
return (parts[0], parts[1][1:])
except Exception:
pass
return (None, None)
    def getiloversion(self, skipschemas=False):
        """Function that returns the current iLO version

        Determines (and caches) the firmware version, optionally verifying
        that schemas/registries are downloaded. Schema support requires
        iLO 4 2.10 or newer.

        :param skipschemas: flag to determine whether to skip schema download.
        :type skipschemas: boolean.
        :returns: the numeric iLO version when schema support is available,
            otherwise None
        """
        iloversion = self._iloversion = self._iloversion if self._iloversion \
                                                else self.typepath.iloversion

        if self.typepath.gencompany and not self._iloversion and not self.typepath.noschemas:
            self.monolith.load(self.typepath.defs.managerpath, crawl=False)
            results = next(iter(self.getprops('Manager.', ['FirmwareVersion', 'Firmware'])))

            def quickdrill(_dict, key):
                """ function to find key in nested dictionary """
                return _dict[key]

            # Walk down nested dicts until the raw version string is reached.
            while isinstance(results, dict):
                results = quickdrill(results, next(iter(results.keys())))
            # Parse strings like "iLO 5 v2.30" into 5.230 — assumes the
            # second and third space-separated tokens carry the version;
            # TODO confirm against all FirmwareVersion formats.
            iloversionlist = results.replace('v', '').replace('.', '').split(' ')
            iloversion = float('.'.join(iloversionlist[1:3]))

            model = self.getprops('Manager.', ['Model'])
            if model:
                if next(iter(model))['Model'] == "iLO CM":
                    # Assume iLO 4 types in Moonshot
                    iloversion = None

            self._iloversion = iloversion
        elif not self.typepath.gencompany:#Assume schemas are available somewhere in non-hpe redfish
            self._iloversion = iloversion = 4.210

        conf = None if not skipschemas else True
        if not skipschemas:
            if iloversion and iloversion >= 4.210:
                conf = self._verifyschemasdownloaded(self.monolith)
            elif iloversion and iloversion < 4.210:
                warning_handler("Please upgrade to iLO 4 version 2.1 or above for schema support.")
            else:
                warning_handler("Schema support unavailable on the currently logged in system.")

        return iloversion if iloversion and iloversion >= 4.210 and conf else None
def get_selection(self, selector=None, setenable=False, reloadpath=False):
"""Special main function for set/filter with select command
:param selector: the type selection for the get operation.
:type selector: str.
:param sel: property(s) to be filtered by.
:type sel: str.
:param val: value to be filtered by.
:type val: str.
:param setenable: flag to determine if registry should also be returned.
:type setenable: boolean.
:param reloadpath: flag to reload the selected instances.
:type reloadpath: boolean.
:returns: returns a list of selected items
"""
instances = self._getinstances(selector=selector, rel=reloadpath)
if setenable:
attributeregistryfound = getattributeregistry(instances=instances)
instances = skipnonsettingsinst(instances=instances)
return instances, attributeregistryfound
return instances
    def create_save_header(self, selector=None, selectignore=False):
        """Adds save file headers to show what server the data came from

        Collects manufacturer/model/BIOS/serial/iLO details from the cached
        ComputerSystem., Bios and Manager. resources. Any missing key simply
        truncates the collection (the whole walk is best-effort).

        :param selector: the type selection for the get save operation.
        :type selector: str.
        :param selectignore: ignore the missing-selector early return.
        :type selectignore: boolean
        :returns: returns an header ordered dictionary
        """
        instances = OrderedDict()
        monolith = self.monolith
        selector = self.selector if not selector else selector
        if not selector and not selectignore:
            return instances
        # Make sure the three resource types consulted below are fresh.
        self._updatemono(currtype="ComputerSystem.", crawl=False)
        self._updatemono(currtype=self.typepath.defs.biostype, crawl=False)
        self._updatemono(currtype="Manager.", crawl=False)
        instances["Comments"] = OrderedDict()
        # Best-effort: any KeyError/AttributeError aborts the remaining fields.
        try:
            for instance in monolith.iter("ComputerSystem."):
                if instance.resp.obj["Manufacturer"]:
                    instances["Comments"]["Manufacturer"] = \
                                        instance.resp.obj["Manufacturer"]
                if instance.resp.obj["Model"]:
                    instances["Comments"]["Model"] = instance.resp.obj["Model"]
                if instance.resp.obj["Oem"][self.typepath.defs.oemhp]["Bios"]["Current"]:
                    oemjson = instance.resp.obj["Oem"][self.typepath.defs.oemhp]["Bios"]["Current"]
                    instances["Comments"]["BIOSFamily"] = oemjson["Family"]
                    instances["Comments"]["BIOSDate"] = oemjson["Date"]
            for instance in monolith.iter(self.typepath.defs.biostype):
                # Gen10 keeps the serial under Attributes; older payloads at top level.
                if "Attributes" in list(instance.resp.obj.keys()) and \
                                instance.resp.obj["Attributes"]["SerialNumber"]:
                    instances["Comments"]["SerialNumber"] = \
                                instance.resp.obj["Attributes"]["SerialNumber"]
                elif instance.resp.obj["SerialNumber"]:
                    instances["Comments"]["SerialNumber"] = instance.resp.obj["SerialNumber"]
            for instance in monolith.iter("Manager."):
                if instance.resp.obj["FirmwareVersion"]:
                    instances["Comments"]["iLOVersion"] = instance.resp.obj["FirmwareVersion"]
        except Exception:
            pass
        return instances
def download_path(self, paths, crawl=True, rel=False):
"""Loads a path into monolith
:param paths: list of paths to download
:type paths: list
:param rel: flag to reload the paths or not.
:type rel: bool.
:param crawl: flag to determine if load should traverse found links.
:type crawl: boolean.
"""
if not paths:
return
try:
map(lambda x: self.monolith.load(path=x, init=False, rel=rel,\
crawl=crawl, includelogs=True), paths)
except Exception as excp:
try:
if excp.errno == 10053:
raise SessionExpired()
except:
raise excp
else:
raise excp
    def get_model(self, currdict, attributeregistry, latestschema=None, newarg=None, proppath=None):
        """Returns the model for the current instance's schema/registry

        :param currdict: current selection dictionary.
        :type currdict: dict.
        :param attributeregistry: current systems attribute registry.
        :type attributeregistry: dict.
        :param latestschema: flag to determine if we should use smart schema.
        :type latestschema: boolean.
        :param newarg: list of multi level properties to be modified.
        :type newarg: list.
        :param proppath: path of the schema you want to validate.
        :type proppath: str.
        :returns: returns (schema model, bios/attribute-registry model); the
            second element is None when no attribute registry applies
        """
        type_str = self.typepath.defs.typestring
        bsmodel = None
        valobj = self.validationmanager
        model = valobj.get_registry_model(currtype=currdict[type_str], \
                    newarg=newarg, latestschema=latestschema, proppath=proppath)
        # Non-BIOS resources only need the schema model.
        if not attributeregistry and model:
            return model, bsmodel
        if not model and not attributeregistry:
            LOGGER.warning("Unable to locate registry/schema for %s", currdict[type_str])
            return None, None
        # Prefer the registry named by the resource itself; otherwise fall
        # back to the one discovered on the system.
        attrval = currdict.get("AttributeRegistry", None)
        attrval = list(attributeregistry.values())[0] if not attrval and \
                                                attributeregistry else attrval
        bsmodel = valobj.get_registry_model(currtype=attrval if attrval else \
                            currdict[type_str], newarg=newarg, \
                            latestschema=latestschema, searchtype=\
                            self.typepath.defs.attributeregtype)
        return model, bsmodel
    def _build_monolith(self, path=None, includelogs=False, skipbuild=False):
        """Run through the RIS tree to build monolith

        :param path: path to initiate login to.
        :type path: str.
        :param includelogs: flag to determine if logs should be downloaded.
        :type includelogs: boolean.
        :param skipbuild: if true, skip build of monolith (initialize empty)
        :type skipbuild: boolean.
        """
        self.monolith = RisMonolith(self.current_client, self.typepath)
        if not skipbuild:
            self.monolith.load(path=path, includelogs=includelogs, init=True)
            self.monolith.populatecollections()
        else:
            # Empty monolith: seed it with just the service root response.
            self.monolith.update_member(resp=self.current_client.root, \
                                        path=self.current_client.default_prefix,\
                                        init=False)
def _modifiedpath(self, results, delete=False, replace=False):
"""Check the path and set the modified flag
:param delete: Flag to delete the path in the results
:type delete: bool
:param replace: Flag to replace the path from the results
:type replace: bool
:param results: Response for the path
:type results: RestResponse
"""
if not results or not results.status in (200, 201):
return
path = results.path
path = path.split('/Actions')[0] if 'Actions' in path else path
path = path + '/' if self.typepath.defs.isgen10 and path[-1] != '/' else path
if not replace and path in self.monolith.paths:
self.monolith.paths[path].modified = True
_ = self.monolith.markmodified(path)
if delete and path in self.monolith.paths:
self.monolith.removepath(path)
if replace and path in self.monolith.paths:
self.monolith.paths[path].modified = True
self.monolith.paths[path].patches = []
def _checkforchange(self, paths, crawl=True):
"""Check if the given paths have been modified and updates monolith if it has
:param paths: paths to be checked
:type paths: list
"""
(pathtoetag, _) = self._gettypeswithetag()
mono = self.monolith
self.download_path(list(paths), crawl=crawl, rel=True)
etags = [None if not path in mono.paths else mono.paths[path].etag for path in paths]
sametag = [path for ind, path in enumerate(paths) if path in pathtoetag\
and path in self.monolith.paths and pathtoetag[path] != etags[ind]]
for path in sametag:
self.monolith.paths[path].patches = []
if sametag:
LOGGER.warning('The data in the following paths have been updated. '\
'Recheck the changes made to made. %s', ','.join([str(path) for \
path in sametag]))
def _updatemono(self, currtype=None, path=None, crawl=False, rel=False):
"""Check if type/path exists in current monolith
:param entrytype: the found entry type.
:type entrytype: str.
:param currtype: the current entry type.
:type currtype: str.
:param crawl: flag to determine if load should traverse found links.
:type crawl: boolean.
"""
monolith = self.monolith
currtype = None if currtype == '"*"' else currtype
paths = set()
if currtype:
for path, resp in monolith.paths.items():
if currtype and currtype.lower() not in resp.maj_type.lower():
continue
if rel or not resp:
paths.add(path)
if resp.modified:
paths.add(path)
paths.update(monolith.checkmodified(path) if path in monolith.ctree else set())
elif path:
if monolith.paths and not monolith.paths.keys()[0][-1] == '/':
path = path[:-1] if path[-1] == '/' else path
if rel or not monolith.path(path):
paths.add(path)
if path in monolith.paths and monolith.paths[path].modified:
paths.add(path)
paths.update(monolith.checkmodified(path) if path in monolith.ctree else set())
if paths:
self._checkforchange(list(paths), crawl=crawl)
def _verifyschemasdownloaded(self, monolith):
"""Function to verify that the schema has been downloaded
:param monolith: full data model retrieved from server.
:type monolith: dict.
"""
schemaid = self.typepath.schemapath
regid = self.typepath.regpath
if not (schemaid and regid):
warning_handler("Missing Schemas or registries.")
return None
schemacoll = next(monolith.gettypename(self.typepath.defs.schemafilecollectiontype), None)
if not schemacoll or any(paths.lower() == schemaid and \
monolith.paths[paths] \
for paths in monolith.typesadded[schemacoll]):
self.download_path([schemaid], crawl=False)
schemacoll = next(monolith.gettypename(\
self.typepath.defs.schemafilecollectiontype), None)
regcoll = next(monolith.gettypename(self.typepath.defs.regfilecollectiontype), None)
if not regcoll or any(paths.lower() == regid and monolith.paths[paths] \
for paths in monolith.typesadded[regcoll]):
self.download_path([regid], crawl=False)
regcoll = next(monolith.gettypename(self.typepath.defs.regfilecollectiontype), None)
return any(paths.lower() in (schemaid.lower(), regid.lower()) and \
monolith.paths[paths] for paths in monolith.paths)
    def _validatechanges(self, instance=None, attributeregistry=None, latestschema=None, \
                            proppath=None, newdict=None, oridict=None, unique=False):
        """Validate the changes that are requested by the user.

        :param instance: current selection instance.
        :type instance: RisMonolithMemberv100.
        :param attributeregistry: map of instance type to registry name; when
            set, validation runs against the attribute registry instead of
            the resource schema.
        :type attributeregistry: dict.
        :param latestschema: flag to determine if we should use smart schema.
        :type latestschema: boolean.
        :param proppath: path of the schema to validate against.
        :type proppath: str.
        :param newdict: dictionary with only the properties that have changed.
        :type newdict: dict.
        :param oridict: selection dictionary with current state.
        :type oridict: dict.
        :param unique: flag to determine override for unique properties.
        :type unique: boolean.
        :raises ValidationError: when the validation manager reports errors.
        """
        entrymono = self.monolith
        currtype = oridict[self.typepath.defs.typestring]
        validation_manager = self.validationmanager
        # BIOS-style resources validate against their attribute registry;
        # everything else validates against the resource schema type.
        validation_manager.validatedict(newdict, \
                    currtype=attributeregistry[instance.maj_type]\
                    if attributeregistry else currtype, monolith=entrymono, \
                    unique=unique, searchtype=self.typepath.defs.attributeregtype\
                    if attributeregistry else None, latestschema=latestschema, \
                    proppath=proppath)

        validation_errors = validation_manager.errors
        # Warnings are surfaced but never abort the operation.
        for warninngs in validation_manager.warnings:
            warning_handler(warninngs)

        if validation_errors and len(validation_errors) > 0:
            raise ValidationError(validation_errors)
        checkallowablevalues(newdict=newdict, oridict=oridict)
def _getinstances(self, selector=None, rel=False, crawl=False):
"""Main function to get instances of particular type and reload
:param selector: the type selection for the get operation.
:type selector: str.
:param setenable: flag to determine if registry should also be returned.
:type setenable: boolean.
:param setenable: flag to determine if registry should also be returned.
:type setenable: boolean.
:param rel: flag to reload the selected instances.
:type rel: boolean.
:returns: returns a list of selected items
"""
instances = list()
selector = self.selector if not selector else selector
if selector:
selector = ".".join(selector.split('#')[-1].split(".")[:2])
self._updatemono(currtype=selector, crawl=crawl, rel=rel)
if not selector:
return instances
selector = None if selector == '"*"' else selector
instances = [inst for inst in self.monolith.iter(selector) \
if inst.maj_type not in ['object', 'string']]
_ = [setattr(inst, 'patches', []) for inst in instances if rel]
return instances
    def _checkpostpatch(self, body=None, path=None, patch=False):
        """Make the post file compatible with the system generation

        Converts action paths/bodies between the legacy rest form and the
        redfish form, depending on which the managed system expects.

        :param body: request body to be checked/converted.
        :type body: dict.
        :param path: The URL location to check.
        :type path: str.
        :param patch: flag to determine if a patch is being made.
        :type patch: boolean.
        :returns: modified (path, body) pair for target and action respectively
        """
        try:
            if self.typepath.defs.flagforrest:
                # System speaks legacy rest: strip redfish action suffixes and
                # route OEM actions through the Target property instead.
                if "Target" not in body and not patch:
                    if "/Oem/Hp" in path:
                        body["Target"] = self.typepath.defs.oempath

                if path.startswith("/redfish/v1"):
                    path = path.replace("/redfish", "/rest", 1)

                if "/Actions/" in path:
                    ind = path.find("/Actions/")
                    path = path[:ind]

                if path.endswith('/'):
                    path = path[:-1]
            elif path.startswith("/rest/") and self.typepath.defs.isgen9:
                # Gen9 redfish: look up the action key on the live resource to
                # rebuild the redfish-style .../Actions/<ActionType>/ path.
                results = self.get_handler(put_path=path, service=True, silent=True)
                if results and results.status == 200:
                    if results.dict:
                        if "Target" in body:
                            actions = results.dict["Oem"][self.typepath.defs.oemhp]["Actions"]
                        elif "Actions" in body:
                            actions = results.dict["Actions"]
                        else:
                            return (path, body)

                        allkeys = list(actions.keys())
                        targetkey = [x for x in allkeys if x.endswith(body["Action"])]

                        if targetkey[0].startswith("#"):
                            targetkey[0] = targetkey[0][1:]

                # NOTE(review): if the GET above fails, 'targetkey' is unbound
                # and the reference below raises NameError — confirm intended.
                path = path.replace("/rest", "/redfish", 1)
                path = path+"/Actions"

                if "Target" in body:
                    path = path+self.typepath.defs.oempath
                    del body["Target"]

                if targetkey:
                    path = path + "/" + targetkey[0] + "/"

            return (path, body)
        except Exception as excp:
            raise excp
def _checkforetagchange(self, instance=None):
"""Function to check the status of the etag
:param instance: retrieved instance to check etag for change.
:type instance: dict.
"""
if instance:
path = instance.path
(oldtag, _) = self._gettypeswithetag()
self._updatemono(path=path, rel=True)
(newtag, _) = self._gettypeswithetag()
if (oldtag[path] != newtag[path]) and \
not self.typepath.defs.hpilodatetimetype in instance.maj_type:
warning_handler("The property you are trying to change " \
"has been updated. Please check entry again " \
" before manipulating it.\n")
raise ValueChangedError()
def _gettypeswithetag(self):
"""Gathers etags of all paths in monolith and their type associations"""
instancepath = dict()
instances = dict()
for inst in self.monolith.iter():
instancepath[inst.path] = inst.maj_type
instances[inst.path] = inst.etag
return [instances, instancepath]
| [
21017,
201,
198,
2,
15069,
13130,
30446,
15503,
6400,
446,
14973,
11,
3457,
13,
1439,
2489,
10395,
13,
201,
198,
2,
201,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
201,
198,
2,
345,
... | 2.136474 | 28,467 |
__source__ = 'https://leetcode.com/problems/smallest-subtree-with-all-the-deepest-nodes/'
# Time: O(N)
# Space: O(N)
#
# Description: Leetcode # 865. Smallest Subtree with all the Deepest Nodes
#
# Given a binary tree rooted at root, the depth of each node is the shortest distance to the root.
#
# A node is deepest if it has the largest depth possible among any node in the entire tree.
#
# The subtree of a node is that node, plus the set of all descendants of that node.
#
# Return the node with the largest depth such that it contains all the deepest nodes in its subtree.
#
# Example 1:
#
# Input: [3,5,1,6,2,0,8,null,null,7,4]
# Output: [2,7,4]
# Explanation:
#
# We return the node with value 2, colored in yellow in the diagram.
# The nodes colored in blue are the deepest nodes of the tree.
# The input "[3, 5, 1, 6, 2, 0, 8, null, null, 7, 4]" is a serialization of the given tree.
# The output "[2, 7, 4]" is a serialization of the subtree rooted at the node with value 2.
# Both the input and output have TreeNode type.
#
# Note:
#
# The number of nodes in the tree will be between 1 and 500.
# The values of each node are unique.
#
import unittest
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# 24ms 99.69%
# Run the (currently empty) unittest suite when executed as a script.
if __name__ == '__main__':
    unittest.main()

# Reference Java solutions kept as a raw string for side-by-side study;
# the content below is data, not executable code.
Java = '''
# Thought: https://leetcode.com/problems/smallest-subtree-with-all-the-deepest-nodes/solution/
#
# Time Complexity: O(N), where N is the number of nodes in the tree.
# Space Complexity: O(N).
/**
 * Definition for a binary tree node.
 * public class TreeNode {
 *     int val;
 *     TreeNode left;
 *     TreeNode right;
 *     TreeNode(int x) { val = x; }
 * }
 */
# Two pass DFS: 1. get max_depth, 2. get subtree
# 4ms 48.78%
class Solution {
    Map<TreeNode, Integer> depth;
    int max_depth;

    public TreeNode subtreeWithAllDeepest(TreeNode root) {
        depth = new HashMap();
        depth.put(null, -1);
        dfs(root, null);
        max_depth = -1;
        for (Integer d: depth.values()) {
            max_depth = Math.max(max_depth, d);
        }
        return getSubtree(root);
    }

    public void dfs(TreeNode node, TreeNode parent) {
        if (node != null) {
            depth.put(node, depth.get(parent) + 1);
            dfs(node.left, node);
            dfs(node.right, node);
        }
    }

    public TreeNode getSubtree(TreeNode node) {
        if (node == null || depth.get(node) == max_depth) return node;
        TreeNode left = getSubtree(node.left),
                right = getSubtree(node.right);
        if (left != null && right != null) return node;
        if (left != null) return left;
        if (right != null) return right;
        return null;
    }
}

/**
 * The TreeDepth of a subtree is:
 * Result.node: the largest depth node that is equal to or
 * an ancestor of all the deepest nodes of this subtree.
 * Result.dist: the number of nodes in the path from the root
 * of this subtree, to the deepest node in this subtree.
 */
# One pass DFS
# 3ms 87.24%
class TreeDepth {
    TreeNode node;
    int depth;
    TreeDepth(TreeNode n, int d) {
        node = n;
        depth = d;
    }
}

class Solution {
    public TreeNode subtreeWithAllDeepest(TreeNode root) {
        return dfs(root).node;
    }

    public TreeDepth dfs(TreeNode node) {
        if (node == null) return new TreeDepth(null, 0);
        TreeDepth left = dfs(node.left),
                right = dfs(node.right);
        if (left.depth > right.depth) return new TreeDepth(left.node, left.depth + 1);
        if (left.depth < right.depth) return new TreeDepth(right.node, right.depth + 1);
        return new TreeDepth(node, left.depth + 1);
    }
}
'''
| [
834,
10459,
834,
796,
705,
5450,
1378,
293,
316,
8189,
13,
785,
14,
1676,
22143,
14,
17470,
395,
12,
7266,
21048,
12,
4480,
12,
439,
12,
1169,
12,
22089,
395,
12,
77,
4147,
14,
6,
198,
2,
3862,
25,
220,
440,
7,
45,
8,
198,
2,
... | 2.481601 | 1,549 |
# This file implements a MultiIsoVisual class that can be used to show
# multiple layers of isosurface simultaneously. It is derived from the original
# VolumeVisual class in vispy.visuals.volume, which is releaed under a BSD license
# included here:
#
# ===========================================================================
# Vispy is licensed under the terms of the (new) BSD license:
#
# Copyright (c) 2015, authors of Vispy
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Vispy Development Team nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
# OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ===========================================================================
#
# This modified version is released under the BSD license given in the LICENSE
# file in this repository.
from ..extern.vispy.gloo import Texture3D, TextureEmulated3D, VertexBuffer, IndexBuffer
from ..extern.vispy.visuals.volume import VolumeVisual, Visual
from ..extern.vispy.scene.visuals import create_visual_node
import numpy as np
from ..extern.vispy.visuals.volume import VERT_SHADER, frag_dict
from ..extern.vispy.color import get_colormap
# TODO: find a way to add a uniform variable instead of rewriting the whole shader code
# Fragment shader
FRAG_SHADER = """
// uniforms
uniform $sampler_type u_volumetex;
uniform vec3 u_shape;
uniform float u_threshold;
uniform float u_relative_step_size;
uniform int level; //threshold level numbers
//varyings
// varying vec3 v_texcoord;
varying vec3 v_position;
varying vec4 v_nearpos;
varying vec4 v_farpos;
// uniforms for lighting. Hard coded until we figure out how to do lights
const vec4 u_ambient = vec4(0.2, 0.4, 0.2, 1.0);
const vec4 u_diffuse = vec4(0.8, 0.2, 0.2, 1.0);
const vec4 u_specular = vec4(1.0, 1.0, 1.0, 1.0);
const float u_shininess = 40.0;
//varying vec3 lightDirs[1];
// global holding view direction in local coordinates
vec3 view_ray;
float rand(vec2 co)
{{
// Create a pseudo-random number between 0 and 1.
// http://stackoverflow.com/questions/4200224
return fract(sin(dot(co.xy ,vec2(12.9898, 78.233))) * 43758.5453);
}}
float colorToVal(vec4 color1)
{{
return color1.g; // todo: why did I have this abstraction in visvis?
}}
vec4 calculateColor(vec4 betterColor, vec3 loc, vec3 step)
{{
// Calculate color by incorporating lighting
vec4 color1;
vec4 color2;
// View direction
vec3 V = normalize(view_ray);
// calculate normal vector from gradient
vec3 N; // normal
color1 = $sample( u_volumetex, loc+vec3(-step[0],0.0,0.0) );
color2 = $sample( u_volumetex, loc+vec3(step[0],0.0,0.0) );
N[0] = colorToVal(color1) - colorToVal(color2);
betterColor = max(max(color1, color2),betterColor);
color1 = $sample( u_volumetex, loc+vec3(0.0,-step[1],0.0) );
color2 = $sample( u_volumetex, loc+vec3(0.0,step[1],0.0) );
N[1] = colorToVal(color1) - colorToVal(color2);
betterColor = max(max(color1, color2),betterColor);
color1 = $sample( u_volumetex, loc+vec3(0.0,0.0,-step[2]) );
color2 = $sample( u_volumetex, loc+vec3(0.0,0.0,step[2]) );
N[2] = colorToVal(color1) - colorToVal(color2);
betterColor = max(max(color1, color2),betterColor);
float gm = length(N); // gradient magnitude
N = normalize(N);
// Flip normal so it points towards viewer
float Nselect = float(dot(N,V) > 0.0);
N = (2.0*Nselect - 1.0) * N; // == Nselect * N - (1.0-Nselect)*N;
// Get color of the texture (albeido)
color1 = betterColor;
color2 = color1;
// todo: parametrise color1_to_color2
// Init colors
vec4 ambient_color = vec4(0.0, 0.0, 0.0, 0.0);
vec4 diffuse_color = vec4(0.0, 0.0, 0.0, 0.0);
vec4 specular_color = vec4(0.0, 0.0, 0.0, 0.0);
vec4 final_color;
// todo: allow multiple light, define lights on viewvox or subscene
int nlights = 1;
for (int i=0; i<nlights; i++)
{{
// Get light direction (make sure to prevent zero devision)
vec3 L = normalize(view_ray); //lightDirs[i];
float lightEnabled = float( length(L) > 0.0 );
L = normalize(L+(1.0-lightEnabled));
// Calculate lighting properties
float lambertTerm = clamp( dot(N,L), 0.0, 1.0 );
vec3 H = normalize(L+V); // Halfway vector
float specularTerm = pow( max(dot(H,N),0.0), u_shininess);
// Calculate mask
float mask1 = lightEnabled;
// Calculate colors
ambient_color += mask1 * u_ambient; // * gl_LightSource[i].ambient;
diffuse_color += mask1 * lambertTerm;
specular_color += mask1 * specularTerm * u_specular;
}}
// Calculate final color by componing different components
final_color = color2 * ( ambient_color + diffuse_color) + specular_color;
final_color.a = color2.a;
// Done
return final_color;
}}
// for some reason, this has to be the last function in order for the
// filters to be inserted in the correct place...
void main() {{
vec3 farpos = v_farpos.xyz / v_farpos.w;
vec3 nearpos = v_nearpos.xyz / v_nearpos.w;
// Calculate unit vector pointing in the view direction through this
// fragment.
view_ray = normalize(farpos.xyz - nearpos.xyz);
// Compute the distance to the front surface or near clipping plane
float distance = dot(nearpos-v_position, view_ray);
distance = max(distance, min((-0.5 - v_position.x) / view_ray.x,
(u_shape.x - 0.5 - v_position.x) / view_ray.x));
distance = max(distance, min((-0.5 - v_position.y) / view_ray.y,
(u_shape.y - 0.5 - v_position.y) / view_ray.y));
distance = max(distance, min((-0.5 - v_position.z) / view_ray.z,
(u_shape.z - 0.5 - v_position.z) / view_ray.z));
// Now we have the starting position on the front surface
vec3 front = v_position + view_ray * distance;
// Decide how many steps to take
int nsteps = int(-distance / u_relative_step_size + 0.5);
if( nsteps < 1 )
discard;
// Get starting location and step vector in texture coordinates
vec3 step = ((v_position - front) / u_shape) / nsteps;
vec3 start_loc = front / u_shape;
// For testing: show the number of steps. This helps to establish
// whether the rays are correctly oriented
//gl_FragColor = vec4(0.0, nsteps / 3.0 / u_shape.x, 1.0, 1.0);
//return;
{before_loop}
// This outer loop seems necessary on some systems for large
// datasets. Ugly, but it works ...
vec3 loc = start_loc;
int iter = 0;
while (iter < nsteps) {{
for (iter=iter; iter<nsteps; iter++)
{{
// Get sample color
vec4 color = $sample(u_volumetex, loc);
float val = color.g;
{in_loop}
// Advance location deeper into the volume
loc += step;
}}
}}
{after_loop}
/* Set depth value - from visvis TODO
int iter_depth = int(maxi);
// Calculate end position in world coordinates
vec4 position2 = vertexPosition;
position2.xyz += ray*shape*float(iter_depth);
// Project to device coordinates and set fragment depth
vec4 iproj = gl_ModelViewProjectionMatrix * position2;
iproj.z /= iproj.w;
gl_FragDepth = (iproj.z+1.0)/2.0;
*/
}}
""" # noqa
ISO_SNIPPETS = dict(
before_loop="""
vec4 total_color = vec4(0.0); // final color
vec4 src = vec4(0.0);
vec4 dst = vec4(0.0);
vec3 dstep = 1.5 / u_shape; // step to sample derivative
gl_FragColor = vec4(0.0);
float val_prev = 0;
float outa = 0;
vec3 loc_prev = vec3(0.0);
vec3 loc_mid = vec3(0.0);
""",
in_loop="""
for (int i=0; i<level; i++){
// render from outside to inside
if (val < u_threshold*(1.0-i/float(level)) && val_prev > u_threshold*(1.0-i/float(level))){
// Use bisection to find correct position of contour
for (int i=0; i<20; i++) {
loc_mid = 0.5 * (loc_prev + loc);
val = $sample(u_volumetex, loc_mid).g;
if (val < u_threshold) {
loc = loc_mid;
} else {
loc_prev = loc_mid;
}
}
//dst = $cmap(val); // this will call colormap function if have
dst = $cmap(i);
dst = calculateColor(dst, loc, dstep);
dst.a = 1. * (1.0 - i/float(level)); // transparency
src = total_color;
outa = src.a + dst.a * (1 - src.a);
total_color = (src * src.a + dst * dst.a * (1 - src.a)) / outa;
total_color.a = outa;
}
}
val_prev = val;
loc_prev = loc;
""",
after_loop="""
gl_FragColor = total_color;
""",
)
ISO_FRAG_SHADER = FRAG_SHADER.format(**ISO_SNIPPETS)
frag_dict['iso'] = ISO_FRAG_SHADER
class MultiIsoVisual(VolumeVisual):
"""
Carry out additive volume rendering using an RGBA cube instead of a 3d cube
of values and a colormap.
Parameters
----------
data : np.ndarray
A 4-d array with dimensions (z, y, x, 4) where the last dimension
corresponds to RGBA. The data should be normalized from 0 to 1 in each
channel.
relative_step_size : float
The relative step size to step through the volume. Default 0.8.
Increase to e.g. 1.5 to increase performance, at the cost of
quality.
emulate_texture : bool
Use 2D textures to emulate a 3D texture. OpenGL ES 2.0 compatible,
but has lower performance on desktop platforms.
"""
# TODO: add cmap later
@property
@step.setter
MultiIsoVisual = create_visual_node(MultiIsoVisual)
| [
2,
770,
2393,
23986,
257,
15237,
40,
568,
36259,
1398,
326,
460,
307,
973,
284,
905,
198,
2,
3294,
11685,
286,
318,
418,
333,
2550,
11640,
13,
632,
318,
10944,
422,
262,
2656,
198,
2,
14701,
36259,
1398,
287,
1490,
9078,
13,
41464,
... | 2.510066 | 4,421 |
from textwrap import dedent
from rest_framework.schemas.openapi import AutoSchema
| [
6738,
2420,
37150,
1330,
4648,
298,
198,
198,
6738,
1334,
62,
30604,
13,
1416,
4411,
292,
13,
9654,
15042,
1330,
11160,
27054,
2611,
628
] | 3.5 | 24 |
import json
import logging
import os
import shutil
import socket
from os.path import join
from hacksport.operations import execute
from shell_manager.bundle import get_bundle, get_bundle_root
from shell_manager.util import (BUNDLE_ROOT, DEPLOYED_ROOT, get_problem,
get_problem_root, HACKSPORTS_ROOT, PROBLEM_ROOT,
STAGING_ROOT)
logger = logging.getLogger(__name__)
def get_all_problems():
""" Returns a dictionary of name:object mappings """
problems = {}
if os.path.isdir(PROBLEM_ROOT):
for name in os.listdir(PROBLEM_ROOT):
try:
problem = get_problem(get_problem_root(name, absolute=True))
problems[name] = problem
except FileNotFoundError as e:
pass
return problems
def get_all_bundles():
""" Returns a dictionary of name:object mappings """
bundles = {}
if os.path.isdir(BUNDLE_ROOT):
for name in os.listdir(BUNDLE_ROOT):
try:
bundle = get_bundle(get_bundle_root(name, absolute=True))
bundles[name] = bundle
except FileNotFoundError as e:
pass
return bundles
def get_all_problem_instances(problem_path):
""" Returns a list of instances for a given problem """
instances = []
instances_dir = join(DEPLOYED_ROOT, problem_path)
if os.path.isdir(instances_dir):
for name in os.listdir(instances_dir):
if name.endswith(".json"):
try:
instance = json.loads(
open(join(instances_dir, name)).read())
except Exception as e:
continue
instances.append(instance)
return instances
def publish(args, config):
""" Main entrypoint for publish """
problems = get_all_problems()
bundles = get_all_bundles()
output = {"problems": [], "bundles": []}
for path, problem in problems.items():
problem["instances"] = get_all_problem_instances(path)
problem["sanitized_name"] = path
output["problems"].append(problem)
for _, bundle in bundles.items():
output["bundles"].append(bundle)
print(json.dumps(output, indent=2))
def clean(args, config):
""" Main entrypoint for clean """
lock_file = join(HACKSPORTS_ROOT, "deploy.lock")
# remove staging directories
if os.path.isdir(STAGING_ROOT):
logger.info("Removing the staging directories")
shutil.rmtree(STAGING_ROOT)
# remove lock file
if os.path.isfile(lock_file):
logger.info("Removing the stale lock file")
os.remove(lock_file)
# TODO: potentially perform more cleaning
def status(args, config):
""" Main entrypoint for status """
bundles = get_all_bundles()
problems = get_all_problems()
if args.problem is not None:
problem = problems.get(args.problem, None)
if problem is None:
print("Could not find problem \"{}\"".format(args.problem))
return
problem_status = get_problem_status(args.problem, problem)
if args.json:
print(json.dumps(problem_status, indent=4))
else:
print_problem_status(problem_status, args.problem, prefix="")
elif args.bundle is not None:
bundle = bundles.get(args.bundle, None)
if bundle is None:
print("Could not find bundle \"{}\"".format(args.bundle))
return
if args.json:
print(json.dumps(get_bundle_status(bundle), indent=4))
else:
print_bundle(bundle, args.bundle, prefix="")
else:
return_code = 0
if args.json:
result = {
"bundles":
bundles,
"problems":
list(
map(lambda tup: get_problem_status(*tup), problems.items()))
}
print(json.dumps(result, indent=4))
elif args.errors_only:
for path, problem in problems.items():
problem_status = get_problem_status(path, problem)
# Determine if any problem instance is offline
for instance_status in problem_status["instances"]:
if not instance_status["service"]:
print_problem_status(problem_status, path, prefix=" ")
return_code = 1
else:
print("** Installed Bundles [{}] **".format(len(bundles)))
shown_problems = []
for path, bundle in bundles.items():
print_bundle(bundle, path, prefix=" ")
print("** Installed Problems [{}] **".format(len(problems)))
for path, problem in problems.items():
problem_status = get_problem_status(path, problem)
# Determine if any problem instance is offline
for instance_status in problem_status["instances"]:
if not instance_status["service"]:
return_code = 1
print_problem_status(problem_status, path, prefix=" ")
if return_code != 0:
exit(return_code)
| [
11748,
33918,
198,
11748,
18931,
198,
11748,
28686,
198,
11748,
4423,
346,
198,
11748,
17802,
198,
6738,
28686,
13,
6978,
1330,
4654,
198,
198,
6738,
30445,
634,
13,
3575,
602,
1330,
12260,
198,
6738,
7582,
62,
37153,
13,
65,
31249,
133... | 2.164808 | 2,421 |
from django.shortcuts import render
# Create your views here.
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
198,
198,
2,
13610,
534,
5009,
994,
13,
198
] | 3.705882 | 17 |
#encoding=utf-8
import pymysql
from time import sleep
import datetime
import tkinter.messagebox
| [
2,
12685,
7656,
28,
40477,
12,
23,
201,
198,
11748,
279,
4948,
893,
13976,
201,
198,
6738,
640,
1330,
3993,
201,
198,
11748,
4818,
8079,
201,
198,
11748,
256,
74,
3849,
13,
20500,
3524,
201
] | 2.857143 | 35 |
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def plot_points_regression(x,
y,
title,
xlabel,
ylabel,
prediction=None,
legend=False,
r_squared=None,
position=(90, 100)):
"""
Plots the data points and the prediction,
if there is one.
:param x: design matrix
:type x: np.array
:param y: regression targets
:type y: np.array
:param title: plot's title
:type title: str
:param xlabel: x axis label
:type xlabel: str
:param ylabel: y axis label
:type ylabel: str
:param prediction: model's prediction
:type prediction: np.array
:param legend: param to control print legends
:type legend: bool
:param r_squared: r^2 value
:type r_squared: float
:param position: text position
:type position: tuple
"""
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
line1, = ax.plot(x, y, 'bo', label='Real data')
if prediction is not None:
line2, = ax.plot(x, prediction, 'r', label='Predicted data')
if legend:
plt.legend(handles=[line1, line2], loc=2)
ax.set_title(title,
fontsize=20,
fontweight='bold')
if r_squared is not None:
bbox_props = dict(boxstyle="square,pad=0.3",
fc="white", ec="black", lw=0.2)
t = ax.text(position[0], position[1], "$R^2 ={:.4f}$".format(r_squared),
size=15, bbox=bbox_props)
ax.set_xlabel(xlabel, fontsize=20)
ax.set_ylabel(ylabel, fontsize=20)
plt.show()
def plot_cost_function_curve(X,
y,
cost_function,
title,
weights_list=None,
cost_list=None,
position=(20, 40),
range_points=(20, 40)):
"""
Plots a cost surfice.
It assumes that weight.shape == (2,).
:param X: design matrix
:type X: np.ndarray
:param y: regression targets
:type y: np.ndarray
:param cost_function: function to compute regression cost
:type cost_function: lambda: (np.ndarray, np.ndarray, np.ndarray) -> float
:param title: plot's title
:type title: str
:param weights_list: list of weights
:type weights_list: list
:param cost_list: list of costs
:type cost_list: list
:param position: surfice rotation position
:type position: tuple
:param range_points: range of values for w
:type range_points: tuple
"""
w_0, w_1 = 0, 0
ms = np.linspace(w_0 - range_points[0] , w_0 + range_points[0], range_points[0])
bs = np.linspace(w_1 - range_points[1] , w_1 + range_points[1], range_points[1])
M, B = np.meshgrid(ms, bs)
MB = np.stack((np.ravel(M), np.ravel(B)), axis=1)
size = MB.shape[0]
MB = MB.reshape((size, 2, 1))
zs = np.array([cost_function(X, y, MB[i])
for i in range(size)])
Z = zs.reshape(M.shape)
fig = plt.figure(figsize=(20, 10))
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(M, B, Z, rstride=1, cstride=1, color='b', alpha=0.2)
ax.set_xlabel('w[0]', labelpad=30, fontsize=24, fontweight='bold')
ax.set_ylabel('w[1]', labelpad=30, fontsize=24, fontweight='bold')
ax.set_zlabel('J(w)', labelpad=30, fontsize=24, fontweight='bold')
if weights_list is not None and cost_list is not None:
ax.plot([weights_list[0][0]],
[weights_list[0][1]],
[cost_list[0]],
markerfacecolor=(1.0, 0.0, 0.0, 1.0),
markeredgecolor=(1.0, 0.0, 0.0, 1.0),
marker='o',
markersize=7)
ax.plot([weights_list[-1][0]],
[weights_list[-1][1]],
[cost_list[-1]],
markerfacecolor=(0.0, 0.0, 1.0, 1.0),
markeredgecolor=(0.0, 0.0, 1.0, 1.0),
marker='o',
markersize=7)
temp_red = 1.0
temp_blue = 0.0
size = len(weights_list)
oldx = 0.0
oldy = 0.0
oldz = 0.0
for w, cost in zip(weights_list, cost_list):
rgba_color = (temp_red * 1.0, 0.0, temp_blue * 1.0, 1.0)
ax.plot([w[0]],
[w[1]],
[cost],
markerfacecolor=rgba_color,
markeredgecolor=rgba_color,
marker='.',
markersize=4)
if oldx + oldy + oldz != 0.0 :
rgba_color_weak = list(rgba_color)
rgba_color_weak[-1] = 0.3
ax.plot([w[0], oldx],[w[1], oldy], [cost, oldz],color=rgba_color_weak)
temp_red += - 1 / size
temp_blue += 1 / size
oldx = w[0]
oldy = w[1]
oldz = cost
ax.view_init(elev=position[0], azim=position[1])
ax.set_title(title,
fontsize=20,
fontweight='bold')
plt.show()
def simple_step_plot(ylist,
yname,
title,
figsize=(4, 4),
labels=None):
"""
Plots values over time.
:param ylist: list of values lists
:type ylist: list
:param yname: value name
:type yname: str
:param title: plot's title
:type title: str
:param figsize: plot's size
:type figsize: tuple
:param labels: label for each values list in ylist
:type range_points: list
"""
y0 = ylist[0]
x = np.arange(1, len(y0) + 1, 1)
fig, ax = plt.subplots(1, 1, figsize=figsize)
for y in ylist:
ax.plot(x, y)
plt.xlabel('step')
plt.ylabel(yname)
plt.title(title,
fontsize=14,
fontweight='bold')
plt.grid(True)
if labels is not None:
plt.legend(labels,
loc='upper right')
plt.show()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
285,
489,
62,
25981,
74,
896,
13,
76,
29487,
18,
67,
1330,
12176,
274,
18,
35,
628,
198,
4299,
7110,
62,
13033,
62,
2301,
2234,
... | 1.849579 | 3,324 |
from __future__ import annotations
from .abc import StorerFormatter
| [
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
6738,
764,
39305,
1330,
9363,
81,
8479,
1436,
628
] | 4.117647 | 17 |
from .views import crud_bp
from .models import db, ShanghaiPersonInfo
__all__ = ['crud_bp', 'db', 'ShanghaiPersonInfo']
LOGO = """
▄▄▄▄▄
▀▀▀██████▄▄▄ _______________
▄▄▄▄▄ █████████▄ / \\
▀▀▀▀█████▌ ▀▐▄ ▀▐█ | Gotta go fast! |
▀▀█████▄▄ ▀██████▄██ | _________________/
▀▄▄▄▄▄ ▀▀█▄▀█════█▀ |/
▀▀▀▄ ▀▀███ ▀ ▄▄
▄███▀▀██▄████████▄ ▄▀▀▀▀▀▀█▌ _____________________________
██▀▄▄▄██▀▄███▀ ▀▀████ ▄██ █ \\\\
▄▀▀▀▄██▄▀▀▌████▒▒▒▒▒▒███ ▌▄▄▀▀▀▀█_____________________________//
▌ ▐▀████▐███▒▒▒▒▒▐██▌
▀▄▄▄▄▀ ▀▀████▒▒▒▒▄██▀
▀▀█████████▀
▄▄██▀██████▀█
▄██▀ ▀▀▀ █
▄█ ▐▌
▄▄▄▄█▌ ▀█▄▄▄▄▀▀▄
▌ ▐ ▀▀▄▄▄▀
▀▀▄▄▀ ██
\ ▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀ ▀
\- ▌ SanicCRUD-vue ▀ ▀
- ▌ (o) ▀
/- ▌ Go Go Go ! ▀ ▀ Author:Boyle Gu{0}
/ ▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀ ▀ Version 0.1
██ Our Freedom ~~
""" | [
6738,
764,
33571,
1330,
1067,
463,
62,
46583,
198,
6738,
764,
27530,
1330,
20613,
11,
21865,
15439,
12360,
628,
198,
834,
439,
834,
796,
37250,
6098,
463,
62,
46583,
3256,
705,
9945,
3256,
705,
2484,
272,
20380,
15439,
12360,
20520,
628... | 1.30777 | 991 |
import sentencepiece as spm
from gtts import gTTS
sp = spm.SentencePieceProcessor()
sp.Load("./dataset/sp.et.model")
message = open("pred.txt", "r", encoding="utf8").read()
message_decoded = sp.DecodePieces(message.split())
print(message_decoded)
tts = gTTS(message_decoded, lang='et')
tts.save('et.mp3')
| [
11748,
6827,
12239,
355,
599,
76,
198,
6738,
308,
83,
912,
1330,
308,
51,
4694,
198,
198,
2777,
796,
599,
76,
13,
31837,
594,
47,
8535,
18709,
273,
3419,
198,
2777,
13,
8912,
7,
1911,
14,
19608,
292,
316,
14,
2777,
13,
316,
13,
... | 2.566667 | 120 |
#!/usr/bin/env python3
#
# Copyright (c) 2016-2017 Nest Labs, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##
# @file
# Implements HappyDNS class through which nodes get DNS name servers.
#
#
from __future__ import absolute_import
import os
import sys
from happy.ReturnMsg import ReturnMsg
from happy.Utils import *
from happy.utils.IP import IP
from happy.HappyNode import HappyNode
options = {}
options["quiet"] = False
options["add"] = False
options["delete"] = False
options["dns"] = None
options["node_id"] = None
class HappyDNS(HappyNode):
"""
Assigns DNS servers to virtual nodes.
happy-dns [-h --help] [-q --quiet] [-a --add] [-d --delete]
[-i --id <NODE_NAME>] <DNS_LIST>
-i --id Optional. Node to assign a DNS server to. Find
using happy-link-list.
Examples:
$ happy-dns 8.8.8.8
Assign DNS server 8.8.8.8 to all virtual nodes.
$ happy-dns -d onhub 8.8.8.8
Removes DNS server 8.8.8.8 from the onhub node.
return:
0 success
1 fail
"""
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
2,
198,
2,
220,
220,
220,
15069,
357,
66,
8,
1584,
12,
5539,
21420,
23500,
11,
3457,
13,
198,
2,
220,
220,
220,
1439,
2489,
10395,
13,
198,
2,
198,
2,
220,
220,
220,
4... | 2.692308 | 611 |
"""
Utils
"""
from ios_device.util.service_info import MyServiceInfo
__all__ = ['DictAttrProperty', 'DictAttrFieldNotFoundError']
import socket
import struct
import threading
from zeroconf import Zeroconf, ServiceBrowser, ServiceInfo
_NotSet = object()
| [
37811,
198,
18274,
4487,
198,
37811,
198,
6738,
1312,
418,
62,
25202,
13,
22602,
13,
15271,
62,
10951,
1330,
2011,
16177,
12360,
198,
198,
834,
439,
834,
796,
37250,
35,
713,
8086,
81,
21746,
3256,
705,
35,
713,
8086,
81,
15878,
3673,... | 3.119048 | 84 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import mailing.models
import ckeditor_uploader.fields
from django.conf import settings
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
21898,
13,
27530,
198,
11748,
... | 3.233333 | 60 |
#!/usr/bin/env python3
#####################################
#
# Filename : test_Distructure.py
#
# Projectname : diSTruct
#
# Author : Oskar Taubert
#
# Creation Date : Mon 11 Jun 2018 04:04:59 PM CEST
#
# Last Modified : Thu 28 Mar 2019 11:14:02 AM CET
#
#####################################
from Bio import SeqIO
from Bio.PDB.PDBParser import PDBParser
from distruct import Distructure
from distruct import config
testFilePath = config.data_path + "tests/"
if __name__ == '__main__':
test_Distructure()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
29113,
4242,
2,
198,
2,
198,
2,
7066,
12453,
1058,
1332,
62,
20344,
5620,
13,
9078,
198,
2,
198,
2,
4935,
3672,
1058,
2566,
2257,
1356,
198,
2,
198,
2,
6434,
1058,
440,
8135,
... | 3.171779 | 163 |
from Dataset import Datasets
import numpy as np
import csv, sys, glob, os
import pandas as pd
from enum import Enum
from Dataset.Ucihar import UCIHAR,SignalsUCIHAR
from Dataset.Dsads import DSADS ,SignalsDsads
from Dataset.Uschad import USCHAD,SignalsUschad
from Dataset.Pamap2 import PAMAP2,SignalsPAMAP2
from Process.Manager import preprocess_datasets
from Process.Protocol import Loso
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('--debug', action='store_true')
args = parser.parse_args()
if args.debug:
import pydevd_pycharm
pydevd_pycharm.settrace('172.22.100.3', port=22, stdoutToServer=True, stderrToServer=True, suspend=False)
if len(sys.argv) > 2:
file_wisdm = sys.argv[1]
dir_datasets = sys.argv[2]
dir_save_file = sys.argv[3]
else:
source = 'C:\\Users\\gcram\\Documents\\Datasets\\originals\\'
outPath = 'C:\\Users\\gcram\\Documents\\Datasets\\frankDataset\\'
| [
6738,
16092,
292,
316,
1330,
16092,
292,
1039,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
269,
21370,
11,
25064,
11,
15095,
11,
28686,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
33829,
1330,
2039,
388,
198,
198,
6738,
16092,
... | 2.55 | 380 |
import os, json, argparse, sys, datetime, time
import pronto, six
"""
Check all existing ChEBI items have their InChi keys and the current ChEBI
release as source.
"""
script = os.path.basename(sys.argv[0])[:-3]
write_line_to._hdl = {}
# Initiate the parser
parser = argparse.ArgumentParser()
parser.add_argument("-r", "--release-date", help="ChEBI release item",
required=True,
action="store")
parser.add_argument("-q", "--query", help="perform SPARQL query",
action="store_true")
# Read arguments from the command line
args = parser.parse_args()
print(args)
# Check for --version or -V
dontquery = not args.query
rdate = args.release_date
CHEBREF = 'Q98915402'
if dontquery is False:
print('performing query...')
ret = os.popen('wd sparql {}.rq >{}.json'.format(script, script))
if ret.close() is not None:
raise
file = open('{}.json'.format(script))
s = file.read()
jol = json.loads(s)
blacklist = ['Q11786072', 'Q420138', 'Q412742', 'Q5201339', 'Q904668',
'Q5198686', 'Q417504', 'Q27289582', 'Q27278834', 'Q5102966',
'Q27255739', 'Q27292730', 'Q27092937', 'Q41874783', 'Q920900',
'Q716544', 'Q27263575', 'Q27274220', 'Q27279584', 'Q27257875',
'Q2102184', 'Q3530618', 'Q4021823', 'Q27110377', 'Q27110219',
'Q5404490', 'Q27292191', 'Q1452081', 'Q7334473', 'Q5045786',
'Q416827', 'Q27106184', 'Q5210829', 'Q27122191']
chbits = {}
chebst = {}
stmref = {}
items = set()
dupes = set()
for d in jol:
item = d.get('item')
if item in blacklist:
continue
chebi = d.get('chebi')
x = chbits.get(chebi)
if x is not None and x != item:
print(x, item)
raise
chbits[chebi] = item
if item in items:
dupes.add(item)
continue
else:
items.add(item)
stmt = d.get('stmt')
ikey = d.get('ikey')
if stmt is None or len(stmt) == 0:
continue
x = chebst.get(chebi)
if x is None:
chebst[chebi] = set([(stmt, ikey)])
else:
x.add((stmt, ikey))
ref = d.get('refnode')
if ref is None or len(ref) == 0:
continue
src = d.get('src')
if src is None or len(src) == 0:
continue
sdate = d.get('sdate')
s = stmref.get(stmt)
if s is None:
stmref[stmt] = set([(ref, sdate)])
else:
s.add((ref, sdate))
print('Reading ChEBI')
ont = pronto.Ontology('chebi.obo')
print('Writing wikibase-cli input files')
for chebi in chbits.keys():
term = ont.get('CHEBI:' + chebi)
it = chbits.get(chebi)
if it is not None and it in dupes:
continue
if term is None:
print('obsolete: CHEBI:{} {}'.format(chebi, chbits.get(chebi)))
continue
xref = term.annotations
if xref is None:
continue
l = [i.literal for i in xref if i.property == 'http://purl.obolibrary.org/obo/chebi/inchikey']
if len(l) == 0:
continue
ikey = l[0]
ss = chebst.get(chebi)
if ss is None:
new_ikey(chbits.get(chebi), ikey)
continue
s = [stmt for stmt,ik in chebst.get(chebi) if ik == ikey]
if len(s) == 0:
it = chbits.get(chebi)
if it is None:
print(it, chebi)
raise
add_ikey(it, ikey)
continue
if len(s) > 1:
print(chebi, s)
raise
stmt = s[0]
refs = stmref.get(stmt)
if ref is None or len(ref) == 0:
add_ref(stmt)
continue
if all(sdate != rdate + 'T00:00:00Z' for _,sdate in refs):
add_ref(stmt)
for ref,sdate in refs:
if sdate != rdate + 'T00:00:00Z':
del_ref(stmt, ref)
| [
198,
11748,
28686,
11,
33918,
11,
1822,
29572,
11,
25064,
11,
4818,
8079,
11,
640,
198,
11748,
778,
5957,
11,
2237,
198,
198,
37811,
198,
9787,
477,
4683,
609,
36,
3483,
3709,
423,
511,
554,
1925,
72,
8251,
290,
262,
1459,
609,
36,
... | 2.060159 | 1,762 |
#Sanitize HTML output
#Example: htmlspecialchars('I love the <b> tag') == 'I love the <b> tag'
| [
2,
15017,
270,
1096,
11532,
5072,
198,
2,
16281,
25,
27711,
20887,
354,
945,
10786,
40,
1842,
262,
1279,
65,
29,
7621,
11537,
6624,
705,
40,
1842,
262,
1222,
2528,
26,
65,
5,
13655,
26,
7621,
6,
198
] | 2.657895 | 38 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import itertools
import os
import requests
from girder import events
from girder.api import access
from girder.api.describe import Description
from girder.api.rest import Resource
from girder.plugins.imagespace.settings import ImageSpaceSetting
from urlparse import urlparse
setting = ImageSpaceSetting()
def adjust_qparams_for_subtype(event):
"""
SMQTK only works on png/jpeg/tiff as of now, so limit the results
to those to avoid confusion when using IQR.
"""
if 'fq' not in event.info:
event.info['fq'] = []
event.info['fq'].append('subType:("png" OR "jpeg" OR "tiff")')
event.addResponse(event.info)
def uppercase_basename_for_resourcenames(event):
"""
Certain searches were indexed before conversion of the Solr index, so they pass
values with lowercase resourcenames that actually correspond to the uppercase resource
name versions.
"""
if event.info['field'] == 'resourcename_t_md':
event.info['values'] = [os.path.basename(x).upper() for x in
event.info['values']]
event.addResponse(event.info)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
29113,
29113,
7804,
4242,
21017,
198,
2,
220,
15069,
10897,
1574,
3457,
13,
198,
2,
198,
2,
220,
49962,
739,
262... | 3.178333 | 600 |
""" Define the abstract class for Search Index"""
class SearchIndex:
"""Search Index handles creation of various search index, getting values, destroying search index"""
| [
37811,
2896,
500,
262,
12531,
1398,
329,
11140,
12901,
37811,
628,
198,
4871,
11140,
15732,
25,
198,
220,
220,
220,
37227,
18243,
12901,
17105,
6282,
286,
2972,
2989,
6376,
11,
1972,
3815,
11,
13897,
2989,
6376,
37811,
198
] | 4.631579 | 38 |
import csv
import random
import re
import sys
import tqdm
import numpy as np
import torch
from torch.utils.data import TensorDataset
from transformers.tokenization_bert import BertTokenizer
def _convert_examples_to_features(examples, label_list, max_seq_length,
tokenizer, output_mode):
"""Loads a data file into a list of `InputBatch`s."""
label_map = {label: i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in enumerate(examples):
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
segment_ids = [0] * len(tokens)
if tokens_b:
tokens += tokens_b + ["[SEP]"]
segment_ids += [1] * (len(tokens_b) + 1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
# print(len(input_ids),len(input_mask),len(segment_ids),max_seq_length)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
if output_mode == "classification":
label_id = label_map[example.label]
elif output_mode == "regression":
label_id = float(example.label)
else:
raise KeyError(output_mode)
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id))
return features
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal
# percent of tokens from each, since if one sequence is very short then each
# token that's truncated likely contains more information than a longer
# sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
class InputExample:
"""A single training/test example for simple sequence classification."""
class InputFeatures(object):
"""A single set of features of data."""
class SST1_Processor(DatasetProcessor):
    """Processor for the SST-5 (fine-grained sentiment) data set."""

    def get_train_examples(self):
        """Build and return the training examples."""
        train_examples = self._create_examples(self._train_set, "train")
        print('getting train examples,len = ', len(train_examples))
        return train_examples

    def get_test_examples(self):
        """Build and return the test examples."""
        test_examples = self._create_examples(self._test_set, "test")
        print('getting test examples,len = ', len(test_examples))
        return test_examples

    def get_labels(self):
        """Return the sorted distinct labels of the training split."""
        return sorted({example.label for example in self.train_examples})

    def _create_examples(self, dataset, set_type):
        """Wrap (text, label) rows into InputExample objects."""
        return [
            InputExample(guid="%s-%s" % (set_type, row_index),
                         text_a=row[0],
                         label=row[1])
            for row_index, row in enumerate(dataset)
        ]
class SST2_Processor(DatasetProcessor):
    """Processor for the SST-2 (binary sentiment) data set."""
    # NOTE(review): this class is identical to SST1_Processor; consider
    # folding the shared logic into DatasetProcessor.
    def get_train_examples(self):
        """See base class."""
        examples = self._create_examples(self._train_set, "train")
        print('getting train examples,len = ', len(examples))
        return examples
    def get_test_examples(self):
        """See base class."""
        examples = self._create_examples(self._test_set, "test")
        print('getting test examples,len = ', len(examples))
        return examples
    def get_labels(self):
        """See base class."""
        # Collect the distinct labels observed in the training split.
        label_set = set()
        for example in self.train_examples:
            label_set.add(example.label)
        return sorted(list(label_set))
    def _create_examples(self, dataset, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for (i, data) in enumerate(dataset):
            guid = "%s-%s" % (set_type, i)  # e.g. "train-17"
            # data is assumed to be a (text, label) pair -- TODO confirm
            examples.append(InputExample(
                guid=guid,
                text_a=data[0],
                label=data[1]
            ))
        return examples
class TREC_Processor(DatasetProcessor):
    """Processor for the TREC question-classification data set."""
    # NOTE(review): this class is identical to SST1_Processor; consider
    # folding the shared logic into DatasetProcessor.
    def get_train_examples(self):
        """See base class."""
        examples = self._create_examples(self._train_set, "train")
        print('getting train examples,len = ', len(examples))
        return examples
    def get_test_examples(self):
        """See base class."""
        examples = self._create_examples(self._test_set, "test")
        print('getting test examples,len = ', len(examples))
        return examples
    def get_labels(self):
        """See base class."""
        # Collect the distinct labels observed in the training split.
        label_set = set()
        for example in self.train_examples:
            label_set.add(example.label)
        return sorted(list(label_set))
    def _create_examples(self, dataset, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for (i, data) in enumerate(dataset):
            guid = "%s-%s" % (set_type, i)  # e.g. "train-17"
            # data is assumed to be a (text, label) pair -- TODO confirm
            examples.append(InputExample(
                guid=guid,
                text_a=data[0],
                label=data[1]
            ))
        return examples
class SUBJ_Processor(DatasetProcessor):
    """Processor for the SUBJ (subjectivity) data set, split by 10-fold CV."""
    def get_train_examples(self, cv=0):
        """See base class.

        Shuffles the examples and returns a (train, test) pair for
        cross-validation fold ``cv``: every 10th example (offset ``cv``
        in the shuffled order) goes to the test split, the rest to train.
        """
        examples = self._read_examples()  # provided by DatasetProcessor -- TODO confirm
        idx = list(range(len(examples)))
        np.random.shuffle(idx)
        test_index = cv
        test_example = []
        train_example = []
        for i, id_ in enumerate(idx):
            # Fold assignment is by position in the shuffled order.
            index = i % 10
            if index == test_index:
                test_example.append(examples[id_])
            else:
                train_example.append(examples[id_])
        return train_example, test_example
    def get_labels(self):
        """See base class."""
        # Collect the distinct labels observed in the training split.
        label_set = set()
        for example in self.train_examples:
            label_set.add(example.label)
        return sorted(list(label_set))
    def _create_examples(self, dataset, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for (i, data) in enumerate(dataset):
            guid = "%s-%s" % (set_type, i)
            examples.append(InputExample(
                guid=guid,
                text_a=data[0],
                label=data[1]
            ))
        return examples
        # return shuffle_data(examples)
class MR_Processor(DatasetProcessor):
    """Processor for the MR (movie review) data set, split by 10-fold CV."""

    def get_train_examples(self, cv=0):
        """See base class.

        Shuffles the examples and returns a (train, test) pair for
        cross-validation fold ``cv``: every 10th example (offset ``cv``
        in the shuffled order) goes to the test split, the rest to train.
        """
        examples = self._read_examples()
        idx = list(range(len(examples)))
        np.random.shuffle(idx)
        test_index = cv
        test_example = []
        train_example = []
        for i, id_ in enumerate(idx):
            index = i % 10
            if index == test_index:
                test_example.append(examples[id_])
            else:
                train_example.append(examples[id_])
        return train_example, test_example

    def get_labels(self):
        """See base class."""
        label_set = set()
        for example in self.train_examples:
            label_set.add(example.label)
        return sorted(list(label_set))

    def _create_examples(self, dataset, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for (i, data) in enumerate(dataset):
            guid = "%s-%s" % (set_type, i)
            examples.append(InputExample(
                guid=guid,
                text_a=data[0],
                # BUG FIX: the label was dropped here, unlike every sibling
                # processor's _create_examples; get_labels() would then fail
                # on example.label.
                label=data[1]
            ))
        return examples
if __name__ == "__main__":
    # Smoke test: build the TREC processor on fold 2, show the label set
    # and print the first training example.
    proc = TREC_Processor(cv=2)
    print(proc.get_labels())
    training_examples = proc.train_examples
    for sample in training_examples:
        print(sample.text_a, sample.label)
        break
# class OPT:
# def __init__(self):
# self.dataset="SUBJ"
# self.cv = "0"
# self.scale_rate=1
# self.MAX_SENT_LEN=-1
# opt = OPT()
# dset = getattr(sys.modules[__name__],'load_dataset')(opt)
# for x in dset[0]:
# print(x)
# break
# from torch.utils.data import DataLoader
# train_loader = DataLoader(dset[0], batch_size=50, shuffle=True)
| [
11748,
269,
21370,
198,
11748,
4738,
198,
11748,
302,
198,
11748,
25064,
198,
11748,
256,
80,
36020,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
6738,
28034,
13,
26791,
13,
7890,
1330,
309,
22854,
27354,
292,
316,
198,
6... | 2.154639 | 4,559 |
# Train a small regression model on the COVID-19 daily-cases dataset
# (covid.train.csv), keeping only continuous features that correlate
# with the target, then run a prediction pass over the same data.
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
import pandas as pd
import os
from MyModule import MyModel
from MyDataset import MyDataset

# Training hyperparameters.
lr = 0.001
batch_size = 10
epochs = 30
device = "cpu"

result = pd.read_csv("covid.train.csv")
keys = list(result.columns.values)
#turn into tensor
# NOTE(review): a and b slice the first row at column 82 but are never
# used below -- presumably exploratory leftovers; confirm and remove.
a = torch.FloatTensor(result.iloc[0][0:82])
b = torch.FloatTensor(result.iloc[0][82:])
result = pd.DataFrame.to_numpy(result)
result = torch.FloatTensor(result)
#standardize
temp = result[:,0:40]    # first 40 columns kept as-is (assumed categorical/one-hot -- TODO confirm)
temp2 = result[:,40:-1]  # continuous features, min-max normalized below
temp3 = result[:,-1:]    # target column (last)
temp2 = (temp2 - torch.min(temp2,0)[0]) / (torch.max(temp2,0)[0] - torch.min(temp2,0)[0])
#remove the items with low correlation
# Keep only features whose |Pearson correlation| with the target exceeds 0.35.
new_data = []
new_keys = []
show_corre = 0
for i in range(temp2.shape[1]):
    show_corre = np.corrcoef(temp2[:,i],temp3.squeeze())
    correlation = np.corrcoef(temp2[:,i],temp3.squeeze())[0][1]
    if correlation > 0.35 or correlation < -0.35:
        new_keys.append(keys[i+40])
        temp = torch.cat((temp,torch.unsqueeze(temp2[:,i],1)),1)
# Re-attach the target as the last column of the filtered matrix.
temp = torch.cat((temp,temp3),1)
result = temp

dataset = MyDataset(result)
dataloader = DataLoader(dataset, batch_size, shuffle=True)
model = MyModel().to(device)
criterion = nn.MSELoss(reduction='mean')
optimizer = torch.optim.Adam(model.parameters(), lr)

loss_con = []
epoch_number = 0
trigger = 0
trigger_2 = 0           # NOTE(review): never used -- confirm and remove
average_loss_list = []  # NOTE(review): never used -- confirm and remove
for epoch in range(epochs):
    model.train()
    epoch_loss = 0
    for x,y in dataloader:
        optimizer.zero_grad()
        x, y = x.to(device), y.to(device)
        pred = model(x)
        loss = criterion(pred, y)
        epoch_loss += loss.item()
        loss.backward()
        optimizer.step()
    print(epoch_loss)
    epoch_number = epoch_number + 1
    # Start recording per-epoch losses once the loss first drops below 70000.
    if (epoch_loss < 70000) and (trigger == 0):
        trigger = 1
        loss_con.append(epoch_loss)
    elif (trigger == 1):
        loss_con.append(epoch_loss)
    elif (epoch_loss > 70000) and (trigger == 0):
        pass

# Prediction pass. NOTE(review): this iterates the *training* dataloader,
# and y is not moved to device here (both only work because device == "cpu")
# -- confirm intent before running on GPU.
model.eval()
total_loss = 0
pred_results = []
final_loss = []
for x,y in dataloader:
    x = x.to(device)
    epoch_final_loss = 0
    with torch.no_grad():
        predict = model(x)
        pred_results.append(predict.cpu())
        loss_final = criterion(predict, y)
        epoch_final_loss += loss_final.item()
    print(epoch_final_loss)
    final_loss.append(epoch_final_loss)
print(pred_results)
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
6738,
28034,
13,
26791,
13,
7890,
1330,
16092,
292,
316,
11,
6060,
17401,
... | 2.291782 | 1,083 |
# -*- coding: utf-8 -*-
import os
import unittest
from types import FunctionType
import webtest
from paste.deploy.loadwsgi import loadapp
from openprocurement.audit.api.constants import VERSION
from openprocurement.audit.api.design import sync_design
COUCHBD_NAME_SETTING = 'couchdb.db_name'
def snitch(func):
    """
    Turn a stored test function into an executable test method.

    Returns a copy of *func* -- same code object, globals and closure --
    whose name carries the 'test_' prefix, so that unittest discovery
    treats it as a runnable test once attached to a TestCase class. This
    lets a plain python module act as a storage of non-executable test
    functions shared between different test cases.
    """
    test_name = 'test_' + func.func_name
    return FunctionType(func.func_code, func.func_globals, test_name,
                        closure=func.func_closure)
class BaseWebTest(unittest.TestCase):
"""
Base Web Test to test openprocurement.audit.api.
It setups the database before each test and delete it after.
"""
AppClass = BaseTestApp
relative_uri = "config:tests.ini"
relative_to = os.path.dirname(__file__)
initial_auth = None
@classmethod
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
28686,
198,
11748,
555,
715,
395,
198,
6738,
3858,
1330,
15553,
6030,
198,
198,
11748,
3992,
9288,
198,
6738,
17008,
13,
2934,
1420,
13,
2220,
18504,
12397,
1330,
... | 2.843182 | 440 |
# -*- coding: utf8 -*-
"""
This is part of shot detector.
Produced by w495 at 2017.05.04 04:18:27
"""
from __future__ import absolute_import, division, print_function
from .filter_operator import FilterOperator
class FilterTuple(FilterOperator):
    """
    Filter operator that packs the prepared per-feature operator
    arguments into a plain tuple instead of folding them.
    """

    def reduce_with_op_func(self, feature_tuple, **kwargs):
        """
        Return the prepared operator arguments for `feature_tuple`
        as a tuple.

        :param feature_tuple: features to prepare arguments from
        :param kwargs: unused; accepted for interface compatibility
        :return: tuple of prepared operator arguments
        """
        prepared_args = self.prepare_op_func_args(feature_tuple)
        return tuple(prepared_args)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
23,
532,
9,
12,
198,
37811,
198,
220,
220,
220,
770,
318,
636,
286,
2823,
31029,
13,
198,
220,
220,
220,
21522,
771,
416,
266,
33781,
379,
2177,
13,
2713,
13,
3023,
8702,
25,
1507,
25,
1983,
... | 2.294355 | 248 |
import jax
import jax.numpy as jnp
import pytest
from e3nn_jax import (FunctionalFullyConnectedTensorProduct,
FunctionalTensorProduct, FunctionalTensorSquare, Irreps)
from e3nn_jax.util import prod
@pytest.mark.parametrize('connection_mode', ['uvw', 'uvu', 'uvv'])
@pytest.mark.parametrize('jitted', [False, True])
@pytest.mark.parametrize('optimize_einsums', [False, True])
@pytest.mark.parametrize('specialized_code', [False, True])
@pytest.mark.parametrize('irrep_normalization', ['component', 'norm'])
@pytest.mark.parametrize('path_normalization', ['element', 'path'])
@pytest.mark.parametrize('irrep_normalization', ['component', 'norm'])
| [
11748,
474,
897,
198,
11748,
474,
897,
13,
77,
32152,
355,
474,
37659,
198,
11748,
12972,
9288,
198,
6738,
304,
18,
20471,
62,
73,
897,
1330,
357,
22203,
282,
37,
2132,
13313,
276,
51,
22854,
15667,
11,
198,
220,
220,
220,
220,
220,... | 2.610039 | 259 |
import urllib2
import pandas as pd
from bs4 import BeautifulSoup, element
if __name__ == "__main__":
    # Scrape the proceedings and dump them to a UTF-8 encoded CSV file.
    output_path = "cogsci_proceedings_raw.csv"
    scraped_papers = get_papers()
    scraped_papers.to_csv(output_path, encoding='utf-8')
| [
11748,
2956,
297,
571,
17,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
11,
5002,
628,
628,
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
3108,
3672,
... | 2.563218 | 87 |
import django_tables2 as tables
from django_tables2.utils import Accessor
from tenancy.tables import COL_TENANT
from utilities.tables import BaseTable, ToggleColumn
from .models import Circuit, CircuitType, Provider
# Table-column template: changelog and (permission-gated) edit buttons
# rendered for each circuit-type row.
CIRCUITTYPE_ACTIONS = """
<a href="{% url 'circuits:circuittype_changelog' slug=record.slug %}" class="btn btn-default btn-xs" title="Changelog">
<i class="fa fa-history"></i>
</a>
{% if perms.circuit.change_circuittype %}
<a href="{% url 'circuits:circuittype_edit' slug=record.slug %}?return_url={{ request.path }}"
class="btn btn-xs btn-warning"><i class="glyphicon glyphicon-pencil" aria-hidden="true"></i></a>
{% endif %}
"""

# Table-column template: colored status label for a circuit row.
STATUS_LABEL = """
<span class="label label-{{ record.get_status_class }}">{{ record.get_status_display }}</span>
"""

#
# Providers
#

#
# Circuit types
#

#
# Circuits
#
| [
11748,
42625,
14208,
62,
83,
2977,
17,
355,
8893,
198,
6738,
42625,
14208,
62,
83,
2977,
17,
13,
26791,
1330,
8798,
273,
198,
198,
6738,
46543,
13,
83,
2977,
1330,
20444,
62,
51,
1677,
8643,
198,
6738,
20081,
13,
83,
2977,
1330,
730... | 2.675159 | 314 |
# The Nexus software is licensed under the BSD 2-Clause license.
#
# You should have recieved a copy of this license with the software.
# If you did not, you can find one at the following link.
#
# http://opensource.org/licenses/bsd-license.php
if world.blockstore.raw_blocks[world.blockstore.get_offset(x,y+1,z)] != '\x14':
block = '\x14'
world[x, y+1, z] = block
self.client.queueTask(TASK_BLOCKSET, (x, y+1, z, block), world=world)
self.client.sendBlock(x, y+1, z, block)
if entity[4] not in entities_childerenlist:
for client in worldblockchangesdict:
cx,cy,cz,var_timeofchange,userblock = worldblockchangesdict[client][0][:5]
if (cx,cy,cz) == (x,y+1,z) and time()- var_timeofchange < 2:
worldblockchangedellist.append(client)
if userblock in colorblocks:
i = world.entities_childerenlist_index
world.entities_childerenlist_index += 1
entities_childerenlist.append(i)
entity[4] = i
px,py,pz,ph,pp = worldblockchangesdict[client][1]
distancebetween = ((x-px)**2+(y+1-py)**2+(z-pz)**2)**0.5
h = math.radians(ph*360.0/256.0)
p = math.radians(pp*360.0/256.0)
rx,ry,rz = math.sin(h)*math.cos(p),-math.sin(p),-math.cos(h)*math.cos(p)
entitylist.append(["paintball",(rx*distancebetween+rx+px,ry*distancebetween+ry+py,rz*distancebetween+rz+pz),2,2,(rx,ry,rz),i,userblock])
else:
client.sendServerMessage("Please select a color block to use this Paintball Gun.")
| [
2,
383,
16756,
3788,
318,
11971,
739,
262,
347,
10305,
362,
12,
2601,
682,
5964,
13,
198,
2,
198,
2,
921,
815,
423,
664,
39591,
257,
4866,
286,
428,
5964,
351,
262,
3788,
13,
198,
2,
1002,
345,
750,
407,
11,
345,
460,
1064,
530,... | 2.110236 | 762 |
# -*- coding: utf-8 -*-
'''
File name: code\sets_with_a_given_least_common_multiple\sol_590.py
Author: Vaidic Joshi
Date created: Oct 20, 2018
Python Version: 3.x
'''
# Solution to Project Euler Problem #590 :: Sets with a given Least Common Multiple
#
# For more information see:
# https://projecteuler.net/problem=590
# Problem Statement
'''
Let H(n) denote the number of sets of positive integers such that the least common multiple of the integers in the set equals n.
E.g.:
The integers in the following ten sets all have a least common multiple of 6:
{2,3}, {1,2,3}, {6}, {1,6}, {2,6} ,{1,2,6}, {3,6}, {1,3,6}, {2,3,6} and {1,2,3,6}.
Thus H(6)=10.
Let L(n) denote the least common multiple of the numbers 1 through n.
E.g. L(6) is the least common multiple of the numbers 1,2,3,4,5,6 and L(6) equals 60.
Let HL(n) denote H(L(n)).
You are given HL(4)=H(12)=44.
Find HL(50000). Give your answer modulo 109.
'''
# Solution
# Solution Approach
'''
'''
| [
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
7061,
6,
198,
220,
220,
220,
9220,
1438,
25,
2438,
59,
28709,
62,
4480,
62,
64,
62,
35569,
62,
293,
459,
62,
11321,
62,
48101,
59,
34453,
62,
36993,
13,
9078,
... | 2.63369 | 374 |
from .vehicle_controller_interface import VehicleControllerI
class ControllerSimulator(VehicleControllerI):
"""
Controller implementation with
"""
| [
6738,
764,
33892,
1548,
62,
36500,
62,
39994,
1330,
21501,
22130,
40,
628,
198,
4871,
22741,
8890,
8927,
7,
37870,
1548,
22130,
40,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
22741,
7822,
351,
198,
220,
220,
220,
37227,
198
... | 3.833333 | 42 |
"""
test_ext_duration
~~~~~~~~~~~~~~~~~
Test sphinx.ext.duration extension.
:copyright: Copyright 2007-2022 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import pytest
@pytest.mark.sphinx('dummy', testroot='basic',
confoverrides={'extensions': ['sphinx.ext.duration']})
| [
37811,
198,
220,
220,
220,
1332,
62,
2302,
62,
32257,
198,
220,
220,
220,
220,
27156,
93,
628,
220,
220,
220,
6208,
599,
20079,
87,
13,
2302,
13,
32257,
7552,
13,
628,
220,
220,
220,
1058,
22163,
4766,
25,
15069,
4343,
12,
1238,
1... | 2.531915 | 141 |
import argparse
import logging
import gym
import universe
import numpy as np
from itertools import count
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.autograd as autograd
from torch.autograd import Variable
from reframe import resize_frame
# Command-line options for the REINFORCE training run.
parser = argparse.ArgumentParser(description='PyTorch REINFORCE example')
parser.add_argument('--gamma', type=float, default=0.99, metavar='G',
                    help='discount factor (default: 0.99)')
parser.add_argument('--render', action='store_true',
                    help='render the environment')
parser.add_argument('--log_interval', type=int, default=10, metavar='N',
                    help='interval between training status logs (default: 10)')
parser.add_argument('--reward_threshold', type=int, default=100, metavar='RT',
                    help='reward threshold to win a game (default: 100)')
args = parser.parse_args()

LOGGING_FORMAT = '%(asctime)s - %(name)s - %(thread)d|%(process)d - %(levelname)s - %(message)s'
logging.basicConfig(format=LOGGING_FORMAT)
logger = logging.getLogger('Car')

env = gym.make('flashgames.CoasterRacer-v0')
policy = Policy()  # model defined elsewhere in this project
optimizer = optim.Adam(policy.parameters(), lr=1e-2)

running_reward = 10
for i_episode in count(1):
    state = env.reset()
    for t in range(10000):  # Don't infinite loop while learning
        action = select_action(state)
        state, reward, done, info = env.step(action)
        env.render()
        policy.rewards.append(reward[0])
        # NOTE(review): universe envs return None observations until ready;
        # presumably this gates the policy update on real frames -- confirm.
        # Also note the loop never breaks on `done`.
        if(state[0] != None):
            finish_episode()
    # Exponential moving average of episode length.
    running_reward = running_reward * 0.99 + t * 0.01
    if i_episode % args.log_interval == 0:
        print('Episode {}\tLast length: {:5d}\tAverage length: {:.2f}'.format(
            i_episode, t, running_reward))
| [
11748,
1822,
29572,
198,
11748,
18931,
198,
198,
11748,
11550,
198,
11748,
6881,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
340,
861,
10141,
1330,
954,
198,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11... | 2.563739 | 706 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Translation of a single field."""
#TODO(charbull): update types and validation in the class
class FieldTranslation(object):
    """A translation for a single field of an entity.

    Args:
      std_field_name: string. Standard name of the field in the ontology
      raw_field_name: string. Name of the field sent in the telemetry payload
      units: dictionary from standard units to expected telemetry units
      states: dictionary from standard states to expected telemetry states
    """
    # NOTE(review): no constructor or methods are defined in this chunk;
    # the attributes documented above are presumably set elsewhere -- confirm.
    # TODO(b/176095085): handle the comment in the bug while refactoring
| [
2,
15069,
12131,
3012,
11419,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
13789,
1776,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
7330,
257... | 3.829932 | 294 |
#!/usr/bin/env python
# The contents of this file are subject to the BitTorrent Open Source License
# Version 1.1 (the License). You may not copy or use this file, in either
# source code or executable form, except in compliance with the License. You
# may obtain a copy of the License at http://www.bittorrent.com/license/.
#
# Software distributed under the License is distributed on an AS IS basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
# Written by Henry 'Pi' James, Loring Holden and Matt Chisholm
# Command-line tool (Python 2): decode one or more .torrent metainfo files
# and pretty-print their fields with localized, dot-aligned labels.
app_name = "BitTorrent"
from BTL.translation import _

import time
from sys import *
from os.path import *
from sha import *
from BTL.bencode import *
from BitTorrent import version

NAME, EXT = splitext(basename(argv[0]))

print _("%s %s - decode %s metainfo files") % (NAME, version, app_name)
print

if len(argv) == 1:
    print _("Usage: %s [TORRENTFILE [TORRENTFILE ... ] ]") % basename(argv[0])
    print
    exit(2) # common exit code for syntax error

# Localized display labels keyed by metainfo field.
labels = {'metafile' : _("metainfo file: %s" ),
          'infohash' : _("info hash: %s" ),
          'filename' : _("file name: %s" ),
          'filesize' : _("file size:" ),
          'files' : _("files:" ),
          'title' : _("title: %s" ),
          'dirname' : _("directory name: %s" ),
          'creation date' : _("creation date: %s" ),
          'archive' : _("archive size:" ),
          'announce' : _("tracker announce url: %s"),
          'announce-list' : _("tracker announce list: %s"),
          'nodes' : _("trackerless nodes:" ),
          'comment' : _("comment:" ),
          'content_type' : _("content_type: %s" ),
          'url-list' : _("url sources: %s" ),
          }

# Longest label prefix (text before ':'), used to align the output columns.
maxlength = max( [len(v[:v.find(':')]) for v in labels.values()] )
# run through l10n-ed labels and make them all the same length
for k,v in labels.items():
    if ':' in v:
        index = v.index(':')
        newlabel = v.replace(':', '.'*(maxlength-index) + ':')
        labels[k] = newlabel

for metainfo_name in argv[1:]:
    # Decode the bencoded metainfo file.
    metainfo_file = open(metainfo_name, 'rb')
    metainfo = bdecode(metainfo_file.read())
    metainfo_file.close()
    info = metainfo['info']
    info_hash = sha(bencode(info))
    if metainfo.has_key('title'):
        print labels['title'] % metainfo['title']
    print labels['metafile'] % basename(metainfo_name)
    print labels['infohash'] % info_hash.hexdigest()
    piece_length = info['piece length']
    if info.has_key('length'):
        # let's assume we just have a file
        print labels['filename'] % info['name']
        file_length = info['length']
        name = labels['filesize']
        if info.has_key('content_type'):
            print labels['content_type'] % info['content_type']
    else:
        # let's assume we have a directory structure
        print labels['dirname'] % info['name']
        print labels['files']
        file_length = 0;
        for file in info['files']:
            # Join the path components of this entry with '/'.
            path = ''
            for item in file['path']:
                if (path != ''):
                    path = path + "/"
                path = path + item
            if file.has_key('content_type'):
                print ' %s (%d,%s)' % (path, file['length'],
                                       file['content_type'])
            else:
                print ' %s (%d)' % (path, file['length'])
            file_length += file['length']
        name = labels['archive']
    # Total size expressed as full pieces plus the final partial piece.
    piece_number, last_piece_length = divmod(file_length, piece_length)
    print '%s %i (%i * %i + %i)' \
          % (name,file_length, piece_number, piece_length, last_piece_length)
    if metainfo.has_key('announce'):
        print labels['announce'] % metainfo['announce']
    if 'announce-list' in metainfo:
        print labels['announce-list'] % metainfo['announce-list']
    if metainfo.has_key('nodes'):
        print labels['nodes']
        for n in metainfo['nodes']:
            print '\t%s\t:%d' % (n[0], n[1])
    # NOTE(review): the bare label is printed even when no comment exists
    # -- confirm this is intended.
    if metainfo.has_key('comment'):
        print labels['comment'], metainfo['comment']
    else:
        print labels['comment']
    if metainfo.has_key('url-list'):
        print labels['url-list'] % '\n'.join(metainfo['url-list'])
    if metainfo.has_key('creation date'):
        fmt = "%a, %d %b %Y %H:%M:%S"
        gm = time.gmtime(metainfo['creation date'])
        s = time.strftime(fmt, gm)
        print labels['creation date'] % s
    # DANGER: modifies torrent file
    if False:
        metainfo_file = open(metainfo_name, 'wb')
        metainfo_file.write(bencode(metainfo))
        metainfo_file.close()
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
2,
383,
10154,
286,
428,
2393,
389,
2426,
284,
262,
4722,
39286,
4946,
8090,
13789,
198,
2,
10628,
352,
13,
16,
357,
1169,
13789,
737,
220,
921,
743,
407,
4866,
393,
779,
428,
... | 2.175706 | 2,231 |
# Generated by Django 2.2.1 on 2021-10-24 19:38
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
13,
16,
319,
33448,
12,
940,
12,
1731,
678,
25,
2548,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 2.766667 | 30 |
from __future__ import absolute_import
from celery import shared_task
from actstream import action
import core.models as cm
import time
@shared_task
@shared_task
import boto
import logging
logging.basicConfig()
from boto.elastictranscoder.exceptions import (
InternalServiceException,
LimitExceededException,
ResourceInUseException,
)
from django.conf import settings | [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
198,
6738,
18725,
1924,
1330,
4888,
62,
35943,
198,
6738,
719,
5532,
1330,
2223,
198,
11748,
4755,
13,
27530,
355,
12067,
198,
11748,
640,
198,
198,
31,
28710,
62,
35943,
198,
198,
... | 3.389381 | 113 |
import numpy as np
import pylab as pl

# x values: start 3, end 6 (exclusive), step 0.01.
# (The original comment claimed 0..5; the code actually uses 3..6.)
times=np.arange(3,6,0.01)
# Evacuation time as a function of time-period length; the commented-out
# expression after '#' is an alternative (damped oscillation) kept for reference.
fun=lambda x:(x-5.4)**2+24.24#np.cos(20*x)*np.exp(-pl.absolute(x))
pl.plot(times,fun(times))  # plot the curve
pl.xlabel("time period length")  # x-axis label
pl.ylabel("evacuation time")  # y-axis label
#pl.title("damped oscillation")  # optional figure title
pl.show()  # display the figure
| [
11748,
299,
32152,
355,
45941,
198,
11748,
279,
2645,
397,
355,
458,
198,
22355,
28,
37659,
13,
283,
858,
7,
18,
11,
21,
11,
15,
13,
486,
8,
220,
1303,
22355,
10310,
118,
87,
21410,
161,
222,
120,
171,
120,
234,
15,
10310,
118,
... | 1.413655 | 249 |
"""
osgameclones has the following fields:
'updated', 'video', 'repo', 'license', 'originals', 'status', 'multiplayer', 'info', 'lang', 'feed', 'content', 'images', 'url', 'name', 'framework', 'type', 'development'
mandatory fields are: 'name', 'license', 'type', 'originals'
possible values:
osgc-development: active(337), complete(32), halted(330), sporadic(129), very active(6)
osgc-multiplayer: Co-op(5), Competitive(13), Hotseat(3), LAN(17), Local(3), Matchmaking(1), Online(33), Split-screen(7)
osgc-type: clone(171), remake(684), similar(11), tool(7)
osgc-status: playable(274), semi-playable(34), unplayable(34)
osgc-license: ['AFL3', 'AGPL3', 'Apache', 'Artistic', 'As-is', 'BSD', 'BSD2', 'BSD4', 'bzip2', 'CC-BY', 'CC-BY-NC', 'CC-BY-NC-ND', 'CC-BY-NC-SA', 'CC-BY-SA', 'CC0', 'Custom', 'GPL2', 'GPL3', 'IJG', 'ISC', 'JRL', 'LGPL2', 'LGPL3', 'Libpng', 'MAME', 'MIT', 'MPL', 'MS-PL', 'Multiple', 'NGPL', 'PD', 'WTFPL', 'Zlib']
osgc-content: commercial(104), free(32), open(61), swappable(5)
Mapping osgameclones -> ours
name -> name
type -> keywords, description
originals -> keywords
repo -> code repository
url -> home
feed (-> home)
development -> state
status -> state
multiplayer -> keywords
lang -> code language
framework -> code dependencies
license -> code license / assets license
content -> keywords
info -> after fields
updated not used
images not used
video: not used
"""
# TODO also ignore our rejected entries
import ruamel.yaml as yaml
import os
import requests
from io import BytesIO
from PIL import Image
from utils import constants as c, utils as u, osg, osg_rejected
# mapping from their names to our names (means that likely the names should change on osgameclones)
osgc_name_aliases = {'4DTris': '4D-TRIS', 'fheroes2': 'Free Heroes 2',
'Duke3d_win32': 'Duke3d_w32', 'GNOME Atomix': 'Atomix', 'Head over Heels 2': 'Head over Heels',
'mewl': 'M.E.W.L.', 'LinWarrior': 'Linwarrior 3D', 'Mice Men Remix': 'Mice Men: Remix',
'OpenApoc': 'Open Apocalypse', 'open-cube': 'Open Cube', 'Heart of the Alien Redux': 'Heart of the Alien',
'opengl_test_drive_clone': 'OpenGL Test Drive Remake', 'Dune 2 - The Maker': 'Dune II - The Maker',
'Play Freeciv!': 'Freeciv-web', 'ProjectX': 'Forsaken', 'Lyon': 'Roton', 'Mafia II: Toolkit': 'Mafia: Toolkit',
'Siege of Avalon Open Source': 'Siege of Avalon : Open Source', 'ss13remake': 'SS13 Remake',
'shadowgrounds': 'Shadowgrounds', 'RxWars': 'Prescription Wars', 'REDRIVER2': 'REDriver2',
'Super Mario Bros And Level Editor in C#': 'Mario Objects', 'Unitystation': 'unitystation',
'tetris': 'Just another Tetris™ clone', 'twin-e': 'TwinEngine', 'super-methane-brothers-gx': 'Super Methane Brothers for Wii and GameCube',
'CrossUO: Ultima Online': 'CrossUO', 'OpMon': 'OPMon', '3DGE': 'EDGE', 'ironseed_fpc': 'Ironseed',
'2048-python': '2048 Python', 'Free Heroes 2 - Enhanced': 'Free Heroes 2',
'KKnD': 'OpenKrush', 'bab-be-u': 'BAB BE U', 'urde': 'Metaforce'}
# conversion between their license identifiers and ours
osgc_licenses_map = {'GPL2': 'GPL-2.0', 'GPL3': 'GPL-3.0', 'AGPL3': 'AGPL-3.0', 'LGPL3': 'LGPL-3.0',
'LGPL2': 'LGPL-2.0 or 2.1?', 'MPL': 'MPL-2.0', 'Apache': 'Apache-2.0',
'Artistic': 'Artistic License', 'Zlib': 'zlib', 'PD': 'Public domain', 'AFL3': 'AFL-3.0',
'BSD2': '2-clause BSD', 'JRL': 'Java Research License'}
# ignored osgc entries (for various reasons like unclear license etc.)
# TODO need to check them again and add to rejected list (if I find out why), also ask for appropriate licenses
osgc_ignored_entries = ["A Mouse's Vengeance", 'achtungkurve.com', 'AdaDoom3', 'Agendaroids', 'Alien 8', 'Ard-Reil',
'Balloon Fight', 'bladerunner (Engine within SCUMMVM)', 'Block Shooter', 'Bomb Mania Reloaded',
'boulder-dash', 'Cannon Fodder', 'Contra_remake', 'CosmicArk-Advanced', 'datastorm', 'Deuteros X',
'div-columns', 'div-pacman2600', 'div-pitfall', 'div-spaceinvaders2600', 'FreedroidClassic',
'FreeRails 2', 'Glest Advanced Engine', 'HeadOverHeels', 'Jumping Jack 2: Worryingly Familiar',
'Jumping Jack: Further Adventures', 'LixD', 'Meridian 59 German Server 112',
'Meridian 59 Server 105', 'OpenGeneral', 'OpenKKnD', 'Tile World 2', 'BattleCity']
def unique_field_contents(entries, field):
    """
    Return the distinct values found under ``field`` across ``entries``.

    List-valued fields contribute each of their elements; scalar fields
    contribute the value itself. Entries lacking the field are skipped.
    The result is a case-insensitively sorted list.
    """
    collected = set()
    for entry in entries:
        if field not in entry:
            continue
        value = entry[field]
        if type(value) is list:
            collected.update(value)
        else:
            collected.add(value)
    return sorted(collected, key=str.casefold)
def compare_sets(a, b, name, limit=None):
    """
    Summarize the differences between two collections as printable text.

    :param a: their entries
    :param b: our entries
    :param name: prefix written on every output line
    :param limit: 'notus' suppresses the first direction, 'notthem' the second
    :return: string describing the differences (empty when nothing is reported)
    """
    report = ''
    set_a = a if isinstance(a, set) else set(a)
    set_b = b if isinstance(b, set) else set(b)
    only_theirs = sorted(set_a - set_b)
    if only_theirs and limit != 'notus':
        report += ' {} : us : {}\n'.format(name, ', '.join(only_theirs))
    only_ours = sorted(set_b - set_a)
    if only_ours and limit != 'notthem':
        report += ' {} : them : {}\n'.format(name, ', '.join(only_ours))
    return report
def create_many_to_one_mapping(map):
    """Expand a dict keyed by tuples of keys into a flat one-key-per-entry dict.

    Every key inside each tuple maps to that tuple's value; on duplicate keys
    the later tuple wins, matching plain assignment order.
    """
    return {key: value for keys, value in map.items() for key in keys}
# conversion of osgc status to our state: missing status -> '?', 'playable' -> 'mature',
# 'semi-playable'/'unplayable' -> 'beta' (tuple keys are expanded to one key each)
osgc_status_map = create_many_to_one_mapping({(None,): '?', ('playable',): 'mature', ('semi-playable', 'unplayable'): 'beta'})
if __name__ == "__main__":
    # script parameters: fuzzy-name cutoff, cap on auto-created entries, optional passes
    similarity_threshold = 0.8
    maximal_newly_created_entries = 0
    check_similar_names = False
    download_missing_screenshots = False
    # paths
    root_path = os.path.realpath(os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir))
    # read our database
    our_entries = osg.read_entries()
    print('{} entries with us'.format(len(our_entries)))
    # read our list of rejected entries and add to specifically ignored entries
    our_rejected_entries = osg_rejected.read_rejected_file()
    our_rejected_entries = [entry['Title'] for entry in our_rejected_entries] # only keep titles
    print('{} ignored entries with us'.format(len(our_rejected_entries)))
    # overlap between the manually ignored osgc names and our rejected titles
    _ = set(osgc_ignored_entries).intersection(set(our_rejected_entries))
    if _:
        print('Specific ignored entries {} can be unignored, because already rejected by us.'.format(_))
    # print(sorted(list(set(osgc_ignored_entries) - _), key=str.casefold)) # just copy the output of this line into osgc_ignored_entries
    # read screenshots
    screenshots = osg.read_screenshots_overview()
    # import osgameclones data (expects a sibling checkout of the osgameclones repository)
    osgc_path = os.path.realpath(os.path.join(root_path, os.path.pardir, '11_osgameclones.git', 'games'))
    osgc_files = os.listdir(osgc_path)
    # iterate over all yaml files in osgameclones/data folder and load contents
    osgc_entries = []
    for file in osgc_files:
        # read yaml
        with open(os.path.join(osgc_path, file), 'r', encoding='utf-8') as stream:
            try:
                _ = yaml.safe_load(stream)
            except Exception as exc:
                print(file)
                raise exc
        # add to entries
        osgc_entries.extend(_)
    print('{} entries in osgameclones'.format(len(osgc_entries)))
    # check: print all git repos in osgameclones with untypical structure
    untypical_structure = ''
    for osgc_entry in osgc_entries:
        name = osgc_entry['name']
        if 'repo' in osgc_entry:
            osgc_repos = osgc_entry['repo']
            if isinstance(osgc_repos, str):
                osgc_repos = [osgc_repos]
            for repo in osgc_repos:
                if 'github' in repo and any((repo.endswith(x) for x in ('/', '.git'))):
                    untypical_structure += '  {} : {}\n'.format(osgc_entry['name'], repo)
    if untypical_structure:
        print('Git repos in osgc with untypical URL\n{}'.format(untypical_structure))
    # which fields do they have
    osgc_fields = set()
    for osgc_entry in osgc_entries:
        osgc_fields.update(osgc_entry.keys())
    osgc_fields = sorted(list(osgc_fields))
    print('Unique osgc-fields\n {}'.format(', '.join(osgc_fields)))
    # print value statistics for every field that takes part in the comparison below
    for field in osgc_fields:
        if field in ('video', 'feed', 'url', 'repo', 'info', 'updated', 'images', 'name', 'originals'):
            continue
        osgc_content = [entry[field] for entry in osgc_entries if field in entry]
        # flatten
        flat_content = []
        for content in osgc_content:
            if isinstance(content, list):
                flat_content.extend(content)
            else:
                flat_content.append(content)
        statistics = u.unique_elements_and_occurrences(flat_content)
        statistics.sort(key=str.casefold)
        print('{}: {}'.format(field, ', '.join(statistics)))
    # eliminate the ignored or rejected entries from them
    # TODO for rejected entries we should actually have a test that also checks for the URLs because names could be not unique
    _ = [x['name'] for x in osgc_entries if x['name'] in osgc_ignored_entries + our_rejected_entries] # those that will be ignored
    _ = set(osgc_ignored_entries) - set(_) # those that shall be ignored minus those that will be ignored
    if _:
        print('Can un-ignore {} because not contained anymore in osgc with this name.'.format(_))
    osgc_entries = [x for x in osgc_entries if x['name'] not in osgc_ignored_entries + our_rejected_entries]
    # fix names and licenses (so they are not longer detected as deviations downstreams)
    _ = [x['name'] for x in osgc_entries if x['name'] in osgc_name_aliases.keys()] # those that will be renamed
    _ = set(osgc_name_aliases.keys()) - set(_) # those that shall be renamed minus those that will be renamed
    if _:
        print('Can un-rename {} because not contained anymore in osgc with this name.'.format(_))
    # apply name aliases, map licenses to our spelling, prefix 'content ' onto content values
    for index, entry in enumerate(osgc_entries):
        name = entry['name']
        if name in osgc_name_aliases:
            entry['name'] = osgc_name_aliases[name]
        if 'license' in entry:
            osgc_licenses = entry['license']
            osgc_licenses = [osgc_licenses_map.get(x, x) for x in osgc_licenses]
            entry['license'] = osgc_licenses
        # fix content (add prefix content)
        if 'content' in entry:
            osgc_content = entry['content']
            if isinstance(osgc_content, str):
                osgc_content = [osgc_content]
            osgc_content = ['content ' + x for x in osgc_content]
            entry['content'] = osgc_content
    # which fields do they have
    osgc_fields = set()
    for osgc_entry in osgc_entries:
        osgc_fields.update(osgc_entry.keys())
    print('unique osgc-fields: {}'.format(osgc_fields))
    # which fields are mandatory (present in every entry)
    for osgc_entry in osgc_entries:
        remove_fields = [field for field in osgc_fields if field not in osgc_entry]
        osgc_fields -= set(remove_fields)
    print('mandatory osfg-fields: {}'.format(osgc_fields))
    # some field statistics
    print('osgc-development: {}'.format(unique_field_contents(osgc_entries, 'development')))
    print('osgc-multiplayer: {}'.format(unique_field_contents(osgc_entries, 'multiplayer')))
    print('osgc-type: {}'.format(unique_field_contents(osgc_entries, 'type')))
    print('osgc-languages: {}'.format(unique_field_contents(osgc_entries, 'lang')))
    print('osgc-licenses: {}'.format(unique_field_contents(osgc_entries, 'license')))
    print('osgc-status: {}'.format(unique_field_contents(osgc_entries, 'status')))
    print('osgc-framework: {}'.format(unique_field_contents(osgc_entries, 'framework')))
    print('osgc-content: {}'.format(unique_field_contents(osgc_entries, 'content')))
    # just the names
    osgc_names = set([x['name'] for x in osgc_entries])
    our_names = set([x['Title'] for x in our_entries])
    common_names = osgc_names & our_names
    osgc_names -= common_names
    our_names -= common_names
    print('{} both, {} only osgameclones, {} only us'.format(len(common_names), len(osgc_names), len(our_names)))
    # find similar names among the rest
    if check_similar_names:
        print('look for similar names (theirs - ours)')
        for osgc_name in osgc_names:
            for our_name in our_names:
                if osg.name_similarity(osgc_name, our_name) > similarity_threshold:
                    print('  {} - {}'.format(osgc_name, our_name))
    newly_created_entries = 0
    # iterate over their entries
    for osgc_entry in osgc_entries:
        osgc_name = osgc_entry['name']
        is_included = False
        for our_entry in our_entries:
            our_name = our_entry['Title']
            # find those that entries in osgameclones that are also in our database and compare them
            if osgc_name == our_name:
                is_included = True
                # a match, check the fields
                name = osgc_name
                # check if screenshots can be added
                if download_missing_screenshots and 'images' in osgc_entry:
                    their_images = osgc_entry['images'][:3] # only first three images
                    our_file = our_entry['File'][:-3] # without trailing ".md"
                    our_screenshots = screenshots.get(our_file, {})
                    our_urls = [x[2] for x in our_screenshots.values()]
                    their_images = [x for x in their_images if x not in our_urls]
                    their_images = their_images[:3-len(our_urls)] # only fill up to 3
                    for image_url in their_images:
                        # download image
                        try:
                            r = requests.get(image_url, headers={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64)'}, timeout=5, allow_redirects=True)
                        except Exception:
                            # SSLError or other
                            continue
                        image_url = r.url # just in case it got redirected
                        if r.status_code == requests.codes.ok:
                            try:
                                im = Image.open(BytesIO(r.content))
                            except Exception:
                                # PIL.UNidentifiedImageError
                                continue
                            if im.mode != 'RGB':
                                im = im.convert('RGB')
                            width = im.width
                            height = im.height
                            # downscale to a fixed 128px height, keeping the aspect ratio
                            target_height = 128
                            target_width = int(width / height * target_height)
                            im_resized = im.resize((target_width, target_height), resample=Image.LANCZOS)
                            idx = len(our_screenshots) + 1
                            # NOTE(review): the ' https://user-content.gitlab' prefix below starts with a stray
                            # space, so that startswith() test can never match -- confirm whether intended.
                            if any([image_url.startswith(x) for x in ('https://camo.githubusercontent', 'https://web.archive.org', ' https://user-content.gitlab', 'https://user-images.githubusercontent')]) or width <= 320:
                                image_url = '!' + image_url
                            our_screenshots[idx] = [target_width, target_height, image_url]
                            outfile = os.path.join(c.screenshots_path, '{}_{:02d}.jpg'.format(our_file, idx));
                            im_resized.save(outfile)
                    if our_screenshots:
                        screenshots[our_file] = our_screenshots
                        osg.write_screenshots_overview(screenshots)
                p = '' # contains a summary of all differences, if empty, no differences are found
                # TODO key names have changed on our side
                # compare their lang with our code language
                if 'lang' in osgc_entry:
                    osgc_languages = osgc_entry['lang']
                    if type(osgc_languages) == str:
                        osgc_languages = [osgc_languages]
                    our_languages = our_entry['Code language']
                    p += compare_sets(osgc_languages, our_languages, 'code language')
                # compare their license with our code and assets license
                if 'license' in osgc_entry:
                    osgc_licenses = osgc_entry['license']
                    our_code_licenses = our_entry['Code license'] # essential field
                    our_assets_licenses = our_entry.get('Assets license', [])
                    p += compare_sets(osgc_licenses, our_code_licenses + our_assets_licenses, 'licenses', 'notthem')
                    p += compare_sets(osgc_licenses, our_code_licenses, 'licenses', 'notus')
                # compare their framework with our code dependencies (capitalization is ignored for now, only starts are compared)
                our_framework_replacements = {'allegro4': 'allegro'}
                if 'framework' in osgc_entry:
                    osgc_frameworks = osgc_entry['framework']
                    if type(osgc_frameworks) == str:
                        osgc_frameworks = [osgc_frameworks]
                    our_frameworks = our_entry.get('Code dependency', [])
                    our_frameworks = [x.casefold() for x in our_frameworks]
                    our_frameworks = [x if x not in our_framework_replacements else our_framework_replacements[x] for x
                                      in our_frameworks]
                    osgc_frameworks = [x.casefold() for x in osgc_frameworks]
                    p += compare_sets(osgc_frameworks, our_frameworks, 'framework/dependencies')
                # compare their repo with our code repository and download
                if 'repo' in osgc_entry:
                    osgc_repos = osgc_entry['repo']
                    if type(osgc_repos) == str:
                        osgc_repos = [osgc_repos]
                    osgc_repos = [u.strip_url(url) for url in osgc_repos]
                    osgc_repos = [x for x in osgc_repos if not x.startswith(
                        'sourceforge.net/projects/')] # we don't need the general sites there
                    # osgc_repos = [x for x in osgc_repos if not x.startswith('https://sourceforge.net/projects/')] # ignore some
                    our_repos = our_entry.get('Code repository', [])
                    our_repos = [u.strip_url(url) for url in our_repos]
                    our_repos = [x for x in our_repos if not x.startswith(
                        'gitlab.com/osgames/')] # we do not yet spread our own deeds (but we will some day)
                    our_repos = [x for x in our_repos if
                                 'cvs.sourceforge.net' not in x and 'svn.code.sf.net/p/' not in x] # no cvs or svn anymore
                    our_downloads = our_entry.get('Download', [])
                    our_downloads = [u.strip_url(url) for url in our_downloads]
                    p += compare_sets(osgc_repos, our_repos + our_downloads, 'repo',
                                      'notthem') # if their repos are not in our downloads or repos
                    p += compare_sets(osgc_repos, our_repos[:1], 'repo',
                                      'notus') # if our main repo is not in their repo
                # compare their url (and feed) to our home (and strip urls)
                if 'url' in osgc_entry:
                    osgc_urls = osgc_entry['url']
                    if type(osgc_urls) == str:
                        osgc_urls = [osgc_urls]
                    osgc_urls = [u.strip_url(url) for url in osgc_urls]
                    our_urls = our_entry['Home']
                    our_urls = [u.strip_url(url) for url in our_urls]
                    p += compare_sets(osgc_urls, our_urls, 'url/home', 'notthem') # if their urls are not in our urls
                    # our_urls = [url for url in our_urls if
                    #             not url.startswith('github.com/')] # they don't have them as url
                    p += compare_sets(osgc_urls, our_urls[:1], 'url/home',
                                      'notus') # if our first url is not in their urls
                # compare their status with our state (playable can be beta/mature with us, but not playable must be beta)
                if 'status' in osgc_entry:
                    osgc_status = osgc_entry['status']
                    our_status = our_entry['State'] # essential field
                    if osgc_status != 'playable' and 'mature' in our_status:
                        p += '  status : mismatch : them {}, us mature\n'.format(osgc_status)
                # compare their development with our state
                if 'development' in osgc_entry:
                    osgc_development = osgc_entry['development']
                    our_inactive = 'inactive' in our_entry
                    our_status = our_entry['State'] # essential field
                    if osgc_development == 'halted' and not our_inactive:
                        p += '  development : mismatch : them halted - us not inactive\n'
                    if osgc_development in ['very active', 'active'] and our_inactive:
                        p += '  development : mismatch : them {}, us inactive\n'.format(osgc_development)
                    if osgc_development == 'complete' and 'mature' not in our_status:
                        p += '  development : mismatch : them complete, us not mature\n'
                # get our keywords
                our_keywords = our_entry['Keyword'] # essential
                # compare their originals to our inspirations
                our_originals = our_entry.get('Inspiration', [])
                if 'originals' in osgc_entry:
                    osgc_originals = osgc_entry['originals']
                    osgc_originals = [x.replace(',', '') for x in
                                      osgc_originals] # we cannot have ',' or parts in parentheses in original names
                    p += compare_sets(osgc_originals, our_originals, 'originals')
                # compare their multiplayer with our keywords (multiplayer) (only lowercase comparison)
                if 'multiplayer' in osgc_entry:
                    osgc_multiplayer = osgc_entry['multiplayer']
                    if type(osgc_multiplayer) == str:
                        osgc_multiplayer = [osgc_multiplayer]
                    osgc_multiplayer = [x.casefold() for x in osgc_multiplayer]
                    osgc_multiplayer = [x for x in osgc_multiplayer if x not in ['competitive']] # ignored
                    our_multiplayer = [x for x in our_keywords if x.startswith('multiplayer ')]
                    if our_multiplayer:
                        # by construction at most one 'multiplayer ...' keyword is allowed per entry
                        if len(our_multiplayer) != 1:
                            print(our_entry)
                            raise RuntimeError()
                        assert len(our_multiplayer) == 1
                        our_multiplayer = our_multiplayer[0][11:].split('+')
                        our_multiplayer = [x.strip().casefold() for x in our_multiplayer]
                    p += compare_sets(osgc_multiplayer, our_multiplayer, 'multiplayer')
                # compare content with keywords
                if 'content' in osgc_entry:
                    osgc_content = osgc_entry['content']
                    if isinstance(osgc_content, str):
                        osgc_content = [osgc_content]
                    p += compare_sets(osgc_content, our_keywords, 'content/keywords',
                                      'notthem') # only to us because we have more then them
                # compare their type to our keywords
                if 'type' in osgc_entry:
                    game_type = osgc_entry['type']
                    if isinstance(game_type, str):
                        game_type = [game_type]
                    p += compare_sets(game_type, our_keywords, 'type/keywords',
                                      'notthem') # only to us because we have more then them
                if p:
                    print('{}\n{}'.format(name, p))
        if not is_included:
            # a new entry, that we have never seen, maybe we should make an entry of our own
            # TODO we could use the write capabilities to write the entry in our own format, the hardcoded format here might be brittle, on the other hand we can also write slightly wrong stuff here without problems
            if newly_created_entries >= maximal_newly_created_entries:
                continue
            game_type = osgc_entry.get('type', None)
            osgc_status = [osgc_status_map[osgc_entry.get('status', None)]]
            # determine file name
            print('create new entry for {}'.format(osgc_name))
            file_name = osg.canonical_name(osgc_name) + '.md'
            target_file = os.path.join(c.entries_path, file_name)
            if os.path.isfile(target_file):
                print('warning: file {} already existing, save under slightly different name'.format(file_name))
                target_file = os.path.join(c.entries_path, file_name[:-3] + '-duplicate.md')
                if os.path.isfile(target_file):
                    continue # just for safety reasons
            # add Title and File
            entry = {'Title': osgc_name, 'File': file_name}
            # add home
            if 'url' in osgc_entry:
                home = osgc_entry['url']
                if type(home) == str:
                    home = [home]
                entry['Home'] = home
            # add inspiration
            if 'originals' in osgc_entry:
                osgc_originals = osgc_entry['originals']
                if type(osgc_originals) == str:
                    osgc_originals = [osgc_originals]
                entry['Inspiration'] = osgc_originals
            # add state
            if osgc_entry.get('development', None) == 'halted':
                osgc_status.append('inactive since XX')
            entry['State'] = osgc_status
            # language tags
            lang = osgc_entry.get('lang', [])
            if type(lang) == str:
                lang = [lang]
            # code language (mandatory on our side)
            entry['Code language'] = lang
            # platform 'Web' if language == JavaScript or TypeScript
            if len(lang) == 1 and lang[0] in ('JavaScript', 'TypeScript'):
                entry['Platform'] = ['Web']
            # keywords
            keywords = []
            if game_type:
                keywords.append(game_type)
            if 'multiplayer' in osgc_entry:
                osgc_multiplayer = osgc_entry['multiplayer']
                if type(osgc_multiplayer) == str:
                    osgc_multiplayer = [osgc_multiplayer]
                keywords.append('multiplayer {}'.format(' + '.join(osgc_multiplayer)))
            if 'content' in osgc_entry:
                osgc_content = osgc_entry['content'] # it's a list
                keywords.extend(osgc_content)
            if keywords:
                entry['Keyword'] = keywords
            # code repository (mandatory on our side)
            # NOTE(review): repo defaults to [] (a list), but startswith/endswith below
            # assume a str -- a 'repo' key holding a list would raise AttributeError; confirm.
            repo = osgc_entry.get('repo', [])
            if repo and repo.startswith('https://git') and not repo.endswith('.git'):
                # we have them with .git on github/gitlab
                repo += '.git'
            if type(repo) == str:
                repo = [repo]
            entry['Code repository'] = repo
            # code license
            entry['Code license'] = osgc_entry['license']
            # code dependencies (if existing)
            if 'framework' in osgc_entry:
                osgc_frameworks = osgc_entry['framework']
                if type(osgc_frameworks) == str:
                    osgc_frameworks = [osgc_frameworks]
                entry['Code dependency'] = osgc_frameworks
            # write info (if existing)
            if 'info' in osgc_entry:
                entry['Note'] = osgc_entry['info']
            # add empty building
            entry['Building'] = {}
            # finally write to file
            print(entry)
            osg.write_entry(entry)
            newly_created_entries += 1
    # save updated screenshots if they could have changed
    if download_missing_screenshots:
        osg.write_screenshots_overview(screenshots)
    # now iterate over our entries and test if we can add anything to them
    print('entries that could be added to them:')
    for our_entry in our_entries:
        our_name = our_entry['Title']
        # only if contains Inspiration and not "tool", "framework" or "library"
        our_keywords = our_entry['Keyword']
        if not 'Inspiration' in our_entry:
            continue
        if any([x in ['tool', 'library', 'framework', 'game engine'] for x in our_keywords]):
            continue
        is_included = False
        for osgc_entry in osgc_entries:
            osgc_name = osgc_entry['name']
            if osgc_name == our_name:
                is_included = True
        if not is_included:
            # not in osgc yet - candidate we could contribute to them
            print('- [{}]({})'.format(our_name, 'https://github.com/Trilarion/opensourcegames/blob/master/entries/' + our_entry['File']))
| [
171,
119,
123,
37811,
198,
198,
418,
6057,
565,
1952,
468,
262,
1708,
7032,
25,
198,
6,
43162,
3256,
705,
15588,
3256,
705,
260,
7501,
3256,
705,
43085,
3256,
705,
11612,
6897,
3256,
705,
13376,
3256,
705,
41684,
7829,
3256,
705,
1095... | 2.124575 | 14,128 |
# Manual smoke test: open the connection and read two compressor temperatures.
# NOTE(review): Python 2 print-statement syntax -- runs only under Python 2;
# `connection.com` is a project-local module not visible in this file.
from connection import com
h = com()
print h.open()
print h.sensor.compressor_motor_temperature()
print h.sensor.compressor_supply_temperature()
6738,
4637,
1330,
220,
401,
628,
198,
71,
796,
401,
3419,
198,
198,
4798,
289,
13,
9654,
3419,
628,
198,
4798,
289,
13,
82,
22854,
13,
5589,
44292,
62,
76,
20965,
62,
11498,
21069,
3419,
628,
198,
4798,
289,
13,
82,
22854,
13,
558... | 2.923077 | 52 |
import operator
import warnings
from copy import deepcopy
from functools import reduce
from typing import Any, Dict, Tuple
from django.http.response import JsonResponse
from django.shortcuts import render
from .exceptions import RequestValidationError
from .extras import merge_openapi_info
from .schema import schema_parameter, schema_request_body, schema_response
from .utils import get_all_urls, is_class_view
| [
11748,
10088,
198,
11748,
14601,
198,
6738,
4866,
1330,
2769,
30073,
198,
6738,
1257,
310,
10141,
1330,
4646,
198,
6738,
19720,
1330,
4377,
11,
360,
713,
11,
309,
29291,
198,
198,
6738,
42625,
14208,
13,
4023,
13,
26209,
1330,
449,
1559... | 3.675439 | 114 |
"""getters functions"""
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from improved_permissions.exceptions import NotAllowed
from improved_permissions.models import UserRole
from improved_permissions.utils import check_my_model, get_roleclass
def get_user(role_class=None, obj=None):
    """
    Return the single User attached via a unique role.

    Only roles whose role class declares ``unique = True`` are considered.
    Optionally filtered by *role_class* and/or by the specific *obj*.
    Returns ``None`` when no such user exists; raises ``NotAllowed`` when
    more than one distinct user matches.
    """
    roles_qs = UserRole.objects.select_related('user').all()
    role = None
    if role_class:
        # Restrict to the given role class (attached to any object).
        role = get_roleclass(role_class)
        roles_qs = roles_qs.filter(role_class=role.get_class_name())
    if obj:
        # Restrict to roles attached to this specific object.
        ct_obj = ContentType.objects.get_for_model(obj)
        roles_qs = roles_qs.filter(content_type=ct_obj.id, object_id=obj.id)
    # Check if object belongs to the role class.
    check_my_model(role, obj)
    # Keep only the users whose role class is marked unique=True.
    matches = [item.user for item in roles_qs
               if get_roleclass(item.role_class).unique is True]
    if len(set(matches)) > 1:
        raise NotAllowed(
            'Multiple unique roles was found using '
            'the function get_user. Use get_users '
            'instead.'
        )
    return matches[0] if matches else None
def get_users(role_class=None, obj=None):
    """
    Return a distinct QuerySet of users, filtered by role class and/or object.

    - both given: users who hold that role class on that object
    - only role_class: users who hold that role class on any object
    - neither: every user of the project
    """
    role = None
    lookup = {}
    if role_class:
        # All users who have "role_class" attached to any object.
        role = get_roleclass(role_class)
        lookup['roles__role_class'] = role.get_class_name()
    if obj:
        # All users who have any role attached to the object.
        ct_obj = ContentType.objects.get_for_model(obj)
        lookup['roles__content_type'] = ct_obj.id
        lookup['roles__object_id'] = obj.id
    # Check if object belongs to the role class.
    check_my_model(role, obj)
    # Return as a distinct QuerySet.
    return get_user_model().objects.filter(**lookup).distinct()
def get_objects(user, role_class=None, model=None):
    """
    Return the distinct objects the user has roles attached to, optionally
    narrowed down to a single role class and/or a single model.
    """
    role = None
    roles_qs = UserRole.objects.filter(user=user)
    if role_class:
        # Filtering by role class.
        role = get_roleclass(role_class)
        roles_qs = roles_qs.filter(role_class=role.get_class_name())
    if model:
        # Filtering by model.
        ct_obj = ContentType.objects.get_for_model(model)
        roles_qs = roles_qs.filter(content_type=ct_obj.id)
    # Check if object belongs to the role class.
    check_my_model(role, model)
    # Deduplicate through a set, then hand back a list.
    return list({item.obj for item in roles_qs})
def get_role(user, obj=None):
    """
    Shortcut for the common case of exactly one attached role class:
    return the first result of get_roles().
    """
    roles = get_roles(user, obj)
    return roles[0]
def get_roles(user, obj=None):
    """
    Return the list of role classes attached to *user*; when *obj* is given,
    only roles attached to that object are considered.
    """
    roles_qs = UserRole.objects.filter(user=user)
    if obj:
        ct_obj = ContentType.objects.get_for_model(obj)
        roles_qs = roles_qs.filter(content_type=ct_obj.id, object_id=obj.id)
    # Convert the stored class-name strings back into role classes.
    return [get_roleclass(item.role_class) for item in roles_qs]
| [
37811,
1136,
1010,
5499,
37811,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
651,
62,
7220,
62,
19849,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
11299,
19199,
13,
27530,
1330,
14041,
6030,
198,
198,
6738,
6596,
62,
525,
... | 2.601471 | 1,631 |
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
# Global transform state, presumably shared with GLUT callbacks defined
# elsewhere -- TODO confirm (nothing in this file reads them).
scale = 1
transx = transy = 1
rota = 1
if __name__ == "__main__":
    # Create a single-buffered RGB GLUT window and enter the event loop.
    glutInit()
    glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB)
    glutInitWindowSize(800, 600)
    glutCreateWindow('Window')
    glClearColor(0.2,0.2,0.2,0)
    # NOTE(review): `keyboard` and `draw1` are not defined in the visible part
    # of this file; verify they exist before running.
    glutKeyboardFunc(keyboard)
    glutDisplayFunc(draw1)
    glutMainLoop()
| [
6738,
30672,
13,
8763,
1330,
1635,
198,
6738,
30672,
13,
8763,
52,
1330,
1635,
198,
6738,
30672,
13,
8763,
3843,
1330,
1635,
198,
9888,
796,
352,
198,
7645,
87,
796,
1007,
88,
796,
352,
198,
305,
8326,
796,
352,
198,
361,
11593,
367... | 2.401274 | 157 |
from django.test import TestCase
from django.contrib.auth import get_user_model
| [
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
651,
62,
7220,
62,
19849,
628
] | 3.375 | 24 |
#------------
# Web request
import requests
#------------
# Html parser
from bs4 import BeautifulSoup
import lxml
#-----------------
# Standard library
import json
import re
"""--------+---------+---------+---------+---------+---------+---------+---------+---------|
| M A I N C L A S S |
|----------+---------+---------+---------+---------+---------+---------+---------+-------"""
class Pybiblia():
    # NOTE(review): no __init__ is defined, yet content()/search()/toc() read
    # self.strUrlBase and self.strUrlToken, and the error paths call
    # self.json_error(); none of these are defined in this file's visible
    # portion -- presumably a constructor was lost. Confirm before use.
    """--------+---------+---------+---------+---------+---------+---------+---------+---------|
    |                                C O N S T R U C T O R                                     |
    |----------+---------+---------+---------+---------+---------+---------+---------+-------"""
    """--------+---------+---------+---------+---------+---------+---------+---------+---------|
    |                            C L A S S   R E Q U E S T S                                   |
    |----------+---------+---------+---------+---------+---------+---------+---------+-------"""
    def content(self, bible, passage):
        """ Returns the content of a bible """
        assert(bible)
        assert(passage)
        if bible == 'rsvce':
            # the RSVCE is scraped from biblia.com instead of the API
            return self.rsvce(passage)
        else:
            strOutput = '.txt'
            strPassage = '&passage=' + passage
            res = requests.get(self.strUrlBase + '/content/' + bible + strOutput + self.strUrlToken + strPassage)
            if not res:
                return self.json_error()
            return res.text
    def rsvce(self, passage):
        """ Returns the content of the Revised Standard Edition Catholic Edition (RSVCE) """
        assert(passage)
        strUrlBase = 'https://biblia.com/bible/rsvce'
        objRes = requests.get(strUrlBase + '/' + passage)
        objSoup = BeautifulSoup(objRes.content, 'lxml')
        #----------------------------
        # 1/3 Scrape all the div tags
        refs = []
        for div in objSoup.find_all('div', attrs={'class': 'resourcetext'}):
            refs.append(div)
            #----------------------------
            # Uncomment to debug raw html
            #print(len(refs))
            #print(str(div) + '\r')
            #print(str(div.text))
        #-----------------------------------------
        # 2/3 From the div tag use Regex find text
        # strip leading verse numbers; raises IndexError when no div matched
        objRegex = re.sub(r"(^|\W)\d+","",refs[0].text)
        #-----------------------------
        # 3/3 Split string to get text
        strRegex = objRegex.lstrip()
        return strRegex
    def search(self, bible, query):
        """ Searches the text of a bible """
        strOutput = '.txt'
        strQuery = '&query=' + query
        res = requests.get(self.strUrlBase + '/search/' + bible + strOutput + self.strUrlToken + strQuery)
        if not res:
            return self.json_error()
        return res.json()
    def toc(self, bible):
        """ Returns the table of contents of a bible """
        assert(bible)
        res = requests.get(self.strUrlBase + '/contents/' + bible + self.strUrlToken)
        if not res:
            return self.json_error()
        return res.json()
    def votd(self, bible):
        """ Returns a carefully chosen verse each day """
        assert(bible)
        strUrlBase = 'https://biblia.com/api/plugins/verseoftheday'
        objRes = requests.get(strUrlBase + '/' + bible)
        objSoup = BeautifulSoup(objRes.content, 'lxml')
        #-------------------------------
        # 1/3 Scrape all the anchor tags
        refs = []
        for a in objSoup.find_all('a', href=True):
            refs.append(a)
            #----------------------------
            # Uncomment to debug raw html
            #print(len(refs))
            #print(str(a) + '\r')
            #print(str(a.text))
        #-----------------------------------------------
        # 2/3 From the anchor tag use Regex find passage
        objRegex = re.search('ref.ly/[A-Z,a-z,1-9]+[1-9]+.[1-9,-]+;', str(refs[2]))
        # implicitly returns None when the pattern does not match
        if objRegex:
            strRegex = objRegex.group(0)
            #--------------------------------
            # 3/3 Split string to get passage
            passage = strRegex.split('/')[1]
            strRet = passage.split(';')[0]
            return strRet
    """--------+---------+---------+---------+---------+---------+---------+---------+---------|
    |                              C L A S S   M E T H O D S                                   |
    |----------+---------+---------+---------+---------+---------+---------+---------+-------"""
    """--------+---------+---------+---------+---------+---------+---------+---------+---------|
    |                            C L A S S   M E T A D A T A                                   |
    |----------+---------+---------+---------+---------+---------+---------+---------+-------"""
    def _init_meta(self):
        """
        | _strMETACLASS, _strMETAVERSION, _strMETAFILE used to save() and load() members
        """
        self._strMETACLASS = str(self.__class__).split('.')[1][:-2]
        self._strMETAVERSION = "0.2.0"
        """
        | Filename "_Class_Version_"
        """
        self._strMETAFILE = "_" + self._strMETACLASS + "_" + self._strMETAVERSION + "_"
| [
2,
10541,
198,
2,
5313,
2581,
198,
11748,
7007,
198,
2,
10541,
198,
2,
367,
20369,
30751,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
198,
11748,
300,
19875,
198,
2,
1783,
12,
198,
2,
8997,
5888,
198,
11748,
33918,
198,
11748,
... | 2.461266 | 1,975 |
"Unit tests for starlark API of ts_project with custom transpiler"
load("@bazel_skylib//lib:unittest.bzl", "asserts", "unittest")
load("@rules_nodejs//nodejs:providers.bzl", "DeclarationInfo", "JSModuleInfo")
# NOTE(review): the rule-implementation functions _impl0/_impl1/_impl2 are not
# visible in this chunk -- presumably defined nearby; verify before editing.
# Checks the declared .d.ts outputs when transpiling with swc.
transitive_declarations_test = unittest.make(_impl0, attrs = {
    "lib": attr.label(default = "transpile_with_swc"),
    "expected_declarations": attr.string_list(default = ["big.d.ts"]),
})
# Checks JS outputs are still produced when type-checking fails.
transpile_with_failing_typecheck_test = unittest.make(_impl1, attrs = {
    "lib": attr.label(default = "transpile_with_typeerror"),
    "expected_js": attr.string_list(default = ["typeerror.js", "typeerror.js.map"]),
})
# Checks JS outputs for a target that also emits .d.ts files.
transpile_with_dts_test = unittest.make(_impl2, attrs = {
    "lib": attr.label(default = "transpile_with_dts"),
    "expected_js": attr.string_list(default = ["index.js", "index.js.map"]),
})
| [
1,
26453,
5254,
329,
3491,
75,
668,
7824,
286,
40379,
62,
16302,
351,
2183,
1007,
79,
5329,
1,
198,
198,
2220,
7203,
31,
65,
41319,
62,
15688,
8019,
1003,
8019,
25,
403,
715,
395,
13,
65,
48274,
1600,
366,
30493,
82,
1600,
366,
40... | 2.535604 | 323 |
import threading
import numpy as np
from PIL import ImageFont, ImageDraw, Image
class ThreadsafeIter:
    """
    Takes an iterator/generator and makes it thread-safe by
    serializing call to the `next` method of given iterator/generator.
    """
    # NOTE(review): the class body (lock plus __iter__/__next__) is missing
    # here -- the source looks truncated; only the docstring survives.
def threadsafe_generator(f):
    """
    A decorator that takes a generator function and makes it thread-safe.
    """
    # NOTE(review): `g` is undefined here -- the inner wrapper that should
    # define it (typically wrapping f(*a, **kw) in ThreadsafeIter) appears to
    # have been lost; as written this raises NameError. Confirm and restore.
    return g
def redraw_frame(image, names, aligned):
    """
    Draw a bounding box (and, when available, a name label) for every face
    in *aligned* onto *image*; return the annotated frame as a numpy array.

    Faces named 'unknown' get a second box and label in an alternate color;
    faces without a computed name yet get a 'refreshing...' label.
    """
    label_font = ImageFont.truetype("DejaVuSansMono.ttf", size=17)
    frame = Image.fromarray(image)
    painter = ImageDraw.Draw(frame)
    for position, face in enumerate(aligned):
        left, top, right, bottom = face[0], face[1], face[2], face[3]
        painter.rectangle((left, top, right, bottom), outline=(0, 255, 0), width=2)
        if names is not None and len(names) > position:
            if names[position] == 'unknown':
                painter.text((left, top - 30), "unknown", fill=(0, 0, 255), font=label_font)
                painter.rectangle((left, top, right, bottom), outline=(0, 0, 255), width=2)
            else:
                painter.text((left, top - 30), names[position], fill=(0, 255, 0), font=label_font)
        else:
            # No recognition result for this face yet.
            painter.text((left, top - 30), 'refreshing...', fill=(255, 0, 0), font=label_font)
    return np.array(frame)
| [
11748,
4704,
278,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
350,
4146,
1330,
7412,
23252,
11,
7412,
25302,
11,
7412,
628,
198,
4871,
14122,
21230,
29993,
25,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
33687,
281,
41313,
... | 2.313029 | 591 |
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import argparse
import logging as log
from typing import List
from mo.moc_frontend.extractor import fe_user_data_repack
from mo.middle.passes.infer import validate_batch_in_shape
from openvino.runtime import Dimension, PartialShape # pylint: disable=no-name-in-module,import-error
from openvino.frontend import FrontEnd, Place # pylint: disable=no-name-in-module,import-error
from openvino.runtime.utils.types import get_element_type # pylint: disable=no-name-in-module,import-error
def moc_pipeline(argv: argparse.Namespace, moc_front_end: FrontEnd):
    """
    Load the input model via the given frontend and convert it to an nGraph function.

    :param argv: parsed command line arguments (expects at least input_model,
        placeholder_shapes, placeholder_data_types, output,
        freeze_placeholder_with_value, batch)
    :param moc_front_end: loaded FrontEnd used for converting the input model
    :return: converted nGraph function ready for serialization
    """
    # Load the framework model through the frontend-specific loader.
    input_model = moc_front_end.load(argv.input_model)
    # Re-pack user CLI options (shapes, data types, outputs, frozen placeholders)
    # into frontend Place objects resolved against the loaded model.
    # NOTE(review): freeze_placeholder is unpacked here but never used below —
    # confirm whether placeholder freezing is handled elsewhere.
    user_shapes, outputs, freeze_placeholder = fe_user_data_repack(
        input_model, argv.placeholder_shapes, argv.placeholder_data_types,
        argv.output, argv.freeze_placeholder_with_value)
    def check_places_are_same(places_original: List[Place], places_new: List[Place]):
        """
        Check if the set of new places is the same as the original one.

        :param places_original: List[Place] original model places
        :param places_new: List[dict] new places; each item is a dict with a 'node' Place
        :return: True if the new list of places matches the original (same size,
            and every original place is equal to some new place)
        """
        return len(places_original) == len(places_new) and len(
            [item for item in places_original if any(
                [item.is_equal(item2['node']) for item2 in places_new])]) == len(places_original)
    # Determine whether the user-requested inputs/outputs differ from the
    # model's own; only then does the graph need cutting/overriding.
    inputs_equal = True
    if user_shapes:
        inputs_equal = check_places_are_same(input_model.get_inputs(), user_shapes)
    outputs_equal = True
    if outputs:
        outputs_equal = check_places_are_same(input_model.get_outputs(), outputs)
    log.debug('Inputs are same: {}, outputs are same: {}'.format(
        inputs_equal, outputs_equal))
    if not inputs_equal and not outputs_equal:
        # Both ends differ: cut out the requested subgraph in one operation.
        # Use ExtractSubgraph
        new_input_places = [x['node'] for x in user_shapes]
        new_output_places = [x['node'] for x in outputs]
        log.debug('Using extract subgraph')
        input_model.extract_subgraph(new_input_places, new_output_places)
    elif not inputs_equal:
        # Only inputs differ: re-root the model at the requested input places.
        new_input_places = [x['node'] for x in user_shapes]
        log.debug('Using override_all_inputs')
        input_model.override_all_inputs(new_input_places)
    elif not outputs_equal:
        # Only outputs differ: truncate the model at the requested output places.
        new_output_places = [x['node'] for x in outputs]
        log.debug('Using override_all_outputs')
        input_model.override_all_outputs(new_output_places)
    # Apply user-specified shapes and element types per input place.
    if user_shapes:
        for user_shape in user_shapes:
            if user_shape.get('shape') is not None:
                input_model.set_partial_shape(
                    user_shape['node'], PartialShape(user_shape['shape']))
            if user_shape.get('data_type') is not None:
                data_type = get_element_type(user_shape['data_type'])
                log.debug('Set data type: {}'.format(data_type))
                input_model.set_element_type(user_shape['node'], data_type)
    # Set batch size
    if argv.batch is not None and argv.batch > 0:
        log.debug('Setting batch size to {}'.format(argv.batch))
        for place in input_model.get_inputs():
            old_partial_shape = input_model.get_partial_shape(place)
            # NOTE(review): shape_to_array is not among this module's visible
            # imports — confirm it is imported/available at runtime.
            old_shape_array = shape_to_array(old_partial_shape) if old_partial_shape.rank.is_static else []
            joined_name = ' '.join(place.get_names())
            # Reject shapes whose first dimension cannot carry a batch override.
            validate_batch_in_shape(old_shape_array, joined_name)
            # Assume batch size is always 1-st dimension in shape
            # Keep other dimensions unchanged
            new_shape = [old_partial_shape.get_dimension(i)
                         for i in range(old_partial_shape.rank.get_length())]
            new_shape[0] = Dimension(argv.batch)
            new_partial_shape = PartialShape(new_shape)
            log.debug('Input: {}, Old shape: {}, New shape: {}'.format(
                joined_name, old_shape_array, new_shape))
            input_model.set_partial_shape(place, new_partial_shape)
    # Final conversion of the (possibly cut/reshaped) model to an nGraph function.
    ngraph_function = moc_front_end.convert(input_model)
    return ngraph_function
| [
2,
15069,
357,
34,
8,
2864,
12,
1238,
2481,
8180,
10501,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
24843,
12,
17,
13,
15,
198,
198,
11748,
1822,
29572,
198,
11748,
18931,
355,
2604,
198,
6738,
19720,
1330,
7343,
198,
198,
... | 2.455253 | 1,799 |
import re

# Vendor-specific detection patterns.
# OAuthDex serial numbers: 3 digits, a dash, then 8 digits (e.g. 123-12345678).
oauth_sn = re.compile(r'\b(\d{3}-\d{8}\b)')
# OAuthDex product keys: alnum, digit, alnum, letter, digit (e.g. a1b c3 -> "a1bc3").
oauth_key = re.compile(r'\b([a-zA-Z]|\d)\d([a-zA-Z]|\d)[a-zA-Z]\d\b')
# Abalobadiah serial numbers: six dash-separated groups of 4 digits each.
abalo_sn = re.compile(r'(\b(\d{4}-){5}\d{4}\b)')

# Lookup tables mapping vendor name -> compiled pattern, consumed by callers
# that scan text for restricted identifiers.
serial_number_restrictions = {
    'OAuthDex': oauth_sn,
    'Abalobadiah': abalo_sn,
}
product_key_restrictions = {
    'OAuthDex': oauth_key,
}
| [
11748,
302,
198,
198,
2,
220,
3310,
3109,
2989,
13042,
329,
1728,
3788,
17192,
198,
12162,
1071,
62,
16184,
796,
302,
13,
5589,
576,
7,
81,
6,
59,
65,
38016,
67,
90,
18,
92,
12,
59,
67,
90,
23,
32239,
65,
8,
11537,
198,
12162,
... | 1.977901 | 181 |
from setuptools import find_packages, setup

# The long description shown on PyPI is taken verbatim from the README.
with open("README.md", "r", encoding="utf-8") as readme_file:
    long_description = readme_file.read()

# pylint: disable=line-too-long
# Trove classifiers describing supported Python versions, license and domain.
_CLASSIFIERS = [
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.6",
    "License :: OSI Approved :: MIT License",
    "Operating System :: OS Independent",
    "Topic :: Scientific/Engineering :: Artificial Intelligence",
]

# Package metadata, collected in one place and expanded into setup().
_SETUP_KWARGS = dict(
    name="rl_algorithms",
    version="0.0.1",
    author="medipixel",
    author_email="kh.kim@medipixel.io",
    description="RL algorithms which are being used for research activities",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/FurkanArslan/rl_algorithms.git",
    keywords="reinforcement-learning python machine learning",
    packages=find_packages(),
    classifiers=_CLASSIFIERS,
    python_requires=">=3.6",
    zip_safe=False,
)

setup(**_SETUP_KWARGS)
| [
6738,
900,
37623,
10141,
1330,
1064,
62,
43789,
11,
9058,
198,
198,
4480,
1280,
7203,
15675,
11682,
13,
9132,
1600,
366,
81,
1600,
21004,
2625,
40477,
12,
23,
4943,
355,
277,
71,
25,
198,
220,
220,
220,
890,
62,
11213,
796,
277,
71,... | 2.754386 | 342 |