hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
39a94b915b1b441195a914ac2fb7a60efde9894a
| 45
|
py
|
Python
|
gluoncv/utils/nn/__init__.py
|
xcgoner/gluon-exp
|
432a1aafc1466720b6169bb41caabb2a217b0797
|
[
"Apache-2.0"
] | 1
|
2018-08-21T16:49:00.000Z
|
2018-08-21T16:49:00.000Z
|
gluoncv/utils/nn/__init__.py
|
xcgoner/gluon-exp
|
432a1aafc1466720b6169bb41caabb2a217b0797
|
[
"Apache-2.0"
] | null | null | null |
gluoncv/utils/nn/__init__.py
|
xcgoner/gluon-exp
|
432a1aafc1466720b6169bb41caabb2a217b0797
|
[
"Apache-2.0"
] | null | null | null |
"""GluonCV utility neural network layers."""
| 22.5
| 44
| 0.733333
| 5
| 45
| 6.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 45
| 1
| 45
| 45
| 0.825
| 0.844444
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
f2d532653968ce43a17b647c8a02ff9583ee5bf0
| 32
|
py
|
Python
|
trac_cms/__init__.py
|
FelixSchwarz/trac_cms
|
2d474f25606868e5480fe435fb7f991890af9bb9
|
[
"MIT"
] | null | null | null |
trac_cms/__init__.py
|
FelixSchwarz/trac_cms
|
2d474f25606868e5480fe435fb7f991890af9bb9
|
[
"MIT"
] | null | null | null |
trac_cms/__init__.py
|
FelixSchwarz/trac_cms
|
2d474f25606868e5480fe435fb7f991890af9bb9
|
[
"MIT"
] | null | null | null |
from trac_cms.web_ui import *
| 8
| 29
| 0.75
| 6
| 32
| 3.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1875
| 32
| 3
| 30
| 10.666667
| 0.846154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
840a2fbed0617d4795ba74d865e9609526b3eed0
| 469
|
py
|
Python
|
C2_Observer_Pattern/v1/subject/Subject.py
|
sarada92/Design_Pattern
|
f817206a6f995bc6f534d7cabb3a290955f37d33
|
[
"MIT"
] | 1
|
2022-02-06T15:42:09.000Z
|
2022-02-06T15:42:09.000Z
|
C2_Observer_Pattern/v1/subject/Subject.py
|
sarada92/Design_Pattern
|
f817206a6f995bc6f534d7cabb3a290955f37d33
|
[
"MIT"
] | null | null | null |
C2_Observer_Pattern/v1/subject/Subject.py
|
sarada92/Design_Pattern
|
f817206a6f995bc6f534d7cabb3a290955f37d33
|
[
"MIT"
] | null | null | null |
from abc import abstractmethod, ABCMeta
from C2_Observer_Pattern.v1.observers.Observers import Observers
class Subject(metaclass=ABCMeta):
@abstractmethod
def add_observer(self, observer: Observers):
pass
@abstractmethod
def remove_observer(self, observer: Observers):
pass
@abstractmethod
def notify_update(self):
pass
@abstractmethod
def set_weather(self, temperature, humidity, pressure):
pass
| 19.541667
| 64
| 0.707889
| 49
| 469
| 6.653061
| 0.510204
| 0.208589
| 0.193252
| 0.177914
| 0.306748
| 0.306748
| 0.306748
| 0
| 0
| 0
| 0
| 0.00551
| 0.226013
| 469
| 23
| 65
| 20.391304
| 0.892562
| 0
| 0
| 0.533333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.266667
| false
| 0.266667
| 0.133333
| 0
| 0.466667
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
844ed2de2d0d42e5534fca7b7f001fbb19914fbb
| 21,715
|
py
|
Python
|
rest/api/swagger.py
|
estuaryoss/estuary-agent
|
b8b4264a616be21c86458da75cf29d13a8fb263d
|
[
"Apache-2.0"
] | null | null | null |
rest/api/swagger.py
|
estuaryoss/estuary-agent
|
b8b4264a616be21c86458da75cf29d13a8fb263d
|
[
"Apache-2.0"
] | null | null | null |
rest/api/swagger.py
|
estuaryoss/estuary-agent
|
b8b4264a616be21c86458da75cf29d13a8fb263d
|
[
"Apache-2.0"
] | null | null | null |
swagger_file_content = '''
swagger: '2.0'
info:
description: Estuary agent will run your shell commands via REST API
version: 4.4.0
title: estuary-agent
contact:
name: Catalin Dinuta
url: 'https://github.com/dinuta'
email: constantin.dinuta@gmail.com
license:
name: Apache 2.0
url: 'http://www.apache.org/licenses/LICENSE-2.0.html'
host: 'localhost:8080'
basePath: /
tags:
- name: estuary-agent
description: root
paths:
/about:
get:
tags:
- estuary-agent
summary: Information about the application
operationId: aboutGet
produces:
- application/json
parameters:
- name: Token
in: header
description: Token
required: false
type: string
responses:
'200':
description: Prints the name and version of the application.
schema:
$ref: '#/definitions/ApiResponse'
'401':
description: Unauthorized
'403':
description: Forbidden
'404':
description: Not Found
/command:
post:
tags:
- estuary-agent
summary: Starts multiple commands in blocking mode sequentially. Set the client timeout at needed value.
operationId: commandPost_1
consumes:
- application/json
- application/x-www-form-urlencoded
- text/plain
produces:
- application/json
parameters:
- in: body
name: commands
description: Commands to run. E.g. ls -lrt
required: true
schema:
type: string
- name: Token
in: header
description: Token
required: false
type: string
responses:
'200':
description: Commands start success
schema:
$ref: '#/definitions/ApiResponse'
'201':
description: Created
'401':
description: Unauthorized
'403':
description: Forbidden
'404':
description: Not Found
'500':
description: Commands start failure
schema:
$ref: '#/definitions/ApiResponse'
/commanddetached:
get:
tags:
- estuary-agent
summary: Gets information about the last command started in detached mode
operationId: commandDetachedGet
produces:
- application/json
parameters:
- name: Token
in: header
description: Token
required: false
type: string
responses:
'200':
description: Get command detached info success
schema:
$ref: '#/definitions/ApiResponse'
'401':
description: Unauthorized
'403':
description: Forbidden
'404':
description: Not Found
'500':
description: Get command detached info failure
schema:
$ref: '#/definitions/ApiResponse'
delete:
tags:
- estuary-agent
summary: Stops all commands that were previously started in detached mode
operationId: commandDetachedDelete
produces:
- application/json
parameters:
- name: Token
in: header
description: Token
required: false
type: string
responses:
'200':
description: command detached stop success
schema:
$ref: '#/definitions/ApiResponse'
'204':
description: No Content
'401':
description: Unauthorized
'403':
description: Forbidden
'500':
description: command detached stop failure
schema:
$ref: '#/definitions/ApiResponse'
'/commanddetached/{id}':
get:
tags:
- estuary-agent
summary: Gets information about the command identified by id started in detached mode
operationId: commandDetachedIdGet
produces:
- application/json
parameters:
- name: id
in: path
description: Command detached id set by the user
required: true
type: string
- name: Token
in: header
description: Token
required: false
type: string
responses:
'200':
description: Get command detached info success
schema:
$ref: '#/definitions/ApiResponse'
'401':
description: Unauthorized
'403':
description: Forbidden
'404':
description: Not Found
'500':
description: Get command detached info failure
schema:
$ref: '#/definitions/ApiResponse'
post:
tags:
- estuary-agent
summary: Starts the shell commands in detached mode and sequentially
operationId: commandDetachedIdPost
consumes:
- application/json
- application/x-www-form-urlencoded
- text/plain
produces:
- application/json
parameters:
- in: body
name: commandContent
description: List of commands to run one after the other. E.g. make/mvn/sh/npm
required: true
schema:
type: string
- name: id
in: path
description: Command detached id set by the user
required: true
type: string
- name: Token
in: header
description: Token
required: false
type: string
responses:
'200':
description: Commands start success
schema:
$ref: '#/definitions/ApiResponse'
'201':
description: Created
'401':
description: Unauthorized
'403':
description: Forbidden
'404':
description: Not Found
'500':
description: Commands start failure
schema:
$ref: '#/definitions/ApiResponse'
delete:
tags:
- estuary-agent
summary: Deletes the associated processes of the shell commands in detached mode
operationId: commandDetachedIdDelete
produces:
- application/json
parameters:
- name: id
in: path
description: Command detached id set by the user
required: true
type: string
- name: Token
in: header
description: Token
required: false
type: string
responses:
'200':
description: Command delete success
schema:
$ref: '#/definitions/ApiResponse'
'204':
description: No Content
'401':
description: Unauthorized
'403':
description: Forbidden
'500':
description: Command delete failure
schema:
$ref: '#/definitions/ApiResponse'
'/commanddetachedyaml/{id}':
post:
tags:
- estuary-agent
summary: Starts the commands in detached mode and sequentially. The commands are described by yaml.
operationId: commandDetachedIdPostYaml
consumes:
- application/json
- application/x-www-form-urlencoded
- text/plain
produces:
- application/json
parameters:
- in: body
name: commandContent
description: List of commands to run one after the other in yaml format.
required: true
schema:
type: string
- name: id
in: path
description: Command detached id set by the user
required: true
type: string
- name: Token
in: header
description: Token
required: false
type: string
responses:
'200':
description: Commands start success
schema:
$ref: '#/definitions/ApiResponse'
'201':
description: Created
'401':
description: Unauthorized
'403':
description: Forbidden
'404':
description: Not Found
'500':
description: Commands start failure
schema:
$ref: '#/definitions/ApiResponse'
/commandparallel:
post:
tags:
- estuary-agent
summary: Starts multiple commands in blocking mode parallel. Set the client timeout at needed value.
operationId: commandPost_2
consumes:
- application/json
- application/x-www-form-urlencoded
- text/plain
produces:
- application/json
parameters:
- in: body
name: commands
description: Commands to run. E.g. ls -lrt
required: true
schema:
type: string
- name: Token
in: header
description: Token
required: false
type: string
responses:
'200':
description: commands start success
schema:
$ref: '#/definitions/ApiResponse'
'201':
description: Created
'401':
description: Unauthorized
'403':
description: Forbidden
'404':
description: Not Found
'500':
description: commands start failure
schema:
$ref: '#/definitions/ApiResponse'
/commandyaml:
post:
tags:
- estuary-agent
summary: Starts multiple commands in blocking mode sequentially. The commands are described in yaml format. Set the client timeout at needed value.
operationId: commandPost
consumes:
- application/json
- application/x-www-form-urlencoded
- text/plain
produces:
- application/json
parameters:
- in: body
name: commands
description: Commands to run in yaml format
required: true
schema:
type: string
- name: Token
in: header
description: Token
required: false
type: string
responses:
'200':
description: Commands start success
schema:
$ref: '#/definitions/ApiResponse'
'201':
description: Created
'401':
description: Unauthorized
'403':
description: Forbidden
'404':
description: Not Found
'500':
description: Commands start failure
schema:
$ref: '#/definitions/ApiResponse'
/env:
get:
tags:
- estuary-agent
summary: Print all environment variables
operationId: envGet
produces:
- application/json
parameters:
- name: Token
in: header
description: Token
required: false
type: string
responses:
'200':
description: List of the entire environment variables
schema:
$ref: '#/definitions/ApiResponse'
'401':
description: Unauthorized
'403':
description: Forbidden
'404':
description: Not Found
post:
tags:
- estuary-agent
summary: Set environment variables
operationId: envPost
consumes:
- application/json
produces:
- application/json
parameters:
- in: body
name: envVars
description: List of env vars by key-value pair in JSON format
required: true
schema:
type: string
- name: Token
in: header
description: Authentication Token
required: false
type: string
responses:
'200':
description: Set environment variables success
schema:
$ref: '#/definitions/ApiResponse'
'201':
description: Created
'401':
description: Unauthorized
'403':
description: Forbidden
'404':
description: Not Found
'500':
description: Set environment variables failure
schema:
$ref: '#/definitions/ApiResponse'
delete:
tags:
- estuary-agent
summary: Deletes the custom defined env vars contained in the virtual environment
operationId: envDelete
produces:
- application/json
parameters:
- name: Token
in: header
description: Token
required: false
type: string
responses:
'200':
description: 'Deletes the entire virtual env vars, but keeping system env vars.'
schema:
$ref: '#/definitions/ApiResponse'
'204':
description: No Content
'401':
description: Unauthorized
'403':
description: Forbidden
'/env/{env_name}':
get:
tags:
- estuary-agent
summary: Gets the environment variable value from the environment
operationId: envEnvNameGet
produces:
- application/json
parameters:
- name: env_name
in: path
description: The name of the env var to get value from
required: true
type: string
- name: Token
in: header
description: Token
required: false
type: string
responses:
'200':
description: Get env var success
schema:
$ref: '#/definitions/ApiResponse'
'401':
description: Unauthorized
'403':
description: Forbidden
'404':
description: Not Found
'500':
description: Get env var failure
schema:
$ref: '#/definitions/ApiResponse'
/file:
get:
tags:
- estuary-agent
summary: Gets the content of the file
operationId: fileGet
consumes:
- application/octet-stream
- text/plain
produces:
- application/json
- application/zip
parameters:
- name: File-Path
in: header
description: Target file path to get
required: false
type: string
- name: Token
in: header
description: Token
required: false
type: string
responses:
'200':
description: 'The content of the file in plain text, success'
schema:
$ref: '#/definitions/ApiResponse'
'401':
description: Unauthorized
'403':
description: Forbidden
'404':
description: Not Found
'500':
description: 'Failure, the file content could not be read'
schema:
$ref: '#/definitions/ApiResponse'
post:
tags:
- estuary-agent
summary: Uploads a file no mater the format. Binary or raw
operationId: filePost
consumes:
- application/octet-stream
- application/json
- application/x-www-form-urlencoded
- text/plain
produces:
- application/json
- text/plain
parameters:
- in: body
name: content
description: The content of the file
required: false
schema:
type: string
format: byte
- name: File-Path
in: header
description: File-Path
required: true
type: string
- name: Token
in: header
description: Token
required: false
type: string
responses:
'200':
description: The content of the file was uploaded successfully
schema:
$ref: '#/definitions/ApiResponse'
'201':
description: Created
'401':
description: Unauthorized
'403':
description: Forbidden
'404':
description: Not Found
'500':
description: 'Failure, the file content could not be uploaded'
schema:
$ref: '#/definitions/ApiResponse'
put:
tags:
- estuary-agent
summary: Uploads a file no mater the format. Binary or raw
operationId: filePut
consumes:
- application/octet-stream
- application/json
- application/x-www-form-urlencoded
- text/plain
produces:
- application/json
- text/plain
parameters:
- in: body
name: content
description: The content of the file
required: false
schema:
type: string
format: byte
- name: File-Path
in: header
description: File-Path
required: true
type: string
- name: Token
in: header
description: Token
required: false
type: string
responses:
'200':
description: The content of the file was uploaded successfully
schema:
$ref: '#/definitions/ApiResponse'
'201':
description: Created
'401':
description: Unauthorized
'403':
description: Forbidden
'404':
description: Not Found
'500':
description: 'Failure, the file content could not be uploaded'
schema:
$ref: '#/definitions/ApiResponse'
/folder:
get:
tags:
- estuary-agent
summary: Gets the folder as zip archive. Useful to get test results folder
operationId: folderGet
produces:
- application/json
- application/zip
parameters:
- name: Folder-Path
in: header
description: Target folder path to get as zip
required: false
type: string
- name: Token
in: header
description: Token
required: false
type: string
responses:
'200':
description: The content of the folder as zip archive
schema:
$ref: '#/definitions/ApiResponse'
'401':
description: Unauthorized
'403':
description: Forbidden
'404':
description: Not Found
'500':
description: The content of the folder could not be obtained
schema:
$ref: '#/definitions/ApiResponse'
/ping:
get:
tags:
- estuary-agent
summary: Ping endpoint which replies with pong
operationId: pingGet
produces:
- application/json
parameters:
- name: Token
in: header
description: Token
required: false
type: string
responses:
'200':
description: Ping endpoint which replies with pong. Useful when checking the alive status of the service
schema:
$ref: '#/definitions/ApiResponse'
'401':
description: Unauthorized
'403':
description: Forbidden
'404':
description: Not Found
definitions:
ApiResponse:
type: object
properties:
code:
type: integer
format: int32
description:
type: object
message:
type: string
name:
type: string
path:
type: string
timestamp:
type: string
version:
type: string
title: ApiResponse
ApiResponseCommandDescription:
type: object
properties:
code:
type: integer
format: int32
description:
$ref: '#/definitions/CommandDescription'
message:
type: string
name:
type: string
path:
type: string
timestamp:
type: string
version:
type: string
title: ApiResponseCommandDescription
CommandDescription:
type: object
properties:
commands:
type: object
additionalProperties:
$ref: '#/definitions/CommandStatus'
duration:
type: number
format: float
finished:
type: boolean
finishedat:
type: string
id:
type: string
pid:
type: integer
format: int64
processes:
type: array
items:
$ref: '#/definitions/ProcessInfo'
started:
type: boolean
startedat:
type: string
title: CommandDescription
CommandDetails:
type: object
properties:
args:
type: array
items:
type: string
code:
type: integer
format: int64
err:
type: string
out:
type: string
pid:
type: integer
format: int64
title: CommandDetails
CommandStatus:
type: object
properties:
details:
$ref: '#/definitions/CommandDetails'
duration:
type: number
format: float
finishedat:
type: string
example: 'yyyy-MM-dd HH:mm:ss.SSSSSS'
startedat:
type: string
example: 'yyyy-MM-dd HH:mm:ss.SSSSSS'
status:
type: string
title: CommandStatus
ProcessHandle:
type: object
properties:
alive:
type: boolean
title: ProcessHandle
ProcessInfo:
type: object
properties:
arguments:
type: array
items:
type: string
children:
type: array
items:
$ref: '#/definitions/ProcessHandle'
name:
type: string
parent:
type: integer
format: int64
pid:
type: integer
format: int64
status:
type: string
username:
type: string
title: ProcessInfo
'''
| 26.131167
| 153
| 0.539765
| 1,900
| 21,715
| 6.165789
| 0.138421
| 0.050363
| 0.058045
| 0.08997
| 0.794537
| 0.740162
| 0.714981
| 0.684336
| 0.679983
| 0.641229
| 0
| 0.024615
| 0.390099
| 21,715
| 830
| 154
| 26.162651
| 0.859937
| 0
| 0
| 0.840964
| 0
| 0.003614
| 0.998618
| 0.074557
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
ffe928f3ff720d9fc9b1bd5abb1bc15c2a592a46
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/html5lib/treebuilders/etree_lxml.py
|
GiulianaPola/select_repeats
|
17a0d053d4f874e42cf654dd142168c2ec8fbd11
|
[
"MIT"
] | 2
|
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/html5lib/treebuilders/etree_lxml.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19
|
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/html5lib/treebuilders/etree_lxml.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/39/ac/c3/1d918efeae059d5b380e1b38873ce7d89c0600eb3eadf57c2c0954196e
| 96
| 96
| 0.895833
| 9
| 96
| 9.555556
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.395833
| 0
| 96
| 1
| 96
| 96
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
081296e88e1f3bbc279835f0c7403eaf102f2af4
| 2,383
|
py
|
Python
|
tests/day_5_test.py
|
rickdkk/advent-of-code-2021
|
49eed2d191d7b4a12f10ec8af9dc57408ba1b1fe
|
[
"MIT"
] | null | null | null |
tests/day_5_test.py
|
rickdkk/advent-of-code-2021
|
49eed2d191d7b4a12f10ec8af9dc57408ba1b1fe
|
[
"MIT"
] | null | null | null |
tests/day_5_test.py
|
rickdkk/advent-of-code-2021
|
49eed2d191d7b4a12f10ec8af9dc57408ba1b1fe
|
[
"MIT"
] | null | null | null |
from pathlib import Path
from advent.day_5 import Line, Point, make_grid, read_lines, fill_grid, count_points
TEST_GRID = [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
EXAMPLE_POINTS = [
[(0, 9), (5, 9)],
[(8, 0), (0, 8)],
[(9, 4), (3, 4)],
[(2, 2), (2, 1)],
[(7, 0), (7, 4)],
[(6, 4), (2, 0)],
[(0, 9), (2, 9)],
[(3, 4), (1, 4)],
[(0, 0), (8, 8)],
[(5, 5), (8, 2)],
]
EXAMPLE = [
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 1, 1, 2, 1, 1, 1, 2, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[2, 2, 2, 1, 1, 1, 0, 0, 0, 0],
]
DIAG_EXAMPLE = [
[1, 0, 1, 0, 0, 0, 0, 1, 1, 0],
[0, 1, 1, 1, 0, 0, 0, 2, 0, 0],
[0, 0, 2, 0, 1, 0, 1, 1, 1, 0],
[0, 0, 0, 1, 0, 2, 0, 2, 0, 0],
[0, 1, 1, 2, 3, 1, 3, 2, 1, 1],
[0, 0, 0, 1, 0, 2, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 1, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 1, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[2, 2, 2, 1, 1, 1, 0, 0, 0, 0],
]
def test_read_points():
example = []
for p1, p2 in EXAMPLE_POINTS:
example.append(Line(Point(*p1), Point(*p2)))
assert example == read_lines(Path("./data/day_5_example.txt"))
def test_points():
line1 = Line(Point(1, 1), Point(1, 3)) # from example
assert line1.interpolate() == [Point(1, 1), Point(1, 2), Point(1, 3)]
line2 = Line(Point(9, 7), Point(7, 7)) # from example
assert line2.interpolate() == [Point(9, 7), Point(8, 7), Point(7, 7)]
def test_make_grid():
assert make_grid(10, 10) == TEST_GRID
def test_fill_grid():
lines = read_lines(Path("./data/day_5_example.txt"))
assert fill_grid(TEST_GRID, lines) == EXAMPLE
def test_count_points():
assert count_points(EXAMPLE) == 5
def test_fill_grid_diagonal():
lines = read_lines(Path("./data/day_5_example.txt"))
assert fill_grid(TEST_GRID, lines, True) == DIAG_EXAMPLE
| 26.775281
| 84
| 0.445237
| 504
| 2,383
| 2.029762
| 0.085317
| 0.414467
| 0.542522
| 0.637341
| 0.466276
| 0.434018
| 0.416422
| 0.385142
| 0.353861
| 0.352884
| 0
| 0.223606
| 0.292488
| 2,383
| 88
| 85
| 27.079545
| 0.383155
| 0.010491
| 0
| 0.314286
| 0
| 0
| 0.030573
| 0.030573
| 0
| 0
| 0
| 0
| 0.1
| 1
| 0.085714
| false
| 0
| 0.028571
| 0
| 0.114286
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
4bb19ccab861ee723283989d3a343b7583974161
| 47
|
py
|
Python
|
neuron/__init__.py
|
enry12/self_organizing_map
|
d2e6e1bbce626c21bbe734f20cfb461c6deca1fb
|
[
"MIT"
] | 23
|
2018-05-30T16:44:19.000Z
|
2021-10-30T07:26:59.000Z
|
neuron/__init__.py
|
enricivi/growing_hierarchical_som
|
d2e6e1bbce626c21bbe734f20cfb461c6deca1fb
|
[
"MIT"
] | 1
|
2021-11-23T18:30:58.000Z
|
2021-11-23T18:30:58.000Z
|
neuron/__init__.py
|
enry12/self_organizing_map
|
d2e6e1bbce626c21bbe734f20cfb461c6deca1fb
|
[
"MIT"
] | 9
|
2019-07-08T11:23:27.000Z
|
2021-09-18T06:27:24.000Z
|
from neuron.neuron_builder import NeuronBuilder
| 47
| 47
| 0.914894
| 6
| 47
| 7
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.06383
| 47
| 1
| 47
| 47
| 0.954545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
29b3d8b22994b272ee60dba6f6150f47067799a4
| 167
|
py
|
Python
|
state/LifeState.py
|
ChrisUnsworth/Life
|
4c013102472773536c03a809f03d650bac0cee80
|
[
"MIT"
] | null | null | null |
state/LifeState.py
|
ChrisUnsworth/Life
|
4c013102472773536c03a809f03d650bac0cee80
|
[
"MIT"
] | null | null | null |
state/LifeState.py
|
ChrisUnsworth/Life
|
4c013102472773536c03a809f03d650bac0cee80
|
[
"MIT"
] | null | null | null |
class LifeState:
def size(self) -> int:
pass
def value(self, x: int, y: int) -> int:
pass
def evolve(self) -> 'LifeState':
pass
| 15.181818
| 43
| 0.508982
| 21
| 167
| 4.047619
| 0.52381
| 0.164706
| 0.235294
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.359281
| 167
| 10
| 44
| 16.7
| 0.794393
| 0
| 0
| 0.428571
| 0
| 0
| 0.053892
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.428571
| false
| 0.428571
| 0
| 0
| 0.571429
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
29db3cd6576b0258f712894fffc6ea915ed35c65
| 45
|
py
|
Python
|
src/haddock/modules/topology/__init__.py
|
sverhoeven/haddock3
|
d863106f21ebc128f18c6d73a0d15b97824d050c
|
[
"Apache-2.0"
] | 1
|
2021-12-07T16:17:22.000Z
|
2021-12-07T16:17:22.000Z
|
src/haddock/modules/topology/__init__.py
|
sverhoeven/haddock3
|
d863106f21ebc128f18c6d73a0d15b97824d050c
|
[
"Apache-2.0"
] | 1
|
2021-07-24T15:34:58.000Z
|
2021-07-24T15:34:58.000Z
|
src/haddock/modules/topology/__init__.py
|
sverhoeven/haddock3
|
d863106f21ebc128f18c6d73a0d15b97824d050c
|
[
"Apache-2.0"
] | null | null | null |
"""HADDOCK3 modules to create topologies."""
| 22.5
| 44
| 0.733333
| 5
| 45
| 6.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025
| 0.111111
| 45
| 1
| 45
| 45
| 0.8
| 0.844444
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
4b19eb8c6a697b4a702bf0b04330519cababc688
| 133
|
py
|
Python
|
exercises/01_Primeiros Passos/exe_14.py
|
MariaTrindade/CursoPython
|
2c60dd670747db08011d9dd33e3bbfd5795b06e8
|
[
"Apache-2.0"
] | 1
|
2021-05-11T18:30:17.000Z
|
2021-05-11T18:30:17.000Z
|
exercises/01_Primeiros Passos/exe_14.py
|
MariaTrindade/CursoPython
|
2c60dd670747db08011d9dd33e3bbfd5795b06e8
|
[
"Apache-2.0"
] | null | null | null |
exercises/01_Primeiros Passos/exe_14.py
|
MariaTrindade/CursoPython
|
2c60dd670747db08011d9dd33e3bbfd5795b06e8
|
[
"Apache-2.0"
] | null | null | null |
"""
Resolva essa expressão:
100−413 ⋅ (20−5×4)
5
"""
resultado = (100 / 5 - 413 / 5) * ((20 - 5 * 4) / 5)
print(resultado)
| 14.777778
| 52
| 0.503759
| 24
| 133
| 2.958333
| 0.541667
| 0.056338
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 0.278195
| 133
| 9
| 53
| 14.777778
| 0.447917
| 0.398496
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
d99ce487aa917cadbbc14676561928d8b0379dcb
| 24
|
py
|
Python
|
spikeinterface/version.py
|
marcbue/spikeinterface
|
d3462eeabcb9f0b9816004dd47355e40f4de1ac5
|
[
"MIT"
] | null | null | null |
spikeinterface/version.py
|
marcbue/spikeinterface
|
d3462eeabcb9f0b9816004dd47355e40f4de1ac5
|
[
"MIT"
] | null | null | null |
spikeinterface/version.py
|
marcbue/spikeinterface
|
d3462eeabcb9f0b9816004dd47355e40f4de1ac5
|
[
"MIT"
] | null | null | null |
version = '0.90.1.dev0'
| 12
| 23
| 0.625
| 5
| 24
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.238095
| 0.125
| 24
| 1
| 24
| 24
| 0.47619
| 0
| 0
| 0
| 0
| 0
| 0.458333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
d9ab9820dd89804d38ac3ccf7f661226746bdcc9
| 575
|
py
|
Python
|
chat_service/chat_api_service/chat_api_youtube/config/settings.py
|
YoungchanChang/ES_BERT_CHAT
|
5dd919d3ba559ca9171ca73bd9e1052734f3060e
|
[
"Apache-2.0"
] | 1
|
2022-02-13T03:09:23.000Z
|
2022-02-13T03:09:23.000Z
|
chat_service/chat_api_service/chat_api_youtube/config/settings.py
|
YoungchanChang/ES_BERT_CHAT
|
5dd919d3ba559ca9171ca73bd9e1052734f3060e
|
[
"Apache-2.0"
] | null | null | null |
chat_service/chat_api_service/chat_api_youtube/config/settings.py
|
YoungchanChang/ES_BERT_CHAT
|
5dd919d3ba559ca9171ca73bd9e1052734f3060e
|
[
"Apache-2.0"
] | 1
|
2022-02-13T03:09:23.000Z
|
2022-02-13T03:09:23.000Z
|
from chat_core.settings import *
from chat_core.chat_log_config import config_basic
config_basic['handlers']['access']['filename'] = str(chat_log_path.joinpath('chat_api_youtube', 'access', 'access.log'))
config_basic['handlers']['warning']['filename'] = str(chat_log_path.joinpath('chat_api_youtube', 'warning', 'warning.log'))
config_basic['handlers']['error']['filename'] = str(chat_log_path.joinpath('chat_api_youtube', 'error', 'error.log'))
config_basic['handlers']['critical']['filename'] = str(chat_log_path.joinpath('chat_api_youtube', 'critical', 'critical.log'))
| 71.875
| 126
| 0.756522
| 78
| 575
| 5.25641
| 0.24359
| 0.085366
| 0.185366
| 0.17561
| 0.429268
| 0.429268
| 0.429268
| 0.429268
| 0.429268
| 0
| 0
| 0
| 0.050435
| 575
| 7
| 127
| 82.142857
| 0.750916
| 0
| 0
| 0
| 0
| 0
| 0.386087
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
d9b6ad7cf86e6a04fb915cbb9d142835aeae9556
| 1,721
|
py
|
Python
|
stacks_and_queues/sort_stack/sort_stack_test.py
|
hanjasn/ctci
|
69c8c65d71e7f6e88b669dc402e64a0cf6223fbf
|
[
"MIT"
] | null | null | null |
stacks_and_queues/sort_stack/sort_stack_test.py
|
hanjasn/ctci
|
69c8c65d71e7f6e88b669dc402e64a0cf6223fbf
|
[
"MIT"
] | null | null | null |
stacks_and_queues/sort_stack/sort_stack_test.py
|
hanjasn/ctci
|
69c8c65d71e7f6e88b669dc402e64a0cf6223fbf
|
[
"MIT"
] | null | null | null |
import unittest
from sort_stack import *
class StackTest(unittest.TestCase):
def setUp(self) -> None:
self.stack = Stack()
def test_1(self) -> None:
self.stack.push(5)
self.stack.push(4)
self.stack.push(3)
self.stack.push(2)
self.stack.push(1)
self.assertEqual("1, 2, 3, 4, 5", self.stack.print_values())
self.stack.sort()
self.assertEqual("1, 2, 3, 4, 5", self.stack.print_values())
def test_2(self) -> None:
self.stack.push(1)
self.stack.push(2)
self.stack.push(3)
self.stack.push(4)
self.stack.push(5)
self.assertEqual("5, 4, 3, 2, 1", self.stack.print_values())
self.stack.sort()
self.assertEqual("1, 2, 3, 4, 5", self.stack.print_values())
def test_3(self) -> None:
self.stack.push(9)
self.stack.push(10)
self.stack.push(6)
self.stack.push(2)
self.stack.push(3)
self.stack.push(5)
self.stack.push(4)
self.stack.push(7)
self.stack.push(8)
self.stack.push(1)
self.assertEqual("1, 8, 7, 4, 5, 3, 2, 6, 10, 9", self.stack.print_values())
self.stack.sort()
self.assertEqual("1, 2, 3, 4, 5, 6, 7, 8, 9, 10", self.stack.print_values())
def test_4(self) -> None:
self.assertEqual("", self.stack.print_values())
self.stack.sort()
self.assertEqual("", self.stack.print_values())
def test_5(self) -> None:
self.stack.push(1)
self.assertEqual("1", self.stack.print_values())
self.stack.sort()
self.assertEqual("1", self.stack.print_values())
def test_5(self) -> None:
self.stack.push(1)
self.stack.push(2)
self.assertEqual("2, 1", self.stack.print_values())
self.stack.sort()
self.assertEqual("1, 2", self.stack.print_values())
| 29.169492
| 80
| 0.625799
| 275
| 1,721
| 3.847273
| 0.098182
| 0.357278
| 0.282609
| 0.226843
| 0.793951
| 0.761815
| 0.722117
| 0.642722
| 0.642722
| 0.597354
| 0
| 0.055994
| 0.190587
| 1,721
| 59
| 81
| 29.169492
| 0.703518
| 0
| 0
| 0.634615
| 0
| 0
| 0.069686
| 0
| 0
| 0
| 0
| 0
| 0.230769
| 1
| 0.134615
| false
| 0
| 0.038462
| 0
| 0.192308
| 0.230769
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
d9d43ae5afed6ed4993e79ddcaf1c96076b1f7a7
| 165
|
py
|
Python
|
package/__init__.py
|
radixzz/cdnjs-transform
|
fd4f4632bd3381be73eed768a96a9c0740b4759b
|
[
"MIT"
] | null | null | null |
package/__init__.py
|
radixzz/cdnjs-transform
|
fd4f4632bd3381be73eed768a96a9c0740b4759b
|
[
"MIT"
] | null | null | null |
package/__init__.py
|
radixzz/cdnjs-transform
|
fd4f4632bd3381be73eed768a96a9c0740b4759b
|
[
"MIT"
] | null | null | null |
import sys
import os
from pathlib import Path
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '.')))
from .transformer import Transformer
| 27.5
| 81
| 0.775758
| 26
| 165
| 4.769231
| 0.5
| 0.145161
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006667
| 0.090909
| 165
| 6
| 82
| 27.5
| 0.82
| 0
| 0
| 0
| 0
| 0
| 0.006024
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.8
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
8a163ebfb156943de32bc1c2e2b955d3ec70c756
| 350
|
py
|
Python
|
py/tests/problems/arr/max_subarray_sum1_test.py
|
bmoretz/Daily-Coding-Problem
|
f79e062e9f6e7b18b7e95c071fbe71ad104affcb
|
[
"MIT"
] | 1
|
2020-06-26T13:28:43.000Z
|
2020-06-26T13:28:43.000Z
|
py/tests/problems/arr/max_subarray_sum1_test.py
|
bmoretz/Daily-Coding-Problem
|
f79e062e9f6e7b18b7e95c071fbe71ad104affcb
|
[
"MIT"
] | 7
|
2021-11-18T19:46:08.000Z
|
2022-03-12T01:03:01.000Z
|
py/tests/problems/arr/max_subarray_sum1_test.py
|
bmoretz/Daily-Coding-Problem
|
f79e062e9f6e7b18b7e95c071fbe71ad104affcb
|
[
"MIT"
] | null | null | null |
import unittest
from dcp.problems.arr.sum_contigous_subarray import max_subarray_sum1
class Test_MaxSubarraySum(unittest.TestCase):
def setUp(self):
pass
def test_case1(self):
assert max_subarray_sum1( [34, -50, 42, 14, -5, 86]) == 137
def test_case2(self):
assert max_subarray_sum1( [-5, -1, 8, -9]) == 0
| 25
| 69
| 0.662857
| 50
| 350
| 4.42
| 0.66
| 0.149321
| 0.20362
| 0.190045
| 0.226244
| 0
| 0
| 0
| 0
| 0
| 0
| 0.087912
| 0.22
| 350
| 14
| 70
| 25
| 0.721612
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.222222
| 1
| 0.333333
| false
| 0.111111
| 0.222222
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
8a3c0588456065c0447a26217b55fbfccbddc850
| 919
|
py
|
Python
|
compile_TECS_legacy.py
|
JetStarBlues/Nand-2-Tetris
|
c27b5c2ac659f1edb63d36d89bf87e226bc5672c
|
[
"MIT"
] | null | null | null |
compile_TECS_legacy.py
|
JetStarBlues/Nand-2-Tetris
|
c27b5c2ac659f1edb63d36d89bf87e226bc5672c
|
[
"MIT"
] | null | null | null |
compile_TECS_legacy.py
|
JetStarBlues/Nand-2-Tetris
|
c27b5c2ac659f1edb63d36d89bf87e226bc5672c
|
[
"MIT"
] | null | null | null |
'''------------------------------ Imports ------------------------------'''
import Assembler.asm2bin_TECS_legacy
'''------------------------------- Main -------------------------------'''
# inputDirPath = '../N2T_Code/Programs/Assembly/Tests/test1_add'
# inputDirPath = '../N2T_Code/Programs/Assembly/Tests/test2_flip'
# inputDirPath = '../N2T_Code/Programs/Assembly/Tests/test3_add'
# inputDirPath = '../N2T_Code/Programs/Assembly/Tests/test4_gt0'
# inputDirPath = '../N2T_Code/Programs/Assembly/Tests/test5_array'
# inputDirPath = '../N2T_Code/Programs/Assembly/Tests/test5a_array'
inputDirPath = '../N2T_Code/Programs/Assembly/Tests/test6_mult'
# inputDirPath = '../N2T_Code/Programs/Assembly/Tests/test7_fill'
# inputDirPath = '../N2T_Code/Programs/Assembly/Tests/test8_rect_buffer'
# inputDirPath = '../N2T_Code/Programs/Assembly/Tests/test8_rect_cmd'
Assembler.asm2bin_TECS_legacy.genBINFile( inputDirPath )
| 45.95
| 75
| 0.670294
| 95
| 919
| 6.210526
| 0.336842
| 0.254237
| 0.322034
| 0.457627
| 0.735593
| 0.735593
| 0.464407
| 0.166102
| 0
| 0
| 0
| 0.02662
| 0.059848
| 919
| 19
| 76
| 48.368421
| 0.65625
| 0.714908
| 0
| 0
| 0
| 0
| 0.264368
| 0.264368
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
8a8f1d380cde1ebef456a77f3b0039ac60a1020e
| 24
|
py
|
Python
|
__main__.py
|
cesardddp/discord-bot
|
1bc6ae0622360a95562c982d995d58da0296b7fd
|
[
"BSD-3-Clause"
] | 3
|
2020-10-30T17:50:13.000Z
|
2021-08-22T01:38:34.000Z
|
__main__.py
|
cesardddp/discord-bot
|
1bc6ae0622360a95562c982d995d58da0296b7fd
|
[
"BSD-3-Clause"
] | 2
|
2020-10-27T21:49:30.000Z
|
2020-10-30T17:50:53.000Z
|
__main__.py
|
cesardddp/discord-bot
|
1bc6ae0622360a95562c982d995d58da0296b7fd
|
[
"BSD-3-Clause"
] | null | null | null |
import app
import tests
| 8
| 12
| 0.833333
| 4
| 24
| 5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 24
| 2
| 13
| 12
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
8a9b21775f01fb6c572390d18f2e2ed168d2f365
| 29,488
|
py
|
Python
|
tests/unit/test_dataset_geo.py
|
nsylv/tamr-client
|
14bfbbadeebf82fc168dbd9fa6184aef0235a816
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/test_dataset_geo.py
|
nsylv/tamr-client
|
14bfbbadeebf82fc168dbd9fa6184aef0235a816
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/test_dataset_geo.py
|
nsylv/tamr-client
|
14bfbbadeebf82fc168dbd9fa6184aef0235a816
|
[
"Apache-2.0"
] | null | null | null |
from copy import deepcopy
from functools import partial
import json
from unittest import TestCase
import pytest
import responses
from tamr_unify_client import Client
from tamr_unify_client.auth import UsernamePasswordAuth
from tamr_unify_client.dataset.resource import Dataset
class TestDatasetGeo(TestCase):
def setUp(self):
auth = UsernamePasswordAuth("username", "password")
self.tamr = Client(auth)
def test_record_to_feature(self):
empty_record = {"id": "1"}
def key_value_single(rec):
return rec["id"]
actual = Dataset._record_to_feature(
empty_record, key_value_single, ["id"], "geom"
)
expected = {"type": "Feature", "id": "1"}
self.assertEqual(expected, actual)
record_with_point = {"id": "1", "geom": {"point": [1, 1]}}
actual = Dataset._record_to_feature(
record_with_point, key_value_single, ["id"], "geom"
)
expected = {
"type": "Feature",
"id": "1",
"geometry": {"type": "Point", "coordinates": [1, 1]},
}
self.assertEqual(expected, actual)
record_with_multi_point = {"id": "1", "geom": {"multiPoint": [[1, 1]]}}
actual = Dataset._record_to_feature(
record_with_multi_point, key_value_single, ["id"], "geom"
)
expected = {
"type": "Feature",
"id": "1",
"geometry": {"type": "MultiPoint", "coordinates": [[1, 1]]},
}
self.assertEqual(expected, actual)
record_with_line = {"id": "1", "geom": {"lineString": [[1, 1], [2, 2]]}}
actual = Dataset._record_to_feature(
record_with_line, key_value_single, ["id"], "geom"
)
expected = {
"type": "Feature",
"id": "1",
"geometry": {"type": "LineString", "coordinates": [[1, 1], [2, 2]]},
}
self.assertEqual(expected, actual)
record_with_multi_line = {
"id": "1",
"geom": {"multiLineString": [[[1, 1], [2, 2]]]},
}
actual = Dataset._record_to_feature(
record_with_multi_line, key_value_single, ["id"], "geom"
)
expected = {
"type": "Feature",
"id": "1",
"geometry": {"type": "MultiLineString", "coordinates": [[[1, 1], [2, 2]]]},
}
self.assertEqual(expected, actual)
record_with_polygon = {
"id": "1",
"geom": {"polygon": [[[1, 1], [2, 2], [3, 3]]]},
}
actual = Dataset._record_to_feature(
record_with_polygon, key_value_single, ["id"], "geom"
)
expected = {
"type": "Feature",
"id": "1",
"geometry": {"type": "Polygon", "coordinates": [[[1, 1], [2, 2], [3, 3]]]},
}
self.assertEqual(expected, actual)
record_with_multi_polygon = {
"id": "1",
"geom": {"multiPolygon": [[[[1, 1], [2, 2], [3, 3]]]]},
}
actual = Dataset._record_to_feature(
record_with_multi_polygon, key_value_single, ["id"], "geom"
)
expected = {
"type": "Feature",
"id": "1",
"geometry": {
"type": "MultiPolygon",
"coordinates": [[[[1, 1], [2, 2], [3, 3]]]],
},
}
self.assertEqual(expected, actual)
record_with_full_geo = {
"id": "1",
"geom": {
"point": None,
"multiPoint": None,
"lineString": None,
"multiLineString": None,
"polygon": None,
"multiPolygon": [[[[1, 1], [2, 2], [3, 3]]]],
},
}
actual = Dataset._record_to_feature(
record_with_full_geo, key_value_single, ["id"], "geom"
)
expected = {
"type": "Feature",
"id": "1",
"geometry": {
"type": "MultiPolygon",
"coordinates": [[[[1, 1], [2, 2], [3, 3]]]],
},
}
self.assertEqual(expected, actual)
record_with_null_geo = {
"id": "1",
"geom": {
"point": None,
"multiPoint": None,
"lineString": None,
"multiLineString": None,
"polygon": None,
"multiPolygon": None,
},
}
actual = Dataset._record_to_feature(
record_with_null_geo, key_value_single, ["id"], "geom"
)
expected = {"geometry": None, "type": "Feature", "id": "1"}
self.assertEqual(expected, actual)
record_with_bbox = {"id": "1", "bbox": [[0, 0], [1, 1]]}
actual = Dataset._record_to_feature(
record_with_bbox, key_value_single, ["id"], "geom"
)
expected = {"type": "Feature", "id": "1", "bbox": [[0, 0], [1, 1]]}
self.assertEqual(expected, actual)
record_with_props = {"id": "1", "p1": "v1", "p2": "v2"}
actual = Dataset._record_to_feature(
record_with_props, key_value_single, ["id"], "geom"
)
expected = {
"type": "Feature",
"id": "1",
"properties": {"p1": "v1", "p2": "v2"},
}
self.assertEqual(expected, actual)
def key_value_composite(rec):
return [rec[v] for v in ["id1", "id2"]]
record_with_composite_key = {"id1": "1", "id2": "2"}
actual = Dataset._record_to_feature(
record_with_composite_key, key_value_composite, ["id1", "id2"], "geom"
)
expected = {"type": "Feature", "id": ["1", "2"]}
self.assertEqual(expected, actual)
record_with_everything = {
"id1": "1",
"id2": "2",
"bbox": [[0, 0], [1, 1]],
"name": "record with everything",
"geom": {
"point": None,
"multiPoint": None,
"lineString": None,
"multiLineString": None,
"polygon": [[[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]]],
"multiPolygon": None,
},
"alternate_geom": {
"point": [1, 1],
"multiPoint": None,
"lineString": None,
"multiLineString": None,
"polygon": None,
"multiPolygon": None,
},
}
actual = Dataset._record_to_feature(
record_with_everything, key_value_composite, ["id1", "id2"], "geom"
)
expected = {
"type": "Feature",
"id": ["1", "2"],
"bbox": [[0, 0], [1, 1]],
"properties": {
"name": "record with everything",
"alternate_geom": {
"point": [1, 1],
"multiPoint": None,
"lineString": None,
"multiLineString": None,
"polygon": None,
"multiPolygon": None,
},
},
"geometry": {
"type": "Polygon",
"coordinates": [[[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]]],
},
}
self.assertEqual(expected, actual)
record_without_geo = {"id": "1", "prop1": "val1"}
actual = Dataset._record_to_feature(
record_without_geo, key_value_single, ["id"], None
)
expected = {"type": "Feature", "id": "1", "properties": {"prop1": "val1"}}
self.assertEqual(expected, actual)
@responses.activate
def test_geo_features(self):
dataset_url = f"http://localhost:9100/api/versioned/v1/datasets/1"
responses.add(responses.GET, dataset_url, json=self._dataset_json)
attributes_url = f"{dataset_url}/attributes"
responses.add(responses.GET, attributes_url, json=self._attributes_json)
records_url = f"{dataset_url}/records"
responses.add(
responses.GET,
records_url,
body="\n".join([json.dumps(rec) for rec in self._records_json]),
)
dataset = self.tamr.datasets.by_resource_id("1")
features = [feature for feature in dataset.itergeofeatures()]
self.assertEqual(6, len(features))
self.assertSetEqual(
{
"point",
"multiPoint",
"lineString",
"multiLineString",
"polygon",
"multiPolygon",
},
{feature["id"] for feature in features},
)
@responses.activate
def test_geo_features_geo_attr(self):
dataset_url = f"http://localhost:9100/api/versioned/v1/datasets/1"
responses.add(responses.GET, dataset_url, json=self._dataset_json)
# Create a dataset with multiple geometry attributes
multi_geo_attrs = deepcopy(self._attributes_json)
geo2_attr = deepcopy(multi_geo_attrs[-1])
geo2_attr["name"] = "geom2"
multi_geo_attrs.append(geo2_attr)
attributes_url = f"{dataset_url}/attributes"
responses.add(responses.GET, attributes_url, json=multi_geo_attrs)
# Create a record with multiple geometry attributes
record = {"id": "point", "geom": {"point": [1, 1]}, "geom2": {"point": [2, 2]}}
records_url = f"{dataset_url}/records"
responses.add(responses.GET, records_url, body=json.dumps(record))
dataset = self.tamr.datasets.by_resource_id("1")
# Default is to get the first attribute with geometry type
feature = next(dataset.itergeofeatures())
self.assertEqual(feature["geometry"]["coordinates"], record["geom"]["point"])
# We can override which geometry attribute is used for geometry
feature = next(dataset.itergeofeatures(geo_attr="geom2"))
self.assertEqual(feature["geometry"]["coordinates"], record["geom2"]["point"])
@responses.activate
def test_geo_interface(self):
dataset_url = f"http://localhost:9100/api/versioned/v1/datasets/1"
responses.add(responses.GET, dataset_url, json=self._dataset_json)
attributes_url = f"{dataset_url}/attributes"
responses.add(responses.GET, attributes_url, json=self._attributes_json)
records_url = f"{dataset_url}/records"
responses.add(
responses.GET,
records_url,
body="\n".join([json.dumps(rec) for rec in self._records_json]),
)
dataset = self.tamr.datasets.by_resource_id("1")
fc = dataset.__geo_interface__
self.assertEqual("FeatureCollection", fc["type"])
self.assertSetEqual(
{
"point",
"multiPoint",
"lineString",
"multiLineString",
"polygon",
"multiPolygon",
},
{feature["id"] for feature in fc["features"]},
)
def test_feature_to_record(self):
feature = {"type": "Feature", "id": "1"}
actual = Dataset._feature_to_record(feature, ["pk"], "geo")
expected = {"pk": "1"}
self.assertEqual(expected, actual)
feature = {
"type": "Feature",
"id": "1",
"geometry": {"type": "Point", "coordinates": [0, 0]},
}
actual = Dataset._feature_to_record(feature, ["pk"], "geo")
expected = {"pk": "1", "geo": {"point": [0, 0]}}
self.assertEqual(expected, actual)
feature = {
"type": "Feature",
"id": "1",
"geometry": {"type": "MultiPoint", "coordinates": [[0, 0], [1, 1]]},
}
actual = Dataset._feature_to_record(feature, ["pk"], "geo")
expected = {"pk": "1", "geo": {"multiPoint": [[0, 0], [1, 1]]}}
self.assertEqual(expected, actual)
feature = {
"type": "Feature",
"id": "1",
"geometry": {"type": "LineString", "coordinates": [[0, 0], [1, 1]]},
}
actual = Dataset._feature_to_record(feature, ["pk"], "geo")
expected = {"pk": "1", "geo": {"lineString": [[0, 0], [1, 1]]}}
self.assertEqual(expected, actual)
feature = {
"type": "Feature",
"id": "1",
"geometry": {
"type": "MultiLineString",
"coordinates": [[[0, 0], [1, 1], [2, 2]]],
},
}
actual = Dataset._feature_to_record(feature, ["pk"], "geo")
expected = {"pk": "1", "geo": {"multiLineString": [[[0, 0], [1, 1], [2, 2]]]}}
self.assertEqual(expected, actual)
feature = {
"type": "Feature",
"id": "1",
"geometry": {"type": "Polygon", "coordinates": [[[0, 0], [1, 1], [2, 2]]]},
}
actual = Dataset._feature_to_record(feature, ["pk"], "geo")
expected = {"pk": "1", "geo": {"polygon": [[[0, 0], [1, 1], [2, 2]]]}}
self.assertEqual(expected, actual)
feature = {
"type": "Feature",
"id": "1",
"geometry": {
"type": "MultiPolygon",
"coordinates": [[[[0, 0], [1, 1], [2, 2]]]],
},
}
actual = Dataset._feature_to_record(feature, ["pk"], "geo")
expected = {"pk": "1", "geo": {"multiPolygon": [[[[0, 0], [1, 1], [2, 2]]]]}}
self.assertEqual(expected, actual)
feature = {"type": "Feature", "id": "1", "geometry": None}
actual = Dataset._feature_to_record(feature, ["pk"], "geo")
expected = {"pk": "1"}
self.assertEqual(expected, actual)
feature = {
"type": "Feature",
"id": "1",
"bbox": [0, 0, 1, 1],
"geometry": {"type": "Point", "coordinates": [0, 0]},
}
actual = Dataset._feature_to_record(feature, ["pk"], "geo")
expected = {"pk": "1", "geo": {"point": [0, 0]}, "bbox": [0, 0, 1, 1]}
self.assertEqual(expected, actual)
feature = {
"type": "Feature",
"id": "1",
"bbox": None,
"geometry": {"type": "Point", "coordinates": [0, 0]},
}
actual = Dataset._feature_to_record(feature, ["pk"], "geo")
expected = {"pk": "1", "geo": {"point": [0, 0]}}
self.assertEqual(expected, actual)
feature = {
"type": "Feature",
"id": "1",
"bbox": [0, 0, 1, 1],
"geometry": {"type": "Point", "coordinates": [0, 0]},
"properties": {"prop1": "val1", "prop2": "val2"},
}
actual = Dataset._feature_to_record(feature, ["pk"], "geo")
expected = {
"pk": "1",
"geo": {"point": [0, 0]},
"bbox": [0, 0, 1, 1],
"prop1": "val1",
"prop2": "val2",
}
self.assertEqual(expected, actual)
feature = {
"type": "Feature",
"id": "1",
"bbox": [0, 0, 1, 1],
"geometry": {"type": "Point", "coordinates": [0, 0]},
"properties": None,
}
actual = Dataset._feature_to_record(feature, ["pk"], "geo")
expected = {"pk": "1", "geo": {"point": [0, 0]}, "bbox": [0, 0, 1, 1]}
self.assertEqual(expected, actual)
feature = {
"type": "Feature",
"id": "1",
"bbox": [0, 0, 1, 1],
"geometry": {"type": "Point", "coordinates": [0, 0]},
# Properties with names that conflict with
# the props in the key or geometry
# get ignored
"properties": {"pk": "val1", "geo": "val2", "bbox": "val3"},
}
actual = Dataset._feature_to_record(feature, ["pk"], "geo")
expected = {"pk": "1", "geo": {"point": [0, 0]}, "bbox": [0, 0, 1, 1]}
self.assertEqual(expected, actual)
feature = {
"type": "Feature",
"id": ["1", "2"],
"geometry": {"type": "Point", "coordinates": [0, 0]},
}
actual = Dataset._feature_to_record(feature, ["pk1", "pk2"], "geo")
expected = {"pk1": "1", "pk2": "2", "geo": {"point": [0, 0]}}
self.assertEqual(expected, actual)
feature = {"type": "Feature", "id": "1", "geometry": None}
Dataset._feature_to_record(feature, ["pk"], "geo")
# feature_to_record is required to not raise an exception
feature = {
"type": "Feature",
"id": None,
"geometry": {"type": "Point", "coordinates": [0, 0]},
}
with pytest.raises(ValueError):
Dataset._feature_to_record(feature, ["pk"], "geo")
feature = {
"type": "Feature",
"geometry": {"type": "Point", "coordinates": [0, 0]},
}
with pytest.raises(ValueError):
Dataset._feature_to_record(feature, ["pk"], "geo")
class NotAFeature:
@property
def __geo_interface__(self):
return {
"type": "Feature",
"id": "1",
"geometry": {"type": "Point", "coordinates": [0, 0]},
}
naf = NotAFeature()
actual = Dataset._feature_to_record(naf, ["pk"], "geo")
expected = {"pk": "1", "geo": {"point": [0, 0]}}
self.assertEqual(expected, actual)
@responses.activate
def test_from_geo_features(self):
def update_callback(request, snoop):
snoop["payload"] = request.body
return 200, {}, "{}"
dataset_url = f"http://localhost:9100/api/versioned/v1/datasets/1"
responses.add(responses.GET, dataset_url, json=self._dataset_json)
attributes_url = f"{dataset_url}/attributes"
responses.add(responses.GET, attributes_url, json=self._attributes_json)
records_url = f"{dataset_url}:updateRecords"
snoop = {}
responses.add_callback(
responses.POST, records_url, callback=partial(update_callback, snoop=snoop)
)
dataset = self.tamr.datasets.by_resource_id("1")
features = [
{"id": "1", "geometry": {"type": "Point", "coordinates": [0, 0]}},
{"id": "2", "geometry": {"type": "Point", "coordinates": [1, 1]}},
]
dataset.from_geo_features(features)
updates = [
{
"action": "CREATE",
"recordId": "1",
"record": {"geom": {"point": [0, 0]}, "id": "1"},
},
{
"action": "CREATE",
"recordId": "2",
"record": {"geom": {"point": [1, 1]}, "id": "2"},
},
]
expected = updates
actual = [json.loads(item) for item in snoop["payload"]]
self.assertEqual(expected, actual)
class NotAFeatureCollection:
@property
def __geo_interface__(self):
return {"type": "FeatureCollection", "features": features}
snoop["payload"] = None
nafc = NotAFeatureCollection()
dataset.from_geo_features(nafc)
actual = [json.loads(item) for item in snoop["payload"]]
self.assertEqual(expected, actual)
@responses.activate
def test_from_geo_features_geo_attr(self):
def update_callback(request, snoop):
snoop["payload"] = request.body
return 200, {}, "{}"
dataset_url = f"http://localhost:9100/api/versioned/v1/datasets/1"
responses.add(responses.GET, dataset_url, json=self._dataset_json)
# Create a dataset with multiple geometry attributes
multi_geo_attrs = deepcopy(self._attributes_json)
geo2_attr = deepcopy(multi_geo_attrs[-1])
geo2_attr["name"] = "geom2"
multi_geo_attrs.append(geo2_attr)
attributes_url = f"{dataset_url}/attributes"
responses.add(responses.GET, attributes_url, json=multi_geo_attrs)
records_url = f"{dataset_url}:updateRecords"
snoop = {}
responses.add_callback(
responses.POST, records_url, callback=partial(update_callback, snoop=snoop)
)
dataset = self.tamr.datasets.by_resource_id("1")
features = [{"id": "1", "geometry": {"type": "Point", "coordinates": [0, 0]}}]
# by default, the first attribute with geometry type is used for geometry
dataset.from_geo_features(features)
expected = [
{
"action": "CREATE",
"recordId": "1",
"record": {"geom": {"point": [0, 0]}, "id": "1"},
}
]
actual = [json.loads(item) for item in snoop["payload"]]
self.assertEqual(expected, actual)
# We can override which geometry attribute is used for geometry
snoop["payload"] = None
dataset.from_geo_features(features, geo_attr="geom2")
expected = [
{
"action": "CREATE",
"recordId": "1",
"record": {"geom2": {"point": [0, 0]}, "id": "1"},
}
]
actual = [json.loads(item) for item in snoop["payload"]]
self.assertEqual(expected, actual)
@responses.activate
def test_from_geo_features_composite_key(self):
    """Features whose ``id`` is a list map to ``compositeRecordId`` updates,
    with each id element assigned to the corresponding key attribute."""
    def update_callback(request, snoop):
        # Capture the posted body so the assertions below can inspect it.
        snoop["payload"] = request.body
        return 200, {}, "{}"

    # Dataset fixture with a two-part primary key.
    composite_key_dataset_json = deepcopy(self._dataset_json)
    composite_key_dataset_json["keyAttributeNames"] = ["id1", "id2"]
    # Plain literal: the original used an f-string with no placeholders (F541).
    dataset_url = "http://localhost:9100/api/versioned/v1/datasets/1"
    responses.add(responses.GET, dataset_url, json=composite_key_dataset_json)

    # Duplicate the single key attribute into "id1"/"id2".
    composite_key_attributes_json = deepcopy(self._attributes_json)
    composite_key_attributes_json[0]["name"] = "id1"
    composite_key_attributes_json.insert(
        1, deepcopy(composite_key_attributes_json[0])
    )
    composite_key_attributes_json[1]["name"] = "id2"
    attributes_url = f"{dataset_url}/attributes"
    responses.add(responses.GET, attributes_url, json=composite_key_attributes_json)

    records_url = f"{dataset_url}:updateRecords"
    snoop = {}
    responses.add_callback(
        responses.POST, records_url, callback=partial(update_callback, snoop=snoop)
    )

    dataset = self.tamr.datasets.by_resource_id("1")
    features = [
        {"id": ["1", "a"], "geometry": {"type": "Point", "coordinates": [0, 0]}},
        {"id": ["2", "b"], "geometry": {"type": "Point", "coordinates": [1, 1]}},
    ]
    dataset.from_geo_features(features)

    updates = [
        {
            "action": "CREATE",
            "compositeRecordId": ["1", "a"],
            "record": {"geom": {"point": [0, 0]}, "id1": "1", "id2": "a"},
        },
        {
            "action": "CREATE",
            "compositeRecordId": ["2", "b"],
            "record": {"geom": {"point": [1, 1]}, "id1": "2", "id2": "b"},
        },
    ]
    expected = updates
    actual = [json.loads(item) for item in snoop["payload"]]
    self.assertEqual(expected, actual)
# Canned dataset resource returned by the mocked GET /datasets/1 endpoint.
_dataset_json = {
    "id": "unify://unified-data/v1/datasets/1",
    "externalId": "number 1",
    "name": "dataset 1 name",
    "description": "dataset 1 description",
    "version": "dataset 1 version",
    # Single-attribute primary key; composite-key tests override this.
    "keyAttributeNames": ["id"],
    "tags": [],
    "created": {
        "username": "admin",
        "time": "2018-09-10T16:06:20.636Z",
        "version": "dataset 1 created version",
    },
    "lastModified": {
        "username": "admin",
        "time": "2018-09-10T16:06:20.851Z",
        "version": "dataset 1 modified version",
    },
    "relativeId": "datasets/1",
    "upstreamDatasetIds": [],
}
# Canned attribute list returned by the mocked GET /datasets/1/attributes
# endpoint: a string primary key plus one geometry RECORD attribute whose
# sub-attributes cover every supported geometry kind.
_attributes_json = [
    # Primary-key attribute.
    {
        "name": "id",
        "description": "primary key",
        "type": {"baseType": "STRING", "attributes": []},
        "isNullable": False,
    },
    # Geometry attribute: a RECORD with one nullable member per geometry
    # kind; array nesting depth grows with the geometry's dimensionality.
    {
        "name": "geom",
        "description": "Geospatial geometry",
        "type": {
            "baseType": "RECORD",
            "attributes": [
                # point: ARRAY<DOUBLE>
                {
                    "name": "point",
                    "type": {
                        "baseType": "ARRAY",
                        "innerType": {"baseType": "DOUBLE", "attributes": []},
                        "attributes": [],
                    },
                    "isNullable": True,
                },
                # multiPoint: ARRAY<ARRAY<DOUBLE>>
                {
                    "name": "multiPoint",
                    "type": {
                        "baseType": "ARRAY",
                        "innerType": {
                            "baseType": "ARRAY",
                            "innerType": {"baseType": "DOUBLE", "attributes": []},
                            "attributes": [],
                        },
                        "attributes": [],
                    },
                    "isNullable": True,
                },
                # lineString: ARRAY<ARRAY<DOUBLE>> (same shape as multiPoint)
                {
                    "name": "lineString",
                    "type": {
                        "baseType": "ARRAY",
                        "innerType": {
                            "baseType": "ARRAY",
                            "innerType": {"baseType": "DOUBLE", "attributes": []},
                            "attributes": [],
                        },
                        "attributes": [],
                    },
                    "isNullable": True,
                },
                # multiLineString: ARRAY<ARRAY<ARRAY<DOUBLE>>>
                {
                    "name": "multiLineString",
                    "type": {
                        "baseType": "ARRAY",
                        "innerType": {
                            "baseType": "ARRAY",
                            "innerType": {
                                "baseType": "ARRAY",
                                "innerType": {
                                    "baseType": "DOUBLE",
                                    "attributes": [],
                                },
                                "attributes": [],
                            },
                            "attributes": [],
                        },
                        "attributes": [],
                    },
                    "isNullable": True,
                },
                # polygon: ARRAY<ARRAY<ARRAY<DOUBLE>>> (same shape as multiLineString)
                {
                    "name": "polygon",
                    "type": {
                        "baseType": "ARRAY",
                        "innerType": {
                            "baseType": "ARRAY",
                            "innerType": {
                                "baseType": "ARRAY",
                                "innerType": {
                                    "baseType": "DOUBLE",
                                    "attributes": [],
                                },
                                "attributes": [],
                            },
                            "attributes": [],
                        },
                        "attributes": [],
                    },
                    "isNullable": True,
                },
                # multiPolygon: ARRAY<ARRAY<ARRAY<ARRAY<DOUBLE>>>>
                {
                    "name": "multiPolygon",
                    "type": {
                        "baseType": "ARRAY",
                        "innerType": {
                            "baseType": "ARRAY",
                            "innerType": {
                                "baseType": "ARRAY",
                                "innerType": {
                                    "baseType": "ARRAY",
                                    "innerType": {
                                        "baseType": "DOUBLE",
                                        "attributes": [],
                                    },
                                    "attributes": [],
                                },
                                "attributes": [],
                            },
                            "attributes": [],
                        },
                        "attributes": [],
                    },
                    "isNullable": True,
                },
            ],
        },
        "isNullable": False,
    },
]
# Canned records covering one example of every geometry kind; each record's
# "id" names the geometry member populated inside "geom".
_records_json = [
    {"id": "point", "geom": {"point": [1, 1]}},
    {"id": "multiPoint", "geom": {"multiPoint": [[1, 1], [2, 2]]}},
    {"id": "lineString", "geom": {"lineString": [[1, 1], [2, 2]]}},
    {
        "id": "multiLineString",
        "geom": {"multiLineString": [[[1, 1], [2, 2]], [[3, 3], [4, 4]]]},
    },
    {"id": "polygon", "geom": {"polygon": [[[1, 1], [2, 2], [3, 3], [1, 1]]]}},
    {
        "id": "multiPolygon",
        "geom": {
            "multiPolygon": [
                [[[1, 1], [2, 2], [3, 3], [1, 1]]],
                [[[4, 4], [5, 5], [6, 6], [4, 4]]],
            ]
        },
    },
]
| 36.813983
| 88
| 0.448962
| 2,563
| 29,488
| 5.005462
| 0.076863
| 0.008886
| 0.060956
| 0.076857
| 0.802245
| 0.768415
| 0.726557
| 0.705589
| 0.669733
| 0.648765
| 0
| 0.029382
| 0.389447
| 29,488
| 800
| 89
| 36.86
| 0.683182
| 0.018516
| 0
| 0.562849
| 0
| 0
| 0.178223
| 0.012789
| 0
| 0
| 0
| 0
| 0.055866
| 1
| 0.022346
| false
| 0.002793
| 0.01257
| 0.005587
| 0.053073
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
8aabeebb2a75b6ded559cf83a2da1e2408274214
| 38,256
|
py
|
Python
|
openmdao/solvers/linear/tests/test_direct_solver.py
|
LeeElvis/OpenMDAO
|
e9e002054c0ecad7467da2a7bbb8fdf68fccfb8c
|
[
"Apache-2.0"
] | null | null | null |
openmdao/solvers/linear/tests/test_direct_solver.py
|
LeeElvis/OpenMDAO
|
e9e002054c0ecad7467da2a7bbb8fdf68fccfb8c
|
[
"Apache-2.0"
] | 1
|
2015-08-12T17:58:18.000Z
|
2015-08-12T17:58:18.000Z
|
openmdao/solvers/linear/tests/test_direct_solver.py
|
hschilling/OpenMDAO
|
0ef1f0eeb934d8cd4ef0a02add6ba3c3a13e6150
|
[
"Apache-2.0"
] | 1
|
2021-01-17T14:03:48.000Z
|
2021-01-17T14:03:48.000Z
|
"""Test the DirectSolver linear solver class."""
import unittest
import numpy as np
import openmdao.api as om
from openmdao.core.tests.test_distrib_derivs import DistribExecComp
from openmdao.solvers.linear.tests.linear_test_base import LinearSolverTests
from openmdao.test_suite.components.double_sellar import DoubleSellar
from openmdao.test_suite.components.expl_comp_simple import TestExplCompSimpleJacVec
from openmdao.test_suite.components.sellar import SellarDerivatives
from openmdao.test_suite.groups.implicit_group import TestImplicitGroup
from openmdao.utils.assert_utils import assert_near_equal
from openmdao.utils.mpi import MPI
try:
    from openmdao.vectors.petsc_vector import PETScVector
except ImportError:
    # PETSc is optional; MPI-dependent tests below are skipped when absent.
    PETScVector = None
class NanComp(om.ExplicitComponent):
    """Explicit component with a deliberately NaN partial, used to exercise
    the DirectSolver NaN-detection error paths."""

    def setup(self):
        self.add_input('x', 1.0)
        self.add_output('y', 1.0)
        self.declare_partials(of='*', wrt='*')

    def compute(self, inputs, outputs):
        outputs['y'] = 3.0*inputs['x']

    def compute_partials(self, inputs, partials):
        """Intentionally incorrect derivative."""
        J = partials
        # np.nan: the camel-case alias np.NaN was removed in NumPy 2.0.
        J['y', 'x'] = np.nan
class SingularComp(om.ImplicitComponent):
    # Implicit component whose Jacobian row is all zeros, used to exercise
    # the DirectSolver singular-matrix error paths.
    def setup(self):
        self.add_input('x', 1.0)
        self.add_output('y', 1.0)
        self.declare_partials(of='*', wrt='*')

    def compute_partials(self, inputs, partials):
        """Intentionally incorrect derivative."""
        J = partials
        # Zero row for state 'y' makes the assembled Jacobian singular.
        J['y', 'x'] = 0.0
        J['y', 'y'] = 0.0
class NanComp2(om.ExplicitComponent):
    """Two-output variant of NanComp: one NaN partial ('y') and one valid
    partial ('y2'), so NaN reporting can be checked per-row."""

    def setup(self):
        self.add_input('x', 1.0)
        self.add_output('y', 1.0)
        self.add_output('y2', 1.0)
        self.declare_partials(of='*', wrt='*')

    def compute(self, inputs, outputs):
        outputs['y'] = 3.0*inputs['x']
        outputs['y2'] = 2.0*inputs['x']

    def compute_partials(self, inputs, partials):
        """Intentionally incorrect derivative."""
        J = partials
        # np.nan: the camel-case alias np.NaN was removed in NumPy 2.0.
        J['y', 'x'] = np.nan
        J['y2', 'x'] = 2.0
class DupPartialsComp(om.ExplicitComponent):
    # Component that deliberately declares duplicate (row, col) subjacobian
    # entries — (4, 11) and (10, 2) each appear twice — so setup-time
    # duplicate detection can be tested.
    def setup(self):
        self.add_input('c', np.zeros(19))
        self.add_output('x', np.zeros(11))

        rows = [0, 1, 4, 10, 7, 9, 10, 4]
        cols = [0, 18, 11, 2, 5, 9, 2, 11]

        self.declare_partials(of='x', wrt='c', rows=rows, cols=cols)

    def compute(self, inputs, outputs):
        # Values are irrelevant; only the partials declaration matters.
        pass

    def compute_partials(self, inputs, partials):
        pass
class TestDirectSolver(LinearSolverTests.LinearSolverTestCase):
    # Run the shared linear-solver test suite against DirectSolver.
    linear_solver_class = om.DirectSolver

    # DirectSolver doesn't iterate.
    def test_solve_linear_maxiter(self):
        # Test that using options that should not exist in class cause an error
        solver = om.DirectSolver()

        msg = "\"DirectSolver: Option '%s' cannot be set because it has not been declared.\""

        # Iterative-solver options must all be rejected with a KeyError.
        for option in ['atol', 'rtol', 'maxiter', 'err_on_non_converge']:
            with self.assertRaises(KeyError) as context:
                solver.options[option] = 1

            self.assertEqual(str(context.exception), msg % option)
def test_solve_on_subsystem(self):
    """solve an implicit system with DirectSolver attached to a subsystem"""

    p = om.Problem()
    model = p.model
    dv = model.add_subsystem('des_vars', om.IndepVarComp())
    # just need a dummy variable so the sizes don't match between root and g1
    dv.add_output('dummy', val=1.0, shape=10)

    g1 = model.add_subsystem('g1', TestImplicitGroup(lnSolverClass=om.DirectSolver))

    p.setup()

    g1.linear_solver.options['assemble_jac'] = False

    p.set_solver_print(level=0)

    # Conclude setup but don't run model.
    p.final_setup()

    # forward: seed residuals, solve for outputs.
    d_inputs, d_outputs, d_residuals = g1.get_linear_vectors()

    d_residuals.set_val(1.0)
    d_outputs.set_val(0.0)
    # Linearize via internal APIs since run_model() was never called.
    g1._linearize(g1._assembled_jac)
    g1.linear_solver._linearize()
    g1.run_solve_linear(['linear'], 'fwd')

    output = d_outputs._data
    assert_near_equal(output, g1.expected_solution, 1e-15)

    # reverse: seed outputs, solve for residuals.
    d_inputs, d_outputs, d_residuals = g1.get_linear_vectors()

    d_outputs.set_val(1.0)
    d_residuals.set_val(0.0)
    g1.linear_solver._linearize()
    g1.run_solve_linear(['linear'], 'rev')

    output = d_residuals._data
    assert_near_equal(output, g1.expected_solution, 3e-15)
def test_rev_mode_bug(self):
    # Regression test: re-running the model after compute_totals in 'rev'
    # mode must not corrupt the solver's fwd/rev state.
    prob = om.Problem()
    prob.model = SellarDerivatives(nonlinear_solver=om.NewtonSolver(solve_subsystems=False),
                                   linear_solver=om.DirectSolver())

    prob.setup(check=False, mode='rev')
    prob.set_solver_print(level=0)
    prob.run_model()

    assert_near_equal(prob['y1'], 25.58830273, .00001)
    assert_near_equal(prob['y2'], 12.05848819, .00001)

    wrt = ['x', 'z']
    of = ['obj', 'con1', 'con2']

    # Reference totals for the Sellar problem.
    Jbase = {}
    Jbase['con1', 'x'] = [[-0.98061433]]
    Jbase['con1', 'z'] = np.array([[-9.61002285, -0.78449158]])
    Jbase['con2', 'x'] = [[0.09692762]]
    Jbase['con2', 'z'] = np.array([[1.94989079, 1.0775421]])
    Jbase['obj', 'x'] = [[2.98061392]]
    Jbase['obj', 'z'] = np.array([[9.61001155, 1.78448534]])

    J = prob.compute_totals(of=of, wrt=wrt, return_format='flat_dict')
    for key, val in Jbase.items():
        assert_near_equal(J[key], val, .00001)

    # In the bug, the solver mode got switched from fwd to rev when it shouldn't
    # have been, causing a singular matrix and NaNs in the output.
    prob.run_model()
    assert_near_equal(prob['y1'], 25.58830273, .00001)
    assert_near_equal(prob['y2'], 12.05848819, .00001)
def test_multi_dim_src_indices(self):
    # Multi-dimensional src_indices on a connection must produce correct
    # totals with DirectSolver.
    prob = om.Problem()
    model = prob.model
    size = 5

    model.add_subsystem('indeps', om.IndepVarComp('x', np.arange(5).reshape((1,size,1))))
    model.add_subsystem('comp', om.ExecComp('y = x * 2.', x=np.zeros((size,)), y=np.zeros((size,))))
    # Pick element (0, i, 0) of the (1, size, 1) source for each target entry.
    src_indices = [[0, i, 0] for i in range(size)]
    model.connect('indeps.x', 'comp.x', src_indices=src_indices)

    model.linear_solver = om.DirectSolver()
    prob.setup()
    prob.run_model()

    J = prob.compute_totals(wrt=['indeps.x'], of=['comp.y'], return_format='array')
    np.testing.assert_almost_equal(J, np.eye(size) * 2.)
def test_raise_error_on_singular(self):
    """A singular (matrix-free) linearization must raise a RuntimeError
    naming the offending state/residual row."""
    prob = om.Problem()
    model = prob.model

    comp = om.IndepVarComp()
    comp.add_output('dXdt:TAS', val=1.0)
    comp.add_output('accel_target', val=2.0)
    model.add_subsystem('des_vars', comp, promotes=['*'])

    teg = model.add_subsystem('thrust_equilibrium_group', subsys=om.Group())
    teg.add_subsystem('dynamics', om.ExecComp('z = 2.0*thrust'), promotes=['*'])

    thrust_bal = om.BalanceComp()
    thrust_bal.add_balance(name='thrust', val=1207.1, lhs_name='dXdt:TAS',
                           rhs_name='accel_target', eq_units='m/s**2', lower=-10.0, upper=10000.0)

    teg.add_subsystem(name='thrust_bal', subsys=thrust_bal,
                      promotes_inputs=['dXdt:TAS', 'accel_target'],
                      promotes_outputs=['thrust'])

    teg.linear_solver = om.DirectSolver(assemble_jac=False)

    # solve_subsystems must be passed explicitly; relying on the default
    # is deprecated in OpenMDAO 3.x (matches the other tests in this file).
    teg.nonlinear_solver = om.NewtonSolver(solve_subsystems=True)
    teg.nonlinear_solver.options['max_sub_solves'] = 1
    teg.nonlinear_solver.options['atol'] = 1e-4

    prob.setup()
    prob.set_solver_print(level=0)

    prob.final_setup()

    with self.assertRaises(RuntimeError) as cm:
        prob.run_model()

    expected_msg = "Singular entry found in Group (thrust_equilibrium_group) for row associated with state/residual 'thrust' ('thrust_equilibrium_group.thrust_bal.thrust') index 0."

    self.assertEqual(expected_msg, str(cm.exception))
def test_raise_error_on_dup_partials(self):
    # Duplicate (row, col) subjacobian declarations must be rejected at
    # setup time when an assembled jacobian is requested.
    prob = om.Problem()
    model = prob.model

    model.add_subsystem('des_vars', om.IndepVarComp('x', 1.0), promotes=['*'])
    model.add_subsystem('dupcomp', DupPartialsComp())

    model.linear_solver = om.DirectSolver(assemble_jac=True)

    with self.assertRaises(Exception) as cm:
        prob.setup()

    expected_msg = "DupPartialsComp (dupcomp): d(x)/d(c): declare_partials has been called with rows and cols that specify the following duplicate subjacobian entries: [(4, 11), (10, 2)]."

    self.assertEqual(expected_msg, str(cm.exception))
def test_raise_error_on_singular_with_densejac(self):
    """Same singular-entry error, but with an assembled *dense* jacobian."""
    prob = om.Problem()
    model = prob.model

    comp = om.IndepVarComp()
    comp.add_output('dXdt:TAS', val=1.0)
    comp.add_output('accel_target', val=2.0)
    model.add_subsystem('des_vars', comp, promotes=['*'])

    teg = model.add_subsystem('thrust_equilibrium_group', subsys=om.Group())
    teg.add_subsystem('dynamics', om.ExecComp('z = 2.0*thrust'), promotes=['*'])

    thrust_bal = om.BalanceComp()
    thrust_bal.add_balance(name='thrust', val=1207.1, lhs_name='dXdt:TAS',
                           rhs_name='accel_target', eq_units='m/s**2', lower=-10.0, upper=10000.0)

    teg.add_subsystem(name='thrust_bal', subsys=thrust_bal,
                      promotes_inputs=['dXdt:TAS', 'accel_target'],
                      promotes_outputs=['thrust'])

    teg.linear_solver = om.DirectSolver(assemble_jac=True)
    teg.options['assembled_jac_type'] = 'dense'

    # solve_subsystems must be passed explicitly; relying on the default
    # is deprecated in OpenMDAO 3.x (matches the other tests in this file).
    teg.nonlinear_solver = om.NewtonSolver(solve_subsystems=True)
    teg.nonlinear_solver.options['max_sub_solves'] = 1
    teg.nonlinear_solver.options['atol'] = 1e-4

    prob.setup()
    prob.set_solver_print(level=0)

    with self.assertRaises(RuntimeError) as cm:
        prob.run_model()

    expected_msg = "Singular entry found in Group (thrust_equilibrium_group) for row associated with state/residual 'thrust' ('thrust_equilibrium_group.thrust_bal.thrust') index 0."

    self.assertEqual(expected_msg, str(cm.exception))
def test_raise_error_on_singular_with_sparsejac(self):
    """Same singular-entry error, but with an assembled *sparse* jacobian."""
    prob = om.Problem()
    model = prob.model

    comp = om.IndepVarComp()
    comp.add_output('dXdt:TAS', val=1.0)
    comp.add_output('accel_target', val=2.0)
    model.add_subsystem('des_vars', comp, promotes=['*'])

    teg = model.add_subsystem('thrust_equilibrium_group', subsys=om.Group())
    teg.add_subsystem('dynamics', om.ExecComp('z = 2.0*thrust'), promotes=['*'])

    thrust_bal = om.BalanceComp()
    thrust_bal.add_balance(name='thrust', val=1207.1, lhs_name='dXdt:TAS',
                           rhs_name='accel_target', eq_units='m/s**2', lower=-10.0, upper=10000.0)

    teg.add_subsystem(name='thrust_bal', subsys=thrust_bal,
                      promotes_inputs=['dXdt:TAS', 'accel_target'],
                      promotes_outputs=['thrust'])

    teg.linear_solver = om.DirectSolver(assemble_jac=True)

    # solve_subsystems must be passed explicitly; relying on the default
    # is deprecated in OpenMDAO 3.x (matches the other tests in this file).
    teg.nonlinear_solver = om.NewtonSolver(solve_subsystems=True)
    teg.nonlinear_solver.options['max_sub_solves'] = 1
    teg.nonlinear_solver.options['atol'] = 1e-4

    prob.setup()
    prob.set_solver_print(level=0)

    with self.assertRaises(RuntimeError) as cm:
        prob.run_model()

    expected_msg = "Singular entry found in Group (thrust_equilibrium_group) for row associated with state/residual 'thrust' ('thrust_equilibrium_group.thrust_bal.thrust') index 0."

    self.assertEqual(expected_msg, str(cm.exception))
def test_raise_no_error_on_singular(self):
    """With err_on_singular=False a singular linearization must run
    without raising."""
    prob = om.Problem()
    model = prob.model

    comp = om.IndepVarComp()
    comp.add_output('dXdt:TAS', val=1.0)
    comp.add_output('accel_target', val=2.0)
    model.add_subsystem('des_vars', comp, promotes=['*'])

    teg = model.add_subsystem('thrust_equilibrium_group', subsys=om.Group())
    teg.add_subsystem('dynamics', om.ExecComp('z = 2.0*thrust'), promotes=['*'])

    thrust_bal = om.BalanceComp()
    thrust_bal.add_balance(name='thrust', val=1207.1, lhs_name='dXdt:TAS',
                           rhs_name='accel_target', eq_units='m/s**2', lower=-10.0, upper=10000.0)

    teg.add_subsystem(name='thrust_bal', subsys=thrust_bal,
                      promotes_inputs=['dXdt:TAS', 'accel_target'],
                      promotes_outputs=['thrust'])

    teg.linear_solver = om.DirectSolver(assemble_jac=False)

    # solve_subsystems must be passed explicitly; relying on the default
    # is deprecated in OpenMDAO 3.x (matches the other tests in this file).
    teg.nonlinear_solver = om.NewtonSolver(solve_subsystems=True)
    teg.nonlinear_solver.options['max_sub_solves'] = 1
    teg.nonlinear_solver.options['atol'] = 1e-4

    prob.setup()
    prob.set_solver_print(level=0)

    teg.linear_solver.options['err_on_singular'] = False
    prob.run_model()
def test_raise_error_on_nan(self):
    # NaN partials (matrix-free path) must raise and name every affected row.
    prob = om.Problem()
    model = prob.model

    model.add_subsystem('p', om.IndepVarComp('x', 2.0))
    model.add_subsystem('c1', om.ExecComp('y = 4.0*x'))
    sub = model.add_subsystem('sub', om.Group())
    sub.add_subsystem('c2', NanComp())
    model.add_subsystem('c3', om.ExecComp('y = 4.0*x'))
    model.add_subsystem('c4', NanComp2())
    model.add_subsystem('c5', om.ExecComp('y = 3.0*x'))
    model.add_subsystem('c6', om.ExecComp('y = 2.0*x'))

    # Serial chain p -> c1 -> c2 -> c3 -> c4 -> {c5, c6}.
    model.connect('p.x', 'c1.x')
    model.connect('c1.y', 'sub.c2.x')
    model.connect('sub.c2.y', 'c3.x')
    model.connect('c3.y', 'c4.x')
    model.connect('c4.y', 'c5.x')
    model.connect('c4.y2', 'c6.x')

    model.linear_solver = om.DirectSolver(assemble_jac=False)

    prob.setup()
    prob.run_model()

    with self.assertRaises(RuntimeError) as cm:
        prob.compute_totals(of=['c5.y'], wrt=['p.x'])

    expected_msg = "NaN entries found in Group (<model>) for rows associated with states/residuals ['sub.c2.y', 'c4.y']."

    self.assertEqual(expected_msg, str(cm.exception))
def test_raise_error_on_nan_sparse(self):
    # Same NaN-detection check as test_raise_error_on_nan, but through the
    # assembled sparse jacobian path.
    prob = om.Problem()
    model = prob.model

    model.add_subsystem('p', om.IndepVarComp('x', 2.0))
    model.add_subsystem('c1', om.ExecComp('y = 4.0*x'))
    sub = model.add_subsystem('sub', om.Group())
    sub.add_subsystem('c2', NanComp())
    model.add_subsystem('c3', om.ExecComp('y = 4.0*x'))
    model.add_subsystem('c4', NanComp2())
    model.add_subsystem('c5', om.ExecComp('y = 3.0*x'))
    model.add_subsystem('c6', om.ExecComp('y = 2.0*x'))

    model.connect('p.x', 'c1.x')
    model.connect('c1.y', 'sub.c2.x')
    model.connect('sub.c2.y', 'c3.x')
    model.connect('c3.y', 'c4.x')
    model.connect('c4.y', 'c5.x')
    model.connect('c4.y2', 'c6.x')

    model.linear_solver = om.DirectSolver(assemble_jac=True)

    prob.setup()
    prob.run_model()

    with self.assertRaises(RuntimeError) as cm:
        prob.compute_totals(of=['c5.y'], wrt=['p.x'])

    expected_msg = "NaN entries found in Group (<model>) for rows associated with states/residuals ['sub.c2.y', 'c4.y']."

    self.assertEqual(expected_msg, str(cm.exception))
def test_raise_error_on_nan_dense(self):
    # Same NaN-detection check, but through the assembled *dense* jacobian.
    prob = om.Problem(model=om.Group(assembled_jac_type='dense'))
    model = prob.model

    model.add_subsystem('p', om.IndepVarComp('x', 2.0))
    model.add_subsystem('c1', om.ExecComp('y = 4.0*x'))
    sub = model.add_subsystem('sub', om.Group())
    sub.add_subsystem('c2', NanComp())
    model.add_subsystem('c3', om.ExecComp('y = 4.0*x'))
    model.add_subsystem('c4', NanComp2())
    model.add_subsystem('c5', om.ExecComp('y = 3.0*x'))
    model.add_subsystem('c6', om.ExecComp('y = 2.0*x'))

    model.connect('p.x', 'c1.x')
    model.connect('c1.y', 'sub.c2.x')
    model.connect('sub.c2.y', 'c3.x')
    model.connect('c3.y', 'c4.x')
    model.connect('c4.y', 'c5.x')
    model.connect('c4.y2', 'c6.x')

    model.linear_solver = om.DirectSolver(assemble_jac=True)

    prob.setup()
    prob.run_model()

    with self.assertRaises(RuntimeError) as cm:
        prob.compute_totals(of=['c5.y'], wrt=['p.x'])

    expected_msg = "NaN entries found in Group (<model>) for rows associated with states/residuals ['sub.c2.y', 'c4.y']."

    self.assertEqual(expected_msg, str(cm.exception))
def test_error_on_NaN_bug(self):
    # Regression test: NaN reporting with array variables and src_indices
    # must name the correct row.
    prob = om.Problem()
    model = prob.model

    model.add_subsystem('p', om.IndepVarComp('x', 2.0*np.ones((2, 2))))
    model.add_subsystem('c1', om.ExecComp('y = 4.0*x', x=np.zeros((2, 2)), y=np.zeros((2, 2))))
    model.add_subsystem('c2', om.ExecComp('y = 4.0*x', x=np.zeros((2, 2)), y=np.zeros((2, 2))))
    model.add_subsystem('c3', om.ExecComp('y = 3.0*x', x=np.zeros((2, 2)), y=np.zeros((2, 2))))
    model.add_subsystem('c4', om.ExecComp('y = 2.0*x', x=np.zeros((2, 2)), y=np.zeros((2, 2))))
    model.add_subsystem('c5', NanComp())

    model.connect('p.x', 'c1.x')
    model.connect('c1.y', 'c2.x')
    model.connect('c2.y', 'c3.x')
    model.connect('c3.y', 'c4.x')
    # Scalar component fed a single element of the (2, 2) array.
    model.connect('c4.y', 'c5.x', src_indices=([0]))

    model.linear_solver = om.DirectSolver(assemble_jac=True)

    prob.setup()
    prob.run_model()

    with self.assertRaises(RuntimeError) as cm:
        prob.compute_totals(of=['c5.y'], wrt=['p.x'])

    expected_msg = "NaN entries found in Group (<model>) for rows associated with states/residuals ['c5.y']."

    self.assertEqual(expected_msg, str(cm.exception))
def test_error_on_singular_with_sparsejac_bug(self):
    # Regression test: singular-row reporting with array variables and
    # src_indices through the sparse assembled jacobian.
    prob = om.Problem()
    model = prob.model

    model.add_subsystem('p', om.IndepVarComp('x', 2.0*np.ones((2, 2))))
    model.add_subsystem('c1', om.ExecComp('y = 4.0*x', x=np.zeros((2, 2)), y=np.zeros((2, 2))))
    model.add_subsystem('c2', om.ExecComp('y = 4.0*x', x=np.zeros((2, 2)), y=np.zeros((2, 2))))
    model.add_subsystem('c3', om.ExecComp('y = 3.0*x', x=np.zeros((2, 2)), y=np.zeros((2, 2))))
    model.add_subsystem('c4', om.ExecComp('y = 2.0*x', x=np.zeros((2, 2)), y=np.zeros((2, 2))))
    model.add_subsystem('c5', SingularComp())

    model.connect('p.x', 'c1.x')
    model.connect('c1.y', 'c2.x')
    model.connect('c2.y', 'c3.x')
    model.connect('c3.y', 'c4.x')
    model.connect('c4.y', 'c5.x', src_indices=([0]))

    model.linear_solver = om.DirectSolver(assemble_jac=True)

    prob.setup()
    prob.run_model()

    with self.assertRaises(RuntimeError) as cm:
        prob.compute_totals(of=['c5.y'], wrt=['p.x'])

    expected_msg = "Singular entry found in Group (<model>) for row associated with state/residual 'c5.y' index 0."

    self.assertEqual(expected_msg, str(cm.exception))
def test_error_on_singular_with_densejac_bug(self):
    # Same regression as the sparse variant, but with a dense assembled
    # jacobian.
    prob = om.Problem()
    model = prob.model

    model.add_subsystem('p', om.IndepVarComp('x', 2.0*np.ones((2, 2))))
    model.add_subsystem('c1', om.ExecComp('y = 4.0*x', x=np.zeros((2, 2)), y=np.zeros((2, 2))))
    model.add_subsystem('c2', om.ExecComp('y = 4.0*x', x=np.zeros((2, 2)), y=np.zeros((2, 2))))
    model.add_subsystem('c3', om.ExecComp('y = 3.0*x', x=np.zeros((2, 2)), y=np.zeros((2, 2))))
    model.add_subsystem('c4', om.ExecComp('y = 2.0*x', x=np.zeros((2, 2)), y=np.zeros((2, 2))))
    model.add_subsystem('c5', SingularComp())

    model.connect('p.x', 'c1.x')
    model.connect('c1.y', 'c2.x')
    model.connect('c2.y', 'c3.x')
    model.connect('c3.y', 'c4.x')
    model.connect('c4.y', 'c5.x', src_indices=([0]))

    model.linear_solver = om.DirectSolver(assemble_jac=True)
    model.options['assembled_jac_type'] = 'dense'

    prob.setup()
    prob.run_model()

    with self.assertRaises(RuntimeError) as cm:
        prob.compute_totals(of=['c5.y'], wrt=['p.x'])

    expected_msg = "Singular entry found in Group (<model>) for row associated with state/residual 'c5.y' index 0."

    self.assertEqual(expected_msg, str(cm.exception))
def test_error_msg_underdetermined_1(self):
    # A rank-deficient Jacobian (no single zero row/column) must raise an
    # error listing the set of linearly dependent states/residuals.
    class DCgenerator(om.ImplicitComponent):

        def setup(self):
            self.add_input('V_bus', val=1.0)
            self.add_input('V_out', val=1.0)

            self.add_output('I_out', val=-2.0)
            self.add_output('P_out', val=-2.0)

            self.declare_partials('I_out', 'V_bus', val=1.0)
            self.declare_partials('I_out', 'V_out', val=-1.0)
            self.declare_partials('P_out', ['V_out', 'I_out'])
            self.declare_partials('P_out', 'P_out', val=-1.0)

        def apply_nonlinear(self, inputs, outputs, resids):
            resids['I_out'] = inputs['V_bus'] - inputs['V_out']
            resids['P_out'] = inputs['V_out'] * outputs['I_out'] - outputs['P_out']

        def linearize(self, inputs, outputs, J):
            J['P_out', 'V_out'] = outputs['I_out']
            J['P_out', 'I_out'] = inputs['V_out']

    class RectifierCalcs(om.ImplicitComponent):

        def setup(self):
            self.add_input('P_out', val=1.0)

            self.add_output('P_in', val=1.0)
            self.add_output('V_out', val=1.0)
            self.add_output('Q_in', val=1.0)

            self.declare_partials('P_in', 'P_out', val=1.0)
            self.declare_partials('P_in', 'P_in', val=-1.0)
            self.declare_partials('V_out', 'V_out', val=-1.0)
            self.declare_partials('Q_in', 'P_in', val=1.0)
            self.declare_partials('Q_in', 'Q_in', val=-1.0)

        def apply_nonlinear(self, inputs, outputs, resids):
            resids['P_in'] = inputs['P_out'] - outputs['P_in']
            resids['V_out'] = 1.0 - outputs['V_out']
            resids['Q_in'] = outputs['P_in'] - outputs['Q_in']

    class Rectifier(om.Group):

        def setup(self):
            # 'Vm_dc' is driven by both components, creating the dependency.
            self.add_subsystem('gen', DCgenerator(), promotes=[('V_bus', 'Vm_dc'), 'P_out'])

            self.add_subsystem('calcs', RectifierCalcs(), promotes=['P_out', ('V_out', 'Vm_dc')])

            self.nonlinear_solver = om.NewtonSolver(solve_subsystems=False)
            self.linear_solver = om.DirectSolver()

    prob = om.Problem()
    prob.model.add_subsystem('sub', Rectifier())

    prob.setup()
    prob.set_solver_print(level=0)

    with self.assertRaises(RuntimeError) as cm:
        prob.run_model()

    expected = "Jacobian in 'sub' is not full rank. The following set of states/residuals contains one or more equations that is a linear combination of the others: \n"
    expected += " 'gen.I_out' ('sub.gen.I_out') index 0.\n"
    expected += " 'Vm_dc' ('sub.calcs.V_out') index 0.\n"

    self.assertEqual(expected, str(cm.exception))
def test_error_msg_underdetermined_2(self):
    """Rank-deficiency error must identify the dependent states across three
    model configurations (default sparse, dense, and partially-promoted).

    The three model builds were previously duplicated inline; they now go
    through a single local builder so the configurations differ only in the
    parameters that matter.
    """
    class E1(om.ImplicitComponent):
        def setup(self):
            self.add_input('a', 1.0)
            self.add_input('aa', 1.0)
            self.add_input('y', 1.0)
            self.add_input('z', 1.0)
            self.add_output('x', 1.0)

            self.declare_partials('x', 'x', val=1.0)
            self.declare_partials('x', 'y', val=1.0)
            self.declare_partials('x', 'z', val=1.0)
            self.declare_partials('x', 'a', val=-1.0)
            self.declare_partials('x', 'aa', val=-1.0)

        def apply_nonlinear(self, inputs, outputs, residuals):
            residuals['x'] = outputs['x'] + inputs['y'] + inputs['z'] - inputs['a'] - inputs['aa']

    class E2(om.ImplicitComponent):
        def setup(self):
            self.add_input('x', 1.0)
            self.add_output('y', 1.0)

            self.declare_partials('y', 'x', val=2.063e-4)
            self.declare_partials('y', 'y')

        def apply_nonlinear(self, inputs, outputs, residuals):
            residuals['y'] = 2.063e-4 * inputs['x'] - outputs['y'] ** 2

        def linearize(self, inputs, outputs, jacobian):
            jacobian['y', 'y'] = -2.0 * outputs['y']

    class E3(om.ImplicitComponent):
        def setup(self):
            self.add_input('a', 1.0)
            self.add_input('aa', 1.0)
            self.add_input('x', 1.0)
            self.add_input('y', 1.0)
            self.add_output('z', 1.0)

            self.declare_partials('z', 'x', val=2.0)
            self.declare_partials('z', 'y', val=1.0)
            self.declare_partials('z', 'z', val=-4.0)
            self.declare_partials('z', 'a', val=-1.0)
            self.declare_partials('z', 'aa', val=-1.0)

        def apply_nonlinear(self, inputs, outputs, residuals):
            residuals['z'] = 2.0 * inputs['x'] + inputs['y'] - 4.0 * outputs['z'] - inputs['a'] - inputs['aa']

    class E3bad(om.ImplicitComponent):
        # Same residual as E3, but declared partials that make its row a
        # linear combination of the others (hence "bad").
        def setup(self):
            self.add_input('a', 1.0)
            self.add_input('aa', 1.0)
            self.add_input('x', 1.0)
            self.add_input('y', 1.0)
            self.add_output('z', 1.0)

            self.declare_partials('z', 'x', val=1.0)
            self.declare_partials('z', 'y', val=1.0)
            self.declare_partials('z', 'z', val=1.0)
            self.declare_partials('z', 'a', val=-1.0)
            self.declare_partials('z', 'aa', val=-1.0)

        def apply_nonlinear(self, inputs, outputs, residuals):
            residuals['z'] = 2.0 * inputs['x'] + inputs['y'] - 4.0 * outputs['z'] - inputs['a'] - inputs['aa']

    def _build_problem(dense=False, promote_all=True, close_loop=True):
        # Build sub1/sub2/sub3 (E3bad in sub2) and run setup(); the three
        # configurations vary only in jac type, e3 promotion, and whether
        # the sub3 -> sub1 connection closes the loop.
        p = om.Problem()
        model = p.model
        if dense:
            model.options['assembled_jac_type'] = 'dense'

        ivc = om.IndepVarComp()
        ivc.add_output('aa', 1.0)
        model.add_subsystem('p', ivc, promotes=['aa'])

        e3_promotes = ['*'] if promote_all else ['aa', 'x', 'y', 'z']
        for sub_name, e3_cls in (('sub1', E3), ('sub2', E3bad), ('sub3', E3)):
            sub = model.add_subsystem(sub_name, om.Group())
            sub.add_subsystem('e1', E1(), promotes=['*'])
            sub.add_subsystem('e2', E2(), promotes=['*'])
            sub.add_subsystem('e3', e3_cls(), promotes=e3_promotes)

        model.connect('sub1.z', 'sub2.a')
        model.connect('sub2.z', 'sub3.a')
        if close_loop:
            model.connect('sub3.z', 'sub1.a')

        model.linear_solver = om.DirectSolver()
        model.nonlinear_solver = om.NewtonSolver(solve_subsystems=False)

        p.setup()
        return p

    expected_closed = "Jacobian in '' is not full rank. The following set of states/residuals contains one or more equations that is a linear combination of the others: \n"
    expected_closed += " 'sub2.x' ('sub2.e1.x') index 0.\n"
    expected_closed += " 'sub2.z' ('sub2.e3.z') index 0.\n"

    # Configuration 1: default (sparse) assembled jacobian.
    p = _build_problem()
    with self.assertRaises(RuntimeError) as cm:
        p.run_model()
    self.assertEqual(expected_closed, str(cm.exception))

    # Configuration 1 Dense: same model, dense assembled jacobian.
    p = _build_problem(dense=True)
    with self.assertRaises(RuntimeError) as cm:
        p.run_model()
    self.assertEqual(expected_closed, str(cm.exception))

    # Configuration 2: limited e3 promotion and open loop; the dependent
    # set now spans auto-ivcs and all three subgroups.
    p = _build_problem(promote_all=False, close_loop=False)
    with self.assertRaises(RuntimeError) as cm:
        p.run_model()

    expected = "Jacobian in '' is not full rank. The following set of states/residuals contains one or more equations that is a linear combination of the others: \n"
    expected += " '_auto_ivc.v0' index 0.\n"
    expected += " '_auto_ivc.v1' index 0.\n"
    expected += " '_auto_ivc.v2' index 0.\n"
    expected += " '_auto_ivc.v4' index 0.\n"
    expected += " 'sub1.x' ('sub1.e1.x') index 0.\n"
    expected += " 'sub1.y' ('sub1.e2.y') index 0.\n"
    expected += " 'sub1.z' ('sub1.e3.z') index 0.\n"
    expected += " 'sub2.x' ('sub2.e1.x') index 0.\n"
    expected += " 'sub2.z' ('sub2.e3.z') index 0.\n"
    expected += "Note that the problem may be in a single Component."

    self.assertEqual(expected, str(cm.exception))
def test_matvec_error_raised(self):
    # Matrix-free components are incompatible with an assembled jacobian;
    # the solver must raise a clear error.
    prob = om.Problem()
    model = prob.model

    model.add_subsystem('x_param', om.IndepVarComp('length', 3.0),
                        promotes=['length'])
    # TestExplCompSimpleJacVec provides apply_linear (matrix-free) only.
    model.add_subsystem('mycomp', TestExplCompSimpleJacVec(),
                        promotes=['length', 'width', 'area'])

    model.linear_solver = self.linear_solver_class()
    prob.set_solver_print(level=0)

    prob.setup(check=False, mode='fwd')
    prob['width'] = 2.0

    msg = "AssembledJacobian not supported for matrix-free subcomponent."
    with self.assertRaisesRegex(Exception, msg):
        prob.run_model()
@unittest.skipUnless(MPI and PETScVector, "only run with MPI and PETSc.")
class TestDirectSolverRemoteErrors(unittest.TestCase):
    # Error-path tests for DirectSolver under MPI; run on two processes.
    N_PROCS = 2

    def test_distrib_direct(self):
        # DirectSolver above a distributed component must raise.
        size = 3
        group = om.Group()

        group.add_subsystem('P', om.IndepVarComp('x', np.arange(size)))
        group.add_subsystem('C1', DistribExecComp(['y=2.0*x', 'y=3.0*x'], arr_size=size,
                                                  x=np.zeros(size),
                                                  y=np.zeros(size)))
        group.add_subsystem('C2', om.ExecComp(['z=3.0*y'],
                                              y=np.zeros(size),
                                              z=np.zeros(size)))

        prob = om.Problem()
        prob.model = group
        prob.model.linear_solver = om.DirectSolver()
        prob.model.connect('P.x', 'C1.x')
        prob.model.connect('C1.y', 'C2.y')

        prob.setup(check=False, mode='fwd')

        with self.assertRaises(Exception) as cm:
            prob.run_model()

        msg = "DirectSolver linear solver in Group (<model>) cannot be used in or above a ParallelGroup or a " + \
              "distributed component."
        self.assertEqual(str(cm.exception), msg)
def test_distrib_direct_subbed(self):
size = 3
prob = om.Problem()
group = prob.model = om.Group()
group.add_subsystem('P', om.IndepVarComp('x', np.arange(size)))
sub = group.add_subsystem('sub', om.Group())
sub.add_subsystem('C1', DistribExecComp(['y=2.0*x', 'y=3.0*x'], arr_size=size,
x=np.zeros(size),
y=np.zeros(size)))
sub.add_subsystem('C2', om.ExecComp(['z=3.0*y'],
y=np.zeros(size),
z=np.zeros(size)))
prob.model.linear_solver = om.DirectSolver()
group.connect('P.x', 'sub.C1.x')
group.connect('sub.C1.y', 'sub.C2.y')
prob.setup(check=False, mode='fwd')
with self.assertRaises(Exception) as cm:
prob.run_model()
msg = "DirectSolver linear solver in Group (<model>) cannot be used in or above a ParallelGroup or a " + \
"distributed component."
self.assertEqual(str(cm.exception), msg)
def test_par_direct_subbed(self):
prob = om.Problem()
model = prob.model
model.add_subsystem('p1', om.IndepVarComp('x', 1.0))
model.add_subsystem('p2', om.IndepVarComp('x', 1.0))
parallel = model.add_subsystem('parallel', om.ParallelGroup())
parallel.add_subsystem('c1', om.ExecComp(['y=-2.0*x']))
parallel.add_subsystem('c2', om.ExecComp(['y=5.0*x']))
model.add_subsystem('c3', om.ExecComp(['y=3.0*x1+7.0*x2']))
model.connect("parallel.c1.y", "c3.x1")
model.connect("parallel.c2.y", "c3.x2")
model.connect("p1.x", "parallel.c1.x")
model.connect("p2.x", "parallel.c2.x")
model.linear_solver = om.DirectSolver()
prob.setup(check=False, mode='fwd')
with self.assertRaises(Exception) as cm:
prob.run_model()
msg = "DirectSolver linear solver in Group (<model>) cannot be used in or above a ParallelGroup or a " + \
"distributed component."
self.assertEqual(str(cm.exception), msg)
def test_par_direct(self):
prob = om.Problem()
model = prob.model
model.add_subsystem('P', om.IndepVarComp('x', 1.0))
par = model.add_subsystem('par', om.ParallelGroup())
par.add_subsystem('C1', om.ExecComp(['y=2.0*x']))
par.add_subsystem('C2', om.ExecComp(['z=3.0*y']))
model.linear_solver = om.DirectSolver()
model.connect('P.x', 'par.C1.x')
model.connect('P.x', 'par.C2.y')
prob.setup()
with self.assertRaises(Exception) as cm:
prob.run_model()
msg = "DirectSolver linear solver in Group (<model>) cannot be used in or above a ParallelGroup or a " + \
"distributed component."
self.assertEqual(str(cm.exception), msg)
class TestDirectSolverFeature(unittest.TestCase):
    """Feature example: assigning om.DirectSolver as a model's linear solver."""

    def test_specify_solver(self):
        """Sellar derivatives computed with a DirectSolver match known totals."""
        import openmdao.api as om
        from openmdao.test_suite.components.sellar import SellarDerivatives

        p = om.Problem()
        sellar = p.model = SellarDerivatives()
        sellar.linear_solver = om.DirectSolver()

        p.setup()
        p.run_model()

        totals = p.compute_totals(of=['obj'], wrt=['z'],
                                  return_format='flat_dict')
        assert_near_equal(totals['obj', 'z'][0][0], 9.61001056, .00001)
        assert_near_equal(totals['obj', 'z'][0][1], 1.78448534, .00001)
class TestDirectSolverMPI(unittest.TestCase):
    """Regression tests for DirectSolver running under MPI."""

    # Run this TestCase under 2 MPI processes.
    N_PROCS = 2

    def test_serial_in_mpi(self):
        # Tests that we can take an MPI model with a DirectSolver and run it in mpi with more
        # procs. This verifies fix of a bug.
        prob = om.Problem(model=DoubleSellar())
        model = prob.model

        # Each sub-group gets its own Newton + dense assembled DirectSolver.
        g1 = model.g1
        g1.nonlinear_solver = om.NewtonSolver(solve_subsystems=False)
        g1.nonlinear_solver.options['rtol'] = 1.0e-5
        g1.linear_solver = om.DirectSolver(assemble_jac=True)
        g1.options['assembled_jac_type'] = 'dense'

        g2 = model.g2
        g2.nonlinear_solver = om.NewtonSolver(solve_subsystems=False)
        g2.nonlinear_solver.options['rtol'] = 1.0e-5
        g2.linear_solver = om.DirectSolver(assemble_jac=True)
        g2.options['assembled_jac_type'] = 'dense'

        # Top-level Newton recurses into the sub-solvers (solve_subsystems=True).
        model.nonlinear_solver = om.NewtonSolver()
        model.linear_solver = om.ScipyKrylov(assemble_jac=True)
        model.options['assembled_jac_type'] = 'dense'
        model.nonlinear_solver.options['solve_subsystems'] = True

        prob.set_solver_print(level=0)
        prob.setup()
        prob.run_model()

        assert_near_equal(prob['g1.y1'], 0.64, 1.0e-5)
        assert_near_equal(prob['g1.y2'], 0.80, 1.0e-5)
        assert_near_equal(prob['g2.y1'], 0.64, 1.0e-5)
        assert_near_equal(prob['g2.y2'], 0.80, 1.0e-5)
# Allow running this test file directly as a script.
if __name__ == "__main__":
    unittest.main()
| 37.727811
| 192
| 0.586
| 5,099
| 38,256
| 4.250637
| 0.078055
| 0.069207
| 0.058042
| 0.024915
| 0.804512
| 0.769078
| 0.74218
| 0.702039
| 0.665729
| 0.638553
| 0
| 0.036609
| 0.253137
| 38,256
| 1,013
| 193
| 37.765054
| 0.721966
| 0.019631
| 0
| 0.662921
| 0
| 0.018258
| 0.150719
| 0.008327
| 0
| 0
| 0
| 0
| 0.075843
| 1
| 0.070225
| false
| 0.002809
| 0.021067
| 0
| 0.116573
| 0.01264
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
8ab80ff87e81f7a65ba90c9e103255773ee65d70
| 169
|
py
|
Python
|
test/test_print.py
|
laashub-sua/demo-print
|
76665ffa6e3bd675ffa111ff6c3aabed9b5ea6ec
|
[
"Apache-2.0"
] | null | null | null |
test/test_print.py
|
laashub-sua/demo-print
|
76665ffa6e3bd675ffa111ff6c3aabed9b5ea6ec
|
[
"Apache-2.0"
] | null | null | null |
test/test_print.py
|
laashub-sua/demo-print
|
76665ffa6e3bd675ffa111ff6c3aabed9b5ea6ec
|
[
"Apache-2.0"
] | null | null | null |
import printer

if __name__ == '__main__':
    # Alternate fixture, kept for manual testing:
    # file_path = r'files/test_print.jpg'
    target_file = r'files/test_convert_pdf_2_jpg.jpg'
    printer.do_print(target_file)
| 24.142857
| 51
| 0.727811
| 27
| 169
| 3.925926
| 0.592593
| 0.226415
| 0.169811
| 0.264151
| 0.339623
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007042
| 0.159763
| 169
| 6
| 52
| 28.166667
| 0.739437
| 0.207101
| 0
| 0
| 0
| 0
| 0.30303
| 0.242424
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0.5
| 1
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
76d40fda8a40b857ad8b9eebfaa8cf8368f5c080
| 143
|
py
|
Python
|
DesignPatterns/FlaskMVC/app/__init__.py
|
TigranGit/CodeBase
|
d58e30b1d83fab4b388ec2cdcb868fa751c62188
|
[
"Apache-2.0"
] | 1
|
2020-08-13T19:09:27.000Z
|
2020-08-13T19:09:27.000Z
|
DesignPatterns/FlaskMVC/app/__init__.py
|
TigranGit/CodeBase
|
d58e30b1d83fab4b388ec2cdcb868fa751c62188
|
[
"Apache-2.0"
] | null | null | null |
DesignPatterns/FlaskMVC/app/__init__.py
|
TigranGit/CodeBase
|
d58e30b1d83fab4b388ec2cdcb868fa751c62188
|
[
"Apache-2.0"
] | null | null | null |
from .routes.front import front
from .routes.back import back
from .app import app

# Attach both route collections to the Flask application object.
for _blueprint in (front, back):
    app.register_blueprint(_blueprint)
| 20.428571
| 31
| 0.818182
| 22
| 143
| 5.227273
| 0.363636
| 0.173913
| 0.347826
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.104895
| 143
| 6
| 32
| 23.833333
| 0.898438
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.6
| 0
| 0.6
| 0.4
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
76d61c2b8037bbde70d05760647150f4686b0f96
| 29,695
|
py
|
Python
|
main_graphpmu.py
|
arminalgln/graphpmu
|
fd3a8b22d26c1c9e41801c709080e18e399f4759
|
[
"MIT"
] | null | null | null |
main_graphpmu.py
|
arminalgln/graphpmu
|
fd3a8b22d26c1c9e41801c709080e18e399f4759
|
[
"MIT"
] | null | null | null |
main_graphpmu.py
|
arminalgln/graphpmu
|
fd3a8b22d26c1c9e41801c709080e18e399f4759
|
[
"MIT"
] | null | null | null |
import os
import pickle
import numpy as np
import torch
from models.discriminator import Discriminator
from models.graph_model import (GraphEncoder, GraphEncoderLocGlob, GraphPMU,
GraphPMULocalGlobal)
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
import copy
import dgl
import pandas as pd
import torch.nn as nn
from dgl.dataloading import GraphDataLoader
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
from models.losses import (get_negative_expectation, get_positive_expectation,
global_loss_, local_global_loss_)
#%%
# Load pre-built positive/negative DGL graph datasets from pickle files.
with open("data/positive_graphs_latent_with_just_pmu_AED.pkl", "rb") as handle:
    pos_graphs = pickle.load(handle)
with open("data/negative_graphs_latent_with_just_pmu_AED.pkl", "rb") as handle:
    neg_graphs = pickle.load(handle)
#%%
# initialization
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# g_encoder = GraphEncoder
g_encoder = GraphEncoderLocGlob
disc = Discriminator
node_nums = pos_graphs[0].num_nodes()
# Feature width is read off the first graph; hidden/latent sizes are fixed here.
[in_feats, h1_feats, last_space_feature] = [
    pos_graphs[0].ndata["features"].shape[-1],
    128,
    64,
]
# Discriminator hidden-layer sizes.
[D_h1, D_h2] = [32, 16]
measure1 = "JSD"  # ['GAN', 'JSD', 'X2', 'KL', 'RKL', 'DV', 'H2', 'W1', 'JSMI']
measure2 = "BCE"  # ['GAN', 'JSD', 'X2', 'KL', 'RKL', 'DV', 'H2', 'W1', 'JSMI']
# graphpmu = GraphPMU(g_encoder, disc, node_nums, in_feats, h1_feats, last_space_feature, D_h1, D_h2, device)
graphpmu = GraphPMULocalGlobal(
    g_encoder,
    disc,
    node_nums,
    in_feats,
    h1_feats,
    last_space_feature,
    D_h1,
    D_h2,
    device,
)
# make positive and negative batches
num_samples = len(pos_graphs)
num_train = int(num_samples * 0.9)
np.random.seed(0)  # deterministic train/test split
train_selector = np.random.choice(num_samples, num_train, replace=False)
test_selector = np.setdiff1d(np.arange(num_samples), train_selector)
# Subsample to fixed-size train/test index sets.
train_index = np.random.choice(train_selector.shape[0], 10000, replace=False)
test_index = np.random.choice(test_selector.shape[0], 1000, replace=False)
train_selector = train_selector[train_index]
test_selector = test_selector[test_index]
train_sampler = SubsetRandomSampler(torch.from_numpy(train_selector))
test_sampler = SubsetRandomSampler(torch.from_numpy(test_selector))
b_size = 20
# Positive and negative loaders share samplers so batches stay index-aligned.
pos_train_dataloader = GraphDataLoader(
    pos_graphs, sampler=train_sampler, batch_size=b_size, drop_last=False
)
pos_test_dataloader = GraphDataLoader(
    pos_graphs, sampler=test_sampler, batch_size=b_size, drop_last=False
)
neg_train_dataloader = GraphDataLoader(
    neg_graphs, sampler=train_sampler, batch_size=b_size, drop_last=False
)
neg_test_dataloader = GraphDataLoader(
    neg_graphs, sampler=test_sampler, batch_size=b_size, drop_last=False
)
def train_graphpmu(
    graphpmu,
    pos_train_dataloader,
    neg_train_dataloader,
    pos_test_dataloader,
    neg_test_dataloader,
    epochs_num,
):
    """Train *graphpmu* with the global contrastive loss and return the best model.

    Uses module-level globals ``measure1``/``measure2`` and ``global_loss_``.
    Tracks validation loss per epoch and restores the best weights at the end.
    Returns the model in eval mode.
    """
    # initialization for training
    graphpmu_optimizer = torch.optim.Adam(
        graphpmu.parameters(), lr=1e-3
    )  # , weight_decay=1e-4
    criteria = global_loss_
    # BCE = nn.BCELoss()
    history = dict(train=[], val=[])
    best_model_wts = copy.deepcopy(graphpmu.state_dict())
    best_loss = 10000.0
    # Weighting between the two loss measures (JSD vs BCE).
    alpha = 0.8
    for epoch in range(epochs_num):
        # training mode
        graphpmu = graphpmu.train()
        graphpmu_optimizer.zero_grad()
        train_losses = []
        pos_iter = iter(pos_train_dataloader)
        neg_iter = iter(neg_train_dataloader)
        for pos_batch in pos_iter:  # iter on train batches
            pos_neg_graphs = dgl.batch(
                [pos_batch, next(neg_iter)]
            )  # concat pos and neg graphs
            pred = graphpmu(pos_neg_graphs)
            # target = torch.cat((torch.ones(pos_batch.batch_size, device=device), torch.zeros(pos_batch.batch_size, device=device)))
            # loss = BCE(pred.ravel(), target)
            loss1 = criteria(pred, measure1)
            loss2 = criteria(pred, measure2)
            loss = alpha * loss1 + (1 - alpha) * loss2
            # print(loss)
            graphpmu_optimizer.zero_grad()
            loss.backward()
            train_losses.append(loss.item())
            graphpmu_optimizer.step()
        validation_losses = []
        graphpmu = graphpmu.eval()
        with torch.no_grad():
            pos_iter = iter(pos_test_dataloader)
            neg_iter = iter(neg_test_dataloader)
            for pos_batch in pos_iter:  # iter on train batches
                pos_neg_graphs = dgl.batch(
                    [pos_batch, next(neg_iter)]
                )  # concat pos and neg graphs
                pred = graphpmu(pos_neg_graphs)
                # target = torch.cat(
                #     (torch.ones(pos_batch.batch_size, device=device), torch.zeros(pos_batch.batch_size, device=device)))
                # loss = BCE(pred.ravel(), target)
                loss1 = criteria(pred, measure1)
                loss2 = criteria(pred, measure2)
                loss = alpha * loss1 + (1 - alpha) * loss2
                validation_losses.append(loss.item())
                # print(loss.item())
        train_loss = np.mean(train_losses)
        validation_loss = np.mean(validation_losses)
        history["train"].append(train_loss)
        history["val"].append(validation_loss)
        # Keep a copy of the weights whenever validation improves.
        if validation_loss < best_loss:
            best_loss = validation_loss
            best_model_wts = copy.deepcopy(graphpmu.state_dict())
        print(f"Epoch {epoch}: train loss {train_loss} val loss {validation_loss}")
    graphpmu.load_state_dict(best_model_wts)
    return graphpmu.eval()
# Brief 2-epoch run with the global-loss trainer.
gpmodel = train_graphpmu(
    graphpmu,
    pos_train_dataloader,
    neg_train_dataloader,
    pos_test_dataloader,
    neg_test_dataloader,
    epochs_num=2,
)
#%%
def train_graphpmu_loc_glob(
    graphpmu,
    pos_train_dataloader,
    neg_train_dataloader,
    pos_test_dataloader,
    neg_test_dataloader,
    epochs_num,
    b_size,
    lr,
):
    """Train *graphpmu* with the local-global contrastive loss.

    Uses module-level globals ``node_nums``, ``measure1`` and
    ``local_global_loss_``.  Returns ``(best_model_in_eval_mode, last_train_loss)``.

    NOTE(review): the ``labels``/``posidx``/``negidx`` tensors built before the
    epoch loop are overwritten inside the batch loop by simple aranges — the
    precomputation appears to be dead code left from an earlier variant.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # initialization for training
    graphpmu_optimizer = torch.optim.Adam(
        graphpmu.parameters(), lr=lr
    )  # , weight_decay=1e-4
    # criteria = global_loss_
    criteria = local_global_loss_
    # BCE = nn.BCELoss()
    history = dict(train=[], val=[])
    best_model_wts = copy.deepcopy(graphpmu.state_dict())
    best_loss = 10000.0
    alpha = 1
    # labels = torch.zeros(((2*b_size)**2)*node_nums, device=device)
    # labels = torch.zeros((2*(b_size)**2)*node_nums, device=device)
    # for i in range(b_size):
    #     labels[i*((2*b_size+1)*node_nums):node_nums+i*((2*b_size+1)*node_nums)] = 1
    labels = torch.zeros(((2 * b_size) ** 2) * node_nums, device=device)
    posidx = []
    # labels = torch.zeros((2*(b_size)**2)*node_nums, device=device)
    for i in range(2 * b_size):
        # Mark the diagonal stripes of the (graph x node) grid as positives.
        labels[
            i * ((2 * b_size + 1) * node_nums) : node_nums
            + i * ((2 * b_size + 1) * node_nums)
        ] = 1
        posidx.append(
            np.arange(
                i * ((2 * b_size + 1) * node_nums),
                node_nums + i * ((2 * b_size + 1) * node_nums),
            )
        )
    posidx = np.array(posidx).ravel()
    lblindx = np.arange(0, labels.shape[0])
    negidx = np.setdiff1d(lblindx, posidx)
    posidx = torch.tensor(posidx)
    negidx = torch.tensor(negidx)
    for epoch in range(epochs_num):
        # training mode
        graphpmu = graphpmu.train()
        graphpmu_optimizer.zero_grad()
        train_losses = []
        pos_iter = iter(pos_train_dataloader)
        neg_iter = iter(neg_train_dataloader)
        for pos_batch in pos_iter:  # iter on train batches
            pos_neg_graphs = dgl.batch(
                [pos_batch, next(neg_iter)]
            )  # concat pos and neg graphs
            pos_neg_graphs = pos_neg_graphs.to(device)
            # pred = graphpmu.encoder(pos_neg_graphs)
            # all_batch_graphs = dgl.unbatch(pos_neg_graphs)
            # pos = all_batch_graphs[0:int(len(all_batch_graphs)/2)]
            # neg = all_batch_graphs[int(len(all_batch_graphs)/2):]
            H = graphpmu.encoder(pos_neg_graphs)
            # hcat = pos_neg_graphs.ndata['hcat']
            # Hsize = H.shape
            # hcatsize = hcat.shape
            #
            # hcat = hcat.repeat(Hsize[0], 1)
            # H = torch.unsqueeze(H, dim=1)
            # H = H.expand(Hsize[0], hcatsize[0], Hsize[-1]).reshape(Hsize[0] * hcatsize[0], Hsize[-1])
            #
            # latent = torch.cat((H, hcat), axis=1)
            latent = H
            # latent = latent[0:int(H.shape[0] / 2)]
            pred = graphpmu.discriminator(latent)
            # First half of the concatenated batch is positive, second half negative.
            posidx = torch.arange(0, b_size)
            negidx = torch.arange(b_size, b_size * 2)
            loss1 = criteria(pred, posidx, negidx, measure1)
            # batch_size = len(pos)
            # n_nodes = pos[0].num_nodes()
            # complete_batch_pos_neg = torch.zeros(batch_size * (batch_size + 1) * n_nodes, )
            # for i in range(len(pos)):
            # p, n = dgl.unbatch(pos_neg_graphs, torch.tensor([25 * 10, 25 * 10]), torch.tensor([49 * 10, 49 * 10]))
            # target = torch.cat((torch.ones(pos_batch.batch_size, device=device), torch.zeros(pos_batch.batch_size, device=device)))
            # loss = BCE(pred.ravel(), target)
            # loss1 = criteria(pred, labels, measure1)
            # loss2 = criteria(pred, labels, measure2)
            # loss = alpha * loss1 + (1-alpha) * loss2
            loss = alpha * loss1
            # loss = (1-alpha) * loss2
            # print(loss)
            graphpmu_optimizer.zero_grad()
            loss.backward()
            train_losses.append(loss.item())
            graphpmu_optimizer.step()
        validation_losses = []
        graphpmu = graphpmu.eval()
        with torch.no_grad():
            pos_iter = iter(pos_test_dataloader)
            neg_iter = iter(neg_test_dataloader)
            for pos_batch in pos_iter:  # iter on train batches
                pos_neg_graphs = dgl.batch(
                    [pos_batch, next(neg_iter)]
                )  # concat pos and neg graphs
                pos_neg_graphs = pos_neg_graphs.to(device)
                # pred = graphpmu(pos_neg_graphs)
                H = graphpmu.encoder(pos_neg_graphs)
                # hcat = pos_neg_graphs.ndata['hcat']
                # Hsize = H.shape
                # hcatsize = hcat.shape
                #
                # hcat = hcat.repeat(Hsize[0], 1)
                # H = torch.unsqueeze(H, dim=1)
                # H = H.expand(Hsize[0], hcatsize[0], Hsize[-1]).reshape(Hsize[0] * hcatsize[0], Hsize[-1])
                #
                # latent = torch.cat((H, hcat), axis=1)
                # latent = latent[0:int(H.shape[0] / 2)]
                latent = H
                posidx = torch.arange(0, b_size)
                negidx = torch.arange(b_size, b_size * 2)
                pred = graphpmu.discriminator(latent)
                loss1 = criteria(pred, posidx, negidx, measure1)
                # target = torch.cat(
                #     (torch.ones(pos_batch.batch_size, device=device), torch.zeros(pos_batch.batch_size, device=device)))
                # loss = BCE(pred.ravel(), target)
                # loss1 = criteria(pred, labels, measure1)
                # loss2 = criteria(pred, labels, measure2)
                # loss = alpha * loss1 + (1 - alpha) * loss2
                loss = alpha * loss1
                # loss = (1 - alpha) * loss2
                validation_losses.append(loss.item())
                # print(loss.item())
        train_loss = np.mean(train_losses)
        validation_loss = np.mean(validation_losses)
        history["train"].append(train_loss)
        history["val"].append(validation_loss)
        # Keep a copy of the weights whenever validation improves.
        if validation_loss < best_loss:
            best_loss = validation_loss
            best_model_wts = copy.deepcopy(graphpmu.state_dict())
        print(f"Epoch {epoch}: train loss {train_loss} val loss {validation_loss}")
    graphpmu.load_state_dict(best_model_wts)
    return graphpmu.eval(), train_loss
#%%
# 10-epoch run with the local-global trainer at a higher learning rate.
gpmodel, train_loss = train_graphpmu_loc_glob(
    graphpmu,
    pos_train_dataloader,
    neg_train_dataloader,
    pos_test_dataloader,
    neg_test_dataloader,
    epochs_num=10,
    b_size=b_size,
    lr=1e-2,
)
# gpmodel, train_loss = train_graphpmu_loc_glob(gpmodel, pos_train_dataloader, neg_train_dataloader,
#                     pos_test_dataloader, neg_test_dataloader, epochs_num=100, b_size=b_size, lr = 1e-3)
#%%
def train_graphpmu_loc_glob_pos_neg(
    graphpmu,
    pos_train_dataloader,
    neg_train_dataloader,
    pos_test_dataloader,
    neg_test_dataloader,
    epochs_num,
    b_size,
    lr,
):
    """Local-global training variant that concatenates node 'hcat' features.

    Unlike ``train_graphpmu_loc_glob``, the discriminator here sees the graph
    embedding H crossed with every node's ``hcat`` feature, and the positive /
    negative index sets built before the epoch loop ARE used by the loss.
    Uses module-level globals ``node_nums`` and ``measure1``.
    Returns ``(best_model_in_eval_mode, last_train_loss)``.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # initialization for training
    graphpmu_optimizer = torch.optim.Adam(
        graphpmu.parameters(), lr=lr
    )  # , weight_decay=1e-4
    # criteria = global_loss_
    criteria = local_global_loss_
    # BCE = nn.BCELoss()
    history = dict(train=[], val=[])
    best_model_wts = copy.deepcopy(graphpmu.state_dict())
    best_loss = 10000.0
    alpha = 1
    # labels = torch.zeros(((2*b_size)**2)*node_nums, device=device)
    # labels = torch.zeros((2*(b_size)**2)*node_nums, device=device)
    # for i in range(b_size):
    #     labels[i*((2*b_size+1)*node_nums):node_nums+i*((2*b_size+1)*node_nums)] = 1
    labels = torch.zeros(((2 * b_size) ** 2) * node_nums, device=device)
    posidx = []
    # labels = torch.zeros((2*(b_size)**2)*node_nums, device=device)
    for i in range(2 * b_size):
        # Mark the diagonal stripes of the (graph x node) grid as positives.
        labels[
            i * ((2 * b_size + 1) * node_nums) : node_nums
            + i * ((2 * b_size + 1) * node_nums)
        ] = 1
        posidx.append(
            np.arange(
                i * ((2 * b_size + 1) * node_nums),
                node_nums + i * ((2 * b_size + 1) * node_nums),
            )
        )
    posidx = np.array(posidx).ravel()
    lblindx = np.arange(0, labels.shape[0])
    negidx = np.setdiff1d(lblindx, posidx)
    posidx = torch.tensor(posidx)
    negidx = torch.tensor(negidx)
    for epoch in range(epochs_num):
        # training mode
        graphpmu = graphpmu.train()
        graphpmu_optimizer.zero_grad()
        train_losses = []
        pos_iter = iter(pos_train_dataloader)
        neg_iter = iter(neg_train_dataloader)
        for pos_batch in pos_iter:  # iter on train batches
            pos_neg_graphs = dgl.batch(
                [pos_batch, next(neg_iter)]
            )  # concat pos and neg graphs
            pos_neg_graphs = pos_neg_graphs.to(device)
            # pred = graphpmu.encoder(pos_neg_graphs)
            # all_batch_graphs = dgl.unbatch(pos_neg_graphs)
            # pos = all_batch_graphs[0:int(len(all_batch_graphs)/2)]
            # neg = all_batch_graphs[int(len(all_batch_graphs)/2):]
            H = graphpmu.encoder(pos_neg_graphs)
            hcat = pos_neg_graphs.ndata["hcat"]
            Hsize = H.shape
            hcatsize = hcat.shape
            # Cross every graph embedding with every node feature row, then
            # concatenate so the discriminator scores (graph, node) pairs.
            hcat = hcat.repeat(Hsize[0], 1)
            H = torch.unsqueeze(H, dim=1)
            H = H.expand(Hsize[0], hcatsize[0], Hsize[-1]).reshape(
                Hsize[0] * hcatsize[0], Hsize[-1]
            )
            latent = torch.cat((H, hcat), axis=1)
            # latent = latent[0:int(H.shape[0] / 2)]
            pred = graphpmu.discriminator(latent)
            loss1 = criteria(pred, posidx, negidx, measure1)
            # batch_size = len(pos)
            # n_nodes = pos[0].num_nodes()
            # complete_batch_pos_neg = torch.zeros(batch_size * (batch_size + 1) * n_nodes, )
            # for i in range(len(pos)):
            # p, n = dgl.unbatch(pos_neg_graphs, torch.tensor([25 * 10, 25 * 10]), torch.tensor([49 * 10, 49 * 10]))
            # target = torch.cat((torch.ones(pos_batch.batch_size, device=device), torch.zeros(pos_batch.batch_size, device=device)))
            # loss = BCE(pred.ravel(), target)
            # loss1 = criteria(pred, labels, measure1)
            # loss2 = criteria(pred, labels, measure2)
            # loss = alpha * loss1 + (1-alpha) * loss2
            loss = alpha * loss1
            # loss = (1-alpha) * loss2
            # print(loss)
            graphpmu_optimizer.zero_grad()
            loss.backward()
            train_losses.append(loss.item())
            graphpmu_optimizer.step()
        validation_losses = []
        graphpmu = graphpmu.eval()
        with torch.no_grad():
            pos_iter = iter(pos_test_dataloader)
            neg_iter = iter(neg_test_dataloader)
            for pos_batch in pos_iter:  # iter on train batches
                pos_neg_graphs = dgl.batch(
                    [pos_batch, next(neg_iter)]
                )  # concat pos and neg graphs
                pos_neg_graphs = pos_neg_graphs.to(device)
                # pred = graphpmu(pos_neg_graphs)
                H = graphpmu.encoder(pos_neg_graphs)
                hcat = pos_neg_graphs.ndata["hcat"]
                Hsize = H.shape
                hcatsize = hcat.shape
                hcat = hcat.repeat(Hsize[0], 1)
                H = torch.unsqueeze(H, dim=1)
                H = H.expand(Hsize[0], hcatsize[0], Hsize[-1]).reshape(
                    Hsize[0] * hcatsize[0], Hsize[-1]
                )
                latent = torch.cat((H, hcat), axis=1)
                # latent = latent[0:int(H.shape[0] / 2)]
                pred = graphpmu.discriminator(latent)
                loss1 = criteria(pred, posidx, negidx, measure1)
                # target = torch.cat(
                #     (torch.ones(pos_batch.batch_size, device=device), torch.zeros(pos_batch.batch_size, device=device)))
                # loss = BCE(pred.ravel(), target)
                # loss1 = criteria(pred, labels, measure1)
                # loss2 = criteria(pred, labels, measure2)
                # loss = alpha * loss1 + (1 - alpha) * loss2
                loss = alpha * loss1
                # loss = (1 - alpha) * loss2
                validation_losses.append(loss.item())
                # print(loss.item())
        train_loss = np.mean(train_losses)
        validation_loss = np.mean(validation_losses)
        history["train"].append(train_loss)
        history["val"].append(validation_loss)
        # Keep a copy of the weights whenever validation improves.
        if validation_loss < best_loss:
            best_loss = validation_loss
            best_model_wts = copy.deepcopy(graphpmu.state_dict())
        print(f"Epoch {epoch}: train loss {train_loss} val loss {validation_loss}")
    graphpmu.load_state_dict(best_model_wts)
    return graphpmu.eval(), train_loss
#%%
# 10-epoch run with the hcat-concatenating local-global trainer.
gpmodel, train_loss = train_graphpmu_loc_glob_pos_neg(
    graphpmu,
    pos_train_dataloader,
    neg_train_dataloader,
    pos_test_dataloader,
    neg_test_dataloader,
    epochs_num=10,
    b_size=b_size,
    lr=1e-2,
)
#%%
# Re-initialize the model and train for 1 epoch at a time until the training
# loss drops to zero or below.
# NOTE(review): if the loss never reaches <= 0 this loop runs forever — confirm
# the chosen loss measure can actually go non-positive.
err = 10
lr = 1e-3
while err > 0:
    # initialization
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # g_encoder = GraphEncoder
    g_encoder = GraphEncoderLocGlob
    disc = Discriminator
    node_nums = pos_graphs[0].num_nodes()
    [in_feats, h1_feats, last_space_feature] = [
        pos_graphs[0].ndata["features"].shape[-1],
        128,
        64,
    ]
    # Discriminator sizes shrunk to 1 here (vs 32/16 in the initial setup).
    [D_h1, D_h2] = [1, 1]
    measure1 = "JSD"  # ['GAN', 'JSD', 'X2', 'KL', 'RKL', 'DV', 'H2', 'W1', 'JSMI']
    measure2 = "BCE"  # ['GAN', 'JSD', 'X2', 'KL', 'RKL', 'DV', 'H2', 'W1', 'JSMI']
    # graphpmu = GraphPMU(g_encoder, disc, node_nums, in_feats, h1_feats, last_space_feature, D_h1, D_h2, device)
    graphpmu = GraphPMULocalGlobal(
        g_encoder,
        disc,
        node_nums,
        in_feats,
        h1_feats,
        last_space_feature,
        D_h1,
        D_h2,
        device,
    )
    # make positive and negative batches
    num_samples = len(pos_graphs)
    gpmodel, train_loss = train_graphpmu_loc_glob(
        graphpmu,
        pos_train_dataloader,
        neg_train_dataloader,
        pos_test_dataloader,
        neg_test_dataloader,
        epochs_num=1,
        b_size=b_size,
        lr=lr,
    )
    err = train_loss
    print(train_loss)
#%%
# Long refinement run (100 epochs) starting from the model found above.
gpmodel, train_loss = train_graphpmu_loc_glob(
    gpmodel,
    pos_train_dataloader,
    neg_train_dataloader,
    pos_test_dataloader,
    neg_test_dataloader,
    epochs_num=100,
    b_size=b_size,
    lr=lr,
)
#%%
# Checkpoint the whole model object (torch.save of the module, not just weights).
torch.save(gpmodel, "models/saved/locglob_withconcat_pos_neg_ones_bsize10_lr-3")
#%%
# graphpmu = GraphPMULocalGlobal(g_encoder, disc, node_nums, in_feats, h1_feats, last_space_feature, D_h1, D_h2,
#                      device)
gpmodel = torch.load("models/saved/locglob_withconcat_pos_neg_ones_bsize10_lr-3")
gpmodel.eval()
#%%
from sklearn.metrics import accuracy_score
# discriminator evaluation
def get_accuracy(y_true, y_prob):
    """Return the fraction of thresholded predictions that match the labels.

    Each score in ``y_prob`` is converted to a binary class: 1 if strictly
    greater than 0.5, else 0.  The classes are then compared element-wise
    with ``y_true``.

    Parameters
    ----------
    y_true : sequence
        Ground-truth binary labels (anything comparable with 0/1).
    y_prob : sequence
        Predicted scores/probabilities comparable with 0.5.

    Returns
    -------
    float
        Accuracy in [0, 1].  Returns 0.0 for empty input (the original
        implementation raised ZeroDivisionError there).
    """
    if len(y_prob) == 0:
        # Guard: avoid dividing by zero on an empty prediction set.
        return 0.0
    # Threshold at 0.5 (strict '>' kept from the original behavior).
    preds = [1 if score > 0.5 else 0 for score in y_prob]
    correct = sum(1 for pred, true in zip(preds, y_true) if pred == true)
    return correct / len(preds)
# Evaluate discriminator accuracy over the test loaders, batch by batch.
with torch.no_grad():
    pos_iter = iter(pos_test_dataloader)
    neg_iter = iter(neg_test_dataloader)
    count = 0
    # Positive-stripe label layout matching the training index construction.
    labels = torch.zeros(((2 * b_size) ** 2) * node_nums, device=device)
    for i in range(2 * b_size):
        labels[
            i * ((2 * b_size + 1) * node_nums) : node_nums
            + i * ((2 * b_size + 1) * node_nums)
        ] = 1
    for pos_batch in pos_iter:  # iter on train batches
        # print(count)
        count += 1
        pos_neg_graphs = dgl.batch(
            [pos_batch, next(neg_iter)]
        )  # concat pos and neg graphs
        pos_neg_graphs = pos_neg_graphs.to(device)
        # g_enc = gpmodel.encoder(pos_neg_graphs)
        # NOTE(review): scores come from `graphpmu`, not the loaded `gpmodel` —
        # confirm which model this evaluation is meant to measure.
        H = graphpmu.encoder(pos_neg_graphs)
        # hcat = pos_neg_graphs.ndata['hcat']
        # Hsize = H.shape
        # hcatsize = hcat.shape
        #
        # hcat = hcat.repeat(Hsize[0], 1)
        # H = torch.unsqueeze(H, dim=1)
        # H = H.expand(Hsize[0], hcatsize[0], Hsize[-1]).reshape(Hsize[0] * hcatsize[0], Hsize[-1])
        #
        # latent = torch.cat((H, hcat), axis=1)
        # latent = latent[0:int(H.shape[0] / 2)]
        latent = H
        pred = graphpmu.discriminator(latent)
        # pred = gpmodel.discriminator(g_enc)
        # pred = gpmodel(pos_neg_graphs)
        # y = torch.cat((torch.ones(pos_batch.batch_size), torch.zeros(pos_batch.batch_size)))
        # target = torch.cat(
        #     (torch.ones(pos_batch.batch_size, device=device), torch.zeros(pos_batch.batch_size, device=device)))
        # loss = BCE(pred, target)
        print(get_accuracy(labels, pred))
#%%
# clustering evaluation
# Load event labels and encode them as integer category codes.
labels = np.load("data/new_aug_labels_806_824_836_846.npy")
labels_cat = pd.DataFrame({"labels": labels})
labels_cat = labels_cat["labels"].astype("category").cat.codes.to_numpy()
lab_cat = np.unique(labels)
# Use the first 5000 training indices for the clustering study.
train_selector_partial = train_selector[0:5000]
selected_events = [pos_graphs[i] for i in train_selector_partial]
selected_labels = labels_cat[train_selector_partial]
#%%
# Encode every selected event graph into its latent vector.
gpmodel = gpmodel.eval()
all_latents = []
with torch.no_grad():
    for event_graph in selected_events:
        g_enc = gpmodel.encoder(event_graph.to(device))
        all_latents.append(g_enc.detach().cpu().numpy())
print(len(all_latents))
print(all_latents[0].shape)
all_latents = np.array(all_latents)
# Flatten to (n_events, latent_dim) for the clustering algorithms.
all_latents = all_latents.reshape(all_latents.shape[0], all_latents.shape[-1])
def all_clustering_models(latent, labels, cluster_num):
    """Fit several clustering algorithms on *latent* and print each one's
    adjusted Rand score against *labels*."""
    from sklearn import metrics
    from sklearn.cluster import DBSCAN, KMeans, AgglomerativeClustering
    from sklearn.mixture import GaussianMixture

    estimators = [
        ("gmm", GaussianMixture(n_components=cluster_num, random_state=0)),
        ("AgglomerativeClustering",
         AgglomerativeClustering(n_clusters=cluster_num)),
        ("DBSCAN", DBSCAN()),
        ("KMeans", KMeans(n_clusters=cluster_num, random_state=0)),
    ]
    for name, estimator in estimators:
        pred_labels = estimator.fit_predict(latent)
        # NOTE: 'trian' typo preserved so existing log greps keep working.
        print("trian accuracy (ARS) for " + name,
              metrics.adjusted_rand_score(labels, pred_labels))
    # from sklearn.cluster import SpectralClustering
    # pred_labels = SpectralClustering(n_clusters=cluster_num, assign_labels="discretize", random_state=0).fit_predict(latent)
    # print('trian accuracy (ARS) for SpectralClustering', metrics.adjusted_rand_score(labels, pred_labels))
#%%
from sklearn.manifold import TSNE
# 2-D t-SNE projection of the latent vectors; keep a pristine copy in `back`
# because the plotting cell below overwrites X_embedded in place.
X_embedded = TSNE(n_components=2).fit_transform(all_latents)
back = np.copy(X_embedded)
#%%
import matplotlib.pyplot as plt
# plt.scatter(X_embedded[:, 0], X_embedded[:, 1], c=selected_labels)
# plt.show()
# Fixed color per event-type name.
colors = {
    "capbank840": "darkgreen",
    "capbank848": "lime",
    "faultAB862": "hotpink",
    "faultABC816": "crimson",
    "faultC852": "gold",
    "loada836": "cyan",
    "motormed812": "dodgerblue",
    "motorsmall828": "navy",
    "onephase858": "blueviolet",
}
fig, ax = plt.subplots()
for ev in np.unique(selected_labels):
    # print(ev)
    ix = np.where(selected_labels == ev)
    # Hand-tuned affine adjustments per class, applied to the pristine copy
    # `back`, purely to separate the clusters visually in the figure.
    if ev == 0:
        X_embedded[ix, 0] = (back[ix, 0] - 0) / 1 + 0
        X_embedded[ix, 1] = (back[ix, 1] + 0) / 1 - 15
    if ev == 1:
        X_embedded[ix, 0] = (back[ix, 0] - 0) / 1 + 0
        X_embedded[ix, 1] = (back[ix, 1] + 0) / 1 + 25
    if ev == 2:
        X_embedded[ix, 0] = (back[ix, 0] - 0) / 1.5 - 30
        X_embedded[ix, 1] = (back[ix, 1] + 0) / 1.5 + 35
    if ev == 3:
        X_embedded[ix, 0] = (back[ix, 0] - 0) / 2
        X_embedded[ix, 1] = (back[ix, 1] + 0) / 2
    if ev == 4:
        X_embedded[ix, 0] = (back[ix, 0] - 0) / 2 - 40
        X_embedded[ix, 1] = (back[ix, 1] + 0) / 2
    if ev == 5:
        X_embedded[ix, 0] = (back[ix, 0] + 15) / 3 + 10
        X_embedded[ix, 1] = (back[ix, 1] + 100) / 4 - 70
    if ev == 6:
        X_embedded[ix, 0] = (back[ix, 0] + 25) / 2 - 0
        X_embedded[ix, 1] = (back[ix, 1] + 9) / 4 + 27.9
    if ev == 7:
        X_embedded[ix, 0] = (back[ix, 0] + 45) / -2 + 80
        X_embedded[ix, 1] = (back[ix, 1] - 35) / 4 - 5
    if ev == 8:
        X_embedded[ix, 0] = (back[ix, 0] - 30) / 3 + 45
        X_embedded[ix, 1] = (back[ix, 1] + 40) / 2 - 60
    ax.scatter(
        X_embedded[ix, 0],
        X_embedded[ix, 1],
        c=colors[lab_cat[ev]],
        label=lab_cat[ev],
        s=100,
    )
ax.legend()
plt.show()
#%%
# Score all clustering algorithms on the (adjusted) 2-D embedding.
cluster_num = 9
all_clustering_models(X_embedded, selected_labels, cluster_num)
# all_clustering_models(all_z, selected_labels, cluster_num)
#%%
# np.save('data/results/x_embed_best', X_embedded)
# Reload a previously saved "best" embedding for the final figure.
X_embedded = np.load("data/results/x_embed_best.npy")
#%%
import matplotlib
import matplotlib.pyplot as plt
pad = 5
xyticks_num = 10
unique_labels = np.unique(selected_labels)
clrs = ["r", "g", "b", "c", "m", "y", "k", "orange", "lime"]
# Map each sample's label to its index in the unique-label list for coloring.
values = [unique_labels.tolist().index(i) for i in selected_labels]
plt.style.use("default")
matplotlib.rcParams["figure.figsize"] = 20, 12
# colors = ListedColormap(['r','b','g'])
scatter = plt.scatter(X_embedded[:, 0], X_embedded[:, 1], c=values, s=100, cmap="tab10")
plt.title("TSNE for the embeddings after graph learning")
plt.xlabel("Feature 1")
plt.ylabel("Feature 2")
# Pad axes limits and ticks around the data extent.
plt.xlim([np.min(X_embedded[:, 0]) - pad, np.max(X_embedded[:, 0]) + pad])
plt.ylim([np.min(X_embedded[:, 1]) - pad, np.max(X_embedded[:, 1]) + pad])
plt.xticks(np.arange(np.min(X_embedded[:, 0]) - pad, np.max(X_embedded[:, 0]) + pad, 5))
plt.yticks(np.arange(np.min(X_embedded[:, 1]) - pad, np.max(X_embedded[:, 1]) + pad, 5))
plt.grid()
plt.legend(
    handles=scatter.legend_elements()[0],
    labels=unique_labels.tolist(),
    scatterpoints=10,
    fontsize=20,
)
plt.tight_layout()
# plt.savefig('figures/tsne_after_graph.png', dpi=300)
plt.show()
#%%
# Debug/training cell for the graphpmu model on one positive/negative pair.
pg = dgl.unbatch(pos_neg_graphs)[0]   # first graph of the batch
ng = dgl.unbatch(pos_neg_graphs)[-1]  # last graph of the batch
penc = graphpmu.encoder(pg)   # NOTE(review): penc/nenc/prep/pren are computed
nenc = graphpmu.encoder(ng)   # but never used below — debugging leftovers?
prep = graphpmu.discriminator(penc)
pren = graphpmu.discriminator(nenc)
png = dgl.batch([pg, ng])  # re-batch the two graphs for the forward pass
gparam = graphpmu.parameters()
graphpmu_optimizer = torch.optim.Adam(
    graphpmu.parameters(), lr=1e-3
)  # , weight_decay=1e-4
criteria = global_loss_
for epoch in range(5):
    # training mode
    graphpmu = graphpmu.train()
    # NOTE(review): zero_grad is also called again right before backward();
    # this first call is redundant but harmless.
    graphpmu_optimizer.zero_grad()
    train_losses = []
    # Print the first discriminator parameter row to confirm weights move.
    gparam = next(graphpmu.discriminator.parameters())[0]
    print("before: ", gparam)
    pred = graphpmu(png)
    # target = torch.cat((torch.ones(pos_batch.batch_size, device=device), torch.zeros(pos_batch.batch_size, device=device)))
    # loss = BCE(pred.ravel(), target)
    # NOTE(review): `measure` is not defined in this chunk — must come from an
    # earlier cell; confirm before running.
    loss = criteria(pred, measure)
    # print(loss)
    graphpmu_optimizer.zero_grad()
    loss.backward()
    print()
    train_losses.append(loss.item())
    graphpmu_optimizer.step()
    gparam = next(graphpmu.discriminator.parameters())[0]
    print("after: ", gparam)
    print("-----------------------")
#%%
# Build pairwise discriminator inputs: every graph-level embedding row of H
# is concatenated with every row of the node-feature matrix `hcat`.
H = gpmodel.encoder(pos_neg_graphs)
hcat = pos_neg_graphs.ndata["hcat"]
Hsize = H.shape        # (num_graphs, emb_dim)
hcatsize = hcat.shape  # (num_rows, feat_dim)
# Tile the node features once per graph embedding ...
hcat = hcat.repeat(Hsize[0], 1)
# ... and repeat each graph embedding once per node row so rows align 1:1.
H = torch.unsqueeze(H, dim=1)
H = H.expand(Hsize[0], hcatsize[0], Hsize[-1]).reshape(
    Hsize[0] * hcatsize[0], Hsize[-1]
)
print(H.shape, hcat.shape)
latent = torch.cat((H, hcat), axis=1)
print(latent.shape)
| 34.171461
| 133
| 0.609429
| 3,861
| 29,695
| 4.460502
| 0.096348
| 0.027697
| 0.029962
| 0.017768
| 0.778191
| 0.75659
| 0.753048
| 0.736151
| 0.723029
| 0.710719
| 0
| 0.027641
| 0.261694
| 29,695
| 868
| 134
| 34.210829
| 0.757891
| 0.235191
| 0
| 0.607445
| 0
| 0
| 0.046536
| 0.014476
| 0
| 0
| 0
| 0
| 0
| 1
| 0.00846
| false
| 0
| 0.040609
| 0
| 0.055838
| 0.028765
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
0a2afb7d58e0026165f76d5e2cd83afaa1e878b4
| 33
|
py
|
Python
|
Python Programs/print_name.py
|
manvikri22/hacktoberfest-2021
|
7b169746788835b9dacfdd4e64f3b25f17453178
|
[
"MIT"
] | 21
|
2021-10-01T01:52:56.000Z
|
2021-11-08T13:01:26.000Z
|
Python Programs/print_name.py
|
manvikri22/hacktoberfest-2021
|
7b169746788835b9dacfdd4e64f3b25f17453178
|
[
"MIT"
] | 30
|
2021-09-30T18:28:07.000Z
|
2021-10-03T05:23:45.000Z
|
Python Programs/print_name.py
|
manvikri22/hacktoberfest-2021
|
7b169746788835b9dacfdd4e64f3b25f17453178
|
[
"MIT"
] | 71
|
2021-09-30T17:32:43.000Z
|
2021-10-21T05:26:51.000Z
|
print(input("Enter your name: "))
| 33
| 33
| 0.69697
| 5
| 33
| 4.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 33
| 1
| 33
| 33
| 0.766667
| 0
| 0
| 0
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
0a63308c2a7012e0969bb0ca4f7c423047461bc5
| 7,406
|
py
|
Python
|
tests/test_logs/test_logs.py
|
mangin/moto
|
86fc340ffd1436e6d793cd4fec4003f4e53869c0
|
[
"Apache-2.0"
] | null | null | null |
tests/test_logs/test_logs.py
|
mangin/moto
|
86fc340ffd1436e6d793cd4fec4003f4e53869c0
|
[
"Apache-2.0"
] | null | null | null |
tests/test_logs/test_logs.py
|
mangin/moto
|
86fc340ffd1436e6d793cd4fec4003f4e53869c0
|
[
"Apache-2.0"
] | null | null | null |
import boto3
import sure # noqa
import six
from botocore.exceptions import ClientError
from moto import mock_logs, settings
from nose.tools import assert_raises
_logs_region = 'us-east-1' if settings.TEST_SERVER_MODE else 'us-west-2'
@mock_logs
def test_log_group_create():
    """A created log group is visible via describe_log_groups with no retention."""
    conn = boto3.client('logs', 'us-west-2')
    log_group_name = 'dummy'
    conn.create_log_group(logGroupName=log_group_name)
    response = conn.describe_log_groups(logGroupNamePrefix=log_group_name)
    assert len(response['logGroups']) == 1
    # AWS defaults to Never Expire for log group retention.
    # PEP 8: compare against None with `is`, not `==`.
    assert response['logGroups'][0].get('retentionInDays') is None
    conn.delete_log_group(logGroupName=log_group_name)
@mock_logs
def test_exceptions():
    """Duplicate creations and writes to unknown streams raise ClientError."""
    client = boto3.client('logs', 'us-west-2')
    group = 'dummy'
    stream = 'dummp-stream'
    line_events = [
        {
            'timestamp': 0,
            'message': 'line'
        },
    ]

    client.create_log_group(logGroupName=group)
    # Creating the same group a second time is rejected.
    with assert_raises(ClientError):
        client.create_log_group(logGroupName=group)

    client.create_log_stream(
        logGroupName=group,
        logStreamName=stream
    )
    # Creating the same stream a second time is rejected.
    with assert_raises(ClientError):
        client.create_log_stream(
            logGroupName=group,
            logStreamName=stream
        )

    # Writing to an existing stream succeeds ...
    client.put_log_events(
        logGroupName=group,
        logStreamName=stream,
        logEvents=line_events,
    )
    # ... but writing to a non-existent stream is rejected.
    with assert_raises(ClientError):
        client.put_log_events(
            logGroupName=group,
            logStreamName="invalid-stream",
            logEvents=line_events,
        )
@mock_logs
def test_put_logs():
    """put_log_events returns a sequence token and the events round-trip."""
    conn = boto3.client('logs', 'us-west-2')
    log_group_name = 'dummy'
    log_stream_name = 'stream'
    conn.create_log_group(logGroupName=log_group_name)
    conn.create_log_stream(
        logGroupName=log_group_name,
        logStreamName=log_stream_name
    )
    messages = [
        {'timestamp': 0, 'message': 'hello'},
        {'timestamp': 0, 'message': 'world'}
    ]
    putRes = conn.put_log_events(
        logGroupName=log_group_name,
        logStreamName=log_stream_name,
        logEvents=messages
    )
    res = conn.get_log_events(
        logGroupName=log_group_name,
        logStreamName=log_stream_name
    )
    events = res['events']
    nextSequenceToken = putRes['nextSequenceToken']
    # isinstance() already returns a bool; comparing it to True was redundant.
    assert isinstance(nextSequenceToken, six.string_types)
    assert len(nextSequenceToken) == 56
    events.should.have.length_of(2)
@mock_logs
def test_filter_logs_interleaved():
    """filter_log_events with interleaved=True returns the stored events in order."""
    client = boto3.client('logs', 'us-west-2')
    group = 'dummy'
    stream = 'stream'
    client.create_log_group(logGroupName=group)
    client.create_log_stream(
        logGroupName=group,
        logStreamName=stream
    )
    sent = [
        {'timestamp': 0, 'message': 'hello'},
        {'timestamp': 0, 'message': 'world'}
    ]
    client.put_log_events(
        logGroupName=group,
        logStreamName=stream,
        logEvents=sent
    )
    result = client.filter_log_events(
        logGroupName=group,
        logStreamNames=[stream],
        interleaved=True,
    )
    for original_message, returned in zip(sent, result['events']):
        # eventId must be a string; timestamp/message round-trip unchanged.
        returned['eventId'].should.equal(str(returned['eventId']))
        returned['timestamp'].should.equal(original_message['timestamp'])
        returned['message'].should.equal(original_message['message'])
@mock_logs
def test_put_retention_policy():
    """put_retention_policy sets retentionInDays on the log group."""
    client = boto3.client('logs', 'us-west-2')
    group = 'dummy'
    client.create_log_group(logGroupName=group)
    client.put_retention_policy(logGroupName=group, retentionInDays=7)
    described = client.describe_log_groups(logGroupNamePrefix=group)
    assert len(described['logGroups']) == 1
    assert described['logGroups'][0].get('retentionInDays') == 7
    client.delete_log_group(logGroupName=group)
@mock_logs
def test_delete_retention_policy():
    """delete_retention_policy resets retention back to Never Expire (None)."""
    conn = boto3.client('logs', 'us-west-2')
    log_group_name = 'dummy'
    conn.create_log_group(logGroupName=log_group_name)
    conn.put_retention_policy(logGroupName=log_group_name, retentionInDays=7)
    response = conn.describe_log_groups(logGroupNamePrefix=log_group_name)
    assert len(response['logGroups']) == 1
    assert response['logGroups'][0].get('retentionInDays') == 7
    conn.delete_retention_policy(logGroupName=log_group_name)
    response = conn.describe_log_groups(logGroupNamePrefix=log_group_name)
    assert len(response['logGroups']) == 1
    # PEP 8: compare against None with `is`, not `==`.
    assert response['logGroups'][0].get('retentionInDays') is None
    conn.delete_log_group(logGroupName=log_group_name)
@mock_logs
def test_get_log_events():
    """Paging through get_log_events with forward and backward tokens."""
    client = boto3.client('logs', 'us-west-2')
    group = 'test'
    stream = 'stream'
    client.create_log_group(logGroupName=group)
    client.create_log_stream(
        logGroupName=group,
        logStreamName=stream
    )
    client.put_log_events(
        logGroupName=group,
        logStreamName=stream,
        logEvents=[{'timestamp': x, 'message': str(x)} for x in range(20)]
    )

    # First page: the 10 oldest events.
    page = client.get_log_events(
        logGroupName=group,
        logStreamName=stream,
        limit=10)
    page['events'].should.have.length_of(10)
    page.should.have.key('nextForwardToken')
    page.should.have.key('nextBackwardToken')
    page['nextForwardToken'].should.equal('f/00000000000000000000000000000000000000000000000000000010')
    page['nextBackwardToken'].should.equal('b/00000000000000000000000000000000000000000000000000000000')
    for i, event in enumerate(page['events']):
        event['timestamp'].should.equal(i)
        event['message'].should.equal(str(i))

    # Second page via the forward token: events 10..19.
    page = client.get_log_events(
        logGroupName=group,
        logStreamName=stream,
        nextToken=page['nextForwardToken'],
        limit=10)
    page['events'].should.have.length_of(10)
    page.should.have.key('nextForwardToken')
    page.should.have.key('nextBackwardToken')
    page['nextForwardToken'].should.equal('f/00000000000000000000000000000000000000000000000000000020')
    page['nextBackwardToken'].should.equal('b/00000000000000000000000000000000000000000000000000000000')
    for i, event in enumerate(page['events']):
        event['timestamp'].should.equal(i + 10)
        event['message'].should.equal(str(i + 10))

    # Walking back via the backward token returns the first 10 events again.
    page = client.get_log_events(
        logGroupName=group,
        logStreamName=stream,
        nextToken=page['nextBackwardToken'],
        limit=10)
    page['events'].should.have.length_of(10)
    page.should.have.key('nextForwardToken')
    page.should.have.key('nextBackwardToken')
    for i, event in enumerate(page['events']):
        event['timestamp'].should.equal(i)
        event['message'].should.equal(str(i))
| 32.482456
| 104
| 0.684985
| 860
| 7,406
| 5.627907
| 0.136047
| 0.087603
| 0.099174
| 0.143802
| 0.796488
| 0.761364
| 0.751446
| 0.751446
| 0.724793
| 0.701446
| 0
| 0.048246
| 0.199568
| 7,406
| 227
| 105
| 32.625551
| 0.768219
| 0.013503
| 0
| 0.691489
| 0
| 0
| 0.145987
| 0.031772
| 0
| 0
| 0
| 0
| 0.074468
| 1
| 0.037234
| false
| 0
| 0.031915
| 0
| 0.069149
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
0a6a3461374a352b1ddbf2d25a6f923ed9eb7efc
| 8,902
|
py
|
Python
|
magnum/tests/unit/conductor/test_bay_lock.py
|
mjbrewer/testIndex
|
420dc071d4240a89b6f266e8d2575cedb39bfea0
|
[
"Apache-2.0"
] | null | null | null |
magnum/tests/unit/conductor/test_bay_lock.py
|
mjbrewer/testIndex
|
420dc071d4240a89b6f266e8d2575cedb39bfea0
|
[
"Apache-2.0"
] | null | null | null |
magnum/tests/unit/conductor/test_bay_lock.py
|
mjbrewer/testIndex
|
420dc071d4240a89b6f266e8d2575cedb39bfea0
|
[
"Apache-2.0"
] | null | null | null |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import oslo_messaging as messaging
from magnum.common import exception
from magnum.common import short_id
from magnum.conductor import bay_lock
from magnum.tests import base
from magnum.tests.unit.objects import utils as obj_utils
from mock import patch
class BayLockTest(base.TestCase):
    """Unit tests for bay_lock.BayLock acquire / steal / release behaviour."""

    def setUp(self):
        super(BayLockTest, self).setUp()
        # Identity of "this" conductor process for lock ownership checks.
        self.conductor_id = short_id.generate_id()
        self.bay = obj_utils.get_test_bay(self.context)

    class TestThreadLockException(Exception):
        # Sentinel raised inside thread_lock() to verify release-on-error.
        pass

    @patch('magnum.objects.BayLock.create', return_value=None)
    def test_successful_acquire_new_lock(self, mock_object_create):
        # create() returning None means no prior holder: acquire succeeds.
        baylock = bay_lock.BayLock(self.context, self.bay, self.conductor_id)
        baylock.acquire()
        mock_object_create.assert_called_once_with(self.bay.uuid,
                                                   self.conductor_id)

    @patch('magnum.objects.BayLock.create')
    def test_failed_acquire_current_conductor_lock(self, mock_object_create):
        # create() returning our own id means the lock is already held here.
        mock_object_create.return_value = self.conductor_id
        baylock = bay_lock.BayLock(self.context, self.bay, self.conductor_id)
        self.assertRaises(exception.OperationInProgress, baylock.acquire)
        mock_object_create.assert_called_once_with(self.bay.uuid,
                                                   self.conductor_id)

    @patch('magnum.objects.BayLock.steal', return_value=None)
    @patch('magnum.objects.BayLock.create', return_value='fake-conductor-id')
    def test_successful_acquire_dead_conductor_lock(self, mock_object_create,
                                                    mock_object_steal):
        # Holder is another conductor that is dead: the lock gets stolen.
        baylock = bay_lock.BayLock(self.context, self.bay, self.conductor_id)
        with mock.patch.object(baylock, 'conductor_alive',
                               return_value=False):
            baylock.acquire()
        mock_object_create.assert_called_once_with(self.bay.uuid,
                                                   self.conductor_id)
        mock_object_steal.assert_called_once_with(
            self.bay.uuid,
            'fake-conductor-id', self.conductor_id)

    @patch('magnum.objects.BayLock.create', return_value='fake-conductor-id')
    def test_failed_acquire_alive_conductor_lock(self, mock_object_create):
        # Holder is another conductor that is alive: acquire must fail.
        baylock = bay_lock.BayLock(self.context, self.bay, self.conductor_id)
        with mock.patch.object(baylock, 'conductor_alive',
                               return_value=True):
            self.assertRaises(exception.OperationInProgress, baylock.acquire)
        mock_object_create.assert_called_once_with(self.bay.uuid,
                                                   self.conductor_id)

    @patch('magnum.objects.BayLock.steal', return_value='fake-conductor-id2')
    @patch('magnum.objects.BayLock.create', return_value='fake-conductor-id')
    def test_failed_acquire_dead_conductor_lock(self, mock_object_create,
                                                mock_object_steal):
        # steal() returning a different id means another conductor won the race.
        baylock = bay_lock.BayLock(self.context, self.bay, self.conductor_id)
        with mock.patch.object(baylock, 'conductor_alive',
                               return_value=False):
            self.assertRaises(exception.OperationInProgress, baylock.acquire)
        mock_object_create.assert_called_once_with(self.bay.uuid,
                                                   self.conductor_id)
        mock_object_steal.assert_called_once_with(
            self.bay.uuid,
            'fake-conductor-id', self.conductor_id)

    @patch('magnum.objects.BayLock.steal', side_effect=[True, None])
    @patch('magnum.objects.BayLock.create', return_value='fake-conductor-id')
    def test_successful_acquire_with_retry(self, mock_object_create,
                                           mock_object_steal):
        # First steal attempt loses the race (True), the retry succeeds (None).
        baylock = bay_lock.BayLock(self.context, self.bay, self.conductor_id)
        with mock.patch.object(baylock, 'conductor_alive',
                               return_value=False):
            baylock.acquire()
        mock_object_create.assert_has_calls(
            [mock.call(self.bay.uuid, self.conductor_id)] * 2)
        mock_object_steal.assert_has_calls(
            [mock.call(self.bay.uuid, 'fake-conductor-id',
                       self.conductor_id)] * 2)

    @patch('magnum.objects.BayLock.steal', return_value=True)
    @patch('magnum.objects.BayLock.create', return_value='fake-conductor-id')
    def test_failed_acquire_one_retry_only(self, mock_object_create,
                                           mock_object_steal):
        # steal() keeps failing: acquire retries exactly once, then gives up.
        baylock = bay_lock.BayLock(self.context, self.bay, self.conductor_id)
        with mock.patch.object(baylock, 'conductor_alive',
                               return_value=False):
            self.assertRaises(exception.OperationInProgress, baylock.acquire)
        mock_object_create.assert_has_calls(
            [mock.call(self.bay.uuid, self.conductor_id)] * 2)
        mock_object_steal.assert_has_calls(
            [mock.call(self.bay.uuid, 'fake-conductor-id',
                       self.conductor_id)] * 2)

    @patch('magnum.objects.BayLock.release', return_value=None)
    @patch('magnum.objects.BayLock.create', return_value=None)
    def test_thread_lock_acquire_success_with_exception(self,
                                                        mock_object_create,
                                                        mock_object_release):
        # A lock acquired by thread_lock() is released when the body raises.
        baylock = bay_lock.BayLock(self.context, self.bay, self.conductor_id)
        def check_thread_lock():
            with baylock.thread_lock(self.bay.uuid):
                self.assertEqual(1, mock_object_create.call_count)
                raise self.TestThreadLockException
        self.assertRaises(self.TestThreadLockException, check_thread_lock)
        self.assertEqual(1, mock_object_release.call_count)

    @patch('magnum.objects.BayLock.release', return_value=None)
    @patch('magnum.objects.BayLock.create')
    def test_thread_lock_acquire_fail_with_exception(self, mock_object_create,
                                                     mock_object_release):
        # If acquire itself fails, there is nothing to release on the way out.
        mock_object_create.return_value = self.conductor_id
        baylock = bay_lock.BayLock(self.context, self.bay, self.conductor_id)
        def check_thread_lock():
            with baylock.thread_lock(self.bay.uuid):
                self.assertEqual(1, mock_object_create.call_count)
                raise exception.OperationInProgress
        self.assertRaises(exception.OperationInProgress, check_thread_lock)
        assert not mock_object_release.called

    @patch('magnum.objects.BayLock.release', return_value=None)
    @patch('magnum.objects.BayLock.create', return_value=None)
    def test_thread_lock_acquire_success_no_exception(self, mock_object_create,
                                                      mock_object_release):
        # On a clean exit, thread_lock does not release (the thread keeps it).
        baylock = bay_lock.BayLock(self.context, self.bay, self.conductor_id)
        with baylock.thread_lock(self.bay.uuid):
            self.assertEqual(1, mock_object_create.call_count)
        assert not mock_object_release.called

    @patch('magnum.conductor.api.ListenerAPI.__new__')
    def test_conductor_alive_ok(self, mock_listener_api_new):
        # A successful ping means the holder conductor is alive.
        mock_listener_api = mock.MagicMock()
        mock_listener_api.ping_conductor.return_value = True
        mock_listener_api_new.return_value = mock_listener_api
        baylock = bay_lock.BayLock(self.context, self.bay, self.conductor_id)
        ret = baylock.conductor_alive(self.context, self.conductor_id)
        self.assertIs(True, ret)
        self.assertEqual(1, mock_listener_api_new.call_count)

    @patch('magnum.conductor.api.ListenerAPI.__new__')
    def test_conductor_alive_timeout(self, mock_listener_api_new):
        # A messaging timeout on ping is treated as "conductor dead".
        mock_listener_api = mock.MagicMock()
        mock_listener_api.ping_conductor.side_effect = (
            messaging.MessagingTimeout('too slow'))
        mock_listener_api_new.return_value = mock_listener_api
        baylock = bay_lock.BayLock(self.context, self.bay, self.conductor_id)
        ret = baylock.conductor_alive(self.context, self.conductor_id)
        self.assertIs(False, ret)
        self.assertEqual(1, mock_listener_api_new.call_count)
| 47.860215
| 79
| 0.657156
| 1,042
| 8,902
| 5.329175
| 0.140115
| 0.073294
| 0.075635
| 0.076535
| 0.786422
| 0.778678
| 0.777057
| 0.771655
| 0.752206
| 0.742482
| 0
| 0.002266
| 0.256459
| 8,902
| 185
| 80
| 48.118919
| 0.836682
| 0.061335
| 0
| 0.690647
| 0
| 0
| 0.099041
| 0.068585
| 0
| 0
| 0
| 0
| 0.194245
| 1
| 0.107914
| false
| 0.007194
| 0.057554
| 0
| 0.179856
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
6a739261ccf31e9f18f7a4f4fbcfd3f285e789a6
| 302
|
py
|
Python
|
smithers/dataset/__init__.py
|
LauraMeneghetti/Smithers
|
908fb3201eabfd01fe1712352c605e1f4b15d896
|
[
"MIT"
] | 6
|
2021-04-09T10:00:12.000Z
|
2022-02-02T05:13:42.000Z
|
smithers/dataset/__init__.py
|
LauraMeneghetti/Smithers
|
908fb3201eabfd01fe1712352c605e1f4b15d896
|
[
"MIT"
] | 5
|
2021-05-06T14:43:22.000Z
|
2021-09-30T15:29:59.000Z
|
smithers/dataset/__init__.py
|
LauraMeneghetti/Smithers
|
908fb3201eabfd01fe1712352c605e1f4b15d896
|
[
"MIT"
] | 6
|
2021-05-06T12:06:57.000Z
|
2022-03-29T07:08:46.000Z
|
from .dataset_collector import DatasetCollector
from .datasets.elastic_block import ElasticBlockDataset
from .datasets.termal import TermalDataset
from .datasets.navier_stokes import NavierStokesDataset
from .datasets.graetz import GraetzDataset
from .datasets.unsteady_heat import UnsteadyHeatDataset
| 43.142857
| 55
| 0.884106
| 33
| 302
| 7.969697
| 0.575758
| 0.228137
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.07947
| 302
| 6
| 56
| 50.333333
| 0.946043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
6a7e25f2f96629f53457555d246a1d2b90e68f94
| 151
|
py
|
Python
|
deep_learn/device.py
|
flashlin/pycore
|
6055821df05f0343fcf8d281fa610d9aea875936
|
[
"MIT"
] | null | null | null |
deep_learn/device.py
|
flashlin/pycore
|
6055821df05f0343fcf8d281fa610d9aea875936
|
[
"MIT"
] | null | null | null |
deep_learn/device.py
|
flashlin/pycore
|
6055821df05f0343fcf8d281fa610d9aea875936
|
[
"MIT"
] | null | null | null |
import torch
def get_torch_device():
    """Return the preferred torch device: CUDA when available, otherwise CPU."""
    if torch.cuda.is_available():
        return torch.device('cuda')
    return torch.device('cpu')
| 21.571429
| 88
| 0.688742
| 21
| 151
| 4.809524
| 0.571429
| 0.326733
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.192053
| 151
| 6
| 89
| 25.166667
| 0.827869
| 0
| 0
| 0
| 0
| 0
| 0.048276
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
6a86c38854c723e87d8c92eb286b882f2aa731b7
| 116
|
py
|
Python
|
dataset/__init__.py
|
s-mizuki-nlp/semantic_specialization
|
4d00a461f18828ee8ebaccf7c737a32ccc11809f
|
[
"Apache-2.0"
] | null | null | null |
dataset/__init__.py
|
s-mizuki-nlp/semantic_specialization
|
4d00a461f18828ee8ebaccf7c737a32ccc11809f
|
[
"Apache-2.0"
] | null | null | null |
dataset/__init__.py
|
s-mizuki-nlp/semantic_specialization
|
4d00a461f18828ee8ebaccf7c737a32ccc11809f
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from ._integrated import WSDTaskDataset, WSDTaskDatasetCollateFunction
| 29
| 70
| 0.767241
| 12
| 116
| 7.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009524
| 0.094828
| 116
| 4
| 70
| 29
| 0.828571
| 0.353448
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
6aa5fbbc14e05c49848c4cd12304acb91f258277
| 22
|
py
|
Python
|
Estudos/Operacoes_Matematicas/soma.py
|
Sabrinadev/Python
|
48ae12d4447787e0a5157147d54b3c577775e3b6
|
[
"MIT"
] | null | null | null |
Estudos/Operacoes_Matematicas/soma.py
|
Sabrinadev/Python
|
48ae12d4447787e0a5157147d54b3c577775e3b6
|
[
"MIT"
] | null | null | null |
Estudos/Operacoes_Matematicas/soma.py
|
Sabrinadev/Python
|
48ae12d4447787e0a5157147d54b3c577775e3b6
|
[
"MIT"
] | null | null | null |
x + y # soma de x e y
| 11
| 21
| 0.5
| 7
| 22
| 1.571429
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.409091
| 22
| 1
| 22
| 22
| 0.846154
| 0.590909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
0a72e7092461b7eee00b1eee43556237c7f8655f
| 28
|
py
|
Python
|
hematite/__init__.py
|
abilian/hematite
|
1d8c3390f06522cb03b2c6a39b3b8aff87f2ac71
|
[
"BSD-3-Clause"
] | null | null | null |
hematite/__init__.py
|
abilian/hematite
|
1d8c3390f06522cb03b2c6a39b3b8aff87f2ac71
|
[
"BSD-3-Clause"
] | null | null | null |
hematite/__init__.py
|
abilian/hematite
|
1d8c3390f06522cb03b2c6a39b3b8aff87f2ac71
|
[
"BSD-3-Clause"
] | null | null | null |
# from client import Client
| 14
| 27
| 0.785714
| 4
| 28
| 5.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.178571
| 28
| 1
| 28
| 28
| 0.956522
| 0.892857
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
0aa7bdfd84c1afe214e540f2e7a8a40bbc81581c
| 211
|
py
|
Python
|
oscar/lib/python2.7/site-packages/traitlets/config/__init__.py
|
sainjusajan/django-oscar
|
466e8edc807be689b0a28c9e525c8323cc48b8e1
|
[
"BSD-3-Clause"
] | null | null | null |
oscar/lib/python2.7/site-packages/traitlets/config/__init__.py
|
sainjusajan/django-oscar
|
466e8edc807be689b0a28c9e525c8323cc48b8e1
|
[
"BSD-3-Clause"
] | null | null | null |
oscar/lib/python2.7/site-packages/traitlets/config/__init__.py
|
sainjusajan/django-oscar
|
466e8edc807be689b0a28c9e525c8323cc48b8e1
|
[
"BSD-3-Clause"
] | null | null | null |
# encoding: utf-8
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from .application import *
from .configurable import *
from .loader import Config
| 23.444444
| 59
| 0.739336
| 27
| 211
| 5.777778
| 0.814815
| 0.128205
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005882
| 0.194313
| 211
| 8
| 60
| 26.375
| 0.911765
| 0.530806
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
0aad0db179d7c8737e69701dd7a630ba13d2af86
| 226
|
py
|
Python
|
tests/modules/imported/moduleb.py
|
MoonStarCZW/py2rb
|
89b247717d33d780fbf143e1583bfe9252984da4
|
[
"MIT"
] | 124
|
2017-08-19T05:37:16.000Z
|
2022-03-08T18:24:18.000Z
|
tests/modules/imported/moduleb.py
|
MoonStarCZW/py2rb
|
89b247717d33d780fbf143e1583bfe9252984da4
|
[
"MIT"
] | 15
|
2017-12-16T05:59:31.000Z
|
2022-02-08T02:51:17.000Z
|
tests/modules/imported/moduleb.py
|
MoonStarCZW/py2rb
|
89b247717d33d780fbf143e1583bfe9252984da4
|
[
"MIT"
] | 18
|
2017-09-25T11:57:04.000Z
|
2022-02-19T17:33:48.000Z
|
def foo():
    """Write the literal text "foo" to stdout."""
    print('foo')
def moduleb_fn():
    """Write a marker line identifying this module-level function to stdout."""
    print('import_moduleb.moduleb_fn()')
class moduleb_class(object):
    """Minimal class used to exercise cross-module imports in the test suite."""

    def __init__(self):
        pass

    def msg(self, val):
        """Return "moduleb_class:" followed by the string form of *val*."""
        return "".join(["moduleb_class:", str(val)])
| 15.066667
| 40
| 0.606195
| 29
| 226
| 4.413793
| 0.517241
| 0.140625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.247788
| 226
| 14
| 41
| 16.142857
| 0.752941
| 0
| 0
| 0
| 0
| 0
| 0.195556
| 0.12
| 0
| 0
| 0
| 0
| 0
| 1
| 0.444444
| false
| 0.111111
| 0.111111
| 0.111111
| 0.777778
| 0.222222
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 5
|
0aaef02e9f7fec47e99899effe19dbbffd8c4855
| 1,589
|
py
|
Python
|
users/tests/integration/test_storage.py
|
heolin123/funcrowd
|
20167783de208394c09ed0429a5f02ec6dd79c42
|
[
"MIT"
] | null | null | null |
users/tests/integration/test_storage.py
|
heolin123/funcrowd
|
20167783de208394c09ed0429a5f02ec6dd79c42
|
[
"MIT"
] | 11
|
2019-11-12T23:26:45.000Z
|
2021-06-10T17:37:23.000Z
|
users/tests/integration/test_storage.py
|
heolin123/funcrowd
|
20167783de208394c09ed0429a5f02ec6dd79c42
|
[
"MIT"
] | null | null | null |
import pytest
import json
from django.test import Client
@pytest.mark.django_db
def test_storage_view(user1):
    """A fresh key reads back empty; posted data is stored and returned."""
    http = Client()
    http.force_login(user1)

    # A key that has never been written reads back with empty data.
    initial = http.get('/api/v1/users/storage/test/')
    assert initial.status_code == 200
    assert initial.data['key'] == 'test'
    assert initial.data['data'] == {}

    # Posting a payload stores it under the key and echoes it back.
    posted = http.post(
        '/api/v1/users/storage/test/',
        json.dumps({"data": {"test": 1}}),
        content_type='application/json')
    assert posted.status_code == 200
    assert posted.data['key'] == 'test'
    assert posted.data['data'] == {"test": 1}

    # A subsequent GET returns the stored payload.
    fetched = http.get('/api/v1/users/storage/test/')
    assert fetched.status_code == 200
    assert fetched.data['key'] == 'test'
    assert fetched.data['data'] == {"test": 1}
@pytest.mark.django_db
def test_storage_view_with_existing_data(user1, setup_storage_data):
    """Bulk POST merges into existing keys and adds new ones."""
    http = Client()
    http.force_login(user1)

    # The fixture seeds two keys.
    listing = http.get('/api/v1/users/storage/')
    assert len(listing.data) == 2
    assert listing.data[0]['key'] == 'test1'
    assert listing.data[1]['key'] == 'test2'

    # Posting overwrites an existing key and creates a new one.
    updates = [
        {'key': 'test1', "data": "value"},
        {'key': 'test3', "data": "value"}
    ]
    listing = http.post(
        '/api/v1/users/storage/',
        json.dumps(updates),
        content_type='application/json')
    assert len(listing.data) == 3
    assert listing.data[0]['key'] == 'test1'
    assert listing.data[1]['key'] == 'test2'
    assert listing.data[2]['key'] == 'test3'
| 31.156863
| 80
| 0.617999
| 196
| 1,589
| 4.918367
| 0.22449
| 0.20332
| 0.205394
| 0.088174
| 0.789419
| 0.775934
| 0.775934
| 0.70332
| 0.53527
| 0.53527
| 0
| 0.0278
| 0.207678
| 1,589
| 50
| 81
| 31.78
| 0.737887
| 0
| 0
| 0.536585
| 0
| 0
| 0.176211
| 0.078666
| 0
| 0
| 0
| 0
| 0.390244
| 1
| 0.04878
| false
| 0
| 0.073171
| 0
| 0.121951
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
0abbbac7956c706e0de217f98f9f327753b62cf7
| 654
|
py
|
Python
|
GM/constant.py
|
HugoAhoy/pymodbus
|
e6801af003922c452105857b72916c5552ae4a87
|
[
"BSD-3-Clause"
] | 2
|
2021-12-13T10:30:56.000Z
|
2021-12-13T16:41:10.000Z
|
GM/constant.py
|
HugoAhoy/pymodbus
|
e6801af003922c452105857b72916c5552ae4a87
|
[
"BSD-3-Clause"
] | null | null | null |
GM/constant.py
|
HugoAhoy/pymodbus
|
e6801af003922c452105857b72916c5552ae4a87
|
[
"BSD-3-Clause"
] | null | null | null |
from gmssl import sm2
# SM2 elliptic-curve key pairs for the two endpoints.
# NOTE(review): private key material is hard-coded in source — acceptable for
# a demo/test fixture only; production keys must come from secure storage.
# private key for server(db) and client(da)
da ='7098f9d29c01573aa5ad12717bf998f0b36fe6dc30138feea03e764077a4a34b'
db = '7fffe3487d53b50906d4db005e033ed78ac6fc9de93537aae27e30eccdf68f1a'
# public key for server(pb) and client(pa)
pa = 'd7ba85bbf9748bc1f4c92d1345b7a9f8a5d3a9d1aa155f37115dc960005e60c8fe8cbdd7d9271c5501f8187eb5c5398ed5b19f523b9c6434fc3a7498f61cb800'
pb = 'a3e8fc7c41c736fad165d0605478a8767a967ad2574da188e52d6cbb5acac10853fce1b765fa8c9a0b6c7efd12f6e6cef0d187d327260260ac9f47d93b3f3155'
# eclipse parameter ZA, ZB
sm2a = sm2.CryptSM2(da, pa)
sm2b = sm2.CryptSM2(db, pb)
# Z values computed with an empty user id (b'').
ZA = sm2a._sm3_z(b'')
ZB = sm2b._sm3_z(b'')
| 40.875
| 135
| 0.851682
| 52
| 654
| 10.634615
| 0.557692
| 0.0217
| 0.0434
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.402985
| 0.077982
| 654
| 16
| 136
| 40.875
| 0.514096
| 0.163609
| 0
| 0
| 0
| 0
| 0.705882
| 0.705882
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.111111
| 0
| 0.111111
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
0afd711263ec2e759d7e6556b1f409f69a809a18
| 11,127
|
py
|
Python
|
sdk/python/pulumi_aws/chime/voice_connector_group.py
|
rapzo/pulumi-aws
|
390a098221315d98a54ba97d1559e750dc3053b7
|
[
"ECL-2.0",
"Apache-2.0"
] | 260
|
2018-06-18T14:57:00.000Z
|
2022-03-29T11:41:03.000Z
|
sdk/python/pulumi_aws/chime/voice_connector_group.py
|
rapzo/pulumi-aws
|
390a098221315d98a54ba97d1559e750dc3053b7
|
[
"ECL-2.0",
"Apache-2.0"
] | 1,154
|
2018-06-19T20:38:20.000Z
|
2022-03-31T19:48:16.000Z
|
sdk/python/pulumi_aws/chime/voice_connector_group.py
|
rapzo/pulumi-aws
|
390a098221315d98a54ba97d1559e750dc3053b7
|
[
"ECL-2.0",
"Apache-2.0"
] | 115
|
2018-06-28T03:20:27.000Z
|
2022-03-29T11:41:06.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['VoiceConnectorGroupArgs', 'VoiceConnectorGroup']
@pulumi.input_type
class VoiceConnectorGroupArgs:
    # Generated input class: pulumi.set/pulumi.get store values under the
    # resource's wire-format property names for the Pulumi engine.
    def __init__(__self__, *,
                 connectors: Optional[pulumi.Input[Sequence[pulumi.Input['VoiceConnectorGroupConnectorArgs']]]] = None,
                 name: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a VoiceConnectorGroup resource.
        :param pulumi.Input[Sequence[pulumi.Input['VoiceConnectorGroupConnectorArgs']]] connectors: The Amazon Chime Voice Connectors to route inbound calls to.
        :param pulumi.Input[str] name: The name of the Amazon Chime Voice Connector group.
        """
        # Only record inputs the caller supplied; unset properties stay absent
        # so the provider can apply its own defaults.
        if connectors is not None:
            pulumi.set(__self__, "connectors", connectors)
        if name is not None:
            pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter
    def connectors(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['VoiceConnectorGroupConnectorArgs']]]]:
        """
        The Amazon Chime Voice Connectors to route inbound calls to.
        """
        return pulumi.get(self, "connectors")

    @connectors.setter
    def connectors(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['VoiceConnectorGroupConnectorArgs']]]]):
        pulumi.set(self, "connectors", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the Amazon Chime Voice Connector group.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
@pulumi.input_type
class _VoiceConnectorGroupState:
    # Generated state class: mirrors VoiceConnectorGroupArgs but is used for
    # looking up / importing existing resources rather than creating them.
    def __init__(__self__, *,
                 connectors: Optional[pulumi.Input[Sequence[pulumi.Input['VoiceConnectorGroupConnectorArgs']]]] = None,
                 name: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering VoiceConnectorGroup resources.
        :param pulumi.Input[Sequence[pulumi.Input['VoiceConnectorGroupConnectorArgs']]] connectors: The Amazon Chime Voice Connectors to route inbound calls to.
        :param pulumi.Input[str] name: The name of the Amazon Chime Voice Connector group.
        """
        # Only record state values the caller supplied.
        if connectors is not None:
            pulumi.set(__self__, "connectors", connectors)
        if name is not None:
            pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter
    def connectors(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['VoiceConnectorGroupConnectorArgs']]]]:
        """
        The Amazon Chime Voice Connectors to route inbound calls to.
        """
        return pulumi.get(self, "connectors")

    @connectors.setter
    def connectors(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['VoiceConnectorGroupConnectorArgs']]]]):
        pulumi.set(self, "connectors", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the Amazon Chime Voice Connector group.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
class VoiceConnectorGroup(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 connectors: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VoiceConnectorGroupConnectorArgs']]]]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Creates an Amazon Chime Voice Connector group under the administrator's AWS account. You can associate Amazon Chime Voice Connectors with the Amazon Chime Voice Connector group by including VoiceConnectorItems in the request.

        You can include Amazon Chime Voice Connectors from different AWS Regions in your group. This creates a fault tolerant mechanism for fallback in case of availability events.

        ## Example Usage

        ```python
        import pulumi
        import pulumi_aws as aws

        vc1 = aws.chime.VoiceConnector("vc1",
            require_encryption=True,
            aws_region="us-east-1")
        vc2 = aws.chime.VoiceConnector("vc2",
            require_encryption=True,
            aws_region="us-west-2")
        group = aws.chime.VoiceConnectorGroup("group", connectors=[
            aws.chime.VoiceConnectorGroupConnectorArgs(
                voice_connector_id=vc1.id,
                priority=1,
            ),
            aws.chime.VoiceConnectorGroupConnectorArgs(
                voice_connector_id=vc2.id,
                priority=3,
            ),
        ])
        ```

        ## Import

        Voice Connector Groups can be imported using the name, e.g.

        ```sh
        $ pulumi import aws:chime/voiceConnectorGroup:VoiceConnectorGroup default example
        ```

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VoiceConnectorGroupConnectorArgs']]]] connectors: The Amazon Chime Voice Connectors to route inbound calls to.
        :param pulumi.Input[str] name: The name of the Amazon Chime Voice Connector group.
        """
        ...

    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: Optional[VoiceConnectorGroupArgs] = None,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Creates an Amazon Chime Voice Connector group under the administrator's AWS account. You can associate Amazon Chime Voice Connectors with the Amazon Chime Voice Connector group by including VoiceConnectorItems in the request.

        You can include Amazon Chime Voice Connectors from different AWS Regions in your group. This creates a fault tolerant mechanism for fallback in case of availability events.

        ## Example Usage

        ```python
        import pulumi
        import pulumi_aws as aws

        vc1 = aws.chime.VoiceConnector("vc1",
            require_encryption=True,
            aws_region="us-east-1")
        vc2 = aws.chime.VoiceConnector("vc2",
            require_encryption=True,
            aws_region="us-west-2")
        group = aws.chime.VoiceConnectorGroup("group", connectors=[
            aws.chime.VoiceConnectorGroupConnectorArgs(
                voice_connector_id=vc1.id,
                priority=1,
            ),
            aws.chime.VoiceConnectorGroupConnectorArgs(
                voice_connector_id=vc2.id,
                priority=3,
            ),
        ])
        ```

        ## Import

        Voice Connector Groups can be imported using the name, e.g.

        ```sh
        $ pulumi import aws:chime/voiceConnectorGroup:VoiceConnectorGroup default example
        ```

        :param str resource_name: The name of the resource.
        :param VoiceConnectorGroupArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...

    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads above: an args-object call or a
        # keyword-argument call, both funnel into _internal_init.
        resource_args, opts = _utilities.get_resource_args_opts(VoiceConnectorGroupArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       connectors: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VoiceConnectorGroupConnectorArgs']]]]] = None,
                       name: Optional[pulumi.Input[str]] = None,
                       __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # __props__ is only meaningful when re-hydrating an existing resource
        # (opts.id set by get()); building it here is the create path.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = VoiceConnectorGroupArgs.__new__(VoiceConnectorGroupArgs)
            __props__.__dict__["connectors"] = connectors
            __props__.__dict__["name"] = name
        super(VoiceConnectorGroup, __self__).__init__(
            'aws:chime/voiceConnectorGroup:VoiceConnectorGroup',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            connectors: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VoiceConnectorGroupConnectorArgs']]]]] = None,
            name: Optional[pulumi.Input[str]] = None) -> 'VoiceConnectorGroup':
        """
        Get an existing VoiceConnectorGroup resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VoiceConnectorGroupConnectorArgs']]]] connectors: The Amazon Chime Voice Connectors to route inbound calls to.
        :param pulumi.Input[str] name: The name of the Amazon Chime Voice Connector group.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = _VoiceConnectorGroupState.__new__(_VoiceConnectorGroupState)
        __props__.__dict__["connectors"] = connectors
        __props__.__dict__["name"] = name
        return VoiceConnectorGroup(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter
    def connectors(self) -> pulumi.Output[Optional[Sequence['outputs.VoiceConnectorGroupConnector']]]:
        """
        The Amazon Chime Voice Connectors to route inbound calls to.
        """
        return pulumi.get(self, "connectors")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The name of the Amazon Chime Voice Connector group.
        """
        return pulumi.get(self, "name")
| 41.988679
| 233
| 0.653905
| 1,175
| 11,127
| 6.011915
| 0.153191
| 0.066959
| 0.04983
| 0.043035
| 0.7391
| 0.723245
| 0.713618
| 0.701302
| 0.681059
| 0.681059
| 0
| 0.002535
| 0.255415
| 11,127
| 264
| 234
| 42.147727
| 0.850091
| 0.408736
| 0
| 0.623932
| 1
| 0
| 0.124609
| 0.068822
| 0
| 0
| 0
| 0
| 0
| 1
| 0.145299
| false
| 0.008547
| 0.059829
| 0
| 0.290598
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
e4072faccc52a97a20e1faa6273aaf3fefc8ce6d
| 90
|
py
|
Python
|
datastretch/exceptions/AccessException.py
|
RouladenRobert/DaTaStretch
|
a3518ba0db529c518b0715caafec667d0266f20d
|
[
"MIT"
] | null | null | null |
datastretch/exceptions/AccessException.py
|
RouladenRobert/DaTaStretch
|
a3518ba0db529c518b0715caafec667d0266f20d
|
[
"MIT"
] | null | null | null |
datastretch/exceptions/AccessException.py
|
RouladenRobert/DaTaStretch
|
a3518ba0db529c518b0715caafec667d0266f20d
|
[
"MIT"
] | null | null | null |
class AccessException(BaseException):
    """Raised when an access operation is not permitted.

    NOTE(review): this deliberately keeps ``BaseException`` as the base class
    for backward compatibility with existing ``except`` clauses, although
    ``Exception`` is the conventional base for application errors.
    """

    def __init__(self, msg):
        """Store *msg* and forward it to the base class.

        Forwarding makes ``str(exc)`` and ``exc.args`` carry the message;
        the original implementation left them empty.
        """
        super().__init__(msg)
        self.msg = msg
| 22.5
| 37
| 0.677778
| 10
| 90
| 5.7
| 0.7
| 0.245614
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.222222
| 90
| 4
| 38
| 22.5
| 0.814286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
7c10605b69f91182851273ed15f451cb4073b454
| 85
|
py
|
Python
|
app_prog/wsgi.py
|
yuretzsex/app-prog
|
3a6c80e57d58b6680bf8199b63564e0d336ed1be
|
[
"MIT"
] | null | null | null |
app_prog/wsgi.py
|
yuretzsex/app-prog
|
3a6c80e57d58b6680bf8199b63564e0d336ed1be
|
[
"MIT"
] | null | null | null |
app_prog/wsgi.py
|
yuretzsex/app-prog
|
3a6c80e57d58b6680bf8199b63564e0d336ed1be
|
[
"MIT"
] | null | null | null |
# WSGI entry point: serve the Flask-style app from `main` with waitress.
# Bug fix: waitress exports `serve`, not `server`; the original import
# raised ImportError (and `serve(...)` would have been a NameError anyway).
from waitress import serve

import main

# Listen on all interfaces, port 5000.
serve(main.app, host='0.0.0.0', port=5000)
| 14.166667
| 42
| 0.729412
| 16
| 85
| 3.875
| 0.6875
| 0.096774
| 0.096774
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108108
| 0.129412
| 85
| 5
| 43
| 17
| 0.72973
| 0
| 0
| 0
| 0
| 0
| 0.082353
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
7c3ecc8aa69d37f5fc8820e3633f20d9f5ff4af4
| 89
|
py
|
Python
|
test.py
|
adityasood/licensemgmt
|
f60d238190d19e1082b70281a3e11b9b67113a7e
|
[
"Apache-2.0"
] | null | null | null |
test.py
|
adityasood/licensemgmt
|
f60d238190d19e1082b70281a3e11b9b67113a7e
|
[
"Apache-2.0"
] | null | null | null |
test.py
|
adityasood/licensemgmt
|
f60d238190d19e1082b70281a3e11b9b67113a7e
|
[
"Apache-2.0"
] | 2
|
2016-08-03T07:10:43.000Z
|
2019-03-27T07:52:57.000Z
|
#! /usr/bin/env python3
"""Tiny smoke test: one greeting from Python, one via the shell."""
import os

greeting = "Hello World 1"
print(greeting)
os.system("echo Hello World 2")
| 17.8
| 31
| 0.707865
| 16
| 89
| 3.9375
| 0.8125
| 0.31746
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.038961
| 0.134831
| 89
| 4
| 32
| 22.25
| 0.779221
| 0.247191
| 0
| 0
| 0
| 0
| 0.469697
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0.333333
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
7c451badbf03b336e0a011e66f454905140a7514
| 1,594
|
py
|
Python
|
aerosandbox/aerodynamics/aero_3D/test_aero_3D/test_vortex_lattice_method.py
|
peterdsharpe/FastFlow3D
|
d02c8ff97fa84adcd9db988b09157695d9e2b318
|
[
"MIT"
] | 1
|
2021-11-01T22:48:12.000Z
|
2021-11-01T22:48:12.000Z
|
aerosandbox/aerodynamics/aero_3D/test_aero_3D/test_vortex_lattice_method.py
|
peterdsharpe/FastFlow3D
|
d02c8ff97fa84adcd9db988b09157695d9e2b318
|
[
"MIT"
] | null | null | null |
aerosandbox/aerodynamics/aero_3D/test_aero_3D/test_vortex_lattice_method.py
|
peterdsharpe/FastFlow3D
|
d02c8ff97fa84adcd9db988b09157695d9e2b318
|
[
"MIT"
] | null | null | null |
import aerosandbox as asb
import pytest
_GEOMETRY_PKG = "aerosandbox.aerodynamics.aero_3D.test_aero_3D.geometries"


def _run_vlm(geometry_name, **vlm_kwargs):
    """Run a VortexLatticeMethod analysis on the named test geometry.

    :param geometry_name: module name under the test-geometries package
        (each such module exposes an ``airplane`` object).
    :param vlm_kwargs: extra keyword arguments forwarded to
        ``asb.VortexLatticeMethod`` (e.g. resolution overrides).
    :return: the result of ``analysis.run()``.
    """
    import importlib
    geometry = importlib.import_module(_GEOMETRY_PKG + "." + geometry_name)
    analysis = asb.VortexLatticeMethod(
        airplane=geometry.airplane,
        op_point=asb.OperatingPoint(alpha=10),
        **vlm_kwargs,
    )
    return analysis.run()


def test_conventional():
    """VLM smoke test on the conventional geometry."""
    return _run_vlm("conventional")


def test_vanilla():
    """VLM smoke test on the vanilla geometry."""
    return _run_vlm("vanilla")


def test_flat_plate():
    """VLM smoke test on the flat-plate geometry."""
    return _run_vlm("flat_plate")


def test_flat_plate_mirrored():
    """VLM smoke test on the mirrored flat plate, at coarse resolution."""
    return _run_vlm(
        "flat_plate_mirrored",
        spanwise_resolution=1,
        chordwise_resolution=3,
    )


if __name__ == '__main__':
    # test_conventional()
    # test_vanilla()
    # test_flat_plate()['CL']
    # test_flat_plate_mirrored()
    # pytest.main()
    # Kept inline (not via _run_vlm) because we need the analysis object
    # afterwards to call draw().
    from aerosandbox.aerodynamics.aero_3D.test_aero_3D.geometries.conventional import airplane
    analysis = asb.VortexLatticeMethod(
        airplane=airplane,
        op_point=asb.OperatingPoint(alpha=10),
    )
    aero = analysis.run()
    analysis.draw()
| 28.981818
| 101
| 0.720201
| 179
| 1,594
| 6.150838
| 0.206704
| 0.054496
| 0.122616
| 0.140781
| 0.773842
| 0.773842
| 0.773842
| 0.773842
| 0.773842
| 0.725704
| 0
| 0.017015
| 0.188833
| 1,594
| 55
| 102
| 28.981818
| 0.834493
| 0.062108
| 0
| 0.525
| 0
| 0
| 0.005369
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.175
| 0
| 0.375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
7c8eff22c1749cffcecd201fc5039be6c87881c6
| 135
|
py
|
Python
|
lino/modlib/weasyprint/models.py
|
NewRGB/lino
|
43799e42107169ff173d3b8bc0324d5773471499
|
[
"BSD-2-Clause"
] | 1
|
2019-11-13T19:38:50.000Z
|
2019-11-13T19:38:50.000Z
|
lino/modlib/weasyprint/models.py
|
NewRGB/lino
|
43799e42107169ff173d3b8bc0324d5773471499
|
[
"BSD-2-Clause"
] | null | null | null |
lino/modlib/weasyprint/models.py
|
NewRGB/lino
|
43799e42107169ff173d3b8bc0324d5773471499
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: UTF-8 -*-
# Copyright 2016-2018 Rumma & Ko Ltd
# License: BSD (see file COPYING for details)
from .choicelists import *
| 22.5
| 45
| 0.681481
| 19
| 135
| 4.842105
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081818
| 0.185185
| 135
| 5
| 46
| 27
| 0.754545
| 0.740741
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
7c9e309201ca3a7a2b54030ce28900b0b53a2828
| 132
|
py
|
Python
|
thonnycontrib/calliope/api_stubs/audio.py
|
thonny/thonny-calliope
|
e3950a63848ca1f950e14547f87ff5dfec7328b2
|
[
"MIT"
] | 1
|
2020-03-30T20:32:42.000Z
|
2020-03-30T20:32:42.000Z
|
thonnycontrib/calliope/api_stubs/audio.py
|
thonny/thonny-calliope
|
e3950a63848ca1f950e14547f87ff5dfec7328b2
|
[
"MIT"
] | null | null | null |
thonnycontrib/calliope/api_stubs/audio.py
|
thonny/thonny-calliope
|
e3950a63848ca1f950e14547f87ff5dfec7328b2
|
[
"MIT"
] | null | null | null |
class AudioFrame:
    """Stub of the Calliope mini / micro:bit ``audio.AudioFrame`` API.

    Exists so Thonny can offer name completion for device code; it is not
    meant to be executed.
    """
    # NOTE(review): the stub methods deliberately mirror only the device API
    # names — they take no ``self`` and do nothing. Presumably they are only
    # introspected, never called; confirm before "fixing" the signatures.
    def copyfrom():
        """Stub — no implementation."""
        pass
    def is_playing():
        """Stub — no implementation."""
        pass
    def play():
        """Stub — no implementation."""
        pass
    def stop():
        """Stub — no implementation."""
        pass
| 8.25
| 19
| 0.522727
| 15
| 132
| 4.533333
| 0.6
| 0.308824
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.363636
| 132
| 15
| 20
| 8.8
| 0.809524
| 0
| 0
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| true
| 0.4
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
7cb3559173881426448f09c25eb116415d8d826a
| 13,514
|
py
|
Python
|
test/fedml_user_code/cli/login-test.py
|
ray-ruisun/FedML
|
24ff30d636bb70f64e94e9ca205375033597d3dd
|
[
"Apache-2.0"
] | null | null | null |
test/fedml_user_code/cli/login-test.py
|
ray-ruisun/FedML
|
24ff30d636bb70f64e94e9ca205375033597d3dd
|
[
"Apache-2.0"
] | null | null | null |
test/fedml_user_code/cli/login-test.py
|
ray-ruisun/FedML
|
24ff30d636bb70f64e94e9ca205375033597d3dd
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import json
import os.path
import time
import traceback
import psutil
import requests
from fedml.cli.edge_deployment.mqtt_manager import MqttManager
from fedml.mlops.mlops_configs import MLOpsConfigs
def test_is_ok(test_run_id, test_edge_id, test_function, ok_message):
    """Poll the run's local log file for *ok_message* and report the outcome.

    Reads the log file named after the run/edge pair under the module-level
    ``args.log_dir``, retrying up to 30 times (3 s apart) on EOF, then prints
    a success/failure line tagged with *test_function*'s name.

    Fix: the file handle is now managed by a ``with`` block, so it is closed
    even if an exception interrupts the polling loop (the original leaked it).
    """
    log_file_path = os.path.join(
        args.log_dir,
        "fedml-run-{}-edge-{}.log".format(str(test_run_id), str(test_edge_id)))
    test_ok = False
    wait_count = 0
    with open(log_file_path, "r") as log_file:
        while wait_count < 30:
            log_line = log_file.readline()
            if not log_line:
                # Reached current EOF; wait for the run to write more.
                wait_count += 1
                time.sleep(3)
                continue
            if log_line.find(ok_message) != -1:
                test_ok = True
                break
    if test_ok:
        print("{} with successful status.".format(test_function.__name__))
    else:
        print("{} with failure status.".format(test_function.__name__))
def test_login_with_start_run_by_sending_client_agent_msg(args):
test_run_id = 873
test_edge_id = 122
start_train_topic = "flserver_agent/{}/start_train".format(str(test_edge_id))
start_train_msg = {
"edges": [
{
"device_id": "@0x9801a7a55e85.MacOS",
"os_type": "MacOS",
"id": test_edge_id
}
],
"starttime": 1651664950745,
"url": "http://fedml-server-agent-svc.fedml-aggregator-dev.svc.cluster.local:5001/api/start_run",
"edgeids": [
test_edge_id
],
"token": "eyJhbGciOiJIUzI1NiJ9.eyJpZCI6MTA1LCJhY2NvdW50IjoiYWxleC5saWFuZzIiLCJsb2dpblRpbWUiOiIxNjUxNjY0NzM1NDA1IiwiZXhwIjowfQ.vtgrNBhcCVy_d2oI9NeSFcwPxHHSWvtoZS_k-_SLAyU",
"urls": [],
"userids": [
"105"
],
"name": "eye_rain",
"runId": test_run_id,
"id": test_run_id,
"projectid": "121",
"run_config": {
"configName": "test-new-open",
"userId": 105,
"model_config": {},
"packages_config": {
"server": "server-package.zip",
"linuxClient": "client-package.zip",
"serverUrl": "https://fedml.s3.us-west-1.amazonaws.com/1651664769220server-package.zip",
"linuxClientUrl": "https://fedml.s3.us-west-1.amazonaws.com/1651664778846client-package.zip",
"androidClient": "",
"androidClientUrl": "",
"androidClientVersion": "0"
},
"data_config": {
"privateLocalData": "",
"syntheticData": "",
"syntheticDataUrl": ""
},
"parameters": {
"model_args": {
"model_file_cache_folder": "./model_file_cache",
"model": "lr",
"global_model_file_path": "./model_file_cache/global_model.pt"
},
"device_args": {
"worker_num": 2,
"using_gpu": False,
"gpu_mapping_key": "mapping_default",
"gpu_mapping_file": "config/gpu_mapping.yaml"
},
"comm_args": {
"s3_config_path": "config/s3_config.yaml",
"backend": "MQTT_S3",
"mqtt_config_path": "config/mqtt_config.yaml"
},
"train_args": {
"batch_size": 10,
"weight_decay": 0.001,
"client_num_per_round": 2,
"client_num_in_total": 2,
"comm_round": 50,
"client_optimizer": "sgd",
"client_id_list": "[1, 2]",
"epochs": 1,
"learning_rate": 0.03,
"federated_optimizer": "FedAvg"
},
"environment_args": {
"bootstrap": "config/bootstrap.sh"
},
"validation_args": {
"frequency_of_the_test": 1
},
"common_args": {
"random_seed": 0,
"training_type": "cross_silo",
"using_mlops": False
},
"data_args": {
"partition_method": "hetero",
"partition_alpha": 0.5,
"dataset": "mnist",
"data_cache_dir": "../../../../data/mnist"
},
"tracking_args": {
"wandb_project": "fedml",
"wandb_name": "fedml_torch_fedavg_mnist_lr",
"wandb_key": "ee0b5f53d949c84cee7decbe7a629e63fb2f8408",
"enable_wandb": False,
"log_file_dir": "./log"
}
}
},
"timestamp": "1651664950759"
}
mqtt_config, s3_config = MLOpsConfigs.get_instance(args).fetch_configs()
mqtt_mgr = MqttManager(
mqtt_config["BROKER_HOST"],
mqtt_config["BROKER_PORT"],
mqtt_config["MQTT_USER"],
mqtt_config["MQTT_PWD"],
mqtt_config["MQTT_KEEPALIVE"],
"login-test",
)
mqtt_mgr.send_message_json(start_train_topic, json.dumps(start_train_msg))
time.sleep(3)
test_is_ok(test_run_id, test_edge_id,
test_login_with_start_run_by_sending_client_agent_msg, "Connection is ready!")
def test_login_with_stop_run_by_sending_client_agent_msg(args):
test_run_id = 873
test_edge_id = 122
stop_train_topic = "flserver_agent/{}/stop_train".format(str(test_edge_id))
stop_train_msg = {
"edgeids": [
test_edge_id
],
"runId": test_run_id,
}
mqtt_config, s3_config = MLOpsConfigs.get_instance(args).fetch_configs()
mqtt_mgr = MqttManager(
mqtt_config["BROKER_HOST"],
mqtt_config["BROKER_PORT"],
mqtt_config["MQTT_USER"],
mqtt_config["MQTT_PWD"],
mqtt_config["MQTT_KEEPALIVE"],
"login-test",
)
mqtt_mgr.send_message_json(stop_train_topic, json.dumps(stop_train_msg))
time.sleep(5)
all_is_killed = True
for process in psutil.process_iter():
try:
pinfo = process.as_dict(attrs=['pid', 'name', "cmdline"])
for cmd in pinfo["cmdline"]:
if str(cmd).find("fedml_config.yaml") != -1:
all_is_killed = False
except Exception as e:
pass
if all_is_killed:
print("{} with successful status.".format(test_login_with_stop_run_by_sending_client_agent_msg.__name__))
else:
print("{} with failure status.".format(test_login_with_stop_run_by_sending_client_agent_msg.__name__))
def send_request_to_server_agent(args, request_json):
url = "https://open.fedml.ai:5001/api/start_run"
if hasattr(args, "config_version") and args.config_version is not None:
# Setup config url based on selected version.
if args.config_version == "release":
url = "https://open.fedml.ai:5001/api/start_run"
elif args.config_version == "test":
url = "http://open-test.fedml.ai:5001/api/start_run"
elif args.config_version == "dev":
url = "http://open-dev.fedml.ai:5001/api/start_run"
elif args.config_version == "local":
url = "http://localhost:5001/api/start_run"
json_params = request_json
if str(url).startswith("https://"):
cur_source_dir = os.path.dirname(__file__)
cert_path = os.path.join(cur_source_dir, "ssl", "open.fedml.ai_bundle.crt")
requests.session().verify = cert_path
response = requests.post(url, json=json_params, verify=True, headers={'Connection': 'close'})
else:
response = requests.post(url, json=json_params, headers={'Connection': 'close'})
status_code = response.json().get("code")
if status_code == "SUCCESS":
return True
return False
def test_login_with_start_run_by_sending_server_agent_msg(args):
test_run_id = 873
test_edge_id = 122
start_train_topic = "flserver_agent/{}/start_train".format(str(test_edge_id))
start_train_msg = {
"edges": [
{
"device_id": "@0x9801a7a55e85.MacOS",
"os_type": "MacOS",
"id": test_edge_id
}
],
"starttime": 1651664950745,
"url": "http://fedml-server-agent-svc.fedml-aggregator-dev.svc.cluster.local:5001/api/start_run",
"edgeids": [
test_edge_id
],
"token": "eyJhbGciOiJIUzI1NiJ9.eyJpZCI6MTA1LCJhY2NvdW50IjoiYWxleC5saWFuZzIiLCJsb2dpblRpbWUiOiIxNjUxNjY0NzM1NDA1IiwiZXhwIjowfQ.vtgrNBhcCVy_d2oI9NeSFcwPxHHSWvtoZS_k-_SLAyU",
"urls": [],
"userids": [
"105"
],
"name": "eye_rain",
"runId": test_run_id,
"id": test_run_id,
"projectid": "121",
"run_config": {
"configName": "test-new-open",
"userId": 105,
"model_config": {},
"packages_config": {
"server": "server-package.zip",
"linuxClient": "client-package.zip",
"serverUrl": "https://fedml.s3.us-west-1.amazonaws.com/1651664769220server-package.zip",
"linuxClientUrl": "https://fedml.s3.us-west-1.amazonaws.com/1651664778846client-package.zip",
"androidClient": "",
"androidClientUrl": "",
"androidClientVersion": "0"
},
"data_config": {
"privateLocalData": "",
"syntheticData": "",
"syntheticDataUrl": ""
},
"parameters": {
"model_args": {
"model_file_cache_folder": "./model_file_cache",
"model": "lr",
"global_model_file_path": "./model_file_cache/global_model.pt"
},
"device_args": {
"worker_num": 2,
"using_gpu": False,
"gpu_mapping_key": "mapping_default",
"gpu_mapping_file": "config/gpu_mapping.yaml"
},
"comm_args": {
"s3_config_path": "config/s3_config.yaml",
"backend": "MQTT_S3",
"mqtt_config_path": "config/mqtt_config.yaml"
},
"train_args": {
"batch_size": 10,
"weight_decay": 0.001,
"client_num_per_round": 2,
"client_num_in_total": 2,
"comm_round": 50,
"client_optimizer": "sgd",
"client_id_list": "[1, 2]",
"epochs": 1,
"learning_rate": 0.03,
"federated_optimizer": "FedAvg"
},
"environment_args": {
"bootstrap": "config/bootstrap.sh"
},
"validation_args": {
"frequency_of_the_test": 1
},
"common_args": {
"random_seed": 0,
"training_type": "cross_silo",
"using_mlops": False
},
"data_args": {
"partition_method": "hetero",
"partition_alpha": 0.5,
"dataset": "mnist",
"data_cache_dir": "../../../../data/mnist"
},
"tracking_args": {
"wandb_project": "fedml",
"wandb_name": "fedml_torch_fedavg_mnist_lr",
"wandb_key": "ee0b5f53d949c84cee7decbe7a629e63fb2f8408",
"enable_wandb": False,
"log_file_dir": "./log"
}
}
},
"timestamp": "1651664950759"
}
send_request_to_server_agent(args, start_train_msg)
time.sleep(3)
test_is_ok(test_run_id, test_edge_id,
test_login_with_start_run_by_sending_server_agent_msg, "Connection is ready!")
def test_login_with_stop_run_by_sending_server_agent_msg(args):
test_run_id = 873
test_edge_id = 122
stop_train_msg = {
"edgeids": [
test_edge_id
],
"runId": test_run_id,
}
send_request_to_server_agent(args, stop_train_msg)
time.sleep(3)
test_is_ok(test_run_id, test_edge_id,
test_login_with_stop_run_by_sending_server_agent_msg, "Stop run successfully.")
if __name__ == "__main__":
    # Parse CLI options, then drive a client-agent start/stop round trip.
    try:
        parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
        parser.add_argument("--config_version", "-v", type=str,
                            default="release")
        parser.add_argument("--log_dir", "-l", type=str,
                            default="/Users/alexliang/fedml-client/fedml/logs")
        args = parser.parse_args()
    except Exception as e:
        # NOTE(review): if parsing raises, `args` is never bound, so the
        # calls below fail with NameError — confirm whether this handler
        # should exit instead of falling through.
        print("Exception when parsing arguments: {}".format(traceback.format_exc()))
        pass
    test_login_with_start_run_by_sending_client_agent_msg(args)
    #test_login_with_start_run_by_sending_server_agent_msg(args)
    # Give the started run time to spin up before asking it to stop.
    time.sleep(10)
    test_login_with_stop_run_by_sending_client_agent_msg(args)
| 36.230563
| 179
| 0.537295
| 1,356
| 13,514
| 4.972714
| 0.20649
| 0.021355
| 0.026694
| 0.018686
| 0.769242
| 0.762569
| 0.738544
| 0.727569
| 0.717781
| 0.71096
| 0
| 0.037989
| 0.34357
| 13,514
| 372
| 180
| 36.327957
| 0.722128
| 0.007548
| 0
| 0.652695
| 0
| 0.017964
| 0.31071
| 0.078013
| 0
| 0
| 0.002088
| 0
| 0
| 1
| 0.017964
| false
| 0.005988
| 0.026946
| 0
| 0.050898
| 0.01497
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
7cc0a1685c87b2006ca6ffcad05a623077f588b1
| 308
|
py
|
Python
|
monsterplanet/models/inventory.py
|
orangejacob/games
|
66e3e5d0b0bbc96f5505de368a4b310097f23452
|
[
"MIT"
] | null | null | null |
monsterplanet/models/inventory.py
|
orangejacob/games
|
66e3e5d0b0bbc96f5505de368a4b310097f23452
|
[
"MIT"
] | 5
|
2019-12-15T19:29:22.000Z
|
2019-12-18T23:02:35.000Z
|
monsterplanet/models/inventory.py
|
orangejacob/games
|
66e3e5d0b0bbc96f5505de368a4b310097f23452
|
[
"MIT"
] | null | null | null |
class Inventory:
    """A minimal bag of items, kept in insertion order."""

    def __init__(self):
        # Backing store for everything currently carried.
        self.stuff = []

    def add(self, item):
        """Put *item* into the inventory."""
        self.stuff.append(item)

    def drop(self, item):
        """Remove the first occurrence of *item* and hand it back.

        Raises ValueError (from list.remove) if *item* is not carried.
        """
        self.stuff.remove(item)
        return item

    def has(self, item):
        """Return True when *item* is currently carried."""
        return item in self.stuff

    def all(self):
        """Return the underlying list of carried items."""
        return self.stuff
| 18.117647
| 33
| 0.568182
| 40
| 308
| 4.275
| 0.375
| 0.263158
| 0.140351
| 0.19883
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.324675
| 308
| 16
| 34
| 19.25
| 0.822115
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.416667
| false
| 0
| 0
| 0.166667
| 0.75
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
7ce5df4a0d1c5b73e72c11d9474c818e1550db90
| 196
|
py
|
Python
|
RL/TicTacToe/agent/Agent.py
|
tsubame-mz/reinforcement_learning
|
b69cce573deed11676f60653b41e036d8e79aedc
|
[
"MIT"
] | null | null | null |
RL/TicTacToe/agent/Agent.py
|
tsubame-mz/reinforcement_learning
|
b69cce573deed11676f60653b41e036d8e79aedc
|
[
"MIT"
] | null | null | null |
RL/TicTacToe/agent/Agent.py
|
tsubame-mz/reinforcement_learning
|
b69cce573deed11676f60653b41e036d8e79aedc
|
[
"MIT"
] | null | null | null |
from typing import Dict
from abc import ABC, abstractmethod
import gym
class Agent(ABC):
    """Abstract interface for a TicTacToe-playing agent."""

    @abstractmethod
    def get_action(self, env: gym.Env, obs: Dict):
        """Choose an action for the current state.

        :param env: the gym environment being played.
        :param obs: the current observation (a dict).
        :raises NotImplementedError: always, unless a subclass overrides.
        """
        raise NotImplementedError
| 19.6
| 50
| 0.729592
| 26
| 196
| 5.461538
| 0.653846
| 0.239437
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.204082
| 196
| 9
| 51
| 21.777778
| 0.910256
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.428571
| 0
| 0.714286
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
7cf357a757a23e1cc885392f5c0b2f0e21ed46fa
| 107
|
py
|
Python
|
pythonProject1/venv/Lib/site-packages/jsonconf/__init__.py
|
mjtomlinson/CNE330_Python_1_Final_Project
|
05020806860937ef37b9a0ad2e27de4897a606de
|
[
"CC0-1.0"
] | null | null | null |
pythonProject1/venv/Lib/site-packages/jsonconf/__init__.py
|
mjtomlinson/CNE330_Python_1_Final_Project
|
05020806860937ef37b9a0ad2e27de4897a606de
|
[
"CC0-1.0"
] | null | null | null |
pythonProject1/venv/Lib/site-packages/jsonconf/__init__.py
|
mjtomlinson/CNE330_Python_1_Final_Project
|
05020806860937ef37b9a0ad2e27de4897a606de
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/env python
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
from jsonconf import JSONConf
| 21.4
| 53
| 0.785047
| 16
| 107
| 5.25
| 0.8125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.031915
| 0.121495
| 107
| 4
| 54
| 26.75
| 0.861702
| 0.672897
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
6b04964b36641dc39cf3b80be2ba160fea940a3a
| 8,176
|
py
|
Python
|
tests/test_control.py
|
pretix-translations/pretix-cartshare
|
0e2f50b86cd7b0605826e1bfc8baac01b74dfe84
|
[
"Apache-2.0"
] | 2
|
2017-04-30T13:13:18.000Z
|
2018-11-19T04:06:14.000Z
|
tests/test_control.py
|
pretix-translations/pretix-cartshare
|
0e2f50b86cd7b0605826e1bfc8baac01b74dfe84
|
[
"Apache-2.0"
] | 6
|
2016-09-25T16:49:32.000Z
|
2018-08-06T09:41:48.000Z
|
tests/test_control.py
|
pretix-translations/pretix-cartshare
|
0e2f50b86cd7b0605826e1bfc8baac01b74dfe84
|
[
"Apache-2.0"
] | 4
|
2016-10-03T18:03:27.000Z
|
2018-11-19T04:06:15.000Z
|
from datetime import timedelta
from decimal import Decimal
import pytest
from django.utils.timezone import now
from django_scopes import scopes_disabled
from pretix.base.models import CartPosition, Event, Organizer, Team, User
from pretix_cartshare.models import SharedCart
from pretix_cartshare.signals import clean_cart_positions
@pytest.fixture
@scopes_disabled()
def env():
    """Provide (event, user, ticket): a dummy event with the cartshare plugin
    enabled, a user whose team may view and change orders, and a ticket item
    with a default price of 12."""
    organizer = Organizer.objects.create(name='Dummy', slug='dummy')
    event = Event.objects.create(
        organizer=organizer, name='Dummy', slug='dummy',
        date_from=now(), plugins='pretix_cartshare',
    )
    ticket = event.items.create(default_price=Decimal('12'))
    user = User.objects.create_user('dummy@dummy.dummy', 'dummy')
    team = Team.objects.create(
        organizer=organizer, can_change_orders=True, can_view_orders=True,
    )
    team.members.add(user)
    team.limit_events.add(event)
    return event, user, ticket
@pytest.mark.django_db
def test_create_sharedcart_defaultprice(client, env):
    """Creating a shared cart with an empty price falls back to the item's
    default price: 3 positions are created, each priced at ticket.default_price."""
    event, user, ticket = env
    client.login(email='dummy@dummy.dummy', password='dummy')
    with scopes_disabled():
        q = event.quotas.create(size=5, name='Test')  # large enough for 3 positions
        q.items.add(ticket)
    # pretix control URLs take the organizer slug first
    # (/control/event/<organizer>/<event>/). The original passed the slugs in
    # the reverse order, which only resolved because both slugs are 'dummy'.
    r = client.post('/control/event/%s/%s/cartshare/create/' % (event.organizer.slug, event.slug), {
        'expires': (now() + timedelta(days=14)).strftime("%Y-%m-%d %H:%M:%S"),
        'form-TOTAL_FORMS': '1',
        'form-INITIAL_FORMS': '0',
        'form-MIN_NUM_FORMS': '1',
        'form-MAX_NUM_FORMS': '1000',
        'form-0-count': '3',
        'form-0-itemvar': ticket.id,
        'form-0-price': ''  # empty price -> use the item's default price
    }, follow=True)
    assert 'alert-success' in r.rendered_content
    with scopes_disabled():
        cps = CartPosition.objects.all()
        assert len(cps) == 3
        assert all(cp.item == ticket for cp in cps)
        assert all(cp.price == ticket.default_price for cp in cps)
@pytest.mark.django_db
def test_create_sharedcart_customprice(client, env):
    """An explicit price in the formset overrides the item's default price."""
    event, user, ticket = env
    client.login(email='dummy@dummy.dummy', password='dummy')
    # Organizer slug goes first per pretix control URL convention; harmless
    # here since both slugs are 'dummy', but use the conventional order.
    r = client.post('/control/event/%s/%s/cartshare/create/' % (event.organizer.slug, event.slug), {
        'expires': (now() + timedelta(days=14)).strftime("%Y-%m-%d %H:%M:%S"),
        'form-TOTAL_FORMS': '1',
        'form-INITIAL_FORMS': '0',
        'form-MIN_NUM_FORMS': '1',
        'form-MAX_NUM_FORMS': '1000',
        'form-0-count': '3',
        'form-0-itemvar': ticket.id,
        'form-0-price': '14'  # custom price, expected on every position
    }, follow=True)
    assert 'alert-success' in r.rendered_content
    with scopes_disabled():
        cps = CartPosition.objects.all()
        assert len(cps) == 3
        assert all(cp.item == ticket for cp in cps)
        assert all(cp.price == Decimal('14') for cp in cps)
@pytest.mark.django_db
def test_create_sharedcart_variation(client, env):
    """Item variations are addressed as '<item_id>-<variation_id>' in the
    formset; the resulting positions carry the parent item."""
    event, user, ticket = env
    with scopes_disabled():
        shirt = event.items.create(name='T-Shirt')
        shirt_red = shirt.variations.create(value='Red')
    client.login(email='dummy@dummy.dummy', password='dummy')
    # Organizer slug goes first per pretix control URL convention; harmless
    # here since both slugs are 'dummy', but use the conventional order.
    r = client.post('/control/event/%s/%s/cartshare/create/' % (event.organizer.slug, event.slug), {
        'expires': (now() + timedelta(days=14)).strftime("%Y-%m-%d %H:%M:%S"),
        'form-TOTAL_FORMS': '1',
        'form-INITIAL_FORMS': '0',
        'form-MIN_NUM_FORMS': '1',
        'form-MAX_NUM_FORMS': '1000',
        'form-0-count': '3',
        'form-0-itemvar': '%s-%s' % (shirt.id, shirt_red.id),  # item-variation pair
        'form-0-price': '14'
    }, follow=True)
    assert 'alert-success' in r.rendered_content
    with scopes_disabled():
        cps = CartPosition.objects.all()
        assert len(cps) == 3
        assert all(cp.item == shirt for cp in cps)
        assert all(cp.price == Decimal('14') for cp in cps)
@pytest.mark.django_db
def test_create_sharedcart_quota_full(client, env):
    """Creation fails and leaves no positions when the quota cannot cover
    the requested count (2 seats for a 3-ticket request)."""
    event, user, ticket = env
    with scopes_disabled():
        q = event.quotas.create(size=2, name='Test')  # deliberately too small
        q.items.add(ticket)
    client.login(email='dummy@dummy.dummy', password='dummy')
    # Organizer slug goes first per pretix control URL convention; harmless
    # here since both slugs are 'dummy', but use the conventional order.
    r = client.post('/control/event/%s/%s/cartshare/create/' % (event.organizer.slug, event.slug), {
        'expires': (now() + timedelta(days=14)).strftime("%Y-%m-%d %H:%M:%S"),
        'form-TOTAL_FORMS': '1',
        'form-INITIAL_FORMS': '0',
        'form-MIN_NUM_FORMS': '1',
        'form-MAX_NUM_FORMS': '1000',
        'form-0-count': '3',
        'form-0-itemvar': ticket.id,
        'form-0-price': '14'
    }, follow=True)
    assert 'alert-danger' in r.rendered_content
    with scopes_disabled():
        assert not CartPosition.objects.exists()
@pytest.mark.django_db
def test_create_sharedcart_invalid(client, env):
    """A non-numeric price is rejected by form validation and creates nothing."""
    event, user, ticket = env
    client.login(email='dummy@dummy.dummy', password='dummy')
    # Organizer slug goes first per pretix control URL convention; harmless
    # here since both slugs are 'dummy', but use the conventional order.
    r = client.post('/control/event/%s/%s/cartshare/create/' % (event.organizer.slug, event.slug), {
        'expires': (now() + timedelta(days=14)).strftime("%Y-%m-%d %H:%M:%S"),
        'form-TOTAL_FORMS': '1',
        'form-INITIAL_FORMS': '0',
        'form-MIN_NUM_FORMS': '1',
        'form-MAX_NUM_FORMS': '1000',
        'form-0-count': '3',
        'form-0-itemvar': ticket.id,
        'form-0-price': 'abc'  # invalid: not a decimal
    }, follow=True)
    assert 'alert-danger' in r.rendered_content
    with scopes_disabled():
        assert not CartPosition.objects.exists()
@pytest.mark.django_db
def test_list_sharedcart(client, env):
    """The list view shows unexpired shared carts and hides expired ones."""
    event, user, ticket = env
    client.login(email='dummy@dummy.dummy', password='dummy')
    sc = SharedCart.objects.create(total=Decimal('13'), expires=now() + timedelta(days=3), event=event)
    sc2 = SharedCart.objects.create(total=Decimal('13'), expires=now() - timedelta(days=3), event=event)
    # Organizer slug goes first per pretix control URL convention; harmless
    # here since both slugs are 'dummy', but use the conventional order.
    r = client.get('/control/event/%s/%s/cartshare/' % (event.organizer.slug, event.slug))
    assert sc.cart_id in r.rendered_content
    assert sc2.cart_id not in r.rendered_content
@pytest.mark.django_db
def test_delete_sharedcart(client, env):
    """Deleting a shared cart also removes its associated cart positions."""
    event, user, ticket = env
    client.login(email='dummy@dummy.dummy', password='dummy')
    sc = SharedCart.objects.create(total=Decimal('13'), expires=now() + timedelta(days=3), event=event)
    with scopes_disabled():
        CartPosition.objects.create(cart_id=sc.cart_id, event=event, price=Decimal('13'), item=ticket,
                                    expires=now() + timedelta(days=3))
    # Organizer slug goes first per pretix control URL convention; harmless
    # here since both slugs are 'dummy', but use the conventional order.
    r = client.post('/control/event/%s/%s/cartshare/%s/delete' % (event.organizer.slug, event.slug, sc.cart_id), {},
                    follow=True)
    assert not SharedCart.objects.exists()
    with scopes_disabled():
        assert not CartPosition.objects.exists()
    # NOTE(review): 'FOOBAR' looks like a leftover placeholder assertion —
    # confirm what the rendered page was actually meant to be checked against.
    assert 'FOOBAR' not in r.rendered_content
@pytest.mark.django_db
def test_delete_unknown(client, env):
    """Requesting deletion of a non-existent cart id returns 404."""
    event, user, ticket = env
    client.login(email='dummy@dummy.dummy', password='dummy')
    # Organizer slug goes first per pretix control URL convention; harmless
    # here since both slugs are 'dummy', but use the conventional order.
    r = client.get('/control/event/%s/%s/cartshare/ASD/delete' % (event.organizer.slug, event.slug))
    assert r.status_code == 404
@pytest.mark.django_db
def test_require_permission(client, env):
    """Without can_change_orders the cartshare views return 403 and the
    event dashboard does not link to the plugin."""
    event, user, ticket = env
    client.login(email='dummy@dummy.dummy', password='dummy')
    with scopes_disabled():
        t = Team.objects.get()  # the only team, created by the env fixture
        t.can_change_orders = False
        t.save()
    # Organizer slug goes first per pretix control URL convention; harmless
    # here since both slugs are 'dummy', but use the conventional order.
    r = client.get('/control/event/%s/%s/cartshare/' % (event.organizer.slug, event.slug))
    assert r.status_code == 403
    r = client.get('/control/event/%s/%s/' % (event.organizer.slug, event.slug))
    assert b'cartshare' not in r.content
@pytest.mark.django_db
def test_cleanup(env):
    """clean_cart_positions drops expired shared carts together with their
    positions while leaving unexpired carts untouched."""
    event, user, ticket = env
    keep = SharedCart.objects.create(total=Decimal('13'), expires=now() + timedelta(days=3), event=event)
    stale = SharedCart.objects.create(total=Decimal('13'), expires=now() - timedelta(days=3), event=event)
    with scopes_disabled():
        CartPosition.objects.create(
            cart_id=stale.cart_id, event=event, price=Decimal('13'), item=ticket,
            expires=now() - timedelta(days=3),
        )
    clean_cart_positions(event)
    with scopes_disabled():
        assert SharedCart.objects.filter(id=keep.id).exists()
        assert not SharedCart.objects.filter(id=stale.id).exists()
        assert not CartPosition.objects.exists()
| 39.882927
| 116
| 0.64567
| 1,110
| 8,176
| 4.64955
| 0.125225
| 0.04069
| 0.04534
| 0.053478
| 0.77795
| 0.767293
| 0.739585
| 0.728735
| 0.687464
| 0.659368
| 0
| 0.017252
| 0.191781
| 8,176
| 204
| 117
| 40.078431
| 0.763771
| 0
| 0
| 0.653631
| 0
| 0
| 0.176125
| 0.043297
| 0
| 0
| 0
| 0
| 0.150838
| 1
| 0.061453
| false
| 0.050279
| 0.044693
| 0
| 0.111732
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
6b06d889b40e221e8e4a61cbd6f406914661b567
| 55
|
py
|
Python
|
cpgames/modules/core/catchcoins/__init__.py
|
Wasabii88/Games
|
33262ca1958207a24e57e3532feded7e275b1dd1
|
[
"MIT"
] | 1
|
2022-02-27T10:33:41.000Z
|
2022-02-27T10:33:41.000Z
|
cpgames/modules/core/catchcoins/__init__.py
|
beiwei365/Games
|
f6499f378802d3212a08aeca761191b58714b7f0
|
[
"MIT"
] | null | null | null |
cpgames/modules/core/catchcoins/__init__.py
|
beiwei365/Games
|
f6499f378802d3212a08aeca761191b58714b7f0
|
[
"MIT"
] | null | null | null |
'''initialize'''
from .catchcoins import CatchCoinsGame
| 27.5
| 38
| 0.8
| 5
| 55
| 8.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.072727
| 55
| 2
| 38
| 27.5
| 0.862745
| 0.181818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
6b25b9bd2ffbf6505341abbd0ba04d6ce1307d2f
| 2,901
|
py
|
Python
|
tests/e2e/test_step_functions.py
|
vchapmanse/local_serverless_with_docker_AWS_SAM
|
e96028300ad5c4b3358ffadd6fad015aa43340b6
|
[
"MIT"
] | null | null | null |
tests/e2e/test_step_functions.py
|
vchapmanse/local_serverless_with_docker_AWS_SAM
|
e96028300ad5c4b3358ffadd6fad015aa43340b6
|
[
"MIT"
] | null | null | null |
tests/e2e/test_step_functions.py
|
vchapmanse/local_serverless_with_docker_AWS_SAM
|
e96028300ad5c4b3358ffadd6fad015aa43340b6
|
[
"MIT"
] | null | null | null |
import boto3
import json
# Step Functions client pointed at a local endpoint (port 8083 — presumably
# Step Functions Local / SAM; confirm against the dev-environment setup).
sfn = boto3.client(
    "stepfunctions",
    endpoint_url="http://127.0.0.1:8083")
# DynamoDB Local resource handle.
# NOTE(review): DYDB is never referenced in this module — confirm before removing.
DYDB = boto3.resource("dynamodb", endpoint_url="http://localhost:8000")
def test_code_of_conduct(s3_cloudwatch_event):
    """An S3 event for code_of_conduct.txt drives the state machine to SUCCEEDED."""
    event = s3_cloudwatch_event
    event["detail"]["resources"][0]["ARN"] = "arn:aws:s3:::code_of_conduct.txt"
    execution = sfn.start_execution(
        stateMachineArn='arn:aws:states:us-east-1:123456789012:stateMachine:test_machine',
        input=json.dumps(event),
    )
    arn = execution["executionArn"]
    # Poll until the local emulator leaves the RUNNING state.
    status = sfn.describe_execution(executionArn=arn).get("status")
    while status == "RUNNING":
        status = sfn.describe_execution(executionArn=arn).get("status")
    assert status == 'SUCCEEDED'
def test_user_groups(s3_cloudwatch_event):
    """An S3 event for user_groups.txt drives the state machine to SUCCEEDED."""
    event = s3_cloudwatch_event
    event["detail"]["resources"][0]["ARN"] = "arn:aws:s3:::user_groups.txt"
    execution = sfn.start_execution(
        stateMachineArn='arn:aws:states:us-east-1:123456789012:stateMachine:test_machine',
        input=json.dumps(event),
    )
    arn = execution["executionArn"]
    # Poll until the local emulator leaves the RUNNING state.
    status = sfn.describe_execution(executionArn=arn).get("status")
    while status == "RUNNING":
        status = sfn.describe_execution(executionArn=arn).get("status")
    assert status == 'SUCCEEDED'
def test_event_description(s3_cloudwatch_event):
    """An S3 event for event_description.txt drives the state machine to SUCCEEDED.

    Also creates the state machine on the local emulator from
    ./state_machine.json when it does not exist yet.
    """
    event = s3_cloudwatch_event
    event["detail"]["resources"][0].update({"ARN": "arn:aws:s3:::event_description.txt"})
    with open('./state_machine.json', 'r') as myfile:
        data = myfile.read()
    try:
        sfn.create_state_machine(
            name='test_machine',
            definition=data,
            roleArn="arn:aws:iam::012345678901:role/DummyRole"
        )
    except Exception as e:
        # Best effort: creation fails on reruns when the machine already exists.
        # NOTE(review): narrowing this to
        # sfn.exceptions.StateMachineAlreadyExists would avoid masking
        # unrelated errors — confirm and tighten. (Dead `pass` removed.)
        print(e)
    resp = sfn.start_execution(
        stateMachineArn='arn:aws:states:us-east-1:123456789012:stateMachine:test_machine',
        input=json.dumps(event)
    )
    resp = sfn.describe_execution(
        executionArn=resp["executionArn"]
    )
    # Poll until the local emulator leaves the RUNNING state.
    while resp.get("status") == "RUNNING":
        resp = sfn.describe_execution(
            executionArn=resp["executionArn"]
        )
    assert resp["status"] == 'SUCCEEDED'
def test_rejected_file(s3_cloudwatch_event):
    """An S3 event for rejected_file.txt still drives the machine to SUCCEEDED."""
    event = s3_cloudwatch_event
    event["detail"]["resources"][0]["ARN"] = "arn:aws:s3:::rejected_file.txt"
    execution = sfn.start_execution(
        stateMachineArn='arn:aws:states:us-east-1:123456789012:stateMachine:test_machine',
        input=json.dumps(event),
    )
    arn = execution["executionArn"]
    # Poll until the local emulator leaves the RUNNING state.
    status = sfn.describe_execution(executionArn=arn).get("status")
    while status == "RUNNING":
        status = sfn.describe_execution(executionArn=arn).get("status")
    assert status == 'SUCCEEDED'
| 32.595506
| 90
| 0.637366
| 322
| 2,901
| 5.580745
| 0.245342
| 0.046745
| 0.075682
| 0.097941
| 0.773511
| 0.773511
| 0.773511
| 0.773511
| 0.773511
| 0.773511
| 0
| 0.042901
| 0.220614
| 2,901
| 89
| 91
| 32.595506
| 0.75188
| 0
| 0
| 0.519481
| 0
| 0
| 0.272915
| 0.143349
| 0
| 0
| 0
| 0
| 0.051948
| 1
| 0.051948
| false
| 0.012987
| 0.025974
| 0
| 0.077922
| 0.012987
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
6b2d03b335204caf2f90ec59b69a7fd5d86a8f4e
| 59
|
py
|
Python
|
technology/scn3me_subm/tech/__init__.py
|
panicmarvin/OpenRAM
|
abf47bab50adb48337c59b72ccd6023c1999f3fc
|
[
"BSD-3-Clause"
] | null | null | null |
technology/scn3me_subm/tech/__init__.py
|
panicmarvin/OpenRAM
|
abf47bab50adb48337c59b72ccd6023c1999f3fc
|
[
"BSD-3-Clause"
] | null | null | null |
technology/scn3me_subm/tech/__init__.py
|
panicmarvin/OpenRAM
|
abf47bab50adb48337c59b72ccd6023c1999f3fc
|
[
"BSD-3-Clause"
] | 1
|
2020-01-23T07:12:52.000Z
|
2020-01-23T07:12:52.000Z
|
"""
Import tech specific modules.
"""
from tech import *
| 8.428571
| 29
| 0.661017
| 7
| 59
| 5.571429
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.20339
| 59
| 6
| 30
| 9.833333
| 0.829787
| 0.491525
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
6b3d535436cb58ec7d0e6cff4a98bcf39f6ed6fe
| 186
|
py
|
Python
|
scanpy/external/pp/__init__.py
|
ChenddatHKU/scanpy
|
1b290be6fd297023a0bd705e66f69254c1626fc4
|
[
"BSD-3-Clause"
] | null | null | null |
scanpy/external/pp/__init__.py
|
ChenddatHKU/scanpy
|
1b290be6fd297023a0bd705e66f69254c1626fc4
|
[
"BSD-3-Clause"
] | 1
|
2020-06-16T17:14:45.000Z
|
2020-06-16T17:14:45.000Z
|
scanpy/external/pp/__init__.py
|
ChenddatHKU/scanpy
|
1b290be6fd297023a0bd705e66f69254c1626fc4
|
[
"BSD-3-Clause"
] | 2
|
2020-06-16T16:42:40.000Z
|
2020-08-28T16:59:42.000Z
|
from ._mnn_correct import mnn_correct
from ._bbknn import bbknn
from ._dca import dca
from ._harmony_integrate import harmony_integrate
from ._magic import magic
from ._scvi import scvi
| 26.571429
| 49
| 0.83871
| 28
| 186
| 5.214286
| 0.357143
| 0.136986
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129032
| 186
| 6
| 50
| 31
| 0.901235
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
6b447dc7187e3cfb9d6b641a0b859c1d7e433b34
| 131
|
py
|
Python
|
teste_import.py
|
luis-ferrarezi/credentials-manager
|
2e12baa0ad58825ddf2667415331f1ce8ae940f6
|
[
"MIT"
] | null | null | null |
teste_import.py
|
luis-ferrarezi/credentials-manager
|
2e12baa0ad58825ddf2667415331f1ce8ae940f6
|
[
"MIT"
] | null | null | null |
teste_import.py
|
luis-ferrarezi/credentials-manager
|
2e12baa0ad58825ddf2667415331f1ce8ae940f6
|
[
"MIT"
] | null | null | null |
# Smoke test: load the stored 'Studeos' credentials and print them.
from read_credentials import ShowCredentials
# NOTE(review): prints a password to stdout — acceptable only as a throwaway
# local check; confirm this never runs in a shared environment.
nomeruim = ShowCredentials('Studeos')
print(nomeruim.login)
print(nomeruim.password)
| 21.833333
| 44
| 0.839695
| 14
| 131
| 7.785714
| 0.714286
| 0.238532
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076336
| 131
| 5
| 45
| 26.2
| 0.900826
| 0
| 0
| 0
| 0
| 0
| 0.053435
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.25
| 0.25
| 0
| 0.25
| 0.5
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
|
0
| 5
|
8628fb1cac7941e6946ac0683946e3618f63f545
| 6,375
|
py
|
Python
|
tests/flow/test_Richards_empirical.py
|
Prithwijit-Chak/simpeg
|
d93145d768b5512621cdd75566b4a8175fee9ed3
|
[
"MIT"
] | 358
|
2015-03-11T05:48:41.000Z
|
2022-03-26T02:04:12.000Z
|
tests/flow/test_Richards_empirical.py
|
thast/simpeg
|
8021082b8b53f3c08fa87fc085547bdd56437c6b
|
[
"MIT"
] | 885
|
2015-01-19T09:23:48.000Z
|
2022-03-29T12:08:34.000Z
|
tests/flow/test_Richards_empirical.py
|
thast/simpeg
|
8021082b8b53f3c08fa87fc085547bdd56437c6b
|
[
"MIT"
] | 214
|
2015-03-11T05:48:43.000Z
|
2022-03-02T01:05:11.000Z
|
from __future__ import print_function
import unittest
import numpy as np
import discretize
from discretize.tests import checkDerivative
from SimPEG import maps
from SimPEG.flow import richards
# NOTE(review): TOL is not referenced anywhere in this module — presumably a
# shared convention with the other Richards tests; confirm before removing.
TOL = 1e-8
# Fix the global RNG so the finite-difference derivative checks are reproducible.
np.random.seed(2)
class TestModels(unittest.TestCase):
    """Derivative checks for the empirical Richards-equation models.

    Each test wraps a model evaluation and its analytic derivative in
    ``discretize.tests.checkDerivative``, which validates the analytic
    derivative against a finite-difference estimate on random inputs.
    ``*_u`` tests differentiate with respect to the pressure head ``u``;
    ``*_m`` tests differentiate with respect to model parameters exposed
    through maps.

    NOTE(review): the second argument to ``assertTrue`` is the failure
    *message*; passing ``True`` there is a no-op — ``assertTrue(passed)``
    would be equivalent and clearer.
    """

    def test_haverkamp_theta_u(self):
        # d(theta)/d(u) for the Haverkamp water-retention model.
        mesh = discretize.TensorMesh([50])
        hav = richards.empirical.Haverkamp_theta(mesh)
        passed = checkDerivative(
            lambda u: (hav(u), hav.derivU(u)), np.random.randn(50), plotIt=False
        )
        self.assertTrue(passed, True)

    def test_haverkamp_theta_m(self):
        # d(theta)/d(m) for each Haverkamp parameter, exposed one at a time
        # through an IdentityMap.
        mesh = discretize.TensorMesh([50])
        idnmap = maps.IdentityMap(nP=mesh.nC)
        # Starting model values per parameter name.
        seeds = {
            "theta_r": np.random.rand(mesh.nC),
            "theta_s": np.random.rand(mesh.nC),
            "alpha": np.random.rand(mesh.nC),
            "beta": np.random.rand(mesh.nC),
        }
        # (parameter name, map kwargs, number of model vectors)
        opts = [
            ("theta_r", dict(theta_rMap=idnmap), 1),
            ("theta_s", dict(theta_sMap=idnmap), 1),
            ("alpha", dict(alphaMap=idnmap), 1),
            ("beta", dict(betaMap=idnmap), 1),
        ]
        u = np.random.randn(mesh.nC)
        for name, opt, nM in opts:
            van = richards.empirical.Haverkamp_theta(mesh, **opt)
            # "-"-separated names concatenate their seed vectors into one model.
            x0 = np.concatenate([seeds[n] for n in name.split("-")])

            def fun(m):
                van.model = m
                return van(u), van.derivM(u)

            print("Haverkamp_theta test m deriv: ", name)
            passed = checkDerivative(fun, x0, plotIt=False)
            self.assertTrue(passed, True)

    def test_vangenuchten_theta_u(self):
        # d(theta)/d(u) for the van Genuchten water-retention model.
        mesh = discretize.TensorMesh([50])
        van = richards.empirical.Vangenuchten_theta(mesh)
        passed = checkDerivative(
            lambda u: (van(u), van.derivU(u)), np.random.randn(50), plotIt=False
        )
        self.assertTrue(passed, True)

    def test_vangenuchten_theta_m(self):
        # d(theta)/d(m) for each van Genuchten parameter.
        mesh = discretize.TensorMesh([50])
        idnmap = maps.IdentityMap(nP=mesh.nC)
        seeds = {
            "theta_r": np.random.rand(mesh.nC),
            "theta_s": np.random.rand(mesh.nC),
            "n": np.random.rand(mesh.nC) + 1,  # n must exceed 1 for the model
            "alpha": np.random.rand(mesh.nC),
        }
        opts = [
            ("theta_r", dict(theta_rMap=idnmap), 1),
            ("theta_s", dict(theta_sMap=idnmap), 1),
            ("n", dict(nMap=idnmap), 1),
            ("alpha", dict(alphaMap=idnmap), 1),
        ]
        u = np.random.randn(mesh.nC)
        for name, opt, nM in opts:
            van = richards.empirical.Vangenuchten_theta(mesh, **opt)
            x0 = np.concatenate([seeds[n] for n in name.split("-")])

            def fun(m):
                van.model = m
                return van(u), van.derivM(u)

            print("Vangenuchten_theta test m deriv: ", name)
            passed = checkDerivative(fun, x0, plotIt=False)
            self.assertTrue(passed, True)

    def test_haverkamp_k_u(self):
        # d(k)/d(u) for the Haverkamp hydraulic-conductivity model.
        mesh = discretize.TensorMesh([5])
        hav = richards.empirical.Haverkamp_k(mesh)
        print("Haverkamp_k test u deriv")
        passed = checkDerivative(
            lambda u: (hav(u), hav.derivU(u)), np.random.randn(mesh.nC), plotIt=False
        )
        self.assertTrue(passed, True)

    def test_haverkamp_k_m(self):
        # d(k)/d(m) for single parameters and for joint parameter combinations
        # wired together through maps.Wires.
        mesh = discretize.TensorMesh([5])
        expmap = maps.IdentityMap(nP=mesh.nC)
        wires2 = maps.Wires(("one", mesh.nC), ("two", mesh.nC))
        wires3 = maps.Wires(("one", mesh.nC), ("two", mesh.nC), ("three", mesh.nC))
        # (parameter combination, map kwargs, number of stacked model vectors)
        opts = [
            ("Ks", dict(KsMap=expmap), 1),
            ("A", dict(AMap=expmap), 1),
            ("gamma", dict(gammaMap=expmap), 1),
            ("Ks-A", dict(KsMap=expmap * wires2.one, AMap=expmap * wires2.two), 2),
            (
                "Ks-gamma",
                dict(KsMap=expmap * wires2.one, gammaMap=expmap * wires2.two),
                2,
            ),
            (
                "A-gamma",
                dict(AMap=expmap * wires2.one, gammaMap=expmap * wires2.two),
                2,
            ),
            (
                "Ks-A-gamma",
                dict(
                    KsMap=expmap * wires3.one,
                    AMap=expmap * wires3.two,
                    gammaMap=expmap * wires3.three,
                ),
                3,
            ),
        ]
        u = np.random.randn(mesh.nC)
        for name, opt, nM in opts:
            # Re-seed per combination so each check sees the same random draws.
            np.random.seed(2)
            hav = richards.empirical.Haverkamp_k(mesh, **opt)

            def fun(m):
                hav.model = m
                return hav(u), hav.derivM(u)

            print("Haverkamp_k test m deriv: ", name)
            passed = checkDerivative(fun, np.random.randn(mesh.nC * nM), plotIt=False)
            self.assertTrue(passed, True)

    def test_vangenuchten_k_u(self):
        # d(k)/d(u) for the van Genuchten hydraulic-conductivity model.
        mesh = discretize.TensorMesh([50])
        van = richards.empirical.Vangenuchten_k(mesh)
        print("Vangenuchten_k test u deriv")
        passed = checkDerivative(
            lambda u: (van(u), van.derivU(u)), np.random.randn(mesh.nC), plotIt=False
        )
        self.assertTrue(passed, True)

    def test_vangenuchten_k_m(self):
        # d(k)/d(m) for each van Genuchten conductivity parameter; Ks goes
        # through an ExpMap, so its seed is drawn in log space.
        mesh = discretize.TensorMesh([50])
        expmap = maps.ExpMap(nP=mesh.nC)
        idnmap = maps.IdentityMap(nP=mesh.nC)
        seeds = {
            "Ks": np.random.triangular(
                np.log(1e-7), np.log(1e-6), np.log(1e-5), mesh.nC
            ),
            "I": np.random.rand(mesh.nC),
            "n": np.random.rand(mesh.nC) + 1,  # n must exceed 1 for the model
            "alpha": np.random.rand(mesh.nC),
        }
        opts = [
            ("Ks", dict(KsMap=expmap), 1),
            ("I", dict(IMap=idnmap), 1),
            ("n", dict(nMap=idnmap), 1),
            ("alpha", dict(alphaMap=idnmap), 1),
        ]
        u = np.random.randn(mesh.nC)
        for name, opt, nM in opts:
            van = richards.empirical.Vangenuchten_k(mesh, **opt)
            x0 = np.concatenate([seeds[n] for n in name.split("-")])

            def fun(m):
                van.model = m
                return van(u), van.derivM(u)

            print("Vangenuchten_k test m deriv: ", name)
            passed = checkDerivative(fun, x0, plotIt=False)
            self.assertTrue(passed, True)
# Allow running this test module directly (outside a test runner).
if __name__ == "__main__":
    unittest.main()
| 29.929577
| 86
| 0.526431
| 752
| 6,375
| 4.378989
| 0.135638
| 0.052839
| 0.040085
| 0.053447
| 0.809292
| 0.769207
| 0.707258
| 0.678712
| 0.605831
| 0.603097
| 0
| 0.015559
| 0.334588
| 6,375
| 212
| 87
| 30.070755
| 0.760726
| 0
| 0
| 0.51875
| 0
| 0
| 0.053647
| 0
| 0
| 0
| 0
| 0
| 0.05
| 1
| 0.075
| false
| 0.1
| 0.04375
| 0
| 0.15
| 0.04375
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
868c0429ea729cbccdce6c9a62b53e3cf75f2157
| 1,671
|
py
|
Python
|
python/phonenumbers/data/region_GG.py
|
timgates42/python-phonenumbers
|
98895826729a234acc1e27ce8e280fe7e54754ab
|
[
"Apache-2.0"
] | 1
|
2020-04-16T21:40:27.000Z
|
2020-04-16T21:40:27.000Z
|
warehouse/python-app-venv/lib/python3.6/site-packages/phonenumbers/data/region_GG.py
|
perivision/BAPPE
|
20b01b0aee6977ddc28d09b8fd667d2ec30486b6
|
[
"MIT"
] | 10
|
2020-03-24T10:47:53.000Z
|
2021-04-08T19:51:44.000Z
|
myvenv/lib/python3.6/site-packages/phonenumbers/data/region_GG.py
|
yog240597/saleor
|
b75a23827a4ec2ce91637f0afe6808c9d09da00a
|
[
"CC-BY-4.0"
] | 1
|
2020-06-19T11:49:45.000Z
|
2020-06-19T11:49:45.000Z
|
"""Auto-generated file, do not edit by hand. GG metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
# Number-plan metadata for Guernsey (GG). Guernsey shares the UK numbering
# plan, hence country_code=44, international prefix '00' and trunk prefix '0'.
# This literal is generated (see the module docstring) — change the generator,
# not this file.
PHONE_METADATA_GG = PhoneMetadata(id='GG', country_code=44, international_prefix='00',
    general_desc=PhoneNumberDesc(national_number_pattern='(?:1481|[357-9]\\d{3})\\d{6}|8\\d{6}(?:\\d{2})?', possible_length=(7, 9, 10), possible_length_local_only=(6,)),
    fixed_line=PhoneNumberDesc(national_number_pattern='1481[25-9]\\d{5}', example_number='1481256789', possible_length=(10,), possible_length_local_only=(6,)),
    mobile=PhoneNumberDesc(national_number_pattern='7(?:(?:781|839)\\d|911[17])\\d{5}', example_number='7781123456', possible_length=(10,)),
    toll_free=PhoneNumberDesc(national_number_pattern='80[08]\\d{7}|800\\d{6}|8001111', example_number='8001234567', possible_length=(7, 9, 10)),
    premium_rate=PhoneNumberDesc(national_number_pattern='(?:8(?:4[2-5]|7[0-3])|9(?:[01]\\d|8[0-3]))\\d{7}|845464\\d', example_number='9012345678', possible_length=(7, 10)),
    personal_number=PhoneNumberDesc(national_number_pattern='70\\d{8}', example_number='7012345678', possible_length=(10,)),
    voip=PhoneNumberDesc(national_number_pattern='56\\d{8}', example_number='5612345678', possible_length=(10,)),
    pager=PhoneNumberDesc(national_number_pattern='76(?:0[0-2]|2[356]|34|4[0134]|5[49]|6[0-369]|77|81|9[39])\\d{6}', example_number='7640123456', possible_length=(10,)),
    uan=PhoneNumberDesc(national_number_pattern='(?:3[0347]|55)\\d{8}', example_number='5512345678', possible_length=(10,)),
    national_prefix='0',
    # Parsing captures a bare 6-digit local number; the transform rule below
    # prepends the 1481 Guernsey area code to it.
    national_prefix_for_parsing='0|([25-9]\\d{5})$',
    national_prefix_transform_rule='1481\\1')
| 98.294118
| 173
| 0.725913
| 243
| 1,671
| 4.757202
| 0.386831
| 0.133218
| 0.225779
| 0.280277
| 0.143599
| 0.044983
| 0
| 0
| 0
| 0
| 0
| 0.150702
| 0.062837
| 1,671
| 16
| 174
| 104.4375
| 0.587484
| 0.031718
| 0
| 0
| 1
| 0.214286
| 0.243176
| 0.1433
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.071429
| 0
| 0.071429
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
86a01c904c0571d80fd3159a83e0d290815d64f8
| 25
|
py
|
Python
|
tests/test_q11.py
|
rpianna/ImageProcessing100
|
47299941edc0f6f31a31e6d943b0784f249f076b
|
[
"MIT"
] | null | null | null |
tests/test_q11.py
|
rpianna/ImageProcessing100
|
47299941edc0f6f31a31e6d943b0784f249f076b
|
[
"MIT"
] | null | null | null |
tests/test_q11.py
|
rpianna/ImageProcessing100
|
47299941edc0f6f31a31e6d943b0784f249f076b
|
[
"MIT"
] | null | null | null |
import source.q11 as q11
| 12.5
| 24
| 0.8
| 5
| 25
| 4
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.190476
| 0.16
| 25
| 1
| 25
| 25
| 0.761905
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
86d3e2aba64bfefa22d9f1a125d67ca348e89183
| 90
|
py
|
Python
|
tests/PaxHeaders.2308045/test-stream.py
|
cuijianming/ovs-2.13.6
|
e7fdf06d6c26d280b25bdf235146ae007cd2208c
|
[
"Apache-2.0"
] | null | null | null |
tests/PaxHeaders.2308045/test-stream.py
|
cuijianming/ovs-2.13.6
|
e7fdf06d6c26d280b25bdf235146ae007cd2208c
|
[
"Apache-2.0"
] | null | null | null |
tests/PaxHeaders.2308045/test-stream.py
|
cuijianming/ovs-2.13.6
|
e7fdf06d6c26d280b25bdf235146ae007cd2208c
|
[
"Apache-2.0"
] | null | null | null |
30 mtime=1639777085.528355983
30 atime=1639777086.394355983
30 ctime=1639777126.680355983
| 22.5
| 29
| 0.866667
| 12
| 90
| 6.5
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.75
| 0.066667
| 90
| 3
| 30
| 30
| 0.178571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
86ecc6e2794c5610929c896c700fd0fe91d00000
| 93
|
py
|
Python
|
bsff/characters/admin.py
|
ErikSeguinte/BSFF_Django
|
a4dde02b2af233bbcf0c625c20a5f2814a8ca214
|
[
"MIT"
] | null | null | null |
bsff/characters/admin.py
|
ErikSeguinte/BSFF_Django
|
a4dde02b2af233bbcf0c625c20a5f2814a8ca214
|
[
"MIT"
] | null | null | null |
bsff/characters/admin.py
|
ErikSeguinte/BSFF_Django
|
a4dde02b2af233bbcf0c625c20a5f2814a8ca214
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Character

# Expose Character in the Django admin with the default ModelAdmin options.
admin.site.register(Character)
| 31
| 32
| 0.849462
| 13
| 93
| 6.076923
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086022
| 93
| 3
| 33
| 31
| 0.929412
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
86f3fdcaf6ca949203bd7da46765e6568cbfd0b4
| 6,039
|
py
|
Python
|
q2_diversity/tests/test_core_metrics.py
|
nbokulich/q2-diversity
|
b815291801352c678e59d9d138b9a10531af0a1b
|
[
"BSD-3-Clause"
] | null | null | null |
q2_diversity/tests/test_core_metrics.py
|
nbokulich/q2-diversity
|
b815291801352c678e59d9d138b9a10531af0a1b
|
[
"BSD-3-Clause"
] | null | null | null |
q2_diversity/tests/test_core_metrics.py
|
nbokulich/q2-diversity
|
b815291801352c678e59d9d138b9a10531af0a1b
|
[
"BSD-3-Clause"
] | null | null | null |
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2021, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import io
import unittest
import biom
import skbio
import numpy as np
import pandas as pd
import pandas.util.testing as pdt
from qiime2.plugin.testing import TestPluginBase
from qiime2 import Artifact, Metadata
class CoreMetricsTests(TestPluginBase):
    """Tests for the q2-diversity ``core_metrics`` pipelines.

    Each test imports a small feature table (and, for the phylogenetic
    variants, a rooted tree), runs the pipeline at a fixed sampling depth,
    and spot-checks the number of outputs, selected output types and the
    observed-features / shannon vectors.
    """

    # QIIME 2 plugin package this TestPluginBase subclass loads.
    package = 'q2_diversity'

    def setUp(self):
        super().setUp()
        # Resolve the two pipelines under test from the loaded plugin.
        self.core_metrics = self.plugin.pipelines['core_metrics']
        self.core_metrics_phylogenetic = self.plugin.pipelines[
            'core_metrics_phylogenetic']

    def test_core_metrics_phylogenetic(self):
        # 2 features x 3 samples; S1 total is exactly the sampling depth (13).
        table = biom.Table(np.array([[0, 11, 11], [13, 11, 11]]),
                           ['O1', 'O2'],
                           ['S1', 'S2', 'S3'])
        table = Artifact.import_data('FeatureTable[Frequency]', table)
        tree = skbio.TreeNode.read(io.StringIO(
            '((O1:0.25, O2:0.50):0.25, O3:0.75)root;'))
        tree = Artifact.import_data('Phylogeny[Rooted]', tree)
        metadata = Metadata(
            pd.DataFrame({'foo': ['1', '2', '3']},
                         index=pd.Index(['S1', 'S2', 'S3'], name='id')))

        results = self.core_metrics_phylogenetic(table, tree, 13, metadata)

        self.assertEqual(len(results), 17)
        self.assertEqual(repr(results.bray_curtis_distance_matrix.type),
                         'DistanceMatrix')
        self.assertEqual(repr(results.jaccard_emperor.type), 'Visualization')

        # pipelines preserve the output's type, in this case, beta_phylogenetic
        # returns this type, and that is passed through to the final output
        # (as long as the type is a subtype of the signature).
        self.assertEqual(
            repr(results.faith_pd_vector.type),
            "SampleData[AlphaDiversity]")

        # S1 has a single non-zero feature; S2/S3 have two.
        expected = pd.Series({'S1': 1, 'S2': 2, 'S3': 2},
                             name='observed_features')
        pdt.assert_series_equal(results[2].view(pd.Series), expected)

    def test_core_metrics_phylogenetic_multiple_jobs(self):
        # Same as test_core_metrics_phylogenetic but exercising the
        # n_jobs_or_threads=2 code path; results must be unchanged.
        table = biom.Table(np.array([[0, 11, 11], [13, 11, 11]]),
                           ['O1', 'O2'],
                           ['S1', 'S2', 'S3'])
        table = Artifact.import_data('FeatureTable[Frequency]', table)
        tree = skbio.TreeNode.read(io.StringIO(
            '((O1:0.25, O2:0.50):0.25, O3:0.75)root;'))
        tree = Artifact.import_data('Phylogeny[Rooted]', tree)
        metadata = Metadata(
            pd.DataFrame({'foo': ['1', '2', '3']},
                         index=pd.Index(['S1', 'S2', 'S3'], name='id')))

        results = self.core_metrics_phylogenetic(table, tree, 13, metadata,
                                                 n_jobs_or_threads=2)

        self.assertEqual(len(results), 17)
        self.assertEqual(repr(results.bray_curtis_distance_matrix.type),
                         'DistanceMatrix')
        self.assertEqual(repr(results.jaccard_emperor.type), 'Visualization')

        # pipelines preserve the output's type, in this case, beta_phylogenetic
        # returns this type, and that is passed through to the final output
        # (as long as the type is a subtype of the signature).
        self.assertEqual(
            repr(results.faith_pd_vector.type),
            "SampleData[AlphaDiversity]")

        expected = pd.Series({'S1': 1, 'S2': 2, 'S3': 2},
                             name='observed_features')
        pdt.assert_series_equal(results[2].view(pd.Series), expected)

    def test_core_metrics_phylogenetic_rarefy_drops_sample(self):
        # S1 sums to 12 < sampling depth 13, so rarefaction drops it and the
        # downstream vectors only contain S2/S3.
        table = biom.Table(np.array([[0, 11, 11], [12, 11, 11]]),
                           ['O1', 'O2'],
                           ['S1', 'S2', 'S3'])
        table = Artifact.import_data('FeatureTable[Frequency]', table)
        tree = skbio.TreeNode.read(io.StringIO(
            '((O1:0.25, O2:0.50):0.25, O3:0.75)root;'))
        tree = Artifact.import_data('Phylogeny[Rooted]', tree)
        metadata = Metadata(
            pd.DataFrame({'foo': ['1', '2', '3']},
                         index=pd.Index(['S1', 'S2', 'S3'], name='id')))

        results = self.core_metrics_phylogenetic(table, tree, 13, metadata)

        self.assertEqual(len(results), 17)
        expected = pd.Series({'S2': 2, 'S3': 2},
                             name='observed_features')
        pdt.assert_series_equal(results[2].view(pd.Series), expected)

    def test_core_metrics(self):
        # NOTE: this test uses a table and sampling depth that produce
        # deterministic values
        table = biom.Table(np.array([[150, 100, 100], [50, 100, 100]]),
                           ['O1', 'O2'],
                           ['S1', 'S2', 'S3'])
        table = Artifact.import_data('FeatureTable[Frequency]', table)
        metadata = Metadata(
            pd.DataFrame({'foo': ['1', '2', '3']},
                         index=pd.Index(['S1', 'S2', 'S3'], name='id')))

        results = self.core_metrics(table=table, sampling_depth=200,
                                    metadata=metadata)

        # Non-phylogenetic pipeline yields 10 outputs (no Faith PD / UniFrac).
        self.assertEqual(len(results), 10)
        self.assertEqual(repr(results.bray_curtis_distance_matrix.type),
                         'DistanceMatrix')
        self.assertEqual(repr(results.jaccard_emperor.type), 'Visualization')

        obs_feat_exp = pd.Series({'S1': 2, 'S2': 2, 'S3': 2},
                                 name='observed_features')
        shannon_exp = pd.Series({'S1': 0.811278124459, 'S2': 1., 'S3': 1.},
                                name='shannon_entropy')
        pdt.assert_series_equal(results[1].view(pd.Series), obs_feat_exp)
        pdt.assert_series_equal(results[2].view(pd.Series), shannon_exp)
# Allow running this test module directly (python <this_file>.py).
if __name__ == '__main__':
    unittest.main()
| 40.26
| 79
| 0.555556
| 688
| 6,039
| 4.748547
| 0.226744
| 0.040404
| 0.056321
| 0.063667
| 0.764004
| 0.711662
| 0.711662
| 0.703704
| 0.703704
| 0.682277
| 0
| 0.049309
| 0.281338
| 6,039
| 149
| 80
| 40.530201
| 0.703456
| 0.131479
| 0
| 0.646465
| 0
| 0.030303
| 0.125645
| 0.03232
| 0
| 0
| 0
| 0
| 0.171717
| 1
| 0.050505
| false
| 0
| 0.161616
| 0
| 0.232323
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
811c8fe38a95ca33d248969b8d7063c43a9adf34
| 106
|
py
|
Python
|
FirstStepsInPython/Basics/Exercise1 First Steps in Coding/02. RadianstoDegrees.py
|
Pittor052/SoftUni-Studies
|
1ee6341082f6ccfa45b3e82824c37722bcf2fb31
|
[
"MIT"
] | null | null | null |
FirstStepsInPython/Basics/Exercise1 First Steps in Coding/02. RadianstoDegrees.py
|
Pittor052/SoftUni-Studies
|
1ee6341082f6ccfa45b3e82824c37722bcf2fb31
|
[
"MIT"
] | null | null | null |
FirstStepsInPython/Basics/Exercise1 First Steps in Coding/02. RadianstoDegrees.py
|
Pittor052/SoftUni-Studies
|
1ee6341082f6ccfa45b3e82824c37722bcf2fb31
|
[
"MIT"
] | 1
|
2021-10-07T18:30:42.000Z
|
2021-10-07T18:30:42.000Z
|
from math import pi
from math import floor

# Read an angle in radians from stdin and print it converted to whole
# degrees, rounded down.
radians_value = float(input())
degrees_value = radians_value * 180 / pi
print(floor(degrees_value))
| 21.2
| 22
| 0.716981
| 18
| 106
| 4.222222
| 0.611111
| 0.210526
| 0.368421
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.034091
| 0.169811
| 106
| 5
| 23
| 21.2
| 0.829545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 0.4
| 0.2
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
8124b21d61f5fb8c37a70d8948ba4a3daa8536ab
| 170
|
py
|
Python
|
thebookgarden/customer_app/admin.py
|
ArsalanJamali/TheBookGarden
|
153bfa229cd897b19b9e0827acc06dbe63558174
|
[
"MIT"
] | null | null | null |
thebookgarden/customer_app/admin.py
|
ArsalanJamali/TheBookGarden
|
153bfa229cd897b19b9e0827acc06dbe63558174
|
[
"MIT"
] | null | null | null |
thebookgarden/customer_app/admin.py
|
ArsalanJamali/TheBookGarden
|
153bfa229cd897b19b9e0827acc06dbe63558174
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import AddressBook,ReviewBook
# Register your models here.
# Expose the customer-app models in the Django admin with default
# ModelAdmin options.
admin.site.register(AddressBook)
admin.site.register(ReviewBook)
| 28.333333
| 42
| 0.835294
| 22
| 170
| 6.454545
| 0.545455
| 0.126761
| 0.239437
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088235
| 170
| 6
| 43
| 28.333333
| 0.916129
| 0.152941
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
813b53b32240db8ce3198331fe0be09cec29d4bf
| 428
|
py
|
Python
|
src/omlt/neuralnet/__init__.py
|
joshuahaddad/OMLT
|
c7b9cd98561b2679a80b0dd2c147f8580d8cda1a
|
[
"BSD-3-Clause"
] | 1
|
2022-02-10T10:15:57.000Z
|
2022-02-10T10:15:57.000Z
|
src/omlt/neuralnet/__init__.py
|
joshuahaddad/OMLT
|
c7b9cd98561b2679a80b0dd2c147f8580d8cda1a
|
[
"BSD-3-Clause"
] | null | null | null |
src/omlt/neuralnet/__init__.py
|
joshuahaddad/OMLT
|
c7b9cd98561b2679a80b0dd2c147f8580d8cda1a
|
[
"BSD-3-Clause"
] | null | null | null |
from omlt.neuralnet.network_definition import NetworkDefinition
from omlt.neuralnet.nn_formulation import (FullSpaceNNFormulation, ReducedSpaceNNFormulation,
FullSpaceSmoothNNFormulation, ReducedSpaceSmoothNNFormulation,
ReluBigMFormulation, ReluComplementarityFormulation,
ReluPartitionFormulation)
| 85.6
| 105
| 0.640187
| 20
| 428
| 13.6
| 0.8
| 0.058824
| 0.125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.331776
| 428
| 5
| 106
| 85.6
| 0.951049
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
d4b278f0f34e3a893de168e2d42aba7c831633fe
| 206
|
py
|
Python
|
apps/users/jwt_handler.py
|
pedro-hs/financial-account
|
7e8e4d0f3ac888fa36a091d0e733a8e1926180d2
|
[
"MIT"
] | null | null | null |
apps/users/jwt_handler.py
|
pedro-hs/financial-account
|
7e8e4d0f3ac888fa36a091d0e733a8e1926180d2
|
[
"MIT"
] | null | null | null |
apps/users/jwt_handler.py
|
pedro-hs/financial-account
|
7e8e4d0f3ac888fa36a091d0e733a8e1926180d2
|
[
"MIT"
] | null | null | null |
from django.conf import settings
from rest_framework_jwt.utils import jwt_payload_handler
def custom_jwt(user):
    """Build the standard JWT payload for *user* and attach the user's
    role as an extra claim."""
    claims = jwt_payload_handler(user)
    claims['role'] = user.role
    return claims
| 20.6
| 56
| 0.771845
| 29
| 206
| 5.241379
| 0.551724
| 0.131579
| 0.223684
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.160194
| 206
| 9
| 57
| 22.888889
| 0.878613
| 0
| 0
| 0
| 0
| 0
| 0.019417
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
d4c0d7588ffcd8671b176e8a373cdf800daa7a86
| 2,941
|
py
|
Python
|
Python/maximum-white-tiles-covered-by-a-carpet.py
|
Priyansh2/LeetCode-Solutions
|
d613da1881ec2416ccbe15f20b8000e36ddf1291
|
[
"MIT"
] | 4
|
2018-10-11T17:50:56.000Z
|
2018-10-11T21:16:44.000Z
|
Python/maximum-white-tiles-covered-by-a-carpet.py
|
Priyansh2/LeetCode-Solutions
|
d613da1881ec2416ccbe15f20b8000e36ddf1291
|
[
"MIT"
] | null | null | null |
Python/maximum-white-tiles-covered-by-a-carpet.py
|
Priyansh2/LeetCode-Solutions
|
d613da1881ec2416ccbe15f20b8000e36ddf1291
|
[
"MIT"
] | 4
|
2018-10-11T18:50:32.000Z
|
2018-10-12T00:04:09.000Z
|
# Time: O(nlogn)
# Space: O(1)
# sliding window, optimized from solution3
class Solution(object):
    def maximumWhiteTiles(self, tiles, carpetLen):
        """
        Sliding window over tile segments: for each window start, extend
        the window end while the carpet (placed at the start's left edge)
        can still reach the next segment; subtract accumulated gaps.
        Note: sorts `tiles` in place.

        :type tiles: List[List[int]]
        :type carpetLen: int
        :rtype: int
        """
        tiles.sort()
        best = 0
        gap_total = 0          # sum of empty space between segments in the window
        win_end = 0
        for win_start, (start_l, _) in enumerate(tiles):
            if win_start >= 1:
                # Window start advanced: the gap before it leaves the window.
                gap_total -= tiles[win_start][0] - tiles[win_start - 1][1] - 1
            cover_r = start_l + carpetLen - 1
            while win_end + 1 < len(tiles) and tiles[win_end + 1][0] <= cover_r + 1:
                win_end += 1
                gap_total += tiles[win_end][0] - tiles[win_end - 1][1] - 1
            span = min(tiles[win_end][1] - tiles[win_start][0] + 1, carpetLen)
            best = max(best, span - gap_total)
        return best
# Time: O(nlogn)
# Space: O(1)
# sliding window, optimized from solution4
class Solution2(object):
    def maximumWhiteTiles(self, tiles, carpetLen):
        """
        Sliding window keyed on the right endpoint: place the carpet so it
        ends at each segment's right edge and shrink the left boundary
        until it is reachable, tracking inter-segment gaps.
        Note: sorts `tiles` in place.

        :type tiles: List[List[int]]
        :type carpetLen: int
        :rtype: int
        """
        tiles.sort()
        result = left = gap = 0
        # BUGFIX: was `xrange`, which only exists on Python 2 and raises
        # NameError on Python 3; `range` behaves identically here.
        for right in range(len(tiles)):
            if right-1 >= 0:
                gap += tiles[right][0]-tiles[right-1][1]-1
            l = tiles[right][1]-carpetLen+1
            while not (tiles[left][1]+1 >= l):
                left += 1
                gap -= tiles[left][0]-tiles[left-1][1]-1
            result = max(result, min(tiles[right][1]-tiles[left][0]+1, carpetLen)-gap)
        return result
# Time: O(nlogn)
# Space: O(n)
import bisect
# prefix sum, binary search
class Solution3(object):
    def maximumWhiteTiles(self, tiles, carpetLen):
        """
        Prefix sums + binary search: for each segment's left edge as the
        carpet's left edge, binary-search the last reachable segment and
        subtract the part of it that overhangs the carpet.
        Note: sorts `tiles` in place.

        :type tiles: List[List[int]]
        :type carpetLen: int
        :rtype: int
        """
        tiles.sort()
        # prefix[i] = total white tiles in tiles[:i]
        prefix = [0]
        for seg_l, seg_r in tiles:
            prefix.append(prefix[-1] + (seg_r - seg_l + 1))
        best = 0
        for i, (seg_l, _) in enumerate(tiles):
            cover_r = seg_l + carpetLen - 1
            j = bisect.bisect_right(tiles, [cover_r + 1]) - 1
            overshoot = max(tiles[j][1] - cover_r, 0)
            best = max(best, (prefix[j + 1] - prefix[i]) - overshoot)
        return best
# Time: O(nlogn)
# Space: O(n)
import bisect
# prefix sum, binary search
class Solution4(object):
    def maximumWhiteTiles(self, tiles, carpetLen):
        """
        Prefix sums + binary search, anchored on right endpoints: place
        the carpet so it ends at each segment's right edge, locate the
        first segment it touches, and subtract the uncovered leading part.
        Note: sorts `tiles` in place.

        :type tiles: List[List[int]]
        :type carpetLen: int
        :rtype: int
        """
        tiles.sort()
        # prefix[i] = total white tiles in tiles[:i]
        prefix = [0]
        for seg_l, seg_r in tiles:
            prefix.append(prefix[-1] + (seg_r - seg_l + 1))
        best = 0
        for j, (_, seg_r) in enumerate(tiles):
            cover_l = seg_r - carpetLen + 1
            i = bisect.bisect_right(tiles, [cover_l])
            # Step back one segment if the previous one still reaches cover_l.
            if i - 1 >= 0 and tiles[i - 1][1] + 1 >= cover_l:
                i -= 1
            uncovered = max(cover_l - tiles[i][0], 0)
            best = max(best, (prefix[j + 1] - prefix[i]) - uncovered)
        return best
| 29.41
| 86
| 0.514451
| 380
| 2,941
| 3.968421
| 0.139474
| 0.015915
| 0.051061
| 0.039788
| 0.757294
| 0.740053
| 0.740053
| 0.709549
| 0.709549
| 0.644562
| 0
| 0.036697
| 0.33288
| 2,941
| 99
| 87
| 29.707071
| 0.731906
| 0.16797
| 0
| 0.607143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.035714
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
d4d788aa17f0eb5a85b5c24dcbd1ff2c081440ed
| 27
|
py
|
Python
|
src/network/__init__.py
|
markveillette/high-fidelity-generative-compression
|
d88b4d7f1212efa8611e91737ff6bf00bbf36670
|
[
"Apache-2.0"
] | 266
|
2020-08-25T00:04:58.000Z
|
2022-03-31T06:41:03.000Z
|
src/network/__init__.py
|
markveillette/high-fidelity-generative-compression
|
d88b4d7f1212efa8611e91737ff6bf00bbf36670
|
[
"Apache-2.0"
] | 27
|
2020-09-01T21:04:27.000Z
|
2022-03-22T02:24:48.000Z
|
src/network/__init__.py
|
markveillette/high-fidelity-generative-compression
|
d88b4d7f1212efa8611e91737ff6bf00bbf36670
|
[
"Apache-2.0"
] | 50
|
2020-08-28T02:11:46.000Z
|
2022-02-25T02:44:42.000Z
|
# Model / loss definitions
| 13.5
| 26
| 0.740741
| 3
| 27
| 6.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.185185
| 27
| 1
| 27
| 27
| 0.909091
| 0.888889
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
d4ed381d248363ca514ca984c1f0d28baa5f066e
| 6,556
|
py
|
Python
|
my_util/font_encryption_detective/main.py
|
RecluseXU/learning_spider
|
45fa790ed7970be57a21b40817cc66856de3d99b
|
[
"MIT"
] | 38
|
2020-08-30T11:41:53.000Z
|
2022-03-23T04:30:26.000Z
|
my_util/font_encryption_detective/main.py
|
AndersonHJB/learning_spider
|
b855b7808fb5268e9564180cf73ba5b1fb133f58
|
[
"MIT"
] | 2
|
2021-08-20T16:34:12.000Z
|
2021-10-08T11:06:41.000Z
|
my_util/font_encryption_detective/main.py
|
AndersonHJB/learning_spider
|
b855b7808fb5268e9564180cf73ba5b1fb133f58
|
[
"MIT"
] | 10
|
2020-11-24T09:15:42.000Z
|
2022-02-25T06:05:16.000Z
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : main.py
@Time : 2020-8-29 21:12:52
@Author : Recluse Xu
@Version : 1.0
@Contact : 444640050@qq.com
@Desc :
'''
# here put the import lib
import matplotlib.pyplot as plt
import numpy as np
from fontTools.ttLib import TTFont
from fontTools.pens import pointPen
from io import BytesIO
import re
import base64
import tesserocr
from PIL import Image
work_folder = 'F:/Workspace/learning_spider/my_util/font_encryption_detective/'
class FontDetective(object):
    """Reverse-engineer an obfuscated web font: dump the font to XML,
    render each glyph's contours to a PNG, and OCR the images to recover
    the real character behind each code point."""

    def __init__(self, font):
        '''
        Load the font, save it as TTF and as TTX (XML), and record the cmap.

        :param font: raw font file bytes (e.g. a decoded base64 payload)
        '''
        ttf = TTFont(BytesIO(font))
        # Persist a TTF copy and an XML dump; get_img_list() re-parses the XML.
        ttf.save(work_folder + '58_fangchan_secret_font.ttf')
        ttf.saveXML(work_folder+'buffer_font.xml')
        cmap = ttf.getBestCmap()
        # Invert the cmap: glyph name -> code point as bare hex
        # (the '0x' prefix produced by hex() is stripped).
        convert_uh2val_dict = {}
        for _key, _val in cmap.items():
            _key = hex(_key)
            convert_uh2val_dict[_val] = _key[2:]
        self.cmap = convert_uh2val_dict
        print(convert_uh2val_dict)

    def get_img_list(self):
        '''
        Extract glyph contours from the TTX dump via regexes, plot each
        glyph with matplotlib, and return {unicode hex: PNG bytes}.
        Also writes each glyph image to work_folder/img/<hex>.png.
        '''
        with open(work_folder+'buffer_font.xml', 'r')as f:
            _font = f.read()
        glyphs_dict = {}
        # One match per <TTGlyph>, then per <contour>, then per point.
        pattern = re.compile(r'<TTGlyph name="(.*?)" xMin=[\s\S]+?>([\s\S]+?)</TTGlyph>')
        pattern_t = re.compile(r'<contour>([\s\S]+?)</contour>')
        pattern_x = re.compile(r'<pt x="(.*?)" y=')
        pattern_y = re.compile(r'y="(.*?)" on=')
        for glyph in re.findall(pattern, _font):
            contour_list = []
            for contour in re.findall(pattern_t, glyph[1]):
                # Close each contour by repeating its first point.
                x = [int(i) for i in re.findall(pattern_x, contour)]
                x.append(x[0])
                y = [int(i) for i in re.findall(pattern_y, contour)]
                y.append(y[0])
                contour_list.append({'x': x, 'y': y})
            glyphs_dict[self.cmap[glyph[0]]] = contour_list
        # print(glyphs_dict)
        for glyph, contours in glyphs_dict.items():
            # Draw the outline off-axis and grab the rendered PNG bytes
            # straight from the matplotlib canvas.
            fig = plt.figure()
            plt.axis('off')
            for contour in contours:
                plt.plot(contour['x'], contour['y'], linewidth=10)
            canvas = fig.canvas
            buffer = BytesIO()
            canvas.print_png(buffer)
            data = buffer.getvalue()
            buffer.close()
            glyphs_dict[glyph] = data
            with open(work_folder + 'img/' + glyph + '.png', 'wb')as f:
                f.write(data)
            plt.cla()
        self.glyphs_dict = glyphs_dict
        return glyphs_dict

    def get_real_char_value(self):
        '''
        OCR each rendered glyph image with tesserocr and print the
        (unicode hex, recognized text) pairs.
        Requires get_img_list() to have been called first.
        '''
        for unicode_c, img_bin in self.glyphs_dict.items():
            img = Image.open(BytesIO(img_bin))
            chars = tesserocr.image_to_text(img)
            print(unicode_c, chars)

    def get_html_convert_dict(self):
        # Not implemented yet.
        pass
if __name__ == "__main__":
a = 'AAEAAAALAIAAAwAwR1NVQiCLJXoAAAE4AAAAVE9TLzL4XQjtAAABjAAAAFZjbWFwq8R/YwAAAhAAAAIuZ2x5ZuWIN0cAAARYAAADdGhlYWQaaNBTAAAA4AAAADZoaGVhCtADIwAAALwAAAAkaG10eC7qAAAAAAHkAAAALGxvY2ED7gSyAAAEQAAAABhtYXhwARgANgAAARgAAAAgbmFtZTd6VP8AAAfMAAACanBvc3QEQwahAAAKOAAAAEUAAQAABmb+ZgAABLEAAAAABGgAAQAAAAAAAAAAAAAAAAAAAAsAAQAAAAEAAOAN9/pfDzz1AAsIAAAAAADbcMKhAAAAANtwwqEAAP/mBGgGLgAAAAgAAgAAAAAAAAABAAAACwAqAAMAAAAAAAIAAAAKAAoAAAD/AAAAAAAAAAEAAAAKADAAPgACREZMVAAObGF0bgAaAAQAAAAAAAAAAQAAAAQAAAAAAAAAAQAAAAFsaWdhAAgAAAABAAAAAQAEAAQAAAABAAgAAQAGAAAAAQAAAAEERAGQAAUAAAUTBZkAAAEeBRMFmQAAA9cAZAIQAAACAAUDAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFBmRWQAQJR2n6UGZv5mALgGZgGaAAAAAQAAAAAAAAAAAAAEsQAABLEAAASxAAAEsQAABLEAAASxAAAEsQAABLEAAASxAAAEsQAAAAAABQAAAAMAAAAsAAAABAAAAaYAAQAAAAAAoAADAAEAAAAsAAMACgAAAaYABAB0AAAAFAAQAAMABJR2lY+ZPJpLnjqeo59kn5Kfpf//AACUdpWPmTyaS546nqOfZJ+Sn6T//wAAAAAAAAAAAAAAAAAAAAAAAAABABQAFAAUABQAFAAUABQAFAAUAAAACAADAAEABwAJAAUACgACAAQABgAAAQYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADAAAAAAAiAAAAAAAAAAKAACUdgAAlHYAAAAIAACVjwAAlY8AAAADAACZPAAAmTwAAAABAACaSwAAmksAAAAHAACeOgAAnjoAAAAJAACeowAAnqMAAAAFAACfZAAAn2QAAAAKAACfkgAAn5IAAAACAACfpAAAn6QAAAAEAACfpQAAn6UAAAAGAAAAAAAAACgAPgBmAJoAvgDoASQBOAF+AboAAgAA/+YEWQYnAAoAEgAAExAAISAREAAjIgATECEgERAhIFsBEAECAez+6/rs/v3IATkBNP7S/sEC6AGaAaX85v54/mEBigGB/ZcCcwKJAAABAAAAAAQ1Bi4ACQAAKQE1IREFNSURIQQ1/IgBW/6cAicBWqkEmGe0oPp7AAEAAAAABCYGJwAXAAApATUBPgE1NCYjIgc1NjMyFhUUAgcBFSEEGPxSAcK6fpSMz7y389Hym9j+nwLGqgHButl0hI2wx43iv5D+69b+pwQAAQAA/+YEGQYnACEAABMWMzI2NRAhIzUzIBE0ISIHNTYzMhYVEAUVHgEVFAAjIiePn8igu/5bgXsBdf7jo5CYy8bw/sqow/7T+tyHAQN7nYQBJqIBFP9uuVjPpf7QVwQSyZbR/wBSAAACAAAAAARoBg0ACgASAAABIxEjESE1ATMRMyERNDcjBgcBBGjGvv0uAq3jxv58BAQOLf4zAZL+bgGSfwP8/CACiUVaJlH9TwABAAD
/5gQhBg0AGAAANxYzMjYQJiMiBxEhFSERNjMyBBUUACEiJ7GcqaDEx71bmgL6/bxXLPUBEv7a/v3Zbu5mswEppA4DE63+SgX42uH+6kAAAAACAAD/5gRbBicAFgAiAAABJiMiAgMzNjMyEhUUACMiABEQACEyFwEUFjMyNjU0JiMiBgP6eYTJ9AIFbvHJ8P7r1+z+8wFhASClXv1Qo4eAoJeLhKQFRj7+ov7R1f762eP+3AFxAVMBmgHjLfwBmdq8lKCytAAAAAABAAAAAARNBg0ABgAACQEjASE1IQRN/aLLAkD8+gPvBcn6NwVgrQAAAwAA/+YESgYnABUAHwApAAABJDU0JDMyFhUQBRUEERQEIyIkNRAlATQmIyIGFRQXNgEEFRQWMzI2NTQBtv7rAQTKufD+3wFT/un6zf7+AUwBnIJvaJLz+P78/uGoh4OkAy+B9avXyqD+/osEev7aweXitAEohwF7aHh9YcJlZ/7qdNhwkI9r4QAAAAACAAD/5gRGBicAFwAjAAA3FjMyEhEGJwYjIgA1NAAzMgAREAAhIicTFBYzMjY1NCYjIga5gJTQ5QICZvHD/wABGN/nAQT+sP7Xo3FxoI16pqWHfaTSSgFIAS4CAsIBDNbkASX+lf6l/lP+MjUEHJy3p3en274AAAAAABAAxgABAAAAAAABAA8AAAABAAAAAAACAAcADwABAAAAAAADAA8AFgABAAAAAAAEAA8AJQABAAAAAAAFAAsANAABAAAAAAAGAA8APwABAAAAAAAKACsATgABAAAAAAALABMAeQADAAEECQABAB4AjAADAAEECQACAA4AqgADAAEECQADAB4AuAADAAEECQAEAB4A1gADAAEECQAFABYA9AADAAEECQAGAB4BCgADAAEECQAKAFYBKAADAAEECQALACYBfmZhbmdjaGFuLXNlY3JldFJlZ3VsYXJmYW5nY2hhbi1zZWNyZXRmYW5nY2hhbi1zZWNyZXRWZXJzaW9uIDEuMGZhbmdjaGFuLXNlY3JldEdlbmVyYXRlZCBieSBzdmcydHRmIGZyb20gRm9udGVsbG8gcHJvamVjdC5odHRwOi8vZm9udGVsbG8uY29tAGYAYQBuAGcAYwBoAGEAbgAtAHMAZQBjAHIAZQB0AFIAZQBnAHUAbABhAHIAZgBhAG4AZwBjAGgAYQBuAC0AcwBlAGMAcgBlAHQAZgBhAG4AZwBjAGgAYQBuAC0AcwBlAGMAcgBlAHQAVgBlAHIAcwBpAG8AbgAgADEALgAwAGYAYQBuAGcAYwBoAGEAbgAtAHMAZQBjAHIAZQB0AEcAZQBuAGUAcgBhAHQAZQBkACAAYgB5ACAAcwB2AGcAMgB0AHQAZgAgAGYAcgBvAG0AIABGAG8AbgB0AGUAbABsAG8AIABwAHIAbwBqAGUAYwB0AC4AaAB0AHQAcAA6AC8ALwBmAG8AbgB0AGUAbABsAG8ALgBjAG8AbQAAAAIAAAAAAAD/EwB3AAAAAAAAAAAAAAAAAAAAAAAAAAAACwECAQMBBAEFAQYBBwEIAQkBCgELAQwAAAAAAAAAAAAAAAAAAAAA'
font_info = base64.b64decode(a)
f = FontDetective(font_info)
f.get_img_list()
f.get_real_char_value()
| 64.27451
| 3,594
| 0.796065
| 447
| 6,556
| 11.49217
| 0.467562
| 0.01752
| 0.013237
| 0.014016
| 0.019077
| 0.010123
| 0.010123
| 0.010123
| 0
| 0
| 0
| 0.045223
| 0.13316
| 6,556
| 101
| 3,595
| 64.910891
| 0.858701
| 0.038591
| 0
| 0
| 0
| 0.014706
| 0.61347
| 0.596393
| 0
| 1
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0.014706
| 0.132353
| 0
| 0.220588
| 0.044118
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
be067796a4fe52ffeaf0db3c4811a11ef8e885b9
| 95
|
py
|
Python
|
first_steps/second_module.py
|
Minkov/python-oop
|
db9651eef374c0e74c32cb6f2bf07c734cc1d051
|
[
"MIT"
] | 3
|
2021-11-16T04:52:53.000Z
|
2022-02-07T20:28:41.000Z
|
first_steps/second_module.py
|
Minkov/python-oop
|
db9651eef374c0e74c32cb6f2bf07c734cc1d051
|
[
"MIT"
] | null | null | null |
first_steps/second_module.py
|
Minkov/python-oop
|
db9651eef374c0e74c32cb6f2bf07c734cc1d051
|
[
"MIT"
] | 1
|
2021-12-07T07:04:38.000Z
|
2021-12-07T07:04:38.000Z
|
def print_info():
    """Print identifying output for the second module."""
    print('Second module')
    numbers = [1, 2, 3]
    print(f'From second_modules: {sum(numbers)}')
| 15.833333
| 47
| 0.631579
| 15
| 95
| 3.866667
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0375
| 0.157895
| 95
| 5
| 48
| 19
| 0.6875
| 0
| 0
| 0
| 0
| 0
| 0.526316
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0
| 0
| 0.333333
| 1
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
076ed07bbe8f4dd11ea490f5864e70eac85a9285
| 174
|
py
|
Python
|
scripts/jsonjp.py
|
kiever/pokesprite
|
a36c8439cedea77a0d4077782d2321e8d27a8b9f
|
[
"MIT"
] | 1
|
2019-12-27T19:45:02.000Z
|
2019-12-27T19:45:02.000Z
|
scripts/jsonjp.py
|
kiever/pokesprite
|
a36c8439cedea77a0d4077782d2321e8d27a8b9f
|
[
"MIT"
] | null | null | null |
scripts/jsonjp.py
|
kiever/pokesprite
|
a36c8439cedea77a0d4077782d2321e8d27a8b9f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Print the first command-line argument as a JSON string literal
# (non-ASCII characters become \uXXXX escapes).
# example: $ ./jsonjp.py モクロー
# output: "\u30e2\u30af\u30ed\u30fc"
import json
import sys

encoded = json.dumps(sys.argv[1])
print(encoded)
| 21.75
| 36
| 0.724138
| 27
| 174
| 4.666667
| 0.851852
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.070968
| 0.109195
| 174
| 8
| 37
| 21.75
| 0.741935
| 0.643678
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0.333333
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
07963e0c9e8778275d896370143d369b42968be4
| 31,300
|
py
|
Python
|
managers/tests.py
|
naderm/farnsworth
|
f7a635a82eae20ca395a939966bfa1e296d4e3a2
|
[
"BSD-2-Clause"
] | null | null | null |
managers/tests.py
|
naderm/farnsworth
|
f7a635a82eae20ca395a939966bfa1e296d4e3a2
|
[
"BSD-2-Clause"
] | null | null | null |
managers/tests.py
|
naderm/farnsworth
|
f7a635a82eae20ca395a939966bfa1e296d4e3a2
|
[
"BSD-2-Clause"
] | null | null | null |
"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from datetime import datetime, timedelta
from django.conf import settings
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.utils import override_settings
from django.utils.timezone import now
from base.models import UserProfile, ProfileRequest
from utils.funcs import convert_to_url
from utils.variables import ANONYMOUS_USERNAME, MESSAGES
from managers.cron import ExpireRequestsCronJob
from managers.models import Manager, RequestType, Request, Response, Announcement
class TestPermissions(TestCase):
    """Verify per-view access control: which pages require a profile, an
    admin, or a president/admin, and where each user class is redirected."""
    # NOTE(review): _admin_required, _president_admin_required and
    # _profile_required share most of their bodies; a common helper
    # parameterized by (username, expected message) would remove the
    # duplication.

    def setUp(self):
        # Users: u = plain member, st = staff, pu = president,
        # su = superuser, np = no profile (profile deleted below).
        self.u = User.objects.create_user(username="u", password="pwd")
        self.st = User.objects.create_user(username="st", password="pwd")
        self.pu = User.objects.create_user(username="pu", password="pwd")
        self.su = User.objects.create_user(username="su", password="pwd")
        self.np = User.objects.create_user(username="np", password="pwd")
        self.st.is_staff = True
        self.su.is_staff, self.su.is_superuser = True, True
        self.u.save()
        self.st.save()
        self.pu.save()
        self.su.save()
        self.np.save()
        self.m = Manager.objects.create(
            title="House President",
            incumbent=UserProfile.objects.get(user=self.pu),
            president=True,
        )
        self.rt = RequestType.objects.create(
            name="Food",
        )
        self.rt.managers = [self.m]
        self.rt.save()
        self.a = Announcement.objects.create(
            manager=self.m,
            incumbent=self.m.incumbent,
            body="Test Announcement Body",
            post_date=datetime.now(),
        )
        self.request = Request.objects.create(
            owner=UserProfile.objects.get(user=self.u),
            body="request body", request_type=self.rt,
        )
        UserProfile.objects.get(user=self.np).delete()
        self.pr = ProfileRequest.objects.create(
            username="pr",
            email="pr@email.com",
            affiliation=UserProfile.STATUS_CHOICES[0][0],
        )

    def _admin_required(self, url, success_target=None):
        """Assert that only the superuser can reach `url`; everyone else
        is redirected with the appropriate message."""
        # Anonymous: bounced to login with ?next=.
        response = self.client.get(url)
        login = reverse("login")
        self.assertRedirects(response, login + "?next=" + url)
        self.client.login(username="np", password="pwd")
        response = self.client.get(url, follow=True)
        self.assertRedirects(response, reverse("external"))
        self.assertContains(response, MESSAGES["NO_PROFILE"])
        self.client.logout()
        self.client.login(username="u", password="pwd")
        response = self.client.get(url, follow=True)
        self.assertRedirects(response, reverse("homepage"))
        self.assertContains(response, MESSAGES["ADMINS_ONLY"])
        self.client.logout()
        self.client.login(username="st", password="pwd")
        response = self.client.get(url, follow=True)
        self.assertRedirects(response, reverse("homepage"))
        self.assertContains(response, MESSAGES["ADMINS_ONLY"])
        self.client.logout()
        self.client.login(username="su", password="pwd")
        response = self.client.get(url)
        if success_target is None:
            self.assertEqual(response.status_code, 200)
        else:
            self.assertRedirects(response, success_target)
        self.client.logout()

    def _president_admin_required(self, url, success_target=None):
        """Assert that the superuser and the president can reach `url`;
        everyone else is redirected with PRESIDENTS_ONLY (or NO_PROFILE)."""
        response = self.client.get(url)
        login = reverse("login")
        self.assertRedirects(response, login + "?next=" + url)
        self.client.login(username="np", password="pwd")
        response = self.client.get(url, follow=True)
        self.assertRedirects(response, reverse("external"))
        self.assertContains(response, MESSAGES["NO_PROFILE"])
        self.client.logout()
        self.client.login(username="u", password="pwd")
        response = self.client.get(url, follow=True)
        self.assertRedirects(response, reverse("homepage"))
        self.assertContains(response, MESSAGES["PRESIDENTS_ONLY"])
        self.client.logout()
        self.client.login(username="st", password="pwd")
        response = self.client.get(url, follow=True)
        self.assertRedirects(response, reverse("homepage"))
        self.assertContains(response, MESSAGES["PRESIDENTS_ONLY"])
        self.client.logout()
        self.client.login(username="su", password="pwd")
        response = self.client.get(url)
        if success_target is None:
            self.assertEqual(response.status_code, 200)
        else:
            self.assertRedirects(response, success_target)
        self.client.logout()
        self.client.login(username="pu", password="pwd")
        response = self.client.get(url)
        if success_target is None:
            self.assertEqual(response.status_code, 200)
        else:
            self.assertRedirects(response, success_target)
        self.client.logout()

    def _profile_required(self, url, success_target=None):
        """Assert that any user with a profile can reach `url`; anonymous
        and profile-less users are redirected."""
        response = self.client.get(url)
        login = reverse("login")
        self.assertRedirects(response, login + "?next=" + url)
        self.client.login(username="np", password="pwd")
        response = self.client.get(url, follow=True)
        self.assertRedirects(response, reverse("external"))
        self.assertContains(response, MESSAGES["NO_PROFILE"])
        self.client.logout()
        self.client.login(username="u", password="pwd")
        response = self.client.get(url, follow=True)
        if success_target is None:
            self.assertEqual(response.status_code, 200)
        else:
            self.assertRedirects(response, success_target)
        self.client.logout()
        self.client.login(username="st", password="pwd")
        response = self.client.get(url, follow=True)
        if success_target is None:
            self.assertEqual(response.status_code, 200)
        else:
            self.assertRedirects(response, success_target)
        self.client.logout()
        self.client.login(username="su", password="pwd")
        response = self.client.get(url, follow=True)
        if success_target is None:
            self.assertEqual(response.status_code, 200)
        else:
            self.assertRedirects(response, success_target)
        self.client.logout()

    def test_admin_required(self):
        """Admin-only pages, including those that redirect on success."""
        pages = [
            reverse("manage_profile_requests"),
            reverse("modify_profile_request", kwargs={"request_pk": self.pr.pk}),
            reverse("custom_manage_users"),
            reverse("custom_modify_user", kwargs={"targetUsername": self.u.username}),
            reverse("custom_add_user"),
            reverse("utilities"),
        ]
        for page in pages:
            self._admin_required(page)
        self._admin_required(reverse("managers:anonymous_login"),
                             success_target=reverse("homepage"))
        utilities = reverse("utilities")
        self._admin_required(reverse("recount"), success_target=utilities)
        self._admin_required(reverse("managers:end_anonymous_session"),
                             success_target=utilities)

    def test_president_admin_required(self):
        """Pages reachable by presidents and admins only."""
        pages = [
            reverse("managers:meta_manager"),
            reverse("managers:edit_manager", kwargs={"managerTitle": self.m.url_title}),
            reverse("managers:add_manager"),
            reverse("managers:manage_request_types"),
            reverse("managers:edit_request_type", kwargs={"typeName": self.rt.url_name}),
            reverse("managers:add_request_type"),
        ]
        for page in pages:
            self._president_admin_required(page)

    def test_profile_required(self):
        """Pages reachable by any user who has a profile."""
        pages = [
            reverse("managers:list_managers"),
            reverse("managers:view_manager", kwargs={"managerTitle": self.m.url_title}),
            reverse("managers:list_user_requests", kwargs={"targetUsername": self.u.username}),
            reverse("managers:requests", kwargs={"requestType": self.rt.url_name}),
            reverse("managers:all_requests"),
            reverse("managers:list_all_requests", kwargs={"requestType": self.rt.url_name}),
            reverse("managers:my_requests"),
            reverse("managers:view_request", kwargs={"request_pk": self.request.pk}),
            reverse("managers:announcements"),
            reverse("managers:view_announcement", kwargs={"announcement_pk": self.a.pk}),
            reverse("managers:edit_announcement", kwargs={"announcement_pk": self.a.pk}),
            reverse("managers:all_announcements"),
        ]
        for page in pages:
            self._profile_required(page)
class TestAnonymousUser(TestCase):
    """Exercise the anonymous-session feature: starting/ending an
    anonymous session, its profile pages, and login/logout interactions."""

    def setUp(self):
        # A plain user and a superuser; the superuser is logged in,
        # since starting an anonymous session is admin-only.
        self.u = User.objects.create_user(username="u", password="pwd")
        self.su = User.objects.create_user(username="su", password="pwd")
        self.su.is_staff, self.su.is_superuser = True, True
        self.su.save()
        self.client.login(username="su", password="pwd")

    def test_anonymous_start(self):
        """Starting an anonymous session redirects home and shows the
        ANONYMOUS_LOGIN banner (absent beforehand)."""
        response = self.client.get(reverse("homepage"))
        self.assertNotContains(
            response,
            MESSAGES["ANONYMOUS_LOGIN"],
        )
        url = reverse("managers:anonymous_login")
        response = self.client.get(url, follow=True)
        self.assertRedirects(response, reverse("homepage"))
        self.assertContains(
            response,
            MESSAGES["ANONYMOUS_LOGIN"],
        )

    def test_anonymous_end(self):
        """Ending an anonymous session redirects to utilities and clears
        the banner."""
        url = reverse("managers:anonymous_login")
        self.client.get(url, follow=True)
        self.client.login(username="su", password="pwd")
        url = reverse("managers:end_anonymous_session")
        response = self.client.get(url, follow=True)
        self.assertRedirects(response, reverse("utilities"))
        self.assertContains(response, MESSAGES["ANONYMOUS_SESSION_ENDED"])
        self.assertNotContains(
            response,
            MESSAGES["ANONYMOUS_LOGIN"],
        )

    def test_anonymous_profile(self):
        """The anonymous profile 404s until the anonymous user first logs
        in, then renders; 'my profile' redirects with SPINELESS."""
        # Failing before anonymous user is first logged in
        url = reverse("member_profile", kwargs={"targetUsername": ANONYMOUS_USERNAME})
        response = self.client.get(url)
        self.assertEqual(response.status_code, 404)
        url = reverse("managers:anonymous_login")
        self.client.get(url)
        url = reverse("member_profile", kwargs={"targetUsername": ANONYMOUS_USERNAME})
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "Anonymous Coward")
        url = reverse("my_profile")
        response = self.client.get(url, follow=True)
        self.assertRedirects(response, reverse("homepage"))
        self.assertContains(response, MESSAGES['SPINELESS'])

    def test_anonymous_edit_profile(self):
        """Editing the anonymous user's profile 404s until the anonymous
        user exists, then shows the ANONYMOUS_EDIT warning."""
        # Failing before anonymous user is first logged in
        url = reverse("custom_modify_user", kwargs={"targetUsername": ANONYMOUS_USERNAME})
        response = self.client.get(url)
        self.assertEqual(response.status_code, 404)
        url = reverse("managers:anonymous_login")
        self.client.get(url)
        url = reverse("logout")
        self.client.get(url, follow=True)
        self.client.login(username="su", password="pwd")
        url = reverse("custom_modify_user", kwargs={"targetUsername": ANONYMOUS_USERNAME})
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "Anonymous")
        self.assertContains(response, "Coward")
        self.assertContains(response, MESSAGES['ANONYMOUS_EDIT'])

    def test_anonymous_logout(self):
        """The anonymous user cannot log itself out; it is redirected home
        with ANONYMOUS_DENIED."""
        url = reverse("managers:anonymous_login")
        self.client.get(url)
        url = reverse("logout")
        response = self.client.get(url, follow=True)
        self.assertRedirects(response, reverse("homepage"))
        self.assertContains(response, MESSAGES['ANONYMOUS_DENIED'])

    def test_anonymous_user_login_logout(self):
        """A real user can log in over an anonymous session (banner gone)
        and log out again."""
        url = reverse("managers:anonymous_login")
        self.client.get(url, follow=True)
        # Need to be careful here, client.login and client.logout clear the
        # session cookies, causing this test to break
        url = reverse("login")
        response = self.client.post(url, {
            "username_or_email": "u",
            "password": "pwd",
        }, follow=True)
        self.assertRedirects(response, reverse("homepage"))
        self.assertNotContains(
            response,
            MESSAGES["ANONYMOUS_LOGIN"],
        )
        url = reverse("logout")
        response = self.client.get(url, follow=True)
        self.assertRedirects(response, reverse("homepage"))
        response = self.client.get(reverse("homepage"))
        self.assertEqual(response.status_code, 200)
class TestRequestPages(TestCase):
    def setUp(self):
        """Create a member, a president-manager, one request type, and a
        request with a manager response for the request-page tests."""
        self.u = User.objects.create_user(username="u", password="pwd")
        self.pu = User.objects.create_user(username="pu", password="pwd")
        self.u.save()
        self.pu.save()
        self.m = Manager.objects.create(
            title="House President",
            president=True,
            incumbent=UserProfile.objects.get(user=self.pu)
        )
        self.rt = RequestType.objects.create(
            name="Food",
        )
        self.rt.managers = [self.m]
        self.rt.save()
        self.request = Request.objects.create(
            owner=UserProfile.objects.get(user=self.u),
            body="Request Body",
            request_type=self.rt,
        )
        self.response = Response.objects.create(
            owner=UserProfile.objects.get(user=self.pu),
            body="Response Body",
            request=self.request,
            manager=True,
        )
def test_cron(self):
expired_time = now() - timedelta(hours=settings.REQUEST_EXPIRATION_HOURS + 24)
exp_req_1 = Request.objects.create(
owner=UserProfile.objects.get(user=self.u),
body="New Request",
request_type=self.rt,
)
exp_req_2 = Request.objects.create(
owner=UserProfile.objects.get(user=self.u),
body="New Request",
request_type=self.rt,
)
pres_req_2 = Request.objects.create(
owner=UserProfile.objects.get(user=self.u),
body="New Request",
request_type=self.rt,
)
Request.objects.all().update(change_date=expired_time)
pres_req_1 = Request.objects.create(
owner=UserProfile.objects.get(user=self.u),
body="New Request",
request_type=self.rt,
)
pr2_res = Response.objects.create(
owner=UserProfile.objects.get(user=self.u),
body="New Response",
request=pres_req_2,
action=Response.REOPENED,
)
for r in [exp_req_1, exp_req_2, pres_req_1, pres_req_2]:
self.assertEqual(
Request.OPEN,
Request.objects.get(pk=r.pk).status,
)
ExpireRequestsCronJob().do()
for r in [exp_req_1, exp_req_2]:
self.assertEqual(
Request.EXPIRED,
Request.objects.get(pk=r.pk).status,
)
for r in [pres_req_1, pres_req_2]:
self.assertEqual(
Request.OPEN,
Request.objects.get(pk=r.pk).status,
)
Response.objects.create(
owner=UserProfile.objects.get(user=self.u),
body="New Response",
request=exp_req_1,
action=Response.REOPENED,
)
exp_req_1.status = Request.OPEN
exp_req_1.save()
Request.objects.filter(pk=exp_req_1.pk).update(change_date=expired_time)
ExpireRequestsCronJob().do()
self.assertEqual(
Request.OPEN,
Request.objects.get(pk=exp_req_1.pk).status,
)
def test_request_form(self):
urls = [
reverse("managers:view_request", kwargs={"request_pk": self.request.pk}),
reverse("managers:requests", kwargs={"requestType": self.rt.url_name}),
]
self.client.login(username="u", password="pwd")
for url in urls + [reverse("managers:my_requests")]:
response = self.client.get(url)
self.assertContains(response, "Request Body")
self.assertContains(response, "Response Body")
self.assertNotContains(response, "Status of this request.")
self.client.logout()
self.client.login(username="pu", password="pwd")
for url in urls:
response = self.client.get(url)
self.assertContains(response, "Request Body")
self.assertContains(response, "Response Body")
response = self.client.get(reverse("managers:my_requests"))
self.assertNotContains(response, "Request Body")
self.assertNotContains(response, "Response Body")
self.assertNotContains(response, "Status of this request.")
class TestManager(TestCase):
    """Add/edit manager views, including title and url_title collisions."""

    def setUp(self):
        # A superuser who owns both fixture manager positions.
        self.su = User.objects.create_user(username="su", password="pwd")
        self.su.is_staff, self.su.is_superuser = True, True
        self.su.save()
        self.m1 = Manager.objects.create(
            title="setUp Manager",
            incumbent=UserProfile.objects.get(user=self.su),
        )
        self.m2 = Manager.objects.create(
            title="Testing Manager",
            incumbent=UserProfile.objects.get(user=self.su),
        )
        self.client.login(username="su", password="pwd")

    def test_add_manager(self):
        """A valid POST creates the manager and derives its url_title."""
        url = reverse("managers:add_manager")
        response = self.client.post(url, {
            "title": "Test Manager",
            "incumbent": "1",
            "compensation": "Test % Compensation",
            "duties": "Testing Add Managers Page",
            "email": "tester@email.com",
            "president": False,
            "workshift_manager": False,
            "active": True,
            "semester_hours": 5,
            "summer_hours": 5,
            "update_manager": "",
        }, follow=True)
        self.assertRedirects(response, url)
        self.assertContains(
            response,
            MESSAGES['MANAGER_ADDED'].format(managerTitle="Test Manager"),
        )
        self.assertEqual(1, Manager.objects.filter(title="Test Manager").count())
        self.assertEqual(1, Manager.objects.filter(url_title=convert_to_url("Test Manager")).count())

    def test_duplicate_title(self):
        """Reusing an existing title is rejected with a form error."""
        url = reverse("managers:add_manager")
        response = self.client.post(url, {
            "title": self.m1.title,
            "incumbent": "1",
            "compensation": "Test % Compensation",
            "duties": "Testing Add Managers Page",
            "email": "tester@email.com",
            "president": False,
            "workshift_manager": False,
            "active": True,
            "update_manager": "",
        })
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "A manager with this title already exists.")

    def test_duplicate_url_title(self):
        """A different-cased title mapping to the same URL is rejected."""
        url = reverse("managers:add_manager")
        response = self.client.post(url, {
            "title": "SETUP MANAGER",
            "incumbent": "1",
            "compensation": "Test % Compensation",
            "duties": "Testing Add Managers Page",
            "email": "tester@email.com",
            "president": False,
            "workshift_manager": False,
            "active": True,
            "update_manager": "",
        })
        self.assertEqual(response.status_code, 200)
        # The .replace() mirrors the template's HTML-escaping of quotes.
        self.assertContains(response, 'This manager title maps to a url that is already taken. Please note, "Site Admin" and "sITe_adMIN" map to the same URL.'.replace('"', """))

    def test_edit_manager(self):
        """Editing a manager's title renames it and regenerates url_title."""
        new_title = "New setUp Manager"
        url = reverse("managers:edit_manager", kwargs={"managerTitle": self.m1.url_title})
        response = self.client.post(url, {
            "title": new_title,
            "incumbent": self.m1.incumbent.pk,
            "compensation": "Test % Compensation",
            "duties": "Testing Add Managers Page",
            "email": "tester@email.com",
            "president": False,
            "workshift_manager": False,
            "active": True,
            "semester_hours": 5,
            "summer_hours": 5,
            "update_manager": "",
        }, follow=True)
        self.assertRedirects(response, reverse("managers:meta_manager"))
        self.assertContains(
            response,
            MESSAGES['MANAGER_SAVED'].format(managerTitle=new_title),
        )
        self.assertEqual(1, Manager.objects.filter(title=new_title).count())
        self.assertEqual(1, Manager.objects.filter(url_title=convert_to_url(new_title)).count())

    def test_edit_title(self):
        """Renaming one manager to another's exact title is rejected."""
        url = reverse("managers:edit_manager", kwargs={"managerTitle": self.m1.url_title})
        response = self.client.post(url, {
            "title": self.m2.title,
            "incumbent": self.m2.incumbent.pk,
            "compensation": "Test % Compensation",
            "duties": "Testing Add Managers Page",
            "email": "tester@email.com",
            "president": False,
            "workshift_manager": False,
            "active": True,
            "update_manager": "",
        }, follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "A manager with this title already exists.")

    def test_edit_url_title(self):
        """Renaming to a title that collides on url_title is rejected."""
        url = reverse("managers:edit_manager", kwargs={"managerTitle": self.m1.url_title})
        response = self.client.post(url, {
            "title": self.m2.url_title.upper(),
            "incumbent": self.m2.incumbent.pk,
            "compensation": "Test % Compensation",
            "duties": "Testing Add Managers Page",
            "email": "tester@email.com",
            "president": False,
            "workshift_manager": False,
            "active": True,
            "update_manager": "",
        }, follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'This manager title maps to a url that is already taken. Please note, "Site Admin" and "sITe_adMIN" map to the same URL.'.replace('"', """))
class TestRequestTypes(TestCase):
    """Manage/add/edit request-type views, including name collisions."""

    def setUp(self):
        self.su = User.objects.create_user(username="su", password="pwd")
        self.su.is_staff, self.su.is_superuser = True, True
        self.su.save()
        self.m1 = Manager.objects.create(
            title="setUp Manager",
            incumbent=UserProfile.objects.get(user=self.su),
        )
        self.m2 = Manager.objects.create(
            title="Testing Manager",
            incumbent=UserProfile.objects.get(user=self.su),
        )
        self.rt = RequestType.objects.create(
            name="Super",
        )
        # NOTE(review): direct M2M assignment -- pre-Django-1.10 style.
        self.rt.managers = [self.m1, self.m2]
        self.rt.save()
        # A second type with no managers, used for edit-collision tests.
        self.rt2 = RequestType.objects.create(
            name="Duper",
        )
        self.client.login(username="su", password="pwd")

    def test_manage_view(self):
        """The manage page lists every type and its managers (with URLs)."""
        url = reverse("managers:manage_request_types")
        response = self.client.get(url)
        self.assertContains(response, self.rt.name)
        self.assertContains(response, self.rt.url_name)
        self.assertContains(response, self.m1.title)
        self.assertContains(response, self.m1.url_title)
        self.assertContains(response, self.m2.title)
        self.assertContains(response, self.m2.url_title)

    def test_add_request(self):
        """Adding a type attaches the selected managers."""
        name = "Cleanliness"
        url = reverse("managers:add_request_type")
        response = self.client.post(url, {
            "name": name,
            "managers": [self.m1.pk, self.m2.pk],
        }, follow=True)
        self.assertRedirects(response, reverse("managers:manage_request_types"))
        self.assertContains(response,
            MESSAGES['REQUEST_TYPE_ADDED'].format(typeName=name))
        rt = RequestType.objects.get(name=name)
        self.assertIn(self.m1, rt.managers.all())
        self.assertIn(self.m2, rt.managers.all())

    def test_edit_request(self):
        """Editing replaces managers, can disable, and regenerates url_name."""
        url = reverse("managers:edit_request_type", kwargs={"typeName": self.rt.url_name})
        response = self.client.post(url, {
            "name": "New Name",
            "managers": [self.m2.pk],
            "enabled": False,
        })
        self.assertRedirects(response, reverse("managers:manage_request_types"))
        rt = RequestType.objects.get(pk=self.rt.pk)
        self.assertNotIn(self.m1, rt.managers.all())
        self.assertIn(self.m2, rt.managers.all())
        self.assertEqual(rt.enabled, False)
        self.assertEqual(rt.name, "New Name")
        self.assertEqual(rt.url_name, "new_name")

    def test_add_duplicate_name(self):
        """Adding a type with an existing name is rejected."""
        url = reverse("managers:add_request_type")
        response = self.client.post(url, {
            "name": self.rt.name,
        })
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "A request type with this name already exists.")

    def test_edit_duplicate_name(self):
        """Renaming one type to another's exact name is rejected."""
        url = reverse("managers:edit_request_type", kwargs={"typeName": self.rt2.url_name})
        response = self.client.post(url, {
            "name": self.rt.name,
            "managers": [self.m2.pk],
        })
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "A request type with this name already exists.")

    def test_add_duplicate_url_name(self):
        """A different-cased name mapping to the same URL is rejected."""
        url = reverse("managers:add_request_type")
        response = self.client.post(url, {
            "name": self.rt.name.upper(),
        })
        self.assertEqual(response.status_code, 200)
        # The .replace() mirrors the template's HTML-escaping of quotes.
        self.assertContains(response, 'This request type name maps to a url that is already taken. Please note, "Waste Reduction" and "wasTE_RedUCtiON" map to the same URL.'.replace('"', """))

    def test_edit_duplicate_url_name(self):
        """Renaming to a name that collides on url_name is rejected."""
        url = reverse("managers:edit_request_type", kwargs={"typeName": self.rt2.url_name})
        response = self.client.post(url, {
            "name": self.rt.name.upper(),
            "managers": [self.m2.pk],
        })
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'This request type name maps to a url that is already taken. Please note, "Waste Reduction" and "wasTE_RedUCtiON" map to the same URL.'.replace('"', """))
class TestAnnouncements(TestCase):
    """Announcement listing, viewing, editing, pinning and permissions."""

    def setUp(self):
        # "u" owns the announcement (via the manager position), "ou" is an
        # unrelated user, "su" is a superuser.
        self.u = User.objects.create_user(username="u", password="pwd")
        self.ou = User.objects.create_user(username="ou", password="pwd")
        self.su = User.objects.create_user(username="su", password="pwd")
        self.su.is_staff, self.su.is_superuser = True, True
        self.su.save()
        self.m = Manager.objects.create(
            title="setUp Manager",
            incumbent=UserProfile.objects.get(user=self.u),
        )
        self.a = Announcement.objects.create(
            manager=self.m,
            incumbent=self.m.incumbent,
            body="Test Announcement Body",
            # NOTE(review): naive local datetime -- if USE_TZ is on, an
            # aware timestamp (timezone.now()) would be expected. Confirm.
            post_date=datetime.now(),
        )
        self.client.login(username="u", password="pwd")

    def test_announcements(self):
        """The listing page shows the announcement body."""
        url = reverse("managers:announcements")
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, self.a.body)

    def test_individual(self):
        """The single-announcement page shows the body."""
        url = reverse("managers:view_announcement", kwargs={"announcement_pk": self.a.pk})
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, self.a.body)

    def test_edit_announcement(self):
        """The owner can edit; the change persists and redirects to view."""
        url = reverse("managers:edit_announcement", kwargs={"announcement_pk": self.a.pk})
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, self.a.body)
        new_body = "New Test Announcement Body"
        response = self.client.post(url, {
            "body": new_body,
            "manager": self.a.manager.pk,
        }, follow=True)
        url = reverse("managers:view_announcement", kwargs={"announcement_pk": self.a.pk})
        self.assertRedirects(response, url)
        self.assertContains(response, new_body)
        self.assertEqual(new_body, Announcement.objects.get(pk=self.a.pk).body)

    @override_settings(ANNOUNCEMENT_LIFE=0)
    def test_unpin(self):
        """Unpinning from the list page hides an expired announcement
        (ANNOUNCEMENT_LIFE=0 makes everything immediately stale)."""
        self.a.pinned = True
        self.a.save()
        url = reverse("managers:announcements")
        response = self.client.post(url, {
            # NOTE(review): the key is the literal string "unpin-{0}-pk" --
            # it looks like an unformatted template that should embed
            # self.a.pk. The view may key off a prefix; confirm before
            # "fixing".
            "unpin-{0}-pk": True,
        }, follow=True)
        self.assertRedirects(response, url)
        self.assertNotContains(response, self.a.body)

    def test_no_edit(self):
        """Non-owners are bounced to the view page; superusers may edit."""
        self.client.logout()
        self.client.login(username="ou", password="pwd")
        url = reverse("managers:edit_announcement", kwargs={"announcement_pk": self.a.pk})
        response = self.client.get(url)
        url = reverse("managers:view_announcement", kwargs={"announcement_pk": self.a.pk})
        self.assertRedirects(response, url)
        self.client.logout()
        self.client.login(username="su", password="pwd")
        url = reverse("managers:edit_announcement", kwargs={"announcement_pk": self.a.pk})
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)

    @override_settings(ANNOUNCEMENT_LIFE=0)
    def test_unpin_individual(self):
        """Unpinning from the detail page: still visible there, but gone
        from the (expired-filtered) listing."""
        self.a.pinned = True
        self.a.save()
        url = reverse("managers:view_announcement", kwargs={"announcement_pk": self.a.pk})
        response = self.client.post(url, {
            "pin": False,
        }, follow=True)
        self.assertRedirects(response, url)
        self.assertContains(response, self.a.body)
        response = self.client.get(reverse("managers:announcements"))
        self.assertEqual(response.status_code, 200)
        self.assertNotContains(response, self.a.body)
class TestPreFill(TestCase):
    """Exercise the farnsworth pre_fill bootstrap script."""

    def test_pre_fill(self):
        """Running pre_fill creates exactly one Manager per MANAGERS entry
        and one RequestType per REQUESTS entry."""
        from farnsworth.pre_fill import main
        from managers.fill import REQUESTS, MANAGERS
        main(["--requests", "--managers"])
        for entry in MANAGERS:
            title = entry[0]
            self.assertEqual(1, Manager.objects.filter(title=title).count())
        for entry in REQUESTS:
            name = entry[0]
            self.assertEqual(1, RequestType.objects.filter(name=name).count())

    def test_double_pre_fill(self):
        """
        Tests that running pre-fill twice does not crash
        """
        # Idempotence check: the second run must neither raise nor create
        # duplicates (the counts are re-asserted inside test_pre_fill).
        self.test_pre_fill()
        self.test_pre_fill()
| 38.785626
| 198
| 0.613866
| 3,450
| 31,300
| 5.453333
| 0.075652
| 0.052089
| 0.050707
| 0.033167
| 0.816679
| 0.762464
| 0.740193
| 0.698682
| 0.678431
| 0.641916
| 0
| 0.006225
| 0.260958
| 31,300
| 806
| 199
| 38.833747
| 0.807107
| 0.013802
| 0
| 0.648889
| 0
| 0.005926
| 0.157748
| 0.040507
| 0
| 0
| 0
| 0
| 0.180741
| 1
| 0.060741
| false
| 0.056296
| 0.020741
| 0
| 0.091852
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
07a31454cf4c57d76e7abe6dc3e41b59325a2b65
| 84
|
py
|
Python
|
grassformation/utils/__init__.py
|
Neosperience/GrassFormation
|
0cc0ac2399a1b8f3f815a4b28cad882e6f048e5e
|
[
"MIT"
] | 18
|
2018-11-12T15:49:02.000Z
|
2021-07-05T13:41:42.000Z
|
grassformation/utils/__init__.py
|
Neosperience/GrassFormation
|
0cc0ac2399a1b8f3f815a4b28cad882e6f048e5e
|
[
"MIT"
] | 2
|
2018-11-15T22:43:25.000Z
|
2018-12-27T12:58:41.000Z
|
grassformation/utils/__init__.py
|
Neosperience/GrassFormation
|
0cc0ac2399a1b8f3f815a4b28cad882e6f048e5e
|
[
"MIT"
] | 3
|
2018-12-27T10:22:57.000Z
|
2019-08-21T18:23:51.000Z
|
from utils.dict_utils import change_requires_update, filter_dictionary, val_to_bool
| 42
| 83
| 0.892857
| 13
| 84
| 5.307692
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.071429
| 84
| 1
| 84
| 84
| 0.884615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
07ce5a7b9c7d7542d1887e9f9a7e31736afce89c
| 187
|
py
|
Python
|
dataset.py
|
imranparuk/VGGFace2
|
0f7b166c93564735b9de3e174f86a51342ad1958
|
[
"MIT"
] | null | null | null |
dataset.py
|
imranparuk/VGGFace2
|
0f7b166c93564735b9de3e174f86a51342ad1958
|
[
"MIT"
] | null | null | null |
dataset.py
|
imranparuk/VGGFace2
|
0f7b166c93564735b9de3e174f86a51342ad1958
|
[
"MIT"
] | null | null | null |
from torch.utils.data import Dataset
class VGGFace2(Dataset):
    """Skeleton torch Dataset for the VGGFace2 corpus.

    All three Dataset-protocol methods are unimplemented stubs (TODO):
    as written, ``len()`` and indexing return None implicitly, which a
    DataLoader cannot consume.
    """

    def __init__(self):
        # TODO: accept dataset root / transform arguments and index files.
        pass

    def __len__(self):
        # TODO: return the number of samples (currently returns None).
        pass

    def __getitem__(self, item):
        # TODO: load and return the sample at `item`.
        pass
| 12.466667
| 36
| 0.609626
| 22
| 187
| 4.636364
| 0.681818
| 0.156863
| 0.215686
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007752
| 0.31016
| 187
| 14
| 37
| 13.357143
| 0.782946
| 0
| 0
| 0.375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.375
| false
| 0.375
| 0.125
| 0
| 0.625
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
07e757f4968e25955ee8b223dfbed3e91f0000e5
| 1,174
|
py
|
Python
|
test/test_whoami.py
|
duckinator/boreutils
|
3d40c11bf5e57240697adfb1bb693faf0ba82bab
|
[
"MIT"
] | 9
|
2015-11-05T04:33:50.000Z
|
2021-12-15T14:36:04.000Z
|
test/test_whoami.py
|
duckinator/boreutils
|
3d40c11bf5e57240697adfb1bb693faf0ba82bab
|
[
"MIT"
] | 26
|
2020-05-22T07:57:54.000Z
|
2021-09-06T20:18:22.000Z
|
test/test_whoami.py
|
duckinator/boreutils
|
3d40c11bf5e57240697adfb1bb693faf0ba82bab
|
[
"MIT"
] | 2
|
2020-06-01T05:14:22.000Z
|
2022-03-08T14:56:10.000Z
|
"""
Tests for `whoami`.
TODO: Determine if there's a standard for this.
"""
import os
import pytest
from helpers import check, check_version, run
def test_version():
    """Check that we're using Boreutil's implementation."""
    # check_version() is truthy when the binary under test is ours.
    is_boreutil = check_version("whoami")
    assert is_boreutil
def test_missing_args():
    """Nothing to test: `whoami` doesn't require any arguments."""
    # Deliberately empty: whoami has no missing-argument error path.
def test_extra_args():
    """Extra args => error of the form `whoami: ...`"""
    # Spawn the command once and check both properties of the same run;
    # the original invoked the subprocess twice for no benefit.
    result = run(["whoami", "owo"])
    assert result.stderr.startswith("whoami: ")
    assert result.returncode > 0
def test_help():
    """Passing -h or --help => print help text."""
    # One subprocess per flag (the original spawned each twice): both
    # flags must print usage text and exit non-zero.
    for flag in ("-h", "--help"):
        result = run(["whoami", flag])
        assert result.stdout.split(' ')[0] == 'Usage:'
        assert result.returncode > 0
@pytest.mark.skipif(os.environ.get("CI", False) == "true", reason="Fails in CI due to needing a tty.")
def test_main():
    """No args => user name associated with the current effective user ID."""
    # Run once and check stdout/stderr of the same invocation (the
    # original ran the command twice).
    result = check(["whoami"])
    assert result.stdout == os.getlogin() + "\n"
    assert len(result.stderr) == 0
| 28.634146
| 102
| 0.626065
| 159
| 1,174
| 4.566038
| 0.515723
| 0.07438
| 0.123967
| 0.057851
| 0.15427
| 0.088154
| 0.088154
| 0
| 0
| 0
| 0
| 0.006186
| 0.173765
| 1,174
| 40
| 103
| 29.35
| 0.742268
| 0.28109
| 0
| 0
| 0
| 0
| 0.171393
| 0
| 0
| 0
| 0
| 0.025
| 0.473684
| 1
| 0.263158
| true
| 0.052632
| 0.157895
| 0
| 0.421053
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
07f97444b99a0e549ba7bf93f00437847180c827
| 301
|
py
|
Python
|
10/03/2/call.py
|
pylangstudy/201706
|
f1cc6af6b18e5bd393cda27f5166067c4645d4d3
|
[
"CC0-1.0"
] | null | null | null |
10/03/2/call.py
|
pylangstudy/201706
|
f1cc6af6b18e5bd393cda27f5166067c4645d4d3
|
[
"CC0-1.0"
] | 70
|
2017-06-01T11:02:51.000Z
|
2017-06-30T00:35:32.000Z
|
10/03/2/call.py
|
pylangstudy/201706
|
f1cc6af6b18e5bd393cda27f5166067c4645d4d3
|
[
"CC0-1.0"
] | null | null | null |
# Demonstrates importing a module from a nested package and calling into
# it; the commented-out lines are alternative import forms kept for study.
#from package1.package11 import *
#from package1 import *
#from package1.package11.module1 import *
from package1.package11 import module2

# Show which names the chosen import form bound in this namespace.
print(dir())

#import package.module2
module2.some_method()
#package11.module2.some_method()
#package1.package11.module2.some_method()
#import package.module1
| 23.153846
| 41
| 0.800664
| 37
| 301
| 6.432432
| 0.297297
| 0.201681
| 0.264706
| 0.226891
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.080586
| 0.093023
| 301
| 12
| 42
| 25.083333
| 0.791209
| 0.700997
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0.333333
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
ed48201d0d0e63d6809775b7693f0b82e310cb4d
| 287
|
py
|
Python
|
bin/run_tests.py
|
eerkela/CurveFit
|
4c25a196860cc62900618ea01dd59b903edbfc8d
|
[
"MIT"
] | null | null | null |
bin/run_tests.py
|
eerkela/CurveFit
|
4c25a196860cc62900618ea01dd59b903edbfc8d
|
[
"MIT"
] | null | null | null |
bin/run_tests.py
|
eerkela/CurveFit
|
4c25a196860cc62900618ea01dd59b903edbfc8d
|
[
"MIT"
] | null | null | null |
from pathlib import Path
import sys
import unittest

# Make the repository root importable so the `curvefit` package resolves
# when this script is run from bin/.
sys.path.append(str(Path(__file__).resolve().parent.parent))

# Star-import pulls the TestCase classes into this module's namespace so
# unittest.main() can discover them; other suites are currently disabled.
from curvefit.test.callback_test import *
# from curvefit.test.color_test import *
# from curvefit.test.text_test import *

if __name__ == "__main__":
    unittest.main()
| 20.5
| 60
| 0.763066
| 40
| 287
| 5.1
| 0.475
| 0.176471
| 0.235294
| 0.215686
| 0.254902
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125436
| 287
| 13
| 61
| 22.076923
| 0.812749
| 0.264808
| 0
| 0
| 0
| 0
| 0.038462
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.571429
| 0
| 0.571429
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
ed8021ca7accbfff66c07cbe8c00350a8c58ea56
| 156
|
py
|
Python
|
bauh/core/snap/constants.py
|
Acidburn0zzz/bauh
|
aa7113b11d3bd4aa7b35d140280992362352636d
|
[
"Zlib"
] | null | null | null |
bauh/core/snap/constants.py
|
Acidburn0zzz/bauh
|
aa7113b11d3bd4aa7b35d140280992362352636d
|
[
"Zlib"
] | null | null | null |
bauh/core/snap/constants.py
|
Acidburn0zzz/bauh
|
aa7113b11d3bd4aa7b35d140280992362352636d
|
[
"Zlib"
] | null | null | null |
from bauh.core.constants import CACHE_PATH

# Base endpoint of the Ubuntu snap search API (v1).
SNAP_API_URL = 'https://search.apps.ubuntu.com/api/v1'
# Directory where data about installed snaps is cached, under CACHE_PATH.
SNAP_CACHE_PATH = '{}/snap/installed'.format(CACHE_PATH)
| 31.2
| 56
| 0.782051
| 25
| 156
| 4.64
| 0.72
| 0.232759
| 0.224138
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006897
| 0.070513
| 156
| 4
| 57
| 39
| 0.793103
| 0
| 0
| 0
| 0
| 0
| 0.346154
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
9c37ca671043ad1df944ee7c4cf8ed9b9fcbb6bd
| 24
|
py
|
Python
|
data/studio21_generated/introductory/3043/starter_code.py
|
vijaykumawat256/Prompt-Summarization
|
614f5911e2acd2933440d909de2b4f86653dc214
|
[
"Apache-2.0"
] | null | null | null |
data/studio21_generated/introductory/3043/starter_code.py
|
vijaykumawat256/Prompt-Summarization
|
614f5911e2acd2933440d909de2b4f86653dc214
|
[
"Apache-2.0"
] | null | null | null |
data/studio21_generated/introductory/3043/starter_code.py
|
vijaykumawat256/Prompt-Summarization
|
614f5911e2acd2933440d909de2b4f86653dc214
|
[
"Apache-2.0"
] | null | null | null |
def print_nums(*args):
| 12
| 22
| 0.708333
| 4
| 24
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 24
| 2
| 23
| 12
| 0.761905
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
9c5a10b8da4461cb207beab077b33d17f8262eb0
| 38,488
|
py
|
Python
|
src/test_main.py
|
ExpectoAprobarum/BackendSV
|
58d9e11558c0eca6aabb347cd1f00c7039b7cb2b
|
[
"CC0-1.0"
] | null | null | null |
src/test_main.py
|
ExpectoAprobarum/BackendSV
|
58d9e11558c0eca6aabb347cd1f00c7039b7cb2b
|
[
"CC0-1.0"
] | null | null | null |
src/test_main.py
|
ExpectoAprobarum/BackendSV
|
58d9e11558c0eca6aabb347cd1f00c7039b7cb2b
|
[
"CC0-1.0"
] | null | null | null |
from fastapi.testclient import TestClient
from .main import app
import pytest
from src.models import Game, Board, User, Player, Message
from pony.orm import db_session, commit
import datetime

# Shared HTTP client for every test in this module.
client = TestClient(app)

# Five seed accounts. State (ids, tokens) is attached to the `pytest`
# module object so it survives across the ordered test functions below.
pytest.users = {
    1: {"username": "andres", "useralias": "andres", "email": "a@gmail.com", "password": "12345"},
    2: {"username": "andres2", "useralias": "andres2", "email": "a2@gmail.com", "password": "12345"},
    3: {"username": "andres3", "useralias": "andres3", "email": "a3@gmail.com", "password": "12345"},
    4: {"username": "andres4", "useralias": "andres4", "email": "a4@gmail.com", "password": "12345"},
    5: {"username": "andres5", "useralias": "andres5", "email": "a5@gmail.com", "password": "12345"}
}
# Cross-test scratch space (e.g. the created game's id).
pytest.info = {}
def test_create_user():
    """Register the five seed users, verify them in the DB, then check
    that duplicate usernames and emails are rejected with 400."""
    # The original used `for i, u in enumerate(...)` with `i` unused in
    # both loops; iterate the values directly.
    for u in pytest.users.values():
        with db_session:
            response = client.post(
                "/users/", headers={},
                json={"username": u['username'],
                      "useralias": u["useralias"], "email": u['email'],
                      "password": "12345", "frontURL": "ded"}
            )
    # Mark everyone verified directly in the DB and record their ids.
    with db_session:
        for u in pytest.users.values():
            user = User.get(email=u["email"])
            user.verified = True
            u["user_id"] = int(user.id)
    # Duplicate username is rejected.
    response = client.post(
        "/users/", headers={},
        json={"username": "andres", "useralias": "andres", "email": "a@a.com", "password": "12345", "frontURL": "ded"}
    )
    assert response.status_code == 400
    assert response.json() == {"detail": "Username already registered"}
    # Duplicate email is rejected.
    response = client.post(
        "/users/", headers={},
        json={"username": "andres345", "useralias": "andres234", "email": "a@gmail.com", "password": "12345", "frontURL": "ded"}
    )
    assert response.status_code == 400
    assert response.json() == {"detail": "Email already registered"}
def test_auth():
    """Each seeded user obtains a bearer token; stash it for later tests."""
    # The original's enumerate index was unused; iterate directly.
    for u in pytest.users.values():
        response = client.post(
            "/auth/token", headers={},
            json={"username": u["username"], "password": "12345"}
        )
        assert response.status_code == 200
        rjson = response.json()
        assert rjson['token_type'] == 'bearer'
        u["token"] = rjson['access_token']
def test_create_game():
    """User 1 creates a 5-player game; its id is stashed in pytest.info."""
    headers = {
        'Authorization': 'Bearer ' + pytest.users[1]["token"],
        'Content-Type': 'text/plain'
    }
    response = client.post("/games/", headers=headers,
        json={"name": "Partida 1", "player_amount": 5})
    # Fetch the created game to learn its id (primary key stays readable
    # after the session closes).
    with db_session:
        game = Game.get(created_by=pytest.users[1]["user_id"])
    assert response.status_code == 200
    assert response.json() == {'id': game.id, 'message': 'Game created successfully'}
    pytest.info["game"] = game.id
def test_join_game():
    """Users 2-5 join the game; then check 404 and full-game errors."""
    # Unused enumerate index removed. `headers` deliberately leaks out of
    # the loop: the error-path requests below reuse the last user's token.
    for u in list(pytest.users.values())[1:]:
        headers = {
            'Authorization': 'Bearer ' + u["token"],
            'Content-Type': 'text/plain'
        }
        response = client.post(f"/games/{pytest.info['game']}/join", headers=headers, json={})
        assert response.status_code == 200
        assert response.json() == {"message": 'joined successfully'}
    # Unknown game id.
    response = client.post("/games/100/join", headers=headers, json={})
    assert response.status_code == 404
    assert response.json() == {"detail": 'Game not found'}
    # Sixth join attempt on a 5-player game.
    response = client.post(f"/games/{pytest.info['game']}/join", headers=headers, json={})
    assert response.status_code == 403
    assert response.json() == {"detail": 'The game is full'}
def test_get_games():
    """GET /games/ returns every non-started game with a joined_players
    count and ISO-style (T-separated) creation dates."""
    headers = {
        'Authorization': 'Bearer ' + pytest.users[2]["token"],
        'Content-Type': 'text/plain'
    }
    response = client.get(
        "/games/", headers=headers,
        json={}
    )
    # Rebuild the expected payload straight from the DB.
    with db_session:
        def parseGame(game):
            # Entity -> dict, augmented with the player count.
            game_dict = game.to_dict()
            players = game.players.count()
            game_dict["joined_players"] = players
            return game_dict
        # NOTE(review): `creation_date` is computed but never used below --
        # looks like leftover scaffolding.
        creation_date = str(Game.get(id=pytest.info['game']).creation_date).replace(" ", "T")
        games = Game.select()[:]
        result = {'data': [parseGame(g) for g in games if not g.started]}
        for g in result["data"]:
            # Match the API's JSON datetime encoding ("T" separator).
            g["creation_date"] = str(g["creation_date"]).replace(" ", "T")
    assert response.status_code == 200
    assert response.json() == result
def test_exit_game():
    """User 2 leaves and rejoins the game; unknown game ids give 404."""
    headers = {
        'Authorization': 'Bearer ' + pytest.users[2]["token"],
        'Content-Type': 'text/plain'
    }
    response = client.post("/games/100/exit", headers=headers)
    assert response.status_code == 404
    assert response.json() == {"detail": 'Game not found'}
    response = client.post(f"/games/{pytest.info['game']}/exit", headers=headers)
    assert response.status_code == 200
    assert response.json() == {"message": 'game left successfully'}
    response = client.post(f"/games/{pytest.info['game']}/join", headers=headers)
    assert response.status_code == 200
    assert response.json() == {"message": 'joined successfully'}
    # Alias user 2 under key 6 -- presumably so later tests can refer to
    # "the re-joined player"; both keys point at the same dict.
    pytest.users[6] = pytest.users[2]
def test_start_game():
    """Only the creator can start the game; pre-start endpoints 400, and
    post-start exit/start attempts fail appropriately."""
    headers = {
        'Authorization': 'Bearer ' + pytest.users[2]["token"],
        'Content-Type': 'text/plain'
    }
    # User 2 did not create the game.
    response = client.post(f"/games/{pytest.info['game']}/start", headers=headers)
    assert response.status_code == 403
    assert response.json() == {
        'detail': "The game does not belong to the current user"
    }
    # Switch to the creator (user 1): game-state endpoints reject a game
    # that has not started yet.
    headers['Authorization'] = 'Bearer ' + pytest.users[1]["token"]
    response = client.get(f"/games/{pytest.info['game']}/board", headers=headers)
    assert response.status_code == 400
    assert response.json() == {'detail': "Game is not started"}
    response = client.post(f"/games/{pytest.info['game']}/choosehm", headers=headers,
        json={"id": "2"})
    assert response.status_code == 400
    assert response.json() == {'detail': "Game is not started"}
    response = client.get(f"/games/{pytest.info['game']}/deck", headers=headers)
    assert response.status_code == 400
    assert response.json() == {'detail': "Game is not started"}
    # The creator starts the game; a board is created for it.
    response = client.post(f"/games/{pytest.info['game']}/start", headers=headers)
    assert response.status_code == 200
    with db_session:
        board_id = Game.get(id=pytest.info['game']).board.id
    assert response.json() == {
        'board_id': board_id,
        'message': 'Game started successfully'
    }
    # Post-start error paths, back as user 2.
    headers['Authorization'] = 'Bearer ' + pytest.users[2]["token"]
    response = client.post(f"/games/{pytest.info['game']}/exit", headers=headers)
    assert response.status_code == 400
    assert response.json() == {"detail": 'The Game is already started'}
    response = client.post("/games/100/start", headers=headers)
    assert response.status_code == 404
    assert response.json() == {'detail': "The game does not exist"}
    response = client.post(f"/games/{pytest.info['game']}/start", headers=headers)
    assert response.status_code == 400
    assert response.json() == {'detail': "The game was already started"}
def test_get_game():
    """GET /games/{id} returns the full game record; unknown ids 404."""
    headers = {
        'Authorization': 'Bearer ' + pytest.users[2]["token"],
        'Content-Type': 'text/plain'
    }
    response = client.get(f"/games/{pytest.info['game']}", headers=headers)
    # Pull the expected values out of the DB as plain ints/strings so the
    # asserts can run after the session closes.
    with db_session:
        game = Game.get(id=pytest.info['game'])
        creation_date = str(game.creation_date).replace(" ", "T")
        minister = int(game.status["minister"])
        created_by = int(game.created_by)
        board = game.board.id
    assert response.status_code == 200
    assert response.json() == {
        "id": pytest.info['game'],
        "name": "Partida 1",
        "creation_date": creation_date,
        "created_by": created_by,
        "player_amount": 5,
        "started": True,
        "status": {
            "minister": minister,
            "phase": "propose",
            "round": 1
        },
        "board": board}
    response = client.get("/games/100", headers=headers)
    assert response.status_code == 404
    assert response.json() == {'detail': 'Game not found'}
def test_players_game():
    """GET /games/{id}/players lists all five players (in the API's own
    order: 1, 3, 4, 5, 2) and records each player_id for later tests."""
    headers = {
        'Authorization': 'Bearer ' + pytest.users[2]["token"],
        'Content-Type': 'text/plain'
    }
    response = client.get(f"/games/{pytest.info['game']}/players", headers=headers)
    assert response.status_code == 200
    with db_session:
        # Look up each user's Player row in this game and remember its id.
        player1 = Player.select(
            lambda p: p.user.username == pytest.users[1]["username"] and
            p.game.id == pytest.info['game']).first()
        pytest.users[1]["player_id"] = player1.id
        player2 = Player.select(
            lambda p: p.user.username == pytest.users[2]["username"] and
            p.game.id == pytest.info['game']).first()
        pytest.users[2]["player_id"] = player2.id
        player3 = Player.select(
            lambda p: p.user.username == pytest.users[3]["username"] and
            p.game.id == pytest.info['game']).first()
        pytest.users[3]["player_id"] = player3.id
        player4 = Player.select(
            lambda p: p.user.username == pytest.users[4]["username"] and
            p.game.id == pytest.info['game']).first()
        pytest.users[4]["player_id"] = player4.id
        player5 = Player.select(
            lambda p: p.user.username == pytest.users[5]["username"] and
            p.game.id == pytest.info['game']).first()
        pytest.users[5]["player_id"] = player5.id
        # Player 2 appears last because they left and rejoined earlier.
        assert response.json() == {
            "data": [
                {
                    "id": player1.id,
                    "choosable": True,
                    "current_position": player1.current_position,
                    "game": pytest.info['game'],
                    "role": player1.role,
                    "is_voldemort": player1.is_voldemort,
                    "alive": True,
                    "user": {
                        "id": pytest.users[1]["user_id"],
                        "useralias": "andres",
                        "username": "andres",
                        'verified': True
                    },
                },
                {
                    "id": player3.id,
                    "choosable": True,
                    "current_position": player3.current_position,
                    "game": pytest.info['game'],
                    "role": player3.role,
                    "is_voldemort": player3.is_voldemort,
                    "alive": True,
                    "user": {
                        "id": pytest.users[3]["user_id"],
                        "useralias": "andres3",
                        "username": "andres3",
                        'verified': True
                    },
                },
                {
                    "id": player4.id,
                    "choosable": True,
                    "current_position": player4.current_position,
                    "game": pytest.info['game'],
                    "role": player4.role,
                    "is_voldemort": player4.is_voldemort,
                    "alive": True,
                    "user": {
                        "id": pytest.users[4]["user_id"],
                        "useralias": "andres4",
                        "username": "andres4",
                        "verified": True
                    },
                },
                {
                    "id": player5.id,
                    "choosable": True,
                    "current_position": player5.current_position,
                    "game": pytest.info['game'],
                    "role": player5.role,
                    "is_voldemort": player5.is_voldemort,
                    "alive": True,
                    "user": {
                        "id": pytest.users[5]["user_id"],
                        "useralias": "andres5",
                        "username": "andres5",
                        "verified": True
                    },
                },
                {
                    "id": player2.id,
                    "choosable": True,
                    "current_position": player2.current_position,
                    "game": pytest.info['game'],
                    "role": player2.role,
                    "is_voldemort": player2.is_voldemort,
                    "alive": True,
                    "user": {
                        "id": pytest.users[2]["user_id"],
                        "useralias": "andres2",
                        "username": "andres2",
                        "verified": True
                    }
                }
            ]
        }
    response = client.get("/games/100/players", headers=headers)
    assert response.status_code == 404
    assert response.json() == {'detail': "Game not found"}
def test_status_game():
    """GET /games/{id}/status reports minister, phase and round for the running game."""
    headers = {
        'Authorization': 'Bearer ' + pytest.users[2]["token"],
        'Content-Type': 'text/plain'
    }
    with db_session:
        # Read the minister id persisted by the earlier game-start tests.
        minister = int(Game.get(id=pytest.info['game']).status["minister"])
    response = client.get(f"/games/{pytest.info['game']}/status", headers=headers)
    assert response.status_code == 200
    assert response.json() == {
        "minister": minister,
        "phase": "propose",
        "round": 1
    }
    # NOTE(review): this 404 check hits /players rather than /status — looks like
    # a copy-paste from the players test; TODO confirm the intended endpoint.
    response = client.get("/games/100/players", headers=headers)
    assert response.status_code == 404
    assert response.json() == {'detail': "Game not found"}
def test_me_game():
    """GET /games/{id}/me returns the caller's own player record; 404 for unknown game."""
    auth = {
        'Authorization': 'Bearer ' + pytest.users[1]["token"],
        'Content-Type': 'text/plain'
    }
    response = client.get(f"/games/{pytest.info['game']}/me", headers=auth)
    with db_session:
        me = Player.get(id=pytest.users[1]["player_id"])
        position = me.current_position
        my_role = me.role
        is_voldy = me.is_voldemort
    assert response.status_code == 200
    expected = {
        "id": pytest.users[1]["player_id"],
        "choosable": True,
        "current_position": position,
        "role": my_role,
        "is_voldemort": is_voldy,
        "alive": True,
        "user": pytest.users[1]["user_id"],
        "game": pytest.info['game']
    }
    assert response.json() == expected
    # Unknown game id must yield a 404 with the endpoint-specific message.
    missing = client.get("/games/100/me", headers=auth)
    assert missing.status_code == 404
    assert missing.json() == {'detail': "The game does not exist"}
def test_board_game():
    """GET /games/{id}/board returns the full board state; 404 for unknown game.

    Fix: the original also fetched the caller's ``current_position`` and the
    minister id from the DB but never used either — dead reads removed.
    """
    headers = {
        'Authorization': 'Bearer ' + pytest.users[1]["token"],
        'Content-Type': 'text/plain'
    }
    response = client.get(f"/games/{pytest.info['game']}/board", headers=headers)
    with db_session:
        board = Game.get(id=pytest.info['game']).board.id
    assert response.status_code == 200
    assert response.json() == {
        "id": board,
        "de_proc": 0,
        "po_proc": 0,
        "spell_fields": [
            "",
            "",
            "divination",
            "avadakedavra",
            "avadakedavra",
            "win"
        ],
        "caos": 0,
        "game": pytest.info['game']
    }
    response = client.get("/games/100/board", headers=headers)
    assert response.status_code == 404
    assert response.json() == {'detail': 'Game not found'}
def test_deck_game():
    """GET /games/{id}/deck returns the raw deck string; 404 for unknown game."""
    auth = {
        'Authorization': 'Bearer ' + pytest.users[1]["token"],
        'Content-Type': 'text/plain'
    }
    with db_session:
        expected_deck = Game.get(id=pytest.info['game']).board.deck
    response = client.get(f"/games/{pytest.info['game']}/deck", headers=auth)
    assert response.status_code == 200
    assert response.json() == expected_deck
    missing = client.get("/games/100/deck", headers=auth)
    assert missing.status_code == 404
    assert missing.json() == {'detail': "Game not found"}
def test_choosehm_game():
    """POST /games/{id}/choosehm: exercise every rejection branch, then a valid proposal."""
    with db_session:
        game = Game.get(id=pytest.info['game'])
        minister = game.status["minister"]
        # Force an unexpected phase so the first in-game request is rejected.
        game.status["phase"] = "x"
    headers = {
        'Authorization': 'Bearer ' + pytest.users[1]["token"],
        'Content-Type': 'text/plain'
    }
    # Unknown game id.
    response = client.post("/games/100/choosehm", headers=headers,
                           json={'id':'2'})
    assert response.status_code == 404
    assert response.json() == {'detail': "Game not found"}
    # Wrong phase.
    response = client.post(f"/games/{pytest.info['game']}/choosehm", headers=headers,
                           json={'id':'2'})
    assert response.status_code == 400
    assert response.json() == {
        'detail': "The headmaster only can be elected in the propose phase"}
    with db_session:
        game = Game.get(id=pytest.info['game'])
        game.status["phase"] = "propose"
    # Pick any user who is NOT the minister.
    for i in pytest.users.keys():
        if pytest.users[i]["player_id"] != minister:
            acc = i
            break
    headers['Authorization'] = 'Bearer ' + pytest.users[acc]["token"]
    response = client.post(f"/games/{pytest.info['game']}/choosehm", headers=headers,
                           json={'id':'2'})
    assert response.status_code == 400
    assert response.json() == {
        'detail': "Only the minister can propose a headmaster"}
    # Authenticate as the minister for the remaining checks.
    for i in pytest.users.keys():
        if pytest.users[i]["player_id"] == minister:
            user_minister = i
            break
    headers['Authorization'] = 'Bearer ' + pytest.users[user_minister]["token"]
    # Non-existent candidate id.
    response = client.post(
        f"/games/{pytest.info['game']}/choosehm", headers=headers,
        json={"id": "300"}
    )
    assert response.status_code == 400
    assert response.json() == {
        'detail': "The selected player does not exist"}
    # The minister may not pick themself.
    response = client.post(
        f"/games/{pytest.info['game']}/choosehm", headers=headers,
        json={"id": str(minister)}
    )
    assert response.status_code == 400
    assert response.json() == {
        'detail': "The minister can not be the headmaster"}
    with db_session:
        other_guy = Player.get(id=pytest.users[acc]["player_id"])
        # Simulate "was headmaster last round" via the choosable flag.
        other_guy.choosable = False
    response = client.post(
        f"/games/{pytest.info['game']}/choosehm", headers=headers,
        json={"id": str(pytest.users[acc]["player_id"])}
    )
    assert response.status_code == 400
    assert response.json() == {
        'detail': "The player has been headmaster in the previous round"}
    with db_session:
        user = User.get(id=pytest.users[user_minister]["user_id"])
        # Second game/player pair for the "wrong game" branch; ids are stashed
        # in pytest.info so test_delete_game can clean them up later.
        new_game = Game(name="x", created_by=pytest.users[acc]["user_id"], started=False,
                        creation_date=datetime.datetime.now(),
                        player_amount=5, status={})
        new_player = Player(choosable=True, current_position='', role='', is_voldemort=False, alive=True,
                            user=user)
        new_game.players.add(new_player)
        other_guy = Player.get(id=pytest.users[acc]["player_id"])
        other_guy.choosable = True
        other_guy.alive = False
        commit()
        pytest.info['other_game'] = new_game.id
        pytest.info['other_player'] = new_player.id
    # Candidate belongs to the other game.
    response = client.post(
        f"/games/{pytest.info['game']}/choosehm", headers=headers,
        json={"id": str(new_player.id)}
    )
    assert response.status_code == 400
    assert response.json() == {
        'detail': "The player does not belong to this game"}
    # Candidate is in this game but dead.
    response = client.post(
        f"/games/{pytest.info['game']}/choosehm", headers=headers,
        json={"id": str(other_guy.id)}
    )
    assert response.status_code == 400
    assert response.json() == {
        'detail': "The player cannot be headmaster because is dead"}
    with db_session:
        other_guy = Player.get(id=pytest.users[acc]["player_id"])
        other_guy.alive = True
        username = other_guy.user.username
    # Happy path: alive, choosable, same game, proposed by the minister.
    response = client.post(
        f"/games/{pytest.info['game']}/choosehm", headers=headers,
        json={"id": str(other_guy.id)}
    )
    assert response.status_code == 200
    assert response.json() == {
        "message": f"The player number {other_guy.id}: {username} was proposed as headmaster"
    }
def test_vote_game():
    """POST /games/{id}/vote: single vote, double-vote rejection, failed and passed elections."""
    headers = {
        'Authorization': 'Bearer ' + pytest.users[1]["token"],
        'Content-Type': 'text/plain'
    }
    # Unknown game id.
    response = client.post("/games/100/vote", headers=headers,
                           json={"vote":"true"})
    assert response.status_code == 404
    assert response.json() == {'detail': "Game not found"}
    with db_session:
        game = Game.get(id=pytest.info['game'])
        game.status["phase"] = "x"
    response = client.post(f"/games/{pytest.info['game']}/vote", headers=headers,
                           json={"vote":"true"})
    assert response.status_code == 400
    assert response.json() == {'detail': "It is not the vote phase"}
    with db_session:
        game = Game.get(id=pytest.info['game'])
        game.status["phase"] = "vote"
    response = client.post(f"/games/{pytest.info['game']}/vote", headers=headers,
                           json={"vote": "true"})
    assert response.status_code == 200
    assert response.json() == {
        "vote": f"Player: {pytest.users[1]['player_id']} (andres) successfully voted",
        "election": "election in progress"}
    # A second vote from the same player is rejected.
    response = client.post(f"/games/{pytest.info['game']}/vote", headers=headers,
                           json={"vote": "true"})
    assert response.status_code == 400
    assert response.json() == {
        "detail": "This player already voted"}
    # Alternate false/true votes for the middle players; the election stays open.
    votes = {0 : "false", 1: "true"}
    for i in list(pytest.users.keys())[1:-2]:
        headers['Authorization'] = 'Bearer ' + pytest.users[i]["token"]
        response = client.post(
            f"/games/{pytest.info['game']}/vote", headers=headers,
            json={"vote": f"{votes[i%2]}"}
        )
        assert response.status_code == 200
        assert response.json() == {
            "vote": f"Player: {pytest.users[i]['player_id']} ({pytest.users[i]['username']}) successfully voted",
            "election": "election in progress"}
    with db_session:
        # Snapshot the status so the final vote can be replayed for both outcomes.
        old_status = Game.get(id=pytest.info['game']).status.copy()
        headers['Authorization'] = 'Bearer ' + pytest.users[5]["token"]
        Game.get(id=pytest.info['game']).status = old_status
        # Last voter votes "false" -> election fails.
        response = client.post(
            f"/games/{pytest.info['game']}/vote", headers=headers,
            json={"vote": "false"}
        )
        assert response.status_code == 200
        assert response.json() == {
            "vote": f"Player: {pytest.users[5]['player_id']} ({pytest.users[5]['username']}) successfully voted",
            "election": "election failed"}
        # Undo the failed election's side effects, then replay the vote as "true".
        game = Game.get(id=pytest.info['game'])
        game.board.caos -= 1
        Player.get(id=game.status['minister']).current_position = ""
        game.status = old_status
        Player.get(id=old_status['minister']).current_position = "minister"
        Player.get(id=int(old_status['headmaster'])).current_position = "headmaster"
        response = client.post(
            f"/games/{pytest.info['game']}/vote", headers=headers,
            json={"vote": "true"}
        )
        assert response.status_code == 200
        assert response.json() == {
            "vote": f"Player: {pytest.users[5]['player_id']} ({pytest.users[5]['username']}) successfully voted",
            "election": "election succeed"}
def test_get_proclamations_game():
    """GET /games/{id}/proclamations: phase/role gating for minister and headmaster draws."""
    with db_session:
        game = Game.get(id=pytest.info["game"])
        minister = game.status['minister']
        headmaster = int(game.status['headmaster'])
        # Find a user who holds neither position, to exercise the role checks.
        for i in pytest.users.keys():
            if pytest.users[i]["player_id"] != minister and pytest.users[i]["player_id"] != headmaster:
                acc = i
                break
        headers = {
            'Authorization': 'Bearer ' + pytest.users[acc]["token"],
            'Content-Type': 'text/plain'
        }
        game.status["phase"] = "x"
        response = client.get(f"/games/{pytest.info['game']}/proclamations", headers=headers)
        assert response.status_code == 400
        assert response.json() == {'detail': "It is not a phase for geting a proclamation"}
        game.status["phase"] = "minister play"
        response = client.get(f"/games/{pytest.info['game']}/proclamations", headers=headers)
        assert response.status_code == 404
        assert response.json() == {'detail': "This player is not the minister"}
        for i in pytest.users.keys():
            if pytest.users[i]["player_id"] == minister:
                user_minister = i
                break
        headers['Authorization'] = 'Bearer ' + pytest.users[user_minister]["token"]
        # The minister draws the top three cards of the deck.
        response = client.get(f"/games/{pytest.info['game']}/proclamations", headers=headers)
        assert response.status_code == 200
        assert response.json() == {"data": game.board.deck.split(',')[:3]}
        game.status["phase"] = "headmaster play"
        response = client.get(f"/games/{pytest.info['game']}/proclamations", headers=headers)
        assert response.status_code == 404
        assert response.json() == {'detail': "This player is not the headmaster"}
        for i in pytest.users.keys():
            if pytest.users[i]["player_id"] == headmaster:
                user_headmaster = i
                break
        headers['Authorization'] = 'Bearer ' + pytest.users[user_headmaster]["token"]
        # The headmaster sees the remaining top two cards.
        response = client.get(f"/games/{pytest.info['game']}/proclamations", headers=headers)
        assert response.status_code == 200
        assert response.json() == {"data": game.board.deck.split(',')[:2]}
def test_post_game_proclamations():
    """POST /games/{id}/proclamations: discard (minister) then play (headmaster) a card."""
    with db_session:
        game = Game.get(id=pytest.info['game'])
        minister = game.status['minister']
        headmaster = int(game.status['headmaster'])
        # A user who holds neither position, to exercise the role checks.
        for i in pytest.users.keys():
            if pytest.users[i]["player_id"] != minister and pytest.users[i]["player_id"] != headmaster:
                acc = i
                break
        headers = {
            'Authorization': 'Bearer ' + pytest.users[acc]["token"],
            'Content-Type': 'text/plain'
        }
        response = client.post("/games/100/proclamations", headers=headers,
                               json={"card":""})
        assert response.status_code == 404
        assert response.json() == {'detail': "Game not found"}
        game.status["phase"] = "x"
        response = client.post(f"/games/{pytest.info['game']}/proclamations", headers=headers,
                               json={"card":""})
        assert response.status_code == 400
        assert response.json() == {'detail': "It is not a phase for playing a proclamation"}
        game.status["phase"] = "minister play"
        response = client.post(f"/games/{pytest.info['game']}/proclamations", headers=headers,
                               json={"card":""})
        assert response.status_code == 404
        assert response.json() == {'detail': "This player is not the minister"}
        for i in pytest.users.keys():
            if pytest.users[i]["player_id"] == minister:
                user_minister = i
                break
        headers['Authorization'] = 'Bearer ' + pytest.users[user_minister]["token"]
        # An empty string is not among the three drawn cards.
        response = client.post(f"/games/{pytest.info['game']}/proclamations", headers=headers,
                               json={"card":""})
        assert response.status_code == 400
        assert response.json() == {'detail': "The input card was not one of the options"}
        card = game.board.deck.split(',')[:3][0]
        response = client.post(f"/games/{pytest.info['game']}/proclamations", headers=headers,
                               json={"card":card})
        assert response.status_code == 200
        assert response.json() == {'message': f'{card} card discarded successfully'}
        headers['Authorization'] = 'Bearer ' + pytest.users[acc]["token"]
        response = client.post(f"/games/{pytest.info['game']}/proclamations", headers=headers,
                               json={"card":""})
        assert response.status_code == 404
        assert response.json() == {'detail': "This player is not the headmaster"}
        for i in pytest.users.keys():
            if pytest.users[i]["player_id"] == headmaster:
                user_headmaster = i
                break
        headers['Authorization'] = 'Bearer ' + pytest.users[user_headmaster]["token"]
        response = client.post(f"/games/{pytest.info['game']}/proclamations", headers=headers,
                               json={"card":"defaef"})
        assert response.status_code == 400
        assert response.json() == {'detail': "The input card was not one of the options"}
        card = game.board.deck.split(',')[:2][0]
        # The headmaster plays one of the two remaining cards.
        response = client.post(f"/games/{pytest.info['game']}/proclamations", headers=headers,
                               json={"card":card})
        assert response.status_code == 200
        assert response.json() == {'message': f'{card} card played successfully'}
def test_get_divination():
    # NOTE(review): this definition is shadowed by the later test_get_divination
    # in this same module, so pytest never collects it; its body also asserts
    # nothing. TODO: delete it or rename it to a distinct test name.
    with db_session:
        headers = {
            'Authorization': 'Bearer ' + pytest.users[1]["token"],
            'Content-Type': 'text/plain'
        }
        #response = client.get("/games/100/divination", headers=headers)
        game = Game.get(id=pytest.info['game'])
        # Bare attribute access — has no effect; leftover from a draft.
        game.status["minister"]
def test_user_get():
    """GET /users/me returns the authenticated user's own profile."""
    auth = {
        'Authorization': 'Bearer ' + pytest.users[1]["token"],
        'Content-Type': 'text/plain'
    }
    response = client.get("/users/me", headers=auth)
    assert response.status_code == 200
    with db_session:
        me = User.get(id=pytest.users[1]["user_id"])
        assert response.json() == {
            "id": me.id,
            "username": me.username,
            "useralias": me.useralias,
            "email": me.email,
        }
def test_users_get():
    """GET /users lists every registered user."""
    auth = {
        'Authorization': 'Bearer ' + pytest.users[1]["token"],
        'Content-Type': 'text/plain'
    }
    with db_session:
        expected = {
            'data': [
                {"id": u.id, "email": u.email, "username": u.username}
                for u in User.select()[:]
            ]
        }
    response = client.get("/users", headers=auth)
    assert response.status_code == 200
    assert response.json() == expected
def test_user_put():
    """PUT /users/ updates alias and password; a wrong old password is rejected."""
    auth = {
        'Authorization': 'Bearer ' + pytest.users[1]["token"],
        'Content-Type': 'text/plain'
    }
    payload = {
        "useralias": "andresito",
        "oldpassword": "12345",
        "newpassword": "123456"
    }
    response = client.put("/users/", headers=auth, json=payload)
    assert response.status_code == 200
    assert response.json() == ['fields modified: -useralias -password']
    # Re-sending with the now-stale old password must fail.
    del payload["useralias"]
    response = client.put("/users/", headers=auth, json=payload)
    assert response.status_code == 400
    assert response.json() == {'detail': "Old password dont match"}
def test_post_msg():
    """POST /games/{id}/messages records a chat message for a started game."""
    auth = {
        'Authorization': 'Bearer ' + pytest.users[2]["token"],
        'Content-Type': 'text/plain'
    }
    body = {"content": "Hola Guachin"}
    response = client.post(f"/games/{pytest.info['game']}/messages",
                           headers=auth, json=body)
    assert response.status_code == 200
    assert response.json() == {"detail":
                               "the message was recorder successfully"}
    # Unknown game id.
    response = client.post("/games/1000/messages", headers=auth, json=body)
    assert response.status_code == 404
    assert response.json() == {'detail': 'Game not found'}
    with db_session:
        game = Game.get(id=pytest.info['game'])
        game.started = False
        response = client.post(f"/games/{pytest.info['game']}/messages",
                               headers=auth, json=body)
        assert response.status_code == 400
        assert response.json() == {'detail': "Game is not started"}
        game.started = True
def test_get_msg():
    """GET /games/{id}/messages returns the recorded chat with ISO-8601 dates."""
    headers = {
        'Authorization': 'Bearer ' + pytest.users[3]["token"],
        'Content-Type': 'text/plain'
    }
    with db_session:
        date = str(Message.select(lambda u: u.game.id == pytest.info['game']).first().date)
        # The API serializes datetimes with a "T" separator (ISO-8601).
        date = date.replace(" ","T")
    response = client.get(f"/games/{pytest.info['game']}/messages",
                          headers=headers)
    assert response.status_code == 200
    assert response.json() == {
        "data": [
            {
                "content": "Hola Guachin",
                "date": date,
                "send_by": {
                    "id": pytest.users[2]["user_id"],
                    "username": "andres2",
                    "useralias": "andres2"
                }
            }
        ]
    }
    with db_session:
        game = Game.get(id=pytest.info['game'])
        game.started = False
        # Messages are unavailable while the game is not started.
        response = client.get(f"/games/{pytest.info['game']}/messages",
                              headers=headers)
        assert response.status_code == 400
        assert response.json() == {'detail': "Game is not started"}
        game.started = True
def test_post_avadakedavra():
    """POST /games/{id}/avadakedavra: phase gate and spell-availability gate."""
    with db_session:
        game = Game.get(id=pytest.info['game'])
        # Pick a player with no position (neither minister nor headmaster).
        player = Player.select(
            lambda p: p.current_position == "" and p.game.id == game.id).first()
        for i in pytest.users.keys():
            if pytest.users[i]["user_id"] == player.user.id:
                user = i
                break
        headers = {
            'Authorization': 'Bearer ' + pytest.users[user]["token"],
            'Content-Type': 'text/plain'
        }
        # Not in the spell phase yet.
        response = client.post(f"/games/{pytest.info['game']}/avadakedavra",
                               headers=headers,json={'id': pytest.users[2]["player_id"]})
        assert response.status_code == 400
        assert response.json() == {"detail": "Its not time for playing spells!"}
        game.status["phase"] = "spell play"
        # Right phase, but the board has not unlocked this spell.
        response = client.post(f"/games/{pytest.info['game']}/avadakedavra",
                               headers=headers,json={'id': pytest.users[2]["player_id"]})
        assert response.status_code == 400
        assert response.json() == {"detail": "The avadakedavra spell is not available"}
def test_get_divination():
    """GET /games/{id}/divination: phase gate, minister-only gate, started-game gate."""
    with db_session:
        game = Game.get(id=pytest.info['game'])
        # Pick a player holding no position — so NOT the minister.
        player = Player.select(
            lambda p: p.current_position == "" and p.game.id == game.id).first()
        for i in pytest.users.keys():
            if pytest.users[i]["user_id"] == player.user.id:
                user = i
                break
        headers = {
            'Authorization': 'Bearer ' + pytest.users[user]["token"],
            'Content-Type': 'text/plain'
        }
        game.status["phase"] = "otro estado"
        response = client.get(f"/games/{pytest.info['game']}/divination",
                              headers=headers)
        assert response.status_code == 400
        assert response.json() == {"detail": "Its not time for playing spells!"}
        game.status["phase"] = "spell play"
        # Right phase, but the caller is not the minister.
        response = client.get(f"/games/{pytest.info['game']}/divination",
                              headers=headers)
        assert response.status_code == 400
        assert response.json() == {"detail": "This player is not the minister"}
        game.started = False
        response = client.get(f"/games/{pytest.info['game']}/divination",
                              headers=headers)
        assert response.status_code == 400
        assert response.json() == {"detail": "Game is not started"}
        game.started = True
def test_get_crucio():
    """GET /games/{id}/crucio/{victim}: victim-membership and phase gates."""
    with db_session:
        game = Game.get(id=pytest.info['game'])
        player = Player.select(
            lambda p: p.current_position == "" and p.game.id == game.id).first()
        for i in pytest.users.keys():
            if pytest.users[i]["user_id"] == player.user.id:
                user = i
                break
        headers = {
            'Authorization': 'Bearer ' + pytest.users[user]["token"],
            'Content-Type': 'text/plain'
        }
        game.status["phase"] = "otro estado"
        # Victim id 4000 is not a player of this game.
        response = client.get(f"/games/{pytest.info['game']}/crucio/4000",
                              headers=headers)
        assert response.status_code == 400
        assert response.json() == {"detail": "The victim player does not belong to this game"}
        # Valid victim, but wrong phase.
        response = client.get(
            f"/games/{pytest.info['game']}/crucio/{pytest.users[user]['player_id']}",
            headers=headers)
        assert response.status_code == 400
        assert response.json() == {"detail": "Its not time for playing spells!"}
        game.status["phase"] = "spell play"
        game.started = False
        # NOTE(review): this request hits /divination, not /crucio — looks like a
        # copy-paste from test_get_divination; TODO confirm the intended endpoint.
        response = client.get(f"/games/{pytest.info['game']}/divination",
                              headers=headers)
        assert response.status_code == 400
        assert response.json() == {"detail": "Game is not started"}
        game.started = True
def test_end_turn():
    """POST /games/{id}/endturn advances the turn; validates game id and started flag."""
    auth = {
        'Authorization': 'Bearer ' + pytest.users[2]["token"],
        'Content-Type': 'text/plain'
    }
    response = client.post(f"/games/{pytest.info['game']}/endturn",
                           headers=auth, json={})
    assert response.status_code == 200
    assert response.json() == {"message": "Turn ended!"}
    # Unknown game id.
    response = client.post("/games/1000/endturn", headers=auth, json={})
    assert response.status_code == 404
    assert response.json() == {"detail": "Game not found"}
    with db_session:
        game = Game.get(id=pytest.info['game'])
        game.started = False
        response = client.post(f"/games/{pytest.info['game']}/endturn",
                               headers=auth, json={})
        assert response.status_code == 400
        assert response.json() == {'detail': "Game is not started"}
        game.started = True
def test_delete_game():
    """DELETE /games/{id}/delete: 404 unknown, 403 non-owner, 200 owner; then clean up fixtures."""
    auth = {
        'Authorization': 'Bearer ' + pytest.users[2]["token"],
        'Content-Type': 'text/plain'
    }
    response = client.delete("/games/100/delete", headers=auth)
    assert response.status_code == 404
    assert response.json() == {'detail': 'The game does not exist'}
    # User 2 did not create the game, so deletion is forbidden.
    response = client.delete(f"/games/{pytest.info['game']}/delete", headers=auth)
    assert response.status_code == 403
    assert response.json() == {'detail': 'The game does not belong to the current user'}
    # Switch to the game's creator.
    auth['Authorization'] = 'Bearer ' + pytest.users[1]["token"]
    response = client.delete(f"/games/{pytest.info['game']}/delete", headers=auth)
    assert response.status_code == 200
    assert response.json() == {"message": f"The game {pytest.info['game']} (Partida 1) was deleted"}
    # Tear down the leftover rows created by earlier tests.
    with db_session:
        Player.get(id=pytest.info['other_player']).delete()
        Game.get(id=pytest.info['other_game']).delete()
        for u in list(pytest.users.values())[:-1]:
            User.get(id=u["user_id"]).delete()
| 42.155531
| 126
| 0.569528
| 4,293
| 38,488
| 5.030515
| 0.057303
| 0.106964
| 0.062882
| 0.092239
| 0.808715
| 0.770282
| 0.747453
| 0.717216
| 0.69323
| 0.661974
| 0
| 0.018057
| 0.26759
| 38,488
| 912
| 127
| 42.201754
| 0.748058
| 0.001637
| 0
| 0.595867
| 0
| 0.003444
| 0.245133
| 0.064049
| 0
| 0
| 0
| 0
| 0.190586
| 1
| 0.033295
| false
| 0.014925
| 0.006889
| 0
| 0.041332
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
9c747da7ded71831a4eb1f24f09bc8f8d2ef0348
| 37
|
py
|
Python
|
Week 1/src/InvalidDim.py
|
rpatel1291/SSW567
|
366b30e1374bd07deb6cdb2b63e2f0b48fa691bd
|
[
"MIT"
] | null | null | null |
Week 1/src/InvalidDim.py
|
rpatel1291/SSW567
|
366b30e1374bd07deb6cdb2b63e2f0b48fa691bd
|
[
"MIT"
] | null | null | null |
Week 1/src/InvalidDim.py
|
rpatel1291/SSW567
|
366b30e1374bd07deb6cdb2b63e2f0b48fa691bd
|
[
"MIT"
] | null | null | null |
class InvalidDim(Exception):
    """Custom exception type; the name suggests it flags an invalid dimension
    value (raise sites are not visible in this file — confirm at call sites)."""
    pass
| 18.5
| 28
| 0.756757
| 4
| 37
| 7
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.162162
| 37
| 2
| 29
| 18.5
| 0.903226
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
92c61b1c9e9bf8d566da4114e01a1057a7b6a502
| 20
|
py
|
Python
|
packages/testDir/hello.py
|
bopopescu/daily_harvest
|
7809011cb739dafd583e45b3be43c61a101178e3
|
[
"MIT"
] | 17
|
2015-04-04T05:27:15.000Z
|
2019-02-26T21:34:21.000Z
|
packages/testDir/hello.py
|
bopopescu/daily_harvest
|
7809011cb739dafd583e45b3be43c61a101178e3
|
[
"MIT"
] | 18
|
2015-05-20T10:30:44.000Z
|
2016-12-15T14:25:00.000Z
|
packages/testDir/hello.py
|
bopopescu/daily_harvest
|
7809011cb739dafd583e45b3be43c61a101178e3
|
[
"MIT"
] | 19
|
2015-03-08T22:30:10.000Z
|
2019-02-26T21:34:22.000Z
|
# Fix: the Python 2 print *statement* is a SyntaxError on Python 3;
# use the print() function, which works on both Python 2.7 and 3.
print("HELLO WORLD")
| 10
| 19
| 0.75
| 3
| 20
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15
| 20
| 1
| 20
| 20
| 0.882353
| 0
| 0
| 0
| 0
| 0
| 0.55
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
1304f77e48436888b3a9caea9797e00d2fe079ba
| 57
|
py
|
Python
|
Demos/Module2/helloworld_start.py
|
bdheard/PythonEssentialsApr19
|
666ae76b8339b5d8a9f20a19c7429ab1de3a167a
|
[
"MIT"
] | null | null | null |
Demos/Module2/helloworld_start.py
|
bdheard/PythonEssentialsApr19
|
666ae76b8339b5d8a9f20a19c7429ab1de3a167a
|
[
"MIT"
] | null | null | null |
Demos/Module2/helloworld_start.py
|
bdheard/PythonEssentialsApr19
|
666ae76b8339b5d8a9f20a19c7429ab1de3a167a
|
[
"MIT"
] | null | null | null |
#
# Example file for Hello World
#
# Greet on stdout when the script runs.
print("Hello World!!")
| 14.25
| 30
| 0.684211
| 8
| 57
| 4.875
| 0.75
| 0.512821
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157895
| 57
| 4
| 31
| 14.25
| 0.8125
| 0.491228
| 0
| 0
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
130cd5b11fc55d645d33d1cb889471cd026b80c3
| 610
|
py
|
Python
|
hello.py
|
kbalisnomo/cgi-lab
|
0bd11ef99c5dff4fafb95c72ba12746728548d5c
|
[
"Apache-2.0"
] | null | null | null |
hello.py
|
kbalisnomo/cgi-lab
|
0bd11ef99c5dff4fafb95c72ba12746728548d5c
|
[
"Apache-2.0"
] | null | null | null |
hello.py
|
kbalisnomo/cgi-lab
|
0bd11ef99c5dff4fafb95c72ba12746728548d5c
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# CGI demo: emit selected request environment variables as HTML.
import os
import json  # only used by the commented-out JSON variant below

#PRINT OUT ALL ENV VARIABLES AS PLAIN TEXT
# print("Content-Type: text/plain") #let browser know to expect plain text
# print()
# print(os.environ)

#PRINT ENV VARIABLES AS JSON
# print("Content-Type: application/json") #let browser know to expect json
# print()
# print(json.dumps(dict(os.environ), indent=2)) #print w/ nice formatting

#PRINT QUERY PARAMETER DATA IN HTML
# CGI responses are: header line(s), a blank line, then the body.
print("Content-Type: text/html") #let browser know to expect html
print()
# NOTE(review): assumes the web server sets QUERY_STRING and HTTP_USER_AGENT;
# a direct CLI run raises KeyError — consider os.environ.get(...) instead.
print(f"<p>QUERY_STRING={os.environ['QUERY_STRING']}</p><p>HTTP_USER_AGENT={os.environ['HTTP_USER_AGENT']}</p>")
| 27.727273
| 112
| 0.734426
| 99
| 610
| 4.464646
| 0.414141
| 0.081448
| 0.108597
| 0.108597
| 0.149321
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003724
| 0.119672
| 610
| 22
| 112
| 27.727273
| 0.819367
| 0.660656
| 0
| 0
| 0
| 0.2
| 0.64433
| 0.525773
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0.6
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
1336f507f7516b061105d04c5d3f1c2788c9cf62
| 6,447
|
py
|
Python
|
tests/components/nuheat/test_climate.py
|
pcaston/core
|
e74d946cef7a9d4e232ae9e0ba150d18018cfe33
|
[
"Apache-2.0"
] | 1
|
2021-07-08T20:09:55.000Z
|
2021-07-08T20:09:55.000Z
|
tests/components/nuheat/test_climate.py
|
pcaston/core
|
e74d946cef7a9d4e232ae9e0ba150d18018cfe33
|
[
"Apache-2.0"
] | 47
|
2021-02-21T23:43:07.000Z
|
2022-03-31T06:07:10.000Z
|
tests/components/nuheat/test_climate.py
|
OpenPeerPower/core
|
f673dfac9f2d0c48fa30af37b0a99df9dd6640ee
|
[
"Apache-2.0"
] | null | null | null |
"""The test for the NuHeat thermostat module."""
from datetime import timedelta
from unittest.mock import patch
from openpeerpower.components.nuheat.const import DOMAIN
from openpeerpower.const import ATTR_ENTITY_ID
import openpeerpower.util.dt as dt_util
from .mocks import (
MOCK_CONFIG_ENTRY,
_get_mock_nuheat,
_get_mock_thermostat_run,
_get_mock_thermostat_schedule_hold_available,
_get_mock_thermostat_schedule_hold_unavailable,
_get_mock_thermostat_schedule_temporary_hold,
)
from tests.common import MockConfigEntry, async_fire_time_changed
async def test_climate_thermostat_run(opp):
    """Test a thermostat with the schedule running."""
    mock_thermostat = _get_mock_thermostat_run()
    mock_nuheat = _get_mock_nuheat(get_thermostat=mock_thermostat)
    # Patch the NuHeat API client so config-entry setup uses the mocked thermostat.
    with patch(
        "openpeerpower.components.nuheat.nuheat.NuHeat",
        return_value=mock_nuheat,
    ):
        config_entry = MockConfigEntry(domain=DOMAIN, data=MOCK_CONFIG_ENTRY)
        config_entry.add_to_opp(opp)
        assert await opp.config_entries.async_setup(config_entry.entry_id)
        await opp.async_block_till_done()

    state = opp.states.get("climate.master_bathroom")
    assert state.state == "auto"
    expected_attributes = {
        "current_temperature": 22.2,
        "friendly_name": "Master bathroom",
        "hvac_action": "heating",
        "hvac_modes": ["auto", "heat"],
        "max_temp": 69.4,
        "min_temp": 5.0,
        "preset_mode": "Run Schedule",
        "preset_modes": ["Run Schedule", "Temporary Hold", "Permanent Hold"],
        "supported_features": 17,
        "temperature": 22.2,
    }
    # Only test for a subset of attributes in case
    # OPP changes the implementation and a new one appears
    assert all(item in state.attributes.items() for item in expected_attributes.items())
async def test_climate_thermostat_schedule_hold_unavailable(opp):
    """Test a thermostat with the schedule hold that is offline."""
    mock_thermostat = _get_mock_thermostat_schedule_hold_unavailable()
    mock_nuheat = _get_mock_nuheat(get_thermostat=mock_thermostat)
    # Patch the NuHeat API client so setup sees the offline thermostat mock.
    with patch(
        "openpeerpower.components.nuheat.nuheat.NuHeat",
        return_value=mock_nuheat,
    ):
        config_entry = MockConfigEntry(domain=DOMAIN, data=MOCK_CONFIG_ENTRY)
        config_entry.add_to_opp(opp)
        assert await opp.config_entries.async_setup(config_entry.entry_id)
        await opp.async_block_till_done()

    state = opp.states.get("climate.guest_bathroom")
    # An offline thermostat is surfaced as the "unavailable" state.
    assert state.state == "unavailable"
    expected_attributes = {
        "friendly_name": "Guest bathroom",
        "hvac_modes": ["auto", "heat"],
        "max_temp": 180.6,
        "min_temp": -6.1,
        "preset_modes": ["Run Schedule", "Temporary Hold", "Permanent Hold"],
        "supported_features": 17,
    }
    # Only test for a subset of attributes in case
    # OPP changes the implementation and a new one appears
    assert all(item in state.attributes.items() for item in expected_attributes.items())
async def test_climate_thermostat_schedule_hold_available(opp):
    """Test a thermostat with the schedule hold that is online."""
    mock_thermostat = _get_mock_thermostat_schedule_hold_available()
    mock_nuheat = _get_mock_nuheat(get_thermostat=mock_thermostat)
    # Patch the NuHeat API client so setup uses the online hold thermostat mock.
    with patch(
        "openpeerpower.components.nuheat.nuheat.NuHeat",
        return_value=mock_nuheat,
    ):
        config_entry = MockConfigEntry(domain=DOMAIN, data=MOCK_CONFIG_ENTRY)
        config_entry.add_to_opp(opp)
        assert await opp.config_entries.async_setup(config_entry.entry_id)
        await opp.async_block_till_done()

    state = opp.states.get("climate.available_bathroom")
    assert state.state == "auto"
    expected_attributes = {
        "current_temperature": 38.9,
        "friendly_name": "Available bathroom",
        "hvac_action": "idle",
        "hvac_modes": ["auto", "heat"],
        "max_temp": 180.6,
        "min_temp": -6.1,
        "preset_mode": "Run Schedule",
        "preset_modes": ["Run Schedule", "Temporary Hold", "Permanent Hold"],
        "supported_features": 17,
        "temperature": 26.1,
    }
    # Only test for a subset of attributes in case
    # OPP changes the implementation and a new one appears
    assert all(item in state.attributes.items() for item in expected_attributes.items())
async def test_climate_thermostat_schedule_temporary_hold(opp):
"""Test a thermostat with the temporary schedule hold that is online."""
mock_thermostat = _get_mock_thermostat_schedule_temporary_hold()
mock_nuheat = _get_mock_nuheat(get_thermostat=mock_thermostat)
with patch(
"openpeerpower.components.nuheat.nuheat.NuHeat",
return_value=mock_nuheat,
):
config_entry = MockConfigEntry(domain=DOMAIN, data=MOCK_CONFIG_ENTRY)
config_entry.add_to_opp(opp)
assert await opp.config_entries.async_setup(config_entry.entry_id)
await opp.async_block_till_done()
state = opp.states.get("climate.temp_bathroom")
assert state.state == "auto"
expected_attributes = {
"current_temperature": 94.4,
"friendly_name": "Temp bathroom",
"hvac_action": "idle",
"hvac_modes": ["auto", "heat"],
"max_temp": 180.6,
"min_temp": -0.6,
"preset_mode": "Run Schedule",
"preset_modes": ["Run Schedule", "Temporary Hold", "Permanent Hold"],
"supported_features": 17,
"temperature": 37.2,
}
# Only test for a subset of attributes in case
# OPP changes the implementation and a new one appears
assert all(item in state.attributes.items() for item in expected_attributes.items())
await opp.services.async_call(
"climate",
"set_temperature",
service_data={ATTR_ENTITY_ID: "climate.temp_bathroom", "temperature": 90},
blocking=True,
)
await opp.async_block_till_done()
# opportunistic set
state = opp.states.get("climate.temp_bathroom")
assert state.attributes["preset_mode"] == "Temporary Hold"
assert state.attributes["temperature"] == 50.0
# and the api poll returns it to the mock
async_fire_time_changed(opp, dt_util.utcnow() + timedelta(seconds=3))
await opp.async_block_till_done()
state = opp.states.get("climate.temp_bathroom")
assert state.attributes["preset_mode"] == "Run Schedule"
assert state.attributes["temperature"] == 37.2
| 37.923529
| 88
| 0.696913
| 807
| 6,447
| 5.287485
| 0.164808
| 0.043825
| 0.02742
| 0.035154
| 0.787438
| 0.773377
| 0.720881
| 0.702133
| 0.702133
| 0.661589
| 0
| 0.01126
| 0.201024
| 6,447
| 169
| 89
| 38.147929
| 0.817123
| 0.07647
| 0
| 0.573643
| 0
| 0
| 0.210119
| 0.058855
| 0
| 0
| 0
| 0
| 0.124031
| 1
| 0
| false
| 0
| 0.054264
| 0
| 0.054264
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
13645b37693f695bb9a6a7023a68992cdc511282
| 78
|
py
|
Python
|
insta.py
|
shubhamg0sai/Facebook_group_hack
|
b5efba4713d274adf94ca136062b85d297fdf2ed
|
[
"MIT"
] | 3
|
2021-04-27T03:09:13.000Z
|
2021-11-30T13:41:10.000Z
|
insta.py
|
shubhamggosai/Facebook_group_hack
|
b5efba4713d274adf94ca136062b85d297fdf2ed
|
[
"MIT"
] | 1
|
2021-09-02T22:45:41.000Z
|
2021-09-03T14:23:57.000Z
|
insta.py
|
shubhamggosai/Facebook_group_hack
|
b5efba4713d274adf94ca136062b85d297fdf2ed
|
[
"MIT"
] | 1
|
2022-01-24T18:53:04.000Z
|
2022-01-24T18:53:04.000Z
|
import os
os.system('xdg-open https://www.instagram.com/shubhamg0sai/?hl=en')
| 26
| 67
| 0.75641
| 13
| 78
| 4.538462
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013514
| 0.051282
| 78
| 2
| 68
| 39
| 0.783784
| 0
| 0
| 0
| 0
| 0
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
1364d7a68a77e2d95eca9fb3e89524c8a095ab73
| 522
|
py
|
Python
|
python/masonite/config/providers.py
|
NikolasMelui/web-frameworks
|
2356efb3ff4047c99172cd8fccd210d390dc17c0
|
[
"MIT"
] | 1
|
2021-12-28T11:38:21.000Z
|
2021-12-28T11:38:21.000Z
|
python/masonite/config/providers.py
|
NikolasMelui/web-frameworks
|
2356efb3ff4047c99172cd8fccd210d390dc17c0
|
[
"MIT"
] | null | null | null |
python/masonite/config/providers.py
|
NikolasMelui/web-frameworks
|
2356efb3ff4047c99172cd8fccd210d390dc17c0
|
[
"MIT"
] | null | null | null |
from masonite.providers import (
RouteProvider,
FrameworkProvider,
ViewProvider,
ExceptionProvider,
SessionProvider,
QueueProvider,
StorageProvider,
AuthenticationProvider,
AuthorizationProvider,
ORMProvider,
EventProvider
)
PROVIDERS = [
FrameworkProvider,
RouteProvider,
ViewProvider,
EventProvider,
ExceptionProvider,
SessionProvider,
QueueProvider,
StorageProvider,
AuthenticationProvider,
AuthorizationProvider,
ORMProvider,
]
| 18
| 32
| 0.716475
| 27
| 522
| 13.851852
| 0.555556
| 0.171123
| 0.240642
| 0.320856
| 0.609626
| 0.609626
| 0.609626
| 0
| 0
| 0
| 0
| 0
| 0.231801
| 522
| 28
| 33
| 18.642857
| 0.932668
| 0
| 0
| 0.769231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.038462
| 0
| 0.038462
| 0
| 1
| 0
| 1
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
139150f9071e2dde1e44ef5e40819af7534a8dc6
| 97
|
py
|
Python
|
keras_efficient_attention/__init__.py
|
alex4321/keras-efficient-attention
|
73644a06d1004097c41195ffff2d239a6142b181
|
[
"MIT"
] | 1
|
2020-03-28T01:03:33.000Z
|
2020-03-28T01:03:33.000Z
|
keras_efficient_attention/__init__.py
|
alex4321/keras-efficient-attention
|
73644a06d1004097c41195ffff2d239a6142b181
|
[
"MIT"
] | null | null | null |
keras_efficient_attention/__init__.py
|
alex4321/keras-efficient-attention
|
73644a06d1004097c41195ffff2d239a6142b181
|
[
"MIT"
] | null | null | null |
from .efficient_attention import EfficientAttention, SCALING_NORMALIZATION, SOFTMAX_NORMALIZATION
| 97
| 97
| 0.917526
| 9
| 97
| 9.555556
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.051546
| 97
| 1
| 97
| 97
| 0.934783
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
13d4c6d1bd2d91478b61f6070723838b7851d475
| 133
|
py
|
Python
|
AthleticTeam/AdministrationApp/admin.py
|
Temeteron/Athletic-Team
|
cd4407ac2a7dd543d1120f5a55908fbe2e49c263
|
[
"MIT"
] | 1
|
2017-04-26T00:08:20.000Z
|
2017-04-26T00:08:20.000Z
|
AthleticTeam/AdministrationApp/admin.py
|
Temeteron/Athletic-Team
|
cd4407ac2a7dd543d1120f5a55908fbe2e49c263
|
[
"MIT"
] | null | null | null |
AthleticTeam/AdministrationApp/admin.py
|
Temeteron/Athletic-Team
|
cd4407ac2a7dd543d1120f5a55908fbe2e49c263
|
[
"MIT"
] | 2
|
2016-05-31T21:14:32.000Z
|
2021-09-14T18:59:15.000Z
|
from django.contrib import admin
from .models import Administration
# Register your models here.
admin.site.register(Administration)
| 26.6
| 35
| 0.834586
| 17
| 133
| 6.529412
| 0.647059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 133
| 5
| 35
| 26.6
| 0.932773
| 0.195489
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
13e0f650d0b09d19545c4590c86d7f2207ec65b7
| 100
|
py
|
Python
|
source/Flask-AWSCognito/flask_awscognito/exceptions.py
|
Frankovich73/tag-tamer-deployment-test
|
8267ff32bcae8de6b23d8566b719e00dc4879890
|
[
"Apache-2.0"
] | 15
|
2021-06-27T23:42:37.000Z
|
2021-09-24T19:40:00.000Z
|
source/Flask-AWSCognito/flask_awscognito/exceptions.py
|
Frankovich73/tag-tamer-deployment-test
|
8267ff32bcae8de6b23d8566b719e00dc4879890
|
[
"Apache-2.0"
] | 7
|
2021-07-05T06:56:46.000Z
|
2021-08-06T00:59:36.000Z
|
source/Flask-AWSCognito/flask_awscognito/exceptions.py
|
Frankovich73/tag-tamer-deployment-test
|
8267ff32bcae8de6b23d8566b719e00dc4879890
|
[
"Apache-2.0"
] | 5
|
2021-06-23T17:59:01.000Z
|
2021-10-20T14:22:44.000Z
|
class FlaskAWSCognitoError(Exception):
pass
class TokenVerifyError(Exception):
pass
| 14.285714
| 39
| 0.72
| 8
| 100
| 9
| 0.625
| 0.361111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.22
| 100
| 6
| 40
| 16.666667
| 0.923077
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
13f92abcd6ca8adc5b7111e398afcbc0deb11d6e
| 111
|
py
|
Python
|
endpoints/history.py
|
ben-oxley/santa-shares-server
|
c6bb79c82f2988fd7041d1db63d89e6549c65a2a
|
[
"MIT"
] | null | null | null |
endpoints/history.py
|
ben-oxley/santa-shares-server
|
c6bb79c82f2988fd7041d1db63d89e6549c65a2a
|
[
"MIT"
] | 1
|
2019-12-13T23:06:50.000Z
|
2019-12-13T23:06:50.000Z
|
endpoints/history.py
|
ben-oxley/santa-shares-server
|
c6bb79c82f2988fd7041d1db63d89e6549c65a2a
|
[
"MIT"
] | 3
|
2019-12-13T22:47:00.000Z
|
2019-12-22T11:42:29.000Z
|
from models import UserLog
from flask_restful import Resource, fields, marshal_with
from endpoints import api
| 22.2
| 56
| 0.846847
| 16
| 111
| 5.75
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.135135
| 111
| 4
| 57
| 27.75
| 0.958333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b915d0a41df9de1220b1dfee888b6b54cd82bbde
| 136
|
py
|
Python
|
docs/examples/userguide/sharing_declarations/setup.py
|
smok-serwis/cython
|
e551a3a348888bd89d4aad809916709a634af1fb
|
[
"Apache-2.0"
] | 2
|
2020-01-29T08:20:22.000Z
|
2020-01-29T08:20:25.000Z
|
docs/examples/userguide/sharing_declarations/setup.py
|
smok-serwis/cython
|
e551a3a348888bd89d4aad809916709a634af1fb
|
[
"Apache-2.0"
] | 1
|
2019-09-21T19:58:10.000Z
|
2019-09-21T19:58:10.000Z
|
docs/examples/userguide/sharing_declarations/setup.py
|
smok-serwis/cython
|
e551a3a348888bd89d4aad809916709a634af1fb
|
[
"Apache-2.0"
] | 1
|
2019-10-06T10:49:39.000Z
|
2019-10-06T10:49:39.000Z
|
from distutils.core import setup
from Cython.Build import cythonize
setup(ext_modules=cythonize(["landscaping.pyx", "shrubbing.pyx"]))
| 27.2
| 66
| 0.801471
| 18
| 136
| 6
| 0.722222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.080882
| 136
| 4
| 67
| 34
| 0.864
| 0
| 0
| 0
| 0
| 0
| 0.205882
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b919fb94ae4cf47d67fb1a0ade12c1f4f68dfe44
| 301
|
py
|
Python
|
python/anyascii/_data/_300.py
|
casept/anyascii
|
d4f426b91751254b68eaa84c6cd23099edd668e6
|
[
"ISC"
] | null | null | null |
python/anyascii/_data/_300.py
|
casept/anyascii
|
d4f426b91751254b68eaa84c6cd23099edd668e6
|
[
"ISC"
] | null | null | null |
python/anyascii/_data/_300.py
|
casept/anyascii
|
d4f426b91751254b68eaa84c6cd23099edd668e6
|
[
"ISC"
] | null | null | null |
b=' Song Dang Fu Xian Xian Yu Di Tui Lu Si Zan Chuang Bi Du'
| 301
| 301
| 0.142857
| 15
| 301
| 2.866667
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.847176
| 301
| 1
| 301
| 301
| 0.934783
| 0
| 0
| 0
| 0
| 0
| 0.983444
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b920c952208b1a52435bafb4ae345f9a7c0f9db0
| 67
|
py
|
Python
|
pet/__init__.py
|
Fikou/orangescogs
|
d230815760dfd40bafcbf9870606c9cb28c6fbb5
|
[
"MIT"
] | 1
|
2021-08-24T21:37:27.000Z
|
2021-08-24T21:37:27.000Z
|
pet/__init__.py
|
Fikou/orangescogs
|
d230815760dfd40bafcbf9870606c9cb28c6fbb5
|
[
"MIT"
] | 13
|
2020-08-06T10:14:42.000Z
|
2022-02-03T21:12:09.000Z
|
pet/__init__.py
|
Fikou/orangescogs
|
d230815760dfd40bafcbf9870606c9cb28c6fbb5
|
[
"MIT"
] | 11
|
2020-08-10T01:19:21.000Z
|
2021-10-11T11:45:08.000Z
|
from .pets import Pets
def setup(bot):
bot.add_cog(Pets(bot))
| 13.4
| 26
| 0.686567
| 12
| 67
| 3.75
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.179104
| 67
| 4
| 27
| 16.75
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b9331ae08231e826b8f8f0c8f3141ea799d0f5ce
| 3,341
|
py
|
Python
|
LeafNATS/tools/newsroom_process/extract_spacy.py
|
haophancs/TREQS
|
49e354ce2a08cf963ec139d99936020e0f80ced8
|
[
"MIT"
] | 149
|
2019-08-09T17:18:18.000Z
|
2022-03-28T01:18:56.000Z
|
LeafNATS/tools/newsroom_process/extract_spacy.py
|
haophancs/TREQS
|
49e354ce2a08cf963ec139d99936020e0f80ced8
|
[
"MIT"
] | 5
|
2019-08-14T18:23:24.000Z
|
2021-10-03T20:17:28.000Z
|
LeafNATS/tools/newsroom_process/extract_spacy.py
|
haophancs/TREQS
|
49e354ce2a08cf963ec139d99936020e0f80ced8
|
[
"MIT"
] | 32
|
2019-08-10T02:09:44.000Z
|
2022-03-09T07:59:46.000Z
|
import re
import spacy
import time
nlp = spacy.load('en', disable=['tagger', 'ner'])
from newsroom import jsonl
from multiprocessing import Pool
def process_data(input_):
article = input_['text']
summary = input_['summary']
title = input_['title']
if article == None or summary == None or title == None:
return ''
article = nlp(article)
summary = nlp(summary)
title = nlp(title)
sen_arr = []
for sen in article.sents:
sen = [k.text for k in sen if '\n' not in k.text]
sen = ['<s>']+sen+['</s>']
sen = ' '.join(sen)
sen_arr.append(sen)
article = ' '.join(sen_arr)
sen_arr = []
for sen in summary.sents:
sen = [k.text for k in sen if '\n' not in k.text]
sen = ['<s>']+sen+['</s>']
sen = ' '.join(sen)
sen_arr.append(sen)
summary = ' '.join(sen_arr)
sen_arr = []
for sen in title.sents:
sen = [k.text for k in sen if '\n' not in k.text]
sen = ['<s>']+sen+['</s>']
sen = ' '.join(sen)
sen_arr.append(sen)
title = ' '.join(sen_arr)
sen_arr = [title, summary, article]
return '<sec>'.join(sen_arr)
fout = open('plain_data/test.txt', 'w')
fp = jsonl.open('extract_data/test.data', gzip=True)
cnt = 0
batcher = []
start = time.time()
end = time.time()
for line in fp:
cnt += 1
print(cnt, end-start)
batcher.append(line)
if len(batcher) == 64:
pool = Pool(processes=16)
result = pool.map(process_data, batcher)
pool.terminate()
for itm in result:
if len(itm) > 1:
fout.write(itm+'\n')
batcher = []
end = time.time()
if len(batcher) > 0:
pool = Pool(processes=16)
result = pool.map(process_data, batcher)
pool.terminate()
for itm in result:
if len(itm) > 1:
fout.write(itm+'\n')
batcher = []
fp.close()
fout.close()
fout = open('plain_data/dev.txt', 'w')
fp = jsonl.open('extract_data/dev.data', gzip=True)
cnt = 0
batcher = []
for line in fp:
cnt += 1
print(cnt, end-start)
batcher.append(line)
if len(batcher) == 64:
pool = Pool(processes=16)
result = pool.map(process_data, batcher)
pool.terminate()
for itm in result:
if len(itm) > 1:
fout.write(itm+'\n')
batcher = []
end = time.time()
if len(batcher) > 0:
pool = Pool(processes=16)
result = pool.map(process_data, batcher)
pool.terminate()
for itm in result:
if len(itm) > 1:
fout.write(itm+'\n')
batcher = []
fp.close()
fout.close()
fout = open('plain_data/train.txt', 'w')
fp = jsonl.open('extract_data/train.data', gzip=True)
cnt = 0
batcher = []
for line in fp:
cnt += 1
print(cnt, end-start)
batcher.append(line)
if len(batcher) == 64:
pool = Pool(processes=16)
result = pool.map(process_data, batcher)
pool.terminate()
for itm in result:
if len(itm) > 1:
fout.write(itm+'\n')
batcher = []
end = time.time()
if len(batcher) > 0:
pool = Pool(processes=16)
result = pool.map(process_data, batcher)
pool.terminate()
for itm in result:
if len(itm) > 1:
fout.write(itm+'\n')
batcher = []
fp.close()
fout.close()
| 25.899225
| 59
| 0.551931
| 465
| 3,341
| 3.905376
| 0.148387
| 0.03304
| 0.023128
| 0.062775
| 0.773678
| 0.757159
| 0.744493
| 0.701542
| 0.67511
| 0.67511
| 0
| 0.013936
| 0.29123
| 3,341
| 129
| 60
| 25.899225
| 0.752956
| 0
| 0
| 0.752066
| 0
| 0
| 0.060742
| 0.019749
| 0
| 0
| 0
| 0
| 0
| 1
| 0.008264
| false
| 0
| 0.041322
| 0
| 0.066116
| 0.024793
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b967813f1c8c55e532c48b864ff55ca604119e5e
| 5,448
|
py
|
Python
|
authors/apps/comments/tests/test_comments.py
|
andela/Ah-backend-valkyrie
|
f0eb64c27e1fe37d5c81e4b9a8762dcf3c336a79
|
[
"BSD-3-Clause"
] | null | null | null |
authors/apps/comments/tests/test_comments.py
|
andela/Ah-backend-valkyrie
|
f0eb64c27e1fe37d5c81e4b9a8762dcf3c336a79
|
[
"BSD-3-Clause"
] | 46
|
2019-01-08T13:16:41.000Z
|
2021-04-30T20:47:08.000Z
|
authors/apps/comments/tests/test_comments.py
|
andela/Ah-backend-valkyrie
|
f0eb64c27e1fe37d5c81e4b9a8762dcf3c336a79
|
[
"BSD-3-Clause"
] | 3
|
2019-01-07T08:21:59.000Z
|
2019-09-20T06:43:18.000Z
|
from authors.apps.authentication.tests.base import BaseTestMethods
from rest_framework import status
from django.urls import reverse
class TestComments(BaseTestMethods):
def post_article(self):
post_article_url = reverse(self.get_post_article_url)
self.client.credentials(
HTTP_AUTHORIZATION='Bearer ' + self.get_user_token())
article = self.client.post(
post_article_url, data=self.article, format='json')
return article.data
def test_posting_a_comment(self):
# user posting a comment.
article = self.post_article()
self.client.credentials(
HTTP_AUTHORIZATION='Bearer ' + self.get_user2_token()
)
response = self.client.post(
"/api/v1/articles/" + article['slug'] + "/comments/",
data=self.comment, format='json'
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual('This is a test comment.',
response.data['comment']['body'])
def test_posting_a_comment_twice(self):
# user posting a comment twice.
article = self.post_article()
self.client.credentials(
HTTP_AUTHORIZATION='Bearer ' + self.get_user2_token()
)
self.client.post(
"/api/v1/articles/" + article['slug'] + "/comments/", data=self.comment, format='json')
response = self.client.post(
"/api/v1/articles/" + article['slug'] + "/comments/", data=self.comment, format='json')
self.assertEqual(
response.status_code, status.HTTP_409_CONFLICT)
self.assertEqual(
"You can't give the same comment twice on the same article",
response.data['message'])
def test_deleting_comment(self):
# deleting a comment
article = self.post_article()
self.client.credentials(
HTTP_AUTHORIZATION='Bearer ' + self.get_user2_token()
)
response = self.client.post(
"/api/v1/articles/" + article['slug'] + "/comments/",
data=self.comment, format='json'
)
response2 = self.client.delete(
"/api/v1/articles/" + article['slug'] + "/comments/"
+ str(response.data['comment']['id']), format='json'
)
self.assertEqual(
response2.status_code, status.HTTP_204_NO_CONTENT)
def test_updating_a_comment(self):
# user updating a comment.
article = self.post_article()
self.client.credentials(
HTTP_AUTHORIZATION='Bearer ' + self.get_user2_token()
)
response = self.client.post(
"/api/v1/articles/" + article['slug'] + "/comments/",
data=self.comment, format='json'
)
response2 = self.client.put(
"/api/v1/articles/" + article['slug'] + "/comments/"
+ str(response.data['comment']['id']),
data=self.comment2, format='json'
)
self.assertEqual(response2.status_code, status.HTTP_200_OK)
self.assertEqual(
"Comment updated successfully", response2.data['message'])
def test_viewing_single_comment(self):
# user updating a comment.
article = self.post_article()
self.client.credentials(
HTTP_AUTHORIZATION='Bearer ' + self.get_user2_token()
)
response = self.client.post(
"/api/v1/articles/" + article['slug'] + "/comments/",
data=self.comment, format='json')
response3 = self.client.get(
"/api/v1/articles/" + article['slug'] + "/comments/"
+ str(response.data['comment']['id'])
)
self.assertEqual(
response3.status_code, status.HTTP_200_OK)
self.assertIn(
"This is a test comment.", str(
response3.data['body']))
def test_viewing_multiple_comments(self):
# user updating a comment.
article = self.post_article()
self.client.credentials(
HTTP_AUTHORIZATION='Bearer ' + self.get_user2_token()
)
response2 = self.client.post(
"/api/v1/articles/" + article['slug'] + "/comments/",
data=self.comment, format='json'
)
response4 = self.client.post(
"/api/v1/articles/" + article['slug'] + "/comments/",
data=self.comment3, format='json'
)
response3 = self.client.get(
"/api/v1/articles/" + article['slug'] + "/comments/")
self.assertEqual(response3.status_code, status.HTTP_200_OK)
self.assertIn(
"This is a test comment.", str(response3.data))
def test_deleting_comment_by_non_owner(self):
# deleting a comment
article = article = self.post_article()
self.client.credentials(
HTTP_AUTHORIZATION='Bearer ' + self.get_user2_token()
)
response = self.client.post(
"/api/v1/articles/" + article['slug'] + "/comments/",
data=self.comment, format='json'
)
self.client.credentials(
HTTP_AUTHORIZATION='Bearer ' + self.get_user_token()
)
response2 = self.client.delete(
"/api/v1/articles/" + article['slug'] + "/comments/"
+ str(response.data['comment']['id'])
)
self.assertEqual(
response2.status_code, status.HTTP_403_FORBIDDEN)
| 38.914286
| 99
| 0.587188
| 571
| 5,448
| 5.448336
| 0.155867
| 0.077146
| 0.058502
| 0.090003
| 0.780456
| 0.735776
| 0.735776
| 0.723562
| 0.723562
| 0.688846
| 0
| 0.015097
| 0.282673
| 5,448
| 139
| 100
| 39.194245
| 0.780962
| 0.03047
| 0
| 0.491803
| 0
| 0
| 0.144862
| 0
| 0
| 0
| 0
| 0
| 0.098361
| 1
| 0.065574
| false
| 0
| 0.02459
| 0
| 0.106557
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b98d3ec47ef92eba399ad80cdb2077615dcf7a1e
| 128
|
py
|
Python
|
ajunivel/admin.py
|
HumbertoDiego/django-ajustamento-redes-nivelamento
|
47912ccb4ba9fc29709add18bab688af50cfb582
|
[
"MIT"
] | 1
|
2022-03-06T01:10:43.000Z
|
2022-03-06T01:10:43.000Z
|
ajunivel/admin.py
|
HumbertoDiego/django-ajustamento-redes-nivelamento
|
47912ccb4ba9fc29709add18bab688af50cfb582
|
[
"MIT"
] | null | null | null |
ajunivel/admin.py
|
HumbertoDiego/django-ajustamento-redes-nivelamento
|
47912ccb4ba9fc29709add18bab688af50cfb582
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import dp, successcount
admin.site.register(dp)
admin.site.register(successcount)
| 25.6
| 36
| 0.828125
| 18
| 128
| 5.888889
| 0.555556
| 0.169811
| 0.320755
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085938
| 128
| 5
| 37
| 25.6
| 0.905983
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
b9a0db91ba2a668efc6842c9be97b74572b47106
| 81
|
py
|
Python
|
crontabs/__init__.py
|
darianjug/crontabs
|
ebc8d46f7e09cec66cb13d9d785769b0849a3293
|
[
"MIT"
] | 151
|
2018-01-01T16:03:15.000Z
|
2022-03-15T08:00:07.000Z
|
crontabs/__init__.py
|
darianjug/crontabs
|
ebc8d46f7e09cec66cb13d9d785769b0849a3293
|
[
"MIT"
] | 12
|
2018-01-14T09:52:35.000Z
|
2021-07-02T11:54:34.000Z
|
crontabs/__init__.py
|
darianjug/crontabs
|
ebc8d46f7e09cec66cb13d9d785769b0849a3293
|
[
"MIT"
] | 11
|
2018-01-05T18:04:59.000Z
|
2021-07-23T14:23:39.000Z
|
# flake8: noqa
from .version import __version__
from .crontabs import Cron, Tab
| 16.2
| 32
| 0.777778
| 11
| 81
| 5.363636
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014706
| 0.160494
| 81
| 4
| 33
| 20.25
| 0.852941
| 0.148148
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b9d2bc59900b3e31f5af15e73d6dcddaac2c51b7
| 4,672
|
py
|
Python
|
bin/api/__init__.py
|
koudjo-mvp/webapp
|
69d1bd7f9724fc1630fbfcbbc0ccf3e735e7a980
|
[
"MIT"
] | null | null | null |
bin/api/__init__.py
|
koudjo-mvp/webapp
|
69d1bd7f9724fc1630fbfcbbc0ccf3e735e7a980
|
[
"MIT"
] | 2
|
2019-02-24T23:11:18.000Z
|
2021-03-06T14:59:03.000Z
|
bin/api/__init__.py
|
koudjo-mvp/webapp
|
69d1bd7f9724fc1630fbfcbbc0ccf3e735e7a980
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
__author__ = "P.K.V.M. @koudjo-mvp"
# Build v${BUILD-VERSION} #
# Standard Imports
import os
import sys
import csv
import traceback
import pickle
import pprint
import tarfile,zipfile
import ConfigParser
from time import time, strftime, localtime
from string import zfill, split, strip, Template
import logging
# Imports
# logging
logger = logging.getLogger("api")
class ApiLoggerAdapter(logging.LoggerAdapter):
"""
This custom logging adapter expects the passed in dict-like object to have a
'jobid' and 'pid' keys, whose value in brackets are prepended to the log message.
"""
def process(self, msg, kwargs):
return '[%s] [%s] %s' % (self.extra['pid'], self.extra['jobid'], msg), kwargs
def read_cfg(path):
"""
Reads STD config files
:param path: Path to the standard configuration file
:return: ConfigParser object containing the configuration files data
"""
config = ConfigParser.SafeConfigParser()
fd_ = open(path,'r')
try:
config.readfp(fd_)
except Exception, err:
raise(err)
finally :
fd_.close()
return config
def get_cfg_data(config):
"""
Generates the dictionnary containing all the data in the cfg file
:config: ConfigParser object
:return: error_messages dictionary self.errors, job properties tables infos self.proptables
"""
data = dict()
for params_set in config.sections():
data[params_set] = dict()
for param in config.options(params_set):
data[params_set][param] = config.get(params_set, param)
if logger.isEnabledFor(logging.DEBUG):
logger.debug('params loaded are:\n' + str(param))
return data
def getcmdline_params(inputarguments):
"""
Return a dictionary of all the command line parameters
Parameters:
inputarguments -> your commandline arguments
"""
paramkey = None
for param in inputarguments:
if (paramkey is None) and (param[0]=="-") :
paramkey = param
else :
if not paramkey is None:
self.__parametersDict[paramkey[1:]] = param
paramkey = None
if not paramkey is None:
self.__parametersDict[paramkey[1:]] = ''
paramkey = None
return self.__parametersDict
def untar_it(fname, tarpath=None):
"""
Untar file
:param fname: file name to tar file to be untarred
:param tarpath: folder path containing the files to be untarred
:return:
"""
if (fname.endswith(".tar.gz")):
tar = tarfile.open(fname)
try:
# with tarfile.open(fname) as tar:
if tarpath:
tar.extractall(tarpath)
else:
tar.extractall()
except Exception, err:
raise Exception(str(err))
finally:
tar.close()
else:
raise Exception("Not a tar.gz file: '%s '" % fname)
def unzip_it(fname, zippath=None):
"""
UnZip file
:param fname: file name to zip file to be unzipped
:param tarpath: folder path containing the files to be unzipped
:return:
"""
if (fname.endswith(".zip")):
myzip = zipfile.ZipFile(fname, 'r')
try:
# with zipfile.ZipFile(fname, 'r') as myzip:
if zippath:
myzip.extractall(zippath)
else:
myzip.extractall()
except Exception, err:
raise Exception(str(err))
finally:
myzip.close()
else:
raise Exception("Not a zip file: '%s '" % fname)
def targz_it(fname, tarpath):
"""
Targzzz files
:param fname: file name of the targz file to create
:param tarpath: folder path containing the files to be targzzz
:return:
"""
os.chdir(tarpath)
tar = tarfile.open(fname, "w")
try:
# with tarfile.open(fname, "w") as tar:
for name in [ff for ff in os.listdir(tarpath) if os.path.isfile(os.sep.join([tarpath, ff]))]:
tar.add(name)
except Exception, err:
raise Exception(str(err))
finally:
tar.close()
def zip_it(fname, tarpath):
"""
Zip files
:param fname: file name of the zip file to create
:param tarpath: folder path containing the files to be zipped
:return:
"""
os.chdir(tarpath)
myzip = zipfile.ZipFile(fname, 'w')
try:
# with zipfile.ZipFile(fname, 'w') as myzip:
for name in [ff for ff in os.listdir(tarpath) if os.path.isfile(os.sep.join([tarpath, ff]))]:
myzip.write(name)
except Exception, err:
raise Exception(str(err))
finally:
myzip.close()
| 4,672
| 4,672
| 0.614298
| 581
| 4,672
| 4.893287
| 0.292599
| 0.008442
| 0.031657
| 0.04045
| 0.321491
| 0.287021
| 0.254661
| 0.234963
| 0.234963
| 0.161801
| 0
| 0.000893
| 0.28125
| 4,672
| 1
| 4,672
| 4,672
| 0.845742
| 0.99786
| 0
| 0.385417
| 0
| 0
| 0.040299
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.114583
| null | null | 0.010417
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b9f53cdc68861ea30441a49aab7ebc5994bbda11
| 28
|
py
|
Python
|
Curso_Python_3_UDEMY/desafios/app/negocio/backend.py
|
DanilooSilva/Cursos_de_Python
|
8f167a4c6e16f01601e23b6f107578aa1454472d
|
[
"MIT"
] | null | null | null |
Curso_Python_3_UDEMY/desafios/app/negocio/backend.py
|
DanilooSilva/Cursos_de_Python
|
8f167a4c6e16f01601e23b6f107578aa1454472d
|
[
"MIT"
] | 4
|
2021-04-08T21:54:09.000Z
|
2022-02-10T14:35:13.000Z
|
Curso_Python_3_UDEMY/desafios/app/negocio/backend.py
|
DanilooSilva/Cursos_de_Python
|
8f167a4c6e16f01601e23b6f107578aa1454472d
|
[
"MIT"
] | null | null | null |
def add_nome(nome):
pass
| 14
| 19
| 0.678571
| 5
| 28
| 3.6
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.214286
| 28
| 2
| 20
| 14
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
6a357080f82c5be6e508fdf2122229043098ad9c
| 10,538
|
py
|
Python
|
excavator/utils.py
|
wlonk/env-excavator
|
2bce66396f0c92fefa2b39ea458965174e478faf
|
[
"MIT"
] | 4
|
2015-01-14T16:18:06.000Z
|
2016-01-18T15:48:22.000Z
|
excavator/utils.py
|
wlonk/env-excavator
|
2bce66396f0c92fefa2b39ea458965174e478faf
|
[
"MIT"
] | 7
|
2015-02-23T18:40:35.000Z
|
2016-02-09T02:48:06.000Z
|
excavator/utils.py
|
wlonk/env-excavator
|
2bce66396f0c92fefa2b39ea458965174e478faf
|
[
"MIT"
] | 4
|
2015-02-02T02:38:16.000Z
|
2018-01-12T07:39:24.000Z
|
import os
import datetime
# No set literals because we support Python 2.6.
TRUE_VALUES = set((
True,
'True',
'true',
))
class empty(object):
"""
We use this sentinel object, instead of None, as None is a plausible value
for a default in real Python code.
"""
pass
def get_env_value(name, required=False, default=empty):
"""
Core function for extracting the environment variable.
Enforces mutual exclusivity between `required` and `default` keywords.
The `empty` sentinal value is used as the default `default` value to allow
other function to handle default/empty logic in the appropriate way.
"""
if required and default is not empty:
raise ValueError("Using `default` with `required=True` is invalid")
elif required:
try:
value = os.environ[name]
except KeyError:
raise KeyError(
"Must set environment variable {0}".format(name)
)
else:
value = os.environ.get(name, default)
return value
def env_int(name, required=False, default=empty):
"""Pulls an environment variable out of the environment and casts it to an
integer. If the name is not present in the environment and no default is
specified then a ``ValueError`` will be raised. Similarly, if the
environment value is not castable to an integer, a ``ValueError`` will be
raised.
:param name: The name of the environment variable be pulled
:type name: str
:param required: Whether the environment variable is required. If ``True``
and the variable is not present, a ``KeyError`` is raised.
:type required: bool
:param default: The value to return if the environment variable is not
present. (Providing a default alongside setting ``required=True`` will raise
a ``ValueError``)
:type default: bool
"""
value = get_env_value(name, required=required, default=default)
if value is empty:
raise ValueError(
"`env_int` requires either a default value to be specified, or for "
"the variable to be present in the environment"
)
return int(value)
def env_float(name, required=False, default=empty):
    """Read an environment variable and cast it to a float.

    If the name is not present in the environment and no default is
    specified a ``ValueError`` is raised; likewise if the value cannot be
    cast to a float.

    :param name: name of the environment variable to read
    :type name: str
    :param required: when ``True``, raise ``KeyError`` if the variable is
        absent (mutually exclusive with ``default``)
    :type required: bool
    :param default: value returned when the variable is absent; combining
        it with ``required=True`` raises ``ValueError``
    """
    raw = get_env_value(name, default=default, required=required)
    if raw is not empty:
        return float(raw)
    raise ValueError(
        "`env_float` requires either a default value to be specified, or for "
        "the variable to be present in the environment"
    )
def env_bool(name, truthy_values=TRUE_VALUES, required=False, default=empty):
    """Read an environment variable and interpret it as a boolean.

    The strings ``'True'`` and ``'true'`` are the default *truthy* values.
    If the variable is not present in the environment and no default is
    specified, ``None`` is returned.

    :param name: name of the environment variable to read
    :type name: str
    :param truthy_values: iterable of values considered truthy
    :type truthy_values: iterable
    :param required: when ``True``, raise ``KeyError`` if the variable is
        absent (mutually exclusive with ``default``)
    :type required: bool
    :param default: value returned when the variable is absent; combining
        it with ``required=True`` raises ``ValueError``
    """
    value = get_env_value(name, required=required, default=default)
    if value is empty:
        return None
    # BUG FIX: the membership test previously used the module-level
    # TRUE_VALUES constant unconditionally, silently ignoring a
    # caller-supplied `truthy_values` argument.
    return value in truthy_values
def env_string(name, required=False, default=empty):
    """Read an environment variable and return it as a string.

    If the variable is not present in the environment and no default is
    specified, an empty string is returned.

    :param name: name of the environment variable to read
    :type name: str
    :param required: when ``True``, raise ``KeyError`` if the variable is
        absent (mutually exclusive with ``default``)
    :type required: bool
    :param default: value returned when the variable is absent; combining
        it with ``required=True`` raises ``ValueError``
    """
    raw = get_env_value(name, required=required, default=default)
    return '' if raw is empty else raw
def env_list(name, separator=',', required=False, default=empty):
    """Read an environment variable, split it, and return the pieces.

    The value is split on ``separator``; each piece is stripped of
    surrounding whitespace, and falsy pieces (empty strings) are dropped.
    If the variable is not present and no default is specified, an empty
    list is returned.

    :param name: name of the environment variable to read
    :type name: str
    :param separator: string the raw value is split on
    :type separator: str
    :param required: when ``True``, raise ``KeyError`` if the variable is
        absent (mutually exclusive with ``default``)
    :type required: bool
    :param default: value returned when the variable is absent; combining
        it with ``required=True`` raises ``ValueError``
    """
    raw = get_env_value(name, default=default, required=required)
    if raw is empty:
        return []
    stripped = (piece.strip() for piece in raw.split(separator))
    # List comprehension keeps only non-empty pieces and forces evaluation
    # (relevant on Python 3, where lazy iterators would otherwise leak out).
    return [piece for piece in stripped if piece]
def env_timestamp(name, required=False, default=empty):
    """Read an environment variable and parse it as a Unix timestamp.

    The value is expected to be a float timestamp and is converted to a
    ``datetime.datetime`` via ``datetime.datetime.fromtimestamp``.  If the
    name is not present in the environment and no default is specified, a
    ``ValueError`` is raised.

    :param name: name of the environment variable to read
    :type name: str
    :param required: when ``True``, raise ``KeyError`` if the variable is
        absent (mutually exclusive with ``default``)
    :type required: bool
    :param default: value returned as-is (not parsed) when the variable is
        absent; combining it with ``required=True`` raises ``ValueError``
    """
    if required and default is not empty:
        raise ValueError("Using `default` with `required=True` is invalid")
    # Fetch with the sentinel so we can distinguish "absent" from any
    # caller-supplied default, which is returned unparsed.
    raw = get_env_value(name, required=required, default=empty)
    if raw is empty:
        if default is not empty:
            return default
        raise ValueError(
            "`env_timestamp` requires either a default value to be specified, "
            "or for the variable to be present in the environment"
        )
    return datetime.datetime.fromtimestamp(float(raw))
def env_iso8601(name, required=False, default=empty):
    """Read an environment variable and parse it as an iso8601 datetime.

    The value is expected to be an iso8601-formatted string and is parsed
    to a ``datetime.datetime`` with the third-party ``iso8601`` library.
    If the name is not present in the environment and no default is
    specified, a ``ValueError`` is raised.

    :param name: name of the environment variable to read
    :type name: str
    :param required: when ``True``, raise ``KeyError`` if the variable is
        absent (mutually exclusive with ``default``)
    :type required: bool
    :param default: value returned as-is (not parsed) when the variable is
        absent; combining it with ``required=True`` raises ``ValueError``
    :raises ImportError: if the ``iso8601`` library is not installed
    """
    try:
        import iso8601
    except ImportError:
        raise ImportError(
            'Parsing iso8601 datetime strings requires the iso8601 library'
        )
    if required and default is not empty:
        raise ValueError("Using `default` with `required=True` is invalid")
    # Fetch with the sentinel so we can distinguish "absent" from any
    # caller-supplied default, which is returned unparsed.
    raw = get_env_value(name, required=required, default=empty)
    if raw is empty:
        if default is not empty:
            return default
        raise ValueError(
            "`env_iso8601` requires either a default value to be specified, or "
            "for the variable to be present in the environment"
        )
    return iso8601.parse_date(raw)
def get(name, required=False, default=empty, type=None):
    """Generic getter for environment variables.

    Dispatches to the appropriate typed helper based on ``type``, handling
    defaults and required-ness through that helper.  Unknown (or ``None``)
    types fall back to :func:`env_string`.

    :param name: name of the environment variable to read
    :type name: str
    :param required: when ``True``, raise ``KeyError`` if the variable is
        absent (mutually exclusive with ``default``)
    :type required: bool
    :param default: value returned when the variable is absent; combining
        it with ``required=True`` raises ``ValueError``
    :param type: expected type, given either as a string key or a type
        object (note: the parameter name shadows the builtin ``type`` but
        is kept for backward compatibility with keyword callers)
    :type type: str or type
    """
    dispatch = {
        'int': env_int,
        int: env_int,
        'bool': env_bool,
        bool: env_bool,
        'string': env_string,
        str: env_string,
        'list': env_list,
        list: env_list,
        'timestamp': env_timestamp,
        datetime.time: env_timestamp,
        'datetime': env_iso8601,
        datetime.datetime: env_iso8601,
    }
    caster = dispatch.get(type, env_string)
    return caster(name, required=required, default=default)
| 35.481481
| 82
| 0.685614
| 1,445
| 10,538
| 4.96609
| 0.110035
| 0.089744
| 0.082776
| 0.060201
| 0.741918
| 0.723384
| 0.723384
| 0.723384
| 0.723384
| 0.723384
| 0
| 0.004989
| 0.239229
| 10,538
| 296
| 83
| 35.601351
| 0.890109
| 0.586259
| 0
| 0.33
| 0
| 0
| 0.192449
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.09
| false
| 0.01
| 0.05
| 0
| 0.28
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
dbf1afa57990cf83dffaab773b1b2f3f4e697d64
| 86
|
py
|
Python
|
dir_to_include/SomeGreatClassToInclude.py
|
acwikla-novela/test_release_4
|
295367063d2bcf96ffc375b92a5fb8e55e568626
|
[
"Apache-2.0"
] | null | null | null |
dir_to_include/SomeGreatClassToInclude.py
|
acwikla-novela/test_release_4
|
295367063d2bcf96ffc375b92a5fb8e55e568626
|
[
"Apache-2.0"
] | null | null | null |
dir_to_include/SomeGreatClassToInclude.py
|
acwikla-novela/test_release_4
|
295367063d2bcf96ffc375b92a5fb8e55e568626
|
[
"Apache-2.0"
] | null | null | null |
class SomeGreatClassWeWantInPackage:
    """Minimal placeholder class with no behaviour.

    Given its location (``dir_to_include``), it presumably exists to verify
    that this directory is included in the built package — TODO confirm.
    """

    def some_excellent_method(self):
        """Do nothing and return ``None``."""
        pass
| 28.666667
| 36
| 0.767442
| 8
| 86
| 8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.186047
| 86
| 3
| 37
| 28.666667
| 0.914286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0.333333
| 0
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
e04a91888502b6ef0e21afceb8bd071a16e2605d
| 46
|
py
|
Python
|
dumbthingtester.py
|
jakehyvonen/PaintByMotors
|
6ec568633e0f8bbddeb5d1731d21144a2b6a5ced
|
[
"MIT"
] | null | null | null |
dumbthingtester.py
|
jakehyvonen/PaintByMotors
|
6ec568633e0f8bbddeb5d1731d21144a2b6a5ced
|
[
"MIT"
] | null | null | null |
dumbthingtester.py
|
jakehyvonen/PaintByMotors
|
6ec568633e0f8bbddeb5d1731d21144a2b6a5ced
|
[
"MIT"
] | null | null | null |
# BUG FIX: `thing` was the bare int 1, so `thing[0]` on the next line raised
# TypeError ('int' object is not subscriptable).  Wrapping the value in a
# one-element list makes the index expression valid while printing the same
# value.
thing = [1]
print('thing[0]: ' + str(thing[0]))
| 15.333333
| 35
| 0.565217
| 8
| 46
| 3.25
| 0.625
| 0.461538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 0.152174
| 46
| 3
| 35
| 15.333333
| 0.589744
| 0
| 0
| 0
| 0
| 0
| 0.212766
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
e056640dd396d7a3640171cb355a6cfdb8c05bdf
| 188
|
py
|
Python
|
sdv/metrics/relational.py
|
bjg290/SDV
|
0a7c88b6743207bd3f193f7e60686fd5c03c1dfb
|
[
"MIT"
] | 817
|
2020-01-20T13:18:59.000Z
|
2022-03-31T16:08:22.000Z
|
sdv/metrics/relational.py
|
ppeddada97/SDV
|
6fbddedcd1ed02adcd7a0ec3698f280a9a86e01c
|
[
"MIT"
] | 484
|
2020-01-14T10:02:03.000Z
|
2022-03-31T22:22:12.000Z
|
sdv/metrics/relational.py
|
ppeddada97/SDV
|
6fbddedcd1ed02adcd7a0ec3698f280a9a86e01c
|
[
"MIT"
] | 115
|
2020-01-08T00:43:54.000Z
|
2022-03-31T15:49:08.000Z
|
"""Metrics to evaluate the quality of Synthetic Relational Data.
This subpackage exists only to enable importing sdmetrics as part of sdv.
"""
from sdmetrics.multi_table import * # noqa
| 31.333333
| 73
| 0.781915
| 27
| 188
| 5.407407
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.159574
| 188
| 5
| 74
| 37.6
| 0.924051
| 0.755319
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
e0783c0b387228a79df8212f5d40480c4b022a02
| 167
|
py
|
Python
|
tests/__init__.py
|
aaman123/faceswap
|
a5825c3457b062c1824ef3f8b02e4f3fa4c2217f
|
[
"MIT"
] | 2
|
2021-11-11T08:29:01.000Z
|
2021-11-11T08:34:50.000Z
|
tests/__init__.py
|
aaman123/faceswap
|
a5825c3457b062c1824ef3f8b02e4f3fa4c2217f
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
aaman123/faceswap
|
a5825c3457b062c1824ef3f8b02e4f3fa4c2217f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
""" Use custom Importer for importing Keras for tests """
import sys
from lib.utils import KerasFinder
# Insert at position 0 so this finder is consulted before the default
# finders already on sys.meta_path.
sys.meta_path.insert(0, KerasFinder())
| 20.875
| 57
| 0.754491
| 25
| 167
| 5
| 0.84
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013793
| 0.131737
| 167
| 7
| 58
| 23.857143
| 0.848276
| 0.431138
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.