content
stringlengths 0
1.05M
| origin
stringclasses 2
values | type
stringclasses 2
values |
|---|---|---|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# =============================================================================
# Ural URL Extraction Unit Tests
# =============================================================================
from ural import urls_from_text
# Sample social-media text containing five well-formed URLs that
# ``urls_from_text`` is expected to extract (including one with parentheses
# and a fragment, which are classic tokenizer edge cases).
TEXT = """Facial-recognition technology is advancing faster than the people who worry about it have been able to think of ways to manage it." @NewYorker on the manifold challenges of harnessing a promising, but frightening, technology. http://mitsha.re/Qg1g30mVD78
Today @jovialjoy's @AJLUnited and @GeorgetownCPT are launching the Safe Face Pledge, which calls for facial analysis technology companies to commit to transparency in government contracts and mitigate potential abuse of their technology. http://www.safefacepledge.org #safefacepledge
Now accepting submissions for the 2018 Excellence in Local News Awards http://twib.in/l/xLzxjnpMXx7X via @medium http://foo.com/blah_(wikipedia)#cite-1
Directed to help #Alzheimers patients + others w/ impaired memory by providing intuitive ways to benefit from large amounts of personal data Check out this post by @physicspod in @singularityhub http://on.su.org/2rsPeXh"""

# Expected extraction result for TEXT.
# Idiom fix: a set literal instead of ``set([...])`` (no throwaway list).
REF_SET = {
    "http://mitsha.re/Qg1g30mVD78",
    "http://www.safefacepledge.org",
    "http://twib.in/l/xLzxjnpMXx7X",
    "http://on.su.org/2rsPeXh",
    "http://foo.com/blah_(wikipedia)#cite-1",
}

# A truncated URL (ends with "...") — the extractor is expected to salvage
# only the leading valid host fragment.
TEXT_WITH_INVALID_URLS = """
This is a baaaad url: https://www.bfmtvregain-de-popularite-pour-emmanuel-macron-et-edouard-phi...
"""
# (input_text, expected_urls) fixtures for urls_from_text.  Order of the
# expected list matters: extraction must preserve appearance order.
# Cases cover: plain prose, parentheses-wrapped URLs with inner parens,
# markdown links, trailing punctuation, quoting, and bracketed URLs.
TESTS = [
    (
        "please visit my website, https://oilab.eu/stijn, it's great",
        ['https://oilab.eu/stijn']
    ),
    (
        # Outer parens belong to the prose; inner parens belong to the URL.
        'I recently read this in a new york times article (https://nytimes.com/some-url-with-(parentheses))',
        ['https://nytimes.com/some-url-with-(parentheses)']
    ),
    (
        '"Bezoek alsjeblieft de websites van het [Juridisch Loket](https://www.juridischloket.nl/), [Sociaal Verhaal](http://www.sociaalverhaal.com/) en/of de [Rechtswinkel](http://www.rechtswinkel.nl/). Reddit is niet een geschikte plek voor juridisch advies."',
        [
            'https://www.juridischloket.nl/',
            'http://www.sociaalverhaal.com/',
            'http://www.rechtswinkel.nl/'
        ]
    ),
    (
        # Trailing "?", "." and "#?." must be stripped from the match.
        'What do you think of https://lemonde.fr? http://www.lemonde.fr. It is good http://www.lemonde.fr#?.',
        [
            'https://lemonde.fr',
            'http://www.lemonde.fr',
            'http://www.lemonde.fr'
        ]
    ),
    (
        'This is: "http://www.liberation.fr" and \'https://lefigaro.fr\'.',
        [
            'http://www.liberation.fr',
            'https://lefigaro.fr'
        ]
    ),
    (
        # Markdown link with a stray leading space inside the parens.
        'This is a [markdown]( https://lefigaro.fr) link.',
        ['https://lefigaro.fr']
    ),
    (
        '[http://www.lemonde.fr]',
        ['http://www.lemonde.fr']
    )
]
class TestUrlsFromText(object):
    """Unit tests for ural.urls_from_text."""

    def test_basics(self):
        # The large sample text must yield exactly the reference URL set.
        assert set(urls_from_text(TEXT)) == REF_SET
        # Each small fixture must produce its expected URLs, in order.
        for text, expected in TESTS:
            assert list(urls_from_text(text)) == expected

    def test_invalid_urls(self):
        # Only the leading valid host fragment of the truncated URL survives.
        extracted = set(urls_from_text(TEXT_WITH_INVALID_URLS))
        assert extracted == {'https://www.bfmtvregain'}
|
nilq/baby-python
|
python
|
"""Train the ASR model.
Tested with Python 3.5, 3.6 and 3.7.
No Python 2 compatibility is being provided.
"""
import time
import tensorflow as tf
from asr.input_functions import input_fn_generator
from asr.model import CTCModel
from asr.params import FLAGS, get_parameters
from asr.util import storage
# Deterministic seed from the CLI flag; a flag value of 0 means
# "no fixed seed", in which case wall-clock time is used instead.
RANDOM_SEED = FLAGS.random_seed if FLAGS.random_seed != 0 else int(time.time())
def main(_):
    """TensorFlow starting routine.

    Trains the CTC model for ``FLAGS.max_epochs`` epochs, evaluating on the
    dev set after every epoch.  Side effects: may delete and then writes
    checkpoints/summaries under ``FLAGS.train_dir``.
    """
    # Delete old model data if requested (irreversible; guarded by FLAGS.delete).
    storage.maybe_delete_checkpoints(FLAGS.train_dir, FLAGS.delete)
    # Logging information about the run (TF version + git provenance + hyperparameters).
    print('TensorFlow-Version: {}; Tag-Version: {}; Branch: {}; Commit: {}\nParameters: {}'
          .format(tf.VERSION, storage.git_latest_tag(), storage.git_branch(),
                  storage.git_revision_hash(), get_parameters()))
    # Setup TensorFlow run configuration and hooks.
    config = tf.estimator.RunConfig(
        model_dir=FLAGS.train_dir,
        tf_random_seed=RANDOM_SEED,
        save_summary_steps=FLAGS.log_frequency,
        session_config=tf.ConfigProto(
            log_device_placement=FLAGS.log_device_placement,
            # Grow GPU memory on demand instead of grabbing it all upfront.
            gpu_options=tf.GPUOptions(allow_growth=FLAGS.allow_vram_growth)
        ),
        keep_checkpoint_max=5,
        log_step_count_steps=FLAGS.log_frequency,
        train_distribute=None
    )
    model = CTCModel()
    # Construct the estimator that embodies the model.
    estimator = tf.estimator.Estimator(
        model_fn=model.model_fn,
        model_dir=FLAGS.train_dir,
        config=config
    )
    # Train the model.  Epoch 1 uses the 'train_batch' input
    # (presumably a curriculum/sorted ordering — confirm with input_functions).
    curriculum_train_input_fn = input_fn_generator('train_batch')
    estimator.train(input_fn=curriculum_train_input_fn, hooks=None)
    # Evaluate the trained model.
    dev_input_fn = input_fn_generator('dev')
    evaluation_result = estimator.evaluate(input_fn=dev_input_fn, hooks=None)
    tf.logging.info('Evaluation results of epoch {}: {}'.format(1, evaluation_result))
    # Train the model and evaluate after each epoch; epochs 2+ use bucketed input.
    for epoch in range(2, FLAGS.max_epochs + 1):
        # Train the model.
        train_input_fn = input_fn_generator('train_bucket')
        estimator.train(input_fn=train_input_fn, hooks=None)
        # L8ER: Possible replacement for evaluate every epoch:
        # https://www.tensorflow.org/api_docs/python/tf/contrib/estimator/InMemoryEvaluatorHook
        # Evaluate the trained model.
        dev_input_fn = input_fn_generator('dev')
        evaluation_result = estimator.evaluate(input_fn=dev_input_fn, hooks=None)
        tf.logging.info('Evaluation results of epoch {}: {}'.format(epoch, evaluation_result))
if __name__ == '__main__':
    # General TensorFlow setup: verbose logging and a fixed graph-level seed
    # so runs are reproducible given the same RANDOM_SEED.
    tf.logging.set_verbosity(tf.logging.INFO)
    tf.set_random_seed(RANDOM_SEED)
    # Run training (tf.app.run parses flags, then calls main()).
    tf.app.run()
|
nilq/baby-python
|
python
|
from typing import List, Union
from datetime import datetime
from mongoengine import *
from regex import F
class Prediction(Document):
    """
    The GFI prediction result for an open issue.
    This collection will be updated periodically and used by backend and bot for GFI recommendation.
    Attributes:
        owner, name, number: uniquely identifies a GitHub issue.
        threshold: the number of in-repository commits that disqualify one as a newcomer,
            can be one to five. For more details please check the ICSE'22 paper.
        probability: the modeled probability that the issue is a GFI.
        last_updated: the last time this prediction result was updated,
            necessary for incremental update.
    """

    owner: str = StringField(required=True)
    name: str = StringField(required=True)
    number: int = IntField(required=True)
    threshold: int = IntField(required=True, min_value=1, max_value=5)
    probability: float = FloatField(required=True)
    last_updated: datetime = DateTimeField(required=True)

    # One prediction per (issue, threshold); probability indexed for ranked queries.
    meta = {
        "indexes": [
            {"fields": ["owner", "name", "number", "threshold"], "unique": True},
            {"fields": ["probability"]},
        ]
    }
class TrainingSummary(Document):
    """
    Describes model training result for a specific repository and threshold.
    This collection will be used to communicate the effectiveness of our model to users.
    Attributes:
        owner, name, threshold: uniquely identifies a GitHub repository and a training setting.
            If owner="", name="", then this is a global summary result.
        issues_train, issues_test: issue data splits used for training/testing.
        model_90_file, model_full_file: relative paths to the trained model files,
            with repository as root (presumably a 90%-split model and a
            full-data model — TODO confirm naming).
        n_resolved_issues: total number of resolved issues in this repository.
        n_newcomer_resolved: the number of issues resolved by newcomers in this repository.
        accuracy: the accuracy of the model on the training data.
        auc: the area under the ROC curve.
        last_updated: the last time this training summary was updated.
    """

    owner: str = StringField(required=True)
    name: str = StringField(required=True)
    issues_train: List[list] = ListField(ListField(), default=[])
    issues_test: List[list] = ListField(ListField(), default=[])
    threshold: int = IntField(required=True, min_value=1, max_value=5)
    model_90_file: str = StringField(required=True)
    model_full_file: str = StringField(required=True)
    n_resolved_issues: int = IntField(required=True)
    n_newcomer_resolved: int = IntField(required=True)
    accuracy: float = FloatField(required=True)
    auc: float = FloatField(required=True)
    last_updated: datetime = DateTimeField(required=True)

    # One summary per (repo, threshold).
    meta = {
        "indexes": [
            {"fields": ["owner", "name", "threshold"], "unique": True},
        ]
    }
class Dataset(Document):
    """
    The final dataset involved for RecGFI training
    All attributes are restored at a given time
    Attributes:
        owner, name, number: uniquely identifies a GitHub issue
        created_at: The time when the issue is created
        closed_at: The time when the issue is closed
        before: The time when all features in this document is computed
        resolver_commit_num: Issue resolver's commits to this repo, before the issue is resolved
            if -1, means that the issue is still open
        ---------- Content ----------
        title: Issue title
        body: Issue description
        len_title: Length of issue title
        len_body: Length of issue description
        n_code_snips: The number of code snippets in issue body
        n_urls: The number of URLs in issue body
        n_imgs: The number of imgs in issue body
        coleman_liau_index: Readability index
        flesch_reading_ease: Readability index
        flesch_kincaid_grade: Readability index
        automated_readability_index: Readability index
        labels: The number of different labels
        ---------- Background ----------
        reporter_feat: Features for issue reporter
        owner_feat: Features for repository owner
        prev_resolver_commits: A list of the commits made by resolver for all previously resolved issues
        n_stars: Number of stars
        n_pulls: Number of pull requests
        n_commits: Number of commits
        n_contributors: Number of contributors
        n_closed_issues: Number of closed issues
        n_open_issues: Number of open issues
        r_open_issues: Ratio of open issues over all issues
        issue_close_time: Median issue close time (in seconds)
        ---------- Dynamics ----------
        comments: All issue comments
        events: All issue events, excluding comments
        comment_users: Features for all involved commenters
        event_users: Features for all involved users
    """

    class LabelCategory(EmbeddedDocument):
        """
        Each attribute represents the number of labels under this type.
        """

        bug: int = IntField(default=0)
        feature: int = IntField(default=0)
        test: int = IntField(default=0)
        build: int = IntField(default=0)
        doc: int = IntField(default=0)
        coding: int = IntField(default=0)
        enhance: int = IntField(default=0)
        gfi: int = IntField(default=0)
        medium: int = IntField(default=0)
        major: int = IntField(default=0)
        triaged: int = IntField(default=0)
        untriaged: int = IntField(default=0)

    class UserFeature(EmbeddedDocument):
        """User features in a dataset
        Attributes:
            name: GitHub username
            n_commits: Number of commits the user made to this repository
            n_issues: Number of issues the user opened in this repository
            n_pulls: Number of pull requests the user opened in this repository
            resolver_commits: For all resolved issue opened by this user,
                number of the resolver's commits prior to issue resolution
        """

        name: str = StringField(required=True)
        n_commits: int = IntField(required=True, min_value=0)
        n_issues: int = IntField(required=True, min_value=0)
        n_pulls: int = IntField(required=True, min_value=0)
        resolver_commits: List[int] = ListField(IntField(min_value=0), default=[])

    owner: str = StringField(required=True)
    name: str = StringField(required=True)
    number: int = IntField(required=True)
    created_at: datetime = DateTimeField(required=True)
    closed_at: datetime = DateTimeField(null=True)
    before: datetime = DateTimeField(required=True)
    resolver_commit_num: int = IntField(required=True)
    # ---------- Content ----------
    title: str = StringField(required=True)
    body: str = StringField(required=True)
    len_title: int = IntField(required=True)
    len_body: int = IntField(required=True)
    n_code_snips: int = IntField(required=True)
    n_urls: int = IntField(required=True)
    n_imgs: int = IntField(required=True)
    coleman_liau_index: float = FloatField(required=True)
    flesch_reading_ease: float = FloatField(required=True)
    flesch_kincaid_grade: float = FloatField(required=True)
    automated_readability_index: float = FloatField(required=True)
    labels: List[str] = ListField(StringField(), default=[])
    label_category: LabelCategory = EmbeddedDocumentField(LabelCategory, required=True)
    # ---------- Background ----------
    reporter_feat: UserFeature = EmbeddedDocumentField(UserFeature, required=True)
    owner_feat: UserFeature = EmbeddedDocumentField(UserFeature, required=True)
    prev_resolver_commits: List[int] = ListField(IntField(), default=[])
    n_stars: int = IntField(required=True)
    n_pulls: int = IntField(required=True)
    n_commits: int = IntField(required=True)
    n_contributors: int = IntField(required=True)
    n_closed_issues: int = IntField(required=True)
    n_open_issues: int = IntField(required=True)
    r_open_issues: float = FloatField(required=True)
    issue_close_time: float = FloatField(required=True)
    # ---------- Dynamics ----------
    comments: List[str] = ListField(StringField(), default=[])
    events: List[str] = ListField(StringField(), default=[])
    # Annotation fix: these are list fields, so they hold List[UserFeature],
    # not a single UserFeature.
    comment_users: List[UserFeature] = EmbeddedDocumentListField(UserFeature, default=[])
    event_users: List[UserFeature] = EmbeddedDocumentListField(UserFeature, default=[])

    # One feature snapshot per (issue, observation time).
    meta = {
        "indexes": [
            {"fields": ["owner", "name", "number", "before"], "unique": True},
        ]
    }
class IssueEvent(DynamicEmbeddedDocument):
    """
    Object representing issue events.
    For assigned, unassigned, labeled, unlabeled, referenced,
    cross-referenced, and commented events, additional fields are available.
    This document may contain **additional** fields depending on the specific event
    (DynamicEmbeddedDocument accepts undeclared fields).
    Attributes:
        type: Type of the event
        time: The time when this event happened, can be null for some events
        actor: The GitHub user (login name) associated with the event, can be null for some events
    Attributes (for commented):
        comment: The comment text
        commenter: The commenter GitHub username
    Attributes (for labeled, unlabeled):
        label: The label name
    Attributes (for assigned, unassigned):
        assignee: The assignee name
    Attributes (for referenced, cross-referenced):
        source: The source of reference (an issue number), may be null
        commit: The commit SHA of the reference, may be null
    """

    type: str = StringField(required=True)
    time: datetime = DateTimeField(null=True)
    actor: str = StringField(null=True)
    comment: str = StringField(null=True)
    commenter: str = StringField(null=True)
    label: str = StringField(null=True)
    assignee: str = StringField(null=True)
    source: int = IntField(null=True)
    commit: str = StringField(null=True)
class ResolvedIssue(Document):
    """
    Additional issue information for issue that are resolved by a developer.
    These issues will be used as the training dataset for RecGFI training.
    """

    owner: str = StringField(required=True)
    name: str = StringField(required=True)
    number: int = IntField(required=True)
    created_at: datetime = DateTimeField(required=True)
    resolved_at: datetime = DateTimeField(required=True)
    resolver: str = StringField(required=True)  # Issue resolver's GitHub user name
    # If int, the PR number that resolved this issue.
    # If string, the commit hash that resolved this issue
    resolved_in: Union[int, str] = DynamicField(required=True)
    # Issue resolver's commits to this repo, before the issue is resolved
    resolver_commit_num: int = IntField(required=True)
    # Full event timeline of the issue (comments included as events here).
    events: List[IssueEvent] = ListField(EmbeddedDocumentField(IssueEvent))

    meta = {"indexes": [{"fields": ["owner", "name", "number"], "unique": True}]}
class OpenIssue(Document):
    """
    Additional issue information for currently open issues.
    These issues will be used as the testing dataset for RecGFI training.
    """

    owner: str = StringField(required=True)
    name: str = StringField(required=True)
    number: int = IntField(required=True)
    created_at: datetime = DateTimeField(required=True)
    updated_at: datetime = DateTimeField(required=True)
    # Full event timeline of the issue so far.
    events: List[IssueEvent] = ListField(EmbeddedDocumentField(IssueEvent))

    meta = {"indexes": [{"fields": ["owner", "name", "number"], "unique": True}]}
class Repo(Document):
    """
    Repository statistics for both RecGFI training and web app.
    Attributes:
        created_at: The time when the repository was created in database
        updated_at: The time when the repository was last updated in database
        repo_created_at: The time when this repository is created in GitHub
        owner, name: Uniquely identifies a GitHub repository
        topics: A list of topics associated with the repository
        language: Main programming language (as returned by GitHub), can be None
        languages: All programming languages and their lines of code
        description: Repository description
        readme: Repository README content
        median_issue_close_time: The median time it takes to close an issue (in seconds)
        monthly_stars, monthly_commits, monthly_issues, monthly_pulls:
            Four time series describing number of new stars, commits, issues, and pulls
            in each month since repository creation
    """

    class LanguageCount(EmbeddedDocument):
        # One (language, line-count) entry of the repository's language breakdown.
        language: str = StringField(required=True)
        count: int = IntField(required=True)

    class MonthCount(EmbeddedDocument):
        # One data point of a monthly time series.
        month: datetime = DateTimeField(required=True)
        count: int = IntField(required=True, min_value=0)

    created_at: datetime = DateTimeField(required=True)
    updated_at: datetime = DateTimeField(required=True)
    repo_created_at: datetime = DateTimeField(required=True)
    owner: str = StringField(required=True)
    name: str = StringField(required=True)
    topics: List[str] = ListField(StringField(), default=[])
    language: str = StringField(null=True)
    languages: List[LanguageCount] = EmbeddedDocumentListField(
        LanguageCount, default=[]
    )
    description: str = StringField(null=True)
    readme: str = StringField(null=True)
    median_issue_close_time: float = FloatField(null=True)
    monthly_stars: List[MonthCount] = EmbeddedDocumentListField(MonthCount, default=[])
    monthly_commits: List[MonthCount] = EmbeddedDocumentListField(
        MonthCount, default=[]
    )
    monthly_issues: List[MonthCount] = EmbeddedDocumentListField(MonthCount, default=[])
    monthly_pulls: List[MonthCount] = EmbeddedDocumentListField(MonthCount, default=[])

    meta = {"indexes": [{"fields": ["owner", "name"], "unique": True}]}
class RepoCommit(Document):
    """Repository commit statistics for RecGFI training."""

    owner: str = StringField(required=True)
    name: str = StringField(required=True)
    sha: str = StringField(required=True)  # commit SHA, part of the unique key
    # GitHub username of the commit author, can be None
    author: str = StringField(null=True)
    authored_at: datetime = DateTimeField(required=True)
    # GitHub username of the committer, can be None
    committer: str = StringField(null=True)
    committed_at: datetime = DateTimeField(required=True)
    message: str = StringField(required=True)  # commit message

    meta = {"indexes": [{"fields": ["owner", "name", "sha"], "unique": True}]}
class RepoIssue(Document):
    """
    Repository issue statistics for RecGFI training.
    Note that pull requests are also included in this collection
    """

    owner: str = StringField(required=True)
    name: str = StringField(required=True)
    number: int = IntField(required=True, min_value=0)
    # GitHub username of the issue reporter / PR submitter
    user: str = StringField(required=True)
    state: str = StringField(required=True, choices=("open", "closed"))
    created_at: datetime = DateTimeField(
        required=True
    )  # The time when this issue/PR is created
    closed_at: datetime = DateTimeField(
        null=True
    )  # The time when this issue/PR is closed
    is_pull: bool = BooleanField(required=True)  # Whether the issue is a pull request
    merged_at: datetime = DateTimeField(
        null=True
    )  # If a PR, the time when this PR is merged
    title: str = StringField(required=True)
    body: str = StringField(null=True)  # issue/PR description, may be empty on GitHub
    labels: List[str] = ListField(StringField(required=True))

    meta = {"indexes": [{"fields": ["owner", "name", "number"], "unique": True}]}
class RepoStar(Document):
    """Repository star statistics for RecGFI training."""

    owner: str = StringField(required=True)
    name: str = StringField(required=True)
    # GitHub username who starred this repository
    user: str = StringField(required=True)
    starred_at: datetime = DateTimeField(required=True)  # Time of the starred event

    # A user can star a given repository at most once.
    meta = {"indexes": [{"fields": ["owner", "name", "user"], "unique": True}]}
class User(Document):
    """User statistics for RecGFI training (TODO: This documentation is not finalized yet)"""

    class Issue(EmbeddedDocument):
        # repo info
        owner: str = StringField(required=True)
        name: str = StringField(required=True)
        repo_stars: int = IntField(required=True, min_value=0)
        # issue number (state can not be updated incrementally)
        state: str = StringField(required=True)
        number: int = IntField(required=True, min_value=0)
        created_at: datetime = DateTimeField(required=True)

    class Pull(EmbeddedDocument):
        # repo info
        owner: str = StringField(required=True)
        name: str = StringField(required=True)
        repo_stars: int = IntField(required=True, min_value=0)
        # pull request number (state can not be updated incrementally)
        state: str = StringField(required=True)
        number: int = IntField(required=True)
        created_at: datetime = DateTimeField(required=True)

    class Review(EmbeddedDocument):
        # repo info
        owner: str = StringField(required=True)
        name: str = StringField(required=True)
        repo_stars: int = IntField(required=True, min_value=0)
        # review number & state
        number: int = IntField(required=True)
        state: str = StringField(required=True)
        created_at: datetime = DateTimeField(required=True)

    class CommitContribution(EmbeddedDocument):
        # repo info
        owner: str = StringField(required=True)
        name: str = StringField(required=True)
        repo_stars: int = IntField(required=True, min_value=0)
        # commit count
        commit_count: int = IntField(required=True, min_value=0)
        created_at: datetime = DateTimeField(required=True)

    _created_at: datetime = DateTimeField(required=True)  # created in the database
    _updated_at: datetime = DateTimeField(required=True)  # updated in the database
    name: str = StringField(null=True)
    login: str = StringField(required=True)  # GitHub login, the unique key
    # issues, issueComments, pulls (use end cursor to paginate)
    # Annotation fix: EmbeddedDocumentListField holds a list of documents.
    issues: List[Issue] = EmbeddedDocumentListField(Issue)
    pulls: List[Pull] = EmbeddedDocumentListField(Pull)
    # reviews, commits (use date to paginate)
    pull_reviews: List[Review] = EmbeddedDocumentListField(Review)
    commit_contributions: List[CommitContribution] = EmbeddedDocumentListField(
        CommitContribution
    )

    meta = {
        "indexes": [
            {"fields": ["login"], "unique": True},
            {"fields": ["issues.owner", "issues.name"]},
            {"fields": ["issues.created_at"]},
            {"fields": ["pulls.owner", "pulls.name"]},
            {"fields": ["pulls.created_at"]},
            {"fields": ["pull_reviews.owner", "pull_reviews.name"]},
            {"fields": ["pull_reviews.created_at"]},
            {"fields": ["commit_contributions.owner", "commit_contributions.name"]},
            {"fields": ["commit_contributions.created_at"]},
        ]
    }
class GithubTokens(Document):
    """GitHub OAuth credentials for a GitHub App."""

    app_name: str = StringField(required=True)
    client_id: str = StringField(required=True)
    client_secret: str = StringField(required=True)

    # Both the app name and its client id must be unique.
    meta = {
        "indexes": [
            {"fields": ["client_id"], "unique": True},
            {"fields": ["app_name"], "unique": True},
        ]
    }
class GfiUsers(Document):
    """User statistics for GFI-Bot Web App users."""

    github_id: int = IntField(required=True)
    github_access_token: str = StringField(required=True)
    github_login: str = StringField(required=True)
    github_name: str = StringField(required=True)
    # Whether the user installed the GitHub App (part of both unique keys below).
    is_github_app_user: bool = BooleanField(required=True)
    github_avatar_url: str = StringField(required=False)
    github_url: str = StringField(required=False)
    github_email: str = StringField(required=False)
    twitter_user_name = StringField(required=False)

    meta = {
        "indexes": [
            {"fields": ["github_id", "is_github_app_user"], "unique": True},
            {"fields": ["github_login", "is_github_app_user"], "unique": True},
            {"fields": ["github_email"]},
            {"fields": ["twitter_user_name"]},
        ]
    }
class GfiQueries(Document):
    """GFI-Bot Web App user queries and their processing state."""

    name: str = StringField(required=True)
    owner: str = StringField(required=True)
    user_github_login: str = StringField(required=True)
    is_pending: bool = BooleanField(required=True)
    is_finished: bool = BooleanField(required=True)
    _created_at: datetime = DateTimeField(required=True)
    _finished_at: datetime = DateTimeField(required=False)

    # BUG FIX: this attribute was misspelled ``mata``, so MongoEngine ignored
    # it entirely and neither the unique (name, owner) index nor the
    # user_github_login index was ever created.
    meta = {
        "indexes": [
            {"fields": ["name", "owner"], "unique": True},
            {"fields": ["user_github_login"]},
        ]
    }
|
nilq/baby-python
|
python
|
def func(a):
    """Return *a* incremented by one."""
    return 1 + a


# Apply func to 0..9; equivalent to the list comprehension form.
ls = list(map(func, range(10)))
|
nilq/baby-python
|
python
|
from lxml import etree
from re import search
class Response:
    """Parsers for XML payloads wrapped in a ``<RetornoXML>`` envelope.

    The payload is extracted with regular expressions before being handed to
    lxml, because the envelope itself is not necessarily well-formed XML.
    """

    @classmethod
    def resultDict(cls, strResult):
        """Return a ``{tag: text}`` dict of every non-empty element.

        ``strResult`` is the raw response string; raises AttributeError if the
        ``<RetornoXML>`` envelope is absent (``search`` returns None).
        """
        # BUG FIX: patterns are now raw strings. "\<" in a normal string is an
        # invalid escape sequence (DeprecationWarning today, a SyntaxError in
        # future Python versions).
        responseGroup = search(r"\<RetornoXML>(.*)\</Retorno", strResult).group(1)
        res = {}
        root = etree.fromstring(responseGroup)
        for i in root.iter():
            text = i.text
            text = text.encode("utf-8", "replace") if text else None
            if text:
                # setdefault keeps the first occurrence of a repeated tag.
                res.setdefault("{tag}".format(tag=i.tag), "{text}".format(text=text))
        return res

    @classmethod
    def getTail(cls, strResult):
        """Return a list of per-NFe dicts from the body after ``</Cabecalho>``.

        On any parsing failure the exception object itself is returned
        (NOTE(review): returning the exception instead of raising forces every
        caller to type-check the result — consider raising instead).
        """
        responseGroup = search(r"\<RetornoXML>(.*)\</Retorno", strResult).group(1)
        responseGroup = search(r"\</Cabecalho>(.*)\</Retorno", responseGroup).group(1)
        try:
            # Wrap the fragment so multiple sibling elements parse as one tree.
            root = "<root>" + responseGroup + "</root>"
            tree = etree.fromstring(root)
            nfeData = []
            res = {}
            for i in tree:
                res.update({
                    "SerieRPS": i.find('.//SerieRPS', namespaces={}).text,
                    "NumeroRPS": i.find('.//NumeroRPS', namespaces={}).text,
                    "DataEmissaoNFe": i.find('.//DataEmissaoNFe', namespaces={}).text,
                    "CPFCNPJTomador": i.find('.//CPFCNPJTomador/CNPJ', namespaces={}).text,
                    "CodigoVerificacao": i.find('.//CodigoVerificacao', namespaces={}).text,
                    "NumeroNFe": i.find('.//NumeroNFe', namespaces={}).text
                })
                # copy() so the next iteration's update doesn't mutate stored rows.
                nfeData.append(res.copy())
            return nfeData
        except Exception as error:
            return error
|
nilq/baby-python
|
python
|
import requests
from configparser import ConfigParser
import pandas as pd
from ipywidgets import widgets, interact
from IPython.display import display
from .appconfig import AppConfig
from abc import ABC, abstractmethod
class widget_container:
    """Wire a set of ipywidgets into a single ``interact`` callback.

    ``wlist`` maps callback parameter names to widgets; ``on_change`` is
    invoked on every widget change (currently it just echoes the values).
    NOTE(review): on_change takes exactly two parameters (w, w2), so interact
    must be given exactly two widgets — confirm intended arity.
    """

    def __init__(self, **wlist):
        interact(self.on_change, **wlist)

    def on_change(self, w, w2):
        # Debug echo of the current widget values.
        print(w, w2)
class db_widget:
    """Bind a single widget to an ``interact`` callback that echoes its value."""

    def __init__(self, widget):
        interact(self.on_change, widget=widget)

    def on_change(self, widget):
        # Debug echo of the current widget value.
        print(widget)
class tools:
    """Notebook helpers for querying the telemetry JSON web service.

    Fetches the dataset catalog on construction (network I/O) and offers
    widgets for interactive browsing plus a simple query method.
    """

    def __init__(self):
        # AppConfig supplies the service endpoints; ``info`` is the dataset
        # catalog (expected to have 'subsystem' and 'ds_name' columns).
        self.config = AppConfig()
        self.url = self.config["client"]["json_url"]
        self.info = pd.DataFrame(requests.get(self.config["client"]["info_url"]).json())

    def widgets(self):
        """Render subsystem/parameter dropdowns and a submit button."""
        # Unique subsystems, with a blank sentinel first (nothing selected).
        subsystems = list(self.info.subsystem[~self.info.subsystem.duplicated()].values)
        subsystems.insert(0, '')
        # NOTE(review): 'descriptions' looks like a typo for 'description'
        # (ipywidgets silently ignores unknown keyword arguments) — confirm.
        log = widgets.Dropdown(options=subsystems, descriptions="Log")
        param = widgets.Dropdown(descriptions="Parameter")
        submit = widgets.Button(description='Submit', tooltip='Get Data')

        def on_select(log, params):
            # Restrict the parameter choices to the selected subsystem.
            param.options = list(self.info['ds_name'][self.info.subsystem == log])

        def on_submit(value):
            print(value)

        interact(on_select, log=log, params=param)
        display(submit)
        # NOTE(review): Button callbacks are normally registered with
        # ``submit.on_click``; ``observe`` watches trait changes — confirm intent.
        submit.observe(on_submit)

    def junk(self):
        """Ad-hoc smoke test: fetch two example datasets and print errors/info."""
        import json  # local import: ``json`` was never imported at module level

        data = {"ds_names": ["laser_cutter_room_temperature3_C",
                             'hexapod_mini_off_guider_tz_applied'],
                "delta_time": 360000}
        # BUG FIX: ``test_it`` is an instance method; the original called it as
        # a bare name, which raises NameError at runtime.
        rq = self.test_it(data=data)
        df = pd.read_json(json.dumps(rq['data']))
        print(rq["errors"])
        print(rq["info"])

    def test_it(self, data=None):
        """GET the JSON endpoint with ``data`` as JSON body.

        Returns the parsed JSON payload, or the raw Response object if the
        body is not valid JSON (the parse error is printed, not raised).
        """
        if data is None:
            data = {"ds_names": ["laser_cutter_room_dewpoint3_C",
                                 'hexapod_mini_off_guider_tz_applied'],
                    "delta_time": 360000}
        url = self.config["client"]["json_url"]
        rq = requests.get(url, json=data)
        try:
            resp = rq.json()
        except Exception as err:
            print(err)
            resp = rq
        return resp
|
nilq/baby-python
|
python
|
# -*- coding:utf-8 -*-
from DLtorch.trainer.base import BaseTrainer
from DLtorch.trainer.CNNTrainer import CNNTrainer
|
nilq/baby-python
|
python
|
import pytest
from rest_framework import status
from rest_framework.reverse import reverse
from .factories import JsonFactory
pytestmark = pytest.mark.django_db
@pytest.fixture
def sample_json(box):
    # A single Json row in the caller's box, with one flat key and one
    # nested object — enough structure to exercise search and json-mask.
    return JsonFactory(box=box, data={"key": "value", "lol": {"name": "hue", "age": 1}})
@pytest.mark.parametrize("method", ["get", "post", "put", "patch", "delete"])
def test_unauthorized(client_api_anon, method):
    # Every HTTP verb on the list endpoint must require authentication.
    url = reverse("jsons:jsons-list")
    response = getattr(client_api_anon, method)(url)
    assert response.status_code == status.HTTP_401_UNAUTHORIZED


def test_get_jsons_list(client_api):
    # An authenticated client with no stored jsons gets an empty paginated page.
    url = reverse("jsons:jsons-list")
    response = client_api.get(url)
    assert response.status_code == status.HTTP_200_OK
    data = response.json()
    assert data["count"] == 0
    assert data["next"] is None
    assert data["previous"] is None
    assert data["results"] == []


def test_get_jsons_list_simple(client_api, sample_json):
    # A stored json is returned with its id serialized as a string.
    url = reverse("jsons:jsons-list")
    response = client_api.get(url)
    assert response.status_code == status.HTTP_200_OK
    data = response.json()
    assert data["count"] == 1
    assert data["results"][0] == {"id": str(sample_json.id), "data": sample_json.data}


def test_get_jsons_with_jsonmask(client_api, sample_json):
    # ?fields= applies a json-mask: only the requested nested path survives,
    # and the id field is masked out entirely.
    url = reverse("jsons:jsons-list") + "?fields=data(lol(name))"
    response = client_api.get(url)
    assert response.status_code == status.HTTP_200_OK
    data = response.json()
    assert data["count"] == 1
    assert data["results"][0] == {"data": {"lol": {"name": "hue"}}}
@pytest.mark.parametrize("search", ["key:value", "data__key:value"])
def test_get_jsons_filter_simple(client_api, sample_json, search):
    # ?search=key:value filters on json content; the explicit data__ prefix
    # is accepted as an equivalent spelling.
    url = reverse("jsons:jsons-list") + "?search={}".format(search)
    response = client_api.get(url)
    assert response.status_code == status.HTTP_200_OK
    data = response.json()
    assert data["count"] == 1
    assert data["results"][0]["data"]["key"] == "value"


@pytest.mark.parametrize(
    "search,expected",
    [
        # Comma-separated terms are ANDed; "key:" matches an empty value.
        ("key:value", 2),
        ("lol:yolo", 1),
        ("lol:", 1),
        ("key:value,lol:yolo", 1),
        ("key:value,lol:", 1),
        ("key:,lol:yolo", 0),
        ("key:,lol:", 0),
    ],
)
def test_get_jsons_filter_by_multiple_keys(client_api, box, search, expected):
    JsonFactory(box=box, data={"key": "value", "lol": "yolo"})
    JsonFactory(box=box, data={"key": "value", "lol": ""})
    url = reverse("jsons:jsons-list") + "?search={}".format(search)
    response = client_api.get(url)
    assert response.status_code == status.HTTP_200_OK
    data = response.json()
    assert data["count"] == expected


@pytest.mark.parametrize("search", ["value", "some:value,other"])
def test_get_jsons_filter_with_invalid_search(client_api, search):
    # Terms without a "key:" prefix are rejected as malformed.
    url = reverse("jsons:jsons-list") + "?search={}".format(search)
    response = client_api.get(url)
    assert response.status_code == status.HTTP_400_BAD_REQUEST
def test_get_json_detail(client_api, sample_json):
    # The detail endpoint returns the full object for the owner's box.
    url = reverse("jsons:jsons-detail", [sample_json.id])
    response = client_api.get(url)
    assert response.status_code == status.HTTP_200_OK
    data = response.json()
    assert data["id"] == str(sample_json.id)
    assert data["data"] == sample_json.data


def test_get_json_detail_from_other_box(client_api_secondary, sample_json):
    # Objects are box-scoped: another box's client gets 404, not 403,
    # so existence is not leaked.
    url = reverse("jsons:jsons-detail", [sample_json.id])
    response = client_api_secondary.get(url)
    assert response.status_code == status.HTTP_404_NOT_FOUND
    data = response.json()
    assert "not found" in data["detail"].lower()


def test_get_json_detail_with_jsonmask(client_api, sample_json):
    # ?fields= masking applies on the detail endpoint as well.
    url = reverse("jsons:jsons-detail", [sample_json.id]) + "?fields=data(lol(age))"
    response = client_api.get(url)
    assert response.status_code == status.HTTP_200_OK
    data = response.json()
    assert data == {"data": {"lol": {"age": 1}}}


def test_delete_json(client_api, sample_json):
    url = reverse("jsons:jsons-detail", [sample_json.id])
    response = client_api.delete(url)
    assert response.status_code == status.HTTP_204_NO_CONTENT


def test_delete_json_from_other_box(client_api_secondary, sample_json):
    # Cross-box delete is also a 404.
    url = reverse("jsons:jsons-detail", [sample_json.id])
    response = client_api_secondary.delete(url)
    assert response.status_code == status.HTTP_404_NOT_FOUND


def test_patch_json(client_api, sample_json):
    url = reverse("jsons:jsons-detail", [sample_json.id])
    payload = {"data": {"other": "whatever"}}
    response = client_api.patch(url, data=payload)
    assert response.status_code == status.HTTP_200_OK
    assert response.json()["data"] == payload["data"]


@pytest.mark.parametrize("data", [{}, "", 123, None])
def test_patch_json_invalid(client_api, sample_json, data):
    # "data" must be a non-empty JSON object.
    url = reverse("jsons:jsons-detail", [sample_json.id])
    payload = {"data": data}
    response = client_api.patch(url, data=payload)
    assert response.status_code == status.HTTP_400_BAD_REQUEST


def test_patch_json_from_other_box(client_api_secondary, sample_json):
    url = reverse("jsons:jsons-detail", [sample_json.id])
    payload = {"data": {"other": "whatever"}}
    response = client_api_secondary.patch(url, data=payload)
    assert response.status_code == status.HTTP_404_NOT_FOUND
def test_put_json(client_api, sample_json):
    """PUT replaces the stored data and echoes it back in the response."""
    new_data = {"other": "whatever"}
    resp = client_api.put(
        reverse("jsons:jsons-detail", [sample_json.id]),
        data={"data": new_data},
    )
    assert resp.status_code == status.HTTP_200_OK
    assert resp.json()["data"] == new_data
@pytest.mark.parametrize("data", [{}, "", 123, None])
def test_put_json_invalid(client_api, sample_json, data):
    """PUT with an empty or non-object payload is rejected with 400."""
    endpoint = reverse("jsons:jsons-detail", [sample_json.id])
    resp = client_api.put(endpoint, data={"data": data})
    assert resp.status_code == status.HTTP_400_BAD_REQUEST
def test_put_json_from_other_box(client_api_secondary, sample_json):
    """A foreign box's client cannot PUT this JSON; it sees a 404."""
    resp = client_api_secondary.put(
        reverse("jsons:jsons-detail", [sample_json.id]),
        data={"data": {"other": "whatever"}},
    )
    assert resp.status_code == status.HTTP_404_NOT_FOUND
def test_post_json_empty(client_api):
    """POST without a 'data' field yields 400 and a "required" message."""
    resp = client_api.post(reverse("jsons:jsons-list"), data={})
    assert resp.status_code == status.HTTP_400_BAD_REQUEST
    assert "is required" in resp.json()["data"][0]
def test_post_json_invalid(client_api):
    """POST with a non-JSON-object body yields 400 with a non-field error."""
    resp = client_api.post(reverse("jsons:jsons-list"), data="abc")
    assert resp.status_code == status.HTTP_400_BAD_REQUEST
    assert "invalid" in resp.json()["non_field_errors"][0].lower()
def test_post_json_simple(client_api):
    """Creating a flat JSON document returns 201 with an id and the data."""
    payload = {"data": {"key": "value"}}
    resp = client_api.post(reverse("jsons:jsons-list"), data=payload)
    assert resp.status_code == status.HTTP_201_CREATED
    created = resp.json()
    assert "id" in created
    assert created["data"] == payload["data"]
def test_post_json_complex(client_api):
    """Creating a deeply nested JSON document (dicts, lists, floats) succeeds."""
    nested_payload = {
        "data": {
            "key": "value",
            "foobar": {"nested": 1, "lalala": ["la", "la", "la"]},
            "alist": [3.14],
        }
    }
    resp = client_api.post(reverse("jsons:jsons-list"), data=nested_payload)
    assert resp.status_code == status.HTTP_201_CREATED
|
nilq/baby-python
|
python
|
import logging
import logging.config
from decimal import Decimal
from pprint import pformat
import time
from sqlalchemy import Column, String
from sqlalchemy.orm import relationship
from trader.exchange.abstract_book import AbstractBook
from trader.exchange.order import Order
import config
from trader.database.manager import BaseWrapper
# Apply the project-wide logging configuration, then create this module's logger.
logging.config.dictConfig(config.log_config)
logger = logging.getLogger(__name__)
@AbstractBook.register
class Book(BaseWrapper):
    """Order book for a single trading pair, persisted via SQLAlchemy.

    Holds a dynamic relationship to ``Order`` rows and provides
    convenience accessors for orders in each lifecycle status.
    """

    pair = Column("pair", String(15))
    orders = relationship("Order", lazy="dynamic", collection_class=list,
                          cascade="all, delete-orphan")

    def __init__(self, pair):
        # pair: market symbol this book tracks (e.g. "BTC-USD").
        self.pair = pair
        self.persist = True

    def add_order_to_book(self, order):
        """Append *order* to this book.

        :raises TypeError: if *order* is not an ``Order`` instance.
        """
        if not isinstance(order, Order):
            # Bug fix: the original passed the format string and the type as
            # two separate TypeError arguments, so "{}" was never interpolated
            # (and the message contained the typo "tyep").
            raise TypeError(
                "Expected order to be of type Order, but received type {}"
                .format(type(order)))
        self.orders.append(order)

    def get_all_orders(self):
        """Return every order attached to this book."""
        return self.orders.all()

    def _orders_by_status(self, order_status):
        # Shared query for the status accessors below; the relationship is
        # lazy="dynamic", so .filter() runs at the database level.
        return self.orders.filter(Order._status == order_status).all()

    def get_ready_orders(self):
        """Return orders staged and ready to be placed."""
        return self._orders_by_status("ready")

    def get_open_orders(self):
        """Return orders currently resting on the exchange."""
        return self._orders_by_status("open")

    def get_filled_orders(self):
        """Return orders that have been completely executed."""
        return self._orders_by_status("filled")

    def get_canceled_orders(self):
        """Return orders canceled before completion."""
        return self._orders_by_status("canceled")

    def get_rejected_orders(self):
        """Return orders the exchange refused."""
        return self._orders_by_status("rejected")

    def order_filled(self, filled_order):
        """Mark *filled_order* as completely filled."""
        logger.debug("Updating filled order: {}".format(filled_order))
        filled_order.status = "filled"
        filled_order.filled = filled_order.size
|
nilq/baby-python
|
python
|
# coding=utf-8
# *** WARNING: this file was generated by Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
from ._inputs import *
import pulumi_kubernetes
__all__ = ['CertManagerArgs', 'CertManager']
@pulumi.input_type
class CertManagerArgs:
    """Input-argument bag for the CertManager component resource.

    NOTE: generated by the Pulumi SDK Generator — do not hand-edit the
    accessors below; each property simply proxies ``pulumi.get``/``pulumi.set``
    for the matching constructor argument.
    """
    def __init__(__self__, *,
                 affinity: Optional[pulumi.Input['pulumi_kubernetes.core.v1.AffinityArgs']] = None,
                 cainjector: Optional[pulumi.Input['CertManagerCaInjectorArgs']] = None,
                 cluster_resource_namespace: Optional[pulumi.Input[str]] = None,
                 container_security_context: Optional[pulumi.Input['pulumi_kubernetes.core.v1.SecurityContextArgs']] = None,
                 deployment_annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 extra_args: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 extra_env: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.EnvVarArgs']]]] = None,
                 extra_volume_mounts: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.VolumeMountArgs']]]] = None,
                 extra_volumes: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.VolumeArgs']]]] = None,
                 feature_gates: Optional[pulumi.Input[str]] = None,
                 global_: Optional[pulumi.Input['CertManagerGlobalArgs']] = None,
                 helm_options: Optional[pulumi.Input['ReleaseArgs']] = None,
                 http_proxy: Optional[pulumi.Input[str]] = None,
                 https_proxy: Optional[pulumi.Input[str]] = None,
                 image: Optional[pulumi.Input['CertManagerImageArgs']] = None,
                 ingress_shim: Optional[pulumi.Input['CertManagerIngressShimArgs']] = None,
                 install_crds: Optional[pulumi.Input[bool]] = None,
                 no_proxy: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 node_selector: Optional[pulumi.Input['pulumi_kubernetes.core.v1.NodeSelectorArgs']] = None,
                 pod_annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 pod_dns_config: Optional[pulumi.Input['pulumi_kubernetes.core.v1.PodDNSConfigArgs']] = None,
                 pod_dns_policy: Optional[pulumi.Input[str]] = None,
                 pod_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 prometheus: Optional[pulumi.Input['CertManagerPrometheusArgs']] = None,
                 replica_count: Optional[pulumi.Input[int]] = None,
                 resources: Optional[pulumi.Input['pulumi_kubernetes.core.v1.ResourceRequirementsArgs']] = None,
                 security_context: Optional[pulumi.Input['pulumi_kubernetes.core.v1.PodSecurityContextArgs']] = None,
                 service_account: Optional[pulumi.Input['CertManagerServiceAccountArgs']] = None,
                 service_annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 service_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 startupapicheck: Optional[pulumi.Input['CertManagerStartupAPICheckArgs']] = None,
                 strategy: Optional[pulumi.Input['pulumi_kubernetes.apps.v1.DeploymentStrategyArgs']] = None,
                 tolerations: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.TolerationArgs']]]] = None,
                 webhook: Optional[pulumi.Input['CertManagerWebhookArgs']] = None):
        """
        The set of arguments for constructing a CertManager resource.
        :param pulumi.Input[str] cluster_resource_namespace: Override the namespace used to store DNS provider credentials etc. for ClusterIssuer resources. By default, the same namespace as cert-manager is deployed within is used. This namespace will not be automatically created by the Helm chart.
        :param pulumi.Input['pulumi_kubernetes.core.v1.SecurityContextArgs'] container_security_context: Container Security Context to be set on the controller component container. ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] deployment_annotations: Optional additional annotations to add to the controller Deployment
        :param pulumi.Input[Sequence[pulumi.Input[str]]] extra_args: Optional additional arguments.
        :param pulumi.Input[str] feature_gates: Comma separated list of feature gates that should be enabled on the controller pod.
        :param pulumi.Input['ReleaseArgs'] helm_options: HelmOptions is an escape hatch that lets the end user control any aspect of the Helm deployment. This exposes the entirety of the underlying Helm Release component args.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] pod_annotations: Optional additional annotations to add to the controller Pods
        :param pulumi.Input[str] pod_dns_policy: Optional DNS settings, useful if you have a public and private DNS zone for the same domain on Route 53. What follows is an example of ensuring cert-manager can access an ingress or DNS TXT records at all times. NOTE: This requires Kubernetes 1.10 or `CustomPodDNS` feature gate enabled for the cluster to work.
        :param pulumi.Input['pulumi_kubernetes.core.v1.PodSecurityContextArgs'] security_context: Pod Security Context. ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] service_annotations: Optional additional annotations to add to the controller service
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] service_labels: Optional additional labels to add to the controller Service
        """
        # Only explicitly-provided (non-None) arguments are recorded on the
        # input bag; omitted ones fall back to chart defaults.
        if affinity is not None:
            pulumi.set(__self__, "affinity", affinity)
        if cainjector is not None:
            pulumi.set(__self__, "cainjector", cainjector)
        if cluster_resource_namespace is not None:
            pulumi.set(__self__, "cluster_resource_namespace", cluster_resource_namespace)
        if container_security_context is not None:
            pulumi.set(__self__, "container_security_context", container_security_context)
        if deployment_annotations is not None:
            pulumi.set(__self__, "deployment_annotations", deployment_annotations)
        if extra_args is not None:
            pulumi.set(__self__, "extra_args", extra_args)
        if extra_env is not None:
            pulumi.set(__self__, "extra_env", extra_env)
        if extra_volume_mounts is not None:
            pulumi.set(__self__, "extra_volume_mounts", extra_volume_mounts)
        if extra_volumes is not None:
            pulumi.set(__self__, "extra_volumes", extra_volumes)
        if feature_gates is not None:
            pulumi.set(__self__, "feature_gates", feature_gates)
        if global_ is not None:
            pulumi.set(__self__, "global_", global_)
        if helm_options is not None:
            pulumi.set(__self__, "helm_options", helm_options)
        if http_proxy is not None:
            pulumi.set(__self__, "http_proxy", http_proxy)
        if https_proxy is not None:
            pulumi.set(__self__, "https_proxy", https_proxy)
        if image is not None:
            pulumi.set(__self__, "image", image)
        if ingress_shim is not None:
            pulumi.set(__self__, "ingress_shim", ingress_shim)
        if install_crds is not None:
            pulumi.set(__self__, "install_crds", install_crds)
        if no_proxy is not None:
            pulumi.set(__self__, "no_proxy", no_proxy)
        if node_selector is not None:
            pulumi.set(__self__, "node_selector", node_selector)
        if pod_annotations is not None:
            pulumi.set(__self__, "pod_annotations", pod_annotations)
        if pod_dns_config is not None:
            pulumi.set(__self__, "pod_dns_config", pod_dns_config)
        if pod_dns_policy is not None:
            pulumi.set(__self__, "pod_dns_policy", pod_dns_policy)
        if pod_labels is not None:
            pulumi.set(__self__, "pod_labels", pod_labels)
        if prometheus is not None:
            pulumi.set(__self__, "prometheus", prometheus)
        if replica_count is not None:
            pulumi.set(__self__, "replica_count", replica_count)
        if resources is not None:
            pulumi.set(__self__, "resources", resources)
        if security_context is not None:
            pulumi.set(__self__, "security_context", security_context)
        if service_account is not None:
            pulumi.set(__self__, "service_account", service_account)
        if service_annotations is not None:
            pulumi.set(__self__, "service_annotations", service_annotations)
        if service_labels is not None:
            pulumi.set(__self__, "service_labels", service_labels)
        if startupapicheck is not None:
            pulumi.set(__self__, "startupapicheck", startupapicheck)
        if strategy is not None:
            pulumi.set(__self__, "strategy", strategy)
        if tolerations is not None:
            pulumi.set(__self__, "tolerations", tolerations)
        if webhook is not None:
            pulumi.set(__self__, "webhook", webhook)
    # ---- Generated accessors: one getter/setter pair per constructor arg ----
    @property
    @pulumi.getter
    def affinity(self) -> Optional[pulumi.Input['pulumi_kubernetes.core.v1.AffinityArgs']]:
        return pulumi.get(self, "affinity")
    @affinity.setter
    def affinity(self, value: Optional[pulumi.Input['pulumi_kubernetes.core.v1.AffinityArgs']]):
        pulumi.set(self, "affinity", value)
    @property
    @pulumi.getter
    def cainjector(self) -> Optional[pulumi.Input['CertManagerCaInjectorArgs']]:
        return pulumi.get(self, "cainjector")
    @cainjector.setter
    def cainjector(self, value: Optional[pulumi.Input['CertManagerCaInjectorArgs']]):
        pulumi.set(self, "cainjector", value)
    @property
    @pulumi.getter(name="clusterResourceNamespace")
    def cluster_resource_namespace(self) -> Optional[pulumi.Input[str]]:
        """
        Override the namespace used to store DNS provider credentials etc. for ClusterIssuer resources. By default, the same namespace as cert-manager is deployed within is used. This namespace will not be automatically created by the Helm chart.
        """
        return pulumi.get(self, "cluster_resource_namespace")
    @cluster_resource_namespace.setter
    def cluster_resource_namespace(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "cluster_resource_namespace", value)
    @property
    @pulumi.getter(name="containerSecurityContext")
    def container_security_context(self) -> Optional[pulumi.Input['pulumi_kubernetes.core.v1.SecurityContextArgs']]:
        """
        Container Security Context to be set on the controller component container. ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
        """
        return pulumi.get(self, "container_security_context")
    @container_security_context.setter
    def container_security_context(self, value: Optional[pulumi.Input['pulumi_kubernetes.core.v1.SecurityContextArgs']]):
        pulumi.set(self, "container_security_context", value)
    @property
    @pulumi.getter(name="deploymentAnnotations")
    def deployment_annotations(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Optional additional annotations to add to the controller Deployment
        """
        return pulumi.get(self, "deployment_annotations")
    @deployment_annotations.setter
    def deployment_annotations(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "deployment_annotations", value)
    @property
    @pulumi.getter(name="extraArgs")
    def extra_args(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Optional additional arguments.
        """
        return pulumi.get(self, "extra_args")
    @extra_args.setter
    def extra_args(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "extra_args", value)
    @property
    @pulumi.getter(name="extraEnv")
    def extra_env(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.EnvVarArgs']]]]:
        return pulumi.get(self, "extra_env")
    @extra_env.setter
    def extra_env(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.EnvVarArgs']]]]):
        pulumi.set(self, "extra_env", value)
    @property
    @pulumi.getter(name="extraVolumeMounts")
    def extra_volume_mounts(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.VolumeMountArgs']]]]:
        return pulumi.get(self, "extra_volume_mounts")
    @extra_volume_mounts.setter
    def extra_volume_mounts(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.VolumeMountArgs']]]]):
        pulumi.set(self, "extra_volume_mounts", value)
    @property
    @pulumi.getter(name="extraVolumes")
    def extra_volumes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.VolumeArgs']]]]:
        return pulumi.get(self, "extra_volumes")
    @extra_volumes.setter
    def extra_volumes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.VolumeArgs']]]]):
        pulumi.set(self, "extra_volumes", value)
    @property
    @pulumi.getter(name="featureGates")
    def feature_gates(self) -> Optional[pulumi.Input[str]]:
        """
        Comma separated list of feature gates that should be enabled on the controller pod.
        """
        return pulumi.get(self, "feature_gates")
    @feature_gates.setter
    def feature_gates(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "feature_gates", value)
    @property
    @pulumi.getter(name="global")
    def global_(self) -> Optional[pulumi.Input['CertManagerGlobalArgs']]:
        return pulumi.get(self, "global_")
    @global_.setter
    def global_(self, value: Optional[pulumi.Input['CertManagerGlobalArgs']]):
        pulumi.set(self, "global_", value)
    @property
    @pulumi.getter(name="helmOptions")
    def helm_options(self) -> Optional[pulumi.Input['ReleaseArgs']]:
        """
        HelmOptions is an escape hatch that lets the end user control any aspect of the Helm deployment. This exposes the entirety of the underlying Helm Release component args.
        """
        return pulumi.get(self, "helm_options")
    @helm_options.setter
    def helm_options(self, value: Optional[pulumi.Input['ReleaseArgs']]):
        pulumi.set(self, "helm_options", value)
    @property
    @pulumi.getter
    def http_proxy(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "http_proxy")
    @http_proxy.setter
    def http_proxy(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "http_proxy", value)
    @property
    @pulumi.getter
    def https_proxy(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "https_proxy")
    @https_proxy.setter
    def https_proxy(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "https_proxy", value)
    @property
    @pulumi.getter
    def image(self) -> Optional[pulumi.Input['CertManagerImageArgs']]:
        return pulumi.get(self, "image")
    @image.setter
    def image(self, value: Optional[pulumi.Input['CertManagerImageArgs']]):
        pulumi.set(self, "image", value)
    @property
    @pulumi.getter(name="ingressShim")
    def ingress_shim(self) -> Optional[pulumi.Input['CertManagerIngressShimArgs']]:
        return pulumi.get(self, "ingress_shim")
    @ingress_shim.setter
    def ingress_shim(self, value: Optional[pulumi.Input['CertManagerIngressShimArgs']]):
        pulumi.set(self, "ingress_shim", value)
    @property
    @pulumi.getter(name="installCRDs")
    def install_crds(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "install_crds")
    @install_crds.setter
    def install_crds(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "install_crds", value)
    @property
    @pulumi.getter
    def no_proxy(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        return pulumi.get(self, "no_proxy")
    @no_proxy.setter
    def no_proxy(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "no_proxy", value)
    @property
    @pulumi.getter(name="nodeSelector")
    def node_selector(self) -> Optional[pulumi.Input['pulumi_kubernetes.core.v1.NodeSelectorArgs']]:
        return pulumi.get(self, "node_selector")
    @node_selector.setter
    def node_selector(self, value: Optional[pulumi.Input['pulumi_kubernetes.core.v1.NodeSelectorArgs']]):
        pulumi.set(self, "node_selector", value)
    @property
    @pulumi.getter(name="podAnnotations")
    def pod_annotations(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Optional additional annotations to add to the controller Pods
        """
        return pulumi.get(self, "pod_annotations")
    @pod_annotations.setter
    def pod_annotations(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "pod_annotations", value)
    @property
    @pulumi.getter(name="podDnsConfig")
    def pod_dns_config(self) -> Optional[pulumi.Input['pulumi_kubernetes.core.v1.PodDNSConfigArgs']]:
        return pulumi.get(self, "pod_dns_config")
    @pod_dns_config.setter
    def pod_dns_config(self, value: Optional[pulumi.Input['pulumi_kubernetes.core.v1.PodDNSConfigArgs']]):
        pulumi.set(self, "pod_dns_config", value)
    @property
    @pulumi.getter(name="podDnsPolicy")
    def pod_dns_policy(self) -> Optional[pulumi.Input[str]]:
        """
        Optional DNS settings, useful if you have a public and private DNS zone for the same domain on Route 53. What follows is an example of ensuring cert-manager can access an ingress or DNS TXT records at all times. NOTE: This requires Kubernetes 1.10 or `CustomPodDNS` feature gate enabled for the cluster to work.
        """
        return pulumi.get(self, "pod_dns_policy")
    @pod_dns_policy.setter
    def pod_dns_policy(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "pod_dns_policy", value)
    @property
    @pulumi.getter(name="podLabels")
    def pod_labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        return pulumi.get(self, "pod_labels")
    @pod_labels.setter
    def pod_labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "pod_labels", value)
    @property
    @pulumi.getter
    def prometheus(self) -> Optional[pulumi.Input['CertManagerPrometheusArgs']]:
        return pulumi.get(self, "prometheus")
    @prometheus.setter
    def prometheus(self, value: Optional[pulumi.Input['CertManagerPrometheusArgs']]):
        pulumi.set(self, "prometheus", value)
    @property
    @pulumi.getter(name="replicaCount")
    def replica_count(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "replica_count")
    @replica_count.setter
    def replica_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "replica_count", value)
    @property
    @pulumi.getter
    def resources(self) -> Optional[pulumi.Input['pulumi_kubernetes.core.v1.ResourceRequirementsArgs']]:
        return pulumi.get(self, "resources")
    @resources.setter
    def resources(self, value: Optional[pulumi.Input['pulumi_kubernetes.core.v1.ResourceRequirementsArgs']]):
        pulumi.set(self, "resources", value)
    @property
    @pulumi.getter(name="securityContext")
    def security_context(self) -> Optional[pulumi.Input['pulumi_kubernetes.core.v1.PodSecurityContextArgs']]:
        """
        Pod Security Context. ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
        """
        return pulumi.get(self, "security_context")
    @security_context.setter
    def security_context(self, value: Optional[pulumi.Input['pulumi_kubernetes.core.v1.PodSecurityContextArgs']]):
        pulumi.set(self, "security_context", value)
    @property
    @pulumi.getter(name="serviceAccount")
    def service_account(self) -> Optional[pulumi.Input['CertManagerServiceAccountArgs']]:
        return pulumi.get(self, "service_account")
    @service_account.setter
    def service_account(self, value: Optional[pulumi.Input['CertManagerServiceAccountArgs']]):
        pulumi.set(self, "service_account", value)
    @property
    @pulumi.getter(name="serviceAnnotations")
    def service_annotations(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Optional additional annotations to add to the controller service
        """
        return pulumi.get(self, "service_annotations")
    @service_annotations.setter
    def service_annotations(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "service_annotations", value)
    @property
    @pulumi.getter(name="serviceLabels")
    def service_labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Optional additional labels to add to the controller Service
        """
        return pulumi.get(self, "service_labels")
    @service_labels.setter
    def service_labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "service_labels", value)
    @property
    @pulumi.getter
    def startupapicheck(self) -> Optional[pulumi.Input['CertManagerStartupAPICheckArgs']]:
        return pulumi.get(self, "startupapicheck")
    @startupapicheck.setter
    def startupapicheck(self, value: Optional[pulumi.Input['CertManagerStartupAPICheckArgs']]):
        pulumi.set(self, "startupapicheck", value)
    @property
    @pulumi.getter
    def strategy(self) -> Optional[pulumi.Input['pulumi_kubernetes.apps.v1.DeploymentStrategyArgs']]:
        return pulumi.get(self, "strategy")
    @strategy.setter
    def strategy(self, value: Optional[pulumi.Input['pulumi_kubernetes.apps.v1.DeploymentStrategyArgs']]):
        pulumi.set(self, "strategy", value)
    @property
    @pulumi.getter
    def tolerations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.TolerationArgs']]]]:
        return pulumi.get(self, "tolerations")
    @tolerations.setter
    def tolerations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.TolerationArgs']]]]):
        pulumi.set(self, "tolerations", value)
    @property
    @pulumi.getter
    def webhook(self) -> Optional[pulumi.Input['CertManagerWebhookArgs']]:
        return pulumi.get(self, "webhook")
    @webhook.setter
    def webhook(self, value: Optional[pulumi.Input['CertManagerWebhookArgs']]):
        pulumi.set(self, "webhook", value)
class CertManager(pulumi.ComponentResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
affinity: Optional[pulumi.Input[pulumi.InputType['pulumi_kubernetes.core.v1.AffinityArgs']]] = None,
cainjector: Optional[pulumi.Input[pulumi.InputType['CertManagerCaInjectorArgs']]] = None,
cluster_resource_namespace: Optional[pulumi.Input[str]] = None,
container_security_context: Optional[pulumi.Input[pulumi.InputType['pulumi_kubernetes.core.v1.SecurityContextArgs']]] = None,
deployment_annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
extra_args: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
extra_env: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['pulumi_kubernetes.core.v1.EnvVarArgs']]]]] = None,
extra_volume_mounts: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['pulumi_kubernetes.core.v1.VolumeMountArgs']]]]] = None,
extra_volumes: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['pulumi_kubernetes.core.v1.VolumeArgs']]]]] = None,
feature_gates: Optional[pulumi.Input[str]] = None,
global_: Optional[pulumi.Input[pulumi.InputType['CertManagerGlobalArgs']]] = None,
helm_options: Optional[pulumi.Input[pulumi.InputType['ReleaseArgs']]] = None,
http_proxy: Optional[pulumi.Input[str]] = None,
https_proxy: Optional[pulumi.Input[str]] = None,
image: Optional[pulumi.Input[pulumi.InputType['CertManagerImageArgs']]] = None,
ingress_shim: Optional[pulumi.Input[pulumi.InputType['CertManagerIngressShimArgs']]] = None,
install_crds: Optional[pulumi.Input[bool]] = None,
no_proxy: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
node_selector: Optional[pulumi.Input[pulumi.InputType['pulumi_kubernetes.core.v1.NodeSelectorArgs']]] = None,
pod_annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
pod_dns_config: Optional[pulumi.Input[pulumi.InputType['pulumi_kubernetes.core.v1.PodDNSConfigArgs']]] = None,
pod_dns_policy: Optional[pulumi.Input[str]] = None,
pod_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
prometheus: Optional[pulumi.Input[pulumi.InputType['CertManagerPrometheusArgs']]] = None,
replica_count: Optional[pulumi.Input[int]] = None,
resources: Optional[pulumi.Input[pulumi.InputType['pulumi_kubernetes.core.v1.ResourceRequirementsArgs']]] = None,
security_context: Optional[pulumi.Input[pulumi.InputType['pulumi_kubernetes.core.v1.PodSecurityContextArgs']]] = None,
service_account: Optional[pulumi.Input[pulumi.InputType['CertManagerServiceAccountArgs']]] = None,
service_annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
service_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
startupapicheck: Optional[pulumi.Input[pulumi.InputType['CertManagerStartupAPICheckArgs']]] = None,
strategy: Optional[pulumi.Input[pulumi.InputType['pulumi_kubernetes.apps.v1.DeploymentStrategyArgs']]] = None,
tolerations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['pulumi_kubernetes.core.v1.TolerationArgs']]]]] = None,
webhook: Optional[pulumi.Input[pulumi.InputType['CertManagerWebhookArgs']]] = None,
__props__=None):
"""
Automates the management and issuance of TLS certificates from various issuing sources within Kubernetes
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] cluster_resource_namespace: Override the namespace used to store DNS provider credentials etc. for ClusterIssuer resources. By default, the same namespace as cert-manager is deployed within is used. This namespace will not be automatically created by the Helm chart.
:param pulumi.Input[pulumi.InputType['pulumi_kubernetes.core.v1.SecurityContextArgs']] container_security_context: Container Security Context to be set on the controller component container. ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] deployment_annotations: Optional additional annotations to add to the controller Deployment
:param pulumi.Input[Sequence[pulumi.Input[str]]] extra_args: Optional additional arguments.
:param pulumi.Input[str] feature_gates: Comma separated list of feature gates that should be enabled on the controller pod.
:param pulumi.Input[pulumi.InputType['ReleaseArgs']] helm_options: HelmOptions is an escape hatch that lets the end user control any aspect of the Helm deployment. This exposes the entirety of the underlying Helm Release component args.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] pod_annotations: Optional additional annotations to add to the controller Pods
:param pulumi.Input[str] pod_dns_policy: Optional DNS settings, useful if you have a public and private DNS zone for the same domain on Route 53. What follows is an example of ensuring cert-manager can access an ingress or DNS TXT records at all times. NOTE: This requires Kubernetes 1.10 or `CustomPodDNS` feature gate enabled for the cluster to work.
:param pulumi.Input[pulumi.InputType['pulumi_kubernetes.core.v1.PodSecurityContextArgs']] security_context: Pod Security Context. ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] service_annotations: Optional additional annotations to add to the controller service
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] service_labels: Optional additional labels to add to the controller Service
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: Optional[CertManagerArgs] = None,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Automates the management and issuance of TLS certificates from various issuing sources within Kubernetes
:param str resource_name: The name of the resource.
:param CertManagerArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(CertManagerArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       affinity: Optional[pulumi.Input[pulumi.InputType['pulumi_kubernetes.core.v1.AffinityArgs']]] = None,
                       cainjector: Optional[pulumi.Input[pulumi.InputType['CertManagerCaInjectorArgs']]] = None,
                       cluster_resource_namespace: Optional[pulumi.Input[str]] = None,
                       container_security_context: Optional[pulumi.Input[pulumi.InputType['pulumi_kubernetes.core.v1.SecurityContextArgs']]] = None,
                       deployment_annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                       extra_args: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                       extra_env: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['pulumi_kubernetes.core.v1.EnvVarArgs']]]]] = None,
                       extra_volume_mounts: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['pulumi_kubernetes.core.v1.VolumeMountArgs']]]]] = None,
                       extra_volumes: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['pulumi_kubernetes.core.v1.VolumeArgs']]]]] = None,
                       feature_gates: Optional[pulumi.Input[str]] = None,
                       global_: Optional[pulumi.Input[pulumi.InputType['CertManagerGlobalArgs']]] = None,
                       helm_options: Optional[pulumi.Input[pulumi.InputType['ReleaseArgs']]] = None,
                       http_proxy: Optional[pulumi.Input[str]] = None,
                       https_proxy: Optional[pulumi.Input[str]] = None,
                       image: Optional[pulumi.Input[pulumi.InputType['CertManagerImageArgs']]] = None,
                       ingress_shim: Optional[pulumi.Input[pulumi.InputType['CertManagerIngressShimArgs']]] = None,
                       install_crds: Optional[pulumi.Input[bool]] = None,
                       no_proxy: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                       node_selector: Optional[pulumi.Input[pulumi.InputType['pulumi_kubernetes.core.v1.NodeSelectorArgs']]] = None,
                       pod_annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                       pod_dns_config: Optional[pulumi.Input[pulumi.InputType['pulumi_kubernetes.core.v1.PodDNSConfigArgs']]] = None,
                       pod_dns_policy: Optional[pulumi.Input[str]] = None,
                       pod_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                       prometheus: Optional[pulumi.Input[pulumi.InputType['CertManagerPrometheusArgs']]] = None,
                       replica_count: Optional[pulumi.Input[int]] = None,
                       resources: Optional[pulumi.Input[pulumi.InputType['pulumi_kubernetes.core.v1.ResourceRequirementsArgs']]] = None,
                       security_context: Optional[pulumi.Input[pulumi.InputType['pulumi_kubernetes.core.v1.PodSecurityContextArgs']]] = None,
                       service_account: Optional[pulumi.Input[pulumi.InputType['CertManagerServiceAccountArgs']]] = None,
                       service_annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                       service_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                       startupapicheck: Optional[pulumi.Input[pulumi.InputType['CertManagerStartupAPICheckArgs']]] = None,
                       strategy: Optional[pulumi.Input[pulumi.InputType['pulumi_kubernetes.apps.v1.DeploymentStrategyArgs']]] = None,
                       tolerations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['pulumi_kubernetes.core.v1.TolerationArgs']]]]] = None,
                       webhook: Optional[pulumi.Input[pulumi.InputType['CertManagerWebhookArgs']]] = None,
                       __props__=None):
        """
        Shared implementation behind both ``__init__`` overloads: validates the
        resource options, packs every input property into a ``CertManagerArgs``
        bag, and registers the remote component resource with the Pulumi engine.
        Parameter semantics are documented on the keyword ``__init__`` overload.
        """
        # Normalize resource options and pin the provider version if unset.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # Component resources are always created fresh; they cannot be looked up by id.
        if opts.id is not None:
            raise ValueError('ComponentResource classes do not support opts.id')
        else:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            # Bypass CertManagerArgs.__init__ and copy each input property directly.
            __props__ = CertManagerArgs.__new__(CertManagerArgs)
            __props__.__dict__["affinity"] = affinity
            __props__.__dict__["cainjector"] = cainjector
            __props__.__dict__["cluster_resource_namespace"] = cluster_resource_namespace
            __props__.__dict__["container_security_context"] = container_security_context
            __props__.__dict__["deployment_annotations"] = deployment_annotations
            __props__.__dict__["extra_args"] = extra_args
            __props__.__dict__["extra_env"] = extra_env
            __props__.__dict__["extra_volume_mounts"] = extra_volume_mounts
            __props__.__dict__["extra_volumes"] = extra_volumes
            __props__.__dict__["feature_gates"] = feature_gates
            __props__.__dict__["global_"] = global_
            __props__.__dict__["helm_options"] = helm_options
            __props__.__dict__["http_proxy"] = http_proxy
            __props__.__dict__["https_proxy"] = https_proxy
            __props__.__dict__["image"] = image
            __props__.__dict__["ingress_shim"] = ingress_shim
            __props__.__dict__["install_crds"] = install_crds
            __props__.__dict__["no_proxy"] = no_proxy
            __props__.__dict__["node_selector"] = node_selector
            __props__.__dict__["pod_annotations"] = pod_annotations
            __props__.__dict__["pod_dns_config"] = pod_dns_config
            __props__.__dict__["pod_dns_policy"] = pod_dns_policy
            __props__.__dict__["pod_labels"] = pod_labels
            __props__.__dict__["prometheus"] = prometheus
            __props__.__dict__["replica_count"] = replica_count
            __props__.__dict__["resources"] = resources
            __props__.__dict__["security_context"] = security_context
            __props__.__dict__["service_account"] = service_account
            __props__.__dict__["service_annotations"] = service_annotations
            __props__.__dict__["service_labels"] = service_labels
            __props__.__dict__["startupapicheck"] = startupapicheck
            __props__.__dict__["strategy"] = strategy
            __props__.__dict__["tolerations"] = tolerations
            __props__.__dict__["webhook"] = webhook
            # Output-only property; resolved by the engine after deployment.
            __props__.__dict__["status"] = None
        # Register as a remote component resource with the provider.
        super(CertManager, __self__).__init__(
            'kubernetes-cert-manager:index:CertManager',
            resource_name,
            __props__,
            opts,
            remote=True)
    @property
    @pulumi.getter
    def status(self) -> pulumi.Output['outputs.ReleaseStatus']:
        """
        Detailed information about the status of the underlying Helm deployment.
        """
        # Resolved by the Pulumi engine from the component's registered outputs.
        return pulumi.get(self, "status")
|
nilq/baby-python
|
python
|
import typing
from uuid import uuid4
from pydantic import BaseModel
IdentifierType = typing.NewType("IdentifierType", str)


def create_identifier() -> IdentifierType:
    """Generate a fresh identifier: a random UUID4 rendered as a string."""
    new_id = uuid4()
    return IdentifierType(str(new_id))
class EmptyModel(BaseModel):
    """A pydantic model with no fields, for payloads that carry no data."""
    pass
# A 3-component offset; presumably (x, y, z) in the caller's units — TODO confirm.
OffsetVector = typing.Tuple[float, float, float]
class JogPosition(BaseModel):
    """Request body carrying a single jog offset vector."""
    vector: OffsetVector
|
nilq/baby-python
|
python
|
import os, re
import pandas as pd
# Directory containing the source Excel workbooks.
BASE_DIR = 'C:/Users/Richard/Desktop/Database/嘉南AD_20200317'

files = os.listdir(BASE_DIR)
# Keep names ending in 'xlsx' (same match as the original f[-4:] slice test).
files_xls = [f for f in files if f.endswith('xlsx')]

# Collect the per-file frames and concatenate once at the end:
# DataFrame.append was removed in pandas 2.0, and repeated appends were
# quadratic anyway.
frames = []
for f in files_xls:
    data = pd.read_excel(BASE_DIR + '/' + f, sheet_name='資料')
    # Report each file containing at least one 'SC' entry in 'B3 Name'.
    for i in data.index:
        if 'SC' in str(data['B3 Name'][i]):
            print(f)
            break
    frames.append(data)

df = pd.concat(frames) if frames else pd.DataFrame()
|
nilq/baby-python
|
python
|
def main():
    """Build an ID3 decision tree from a toy dataset and classify one instance.

    Trains on ten hand-written records with three categorical attributes
    (Salary, Sex, Marital) and a binary target 'Class', pretty-prints the
    resulting tree, then predicts the class of the third record.
    """
    # Toy training set: three categorical attributes plus the target 'Class'.
    dataset = {'Name':['Person 1','Person 2','Person 3','Person 4','Person 5','Person 6','Person 7','Person 8','Person 9','Person 10'],
               'Salary':['Low','Med','Med','Med','Med','High','Low','High','Med','Low'],
               'Sex':['Male','Male','Male','Female','Male','Female','Female','Male','Female','Male'],
               'Marital':['Unmarried','Unmarried','Married','Married','Married','Unmarried','Unmarried','Unmarried','Unmarried','Married'],
               'Class':['No','No','Yes','No','Yes','Yes','No','Yes','Yes','Yes']}
    from Chapter_02 import DecisionTree_ID3 as ID3
    # Encode the raw columns into the frame the tree builder expects.
    df = ID3.preProcess(dataset)
    # Build and display the tree.
    tree = ID3.buildTree(df)
    import pprint
    pprint.pprint(tree)
    # Select a test instance by position; DataFrame.ix was removed from
    # pandas (deprecated in 0.20, gone in 1.0), so use .iloc instead.
    inst = df.iloc[2]
    # Remove its class attribute so the prediction cannot cheat.
    inst.pop('Class')
    # Get prediction.
    prediction = ID3.predict(inst, tree)
    print("Prediction: %s"%prediction[0])


main()
|
nilq/baby-python
|
python
|
from django.db import models
from django.conf import settings
class Post(models.Model):
    """One click/interaction event captured from the front end."""
    ip = models.CharField(max_length=50)  # client IP address
    idUser = models.CharField(max_length=250)  # identifier of the user who clicked
    idClick = models.CharField(max_length=250, primary_key=True)  # unique click id
    classe = models.CharField(max_length=50)  # CSS class / category of the clicked element
    texto = models.TextField(max_length=250)  # text of the clicked element
    current = models.CharField(max_length=250)  # URL the user was on
    href = models.CharField(max_length=250)  # URL the click points to
    timestamp = models.FloatField()  # client-side timestamp (epoch, fractional)
    dateTimestamp = models.IntegerField()  # timestamp truncated to an integer — presumably epoch seconds; confirm against caller
    dateR = models.DateTimeField(auto_now = False, auto_now_add=True)  # server-side receipt time, set once on insert
class Adapters(models.Model):
    """Registry of recommendation adapters and whether each is active."""
    rid = models.CharField(max_length=250,primary_key=True)  # adapter identifier
    # NOTE(review): choices={(1,0)} is a set holding a single (value, label)
    # pair — value 1 labelled 0. Django expects a sequence of (value, label)
    # tuples, e.g. [(0, 'inativo'), (1, 'ativo')]; confirm intended choices.
    ativo = models.IntegerField(default=0, choices={(1,0)})
class RecomendacaoAcessada(models.Model):
    """Join table recording that a recommendation (adapter) was accessed via a click."""
    # NOTE(review): max_length is not a ForeignKey option and is ignored by Django.
    rid = models.ForeignKey(Adapters,max_length=250, on_delete=models.CASCADE)  # adapter that produced the recommendation
    idClick = models.ForeignKey(Post,max_length=250,on_delete=models.CASCADE)  # click through which it was accessed
    idRows = models.AutoField(primary_key=True)  # surrogate key
    date = models.DateTimeField(auto_now=False, auto_now_add=True)  # set once on insert
class RecomendacaoGerada(models.Model):
    """A recommendation generated by an adapter for a given click."""
    rid = models.ForeignKey(Adapters,on_delete=models.CASCADE)  # generating adapter
    idClick = models.CharField(max_length=250)  # click id — plain CharField here, unlike the FK in RecomendacaoAcessada; confirm intent
    url = models.CharField(max_length=250)  # recommended URL
    date = models.DateTimeField(auto_now=False, auto_now_add=True)  # set once on insert
    idFileira = models.AutoField(primary_key=True)  # surrogate key
|
nilq/baby-python
|
python
|
from typing import Optional
from botocore.client import BaseClient
from typing import Dict
from botocore.paginate import Paginator
from botocore.waiter import Waiter
from typing import Union
from typing import List
class Client(BaseClient):
def associate_node(self, ServerName: str, NodeName: str, EngineAttributes: List) -> Dict:
"""
Associates a new node with the server. For more information about how to disassociate a node, see DisassociateNode .
On a Chef server: This command is an alternative to ``knife bootstrap`` .
Example (Chef): ``aws opsworks-cm associate-node --server-name *MyServer* --node-name *MyManagedNode* --engine-attributes "Name=*CHEF_ORGANIZATION* ,Value=default" "Name=*CHEF_NODE_PUBLIC_KEY* ,Value=*public-key-pem* "``
On a Puppet server, this command is an alternative to the ``puppet cert sign`` command that signs a Puppet node CSR.
Example (Chef): ``aws opsworks-cm associate-node --server-name *MyServer* --node-name *MyManagedNode* --engine-attributes "Name=*PUPPET_NODE_CSR* ,Value=*csr-pem* "``
A node can can only be associated with servers that are in a ``HEALTHY`` state. Otherwise, an ``InvalidStateException`` is thrown. A ``ResourceNotFoundException`` is thrown when the server does not exist. A ``ValidationException`` is raised when parameters of the request are not valid. The AssociateNode API call can be integrated into Auto Scaling configurations, AWS Cloudformation templates, or the user data of a server's instance.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/opsworkscm-2016-11-01/AssociateNode>`_
**Request Syntax**
::
response = client.associate_node(
ServerName='string',
NodeName='string',
EngineAttributes=[
{
'Name': 'string',
'Value': 'string'
},
]
)
**Response Syntax**
::
{
'NodeAssociationStatusToken': 'string'
}
**Response Structure**
- *(dict) --*
- **NodeAssociationStatusToken** *(string) --*
Contains a token which can be passed to the ``DescribeNodeAssociationStatus`` API call to get the status of the association request.
:type ServerName: string
:param ServerName: **[REQUIRED]**
The name of the server with which to associate the node.
:type NodeName: string
:param NodeName: **[REQUIRED]**
The name of the node.
:type EngineAttributes: list
:param EngineAttributes: **[REQUIRED]**
Engine attributes used for associating the node.
**Attributes accepted in a AssociateNode request for Chef**
* ``CHEF_ORGANIZATION`` : The Chef organization with which the node is associated. By default only one organization named ``default`` can exist.
* ``CHEF_NODE_PUBLIC_KEY`` : A PEM-formatted public key. This key is required for the ``chef-client`` agent to access the Chef API.
**Attributes accepted in a AssociateNode request for Puppet**
* ``PUPPET_NODE_CSR`` : A PEM-formatted certificate-signing request (CSR) that is created by the node.
- *(dict) --*
A name and value pair that is specific to the engine of the server.
- **Name** *(string) --*
The name of the engine attribute.
- **Value** *(string) --*
The value of the engine attribute.
:rtype: dict
:returns:
"""
pass
def can_paginate(self, operation_name: str = None):
"""
Check if an operation can be paginated.
:type operation_name: string
:param operation_name: The operation name. This is the same name
as the method name on the client. For example, if the
method name is ``create_foo``, and you\'d normally invoke the
operation as ``client.create_foo(**kwargs)``, if the
``create_foo`` operation can be paginated, you can use the
call ``client.get_paginator(\"create_foo\")``.
:return: ``True`` if the operation can be paginated,
``False`` otherwise.
"""
pass
def create_backup(self, ServerName: str, Description: str = None) -> Dict:
"""
Creates an application-level backup of a server. While the server is in the ``BACKING_UP`` state, the server cannot be changed, and no additional backup can be created.
Backups can be created for servers in ``RUNNING`` , ``HEALTHY`` , and ``UNHEALTHY`` states. By default, you can create a maximum of 50 manual backups.
This operation is asynchronous.
A ``LimitExceededException`` is thrown when the maximum number of manual backups is reached. An ``InvalidStateException`` is thrown when the server is not in any of the following states: RUNNING, HEALTHY, or UNHEALTHY. A ``ResourceNotFoundException`` is thrown when the server is not found. A ``ValidationException`` is thrown when parameters of the request are not valid.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/opsworkscm-2016-11-01/CreateBackup>`_
**Request Syntax**
::
response = client.create_backup(
ServerName='string',
Description='string'
)
**Response Syntax**
::
{
'Backup': {
'BackupArn': 'string',
'BackupId': 'string',
'BackupType': 'AUTOMATED'|'MANUAL',
'CreatedAt': datetime(2015, 1, 1),
'Description': 'string',
'Engine': 'string',
'EngineModel': 'string',
'EngineVersion': 'string',
'InstanceProfileArn': 'string',
'InstanceType': 'string',
'KeyPair': 'string',
'PreferredBackupWindow': 'string',
'PreferredMaintenanceWindow': 'string',
'S3DataSize': 123,
'S3DataUrl': 'string',
'S3LogUrl': 'string',
'SecurityGroupIds': [
'string',
],
'ServerName': 'string',
'ServiceRoleArn': 'string',
'Status': 'IN_PROGRESS'|'OK'|'FAILED'|'DELETING',
'StatusDescription': 'string',
'SubnetIds': [
'string',
],
'ToolsVersion': 'string',
'UserArn': 'string'
}
}
**Response Structure**
- *(dict) --*
- **Backup** *(dict) --*
Backup created by request.
- **BackupArn** *(string) --*
The ARN of the backup.
- **BackupId** *(string) --*
The generated ID of the backup. Example: ``myServerName-yyyyMMddHHmmssSSS``
- **BackupType** *(string) --*
The backup type. Valid values are ``automated`` or ``manual`` .
- **CreatedAt** *(datetime) --*
The time stamp when the backup was created in the database. Example: ``2016-07-29T13:38:47.520Z``
- **Description** *(string) --*
A user-provided description for a manual backup. This field is empty for automated backups.
- **Engine** *(string) --*
The engine type that is obtained from the server when the backup is created.
- **EngineModel** *(string) --*
The engine model that is obtained from the server when the backup is created.
- **EngineVersion** *(string) --*
The engine version that is obtained from the server when the backup is created.
- **InstanceProfileArn** *(string) --*
The EC2 instance profile ARN that is obtained from the server when the backup is created. Because this value is stored, you are not required to provide the InstanceProfileArn again if you restore a backup.
- **InstanceType** *(string) --*
The instance type that is obtained from the server when the backup is created.
- **KeyPair** *(string) --*
The key pair that is obtained from the server when the backup is created.
- **PreferredBackupWindow** *(string) --*
The preferred backup period that is obtained from the server when the backup is created.
- **PreferredMaintenanceWindow** *(string) --*
The preferred maintenance period that is obtained from the server when the backup is created.
- **S3DataSize** *(integer) --*
This field is deprecated and is no longer used.
- **S3DataUrl** *(string) --*
This field is deprecated and is no longer used.
- **S3LogUrl** *(string) --*
The Amazon S3 URL of the backup's log file.
- **SecurityGroupIds** *(list) --*
The security group IDs that are obtained from the server when the backup is created.
- *(string) --*
- **ServerName** *(string) --*
The name of the server from which the backup was made.
- **ServiceRoleArn** *(string) --*
The service role ARN that is obtained from the server when the backup is created.
- **Status** *(string) --*
The status of a backup while in progress.
- **StatusDescription** *(string) --*
An informational message about backup status.
- **SubnetIds** *(list) --*
The subnet IDs that are obtained from the server when the backup is created.
- *(string) --*
- **ToolsVersion** *(string) --*
The version of AWS OpsWorks CM-specific tools that is obtained from the server when the backup is created.
- **UserArn** *(string) --*
The IAM user ARN of the requester for manual backups. This field is empty for automated backups.
:type ServerName: string
:param ServerName: **[REQUIRED]**
The name of the server that you want to back up.
:type Description: string
:param Description:
A user-defined description of the backup.
:rtype: dict
:returns:
"""
pass
def create_server(self, ServerName: str, InstanceProfileArn: str, InstanceType: str, ServiceRoleArn: str, AssociatePublicIpAddress: bool = None, DisableAutomatedBackup: bool = None, Engine: str = None, EngineModel: str = None, EngineVersion: str = None, EngineAttributes: List = None, BackupRetentionCount: int = None, KeyPair: str = None, PreferredMaintenanceWindow: str = None, PreferredBackupWindow: str = None, SecurityGroupIds: List = None, SubnetIds: List = None, BackupId: str = None) -> Dict:
"""
Creates and immedately starts a new server. The server is ready to use when it is in the ``HEALTHY`` state. By default, you can create a maximum of 10 servers.
This operation is asynchronous.
A ``LimitExceededException`` is thrown when you have created the maximum number of servers (10). A ``ResourceAlreadyExistsException`` is thrown when a server with the same name already exists in the account. A ``ResourceNotFoundException`` is thrown when you specify a backup ID that is not valid or is for a backup that does not exist. A ``ValidationException`` is thrown when parameters of the request are not valid.
If you do not specify a security group by adding the ``SecurityGroupIds`` parameter, AWS OpsWorks creates a new security group.
*Chef Automate:* The default security group opens the Chef server to the world on TCP port 443. If a KeyName is present, AWS OpsWorks enables SSH access. SSH is also open to the world on TCP port 22.
*Puppet Enterprise:* The default security group opens TCP ports 22, 443, 4433, 8140, 8142, 8143, and 8170. If a KeyName is present, AWS OpsWorks enables SSH access. SSH is also open to the world on TCP port 22.
By default, your server is accessible from any IP address. We recommend that you update your security group rules to allow access from known IP addresses and address ranges only. To edit security group rules, open Security Groups in the navigation pane of the EC2 management console.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/opsworkscm-2016-11-01/CreateServer>`_
**Request Syntax**
::
response = client.create_server(
AssociatePublicIpAddress=True|False,
DisableAutomatedBackup=True|False,
Engine='string',
EngineModel='string',
EngineVersion='string',
EngineAttributes=[
{
'Name': 'string',
'Value': 'string'
},
],
BackupRetentionCount=123,
ServerName='string',
InstanceProfileArn='string',
InstanceType='string',
KeyPair='string',
PreferredMaintenanceWindow='string',
PreferredBackupWindow='string',
SecurityGroupIds=[
'string',
],
ServiceRoleArn='string',
SubnetIds=[
'string',
],
BackupId='string'
)
**Response Syntax**
::
{
'Server': {
'AssociatePublicIpAddress': True|False,
'BackupRetentionCount': 123,
'ServerName': 'string',
'CreatedAt': datetime(2015, 1, 1),
'CloudFormationStackArn': 'string',
'DisableAutomatedBackup': True|False,
'Endpoint': 'string',
'Engine': 'string',
'EngineModel': 'string',
'EngineAttributes': [
{
'Name': 'string',
'Value': 'string'
},
],
'EngineVersion': 'string',
'InstanceProfileArn': 'string',
'InstanceType': 'string',
'KeyPair': 'string',
'MaintenanceStatus': 'SUCCESS'|'FAILED',
'PreferredMaintenanceWindow': 'string',
'PreferredBackupWindow': 'string',
'SecurityGroupIds': [
'string',
],
'ServiceRoleArn': 'string',
'Status': 'BACKING_UP'|'CONNECTION_LOST'|'CREATING'|'DELETING'|'MODIFYING'|'FAILED'|'HEALTHY'|'RUNNING'|'RESTORING'|'SETUP'|'UNDER_MAINTENANCE'|'UNHEALTHY'|'TERMINATED',
'StatusReason': 'string',
'SubnetIds': [
'string',
],
'ServerArn': 'string'
}
}
**Response Structure**
- *(dict) --*
- **Server** *(dict) --*
The server that is created by the request.
- **AssociatePublicIpAddress** *(boolean) --*
Associate a public IP address with a server that you are launching.
- **BackupRetentionCount** *(integer) --*
The number of automated backups to keep.
- **ServerName** *(string) --*
The name of the server.
- **CreatedAt** *(datetime) --*
Time stamp of server creation. Example ``2016-07-29T13:38:47.520Z``
- **CloudFormationStackArn** *(string) --*
The ARN of the CloudFormation stack that was used to create the server.
- **DisableAutomatedBackup** *(boolean) --*
Disables automated backups. The number of stored backups is dependent on the value of PreferredBackupCount.
- **Endpoint** *(string) --*
A DNS name that can be used to access the engine. Example: ``myserver-asdfghjkl.us-east-1.opsworks.io``
- **Engine** *(string) --*
The engine type of the server. Valid values in this release include ``Chef`` and ``Puppet`` .
- **EngineModel** *(string) --*
The engine model of the server. Valid values in this release include ``Monolithic`` for Puppet and ``Single`` for Chef.
- **EngineAttributes** *(list) --*
The response of a createServer() request returns the master credential to access the server in EngineAttributes. These credentials are not stored by AWS OpsWorks CM; they are returned only as part of the result of createServer().
**Attributes returned in a createServer response for Chef**
* ``CHEF_PIVOTAL_KEY`` : A base64-encoded RSA private key that is generated by AWS OpsWorks for Chef Automate. This private key is required to access the Chef API.
* ``CHEF_STARTER_KIT`` : A base64-encoded ZIP file. The ZIP file contains a Chef starter kit, which includes a README, a configuration file, and the required RSA private key. Save this file, unzip it, and then change to the directory where you've unzipped the file contents. From this directory, you can run Knife commands.
**Attributes returned in a createServer response for Puppet**
* ``PUPPET_STARTER_KIT`` : A base64-encoded ZIP file. The ZIP file contains a Puppet starter kit, including a README and a required private key. Save this file, unzip it, and then change to the directory where you've unzipped the file contents.
* ``PUPPET_ADMIN_PASSWORD`` : An administrator password that you can use to sign in to the Puppet Enterprise console after the server is online.
- *(dict) --*
A name and value pair that is specific to the engine of the server.
- **Name** *(string) --*
The name of the engine attribute.
- **Value** *(string) --*
The value of the engine attribute.
- **EngineVersion** *(string) --*
The engine version of the server. For a Chef server, the valid value for EngineVersion is currently ``12`` . For a Puppet server, the valid value is ``2017`` .
- **InstanceProfileArn** *(string) --*
The instance profile ARN of the server.
- **InstanceType** *(string) --*
The instance type for the server, as specified in the CloudFormation stack. This might not be the same instance type that is shown in the EC2 console.
- **KeyPair** *(string) --*
The key pair associated with the server.
- **MaintenanceStatus** *(string) --*
The status of the most recent server maintenance run. Shows ``SUCCESS`` or ``FAILED`` .
- **PreferredMaintenanceWindow** *(string) --*
The preferred maintenance period specified for the server.
- **PreferredBackupWindow** *(string) --*
The preferred backup period specified for the server.
- **SecurityGroupIds** *(list) --*
The security group IDs for the server, as specified in the CloudFormation stack. These might not be the same security groups that are shown in the EC2 console.
- *(string) --*
- **ServiceRoleArn** *(string) --*
The service role ARN used to create the server.
- **Status** *(string) --*
The server's status. This field displays the states of actions in progress, such as creating, running, or backing up the server, as well as the server's health state.
- **StatusReason** *(string) --*
Depending on the server status, this field has either a human-readable message (such as a create or backup error), or an escaped block of JSON (used for health check results).
- **SubnetIds** *(list) --*
The subnet IDs specified in a CreateServer request.
- *(string) --*
- **ServerArn** *(string) --*
The ARN of the server.
:type AssociatePublicIpAddress: boolean
:param AssociatePublicIpAddress:
Associate a public IP address with a server that you are launching. Valid values are ``true`` or ``false`` . The default value is ``true`` .
:type DisableAutomatedBackup: boolean
:param DisableAutomatedBackup:
Enable or disable scheduled backups. Valid values are ``true`` or ``false`` . The default value is ``true`` .
:type Engine: string
:param Engine:
The configuration management engine to use. Valid values include ``Chef`` and ``Puppet`` .
:type EngineModel: string
:param EngineModel:
The engine model of the server. Valid values in this release include ``Monolithic`` for Puppet and ``Single`` for Chef.
:type EngineVersion: string
:param EngineVersion:
The major release version of the engine that you want to use. For a Chef server, the valid value for EngineVersion is currently ``12`` . For a Puppet server, the valid value is ``2017`` .
:type EngineAttributes: list
:param EngineAttributes:
Optional engine attributes on a specified server.
**Attributes accepted in a Chef createServer request:**
* ``CHEF_PIVOTAL_KEY`` : A base64-encoded RSA public key. The corresponding private key is required to access the Chef API. When no CHEF_PIVOTAL_KEY is set, a private key is generated and returned in the response.
* ``CHEF_DELIVERY_ADMIN_PASSWORD`` : The password for the administrative user in the Chef Automate GUI. The password length is a minimum of eight characters, and a maximum of 32. The password can contain letters, numbers, and special characters (!/@#$%^&+=_). The password must contain at least one lower case letter, one upper case letter, one number, and one special character. When no CHEF_DELIVERY_ADMIN_PASSWORD is set, one is generated and returned in the response.
**Attributes accepted in a Puppet createServer request:**
* ``PUPPET_ADMIN_PASSWORD`` : To work with the Puppet Enterprise console, a password must use ASCII characters.
* ``PUPPET_R10K_REMOTE`` : The r10k remote is the URL of your control repository (for example, ssh://git@your.git-repo.com:user/control-repo.git). Specifying an r10k remote opens TCP port 8170.
* ``PUPPET_R10K_PRIVATE_KEY`` : If you are using a private Git repository, add PUPPET_R10K_PRIVATE_KEY to specify an SSH URL and a PEM-encoded private SSH key.
- *(dict) --*
A name and value pair that is specific to the engine of the server.
- **Name** *(string) --*
The name of the engine attribute.
- **Value** *(string) --*
The value of the engine attribute.
:type BackupRetentionCount: integer
:param BackupRetentionCount:
The number of automated backups that you want to keep. Whenever a new backup is created, AWS OpsWorks CM deletes the oldest backups if this number is exceeded. The default value is ``1`` .
:type ServerName: string
:param ServerName: **[REQUIRED]**
The name of the server. The server name must be unique within your AWS account, within each region. Server names must start with a letter; then letters, numbers, or hyphens (-) are allowed, up to a maximum of 40 characters.
:type InstanceProfileArn: string
:param InstanceProfileArn: **[REQUIRED]**
The ARN of the instance profile that your Amazon EC2 instances use. Although the AWS OpsWorks console typically creates the instance profile for you, if you are using API commands instead, run the service-role-creation.yaml AWS CloudFormation template, located at https://s3.amazonaws.com/opsworks-cm-us-east-1-prod-default-assets/misc/opsworks-cm-roles.yaml. This template creates a CloudFormation stack that includes the instance profile you need.
:type InstanceType: string
:param InstanceType: **[REQUIRED]**
The Amazon EC2 instance type to use. For example, ``m4.large`` . Recommended instance types include ``t2.medium`` and greater, ``m4.*`` , or ``c4.xlarge`` and greater.
:type KeyPair: string
:param KeyPair:
The Amazon EC2 key pair to set for the instance. This parameter is optional; if desired, you may specify this parameter to connect to your instances by using SSH.
:type PreferredMaintenanceWindow: string
:param PreferredMaintenanceWindow:
The start time for a one-hour period each week during which AWS OpsWorks CM performs maintenance on the instance. Valid values must be specified in the following format: ``DDD:HH:MM`` . The specified time is in coordinated universal time (UTC). The default value is a random one-hour period on Tuesday, Wednesday, or Friday. See ``TimeWindowDefinition`` for more information.
**Example:** ``Mon:08:00`` , which represents a start time of every Monday at 08:00 UTC. (8:00 a.m.)
:type PreferredBackupWindow: string
:param PreferredBackupWindow:
The start time for a one-hour period during which AWS OpsWorks CM backs up application-level data on your server if automated backups are enabled. Valid values must be specified in one of the following formats:
* ``HH:MM`` for daily backups
* ``DDD:HH:MM`` for weekly backups
The specified time is in coordinated universal time (UTC). The default value is a random, daily start time.
**Example:** ``08:00`` , which represents a daily start time of 08:00 UTC.
**Example:** ``Mon:08:00`` , which represents a start time of every Monday at 08:00 UTC. (8:00 a.m.)
:type SecurityGroupIds: list
:param SecurityGroupIds:
A list of security group IDs to attach to the Amazon EC2 instance. If you add this parameter, the specified security groups must be within the VPC that is specified by ``SubnetIds`` .
If you do not specify this parameter, AWS OpsWorks CM creates one new security group that uses TCP ports 22 and 443, open to 0.0.0.0/0 (everyone).
- *(string) --*
:type ServiceRoleArn: string
:param ServiceRoleArn: **[REQUIRED]**
The service role that the AWS OpsWorks CM service backend uses to work with your account. Although the AWS OpsWorks management console typically creates the service role for you, if you are using the AWS CLI or API commands, run the service-role-creation.yaml AWS CloudFormation template, located at https://s3.amazonaws.com/opsworks-cm-us-east-1-prod-default-assets/misc/opsworks-cm-roles.yaml. This template creates a CloudFormation stack that includes the service role and instance profile that you need.
:type SubnetIds: list
:param SubnetIds:
The IDs of subnets in which to launch the server EC2 instance.
Amazon EC2-Classic customers: This field is required. All servers must run within a VPC. The VPC must have \"Auto Assign Public IP\" enabled.
EC2-VPC customers: This field is optional. If you do not specify subnet IDs, your EC2 instances are created in a default subnet that is selected by Amazon EC2. If you specify subnet IDs, the VPC must have \"Auto Assign Public IP\" enabled.
For more information about supported Amazon EC2 platforms, see `Supported Platforms <https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-supported-platforms.html>`__ .
- *(string) --*
:type BackupId: string
:param BackupId:
If you specify this field, AWS OpsWorks CM creates the server by using the backup represented by BackupId.
:rtype: dict
:returns:
"""
pass
def delete_backup(self, BackupId: str) -> Dict:
    """
    Deletes a backup. You can delete both manual and automated backups. This operation is asynchronous.
    An ``InvalidStateException`` is thrown when a backup deletion is already in progress. A ``ResourceNotFoundException`` is thrown when the backup does not exist. A ``ValidationException`` is thrown when parameters of the request are not valid.
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/opsworkscm-2016-11-01/DeleteBackup>`_
    **Request Syntax**
    ::
    response = client.delete_backup(
    BackupId='string'
    )
    **Response Syntax**
    ::
    {}
    **Response Structure**
    - *(dict) --*
    :type BackupId: string
    :param BackupId: **[REQUIRED]**
    The ID of the backup to delete. Run the DescribeBackups command to get a list of backup IDs. Backup IDs are in the format ``ServerName-yyyyMMddHHmmssSSS`` .
    :rtype: dict
    :returns:
    """
    # Documentation-only stub: the docstring above is generated from the AWS
    # API model; the body is intentionally empty.
    pass
def delete_server(self, ServerName: str) -> Dict:
    """
    Deletes the server and the underlying AWS CloudFormation stacks (including the server's EC2 instance). When you run this command, the server state is updated to ``DELETING`` . After the server is deleted, it is no longer returned by ``DescribeServer`` requests. If the AWS CloudFormation stack cannot be deleted, the server cannot be deleted.
    This operation is asynchronous.
    An ``InvalidStateException`` is thrown when a server deletion is already in progress. A ``ResourceNotFoundException`` is thrown when the server does not exist. A ``ValidationException`` is raised when parameters of the request are not valid.
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/opsworkscm-2016-11-01/DeleteServer>`_
    **Request Syntax**
    ::
    response = client.delete_server(
    ServerName='string'
    )
    **Response Syntax**
    ::
    {}
    **Response Structure**
    - *(dict) --*
    :type ServerName: string
    :param ServerName: **[REQUIRED]**
    The ID of the server to delete.
    :rtype: dict
    :returns:
    """
    # Documentation-only stub: the docstring above is generated from the AWS
    # API model; the body is intentionally empty.
    pass
def describe_account_attributes(self) -> Dict:
    """
    Describes your account attributes, and creates requests to increase limits before they are reached or exceeded.
    This operation is synchronous.
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/opsworkscm-2016-11-01/DescribeAccountAttributes>`_
    **Request Syntax**
    ::
    response = client.describe_account_attributes()
    **Response Syntax**
    ::
    {
    'Attributes': [
    {
    'Name': 'string',
    'Maximum': 123,
    'Used': 123
    },
    ]
    }
    **Response Structure**
    - *(dict) --*
    - **Attributes** *(list) --*
    The attributes that are currently set for the account.
    - *(dict) --*
    Stores account attributes.
    - **Name** *(string) --*
    The attribute name. The following are supported attribute names.
    * *ServerLimit:* The number of current servers/maximum number of servers allowed. By default, you can have a maximum of 10 servers.
    * *ManualBackupLimit:* The number of current manual backups/maximum number of backups allowed. By default, you can have a maximum of 50 manual backups saved.
    - **Maximum** *(integer) --*
    The maximum allowed value.
    - **Used** *(integer) --*
    The current usage, such as the current number of servers that are associated with the account.
    :rtype: dict
    :returns:
    """
    # Documentation-only stub: the docstring above is generated from the AWS
    # API model; the body is intentionally empty.
    pass
def describe_backups(self, BackupId: str = None, ServerName: str = None, NextToken: str = None, MaxResults: int = None) -> Dict:
    """
    Describes backups. The results are ordered by time, with newest backups first. If you do not specify a BackupId or ServerName, the command returns all backups.
    This operation is synchronous.
    A ``ResourceNotFoundException`` is thrown when the backup does not exist. A ``ValidationException`` is raised when parameters of the request are not valid.
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/opsworkscm-2016-11-01/DescribeBackups>`_
    **Request Syntax**
    ::
    response = client.describe_backups(
    BackupId='string',
    ServerName='string',
    NextToken='string',
    MaxResults=123
    )
    **Response Syntax**
    ::
    {
    'Backups': [
    {
    'BackupArn': 'string',
    'BackupId': 'string',
    'BackupType': 'AUTOMATED'|'MANUAL',
    'CreatedAt': datetime(2015, 1, 1),
    'Description': 'string',
    'Engine': 'string',
    'EngineModel': 'string',
    'EngineVersion': 'string',
    'InstanceProfileArn': 'string',
    'InstanceType': 'string',
    'KeyPair': 'string',
    'PreferredBackupWindow': 'string',
    'PreferredMaintenanceWindow': 'string',
    'S3DataSize': 123,
    'S3DataUrl': 'string',
    'S3LogUrl': 'string',
    'SecurityGroupIds': [
    'string',
    ],
    'ServerName': 'string',
    'ServiceRoleArn': 'string',
    'Status': 'IN_PROGRESS'|'OK'|'FAILED'|'DELETING',
    'StatusDescription': 'string',
    'SubnetIds': [
    'string',
    ],
    'ToolsVersion': 'string',
    'UserArn': 'string'
    },
    ],
    'NextToken': 'string'
    }
    **Response Structure**
    - *(dict) --*
    - **Backups** *(list) --*
    Contains the response to a ``DescribeBackups`` request.
    - *(dict) --*
    Describes a single backup.
    - **BackupArn** *(string) --*
    The ARN of the backup.
    - **BackupId** *(string) --*
    The generated ID of the backup. Example: ``myServerName-yyyyMMddHHmmssSSS``
    - **BackupType** *(string) --*
    The backup type. Valid values are ``automated`` or ``manual`` .
    - **CreatedAt** *(datetime) --*
    The time stamp when the backup was created in the database. Example: ``2016-07-29T13:38:47.520Z``
    - **Description** *(string) --*
    A user-provided description for a manual backup. This field is empty for automated backups.
    - **Engine** *(string) --*
    The engine type that is obtained from the server when the backup is created.
    - **EngineModel** *(string) --*
    The engine model that is obtained from the server when the backup is created.
    - **EngineVersion** *(string) --*
    The engine version that is obtained from the server when the backup is created.
    - **InstanceProfileArn** *(string) --*
    The EC2 instance profile ARN that is obtained from the server when the backup is created. Because this value is stored, you are not required to provide the InstanceProfileArn again if you restore a backup.
    - **InstanceType** *(string) --*
    The instance type that is obtained from the server when the backup is created.
    - **KeyPair** *(string) --*
    The key pair that is obtained from the server when the backup is created.
    - **PreferredBackupWindow** *(string) --*
    The preferred backup period that is obtained from the server when the backup is created.
    - **PreferredMaintenanceWindow** *(string) --*
    The preferred maintenance period that is obtained from the server when the backup is created.
    - **S3DataSize** *(integer) --*
    This field is deprecated and is no longer used.
    - **S3DataUrl** *(string) --*
    This field is deprecated and is no longer used.
    - **S3LogUrl** *(string) --*
    The Amazon S3 URL of the backup's log file.
    - **SecurityGroupIds** *(list) --*
    The security group IDs that are obtained from the server when the backup is created.
    - *(string) --*
    - **ServerName** *(string) --*
    The name of the server from which the backup was made.
    - **ServiceRoleArn** *(string) --*
    The service role ARN that is obtained from the server when the backup is created.
    - **Status** *(string) --*
    The status of a backup while in progress.
    - **StatusDescription** *(string) --*
    An informational message about backup status.
    - **SubnetIds** *(list) --*
    The subnet IDs that are obtained from the server when the backup is created.
    - *(string) --*
    - **ToolsVersion** *(string) --*
    The version of AWS OpsWorks CM-specific tools that is obtained from the server when the backup is created.
    - **UserArn** *(string) --*
    The IAM user ARN of the requester for manual backups. This field is empty for automated backups.
    - **NextToken** *(string) --*
    This is not currently implemented for ``DescribeBackups`` requests.
    :type BackupId: string
    :param BackupId:
    Describes a single backup.
    :type ServerName: string
    :param ServerName:
    Returns backups for the server with the specified ServerName.
    :type NextToken: string
    :param NextToken:
    This is not currently implemented for ``DescribeBackups`` requests.
    :type MaxResults: integer
    :param MaxResults:
    This is not currently implemented for ``DescribeBackups`` requests.
    :rtype: dict
    :returns:
    """
    # Documentation-only stub: the docstring above is generated from the AWS
    # API model; the body is intentionally empty.
    pass
def describe_events(self, ServerName: str, NextToken: str = None, MaxResults: int = None) -> Dict:
    """
    Describes events for a specified server. Results are ordered by time, with newest events first.
    This operation is synchronous.
    A ``ResourceNotFoundException`` is thrown when the server does not exist. A ``ValidationException`` is raised when parameters of the request are not valid.
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/opsworkscm-2016-11-01/DescribeEvents>`_
    **Request Syntax**
    ::
    response = client.describe_events(
    ServerName='string',
    NextToken='string',
    MaxResults=123
    )
    **Response Syntax**
    ::
    {
    'ServerEvents': [
    {
    'CreatedAt': datetime(2015, 1, 1),
    'ServerName': 'string',
    'Message': 'string',
    'LogUrl': 'string'
    },
    ],
    'NextToken': 'string'
    }
    **Response Structure**
    - *(dict) --*
    - **ServerEvents** *(list) --*
    Contains the response to a ``DescribeEvents`` request.
    - *(dict) --*
    An event that is related to the server, such as the start of maintenance or backup.
    - **CreatedAt** *(datetime) --*
    The time when the event occurred.
    - **ServerName** *(string) --*
    The name of the server on or for which the event occurred.
    - **Message** *(string) --*
    A human-readable informational or status message.
    - **LogUrl** *(string) --*
    The Amazon S3 URL of the event's log file.
    - **NextToken** *(string) --*
    NextToken is a string that is returned in some command responses. It indicates that not all entries have been returned, and that you must run at least one more request to get remaining items. To get remaining results, call ``DescribeEvents`` again, and assign the token from the previous results as the value of the ``nextToken`` parameter. If there are no more results, the response object's ``nextToken`` parameter value is ``null`` . Setting a ``nextToken`` value that was not returned in your previous results causes an ``InvalidNextTokenException`` to occur.
    :type ServerName: string
    :param ServerName: **[REQUIRED]**
    The name of the server for which you want to view events.
    :type NextToken: string
    :param NextToken:
    NextToken is a string that is returned in some command responses. It indicates that not all entries have been returned, and that you must run at least one more request to get remaining items. To get remaining results, call ``DescribeEvents`` again, and assign the token from the previous results as the value of the ``nextToken`` parameter. If there are no more results, the response object's ``nextToken`` parameter value is ``null`` . Setting a ``nextToken`` value that was not returned in your previous results causes an ``InvalidNextTokenException`` to occur.
    :type MaxResults: integer
    :param MaxResults:
    To receive a paginated response, use this parameter to specify the maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a ``NextToken`` value that you can assign to the ``NextToken`` request parameter to get the next set of results.
    :rtype: dict
    :returns:
    """
    # Documentation-only stub: the docstring above is generated from the AWS
    # API model; the body is intentionally empty. (Stray backslash-escaped
    # apostrophe in the generated docstring normalized to a plain apostrophe.)
    pass
def describe_node_association_status(self, NodeAssociationStatusToken: str, ServerName: str) -> Dict:
    """
    Returns the current status of an existing association or disassociation request.
    A ``ResourceNotFoundException`` is thrown when no recent association or disassociation request with the specified token is found, or when the server does not exist. A ``ValidationException`` is raised when parameters of the request are not valid.
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/opsworkscm-2016-11-01/DescribeNodeAssociationStatus>`_
    **Request Syntax**
    ::
    response = client.describe_node_association_status(
    NodeAssociationStatusToken='string',
    ServerName='string'
    )
    **Response Syntax**
    ::
    {
    'NodeAssociationStatus': 'SUCCESS'|'FAILED'|'IN_PROGRESS',
    'EngineAttributes': [
    {
    'Name': 'string',
    'Value': 'string'
    },
    ]
    }
    **Response Structure**
    - *(dict) --*
    - **NodeAssociationStatus** *(string) --*
    The status of the association or disassociation request.
    **Possible values:**
    * ``SUCCESS`` : The association or disassociation succeeded.
    * ``FAILED`` : The association or disassociation failed.
    * ``IN_PROGRESS`` : The association or disassociation is still in progress.
    - **EngineAttributes** *(list) --*
    Attributes specific to the node association. In Puppet, the attribute PUPPET_NODE_CERT contains the signed certificate (the result of the CSR).
    - *(dict) --*
    A name and value pair that is specific to the engine of the server.
    - **Name** *(string) --*
    The name of the engine attribute.
    - **Value** *(string) --*
    The value of the engine attribute.
    :type NodeAssociationStatusToken: string
    :param NodeAssociationStatusToken: **[REQUIRED]**
    The token returned in either the AssociateNodeResponse or the DisassociateNodeResponse.
    :type ServerName: string
    :param ServerName: **[REQUIRED]**
    The name of the server from which to disassociate the node.
    :rtype: dict
    :returns:
    """
    # Documentation-only stub: the docstring above is generated from the AWS
    # API model; the body is intentionally empty. (Docstring typo "attibute"
    # corrected to "attribute".)
    pass
def describe_servers(self, ServerName: str = None, NextToken: str = None, MaxResults: int = None) -> Dict:
    """
    Lists all configuration management servers that are identified with your account. Only the stored results from Amazon DynamoDB are returned. AWS OpsWorks CM does not query other services.
    This operation is synchronous.
    A ``ResourceNotFoundException`` is thrown when the server does not exist. A ``ValidationException`` is raised when parameters of the request are not valid.
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/opsworkscm-2016-11-01/DescribeServers>`_
    **Request Syntax**
    ::
    response = client.describe_servers(
    ServerName='string',
    NextToken='string',
    MaxResults=123
    )
    **Response Syntax**
    ::
    {
    'Servers': [
    {
    'AssociatePublicIpAddress': True|False,
    'BackupRetentionCount': 123,
    'ServerName': 'string',
    'CreatedAt': datetime(2015, 1, 1),
    'CloudFormationStackArn': 'string',
    'DisableAutomatedBackup': True|False,
    'Endpoint': 'string',
    'Engine': 'string',
    'EngineModel': 'string',
    'EngineAttributes': [
    {
    'Name': 'string',
    'Value': 'string'
    },
    ],
    'EngineVersion': 'string',
    'InstanceProfileArn': 'string',
    'InstanceType': 'string',
    'KeyPair': 'string',
    'MaintenanceStatus': 'SUCCESS'|'FAILED',
    'PreferredMaintenanceWindow': 'string',
    'PreferredBackupWindow': 'string',
    'SecurityGroupIds': [
    'string',
    ],
    'ServiceRoleArn': 'string',
    'Status': 'BACKING_UP'|'CONNECTION_LOST'|'CREATING'|'DELETING'|'MODIFYING'|'FAILED'|'HEALTHY'|'RUNNING'|'RESTORING'|'SETUP'|'UNDER_MAINTENANCE'|'UNHEALTHY'|'TERMINATED',
    'StatusReason': 'string',
    'SubnetIds': [
    'string',
    ],
    'ServerArn': 'string'
    },
    ],
    'NextToken': 'string'
    }
    **Response Structure**
    - *(dict) --*
    - **Servers** *(list) --*
    Contains the response to a ``DescribeServers`` request.
    *For Puppet Server:* ``DescribeServersResponse$Servers$EngineAttributes`` contains PUPPET_API_CA_CERT. This is the PEM-encoded CA certificate that is used by the Puppet API over TCP port number 8140. The CA certificate is also used to sign node certificates.
    - *(dict) --*
    Describes a configuration management server.
    - **AssociatePublicIpAddress** *(boolean) --*
    Associate a public IP address with a server that you are launching.
    - **BackupRetentionCount** *(integer) --*
    The number of automated backups to keep.
    - **ServerName** *(string) --*
    The name of the server.
    - **CreatedAt** *(datetime) --*
    Time stamp of server creation. Example ``2016-07-29T13:38:47.520Z``
    - **CloudFormationStackArn** *(string) --*
    The ARN of the CloudFormation stack that was used to create the server.
    - **DisableAutomatedBackup** *(boolean) --*
    Disables automated backups. The number of stored backups is dependent on the value of PreferredBackupCount.
    - **Endpoint** *(string) --*
    A DNS name that can be used to access the engine. Example: ``myserver-asdfghjkl.us-east-1.opsworks.io``
    - **Engine** *(string) --*
    The engine type of the server. Valid values in this release include ``Chef`` and ``Puppet`` .
    - **EngineModel** *(string) --*
    The engine model of the server. Valid values in this release include ``Monolithic`` for Puppet and ``Single`` for Chef.
    - **EngineAttributes** *(list) --*
    The response of a createServer() request returns the master credential to access the server in EngineAttributes. These credentials are not stored by AWS OpsWorks CM; they are returned only as part of the result of createServer().
    **Attributes returned in a createServer response for Chef**
    * ``CHEF_PIVOTAL_KEY`` : A base64-encoded RSA private key that is generated by AWS OpsWorks for Chef Automate. This private key is required to access the Chef API.
    * ``CHEF_STARTER_KIT`` : A base64-encoded ZIP file. The ZIP file contains a Chef starter kit, which includes a README, a configuration file, and the required RSA private key. Save this file, unzip it, and then change to the directory where you've unzipped the file contents. From this directory, you can run Knife commands.
    **Attributes returned in a createServer response for Puppet**
    * ``PUPPET_STARTER_KIT`` : A base64-encoded ZIP file. The ZIP file contains a Puppet starter kit, including a README and a required private key. Save this file, unzip it, and then change to the directory where you've unzipped the file contents.
    * ``PUPPET_ADMIN_PASSWORD`` : An administrator password that you can use to sign in to the Puppet Enterprise console after the server is online.
    - *(dict) --*
    A name and value pair that is specific to the engine of the server.
    - **Name** *(string) --*
    The name of the engine attribute.
    - **Value** *(string) --*
    The value of the engine attribute.
    - **EngineVersion** *(string) --*
    The engine version of the server. For a Chef server, the valid value for EngineVersion is currently ``12`` . For a Puppet server, the valid value is ``2017`` .
    - **InstanceProfileArn** *(string) --*
    The instance profile ARN of the server.
    - **InstanceType** *(string) --*
    The instance type for the server, as specified in the CloudFormation stack. This might not be the same instance type that is shown in the EC2 console.
    - **KeyPair** *(string) --*
    The key pair associated with the server.
    - **MaintenanceStatus** *(string) --*
    The status of the most recent server maintenance run. Shows ``SUCCESS`` or ``FAILED`` .
    - **PreferredMaintenanceWindow** *(string) --*
    The preferred maintenance period specified for the server.
    - **PreferredBackupWindow** *(string) --*
    The preferred backup period specified for the server.
    - **SecurityGroupIds** *(list) --*
    The security group IDs for the server, as specified in the CloudFormation stack. These might not be the same security groups that are shown in the EC2 console.
    - *(string) --*
    - **ServiceRoleArn** *(string) --*
    The service role ARN used to create the server.
    - **Status** *(string) --*
    The server's status. This field displays the states of actions in progress, such as creating, running, or backing up the server, as well as the server's health state.
    - **StatusReason** *(string) --*
    Depending on the server status, this field has either a human-readable message (such as a create or backup error), or an escaped block of JSON (used for health check results).
    - **SubnetIds** *(list) --*
    The subnet IDs specified in a CreateServer request.
    - *(string) --*
    - **ServerArn** *(string) --*
    The ARN of the server.
    - **NextToken** *(string) --*
    This is not currently implemented for ``DescribeServers`` requests.
    :type ServerName: string
    :param ServerName:
    Describes the server with the specified ServerName.
    :type NextToken: string
    :param NextToken:
    This is not currently implemented for ``DescribeServers`` requests.
    :type MaxResults: integer
    :param MaxResults:
    This is not currently implemented for ``DescribeServers`` requests.
    :rtype: dict
    :returns:
    """
    # Documentation-only stub: the docstring above is generated from the AWS
    # API model; the body is intentionally empty.
    pass
def disassociate_node(self, ServerName: str, NodeName: str, EngineAttributes: List = None) -> Dict:
    """
    Disassociates a node from an AWS OpsWorks CM server, and removes the node from the server's managed nodes. After a node is disassociated, the node key pair is no longer valid for accessing the configuration manager's API. For more information about how to associate a node, see AssociateNode .
    A node can only be disassociated from a server that is in a ``HEALTHY`` state. Otherwise, an ``InvalidStateException`` is thrown. A ``ResourceNotFoundException`` is thrown when the server does not exist. A ``ValidationException`` is raised when parameters of the request are not valid.
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/opsworkscm-2016-11-01/DisassociateNode>`_
    **Request Syntax**
    ::
    response = client.disassociate_node(
    ServerName='string',
    NodeName='string',
    EngineAttributes=[
    {
    'Name': 'string',
    'Value': 'string'
    },
    ]
    )
    **Response Syntax**
    ::
    {
    'NodeAssociationStatusToken': 'string'
    }
    **Response Structure**
    - *(dict) --*
    - **NodeAssociationStatusToken** *(string) --*
    Contains a token which can be passed to the ``DescribeNodeAssociationStatus`` API call to get the status of the disassociation request.
    :type ServerName: string
    :param ServerName: **[REQUIRED]**
    The name of the server from which to disassociate the node.
    :type NodeName: string
    :param NodeName: **[REQUIRED]**
    The name of the client node.
    :type EngineAttributes: list
    :param EngineAttributes:
    Engine attributes that are used for disassociating the node. No attributes are required for Puppet.
    **Attributes required in a DisassociateNode request for Chef**
    * ``CHEF_ORGANIZATION`` : The Chef organization with which the node was associated. By default only one organization named ``default`` can exist.
    - *(dict) --*
    A name and value pair that is specific to the engine of the server.
    - **Name** *(string) --*
    The name of the engine attribute.
    - **Value** *(string) --*
    The value of the engine attribute.
    :rtype: dict
    :returns:
    """
    # Documentation-only stub: the docstring above is generated from the AWS
    # API model; the body is intentionally empty. (Docstring duplicated word
    # "can can" corrected to "can".)
    pass
def export_server_engine_attribute(self, ExportAttributeName: str, ServerName: str, InputAttributes: List = None) -> Dict:
    """
    Exports a specified server engine attribute as a base64-encoded string. For example, you can export user data that you can use in EC2 to associate nodes with a server.
    This operation is synchronous.
    A ``ValidationException`` is raised when parameters of the request are not valid. A ``ResourceNotFoundException`` is thrown when the server does not exist. An ``InvalidStateException`` is thrown when the server is in any of the following states: CREATING, TERMINATED, FAILED or DELETING.
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/opsworkscm-2016-11-01/ExportServerEngineAttribute>`_
    **Request Syntax**
    ::
    response = client.export_server_engine_attribute(
    ExportAttributeName='string',
    ServerName='string',
    InputAttributes=[
    {
    'Name': 'string',
    'Value': 'string'
    },
    ]
    )
    **Response Syntax**
    ::
    {
    'EngineAttribute': {
    'Name': 'string',
    'Value': 'string'
    },
    'ServerName': 'string'
    }
    **Response Structure**
    - *(dict) --*
    - **EngineAttribute** *(dict) --*
    The requested engine attribute pair with attribute name and value.
    - **Name** *(string) --*
    The name of the engine attribute.
    - **Value** *(string) --*
    The value of the engine attribute.
    - **ServerName** *(string) --*
    The server name used in the request.
    :type ExportAttributeName: string
    :param ExportAttributeName: **[REQUIRED]**
    The name of the export attribute. Currently, the supported export attribute is ``Userdata`` . This exports a user data script that includes parameters and values provided in the ``InputAttributes`` list.
    :type ServerName: string
    :param ServerName: **[REQUIRED]**
    The name of the server from which you are exporting the attribute.
    :type InputAttributes: list
    :param InputAttributes:
    The list of engine attributes. The list type is ``EngineAttribute`` . An ``EngineAttribute`` list item is a pair that includes an attribute name and its value. For the ``Userdata`` ExportAttributeName, the following are supported engine attribute names.
    * **RunList** In Chef, a list of roles or recipes that are run in the specified order. In Puppet, this parameter is ignored.
    * **OrganizationName** In Chef, an organization name. AWS OpsWorks for Chef Automate always creates the organization ``default`` . In Puppet, this parameter is ignored.
    * **NodeEnvironment** In Chef, a node environment (for example, development, staging, or one-box). In Puppet, this parameter is ignored.
    * **NodeClientVersion** In Chef, the version of the Chef engine (three numbers separated by dots, such as 13.8.5). If this attribute is empty, OpsWorks for Chef Automate uses the most current version. In Puppet, this parameter is ignored.
    - *(dict) --*
    A name and value pair that is specific to the engine of the server.
    - **Name** *(string) --*
    The name of the engine attribute.
    - **Value** *(string) --*
    The value of the engine attribute.
    :rtype: dict
    :returns:
    """
    # Documentation-only stub: the docstring above is generated from the AWS
    # API model; the body is intentionally empty.
    pass
def generate_presigned_url(self, ClientMethod: str = None, Params: Dict = None, ExpiresIn: int = None, HttpMethod: str = None):
    """
    Generate a presigned url given a client, its method, and arguments
    :type ClientMethod: string
    :param ClientMethod: The client method to presign for
    :type Params: dict
    :param Params: The parameters normally passed to
    ``ClientMethod``.
    :type ExpiresIn: int
    :param ExpiresIn: The number of seconds the presigned url is valid
    for. By default it expires in an hour (3600 seconds)
    :type HttpMethod: string
    :param HttpMethod: The http method to use on the generated url. By
    default, the http method is whatever is used in the method's model.
    :returns: The presigned url
    """
    # Documentation-only stub: generic botocore client helper; the body is
    # intentionally empty. (Stray backslash-escaped apostrophe in the
    # docstring normalized to a plain apostrophe.)
    pass
def get_paginator(self, operation_name: str = None) -> Paginator:
    """
    Create a paginator for an operation.
    :type operation_name: string
    :param operation_name: The operation name. This is the same name
    as the method name on the client. For example, if the
    method name is ``create_foo``, and you'd normally invoke the
    operation as ``client.create_foo(**kwargs)``, if the
    ``create_foo`` operation can be paginated, you can use the
    call ``client.get_paginator("create_foo")``.
    :raise OperationNotPageableError: Raised if the operation is not
    pageable. You can use the ``client.can_paginate`` method to
    check if an operation is pageable.
    :rtype: L{botocore.paginate.Paginator}
    :return: A paginator object.
    """
    # Documentation-only stub: generic botocore client helper; the body is
    # intentionally empty. (Stray backslash-escaped quotes in the docstring
    # normalized.)
    pass
def get_waiter(self, waiter_name: str = None) -> Waiter:
    """
    Returns an object that can wait for some condition.
    :type waiter_name: str
    :param waiter_name: The name of the waiter to get. See the waiters
    section of the service docs for a list of available waiters.
    :returns: The specified waiter object.
    :rtype: botocore.waiter.Waiter
    """
    # Documentation-only stub: generic botocore client helper; the body is
    # intentionally empty.
    pass
def restore_server(self, BackupId: str, ServerName: str, InstanceType: str = None, KeyPair: str = None) -> Dict:
    """
    Restores a backup to a server that is in a ``CONNECTION_LOST`` , ``HEALTHY`` , ``RUNNING`` , ``UNHEALTHY`` , or ``TERMINATED`` state. When you run RestoreServer, the server's EC2 instance is deleted, and a new EC2 instance is configured. RestoreServer maintains the existing server endpoint, so configuration management of the server's client devices (nodes) should continue to work.
    This operation is asynchronous.
    An ``InvalidStateException`` is thrown when the server is not in a valid state. A ``ResourceNotFoundException`` is thrown when the server does not exist. A ``ValidationException`` is raised when parameters of the request are not valid.
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/opsworkscm-2016-11-01/RestoreServer>`_
    **Request Syntax**
    ::
    response = client.restore_server(
    BackupId='string',
    ServerName='string',
    InstanceType='string',
    KeyPair='string'
    )
    **Response Syntax**
    ::
    {}
    **Response Structure**
    - *(dict) --*
    :type BackupId: string
    :param BackupId: **[REQUIRED]**
    The ID of the backup that you want to use to restore a server.
    :type ServerName: string
    :param ServerName: **[REQUIRED]**
    The name of the server that you want to restore.
    :type InstanceType: string
    :param InstanceType:
    The type of the instance to create. Valid values must be specified in the following format: ``^([cm][34]|t2).*`` For example, ``m4.large`` . Valid values are ``t2.medium`` , ``m4.large`` , and ``m4.2xlarge`` . If you do not specify this parameter, RestoreServer uses the instance type from the specified backup.
    :type KeyPair: string
    :param KeyPair:
    The name of the key pair to set on the new EC2 instance. This can be helpful if the administrator no longer has the SSH key.
    :rtype: dict
    :returns:
    """
    # Documentation-only stub: the docstring above is generated from the AWS
    # API model; the body is intentionally empty.
    pass
def start_maintenance(self, ServerName: str, EngineAttributes: List = None) -> Dict:
    """
    Manually starts server maintenance. This command can be useful if an earlier maintenance attempt failed, and the underlying cause of maintenance failure has been resolved. The server is in an ``UNDER_MAINTENANCE`` state while maintenance is in progress.
    Maintenance can only be started on servers in ``HEALTHY`` and ``UNHEALTHY`` states. Otherwise, an ``InvalidStateException`` is thrown. A ``ResourceNotFoundException`` is thrown when the server does not exist. A ``ValidationException`` is raised when parameters of the request are not valid.
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/opsworkscm-2016-11-01/StartMaintenance>`_
    **Request Syntax**
    ::
    response = client.start_maintenance(
    ServerName='string',
    EngineAttributes=[
    {
    'Name': 'string',
    'Value': 'string'
    },
    ]
    )
    **Response Syntax**
    ::
    {
    'Server': {
    'AssociatePublicIpAddress': True|False,
    'BackupRetentionCount': 123,
    'ServerName': 'string',
    'CreatedAt': datetime(2015, 1, 1),
    'CloudFormationStackArn': 'string',
    'DisableAutomatedBackup': True|False,
    'Endpoint': 'string',
    'Engine': 'string',
    'EngineModel': 'string',
    'EngineAttributes': [
    {
    'Name': 'string',
    'Value': 'string'
    },
    ],
    'EngineVersion': 'string',
    'InstanceProfileArn': 'string',
    'InstanceType': 'string',
    'KeyPair': 'string',
    'MaintenanceStatus': 'SUCCESS'|'FAILED',
    'PreferredMaintenanceWindow': 'string',
    'PreferredBackupWindow': 'string',
    'SecurityGroupIds': [
    'string',
    ],
    'ServiceRoleArn': 'string',
    'Status': 'BACKING_UP'|'CONNECTION_LOST'|'CREATING'|'DELETING'|'MODIFYING'|'FAILED'|'HEALTHY'|'RUNNING'|'RESTORING'|'SETUP'|'UNDER_MAINTENANCE'|'UNHEALTHY'|'TERMINATED',
    'StatusReason': 'string',
    'SubnetIds': [
    'string',
    ],
    'ServerArn': 'string'
    }
    }
    **Response Structure**
    - *(dict) --*
    - **Server** *(dict) --*
    Contains the response to a ``StartMaintenance`` request.
    - **AssociatePublicIpAddress** *(boolean) --*
    Associate a public IP address with a server that you are launching.
    - **BackupRetentionCount** *(integer) --*
    The number of automated backups to keep.
    - **ServerName** *(string) --*
    The name of the server.
    - **CreatedAt** *(datetime) --*
    Time stamp of server creation. Example ``2016-07-29T13:38:47.520Z``
    - **CloudFormationStackArn** *(string) --*
    The ARN of the CloudFormation stack that was used to create the server.
    - **DisableAutomatedBackup** *(boolean) --*
    Disables automated backups. The number of stored backups is dependent on the value of PreferredBackupCount.
    - **Endpoint** *(string) --*
    A DNS name that can be used to access the engine. Example: ``myserver-asdfghjkl.us-east-1.opsworks.io``
    - **Engine** *(string) --*
    The engine type of the server. Valid values in this release include ``Chef`` and ``Puppet`` .
    - **EngineModel** *(string) --*
    The engine model of the server. Valid values in this release include ``Monolithic`` for Puppet and ``Single`` for Chef.
    - **EngineAttributes** *(list) --*
    The response of a createServer() request returns the master credential to access the server in EngineAttributes. These credentials are not stored by AWS OpsWorks CM; they are returned only as part of the result of createServer().
    **Attributes returned in a createServer response for Chef**
    * ``CHEF_PIVOTAL_KEY`` : A base64-encoded RSA private key that is generated by AWS OpsWorks for Chef Automate. This private key is required to access the Chef API.
    * ``CHEF_STARTER_KIT`` : A base64-encoded ZIP file. The ZIP file contains a Chef starter kit, which includes a README, a configuration file, and the required RSA private key. Save this file, unzip it, and then change to the directory where you've unzipped the file contents. From this directory, you can run Knife commands.
    **Attributes returned in a createServer response for Puppet**
    * ``PUPPET_STARTER_KIT`` : A base64-encoded ZIP file. The ZIP file contains a Puppet starter kit, including a README and a required private key. Save this file, unzip it, and then change to the directory where you've unzipped the file contents.
    * ``PUPPET_ADMIN_PASSWORD`` : An administrator password that you can use to sign in to the Puppet Enterprise console after the server is online.
    - *(dict) --*
    A name and value pair that is specific to the engine of the server.
    - **Name** *(string) --*
    The name of the engine attribute.
    - **Value** *(string) --*
    The value of the engine attribute.
    - **EngineVersion** *(string) --*
    The engine version of the server. For a Chef server, the valid value for EngineVersion is currently ``12`` . For a Puppet server, the valid value is ``2017`` .
    - **InstanceProfileArn** *(string) --*
    The instance profile ARN of the server.
    - **InstanceType** *(string) --*
    The instance type for the server, as specified in the CloudFormation stack. This might not be the same instance type that is shown in the EC2 console.
    - **KeyPair** *(string) --*
    The key pair associated with the server.
    - **MaintenanceStatus** *(string) --*
    The status of the most recent server maintenance run. Shows ``SUCCESS`` or ``FAILED`` .
    - **PreferredMaintenanceWindow** *(string) --*
    The preferred maintenance period specified for the server.
    - **PreferredBackupWindow** *(string) --*
    The preferred backup period specified for the server.
    - **SecurityGroupIds** *(list) --*
    The security group IDs for the server, as specified in the CloudFormation stack. These might not be the same security groups that are shown in the EC2 console.
    - *(string) --*
    - **ServiceRoleArn** *(string) --*
    The service role ARN used to create the server.
    - **Status** *(string) --*
    The server's status. This field displays the states of actions in progress, such as creating, running, or backing up the server, as well as the server's health state.
    - **StatusReason** *(string) --*
    Depending on the server status, this field has either a human-readable message (such as a create or backup error), or an escaped block of JSON (used for health check results).
    - **SubnetIds** *(list) --*
    The subnet IDs specified in a CreateServer request.
    - *(string) --*
    - **ServerArn** *(string) --*
    The ARN of the server.
    :type ServerName: string
    :param ServerName: **[REQUIRED]**
    The name of the server on which to run maintenance.
    :type EngineAttributes: list
    :param EngineAttributes:
    Engine attributes that are specific to the server on which you want to run maintenance.
    - *(dict) --*
    A name and value pair that is specific to the engine of the server.
    - **Name** *(string) --*
    The name of the engine attribute.
    - **Value** *(string) --*
    The value of the engine attribute.
    :rtype: dict
    :returns:
    """
    # Documentation-only stub: the docstring above is generated from the AWS
    # API model; the body is intentionally empty.
    pass
def update_server(self, ServerName: str, DisableAutomatedBackup: bool = None, BackupRetentionCount: int = None, PreferredMaintenanceWindow: str = None, PreferredBackupWindow: str = None) -> Dict:
    """
    Updates settings for a server. This operation is synchronous.

    See also: `AWS API Documentation
    <https://docs.aws.amazon.com/goto/WebAPI/opsworkscm-2016-11-01/UpdateServer>`_

    :type ServerName: string
    :param ServerName: **[REQUIRED]** The name of the server to update.
    :type DisableAutomatedBackup: boolean
    :param DisableAutomatedBackup: Setting this to ``True`` disables automated
        or scheduled backups. Automated backups are enabled by default.
    :type BackupRetentionCount: integer
    :param BackupRetentionCount: The number of automated backups to keep.
    :type PreferredMaintenanceWindow: string
    :param PreferredMaintenanceWindow: ``DDD:HH:MM`` (weekly start time) or
        ``HH:MM`` (daily start time), in UTC. Valid ``DDD`` values are
        ``Mon``, ``Tue``, ``Wed``, ``Thr``, ``Fri``, ``Sat``, or ``Sun``.
    :type PreferredBackupWindow: string
    :param PreferredBackupWindow: ``DDD:HH:MM`` (weekly start time) or
        ``HH:MM`` (daily start time), in UTC; same ``DDD`` values as above.
    :rtype: dict
    :returns: A dict with a single ``'Server'`` key describing the updated
        server (name, engine details, maintenance/backup windows, status,
        subnet and security-group IDs, and ARNs).
    """
    # Auto-generated documentation stub: the real request is dispatched by
    # botocore at runtime; this placeholder only documents the call shape.
    pass
def update_server_engine_attributes(self, ServerName: str, AttributeName: str, AttributeValue: str = None) -> Dict:
    """
    Updates engine-specific attributes on a specified server. The server
    enters the ``MODIFYING`` state while the update is in progress; only one
    update can occur at a time. Use this command to reset a Chef server's
    public key (``CHEF_PIVOTAL_KEY``) or a Puppet server's admin password
    (``PUPPET_ADMIN_PASSWORD``).

    This operation is asynchronous. It can only be called for servers in
    ``HEALTHY`` or ``UNHEALTHY`` states; otherwise an ``InvalidStateException``
    is raised. A ``ResourceNotFoundException`` is thrown when the server does
    not exist, and a ``ValidationException`` is raised when request
    parameters are not valid.

    See also: `AWS API Documentation
    <https://docs.aws.amazon.com/goto/WebAPI/opsworkscm-2016-11-01/UpdateServerEngineAttributes>`_

    :type ServerName: string
    :param ServerName: **[REQUIRED]** The name of the server to update.
    :type AttributeName: string
    :param AttributeName: **[REQUIRED]** The name of the engine attribute to update.
    :type AttributeValue: string
    :param AttributeValue: The value to set for the attribute.
    :rtype: dict
    :returns: A dict with a single ``'Server'`` key describing the updated
        server (name, engine details, status, ARNs, etc.).
    """
    # Auto-generated documentation stub: the real request is dispatched by
    # botocore at runtime; this placeholder only documents the call shape.
    pass
|
nilq/baby-python
|
python
|
import media
import fresh_tomatoes
# NOTE(review): media.Movie arguments appear to be
# (title, storyline, poster image URL, YouTube trailer URL) — inferred from
# usage here; confirm against the Movie class in media.py.

# Create movie instance for John Wick
john_wick = media.Movie("John Wick",
                        "An ex-hitman comes out of retirement to track down the gangsters that took everything from him.",
                        "https://upload.wikimedia.org/wikipedia/en/9/98/John_Wick_TeaserPoster.jpg",
                        "https://www.youtube.com/watch?v=2AUmvWm5ZDQ")
# Create movie instance for Fist Fight
fist_fight = media.Movie("Fist Fight",
                         " When one school teacher gets the other fired, he is challenged to an after-school fight. ",
                         "https://upload.wikimedia.org/wikipedia/en/b/b2/Fist_Fight.png",
                         "https://www.youtube.com/watch?v=6YVBj2o_3mg")
# Create movie instance for Office Christmas Party
office_xmas_party = media.Movie("Office Christmas Party",
                                "When his uptight CEO sister threatens to shut down his"
                                " branch, the branch manager throws an epic Christmas"
                                " party in order to land a big client and save the day,"
                                " but the party gets way out of hand...",
                                "https://upload.wikimedia.org/wikipedia/en/8/8a/Office_Christmas_Party.png",
                                "https://www.youtube.com/watch?v=z4PHjxRiT2I")
# Create movie instance for This is 40
this_is_40 = media.Movie("This is 40", "Pete and Debbie are both about to turn 40, their kids hate each other, both of"
                         " their businesses are failing, they're on the verge of losing their house, and"
                         " their relationship is threatening to fall apart.",
                         "https://upload.wikimedia.org/wikipedia/en/e/eb/This_is_40.jpg",
                         "https://www.youtube.com/watch?v=6sGkPwrze0o")
# Create movie instance for Skyfall
skyfall = media.Movie("Skyfall", "Bond's loyalty to M is tested when her past comes back to haunt her."
                      " Whilst MI6 comes under attack, 007 must track down and destroy the"
                      " threat, no matter how personal the cost.",
                      "https://upload.wikimedia.org/wikipedia/en/a/a7/Skyfall_poster.jpg",
                      "https://www.youtube.com/watch?v=24mTIE4D9JM")
# Create movie instance for Deadpool
deadpool = media.Movie("Deadpool", "A fast-talking mercenary with a morbid sense of humor is subjected to a rogue"
                       " experiment that leaves him with accelerated healing powers and a quest for revenge.",
                       "https://upload.wikimedia.org/wikipedia/en/4/46/Deadpool_poster.jpg",
                       "https://www.youtube.com/watch?v=ONHBaC-pfsk")
# Create list of favorite movie instances, in display order
movies = [john_wick, fist_fight, office_xmas_party, this_is_40, skyfall, deadpool]
# Pass list of movies to generate the website that displays them
fresh_tomatoes.open_movies_page(movies)
|
nilq/baby-python
|
python
|
import pickle
# Merge the 18 per-shard grade pickles into one movie-grade list, then dump
# the combined list. Index 0 is unused; ids presumably run 1..17770 — confirm
# against the code that wrote the temgrade shards.
mv_grade = [0] * 17771
for i in range(18):
    with open('temgrade/' + str(i) + '_tem_grade', 'rb') as tf:
        c = pickle.load(tf)
    # Each shard maps movie-id -> grade; ids/grades may be stored as strings.
    for (mi, grade) in c.items():
        mv_grade[int(mi)] = float(grade)
    # BUG FIX: `print str(i) + ...` is a Python-2-only statement; the
    # parenthesized form below behaves identically on 2 and also runs on 3.
    print(str(i) + " DONE!")
with open('movie_grade.list', 'wb') as mg:
    pickle.dump(mv_grade, mg)
|
nilq/baby-python
|
python
|
import json
# Serialized layout handled by the load* helpers below — presumably
# [command saveForms, base-key saveForm, other-key saveForms]; see
# bindHolder.saveForm() for the authoritative structure.
def loadCommand(data):
    """Rebuild a ``command`` from its saveForm list: [name, command, command2]."""
    return command(data[0], data[1], data[2])
def loadBind(data, key):
    """Rebuild a ``bind`` from its saveForm list, attaching it to bKey ``key``."""
    return bind(loadCommand(data[0]), key, data[1], data[2])
def loadBKey(data, base=None):
    """Rebuild a ``bKey`` and its saved binds.

    ``base`` is the owning bindHolder, or None when reloading the standalone
    base key (the one that carries the key-up layer).
    """
    b = bKey(data[0], base)
    for i in data[1]:
        loadBind(i, b)
    return b
def loadBKeys(data, base):
    """Rebuild every saved non-base bKey into the bindHolder ``base``."""
    for i in data:
        loadBKey(i, base)
def loadCommandHolder(data):
    """Rebuild a ``commandHolder`` from a list of command saveForms."""
    c = commandHolder()
    for i in data:
        c.add(loadCommand(i))
    return c
def loadForm(lst):
    """Rebuild a complete bindHolder from ``bindHolder.saveForm()`` output:
    ``lst`` is [command saveForms, base-key saveForm, other-key saveForms]."""
    b = bindHolder(loadBKey(lst[1]), loadCommandHolder(lst[0]))
    loadBKeys(lst[2], b)
    return b
class command(object):
    """A console alias: one name bound to a command, or to a press/release
    pair when ``command2`` is supplied."""

    # Class-level defaults; every instance overwrites these in __init__.
    command = ""
    command2 = ""
    name = ""
    name2 = ""
    string = ""

    def __init__(self, name, command, command2=""):
        self.command = command
        self.command2 = command2
        # ``name2`` always keeps the bare name (used by saveForm); ``name``
        # gains a '+' prefix for press/release pairs so binds reference the
        # press alias.
        self.name = name
        self.name2 = name
        if self.command2 == "":
            self.string = 'alias "%s" "%s"' % (name, self.command)
        else:
            self.string = (
                'alias "+%s" "%s"\n' % (name, self.command)
                + 'alias "-%s" "%s"' % (name, self.command2)
            )
            self.name = "+" + self.name

    def saveForm(self):
        """Serializable form: [bare name, press command, release command]."""
        return [self.name2, self.command, self.command2]
class bind(object):
    """Associates a keyboard/mouse key with a command on the key-down layer of
    a modifier ``bKey``, or on the shared key-up layer of its base key."""

    # Class-level defaults; every instance overwrites these in __init__.
    command = None
    key = ""
    up = True
    string = ""
    name = ""

    def __init__(self, command, bKey, key, up=False):
        """
        :param command: a ``command`` object (or a raw command string).
        :param bKey: the modifier-key object this bind belongs to.
        :param key: the key to bind while the modifier is active.
        :param up: True to register on the key-up layer, False for key-down.
        :raises TypeError: when ``bKey`` is passed as a plain string.
        """
        if type(bKey) == str:
            # BUG FIX: the original raised a string literal ("Type Error! ...")
            # which is not a valid exception in Python >= 2.6 — the raise itself
            # failed with an unrelated TypeError, hiding the intended message.
            raise TypeError("'bKey' was meant to be an object of bKey, not a string.")
        self.command = command
        self.key = key
        self.up = up
        if type(command) != str:
            command = command.name
        if up:
            # Key-up binds always live on the shared base key.
            bKey = bKey.getBase()
        self.name = "mod" + bKey.upper() + "_" + key.upper()
        self.string = (
            'alias "' + self.name + '" "bind ' + key.lower() + " " + command + '"'
        )
        if up:
            bKey.append(self)
        else:
            bKey.down.append(self)

    def saveForm(self):
        """Serializable form: [command saveForm, key, up-flag]."""
        return [self.command.saveForm(), self.key, self.up]
class bKey(object):
    """A modifier key (e.g. mouse4): owns a key-down bind layer; the base
    instance (created with b=None) instead aggregates all key-up binds."""

    # Class-level defaults; instances overwrite the ones they use in __init__.
    key = None
    down = None
    base = None
    # NOTE(review): ``down`` is declared twice at class level — the second
    # declaration is redundant; confirm and remove in a later cleanup.
    down = None
    up = None

    def __init__(self, key, b=None):
        # ``b`` is the owning bindHolder; when omitted, this instance is the
        # standalone "base" key that collects the key-up layer.
        self.key = key
        if b == None:
            self.base = None
            self.up = []
        else:
            self.base = b.base
            b.add(self)
            self.down = []

    def upper(self):
        # Upper-cased key name used when composing alias identifiers.
        return self.key.upper()

    def getBase(self):
        # The base key holds the shared key-up layer; a base returns itself.
        if self.base != None:
            return self.base
        return self

    def getBinds(self):
        # Render this key's alias/bind config text. Non-base keys emit their
        # key-down layer plus the +mod/-mod toggle aliases; the base key emits
        # the key-up layer behind the "none" alias.
        string = ""
        if self.base != None:
            string += self.get(self.down)
            string += (
                'alias "+mod'
                + self.upper()
                + '" "'
                + ";".join(i.name for i in self.down)
                + '"\n'
            )
            string += 'alias "-mod' + self.upper() + '" "none"\n'
            string += 'bind "' + self.upper() + '" "+mod' + self.upper() + '"'
        else:
            string += self.get(self.up)
            string += 'alias "none" "' + ";".join(i.name for i in self.up) + '"\n'
        return string

    def get(self, lst):
        # Concatenate the command alias text, then the bind alias text, for
        # every bind in ``lst``.
        string = ""
        for i in lst:
            string += i.command.string + "\n"
        for i in lst:
            string += i.string + "\n"
        return string

    def append(self, data):
        # Key-up binds accumulate on the base key, wherever they are added.
        if self.base != None:
            self.base.append(data)
        else:
            self.up.append(data)

    def saveForm(self):
        # Serializable form: [key, [bind saveForms]] using whichever layer
        # this instance owns (down for normal keys, up for the base key).
        if self.down != None:
            return [self.key] + [[i.saveForm() for i in self.down]]
        else:
            return [self.key] + [[i.saveForm() for i in self.up]]
class commandHolder(object):
    """Ordered collection of ``command`` objects belonging to a bindHolder."""

    # Class-level default; every instance gets its own list in __init__.
    lst = None

    def __init__(self):
        self.lst = []

    def add(self, data):
        """Append one command object to the collection."""
        self.lst.append(data)

    def getData(self):
        """Render every stored command's alias text, one entry per line."""
        lines = [entry.string for entry in self.lst]
        return "\n".join(lines) + "\n"

    def saveForm(self):
        """Serializable form: one saveForm() list per stored command."""
        forms = []
        for entry in self.lst:
            forms.append(entry.saveForm())
        return forms
class bindHolder(object):
    """Top-level container tying together the command list, the base key
    (key-up layer) and the modifier bKeys."""

    # Class-level defaults; instances overwrite all three in __init__.
    lst = None
    base = None
    comm = None

    def __init__(self, b=None, c=None):
        self.lst = []
        # Fall back to a fresh base key / command holder when none is given.
        self.base = bKey("") if b == None else b
        self.comm = commandHolder() if c == None else c

    def getData(self):
        """Full config text: commands, base-key binds, then per-key binds."""
        parts = [self.comm.getData(), self.base.getBinds()]
        parts.append("\n".join(entry.getBinds() for entry in self.lst))
        return "".join(parts)

    def add(self, data):
        """Register one modifier bKey with this holder."""
        self.lst.append(data)

    def saveForm(self):
        """Serializable form: [commands, base key, [other keys]]."""
        key_forms = [entry.saveForm() for entry in self.lst]
        return [self.comm.saveForm(), self.base.saveForm(), key_forms]
# --- Demo/driver: build a Dota 2 key-binding layout, then verify that the
# --- saveForm()/loadForm() round trip reproduces it exactly.
b = bindHolder()
m4 = bKey("mouse4", b)
b.comm.add(command("sFollow", ""))
bind(
    command("top", "dota_camera_setpos -2296.339355 1085.593506 0.000000", "sFollow"),
    m4,
    "1",
)
bind(
    command("bot", "dota_camera_setpos 2874.552734 -3017.180664 0.000000", "sFollow"),
    m4,
    "1",
    True,
)
bind(command("tShop", "toggleshoppanel"), m4, "2")
bind(command("sToggle", "dota_smart_camera_toggle"), m4, "2", True)
bind(
    command(
        "home", "dota_select_courier;dota_ability_execute 0;+camera;dota_courier_burst"
    ),
    m4,
    "3",
)
bind(
    command(
        "secret",
        "dota_select_courier;dota_ability_execute 1;+camera;dota_courier_burst",
    ),
    m4,
    "3",
    True,
)
bind(command("courier", "dota_courier_deliver;dota_courier_burst"), m4, "4")
bind(command("burst", "dota_courier_burst"), m4, "4", True)
bind(command("sCourier", "dota_select_courier"), m4, "5")
bind(command("", ""), m4, "5", True)
bind(command("", ""), m4, "TAB")
bind(command("", ""), m4, "TAB", True)
bind(command("item0", "dota_item_execute 0"), m4, "a")
bind(command("item1", "dota_item_execute 1"), m4, "a", True)
bind(command("item2", "dota_item_execute 2"), m4, "s")
bind(command("item3", "dota_item_execute 3"), m4, "s", True)
bind(command("item4", "dota_item_execute 4"), m4, "d")
bind(command("item5", "dota_item_execute 5"), m4, "d", True)
m5 = bKey("mouse5", b)
bind(command("test", "test"), m5, "1")
item = b.saveForm()
b = loadForm(b.saveForm())
# BUG FIX: `print item == ...` is a Python-2-only statement; the parenthesized
# form below behaves identically on Python 2 and also runs on Python 3.
print(item == b.saveForm())
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Collect the occupancy dataset.
See the README file for more information.
Author: G.J.J. van den Burg
License: This file is part of TCPD, see the top-level LICENSE file.
Copyright: 2019, The Alan Turing Institute
"""
import argparse
import clevercsv
import hashlib
import json
import os
import sys
import time
from functools import wraps
from urllib.request import urlretrieve
from urllib.error import URLError
# Keep every SAMPLE-th row when downsampling the minute-resolution series.
SAMPLE = 16
# Archived copy of the raw dataset, pinned via the Wayback Machine for
# reproducibility.
TXT_URL = "https://web.archive.org/web/20191128145102if_/https://raw.githubusercontent.com/LuisM78/Occupancy-detection-data/master/datatraining.txt"
# Expected MD5 checksums of the raw download and of the generated JSON file.
MD5_TXT = "e656cd731300cb444bd10fcd28071e37"
MD5_JSON = "bc6cd9adaf496fe30bf0e417d2c3b0c6"
# On-disk file names for the raw and converted datasets.
NAME_TXT = "datatraining.txt"
NAME_JSON = "occupancy.json"
class ValidationError(Exception):
    """Raised when a downloaded or generated file fails its MD5 check."""

    def __init__(self, filename):
        message = (
            "Validating the file '{}' failed. \n"
            "Please raise an issue on the GitHub page for this project "
            "if the error persists."
        ).format(filename)
        super().__init__(message)
def check_md5sum(filename, checksum):
    """Return True iff the MD5 hex digest of *filename*'s bytes equals *checksum*."""
    with open(filename, "rb") as handle:
        digest = hashlib.md5(handle.read()).hexdigest()
    return digest == checksum
def validate(checksum):
    """Decorator that validates the target file.

    The wrapped function must create the file named by its ``target_path``
    keyword argument. The wrapper skips the call entirely when that file
    already exists with the expected MD5; after the call it verifies the file
    exists (FileNotFoundError otherwise) and matches ``checksum``
    (ValidationError otherwise).
    """
    def validate_decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            target = kwargs.get("target_path", None)
            # NOTE(review): if ``target_path`` is omitted, ``target`` is None
            # and os.path.exists(None) appears to raise TypeError — callers
            # must always pass target_path as a keyword argument; confirm.
            if os.path.exists(target) and check_md5sum(target, checksum):
                return
            out = func(*args, **kwargs)
            if not os.path.exists(target):
                raise FileNotFoundError("Target file expected at: %s" % target)
            if not check_md5sum(target, checksum):
                raise ValidationError(target)
            return out
        return wrapper
    return validate_decorator
@validate(MD5_TXT)
def download_txt(target_path=None):
    """Download the raw occupancy data file to ``target_path``.

    Retries up to 5 times on URLError, sleeping 5 seconds between attempts.
    If all attempts fail, the @validate wrapper raises FileNotFoundError.
    """
    count = 0
    while count < 5:
        count += 1
        try:
            urlretrieve(TXT_URL, target_path)
            return
        except URLError as err:
            # BUG FIX: sys.stderr was passed as a positional argument to
            # print() (so it was printed to stdout as a value); use the
            # ``file=`` keyword to actually write the message to stderr.
            print(
                "Error occurred (%r) when trying to download txt. Retrying in 5 seconds"
                % err,
                file=sys.stderr,
            )
            time.sleep(5)
@validate(MD5_JSON)
def write_json(txt_path, target_path=None):
    """Convert the raw occupancy text file into the TCPD JSON format.

    Keeps the four sensor channels listed in ``var_include`` and downsamples
    both the time axis and every channel to every SAMPLE-th observation.
    """
    with open(txt_path, "r", newline="", encoding="ascii") as fp:
        reader = clevercsv.reader(
            fp, delimiter=",", quotechar='"', escapechar=""
        )
        rows = list(reader)
    header = rows.pop(0)
    # The raw header has one fewer field than the data rows; prepend an "id"
    # column name so zip() pairs every value with the right column.
    header.insert(0, "id")
    as_dicts = [dict(zip(header, r)) for r in rows]
    var_include = ["Temperature", "Humidity", "Light", "CO2"]
    # NOTE(review): ``time`` shadows the imported time module inside this
    # function; harmless here, but worth renaming in a later cleanup.
    time = [x["date"] for x in as_dicts]
    time = [time[i] for i in range(0, len(time), SAMPLE)]
    data = {
        "name": "occupancy",
        "longname": "Occupancy",
        "n_obs": len(time),
        "n_dim": len(var_include),
        "time": {
            "type": "string",
            "format": "%Y-%m-%d %H:%M:%S",
            "index": list(range(len(time))),
            "raw": time,
        },
        "series": [],
    }
    for idx, var in enumerate(var_include, start=1):
        lbl = "V%i" % idx
        obs = [float(x[var]) for x in as_dicts]
        # Downsample each channel with the same stride as the time axis.
        obs = [obs[i] for i in range(0, len(obs), SAMPLE)]
        data["series"].append({"label": lbl, "type": "float", "raw": obs})
    with open(target_path, "w") as fp:
        json.dump(data, fp, indent="\t")
def collect(output_dir="."):
    """Download the raw data file and produce the JSON dataset in *output_dir*."""
    raw_path = os.path.join(output_dir, NAME_TXT)
    out_path = os.path.join(output_dir, NAME_JSON)
    download_txt(target_path=raw_path)
    write_json(raw_path, target_path=out_path)
def clean(output_dir="."):
    """Remove any files this script may have produced in *output_dir*."""
    for name in (NAME_TXT, NAME_JSON):
        path = os.path.join(output_dir, name)
        if os.path.exists(path):
            os.unlink(path)
def parse_args():
    """Parse the command line: an optional action plus an output directory."""
    cli = argparse.ArgumentParser()
    cli.add_argument("-o", "--output-dir", default=".", help="output directory to use")
    cli.add_argument(
        "action",
        nargs="?",
        default="collect",
        choices=["collect", "clean"],
        help="Action to perform",
    )
    return cli.parse_args()
def main(output_dir="."):
    """Entry point: dispatch to collect() or clean() per the CLI action.

    Note: the ``output_dir`` parameter is not consulted; the value parsed
    from the command line is used instead.
    """
    args = parse_args()
    if args.action == "clean":
        clean(output_dir=args.output_dir)
    elif args.action == "collect":
        collect(output_dir=args.output_dir)
if __name__ == "__main__":
    # Script entry point; CLI arguments are parsed inside main().
    main()
|
nilq/baby-python
|
python
|
"""
Test CLI
References:
* https://click.palletsprojects.com/en/7.x/testing/
ToDo: expand cli testing
"""
from __future__ import annotations
from typing import Any
from click.testing import CliRunner
from pytest_mock import MockFixture
from alsek import __version__
def test_version(
    cli_runner: CliRunner,
    mocker: MockFixture,
) -> None:
    """``--version`` exits cleanly and reports the installed alsek version."""
    outcome = cli_runner.invoke(args=["--version"])
    assert outcome.exit_code == 0
    assert __version__ in outcome.output
def test_help(
    cli_runner: CliRunner,
    mocker: MockFixture,
) -> None:
    """``--help`` exits cleanly and shows the worker-pool command summary."""
    outcome = cli_runner.invoke(args=["--help"])
    assert outcome.exit_code == 0
    assert "Start a pool of Alsek workers" in outcome.output
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import tablib
import pytz
from datetime import datetime
from decimal import Decimal, InvalidOperation
from django.db.utils import IntegrityError
from django.core.management.base import BaseCommand
from django.utils import timezone
from ...models import MinuteData
def make_timestamp(date_string):
    """
    A row-operation that converts an Efergy timestamp of the form
    "2015-12-31 12:34:56" into a Python datetime object.

    Returns None when the input does not match the expected format
    (or is not a string at all).
    """
    try:
        return datetime.strptime(date_string, '%Y-%m-%d %H:%M:%S')
    except (ValueError, TypeError):
        # BUG FIX: a bare ``except:`` also swallowed KeyboardInterrupt and
        # SystemExit; only parsing/type errors should yield None.
        return None
class Command(BaseCommand):
    # NOTE(review): the usage example reads ``python.py manage`` — presumably
    # it should say ``python manage.py load_engage_data ...``. This is the
    # user-visible help string, so it is left untouched here; confirm and fix
    # deliberately.
    help = """Load Efergy's Engage minute data directly in like this:
    `python.py manage load_engage_data your_filename.csv`
    """
    def add_arguments(self, parser):
        # Accept one or more positional file names; handle() uses the first.
        parser.add_argument('file_name', nargs='+', type=str)
    def handle(self, *args, **options):
        # Read the CSV export and create one MinuteData row per valid line,
        # skipping duplicates (IntegrityError) silently.
        file_name = options['file_name'][0]
        data = tablib.Dataset()
        data.csv = open(file_name).read()
        counter = 0
        for row in data:
            # NOTE(review): make_timestamp() returns None for unparseable
            # dates, and timezone.make_aware(None, ...) would then raise —
            # bad rows likely abort the import rather than being skipped.
            timestamp = timezone.make_aware(
                make_timestamp(row[0]), timezone.get_current_timezone())
            try:
                value = Decimal(row[1])
            except InvalidOperation:
                value = None
            # NOTE(review): ``if timestamp and value`` also skips rows whose
            # reading is exactly 0 watts (Decimal(0) is falsy) — confirm this
            # is intended and not silent data loss.
            if timestamp and value:
                minute = timestamp.hour * 60 + timestamp.minute
                try:
                    MinuteData.objects.create(
                        # TODO: Obviously, this should be a setting somewhere
                        timestamp=timestamp.astimezone(
                            pytz.timezone("America/New_York")),
                        minute=minute,
                        watts=value
                    )
                    counter += 1
                except IntegrityError:
                    pass
        print('Imported {0} new minutes from {1}'.format(counter, file_name))
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from .eg import eg_hierarchy
|
nilq/baby-python
|
python
|
"""Dahua package constants"""
# Package version string; bump on release.
__version__ = '0.0.2-2'
__author__ = "Alexander Ryazanov <alryaz@xavux.com>"
from .device import *
from .channel import *
|
nilq/baby-python
|
python
|
import pygame
from settings import *
class Tile(pygame.sprite.Sprite):
    """A static rock obstacle sprite anchored at a fixed map position."""

    def __init__(self, pos, groups):
        """Load the rock texture and place its rect's top-left corner at *pos*."""
        super().__init__(groups)
        surface = pygame.image.load('assets/rock.png')
        self.image = surface.convert_alpha()
        self.rect = self.image.get_rect(topleft=pos)
nilq/baby-python
|
python
|
# Semantic-segmentation label tables: per dataset, either a list of
# [id, name, (R, G, B)] rows or (for VIPER) a color -> (id, name) mapping.
#id name color
## Cityscapes, kiti, vkiti
CITYSCAPES_LABELS = \
[[ 0 , 'unlabeled' , ( 0, 0, 0)],
[ 1 , 'ego vehicle' , ( 0, 0, 0)],
[ 2 , 'rectification border' , ( 0, 0, 0)],
[ 3 , 'out of roi' , ( 0, 0, 0)],
[ 4 , 'static' , ( 0, 0, 0)],
[ 5 , 'dynamic' , (111, 74, 0)],
[ 6 , 'ground' , ( 81, 0, 81)],
[ 7 , 'road' , (128, 64,128)],
[ 8 , 'sidewalk' , (244, 35,232)],
[ 9 , 'parking' , (250,170,160)],
[10 , 'rail track' , (230,150,140)],
[11 , 'building' , ( 70, 70, 70)],
[12 , 'wall' , (102,102,156)],
[13 , 'fence' , (190,153,153)],
[14 , 'guard rail' , (180,165,180)],
[15 , 'bridge' , (150,100,100)],
[16 , 'tunnel' , (150,120, 90)],
[17 , 'pole' , (153,153,153)],
[18 , 'polegroup' , (153,153,153)],
[19 , 'traffic light' , (250,170, 30)],
[20 , 'traffic sign' , (220,220, 0)],
[21 , 'vegetation' , (107,142, 35)],
[22 , 'terrain' , (152,251,152)],
[23 , 'sky' , ( 70,130,180)],
[24 , 'person' , (220, 20, 60)],
[25 , 'rider' , (255, 0, 0)],
[26 , 'car' , ( 0, 0,142)],
[27 , 'truck' , ( 0, 0, 70)],
[28 , 'bus' , ( 0, 60,100)],
[29 , 'caravan' , ( 0, 0, 90)],
[30 , 'trailer' , ( 0, 0,110)],
[31 , 'train' , ( 0, 80,100)],
[32 , 'motorcycle' , ( 0, 0,230)],
[33 , 'bicycle' , (119, 11, 32)],
[34 , 'license plate' , ( 0, 0,142)]]
## SYNTHIA-SF
SYNTHIA =\
[[0 , 'void' , ( 0, 0, 0)],
[1 , 'road' , (128, 64,128)],
[2 , 'sidewalk' , (244, 35,232)],
[3 , 'building' , ( 70, 70, 70)],
[4 , 'wall' , (102,102,156)],
[5 , 'fence' , (190,153,153)],
[6 , 'pole' , (153,153,153)],
[7 , 'traffic light' , (250,170, 30)],
[8 , 'traffic sign' , (220,220, 0)],
[9 , 'vegetation' , (107,142, 35)],
[10 , 'terrain' , (152,251,152)],
[11 , 'sky' , ( 70,130,180)],
[12 , 'person' , (220, 20, 60)],
[13 , 'rider' , (255, 0, 0)],
[14 , 'car' , ( 0, 0,142)],
[15 , 'truck' , ( 0, 0, 70)],
[16 , 'bus' , ( 0, 60,100)],
[17 , 'train' , ( 0, 80,100)],
[18 , 'motorcycle' , ( 0, 0,230)],
[19 , 'bicycle' , (119, 11, 32)],
[20 , 'road lines' , (157,234, 50)],
[21 , 'other' , ( 72, 0, 98)],
[22 , 'road works' , (167,106, 29)]]
## VIPER
# NOTE(review): this dict literal contains the key ( 0, 80,100) twice — once
# for (25, 'van') and once for (29, 'train') — so the 'van' entry is silently
# overwritten and id 25 is unreachable. The correct 'van' color must be taken
# from the official VIPER palette before fixing; flagged rather than guessed.
VIPER=\
{( 0, 0, 0) : (0 , 'unlabeled' ),
(111, 74, 0) : (1 , 'ambiguous' ),
( 70,130,180) : (2 , 'sky' ),
(128, 64,128) : (3 , 'road' ),
(244, 35,232) : (4 , 'sidewalk' ),
(230,150,140) : (5 , 'railtrack' ),
(152,251,152) : (6 , 'terrain' ),
( 87,182, 35) : (7 , 'tree' ),
( 35,142, 35) : (8 , 'vegetation' ),
( 70, 70, 70) : (9 , 'building' ),
(153,153,153) : (10 , 'infrastructure'),
(190,153,153) : (11 , 'fence' ),
(150, 20, 20) : (12 , 'billboard' ),
(250,170, 30) : (13 , 'trafficlight' ),
(220,220, 0) : (14 , 'trafficsign' ),
(180,180,100) : (15 , 'mobilebarrier' ),
(173,153,153) : (16 , 'firehydrant' ),
(168,153,153) : (17 , 'chair' ),
( 81, 0, 21) : (18 , 'trash' ),
( 81, 0, 81) : (19 , 'trashcan' ),
(220, 20, 60) : (20 , 'person' ),
(255, 0, 0) : (21 , 'animal' ),
(119, 11, 32) : (22 , 'bicycle' ),
( 0, 0,230) : (23 , 'motorcycle' ),
( 0, 0,142) : (24 , 'car' ),
( 0, 80,100) : (25 , 'van' ),
( 0, 60,100) : (26 , 'bus' ),
( 0, 0, 70) : (27 , 'truck' ),
( 0, 0, 90) : (28 , 'trailer' ),
( 0, 80,100) : (29 , 'train' ),
( 0,100,100) : (30 , 'plane' ),
( 50, 0, 90) : (31 , 'boat' )}
|
nilq/baby-python
|
python
|
"""FastApi Backend for my Portfolio Website.
This doesn't have much purpose currently, but eventually I want to use this
backend to interact with various Python-based projects I develop.
"""
|
nilq/baby-python
|
python
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Spectroscopy experiment class for resonators."""
from typing import Iterable, Optional, Tuple
import numpy as np
from qiskit import QuantumCircuit
from qiskit.circuit import Parameter
from qiskit.exceptions import QiskitError
from qiskit.providers import Backend
import qiskit.pulse as pulse
from qiskit_experiments.framework import Options
from qiskit_experiments.library.characterization.spectroscopy import Spectroscopy
from .analysis.resonator_spectroscopy_analysis import ResonatorSpectroscopyAnalysis
class ResonatorSpectroscopy(Spectroscopy):
    """Perform spectroscopy on the readout resonator.

    # section: overview
        This experiment does spectroscopy on the readout resonator. It applies the following
        circuit

        .. parsed-literal::

                 ┌─┐
              q: ┤M├
                 └╥┘
            c: 1/═╩═
                  0

        where a spectroscopy pulse is attached to the measurement instruction.

        Side note: when doing readout resonator spectroscopy, each measured IQ point has a
        frequency dependent phase. Close to the resonance, the IQ points start rotating around
        in the IQ plan. This effect must be accounted for in the data processing to produce a
        meaningful signal. The default data processing workflow will therefore reduce the two-
        dimensional IQ data to one-dimensional data using the magnitude of each IQ point.

    # section: warning
        Some backends may not have the required functionality to properly support resonator
        spectroscopy experiments. The experiment may not work or the resulting resonance
        may not properly reflect the properties of the readout resonator.

    # section: example
        The resonator spectroscopy experiment can be run by doing:

        .. code:: python

            qubit = 1
            spec = ResonatorSpectroscopy(qubit, backend)
            exp_data = spec.run().block_for_results()
            exp_data.figure(0)

        This will measure the resonator attached to qubit 1 and report the resonance frequency
        as well as the kappa, i.e. the line width, of the resonator.

    # section: analysis_ref
        :py:class:`ResonatorSpectroscopyAnalysis`

    # section: see_also
        qiskit_experiments.library.characterization.qubit_spectroscopy.QubitSpectroscopy
    """

    @classmethod
    def _default_experiment_options(cls) -> Options:
        """Default option values used for the spectroscopy pulse.

        All units of the resonator spectroscopy experiment are given in seconds.

        Experiment Options:
            amp (float): The amplitude of the spectroscopy pulse. Defaults to 1 and must
                be between 0 and 1.
            duration (float): The duration in seconds of the spectroscopy pulse.
            sigma (float): The standard deviation of the spectroscopy pulse in seconds.
            width (float): The width of the flat-top part of the GaussianSquare pulse in
                seconds. Defaults to 0.
        """
        options = super()._default_experiment_options()

        options.amp = 1
        options.duration = 480e-9
        options.sigma = 60e-9
        options.width = 360e-9

        return options

    def __init__(
        self,
        qubit: int,
        backend: Optional[Backend] = None,
        frequencies: Optional[Iterable[float]] = None,
        absolute: bool = True,
        **experiment_options,
    ):
        """Initialize a resonator spectroscopy experiment.

        A spectroscopy experiment run by setting the frequency of the readout drive.
        The parameters of the GaussianSquare spectroscopy pulse can be specified at run-time
        through the experiment options.

        Args:
            qubit: The qubit on which to run readout spectroscopy.
            backend: Optional, the backend to run the experiment on.
            frequencies: The frequencies to scan in the experiment, in Hz. The default values
                range from -20 MHz to 20 MHz in 51 steps. If the ``absolute`` variable is
                set to True then a center frequency obtained from the backend's defaults is
                added to each value of this range.
            absolute: Boolean to specify if the frequencies are absolute or relative to the
                resonator frequency in the backend. The default value is True.
            experiment_options: Key word arguments used to set the experiment options.

        Raises:
            QiskitError: if no frequencies are given and absolute frequencies are desired and
                no backend is given.
        """
        analysis = ResonatorSpectroscopyAnalysis()

        if frequencies is None:
            # Default scan: 51 points over +/- 20 MHz around the resonator.
            frequencies = np.linspace(-20.0e6, 20.0e6, 51)

            if absolute:
                if backend is None:
                    raise QiskitError(
                        "Cannot automatically compute absolute frequencies without a backend."
                    )

                # Shift the default relative range by the backend's estimated
                # measurement frequency to obtain absolute frequencies.
                center_freq = backend.defaults().meas_freq_est[qubit]
                frequencies += center_freq

        super().__init__(qubit, frequencies, backend, absolute, analysis, **experiment_options)

    @property
    def _backend_center_frequency(self) -> float:
        """Returns the center frequency of the experiment.

        Returns:
            The center frequency of the experiment.

        Raises:
            QiskitError: If the experiment does not have a backend set.
        """
        if self.backend is None:
            raise QiskitError("backend not set. Cannot call center_frequency.")

        return self.backend.defaults().meas_freq_est[self.physical_qubits[0]]

    def _template_circuit(self) -> QuantumCircuit:
        """Return the template quantum circuit.

        A single-qubit, single-clbit circuit containing only a measurement; the
        spectroscopy pulse is attached to the measure instruction as a calibration.
        """
        circuit = QuantumCircuit(1, 1)
        circuit.measure(0, 0)

        return circuit

    def _schedule(self) -> Tuple[pulse.ScheduleBlock, Parameter]:
        """Create the spectroscopy schedule.

        Returns the schedule and the unbound frequency-shift parameter that
        :meth:`circuits` later assigns per scanned frequency.
        """
        dt, granularity = self._dt, self._granularity

        # Convert the second-valued options to samples, rounded down to the
        # backend's sample granularity.
        duration = int(granularity * (self.experiment_options.duration / dt // granularity))
        sigma = granularity * (self.experiment_options.sigma / dt // granularity)
        width = granularity * (self.experiment_options.width / dt // granularity)

        qubit = self.physical_qubits[0]

        freq_param = Parameter("frequency")

        with pulse.build(backend=self.backend, name="spectroscopy") as schedule:
            # Shift the measure channel frequency, play the probe pulse and
            # acquire for the full pulse duration.
            pulse.shift_frequency(freq_param, pulse.MeasureChannel(qubit))
            pulse.play(
                pulse.GaussianSquare(
                    duration=duration,
                    amp=self.experiment_options.amp,
                    sigma=sigma,
                    width=width,
                ),
                pulse.MeasureChannel(qubit),
            )
            pulse.acquire(duration, qubit, pulse.MemorySlot(0))

        return schedule, freq_param

    def circuits(self):
        """Create the circuit for the spectroscopy experiment.

        The circuits are based on a GaussianSquare pulse and a frequency_shift instruction
        encapsulated in a measurement instruction.

        Returns:
            circuits: The circuits that will run the spectroscopy experiment.
        """
        sched, freq_param = self._schedule()

        circs = []
        for freq in self._frequencies:
            freq_shift = freq - self._backend_center_frequency if self._absolute else freq
            # Round to mHz precision to keep metadata/parameter values tidy.
            freq_shift = np.round(freq_shift, decimals=3)

            sched_ = sched.assign_parameters({freq_param: freq_shift}, inplace=False)

            circuit = self._template_circuit()
            circuit.add_calibration("measure", self.physical_qubits, sched_)
            self._add_metadata(circuit, freq, sched)

            circs.append(circuit)

        return circs
|
nilq/baby-python
|
python
|
# Generated by Django 2.2.5 on 2019-09-25 16:34
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add optional ``fill_cols`` and ``fill_rows`` text fields to the
    ``SquareFootLayout`` model (auto-generated by Django; avoid hand-editing)."""

    dependencies = [
        ('backend', '0002_squarefootlayout'),
    ]

    operations = [
        # Both columns are blank-able CharFields, so existing rows need no default.
        migrations.AddField(
            model_name='squarefootlayout',
            name='fill_cols',
            field=models.CharField(blank=True, max_length=200),
        ),
        migrations.AddField(
            model_name='squarefootlayout',
            name='fill_rows',
            field=models.CharField(blank=True, max_length=200),
        ),
    ]
|
nilq/baby-python
|
python
|
from __future__ import absolute_import, division, print_function
from stripe.api_resources.abstract import APIResource
class Mandate(APIResource):
    """API resource for the Stripe ``mandate`` object type.

    ``OBJECT_NAME`` tells the shared :class:`APIResource` machinery which
    object type this class corresponds to.
    """

    OBJECT_NAME = "mandate"
|
nilq/baby-python
|
python
|
from django.contrib.auth import logout, authenticate, login
from django.core.checks import messages
from django.shortcuts import render, redirect
from requests import auth
from django.contrib.auth.models import User, auth
from hotels.models import Reservation
from .forms import *
# Create your views here.
def log(request):
    """Authenticate a user from POSTed credentials; otherwise show the login page."""
    if request.method != 'POST':
        return render(request, 'login.html')

    user = auth.authenticate(
        request,
        username=request.POST.get('username'),
        password=request.POST.get('password'),
    )
    if user is None:
        # Bad credentials: send the visitor back to the login form.
        return redirect('/login/')

    auth.login(request, user)
    return redirect('/')
def log_out(request):
    """Terminate the current session and send the visitor to the home page."""
    logout(request)
    return redirect('/')
def registration(request):
    """Render the sign-up form; create the account when a valid form is POSTed."""
    form = UserForm(request.POST) if request.method == 'POST' else UserForm()

    if request.method == 'POST' and form.is_valid():
        account = form.save(commit=False)
        # Hash the raw password instead of storing it verbatim.
        account.set_password(form.cleaned_data['password'])
        account.save()
        return redirect('/login/')

    # GET, or a POST that failed validation: (re)render the bound/unbound form.
    return render(request, 'registration.html', {'form': form})
def profile(request):
    """Show the current user's profile page with all of their reservations."""
    current = request.user
    context = {
        'reservations': Reservation.objects.filter(user=current),
        'user': current,
    }
    return render(request, 'profile.html', context)
|
nilq/baby-python
|
python
|
# -*- Mode: Python; tab-width: 8; indent-tabs-mode: nil; python-indent-offset:4 -*-
# vim:set et sts=4 ts=4 tw=80:
# This Source Code Form is subject to the terms of the MIT License.
# If a copy of the ML was not distributed with this
# file, You can obtain one at https://opensource.org/licenses/MIT
# author: JackRed <jackred@tuta.io>
# Timothée Couble
from pso import PSO, minimise
from pso_ann import train_ANN_PSO
import train_help
from pso_json import get_boundary_config, decode_args, encode_args
import matplotlib.pyplot as plt
from args import opso_args
def scale_args(args, boundary):
    """Map raw particle coordinates onto their configured bounds.

    Mutates ``args`` in place: each entry is rescaled into the interval given
    by ``boundary``, the layer-count and neurons-per-layer entries are rounded
    to integers, and the last coordinate is translated into a list of
    activation functions (one per hidden layer plus the output layer).

    Returns a new list: the scaled args with the final raw entry replaced by
    the activation list.
    """
    for idx, key in enumerate(boundary):
        low, high = boundary[key][0], boundary[key][1]
        args[idx] = train_help.scale(args[idx], low, high)

    # nb_h_layers and nb_neurons_layer must be integers.
    args[1] = round(args[1])
    args[2] = round(args[2])

    # The last coordinate selects one activation function, repeated for
    # every layer.
    act_index = round(train_help.scale(args[-1], 0,
                                       len(train_help.ACTIVATIONS) - 1))
    activations = [train_help.ACTIVATIONS[act_index]
                   for _ in range(args[1] + 1)]
    return args[:-1] + [activations]
def fitness_mean(*args):
    """Train four PSO-optimised ANNs and average their best scores.

    Returns a tuple ``(mean_best_score, best_pso)`` where ``best_pso`` is the
    PSO instance that achieved the lowest ``best_global_score`` of the four runs.
    """
    scores = []
    champion = None
    champion_score = float("inf")

    for _ in range(4):
        pso, _ = train_ANN_PSO(*args)
        scores.append(pso.best_global_score)
        if pso.best_global_score < champion_score:
            champion_score = pso.best_global_score
            champion = pso

    return sum(scores) / len(scores), champion
def train_PSO_PSO_ANN(inputs, res_ex, boundary, opso_arg, pso_arg,
                      draw_graph=False):
    """Meta-optimise the PSO hyper-parameters of an ANN trainer with an outer PSO.

    The outer swarm searches an 11-dimensional space; each candidate position
    is decoded by ``scale_args`` and evaluated via ``fitness_mean``.
    Returns the outer PSO instance after its run completes.
    """
    dim = 11

    def meta_fitness(param):
        # Evaluate one outer-swarm position: decode it into inner-PSO/ANN
        # settings and average four training runs.
        return fitness_mean(inputs, res_ex, *pso_arg.values(),
                            *scale_args(param, boundary))

    opso = PSO(dim,
               meta_fitness,
               **opso_arg, comparator=minimise,
               min_bound=train_help.MIN_BOUND, max_bound=train_help.MAX_BOUND,
               endl="11")

    print("\nRunning...\n")
    if draw_graph:
        opso.set_graph_config(inputs=inputs, res_ex=res_ex, opso=True)
    opso.run()
    return opso
def main():
    """Entry point: run the outer PSO, persist the best settings, show graphs."""
    cli = opso_args().parse_args()

    data_file = train_help.name_to_file(cli.function)
    inputs, res_ex = train_help.read_input(data_file)

    opso_arg = decode_args('', 'opso', cli.onc)
    live_plot = cli.real_time
    boundary = get_boundary_config(cli.obc)

    pso = train_PSO_PSO_ANN(inputs, res_ex, boundary, **opso_arg,
                            draw_graph=live_plot)

    # Decode the winning position and merge in the fixed inner-PSO arguments,
    # then write the resulting configuration out for later reuse.
    dict_pso = {**train_help.args_to_pso_kwargs(
                    scale_args(pso.best_position, boundary)),
                **opso_arg["pso_arg"]}
    train_help.write_activation(dict_pso)
    encode_args(cli.function, 'pso', **dict_pso)

    if not live_plot:
        # Graphs were not drawn live, so render them once at the end.
        pso.set_graph_config(inputs=inputs, res_ex=res_ex, opso=True)
        pso.draw_graphs()
        plt.show()
# Standard script entry guard: run main() only when executed directly.
if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
# Test MQTT and Async
# 10 second button monitor
from machine import Pin
import pycom
import time
import uasyncio as asyncio
from my_mqtt import MyMqtt
pycom.heartbeat(False)
class RGB:
    """Tiny wrapper around the on-board RGB LED that remembers the last colour."""

    def __init__(self):
        # Last colour pushed to the LED, encoded as 0xRRGGBB.
        self.colour = 0x000000

    def set(self, colour):
        """Remember *colour* and write it to the hardware LED."""
        self.colour = colour
        pycom.rgbled(self.colour)
rgb = RGB()
async def killer(duration):
    """Sleep for *duration* seconds; awaited to bound the whole test run."""
    await asyncio.sleep(duration)
async def toggle(rgbLED, time_ms):
    """Step the LED through the 24-bit colour space, once every *time_ms* ms.

    Note: ``rgbLED`` is never used; the coroutine drives the global ``rgb``
    wrapper instead.
    """
    while True:
        await asyncio.sleep_ms(time_ms)
        colour = rgb.colour
        colour = (colour + 1) % 0xFFFFFF
        rgb.set(colour)  # advance one colour value per tick
# Starting to link to actual outputs to Sensors and multi threaded
# 1 second delays to prevent overloading MQTT which will then fail
rgb.set(0x200000)  # Red: starting up
print("test4 version 0.10 2018-08-22")
# Connect to the broker and publish the initial "not pressed" button state.
mq = MyMqtt()
mq.send_value("0", "button")
rgb.set(0x002000)  # Green: MQTT ready
async def button_monitor():
    """Poll pin P10 and publish button transitions over MQTT.

    Publishes "0" while the button is released and "1" while pressed.
    The 1000 ms sleeps throttle publishing so MQTT is not overloaded.
    """
    p_in = Pin('P10', mode=Pin.IN, pull=Pin.PULL_UP)
    while True:
        # Button not pushed
        pycom.rgbled(0xFF8000)  # Orange: idle, waiting for press
        mq.send_value("0", "button")
        await asyncio.sleep_ms(1000)
        while p_in() == 1:  # Wait for button push (pull-up: idle reads 1)
            await asyncio.sleep_ms(100)
        rgb.set(0x008000)  # Green: press detected
        mq.send_value("0", "button")
        await asyncio.sleep_ms(1000)
        mq.send_value("1", "button")
        await asyncio.sleep_ms(1000)
        while p_in() == 0:  # Wait for button release
            await asyncio.sleep_ms(100)
        rgb.set(0x808000)  # Yellow: release detected
        mq.send_value("1", "button")
        await asyncio.sleep_ms(1000)
def test(duration):
    """Run the LED-cycling and button-monitoring tasks for *duration* seconds."""
    loop = asyncio.get_event_loop()
    duration = int(duration)
    if duration > 0:
        print("Run test for {:3d} seconds".format(duration))
        loop.create_task(toggle(pycom.rgbled, 10))
        loop.create_task(button_monitor())
        # killer() only sleeps, so its completion ends the whole test window.
        loop.run_until_complete(killer(duration))
    loop.close()
# Run the demo for 20 seconds, then publish the final state.
test(20)
time.sleep_ms(1000)  # Make sure don't over load sending of data
mq.send_value("0", "button")
rgb.set(0x201010)  # pale pink: finished
print("Test completed")
|
nilq/baby-python
|
python
|
#Sobreira Gustavo
# TODO: scoreboard and replay support are still missing
from random import randint
def criar_tabuleiro(tabuleiro=None):
    """Fill a 3x3 board with the empty-square emoji.

    Args:
        tabuleiro: Board (list of rows) to initialise; defaults to the
            module-level ``campo`` used by the rest of the game.

    Returns:
        The initialised board.

    The original appended three fresh rows on *every* call, so each redraw via
    ``exibir_tabuleiro`` silently grew the board past index 2; rows are now
    only created when the board is empty, which leaves all visible behaviour
    (rows 0-2) unchanged.
    """
    if tabuleiro is None:
        tabuleiro = campo
    if not tabuleiro:
        for _ in range(3):
            tabuleiro.append(['🟫'] * 3)
    return tabuleiro
def enumerar_colunas():
    """Print the 'COLUNA' banner and the three column indices above the board."""
    print(' COLUNA')
    print(' ' * 4, end=' ')
    for num in range(3):
        print(f'{num}', end=' ')
    print()
def enumerar_linha():
    """Print the three board rows, spelling 'LINHA' vertically down the side."""
    linha = 'INHA'
    print(' L')
    for l in range(3):
        print(f' {linha[l]} {l} ', end=' ')
        # This inner loop is where the board gets its visual shape.
        for c in range(3):
            print(f'{campo[l][c]} ', end='')
        print()
    print(' A')
    # The print() above breaks the line after each row; comment it out and
    # rerun the code to see why it is needed.
def exibir_tabuleiro():
    """(Re)build the board if needed and print it between separator rules."""
    criar_tabuleiro()
    alinhar()
    enumerar_colunas()
    enumerar_linha()
    alinhar()
def selecionar_player():
    """Prompt until the player picks a symbol; return 1 (🔳) or 2 (🔘)."""
    while True:
        escolha = int(input('Antes de começarmos escolha seu símbolo\n'
                            '[ 1 ] - 🔳\n'
                            '[ 2 ] - 🔘\n'
                            'Digite o número referente ao símbolo: '))
        if escolha in (1, 2):
            return escolha
def verificar_ganhador(tabuleiro=None):
    """Return 1 if 🔳 has three in a line, 2 if 🔘 has, otherwise 0.

    Args:
        tabuleiro: 3x3 board to inspect; defaults to the module-level
            ``campo``, preserving the original no-argument call sites.

    Checks all eight winning lines: three rows, three columns and both
    diagonals. The original version only checked columns and diagonals, so a
    win across a row was never detected; it could also accumulate the counter
    over loop iterations and return values other than 0/1/2.
    """
    if tabuleiro is None:
        tabuleiro = campo

    lines = []
    for i in range(3):
        lines.append([tabuleiro[i][0], tabuleiro[i][1], tabuleiro[i][2]])  # row i
        lines.append([tabuleiro[0][i], tabuleiro[1][i], tabuleiro[2][i]])  # column i
    lines.append([tabuleiro[0][0], tabuleiro[1][1], tabuleiro[2][2]])  # main diagonal
    lines.append([tabuleiro[0][2], tabuleiro[1][1], tabuleiro[2][0]])  # anti-diagonal

    for line in lines:
        if line == ['🔳'] * 3:
            return 1
        if line == ['🔘'] * 3:
            return 2
    return 0
def fazer_jogada(rodada):
    """Run the game loop from round *rodada* until a win or a draw (round 10)."""
    # Since the player picking 'X' forces the bot onto 'O', we have:
    if escolha == 1:
        simbolo_player = '🔳'
        simbolo_bot = '🔘'
    else:
        simbolo_player = '🔘'
        simbolo_bot = '🔳'
    # So the game never always starts with the same side, the first mover is
    # drawn at random.
    ordem_jogada = 0
    if rodada == 0:
        ordem_jogada = randint(1, 2)
        rodada += 1
    while rodada != 10:
        if verificar_ganhador() != 0:
            if verificar_ganhador() == 1:
                print('O jogador 🔳 VENCEU')
            else:
                print('O jogador 🔘 VENCEU')
            break
        # An even draw means it is the human player's turn.
        if ordem_jogada % 2 == 0:
            erro = 1
            ordem_jogada -= 1
            # 'erro' drives the retry loop instead of using 'break';
            # it only changes once the player makes a legal move.
            while erro != 0:
                linha = int(input('Selecione uma coordenada utilizando apenas os números\n'
                                  'Linha: '))
                coluna = int(input('Coluna: '))
                if linha in (0, 1, 2) and coluna in (0, 1, 2):
                    if campo[linha][coluna] == '🟫':
                        campo[linha][coluna] = simbolo_player
                        erro -= 1
                        exibir_tabuleiro()
                        rodada += 1
                    else:
                        print(' =- =- =- =- =- Busque casas vazias -= -= -= -= -= ')
        else:
            # Bot turn: pick random cells until an empty one is found.
            erro = 1
            ordem_jogada += 1
            while erro != 0:
                linha = randint(0, 2)
                coluna = randint(0, 2)
                if campo[linha][coluna] == '🟫':
                    campo[linha][coluna] = simbolo_bot
                    erro -= 1
                    exibir_tabuleiro()
                    rodada += 1
    if rodada == 10:
        print('Deu Velha')
def alinhar():
    """Print a blank-padded separator rule between board renders."""
    print('\n')
    print('=' * 40)
    print('\n')
# --- game bootstrap -------------------------------------------------------
campo = []  # shared 3x3 board; the functions above read and write this list
exibir_tabuleiro()
escolha = selecionar_player()  # 1 -> player uses 🔳, 2 -> player uses 🔘
verificar_ganhador()  # NOTE(review): the return value is discarded here
fazer_jogada(0)
|
nilq/baby-python
|
python
|
# Copyright 2017-2018, Mohammad Haft-Javaherian. (mh973@cornell.edu).
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# References:
# -----------
# [1] Haft-Javaherian, M; Fang, L.; Muse, V.; Schaffer, C.B.; Nishimura,
# N.; & Sabuncu, M. R. (2018) Deep convolutional neural networks for
# segmenting 3D in vivo multiphoton images of vasculature in
# Alzheimer disease mouse models. *arXiv preprint, arXiv*:1801.00880.
# =============================================================================
from __future__ import print_function
import numpy as np
import tensorflow as tf
from six.moves import range
import h5py
import time
import scipy.io as io
import sys
from random import shuffle
import itertools as it
# Change isTrain to True if you want to train the network
isTrain = False
# Change isForward to True if you want to test the network
isForward = True
# padSize is the padding around the central voxel to generate the field of view
padSize = ((3, 3), (16, 16), (16, 16), (0, 0))
WindowSize = np.sum(padSize, axis=1) + 1
# pad size around the central voxel to generate 2D region of interest
corePadSize = 2
# number of epoch to train
nEpoch = 100
# The input h5 file location
if len(sys.argv) > 1:
    inputData = sys.argv[1]
else:
    # NOTE(review): raw_input exists only on Python 2; this prompt raises
    # NameError on Python 3 (use input() when porting).
    inputData = raw_input("Enter h5 input file path (e.g. ../a.h5)> ")
# batch size
if len(sys.argv) > 2:
    batch_size = int(sys.argv[2])
else:
    batch_size = 1000
#create placeholder for input and output nodes
x = tf.placeholder(tf.float32, shape=[None, WindowSize[0], WindowSize[1],
WindowSize[2], WindowSize[3]])
y_ = tf.placeholder(tf.float32, shape=[None, (2 * corePadSize + 1) ** 2, 2])
# Import Data
f = h5py.File(inputData, 'r')
im = np.array(f.get('/im'))
im = im.reshape(im.shape + (1, ))
imSize = im.size
imShape = im.shape
if isTrain:
l = np.array(f.get('/l'))
l = l.reshape(l.shape + (1,))
nc = im.shape[1]
tst = im[:, (nc / 2):(3 * nc / 4), :]
tstL = l[:,(nc / 2):(3 * nc / 4), :]
trn = im[:, 0:(nc / 2), :]
trnL = l[:, 0:(nc / 2), :]
tst = np.pad(tst, padSize, 'symmetric')
trn = np.pad(trn, padSize, 'symmetric')
if isForward:
im = np.pad(im, padSize, 'symmetric')
V = np.ndarray(shape=(imShape), dtype=np.float32)
print("Data loaded.")
def weight_variable(shape):
    """Create a weight Variable initialised from a truncated normal (std 0.1)."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))
def bias_variable(shape):
    """Create a bias Variable initialised to the constant 0.1."""
    return tf.Variable(tf.constant(0.1, shape=shape))
def conv2d(x, W):
    """Unit-stride 2-D convolution with VALID padding (appears unused below)."""
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='VALID')
def conv3d(x, W):
    """Unit-stride 3-D convolution with VALID padding."""
    return tf.nn.conv3d(x, W, strides=[1, 1, 1, 1, 1], padding='VALID')
def max_pool(x, shape):
    """3-D max pooling with window *shape* and stride 2 along every spatial axis."""
    return tf.nn.max_pool3d(x, ksize=shape,
                            strides=[1, 2, 2, 2, 1], padding='SAME')
def get_batch(im, l, corePadSize, ID):
    """ generate a batch from im and l for training
    based on the location of ID entries and core pad size. Note that the ID
    is based on no core pad.
    """
    # Two-channel targets: channel 1 holds the raw label patch, channel 0 its
    # complement (one-hot over background/foreground).
    l_ = np.ndarray(shape=(len(ID), (2 * corePadSize + 1) ** 2, 2),
                    dtype=np.float32)
    im_ = np.ndarray(shape=(len(ID), WindowSize[0], WindowSize[1], WindowSize[2],
                     WindowSize[3]), dtype=np.float32)
    for i in range(len(ID)):
        # ID indexes the un-padded label volume; im is already padded, so the
        # same coordinate addresses the corner of the padded field of view.
        r = np.unravel_index(ID[i], l.shape)
        im_[i, :, :, :] = im[r[0]:(r[0] + WindowSize[0]),
            r[1]:(r[1] + WindowSize[1]), r[2]:(r[2] + WindowSize[2]), :]
        l_[i, :, 1] = np.reshape(l[r[0],
            (r[1] - corePadSize):(r[1] + corePadSize + 1),
            (r[2] - corePadSize):(r[2] + corePadSize + 1),:],
            (2 * corePadSize + 1) ** 2)
        l_[i, :,0] = 1-l_[i, :, 1]
    return im_, l_
def get_batch3d_fwd(im, Vshape, ID):
    """ generate a batch from im for testing
    based on the location of ID entries and core pad size. Note that the ID
    is based on no core pad.
    """
    im_=np.ndarray(shape=(len(ID),WindowSize[0], WindowSize[1], WindowSize[2]
        , WindowSize[3]),dtype=np.float32)
    for i in range(len(ID)):
        # ID indexes the un-padded output volume; im is padded, so the same
        # coordinate marks the corner of the padded field of view.
        r = np.unravel_index(ID[i],Vshape)
        im_[i,:,:,:]=im[r[0]:r[0]+WindowSize[0],r[1]:r[1]+WindowSize[1],
            r[2]:r[2]+WindowSize[2],r[3]:r[3]+WindowSize[3]]
    return im_
# Define the DeepVess Architecture
# Stage 1: three 3x3x3 conv layers (32 filters) followed by in-plane pooling.
W_conv1a = weight_variable([3, 3, 3, 1, 32])
b_conv1a = bias_variable([32])
h_conv1a = tf.nn.relu(conv3d(x, W_conv1a) + b_conv1a)
W_conv1b = weight_variable([3, 3, 3, 32, 32])
b_conv1b = bias_variable([32])
h_conv1b = tf.nn.relu(conv3d(h_conv1a, W_conv1b) + b_conv1b)
W_conv1c = weight_variable([3, 3, 3, 32, 32])
b_conv1c = bias_variable([32])
h_conv1c = tf.nn.relu(conv3d(h_conv1b, W_conv1c) + b_conv1c)
h_pool1 = max_pool(h_conv1c,[1, 1, 2, 2, 1])
# Stage 2: two 1x3x3 conv layers (64 filters) and a second pooling step.
W_conv2a = weight_variable([1, 3, 3, 32, 64])
b_conv2a = bias_variable([64])
h_conv2a = tf.nn.relu(conv3d(h_pool1, W_conv2a) + b_conv2a)
W_conv2b = weight_variable([1, 3, 3, 64, 64])
b_conv2b = bias_variable([64])
h_conv2b = tf.nn.relu(conv3d(h_conv2a, W_conv2b) + b_conv2b)
h_pool2 = max_pool(h_conv2b,[1, 1, 2, 2, 1])
# Fully connected head with dropout; output is a 5x5 two-class patch.
W_fc1 = weight_variable([1 * 5 * 5 * 64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 1 * 5 * 5 * 64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
W_fc2 = weight_variable([1024, 1 * 5 * 5 * 2])
b_fc2 = bias_variable([1 * 5 * 5 * 2])
h_fc1 = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
y_conv = tf.reshape(h_fc1, [-1, 1 * 5 * 5, 2])
# loss function over (TP U FN U FP)
allButTN = tf.maximum(tf.argmax(y_conv, 2), tf.argmax(y_, 2))
cross_entropy = tf.reduce_mean(tf.multiply(tf.cast(allButTN, tf.float32),
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv)))
train_step = tf.train.AdamOptimizer(1e-6).minimize(cross_entropy)
# Accuracy is measured over the same TP/FN/FP mask used by the loss.
correct_prediction = tf.multiply(tf.argmax(y_conv, 2), tf.argmax(y_, 2))
accuracy = tf.divide(tf.reduce_sum(tf.cast(correct_prediction, tf.float32)),
    tf.reduce_sum(tf.cast(allButTN, tf.float32)))
sess.run(tf.global_variables_initializer())
# Add ops to save and restore all the variables.
saver = tf.train.Saver()
if isTrain:
    file_log = open("model.log", "w")
    file_log.write("Epoch, Step, training accuracy, test accuracy, Time (hr) \n")
    file_log.close()
    start = time.time()
    begin = start
    # Build the list of training sample centres: tile each slice with
    # non-overlapping (2*corePadSize+1)-sized cores, plus one final core
    # flush with each edge.
    trnSampleID = []
    for ii in range(0, trnL.shape[0]):
        for ij in it.chain(range(corePadSize,
                trnL.shape[1] - corePadSize, 2 * corePadSize + 1),
                [trnL.shape[1] - corePadSize - 1]):
            for ik in it.chain(range(corePadSize,trnL.shape[2]-corePadSize,
                    2*corePadSize + 1), [trnL.shape[2] - corePadSize - 1]):
                trnSampleID.append(np.ravel_multi_index((ii, ij, ik, 0),
                                                        trnL.shape))
    shuffle(trnSampleID)
    # Same tiling for the held-out test volume.
    tstSampleID = []
    for ii in range(0, tstL.shape[0]):
        for ij in it.chain(range(corePadSize, tstL.shape[1] - corePadSize,
                2 * corePadSize + 1), [tstL.shape[1] - corePadSize - 1]):
            for ik in it.chain(range(corePadSize, tstL.shape[2] - corePadSize,
                    2 * corePadSize + 1), [tstL.shape[2] - corePadSize - 1]):
                tstSampleID.append(np.ravel_multi_index((ii, ij, ik, 0),
                                                        tstL.shape))
    shuffle(tstSampleID)
    # One fixed test batch is reused for every accuracy report.
    x_tst,l_tst = get_batch(tst, tstL, corePadSize, tstSampleID[0:batch_size])
    for epoch in range(nEpoch):
        shuffle(trnSampleID)
        for i in range(np.int(np.ceil(len(trnSampleID) / batch_size))):
            x1,l1 = get_batch(trn, trnL, corePadSize,
                trnSampleID[(i * batch_size):((i + 1) * batch_size)])
            train_step.run(feed_dict={x: x1, y_: l1, keep_prob: 0.5})
            if i%100 == 99:
                # Report accuracy (dropout disabled) every 100 steps.
                train_accuracy = accuracy.eval(feed_dict={
                    x: x1 , y_: l1 , keep_prob: 1.0})
                test_accuracy = accuracy.eval(feed_dict={
                    x: x_tst , y_: l_tst, keep_prob: 1.0})
                end = time.time()
                print("epoch %d, step %d, training accuracy %g, test accuracy %g. "
                    "Elapsed time/sample is %e sec. %f hour to finish."%(epoch, i,
                    train_accuracy, test_accuracy, (end - start) / 100000,
                    ((nEpoch - epoch) * len(trnSampleID) / batch_size - i)
                    * (end - start) / 360000))
                file_log = open("model.log","a")
                file_log.write("%d, %d, %g, %g, %f \n" % (epoch, i, train_accuracy,
                    test_accuracy, (end-begin) / 3600))
                file_log.close()
                start = time.time()
        if epoch%10 == 9:
            # Checkpoint every 10 epochs.
            save_path = saver.save(sess, "model-epoch" + str(epoch) + ".ckpt")
            print("epoch %d, Model saved in file: %s" % (epoch, save_path))
if isForward:
    saver.restore(sess, "private/model-epoch29999.ckpt")
    print("Model restored.")
    # Tile the full volume with non-overlapping cores (plus edge-flush cores),
    # mirroring the sampling scheme used during training.
    vID=[]
    for ii in range(0,V.shape[0]):
        for ij in it.chain(range(corePadSize, V.shape[1] - corePadSize,
                2 * corePadSize + 1), [V.shape[1] - corePadSize - 1]):
            for ik in it.chain(range(corePadSize, V.shape[2] - corePadSize,
                    2 * corePadSize + 1), [V.shape[2] - corePadSize - 1]):
                vID.append(np.ravel_multi_index((ii, ij, ik, 0), V.shape))
    start = time.time()
    for i in range(np.int(np.ceil(len(vID) / batch_size))):
        x1 = get_batch3d_fwd(im,imShape, vID[i*batch_size:(i+1)*batch_size])
        y1 = np.reshape(y_conv.eval(feed_dict={x:x1,keep_prob: 1.0}),(-1,
            (2*corePadSize+1), (2*corePadSize+1),2))
        for j in range(y1.shape[0]):
            # Write each predicted core patch (class argmax) back into V.
            r=np.unravel_index(vID[i * batch_size + j], V.shape)
            V[r[0],(r[1]-corePadSize):(r[1]+corePadSize+1),
                (r[2]-corePadSize):(r[2]+corePadSize+1),0] = np.argmax(y1[j],axis=2)
        if i%100 == 99:
            end = time.time()
            print("step %d is done. %f min to finish." % (i, (end - start)
                / 60 / (i + 1) * (np.int(np.ceil(len(vID) / batch_size)) - i - 1)))
    # Save the segmentation in MATLAB layout (axes transposed to x, y, z).
    io.savemat(sys.argv[1][:-3] + '-V_fwd', {'V':np.transpose(np.reshape(V,
        imShape[0:3]), (2, 1, 0))})
    print(sys.argv[1][:-3] + '-V_fwd.mat is saved.')
|
nilq/baby-python
|
python
|
# Adapted from https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/precise_bn.py # noqa: E501
# Original licence: Copyright (c) 2019 Facebook, Inc under the Apache License 2.0 # noqa: E501
import logging
import time
import mmcv
import torch
from mmcv.parallel import MMDistributedDataParallel
from mmcv.runner import Hook
from mmcv.utils import print_log
from torch.nn import GroupNorm
from torch.nn.modules.batchnorm import _BatchNorm
from torch.nn.modules.instancenorm import _InstanceNorm
from torch.nn.parallel import DataParallel, DistributedDataParallel
from torch.utils.data import DataLoader
def is_parallel_module(module):
    """Check if a module is a parallel module.

    The following 3 modules (and their subclasses) are regarded as parallel
    modules: DataParallel, DistributedDataParallel,
    MMDistributedDataParallel (the deprecated version).

    Args:
        module (nn.Module): The module to be checked.

    Returns:
        bool: True if the input module is a parallel module.
    """
    parallels = (DataParallel, DistributedDataParallel,
                 MMDistributedDataParallel)
    # isinstance already returns a bool; no need for an explicit if/else.
    return isinstance(module, parallels)
@torch.no_grad()
def update_bn_stats(model, data_loader, num_iters=200, logger=None):
    """Recompute and update the batch norm stats to make them more precise.

    During training both BN stats and the weight are changing after every
    iteration, so the running average can not precisely reflect the actual
    stats of the current model.

    In this function, the BN stats are recomputed with fixed weights, to make
    the running average more precise. Specifically, it computes the true
    average of per-batch mean/variance instead of the running average.

    Args:
        model (nn.Module): The model whose bn stats will be recomputed.
        data_loader (iterator): The DataLoader iterator.
        num_iters (int): number of iterations to compute the stats.
        logger (:obj:`logging.Logger` | None): Logger for logging.
            Default: None.
    """
    model.train()
    assert len(data_loader) >= num_iters, (
        f'length of dataloader {len(data_loader)} must be greater than '
        f'iteration number {num_iters}')

    # Unwrap (DistributedD|D)ataParallel so BN layers are found on the real
    # model, but keep the wrapper for the forward pass.
    if is_parallel_module(model):
        parallel_module = model
        model = model.module
    else:
        parallel_module = model
    # Finds all the bn layers with training=True.
    bn_layers = [
        m for m in model.modules() if m.training and isinstance(m, _BatchNorm)
    ]

    if len(bn_layers) == 0:
        print_log('No BN found in model', logger=logger, level=logging.WARNING)
        return
    print_log(f'{len(bn_layers)} BN found', logger=logger)

    # Finds all the other norm layers with training=True.
    for m in model.modules():
        if m.training and isinstance(m, (_InstanceNorm, GroupNorm)):
            print_log(
                'IN/GN stats will be updated like training.',
                logger=logger,
                level=logging.WARNING)

    # In order to make the running stats only reflect the current batch, the
    # momentum is disabled.
    # bn.running_mean = (1 - momentum) * bn.running_mean + momentum *
    # batch_mean
    # Setting the momentum to 1.0 to compute the stats without momentum.
    momentum_actual = [bn.momentum for bn in bn_layers]  # pyre-ignore
    for bn in bn_layers:
        bn.momentum = 1.0

    # Note that running_var actually means "running average of variance"
    running_mean = [torch.zeros_like(bn.running_mean) for bn in bn_layers]
    running_var = [torch.zeros_like(bn.running_var) for bn in bn_layers]

    finish_before_loader = False
    prog_bar = mmcv.ProgressBar(len(data_loader))
    for ind, data in enumerate(data_loader):
        # The inner no_grad is redundant with the decorator but harmless.
        with torch.no_grad():
            parallel_module(**data, return_loss=False)
        prog_bar.update()

        for i, bn in enumerate(bn_layers):
            # Accumulates the bn stats as an incremental (true) mean over
            # per-batch values: mean_k = mean_{k-1} + (x_k - mean_{k-1}) / k.
            running_mean[i] += (bn.running_mean - running_mean[i]) / (ind + 1)
            # running var is actually the per-batch variance (momentum == 1)
            running_var[i] += (bn.running_var - running_var[i]) / (ind + 1)

        if (ind + 1) >= num_iters:
            finish_before_loader = True
            break
    assert finish_before_loader, 'Dataloader stopped before ' \
        f'iteration {num_iters}'

    for i, bn in enumerate(bn_layers):
        # Sets the precise bn stats.
        bn.running_mean = running_mean[i]
        bn.running_var = running_var[i]
        bn.momentum = momentum_actual[i]
class PreciseBNHook(Hook):
    """Precise BN hook.

    Attributes:
        dataloader (DataLoader): A PyTorch dataloader.
        num_iters (int): Number of iterations to update the bn stats.
            Default: 200.
        interval (int): Perform precise bn interval (by epochs). Default: 1.
    """

    def __init__(self, dataloader, num_iters=200, interval=1):
        """Validate and store the dataloader and schedule settings."""
        if not isinstance(dataloader, DataLoader):
            raise TypeError('dataloader must be a pytorch DataLoader, but got'
                            f' {type(dataloader)}')
        self.dataloader = dataloader
        self.interval = interval
        self.num_iters = num_iters

    def after_train_epoch(self, runner):
        """Recompute precise BN stats every ``interval`` training epochs."""
        if self.every_n_epochs(runner, self.interval):
            # sleep to avoid possible deadlock
            time.sleep(2.)
            print_log(
                f'Running Precise BN for {self.num_iters} iterations',
                logger=runner.logger)
            update_bn_stats(
                runner.model,
                self.dataloader,
                self.num_iters,
                logger=runner.logger)
            print_log('BN stats updated', logger=runner.logger)
            # sleep to avoid possible deadlock
            time.sleep(2.)
|
nilq/baby-python
|
python
|
from flask import Flask, jsonify
import RPi.GPIO as GPIO
app = Flask(__name__)
@app.route('/off/<int:pin>')
def getOff(pin):
    """Drive BCM *pin* high and report a 'LOW' status.

    The inverted status label suggests active-low wiring (e.g. a relay board
    where logic HIGH de-energises the channel) — confirm against the hardware.
    """
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(pin, GPIO.OUT)
    # The original read GPIO.input() into an unused local; the read has no
    # effect on the pin, so it was dropped.
    GPIO.output(pin, GPIO.HIGH)
    return jsonify({'status': 'LOW', 'pin_no': pin})
@app.route('/on/<int:pin>')
def getOn(pin):
    """Drive BCM *pin* low and report a 'HIGH' status.

    Mirrors getOff's inverted convention (active-low wiring assumed — confirm
    against the hardware).
    """
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(pin, GPIO.OUT)
    # The original read GPIO.input() into an unused local; dropped.
    GPIO.output(pin, GPIO.LOW)
    return jsonify({'status': 'HIGH', 'pin_no': pin})
@app.route('/status/<int:pin>')
def getStatus(pin):
    """Read BCM *pin* and report its status using the same inverted labels as
    the /on and /off routes (a 0 reading maps to 'HIGH')."""
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(pin, GPIO.OUT)
    level = GPIO.input(pin)
    status = 'HIGH' if level == 0 else 'LOW'
    return jsonify({'status': status, 'pin_no': pin})
# Listen on all interfaces; NOTE(review): debug=True is unsafe outside a
# trusted network (enables the interactive debugger).
if __name__ =='__main__':
    app.run(host='0.0.0.0', debug=True)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*-encoding: utf-8-*-
# author: Valentyn Kofanov
from kivy.lang import Builder
from kivy.uix.screenmanager import Screen
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Button
from kivy.uix.recycleview import RecycleView
# Load the shared kv layout rules for the widgets defined below.
Builder.load_file("style.kv")

# Placeholder chat roster shown in the RecycleView (no backend yet).
CHATS = ["Alex", "Masha", "Petya", "Vasya", "Vilatiy", "Misha", "John", "Michael", "Alexander", "Fedor", "111", "333"]
class RV(RecycleView):
    """RecycleView that shows one text row per chat name."""

    def __init__(self, chats=CHATS, **kwargs):
        # NOTE(review): the default aliases the module-level CHATS list, so
        # mutating CHATS affects every RV() created afterwards — confirm
        # that is intended.
        super(RV, self).__init__(**kwargs)
        self.data = [{'text': str(chat)} for chat in chats]
class DialogScreen(Screen):
    """Screen that hosts the chat list.

    ``chat_list`` is not defined here; presumably it is wired up in
    style.kv — TODO confirm.
    """

    def refresh(self):
        # Debug aid: print the label of the currently selected chat.
        print(self.chat_list.selected.text)
|
nilq/baby-python
|
python
|
from data_exploration import explore_over_time, frame_count, generate_summary_plot
from file_contents_gen import get_batches_multi_dir, multi_dir_data_gen
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers.core import Activation, Flatten, Dense, Lambda, Dropout
# from tf.keras.layers import InputLayer
from keras.layers import Cropping2D
from keras.layers.convolutional import Conv2D, MaxPooling2D
# from import tf as ktf
# import tensorflow as tf
# import keras
import matplotlib.pyplot as plt
import numpy as np
import keras
# choose the operations to perform
# load_prev_model can be combined with train_model to 'add on' to the knowledge of the network
produce_graph = True
load_prev_model = True
train_model = True # train the model using the data in the dirs variable
summary_plot = False # generate a matplotlib figure that include plots of steering angle, throttle, braking, etc and sample images from the 3 cameras
compile_statistics = False # generate statistics that indicate the distribution of the data by steering angle
# Directories containing recorded simulator runs (driving logs + camera images).
dirs = \
[
"../data/P3-sim-data-udacity/data",
"../data/P3-sim-data-hard-left-0"
]
# Print the number of frames available per data directory.
for d in dirs:
    print('frame count for', d, 'is: ', frame_count(d))
if summary_plot:
    # BUG FIX: `fname` was undefined here (NameError whenever
    # summary_plot=True); sample from the first data directory instead.
    images, sw_angles, throttle, brake_input, speeds = explore_over_time(dirs[0], 300)
    generate_summary_plot(images, sw_angles, throttle, brake_input, speeds)
if train_model:
    model = Sequential() # use the keras Sequential model type
    image_shape = (70, 160, 3)# images[0,0,:,:].shape
    # NOTE(review): image_shape is assigned but never used below.
    # model.add(__import__('tensorflow').keras.layers.InputLayer(input_shape=(None, 160, 320, 3)))
    # started with the NVIDIA End-to-End SDC network described here: https://devblogs.nvidia.com/deep-learning-self-driving-cars/
    # made adjustments to the sizes of the layers by trial and error and used greyscale instead of colour images
    model.add(Lambda(lambda x: __import__('tensorflow').image.rgb_to_grayscale(x)))
    # crop out parts of the top and bottom of the image, since these parts of the image do not seem necessary
    # for steering the car.
    model.add(Cropping2D(cropping=( (60,25), (0,0) )))
    # use a keras Lambda to resize the image
    model.add(Lambda(lambda x: __import__('keras').backend.tf.image.resize_images(x, (50,160))))
    # change the range of the data to [-1.0, 1.0]
    model.add(Lambda(lambda x: (x / 255.0 - 0.5) * 2))
    # add the convolutional layers
    model.add(Conv2D(filters=12, kernel_size=5, strides=(1,1), activation='relu'))
    model.add(Conv2D(filters=24, kernel_size=5, strides=(2,2), activation='relu'))
    model.add(Conv2D(filters=36, kernel_size=5, strides=(2,2), activation='relu'))
    model.add(Conv2D(filters=48, kernel_size=3, strides=(1,1), activation='relu'))
    model.add(Conv2D(filters=64, kernel_size=3, strides=(1,1), activation='relu'))
    # flatten the convolutional layers to connect to the Fully Connected layers
    model.add(Flatten())
    model.add(Dense(400, activation='relu'))
    model.add(Dense(600, activation='relu'))
    model.add(Dense(300, activation='relu'))
    model.add(Dense(100, activation='relu'))
    # use dropout to improve generalization to other data
    model.add(Dropout(0.5))
    model.add(Dense(1)) #steering wheel angle is the output
    # features = images[:,0,:,:]
    # labels = sw_angles
    opt = keras.optimizers.Adam(lr=0.0001) # use the Adam Optimizer - was successful in P2 and worked well here too
    # get the 'generator' for the data
    # In the multi_dir_data_gen function, I included an option to split the data into Training and Validation data
    # the keras fit function also provides options to split data into training/validation sets
    data_gen_all = multi_dir_data_gen(dirs, 64, 0.2, "ALL")
    # data_gen_train = multi_dir_data_gen(dirs, 64, 0.2, "TRAIN")
    # data_gen_valid = multi_dir_data_gen(dirs, 64, 0.2, "VALIDATION")
    model.compile(loss='mse', optimizer=opt)
    if load_prev_model:
        # NOTE(review): loading a saved model replaces `model` wholesale —
        # the architecture built above is discarded when this flag is set.
        model = keras.models.load_model('model.h5')
    if produce_graph:
        print(model.summary())
        from keras.utils import plot_model
        plot_model(model, to_file='model.png', show_shapes=True)
        # NOTE(review): exit() stops the script here, so the training loop
        # below never runs while produce_graph is True.
        exit()
    # I attempted to use model.fit_generator but there were some problems
    # using my data generator with custom batch size and the normal fit function from keras
    # works well anyway
    for features, labels in data_gen_all:
        print('features shape: ', features.shape)
        print('labels shape: ', labels.shape)
        model.fit(features, labels, validation_split=0.2, shuffle=True, epochs=5, batch_size=64)
    # save the model for later recall
    model.save('model.h5')
if compile_statistics:
    #define an array of bin boundaries and an array of counts (initialized to 0)
    bins = np.arange(-10.0,10.0,0.1)
    counts = np.arange(-10.0,10.0,0.1) * 0.0
    # count greater than, less than and equal to 0 steering angles to validate the data augmentation that is built into the generator
    count_gt_zero = 0
    count_lt_zero = 0
    count_eq_zero = 0
    # this loop generates the histogram counts
    for batch_ctr, images, sw_angles, throttle, brake_input, speeds in get_batches_multi_dir(dirs, 128):
        for sw_angle in sw_angles:
            if sw_angle > 0.0 or sw_angle < 0.0:
                # A nonzero angle is mirrored by the augmentation (see the two
                # histogram loops below), so it contributes one positive AND
                # one negative sample — both counters are bumped deliberately.
                count_lt_zero = count_lt_zero + 1
                count_gt_zero = count_gt_zero + 1
            else:
                # A zero angle mirrors onto itself, hence +2 — presumably to
                # keep totals comparable with the mirrored counts; confirm.
                count_eq_zero = count_eq_zero + 2
        for sw_angle in sw_angles:
            # histogram of the angles as recorded
            histo_loc = np.argmax(bins >= sw_angle)
            counts[histo_loc] = counts[histo_loc] + 1
        for sw_angle in sw_angles:
            # histogram of the mirrored (negated) angles
            histo_loc = np.argmax(bins >= -1.0 * sw_angle)
            counts[histo_loc] = counts[histo_loc] + 1
    print('count_gt_zero: ', count_gt_zero)
    print('count_lt_zero: ', count_lt_zero)
    print('count_eq_zero: ', count_eq_zero)
    # plot the histogram
    fig = plt.figure()
    ax=plt.subplot(111)
    plt.plot(bins, counts)
    ax.set_xticks(np.arange(-10,10,0.1), minor=True)
    ax.set_xticks(np.arange(-10,10,1.0), minor=False)
    # ax.set_yticks(np.arange(0, np.max(counts)), minor=True)
    plt.grid(which='major', axis='both')
    plt.grid(which='minor', axis='both')
    plt.show()
# model.fit_generator(data_gen_train, validation_data=data_gen_valid, samples_per_epoch=10, epochs=10)
# //steering: -1 to 1
# // throttle 0 to 1
# // brake 0 1
# // speed 0 30
|
nilq/baby-python
|
python
|
def print_title():
    """Print the application banner (title between two rules) to stdout."""
    for line in ('---------------------------',
                 ' HELLO WORLD',
                 '---------------------------',
                 ''):
        print(line)
def main():
    # Entry point: show the banner, then greet the user by the name they type.
    print_title()
    name_input = input('What is your name? ')
    print('Hello ' + name_input)
if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
#!/usr/bin/python3
"""
Module for the function to_json_string(my_obj) that returns the JSON
representation of an object (string).
"""
import json
def to_json_string(my_obj):
    """
    Serialize an object to its JSON string representation.
    Args:
        my_obj: Source object (must be JSON-serializable).
    Returns:
        The JSON representation as a string.
    """
    serialized = json.dumps(my_obj)
    return serialized
|
nilq/baby-python
|
python
|
from rest_framework import status
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from apps.evaluation.serializers.monthlyMeliaEvaluationSerliazer import MonthlyMeliaEvaluationSerliazer
from apps.hotel.models import Hotel
from backend.extraPermissions import IsFoodAndDrinkBoss
from apps.evaluation.models import MonthlyGastronomyEvaluation, MonthlyMeliaEvaluation
from apps.payTime.models import PayTime
from apps.workers.models import Worker
from backend.utils import insertion_sort
def getGastronomyEvaluationOnPayTime(pay_time: PayTime, worker: Worker):
    """Return the id of the worker's MonthlyGastronomyEvaluation for the
    given pay period, or None if there is none.

    Fixed: the exists()+get() pair issued two identical DB queries; a single
    filter(...).first() does one round-trip with the same result.
    """
    evaluation = MonthlyGastronomyEvaluation.objects.filter(
        payTime__id=pay_time.id,
        evaluateWorker__no_interno=worker.no_interno).first()
    return evaluation.id if evaluation is not None else None
def getMeliaEvaluationOnPayTime(pay_time: PayTime, worker: Worker):
    """Return the id of the worker's MonthlyMeliaEvaluation for the given
    pay period, or None if there is none.

    Fixed: the exists()+get() pair issued two identical DB queries; a single
    filter(...).first() does one round-trip with the same result.
    """
    evaluation = MonthlyMeliaEvaluation.objects.filter(
        payTime__id=pay_time.id,
        evaluateWorker__no_interno=worker.no_interno).first()
    return evaluation.id if evaluation is not None else None
@api_view(['POST'])
@permission_classes([IsAuthenticated, IsFoodAndDrinkBoss])
def getMonthlyPerformanceEvaluationReport(request):
    """Build the monthly Melia performance report for a hotel's active workers.

    POST body: ``hotelId``, ``payTimeId``. Workers that have an evaluation
    come first (insertion-sorted); workers without one are appended after.
    Returns 400 with a ``detail`` message on any failure.
    """
    data = request.data
    try:
        hotel = Hotel.objects.get(pk=int(data.get('hotelId')))
        payTime = PayTime.objects.get(pk=int(data.get('payTimeId')))
        listToOrder, listNone = [], []
        for worker in hotel.workers.filter(activo=True):
            evalId = getMeliaEvaluationOnPayTime(payTime, worker)
            meliaEvaluation = None if evalId is None else MonthlyMeliaEvaluation.objects.get(pk=evalId)
            serializer = None if evalId is None else MonthlyMeliaEvaluationSerliazer(meliaEvaluation, many=False).data
            newItem = {
                'worker': str(worker.nombreCompleto()).title(),
                'meliaEvaluation': serializer,
                'total': None if meliaEvaluation is None else meliaEvaluation.totalPoints(),
                'discount': None if meliaEvaluation is None else meliaEvaluation.getDisscount(),
            }
            if newItem['meliaEvaluation'] is None:
                listNone.append(newItem)
            else:
                listToOrder.append(newItem)
        insertion_sort(listToOrder)
        listToReturn = listToOrder + listNone
        return Response(listToReturn, status=status.HTTP_200_OK)
    except Exception as e:
        # BUG FIX: e.args may be empty (e.g. bare Exception()), in which case
        # e.args[0] raised IndexError inside the handler; fall back to str(e).
        detail = e.args[0] if e.args else str(e)
        return Response({"detail": detail}, status=status.HTTP_400_BAD_REQUEST)
|
nilq/baby-python
|
python
|
from json import loads
from fastapi.testclient import TestClient
from os.path import abspath, dirname, join
from main import app
class TestTopicsCRUDAsync:
    """Happy-path auth tests; requires the user described in data/credentials.json."""
    def test_bearer_token(self):
        """POSTing known-good credentials to /auth/token yields an access token."""
        client = TestClient(app)
        # Please create new user with the "credentials.json" info
        with open(join(dirname(abspath(__file__)), 'data', 'credentials.json'),
                  mode='r', encoding='utf-8') as f:
            example_user = loads(f.read())
        data = {
            'username': example_user['email'],
            'password': example_user['password'],
            'grant_type': '', 'scope': '', 'client_id': '', 'client_secret': ''
        }
        headers = {
            'accept': 'application/json',
            'Content-Type': 'application/x-www-form-urlencoded'
        }
        # Fixed: dropped the needless f-prefix on a literal without placeholders.
        response = client.post("/auth/token", data=data, headers=headers)
        try:
            assert response.status_code == 200
            assert isinstance(response.json()['access_token'], str)
        except (KeyError, AttributeError) as e:
            raise ValueError("There is no user who have already registered with this email address.") from e
class TestTopicsErrorsAsync:
    """Error-path auth tests (duplicate registration, bad login)."""
    def test_create_user_fail(self):
        """Registering the same email twice must return 400 on the second POST."""
        client = TestClient(app)
        data = '{\n  "email": "user@example.com",\n  "password": "string",\n  "is_root": false\n}'
        headers = {
            'accept': 'application/json',
            'Content-Type': 'application/json',
        }
        # Fixed: dropped the needless f-prefixes on literals without placeholders.
        client.post("/auth/users/", data=data, headers=headers)
        response = client.post("/auth/users/", data=data, headers=headers)
        assert response.status_code == 400
    def test_bearer_token_fail(self):
        """Logging in with an unknown user must return 400 with a detail message."""
        client = TestClient(app)
        data = {
            'username': 'test', 'password': 'test',
            'grant_type': '', 'scope': '', 'client_id': '', 'client_secret': ''
        }
        headers = {
            'accept': 'application/json',
            'Content-Type': 'application/x-www-form-urlencoded'
        }
        response = client.post("/auth/token", data=data, headers=headers)
        assert response.status_code == 400
        assert response.json()['detail'] == 'There is no user who have already registered with this email address.'
|
nilq/baby-python
|
python
|
from RecSearch.DataWorkers.Abstract import DataWorkers
from RecSearch.ExperimentSupport.ExperimentData import ExperimentData
import pandas as pd
class Metrics(DataWorkers):
    """
    Metric class adds metric data.
    """
    # Configs inline with [[NAME]]
    @classmethod
    def set_config(cls):
        """Extend the inherited config with the `precedence` option."""
        additional_config = {'required': {'precedence': {'validate': 'integer(default=40)'}}}
        cls.cfg = super().update_config(cls.cfg, additional_config)
    def __init__(self, name: str, data_worker_config: dict, Data: ExperimentData):
        self.class_name = self.get_classname()
        super().__init__(self.class_name, name, data_worker_config, Data)
    @classmethod
    def get_classname(cls):
        """Return this class's own name (used as the data-worker category)."""
        return cls.__name__
    def get_metrics(self, column_name: str, whos: pd.DataFrame, parameters: dict) -> pd.DataFrame:
        """
        Get neighborhood (list of ids) for every id in whos.index
        :param column_name: output column name
        :param whos: who(s) [with related data] to iterate to get metrics
        :param parameters: additional parameters
        :return: dataframe with column containing metric data for each who in who(s)
        """
        # BUG FIX: DataFrame.append was deprecated in pandas 1.4 and removed
        # in 2.0; collect the per-row Series and concatenate once (this is
        # also O(n) instead of O(n^2) re-copying).
        rows = []
        for who in whos.itertuples():
            data = self.Interface.iget_metric(who._asdict(), **parameters)
            rows.append(pd.Series(data=[v for v in data.values()],
                                  index=['M__' + column_name + k for k in data.keys()], name=who[0]))
        # concat along columns then transpose == one row per `who`, same as
        # the old per-row append.
        return pd.concat(rows, axis=1).T if rows else pd.DataFrame()
    def do_work(self):
        return self.get_metrics(self.name, self.eval, self.parameters)
Metrics.set_config()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# coding: utf-8
import logging
import os
import glob
import pickle
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from apiclient.http import MediaFileUpload
# Directory scanned for documents to convert and upload.
DIRECTORY = '/upload'
# OAuth scopes needed to create Docs and upload/share Drive files.
SCOPES = [
    'https://www.googleapis.com/auth/documents',
    'https://www.googleapis.com/auth/drive',
    'https://www.googleapis.com/auth/drive.file'
]
# Local port for the OAuth redirect server; 0 lets the OS pick one.
PORT = int(os.environ.get('PORT', 0))
def get_credentials(port: int = 0):
    """Return Google API credentials for SCOPES.

    Loads a cached token from /credentials/token.pickle when present,
    refreshes it if expired, and otherwise runs the installed-app OAuth
    flow from /credentials/credentials.json (raising FileNotFoundError if
    that file is missing). The resulting token is cached for the next run.
    """
    creds = None
    # The file token.pickle stores the user's access and refresh tokens, and is
    # created automatically when the authorization flow completes for the first
    # time.
    if os.path.exists('/credentials/token.pickle'):
        with open('/credentials/token.pickle', 'rb') as token:
            creds = pickle.load(token)
    # If there are no (valid) credentials available, let the user log in.
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            # Silent refresh using the stored refresh token.
            creds.refresh(Request())
        else:
            if not os.path.exists('/credentials/credentials.json'):
                raise FileNotFoundError(
                    'credentials.json does not exist. ' +
                    'Please follow README instruction ' +
                    '(and go to https://developers.google.com/docs/api/quickstart/python)'
                )
            # Interactive browser-based OAuth consent flow.
            flow = InstalledAppFlow.from_client_secrets_file('/credentials/credentials.json', SCOPES)
            creds = flow.run_local_server(port=port)
        # Save the credentials for the next run
        with open('/credentials/token.pickle', 'wb') as token:
            pickle.dump(creds, token)
    return creds
def upload_images(files, logger):
    """Upload PNG `files` to Drive, make each world-readable, return their ids.

    NOTE: every upload shares the same metadata, so all files are named
    'photo.png' on Drive. Permissions are granted in one batch request.
    """
    drive = build('drive', 'v3', credentials=get_credentials(PORT))
    uploaded_files = []
    file_metadata = {'name': 'photo.png'}
    batch = drive.new_batch_http_request()
    user_permission = {
        'type': 'anyone',
        'role': 'reader',
    }
    logger.info('Uploading images')
    for file in files:
        logger.info('Uploading %s' % file)
        media = MediaFileUpload(file, mimetype='image/png')
        # Fixed: the Drive API response used to be assigned back to `file`,
        # shadowing the loop variable; use a distinct name.
        created = drive.files().create(body=file_metadata, media_body=media, fields='id').execute()
        batch.add(
            drive.permissions().create(
                fileId=created.get('id'),
                body=user_permission,
                fields='id',
            )
        )
        uploaded_files.append(created.get('id'))
    logger.info('Allowing images access')
    batch.execute()
    return uploaded_files
def delete_uploaded_files(uploaded_files, logger):
    """Delete the given Drive file ids (cleanup after document creation)."""
    drive = build('drive', 'v3', credentials=get_credentials(PORT))
    logger.info('Deleting uploaded images')
    for uploaded_id in uploaded_files:
        logger.info('Deleting %s' % uploaded_id)
        drive.files().delete(fileId=uploaded_id).execute()
def create_document(title, files, logger):
    """Create a Google Doc named `title` containing the images in `files`.

    The images are first uploaded to Drive (publicly readable so the Docs
    API can fetch them), inserted into a new zero-margin document, and the
    Drive copies are deleted afterwards.
    """
    docs = build('docs', 'v1', credentials=get_credentials(PORT))
    uploaded_files = upload_images(files, logger)
    doc = docs.documents().create(body={'title': title}).execute()
    # raise ValueError(doc)
    # First request: remove all page margins so images can fill the page.
    requests_list = [{
        'updateDocumentStyle': {
            'documentStyle': {
                'marginTop': {
                    'magnitude': 0,
                    'unit': 'PT',
                },
                'marginBottom': {
                    'magnitude': 0,
                    'unit': 'PT',
                },
                'marginRight': {
                    'magnitude': 0,
                    'unit': 'PT',
                },
                'marginLeft': {
                    'magnitude': 0,
                    'unit': 'PT',
                },
            },
            'fields': 'marginTop,marginBottom,marginRight,marginLeft',
        },
    }]
    # Every image is inserted at index 1, so each insertion pushes earlier
    # ones down — presumably why the caller passes files in reverse order;
    # TODO confirm. 595x848 PT is roughly an A4 portrait page.
    for file_id in uploaded_files:
        requests_list.append({
            'insertInlineImage': {
                'location': {
                    'index': 1
                },
                'uri':
                    'https://docs.google.com/uc?id=' + file_id,
                'objectSize': {
                    'height': {
                        'magnitude': 848,
                        'unit': 'PT'
                    },
                    'width': {
                        'magnitude': 595,
                        'unit': 'PT'
                    }
                }
            }
        })
    logger.info('Creating document')
    docs.documents().batchUpdate(documentId=doc.get('documentId'), body={'requests': requests_list}).execute()
    delete_uploaded_files(uploaded_files, logger)
if __name__ == "__main__":
    # Configure DEBUG logging to stderr for the conversion/upload pipeline.
    handler = logging.StreamHandler()
    handler.setLevel(logging.DEBUG)
    handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)
    logger.addHandler(handler)
    # Every file under DIRECTORY is treated as a convertible document.
    files = [file for file in glob.glob(glob.escape(DIRECTORY) + '/**/*', recursive=True)]
    for file_path in files:
        logger.info("Converting %s" % file_path)
        # SECURITY NOTE(review): file_path is interpolated unquoted into a
        # shell command — a name with spaces or shell metacharacters breaks
        # this (or worse). Prefer subprocess.run([...], shell=False).
        bashCommand = 'convert -quality 100 -density 150 ' + file_path + ' /app/tmp/%04d.png'
        os.system(bashCommand)
        # reverse=True: create_document inserts each image at index 1, so
        # feeding pages in reverse presumably yields the right final order
        # — TODO confirm.
        files_images = sorted(
            [file_image for file_image in glob.glob(glob.escape('/app/tmp') + '/**/*', recursive=True)],
            reverse=True
        )
        create_document(title=os.path.basename(file_path), files=files_images, logger=logger)
        logger.info("Removing %s" % file_path)
        os.remove(file_path)
        # Clean up the intermediate PNG pages.
        for file in files_images:
            logger.info('Removing %s' % file)
            os.remove(file)
        logger.info("Done %s" % file_path)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Wu Tangsheng(lanbaba) <wuts73@gmail.com>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os, threading, logging
import os.path
from Queue import *
import hashlib
from ossync.lib import helper
from ossync.lib import queue_model
class QueueThread(threading.Thread):
    """Producer thread: pushes (bucket, root, relpath) entries onto the upload
    queue. Queue element format:
        "bucket::root::relpath::action::life"
    where `action` marks whether the file was created, modified or deleted,
    and `life` is the re-entry (retry) count.
    """
    # NOTE(review): this module is Python 2 (`from Queue import *`,
    # `e.message` below); `Full` comes from that star import.
    def __init__(self, bucket, dirs, queue, *args, **kwargs):
        threading.Thread.__init__(self, *args, **kwargs)
        self.bucket = bucket
        self.queue = queue
        self.dirs = dirs
        # Cooperative shutdown flag — set by terminate(); not currently
        # checked inside run().
        self._terminate = False
        self.logger = logging.getLogger('app')
        dbpath = 'db/ossync.db'
        # Persistent queue state, used to de-duplicate elements across runs.
        self.qm = queue_model.QueueModel(dbpath)
    def terminate(self):
        self._terminate = True
    def is_el_queued(self, hashcode):
        # True if an element with this hash is already persisted in the DB.
        row = self.qm.get(hashcode)
        if row:
            return True
        return False
    def run(self):
        files = {}
        for d in self.dirs:
            files[d] = list(helper.walk_files(os.path.normpath(d), yield_folders = True))
        if len(files) > 0:
            self.qm.open()
            self.logger.info('Queue path ...')
            for i in files:
                if len(files[i]) > 0:
                    for path in files[i]:
                        relpath = os.path.relpath(path, i) # path relative to the root dir
                        el = self.bucket + '::' + i+ '::' + relpath + '::C'
                        hashcode = helper.calc_el_md5(i, relpath, self.bucket)
                        if not self.is_el_queued(hashcode):
                            data={"root": i, "relpath": relpath, "bucket": self.bucket, "action": 'C', "status": 0, "retries" : 0}
                            self.qm.save(data)
                            '''queue el, el: element of queue , formated as "bucket::root::path"'''
                            try:
                                self.queue.put(el, block = True, timeout = 1)
                                msg = 'queue element:' + el
                                #print msg
                                self.logger.info(msg)
                            except Full as e:
                                # Queue full: signal the consumer and log.
                                self.queue.put(None)
                                self.logger.error(e.message)
            self.qm.close()
        # None is the end-of-stream sentinel for the consumer thread.
        self.queue.put(None)
        #self.queue.join()
        return
|
nilq/baby-python
|
python
|
import os
import midinormalizer
from mido import MidiFile, MetaMessage
from MusicRoll import *
def iter_midis_in_path(folder_path):
    """Yield (full_path, filename) for every .mid/.MID file under folder_path."""
    for root, _dirs, filenames in os.walk(folder_path):
        for name in filenames:
            if name.endswith((".mid", ".MID")):
                yield (os.path.join(root, name), name)
def perform(path):
    """Normalize the MIDI file at `path` into a MusicRoll (.mrl) dump."""
    print("Processing '{0}'".format(path))
    roll = MusicRoll(path, labels = [], tapes = [])
    midi = MidiFile(path)
    midinormalizer.MidiNormalizer(roll, midi).normalize(chop_loss_percent = 0.002) # 0.2 percent
    # Stamp the roll with the normalizer's hash so stale dumps can be detected.
    roll.set_hash(midinormalizer.md5())
    roll.dump(self_contained = False)
# from pycallgraph import PyCallGraph
# from pycallgraph.output import GraphvizOutput
if __name__ == "__main__":
    # with PyCallGraph(output=GraphvizOutput()):
    # Convert every MIDI file under the current directory whose .mrl dump is
    # missing or was produced by an older normalizer version.
    for path, file in iter_midis_in_path('.'):
        roll_name = path[:-4] + '.mrl'
        # no music roll file?
        if not os.path.isfile(roll_name):
            perform(path)
        else:
            # file is outdated?
            # NOTE(review): `pickle` is not imported here — presumably it
            # reaches this scope via `from MusicRoll import *`; confirm.
            old_roll = pickle.load(open(roll_name, 'rb'))
            if not (hasattr(old_roll, 'md5') and old_roll.md5 == midinormalizer.md5()):
                perform(path)
            else:
                print("Skipping '{0}'".format(file))
|
nilq/baby-python
|
python
|
from ekstep_data_pipelines.audio_transcription.transcription_sanitizers import (
BaseTranscriptionSanitizer,
)
from ekstep_data_pipelines.common.utils import get_logger
LOGGER = get_logger("GujratiTranscriptionSanitizer")
class GujratiSanitizer(BaseTranscriptionSanitizer):
    # Stub sanitizer for Gujarati transcriptions; sanitize() is not yet
    # implemented. (Class name spells "Gujrati" — kept for compatibility.)
    @staticmethod
    def get_instance(**kwargs):
        # Factory used by the pipeline; kwargs are currently ignored.
        return GujratiSanitizer()
    def __init__(self, *args, **kwargs):
        pass
    def sanitize(self, transcription):
        # TODO: implement Gujarati-specific sanitization; currently a no-op.
        pass
|
nilq/baby-python
|
python
|
from nose.plugins.attrib import attr
from gilda import ground
from indra.sources import hypothesis
from indra.sources import trips
from indra.statements import *
from indra.sources.hypothesis.processor import HypothesisProcessor, \
parse_context_entry, parse_grounding_entry, get_text_refs
from indra.sources.hypothesis.annotator import statement_to_annotations, \
evidence_to_annotation, get_annotation_text
@attr('nonpublic', 'slow', 'notravis')
def test_process_indra_annnotations():
    # NOTE(review): "annnotations" (triple n) is a typo in the test name;
    # kept as-is since runners/CI may select tests by name.
    hp = hypothesis.process_annotations(reader=trips.process_text)
    assert hp.statements
    for stmt in hp.statements:
        print(stmt)
        print(stmt.evidence[0])
def test_grounding_annotation():
    # A grounding-style annotation maps both surface forms to the same CHEBI id.
    hp = HypothesisProcessor(annotations=[grounding_annot_example])
    hp.extract_groundings()
    assert hp.groundings['HCQ'] == {'CHEBI': 'CHEBI:5801'}
    assert hp.groundings['Plaquenil'] == {'CHEBI': 'CHEBI:5801'}
@attr('slow')
def test_statement_annotation():
    # A statement-style annotation is read with the TRIPS reader; checks the
    # extracted subject/object plus the location/organ context lines.
    hp = HypothesisProcessor(annotations=[statement_annot_example],
                             reader=trips.process_text)
    hp.extract_statements()
    assert len(hp.statements) == 1
    stmt = hp.statements[0]
    assert stmt.subj.name == 'AMPK'
    assert stmt.obj.name == 'STAT3'
    context = stmt.evidence[0].context
    assert context.location.name == 'nucleus', context
    assert context.location.db_refs == {'GO': 'GO:0005634', 'TEXT': 'nucleus'}
    assert context.organ.name == 'Liver', context
    assert context.organ.db_refs == {'MESH': 'D008099', 'TEXT': 'liver'}
def test_get_text_refs_pmid():
    # A PubMed URL yields both the PMID and the original URL.
    url = 'https://www.ncbi.nlm.nih.gov/pubmed/32196952'
    refs = get_text_refs(url)
    assert refs.get('PMID') == '32196952', refs
    assert refs.get('URL') == url, refs
def test_get_text_refs_pmcid():
    # A PubMed Central URL yields the PMCID.
    url = 'https://www.ncbi.nlm.nih.gov/pmc/articles/PMC7071777/'
    refs = get_text_refs(url)
    assert refs.get('PMCID') == 'PMC7071777', refs
    assert refs.get('URL') == url, refs
def test_get_text_refs_biorxiv():
    # Both the landing page and its ".full" variant yield the same DOI.
    url = 'https://www.biorxiv.org/content/10.1101/2020.04.16.044016v1'
    refs = get_text_refs(url)
    assert refs.get('URL') == url, refs
    assert refs.get('DOI') == '10.1101/2020.04.16.044016', refs
    url = 'https://www.biorxiv.org/content/10.1101/2020.04.16.044016v1.full'
    refs = get_text_refs(url)
    assert refs.get('URL') == url, refs
    assert refs.get('DOI') == '10.1101/2020.04.16.044016', refs
def test_parse_grounding_entry():
    # One entry can carry multiple namespace groundings separated by '|'.
    entry = '[a and b] -> CHEBI:CHEBI:1234|PUBCHEM:5678'
    grounding = parse_grounding_entry(entry)
    assert grounding == {'a and b': {'CHEBI': 'CHEBI:1234',
                                     'PUBCHEM': '5678'}}, grounding
def test_parse_invalid_grounding_entry():
    # Malformed entries are rejected (None) rather than raising.
    entries = ['xxx', '[xxx]->a', 'xxx -> a', 'xxx -> a:1&b:4']
    for entry in entries:
        assert parse_grounding_entry(entry) is None
def test_parse_context_entry():
    # 'Cell type: ...' entries are grounded via gilda to a MESH term.
    context_dict = parse_context_entry('Cell type: antigen presenting cells',
                                       ground, 'antigen presenting cells')
    assert len(context_dict) == 1
    assert 'cell_type' in context_dict
    ref_context = context_dict['cell_type']
    assert ref_context.name == 'Antigen-Presenting Cells', ref_context
    assert ref_context.db_refs.get('MESH') == 'D000938'
    assert ref_context.db_refs.get('TEXT') == 'antigen presenting cells'
def test_parse_invalid_context_entry():
    # Unknown context keys or missing separators yield None.
    entries = ['xxx: yyy', 'Disease:something', 'xxx']
    for entry in entries:
        assert parse_context_entry(entry, ground) is None
def test_parse_ungrounded_context_entry():
    # When gilda cannot ground the text, only the TEXT ref is kept.
    entry = 'Cell type: CD4+ T-cells'
    context_dict = parse_context_entry(entry, ground)
    assert len(context_dict['cell_type'].db_refs) == 1, \
        context_dict['cell_type'].db_refs
    assert context_dict['cell_type'].db_refs['TEXT'] == \
        'CD4+ T-cells', context_dict['cell_type'].db_refs
# Fixture: a grounding-style annotation as returned by the hypothes.is API.
grounding_annot_example = {
    'uri': 'https://en.wikipedia.org/wiki/Hydroxychloroquine',
    'text': '[Plaquenil] -> CHEBI:CHEBI:5801\n\n[HCQ] -> CHEBI:CHEBI:5801',
    'tags': ['gilda'],
    'target': [{'source': 'https://en.wikipedia.org/wiki/Hydroxychloroquine'}],
    'document': {'title': ['Hydroxychloroquine - Wikipedia']},
}
# Fixture: a statement-style annotation with organ/location context lines.
statement_annot_example = {
    'id': '4nBYAmqwEeq1ujf13__Y-w',
    'uri': 'https://www.ncbi.nlm.nih.gov/pubmed/32190173',
    'text': 'AMPK activates STAT3\nOrgan: liver\nLocation: nucleus',
    'tags': [],
}
def test_get_annotation_text():
    # Test statement with multiple grounded agents
    stmt = Inhibition(
        Agent('vemurafenib', db_refs={'CHEBI': 'CHEBI:63637'}),
        Agent('BRAF', db_refs={'HGNC': '1097'})
    )
    # With annotate_agents=True, grounded agents become identifiers.org links.
    annot_text = get_annotation_text(stmt, annotate_agents=True)
    assert annot_text == \
        '[vemurafenib](https://identifiers.org/CHEBI:63637) inhibits ' \
        '[BRAF](https://identifiers.org/hgnc:1097).', annot_text
    annot_text = get_annotation_text(stmt, annotate_agents=False)
    assert annot_text == 'Vemurafenib inhibits BRAF.', annot_text
    # Test statement with ungrounded and None agents
    stmt = Phosphorylation(None, Agent('X'))
    annot_text = get_annotation_text(stmt, annotate_agents=True)
    assert annot_text == 'X is phosphorylated.', annot_text
    annot_text = get_annotation_text(stmt, annotate_agents=False)
    assert annot_text == 'X is phosphorylated.', annot_text
def test_evidence_to_annot():
    # Evidence without text or text refs cannot be annotated -> None.
    # No evidence text
    ev = Evidence(source_api='reach')
    assert evidence_to_annotation(ev) is None
    # No text refs
    ev = Evidence(source_api='reach', text='Some text')
    assert evidence_to_annotation(ev) is None
    # Various text refs
    ev = Evidence(source_api='reach', text='Some text',
                  pmid='12345')
    annot = evidence_to_annotation(ev)
    assert annot == {'url': 'https://pubmed.ncbi.nlm.nih.gov/12345/',
                     'target_text': 'Some text',
                     'tags': ['reach']}, annot
    ev = Evidence(source_api='reach', text='Some text',
                  pmid=None, text_refs={'PMCID': '12345'})
    annot = evidence_to_annotation(ev)
    assert annot['url'] == 'https://www.ncbi.nlm.nih.gov/pmc/articles/12345/'
    ev = Evidence(source_api='reach', text='Some text',
                  pmid=None, text_refs={'URL': 'https://wikipedia.org'})
    annot = evidence_to_annotation(ev)
    assert annot['url'] == 'https://wikipedia.org'
def test_statement_to_annotations():
    # Evidences lacking text are filtered; the rest become annotations.
    evs = [
        # This will get filtered out
        Evidence(source_api='reach'),
        # This will get added as an annotation
        Evidence(source_api='sparser', text='some text 1',
                 pmid='12345'),
    ]
    stmt = Dephosphorylation(None, Agent('X'), evidence=evs)
    annots = statement_to_annotations(stmt)
    assert len(annots) == 1
    assert annots[0]['target_text'] == 'some text 1'
|
nilq/baby-python
|
python
|
from __future__ import absolute_import, print_function, unicode_literals
from xml.dom.minidom import parseString
from jinja2 import Template
from .forward_parameter import ForwardParametersAction
from .interface import Action
from .multi_action import MultiAction
_SYNC_DESCRIPTION_TEMPLATE = Template(""" <hudson.plugins.descriptionsetter.DescriptionSetterBuilder plugin="description-setter@1.10">
<regexp></regexp>
<description>{{ description | escape }}</description>
</hudson.plugins.descriptionsetter.DescriptionSetterBuilder>""")
class MultiSyncAction(Action):
    """
    A MultiSync action wraps many sync actions
    in order to generate a coherent description
    setting build step.
    """
    def __init__(self, output_format, children):
        # Delegate the bulk of the work to a MultiAction over the children.
        self.multi = MultiAction(output_format, children)
        self.children = children
        self.output_format = output_format
    def generate_parameters(self):
        # Union of the children's parameter definitions (XML strings).
        return self.multi.generate_parameters()
    def generate_build_steps(self):
        # Description-setter step first, then the children's steps, then the
        # parameter-forwarding step.
        return self.description() + self.multi.generate_build_steps() + self.generate_parameter_forwarding_step()
    def generate_post_build_steps(self):
        return self.multi.generate_post_build_steps()
    def description(self):
        # Combine the children's descriptions into a single <div> rendered by
        # the Jenkins description-setter plugin.
        description_lines = ["<div>"]
        child_descriptions = "{}".format("<br/>\n".join([child.description() for child in self.children]))
        description_lines.append(child_descriptions)
        description_lines.append("</div>")
        return [_SYNC_DESCRIPTION_TEMPLATE.render(description="\n".join(description_lines))]
    def generate_parameter_forwarding_step(self):
        """
        This is a terrible hack to get around the fact that
        we take structured data from the configuration and
        immediately flatten it into XML strings in these
        generators. A proper approach would keep the data
        structured and, perhaps, do the conversion to XML
        parameter definitions later on, so we did not have
        to parse out from XML here. That challenges a basic
        assumption of generators, we can revisit that in the
        future if SJB is still around.
        """
        parameter_names = []
        for parameter in self.generate_parameters():
            # Recover the parameter name from the generated XML definition.
            parameter_name = (
                parseString(parameter).
                getElementsByTagName("hudson.model.StringParameterDefinition")[0].
                getElementsByTagName("name")[0].
                childNodes[0].nodeValue
            )
            # De-duplicate while preserving first-seen order.
            if parameter_name in parameter_names:
                continue
            parameter_names.append(parameter_name)
        return ForwardParametersAction(parameter_names).generate_build_steps()
|
nilq/baby-python
|
python
|
# -*- coding: UTF-8 -*-
from __future__ import unicode_literals
import attr
from string import Formatter
from ._core import Enum
class EmojiSize(Enum):
    """Used to specify the size of a sent emoji"""
    # Values are Facebook-internal id strings for each emoji size.
    LARGE = "369239383222810"
    MEDIUM = "369239343222814"
    SMALL = "369239263222822"
class MessageReaction(Enum):
    """Used to specify a message reaction"""
    # Values are the literal emoji characters Facebook expects.
    LOVE = "😍"
    SMILE = "😆"
    WOW = "😮"
    SAD = "😢"
    ANGRY = "😠"
    YES = "👍"
    NO = "👎"
@attr.s(cmp=False)
class Mention(object):
    """Represents a @mention"""
    #: The thread ID the mention is pointing at
    thread_id = attr.ib()
    #: The character where the mention starts
    offset = attr.ib(0)
    #: The length of the mention
    length = attr.ib(10)
@attr.s(cmp=False)
class Message(object):
    """Represents a Facebook message"""
    #: The actual message
    text = attr.ib(None)
    #: A list of :class:`Mention` objects
    mentions = attr.ib(factory=list, converter=lambda x: [] if x is None else x)
    #: A :class:`EmojiSize`. Size of a sent emoji
    emoji_size = attr.ib(None)
    #: The message ID
    uid = attr.ib(None, init=False)
    #: ID of the sender
    author = attr.ib(None, init=False)
    #: Timestamp of when the message was sent
    timestamp = attr.ib(None, init=False)
    #: Whether the message is read
    is_read = attr.ib(None, init=False)
    #: A list of pepole IDs who read the message, works only with :func:`fbchat.Client.fetchThreadMessages`
    read_by = attr.ib(factory=list, init=False)
    #: A dict with user's IDs as keys, and their :class:`MessageReaction` as values
    reactions = attr.ib(factory=dict, init=False)
    #: A :class:`Sticker`
    sticker = attr.ib(None)
    #: A list of attachments
    attachments = attr.ib(factory=list, converter=lambda x: [] if x is None else x)
    #: A list of :class:`QuickReply`
    quick_replies = attr.ib(factory=list, converter=lambda x: [] if x is None else x)
    #: Whether the message is unsent (deleted for everyone)
    unsent = attr.ib(False, init=False)
    @classmethod
    def formatMentions(cls, text, *args, **kwargs):
        """Like `str.format`, but takes tuples with a thread id and text instead.
        Returns a `Message` object, with the formatted string and relevant mentions.
        ```
        >>> Message.formatMentions("Hey {!r}! My name is {}", ("1234", "Peter"), ("4321", "Michael"))
        <Message (None): "Hey 'Peter'! My name is Michael", mentions=[<Mention 1234: offset=4 length=7>, <Mention 4321: offset=24 length=7>] emoji_size=None attachments=[]>
        >>> Message.formatMentions("Hey {p}! My name is {}", ("1234", "Michael"), p=("4321", "Peter"))
        <Message (None): 'Hey Peter! My name is Michael', mentions=[<Mention 4321: offset=4 length=5>, <Mention 1234: offset=22 length=7>] emoji_size=None attachments=[]>
        ```
        """
        result = ""
        mentions = list()
        # `offset` tracks the character position in the assembled string so
        # each Mention records where its name starts.
        offset = 0
        f = Formatter()
        field_names = [field_name[1] for field_name in f.parse(text)]
        # Automatic numbering ({}) and manual numbering ({0}) cannot be mixed,
        # mirroring str.format semantics.
        automatic = "" in field_names
        i = 0
        for (literal_text, field_name, format_spec, conversion) in f.parse(text):
            offset += len(literal_text)
            result += literal_text
            if field_name is None:
                continue
            if field_name == "":
                field_name = str(i)
                i += 1
            elif automatic and field_name.isdigit():
                raise ValueError(
                    "cannot switch from automatic field numbering to manual field specification"
                )
            # Each substituted value is a (thread_id, name) tuple.
            thread_id, name = f.get_field(field_name, args, kwargs)[0]
            # NOTE(review): format_spec is applied before conversion here,
            # which is the reverse of str.format's order — presumably
            # intentional for this API; confirm before changing.
            if format_spec:
                name = f.format_field(name, format_spec)
            if conversion:
                name = f.convert_field(name, conversion)
            result += name
            mentions.append(
                Mention(thread_id=thread_id, offset=offset, length=len(name))
            )
            offset += len(name)
        message = cls(text=result, mentions=mentions)
        return message
|
nilq/baby-python
|
python
|
from distutils.core import setup  # NOTE(review): shadowed by the setuptools import on the next line; safe to delete
from setuptools import setup
# Package metadata / build configuration for the `pyflask` chat application.
setup(
    name='pyflask',
    version='1.0',
    author='liuwill',
    author_email='liuwill@live.com',
    url='http://www.liuwill.com',
    # Runtime dependencies: Flask stack with websocket and CORS support.
    install_requires=[
        'flask>=0.12.1',
        'Flask-SocketIO>=2.8.6',
        'Flask-Cors>=3.0.2',
        'Jinja2>=2.9.6'
    ],
    packages=["chat"],
    #packages=['']
    #py_modules=['foo'],
    scripts=["main.py"],
    setup_requires=['pytest-runner'],
    tests_require=['pytest'],
)
|
nilq/baby-python
|
python
|
import torch
import torch.nn as nn
class DEM(nn.Module):
    """Detail Emphasis Module: refine features, then recalibrate them with a
    per-channel attention gate (squeeze-and-excite style)."""

    def __init__(self, channel):
        super(DEM, self).__init__()
        # Local refinement: reflection-padded 3x3 conv + BN + ReLU.
        self.conv1 = nn.Sequential(
            nn.ReflectionPad2d(1),
            nn.Conv2d(channel, channel, kernel_size=3, stride=1, padding=0),
            nn.BatchNorm2d(channel),
            nn.ReLU(True),
        )
        # Channel gate: global pool -> two 1x1 convs -> sigmoid in [0, 1].
        self.global_path = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(channel, channel, kernel_size=1, stride=1, padding=0),
            nn.ReLU(True),
            nn.Conv2d(channel, channel, kernel_size=1, stride=1, padding=0),
            nn.Sigmoid(),
        )

    def forward(self, x):
        """Return refined features plus their attention-recalibrated copy.

        inputs : x, a (B, C, H, W) feature map.
        returns: refined + refined * gate, where gate is (B, C, 1, 1)
                 broadcast over the spatial dimensions.
        """
        refined = self.conv1(x)
        gate = self.global_path(refined)
        return refined + refined * gate.expand_as(refined)
|
nilq/baby-python
|
python
|
# encoding: utf-8
"""
test.py
"""
import sys
def data_from_body(body):
    """Convert an iterable of byte values into a byte string.

    Works on both Python 2 (returns str) and Python 3 (returns bytes).
    """
    if sys.version_info[0] >= 3:
        return bytes(body)
    # Python 2 fallback: assemble a str one ordinal at a time.
    return ''.join(chr(value) for value in body)
|
nilq/baby-python
|
python
|
def spring_summer():
    """Spring + summer for every cell at once.

    Spring: trees eat nutrients youngest-first and age by one year; a tree
    that cannot be fed dies.  Summer: each dead tree turns into age//2
    nutrients, credited after all feeding in the cell is done (so the
    nutrients never feed trees of the same spring).
    """
    global soils, trees
    for i in range(N):
        for j in range(N):
            survivors = []
            recycled = 0
            # Feed in ascending age order; once one tree starves, every
            # older tree in the same cell starves too (soil only shrinks).
            for age in sorted(trees[i][j]):
                if soils[i][j] >= age:
                    soils[i][j] -= age
                    survivors.append(age + 1)
                else:
                    recycled += age // 2
            soils[i][j] += recycled
            trees[i][j] = survivors
    return
# The eight neighbouring cell offsets (orthogonal + diagonal).
delta = [(0, 1), (0, -1), (1, 0), (-1, 0), (1, 1), (1, -1), (-1, -1), (-1, 1)]
def autumn():
    """Autumn: every tree whose age is a multiple of 5 plants an age-1
    sapling in each of the 8 in-bounds neighbouring cells."""
    spawn_counts = [[0] * N for _ in range(N)]
    for r in range(N):
        for c in range(N):
            for age in trees[r][c]:
                if age % 5 == 0:
                    for dr, dc in delta:
                        nr, nc = r + dr, c + dc
                        if 0 <= nr < N and 0 <= nc < N:
                            spawn_counts[nr][nc] += 1
    # Add all saplings after the scan so new trees don't spawn this autumn.
    for r in range(N):
        for c in range(N):
            trees[r][c].extend([1] * spawn_counts[r][c])
    return
def winter():
    """Winter: the robot adds the fixed fertilizer amount to every cell."""
    for r in range(N):
        for c in range(N):
            soils[r][c] += fertilizer[r][c]
    return
# Board size N, initial tree count M, number of simulated years K.
N, M, K = map(int, input().split())
fertilizer = [list(map(int, input().split())) for _ in range(N)]
soils = [[5] * N for _ in range(N)]                  # every cell starts with 5 nutrients
trees = [[[] for _ in range(N)] for _ in range(N)]   # per-cell list of tree ages
for _ in range(M):
    row, col, age = map(int, input().split())
    trees[row - 1][col - 1].append(age)              # input is 1-indexed
# Run K full years of the four seasons.
for _ in range(K):
    spring_summer()
    autumn()
    winter()
# Report how many trees survive.
print(sum(len(cell) for board_row in trees for cell in board_row))
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function
# coding=utf-8
import sys
import os
import glob
import re
import numpy as np
import pandas as pd
import cv2
import tensorflow as tf
# Flask utils
from flask import Flask, redirect, url_for, request, render_template
from werkzeug.utils import secure_filename
from gevent.pywsgi import WSGIServer
# Define a flask app
app = Flask(__name__)
print(os.getcwd())
# Age-group class labels.
# NOTE(review): assumed to match the order of the model's output units — confirm.
labels = ['Middle','Old','Young']
# Keras
from keras.models import load_model
from keras.preprocessing import image
from keras.models import model_from_json
from keras.optimizers import SGD
# NOTE(review): these absolute paths appear unused — the relative
# 'model/model.json' / 'model/model.h5' literals below are what get loaded.
MODEL_PATH = 'C:/Users/rohan/Desktop/Work/Age_detection_dataset/App/model/model.json'
MODEL_PATH2 = 'C:/Users/rohan/Desktop/Work/Age_detection_dataset/App/model/model.h5'
# opening and store file in a variable
json_file = open('model/model.json','r')
loaded_model_json = json_file.read()
json_file.close()
# use Keras model_from_json to make a loaded model
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights('model/model.h5')
print("Loaded Model from disk")
# Compile with SGD so predict/evaluate can run.
opt = SGD(lr=0.01)
loaded_model.compile(loss='categorical_crossentropy',optimizer=opt,metrics=['accuracy'])
# Pre-build the predict function — presumably so the TF graph is ready
# before Flask request threads call predict; verify for this Keras version.
loaded_model._make_predict_function()
def model_predict(img_path, loaded_model):
    """Read an image from disk and return the model's class probabilities.

    Parameters:
        img_path: path to an image file readable by OpenCV.
        loaded_model: compiled Keras model; its `predict` is called once.

    Returns:
        numpy array of shape (1, num_classes) with one row of probabilities.
    """
    img = cv2.imread(img_path)
    img = cv2.resize(img, (64, 64))
    # Normalise to [0, 1] and keep an explicit batch axis: (1, 64, 64, 3).
    batch = np.array([img], dtype="float") / 255.0
    # Bug fix: predict on the whole batch. The original passed `images[0]`,
    # which drops the batch axis and makes Keras reject the 3-D input.
    pred = loaded_model.predict(batch)
    return pred
@app.route('/', methods=['GET'])
def index():
    """Serve the landing page."""
    return render_template('index.html')
@app.route('/predict', methods=['GET', 'POST'])
def upload():
    """Accept an uploaded image and return 'Label: confidence' as plain text.

    GET requests fall through and return None (kept for compatibility).
    """
    if request.method == 'POST':
        # Get the file from post request
        f = request.files['file']
        # Save the file to ./uploads
        basepath = os.path.dirname(__file__)
        file_path = os.path.join(
            basepath, 'uploads', secure_filename(f.filename))
        f.save(file_path)
        # Make prediction; preds has shape (1, num_classes).
        preds = model_predict(file_path, loaded_model)
        # Bug fix: argmax/amax along axis=1 return 1-element arrays; the
        # original `labels[i]` indexed a Python list with a numpy array,
        # which raises TypeError. Extract plain scalars first.
        i = int(preds.argmax(axis=1)[0])
        confidence = round(float(np.amax(preds, axis=1)[0]) * 100, 2)
        result = labels[i] + ": " + str(confidence)
        return result
    return None
if __name__ == '__main__':
    # Development server only; serve through a WSGI server in production.
    app.run(debug=True)
|
nilq/baby-python
|
python
|
from __future__ import print_function
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import math
## PyTorch implementation of CDCK2, CDCK5, CDCK6, speaker classifier models
# CDCK2: base model from the paper 'Representation Learning with Contrastive Predictive Coding'
# CDCK5: CDCK2 with a different decoder
# CDCK6: CDCK2 with a shared encoder and double decoders
# SpkClassifier: a simple NN for speaker classification
class CDCK6(nn.Module):
    ''' CDCK2 with double decoder and a shared encoder '''
    def __init__(self, timestep, batch_size, seq_len):
        """
        Args:
            timestep: number of future encoder frames each decoder predicts.
            batch_size: default batch size (kept for init_hidden* callers).
            seq_len: raw waveform length; the encoder downsamples by 160,
                so seq_len // 160 frames come out of the encoder.
        """
        super(CDCK6, self).__init__()
        self.batch_size = batch_size
        self.seq_len = seq_len
        self.timestep = timestep
        # Shared strided-conv encoder, total downsampling factor = 160.
        self.encoder = nn.Sequential(
            nn.Conv1d(1, 512, kernel_size=10, stride=5, padding=3, bias=False),
            nn.BatchNorm1d(512),
            nn.ReLU(inplace=True),
            nn.Conv1d(512, 512, kernel_size=8, stride=4, padding=2, bias=False),
            nn.BatchNorm1d(512),
            nn.ReLU(inplace=True),
            nn.Conv1d(512, 512, kernel_size=4, stride=2, padding=1, bias=False),
            nn.BatchNorm1d(512),
            nn.ReLU(inplace=True),
            nn.Conv1d(512, 512, kernel_size=4, stride=2, padding=1, bias=False),
            nn.BatchNorm1d(512),
            nn.ReLU(inplace=True),
            nn.Conv1d(512, 512, kernel_size=4, stride=2, padding=1, bias=False),
            nn.BatchNorm1d(512),
            nn.ReLU(inplace=True)
        )
        # Two independent autoregressors + linear decoders: gru1/Wk1 for the
        # forward waveform, gru2/Wk2 for the reversed waveform.
        self.gru1 = nn.GRU(512, 128, num_layers=1, bidirectional=False, batch_first=True)
        self.Wk1 = nn.ModuleList([nn.Linear(128, 512) for i in range(timestep)])
        self.gru2 = nn.GRU(512, 128, num_layers=1, bidirectional=False, batch_first=True)
        self.Wk2 = nn.ModuleList([nn.Linear(128, 512) for i in range(timestep)])
        self.softmax = nn.Softmax()
        self.lsoftmax = nn.LogSoftmax()
        def _weights_init(m):
            # Kaiming init for conv/linear weights, constants for batch-norm.
            if isinstance(m, nn.Linear):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            if isinstance(m, nn.Conv1d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm1d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # initialize gru1 and gru2
        for layer_p in self.gru1._all_weights:
            for p in layer_p:
                if 'weight' in p:
                    nn.init.kaiming_normal_(self.gru1.__getattr__(p), mode='fan_out', nonlinearity='relu')
        for layer_p in self.gru2._all_weights:
            for p in layer_p:
                if 'weight' in p:
                    nn.init.kaiming_normal_(self.gru2.__getattr__(p), mode='fan_out', nonlinearity='relu')
        self.apply(_weights_init)
    def init_hidden1(self, batch_size): # initialize gru1
        """Zero initial hidden state for gru1 (move to GPU at the call site)."""
        #return torch.zeros(1, batch_size, 128).cuda()
        return torch.zeros(1, batch_size, 128)
    def init_hidden2(self, batch_size): # initialize gru2
        """Zero initial hidden state for gru2 (move to GPU at the call site)."""
        #return torch.zeros(1, batch_size, 128).cuda()
        return torch.zeros(1, batch_size, 128)
    def forward(self, x, x_reverse, hidden1, hidden2):
        """One InfoNCE training pass over forward and reversed waveforms.

        Returns (accuracy, nce_loss, hidden1, hidden2). NOTE: accuracy is
        computed from the last predicted timestep of each GRU only, matching
        the original implementation.
        """
        batch = x.size()[0]
        nce = 0 # average over timestep and batch and gpus
        # Bug fix: use integer division. seq_len/160 is a float on Python 3
        # and torch.randint requires an integer upper bound.
        t_samples = torch.randint(self.seq_len//160-self.timestep, size=(1,)).long() # randomly pick time stamps. ONLY DO THIS ONCE FOR BOTH GRU.
        # first gru
        # input sequence is N*C*L, e.g. 8*1*20480
        z = self.encoder(x)
        # encoded sequence is N*C*L, e.g. 8*512*128
        # reshape to N*L*C for GRU, e.g. 8*128*512
        z = z.transpose(1,2)
        encode_samples = torch.empty((self.timestep,batch,512)).float() # e.g. size 12*8*512
        for i in np.arange(1, self.timestep+1):
            encode_samples[i-1] = z[:,t_samples+i,:].view(batch,512) # z_tk e.g. size 8*512
        forward_seq = z[:,:t_samples+1,:] # e.g. size 8*100*512
        output1, hidden1 = self.gru1(forward_seq, hidden1) # output size e.g. 8*100*256
        c_t = output1[:,t_samples,:].view(batch, 128) # c_t e.g. size 8*256
        pred = torch.empty((self.timestep,batch,512)).float() # e.g. size 12*8*512
        for i in np.arange(0, self.timestep):
            linear = self.Wk1[i]
            pred[i] = linear(c_t) # Wk*c_t e.g. size 8*512
        for i in np.arange(0, self.timestep):
            total = torch.mm(encode_samples[i], torch.transpose(pred[i],0,1)) # e.g. size 8*8
            correct1 = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0), torch.arange(0, batch))) # correct is a tensor
            nce += torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor
        # second gru
        # input sequence is N*C*L, e.g. 8*1*20480
        z = self.encoder(x_reverse)
        # encoded sequence is N*C*L, e.g. 8*512*128
        # reshape to N*L*C for GRU, e.g. 8*128*512
        z = z.transpose(1,2)
        encode_samples = torch.empty((self.timestep,batch,512)).float() # e.g. size 12*8*512
        for i in np.arange(1, self.timestep+1):
            encode_samples[i-1] = z[:,t_samples+i,:].view(batch,512) # z_tk e.g. size 8*512
        forward_seq = z[:,:t_samples+1,:] # e.g. size 8*100*512
        output2, hidden2 = self.gru2(forward_seq, hidden2) # output size e.g. 8*100*256
        c_t = output2[:,t_samples,:].view(batch, 128) # c_t e.g. size 8*256
        pred = torch.empty((self.timestep,batch,512)).float() # e.g. size 12*8*512
        for i in np.arange(0, self.timestep):
            linear = self.Wk2[i]
            pred[i] = linear(c_t) # Wk*c_t e.g. size 8*512
        for i in np.arange(0, self.timestep):
            total = torch.mm(encode_samples[i], torch.transpose(pred[i],0,1)) # e.g. size 8*8
            correct2 = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0), torch.arange(0, batch))) # correct is a tensor
            nce += torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor
        nce /= -1.*batch*self.timestep
        nce /= 2. # over two grus
        accuracy = 1.*(correct1.item()+correct2.item())/(batch*2) # accuracy over batch and two grus
        #print(torch.cat((output1, output2), dim=2).shape)
        return accuracy, nce, hidden1, hidden2
    def predict(self, x, x_reverse, hidden1, hidden2):
        """Return the concatenated context vectors of both GRUs for every frame."""
        batch = x.size()[0]
        # first gru
        # input sequence is N*C*L, e.g. 8*1*20480
        z1 = self.encoder(x)
        # encoded sequence is N*C*L, e.g. 8*512*128
        # reshape to N*L*C for GRU, e.g. 8*128*512
        z1 = z1.transpose(1,2)
        output1, hidden1 = self.gru1(z1, hidden1) # output size e.g. 8*128*256
        # second gru
        z2 = self.encoder(x_reverse)
        z2 = z2.transpose(1,2)
        output2, hidden2 = self.gru2(z2, hidden2)
        return torch.cat((output1, output2), dim=2) # size (64, seq_len, 256)
        #return torch.cat((z1, z2), dim=2) # size (64, seq_len, 512*2)
#return torch.cat((z1, z2), dim=2) # size (64, seq_len, 512*2)
class CDCK5(nn.Module):
    ''' CDCK2 with a different decoder '''
    def __init__(self, timestep, batch_size, seq_len):
        """
        Args:
            timestep: number of future encoder frames to predict.
            batch_size: default batch size (kept for init_hidden callers).
            seq_len: raw waveform length; the encoder downsamples by 160.
        """
        super(CDCK5, self).__init__()
        self.batch_size = batch_size
        self.seq_len = seq_len
        self.timestep = timestep
        # Strided-conv encoder, total downsampling factor = 160.
        self.encoder = nn.Sequential(
            nn.Conv1d(1, 512, kernel_size=10, stride=5, padding=3, bias=False),
            nn.BatchNorm1d(512),
            nn.ReLU(inplace=True),
            nn.Conv1d(512, 512, kernel_size=8, stride=4, padding=2, bias=False),
            nn.BatchNorm1d(512),
            nn.ReLU(inplace=True),
            nn.Conv1d(512, 512, kernel_size=4, stride=2, padding=1, bias=False),
            nn.BatchNorm1d(512),
            nn.ReLU(inplace=True),
            nn.Conv1d(512, 512, kernel_size=4, stride=2, padding=1, bias=False),
            nn.BatchNorm1d(512),
            nn.ReLU(inplace=True),
            nn.Conv1d(512, 512, kernel_size=4, stride=2, padding=1, bias=False),
            nn.BatchNorm1d(512),
            nn.ReLU(inplace=True)
        )
        # Smaller (40-dim, 2-layer) autoregressor than CDCK2's 256-dim GRU.
        self.gru = nn.GRU(512, 40, num_layers=2, bidirectional=False, batch_first=True)
        self.Wk = nn.ModuleList([nn.Linear(40, 512) for i in range(timestep)])
        self.softmax = nn.Softmax()
        self.lsoftmax = nn.LogSoftmax()
        def _weights_init(m):
            # Kaiming init for conv/linear weights, constants for batch-norm.
            if isinstance(m, nn.Linear):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            if isinstance(m, nn.Conv1d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm1d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # initialize gru
        for layer_p in self.gru._all_weights:
            for p in layer_p:
                if 'weight' in p:
                    nn.init.kaiming_normal_(self.gru.__getattr__(p), mode='fan_out', nonlinearity='relu')
        self.apply(_weights_init)
    def init_hidden(self, batch_size):
        """Zero initial hidden state: (num_layers, batch, 40)."""
        #return torch.zeros(2*1, batch_size, 40).cuda()
        return torch.zeros(2*1, batch_size, 40)
    def forward(self, x, hidden):
        """One InfoNCE training pass; returns (accuracy, nce_loss, hidden)."""
        batch = x.size()[0]
        # Bug fix: use integer division. seq_len/160 is a float on Python 3
        # and torch.randint requires an integer upper bound.
        t_samples = torch.randint(self.seq_len//160-self.timestep, size=(1,)).long() # randomly pick time stamps
        # input sequence is N*C*L, e.g. 8*1*20480
        z = self.encoder(x)
        # encoded sequence is N*C*L, e.g. 8*512*128
        # reshape to N*L*C for GRU, e.g. 8*128*512
        z = z.transpose(1,2)
        nce = 0 # average over timestep and batch
        encode_samples = torch.empty((self.timestep,batch,512)).float() # e.g. size 12*8*512
        for i in np.arange(1, self.timestep+1):
            encode_samples[i-1] = z[:,t_samples+i,:].view(batch,512) # z_tk e.g. size 8*512
        forward_seq = z[:,:t_samples+1,:] # e.g. size 8*100*512
        output, hidden = self.gru(forward_seq, hidden) # output size e.g. 8*100*40
        c_t = output[:,t_samples,:].view(batch, 40) # c_t e.g. size 8*40
        pred = torch.empty((self.timestep,batch,512)).float() # e.g. size 12*8*512
        for i in np.arange(0, self.timestep):
            decoder = self.Wk[i]
            pred[i] = decoder(c_t) # Wk*c_t e.g. size 8*512
        for i in np.arange(0, self.timestep):
            total = torch.mm(encode_samples[i], torch.transpose(pred[i],0,1)) # e.g. size 8*8
            correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0), torch.arange(0, batch))) # correct is a tensor
            nce += torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor
        nce /= -1.*batch*self.timestep
        accuracy = 1.*correct.item()/batch
        return accuracy, nce, hidden
    def predict(self, x, hidden):
        """Return the GRU context vector for every encoder frame."""
        batch = x.size()[0]
        # input sequence is N*C*L, e.g. 8*1*20480
        z = self.encoder(x)
        # encoded sequence is N*C*L, e.g. 8*512*128
        # reshape to N*L*C for GRU, e.g. 8*128*512
        z = z.transpose(1,2)
        output, hidden = self.gru(z, hidden) # output size e.g. 8*128*40
        return output, hidden # return every frame
        #return output[:,-1,:], hidden # only return the last frame per utt
#return output[:,-1,:], hidden # only return the last frame per utt
class CDCK2(nn.Module):
    """Base CPC model: strided-conv encoder + 256-dim GRU autoregressor
    ('Representation Learning with Contrastive Predictive Coding')."""
    def __init__(self, timestep, batch_size, seq_len):
        """
        Args:
            timestep: number of future encoder frames to predict.
            batch_size: default batch size (kept for init_hidden callers).
            seq_len: raw waveform length; the encoder downsamples by 160.
        """
        super(CDCK2, self).__init__()
        self.batch_size = batch_size
        self.seq_len = seq_len
        self.timestep = timestep
        # Strided-conv encoder, total downsampling factor = 160.
        self.encoder = nn.Sequential(
            nn.Conv1d(1, 512, kernel_size=10, stride=5, padding=3, bias=False),
            nn.BatchNorm1d(512),
            nn.ReLU(inplace=True),
            nn.Conv1d(512, 512, kernel_size=8, stride=4, padding=2, bias=False),
            nn.BatchNorm1d(512),
            nn.ReLU(inplace=True),
            nn.Conv1d(512, 512, kernel_size=4, stride=2, padding=1, bias=False),
            nn.BatchNorm1d(512),
            nn.ReLU(inplace=True),
            nn.Conv1d(512, 512, kernel_size=4, stride=2, padding=1, bias=False),
            nn.BatchNorm1d(512),
            nn.ReLU(inplace=True),
            nn.Conv1d(512, 512, kernel_size=4, stride=2, padding=1, bias=False),
            nn.BatchNorm1d(512),
            nn.ReLU(inplace=True)
        )
        self.gru = nn.GRU(512, 256, num_layers=1, bidirectional=False, batch_first=True)
        self.Wk = nn.ModuleList([nn.Linear(256, 512) for i in range(timestep)])
        self.softmax = nn.Softmax()
        self.lsoftmax = nn.LogSoftmax()
        def _weights_init(m):
            # Kaiming init for conv/linear weights, constants for batch-norm.
            if isinstance(m, nn.Linear):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            if isinstance(m, nn.Conv1d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm1d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # initialize gru
        for layer_p in self.gru._all_weights:
            for p in layer_p:
                if 'weight' in p:
                    nn.init.kaiming_normal_(self.gru.__getattr__(p), mode='fan_out', nonlinearity='relu')
        self.apply(_weights_init)
    def init_hidden(self, batch_size, use_gpu=True):
        """Zero initial hidden state: (1, batch, 256), on GPU when use_gpu."""
        if use_gpu: return torch.zeros(1, batch_size, 256).cuda()
        else: return torch.zeros(1, batch_size, 256)
    def forward(self, x, hidden):
        """One InfoNCE training pass; returns (accuracy, nce_loss, hidden)."""
        batch = x.size()[0]
        # Bug fix: use integer division. seq_len/160 is a float on Python 3
        # and torch.randint requires an integer upper bound.
        t_samples = torch.randint(self.seq_len//160-self.timestep, size=(1,)).long() # randomly pick time stamps
        # input sequence is N*C*L, e.g. 8*1*20480
        z = self.encoder(x)
        # encoded sequence is N*C*L, e.g. 8*512*128
        # reshape to N*L*C for GRU, e.g. 8*128*512
        z = z.transpose(1,2)
        nce = 0 # average over timestep and batch
        encode_samples = torch.empty((self.timestep,batch,512)).float() # e.g. size 12*8*512
        for i in np.arange(1, self.timestep+1):
            encode_samples[i-1] = z[:,t_samples+i,:].view(batch,512) # z_tk e.g. size 8*512
        forward_seq = z[:,:t_samples+1,:] # e.g. size 8*100*512
        output, hidden = self.gru(forward_seq, hidden) # output size e.g. 8*100*256
        c_t = output[:,t_samples,:].view(batch, 256) # c_t e.g. size 8*256
        pred = torch.empty((self.timestep,batch,512)).float() # e.g. size 12*8*512
        for i in np.arange(0, self.timestep):
            linear = self.Wk[i]
            pred[i] = linear(c_t) # Wk*c_t e.g. size 8*512
        for i in np.arange(0, self.timestep):
            total = torch.mm(encode_samples[i], torch.transpose(pred[i],0,1)) # e.g. size 8*8
            correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0), torch.arange(0, batch))) # correct is a tensor
            nce += torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor
        nce /= -1.*batch*self.timestep
        accuracy = 1.*correct.item()/batch
        return accuracy, nce, hidden
    def predict(self, x, hidden):
        """Return the GRU context vector for every encoder frame."""
        batch = x.size()[0]
        # input sequence is N*C*L, e.g. 8*1*20480
        z = self.encoder(x)
        # encoded sequence is N*C*L, e.g. 8*512*128
        # reshape to N*L*C for GRU, e.g. 8*128*512
        z = z.transpose(1,2)
        output, hidden = self.gru(z, hidden) # output size e.g. 8*128*256
        return output, hidden # return every frame
        #return output[:,-1,:], hidden # only return the last frame per utt
class SpkClassifier(nn.Module):
    """Speaker-ID head: maps a 256-dim context vector to per-speaker
    log-probabilities via a small batch-normalised MLP."""

    def __init__(self, spk_num):
        super(SpkClassifier, self).__init__()
        # 256 -> 512 -> spk_num, batch-norm + ReLU on the hidden layer.
        self.classifier = nn.Sequential(
            nn.Linear(256, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(),
            nn.Linear(512, spk_num)
        )
        def _weights_init(m):
            # Kaiming for affine weights, constants for batch-norm params.
            if isinstance(m, nn.Linear):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            if isinstance(m, nn.Conv1d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm1d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        self.apply(_weights_init)

    def forward(self, x):
        """Return log-softmax scores of shape (batch, spk_num)."""
        logits = self.classifier(x)
        return F.log_softmax(logits, dim=-1)
|
nilq/baby-python
|
python
|
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer
from sqlalchemy.orm import sessionmaker
# SQLite engine backed by the local file tmp.db.
engine = create_engine("sqlite:///tmp.db")
# Declarative base shared by the mapped classes below.
Base = declarative_base()
class Signature(Base):
    """Mapped table `signature`: integer columns X (primary key), Y, Z."""
    __tablename__ = "signature"
    X = Column(Integer, primary_key=True)
    Y = Column(Integer)
    Z = Column(Integer)
class Signature2(Base):
    """Mapped table `signature2`: integer columns A (primary key), B, C."""
    __tablename__ = "signature2"
    A = Column(Integer, primary_key=True)
    B = Column(Integer)
    C = Column(Integer)
# Session factory bound to the engine; call Session() to open a session.
Session = sessionmaker(bind=engine)
|
nilq/baby-python
|
python
|
#!/usr/bin/python3
from lib.utility.SystemUtility import *
from lib.utility.SessionUtility import *
from lib.utility.DocumentUtility import *
from lib.utility.CustomJSONEncoder import *
|
nilq/baby-python
|
python
|
import asyncio
import aiohttp
from asynctest import TestCase
from asynctest.mock import CoroutineMock
from asgard.backends.chronos.impl import ChronosScheduledJobsBackend
from asgard.clients.chronos import ChronosClient
from asgard.conf import settings
from asgard.http.client import http_client
from asgard.models.account import Account
from asgard.models.user import User
from itests.util import USER_WITH_MULTIPLE_ACCOUNTS_DICT, ACCOUNT_DEV_DICT
from tests.utils import with_json_fixture
class ChronosScheduledJobsBackendTest(TestCase):
    """Integration/unit tests for ChronosScheduledJobsBackend.get_job_by_id."""
    async def setUp(self):
        # Fresh backend per test; individual tests may replace its client.
        self.backend = ChronosScheduledJobsBackend()
    async def test_get_job_by_id_job_not_found(self):
        """A missing job id resolves to None rather than raising."""
        job_id = "job-not-found"
        user = User(**USER_WITH_MULTIPLE_ACCOUNTS_DICT)
        account = Account(**ACCOUNT_DEV_DICT)
        job = await self.backend.get_job_by_id(job_id, user, account)
        self.assertIsNone(job)
    async def test_add_namespace_to_job_name(self):
        """The account namespace is prefixed onto the id sent to the client."""
        self.backend.client = CoroutineMock(spec=ChronosClient)
        self.backend.client.get_job_by_id.return_value = None
        user = User(**USER_WITH_MULTIPLE_ACCOUNTS_DICT)
        account = Account(**ACCOUNT_DEV_DICT)
        job_id = "my-scheduled-job"
        await self.backend.get_job_by_id(job_id, user, account)
        self.backend.client.get_job_by_id.assert_awaited_with(
            f"{account.namespace}-{job_id}"
        )
    @with_json_fixture("scheduled-jobs/chronos/infra-purge-logs-job.json")
    async def test_get_job_by_id_job_exists(self, job_fixture):
        """A job registered in Chronos (with namespace prefix) is returned
        un-prefixed: fixture name 'dev-scheduled-job' -> id 'scheduled-job'."""
        job_fixture["name"] = "dev-scheduled-job"
        async with http_client as client:
            await client.post(
                f"{settings.SCHEDULED_JOBS_SERVICE_ADDRESS}/v1/scheduler/iso8601",
                json=job_fixture,
            )
            # Give Chronos time to register the job and answer the request below.
            await asyncio.sleep(1)
            user = User(**USER_WITH_MULTIPLE_ACCOUNTS_DICT)
            account = Account(**ACCOUNT_DEV_DICT)
            job_id = "scheduled-job"
            job = await self.backend.get_job_by_id(job_id, user, account)
            self.assertEqual(job_id, job.id)
    async def test_get_job_by_id_service_unavailable(self):
        """
        For now we simply let the connection error propagate to the caller.
        """
        get_job_by_id_mock = CoroutineMock(
            side_effect=aiohttp.ClientConnectionError()
        )
        self.backend.client = CoroutineMock(spec=ChronosClient)
        self.backend.client.get_job_by_id = get_job_by_id_mock
        user = User(**USER_WITH_MULTIPLE_ACCOUNTS_DICT)
        account = Account(**ACCOUNT_DEV_DICT)
        with self.assertRaises(aiohttp.ClientConnectionError):
            await self.backend.get_job_by_id("job-id", user, account)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import logging
import os
import string
import sys
import yaml
from glob import iglob
import django
from foia_hub.models import Agency, Office, Stats, ReadingRoomUrls
django.setup()
logger = logging.getLogger(__name__)
def check_urls(agency_url, row, field):
    """Reconcile an agency's stored URL with the URL found in `row[field]`.

    Returns the row's URL (possibly None — not every row has a website)
    unless the agency already has a different URL, in which case the
    existing value wins and the conflict is logged.
    """
    candidate = row.get(field, None)
    if not agency_url or agency_url == candidate:
        return candidate
    # Conflict between two records for the same agency: keep what we have.
    logger.warning('Two records with the same agency have two diff urls.')
    logger.warning('1:%s | 2:%s' % (agency_url, candidate))
    logger.warning('Website: %s, was not saved.' % (candidate))
    return agency_url
def extract_tty_phone(service_center):
    """Return the first TTY phone number listed for the service center.

    Returns None when no entry in service_center['phone'] contains 'TTY'.
    """
    for number in service_center['phone']:
        if 'TTY' in number:
            return number
def extract_non_tty_phone(public_liaison):
    """Prefer a non-TTY number; otherwise fall back to the first listed one.

    Returns None when the liaison has no phone numbers at all.
    """
    phones = public_liaison.get('phone')
    if not phones:
        return None
    regular = [number for number in phones if 'TTY' not in number]
    if regular:
        return regular[0]
    return phones[0]
def contactable_fields(agency, office_dict):
    """Add the Contactable and USAddress fields to the agency based on values
    in the office dictionary. This will be called for both parent and child
    agencies/offices (as written in our current data set)."""
    agency.phone = office_dict.get('phone')
    agency.emails = office_dict.get('emails', [])
    agency.fax = office_dict.get('fax')
    agency.office_url = office_dict.get('website')
    agency.request_form_url = office_dict.get('request_form')
    service_center = office_dict.get(
        'service_center', {'name': None, 'phone': ['']})
    agency.TTY_phone = extract_tty_phone(service_center)
    agency.person_name = service_center.get('name')
    public_liaison = office_dict.get(
        'public_liaison', {'name': None, 'phone': []})
    agency.public_liaison_phone = extract_non_tty_phone(public_liaison)
    agency.public_liaison_name = public_liaison.get('name')
    # Bug fix: the original `office_dict.get('address', )` returned None for
    # records without an address, crashing on the .get calls below. Default
    # to {} (the `or {}` also covers an explicit null in the YAML).
    address = office_dict.get('address', {}) or {}
    agency.zip_code = address.get('zip')
    agency.state = address.get('state')
    agency.city = address.get('city')
    agency.street = address.get('street')
    agency.address_lines = address.get('address_lines', [])
    update_reading_rooms(agency, office_dict)
def add_request_time_statistics(data, agency, office=None):
    """Load stats data about agencies into the database.

    Reads `data['request_time_stats']`, keeps only the most recent year, and
    stores a Stats row per complexity type ('S'imple / 'C'omplex) that has a
    median. Existing Stats rows for the agency/office are replaced.
    """
    # Delete old stats before adding
    Stats.objects.filter(agency=agency, office=office).delete()
    if data.get('request_time_stats'):
        # Keys are year strings; sorting descending puts the latest first.
        latest_year = sorted(
            data.get('request_time_stats').keys(), reverse=True)[0]
        # Rebind `data` to just the latest year's stats dict.
        data = data['request_time_stats'].get(latest_year)
        if data:
            iterator = [('S', 'simple'), ('C', 'complex')]
            for arg in iterator:
                median = data.get("%s_median_days" % arg[1])
                if median:
                    stat = Stats(
                        agency=agency,
                        office=office,
                        year=int(latest_year),
                        stat_type=arg[0])
                    # The source data uses the literal string 'less than 1'
                    # for sub-day medians; store 1 plus a flag instead.
                    if median == 'less than 1':
                        stat.median = 1
                        stat.less_than_one = True
                    else:
                        stat.median = median
                    stat.save()
def update_reading_rooms(contactable, data):
    """ This ensures that the reading rooms indicated in `data` are added to
    the contactable (agency, office). If the contactable already has reading
    rooms, those are deleted first.

    `data['reading_rooms']` is expected to be an iterable of
    (link_text, url) pairs.
    """
    # Delete all existing reading rooms, because we'll re-add them.
    contactable.reading_room_urls.all().delete()
    for link_text, url in data.get('reading_rooms', []):
        rru = ReadingRoomUrls(
            content_object=contactable, link_text=link_text, url=url)
        rru.save()
def build_abbreviation(agency_name):
    """Guess an abbreviation by collecting the ASCII capital letters of
    `agency_name`, in order of appearance."""
    return ''.join(
        ch for ch in agency_name if ch in string.ascii_uppercase)
def load_agency_fields(agency, data):
    """Copy the agency-specific YAML values onto the agency record."""
    # Fall back to guessing an abbreviation from the name's capital letters.
    agency.abbreviation = (
        data.get('abbreviation') or build_abbreviation(data.get('name')))
    agency.description = data.get('description')
    agency.keywords = data.get('keywords')
    agency.common_requests = data.get('common_requests', [])
    agency.no_records_about = data.get('no_records_about', [])
def load_data(data):
    """
    Loads data from each yaml file into the database.

    Creates/updates the top-level Agency, then either attaches contact
    fields directly (single department) or creates sub-agencies and
    offices for each department entry.
    """
    # Load the agency
    name = data['name']
    slug = Agency.slug_for(name)
    a, created = Agency.objects.get_or_create(slug=slug, name=name)
    # Load the agency-specific values
    load_agency_fields(a, data)
    # If the agency only has a single department the contactable fields
    if len(data['departments']) == 1:
        dept_rec = data['departments'][0]
        contactable_fields(a, dept_rec)
    a.save()
    add_request_time_statistics(data, a)
    # Load agency offices
    if len(data['departments']) > 1:
        for dept_rec in data['departments']:
            # If top-level=True office is saved as agency
            if dept_rec.get('top_level'):
                sub_agency_name = dept_rec['name']
                sub_agency_slug = Agency.slug_for(sub_agency_name)
                sub_agency, created = Agency.objects.get_or_create(
                    slug=sub_agency_slug, name=sub_agency_name)
                sub_agency.parent = a
                load_agency_fields(sub_agency, dept_rec)
                contactable_fields(sub_agency, dept_rec)
                sub_agency.save()
                add_request_time_statistics(dept_rec, sub_agency)
            else:
                # Just an office
                office_name = dept_rec['name']
                office_slug = Office.slug_for(office_name)
                # Office slugs are namespaced under the parent agency slug.
                full_slug = slug + '--' + office_slug
                o, created = Office.objects.get_or_create(
                    agency=a, slug=full_slug)
                o.office_slug = office_slug
                o.name = office_name
                contactable_fields(o, dept_rec)
                o.save()
                # Office stats are recorded against the parent agency + office.
                add_request_time_statistics(dept_rec, a, o)
def process_yamls(folder):
    """
    Loops through each agency yaml file and loads it into the database.
    """
    for item in iglob(os.path.join(folder, '*.yaml')):
        # Fixes: close the file handle (the original leaked it), and use
        # safe_load — yaml.load without a Loader is unsafe on arbitrary
        # input and raises a TypeError on PyYAML >= 6. safe_load rejects
        # documents with python object tags, which contact data shouldn't use.
        with open(item) as f:
            data = yaml.safe_load(f)
        load_data(data)
if __name__ == "__main__":
    '''
    To run this:
    python load_agency_contacts $LOCATION_OF_DATA
    The data is currently is a folder of yaml that is in the main
    foia repo. If you were running this locally, it might look something
    like this:
    python load_agency_contacts.py ~/Projects/code/foia/foia/contacts/data
    # If you want to designate an alternate csv path, specify that as the
    # next argument following the yaml dir otherwise
    # the script will default
    # to the following:
    # ../../data/foia-contacts/full-foia-contacts/
    '''
    # argv[1] is the folder containing the agency YAML files.
    yaml_folder = sys.argv[1]
    process_yamls(yaml_folder)
|
nilq/baby-python
|
python
|
import logging
from re import search
from flask import Blueprint
from flask_restful import Api
from com_cheese_api.cmm.hom.home import Home
from com_cheese_api.usr.user.resource.user import User, Users
from com_cheese_api.usr.user.resource.login import Login
from com_cheese_api.usr.user.resource.signup import SignUp
from com_cheese_api.cop.itm.cheese.resource.cheese import Cheeses, Cheese, CheeseSearch
from com_cheese_api.cop.itm.cheese.model.cheese_dto import CheeseVo
from com_cheese_api.cop.ord.order.resource.order import Order, Orders
from com_cheese_api.cop.ord.order.resource.search import OrderSearch
from com_cheese_api.cop.ord.order.resource.best import GenderBest, AgeBest
from com_cheese_api.cop.rev.review.model.review_dto import ReviewVo
from com_cheese_api.cop.rev.review.resource.review import Review, Reviews
from com_cheese_api.cop.chat.chatbot.resource.chatbot import Chatbot
from com_cheese_api.cop.rec.recommend.resource.recommend import Recommend
# Blueprint per API area; each carries its /api/... URL prefix.
home = Blueprint('home', __name__, url_prefix='/api')

# ================================= User =================================
user = Blueprint('user', __name__, url_prefix='/api/user')
users = Blueprint('users', __name__, url_prefix='/api/users')
# BUG FIX: url_prefix was 'api/login' (missing the leading slash), which
# is inconsistent with every other blueprint and breaks the /api/login URL.
login = Blueprint('login', __name__, url_prefix='/api/login')
signup = Blueprint('signup', __name__, url_prefix='/api/signup')

# ================================= Cheese =================================
cheese = Blueprint('cheese', __name__, url_prefix='/api/cheese')
cheeses = Blueprint('cheeses', __name__, url_prefix='/api/cheeses')
cheese_search = Blueprint('cheese_search', __name__, url_prefix='/api/cheese/search')

# ================================= Order =================================
order = Blueprint('order', __name__, url_prefix='/api/order')
orders = Blueprint('orders', __name__, url_prefix='/api/orders')
order_search = Blueprint('order_search', __name__, url_prefix='/api/order/search')
gender_best = Blueprint('gender_best', __name__, url_prefix='/api/gender_best')
age_best = Blueprint('age_best', __name__, url_prefix='/api/age_best')

# ================================= Review =================================
review = Blueprint('review', __name__, url_prefix='/api/review')
# review_new = Blueprint('review_new', __name__, url_prefix='/api/review_new')
reviews = Blueprint('reviews', __name__, url_prefix='/api/reviews')

# ================================= Chatbot =================================
# NOTE(review): trailing slash in this prefix differs from the others —
# confirm whether '/api/chatbot/' is intentional.
chatbot = Blueprint('chatbot', __name__, url_prefix='/api/chatbot/')

# ================================= Recommend =================================
recommend = Blueprint('recommend', __name__, url_prefix='/api/recommend')

# NOTE(review): each assignment below rebinds the single module-level name
# `api`, so only Api(recommend) survives as `api`.  The wrappers are kept
# because constructing Api(blueprint) has per-blueprint side effects in
# flask_restful — verify before pruning.
api = Api(home)
api = Api(user)
api = Api(users)
api = Api(login)
api = Api(signup)
# api = Api(cheese)
api = Api(cheeses)
api = Api(cheese_search)
api = Api(order)
api = Api(orders)
api = Api(order_search)
api = Api(gender_best)
api = Api(age_best)
api = Api(review)
# api = Api(review_new)
api = Api(reviews)
api = Api(chatbot)
api = Api(recommend)
####################################################################
def initialize_routes(api):
    """Attach every REST resource to *api* under its /api/... route(s)."""
    registrations = [
        (Home, ('/api',)),
        # ------------------------------ User ------------------------------
        (User, ('/api/user', '/api/user/<user_id>')),
        (Users, ('/api/users',)),
        (Login, ('/api/login',)),
        (SignUp, ('/api/signup',)),
        # ----------------------------- Cheese -----------------------------
        (Cheese, ('/api/cheese', '/api/cheese/<cheese_id>')),
        (Cheeses, ('/api/cheeses',)),
        (CheeseSearch, ('/api/cheese/search', '/api/cheese/search/<category>')),
        # ------------------------------ Order -----------------------------
        (Order, ('/api/order', '/api/order/<user_id>')),
        (OrderSearch, ('/api/order/search/<order_no>',)),
        (Orders, ('/api/orders',)),
        # api.add_resource(OrderBest, '/api/best')
        (GenderBest, ('/api/gender_best',)),
        (AgeBest, ('/api/age_best',)),
        # ----------------------------- Review -----------------------------
        (Review, ('/api/review', '/api/review/<review_no>')),
        # api.add_resource(ReviewNew, '/api/review_new/')
        (Reviews, ('/api/reviews',)),
        # ----------------------------- Chatbot ----------------------------
        (Chatbot, ('/api/chatbot',)),
        # ---------------------------- Recommend ---------------------------
        (Recommend, ('/api/recommend', '/api/recommend/<user_id>')),
    ]
    for resource, urls in registrations:
        api.add_resource(resource, *urls)
@home.errorhandler(500)
def home_api_error(e):
    """Log the full traceback for unhandled errors on the home blueprint."""
    logging.exception('An error occurred during home request. %s' % str(e))
    return 'An internal error occurred.', 500
@user.errorhandler(500)
def user_api_error(e):
    """Log the full traceback for unhandled errors on the user blueprint."""
    logging.exception('An error occurred during user request. %s' % str(e))
    return 'An internal error occurred.', 500
# BUG FIX: this handler was registered on the `user` blueprint, which
# already has user_api_error — a second registration for the same status
# code silently replaces the first.  Register it on `login`, as the
# function name implies, and fix the copy-pasted log message.
@login.errorhandler(500)
def login_api_error(e):
    """Log the full traceback for unhandled errors on the login blueprint."""
    logging.exception('An error occurred during login request. %s' % str(e))
    return 'An internal error occurred.', 500
# NOTE(review): this was the third 500 handler registered on the `user`
# blueprint (the registrations overwrite each other).  `signup` is the only
# auth-related blueprint without a handler, so it is moved there — confirm
# the intended target with the author.
@signup.errorhandler(500)
def auth_api_error(e):
    """Log the full traceback for unhandled errors on the signup blueprint."""
    logging.exception('An error occurred during signup request. %s' % str(e))
    return 'An internal error occurred.', 500
@cheeses.errorhandler(500)
def cheese_api_error(e):
    """Log the full traceback for unhandled errors on the cheeses blueprint."""
    logging.exception('An error occurred during cheeses request. %s' % str(e))
    return 'An internal error occurred.', 500
@order.errorhandler(500)
def order_api_error(e):
    """Log the full traceback for unhandled errors on the order blueprint."""
    # BUG FIX: the message previously said "home request" (copy-paste slip).
    logging.exception('An error occurred during order request. %s' % str(e))
    return 'An internal error occurred.', 500
# BUG FIX: this was registered on `cheeses` (which already has
# cheese_api_error, so one of the two was silently dropped) even though the
# function is named review_api_error.  Register it on `review` and fix the
# copy-pasted log message.
@review.errorhandler(500)
def review_api_error(e):
    """Log the full traceback for unhandled errors on the review blueprint."""
    logging.exception('An error occurred during review request. %s' % str(e))
    return 'An internal error occurred.', 500
@chatbot.errorhandler(500)
def chatbot_api_error(e):
    """Log the full traceback for unhandled errors on the chatbot blueprint."""
    # BUG FIX: function was a third definition of `review_api_error`
    # (shadowing the earlier ones) and the message said "cheeses request".
    logging.exception('An error occurred during chatbot request. %s' % str(e))
    return 'An internal error occurred.', 500
@recommend.errorhandler(500)
def recommend_api_error(e):
    """Log the full traceback for unhandled errors on the recommend blueprint."""
    # BUG FIX: function reused the name `review_api_error` and the message
    # said "cheeses request" (copy-paste slip).
    logging.exception('An error occurred during recommend request. %s' % str(e))
    return 'An internal error occurred.', 500
# ==============================================================
# ==================== =====================
# ==================== TEST =====================
# ==================== =====================
# ==============================================================
# from com_cheese_api.home.api import HomeAPI
# from com_cheese_api.cheese.cheese_api import CheeseAPI
# from com_cheese_api.board.board_api import BoardAPI
# from com_cheese_api.suggest.suggest_api import SuggestAPI
# from com_cheese_api.admin.admin_api import AdminAPI
# from com_cheese_api.login.login_api import LoginAPI
# from com_cheese_api.login.sign_up_api import SignUpAPI
# def initialize_routes(api):
# api.add_resource(HomeAPI, '/api')
# api.add_resource(CheeseAPI, '/api/cheese')
# api.add_resource(BoardAPI, '/api/board')
# api.add_resource(SuggestAPI, '/api/suggest')
# api.add_resource(AdminAPI, '/api/admin')
# api.add_resource(LoginAPI, '/api/login')
# api.add_resource(SignUpAPI, '/api/sign_up')
|
nilq/baby-python
|
python
|
from unicon.plugins.iosxe import IosXEServiceList, IosXESingleRpConnection
from .settings import IosXEIec3400Settings
from . import service_implementation as svc
from .statemachine import IosXEIec3400SingleRpStateMachine
class IosXEIec3400ServiceList(IosXEServiceList):
    """Service list for the IOS-XE IEC3400 platform."""
    def __init__(self):
        super().__init__()
        # Override the generic IOS-XE reload with the IEC3400-specific one.
        self.reload = svc.Reload
class IosXEIec3400SingleRpConnection(IosXESingleRpConnection):
    """Single-RP connection definition for IOS-XE IEC3400 devices."""
    os = 'iosxe'                # network OS token used for plugin lookup
    platform = 'iec3400'        # platform token matched against the testbed
    chassis_type = 'single_rp'  # single route-processor chassis
    state_machine_class = IosXEIec3400SingleRpStateMachine
    subcommand_list = IosXEIec3400ServiceList
    settings = IosXEIec3400Settings()
|
nilq/baby-python
|
python
|
from typing import Any, Iterable, Optional, TypeVar
from reactivex import Observable, abc
from reactivex.disposable import (
CompositeDisposable,
Disposable,
SerialDisposable,
SingleAssignmentDisposable,
)
from reactivex.scheduler import CurrentThreadScheduler
_T = TypeVar("_T")
def catch_with_iterable_(sources: Iterable[Observable[_T]]) -> Observable[_T]:
    """Continues an observable sequence that is terminated by an
    exception with the next observable sequence.
    Examples:
        >>> res = catch([xs, ys, zs])
        >>> res = reactivex.catch(src for src in [xs, ys, zs])
    Args:
        sources: an Iterable of observables. Thus a generator is accepted.
    Returns:
        An observable sequence containing elements from consecutive
        source sequences until a source sequence terminates
        successfully.
    """
    sources_ = iter(sources)

    def subscribe(
        observer: abc.ObserverBase[_T], scheduler_: Optional[abc.SchedulerBase] = None
    ) -> abc.DisposableBase:
        _scheduler = scheduler_ or CurrentThreadScheduler.singleton()
        subscription = SerialDisposable()
        cancelable = SerialDisposable()
        last_exception = None
        is_disposed = False

        def action(scheduler: abc.SchedulerBase, state: Any = None) -> None:
            def on_error(exn: Exception) -> None:
                # Remember the failure, then reschedule `action`, which
                # subscribes to the next source (or re-raises at exhaustion).
                nonlocal last_exception
                last_exception = exn
                cancelable.disposable = _scheduler.schedule(action)

            if is_disposed:
                return
            try:
                current = next(sources_)
            except StopIteration:
                # No sources left: surface the last error seen, or complete
                # normally if every source succeeded.
                if last_exception:
                    observer.on_error(last_exception)
                else:
                    observer.on_completed()
            except Exception as ex:  # pylint: disable=broad-except
                observer.on_error(ex)
            else:
                # Subscribe to the current source; its errors hop to the
                # next source instead of terminating the chain.
                d = SingleAssignmentDisposable()
                subscription.disposable = d
                d.disposable = current.subscribe(
                    observer.on_next,
                    on_error,
                    observer.on_completed,
                    scheduler=scheduler_,
                )

        cancelable.disposable = _scheduler.schedule(action)

        def dispose() -> None:
            # Flag checked at the top of `action` to stop the hop chain.
            nonlocal is_disposed
            is_disposed = True

        return CompositeDisposable(subscription, cancelable, Disposable(dispose))

    return Observable(subscribe)


__all__ = ["catch_with_iterable_"]
|
nilq/baby-python
|
python
|
from __future__ import division
import sys, time, csv, h2o
import pandas as pd
import numpy as np
# ---- Command-line handling (Python 2 script) -------------------------------
arg = sys.argv
print "Running script:", sys.argv[0]
arg = sys.argv[1:]
print "Arguments passed to script:", arg
load_data_fp = arg[0]              # input H2O frame path
saving_meanImputed_fp = arg[1]     # output: mean-imputed frame
saving_modelImputed_fp = arg[2]    # output: model-imputed frame
saving_means_fp = arg[3]           # output: per-period imputation means
saving_models_fp = arg[4]          # output dir: imputation models
predictors = arg[5:]               # remaining args are predictor names
# GWP_lag is treated as an int variable. It has no missings, so no need to impute it.
# But to keep this scripts code simple I impute anything with 'lag' in the var name.
to_impute = [var for var in predictors if 'lag' in var]
h2o.init(min_mem_size_GB=200, max_mem_size_GB = 225)
d = h2o.import_frame(path = load_data_fp)
#######################################################################
print "Making 'time_period' a factor..."
d['time_period'] = d['time_period'].asfactor()
assert d['time_period'].isfactor()
print d.levels(col='time_period')
d.describe()
def impute_data(method = "mean",
                to_impute = to_impute,
                predictors = predictors):
    """Impute missing values in the global H2O frame `d` and save results.

    method == "mean":  impute each predictor with its per-time_period mean;
                       the means are also written to disk so holdout data
                       can be imputed identically later.
    method == "model": train one GBM per predictor on rows where it is
                       present and predict the missing rows.

    NOTE(review): the defaults bind the module-level `to_impute` and
    `predictors` lists at definition time, and the function mutates the
    global frame `d` and writes files as side effects.
    """
    if method == "mean":
        print "Mean imputing missing data for predictors:", to_impute
        # find mean for each time period in data for each predictor, save them in a matrix with a col for the mean values of each predictor
        # then on holdout use this table to fill in all missing values based on the time period (row) and the variable (col) of this matrix
        #if using python module h2o-3.1.0.3131: grouped = data.group_by(["time_period"])
        # gm = [grouped.mean(predictor, na="rm").get_frame() for predictor in to_impute]
        gm = d["time_period"].unique()
        print "Finding means..."
        for predictor in to_impute:
            gm = gm.cbind(d.group_by(["time_period"], {predictor:["mean", d.names().index(predictor), "rm"]}, order_by = 0))
        gm.show()
        print "Saving the imputation means to disk..."
        h2o.download_csv(gm, filename = saving_means_fp)
        # df_py = h2o.as_list(gm)
        # Now that's stored for the holdout data, do this a faster way in java for the training data:
        for predictor in to_impute:
            d.impute(predictor, method='mean', by = ['time_period'], inplace = True)
            print "Done imputing", predictor
        print "Saving the final mean imputed data to disk..."
        h2o.export_file(frame = d, path =saving_meanImputed_fp, force=True)
    if method == "model":
        # sequentially impute 'newdata', not 'data', so the order of the predictor variables in the loop does not matter
        # otherwise, you would be using increasingly imputed data to make predictions as the loop progresses.
        newdata = d
        # With training data, build a model for each col and predict missing data, save the models, use them on the holdout data to predict all missing data.
        for predictor in to_impute:
            print "Building model for imputing " + predictor
            print "Subsetting the data into missing values for predictor and no missing values for predictor"
            na_ind = d[predictor].isna()
            not_na_ind = na_ind != 1.0
            to_train = d[not_na_ind]
            to_predict = d[na_ind]
            # Train on every predictor except the one being imputed.
            these_var = [var for var in predictors if var != predictor]
            trained = h2o.gbm(x = to_train[these_var],
                              y = to_train[[predictor]],
                              ntrees=300,
                              max_depth=6,
                              learn_rate=0.2)
            print "Saving the imputation tree model for " + predictor
            h2o.save_model(trained, dir = saving_models_fp, name = "dl_imputation_model_" + predictor)
            print "Imputing the missing " + predictor + " data by predicting with the model..."
            predicted = trained.predict(to_predict[these_var])
            tofillin = newdata[predictor]
            assert len(predicted) == len(tofillin[na_ind])
            tofillin[na_ind] = predicted # mutate the column in place
            newdata[predictor] = tofillin
        print "Saving the final model-imputed data to disk..."
        # NOTE(review): exports `d`, not `newdata` — they alias the same
        # frame here, but confirm this is intentional.
        h2o.export_file(frame = d, path =saving_modelImputed_fp, force=True)
def compare_frames(d1 = saving_meanImputed_fp,
                   d2 = saving_modelImputed_fp,
                   imputed = to_impute):
    """Reload the two imputed frames and compute summary statistics.

    NOTE(review): the quantiles/means/medians/mins computed below are
    currently discarded (see TODO) — the function only becomes useful
    once they are persisted or printed.
    """
    print "Comparing the resulting two matrices..."
    # Load the saved frames back in
    meanI = h2o.import_file(path = d1)
    modelI = h2o.import_file(path = d2)
    meanIquantiles = h2o.as_list(meanI[imputed].quantile(prob=[0.01,0.1,0.25,0.333,0.5,0.667,0.75,0.9,0.99]))
    modelIquantiles = h2o.as_list(modelI[imputed].quantile(prob=[0.01,0.1,0.25,0.333,0.5,0.667,0.75,0.9,0.99]))
    meanIcolmeans = [v.mean() for v in meanI[imputed]]
    modelIcolmeans = [v.mean() for v in modelI[imputed]]
    meanIcolmedians = [v.median() for v in meanI[imputed]]
    modelIcolmedians = [v.median() for v in modelI[imputed]]
    meanIcolmin = [v.min() for v in meanI[imputed]]
    modelIcolmin = [v.min() for v in modelI[imputed]]
    # TODO save all this in a csv file
# Run both imputation strategies back to back.
impute_data("mean")
impute_data("model")
# compare_frames()

# ---- Optional completion email ---------------------------------------------
# NOTE(review): the credentials below are None placeholders; the header
# concatenation would raise TypeError until they are filled in.  The block
# is disabled via email = False.
email = False
if(email):
    import smtplib
    GMAIL_USERNAME = None
    GMAIL_PW = None
    RECIP = None
    SMTP_NUM = None
    session = smtplib.SMTP('smtp.gmail.com', SMTP_NUM)
    session.ehlo()
    session.starttls()   # upgrade the connection to TLS before login
    session.login(GMAIL_USERNAME, GMAIL_PW)
    headers = "\r\n".join(["from: " + GMAIL_USERNAME,
                           "subject: " + "Finished running script: " + __file__,
                           "to: " + RECIP,
                           "mime-version: 1.0",
                           "content-type: text/html"])
    content = headers + "\r\n\r\n" + "Done running the script.\n Sent from my Python code."
    session.sendmail(GMAIL_USERNAME, RECIP, content)
|
nilq/baby-python
|
python
|
import hashlib
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QMessageBox
from Model.Register import Register
from Model.Values import Values
from Model.dataUtils import sqlconn
class Login_Window(QtWidgets.QMainWindow):
    """Login window for the newspaper-subscription system.

    Checks the entered account/password against the ``root`` (admin) and
    ``user`` tables; on success it opens the main GUI, otherwise it warns
    the user and refocuses the account field.
    """

    def __init__(self, gui, reg):
        super(Login_Window, self).__init__()
        self.setupUi(self)
        self.retranslateUi(self)
        self.gui = gui  # main window shown after a successful login
        self.reg = reg  # registration window opened by the second button

    def setupUi(self, MainWindow):
        """Create and lay out the account/password fields and buttons."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(386, 127)
        # (was QIcon('' '' 'logo.png') — adjacent literals collapsed)
        MainWindow.setWindowIcon(QIcon('logo.png'))
        MainWindow.setStyleSheet("background-image:url(logo.jpg)")
        self.centralWidget = QtWidgets.QWidget(MainWindow)
        self.centralWidget.setObjectName("centralWidget")
        self.lineEdit = QtWidgets.QLineEdit(self.centralWidget)
        self.lineEdit.setGeometry(QtCore.QRect(250, 24, 100, 24))
        self.lineEdit.setText("")
        self.lineEdit.setObjectName("lineEdit")
        self.lineEdit_2 = QtWidgets.QLineEdit(self.centralWidget)
        self.lineEdit_2.setGeometry(QtCore.QRect(250, 54, 100, 24))
        self.lineEdit_2.setText("")
        # Mask the password as it is typed.
        self.lineEdit_2.setEchoMode(QtWidgets.QLineEdit.Password)
        self.lineEdit_2.setObjectName("lineEdit_2")
        self.label = QtWidgets.QLabel(self.centralWidget)
        self.label.setGeometry(QtCore.QRect(200, 24, 48, 24))
        self.label.setTextFormat(QtCore.Qt.AutoText)
        self.label.setObjectName("label")
        self.label_2 = QtWidgets.QLabel(self.centralWidget)
        self.label_2.setGeometry(QtCore.QRect(200, 54, 48, 24))
        self.label_2.setObjectName("label_2")
        self.pushButton = QtWidgets.QPushButton(self.centralWidget)
        self.pushButton.setGeometry(QtCore.QRect(190, 90, 75, 23))
        self.pushButton.setObjectName("pushButton")
        self.pushButton_2 = QtWidgets.QPushButton(self.centralWidget)
        self.pushButton_2.setGeometry(QtCore.QRect(290, 90, 75, 23))
        self.pushButton_2.setObjectName("pushButton_2")
        MainWindow.setCentralWidget(self.centralWidget)
        self.pushButton.clicked.connect(self.word_get)
        # self.pushButton_2.clicked.connect(MainWindow.close)
        self.pushButton_2.clicked.connect(self.register)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Apply the translated window title, labels and placeholders."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "报刊订阅系统"))
        self.lineEdit.setPlaceholderText(_translate("MainWindow", "请输入帐号"))
        self.lineEdit_2.setPlaceholderText(_translate("MainWindow", "请输入密码"))
        # Pressing Enter in the password field triggers the login check.
        self.lineEdit_2.returnPressed.connect(self.word_get)
        self.label.setText(_translate("MainWindow", "帐 号"))
        self.label_2.setText(_translate("MainWindow", "密 码"))
        self.pushButton.setText(_translate("MainWindow", "确定"))
        self.pushButton_2.setText(_translate("MainWindow", "注册"))

    def register(self):
        """Switch to the registration window."""
        self.hide()
        self.reg.show()

    def word_get(self):
        """Validate the credentials and open the main GUI on success."""
        connect, cursor = sqlconn()
        login_user = self.lineEdit.text()
        login_password = self.lineEdit_2.text()
        # NOTE: MD5 is a weak password hash; consider bcrypt/scrypt.
        passwd = hashlib.md5(login_password.encode('UTF-8')).hexdigest()
        # SECURITY FIX: parameterised queries instead of string
        # concatenation — the previous code was injectable via the
        # account/password fields.  The %s placeholders assume a
        # MySQL-style driver (e.g. pymysql) behind sqlconn();
        # TODO confirm the driver's paramstyle.
        sql_root = "select * from root where usrname=%s and passwd=%s"
        sql_user = "select * from user where usrname=%s and passwd=%s"
        res_root = cursor.execute(sql_root, (login_user, passwd))
        res_user = cursor.execute(sql_user, (login_user, passwd))
        if res_root > 0:
            # Matched the administrator table.
            Values.IsRootLogin = True
            Values.CurrentUser = login_user
            self.gui.show()
            self.close()
        elif res_user > 0:
            Values.IsUserLogin = True
            Values.CurrentUser = login_user
            self.gui.show()
            self.close()
        else:
            QMessageBox.warning(self,
                                "警告",
                                "用户名或密码错误!",
                                QMessageBox.Yes)
            self.lineEdit.setFocus()
        self.gui.refreshAll()
        connect.close()
|
nilq/baby-python
|
python
|
#!/bin/envrun
import z3
import circ as ci
print("z3------")
# XOR test case
# (A + B)* ~(AB)
# Build the XOR formula with z3 for reference output.
x = z3.Bool('x')
y = z3.Bool('y')
expr = z3.And( # 'z'
    z3.Or(x, y),
    z3.Not(z3.And(x, y))
)
print(expr)
print("internal-------")
# Build the same XOR as a `circ` circuit with two inputs.
ix, iy = ci.In(), ci.In()
#ox = ci.Out()
xor = ci.Circuit.fromRAW(
    ci.And(
        ci.Or(ix, iy),
        ci.Not(ci.And(ix, iy))))
print(xor)
print(xor.debug())
print(xor.data)
try:
    # Exhaustive truth-table check of the circuit against Python's ^.
    # NOTE(review): the loop reuses the names x and y, shadowing the z3
    # Bools above — harmless here because the z3 part is already done.
    for x in False,True:
        for y in False,True:
            out = xor.eval(ix= x, iy= y)
            print(f"{x},\t{y}\t= {out}")
            # 'ox' is assumed to be the circuit's single output key.
            assert(out['ox'] == (x ^ y))
except ci.InputConflict as e:
    print(e)
|
nilq/baby-python
|
python
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import csv
import io
import logging
import os.path as op
import re
import math
import random
import string
from typing import Dict, List, Optional, Tuple
import numpy as np
import torch
from fairseq.data import (
ConcatDataset,
Dictionary,
FairseqDataset,
ResamplingDataset,
data_utils as fairseq_data_utils,
)
from fairseq.data.audio.audio_utils import get_fbank, get_waveform
from fairseq.data.audio.feature_transforms import CompositeAudioFeatureTransform
from fairseq.data.audio.speech_to_text_dataset import (
S2TDataConfig,
SpeechToTextDataset,
SpeechToTextDatasetCreator
)
from fairseq.data.audio.speech_to_text_dataset import (
get_features_or_waveform,
_collate_frames
)
logger = logging.getLogger(__name__)
class AudioDictDataset(SpeechToTextDataset):
LANG_TAG_TEMPLATE = "<lang:{}>"
def __init__(
    self,
    split: str,
    is_train_split: bool,
    data_cfg: S2TDataConfig,
    audio_paths: List[str],
    n_frames: List[int],
    audio_dict,
    align_time_min,
    align_time_max,
    total_time,
    src_texts: Optional[List[str]] = None,
    tgt_texts: Optional[List[str]] = None,
    speakers: Optional[List[str]] = None,
    src_langs: Optional[List[str]] = None,
    tgt_langs: Optional[List[str]] = None,
    ids: Optional[List[str]] = None,
    tgt_dict: Optional[Dictionary] = None,
    pre_tokenizer=None,
    bpe_tokenizer=None,
):
    """Speech-to-text dataset with aligned data augmentation (ADA).

    On top of the parent SpeechToTextDataset fields, it carries per-sample
    word-alignment windows (align_time_min/max, '-'-separated strings),
    total audio durations, and an audio dictionary mapping words to
    sampled fbank variants used for on-the-fly augmentation.

    NOTE(review): most augmentation knobs are read straight from data_cfg;
    their semantics are assumed from the attribute names — confirm against
    the S2TDataConfig definition.
    """
    self.split, self.is_train_split = split, is_train_split
    self.data_cfg = data_cfg
    self.audio_paths, self.n_frames = audio_paths, n_frames
    self.n_samples = len(audio_paths)
    # All parallel per-sample lists must agree in length.
    assert len(n_frames) == self.n_samples > 0
    assert src_texts is None or len(src_texts) == self.n_samples
    assert tgt_texts is None or len(tgt_texts) == self.n_samples
    assert speakers is None or len(speakers) == self.n_samples
    assert src_langs is None or len(src_langs) == self.n_samples
    assert tgt_langs is None or len(tgt_langs) == self.n_samples
    assert ids is None or len(ids) == self.n_samples
    assert (tgt_dict is None and tgt_texts is None) or (
        tgt_dict is not None and tgt_texts is not None
    )
    self.src_texts, self.tgt_texts = src_texts, tgt_texts
    self.src_langs, self.tgt_langs = src_langs, tgt_langs
    self.tgt_dict = tgt_dict
    self.check_tgt_lang_tag()
    self.ids = ids
    self.shuffle = data_cfg.shuffle if is_train_split else False
    self.feature_transforms = CompositeAudioFeatureTransform.from_config_dict(
        self.data_cfg.get_feature_transforms(split, is_train_split)
    )
    # For aligned augmentation
    self.align_time_min = align_time_min
    self.align_time_max = align_time_max
    self.audio_dict = audio_dict
    self.audio_dict_size = len(self.audio_dict)
    self.total_time = total_time
    # Used in the +AudioDict part of ADA-LM/ADA-RT
    self.max_samp_fbank = self.data_cfg.max_samp_fbank
    if self.max_samp_fbank is not None:
        assert isinstance(self.max_samp_fbank, int) and \
            self.max_samp_fbank >= 1
    self.num_samp_fbank = self.data_cfg.num_samp_fbank
    # Used in aligned masking (target side only w/o audio dict)
    self.max_mask_fbank = self.data_cfg.max_mask_fbank
    self.num_mask_fbank = self.data_cfg.num_mask_fbank
    # % of data in a mini-batch to be applied with sampleFbank
    # prob: should be -1 when sample_fbank is not used
    self.sampleFbank_prob = self.data_cfg.sampleFbank_prob
    self.apply_alignAugment = self.data_cfg.apply_alignAugment
    self.roberta = None
    self.skip_roberta = self.data_cfg.skip_roberta
    logger.info('Skip roberta: {}'.format(self.skip_roberta))
    if self.apply_alignAugment:
        if not self.skip_roberta:
            # ADA-LM: lazy import so roberta is only required when used.
            from fairseq.models.roberta import RobertaModel
            self.roberta = RobertaModel.from_pretrained(
                self.data_cfg.path_roberta, checkpoint_file='model.pt'
            )
            if self.data_cfg.roberta_fp16:
                self.roberta.half()
            logger.info('Inference of roberta with dtype: {}'.format(
                (next(self.roberta.parameters())).dtype)
            )
            self.roberta.cuda()
            self.roberta.eval()
        else:
            # ADA-RT: random replacements are drawn from these keys.
            self.audio_dict_keys = list(self.audio_dict.keys())
    self.alignAugment_prob = self.data_cfg.alignAugment_prob
    self.alignMask = self.data_cfg.alignMask
    self.skip_source = self.data_cfg.skip_source
    self.percentMaskedTokens = self.data_cfg.percentMaskedTokens
    self.thresholdMaskedTokens = self.data_cfg.thresholdMaskedTokens
    if self.alignAugment_prob > 0 and self.alignAugment_prob <= 1:
        assert self.thresholdMaskedTokens >= 1
    # SpecAugment-style random masking parameters.
    self.random_time_mask_N = self.data_cfg.random_time_mask_N
    self.random_time_mask_T = self.data_cfg.random_time_mask_T
    self.random_time_mask_p = self.data_cfg.random_time_mask_p
    self.random_time_mask_limited = self.data_cfg.random_time_mask_limited
    if self.random_time_mask_N is not None \
            and self.random_time_mask_T is not None:
        # Upper bound on total randomly-masked time frames.
        self.time_mask_max = self.random_time_mask_N * \
            self.random_time_mask_T
    self.random_freq_mask_N = self.data_cfg.random_freq_mask_N
    self.random_freq_mask_F = self.data_cfg.random_freq_mask_F
    self.random_mask_value = self.data_cfg.random_mask_value  # specaugment after ADA
    self.align_mask_value = self.data_cfg.align_mask_value
    self.pre_tokenizer = pre_tokenizer
    self.bpe_tokenizer = bpe_tokenizer
    logger.info(self.__repr__())
def __repr__(self):
    # One-line summary of the augmentation configuration, logged at init.
    # NOTE(review): this reads attributes such as alignAugment_prob and
    # skip_source that __init__ assigns from data_cfg — confirm they are
    # set on every configuration path before __repr__ is first called.
    return (
        self.__class__.__name__
        + f'(split="{self.split}", n_samples={self.n_samples}, '
        f"prepend_tgt_lang_tag={self.data_cfg.prepend_tgt_lang_tag}, "
        f"roberta={self.roberta}, "
        f"skip_roberta={self.skip_roberta}, "
        f"alignAugment_prob={self.alignAugment_prob}, "
        f"self.alignMask={self.alignMask}, "
        f"self.skip_source={self.skip_source}, "
        f"self.percentMaskedTokens={self.percentMaskedTokens}, "
        f"self.thresholdMaskedTokens={self.thresholdMaskedTokens}, "
        f"self.random_time_mask_N={self.random_time_mask_N}, "
        f"self.random_time_mask_T={self.random_time_mask_T}, "
        f"self.random_time_mask_p={self.random_time_mask_p}, "
        f"self.random_time_mask_limited={self.random_time_mask_limited}, "
        f"self.random_freq_mask_N={self.random_freq_mask_N}, "
        f"self.random_freq_mask_F={self.random_freq_mask_F}, "
        f"self.random_mask_value={self.random_mask_value}, "
        f"self.align_mask_value={self.align_mask_value}, "
        f"self.sampleFbank_prob={self.sampleFbank_prob}, "
        f"self.max_samp_fbank={self.max_samp_fbank}, "
        f"self.num_samp_fbank={self.num_samp_fbank}, "
        f"shuffle={self.shuffle}, transforms={self.feature_transforms}, "
    )
def _augment_target(self, orig_sentence):
    '''
    To augment the target side based on Roberta model (ADA-LM) or
    random replacements from the keys of audio dictionary (ADA-RT)
    Arguments:
        orig_sentence (str): an input transcription
    Return:
        1. container (List[Tuple(position, replacement_word)])
        2. updated (str):
            The transcription with words predicted by roberta,
            or sampled from the keys of audio dictionary
        3. positions (List[int]): the sampled token positions
    '''
    container, collect_sent = [], []
    updated = orig_sentence.split()
    # Sample between 1 and thresholdMaskedTokens positions, targeting
    # percentMaskedTokens of the sentence length.
    positions = random.sample(
        range(len(updated)),
        min(
            max(1, int(len(updated)*self.percentMaskedTokens)),
            self.thresholdMaskedTokens
        )
    )
    positions.sort()
    if not self.skip_roberta:
        with torch.no_grad():
            # Build one <mask>-ed copy of the sentence per position and
            # fill them all in a single batched roberta call.
            for pos in positions:
                sent_list = orig_sentence.split()
                sent_list[pos] = '<mask>'
                collect_sent.append(' '.join(sent_list))
            _info = self.roberta.batch_fill_mask(collect_sent, topk=2)
            for pos, info in zip(positions, _info):
                # Prefer the second-best candidate (the top one tends to be
                # the original word); fall back to the best one.
                try:
                    item = info[1][-1].strip()
                except Exception:  # BUG FIX: was a bare `except:`
                    item = info[0][-1].strip()
                if item in string.punctuation:
                    continue
                item = item.upper()
                updated[pos] = item
                container.append((pos, item))
    else:
        # ADA-RT: replace with uniformly sampled audio-dictionary keys.
        idx_tokens = random.sample(
            range(self.audio_dict_size),
            len(positions)
        )
        for pos, tok in zip(positions, idx_tokens):
            updated[pos] = self.audio_dict_keys[tok]
            container.append((pos, self.audio_dict_keys[tok]))
    return container, ' '.join(updated), positions
def _sample_fbank(self,
                  spectrogram,
                  transcription,
                  time_min,
                  time_max,
                  scaling
                  ):
    '''
    This is the data augmentation part by sampling from AudioDict.
    Since passing the audio_dict to funct inside can be slow.
    We do it here

    Randomly picks up to max_samp_fbank words of the transcription and
    splices a sampled fbank variant of each word (from self.audio_dict)
    into the spectrogram in place of the aligned frame window.
    Returns the (possibly) updated spectrogram.
    '''
    align_time_min = time_min.split('-')
    align_time_max = time_max.split('-')
    # Sample words for sampling fbanks
    transp_list = transcription.split()
    len_transp_list = len(transp_list)
    # num_samp_fbank >= 1 -> absolute count; in [0, 1) -> fraction of words;
    # otherwise every word is a candidate.
    if int(self.num_samp_fbank) >= 1:
        _number_swapped = int(self.num_samp_fbank)
    elif float(self.num_samp_fbank) >= 0. and float(self.num_samp_fbank) < 1.:
        _number_swapped = math.floor(len_transp_list*self.num_samp_fbank)
    else:
        _number_swapped = len_transp_list
    number_swapped = min(max(1, _number_swapped), int(self.max_samp_fbank))
    # np.sort already returns ascending order (the original code followed
    # this with a redundant positions.sort()).
    positions = np.sort(
        np.random.choice(range(0, len_transp_list),
                         size=number_swapped,
                         replace=False)
    )
    collect_fbank_min_pos, collect_fbank_max_pos = [], []
    collect_sampled_fbanks = []
    for pos in positions:
        if transp_list[pos] not in self.audio_dict.keys():
            continue
        if len(self.audio_dict[transp_list[pos]]) <= 3:
            # Not enough variants for this word
            continue
        sampled_idx = np.random.choice(
            range(len(self.audio_dict[transp_list[pos]])),
            replace=False, size=1
        )
        word_sampled_fbank = self.audio_dict[
            transp_list[pos]][sampled_idx[0]
        ]
        # Concatenate the variant's frame chunks (skipping the '_id' key).
        sampled_fbank = np.concatenate(
            [v for k, v in word_sampled_fbank.items() if k != '_id']
        )
        # Map alignment times onto fbank frame indices.
        fbank_min_pos = int(float(align_time_min[pos]) * scaling)
        fbank_max_pos = int(float(align_time_max[pos]) * scaling)
        collect_fbank_min_pos.append(fbank_min_pos)
        collect_fbank_max_pos.append(fbank_max_pos)
        collect_sampled_fbanks.append(sampled_fbank)
    if len(collect_fbank_max_pos) == 0:
        assert len(collect_fbank_min_pos) == 0
        # Words for positions sampled do not exist in AD
        return spectrogram
    # Update the fbank: stitch kept segments and sampled variants together.
    collect_fbank_max_pos.insert(0, 0)
    collect_fbank_min_pos.append(spectrogram.shape[0])
    collect_pos = [(max_pos, min_pos) for max_pos, min_pos in
                   zip(collect_fbank_max_pos, collect_fbank_min_pos)]
    collect_sampled_fbanks.append(np.array([]))  # to maintain the same length
    fbank_updated = []
    for idx, ((max_idx, min_idx), fb) in enumerate(
        zip(collect_pos, collect_sampled_fbanks)
    ):
        remained_fbank = spectrogram[max_idx:(min_idx), :]
        fbank_updated.append(remained_fbank)
        if fb.shape[0] == 0:
            # because of the "maintain the same length"
            continue
        else:
            fbank_updated.append(fb)
    fbank_updated = np.concatenate(fbank_updated)
    return fbank_updated
def _ADAMask(self, spectrogram, frames_masked):
    '''
    SpecAugment for ADA with extension to control the amount of
    random time maskings given the number of frames masked in
    aligned time maskings
    Note:
        #mask_value: in previous version: 0 here but mean in SpecAugment
    '''
    distorted = spectrogram.copy()
    num_frames = spectrogram.shape[0]
    num_freqs = spectrogram.shape[1]
    # Fall back to the spectrogram mean when no explicit fill value is set.
    if self.random_mask_value is None:
        mask_value = spectrogram.mean()
    else:
        mask_value = self.random_mask_value
    # Frequency masking: N random bands of width < F.
    # NOTE(review): np.random.randint(0, F) assumes F >= 1 (raises for 0)
    # and never produces a band of the full width F.
    for _i in range(self.random_freq_mask_N):
        f = np.random.randint(0, self.random_freq_mask_F)
        f0 = np.random.randint(0, num_freqs - f)
        if f != 0:
            distorted[:, f0: f0 + f] = mask_value
    if self.random_time_mask_limited:
        # Restrict the amount of random time masking given
        # the amount of aligned time masking
        remained = self.time_mask_max - frames_masked
        if remained > 0:
            max_time_mask_t = (remained // self.random_time_mask_N)
        else:
            max_time_mask_t = -1
    else:
        # Normal specaugment
        max_time_mask_t = min(
            self.random_time_mask_T,
            math.floor(num_frames * self.random_time_mask_p)
        )
    if max_time_mask_t < 1:
        # Budget exhausted by aligned masking: skip time masking entirely.
        return distorted
    # Time masking: N random spans of length < max_time_mask_t.
    for _i in range(self.random_time_mask_N):
        t = np.random.randint(0, max_time_mask_t)
        t0 = np.random.randint(0, num_frames - t)
        if t != 0:
            distorted[t0 : t0 + t, :] = mask_value
    return distorted
def _alignAugment(self, source, index, scaling, align_mask=False, skip_source=False):
    '''
    Not sure if it is better to pass copies of align_time_min/max and tgt_texts instead
    Arguments:
        source: fbanks in numpy format
        index: index of data instance
        scaling: conversion factor between raw audio time and fbank time steps
        align_mask: Replace the corresponding fbanks with variable
            align_mask_value
        skip_source: No aligned masking or
            audio dictionary is applied on source side.
            It is used in target-only augmentation
    Returns:
        1. spectrograms (np array)
        2. augmented transcriptions (str)
        3. number of frames masked in ADA (int)
        4. number of tokens replaced in transcriptions (int)
        5. number of hits on audio dictionary (int)
    '''
    aug_info, aug_tp, positions = self._augment_target(self.tgt_texts[index])
    align_time_min = self.align_time_min[index].split('-')
    align_time_max = self.align_time_max[index].split('-')
    frames_masked = 0
    hit_audioDict = 0
    # One alignment window per target word is required.
    assert len(aug_tp.split())==len(align_time_min)==len(align_time_max)
    if skip_source:
        ## Only target side augmentation
        return source, aug_tp, frames_masked, len(aug_info), 0
    # Generate fbanks for augmented words
    collect_fbank_min_pos, collect_fbank_max_pos = [], []
    collect_sampled_fbanks = []
    if self.align_mask_value is None:
        align_mask_value = source.mean()
    else:
        align_mask_value = self.align_mask_value
    for pos, word in aug_info:
        # Map the word's alignment window onto fbank frame indices.
        fbank_min_pos = int(float(align_time_min[pos]) * scaling)
        fbank_max_pos = int(float(align_time_max[pos]) * scaling)
        if align_mask or word not in self.audio_dict.keys():
            # Return masked spectrogram
            frames_masked += (fbank_max_pos - fbank_min_pos + 1)
            assert frames_masked >= 0
            source[fbank_min_pos:(fbank_max_pos+1),:] = align_mask_value
        else:
            # sample fbanks from AD
            hit_audioDict += 1
            sampled_idx = np.random.choice(
                range(len(self.audio_dict[word])),
                replace=False, size=1
            )
            word_sampled_fbank = self.audio_dict[word][sampled_idx[0]]
            # Concatenate the variant's frame chunks (skipping '_id').
            sampled_fbank = np.concatenate(
                [v for k, v in word_sampled_fbank.items() if k != '_id']
            )
            collect_fbank_min_pos.append(fbank_min_pos)
            collect_fbank_max_pos.append(fbank_max_pos)
            collect_sampled_fbanks.append(sampled_fbank)
    if not collect_fbank_min_pos and not collect_fbank_max_pos:
        # No augmented words exist in AD or no augmented target words
        assert hit_audioDict == 0
        return source, aug_tp, frames_masked, len(aug_info), hit_audioDict
    # Update the fbank
    assert len(collect_fbank_min_pos)==len(collect_fbank_max_pos)\
        ==len(collect_sampled_fbanks)
    # Stitch [kept segment][sampled fbank][kept segment]... by pairing each
    # kept region (between replacement windows) with the variant after it.
    collect_fbank_max_pos.insert(0, 0)
    collect_fbank_min_pos.append(source.shape[0])
    collect_pos = [(max_pos, min_pos) for max_pos, min_pos in
                   zip(collect_fbank_max_pos, collect_fbank_min_pos)]
    collect_sampled_fbanks.append(np.array([]))  # to maintain the same length
    fbank_updated = []
    for idx, ((max_idx, min_idx), fb) in enumerate(
        zip(collect_pos, collect_sampled_fbanks)
    ):
        remained_fbank = source[max_idx:(min_idx), :]
        fbank_updated.append(remained_fbank)
        if fb.shape[0] == 0:
            # because of the "maintain the same length"
            continue
        else:
            fbank_updated.append(fb)
    fbank_updated = np.concatenate(fbank_updated)
    return fbank_updated, aug_tp, frames_masked, len(aug_info), hit_audioDict
def __getitem__(
    self, index: int
) -> Tuple[int, torch.Tensor, Optional[torch.Tensor]]:
    """Return one training example.

    Actually returns a 5-tuple ``(index, source, target, tokens_masked, hit)``;
    the 3-tuple return annotation above looks stale — TODO confirm against
    callers. ``source`` may be augmented (alignment masking / fbank
    substitution) before the standard feature transforms are applied.
    """
    source = get_features_or_waveform(
        self.audio_paths[index], need_waveform=self.data_cfg.use_audio_input
    )
    # NOTE(review): the code below assumes feature transforms are configured.
    # If ``self.feature_transforms`` is None, ``tgt_texts``/``tokens_masked``/
    # ``hit`` are never bound and the tail of this method would raise
    # NameError — confirm this dataset is always built with transforms.
    if self.feature_transforms is not None:
        assert not self.data_cfg.use_audio_input
        # Frames per time unit: maps alignment timestamps onto fbank rows.
        scaling = source.shape[0] / float(self.total_time[index])
        transp_list = self.tgt_texts[index].split()  # computed but unused here
        tgt_texts, align_time_min, align_time_max = None, None, None
        if \
            self.is_train_split and \
            self.apply_alignAugment and \
            torch.rand([1]).item() <= float(self.alignAugment_prob) \
        :
            # Alignment-based augmentation: mask frames and/or substitute
            # target words, then apply the extra ADA masking on top.
            source, tgt_texts, frames_masked, tokens_masked, hit = \
                self._alignAugment(
                    source, index, scaling,
                    align_mask=self.alignMask,
                    skip_source=self.skip_source
                )
            source = self._ADAMask(source, frames_masked)
        else:
            if tgt_texts is None:
                # No augmentation: fall back to the stored text/alignments.
                assert align_time_min is None
                assert align_time_max is None
                tgt_texts = self.tgt_texts[index]
                align_time_min = self.align_time_min[index]
                align_time_max = self.align_time_max[index]
            if \
                self.is_train_split and \
                self.audio_dict is not None and \
                torch.rand([1]).item() <= self.sampleFbank_prob \
            :
                ## Allow the original fbanks to be used under certain prob
                source = self._sample_fbank(
                    source,
                    tgt_texts,
                    align_time_min,
                    align_time_max,
                    scaling
                )
            # Call the standard SpecAugment
            source = self.feature_transforms(source)
            tokens_masked = hit = 0  # no alignment augmentation happened
    source = torch.from_numpy(source).float()
    target = None
    if self.tgt_texts is not None:
        # Encode the (possibly augmented) target text.
        #tokenized = self.tokenize_text(self.tgt_texts[index])
        tokenized = self.tokenize_text(tgt_texts)
        target = self.tgt_dict.encode_line(
            tokenized, add_if_not_exist=False, append_eos=True
        ).long()
        if self.data_cfg.prepend_tgt_lang_tag:
            # Prepend the target-language tag token (multilingual setup).
            lang_tag = self.LANG_TAG_TEMPLATE.format(self.tgt_langs[index])
            lang_tag_idx = self.tgt_dict.index(lang_tag)
            target = torch.cat((torch.LongTensor([lang_tag_idx]), target), 0)
    return index, source, target, tokens_masked, hit
def collater(self, samples: List[Tuple[int, torch.Tensor, torch.Tensor]]) -> Dict:
    """Collate ``__getitem__`` samples into a training mini-batch.

    Each sample is ``(index, frames, target, tokens_masked, hit)``.  The
    batch is sorted by descending frame count (fairseq convention), and the
    per-sample augmentation counters are summed for logging.
    """
    if len(samples) == 0:
        return {}
    indices = torch.tensor([i for i, _, _, _, _ in samples], dtype=torch.long)
    frames = _collate_frames(
        [s for _, s, _, _, _ in samples], self.data_cfg.use_audio_input
    )
    # Per-sample augmentation stats, reduced to batch totals.
    tokens_masked = torch.tensor([i for _, _, _, i, _ in samples])
    hit = torch.tensor([i for _, _, _, _, i in samples])
    ntokens_masked = torch.sum(tokens_masked)
    nhit = torch.sum(hit)
    # Sort by source length (descending) and reorder every field to match.
    n_frames = torch.tensor([s.size(0) for _, s, _, _, _ in samples], dtype=torch.long)
    n_frames, order = n_frames.sort(descending=True)
    indices = indices.index_select(0, order)
    frames = frames.index_select(0, order)
    target, target_lengths = None, None
    prev_output_tokens = None
    ntokens = None
    if self.tgt_texts is not None:
        target = fairseq_data_utils.collate_tokens(
            [t for _, _, t, _, _ in samples],
            self.tgt_dict.pad(),
            self.tgt_dict.eos(),
            left_pad=False,
            move_eos_to_beginning=False,
        )
        target = target.index_select(0, order)
        target_lengths = torch.tensor(
            [t.size(0) for _, _, t, _, _ in samples], dtype=torch.long
        ).index_select(0, order)
        # Teacher-forcing inputs: same tokens with EOS moved to the front.
        prev_output_tokens = fairseq_data_utils.collate_tokens(
            [t for _, _, t, _, _ in samples],
            self.tgt_dict.pad(),
            self.tgt_dict.eos(),
            left_pad=False,
            move_eos_to_beginning=True,
        )
        prev_output_tokens = prev_output_tokens.index_select(0, order)
        ntokens = sum(t.size(0) for _, _, t, _, _ in samples)
    out = {
        "id": indices,
        "net_input": {
            "src_tokens": frames,
            "src_lengths": n_frames,
            "prev_output_tokens": prev_output_tokens,
        },
        "target": target,
        "target_lengths": target_lengths,
        "ntokens": ntokens,
        "nsentences": len(samples),
        "ntokens_masked": ntokens_masked,
        "nhit": nhit
    }
    return out
class AudioDictDatasetCreator(SpeechToTextDatasetCreator):
    """Builds :class:`AudioDictDataset` instances from TSV manifests.

    Extends the base speech-to-text creator with alignment columns
    (per-word min/max times and total duration) and an external
    ``audio_dict`` of per-word fbank samples.
    """

    # mandatory columns
    KEY_ID, KEY_AUDIO, KEY_N_FRAMES = "id", "audio", "n_frames"
    KEY_TGT_TEXT = "tgt_text"
    # optional columns
    KEY_SPEAKER, KEY_SRC_TEXT = "speaker", "src_text"
    KEY_SRC_LANG, KEY_TGT_LANG = "src_lang", "tgt_lang"
    # default values
    DEFAULT_SPEAKER = DEFAULT_SRC_TEXT = DEFAULT_LANG = ""
    # columns for alignment info.
    KEY_TIME_MIN, KEY_TIME_MAX = "align_time_min", "align_time_max"
    KEY_TOTAL_TIME = "total_time"

    @classmethod
    def _from_list(
        cls,
        split_name: str,
        is_train_split,
        samples: List[List[Dict]],
        data_cfg: S2TDataConfig,
        tgt_dict,
        pre_tokenizer,
        bpe_tokenizer,
        audio_dict,
    ) -> AudioDictDataset:
        """Flatten per-split sample dicts into parallel column lists and
        construct the dataset."""
        audio_paths, n_frames, src_texts, tgt_texts, ids = [], [], [], [], []
        speakers, src_langs, tgt_langs = [], [], []
        align_time_min, align_time_max, total_time = [], [], []
        for s in samples:
            ids.extend([ss[cls.KEY_ID] for ss in s])
            audio_paths.extend(
                [op.join(data_cfg.audio_root, ss[cls.KEY_AUDIO]) for ss in s]
            )
            n_frames.extend([int(ss[cls.KEY_N_FRAMES]) for ss in s])
            tgt_texts.extend([ss[cls.KEY_TGT_TEXT] for ss in s])
            src_texts.extend(
                [ss.get(cls.KEY_SRC_TEXT, cls.DEFAULT_SRC_TEXT) for ss in s]
            )
            speakers.extend([ss.get(cls.KEY_SPEAKER, cls.DEFAULT_SPEAKER) for ss in s])
            src_langs.extend([ss.get(cls.KEY_SRC_LANG, cls.DEFAULT_LANG) for ss in s])
            tgt_langs.extend([ss.get(cls.KEY_TGT_LANG, cls.DEFAULT_LANG) for ss in s])
            # Alignment columns are mandatory for this dataset variant
            # (indexed directly, no .get fallback).
            align_time_min.extend([ss[cls.KEY_TIME_MIN] for ss in s])
            align_time_max.extend([ss[cls.KEY_TIME_MAX] for ss in s])
            total_time.extend([ss[cls.KEY_TOTAL_TIME] for ss in s])
        return AudioDictDataset(
            split_name,
            is_train_split,
            data_cfg,
            audio_paths,
            n_frames,
            audio_dict,
            align_time_min,
            align_time_max,
            total_time,
            src_texts,
            tgt_texts,
            speakers,
            src_langs,
            tgt_langs,
            ids,
            tgt_dict,
            pre_tokenizer,
            bpe_tokenizer,
        )

    @classmethod
    def from_tsv(
        cls,
        root: str,
        data_cfg: S2TDataConfig,
        splits: str,
        tgt_dict,
        pre_tokenizer,
        bpe_tokenizer,
        is_train_split: bool,
        epoch: int,
        seed: int,
        audio_dict
    ) -> AudioDictDataset:
        """Load one dataset per comma-separated split name under ``root``.

        Returns a ConcatDataset.  When training on several splits with
        ``sampling_alpha != 1``, the splits are first resampled with
        temperature-based size ratios.
        """
        samples = []
        _splits = splits.split(",")
        for split in _splits:
            tsv_path = op.join(root, f"{split}.tsv")
            if not op.isfile(tsv_path):
                raise FileNotFoundError(f"Dataset not found: {tsv_path}")
            with open(tsv_path) as f:
                # Tab-separated with quoting disabled: fields taken verbatim.
                reader = csv.DictReader(
                    f,
                    delimiter="\t",
                    quotechar=None,
                    doublequote=False,
                    lineterminator="\n",
                    quoting=csv.QUOTE_NONE,
                )
                samples.append([dict(e) for e in reader])
        assert len(samples) > 0
        datasets = [
            cls._from_list(
                name,
                is_train_split,
                [s],
                data_cfg,
                tgt_dict,
                pre_tokenizer,
                bpe_tokenizer,
                audio_dict
            )
            for name, s in zip(_splits, samples)
        ]
        if is_train_split and len(_splits) > 1 and data_cfg.sampling_alpha != 1.0:
            # temperature-based sampling
            size_ratios = cls._get_size_ratios(
                _splits, [len(s) for s in samples], alpha=data_cfg.sampling_alpha
            )
            datasets = [
                ResamplingDataset(
                    d, size_ratio=r, seed=seed, epoch=epoch, replace=(r >= 1.0)
                )
                for d, r in zip(datasets, size_ratios)
            ]
        return ConcatDataset(datasets)
|
nilq/baby-python
|
python
|
from pyrogram import filters
from pyrogram.types import Message
from megumin import megux, Config
from megumin.utils import get_collection
from megumin.utils.decorators import input_str
LOCK_TYPES = ["audio", "link", "video"]
@megux.on_message(filters.command("lock", Config.TRIGGER))
async def lock(c: megux, m: Message):
    """Record the requested lock type in this chat's lock collection."""
    # One collection per chat, keyed by chat id.
    collection = get_collection(f"LOCK {m.chat.id}")
    await collection.insert_one({"lock": input_str(m)})
|
nilq/baby-python
|
python
|
import yaml
import collections
# Ordered loading of dictionary items in yaml files
# Taken from: SO link: /questions/5121931/in-python-how-can-you-load-yaml-mappings-as-ordereddicts
def yaml_ordered_load(fp):
    """Load YAML from *fp*, constructing every mapping as an OrderedDict.

    (Taken from SO /questions/5121931 — preserves key order on Pythons where
    plain dicts are unordered.)
    """

    def _ordered_mapping(loader, node):
        # Flatten merge keys (<<) before building the pairs.
        loader.flatten_mapping(node)
        return collections.OrderedDict(loader.construct_pairs(node))

    # Subclass dynamically so the stock yaml.Loader is left untouched.
    OrderedLoader = type("OrderedLoader", (yaml.Loader,), {})
    OrderedLoader.add_constructor(
        yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, _ordered_mapping
    )
    return yaml.load(fp, OrderedLoader)
|
nilq/baby-python
|
python
|
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
Unit tests for ly_test_tools._internal.pytest_plugin.terminal_report
"""
import os
import pytest
import unittest.mock as mock
import ly_test_tools._internal.pytest_plugin.terminal_report as terminal_report
pytestmark = pytest.mark.SUITE_smoke
class TestTerminalReport(object):
    """Unit tests for terminal_report's rerun-command summary output."""

    @mock.patch('ly_test_tools._internal.pytest_plugin.failed_test_rerun_command.build_rerun_commands')
    def test_AddCommands_MockCommands_CommandsAdded(self, mock_build_commands):
        # Echo node ids back so we can assert they are written verbatim.
        mock_build_commands.side_effect = lambda path, nodes, dir: nodes
        mock_reporter = mock.MagicMock()
        header = 'This is a header'
        test_path = 'Foo'
        mock_node_ids = ['a', 'b']

        terminal_report._add_commands(mock_reporter, header, test_path, mock_node_ids)

        # Header first, then one line per rerun command.
        mock_reporter.write_line.assert_has_calls([
            mock.call(header),
            mock.call('a'),
            mock.call('b')
        ])

    @mock.patch('ly_test_tools._internal.pytest_plugin.failed_test_rerun_command.build_rerun_commands')
    def test_AddCommands_NoCommands_ErrorWritten(self, mock_build_commands):
        mock_reporter = mock.MagicMock()
        header = 'This is a header'
        test_path = 'Foo'
        mock_node_ids = []  # nothing to rerun

        terminal_report._add_commands(mock_reporter, header, test_path, mock_node_ids)
        calls = mock_reporter.write_line.mock_calls

        # With no node ids, no commands are built and an error line follows
        # the header.
        mock_build_commands.assert_not_called()
        assert calls[0] == mock.call(header)
        assert 'Error' in calls[1][1][0]

    @mock.patch('ly_test_tools._internal.pytest_plugin.terminal_report._add_commands')
    def test_TerminalSummary_NoErrorsNoFailures_EmptyReport(self, mock_add_commands):
        mock_report = mock.MagicMock()
        mock_report.stats.get.return_value = []  # no failures, no errors
        mock_config = mock.MagicMock()

        terminal_report.pytest_terminal_summary(mock_report, 0, mock_config)

        # A clean run must not add any section or commands.
        mock_add_commands.assert_not_called()
        mock_report.config.getoption.assert_not_called()
        mock_report.section.assert_not_called()

    @mock.patch('ly_test_tools._internal.pytest_plugin.terminal_report._add_commands')
    def test_TerminalSummary_ErrorsAndFailures_SectionsAdded(self, mock_add_commands):
        mock_report = mock.MagicMock()
        mock_node = mock.MagicMock()
        mock_node.nodeid = 'something'
        # Same node list returned for both the failure and error lookups.
        mock_report.stats.get.return_value = [mock_node, mock_node]
        mock_config = mock.MagicMock()

        terminal_report.pytest_terminal_summary(mock_report, 0, mock_config)

        # One _add_commands call for failures, one for errors; one section.
        assert len(mock_add_commands.mock_calls) == 2
        mock_report.config.getoption.assert_called()
        mock_report.section.assert_called_once()

    @mock.patch('ly_test_tools._internal.pytest_plugin.terminal_report._add_commands', mock.MagicMock())
    @mock.patch('os.path.basename')
    def test_TerminalSummary_Failures_CallsWithBasename(self, mock_basename):
        mock_report = mock.MagicMock()
        mock_node = mock.MagicMock()
        mock_base = 'something'
        node_id = os.path.join('C:', mock_base)
        mock_node.nodeid = node_id
        mock_report.stats.get.side_effect = [[mock_node], []]  # first item is failure list
        mock_config = mock.MagicMock()

        terminal_report.pytest_terminal_summary(mock_report, 0, mock_config)

        mock_basename.assert_called_with(node_id)

    @mock.patch('ly_test_tools._internal.pytest_plugin.terminal_report._add_commands', mock.MagicMock())
    @mock.patch('os.path.basename')
    def test_TerminalSummary_Errors_CallsWithBasename(self, mock_basename):
        mock_report = mock.MagicMock()
        mock_node = mock.MagicMock()
        mock_base = 'something'
        node_id = os.path.join('C:', mock_base)
        mock_node.nodeid = node_id
        mock_report.stats.get.side_effect = [[], [mock_node]]  # second item is error list
        mock_config = mock.MagicMock()

        terminal_report.pytest_terminal_summary(mock_report, 0, mock_config)

        mock_basename.assert_called_with(node_id)
|
nilq/baby-python
|
python
|
import platform
from datetime import datetime
from typing import Optional
import discord
from discord.ext import commands
class Stats(commands.Cog):
    """Informational commands: emoji, bot, user, channel and server stats."""

    def __init__(self, bot):
        self.bot = bot

    @commands.Cog.listener()
    async def on_ready(self):
        # Console confirmation that the cog loaded.
        print(f"{self.__class__.__name__} Cog has been loaded\n-----")

    @commands.command(name="emojiinfo", aliases=["ei"])
    @commands.guild_only()
    async def emoji_info(self, ctx, emoji: discord.Emoji = None):
        """Show detailed information about a custom emoji."""
        if not emoji:
            # No argument: show this command's help instead.
            return await ctx.invoke(self.bot.get_command("help"), entity="emojiinfo")
        try:
            # Re-fetch from the guild so uploader info (emoji.user) is populated.
            emoji = await emoji.guild.fetch_emoji(emoji.id)
        except discord.NotFound:
            return await ctx.send("I could not find this emoji in the given guild.")
        is_managed = "Yes" if emoji.managed else "No"
        is_animated = "Yes" if emoji.animated else "No"
        requires_colons = "Yes" if emoji.require_colons else "No"
        creation_time = emoji.created_at.strftime("%I:%M %p %B %d, %Y")
        can_use_emoji = (
            "Everyone"
            if not emoji.roles
            else " ".join(role.name for role in emoji.roles)
        )
        description = f"""
**General:**
**- Name:** {emoji.name}
**- Id:** {emoji.id}
**- URL:** [Link To Emoji]({emoji.url})
**- Author:** {emoji.user.mention}
**- Time Created:** {creation_time}
**- Usable by:** {can_use_emoji}

**Other:**
**- Animated:** {is_animated}
**- Managed:** {is_managed}
**- Requires Colons:** {requires_colons}
**- Guild Name:** {emoji.guild.name}
**- Guild Id:** {emoji.guild.id}
"""
        embed = discord.Embed(
            title=f"**Emoji Information for:** `{emoji.name}`",
            description=description,
            colour=discord.Color.blurple(),
        )
        embed.set_thumbnail(url=emoji.url)
        await ctx.send(embed=embed)

    @commands.command(name="botinfo", aliases=["bi", "bot", "bot info"])
    @commands.guild_only()
    async def info_bot(self, message):
        """
        This Command Provides us the info of the bot
        """
        pythonVersion = platform.python_version()
        dpyVersion = discord.__version__
        serverCount = len(self.bot.guilds)
        memberCount = len(set(self.bot.get_all_members()))
        # NOTE(review): hard-coded user id — presumably the bot's own account;
        # confirm and consider self.bot.user instead.
        mem1 = self.bot.get_user(854230635425693756)
        embed = discord.Embed(
            title=f"{mem1.name} Stats ",
            description=f"{self.bot.user.name} Bot is a MultiPrupose Bot Customised for FRNz COmmunity. Made By <@448740493468106753>",
            colour=discord.Color.blurple(),
            timestamp=datetime.utcnow(), )
        embed.add_field(name="Bot Version:", value=self.bot.version)
        embed.add_field(name="Python Version:", value=pythonVersion)
        embed.add_field(name="Discord.Py Version", value=dpyVersion)
        embed.add_field(name="Total Guilds:", value=serverCount)
        embed.add_field(name="Total Users:", value=memberCount)
        embed.add_field(name="Bot Made By:", value="<@448740493468106753>")
        embed.set_footer(text=f"{message.guild.name} | {self.bot.user.name}")
        embed.set_author(name=self.bot.user.name,
                         icon_url=self.bot.user.avatar.url)
        embed.set_thumbnail(url=self.bot.user.avatar.url)
        await message.channel.send(embed=embed)

    @commands.command(name="userinfo", aliases=["ui", "memberinfo", "mi", "whois"])
    @commands.guild_only()
    async def info_user(self, ctx, member: Optional[discord.Member]):
        """
        gets info of a user
        """
        member1 = member or ctx.author  # default to the invoker
        embed = discord.Embed(title="Member Information",
                              color=discord.Color.blurple(),
                              timestamp=datetime.utcnow())
        embed.add_field(name="ID", value=f"{member1.id}", inline=False)
        embed.add_field(
            name="Name", value=f"{member1.name}#{member1.discriminator}")
        embed.add_field(name="Top role", value=f"{member1.top_role.mention}")
        # Bug fix: str(activity.type) is e.g. "ActivityType.playing"; the
        # original embedded the whole split() *list* in the embed. Take the
        # last element so just "playing" is shown.
        embed.add_field(name="status",
                        value=f"{str(member1.activity.type).split('.')[-1] if member1.activity else 'N/A'} {member1.activity.name if member1.activity else ''}")
        embed.add_field(
            name="created at", value=f"{member1.created_at.strftime('%d/%m/%y %H:%M:%S')}")
        embed.add_field(
            name="Joined at", value=f"{member1.joined_at.strftime('%d/%m/%y %H:%M:%S')}")
        embed.add_field(name="Boosted?", value=f"{member1.premium_since}")
        await ctx.reply(embed=embed)

    @commands.command(name="channelstats", aliases=["cs"])
    @commands.guild_only()
    async def channel_stats(self, ctx, channel: discord.TextChannel = None):
        """
        This Command Provides us the stats of the channel
        """
        channel = channel or ctx.channel  # default to the current channel
        embed = discord.Embed(
            title=f"Stats for **{channel.name}**",
            description=f"{'Category: {}'.format(channel.category.name) if channel.category else 'This channel is not in a category'}",
            color=discord.Color.blurple(),
        )
        embed.add_field(name="Channel Guild",
                        value=ctx.guild.name, inline=False)
        embed.add_field(name="Channel Id", value=channel.id, inline=False)
        embed.add_field(
            name="Channel Topic",
            value=f"{channel.topic if channel.topic else 'No topic.'}",
            inline=False,
        )
        embed.add_field(name="Channel Position",
                        value=channel.position, inline=False)
        embed.add_field(
            name="Channel Slowmode Delay", value=channel.slowmode_delay, inline=False
        )
        embed.add_field(name="Channel is nsfw?",
                        value=channel.is_nsfw(), inline=False)
        embed.add_field(name="Channel is news?",
                        value=channel.is_news(), inline=False)
        embed.add_field(
            name="Channel Creation Time", value=channel.created_at, inline=False
        )
        embed.add_field(
            name="Channel Permissions Synced",
            value=channel.permissions_synced,
            inline=False,
        )
        embed.add_field(name="Channel Hash", value=hash(channel), inline=False)
        # Remove the invoking message to keep the channel tidy.
        await ctx.message.delete()
        await ctx.send(embed=embed)

    @commands.command(name="serverinfo", aliases=["guildinfo", "si", "gi"])
    @commands.guild_only()
    async def server_info(self, ctx):
        """Show a summary embed of the current guild."""
        embed = discord.Embed(title="Server information",
                              color=discord.Color.blurple(),
                              timestamp=datetime.utcnow())
        embed.set_thumbnail(url=ctx.guild.icon.url)
        # Member counts per presence status: online / idle / dnd / offline.
        statuses = [len(list(filter(lambda m: str(m.status) == "online", ctx.guild.members))),
                    len(list(filter(lambda m: str(m.status)
                                    == "idle", ctx.guild.members))),
                    len(list(filter(lambda m: str(m.status) == "dnd", ctx.guild.members))),
                    len(list(filter(lambda m: str(m.status) == "offline", ctx.guild.members)))]
        fields = [("Owner & owner id", f"{ctx.guild.owner}, {ctx.guild.owner.id}", False),
                  ("Server ID", ctx.guild.id, True),
                  ("Created at", ctx.guild.created_at.strftime(
                      "%d/%m/%Y %H:%M:%S"), True),
                  ("Region", ctx.guild.region, True),
                  ("Members", len(ctx.guild.members), True),
                  ("Humans", len(list(filter(lambda m: not m.bot, ctx.guild.members))), True),
                  ("Bots", len(list(filter(lambda m: m.bot, ctx.guild.members))), True),
                  ("Banned members", len(await ctx.guild.bans()), True),
                  ("Statuses",
                   f"🟢 {statuses[0]} 🟠 {statuses[1]} 🔴 {statuses[2]} ⚪ {statuses[3]}", True),
                  ("Text channels", len(ctx.guild.text_channels), True),
                  ("Voice channels", len(ctx.guild.voice_channels), True),
                  ("Categories", len(ctx.guild.categories), True),
                  ("Roles", len(ctx.guild.roles), True),
                  ("Invites", len(await ctx.guild.invites()), True),
                  ("\u200b", "\u200b", True)]
        for name, value, inline in fields:
            embed.add_field(name=name, value=value, inline=inline)
        await ctx.send(embed=embed)
def setup(bot):
    # Standard discord.py extension entry point: register the cog on load.
    bot.add_cog(Stats(bot))
|
nilq/baby-python
|
python
|
import urllib.request
import sys
import chardet
from html.parser import HTMLParser
from datetime import datetime
# Scrape settings for the pikabu "top 50 commented" page.
pikabuUrl = 'http://pikabu.ru/top50_comm.php'
startTag = 'profile_commented'  # CSS class marking a commented-post entry
endTag = 'b-sidebar-sticky'  # class following the list (declared but unused)
newsTag = 'a'  # anchor tags carry the post links
classTag = 'class'
# Parallel output lists filled in by MyHTMLParser (title i <-> link i).
headers = []
links = []
class MyHTMLParser(HTMLParser):
    """Extracts post titles and links from the pikabu top-50 page.

    Results are appended to the module-level ``headers`` and ``links`` lists,
    which are expected to stay index-aligned (title i belongs to link i).
    """

    def __init__(self):
        HTMLParser.__init__(self)
        self.readData = False  # next data chunk is a post title
        self.weAreIn = False   # inside a commented-post entry

    def handle_starttag(self, tag, attrs):
        for attr in attrs:
            if attr[0] == classTag:
                if attr[1] == startTag:
                    self.weAreIn = True
            # Bug fix: only record the href value.  The original appended the
            # value of *every* attribute of an anchor (e.g. its class too),
            # which desynchronised ``links`` from ``headers``.
            if tag == newsTag and self.weAreIn and attr[0] == 'href':
                links.append(attr[1])
                self.readData = True

    def handle_data(self, data):
        if self.readData:
            headers.append(data)
            # Reset both flags until the next marked entry is seen.
            self.weAreIn = False
            self.readData = False
def proceed():
    """Download the pikabu page, sniff its encoding, and feed it to the parser."""
    response = urllib.request.urlopen(pikabuUrl)
    raw = response.read()
    # The site's charset declaration is not trusted; detect it from the bytes.
    encoding = chardet.detect(raw)['encoding']
    print('Encoding Website: ' + str(encoding))
    print('Encoding Console: ' + str(sys.stdout.encoding))
    MyHTMLParser().feed(raw.decode(encoding))
def write():
    """Append numbered "title : link" pairs to a timestamped text file."""
    now = datetime.now()
    sep = '-'
    # Deliberately not zero-padded — matches the historical file-name format.
    timestring = sep.join(
        str(part) for part in (now.hour, now.minute, now.second,
                               now.day, now.month, now.year))
    filename = "pikabu " + timestring + '.txt'
    with open(filename, "a") as output:
        for number, (header, link) in enumerate(zip(headers, links), start=1):
            line = str(number) + '. ' + header + ' : ' + link
            output.write(line + "\n")
            print(line)
    print ("Saved to: " + filename)
# Script entry: fetch + parse, persist results, then wait so a console
# window stays open (useful when double-clicked on Windows).
print ("Pikabu Top 50 Comments")
proceed()
write()
input("Press Enter To Exit")
|
nilq/baby-python
|
python
|
"""
Recipes available to data with tags ['F2', 'IMAGE', 'CAL', 'FLAT']
Default is "makeProcessedFlat".
"""
recipe_tags = {'F2', 'IMAGE', 'CAL', 'FLAT'}

# TODO: This recipe needs serious fixing to be made meaningful to the user.
def makeProcessedFlat(p):
    """
    This recipe calls a selection primitive, since K-band F2 flats only have
    lamp-off frames, and so need to be treated differently.

    Parameters
    ----------
    p : PrimitivesF2 object
        A primitive set matching the recipe_tags.
    """
    p.prepare()
    p.addDQ()
    p.addVAR(read_noise=True)
    #p.nonlinearityCorrect()
    p.ADUToElectrons()
    p.addVAR(poisson_noise=True)
    # Gather every flat collected for this purpose before combining.
    p.addToList(purpose='forFlat')
    p.getList(purpose='forFlat')
    p.makeLampFlat()
    p.normalizeFlat()
    p.thresholdFlatfield()
    p.storeProcessedFlat()
    return

# Recipe used when the caller does not select one explicitly.
_default = makeProcessedFlat
|
nilq/baby-python
|
python
|
"""
Notifications
--------------------------------------------
.. NOTE::
Coming soon 🛠
"""
|
nilq/baby-python
|
python
|
# Copyright 2021 Massachusetts Institute of Technology
#
# @file image_gallery.py
# @author W. Nicholas Greene
# @date 2020-07-02 23:44:46 (Thu)
import os
import argparse
def create_simple_gallery(image_dir, num_per_row=3, output_file="index.html", title="Image Gallery"):
    """Create a simple gallery with num_per_row images per row.

    Walks ``image_dir`` recursively, collects .png/.jpg files (as paths
    relative to ``image_dir``) and writes an HTML page into ``image_dir``.
    """
    # Collect every image, as a path relative to the gallery root.
    images = []
    for root, _dirs, files in os.walk(image_dir):
        for filename in files:
            full_path = os.path.join(root, filename)
            if full_path.endswith((".png", ".jpg")):
                images.append(os.path.relpath(full_path, image_dir))
    # Single final sort (the original also sorted per-directory, redundantly).
    images = sorted(images)

    # Each image floats left and takes an equal share of the row width.
    width_pct = 100.0 / num_per_row
    html_file = os.path.join(image_dir, output_file)
    with open(html_file, "w") as target:
        target.write("<html><head><title>{}</title></head><body><center>\n".format(title))
        for image in images:
            image_str = "<a href={}><img src=\"{}\" style=\"float: left; width: {}%; image-rendering: pixelated\"></a>\n".format(image, image, width_pct)
            target.write(image_str)
        target.write("</center></body></html>\n")
    return
def create_training_gallery(image_dir, image_height_pix=256, output_file="index.html", title="Image Gallery", delim="_"):
    """Create a gallery where each row shows the evolution of an image during training.

    Assumes images are in the following format:

      <image_id>_<epoch>_<step>.jpg

    Epoch and step are optional, but if provided must be zero padded so
    sorting will put them in the appropriate order.
    """
    # Group images by id: the token before the first delimiter.
    id_to_images = {}
    for root, _dirs, files in os.walk(image_dir):
        for filename in sorted(files):
            full_path = os.path.join(root, filename)
            rel_path = os.path.relpath(full_path, image_dir)
            if full_path.endswith((".png", ".jpg")):
                image_id = os.path.splitext(os.path.basename(rel_path))[0].split(delim)[0]
                id_to_images.setdefault(image_id, []).append(rel_path)
    # Latest (highest epoch/step) first within each row.
    for image_id, images in id_to_images.items():
        id_to_images[image_id] = sorted(images, reverse=True)

    # Write html file: one table row per image id.
    html_file = os.path.join(image_dir, output_file)
    with open(html_file, "w") as target:
        target.write("<html><head><title>{}</title></head><body>\n".format(title))
        target.write("<table>\n")
        for image_id, images in id_to_images.items():
            target.write("<tr align=\"left\">\n")
            for image in images:
                # Bug fix: CSS lengths need a unit — the original emitted e.g.
                # "height: 256", which browsers ignore; emit "height: 256px".
                image_str = "<td><a href={}><img src=\"{}\" style=\"height: {}px; image-rendering: pixelated\"></a></td>\n".format(
                    image, image, image_height_pix)
                target.write(image_str)
            target.write("</tr>\n")
        target.write("</table>\n")
        target.write("</body></html>\n")
    return
def main():
    """Command-line entry point: build a simple gallery from a folder of images."""
    arg_parser = argparse.ArgumentParser(
        description="Create simple image gallery from a folder of images.")
    arg_parser.add_argument("image_dir", help="Path to image directory.")
    arg_parser.add_argument("--num_per_row", type=int, default=3,
                            help="Number of images per row.")
    arg_parser.add_argument("--output_file", default="index.html",
                            help="Output file name.")
    arg_parser.add_argument("--title", default="Image Gallery",
                            help="Gallery name.")
    opts = arg_parser.parse_args()
    create_simple_gallery(opts.image_dir, opts.num_per_row, opts.output_file, opts.title)
    return
# Allow use both as a script and as an importable module.
if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
# %%
# Imports
import torch
import torch.nn as nn
import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from torch.utils.data import Dataset, random_split
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB
import torch.optim as optim
import codecs
import tqdm
# %%
# Setting random seed and device
SEED = 1
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
torch.backends.cudnn.deterministic = True
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
# %%
# Load data
train_df = pd.read_csv('data/task-2/train.csv')
test_df = pd.read_csv('data/task-2/dev.csv')
# %%
# Number of epochs
epochs = 20
# Proportion of training data for train compared to dev
train_proportion = 0.8
# %% md
#### Approach 1: Using pre-trained word representations (GLOVE)
# %%
# We define our training loop
def train(train_iter, dev_iter, model, number_epoch):
    """
    Training loop for the model, which calls on eval to evaluate after each epoch

    Relies on the module-level ``optimizer``, ``loss_fn``, ``device`` and
    ``model_performance``.
    """
    print("Training model.")
    for epoch in range(1, number_epoch + 1):
        model.train()
        epoch_loss = 0
        epoch_correct = 0
        no_observations = 0  # Observations used for training so far
        for batch in train_iter:
            feature, target = batch
            feature, target = feature.to(device), target.to(device)
            # for RNN: resize the hidden state to the (possibly smaller) last batch
            model.batch_size = target.shape[0]
            no_observations = no_observations + target.shape[0]
            model.hidden = model.init_hidden()
            predictions = model(feature).squeeze(1)
            optimizer.zero_grad()
            loss = loss_fn(predictions, target)
            correct, __ = model_performance(np.argmax(predictions.detach().cpu().numpy(), axis=1),
                                            target.detach().cpu().numpy())
            loss.backward()
            optimizer.step()
            # Weight running loss by batch size for a correct epoch average.
            epoch_loss += loss.item() * target.shape[0]
            epoch_correct += correct
        valid_loss, valid_acc, __, __ = eval(dev_iter, model)
        epoch_loss, epoch_acc = epoch_loss / no_observations, epoch_correct / no_observations
        print(f'| Epoch: {epoch:02} | Train Loss: {epoch_loss:.2f} | Train Accuracy: {epoch_acc:.2f} | \
Val. Loss: {valid_loss:.2f} | Val. Accuracy: {valid_acc:.2f} |')
# %%
# We evaluate performance on our dev set
# NOTE(review): shadows the ``eval`` builtin; kept as-is for compatibility
# with existing callers in this script.
def eval(data_iter, model):
    """
    Evaluating model performance on the dev set

    Returns (mean loss, accuracy, all predictions, all targets).
    """
    model.eval()
    epoch_loss = 0
    epoch_correct = 0
    pred_all = []
    trg_all = []
    no_observations = 0
    with torch.no_grad():
        for batch in data_iter:
            feature, target = batch
            feature, target = feature.to(device), target.to(device)
            # for RNN: match the hidden state to this batch's size
            model.batch_size = target.shape[0]
            no_observations = no_observations + target.shape[0]
            model.hidden = model.init_hidden()
            predictions = model(feature).squeeze(1)
            loss = loss_fn(predictions, target)
            # We get the mse
            pred, trg = predictions.detach().cpu().numpy(), target.detach().cpu().numpy()
            correct, __ = model_performance(np.argmax(pred, axis=1), trg)
            epoch_loss += loss.item() * target.shape[0]
            epoch_correct += correct
            pred_all.extend(pred)
            trg_all.extend(trg)
    return epoch_loss / no_observations, epoch_correct / no_observations, np.array(pred_all), np.array(trg_all)
# %%
# How we print the model performance
def model_performance(output, target, print_output=False):
    """Return (number correct, accuracy) for a batch of predictions.

    Accuracy is a fraction: 8/10 right gives 0.8, NOT 8.
    """
    matches = (output == target)
    n_correct = sum(matches)
    accuracy = np.true_divide(n_correct, len(output))
    if print_output:
        print(f'| Acc: {accuracy:.2f} ')
    return n_correct, accuracy
# %%
# To create our vocab
def create_vocab(data):
    """Tokenize sentences on single spaces and build the vocabulary.

    Returns ``(vocabulary, tokenized_corpus)``: the vocabulary preserves
    first-seen token order (matching the original list-scan behavior) and
    the corpus is a list of token lists, one per sentence.
    """
    # Simplest possible tokenization: split on single spaces.
    tokenized_corpus = [sentence.split(' ') for sentence in data]

    # First-seen order preserved; an auxiliary set makes the membership test
    # O(1) instead of the original O(len(vocabulary)) list scan (which also
    # contained a dead `if True:` branch).
    vocabulary = []
    seen = set()
    for sentence in tokenized_corpus:
        for token in sentence:
            if token not in seen:
                seen.add(token)
                vocabulary.append(token)
    return vocabulary, tokenized_corpus
# %%
# Used for collating our observations into minibatches:
def collate_fn_padd(batch):
    """Pad variable-length feature sequences and tensorise a minibatch.

    Returns ``(seq_tensor, labels)``: a (batch, max_len) LongTensor padded
    with zeros on the right, and a LongTensor of labels.
    """
    features = [f for f, _ in batch]
    labels = torch.LongTensor([l for _, l in batch])
    lengths = [len(f) for f in features]
    # Zero-fill up to the longest sequence in this batch.
    padded = torch.zeros((len(batch), max(lengths))).long()
    for row, (seq, length) in enumerate(zip(features, lengths)):
        padded[row, :length] = torch.LongTensor(seq)
    return padded, labels
# We create a Dataset so we can create minibatches
class Task2Dataset(Dataset):
    """Map-style dataset pairing vectorised sentences with labels.

    Attributes ``x_train``/``y_train`` are accessed directly elsewhere in
    this script, so they are part of the public interface.
    """

    def __init__(self, train_data, labels):
        self.x_train = train_data  # list of token-id sequences
        self.y_train = labels  # parallel labels (indexable, len()-able)

    def __len__(self):
        return len(self.y_train)

    def __getitem__(self, item):
        return self.x_train[item], self.y_train[item]
# %%
class BiLSTM_classification(nn.Module):
    """Bidirectional single-layer LSTM sentence classifier with 3 output classes."""

    def __init__(self, embedding_dim, hidden_dim, vocab_size, batch_size, device):
        super(BiLSTM_classification, self).__init__()
        self.hidden_dim = hidden_dim
        self.embedding_dim = embedding_dim
        self.device = device
        self.batch_size = batch_size
        # padding_idx=0 keeps the pad token's embedding fixed at zero.
        self.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx=0)
        # The LSTM takes word embeddings as inputs, and outputs hidden states
        # with dimensionality hidden_dim.
        self.lstm = nn.LSTM(embedding_dim, hidden_dim, bidirectional=True)
        # The linear layer that maps from hidden state space to tag space
        # (*2 because forward and backward outputs are concatenated).
        self.hidden2label = nn.Linear(hidden_dim * 2, 3)
        self.hidden = self.init_hidden()

    def init_hidden(self):
        # Before we've done anything, we dont have any hidden state.
        # Refer to the Pytorch documentation to see exactly why they have this dimensionality.
        # The axes semantics are (num_layers * num_directions, minibatch_size, hidden_dim)
        return torch.zeros(2, self.batch_size, self.hidden_dim).to(self.device), \
               torch.zeros(2, self.batch_size, self.hidden_dim).to(self.device)

    def forward(self, sentence):
        """Return unnormalised class scores of shape (batch, 3)."""
        embedded = self.embedding(sentence)
        # (batch, seq, emb) -> (seq, batch, emb): the LSTM's expected layout.
        embedded = embedded.permute(1, 0, 2)
        lstm_out, self.hidden = self.lstm(
            embedded.view(len(embedded), self.batch_size, self.embedding_dim), self.hidden)
        # Classify from the last timestep's (concatenated fwd/bwd) output.
        out = self.hidden2label(lstm_out[-1])
        return out
# %%
## Approach 1 code, using functions defined above:
# We set our training data and test data
training_data = train_df['original1']
test_data = test_df['original1']
##### Preproceccing the data
train_df['original1']
# Creating word vectors
training_vocab, training_tokenized_corpus = create_vocab(training_data)
test_vocab, test_tokenized_corpus = create_vocab(test_data)
# Creating joint vocab from test and train:
joint_vocab, joint_tokenized_corpus = create_vocab(pd.concat([training_data, test_data]))
print("Vocab created.")
# We create representations for our tokens
wvecs = [] # word vectors
word2idx = [] # word2index
idx2word = []
# This is a large file, it will take a while to load in the memory!
with codecs.open('glove.6B.100d.txt', 'r', 'utf-8') as f:
# Change
index = 0
for line in f.readlines():
# Ignore the first line - first line typically contains vocab, dimensionality
if len(line.strip().split()) > 3:
word = line.strip().split()[0]
if word in joint_vocab:
(word, vec) = (word, list(map(float, line.strip().split()[1:])))
wvecs.append(vec)
word2idx.append((word, index))
idx2word.append((index, word))
index += 1
wvecs = np.array(wvecs)
word2idx = dict(word2idx)
idx2word = dict(idx2word)
vectorized_seqs = [[word2idx[tok] for tok in seq if tok in word2idx] for seq in training_tokenized_corpus]
INPUT_DIM = len(word2idx)
EMBEDDING_DIM = 100
BATCH_SIZE = 32
model = BiLSTM_classification(EMBEDDING_DIM, 50, INPUT_DIM, BATCH_SIZE, device)
print("Model initialised.")
model.to(device)
# We provide the model with our embeddings
model.embedding.weight.data.copy_(torch.from_numpy(wvecs))
feature = vectorized_seqs
# 'feature' is a list of lists, each containing embedding IDs for word tokens
train_and_dev = Task2Dataset(feature, train_df['label'])
train_examples = round(len(train_and_dev) * train_proportion)
dev_examples = len(train_and_dev) - train_examples
train_dataset, dev_dataset = random_split(train_and_dev,
(train_examples,
dev_examples))
train_loader = torch.utils.data.DataLoader(train_dataset, shuffle=True, batch_size=BATCH_SIZE,
collate_fn=collate_fn_padd)
dev_loader = torch.utils.data.DataLoader(dev_dataset, batch_size=BATCH_SIZE, collate_fn=collate_fn_padd)
print("Dataloaders created.")
loss_fn = nn.CrossEntropyLoss()
loss_fn = loss_fn.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
# %%
epochs = 10
train(train_loader, dev_loader, model, epochs)
# %%
#
# training_data, dev_data, training_y, dev_y = train_test_split(train_df['edit1'], train_df['label'],
# test_size=(1-train_proportion),
# random_state=42)
#
#
# test_loader = torch.utils.data.DataLoader(dev_dataset, batch_size=len(dev_dataset.dataset.y_train), collate_fn=collate_fn_padd)
#
# for batch in test_loader:
# batch_feature, batch_targets = batch
# batch_feature, batch_targets = batch_feature.to(device), batch_targets.to(device)
# model.batch_size = batch_targets.shape[0]
# batch_pred = model(batch_feature)
# batch_correct = model_performance(torch.tensor(np.argmax(batch_pred.detach().cpu().numpy(), axis=1)),
# batch_targets.detach().cpu(), True)
#
# pred = model(test_features)
training_data, dev_data, training_y, dev_y = train_test_split(train_df['edit1'], train_df['label'],
test_size=(1 - train_proportion),
random_state=42)
pred_baseline = torch.zeros(len(dev_y)) + 1 # 1 is most common class
print("\nBaseline performance:")
sse, mse = model_performance(pred_baseline, torch.tensor(dev_y.values), True)
# %%
def score_task_2(truth_loc, prediction_loc):
    """Compute and print accuracy of task-2 predictions against ground truth.

    Both arguments are handed straight to ``pd.read_csv`` (a path or any
    file-like buffer works).  Rows whose true label is 0 are excluded from
    scoring before the accuracy is printed.
    """
    gold = pd.read_csv(truth_loc, usecols=['id', 'label'])
    guesses = pd.read_csv(prediction_loc, usecols=['id', 'pred'])
    # Both files must describe exactly the same set of ids.
    assert (sorted(gold.id) == sorted(guesses.id)), "ID mismatch between ground truth and prediction!"
    scored = pd.merge(gold, guesses)
    scored = scored[scored.label != 0]
    accuracy = np.sum(scored.label == scored.pred) * 1.0 / len(scored)
    print("Accuracy = %.3f" % accuracy)
def predict(data_iter, model):
    """
    Run the model over ``data_iter`` without gradients and return the raw
    per-batch predictions (as numpy arrays, concatenated into one list).

    NOTE(review): depends on module-level globals `device`, `loss_fn`,
    `model_performance` and `np` — confirm they are defined before calling.
    `epoch_loss`, `epoch_correct` and `trg_all` are accumulated but never
    returned; they appear to be leftovers from a train/eval loop.
    """
    model.eval()
    epoch_loss = 0
    epoch_correct = 0
    pred_all = []
    trg_all = []
    no_observations = 0
    with torch.no_grad():
        for batch in data_iter:
            feature, target = batch
            feature, target = feature.to(device), target.to(device)
            # for RNN: resize the batch-dependent state before the forward pass
            model.batch_size = target.shape[0]
            no_observations = no_observations + target.shape[0]
            model.hidden = model.init_hidden()
            predictions = model(feature).squeeze(1)
            loss = loss_fn(predictions, target)
            # We get the mse
            pred, trg = predictions.detach().cpu().numpy(), target.detach().cpu().numpy()
            correct, __ = model_performance(np.argmax(pred, axis=1), trg)
            # Weight the loss by batch size so it averages correctly later.
            epoch_loss += loss.item() * target.shape[0]
            epoch_correct += correct
            pred_all.extend(pred)
            trg_all.extend(trg)
    return pred_all
#
# Build the test set and evaluate the trained model on it.
# BUG FIX: the dataset was assigned to `outtest_dataset` but then referenced
# as `test_dataset`, raising NameError; use one consistent name.
test_vectorized_seqs = [[word2idx[tok] for tok in seq if tok in word2idx] for seq in test_tokenized_corpus]
test_dataset = Task2Dataset(test_vectorized_seqs, test_df['label'])
# random_split with a zero-length remainder only wraps the dataset in a
# Subset, keeping it consistent with the train/dev pipeline.
test_dataset, __ = random_split(test_dataset, (len(test_dataset), 0))
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=BATCH_SIZE, collate_fn=collate_fn_padd)
loss, accu, __, __ = eval(test_loader, model)
print("LOSS: {}, ACCURACY: {}".format(loss, accu))
|
nilq/baby-python
|
python
|
"""
Ejercicio 02
Escriba un algoritmo, que dado como dato el sueldo de un trabajador, le aplique un aumento del 15% si su salario bruto es inferior
a $900.000 COP y 12% en caso contrario. Imprima el nuevo sueldo del trabajador.
Entradas
Sueldo_Bruto --> Float --> S_B
Salidas
Sueldo_Neto --> Float --> S_N
"""
# Instrucciones al usuario
print("Este programa le permitira determinar el sueldo neto de un trabajador aplicando un aumento")
# Entradas
S_B = float(input(f"Digite su salario bruto: "))
# Caja Negra
if(S_B < 900000):
S_N = S_B*0.15+S_B
else:
S_N = S_B*0.12+S_B
# Salidas
print(f"Su salario neto es de: ${S_N} COP")
|
nilq/baby-python
|
python
|
import os
import shutil
import cv2
import numpy as np
import pandas as pd
from self_driving_car.augmentation import HorizontalFlipImageDataAugmenter
# Target size (pixels) every image is resized to before feeding the model.
IMAGE_WIDTH, IMAGE_HEIGHT = 64, 64
# Rows cropped away before resizing (top: sky, bottom: car parts).
CROP_TOP, CROP_BOTTOM = 30, 25
class DatasetHandler(object):
    """Reads and writes driving-log CSV files.

    A raw log row holds three camera image paths plus telemetry (COLUMNS).
    The "transformed" layout melts the three camera columns into
    (pov, path) pairs while keeping the steering angle
    (TRANSFORMED_COLUMNS).
    """

    COLUMNS = ('center', 'left', 'right', 'steering_angle', 'speed',
               'throttle', 'brake')
    TRANSFORMED_COLUMNS = ('pov', 'path', 'steering_angle')

    @classmethod
    def read(cls, *paths, transform=True):
        """Load one or more driving logs; optionally melt to long format."""
        frames = [pd.read_csv(path, header=None, names=cls.COLUMNS)
                  for path in paths]
        dataset = pd.concat(frames)
        if not transform:
            return dataset
        return pd.melt(dataset, id_vars=['steering_angle'],
                       value_vars=['center', 'left', 'right'],
                       var_name='pov', value_name='path')

    @classmethod
    def write(cls, df, path, transformed=True):
        """Dump *df* to CSV (no index, no header) in the chosen layout."""
        if transformed:
            columns = cls.TRANSFORMED_COLUMNS
        else:
            columns = cls.COLUMNS
        df.to_csv(path, index=False, header=False, columns=columns)
class DatasetPreprocessor(object):
    """Offline dataset clean-up: filters rows and relocates image files."""

    @classmethod
    def strip_straight(cls, input_csv_path, output_path,
                       straight_threshold=0.1):
        # Keep only rows with a meaningful steering angle, copy their images
        # into output_path/IMG and write a rebased driving_log.csv there.
        dataset = DatasetHandler.read(input_csv_path, transform=False)
        dataset = dataset[dataset.steering_angle.abs() > straight_threshold]
        dataset = cls._copy_images(dataset, output_path)
        DatasetHandler.write(
            dataset, os.path.join(output_path, 'driving_log.csv'),
            transformed=False
        )
        return dataset

    @classmethod
    def _copy_images(cls, dataset, output_path):
        # Maps each image to output_path/IMG/<basename>.
        def build_target_path(orig_path):
            return os.path.join(
                output_path, 'IMG', os.path.split(orig_path)[1])

        # Copies the three camera images of one row to their new locations.
        def copy_images(row):
            shutil.copy(row.center, row.center_target_path)
            shutil.copy(row.left, row.left_target_path)
            shutil.copy(row.right, row.right_target_path)

        # NOTE: fails if output_path/IMG already exists (os.makedirs).
        os.makedirs(os.path.join(output_path, 'IMG'))
        extra_cols = ('center_target_path',
                      'left_target_path',
                      'right_target_path')
        # Extend every row with the three computed target paths.
        dataset = dataset.apply(
            lambda r: pd.Series(
                [r.center, r.left, r.right, r.steering_angle, r.speed,
                 r.throttle, r.brake, build_target_path(r.center),
                 build_target_path(r.left), build_target_path(r.right)],
                index=DatasetHandler.COLUMNS + extra_cols), axis=1
        )
        dataset.apply(copy_images, axis=1)
        # Point the camera columns at the copied files and drop the helpers.
        dataset['center'] = dataset['center_target_path']
        dataset['left'] = dataset['left_target_path']
        dataset['right'] = dataset['right_target_path']
        return dataset[list(DatasetHandler.COLUMNS)]
class DatasetGenerator(object):
    """Streams endless batches of (image, steering angle) pairs.

    Holds separate training/test DataFrames (pov, path, steering_angle) and
    applies the configured image augmenters while generating.
    """

    def __init__(self, training_set, test_set, image_data_augmenters,
                 steering_correction=None):
        self._training_set = training_set
        self._test_set = test_set
        self._augmenters = image_data_augmenters
        if steering_correction:
            # Side cameras get a fixed steering offset: positive for the
            # left camera, negative for the right, none for center.
            steer_corr = {
                'left': abs(steering_correction),
                'center': 0,
                'right': -abs(steering_correction)
            }
        else:
            steer_corr = None
        self._steering_correction = steer_corr

    @classmethod
    def from_csv(cls, image_data_augmenters, *csv_paths, test_size=0.25,
                 use_center_only=False, steering_correction=None):
        # The test set is sampled from center-camera rows only; side-camera
        # rows are added to the training set unless use_center_only is set.
        dataset = DatasetHandler.read(*csv_paths)
        center_only = dataset[dataset.pov == 'center']
        not_center_only = dataset[dataset.pov != 'center']
        test_set = center_only.sample(frac=test_size)
        # Boolean mask keeps center rows that were not drawn into the test set.
        training_set = center_only.iloc[~center_only.index.isin(
            test_set.index)]
        if not use_center_only:
            training_set = pd.concat([training_set, not_center_only])
        return cls(training_set, test_set, image_data_augmenters,
                   steering_correction=steering_correction)

    @classmethod
    def shuffle_dataset(cls, dataset):
        # Full shuffle with a fresh 0..n-1 index.
        return dataset.sample(frac=1).reset_index(drop=True)

    @property
    def training_set(self):
        return self._training_set

    @property
    def test_set(self):
        return self._test_set

    def training_set_batch_generator(self, batch_size):
        yield from self._dataset_batch_generator(
            self._training_set, batch_size)

    def test_set_batch_generator(self, batch_size):
        yield from self._dataset_batch_generator(
            self._test_set, batch_size)

    def _dataset_batch_generator(self, dataset, batch_size):
        # NOTE: the same pre-allocated arrays are yielded each time, so
        # consumers must copy a batch if they keep it beyond one step.
        # A partial batch left at the end of one pass carries over into the
        # next (reshuffled) pass rather than being dropped.
        i = 0
        batch_images = np.empty([batch_size, IMAGE_HEIGHT, IMAGE_WIDTH, 3],
                                dtype=np.uint8)
        batch_steerings = np.empty(batch_size)
        while True:
            for image, steering_angle in self._flow(
                    self.shuffle_dataset(dataset)):
                batch_images[i] = image
                batch_steerings[i] = steering_angle
                i += 1
                if i == batch_size:
                    yield batch_images, batch_steerings
                    i = 0

    def _flow(self, dataset):
        # One augmented sample per dataset row.
        for _, row in dataset.iterrows():
            yield self._flow_from_row(row)

    def _flow_from_row(self, row):
        # Load, apply the per-pov steering correction, then all augmenters.
        image = preprocess_image_from_path(row['path'])
        steering_angle = row['steering_angle']
        if self._steering_correction:
            steering_angle += self._steering_correction[row['pov']]
        for aug in self._augmenters:
            image, steering_angle = self._augment(
                aug, image, steering_angle)
        return image, steering_angle

    def _augment(self, augmenter, image, steering_angle):
        # A horizontal flip mirrors the road, so the steering sign flips too.
        augmented_image = augmenter.process_random(image)
        if isinstance(augmenter, HorizontalFlipImageDataAugmenter):
            steering_angle = -steering_angle
        return augmented_image, steering_angle
def preprocess_image_from_path(image_path):
    """Load an image from disk (as RGB) and run the standard preprocessing."""
    bgr = cv2.imread(image_path)
    rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
    return preprocess_image(rgb)
def preprocess_image(image):
    """Crop sky (top) and car parts (bottom), then resize to the model input."""
    trimmed = image[CROP_TOP:-CROP_BOTTOM, :]
    target_size = (IMAGE_WIDTH, IMAGE_HEIGHT)
    return cv2.resize(trimmed, target_size, interpolation=cv2.INTER_AREA)
|
nilq/baby-python
|
python
|
# Copyright (c) 2021, Rutwik Hiwalkar and Contributors
# See license.txt
# import frappe
import unittest
class TestQMailScheduleRule(unittest.TestCase):
    """Placeholder test case for the QMail Schedule Rule doctype.

    No behavior is exercised yet; add tests here.
    """
    pass
|
nilq/baby-python
|
python
|
#!/usr/bin/python
#eval p@20
import sys
# NOTE: Python 2 syntax (print statements) — not valid under Python 3.
if __name__ == '__main__':
    # Expect one argument: a file with one predicted rank per line.
    if len(sys.argv) < 2:
        print 'Usage:[pred]'
        exit(0)
    fi = open(sys.argv[1],'r')
    K = 20
    #precision@k
    P = 0
    # 943 users — presumably the MovieLens-100k user count; confirm.
    unum = 943
    rank = 0
    # Count predictions whose rank lands inside the top K
    # (ranks appear to be 0-based given the strict '<' — verify).
    for line in fi:
        rank = int(line.strip())
        if rank < K:
            P += 1
    # Average the hits over all users and all K slots.
    P/= float(unum*K)
    print 'Pre@%d:%.4f\n' %(K,P)
    fi.close()
|
nilq/baby-python
|
python
|
# Copyright (c) Facebook, Inc. and its affiliates.
import random
from typing import Optional, Tuple
import torch
from torch.nn import functional as F
from detectron2.config import CfgNode
from detectron2.structures import Instances
from densepose.converters.base import IntTupleBox
from .densepose_cse_base import DensePoseCSEBaseSampler
class DensePoseCSEConfidenceBasedSampler(DensePoseCSEBaseSampler):
    """
    Samples DensePose data from DensePose predictions, drawing each class's
    sample according to confidence value estimates.
    """

    def __init__(
        self,
        cfg: CfgNode,
        use_gt_categories: bool,
        embedder: torch.nn.Module,
        confidence_channel: str,
        count_per_class: int = 8,
        search_count_multiplier: Optional[float] = None,
        search_proportion: Optional[float] = None,
    ):
        """
        Args:
            cfg (CfgNode): the config of the model
            use_gt_categories (bool): whether ground-truth categories are used
            embedder (torch.nn.Module): computes mesh vertex embeddings
            confidence_channel (str): prediction attribute holding the
                confidences used for sampling, e.g. "coarse_segm_confidence"
            count_per_class (int): produce at most this many samples per
                category (default: 8)
            search_count_multiplier (float or None): if set, the candidate
                pool is `min(search_count_multiplier * count_per_class, N)`;
                mutually exclusive with `search_proportion` (default: None)
            search_proportion (float or None): if set, the candidate pool is
                `min(max(search_proportion * N, count_per_class), N)`;
                mutually exclusive with `search_count_multiplier`
                (default: None)
        """
        super().__init__(cfg, use_gt_categories, embedder, count_per_class)
        self.confidence_channel = confidence_channel
        self.search_count_multiplier = search_count_multiplier
        self.search_proportion = search_proportion
        assert (search_count_multiplier is None) or (search_proportion is None), (
            f"Cannot specify both search_count_multiplier (={search_count_multiplier})"
            f"and search_proportion (={search_proportion})"
        )

    def _produce_index_sample(self, values: torch.Tensor, count: int):
        """
        Select `count` indices out of `values` based on confidences.

        Args:
            values (torch.Tensor): tensor of shape [1, k] with one confidence
                per labeled point
            count (int): number of samples to produce, positive and <= k
        Return:
            indices of values (along axis 1) selected as a sample
        """
        num_points = values.shape[1]
        if num_points == count:
            return list(range(num_points))
        # Sort confidences ascending, restrict to the tail `pool_size`
        # entries, then sample uniformly from that pool.
        _, ascending_indices = torch.sort(values[0])
        if self.search_count_multiplier is not None:
            pool_size = min(int(count * self.search_count_multiplier), num_points)
        elif self.search_proportion is not None:
            pool_size = min(max(int(num_points * self.search_proportion), count), num_points)
        else:
            pool_size = min(count, num_points)
        picks = random.sample(range(pool_size), count)
        return ascending_indices[-pool_size:][picks]

    def _produce_mask_and_results(
        self, instance: Instances, bbox_xywh: IntTupleBox
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """
        Extract labels and DensePose results from an instance.

        Args:
            instance (Instances): an instance of
                `DensePoseEmbeddingPredictorOutputWithConfidences`
            bbox_xywh (IntTupleBox): the corresponding bounding box
        Return:
            mask (torch.Tensor): [H, W] DensePose segmentation mask
            embeddings (torch.Tensor): [D, H, W] DensePose CSE embeddings
            other_values (torch.Tensor): [1, H, W] CSE confidences, resized
                to the box and moved to CPU
        """
        _, _, box_w, box_h = bbox_xywh
        densepose_output = instance.pred_densepose
        mask, embeddings, _ = super()._produce_mask_and_results(instance, bbox_xywh)
        confidences = getattr(densepose_output, self.confidence_channel)
        # pyre-fixme[6]: Expected `Optional[int]` for 2nd param but got
        # `Tuple[int, int]`.
        other_values = F.interpolate(
            confidences,
            size=(box_h, box_w),
            mode="bilinear",
        )[0].cpu()
        return mask, embeddings, other_values
|
nilq/baby-python
|
python
|
# Demonstrates that a `finally` clause runs even when the `try` body raises:
# the message is printed, then the KeyboardInterrupt propagates to the caller.
try:
    raise KeyboardInterrupt
finally:
    print('Goodbye, world!')
|
nilq/baby-python
|
python
|
from django.db import models
class WelcomePage(models.Model):
    """
    Welcome page model

    Holds the text content displayed on the welcome page.
    """
    # Free-form page body, capped at 2000 characters.
    content = models.CharField(max_length=2000)
|
nilq/baby-python
|
python
|
import uuid
import json
from textwrap import dedent
import hashlib
from flask import Flask, jsonify, request
from blockchain import Blockchain
# Flask app exposing the blockchain over a small HTTP API.
app = Flask(__name__)
# Globally-unique address for this node (UUID4 without dashes); used as the
# recipient of mining rewards.
node_identifier = str(uuid.uuid4()).replace("-", "")
# Single shared Blockchain instance backing all routes.
blockchain = Blockchain()
# ADD routing addresses
@app.route("/mine", methods=["GET"])
def mine():
    """Mine a block: run proof-of-work, credit this node, forge the block."""
    # Proof of work is seeded by the previous block's proof.
    previous_block = blockchain.last_block
    proof = blockchain.proof_of_work(previous_block["proof"])
    # Sender "0" marks a mining reward of one coin paid to this node.
    blockchain.new_transaction(
        sender="0",
        recipient=node_identifier,
        amount=1,
    )
    # Forge the new block onto the chain.
    forged = blockchain.new_block(proof, blockchain.hash(previous_block))
    response = {
        "message": "New Block Forged",
        "index": forged["index"],
        "transactions": forged["transactions"],
        "proof": forged["proof"],
        "previous_hash": forged["previous_hash"],
    }
    return jsonify(response), 200
@app.route("/transactions/new", methods=["POST"])
def new_transaction():
    """Create a transaction from the POSTed JSON body."""
    payload = request.get_json()
    # sender, recipient and amount are all mandatory.
    for field in ("sender", "recipient", "amount"):
        if field not in payload:
            return "Missing values", 400
    # Queue the transaction; the index of its future block is returned.
    index = blockchain.new_transaction(
        payload["sender"],
        payload["recipient"],
        payload["amount"])
    response = {"message": f"Transaction will be added to Block {index}"}
    return jsonify(response), 201
@app.route("/chain", methods=["GET"])
def full_chain():
    """Return the entire chain together with its length."""
    chain = blockchain.chain
    return jsonify({"chain": chain, "length": len(chain)}), 200
# Run the development server locally when executed as a script.
if __name__ == "__main__":
    app.run(host="127.0.0.1", port=5000)
|
nilq/baby-python
|
python
|
from pathlib import Path
import numpy as np
import pandas as pd
import pytest
from py_muvr import FeatureSelector
from py_muvr.data_structures import (
FeatureEvaluationResults,
FeatureRanks,
InputDataset,
OuterLoopResults,
)
# Directory holding the CSV test assets, located next to this file.
ASSETS_DIR = Path(__file__).parent / "assets"
@pytest.fixture(scope="session")
def raw_results():
    """Nested OuterLoopResults: two repetitions, two outer folds each.

    Pure hand-built data used to exercise post-processing without running
    an actual feature-selection fit.
    """
    return [
        [
            OuterLoopResults(
                min_eval=FeatureEvaluationResults(
                    test_score=4,
                    model="model",
                    ranks=FeatureRanks(features=[0, 1], ranks=[1, 2], n_feats=10),
                ),
                max_eval=FeatureEvaluationResults(
                    test_score=5,
                    model="model",
                    ranks=FeatureRanks(
                        features=[0, 1, 2, 3], ranks=[1, 2, 4, 3], n_feats=10
                    ),
                ),
                mid_eval=FeatureEvaluationResults(
                    test_score=5,
                    model="model",
                    ranks=FeatureRanks(features=[0, 1, 3], ranks=[1, 2, 3], n_feats=10),
                ),
                n_features_to_score_map={5: 4, 4: 3, 3: 3, 2: 3},
            ),
            OuterLoopResults(
                min_eval=FeatureEvaluationResults(
                    test_score=3,
                    model="model",
                    ranks=FeatureRanks(
                        features=[0, 1, 4, 3], ranks=[1, 2, 3, 4], n_feats=10
                    ),
                ),
                max_eval=FeatureEvaluationResults(
                    test_score=3,
                    model="model",
                    ranks=FeatureRanks(
                        features=[0, 1, 4, 3], ranks=[1, 2, 3, 4], n_feats=10
                    ),
                ),
                mid_eval=FeatureEvaluationResults(
                    test_score=2,
                    model="model",
                    ranks=FeatureRanks(
                        features=[0, 1, 4, 3], ranks=[1, 2, 3, 4], n_feats=10
                    ),
                ),
                n_features_to_score_map={5: 5, 4: 4, 3: 5, 2: 5},
            ),
        ],
        [
            OuterLoopResults(
                min_eval=FeatureEvaluationResults(
                    test_score=4,
                    model="model",
                    ranks=FeatureRanks(features=[0, 1], ranks=[1, 2], n_feats=10),
                ),
                max_eval=FeatureEvaluationResults(
                    test_score=5,
                    model="model",
                    ranks=FeatureRanks(
                        features=[0, 1, 4, 2], ranks=[1, 2, 3, 4], n_feats=10
                    ),
                ),
                mid_eval=FeatureEvaluationResults(
                    test_score=5,
                    model="model",
                    ranks=FeatureRanks(features=[0, 1, 4], ranks=[2, 1, 3], n_feats=10),
                ),
                n_features_to_score_map={5: 5, 4: 3, 3: 5, 2: 3},
            ),
            OuterLoopResults(
                min_eval=FeatureEvaluationResults(
                    test_score=2,
                    model="model",
                    ranks=FeatureRanks(features=[0, 1], ranks=[1, 2], n_feats=10),
                ),
                max_eval=FeatureEvaluationResults(
                    test_score=2,
                    model="model",
                    ranks=FeatureRanks(
                        features=[0, 1, 2, 3, 4], ranks=[1, 2, 5, 4, 3], n_feats=10
                    ),
                ),
                mid_eval=FeatureEvaluationResults(
                    test_score=2,
                    model="model",
                    ranks=FeatureRanks(features=[0, 1, 4], ranks=[1, 2, 3], n_feats=10),
                ),
                n_features_to_score_map={5: 5, 4: 6, 3: 5, 2: 5},
            ),
        ],
    ]
@pytest.fixture
def inner_loop_results():
    """Two evaluation results over features 1-4 (second one has tied ranks)."""
    return [
        FeatureEvaluationResults(
            ranks=FeatureRanks(features=[1, 2, 3, 4], ranks=[3, 2, 1, 4]),
            test_score=0.2,
            model="estimator",
        ),
        FeatureEvaluationResults(
            ranks=FeatureRanks(features=[1, 2, 3, 4], ranks=[1.5, 1.5, 3, 4]),
            test_score=0.2,
            model="estimator",
        ),
    ]
@pytest.fixture
def inner_loop_results_2():
    """Evaluation results for the reduced feature set [2, 3, 4]."""
    return [
        FeatureEvaluationResults(
            ranks=FeatureRanks(features=[2, 3, 4], ranks=[3, 2, 1]),
            test_score=0.1,
            model="model",
        ),
        FeatureEvaluationResults(
            ranks=FeatureRanks(features=[2, 3, 4], ranks=[1.5, 1.5, 3]),
            test_score=0.5,
            model="model",
        ),
    ]
@pytest.fixture
def inner_loop_results_3():
    """Evaluation results for the reduced feature set [2, 4].

    NOTE(review): the ranks lists carry 3 entries for only 2 features,
    unlike the other fixtures — confirm FeatureRanks tolerates this or fix.
    """
    return [
        FeatureEvaluationResults(
            ranks=FeatureRanks(features=[2, 4], ranks=[3, 2, 1]),
            test_score=0.3,
            model="model",
        ),
        FeatureEvaluationResults(
            ranks=FeatureRanks(features=[2, 4], ranks=[1.5, 1.5, 3]),
            test_score=0.25,
            model="model",
        ),
    ]
@pytest.fixture
def rfe_raw_results(inner_loop_results, inner_loop_results_2, inner_loop_results_3):
    """Maps each RFE feature-subset (as a tuple) to its inner-loop results."""
    return {
        (1, 2, 3, 4): inner_loop_results,
        (2, 3, 4): inner_loop_results_2,
        (2, 4): inner_loop_results_3,
    }
@pytest.fixture
def dataset():
    """Small random dataset: 12 samples, 12 features, binary labels."""
    n_samples = 12
    features = np.random.rand(n_samples, n_samples)
    labels = np.random.choice([0, 1], n_samples)
    return InputDataset(X=features, y=labels, groups=np.arange(n_samples))
@pytest.fixture(scope="session")
def mosquito():
    """Mosquito dataset loaded from assets, shuffled; target column `Yotu`."""
    frame = pd.read_csv(ASSETS_DIR / "mosquito.csv", index_col=0)
    frame = frame.sample(frac=1)
    return InputDataset(
        X=frame.drop(columns=["Yotu"]).values,
        y=frame.Yotu.values,
        groups=frame.index,
    )
@pytest.fixture(scope="session")
def freelive():
    """Freelive dataset loaded from assets; target column `YR`."""
    frame = pd.read_csv(ASSETS_DIR / "freelive.csv", index_col=0)
    return InputDataset(
        X=frame.drop(columns=["YR"]).values,
        y=frame.YR.values,
        groups=frame.index,
    )
@pytest.fixture(scope="session")
def fs_results(raw_results):
    """Feature-selection results built by wiring a FeatureSelector by hand.

    Private attributes (_raw_results, _selected_features, _n_features) are
    assigned directly to skip the expensive fit step — brittle against
    FeatureSelector internals changing.
    """
    fs = FeatureSelector(n_outer=3, metric="MISS", estimator="RFC")
    fs._raw_results = raw_results
    fs.is_fit = True
    fs._selected_features = fs._post_processor.select_features(raw_results)
    fs._n_features = 5
    fs_results = fs.get_feature_selection_results(["A", "B", "C", "D", "E"])
    return fs_results
|
nilq/baby-python
|
python
|
from djitellopy import Tello

# Connect to the Tello drone over Wi-Fi.
tello = Tello()
tello.connect()

import cv2

# Show the control-panel image; the OpenCV window must have keyboard focus
# for the key handling below to work.
panel = cv2.imread('./DroneBlocks_TT.jpg')
cv2.imshow('tello panel', panel)

# Key -> drone action dispatch table ('q' quits and is handled separately):
# t/l take off and land, u/d/f/b translate 50 cm, c/w rotate 90 degrees.
key_actions = {
    ord('t'): lambda: tello.takeoff(),
    ord('l'): lambda: tello.land(),
    ord('u'): lambda: tello.move('up', 50),
    ord('d'): lambda: tello.move('down', 50),
    ord('f'): lambda: tello.move('forward', 50),
    ord('b'): lambda: tello.move('back', 50),
    ord('c'): lambda: tello.rotate_clockwise(90),
    ord('w'): lambda: tello.rotate_counter_clockwise(90),
}

while True:
    pressed = cv2.waitKey(1)
    if pressed == ord('q'):
        break
    action = key_actions.get(pressed)
    if action is not None:
        action()
|
nilq/baby-python
|
python
|
import pytest
from lemur.auth.ldap import * # noqa
from mock import patch, MagicMock
class LdapPrincipalTester(LdapPrincipal):
    """Test double exposing LdapPrincipal's private _bind/_authorize paths."""

    def __init__(self, args):
        super().__init__(args)
        self.ldap_server = 'ldap://localhost'

    def bind_test(self):
        # Fake LDAP search result: one user entry whose memberOf attribute
        # lists two group DNs (as bytes, matching python-ldap's output).
        groups = [('user', {'memberOf': ['CN=Lemur Access,OU=Groups,DC=example,DC=com'.encode('utf-8'),
                                         'CN=Pen Pushers,OU=Groups,DC=example,DC=com'.encode('utf-8')]})]
        self.ldap_client = MagicMock()
        self.ldap_client.search_s.return_value = groups
        self._bind()

    def authorize_test_groups_to_roles_admin(self):
        # NOTE(review): ''.join concatenates the DNs with no separator, so
        # ldap_groups becomes one long string; presumably _authorize does
        # substring matching — confirm against LdapPrincipal._authorize.
        self.ldap_groups = ''.join(['CN=Pen Pushers,OU=Groups,DC=example,DC=com',
                                    'CN=Lemur Admins,OU=Groups,DC=example,DC=com',
                                    'CN=Lemur Read Only,OU=Groups,DC=example,DC=com'])
        self.ldap_required_group = None
        self.ldap_groups_to_roles = {'Lemur Admins': 'admin', 'Lemur Read Only': 'read-only'}
        return self._authorize()

    def authorize_test_required_group(self, group):
        # Same separator-less concatenation as above.
        self.ldap_groups = ''.join(['CN=Lemur Access,OU=Groups,DC=example,DC=com',
                                    'CN=Pen Pushers,OU=Groups,DC=example,DC=com'])
        self.ldap_required_group = group
        return self._authorize()
@pytest.fixture()
def principal(session):
    """Yields an LdapPrincipalTester built from a dummy username/password."""
    args = {'username': 'user', 'password': 'p4ssw0rd'}
    yield LdapPrincipalTester(args)
class TestLdapPrincipal:
    """Exercises bind and authorization paths via LdapPrincipalTester."""

    @patch('ldap.initialize')
    def test_bind(self, app, principal):
        # 'ldap.initialize' is patched so _bind never contacts a server.
        self.test_ldap_user = principal
        self.test_ldap_user.bind_test()
        group = 'Pen Pushers'
        assert group in self.test_ldap_user.ldap_groups
        assert self.test_ldap_user.ldap_principal == 'user@example.com'

    def test_authorize_groups_to_roles_admin(self, app, principal):
        # Membership in 'Lemur Admins' must map to the admin role.
        self.test_ldap_user = principal
        roles = self.test_ldap_user.authorize_test_groups_to_roles_admin()
        assert any(x.name == "admin" for x in roles)

    def test_authorize_required_group_missing(self, app, principal):
        # A required group the user is not in yields no roles at all.
        self.test_ldap_user = principal
        roles = self.test_ldap_user.authorize_test_required_group('Not Allowed')
        assert not roles

    def test_authorize_required_group_access(self, session, principal):
        # Belonging to the required group grants at least the per-user role.
        self.test_ldap_user = principal
        roles = self.test_ldap_user.authorize_test_required_group('Lemur Access')
        assert len(roles) >= 1
        assert any(x.name == "user@example.com" for x in roles)
|
nilq/baby-python
|
python
|
import pyglet
from inspect import getargspec
from element import Element
from processor import Processor
from draw import labelsGroup
from utils import font
class Node(Element, Processor):
    '''
    Node is a main pyno element, in fact it is a function with in/outputs
    '''
    def __init__(self, x, y, batch, color=(200, 200, 200), code=None,
                 connects=None, size=(300, 150)):
        Element.__init__(self, x, y, color, batch)
        Processor.init_processor(self)  # node has a processor for calculation

        self.editor_size = size

        if connects:
            self.connected_to = connects

        if code:
            self.code = code
        else:
            # Default example body shown when a node is created empty.
            self.code = '''def newNode(a=0, b=0):
    result = a + b
    return result'''
        self.name = ''
        self.label = pyglet.text.Label(self.name, font_name=font,
                                       bold=True, font_size=11,
                                       anchor_x='center', anchor_y='center',
                                       batch=batch, group=labelsGroup,
                                       color=(255, 255, 255, 230))
        # Parse the code immediately so in/outputs are known from the start.
        self.new_code(self.code)

    def new_code(self, code):
        # New code, search for in/outputs
        self.code = code

        def_pos = code.find('def')
        if def_pos > -1:
            inputs, outputs = self.inputs, self.outputs
            bracket = code[def_pos:].find('(')
            if bracket > -1:
                # Function name is whatever sits between 'def' and '('.
                self.name = code[def_pos + 3:def_pos + bracket].strip()
                self.label.text = self.name
                S, G = {}, {}  # temporally stores and globals to exec function
                # SECURITY NOTE: exec/eval of user-authored node code — pyno
                # nodes are user code by design, but never feed untrusted input.
                # NOTE(review): S and G are created but never passed to
                # exec/eval, so the dummy function lands in the local
                # namespace (fragile in Python 3); also inspect.getargspec
                # is removed in Python 3.11 — consider getfullargspec.
                try:
                    exec(code[def_pos:])  # dummy function to eject args names
                except Exception as ex:
                    self.problem = True
                    self.er_label.text = repr(ex)
                else:
                    # got tuple with args names like ('a', 'b')
                    inputs = tuple(getargspec(eval(self.name)).args)
                    ret_pos = code.rfind('return')
                    if ret_pos > -1:
                        # Output names come from the final return expression.
                        outputs = tuple(x.strip()
                                        for x in code[ret_pos + 6:].split(','))
            # Resize the node to fit its name and widest in/output row.
            self.w = max(len(self.name) * 10 + 20,
                         len(inputs) * 20, len(outputs) * 20, 64)
            self.cw = self.w // 2
            self.insert_inouts({'inputs': inputs,
                                'outputs': outputs})

    def render_base(self):
        # Keep the label centred on the node when it is redrawn.
        Element.render_base(self)
        self.label.x, self.label.y = self.x, self.y

    def delete(self, fully=False):
        Element.delete(self, fully)
        self.label.delete()
|
nilq/baby-python
|
python
|
from . import account
from . import balance
from . import bigmap
from . import block
from . import commitment
from . import contract
from . import cycle
from . import delegate
from . import head
from . import operation
from . import protocol
from . import quote
from . import reward
from . import right
from . import software
from . import statistics
from . import voting
|
nilq/baby-python
|
python
|
# Copyright 2019 The Vitess Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base environment for full cluster tests.
Contains functions that all environments should implement along with functions
common to all environments.
"""
# pylint: disable=unused-argument
import json
import random
from vttest import sharding_utils
class VitessEnvironmentError(Exception):
  """Raised when an operation is unsupported in the current environment."""
  pass
class BaseEnvironment(object):
"""Base Environment."""
  def __init__(self):
    # Set by concrete environments during setup; None until then.
    self.vtctl_helper = None

  def create(self, **kwargs):
    """Create the environment.

    Args:
      **kwargs: kwargs parameterizing the environment.

    Raises:
      VitessEnvironmentError: Raised if unsupported.
    """
    raise VitessEnvironmentError(
        'create unsupported in this environment')
  def use_named(self, instance_name):
    """Populate this instance based on a pre-existing environment.

    Builds self.master_capable_tablets: per keyspace and shard, the tablets
    eligible to become master (masters/replicas in a primary cell).

    Args:
      instance_name: Name of the existing environment instance (string).
    """
    self.master_capable_tablets = {}
    for keyspace, num_shards in zip(self.keyspaces, self.num_shards):
      self.master_capable_tablets[keyspace] = {}
      for shard_name in sharding_utils.get_shard_names(num_shards):
        raw_shard_tablets = self.vtctl_helper.execute_vtctl_command(
            ['ListShardTablets', '%s/%s' % (keyspace, shard_name)])
        # One tablet per non-empty line; fields are space-separated with the
        # alias first and the tablet type at index 3.
        split_shard_tablets = [
            t.split(' ') for t in raw_shard_tablets.split('\n') if t]
        self.master_capable_tablets[keyspace][shard_name] = [
            t[0] for t in split_shard_tablets
            if (self.get_tablet_cell(t[0]) in self.primary_cells
                and (t[3] == 'master' or t[3] == 'replica'))]
  def destroy(self):
    """Teardown the environment.

    Raises:
      VitessEnvironmentError: Raised if unsupported
    """
    raise VitessEnvironmentError(
        'destroy unsupported in this environment')

  def create_table(self, table_name, schema=None, validate_deadline_s=60):
    """Apply a (default or given) schema to every keyspace.

    NOTE(review): validate_deadline_s is accepted but unused here —
    presumably consumed by subclass overrides; confirm before removing.
    """
    schema = schema or (
        'create table %s (id bigint auto_increment, msg varchar(64), '
        'keyspace_id bigint(20) unsigned NOT NULL, primary key (id)) '
        'Engine=InnoDB' % table_name)
    for keyspace in self.keyspaces:
      self.vtctl_helper.execute_vtctl_command(
          ['ApplySchema', '-sql', schema, keyspace])

  def delete_table(self, table_name):
    """Drop the table (if it exists) from every keyspace."""
    for keyspace in self.keyspaces:
      self.vtctl_helper.execute_vtctl_command(
          ['ApplySchema', '-sql', 'drop table if exists %s' % table_name,
           keyspace])
  # The following operations depend on the concrete deployment and raise in
  # the base class; environment subclasses override them.

  def get_vtgate_conn(self, cell):
    """Gets a connection to a vtgate in a particular cell.

    Args:
      cell: cell to obtain a vtgate connection from (string).

    Returns:
      A vtgate connection.

    Raises:
      VitessEnvironmentError: Raised if unsupported.
    """
    raise VitessEnvironmentError(
        'get_vtgate_conn unsupported in this environment')

  def restart_mysql_task(self, tablet_name, task_name, is_alloc=False):
    """Restart a job within the mysql alloc or the whole alloc itself.

    Args:
      tablet_name: tablet associated with the mysql instance (string).
      task_name: Name of specific task (droid, vttablet, mysql, etc.).
      is_alloc: True to restart entire alloc.

    Returns:
      return restart return val.

    Raises:
      VitessEnvironmentError: Raised if unsupported.
    """
    raise VitessEnvironmentError(
        'restart_mysql_task unsupported in this environment')

  def restart_vtgate(self, cell=None, task_num=None):
    """Restarts a vtgate task.

    If cell and task_num are unspecified, restarts a random task in a random
    cell.

    Args:
      cell: cell containing the vtgate task to restart (string).
      task_num: which vtgate task to restart (int).

    Returns:
      return val for restart.

    Raises:
      VitessEnvironmentError: Raised if unsupported.
    """
    raise VitessEnvironmentError(
        'restart_vtgate unsupported in this environment')

  def wait_for_good_failover_status(
      self, keyspace, shard_name, failover_completion_timeout_s=60):
    """Wait until failover status shows complete.

    Repeatedly queries the master tablet for failover status until it is 'OFF'.
    Most of the time the failover status check will immediately pass. When a
    failover is in progress, it tends to take a good 5 to 10 attempts before
    status is 'OFF'.

    Args:
      keyspace: Name of the keyspace to reparent (string).
      shard_name: name of the shard to verify (e.g. '-80') (string).
      failover_completion_timeout_s: Failover completion timeout (int).

    Raises:
      VitessEnvironmentError: Raised if unsupported.
    """
    raise VitessEnvironmentError(
        'wait_for_good_failover_status unsupported in this environment')

  def wait_for_healthy_tablets(self, deadline_s=300):
    """Wait until all tablets report healthy status.

    Args:
      deadline_s: Deadline timeout (seconds) (int).

    Raises:
      VitessEnvironmentError: Raised if unsupported.
    """
    raise VitessEnvironmentError(
        'wait_for_healthy_tablets unsupported in this environment')
def is_tablet_healthy(self, tablet_name):
vttablet_stream_health = json.loads(self.vtctl_helper.execute_vtctl_command(
['VtTabletStreamHealth', tablet_name]))
return 'health_error' not in vttablet_stream_health['realtime_stats']
  def get_next_master(self, keyspace, shard_name, cross_cell=False):
    """Determine what instance to select as the next master.

    If the next master is cross-cell, rotate the master cell and use instance 0
    as the master. Otherwise, rotate the instance number.

    Args:
      keyspace: the name of the keyspace to reparent (string).
      shard_name: name of the shard to reparent (string).
      cross_cell: Whether the desired reparent is to another cell (bool).

    Returns:
      Tuple of cell, task num, tablet uid (string, int, string).
    """
    num_tasks = self.keyspace_alias_to_num_instances_dict[keyspace]['replica']
    current_master = self.get_current_master_name(keyspace, shard_name)
    current_master_cell = self.get_tablet_cell(current_master)
    next_master_cell = current_master_cell
    next_master_task = 0
    if cross_cell:
      # Rotate to the next primary cell; instance 0 becomes master there.
      next_master_cell = self.primary_cells[(
          self.primary_cells.index(current_master_cell) + 1) % len(
              self.primary_cells)]
    else:
      # Same cell: rotate to the next task number, wrapping around.
      next_master_task = (
          (self.get_tablet_task_number(current_master) + 1) % num_tasks)
    # Restrict master-capable tablets to those in the chosen cell.
    tablets_in_cell = [tablet for tablet in
                       self.master_capable_tablets[keyspace][shard_name]
                       if self.get_tablet_cell(tablet) == next_master_cell]
    return (next_master_cell, next_master_task,
            tablets_in_cell[next_master_task])
  # Two more deployment-specific hooks; base class always raises.

  def get_tablet_task_number(self, tablet_name):
    """Gets a tablet's 0 based task number.

    Args:
      tablet_name: Name of the tablet (string).

    Returns:
      0 based task number (int).

    Raises:
      VitessEnvironmentError: Raised if unsupported.
    """
    raise VitessEnvironmentError(
        'get_tablet_task_number unsupported in this environment')

  def external_reparent(self, keyspace, shard_name, new_master_name):
    """Perform a reparent through external means (Orchestrator, etc.).

    Args:
      keyspace: name of the keyspace to reparent (string).
      shard_name: shard name (string).
      new_master_name: tablet name of the tablet to become master (string).

    Raises:
      VitessEnvironmentError: Raised if unsupported.
    """
    raise VitessEnvironmentError(
        'external_reparent unsupported in this environment')
def internal_reparent_available(self):
"""Checks if the environment can do a vtctl reparent."""
return 'PlannedReparentShard' in (
self.vtctl_helper.execute_vtctl_command(['help']))
def automatic_reparent_available(self):
"""Checks if the environment can automatically reparent."""
return False
def explicit_external_reparent_available(self):
"""Checks if the environment can explicitly reparent via external tools."""
return False
def internal_reparent(self, keyspace, shard_name, new_master_name,
emergency=False):
"""Performs an internal reparent through vtctl.
Args:
keyspace: name of the keyspace to reparent (string).
shard_name: string representation of the shard to reparent (e.g. '-80').
new_master_name: Name of the new master tablet (string).
emergency: True to perform an emergency reparent (bool).
"""
reparent_type = (
'EmergencyReparentShard' if emergency else 'PlannedReparentShard')
self.vtctl_helper.execute_vtctl_command(
[reparent_type, '%s/%s' % (keyspace, shard_name), new_master_name])
self.vtctl_helper.execute_vtctl_command(['RebuildKeyspaceGraph', keyspace])
def get_current_master_cell(self, keyspace):
  """Return the master cell of the keyspace's first shard.

  Assumes every shard in the keyspace shares the same master cell.

  Args:
    keyspace: name of the keyspace to get the master cell for (string).

  Returns:
    master cell name (string).
  """
  shard_count = self.num_shards[self.keyspaces.index(keyspace)]
  first_shard = sharding_utils.get_shard_name(0, shard_count)
  master_tablet = self.get_current_master_name(keyspace, first_shard)
  return self.get_tablet_cell(master_tablet)
def get_current_master_name(self, keyspace, shard_name):
  """Return the current master tablet's name (cell-uid).

  Args:
    keyspace: name of the keyspace to get information on the master.
    shard_name: string representation of the shard in question (e.g. '-80').

  Returns:
    master tablet name (cell-uid) (string).
  """
  raw = self.vtctl_helper.execute_vtctl_command(
      ['GetShard', '%s/%s' % (keyspace, shard_name)])
  alias = json.loads(raw)['master_alias']
  return '{0}-{1}'.format(alias['cell'], alias['uid'])
def get_random_tablet(self, keyspace=None, shard_name=None, cell=None,
                      tablet_type=None, task_number=None):
  """Get a random tablet name.

  Args:
    keyspace: name of the keyspace to get information on the master.
    shard_name: shard to select tablet from (None for random) (string).
    cell: cell to select tablet from (None for random) (string).
    tablet_type: type of tablet to select (None for random) (string).
    task_number: a specific task number (None for random) (int).

  Returns:
    random tablet name (cell-uid) (string).
  """
  keyspace = keyspace or random.choice(self.keyspaces)
  # NOTE(review): random.randint's upper bound is inclusive, so this can
  # yield an index equal to the shard count (out of range for 0-based
  # shard names). Also, other methods here read self.num_shards while
  # this one reads self.shards — confirm both attributes before relying
  # on random shard selection.
  shard_name = shard_name or (
      sharding_utils.get_shard_name(
          random.randint(0, self.shards[self.keyspaces.index(keyspace)])))
  cell = cell or random.choice(self.cells)
  # One entry per line of ListShardTablets output, split into whitespace
  # separated fields; field 0 is the tablet name, field 3 its type.
  tablets = [t.split(' ') for t in self.vtctl_helper.execute_vtctl_command(
      ['ListShardTablets', '%s/%s' % (keyspace, shard_name)]).split('\n')]
  cell_tablets = [t for t in tablets if self.get_tablet_cell(t[0]) == cell]
  # NOTE(review): task_number == 0 is falsy and falls through to random
  # selection — presumably unintended for an explicit task 0; confirm.
  if task_number:
    return cell_tablets[task_number][0]
  if tablet_type:
    return random.choice([t[0] for t in cell_tablets if t[3] == tablet_type])
  return random.choice(cell_tablets)[0]
def get_tablet_cell(self, tablet_name):
  """Return the cell prefix of a tablet name.

  Args:
    tablet_name: name of the tablet, including cell prefix (string).

  Returns:
    tablet's cell (string).
  """
  cell, _, _ = tablet_name.partition('-')
  return cell
def get_tablet_uid(self, tablet_name):
  """Return the numeric uid suffix of a tablet name.

  Args:
    tablet_name: name of the tablet, including cell prefix (string).

  Returns:
    tablet's uid (int).
  """
  _, _, uid_text = tablet_name.rpartition('-')
  return int(uid_text)
def get_tablet_keyspace(self, tablet_name):
  """Return the keyspace a tablet serves, as reported by vtctl GetTablet.

  Args:
    tablet_name: name of the tablet, including cell prefix (string).

  Returns:
    tablet's keyspace (string).
  """
  tablet_info = json.loads(self.vtctl_helper.execute_vtctl_command(
      ['GetTablet', tablet_name]))
  return tablet_info['keyspace']
def get_tablet_shard(self, tablet_name):
  """Return the shard a tablet serves, as reported by vtctl GetTablet.

  Args:
    tablet_name: name of the tablet, including cell prefix (string).

  Returns:
    tablet's shard (string).
  """
  tablet_info = json.loads(self.vtctl_helper.execute_vtctl_command(
      ['GetTablet', tablet_name]))
  return tablet_info['shard']
def get_tablet_type(self, tablet_name):
  """Return the current tablet type reported by vtctl GetTablet.

  Args:
    tablet_name: name of the tablet, including cell prefix (string).

  Returns:
    current tablet type (e.g. spare, replica, rdonly) (string).
  """
  tablet_info = json.loads(self.vtctl_helper.execute_vtctl_command(
      ['GetTablet', tablet_name]))
  return tablet_info['type']
def get_tablet_ip_port(self, tablet_name):
  """Return the tablet's 'host:port' endpoint from vtctl GetTablet.

  Args:
    tablet_name: name of the tablet, including cell prefix (string).

  Returns:
    ip:port (string); IPv6 hosts are wrapped in square brackets.
  """
  tablet_info = json.loads(self.vtctl_helper.execute_vtctl_command(
      ['GetTablet', tablet_name]))
  host = tablet_info['hostname']
  if ':' in host:
    # Bracket IPv6 literals so "host:port" parses unambiguously.
    host = '[{0}]'.format(host)
  return '{0}:{1}'.format(host, tablet_info['port_map']['vt'])
def get_tablet_types_for_shard(self, keyspace, shard_name):
  """Get the types for all tablets in a shard.

  Args:
    keyspace: name of keyspace to get tablet information on (string).
    shard_name: single shard to obtain tablet types from (string).

  Returns:
    list of (tablet name, tablet type) pairs.
  """
  listing = self.vtctl_helper.execute_vtctl_command(
      ['ListShardTablets', '{0}/{1}'.format(keyspace, shard_name)])
  pairs = []
  for line in listing.split('\n'):
    if not line:
      continue
    # Field 0 is the tablet alias, field 3 the tablet type.
    words = line.split()
    pairs.append((words[0], words[3]))
  return pairs
def get_all_tablet_types(self, keyspace, num_shards):
  """Get the types for all tablets in a keyspace.

  Args:
    keyspace: name of keyspace to get tablet information on (string).
    num_shards: number of shards in the keyspace (int).

  Returns:
    list of (tablet name, tablet type) pairs across every shard.
  """
  all_info = []
  for shard in sharding_utils.get_shard_names(num_shards):
    all_info.extend(self.get_tablet_types_for_shard(keyspace, shard))
  return all_info
def backup(self, tablet_name):
  """Run a backup on the given tablet.

  (The previous docstring, "Wait until all tablets report healthy
  status", was copy-pasted from an unrelated helper.)

  Args:
    tablet_name: Name of tablet to backup (string).

  Raises:
    VitessEnvironmentError: Raised if unsupported.
  """
  raise VitessEnvironmentError(
      'backup unsupported in this environment')
def drain_tablet(self, tablet_name, duration_s=600):
  """Place a drain on a tablet.

  Args:
    tablet_name: vttablet to drain (string).
    duration_s: how long the drain should persist, in seconds (int).

  Raises:
    VitessEnvironmentError: always; draining is not supported here.
  """
  raise VitessEnvironmentError(
      'drain_tablet unsupported in this environment')
def is_tablet_drained(self, tablet_name):
  """Check whether a tablet is currently drained.

  Args:
    tablet_name: vttablet to check (string).

  Raises:
    VitessEnvironmentError: always; drain state is not tracked here.
  """
  raise VitessEnvironmentError(
      'is_tablet_drained unsupported in this environment')
def undrain_tablet(self, tablet_name):
  """Remove an existing drain from a tablet.

  Args:
    tablet_name: vttablet name to undrain (string).

  Raises:
    VitessEnvironmentError: always; draining is not supported here.
  """
  raise VitessEnvironmentError(
      'undrain_tablet unsupported in this environment')
def is_tablet_undrained(self, tablet_name):
  """Check whether a tablet has no drain applied.

  Args:
    tablet_name: vttablet to check (string).

  Raises:
    VitessEnvironmentError: always; drain state is not tracked here.
  """
  raise VitessEnvironmentError(
      'is_tablet_undrained unsupported in this environment')
def poll_for_varz(self, tablet_name, varz, timeout=60.0,
                  condition_fn=None, converter=str, condition_msg=None):
  """Poll a process's varz until they exist / match a condition.

  Args:
    tablet_name: the name of the process that we're trying to poll vars
      from.
    varz: name of the vars to fetch from varz.
    timeout: number of seconds that we should attempt to poll for.
    condition_fn: a function that takes the var as input, and returns a
      truthy value if it matches the success conditions.
    converter: function to convert varz value.
    condition_msg: string describing the conditions that we're polling
      for, used for error messaging.

  Returns:
    dict of requested varz.

  Raises:
    VitessEnvironmentError: always in this environment; elsewhere also
      raised when the varz conditions aren't met within the timeout.
  """
  raise VitessEnvironmentError(
      'poll_for_varz unsupported in this environment')
def truncate_usertable(self, keyspace, shard, table=None):
  """Truncate a table on the shard's current master via ExecuteFetchAsDba.

  Args:
    keyspace: keyspace containing the table (string).
    shard: shard name (string).
    table: table to truncate; defaults to self.tablename when None.
  """
  tablename = table or self.tablename
  master_tablet = self.get_current_master_name(keyspace, shard)
  self.vtctl_helper.execute_vtctl_command(
      ['ExecuteFetchAsDba', master_tablet, 'truncate %s' % tablename])
def get_tablet_query_total_count(self, tablet_name):
  """Fetch the total query count served by a tablet.

  Args:
    tablet_name: name of the tablet to get query count from (string).

  Returns:
    query total count (int).

  Raises:
    VitessEnvironmentError: always; query stats are unavailable here.
  """
  raise VitessEnvironmentError(
      'get_tablet_query_total_count unsupported in this environment')
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# @Author: Amar Prakash Pandey
# @Co-Author: Aman Garg
# @Date: 25-10-2016
# @Email: amar.om1994@gmail.com
# @Github username: @amarlearning
# MIT License. You can find a copy of the License
# @http://amarlearning.mit-license.org
# import library here
import pygame
import time
import random
from os import path
# Pygame module initialised
pygame.init()
# Material color init (RGB tuples used throughout the game)
red = (255,0,0)
green = (0,255,0)
blue = (0,0,255)
darkBlue = (0,0,128)
white = (255,255,255)
black = (0,0,0)
grey = (211,211,211)
# Frames per second (game speed baseline; see clock.tick(FPS) in gameloop)
FPS = 6
# Display width and height are defined
display_width = 950
display_height = 700
# Folder paths, resolved relative to this file so the game can be run
# from any working directory
assets = path.join(path.dirname(__file__), 'assets/image')
extras = path.join(path.dirname(__file__), 'extras')
# Init images & sounds
gameIcon = pygame.image.load(path.join(assets + '/gameicon.png'))
grassRoad = pygame.image.load(path.join(assets + '/grassslip.png'))
stripOne = pygame.image.load(path.join(assets + '/stripone.png'))
stripTwo = pygame.image.load(path.join(assets + '/striptwo.png'))
coverImage = pygame.image.load(path.join(assets + '/cover.png'))
# Three frames of the player's car, cycled to animate it
SmartCarImage = [pygame.image.load(path.join(assets + '/newcar0_opt.png')),
                 pygame.image.load(path.join(assets + '/newcar2_opt.png')),
                 pygame.image.load(path.join(assets + '/newcar3_opt.png'))]
RivalCarImage =pygame.image.load(path.join(assets + '/Black_viper_opt.png'))
Boom =pygame.image.load(path.join(assets + '/exp.png'))
GameOver =pygame.image.load(path.join(assets + '/gameover.png'))
# Game window, caption initialised
gameDisplay = pygame.display.set_mode((display_width, display_height))
# Game icon init
pygame.display.set_caption('SmartCar')
pygame.display.set_icon(gameIcon)
# Clock init for frame-rate control
clock = pygame.time.Clock()
# Fonts init
smallfont = pygame.font.SysFont("comicsansms", 15)
mediumfont = pygame.font.SysFont("comicsansms", 40)
largefont = pygame.font.SysFont("comicsansms", 60)
# Engine sound, looped forever (-1)
pygame.mixer.music.load(path.join(extras, "engine_sound.mp3"))
pygame.mixer.music.play(-1)
# function to init all game assets!
def init():
    """Redraw the static background: grass strips on both sides, the grey
    road borders, and the two lane-divider strips.

    Called once per frame from gameloop() (and from Kaboom()) to clear the
    previous frame before sprites are drawn on top.
    """
    grassSlip = 0
    grass_width = 170
    grass_height = 700
    # Road and Greenland seperator
    border_width = 30
    border_height = 700
    # Game basic design init [Left side] & [Right side]
    gameDisplay.fill(black)
    pygame.draw.rect(gameDisplay, grey, (grass_width, 0, border_width, border_height))
    pygame.draw.rect(gameDisplay, grey, (display_width - grass_width - border_width, 0, border_width, border_height))
    # Tile the grass texture down both edges (12 tiles x 63 px covers 700 px)
    for x in range(0,12):
        gameDisplay.blit(grassRoad, (0, grassSlip))
        gameDisplay.blit(grassRoad, (780, grassSlip))
        grassSlip = grassSlip + 63
    # Road under maintainance, be safe!
    gameDisplay.blit(stripOne, (380,0))
    gameDisplay.blit(stripTwo, (560,0))
    pygame.display.update()
# smart car image function
def carImage(x, y, which):
    """Blit animation frame *which* of the player's car at (x, y)."""
    sprite = SmartCarImage[which]
    gameDisplay.blit(sprite, (x, y))
# rival car image function
def rivalcarImage(x, y):
    """Blit the rival car sprite at (x, y)."""
    gameDisplay.blit(RivalCarImage, (x, y))
def Kaboom(score):
    """Show the game-over screen with the final *score* and wait for input:
    RETURN restarts the game (recursively calls gameloop()), Q quits.
    """
    init()
    gameDisplay.blit(GameOver,(382,175))
    pygame.draw.rect(gameDisplay, white, (200, 400, 550, 50))
    text = smallfont.render("Press [RETURN] to continue and [Q] to quit", True, darkBlue)
    gameDisplay.blit(text, [370,400])
    text = smallfont.render("Score : " + str(score), True, red)
    gameDisplay.blit(text, [450,420])
    pygame.display.update()
    gameExit = True
    while gameExit:
        for event in pygame.event.get():
            # NOTE(review): pygame.quit() only uninitialises pygame; the
            # while loop keeps running afterwards and the next
            # pygame.event.get() will fail — confirm intended shutdown path.
            if event.type == pygame.QUIT:
                pygame.quit()
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_RETURN:
                    gameExit = False
                    gameloop()
                if event.key == pygame.K_q:
                    pygame.quit()
def Score(score):
    """Draw the current *score* on a green panel in the top-left corner."""
    pygame.draw.rect(gameDisplay, green, (0, 0, 170, 45))
    label = smallfont.render("Score : " + str(score), True, darkBlue)
    gameDisplay.blit(label, [10, 10])
def gameloop():
    """Main game loop.

    Each frame: cycle the player's car animation, handle arrow-key input,
    redraw the background, move the player's car within the road bounds,
    advance rival cars (more lanes unlock as the score grows), detect
    crashes (which hand off to Kaboom), and update score and speed.
    """
    # All necessary variable initalised
    init()
    # Kickstart variable
    gameplay = True
    score = 0
    # Grass 2D image & Road Divider
    Divider = True
    # Road's divider width and height
    divider_width = 20
    divider_height = 80
    # carImage position (player starts in the left lane)
    carX = 225
    carY = 560
    # Rival car lane X coordinates and per-lane Y positions
    rcarX= [225,415,605]
    rcarY= 0
    Ya=rcarY
    Yb=-140
    Yc=-280
    # speed factor: pixels each rival car advances per frame
    factor = 20
    # car change variable (animation frame index)
    which_car = 0
    # Picturising car image, sorry SmartCar image
    carImage(carX,carY, which_car)
    change_x = 0
    rivalcarImage(rcarX[0],rcarY)
    # Heart starts beating, Don't stop it!
    while gameplay:
        # Cycle the player's car animation frame (0 -> 1 -> 2 -> 0 ...)
        if which_car == 2:
            which_car = 0
        else:
            which_car += 1
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                gameplay = False
            # Left/right arrows shift the car one lane (190 px) per frame
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_RIGHT:
                    change_x = 190
                if event.key == pygame.K_LEFT:
                    change_x = -190
            if event.type == pygame.KEYUP:
                if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
                    change_x = 0
        init()
        # changing position of SmartCar; undo the move if it leaves the road
        carX += change_x
        if (carX<=700 and carX>=205):
            carImage(carX, carY, which_car)
        else:
            carX -= change_x
            carImage(carX, carY, which_car)
        # controlling movements of traffic: each lane activates at a score
        # threshold and respawns at the top after a random distance
        if score > 10:
            rivalcarImage(rcarX[0],Ya)
            Ya += factor
            if Ya > random.randint(1000, 2000):
                Ya = 0
        if score > 32:
            rivalcarImage(rcarX[1],Yb)
            Yb += factor
            if Yb > random.randint(1000, 2000):
                Yb=0
        if score > 75:
            rivalcarImage(rcarX[2],Yc)
            Yc += factor
            if Yc > random.randint(1700, 2000):
                Yc=0
        # car conflict avoiding condition: keep rivals vertically separated
        if (abs(Ya-Yb) < 280) or (abs(Yb-Yc) < 280):
            Yb -= 350
        # car crash condiiton! (rival overlaps the player's row 470..700)
        if (carX == rcarX[0] and 470 < Ya <700) or (carX == rcarX[1] and 470 < Yb <700) or (carX == rcarX[2] and 470 < Yc <700):
            gameDisplay.blit(Boom, (carX,530))
            pygame.display.flip()
            time.sleep(1)
            Kaboom(score)
        # Updating Score
        Score(score)
        score = score + 1
        # Car moving visualization: alternate the lane strips every frame
        if Divider == True:
            gameDisplay.blit(stripTwo, (380, 0))
            gameDisplay.blit(stripOne, (560, 0))
            Divider = False
        else:
            gameDisplay.blit(stripOne, (380, 0))
            gameDisplay.blit(stripTwo, (560, 0))
            Divider = True
        pygame.display.update()
        # speed of game.
        clock.tick(FPS)
        # Game speed increases with increase in time.
        # NOTE(review): this is also true at score == 0, so factor gets one
        # bump on the very first frame — confirm if intended.
        if not score %1000:
            factor += 10
# Kickstart the game! (blocks until the player quits or crashes out)
gameloop()
# You will win, try one more time. Don't Quit.
pygame.quit()
# you can signoff now, everything looks good!
quit()
|
nilq/baby-python
|
python
|
from django.urls import path
from django_admin_sticky_notes.views import StickyNoteView
# Mount the sticky-notes view at this app's URL root.
urlpatterns = [
    path("", StickyNoteView.as_view()),
]
|
nilq/baby-python
|
python
|
import os
import pickle
import cv2

if __name__ == '__main__':
    # Hard-coded sample: one color observation file from the training set.
    folder = './rearrangement-train/color/000001-2.pkl'
    with open(folder, 'rb') as f:
        tmp = pickle.load(f)
    # Show the first three frames one by one; press any key to advance.
    for i in range(3):
        # NOTE(review): assumes tmp indexes as [frame, 0, ...] and yields
        # an image array cv2 can display — confirm the pickle layout.
        img = tmp[i, 0, ...]
        cv2.imshow('haha', img)
        cv2.waitKey(0)
|
nilq/baby-python
|
python
|
def numRescueBoats(people, limit):
    """Return the minimum number of two-person boats needed to carry everyone.

    Greedy strategy: sort the weights, then repeatedly pair the heaviest
    remaining person with the lightest if they fit together under *limit*;
    otherwise the heaviest rides alone. Assumes each weight <= limit.
    Note: sorts *people* in place.

    Args:
        people: list of positive integer weights.
        limit: maximum combined weight a boat can carry.

    Returns:
        Minimum number of boats (int).
    """
    boats = 0
    people.sort()
    # Bug fix: the original `left = 0, right = len(people)-1` was a
    # SyntaxError (invalid chained assignment); use tuple unpacking.
    left, right = 0, len(people) - 1
    while left <= right:
        if left == right:
            # Exactly one person remains; they take a boat alone.
            boats += 1
            break
        if people[left] + people[right] <= limit:
            # Lightest person shares the heaviest person's boat.
            left += 1
        boats += 1
        right -= 1
    return boats
|
nilq/baby-python
|
python
|
from django.apps import AppConfig
from orchestra.core import accounts
class ContactsConfig(AppConfig):
    """Django app config for orchestra.contrib.contacts."""
    name = 'orchestra.contrib.contacts'
    verbose_name = 'Contacts'

    def ready(self):
        # Imported here so the app registry is fully loaded before the
        # model is registered with the accounts service catalog.
        from .models import Contact
        accounts.register(Contact, icon='contact_book.png')
|
nilq/baby-python
|
python
|
from ... import UndirectedGraph
from unittest import TestCase, main
class TestEq(TestCase):
    """Equality (__eq__) tests for UndirectedGraph."""

    def test_eq(self) -> None:
        """A graph must compare equal to a copy of itself."""
        edges = {
            ("a", "b"): 10,
            ("b", "c"): 20,
            ("l", "m"): 30
        }
        vertices = {
            "a": 10,
            "b": 20,
            "z": 30
        }
        g = UndirectedGraph(edges=edges, vertices=vertices)
        self.assertEqual(g, g.copy(), "Should test equality of graphs.")

    def test_empty(self) -> None:
        """Two freshly constructed empty graphs must be equal."""
        self.assertEqual(UndirectedGraph(), UndirectedGraph(), "Should compare empty graphs.")

    def test_negative(self) -> None:
        """Graphs differing in edges or vertex weights must be unequal."""
        # Differ in one edge endpoint: ("l", "m") vs ("l", "q").
        edges_one = {
            ("a", "b"): 10,
            ("b", "c"): 20,
            ("l", "m"): 30
        }
        edges_two = {
            ("a", "b"): 10,
            ("b", "c"): 20,
            ("l", "q"): 30
        }
        one = UndirectedGraph(edges=edges_one)
        two = UndirectedGraph(edges=edges_two)
        self.assertNotEqual(one, two, "Should check different graphs.")
        # Differ in the weight of vertex "a".
        vertices_one = {
            "a": 2,
            "b": 3
        }
        vertices_two = {
            "a": 1,
            "b": 3
        }
        one = UndirectedGraph(vertices=vertices_one)
        two = UndirectedGraph(vertices=vertices_two)
        self.assertNotEqual(one, two, "Should check different graphs.")
        # A copy stops being equal once a vertex weight is changed.
        base = {
            ("a", "b"): 10,
            ("b", "c"): 20,
            ("l", "m"): 30
        }
        one = UndirectedGraph(edges=base)
        two = one.copy()
        two.set_vertex_weight("a", 10)
        self.assertNotEqual(one, two, "Should check different graphs.")
if __name__ == "__main__":
    main()  # run this test module directly with the unittest runner
|
nilq/baby-python
|
python
|
from setuptools import setup, Extension, find_packages
import os
import glob

# Collect every C++ and Cython source under src/.
# NOTE(review): compiling the .pyx files requires Cython at build time.
sources = []
sources += glob.glob("src/*.cpp")
sources += glob.glob("src/*.pyx")

root_dir = os.path.abspath(os.path.dirname(__file__))

ext = Extension("factorizer",
                sources = sources,
                language = "c++",
                extra_compile_args = ["-v", "-std=c++14", "-Wall", "-O3", "-lboost_system"],
                extra_link_args = ["-std=c++14"]
                )

# Long description and dependency list come from files next to setup.py.
with open(os.path.join(root_dir, 'README.md'), "r") as fp:
    long_description = fp.read()
with open(os.path.join(root_dir, 'requirements.txt'), "r") as fp:
    install_requires = fp.read().splitlines()

setup(
    name = "factorizer",
    version = "0.9.6",
    author = "Fulltea",
    author_email = "rikuta@furutan.com",
    long_description = long_description,
    long_description_content_type="text/markdown",
    url = "https://github.com/FullteaOfEEIC/factorizer",
    # NOTE(review): find_packages(where="src") combined with mapping the
    # "factorizer" package itself to "src" looks inconsistent — confirm
    # the intended package layout.
    packages = find_packages(where="src"),
    package_dir = {
        "factorizer": "src"
    },
    install_requires = install_requires,
    ext_modules = [ext]
)

# Remove the Cython-generated C++ file so later builds regenerate it.
if os.path.exists(os.path.join("src", "factorizer.cpp")):
    os.remove(os.path.join("src", "factorizer.cpp"))
|
nilq/baby-python
|
python
|
import os
import datetime

# Pillow exposes PIL.Image; fall back to the old standalone (pre-Pillow)
# Image/ImageOps modules when PIL is not importable.
try:
    from PIL import Image, ImageOps
except ImportError:
    import Image
    import ImageOps

from ckeditor import settings as ck_settings
from .common import get_media_url
def get_available_name(name):
    """Return a path based on *name* that is free on the filesystem.

    Underscores are appended to the file root (before the extension, if
    any) until the resulting path does not already exist.
    """
    directory, base = os.path.split(name)
    root, ext = os.path.splitext(base)
    # Keep extending the root until the candidate path is unused.
    while os.path.exists(name):
        root += '_'
        name = os.path.join(directory, root + ext)
    return name
def get_thumb_filename(file_name):
    """Insert ``_thumb`` between the file root and its extension."""
    root, ext = os.path.splitext(file_name)
    return '{0}_thumb{1}'.format(root, ext)
def create_thumbnail(filename):
    """Create a ``<name>_thumb<ext>`` thumbnail for *filename*.

    The image is converted to RGB when needed, scaled and cropped to
    ck_settings.THUMBNAIL_SIZE, saved next to the original, and the
    media URL of the original image is returned.
    """
    image = Image.open(filename)
    # Convert to RGB if necessary
    # Thanks to Limodou on DjangoSnippets.org
    # http://www.djangosnippets.org/snippets/20/
    if image.mode not in ('L', 'RGB'):
        image = image.convert('RGB')
    # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter.
    # Fall back for very old PIL versions that only define ANTIALIAS.
    resample = getattr(Image, 'LANCZOS', None) or getattr(Image, 'ANTIALIAS')
    # scale and crop to thumbnail
    imagefit = ImageOps.fit(image, ck_settings.THUMBNAIL_SIZE, resample)
    imagefit.save(get_thumb_filename(filename))
    return get_media_url(filename)
def get_upload_filename(upload_name, user):
    """Build a collision-free destination path for an uploaded file.

    Files land under ck_settings.UPLOAD_PATH, optionally segregated per
    user (when ck_settings.RESTRICT_BY_USER) and then by date (Y/m/d).
    """
    # If CKEDITOR_RESTRICT_BY_USER is True upload file to user specific path.
    if ck_settings.RESTRICT_BY_USER:
        user_path = user.username
    else:
        user_path = ''
    # Generate date based path to put uploaded file.
    date_path = datetime.datetime.now().strftime('%Y/%m/%d')
    # Complete upload path (upload_path + user_path + date_path).
    upload_path = os.path.join(ck_settings.UPLOAD_PATH, user_path, date_path)
    # Create the directory if needed. Checking existence and then creating
    # is racy when two uploads arrive at once, so tolerate the
    # "already exists" error instead of failing the second request.
    try:
        os.makedirs(upload_path)
    except OSError:
        if not os.path.isdir(upload_path):
            raise
    # Get available name and return.
    return get_available_name(os.path.join(upload_path, upload_name))
|
nilq/baby-python
|
python
|
# Generated by Django 1.11.23 on 2019-08-19 01:00
from django.conf import settings
from django.db import migrations
def set_site_domain_based_on_setting(apps, schema_editor):
    """Point the sites-framework Site record's domain at settings.SITE_DOMAIN."""
    Site = apps.get_model('sites', 'Site')
    # The site domain is used to build URLs in some places, such as in password
    # reset emails, and 'view on site' links in the admin site's blog post edit
    # view. Thus, the domain should correspond to the domain actually being
    # used by the current environment: production, staging, or development.
    #
    # Previously (migration 0001) we hardcoded the domain to
    # 'coralnet.ucsd.edu'. Now we set the domain to the environment-dependent
    # settings.SITE_DOMAIN.
    #
    # Note that Django doesn't seem to use this site domain in testing
    # environments. Tests will always use a domain of 'testserver' or something
    # like that, and the tests should 'just work' that way.
    site = Site.objects.get(pk=settings.SITE_ID)
    site.domain = settings.SITE_DOMAIN
    site.save()
class Migration(migrations.Migration):
    """Data migration: set the Site domain from the environment's settings."""

    dependencies = [
        ('lib', '0001_set_site_name'),
    ]

    # Reverse operation is a no-op. The forward operation doesn't care if the
    # domain is already set correctly.
    operations = [
        migrations.RunPython(
            set_site_domain_based_on_setting, migrations.RunPython.noop),
    ]
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import csv
import re
from rdkit.Chem import AllChem
from rdkit import Chem
from rdkit import DataStructs
compounds = {}  # global: compound id -> SMARTS, populated by load_compounds()
def load_compounds(filename):
    """Load a tab-separated compound file and map id -> SMARTS.

    Rows whose InChI cannot be converted by RDKit are counted and skipped;
    rows with a blank structure map to "". Counts are printed at the end.

    Args:
        filename: path to a TSV with 'id' and 'structure' (InChI) columns.

    Returns:
        dict mapping compound id to a SMARTS string ("" for blanks).
    """
    comps = {}
    bad_count = 0
    blank_count = 0
    with open(filename) as csv_file:
        csvr = csv.DictReader(csv_file, delimiter='\t')
        for row in csvr:
            id, inchi = (row['id'], row['structure'])
            if inchi:
                # print( "input row {0} {1}".format( id, inchi ) )
                try:
                    smarts = Chem.MolToSmarts(Chem.MolFromInchi(inchi))
                    comps[id] = smarts
                    # print( "output row {0} {1} {2}".format( id, inchi, smarts ) )
                except Exception:
                    # RDKit raises on unparseable InChI; count and move on
                    # rather than aborting the whole load.
                    # print( "input row {0} {1}".format( id, inchi ) )
                    # print( "bizarre", sys.exc_info()[0] )
                    bad_count = bad_count + 1
            else:
                comps[id] = ""
                blank_count = blank_count + 1
    print("# bad inputs count: {0}".format(bad_count))
    print("# blank inputs count: {0}".format(blank_count))
    return(comps)
def comp_lookup(comp_id):
    """Return the SMARTS cached for *comp_id*, or None when unknown."""
    return compounds.get(comp_id)
def load_reactions(filename):
    """Load reactions from a TSV and build RDKit reaction fingerprints.

    For each non-obsolete reaction with a stoichiometry, every compound id
    is replaced by its SMARTS (via comp_lookup); reactions referencing any
    unknown/blank compound are skipped. A reaction SMARTS of the form
    "left.side>>right.side" is fingerprinted two ways.

    Args:
        filename: path to a TSV with 'id', 'stoichiometry' and
            'is_obsolete' columns.

    Returns:
        (rxns, diff_fps): two dicts keyed by reaction id, holding the
        structural and the difference fingerprints respectively.
    """
    rxns = {}
    diff_fps = {}
    obsolete_count = 0
    with open(filename) as csv_file:
        csvr = csv.DictReader(csv_file, delimiter='\t')
        # for each reaction
        for row in csvr:
            rxn_id, stoich, is_obsolete = (row['id'], row['stoichiometry'], row['is_obsolete'])
            if int(is_obsolete) > 0:
                obsolete_count = obsolete_count+1
                continue
            # print( "{0} {1}".format( id, stoich) )
            if stoich:  # for now, skip blank stoichiometries (if any)
                left_side_compounds = []
                right_side_compounds = []
                smarts = None
                for cstruct in stoich.split(';'):
                    # print( " cstruct: {0}".format( cstruct ) )
                    # Split on ':' but keep quoted (possibly escaped) names
                    # intact; fields are n:compid:state:x:name.
                    n, compid, state, x, name = re.findall(r'(?:[^:"]|"(?:\\.|[^"])*")+', cstruct)
                    # print( " {0}: {1} {2} {3} {4}".format( cstruct, n, compid, state, name ) )
                    smarts = comp_lookup(compid)
                    if not smarts or (smarts == ""):
                        # Unknown or blank compound: abandon this reaction.
                        smarts = None
                        break
                    # |n| copies of the compound; zero-coefficient entries
                    # still contribute one copy.
                    copies = int(abs(float(n)))
                    if copies == 0:
                        copies = copies + 1
                    # Negative coefficients are reactants, positive products.
                    if float(n) < 0:
                        for i in range(0, copies):
                            left_side_compounds.append(smarts)
                    else:
                        for i in range(0, copies):
                            right_side_compounds.append(smarts)
                if smarts is not None:
                    # print( "left" )
                    # pprint( left_side_compounds )
                    # print( "right" )
                    # pprint( right_side_compounds )
                    rxn_string = ".".join(left_side_compounds) + ">>" + \
                        ".".join(right_side_compounds)
                    # print( "rxn string {0}".format( rxn_string ) )
                    fingerprint = AllChem.CreateStructuralFingerprintForReaction(AllChem.ReactionFromSmarts(rxn_string))
                    # pprint( fingerprint )
                    diff_fingerprint = AllChem.CreateDifferenceFingerprintForReaction(
                        AllChem.ReactionFromSmarts(rxn_string))
                    # pprint( diff_fingerprint )
                    rxns[rxn_id] = fingerprint
                    diff_fps[rxn_id] = diff_fingerprint
    print("# obsolete_count = {0}".format(obsolete_count))
    return(rxns, diff_fps)
# First load compounds and convert to SMARTS and put in table
# compounds = load_compounds( "compounds.tsv" )
compounds = load_compounds("new_compounds.tsv")
# pprint( compounds )
# Next, load reactions, capture reaction strings and replace compound ids with SMARTS
reactions, diffs = load_reactions("reactions.tsv")
rxn_list = list(reactions.keys())  # list() required for python 3
num_rxns = len(rxn_list)
# num_rxns = 10000
# Print pairwise similarity for every reaction pair.
# NOTE(review): O(n^2) over all reactions — fine for small sets, slow for
# large ones.
for i in range(0, num_rxns-1):
    for j in range(i+1, num_rxns):
        rxn_a = rxn_list[i]
        rxn_b = rxn_list[j]
        print("{0} {1} {2} {3}".format(rxn_a, rxn_b,
              DataStructs.FingerprintSimilarity(reactions[rxn_a],
                                                reactions[rxn_b]),
              DataStructs.cDataStructs.TanimotoSimilarity(diffs[rxn_a],
                                                          diffs[rxn_b])
              ))
|
nilq/baby-python
|
python
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.