content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
from collections import defaultdict
import pandas as pd
import networkx as nx
def get_uniprot2gene_map(dataframe):
"""
assumes: dataframe = dataframe.fillna('empty')
"""
my_columns = dataframe[['Gene','UniProt']]
u2g_map = defaultdict(set)
for row in my_columns.itertuples():
idx,gene,uniprot = row
u2g_map[uniprot].add(gene)
for u,g in u2g_map.iteritems():
u2g_map[u] = sorted(list(g))
return dict(u2g_map)
def get_gene2uniprot_map(dataframe):
"""
assumes: dataframe = dataframe.fillna('empty')
"""
my_columns = dataframe[['Gene','UniProt']]
g2u_map = defaultdict(set)
for row in my_columns.itertuples():
idx,gene,uniprot = row
g2u_map[gene].add(uniprot)
for g,u in g2u_map.iteritems():
g2u_map[g] = sorted(list(u))
return dict(g2u_map) | [
6738,
17268,
1330,
4277,
11600,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
11748,
3127,
87,
355,
299,
87,
628,
198,
198,
4299,
651,
62,
403,
541,
10599,
17,
70,
1734,
62,
8899,
7,
7890,
14535,
2599,
198,
220,
220,
220,
3... | 1.9182 | 489 |
import json
from presqt.utilities import read_file
| [
11748,
33918,
198,
198,
6738,
906,
39568,
13,
315,
2410,
1330,
1100,
62,
7753,
628,
628
] | 3.4375 | 16 |
# -*- coding: UTF-8 -*-
# Copyright 2012 Luc Saffre
# License: BSD (see file COPYING for details)
"""
No longer used.
"""
from __future__ import unicode_literals
if False:
from lino.history import blogger
#
DEMOS = blogger.ticket("lino.pr", "Demo Sites", """
""")
CMS = blogger.ticket("cms", "Lino as a CMS", """
Managing Plain Web Content.
First proof of concept of
:mod:`lino_xl.lib.pages`
""")
CHANGES = blogger.ticket("lino.dev", "Documenting code changes", """
Now that the :mod:`lino_xl.lib.pages` has passed the proof of
concept phase I started a new attempt to make it easier to
write code change reports, and to find them back when needed.
The current blog system isn't bad, but it has several disadvantages:
- documenting releases is difficult
- no way to make dynamic queries
""")
blogger.ticket("lino.core", "Detail of VirtualTable ",
"""
It seems that `detail_layout` doesn't work on `VirtualTable`.
""")
COSI = blogger.ticket("lino.cosi", "Simple Belgian Accounting", """
First proof of concept of the modules
:mod:`lino.modlib.sales`,
:mod:`lino.modlib.ledger`,
:mod:`lino.modlib.finan` ...
""")
#
blogger.set_date(20121221)
blogger.entry(DEMOS, 0152, "",
"""
The :mod:`garble <lino_welfare.modlib.pcsw.management.commands.garble>` command
now has an option `--noinput`.
The reason for this change is that on lino-framework.org I have a batch
command to run the `initdb_demo` script of all demo sites.
And one of these scripts also calls `garble`, causing a
confirmation to be asked somewhere in the middle of the process.
""")
DCC = blogger.entry(CHANGES, 0152, "Documenting code changes",
"""
Wrote a new module :mod:`lino.modlib.codechanges`,
with a virtual table `CodeChanges`
(:menuselection:`Explorer --> System --> Code Changes`)
displays a list of all changes.
""")
#~ blogger.entry(CHANGES,1157,"Documenting code changes (continued)",
blogger.follow(DCC, 1157,
"""
Continued on module :mod:`lino.modlib.codechanges`.
I abandoned a first approach which used a `changes.py`
file in each module because
code changes must be documented in *one central place
per developer*, not per module.
The next approach is using a package :mod:`lino.history`.
This package is importable Python code where the developer
writes down what he does. The first example is
:srcref:`/lino/lino/history/luc201212.py`,
which contains a report, in Python syntax,
about my work in December 2012 (since yesterday).
A first version just stored these objects in memory
and used the existing CodeChanges table.
While working on this I understand that this system can also
be just an intermediate solution on our way to do all this
directly in :mod:`lino.projects.presto`.
So the virtual table CodeChanges goes away,
and a fixture :mod:`lino.projects.presto.fixtures.history`
imports the :mod:`lino.history` package and yields
them to the deserializer.
""")
blogger.set_date(20121223)
blogger.follow(DCC, 933, """
Continued in :mod:`lino.projects.presto.fixtures.history`.
Side note:
while reading about `tags <http://mercurial.selenic.com/wiki/Tag>`_
in Mercurial I noted that
MoinMoin produces beautiful results.
They have even a bug tracker:
http://moinmo.in/MoinMoinBugs
""")
blogger.entry(DEMOS, 1722, "demos at lino-framework.org still broken", """
There were still a few bugs in the online demo sites.
NameError "global name 'pages' is not defined".
Sphinx 0.6.6 (distributed with Debian Squeeze)
didn't yet have a module
`sphinx.util.nodes` with a function
`split_explicit_title`.
This caused an ImportError. :mod:`lino.utils.restify`
now includes the few lines of code copied from
a newer Sphinx version.
""")
blogger.entry(CMS, 2304, "Started template inheritance", """
The sidebar doesn't yet work.
The best way to solve this is probably using template inheritance.
So in a first step I started to use it,
as described in http://jinja.pocoo.org/docs/api/#loaders,
by defining
an `Environment` instance and my own loader
(in :mod:`lino.core.web`).
I also replaced Django's template engine by Jinja,
as explained in
`Using an alternative template language
<https://docs.djangoproject.com/en/dev/ref/templates/api/#using-an-alternative-template-language>`_.
Lino used Django's template engine only for the mandatory
`500.html` and `404.html` templates.
All this is really great!
I had never used templates because Django's
engine doesn't allow function calls.
In the beginning when I discovered Django,
I felt clearly that this isn't my thing.
Cheetah had this feature, and I need it to generate `linolib.js`,
but I never really fell in love with Cheetah.
I plan to replace this one also by Jinja soon.
I did hear about Jinja, too,
but I just didn't recognize that this was the door to a great new world.
""")
blogger.entry(CMS, 1037, "Sidebar", """
As if to confirm my decision to throw away my own :mod:`lino.utils.memo`,
the Jinja documentation has an entry `Highlighting Active Menu Items
<http://jinja.pocoo.org/docs/tricks/#highlighting-active-menu-items>`_
which is a beginning of the answer to my initial issue.
""")
blogger.set_date(20121227)
blogger.entry(COSI, 728, "Detail Layout for BankStatement", """
There were no workflow buttons in the Detail Layout of
:class:`lino.modlib.finan.models.BankStatement`.
""")
blogger.entry(COSI, 805, "Preferred width of ChoiceList comboboxes", """
:mod:`lino.ui.extjs3.ext_elems`
now adds a hard-coded value to the preferred_width
of ChoiceList fields to account for the width of the trigger button.
""")
blogger.entry(CMS, 1330, "Miscellaneous", """
Still experimenting with the big question on
how to write/generate a user manual for Lino application.
New vision for pages.Page : removed field `language`, and
title and body are now babel fields.
Only one record per ref.
""")
blogger.set_date(20130109)
blogger.entry(CMS, 1607, "The next Lino production site?", """
Started a new Lino site that runs at
`http://wwwbeta.lino-framework.org`_.
But don't (for the moment) waste your time to look at it.
My vision is to make this the first satisfying community development platform in the world.
And I agree that rationally speaking it is pure nonsense to believe
that I could make something better than Googlecode, Sourceforge or Github.
That's why I don't yet write very much about what I'm doing.
""")
| [
2,
532,
9,
12,
19617,
25,
41002,
12,
23,
532,
9,
12,
198,
2,
15069,
2321,
7598,
311,
2001,
260,
198,
2,
13789,
25,
347,
10305,
357,
3826,
2393,
27975,
45761,
329,
3307,
8,
198,
198,
37811,
198,
2949,
2392,
973,
13,
198,
37811,
1... | 3.14326 | 2,129 |
from flask_wtf import FlaskForm
from flask_wtf.file import FileField, FileAllowed
from flask_bootstrap import Bootstrap
from wtforms import StringField, PasswordField, SubmitField, BooleanField, TextAreaField
from wtforms.validators import InputRequired, Email, Length, ValidationError, URL
from app import *
| [
6738,
42903,
62,
86,
27110,
1330,
46947,
8479,
198,
6738,
42903,
62,
86,
27110,
13,
7753,
1330,
9220,
15878,
11,
9220,
3237,
6972,
198,
6738,
42903,
62,
18769,
26418,
1330,
18892,
26418,
198,
6738,
266,
83,
23914,
1330,
10903,
15878,
11... | 3.829268 | 82 |
import pandas as pd
import numpy as np
from nolearn.dbn import DBN
import timeit
train_evaluate_model() | [
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
645,
35720,
13,
67,
9374,
1330,
360,
15766,
198,
11748,
640,
270,
198,
198,
27432,
62,
49786,
62,
19849,
3419
] | 3.058824 | 34 |
import unittest
from linguine.corpus import Corpus
from linguine.ops.remove_stopwords import RemoveStopwords
from linguine.ops.word_tokenize import WordTokenizeWhitespacePunct
if __name__ == '__main__':
unittest.main()
| [
11748,
555,
715,
395,
198,
198,
6738,
20280,
500,
13,
10215,
79,
385,
1330,
44874,
198,
6738,
20280,
500,
13,
2840,
13,
28956,
62,
11338,
10879,
1330,
17220,
19485,
10879,
198,
6738,
20280,
500,
13,
2840,
13,
4775,
62,
30001,
1096,
13... | 3.067568 | 74 |
# Generated by the protocol buffer compiler. DO NOT EDIT!
from google.protobuf import descriptor
from google.protobuf import message
from google.protobuf import reflection
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
DESCRIPTOR = descriptor.FileDescriptor(
name='student.proto',
package='',
serialized_pb='\n\rstudent.proto\"h\n\x07Student\x12\n\n\x02id\x18\x01 \x02(\x05\x12\x12\n\nfirst_name\x18\x02 \x02(\t\x12\x11\n\tlast_name\x18\x03 \x02(\t\x12\x10\n\x08\x63omments\x18\x04 \x01(\t\x12\x18\n\x07\x63ourses\x18\x05 \x03(\x0b\x32\x07.Course\"%\n\x06\x43ourse\x12\x0c\n\x04name\x18\x01 \x02(\t\x12\r\n\x05marks\x18\x02 \x01(\x05')
_STUDENT = descriptor.Descriptor(
name='Student',
full_name='Student',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='id', full_name='Student.id', index=0,
number=1, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='first_name', full_name='Student.first_name', index=1,
number=2, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='last_name', full_name='Student.last_name', index=2,
number=3, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='comments', full_name='Student.comments', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='courses', full_name='Student.courses', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=17,
serialized_end=121,
)
_COURSE = descriptor.Descriptor(
name='Course',
full_name='Course',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='name', full_name='Course.name', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='marks', full_name='Course.marks', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=123,
serialized_end=160,
)
_STUDENT.fields_by_name['courses'].message_type = _COURSE
DESCRIPTOR.message_types_by_name['Student'] = _STUDENT
DESCRIPTOR.message_types_by_name['Course'] = _COURSE
# @@protoc_insertion_point(module_scope)
| [
2,
2980,
515,
416,
262,
8435,
11876,
17050,
13,
220,
8410,
5626,
48483,
0,
198,
198,
6738,
23645,
13,
11235,
672,
3046,
1330,
43087,
198,
6738,
23645,
13,
11235,
672,
3046,
1330,
3275,
198,
6738,
23645,
13,
11235,
672,
3046,
1330,
145... | 2.39373 | 1,595 |
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
#standard_library.install_aliases()
from builtins import *
#can be mocked
import datetime
| [
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
2003,
1330,
3210,
... | 3.985507 | 69 |
#!/usr/bin/env python
#
# Copyright 2007 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Base class and constants for app identity stubs.
The module offers following objects available for app identity stubs:
Constants:
RSA_LIB_INSTALLED: boolean, pure-Python crypto enabled/disabled.
CRYPTO_LIB_INSTALLED: boolean, C crypto enabled/disabled.
APP_SERVICE_ACCOUNT_NAME: service account hardcoded in the stubs.
APP_DEFAULT_GCS_BUCKET_NAME: GCS bucket hardcoded in the stubs.
X509_PUBLIC_CERT: public certificate hardcoded in the stubs.
Classes:
AppIdentityServiceStubBase: base for app identity stub.
"""
import time
import mock
import rsa
from google.appengine.api import apiproxy_stub
from google.appengine.api import stublib
APP_SERVICE_ACCOUNT_NAME = 'test@localhost'
APP_DEFAULT_GCS_BUCKET_NAME = 'app_default_bucket'
SIGNING_KEY_NAME = 'key'
N = 19119371788959611760073322421014045870056498252163411380847152703712917776733759011400972099255719579701566470175077491500050513917658074590935646529525468755348555932670175295728802986097707368373781743941167574738113348515272061138933984990014969297930973127363812200790406743271047572192133912023914306041356562363557723417403707408838823620411045628159183655215061768071407845537324145892973481372872161981015237572556138317222082306397041309823528068650373958169977675424007883635551170458356632131122901683151395297447872184074888239102348331222079943386530179883880518236689216575776729057173406091195993394637
MODULUS_BYTES = 256
E = 65537
D = 16986504444572720056487621821047100642841595850137583213470349776864799280835251113078612103869013355016302383270733509621770011190160658118800356360958694229960556902751935956316359959542321272425222634888969943798180994410031448370776358545990991384123912313866752051562052322103544805811361355593091450379904792608637886965065110019212136239200637553477192566763015004249754677600683846556806159369233241157779976231822757855748068765507787598014034587835400718727569389998321277712761796543890788269130617890866139616903097422259980026836628018133574943835504630997228592718738382001678104796538128020421537193913
X509_PUBLIC_CERT = """
-----BEGIN CERTIFICATE-----
MIIC/jCCAeagAwIBAgIIQTBFcRw3moMwDQYJKoZIhvcNAQEFBQAwIjEgMB4GA1UE
AxMXcm9ib3RqYXZhLmEuYXBwc3BvdC5jb20wHhcNMTEwMjIzMTUwNzQ5WhcNMTEw
MjI0MTYwNzQ5WjAiMSAwHgYDVQQDExdyb2JvdGphdmEuYS5hcHBzcG90LmNvbTCC
ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJd0YJCQWvQMa+7L/orCt3D0
hVtkdAkeGSikuT4U7mNrxBuOaAbxCIGhRbUe2p+uvRF6MZtLvoU1h9qEFo/wAVDO
HN4WHhw3VLl/OVuredRfe8bBTi0KqdgUBrKr8V61n26N3B4Ma9dkTMbcODC/XCfP
IRJnTIf4Z1vnoEfWQEJDfW9QLJFyJF17hpp9l5S1uuMJBxjYMsZ3ExLqSFhM7IbN
1PDBAb6zGtI7b9AVP+gxS1hjXiJoZA32IWINAZiPV+0k925ecsV0BkI0zV4Ta06F
JexNx040y5ivr4C214GRUM3UKihirTcEOBS1a7SRi5wCPh/wT0A8gN6NNbTNjc0C
AwEAAaM4MDYwDAYDVR0TAQH/BAIwADAOBgNVHQ8BAf8EBAMCB4AwFgYDVR0lAQH/
BAwwCgYIKwYBBQUHAwIwDQYJKoZIhvcNAQEFBQADggEBAD+h2D+XGIHWMwPCA2DN
JgMhN1yTTJ8dtwbiQIhfy8xjOJbrzZaSEX8g2gDm50qaEl5TYHHr2zvAI1UMWdR4
nx9TN7I9u3GoOcQsmn9TaOKkBDpMv8sPtFBal3AR5PwR5Sq8/4L/M22LX/TN0eIF
Y4LnkW+X/h442N8a1oXn05UYtFo+p/6emZb1S84WZAnONGtF5D1Z6HuX4ikDI5m+
iZbwm47mLkV8yuTZGKI1gJsWmAsElPkoWVy2X0t69ecBOYyn3wMmQhkLk2+7lLlD
/c4kygP/941fe1Wb/T9yGeBXFwEvJ4jWbX93Q4Xhk9UgHlso9xkCu9QeWFvJqufR
5Cc=
-----END CERTIFICATE-----
"""
PREFIX = '3031300d060960864801650304020105000420'
LEN_OF_PREFIX = 19
HEADER1 = '0001'
HEADER2 = '00'
PADDING = 'ff'
LENGTH_OF_SHA256_HASH = 32
class AppIdentityServiceStubBase(apiproxy_stub.APIProxyStub):
"""A base class for the AppIdentityService API stub.
Offers base implementations for following AppIdentityService RPCs:
* AppIdentityService::SignForApp ->
_Dynamic_SignForApp
* AppIdentityService::GetPublicCertificatesForApp ->
_Dynamic_GetPublicCertificatesForApp
* AppIdentityService::GetServiceAccountName ->
_Dynamic_GetServiceAccountName
* AppIdentityService::GetDefaultGcsBucketName ->
_Dynamic_GetDefaultGcsBucketName
* AppIdentityStubService::SetDefaultGcsBucketName ->
_Dynamic_SetDefaultGcsBucketName
* AppIdentityService::GetAccessToken ->
_Dynamic_GetAccessToken
And provides following helpers:
* SetDefaultGcsBucketName: set default bucket name from the request if
possible, set from `APP_DEFAULT_GCS_BUCKET_NAME`
constant otherwise.
* Clear: Reset state of the stub.
Not implemented and must be implemented in an inherited class:
* Create: static method, create a stub.
"""
THREADSAFE = True
def __init__(self, service_name='app_identity_service'):
"""Constructor."""
super(AppIdentityServiceStubBase, self).__init__(service_name)
self.__default_gcs_bucket_name = APP_DEFAULT_GCS_BUCKET_NAME
self.patchers = stublib.Patchers([
mock.patch(
'google.appengine.api.app_identity._metadata_server.'
'get_service_account_token',
side_effect=self._patch_get_service_account_token)])
def _Dynamic_SignForApp(self, request, response):
"""Implementation of AppIdentityService::SignForApp."""
bytes_to_sign = request.bytes_to_sign
signature_bytes = rsa.pkcs1.sign(
bytes_to_sign,
rsa.key.PrivateKey(N, E, D, 3, 5),
'SHA-256')
response.signature_bytes = signature_bytes
response.key_name = SIGNING_KEY_NAME
def _Dynamic_GetPublicCertificatesForApp(self, request, response):
"""Implementation of AppIdentityService::GetPublicCertificatesForApp."""
cert = response.public_certificate_list.add()
cert.key_name = SIGNING_KEY_NAME
cert.x509_certificate_pem = X509_PUBLIC_CERT
def _Dynamic_GetServiceAccountName(self, request, response):
"""Implementation of AppIdentityService::GetServiceAccountName."""
response.service_account_name = APP_SERVICE_ACCOUNT_NAME
def _Dynamic_GetDefaultGcsBucketName(self, unused_request, response):
"""Implementation of AppIdentityService::GetDefaultGcsBucketName."""
response.default_gcs_bucket_name = self.__default_gcs_bucket_name
def _Dynamic_SetDefaultGcsBucketName(self, request, unused_response):
"""Implementation of AppIdentityStubService::SetDefaultGcsBucketName."""
self.SetDefaultGcsBucketName(request.default_gcs_bucket_name)
def _patch_get_service_account_token(self, scopes, service_account=None):
"""test implementation for _metadata_server.get_service_account_token.
This API returns an invalid token, as the `dev_appserver` does not have
access to an actual service account. Subclasses override this function with
more useful implementations.
Args:
scopes: a list of oauth2 scopes.
service_account: the service account to get the token for
Returns:
Tuple of access token and expiration time in epoch
"""
token = ':'.join(scopes)
if service_account:
token += '.%s' % service_account
access_token = 'InvalidToken:%s:%s' % (token, time.time() % 100)
expiration_time = int(time.time()) + 1800
return access_token, expiration_time
@staticmethod
def Clear(self):
"""Resets the state on the App Identity stub."""
self.__default_gcs_bucket_name = APP_DEFAULT_GCS_BUCKET_NAME
stublib.Stub.register(AppIdentityServiceStubBase)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
198,
2,
15069,
4343,
3012,
11419,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
284... | 2.379247 | 3,267 |
import os, collections, threading, Queue, time
import win32api
import win32com.client
import pythoncom, pyHook
"""
MessageName: key down
Message: 256
Time: 112416317
Window: 197094
WindowName: Emacs/Python <ruttbe@LAGER> hookit.py
Ascii: 120 x
Key: X
KeyID: 88
ScanCode: 45
Extended: 0
Injected: 0
Alt 0
Transition 0
"""
# Globals
last_window = None
bufs = { 'Emacs': { 'active_window' : None,
'buf': [] },
'VS': { 'active_window' : None,
'buf': [] } }
valid_abbrev_chars = [chr(x) for x in range(ord('0'), ord('0') + 10)]
valid_abbrev_chars += [chr(x) for x in range(ord('A'), ord('A') + 26)]
valid_abbrev_chars += [chr(x) for x in range(ord('a'), ord('a') + 26)]
shell = win32com.client.Dispatch("WScript.Shell")
JUGGLER_DEFNS = os.getenv("JUGGLER_DEFNS")
JUGGLER_AUTOHOTKEY_SCRIPT = os.getenv("JUGGLER_AUTOHOTKEY_SCRIPT")
assert JUGGLER_DEFNS
assert JUGGLER_AUTOHOTKEY_SCRIPT
langs = 'global python javascript'.split()
expanding_now = False
# map from lang to abbrev to text
defns = collections.defaultdict(dict)
q = Queue.Queue()
helper = HelperThread(q)
helper.setDaemon(True)
helper.start()
# see http://ss64.com/vb/sendkeys.html or better yet https://msdn.microsoft.com/en-us/library/aa266279%28v=vs.60%29.aspx
"""
x bar7 foo foo foo foo foo foo
bar7
ff
lklkjlkj bar7
bar7
x y z bar7
if foo:
"""
# import win32ui
# wnd = win32ui.GetForegroundWindow()
# print wnd.GetWindowText()
if __name__ == "__main__": # when run as a script
main()
| [
11748,
28686,
11,
17268,
11,
4704,
278,
11,
4670,
518,
11,
640,
198,
11748,
1592,
2624,
15042,
198,
11748,
1592,
2624,
785,
13,
16366,
198,
11748,
21015,
785,
11,
12972,
39,
566,
198,
198,
37811,
198,
12837,
5376,
25,
1994,
866,
198,
... | 2.354839 | 651 |
# Copyright 2020 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import tf_euler
| [
2,
15069,
12131,
41992,
4912,
31703,
15302,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351... | 4.24 | 200 |
from nltk.corpus import wordnet as wn
from nltk.stem import WordNetLemmatizer
from nltk.corpus import wordnet
import re
import warnings
warnings.filterwarnings("ignore") | [
6738,
299,
2528,
74,
13,
10215,
79,
385,
1330,
1573,
3262,
355,
266,
77,
198,
198,
6738,
299,
2528,
74,
13,
927,
1330,
9678,
7934,
43,
368,
6759,
7509,
198,
198,
6738,
299,
2528,
74,
13,
10215,
79,
385,
1330,
1573,
3262,
198,
1174... | 2.932203 | 59 |
import os
from shutil import which
from collections import OrderedDict
# Handle state dict from early PyTorch versions
| [
11748,
28686,
198,
6738,
4423,
346,
1330,
543,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
628,
198,
198,
2,
33141,
1181,
8633,
422,
1903,
9485,
15884,
354,
6300,
628,
628
] | 4.032258 | 31 |
import numpy as np
from numpy import einsum
import scipy as sp
from Florence.VariationalPrinciple import VariationalPrinciple
from Florence import QuadratureRule, FunctionSpace
from Florence.FiniteElements.LocalAssembly.KinematicMeasures import *
from Florence.FiniteElements.LocalAssembly._KinematicMeasures_ import _KinematicMeasures_
from Florence.MaterialLibrary.MaterialBase import Material
from Florence.Tensor import trace, Voigt, makezero, issymetric
norm = np.linalg.norm
# from numba import jit
# np.set_printoptions(precision=16)
# @jit(nopython=True)
# svd = np.linalg.svd
svd = svd_rv
# delta = 1e-3
# def Jr(J):
# return 0.5 * (J + np.sqrt(J**2 + delta**2))
# def dJrdF(F):
# J = np.linalg.det(F)
# return 0.5 * (1. + J / np.sqrt(J**2 + delta**2)) * dJdF(F)
# def d2JrdFdF(F):
# J = np.linalg.det(F)
# djdf = dJdF(F)
# gJ = vec(djdf)
# dJrdF = 0.5 * (1. + J / np.sqrt(J**2 + delta**2)) * djdf
# gJr = vec(dJrdF)
# HJr = 0.5 * (1 + J / np.sqrt(J**2 + delta**2)) * d2JdFdF(F) + 0.5 * (delta**2 / (J**2 + delta**2)**(3./2.)) * np.outer(gJ,gJ)
# return HJr
class NeoHookeanF(Material):
"""The fundamental Neo-Hookean internal energy, described in Ogden et. al.
W(C) = mu/2*(C:I-3)- mu*lnJ + lamb/2*(J-1)**2
"""
class PixarNeoHookeanF(Material):
"""The Neo-Hookean internal energy, described in Smith et. al.
W(C) = mu/2*(C:I-3)- mu*(J-1) + lamb/2*(J-1)**2
"""
class MIPSF(Material):
"""The MIPS energy
W(F) = F:F/d/Jr^(2/d)
"""
class MIPSF2(Material):
"""The MIPS energy
W(F) = Fr:Fr/d/Jr^(2/d)
"""
class SymmetricDirichlet(Material):
""" Symmetric Dirichlet model
W(F) = 1/2*(F:F) + 1/2*(F**(-1):F**(-1))
"""
class ARAPF(Material):
"""The fundamental ARAP model
W_arap(F) = (F - R)**2
"""
class SymmetricARAPF(Material):
"""The fundamental ARAP model
W_arap(F) = (F - R)**2
"""
class Corotational(Material):
"""The fundamental ARAP model
W_arap(F) = (F - R)**2
"""
__all__ = ["FBasedDisplacementFormulation"]
| [
11748,
299,
32152,
355,
45941,
198,
6738,
299,
32152,
1330,
304,
1040,
388,
198,
11748,
629,
541,
88,
355,
599,
198,
6738,
28537,
13,
23907,
864,
42904,
2480,
1330,
15965,
864,
42904,
2480,
198,
6738,
28537,
1330,
20648,
81,
1300,
31929... | 2.145526 | 1,017 |
#
# Copyright (c) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from management_api.schemas.elements.models import model_name, model_version
from management_api.schemas.elements.names import endpoint_name, subject_name
from management_api.schemas.elements.resources import replicas, resources
endpoint_post_schema = {
"type": "object",
"title": "Endpoint POST Schema",
"required": [
"endpointName",
"modelName",
"modelVersion",
"subjectName"
],
"properties": {
"endpointName": endpoint_name,
"modelName": model_name,
"modelVersion": model_version,
"subjectName": subject_name,
"replicas": replicas,
"resources": resources
}
}
endpoint_delete_schema = {
"type": "object",
"title": "Endpoint DELETE Schema",
"required": [
"endpointName"
],
"properties": {
"endpointName": endpoint_name
}
}
resources['optional'] = True
endpoint_patch_schema = {
"type": "object",
"title": "Endpoint PATCH Schema",
"oneOf": [{
"required": [
"replicas"
]
},
{
"required": [
"modelName",
"modelVersion"
]
}
],
"properties": {
"replicas": replicas,
"modelName": model_name,
"modelVersion": model_version,
"resources": resources
}
}
| [
2,
198,
2,
15069,
357,
66,
8,
2864,
8180,
10501,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
... | 2.49359 | 780 |
# The Topical Guide
# Copyright 2010-2011 Brigham Young University
#
# This file is part of the Topical Guide <http://nlp.cs.byu.edu/topic_browser>.
#
# The Topical Guide is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# The Topical Guide is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License
# for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with the Topical Guide. If not, see <http://www.gnu.org/licenses/>.
#
# If you have inquiries regarding any further use of the Topical Guide, please
# contact the Copyright Licensing Office, Brigham Young University, 3760 HBLL,
# Provo, UT 84602, (801) 422-9339 or 422-3821, e-mail copyright@byu.edu.
from .top_n import TopNTopicNamer
from .tf_itf import TfitfTopicNamer
name_scheme_classes = [TopNTopicNamer, TfitfTopicNamer]
| [
198,
2,
383,
5849,
605,
10005,
198,
2,
15069,
3050,
12,
9804,
37434,
6960,
2059,
198,
2,
198,
2,
770,
2393,
318,
636,
286,
262,
5849,
605,
10005,
1279,
4023,
1378,
21283,
79,
13,
6359,
13,
1525,
84,
13,
15532,
14,
26652,
62,
40259... | 3.486726 | 339 |
from app import *
| [
6738,
598,
1330,
1635,
628
] | 3.8 | 5 |
# This is the api for object oriented interface
import numpy as np
from math import pi
from scipy import interpolate
# The function assumes uniform field
def curl_2D(ufield, vfield, clat, dlambda, dphi, planet_radius=6.378e+6):
"""
Assuming regular latitude and longitude [in degree] grid, compute the curl
of velocity on a pressure level in spherical coordinates.
"""
ans = np.zeros_like((ufield))
ans[1:-1, 1:-1] = (vfield[1:-1, 2:] - vfield[1:-1, :-2])/(2.*dlambda) - \
(ufield[2:, 1:-1] * clat[2:, np.newaxis] -
ufield[:-2, 1:-1] * clat[:-2, np.newaxis])/(2.*dphi)
ans[0, :] = 0.0
ans[-1, :] = 0.0
ans[1:-1, 0] = ((vfield[1:-1, 1] - vfield[1:-1, -1]) / (2. * dlambda) -
(ufield[2:, 0] * clat[2:] -
ufield[:-2, 0] * clat[:-2]) / (2. * dphi))
ans[1:-1, -1] = ((vfield[1:-1, 0] - vfield[1:-1, -2]) / (2. * dlambda) -
(ufield[2:, -1] * clat[2:] -
ufield[:-2, -1] * clat[:-2]) / (2. * dphi))
ans[1:-1, :] = ans[1:-1, :] / planet_radius / clat[1:-1, np.newaxis]
return ans
class BarotropicField(object):
"""
An object that deals with barotropic (2D) wind and/or PV fields
:param xlon: Longitude array in degree with dimension *nlon*.
:type xlon: sequence of array_like
:param ylat: Latitutde array in degree, monotonically increasing with dimension *nlat*
:type ylat: sequence of array_like
:param area: Differential area at each lon-lat grid points with dimension (nlat,nlon). If 'area=None': it will be initiated as area of uniform grid (in degree) on a spherical surface.
:type area: sequence of array_like
:param dphi: Differential length element along the lat grid with dimension nlat.
:type dphi: sequence of array_like
:param pv_field: Absolute vorticity field with dimension [nlat x nlon]. If 'pv_field=None': pv_field is expected to be computed with u,v,t field.
:type pv_field: sequence of array_like
:returns: an instance of the object BarotropicField
:example:
>>> barofield1 = BarotropicField(xlon, ylat, pv_field=abs_vorticity)
"""
def __init__(self, xlon, ylat, pv_field, area=None, dphi=None,
n_partitions=None, planet_radius=6.378e+6):
"""Create a windtempfield object.
**Arguments:**
*xlon*
Longitude array in degree with dimension [nlon].
*ylat*
Latitutde array in degree, monotonically increasing with dimension
[nlat].
*area*
Differential area at each lon-lat grid points with dimension
[nlat x nlon].
If None, it will be initiated as:
2.*pi*Earth_radius**2 *(np.cos(ylat[:,np.newaxis]*pi/180.)*dphi)/float(nlon) * np.ones((nlat,nlon)).
This would be problematic if the grids are not uniformly distributed in degree.
*dphi*
Differential length element along the lat grid with dimension nlat.
*pv_field*
Absolute vorticity field with dimension [nlat x nlon].
If none, pv_field is expected to be computed with u,v,t field.
"""
self.xlon = xlon
self.ylat = ylat
self.clat = np.abs(np.cos(np.deg2rad(ylat)))
self.nlon = xlon.size
self.nlat = ylat.size
self.planet_radius = planet_radius
if dphi is None:
self.dphi = pi/(self.nlat-1) * np.ones((self.nlat))
else:
self.dphi = dphi
if area is None:
self.area = 2.*pi*planet_radius**2*(np.cos(ylat[:, np.newaxis]*pi/180.)*self.dphi[:, np.newaxis])/float(self.nlon)*np.ones((self.nlat, self.nlon))
else:
self.area = area
self.pv_field = pv_field
if n_partitions is None:
self.n_partitions = self.nlat
else:
self.n_partitions = n_partitions
def equivalent_latitudes(self):
"""
Compute equivalent latitude with the *pv_field* stored in the object.
:returns: an numpy array with dimension (nlat) of equivalent latitude array.
:example:
>>> barofield1 = BarotropicField(xlon, ylat, pv_field=abs_vorticity)
>>> eqv_lat = barofield1.equivalent_latitudes()
"""
from hn2016_falwa import basis
pv_field = self.pv_field
area = self.area
ylat = self.ylat
planet_radius = self.planet_radius
self.eqvlat, dummy = basis.eqvlat(ylat, pv_field, area, self.n_partitions,
planet_radius=planet_radius)
return self.eqvlat
def lwa(self):
"""
Compute the finite-amplitude local wave activity based on the *equivalent_latitudes* and the *pv_field* stored in the object.
:returns: an 2-D numpy array with dimension (nlat,nlon) of local wave activity values.
:example:
>>> barofield1 = BarotropicField(xlon, ylat, pv_field=abs_vorticity)
>>> eqv_lat = barofield1.equivalent_latitudes() # This line is optional
>>> lwa = barofield1.lwa()
"""
from hn2016_falwa import basis
if self.eqvlat is None:
self.eqvlat = self.equivalent_latitudes(self)
lwa_ans, dummy = basis.lwa(self.nlon, self.nlat, self.pv_field, self.eqvlat,
self.planet_radius * self.clat * self.dphi)
return lwa_ans
# === Next is a class of 3D objects ===
class QGField(object):
"""
An object that deals with barotropic (2D) wind and/or PV fields
:param xlon: Longitude array in degree with dimension (*nlon*).
:type xlon: sequence of array_like
:param ylat: Latitutde array in degree, monotonically increasing with dimension (*nlat*)
:type ylat: sequence of array_like
:param zlev: Pseudoheight array in meters, monotonically increasing with dimension (*nlev*)
:type zlev: sequence of array_like
:param u_field: Zonal wind field in meters, with dimension (*nlev*,*nlat*,*nlon*).
:type u_field: sequence of array_like
:param v_field: Meridional wind field in meters, with dimension (*nlev*,*nlat*,*nlon*).
:type v_field: sequence of array_like
:param t_field: Temperature field in Kelvin, with dimension (*nlev*,*nlat*,*nlon*).
:type t_field: sequence of array_like
:param qgpv_field: Quasi-geostrophic potential vorticity field in 1/second, with dimension (*nlev*,*nlat*,*nlon*). If u_field, v_field and t_field are input, qgpv_field can be using the method compute_qgpv.
:type qgpv_field: sequence of array_like
:param area: Differential area at each lon-lat grid points with dimension (*nlat*,*nlon*). If 'area=None': it will be initiated as area of uniform grid (in degree) on a spherical surface.
:type area: sequence of array_like
:param dphi: Differential length element along the lat grid with dimension (*nlat*).
:type dphi: sequence of array_like
:param pv_field: Absolute vorticity field with dimension [nlat x nlon]. If 'pv_field=None': pv_field is expected to be computed with u,v,t field.
:type pv_field: sequence of array_like
:returns: an instance of the object BarotropicField
:example:
>>> qgfield1 = QGField(xlon, ylat, np.array([240.]), u, qgpv_field=QGPV)
"""
def __init__(self, xlon, ylat, zlev, u_field, v_field=None, t_field=None,
qgpv_field=None, area=None, dphi=None,
n_partitions=None, rkappa=287./1004., planet_radius=6.378e+6,
scale_height=7000.):
"""Create a windtempfield object.
**Arguments:**
*xlon*
Longitude array in degree with dimension [nlon].
*ylat*
Latitutde array in degree, monotonically increasing with dimension
[nlat].
*zlev*
Pseudoheight array in meters, monotonically increasing with dimension
[nlev].
*u_field*
Zonal wind field in meters, with dimension [nlev x nlat x nlon].
*v_field*
Meridional wind field in meters, with dimension [nlev x nlat x nlon].
*t_field*
Temperature field in Kelvin, with dimension [nlev x nlat x nlon].
*qgpv_field*
Quasi-geostrophic potential vorticity field in 1/second, with dimension
[nlev x nlat x nlon]. If u_field, v_field and t_field are input,
qgpv_field can be using the method compute_qgpv.
*area*
Differential area at each lon-lat grid points with dimension
[nlat x nlon].
If None, it will be initiated as:
2.*pi*Earth_radius**2 *(np.cos(ylat[:,np.newaxis]*pi/180.)*dphi)/float(nlon) * np.ones((nlat,nlon)).
This would be problematic if the grids are not uniformly distributed in degree.
*dphi*
Differential length element along the lat grid with dimension nlat.
*n_partitions*
Number of partitions used to compute equivalent latitude. If not
given, it will be assigned nlat.
"""
self.xlon = xlon
self.ylat = ylat
self.zlev = zlev
self.clat = np.abs(np.cos(np.deg2rad(ylat)))
self.nlon = xlon.size
self.nlat = ylat.size
self.nlev = zlev.size
self.planet_radius = planet_radius
if dphi is None:
self.dphi = pi/(self.nlat-1) * np.ones((self.nlat))
else:
self.dphi = dphi
if area is None:
self.area = 2.*pi*planet_radius**2*(np.cos(ylat[:, np.newaxis]*pi/180.)*self.dphi[:, np.newaxis])/float(self.nlon)*np.ones((self.nlat, self.nlon))
else:
self.area = area
self.qgpv_field = qgpv_field
if n_partitions is None:
self.n_partitions = self.nlat
else:
self.n_partitions = n_partitions
# First, check if the qgpv_field is present
print('check self.qgpv_field')
# print self.qgpv_field
if (qgpv_field is None) & (v_field is None):
raise ValueError('qgpv_field is missing.')
elif (qgpv_field is None):
print('Compute QGPV field from u and v field.')
# === Obtain potential temperature field ===
if t_field:
self.pt_field = t_field[:, :, :] * \
np.exp(rkappa * zlev[:, np.newaxis, np.newaxis]/scale_height)
# Interpolation
f_Thalf = interpolate.interp1d(zlev, self.pt_field.mean(axis=-1),
axis=0)
zlev_half = np.array([zlev[0] + 0.5*(zlev[1]-zlev[0])]*i \
for i in range(zlev.size * 2 + 1))
self.pt_field_half = f_Thalf(zlev_half) # dim = [2*nlev+1,nlat]
print('self.pt_field_half.shape')
print(self.pt_field_half.shape)
def equivalent_latitudes(self, domain_size='half_globe'): # Has to be changed since it is qgpv.
# Use half-globe?
"""
Compute equivalent latitude with the *pv_field* stored in the object.
:param domain_size: domain of grids to be used to compute equivalent latitude. It can he 'half_globe' or 'full_globe'.
:type domain_size: string
:returns: an numpy array with dimension (*nlev*,*nlat*) of equivalent latitude array.
:example:
>>> qgfield1 = QGField(xlon, ylat, np.array([240.]), u, qgpv_field=QGPV)
>>> qgfield_eqvlat = qgfield1.equivalent_latitudes(domain_size='half_globe')
"""
area = self.area
ylat = self.ylat
planet_radius = self.planet_radius
self.eqvlat = np.zeros((self.nlev, self.nlat))
for k in range(self.nlev):
pv_field = self.qgpv_field[k, ...]
if domain_size == 'half_globe':
nlat_s = int(self.nlat/2)
qref = np.zeros(self.nlat)
# --- Southern Hemisphere ---
# qref1 = eqv_lat_core(ylat[:nlat_s],vort[:nlat_s,:],area[:nlat_s,:],nlat_s,planet_radius=planet_radius)
qref[:nlat_s] = eqv_lat_core(ylat[:nlat_s], pv_field[:nlat_s,:],
area[:nlat_s, :], nlat_s)
# --- Northern Hemisphere ---
pv_field_inverted = -pv_field[::-1, :] # Added the minus sign, but gotta see if NL_North is affected
qref2 = eqv_lat_core(ylat[:nlat_s], pv_field_inverted[:nlat_s,:],
area[:nlat_s, :], nlat_s)
#qref2 = eqvlat(ylat[:nlat_s],vort2[:nlat_s,:],area[:nlat_s,:],nlat_s,planet_radius=planet_radius)
qref[-nlat_s:] = -qref2[::-1]
elif domain_size == 'full_globe':
qref = eqv_lat_core(ylat, pv_field, area, self.nlat,
planet_radius=planet_radius)
else:
raise ValueError('Domain size is not properly specified.')
self.eqvlat[k, :] = qref
return self.eqvlat
def lwa(self):
"""
Compute the finite-amplitude local wave activity on each pseudoheight layer based on the *equivalent_latitudes* and the *qgpv_field* stored in the object.
:returns: an 3-D numpy array with dimension (*nlev*,*nlat*,*nlon*) of local wave activity values.
:example:
>>> qgfield = QGField(xlon, ylat, np.array([240.]), u, qgpv_field=QGPV)
>>> qgfield_lwa = qgfield.lwa()
"""
try:
self.eqvlat
except:
self.eqvlat = self.equivalent_latitudes(domain_size='half_globe')
lwact = np.zeros((self.nlev, self.nlat, self.nlon))
for k in range(self.nlev):
pv_field = self.qgpv_field[k, :, :]
for j in np.arange(0, self.nlat-1):
vort_e = pv_field[:, :]-self.eqvlat[k, j]
vort_boo = np.zeros((self.nlat, self.nlon))
vort_boo[np.where(vort_e[:, :] < 0)] = -1
vort_boo[:j+1, :] = 0
vort_boo[np.where(vort_e[:j+1, :] > 0)] = 1
lwact[k, j, :] = np.sum(vort_e*vort_boo * self.planet_radius *
self.clat[:, np.newaxis] *
self.dphi[:, np.newaxis], axis=0)
return lwact
if __name__ == "__main__":
main()
| [
2,
770,
318,
262,
40391,
329,
2134,
25921,
7071,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
10688,
1330,
31028,
198,
6738,
629,
541,
88,
1330,
39555,
378,
628,
198,
2,
383,
2163,
18533,
8187,
2214,
198,
198,
4299,
29249,
62,
17,
... | 2.110545 | 6,866 |
token = 'VdINGcwGZsIvUTbuZl3PRTvX2MhkdieXFk9cnNRA4J8hP-jw1obnY6KCfXpRJFTA'
import lyricsgenius
genius = lyricsgenius.Genius(token)
muzik = input("Şarkı sözü ara:")
song = genius.search_song(muzik)
print(song.lyrics) | [
30001,
796,
705,
53,
67,
2751,
66,
86,
38,
57,
82,
45766,
3843,
11110,
57,
75,
18,
4805,
51,
85,
55,
17,
44,
71,
74,
11979,
55,
37,
74,
24,
31522,
45,
3861,
19,
41,
23,
71,
47,
12,
73,
86,
16,
672,
77,
56,
21,
36222,
69,
... | 1.836066 | 122 |
from pkg_resources import get_distribution
__version__ = get_distribution('vphas-bandmerge-standalone').version
| [
6738,
279,
10025,
62,
37540,
1330,
651,
62,
17080,
3890,
198,
198,
834,
9641,
834,
796,
651,
62,
17080,
3890,
10786,
85,
5902,
12,
3903,
647,
469,
12,
1481,
17749,
27691,
9641,
198
] | 3.424242 | 33 |
import skimage.data
import instance_occlsegm_lib
| [
11748,
1341,
9060,
13,
7890,
198,
198,
11748,
4554,
62,
420,
565,
325,
39870,
62,
8019,
628
] | 3 | 17 |
"""
This module is for your final visualization code.
One visualization per hypothesis question is required.
A framework for each type of visualization is provided.
"""
import matplotlib.pyplot as plt
import seaborn as sns
# Set specific parameters for the visualizations
large = 22; med = 16; small = 12
params = {'axes.titlesize': large,
'legend.fontsize': med,
'figure.figsize': (16, 10),
'axes.labelsize': med,
'xtick.labelsize': med,
'ytick.labelsize': med,
'figure.titlesize': large}
plt.rcParams.update(params)
plt.style.use('seaborn-whitegrid')
sns.set_style("white")
def overlapping_density(package=None, input_vars=None, target_vars=None):
"""
Set the characteristics of your overlapping density plot
All arguments are set to None purely as a filler right now
Function takes package name, input variables(categories), and target variable as input.
Returns a figure
Should be able to call this function in later visualization code.
PARAMETERS
:param package: should only take sns or matplotlib as inputs, any other value should throw and error
:param input_vars: should take the x variables/categories you want to plot
:param target_vars: the y variable of your plot, what you are comparing
:return: fig to be enhanced in subsequent visualization functions
"""
# Set size of figure
fig = plt.figure(figsize=(16, 10), dpi=80)
# Starter code for figuring out which package to use
if package == "sns":
for variable in input_vars:
sns.kdeplot(...)
elif package == 'matplotlib':
for variable in input_vars:
plt.plot(..., label=None, linewidth=None, color=None, figure = fig)
return fig
def boxplot_plot(package=None, input_vars=None, target_vars=None):
"""
Same specifications and requirements as overlapping density plot
Function takes package name, input variables(categories), and target variable as input.
Returns a figure
PARAMETERS
:param package: should only take sns or matplotlib as inputs, any other value should throw and error
:param input_vars: should take the x variables/categories you want to plot
:param target_vars: the y variable of your plot, what you are comparing
:return: fig to be enhanced in subsequent visualization functions
"""
plt.figure(figsize=(16, 10), dpi=80)
pass
def visualization_one(target_var = None, input_vars= None, output_image_name=None):
"""
The visualization functions are what is used to create each individual image.
The function should be repeatable if not generalizable
The function will call either the boxplot or density plot functions you wrote above
:param target_var:
:param input_vars:
:param output_image_name: the desired name for the image saved
:return: outputs a saved png file and returns a fig object for testing
"""
###
# Main chunk of code here
###
# Starter code for labeling the image
plt.xlabel(None, figure = fig)
plt.ylabel(None, figure = fig)
plt.title(None, figure= fig)
plt.legend()
# exporting the image to the img folder
plt.savefig(f'img/{output_image_name}.png', transparent = True, figure = fig)
return fig
# please fully flesh out this function to meet same specifications of visualization one | [
37811,
198,
1212,
8265,
318,
329,
534,
2457,
32704,
2438,
13,
198,
3198,
32704,
583,
14078,
1808,
318,
2672,
13,
198,
32,
9355,
329,
1123,
2099,
286,
32704,
318,
2810,
13,
198,
37811,
198,
198,
11748,
2603,
29487,
8019,
13,
9078,
2948... | 2.96144 | 1,167 |
"""
Purpose:
This file contains the logic to run magicwand experiments.
Copyright:
This research was developed with funding from the Defense Advanced Research Projects
Agency (DARPA) under Contract #HR0011-16-C-0060. This document was cleared for
release under Distribution Statement” A” (Approved for Public Release, Distribution
Unlimited). The views, opinions, and/or findings expressed are those of the authors
and should not be interpreted as representing the official views or policies of the
Department of Defense of the U.S. Government.
The Government has unlimited rights to use, modify, reproduce, release,
perform, display, or disclose computer software or computer software
documentation marked with this legend. Any reproduction of technical data,
computer software, or portions thereof marked with this legend must also
reproduce this marking.
MIT License
(C) 2020 Two Six Labs, LLC. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# Python Library Imports
import datetime
import json
import os
import shutil
import signal
import sys
import time
import logging
from functools import partial
from typing import Any, Dict, Union, Mapping, Type, Tuple, List
import pandas as pd # type: ignore
from magicwand.magicwand_components.attacks import *
from magicwand.magicwand_components.benign import *
from magicwand.magicwand_components.sensors import *
from magicwand.magicwand_components.suts import *
from magicwand.magicwand_components.mw_global import *
from magicwand.magicwand_utils.magicwand_utils import (
get_logger,
CIC_CONVERTER_DOCKER_IMAGE,
)
from magicwand.magicwand_config.config import Config
mw_components: Mapping[str, Type[MwComponent]] = {
"apachekill": Apachekill,
"sockstress": Sockstress,
"goloris": Goloris,
"sht_rudeadyet": Sht_rudeadyet,
"sht_slowread": Sht_slowread,
"sht_slowloris": Sht_slowloris,
"httpflood": Httpflood,
"synflood": Synflood,
"mw_locust": MwLocust,
"mw_rtt_sensor": MwRttSensor,
"mw_apache_wp": MwApacheWp,
}
valid_values = {
"attack": [
"apachekill",
"sockstress",
"goloris",
"sht_rudeadyet",
"sht_slowread",
"sht_slowloris",
"httpflood",
"synflood",
],
"benign": ["mw_locust"],
"sut": ["mw_apache_wp"],
"rtt": ["mw_rtt_sensor"],
}
def signal_handler(run_json: Dict[str, Any], sig: Any, frame: partial) -> Any:
"""
Purpose:
Catch Ctrl+C handler
Args:
run_json (String): Path to magicwand json
sig (Object): Signal Object
frame (Object): Frame Object
Returns:
status (Int): -1 if fail, 0 if success
"""
logging.info("You pressed Ctrl+C! Shutting down gracefully")
compose_file_string = run_json["compose_file_string"]
cmd = "docker-compose " + compose_file_string + " down"
status = os.system(cmd)
if status != 0:
logging.error("docker-compose failed")
sys.exit(-1)
logging.info("Run Canceled")
sys.exit(0)
def print_flow_split(cic_data: pd.DataFrame, logger: logging.Logger) -> None:
"""
Purpose:
Print out cic flow split
Args:
cic_data: Dataframe of the cic data
logger: MW Runner logger
Returns:
N/A
"""
# Print out attack benign split for each run
num_total = cic_data.shape[0]
num_benign = cic_data.loc[cic_data["Label"] == "client"].shape[0]
num_attacks = cic_data.loc[cic_data["Label"] == "attack"].shape[0]
num_benign_percent = round(float(num_benign / num_total), 2)
num_attacks_percent = round(float(num_attacks / num_total), 2)
logger.info("Benign Stats:")
logger.info(f"{num_benign} flows out of {num_total} flows: {num_benign_percent}")
logger.info("Attack Stats:")
logger.info(f"{num_attacks} flows out of {num_total} flows: {num_attacks_percent}")
def label_cic(row: Dict[str, Any], attr_map: pd.DataFrame) -> str:
"""
Purpose:
Get label for cic dataset
Args:
row: Dict of values for the CIC flow
attr_map: Dataframe object of the IPs
Returns:
Label: Label for the IP
"""
src_ip = row["Src IP"]
dst_ip = row["Dst IP"]
# find row in attr_map with src_ip
# check if ip exists
ip_val = ""
try:
ip_val = attr_map.loc[attr_map["ip"] == src_ip]["type"].values[0]
# if ipval is sut check the dst
if ip_val == "sut":
ip_val = attr_map.loc[attr_map["ip"] == dst_ip]["type"].values[0]
except Exception as error:
logging.debug(error)
logging.debug("IP " + src_ip + " not in attr map?")
ip_val = "unknown"
return ip_val
def mw_prep_compoent(
compoent: str, run_json: Dict[str, Any], mw_run_json: Dict[str, Any], log_level: int
) -> Union[MwComponent, None]:
"""
Purpose:
Setup run environment variables
Args:
curr: Current Compoent
run_json: Magicwand run level variables
mw_run_json: Magicwand compoent level variables
Returns:
compose_file_string : compose_file_string to use for compoent
"""
if compoent in run_json:
current_compoent_string = run_json[compoent]
else:
logging.error(compoent + " not in JSON")
return None
try:
current_compoent = mw_components[current_compoent_string](log_level)
mw_run_json[compoent] = current_compoent.config
except Exception as error:
raise ValueError(
f"{current_compoent_string} is not a valid value for {compoent}.\n Valid values: {valid_values[compoent]}"
)
# just return compoents so we can use later
return current_compoent
def mem_stats(run_loc: str, run_duration: int) -> int:
"""
Purpose:
Start docker-stats to calculate memory
Args:
run_loc: Where run is located
run_duration: How long run is going for
Returns:
status : 0 if passed, -1 if fail
"""
# write mem_stats
cmd = 'echo "timestamp,memory_percent" >>' + run_loc + "mem_stats.csv"
status = os.system(cmd)
if status != 0:
logging.error("echo failed")
start = time.time()
while True:
cmd = "docker stats --no-stream | grep mw-sut-apachewp | awk '{print $7}'"
try:
mem_percent_raw = os.popen(cmd).read().split("\n")[0]
except Exception as warning:
logging.warning(warning)
logging.warning("docker-stats failed")
return -1
try:
mem_percent = float(mem_percent_raw[:-1])
except Exception as warning:
logging.warning(warning)
logging.warning("docker-stats failed")
return -1
elapsed = round(time.time() - start)
# write to file
filename = run_loc + "mem_stats.csv"
curr_file = open(filename, "a")
curr_file.write(str(elapsed) + "," + str(mem_percent) + "\n")
time.sleep(5)
if elapsed >= run_duration:
break
return 0
def run_tshark(run_loc: str) -> int:
"""
Purpose:
Run tshark on a PCAP to get the CSV version of the PCAP
Args:
run_loc: Location of PCAP file
Returns:
status: 0 if passed, -1 if failed
"""
cmd = (
"tshark -r "
+ run_loc
+ "tcpdump.pcap -i wlan1 -T fields -E header=y -E separator=, -E quote=d -e _ws.col.No. -e _ws.col.Time -e _ws.col.Source -e _ws.col.Destination -e _ws.col.Protocol -e _ws.col.Length -e _ws.col.Info -e http.user_agent -e http.connection -e http.request -e http.request.line > "
+ run_loc
+ "tcpdump_verify.csv"
)
status = os.system(cmd)
if status != 0:
return -1
return 0
| [
37811,
198,
30026,
3455,
25,
198,
220,
220,
220,
770,
2393,
4909,
262,
9156,
284,
1057,
5536,
86,
392,
10256,
13,
198,
198,
15269,
25,
198,
220,
220,
220,
770,
2267,
373,
4166,
351,
4918,
422,
262,
5947,
13435,
4992,
29898,
198,
220... | 2.501552 | 3,543 |
import numpy as np
# List of states
# - forward
# - stop
# - recovery
# - steering_to_rock
# - slow_forward_to_rock
# - pick_up_rock
# Implemention of left wall following method
# This is where you can build a decision tree for determining throttle, brake and steer
# commands based on the output of the perception_step() function
| [
11748,
299,
32152,
355,
45941,
628,
198,
2,
7343,
286,
2585,
198,
2,
532,
2651,
198,
2,
532,
2245,
198,
2,
532,
7628,
198,
2,
532,
19702,
62,
1462,
62,
10823,
198,
2,
532,
3105,
62,
11813,
62,
1462,
62,
10823,
198,
2,
532,
2298,... | 3.585106 | 94 |
from __future__ import absolute_import
# flake8: noqa
# import apis into api package
from openapi_client.api.fees_api import FeesApi
from openapi_client.api.invoices_api import InvoicesApi
from openapi_client.api.payments_api import PaymentsApi
from openapi_client.api.receipts_api import ReceiptsApi
from openapi_client.api.transactions_api import TransactionsApi
from openapi_client.api.default_api import DefaultApi
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
198,
2,
781,
539,
23,
25,
645,
20402,
198,
198,
2,
1330,
2471,
271,
656,
40391,
5301,
198,
6738,
1280,
15042,
62,
16366,
13,
15042,
13,
69,
2841,
62,
15042,
1330,
37691,
32,
14415... | 3.118519 | 135 |
import subprocess
import sys
import time
import warnings
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict
import torch
import torch.nn as nn
import torch.utils.data
from tqdm import tqdm, trange
from mighty.loss import PairLossSampler, LossPenalty
from mighty.models import AutoencoderOutput, MLP
from mighty.monitor.accuracy import AccuracyEmbedding, \
AccuracyArgmax, Accuracy
from mighty.monitor.batch_timer import timer
from mighty.monitor.monitor import Monitor
from mighty.monitor.mutual_info import MutualInfoNeuralEstimation, \
MutualInfoStub
from mighty.monitor.mutual_info.mutual_info import MutualInfo
from mighty.trainer.mask import MaskTrainer
from mighty.utils.common import find_named_layers, batch_to_cuda, \
input_from_batch
from mighty.utils.constants import CHECKPOINTS_DIR
from mighty.utils.data import DataLoader
from mighty.utils.prepare import prepare_eval
from mighty.utils.var_online import MeanOnline
__all__ = [
"Trainer"
]
class Trainer(ABC):
"""
Trainer base class.
Parameters
----------
model : nn.Module
A neural network to train.
criterion : nn.Module
Loss function.
data_loader : DataLoader
A data loader.
accuracy_measure : Accuracy or None, optional
Calculates the accuracy from the last layer activations.
If None, set to :code:`AccuracyArgmax` for a classification task
and :code:`AccuracyEmbedding` otherwise.
.. code-block:: python
if isinstance(criterion, PairLossSampler):
accuracy_measure = AccuracyEmbedding()
else:
# cross entropy loss
accuracy_measure = AccuracyArgmax()
Default: None
mutual_info : MutualInfo or None, optional
A handle to compute the mutual information I(X; T) and I(Y; T) [1]_.
If None, don't compute the mutual information.
Default: None
env_suffix : str, optional
The suffix to add to the current environment name.
Default: ''
checkpoint_dir : Path or str, optional
The path to store the checkpoints.
Default: ``${HOME}/.mighty/checkpoints``
verbosity : int, optional
* 0 - don't print anything
* 1 - show the progress with each epoch
* 2 - show the progress with each batch
Default: 2
References
----------
.. [1] Shwartz-Ziv, R., & Tishby, N. (2017). Opening the black box of deep
neural networks via information. arXiv preprint arXiv:1703.00810.
Notes
-----
For the choice of ``mutual_info`` refer to
https://github.com/dizcza/entropy-estimators
"""
watch_modules = (nn.Linear, nn.Conv2d, MLP)
# A key-word to determine the criteria for the "best" score.
# The value of the tag is irrelevant.
# By default, the accuracy is used as the best score measure.
best_score_type = 'accuracy'
@property
def epoch(self):
"""
The current epoch, int.
"""
return self.timer.epoch
def checkpoint_path(self, best=False):
"""
Get the checkpoint path, given the mode.
Parameters
----------
best : bool
The best (True) or normal (False) mode.
Returns
-------
Path
Checkpoint path.
"""
checkpoint_dir = self.checkpoint_dir
if best:
checkpoint_dir = self.checkpoint_dir / "best"
return checkpoint_dir / (self.env_name + '.pt')
def monitor_functions(self):
"""
Override this method to register `Visdom` callbacks on each epoch.
"""
pass
def log_trainer(self):
"""
Logs the trainer in `Visdom` text field.
"""
self.monitor.log_model(self.model)
self.monitor.log(f"Criterion: {self.criterion}")
self.monitor.log(repr(self.data_loader))
self.monitor.log_self()
self.monitor.log(repr(self.accuracy_measure))
self.monitor.log(repr(self.mutual_info))
git_dir = Path(sys.argv[0]).parent
while str(git_dir) != git_dir.root:
try:
commit = subprocess.run(['git', '--git-dir',
str(git_dir / '.git'),
'rev-parse', 'HEAD'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
except FileNotFoundError:
# Git is not installed
break
if commit.returncode == 0:
self.monitor.log(f"Git location '{str(git_dir)}' "
f"commit: {commit.stdout}")
break
git_dir = git_dir.parent
@abstractmethod
def train_batch(self, batch):
"""
The core function of a trainer to update the model parameters, given
a batch.
Parameters
----------
batch : torch.Tensor or tuple of torch.Tensor
:code:`(X, Y)` or :code:`X` batch of input data.
Returns
-------
loss : torch.Tensor
The batch loss.
"""
raise NotImplementedError()
def update_best_score(self, score):
"""
If :code:`score` is greater than the :code:`self.best_score`, save
the model.
The internal best score is updated and the current model is saved as
"best" if the object's :attr:`best_score_type` tag matches with its
class :attr:`best_score_type`.
Parameters
----------
score : float
The model score at the current epoch. The higher, the better.
The simplest way to use this function is set :code:`score = -loss`.
"""
# This function can be called multiple times from different functions
# but only one call will lead to updating the score and saving the
# best model.
if self.best_score_type == self.__class__.best_score_type \
and score > self.best_score:
self.best_score = score
self.save(best=True)
self.monitor.log(f"[epoch={self.timer.epoch}] "
f"best score: {self.best_score}")
def save(self, best=False):
"""
Saves the trainer and the model parameters to
:code:`self.checkpoint_path(best)`.
Parameters
----------
best : bool
The mode (refer to :func:`Trainer.checkpoint_path`).
See Also
--------
restore : restore the training progress
"""
checkpoint_path = self.checkpoint_path(best)
checkpoint_path.parent.mkdir(parents=True, exist_ok=True)
try:
torch.save(self.state_dict(), checkpoint_path)
except PermissionError as error:
print(error)
def state_dict(self):
"""
Returns
-------
dict
A dict of the trainer state to be saved.
"""
return {
"model_state": self.model.state_dict(),
"epoch": self.timer.epoch,
"env_name": self.env_name,
"best_score": self.best_score,
}
def restore(self, checkpoint_path=None, best=False, strict=True):
"""
Restores the trainer progress and the model from the path.
Parameters
----------
checkpoint_path : Path or None
Trainer checkpoint path to restore. If None, the default path
:code:`self.checkpoint_path()` is used.
Default: None
best : bool
The mode (refer to :func:`Trainer.checkpoint_path`).
strict : bool
Strict model loading or not.
Returns
-------
checkpoint_state : dict
The loaded state of a trainer.
"""
if checkpoint_path is None:
checkpoint_path = self.checkpoint_path(best)
checkpoint_path = Path(checkpoint_path)
if not checkpoint_path.exists():
print(f"Checkpoint '{checkpoint_path}' doesn't exist. "
f"Nothing to restore.")
return None
map_location = None
if not torch.cuda.is_available():
map_location = 'cpu'
checkpoint_state = torch.load(checkpoint_path,
map_location=map_location)
try:
self.model.load_state_dict(checkpoint_state['model_state'],
strict=strict)
except RuntimeError as error:
print(f"Restoring {checkpoint_path} raised {error}")
return None
self.env_name = checkpoint_state['env_name']
self.timer.set_epoch(checkpoint_state['epoch'])
self.best_score = checkpoint_state['best_score']
self.monitor.open(env_name=self.env_name)
print(f"Restored model state from {checkpoint_path}.")
return checkpoint_state
def is_unsupervised(self):
"""
Returns
-------
bool
True, if the training is unsupervised and False otherwise.
"""
return not self.data_loader.has_labels
def full_forward_pass(self, train=True):
"""
Fixes the model weights, evaluates the epoch score and updates the
monitor.
Parameters
----------
train : bool
Either train (True) or test (False) batches to run. In both cases,
the model is set to the evaluation regime via `self.model.eval()`.
Returns
-------
loss : torch.Tensor
The loss of a full forward pass.
"""
mode_saved = self.model.training
self.model.train(False)
self.accuracy_measure.reset_labels()
loss_online = MeanOnline()
if train:
description = "Full forward pass (eval)" \
if self.verbosity >= 2 else None
loader = self.data_loader.eval(description)
self.mutual_info.start_listening()
else:
loader = self.data_loader.get(train)
with torch.no_grad():
for batch in loader:
batch = batch_to_cuda(batch)
output = self._forward(batch)
loss = self._get_loss(batch, output)
self._on_forward_pass_batch(batch, output, train)
loss_online.update(loss)
self.mutual_info.finish_listening()
self.model.train(mode_saved)
loss = loss_online.get_mean()
self.monitor.update_loss(loss, mode='train' if train else 'test')
self.update_accuracy(train=train)
return loss
def update_accuracy(self, train=True):
"""
Updates the accuracy of the model.
Parameters
----------
train : bool
Either train (True) or test (False) mode.
Returns
-------
accuracy : torch.Tensor
A scalar with the accuracy value.
"""
if self.is_unsupervised():
return None
labels_true = torch.cat(self.accuracy_measure.true_labels_cached)
if not train or isinstance(self.accuracy_measure, AccuracyArgmax):
labels_pred = torch.cat(
self.accuracy_measure.predicted_labels_cached)
elif getattr(self.accuracy_measure, 'cache', False):
labels_pred = self.accuracy_measure.predict_cached()
else:
labels_pred = []
with torch.no_grad():
for batch in self.data_loader.eval():
batch = batch_to_cuda(batch)
output = self._forward(batch)
if isinstance(output, AutoencoderOutput):
output = output.latent
labels_pred.append(
self.accuracy_measure.predict(output).cpu())
labels_pred = torch.cat(labels_pred, dim=0)
if labels_true.is_cuda:
warnings.warn("'labels_true' is a cuda tensor")
labels_true = labels_true.cpu()
if labels_pred.is_cuda:
warnings.warn("'labels_pred' is a cuda tensor")
labels_pred = labels_pred.cpu()
accuracy = self.monitor.update_accuracy_epoch(
labels_pred, labels_true, mode='train' if train else 'test')
self.update_best_score(accuracy)
return accuracy
def train_mask(self, mask_explain_params=dict()):
"""
Train mask to see what part of an image is crucial from the network
perspective (saliency map).
Parameters
----------
mask_explain_params : dict, optional
`MaskTrainer` keyword arguments.
"""
images, labels = next(iter(self.train_loader))
mask_trainer = MaskTrainer(self.accuracy_measure,
image_shape=images[0].shape,
**mask_explain_params)
mode_saved = prepare_eval(self.model)
if torch.cuda.is_available():
images = images.cuda()
with torch.no_grad():
proba = self.accuracy_measure.predict_proba(self.model(images))
proba_max, _ = proba.max(dim=1)
sample_max_proba = proba_max.argmax()
image = images[sample_max_proba]
label = labels[sample_max_proba]
self.monitor.plot_explain_input_mask(self.model, mask_trainer=mask_trainer,
image=image, label=label)
mode_saved.restore(self.model)
return image, label
def train_epoch(self, epoch):
"""
Trains an epoch.
Parameters
----------
epoch : int
Epoch ID.
"""
loss_online = MeanOnline()
for batch in tqdm(self.train_loader,
desc="Epoch {:d}".format(epoch),
disable=self.verbosity < 2,
leave=False):
batch = batch_to_cuda(batch)
loss = self.train_batch(batch)
loss_online.update(loss.detach().cpu())
for name, param in self.model.named_parameters():
if torch.isnan(param).any():
warnings.warn(f"NaN parameters in '{name}'")
self.monitor.batch_finished(self.model)
self.monitor.update_loss(loss=loss_online.get_mean(),
mode='batch')
def open_monitor(self, offline=False):
"""
Opens a `Visdom` monitor.
Parameters
----------
offline : bool
Online (False) or offline (True) monitoring.
"""
# visdom can be already initialized via trainer.restore()
if self.monitor.viz is None:
# new environment
self.monitor.open(env_name=self.env_name, offline=offline)
self.monitor.clear()
def training_started(self):
"""
Training is started callback.
This function is called before training the first epoch.
"""
pass
def training_finished(self):
"""
Training is finished callback.
This function is called right before exiting the :func:`Trainer.train`
function.
"""
pass
def train(self, n_epochs=10, mutual_info_layers=0,
mask_explain_params=None):
"""
User-entry function to train the model for :code:`n_epochs`.
Parameters
----------
n_epochs : int
The number of epochs to run.
Default: 10
mutual_info_layers : int, optional
Evaluate the mutual information [1]_ from the last
:code:`mutual_info_layers` layers at each epoch. If set to 0,
skip the (time-consuming) mutual information estimation.
Default: 0
mask_explain_params : dict or None, optional
If not None, a dictionary with parameters for :class:`MaskTrainer`,
that is used to show the "saliency map" [2]_.
Default: None
Returns
-------
loss_epochs : list
A list of epoch loss.
References
----------
.. [1] Shwartz-Ziv, R., & Tishby, N. (2017). Opening the black box of deep
neural networks via information. arXiv preprint arXiv:1703.00810.
.. [2] Fong, R. C., & Vedaldi, A. (2017). Interpretable explanations of
black boxes by meaningful perturbation.
"""
if self.verbosity >= 2:
print(self.model)
self.timer.n_epochs = n_epochs
self._prepare_train(mutual_info_layers)
if n_epochs == 1:
self.monitor.viz.with_markers = True
self.training_started()
loss_epochs = []
for epoch in trange(self.timer.epoch, self.timer.epoch + n_epochs,
disable=self.verbosity != 1):
self.train_epoch(epoch=epoch)
loss = self.full_forward_pass(train=True)
self.full_forward_pass(train=False)
if mask_explain_params:
self.train_mask(mask_explain_params)
self._epoch_finished(loss)
loss_epochs.append(loss.item())
self.training_finished()
return loss_epochs
| [
11748,
850,
14681,
198,
11748,
25064,
198,
11748,
640,
198,
11748,
14601,
198,
6738,
450,
66,
1330,
9738,
11,
12531,
24396,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
19720,
1330,
360,
713,
198,
198,
11748,
28034,
198,
11748,
28034,
... | 2.152597 | 8,126 |
from distutils.core import setup
setup(
name = 'packson',
packages = ['packson'],
version = '0.4.0',
license='MIT',
description = 'Easily bind JSON to user defined class instances.',
author = 'Eric Falkenberg',
author_email = 'ericsfalkenberg@gmail.com',
url = 'https://github.com/EricFalkenberg/packson',
download_url = 'https://github.com/EricFalkenberg/packson/archive/v0.4.0.tar.gz',
keywords = ['json', 'data', 'bind', 'decorator', 'types'],
install_requires=[],
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
) | [
6738,
1233,
26791,
13,
7295,
1330,
9058,
198,
40406,
7,
198,
220,
1438,
796,
705,
8002,
1559,
3256,
198,
220,
10392,
796,
37250,
8002,
1559,
6,
4357,
198,
220,
2196,
796,
705,
15,
13,
19,
13,
15,
3256,
198,
220,
5964,
11639,
36393,
... | 3.020548 | 292 |
from __future__ import absolute_import, division, print_function
import os
import yaml
import click
from compose.cli.docker_client import docker_client
from compose.const import LABEL_PROJECT
from compose.project import Project as ComposeProject
from compose.project import sort_service_dicts
from compose.project import NoSuchService
from bag8.config import Config
from bag8.const import LABEL_BAG8_SERVICE
from bag8.const import LABEL_BAG8_PROJECT
from bag8.exceptions import NoDockerfile
from bag8.exceptions import NoProjectYaml
from bag8.service import Service
from bag8.utils import simple_name
from bag8.yaml import Yaml
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
7297,
11,
3601,
62,
8818,
198,
198,
11748,
28686,
198,
11748,
331,
43695,
198,
198,
11748,
3904,
198,
198,
6738,
36664,
13,
44506,
13,
45986,
62,
16366,
1330,
36253,
62,
16366,
198,
... | 3.516667 | 180 |
#!/usr/bin/env python
import os
import datetime
from celery import signals
from spylunking.log.setup_logging import build_colorized_logger
from celery_loaders.work_tasks.get_celery_app import get_celery_app
# Disable celery log hijacking
# https://github.com/celery/celery/issues/2509
@signals.setup_logging.connect
name = "run-tests"
log = build_colorized_logger(name=name)
log.info(("start - {}")
.format(name))
broker_url = os.getenv(
"WORKER_BROKER_URL",
"redis://localhost:6379/9").strip().lstrip()
backend_url = os.getenv(
"WORKER_BACKEND_URL",
"redis://localhost:6379/10").strip().lstrip()
# comma delimited list of task module files:
tasks_str = os.getenv(
"WORKER_TASKS",
("celery_loaders.work_tasks.tasks,"
"celery_loaders.work_tasks.always_fails_tasks"))
include_tasks = tasks_str.split(",")
ssl_options = {}
transport_options = {}
log.info(("broker={} backend={} include_tasks={}")
.format(
broker_url,
backend_url,
include_tasks))
log.info(("broker={} backend={}")
.format(
broker_url,
backend_url))
# Get the Celery app project's get_celery_app
app = get_celery_app(
name=name,
auth_url=broker_url,
backend_url=backend_url,
include_tasks=include_tasks)
# if you want to discover tasks in other directories:
# app.autodiscover_tasks(["some_dir_name_with_tasks"])
user_lookup_data = {
"user_id": 1
}
failure_test_data = {
"test_failure": "Should fail now {}".format(
datetime.datetime.now().isoformat())
}
log.info("calling task - success testing")
path_to_tasks = "celery_loaders.work_tasks.tasks"
task_name = ("{}.do_some_work").format(
path_to_tasks)
job_id = app.send_task(
task_name,
(user_lookup_data,))
log.info(("calling task={} - success job_id={}")
.format(
task_name,
job_id))
log.info("calling task - failure testing")
path_to_tasks = "celery_loaders.work_tasks.always_fails_tasks"
fails_task_name = ("{}.always_fails").format(
path_to_tasks)
job_id = app.send_task(
fails_task_name,
(failure_test_data,))
log.info(("calling task={} - failure job_id={}")
.format(
fails_task_name,
job_id))
log.info(("end - {}")
.format(name))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
28686,
198,
11748,
4818,
8079,
198,
6738,
18725,
1924,
1330,
10425,
198,
6738,
599,
2645,
2954,
278,
13,
6404,
13,
40406,
62,
6404,
2667,
1330,
1382,
62,
8043,
1143,
62,
64... | 2.202048 | 1,074 |
import argparse
import os
import numpy as np
import pandas as pd
import datetime
import time
parser = argparse.ArgumentParser(description='Managing experiments')
parser.add_argument('--num_runs', type=int, default=1, metavar='MODEL', \
required=False, help='number of runs of each model')
parser.add_argument('--test', action='store_true',
help='what to do with generated commands: print (to check commands) or os.system (to run comands)')
parser.add_argument('--label', type=str, default="run", metavar='MODEL', required=False, help='label used in naming log folders')
parser.add_argument('--comment_add', type=str, default="", metavar='MODEL', required=False, help='if you wish to add anyth to log folder name')
args = parser.parse_args()
if args.test:
action = print
else:
action = os.system
commands = []
for lang in ["py", "js"]:
for comment, data_type, model_type in [("static", "full", "rnn"), \
("dynamic", "full", "rnn_dynemb_mixed"), \
("static", "ano", "rnn"), \
("dynamic", "ano", "rnn_dynemb")]:
dataname = "python" if lang=="py" else "js"
data_label = "values" if data_type=="full" else "anovalues"
data_options = " --train_src traverse_%s_train.txt --dev_src traverse_%s_test.txt "%(data_label, data_label) # types and targets are specified in default values in train.py
emsize = 500 if comment == "dynamic" else 1200
command = "train.py --dir logs_rnn/"+args.label+"/"+lang+"_"+data_type+"data"+args.comment_add+"/ --data_dir preprocessed_data_vm/ "+data_options+" --comment "+comment+" --max_src_len 250 --print_fq 1 --checkpoint True --learning_rate 0.0001 --grad_clipping 1000 --lr_decay 0.6 --num_epochs 10 --dataset_name "+dataname+" --model_type "+model_type+" --bidirection True --emsize "+str(emsize)+" --emsize_type 300"
commands.append(command)
for command in commands:
for _ in range(1 if args.test else args.num_runs):
action(get_run_command(command))
| [
11748,
1822,
29572,
198,
11748,
28686,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
4818,
8079,
198,
11748,
640,
198,
198,
48610,
796,
1822,
29572,
13,
28100,
1713,
46677,
7,
11213,
11639,
5124,
... | 2.462428 | 865 |
#!/usr/bin/python
"""Main script for provision analytics workspaces."""
import argparse
import logging
import os
import sys
from azure.storage.filedatalake import FileSystemClient
# pylint: disable=W0621
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
def create_directories(connection_string, file_system_name):
"""Create File System Directories."""
file_system = FileSystemClient.from_connection_string(
connection_string, file_system_name=file_system_name
)
def main(connection_string):
"""Provision analytics resources."""
print(f"connection string: {connection_string}")
if not connection_string:
raise ValueError("Parameter connection_string is required.")
if __name__ == "__main__":
logging.info("Starting script")
parser = argparse.ArgumentParser(
description="Provision Analytics Workspaces.",
add_help=True,
)
parser.add_argument(
"--connection_string",
"-c",
help="Storage Account Connection String",
)
args = parser.parse_args()
connection_string = args.connection_string or os.environ.get(
"STORAGE_CONNECTION_STRING"
)
main(connection_string)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
37811,
13383,
4226,
329,
8287,
23696,
2499,
43076,
526,
15931,
198,
11748,
1822,
29572,
198,
11748,
18931,
198,
11748,
28686,
198,
11748,
25064,
198,
198,
6738,
35560,
495,
13,
35350,
13,
69,
3... | 2.88756 | 418 |
import streamlit as st
from streamlit.errors import StreamlitAPIException
import bubbletea
import os
from dotenv import load_dotenv
load_dotenv()
try:
st.set_page_config(page_title='bubbletea Demos', page_icon=':coin:', layout='wide', initial_sidebar_state='collapsed')
except StreamlitAPIException:
pass
files_str = os.environ.get("demos")
files = sorted(files_str.split(','))
urlvars = bubbletea.parse_url_var([{'key':'demo'}])
try:
selected_demo = urlvars['demo']
except KeyError:
selected_demo = files[0]
pass
code_input = """st.header("bubbletea Demos")"""
if selected_demo in files:
st.sidebar.title(":hot_pepper: Demos")
with st.expander("About Bubbletea"):
with open('README.md', 'r') as file:
intro = file.read()
st.markdown(intro)
display, editor = st.columns((2, 1))
with st.sidebar:
try:
index = files.index(selected_demo)
except ValueError:
index = 0
selected_demo = st.selectbox('🌟 Pick one', files, index=index, on_change=on_demo_change, key='demo_selector')
with open(f'./examples/{selected_demo}', 'r') as file:
code_input = file.read()
with editor:
st.markdown(f'```{code_input}')
with display:
exec(code_input)
else:
with open(f'./examples/{selected_demo}', 'r') as file:
code_input = file.read()
exec(code_input) | [
11748,
4269,
18250,
355,
336,
198,
6738,
4269,
18250,
13,
48277,
1330,
13860,
18250,
17614,
16922,
198,
11748,
14310,
660,
64,
198,
11748,
28686,
198,
198,
6738,
16605,
24330,
1330,
3440,
62,
26518,
24330,
198,
2220,
62,
26518,
24330,
341... | 2.313214 | 613 |
import numpy as np
a = [1,2,3]
b = [2,3,4]
dot_product = np.dot(a,b)
print(dot_product) | [
11748,
299,
32152,
355,
45941,
198,
64,
796,
685,
16,
11,
17,
11,
18,
60,
198,
65,
796,
685,
17,
11,
18,
11,
19,
60,
198,
26518,
62,
11167,
796,
45941,
13,
26518,
7,
64,
11,
65,
8,
198,
4798,
7,
26518,
62,
11167,
8
] | 1.933333 | 45 |
testnum1 = int(input('number 1: '))
testnum2 = int(input('number 2: '))
print(recursiveMultiply(testnum1, testnum2))
| [
198,
198,
9288,
22510,
16,
796,
493,
7,
15414,
10786,
17618,
352,
25,
220,
705,
4008,
198,
9288,
22510,
17,
796,
493,
7,
15414,
10786,
17618,
362,
25,
220,
705,
4008,
198,
4798,
7,
8344,
30753,
15205,
541,
306,
7,
9288,
22510,
16,
... | 2.469388 | 49 |
import logging
from unittest import TestCase
import pytest
from foxylib.tools.log.foxylib_logger import FoxylibLogger
from foxylib.tools.videogame.blizzard.overwatch.overwatch_tool import OwapiTool
| [
11748,
18931,
198,
6738,
555,
715,
395,
1330,
6208,
20448,
198,
198,
11748,
12972,
9288,
198,
198,
6738,
21831,
2645,
571,
13,
31391,
13,
6404,
13,
12792,
2645,
571,
62,
6404,
1362,
1330,
5426,
2645,
571,
11187,
1362,
198,
6738,
21831,
... | 3.045455 | 66 |
import json
import logging
import os
import re
import shutil
import string
import sqlalchemy.orm.exc
from galaxy import util
from galaxy.web import url_for
from galaxy.datatypes import checkers
from galaxy.model.orm import and_
from galaxy.model.orm import or_
from tool_shed.util import basic_util
from tool_shed.util import common_util
from tool_shed.util import encoding_util
from tool_shed.util import hg_util
from urllib2 import HTTPError
log = logging.getLogger( __name__ )
MAX_CONTENT_SIZE = 1048576
DATATYPES_CONFIG_FILENAME = 'datatypes_conf.xml'
REPOSITORY_DATA_MANAGER_CONFIG_FILENAME = 'data_manager_conf.xml'
new_repo_email_alert_template = """
Sharable link: ${sharable_link}
Repository name: ${repository_name}
Revision: ${revision}
Change description:
${description}
Uploaded by: ${username}
Date content uploaded: ${display_date}
${content_alert_str}
-----------------------------------------------------------------------------
This change alert was sent from the Galaxy tool shed hosted on the server
"${host}"
-----------------------------------------------------------------------------
You received this alert because you registered to receive email when
new repositories were created in the Galaxy tool shed named "${host}".
-----------------------------------------------------------------------------
"""
email_alert_template = """
Sharable link: ${sharable_link}
Repository name: ${repository_name}
Revision: ${revision}
Change description:
${description}
Changed by: ${username}
Date of change: ${display_date}
${content_alert_str}
-----------------------------------------------------------------------------
This change alert was sent from the Galaxy tool shed hosted on the server
"${host}"
-----------------------------------------------------------------------------
You received this alert because you registered to receive email whenever
changes were made to the repository named "${repository_name}".
-----------------------------------------------------------------------------
"""
contact_owner_template = """
GALAXY TOOL SHED REPOSITORY MESSAGE
------------------------
The user '${username}' sent you the following message regarding your tool shed
repository named '${repository_name}'. You can respond by sending a reply to
the user's email address: ${email}.
-----------------------------------------------------------------------------
${message}
-----------------------------------------------------------------------------
This message was sent from the Galaxy Tool Shed instance hosted on the server
'${host}'
"""
def create_or_update_tool_shed_repository( app, name, description, installed_changeset_revision, ctx_rev, repository_clone_url,
metadata_dict, status, current_changeset_revision=None, owner='', dist_to_shed=False ):
"""
Update a tool shed repository record in the Galaxy database with the new information received.
If a record defined by the received tool shed, repository name and owner does not exist, create
a new record with the received information.
"""
# The received value for dist_to_shed will be True if the ToolMigrationManager is installing a repository
# that contains tools or datatypes that used to be in the Galaxy distribution, but have been moved
# to the main Galaxy tool shed.
if current_changeset_revision is None:
# The current_changeset_revision is not passed if a repository is being installed for the first
# time. If a previously installed repository was later uninstalled, this value should be received
# as the value of that change set to which the repository had been updated just prior to it being
# uninstalled.
current_changeset_revision = installed_changeset_revision
context = app.install_model.context
tool_shed = get_tool_shed_from_clone_url( repository_clone_url )
if not owner:
owner = get_repository_owner_from_clone_url( repository_clone_url )
includes_datatypes = 'datatypes' in metadata_dict
if status in [ app.install_model.ToolShedRepository.installation_status.DEACTIVATED ]:
deleted = True
uninstalled = False
elif status in [ app.install_model.ToolShedRepository.installation_status.UNINSTALLED ]:
deleted = True
uninstalled = True
else:
deleted = False
uninstalled = False
tool_shed_repository = \
get_tool_shed_repository_by_shed_name_owner_installed_changeset_revision( app,
tool_shed,
name,
owner,
installed_changeset_revision )
if tool_shed_repository:
log.debug( "Updating an existing row for repository '%s' in the tool_shed_repository table, status set to '%s'." % \
( str( name ), str( status ) ) )
tool_shed_repository.description = description
tool_shed_repository.changeset_revision = current_changeset_revision
tool_shed_repository.ctx_rev = ctx_rev
tool_shed_repository.metadata = metadata_dict
tool_shed_repository.includes_datatypes = includes_datatypes
tool_shed_repository.deleted = deleted
tool_shed_repository.uninstalled = uninstalled
tool_shed_repository.status = status
else:
log.debug( "Adding new row for repository '%s' in the tool_shed_repository table, status set to '%s'." % \
( str( name ), str( status ) ) )
tool_shed_repository = \
app.install_model.ToolShedRepository( tool_shed=tool_shed,
name=name,
description=description,
owner=owner,
installed_changeset_revision=installed_changeset_revision,
changeset_revision=current_changeset_revision,
ctx_rev=ctx_rev,
metadata=metadata_dict,
includes_datatypes=includes_datatypes,
dist_to_shed=dist_to_shed,
deleted=deleted,
uninstalled=uninstalled,
status=status )
context.add( tool_shed_repository )
context.flush()
return tool_shed_repository
def extract_components_from_tuple( repository_components_tuple ):
'''Extract the repository components from the provided tuple in a backward-compatible manner.'''
toolshed = repository_components_tuple[ 0 ]
name = repository_components_tuple[ 1 ]
owner = repository_components_tuple[ 2 ]
changeset_revision = repository_components_tuple[ 3 ]
components_list = [ toolshed, name, owner, changeset_revision ]
if len( repository_components_tuple ) == 5:
toolshed, name, owner, changeset_revision, prior_installation_required = repository_components_tuple
components_list = [ toolshed, name, owner, changeset_revision, prior_installation_required ]
elif len( repository_components_tuple ) == 6:
toolshed, name, owner, changeset_revision, prior_installation_required, only_if_compiling_contained_td = repository_components_tuple
components_list = [ toolshed, name, owner, changeset_revision, prior_installation_required, only_if_compiling_contained_td ]
return components_list
def generate_sharable_link_for_repository_in_tool_shed( repository, changeset_revision=None ):
"""Generate the URL for sharing a repository that is in the tool shed."""
base_url = url_for( '/', qualified=True ).rstrip( '/' )
protocol, base = base_url.split( '://' )
sharable_url = '%s://%s/view/%s/%s' % ( protocol, base, repository.user.username, repository.name )
if changeset_revision:
sharable_url += '/%s' % changeset_revision
return sharable_url
def generate_tool_guid( repository_clone_url, tool ):
"""
Generate a guid for the installed tool. It is critical that this guid matches the guid for
the tool in the Galaxy tool shed from which it is being installed. The form of the guid is
<tool shed host>/repos/<repository owner>/<repository name>/<tool id>/<tool version>
"""
tmp_url = common_util.remove_protocol_and_user_from_clone_url( repository_clone_url )
return '%s/%s/%s' % ( tmp_url, tool.id, tool.version )
def generate_tool_shed_repository_install_dir( repository_clone_url, changeset_revision ):
"""
Generate a repository installation directory that guarantees repositories with the same
name will always be installed in different directories. The tool path will be of the form:
<tool shed url>/repos/<repository owner>/<repository name>/<installed changeset revision>
"""
tmp_url = common_util.remove_protocol_and_user_from_clone_url( repository_clone_url )
# Now tmp_url is something like: bx.psu.edu:9009/repos/some_username/column
items = tmp_url.split( '/repos/' )
tool_shed_url = items[ 0 ]
repo_path = items[ 1 ]
tool_shed_url = common_util.remove_port_from_tool_shed_url( tool_shed_url )
return common_util.url_join( tool_shed_url, 'repos', repo_path, changeset_revision )
def get_absolute_path_to_file_in_repository( repo_files_dir, file_name ):
"""Return the absolute path to a specified disk file contained in a repository."""
stripped_file_name = basic_util.strip_path( file_name )
file_path = None
for root, dirs, files in os.walk( repo_files_dir ):
if root.find( '.hg' ) < 0:
for name in files:
if name == stripped_file_name:
return os.path.abspath( os.path.join( root, name ) )
return file_path
def get_categories( app ):
"""Get all categories from the database."""
sa_session = app.model.context.current
return sa_session.query( app.model.Category ) \
.filter( app.model.Category.table.c.deleted==False ) \
.order_by( app.model.Category.table.c.name ) \
.all()
def get_category( app, id ):
"""Get a category from the database."""
sa_session = app.model.context.current
return sa_session.query( app.model.Category ).get( app.security.decode_id( id ) )
def get_category_by_name( app, name ):
"""Get a category from the database via name."""
sa_session = app.model.context.current
try:
return sa_session.query( app.model.Category ).filter_by( name=name ).one()
except sqlalchemy.orm.exc.NoResultFound:
return None
def get_ctx_rev( app, tool_shed_url, name, owner, changeset_revision ):
"""
Send a request to the tool shed to retrieve the ctx_rev for a repository defined by the
combination of a name, owner and changeset revision.
"""
tool_shed_url = common_util.get_tool_shed_url_from_tool_shed_registry( app, tool_shed_url )
params = '?name=%s&owner=%s&changeset_revision=%s' % ( name, owner, changeset_revision )
url = common_util.url_join( tool_shed_url,
'repository/get_ctx_rev%s' % params )
ctx_rev = common_util.tool_shed_get( app, tool_shed_url, url )
return ctx_rev
def get_next_downloadable_changeset_revision( repository, repo, after_changeset_revision ):
"""
Return the installable changeset_revision in the repository changelog after the changeset to which
after_changeset_revision refers. If there isn't one, return None.
"""
changeset_revisions = get_ordered_metadata_changeset_revisions( repository, repo, downloadable=True )
if len( changeset_revisions ) == 1:
changeset_revision = changeset_revisions[ 0 ]
if changeset_revision == after_changeset_revision:
return None
found_after_changeset_revision = False
for changeset in repo.changelog:
changeset_revision = str( repo.changectx( changeset ) )
if found_after_changeset_revision:
if changeset_revision in changeset_revisions:
return changeset_revision
elif not found_after_changeset_revision and changeset_revision == after_changeset_revision:
# We've found the changeset in the changelog for which we need to get the next downloadable changeset.
found_after_changeset_revision = True
return None
def get_next_prior_import_or_install_required_dict_entry( prior_required_dict, processed_tsr_ids ):
"""
This method is used in the Tool Shed when exporting a repository and its dependencies, and in Galaxy
when a repository and its dependencies are being installed. The order in which the prior_required_dict
is processed is critical in order to ensure that the ultimate repository import or installation order is
correctly defined. This method determines the next key / value pair from the received prior_required_dict
that should be processed.
"""
# Return the first key / value pair that is not yet processed and whose value is an empty list.
for key, value in prior_required_dict.items():
if key in processed_tsr_ids:
continue
if not value:
return key
# Return the first key / value pair that is not yet processed and whose ids in value are all included
# in processed_tsr_ids.
for key, value in prior_required_dict.items():
if key in processed_tsr_ids:
continue
all_contained = True
for required_repository_id in value:
if required_repository_id not in processed_tsr_ids:
all_contained = False
break
if all_contained:
return key
# Return the first key / value pair that is not yet processed. Hopefully this is all that is necessary
# at this point.
for key, value in prior_required_dict.items():
if key in processed_tsr_ids:
continue
return key
def get_ordered_metadata_changeset_revisions( repository, repo, downloadable=True ):
"""
Return an ordered list of changeset_revisions that are associated with metadata
where order is defined by the repository changelog.
"""
if downloadable:
metadata_revisions = repository.downloadable_revisions
else:
metadata_revisions = repository.metadata_revisions
changeset_tups = []
for repository_metadata in metadata_revisions:
changeset_revision = repository_metadata.changeset_revision
ctx = hg_util.get_changectx_for_changeset( repo, changeset_revision )
if ctx:
rev = '%04d' % ctx.rev()
else:
rev = '-1'
changeset_tups.append( ( rev, changeset_revision ) )
sorted_changeset_tups = sorted( changeset_tups )
sorted_changeset_revisions = [ str( changeset_tup[ 1 ] ) for changeset_tup in sorted_changeset_tups ]
return sorted_changeset_revisions
def get_prior_import_or_install_required_dict( app, tsr_ids, repo_info_dicts ):
"""
This method is used in the Tool Shed when exporting a repository and its dependencies,
and in Galaxy when a repository and its dependencies are being installed. Return a
dictionary whose keys are the received tsr_ids and whose values are a list of tsr_ids,
each of which is contained in the received list of tsr_ids and whose associated repository
must be imported or installed prior to the repository associated with the tsr_id key.
"""
# Initialize the dictionary.
prior_import_or_install_required_dict = {}
for tsr_id in tsr_ids:
prior_import_or_install_required_dict[ tsr_id ] = []
# Inspect the repository dependencies for each repository about to be installed and populate the dictionary.
for repo_info_dict in repo_info_dicts:
repository, repository_dependencies = get_repository_and_repository_dependencies_from_repo_info_dict( app, repo_info_dict )
if repository:
encoded_repository_id = app.security.encode_id( repository.id )
if encoded_repository_id in tsr_ids:
# We've located the database table record for one of the repositories we're about to install, so find out if it has any repository
# dependencies that require prior installation.
prior_import_or_install_ids = get_repository_ids_requiring_prior_import_or_install( app, tsr_ids, repository_dependencies )
prior_import_or_install_required_dict[ encoded_repository_id ] = prior_import_or_install_ids
return prior_import_or_install_required_dict
def get_repo_info_tuple_contents( repo_info_tuple ):
"""Take care in handling the repo_info_tuple as it evolves over time as new tool shed features are introduced."""
if len( repo_info_tuple ) == 6:
description, repository_clone_url, changeset_revision, ctx_rev, repository_owner, tool_dependencies = repo_info_tuple
repository_dependencies = None
elif len( repo_info_tuple ) == 7:
description, repository_clone_url, changeset_revision, ctx_rev, repository_owner, repository_dependencies, tool_dependencies = repo_info_tuple
return description, repository_clone_url, changeset_revision, ctx_rev, repository_owner, repository_dependencies, tool_dependencies
def get_repository_and_repository_dependencies_from_repo_info_dict( app, repo_info_dict ):
"""Return a tool_shed_repository or repository record defined by the information in the received repo_info_dict."""
repository_name = repo_info_dict.keys()[ 0 ]
repo_info_tuple = repo_info_dict[ repository_name ]
description, repository_clone_url, changeset_revision, ctx_rev, repository_owner, repository_dependencies, tool_dependencies = \
get_repo_info_tuple_contents( repo_info_tuple )
if hasattr( app, "install_model" ):
# In a tool shed client (Galaxy, or something install repositories like Galaxy)
tool_shed = get_tool_shed_from_clone_url( repository_clone_url )
repository = get_repository_for_dependency_relationship( app, tool_shed, repository_name, repository_owner, changeset_revision )
else:
# We're in the tool shed.
repository = get_repository_by_name_and_owner( app, repository_name, repository_owner )
return repository, repository_dependencies
def get_repository_by_id( app, id ):
"""Get a repository from the database via id."""
if is_tool_shed_client( app ):
return app.install_model.context.query( app.install_model.ToolShedRepository ).get( app.security.decode_id( id ) )
else:
sa_session = app.model.context.current
return sa_session.query( app.model.Repository ).get( app.security.decode_id( id ) )
def get_repository_by_name( app, name ):
"""Get a repository from the database via name."""
repository_query = get_repository_query( app )
return repository_query.filter_by( name=name ).first()
def get_repository_by_name_and_owner( app, name, owner ):
"""Get a repository from the database via name and owner"""
repository_query = get_repository_query( app )
if is_tool_shed_client( app ):
return repository_query \
.filter( and_( app.install_model.ToolShedRepository.table.c.name == name,
app.install_model.ToolShedRepository.table.c.owner == owner ) ) \
.first()
# We're in the tool shed.
user = get_user_by_username( app, owner )
if user:
return repository_query \
.filter( and_( app.model.Repository.table.c.name == name,
app.model.Repository.table.c.user_id == user.id ) ) \
.first()
return None
def get_repository_dependency_types( repository_dependencies ):
"""
Inspect the received list of repository_dependencies tuples and return boolean values
for has_repository_dependencies and has_repository_dependencies_only_if_compiling_contained_td.
"""
# Set has_repository_dependencies, which will be True only if at least one repository_dependency
# is defined with the value of
# only_if_compiling_contained_td as False.
has_repository_dependencies = False
for rd_tup in repository_dependencies:
tool_shed, name, owner, changeset_revision, prior_installation_required, only_if_compiling_contained_td = \
common_util.parse_repository_dependency_tuple( rd_tup )
if not util.asbool( only_if_compiling_contained_td ):
has_repository_dependencies = True
break
# Set has_repository_dependencies_only_if_compiling_contained_td, which will be True only if at
# least one repository_dependency is defined with the value of only_if_compiling_contained_td as True.
has_repository_dependencies_only_if_compiling_contained_td = False
for rd_tup in repository_dependencies:
tool_shed, name, owner, changeset_revision, prior_installation_required, only_if_compiling_contained_td = \
common_util.parse_repository_dependency_tuple( rd_tup )
if util.asbool( only_if_compiling_contained_td ):
has_repository_dependencies_only_if_compiling_contained_td = True
break
return has_repository_dependencies, has_repository_dependencies_only_if_compiling_contained_td
def get_repository_for_dependency_relationship( app, tool_shed, name, owner, changeset_revision ):
"""
Return an installed tool_shed_repository database record that is defined by either the current changeset
revision or the installed_changeset_revision.
"""
# This method is used only in Galaxy, not the Tool Shed. We store the port (if one exists) in the database.
tool_shed = common_util.remove_protocol_from_tool_shed_url( tool_shed )
if tool_shed is None or name is None or owner is None or changeset_revision is None:
message = "Unable to retrieve the repository record from the database because one or more of the following "
message += "required parameters is None: tool_shed: %s, name: %s, owner: %s, changeset_revision: %s " % \
( str( tool_shed ), str( name ), str( owner ), str( changeset_revision ) )
raise Exception( message )
repository = get_tool_shed_repository_by_shed_name_owner_installed_changeset_revision( app=app,
tool_shed=tool_shed,
name=name,
owner=owner,
installed_changeset_revision=changeset_revision )
if not repository:
repository = get_tool_shed_repository_by_shed_name_owner_changeset_revision( app=app,
tool_shed=tool_shed,
name=name,
owner=owner,
changeset_revision=changeset_revision )
if not repository:
# The received changeset_revision is no longer installable, so get the next changeset_revision
# in the repository's changelog in the tool shed that is associated with repository_metadata.
tool_shed_url = common_util.get_tool_shed_url_from_tool_shed_registry( app, tool_shed )
params = '?name=%s&owner=%s&changeset_revision=%s' % ( name, owner, changeset_revision )
url = common_util.url_join( tool_shed_url,
'repository/next_installable_changeset_revision%s' % params )
text = common_util.tool_shed_get( app, tool_shed_url, url )
if text:
repository = get_tool_shed_repository_by_shed_name_owner_changeset_revision( app=app,
tool_shed=tool_shed,
name=name,
owner=owner,
changeset_revision=text )
return repository
def get_repository_file_contents( file_path ):
"""Return the display-safe contents of a repository file for display in a browser."""
if checkers.is_gzip( file_path ):
return '<br/>gzip compressed file<br/>'
elif checkers.is_bz2( file_path ):
return '<br/>bz2 compressed file<br/>'
elif checkers.check_zip( file_path ):
return '<br/>zip compressed file<br/>'
elif checkers.check_binary( file_path ):
return '<br/>Binary file<br/>'
else:
safe_str = ''
for i, line in enumerate( open( file_path ) ):
safe_str = '%s%s' % ( safe_str, basic_util.to_html_string( line ) )
# Stop reading after string is larger than MAX_CONTENT_SIZE.
if len( safe_str ) > MAX_CONTENT_SIZE:
large_str = \
'<br/>File contents truncated because file size is larger than maximum viewing size of %s<br/>' % \
util.nice_size( MAX_CONTENT_SIZE )
safe_str = '%s%s' % ( safe_str, large_str )
break
if len( safe_str ) > basic_util.MAX_DISPLAY_SIZE:
# Eliminate the middle of the file to display a file no larger than basic_util.MAX_DISPLAY_SIZE.
# This may not be ideal if the file is larger than MAX_CONTENT_SIZE.
join_by_str = \
"<br/><br/>...some text eliminated here because file size is larger than maximum viewing size of %s...<br/><br/>" % \
util.nice_size( basic_util.MAX_DISPLAY_SIZE )
safe_str = util.shrink_string_by_size( safe_str,
basic_util.MAX_DISPLAY_SIZE,
join_by=join_by_str,
left_larger=True,
beginning_on_size_error=True )
return safe_str
def get_repository_files( folder_path ):
"""Return the file hierarchy of a tool shed repository."""
contents = []
for item in os.listdir( folder_path ):
# Skip .hg directories
if item.startswith( '.hg' ):
continue
if os.path.isdir( os.path.join( folder_path, item ) ):
# Append a '/' character so that our jquery dynatree will function properly.
item = '%s/' % item
contents.append( item )
if contents:
contents.sort()
return contents
def get_repository_ids_requiring_prior_import_or_install( app, tsr_ids, repository_dependencies ):
"""
This method is used in the Tool Shed when exporting a repository and its dependencies,
and in Galaxy when a repository and its dependencies are being installed. Inspect the
received repository_dependencies and determine if the encoded id of each required
repository is in the received tsr_ids. If so, then determine whether that required
repository should be imported / installed prior to its dependent repository. Return a
list of encoded repository ids, each of which is contained in the received list of tsr_ids,
and whose associated repositories must be imported / installed prior to the dependent
repository associated with the received repository_dependencies.
"""
prior_tsr_ids = []
if repository_dependencies:
for key, rd_tups in repository_dependencies.items():
if key in [ 'description', 'root_key' ]:
continue
for rd_tup in rd_tups:
tool_shed, \
name, \
owner, \
changeset_revision, \
prior_installation_required, \
only_if_compiling_contained_td = \
common_util.parse_repository_dependency_tuple( rd_tup )
# If only_if_compiling_contained_td is False, then the repository dependency
# is not required to be installed prior to the dependent repository even if
# prior_installation_required is True. This is because the only meaningful
# content of the repository dependency is its contained tool dependency, which
# is required in order to compile the dependent repository's tool dependency.
# In the scenario where the repository dependency is not installed prior to the
# dependent repository's tool dependency compilation process, the tool dependency
# compilation framework will install the repository dependency prior to compilation
# of the dependent repository's tool dependency.
if not util.asbool( only_if_compiling_contained_td ):
if util.asbool( prior_installation_required ):
if is_tool_shed_client( app ):
# We store the port, if one exists, in the database.
tool_shed = common_util.remove_protocol_from_tool_shed_url( tool_shed )
repository = get_repository_for_dependency_relationship( app,
tool_shed,
name,
owner,
changeset_revision )
else:
repository = get_repository_by_name_and_owner( app, name, owner )
if repository:
encoded_repository_id = app.security.encode_id( repository.id )
if encoded_repository_id in tsr_ids:
prior_tsr_ids.append( encoded_repository_id )
return prior_tsr_ids
def get_repository_in_tool_shed( app, id ):
"""Get a repository on the tool shed side from the database via id."""
sa_session = app.model.context.current
return sa_session.query( app.model.Repository ).get( app.security.decode_id( id ) )
def get_repository_metadata_by_changeset_revision( app, id, changeset_revision ):
"""Get metadata for a specified repository change set from the database."""
# Make sure there are no duplicate records, and return the single unique record for the changeset_revision.
# Duplicate records were somehow created in the past. The cause of this issue has been resolved, but we'll
# leave this method as is for a while longer to ensure all duplicate records are removed.
sa_session = app.model.context.current
all_metadata_records = sa_session.query( app.model.RepositoryMetadata ) \
.filter( and_( app.model.RepositoryMetadata.table.c.repository_id == app.security.decode_id( id ),
app.model.RepositoryMetadata.table.c.changeset_revision == changeset_revision ) ) \
.order_by( app.model.RepositoryMetadata.table.c.update_time.desc() ) \
.all()
if len( all_metadata_records ) > 1:
# Delete all records older than the last one updated.
for repository_metadata in all_metadata_records[ 1: ]:
sa_session.delete( repository_metadata )
sa_session.flush()
return all_metadata_records[ 0 ]
elif all_metadata_records:
return all_metadata_records[ 0 ]
return None
def get_repository_owner( cleaned_repository_url ):
"""Gvien a "cleaned" repository clone URL, return the owner of the repository."""
items = cleaned_repository_url.split( '/repos/' )
repo_path = items[ 1 ]
if repo_path.startswith( '/' ):
repo_path = repo_path.replace( '/', '', 1 )
return repo_path.lstrip( '/' ).split( '/' )[ 0 ]
def get_repository_owner_from_clone_url( repository_clone_url ):
"""Given a repository clone URL, return the owner of the repository."""
tmp_url = common_util.remove_protocol_and_user_from_clone_url( repository_clone_url )
tool_shed = tmp_url.split( '/repos/' )[ 0 ].rstrip( '/' )
return get_repository_owner( tmp_url )
def get_tool_panel_config_tool_path_install_dir( app, repository ):
"""
Return shed-related tool panel config, the tool_path configured in it, and the relative path to
the directory where the repository is installed. This method assumes all repository tools are
defined in a single shed-related tool panel config.
"""
tool_shed = common_util.remove_port_from_tool_shed_url( str( repository.tool_shed ) )
relative_install_dir = '%s/repos/%s/%s/%s' % ( tool_shed,
str( repository.owner ),
str( repository.name ),
str( repository.installed_changeset_revision ) )
# Get the relative tool installation paths from each of the shed tool configs.
shed_config_dict = repository.get_shed_config_dict( app )
if not shed_config_dict:
# Just pick a semi-random shed config.
for shed_config_dict in app.toolbox.shed_tool_confs:
if ( repository.dist_to_shed and shed_config_dict[ 'config_filename' ] == app.config.migrated_tools_config ) \
or ( not repository.dist_to_shed and shed_config_dict[ 'config_filename' ] != app.config.migrated_tools_config ):
break
shed_tool_conf = shed_config_dict[ 'config_filename' ]
tool_path = shed_config_dict[ 'tool_path' ]
return shed_tool_conf, tool_path, relative_install_dir
def get_tool_path_by_shed_tool_conf_filename( app, shed_tool_conf ):
"""
Return the tool_path config setting for the received shed_tool_conf file by searching the tool box's in-memory list of shed_tool_confs for the
dictionary whose config_filename key has a value matching the received shed_tool_conf.
"""
for shed_tool_conf_dict in app.toolbox.shed_tool_confs:
config_filename = shed_tool_conf_dict[ 'config_filename' ]
if config_filename == shed_tool_conf:
return shed_tool_conf_dict[ 'tool_path' ]
else:
file_name = basic_util.strip_path( config_filename )
if file_name == shed_tool_conf:
return shed_tool_conf_dict[ 'tool_path' ]
return None
def get_tool_shed_repository_by_id( app, repository_id ):
"""Return a tool shed repository database record defined by the id."""
# This method is used only in Galaxy, not the tool shed.
return app.install_model.context.query( app.install_model.ToolShedRepository ) \
.filter( app.install_model.ToolShedRepository.table.c.id == app.security.decode_id( repository_id ) ) \
.first()
def get_tool_shed_repository_by_shed_name_owner_changeset_revision( app, tool_shed, name, owner, changeset_revision ):
"""
Return a tool shed repository database record defined by the combination of a tool_shed, repository name,
repository owner and current changeet_revision.
"""
# This method is used only in Galaxy, not the Tool Shed.
repository_query = get_repository_query( app )
# We store the port, if one exists, in the database.
tool_shed = common_util.remove_protocol_from_tool_shed_url( tool_shed )
return repository_query \
.filter( and_( app.install_model.ToolShedRepository.table.c.tool_shed == tool_shed,
app.install_model.ToolShedRepository.table.c.name == name,
app.install_model.ToolShedRepository.table.c.owner == owner,
app.install_model.ToolShedRepository.table.c.changeset_revision == changeset_revision ) ) \
.first()
def get_tool_shed_repository_by_shed_name_owner_installed_changeset_revision( app, tool_shed, name, owner,
installed_changeset_revision ):
"""
Return a tool shed repository database record defined by the combination of a tool_shed,
repository name, repository owner and installed_changeet_revision.
"""
# This method is used only in Galaxy, not the tool shed.
repository_query = get_repository_query( app )
# We store the port, if one exists, in the database.
tool_shed = common_util.remove_protocol_from_tool_shed_url( tool_shed )
return repository_query \
.filter( and_( app.install_model.ToolShedRepository.table.c.tool_shed == tool_shed,
app.install_model.ToolShedRepository.table.c.name == name,
app.install_model.ToolShedRepository.table.c.owner == owner,
app.install_model.ToolShedRepository.table.c.installed_changeset_revision == installed_changeset_revision ) ) \
.first()
def get_tool_shed_status_for_installed_repository( app, repository ):
"""
Send a request to the tool shed to retrieve information about newer installable repository revisions,
current revision updates, whether the repository revision is the latest downloadable revision, and
whether the repository has been deprecated in the tool shed. The received repository is a ToolShedRepository
object from Galaxy.
"""
tool_shed_url = common_util.get_tool_shed_url_from_tool_shed_registry( app, str( repository.tool_shed ) )
params = '?name=%s&owner=%s&changeset_revision=%s' % ( str( repository.name ),
str( repository.owner ),
str( repository.changeset_revision ) )
url = common_util.url_join( tool_shed_url,
'repository/status_for_installed_repository%s' % params )
try:
encoded_tool_shed_status_dict = common_util.tool_shed_get( app, tool_shed_url, url )
tool_shed_status_dict = encoding_util.tool_shed_decode( encoded_tool_shed_status_dict )
return tool_shed_status_dict
except HTTPError, e:
# This should handle backward compatility to the Galaxy 12/20/12 release. We used to only handle updates for an installed revision
# using a boolean value.
log.debug( "Error attempting to get tool shed status for installed repository %s: %s\nAttempting older 'check_for_updates' method.\n" % \
( str( repository.name ), str( e ) ) )
params = '?name=%s&owner=%s&changeset_revision=%s&from_update_manager=True' % ( str( repository.name ),
str( repository.owner ),
str( repository.changeset_revision ) )
url = common_util.url_join( tool_shed_url,
'repository/check_for_updates%s' % params )
try:
# The value of text will be 'true' or 'false', depending upon whether there is an update available for the installed revision.
text = common_util.tool_shed_get( app, tool_shed_url, url )
return dict( revision_update=text )
except Exception, e:
# The required tool shed may be unavailable, so default the revision_update value to 'false'.
return dict( revision_update='false' )
except Exception, e:
log.exception( "Error attempting to get tool shed status for installed repository %s: %s" % ( str( repository.name ), str( e ) ) )
return {}
def get_tool_shed_repository_status_label( app, tool_shed_repository=None, name=None, owner=None, changeset_revision=None, repository_clone_url=None ):
"""Return a color-coded label for the status of the received tool-shed_repository installed into Galaxy."""
if tool_shed_repository is None:
if name is not None and owner is not None and repository_clone_url is not None:
tool_shed = get_tool_shed_from_clone_url( repository_clone_url )
tool_shed_repository = get_tool_shed_repository_by_shed_name_owner_installed_changeset_revision( app,
tool_shed,
name,
owner,
changeset_revision )
if tool_shed_repository:
status_label = tool_shed_repository.status
if tool_shed_repository.status in [ app.install_model.ToolShedRepository.installation_status.CLONING,
app.install_model.ToolShedRepository.installation_status.SETTING_TOOL_VERSIONS,
app.install_model.ToolShedRepository.installation_status.INSTALLING_REPOSITORY_DEPENDENCIES,
app.install_model.ToolShedRepository.installation_status.INSTALLING_TOOL_DEPENDENCIES,
app.install_model.ToolShedRepository.installation_status.LOADING_PROPRIETARY_DATATYPES ]:
bgcolor = app.install_model.ToolShedRepository.states.INSTALLING
elif tool_shed_repository.status in [ app.install_model.ToolShedRepository.installation_status.NEW,
app.install_model.ToolShedRepository.installation_status.UNINSTALLED ]:
bgcolor = app.install_model.ToolShedRepository.states.UNINSTALLED
elif tool_shed_repository.status in [ app.install_model.ToolShedRepository.installation_status.ERROR ]:
bgcolor = app.install_model.ToolShedRepository.states.ERROR
elif tool_shed_repository.status in [ app.install_model.ToolShedRepository.installation_status.DEACTIVATED ]:
bgcolor = app.install_model.ToolShedRepository.states.WARNING
elif tool_shed_repository.status in [ app.install_model.ToolShedRepository.installation_status.INSTALLED ]:
if tool_shed_repository.repository_dependencies_being_installed:
bgcolor = app.install_model.ToolShedRepository.states.WARNING
status_label = '%s, %s' % ( status_label,
app.install_model.ToolShedRepository.installation_status.INSTALLING_REPOSITORY_DEPENDENCIES )
elif tool_shed_repository.missing_repository_dependencies:
bgcolor = app.install_model.ToolShedRepository.states.WARNING
status_label = '%s, missing repository dependencies' % status_label
elif tool_shed_repository.tool_dependencies_being_installed:
bgcolor = app.install_model.ToolShedRepository.states.WARNING
status_label = '%s, %s' % ( status_label,
app.install_model.ToolShedRepository.installation_status.INSTALLING_TOOL_DEPENDENCIES )
elif tool_shed_repository.missing_tool_dependencies:
bgcolor = app.install_model.ToolShedRepository.states.WARNING
status_label = '%s, missing tool dependencies' % status_label
else:
bgcolor = app.install_model.ToolShedRepository.states.OK
else:
bgcolor = app.install_model.ToolShedRepository.states.ERROR
else:
bgcolor = app.install_model.ToolShedRepository.states.WARNING
status_label = 'unknown status'
return '<div class="count-box state-color-%s">%s</div>' % ( bgcolor, status_label )
def get_updated_changeset_revisions( app, name, owner, changeset_revision ):
"""
Return a string of comma-separated changeset revision hashes for all available updates to the received changeset
revision for the repository defined by the received name and owner.
"""
repository = get_repository_by_name_and_owner( app, name, owner )
repo = hg_util.get_repo_for_repository( app, repository=repository, repo_path=None, create=False )
# Get the upper bound changeset revision.
upper_bound_changeset_revision = get_next_downloadable_changeset_revision( repository, repo, changeset_revision )
# Build the list of changeset revision hashes defining each available update up to, but excluding
# upper_bound_changeset_revision.
changeset_hashes = []
for changeset in hg_util.reversed_lower_upper_bounded_changelog( repo, changeset_revision, upper_bound_changeset_revision ):
# Make sure to exclude upper_bound_changeset_revision.
if changeset != upper_bound_changeset_revision:
changeset_hashes.append( str( repo.changectx( changeset ) ) )
if changeset_hashes:
changeset_hashes_str = ','.join( changeset_hashes )
return changeset_hashes_str
return ''
def get_updated_changeset_revisions_from_tool_shed( app, tool_shed_url, name, owner, changeset_revision ):
"""
Get all appropriate newer changeset revisions for the repository defined by
the received tool_shed_url / name / owner combination.
"""
tool_shed_url = common_util.get_tool_shed_url_from_tool_shed_registry( app, tool_shed_url )
if tool_shed_url is None or name is None or owner is None or changeset_revision is None:
message = "Unable to get updated changeset revisions from the Tool Shed because one or more of the following "
message += "required parameters is None: tool_shed_url: %s, name: %s, owner: %s, changeset_revision: %s " % \
( str( tool_shed_url ), str( name ), str( owner ), str( changeset_revision ) )
raise Exception( message )
params = '?name=%s&owner=%s&changeset_revision=%s' % ( name, owner, changeset_revision )
url = common_util.url_join( tool_shed_url,
'repository/updated_changeset_revisions%s' % params )
text = common_util.tool_shed_get( app, tool_shed_url, url )
return text
def get_user( app, id ):
"""Get a user from the database by id."""
sa_session = app.model.context.current
return sa_session.query( app.model.User ).get( app.security.decode_id( id ) )
def get_user_by_username( app, username ):
"""Get a user from the database by username."""
sa_session = app.model.context.current
try:
user = sa_session.query( app.model.User ) \
.filter( app.model.User.table.c.username == username ) \
.one()
return user
except Exception, e:
return None
def handle_email_alerts( app, host, repository, content_alert_str='', new_repo_alert=False, admin_only=False ):
"""
There are 2 complementary features that enable a tool shed user to receive email notification:
1. Within User Preferences, they can elect to receive email when the first (or first valid)
change set is produced for a new repository.
2. When viewing or managing a repository, they can check the box labeled "Receive email alerts"
which caused them to receive email alerts when updates to the repository occur. This same feature
is available on a per-repository basis on the repository grid within the tool shed.
There are currently 4 scenarios for sending email notification when a change is made to a repository:
1. An admin user elects to receive email when the first change set is produced for a new repository
from User Preferences. The change set does not have to include any valid content. This allows for
the capture of inappropriate content being uploaded to new repositories.
2. A regular user elects to receive email when the first valid change set is produced for a new repository
from User Preferences. This differs from 1 above in that the user will not receive email until a
change set tha tincludes valid content is produced.
3. An admin user checks the "Receive email alerts" check box on the manage repository page. Since the
user is an admin user, the email will include information about both HTML and image content that was
included in the change set.
4. A regular user checks the "Receive email alerts" check box on the manage repository page. Since the
user is not an admin user, the email will not include any information about both HTML and image content
that was included in the change set.
"""
sa_session = app.model.context.current
repo = hg_util.get_repo_for_repository( app, repository=repository, repo_path=None, create=False )
sharable_link = generate_sharable_link_for_repository_in_tool_shed( repository, changeset_revision=None )
smtp_server = app.config.smtp_server
if smtp_server and ( new_repo_alert or repository.email_alerts ):
# Send email alert to users that want them.
if app.config.email_from is not None:
email_from = app.config.email_from
elif host.split( ':' )[0] == 'localhost':
email_from = 'galaxy-no-reply@' + socket.getfqdn()
else:
email_from = 'galaxy-no-reply@' + host.split( ':' )[0]
tip_changeset = repo.changelog.tip()
ctx = repo.changectx( tip_changeset )
try:
username = ctx.user().split()[0]
except:
username = ctx.user()
# We'll use 2 template bodies because we only want to send content
# alerts to tool shed admin users.
if new_repo_alert:
template = new_repo_email_alert_template
else:
template = email_alert_template
display_date = hg_util.get_readable_ctx_date( ctx )
admin_body = string.Template( template ).safe_substitute( host=host,
sharable_link=sharable_link,
repository_name=repository.name,
revision='%s:%s' %( str( ctx.rev() ), ctx ),
display_date=display_date,
description=ctx.description(),
username=username,
content_alert_str=content_alert_str )
body = string.Template( template ).safe_substitute( host=host,
sharable_link=sharable_link,
repository_name=repository.name,
revision='%s:%s' %( str( ctx.rev() ), ctx ),
display_date=display_date,
description=ctx.description(),
username=username,
content_alert_str='' )
admin_users = app.config.get( "admin_users", "" ).split( "," )
frm = email_from
if new_repo_alert:
subject = "Galaxy tool shed alert for new repository named %s" % str( repository.name )
subject = subject[ :80 ]
email_alerts = []
for user in sa_session.query( app.model.User ) \
.filter( and_( app.model.User.table.c.deleted == False,
app.model.User.table.c.new_repo_alert == True ) ):
if admin_only:
if user.email in admin_users:
email_alerts.append( user.email )
else:
email_alerts.append( user.email )
else:
subject = "Galaxy tool shed update alert for repository named %s" % str( repository.name )
email_alerts = json.loads( repository.email_alerts )
for email in email_alerts:
to = email.strip()
# Send it
try:
if to in admin_users:
util.send_mail( frm, to, subject, admin_body, app.config )
else:
util.send_mail( frm, to, subject, body, app.config )
except Exception, e:
log.exception( "An error occurred sending a tool shed repository update alert by email." )
def is_tool_shed_client( app ):
"""
The tool shed and clients to the tool (i.e. Galaxy) require a lot
of similar functionality in this file but with small differences. This
method should determine if the app performing the action is the tool shed
or a client of the tool shed.
"""
return hasattr( app, "install_model" )
def open_repository_files_folder( folder_path ):
"""
Return a list of dictionaries, each of which contains information for a file or directory contained
within a directory in a repository file hierarchy.
"""
try:
files_list = get_repository_files( folder_path )
except OSError, e:
if str( e ).find( 'No such file or directory' ) >= 0:
# We have a repository with no contents.
return []
folder_contents = []
for filename in files_list:
is_folder = False
if filename and filename[ -1 ] == os.sep:
is_folder = True
if filename:
full_path = os.path.join( folder_path, filename )
node = { "title" : filename,
"isFolder" : is_folder,
"isLazy" : is_folder,
"tooltip" : full_path,
"key" : full_path }
folder_contents.append( node )
return folder_contents
def repository_was_previously_installed( app, tool_shed_url, repository_name, repo_info_tuple, from_tip=False ):
"""
Find out if a repository is already installed into Galaxy - there are several scenarios where this
is necessary. For example, this method will handle the case where the repository was previously
installed using an older changeset_revsion, but later the repository was updated in the tool shed
and now we're trying to install the latest changeset revision of the same repository instead of
updating the one that was previously installed. We'll look in the database instead of on disk since
the repository may be currently uninstalled.
"""
tool_shed_url = common_util.get_tool_shed_url_from_tool_shed_registry( app, tool_shed_url )
description, repository_clone_url, changeset_revision, ctx_rev, repository_owner, repository_dependencies, tool_dependencies = \
get_repo_info_tuple_contents( repo_info_tuple )
tool_shed = get_tool_shed_from_clone_url( repository_clone_url )
# See if we can locate the repository using the value of changeset_revision.
tool_shed_repository = get_tool_shed_repository_by_shed_name_owner_installed_changeset_revision( app,
tool_shed,
repository_name,
repository_owner,
changeset_revision )
if tool_shed_repository:
return tool_shed_repository, changeset_revision
# Get all previous changeset revisions from the tool shed for the repository back to, but excluding,
# the previous valid changeset revision to see if it was previously installed using one of them.
params = '?galaxy_url=%s&name=%s&owner=%s&changeset_revision=%s&from_tip=%s' % ( url_for( '/', qualified=True ),
str( repository_name ),
str( repository_owner ),
changeset_revision,
str( from_tip ) )
url = common_util.url_join( tool_shed_url,
'repository/previous_changeset_revisions%s' % params )
text = common_util.tool_shed_get( app, tool_shed_url, url )
if text:
changeset_revisions = util.listify( text )
for previous_changeset_revision in changeset_revisions:
tool_shed_repository = get_tool_shed_repository_by_shed_name_owner_installed_changeset_revision( app,
tool_shed,
repository_name,
repository_owner,
previous_changeset_revision )
if tool_shed_repository:
return tool_shed_repository, previous_changeset_revision
return None, None
def set_image_paths( app, encoded_repository_id, text ):
"""
Handle tool help image display for tools that are contained in repositories in
the tool shed or installed into Galaxy as well as image display in repository
README files. This method will determine the location of the image file and
return the path to it that will enable the caller to open the file.
"""
if text:
if is_tool_shed_client( app ):
route_to_images = 'admin_toolshed/static/images/%s' % encoded_repository_id
else:
# We're in the tool shed.
route_to_images = '/repository/static/images/%s' % encoded_repository_id
# We used to require $PATH_TO_IMAGES, but we now eliminate it if it's used.
text = text.replace( '$PATH_TO_IMAGES', '' )
# Eliminate the invalid setting of ./static/images since the routes will
# properly display images contained in that directory.
text = text.replace( './static/images', '' )
# Eliminate the default setting of /static/images since the routes will
# properly display images contained in that directory.
text = text.replace( '/static/images', '' )
# Use regex to instantiate routes into the defined image paths, but replace
# paths that start with neither http:// nor https://, which will allow for
# settings like .. images:: http_files/images/help.png
for match in re.findall( '.. image:: (?!http)/?(.+)', text ):
text = text.replace( match, match.replace( '/', '%2F' ) )
text = re.sub( r'\.\. image:: (?!https?://)/?(.+)', r'.. image:: %s/\1' % route_to_images, text )
return text
def tool_shed_is_this_tool_shed( toolshed_base_url ):
"""Determine if a tool shed is the current tool shed."""
cleaned_toolshed_base_url = common_util.remove_protocol_from_tool_shed_url( toolshed_base_url )
cleaned_tool_shed = common_util.remove_protocol_from_tool_shed_url( str( url_for( '/', qualified=True ) ) )
return cleaned_toolshed_base_url == cleaned_tool_shed
| [
11748,
33918,
198,
11748,
18931,
198,
11748,
28686,
198,
11748,
302,
198,
11748,
4423,
346,
198,
11748,
4731,
198,
11748,
44161,
282,
26599,
13,
579,
13,
41194,
198,
198,
6738,
16161,
1330,
7736,
198,
6738,
16161,
13,
12384,
1330,
19016,
... | 2.318275 | 26,210 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 2022/3/7 5:12 下午
# @File : stream_push.py
# @author : Akaya
# @Software: PyCharm
# stream_push : SIGABRT error
import cv2 as cv
import time
import subprocess as sp
import multiprocessing
import platform
import psutil
# 对获取的帧做一些画面处理的方法,返回完成处理的帧。
# 向服务器推送
# 启动运行
if __name__ == '__main__':
# 根据不同的操作系统,设定读取哪个摄像头
cap = None
if platform.system() == 'Linux': # 如果是Linux系统
cap = cv.VideoCapture(10) # 绑定编号为10的摄像头
cap.set(3, 640) # 设置摄像头画面的宽
cap.set(4, 480) # 设置摄像头画面的高
elif platform.system() == 'Darwin': # 如果是苹果的OS X系统
cap = cv.VideoCapture(0) # 绑定编号为0的摄像头
cap.set(3, 640)
cap.set(4, 480)
else: # 没有windows系统,所以就不判断了
exit(0)
rtmpUrl = "rtmp://10.10.14.120:1935/stream/pupils_trace" # 用vcl等直播软件播放时,也用这个地址
raw_q = multiprocessing.Queue() # 定义一个向推流对象传入帧及其他信息的队列
my_pusher = stream_pusher(rtmp_url=rtmpUrl, raw_frame_q=raw_q) # 实例化一个对象
my_pusher.run() # 让这个对象在后台推送视频流
for i in range(1000):
_, raw_frame = cap.read()
info = (raw_frame, '2', '3', '4') # 把需要送入队列的内容进行封装
if not raw_q.full(): # 如果队列没满
raw_q.put(info) # 送入队列
cv.waitKey(1)
cap.release()
print('finish')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2488,
7575,
220,
220,
220,
1058,
33160,
14,
18,
14,
22,
642,
25,
1065,
220,
10310,
233,
39355,
230,
198,
2,
... | 1.304304 | 999 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import os
import re
import flask
from oslo_config import cfg
from oslo_utils import uuidutils
import werkzeug
from ironic_inspector import api_tools
from ironic_inspector.common.i18n import _
from ironic_inspector.common import ironic as ir_utils
from ironic_inspector.common import swift
from ironic_inspector import conf # noqa
from ironic_inspector import introspect
from ironic_inspector import node_cache
from ironic_inspector import process
from ironic_inspector import rules
from ironic_inspector import utils
CONF = cfg.CONF
app = flask.Flask(__name__)
LOG = utils.getProcessingLogger(__name__)
MINIMUM_API_VERSION = (1, 0)
CURRENT_API_VERSION = (1, 12)
DEFAULT_API_VERSION = CURRENT_API_VERSION
_LOGGING_EXCLUDED_KEYS = ('logs',)
_DEFAULT_API_VERSION = _format_version(DEFAULT_API_VERSION)
@app.before_request
@app.after_request
def generate_introspection_status(node):
"""Return a dict representing current node status.
:param node: a NodeInfo instance
:return: dictionary
"""
started_at = node.started_at.isoformat()
finished_at = node.finished_at.isoformat() if node.finished_at else None
status = {}
status['uuid'] = node.uuid
status['finished'] = bool(node.finished_at)
status['state'] = node.state
status['started_at'] = started_at
status['finished_at'] = finished_at
status['error'] = node.error
status['links'] = create_link_object(
["v%s/introspection/%s" % (CURRENT_API_VERSION[0], node.uuid)])
return status
@app.route('/', methods=['GET'])
@convert_exceptions
@app.route('/<version>', methods=['GET'])
@convert_exceptions
@app.route('/v1/continue', methods=['POST'])
@convert_exceptions
# TODO(sambetts) Add API discovery for this endpoint
@app.route('/v1/introspection/<node_id>', methods=['GET', 'POST'])
@convert_exceptions
@app.route('/v1/introspection', methods=['GET'])
@convert_exceptions
@app.route('/v1/introspection/<node_id>/abort', methods=['POST'])
@convert_exceptions
@app.route('/v1/introspection/<node_id>/data', methods=['GET'])
@convert_exceptions
@app.route('/v1/introspection/<node_id>/data/unprocessed', methods=['POST'])
@convert_exceptions
@app.route('/v1/rules', methods=['GET', 'POST', 'DELETE'])
@convert_exceptions
@app.route('/v1/rules/<uuid>', methods=['GET', 'DELETE'])
@convert_exceptions
@app.errorhandler(404)
| [
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
7330,
257,
4866,
286,
262,
13789,
379,
198,
2,... | 2.869268 | 1,025 |
# O(n^2) overall | [
2,
440,
7,
77,
61,
17,
8,
4045
] | 2 | 8 |
import random
import sys
import shutil
import glob
import os
import subprocess
k = 4
os.chdir("all")
all_files = glob.glob("*.csv")
# random.shuffle(all_files)
# test_files = random.sample(all_files, len(all_files)//k)
# train_files = list(set(all_files) - set(test_files))
os.chdir("..")
test_files = ['0914.csv', '0480.csv', '0362.csv', '0140.csv', '0779.csv', '0181.csv', '0172.csv', '0414.csv', '0250.csv', '0205.csv', '0404.csv', '0039.csv', '0407.csv', '0930.csv', '0885.csv', '0460.csv', '0322.csv', '1045.csv', '0114.csv', '0355.csv', '0975.csv', '0690.csv', '0174.csv', '0199.csv', '0856.csv', '0438.csv', '0840.csv', '0716.csv', '0531.csv', '0633.csv', '0073.csv', '1014.csv', '0126.csv', '0240.csv', '0580.csv', '1029.csv', '0288.csv', '0676.csv', '0442.csv', '0697.csv', '0443.csv', '0216.csv', '0616.csv', '0150.csv', '0447.csv', '0819.csv', '1009.csv', '0493.csv', '1054.csv', '0622.csv', '1025.csv', '0836.csv', '0211.csv', '0667.csv', '0703.csv', '0610.csv', '0320.csv', '0906.csv', '0682.csv', '0623.csv', '0451.csv', '0119.csv', '0235.csv', '0889.csv', '0793.csv', '0585.csv', '0596.csv', '0503.csv', '0308.csv', '0312.csv', '0476.csv', '0754.csv', '0239.csv', '0428.csv', '0487.csv', '0956.csv', '0802.csv', '0743.csv', '0650.csv', '0831.csv', '0720.csv', '1040.csv', '1001.csv', '0044.csv', '0019.csv', '0817.csv', '0721.csv', '0768.csv', '0574.csv', '0203.csv', '0466.csv', '0278.csv', '0265.csv', '0786.csv', '0471.csv', '0201.csv', '0669.csv', '0054.csv', '0731.csv', '0815.csv', '0241.csv', '0611.csv', '1050.csv', '0258.csv', '0711.csv', '0967.csv', '0867.csv', '0894.csv', '0518.csv', '0692.csv', '0219.csv', '1067.csv', '0630.csv', '0304.csv', '0999.csv', '0828.csv', '0918.csv', '0128.csv', '0534.csv', '0353.csv', '0968.csv', '0825.csv', '0012.csv', '0058.csv', '0655.csv', '1017.csv', '0701.csv', '0243.csv', '0935.csv', '0259.csv', '0321.csv', '0735.csv', '0557.csv', '0427.csv', '0154.csv', '0522.csv', '0723.csv', '0023.csv', '0117.csv', '0824.csv', '0387.csv', '0749.csv', '0352.csv', '1004.csv', '1076.csv', '0046.csv', '0346.csv', '1074.csv', '0299.csv', '0358.csv', '0866.csv', '0842.csv', '0483.csv', '0138.csv', '0348.csv', '0087.csv', '0280.csv', '0535.csv', '0898.csv', '0059.csv', '0780.csv', '0925.csv', '0392.csv', '0614.csv', '0037.csv', '0521.csv', '0873.csv', '0970.csv', '0916.csv', '0853.csv', '0467.csv', '0991.csv', '0759.csv', '0713.csv', '0277.csv', '1068.csv', '0997.csv', '0136.csv', '0328.csv', '0593.csv', '0245.csv', '0984.csv', '0345.csv', '0510.csv', '0306.csv', '0942.csv', '0847.csv', '0439.csv', '0987.csv', '1065.csv', '0843.csv', '0875.csv', '0671.csv', '0269.csv', '1037.csv', '0903.csv', '0256.csv', '0349.csv', '0771.csv', '0091.csv', '0444.csv', '0152.csv', '1006.csv', '1048.csv', '0560.csv', '0341.csv', '0196.csv', '0375.csv', '0354.csv', '0042.csv', '0185.csv', '0083.csv', '0583.csv', '0937.csv', '0380.csv', '0162.csv', '0895.csv', '0396.csv', '0384.csv', '0973.csv', '0053.csv', '0486.csv', '0533.csv', '0093.csv', '0040.csv', '0463.csv', '0705.csv', '0877.csv', '0086.csv', '0270.csv', '0344.csv', '0990.csv', '0473.csv', '1024.csv', '0652.csv', '0561.csv', '0310.csv', '1018.csv', '0319.csv', '0566.csv', '0477.csv', '1021.csv', '0760.csv', '0142.csv', '0821.csv', '0171.csv', '0772.csv', '0569.csv', '0478.csv', '0284.csv', '0156.csv', '0852.csv', '1061.csv', '0303.csv', '0386.csv', '0960.csv', '0455.csv', '0774.csv', '0625.csv', '0157.csv', '0262.csv', '0548.csv', '0283.csv', '0634.csv', '0972.csv', '0553.csv', '0879.csv', '0811.csv', '0357.csv']
# for each_files in test_files:
# shutil.copy2(str("all/"+each_files), str("data/dev/"+each_files))
# for each_files in train_files:
# shutil.copy2(str("all/"+each_files), str("all/train/"+each_files))
for each_file in all_files:
if each_file in test_files:
shutil.copy2(str("all/" + each_file), str("data/dev/" + each_file))
else:
shutil.copy2(str("all/" + each_file), str("data/train/" + each_file))
print("TEST: ",len(test_files))
# print("TRAIN: ",len(train_files)) | [
11748,
4738,
201,
198,
11748,
25064,
201,
198,
11748,
4423,
346,
201,
198,
11748,
15095,
201,
198,
11748,
28686,
201,
198,
11748,
850,
14681,
201,
198,
201,
198,
201,
198,
74,
796,
604,
201,
198,
201,
198,
418,
13,
354,
15908,
7203,
... | 2.058586 | 1,980 |
# Originally from https://github.com/kuza55/keras-extras/blob/master/utils/multi_gpu.py
# SHHawley updated to Keras 2, added get_serial_part & get_available_gpus, and gpu_count=-1 flag
from keras.layers import concatenate
from keras.layers.core import Lambda
import keras.backend as K
from keras.models import Model
from keras.callbacks import Callback
import tensorflow as tf
from tensorflow.python.client import device_lib
def get_serial_part(model, parallel=True):
"""
Undoes make_parallel, but keyword included in case it's called on a serial model
TODO: even better would be an internal check to see if model is parallel or serial
"""
if (parallel):
return model.layers[-2]
else:
return model # if model's already serial, return original model
def get_available_gpus():
"""
from https://stackoverflow.com/questions/38559755/how-to-get-current-available-gpus-in-tensorflow
but updated thanks to http://blog.datumbox.com/5-tips-for-multi-gpu-training-with-keras/
"""
local_device_protos = K.get_session().list_devices() # device_lib.list_local_devices()
return [x.name for x in local_device_protos if x.device_type == 'GPU']
def make_parallel(model):
"""
Taken from https://github.com/kuza55/keras-extras/blob/master/utils/multi_gpu.py
Uses all available GPUs. To limit number of GPUs used, call program from command line with CUDA_VISIBLE_DEVICES=...
"""
gpu_list = get_available_gpus()
gpus_detected = len(gpu_list)
if (gpus_detected < 2):
return model # no GPU parallelism
gpu_count = gpus_detected
print("make_parallel:",gpus_detected,"GPUs detected. Parallelizing across",gpu_count,"GPUs...")
outputs_all = []
for i in range(len(model.outputs)):
outputs_all.append([])
#Place a copy of the model on each GPU, each getting a slice of the batch
for i in range(gpu_count):
with tf.device('/gpu:%d' % i):
with tf.name_scope('tower_%d' % i) as scope:
inputs = []
#Slice each input into a piece for processing on this GPU
for x in model.inputs:
input_shape = tuple(x.get_shape().as_list())[1:]
slice_n = Lambda(get_slice, output_shape=input_shape, arguments={'idx':i,'parts':gpu_count})(x)
inputs.append(slice_n)
outputs = model(inputs)
if not isinstance(outputs, list):
outputs = [outputs]
#Save all the outputs for merging back together later
for l in range(len(outputs)):
outputs_all[l].append(outputs[l])
# merge outputs on CPU
with tf.device('/cpu:0'):
merged = []
for outputs in outputs_all:
#merged.append(merge(outputs, mode='concat', concat_axis=0))
merged.append(concatenate(outputs, axis=0))
return Model(inputs=model.inputs, outputs=merged)
| [
198,
198,
2,
19486,
422,
3740,
1378,
12567,
13,
785,
14,
23063,
4496,
2816,
14,
6122,
292,
12,
2302,
8847,
14,
2436,
672,
14,
9866,
14,
26791,
14,
41684,
62,
46999,
13,
9078,
198,
2,
6006,
33055,
1636,
6153,
284,
17337,
292,
362,
... | 2.402226 | 1,258 |
# -*- coding: utf_8 -*-
dict_test = {"a": 10, "e": 1, "b": 3}
print(dict_test)
sorted(dict_test)
print(dict_test)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
62,
23,
532,
9,
12,
201,
198,
201,
198,
201,
198,
11600,
62,
9288,
796,
19779,
64,
1298,
838,
11,
366,
68,
1298,
352,
11,
366,
65,
1298,
513,
92,
201,
198,
4798,
7,
11600,
62,
9288,
8,
201... | 1.84058 | 69 |
s_rate= int(input("what is sample rate? "))
b_depth= int(input("what is depth? "))
duration = int(input("what is sample rate? "))
f_size = s_rate*b_depth*duration
print(f_size) | [
82,
62,
4873,
28,
493,
7,
15414,
7203,
10919,
318,
6291,
2494,
30,
366,
4008,
198,
65,
62,
18053,
28,
493,
7,
15414,
7203,
10919,
318,
6795,
30,
366,
4008,
198,
32257,
796,
493,
7,
15414,
7203,
10919,
318,
6291,
2494,
30,
366,
400... | 2.681818 | 66 |
#!flask/bin/python
import os
import sys
import uuid
from flask import Flask, Request, request, jsonify
from .modules.response import ok, bad_request, forbidden, not_found, server_error
from .modules.manager import Manager
app = Flask(__name__)
mgr = Manager()
#########################################
# Routes
#########################################
@app.route('/api/user', methods=['GET'])
@app.route('/api/user/answers', methods=['POST'])
@app.route('/api/scores', methods=['GET']) | [
2,
0,
2704,
2093,
14,
8800,
14,
29412,
198,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
334,
27112,
198,
198,
6738,
42903,
1330,
46947,
11,
19390,
11,
2581,
11,
33918,
1958,
198,
6738,
764,
18170,
13,
26209,
1330,
12876,
11,
208... | 3.286667 | 150 |
import flask
app = flask.Flask(__name__) | [
11748,
42903,
198,
198,
1324,
796,
42903,
13,
7414,
2093,
7,
834,
3672,
834,
8
] | 2.733333 | 15 |
"""
Pylibui test suite.
"""
from pylibui.controls import Tab, Button
from tests.utils import WindowTestCase
| [
37811,
198,
350,
2645,
571,
9019,
1332,
18389,
13,
198,
198,
37811,
198,
198,
6738,
279,
2645,
571,
9019,
13,
13716,
82,
1330,
16904,
11,
20969,
198,
6738,
5254,
13,
26791,
1330,
26580,
14402,
20448,
628
] | 3.111111 | 36 |
import os
import cv2
from ..utils.Camera import *
resolution = res_720p
if __name__ == '__main__':
main()
| [
11748,
28686,
198,
11748,
269,
85,
17,
198,
6738,
11485,
26791,
13,
35632,
1330,
1635,
198,
198,
29268,
796,
581,
62,
23906,
79,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1388,
3419,
... | 2.651163 | 43 |
from credentials import API_KEY, API_SECRET, ACCESS_TOKEN, ACCESS_TOKEN_SECRET, ACCOUNT_ID, ACCOUNT_NAME
import tweepy
from tweepy import Stream
from tweepy.streaming import StreamListener
import json
import time
from logger import *
import requests
import os
if __name__ == "__main__":
try:
followStream()
except Exception:
time.sleep(10)
logging.exception("Fatal exception. Consult logs.")
followStream()
finally:
time.sleep(10)
logging.exception("IN FINALLY")
print("IN FINALLY")
followStream() | [
6738,
18031,
1330,
7824,
62,
20373,
11,
7824,
62,
23683,
26087,
11,
15859,
7597,
62,
10468,
43959,
11,
15859,
7597,
62,
10468,
43959,
62,
23683,
26087,
11,
15859,
28270,
62,
2389,
11,
15859,
28270,
62,
20608,
198,
11748,
4184,
538,
88,
... | 2.533898 | 236 |
# Copyright (C) 2015-2019 Tormod Landet
# SPDX-License-Identifier: Apache-2.0
import os
import collections
from ocellaris.utils import ocellaris_error, OcellarisError, get_root_value
from ocellaris_post import read_yaml_input_file
import yaml
UNDEFINED = UndefinedParameter()
# Some things that could be better in this implementation
# TODO: do not subclass OrderedDict! This makes it hard to get read/write
# sub-Input views of a part of the input tree
# TODO: get_value should have required_type as the second argument. This
# would shorten the standard get-key-that-must-exist use case and
# passing 'any' would be a good documentation if the type can really
# be anything
def eval_python_expression(simulation, value, pathstr, safe_mode=False):
"""
We run eval with the math functions and user constants available on string
values that are prefixed with "py$" indicating that they are dynamic
expressions and not just static strings
"""
if not isinstance(value, str) or not value.startswith('py$'):
return value
if safe_mode:
ocellaris_error(
'Cannot have Python expression here',
'Not allowed to have Python expression here: %s' % pathstr,
)
# remove "py$" prefix
expr = value[3:]
# Build dictionary of locals for evaluating the expression
eval_locals = {}
import math
for name in dir(math):
if not name.startswith('_'):
eval_locals[name] = getattr(math, name)
global_inp = simulation.input
user_constants = global_inp.get_value(
'user_code/constants', {}, 'dict(string:basic)', safe_mode=True
)
for name, const_value in user_constants.items():
eval_locals[name] = const_value
eval_locals['simulation'] = simulation
eval_locals['t'] = eval_locals['time'] = simulation.time
eval_locals['it'] = eval_locals['timestep'] = simulation.timestep
eval_locals['dt'] = simulation.dt
eval_locals['ndim'] = simulation.ndim
try:
value = eval(expr, globals(), eval_locals)
except Exception:
simulation.log.error('Cannot evaluate python code for %s' % pathstr)
simulation.log.error('Python code is %s' % expr)
raise
return value
| [
2,
15069,
357,
34,
8,
1853,
12,
23344,
309,
579,
375,
6379,
316,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
24843,
12,
17,
13,
15,
198,
198,
11748,
28686,
198,
11748,
17268,
198,
6738,
267,
3846,
20066,
13,
26791,
1330,
26... | 2.77129 | 822 |
##############################################################################
#
# Copyright (c) 2005 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Bootstrap the buildout project itself.
This is different from a normal boostrapping process because the
buildout egg itself is installed as a develop egg.
"""
import sys
if sys.version_info < (2, 7):
raise SystemError("Outside Python 2.7, no support for Python 2.x.")
if sys.version_info > (3, ) and sys.version_info < (3, 5):
raise SystemError("No support for Python 3.x under 3.5.")
import os, shutil, subprocess, tempfile
for d in 'eggs', 'develop-eggs', 'bin', 'parts':
if not os.path.exists(d):
os.mkdir(d)
bin_buildout = os.path.join('bin', 'buildout')
if os.path.isfile(bin_buildout):
os.remove(bin_buildout)
if os.path.isdir('build'):
shutil.rmtree('build')
#######################################################################
try:
import pip
except ImportError:
install_pip()
######################################################################
need_restart = False
for package in ['pip', 'setuptools', 'wheel']:
did_upgrade = check_upgrade(package)
show(package)
need_restart = need_restart or did_upgrade
if need_restart:
print("Restart")
sys.stdout.flush()
return_code = subprocess.call(
[sys.executable] + sys.argv
)
sys.exit(return_code)
######################################################################
print('')
print('Install buildout')
print('')
sys.stdout.flush()
if subprocess.call(
[sys.executable] +
['setup.py', '-q', 'develop', '-m', '-x', '-d', 'develop-eggs'],
):
raise RuntimeError("buildout build failed.")
import pkg_resources
pkg_resources.working_set.add_entry('src')
import zc.buildout.easy_install
zc.buildout.easy_install.scripts(
['zc.buildout'], pkg_resources.working_set, sys.executable, 'bin')
######################################################################
try:
import coverage
except ImportError:
install_coverage()
######################################################################
print('')
print('Run buildout')
print('')
bin_buildout = os.path.join('bin', 'buildout')
if sys.platform.startswith('java'):
# Jython needs the script to be called twice via sys.executable
assert subprocess.Popen([sys.executable, bin_buildout, '-N']).wait() == 0
sys.stdout.flush()
sys.exit(subprocess.Popen(bin_buildout).wait())
| [
29113,
29113,
7804,
4242,
2235,
198,
2,
198,
2,
15069,
357,
66,
8,
5075,
1168,
3008,
5693,
290,
25767,
669,
13,
198,
2,
1439,
6923,
33876,
13,
198,
2,
198,
2,
770,
3788,
318,
2426,
284,
262,
8617,
286,
262,
1168,
3008,
5094,
13789... | 3.174381 | 929 |
"""
Lorem ipsum
"""
__version__ = '0.1.0'
__name__ = 'AMazingTurtle'
__author__ = 'Michael K. - m1ch'
__email__ = 'my@email'
__github__ = 'https://github.com/m1ch/AMazingTurtle'
__readthedocs__ = 'no idea' | [
37811,
198,
43,
29625,
220,
2419,
388,
198,
37811,
198,
198,
834,
9641,
834,
796,
705,
15,
13,
16,
13,
15,
6,
198,
834,
3672,
834,
796,
705,
2390,
4070,
51,
17964,
6,
198,
834,
9800,
834,
796,
705,
13256,
509,
13,
532,
285,
16,
... | 2.263736 | 91 |
import random
from neuralpp.inference.graphical_model.representation.random.random_model import (
generate_model,
)
from neuralpp.inference.graphical_model.variable.integer_variable import IntegerVariable
from neuralpp.inference.graphical_model.variable_elimination import VariableElimination
from neuralpp.util.util import join, repeat
if __name__ == "__main__":
model = generate_model(number_of_factors=6, number_of_variables=4, cardinality=3)
print("Model:")
print(join(model, "\n"))
print()
ds = generate_dataset(
model=model,
number_of_sets_of_observed_and_query_variables=5,
number_of_query_variables=random.randint(0, 2),
number_of_observed_variables=2,
number_of_observations_per_random_set_of_observed_and_query_variables=1,
datapoints_per_observation=10,
)
print()
print("Generated dataset:")
print(join(ds, "\n"))
| [
11748,
4738,
198,
198,
6738,
17019,
381,
13,
259,
4288,
13,
34960,
605,
62,
19849,
13,
15603,
341,
13,
25120,
13,
25120,
62,
19849,
1330,
357,
198,
220,
220,
220,
7716,
62,
19849,
11,
198,
8,
198,
6738,
17019,
381,
13,
259,
4288,
... | 2.563889 | 360 |
'''
SEG2020 is a script that runs all the computations for the different results
presented at the SEG2020 conference (Houston, Texas).
***Improving BEL1D accuracy for geophysical imaging of the subsurface***
It runs at first the numerical benchmark for a dataset that is created directly.
- Creating the dataset
- Running BEL1D (initialization + first iteration)
- Presenting graphs of the results
- Applying IPR
- Presenting graphs of the improved result
- Comparing to McMC results from DREAM (results provided in github)
Then, it runs the same for the Mont Rigi dataset and presents simply the obtained profile.
Author:
Hadrien MICHEL
ULiège, UGent, F.R.S.-FNRS
hadrien[dot]michel[at]uliege[dot]be
(c) August 2020
'''
if __name__=='__main__':
import numpy as np
from pyBEL1D import BEL1D
from pyBEL1D.utilities import Tools
from matplotlib import pyplot
from pathos import multiprocessing as mp
from pathos import pools as pp
import time
################################################################################
### ###
### Numerical Benchmarking ###
### ###
################################################################################
Benchmark = True
if Benchmark:
# 1) Creating the dataset:
from pygimli.physics import sNMR
KernelBench = "Data/sNMR/Tx50Rx50.mrsk" # A kernel file generated by MRSMatlab (Mueller-Petke et al., 2012)
nbLayer = 3 # 2 layers and a half-space
TimingBench = np.arange(0.005,0.5,0.001) # The time vector for the model
# KFile = sNMR.MRS()
# KFile.loadKernel(KernelBench)
# ModellingMethod = sNMR.MRS1dBlockQTModelling(nlay=nbLayer,K=KFile.K,zvec=KFile.z,t=TimingBench)
ModelBench = np.asarray([25, 25, 0.05, 0.25, 0.1, 0.1, 0.2, 0.05])
Noise = 10 # nV
# DatasetBench = ModellingMethod.response(ModelBench)
# DatasetBench += np.random.normal(loc=0, scale=Noise*1e-9,size=DatasetBench.shape) # Adding noise to the dataset
# np.save('sNMR_Bench_Dataset',DatasetBench)# To use in the McMC algorithm
DatasetBench = np.load('sNMR_Bench_Dataset.npy') # Load the dataset that was already created and run in McMC
# 2) Initializing BEL1D:
priorBench = np.array([[0.0, 50.0, 0.0, 0.15, 0.0, 0.5], [0.0, 50.0, 0.15, 0.50, 0.0, 0.5], [0.0, 0.0, 0.0, 0.15, 0.0, 0.5]])
# Initialsizing the parameters:
start = time.time()
ModelParam = BEL1D.MODELSET.SNMR(prior=priorBench, Kernel=KernelBench, Timing=TimingBench)
# 3) Running pre-BEL operations:
nbSampled = 10000
PreBEL_Bench = BEL1D.PREBEL(ModelParam,nbModels=nbSampled)
PreBEL_Bench.run()
# 4) Sampling 10000 models from the posterior:
PostBEL_Bench = BEL1D.POSTBEL(PreBEL_Bench)
PostBEL_Bench.run(Dataset=DatasetBench,nbSamples=nbSampled,NoiseModel=Noise)
end = time.time()
PostBEL_Bench.KDE.ShowKDE(Xvals=PostBEL_Bench.CCA.transform(PostBEL_Bench.PCA['Data'].transform(np.reshape(DatasetBench,(1,-1)))))
PostBEL_Bench.ShowDataset(RMSE=True)
CurrentGraph = pyplot.gcf()
CurrentGraph = CurrentGraph.get_axes()[0]
CurrentGraph.plot(TimingBench,DatasetBench[:len(TimingBench)],'k',linestyle='None',marker='o',markerfacecolor='None')
# Graph for the CCA space parameters loads
_, ax = pyplot.subplots()
B = PostBEL_Bench.CCA.y_loadings_
B = np.divide(np.abs(B).T,np.repeat(np.reshape(np.sum(np.abs(B),axis=0),(1,B.shape[0])),B.shape[0],axis=0).T)
ind = np.asarray(range(B.shape[0]))+1
ax.bar(x=ind,height=B[0],label=r'${}$'.format(PostBEL_Bench.MODPARAM.paramNames["NamesSU"][0]))
for i in range(B.shape[0]+1)[1:-1]:
ax.bar(x=ind,height=B[i],bottom=np.reshape(np.sum(B[0:i],axis=0),(B.shape[0],)),label=r'${}$'.format(PostBEL_Bench.MODPARAM.paramNames["NamesSU"][i]))
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width, box.height*0.8])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.4), ncol=3)
ax.set_ylabel('Relative contribution')
ax.set_xlabel('CCA dimension')
pyplot.show(block=False)
PostBEL_Bench.ShowPostModels(TrueModel=ModelBench,RMSE=True)
PostBEL_Bench.ShowPostCorr(TrueModel=ModelBench)
pyplot.show(block=False)
MODELS_1stITER = PostBEL_Bench.SAMPLES
MODELS_1stITER_DATA = PostBEL_Bench.SAMPLESDATA
PRE_MODS = PreBEL_Bench.MODELS
PRE_DATA = PreBEL_Bench.FORWARD
Postbel = PostBEL_Bench
Prebel = PreBEL_Bench
# 5) Applying IPR
nbIter = 100 # maximum number of iterations
tolerance = 5e-3 # Tolerance on the normalized difference between the distributions
nbParam = int(priorBench.size/2 - 1)
means = np.zeros((nbIter,nbParam))
stds = np.zeros((nbIter,nbParam))
timings = np.zeros((nbIter,))
MODELS_ITER = np.zeros((nbIter,nbSampled,nbParam)) # Store the models that have been computed
diverge = True
distancePrevious = 1e10
MixingUpper = 0
MixingLower = 1
for idxIter in range(nbIter):
PostbelLast = Postbel
PrebelLast = Prebel
if idxIter == 0: # Initialization: already done (see 2 and 3)
# PostbelTest.KDE.ShowKDE(Xvals=PostbelTest.CCA.transform(PostbelTest.PCA['Data'].transform(np.reshape(Dataset,(1,-1)))))
means[idxIter,:], stds[idxIter,:] = Postbel.GetStats()
timings[idxIter] = end-start
ModLastIter = Prebel.MODELS
else:
ModLastIter = Postbel.SAMPLES
# Here, we will use the POSTBEL2PREBEL function that adds the POSTBEL from previous iteration to the prior (Iterative prior resampling)
# However, the computations are longer with a lot of models, thus you can opt-in for the "simplified" option which randomely select up to 10 times the numbers of models
MixingUpper += 1
MixingLower += 1
Mixing = MixingUpper/MixingLower
Prebel = BEL1D.PREBEL.POSTBEL2PREBEL(PREBEL=Prebel,POSTBEL=Postbel,Dataset=DatasetBench,NoiseModel=Noise,Simplified=True,nbMax=nbSampled,MixingRatio=Mixing)
# Since when iterating, the dataset is known, we are not computing the full relationship but only the posterior distributions directly to gain computation timing
print(idxIter+1)
Postbel = BEL1D.POSTBEL(Prebel)
Postbel.run(DatasetBench,nbSamples=nbSampled,NoiseModel=None)
means[idxIter,:], stds[idxIter,:] = Postbel.GetStats()
end = time.time()
timings[idxIter] = end-start
# The distance is computed on the normalized distributions. Therefore, the tolerance is relative.
diverge, distance = Tools.ConvergeTest(SamplesA=ModLastIter,SamplesB=Postbel.SAMPLES, tol=tolerance)
print('Wasserstein distance: {}'.format(distance))
if not(diverge) or (abs((distancePrevious-distance)/distancePrevious)*100<1):# If the distance between the distributions is not changing, we converged as well
# Convergence acheived if:
# 1) Distance below threshold
# 2) Distance does not vary significantly (less than 2.5%)
print('Model has converged at iter {}!'.format(idxIter+1))
MODELS_ITER[idxIter,:,:] = Postbel.SAMPLES
break
distancePrevious = distance
MODELS_ITER[idxIter,:,:] = Postbel.SAMPLES
start = time.time()
timings = timings[:idxIter+1]
means = means[:idxIter+1,:]
stds = stds[:idxIter+1,:]
MODELS_ITER = MODELS_ITER[:idxIter+1,:,:]
np.save('ModelsIteration',MODELS_ITER)
# 6) Graphs for the results:
Postbel.ShowDataset(RMSE=True)
CurrentGraph = pyplot.gcf()
CurrentGraph = CurrentGraph.get_axes()[0]
CurrentGraph.plot(TimingBench,DatasetBench[:len(TimingBench)],'k',linestyle='None',marker='o',markerfacecolor='None')
#Postbel.ShowPostCorr(TrueModel=ModelBench,OtherMethod=PRE_MODS)
Postbel.ShowPostModels(TrueModel=ModelBench,RMSE=True)
# Graph for the CCA space parameters loads
_, ax = pyplot.subplots()
B = Postbel.CCA.y_loadings_
B = np.divide(np.abs(B).T,np.repeat(np.reshape(np.sum(np.abs(B),axis=0),(1,B.shape[0])),B.shape[0],axis=0).T)
ind = np.asarray(range(B.shape[0]))+1
ax.bar(x=ind,height=B[0],label=r'${}$'.format(Postbel.MODPARAM.paramNames["NamesSU"][0]))
for i in range(B.shape[0]+1)[1:-1]:
ax.bar(x=ind,height=B[i],bottom=np.reshape(np.sum(B[0:i],axis=0),(B.shape[0],)),label=r'${}$'.format(Postbel.MODPARAM.paramNames["NamesSU"][i]))
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width, box.height*0.8])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.4), ncol=3)
ax.set_ylabel('Relative contribution')
ax.set_xlabel('CCA dimension')
pyplot.show(block=False)
# Add KDE graph at last iteration:
PrebelGraph = BEL1D.PREBEL.POSTBEL2PREBEL(PREBEL=PrebelLast,POSTBEL=PostbelLast, NoiseModel=Noise,RemoveOutlier=True,Simplified=True,nbMax=nbSampled,MixingRatio=Mixing)
PostbelGraph = BEL1D.POSTBEL(PREBEL=PrebelGraph)
print('Postbel for graphs initialized')
PostbelGraph.run(Dataset=DatasetBench, nbSamples=10000)
print('Printing KDE Graphs')
PostbelGraph.KDE.ShowKDE(Xvals=PostbelGraph.CCA.transform(PostbelGraph.PCA['Data'].transform(np.reshape(DatasetBench,(1,-1)))))
print('Total computation time: {} seconds'.format(np.sum(timings)))
# Comparison with McMC:
OtherMethod = np.load('Data/sNMR/SEG2020_Bench.npy')
fig = pyplot.figure(figsize=[10,10])# Creates the figure space
axs = fig.subplots(nbParam, nbParam)
for i in range(nbParam):
for j in range(nbParam):
if i == j: # Diagonal
if i != nbParam-1:
axs[i,j].get_shared_x_axes().join(axs[i,j],axs[-1,j])# Set the xaxis limit
axs[i,j].hist(np.squeeze(MODELS_ITER[0,:,j]),color='gray',density=True,alpha=0.5)
axs[i,j].hist(np.squeeze(MODELS_ITER[3,:,j]),color='b',density=True,alpha=0.5)
axs[i,j].hist(np.squeeze(MODELS_ITER[6,:,j]),color='m',density=True,alpha=0.5)
axs[i,j].hist(OtherMethod[:,j],color='y',density=True,alpha=0.5)
axs[i,j].hist(np.squeeze(MODELS_ITER[-1,:,j]),color='g',density=True,alpha=0.5)
axs[i,j].plot([ModelBench[i],ModelBench[i]],np.asarray(axs[i,j].get_ylim()),'r')
if nbParam > 8:
axs[i,j].set_xticks([])
axs[i,j].set_yticks([])
elif i > j: # Below the diagonal -> Scatter plot
if i != nbParam-1:
axs[i,j].get_shared_x_axes().join(axs[i,j],axs[-1,j])# Set the xaxis limit
if j != nbParam-1:
if i != nbParam-1:
axs[i,j].get_shared_y_axes().join(axs[i,j],axs[i,-1])# Set the yaxis limit
else:
axs[i,j].get_shared_y_axes().join(axs[i,j],axs[i,-2])# Set the yaxis limit
axs[i,j].plot(np.squeeze(MODELS_ITER[0,:,j]),np.squeeze(MODELS_ITER[0,:,i]),color='gray',linestyle='None',marker='.')
axs[i,j].plot(np.squeeze(MODELS_ITER[3,:,j]),np.squeeze(MODELS_ITER[3,:,i]),'.b')
axs[i,j].plot(np.squeeze(MODELS_ITER[6,:,j]),np.squeeze(MODELS_ITER[6,:,i]),'.m')
axs[i,j].plot(np.squeeze(MODELS_ITER[-1,:,j]),np.squeeze(MODELS_ITER[-1,:,i]),'.g')
axs[i,j].plot(ModelBench[j],ModelBench[i],'.r')
if nbParam > 8:
axs[i,j].set_xticks([])
axs[i,j].set_yticks([])
elif OtherMethod is not None:
if i != nbParam-1:
axs[i,j].get_shared_x_axes().join(axs[i,j],axs[-1,j])# Set the xaxis limit
if j != nbParam-1:
if i != 0:
axs[i,j].get_shared_y_axes().join(axs[i,j],axs[i,-1])# Set the yaxis limit
else:
axs[i,j].get_shared_y_axes().join(axs[i,j],axs[i,-2])# Set the yaxis limit
axs[i,j].plot(np.squeeze(MODELS_ITER[0,:,j]),np.squeeze(MODELS_ITER[0,:,i]),color='gray',linestyle='None',marker='.')
axs[i,j].plot(OtherMethod[:,j],OtherMethod[:,i],'.y')
axs[i,j].plot(ModelBench[j],ModelBench[i],'.r')
if nbParam > 8:
axs[i,j].set_xticks([])
axs[i,j].set_yticks([])
else:
axs[i,j].set_visible(False)
if j == 0: # First column of the graph
if ((i==0)and(j==0)) or not(i==j):
axs[i,j].set_ylabel(r'${}$'.format(Postbel.MODPARAM.paramNames["NamesSU"][i]))
if i == nbParam-1: # Last line of the graph
axs[i,j].set_xlabel(r'${}$'.format(Postbel.MODPARAM.paramNames["NamesSU"][j]))
if j == nbParam-1:
if not(i==j):
axs[i,j].yaxis.set_label_position("right")
axs[i,j].yaxis.tick_right()
axs[i,j].set_ylabel(r'${}$'.format(Postbel.MODPARAM.paramNames["NamesSU"][i]))
if i == 0:
axs[i,j].xaxis.set_label_position("top")
axs[i,j].xaxis.tick_top()
axs[i,j].set_xlabel(r'${}$'.format(Postbel.MODPARAM.paramNames["NamesSU"][j]))
fig.suptitle("Posterior model space visualtization")
from matplotlib.lines import Line2D
custom_lines = [Line2D([0], [0], color='gray', lw=4),
Line2D([0], [0], color='y', lw=4),
Line2D([0], [0], color='b', lw=4),
Line2D([0], [0], color='m', lw=4),
Line2D([0], [0], color='g', lw=4),
Line2D([0], [0], color='r', lw=4)]
fig.legend(custom_lines, ['Prior', 'DREAM', '3rd Iteration', '6th Iteration', 'Last Iteration', 'Benchmark'],loc='lower center',ncol=6)
for ax in axs.flat:
ax.label_outer()
pyplot.show(block=False)
# Graph with the models:
DREAM = OtherMethod # Already loaded
Prior = PRE_MODS
Iter5 = np.squeeze(MODELS_ITER[3,:,:])
Iter10 = np.squeeze(MODELS_ITER[6,:,:])
IterLast = np.squeeze(MODELS_ITER[-1,:,:])
pltidx = [1,4,2,5,8,3,6,9]
fig = pyplot.figure(figsize=[10,10])
for idx in range(8):
ax = pyplot.subplot(3,3,pltidx[idx])
pyplot.hist(Prior[:,idx].ravel(),bins=50,density=True,alpha=0.5,label='Prior')
pyplot.hist(Iter5[:,idx].ravel(),bins=50,density=True,alpha=0.5,label='3rd iteration')
pyplot.hist(Iter10[:,idx].ravel(),bins=50,density=True,alpha=0.5,label='6th iteration')
pyplot.hist(IterLast[:,idx].ravel(),bins=50,density=True,alpha=0.5,label='Last iteration')
pyplot.hist(DREAM[:,idx].ravel(),bins=50,density=True,alpha=0.5,label='DREAM')
ax.plot([ModelBench[idx],ModelBench[idx]],np.asarray(ax.get_ylim()),label='Benchmark')
pyplot.plot()
if pltidx[idx]==1:
ax.set_ylabel('Layer 1')
elif pltidx[idx]==4:
ax.set_ylabel('Layer 2')
elif pltidx[idx]==8:
ax.set_xlabel('Water content [/]')
elif pltidx[idx]==9:
ax.set_xlabel('Relaxation time [sec]')
handles, labels = ax.get_legend_handles_labels()
ax = pyplot.subplot(3,3,7)# Not used but labels needed
ax.set_xlabel('Thickness [m]')
ax.set_ylabel('Half-space')
ax.spines['bottom'].set_color('None')
ax.spines['top'].set_color('None')
ax.spines['right'].set_color('None')
ax.spines['left'].set_color('None')
ax.xaxis.set_ticks([])
ax.yaxis.set_ticks([])
#pyplot.axis('off')
CurrentGraph = pyplot.gcf()
CurrentGraph.legend(handles, labels,loc='lower center', ncol=6)
# Graph for the i-th pusle moment:
import matplotlib
from matplotlib import colors
from scipy import stats
RMS = np.sqrt(np.square(np.subtract(DatasetBench,Postbel.SAMPLESDATA)).mean(axis=-1))
quantiles = np.divide([stats.percentileofscore(RMS,a,'strict') for a in RMS],100)
sortIndex = np.argsort(RMS)
sortIndex = np.flip(sortIndex)
fig = pyplot.figure()
ax = fig.add_subplot(1, 1, 1)
colormap = matplotlib.cm.get_cmap('jet')
for j in sortIndex:
ax.plot(Postbel.MODPARAM.forwardFun["Axis"],np.squeeze(Postbel.SAMPLESDATA[j,-5*len(Postbel.MODPARAM.forwardFun["Axis"]):-4*len(Postbel.MODPARAM.forwardFun["Axis"])]),color=colormap(quantiles[j]))
ax.plot(TimingBench,DatasetBench[-5*len(Postbel.MODPARAM.forwardFun["Axis"]):-4*len(Postbel.MODPARAM.forwardFun["Axis"])],'k',linestyle='None',marker='o',markerfacecolor='None')
ax.set_xlabel(r'${}$'.format(Postbel.MODPARAM.paramNames["DataAxis"]),fontsize=14)
ax.set_ylabel(r'${}$'.format(Postbel.MODPARAM.paramNames["DataName"]),fontsize=14)
fig.subplots_adjust(bottom=0.30)
ax_colorbar = fig.add_axes([0.10, 0.15, 0.80, 0.05])
nb_inter = 1000
color_for_scale = colormap(np.linspace(0,1,nb_inter,endpoint=True))
cmap_scale = colors.ListedColormap(color_for_scale)
scale = [stats.scoreatpercentile(RMS,a,limit=(np.min(RMS),np.max(RMS)),interpolation_method='lower') for a in np.linspace(0,100,nb_inter,endpoint=True)]
norm = colors.BoundaryNorm(scale,len(color_for_scale))
data = np.atleast_2d(np.linspace(np.min(RMS),np.max(RMS),nb_inter,endpoint=True))
ax_colorbar.imshow(data, aspect='auto',cmap=cmap_scale,norm=norm)
ax_colorbar.set_xlabel('Root Mean Square Error {}'.format(Postbel.MODPARAM.paramNames["DataUnits"]),fontsize=12)
ax_colorbar.yaxis.set_visible(False)
nbTicks = 5
ax_colorbar.set_xticks(ticks=np.linspace(0,nb_inter,nbTicks,endpoint=True))
ax_colorbar.set_xticklabels(labels=round_to_5([stats.scoreatpercentile(RMS,a,limit=(np.min(RMS),np.max(RMS)),interpolation_method='lower') for a in np.linspace(0,100,nbTicks,endpoint=True)],n=5),rotation=15,ha='center')
pyplot.show()
################################################################################
### ###
### Case study: Mont Rigi ###
### ###
################################################################################
MtRigi = False
if MtRigi:
# Load the field data
from pygimli.physics import sNMR
Dataset = "Data/sNMR/SEG2020_MtRigi.mrsd"
Kernel = "Data/sNMR/SEG2020_MtRigi.mrsk"
ModelParam = sNMR.MRS()
sNMR.MRS.loadKernel(ModelParam,Kernel)
sNMR.MRS.loadMRSI(ModelParam,Dataset)
FieldData = np.ravel(ModelParam.dcube)
TimingField = ModelParam.t
Noise = 18 #nV
# Initialize BEL1D:
nbSampled = 5000
priorMtRigi = np.asarray([[0.0, 7.5, 0.30, 0.80, 0.0, 0.200], [0, 0, 0.0, 0.15, 0.100, 0.400]])
start = time.time()
MODEL_MtRigi = BEL1D.MODELSET().SNMR(prior=priorMtRigi,Kernel=Kernel, Timing=TimingField)
PREBEL_MtRigi = BEL1D.PREBEL(MODEL_MtRigi,nbModels=nbSampled)
PREBEL_MtRigi.run()
POSTBEL_MtRigi = BEL1D.POSTBEL(PREBEL_MtRigi)
POSTBEL_MtRigi.run(FieldData,nbSamples=nbSampled,NoiseModel=Noise)
end = time.time()
POSTBEL_MtRigi.ShowDataset(RMSE=True,Prior=True)
CurrentGraph = pyplot.gcf()
CurrentGraph = CurrentGraph.get_axes()[0]
CurrentGraph.plot(TimingField,FieldData[:len(TimingField)],'k',linestyle='None',marker='o',markerfacecolor='None')
pyplot.show(block=False)
POSTBEL_MtRigi.ShowPostModels(RMSE=True)
# Iterations:
Postbel = POSTBEL_MtRigi
Prebel = PREBEL_MtRigi
nbIter = 100 # maximum number of iterations
tolerance = 5e-3 # Tolerance on the normalized difference between the distributions
nbParam = int(priorMtRigi.size/2 - 1)
means = np.zeros((nbIter,nbParam))
stds = np.zeros((nbIter,nbParam))
timings = np.zeros((nbIter,))
MODELS_ITER = np.zeros((nbIter,nbSampled,nbParam)) # Store the models that have been computed
diverge = True
distancePrevious = 1e10
MixingUpper = 0
MixingLower = 1
for idxIter in range(nbIter):
PostbelLast = Postbel
PrebelLast = Prebel
if idxIter == 0: # Initialization: already done (see 2 and 3)
# PostbelTest.KDE.ShowKDE(Xvals=PostbelTest.CCA.transform(PostbelTest.PCA['Data'].transform(np.reshape(Dataset,(1,-1)))))
means[idxIter,:], stds[idxIter,:] = Postbel.GetStats()
timings[idxIter] = end-start
ModLastIter = Prebel.MODELS
else:
ModLastIter = Postbel.SAMPLES
# Here, we will use the POSTBEL2PREBEL function that adds the POSTBEL from previous iteration to the prior (Iterative prior resampling)
# However, the computations are longer with a lot of models, thus you can opt-in for the "simplified" option which randomely select up to 10 times the numbers of models
MixingUpper += 1
MixingLower += 1
Mixing = MixingUpper/MixingLower
Prebel = BEL1D.PREBEL.POSTBEL2PREBEL(PREBEL=Prebel,POSTBEL=Postbel,Dataset=FieldData,NoiseModel=Noise,Simplified=True,nbMax=nbSampled,MixingRatio=Mixing)
# Since when iterating, the dataset is known, we are not computing the full relationship but only the posterior distributions directly to gain computation timing
print(idxIter+1)
Postbel = BEL1D.POSTBEL(Prebel)
Postbel.run(FieldData,nbSamples=nbSampled,NoiseModel=Noise)
means[idxIter,:], stds[idxIter,:] = Postbel.GetStats()
end = time.time()
timings[idxIter] = end-start
# The distance is computed on the normalized distributions. Therefore, the tolerance is relative.
diverge, distance = Tools.ConvergeTest(SamplesA=ModLastIter,SamplesB=Postbel.SAMPLES, tol=tolerance)
print('Wasserstein distance: {}'.format(distance))
if not(diverge) or (abs((distancePrevious-distance)/distancePrevious)*100<1):# If the distance between the distributions is not changing, we converged as well
# Convergence acheived if:
# 1) Distance below threshold
# 2) Distance does not vary significantly (less than 2.5%)
print('Model has converged at iter {}!'.format(idxIter+1))
MODELS_ITER[idxIter,:,:] = Postbel.SAMPLES
break
distancePrevious = distance
MODELS_ITER[idxIter,:,:] = Postbel.SAMPLES
start = time.time()
timings = timings[:idxIter+1]
means = means[:idxIter+1,:]
stds = stds[:idxIter+1,:]
MODELS_ITER = MODELS_ITER[:idxIter+1,:,:]
np.save('ModelsIteration',MODELS_ITER)
Postbel.ShowPostModels(RMSE=True)
Postbel.ShowDataset(RMSE=True,Prior=True)
CurrentGraph = pyplot.gcf()
CurrentGraph = CurrentGraph.get_axes()[0]
CurrentGraph.plot(TimingField,FieldData[:len(TimingField)],'k',linestyle='None',marker='o',markerfacecolor='None')
pyplot.show(block=False)
# i = int(np.ceil(idxIter/2))
# Postbel.ShowPostCorr(OtherMethod=np.squeeze(MODELS_ITER[i,:,:]))
print('CPU time: {} seconds'.format(np.sum(timings)))
pyplot.show()
| [
7061,
6,
198,
5188,
38,
42334,
318,
257,
4226,
326,
4539,
477,
262,
2653,
602,
329,
262,
1180,
2482,
220,
198,
25579,
276,
379,
262,
311,
7156,
42334,
4495,
357,
33387,
11,
3936,
737,
198,
198,
8162,
23028,
1075,
29991,
16,
35,
9922... | 1.985252 | 12,544 |
""" Data manipulation utils, many based on
https://www.kaggle.com/dcstang/see-like-a-radiologist-with-systematic-windowing
"""
from pathlib import Path
from typing import List, Dict
import attr
import numpy as np
import pandas as pd
import pydicom
from sklearn.model_selection import GroupKFold
import tqdm
ROOT = Path(__file__).parent.parent / 'data'
TRAIN_ROOT = ROOT / 'stage_1_train_images'
WINDOW_CONFIG = {
# center, width
'brain': (40, 80),
'blood': (80, 200),
'soft': (40, 480),
'bone': (600, 2800),
}
CLASSES = [
'any',
'epidural',
'intraparenchymal',
'intraventricular',
'subarachnoid',
'subdural',
]
@attr.s(auto_attribs=True)
| [
37811,
6060,
17512,
3384,
4487,
11,
867,
1912,
319,
198,
5450,
1378,
2503,
13,
74,
9460,
293,
13,
785,
14,
17896,
301,
648,
14,
3826,
12,
2339,
12,
64,
12,
6335,
31599,
12,
4480,
12,
10057,
1512,
12,
7972,
7855,
198,
37811,
198,
6... | 2.418118 | 287 |
import pickle
import pandas as pd
import numpy as np
#import missingno as msno
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler , Normalizer
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from scipy.stats import norm
from scipy import stats
from sklearn import metrics
import warnings
warnings.filterwarnings('ignore')
from sklearn.ensemble import RandomForestClassifier
rfc1=RandomForestClassifier(criterion= 'gini', max_depth= 4, max_features= 'sqrt', n_estimators= 100)
binary_file = open('data.obj', 'rb')
rfc1 = pickle.load(binary_file)
binary_file.close()
import random
a = [0, 1, 1, 1, 0]
for i in range(5, 23):
a.append(1)
pred = rfc1.predict([a])
# print(pred)
| [
11748,
2298,
293,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
2,
11748,
4814,
3919,
355,
13845,
3919,
198,
11748,
384,
397,
1211,
355,
3013,
82,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
4... | 2.923077 | 260 |
# -*- coding: utf-8 -*-
import socket
import json
import os
import urllib3
import sys
import argparse
from .utils.constants import LAPUA_VERSION, STATUS
from .api.server import ApiServer
from .thread.jobs_manager import JobsManager
from .thread.command_handler import CommandHandler
from .utils.config import get_config
from .utils.logging import get_logger, get_global_logger
__author__ = 'Devo (Daniel Alonso)'
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--config", help="Path to config file")
parser.add_argument("-v", help="Show Lapua version", action="store_true")
args = parser.parse_args()
if args.v:
print('v{}'.format(LAPUA_VERSION))
sys.exit(0)
CONF = get_config(args.config)
try:
init_paths()
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
DEFAULT_LOGGER = get_global_logger()
LOGGER = get_logger('main', **DEFAULT_LOGGER)
except FileNotFoundError as excpt:
print('Cannot initialize paths.')
print(excpt)
sys.exit(1)
try:
LOGGER.debug('Initializing server.')
SERVER = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
SERVER.bind((CONF['server']['address'], CONF['server']['port']))
SERVER.listen(1)
LOGGER.info('Lapua server v%s listening on port %d.',
LAPUA_VERSION, CONF['server']['port'])
LOGGER.debug('Running with the following configuration: %s',
json.dumps(CONF))
except Exception as excpt:
LOGGER.error('Cannot create server at %s:%d',
CONF['server']['address'],
CONF['server']['port'])
LOGGER.error(excpt, exc_info=True)
sys.exit(1)
LOGGER.info('Loading status file.')
MANAGER = JobsManager(os.path.join(CONF['paths']['cache'], 'status.json'))
LOGGER.info('Running jobs.')
for job in MANAGER.find_jobs(status=STATUS.RUNNING):
try:
MANAGER.run_job(job)
except Exception as excpt:
LOGGER.error('Error running job %s', job)
LOGGER.error(excpt, exc_info=True)
try:
API_SERVER = ApiServer(CONF.get('server', {}).get('api', {}))
API_SERVER.start()
MANAGER.start()
while True:
CONN, ADDR = SERVER.accept()
HANDLER = CommandHandler(CONN, ADDR)
HANDLER.start()
except Exception as excpt:
LOGGER.error(excpt, exc_info=True)
finally:
LOGGER.info('Closing socket...')
try:
HANDLER.stop()
except:
pass
try:
API_SERVER.stop()
except:
pass
MANAGER.stop_all_jobs()
MANAGER.stop()
SERVER.close()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
17802,
198,
11748,
33918,
198,
11748,
28686,
198,
11748,
2956,
297,
571,
18,
198,
11748,
25064,
198,
11748,
1822,
29572,
198,
6738,
764,
26791,
13,
9979,
1187,
1330... | 2.154789 | 1,305 |
__author__ = 'Georgios Rizos (georgerizos@iti.gr)'
import numpy as np
def update_user_count_eponymous(set_of_contributors, anonymous_coward_comments_counter):
"""
Eponymous user count update.
Input: - set_of_contributors: A python set of user ids.
- anonymous_coward_comments_counter: The number of comments posted by anonymous user(s).
Output: - user_count: The number of eponymous users active in the information cascade.
"""
user_count = len(set_of_contributors)
return user_count
def update_user_count_estimated(set_of_contributors, anonymous_coward_comments_counter):
"""
Total user count estimate update in the presence of anonymous users.
Currently we use a very simplistic model for estimating the full user count.
Inputs: - set_of_contributors: A python set of user ids.
- anonymous_coward_comments_counter: The number of comments posted by anonymous user(s).
Output: estimated_anonymous_contributor_count: The estimated number of users active in the information cascade.
"""
eponymous_user_count = len(set_of_contributors)
if anonymous_coward_comments_counter > 0:
# TODO: Of course, I can use a much more sophisticated model.
estimated_anonymous_user_count = (1 + anonymous_coward_comments_counter)/2
else:
estimated_anonymous_user_count = 0.0
estimated_user_count = eponymous_user_count + estimated_anonymous_user_count
return estimated_user_count
def update_user_hirsch_eponymous(contributor_comment_counts, minimum_hirsch_value, maximum_hirsch_value):
"""
Calculates the Hirsch index for a user-comment occurrence vector.
Inputs: - contributor_comment_counts: A map from user id to comments posted in numpy array format.
- minimum_hirsch_value: This is the previous Hirsch value.
- maximum_hirsch_value: This is the depth of the latest node added to the tree.
Output: - hirsch: The Hirsch index.
"""
sorted_indices = np.argsort(contributor_comment_counts)
# This is the previous hirsch index value
hirsch_index = minimum_hirsch_value
if maximum_hirsch_value > contributor_comment_counts.size:
maximum_hirsch_value = contributor_comment_counts.size
if maximum_hirsch_value > minimum_hirsch_value:
comment_count = contributor_comment_counts[sorted_indices[-maximum_hirsch_value]]
if comment_count >= maximum_hirsch_value:
hirsch_index = maximum_hirsch_value
# # Check from maximum to minimum (not inclusive) possible hirsch values
# for active_contributors in np.arange(maximum_hirsch_value, minimum_hirsch_value, -1):
# comment_count = contributor_comment_counts[sorted_indices[-active_contributors]]
# if comment_count >= active_contributors:
# hirsch = active_contributors
# break
return hirsch_index
def update_graph_outdegree_entropy(contributor_comment_count):
"""
Calculates the entropy of the user-to-comment distribution for eponymous users.
Input: - contributor_comment_count: A map from user id to comments posted in numpy array format.
Output: - comment_entropy: The entropy of the user-to-comment distribution
"""
# TODO: The vector also contains a position for the Anonymous user. However, the value should always remain zero.
number_of_comments = np.sum(contributor_comment_count)
if number_of_comments < 2:
comment_entropy = 0
return comment_entropy
comment_distribution = contributor_comment_count/number_of_comments
comment_distribution = comment_distribution[comment_distribution > 0]
comment_entropy = np.abs(-np.sum(np.multiply(comment_distribution,
np.log(comment_distribution))))
return comment_entropy
def update_normalized_graph_outdegree_entropy(contributor_comment_count,
set_of_users,
within_discussion_anonymous_coward):
"""
Calculates the ratio of the entropy of the user-to-comment distribution to the maximum possible for eponymous users.
Inputs: - contributor_comment_count: A map from user id to comments posted in numpy array format.
- set_of_users: A python set of user ids.
- within_discussion_anonymous_coward: The name of the Anonymous user for this dataset.
Output: - normalized_eponymous_contributor_recurrence: The ratio of the entropy of the user-to-comment distribution
to the maximum possible for eponymous users.
"""
number_of_comments = np.sum(contributor_comment_count)
if number_of_comments < 2:
normalized_eponymous_contributor_recurrence = 1.0
return normalized_eponymous_contributor_recurrence
if within_discussion_anonymous_coward is not None:
number_of_users = len(set_of_users) - 1
else:
number_of_users = len(set_of_users)
# Calculate the user-to-comment distribution entropy.
comment_distribution = contributor_comment_count/number_of_comments
comment_distribution = comment_distribution[comment_distribution > 0]
comment_entropy = np.abs(-np.sum(np.multiply(comment_distribution,
np.log(comment_distribution))))
# Calculate the maximum possible user-to-comment distribution entropy given the number of comments.
uniform_comment_count = np.zeros(number_of_users, dtype=np.float64)
uniform_comment_count += (number_of_comments // number_of_users)
uniform_comment_count[:(number_of_comments % number_of_users)] += 1
uniform_comment_distribution = uniform_comment_count/number_of_comments
uniform_comment_distribution = uniform_comment_distribution[uniform_comment_distribution > 0]
max_comment_entropy = np.abs(-np.sum(np.multiply(uniform_comment_distribution,
np.log(uniform_comment_distribution))))
# Calculate normalized user-to-comment entropy.
if max_comment_entropy == 0.0:
normalized_eponymous_contributor_recurrence = 1.0
else:
normalized_eponymous_contributor_recurrence = np.abs(comment_entropy/max_comment_entropy)
return normalized_eponymous_contributor_recurrence
| [
834,
9800,
834,
796,
705,
33428,
4267,
371,
528,
418,
357,
469,
273,
1362,
528,
418,
31,
8846,
13,
2164,
33047,
198,
198,
11748,
299,
32152,
355,
45941,
628,
198,
4299,
4296,
62,
7220,
62,
9127,
62,
538,
6704,
7,
2617,
62,
1659,
6... | 2.728358 | 2,345 |
numless = 1
total = 100
while numless <= 99:
total *= 100 - numless
numless += 1
total = str(total)
newtotal = 0
for letter in total:
newtotal += int(letter)
print(newtotal)
| [
22510,
1203,
796,
352,
198,
23350,
796,
1802,
198,
4514,
997,
1203,
19841,
7388,
25,
198,
220,
220,
220,
2472,
1635,
28,
1802,
532,
997,
1203,
198,
220,
220,
220,
997,
1203,
15853,
352,
198,
23350,
796,
965,
7,
23350,
8,
198,
3605,
... | 2.657143 | 70 |
import RPi.GPIO as GPIO
from time import sleep
| [
11748,
25812,
72,
13,
16960,
9399,
355,
50143,
198,
6738,
640,
1330,
3993,
198
] | 3.357143 | 14 |
# This file is just for organization purposes,
# normally there will be more functions and validation in a complex proect
# to justify the existance of routes and client
from client.weather import get_gdl_wheather
| [
2,
770,
2393,
318,
655,
329,
4009,
4959,
11,
198,
2,
7685,
612,
481,
307,
517,
5499,
290,
21201,
287,
257,
3716,
386,
478,
198,
2,
284,
12051,
262,
2152,
590,
286,
11926,
290,
5456,
198,
6738,
5456,
13,
23563,
1330,
651,
62,
21287... | 4.367347 | 49 |
import sys
import logging
logging.basicConfig(level=logging.INFO)
sys.path.append(".")
from haystack.nodes._json_schema import update_json_schema
update_json_schema(update_index=True)
| [
11748,
25064,
198,
11748,
18931,
198,
198,
6404,
2667,
13,
35487,
16934,
7,
5715,
28,
6404,
2667,
13,
10778,
8,
628,
198,
17597,
13,
6978,
13,
33295,
7203,
19570,
198,
6738,
27678,
25558,
13,
77,
4147,
13557,
17752,
62,
15952,
2611,
1... | 2.9375 | 64 |
import torch
import numpy as np
| [
11748,
28034,
198,
11748,
299,
32152,
355,
45941,
628,
628,
628,
628,
198
] | 3.076923 | 13 |
import numpy as np
import numpy.random as rnd
import sys
import matplotlib.pyplot as plt
import scipy.interpolate as interp
from pixel import Pixel
import cProfile
import time
import pickle
class Histogram(object):
""" This is a wrapper class around the data from a fits image. It is
used to get precise containment(radius) and radius(containment) values"""
def __init__(self, x_values=None, y_values=None):
""" One of two ways to build the Histogram, this takes a list of x and y values
The bulk of the work is done by setup
Args:
x_values (numpy array, optional): If this is not present, an empty Histogram is constructed.
Otherwise, it is used to build the internal 2d histogram.
y_values (numpy array, optional): If this is not present, an empty Histogram is constructed.
Otherwise, it is used to build the internal 2d histogram.
"""
if x_values is None or y_values is None:
return
self._x_bin_max = 400
self._y_bin_max = 400
hist, xedges, yedges = np.histogram2d(x_values, y_values,
bins=[self._x_bin_max, self._y_bin_max], range=[[-2,2],[-2,2]])
self.empty_hist = False
self.setup(hist, xedges, yedges)
def setup(self, hist, xedges, yedges):
""" If an empty histogram was constructed from __init__, this must be called
to fill it. Takes equivalent output as from the numpy function histogram2d, but
those values need not be constructed using that function.
Args:
hist (numpy array): The 2d histogram of data
xedges (numpy array): The xedges, constituting a nbins+1 array with left and right
edges for each x-axis bin
yedges (numpy array): As xedges, but for y.
"""
self._hist = hist
self._xedges = xedges
self._yedges = yedges
self._total = np.sum(self._hist)
if self._total == 0:
self._empty_hist = True
else:
self._empty_hist = False
self._x_del = self._xedges[1] - self._xedges[0]
self._y_del = self._yedges[1] - self._yedges[0]
self._search_radius = np.sqrt(self._x_del**2 + self._y_del**2) * 2
self._pixels = []
self._middle_rad = []
for i,x in enumerate(self._xedges[:-1]):
for j,y in enumerate(self._yedges[:-1]):
v = self._hist[i][j]
self._pixels.append((Pixel(x,y,self._x_del, self._y_del, v), np.sqrt((x+self._x_del/2.)**2 + (y+self._y_del/2.)**2), v))
self._containment_cache = {}
self._min_containment = None
self._max_containment = None
self._build_containment_cache()
def _value(self,pixel_set, radius):
""" A shorthand function, so we don't actually call pixels that are either fully below the radius
or fully above the radius. If above, 0 is returned. If below, the number of photons in this
pixel are returned. If in between, we call the pixel to get the value.
Args:
pixel_set (list): The first entry is the pixel, the second is the min radius for the pixel,
the third is the max radius for the pixel.
radius (float): The radius at which to search
Return:
float: The value corresponding to the weghted number of photons contained in the radius
for this pixel.
"""
if pixel_set[1] > radius + self._search_radius:
return 0.
if pixel_set[1] < radius - self._search_radius:
return pixel_set[2]
return pixel_set[0].value(radius)
return pixel_set[0].value(radius)
#@do_profile(['Histogram._get_integral'])
def _get_integral(self,radius):
""" Integrates over all the pixels from 0 to the specified radius
Args:
radius (float): The max radius to integrate up to
Return:
float: The integral of all pixels up to radius
"""
integral = 0
#print 'Starting integral'
# start = time.time()
for i, pixel in enumerate(self._pixels):
# if i % 2000 == 0:
# sys.stdout.write('.')
# if i > 0 and i % 100000 == 0:
# sys.stdout.write(' = %d of %d (%f)\n'%(i,len(self._pixels), time.time()-start))
# sys.stdout.flush()
integral += self._value(pixel, radius)
# if i % 100000 != 0:
# sys.stdout.write(' = %d of %d (%f)\n'%(i,len(self._pixels), time.time()-start))
# print
return integral
def _containment_single(self, radius):
""" Run a single radius through the containment machinery. Caches values we've seen before to
(greatly) speed up computation time.
Args:
radius (float): Radius for which to determine containment
Return:
float: The containment fraction for this radius
"""
if self._empty_hist:
return 1.
if radius in self._containment_cache:
return self._containment_cache[radius]
else:
integral = self._get_integral(radius)
if self._total > 0:
containment = integral/self._total
else:
containment = 1.
self._containment_cache[radius] = containment
assert not np.isnan(containment)
return containment
def containment(self, radius):
""" A vectorized version of the containment search.
Args:
radius (numpy array): A 1d array of floats to get the containment for
Return:
numpy array: A 1d array of containment fractions corresponding to the radius array
"""
f_vec = np.vectorize(self._containment_single)
return f_vec(radius)
def _step_containment(self, start, stop, check_value, step, done):
"""Identify and cache the containments for a series of radii to build a
cdf curve.
Args:
start (float): Radius at which to start the stepping
stop (float): Radius to stop at
check_value (float): Part of the short-circut machinery. Stops execution of we've gone
higher than the requested containment value
step (float): The step value for the loop
done (function): Part of the short-circuit machinery. Takes two floats, the current
containment and the desired, and should return true if we can stop executing.
Return:
(float, float): The last radius checked, and the last containment value reached
"""
for r in np.arange(start,stop,step):
contain = self.containment(r)
if done(contain, check_value):
return (r,contain)
return (stop-step, self.containment(stop-step))
def _setup_containment_fcn(self):
""" Sets up the containment interpolation curve from the containment_cache."""
containment_curve = []
for key,value in self._containment_cache.iteritems():
containment_curve.append((key,value))
containment_curve.sort(key=lambda x:x[0])
y,x = zip(*containment_curve)
self._contain_fcn = interp.interp1d(x,y)
def _build_containment_cache(self):
""" Creates the initial containment cache values from containment = (0.34, 0.95)
or radius=(0, 2.1), whichever is smaller."""
self._max_containment = self._step_containment(.6, 2.1,0.95,.1, done=lambda x,y: x>y)
self._min_containment = self._step_containment(.6, 0,0.34,-.1, done=lambda x,y: x<y)
self._setup_containment_fcn()
def _radius_single(self,containment):
""" Gets the radius for a single containment value. If the cache extends beyond the requested
value, just return the interpolated containment function for the requested value. Otherwise
build the cache out until that is true. In the case that the requested containment is
greater than the size of the image (true if significant photons are in the corners--because
we stop searching a r=R_0, not x,y=R_0), return the size of the image.
Args:
containment (float): The containment for which to find a radius
Return:
float: The radius corresponding to the desired containment
"""
if self._empty_hist:
return 0.
if containment < self._min_containment[1]:
self._min_containment = self._step_containment(self._min_containment[0],0,containment,-.1, done=lambda x,y: x<y)
self._setup_containment_fcn()
elif containment > self._max_containment[1]:
self._max_containment = self._step_containment(self._max_containment[0],2.1, containment,.1, done=lambda x,y: x>y)
self._setup_containment_fcn()
radius = self._contain_fcn(containment)
return radius
def radius(self, containment):
""" A vectorized version of the radius search.
Args:
containment (numpy array): A 1d array of floats to get the radius for
Return:
numpy array: A 1d array of radius fractions corresponding to the containment array
"""
f_vec = np.vectorize(self._radius_single)
value = f_vec(containment)
assert not np.any(np.isnan(value)), 'Value is nan, '+str(value)
return value
#def test_radius():
# dist = Distribution()
# h = Histogram(*dist.values(10))
# assert ( h._radius(199,199) == h._radius(200,200))
# assert ( h._radius(0,0) == h._radius(399,399))
def test_fraction_of_area():
""" Basic pixel and short_cut funciton testing"""
dist = Distribution()
h = Histogram(*dist.values(10))
assert(h._fraction_of_area(.1, 201,201) == 1.)
assert(h._fraction_of_area(.01, 201,201) == 0.)
r = np.arange(0.01,0.03,0.001)
fractions = h._fraction_of_area(r,201*np.ones(len(r)),201*np.ones(len(r)))
assert np.all(fractions - [ 0.,0.,0.,0.,0.,0.00833252,0.03529066, 0.07989413,
0.14155475, 0.22046858, 0.31604745, 0.42789923, 0.55641604,
0.69199098, 0.80109793, 0.88412901, 0.94382904, 0.98137438,
0.99852956, 1.] < 1e-6 )
def test_containment():
""" Make sure the containment is right"""
dist = Distribution()
print dist
dist._rotation = 0
dist._std_one = .5
dist._std_two = .5
dist._set_containment()
h = Histogram(*dist.values(10000))
x = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0]
y = h.containment(x)
test_y = np.array([ 0.019119 , 0.07709066, 0.15880573, 0.26645272, 0.38526369,
0.50733592, 0.61797834, 0.71550907, 0.79519714, 0.85742165,
0.90597305, 0.93793006, 0.96381745, 0.97971121, 0.98867297,
0.99379649, 0.99773116, 0.9987 , 0.99924042])
assert np.all(test_y == y)
def test_contain_radius():
""" Make sure the radius value are right"""
dist = Distribution()
print dist
dist._rotation = 0
dist._std_one = .5
dist._std_two = .5
dist._set_containment()
h = Histogram(*dist.values(1000000))
radius01 = h.radius(.1)
radius02 = h.radius(.2)
radius03 = h.radius(.3)
radius04 = h.radius(.4)
radius05 = h.radius(.5)
radius06 = h.radius(.6)
radius07 = h.radius(.7)
radius08 = h.radius(.8)
radius09 = h.radius(.9)
good = np.array([ 0.22684681, 0.33301551, 0.42216387, 0.50615889,
0.59069347, 0.68062232, 0.77999141, 0.90031203,
1.07865906])
trial = np.array([radius01, radius02, radius03, radius04, radius05,
radius06, radius07, radius08, radius09,])
assert np.all( np.abs(good - trial) < 3e-3 ), np.abs(good - trial)
| [
11748,
299,
32152,
355,
45941,
198,
11748,
299,
32152,
13,
25120,
355,
374,
358,
198,
11748,
25064,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
629,
541,
88,
13,
3849,
16104,
378,
355,
987,
79,
198,
6738,... | 2.18467 | 5,675 |
# -*- coding: utf-8 -*-
"""
Management of OpenStack Keystone Services
=========================================
.. versionadded:: 2018.3.0
:depends: shade
:configuration: see :py:mod:`salt.modules.keystoneng` for setup instructions
Example States
.. code-block:: yaml
create service:
keystone_service.present:
- name: glance
- type: image
delete service:
keystone_service.absent:
- name: glance
create service with optional params:
keystone_service.present:
- name: glance
- type: image
- enabled: False
- description: 'OpenStack Image'
"""
from __future__ import absolute_import, print_function, unicode_literals
__virtualname__ = "keystone_service"
def present(name, auth=None, **kwargs):
"""
Ensure an service exists and is up-to-date
name
Name of the group
type
Service type
enabled
Boolean to control if service is enabled
description
An arbitrary description of the service
"""
ret = {"name": name, "changes": {}, "result": True, "comment": ""}
kwargs = __utils__["args.clean_kwargs"](**kwargs)
__salt__["keystoneng.setup_clouds"](auth)
service = __salt__["keystoneng.service_get"](name=name)
if service is None:
if __opts__["test"] is True:
ret["result"] = None
ret["changes"] = kwargs
ret["comment"] = "Service will be created."
return ret
kwargs["name"] = name
service = __salt__["keystoneng.service_create"](**kwargs)
ret["changes"] = service
ret["comment"] = "Created service"
return ret
changes = __salt__["keystoneng.compare_changes"](service, **kwargs)
if changes:
if __opts__["test"] is True:
ret["result"] = None
ret["changes"] = changes
ret["comment"] = "Service will be updated."
return ret
kwargs["name"] = service
__salt__["keystoneng.service_update"](**kwargs)
ret["changes"].update(changes)
ret["comment"] = "Updated service"
return ret
def absent(name, auth=None):
"""
Ensure service does not exist
name
Name of the service
"""
ret = {"name": name, "changes": {}, "result": True, "comment": ""}
__salt__["keystoneng.setup_clouds"](auth)
service = __salt__["keystoneng.service_get"](name=name)
if service:
if __opts__["test"] is True:
ret["result"] = None
ret["changes"] = {"id": service.id}
ret["comment"] = "Service will be deleted."
return ret
__salt__["keystoneng.service_delete"](name=service)
ret["changes"]["id"] = service.id
ret["comment"] = "Deleted service"
return ret
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
48032,
286,
4946,
25896,
29055,
6168,
198,
10052,
2559,
28,
198,
198,
492,
2196,
29373,
3712,
2864,
13,
18,
13,
15,
198,
198,
25,
10378,
2412,
25,
17979,
1... | 2.420601 | 1,165 |
from tkinter import *
if __name__ == '__main__':
root = Tk()
gui = CoreGUI(root)
root.mainloop()
| [
6738,
256,
74,
3849,
1330,
1635,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
6808,
796,
309,
74,
3419,
198,
220,
220,
220,
11774,
796,
7231,
40156,
7,
15763,
8,
198,
220,
220,
220,
6808,
... | 2.291667 | 48 |
from __future__ import division, print_function
import argparse
import apache_beam as beam
from apache_beam.options.pipeline_options import PipelineOptions, SetupOptions
from beam_nuggets.io import relational_db
if __name__ == '__main__':
main()
| [
6738,
11593,
37443,
834,
1330,
7297,
11,
3601,
62,
8818,
198,
198,
11748,
1822,
29572,
198,
198,
11748,
2471,
4891,
62,
40045,
355,
15584,
198,
6738,
2471,
4891,
62,
40045,
13,
25811,
13,
79,
541,
4470,
62,
25811,
1330,
37709,
29046,
... | 3.368421 | 76 |
import discord
from discord.ext import commands
import requests
from bs4 import BeautifulSoup
| [
11748,
36446,
198,
6738,
36446,
13,
2302,
1330,
9729,
220,
198,
11748,
7007,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
198
] | 4.130435 | 23 |
# -*- coding: utf-8 -*-
"""Twitter_Sentiment_Analysis_Code.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1WDOG_nS5eB8wzeVHRijPP4XEhobAgptY
"""
import pandas as pd
import numpy as np
import nltk
pwd
cd '/content/drive/My Drive/Deep_Learning /Twitter_Sentiment_Analysis'
ls
df = pd.read_csv('train.csv')
df.shape
df.head(n=15)
nltk.download('stopwords')
nltk.download('punkt')
print(len(df[df.label == 0]), 'Non-Hatred Tweets')
print(len(df[df.label == 1]), 'Hatred Tweets')
"""# Cleaning data"""
from nltk.tokenize import sent_tokenize,word_tokenize
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import stopwords
from nltk.stem.snowball import PorterStemmer,SnowballStemmer
from nltk.stem.lancaster import LancasterStemmer
tokenizer = RegexpTokenizer("[a-zA-Z]+")
ps = PorterStemmer()
sw = set(stopwords.words('english'))
df['cleaned_tweet'] = df.tweet.apply(cleanTweet)
df.head(n=15)
"""# Creating Word Cloud for Hated Tweets"""
from wordcloud import WordCloud
hated_words = " ".join(df[df['label']==1].cleaned_tweet)
print(hated_words)
import matplotlib.pyplot as plt
wordcloud = WordCloud(height=4000, width=4000, stopwords=sw, background_color='white')
wordcloud = wordcloud.generate(hated_words)
plt.imshow(wordcloud)
plt.axis('off')
plt.show()
"""# Creating Word Cloud for all Tweets"""
all_words = " ".join(df.cleaned_tweet)
wordcloud2 = WordCloud(height=5000, width=5000, stopwords=sw, background_color='white')
wordcloud2 = wordcloud2.generate(all_words)
plt.imshow(wordcloud2)
plt.axis('off')
plt.show()
countt = [29720,2242]
from collections import Counter
counter = Counter(df['label'])
labels = counter.keys()
counts = counter.values()
indexes = np.arange(len(labels))
width = 0.7
plt.bar(indexes,counts, width)
plt.xticks(indexes + width * 0.5, labels)
plt.title('Counts of 0 and 1')
plt.show()
"""# Building a Common Corpus"""
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.metrics import confusion_matrix, classification_report, f1_score
corpus = []
for i in range(0,31962):
corpus.append(df['cleaned_tweet'][i])
cv = CountVectorizer(stop_words=sw)
cv.fit(corpus)
X = cv.transform(corpus).toarray()
y = df.iloc[:,1].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=42)
classifier1 = LogisticRegression()
classifier1.fit(X_train, y_train)
y_pred = classifier1.predict(X_test)
y_prob = classifier1.predict_proba(X_test)
print(f1_score(y_test, y_pred))
print(classification_report(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))
from sklearn.metrics import accuracy_score
accuracy_score(y_test, y_pred)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
14254,
62,
31837,
3681,
62,
32750,
62,
10669,
13,
541,
2047,
65,
198,
198,
38062,
4142,
7560,
416,
1623,
4820,
2870,
13,
198,
198,
20556,
2393,
318,
5140,
379,
... | 2.630298 | 1,109 |
import pickle # saving and loading data
#import numpy as np # array manipulation
import random # for sampling
from sklearn.neighbors import KNeighborsClassifier
import os # folder creation
#--------------------------------------------------------------------------------------------------
# SAVING
def save_data(data, path):
"""save the data as a pickled file"""
with open(path, 'wb') as file:
pickle.dump(data, file)
print("Data is saved.", flush=True)
def load_data(path):
"""load the data from the disk"""
with open(path, 'rb') as file:
data = pickle.load(file)
print("Data is loaded.", flush=True)
return data
#--------------------------------------------------------------------------------------------------
# processing
def extract_trainset(dataset, sample_size=None):
"""converts the dataset into two lists"""
result_labels = []
result_points = []
for(label, points) in dataset:
if not (sample_size is None):
points = random.sample(points, sample_size)
labels = [label]*len(points)
result_points.extend(points)
result_labels.extend(labels)
print("the data is now two lists of lenght {l}.".format(l=len(result_points)), flush=True)
return result_labels, result_points
def crop_axis(points, axis_number):
"""keeps only the axis_number most important axis"""
return [point[:axis_number] for point in points]
def max_sample_size(dataset):
"""extract the length of the smallest label"""
result = float('inf')
for label, points in dataset:
result = min(result, len(points))
print("The maximum possible sample size for the dataset is {n}".format(n=result))
return result
#--------------------------------------------------------------------------------------------------
# KNN
#--------------------------------------------------------------------------------------------------
# TEST
input_folder = "./png_scaled_contrast_data/" + "preprocessed/"
train_sample_size = 1865
test_sample_size = 467
trainset = load_data(input_folder + "training/" + "compressed/" + "dataset.pfull")
#train_sample_size = max_sample_size(trainset)
trainlabels, trainpoints = extract_trainset(trainset, train_sample_size)
del trainset
testset = load_data(input_folder + "testing/" + "compressed/" + "dataset.pfull")
#test_sample_size = max_sample_size(testset)
testlabels, testpoints = extract_trainset(testset, test_sample_size)
del testset
#max_neighbours_number = 50
#max_axe_number = 80
#scores, neigbours, axes = cross_validate(input_folder, max_neighbours_number, max_axe_number,
# trainlabels, trainpoints, testlabels, testpoints)
#scores, neigbours, axes = load_data(input_folder + "model/" + "ska.scores")
#--------------------------------------------------------------------------------------------------
# grid search
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, classification_report, confusion_matrix
parameters = dict(knn__n_neighbors=range(1,50,2), crop__axis_number=range(2,100))
classifier = Pipeline([('crop',Crop()), ('knn',KNeighborsClassifier())])
gridSearch = GridSearchCV(classifier, parameters, cv=3, scoring='precision_weighted', verbose=2)
gridSearch.fit(trainpoints, trainlabels)
bestparam = gridSearch.best_params_
bestscore = gridSearch.best_score_
score = gridSearch.score(testpoints, testlabels)
print("The best score is {bs} ({s} on test set) which was reached at {p}.".format(bs=bestscore, s=score, p=bestparam))
#The best score is 0.5492849131003329 (0.5654278291654744 on test set) which was reached at {'crop__axis_number': 80, 'knn__n_neighbors': 35}. | [
11748,
2298,
293,
220,
1303,
8914,
290,
11046,
1366,
198,
2,
11748,
299,
32152,
355,
45941,
220,
1303,
7177,
17512,
198,
11748,
4738,
220,
1303,
329,
19232,
198,
6738,
1341,
35720,
13,
710,
394,
32289,
1330,
509,
46445,
32289,
9487,
748... | 3.084074 | 1,237 |
# -*- coding: UTF-8 -*-
# Copyright (c) 2015-2016 Łukasz Szpakowski
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
from os import sep
from os.path import isfile, join
import platform
import re
import threading
import jinja2
from espact.filters import default_filters, shsqe
from espact.variables import default_variables
_env = threading.local()
default_functions = {}
default_functions["configure"] = configure
default_functions["enter_to_build_dir"] = enter_to_build_dir
default_functions["leave_from_build_dir"] = leave_from_build_dir
default_functions["configure_for_autoconf"] = configure_for_autoconf
default_functions["enter_to_build_dir_for_autoconf"] = enter_to_build_dir_for_autoconf
default_functions["leave_from_build_dir_for_autoconf"] = leave_from_build_dir_for_autoconf
default_functions["configure_for_cmake"] = configure_for_cmake
default_functions["enter_to_build_dir_for_cmake"] = enter_to_build_dir_for_cmake
default_functions["leave_from_build_dir_for_cmake"] = leave_from_build_dir_for_cmake
default_functions["make"] = make
default_functions["gnu_make"] = gnu_make
default_functions["bsd_make"] = bsd_make
default_functions["packages"] = packages
default_functions["listdir"] = listdir
default_functions["walk"] = walk
| [
2,
532,
9,
12,
19617,
25,
41002,
12,
23,
532,
9,
12,
198,
2,
15069,
357,
66,
8,
1853,
12,
5304,
25370,
223,
2724,
292,
89,
27974,
41091,
12079,
198,
2,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1... | 3.247863 | 702 |
# pro_config
# !/usr/bin/env python
from .config import Config
class ProConfig(Config):
"""
Pro config
"""
# Application config
DEBUG = False
DB_HOST = 'localhost'
DB_NAME = 'todoStu'
DB_USER = 'root'
DB_PASSWORD = 'root'
DB_PORT = 3306
| [
2,
386,
62,
11250,
198,
2,
5145,
14,
14629,
14,
8800,
14,
24330,
21015,
198,
6738,
764,
11250,
1330,
17056,
628,
198,
4871,
1041,
16934,
7,
16934,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
1041,
4566,
198,
220,
220,
220,
... | 2.361345 | 119 |
import logging
from pathlib import Path
from concrete_settings.contrib.frameworks.django30 import Django30Settings
from concrete_settings.contrib.frameworks.django30_template import MyProjectSettings
| [
11748,
18931,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
6738,
10017,
62,
33692,
13,
3642,
822,
13,
19298,
19653,
13,
28241,
14208,
1270,
1330,
37770,
1270,
26232,
198,
6738,
10017,
62,
33692,
13,
3642,
822,
13,
19298,
19653,
13,
282... | 3.942308 | 52 |
import numpy as np
from gtsam import *
from gpmp2 import *
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d, Axes3D
from gpmp2_python.datasets.generate3Ddataset import generate3Ddataset
from gpmp2_python.robots.generateArm import generateArm
from gpmp2_python.utils.plot_utils import *
from gpmp2_python.utils.signedDistanceField3D import signedDistanceField3D
# dataset
dataset = generate3Ddataset("WAMDeskDataset")
origin = np.asarray([dataset.origin_x, dataset.origin_y, dataset.origin_z])
origin_point3 = Point3(origin)
cell_size = dataset.cell_size
# sdf
print("calculating signed distance field ...")
field = signedDistanceField3D(dataset.map, dataset.cell_size)
print("calculating signed distance field done")
# arm: WAM arm
arm = generateArm("WAMArm")
start_conf = np.asarray([-0.8, -1.70, 1.64, 1.29, 1.1, -0.106, 2.2])
end_conf = np.asarray([-0.0, 0.94, 0, 1.6, 0, -0.919, 1.55])
start_vel = np.zeros(7)
end_vel = np.zeros(7)
# plot problem setting
figure0 = plt.figure(0)
axis0 = Axes3D(figure0)
axis0.set_title("Problem Settings")
set3DPlotRange(figure0, axis0, dataset)
plotRobotModel(figure0, axis0, arm, start_conf)
plotRobotModel(figure0, axis0, arm, end_conf)
plotMap3D(figure0, axis0, dataset.corner_idx, origin, cell_size)
## settings
total_time_sec = 2.0
total_time_step = 10
total_check_step = 100
delta_t = total_time_sec / total_time_step
check_inter = total_check_step / total_time_step - 1
# GP
Qc = np.identity(7)
Qc_model = noiseModel_Gaussian.Covariance(Qc)
# algo settings
cost_sigma = 0.02
epsilon_dist = 0.2
# noise model
fix_sigma = 0.0001
pose_fix_model = noiseModel_Isotropic.Sigma(7, fix_sigma)
vel_fix_model = noiseModel_Isotropic.Sigma(7, fix_sigma)
# init sdf
sdf = SignedDistanceField(
origin_point3, cell_size, field.shape[0], field.shape[1], field.shape[2]
)
for z in range(field.shape[2]):
sdf.initFieldData(
z, field[:, :, z]
) # TODO: check this line with its matlab counterpart
#% plot settings
plot_inter_traj = False
plot_inter = 4
if plot_inter_traj:
total_plot_step = total_time_step * (plot_inter + 1)
else:
total_plot_step = total_time_step
pause_time = total_time_sec / total_plot_step
## initial traj
init_values = initArmTrajStraightLine(start_conf, end_conf, total_time_step)
# plot initial traj
if plot_inter_traj:
plot_values = interpolateArmTraj(init_values, Qc_model, delta_t, plot_inter)
else:
plot_values = init_values
# plot init values
figure1 = plt.figure(1)
axis1 = Axes3D(figure1)
axis1.set_title("Initial Values")
# plot world
plotMap3D(figure1, axis1, dataset.corner_idx, origin, cell_size)
set3DPlotRange(figure1, axis1, dataset)
for i in range(total_plot_step):
conf = plot_values.atVector(symbol(ord("x"), i))
plotArm(figure1, axis1, arm.fk_model(), conf, "b", 2)
plt.pause(pause_time)
## init optimization
graph = NonlinearFactorGraph()
graph_obs = NonlinearFactorGraph()
for i in range(total_time_step + 1):
key_pos = symbol(ord("x"), i)
key_vel = symbol(ord("v"), i)
# priors
if i == 0:
graph.push_back(PriorFactorVector(key_pos, start_conf, pose_fix_model))
graph.push_back(PriorFactorVector(key_vel, start_vel, vel_fix_model))
elif i == total_time_step:
graph.push_back(PriorFactorVector(key_pos, end_conf, pose_fix_model))
graph.push_back(PriorFactorVector(key_vel, end_vel, vel_fix_model))
# GP priors and cost factor
if i > 0:
key_pos1 = symbol(ord("x"), i - 1)
key_pos2 = symbol(ord("x"), i)
key_vel1 = symbol(ord("v"), i - 1)
key_vel2 = symbol(ord("v"), i)
graph.push_back(
GaussianProcessPriorLinear(
key_pos1, key_vel1, key_pos2, key_vel2, delta_t, Qc_model
)
)
# cost factor
graph.push_back(
ObstacleSDFFactorArm(key_pos, arm, sdf, cost_sigma, epsilon_dist)
)
graph_obs.push_back(
ObstacleSDFFactorArm(key_pos, arm, sdf, cost_sigma, epsilon_dist)
)
# GP cost factor
if check_inter > 0:
for j in range(1, check_inter + 1):
tau = j * (total_time_sec / total_check_step)
graph.push_back(
ObstacleSDFFactorGPArm(
key_pos1,
key_vel1,
key_pos2,
key_vel2,
arm,
sdf,
cost_sigma,
epsilon_dist,
Qc_model,
delta_t,
tau,
)
)
graph_obs.push_back(
ObstacleSDFFactorGPArm(
key_pos1,
key_vel1,
key_pos2,
key_vel2,
arm,
sdf,
cost_sigma,
epsilon_dist,
Qc_model,
delta_t,
tau,
)
)
## optimize!
use_LM = False
use_trustregion_opt = True
if use_LM:
parameters = LevenbergMarquardtParams() # Todo: check why this fails
parameters.setVerbosity("ERROR")
# parameters.setVerbosityLM('LAMBDA');
parameters.setlambdaInitial(1000.0)
optimizer = LevenbergMarquardtOptimizer(graph, init_values, parameters)
elif use_trustregion_opt:
parameters = DoglegParams()
parameters.setVerbosity("ERROR")
optimizer = DoglegOptimizer(graph, init_values, parameters)
else:
parameters = GaussNewtonParams()
parameters.setVerbosity("ERROR")
optimizer = GaussNewtonOptimizer(graph, init_values, parameters)
print("Initial Error = %d\n", graph.error(init_values))
print("Initial Collision Cost: %d\n", graph_obs.error(init_values))
optimizer.optimizeSafely()
result = optimizer.values()
print("Error = %d\n", graph.error(result))
print("Collision Cost End: %d\n", graph_obs.error(result))
# plot results
if plot_inter_traj:
plot_values = interpolateArmTraj(result, Qc_model, delta_t, plot_inter)
else:
plot_values = result
# plot final values
figure2 = plt.figure(2)
axis2 = Axes3D(figure2)
axis2.set_title("Result Values")
plotMap3D(figure2, axis2, dataset.corner_idx, origin, cell_size)
set3DPlotRange(figure2, axis2, dataset)
for i in range(total_plot_step):
conf = plot_values.atVector(symbol(ord("x"), i))
plotArm(figure2, axis2, arm.fk_model(), conf, "b", 2)
plt.pause(pause_time)
# plot final values
figure3 = plt.figure(3)
axis3 = Axes3D(figure3)
axis3.set_title("Result Values")
plotMap3D(figure3, axis3, dataset.corner_idx, origin, cell_size)
set3DPlotRange(figure3, axis3, dataset)
for i in range(total_plot_step):
conf = plot_values.atVector(symbol(ord("x"), i))
plotRobotModel(figure3, axis3, arm, conf)
plt.pause(pause_time)
plt.show()
| [
11748,
299,
32152,
355,
45941,
198,
6738,
308,
912,
321,
1330,
1635,
198,
6738,
27809,
3149,
17,
1330,
1635,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
285,
489,
62,
25981,
74,
896,
13,
76,
29487,
18,
6... | 2.138321 | 3,275 |
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 1 18:14:28 2020
@author: danish
"""
from tensorflow.keras.layers import Conv3D,ConvLSTM2D,Conv3DTranspose, Input
from tensorflow.keras.models import Model
from tensorflow.compat.v1.keras.backend import set_session
import tensorflow as tf
from PIL import ImageFont, ImageDraw, Image
import numpy as np
import cv2
import sys
#from tensorflow.keras.models import load_model
def TF_GPUsetup(GB=4):
"""
Restrict TensorFlow to only allocate 1*X GB of memory on the first GPU. Often Needed
When GPU run out of memory. It would be one of the solution for the issue: Failed to
get convolution algorithm. This is probably because cuDNN failed to initialize,
Parameters
----------
GB : int, optional
The amount of GPU memory you want to use. It is recommended to use 1 GB
less than your total GPU memory. The default is 4.
Returns
-------
None.
"""
if type(GB)!=int:
raise TypeError('Type of Parameter `GB` must be `int` and it should be 1 GB less than your GPU memory')
gpus = tf.config.experimental.list_physical_devices('GPU')
config = [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=(1024*GB))]
if gpus:
# Restrict TensorFlow to only allocate 1*X GB of memory on the first GPU
try:
tf.config.experimental.set_virtual_device_configuration(gpus[0], config)
logical_gpus = tf.config.experimental.list_logical_devices('GPU')
print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
except RuntimeError as e:
# Virtual devices must be set before GPUs have been initialized
print(e)
print('\nTensorflow GPU installed: '+str(tf.test.is_built_with_cuda()))
print('Is Tensorflow using GPU: '+str(tf.test.is_gpu_available()))
def ShowVideo(cap, v_frame, text):
"""
Parameters
----------
cap : Object
Object to the cv2.VideoCapture() class.
v_frame : TYPE
DESCRIPTION.
text : TYPE
DESCRIPTION.
Returns
-------
None.
"""
v_frame = OverlayText2Img(v_frame, text)
#cv2.namedWindow('image', cv2.WINDOW_NORMAL)
cv2.imshow('Real Time Anomaly Detection - Github.com/irdanish11',v_frame)
# Press Q on keyboard to exit
if cv2.waitKey(25) & 0xFF == ord('q'):
if cap is not None:
cap.release()
cv2.destroyAllWindows()
raise KeyboardInterrupt('Real Time Anomoly Detection Stopped due to Keyboard Interrupt!')
def MSE(x1,x2):
"""
Compute Euclidean Distance Loss between input frame and the reconstructed frame and then
compute the Mean Squared Error
Parameters
----------
x1 : TYPE
DESCRIPTION.
x2 : TYPE
DESCRIPTION.
Returns
-------
mean_dist : TYPE
DESCRIPTION.
"""
diff=x1-x2
a,b,c,d,e=diff.shape
n_samples=a*b*c*d*e
sq_diff=diff**2
Sum=sq_diff.sum()
dist=np.sqrt(Sum)
mean_dist=dist/n_samples
return mean_dist
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
3300,
2758,
220,
352,
1248,
25,
1415,
25,
2078,
12131,
198,
198,
31,
9800,
25,
288,
7115,
198,
37811,
198,
198,
6738,
11192,
273,
11125,
13,
61... | 2.539867 | 1,204 |
# !/bin/python
# -*- coding:utf-8 -*-
# @Author : Daniel.Pei
# @Email : peixq1222@icloud.com
# @Created : 2019/12/11 22:57
def hello_world(name: str = "World"):
"""
Say hello
:param name: Visit name.
"""
return "Hello, {name}!".format(name=name)
class CalculatorUtil(object):
"""
Calculator utils class.
Support add, minus, multiply and divide operations.
"""
@classmethod
def add_num(cls, num_x, num_y):
"""
Add up two number.
:param num_x:
:param num_y:
:return:
"""
if not num_x or not num_y:
raise ValueError("Invalid input : ( {num_x}, {num_y} )".format(num_x=num_x, num_y=num_y))
return num_x + num_y
| [
2,
5145,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
198,
2,
2488,
13838,
220,
1058,
7806,
13,
6435,
72,
198,
2,
2488,
15333,
220,
220,
1058,
613,
844,
80,
1065,
1828,
31,
291,
75,
2778,... | 2.135838 | 346 |
from timemachines.skating import prior, posterior, residuals
from timemachines.skatertools.visualization.priorplot import prior_plot, prior_plot_exogenous | [
6738,
4628,
368,
620,
1127,
13,
8135,
803,
1330,
3161,
11,
34319,
11,
29598,
82,
198,
6738,
4628,
368,
620,
1127,
13,
8135,
729,
31391,
13,
41464,
1634,
13,
3448,
273,
29487,
1330,
3161,
62,
29487,
11,
3161,
62,
29487,
62,
1069,
278... | 3.581395 | 43 |
from utils import create_permuted_mnist_task
from model import cnn_model
import matplotlib.pyplot as plt
#igore the warning messages. Cause the kal drawback is slower than normal process, keras
#will print some warning messages.
import warnings
warnings.warn = warn
#Plot the accuracy of test data
#Parameters:
# - name: the name of the model. It will be used in label
# - acc: list of accuracy
# - data_num: which data is plotted(D1,D2 or D3)
#Load the drift data
#task = create_permuted_mnist_task(3)
train_set,test_set,vali_set = create_disjoint_mnist_task()
#record the test accuracy.
#Parameters:
# - model: the instance of model
# - acc_test_d1: record the accuracy of model on D1
# - acc_test_d2: record the accuracy of model on D2
# - acc_test_d3: record the accuracy of model on D3
# - Train the model.
# - The validation dataset is alwasy D1_test: model.val_data(X_test[:TEST_NUM],y_test[:TEST_NUM])
# - After the model is trained on each dataset, it will record the accuracy of model on test
# data of D1,D2 and D3 by using test_acc()
# - Return: this function will return the accuracy of D1_test,D2_test,D3_test after being trained
# on each dataset.
global model
first_model = None
if __name__ == '__main__':
#Train the model.
print('--'*10,'kal','--'*10)
kal_d1,kal_d2,kal_d3 = train('kal')
save_acc('kal',[kal_d1,kal_d2,kal_d3])
print('--' * 10, 'nor', '--' * 10)
nor_d1, nor_d2, nor_d3 = train('nor')
save_acc('nor', [nor_d1, nor_d2, nor_d3])
#print('--'*10,'kal_cur','--'*10)
#kal_cur_d1, kal_cur_d2, kal_cur_d3 = train('kal_cur')
#save_acc('kal_cur',[kal_cur_d1, kal_cur_d2, kal_cur_d3])
#print('--'*10,'kal_pre','--'*10)
#kal_pre_d1, kal_pre_d2, kal_pre_d3 = train('kal_pre')
#save_acc('kal_pre',[kal_pre_d1, kal_pre_d2, kal_pre_d3])
#Plot the accuracy on test data.
acc_plot('nor',nor_d1,1)
acc_plot('nor',nor_d2,2)
acc_plot('nor',nor_d3,3)
acc_plot('kal',kal_d1,1)
acc_plot('kal',kal_d2,2)
acc_plot('kal',kal_d3,3)
#acc_plot('kal_pre',kal_pre_d1,1)
#acc_plot('kal_pre',kal_pre_d2,2)
#acc_plot('kal_pre',kal_pre_d3,3)
#acc_plot('kal_cur',kal_cur_d1,1)
#acc_plot('kal_cur',kal_cur_d2,2)
#acc_plot('kal_cur',kal_cur_d3,3)
| [
6738,
3384,
4487,
1330,
2251,
62,
16321,
7241,
62,
10295,
396,
62,
35943,
198,
6738,
2746,
1330,
269,
20471,
62,
19849,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
628,
198,
2,
328,
382,
262,
6509,
6218,
13,
24228,
... | 2.240201 | 995 |
"""
Cursor
A cursor is a lazy iterator.
"""
import typing
import pymongo.cursor
from yuno.direction import SortDirectionType, SortDirection
class Cursor():
"""
A cursor is a lazy iterator.
Example
-------
>>> for document in collection.find(defered=True):
... print(document) # documents are loaded as they are used
"""
def __init__(self, cursor: pymongo.cursor.Cursor, verification: typing.Callable = None) -> None:
"""
Initialize the cursor.
Parameters
----------
cursor : pymongo.cursor.Cursor
The cursor to wrap.
verification : typing.Callable
A function to verify each object.
"""
self.cursor = cursor
self.id = self.cursor.cursor_id
self.verification = verification if verification is not None else lambda x: x
def __next__(self):
"""Returns the next object."""
return self.next()
def next(self):
"""Returns the next object."""
return self.verification(self.cursor.next())
def try_next(self):
"""
Try to get the next object without raising an exception.
"""
try:
return self.next() # should change it to have the same behavior as Watch's __next__
except StopIteration:
return None
def __iter__(self) -> typing.Iterator:
"""
Returns the iterator.
"""
return self
def __repr__(self) -> str:
"""String representation of the cursor."""
return "{}(id={})".format(self.__class__.__name__, self.id)
# @property
# def collection(self) -> "collection.YunoCollection":
# """Collection the cursor is iterating over."""
# _collection = self.cursor.collection
# return collection.YunoCollection(_collection.database, _collection.name)
def close(self) -> None:
"""Closes the cursor."""
self.cursor.close()
@property
def disk_use(self) -> bool:
"""Wether are not to allow disk use"""
return self.cursor.__allow_disk_use
@disk_use.setter
def disk_use(self, allow: bool) -> bool:
"""
Wether are not to allow disk use
Parameters
----------
allow : bool
Wether are not to allow disk use
"""
return self.cursor.allow_disk_use(allow)
def explain(self) -> typing.Any:
"""Explain the query plan."""
return self.cursor.explain()
def hint(self, index: str):
"""Hint the query to use the given index and returns the cursor object to use chaining."""
self.cursor.hint(index)
return self
def limit(self, limit: int):
"""Limit the number of objects to return and returns the cursor object to use chaining."""
self.cursor.limit(limit)
return self
def skip(self, number: int):
"""Skip the first `number` objects and returns the cursor object to use chaining."""
self.cursor.skip(number)
return self
def sort(self, field: typing.Union[str, typing.List[typing.Tuple[str, SortDirectionType]]], direction: SortDirectionType = SortDirection.ASCENDING):
"""
Sort the objects by the given field.
Parameters
----------
field : str or list[tuple[str, SortDirectionType]]
The field to sort by.
If this is a list, each tuple is a field and the direction to sort by.
direction : SortDirectionType
The direction to sort by.
Returns
-------
Cursor
The current object to use chaining.
"""
if not isinstance(field, str) and isinstance(field, typing.Iterable):
for index, element in enumerate(field):
if isinstance(element, str) or not isinstance(element, typing.Iterable):
field[index] = (str(element), direction)
direction = None
self.cursor.sort(field, direction)
return self
def where(self, code: str):
"""
Add a where clause to the query.
Parameters
----------
code : str
The code to add.
Returns
-------
Cursor
The current object to use chaining.
"""
self.cursor.where(code)
return self
@property
def alive(self):
"""Does this cursor have the potential to return more data?"""
return self.cursor.alive
def __enter__(self):
"""
Enter the context manager.
Example
-------
>>> with db.watch() as stream: # <-- this line calls __enter__
... for event in stream:
... print(event)
"""
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""
Exit the context manager.
Example
-------
>>> with db.watch() as stream:
... for event in stream:
... print(event)
... # <-- this line calls __exit__
"""
self.close()
| [
37811,
198,
34,
21471,
198,
198,
32,
23493,
318,
257,
16931,
41313,
13,
198,
37811,
198,
198,
11748,
19720,
198,
198,
11748,
279,
4948,
25162,
13,
66,
21471,
198,
6738,
331,
36909,
13,
37295,
1330,
33947,
35,
4154,
6030,
11,
33947,
35... | 2.366543 | 2,158 |
from SterownikDrukarki import SterownikDrukarki
from SDNR import SDNR
| [
6738,
18949,
593,
1134,
35,
622,
74,
668,
72,
1330,
18949,
593,
1134,
35,
622,
74,
668,
72,
198,
6738,
9834,
24723,
1330,
9834,
24723,
198
] | 2.692308 | 26 |
#### Running Amanzi to generate an output file ####
#### Takes in chemistry database for native Amanzi chemistry
import os, sys, subprocess, shutil
| [
4242,
18162,
42614,
17027,
284,
7716,
281,
5072,
2393,
1303,
21017,
198,
4242,
33687,
287,
16585,
6831,
329,
6868,
42614,
17027,
16585,
198,
11748,
28686,
11,
25064,
11,
850,
14681,
11,
4423,
346,
198
] | 4.352941 | 34 |
from typing import Optional
from sqlalchemy.orm import Session
from sqlalchemy.exc import SQLAlchemyError
from app.db.base_class import Base
from app.utils.repeater import repeats
from fastapi.logger import logger
from app import schemas
from app.core.config import settings
@repeats(amount=3, delay=20, message="Could not init database", logger=logger)
| [
6738,
19720,
1330,
32233,
198,
6738,
44161,
282,
26599,
13,
579,
1330,
23575,
198,
6738,
44161,
282,
26599,
13,
41194,
1330,
16363,
2348,
26599,
12331,
198,
6738,
598,
13,
9945,
13,
8692,
62,
4871,
1330,
7308,
198,
6738,
598,
13,
26791,... | 3.670103 | 97 |
input = "1,0,0,3,1,1,2,3,1,3,4,3,1,5,0,3,2,1,6,19,2,19,6,23,1,23,5,27,1,9,27,31,1,31,10,35,2,35,9,39,1,5,39,43,2,43,9,47,1,5,47,51,2,51,13,55,1,55,10,59,1,59,10,63,2,9,63,67,1,67,5,71,2,13,71,75,1,75,10,79,1,79,6,83,2,13,83,87,1,87,6,91,1,6,91,95,1,10,95,99,2,99,6,103,1,103,5,107,2,6,107,111,1,10,111,115,1,115,5,119,2,6,119,123,1,123,5,127,2,127,6,131,1,131,5,135,1,2,135,139,1,139,13,0,99,2,0,14,0"
intcode_data = [int(x) for x in input.split(',')]
for noun in range(100):
for verb in range(100):
if test(noun, verb) == 19690720:
print(100 * noun + verb)
break | [
15414,
796,
366,
16,
11,
15,
11,
15,
11,
18,
11,
16,
11,
16,
11,
17,
11,
18,
11,
16,
11,
18,
11,
19,
11,
18,
11,
16,
11,
20,
11,
15,
11,
18,
11,
17,
11,
16,
11,
21,
11,
1129,
11,
17,
11,
1129,
11,
21,
11,
1954,
11,
... | 1.513854 | 397 |
"""
Classes to calculate gravity-based measures of potential spatial accessibility.
These measures assign accessibility scores to demand locations based on their proximity to supply
locations. The main model used here is a gravitational model using non-standard decay functions.
References:
Luo, W. and Qi, Y. (2009) An enhanced two-step floating catchment area (E2SFCA) method for
measuring spatial accessibility to primary care physicians. Health and Place 15, 11001107.
Luo, W. and Wang, F. (2003) Measures of spatial accessibility to health care in a GIS
environment: synthesis and a case study in the Chicago region. Environment and Planning B:
Planning and Design 30, 865884.
Wang, F. (2012) Measurement, optimization, and impact of health care accessibility:
a methodological review. Annals of the Association of American Geographers 102, 11041112.
Wan, Neng & Zou, Bin & Sternberg, Troy. (2012). A 3-step floating catchment area method for
analyzing spatial access to health services. International Journal of Geographical Information
Science. 26. 1073-1089. 10.1080/13658816.2011.624987.
"""
import inspect
import functools
import warnings
import sys
import numpy as np
from aceso import decay
class GravityModel(object):
"""Represents an instance of a gravitational model of spatial interaction.
Different choices of decay function lead to the following models:
- Standard gravity models
- Two-Step Floating Catchment Area (2SFCA) models
- Enhanced 2SFCA (E2SFCA) models
- Three-Step FCA (3SFCA) models
- Modified 2SFCA (M2SFCA) models
- Kernel Density 2SFCA (KD2SFCA) models
"""
def __init__(
self, decay_function, decay_params={}, huff_normalization=False, suboptimality_exponent=1.0
):
"""Initialize a gravitational model of spatial accessibility.
Parameters
----------
decay_function : callable or str
If str, the name of a decay function in the ``decay`` module.
Some available names are 'uniform', 'raised_cosine', and 'gaussian_decay'.
If callable, a vectorized numpy function returning demand dropoffs by distance.
decay_params : mapping
Parameter: value mapping for each argument of the specified decay function.
These parameters are bound to the decay function to create a one-argument callable.
huff_normalization: bool
Flag used to normalize demand through Huff-like interaction probabilities.
Used in 3SFCA to curtail demand over-estimation.
suboptimality_exponent: float
Used in M2SFCA to indicate the extent to account for network suboptimality in access.
This parameter allows for the differentiation between two scenarios:
1. Three demand locations each at a distance of 1.0 mile from the sole provider;
2. Three demand locations each at a distance of 2.0 miles from the sole provider.
Values greater than 1.0 for this parameter will result in accessibility scores
whose weighted average is less than the overall supply.
"""
self.decay_function = self._bind_decay_function_parameters(decay_function, decay_params)
self.huff_normalization = huff_normalization
self.suboptimality_exponent = suboptimality_exponent
@staticmethod
def _bind_decay_function_parameters(decay_function, decay_params):
"""Bind the given parameters for the decay function.
Returns
-------
callable
A one-argument callable that accepts one-dimensional numpy arrays.
"""
# If a name was passed, get the callable corresponding to that name.
if isinstance(decay_function, str):
decay_function = decay.get_decay_function(decay_function)
if sys.version_info[0] >= 3:
missing_params = {
k for k in list(inspect.signature(decay_function).parameters)[1:]
if (k not in decay_params)
}
valid_params = {
k: v for k, v in decay_params.items()
if k in inspect.signature(decay_function).parameters
}
elif sys.version_info[0] == 2:
missing_params = {
k for k in inspect.getargspec(decay_function).args[1:]
if (k not in decay_params)
}
valid_params = {
k: v for k, v in decay_params.items()
if k in inspect.getargspec(decay_function).args
}
# If any required parameters are missing, raise an error.
if missing_params:
raise ValueError(
'Parameter(s) "{}" must be specified!'.format(', '.join(missing_params)))
# Warn users if a parameter was passed that the specified function does not accept.
for param in decay_params:
if param not in valid_params:
warnings.warn('Invalid parameter {param} was passed to {func}!'.format(
param=param,
func=decay_function
))
# If any valid parameters are present, bind their values.
if valid_params:
decay_function = functools.partial(decay_function, **valid_params)
return decay_function
def calculate_accessibility_scores(
self,
distance_matrix,
demand_array=None,
supply_array=None
):
"""Calculate accessibility scores from a 2D distance matrix.
Parameters
----------
distance_matrix : np.ndarray(float)
A matrix whose entry in row i, column j is the distance between demand point i
and supply point j.
demand_array : np.array(float) or None
A one-dimensional array containing demand multipliers for each demand location.
The length of the array must match the number of rows in distance_matrix.
supply_array : np.array(float) or None
A one-dimensional array containing supply multipliers for each supply location.
The length of the array must match the number of columns in distance_matrix.
Returns
-------
array
An array of access scores at each demand location.
"""
if demand_array is None:
demand_array = np.ones(distance_matrix.shape[0])
if supply_array is None:
supply_array = np.ones(distance_matrix.shape[1])
demand_potentials = self._calculate_demand_potentials(
distance_matrix=distance_matrix,
demand_array=demand_array,
)
inverse_demands = np.reciprocal(demand_potentials)
inverse_demands[np.isinf(inverse_demands)] = 0.0
access_ratio_matrix = supply_array * inverse_demands
access_ratio_matrix = access_ratio_matrix * np.power(
self.decay_function(distance_matrix),
self.suboptimality_exponent
)
if self.huff_normalization:
access_ratio_matrix *= self._calculate_interaction_probabilities(distance_matrix)
return np.nansum(access_ratio_matrix, axis=1)
def _calculate_demand_potentials(self, distance_matrix, demand_array):
"""Calculate the demand potential at each input location.
Returns
-------
array
An array of demand at each supply location.
"""
demand_matrix = demand_array.reshape(-1, 1) * self.decay_function(distance_matrix)
if self.huff_normalization:
demand_matrix *= self._calculate_interaction_probabilities(distance_matrix)
return np.nansum(demand_matrix, axis=0)
def _calculate_interaction_probabilities(self, distance_matrix):
"""Calculate the demand potential at each input location.
Parameters
----------
distance_matrix : np.ndarray(float)
A matrix whose entry in row i, column j is the distance between demand point i
and supply point j.
Returns
-------
array
A 2D-array of the interaction probabilities between each demand point and supply point.
"""
# FIXME: Use alternative decay function to capture the Huff model of spatial interaction.
# This particular function isn't well-behaved near 0.
weights = np.power(distance_matrix, -1)
# FIXME: Handle the case of 0 distance more intelligently.
weights[np.isinf(weights)] = 10**8
return weights / np.nansum(weights, axis=1)[:, np.newaxis]
class TwoStepFCA(GravityModel):
"""Represents an instance of the standard Two-Step Floating Catchment Area (2SFCA) model."""
def __init__(self, radius):
"""Initialize a 2SFCA model with the specified radius.
Parameters
----------
radius : float
The radius of each floating catchment.
Pairs of points further than this distance apart are deemed mutually inaccessible.
Points within this radius contribute the full demand amount (with no decay).
"""
super(TwoStepFCA, self).__init__(decay_function='uniform', decay_params={'scale': radius})
class ThreeStepFCA(GravityModel):
"""Represents an instance of the Three-Step Floating Catchment Area (3SFCA) model.
In 3SFCA, the presence of nearby options influences the amount of demand pressure each demand
location places on other supply locations. A demand location with many nearby options will not
exert the same demand on faraway supply locations as a demand location at the same distance
that has no nearby alternatives.
This model is designed to account for this observation and reduce the demand over-estimation
that may take place with ordinary 2SFCA.
References
----------
Wan, Neng & Zou, Bin & Sternberg, Troy. (2012). A 3-step floating catchment area method for
analyzing spatial access to health services. International Journal of Geographical Information
Science. 26. 1073-1089. 10.1080/13658816.2011.624987.
"""
def __init__(self, decay_function, decay_params):
"""Initialize a gravitational model of spatial accessibility using Huff-like normalization.
Parameters
----------
decay_function : callable or str
If str, the name of a decay function in the ``decay`` module.
Some available names are 'uniform', 'raised_cosine', and 'gaussian_decay'.
If callable, a vectorized numpy function returning demand dropoffs by distance.
decay_params : mapping
Parameter: value mapping for each argument of the specified decay function.
These parameters are bound to the decay function to create a one-argument callable.
"""
super(ThreeStepFCA, self).__init__(
decay_function=decay_function,
decay_params=decay_params,
huff_normalization=True,
)
| [
37811,
198,
9487,
274,
284,
15284,
13522,
12,
3106,
5260,
286,
2785,
21739,
28969,
13,
198,
198,
4711,
5260,
8333,
28969,
8198,
284,
3512,
7064,
1912,
319,
511,
20387,
284,
5127,
198,
17946,
602,
13,
383,
1388,
2746,
973,
994,
318,
25... | 2.661143 | 4,164 |
#!/usr/bin/python3
r'''Study the precision and accuracy of the various triangulation routines'''
import sys
import argparse
import re
import os
args = parse_args()
import numpy as np
import numpysane as nps
import gnuplotlib as gp
import pickle
import os.path
# I import the LOCAL mrcal
scriptdir = os.path.dirname(os.path.realpath(__file__))
sys.path[:0] = f"{scriptdir}/../..",
import mrcal
############ bias visualization
#
# I simulate pixel noise, and see what that does to the triangulation. Play with
# the geometric details to get a sense of how these behave
model0 = mrcal.cameramodel( intrinsics = ('LENSMODEL_PINHOLE',
np.array((1000., 1000., 500., 500.))),
imagersize = np.array((1000,1000)) )
model1 = mrcal.cameramodel( intrinsics = ('LENSMODEL_PINHOLE',
np.array((1100., 1100., 500., 500.))),
imagersize = np.array((1000,1000)) )
# square camera layout
t01 = np.array(( 1., 0.1, -0.2))
R01 = mrcal.R_from_r(np.array((0.001, -0.002, -0.003)))
Rt01 = nps.glue(R01, t01, axis=-2)
p = np.array(args.observed_point)
q0 = mrcal.project(p, *model0.intrinsics())
sigma = 0.1
cache_file = "/tmp/triangulation-study-cache.pickle"
if args.cache is None or args.cache == 'write':
v0local_noisy, v1local_noisy,v0_noisy,v1_noisy,_,_,_,_ = \
mrcal.synthetic_data. \
_noisy_observation_vectors_for_triangulation(p,Rt01,
model0.intrinsics(),
model1.intrinsics(),
args.Nsamples,
sigma = sigma)
p_sampled_geometric = mrcal.triangulate_geometric( v0_noisy, v1_noisy, t01 )
p_sampled_lindstrom = mrcal.triangulate_lindstrom( v0local_noisy, v1local_noisy, Rt01 )
p_sampled_leecivera_l1 = mrcal.triangulate_leecivera_l1( v0_noisy, v1_noisy, t01 )
p_sampled_leecivera_linf = mrcal.triangulate_leecivera_linf( v0_noisy, v1_noisy, t01 )
p_sampled_leecivera_mid2 = mrcal.triangulate_leecivera_mid2( v0_noisy, v1_noisy, t01 )
p_sampled_leecivera_wmid2 = mrcal.triangulate_leecivera_wmid2(v0_noisy, v1_noisy, t01 )
q0_sampled_geometric = mrcal.project(p_sampled_geometric, *model0.intrinsics())
q0_sampled_lindstrom = mrcal.project(p_sampled_lindstrom, *model0.intrinsics())
q0_sampled_leecivera_l1 = mrcal.project(p_sampled_leecivera_l1, *model0.intrinsics())
q0_sampled_leecivera_linf = mrcal.project(p_sampled_leecivera_linf, *model0.intrinsics())
q0_sampled_leecivera_mid2 = mrcal.project(p_sampled_leecivera_mid2, *model0.intrinsics())
q0_sampled_leecivera_wmid2 = mrcal.project(p_sampled_leecivera_wmid2, *model0.intrinsics())
range_sampled_geometric = nps.mag(p_sampled_geometric)
range_sampled_lindstrom = nps.mag(p_sampled_lindstrom)
range_sampled_leecivera_l1 = nps.mag(p_sampled_leecivera_l1)
range_sampled_leecivera_linf = nps.mag(p_sampled_leecivera_linf)
range_sampled_leecivera_mid2 = nps.mag(p_sampled_leecivera_mid2)
range_sampled_leecivera_wmid2 = nps.mag(p_sampled_leecivera_wmid2)
if args.cache is not None:
with open(cache_file,"wb") as f:
pickle.dump((v0local_noisy,
v1local_noisy,
v0_noisy,
v1_noisy,
p_sampled_geometric,
p_sampled_lindstrom,
p_sampled_leecivera_l1,
p_sampled_leecivera_linf,
p_sampled_leecivera_mid2,
p_sampled_leecivera_wmid2,
q0_sampled_geometric,
q0_sampled_lindstrom,
q0_sampled_leecivera_l1,
q0_sampled_leecivera_linf,
q0_sampled_leecivera_mid2,
q0_sampled_leecivera_wmid2,
range_sampled_geometric,
range_sampled_lindstrom,
range_sampled_leecivera_l1,
range_sampled_leecivera_linf,
range_sampled_leecivera_mid2,
range_sampled_leecivera_wmid2),
f)
print(f"Wrote cache to {cache_file}")
else:
with open(cache_file,"rb") as f:
(v0local_noisy,
v1local_noisy,
v0_noisy,
v1_noisy,
p_sampled_geometric,
p_sampled_lindstrom,
p_sampled_leecivera_l1,
p_sampled_leecivera_linf,
p_sampled_leecivera_mid2,
p_sampled_leecivera_wmid2,
q0_sampled_geometric,
q0_sampled_lindstrom,
q0_sampled_leecivera_l1,
q0_sampled_leecivera_linf,
q0_sampled_leecivera_mid2,
q0_sampled_leecivera_wmid2,
range_sampled_geometric,
range_sampled_lindstrom,
range_sampled_leecivera_l1,
range_sampled_leecivera_linf,
range_sampled_leecivera_mid2,
range_sampled_leecivera_wmid2) = \
pickle.load(f)
plot_options = {}
if args.set is not None:
plot_options['set'] = args.set
if args.unset is not None:
plot_options['unset'] = args.unset
if args.hardcopy is not None:
plot_options['hardcopy'] = args.hardcopy
if args.terminal is not None:
plot_options['terminal'] = args.terminal
if args.ellipses:
# Plot the reprojected pixels and the fitted ellipses
data_tuples = \
[ *mrcal.utils._plot_args_points_and_covariance_ellipse( q0_sampled_geometric, 'geometric' ),
*mrcal.utils._plot_args_points_and_covariance_ellipse( q0_sampled_lindstrom, 'lindstrom' ),
*mrcal.utils._plot_args_points_and_covariance_ellipse( q0_sampled_leecivera_l1, 'lee-civera-l1' ),
*mrcal.utils._plot_args_points_and_covariance_ellipse( q0_sampled_leecivera_linf, 'lee-civera-linf' ),
*mrcal.utils._plot_args_points_and_covariance_ellipse( q0_sampled_leecivera_mid2, 'lee-civera-mid2' ),
*mrcal.utils._plot_args_points_and_covariance_ellipse( q0_sampled_leecivera_wmid2,'lee-civera-wmid2' ), ]
if not args.samples:
# Not plotting samples. Get rid of all the "dots" I'm plotting
data_tuples = [ t for t in data_tuples if \
not (isinstance(t[-1], dict) and \
'_with' in t[-1] and \
t[-1]['_with'] == 'dots') ]
if args.title is not None:
title = args.title
else:
title = 'Reprojected triangulated point'
if args.extratitle is not None:
title += ': ' + args.extratitle
gp.plot( *data_tuples,
( q0,
dict(_with = 'points pt 3 ps 2',
tuplesize = -2,
legend = 'Ground truth')),
square = True,
wait = 'hardcopy' not in plot_options,
title = title,
**plot_options)
elif args.ranges:
# Plot the range distribution
range_ref = nps.mag(p)
if args.title is not None:
title = args.title
else:
title = "Range distribution"
if args.extratitle is not None:
title += ': ' + args.extratitle
gp.plot( nps.cat( range_sampled_geometric,
range_sampled_lindstrom,
range_sampled_leecivera_l1,
range_sampled_leecivera_linf,
range_sampled_leecivera_mid2,
range_sampled_leecivera_wmid2 ),
legend = np.array(( 'range_sampled_geometric',
'range_sampled_lindstrom',
'range_sampled_leecivera_l1',
'range_sampled_leecivera_linf',
'range_sampled_leecivera_mid2',
'range_sampled_leecivera_wmid2' )),
histogram=True,
binwidth=200,
_with='lines',
_set = f'arrow from {range_ref},graph 0 to {range_ref},graph 1 nohead lw 5',
wait = 'hardcopy' not in plot_options,
title = title,
**plot_options)
else:
raise Exception("Getting here is a bug")
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
198,
81,
7061,
6,
39841,
262,
15440,
290,
9922,
286,
262,
2972,
1333,
648,
1741,
31878,
7061,
6,
628,
198,
11748,
25064,
198,
11748,
1822,
29572,
198,
11748,
302,
198,
11748,
28686,
628,
... | 1.777317 | 4,823 |
# Copyright 2021 by Daniel Springall.
# This file is part of UViewSD, and is released under the "MIT License Agreement".
# Please see the LICENSE file that should have been included as part of this package.
from uviewsd.core import usdextractor as uc_usdextractor
from uviewsd.gl import shape as gl_shape
import os
import logging
logger = logging.getLogger(__name__)
| [
2,
15069,
33448,
416,
7806,
8225,
439,
13,
198,
2,
770,
2393,
318,
636,
286,
471,
7680,
10305,
11,
290,
318,
2716,
739,
262,
366,
36393,
13789,
12729,
1911,
198,
2,
4222,
766,
262,
38559,
24290,
2393,
326,
815,
423,
587,
3017,
355,
... | 3.557692 | 104 |
import warnings
import torch
from torch import optim
from inclearn import models
from inclearn.convnet import my_resnet, resnet
from inclearn.lib import data, schedulers
| [
11748,
14601,
198,
198,
11748,
28034,
198,
6738,
28034,
1330,
6436,
198,
198,
6738,
753,
35720,
1330,
4981,
198,
6738,
753,
35720,
13,
42946,
3262,
1330,
616,
62,
411,
3262,
11,
581,
3262,
198,
6738,
753,
35720,
13,
8019,
1330,
1366,
... | 3.653061 | 49 |
""" Define interfaces for your add-on.
"""
import zope.interface
class IAddOnInstalled(zope.interface.Interface):
""" A layer specific for this add-on product.
This interface is referred in browserlayer.xml.
All views and viewlets register against this layer will appear on
your Plone site only when the add-on installer has been run.
""" | [
37811,
2896,
500,
20314,
329,
534,
751,
12,
261,
13,
198,
37811,
198,
198,
11748,
1976,
3008,
13,
39994,
198,
198,
4871,
314,
4550,
2202,
6310,
4262,
7,
89,
3008,
13,
39994,
13,
39317,
2599,
198,
220,
220,
220,
37227,
317,
7679,
217... | 3.480769 | 104 |
import sys
print("each saved file is a sesion!")
newfile = int(input("do you want to: 1=make a new file, 2=read a file, 3=Add sth to a file or 4= delete a file?"))
if newfile == 1:
filename = input("What name should the file have?:\n")
DD = input("Which DAY (of the month) should the session be?:\n")
MM = input("Which MONTH (in numbers) will be the session?:\n")
YYYY = input ("Which year (4 DIGITS: example 2022) will be te session?:\n")
HH = input("On which hour will be the session?:\n")
MIN = input("On which Minute will be the session:\n")
ROWS = int(input("how many rows are in the session?:\n"))
SEATS = int(input("how much seats per row are there?:\n"))
OOFIMAN = SEATS
file = open(f"{filename}.txt", 'w')
while ROWS > 0:
while OOFIMAN > 0:
sys.stdout.write("#")
OOFIMAN = OOFIMAN - 1
OOFIMAN = SEATS
print("")
ROWS = ROWS - 1 | [
198,
11748,
25064,
628,
198,
4798,
7203,
27379,
7448,
2393,
318,
257,
264,
274,
295,
2474,
8,
198,
3605,
7753,
796,
493,
7,
15414,
7203,
4598,
345,
765,
284,
25,
352,
28,
15883,
257,
649,
2393,
11,
362,
28,
961,
257,
2393,
11,
513... | 2.44186 | 387 |
print(split_pairs("abc"))
print (split_pairs("abcdef"))
a = "This is an example!"
words = a.split()
invers = ""
for word in words:
for c in reversed(word):
invers += c
invers += " "
print(invers)
x, y = rezolvare()
print(type(x))
print(type(y))
print(isinstance(x,Adam))
print(isinstance(x,Human)) | [
4798,
7,
35312,
62,
79,
3468,
7203,
39305,
48774,
198,
4798,
357,
35312,
62,
79,
3468,
7203,
39305,
4299,
48774,
628,
628,
628,
198,
64,
796,
366,
1212,
318,
281,
1672,
2474,
198,
10879,
796,
257,
13,
35312,
3419,
198,
259,
690,
796... | 2.389706 | 136 |
#!/usr/bin/env python
# import general use modules
import os
import re
from pprint import pprint as pp
# import nornir specifics
from nornir import InitNornir
from nornir.core.task import Result
from nornir.plugins.functions.text import print_result
from nornir.core.filter import F
from nornir.plugins.tasks import networking
from nornir.plugins.tasks.networking import netmiko_file_transfer
from nornir.plugins.tasks.networking import netmiko_send_command
from nornir.plugins.tasks.networking import netmiko_send_config
if __name__=="__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
1330,
2276,
779,
13103,
198,
11748,
28686,
198,
11748,
302,
198,
6738,
279,
4798,
1330,
279,
4798,
355,
9788,
198,
2,
1330,
299,
1211,
343,
23514,
198,
6738,
299,
1211,
343,
1330,
... | 3.139665 | 179 |
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob.exc
from nova.api.openstack import extensions
from nova import compute
from nova import exception
ALIAS = "os-server-diagnostics"
authorize = extensions.extension_authorizer('compute', 'v3:' + ALIAS)
class ServerDiagnostics(extensions.V3APIExtensionBase):
"""Allow Admins to view server diagnostics through server action."""
name = "ServerDiagnostics"
alias = ALIAS
version = 1
| [
2,
15069,
2813,
4946,
25896,
5693,
198,
2,
1439,
6923,
33876,
13,
198,
2,
198,
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
220,
220,
220,
407,
779,
428,
... | 3.333333 | 315 |
import cv2
import os, time, uuid, shutil
# Opens the Video file
#converter('pose/videos/serve/djok/djokserveside.mp4', 'djok', '1')
# Creating a directory with the name of id to store frame images
# def is_empty(id):
# path = '/Users/jacoblapkin/Documents/GitHub/UCL_Thesis/pose/user_serves'
# dir = os.path.join(path, str(id))
# if not os.path.exists(dir):
#make_dir(10)
| [
11748,
269,
85,
17,
198,
11748,
28686,
11,
640,
11,
334,
27112,
11,
4423,
346,
628,
198,
198,
2,
8670,
641,
262,
7623,
2393,
198,
198,
2,
1102,
332,
353,
10786,
3455,
14,
32861,
14,
2655,
303,
14,
28241,
482,
14,
28241,
482,
3168,... | 2.377246 | 167 |
import json
#import ssl
import urllib.parse as urlparse
from auth import (authenticate_user_credentials, generate_access_token,
verify_client_info, JWT_LIFE_SPAN)
from flask import Flask, redirect, render_template, request
from urllib.parse import urlencode
app = Flask(__name__)
@app.route('/auth')
@app.route('/signin', methods = ['POST'])
if __name__ == '__main__':
#context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
#context.load_cert_chain('domain.crt', 'domain.key')
#app.run(port = 5000, debug = True, ssl_context = context)
app.run(port = 5001, debug = True) | [
11748,
33918,
198,
2,
11748,
264,
6649,
198,
11748,
2956,
297,
571,
13,
29572,
355,
19016,
29572,
198,
198,
6738,
6284,
1330,
357,
41299,
5344,
62,
7220,
62,
66,
445,
14817,
11,
7716,
62,
15526,
62,
30001,
11,
220,
220,
198,
220,
22... | 2.606987 | 229 |
from typing import Any, Union
JSON = Union[dict[str, Any], list[Any], int, str, float, bool, type[None]]
| [
6738,
19720,
1330,
4377,
11,
4479,
198,
198,
40386,
796,
4479,
58,
11600,
58,
2536,
11,
4377,
4357,
1351,
58,
7149,
4357,
493,
11,
965,
11,
12178,
11,
20512,
11,
2099,
58,
14202,
11907,
198
] | 3.028571 | 35 |
from flask import Flask
from flask_babel import Babel
try:
import urllib.parse
quote = urllib.parse.quote
except:
import urllib
quote = urllib.quote_plus
from config import constants
app = MyFlask(__name__,
template_folder=constants.TEMPLATE_ROOT,
static_folder=constants.STATIC_ROOT)
# `.encode('utf8')` will not be needed for python 3
app.jinja_env.filters['quote_plus'] = lambda u: quote(u.encode('utf8'))
| [
6738,
42903,
1330,
46947,
198,
6738,
42903,
62,
65,
9608,
1330,
50175,
198,
198,
28311,
25,
198,
220,
220,
220,
1330,
2956,
297,
571,
13,
29572,
198,
220,
220,
220,
9577,
796,
2956,
297,
571,
13,
29572,
13,
22708,
198,
16341,
25,
19... | 2.616766 | 167 |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: api/operation.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2
from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='api/operation.proto',
package='blueapi.api',
syntax='proto3',
serialized_options=b'\n\031cloud.alphaus.blueapi.apiB\021ApiOperationProtoZ&github.com/alphauslabs/blue-sdk-go/api',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x13\x61pi/operation.proto\x12\x0b\x62lueapi.api\x1a\x17google/rpc/status.proto\x1a\x19google/protobuf/any.proto\"\xa8\x01\n\tOperation\x12\x0c\n\x04name\x18\x01 \x01(\t\x12&\n\x08metadata\x18\x02 \x01(\x0b\x32\x14.google.protobuf.Any\x12\x0c\n\x04\x64one\x18\x03 \x01(\x08\x12#\n\x05\x65rror\x18\x04 \x01(\x0b\x32\x12.google.rpc.StatusH\x00\x12(\n\x08response\x18\x05 \x01(\x0b\x32\x14.google.protobuf.AnyH\x00\x42\x08\n\x06result\"a\n\x1cOperationImportCurMetadataV1\x12\r\n\x05month\x18\x01 \x01(\t\x12\x10\n\x08\x61\x63\x63ounts\x18\x02 \x03(\t\x12\x0f\n\x07\x63reated\x18\x03 \x01(\t\x12\x0f\n\x07updated\x18\x04 \x01(\t\"\x88\x01\n$OperationAwsCalculateCostsMetadataV1\x12\r\n\x05orgId\x18\x01 \x01(\t\x12\r\n\x05month\x18\x02 \x01(\t\x12\x10\n\x08groupIds\x18\x03 \x03(\t\x12\x0e\n\x06status\x18\x04 \x01(\t\x12\x0f\n\x07\x63reated\x18\x05 \x01(\t\x12\x0f\n\x07updated\x18\x06 \x01(\tBV\n\x19\x63loud.alphaus.blueapi.apiB\x11\x41piOperationProtoZ&github.com/alphauslabs/blue-sdk-go/apib\x06proto3'
,
dependencies=[google_dot_rpc_dot_status__pb2.DESCRIPTOR,google_dot_protobuf_dot_any__pb2.DESCRIPTOR,])
_OPERATION = _descriptor.Descriptor(
name='Operation',
full_name='blueapi.api.Operation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='blueapi.api.Operation.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='metadata', full_name='blueapi.api.Operation.metadata', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='done', full_name='blueapi.api.Operation.done', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='error', full_name='blueapi.api.Operation.error', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='response', full_name='blueapi.api.Operation.response', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='result', full_name='blueapi.api.Operation.result',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=89,
serialized_end=257,
)
_OPERATIONIMPORTCURMETADATAV1 = _descriptor.Descriptor(
name='OperationImportCurMetadataV1',
full_name='blueapi.api.OperationImportCurMetadataV1',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='month', full_name='blueapi.api.OperationImportCurMetadataV1.month', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='accounts', full_name='blueapi.api.OperationImportCurMetadataV1.accounts', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='created', full_name='blueapi.api.OperationImportCurMetadataV1.created', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='updated', full_name='blueapi.api.OperationImportCurMetadataV1.updated', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=259,
serialized_end=356,
)
_OPERATIONAWSCALCULATECOSTSMETADATAV1 = _descriptor.Descriptor(
name='OperationAwsCalculateCostsMetadataV1',
full_name='blueapi.api.OperationAwsCalculateCostsMetadataV1',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='orgId', full_name='blueapi.api.OperationAwsCalculateCostsMetadataV1.orgId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='month', full_name='blueapi.api.OperationAwsCalculateCostsMetadataV1.month', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='groupIds', full_name='blueapi.api.OperationAwsCalculateCostsMetadataV1.groupIds', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='status', full_name='blueapi.api.OperationAwsCalculateCostsMetadataV1.status', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='created', full_name='blueapi.api.OperationAwsCalculateCostsMetadataV1.created', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='updated', full_name='blueapi.api.OperationAwsCalculateCostsMetadataV1.updated', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=359,
serialized_end=495,
)
_OPERATION.fields_by_name['metadata'].message_type = google_dot_protobuf_dot_any__pb2._ANY
_OPERATION.fields_by_name['error'].message_type = google_dot_rpc_dot_status__pb2._STATUS
_OPERATION.fields_by_name['response'].message_type = google_dot_protobuf_dot_any__pb2._ANY
_OPERATION.oneofs_by_name['result'].fields.append(
_OPERATION.fields_by_name['error'])
_OPERATION.fields_by_name['error'].containing_oneof = _OPERATION.oneofs_by_name['result']
_OPERATION.oneofs_by_name['result'].fields.append(
_OPERATION.fields_by_name['response'])
_OPERATION.fields_by_name['response'].containing_oneof = _OPERATION.oneofs_by_name['result']
DESCRIPTOR.message_types_by_name['Operation'] = _OPERATION
DESCRIPTOR.message_types_by_name['OperationImportCurMetadataV1'] = _OPERATIONIMPORTCURMETADATAV1
DESCRIPTOR.message_types_by_name['OperationAwsCalculateCostsMetadataV1'] = _OPERATIONAWSCALCULATECOSTSMETADATAV1
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Operation = _reflection.GeneratedProtocolMessageType('Operation', (_message.Message,), {
'DESCRIPTOR' : _OPERATION,
'__module__' : 'api.operation_pb2'
# @@protoc_insertion_point(class_scope:blueapi.api.Operation)
})
_sym_db.RegisterMessage(Operation)
OperationImportCurMetadataV1 = _reflection.GeneratedProtocolMessageType('OperationImportCurMetadataV1', (_message.Message,), {
'DESCRIPTOR' : _OPERATIONIMPORTCURMETADATAV1,
'__module__' : 'api.operation_pb2'
# @@protoc_insertion_point(class_scope:blueapi.api.OperationImportCurMetadataV1)
})
_sym_db.RegisterMessage(OperationImportCurMetadataV1)
OperationAwsCalculateCostsMetadataV1 = _reflection.GeneratedProtocolMessageType('OperationAwsCalculateCostsMetadataV1', (_message.Message,), {
'DESCRIPTOR' : _OPERATIONAWSCALCULATECOSTSMETADATAV1,
'__module__' : 'api.operation_pb2'
# @@protoc_insertion_point(class_scope:blueapi.api.OperationAwsCalculateCostsMetadataV1)
})
_sym_db.RegisterMessage(OperationAwsCalculateCostsMetadataV1)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
262,
8435,
11876,
17050,
13,
220,
8410,
5626,
48483,
0,
198,
2,
2723,
25,
40391,
14,
27184,
13,
1676,
1462,
198,
37811,
8645,
515,
8435,
11876,
2438... | 2.452016 | 4,960 |
#!/usr/bin/python3
import requests
import re
import sys
import subprocess
import shlex
from bs4 import BeautifulSoup
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
proxies = {'http':'http://127.0.0.1:8080','https':'https://127.0.0.1:8080'}
# Instantiate our interface class
global output
output = Interface()
output.header()
security_level = "medium"
target_ip = '172.17.0.2' #change target ip
target_port = '80' #change target port
localhost = '172.17.0.1' #change localhost ip
localport = '4444' #change localport
url = 'http://'+target_ip+':'+target_port+'/login.php'
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass | [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
198,
11748,
7007,
198,
11748,
302,
198,
11748,
25064,
198,
11748,
850,
14681,
198,
11748,
427,
2588,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
198,
11748,
2956,
297,
571,
18,
198,
... | 2.692308 | 260 |
"""bot.py
This script creates three threads to do the following,
Thread 1 (created by Telepot): Receiving messages from Telegram
Thread 2: Handle incoming message from queue, send to predict.
Thread 3: Send prediction results back to user.
This script is a part of a submission of Assignment 2, IEMS5780, S1 2019-2020, CUHK.
Copyright (c)2019 Junru Zhong.
Last modified on Nov. 28, 2019
"""
import base64
import json
import logging
import socket
import time
from io import BytesIO
from queue import Queue
from threading import Thread
import requests
import telepot
from PIL import Image
from telepot.loop import MessageLoop
def get_logger():
"""Initialize logger. Copy from sample code on course website.
:return logging.Logger.
"""
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
formatter = logging.Formatter(
"%(asctime)s, %(threadName)s, [%(levelname)s] : %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
return logger
def send_to_predict(image_queue, output_queue):
"""Send images in the input queue to the server for prediction.
Then receive the result, and put it to output queue.
:param image_queue: Queue for incoming images (with chat ID).
:param output_queue: Queue for sending prediction result back.
"""
logger.info('Predicting thread started.')
# Waiting for incoming images.
while True:
if not image_queue.empty():
# Predict all images in the queue.
# Get image from queue.
logger.debug('Getting image from queue.')
incoming_message = image_queue.get()
# TCP socket initialize.
soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
soc.settimeout(5)
soc.connect(('localhost', 8888))
logger.info('Connected to the server.')
# Encode the image in base64.
buffered = BytesIO()
image = incoming_message['image']
image.save(buffered, format='PNG')
encoded_image = base64.b64encode(buffered.getvalue())
data_send = json.dumps(dict({'image': encoded_image.decode('ascii'), 'chat_id': incoming_message['chat_id']}))
# TCP client: encoded image send to the server. Waiting for receiving predictions.
terminate = '##END##'
data_send += terminate
soc.sendall(str.encode(data_send, 'utf8'))
chunks = []
while True:
current_data = soc.recv(8192).decode('utf8', 'strict')
if terminate in current_data:
chunks.append(current_data[:current_data.find(terminate)])
break
chunks.append(current_data)
if len(chunks) > 1:
last_pair = chunks[-2] + chunks[-1]
if terminate in last_pair:
chunks[-2] = last_pair[:last_pair.find(terminate)]
chunks.pop()
break
received_data = ''.join(chunks)
# JSON decode.
decoded_data = json.loads(received_data)
# Format
predictions = ''
idx = 1
for item in decoded_data['predictions']:
predictions += '{}. {} ({})\n'.format(idx, item['label'], item['proba'])
idx += 1
send_back = {
'predictions': predictions,
'chat_id': incoming_message['chat_id']
}
# Put to queue.
output_queue.put(send_back)
def send_predictions_back(output_queue):
"""Keep polling the output queue, send back the predictions to users.
:param output_queue: Queue variable.
"""
# Waiting for incoming predictions.
logger.info('Send back thread started.')
while True:
if not output_queue.empty():
# Send all predictions back.
send_back = output_queue.get()
# Send message.
bot.sendMessage(send_back['chat_id'], send_back['predictions'])
def handle(msg):
"""
A function that will be invoked when a message is received by the bot.
:param msg: Incoming Telegram message.
"""
content_type, chat_type, chat_id = telepot.glance(msg)
logging.info('Handling incoming message {}.'.format(chat_id))
if content_type == "text":
content = msg["text"]
# Try to download URL.
try:
image_response = requests.get(content)
if image_response.status_code != 200:
raise Exception('Response code is not 200 OK.')
if 'image' not in image_response.headers['content-type']:
raise Exception('The URL does not contains an image.')
i = Image.open(BytesIO(image_response.content))
# Pass to predicting server.
message_to_predict = {'image': i, 'chat_id': chat_id}
logger.debug('Putting image to queue.')
image_queue.put(message_to_predict)
except Exception as e:
help_info = 'To try out the image classification, please send an image or a image URL instead.'
reply = "You said: {}\n{}\n{}".format(content, help_info, str(e))
bot.sendMessage(chat_id, reply)
return
# Handle photos.
if content_type == 'photo':
try:
# Download image.
bot.download_file(msg['photo'][-1]['file_id'], 'file.png')
# Open the image.
i = Image.open('file.png')
# Put to the queue.
message_to_predict = {'image': i, 'chat_id': chat_id}
logger.debug('Putting image to queue.')
image_queue.put(message_to_predict)
except Exception as e:
error_info = 'An exception was caught when handling incoming image: {}'.format(str(e))
logging.WARNING(error_info)
bot.sendMessage(chat_id, error_info)
if __name__ == "__main__":
logger = get_logger()
# Message queues as global variables.
image_queue = Queue()
output_queue = Queue()
# Provide your bot's token
bot = telepot.Bot("YOUR API KEY")
logger.info('Bot script starting...')
MessageLoop(bot, handle).run_as_thread()
# Start threads.
send_to_predict_thread = Thread(target=send_to_predict, args=(image_queue, output_queue), daemon=True)
send_back_thread = Thread(target=send_predictions_back, args=(output_queue,), daemon=True)
send_to_predict_thread.start()
send_back_thread.start()
send_to_predict_thread.join()
send_back_thread.join()
while True:
time.sleep(10)
| [
37811,
13645,
13,
9078,
198,
1212,
4226,
8075,
1115,
14390,
284,
466,
262,
1708,
11,
198,
16818,
352,
357,
25598,
416,
14318,
13059,
2599,
19520,
1412,
6218,
422,
50203,
198,
16818,
362,
25,
33141,
15619,
3275,
422,
16834,
11,
3758,
284... | 2.334026 | 2,886 |
import tensorflow as tf
import cv2
import numpy as np
import math
import matplotlib.pyplot as plt
# for video
cap = cv2.VideoCapture(-1)
while (cap.isOpened()):
_, frame = cap.read()
load_image = np.copy(frame)
canny_image = canny(load_image)
cropped_image = region_of_interest(canny_image)
find_num(load_image, cropped_image)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
| [
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
269,
85,
17,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
10688,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
2,
329,
2008,
198,
11128,
796,
269,
85,
17,
... | 2.556886 | 167 |
from typing import Any, Optional
from eopf.product.store import EOProductStore
from eopf.product.store.rasterio import EORasterIOAccessor
| [
6738,
19720,
1330,
4377,
11,
32233,
198,
198,
6738,
304,
404,
69,
13,
11167,
13,
8095,
1330,
412,
3185,
2076,
310,
22658,
198,
6738,
304,
404,
69,
13,
11167,
13,
8095,
13,
81,
1603,
952,
1330,
412,
1581,
1603,
9399,
15457,
273,
628
... | 3.255814 | 43 |