content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
import sys
from urlparse import urlparse
import re
import logging
import IPy
import requests
import rfc3986
import stun
# List taken from natvpn project and tested manually.
# NOTE: This needs periodic updating.
_STUN_SERVERS = (
'stun.l.google.com',
'stun1.l.google.com',
'stun2.l.google.com',
'stun3.l.google.com',
'stun4.l.google.com',
)
IP_DETECT_SITE = 'https://icanhazip.com'
def set_stun_servers(servers=_STUN_SERVERS):
"""Manually set the list of good STUN servers."""
stun.stun_servers_list = tuple(servers)
def get_NAT_status(stun_host=None):
"""
Given a server hostname, initiate a STUN request to it;
and return the response in the form of a dict.
"""
response = stun.get_ip_info(stun_host=stun_host, source_port=0, stun_port=19302)
return {'nat_type': response[0],
'external_ip': response[1],
'external_port': response[2]}
def str_to_ipy(addr):
"""Convert an address to an IPy.IP object or None if unsuccessful."""
try:
return IPy.IP(addr)
except ValueError as err:
print 'Not IP address:', err
return None
def get_peer_url(address, port, protocol='tcp'):
"""
Return a URL.
@param address: An IPv4 address, an IPv6 address or a DNS name.
@type address: str
@param port: The port that will be used to connect to the peer
@type port: int
@param protocol: The connection protocol
@type protocol: str
@rtype: str
"""
try:
# is_ipv6_address will throw an exception for a DNS name
is_ipv6 = is_ipv6_address(address)
except ValueError:
is_ipv6 = False
if is_ipv6:
# An IPv6 address must be enclosed in brackets.
return '%s://[%s]:%s' % (protocol, address, port)
else:
return '%s://%s:%s' % (protocol, address, port)
def test_stun_servers(servers=_STUN_SERVERS):
"""Check responses of the listed STUN servers."""
for s in servers:
print 'Probing', s, '...',
sys.stdout.flush()
status = get_NAT_status(s)
if status['external_ip'] is None or status['external_port'] is None:
print 'FAIL'
else:
print 'OK'
def is_valid_openbazaar_scheme(uri):
"""Check for OpenBazaar appropriate scheme"""
return rfc3986.uri_reference(uri).scheme == u'tcp'
PACKET_STATS = PacketStats()
PACKET_STATS_LOGS_EVERY_N_PACKETS = 50
if __name__ == '__main__':
main()
| [
11748,
25064,
198,
6738,
19016,
29572,
1330,
19016,
29572,
198,
11748,
302,
198,
11748,
18931,
198,
11748,
6101,
88,
198,
11748,
7007,
198,
11748,
374,
16072,
2670,
4521,
198,
11748,
7991,
628,
198,
2,
7343,
2077,
422,
34664,
85,
21999,
... | 2.398445 | 1,029 |
from readux.books.forms import BookSearch
def book_search(request):
'''Template context processor: add book search form
(:class:`~readux.books.forms.BookSearch`) to context so it can be
used on any page.'''
return {
'search_form': BookSearch()
}
| [
6738,
1100,
2821,
13,
12106,
13,
23914,
1330,
4897,
18243,
628,
198,
4299,
1492,
62,
12947,
7,
25927,
2599,
198,
220,
220,
220,
705,
7061,
30800,
4732,
12649,
25,
751,
1492,
2989,
1296,
198,
220,
220,
220,
357,
25,
4871,
25,
63,
93,... | 2.79798 | 99 |
import os
import sys
import mxnet as mx
import logging
LOAD_EXISTING_MODEL = False
if __name__ == '__main__':
main()
| [
11748,
28686,
198,
11748,
25064,
198,
11748,
285,
87,
3262,
355,
285,
87,
198,
11748,
18931,
628,
198,
35613,
62,
6369,
8808,
2751,
62,
33365,
3698,
796,
10352,
628,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
... | 2.645833 | 48 |
# -*- coding: utf-8 -*-
# Contributors : [srinivas.v@toyotaconnected.co.in,srivathsan.govindarajan@toyotaconnected.co.in,
# harshavardhan.thirupathi@toyotaconnected.co.in,
# ashok.ramadass@toyotaconnected.com ]
class CoefficientNotinRangeError(Exception):
"""
Class to throw exception when a coefficient is not in the specified range
"""
class InvalidImageArrayError(Exception):
"""
Class to throw exception when an image is not valid
"""
class CrucialValueNotFoundError(Exception):
"""
Class to throw exception when an expected value is not found
"""
class OperationNotFoundOrImplemented(Exception):
"""
Class to throw exception when an operation is not found
"""
class ConfigurationError(Exception):
"""
Class to throw exception when a configuration is not right
"""
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
25767,
669,
1058,
685,
82,
12769,
38630,
13,
85,
31,
83,
726,
313,
7807,
1606,
276,
13,
1073,
13,
259,
11,
82,
15104,
776,
12807,
13,
9567,
521,
283,
1228,
272,
... | 2.869565 | 299 |
# Python Standard Library
from collections import OrderedDict
import itertools
# 3rd Party Libraries
import arrow, yaml
# Local
from stablogen.config import *
from stablogen.util import make_url, find_files
# Uses OrderedDict to keep order the data in the order below
yaml.add_representer(OrderedDict, lambda self, data:
self.represent_mapping('tag:yaml.org,2002:map', data.items())
)
post_yaml_tags = ('title', 'tags', 'created', 'when', 'last_edited')
# Support Arrow Objects in PyYAML (At least date time equilvalent)
arrow_tag='!arrow.Arrow'
yaml.add_representer(arrow.Arrow, lambda dumper, data:
dumper.represent_scalar(arrow_tag, str(data))
)
yaml.add_constructor(arrow_tag, lambda loader, node:
arrow.get(loader.construct_scalar(node))
)
# Core code
class Post:
'''Core type of the program, represents a post in the blog.
'''
inventory = dict()
loaded = False
@staticmethod
@classmethod
@classmethod
| [
2,
11361,
8997,
10074,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
11748,
340,
861,
10141,
198,
198,
2,
513,
4372,
3615,
46267,
198,
11748,
15452,
11,
331,
43695,
198,
198,
2,
10714,
198,
6738,
8303,
6404,
268,
13,
11250,
1330,... | 2.888218 | 331 |
"""API app factory."""
from __future__ import annotations
import random
import traceback
from typing import TYPE_CHECKING
from flask import Flask, Response, jsonify, request
from voiceassistant.config import Config
from voiceassistant.exceptions import ConfigValidationError, SkillError
from voiceassistant.utils.datastruct import DottedDict
from .auth import authorized
if TYPE_CHECKING:
from voiceassistant.core import VoiceAssistant
def api_factory(vass: VoiceAssistant, app: Flask) -> Flask:
"""Get REST API app."""
name = "api"
@app.route(f"/{name}/say", methods=["POST"])
@authorized
def say() -> Response:
"""Pronounce text.
Sample payload:
{"text": "Hello, World"}
"""
try:
text = (request.get_json() or {})["text"]
if isinstance(text, list):
vass.interfaces.speech.output(random.choice(text))
else:
vass.interfaces.speech.output(text)
return Response(status=200)
except (KeyError, TypeError):
return Response("Payload must have 'text' key", status=406)
@app.route(f"/{name}/skills", methods=["GET"])
@authorized
def get_skills() -> Response:
"""Get an array of all available skill names."""
return jsonify(vass.skills.names)
@app.route(f"/{name}/skills/<skill_name>", methods=["GET", "POST"])
@authorized
def execute_skill(skill_name: str) -> Response:
"""Execure specified skill.
Sample payload (Optional):
{"entities": {"location": "Moscow"}}
"""
payload = request.get_json() or {}
try:
vass.skills.run(
name=skill_name,
entities=DottedDict(payload.get("entities", {})),
interface=vass.interfaces.speech,
)
return Response(status=200)
except SkillError as e:
return Response(str(e), status=501)
@app.route(f"/{name}/config", methods=["GET"])
@authorized
def get_config() -> Response:
"""Get Voice Assistant config."""
return jsonify(Config)
@app.route(f"/{name}/config", methods=["POST"])
@authorized
def set_config() -> Response:
"""Set Voice Assistant config.
Payload must be a new config.
"""
new_config = request.get_json() or {}
try:
Config.write(new_config)
return Response(status=200)
except ConfigValidationError:
return Response("Invalid config", status=406)
@app.route("/callback/<app>", methods=["GET"])
def callback(app: str) -> Response:
"""Set callback request args to shared cache."""
app_data = {app: request.args.to_dict()}
if "callback" not in vass.data:
vass.data["callback"] = app_data
else:
vass.data["callback"].update(app_data)
return Response(
f"<b>{app.capitalize()} setup is successful, you can close this tab</b>", status=200
)
@app.route("/reload", methods=["GET"])
@authorized
@app.route("/trigger", methods=["GET"])
@authorized
return app
| [
37811,
17614,
598,
8860,
526,
15931,
201,
198,
201,
198,
6738,
11593,
37443,
834,
1330,
37647,
201,
198,
201,
198,
11748,
4738,
201,
198,
11748,
12854,
1891,
201,
198,
6738,
19720,
1330,
41876,
62,
50084,
2751,
201,
198,
201,
198,
6738,... | 2.221774 | 1,488 |
import pytest
from thefuck.rules.open import is_arg_url, match, get_new_command
from tests.utils import Command
@pytest.fixture
@pytest.mark.parametrize('script', [
'open foo.com',
'open foo.edu',
'open foo.info',
'open foo.io',
'open foo.ly',
'open foo.me',
'open foo.net',
'open foo.org',
'open foo.se',
'open www.foo.ru'])
@pytest.mark.parametrize('script', ['open foo', 'open bar.txt', 'open egg.doc'])
@pytest.mark.parametrize('script', [
'open foo.com',
'xdg-open foo.com',
'gnome-open foo.com',
'kde-open foo.com',
'open nonest'])
@pytest.mark.parametrize('script, new_command', [
('open foo.io', ['open http://foo.io']),
('xdg-open foo.io', ['xdg-open http://foo.io']),
('gnome-open foo.io', ['gnome-open http://foo.io']),
('kde-open foo.io', ['kde-open http://foo.io']),
('open nonest', ['touch nonest && open nonest',
'mkdir nonest && open nonest'])])
| [
11748,
12972,
9288,
198,
6738,
262,
31699,
13,
38785,
13,
9654,
1330,
318,
62,
853,
62,
6371,
11,
2872,
11,
651,
62,
3605,
62,
21812,
198,
6738,
5254,
13,
26791,
1330,
9455,
628,
198,
31,
9078,
9288,
13,
69,
9602,
628,
198,
31,
90... | 2.252315 | 432 |
#!/usr/bin/env python3
# Psuedocode from wikipedia (old version; removed):
# https://en.wikipedia.org/w/index.php?title=Mersenne_Twister&oldid=209555438
if __name__ == '__main__':
mt_rand = MT19937(123456)
data = [mt_rand.extractNumber() for i in range(1000)]
expected = [int(i.strip()) for i in open("c21_data.txt").readlines()]
assert data == expected
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
2,
33610,
1739,
420,
1098,
422,
47145,
11151,
357,
727,
2196,
26,
4615,
2599,
220,
198,
2,
3740,
1378,
268,
13,
31266,
13,
2398,
14,
86,
14,
9630,
13,
10121,
30,
7839,
28,... | 2.54 | 150 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-05-24 18:35
from __future__ import unicode_literals
from django.db import migrations
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
24,
319,
1584,
12,
2713,
12,
1731,
1248,
25,
2327,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
... | 2.735849 | 53 |
#!/usr/bin/env python
def get_help_data_12589():
"""
Ingestion help.
Data store of information to be presented when a help request is made for port 12589.
Returns a list of dictionaries associated with various requests supported on that port.
"""
help_data = [
{
'root': 'ingest',
'endpoint': 'ingestrequest',
'method': 'GET',
'permission_required': False,
'description': 'Get a list of all ingestrequest records.',
'data_required': False,
'data_format': None,
'samples': [{
'sample_request': 'ingestrequest',
'sample_response': None
}]
},
{
'root': 'ingest',
'endpoint': 'ingestrequest/{id}',
'method': 'PUT',
'permission_required': True,
'description': 'Update an ingest request identified by the id provided.',
'data_required': True,
'data_format': [
{ 'name': 'username',
'type': 'str',
'description': 'The username responsible for the ingestrequest.',
'valid_values': None,
'default': None
},
{ 'name': 'state',
'type': 'str',
'description': 'An enumeration value.',
'valid_values': ['STAGE'],
'default': None
},
{ 'name': 'reccurring',
'type': 'bool',
'description': '',
'valid_values': None,
'default': None
},
{ 'name': 'options',
'type': 'dict',
'description': 'Ingestion options: \'csvName\'(str), \'maxNumFiles\'(int), ' +
'\'checkExistingFiles\'(bool), \'beginFileDate\'(str), ' +
'\'endFileDate\'(str)',
'valid_values': None,
'default': None
}
],
'samples': [{
'sample_request': 'ingestrequest/76',
'sample_data': {
"username": "ms804",
"state": "STAGE",
"options": {"csvName":"CE01ISSP_D00001_ingest",
"maxNumFiles":6,
"checkExistingFiles":"false",
"beginFileDate":"2013-04-06",
"endFileDate":"2017-04-06"},
"recurring": "true"
},
'sample_response': None
}]
},
{
'root': 'ingest',
'endpoint': 'ingestrequest',
'method': 'POST',
'permission_required': True,
'description': 'Create an ingestrequest record.',
'data_required': True,
'data_format':
[
{ 'name': 'username',
'type': 'str',
'description': 'The username responsible for the ingestrequest.',
'valid_values': None,
'default': None
},
{ 'name': 'state',
'type': 'str',
'description': 'An enumeration value.',
'valid_values': ['STAGE'],
'default': None
},
{ 'name': 'reccurring',
'type': 'bool',
'description': '',
'valid_values': None,
'default': None
},
{ 'name': 'options',
'type': 'dict',
'description': 'Ingestion options: \'csvName\'(str), \'maxNumFiles\'(int), ' +
'\'checkExistingFiles\'(bool), \'beginFileDate\'(str), ' +
'\'endFileDate\'(str)',
'valid_values': None,
'default': None
}
],
'samples': [{
'sample_request': 'ingestrequest',
'sample_data': {
"username": "ms804",
"state": "STAGE",
"options": {"csvName":"CE01ISSP_D00001_ingest",
"maxNumFiles":6,
"checkExistingFiles":"false",
"beginFileDate":"2013-04-06",
"endFileDate":"2017-04-06"},
"recurring": "true"
},
'sample_response': None
}]
}
]
return help_data
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
4299,
651,
62,
16794,
62,
7890,
62,
1065,
44169,
33529,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
554,
3495,
295,
1037,
13,
198,
220,
220,
220,
6060,
3650,
286,
1321,
284,
... | 1.402969 | 4,782 |
import unittest
# O(n). Two pointers, backtracking.
if __name__ == '__main__':
unittest.main()
| [
11748,
555,
715,
395,
628,
198,
198,
2,
440,
7,
77,
737,
4930,
32007,
11,
736,
36280,
13,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
555,
715,
395,
13,
12417,
3419,
198
] | 2.5 | 42 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Project : DeepNN.
# @File : data_processor_utils
# @Time : 2020/4/21 12:04 下午
# @Author : yuanjie
# @Email : yuanjie@xiaomi.com
# @Software : PyCharm
# @Description :
import numpy as np
_norm = lambda x, ord=1: x / np.linalg.norm(x, ord, axis=len(x.shape) > 1, keepdims=True)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2488,
16775,
220,
220,
220,
220,
220,
1058,
10766,
6144,
13,
198,
2,
2488,
8979,
220,
220,
220,
220,
220,
220,
2... | 2.005435 | 184 |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# cell_metadata_json: true
# formats: ipynb,py:percent
# notebook_metadata_filter: all
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.10.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# language_info:
# codemirror_mode:
# name: ipython
# version: 3
# file_extension: .py
# mimetype: text/x-python
# name: python
# nbconvert_exporter: python
# pygments_lexer: ipython3
# version: 3.8.5
# latex_envs:
# LaTeX_envs_menu_present: true
# autoclose: false
# autocomplete: false
# bibliofile: biblio.bib
# cite_by: apalike
# current_citInitial: 1
# eqLabelWithNumbers: true
# eqNumInitial: 1
# hotkeys:
# equation: Ctrl-E
# itemize: Ctrl-I
# labels_anchors: false
# latex_user_defs: false
# report_style_numbering: false
# user_envs_cfg: false
# toc:
# base_numbering: 1
# nav_menu: {}
# number_sections: true
# sideBar: true
# skip_h1_title: false
# title_cell: Table of Contents
# title_sidebar: Contents
# toc_cell: false
# toc_position: {}
# toc_section_display: true
# toc_window_display: false
# ---
# %% [markdown]
# ## Introduction: Keynes, Friedman, Modigliani
#
# [](https://econ-ark.org/materials/keynesfriedmanmodigliani#launch)
# %% {"code_folding": []}
# Some initial setup
import sys
import os
from matplotlib import pyplot as plt
import numpy as np
plt.style.use('seaborn-darkgrid')
palette = plt.get_cmap('Dark2')
import pandas as pd
pd.core.common.is_list_like = pd.api.types.is_list_like
import datetime as dt
import scipy.stats as stats
import statsmodels.formula.api as sm
from copy import deepcopy
import pandas_datareader.data as web
from HARK.ConsumptionSaving.ConsIndShockModel import PerfForesightConsumerType
from HARK.utilities import plot_funcs_der, plot_funcs
# %% [markdown]
# ### 1. The Keynesian consumption function
#
# Keynes:
# 1. "The amount of aggregate consumption mainly depends on the amount of aggregate income."
# 1. It is a "fundamental psychological rule ... that when ... real income increases ... consumption [will increase], but by less than the increase in income."
# 1. More generally, "as a rule, a greater proportion of income ... is saved as real income increases."
#
# This can be formalized as:
#
# $
# \begin{eqnarray}
# c_t & = & a_0 + a_{1}y_t
# \\ c_t - c_{t-1} & = & a_{1}(y_t - y_{t-1})
# \end{eqnarray}
# $
#
# for $a_0 > 0, a_1 < 1$
#
# %% [markdown]
# #### The Keynesian Consumption Function
# %% {"code_folding": []}
class KeynesianConsumer:
"""
This class represents consumers that behave according to a
Keynesian consumption function, representing them as a
special case of HARK's PerfForesightConsumerType
Methods:
- cFunc: computes consumption/permanent income
given total income/permanent income.
"""
# %% {"code_folding": []}
# Plot cFunc(Y)=Y against the Keynesian consumption function
# Deaton-Friedman consumption function is a special case of perfect foresight model
# We first create a Keynesian consumer
KeynesianExample = KeynesianConsumer()
# and then plot its consumption function
income = np.linspace(0, 30, 20) # pick some income points
plt.figure(figsize=(9,6))
plt.plot(income, KeynesianExample.cFunc(income), label = 'Consumption function') #plot income versus the consumption
plt.plot(income, income, 'k--', label = 'C=Y')
plt.title('Consumption function')
plt.xlabel('Income (y)')
plt.ylabel('Normalized Consumption (c)')
plt.ylim(0, 20)
plt.legend()
plt.show()
# %% {"code_folding": []}
# This looks like the first of the three equations, consumption as a linear function of income!
# This means that even in a microfounded model (that HARK provides), the consumption function can match Keynes reduced form
# prediction (given the right parameterization).
# We can even find a_0 and a_1
a_0 = KeynesianExample.a0
a_1 = KeynesianExample.a1
print('a_0 is ' + str(a_0))
print('a_1 is ' + str(a_1))
# %% [markdown]
# #### The Keynesian consumption function: Evidence
# %% [markdown]
# Aggregate Data:
#
# Long-term time-series estimates: $a_0$ close to zero, $a_1$ close to 1 (saving rate stable over time - Kuznets).<br>
# Short-term aggregate time-series estimates of change in consumption on change in income find $a_1 << 1$.<br>
# $c_t = a_0 + a_{1}y_t + a_{2}c_{t-1}$ finds significant $a_2$, near 1.
# %% {"code_folding": []}
# Lets have a look at some aggregate data
sdt = dt.datetime(1980, 1, 1) #set startdate
edt = dt.datetime (2017, 1, 1) #set end date
df = web.DataReader(["PCECC96", "DPIC96"], "fred", sdt, edt) #import the data from Fred
# Plot the data
plt.figure(figsize=(9,6))
plt.plot(df.DPIC96, df.PCECC96, 'go', markersize=3.0, label='Data')
slope, intercept, r_value, p_value, std_err = stats.linregress(df.DPIC96, df.PCECC96)
plt.plot(df.DPIC96, intercept+slope*df.DPIC96, 'k-', label = 'Line of best fit')
plt.plot(df.DPIC96, df.DPIC96, 'k--', label = 'C=Y')
plt.xlabel('Income (y)')
plt.ylabel('Consumption (c)')
plt.legend()
plt.show()
print('a_0 is ' + str(intercept))
print('a_1 is ' + str(slope))
# %%
# However, our consumption data is [non-stationary](https://www.reed.edu/economics/parker/312/tschapters/S13_Ch_4.pdf) and this drives the previous
# estimate.
df.DPIC96.plot()
plt.xlabel('Date')
plt.ylabel('Consumption (c)')
# %%
# Lets use our second equation to try to find an estimate of a_1
df_diff = df.diff() #create dataframe of differenced values
# Plot the data
plt.figure(figsize=(9,6))
plt.plot(df_diff.DPIC96, df_diff.PCECC96, 'go', markersize=3.0, label = 'Data')
slope, intercept, r_value, p_value, std_err = stats.linregress(df_diff.DPIC96[1:], df_diff.PCECC96[1:]) # find line of best fit
plt.plot(df_diff.DPIC96[1:], intercept+slope*df_diff.DPIC96[1:], 'k-', label = 'Line of best fit')
plt.plot(np.array([-200, 200]), np.array([-200, 200]), 'k--', label = 'C=Y')
plt.xlabel('Change in income (dy)')
plt.ylabel('Change in consumption (dc)')
plt.legend()
plt.show()
print('a_1 is ' + str(slope))
# %% [markdown]
# a_1 is now much lower, as we expected
# %% [markdown]
# ### Household Data:
#
# Cross-section plots of consumption and income: very large and significant $a_0$, $a_1$ maybe 0.5. <br>
#
# Further facts:
# 1. Black households save more than whites at a given income level.<br>
# 0. By income group:
# * low-income: Implausibly large dissaving (spend 2 or 3 times income)
# * high-income: Remarkably high saving
# %% [markdown]
# ### 2. Duesenberry
# %% [markdown]
# Habit formation may explain why $c_{t-1}$ affects $c_t$.<br>
# Relative Income Hypothesis suggests that you compare your consumption to consumption of ‘peers’.<br>
# May explain high saving rates of Black HHs.<br>
#
# Problems with Duesenberry: <br>
# No budget constraint<br>
# No serious treatment of intertemporal nature of saving
# %% [markdown]
# #### Dusenberry: Evidence
# %%
# Even if we control for income, past consumption seems to be significantly related to current consumption
df_habit = df.copy()
df_habit.columns = ['cons', 'inc']
df_habit['cons_m1'] = df.PCECC96.shift()
df_habit.dropna()
result = sm.ols(formula = "cons ~ inc + cons_m1", data=df_habit.dropna()).fit()
result.summary()
# %%
# The coefficient on lagged consumption is very significant.
# But regression may be statistically problematic for the usual [non-stationarity](https://towardsdatascience.com/stationarity-in-time-series-analysis-90c94f27322) reasons.
# %% [markdown]
# ### 3. Friedman's Permanent Income Hypothesis
# %% [markdown]
# $$c = p + u$$
# $$y = p + v$$
#
# We can try to test this theory across households. If we run a regression of the form:
# $$c_i = a_0 + a_{1}y_{i} + u_{i}$$
#
# And if Friedman is correct, and the "true" coefficient on permanent income $p$ is 1, then the coefficient on $y$ will be:
# $$a_1 = \frac{s^2_{p}}{(s^2_{v} + s^2_{p})}$$
# %% [markdown]
# #### Friedman's Permanent Income Hypothesis: HARK
#
# We begin by creating a class that class implements the Friedman PIH consumption function as a special case of the [Perfect Foresight CRRA](http://econ.jhu.edu/people/ccarroll/courses/choice/lecturenotes/consumption/PerfForesightCRRA) model.
# %% {"code_folding": []}
class FriedmanPIHConsumer:
"""
This class represents consumers that behave according to
Friedman's permanent income hypothesis, representing them as a
special case of HARK's PerfForesightConsumerType
Methods:
- cFunc: computes consumption/permanent income
given total income/permanent income.
"""
# %%
# We can now create a PIH consumer
PIHexample = FriedmanPIHConsumer()
# Plot the perfect foresight consumption function
income = np.linspace(0, 10, 20) # pick some income points
plt.figure(figsize=(9,6))
plt.plot(income, PIHexample.cFunc(income), label = 'Consumption function') #plot income versus the consumption
plt.plot(income, income, 'k--', label = 'C=Y')
plt.title('Consumption function')
plt.xlabel('Normalized Income (y)')
plt.ylabel('Normalized Consumption (c)')
plt.legend()
plt.show()
# %% [markdown] {"code_folding": []}
# We can see that regardless of the income our agent receives, they consume their permanent income, which is normalized to 1.
# %% [markdown]
# We can also draw out some implications of the PIH that we can then test with evidence
#
# If we look at HH's who have very similar permanent incomes, we should get a small estimate of $a_1$, because $s^2_v$ is large relative to $s^2_p$.
#
# Lets simulate this using our HARK consumer.
# %%
# Permanent income has the same variance
# as transitory income.
perm_inc = np.random.normal(1., 0.1, 50)
trans_inc = np.random.normal(0.5, 0.1, 50)
total_inc = perm_inc + trans_inc
slope, intercept, r_value, p_value, std_err = stats.linregress(total_inc, PIHexample.cFunc(total_inc)*perm_inc)
plt.figure(figsize=(9,6))
plt.plot(total_inc, PIHexample.cFunc(total_inc)*perm_inc, 'go', label='Simulated data')
plt.plot(total_inc, intercept + slope*total_inc, 'k-', label='Line of best fit')
plt.plot(np.linspace(1, 2, 5), np.linspace(1, 2, 5), 'k--', label='C=Y')
plt.xlabel('Income (y)')
plt.ylabel('Consumption (c)')
plt.legend()
plt.ylim(0, 2)
plt.xlim(1.1, 1.9)
plt.show()
print('a_0 is ' + str(intercept))
print('a_1 is ' + str(slope))
# %%
# Permanent income with higher variance
perm_inc = np.random.normal(1., 0.5, 50)
trans_inc = np.random.normal(0.5, 0.1, 50)
total_inc = perm_inc + trans_inc
slope, intercept, r_value, p_value, std_err = stats.linregress(total_inc, PIHexample.cFunc(total_inc)*perm_inc)
plt.figure(figsize=(9,6))
plt.plot(total_inc, PIHexample.cFunc(total_inc)*perm_inc, 'go', label='Simulated data')
plt.plot(total_inc, intercept + slope*total_inc, 'k-', label='Line of best fit')
plt.plot(np.linspace(0, 2, 5), np.linspace(0, 2, 5), 'k--', label='C=Y')
plt.xlabel('Income (y)')
plt.ylabel('Consumption (c)')
plt.legend()
plt.ylim(0, 2)
plt.show()
print('a_0 is ' + str(intercept))
print('a_1 is ' + str(slope))
# %% [markdown]
# We can see that as we increase the variance of permanent income, the estimate of a_1 rises
# %% [markdown]
# #### Friedman's Permanent Income Hypothesis: Evidence
# %% [markdown]
# We can now consider the empirical evidence for the claims our HARK model made about the PIH.
#
# If we take a long time series, then the differences in permanent income should be the main driver of the variance in total income. This implies that a_1 should be high.
#
# If we take higher frequency time series (or cross sectional data), transitory shocks should dominate, and our estimate of a_1 should be lower.
#
# Consider quarterly differences first:
# %% {"code_folding": []}
# Lets use the data from FRED that we used before.
# Using quarterly data (copying from above), we had:
plt.figure(figsize=(9,6))
plt.plot(df_diff.DPIC96, df_diff.PCECC96, 'go', markersize=3.0, label = 'Data')
slope, intercept, r_value, p_value, std_err = stats.linregress(df_diff.DPIC96[1:], df_diff.PCECC96[1:]) # find line of best fit
plt.plot(df_diff.DPIC96[1:], intercept+slope*df_diff.DPIC96[1:], 'k-', label = 'Line of best fit')
plt.plot(np.array([-200, 200]), np.array([-200, 200]), 'k--', label = 'C=Y')
plt.xlabel('Change in income (dy)')
plt.ylabel('Change in consumption (dc)')
plt.legend()
plt.show()
print('a_1 is ' + str(slope))
# %% [markdown]
# And now consider longer time differences, 20 quarters for instance, where the changes in permanent income should dominate transitory effects
# %% {"code_folding": []}
# Using longer differences
df_diff_long = df.diff(periods = 20) #create dataframe of differenced values
df_diff_long.columns = ['cons', 'inc']
plt.figure(figsize=(9,6))
plt.plot(df_diff_long.inc, df_diff_long.cons, 'go', label='Data')
slope, intercept, r_value, p_value, std_err = stats.linregress(df_diff_long.inc[20:], df_diff_long.cons[20:]) # find line of best fit
plt.plot(df_diff_long.inc[1:], intercept+slope*df_diff_long.inc[1:], 'k-', label='Line of best fit')
plt.plot(np.linspace(-100, 2000, 3), np.linspace(-100, 2000, 3), 'k--', label='C=Y')
plt.legend()
plt.xlabel('Change in income (dy)')
plt.ylabel('Change in consumption (dc)')
plt.show()
print('a_0 is ' + str(intercept))
print('a_1 is ' + str(slope))
# %% [markdown]
# The estimate of $a_1$ using the longer differences is much higher because permanent income is playing a much more important role in explaining the variation in consumption.
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
11420,
198,
2,
474,
929,
88,
353,
25,
198,
2,
220,
220,
474,
929,
88,
5239,
25,
198,
2,
220,
220,
220,
220,
2685,
62,
38993,
62,
17752,
25,
2081,
198,
2,
220,... | 2.706764 | 5,071 |
#!/usr/local/bin/python
# Code Fights Longest Digits Prefix Problem
import re
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
12001,
14,
8800,
14,
29412,
198,
2,
6127,
376,
2337,
5882,
395,
7367,
896,
3771,
13049,
20647,
198,
198,
11748,
302,
628,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1... | 2.688889 | 45 |
#!/usr/bin/env python
"""
<Program Name>
test_delegations.py
<Author>
Konstantin Andrianov
<Started>
February 19, 2012
<Copyright>
See LICENSE for licensing information.
<Purpose>
Ensure that TUF meets expectations about target delegations.
"""
import os
import time
import tempfile
import unittest
import tuf.formats
import tuf.repo.keystore as keystore
import tuf.repo.signercli as signercli
import tuf.repo.signerlib as signerlib
import tuf.tests.util_test_tools as util_test_tools
version = 1
# Modify the number of iterations (from the higher default count) so the unit
# tests run faster.
keystore._PBKDF2_ITERATIONS = 1000
class TestInitialUpdateWithTargetDelegations(TestDelegationFunctions):
"""We show that making target delegations results in a successful initial
update of targets."""
class TestBreachOfTargetDelegation(TestDelegationFunctions):
"""We show that a delegated targets role B cannot talk about targets that A
did not delegate to B."""
def test_that_initial_update_fails_with_undelegated_signing_of_targets(self):
"""We expect to see ForbiddenTargetError on initial update because
delegated targets roles sign for targets that they were not delegated
to."""
# http://docs.python.org/2/library/unittest.html#unittest.TestCase.assertRaises
with self.assertRaises(tuf.NoWorkingMirrorError) as context_manager:
self.do_update()
mirror_errors = context_manager.exception.mirror_errors
forbidden_target_error = False
for mirror_url, mirror_error in mirror_errors.iteritems():
if isinstance(mirror_error, tuf.ForbiddenTargetError):
forbidden_target_error = True
break
self.assertEqual(forbidden_target_error, True)
class TestOrderOfTargetDelegationWithSuccess(TestDelegationFunctions):
"""We show that when multiple delegated targets roles talk about a target,
the first one in order of appearance of delegation wins.
In this case, the first role has the correct metadata about the target."""
class TestOrderOfTargetDelegationWithFailure(TestDelegationFunctions):
"""We show that when multiple delegated targets roles talk about a target,
the first one in order of appearance of delegation wins.
In this case, the first role has the wrong metadata about the target."""
def test_that_initial_update_fails_with_many_roles_sharing_a_target(self):
"""We expect to see BadHashError on initial update because the hash
metadata mismatches the target."""
# http://docs.python.org/2/library/unittest.html#unittest.TestCase.assertRaises
with self.assertRaises(tuf.NoWorkingMirrorError) as context_manager:
self.do_update()
mirror_errors = context_manager.exception.mirror_errors
bad_hash_error = False
for mirror_url, mirror_error in mirror_errors.iteritems():
if isinstance(mirror_error, tuf.BadHashError):
bad_hash_error = True
break
self.assertEqual(bad_hash_error, True)
class TestConservationOfTargetDelegation(TestDelegationFunctions):
"""We show that delegated targets roles have to neither sign for targets
delegated to them nor further delegate them."""
if __name__ == '__main__':
unittest.main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
37811,
198,
27,
15167,
6530,
29,
198,
220,
1332,
62,
2934,
1455,
602,
13,
9078,
198,
198,
27,
13838,
29,
198,
220,
17431,
18797,
259,
843,
4484,
709,
198,
198,
27,
10434,
276,
... | 3.188976 | 1,016 |
#Arz Cazibesi
#Semih Akkoc
# TODO:
# we got alot to do global and thingy
from tkinter import *
from matplotlib import pyplot as plt
from gpiozero import LED as relay
from gpiozero import MotionSensor
import pandas as pd
import time as t
import array
import math
import smbus2
import bme280
# --- Hardware setup: runs at import time on a Raspberry Pi ---
infra = MotionSensor(27) #use queue_len if it detects twitchy
vaccum = relay(17)  # NOTE(review): "vaccum" (sic) -- presumably a vacuum-pump relay
magnet = relay(18)
#turn off vaccum turn on magnet
vaccum.on()  # NOTE(review): comment says "turn off" but .on() is called -- relay may be active-low; confirm wiring
magnet.off()

# BME280 environment sensor on I2C bus 1 at address 0x76.
port = 1
address = 0x76
bus = smbus2.SMBus(port)
cp = bme280.load_calibration_params(bus, address) #calibration_params

# Take one initial sample and echo it for a smoke check.
pressureData = bme280.sample(bus, address, cp)
print(pressureData)
print(pressureData.temperature)

curExNum=0 #current experiment number
# main code goes in here
#returns bme280 datas (pressure, temperature, humidty, time)
# position-time graphic
# speed-time graphic
# acceleration-time graphic
# writes datas on txt file
# prints values in console
# fills values manualy
# currently offline
# Script entry point.
# NOTE(review): gui() is not defined anywhere in the visible code -- the GUI
# builder was presumably removed or lives in another module; confirm before running.
if __name__ == '__main__':
    screen = gui()
| [
2,
3163,
89,
327,
1031,
571,
46551,
201,
198,
2,
13900,
4449,
9084,
74,
420,
201,
198,
201,
198,
2,
16926,
46,
25,
201,
198,
2,
356,
1392,
43158,
284,
466,
3298,
290,
1517,
88,
201,
198,
201,
198,
6738,
256,
74,
3849,
1330,
1635... | 2.580275 | 436 |
from .BaseModel import BaseModel
from enum import Enum
| [
6738,
764,
14881,
17633,
1330,
7308,
17633,
198,
6738,
33829,
1330,
2039,
388,
628,
198
] | 3.8 | 15 |
class AdminSearchPlusMixin:
    """Mixin for admin classes that restricts searching to a specified field.

    Also disables the changelist result-count display flags.
    """

    # Marker flag consumed elsewhere to enable the restricted-search behaviour.
    admin_search_plus = True
    # Suppress the result-count queries/labels on the changelist page.
    show_full_result_count = False
    show_result_count = False
| [
198,
198,
4871,
32053,
18243,
17860,
35608,
259,
25,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
32053,
5022,
259,
326,
7095,
15455,
284,
7368,
2214,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
13169,
62,
12947,
62,
9541,
796,
... | 2.882353 | 68 |
# app.py
# Author: Jerrad Flores
import colored_text
import blockchain
import socket
import time
| [
2,
598,
13,
9078,
198,
2,
6434,
25,
4230,
6335,
40222,
198,
11748,
16396,
62,
5239,
198,
11748,
11779,
198,
11748,
17802,
198,
11748,
640,
628,
198,
220,
220,
220,
220,
220,
220
] | 3.181818 | 33 |
import os
import argparse
import json
# Script entry point.
# NOTE(review): main() is not defined in the visible portion of this file --
# confirm it is defined elsewhere before running.
if __name__ == "__main__":
    main()
| [
11748,
28686,
198,
11748,
1822,
29572,
198,
11748,
33918,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
1388,
3419,
198
] | 2.190476 | 42 |
from django.urls import path
from django.contrib import admin
from . import views
# Import-time side effect: let Django's admin discover registered ModelAdmins.
admin.autodiscover()

# URL routes for the pathway app.
# NOTE(review): the r'' prefixes are harmless but unnecessary -- path()
# routes are plain strings, not regexes.
urlpatterns = [
    path(r'', views.DefinePathwayPage, name='index'),
    path(r'build_model', views.BuildPathwayModel),
    path(r'results', views.PathwayResultPage),
]
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
764,
1330,
5009,
198,
28482,
13,
2306,
375,
29392,
3419,
198,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
3108,
7,
... | 2.824742 | 97 |
# T-SQL query: every FactInternetSales row from the AdventureWorksDW2017
# warehouse, with derived OrderYear/OrderMonth/OrderDay columns from OrderDate.
factsales_query = '''
SELECT *, YEAR(OrderDate) AS OrderYear, MONTH(OrderDate) AS OrderMonth, DAY(OrderDate) AS OrderDay
FROM [AdventureWorksDW2017].[dbo].[FactInternetSales]
'''
37473,
2040,
62,
22766,
796,
705,
7061,
198,
46506,
1635,
11,
32914,
7,
18743,
10430,
8,
7054,
8284,
17688,
11,
25000,
4221,
7,
18743,
10430,
8,
7054,
8284,
31948,
11,
24644,
7,
18743,
10430,
8,
7054,
8284,
12393,
198,
220,
16034,
685... | 3.016667 | 60 |
import numpy as np
import pandas as pd
from pytz import UTC, timezone
def days_at_time(days, t, tz, day_offset=0):
    """
    Build a UTC-localized index: each day in ``days`` at wall-clock time
    ``t``, where ``t`` is interpreted in timezone ``tz``.

    Parameters
    ----------
    days : DatetimeIndex
        An index of dates (represented as midnight).
    t : datetime.time
        The time to apply as an offset to each day in ``days``.
    tz : pytz.timezone
        The timezone to use to interpret ``t``.
    day_offset : int
        The number of whole days to shift ``days`` by.

    Examples
    --------
    March 13th is the US/Eastern daylight-savings transition, so the UTC
    offset changes while the local time stays 8:45:

    >>> import pandas as pd; import datetime
    >>> dts = pd.date_range('2016-03-12', '2016-03-14')
    >>> [str(dt) for dt in days_at_time(dts, datetime.time(8, 45), 'US/Eastern')]
    ['2016-03-12 13:45:00+00:00', '2016-03-13 12:45:00+00:00', '2016-03-14 12:45:00+00:00']
    """
    # Strip any timezone so the offset arithmetic is done on naive stamps.
    naive_days = pd.DatetimeIndex(days).tz_localize(None)
    if not len(naive_days):
        return naive_days.tz_localize(UTC)
    # Combine the day offset and the wall-clock time into one shift.
    wall_clock_shift = pd.Timedelta(
        days=day_offset,
        hours=t.hour,
        minutes=t.minute,
        seconds=t.second,
    )
    shifted = naive_days + wall_clock_shift
    return shifted.tz_localize(tz).tz_convert(UTC)
def vectorized_sunday_to_monday(dtix):
    """A vectorized implementation of
    :func:`pandas.tseries.holiday.sunday_to_monday`.

    Parameters
    ----------
    dtix : pd.DatetimeIndex
        The index whose Sundays should be pushed to the following Monday.

    Returns
    -------
    sundays_as_mondays : pd.DatetimeIndex
        ``dtix`` with every Sunday moved forward one day.
    """
    # weekday == 6 marks Sundays; shift only those entries by one day.
    one_day = np.timedelta64(1, 'D')
    shifted = np.where(dtix.weekday == 6, dtix.values + one_day, dtix.values)
    return pd.DatetimeIndex(shifted)
def all_trading_minutes(start, end,
                        am_start='09:31', am_end='11:30',
                        pm_start='13:01', pm_end='15:00',
                        tz=timezone('Asia/Shanghai')):
    """Trading minutes between ``start`` and ``end``, excluding the lunch break.

    Args:
        start (datetime-like): beginning of the window
        end (datetime-like): end of the window
        am_start (time): morning session open
        am_end (time): morning session close
        pm_start (time): afternoon session open
        pm_end (time): afternoon session close
        tz (timezone): timezone the session times are expressed in

    Returns:
        DatetimeIndex: trading minutes, converted to UTC
    """
    every_minute = pd.date_range(start, end, freq='min')
    # Interpret the window in the exchange's local timezone.
    if every_minute.tz is None:
        every_minute = every_minute.tz_localize(tz)
    elif every_minute.tz != tz:
        every_minute = every_minute.tz_convert(tz)
    # Keep only the morning and afternoon sessions, then emit in UTC.
    morning = every_minute[every_minute.indexer_between_time(am_start, am_end)]
    afternoon = every_minute[every_minute.indexer_between_time(pm_start, pm_end)]
    return morning.append(afternoon).sort_values().tz_convert(UTC)
| [
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
12972,
22877,
1330,
18119,
11,
640,
11340,
628,
198,
4299,
1528,
62,
265,
62,
2435,
7,
12545,
11,
256,
11,
256,
89,
11,
1110,
62,
28968,
28,
15,
2599,
... | 2.087385 | 1,419 |
# -*- coding: utf-8 -*-
# Description: Unsupervised Monolingual Word Segmentation
# It is based on goldwater-etal-2006-contextual
# https://www.aclweb.org/anthology/P06-1085/
# Base distribution - Geometric distribution
import regex as re
import numpy as np
import matplotlib.pyplot as plt
import grapheme
import pickle
import math
import argparse
from utils import utilities as utilities
from mpmath import gamma
import sys
import csv
# Fix the RNG seed so sampling runs are reproducible.
np.random.seed(163)
def parse_args():
    """
    Argument Parser

    Builds the command-line interface for the segmentation mixture model and
    returns the parsed :class:`argparse.Namespace`.
    """
    parser = argparse.ArgumentParser(description="Mixture Model")
    parser.add_argument("-k", "--cluster", dest="cluster", type=int,
                        default=3, metavar="INT", help="Cluster size [default:3]")
    parser.add_argument("-d", "--data_points", dest="data_points", type=int,
                        default=5000, metavar="INT", help="Total data points [default:5000]")
    # BUG FIX: help text previously claimed [default:50] while the actual
    # default was 5; the text now matches the real default.
    parser.add_argument("-i", "--iteration", dest="iteration", type=int,
                        default=5, metavar="INT", help="Iterations [default:5]")
    parser.add_argument("-a", "--alpha", dest="alpha", type=float,
                        default=0.9, metavar="FLOAT", help="Alpha")
    parser.add_argument("--alpha_0", dest="alpha_0", type=float,
                        default=1.0, metavar="FLOAT", help="Beta Geometric Alpha")
    parser.add_argument("--beta_0", dest="beta_0", type=float,
                        default=2.0, metavar="FLOAT", help="Beta Geometric Beta")
    parser.add_argument("-p", "--prob_c", dest="prob_c", type=float,
                        default=0.5, metavar="FLOAT", help="Probability of joining new cluster")
    parser.add_argument("-m", "--method", dest="method", type=str, default='collapsed',
                        choices=['mle', 'nig', 'collapsed'], metavar="STR",
                        help="Method Selection [default:collapsed]")
    parser.add_argument("--input_filename", dest="input_filename", type=str, default='./data/train.txt',
                        metavar="PATH", help="Input Filename [default:train.txt]")
    parser.add_argument("-f", "--model_filename", dest="model_filename", type=str,
                        default='./models/segmentation_model.pkl', metavar="STR",
                        help="File name [default:segmentation_model.pkl]")
    parser.add_argument("-l", "--log_filename", dest="log_filename", type=str, default='./logs/segmentation.log',
                        metavar="PATH", help="File name [default:segmentation.log]")
    parser.add_argument('-t', "--inference", default=False, action="store_true", help="For inference purpose only")
    parser.add_argument('-w', "--word", default="नेपालको", metavar="STR", help="Input inference word")
    parser.add_argument('-e', "--evaluation", default=False, action="store_true", help="For evaluation purpose only")
    # BUG FIX: the required-ness test previously only matched the literal
    # token '--evaluate', which is not a declared flag (it only worked via
    # argparse prefix abbreviation of '--evaluation'). Accept all spellings
    # of the evaluation switch.
    parser.add_argument("-g", "--gold_file", dest="gold_file", type=str,
                        default='./data/gold_standard.txt',
                        required=any(flag in sys.argv
                                     for flag in ('-e', '--evaluate', '--evaluation')),
                        metavar="PATH", help="Gold standard file name [default:gold_standard.txt]")
    parser.add_argument("-r", "--result_filename", dest="result_filename", type=str,
                        default='./logs/result_file.txt',
                        metavar="PATH", help="Result file name [default:result_file.txt]")
    args = parser.parse_args()
    return args
# Single split at each possible boundary
# Single split at each possible boundary
# Geometric base distribution
# Remove given data from cluster
# Read file
# Generates data, splitting into stem/suffix
# Random assignment by generating random number
# between 0 and given number of cluster
# Beta geometric conjuate prior
# Helper fit function
# Main fit function
# Accumulate the data into cluster {key: value} pairs
# Display log likelihood plot
# Inference
# Get posterior probability based on the cluster assignment
# of given morpheme, assumption is a morpheme is assigned to only one cluster
# Inference
# Get posterior probability based on sampling among the cluster assignment
# of given morpheme, because a morpheme can be assigned to multiple clusters
# Inference
# Evaluate
# Script entry point.
# NOTE(review): main() is not defined in the visible portion of this file --
# confirm it is defined elsewhere before running.
if __name__ == "__main__":
    main()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
12489,
25,
791,
16668,
16149,
2892,
40949,
723,
9678,
1001,
5154,
341,
198,
2,
632,
318,
1912,
319,
3869,
7050,
12,
316,
282,
12,
13330,
12,
22866,
723,
198,
... | 2.5367 | 1,703 |
from math import cos, sin
import numpy as np
import pytest
from manim import Circle, Line, Mobject, RegularPolygon, Square, VDict, VGroup, VMobject
from manim.constants import PI
def test_vgroup_init():
    """Test the VGroup instantiation."""
    # Zero or more VMobjects are accepted...
    VGroup()
    VGroup(VMobject())
    VGroup(VMobject(), VMobject())
    # ...while plain Mobjects are rejected, alone or in pairs.
    for bad_args in ((Mobject(),), (Mobject(), Mobject())):
        with pytest.raises(TypeError):
            VGroup(*bad_args)
def test_vgroup_add():
    """Test the VGroup add method."""
    obj = VGroup()
    assert len(obj.submobjects) == 0
    obj.add(VMobject())
    assert len(obj.submobjects) == 1
    # A plain Mobject is rejected...
    with pytest.raises(TypeError):
        obj.add(Mobject())
    # ...and the rejection leaves the group unchanged.
    assert len(obj.submobjects) == 1
    with pytest.raises(TypeError):
        # If only one of the added object is not an instance of VMobject, none of them should be added
        obj.add(VMobject(), Mobject())
    assert len(obj.submobjects) == 1
    with pytest.raises(ValueError):
        # a Mobject cannot contain itself
        obj.add(obj)
def test_vgroup_add_dunder():
    """Test the VGroup __add__ magic method."""
    obj = VGroup()
    assert len(obj.submobjects) == 0
    # Plain + returns a new group and must NOT mutate the original.
    obj + VMobject()
    assert len(obj.submobjects) == 0
    # In-place += does mutate.
    obj += VMobject()
    assert len(obj.submobjects) == 1
    with pytest.raises(TypeError):
        obj += Mobject()
    assert len(obj.submobjects) == 1
    with pytest.raises(TypeError):
        # If only one of the added object is not an instance of VMobject, none of them should be added
        obj += (VMobject(), Mobject())
    assert len(obj.submobjects) == 1
    with pytest.raises(ValueError):
        # a Mobject cannot contain itself
        obj += obj
def test_vgroup_remove():
    """Test the VGroup remove method."""
    a = VMobject()
    c = VMobject()
    b = VGroup(c)  # nested group: obj -> (a, b -> (c,))
    obj = VGroup(a, b)
    assert len(obj.submobjects) == 2
    assert len(b.submobjects) == 1
    # Removal only affects the group it is called on, not nested groups.
    obj.remove(a)
    b.remove(c)
    assert len(obj.submobjects) == 1
    assert len(b.submobjects) == 0
    obj.remove(b)
    assert len(obj.submobjects) == 0
def test_vgroup_remove_dunder():
    """Test the VGroup __sub__ magic method."""
    a = VMobject()
    c = VMobject()
    b = VGroup(c)  # nested group: obj -> (a, b -> (c,))
    obj = VGroup(a, b)
    assert len(obj.submobjects) == 2
    assert len(b.submobjects) == 1
    # Plain - returns a new, smaller group without mutating the original.
    assert len(obj - a) == 1
    assert len(obj.submobjects) == 2
    # In-place -= does mutate.
    obj -= a
    b -= c
    assert len(obj.submobjects) == 1
    assert len(b.submobjects) == 0
    obj -= b
    assert len(obj.submobjects) == 0
def test_vmob_add_to_back():
    """Test the Mobject add_to_back method."""
    a = VMobject()
    b = Line()
    c = "text"  # deliberately invalid submobject type
    with pytest.raises(ValueError):
        # Mobject cannot contain self
        a.add_to_back(a)
    with pytest.raises(TypeError):
        # All submobjects must be of type Mobject
        a.add_to_back(c)
    # No submobject gets added twice
    a.add_to_back(b)
    a.add_to_back(b, b)
    assert len(a.submobjects) == 1
    a.submobjects.clear()
    # Duplicates within a single call are also collapsed.
    a.add_to_back(b, b, b)
    a.add_to_back(b, b)
    assert len(a.submobjects) == 1
    a.submobjects.clear()
    # Make sure the ordering has not changed
    o1, o2, o3 = Square(), Line(), Circle()
    a.add_to_back(o1, o2, o3)
    assert a.submobjects.pop() == o3
    assert a.submobjects.pop() == o2
    assert a.submobjects.pop() == o1
def test_vdict_init():
    """Test the VDict instantiation."""
    keys = ["a", "b", "c"]
    # Every supported construction style must be accepted:
    # empty, list of pairs, python dict, and zip of keys/values.
    VDict()
    VDict([(key, VMobject()) for key in keys])
    VDict({key: VMobject() for key in keys})
    VDict(zip(keys, [VMobject() for _ in keys]))
    # A value of plain Mobject type must raise a TypeError.
    with pytest.raises(TypeError):
        VDict({"a": Mobject()})
def test_vdict_add():
    """Test the VDict add method."""
    vdict = VDict()
    assert len(vdict.submob_dict) == 0
    # Adding a (key, VMobject) pair grows the dict...
    vdict.add([("a", VMobject())])
    assert len(vdict.submob_dict) == 1
    # ...while a plain Mobject value is rejected.
    with pytest.raises(TypeError):
        vdict.add([("b", Mobject())])
def test_vdict_remove():
    """Test the VDict remove method."""
    vdict = VDict([("a", VMobject())])
    assert len(vdict.submob_dict) == 1
    vdict.remove("a")
    assert len(vdict.submob_dict) == 0
    # Removing a key that is no longer present raises KeyError.
    with pytest.raises(KeyError):
        vdict.remove("a")
def test_vgroup_supports_item_assigment():
    """Test VGroup supports array-like assignment for VMObjects"""
    # NOTE(review): "assigment" typo in the name is kept -- renaming would
    # change the collected test id.
    original, replacement = VMobject(), VMobject()
    vgroup = VGroup(original)
    assert vgroup[0] == original
    # Index assignment swaps the element in place without growing the group.
    vgroup[0] = replacement
    assert vgroup[0] == replacement
    assert len(vgroup) == 1
def test_vgroup_item_assignment_at_correct_position():
    """Test VGroup item-assignment adds to correct position for VMObjects"""
    n_items = 10
    vgroup = VGroup()
    for _ in range(n_items):
        vgroup.add(VMobject())
    replacement = VMobject()
    # Assigning in the middle must land at exactly that index and keep length.
    vgroup[6] = replacement
    assert vgroup[6] == replacement
    assert len(vgroup) == n_items
def test_vgroup_item_assignment_only_allows_vmobjects():
    """Test VGroup item-assignment raises TypeError when invalid type is passed"""
    group = VGroup(VMobject())
    # A non-VMobject value must be rejected with the documented message.
    with pytest.raises(TypeError, match="All submobjects must be of type VMobject"):
        group[0] = "invalid object"
def test_bounded_become():
    """Tests that align_points generates a bounded number of points.
    https://github.com/ManimCommunity/manim/issues/1959
    """
    # NOTE(review): draw_circle is not defined in this chunk -- presumably a
    # helper defined elsewhere in the test module; its second argument looks
    # like a subpath/point count. Confirm against its definition.
    o = VMobject()
    # o must contain some points, or else become behaves differently
    draw_circle(o, 2)
    for _ in range(20):
        # Alternate between calls to become with different subpath sizes
        a = VMobject()
        draw_circle(a, 20)
        o.become(a)
        b = VMobject()
        draw_circle(b, 15)
        draw_circle(b, 15, x=3)
        o.become(b)
    # The number of points should be similar to the size of a and b
    assert len(o.points) <= (20 + 15 + 15) * 4
| [
6738,
10688,
1330,
8615,
11,
7813,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
12972,
9288,
198,
198,
6738,
582,
320,
1330,
16291,
11,
6910,
11,
337,
15252,
11,
23603,
34220,
14520,
11,
9276,
11,
569,
35,
713,
11,
569,
13247,... | 2.412641 | 2,484 |
#!/usr/bin/env python
"""
#
#------------------------------------------------------------------------------
# University of Minnesota
# Copyright 2017, Regents of the University of Minnesota
#------------------------------------------------------------------------------
# Author:
#
# James E Johnson
#
#------------------------------------------------------------------------------
"""
from __future__ import print_function
from __future__ import unicode_literals
import sys
from time import sleep
import requests
# Ensembl REST API endpoint and the assembly-info path for human.
server = "https://rest.ensembl.org"
ext = "/info/assembly/homo_sapiens?"
# NOTE(review): presumably the largest region size (bp) fetched per request --
# confirm against the code that consumes it.
max_region = 4000000
debug = False
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
198,
2,
198,
2,
10097,
26171,
198,
2,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
2059,
286,
89... | 3.929825 | 171 |
from typing import TYPE_CHECKING, Optional, Type
from .ast_node import ASTNode
from .conditional import is_castable
from ..core.helpers import truncate_int
if TYPE_CHECKING:
from ..compiler import RDLEnvironment
from ..source_ref import SourceRefBase
# Type alias: a source reference that may be absent.
OptionalSourceRef = Optional[SourceRefBase]

# Integer unary operators:
#   +  -  ~
# Normal expression context rules
| [
6738,
19720,
1330,
41876,
62,
50084,
2751,
11,
32233,
11,
5994,
198,
198,
6738,
764,
459,
62,
17440,
1330,
29273,
19667,
198,
6738,
764,
17561,
1859,
1330,
318,
62,
2701,
540,
198,
198,
6738,
11485,
7295,
13,
16794,
364,
1330,
40122,
... | 3.318966 | 116 |
from django.conf.urls.static import static
from django.conf import settings
from django.urls import path
from .views import index, about_us, runcode, process_speech, audio_test
# URL routes for the app, plus static-file serving appended for development.
urlpatterns = [
    path('', index, name='homepage'),
    path('about_us/', about_us, name='AboutUs'),
    path('runcode', runcode, name="runcode"),
    path('process_speech', process_speech, name="process_speech"),
    path('audio_test', audio_test, name="audio_test"),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
# NOTE(review): static() only serves files when DEBUG is enabled -- production
# should serve STATIC/MEDIA through the web server instead.
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
13,
12708,
1330,
9037,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
6738,
764,
33571,
1330,
6376,
11,
546,
62,
385,
11,
1057,
8189... | 2.984925 | 199 |
"""
Creates the following tables that are used in the front-end:
- CountryTopicOutput: Shows a country's total citations and paper volume by year and topic.
- AllMetrics: Combines all the metrics (gender diversity, research diversity, RCA) we've derived by year and topic.
- PaperCountry: Shows the paper IDs of a country. Used in the particle visualisation.
- PaperTopics: Shows the paper IDs of a topic. Used in the particle visualisation.
- PaperYear: Shows the paper IDs of a year. Used in the particle visualisation.
Note: Topics are fetched from the FilteredFos table.
"""
import logging
import pandas as pd
from sqlalchemy import create_engine, func, distinct, and_
from sqlalchemy.orm import sessionmaker
from orion.packages.utils.s3_utils import store_on_s3
from sqlalchemy.orm.exc import NoResultFound
from collections import defaultdict
import pyarrow as pa
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from orion.core.orms.mag_orm import (
FilteredFos,
Paper,
PaperFieldsOfStudy,
AffiliationLocation,
AuthorAffiliation,
FieldOfStudy,
AllMetrics,
GenderDiversityCountry,
ResearchDiversityCountry,
MetricCountryRCA,
PaperCountry,
PaperTopics,
PaperYear,
PaperTopicsGrouped,
CountryTopicOutputsMetrics,
BlobArrow,
DocVector,
)
class CreateVizTables(BaseOperator):
"""Creates tables used in visualisation."""
@apply_defaults
| [
37811,
198,
16719,
274,
262,
1708,
8893,
326,
389,
973,
287,
262,
2166,
12,
437,
25,
198,
12,
12946,
33221,
26410,
25,
25156,
257,
1499,
338,
2472,
33499,
290,
3348,
6115,
416,
614,
290,
7243,
13,
198,
12,
1439,
9171,
10466,
25,
143... | 3.282511 | 446 |
# -*- coding: utf-8 -*-
from abc import ABCMeta, abstractmethod
import copy
import logging
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
# import os, IPython, sys
import math
import random
import time
import scipy.stats as stats
from vstsim.visualization.GL_visualizer3d import GL_Visualizer
try:
import pcl
except ImportError:
logging.warning('Failed to import pcl!')
import vstsim
import itertools as it
import multiprocessing
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
import datetime
import pickle
from vstsim.grasping import Grasp, Contact3D, ParallelJawPtGrasp3D, PointGraspMetrics3D, \
VacuumPoint, GraspQuality_Vacuum, DexterousVacuumPoint, DexterousQuality_Vacuum, \
ChameleonTongueContact, ChameleonTongue_Quality
from vstsim.grasping import GraspInfo, GraspInfo_TongueGrasp
from vstsim.grasping import math_robot
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
from autolab_core import RigidTransform
import scipy
# create logger
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
USE_OPENRAVE = True
try:
import openravepy as rave
except ImportError:
logger.warning('Failed to import OpenRAVE')
USE_OPENRAVE = False
try:
import rospy
import moveit_commander
except ImportError:
logger.warning("Failed to import rospy, you can't grasp now.")
try:
from mayavi import mlab
except ImportError:
mlab = []
logger.warning('Do not have mayavi installed, please set the vis to False')
"""
Copyright ©2017. The Regents of the University of California (Regents). All Rights Reserved.
Permission to use, copy, modify, and distribute this software and its documentation for educational,
research, and not-for-profit purposes, without fee and without a signed licensing agreement, is
hereby granted, provided that the above copyright notice, this paragraph and the following two
paragraphs appear in all copies, modifications, and distributions. Contact The Office of Technology
Licensing, UC Berkeley, 2150 Shattuck Avenue, Suite 510, Berkeley, CA 94720-1620, (510) 643-
7201, otl@berkeley.edu, http://ipira.berkeley.edu/industry-info for commercial licensing opportunities.
IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL,
INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF
THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF REGENTS HAS BEEN
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED
HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
"""
"""
Classes for sampling grasps.
Author: Jeff Mahler
"""
"""
Modified by: Hui Zhang
Email : hui.zhang@kuleuven.be
Date : 23/02/2020 09:53
"""
# 14-entry RGB palette (0-255 ints) used for visualisation.
# NOTE(review): initialised with np.ones although every row is overwritten below.
color_ls = np.ones((14, 3), dtype=np.int32)
color_ls[0, :] = np.array([255, 0, 0])  # Red
color_ls[1, :] = np.array([60, 180, 75])  # Green
color_ls[2, :] = np.array([255, 225, 25])  # Yellow
color_ls[3, :] = np.array([0, 130, 200])  # Blue
color_ls[4, :] = np.array([245, 130, 48])  # Orange
color_ls[5, :] = np.array([145, 30, 180])  # Purple
color_ls[6, :] = np.array([70, 240, 240])  # Cyan
color_ls[7, :] = np.array([240, 50, 230])  # Magenta
color_ls[8, :] = np.array([210, 245, 60])  # Lime
color_ls[9, :] = np.array([250, 190, 190])  # Pink
color_ls[10, :] = np.array([0, 128, 128])  # Teal
color_ls[11, :] = np.array([128, 0, 0])  # Maroon
color_ls[12, :] = np.array([128, 128, 0])  # Olive
color_ls[13, :] = np.array([0, 0, 128])  # Navy
# class GraspSampler(metaclass=ABCMeta):
class GraspSampler:
    """ Base class for various methods to sample a number of grasps on an object.
    Should not be instantiated directly.

    Attributes
    ----------
    gripper : :obj:`RobotGripper`
        the gripper to compute grasps for
    config : :obj:`YamlConfig`
        configuration for the grasp sampler
    """
    __metaclass__ = ABCMeta

    # (attribute name, config key, default) triples consumed by _configure.
    # This replaces ~20 copy/pasted if/else blocks with one data table;
    # resulting attribute values are identical to the old code.
    # NOTE(review): 'angle_range' deliberately reads the 'angle_range_max'
    # key with a different default, exactly as the original code did --
    # looks like a copy/paste quirk; preserved for compatibility.
    _CONFIG_SPEC = (
        ('friction_coef', 'sampling_friction_coef', 2.0),
        ('num_cone_faces', 'num_cone_faces', 8),
        ('num_samples', 'grasp_samples_per_surface_point', 1),
        ('target_num_grasps', 'target_num_grasps', 1),
        ('min_contact_dist', 'min_contact_dist', 0.0),
        ('num_grasp_rots', 'num_grasp_rots', 0.0),
        # parameters for the virtual camera
        ('back_up_dis', 'back_up_dis', 1),
        ('max_projection_dis', 'max_projection_dis', 1),
        ('num_projection_steps', 'num_projection_steps', 20),
        ('resolution_pc', 'resolution_pc', 24),
        ('angle_range', 'angle_range_max', 30.0),
        ('angle_range_max', 'angle_range_max', 5.0),
        ('angle_range_min', 'angle_range_min', 1.0),
        ('num_angle_steps', 'num_angle_steps', 5),
        ('scale_obj', 'scale_obj', 1.0),
        ('dim_grasp_matrix', 'dim_grasp_matrix', 100),
        ('max_num_surface_points_', 'max_num_surface_points', 100),
        ('grasp_dist_thresh_', 'grasp_dist_thresh', 0),
    )

    def _configure(self, config):
        """ Configures the grasp generator.

        Reads every known key from ``config`` (anything that supports
        ``keys()`` and ``__getitem__``, e.g. a dict or YamlConfig) and stores
        it on ``self``, falling back to the defaults in ``_CONFIG_SPEC``.
        """
        available = list(config.keys())
        for attr, key, default in self._CONFIG_SPEC:
            setattr(self, attr, config[key] if key in available else default)
        # Preserve the original None -> 1 normalization for the grasp budget.
        if self.target_num_grasps is None:
            self.target_num_grasps = 1

    @abstractmethod
    def sample_grasps(self, graspable, num_grasps_generate, vis, **kwargs):
        """
        Create a list of candidate grasps for a given object.
        Must be implemented for all grasp sampler classes.

        Parameters
        ---------
        graspable : :obj:`GraspableObject3D`
            object to sample grasps on
        num_grasps_generate : int
        vis : bool
        """
        grasp = []
        return grasp
        # pass

    def generate_grasps_stable_poses(self, graspable, stable_poses, target_num_grasps=None, grasp_gen_mult=5,
                                     max_iter=3, sample_approach_angles=False, vis=False, **kwargs):
        """Samples a set of grasps for an object, aligning the approach angles to the object stable poses.

        Parameters
        ----------
        graspable : :obj:`GraspableObject3D`
            the object to grasp
        stable_poses : :obj:`list` of :obj:`meshpy.StablePose`
            list of stable poses for the object with ids read from the database
        target_num_grasps : int
            number of grasps to return, defualts to self.target_num_grasps
        grasp_gen_mult : int
            number of additional grasps to generate
        max_iter : int
            number of attempts to return an exact number of grasps before giving up
        sample_approach_angles : bool
            whether or not to sample approach angles
        vis : bool

        Return
        ------
        :obj:`list` of :obj:`ParallelJawPtGrasp3D`
            list of generated grasps
        """
        # sample dense grasps
        unaligned_grasps = self.generate_grasps(graspable, target_num_grasps=target_num_grasps,
                                                grasp_gen_mult=grasp_gen_mult,
                                                max_iter=max_iter, vis=vis)
        # FIX: debug print (left in by a previous author) replaced with a
        # logger call so library code does not write to stdout.
        logger.debug('sample_approach_angles=%s', sample_approach_angles)
        # align for each stable pose
        grasps = {}
        for stable_pose in stable_poses:
            grasps[stable_pose.id] = []
            for grasp in unaligned_grasps:
                aligned_grasp = grasp.perpendicular_table(grasp)
                grasps[stable_pose.id].append(copy.deepcopy(aligned_grasp))
        return grasps

    def generate_grasps(self, graspable, target_num_grasps=None, grasp_gen_mult=5, max_iter=3,
                        sample_approach_angles=False, vis=False, **kwargs):
        """Samples a set of grasps for an object.

        Parameters
        ----------
        graspable : :obj:`GraspableObject3D`
            the object to grasp
        target_num_grasps : int
            number of grasps to return, defualts to self.target_num_grasps
        grasp_gen_mult : int
            number of additional grasps to generate
        max_iter : int
            number of attempts to return an exact number of grasps before giving up
        sample_approach_angles : bool
            whether or not to sample approach angles
        vis : bool
            whether show the grasp on picture

        Return
        ------
        :obj:`list` of :obj:`ParallelJawPtGrasp3D`
            list of generated grasps
        """
        # get num grasps
        if target_num_grasps is None:
            target_num_grasps = self.target_num_grasps
        num_grasps_remaining = target_num_grasps

        grasps = []
        k = 1
        while num_grasps_remaining > 0 and k <= max_iter:
            # SAMPLING: generate more than we need
            num_grasps_generate = grasp_gen_mult * num_grasps_remaining
            new_grasps = self.sample_grasps(graspable, num_grasps_generate, vis, **kwargs)

            # COVERAGE REJECTION: prune grasps by distance
            pruned_grasps = []
            for grasp in new_grasps:
                min_dist = np.inf
                # distance to both the accepted set and the current batch
                for cur_grasp in grasps:
                    dist = ParallelJawPtGrasp3D.distance(cur_grasp, grasp)
                    if dist < min_dist:
                        min_dist = dist
                for cur_grasp in pruned_grasps:
                    dist = ParallelJawPtGrasp3D.distance(cur_grasp, grasp)
                    if dist < min_dist:
                        min_dist = dist
                if min_dist >= self.grasp_dist_thresh_:
                    pruned_grasps.append(grasp)

            # ANGLE EXPANSION sample grasp rotations around the axis
            candidate_grasps = []
            if sample_approach_angles:
                for grasp in pruned_grasps:
                    # construct a set of rotated grasps
                    for i in range(self.num_grasp_rots):
                        rotated_grasp = copy.copy(grasp)
                        # NOTE(review): delta_theta is hard-wired to 0 so every
                        # rotation is identical -- this code path is broken and
                        # preserved as-is pending a real fix (see warning).
                        delta_theta = 0  # add by Hongzhuo Liang
                        logger.warning("This function can not use yes, as delta_theta is not set. --Hongzhuo Liang")
                        rotated_grasp.set_approach_angle(i * delta_theta)
                        candidate_grasps.append(rotated_grasp)
            else:
                candidate_grasps = pruned_grasps

            # add to the current grasp set
            grasps += candidate_grasps
            logger.info('%d/%d grasps found after iteration %d.',
                        len(grasps), target_num_grasps, k)

            grasp_gen_mult *= 2
            num_grasps_remaining = target_num_grasps - len(grasps)
            k += 1

        # shuffle computed grasps
        random.shuffle(grasps)
        if len(grasps) > target_num_grasps:
            logger.info('Truncating %d grasps to %d.',
                        len(grasps), target_num_grasps)
            grasps = grasps[:target_num_grasps]
        logger.info('Found %d grasps.', len(grasps))
        return grasps
class UniformGraspSampler(GraspSampler):
    """ Sample grasps by sampling pairs of points on the object surface uniformly at random.
    """

    def sample_grasps(self, graspable, num_grasps, vis=False, max_num_samples=1000, **kwargs):
        """
        Returns a list of candidate grasps for graspable object using uniform point pairs from the SDF

        Parameters
        ----------
        graspable : :obj:`GraspableObject3D`
            the object to grasp
        num_grasps : int
            the number of grasps to generate
        vis : bool
            whether to visualise each finger-closing attempt
        max_num_samples : int
            hard cap on sampling attempts, so the loop terminates even when
            few point pairs yield valid grasps

        Returns
        -------
        :obj:`list` of :obj:`ParallelJawPtGrasp3D`
            list of generated grasps
        """
        # get all surface points
        surface_points, _ = graspable.sdf.surface_points(grid_basis=False)
        num_surface = surface_points.shape[0]
        i = 0
        grasps = []
        # get all grasps
        while len(grasps) < num_grasps and i < max_num_samples:
            # get candidate contacts: two distinct surface points at random
            indices = np.random.choice(num_surface, size=2, replace=False)
            c0 = surface_points[indices[0], :]
            c1 = surface_points[indices[1], :]

            # only pairs the gripper can physically span are considered
            gripper_distance = np.linalg.norm(c1 - c0)
            if self.gripper.min_width < gripper_distance < self.gripper.max_width:
                # compute centers and axes
                grasp_center = ParallelJawPtGrasp3D.center_from_endpoints(c0, c1)
                grasp_axis = ParallelJawPtGrasp3D.axis_from_endpoints(c0, c1)
                g = ParallelJawPtGrasp3D(ParallelJawPtGrasp3D.configuration_from_params(grasp_center,
                                                                                       grasp_axis,
                                                                                       self.gripper.max_width))
                # keep grasps if the fingers close
                if 'random_approach_angle' in kwargs and kwargs['random_approach_angle']:
                    # try approach angles in random order; first closing wins
                    angle_candidates = np.arange(-90, 120, 30)
                    np.random.shuffle(angle_candidates)
                    for grasp_angle in angle_candidates:
                        g.approach_angle_ = grasp_angle
                        # get true contacts (previous is subject to variation)
                        success, contacts = g.close_fingers(graspable, vis=vis)
                        if not success:
                            continue
                        break
                    else:
                        # no angle closed successfully: discard this pair
                        continue
                else:
                    success, contacts = g.close_fingers(graspable, vis=vis)
                if success:
                    grasps.append(g)
            i += 1
        return grasps
class GaussianGraspSampler(GraspSampler):
    """ Sample grasps by sampling a center from a gaussian with mean at the object center of mass
    and grasp axis by sampling the spherical angles uniformly at random.
    """
    def sample_grasps(self, graspable, num_grasps, vis=False, sigma_scale=2.5, **kwargs):
        """
        Returns a list of candidate grasps for graspable object by Gaussian with
        variance specified by principal dimensions.
        Parameters
        ----------
        graspable : :obj:`GraspableObject3D`
            the object to grasp
        num_grasps : int
            the number of grasps to generate
        sigma_scale : float
            the number of sigmas on the tails of the Gaussian for each dimension
        vis : bool
            visualization
        Returns
        -------
        :obj:`list` of :obj:`ParallelJawPtGrasp3D`
            list of generated grasps
        """
        # get object principal axes; the Gaussian covariance is scaled so that
        # sigma_scale standard deviations span half of each principal dimension
        center_of_mass = graspable.mesh.center_of_mass
        principal_dims = graspable.mesh.principal_dims()
        sigma_dims = principal_dims / (2 * sigma_scale)
        # sample centers
        grasp_centers = stats.multivariate_normal.rvs(
            mean=center_of_mass, cov=sigma_dims ** 2, size=num_grasps)
        # samples angles uniformly from sphere
        # (inverse-CDF sampling: theta uniform in [0, 2pi), cos(phi) uniform in [-1, 1])
        u = stats.uniform.rvs(size=num_grasps)
        v = stats.uniform.rvs(size=num_grasps)
        thetas = 2 * np.pi * u
        phis = np.arccos(2 * v - 1.0)
        grasp_dirs = np.array([np.sin(phis) * np.cos(thetas), np.sin(phis) * np.sin(thetas), np.cos(phis)])
        grasp_dirs = grasp_dirs.T
        # convert to grasp objects
        grasps = []
        for i in range(num_grasps):
            grasp = ParallelJawPtGrasp3D(
                ParallelJawPtGrasp3D.configuration_from_params(grasp_centers[i, :], grasp_dirs[i, :],
                                                               self.gripper.max_width))
            if 'random_approach_angle' in kwargs and kwargs['random_approach_angle']:
                # try shuffled approach angles in 30 degree steps until one closes
                angle_candidates = np.arange(-90, 120, 30)
                np.random.shuffle(angle_candidates)
                for grasp_angle in angle_candidates:
                    grasp.approach_angle_ = grasp_angle
                    # get true contacts (previous is subject to variation)
                    success, contacts = grasp.close_fingers(graspable, vis=vis)
                    if not success:
                        continue
                    break
                else:
                    # no approach angle closed successfully; discard this sample
                    continue
            else:
                success, contacts = grasp.close_fingers(graspable, vis=vis)
            # add grasp if it has valid contacts
            if success and np.linalg.norm(contacts[0].point - contacts[1].point) > self.min_contact_dist:
                grasps.append(grasp)
        # visualize
        if vis:
            for grasp in grasps:
                plt.clf()
                plt.gcf()
                plt.ion()
                grasp.close_fingers(graspable, vis=vis)
                plt.show(block=False)
                time.sleep(0.5)
            # plot sampled centers and the center of mass in SDF grid coordinates
            grasp_centers_grid = graspable.sdf.transform_pt_obj_to_grid(grasp_centers.T)
            grasp_centers_grid = grasp_centers_grid.T
            com_grid = graspable.sdf.transform_pt_obj_to_grid(center_of_mass)
            plt.clf()
            ax = plt.gca(projection='3d')
            # graspable.sdf.scatter()
            ax.scatter(grasp_centers_grid[:, 0], grasp_centers_grid[:, 1], grasp_centers_grid[:, 2], s=60, c='m')
            ax.scatter(com_grid[0], com_grid[1], com_grid[2], s=120, c='y')
            ax.set_xlim3d(0, graspable.sdf.dims_[0])
            ax.set_ylim3d(0, graspable.sdf.dims_[1])
            ax.set_zlim3d(0, graspable.sdf.dims_[2])
            plt.show()
        return grasps
class AntipodalGraspSampler(GraspSampler):
    """ Samples antipodal pairs using rejection sampling.
    The proposal sampling distribution is to choose a random point on
    the object surface, then sample random directions within the friction cone,
    then form a grasp axis along the direction,
    close the fingers, and keep the grasp if the other contact point is also in the friction cone.
    """
    def sample_from_cone(self, n, tx, ty, num_samples=1):
        """ Samples directions from within the friction cone using uniform sampling.
        Parameters
        ----------
        n : 3x1 normalized :obj:`numpy.ndarray`
            surface normal
        tx : 3x1 normalized :obj:`numpy.ndarray`
            tangent x vector
        ty : 3x1 normalized :obj:`numpy.ndarray`
            tangent y vector
        num_samples : int
            number of directions to sample
        Returns
        -------
        v_samples : :obj:`list` of 3x1 :obj:`numpy.ndarray`
            sampled directions in the friction cone
        """
        v_samples = []
        for i in range(num_samples):
            # uniform angle around the cone axis, uniform radius up to friction_coef
            theta = 2 * np.pi * np.random.rand()
            r = self.friction_coef * np.random.rand()
            v = n + r * np.cos(theta) * tx + r * np.sin(theta) * ty
            # negate so the direction points toward the surface, then normalize
            v = -v / np.linalg.norm(v)
            v_samples.append(v)
        return v_samples
    def within_cone(self, cone, n, v):
        """
        Checks whether or not a direction is in the friction cone.
        This is equivalent to whether a grasp will slip using a point contact model.
        Parameters
        ----------
        cone : 3xN :obj:`numpy.ndarray`
            supporting vectors of the friction cone
        n : 3x1 :obj:`numpy.ndarray`
            outward pointing surface normal vector at c1
        v : 3x1 :obj:`numpy.ndarray`
            direction vector
        Returns
        -------
        in_cone : bool
            True if alpha is within the cone
        alpha : float
            the angle between the normal and v
        """
        if (v.dot(cone) < 0).any():  # v should point in same direction as cone
            v = -v  # don't worry about sign, we don't know it anyway...
        f = -n / np.linalg.norm(n)
        alpha = np.arccos(f.T.dot(v) / np.linalg.norm(v))
        return alpha <= np.arctan(self.friction_coef), alpha
    def perturb_point(self, x, scale):
        """ Uniform random perturbations to a point, within a cube of side `scale`. """
        x_samp = x + (scale / 2.0) * (np.random.rand(3) - 0.5)
        return x_samp
    def sample_grasps(self, graspable, num_grasps, vis=False, **kwargs):
        """Returns a list of candidate grasps for graspable object.
        Parameters
        ----------
        graspable : :obj:`GraspableObject3D`
            the object to grasp
        num_grasps : int
            number of grasps to sample
        vis : bool
            whether or not to visualize progress, for debugging
        Returns
        -------
        :obj:`list` of :obj:`ParallelJawPtGrasp3D`
            the sampled grasps
        """
        # get surface points
        grasps = []
        surface_points, _ = graspable.sdf.surface_points(grid_basis=False)
        np.random.shuffle(surface_points)
        shuffled_surface_points = surface_points[:min(self.max_num_surface_points_, len(surface_points))]
        logger.info('Num surface: %d' % (len(surface_points)))
        for k, x_surf in enumerate(shuffled_surface_points):
            # perturb grasp for num samples
            for i in range(self.num_samples):
                # perturb contact (TODO: sample in tangent plane to surface)
                x1 = self.perturb_point(x_surf, graspable.sdf.resolution)
                # compute friction cone faces
                c1 = Contact3D(graspable, x1, in_direction=None)
                _, tx1, ty1 = c1.tangents()
                cone_succeeded, cone1, n1 = c1.friction_cone(self.num_cone_faces, self.friction_coef)
                if not cone_succeeded:
                    continue
                # sample grasp axes from friction cone
                # (removed unused time.clock() bookkeeping; time.clock was removed in Python 3.8)
                v_samples = self.sample_from_cone(n1, tx1, ty1, num_samples=1)
                for v in v_samples:
                    if vis:
                        x1_grid = graspable.sdf.transform_pt_obj_to_grid(x1)
                        cone1_grid = graspable.sdf.transform_pt_obj_to_grid(cone1, direction=True)
                        plt.clf()
                        plt.gcf()
                        plt.ion()
                        # BUGFIX: matplotlib expects the registered projection name
                        # '3d' (as used elsewhere in this file), not the Axes3D class.
                        ax = plt.gca(projection='3d')
                        for j in range(cone1.shape[1]):
                            ax.scatter(x1_grid[0] - cone1_grid[0], x1_grid[1] - cone1_grid[1],
                                       x1_grid[2] - cone1_grid[2], s=50, c='m')
                    # random axis flips since we don't have guarantees on surface normal directions
                    if random.random() > 0.5:
                        v = -v
                    # start searching for contacts
                    grasp, c1, c2 = ParallelJawPtGrasp3D.grasp_from_contact_and_axis_on_grid(
                        graspable, x1, v, self.gripper.max_width,
                        min_grasp_width_world=self.gripper.min_width, vis=vis)
                    if grasp is None or c2 is None:
                        continue
                    if 'random_approach_angle' in kwargs and kwargs['random_approach_angle']:
                        # try shuffled approach angles until one closes
                        angle_candidates = np.arange(-90, 120, 30)
                        np.random.shuffle(angle_candidates)
                        for grasp_angle in angle_candidates:
                            grasp.approach_angle_ = grasp_angle
                            # get true contacts (previous is subject to variation)
                            success, c = grasp.close_fingers(graspable, vis=vis)
                            if not success:
                                continue
                            break
                        else:
                            continue
                    else:
                        success, c = grasp.close_fingers(graspable, vis=vis)
                        if not success:
                            continue
                    c1 = c[0]
                    c2 = c[1]
                    # make sure grasp is wide enough
                    x2 = c2.point
                    if np.linalg.norm(x1 - x2) < self.min_contact_dist:
                        continue
                    # compute friction cone for contact 2
                    cone_succeeded, cone2, n2 = c2.friction_cone(self.num_cone_faces, self.friction_coef)
                    if not cone_succeeded:
                        continue
                    if vis:
                        plt.figure()
                        ax = plt.gca(projection='3d')
                        c1_proxy = c1.plot_friction_cone(color='m')
                        c2_proxy = c2.plot_friction_cone(color='y')
                        ax.view_init(elev=5.0, azim=0)
                        plt.show(block=False)
                        time.sleep(0.5)
                        plt.close()  # lol
                    # check friction cone
                    if PointGraspMetrics3D.force_closure(c1, c2, self.friction_coef):
                        grasps.append(grasp)
        # randomly sample max num grasps from total list
        random.shuffle(grasps)
        return grasps
class GpgGraspSampler(GraspSampler):
    """
    Sample grasps by GPG.
    http://journals.sagepub.com/doi/10.1177/0278364917735594
    """
    def sample_grasps(self, graspable, num_grasps, vis=False, max_num_samples=30, **kwargs):
        """
        Returns a list of candidate grasp configurations for the graspable object using GPG.
        Parameters
        ----------
        graspable : :obj:`GraspableObject3D`
            the object to grasp
        num_grasps : int
            the number of grasps to generate
        vis : bool
            visualization flag (forwarded to the collision checks)
        max_num_samples : int
            maximum number of surface points to sample
        Returns
        -------
        :obj:`list`
            each element is [bottom_center, grasp_normal, major_pc, minor_pc, bottom_center]
        """
        # sampler tuning constants; see the GPG paper linked in the class docstring
        params = {
            'num_rball_points': 27,  # FIXME: the same as meshpy..surface_normal()
            'num_dy': 10,  # number of finger-width translation steps along major_pc
            'dtheta': 10,  # rotation step about minor_pc, unit degree
            'range_dtheta': 90,
            'debug_vis': False,
            'r_ball': self.gripper.hand_height,
            'approach_step': 0.005,
            'max_trail_for_r_ball': 3000,
            'voxel_grid_ratio': 5,  # voxel_grid/sdf.resolution
        }
        # get all surface points
        surface_points, _ = graspable.sdf.surface_points(grid_basis=False)
        all_points = surface_points
        # construct pynt point cloud and voxel grid (downsamples the surface points)
        p_cloud = pcl.PointCloud(surface_points.astype(np.float32))
        voxel = p_cloud.make_voxel_grid_filter()
        voxel.set_leaf_size(*([graspable.sdf.resolution * params['voxel_grid_ratio']] * 3))
        surface_points = voxel.filter().to_array()
        num_surface = surface_points.shape[0]
        sampled_surface_amount = 0
        grasps = []
        processed_potential_grasp = []
        # hand model points in the canonical frame, reused by all collision checks
        hand_points = self.get_hand_points(np.array([0, 0, 0]), np.array([1, 0, 0]), np.array([0, 1, 0]))
        # get all grasps
        while len(grasps) < num_grasps and sampled_surface_amount < max_num_samples:
            # get candidate contacts
            ind = np.random.choice(num_surface, size=1, replace=False)
            selected_surface = surface_points[ind, :].reshape(3)
            # cal major principal curvature
            # r_ball = max(self.gripper.hand_depth, self.gripper.hand_outer_diameter)
            r_ball = params['r_ball']  # FIXME: for some relative small obj, we need to use pre-defined radius
            point_amount = params['num_rball_points']
            max_trial = params['max_trail_for_r_ball']
            # TODO: we can not directly sample from point clouds so we use a relatively small radius.
            ret = self.cal_surface_property(graspable, selected_surface, r_ball,
                                            point_amount, max_trial, vis=params['debug_vis'])
            if ret is None:
                continue
            else:
                old_normal, new_normal, major_pc, minor_pc = ret
            # Judge if the new_normal has the same direction with old_normal, here the correct
            # direction in modified meshpy is point outward.
            if np.dot(old_normal, new_normal) < 0:
                new_normal = -new_normal
                minor_pc = -minor_pc
            for normal_dir in [1.]:  # FIXME: here we can now know the direction of norm, outward
                if params['debug_vis']:
                    # example of show grasp frame
                    self.show_grasp_norm_oneside(selected_surface, new_normal * normal_dir, major_pc * normal_dir,
                                                 minor_pc, scale_factor=0.001)
                    self.show_points(selected_surface, color='g', scale_factor=.002)
                    # self.show_points(all_points)
                # sweep rotations about minor_pc and translations along major_pc,
                # keeping poses where the hand opening is occupied but the hand body is collision free
                # (some magic numbers referred from origin paper)
                potential_grasp = []
                for dtheta in np.arange(-params['range_dtheta'],
                                        params['range_dtheta'] + 1,
                                        params['dtheta']):
                    dy_potentials = []
                    x, y, z = minor_pc
                    rotation = RigidTransform.rotation_from_quaternion(np.array([dtheta / 180 * np.pi, x, y, z]))
                    for dy in np.arange(-params['num_dy'] * self.gripper.finger_width,
                                        (params['num_dy'] + 1) * self.gripper.finger_width,
                                        self.gripper.finger_width):
                        # compute centers and axes
                        tmp_major_pc = np.dot(rotation, major_pc * normal_dir)
                        tmp_grasp_normal = np.dot(rotation, new_normal * normal_dir)
                        tmp_grasp_bottom_center = selected_surface + tmp_major_pc * dy
                        # go back a bite after rotation dtheta and translation dy!
                        tmp_grasp_bottom_center = self.gripper.init_bite * (
                            -tmp_grasp_normal * normal_dir) + tmp_grasp_bottom_center
                        open_points, _ = self.check_collision_square(tmp_grasp_bottom_center, tmp_grasp_normal,
                                                                     tmp_major_pc, minor_pc, graspable,
                                                                     hand_points, "p_open")
                        bottom_points, _ = self.check_collision_square(tmp_grasp_bottom_center, tmp_grasp_normal,
                                                                       tmp_major_pc, minor_pc, graspable,
                                                                       hand_points,
                                                                       "p_bottom")
                        if open_points is True and bottom_points is False:
                            left_points, _ = self.check_collision_square(tmp_grasp_bottom_center, tmp_grasp_normal,
                                                                         tmp_major_pc, minor_pc, graspable,
                                                                         hand_points,
                                                                         "p_left")
                            right_points, _ = self.check_collision_square(tmp_grasp_bottom_center, tmp_grasp_normal,
                                                                          tmp_major_pc, minor_pc, graspable,
                                                                          hand_points,
                                                                          "p_right")
                            if left_points is False and right_points is False:
                                dy_potentials.append([tmp_grasp_bottom_center, tmp_grasp_normal,
                                                      tmp_major_pc, minor_pc])
                    if len(dy_potentials) != 0:
                        # we only take the middle grasp from dy direction.
                        potential_grasp.append(dy_potentials[int(np.ceil(len(dy_potentials) / 2) - 1)])
                # approach along the grasp normal in fixed steps until the hand collides,
                # then back off one step and keep the pose if it is collision free
                approach_dist = self.gripper.hand_depth  # use gripper depth
                num_approaches = int(approach_dist / params['approach_step'])
                for ptg in potential_grasp:
                    for approach_s in range(num_approaches):
                        tmp_grasp_bottom_center = ptg[1] * approach_s * params['approach_step'] + ptg[0]
                        tmp_grasp_normal = ptg[1]
                        tmp_major_pc = ptg[2]
                        minor_pc = ptg[3]
                        is_collide = self.check_collide(tmp_grasp_bottom_center, tmp_grasp_normal,
                                                        tmp_major_pc, minor_pc, graspable, hand_points)
                        if is_collide:
                            # if collide, go back one step to get a collision free hand position
                            tmp_grasp_bottom_center += (-tmp_grasp_normal) * params['approach_step']
                            # final check
                            open_points, _ = self.check_collision_square(tmp_grasp_bottom_center,
                                                                         tmp_grasp_normal,
                                                                         tmp_major_pc, minor_pc, graspable,
                                                                         hand_points, "p_open")
                            is_collide = self.check_collide(tmp_grasp_bottom_center, tmp_grasp_normal,
                                                            tmp_major_pc, minor_pc, graspable, hand_points)
                            if open_points and not is_collide:
                                processed_potential_grasp.append([tmp_grasp_bottom_center, tmp_grasp_normal,
                                                                  tmp_major_pc, minor_pc, tmp_grasp_bottom_center])
                                self.show_points(selected_surface, color='r', scale_factor=.005)
                                if params['debug_vis']:
                                    logger.info('usefull grasp sample point original: %s', selected_surface)
                                    self.check_collision_square(tmp_grasp_bottom_center, tmp_grasp_normal,
                                                                tmp_major_pc, minor_pc, graspable, hand_points,
                                                                "p_open", vis=True)
                            break
                logger.info("processed_potential_grasp %d", len(processed_potential_grasp))
            sampled_surface_amount += 1
            logger.info("current amount of sampled surface %d", sampled_surface_amount)
            if not sampled_surface_amount % 20:  # params['debug_vis']:
                # NOTE(review): this returns from inside the sampling loop on every 20th
                # sampled surface, so at most 20 surfaces are ever processed and the
                # num_grasps / max_num_samples conditions above are effectively bypassed —
                # looks like a debugging shortcut; confirm before relying on the budget.
                # self.show_all_grasps(all_points, processed_potential_grasp)
                # self.show_points(all_points)
                # mlab.show()
                return processed_potential_grasp
        #
        # g = ParallelJawPtGrasp3D(ParallelJawPtGrasp3D.configuration_from_params(
        #     tmp_grasp_center,
        #     tmp_major_pc,
        #     self.gripper.max_width))
        # grasps.append(g)
        return processed_potential_grasp
class PointGraspSampler(GraspSampler):
    """
    Sample grasps by PointGraspSampler
    TODO: since gpg sampler changed a lot, this class need to totally rewrite
    """
    def sample_grasps(self, graspable, num_grasps, vis=False, max_num_samples=1000, **kwargs):
        """
        Returns a list of candidate grasp configurations for the graspable object.
        Parameters
        ----------
        graspable : :obj:`GraspableObject3D`
            the object to grasp
        num_grasps : int
            the number of grasps to generate
        vis : bool
            visualization flag (forwarded to the collision checks)
        max_num_samples : int
            maximum number of surface points to sample
        Returns
        -------
        :obj:`list`
            each element is [bottom_center, grasp_normal, major_pc, minor_pc]
        """
        # sampler tuning constants
        params = {
            'num_rball_points': 27,  # FIXME: the same as meshpy..surface_normal()
            'num_dy': 10,  # number of finger-width translation steps along major_pc
            'dtheta': 10,  # rotation step about minor_pc, unit degree
            'range_dtheta': 90,
            'debug_vis': False,
            'approach_step': 0.005,
            'max_trail_for_r_ball': 3000,
            'voxel_grid_ratio': 5,  # voxel_grid/sdf.resolution
        }
        # get all surface points
        surface_points, _ = graspable.sdf.surface_points(grid_basis=False)
        all_points = surface_points
        # construct pynt point cloud and voxel grid (downsamples the surface points)
        p_cloud = pcl.PointCloud(surface_points.astype(np.float32))
        voxel = p_cloud.make_voxel_grid_filter()
        voxel.set_leaf_size(*([graspable.sdf.resolution * params['voxel_grid_ratio']] * 3))
        surface_points = voxel.filter().to_array()
        num_surface = surface_points.shape[0]
        sampled_surface_amount = 0
        grasps = []
        processed_potential_grasp = []
        # hand model points in the canonical frame, reused by all collision checks
        hand_points = self.get_hand_points(np.array([0, 0, 0]), np.array([1, 0, 0]), np.array([0, 1, 0]))
        # get all grasps
        while len(grasps) < num_grasps and sampled_surface_amount < max_num_samples:
            # get candidate contacts
            # begin of modification 5: Gaussian over height, select more point in the middle
            # we can use the top part of the point clouds to generate more sample points
            min_height = min(surface_points[:, 2])
            max_height = max(surface_points[:, 2])
            selected_height = min_height + np.random.normal(3 * (max_height - min_height) / 4,
                                                            (max_height - min_height) / 6)
            ind_10 = np.argsort(abs(surface_points[:, 2] - selected_height))[:10]
            ind = ind_10[np.random.choice(len(ind_10), 1)]
            # end of modification 5
            # ind = np.random.choice(num_surface, size=1, replace=False)
            selected_surface = surface_points[ind, :].reshape(3)
            # cal major principal curvature
            r_ball = max(self.gripper.hand_depth, self.gripper.hand_outer_diameter)
            point_amount = params['num_rball_points']
            max_trial = params['max_trail_for_r_ball']
            # TODO: we can not directly sample from point clouds so we use a relatively small radius.
            ret = self.cal_surface_property(graspable, selected_surface, r_ball,
                                            point_amount, max_trial, vis=params['debug_vis'])
            if ret is None:
                continue
            else:
                old_normal, new_normal, major_pc, minor_pc = ret
            for normal_dir in [-1., 1.]:  # FIXME: here we do not know the direction of the object normal
                grasp_bottom_center = self.gripper.init_bite * new_normal * -normal_dir + selected_surface
                new_normal = normal_dir * new_normal
                major_pc = normal_dir * major_pc
                if params['debug_vis']:
                    # example of show grasp frame
                    self.show_grasp_norm_oneside(selected_surface, new_normal, major_pc,
                                                 minor_pc, scale_factor=0.001)
                    self.show_points(selected_surface, color='g', scale_factor=.002)
                # sweep rotations about minor_pc and translations along major_pc
                # (some magic numbers referred from origin paper)
                potential_grasp = []
                extra_potential_grasp = []
                for dtheta in np.arange(-params['range_dtheta'],
                                        params['range_dtheta'] + 1,
                                        params['dtheta']):
                    dy_potentials = []
                    x, y, z = minor_pc
                    rotation = RigidTransform.rotation_from_quaternion(np.array([dtheta / 180 * np.pi, x, y, z]))
                    for dy in np.arange(-params['num_dy'] * self.gripper.finger_width,
                                        (params['num_dy'] + 1) * self.gripper.finger_width,
                                        self.gripper.finger_width):
                        # compute centers and axes
                        tmp_major_pc = np.dot(rotation, major_pc)
                        tmp_grasp_normal = np.dot(rotation, new_normal)
                        tmp_grasp_bottom_center = grasp_bottom_center + tmp_major_pc * dy
                        open_points, _ = self.check_collision_square(tmp_grasp_bottom_center, tmp_grasp_normal,
                                                                     tmp_major_pc, minor_pc, graspable,
                                                                     hand_points, "p_open")
                        bottom_points, _ = self.check_collision_square(tmp_grasp_bottom_center, tmp_grasp_normal,
                                                                       tmp_major_pc, minor_pc, graspable,
                                                                       hand_points,
                                                                       "p_bottom")
                        if open_points is True and bottom_points is False:
                            left_points, _ = self.check_collision_square(tmp_grasp_bottom_center, tmp_grasp_normal,
                                                                         tmp_major_pc, minor_pc, graspable,
                                                                         hand_points,
                                                                         "p_left")
                            right_points, _ = self.check_collision_square(tmp_grasp_bottom_center, tmp_grasp_normal,
                                                                          tmp_major_pc, minor_pc, graspable,
                                                                          hand_points,
                                                                          "p_right")
                            if left_points is False and right_points is False:
                                dy_potentials.append([tmp_grasp_bottom_center, tmp_grasp_normal,
                                                      tmp_major_pc, minor_pc])
                    if len(dy_potentials) != 0:
                        # we only take the middle grasp from dy direction.
                        # potential_grasp += dy_potentials
                        potential_grasp.append(dy_potentials[int(np.ceil(len(dy_potentials) / 2) - 1)])
                # get more potential_grasp by moving along minor_pc
                if len(potential_grasp) != 0:
                    self.show_points(selected_surface, color='r', scale_factor=.005)
                    for pt in potential_grasp:
                        for dz in range(-5, 5):
                            new_center = minor_pc * dz * 0.01 + pt[0]
                            extra_potential_grasp.append([new_center, pt[1], pt[2], pt[3]])
                approach_dist = self.gripper.hand_depth  # use gripper depth
                num_approaches = int(approach_dist // params['approach_step'])
                for ptg in extra_potential_grasp:
                    for _ in range(num_approaches):
                        # NOTE(review): unlike GpgGraspSampler, this pose does not scale
                        # with the loop index, so every iteration tests the same position
                        # one approach_step along the normal — confirm this is intended.
                        tmp_grasp_bottom_center = ptg[1] * params['approach_step'] + ptg[0]
                        tmp_grasp_normal = ptg[1]
                        tmp_major_pc = ptg[2]
                        minor_pc = ptg[3]
                        not_collide = self.check_approach_grasp(tmp_grasp_bottom_center, tmp_grasp_normal,
                                                                tmp_major_pc, minor_pc, graspable, hand_points)
                        if not not_collide:
                            # if collide, go back one step to get a collision free hand position
                            tmp_grasp_bottom_center = -ptg[1] * params['approach_step'] + ptg[0]
                            # final check
                            open_points, _ = self.check_collision_square(tmp_grasp_bottom_center,
                                                                         tmp_grasp_normal,
                                                                         tmp_major_pc, minor_pc, graspable,
                                                                         hand_points, "p_open")
                            not_collide = self.check_approach_grasp(tmp_grasp_bottom_center, tmp_grasp_normal,
                                                                    tmp_major_pc, minor_pc, graspable, hand_points)
                            if open_points and not_collide:
                                processed_potential_grasp.append([tmp_grasp_bottom_center, tmp_grasp_normal,
                                                                  tmp_major_pc, minor_pc])
                                self.show_points(selected_surface, color='r', scale_factor=.005)
                                if params['debug_vis']:
                                    logger.info('usefull grasp sample point original: %s', selected_surface)
                                    self.check_collision_square(tmp_grasp_bottom_center, tmp_grasp_normal,
                                                                tmp_major_pc, minor_pc, graspable, hand_points,
                                                                "p_open", vis=True)
                            break
                logger.info("processed_potential_grasp %d", len(processed_potential_grasp))
            sampled_surface_amount += 1
            logger.info("current amount of sampled surface %d", sampled_surface_amount)
            if not sampled_surface_amount % 60:  # params['debug_vis']:
                self.show_all_grasps(all_points, processed_potential_grasp)
        #
        # g = ParallelJawPtGrasp3D(ParallelJawPtGrasp3D.configuration_from_params(
        #     tmp_grasp_center,
        #     tmp_major_pc,
        #     self.gripper.max_width))
        # grasps.append(g)
        return processed_potential_grasp
class OldPointGraspSampler(GraspSampler):
    """
    Sample grasps by PointGraspSampler
    """
    def sample_grasps(self, graspable, num_grasps, vis=False, max_num_samples=1000, **kwargs):
        """
        Returns a list of candidate grasps found by chained sampling from random surface seeds.
        Parameters
        ----------
        graspable : :obj:`GraspableObject3D`
            the object to grasp
        num_grasps : int
            the number of grasps to generate
        vis : bool
            visualization flag, forwarded to sample_chain
        max_num_samples : int
            budget of chain steps before sampling stops
        Returns
        -------
        :obj:`list`
            self.grasps accumulated during sampling
            (NOTE(review): nothing visible here appends to self.grasps — presumably
            sample_chain, defined elsewhere on this class, does; confirm)
        """
        # sampler tuning constants
        params = {
            'num_rball_points': 27,  # FIXME: the same as meshpy..surface_normal()
            'num_dy': 0.3,
            'range_dtheta': 0.30,
            'max_chain_length': 20,
            'max_retry_times': 100
        }
        # get all surface points
        surface_points, _ = graspable.sdf.surface_points(grid_basis=False)
        num_surface = surface_points.shape[0]
        i = 0
        self.grasps = []
        # ____count = 0
        # get all grasps
        while len(self.grasps) < num_grasps and i < max_num_samples:
            # print('sample times:', ____count)
            # ____count += 1
            # begin of modification 5: Gaussian over height
            # we can use the top part of the point clouds to generate more sample points
            # min_height = min(surface_points[:, 2])
            # max_height = max(surface_points[:, 2])
            # selected_height = max_height - abs(np.random.normal(max_height, (max_height - min_height)/3)
            #                                    - max_height)
            # ind_10 = np.argsort(abs(surface_points[:, 2] - selected_height))[:10]
            # ind = ind_10[np.random.choice(len(ind_10), 1)]
            # end of modification 5
            ind = np.random.choice(num_surface, size=1, replace=False)
            grasp_bottom_center = surface_points[ind, :]
            grasp_bottom_center = grasp_bottom_center.reshape(3)
            # walk a chain of surface points from the random seed; each step asks
            # sample_chain for the next point (None terminates the chain early)
            for ind in range(params['max_chain_length']):
                # if not graspable.sdf.on_surface(graspable.sdf.transform_pt_obj_to_grid(grasp_bottom_center))[0]:
                #     print('first damn it!')
                #     from IPython import embed; embed()
                new_grasp_bottom_center = self.sample_chain(grasp_bottom_center, graspable,
                                                            params, vis)
                if new_grasp_bottom_center is None:
                    # chain broke early: charge the steps taken so far to the budget
                    i += ind
                    break
                else:
                    grasp_bottom_center = new_grasp_bottom_center
            else:
                # full-length chain: charge the whole chain to the budget
                i += params['max_chain_length']
            print('Chain broken, length:', ind, 'amount:', len(self.grasps))
        return self.grasps
class GpgGraspSamplerPcl(GraspSampler):
    """
    Sample grasps by GPG with pcl directly.
    http://journals.sagepub.com/doi/10.1177/0278364917735594
    """
    def sample_grasps(self, point_cloud, points_for_sample, all_normal, num_grasps=20, max_num_samples=200,
                      show_final_grasp=False,
                      **kwargs):
        """
        Returns a list of candidate grasp configurations sampled directly from a point cloud.
        Parameters
        ----------
        point_cloud : :obj:`pcl.PointCloud`
            scene/object cloud used for neighborhood and collision queries
        points_for_sample : :obj:`numpy.ndarray`
            (N, 3) points from which grasp seed points are drawn
        all_normal : :obj:`numpy.ndarray`
            per-point surface normals aligned with point_cloud
        num_grasps : int
            the number of grasps to generate
        max_num_samples : int
            maximum number of seed points to sample
        show_final_grasp : bool
            if True, visualize the resulting grasps with mayavi
        Returns
        -------
        :obj:`list`
            each element is [bottom_center, grasp_normal, major_pc, minor_pc,
            bottom_center_modified (table-collision-adjusted)]
        """
        # sampler tuning constants; see the GPG paper linked in the class docstring
        params = {
            'num_rball_points': 27,  # FIXME: the same as meshpy..surface_normal()
            'num_dy': 10,  # number of finger-width translation steps along major_pc
            'dtheta': 10,  # rotation step about minor_pc, unit degree
            'range_dtheta': 90,
            'debug_vis': False,
            'r_ball': self.gripper.hand_height,
            'approach_step': 0.005,
            'max_trail_for_r_ball': 1000,
            'voxel_grid_ratio': 5,  # voxel_grid/sdf.resolution
        }
        # get all surface points
        all_points = point_cloud.to_array()
        sampled_surface_amount = 0
        # NOTE(review): grasps is never appended to in this method, so the
        # `len(grasps) < num_grasps` condition below is always true and the loop
        # exits only via the explicit return near the end — confirm intent.
        grasps = []
        processed_potential_grasp = []
        # hand model points in the canonical frame, reused by all collision checks
        hand_points = self.get_hand_points(np.array([0, 0, 0]), np.array([1, 0, 0]), np.array([0, 1, 0]))
        # get all grasps
        while len(grasps) < num_grasps and sampled_surface_amount < max_num_samples:
            # begin of modification 5: Gaussian over height
            # we can use the top part of the point clouds to generate more sample points
            # min_height = min(all_points[:, 2])
            # max_height = max(all_points[:, 2])
            # selected_height = max_height - abs(np.random.normal(max_height, (max_height - min_height)/3)
            #                                    - max_height)
            # ind_10 = np.argsort(abs(all_points[:, 2] - selected_height))[:10]
            # ind = ind_10[np.random.choice(len(ind_10), 1)]
            # end of modification 5
            # for ros, we needed to judge if the robot is at HOME
            if rospy.get_param("/robot_at_home") == "false":
                robot_at_home = False
            else:
                robot_at_home = True
            if not robot_at_home:
                rospy.loginfo("robot is moving! wait untill it go home. Return empty gpg!")
                return []
            # BUGFIX: scipy.random was only an alias of numpy.random and was removed
            # in SciPy 1.9; reseed numpy directly. Still important: without this,
            # worker processes inherit identical pseudo-random sequences.
            np.random.seed()
            ind = np.random.choice(points_for_sample.shape[0], size=1, replace=False)
            selected_surface = points_for_sample[ind, :].reshape(3, )
            if show_final_grasp:
                mlab.points3d(selected_surface[0], selected_surface[1], selected_surface[2],
                              color=(1, 0, 0), scale_factor=0.005)
            # cal major principal curvature
            # r_ball = params['r_ball']  # FIXME: for some relative small obj, we need to use pre-defined radius
            r_ball = max(self.gripper.hand_outer_diameter - self.gripper.finger_width, self.gripper.hand_depth,
                         self.gripper.hand_height / 2.0)
            # point_amount = params['num_rball_points']
            # max_trial = params['max_trail_for_r_ball']
            # TODO: we can not directly sample from point clouds so we use a relatively small radius.
            # accumulate the normal outer-product matrix M over the r_ball neighborhood;
            # its eigenvectors give the local surface frame
            M = np.zeros((3, 3))
            # neighbor = selected_surface + 2 * (np.random.rand(3) - 0.5) * r_ball
            selected_surface_pc = pcl.PointCloud(selected_surface.reshape(1, 3))
            kd = point_cloud.make_kdtree_flann()
            kd_indices, sqr_distances = kd.radius_search_for_cloud(selected_surface_pc, r_ball, 100)
            for pt_i in range(len(kd_indices[0])):
                if sqr_distances[0, pt_i] != 0:
                    # neighbor = point_cloud[kd_indices]
                    normal = all_normal[kd_indices[0, pt_i]]
                    normal = normal.reshape(-1, 1)
                    if np.linalg.norm(normal) != 0:
                        normal /= np.linalg.norm(normal)
                        M += np.matmul(normal, normal.T)
            if sum(sum(M)) == 0:
                print("M matrix is empty as there is no point near the neighbour.")
                print("Here is a bug, if points amount is too little it will keep trying and never go outside.")
                continue
            else:
                logger.info("Selected a good sample point.")
            eigval, eigvec = np.linalg.eig(M)  # compared computed normal
            minor_pc = eigvec[:, np.argmin(eigval)].reshape(3)  # minor principal curvature !!! Here should use column!
            minor_pc /= np.linalg.norm(minor_pc)
            new_normal = eigvec[:, np.argmax(eigval)].reshape(3)  # estimated surface normal !!! Here should use column!
            new_normal /= np.linalg.norm(new_normal)
            major_pc = np.cross(minor_pc, new_normal)  # major principal curvature
            if np.linalg.norm(major_pc) != 0:
                major_pc = major_pc / np.linalg.norm(major_pc)
            # Judge if the new_normal has the same direction with old_normal, here the correct
            # direction in modified meshpy is point outward.
            if np.dot(all_normal[ind], new_normal) < 0:
                new_normal = -new_normal
                minor_pc = -minor_pc
            for normal_dir in [1]:  # FIXED: we know the direction of norm is outward as we know the camera pos
                if params['debug_vis']:
                    # example of show grasp frame
                    self.show_grasp_norm_oneside(selected_surface, new_normal * normal_dir, major_pc * normal_dir,
                                                 minor_pc, scale_factor=0.001)
                    self.show_points(selected_surface, color='g', scale_factor=.002)
                    self.show_points(all_points)
                    # show real norm direction: if new_norm has very diff than pcl cal norm, then maybe a bug.
                    self.show_line(selected_surface, (selected_surface + all_normal[ind] * 0.05).reshape(3))
                    mlab.show()
                # sweep rotations about minor_pc and translations along major_pc
                # (some magic numbers referred from origin paper)
                potential_grasp = []
                for dtheta in np.arange(-params['range_dtheta'],
                                        params['range_dtheta'] + 1,
                                        params['dtheta']):
                    dy_potentials = []
                    x, y, z = minor_pc
                    dtheta = np.float64(dtheta)
                    rotation = RigidTransform.rotation_from_quaternion(np.array([dtheta / 180 * np.pi, x, y, z]))
                    for dy in np.arange(-params['num_dy'] * self.gripper.finger_width,
                                        (params['num_dy'] + 1) * self.gripper.finger_width,
                                        self.gripper.finger_width):
                        # compute centers and axes
                        tmp_major_pc = np.dot(rotation, major_pc * normal_dir)
                        tmp_grasp_normal = np.dot(rotation, new_normal * normal_dir)
                        tmp_grasp_bottom_center = selected_surface + tmp_major_pc * dy
                        # go back a bite after rotation dtheta and translation dy!
                        tmp_grasp_bottom_center = self.gripper.init_bite * (
                            -tmp_grasp_normal * normal_dir) + tmp_grasp_bottom_center
                        open_points, _ = self.check_collision_square(tmp_grasp_bottom_center, tmp_grasp_normal,
                                                                     tmp_major_pc, minor_pc, all_points,
                                                                     hand_points, "p_open")
                        bottom_points, _ = self.check_collision_square(tmp_grasp_bottom_center, tmp_grasp_normal,
                                                                       tmp_major_pc, minor_pc, all_points,
                                                                       hand_points,
                                                                       "p_bottom")
                        if open_points is True and bottom_points is False:
                            left_points, _ = self.check_collision_square(tmp_grasp_bottom_center, tmp_grasp_normal,
                                                                         tmp_major_pc, minor_pc, all_points,
                                                                         hand_points,
                                                                         "p_left")
                            right_points, _ = self.check_collision_square(tmp_grasp_bottom_center, tmp_grasp_normal,
                                                                          tmp_major_pc, minor_pc, all_points,
                                                                          hand_points,
                                                                          "p_right")
                            if left_points is False and right_points is False:
                                dy_potentials.append([tmp_grasp_bottom_center, tmp_grasp_normal,
                                                      tmp_major_pc, minor_pc])
                    if len(dy_potentials) != 0:
                        # we only take the middle grasp from dy direction.
                        center_dy = dy_potentials[int(np.ceil(len(dy_potentials) / 2) - 1)]
                        # we check if the gripper has a potential to collide with the table
                        # by check if the gripper is grasp from a down to top direction
                        finger_top_pos = center_dy[0] + center_dy[1] * self.gripper.hand_depth
                        # [- self.gripper.hand_depth * 0.5] means we grasp objects as a angel larger than 30 degree
                        if finger_top_pos[2] < center_dy[0][2] - self.gripper.hand_depth * 0.5:
                            potential_grasp.append(center_dy)
                # approach along the grasp normal in fixed steps until the hand collides,
                # then back off and adjust for table clearance
                approach_dist = self.gripper.hand_depth  # use gripper depth
                num_approaches = int(approach_dist / params['approach_step'])
                for ptg in potential_grasp:
                    for approach_s in range(num_approaches):
                        tmp_grasp_bottom_center = ptg[1] * approach_s * params['approach_step'] + ptg[0]
                        tmp_grasp_normal = ptg[1]
                        tmp_major_pc = ptg[2]
                        minor_pc = ptg[3]
                        is_collide = self.check_collide(tmp_grasp_bottom_center, tmp_grasp_normal,
                                                        tmp_major_pc, minor_pc, point_cloud, hand_points)
                        if is_collide:
                            # if collide, go back one step to get a collision free hand position
                            tmp_grasp_bottom_center += (-tmp_grasp_normal) * params['approach_step'] * 3
                            # times 3 means we want the grasp go back a little bit more.
                            # here we check if the gripper collide with the table.
                            hand_points_ = self.get_hand_points(tmp_grasp_bottom_center,
                                                                tmp_grasp_normal,
                                                                tmp_major_pc)[1:]
                            min_finger_end = hand_points_[:, 2].min()
                            min_finger_end_pos_ind = np.where(hand_points_[:, 2] == min_finger_end)[0][0]
                            safety_dis_above_table = 0.01
                            if min_finger_end < safety_dis_above_table:
                                min_finger_pos = hand_points_[min_finger_end_pos_ind]  # the lowest point in a gripper
                                # project the lowest gripper point along the approach
                                # direction onto the table plane z = 0
                                x = -min_finger_pos[2] * tmp_grasp_normal[0] / tmp_grasp_normal[2] + min_finger_pos[0]
                                y = -min_finger_pos[2] * tmp_grasp_normal[1] / tmp_grasp_normal[2] + min_finger_pos[1]
                                p_table = np.array([x, y, 0])  # the point that on the table
                                # BUGFIX: np.linalg.norm([a, b]) stacks a and b into a
                                # 2x3 array and returns its Frobenius norm
                                # sqrt(||a||^2 + ||b||^2), not the distance between the
                                # points; the retreat distance is ||a - b||.
                                dis_go_back = np.linalg.norm(min_finger_pos - p_table) + safety_dis_above_table
                                tmp_grasp_bottom_center_modify = tmp_grasp_bottom_center - tmp_grasp_normal * dis_go_back
                            else:
                                # if the grasp is not collide with the table, do not change the grasp
                                tmp_grasp_bottom_center_modify = tmp_grasp_bottom_center
                            # final check
                            _, open_points = self.check_collision_square(tmp_grasp_bottom_center_modify,
                                                                         tmp_grasp_normal,
                                                                         tmp_major_pc, minor_pc, all_points,
                                                                         hand_points, "p_open")
                            is_collide = self.check_collide(tmp_grasp_bottom_center_modify, tmp_grasp_normal,
                                                            tmp_major_pc, minor_pc, all_points, hand_points)
                            if (len(open_points) > 10) and not is_collide:
                                # here 10 set the minimal points in a grasp, we can set a parameter later
                                processed_potential_grasp.append([tmp_grasp_bottom_center, tmp_grasp_normal,
                                                                  tmp_major_pc, minor_pc,
                                                                  tmp_grasp_bottom_center_modify])
                                if params['debug_vis']:
                                    self.show_points(selected_surface, color='r', scale_factor=.005)
                                    logger.info('usefull grasp sample point original: %s', selected_surface)
                                    self.check_collision_square(tmp_grasp_bottom_center_modify, tmp_grasp_normal,
                                                                tmp_major_pc, minor_pc, all_points, hand_points,
                                                                "p_open", vis=True)
                            break
                logger.info("processed_potential_grasp %d", len(processed_potential_grasp))
            sampled_surface_amount += 1
            logger.info("current amount of sampled surface %d", sampled_surface_amount)
            print("current amount of sampled surface:", sampled_surface_amount)
            if params['debug_vis']:  # not sampled_surface_amount % 5:
                if len(all_points) > 10000:
                    pc = pcl.PointCloud(all_points)
                    voxel = pc.make_voxel_grid_filter()
                    voxel.set_leaf_size(0.01, 0.01, 0.01)
                    point_cloud = voxel.filter()
                    all_points = point_cloud.to_array()
                self.show_all_grasps(all_points, processed_potential_grasp)
                self.show_points(all_points, scale_factor=0.008)
                mlab.show()
            print("The grasps number got by modified GPG:", len(processed_potential_grasp))
            if len(processed_potential_grasp) >= num_grasps or sampled_surface_amount >= max_num_samples:
                if show_final_grasp:
                    self.show_all_grasps(all_points, processed_potential_grasp)
                    self.show_points(all_points, scale_factor=0.002)
                    mlab.points3d(0, 0, 0, scale_factor=0.01, color=(0, 1, 0))
                    table_points = np.array([[-1, 1, 0], [1, 1, 0], [1, -1, 0], [-1, -1, 0]]) * 0.5
                    triangles = [(1, 2, 3), (0, 1, 3)]
                    mlab.triangular_mesh(table_points[:, 0], table_points[:, 1], table_points[:, 2],
                                         triangles, color=(0.8, 0.8, 0.8), opacity=0.5)
                    mlab.show()
                return processed_potential_grasp
        #
        # g = ParallelJawPtGrasp3D(ParallelJawPtGrasp3D.configuration_from_params(
        #     tmp_grasp_center,
        #     tmp_major_pc,
        #     self.gripper.max_width))
        # grasps.append(g)
        return processed_potential_grasp
class VacuumGraspSampler(GraspSampler):
    """Placeholder for a vacuum (suction) grasp sampler — not implemented.

    NOTE(review): the original docstring ("sample grasps by sampling pairs
    of points on the object surface uniformly at random") appears copied
    from a point-pair sampler; the body is just ``pass``. Confirm intended
    behavior before implementing.
    """
    pass
######################################################################################
######################################################################################
######################################################################################
'''
waiting for being completed
coded by Hui Zhang (hui.zhang@kuleuven.be 07.10.2021)
'''
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
450,
66,
1330,
9738,
48526,
11,
12531,
24396,
198,
11748,
4866,
198,
11748,
18931,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
285,
48... | 1.881753 | 37,092 |
# -*- coding: utf8 -*-
__author__ = 'sergey'
from dedupsqlfs.db.mysql.table import Table
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
23,
532,
9,
12,
198,
198,
834,
9800,
834,
796,
705,
2655,
39608,
6,
198,
198,
6738,
4648,
4739,
80,
1652,
82,
13,
9945,
13,
28744,
13976,
13,
11487,
1330,
8655,
198
] | 2.275 | 40 |
from flask import Blueprint, render_template, request, redirect, flash, jsonify
from flask import current_app, url_for
from flask import Response
from flask_wtf.file import FileField, FileRequired, FileAllowed
from flask_user import login_required, roles_required
from wtforms import StringField, SelectField
from wtforms.validators import DataRequired, Length
from flask_wtf import FlaskForm
from werkzeug.utils import secure_filename
import requests
from pandas import DataFrame
from pandas.io.json import json_normalize
import json
mod = Blueprint('site', __name__, template_folder='templates')
# TODO: Needs to be split in multiple files - blueprints already in place so it's simple
@mod.route('/')
# The Members page is only accessible to authenticated users
@mod.route('/members')
@login_required # Use of @login_required decorator
# The Admin page requires an 'Admin' role.
@mod.route('/admin')
@roles_required('Admin') # Use of @roles_required decorator
@mod.route('/datasets', methods=['POST'])
@mod.route('/datasets', methods=['GET'])
@mod.route('/datasets/<int:dataset_id>')
@mod.route('/datasets/<int:dataset_id>/delete')
@mod.route('/models', methods=['GET'])
@mod.route('/models', methods=['POST'])
@mod.route('/models/<int:model_id>')
@mod.route('/ml', methods=['GET'])
@mod.route('/ml/train', methods=['GET', 'POST'])
@mod.route('/ml/predict', methods=['GET', 'POST'])
@mod.route('/labeling', methods=['GET', 'POST'])
| [
6738,
42903,
1330,
39932,
11,
8543,
62,
28243,
11,
2581,
11,
18941,
11,
7644,
11,
33918,
1958,
198,
6738,
42903,
1330,
1459,
62,
1324,
11,
19016,
62,
1640,
198,
6738,
42903,
1330,
18261,
198,
198,
6738,
42903,
62,
86,
27110,
13,
7753,... | 3.036961 | 487 |
#!/usr/bin/env python3
"""
Challenge:
Have the function ChessboardTraveling(str) read str which will be a string
consisting of the location of a space on a standard 8x8 chess board with no
pieces on the board along with another space on the chess board. The structure
of str will be the following: "(x y)(a b)" where (x y) represents the position
you are currently on with x and y ranging from 1 to 8 and (a b) represents
some other space on the chess board with a and b also ranging from 1 to 8
where a > x and b > y. Your program should determine how many ways there are
of traveling from (x y) on the board to (a b) moving only up and to the right.
For example:
if str is (1 1)(2 2) then your program should output 2 because there are only
two possible ways to travel from space (1 1) on a chessboard to space (2 2)
while making only moves up and to the right.
Hard challenges are worth 15 points and you are not timed for them.
Sample Test Cases:
Case 1:
Input:"(1 1)(3 3)"
Output:6
Case 2:
Input:"(2 2)(4 3)"
Output:3
"""
# Precomputed path counts for an 8x8 board: routes[i][j] equals the number of
# monotone (up/right-only) lattice paths across an i-by-j rectangle, i.e. the
# binomial coefficient C(i + j, i) -- e.g. routes[2][2] == 6, routes[7][7] ==
# 3432. The one exception is routes[0][0] == 0 rather than C(0, 0) == 1; the
# problem statement requires a > x and b > y, so that cell is never consulted.
routes = [
    [0, 1, 1, 1, 1, 1, 1, 1],
    [1, 2, 3, 4, 5, 6, 7, 8],
    [1, 3, 6, 10, 15, 21, 28, 36],
    [1, 4, 10, 20, 35, 56, 84, 120],
    [1, 5, 15, 35, 70, 126, 210, 330],
    [1, 6, 21, 56, 126, 252, 462, 792],
    [1, 7, 28, 84, 210, 462, 924, 1716],
    [1, 8, 36, 120, 330, 792, 1716, 3432]
]
def chessboardTraveling(coords):
    """Count monotone (right/up-only) paths between two chessboard squares.

    BUG FIX: the ``__main__`` guard below called ``chessboardTraveling`` but
    no such function was defined anywhere in the module, so running the
    script raised NameError. This self-contained implementation (which also
    subsumes the commented-out ``procCoordinates`` parser) restores it.

    :param coords: string of the form "(x y)(a b)" with 1 <= x < a <= 8
        and 1 <= y < b <= 8.
    :return: the number of paths, i.e. C((a - x) + (b - y), a - x).

    >>> chessboardTraveling("(1 1)(3 3)")
    6
    >>> chessboardTraveling("(2 2)(4 3)")
    3
    """
    # Parse "(x y)(a b)" into four integers.
    x, y, a, b = (int(tok) for tok in
                  coords.replace('(', ' ').replace(')', ' ').split())
    dx, dy = a - x, b - y
    # Compute C(dx + dy, dx) iteratively; every partial product of a binomial
    # coefficient is an exact integer, so floor division loses no precision.
    paths = 1
    for i in range(1, dx + 1):
        paths = paths * (dy + i) // i
    return paths


if __name__ == '__main__':
    print(chessboardTraveling(input('enter coordinates:> ')))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
37811,
198,
41812,
3540,
25,
198,
11980,
262,
2163,
25774,
3526,
33074,
278,
7,
2536,
8,
1100,
965,
543,
481,
307,
257,
4731,
198,
5936,
9665,
286,
262,
4067,
286,
257,
2272... | 2.663446 | 621 |
import sdre
from sdre.helper import *
from scipy.optimize import minimize, Bounds, NonlinearConstraint
# from socket import gethostname
from time import time
# dimension, num of samples, dimension of theta
d = 5
n = 500
dimTheta = d
# construct feature function f
f = lambda X: vstack([X,X**2])
b = dimTheta*2
# log P(x;theta) model, unnormalized
grad_logp = grad(logpBar)
# Dual objective function
obj = lambda para: mean(-log(-para[:n]) - 1) - mean(para[:n])
grad_obj = grad(obj)
# theta: parameter of the unormalized log-density function, XData: dataset
if __name__ == '__main__':
digit.seed(1)
# Dataset
theta_star = tile(pi,[5,1])
XData = digit.standard_normal((d, n)) + theta_star
# Run the main programme
theta_hat = trial(-ones([d,1])*.2, XData)
print('theta_hat', theta_hat)
print('theta_star', theta_star.flatten())
| [
11748,
45647,
260,
201,
198,
6738,
45647,
260,
13,
2978,
525,
1330,
1635,
201,
198,
6738,
629,
541,
88,
13,
40085,
1096,
1330,
17775,
11,
347,
3733,
11,
8504,
29127,
3103,
2536,
2913,
201,
198,
2,
422,
17802,
1330,
651,
4774,
3672,
... | 2.471074 | 363 |
from onfleet.request import Request | [
6738,
319,
33559,
13,
25927,
1330,
19390
] | 5 | 7 |
import torch
import numpy as np
from .types import assert_numpy
clip_grad = _ClipGradient.apply
class LossReporter:
"""
Simple reporter use for reporting losses and plotting them.
""" | [
11748,
28034,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
764,
19199,
1330,
6818,
62,
77,
32152,
628,
628,
198,
15036,
62,
9744,
796,
4808,
2601,
541,
42731,
1153,
13,
39014,
628,
198,
4871,
22014,
6207,
4337,
25,
198,
220,
22... | 3.074627 | 67 |
# -*- coding: utf-8 -*-
#from django.shortcuts import get_object_or_404
from rest_framework.response import Response
from rest_framework import viewsets
from rest_framework import status as http_status
import datetime
import serializers as api_serializers
import wordpress_rest.apps.wordpress.models as wp_models
#
# Posts
#
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
6738,
42625,
14208,
13,
19509,
23779,
1330,
651,
62,
15252,
62,
273,
62,
26429,
198,
198,
6738,
1334,
62,
30604,
13,
26209,
1330,
18261,
198,
6738,
1334,
62,
30604,
... | 3.27619 | 105 |
from sensors.sensors import sense_characteristics, sense_pedestrians | [
6738,
15736,
13,
82,
641,
669,
1330,
2565,
62,
22769,
3969,
11,
2565,
62,
9124,
395,
19151
] | 4 | 17 |
__version__ = '0.1.0'
__author__ = 'pastchick3'
from .src import *
| [
834,
9641,
834,
796,
705,
15,
13,
16,
13,
15,
6,
198,
834,
9800,
834,
796,
705,
30119,
354,
624,
18,
6,
198,
198,
6738,
764,
10677,
1330,
1635,
198
] | 2.266667 | 30 |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Testing UnicodeCharTokenizer op in DE
"""
import numpy as np
import mindspore.dataset as ds
from mindspore import log as logger
import mindspore.dataset.text as text
DATA_FILE = "../data/dataset/testTokenizerData/1.txt"
NORMALIZE_FILE = "../data/dataset/testTokenizerData/normalize.txt"
REGEX_REPLACE_FILE = "../data/dataset/testTokenizerData/regex_replace.txt"
REGEX_TOKENIZER_FILE = "../data/dataset/testTokenizerData/regex_tokenizer.txt"
def split_by_unicode_char(input_strs):
    """
    Split each utf-8 string into the list of its unicode characters.
    """
    # list(s) yields exactly the per-character split the tests compare against.
    return [list(one_str) for one_str in input_strs]
def test_unicode_char_tokenizer_default():
"""
Test UnicodeCharTokenizer
"""
input_strs = ("Welcome to Beijing!", "北京欢迎您!", "我喜欢English!", " ")
dataset = ds.TextFileDataset(DATA_FILE, shuffle=False)
tokenizer = text.UnicodeCharTokenizer()
dataset = dataset.map(operations=tokenizer)
tokens = []
for i in dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
token = text.to_str(i['text']).tolist()
tokens.append(token)
logger.info("The out tokens is : {}".format(tokens))
assert split_by_unicode_char(input_strs) == tokens
def test_unicode_char_tokenizer_with_offsets():
"""
Test UnicodeCharTokenizer
"""
input_strs = ("Welcome to Beijing!", "北京欢迎您!", "我喜欢English!", " ")
dataset = ds.TextFileDataset(DATA_FILE, shuffle=False)
tokenizer = text.UnicodeCharTokenizer(with_offsets=True)
dataset = dataset.map(operations=tokenizer, input_columns=['text'],
output_columns=['token', 'offsets_start', 'offsets_limit'],
column_order=['token', 'offsets_start', 'offsets_limit'])
tokens = []
expected_offsets_start = [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18],
[0, 3, 6, 9, 12, 15], [0, 3, 6, 9, 10, 11, 12, 13, 14, 15, 16], [0, 1]]
expected_offsets_limit = [[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19],
[3, 6, 9, 12, 15, 18], [3, 6, 9, 10, 11, 12, 13, 14, 15, 16, 17], [1, 2]]
count = 0
for i in dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
token = text.to_str(i['token']).tolist()
tokens.append(token)
np.testing.assert_array_equal(i['offsets_start'], expected_offsets_start[count])
np.testing.assert_array_equal(i['offsets_limit'], expected_offsets_limit[count])
count += 1
logger.info("The out tokens is : {}".format(tokens))
assert split_by_unicode_char(input_strs) == tokens
def test_whitespace_tokenizer_default():
"""
Test WhitespaceTokenizer
"""
whitespace_strs = [["Welcome", "to", "Beijing!"],
["北京欢迎您!"],
["我喜欢English!"],
[""]]
dataset = ds.TextFileDataset(DATA_FILE, shuffle=False)
tokenizer = text.WhitespaceTokenizer()
dataset = dataset.map(operations=tokenizer)
tokens = []
for i in dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
token = text.to_str(i['text']).tolist()
tokens.append(token)
logger.info("The out tokens is : {}".format(tokens))
assert whitespace_strs == tokens
def test_whitespace_tokenizer_with_offsets():
"""
Test WhitespaceTokenizer
"""
whitespace_strs = [["Welcome", "to", "Beijing!"],
["北京欢迎您!"],
["我喜欢English!"],
[""]]
dataset = ds.TextFileDataset(DATA_FILE, shuffle=False)
tokenizer = text.WhitespaceTokenizer(with_offsets=True)
dataset = dataset.map(operations=tokenizer, input_columns=['text'],
output_columns=['token', 'offsets_start', 'offsets_limit'],
column_order=['token', 'offsets_start', 'offsets_limit'])
tokens = []
expected_offsets_start = [[0, 8, 11], [0], [0], [0]]
expected_offsets_limit = [[7, 10, 19], [18], [17], [0]]
count = 0
for i in dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
token = text.to_str(i['token']).tolist()
tokens.append(token)
np.testing.assert_array_equal(i['offsets_start'], expected_offsets_start[count])
np.testing.assert_array_equal(i['offsets_limit'], expected_offsets_limit[count])
count += 1
logger.info("The out tokens is : {}".format(tokens))
assert whitespace_strs == tokens
def test_unicode_script_tokenizer_default():
"""
Test UnicodeScriptTokenizer when para keep_whitespace=False
"""
unicode_script_strs = [["Welcome", "to", "Beijing", "!"],
["北京欢迎您", "!"],
["我喜欢", "English", "!"],
[""]]
dataset = ds.TextFileDataset(DATA_FILE, shuffle=False)
tokenizer = text.UnicodeScriptTokenizer(keep_whitespace=False)
dataset = dataset.map(operations=tokenizer)
tokens = []
for i in dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
token = text.to_str(i['text']).tolist()
tokens.append(token)
logger.info("The out tokens is : {}".format(tokens))
assert unicode_script_strs == tokens
def test_unicode_script_tokenizer_default2():
    """
    Test UnicodeScriptTokenizer when para keep_whitespace=True: whitespace
    runs are kept as their own tokens.
    """
    unicode_script_strs2 = [["Welcome", " ", "to", " ", "Beijing", "!"],
                            ["北京欢迎您", "!"],
                            ["我喜欢", "English", "!"],
                            [" "]]
    dataset = ds.TextFileDataset(DATA_FILE, shuffle=False)
    tokenizer = text.UnicodeScriptTokenizer(keep_whitespace=True)
    dataset = dataset.map(operations=tokenizer)

    tokens = []
    for i in dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
        token = text.to_str(i['text']).tolist()
        tokens.append(token)
    # BUG FIX: this previously was logger.info("The out tokens is :", tokens),
    # which passes `tokens` as a %-formatting argument to a format string with
    # no placeholder -- the logging module reports a formatting error and the
    # value is never printed. Use lazy %-formatting, consistent with the
    # sibling tests in this file.
    logger.info("The out tokens is : %s", tokens)
    assert unicode_script_strs2 == tokens
def test_unicode_script_tokenizer_with_offsets():
"""
Test UnicodeScriptTokenizer when para keep_whitespace=False and with_offsets=True
"""
unicode_script_strs = [["Welcome", "to", "Beijing", "!"],
["北京欢迎您", "!"],
["我喜欢", "English", "!"],
[""]]
dataset = ds.TextFileDataset(DATA_FILE, shuffle=False)
tokenizer = text.UnicodeScriptTokenizer(keep_whitespace=False, with_offsets=True)
dataset = dataset.map(operations=tokenizer, input_columns=['text'],
output_columns=['token', 'offsets_start', 'offsets_limit'],
column_order=['token', 'offsets_start', 'offsets_limit'])
tokens = []
expected_offsets_start = [[0, 8, 11, 18], [0, 15], [0, 9, 16], [0]]
expected_offsets_limit = [[7, 10, 18, 19], [15, 18], [9, 16, 17], [0]]
count = 0
for i in dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
token = text.to_str(i['token']).tolist()
tokens.append(token)
np.testing.assert_array_equal(i['offsets_start'], expected_offsets_start[count])
np.testing.assert_array_equal(i['offsets_limit'], expected_offsets_limit[count])
count += 1
logger.info("The out tokens is : {}".format(tokens))
assert unicode_script_strs == tokens
def test_unicode_script_tokenizer_with_offsets2():
    """
    Test UnicodeScriptTokenizer when para keep_whitespace=True and
    with_offsets=True: tokens plus their byte start/limit offsets.
    """
    unicode_script_strs2 = [["Welcome", " ", "to", " ", "Beijing", "!"],
                            ["北京欢迎您", "!"],
                            ["我喜欢", "English", "!"],
                            [" "]]
    dataset = ds.TextFileDataset(DATA_FILE, shuffle=False)
    tokenizer = text.UnicodeScriptTokenizer(keep_whitespace=True, with_offsets=True)
    dataset = dataset.map(operations=tokenizer, input_columns=['text'],
                          output_columns=['token', 'offsets_start', 'offsets_limit'],
                          column_order=['token', 'offsets_start', 'offsets_limit'])

    tokens = []
    expected_offsets_start = [[0, 7, 8, 10, 11, 18], [0, 15], [0, 9, 16], [0]]
    expected_offsets_limit = [[7, 8, 10, 11, 18, 19], [15, 18], [9, 16, 17], [2]]
    count = 0
    for i in dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
        token = text.to_str(i['token']).tolist()
        tokens.append(token)
        np.testing.assert_array_equal(i['offsets_start'], expected_offsets_start[count])
        np.testing.assert_array_equal(i['offsets_limit'], expected_offsets_limit[count])
        count += 1
    # BUG FIX: this previously was logger.info("The out tokens is :", tokens),
    # which passes `tokens` as a %-formatting argument to a format string with
    # no placeholder and so drops the value with a formatting error. Use lazy
    # %-formatting, consistent with the sibling tests in this file.
    logger.info("The out tokens is : %s", tokens)
    assert unicode_script_strs2 == tokens
def test_case_fold():
"""
Test CaseFold
"""
expect_strs = ["welcome to beijing!", "北京欢迎您!", "我喜欢english!", " "]
dataset = ds.TextFileDataset(DATA_FILE, shuffle=False)
op = text.CaseFold()
dataset = dataset.map(operations=op)
lower_strs = []
for i in dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
token = text.to_str(i['text']).tolist()
lower_strs.append(token)
assert lower_strs == expect_strs
def test_normalize_utf8():
"""
Test NormalizeUTF8
"""
expect_normlize_data = [
# NFC
[b'\xe1\xb9\xa9', b'\xe1\xb8\x8d\xcc\x87', b'q\xcc\xa3\xcc\x87',
b'\xef\xac\x81', b'2\xe2\x81\xb5', b'\xe1\xba\x9b\xcc\xa3'],
# NFKC
[b'\xe1\xb9\xa9', b'\xe1\xb8\x8d\xcc\x87', b'q\xcc\xa3\xcc\x87',
b'fi', b'25', b'\xe1\xb9\xa9'],
# NFD
[b's\xcc\xa3\xcc\x87', b'd\xcc\xa3\xcc\x87', b'q\xcc\xa3\xcc\x87',
b'\xef\xac\x81', b'2\xe2\x81\xb5', b'\xc5\xbf\xcc\xa3\xcc\x87'],
# NFKD
[b's\xcc\xa3\xcc\x87', b'd\xcc\xa3\xcc\x87', b'q\xcc\xa3\xcc\x87',
b'fi', b'25', b's\xcc\xa3\xcc\x87']
]
assert normalize(text.utils.NormalizeForm.NFC) == expect_normlize_data[0]
assert normalize(text.utils.NormalizeForm.NFKC) == expect_normlize_data[1]
assert normalize(text.utils.NormalizeForm.NFD) == expect_normlize_data[2]
assert normalize(text.utils.NormalizeForm.NFKD) == expect_normlize_data[3]
def test_regex_replace():
    """
    Test RegexReplace over several (first_row, last_row, expected, pattern,
    replacement) cases.

    NOTE(review): ``regex_replace`` is a helper defined elsewhere in this
    file (not visible in this excerpt) -- presumably it applies
    text.RegexReplace to the given row range of REGEX_REPLACE_FILE and
    compares against the expected strings; confirm against the full file.
    """
    # Lowercase letters (\p{Ll}) masked with underscores.
    regex_replace(1, 2, ['H____ W____', "L__'_ G_"], "\\p{Ll}", '_')
    # Leading "<digit>:" / "b:" prefixes stripped.
    regex_replace(3, 5, ['hello', 'world', '31:beijing'], "^(\\d:|b:)", "")
    # All whitespace removed.
    regex_replace(6, 6, ["WelcometoChina!"], "\\s+", "")
    # Control/format characters and whitespace removed.
    regex_replace(7, 8, ['我不想长大', 'WelcometoShenzhen!'], "\\p{Cc}|\\p{Cf}|\\s+", "")
def test_regex_tokenizer_default():
"""
Test RegexTokenizer
"""
regex_tokenizer(1, 1, [['Welcome', 'to', 'Shenzhen!']], "\\s+", "")
regex_tokenizer(1, 1, [['Welcome', ' ', 'to', ' ', 'Shenzhen!']], "\\s+", "\\s+")
regex_tokenizer(2, 2, [['北', '京', '欢', '迎', '您', '!Welcome to Beijing!']], r"\p{Han}", r"\p{Han}")
regex_tokenizer(3, 3, [['12', '¥+', '36', '¥=?']], r"[\p{P}|\p{S}]+", r"[\p{P}|\p{S}]+")
regex_tokenizer(3, 3, [['12', '36']], r"[\p{P}|\p{S}]+", "")
regex_tokenizer(3, 3, [['¥+', '¥=?']], r"[\p{N}]+", "")
def test_regex_tokenizer_with_offsets():
"""
Test RegexTokenizer
"""
regex_tokenizer(1, 1, [['Welcome', 'to', 'Shenzhen!']], [[0, 8, 11]], [[7, 10, 20]], "\\s+", "")
regex_tokenizer(1, 1, [['Welcome', ' ', 'to', ' ', 'Shenzhen!']], [[0, 7, 8, 10, 11]], [[7, 8, 10, 11, 20]],
"\\s+", "\\s+")
regex_tokenizer(2, 2, [['北', '京', '欢', '迎', '您', '!Welcome to Beijing!']], [[0, 3, 6, 9, 12, 15]],
[[3, 6, 9, 12, 15, 35]], r"\p{Han}", r"\p{Han}")
regex_tokenizer(3, 3, [['12', '¥+', '36', '¥=?']], [[0, 2, 6, 8]], [[2, 6, 8, 13]],
r"[\p{P}|\p{S}]+", r"[\p{P}|\p{S}]+")
regex_tokenizer(3, 3, [['12', '36']], [[0, 6]], [[2, 8]], r"[\p{P}|\p{S}]+", "")
regex_tokenizer(3, 3, [['¥+', '¥=?']], [[2, 8]], [[6, 13]], r"[\p{N}]+", "")
if __name__ == '__main__':
    # Ad-hoc runner: execute every tokenizer test in sequence when this
    # module is invoked directly.
    test_unicode_char_tokenizer_default()
    test_unicode_char_tokenizer_with_offsets()
    test_whitespace_tokenizer_default()
    test_whitespace_tokenizer_with_offsets()
    test_unicode_script_tokenizer_default()
    test_unicode_script_tokenizer_default2()
    test_unicode_script_tokenizer_with_offsets()
    test_unicode_script_tokenizer_with_offsets2()
    test_case_fold()
    test_normalize_utf8()
    test_regex_replace()
    test_regex_tokenizer_default()
    test_regex_tokenizer_with_offsets()
| [
2,
15069,
12131,
43208,
21852,
1766,
1539,
12052,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198... | 2.100828 | 6,278 |
# -*- coding: utf-8 -*-
"""
flask.ext.datastore.mongoalchemy
~~~~~~~~~~~~~~
:copyright: (c) 2011 by wilsaj.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import types
import mongoalchemy as ma
from mongoalchemy.document import Document
from wtforms import fields as f
from wtforms import form, validators, widgets
from wtforms.form import Form
from flask.ext.admin.datastore import AdminDatastore
from flask.ext.admin import wtforms as admin_wtf
from flask.ext.admin import util
class MongoAlchemyDatastore(AdminDatastore):
    """A datastore for accessing MongoAlchemy document models.

    The `models` parameter should be either a module or an iterable
    that contains the MongoAlchemy models that will be made available
    through the admin interface.

    `db_session` should be an initialized MongoAlchemy session
    object. See the `MongoAlchemy documentation`_ for information on
    how to do that.

    By default, a form for adding and editing data will be
    automatically generated for each MongoAlchemy model. Only
    primitive MongoAlchemy types are supported so if you need to
    support other fields you will need to create custom forms. You can
    also use custom forms if you want more control over form behavior.

    To use custom forms, set the `model_forms` parameter to be a dict
    with model names as keys matched to custom forms for the forms you
    want to override. Forms should be WTForms form objects; see the
    `WTForms documentation`_ for more information on how to configure
    forms.

    A dict with model names as keys, mapped to WTForm Form objects
    that should be used as forms for creating and editing instances of
    these models.

    .. _MongoAlchemy documentation: http://www.mongoalchemy.org/api/session.html
    .. _WTForms documentation: http://wtforms.simplecodes.com/
    """
    # NOTE(review): the methods below read self.db_session,
    # self.model_classes and self.form_dict, which are never assigned in
    # this excerpt -- presumably an __init__ (not visible here) sets them
    # from the `models`/`db_session`/`model_forms` parameters described
    # above. Confirm against the full file.

    def create_model_pagination(self, model_name, page, per_page=25):
        """Returns a pagination object for the list view."""
        model_class = self.get_model_class(model_name)
        # skip/limit window: pages are 1-based, so page 1 starts at offset 0.
        query = self.db_session.query(model_class).skip(
            (page - 1) * per_page).limit(per_page)
        return MongoAlchemyPagination(page, per_page, query)

    def delete_model_instance(self, model_name, model_keys):
        """Deletes a model instance. Returns True if model instance
        was successfully deleted, returns False otherwise.
        """
        model_class = self.get_model_class(model_name)
        try:
            model_instance = self.find_model_instance(model_name, model_keys)
            self.db_session.remove(model_instance)
            return True
        except ma.query.BadResultException:
            # raised by .one() in find_model_instance when no match exists
            return False

    def find_model_instance(self, model_name, model_keys):
        """Returns the model instance matching model_name and model_keys.

        NOTE(review): despite the original docstring claiming None is
        returned when nothing matches, ``.one()`` raises
        ma.query.BadResultException in that case -- delete_model_instance
        above relies on exactly that exception.
        """
        # Only the first key is used: a mongo document has a single id.
        model_key = model_keys[0]
        model_class = self.get_model_class(model_name)
        return self.db_session.query(model_class).filter(
            model_class.mongo_id == model_key).one()

    def get_model_class(self, model_name):
        """Returns a model class, given a model name."""
        return self.model_classes.get(model_name, None)

    def get_model_form(self, model_name):
        """Returns a form, given a model name."""
        return self.form_dict.get(model_name, None)

    def get_model_keys(self, model_instance):
        """Returns the keys for a given a model instance."""
        return [model_instance.mongo_id]

    def list_model_names(self):
        """Returns a list of model names available in the datastore."""
        return self.model_classes.keys()

    def save_model(self, model_instance):
        """Persists a model instance to the datastore. Note: this
        could be called when a model instance is added or edited.
        """
        return model_instance.commit(self.db_session.db)

    def update_from_form(self, model_instance, form):
        """Returns a model instance whose values have been updated
        with the values from a given form.
        """
        for field in form:
            # handle FormFields that were generated for mongoalchemy
            # TupleFields as a special case
            if field.__class__ == f.FormField:
                data_tuple = tuple([subfield.data for subfield in field])
                setattr(model_instance, field.name, data_tuple)
                continue
            # don't use the mongo id from the form - it comes from the
            # key/url and if someone tampers with the form somehow, we
            # should ignore that
            elif field.name != 'mongo_id':
                setattr(model_instance, field.name, field.data)

        return model_instance
def _form_for_model(document_class, db_session):
    """returns a wtform Form object for a given document model class.

    NOTE(review): `db_session` is currently unused -- the body simply
    delegates to model_form(); the XXX below marks this as unfinished.
    """
    #XXX: needs to be implemented
    return model_form(document_class)
#-----------------------------------------------------------------------
# mongo alchemy form generation: to be pushed upstream
#-----------------------------------------------------------------------
def model_fields(model, only=None, exclude=None, field_args=None,
                 converter=None):
    """
    Generate a dictionary of fields for a given MongoAlchemy model.

    See `model_form` docstring for description of parameters.
    Returns a dict mapping field names to wtforms field objects; model
    fields for which the converter yields None are silently omitted.
    """
    if not issubclass(model, Document):
        raise TypeError('model must be a mongoalchemy document model')

    # ModelConverter is defined elsewhere in this module (not visible in
    # this excerpt); presumably it maps MongoAlchemy field types to
    # wtforms fields -- confirm against the full file.
    converter = converter or ModelConverter()
    field_args = field_args or {}

    ma_fields = ((name, field) for name, field in model.get_fields().items())
    # `only` takes precedence over `exclude`, matching wtforms convention.
    if only:
        ma_fields = (x for x in ma_fields if x[0] in only)
    elif exclude:
        ma_fields = (x for x in ma_fields if x[0] not in exclude)

    field_dict = {}
    for name, field in ma_fields:
        wtfield = converter.convert(model, field, field_args.get(name))
        # skip fields the converter could not handle
        if wtfield is not None:
            field_dict[name] = wtfield

    return field_dict
def model_form(model, base_class=Form, only=None, exclude=None,
               field_args=None, converter=None):
    """
    Create a wtforms Form for a given MongoAlchemy model class::

        from wtforms.ext.mongoalchemy.orm import model_form
        from myapp.models import User
        UserForm = model_form(User)

    :param model:
        A MongoAlchemy mapped model class.
    :param base_class:
        Base form class to extend from. Must be a ``wtforms.Form`` subclass.
    :param only:
        An optional iterable with the property names that should be included in
        the form. Only these properties will have fields.
    :param exclude:
        An optional iterable with the property names that should be excluded
        from the form. All other properties will have fields.
    :param field_args:
        An optional dictionary of field names mapping to keyword arguments used
        to construct each field object.
    :param converter:
        A converter to generate the fields based on the model properties. If
        not set, ``ModelConverter`` is used.
    :return: a new Form subclass named ``<Model>Form``.
    """
    field_dict = model_fields(model, only, exclude, field_args, converter)
    # Dynamically subclass base_class with one wtforms field per model property.
    return type(model.__name__ + 'Form', (base_class, ), field_dict)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
220,
220,
220,
42903,
13,
2302,
13,
19608,
459,
382,
13,
76,
25162,
282,
26599,
198,
220,
220,
220,
220,
15116,
8728,
4907,
628,
198,
220,
220,
220,
1058,
... | 2.81307 | 2,632 |
# Chinese numeral digits (standard and financial/banker's forms) -> 0-9.
_CN_NUM = {
    "〇": 0, "一": 1, "二": 2, "三": 3, "四": 4,
    "五": 5, "六": 6, "七": 7, "八": 8, "九": 9,
    "零": 0, "壹": 1, "贰": 2, "叁": 3, "肆": 4,
    "伍": 5, "陆": 6, "柒": 7, "捌": 8, "玖": 9,
    "貮": 2, "两": 2,
}

# Multiplier characters (standard and financial forms).
_CN_UNIT = {
    "十": 10, "拾": 10,
    "百": 100, "佰": 100,
    "千": 1000, "仟": 1000,
    "万": 10000, "萬": 10000,
    # FIX/generalization: chinese_to_arabic already special-cases a
    # 100000000 section unit, but 亿/億 were missing from this table,
    # leaving that branch unreachable (any input containing 亿 raised
    # KeyError). Adding them enables inputs such as "一亿二千万".
    "亿": 100000000, "億": 100000000,
}


def chinese_to_arabic(cn: str) -> int:
    """
    Convert a Chinese numeral string to an integer.

    Handles digits 〇-九 (plus financial variants and 两), the units
    十/百/千, and the section units 万 and 亿. A leading, digit-less
    十 is treated as 10 ("十五" -> 15).

    Based on: https://blog.csdn.net/hexrain/article/details/52790126

    :param cn: Chinese numeral string, e.g. "二十五" or "三万五千".
    :rtype: int
    """
    unit = 0    # value of the most recently seen unit character
    l_dig = []  # partial values, collected right-to-left
    for cn_dig in reversed(cn):
        if cn_dig in _CN_UNIT:
            unit = _CN_UNIT[cn_dig]
            if unit == 10000 or unit == 100000000:
                # Section separator (万/亿): record a marker and restart
                # the running unit at 1 for the digits to its left.
                l_dig.append(unit)
                unit = 1
        else:
            dig = _CN_NUM[cn_dig]
            if unit:
                dig *= unit
                unit = 0
            l_dig.append(dig)
    if unit == 10:
        # A leading 十 with no explicit digit, e.g. "十五" == 15.
        l_dig.append(10)

    # Re-scan left-to-right, multiplying each accumulated section by its
    # 万/亿 marker. Computed partial values are always < 10000
    # (digit * unit <= 9 * 1000), so they can never collide with markers.
    val, tmp = 0, 0
    for x in reversed(l_dig):
        if x == 10000 or x == 100000000:
            val += tmp * x
            tmp = 0
        else:
            tmp += x
    val += tmp
    return val
| [
62,
44175,
62,
41359,
796,
1391,
198,
220,
220,
220,
366,
5099,
229,
1298,
657,
11,
198,
220,
220,
220,
366,
31660,
1298,
352,
11,
198,
220,
220,
220,
366,
12859,
234,
1298,
362,
11,
198,
220,
220,
220,
366,
49011,
1298,
513,
11,
... | 1.546954 | 788 |
#!/usr/bin/env python
import os
from rdbtools import __version__
long_description = '''
Parse Redis dump.rdb files, Analyze Memory, and Export Data to JSON
Rdbtools is a parser for Redis' dump.rdb files. The parser generates events similar to an xml sax parser, and is very efficient memory wise.
In addition, rdbtools provides utilities to :
1. Generate a Memory Report of your data across all databases and keys
2. Convert dump files to JSON
3. Compare two dump files using standard diff tools
Rdbtools is written in Python, though there are similar projects in other languages. See FAQs (https://github.com/sripathikrishnan/redis-rdb-tools/wiki/FAQs) for more information.
'''
# Distribution metadata, expanded into setup() as keyword arguments below.
sdict = {
    'name' : 'rdbtools',
    'version' : __version__,
    'description' : 'Utilities to convert Redis RDB files to JSON or SQL formats',
    'long_description' : long_description,
    'url': 'https://github.com/sripathikrishnan/redis-rdb-tools',
    'download_url': 'https://github.com/sripathikrishnan/redis-rdb-tools/archive/rdbtools-%s.tar.gz' % __version__,
    'author': 'Sripathi Krishnan, Redis Labs',
    'author_email' : 'Sripathi.Krishnan@gmail.com',
    'maintainer': 'Sripathi Krishnan, Redis Labs',
    'maintainer_email': 'oss@redislabs.com',
    'keywords' : ['Redis', 'RDB', 'Export', 'Dump', 'Memory Profiler'],
    'license' : 'MIT',
    'packages' : ['rdbtools', 'rdbtools.cli'],
    # Ship the report templates alongside the code.
    'package_data' : {
        'rdbtools': ['templates/*'],
    },
    'test_suite' : 'tests.all_tests',
    'install_requires': ['redis'],
    # Console entry points: each maps a shell command to a cli main().
    'entry_points' : {
        'console_scripts' : [
            'rdb = rdbtools.cli.rdb:main',
            'redis-memory-for-key = rdbtools.cli.redis_memory_for_key:main',
            'redis-profiler = rdbtools.cli.redis_profiler:main'],
    },
    'classifiers' : [
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python'],
}

# Prefer setuptools; fall back to distutils.core on installs without it.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

setup(**sdict)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
28686,
198,
6738,
374,
9945,
31391,
1330,
11593,
9641,
834,
198,
198,
6511,
62,
11213,
796,
705,
7061,
198,
10044,
325,
2297,
271,
10285,
13,
4372,
65,
3696,
11,
16213,
2736,
14... | 2.658182 | 825 |
from MusicTheory.temperament.EqualTemperament import EqualTemperament
from MusicTheory.temperament.eq12scales.ScaleIntervals import ScaleIntervals
from MusicTheory.temperament.eq12scales.Scale import Scale
import Wave.Player
import Wave.Sampler
import Wave.BaseWaveMaker
import Wave.WaveFile
import pathlib
# p.Close()
if __name__ == '__main__':
et = EqualTemperament()
et.Denominator = 12
et.SetBaseKey(keyId=9, pitch=4, hz=440)
scale = Scale(et)# scale.Temperament = et
print(f'BaseKey: {GetToneName(et.BaseKeyId)}{et.BaseKeyPitch} {et.BaseFrequency}Hz')
print(f'{et.Denominator}平均律')
"""
for scaleKeyId in range(et.Denominator):
tones = scale.Get(scaleKeyId, ScaleIntervals.Major)
for tone in tones: print('{:2}'.format(GetToneName(tone[0])), end=' ')
print()
PlayAndMaker.Run(scaleKeyId, [tone[2] for tone in tones])
"""
"""
for scaleKeyId in range(et.Denominator):
scale.Key = scaleKeyId
for tone in scale.Tones: print('{:2}'.format(GetToneName(tone[0])), end=' ')
print()
"""
for interval_name in ['Major', 'Minor', 'Diminished', 'HarmonicMinor', 'MelodicMinor', 'MajorPentaTonic', 'MinorPentaTonic', 'BlueNote']:
print('=====', interval_name, '=====')
scale.Intervals = getattr(ScaleIntervals, interval_name)
for scaleKeyId in range(et.Denominator):
scale.Key = scaleKeyId
for tone in scale.Tones: print('{:2}'.format(GetToneName(tone[0])), end=' ')
print()
| [
6738,
7849,
464,
652,
13,
11498,
525,
3263,
13,
36,
13255,
12966,
525,
3263,
1330,
28701,
12966,
525,
3263,
198,
6738,
7849,
464,
652,
13,
11498,
525,
3263,
13,
27363,
1065,
1416,
2040,
13,
29990,
9492,
12786,
1330,
21589,
9492,
12786,
... | 2.349925 | 663 |
from flask_wtf import FlaskForm
from wtforms import StringField, TextAreaField, SubmitField, SelectField
from wtforms.validators import Required
| [
6738,
42903,
62,
86,
27110,
1330,
46947,
8479,
198,
6738,
266,
83,
23914,
1330,
10903,
15878,
11,
8255,
30547,
15878,
11,
39900,
15878,
11,
9683,
15878,
198,
6738,
266,
83,
23914,
13,
12102,
2024,
1330,
20906,
198
] | 3.918919 | 37 |
#!/usr/bin/python3
# -*-coding:utf-8-*-
__author__ = "Bannings"
from typing import List
class Solution:
    """LeetCode 63 -- Unique Paths II, solved with a 1-D rolling DP row.

    BUG FIX: the ``__main__`` guard below called ``Solution`` while its
    definition was commented out, so running the module raised NameError.
    The class is restored here, unchanged from the commented-out version.
    """

    def uniquePathsWithObstacles(self, obstacleGrid: List[List[int]]) -> int:
        """Count right/down paths from the top-left to the bottom-right
        cell, where grid cells equal to 1 are impassable obstacles.

        current[j] holds the number of ways to reach column j of the row
        processed so far: O(m*n) time, O(n) extra space.
        """
        if not obstacleGrid:
            return 0
        m, n = len(obstacleGrid), len(obstacleGrid[0])
        current = [1] + [0] * (n - 1)
        for i in range(m):
            for j in range(n):
                if obstacleGrid[i][j] == 1:
                    # An obstacle blocks every path passing through (i, j).
                    current[j] = 0
                elif j > 0:
                    current[j] += current[j - 1]
        return current[-1]


if __name__ == '__main__':
    print(Solution().uniquePathsWithObstacles([[0, 0], [1, 1], [0, 0]]))
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
2,
532,
9,
12,
66,
7656,
25,
40477,
12,
23,
12,
9,
12,
198,
198,
834,
9800,
834,
796,
366,
33,
1236,
654,
1,
198,
198,
6738,
19720,
1330,
7343,
198,
198,
2,
1398,
28186,
25,
198,
... | 2.094545 | 550 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import os
from src.detector.loader import Loader
from src.comparator.file_set_comparator import FileSetComparator
from src.findings.finding_container import FindingContainer
from src.comparator.wrappers import FileSet
if __name__ == "__main__":
unittest.main()
| [
2,
15069,
12131,
3012,
11419,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
733... | 3.617647 | 238 |
import csv
import os
import numpy as np
from . import color_support
import plotly.plotly as py
import plotly.graph_objs as go
path = (os.path.dirname(os.path.abspath(__file__)))
FILE = path +'/static/maps/Data/city_health_stats.csv'
py.sign_in('healthy_neighborhoods','d806djbyh8')
DEFAULT = None
# necessary specifications to fit plotly matrix
ROWS = 9
COLS = 3
X_ID = 0
Y_ID = 1
NAME_ID = 2
def get_lists(var1, var2):
'''
Takes two variable strings
Returns three lists:
xs: list of floats
ys: list of floats
rt: list of tuples (name, x, y)
Xs and ys will not have any DEFAULT, but rt has DEFAULT for every missing values in the csv
Which is used to generate gray color in google maps
But each of them needs to be skipped for the scatter array
'''
xs = []
ys = []
rt = []
headers = {}
with open (FILE,'rU') as f:
fields = csv.reader(f)
for i, e in enumerate(next(fields)):
headers[e] = i
for row in fields:
add_to_lists(row, xs, ys, rt, headers, var1, var2)
return xs, ys, rt
def google_maps(var1, var2):
'''
Takes two variables,
Returns a tuple
(correlation coefficient, list of tuples)
'''
xs, ys, rt = get_lists(var1, var2)
return assign_colors(xs, ys, rt, [])
def get_correlation_coefficient(xs, ys):
'''
Takes both xs and ys, finds the correlation coefficient and adds to final list
'''
return np.corrcoef(xs, ys)[1,0]
def initialize_scatter(rt):
'''
Takes an empty list,
Initializes a 9-3 array
'''
for i in range(ROWS):
l = []
for j in range(COLS):
l.append([])
rt.append(l)
return rt
def get_scatter_array(var1, var2):
'''
Takes two variables from plot_graph
Returns an array and a list of colors
scatter: a 9-3 array built to match plotly specifications
list of colors: list of strings, where the index space of each color matches
the index space of the correct color quadrant, 0-8
'''
# if the second variable is default, overwrite it as the first
if var2 == DEFAULT:
var2 = var1
xs, ys, rt = get_lists(var1, var2)
scatter = initialize_scatter([])
for (name, x, y) in rt:
# only add a value to the scatter array if the measurement is not DEFAULT,
# which means the observation is actually present for that variable
if (x != DEFAULT) and (y != DEFAULT):
inner = scatter[get_color(x, y, xs, ys, True)]
inner[X_ID].append(x)
inner[Y_ID].append(y)
inner[NAME_ID].append(name)
return scatter, color_support.scatter_color_list
def get_color(x, y, xs, ys, scatter = False):
'''
Takes one x observation, one y observation, a list of x measurements and a list of y measurements,
and a boolean value for whether or not the color should match the scatter plot array
Walks through the short list of thresholds for the x observation and the y observation
Returns a single string that corresponds to the correct color for that neighborhood
'''
for idx, (low, high) in enumerate(get_thresholds(xs)):
if (x >= low) and (x <= high):
x_id = color_support.index_matrix[idx]
for idx, (low, high) in enumerate(get_thresholds(ys)):
if (y >= low) and (y <= high):
y_id = color_support.index_matrix[idx]
if not scatter:
return color_support.color_matrix[(x_id, y_id)]
return color_support.scatter_matrix[(x_id, y_id)]
def assign_colors(xs, ys, rt, final):
'''
Takes a list of tuples, assigns the correct color
'''
for (name, x, y) in rt:
if (x == DEFAULT) or (y == DEFAULT):
final.append((name, color_support.color_matrix[DEFAULT]))
else:
final.append((name, get_color(x, y, xs, ys, False)))
return (get_correlation_coefficient(xs, ys), final)
def get_thresholds(xs):
'''
Takes a list of numerical values, returns a list of 3 tuples assigning thresholds
'''
low = min(xs)
high = max(xs)
m1 = (high-low)/3 + low
m2 = 2 * (high - low)/3 + low
return [(low, m1), (m1, m2), (m2, high)]
def add_to_lists(row, xs, ys, rt, headers, var1, var2):
'''
Takes a row, a list of xs, ys, rt, dictionary of headers, two variables
Adds to lists appropriately
'''
name = row[1]
x = get_val(row[headers[var1]])
y = get_val(row[headers[var2]])
rt.append((name, x, y))
if (y != DEFAULT) and (x != DEFAULT):
ys.append(y)
xs.append(x)
def get_val(x):
'''
Takes a measurement from the row, tries to convert to float and add to values_list
If not, returns None
'''
try:
return float(x)
except:
return DEFAULT
def main(variable_1, variable_2 = DEFAULT):
'''
How this file interacts with Django framework
Takes two variables that are passed from user input
Calls plot_graph
And returns google maps data structure
'''
plot_graph(variable_1, variable_2)
return google_maps(variable_1, variable_2)
def compare(var1, var2):
'''
Takes two variable names and returns the correlation coefficient
Useful for systematically comparing lists of variables
We used this in generating some of our analysis,
such as finding the strongest health correlation for each economic indicator
But it's not explicitly tied to the website visualizations
'''
xs, ys, rt = get_lists(var1, var2)
return get_correlation_coefficient(xs, ys)
def create_trace(i, val, colors):
'''
Takes index, list of X, Y, and neighborhood values and creates a trace object with color
'''
trace = "var" + str(i)
color = colors[i]
x_vals = val[X_ID]
if len(x_vals) == 0:
return None
y_vals = val[Y_ID]
neighborhoods = val[NAME_ID]
trace = go.Scatter(
x = x_vals,
y = y_vals,
mode = 'markers',
marker = dict(color = color, size = 12,
line = dict(width = 1)
),
text = neighborhoods,
name = "")
return trace
def plot_graph(var1, var2):
'''
input: Receives two string variables from django
Receives scatter, list of 9 lists, each interior list with an x, y, and neighborhood get_scatter_array
Receives colors, an ordered list corresponding with the interior lists of scatter
output: Creates a trace for each of 9 lists , append to data file and creating scatterplot
'''
scatter, colors = get_scatter_array(var1, var2)
graph_title = var1 + ' vs ' + var2
data = []
for i, val in enumerate(scatter):
trace = create_trace(i, val, colors)
if trace:
data.append(trace)
layout = go.Layout(
showlegend = False,
title = graph_title,
hovermode = 'closest',
xaxis = dict(
title = var1,
ticklen = 5,
zeroline = False,
gridwidth = 2,
),
yaxis = dict(
title = var2,
ticklen = 5,
gridwidth = 2,
),
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig, filename='healthy-neighborhoods') | [
11748,
269,
21370,
198,
11748,
28686,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
764,
1330,
3124,
62,
11284,
198,
11748,
7110,
306,
13,
29487,
306,
355,
12972,
198,
11748,
7110,
306,
13,
34960,
62,
672,
8457,
355,
467,
198,
198,
6... | 2.505976 | 2,761 |
from django.urls import path
from . import views
app_name = "dummy"
urlpatterns = [
path(
"callback/",
views.CallbackView.as_view(),
name="callback",
),
]
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
198,
6738,
764,
1330,
5009,
198,
198,
1324,
62,
3672,
796,
366,
67,
13513,
1,
198,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
3108,
7,
198,
220,
220,
220,
220,
220,
220,
... | 2.209302 | 86 |
from app import db
# One course to many students and to many forms (one form per week at most)
# Student
# One form corresponds to one sheet in a spreadsheet
# It collects the answers for one week from the students in one course
# One form is in a many to one database.relationship with a Course
# Questions for each course (many to one)
# Question type is in fact answer type : grade (1 to 5), choice (non numeric), text
# Question type is inferred by the app
# Answer : one answer per student per form (=> per week) per question
| [
6738,
598,
1330,
20613,
628,
198,
2,
1881,
1781,
284,
867,
2444,
290,
284,
867,
5107,
357,
505,
1296,
583,
1285,
379,
749,
8,
628,
198,
2,
13613,
628,
198,
2,
1881,
1296,
24866,
284,
530,
9629,
287,
257,
30117,
198,
2,
632,
26609,... | 4.037313 | 134 |
# Generated by Django 3.1.7 on 2021-05-31 07:32
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import phone_field.models
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
22,
319,
33448,
12,
2713,
12,
3132,
8753,
25,
2624,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14... | 3.101695 | 59 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-08-02 21:54
from __future__ import unicode_literals
from django.db import migrations, models
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
1157,
13,
16,
319,
2177,
12,
2919,
12,
2999,
2310,
25,
4051,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
... | 2.706897 | 58 |
import logging
from .service import ApiResource, ApiService
logger = logging.getLogger(__name__)
class ApiServiceLicenses(ApiService):
"""
Documentation: https://developer.ciscospark.com/resource-licenses.html
"""
_resource = ApiResource('licenses', 'cursor')
def list_licenses(self, org_id=None, limit=None, cursor=None, paginate=True, **kwargs):
"""
List all licenses for a given organization.
:return: async_generator object that produces the list of items.
"""
params = {
'orgId': org_id,
'max': limit,
'cursor': cursor
}
logger.debug('Getting licenses using parameters: %s', params)
return self.get_items(params, paginate=paginate, **kwargs)
| [
11748,
18931,
198,
198,
6738,
764,
15271,
1330,
5949,
72,
26198,
11,
5949,
72,
16177,
198,
198,
6404,
1362,
796,
18931,
13,
1136,
11187,
1362,
7,
834,
3672,
834,
8,
628,
198,
4871,
5949,
72,
16177,
26656,
4541,
7,
32,
14415,
16177,
... | 2.477564 | 312 |
import flask
import sys, os
app = flask.Flask(__name__)
app.config["DEBUG"] = True
@app.route('/away', methods=['GET'])
@app.route('/home', methods=['GET'])
if __name__ == '__main__':
app.run(host='0.0.0.0')
| [
11748,
42903,
198,
11748,
25064,
11,
28686,
198,
198,
1324,
796,
42903,
13,
7414,
2093,
7,
834,
3672,
834,
8,
198,
1324,
13,
11250,
14692,
30531,
8973,
796,
6407,
198,
198,
31,
1324,
13,
38629,
10786,
14,
8272,
3256,
5050,
28,
17816,
... | 2.347826 | 92 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function, unicode_literals
from numpy.testing import assert_allclose
from astropy.coordinates import SkyCoord
import astropy.units as u
from regions import CircleSkyRegion
from ...data import DataStore, ObservationTableSummary, ObservationSummary
from ...data import ObservationStats, ObservationStatsList, ObservationList
from ...data import Target
from ...utils.testing import requires_data, requires_dependency
from ...background import reflected_regions_background_estimate as refl
from ...image import SkyMask
@requires_data('gammapy-extra')
@requires_data('gammapy-extra')
@requires_dependency('scipy')
@requires_dependency('matplotlib')
class TestObservationSummary:
"""
Test observation summary.
"""
| [
2,
49962,
739,
257,
513,
12,
565,
682,
347,
10305,
3918,
5964,
532,
766,
38559,
24290,
13,
81,
301,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
7297,
11,
3601,
62,
8818,
11,
28000,
1098,
62,
17201,
874,
198,
6738,
299,
... | 3.662338 | 231 |
"""
Database of neural networks to support easy API
To add a new network, edit `net_paths`, `net_io_layers`,
`all_classifiers`/`all_generators`, and `net_scales`
"""
from os.path import join, exists
from numpy import array
from .local_settings import nets_dir
__all__ = ['refresh_available_nets',
'net_paths_exist', 'available_nets',
'available_classifiers', 'available_generators']
# paths for nets
# - manual entries for two classifiers
net_paths = {
'caffe': {
'caffenet': {'definition': join(nets_dir, 'caffenet', 'caffenet.prototxt'),
'weights': join(nets_dir, 'caffenet', 'bvlc_reference_caffenet.caffemodel')},
},
'pytorch': {
'alexnet': {'weights': join(nets_dir, 'pytorch', 'alexnet', 'alexnet-owt-4df8aa71.pt')}
# 'https://download.pytorch.org/models/alexnet-owt-4df8aa71.pth'
}
}
# - DeePSiM generators
_deepsim_layers = ('norm1', 'norm2', 'conv3', 'conv4', 'pool5', 'fc6', 'fc7', 'fc8')
for n in _deepsim_layers:
net_paths['caffe'][f'deepsim-{n}'] = {
'definition': join(nets_dir, 'caffe', 'deepsim', n, 'generator_no_batch.prototxt'),
'weights': join(nets_dir, 'caffe', 'deepsim', n, 'generator.caffemodel')
}
net_paths['pytorch'][f'deepsim-{n}'] = {
'weights': join(nets_dir, 'pytorch', 'deepsim', f'{n}.pt')
}
# #####################################
# Metadata
# #####################################
# 1. Type: generator or classifier
# 2. Layer info:
# Layer info is various used in modules that use a neural net.
# It is usually inferred from a loaded net when availble. However, in
# multiprocessing cases involving caffe, nets have to be loaded in each
# subprocess separately, while dependent modules like Generator need to
# be constructed beforehand in the main process. This necessiates a
# static catelog of layer info.
# For caffe classifiers only,
# input layer shape should be defined (name assumed to be 'data' by
# convention); this is used to automatically construct a data transformer
# when loading net
# For generators,
# if caffe: input and output layer shapes and names should be defined
# (see prototxt for names);
# if pytorch: input layer shape should be defined.
# Input shapes allow Generator to know the shape of the image code.
# Output shape is only used when Generator.reencode() is called
# Shape is without the first (batch) dimension.
# This is not per-engine; i.e., nets with the same name are
# assumed to have the same layer shapes & names regardless of engine.
# 3. Scale for preprocessing:
# The range of image data input/output for net (without subtracting mean)
# E.g., scale of 255 means image is on the scale of 0-255.
# Notably, inception networks use scale 0-1, and pretrained pytorch networks
# use something else (see 'alexnet').
net_meta = {'caffenet': {'type': 'classifier', 'input_layer_shape': (3, 227, 227,)},
'alexnet': {'type': 'classifier', 'input_layer_shape': (3, 224, 224,),
'scale': 1 / array([0.229, 0.224, 0.225])[:, None, None]},
'deepsim-norm1': {'input_layer_shape': (96, 27, 27,)},
'deepsim-norm2': {'input_layer_shape': (256, 13, 13,)},
'deepsim-conv3': {'input_layer_shape': (384, 13, 13,)},
'deepsim-conv4': {'input_layer_shape': (384, 13, 13,)},
'deepsim-pool5': {'input_layer_shape': (256, 6, 6,)},
'deepsim-fc6': {'input_layer_shape': (4096,)},
'deepsim-fc7': {'input_layer_shape': (4096,)},
'deepsim-fc8': {'input_layer_shape': (1000,)}}
for d in net_meta.values():
d.setdefault('scale', 255)
for n in _deepsim_layers:
net_meta[f'deepsim-{n}'].update({
'type': 'generator',
'input_layer_name': 'feat',
'output_layer_name': 'generated',
'output_layer_shape': (3, 240, 240,) if 'norm' in n else (3, 256, 256)
})
net_paths_exist = available_nets = available_classifiers \
= available_generators = None
refresh_available_nets()
| [
37811,
198,
38105,
286,
17019,
7686,
284,
1104,
2562,
7824,
198,
2514,
751,
257,
649,
3127,
11,
4370,
4600,
3262,
62,
6978,
82,
47671,
4600,
3262,
62,
952,
62,
75,
6962,
47671,
198,
220,
220,
220,
4600,
439,
62,
4871,
13350,
63,
14,... | 2.433198 | 1,729 |
'''
Given two binary strings, return their sum (also a binary string).
For example,
a = "11"
b = "1"
Return "100".
'''
if __name__ == "__main__":
assert Solution().addBinary("111", "1") == "1000" | [
7061,
6,
198,
15056,
734,
13934,
13042,
11,
1441,
511,
2160,
357,
14508,
257,
13934,
4731,
737,
198,
198,
1890,
1672,
11,
198,
64,
796,
366,
1157,
1,
198,
65,
796,
366,
16,
1,
198,
13615,
366,
3064,
1911,
198,
7061,
6,
628,
198,
... | 2.72973 | 74 |
"""
inspect_games.py
Check if all games are created correctly.
"""
import os
from config import Config
from environment.game import Game
if __name__ == '__main__':
os.chdir("../..")
config = Config()
for g_id in [1, 2, 3]:
try:
game = Game(
game_id=g_id,
config=config,
save_path="environment/games_db/",
overwrite=False,
silent=True,
)
game.close()
game.reset()
game.get_blueprint()
game.get_observation()
game.step(0, 0)
except Exception:
print(f"Bug in game: {g_id}, please manually redo this one")
| [
37811,
198,
1040,
806,
62,
19966,
13,
9078,
198,
198,
9787,
611,
477,
1830,
389,
2727,
9380,
13,
198,
37811,
198,
11748,
28686,
198,
198,
6738,
4566,
1330,
17056,
198,
6738,
2858,
13,
6057,
1330,
3776,
198,
198,
361,
11593,
3672,
834,... | 1.874036 | 389 |
from absl import logging
from typing import Iterable, Callable
from faker import Factory
logging.info(logging.DEBUG)
def highlight_texts(text: str, list_of_mentions: Iterable[Mention], get_color: Callable[[Mention], str]) -> str:
"""Produce HTML that labels the entities in the given span. """
mentions = [[m.start_char_offset, m.end_char_offset, m.entity_id, m.end_char_offset-m.start_char_offset, m] for m in list_of_mentions]
mentions = sorted(mentions, key=lambda x: (x[1], -x[0]))
while mentions:
last_m = mentions[-1]
outer_most_containing = [m for m in mentions[:-1] if last_m[1]>= m[0]]
if outer_most_containing:
outer_most_containing = max(outer_most_containing, key=lambda x: x[1])
else:
outer_most_containing = None
logging.debug('last_m %s', str(last_m))
logging.debug('outer_most_containing %s', str(outer_most_containing))
# if e is candidate and is not overlapping
if outer_most_containing is None or last_m[0] > outer_most_containing[1]:
logging.debug('case 1: adding %s', str(last_m))
s, e, t, span_len, m = last_m
color = get_color(m)
rs = get_mention_string(text[s:e], color, t)
text = text[:s] + rs + text[e:]
logging.debug('len(rs)%s', len(rs))
mentions.pop()
elif outer_most_containing is None or last_m[1] > outer_most_containing[0]:
logging.debug('case 2: adding %s', str(last_m))
s, e, t, span_len, m = last_m
color = get_color(m)
rs = get_mention_string(text[s:e], color, t)
text = text[:s] + rs + text[e:]
logging.debug('len(rs)%s', len(rs))
mentions.pop()
outer_most_containing[1] += len(rs) - (e -s)
else:
logging.debug('case 3: adding %s', str(outer_most_containing))
s, e, t, span_len, m = outer_most_containing
color = get_color(m)
rs = get_mention_string(text[s:e], color, t)
text = text[:s] + rs + text[e:]
mentions.pop()
mentions.pop()
mentions.insert(last_m, 0)
while mentions:
s, e, t, link = mentions.pop()
color = get_color(t)
rs = get_mention_string(text[s:e], color, t)
text = text[:s] + rs + text[e:]
return text
| [
198,
6738,
2352,
75,
1330,
18931,
198,
6738,
19720,
1330,
40806,
540,
11,
4889,
540,
198,
6738,
277,
3110,
1330,
19239,
198,
198,
6404,
2667,
13,
10951,
7,
6404,
2667,
13,
30531,
8,
628,
198,
198,
4299,
7238,
62,
5239,
82,
7,
5239,
... | 2.116505 | 1,133 |
import sys,re
# ************************************************************************************************
#
# Compiler Exception
#
# ************************************************************************************************
# ************************************************************************************************
#
# Standard Dulcimer Bar class
#
# ************************************************************************************************
#
# Initialise and create from definition.
#
#
# Create Rendering for a bar
#
# ************************************************************************************************
#
# Compiler class
#
# ************************************************************************************************
#
# Reset the compiler
#
#
# Load and pre-process source
#
#
# Compile all the bars.
#
#
# Convert from format 1 (DAA style note tunes, which can be transposed.)
#
#
# Render the JSON.
#
#
# Render to a file
#
#
# Compile a single file. Does them all in order.
#
if __name__ == '__main__':
c = DulcimerCompiler()
c.compileFile("demo.song","../app/music.json") | [
198,
11748,
25064,
11,
260,
198,
198,
2,
41906,
17174,
17174,
198,
2,
198,
2,
197,
197,
197,
197,
197,
197,
197,
197,
197,
197,
3082,
5329,
35528,
198,
2,
198,
2,
41906,
17174,
17174,
198,
198,
2,
41906,
17174,
17174,
198,
2,
198,... | 4 | 297 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-26 14:15
from __future__ import unicode_literals
from django.db import migrations, models
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
1157,
319,
2177,
12,
2713,
12,
2075,
1478,
25,
1314,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738... | 2.8 | 55 |
#!/bin/python3
import math
import os
import random
import re
import sys
from collections import defaultdict
#
# Complete the 'roadsAndLibraries' function below.
#
# The function is expected to return a LONG_INTEGER.
# The function accepts following parameters:
# 1. INTEGER n
# 2. INTEGER c_lib
# 3. INTEGER c_road
# 4. 2D_INTEGER_ARRAY cities
#
if __name__ == '__main__':
fptr = sys.stdout
sys.stdin = open("input.txt", 'r')
q = int(input().strip())
for q_itr in range(q):
first_multiple_input = input().rstrip().split()
n = int(first_multiple_input[0])
m = int(first_multiple_input[1])
c_lib = int(first_multiple_input[2])
c_road = int(first_multiple_input[3])
cities = []
for _ in range(m):
cities.append(list(map(int, input().rstrip().split())))
result = roadsAndLibraries(n, c_lib, c_road, cities)
fptr.write(str(result) + '\n')
fptr.close()
| [
2,
48443,
8800,
14,
29412,
18,
198,
198,
11748,
10688,
198,
11748,
28686,
198,
11748,
4738,
198,
11748,
302,
198,
11748,
25064,
198,
6738,
17268,
1330,
4277,
11600,
628,
198,
2,
198,
2,
13248,
262,
705,
21372,
1870,
43,
11127,
6,
2163... | 2.399015 | 406 |
# -*- coding: utf-8 -*-
# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U
#
# This file is part of FIWARE project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For those usages not covered by the Apache version 2.0 License please
# contact with opensource@tid.es
"""
logger_utils module contains some functions for logging management:
- Logger wrapper
- Fuctions for pretty print:
- log_print_request
- log_print_response
This code is based on:
https://pdihub.hi.inet/fiware/fiware-iotqaUtils/raw/develop/iotqautils/iotqaLogger.py
"""
import logging
import logging.config
from xml.dom.minidom import parseString
import json
import os
__author__ = "Javier Fernández"
__email__ = "jfernandez@tcpsi.es"
__copyright__ = "Copyright 2015"
__license__ = " Apache License, Version 2.0"
__version__ = "1.0.0"
HEADER_CONTENT_TYPE = u'content-type'
HEADER_REPRESENTATION_JSON = u'application/json'
HEADER_REPRESENTATION_XML = u'application/xml'
HEADER_REPRESENTATION_TEXTPLAIN = u'text/plain'
# Load logging configuration from file if it exists
if os.path.exists("./resources/logging.conf"):
logging.config.fileConfig("./resources/logging.conf")
def get_logger(name):
"""
Create new logger with the given name
:param name: Name of the logger
:return: Logger
"""
logger = logging.getLogger(name)
return logger
def __get_pretty_body__(headers, body):
"""
Return a pretty printed body using the Content-Type header information
:param headers: Headers for the request/response (dict)
:param body: Body to pretty print (string)
:return: Body pretty printed (string)
"""
if HEADER_CONTENT_TYPE in headers:
if HEADER_REPRESENTATION_XML == headers[HEADER_CONTENT_TYPE]:
xml_parsed = parseString(body)
pretty_xml_as_string = xml_parsed.toprettyxml()
return pretty_xml_as_string
else:
if HEADER_REPRESENTATION_JSON in headers[HEADER_CONTENT_TYPE]:
parsed = json.loads(body)
return json.dumps(parsed, sort_keys=True, indent=4)
else:
return body
else:
return body
def log_print_request(logger, method, url, query_params=None, headers=None, body=None):
"""
Log an HTTP request data.
:param logger: Logger to use
:param method: HTTP method
:param url: URL
:param query_params: Query parameters in the URL
:param headers: Headers (dict)
:param body: Body (raw body, string)
:return: None
"""
log_msg = '>>>>>>>>>>>>>>>>>>>>> Request >>>>>>>>>>>>>>>>>>> \n'
log_msg += '\t> Method: %s\n' % method
log_msg += '\t> Url: %s\n' % url
if query_params is not None:
log_msg += '\t> Query params: {}\n'.format(str(query_params))
if headers is not None:
log_msg += '\t> Headers: {}\n'.format(str(headers))
if body is not None:
log_msg += '\t> Payload sent:\n {}\n'.format(__get_pretty_body__(headers, body))
logger.debug(log_msg)
def log_print_response(logger, response):
"""
Log an HTTP response data
:param logger: logger to use
:param response: HTTP response ('Requests' lib)
:return: None
"""
log_msg = '<<<<<<<<<<<<<<<<<<<<<< Response <<<<<<<<<<<<<<<<<<\n'
log_msg += '\t< Response code: {}\n'.format(str(response.status_code))
log_msg += '\t< Headers: {}\n'.format(str(dict(response.headers)))
try:
log_msg += '\t< Payload received:\n {}'\
.format(__get_pretty_body__(dict(response.headers), response.content))
except ValueError:
log_msg += '\t< Payload received:\n {}'\
.format(__get_pretty_body__(dict(response.headers), response.content.text))
logger.debug(log_msg)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
15069,
1853,
14318,
69,
18840,
3970,
16203,
32009,
18840,
331,
2935,
283,
2487,
78,
11,
311,
13,
32,
13,
52,
198,
2,
198,
2,
770,
2393,
318,
636,
286,
18930,... | 2.649969 | 1,617 |
#!/usr/bin/env python3
import argparse
import eccodes as ec
import numpy as np
import os
import pkg_resources
import pyarrow as pa
import re
import sys
import traceback
from datetime import datetime, timedelta, timezone
from pyarrow import csv
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
11748,
1822,
29572,
198,
11748,
21399,
4147,
355,
9940,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28686,
198,
11748,
279,
10025,
62,
37540,
198,
11748,
12972,
6018,
355,
14187,
... | 3.179775 | 89 |
#!/usr/bin/env python
import argparse
import logging
import sys
from metasv.defaults import *
from metasv.age import run_age_parallel
FORMAT = '%(levelname)s %(asctime)-15s %(name)-20s %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT)
logger = logging.getLogger(__name__)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run AGE on files assembled under MetaSV.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--reference", help="Reference FASTA", required=True, type=file)
parser.add_argument("--assembly", help="Assembly FASTA", required=True, type=file)
parser.add_argument("--age", help="Path to AGE executable", required=True, type=file)
parser.add_argument("--work", help="Work directory", default="work")
parser.add_argument("--pad", help="Padding to apply on both sides of the bed regions", type=int, default=AGE_PAD)
parser.add_argument("--nthreads", help="Number of threads to use", type=int, default=1)
parser.add_argument("--chrs", help="Chromosome list to process", nargs="+", default=[])
parser.add_argument("--sv_types", help="SV types list to process (INS, DEL, INV)", nargs="+", default=[])
parser.add_argument("--timeout", help="Max time for assembly processes to run", type=int, default=AGE_TIMEOUT)
parser.add_argument("--keep_temp", help="Don't delete temporary files", action="store_true")
parser.add_argument("--assembly_tool", help="Tool used for assembly", choices=["spades", "tigra"], default="spades")
parser.add_argument("--min_contig_len", help="Minimum length of contig to consider", type=int,
default=AGE_MIN_CONTIG_LENGTH)
parser.add_argument("--max_region_len", help="Maximum length of an SV interval", type=int,
default=AGE_MAX_REGION_LENGTH)
parser.add_argument("--min_del_subalign_len", help="Minimum length of deletion sub-alginment", type=int,
default=MIN_DEL_SUBALIGN_LENGTH)
parser.add_argument("--min_inv_subalign_len", help="Minimum length of inversion sub-alginment", type=int,
default=MIN_INV_SUBALIGN_LENGTH)
parser.add_argument("--age_window", help="Window size for AGE to merge nearby breakpoints", type=int,
default=AGE_WINDOW_SIZE)
parser.add_argument("--intervals_bed", help="BED file for assembly", type=file, required=True)
args = parser.parse_args()
logger.info("Command-line: {}".format(" ".join(sys.argv)))
run_age_parallel(intervals_bed=args.intervals_bed.name, reference=args.reference.name, assembly=args.assembly.name,
pad=args.pad, age=args.age.name, age_workdir=args.work, timeout=args.timeout,
keep_temp=args.keep_temp, assembly_tool=args.assembly_tool, chrs=args.chrs, nthreads=args.nthreads,
min_contig_len=args.min_contig_len, max_region_len=args.max_region_len, sv_types=args.sv_types,
min_del_subalign_len=args.min_del_subalign_len, min_inv_subalign_len=args.min_inv_subalign_len,
age_window = args.age_window) | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
1822,
29572,
198,
11748,
18931,
198,
11748,
25064,
198,
6738,
1138,
292,
85,
13,
12286,
82,
1330,
1635,
198,
6738,
1138,
292,
85,
13,
496,
1330,
1057,
62,
496,
62,
1845,
... | 2.52723 | 1,267 |
# class for static variables persistent for all NEDGE charms
| [
2,
1398,
329,
9037,
9633,
16218,
329,
477,
399,
1961,
8264,
41700,
198
] | 4.692308 | 13 |
"""Modify FilamentType to use a profile instead of cost
Revision ID: 36b213001624
Revises: e64fcfd066ac
Create Date: 2020-06-14 13:52:56.574015
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '36b213001624'
down_revision = 'e64fcfd066ac'
branch_labels = None
depends_on = None
| [
37811,
5841,
1958,
7066,
3263,
6030,
284,
779,
257,
7034,
2427,
286,
1575,
198,
198,
18009,
1166,
4522,
25,
4570,
65,
26427,
405,
1433,
1731,
198,
18009,
2696,
25,
304,
2414,
69,
12993,
67,
15,
2791,
330,
198,
16447,
7536,
25,
12131,
... | 2.612403 | 129 |
import frappe
from frappe.utils import nowdate
from frappe import get_print
@frappe.whitelist(allow_guest=True)
@frappe.whitelist(allow_guest=True)
@frappe.whitelist(allow_guest=True)
| [
11748,
5306,
27768,
198,
6738,
5306,
27768,
13,
26791,
1330,
783,
4475,
198,
6738,
5306,
27768,
1330,
651,
62,
4798,
628,
198,
198,
31,
69,
430,
27768,
13,
1929,
270,
46331,
7,
12154,
62,
5162,
395,
28,
17821,
8,
628,
198,
31,
69,
... | 2.513158 | 76 |
# Copyright (c) Facebook, Inc. and its affiliates. (http://www.facebook.com)
import types
import unittest
if __name__ == "__main__":
unittest.main()
| [
2,
15069,
357,
66,
8,
3203,
11,
3457,
13,
290,
663,
29116,
13,
357,
4023,
1378,
2503,
13,
19024,
13,
785,
8,
198,
11748,
3858,
198,
11748,
555,
715,
395,
628,
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,... | 2.907407 | 54 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import socket
import thrift.transport.TSocket as TSocket
import unittest
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
11748,
17802,
198,
... | 3.75 | 60 |
from tkinter import *
from pytube import YouTube
import tkinter.messagebox as thh
from tkinter import ttk
import webbrowser

# Simple Tk GUI for a YouTube downloader ("Downloader Dude").
# Fixed-size 655x584 window; widgets are stacked top-to-bottom with pack().
root = Tk()
root.geometry('655x584')
root.minsize(655,584)
root.maxsize(655,584)
root.title("Downloader Dude--By Aryan")
root.config(bg="purple")
root.iconbitmap("resource/dude.ico")
# NOTE(review): `entry` is first bound to a StringVar but is immediately
# rebound to the Entry widget below, so the StringVar is never used.
entry = StringVar()
title = Label(root, text="Download--Dude", font="lucida 30 bold", bg="purple", fg="yellow")
title.pack(pady=15)
url = Label(root,text="Url in This Box" ,fg="orange", bg="purple", font="helvetica 25 bold")
url.pack(pady=10)
# Text box where the user pastes the video URL.
entry = Entry(root, font="helvetica 20 italic", width=35)
entry.pack(pady=5)
# NOTE(review): `internet` is not defined in this chunk — presumably the
# download callback defined elsewhere in the file; confirm before editing.
button = Button(root, text="download Now", font='lucida 18 bold', bg='yellow', fg='blue', command=internet)
button.pack(pady=20)
progres = Label(root, text="Progress here", font='lucida 18 bold', bg='purple', fg='orange')
progres.pack(pady=5)
# Determinate progress bar, presumably updated by the download callback.
progress = ttk.Progressbar(root, orient=HORIZONTAL, length=400, mode='determinate')
progress.pack(pady=40)
# Enter the Tk event loop (blocks until the window is closed).
root.mainloop()
6738,
256,
74,
3849,
1330,
1635,
201,
198,
6738,
12972,
29302,
1330,
7444,
201,
198,
11748,
256,
74,
3849,
13,
20500,
3524,
355,
294,
71,
201,
198,
6738,
256,
74,
3849,
1330,
256,
30488,
201,
198,
11748,
3992,
40259,
201,
198,
201,
... | 2.547912 | 407 |
import torch
import cv2
import math
# PARAMETERS
# frame - frame that the function will be applied
# model - detect model that will be applied in each frame
# frame_number - number of current frame
# id_boxes - dictionary where the IDs will be saved
# id_count - counter for new IDs detected
# target_object - string for text variables
| [
11748,
28034,
201,
198,
11748,
269,
85,
17,
201,
198,
11748,
10688,
201,
198,
201,
198,
2,
29463,
2390,
2767,
4877,
201,
198,
201,
198,
2,
5739,
532,
5739,
326,
262,
2163,
481,
307,
5625,
201,
198,
2,
2746,
532,
4886,
2746,
326,
4... | 3.436893 | 103 |
# Build the fruit list, extend it in place with one more item, then show it.
listsaya = ["apel", "pisang", "pepaya"]
listsaya += ["mangga"]
print(listsaya)
20713,
11729,
28,
14692,
499,
417,
2430,
79,
271,
648,
2430,
431,
79,
11729,
8973,
198,
20713,
11729,
13,
33295,
7203,
76,
648,
4908,
4943,
198,
4798,
7,
20713,
11729,
8
] | 2.483871 | 31 |
# Find the cube root of a perfect cube
# NOTE: Python 2 syntax (raw_input / print statement).
x = int(raw_input('Enter an integer:'))
# Linear scan: stop at the first ans whose cube reaches |x|.
for ans in range(0, abs(x)+1):
    if ans**3 >= abs(x):
        break
if ans**3 != abs(x):
    print x, 'is not a perfect cube'
else:
    # A negative perfect cube has a negative root.
    if x < 0:
        ans = -ans
    print 'Cube root of', x, 'is', ans
| [
2,
9938,
262,
23441,
6808,
286,
257,
2818,
23441,
198,
87,
796,
493,
7,
1831,
62,
15414,
10786,
17469,
281,
18253,
32105,
4008,
198,
1640,
9093,
287,
2837,
7,
15,
11,
2352,
7,
87,
47762,
16,
2599,
198,
220,
220,
220,
611,
9093,
11... | 2.288 | 125 |
import numpy as np
from astropy import units as u
from astropy.convolution import convolve
from astropy.io import fits as pyfits
from astropy.nddata import Cutout2D
from astropy.wcs import WCS
from radio_beam import Beam
def fits_transfer_coordinates(fromfits, tofits):
    """
    Transfer the reference coordinates (RA and Dec) from one FITS file to
    another by copying the CRVAL1/CRVAL2 header keywords.

    Parameters
    ----------
    fromfits : str
        Path of the FITS file to read CRVAL1/CRVAL2 from.
    tofits : str
        Path of the FITS file to update in place.

    Returns
    -------
    str
        The path of the updated file (``tofits``).
    """
    with pyfits.open(fromfits) as f:
        crval1 = f[0].header['CRVAL1']
        crval2 = f[0].header['CRVAL2']
    with pyfits.open(tofits, mode='update') as hdul:
        hdul[0].header['CRVAL1'] = crval1
        hdul[0].header['CRVAL2'] = crval2
        # BUG FIX: print() does not apply %-formatting to extra arguments
        # (that is a logging-style call); interpolate explicitly.
        print('fits_transfer_coordinates: Updating fits header (CRVAL1/2) in %s' % tofits)
        hdul.flush()
    return tofits
def fits_squeeze(fitsfile, out=None):
    """
    Remove degenerate (length-1) dimensions from the image data and strip
    the corresponding axis keywords from the header.

    Parameters
    ----------
    fitsfile : str
        Input FITS file.
    out : str, optional
        Output path; when None the input file is overwritten in place.

    Returns
    -------
    str
        Path of the written file.
    """
    if out is None:
        # BUG FIX: print() does not %-format its arguments; interpolate.
        print('fits_squeeze: Overwriting file %s' % fitsfile)
        out = fitsfile
    with pyfits.open(fitsfile) as hdul:
        data = np.squeeze(hdul[0].data)
        header = hdul[0].header
        header['NAXIS'] = 2
        # Drop the axis-3/4 WCS keywords describing the removed axes.
        for i in (3, 4):
            for key in ('NAXIS', 'CTYPE', 'CRPIX', 'CRVAL', 'CDELT', 'CUNIT'):
                header.remove('{}{}'.format(key, i), ignore_missing=True,
                              remove_all=True)
        # Miscellaneous cube/WCS keywords that no longer apply to a 2-D image.
        for key in ('LTYPE', 'LSTART', 'LSTEP', 'LWIDTH', 'LONPOLE',
                    'LATPOLE', 'RESTFRQ', 'WCSAXES'):
            header.remove(key, ignore_missing=True)
        pyfits.writeto(out, data=data, header=header, overwrite=True)
    return out
def fits_reconvolve_psf(fitsfile, newpsf, out=None):
    """
    Convolve the image with the deconvolution of (newpsf, oldpsf) so that
    its effective PSF becomes ``newpsf``, and record the new beam in the
    header (BMAJ/BMIN/BPA). A no-op when the current PSF already matches.

    Parameters
    ----------
    fitsfile : str
        Input FITS file; its header must describe the current beam.
    newpsf : radio_beam.Beam
        Target point-spread function.
    out : str, optional
        Output path; when None the input file is overwritten in place.

    Returns
    -------
    str
        Path of the written file.
    """
    newparams = newpsf.to_header_keywords()
    if out is None:
        # BUG FIX: print() does not %-format its arguments; interpolate.
        print('fits_reconvolve: Overwriting file %s' % fitsfile)
        out = fitsfile
    with pyfits.open(fitsfile) as hdul:
        hdr = hdul[0].header
        currentpsf = Beam.from_fits_header(hdr)
        print(currentpsf)
        print(newpsf)
        if currentpsf != newpsf:
            # Kernel that takes the current PSF to the requested one;
            # pixel scale is taken from the CDELT2 header keyword (deg).
            kern = newpsf.deconvolve(currentpsf).as_kernel(pixscale=hdr['CDELT2']*u.deg)
            hdr.set('BMAJ', newparams['BMAJ'])
            hdr.set('BMIN', newparams['BMIN'])
            hdr.set('BPA', newparams['BPA'])
            hdul[0].data = convolve(hdul[0].data, kern)
        pyfits.writeto(out, data=hdul[0].data, header=hdr, overwrite=True)
    return out
def fits_operation(fitsfile, other, operation='-', out=None):
    """
    Apply an element-wise arithmetic operation between a FITS image and
    another FITS file / array / scalar, keeping the original header.

    Parameters
    ----------
    fitsfile : str
        Input FITS file (left operand).
    other : str or numpy.ndarray or number
        Right operand: a FITS file path, an array, or a real scalar.
    operation : {'-', '+', '*', '/'}
        Arithmetic operation to apply.
    out : str, optional
        Output path; when None the input file is overwritten in place.

    Returns
    -------
    str
        Path of the written file.

    Raises
    ------
    TypeError
        If ``other`` is not a path, array or real number.
    ValueError
        If ``operation`` is not one of '-', '+', '*', '/'.
    """
    if out is None:
        # BUG FIX: print() does not %-format its arguments; interpolate.
        print('fits_operation: Overwriting file %s' % fitsfile)
        out = fitsfile
    if isinstance(other, str):
        other_data = pyfits.getdata(other)
    elif isinstance(other, (np.ndarray, int, float, np.floating)):
        # BUG FIX: np.float was deprecated in NumPy 1.20 and removed in
        # 1.24, so the old isinstance(other, np.float) check crashed on
        # modern NumPy; accept any Python or NumPy real scalar instead.
        other_data = other
    else:
        # Previously an unsupported operand fell through and raised a
        # confusing NameError later; fail fast with a clear message.
        raise TypeError('fits_operation: unsupported operand type %r' % type(other))
    with pyfits.open(fitsfile) as hdul:
        data = hdul[0].data
        if operation == '-':
            print('fits_operation: Subtracting data')
            data -= other_data
        elif operation == '+':
            print('fits_operation: Adding data')
            data += other_data
        elif operation == '*':
            print('fits_operation: Multiplying data')
            data *= other_data
        elif operation == '/':
            print('fits_operation: Dividing data')
            data /= other_data
        else:
            # Previously an unknown operation silently wrote the data back
            # unchanged; make the misuse explicit.
            raise ValueError('fits_operation: unknown operation %r' % operation)
        pyfits.writeto(out, data=data, header=hdul[0].header, overwrite=True)
    return out
| [
11748,
299,
32152,
355,
45941,
198,
6738,
6468,
28338,
1330,
4991,
355,
334,
198,
6738,
6468,
28338,
13,
42946,
2122,
1330,
3063,
6442,
198,
6738,
6468,
28338,
13,
952,
1330,
11414,
355,
12972,
21013,
198,
6738,
6468,
28338,
13,
358,
78... | 2.167513 | 1,773 |
# imports
import streamlit as st
import cx_Oracle
from Energy_reliability import *
from Energy_CO2 import *
from Energy_fuel import *
from Energy_sales import *
from Energy_reliability import *
from PIL import Image
# vars
# Two-letter US state abbreviations; the first (empty) entry means
# "no selection" in the UI.
states = ["""""", """AL""", """AK""", """AZ""", """AR""", """CA""", """CO""", """CT""", """DE""", """FL""", """GA""",
          """HI""", """ID""", """IL""", """IN""", """IA""", """KS""", """KY""", """LA""", """ME""", """MD""", """MA""",
          """MI""", """MN""", """MS""", """MO""", """MT""", """NE""", """NV""", """NH""", """NJ""", """NM""", """NY""",
          """NC""", """ND""", """OH""", """OK""", """OR""", """PA""", """RI""", """SC""", """SD""", """TN""", """TX""",
          """UT""", """VT""", """VA""", """WA""", """WV""", """WI""", """WY"""]
# Oracle connection
# NOTE(review): database credentials are hard-coded in source; move them
# to configuration or environment variables.
dsn = cx_Oracle.makedsn("oracle.cise.ufl.edu", 1521, service_name="orcl")
userpwd = "ApexLegend2020"
#################
# Functions
#################
#######################
# Sidebar menu#
#######################
# imports
# Streamlit Display
# NOTE(review): st.title is a Streamlit *function*; this assignment
# replaces it with a string instead of setting the page title —
# presumably st.title('Energy Industry') was intended. Confirm.
st.title = 'Energy Industry'
image = Image.open('database_project_banner.jpg')
st.image(image, use_column_width=True)
# functions
# Maps the sidebar choice to its display callable (brought in by the
# star imports from the Energy_* modules above).
functionDict = {
    'Fuel Utilization': getFuelTrendCall,
    'CO2 Change': displayCO2Call,
    'CO2 State Comparison': displayCO2byStateCall,
    'Grid Reliability': displayReliabilityCall,
    'Sales': displaySalesCall
}
menu = st.sidebar.radio('Pick a topic', ('Fuel Utilization', 'CO2 Change', 'CO2 State Comparison', 'Grid Reliability','Sales'))
functionCall = functionDict[menu]
functionCall()
| [
2,
17944,
201,
198,
11748,
4269,
18250,
355,
336,
201,
198,
11748,
43213,
62,
48625,
201,
198,
6738,
6682,
62,
2411,
12455,
1330,
1635,
201,
198,
6738,
6682,
62,
8220,
17,
1330,
1635,
201,
198,
6738,
6682,
62,
25802,
1330,
1635,
201,
... | 2.609873 | 628 |
from __future__ import print_function
from keras import backend
import numpy as np
from keras import layers
from keras.layers import (Activation, BatchNormalization, AveragePooling2D,
Conv2D, Dense, Flatten, Input, MaxPooling2D,
ZeroPadding2D,GlobalAveragePooling2D)
from keras.models import Model
# NOTE(review): ResNet50 is not defined in this chunk — presumably the
# model builder defined elsewhere in the file.
if __name__ == '__main__':
    model = ResNet50()
    model.summary()
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
6738,
41927,
292,
1330,
30203,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
41927,
292,
1330,
11685,
198,
6738,
41927,
292,
13,
75,
6962,
1330,
357,
25526,
341,
11,
220,
347,
... | 2.44 | 175 |
from .ble import BLEFramer
from .driver import DriverFramer
from .tirtos import TIRTOSFramer | [
6738,
764,
903,
1330,
347,
2538,
21055,
263,
198,
6738,
764,
26230,
1330,
12434,
21055,
263,
198,
6738,
764,
83,
2265,
418,
1330,
309,
48771,
2640,
21055,
263
] | 3.285714 | 28 |
from typing import Any, Callable
from browser import document # type: ignore # pylint: disable=import-error
from barde.display import call_passage
from barde.state import STORAGE, State
from barde.interface import (
hide_sidebar,
select_style,
open_restart_confirm,
close_restart_confirm,
)
from barde.save import close_save_menu, open_save_menu, render_save_list
import barde.globals as globs
# Type aliases: story state is deliberately untyped; a passage is any
# callable invoked for its side effects (it renders a page, returns None).
StateType = Any
Passage = Callable[..., None]
| [
6738,
19720,
1330,
4377,
11,
4889,
540,
198,
198,
6738,
6444,
1330,
3188,
220,
1303,
2099,
25,
8856,
1303,
279,
2645,
600,
25,
15560,
28,
11748,
12,
18224,
198,
198,
6738,
275,
45093,
13,
13812,
1330,
869,
62,
6603,
496,
198,
6738,
... | 2.955414 | 157 |
"""
Imperial Properties:
Module to assign and access the most common properties of water
used in fluid mechanics in english units
"""
# H2O properties in imperial (US customary) units at 50 deg Celsius.
density = 1.94  # slugs/ft^3
specific_weight = 62.4  # lbf/ft^3
dynamic_viscosity = 2.73e-5  # lb*s/ft^2
kinematic_viscosity = 1.407e-5  # ft^2/s
boiling_point = 212  # F
freezing_point = 32  # F
vapor_pressure = 1.781e-1  # psi (absolute)
speed_of_sound = 4748  # ft/s
g = 32.2  # ft/s^2


def unit_info():
    """Print the default water properties together with their units.

    Echoes the module-level constants above (density, specific weight,
    viscosities, boiling/freezing points, vapor pressure, speed of sound
    and gravitational acceleration) to stdout. Returns None.
    """
    print(f"""
    density = {density} slugs/ft^3
    specific_weight = {specific_weight} lb/ft^3
    dynamic_viscosity = {dynamic_viscosity} lb*s/ft^2
    kinematic_viscosity = {kinematic_viscosity} ft^2/s
    boiling_point = {boiling_point} F
    freezing_point = {freezing_point} F
    vapor_pressure = {vapor_pressure} psia
    speed_of_sound = {speed_of_sound} ft/s
    g = {g} ft/s^2
    """)
| [
37811,
198,
3546,
7629,
24946,
25,
198,
220,
220,
220,
19937,
284,
8333,
290,
1895,
262,
749,
2219,
6608,
286,
1660,
198,
220,
220,
220,
973,
287,
11711,
12933,
287,
46932,
4991,
198,
37811,
198,
198,
2,
367,
1238,
6608,
287,
14312,
... | 2.002743 | 729 |
import numpy as np
def pass_cut_based_id(df, working_point):
    """Checks which electrons pass a given cut-based working point.

    Parameters
    ----------
    df : pandas.DataFrame
        A data frame with electron data.
    working_point: str
        The name of the working point, i.e. ``veto``, ``loose``, ``medium`` or ``tight``.

    Returns
    -------
    pandas.Series
        The ID decisions for each electron.

    Raises
    ------
    ValueError
        If ``working_point`` is not one of the known working points.

    Notes
    -----
    * Check the NanoAOD documentation for the ``Electron_cutBased`` branch (here for the latest 102X campaign [1]) to know which ID is used

    References
    ----------
    [1] https://cms-nanoaod-integration.web.cern.ch/integration/master-102X/mc102X_doc.html#Electron
    """
    working_points = ["veto", "loose", "medium", "tight"]
    # Idiom fix: `x not in y` instead of `not x in y`.
    if working_point not in working_points:
        raise ValueError('working_point has to be any of "' + '", "'.join(working_points) + '".')
    # Electron_cutBased is an integer tightness level; comparing it against
    # the working point's list index selects electrons at least that tight
    # (see the NanoAOD reference above).
    return df["Electron_cutBased"] > working_points.index(working_point)
def supercluster_eta(df):
    r"""Return the supercluster :math:`\eta` for each electron.

    NanoAOD stores only the offset ``Electron_deltaEtaSC`` between the
    supercluster :math:`\eta` and the electron :math:`\eta` [1], so the
    supercluster position is recovered by adding the electron
    :math:`\eta` back onto that offset.

    Parameters
    ----------
    df : pandas.DataFrame
        A data frame with electron data.

    Returns
    -------
    pandas.Series
        The supercluster :math:`\eta` of each electron.

    Notes
    -----
    * It's not clear what is stored as the supercluster :math:`\eta` in case there was no supercluster (tracker-driven electron)
    * This function was written for NanoAOD produced with 102X in the ``Nano14Dec2018`` campaign [2]

    References
    ----------
    [1] https://github.com/cms-sw/cmssw/blob/CMSSW_10_2_X/PhysicsTools/NanoAOD/python/electrons_cff.py#L320
    [2] https://cms-nanoaod-integration.web.cern.ch/integration/master-102X/mc102X_doc.html#Electron
    """
    return df["Electron_eta"] + df["Electron_deltaEtaSC"]
def is_endcap(df):
    r"""Check which electrons were measured in the ECAL endcaps.

    An electron counts as endcap when its absolute supercluster
    :math:`\eta` exceeds 1.479, the boundary value also used within the
    EGM POG [1].

    Parameters
    ----------
    df : pandas.DataFrame
        A data frame with electron data.

    Returns
    -------
    pandas.Series
        True for each electron whose supercluster lies in an endcap.

    References
    ----------
    [1] https://github.com/cms-sw/cmssw/blob/CMSSW_10_2_X/RecoEgamma/ElectronIdentification/python/Identification/cutBasedElectronID_tools.py#L5
    """
    sc_eta = supercluster_eta(df)
    return sc_eta.abs() > 1.479
| [
11748,
299,
32152,
355,
45941,
628,
198,
4299,
1208,
62,
8968,
62,
3106,
62,
312,
7,
7568,
11,
1762,
62,
4122,
2599,
198,
220,
220,
220,
37227,
7376,
4657,
543,
28722,
1208,
257,
1813,
2005,
12,
3106,
1762,
966,
13,
628,
220,
220,
... | 2.766105 | 1,009 |
# Copyright 2016-2020, Pulumi Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from typing import Optional
import pulumi
import pulumi._types as _types
# Maps camelCase wire/property names to the snake_case Python attribute
# names expected by the @pulumi.output_type machinery under test.
CAMEL_TO_SNAKE_CASE_TABLE = {
    "firstValue": "first_value",
    "secondValue": "second_value",
}
@pulumi.output_type
@pulumi.output_type
@pulumi.output_type
@pulumi.output_type
@pulumi.output_type
@pulumi.output_type
@pulumi.output_type
@pulumi.output_type
| [
2,
15069,
1584,
12,
42334,
11,
21624,
12994,
10501,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
... | 3.131579 | 304 |
#!/usr/bin/env python3
#
# Converter from Keras sequential NN to JSON
"""____________________________________________________________________
Variable specification file
In additon to the standard Keras architecture and weights files, you
must provide a "variable specification" json file with the following
format:
{
"inputs": [
{"name": variable_name,
"scale": scale,
"offset": offset,
"default": default_value},
...
],
"class_labels": [output_class_1_name, output_class_2_name, ...],
"miscellaneous": {"key": "value"}
}
where `scale` and `offset` account for any scaling and shifting to the
input variables in preprocessing. The "default" value is optional.
The "miscellaneous" object is also optional and can contain (key,
value) pairs of strings to pass to the application.
"""
import argparse
import json
import h5py
from collections import Counter
import sys
import importlib
from keras_layer_converters_common import skip_layers
def _run():
    """Top level routine: load the architecture and variable-spec JSON,
    validate them, then dump the lwtnn network JSON to stdout."""
    args = _get_args()
    with open(args.arch_file, 'r') as fh:
        arch = json.load(fh)
    with open(args.variables_file, 'r') as fh:
        inputs = json.load(fh)
    _check_version(arch)
    if arch["class_name"] != "Sequential":
        sys.exit("this is not a Sequential model, try using kerasfunc2json")
    with h5py.File(args.hdf5_file, 'r') as h5:
        # A top-level "model_weights" group means the whole model (not just
        # the weights) was saved, which this converter cannot consume.
        if "model_weights" in h5:
            sys.exit("The weight file has been saved incorrectly.\n"
                     "Please see https://github.com/lwtnn/lwtnn/wiki/Keras-Converter#saving-keras-models \n"
                     "on how to correctly save weights.")
        out_dict = {
            'layers': _get_layers(arch, inputs, h5),
        }
        out_dict.update(_parse_inputs(inputs))
        print(json.dumps(out_dict, indent=2, sort_keys=True))
# __________________________________________________________________________
# master layer converter / inputs function
# Script entry point.
if __name__ == '__main__':
    _run()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
198,
2,
35602,
353,
422,
17337,
292,
35582,
399,
45,
284,
19449,
198,
37811,
27193,
1427,
198,
43015,
20855,
2393,
198,
198,
818,
751,
37752,
284,
262,
3210,
17337,
292,
10959,
... | 2.777036 | 749 |
default_app_config = 'froide.frontpage.apps.FrontpageConfig'
| [
12286,
62,
1324,
62,
11250,
796,
705,
69,
305,
485,
13,
8534,
7700,
13,
18211,
13,
25886,
7700,
16934,
6,
198
] | 2.904762 | 21 |
from Analysis import youra
from VirtualBox import scheduler
from os.path import join, abspath
from os import getcwd
from time import sleep
import mysql.connector as mariadb
import logging
from subprocess import call
from shutil import copy
# Log everything from DEBUG up, tab-separated level and message.
logging.basicConfig(format='%(levelname)s:\t%(message)s', level=logging.DEBUG)
# NOTE(review): empty root password and plaintext credentials in source.
con = mariadb.connect(user='root', password='',
                      host='localhost', database='ama')
c = con.cursor()
# Poll loop: repeatedly pick the oldest unfinished task and analyse it.
# NOTE: Python 2 syntax (print statements).
while True:
    c.execute(
        'SELECT filename,tid from tasks where tid=(SELECT min(tid) from tasks where finished=0)')
    res = c.fetchone()
    if res:
        fname = res[0]
        fullpath = join(abspath("Web/uploads"), fname)
        # Rewrite Analysis/const.py in place so the yara pass picks up the
        # sample path and task id (lines 2 and 3 of that file).
        f = open(abspath('Analysis/const.py'), 'r')
        lines = f.readlines()
        lines[1] = 'FILE_TO_SCAN = "' + str(fullpath) + '"\n'
        tid = res[1]
        lines[2] = 'TID = ' + str(tid) + '\n'
        f.close()
        f = open(abspath('Analysis/const.py'), 'w')
        f.writelines(lines)
        f.close()
        print 'applying yara'
        youra.applyara()
        # Same trick for the VM driver: point it at the sample and the
        # destination path inside the guest.
        f = open(abspath('VirtualBox/const.py'), 'r')
        lines = f.readlines()
        lines[2] = 'SOURCE_PATH = "' + str(fullpath) + '"\n'
        lines[4] = 'DEST_PATH = "C:/Project/' + str(fname) + '"\n'
        f.close()
        f = open(abspath('VirtualBox/const.py'), 'w')
        f.writelines(lines)
        f.close()
        print 'scheduling'
        scheduler.schedule()
        # NOTE(review): piping a hard-coded password into `sudo -S` and
        # running `chmod 777 -R` on the whole tree are serious security
        # problems; replace with proper privilege separation.
        pwd = 'your_system_password'
        cmd = 'chmod 777 -R ' + getcwd()
        perm = call('echo {} | sudo -S {}'.format(pwd, cmd), shell=True)
        print 'apllying suri'
        # Run suricata over the pcap captured during the VM run.
        cmd = 'suricata -c /etc/suricata/suricata.yaml -r ' + \
            abspath('VirtualBox/gen/ama.pcap')
        suri = call('echo {} | sudo -S {}'.format(pwd, cmd), shell=True)
        copy(abspath('Analysis/logs/fast.log'),
             abspath('Analysis/logs/'+str(tid)+'net.log'))
        # Convert the VirtualBox core dump to a raw image, then yara-scan
        # the guest memory with volatility.
        imageconv = call([abspath('Analysis/volatility/vol.py'), '-f', abspath(
            'VirtualBox/gen/coredump'), '--profile=Win7SP1x64', 'imagecopy', '-O', abspath('VirtualBox/gen/copy.raw')])
        volatil = call([abspath('Analysis/volatility/vol.py'), '-f', abspath('VirtualBox/gen/copy.raw'), '--profile=Win7SP1x64',
                        'yarascan', '-y', abspath('Analysis/malware_rules.yar'), '--output-file=./Analysis/logs/'+str(tid)+'vmem.txt'])
        # Mark the task done so the next poll picks a fresh one.
        c.execute('UPDATE tasks SET finished = 1 where tid=%s' % tid)
        con.commit()
    else:
        print('Nothing left. Sleeping')
        sleep(300)
| [
6738,
14691,
1330,
345,
430,
198,
6738,
15595,
14253,
1330,
6038,
18173,
198,
6738,
28686,
13,
6978,
1330,
4654,
11,
2352,
6978,
198,
6738,
28686,
1330,
651,
66,
16993,
198,
6738,
640,
1330,
3993,
198,
11748,
48761,
13,
8443,
273,
355,
... | 2.150465 | 1,183 |
"""Install hyperglass."""
# Standard Library
import os
import shutil
from filecmp import dircmp
from pathlib import Path
# Third Party
import inquirer
# Local
from .echo import error, success, warning
from .util import create_dir
# Candidate install locations: per-user (~/hyperglass) or system-wide.
USER_PATH = Path.home() / "hyperglass"
ROOT_PATH = Path("/etc/hyperglass/")
# Static assets shipped with the package (source of truth for migration).
ASSET_DIR = Path(__file__).parent.parent / "images"
# Files ignored when comparing asset directories.
IGNORED_FILES = [".DS_Store"]
# Interactive prompt definition consumed by prompt_for_path().
INSTALL_PATHS = [
    inquirer.List(
        "install_path",
        message="Choose a directory for hyperglass",
        choices=[USER_PATH, ROOT_PATH],
    )
]
def prompt_for_path() -> str:
    """Prompt the user for an app path until one is provided.

    Returns the chosen install path (one of the ``INSTALL_PATHS``
    choices, i.e. ``USER_PATH`` or ``ROOT_PATH``).
    """
    # BUG FIX: the previous recursive version assigned the *path* returned
    # by the recursive call back to `answer` and then indexed it with
    # ["install_path"] again, raising TypeError on every retry.
    answer = inquirer.prompt(INSTALL_PATHS)
    while answer is None:
        warning("A directory for hyperglass is required")
        answer = inquirer.prompt(INSTALL_PATHS)
    return answer["install_path"]
class Installer:
    """Install hyperglass."""

    def __init__(self, unattended: bool):
        """Initialize installer.

        Parameters
        ----------
        unattended : bool
            When True, skip every interactive prompt and fall back to the
            per-user default install path.
        """
        self.unattended = unattended

    def install(self) -> None:
        """Complete the installation: resolve the app path, create the
        directory layout, then sync the static assets into it."""
        self.app_path = self._get_app_path()
        self._scaffold()
        self._migrate_static_assets()

    def _get_app_path(self) -> Path:
        """Find the app path from env variables or a prompt."""
        if self.unattended:
            return USER_PATH
        app_path = os.environ.get("HYPERGLASS_PATH", None)
        if app_path is None:
            return prompt_for_path()
        # BUG FIX: environment variables are plain strings, but callers use
        # the result with pathlib's `/` operator (see _scaffold), which
        # raises TypeError on str — coerce to Path.
        return Path(app_path)

    def _scaffold(self) -> None:
        """Create the file structure necessary for hyperglass to run."""
        static = self.app_path / "static"
        ui_dir = static / "ui"
        images_dir = static / "images"
        favicon_dir = images_dir / "favicons"
        custom_dir = static / "custom"
        create_dir(self.app_path)
        for path in (ui_dir, images_dir, favicon_dir, custom_dir):
            create_dir(path, parents=True)

    def _migrate_static_assets(self) -> bool:
        """Synchronize the project assets with the installation assets.

        Returns True when the installed assets match the packaged ones
        after migration, False otherwise.
        """
        target_dir = self.app_path / "static" / "images"

        if not target_dir.exists():
            shutil.copytree(ASSET_DIR, target_dir)

        # Compare the contents of the project's asset directory (considered
        # the source of truth) with the installation directory. If they do
        # not match, delete the installation directory's asset directory and
        # re-copy it.
        compare_initial = dircmp(ASSET_DIR, target_dir, ignore=IGNORED_FILES)

        if not compare_initial.left_list == compare_initial.right_list:
            shutil.rmtree(target_dir)
            shutil.copytree(ASSET_DIR, target_dir)

        # Re-compare the source and destination directory contents to
        # ensure they match.
        compare_post = dircmp(ASSET_DIR, target_dir, ignore=IGNORED_FILES)

        if not compare_post.left_list == compare_post.right_list:
            error(
                "Files in {a} do not match files in {b}",
                a=str(ASSET_DIR),
                b=str(target_dir),
            )
            return False

        success("Migrated assets from {a} to {b}", a=str(ASSET_DIR), b=str(target_dir))
        return True
| [
37811,
15798,
8718,
20721,
526,
15931,
198,
198,
2,
8997,
10074,
198,
11748,
28686,
198,
11748,
4423,
346,
198,
6738,
2393,
48991,
1330,
288,
1980,
3149,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
2,
10467,
3615,
198,
11748,
38212,
8... | 2.383788 | 1,394 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import datetime
import threading
from applicationinsights import TelemetryClient
from applicationinsights.exceptions import enable
from azclishell import __version__
from azure.cli.core._profile import Profile
from azure.cli.core.telemetry import _user_agrees_to_telemetry
INSTRUMENTATION_KEY = '762871d5-45a2-4d67-bf47-e396caf53d9d'
def my_context(tel_client):
    """Populate the telemetry context for the shell application.

    Sets the application id/version, the per-installation user id and the
    instrumentation key on the client's context object.
    """
    ctx = tel_client.context
    ctx.application.id = 'Azure CLI Shell'
    ctx.application.ver = __version__
    ctx.user.id = Profile().get_installation_id()
    ctx.instrumentation_key = INSTRUMENTATION_KEY
class Telemetry(TelemetryClient):
    """Application-Insights telemetry session for the interactive shell."""
    # Wall-clock timestamps (stringified datetimes) bracketing the session;
    # filled in by start()/conclude().
    start_time = None
    end_time = None

    def track_ssg(self, gesture, cmd):
        """Track a shell-specific gesture, recording the command it ran on."""
        self.track_event('Shell Specific Gesture', {gesture : cmd})

    def track_key(self, key):
        """Track a special key-binding press."""
        self.track_event('Key Press', {"key": key})

    @_user_agrees_to_telemetry
    def start(self):
        """Record the session start time (no-op unless the user opted in)."""
        self.start_time = str(datetime.datetime.now())

    @_user_agrees_to_telemetry
    def conclude(self):
        """Record the end time, emit the 'Run' event and flush on a
        background thread so shell exit is not blocked (opt-in only)."""
        self.end_time = str(datetime.datetime.now())
        self.track_event('Run', {'start time' : self.start_time,
                                 'end time' : self.end_time})
        thread1 = TelThread(self.flush)
        thread1.start()
class TelThread(threading.Thread):
    """ telemetry thread for exiting """
    # NOTE(review): no __init__/run are visible in this chunk — presumably
    # this thread wraps the flush callable passed in conclude(); confirm.


# Module-level singleton client: create it, enable unhandled-exception
# telemetry for the instrumentation key, and fill in the shell context.
TC = Telemetry(INSTRUMENTATION_KEY)
enable(INSTRUMENTATION_KEY)
my_context(TC)
| [
2,
16529,
1783,
10541,
198,
2,
15069,
357,
66,
8,
5413,
10501,
13,
1439,
2489,
10395,
13,
198,
2,
49962,
739,
262,
17168,
13789,
13,
4091,
13789,
13,
14116,
287,
262,
1628,
6808,
329,
5964,
1321,
13,
198,
2,
16529,
1783,
10541,
628,... | 2.997072 | 683 |
from django.conf import settings
from django.template import Origin, TemplateDoesNotExist
from django.template.loaders.base import Loader as BaseLoader
from post_office.utils import get_email_template
from unicef_notification.models import EmailTemplate
EMAIL_TEMPLATE_PREFIX = getattr(
settings,
"UNICEF_NOTIFICATION_EMAIL_TEMPLATE_PREFIX",
'email-templates/'
)
| [
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
28243,
1330,
19349,
11,
37350,
13921,
3673,
3109,
396,
198,
6738,
42625,
14208,
13,
28243,
13,
2220,
364,
13,
8692,
1330,
8778,
263,
355,
7308,
17401,
198,
198,
673... | 2.960938 | 128 |
from ...isa.inst import *
import numpy as np
| [
6738,
2644,
9160,
13,
8625,
1330,
1635,
198,
11748,
299,
32152,
355,
45941,
198,
220,
220,
220,
220,
220,
220,
220,
198
] | 2.409091 | 22 |
"""The User class both validates the registration payload (with the validation class) and is an interface to the DB."""
from unicodedata import normalize
from api_app.db import Base, db_session
from api_app.validation import RegistrationError, Email, PayloadError, validate_password
from api_app.validation import String as StringValidator # To prevent namespace collision with sqlalchemy String
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin, login_user, current_user
from sqlalchemy import Column, Integer, String
class User(UserMixin, Base):
    """Represents a user, includes extensive validation at initialisation."""
    __tablename__ = 'user'
    # Persistent columns.
    id = Column(Integer, primary_key=True)
    username = Column(String(64), index=True, unique=True)
    email = Column(String(64), index=True, unique=True)
    password_hash = Column(String(128))
    'Not as elegant as in questionnaire, but I do not know how to use both db.Column and the validations classes.'
    # Validation descriptors: assigning to self._username / self._email in
    # validate_payload() triggers their format/length checks.
    _username = StringValidator(minsize=1, maxsize=64)
    _email = Email()

    def __init__(self, **kwargs):
        """ Initialises and validates new user.

        Checks if
        1) user is already logged in
        2) parameters are valid
        3) username or email is already in database

        and normalises the username string and sets the password as a hash.
        Database commit and login is only triggered by calling register().
        """
        '1) is user already logged in?'
        if current_user.is_authenticated:
            raise RegistrationError("Already authenticated.")
        '2) parameter validation:'
        self.validate_payload(**kwargs)
        'Init:'
        # NFC-normalise so visually identical usernames compare equal.
        self.username = normalize('NFC', self._username)
        self.email = self._email
        # NOTE(review): set_password is not visible in this chunk —
        # presumably a generate_password_hash wrapper defined further down.
        self.set_password(kwargs['password'])
        '3) registration validation:'
        self.validate_registration()

    def validate_payload(self, **kwargs):
        """
        Validates the payload.
        """
        if not kwargs:
            raise PayloadError('''Invalid data. Must be of type application/json and contain the following fields:
                username: string,
                email: string,
                password: string
                ''')
        try:
            self._username = kwargs.get('username', None)
            self._email = kwargs.get('email', None)
            validate_password(kwargs.get('password', None))
        except (ValueError, PayloadError) as e:
            raise PayloadError(str(e))
        except Exception as e:
            # NOTE(review): broad catch hides unexpected failures; the
            # original error is only echoed to stdout.
            print(e)
            raise PayloadError('''Invalid registration: Must be of format:
                "username": String,
                "password": String
                ''')

    def validate_registration(self):
        """Checks if the username or the email already exists."""
        try:
            session = db_session()
            if session.query(User).filter_by(username=self.username).first() is not None:
                raise RegistrationError("Username is already taken.")
            if session.query(User).filter_by(email=self.email).first() is not None:
                raise RegistrationError("Email is already registered.")
        except ValueError as e:
            raise RegistrationError(str(e))

    def register(self):
        """Save the user instance in the database and log the user in."""
        db_session.add(self)
        db_session.commit()
        login_user(self)
| [
37811,
464,
11787,
1398,
1111,
4938,
689,
262,
9352,
21437,
357,
4480,
262,
21201,
1398,
8,
290,
318,
281,
7071,
284,
262,
20137,
526,
15931,
198,
6738,
28000,
9043,
1045,
1330,
3487,
1096,
198,
6738,
40391,
62,
1324,
13,
9945,
1330,
... | 2.474472 | 1,469 |
import re
import os
import string
import pandas as pd
from lexicalAnalyzer import getTockens
writeLines=[]
ids = []
if __name__=="__main__":
    # Parse the grammar file into a productions map {lhs: set(rhs symbols)},
    # remembering the first LHS seen as the start symbol.
    productions=dict()
    grammar = readFile('grammar', 'r')
    first_dict = dict()
    follow_dict = dict()
    flag = 1
    start = ""
    for line in grammar:
        l = re.split("( |->|\n|\||)*", line)
        lhs = l[0]
        rhs = set(l[1:-1])-{''}
        if flag :
            flag = 0
            start = lhs
        productions[lhs] = rhs
    # Compute and display the FIRST sets.
    print ('\nFirst\n')
    for lhs in productions:
        first_dict[lhs] = first(lhs, productions)
    for f in first_dict:
        print (str(f) + " : " + str(first_dict[f]))
        print ("")
    # Compute FOLLOW sets ($ marks end-of-input for the start symbol);
    # the pass is run twice so follow sets propagate across productions.
    print ('\nFollow\n')
    for lhs in productions:
        follow_dict[lhs] = set()
    follow_dict[start] = follow_dict[start].union('$')
    for lhs in productions:
        follow_dict = follow(lhs, productions, follow_dict)
    for lhs in productions:
        follow_dict = follow(lhs, productions, follow_dict)
    for f in follow_dict:
        print (str(f) + " : " + str(follow_dict[f]))
    # Build the LL(1) parsing table and parse each tokenised input line.
    ll1Table = ll1(follow_dict, productions)
    try:
        for idx, line in enumerate(getTockens()):
            print('Stack Current input')
            parse(line, start, ll1Table, idx)
            print('\n')
        python = readFile('example.py', 'w+')
        # write the main entry point of the generated program
        python.write('if __name__ == "__main__": \n')
        for j in ids:
            python.write('\t \t ' + j + ' = 0 \n')
            python.write("\t \t print('ingrese un valor para la variable: %s') \n" %j)
            python.write('\t \t ' + j + "=input() \n")
        python.write("\t \t print('Los resultados de las operaciones son:') \n")
        for i in writeLines:
            python.write("\t \t print('%s' + ' = ' + str(%s)) \n" %(i,i))
            # python.write('\t \t print('+i+')\n')
        python.close()
        # Run the generated program.
        os.system('python example.py')
    except Exception as e:
        print (" Sintax not accepted in line: "+ str(e))
198,
11748,
302,
198,
11748,
28686,
198,
11748,
4731,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
31191,
605,
37702,
9107,
1330,
651,
51,
735,
641,
198,
13564,
43,
1127,
28,
21737,
198,
2340,
796,
17635,
198,
198,
361,
11593,
367... | 2.371467 | 743 |
import os
import glob
import hashlib
import random, string
import time
# Paths and filenames for the pending audio/video merge job.
videoInfo = {'path': "", 'audio_name': "", 'video_name': "", 'final_name': ""}
##Sample videoInfo
#videoInfo = {'final_name': u'C:\\Users\\maleficarium/Desktop\\- -bMftWj-e2T4.mp4', 'path': 'C:\\Users\\maleficarium/Desktop/', 'video_name': 'C:\\Users\\maleficarium/Desktop/937bcebcc1c100815b2c75536ee6724afd07ec61f07c76d6c670ae23.mp4', 'audio_name': 'C:\\Users\\maleficarium/Desktop/4cf3614c8e8619703ed6c6ddfaddd19d31ba4ad4cd600d301123ce28.m4a'}
#Hash the file names to drop unicode characters for FFMPEG
#Merge the video files and rename the output to the stored filename.
# NOTE(review): coreMerge is not defined in this chunk — presumably the
# merge driver defined elsewhere in the file.
if __name__ == '__main__':
    coreMerge()
11748,
28686,
198,
11748,
15095,
198,
11748,
12234,
8019,
198,
11748,
4738,
11,
4731,
198,
11748,
640,
198,
198,
15588,
12360,
796,
1391,
6,
6978,
10354,
366,
1600,
705,
24051,
62,
3672,
10354,
366,
1600,
705,
15588,
62,
3672,
10354,
36... | 2.516364 | 275 |
import json
import os
import pickle
import collections
import numpy
import yaml
from chemdataextractor.nlp import ChemSentenceTokenizer
from sklearn import ensemble
from sklearn.ensemble.forest import _generate_unsampled_indices, _generate_sample_indices
from sklearn.feature_extraction import DictVectorizer
from sklearn.preprocessing import LabelEncoder
from synthesis_paragraph_classifier.topics import LightLDAInference
class SynthesisClassifier(object):
    """
    !WARNING!: Careful when editing this class. You might destroy all the pickle'd classifiers.
    """

    @staticmethod
    def load_pickle(s):
        """Deserialize a pickled classifier blob.

        SECURITY: ``pickle.loads`` executes arbitrary code during
        deserialization — only call this on trusted, locally produced data.

        :param s: bytes produced by ``pickle.dumps`` of a classifier
        :return: the reconstructed classifier object
        :rtype: SynthesisClassifier
        """
        return pickle.loads(s)
| [
11748,
33918,
198,
11748,
28686,
198,
11748,
2298,
293,
198,
198,
11748,
17268,
198,
11748,
299,
32152,
198,
11748,
331,
43695,
198,
6738,
4607,
7890,
2302,
40450,
13,
21283,
79,
1330,
12870,
31837,
594,
30642,
7509,
198,
6738,
1341,
3572... | 2.952569 | 253 |
import requests
import pprint
pp = pprint.PrettyPrinter(indent=4)
BASE = "http://127.0.0.1:5000/"
user_id = '00001'
image_name = 'halyard-0-9-14-aosp-master-aosp-cf-x86-phone-userdebug-6796612'
images = Resource('image-list')
instances = Resource('instance-list')
image_body = {"build_instance": "my-build", "tags": ['kradtke-ssh'], "respin": True}
instance_body = {"sig_server_port": 8444, "tags": ['kradtke-ssh']}
# Test Image List Endpoints
images.get('')
images.post(image_body)
# Test Instance List Endpoints
instances.get('')
instances.post(instance_body) | [
11748,
7007,
198,
11748,
279,
4798,
198,
381,
796,
279,
4798,
13,
35700,
6836,
3849,
7,
521,
298,
28,
19,
8,
198,
198,
33,
11159,
796,
366,
4023,
1378,
16799,
13,
15,
13,
15,
13,
16,
25,
27641,
30487,
628,
198,
7220,
62,
312,
79... | 2.581818 | 220 |
import numpy as np
import torch
from tqdm import tqdm
import random
import os
from sklearn.preprocessing import normalize
def get2diff_below(number):
"""
:param low: include low
:param high: include high
:return: two different number
"""
first = random.randint(0, number - 1)
offset = random.randint(1, number - 1)
second = (first + offset) % number
return first + 1, second + 1
feature_map = {}
for i in range(1, 31):
feature_map[i] = np.load("../../feature/train/" + str(i) + ".npy")
pos1 = []
pos2 = []
neg = []
for i in tqdm(range(10000)):
cate1, cate2 = get2diff_below(30)
cate1_list = feature_map[cate1]
cate2_list = feature_map[cate2]
cate1_len = len(cate1_list)
cate2_len = len(cate2_list)
cate1_line = random.randint(1, cate1_len)
cate2_line1, cate2_line2 = get2diff_below(cate2_len)
cate1_line_tensor = cate1_list[cate1_line - 1]
cate2_line1_tensor = cate2_list[cate2_line1 - 1]
cate2_line2_tensor = cate2_list[cate2_line2 - 1]
# print(cate2_line1_tensor.shape)
pos1.append(cate2_line1_tensor /np.linalg.norm(cate2_line1_tensor))
pos2.append(cate2_line2_tensor/np.linalg.norm(cate2_line2_tensor))
neg.append(cate1_line_tensor/np.linalg.norm(cate1_line_tensor))
try:
os.remove('../../feature/generated_dataset/pos1_fea.pt')
except:
pass
try:
os.remove('../../feature/generated_dataset/pos2_fea.pt')
except:
pass
try:
os.remove('../../feature/generated_dataset/neg_fea.pt')
except:
pass
with open('../../feature/generated_dataset/pos1_fea.pt', 'wb') as f:
torch.save(pos1, f)
with open('../../feature/generated_dataset/pos2_fea.pt', 'wb') as f:
torch.save(pos2, f)
with open('../../feature/generated_dataset/neg_fea.pt', 'wb') as f:
torch.save(neg, f)
| [
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
11748,
4738,
198,
11748,
28686,
198,
6738,
1341,
35720,
13,
3866,
36948,
1330,
3487,
1096,
628,
198,
4299,
651,
17,
26069,
62,
35993... | 2.228113 | 811 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
################################################################################
# | #
# | ______________________________________________________________ #
# | :~8a.`~888a:::::::::::::::88......88:::::::::::::::;a8~".a88::| #
# | ::::~8a.`~888a::::::::::::88......88::::::::::::;a8~".a888~:::| #
# | :::::::~8a.`~888a:::::::::88......88:::::::::;a8~".a888~::::::| #
# | ::::::::::~8a.`~888a::::::88......88::::::;a8~".a888~:::::::::| #
# | :::::::::::::~8a.`~888a:::88......88:::;a8~".a888~::::::::::::| #
# | :::::::::::: :~8a.`~888a:88 .....88;a8~".a888~:::::::::::::::| #
# | :::::::::::::::::::~8a.`~888......88~".a888~::::::::::::::::::| #
# | 8888888888888888888888888888......8888888888888888888888888888| #
# | ..............................................................| #
# | ..............................................................| #
# | 8888888888888888888888888888......8888888888888888888888888888| #
# | ::::::::::::::::::a888~".a88......888a."~8;:::::::::::::::::::| #
# | :::::::::::::::a888~".a8~:88......88~888a."~8;::::::::::::::::| #
# | ::::::::::::a888~".a8~::::88......88:::~888a."~8;:::::::::::::| #
# | :::::::::a888~".a8~:::::::88......88::::::~888a."~8;::::::::::| #
# | ::::::a888~".a8~::::::::::88......88:::::::::~888a."~8;:::::::| #
# | :::a888~".a8~:::::::::::::88......88::::::::::::~888a."~8;::::| #
# | a888~".a8~::::::::::::::::88......88:::::::::::::::~888a."~8;:| #
# | #
# | Rebirth Addon #
# | Copyright (C) 2017 Cypher #
# | #
# | This program is free software: you can redistribute it and/or modify #
# | it under the terms of the GNU General Public License as published by #
# | the Free Software Foundation, either version 3 of the License, or #
# | (at your option) any later version. #
# | #
# | This program is distributed in the hope that it will be useful, #
# | but WITHOUT ANY WARRANTY; without even the implied warranty of #
# | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# | GNU General Public License for more details. #
# | #
################################################################################
import re
import sys
import urllib
import string
if __name__ == '__main__':
sys.exit(main())
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
29113,
29113,
14468,
198,
2,
930,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220... | 1.922642 | 1,590 |
from django.apps import AppConfig
| [
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934,
628
] | 3.888889 | 9 |
from typing import Union
from .constants import *
def verify(return_values: Union[int, tuple]):
"""Strip the result code from a library call, asserting success.
Args:
return_values: Values returned from the library function call. It is expected that the
first return value is a HRESULT.
Returns:
The input return values excluding the result code.
If return_values only contains the result code, `None` is returned.
If return_values contains the result code and one other value, the other value is returned.
If return_values contains the result code and multiple other values, the other values
are returned as a tuple.
"""
if isinstance(return_values, int):
result = return_values
unwrapped = None
else:
result = return_values[0]
if len(return_values) == 1:
unwrapped = None
elif len(return_values) == 2:
unwrapped = return_values[1]
else:
unwrapped = return_values[1:]
result = ResultCode(result)
assert result.is_success(), f'unsuccessful result code: {ResultCode(result).to_hex()} ({ResultCode(result).name})'
return unwrapped
| [
6738,
19720,
1330,
4479,
198,
198,
6738,
764,
9979,
1187,
1330,
1635,
628,
198,
4299,
11767,
7,
7783,
62,
27160,
25,
4479,
58,
600,
11,
46545,
60,
2599,
198,
220,
220,
220,
37227,
1273,
5528,
262,
1255,
2438,
422,
257,
5888,
869,
11... | 2.734234 | 444 |
"""
Django settings for backend project.
Generated by 'django-admin startproject' using Django 2.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '#6zwu&dq_5z5s6nkgzwb1nc40863jq4znvx5j)#%+sns_@7&1u'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.environ.get('PARROT_ENV', 'development') != 'production'
ALLOWED_HOSTS = ['localhost', '127.0.0.1', '[::1]']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
# We use WhiteNoise to serve static files (index.html) using gunicorn
'whitenoise.middleware.WhiteNoiseMiddleware',
]
ROOT_URLCONF = 'backend.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'backend.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'regress.db'),
} if DEBUG else {
'ENGINE': 'django.db.backends.postgresql',
'NAME': os.environ['PARROT_DB_NAME'],
'USER': os.environ['PARROT_DB_USER'],
'PASSWORD': os.environ['PARROT_DB_PASSWORD'],
'HOST': os.environ['PARROT_DB_HOST'],
'PORT': os.environ['PARROT_DB_PORT'],
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
if DEBUG:
# Note the slashes '/.../' are necessary for STATIC_URL
STATIC_URL = '/frontend/dist/static/'
STATICFILES_DIRS = [
# First locate the built assets. When static files with the same names
# exist in both directories, the ones from dist will be loaded since
# they're compiled assets (e.g. index.html).
os.path.join(BASE_DIR, 'frontend/dist'),
os.path.join(BASE_DIR, 'frontend/public'),
]
else:
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
# http://whitenoise.evans.io/en/stable/django.html
# Serve index.html when visit /
WHITENOISE_INDEX_FILE = True
# Allow the app being hosted on PARROT_HOST to prevent Host Header Attack
ALLOWED_HOSTS.append(os.environ['PARROT_HOST'])
| [
37811,
198,
35,
73,
14208,
6460,
329,
30203,
1628,
13,
198,
198,
8645,
515,
416,
705,
28241,
14208,
12,
28482,
923,
16302,
6,
1262,
37770,
362,
13,
16,
13,
20,
13,
198,
198,
1890,
517,
1321,
319,
428,
2393,
11,
766,
198,
5450,
137... | 2.363538 | 1,843 |
from common.ulam_spiral import *
from common.primes import *
SIEVE = PrimeSieve(100000)
| [
6738,
2219,
13,
377,
321,
62,
2777,
21093,
1330,
1635,
198,
6738,
2219,
13,
1050,
999,
1330,
1635,
198,
198,
50,
10008,
6089,
796,
5537,
50,
12311,
7,
3064,
830,
8,
198
] | 2.78125 | 32 |