content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
from email.mime import image
from django.shortcuts import render
from django.http import HttpResponse
from .models import Images, Location
from images.models import Images
# Create your views here.
| [
6738,
3053,
13,
76,
524,
1330,
2939,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,
198,
6738,
764,
27530,
1330,
5382,
11,
13397,
198,
198,
6738,
4263,
13,
27530,
1330,... | 3.884615 | 52 |
import os
import re
import sklearn
import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
vector_parameters = [[2,0.8],[3,0.8],[3,0.8],[3,0.8],[3,0.8],[3,0.8],[3,0.8],[3,0.8],[3,0.8],
[3,0.8],[3,0.8],[3,0.8]]
| [
11748,
28686,
198,
11748,
302,
198,
11748,
1341,
35720,
198,
11748,
19798,
292,
355,
279,
67,
220,
198,
11748,
299,
32152,
355,
45941,
220,
198,
6738,
1341,
35720,
13,
30053,
62,
2302,
7861,
13,
5239,
1330,
309,
69,
312,
69,
38469,
75... | 2.055556 | 126 |
# -*- coding: utf-8 -*-
import unittest
from brew.utilities.efficiency import calculate_brew_house_yield
from fixtures import recipe
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
555,
715,
395,
198,
198,
6738,
9059,
13,
315,
2410,
13,
45888,
1330,
15284,
62,
11269,
62,
4803,
62,
88,
1164,
198,
6738,
34609,
1330,
8364,
628
] | 3.214286 | 42 |
from django.db import models
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
201,
198,
201,
198
] | 2.909091 | 11 |
import numpy as np
from challenge import Challenge
########################################################################
# Load data #
########################################################################
########################################################################
# Exercises #
########################################################################
| [
11748,
299,
32152,
355,
45941,
198,
6738,
4427,
1330,
13879,
628,
220,
220,
220,
1303,
29113,
29113,
4242,
21017,
198,
220,
220,
220,
1303,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
22... | 2.835165 | 182 |
"""
Settings module
"""
settings = {
'file': None,
'autostart': True,
'min_node_width': 50,
'min_node_height': 50,
'char_width': 9,
'char_height': 10,
'ver_spacing': 200,
'margin': 20,
'draw_calls': True,
'draw_boundary': False,
'color_background': 'gray',
'color_num_background': '#27def4',
'color_boundary': '#f400b8',
'color_boundary_stroke': 'white',
'color_node_body': '#27def4',
'color_node_stroke': 'black',
'color_text': 'black',
'color_num_text': 'black',
'color_connection': 'black',
'node_stroke_width': 2,
'node_stroke_bezier': True,
'node_conn_width': 2,
'node_opacity': 1.0,
'node_radius': 5,
'node_num_radius': 15,
'node_bound_radius': 20,
}
| [
37811,
628,
220,
220,
220,
16163,
8265,
198,
198,
37811,
198,
33692,
796,
1391,
198,
220,
220,
220,
705,
7753,
10354,
6045,
11,
198,
220,
220,
220,
705,
2306,
455,
433,
10354,
6407,
11,
198,
220,
220,
220,
705,
1084,
62,
17440,
62,
... | 2.178977 | 352 |
#num is also the first terms of spiral
num = 1
x = 2
final_sum = 0
for i in range(500):
#for each for number in one circle (four numbers on diagonals), the numbers increases by the same numbers (x)
#in first circle it is 3,5,7,9, increases by 2
#in second it is 13,17,21,25, increases by 4...
for i in range (1,5):
num += x
final_sum += num
x += 2
#we print the final sum
print(final_sum+1) | [
2,
22510,
318,
635,
262,
717,
2846,
286,
23642,
198,
22510,
796,
352,
198,
87,
796,
362,
198,
20311,
62,
16345,
796,
657,
198,
1640,
1312,
287,
2837,
7,
4059,
2599,
198,
220,
220,
220,
1303,
1640,
1123,
329,
1271,
287,
530,
9197,
... | 2.613497 | 163 |
import torch
import torch.nn.functional as F
from torch import nn
from collagen.core import Module
from collagen.modelzoo.modules import ConvBlock
class FPNBlock(Module):
"""
Extended implementation from https://github.com/qubvel/segmentation_models.pytorch
"""
class SegmentationBlock(Module):
"""
Extended implementation from https://github.com/qubvel/segmentation_models.pytorch
"""
class FPNDecoder(Module):
"""
Extended implementation from https://github.com/qubvel/segmentation_models.pytorch
"""
| [
11748,
28034,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
6738,
28034,
1330,
299,
77,
198,
198,
6738,
45452,
13,
7295,
1330,
19937,
198,
6738,
45452,
13,
19849,
89,
2238,
13,
18170,
1330,
34872,
12235,
628,
198,
4871,
376,
... | 3.137143 | 175 |
"""Representation of a text block within the HTML canvas."""
from html import unescape
from inscriptis.html_properties import WhiteSpace
class Block:
"""The current block of text.
A block usually refers to one line of output text.
.. note::
If pre-formatted content is merged with a block, it may also contain
multiple lines.
Args:
idx: the current block's start index.
prefix: prefix used within the current block.
"""
__slots__ = ('idx', 'prefix', '_content', 'collapsable_whitespace')
def merge(self, text: str, whitespace: WhiteSpace) -> None:
"""Merge the given text with the current block.
Args:
text: the text to merge.
whitespace: whitespace handling.
"""
if whitespace == WhiteSpace.pre:
self.merge_pre_text(text)
else:
self.merge_normal_text(text)
def merge_normal_text(self, text: str) -> None:
"""Merge the given text with the current block.
Args:
text: the text to merge
"""
normalized_text = []
for ch in text:
if not ch.isspace():
normalized_text.append(ch)
self.collapsable_whitespace = False
elif not self.collapsable_whitespace:
normalized_text.append(' ')
self.collapsable_whitespace = True
if normalized_text:
text = ''.join((self.prefix.first, *normalized_text)) if not \
self._content else ''.join(normalized_text)
text = unescape(text)
self._content += text
self.idx += len(text)
def merge_pre_text(self, text: str) -> None:
"""Merge the given pre-formatted text with the current block.
Args:
text: the text to merge
"""
text = ''.join((self.prefix.first,
text.replace('\n', '\n' + self.prefix.rest)))
text = unescape(text)
self._content += text
self.idx += len(text)
self.collapsable_whitespace = False
@property
def new_block(self) -> 'Block':
"""Return a new Block based on the current one."""
self.prefix.consumed = False
return Block(idx=self.idx + 1, prefix=self.prefix)
| [
37811,
40171,
341,
286,
257,
2420,
2512,
1626,
262,
11532,
21978,
526,
15931,
198,
6738,
27711,
1330,
555,
41915,
198,
6738,
1035,
6519,
271,
13,
6494,
62,
48310,
1330,
2635,
14106,
628,
198,
4871,
9726,
25,
198,
220,
220,
220,
37227,
... | 2.259512 | 1,025 |
from unittest import TestCase
from cmfsapy.dimension.correction import correct_estimates, polynom_func, \
compute_mFSA_correction_coef, correct_mFSA
import numpy as np
| [
6738,
555,
715,
395,
1330,
6208,
20448,
198,
6738,
12067,
9501,
12826,
13,
46156,
13,
10215,
8243,
1330,
3376,
62,
395,
26748,
11,
745,
6213,
296,
62,
20786,
11,
3467,
198,
220,
220,
220,
24061,
62,
76,
37,
4090,
62,
10215,
8243,
62... | 2.949153 | 59 |
import pytest
from graphql_relay import to_global_id
from django.contrib.auth import get_user_model
from creator.studies.models import Membership
from creator.data_reviews.factories import DataReviewFactory
from creator.ingest_runs.factories import ValidationResultsetFactory
from creator.studies.models import Study
User = get_user_model()
DATA_REVIEW = """
query ($id: ID!) {
dataReview(id: $id) {
id
kfId
createdAt
name
description
state
validationResultset {
id failed passed didNotRun downloadReportUrl downloadResultsUrl
}
study { id kfId }
versions { edges { node { id kfId } } }
}
}
"""
ALL_DATA_REVIEWS = """
query {
allDataReviews {
edges {
node {
id
kfId
createdAt
name
description
state
validationResultset {
id failed passed didNotRun downloadReportUrl downloadResultsUrl # noqa
}
study { id kfId }
versions { edges { node { id kfId } } }
}
}
}
}
"""
ALL_DATA_REVIEWS_BY_STUDY = """
query($studyKfId: String) {
allDataReviews(studyKfId: $studyKfId) {
edges {
node {
id
kfId
createdAt
name
description
state
validationResultset { id failed passed didNotRun }
study { id kfId }
versions { edges { node { id kfId } } }
}
}
}
}
"""
@pytest.mark.parametrize(
"user_group,allowed",
[
("Administrators", True),
("Services", False),
("Developers", True),
("Investigators", True),
("Bioinformatics", True),
(None, False),
],
)
@pytest.mark.parametrize(
"user_group,allowed",
[
("Administrators", True),
("Services", False),
("Developers", True),
("Investigators", True),
("Bioinformatics", True),
(None, False),
],
)
@pytest.mark.parametrize(
"user_group,allowed",
[
("Administrators", True),
],
)
| [
11748,
12972,
9288,
198,
6738,
4823,
13976,
62,
2411,
323,
1330,
284,
62,
20541,
62,
312,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
651,
62,
7220,
62,
19849,
198,
6738,
13172,
13,
19149,
444,
13,
27530,
1330,
37939,
198... | 1.970512 | 1,153 |
import torch
import torch.nn as nn
import torch.nn.functional as F
'''
Direct Policy Gradient network.
Takes in STATE_DIM size vector, and outputs ACTIONS_DIM size vector.
Action select (sampling), and reward backprop is done in another function.
Make sure to subtract a baseline!
'''
#
# '''
# This Policy net takes in state which is the per class acc, and then returns the index of a class to select.
# '''
# class Policy_Class_Net(nn.Module):
# def __init__(self, state_dim, actions_dim, hidden_dim=64):
# super(PolicyNet, self).__init__()
# self.input_layer = nn.Linear(state_dim, hidden_dim)
# self.hidden = nn.Linear(hidden_dim, actions_dim)
#
#
# def forward(self,x):
# x = F.relu(self.input_layer(x))
# return F.softmax(self.hidden(x)) # return a valid prob distribution..or not!
| [
628,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
198,
7061,
6,
198,
13470,
7820,
17701,
1153,
3127,
13,
220,
198,
198,
51,
1124,
287,
35454,
62,
35,
3955,
25... | 2.688889 | 315 |
from __future__ import annotations
from typing import Protocol, AsyncContextManager, Iterator
MAX_ROOMS_PER_TEN_MINUTES = 50
MAX_CONNECTIONS_PER_USER = 10
MAX_CONNECTIONS_PER_ROOM = 20
SERVER_LIVENESS_EXPIRATION_SECONDS = 60 * 10
| [
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
6738,
19720,
1330,
20497,
11,
1081,
13361,
21947,
13511,
11,
40806,
1352,
198,
198,
22921,
62,
13252,
2662,
50,
62,
18973,
62,
51,
1677,
62,
23678,
3843,
1546,
796,
2026,
198,
22921,
62,
... | 2.744186 | 86 |
#!/usr/local/Caskroom/miniconda/base/envs/music_venv/bin/pythonw
from pyo import *
s = Server().boot()
# white noise generator
n = Noise(0.5)
# Common cutoff frequency control
freq = Sig(1000)
freq.ctrl([SLMap(50, 5000, "lin", "value", 1000)], title = "Cutoff Frequency")
# Three different lowpass filters
tone = Tone(n, freq)
butlp = ButLP(n, freq)
mooglp = MoogLP(n, freq)
# Interpolates between input objects to produce a single output
sel = Selector([tone, butlp, mooglp]).out()
sel.ctrl(title = "Filter selector (0=Tone, 1=ButLP, 2=MoogLP)")
# Displays the spectrum contents of the chosen source
sp = Spectrum(sel)
s.gui(locals())
| [
2,
48443,
14629,
14,
12001,
14,
34,
2093,
3823,
14,
1084,
291,
13533,
14,
8692,
14,
268,
14259,
14,
28965,
62,
574,
85,
14,
8800,
14,
29412,
86,
198,
198,
6738,
279,
8226,
1330,
1635,
198,
198,
82,
796,
9652,
22446,
18769,
3419,
6... | 2.748936 | 235 |
from django.urls import path
from .views import index, qs
urlpatterns = [
path('', index, name='index_faq'),
path('qs', qs, name='qs'),
]
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
6738,
764,
33571,
1330,
6376,
11,
10662,
82,
198,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
3108,
10786,
3256,
6376,
11,
1438,
11639,
9630,
62,
13331,
80,
33809,
198,
220,
... | 2.508475 | 59 |
import pygame
from board import Board | [
11748,
12972,
6057,
198,
198,
6738,
3096,
1330,
5926
] | 4.222222 | 9 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
"""
I/O functions in fvecs, bvecs, ivecs formats
definition of the formats here: http://corpus-texmex.irisa.fr/
"""
| [
2,
15069,
357,
66,
8,
3203,
11,
3457,
13,
290,
663,
29116,
13,
198,
2,
198,
2,
770,
2723,
2438,
318,
11971,
739,
262,
17168,
5964,
1043,
287,
262,
198,
2,
38559,
24290,
2393,
287,
262,
6808,
8619,
286,
428,
2723,
5509,
13,
198,
... | 3.242424 | 99 |
from .rot13 import Rot13
| [
6738,
764,
10599,
1485,
1330,
18481,
1485,
628
] | 3.25 | 8 |
import numpy as np
'''view Example :- Board casting hoti h es me'''
arr = np.array([5,10,15])
arr_slice= arr[0:2]
# print(arr_slice) #[5,10]
arr_slice[:]= 0
# print(arr_slice) #[0,0]
# print(arr) #[0,0,15]
'''Copy Example :- Board casting hoti h es me'''
arr_copy = np.array([5,10,15])
arr_slice2= arr_copy[0:2].copy()
print(arr_slice2) #[5,10]
arr_slice2[:]=0
print(arr_slice2) #[0,0]
print(arr_copy)
| [
11748,
299,
32152,
355,
45941,
198,
198,
7061,
6,
1177,
17934,
1058,
12,
5926,
13092,
3024,
72,
289,
1658,
502,
7061,
6,
198,
3258,
796,
45941,
13,
18747,
26933,
20,
11,
940,
11,
1314,
12962,
198,
198,
3258,
62,
48369,
28,
5240,
58,... | 2.09596 | 198 |
#
# Gunicorn (https://docs.gunicorn.org/en/stable/configure.html)
#
# Metamapper uses Gunicorn to handle web requests by default. We recommend
# spinning up a few of these and putting them behind a reverse proxy like nginx.
#
# You can override these default settings by creating your own file and referencing the
# path to the Python file via the METAMAPPER_GUNICORN_CONFIG_PATH environment variable.
#
bind = '0.0.0.0:5050'
| [
2,
198,
2,
6748,
291,
1211,
357,
5450,
1378,
31628,
13,
7145,
291,
1211,
13,
2398,
14,
268,
14,
31284,
14,
11250,
495,
13,
6494,
8,
198,
2,
198,
2,
3395,
321,
11463,
3544,
6748,
291,
1211,
284,
5412,
3992,
7007,
416,
4277,
13,
7... | 3.335938 | 128 |
import polyphony
import sub1
from sub3 import SUB3_GLOBAL_ARRAY, SUB3_GLOBAL_TUPLE
@polyphony.testbench
test()
| [
11748,
7514,
23021,
198,
11748,
850,
16,
198,
6738,
850,
18,
1330,
28932,
18,
62,
8763,
9864,
1847,
62,
1503,
30631,
11,
28932,
18,
62,
8763,
9864,
1847,
62,
51,
8577,
2538,
628,
628,
628,
198,
31,
35428,
23021,
13,
9288,
26968,
628... | 2.531915 | 47 |
import pytest
import requests
import json
import os
from jsonschema import validate
API_LINK = 'https://jsonplaceholder.typicode.com/posts'
TMP_FILE = 'test_jsonplaceholder.json'
@pytest.yield_fixture(scope='session', autouse=True)
def make_tmp_file():
"""Create temporary file and delete in the end of the test session"""
with open(TMP_FILE, 'w') as f:
pass
yield
os.remove(TMP_FILE)
@pytest.fixture(scope='module')
def get_api_responce():
"""Get response from API"""
responce = requests.get(API_LINK)
if responce.ok:
print(responce)
with open(TMP_FILE, 'w') as write_file:
json.dump(responce.json(), write_file, indent=4)
return responce.json()
else:
print(f"\nSomething went wrong. Can't get responce from api. Status_code= {responce.status_code}")
def test_check_json_schema(get_api_responce):
"""Validate json-schema for https://jsonplaceholder.typicode.com/guide.html"""
schema = {
'type': 'object',
'properties': {
'id': {"type" : "number"},
'title': {"type" : "string"},
'body': {"type" : "string"},
'userId': {"type" : "number"},
}
}
responce = get_api_responce
validate(responce[0], schema) # -> If schema is valid return None. If haven't validate raise exception.
assert True
def test_get_single_resource():
"""Check that possible to get one resource"""
responce = requests.get(f'{API_LINK}/1').json()
with open('test_jsonplaceholder.json') as f:
assert json.load(f)[0] == responce
@pytest.mark.parametrize("_id", [1, 100])
| [
11748,
12972,
9288,
198,
11748,
7007,
198,
11748,
33918,
198,
11748,
28686,
198,
6738,
44804,
684,
2395,
2611,
1330,
26571,
628,
198,
17614,
62,
43,
17248,
796,
705,
5450,
1378,
17752,
5372,
13829,
13,
774,
16564,
1098,
13,
785,
14,
248... | 2.456036 | 671 |
# -*- coding: utf-8 -*-
#
# This module is part of the GeoTag-X PyBossa plugin.
#
# Author: Jeremy Othieno (j.othieno@gmail.com)
#
# Copyright (c) 2016 UNITAR/UNOSAT
#
# The MIT License (MIT)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from flask import Blueprint, render_template, abort
blueprint = Blueprint("geotagx-blog", __name__)
@blueprint.route("/", defaults={"page": 1})
@blueprint.route("/page/<int:page>")
def index(page):
"""Renders the blog page with the specified number.
Args:
page (int): A page number.
Returns:
unicode: The page's rendered HTML.
"""
from pybossa.model.blogpost import Blogpost
from sqlalchemy import desc
from pybossa.util import Pagination
page = 1 if page < 1 else page
POSTS_PER_PAGE = 20
pagination = Pagination(page, POSTS_PER_PAGE, Blogpost.query.count())
posts_from = (page - 1) * POSTS_PER_PAGE
if posts_from > pagination.total_count:
abort(404)
posts_to = posts_from + POSTS_PER_PAGE
posts = Blogpost.query.order_by(desc(Blogpost.created)).slice(posts_from, posts_to).all()
for p in posts:
p.cover_image = _find_cover_image(p.body) # _find_cover_image must be called before the body is truncated by _summarize.
p.body = _summarize(p.body)
return render_template("blog/index.html", posts=posts, pagination=pagination)
@blueprint.route("/post/<int:id>", endpoint="post")
def render_post(id):
"""Renders the blog post with the specified identifier.
Args:
id (int): A blog post's unique identifier.
Returns:
unicode: The page's rendered HTML.
"""
return render_template("blog/post.html", post=_get_post(id))
def _get_post(id):
"""Returns the blog post with the specified id.
Args:
id (int): A blog post's unique identifier.
Returns:
Blogpost | None: If found, an instance of the post with the specified id, None otherwise.
"""
from pybossa.core import blog_repo
return blog_repo.get(id)
def _find_cover_image(body):
"""Attempts to find a cover image to use for a summarized blog post.
The cover image will be the first image found in the specified body.
Because the body is written in markdown format, the algorithm works by
looking for the pattern "" where <label> is an image
label and <URL> is the URL to the image. If an occurrence of the
aforementioned pattern is found, <URL> is returned.
Args:
body (str): A blog post's body.
Returns:
str | None: A URL to an image if successful, None otherwise.
"""
from re import search
result = None
match = search("!\[.*\]\((.*)\)", body)
if match:
result = match.group(1)
return result
def _summarize(body):
"""Summarizes the specified blog post's body.
This function will extract the first paragraph from the specified body.
While the function does a reasonable job, it is far from robust as it does not cover
a few corner-cases of the markdown format. A better solution would be to introduce
a 'summary' field to the Blogpost class, allowing authors to write their own summaries.
Args:
body (str): A blog post's body.
Returns:
str: A summary of the specified body.
"""
summary = ""
if body:
# The first summary is at least a quarter of the original body's length.
# Note that body is truncated after a paragraph.
summary = body[:body.find("\r\n", len(body)/4)]
# Remove all images from the summary since the cover image is already in use.
from re import findall
for image in findall('(!\[.*\]\(.*\))', summary):
summary = summary.replace(image, "")
# With the images removed, get rid of any leading whitespace that may have been introduced.
summary = summary.lstrip()
markdown_delimiters = set(["*", "#", "_"])
limit = 0
if summary[0] in markdown_delimiters:
delimiter = summary[0]
delimiter_length = 1
while summary[delimiter_length] == delimiter:
delimiter_length += 1
delimiter *= delimiter_length
limit = summary.find(delimiter, delimiter_length + 1) + delimiter_length
else:
minimum_length = 200
limit = 0
while limit < minimum_length:
k = summary.find("\r\n", limit + 1)
if k < limit:
break
else:
limit = k
return summary[:limit]
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
770,
8265,
318,
636,
286,
262,
32960,
24835,
12,
55,
9485,
37310,
64,
13877,
13,
198,
2,
198,
2,
6434,
25,
11753,
440,
400,
2013,
78,
357,
73,
13,
849,
... | 2.758231 | 2,035 |
import boto, boto.ec2
import re, datetime
from dateutil import parser
from dateutil import tz
import collections
from collections import OrderedDict
import ast
from operator import add
from atlas_helper_methods import AtlasHelper
from aws_helper import AwsHelper
| [
11748,
275,
2069,
11,
275,
2069,
13,
721,
17,
198,
11748,
302,
11,
4818,
8079,
198,
6738,
3128,
22602,
1330,
30751,
198,
6738,
3128,
22602,
1330,
256,
89,
198,
11748,
17268,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
11748,
64... | 3.666667 | 72 |
'''
Copyright (C) 2021 CG Cookie
http://cgcookie.com
hello@cgcookie.com
Created by Jonathan Denning, Jonathan Williamson
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re
import time
import inspect
from copy import deepcopy
import bpy
from .human_readable import convert_actions_to_human_readable, convert_human_readable_to_actions
from .maths import Point2D, Vec2D
from .debug import dprint
from .decorators import blender_version_wrapper
from . import blender_preferences as bprefs
kmi_to_char = {
'ZERO': '0', 'NUMPAD_0': '0',
'ONE': '1', 'NUMPAD_1': '1',
'TWO': '2', 'NUMPAD_2': '2',
'THREE': '3', 'NUMPAD_3': '3',
'FOUR': '4', 'NUMPAD_4': '4',
'FIVE': '5', 'NUMPAD_5': '5',
'SIX': '6', 'NUMPAD_6': '6',
'SEVEN': '7', 'NUMPAD_7': '7',
'EIGHT': '8', 'NUMPAD_8': '8',
'NINE': '9', 'NUMPAD_9': '9',
'PERIOD': '.', 'NUMPAD_PERIOD': '.',
'PLUS': '+', 'NUMPAD_PLUS': '+',
'MINUS': '-', 'NUMPAD_MINUS': '-',
'SLASH': '/', 'NUMPAD_SLASH': '/',
'NUMPAD_ASTERIX': '*',
'BACK_SLASH': '\\',
'SPACE': ' ',
'EQUAL': '=',
'SEMI_COLON': ';', 'COMMA': ',',
'LEFT_BRACKET': '[', 'RIGHT_BRACKET': ']',
'QUOTE': "'", 'ACCENT_GRAVE': '`',
'GRLESS': '>',
'A':'a', 'B':'b', 'C':'c', 'D':'d',
'E':'e', 'F':'f', 'G':'g', 'H':'h',
'I':'i', 'J':'j', 'K':'k', 'L':'l',
'M':'m', 'N':'n', 'O':'o', 'P':'p',
'Q':'q', 'R':'r', 'S':'s', 'T':'t',
'U':'u', 'V':'v', 'W':'w', 'X':'x',
'Y':'y', 'Z':'z',
'SHIFT+ZERO': ')',
'SHIFT+ONE': '!',
'SHIFT+TWO': '@',
'SHIFT+THREE': '#',
'SHIFT+FOUR': '$',
'SHIFT+FIVE': '%',
'SHIFT+SIX': '^',
'SHIFT+SEVEN': '&',
'SHIFT+EIGHT': '*',
'SHIFT+NINE': '(',
'SHIFT+PERIOD': '>',
'SHIFT+PLUS': '+',
'SHIFT+MINUS': '_',
'SHIFT+SLASH': '?',
'SHIFT+BACK_SLASH': '|',
'SHIFT+EQUAL': '+',
'SHIFT+SEMI_COLON': ':', 'SHIFT+COMMA': '<',
'SHIFT+LEFT_BRACKET': '{', 'SHIFT+RIGHT_BRACKET': '}',
'SHIFT+QUOTE': '"', 'SHIFT+ACCENT_GRAVE': '~',
'SHIFT+GRLESS': '<',
'SHIFT+A':'A', 'SHIFT+B':'B', 'SHIFT+C':'C', 'SHIFT+D':'D',
'SHIFT+E':'E', 'SHIFT+F':'F', 'SHIFT+G':'G', 'SHIFT+H':'H',
'SHIFT+I':'I', 'SHIFT+J':'J', 'SHIFT+K':'K', 'SHIFT+L':'L',
'SHIFT+M':'M', 'SHIFT+N':'N', 'SHIFT+O':'O', 'SHIFT+P':'P',
'SHIFT+Q':'Q', 'SHIFT+R':'R', 'SHIFT+S':'S', 'SHIFT+T':'T',
'SHIFT+U':'U', 'SHIFT+V':'V', 'SHIFT+W':'W', 'SHIFT+X':'X',
'SHIFT+Y':'Y', 'SHIFT+Z':'Z',
'ESC': 'Escape',
'BACK_SPACE': 'Backspace',
'RET': 'Enter', 'NUMPAD_ENTER': 'Enter',
'HOME': 'Home', 'END': 'End',
'LEFT_ARROW': 'ArrowLeft', 'RIGHT_ARROW': 'ArrowRight',
'UP_ARROW': 'ArrowUp', 'DOWN_ARROW': 'ArrowDown',
'PAGE_UP': 'PageUp', 'PAGE_DOWN': 'PageDown',
'DEL': 'Delete',
'TAB': 'Tab',
}
re_blenderop = re.compile(r'(?P<keymap>.+?) *\| *(?P<operator>.+)')
def i18n_translate(text):
''' bpy.app.translations.pgettext tries to translate the given parameter '''
return bpy.app.translations.pgettext(text)
| [
7061,
6,
198,
15269,
357,
34,
8,
33448,
29925,
39606,
198,
4023,
1378,
66,
36484,
18055,
13,
785,
198,
31373,
31,
66,
36484,
18055,
13,
785,
198,
198,
41972,
416,
11232,
5601,
768,
11,
11232,
34974,
628,
220,
220,
220,
770,
1430,
31... | 1.945199 | 1,989 |
"""
Interpolate using a second order Lagrange polynomial.
Based on NPSS implementation.
"""
import numpy as np
from openmdao.components.interp_util.interp_algorithm import InterpAlgorithm
class InterpLagrange2(InterpAlgorithm):
"""
Interpolate using a second order Lagrange polynomial.
"""
def __init__(self, grid, values, interp, **kwargs):
"""
Initialize table and subtables.
Parameters
----------
grid : tuple(ndarray)
Tuple containing x grid locations for this dimension and all subtable dimensions.
values : ndarray
Array containing the table values for all dimensions.
interp : class
Interpolation class to be used for subsequent table dimensions.
**kwargs : dict
Interpolator-specific options to pass onward.
"""
super(InterpLagrange2, self).__init__(grid, values, interp, **kwargs)
self.k = 3
self._name = 'lagrange2'
def interpolate(self, x, idx, slice_idx):
"""
Compute the interpolated value over this grid dimension.
Parameters
----------
x : ndarray
The coordinates to sample the gridded data at. First array element is the point to
interpolate here. Remaining elements are interpolated on sub tables.
idx : integer
Interval index for x.
slice_idx : List of <slice>
Slice object containing indices of data points requested by parent interpolating
tables.
Returns
-------
ndarray
Interpolated values.
ndarray
Derivative of interpolated values with respect to this independent and child
independents.
ndarray
Derivative of interpolated values with respect to values for this and subsequent table
dimensions.
ndarray
Derivative of interpolated values with respect to grid for this and subsequent table
dimensions.
"""
grid = self.grid
subtable = self.subtable
# Complex Step
if self.values.dtype == np.complex:
dtype = self.values.dtype
else:
dtype = x.dtype
# Extrapolate high
ngrid = len(grid)
if idx > ngrid - 3:
idx = ngrid - 3
derivs = np.empty(len(x))
xx1 = x[0] - grid[idx]
xx2 = x[0] - grid[idx + 1]
xx3 = x[0] - grid[idx + 2]
if subtable is not None:
# Interpolate between values that come from interpolating the subtables in the
# subsequent dimensions.
nx = len(x)
slice_idx.append(slice(idx, idx + 3))
tshape = self.values[tuple(slice_idx)].shape
nshape = list(tshape[:-nx])
nshape.append(nx)
derivs = np.empty(tuple(nshape), dtype=dtype)
c12 = grid[idx] - grid[idx + 1]
c13 = grid[idx] - grid[idx + 2]
c23 = grid[idx + 1] - grid[idx + 2]
subval, subderiv, _, _ = subtable.evaluate(x[1:], slice_idx=slice_idx)
q1 = subval[..., 0] / (c12 * c13)
q2 = subval[..., 1] / (c12 * c23)
q3 = subval[..., 2] / (c13 * c23)
dq1_dsub = subderiv[..., 0, :] / (c12 * c13)
dq2_dsub = subderiv[..., 1, :] / (c12 * c23)
dq3_dsub = subderiv[..., 2, :] / (c13 * c23)
derivs[..., 1:] = xx3 * (dq1_dsub * xx2 - dq2_dsub * xx1) + dq3_dsub * xx1 * xx2
else:
values = self.values[tuple(slice_idx)]
nshape = list(values.shape[:-1])
nshape.append(1)
derivs = np.empty(tuple(nshape), dtype=dtype)
c12 = grid[idx] - grid[idx + 1]
c13 = grid[idx] - grid[idx + 2]
c23 = grid[idx + 1] - grid[idx + 2]
q1 = values[..., idx] / (c12 * c13)
q2 = values[..., idx + 1] / (c12 * c23)
q3 = values[..., idx + 2] / (c13 * c23)
derivs[..., 0] = q1 * (2.0 * x[0] - grid[idx + 1] - grid[idx + 2]) - \
q2 * (2.0 * x[0] - grid[idx] - grid[idx + 2]) + \
q3 * (2.0 * x[0] - grid[idx] - grid[idx + 1])
return xx3 * (q1 * xx2 - q2 * xx1) + q3 * xx1 * xx2, derivs, None, None
| [
37811,
198,
9492,
16104,
378,
1262,
257,
1218,
1502,
21003,
9521,
745,
6213,
49070,
13,
198,
198,
15001,
319,
399,
3705,
50,
7822,
13,
198,
37811,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
1280,
9132,
5488,
13,
5589,
3906,
1... | 2.030502 | 2,131 |
import os
from collections import namedtuple
import PIL
from kivy.clock import Clock
from kivy.lang import Builder
from kivy.logger import Logger
from kivy.properties import ListProperty
from kivy.uix.anchorlayout import AnchorLayout
from .utils import fix_android_image
MODULE_DIRECTORY = os.path.dirname(os.path.realpath(__file__))
class XZbarDecoder(ZBarDecoder):
"""Proxy-like that deals with all the implementations."""
available_implementations = {
'pyzbar': PyZBarDecoder,
'zbarlight': ZBarLightDecoder,
}
zbar_decoder = None
class ZBarCam(AnchorLayout):
"""
Widget that use the Camera and zbar to detect qrcode.
When found, the `codes` will be updated.
"""
resolution = ListProperty([640, 480])
symbols = ListProperty([])
Symbol = namedtuple('Symbol', ['type', 'data'])
# checking all possible types by default
code_types = ListProperty(XZbarDecoder().get_available_code_types())
def _setup(self):
"""
Postpones some setup tasks that require self.ids dictionary.
"""
self._remove_shoot_button()
# `self.xcamera._camera` instance may not be available if e.g.
# the `CAMERA` permission is not granted
self.xcamera.bind(on_camera_ready=self._on_camera_ready)
# camera may still be ready before we bind the event
if self.xcamera._camera is not None:
self._on_camera_ready(self.xcamera)
def _on_camera_ready(self, xcamera):
"""
Starts binding when the `xcamera._camera` instance is ready.
"""
xcamera._camera.bind(on_texture=self._on_texture)
def _remove_shoot_button(self):
"""
Removes the "shoot button", see:
https://github.com/kivy-garden/garden.xcamera/pull/3
"""
xcamera = self.xcamera
shoot_button = xcamera.children[0]
xcamera.remove_widget(shoot_button)
@classmethod
@property
| [
11748,
28686,
198,
6738,
17268,
1330,
3706,
83,
29291,
198,
198,
11748,
350,
4146,
198,
6738,
479,
452,
88,
13,
15750,
1330,
21328,
198,
6738,
479,
452,
88,
13,
17204,
1330,
35869,
198,
6738,
479,
452,
88,
13,
6404,
1362,
1330,
5972,
... | 2.551948 | 770 |
from src.modules.papers import Paper, PaperList
| [
6738,
12351,
13,
18170,
13,
40491,
1330,
14962,
11,
14962,
8053,
198
] | 4 | 12 |
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 20 18:12:59 2021
@author: Traftmine
"""
#the computer choose a number between 1 and a number x chosen by the user
import random as rm
import time as t
NUMBER_LIM = int(input("What's the limit number you're ok with? "))
Y = int(input("How many rounds do you wanna play? "))
X = 1
SCORE = Y
START = t.time()
#We create a loop, the user has to choose how many questions it wants.
while X <= Y:
NUMBER_ONE = rm.randint(1, NUMBER_LIM)
NUMBER_TWO = rm.randint(1, NUMBER_LIM)
#the computer's making a calculation and is collecting the user's answer
CALCULATION = NUMBER_ONE*NUMBER_TWO
print(NUMBER_ONE, "times", NUMBER_TWO)
ANSWER = int(input("is equal to: "))
#the computer is checking if the answer is right
if ANSWER == CALCULATION:
print("Well done!")
else:
print("You're wrong, the answer was :", CALCULATION, "; keep going!")
SCORE = SCORE-1
X = X + 1
END = t.time()
TIME = round(END - START)
#we give the score to the user
print(SCORE, "on", Y)
print("In", TIME, "seconds")
t.sleep(5)
print("""Thanks for playing see ya!
|-----------------------------|
|/////////////////////////////|
|.............................|
|~~~~~~~~~~TRAFTMINE~~~~~~~~~~|
|.............................|
|/////////////////////////////|
|-----------------------------|""")
t.sleep(2) | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
37811,
201,
198,
41972,
319,
3825,
7653,
1160,
1248,
25,
1065,
25,
3270,
33448,
201,
198,
201,
198,
31,
9800,
25,
309,
1617,
3810,
201,
198,
37811,
201,
198,
201,... | 2.566372 | 565 |
#! /usr/bin/env python
import sys
from setuptools import setup
import versioneer
setup_args = dict(
name = 'pyctdev',
description = 'python packaging common tasks for developers',
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
version = versioneer.get_version().lstrip('v'),
cmdclass = versioneer.get_cmdclass(),
license = 'BSD-3',
url = 'http://github.com/pyviz/pyctdev',
packages=['pyctdev'],
python_requires=">=2.7",
include_package_data = True,
install_requires=[
# otherwise py2 users will just get an error (should really
# be fixed in doit)
'doit' if sys.version_info[0]>2 else 'doit <0.30',
# doit requires cloudpickle but does not specify the dependency
'cloudpickle',
## tox
# because tox.ini is currently the master list of
# tests/environments, some of tox is required - just the
# config reading bit. But that's tied in with all of tox. And
# tox is not in anaconda defaults. Further, tox and virtualenv
# may be problematic with conda, or someone may have/want a
# customized version, so we don't cause them to be installed
# and just vendor them.
#'tox'
#'virtualenv'
'pluggy', # for tox
'py', # for tox
#'argparse', # for virtualenv
##
# Pretty much part of every python distribution now anyway.
# Use it e.g. to be able to read pyproject.toml
# pinning to avoid https://github.com/pyviz/pyctdev/issues/12
'pip >=19.1.1'
],
extras_require={
'tests': ['flake8'],
'ecosystem_pip': ['tox','twine','wheel'],
# pins are supposed to be for when it became possible to
# install them outside of root/base env, and when api appeared;
# not sure exactly which versions
# (actually, cb pin is for tested/known good version
'ecosystem_conda': ['conda >=4.4', 'conda-build >=3.10.1']
}
)
if __name__ == "__main__":
setup(**setup_args)
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
25064,
198,
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
11748,
2196,
28153,
198,
198,
40406,
62,
22046,
796,
8633,
7,
198,
220,
220,
220,
1438,
796,
705,
9078,
310,
... | 2.488152 | 844 |
from git import base
import torch
from PIL import Image
from torchvision import transforms
from google.cloud import storage
import io
import base64
BUCKET_NAME = "cloud_fn_storage"
MODEL_FILE = "deployable_model.pt"
client = storage.Client()
bucket = client.get_bucket(BUCKET_NAME)
blob = bucket.get_blob(MODEL_FILE)
my_model = blob.download_as_string()
my_model = io.BytesIO(my_model)
model = torch.jit.load(my_model)
transform = transforms.ToTensor() | [
6738,
17606,
1330,
2779,
198,
11748,
28034,
198,
6738,
350,
4146,
1330,
7412,
198,
6738,
28034,
10178,
1330,
31408,
198,
6738,
23645,
13,
17721,
1330,
6143,
198,
11748,
33245,
198,
11748,
2779,
2414,
198,
198,
33,
16696,
2767,
62,
20608,
... | 2.954545 | 154 |
import sys, os
# 为了兼容-m模式运行,我们主动将当前目录加入到sys.path,这样就能搜索本项目的的模块
sys.path.append(os.path.dirname(__file__))
from app import main
if __name__ == '__main__':
main()
| [
11748,
25064,
11,
28686,
201,
198,
2,
220,
10310,
118,
12859,
228,
17739,
120,
22522,
117,
12,
76,
162,
101,
94,
28156,
237,
32573,
238,
26193,
234,
171,
120,
234,
22755,
239,
20015,
105,
10310,
119,
27950,
101,
49546,
37605,
241,
302... | 1.272059 | 136 |
"""
Modipyd: Autotest for Python, and more
=======================================
**Modipyd** is a `Python`_ module dependency analysis and monitoring
modification framework, written by Takanori Ishikawa and licensed
under `the MIT license`_.
**This project aims to provide:**
* Automated testing tool **pyautotest** (like `ZenTest's autotest <http://
www.zenspider.com/ZSS/Products/ZenTest/>`_) for Python
* *Plugin architecture* designed to be simple enough to allow user to customize
action triggered by the module modification event
* API for *Bytecode analysis*, *Module dependency analysis*, and *Monitoring
module modification*
:copyright: 2008-2010 by Takanori Ishikawa
:license: MIT, see LICENSE for more details.
.. _Python: http://www.python.org/
.. _the MIT license: http://www.opensource.org/licenses/mit-license.php
"""
__version__ = '1.1'
__author__ = 'Takanori Ishikawa <takanori.ishikawa@gmail.com>'
__url__ = 'http://www.metareal.org/p/modipyd/'
__license__ = 'MIT License'
__docformat__ = 'restructuredtext'
__all__ = ['LOGGER', 'HAS_RELATIVE_IMPORTS', 'BYTECODE_PROCESSORS']
import os
import sys
# ----------------------------------------------------------------
# Logger
# ----------------------------------------------------------------
def __configure_logger():
"""Configure project-wide logger"""
import logging
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(
'[%(levelname)s] %(message)s '))
# Fully descriptive format
#handler.setFormatter(logging.Formatter(
# '%(asctime)s [%(levelname)s] %(message)s '
# '(File "%(pathname)s", line %(lineno)d)'))
logger = logging.getLogger(__name__)
logger.addHandler(handler)
# If a line below is uncommented, LOGGER's level is accidentally
# changed when this module is reloaded
#logger.setLevel(logging.WARN)
return logger
# Logger object for project
LOGGER = __configure_logger()
# ----------------------------------------------------------------
# Python version compatibility
# ----------------------------------------------------------------
# The Absolute and Relative Imports has been implemented in Python 2.5
# http://docs.python.org/whatsnew/pep-328.html
HAS_RELATIVE_IMPORTS = (sys.hexversion >= 0x2050000)
# ----------------------------------------------------------------
# Bytecode Processors
# ----------------------------------------------------------------
# The modipyd.bytecode.BytecodeProcessor subclasses
# for disassembling bytecode, and populating properties.
#
# See modipyd.bytecode.BytecodeProcessor class and standard
# processorsfor for more details.
#
BYTECODE_PROCESSORS = [
# Standard processors
'modipyd.bytecode.ImportProcessor',
'modipyd.bytecode.ClassDefinitionProcessor',
]
| [
37811,
198,
5841,
541,
5173,
25,
5231,
313,
395,
329,
11361,
11,
290,
517,
198,
10052,
1421,
18604,
198,
198,
1174,
5841,
541,
5173,
1174,
318,
257,
4600,
37906,
63,
62,
8265,
20203,
3781,
290,
9904,
198,
4666,
2649,
9355,
11,
3194,
... | 3.420925 | 822 |
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from config import Config
app = Flask(__name__)
app.config.from_object(Config())
db = SQLAlchemy(app)
db.engine.execute('select 1')
| [
6738,
42903,
1330,
46947,
198,
6738,
42903,
62,
25410,
282,
26599,
1330,
16363,
2348,
26599,
198,
198,
6738,
4566,
1330,
17056,
198,
198,
1324,
796,
46947,
7,
834,
3672,
834,
8,
198,
1324,
13,
11250,
13,
6738,
62,
15252,
7,
16934,
289... | 3.15873 | 63 |
# from website import score_comp | [
2,
422,
3052,
1330,
4776,
62,
5589
] | 4.571429 | 7 |
# from big_ol_pile_of_manim_imports import *
from manimlib.imports import *
from manimlib.constants import *
TEX_USE_CTEX = True
| [
2,
422,
1263,
62,
349,
62,
79,
576,
62,
1659,
62,
805,
320,
62,
320,
3742,
1330,
1635,
201,
198,
6738,
582,
320,
8019,
13,
320,
3742,
1330,
1635,
201,
198,
6738,
582,
320,
8019,
13,
9979,
1187,
1330,
1635,
201,
198,
51,
6369,
62... | 2.538462 | 52 |
from a10sdk.common.A10BaseClass import A10BaseClass
class MemberList(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param status: {"type": "string", "format": "string"}
:param passive: {"type": "number", "format": "number"}
:param connect_success: {"type": "number", "format": "number"}
:param member_name: {"type": "string", "format": "string"}
:param address: {"type": "string", "format": "ipv4-address"}
:param sync_sequence_number: {"type": "number", "format": "number"}
:param connect_fail: {"type": "number", "format": "number"}
:param priority: {"type": "number", "format": "number"}
:param group_name: {"type": "string", "format": "string"}
:param open_out: {"type": "number", "format": "number"}
:param learn: {"type": "number", "format": "number"}
:param is_master: {"type": "number", "format": "number"}
:param open_success: {"type": "number", "format": "number"}
:param open_in: {"type": "number", "format": "number"}
:param sys_id: {"type": "number", "format": "number"}
:param update_in: {"type": "number", "format": "number"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
class Oper(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param member_list: {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"status": {"type": "string", "format": "string"}, "passive": {"type": "number", "format": "number"}, "connect-success": {"type": "number", "format": "number"}, "member-name": {"type": "string", "format": "string"}, "address": {"type": "string", "format": "ipv4-address"}, "sync-sequence-number": {"type": "number", "format": "number"}, "connect-fail": {"type": "number", "format": "number"}, "priority": {"type": "number", "format": "number"}, "group-name": {"type": "string", "format": "string"}, "open-out": {"type": "number", "format": "number"}, "learn": {"type": "number", "format": "number"}, "is_master": {"type": "number", "format": "number"}, "open-success": {"type": "number", "format": "number"}, "open-in": {"type": "number", "format": "number"}, "optional": true, "sys-id": {"type": "number", "format": "number"}, "update-in": {"type": "number", "format": "number"}}}]}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
class GroupInfo(A10BaseClass):
"""Class Description::
Operational Status for the object group-info.
Class group-info supports CRUD Operations and inherits from `common/A10BaseClass`.
This class is the `"PARENT"` class for this module.`
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/gslb/group-info/oper`.
"""
| [
6738,
257,
940,
21282,
74,
13,
11321,
13,
32,
940,
14881,
9487,
1330,
317,
940,
14881,
9487,
628,
198,
4871,
10239,
8053,
7,
32,
940,
14881,
9487,
2599,
198,
220,
220,
220,
220,
198,
220,
220,
220,
37227,
1212,
1398,
857,
407,
1104,... | 3.00197 | 1,015 |
class InvalidFactorialError(RuntimeError):
"""Error generated if an invalid factorial input is given."""
def factorial(n: int) -> int:
"""Computes the factorial through a recursive algorithm.
Args:
n: A positive input value.
Raises:
InvalidFactorialError: If n is less than 0.
Returns:
Computed factorial.
"""
if n < 0:
raise InvalidFactorialError(f'n is less than zero: {n}')
elif n == 0:
return 1
return n * factorial(n - 1)
| [
4871,
17665,
29054,
5132,
12331,
7,
41006,
12331,
2599,
198,
220,
220,
220,
37227,
12331,
7560,
611,
281,
12515,
1109,
5132,
5128,
318,
1813,
526,
15931,
628,
198,
4299,
1109,
5132,
7,
77,
25,
493,
8,
4613,
493,
25,
198,
220,
220,
2... | 2.583756 | 197 |
"""
Defines a finite product topology. This is a product topology formed by
multiplying two finite topologies.
"""
import abc
from .finite_topology import FiniteTopology
from .product_topology import ProductTopology
from typing import TypeVar, Tuple, Set, Generic
X = TypeVar('X')
Y = TypeVar('Y')
class FiniteProductTopology(
FiniteTopology[Tuple[X, Y]], ProductTopology[X, Y], Generic[X, Y],
metaclass=abc.ABCMeta
):
"""
Defines the interface for a topology consisting of the product of two
finite topologies. The product of two finite topologies is finite. This is
true because topologies with a finite number of elements have a finite
number of open sets.
"""
@property
@abc.abstractmethod
def open_rectangles(self) -> Set[Set[Tuple[X, Y]]]:
"""
:return: The open rectangles on this topology. This is another name for
the open sets of this topology
"""
raise NotImplementedError()
| [
37811,
198,
7469,
1127,
257,
27454,
1720,
1353,
1435,
13,
770,
318,
257,
1720,
1353,
1435,
7042,
416,
198,
16680,
541,
3157,
734,
27454,
1353,
5823,
13,
198,
37811,
198,
11748,
450,
66,
198,
6738,
764,
69,
9504,
62,
4852,
1435,
1330,
... | 2.92515 | 334 |
from transformers import T5TokenizerFast
import torch
import torch.nn.functional as F
from lm_eval.base import LM
from lm_eval import utils
from tqdm import tqdm
### I very much dislike this solution. TODO: fix this abomination for jay-z repo
import os
import sys
from pathlib import Path
path = Path(os.path.realpath(__file__))
workfolder = str(path.parent.parent.parent.parent)
sys.path.append(workfolder)
###
from models.decoder_only_t5 import DecoderOnlyT5LMHeadModel
| [
6738,
6121,
364,
1330,
309,
20,
30642,
7509,
22968,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
6738,
300,
76,
62,
18206,
13,
8692,
1330,
37125,
198,
6738,
300,
76,
62,
18206,
1330,
3384,
4487,
198,
6738... | 3.195946 | 148 |
from .models import Campaign
| [
6738,
764,
27530,
1330,
13718,
628
] | 5 | 6 |
#!/usr/bin/env python3.8
from user import User
from credentials import Credential
import random
import string
Process = []
#sign in user
#program running
program_run = True
#exit program
#sign up user
def signup_user(username, password):
'''
Function to sign up a user
'''
new_user = User(username,password)
return new_user
#save user
def save_users(user):
'''
Function to save user
'''
user.save_user()
#check existing username
def check_existing_user(username):
'''
Function that check if a user exists with that username and return a Boolean
'''
return User.user_exist(username)
#check existing password
def check_existing_password(password):
'''
Function that checks if a password exists
'''
return User.password_exist(password)
#log in
def signin():
'''
Function to log in a user
'''
global program_run
print('++++++++SIGN IN++++++++')
print("Enter Username")
username = input()
if username == check_existing_user:
print("Enter Password")
password = input()
if password == check_existing_password:
print("logged in")
cred()
else:
print("Invalid password")
else:
print("User doesn't exist. Kindly Try again")
print('\n')
program_run = False
#sign up
def signup():
'''
Function to sign up a user
'''
global program_run
print('++++++++SIGN UP++++++++')
print("Enter a Username")
username = input()
print('\n')
print("Enter am password")
password = input()
print('\n')
save_users(signup_user(username, password))
print('\n')
print("Account created")
print('\n')
print("Logged in...")
print('\n')
cred()
#create credential
def create_credential(uname,account,cpass):
'''
Function to create credential
'''
new_credential = Credential(uname,account,cpass)
return new_credential
#save credential
def save_credential(credential):
'''
Function to save credential
'''
credential.save_credential()
#delete credential
def del_credential(credential):
'''
Function to delete credential
'''
credential.delete_credential()
#find credential
def find_credential(account):
'''
Function to find credentials using account
'''
return Credential.find_by_account(account)
#check if a credential exists
def check_existing_credential(account):
'''
Function to check if a credential exists
'''
return Credential.credential_exist(account)
#display all credentials
def display_credentials():
'''
Function that returns all saved credentials
'''
return Credential.display_credentials()
#main Function
if __name__ == '__main__':
main() | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
13,
23,
198,
6738,
2836,
1330,
11787,
198,
6738,
18031,
1330,
327,
445,
1843,
198,
11748,
4738,
198,
11748,
4731,
198,
198,
18709,
796,
17635,
198,
2,
12683,
287,
2836,
628,
198,
2,
2... | 2.648673 | 1,093 |
import sys
import os
import django
CHOPEN_PATH = os.path.dirname(os.path.realpath(__file__))
| [
11748,
25064,
198,
11748,
28686,
198,
11748,
42625,
14208,
198,
198,
3398,
3185,
1677,
62,
34219,
796,
28686,
13,
6978,
13,
15908,
3672,
7,
418,
13,
6978,
13,
5305,
6978,
7,
834,
7753,
834,
4008,
198
] | 2.611111 | 36 |
#!/usr/bin/env python3
# Copyright (C) 2019 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from os import sys, path
import synth_common
trace = synth_common.create_trace()
trace.add_packet(ts=1)
trace.add_process(10, 0, "processa")
trace.add_process(20, 0, "processb")
trace.add_ftrace_packet(0)
# Add a very long (~1 month long) sched slice.
trace.add_sched(ts=50, prev_pid=10, next_pid=20)
end_ts = 1 * 30 * 24 * 60 * 60 * 60 * 1000 * 1000 * 1000
trace.add_sched(ts=end_ts, prev_pid=20, next_pid=10)
sys.stdout.buffer.write(trace.trace.SerializeToString())
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
15069,
357,
34,
8,
13130,
383,
5565,
4946,
8090,
4935,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,... | 3.087079 | 356 |
# FootAndBall: Integrated Player and Ball Detector
# Jacek Komorowski, Grzegorz Kurzejamski, Grzegorz Sarwas
# Copyright (c) 2020 Sport Algorithmics and Gaming
from torchvision import transforms
from PIL import Image
import numpy as np
import numbers
import random
import cv2
import torchvision.transforms as transforms
import torchvision.transforms.functional as F
# Labels starting from 0
BALL_LABEL = 1
PLAYER_LABEL = 2
# Size of the ball bbox in pixels (fixed as we detect only ball center)
BALL_BBOX_SIZE = 40
NORMALIZATION_MEAN = [0.485, 0.456, 0.406]
NORMALIZATION_STD = [0.229, 0.224, 0.225]
# Tensor to numpy transform
denormalize_trans = transforms.Compose([transforms.Normalize(mean=[0., 0., 0.], std=[1.0 / e for e in NORMALIZATION_STD]),
transforms.Normalize(mean=[-e for e in NORMALIZATION_MEAN], std=[1., 1., 1.])])
normalize_trans = transforms.Compose([transforms.ToTensor(),
transforms.Normalize(NORMALIZATION_MEAN, NORMALIZATION_STD)])
def apply_transform_and_clip(boxes, labels, M, shape):
"""
:param points:
:param M: affine transformation matrix
:param shape: (width, height) tuple
:return:
"""
assert len(boxes) == len(labels)
ones = np.ones((len(boxes), 1))
ext_pts1 = np.append(boxes[:, :2], ones, 1).transpose() # Upper right corner
ext_pts2 = np.append(boxes[:, 2:4], ones, 1).transpose() # Lower left corner
transformed_pts1 = np.dot(M[:2], ext_pts1).transpose()
transformed_pts2 = np.dot(M[:2], ext_pts2).transpose()
# We need to find out which corner is top right and which is bottom left, after the transform
transformed_boxes = np.zeros_like(boxes)
transformed_boxes[:, 0] = np.minimum(transformed_pts1[:, 0], transformed_pts2[:, 0])
transformed_boxes[:, 1] = np.minimum(transformed_pts1[:, 1], transformed_pts2[:, 1])
transformed_boxes[:, 2] = np.maximum(transformed_pts1[:, 0], transformed_pts2[:, 0])
transformed_boxes[:, 3] = np.maximum(transformed_pts1[:, 1], transformed_pts2[:, 1])
assert boxes.shape == transformed_boxes.shape
return clip(transformed_boxes, labels, shape)
def clip(boxes, labels, shape):
"""
:param boxes: list of (x1, y1, x2, y2) coordinates
:param shape: (width, height) tuple
:return:
"""
box_contained = lambda e: 0 <= e[0] < shape[0] and 0 <= e[1] < shape[1] and 0 <= e[2] < shape[0] and 0 <= e[3] < shape[1]
mask = [box_contained(box) for box in boxes]
return boxes[mask], labels[mask]
class RandomAffine:
"""Random affine transformation of the image keeping center invariant
Args:
degrees (sequence or float or int): Range of degrees to select from.
If degrees is a number instead of sequence like (min, max), the range of degrees
will be (-degrees, +degrees). Set to 0 to deactivate rotations.
translate (tuple, optional): tuple of maximum absolute fraction for horizontal
and vertical translations. For example translate=(a, b), then horizontal shift
is randomly sampled in the range -img_width * a < dx < img_width * a and vertical shift is
randomly sampled in the range -img_height * b < dy < img_height * b. Will not translate by default.
scale (tuple, optional): scaling factor interval, e.g (a, b), then scale is
randomly sampled from the range a <= scale <= b. Will keep original scale by default.
shear (sequence or float or int, optional): Range of degrees to select from.
If shear is a number, a shear parallel to the x axis in the range (-shear, +shear)
will be apllied. Else if shear is a tuple or list of 2 values a shear parallel to the x axis in the
range (shear[0], shear[1]) will be applied. Else if shear is a tuple or list of 4 values,
a x-axis shear in (shear[0], shear[1]) and y-axis shear in (shear[2], shear[3]) will be applied.
Will not apply shear by default
"""
def get_params(self, h, w):
"""Get parameters for affine transformation
Returns:
sequence: params to be passed to the affine transformation
"""
angle = random.uniform(self.degrees[0], self.degrees[1])
if self.translate is not None:
max_dx = self.translate[0] * w
max_dy = self.translate[1] * h
translations = (np.round(random.uniform(-max_dx, max_dx)),
np.round(random.uniform(-max_dy, max_dy)))
else:
translations = (0, 0)
if self.scale is not None:
scale = random.uniform(self.scale[0], self.scale[1])
else:
scale = 1.0
if self.shear is not None:
if len(self.shear) == 2:
shear = [random.uniform(self.shear[0], self.shear[1]), 0.]
elif len(self.shear) == 4:
shear = [random.uniform(self.shear[0], self.shear[1]), random.uniform(self.shear[2], self.shear[3])]
else:
assert NotImplementedError('Incorrect shear: {}'.format(self.shear))
else:
shear = 0.0
return angle, translations, scale, shear
class RandomCrop:
"""
Crop the given PIL Image at a random location.
Args:
size: Desired output size of the crop (height,width)
"""
# Convert image to tensors and normalize the image, ground truth is not changed
| [
2,
7870,
1870,
23410,
25,
35432,
7853,
290,
6932,
4614,
9250,
198,
2,
49674,
74,
32364,
273,
12079,
11,
1902,
89,
1533,
273,
89,
18132,
2736,
73,
4105,
4106,
11,
1902,
89,
1533,
273,
89,
6866,
9776,
198,
2,
15069,
357,
66,
8,
1213... | 2.490711 | 2,207 |
import pytest # noinspection PyPackageRequirements
import asyncio
from aionetworking.compatibility import (supports_task_name, get_task_name, get_current_task_name, set_task_name,
set_current_task_name, current_task)
| [
11748,
12972,
9288,
220,
220,
1303,
645,
1040,
14978,
9485,
27813,
42249,
198,
11748,
30351,
952,
198,
198,
6738,
257,
295,
316,
16090,
13,
5589,
25901,
1330,
357,
18608,
2096,
62,
35943,
62,
3672,
11,
651,
62,
35943,
62,
3672,
11,
65... | 2.40367 | 109 |
# Copyright 2020, OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The opentelemetry-instrumentation-aws-lambda package provides an `Instrumentor`
to traces calls whithin a Python AWS Lambda function.
Usage
-----
.. code:: python
# Copy this snippet into an AWS Lambda function
import boto3
from opentelemetry.instrumentation.botocore import AwsBotocoreInstrumentor
from opentelemetry.instrumentation.aws_lambda import AwsLambdaInstrumentor
# Enable instrumentation
AwsBotocoreInstrumentor().instrument()
AwsLambdaInstrumentor().instrument()
# Lambda function
def lambda_handler(event, context):
s3 = boto3.resource('s3')
for bucket in s3.buckets.all():
print(bucket.name)
return "200 OK"
API
---
The `instrument` method accepts the following keyword args:
tracer_provider (TracerProvider) - an optional tracer provider
event_context_extractor (Callable) - a function that returns an OTel Trace
Context given the Lambda Event the AWS Lambda was invoked with
this function signature is: def event_context_extractor(lambda_event: Any) -> Context
for example:
.. code:: python
from opentelemetry.instrumentation.aws_lambda import AwsLambdaInstrumentor
def custom_event_context_extractor(lambda_event):
# If the `TraceContextTextMapPropagator` is the global propagator, we
# can use it to parse out the context from the HTTP Headers.
return get_global_textmap().extract(lambda_event["foo"]["headers"])
AwsLambdaInstrumentor().instrument(
event_context_extractor=custom_event_context_extractor
)
"""
import logging
import os
from importlib import import_module
from typing import Any, Callable, Collection
from wrapt import wrap_function_wrapper
from opentelemetry.context.context import Context
from opentelemetry.instrumentation.aws_lambda.package import _instruments
from opentelemetry.instrumentation.aws_lambda.version import __version__
from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
from opentelemetry.instrumentation.utils import unwrap
from opentelemetry.propagate import get_global_textmap
from opentelemetry.propagators.aws.aws_xray_propagator import (
TRACE_HEADER_KEY,
AwsXRayPropagator,
)
from opentelemetry.semconv.resource import ResourceAttributes
from opentelemetry.semconv.trace import SpanAttributes
from opentelemetry.trace import (
SpanKind,
TracerProvider,
get_tracer,
get_tracer_provider,
)
from opentelemetry.trace.propagation import get_current_span
logger = logging.getLogger(__name__)
_HANDLER = "_HANDLER"
_X_AMZN_TRACE_ID = "_X_AMZN_TRACE_ID"
ORIG_HANDLER = "ORIG_HANDLER"
OTEL_INSTRUMENTATION_AWS_LAMBDA_FLUSH_TIMEOUT = (
"OTEL_INSTRUMENTATION_AWS_LAMBDA_FLUSH_TIMEOUT"
)
def _default_event_context_extractor(lambda_event: Any) -> Context:
"""Default way of extracting the context from the Lambda Event.
Assumes the Lambda Event is a map with the headers under the 'headers' key.
This is the mapping to use when the Lambda is invoked by an API Gateway
REST API where API Gateway is acting as a pure proxy for the request.
Protects headers from being something other than dictionary, as this
is what downstream propagators expect.
See more:
https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html#api-gateway-simple-proxy-for-lambda-input-format
Args:
lambda_event: user-defined, so it could be anything, but this
method counts on it being a map with a 'headers' key
Returns:
A Context with configuration found in the event.
"""
headers = None
try:
headers = lambda_event["headers"]
except (TypeError, KeyError):
logger.debug(
"Extracting context from Lambda Event failed: either enable X-Ray active tracing or configure API Gateway to trigger this Lambda function as a pure proxy. Otherwise, generated spans will have an invalid (empty) parent context."
)
if not isinstance(headers, dict):
headers = {}
return get_global_textmap().extract(headers)
def _determine_parent_context(
lambda_event: Any, event_context_extractor: Callable[[Any], Context]
) -> Context:
"""Determine the parent context for the current Lambda invocation.
See more:
https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/instrumentation/aws-lambda.md#determining-the-parent-of-a-span
Args:
lambda_event: user-defined, so it could be anything, but this
method counts it being a map with a 'headers' key
Returns:
A Context with configuration found in the carrier.
"""
parent_context = None
xray_env_var = os.environ.get(_X_AMZN_TRACE_ID)
if xray_env_var:
parent_context = AwsXRayPropagator().extract(
{TRACE_HEADER_KEY: xray_env_var}
)
if (
parent_context
and get_current_span(parent_context)
.get_span_context()
.trace_flags.sampled
):
return parent_context
if event_context_extractor:
parent_context = event_context_extractor(lambda_event)
else:
parent_context = _default_event_context_extractor(lambda_event)
return parent_context
| [
2,
15069,
12131,
11,
4946,
31709,
41935,
46665,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
... | 2.928785 | 2,008 |
from rest_framework import viewsets, status
from rest_framework.response import Response
from backend.api.models import Usuario, Miembro, Proyecto, RolProyecto, Horario
from backend.api.serializers import MiembroSerializer, HorarioSerializer
from rest_framework.decorators import action
from django.db import transaction
from backend.api.decorators import FormValidator
from backend.api.forms import CreateMiembroForm, UpdateMiembroForm
class MiembroViewSet(viewsets.ViewSet):
"""
MiembroViewSet View para el modelo de Miembro
Args:
viewsets (ViewSet): Tipo de clase basado en View
"""
def retrieve(self, request, pk=None):
"""
retrieve Obtiene un miembro por su pk
Args:
request (Any): request
pk (integer, opcional): Primary Key
Return:
JSON: Miembro con la pk especificada
"""
try:
usuario_request = Usuario.objects.get(user=request.user)
miembro = Miembro.objects.get(pk=pk)
miembro_request = Miembro.objects.filter(usuario=usuario_request, proyecto=miembro.proyecto)
if miembro_request.count() != 1:
response = {"message": "Usted no es miembro de este proyecto"}
return Response(response, status=status.HTTP_403_FORBIDDEN)
if not miembro_request[0].tiene_permiso("ver_miembros"):
response = {
"message": "No tiene permiso para realizar esta acción",
"permission_required": ["ver_miembros"]
}
return Response(response, status=status.HTTP_403_FORBIDDEN)
serializer = MiembroSerializer(miembro, many=False)
return Response(serializer.data)
except Miembro.DoesNotExist:
response = {
"message": "No existe el miembro",
"error": "not_found"
}
return Response(response, status=status.HTTP_404_NOT_FOUND)
@transaction.atomic
@FormValidator(form=CreateMiembroForm)
def create(self, request):
"""
create Crea un miembro nuevo
Args:
request (Any): request
Return:
JSON: Miembro creado
"""
try:
usuario_request = Usuario.objects.get(user=request.user)
proyecto = Proyecto.objects.get(pk=request.data["proyecto"])
miembro_request = Miembro.objects.get(usuario=usuario_request, proyecto=proyecto)
if not (miembro_request.tiene_permiso("agregar_miembros") and
miembro_request.tiene_permiso("ver_roles_proyecto") and
usuario_request.tiene_permiso("ver_usuarios")):
response = {
"message": "No tiene permiso para realizar esta acción",
"permission_required": [
"agregar_miembros",
"ver_roles_proyecto",
"ver_usuarios"
]
}
return Response(response, status=status.HTTP_403_FORBIDDEN)
usuario = Usuario.objects.get(pk=request.data["usuario"])
rol = RolProyecto.objects.get(pk=request.data["rol"])
miembro = Miembro.objects.create(usuario=usuario, proyecto=proyecto, rol=rol)
horario = Horario.objects.create(
lunes=request.data["horario"]["lunes"],
martes=request.data["horario"]["martes"],
miercoles=request.data["horario"]["miercoles"],
jueves=request.data["horario"]["jueves"],
viernes=request.data["horario"]["viernes"],
sabado=request.data["horario"]["sabado"],
domingo=request.data["horario"]["domingo"]
)
horario.asignar_horario(miembro)
serializer = MiembroSerializer(miembro, many=False)
return Response(serializer.data)
except Miembro.DoesNotExist:
response = {"message": "Usted no es miembro de este proyecto"}
return Response(response, status=status.HTTP_403_FORBIDDEN)
    def destroy(self, request, pk=None):
        """
        destroy Deletes the member with the given primary key.

        The requester must be a member of the same project and hold the
        required permissions; self-deletion and deletion of the Scrum
        Master are rejected.

        Args:
            request (Any): request
            pk (integer, optional): primary key of the member to delete
        """
        try:
            usuario_request = Usuario.objects.get(user=request.user)
            miembro = Miembro.objects.get(pk=pk)
            # Membership of the requester in the *target member's* project.
            miembro_request = Miembro.objects.filter(usuario=usuario_request, proyecto=miembro.proyecto)
            if miembro_request.count() != 1:
                response = {"message": "Usted no es miembro de este proyecto"}
                return Response(response, status=status.HTTP_403_FORBIDDEN)
            if not (miembro_request[0].tiene_permiso("eliminar_miembros") and
                    miembro_request[0].tiene_permiso("ver_roles_proyecto") and
                    usuario_request.tiene_permiso("ver_usuarios")):
                response = {
                    "message": "No tiene permiso para realizar esta acción",
                    "permission_required": [
                        "eliminar_miembros",
                        "ver_roles_proyecto",
                        "ver_usuarios"
                    ],
                    "error": "forbidden"
                }
                return Response(response, status=status.HTTP_403_FORBIDDEN)
            # A member cannot delete themself.
            if miembro_request[0].id == miembro.id:
                response = {
                    "message": "No puedes eliminarte a ti mismo",
                    "error": "bad_request"
                }
                return Response(response, status=status.HTTP_400_BAD_REQUEST)
            # The Scrum Master membership is protected.
            if miembro.rol.nombre == "Scrum Master":
                response = {
                    "message": "No se puede eliminar el miembro Scrum Master",
                    "error": "forbidden"
                }
                return Response(response, status=status.HTTP_403_FORBIDDEN)
            miembro.delete()
            response = {"message": "Miembro Eliminado"}
            return Response(response)
        except Miembro.DoesNotExist:
            response = {
                "message": "No existe el miembro",
                "error": "not_found"
            }
            return Response(response, status=status.HTTP_404_NOT_FOUND)
    @FormValidator(form=UpdateMiembroForm)
    def update(self, request, pk=None):
        """
        update Modifies the member with the given primary key.

        Only the member's role can be changed; the requester's own role and
        the Scrum Master's role are protected, and the new role must belong
        to the member's project.

        Args:
            request (Any): request
            pk (integer, optional): primary key of the member to modify
        """
        try:
            usuario_request = Usuario.objects.get(user=request.user)
            miembro = Miembro.objects.get(pk=pk)
            # Membership of the requester in the *target member's* project.
            miembro_request = Miembro.objects.filter(usuario=usuario_request, proyecto=miembro.proyecto)
            if miembro_request.count() != 1:
                response = {"message": "Usted no es miembro de este proyecto"}
                return Response(response, status=status.HTTP_403_FORBIDDEN)
            if not (miembro_request[0].tiene_permiso("modificar_miembros") and
                    miembro_request[0].tiene_permiso("ver_roles_proyecto") and
                    usuario_request.tiene_permiso("ver_usuarios")):
                response = {
                    "message": "No tiene permiso para realizar esta acción",
                    "permission_required": [
                        "modificar_miembros",
                        "ver_roles_proyecto",
                        "ver_usuarios"
                    ]
                }
                return Response(response, status=status.HTTP_403_FORBIDDEN)
            # A member cannot change their own role.
            if miembro_request[0].id == miembro.id:
                response = {
                    "message": "No puedes modificar tu rol",
                    "error": "bad_request"
                }
                return Response(response, status=status.HTTP_400_BAD_REQUEST)
            # The Scrum Master membership is protected.
            if miembro.rol.nombre == "Scrum Master":
                response = {
                    "message": "No se puede modificar el miembro Scrum Master",
                    "error": "forbidden"
                }
                return Response(response, status=status.HTTP_403_FORBIDDEN)
            rol = RolProyecto.objects.get(pk=request.data["rol"])
            # The new role must come from the same project.
            if rol.proyecto != miembro.proyecto:
                response = {
                    "message": "El rol no pertenece a este proyecto",
                    "error": "forbidden"
                }
                return Response(response, status=status.HTTP_403_FORBIDDEN)
            miembro.rol = rol
            miembro.save()
            serializer = MiembroSerializer(miembro, many=False)
            return Response(serializer.data)
        except Miembro.DoesNotExist:
            response = {
                "message": "No existe el miembro",
                "error": "not_found"
            }
            return Response(response, status=status.HTTP_404_NOT_FOUND)
@action(detail=True, methods=["GET"])
| [
6738,
1334,
62,
30604,
1330,
5009,
1039,
11,
3722,
198,
6738,
1334,
62,
30604,
13,
26209,
1330,
18261,
198,
6738,
30203,
13,
15042,
13,
27530,
1330,
471,
2385,
4982,
11,
13756,
368,
7957,
11,
1041,
88,
478,
78,
11,
371,
349,
2964,
8... | 1.905217 | 4,811 |
from django.shortcuts import render
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
628,
198
] | 3.8 | 10 |
from django import forms
from wagtail.wagtailadmin.forms import WagtailAdminPageForm
| [
6738,
42625,
14208,
1330,
5107,
198,
198,
6738,
266,
363,
13199,
13,
86,
363,
13199,
28482,
13,
23914,
1330,
21309,
13199,
46787,
9876,
8479,
628
] | 3.48 | 25 |
# -*- coding: utf-8; indent-tabs-mode: nil; python-indent: 2 -*-
#
# Copyright (C) 2019 Thibauld Nion
#
# This file is part of WaterOnMars (https://github.com/tibonihoo/wateronmars)
#
# WaterOnMars is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# WaterOnMars is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with WaterOnMars. If not, see <http://www.gnu.org/licenses/>.
#
import sys
import re
from collections import defaultdict
# Maximum number of characters kept for a content snippet in the summary.
MAX_CONTENT_SIZE_CHARS = 140

# '#tag' tokens preceded by start-of-string or whitespace; group 2 is the tag text.
HASHTAG_REGEX = re.compile(r"(^|\s)#([\w\-\.]+)", re.UNICODE)
# 'Subject: rest' prefixes of at most 20 characters; group 2 is the subject text.
SUBJECT_REGEX = re.compile(r"(^\s*)([^:]{1,20})(:\s+\S+)", re.UNICODE)

# Sentinel tag value (presumably used to bucket activities carrying no
# hashtag at all — the consuming code is outside this excerpt; TODO confirm).
NO_TAG = "<NO_TAG>"
from dateutil.parser import parse as parse_date
if __name__ == "__main__":
    # Build a small synthetic activity stream and render it to a demo page.
    print("Generating a test HTML")
    activities = [
        {
            "url": "http://t/one/1",
            "object": {
                "published": "2012-01-19 17:21:00",
                "author": {"displayName": "One", "username": "o.ne"},
                "content": "Lorem1 #bla"
            },
        },
        {
            "url": "http://t/talkative/1",
            "object": {
                "published": "2012-01-19 15:12:00",
                "author": {"displayName": "Talkative", "username": "t.alkative"},
                "content": f"{6*'Lorem ipsum dolor sit amet, consectetur adipiscing elit. '} #Mouf #blip #glop #glip #groumpf #hop #hip #blop #paglop #lorem #talk #grr"
            },
        }
    ]
    # Ten short posts from a single author, one minute apart.
    activities.extend(
        {
            "url": f"http://t/two/{idx}",
            "object": {
                "published": f"2012-01-19 18:{idx:02}:00",
                "author": {"displayName": "Deux", "username": "t.wo"},
                "content": f"Lorem2 {idx}"
            },
        }
        for idx in range(10)
    )
    # Ten posts from ten distinct authors sharing the same hashtag.
    activities.extend(
        {
            "url": f"http://t/u{idx}/1",
            "object": {
                "published": f"2012-01-19 19:{idx:02}:00",
                "author": {"displayName": f"User{idx}", "username": f"u.{idx}"},
                "content": f"Lorem2 {idx} #Mouf"
            },
        }
        for idx in range(10)
    )
    threshold_date = parse_date("2011-01-19 17:21:00")
    html = generate_basic_html_summary(activities, threshold_date)
    html_file = "./tweet_summarizer_demo.html"
    with open(html_file, "w") as output:
        output.write(html)
    import webbrowser as browser
    browser.open(html_file)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
26,
33793,
12,
8658,
82,
12,
14171,
25,
18038,
26,
21015,
12,
521,
298,
25,
362,
532,
9,
12,
198,
2,
198,
2,
15069,
357,
34,
8,
13130,
49486,
30406,
399,
295,
198,
2,
198,
2,
770,
... | 2.213938 | 1,234 |
from setuptools import setup

# Read the README for the long description.  NOTE: setuptools expects the
# keyword 'long_description'; the original misspelled it as
# 'long_discription', so the README never reached the package metadata.
with open('README.rst', 'r') as f:
    long_description = f.read()

setup(
    name='SWSHplotting',
    version='0.0.1',
    author='Jonas Freißmann',
    author_email='jonas.freissmann@hs-flensburg.de',
    description='Plotting package for SWSH project.',
    long_description=long_description,
    long_description_content_type='text/x-rst',
    url='https://github.com/jfreissmann/SWSHplotting',
    license='MIT',
    classifiers=[
        'License :: OSI Approved :: MIT License'
    ],
    py_modules=['SWSHplotting'],
    package_dir={'': 'src'},
    python_requires='>=3.6',
    install_requires=['matplotlib>=3.1.0']
)
| [
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
4480,
1280,
10786,
15675,
11682,
13,
81,
301,
3256,
705,
81,
11537,
355,
277,
25,
198,
220,
220,
220,
890,
62,
15410,
2918,
796,
277,
13,
961,
3419,
198,
198,
40406,
7,
198,
220,
220,
... | 2.376344 | 279 |
"""The Ping (Socket) integration."""
from __future__ import annotations
from homeassistant.core import HomeAssistant
from .const import DOMAIN
| [
37811,
464,
34263,
357,
39105,
8,
11812,
526,
15931,
198,
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
6738,
1363,
562,
10167,
13,
7295,
1330,
5995,
48902,
198,
198,
6738,
764,
9979,
1330,
24121,
29833,
198
] | 4.027778 | 36 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
from grafo import *
# Module-level pass/fail counters, mutated by correr_prueba().
# (The original preceded this with a 'global' statement, which is a no-op at
# module scope and was removed.)
resultados_pruebas = { "OK" : 0, "ERROR" : 0 }
def correr_prueba(caso_prueba, descripcion):
    ''' Asserts the condition passed as caso_prueba and prints the
        description together with the outcome ("OK" or "ERROR").

        If the assertion fails, the corresponding counter in the
        module-level resultados_pruebas dictionary is incremented.

        caso_prueba is any expression (typically an equality) to be asserted.
        descripcion is the descriptive text printed along with the result.
        resultados_pruebas is a dictionary with the keys "OK" and "ERROR"
        mapping to the number of passed and failed checks, respectively.
    '''
    try:
        assert caso_prueba
        print "%s: OK" % descripcion
        resultados_pruebas["OK"] += 1
    except AssertionError:
        print "%s: ERROR" % descripcion
        resultados_pruebas["ERROR"] += 1
if __name__ == '__main__':
    # Run every graph test suite.  Each prueba_* function is defined earlier
    # in this module (outside this excerpt) and reports its results through
    # correr_prueba(), which updates the shared counters printed below.
    prueba_creacion_grafo_no_dirigido()
    prueba_agregar_vertices_grafo_no_dirigido()
    prueba_eliminar_vertices_grafo_no_dirigido()
    prueba_agregar_aristas_grafo_no_dirigido()
    prueba_eliminar_aristas_grafo_no_dirigido()
    prueba_creacion_grafo_dirigido()
    prueba_aristas_grafo_dirigido()
    prueba_volumen_grafo_no_dirigido()
    prueba_volumen_grafo_dirigido()
    # Final tally of passed/failed checks.
    print "\nPruebas corridas: %d OK, %d errores." % \
          (resultados_pruebas["OK"], resultados_pruebas["ERROR"])
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
201,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
11748,
640,
201,
198,
6738,
7933,
6513,
1330,
1635,
201,
198,
201,
198,
20541,
1255,
22484,
62,
1050,
518,
... | 1.965363 | 895 |
""" Automatic creation of pydantic model classes from a sqlalchemy table
SEE: Copied and adapted from https://github.com/tiangolo/pydantic-sqlalchemy/blob/master/pydantic_sqlalchemy/main.py
"""
import json
import warnings
from datetime import datetime
from typing import Any, Callable, Container, Dict, List, Optional, Tuple, Type
from uuid import UUID
import sqlalchemy as sa
import sqlalchemy.sql.functions
from pydantic import BaseConfig, BaseModel, Field, create_model
from pydantic.types import NonNegativeInt
from sqlalchemy.sql.schema import Column
warnings.warn(
"This is still a concept under development. "
"Currently only inteded for testing. "
"DO NOT USE in production.",
category=UserWarning,
)
_RESERVED = {
"schema",
# e.g. Field name "schema" shadows a BaseModel attribute; use a different field name with "alias='schema'".
}
def _eval_defaults(
    column: Column, pydantic_type: Type, *, include_server_defaults: bool = True
):
    """
    Uses some heuristics to determine the default value/factory produced
    parsing both the client and the server (if include_server_defaults==True) defaults
    in the sa model.

    Returns a ``(default, default_factory)`` pair for a pydantic field;
    ``default`` is ``...`` (Ellipsis) when the column is required.
    """
    default: Optional[Any] = None
    default_factory: Optional[Callable] = None

    # A column is *required* when it is not nullable and has no usable default.
    # BUG FIX: the server default only counts when include_server_defaults is
    # enabled.  The original wrote `include_server_defaults and
    # column.server_default is None`, which made every column optional
    # whenever include_server_defaults was False, even without any client
    # default.
    if (
        column.default is None
        and (not include_server_defaults or column.server_default is None)
        and not column.nullable
    ):
        default = ...

    # Scalar client-side default wins over the "required" marker above.
    if column.default and column.default.is_scalar:
        assert not column.default.is_server_default  # nosec
        default = column.default.arg

    if include_server_defaults and column.server_default:
        assert column.server_default.is_server_default  # nosec
        #
        # FIXME: Map server's DefaultClauses to correct values
        # Heuristics based on test against all our tables
        #
        if pydantic_type:
            if issubclass(pydantic_type, list):
                # Postgres empty-array default.
                assert column.server_default.arg == "{}"  # nosec
                default_factory = list
            elif issubclass(pydantic_type, dict):
                # Postgres jsonb default, e.g. "'{}'::jsonb".
                assert column.server_default.arg.text.endswith("::jsonb")  # nosec
                default = json.loads(
                    column.server_default.arg.text.replace("::jsonb", "").replace(
                        "'", ""
                    )
                )
            elif issubclass(pydantic_type, datetime):
                # now() server default maps to a client-side datetime.now factory.
                assert isinstance(  # nosec
                    column.server_default.arg, sqlalchemy.sql.functions.now
                )
                default_factory = datetime.now
    return default, default_factory
# Signature shared by all extra policies:
# (column, default, pydantic_type) -> (new_default, new_pydantic_type).
PolicyCallable = Callable[[Column, Any, Type], Tuple[Any, Type]]
def eval_name_policy(column: Column, default: Any, pydantic_type: Type):
    """All string columns including 'uuid' in their name are set as UUIDs

    Columns whose snake_case name contains a 'uuid' segment and whose
    pydantic type is ``str`` are promoted to ``UUID``; a string default is
    converted along with the type.
    """
    name_segments = str(column.name).split("_")
    if "uuid" not in name_segments or pydantic_type != str:
        # Not a uuid-named string column: leave both values untouched.
        return default, pydantic_type
    promoted_default = UUID(default) if isinstance(default, str) else default
    return promoted_default, UUID
# Policies applied by default when converting columns
# (currently only the uuid-name heuristic above).
DEFAULT_EXTRA_POLICIES = [
    eval_name_policy,
]
| [
37811,
30199,
6282,
286,
279,
5173,
5109,
2746,
6097,
422,
257,
44161,
282,
26599,
3084,
198,
198,
36078,
25,
6955,
798,
290,
16573,
422,
3740,
1378,
12567,
13,
785,
14,
83,
15483,
14057,
14,
79,
5173,
5109,
12,
25410,
282,
26599,
14,... | 2.487119 | 1,281 |
from setuptools import setup, find_packages

from patchworkdocker.meta import VERSION, DESCRIPTION, PACKAGE_NAME, EXECUTABLE_NAME

# Read ancillary files with context managers so the handles are closed (the
# original leaked both file objects), and strip requirement lines so trailing
# newlines and blank lines do not end up in install_requires.
with open("requirements.txt", "r") as requirements_file:
    requirements = [line.strip() for line in requirements_file if line.strip()]
with open("README.md", "r") as readme_file:
    long_description = readme_file.read()

setup(
    name=PACKAGE_NAME,
    version=VERSION,
    author="Colin Nolan",
    author_email="cn580@alumni.york.ac.uk",
    packages=find_packages(exclude=["tests"]),
    install_requires=requirements,
    url="https://github.com/colin-nolan/patchwork-docker",
    license="MIT",
    description=DESCRIPTION,
    long_description=long_description,
    long_description_content_type="text/markdown",
    entry_points={
        "console_scripts": [
            f"{EXECUTABLE_NAME}={PACKAGE_NAME}.cli:entrypoint"
        ]
    },
    zip_safe=True
)
| [
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
198,
6738,
8529,
1818,
45986,
13,
28961,
1330,
44156,
2849,
11,
22196,
40165,
11,
47035,
11879,
62,
20608,
11,
7788,
2943,
3843,
17534,
62,
20608,
198,
198,
40406,
7,
198,
... | 2.503472 | 288 |
import os

# Absolute path of the directory holding this settings module; presumably
# used to build file paths relative to it (usage not shown in this excerpt).
basedir = os.path.abspath(os.path.dirname(__file__))


# Maps an environment name to its configuration class; the classes are
# defined elsewhere in this module.
config = {
    'test': TestingConfig,
    'production': Config,
    'default': DevelopmentConfig
}
| [
11748,
28686,
198,
3106,
343,
796,
28686,
13,
6978,
13,
397,
2777,
776,
7,
418,
13,
6978,
13,
15908,
3672,
7,
834,
7753,
834,
4008,
628,
628,
198,
198,
11250,
796,
1391,
198,
220,
220,
220,
705,
9288,
10354,
23983,
16934,
11,
198,
... | 2.650794 | 63 |
'''
Created on 20 Jan 2021
@author: thomasgumbricht
'''
# Standard library imports
from os import path, makedirs
from sys import exit
# Third party imports
import psycopg2
from base64 import b64encode, b64decode
import netrc
from pyproj import Proj, transform
# Package application imports
from geoimagine.ancillary import ProcessAncillary
from geoimagine.postgresdb import ManageAncillary
from geoimagine.modis import ProcessModis
from geoimagine.postgresdb import ManageProcess, ManageRegion, ManageMODIS
from geoimagine.gis import kt_gis as ktgis
from geoimagine.params import JsonParams
from geoimagine.params.layers import VectorLayer
from geoimagine.postgresdb.easegrid import ManageEASEgrid
from geoimagine.region import IntersectRegions
def DbConnect(db):
    '''Assemble the connection dictionary for database *db*.

    Credentials are looked up in the user's ~/.netrc file under the machine
    entry 'karttur'.  The password is base64-encoded here (obfuscation, not
    encryption) and decoded again in PGsession.__init__.
    '''
    # The machine entry that must exist in the user's .netrc file.
    netrc_machine = 'karttur'
    login, _account, password = netrc.netrc().authenticators(netrc_machine)
    return {
        'db': db,
        'user': login,
        # Encoded for transport; PGsession decodes it before connecting.
        'pswd': b64encode(password.encode()),
    }
class PGsession:
    """Thin session wrapper around a psycopg2 connection to the local
    Postgres server, with helper queries for the karttur schemas.

    SECURITY NOTE(review): every query in this class is built with Python
    %-string interpolation rather than psycopg2 parameter binding.  The
    values come from internal configuration here, but any externally
    influenced value would allow SQL injection — consider migrating to
    ``cursor.execute(sql, params)``.
    """
    def __init__(self, query):
        """Connect to selected database.

        *query* is the dict produced by DbConnect(); its 'pswd' entry is
        base64-encoded and is decoded in place before connecting.
        """
        query['pswd'] = b64decode(query['pswd']).decode('ascii')
        conn_string = "host='localhost' dbname='%(db)s' user='%(user)s' password='%(pswd)s'" %query
        self.conn = psycopg2.connect(conn_string)
        self.cursor = self.conn.cursor()
        self.name = 'Setup_process'
    def _DictToSelect(self, queryD):
        '''
        Converts a dictionary to Select statement

        Each key maps to {'op': operator, 'val': value}; a leading '#' in a
        key is stripped.  The clauses are AND-joined into a WHERE statement,
        which is both stored on self.select_query and returned.
        '''
        selectL = []
        for key in queryD:
            #statement = key operator value
            statement = ' %(col)s %(op)s \'%(val)s\'' %{'col':key.replace('#',''), 'op':queryD[key]['op'], 'val':queryD[key]['val']}
            selectL.append(statement)
        self.select_query = "WHERE %(where)s" %{'where':' AND '.join(selectL)}
        return self.select_query
    def _SelectRootProcess(self,queryD):
        '''Return (rootprocid, minuserstratum) for the given subprocid, or None.
        '''
        self.cursor.execute("SELECT rootprocid, minuserstratum FROM process.subprocesses WHERE subprocid = '%(subprocid)s';" %queryD)
        record = self.cursor.fetchone()
        return record
    def _SelectUserCred(self, queryD):
        '''Load userid, usercat and stratum for queryD['user'] onto self.

        NOTE(review): raises TypeError on the tuple unpack if the user does
        not exist (fetchone() returns None) — confirm callers guarantee the
        user exists.
        '''
        sql = "SELECT userid, usercat, stratum FROM userlocale.users WHERE userid = '%(user)s';" %queryD
        self.cursor.execute(sql)
        self.userid, self.usercat, self.stratum = self.cursor.fetchone()
    def _SelectTractDefRegion(self, queryD):
        '''Resolve queryD['tract'] to (regionid, 'D') if it is itself a
        default region, (parentid, 'T') if it is a tract, or None.
        '''
        #First check if this region is itself a defregion
        sql = "SELECT regionid FROM regions.defregions WHERE regionid = '%(tract)s';" %queryD
        self.cursor.execute(sql)
        rec = self.cursor.fetchone()
        if rec != None:
            return (rec[0], 'D')
        sql = "SELECT parentid FROM regions.tracts WHERE tractid = '%(tract)s';" %queryD
        self.cursor.execute(sql)
        rec = self.cursor.fetchone()
        if rec == None:
            return rec
        return (rec[0], 'T')
    def _SelectProcessSystem(self, queryD, paramL):
        ''' Select system for this process

        Looks up the columns in paramL for (subprocid, system); falls back
        to the wildcard system '*' and aborts the program if neither exists.
        Returns the row as a dict keyed by paramL.
        '''
        queryD['cols'] = " ,".join(paramL)
        sql = "SELECT %(cols)s FROM process.procsys WHERE subprocid = '%(subprocid)s' and system = '%(system)s';" %queryD
        self.cursor.execute(sql)
        record = self.cursor.fetchone()
        if record == None:
            # Fall back to the wildcard system entry.
            self.cursor.execute("SELECT srcsystem, dstsystem, srcdivision, dstdivisio FROM process.procsys WHERE subprocid = '%(subprocid)s' and system = '*';" %queryD)
            record = self.cursor.fetchone()
            if record == None:
                exitstr = 'No records in _setup_process_class.PGsession.SelectProcessSystem'
                exit(exitstr)
        return dict(zip(paramL,record))
    def _MultiSearch(self,queryD, paramL, schema, table):
        ''' Select multiple records from any schema.table

        queryD values may be plain values (compared with '=') or dicts with
        preset 'op'/'val'.  Returns (and stores on self.records) all matching
        rows for the columns in paramL.
        '''
        selectQuery = {}
        for item in queryD:
            if isinstance(queryD[item],dict):
                #preset operator and value
                selectQuery[item] = queryD[item]
            else:
                selectQuery[item] = {'op':'=', 'val':queryD[item]}
        wherestatement = self._DictToSelect(selectQuery)
        if len(paramL) == 1:
            cols = paramL[0]
        else:
            cols = ','.join(paramL)
        selectQuery = {'schema':schema, 'table':table, 'select': wherestatement, 'cols':cols}
        query = "SELECT %(cols)s FROM %(schema)s.%(table)s %(select)s" %selectQuery
        self.cursor.execute(query)
        self.records = self.cursor.fetchall()
        return self.records
    def _SetSystem(self,system):
        '''Remember the active system identifier on the session.
        '''
        self.system = system
class ProcessProcess:
    """Processes that define other processes (root/sub process registration)."""
    def __init__(self, pp):
        """The constructor requires an instance of the main process, and the
        json object defining the process to setup.

        Dispatches on pp.process.processid:
          * 'addrootproc' registers a root process
          * 'addsubproc'  registers a sub process
        Any other processid aborts the program.
        """
        self.pp = pp
        self.verbose = pp.process.verbose
        if self.verbose:
            infostr = '    Processing %s' %(self.pp.process.processid)
            print (infostr)
        self.session = ManageProcess(self.pp.postgresdb.db)
        if self.pp.process.processid == 'addrootproc':
            if self.verbose:
                print ('        %s' %(self.pp.process.parameters.rootprocid))
            queryD = {'rootprocid':self.pp.process.parameters.rootprocid,
                      'title':self.pp.process.parameters.title,
                      'label':self.pp.process.parameters.label,
                      'creator':self.pp.userproject.userid}
            self.session._ManageRootProcess(self.pp.process, queryD)
        elif self.pp.process.processid == 'addsubproc':
            if self.verbose:
                print ('        %s' %(self.pp.process.parameters.subprocid))
            queryD = {'rootprocid':self.pp.process.parameters.rootprocid,
                      'subprocid':self.pp.process.parameters.subprocid,
                      'title':self.pp.process.parameters.title,
                      'label':self.pp.process.parameters.label,
                      'version':self.pp.process.parameters.version,
                      'minuserstratum':self.pp.process.parameters.minuserstratum,
                      'creator':self.pp.userproject.userid}
            self.session._ManageSubProcess(self.pp.process, queryD)
        else:
            # BUG FIX: the original read self.process.processid, an attribute
            # that does not exist on this class — the branch raised an
            # AttributeError instead of the intended clean exit.
            exitstr = 'subprocess %s not defined in manageprocess' %(self.pp.process.processid)
            exit( exitstr )
class ProcessDefaultRegions(IntersectRegions):
    '''Creation and registration of default regions, either from explicit
    corner coordinates or from features of an existing vector layer.
    '''
    def _DefaultRegionRegister(self,layer):
        '''Register *layer* as a default region in the database: derive its
        projection and bounds, reproject the corners to lon/lat and insert
        the record via the session.
        '''
        # Get the projection
        projection = ktgis.GetVectorProjection(layer.FPN)
        #Set lonlat projection
        lonlatproj = ktgis.MjProj()
        lonlatproj.SetFromEPSG(4326)
        # Get the boundary
        boundsD = ktgis.GetFeatureBounds(layer.FPN,'REGIONID')
        if len(boundsD) != 1:
            exitstr = 'Default regions must consist on only one (1) feature (polygon or multipolygon): %s' %(layer.FPN)
            exit(exitstr)
        k = list(boundsD)[0]
        layer._SetBounds(projection.epsg,boundsD[k][0], boundsD[k][1], boundsD[k][2], boundsD[k][3] )
        #Get the corners in lonlat
        llD = ktgis.ReprojectBounds(layer.BoundsPtL,projection.proj_cs,lonlatproj.proj_cs)
        queryD = {'regionid': self.pp.process.parameters.regionid,
                'regionname': self.pp.process.parameters.regionname,
                'parentid': self.pp.process.parameters.parentid,
                'regioncat': self.pp.process.parameters.regioncat,
                'parentcat': self.pp.process.parameters.parentcat,
                'title': self.pp.process.parameters.title,
                'label':self.pp.process.parameters.label,
                'epsg':self.pp.procsys.dstepsg}
        # Region ids must not contain spaces (they are used in paths).
        if ' ' in queryD['regionid'] or ' ' in queryD['parentid']:
            exit('regionid or parentid with whuite space in setup_process_process')
        self.session._InsertDefRegion(layer, queryD, boundsD[self.pp.process.parameters.regionid], llD, self.pp.process.overwrite, self.pp.process.delete )
    def _DefaultRegionFromCoords(self):
        '''Create a default-region polygon layer from the min/max corner
        coordinates given in the process parameters, then register it.
        '''
        for locus in self.pp.dstLayerD:
            for datum in self.pp.dstLayerD[locus]:
                for comp in self.pp.dstLayerD[locus][datum]:
                    layer = self.pp.dstLayerD[locus][datum][comp]
                    #The destination region must be forced,this is because the locus to be created did not exists when checking for the default locus
                    layer.locus.locus = self.pp.process.parameters.regionid.lower()
                    layer.locus.path = self.pp.process.parameters.regionid.lower()
                    layer._SetPath()
                    fieldDD = self._SetfieldD()
                    layer.CreateVectorAttributeDef(fieldDD)
                    layer._SetBounds(self.pp.procsys.dstepsg,
                        self.pp.process.parameters.minx,
                        self.pp.process.parameters.miny,
                        self.pp.process.parameters.maxx,
                        self.pp.process.parameters.maxy)
                    projection = ktgis.MjProj()
                    projection.SetFromEPSG(self.pp.procsys.dstepsg)
                    if not layer._Exists() or self.pp.process.overwrite:
                        ktgis.CreateESRIPolygonPtL(layer.FPN, layer.fieldDefL, layer.BoundsPtL, projection.proj_cs, self.pp.process.parameters.regionid)
                    self._DefaultRegionRegister(layer)
    def _DefaultRegFromVec(self):
        '''Create and register one default region per feature of the source
        vector layer, reading the region attributes from the fields named in
        the process parameters.
        '''
        # dstLayerD and srcLayerD are almost identical
        for locus in self.pp.dstLayerD:
            for datum in self.pp.dstLayerD[locus]:
                for comp in self.pp.dstLayerD[locus][datum]:
                    srcLayer = self.pp.srcLayerD[locus][datum][comp]
                    if not path.isfile(srcLayer.FPN):
                        exitstr = 'No source layer in _DefaultRegFromVec', srcLayer.FPN
                        exit(exitstr)
                    p = self.pp.process.parameters
                    # Attribute fields to read from each source feature.
                    fieldL = [p.vector_db_id, p.vector_db_name,
                              p.vector_db_category, p.vector_db_parentid,
                              p.vector_db_parentcat, p.vector_db_stratum,
                              p.vector_db_title, p.vector_db_label]
                    fieldD = ktgis.GetFeatureAttributeList(srcLayer.FPN, fieldL, p.vector_db_id)
                    if not fieldD:
                        exit('setup_process_class: fieldD failed in _DefaultRegFromVec')
                    for key in fieldD:
                        # Convert the field data to a dict
                        params = ['regionid', 'regionname', 'regioncat', 'stratum', 'parentid', 'parentcat', 'title', 'label']
                        values = [ str(fieldD[key][p.vector_db_id]).lower().replace(' ', '-'),
                                   str(fieldD[key][p.vector_db_name]),
                                   str(fieldD[key][p.vector_db_category].lower()),
                                   int(fieldD[key][p.vector_db_stratum]),
                                   str(fieldD[key][p.vector_db_parentid]).lower().replace(' ', '-'),
                                   str(fieldD[key][p.vector_db_parentcat].lower()),
                                   str(fieldD[key][p.vector_db_title]),
                                   str(fieldD[key][p.vector_db_label]) ]
                        d = dict(zip(params, values))
                        # Replace the process class parameter with the dict
                        # (a bare object would do; lambda is used as an
                        # attribute holder here).
                        self.pp.process.parameters = lambda:None
                        for k,v in d.items():
                            setattr(self.pp.process.parameters, k, v)
                        fieldDD = self._SetfieldD()
                        regionid = self.pp.process.parameters.regionid
                        #Construct the locus for this region
                        locusD = {'locus':regionid,'path':regionid}
                        # Recreate the composition
                        compDstCopy = self.pp.dstLayerD[locus][datum][comp].comp
                        # Set layerid and prefix to "defreg"
                        compDstCopy.layerid = compDstCopy.prefix = 'defreg'
                        # Set content ot roi (region of interest)
                        compDstCopy.content = 'roi'
                        # Reset the compid
                        compDstCopy._SetCompid()
                        # Recreate the vector Layer
                        dstLayer = VectorLayer(compDstCopy, locusD, self.pp.dstPeriod.datumD[datum])
                        dstLayer.CreateVectorAttributeDef(fieldDD)
                        fieldname = p.vector_db_id
                        valueLL = [[fieldD[key][p.vector_db_id]]]
                        if not dstLayer._Exists() or self.pp.process.overwrite: #or overwrite
                            ktgis.ExtractFeaturesToNewDS(srcLayer.FPN, dstLayer.FPN, fieldname,valueLL, dstLayer.fieldDefL)
                        self._DefaultRegionRegister(dstLayer)
        # Dead code kept for reference (superseded by _DefaultRegionRegister).
        '''
        fieldname = 'REGIONID'
        #Get the epsg and bounds
        boundsD = ktgis.GetFeatureBounds(dstLayer.FPN,fieldname)
        if len(boundsD) != 1:
            exitstr = 'Default regions must consist on only one (1) feature (polygon or multipolygon): %s' %(dstLayer.FPN)
            exit(exitstr)
        projection = ktgis.GetVectorProjection(dstLayer.FPN)
        k = list(boundsD)[0]
        bounds = boundsD[k]
        dstLayer._SetBounds(projection.epsg,boundsD[k][0], boundsD[k][1], boundsD[k][2], boundsD[k][3] )
        _DefaultRegionRegister(self,dstLayer, projection)
        #Set lonlat projection
        lonlatproj = ktgis.MjProj()
        lonlatproj.SetFromEPSG(4326)
        #Get the corners in lonlat
        llD = ktgis.ReprojectBounds(dstLayer.BoundsPtL,projection.proj_cs,lonlatproj.proj_cs)
        title = label = 'default region %s' %(regionid)
        query = {'regionname':regionname,'regioncat':regioncat, 'parentid':parentid, 'parentcat':parentcat,'regionid':regionid, 'title':title,'label':label,'epsg':projection.epsg}
        session._InsertDefRegion(self.process, dstLayer, query, bounds, llD )
        '''
    def _SetfieldD(self):
        ''' Set the fields for default region layers

        Returns the attribute-definition dict used when creating the default
        region vector layer; all values are constants taken from the current
        process parameters.
        '''
        fieldDD = {}
        fieldDD['REGIONID'] = {'name':'REGIONID', 'type':'string','width':32,
                       'precision':0,'transfer':'constant','source':self.pp.process.parameters.regionid }
        fieldDD['NAME'] = {'name':'NAME', 'type':'string','width':64,
                       'precision':0,'transfer':'constant','source':self.pp.process.parameters.regionname }
        fieldDD['CATEGORY'] = {'name':'CATEGORY', 'type':'string','width':32,
                       'precision':0,'transfer':'constant','source':self.pp.process.parameters.regioncat }
        fieldDD['STRATUM'] = {'name':'STRATUM', 'type':'integer','width':4,
                       'precision':0,'transfer':'constant','source':self.pp.process.parameters.stratum }
        fieldDD['PARENTID'] = {'name':'PARENTID', 'type':'string','width':32,
                       'precision':0,'transfer':'constant','source':self.pp.process.parameters.parentid }
        fieldDD['PARENTCAT'] = {'name':'PARENTCAT', 'type':'string','width':32,
                       'precision':0,'transfer':'constant','source':self.pp.process.parameters.parentcat }
        return fieldDD
def SetupProcessesRegions(docpath, projFN, db):
    '''
    Setup processes

    Reads the project text file *projFN* under *docpath* (each non-comment
    line names a json process definition), parses every json object, and
    dispatches each resulting process to its handler class based on
    rootprocid.
    '''
    srcFP = path.join(path.dirname(__file__),docpath)
    projFPN = path.join(srcFP,projFN)
    # Get the full path to the project text file
    dirPath = path.split(projFPN)[0]
    if not path.exists(projFPN):
        exitstr = 'EXITING, project file missing: %s' %(projFPN)
        exit( exitstr )
    infostr = 'Processing %s' %(projFPN)
    print (infostr)
    # Open and read the text file linking to all json files defining the project
    with open(projFPN) as f:
        jsonL = f.readlines()
    # Clean the list of json objects from comments and whithespace etc
    jsonL = [path.join(dirPath,x.strip()) for x in jsonL if len(x) > 10 and x[0] != '#']
    # Get the user and password for connecting to the db
    query = DbConnect(db)
    # Connect to the Postgres Server
    session = PGsession(query)
    ProcPar = JsonParams(session)
    processL = []
    #Loop over all json files and create Schemas and Tables
    for jsonObj in jsonL:
        infostr = '    reading json file: %s' %(path.split(jsonObj)[1])
        print (infostr)
        processL.append( ProcPar._JsonObj(jsonObj) )
    # Close the db connection for getting processes and user
    session._Close()
    # Dispatch every parsed process to its handler by root process id.
    for processD in processL:
        for k in range(len(processD)):
            print('    ', path.split(processD[k]['PP'].jsonFPN)[1] )
            print ('    ',k, processD[k])
            if processD[k]['PP'].rootprocid == 'manageprocess':
                ProcessProcess(processD[k]['PP'])
            elif processD[k]['PP'].rootprocid == 'ManageRegion':
                #ProcessDefaultRegions(db, process, self.procsys, self.userproject, self.userid, self.usercat, self.stratum)
                ProcessDefaultRegions(processD[k]['PP'])
            elif processD[k]['PP'].rootprocid == 'Ancillary':
                session = ManageAncillary(db)
                ProcessAncillary(processD[k]['PP'], session)
                session._Close()
            elif processD[k]['PP'].rootprocid == 'MODISProc':
                session = ManageMODIS(db)
                ProcessModis(processD[k]['PP'], session)
                session._Close()
            else:
                # Unknown root process: report it and continue.
                print (processD[k]['PP'].rootprocid)
                print (processD[k]['PP'].subprocid)
def ModisTileCoords(db, verbose = 1):
    ''' Create the MODIS defaut tiling system

    Builds the 36x18 MODIS sinusoidal tile grid, writes three shape files
    (global outline, tiles in SIN projection, tiles in geographic
    coordinates) and registers each tile in the MODIS database schema.
    Returns the path of the last shape file written.
    '''
    #Open the db session for MODIS
    session = ManageMODIS(db)
    # MODIS sinusoidal (SIN) projection and geographic WGS84.
    SINproj = ktgis.MjProj()
    SINproj.SetFromProj4('+proj=sinu +lon_0=0 +x_0=0 +y_0=0 +a=6371007.181 +b=6371007.181 +units=m +no_defs')
    LatLonproj = ktgis.MjProj()
    LatLonproj.SetFromProj4('+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs +towgs84=0,0,0')
    # Densified outline of the whole globe (1-degree steps, clockwise).
    ptL = []
    for lon in range(360):
        ptL.append((lon-180,90))
    for lat in range(180):
        ptL.append((180,-1*(lat-90)))
    for lon in range(360):
        ptL.append((-1*(lon-180),-90))
    for lat in range(180):
        ptL.append((-180,lat-90))
    worldgeom = ktgis.ShapelyPolyGeom(ptL)
    worldgeom.ShapelyToOgrGeom()
    worldgeom.GeoTransform(LatLonproj,SINproj)
    worldgeom.OgrGeomToShapely()
    # NOTE(review): 'home' is never used below — candidate for removal.
    home = path.expanduser("~")
    tarShpFP = path.join(path.dirname(__file__),'data')
    if not path.exists(tarShpFP):
        makedirs(tarShpFP)
    FN = 'modtiles-multi_karttur_global_epsg6842.shp'
    tarShpFPN = path.join(tarShpFP,FN)
    fieldDefD = {'type':'string','transfer':'constant','source':'globe','width':8}
    fieldDefL = [ktgis.FieldDef('name',fieldDefD)]
    ktgis.CreateESRIPolygonGeom(tarShpFPN, fieldDefL, worldgeom, SINproj.proj_cs, 'globe')
    # Create a shape file for all individual tiles in SIN proj
    FN = 'modtiles-single_karttur_global_epsg6842.shp'
    tarShpFPN = path.join(tarShpFP,FN)
    tarDS,tarLayer = ktgis.ESRICreateDSLayer(tarShpFPN, SINproj.proj_cs, 'polygon', 'tiles', fieldDefL)
    # Create a shape file for all individual tiles in Geographic coordinates
    FN = 'modtiles_karttur_global_0.shp'
    tarShpFPN = path.join(tarShpFP,FN)
    tarDSLonLat,tarLayerLonLat = ktgis.ESRICreateDSLayer(tarShpFPN, LatLonproj.proj_cs, 'polygon', 'tiles', fieldDefL)
    #create a region with all tiles
    # Tile side: one 18th of the SIN half-circumference (meters).
    tlen = 20015109.3539999984204769
    tlen /= 18
    for h in range(36):
        minx = tlen*(18-36)+h*tlen
        maxx = minx+tlen
        for v in range(18):
            maxy = tlen*(9-18)+(18-v)*tlen
            miny = maxy-tlen
            ptL = [(minx,maxy),(maxx,maxy),(maxx,miny),(minx,miny)]
            tilegeom = ktgis.ShapelyMultiPointGeom(ptL)
            #convert to ogr
            tilegeom.ShapelyToOgrGeom()
            #write target feature
            tilegeom.GeoTransform(SINproj,LatLonproj)
            tilegeom.OgrGeomToShapely()
            # Flatten the reprojected corner points to
            # ul/ur/lr/ll lon-lat pairs.
            coordL = []
            for point in [ptgeom for ptgeom in tilegeom.shapelyGeom]:
                coordL.extend([list(point.coords)[0][0],list(point.coords)[0][1]])
            ullon, ullat, urlon, urlat, lrlon, lrlat, lllon, lllat = coordL
            tilepoly = ktgis.ShapelyPolyGeom([(minx, maxy), (maxx, maxy), (maxx, miny), (minx,miny)])
            #Test if this tile is inside the globe
            if tilepoly.shapelyGeom.intersects(worldgeom.shapelyGeom):
                # Standard MODIS tile naming, e.g. h08v05.
                if h < 10:
                    htile = 'h0%s' %(h)
                else:
                    htile = 'h%s' %(h)
                if v < 10:
                    vtile = 'v0%s' %(v)
                else:
                    vtile = 'v%s' %(v)
                hvtile = '%s%s' %(htile,vtile)
                polytilegeom = ktgis.ShapelyPolyGeom(ptL)
                polytilegeom.ShapelyToOgrGeom()
                fieldDefD = {'type':'string','transfer':'constant','source':hvtile,'width':8}
                fieldDefL = [ktgis.FieldDef('name',fieldDefD)]
                #create target feature
                tarFeat = ktgis.ogrFeature(tarLayer)
                tarFeat.CreateOgrFeature(polytilegeom.ogrGeom, fieldDefL)
                # NOTE(review): for h == 17 everything below (antimeridian
                # fix-ups, geographic feature and DB rows) is skipped
                # entirely — confirm this is intentional.
                if h == 17:
                    pass
                else:
                    #to be correct 5 points are needed and also the lat must be fitted
                    # Clamp corners that wrapped across the antimeridian.
                    if h < 18 and ullon > 0:
                        ullon = -180
                    if h < 18 and lllon > 0:
                        lllon = -180
                    if h < 18 and urlon > 0:
                        urlon = -180
                    if h < 18 and lrlon > 0:
                        lrlon = -180
                    if h > 18 and urlon < 0:
                        urlon = 180
                    if h > 18 and lrlon < 0:
                        lrlon = 180
                    if h > 18 and ullon < 0:
                        ullon = 180
                    if h > 18 and lllon < 0:
                        lllon = 180
                    # Hard-coded corrections for the four corner tiles.
                    if hvtile == 'h24v01':
                        urlon = 180
                    if hvtile == 'h24v16':
                        lrlon = 180
                    if hvtile == 'h11v01':
                        ullon = -180
                    if hvtile == 'h11v16':
                        lllon = -180
                    if ullon > urlon:
                        print ('ERROR','ullon > urlon',hvtile,ullon,urlon)
                    if lllon > lrlon:
                        print ('ERROR','lllon > lrlon',hvtile, lllon, lrlon)
                    #
                    polytilegeom = ktgis.ShapelyPolyGeom([(ullon, ullat), (urlon, urlat), (lrlon, lrlat), (lllon,lllat)])
                    polytilegeom.ShapelyToOgrGeom()
                    #polytilegeom.GeoTransform(SINproj,LatLonproj)
                    #create target feature
                    tarLonLatFeat = ktgis.ogrFeature(tarLayerLonLat)
                    tarLonLatFeat.CreateOgrFeature(polytilegeom.ogrGeom, fieldDefL)
                    west,south,east,north = polytilegeom.shapelyGeom.bounds
                    session._InsertModisTileCoord(hvtile,h,v,
                                round(minx,8), round(maxy,8), round(maxx,8), round(miny,8),
                                round(west,8), round(south,8), round(east,8), round(north,8),
                                round(ullat,8), round(ullon,8), round(lrlon,8), round(lrlat,8),
                                round(urlon,8), round(urlat,8), round(lllon,8), round(lllat,8))
                    query = {'system':'system','table':'regions','h':h,'v':v,'hvtile':hvtile,'regionid':'global','regioncat':'global','regiontype':'default','delete':False}
                    session._InsertModisRegionTile(query)
    tarDS.CloseDS()
    tarDSLonLat.CloseDS()
    session._Close()
    if verbose > 1:
        print ('Check the shape file',tarShpFPN)
    return (tarShpFPN)
def Ease2PolarTileCoords(db, verbose = 1):
    ''' Create the Ease2 polar tiling system.

    For each polar EASE-2 grid (north: EPSG 6931, south: EPSG 6932) this
    builds a 20 x 20 layout of 900 km square tiles, writes them to an ESRI
    shape file under the user home directory, and registers each tile's
    projected and geographic bounds in the database.

    :param db: database handle/definition forwarded to ManageEASEgrid.
    :param verbose: if > 0, print the path of the written shape file.
    '''
    # EPSG codes of the two polar EASE-2 grids (north / south).
    eD = {'ease2n':6931, 'ease2s':6932}
    for easegrid in ['ease2n','ease2s']:
        latlonProj = Proj('epsg:4326') # 4326 represents geographic coordinates
        projstr = 'epsg:%(e)d' %{'e':eD[easegrid]}
        # Set the target projection (polar EASE-2 grid)
        easeProj = Proj(projstr)
        session = ManageEASEgrid(db)
        home = path.expanduser("~")
        Ease2proj = ktgis.MjProj()
        Ease2proj.SetFromEPSG(eD[easegrid])
        # Shape file destination for this grid's tiles
        FN = '%(e)stiles-multi_karttur_epsg%(e)s.shp' %{'e': eD[easegrid]}
        tarShpFPN = path.join(home,FN)
        fieldDefD = {'type':'string','transfer':'constant','source':'globe','width':8}
        fieldDefL = [ktgis.FieldDef('name',fieldDefD)]
        # Create a shape file for all individual tiles in the EASE-2 projection
        tarDS,tarLayer = ktgis.ESRICreateDSLayer(tarShpFPN, Ease2proj.proj_cs, 'polygon', 'tiles', fieldDefL)
        # Define the side of a tile (metres)
        tileside = 900000
        # Set initial maxx (left edge of the 20-tile extent)
        maxx = -9000000
        for x in range(20):
            maxx += tileside
            minx = maxx-tileside
            maxy = -9000000
            for y in range(20):
                maxy += tileside
                miny = maxy-tileside
                # Tile corners in projected coordinates: ul, ur, lr, ll
                ptL = [(minx,maxy),(maxx,maxy),(maxx,miny),(minx,miny)]
                tilegeom = ktgis.ShapelyPolyGeom(ptL)
                #convert to ogr
                tilegeom.ShapelyToOgrGeom()
                # Zero-padded tile id, e.g. 'x03' + 'y17' -> 'x03y17'
                if x < 10:
                    xtile = 'x0%s' %(x)
                else:
                    xtile = 'x%s' %(x)
                if y < 10:
                    ytile = 'y0%s' %(y)
                else:
                    ytile = 'y%s' %(y)
                xytile = '%s%s' %(xtile,ytile)
                fieldDefD = {'type':'string','transfer':'constant','source':xytile,'width':8}
                fieldDefL = [ktgis.FieldDef('name',fieldDefD)]
                #create target feature
                tarFeat = ktgis.ogrFeature(tarLayer)
                tarFeat.CreateOgrFeature(tilegeom.ogrGeom, fieldDefL)
                west,south,east,north = tilegeom.shapelyGeom.bounds
                # Reproject each corner to lat/lon, keyed e.g. 'ullat'/'ullon'
                corners = ['ul','ur','lr','ll']
                llD = {}
                for z, pt in enumerate(ptL):
                    lat,lon = transform(easeProj, latlonProj, pt[0], pt[1])
                    key = '%(c)slat' %{'c':corners[z]}
                    llD[key] = round(lat,5)
                    key = '%(c)slon' %{'c':corners[z]}
                    llD[key] = round(lon,5)
                # Write tile to db
                session._InsertTileCoord(easegrid,xytile,x,y,round(minx,2),round(maxy,2),round(maxx,2),
                    round(miny,2),round(west,2),round(south,2),round(east,2),round(north,2),
                    llD['ullat'],llD['ullon'],llD['lrlat'],llD['lrlon'],llD['urlat'],llD['urlon'],llD['lllat'],llD['lllon'])
                query = {'system':easegrid,'easegrid': easegrid, 'table':'regions','xtile':x,'ytile':y,'xytile':xytile,'regionid':'global','regioncat':'global','regiontype':'default','delete':False}
                session._InsertRegionTile(query)
        # Close this grid's datasource before moving on to the next grid.
        tarDS.CloseDS()
    if verbose > 0:
        print ('Check the shape file',tarShpFPN)
def Ease2GlobalTileCoords(db, verbose = 1):
    ''' Create the Ease2 global tiling system.

    Builds the 36 x 16 tile layout of the global/temperate EASE-2 grid
    (EPSG 6933), writes the tiles to an ESRI shape file under the user
    home directory, and registers each tile's projected and geographic
    bounds in the database.  Edge columns and rows use different widths
    so the layout exactly covers the projection extent.

    :param db: database handle/definition forwarded to ManageEASEgrid.
    :param verbose: if > 0, print the path of the written shape file.
    '''
    easegrid = 'ease2t'
    latlonProj = Proj('epsg:4326') # 4326 represents geographic coordinates
    # Set the target projection (EASE-grid)
    easeProj = Proj('epsg:6933') # 6933 represents the global/tropial EASE grid
    session = ManageEASEgrid(db)
    home = path.expanduser("~")
    Ease2proj = ktgis.MjProj()
    Ease2proj.SetFromEPSG(6933)
    # Destination shape file for all individual tiles
    FN = '%(e)stiles-multi_karttur_epsg%(e)s.shp' %{'e': '6933'}
    tarShpFPN = path.join(home,FN)
    fieldDefD = {'type':'string','transfer':'constant','source':'globe','width':8}
    fieldDefL = [ktgis.FieldDef('name',fieldDefD)]
    # Create a shape file for all individual tiles in the EASE-2 projection
    tarDS,tarLayer = ktgis.ESRICreateDSLayer(tarShpFPN, Ease2proj.proj_cs, 'polygon', 'tiles', fieldDefL)
    # Define the side of a regular (interior) tile, metres
    tileside = 936837.98
    # Widths for the special first/last column and first/last row
    xtileside = 1441284.79
    ytileside = 936837.98-180163.01
    # Set initial maxx
    maxx = -17367530.45
    for x in range(36):
        if x == 0 or x == 35:
            maxx += xtileside
        else:
            maxx += tileside
        if x == 0 or x == 35:
            minx = maxx-xtileside
        else:
            minx = maxx-tileside
        # Maxy is not the real maxy, the last rows of data will instead be omitted to keep the dimensions
        maxy = -7314540.83
        for y in range(1,17):
            if y == 1 or y == 16:
                maxy += ytileside
                miny = maxy-ytileside
            else:
                maxy += tileside
                miny = maxy-tileside
            # NOTE(review): debug trace left in -- consider guarding with verbose.
            print (x,y,minx,miny,maxx,maxy)
            # Tile corners in projected coordinates: ul, ur, lr, ll
            ptL = [(minx,maxy),(maxx,maxy),(maxx,miny),(minx,miny)]
            tilegeom = ktgis.ShapelyPolyGeom(ptL)
            #convert to ogr
            tilegeom.ShapelyToOgrGeom()
            # Zero-padded tile id, e.g. 'x03' + 'y07' -> 'x03y07'
            if x < 10:
                xtile = 'x0%s' %(x)
            else:
                xtile = 'x%s' %(x)
            if y < 10:
                ytile = 'y0%s' %(y)
            else:
                ytile = 'y%s' %(y)
            xytile = '%s%s' %(xtile,ytile)
            fieldDefD = {'type':'string','transfer':'constant','source':xytile,'width':8}
            fieldDefL = [ktgis.FieldDef('name',fieldDefD)]
            #create target feature
            tarFeat = ktgis.ogrFeature(tarLayer)
            tarFeat.CreateOgrFeature(tilegeom.ogrGeom, fieldDefL)
            # Extract bounds
            west,south,east,north = tilegeom.shapelyGeom.bounds
            # Reproject each corner to lat/lon, keyed e.g. 'ullat'/'ullon'
            corners = ['ul','ur','lr','ll']
            llD = {}
            for z, pt in enumerate(ptL):
                lat,lon = transform(easeProj, latlonProj, pt[0], pt[1])
                key = '%(c)slat' %{'c':corners[z]}
                llD[key] = round(lat,5)
                key = '%(c)slon' %{'c':corners[z]}
                llD[key] = round(lon,5)
            # Write tile to db
            session._InsertTileCoord(easegrid,xytile,x,y,round(minx,2),round(maxy,2),round(maxx,2),
                round(miny,2),round(west,2),round(south,2),round(east,2),round(north,2),
                llD['ullat'],llD['ullon'],llD['lrlat'],llD['lrlon'],llD['urlat'],llD['urlon'],llD['lllat'],llD['lllon'])
            query = {'system':easegrid,'easegrid': easegrid, 'table':'regions','xtile':x,'ytile':y,'xytile':xytile,'regionid':'global','regioncat':'global','regiontype':'default','delete':False}
            session._InsertRegionTile(query)
    tarDS.CloseDS()
    if verbose > 0:
        print ('Check the shape file',tarShpFPN)
def Ease2GlobalTileCoordsOld(db, verbose = 1):
    ''' Create the Ease2 global tiling system (legacy variant).

    Earlier version of Ease2GlobalTileCoords using a uniform 19 x 8 tile
    layout and no lat/lon corner registration; apparently kept for
    reference.

    :param db: database handle/definition forwarded to ManageEASEgrid.
    :param verbose: if > 0, print the path of the written shape file.
    '''
    session = ManageEASEgrid(db)
    home = path.expanduser("~")
    Ease2proj = ktgis.MjProj()
    Ease2proj.SetFromEPSG(6933)
    # Destination shape file for all individual tiles
    FN = '%(e)stiles-multi_karttur_epsg%(e)s.shp' %{'e': '6933'}
    tarShpFPN = path.join(home,FN)
    fieldDefD = {'type':'string','transfer':'constant','source':'globe','width':8}
    fieldDefL = [ktgis.FieldDef('name',fieldDefD)]
    # Create a shape file for all individual tiles in the EASE-2 projection
    tarDS,tarLayer = ktgis.ESRICreateDSLayer(tarShpFPN, Ease2proj.proj_cs, 'polygon', 'tiles', fieldDefL)
    # Define the side of a tile (metres)
    #tileside = 720644.42
    tileside = 1801611.043
    # Set initial maxx
    #maxx = -17367530.45
    maxx = -17115304.904
    for x in range(19):
        maxx += tileside
        minx = maxx-tileside
        # Maxy is not the real maxy, the last rows of data will instead be omitted to keep the dimensions
        # maxy = -7090347.50497537
        maxy = -7206444.16748768
        for y in range(8):
            maxy += tileside
            miny = maxy-tileside
            # NOTE(review): debug trace left in.
            print (x,y,minx,miny,maxx,maxy)
            # Tile corners in projected coordinates: ul, ur, lr, ll
            ptL = [(minx,maxy),(maxx,maxy),(maxx,miny),(minx,miny)]
            tilegeom = ktgis.ShapelyPolyGeom(ptL)
            #convert to ogr
            tilegeom.ShapelyToOgrGeom()
            # Zero-padded tile id, e.g. 'x03' + 'y07' -> 'x03y07'
            if x < 10:
                xtile = 'x0%s' %(x)
            else:
                xtile = 'x%s' %(x)
            if y < 10:
                ytile = 'y0%s' %(y)
            else:
                ytile = 'y%s' %(y)
            xytile = '%s%s' %(xtile,ytile)
            fieldDefD = {'type':'string','transfer':'constant','source':xytile,'width':8}
            fieldDefL = [ktgis.FieldDef('name',fieldDefD)]
            #create target feature
            tarFeat = ktgis.ogrFeature(tarLayer)
            tarFeat.CreateOgrFeature(tilegeom.ogrGeom, fieldDefL)
            # Write to db
            west,south,east,north = tilegeom.shapelyGeom.bounds
            session._InsertTileCoord('ease2t',xytile,x,y,minx,maxy,maxx,miny,west,south,east,north)
            query = {'system':'system','easegrid':'ease2t', 'table':'regions','x':x,'y':y,'xytile':xytile,'regionid':'global','regioncat':'global','regiontype':'default','delete':False}
            session._InsertRegionTile(query)
    tarDS.CloseDS()
    if verbose > 0:
        print ('Check the shape file',tarShpFPN)
| [
7061,
6,
198,
41972,
319,
1160,
2365,
33448,
198,
198,
31,
9800,
25,
294,
16911,
70,
388,
1671,
30830,
198,
7061,
6,
198,
198,
2,
8997,
5888,
17944,
198,
198,
6738,
28686,
1330,
3108,
11,
285,
4335,
17062,
198,
198,
6738,
25064,
133... | 1.946681 | 18,155 |
import django

from rdflib.namespace import DC

from unittest import TestCase

from fedoralink.common_namespaces.dc import DCObject
from fedoralink.indexer.fields import IndexedTextField

# Configure Django settings/app registry up front; fedoralink's ORM-backed
# classes (DCObject, IndexedTextField) require an initialised Django.
django.setup()
| [
11748,
42625,
14208,
198,
6738,
374,
67,
2704,
571,
13,
14933,
10223,
1330,
6257,
198,
198,
6738,
555,
715,
395,
1330,
6208,
20448,
198,
198,
6738,
11672,
6864,
676,
13,
11321,
62,
14933,
43076,
13,
17896,
1330,
6257,
10267,
198,
6738,
... | 3.238095 | 63 |
##
#  Copyright : Copyright (c) MOSEK ApS, Denmark. All rights reserved.
#
#  File :      opt_server_sync.py
#
#  Purpose :   Demonstrates how to use MOSEK OptServer
#              to solve optimization problem asynchronously
##
import mosek
import sys

# Usage: opt_server_sync <inputfile> <host> <port>
if len(sys.argv) <= 3:
    print("Missing argument, syntax is:")
    print(" opt_server_sync inputfile host port")
else:
    inputfile = sys.argv[1]
    host = sys.argv[2]
    port = sys.argv[3]

    # Create the mosek environment.
    with mosek.Env() as env:
        # Create a task object linked with the environment env.
        # We create it with 0 variables and 0 constraints initially,
        # since we do not know the size of the problem.
        with env.Task(0, 0) as task:
            # NOTE(review): `streamprinter` is not defined in this excerpt;
            # presumably a log-callback defined elsewhere -- confirm.
            task.set_Stream(mosek.streamtype.log, streamprinter)

            # We assume that a problem file was given as the first command
            # line argument (received in `argv')
            task.readdata(inputfile)

            # Solve the problem remotely on the OptServer at host:port
            task.optimizermt(host, port)

            # Print a summary of the solution
            task.solutionsummary(mosek.streamtype.log)
2235,
198,
2,
220,
15069,
1058,
15069,
357,
66,
8,
337,
14058,
42,
5949,
50,
11,
16490,
13,
1439,
2489,
10395,
13,
198,
2,
198,
2,
220,
9220,
1058,
220,
220,
220,
220,
220,
2172,
62,
15388,
62,
27261,
13,
9078,
198,
2,
198,
2,
... | 2.452229 | 471 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
########################################################################
# OPENAPI-URI: /api/node/tasks
########################################################################
# get:
# responses:
# '200':
# content:
# application/json:
# schema:
# $ref: '#/components/schemas/TaskList'
# description: Node task list
# default:
# content:
# application/json:
# schema:
# $ref: '#/components/schemas/Error'
# description: unexpected error
# summary: Returns a list of tasks assigned to a given node
#
########################################################################
"""
This is the node task list handler for Apache Warble
"""
import json
import plugins.crypto
import plugins.registry
import plugins.tasks
import base64
import time
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
49962,
284,
262,
24843,
10442,
5693,
357,
1921,
37,
8,
739,
530,
393,
517,
198,
2,
18920,
5964,
11704,
13,
2... | 3.370221 | 497 |
#!/usr/bin/env python3

import os
import time
import json
import subprocess

# Benchmark configuration: runs discarded to warm up before measuring,
# and the number of measured samples.
WARMUPS = 8
SAMPLES = 64

if __name__ == '__main__':
    # main() is expected to be defined further down in the original file
    # (not visible in this excerpt).
    main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
28686,
198,
11748,
640,
198,
11748,
33918,
198,
11748,
850,
14681,
198,
198,
16279,
42422,
3705,
796,
807,
198,
49302,
6489,
1546,
796,
5598,
198,
198,
361,
11593,
3672,
... | 2.563636 | 55 |
import sys

from . import main

# Package entry point: delegate to main.main() and propagate its return
# value as the process exit code.
sys.exit(main.main())
| [
11748,
25064,
198,
198,
6738,
764,
1330,
1388,
198,
198,
17597,
13,
37023,
7,
12417,
13,
12417,
28955,
198
] | 2.842105 | 19 |
'''
Problem Statement: Assign tasks to workers so that the time it takes
to complete all the tasks is minimized given a count of workers and an
array where each element indicates the duration of a task.
Each worker must work on exactly two tasks.
'''
# Greedy pairing: after sorting, give each worker the shortest remaining
# task together with the longest remaining one, which minimises the
# maximum per-worker total.
A = sorted([5, 6, 1, 8, 3, 8])
half = len(A) // 2
for short_task, long_task in zip(A[:half], reversed(A[half:])):
    print(short_task, long_task)
7061,
6,
198,
40781,
21983,
25,
2195,
570,
8861,
284,
3259,
523,
326,
262,
640,
340,
2753,
198,
1462,
1844,
477,
262,
8861,
318,
49491,
1813,
257,
954,
286,
3259,
290,
281,
198,
18747,
810,
1123,
5002,
9217,
262,
9478,
286,
257,
487... | 3.081818 | 110 |
import random

from django.contrib.auth import get_user_model
from django.http import HttpResponse
from django.contrib.auth.views import PasswordResetView

# Resolve the project's configured user model once at import time.
UserModel = get_user_model()
| [
11748,
4738,
198,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
651,
62,
7220,
62,
19849,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,
198,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
33571,
1330,
... | 3.321429 | 56 |
import psycopg2
from .queries import ALL_RESIDENTIAL_FIRES, RESIDENTIAL_FIRES_BY_FDID_STATE_HAZARD
from psycopg2.extras import DictCursor
class Backend(object):
    """
    Backend mixin that should be used to implement APIs to read data.

    Concrete subclasses are expected to override all three hooks below;
    the base implementations only signal that they are abstract.
    """

    def connect(self):
        """
        Open a connection to the backing data store.
        """
        raise NotImplementedError

    def close_connection(self):
        """
        Release the connection opened by connect().
        """
        raise NotImplementedError

    def query(self):
        """
        Run a query against the backing data store.
        """
        raise NotImplementedError
class FileBackend(Backend):
    """
    Parse a set of NFIRS incident flat files for structure fires.

    Args:
        flatfiles (list): a list of file pathnames for files to be parsed.

    Returns:
        changes the values of the firespread_count attributes to calculated
        values

    NOTE(review): implementation pending -- the class currently only
    documents the intended contract.
    """
    pass
class PostgresBackend(Backend):
    """
    The Postgres Backend.

    NOTE(review): stub -- connect/close_connection/query inherited from
    Backend are not yet overridden here.
    """
if __name__ == '__main__':
    # Run the module's embedded doctests when executed directly.
    import doctest
    doctest.testmod()
| [
11748,
17331,
22163,
70,
17,
198,
198,
6738,
764,
421,
10640,
1330,
11096,
62,
19535,
25256,
12576,
62,
39776,
1546,
11,
15731,
25256,
12576,
62,
39776,
1546,
62,
17513,
62,
26009,
2389,
62,
44724,
62,
7801,
57,
9795,
198,
6738,
17331,
... | 2.5 | 426 |
import io
import os
import abc
import mimetypes
from json import dumps as jsondumps
from typing import Any, Dict, Iterable, List, Optional, Union
from hyperad.constants import (
CONTENT_TYPE, CONTENT_DISPOSITION,
APPLICATION_JSON, MULTIPART_FORM_DATA, TEXT_PLAIN,
APPLICATION_X_WWW_FORM_URLENCODED, APPLICATION_OCTET_STREAM,
)
from hyperad.errors import DuplicateValue
_REQUEST_PARAMS = Dict
class Content(abc.ABC):
    """ The abstract base class for a content submitted to the web server.

    @param `name`: Name of `Content`. It can be a `str` or `bytes` object.
    """

    @abc.abstractmethod
    def _build(self) -> _REQUEST_PARAMS:
        """ Construct the parameters that are passed into `requests.request()`
        method.
        """
        pass

    @abc.abstractmethod
    def enctype(self) -> str:
        """ Return the Content-Type of this `Content`.
        """
        pass

    def name(self) -> str:
        """ Return name of `Content`.

        NOTE(review): `self._name` is presumably assigned by the subclass
        constructors (not visible in this excerpt) -- confirm.
        """
        return self._name
class ParamContent(Content):
    """ A simple `Content` representing NAME-VALUE data that is sent as a
    parameter in the url.

    @param `name`: Name of `Content`.
    @param `value`: Value data. It can be `str` or `bytes` object.
    """
class FieldContent(Content):
    """ A simple `Content` representing NAME-VALUE data that is sent inside
    the request body.

    @param `name`: Name of `Content`.
    @param `value`: Value data. It can be `str` or `bytes` object.
    """
class FileContent(Content):
    """ A `Content` that uploads a file to the web server.

    The file may be a `bytes`, `str`, `io`, or `iterable` object.  The
    filename must be given explicitly when the file object does not carry
    a `name` attribute of its own.

    @param `name`: Name of `Content`.
    @param `file`: A `bytes` or `io` or `str` or `iterable` object\
        representing the file uploaded to the web server.
    @param `filename`: (Optional) Name of file. It is neccessary if\
        file doesn't have `name` attribute.
    """

    def _build(self) -> Dict:
        """ Place the file in the HTTP request body together with the
        Content-Type and Content-Disposition headers, so the server can
        identify and name the upload.
        """
        headers = {
            CONTENT_TYPE: self.enctype(),
            CONTENT_DISPOSITION: "attachment; filename={}"
            .format(self._filename),
        }
        return {"data": self._file, "headers": headers}
class JSONContent(Content):
    """ A `Content` that helps send JSON to the webserver.

    @param `name`: Name of `Content`.
    @param `json`: A serializable JSON object, such as `dict`.
    """
class MultiContent(Content):
    """ A `Content` send data as multipart/form-data.

    @param name: Name of `Content`.
    """

    def add(self, *contents: Content):
        """ Add `Content`s to the `MultiContent` form.

        - `Param` always appears in url path.
        - Other `Content`s will be put inside the body of request.

        @param `contents`: One or many `Content`s.
        """
        supported = (ParamContent, FieldContent, FileContent, JSONContent)
        for item in contents:
            if not _exact_is_instance(item, supported):
                raise TypeError(
                    "Unsupported Content ({})"
                    .format(type(item).__name__)
                )
            # Anything beyond plain fields/params forces a multipart body.
            if not isinstance(item, (FieldContent, ParamContent)):
                self._enctype = MULTIPART_FORM_DATA
            self._content_list.append(item)
        return None
| [
11748,
33245,
198,
11748,
28686,
198,
11748,
450,
66,
198,
11748,
17007,
2963,
12272,
198,
6738,
33918,
1330,
45514,
355,
44804,
623,
8142,
198,
6738,
19720,
1330,
4377,
11,
360,
713,
11,
40806,
540,
11,
7343,
11,
32233,
11,
4479,
198,
... | 2.448113 | 1,484 |
# Common imports
import numpy as np
import pandas as pd
from math import *
import matplotlib.pyplot as plt
# Grid spacing
Deltax = 0.01
# Set up the spatial grid on [xinitial, xfinal)
xinitial = -5.0
xfinal = 5.0
n = ceil((xfinal - xinitial) / Deltax)
# Vectorized grid x[i] = xinitial + i*Deltax -- replaces the original
# element-by-element fill loop; values are bit-identical.
x = xinitial + np.arange(n) * Deltax
# Model parameters (alpha/beta are unused in this section; presumably
# used further on in the original script -- kept for compatibility).
alpha = 10.0
beta = 2.0
d = 1.0
v0 = 0.1
# Double-well potential V(x) = v0 * (x^2 - d^2)^2 / d^4.
# The original pre-allocated V with np.zeros(n) and immediately
# overwrote it (dead store) -- the allocation has been removed.
V = v0 * ((x**2 - d**2)**2) / (d**4)
# Plot the potential V over position x
fig, ax = plt.subplots()
#ax.set_xlim(0, tfinal)
# NOTE(review): the axis labels look swapped -- the x-axis holds position
# x[m] and the y-axis the potential V; confirm the intended labels/units.
ax.set_ylabel('x[m]')
ax.set_xlabel('V[s]')
ax.plot(x, V)
fig.tight_layout()
plt.show()
| [
2,
8070,
17944,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
10688,
1330,
1635,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
628,
198,
35,
2120,
897,
796,
657,
13,
486,
198,
2... | 2.177122 | 271 |
try:
from unittest import mock
except ImportError:
import mock
from . import BaseTestCase
from freon.cache import Cache
from freon.backends.redis import RedisBackend
from freon.serializers.msgpack import MsgpackSerializer
@mock.patch.object(Cache, 'set')
| [
28311,
25,
198,
220,
220,
220,
422,
555,
715,
395,
1330,
15290,
198,
16341,
17267,
12331,
25,
198,
220,
220,
220,
1330,
15290,
198,
198,
6738,
764,
1330,
7308,
14402,
20448,
198,
198,
6738,
2030,
261,
13,
23870,
1330,
34088,
198,
6738... | 3 | 92 |
import os


def _env_str(name, default):
    """Return the environment value for *name*, or *default* when unset."""
    return os.environ.get(name, default)


def _env_int(name, default):
    """Return *name* parsed as int; fall back to *default* when unset or
    not a valid integer. os.environ values are always strings, so the
    original code produced a str (not an int) whenever the variable was set.
    """
    try:
        return int(os.environ.get(name, default))
    except (TypeError, ValueError):
        return default


def _env_bool(name, default):
    """Parse *name* as a boolean flag ('1'/'true'/'yes'/'on' -> True).

    The original code returned the raw string, so e.g. 'False' was truthy.
    """
    raw = os.environ.get(name)
    if raw is None:
        return default
    return raw.strip().lower() in ('1', 'true', 'yes', 'on')


APPLICATION_DEBUG = _env_bool('APPLICATION_DEBUG', True)
APPLICATION_HOST = _env_str('APPLICATION_HOST', '0.0.0.0')
APPLICATION_PORT = _env_int('APPLICATION_PORT', 5000)
AWS_REGION = _env_str('AWS_REGION', 'us-east-1')
# SECURITY(review): default database credentials are hardcoded below;
# they should be supplied via the environment only.
DATABASE_HOST = _env_str('DATABASE_HOST', 'hgpoker.c13gml6mujwk.us-east-2.rds.amazonaws.com')
DATABASE_MAX_CONNECTION = _env_int('DATABASE_MAX_CONNECTION', 20)
DATABASE_NAME = _env_str('DATABASE_NAME', 'hgpoker')
DATABASE_PASSWORD = _env_str('DATABASE_PASSWORD', 'pass-hgpoker')
DATABASE_PORT = _env_int('DATABASE_PORT', 5432)
DATABASE_PRINT_SQL = _env_bool('DATABASE_PRINT_SQL', True)
DATABASE_USERNAME = _env_str('DATABASE_USERNAME', 'postgres')
FLASK_ENV = _env_str('FLASK_ENV', 'development')
11748,
28686,
198,
198,
2969,
31484,
6234,
62,
30531,
796,
28686,
13,
268,
2268,
13,
1136,
10786,
2969,
31484,
6234,
62,
30531,
3256,
6407,
8,
198,
2969,
31484,
6234,
62,
39,
10892,
796,
28686,
13,
268,
2268,
13,
1136,
10786,
2969,
31... | 2.219444 | 360 |
import random
import subprocess
import pickle
| [
11748,
4738,
198,
11748,
850,
14681,
198,
11748,
2298,
293,
628,
628
] | 4.083333 | 12 |
# https://atcoder.jp/contests/math-and-algorithm/tasks/math_and_algorithm_o


def gcd(a: int, b: int) -> int:
    """Return the greatest common divisor of *a* and *b* (Euclid).

    Fixes the original hand-rolled loop, which raised ZeroDivisionError
    for equal inputs: its else-branch set ``a`` to 0 and then evaluated
    ``b % a`` on the next iteration.  Euclid's algorithm also makes the
    initial max/min normalisation unnecessary (a < b simply swaps on the
    first iteration).
    """
    while b > 0:
        a, b = b, a % b
    return a


if __name__ == '__main__':
    # Read two integers from stdin and print their GCD.
    a, b = map(int, input().split())
    print(gcd(a, b))
2,
3740,
1378,
265,
66,
12342,
13,
34523,
14,
3642,
3558,
14,
11018,
12,
392,
12,
282,
42289,
14,
83,
6791,
14,
11018,
62,
392,
62,
282,
42289,
62,
78,
198,
198,
64,
11,
275,
796,
3975,
7,
600,
11,
5128,
22446,
35312,
28955,
198... | 1.909091 | 121 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Yuemeng Li
"""
from backbone import Backbone
import torch
import argparse
import os
import numpy as np
from torch.utils.data import DataLoader
from tqdm import tqdm
from torch.nn import DataParallel
import nibabel as nib
import pdb
#######run command###########
#python CUDA_VISIBLE_DEVICES=0 test.py --data-name /home/yli/MRI_project/resampled/testing-images/1003_3.nii.gz --save-dir ./MRI_model/MALC_coarse/output --seg-task coarse
if __name__ == '__main__':
    # Entry point -- main() is expected to be defined elsewhere in this
    # module (not visible in this excerpt).
    main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
31,
9800,
25,
10605,
368,
1516,
7455,
198,
37811,
198,
198,
6738,
32774,
1330,
5157,
15992,
198,
11748,... | 2.575342 | 219 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
# LegiscanAPI.py -- Pull data from Legiscan.com API
# By Uchechukwu Uboh and Tony Pearson, IBM, 202
#
# LegiscanAPI class automates the retrival and curation of bill data for a
# particular US state. Before running this class, your Legiscan.com apikey
# needs to be an environmental variable with the key of "LEGISCAN_API_KEY".
# Visit https://legiscan.com/legiscan to create your own Legiscan.com apikey.
#
# See /docs/Legiscan/ for API manual and Entity relationship diagram (ERD)
#
# Debug with: # import pdb; pdb.set_trace()
"""
# System imports
import json
import logging
import os
# Application imports
from .data_bundle import DataBundle
# import pdb; pdb.set_trace()
logger = logging.getLogger(__name__)
EXCEEDED = "maximum query count"
LEGISCAN_ID = {
1: {"code": "AL", "name": "Alabama", "capital": "Montgomery"},
2: {"code": "AK", "name": "Alaska", "capital": "Juneau"},
3: {"code": "AZ", "name": "Arizona", "capital": "Phoenix"},
4: {"code": "AR", "name": "Arkansas", "capital": "Little Rock"},
5: {"code": "CA", "name": "California", "capital": "Sacramento"},
6: {"code": "CO", "name": "Colorado", "capital": "Denver"},
7: {"code": "CT", "name": "Connecticut", "capital": "Hartford"},
8: {"code": "DE", "name": "Delaware", "capital": "Dover"},
9: {"code": "FL", "name": "Florida", "capital": "Tallahassee"},
10: {"code": "GA", "name": "Georgia", "capital": "Atlanta"},
11: {"code": "HI", "name": "Hawaii", "capital": "Honolulu"},
12: {"code": "ID", "name": "Idaho", "capital": "Boise"},
13: {"code": "IL", "name": "Illinois", "capital": "Springfield"},
14: {"code": "IN", "name": "Indiana", "capital": "Indianapolis"},
15: {"code": "IA", "name": "Iowa", "capital": "Des Moines"},
16: {"code": "KS", "name": "Kansas", "capital": "Topeka"},
17: {"code": "KY", "name": "Kentucky", "capital": "Frankfort"},
18: {"code": "LA", "name": "Louisiana", "capital": "Baton Rouge"},
19: {"code": "ME", "name": "Maine", "capital": "Augusta"},
20: {"code": "MD", "name": "Maryland", "capital": "Annapolis"},
21: {"code": "MA", "name": "Massachusetts", "capital": "Boston"},
22: {"code": "MI", "name": "Michigan", "capital": "Lansing"},
23: {"code": "MN", "name": "Minnesota", "capital": "Saint Paul"},
24: {"code": "MS", "name": "Mississippi", "capital": "Jackson"},
25: {"code": "MO", "name": "Missouri", "capital": "Jefferson City"},
26: {"code": "MT", "name": "Montana", "capital": "Helena"},
27: {"code": "NE", "name": "Nebraska", "capital": "Lincoln"},
28: {"code": "NV", "name": "Nevada", "capital": "Carson City"},
29: {"code": "NH", "name": "New Hampshire", "capital": "Concord"},
30: {"code": "NJ", "name": "New Jersey", "capital": "Trenton"},
31: {"code": "NM", "name": "New Mexico", "capital": "Santa Fe"},
32: {"code": "NY", "name": "New York", "capital": "Albany"},
33: {"code": "NC", "name": "North Carolina", "capital": "Raleigh"},
34: {"code": "ND", "name": "North Dakota", "capital": "Bismarck"},
35: {"code": "OH", "name": "Ohio", "capital": "Columbus"},
36: {"code": "OK", "name": "Oklahoma", "capital": "Oklahoma City"},
37: {"code": "OR", "name": "Oregon", "capital": "Salem"},
38: {"code": "PA", "name": "Pennsylvania", "capital": "Harrisburg"},
39: {"code": "RI", "name": "Rhode Island", "capital": "Providence"},
40: {"code": "SC", "name": "South Carolina", "capital": "Columbia"},
41: {"code": "SD", "name": "South Dakota", "capital": "Pierre"},
42: {"code": "TN", "name": "Tennessee", "capital": "Nashville"},
43: {"code": "TX", "name": "Texas", "capital": "Austin"},
44: {"code": "UT", "name": "Utah", "capital": "Salt Lake City"},
45: {"code": "VT", "name": "Vermont", "capital": "Montpelier"},
46: {"code": "VA", "name": "Virginia", "capital": "Richmond"},
47: {"code": "WA", "name": "Washington", "capital": "Olympia"},
48: {"code": "WV", "name": "West Virginia", "capital": "Charleston"},
49: {"code": "WI", "name": "Wisconsin", "capital": "Madison"},
50: {"code": "WY", "name": "Wyoming", "capital": "Cheyenne"},
51: {"code": "DC", "name": "Washington D.C.", "capital": "Washington, DC"},
52: {"code": "US", "name": "US Congress", "capital": "Washington, DC"},
}
class LegiscanError(Exception):
    """ Define exception for general Legiscan errors """
    pass
class APIkeyError(LegiscanError):
    """ Define exception for specific errors related to API key """
    pass
class LegiscanAPI:
    """ Constructor for LegiscanAPI. Checks if a LegiscanAPI apikey exists.

    NOTE(review): no __init__ is visible in this excerpt; the methods rely
    on `self.api_key`, `self.bad_key`, `self.url` and `self.api_ok` being
    initialised elsewhere -- confirm.  `api_ok` latches to False after any
    failure, short-circuiting further calls.
    """

    def get_datasetlist(self, apikey='Good'):
        """ Get list of datasets for all 50 states.

        :param apikey: 'Good' uses the real key, anything else the
            deliberately bad key (used by the self-test driver).
        :returns: JSON string of the dataset list, or None on failure.
        """
        key = self.bad_key
        if apikey == 'Good':
            key = self.api_key
        list_data = None
        dsl_bundle = DataBundle('getDataSetList')
        dsl_params = {'key': key, 'op': 'getDatasetList'}
        # import pdb; pdb.set_trace()
        success = self.invoke_api(dsl_bundle, dsl_params)
        if success:
            if 'datasetlist' in dsl_bundle.json_pkg:
                list_data = json.dumps(dsl_bundle.json_pkg, indent=2)
            else:
                # Response arrived but lacked the expected payload key.
                dsl_bundle.status_ok = False
                dsl_bundle.status_code = 487
        if not dsl_bundle.status_ok:
            logger.error(f"Failure: {dsl_bundle}")
            self.api_ok = False
            list_data = None
        return list_data

    def get_dataset(self, session_id, access_key, apikey='Good'):
        """ Get datasets for individual legislative session.

        :param session_id: Legiscan session identifier.
        :param access_key: per-session access key from the dataset list.
        :returns: JSON string of the dataset, or None on failure.
        """
        key = self.bad_key
        if apikey == 'Good':
            key = self.api_key
        sesh_data = None
        sesh_bundle = DataBundle('getDataset')
        sesh_params = {'key': key, 'op': 'getDataset',
                       'id': session_id, 'access_key': access_key}
        success = self.invoke_api(sesh_bundle, sesh_params)
        if success:
            if 'dataset' in sesh_bundle.json_pkg:
                sesh_data = json.dumps(sesh_bundle.json_pkg, indent=2)
            else:
                # Response arrived but lacked the expected payload key.
                sesh_bundle.status_ok = False
                sesh_bundle.status_code = 487
        if not sesh_bundle.status_ok:
            logger.error(f"Failure: {sesh_bundle}")
            self.api_ok = False
            sesh_data = None
        return sesh_data

    def get_bill_text(self, document_id, apikey='Good'):
        """ Get specific document identified in bill.

        :param document_id: Legiscan document identifier.
        :returns: JSON string of the bill text, or None on failure.
        """
        key = self.bad_key
        if apikey == 'Good':
            key = self.api_key
        bill_data = None
        bill_bundle = DataBundle('getBillText')
        bill_params = {'key': key, 'op': 'getBillText', 'id': document_id}
        success = self.invoke_api(bill_bundle, bill_params)
        if success:
            if 'text' in bill_bundle.json_pkg:
                bill_data = json.dumps(bill_bundle.json_pkg, indent=2)
            else:
                # Response arrived but lacked the expected payload key.
                bill_bundle.status_ok = False
                bill_bundle.status_code = 487
        if not bill_bundle.status_ok:
            logger.error(f"Failure: {bill_bundle}")
            self.api_ok = False
            bill_data = None
        return bill_data

    def invoke_api(self, bundle, params):
        """ Invoke the Legiscan API.

        Skips the request entirely when self.api_ok is already False.
        :returns: bundle.status_ok after the attempt.
        """
        bundle.status_ok = False
        result = None
        if self.api_ok:
            try:
                response = bundle.make_request(self.url, params)
                result = bundle.load_response(response)
            except RuntimeError as exc:
                logger.error(f"210:Error {exc}", exc_info=True)
                self.api_ok = False
                bundle.status_ok = False
                bundle.status_code = 403
                result = None
        if result:
            self.check_result(bundle, params)
        else:
            # No usable response (API disabled or request failed).
            bundle.status_code = 405
        return bundle.status_ok

    @staticmethod
    def dump_id_table():
        """ Dump legiscan_id to JSON output """
        output_string = json.dumps(LEGISCAN_ID, indent=2)
        return output_string

    def check_result(self, bundle, params):
        """ check result matches expectations.

        Marks the bundle failed on an ERROR status or non-JSON response,
        and raises LegiscanError when the response carries an alert
        (status 429 when the query quota is exceeded).
        """
        if bundle.extension == 'json':
            if bundle.json_pkg['status'] == 'ERROR':
                self.api_ok = False
                bundle.status_ok = False
                bundle.status_code = 406
            pkg = bundle.json_pkg
            if 'alert' in pkg:
                bundle.name += ' *ERROR*'
                bundle.msgtext = (f"[ERROR] {pkg['alert']['message']} "
                                  f"{self.url} {json.dumps(params)}")
                if EXCEEDED in bundle.msgtext:
                    bundle.status_code = 429
                raise LegiscanError(f"{bundle.name} {bundle.msgtext}")
        else:
            # Only JSON responses are supported.
            bundle.status_ok = False
            bundle.status_code = 415
        return None
if __name__ == "__main__":
leg = LegiscanAPI()
out_str = leg.dump_id_table()
# with open("Legiscan_id.json", "w") as out_file:
# out_file.write(out_str)
leg.api_ok = False
params = {}
bundle01 = DataBundle('Test1')
print('Test with API_OK, no params, API_OK', leg.api_ok)
r01 = leg.invoke_api(bundle01, params)
print('Result:', r01, bundle01)
leg.api_ok = True
bundle02 = DataBundle('Test2')
print('Test with no params, API_OK=', leg.api_ok)
r02 = leg.invoke_api(bundle02, params)
print('Result:', r02, bundle02)
bundle02b = DataBundle('Test2b')
dsl_params = {'key': leg.bad_key}
r02b = leg.invoke_api(bundle02b, params)
print('Result:', r02b, bundle02b)
bundle02c = DataBundle('Test2c')
dsl_params = {'op': 'getDatasetList'}
r02c = leg.invoke_api(bundle02c, params)
print('Result:', r02c, bundle02c)
# import pdb; pdb.set_trace()
print('Test getDatasetList with bad API key')
r03 = leg.get_datasetlist(apikey='Bad')
print('Result: ', r03, 'API_OK=', leg.api_ok)
leg.api_ok = True
print('Test getDatasetList with good API key')
r04 = leg.get_datasetlist(apikey='Good')
print('Result:', len(r04))
r04_pkg = json.loads(r04)
print(r04_pkg['status'], len(r04_pkg['datasetlist']))
dsl = r04_pkg['datasetlist']
first_sesh = dsl[0]
session_id = first_sesh['session_id']
access_key = first_sesh['access_key']
print('Test getDataset with bad API key')
r05 = leg.get_dataset(session_id, access_key, apikey='Bad')
print('Result: ', r05)
leg.api_ok = True
print('Test getDataset with invalid session id')
r06 = leg.get_dataset(9999, access_key, apikey='Good')
print('Result: ', r06)
leg.api_ok = True
print('Test getDataset with invalid access key')
r07 = leg.get_dataset(session_id, '', apikey='Good')
print('Result: ', r07)
leg.api_ok = True
print('Test getDataset')
r08 = leg.get_dataset(session_id, access_key, apikey='Good')
print('Result: ', len(r08))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
2,
3564,
2304,
272,
17614,
13,
9078,
1377,
21429,
1366,
422,
3564,
2304,
272,
13,
785,
7824,
198,
2,
... | 2.264105 | 4,892 |
from flask import jsonify
from app import db
from datetime import datetime
from sqlalchemy import Column, Integer, DateTime, String, Boolean, Text | [
6738,
42903,
1330,
33918,
1958,
198,
6738,
598,
1330,
20613,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
44161,
282,
26599,
1330,
29201,
11,
34142,
11,
7536,
7575,
11,
10903,
11,
41146,
11,
8255
] | 4.171429 | 35 |
#
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion
import unittest
import numpy as np
import pandas as pd
from databricks import koalas
from databricks.koalas.exceptions import SparkPandasIndexingError
from databricks.koalas.testing.utils import ComparisonTestBase, ReusedSQLTestCase, compare_both
if __name__ == "__main__":
    # Prefer the JUnit-style XML runner (machine-readable reports for CI)
    # when the optional ``xmlrunner`` package is installed; otherwise fall
    # back to unittest's default text runner (testRunner=None).
    try:
        import xmlrunner
        testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
    except ImportError:
        testRunner = None
    unittest.main(testRunner=testRunner, verbosity=2)
| [
2,
198,
2,
15069,
357,
34,
8,
13130,
16092,
397,
23706,
11,
3457,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
... | 3.279883 | 343 |
#!/usr/bin/env python
"""
Python source code - replace this with a description of the code and write the code below this text.
"""
import os.path as osp
from sklearn import grid_search
from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics
from sklearn.cross_validation import StratifiedKFold
if __name__ == "__main__":
    # NOTE(review): ``main`` is not defined in this template file as shown --
    # running the script directly raises NameError until it is implemented.
    main()
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
37811,
198,
37906,
2723,
2438,
532,
6330,
428,
351,
257,
6764,
286,
262,
2438,
290,
3551,
262,
2438,
2174,
428,
2420,
13,
198,
37811,
198,
11748,
28686,
13,
6978,
355,
267,
2777,
... | 3.354839 | 124 |
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Checks if all the libraries in setup.py are listed in installation.rst file
"""
import os
import re
import sys
from os.path import dirname
from typing import Dict, List, Set
from rich import print
from rich.console import Console
from rich.table import Table
AIRFLOW_SOURCES_DIR = os.path.join(dirname(__file__), os.pardir, os.pardir, os.pardir)
SETUP_PY_FILE = 'setup.py'
DOCS_FILE = os.path.join('docs', 'apache-airflow', 'extra-packages-ref.rst')
PY_IDENTIFIER = r'[a-zA-Z_][a-zA-Z0-9_\.]*'
sys.path.insert(0, AIRFLOW_SOURCES_DIR)
from setup import ( # noqa # isort:skip
add_all_provider_packages,
EXTRAS_DEPRECATED_ALIASES,
EXTRAS_REQUIREMENTS,
PROVIDERS_REQUIREMENTS,
PREINSTALLED_PROVIDERS,
)
def get_extras_from_setup() -> Set[str]:
    """Return the current (non-deprecated) extras declared in setup.py."""
    all_extras = set(EXTRAS_REQUIREMENTS)
    deprecated_aliases = set(EXTRAS_DEPRECATED_ALIASES)
    return all_extras.difference(deprecated_aliases)
def get_extras_from_docs() -> Set[str]:
    """Return the set of extras documented in the extras-reference doc table."""
    content = get_file_content(DOCS_FILE)
    # Each documented extra appears in a table row containing its
    # ``pip install 'apache-airflow[<extra>]'`` example; capture the name.
    row_pattern = re.compile(
        rf'\|[^|]+\|.*pip install .apache-airflow\[({PY_IDENTIFIER})][^|]+\|[^|]+\|',
        re.MULTILINE,
    )
    return {extra_name for extra_name in row_pattern.findall(content)}
def get_preinstalled_providers_from_docs() -> List[str]:
    """Return the providers marked as pre-installed in the docs table."""
    content = get_file_content(DOCS_FILE)
    # Pre-installed providers have a trailing ``*`` cell in their table row.
    preinstalled_pattern = re.compile(
        rf'\|\s*({PY_IDENTIFIER})\s*\|[^|]+pip install[^|]+\|[^|]+\|\s+\*\s+\|$',
        re.MULTILINE,
    )
    return preinstalled_pattern.findall(content)
def get_deprecated_extras_from_docs() -> Dict[str, str]:
    """Return a mapping of deprecated extra alias -> replacement extra, from docs."""
    content = get_file_content(DOCS_FILE)
    # Grab everything after the deprecated-extras table header...
    section_pattern = re.compile(
        r'\| Deprecated extra \| Extra to be used instead \|\n(.*)\n', re.DOTALL  # noqa
    )
    table_body = section_pattern.findall(content)[0]
    # ...then pull (alias, target) out of each table row.
    row_pattern = re.compile(r'\|\s(\S+)\s+\|\s(\S*)\s+\|$', re.MULTILINE)
    return {alias: target for alias, target in row_pattern.findall(table_body)}
def check_extras(console: Console) -> bool:
    """
    Checks if non-deprecated extras match setup vs. doc.

    :param console: print table there in case of errors
    :return: True if all ok, False otherwise
    """
    # One row per mismatched extra; a "V" marks the side where it is present.
    extras_table = Table()
    extras_table.add_column("NAME", justify="right", style="cyan")
    extras_table.add_column("SETUP", justify="center", style="magenta")
    extras_table.add_column("DOCS", justify="center", style="yellow")
    non_deprecated_setup_extras = get_extras_from_setup()
    non_deprecated_docs_extras = get_extras_from_docs()
    # Extras declared in setup.py but missing from the documentation.
    for extra in non_deprecated_setup_extras:
        if extra not in non_deprecated_docs_extras:
            extras_table.add_row(extra, "V", "")
    # Extras documented but not declared in setup.py.
    for extra in non_deprecated_docs_extras:
        if extra not in non_deprecated_setup_extras:
            extras_table.add_row(extra, "", "V")
    # Any row in the table means the two sources are out of sync.
    if extras_table.row_count != 0:
        print(
            f"""\
[red bold]ERROR!![/red bold]
The "[bold]CORE_EXTRAS_REQUIREMENTS[/bold]", "[bold]ADDITIONAL_PROVIDERS_REQUIREMENTS[/bold]", and
"[bold]PROVIDERS_REQUIREMENTS[/bold]"
sections in the setup file: [bold yellow]{SETUP_PY_FILE}[/bold yellow]
should be synchronized with the "Extra Packages Reference"
in the documentation file: [bold yellow]{DOCS_FILE}[/bold yellow].
Below is the list of extras that:
  * are used but are not documented,
  * are documented but not used,
[bold]Please synchronize setup/documentation files![/bold]
"""
        )
        console.print(extras_table)
        return False
    return True
def check_deprecated_extras(console: Console) -> bool:
    """
    Checks if deprecated extras match setup vs. doc.

    :param console: print table there in case of errors
    :return: True if all ok, False otherwise
    """
    deprecated_setup_extras = EXTRAS_DEPRECATED_ALIASES
    deprecated_docs_extras = get_deprecated_extras_from_docs()
    # Four columns so both the alias and its replacement target can disagree.
    deprecated_extras_table = Table()
    deprecated_extras_table.add_column("DEPRECATED_IN_SETUP", justify="right", style="cyan")
    deprecated_extras_table.add_column("TARGET_IN_SETUP", justify="center", style="magenta")
    deprecated_extras_table.add_column("DEPRECATED_IN_DOCS", justify="right", style="cyan")
    deprecated_extras_table.add_column("TARGET_IN_DOCS", justify="center", style="magenta")
    # Aliases present in setup.py: flag those missing from docs, or whose
    # replacement target differs between setup.py and docs.
    for extra in deprecated_setup_extras.keys():
        if extra not in deprecated_docs_extras:
            deprecated_extras_table.add_row(extra, deprecated_setup_extras[extra], "", "")
        elif deprecated_docs_extras[extra] != deprecated_setup_extras[extra]:
            deprecated_extras_table.add_row(
                extra, deprecated_setup_extras[extra], extra, deprecated_docs_extras[extra]
            )
    # Aliases documented but absent from setup.py.
    for extra in deprecated_docs_extras.keys():
        if extra not in deprecated_setup_extras:
            deprecated_extras_table.add_row("", "", extra, deprecated_docs_extras[extra])
    # A non-empty table means the two sources disagree.
    if deprecated_extras_table.row_count != 0:
        print(
            f"""\
[red bold]ERROR!![/red bold]
The "[bold]EXTRAS_DEPRECATED_ALIASES[/bold]" section in the setup file:\
[bold yellow]{SETUP_PY_FILE}[/bold yellow]
should be synchronized with the "Extra Packages Reference"
in the documentation file: [bold yellow]{DOCS_FILE}[/bold yellow].
Below is the list of deprecated extras that:
  * are used but are not documented,
  * are documented but not used,
  * or have different target extra specified in the documentation or setup.
[bold]Please synchronize setup/documentation files![/bold]
"""
        )
        console.print(deprecated_extras_table)
        return False
    return True
def check_preinstalled_extras(console: Console) -> bool:
    """
    Checks if preinstalled extras match setup vs. doc.

    :param console: print table there in case of errors
    :return: True if all ok, False otherwise
    """
    preinstalled_providers_from_docs = get_preinstalled_providers_from_docs()
    preinstalled_providers_from_setup = PREINSTALLED_PROVIDERS
    # One row per mismatch; the provider appears in the column of the side
    # that declares it pre-installed.
    preinstalled_providers_table = Table()
    preinstalled_providers_table.add_column("PREINSTALLED_IN_SETUP", justify="right", style="cyan")
    preinstalled_providers_table.add_column("PREINSTALLED_IN_DOCS", justify="center", style="magenta")
    # Pre-installed in setup.py but not marked as such in the docs.
    for provider in preinstalled_providers_from_setup:
        if provider not in preinstalled_providers_from_docs:
            preinstalled_providers_table.add_row(provider, "")
    # Marked pre-installed in the docs but not in setup.py.
    for provider in preinstalled_providers_from_docs:
        if provider not in preinstalled_providers_from_setup:
            preinstalled_providers_table.add_row("", provider)
    # A non-empty table means the two sources disagree.
    if preinstalled_providers_table.row_count != 0:
        print(
            f"""\
[red bold]ERROR!![/red bold]
The "[bold]PREINSTALLED_PROVIDERS[/bold]" section in the setup file:\
[bold yellow]{SETUP_PY_FILE}[/bold yellow]
should be synchronized with the "Extra Packages Reference"
in the documentation file: [bold yellow]{DOCS_FILE}[/bold yellow].
Below is the list of preinstalled providers that:
  * are used but are not documented,
  * or are documented but not used.
[bold]Please synchronize setup/documentation files![/bold]
"""
        )
        console.print(preinstalled_providers_table)
        return False
    return True
if __name__ == '__main__':
    # Collects one boolean per consistency check; True means that section of
    # setup.py agrees with the documentation.
    status: List[bool] = []
    # force adding all provider package dependencies, to check providers status
    add_all_provider_packages()
    main_console = Console()
    status.append(check_extras(main_console))
    status.append(check_deprecated_extras(main_console))
    status.append(check_preinstalled_extras(main_console))
    if all(status):
        print("All extras are synchronized: [green]OK[/]")
        sys.exit(0)
    # At least one section is out of sync -- fail (e.g. as a pre-commit hook).
    sys.exit(1)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
198,
2,
49962,
284,
262,
24843,
10442,
5693,
357,
1921,
37,
8,
739,
530,
198,
2,
393,
517,
18920,
5964,
11704,
13,
220,
4091,
262,
28536,
2393,
198,
2,
9387,
351,
428,
670,
329,... | 2.714931 | 3,322 |
from unittest import TestCase
import psycopg2 as pg
import psycopg2.extensions as pg_extensions
import tests.env
import karaoke
from karaoke.queue import Queue
from karaoke.exceptions import QueueError
from tests.conftest import KaraokeTestCase
class TestQueue(KaraokeTestCase):
    '''
    Test some methods of the Queue class.

    A throw-away PostgreSQL database (``karaoke_test``) is created once for
    the whole class in ``setUpClass`` and dropped again in ``tearDownClass``.
    '''
    @classmethod
    def setUpClass(cls):
        '''
        Set up a test client and database.
        '''
        # Create the test database using an external connection.
        # Autocommit is needed because CREATE DATABASE cannot run inside a
        # transaction block in PostgreSQL.
        cls.ext_conn = karaoke.connect_db()
        cls.ext_conn.set_isolation_level(pg_extensions.ISOLATION_LEVEL_AUTOCOMMIT)
        with cls.ext_conn:
            with cls.ext_conn.cursor() as curs:
                curs.execute('CREATE DATABASE karaoke_test;')
        # Set up the test client
        super().setUpClass()
        # Initialize the test database (creates the application schema)
        with karaoke.app.app_context():
            karaoke.init_db()
        # Connect to the test database and create a queue to test
        cls.conn = karaoke.connect_db()
        cls.queue = Queue(cls.conn)
        # Load some fake song data
        with cls.conn:
            with cls.conn.cursor() as curs:
                curs.execute('''
                    INSERT INTO song
                        (title, artist, url)
                    VALUES
                        ('foo', 'bar', 'baz')
                ''')
    @classmethod
    def tearDownClass(cls):
        '''
        Remove the test database and close out the connection.
        '''
        # Close the test-database connection first so DROP DATABASE below is
        # not blocked by an open session.
        cls.conn.close()
        with cls.ext_conn:
            with cls.ext_conn.cursor() as curs:
                curs.execute('DROP DATABASE karaoke_test')
        # Close out all connections
        cls.ext_conn.close()
| [
6738,
555,
715,
395,
1330,
6208,
20448,
198,
198,
11748,
17331,
22163,
70,
17,
355,
23241,
198,
11748,
17331,
22163,
70,
17,
13,
2302,
5736,
355,
23241,
62,
2302,
5736,
198,
198,
11748,
5254,
13,
24330,
198,
11748,
479,
3301,
2088,
19... | 2.084806 | 849 |
import sys
import cPickle
import numpy
import matplotlib.pyplot as plt
import argparse
import pdb
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-f",
"--result-file",
type=str,
default=None)
parser.add_argument("-m",
"--max-epoch",
type=float,
default=None)
parser.add_argument("-v",
"--include-training-variance",
type=bool,
default=False)
_ = parser.parse_args()
plot_results(_.result_file, _.max_epoch, _.include_training_variance)
| [
11748,
25064,
198,
11748,
269,
31686,
293,
198,
11748,
299,
32152,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
1822,
29572,
198,
198,
11748,
279,
9945,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417... | 1.786967 | 399 |
# pong-online
# https://github.com/VilhelmPrytz/pong-online
# Copyright (C) Vilhelm Prytz 2019
from components.tools import random_string
# 1 - waiting for opponent to join
# 2 - in game
| [
2,
279,
506,
12,
25119,
198,
2,
3740,
1378,
12567,
13,
785,
14,
53,
346,
33485,
47,
563,
22877,
14,
79,
506,
12,
25119,
198,
2,
15069,
357,
34,
8,
34037,
33485,
32500,
22877,
13130,
198,
198,
6738,
6805,
13,
31391,
1330,
4738,
62,... | 3.064516 | 62 |
#!/usr/bin/python
"""Warn when run under Python 2; this program requires Python 3."""

import sys
import warnings

# Major-version guard: emit a RuntimeWarning (rather than exiting) on Python 2.
if sys.version_info[0] < 3:
    warnings.warn('Need Python 3.0 for this program to run', RuntimeWarning)
else:
    print('Proceed as normal')  # typo fix: message previously read 'Procees'
2,
48443,
14629,
14,
8800,
14,
29412,
198,
198,
11748,
25064,
11,
14601,
198,
198,
361,
25064,
13,
9641,
62,
10951,
58,
15,
60,
1279,
513,
25,
198,
220,
220,
220,
14601,
13,
40539,
10786,
23037,
11361,
513,
13,
15,
329,
428,
1430,
... | 2.84375 | 64 |
from datetime import datetime
from django.core.cache import cache
from custom.icds.const import (
DATA_PULL_CACHE_KEY,
DATA_PULL_PERMITTED_END_HOUR,
DATA_PULL_PERMITTED_START_HOUR,
)
from custom.icds_reports.const import INDIA_TIMEZONE
| [
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
42625,
14208,
13,
7295,
13,
23870,
1330,
12940,
198,
198,
6738,
2183,
13,
291,
9310,
13,
9979,
1330,
357,
198,
220,
220,
220,
42865,
62,
5105,
3069,
62,
34,
2246,
13909,
62,
20373,
11,
... | 2.460784 | 102 |
from datetime import date, datetime

# Print the current year: the first four characters of the ISO-style
# timestamp produced by str(datetime.today()) ("YYYY-MM-DD HH:MM:SS...").
now_text = str(datetime.today())
print(now_text[:4])
| [
6738,
4818,
8079,
1330,
3128,
11,
4818,
8079,
198,
4798,
7,
2536,
7,
19608,
8079,
13,
40838,
28955,
58,
25,
19,
12962,
198
] | 3 | 23 |
# -*- coding: utf-8 -*-
"""Contain schemata required by cenv-tool."""
from marshmallow import fields
from marshmallow import Schema
from marshmallow import validate
class SNPackage(Schema):
    """Schema for the ``package``-section of a ``meta.yaml`` file."""

    name = fields.String(required=True, strict=True)
    version = fields.String(required=True, strict=True)
class SNSource(Schema):
    """Schema for the ``source``-section of a ``meta.yaml`` file."""

    path = fields.String(required=True, strict=True)
class SNBuild(Schema):
    """Schema for the ``build``-section of a ``meta.yaml`` file.

    Covers the build-number, whether the egg-dir is preserved, the script to
    run on installation and any entrypoints defined for the package.
    """

    build = fields.String(required=True, strict=True)
    preserve_egg_dir = fields.String(
        required=True,
        strict=True,
        # the yaml value is the literal string 'True'/'False', not a bool
        validate=validate.OneOf(['True', 'False']),
    )
    script = fields.String(required=True, strict=True)
    entry_points = fields.List(
        fields.String(required=False, strict=True),
        required=False,
        strict=True,
    )
class SNRequirements(Schema):
    """Schema for the ``requirements``-section of a ``meta.yaml`` file.

    Both the nested ``build``- and ``run``-sections must validate.
    """

    build = fields.List(
        fields.String(required=True, strict=True),
        required=True,
        strict=True,
    )
    run = fields.List(
        fields.String(
            required=True,
            strict=True,
            # every dependency except python itself must pin a version
            validate=lambda dep: True if 'python' in dep else '=' in dep,
            error_messages={'validator_failed': 'Version must be specified'},
        ),
        required=True,
        strict=True,
    )
    run_constrained = fields.List(
        fields.String(
            required=False,
            strict=True,
            validate=lambda dep: '=' in dep,
            error_messages={'validator_failed': 'Version must be specified'},
        ),
        required=False,
        strict=True,
    )
class SNTest(Schema):
    """Schema for the optional ``tests``-section of a ``meta.yaml`` file."""

    imports = fields.List(
        fields.String(required=False, strict=True),
        required=False,
        strict=True,
    )
    commands = fields.List(
        fields.String(required=False, strict=True),
        required=False,
        strict=True,
    )
class SNExtra(Schema):
    """Contain the ``extra``-section inside a ``meta.yaml``.

    The ``extra``-section has to contain the information where to find the
    conda-folder, the name of the conda environment to use for the current
    project and the cenv-version used when the ``meta.yaml`` file was created.
    """

    # NOTE(review): ``SNCenv`` is not defined in this module as shown --
    # confirm it is declared/imported above this class, otherwise this line
    # raises NameError at import time.
    cenv = fields.Nested(SNCenv, strict=True, required=True)
class SMetaYaml(Schema):
    """Schema describing a complete ``meta.yaml`` file as used by cenv.

    Ensures a loaded ``meta.yaml`` provides the mandatory ``package``,
    ``source``, ``build``, ``requirements`` and ``extra`` sections; the
    ``test``-section may be omitted.
    """

    package = fields.Nested(SNPackage, required=True, strict=True)
    source = fields.Nested(SNSource, required=True, strict=True)
    build = fields.Nested(SNBuild, required=True, strict=True)
    requirements = fields.Nested(SNRequirements, required=True, strict=True)
    test = fields.Nested(SNTest, required=False, strict=True)
    extra = fields.Nested(SNExtra, required=True, strict=True)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
4264,
391,
3897,
76,
1045,
2672,
416,
269,
24330,
12,
25981,
526,
15931,
198,
6738,
22397,
42725,
1330,
7032,
198,
6738,
22397,
42725,
1330,
10011,
2611,
198,
6738,... | 2.615836 | 1,364 |
import numpy as np
from matplotlib import pyplot as plt
from lmfit.models import GaussianModel as gauss_mod
from lmfit.models import VoigtModel as voigt_mod
from lmfit.models import LinearModel as lin_mod
| [
11748,
299,
32152,
355,
45941,
198,
6738,
2603,
29487,
8019,
1330,
12972,
29487,
355,
458,
83,
198,
6738,
300,
76,
11147,
13,
27530,
1330,
12822,
31562,
17633,
355,
31986,
1046,
62,
4666,
198,
6738,
300,
76,
11147,
13,
27530,
1330,
2068... | 3.153846 | 65 |
import requests
import json
import random

from . import secretKey

# Bug fix: the module is imported as ``secretKey`` (camelCase); the original
# printed the undefined name ``secret_key`` and raised NameError.
print(secretKey)

# TODO: get some of questions only, not all.
# TODO: present tense, verbs with "s"
# TODO: full stop of each sentence
# TODO: will print the error msg out "error:rm_answer_to_blank"
# TODO: quality of the dictionary is too low

# Load vocabulary list; ``with`` closes the handle even if json.load fails.
with open("temp/ietls-vocab-list.txt", "r") as text_file:
    vocab_list = json.load(text_file)
last_vocab = "yield"
last_vocab_index = vocab_list.index(last_vocab)
print(last_vocab_index)

# Load tenses list
with open('temp/tenses-list.json', "r") as tenses_file:
    tenses_dict = json.load(tenses_file)

# TODO
# TODO
"""
index_list = random.sample(range(500, last_vocab_index), 40)
temp_qes = ""
temp_ans = ""
n = 1
for index in index_list:
    print(n)
    request_obj = RequestFillInTheBlanksTest(vocab_list[index])
    result = request_obj.get_fill_in_the_blanks_test()
    if len(result['question']) > 0:
        temp_qes += result['question']
        temp_ans += result['answer']
    n += 1
print("Question:\n" + temp_qes)
print("Answer:\n" + temp_ans)
"""

# Get 10 words for definition test
# Get 5 words for synonyms test
# Get 25 words for fill-in-the-blanks test
| [
11748,
7007,
198,
11748,
33918,
198,
11748,
4738,
198,
198,
6738,
764,
1330,
3200,
9218,
198,
198,
4798,
7,
21078,
62,
2539,
8,
198,
198,
2,
16926,
46,
25,
651,
617,
286,
2683,
691,
11,
407,
477,
13,
198,
2,
16926,
46,
25,
1944,
... | 2.642706 | 473 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 3 14:49:28 2017
@author: Arpan
Description: Use c3d trained model for prediction. To be executed after
training_model_m4.py
"""
import json
import os
import utils
import numpy as np
import h5py
import pandas as pd
import collections
import cv2
import caffe
from joblib import Parallel, delayed
# Temporal Proposals : Pretrained
#VIDEOPATH = '/home/arpan/DATA_Drive/ActivityNet/videos'
#ANNOTATION_FILE = '/home/arpan/DATA_Drive/ActivityNet/ActivityNet-master/Evaluation/data/activity_net.v1-3.min.json'
#PROPOSALS_FILENAME = '/home/arpan/DATA_Drive/ActivityNet/extra_features/Temporal Activity Proposals/activitynet_v1-3_proposals.hdf5'
#SHUFFLE = '/home/arpan/DATA_Drive/ActivityNet/extra_features/ImageNet Shuffle Features/ImageNetShuffle2016_features.h5'
#MBH = "/home/arpan/VisionWorkspace/ActivityNet/MBH Features/MBH_Videos_features.h5"
#MBH_IDS = "/home/arpan/VisionWorkspace/ActivityNet/MBH Features/MBH_Videos_quids.txt"
#C3D = "/home/arpan/DATA_Drive/ActivityNet/extra_features/C3D/sub_activitynet_v1-3.c3d.hdf5"
#C3D_PCA = "/home/arpan/DATA_Drive/ActivityNet/extra_features/C3D/PCA_activitynet_v1-3.hdf5"
#SHUFFLE_IDS = '/home/arpan/DATA_Drive/ActivityNet/extra_features/ImageNet Shuffle Features/ImageNetShuffle2016_quids.txt'
#MODEL = "/home/arpan/DATA_Drive/ActivityNet/ActivityNet-master/caffe_models/deploy_c3d_fc_net.prototxt"
#PRETRAINED = "/home/arpan/DATA_Drive/ActivityNet/ActivityNet-master/caffe_models/snapshots/c3d_4k_1k/c3d_fc_net_snap_iter_400000.caffemodel"
#MEANFILE = "/home/arpan/DATA_Drive/ActivityNet/ActivityNet-master/caffe_models/mean_c3d_4k.binaryproto"
#SUBSET = 'validation'
VIDEOPATH = '/home/hadoop/VisionWorkspace/ActivityNet/ActivityNet-master/Crawler/videos'
ANNOTATION_FILE = '/home/hadoop/VisionWorkspace/ActivityNet/ActivityNet-master/Evaluation/data/activity_net.v1-3.min.json'
PROPOSALS_FILENAME = '/home/hadoop/VisionWorkspace/ActivityNet/Downloads/Temporal Activity Proposals/activitynet_v1-3_proposals.hdf5'
SHUFFLE = '/home/hadoop/VisionWorkspace/ActivityNet/Downloads/ImageNet Shuffle Features/ImageNetShuffle2016_features.h5'
MBH = "/home/hadoop/VisionWorkspace/ActivityNet/Downloads/MBH Features/MBH_Videos_features.h5"
C3D = "/home/hadoop/VisionWorkspace/ActivityNet/Downloads/C3D Features/sub_activitynet_v1-3.c3d.hdf5"
C3D_PCA = "/home/hadoop/VisionWorkspace/ActivityNet/Downloads/C3D Features/PCA_activitynet_v1-3.hdf5"
SHUFFLE_IDS = '/home/hadoop/VisionWorkspace/ActivityNet/Downloads/ImageNet Shuffle Features/ImageNetShuffle2016_quids.txt'
LMDB_FOLDER = "/home/hadoop/VisionWorkspace/ActivityNet/new_lmdb"
MODEL = "/home/hadoop/VisionWorkspace/ActivityNet/ActivityNet-master/caffe_models/deploy_c3d_fc_net.prototxt"
PRETRAINED = "/home/hadoop/VisionWorkspace/ActivityNet/ActivityNet-master/caffe_models/snapshots/c3d_4k_1k/c3d_fc_net_snap_iter_400000.caffemodel"
# Dead store removed: the /home/arpan path was assigned and immediately
# overwritten by the local-machine path below; kept here for reference only.
# MEANFILE = "/home/arpan/DATA_Drive/ActivityNet/ActivityNet-master/caffe_models/mean_c3d_4k.binaryproto"
MEANFILE = "/home/hadoop/VisionWorkspace/ActivityNet/ActivityNet-master/caffe_models/mean_c3d_4k.binaryproto"
SUBSET = 'validation'
def get_c3d_feature(fc3d, vid, pos, vfps):
    '''
    Read the C3D feature vector nearest to frame position ``pos`` of a video.

    C3D features are extracted for every 8th frame, so row ``pos // 8`` of
    the per-video feature matrix corresponds to ``pos``.

    fc3d -- mapping of video id -> {'c3d_features': 2-D array} (HDF5 handle)
    vid  -- video id key into fc3d
    pos  -- frame position within the video
    vfps -- unused; kept for interface compatibility with existing callers
    '''
    n_rows = fc3d[vid]['c3d_features'].shape[0]
    row = int(pos / 8)
    # Off-by-one fix: the original used ``row <= n_rows`` which let
    # ``row == n_rows`` through and raised IndexError at the video boundary;
    # clamp strictly below the row count instead.
    while row >= n_rows:
        print("Decrement by 1")
        row -= 1
    assert 0 <= row < n_rows
    vec = fc3d[vid]['c3d_features'][row, :]
    return vec
def globalPrediction(vid, category_names, vid_probs):
    """
    Aggregate per-segment class probabilities of one video into a global
    prediction: sum the probabilities over all rows and report the three
    highest-scoring classes.

    vid and category_names are currently unused; vid_probs is a DataFrame
    with one row per C3D segment and one column per class name.

    Returns (list of {'score', 'label'} dicts for the top-3 classes,
    per-class summed probability Series).
    """
    class_totals = vid_probs.sum(axis=0)
    best_three = class_totals.sort_values(ascending=False)[:3]
    annotations = [
        {'score': score, 'label': label}
        for label, score in zip(best_three.index.tolist(), best_three.values.tolist())
    ]
    # Alternative aggregations (temporal continuity of predictions, majority
    # vote over per-row argmax) were considered but did not beat plain summing.
    return annotations, class_totals
def get_rows_ignored(vid, bgThresh, v_no):
    """
    Use background subtraction to decide which frames to ignore while prediction.

    vid -- video id; file ``v_<vid>.mp4`` under VIDEOPATH is opened
    bgThresh -- frames whose foreground-mask sum is below this value are
        treated as static background and skipped
    v_no -- sequence number of the video, used only for progress logging

    Returns the list of skipped frame positions (CAP_PROP_POS_FRAMES values).
    """
    # process the video frame by frame
    print "For video : {} " .format(v_no)
    # frames are downscaled to 160x120 before background subtraction
    W, H = 160, 120
    vpath = os.path.join(VIDEOPATH, 'v_'+vid+'.mp4')
    cap = cv2.VideoCapture(vpath)
    if not cap.isOpened():
        raise IOError("Capture object not opened !")
    #fps = cap.get(cv2.CAP_PROP_FPS)
    frms_ig = []
    frms_msec = []
    fgbg = cv2.createBackgroundSubtractorMOG2() #bg subtractor
    # seed the subtractor with the first frame
    ret, prev_frame = cap.read()
    prev_frame = cv2.resize(prev_frame, (W, H) )
    fgmask = fgbg.apply(prev_frame)
    # convert frame to GRAYSCALE
    prev_frame = cv2.cvtColor(prev_frame, cv2.COLOR_BGR2GRAY)
    # iterate over the frames
    count = 0
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        frame = cv2.resize(frame, (W, H))
        curr_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # To find the background mask and skip the frame if foreground is absent
        fgmask = fgbg.apply(frame)
        if np.sum(fgmask)<bgThresh:
            #print "BG frame skipped !!"
            #print "FGMASK : {}" .format(np.sum(fgmask))
            # record both the frame index and its timestamp in milliseconds
            frms_ig.append(cap.get(cv2.CAP_PROP_POS_FRAMES))
            frms_msec.append(cap.get(cv2.CAP_PROP_POS_MSEC))
            count += 1
        #cv2.imshow("BG Ignored", curr_frame)
        #waitTillEscPressed()
        prev_frame = curr_frame
        continue
    #print "Total Frames : {}" .format(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    #print "Skipped Frames : {}" .format(count)
    #print frms_ig
    #print frms_msec
    cap.release()
    #cv2.destroyAllWindows()
    return frms_ig
if __name__=='__main__':
    # Stand-alone evaluation entry point: load annotations, run the C3D
    # classifier over the chosen subset, and write a submission JSON.
    # Read the database, version and taxonomy from JSON file
    with open(ANNOTATION_FILE, "r") as fobj:
        data = json.load(fobj)
        database = data["database"]
        taxonomy = data["taxonomy"]
        version = data["version"]
    # Videos present in the annotations but missing on disk are excluded.
    non_existing_videos = utils.crosscheck_videos(VIDEOPATH, ANNOTATION_FILE)
    print "No of non-existing videos: %d" % len(non_existing_videos)
    train_vids_all = []
    [train_vids_all.append(x) for x in database if database[x]['subset']=='training']
    # Find list of available training videos
    train_existing_vids = list(set(train_vids_all) - set(non_existing_videos))
    val_vids_all = []
    [val_vids_all.append(x) for x in database if database[x]['subset']==SUBSET]
    # Find list of available validation (SUBSET) videos
    val_existing_vids = list(set(val_vids_all) - set(non_existing_videos))
    ###########################################################################
    # Get categories information from the database (Train+Validation sets)
    category = []
    for x in database:
        cc = []
        for l in database[x]["annotations"]:
            cc.append(l["label"])
        category.extend(list(set(cc)))
    category_count = collections.Counter(category)
    # sorted class names define the column order of the probability matrix
    category_names = sorted(category_count.keys())
    print "Total No of classes: %d" % len(category_names)
    #print category_names
    ###########################################################################
    # MBH and ImageNetShuffle Features in training_model_m2.py
    ###########################################################################
    # C3D features
    # Read the meta_info and sample_positions files
    samples_csv = "samples.csv"
    samples_val_csv = "samples_val.csv"
    with open("training_data_meta_info.json", "r") as fobj:
        meta_info = json.load(fobj)
    #construct_dataset(meta_info, samples_csv, category_names)
    with open("val_data_meta_info.json", "r") as fobj:
        val_meta_info = json.load(fobj)
    #construct_dataset(val_meta_info, samples_val_csv, category_names)
    ###########################################################################
    # Consider Taxonomy of the classes
    # Temporal Proposals
    ###########################################################################
    caffe.set_mode_gpu()
    # load the model
    bgThresholds = [105000, 115000]
    net = caffe.Net(MODEL, PRETRAINED, caffe.TEST)
    # Predict on the validation set videos for each value of bgThreshold
    #for th in bgThresholds:
    # NOTE(review): ``get_predictions`` is not defined in this file as shown --
    # confirm it is defined/imported (see training_model_m4.py) before running.
    pred, c3d_probs = get_predictions(net, val_existing_vids, category_names)
    print "Predicted Labels : "
    print c3d_probs.head()
    #
    # Assemble the ActivityNet submission-format JSON and write it to disk.
    out_dict = {'version':version}
    subset_video_ids = []
    ext_data_dict = {'used': True, 'details': 'C3D features.'}
    out_dict['results'] = pred
    out_dict['external_data'] = ext_data_dict
    json_filename = 'submission_t3_framewise_'+SUBSET+'.json'
    with open(json_filename, 'w') as fp:
        json.dump(out_dict, fp)
    #
    ##############################################################################
    # Use LMDB to get the predictions
    # MEANFILE is the path to the training mean binaryproto file
    # train_mean = get_training_mean(MEANFILE)
    # print "Mean file : {}" .format(train_mean)
    # import lmdb
    # lmdb_env = lmdb.open(LMDB_FOLDER+'/val_c3d_lmdb')
    # lmdb_txn = lmdb_env.begin()
    # lmdb_cursor = lmdb_txn.cursor()
    # count = 0
    # correct = 0
    # for key, value in lmdb_cursor:
    #     print "Count:"
    #     print count
    #     count = count + 1
    #     datum = caffe.proto.caffe_pb2.Datum()
    #     datum.ParseFromString(value)
    #     label = int(datum.label)
    #     image = caffe.io.datum_to_array(datum)
    #     print "Shape 1 : {}" .format(image.shape)
    #     #image = image.astype(np.uint8)
    #     image = image - train_mean
    #     print "Shape 2 : {}" .format(image.shape)
    #     print "Asarray shape : {}" .format(np.asarray([image]).shape)
    #     out = net.forward_all(data=np.asarray([image]))
    #     print "out Shape : {}" .format(out['prob'].shape)
    #     predicted_label = out['prob'][0].argmax(axis=0)
    #     print "Predicted Label : {}" .format(predicted_label)
    #
    #     if count == 3:
    #         break
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
17,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
2892,
5979,
220,
513,
1478,
25,
2920,
25,
2078,
2177,
198,
198,
31,
9800,
25,
943,
6839,
... | 2.442627 | 4,462 |
import argparse
from fractions import Fraction
import math

import numpy as np
import pandas as pd
from tabulate import tabulate

# Parse command line
parser = argparse.ArgumentParser(description="This tool is for calculating an arc on a piece of wood. See README.md for details.")
parser.add_argument('A', type=int, help='The width to the center of the arc.')
parser.add_argument('B', type=int, help='The height of the arc at the center (highest point).')
parser.add_argument('--round', help='Number of digits to round decimals to. If not specified, the number will not be rounded to 5 digits.', type=int, default=5)
args = parser.parse_args()

# Radius of the circle whose segment forms the arc (chord half-width A, rise B).
radius = (.5 * math.sqrt(math.pow(args.A, 2) + math.pow(args.B, 2)))/math.cos(math.atan(args.A/args.B))

# X positions sampled every 1/16 inch across the full width (2 * A).
df = pd.DataFrame({
    'X': pd.Series(np.arange(1, (args.A * 2) + 1, 0.0625))
})

# Height of the arc at each X: circle equation shifted so the ends sit at Y=0.
df['Y'] = df['X'].map(lambda X: math.sqrt(math.pow(radius, 2) - math.pow((X - args.A), 2)) - (radius - args.B))

# Express each X as a mixed number (whole inches plus a fraction of an inch).
# Note: the original recomputed 'X Denominator' a second time after 'X Value'
# with the identical expression; the duplicate (dead) computation is removed.
df['X Denominator'] = df['X'].apply(lambda X: Fraction(X).denominator)
df['X Whole Number'] = df['X'].apply(lambda X: int(X))
df['X Numerator'] = df.apply(lambda row: (row['X'] - row['X Whole Number']) * row['X Denominator'], axis=1)
df['X Value'] = df.apply(lambda row: '{:6d} {:2d} / {:2d}'.format(int(row['X Whole Number']), int(row['X Numerator']), int(row['X Denominator'])), axis=1)

# Y snapped to the nearest 1/16 inch, plus how far off that snapping is.
df['Y 16th Offset'] = df['Y'].apply(lambda Y: abs((Y * 16) - round(Y * 16, 0)))
df['Y Rounded to 16th'] = df['Y'].apply(lambda Y: round(Y * 16, 0) / 16)
df['Y'] = df['Y'].round(args.round)

# Format and return results.
print(tabulate(df, headers='keys', tablefmt='psql', showindex=False))
| [
11748,
1822,
29572,
198,
6738,
49876,
1330,
376,
7861,
198,
11748,
10688,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
7400,
5039,
1330,
7400,
5039,
198,
198,
2,
18980,
889,
2163,
198,
2,
4... | 2.577263 | 906 |
#!/usr/bin/env python3
# This sample demonstrates how to use query parameters with a REST API
# endpoint.
# For a list of the endpoints that you can use along with the parameters that
# they accept you can view the REST API interactive help page on your
# deployment at https://<hostname>/api_doc
# You can also retrieve a list of available endpoints through the API itself
# at the /api/help/endpoints endpoint.
import sys
import os
import Cleanup
import importlib
sys.path.append(os.path.realpath('../modules'))
client_module = importlib.import_module('RestApiClient')
SampleUtilities = importlib.import_module('SampleUtilities')
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
770,
6291,
15687,
703,
284,
779,
12405,
10007,
351,
257,
30617,
7824,
198,
2,
36123,
13,
198,
198,
2,
1114,
257,
1351,
286,
262,
886,
13033,
326,
345,
460,
779,
1863,
351,
2... | 3.539267 | 191 |
password = {
"password": ""#password here
} | [
28712,
796,
1391,
198,
220,
220,
220,
366,
28712,
1298,
13538,
2,
28712,
994,
198,
92
] | 2.9375 | 16 |
from typing import List, Tuple, Optional
import numpy as np
g = Graph([
[0, 1, 1, 0, 1],
[0, 0, 0, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]
])
print(g.BFS())
| [
6738,
19720,
1330,
7343,
11,
309,
29291,
11,
32233,
198,
11748,
299,
32152,
355,
45941,
628,
628,
628,
198,
70,
796,
29681,
26933,
198,
220,
220,
220,
685,
15,
11,
352,
11,
352,
11,
657,
11,
352,
4357,
198,
220,
220,
220,
685,
15,... | 1.827273 | 110 |
"""Provides key, name, description, and location for
cookiecutter apps templates
"""
__all__ = ['COOKIECUTTER_URI', 'DIRECTORY', 'CHECKOUT']
COOKIECUTTER_URI = 'https://github.com/TACC-Cloud/cc-tapis-v2-app.git'
DIRECTORY = 'default'
CHECKOUT = 'master'
| [
37811,
15946,
1460,
1994,
11,
1438,
11,
6764,
11,
290,
4067,
329,
198,
44453,
8968,
353,
6725,
24019,
198,
37811,
198,
198,
834,
439,
834,
796,
37250,
34,
15308,
40,
2943,
3843,
5781,
62,
47269,
3256,
705,
17931,
23988,
15513,
3256,
7... | 2.666667 | 96 |
# Copyright (c) 2012-2014, Max Zwiessele, James Hensman
# Licensed under the BSD 3-clause license (see LICENSE.txt)
__doc__ = """
Inference over Gaussian process latent functions
In all our GP models, the consistency propery means that we have a Gaussian
prior over a finite set of points f. This prior is
math:: N(f | 0, K)
where K is the kernel matrix.
We also have a likelihood (see GPy.likelihoods) which defines how the data are
related to the latent function: p(y | f). If the likelihood is also a Gaussian,
the inference over f is tractable (see exact_gaussian_inference.py).
If the likelihood object is something other than Gaussian, then exact inference
is not tractable. We then resort to a Laplace approximation (laplace.py) or
expectation propagation (ep.py).
The inference methods return a
:class:`~GPy.inference.latent_function_inference.posterior.Posterior`
instance, which is a simple
structure which contains a summary of the posterior. The model classes can then
use this posterior object for making predictions, optimizing hyper-parameters,
etc.
"""
from .exact_gaussian_inference import ExactGaussianInference
from .laplace import Laplace,LaplaceBlock
import os, sys
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv())
sys.path.append(os.environ.get("PROJECT_ROOT"))
sys.path.append(os.path.join(os.environ.get("PROJECT_ROOT"), 'test'))
from GPy_1_0_5.inference.latent_function_inference.var_dtc import VarDTC
from .expectation_propagation import EP, EPDTC
from .dtc import DTC
from .fitc import FITC
from .var_dtc_parallel import VarDTC_minibatch
from .var_gauss import VarGauss
# class FullLatentFunctionData(object):
#
#
# class EMLikeLatentFunctionInference(LatentFunctionInference):
# def update_approximation(self):
# """
# This function gets called when the
# """
#
# def inference(self, kern, X, Z, likelihood, Y, Y_metadata=None):
# """
# Do inference on the latent functions given a covariance function `kern`,
# inputs and outputs `X` and `Y`, inducing_inputs `Z`, and a likelihood `likelihood`.
# Additional metadata for the outputs `Y` can be given in `Y_metadata`.
# """
# raise NotImplementedError, "Abstract base class for full inference"
#
# class VariationalLatentFunctionInference(LatentFunctionInference):
# def inference(self, kern, X, Z, likelihood, Y, Y_metadata=None):
# """
# Do inference on the latent functions given a covariance function `kern`,
# inputs and outputs `X` and `Y`, inducing_inputs `Z`, and a likelihood `likelihood`.
# Additional metadata for the outputs `Y` can be given in `Y_metadata`.
# """
# raise NotImplementedError, "Abstract base class for full inference"
| [
2,
15069,
357,
66,
8,
2321,
12,
4967,
11,
5436,
1168,
86,
444,
325,
293,
11,
3700,
367,
641,
805,
198,
2,
49962,
739,
262,
347,
10305,
513,
12,
565,
682,
5964,
357,
3826,
38559,
24290,
13,
14116,
8,
198,
198,
834,
15390,
834,
79... | 2.987166 | 935 |
import datetime
import logging
import sys
from random import shuffle
logger = logging.getLogger(__name__)
if __name__ == "__main__":
run_spark(sys.argv)
| [
11748,
4818,
8079,
198,
11748,
18931,
198,
11748,
25064,
198,
198,
6738,
4738,
1330,
36273,
628,
198,
6404,
1362,
796,
18931,
13,
1136,
11187,
1362,
7,
834,
3672,
834,
8,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
83... | 2.810345 | 58 |
from typing import Tuple
from hypothesis import given
from gon.base import Vector
from tests.utils import implication
from . import strategies
@given(strategies.vectors)
@given(strategies.vectors)
@given(strategies.vectors_pairs)
| [
6738,
19720,
1330,
309,
29291,
198,
198,
6738,
14078,
1330,
1813,
198,
198,
6738,
35140,
13,
8692,
1330,
20650,
198,
6738,
5254,
13,
26791,
1330,
26863,
198,
6738,
764,
1330,
10064,
628,
198,
31,
35569,
7,
2536,
2397,
444,
13,
303,
52... | 3.352113 | 71 |
# Generated by Django 3.0.10 on 2021-01-22 22:02
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
513,
13,
15,
13,
940,
319,
33448,
12,
486,
12,
1828,
2534,
25,
2999,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
... | 2.840909 | 44 |
from django.conf.urls.defaults import *
from django.views.generic import DetailView, ListView
from archive.models import Catalogue, Document
urlpatterns = patterns('archive.views',
url(r'^$',
ListView.as_view(
context_object_name="catalogues",
queryset=Catalogue.objects.all(),
template_name='archive/catalogues.html'), name='catalogue-list'),
url(r'^(?P<pk>\d+)$',
DetailView.as_view(
context_object_name="catalogue",
queryset=Catalogue.objects.all(),
template_name='archive/catalogue.html'), name='catalogue-detail'),
url(r'^documents/$',
ListView.as_view(
context_object_name="documents",
queryset=Document.objects.all(),
template_name='archive/documents.html'), name='document-list'),
url(r'^document/(?P<pk>\d+)/$',
DetailView.as_view(
context_object_name="document",
queryset=Document.objects.all(),
template_name='archive/document.html'), name='document-detail'),
)
| [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
13,
12286,
82,
1330,
1635,
198,
6738,
42625,
14208,
13,
33571,
13,
41357,
1330,
42585,
7680,
11,
7343,
7680,
198,
198,
6738,
15424,
13,
27530,
1330,
16758,
5119,
11,
16854,
198,
198,
6371,
... | 2.020478 | 586 |
text = input("Text: ")
spam = False
if("make money fast"):
spam = True
print("Spam Detected!")
elif("subscribe this"):
spam = True
print("Spam Detected!")
elif("buy now"):
spam = True
print("Spam Detected!")
elif("purchase now"):
spam = True
print("Spam Detected!")
elif("for free"):
spam = True
print("Spam Detected!")
elif("click this"):
spam = True
print("Spam Detected!")
else:
spam = False
print("Spam Not Found!")
| [
5239,
796,
5128,
7203,
8206,
25,
366,
8,
201,
198,
2777,
321,
796,
10352,
201,
198,
201,
198,
361,
7203,
15883,
1637,
3049,
1,
2599,
201,
198,
220,
220,
220,
18084,
796,
6407,
201,
198,
220,
220,
220,
3601,
7203,
4561,
321,
46497,
... | 2.258929 | 224 |
# Exercício 032
from datetime import date
ano = int(input('Digite um ano qualquer. Digite 0 para analisar o ano ATUAL '))
if ano == 0:
ano = date.today().year
if ano % 4 == 0 and ano % 100 != 0 or ano % 400 == 0:
print(f'O ano de \033[1m{ano}\033[m \033[1;32mé BISSEXTO!\033[m')
else:
print(f'O Ano de \033[1m{ano}\033[m \033[1;32mNÃO BISSEXTO!\033[m')
| [
2,
1475,
2798,
8836,
66,
952,
657,
2624,
198,
198,
6738,
4818,
8079,
1330,
3128,
198,
198,
5733,
796,
493,
7,
15414,
10786,
19511,
578,
23781,
281,
78,
4140,
10819,
13,
7367,
578,
657,
31215,
2037,
271,
283,
267,
281,
78,
5161,
2562... | 2.073446 | 177 |