content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
from tkinter import Tk, StringVar, Label, Entry, OptionMenu, RIDGE, mainloop
master = Tk()
master.title("Star-Wars-Years")
# Calculate and Return Conversion Formulae Map
# Calculate Year in BBY
# Convert Calendars
# List of Calendars
CalendarList = list(GetFormulae(0).keys())
# Default Calendar
DefaultCalendar = CalendarList[0]
# UserCalendar Class
# Calendar Class
# Purpose: Validate player entered integer, valid equation, or 'x' cancel
# Parameters: User input (string)
# Returns: Converted user input (integer) or raw user input (string), validity (boolean)
# Conversion Table
# Conversion Prompt Label
ConversionPrompt = Label(master, text="Type the year value and select the associated calendar\nto solve for all the calendars.")
ConversionPrompt.grid(row=0, column=0, columnspan=2)
# Conversion User Input
ConversionCalendar = UserCalendar()
ConversionCalendar.yearValue.trace('w', lambda *pargs: ConvertCalendars())
ConversionCalendar.calendarValue.trace('w', lambda *pargs: ConvertCalendars())
ConversionCalendar.yearEntry.focus()
# Calendar Instance List
Calendars = []
# Generate Calendar Instances List
for index, element in enumerate(CalendarList, start=2):
# Start at row 2
Calendars.append(Calendar(CalendarList[index - 2], index))
# Duration Solver
# Duration Prompt Label
DurationPrompt = Label(master, text="Type the year value and select the associated calendar\nfor the start and end to solve for the age.")
DurationPrompt.grid(row=0, column=2, columnspan=2)
# Start User Input
StartLabel = Label(master, text="Start Year:", relief=RIDGE, width=39, height=1)
StartLabel.grid(row=1, column=2, columnspan=2)
StartCalendar = UserCalendar(2, 2)
StartCalendar.yearValue.trace('w', lambda *pargs: CalculateDuration())
StartCalendar.calendarValue.trace('w', lambda *pargs: CalculateDuration())
# End User Input
StartLabel = Label(master, text="End Year:", relief=RIDGE, width=39, height=1)
StartLabel.grid(row=3, column=2, columnspan=2)
EndCalendar = UserCalendar(4, 2)
EndCalendar.yearValue.trace('w', lambda *pargs: CalculateDuration())
EndCalendar.calendarValue.trace('w', lambda *pargs: CalculateDuration())
# Duration Result
DurationLabel = Label(master, text="Resulting Age", relief=RIDGE, width=39, height=1)
DurationLabel.grid(row=5, column=2, columnspan=2)
DurationResult = Calendar("Galactic Standard Years", 6, 2, 17, 19)
mainloop()
| [
6738,
256,
74,
3849,
1330,
309,
74,
11,
10903,
19852,
11,
36052,
11,
21617,
11,
16018,
23381,
11,
371,
2389,
8264,
11,
1388,
26268,
201,
198,
201,
198,
9866,
796,
309,
74,
3419,
201,
198,
9866,
13,
7839,
7203,
8248,
12,
41508,
12,
... | 3.0369 | 813 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
from docutils import nodes
from sphinx.util.compat import Directive, make_admonition
DANGER_MESSAGE = """
This is a "Hazardous Materials" module. You should **ONLY** use it if you're
100% absolutely sure that you know what you're doing because this module is
full of land mines, dragons, and dinosaurs with laser guns.
"""
DANGER_ALTERNATE = """
You may instead be interested in :doc:`{alternate}`.
"""
| [
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
7330,
257,
4866,
286,
262,
13789,
379,
198,
2,... | 3.625442 | 283 |
import json
import os
import pickle
import sys
import cv2
import numpy as np
this_filepath = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(this_filepath, '../src/detectron2/projects/DensePose/'))
from densepose import add_densepose_config, add_hrnet_config
from densepose.data.structures import DensePoseResult
from densepose.vis.base import CompoundVisualizer
from densepose.vis.bounding_box import ScoredBoundingBoxVisualizer
from densepose.vis.densepose import (DensePoseResultsContourVisualizer,
DensePoseResultsFineSegmentationVisualizer,
DensePoseResultsUVisualizer, DensePoseResultsVVisualizer)
from densepose.vis.extractor import CompoundExtractor, create_extractor
from detectron2.config import get_cfg
from detectron2.engine import DefaultPredictor
from src.dataset.UFOP import UFOPDataset
from src.utils.image import (create_context, recover_original_mask_size, select_body_parts)
from tqdm import tqdm
# IN THE UFOP-DATASET WE HAVE TO BE A LITTLE MORE CAREFULL
# AS THE GESTURE IS PERFORMED MORE THAN ONCE IN EACH VIDEO
# return annotations
if __name__ == "__main__":
main()
| [
11748,
33918,
198,
11748,
28686,
198,
11748,
2298,
293,
198,
11748,
25064,
198,
198,
11748,
269,
85,
17,
198,
11748,
299,
32152,
355,
45941,
198,
198,
5661,
62,
7753,
6978,
796,
28686,
13,
6978,
13,
15908,
3672,
7,
418,
13,
6978,
13,
... | 2.691964 | 448 |
from typing import Tuple, List, Dict, Any, NamedTuple
E = NamedTuple('E', [('input_name', str),
('key', str)]
)
AdaptingRecipe = Any
Results = Dict[str, Any]
AllInputs = Dict[str, Any]
| [
6738,
19720,
1330,
309,
29291,
11,
7343,
11,
360,
713,
11,
4377,
11,
34441,
51,
29291,
198,
198,
36,
796,
34441,
51,
29291,
10786,
36,
3256,
685,
10786,
15414,
62,
3672,
3256,
965,
828,
198,
220,
220,
220,
220,
220,
220,
220,
220,
... | 2.102804 | 107 |
from unittest.mock import call
from commands.command import Command
from handlers.handler import Handler
ONCE_HANDLED_DATA = 'ONCE_HANDLED_DATA'
HANDLED_DATA = None
| [
6738,
555,
715,
395,
13,
76,
735,
1330,
869,
198,
198,
6738,
9729,
13,
21812,
1330,
9455,
198,
6738,
32847,
13,
30281,
1330,
32412,
198,
198,
1340,
5222,
62,
39,
6981,
30465,
62,
26947,
796,
705,
1340,
5222,
62,
39,
6981,
30465,
62,... | 3.053571 | 56 |
# Import Python Packages
import os
import spacy
import streamlit as st
from sumy.nlp.tokenizers import Tokenizer
from sumy.parsers.plaintext import PlaintextParser
from sumy.summarizers.lex_rank import LexRankSummarizer
from textblob import TextBlob
from gensim.summarization import summarize
# Function to Perform Sumy Summarization
# Function to Extract Token and Lemma
@st.cache
# Function to Extract Named Entities
@st.cache
# Main Function - WebApp
def main():
"""Process Your Text With NLP"""
# Title and Sub-Header
st.title("PandoraNLP")
st.subheader("Perform NLP on Your Text\nSelect the NLP Operation you Want to Perform\n")
# Text Tokenization and Lemma Display
if st.checkbox("Display Tokens and Lemma"):
st.subheader("Tokenize your Text and Extract Lemma")
input = st.text_area("Enter Your Text","Enter the text to be tokenized")
if st.button("Analyze Text"):
tokenize = tokenAndLemma(input)
st.json(tokenize)
# Entity Extraction
if st.checkbox("Extract Entities from Text"):
st.subheader("Analyze the Text and Extract Entities")
input = st.text_area("Enter Your Text","Enter the text to be analyzed")
if st.button("Extract Entities"):
entity = namedEntity(input)
st.json(entity)
# Sentiment Analysis
if st.checkbox("Sentiment Analysis"):
st.subheader("Analyze the Underlying Sentiment ")
input = st.text_area("Enter Your Text","Enter the text to be analyzed")
if st.button("Extract Sentiment"):
blob = TextBlob(input)
sentiment = blob.sentiment
st.success(sentiment)
# Text Summarization
if st.checkbox("Summarize the Text"):
st.subheader("Create a short and Descriptive Summary")
input = st.text_area("Enter Your Text","Enter the text to be summarized")
summarizer = st.selectbox("Choose Summarizer",['Sumy','Gensim'])
if st.button("Summarize Text"):
# Using Sumy Summarizer
if summarizer == 'sumy':
st.text("You have Selected Sumy Summarizer")
result = sumySummarizer(input)
# Using Gensim Summarizer
elif summarizer == 'gensim':
st.text("You have Selected Sumy Summarizer")
result = gensim(input)
# Default Summarizer, When No Choice is Made
else:
result = sumySummarizer(input)
st.success(result)
st.sidebar.subheader("PandoraNLP")
st.sidebar.text("Perform NLP tasks using Spacy, Textblob, Sumy and Gensim Module")
st.sidebar.subheader("Developed By")
st.sidebar.text("Ashwin Raj")
if __name__ == '__main__':
main()
| [
2,
17267,
11361,
6400,
1095,
198,
198,
11748,
28686,
198,
11748,
599,
1590,
198,
11748,
4269,
18250,
355,
336,
198,
198,
6738,
2160,
88,
13,
21283,
79,
13,
30001,
11341,
1330,
29130,
7509,
198,
6738,
2160,
88,
13,
79,
945,
364,
13,
... | 2.793953 | 893 |
# -*- coding: utf-8 -*-
'''
File name: code\nim\sol_301.py
Author: Vaidic Joshi
Date created: Oct 20, 2018
Python Version: 3.x
'''
# Solution to Project Euler Problem #301 :: Nim
#
# For more information see:
# https://projecteuler.net/problem=301
# Problem Statement
'''
Nim is a game played with heaps of stones, where two players take it in turn to remove any number of stones from any heap until no stones remain.
We'll consider the three-heap normal-play version of Nim, which works as follows:
- At the start of the game there are three heaps of stones.
- On his turn the player removes any positive number of stones from any single heap.
- The first player unable to move (because no stones remain) loses.
If (n1,n2,n3) indicates a Nim position consisting of heaps of size n1, n2 and n3 then there is a simple function X(n1,n2,n3) — that you may look up or attempt to deduce for yourself — that returns:
zero if, with perfect strategy, the player about to move will eventually lose; or
non-zero if, with perfect strategy, the player about to move will eventually win.For example X(1,2,3) = 0 because, no matter what the current player does, his opponent can respond with a move that leaves two heaps of equal size, at which point every move by the current player can be mirrored by his opponent until no stones remain; so the current player loses. To illustrate:
- current player moves to (1,2,1)
- opponent moves to (1,0,1)
- current player moves to (0,0,1)
- opponent moves to (0,0,0), and so wins.
For how many positive integers n ≤ 230 does X(n,2n,3n) = 0 ?
'''
# Solution
# Solution Approach
'''
'''
| [
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
7061,
6,
198,
220,
220,
220,
9220,
1438,
25,
2438,
59,
77,
320,
59,
34453,
62,
18938,
13,
9078,
198,
220,
220,
220,
6434,
25,
569,
1698,
291,
8518,
72,
198,
2... | 3.335366 | 492 |
# -*- coding: utf-8 -*-
"""
sphinx.util.typing
~~~~~~~~~~~~~~~~~~
The composit types for Sphinx.
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from typing import Callable, Dict, List, Tuple
from docutils import nodes
from docutils.parsers.rst.states import Inliner
from six import text_type
# common role functions
RoleFunction = Callable[[text_type, text_type, text_type, int, Inliner, Dict, List[text_type]],
Tuple[List[nodes.Node], List[nodes.Node]]]
# title getter functions for enumerable nodes (see sphinx.domains.std)
TitleGetter = Callable[[nodes.Node], text_type]
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
220,
220,
220,
599,
20079,
87,
13,
22602,
13,
774,
13886,
198,
220,
220,
220,
220,
27156,
4907,
628,
220,
220,
220,
383,
552,
7434,
3858,
329,
45368,
28413... | 2.715415 | 253 |
import json
from OpenSSL import crypto
import logging
import jose
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding
from flask import Flask, request
from utils import dec_b64
from base64 import b64decode
app = Flask(__name__)
app.logger.setLevel(logging.DEBUG)
GSR_ROOT = """-----BEGIN CERTIFICATE-----
MIIDujCCAqKgAwIBAgILBAAAAAABD4Ym5g0wDQYJKoZIhvcNAQEFBQAwTDEgMB4G
A1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjIxEzARBgNVBAoTCkdsb2JhbFNp
Z24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDYxMjE1MDgwMDAwWhcNMjExMjE1
MDgwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMjETMBEG
A1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI
hvcNAQEBBQADggEPADCCAQoCggEBAKbPJA6+Lm8omUVCxKs+IVSbC9N/hHD6ErPL
v4dfxn+G07IwXNb9rfF73OX4YJYJkhD10FPe+3t+c4isUoh7SqbKSaZeqKeMWhG8
eoLrvozps6yWJQeXSpkqBy+0Hne/ig+1AnwblrjFuTosvNYSuetZfeLQBoZfXklq
tTleiDTsvHgMCJiEbKjNS7SgfQx5TfC4LcshytVsW33hoCmEofnTlEnLJGKRILzd
C9XZzPnqJworc5HGnRusyMvo4KD0L5CLTfuwNhv2GXqF4G3yYROIXJ/gkwpRl4pa
zq+r1feqCapgvdzZX99yqWATXgAByUr6P6TqBwMhAo6CygPCm48CAwEAAaOBnDCB
mTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUm+IH
V2ccHsBqBt5ZtJot39wZhi4wNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL2NybC5n
bG9iYWxzaWduLm5ldC9yb290LXIyLmNybDAfBgNVHSMEGDAWgBSb4gdXZxwewGoG
3lm0mi3f3BmGLjANBgkqhkiG9w0BAQUFAAOCAQEAmYFThxxol4aR7OBKuEQLq4Gs
J0/WwbgcQ3izDJr86iw8bmEbTUsp9Z8FHSbBuOmDAGJFtqkIk7mpM0sYmsL4h4hO
291xNBrBVNpGP+DTKqttVCL1OmLNIG+6KYnX3ZHu01yiPqFbQfXf5WRDLenVOavS
ot+3i9DAgBkcRcAtjOj4LaR0VknFBbVPFd5uRHg5h6h+u/N5GJG79G+dwfCMNYxd
AfvDbbnvRG15RjF+Cv6pgsH/76tuIMRQyV+dTZsXjAzlAcmgQWpzU/qlULRuJQ/7
TBj0/VLZjmmx6BEP3ojY+x1J96relc8geMJgEtslQIxq/H5COEBkEveegeGTLg==
-----END CERTIFICATE-----"""
@app.route('/safetynet/validate', methods=['POST'])
| [
11748,
33918,
198,
6738,
4946,
31127,
1330,
21473,
198,
11748,
18931,
198,
11748,
474,
577,
198,
6738,
45898,
13,
71,
1031,
6759,
13,
19795,
20288,
1330,
46621,
198,
6738,
45898,
13,
71,
1031,
6759,
13,
19795,
20288,
13,
4107,
3020,
194... | 1.547788 | 1,130 |
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for multigrid environments."""
import gym
import numpy as np
import tensorflow as tf
from tf_agents.train import actor
from tf_agents.utils import common
from social_rl.multiagent_tfagents.joint_attention import drivers
class LSTMStateWrapper(gym.ObservationWrapper):
"""Wrapper to add LSTM state to observation dicts."""
class StateActor(actor.Actor):
"""An Actor that adds uses the StatePyDriver."""
def __init__(self,
*args,
steps_per_run=None,
episodes_per_run=None,
**kwargs):
"""Initializes a StateActor.
Args:
*args: See superclass.
steps_per_run: Number of steps evaluated per run call.
episodes_per_run: Number of episodes evaluated per run call.
**kwargs: See superclass.
"""
super(StateActor, self).__init__(*args,
steps_per_run,
episodes_per_run,
**kwargs)
self._driver = drivers.StatePyDriver(
self._env,
self._policy,
self._observers,
max_steps=steps_per_run,
max_episodes=episodes_per_run)
self.reset()
def write_metric_summaries(self):
"""Generates scalar summaries for the actor metrics."""
super().write_metric_summaries()
if self._metrics is None:
return
with self._summary_writer.as_default(), \
common.soft_device_placement(), \
tf.summary.record_if(lambda: True):
# Generate summaries against the train_step
for m in self._metrics:
tag = m.name
if 'Multiagent' in tag:
for a in range(m.n_agents):
tf.compat.v2.summary.scalar(name=tag + '_agent' + str(a),
data=m.result_for_agent(a),
step=self._train_step)
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
15069,
33448,
383,
3012,
4992,
46665,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
... | 2.389313 | 1,048 |
from sys import path
from os.path import dirname as dir
path.append(dir(path[0]))
from analizer_pl.modules import code
import analizer_pl.modules.expressions as expression
from analizer_pl.abstract.expression import incTemp
from analizer_pl.abstract.expression import newTemp
from analizer_pl.abstract.expression import TYPE
from optimizer_folder.optimizer import Optimizer
from analizer_pl.tokens import *
import ply.lex as lex
import ply.yacc as yacc
from analizer_pl.C3D.operations.BackFill import BackFill
from analizer_pl.reports.Nodo import Nodo
from analizer_pl.reports import AST
from analizer_pl.sql_statement.select import select
ast = AST.AST()
# Construccion del analizador léxico
current_etiq = 0
next_etiq = 0
if_stmt = 0
back_fill = BackFill()
optimizer_ = Optimizer()
lexer = lex.lex()
# Asociación de operadores y precedencia
listInst = []
repGrammar = []
precedence = (
("left", "R_UNION", "R_INTERSECT", "R_EXCEPT"),
("right", "R_NOT"),
("left", "R_AND", "R_OR"),
(
"left",
"R_BETWEEN",
"R_IS",
),
(
"left",
"S_IGUAL",
"OL_DISTINTODE",
"OL_MAYORQUE",
"OL_MENORQUE",
"OL_MAYORIGUALQUE",
"OL_MENORIGUALQUE",
),
("left", "OC_CONCATENAR"),
("left", "O_SUMA", "O_RESTA"),
("left", "O_PRODUCTO", "O_DIVISION", "O_MODULAR"),
("right", "UO_SUMA", "UO_RESTA"),
("left", "O_EXPONENTE"),
)
# Definición de la gramática
isBlock = False
# region PL/SQL
def p_init(t):
"""
init : istructionList
"""
t[0] = t[1]
def p_instruction_list(t):
"""istructionList : istructionList instruction"""
t[1].append(t[2])
t[0] = t[1]
repGrammar.append(t.slice)
def p_instruction_u(t):
"""istructionList : instruction"""
t[0] = [t[1]]
repGrammar.append(t.slice)
def p_instruction(t):
"""
instruction : stmt
| block
| execute S_PUNTOCOMA
| drop_func
"""
try:
if t[1].dot():
listInst.append(t[1].dot())
except Exception as e:
print(e)
t[0] = t[1]
repGrammar.append(t.slice)
def p_block(t):
"""
block : function_stmt isblock_ R_AS S_DOLAR S_DOLAR declaration_stmt R_BEGIN block_stmts exception_stmts R_END label S_PUNTOCOMA S_DOLAR S_DOLAR language_function_1
"""
t[0] = code.Block(t[1], t[6], t[8], t[9], t[11], t[1].row, t[1].column)
global isBlock
isBlock = False
repGrammar.append(t.slice)
def p_isblock_(t):
"""
isblock_ : language_function
"""
global isBlock
isBlock = True
def p_isblock_f(t):
"""
isblock_f :
"""
global isBlock
isBlock = False
# endregion
# region function
def p_function_stmt(t):
"""
function_stmt : R_CREATE orReplace R_FUNCTION ID function_opt
| R_CREATE orReplace R_PROCEDURE ID procedure_opt
"""
t[0] = code.FunctionDeclaration(
t[3], t[4], t[5][0], t[5][1], t.slice[1].lineno, t.slice[1].lexpos
)
repGrammar.append(t.slice)
def p_function_opt_param(t):
"""
function_opt : S_PARIZQ params_function S_PARDER returns_function
"""
t[0] = [t[2], t[4]]
repGrammar.append(t.slice)
def p_procedure_opt_param(t):
"""
procedure_opt : S_PARIZQ params_function S_PARDER
"""
t[0] = [t[2], None]
repGrammar.append(t.slice)
def p_procedure_opt(t):
"""
procedure_opt : S_PARIZQ S_PARDER
"""
t[0] = [None, None]
repGrammar.append(t.slice)
def p_function_opt(t):
"""
function_opt : S_PARIZQ S_PARDER returns_function
"""
t[0] = [None, t[3]]
repGrammar.append(t.slice)
def p_params_function(t):
"""
params_function : params_function S_COMA param_function
"""
t[1].append(t[3])
t[0] = t[1]
repGrammar.append(t.slice)
def p_params_function_u(t):
"""
params_function : param_function
"""
t[0] = [t[1]]
repGrammar.append(t.slice)
def p_param_function_id(t):
"""
param_function : ID types_d
"""
t[0] = code.Declaration(t[1], t[2], None, t.slice[1].lineno, t.slice[1].lexpos)
repGrammar.append(t.slice)
def p_returns_function(t):
"""
returns_function : R_RETURNS types_d
"""
t[0] = t[2]
repGrammar.append(t.slice)
def p_returns_function_n(t):
"""
returns_function :
"""
t[0] = None
repGrammar.append(t.slice)
def p_language_function(t):
"""
language_function : R_LANGUAGE R_PLPGSQL
|
"""
repGrammar.append(t.slice)
def p_language_function_1(t):
"""
language_function_1 : language_function S_PUNTOCOMA
|
"""
repGrammar.append(t.slice)
# endregion
# region declaration
def p_declaration_stmt(t):
"""
declaration_stmt : R_DECLARE global_variable_declaration
"""
t[0] = t[2]
repGrammar.append(t.slice)
def p_declaration_stmt_n(t):
"""
declaration_stmt :
"""
t[0] = []
repGrammar.append(t.slice)
'''
def p_declaration_list(t):
"""
declaration_list : declaration_list R_DECLARE global_variable_declaration
"""
t[1].append(t[3])
t[0] = t[1]
repGrammar.append(t.slice)
def p_declaration_list_u(t):
"""
declaration_list : R_DECLARE global_variable_declaration
"""
t[0] = [t[1]]
repGrammar.append(t.slice)
'''
def p_global_variable_declaration(t):
"""
global_variable_declaration : global_variable_declaration declaration
"""
t[1].append(t[2])
t[0] = t[1]
repGrammar.append(t.slice)
def p_global_variable_declaration_1(t):
"""
global_variable_declaration : declaration
"""
t[0] = [t[1]]
repGrammar.append(t.slice)
def p_global_vd(t):
"""
declaration : ID R_RECORD S_PUNTOCOMA
| ID R_ALIAS R_FOR ID S_PUNTOCOMA
"""
repGrammar.append(t.slice)
def p_global_vd_assignment(t):
"""
declaration : ID constant types_d assignment S_PUNTOCOMA
"""
ass = None
if t[4]:
ass = code.Assignment(t[1], t[4], t.slice[1].lineno, t.slice[1].lexpos)
t[0] = code.Declaration(t[1], t[3], ass, t.slice[1].lineno, t.slice[1].lexpos)
repGrammar.append(t.slice)
def p_constant(t):
"""
constant : R_CONSTANT
|
"""
repGrammar.append(t.slice)
def p_assignment(t):
"""
assignment : assignment_operator_D expresion
"""
t[0] = t[2]
repGrammar.append(t.slice)
def p_assignment_none(t):
"""
assignment :
"""
t[0] = None
repGrammar.append(t.slice)
def p_assignment_operator_D(t):
"""
assignment_operator_D : R_DEFAULT
| O_ASIGNACION
| OL_ESIGUAL
"""
repGrammar.append(t.slice)
def p_label(t):
"""label : ID
|
"""
repGrammar.append(t.slice)
# endregion
# region typedeclaration
def p_types_d(t):
"""
types_d : ID
"""
t[0] = TYPE.TYPE
repGrammar.append(t.slice)
def p_types_d_simple_num(t):
"""
types_d : T_SMALLINT
| T_INTEGER
| T_BIGINT
| T_REAL
| T_DOUBLE T_PRECISION
| T_MONEY
"""
t[0] = TYPE.NUMBER
repGrammar.append(t.slice)
def p_types_d_simple_str(t):
"""
types_d : T_TEXT
| R_TIMESTAMP
| T_DATE
| T_TIME
"""
t[0] = TYPE.STRING
repGrammar.append(t.slice)
def p_types_d_simple_bool(t):
"""
types_d : T_BOOLEAN
"""
t[0] = TYPE.BOOLEAN
repGrammar.append(t.slice)
def p_types_d_params_num(t):
"""
types_d : T_DECIMAL optParams
| T_NUMERIC optParams
"""
t[0] = TYPE.NUMBER
repGrammar.append(t.slice)
def p_types_d_params_str(t):
"""
types_d : T_VARCHAR optParams
| T_CHARACTER optParams
| T_CHAR optParams
"""
t[0] = TYPE.STRING
repGrammar.append(t.slice)
def p_typesvar(t):
"""
types_d : T_CHARACTER T_VARYING optParams
"""
t[0] = TYPE.STRING
repGrammar.append(t.slice)
# endregion
# region block stmts
def p_block_stmts(t):
"""
block_stmts : block_stmts block_stmt
"""
t[1].append(t[2])
t[0] = t[1]
def p_block_stmts_u(t):
"""
block_stmts : block_stmt
"""
t[0] = [t[1]]
def p_block_stmt(t):
"""
block_stmt : local_variable_declaration
| statement
| stmt
"""
t[0] = t[1]
repGrammar.append(t.slice)
# endregion
# region local variable declaration
def p_local_variable_declaration(t):
"""
local_variable_declaration : ID assignment_operator expresion S_PUNTOCOMA
"""
t[0] = code.Assignment(t[1], t[3], t.slice[1].lineno, t.slice[1].lexpos)
repGrammar.append(t.slice)
def p_assignment_operator(t):
"""
assignment_operator : O_ASIGNACION
| S_IGUAL
"""
repGrammar.append(t.slice)
# endregion
# region Control Structures
def p_statement(t):
"""
statement : if_stmt
| case_stmt
| stmt_without_substmt
| drop_func
"""
t[0] = t[1]
repGrammar.append(t.slice)
def p_drop_func(t):
"""
drop_func : R_DROP R_FUNCTION ID S_PUNTOCOMA
| R_DROP R_PROCEDURE ID S_PUNTOCOMA
"""
t[0] = code.DropFunction(t[3], t.slice[1].lineno, t.slice[1].lexpos)
repGrammar.append(t.slice)
def p_stmt_without_substmt(t):
"""
stmt_without_substmt : R_NULL S_PUNTOCOMA
| query_single_row
"""
def p_stmt_without_substmt_rtn(t):
"""
stmt_without_substmt : R_RETURN return_stmt
"""
t[0] = code.Return(t[2], t.slice[1].lineno, t.slice[1].lexpos)
# endregion
# region CONDITIONALS IF, CASE
# IF
def p_if_stmt(t):
"""if_stmt : R_IF expBool R_THEN block_stmts elseif_stmts_opt else_stmt_opt R_END R_IF S_PUNTOCOMA"""
t[0] = code.IfStatement(
t.slice[1].lineno, t.slice[1].lexpos, t[2], t[5], t[6], t[4]
)
repGrammar.append(t.slice)
# expBool contiene el C3D de la expresion
def p_elseif_stmts_opt(t):
"""
elseif_stmts_opt : elseif_stmts
"""
t[0] = t[1]
repGrammar.append(t.slice)
def p_elseif_stmts_opt_1(t):
"""
elseif_stmts_opt :
"""
t[0] = []
repGrammar.append(t.slice)
def p_elseif_stmts(t):
"""
elseif_stmts : elseif_stmts elseif_stmt
"""
t[1].append(t[2])
t[0] = t[1]
repGrammar.append(t.slice)
def p_elseif_stmts_1(t):
"""
elseif_stmts : elseif_stmt
"""
t[0] = [t[1]]
repGrammar.append(t.slice)
def p_elseif_stmt(t):
"""elseif_stmt : R_ELSEIF expBool R_THEN block_stmts"""
t[0] = code.ElseIfStatement(t.slice[1].lineno, t.slice[1].lexpos, t[2], t[4])
# expBool contiene el C3D de la expresion
repGrammar.append(t.slice)
def p_else_stmt_opt(t):
"""
else_stmt_opt : R_ELSE block_stmts
"""
t[0] = code.ElseStatement(t.slice[1].lineno, t.slice[1].lexpos, t[2])
repGrammar.append(t.slice)
def p_else_stmt_opt_1(t):
"""
else_stmt_opt :
"""
t[0] = None
repGrammar.append(t.slice)
# endregion
# region CASE
def p_case_stmt(t):
"""
case_stmt : case_stmt_n
| case_stmt_bool
"""
t[0] = t[1]
repGrammar.append(t.slice)
def p_case_stmt_n(t):
"""case_stmt_n : R_CASE ID R_WHEN list_expression R_THEN block_stmts else_case_stmt_n_opt else_stmt_opt R_END R_CASE S_PUNTOCOMA"""
repGrammar.append(t.slice)
def p_else_case_stmt_n_opt(t):
"""
else_case_stmt_n_opt : else_case_stmt_n
|
"""
repGrammar.append(t.slice)
def p_else_case_stmt_n(t):
"""
else_case_stmt_n : else_case_stmt_n R_WHEN list_expression R_THEN block_stmts
| R_WHEN list_expression R_THEN block_stmts
"""
repGrammar.append(t.slice)
def p_case_stmt_bool(t):
"""case_stmt_bool : R_CASE R_WHEN expBool R_THEN block_stmts else_case_stmt_bool_opt else_stmt_opt R_END R_CASE S_PUNTOCOMA"""
t[0] = code.Case(t[3], t[5], t[6], t[7], t.slice[1].lineno, t.slice[1].lexpos)
repGrammar.append(t.slice)
def p_else_case_stmt_bool_opt(t):
"""
else_case_stmt_bool_opt : else_case_stmt_bool
"""
t[0] = t[1]
repGrammar.append(t.slice)
def p_else_case_stmt_bool_opt_none(t):
"""
else_case_stmt_bool_opt :
"""
t[0] = None
repGrammar.append(t.slice)
def p_else_case_stmt_bool(t):
"""
else_case_stmt_bool : else_case_stmt_bool R_WHEN expBool R_THEN block_stmts
"""
t[1].append([t[3], t[5]])
t[0] = t[1]
# expBool contiene el C3D de la expresion
# t[1].append(t[3])
# t[0] = t[1]
repGrammar.append(t.slice)
def p_else_case_stmt_bool_u(t):
"""
else_case_stmt_bool : R_WHEN expBool R_THEN block_stmts
"""
t[0] = [[t[2], t[4]]]
# expBool contiene el C3D de la expresion
# t[0] = [t[2]]
repGrammar.append(t.slice)
def p_list_expression(t):
"""
list_expression : exp1
| list_expression S_COMA exp1
"""
repGrammar.append(t.slice)
def p_exp1(t):
"""
exp1 : INTEGER
| STRING
| DECIMAL
| CHARACTER
| R_TRUE
| R_FALSE
"""
repGrammar.append(t.slice)
# endregion
# region return
# TODO: isblock False
def p_return_stmt(t):
"""
return_stmt : S_PUNTOCOMA
"""
t[0] = None
repGrammar.append(t.slice)
def p_return_stmt_exp(t):
"""
return_stmt : expresion S_PUNTOCOMA
"""
t[0] = t[1]
repGrammar.append(t.slice)
# endregion
# region EXECUTE
def p_execute(t):
"""execute : R_EXECUTE isblock_ funcCall isblock_f into_strict"""
t[0] = code.Execute_(t[3], t.slice[1].lineno, t.slice[1].lexpos)
repGrammar.append(t.slice)
def p_into_strict(t):
"""
into_strict : R_INTO strict ID
|
"""
repGrammar.append(t.slice)
# endregion
# region query single row
def p_query_single_row(t):
"""
query_single_row : insertStmt_SR S_PUNTOCOMA
| updateStmt_SR S_PUNTOCOMA
| deleteStmt_SR S_PUNTOCOMA
| selectStmt_SR S_PUNTOCOMA
| perform S_PUNTOCOMA
| execute S_PUNTOCOMA
| get S_PUNTOCOMA
"""
repGrammar.append(t.slice)
# insert
# TODO: isBlock
def p_insertStmt_single_row(t):
"""insertStmt_SR : R_INSERT isblock_f R_INTO ID paramsColumn R_VALUES S_PARIZQ paramsList S_PARDER R_RETURNING returnParams R_INTO strict ID """
repGrammar.append(t.slice)
# update
def p_updateStmt_single_row(t):
"""
updateStmt_SR : R_UPDATE isblock_f fromBody R_SET updateCols whereCl R_RETURNING returnParams R_INTO strict ID
"""
repGrammar.append(t.slice)
# delete
def p_deleteStmt_single_row(t):
"""
deleteStmt_SR : R_DELETE isblock_f fromCl whereCl R_RETURNING returnParams R_INTO strict ID
"""
repGrammar.append(t.slice)
# select
def p_selectStmt_single_row_1(t):
"""
selectStmt_SR : R_SELECT isblock_f R_DISTINCT selectParams R_INTO strict ID fromCl whereCl groupByCl limitCl
"""
repGrammar.append(t.slice)
def p_selectStmt_single_row_2(t):
"""
selectStmt_SR : R_SELECT isblock_f selectParams R_INTO strict ID fromCl whereCl groupByCl limitCl
"""
repGrammar.append(t.slice)
def p_selectStmt_union_single_row(t):
"""selectStmt_SR : selectStmt_SR R_UNION allOpt selectStmt_SR"""
repGrammar.append(t.slice)
def p_selectStmt_intersect_single_row(t):
"""selectStmt_SR : selectStmt_SR R_INTERSECT allOpt selectStmt_SR"""
repGrammar.append(t.slice)
def p_selectStmt_except_single_row(t):
"""selectStmt_SR : selectStmt_SR R_EXCEPT allOpt selectStmt_SR"""
repGrammar.append(t.slice)
def p_selectStmt_agrupacion_single_row(t):
"""selectStmt_SR : S_PARIZQ selectStmt_SR S_PARDER"""
repGrammar.append(t.slice)
def p_selectstmt_only_params_single_row(t):
"""selectStmt_SR : R_SELECT isblock_f selectParams R_INTO strict ID"""
repGrammar.append(t.slice)
# endregion
# region perform
def p_perform(t):
"""perform : R_PERFORM STRING """
# endregion
# region GET
def p_get(t):
"""get : R_GET current_ R_DIAGNOSTIC ID assignment_operator item """
def p_current_g(t):
"""
current_ : R_CURRENT
|
"""
def p_item(t):
"""item : R_ROW_COUNT"""
# endregion
# region strict
def p_strict(t):
"""
strict : R_STRICT
|
"""
# endregion
# region returnParams
def p_returnparams_all(t):
"""returnParams : O_PRODUCTO"""
def p_returnparams_params(t):
"""returnParams : returnlist"""
# TODO: isBlock optAlias
# En caso de errores cambiar returnlistParams -> expresion
def p_returnlist_list(t):
"""returnlist : returnlist S_COMA returnlistParams optAlias"""
# En caso de errores cambiar returnlistParams -> expresion
def p_returnlist_u(t):
"""returnlist : returnlistParams optAlias"""
def p_returnlistParams_1(t):
"""returnlistParams : expresion"""
def p_returnlistParams_2(t):
"""returnlistParams : ID S_PUNTO O_PRODUCTO"""
# endregion
# region EXCEPTION
def p_exception_stmts(t):
"""
exception_stmts : R_EXCEPTION when_stmt
|
"""
def p_while_stmt_exp(t):
"""
when_stmt : R_WHEN expBoolOR R_THEN handler_statements_opt
| when_stmt R_WHEN expBoolOR R_THEN handler_statements_opt
"""
def p_expBoolOR(t):
"""
expBoolOR : expBoolOR OC_OR expBoolExcept
"""
def p_expBoolOR_u(t):
"""
expBoolOR : expBoolExcept
"""
def p_expBoolExcept(t):
"""
expBoolExcept : ID
| R_SQLSTATE STRING
| R_OTHERS
"""
def p_handler_statements_opt(t):
"""
handler_statements_opt : handler_statements
|
"""
def p_handler_statements(t):
"""
handler_statements : handler_statements handler_statement
| handler_statement
"""
def p_handler_statement(t):
"""
handler_statement : R_RAISE R_NOTICE STRING S_PUNTOCOMA
| R_RAISE R_EXCEPTION STRING S_PUNTOCOMA
| R_RETURN return_stmt
| R_NULL S_PUNTOCOMA
"""
# endregion
# region Fase 1
def p_stmt(t):
"""
stmt : createStmt S_PUNTOCOMA
| showStmt S_PUNTOCOMA
| alterStmt S_PUNTOCOMA
| dropStmt S_PUNTOCOMA
| insertStmt S_PUNTOCOMA
| updateStmt S_PUNTOCOMA
| deleteStmt S_PUNTOCOMA
| truncateStmt S_PUNTOCOMA
| useStmt S_PUNTOCOMA
| selectStmt S_PUNTOCOMA
"""
t[0] = t[1]
global isBlock
isBlock = True
repGrammar.append(t.slice)
# Statement para el CREATE
# region CREATE
def p_id_string(t):
"""
idOrString : ID
| STRING
| CHARACTER
"""
t[0] = t.slice[1].value
repGrammar.append(t.slice)
def p_createstmt(t):
"""createStmt : R_CREATE createBody"""
t[0] = t[2]
repGrammar.append(t.slice)
def p_createbody(t):
"""
createBody : createOpts
"""
t[0] = t[1]
repGrammar.append(t.slice)
def p_createopts_table(t):
"""createOpts : R_TABLE ifNotExists idOrString S_PARIZQ createTableList S_PARDER inheritsOpt """
t[0] = code.CreateTable(
t[2], t[3], t[7], t.slice[1].lineno, t.slice[1].lexpos, t[5]
)
repGrammar.append(t.slice)
def p_createopts_db(t):
"""
createOpts : orReplace R_DATABASE ifNotExists idOrString createOwner createMode
"""
t[0] = code.CreateDatabase(
t[1], t[3], t[4], t[5], t[6], t.slice[2].lineno, t.slice[2].lexpos
)
repGrammar.append(t.slice)
# TODO: hacer el where
def p_createopts_index(t):
    """
    createOpts : indexUnique R_INDEX indexName R_ON ID usingMethod S_PARIZQ indexList S_PARDER whereCl
    """
    # Build a CREATE INDEX node.  NOTE(review): the whereCl production
    # (t[10]) is parsed but discarded, and an empty string is passed in
    # its place — matches the "hacer el where" TODO above.
    t[0] = code.CreateIndex(
        t[1],                  # "" or "UNIQUE"
        t[3],                  # index name ("" when omitted)
        t.slice[5].value,      # table name
        t[6],                  # USING method text ("" when omitted)
        "",                    # placeholder for the WHERE clause (TODO)
        t.slice[2].lineno,
        t.slice[2].lexpos,
        t[8],                  # comma-joined column/expression list
    )
    repGrammar.append(t.slice)
def p_indexName(t):
"""
indexName : ID
"""
t[0] = t.slice[1].value
repGrammar.append(t.slice)
def p_indexName_n(t):
"""
indexName :
"""
t[0] = ""
repGrammar.append(t.slice)
def p_indexList(t):
"""
indexList : indexList S_COMA columnIndex
"""
t[1] += ", " + t[3]
t[0] = t[1]
repGrammar.append(t.slice)
def p_indexList2(t):
"""
indexList : columnIndex
"""
t[0] = t[1]
repGrammar.append(t.slice)
def p_columnIndex(t):
    """
    columnIndex : columnOpt indexOrder indexNull
    """
    # Assemble "<column> [ASC|DESC] [NULLS FIRST|LAST]", including only
    # the clauses that were actually present (non-empty strings).
    pieces = [t[1]]
    for clause in (t[2], t[3]):
        if clause != "":
            pieces.append(clause)
    t[0] = " ".join(pieces)
    repGrammar.append(t.slice)
def p_index_columnOpt(t):
"""
columnOpt : ID
"""
t[0] = t.slice[1].value
repGrammar.append(t.slice)
def p_index_functionIndex(t):
"""
columnOpt : ID S_PARIZQ ID S_PARDER
"""
t[0] = t.slice[1].value + "(" + t.slice[3].value + ")"
repGrammar.append(t.slice)
def p_index_agrupacion(t):
"""
columnOpt : S_PARIZQ columnOpt S_PARDER
"""
t[0] = "(" + t[2] + ")"
repGrammar.append(t.slice)
def p_usingMethod(t):
"""
usingMethod : R_USING R_HASH
| R_USING R_BTREE
| R_USING R_GIST
| R_USING R_SPGIST
| R_USING R_GIN
| R_USING R_BRIN
"""
t[0] = t.slice[1].value + " " + t.slice[2].value
repGrammar.append(t.slice)
def p_usingMethod_none(t):
"""
usingMethod :
"""
t[0] = ""
repGrammar.append(t.slice)
def p_indexOrder(t):
    """
    indexOrder : R_DESC
        | R_ASC
        |
    """
    # Empty production (len(t) == 1) means no ordering keyword was given.
    t[0] = "" if len(t) == 1 else t.slice[1].value
    repGrammar.append(t.slice)
def p_indexNull(t):
"""
indexNull : R_NULLS firstLast
|
"""
if len(t) == 1:
t[0] = ""
else:
t[0] = t.slice[1].value + " " + t[2]
repGrammar.append(t.slice)
def p_indexFirstLast(t):
"""
firstLast : R_FIRST
| R_LAST
|
"""
if len(t) == 1:
t[0] = ""
else:
t[0] = t.slice[1].value
repGrammar.append(t.slice)
def p_createindex_unique(t):
"""
indexUnique : R_UNIQUE
|
"""
if len(t) == 1:
t[0] = ""
else:
t[0] = t.slice[1].value
repGrammar.append(t.slice)
def p_replace_true(t):
"""
orReplace : R_OR R_REPLACE
"""
t[0] = "OR REPLACE"
repGrammar.append(t.slice)
def p_replace_false(t):
"""
orReplace :
"""
t[0] = ""
repGrammar.append(t.slice)
def p_createopts_type(t):
"""
createOpts : R_TYPE ifNotExists ID R_AS R_ENUM S_PARIZQ paramsList S_PARDER
"""
t[0] = code.CreateType(t[2], t[3], t.slice[1].lineno, t.slice[1].lexpos, t[7])
repGrammar.append(t.slice)
def p_ifnotexists_true(t):
"""
ifNotExists : R_IF R_NOT R_EXISTS
"""
t[0] = "IF NOT EXISTS"
repGrammar.append(t.slice)
def p_ifnotexists_false(t):
"""
ifNotExists :
"""
t[0] = ""
repGrammar.append(t.slice)
def p_inheritsOpt(t):
"""
inheritsOpt : R_INHERITS S_PARIZQ ID S_PARDER
"""
t[0] = t.slice[1].value
t[0] += t.slice[2].value
t[0] += t.slice[3].value
t[0] += t.slice[4].value
repGrammar.append(t.slice)
def p_inheritsOpt_none(t):
"""
inheritsOpt :
"""
t[0] = ""
repGrammar.append(t.slice)
def p_createowner(t):
"""
createOwner : R_OWNER ID
| R_OWNER STRING
"""
t[0] = t.slice[1].value + " " + t.slice[2].value
repGrammar.append(t.slice)
def p_createowner_asg(t):
"""
createOwner : R_OWNER S_IGUAL ID
| R_OWNER S_IGUAL STRING
"""
t[0] = "OWNER = " + t.slice[3].value
repGrammar.append(t.slice)
def p_createowner_none(t):
"""
createOwner :
"""
t[0] = ""
repGrammar.append(t.slice)
def p_createmode(t):
"""
createMode : R_MODE INTEGER
"""
t[0] = "MODE " + str(t.slice[2].value)
repGrammar.append(t.slice)
def p_createMode_asg(t):
"""
createMode : R_MODE S_IGUAL INTEGER
"""
t[0] = "MODE = " + str(t.slice[3].value)
repGrammar.append(t.slice)
def p_createmode_none(t):
"""
createMode :
"""
t[0] = ""
repGrammar.append(t.slice)
def p_createtable_list(t):
"""createTableList : createTableList S_COMA createTable"""
t[1] += ", " + t[3]
t[0] = t[1]
repGrammar.append(t.slice)
def p_createtable_u(t):
"""createTableList : createTable"""
t[0] = t[1]
repGrammar.append(t.slice)
def p_createTable_id(t):
"""
createTable : ID types createColumns
"""
t[0] = t.slice[1].value + " " + t[2] + " " + t[3]
repGrammar.append(t.slice)
def p_createTable(t):
"""
createTable : createConstraint
| createUnique
| createPrimary
| createForeign
"""
t[0] = t[1]
repGrammar.append(t.slice)
def p_createColumNs(t):
"""
createColumns : colOptionsList
"""
t[0] = t[1]
repGrammar.append(t.slice)
def p_createColumNs_none(t):
"""
createColumns :
"""
t[0] = ""
repGrammar.append(t.slice)
def p_createConstraint(t):
"""createConstraint : constrName R_CHECK S_PARIZQ booleanCheck S_PARDER"""
t[0] = t[1] + " CHECK (" + t[4] + ")"
repGrammar.append(t.slice)
def p_createUnique(t):
"""createUnique : R_UNIQUE S_PARIZQ idList S_PARDER"""
t[0] = "UNIQUE (" + t[3] + ")"
repGrammar.append(t.slice)
def p_createPrimary(t):
"""createPrimary : R_PRIMARY R_KEY S_PARIZQ idList S_PARDER"""
t[0] = "PRIMARY KEY (" + t[4] + ")"
repGrammar.append(t.slice)
def p_createForeign(t):
"""
createForeign : constrName R_FOREIGN R_KEY S_PARIZQ idList S_PARDER R_REFERENCES ID S_PARIZQ idList S_PARDER
"""
t[0] = (
t[1]
+ " FOREIGN KEY ("
+ t[5]
+ ") REFERENCES "
+ t.slice[8].value
+ " ("
+ t[10]
+ ")"
)
repGrammar.append(t.slice)
def p_constrName(t):
"""
constrName : R_CONSTRAINT ID
"""
t[0] = "CONSTRAINT " + t.slice[2].value
repGrammar.append(t.slice)
def p_constrName_none(t):
"""
constrName :
"""
t[0] = ""
repGrammar.append(t.slice)
def p_id_list(t):
"""idList : idList S_COMA ID"""
t[1] += ", " + t.slice[3].value
t[0] = t[1]
repGrammar.append(t.slice)
def p_id_u(t):
"""idList : ID"""
t[0] = t.slice[1].value
repGrammar.append(t.slice)
def p_types(t):
"""
types : T_DOUBLE T_PRECISION
"""
t[0] = "DOUBLE PRECISION"
repGrammar.append(t.slice)
def p_types_simple(t):
"""
types : T_SMALLINT
| T_INTEGER
| T_BIGINT
| T_REAL
| ID
| T_MONEY
| T_TEXT
| T_BOOLEAN
| R_TIMESTAMP
| T_DATE
| T_TIME
"""
t[0] = t.slice[1].value
repGrammar.append(t.slice)
# TODO: Cambiar el optParams
def p_types_params(t):
"""
types : T_DECIMAL optParams
| T_NUMERIC optParams
| T_VARCHAR optParams
| T_CHARACTER optParams
| T_CHAR optParams
"""
t[0] = t.slice[1].value + t[2]
repGrammar.append(t.slice)
def p_types_var(t):
"""
types : T_CHARACTER T_VARYING optParams
"""
t[0] = t.slice[1].value + t.slice[2].value + t[3]
repGrammar.append(t.slice)
def p_intervalFields(t):
"""
intervalFields : R_YEAR
| R_MONTH
| R_DAY
| R_HOUR
| R_MINUTE
| R_SECOND
"""
t[0] = t.slice[1].value
repGrammar.append(t.slice)
def p_intervalFields_none(t):
"""
intervalFields :
"""
t[0] = ""
repGrammar.append(t.slice)
def p_optParams(t):
"""optParams : S_PARIZQ literalList S_PARDER"""
t[0] = "(" + t[2] + ")"
repGrammar.append(t.slice)
def p_optParams_none(t):
"""optParams : """
t[0] = ""
repGrammar.append(t.slice)
def p_colOptions_list(t):
"""colOptionsList : colOptionsList colOptions"""
t[1] += " " + t[2]
t[0] = t[1]
repGrammar.append(t.slice)
def p_colOptions_u(t):
"""colOptionsList : colOptions"""
t[0] = t[1]
repGrammar.append(t.slice)
def p_colOptions(t):
"""
colOptions : defaultVal
| nullOpt
| constraintOpt
| primaryOpt
| referencesOpt
"""
t[0] = t[1]
repGrammar.append(t.slice)
# TODO: change literal handling
def p_defaultVal(t):
"""defaultVal : R_DEFAULT literal"""
t[0] = "DEFAULT " + t[2].temp
repGrammar.append(t.slice)
def p_nullOpt_true(t):
"""
nullOpt : R_NOT R_NULL
"""
t[0] = "NOT NULL"
repGrammar.append(t.slice)
def p_nullOpt_false(t):
"""
nullOpt : R_NULL
"""
t[0] = "NULL"
repGrammar.append(t.slice)
# cambiar literal
def p_constraintOpt_unique(t):
"""
constraintOpt : constrName R_UNIQUE
"""
t[0] = t[1] + " UNIQUE"
repGrammar.append(t.slice)
def p_constraintOpt_check(t):
"""
constraintOpt : constrName R_CHECK S_PARIZQ booleanCheck S_PARDER
"""
t[0] = t[1] + " CHECK(" + t[4] + ")"
repGrammar.append(t.slice)
def p_primaryOpt(t):
"""primaryOpt : R_PRIMARY R_KEY"""
t[0] = "PRIMARY KEY"
repGrammar.append(t.slice)
def p_referencesOpt(t):
"""referencesOpt : R_REFERENCES ID"""
t[0] = "REFERENCES " + t.slice[2].value
repGrammar.append(t.slice)
# endregion CREATE
# Grammar for expressions
# region Expresiones
def p_expresion(t):
"""
expresion : datatype
| expBool
"""
global isBlock
t[0] = t[1]
repGrammar.append(t.slice)
def p_expresion_(t):
"""
expresion : S_PARIZQ selectStmt S_PARDER
"""
if isinstance(t[2], select.Select):
t[0] = code.SelectFirstValue(newTemp(), t[2])
else:
t[0] = code.SelectOnlyParamsFirst(newTemp(), t[2])
repGrammar.append(t.slice)
def p_funcCall_1(t):
    """
    funcCall : ID S_PARIZQ paramsList S_PARDER
    """
    # Function call with one or more arguments; t[3] is the list of
    # datatype expressions accumulated by paramsList.
    global isBlock
    t[0] = code.FunctionCall(
        t.slice[1].value, t[3], isBlock, newTemp(), t.slice[1].lineno, t.slice[1].lexpos
    )
    repGrammar.append(t.slice)
def p_funcCall_2(t):
    """
    funcCall : ID S_PARIZQ S_PARDER
        | R_NOW S_PARIZQ S_PARDER
    """
    # Zero-argument call.  NOW() is normalized to the lowercase name
    # "now"; any other identifier is passed through unchanged.
    v = t[1]
    if t[1] == "NOW":
        v = "now"
    global isBlock
    t[0] = code.FunctionCall(
        v, None, isBlock, newTemp(), t.slice[1].lineno, t.slice[1].lexpos
    )
    repGrammar.append(t.slice)
def p_funcCall_3(t):
"""
funcCall : R_COUNT S_PARIZQ datatype S_PARDER
| R_SUM S_PARIZQ datatype S_PARDER
| R_PROM S_PARIZQ datatype S_PARDER
"""
t[0] = code.FunctionCall(
t.slice[1].value.lower(),
[t[3]],
isBlock,
newTemp(),
t.slice[1].lineno,
t.slice[1].lexpos,
)
repGrammar.append(t.slice)
def p_funcCall_3_count(t):
"""
funcCall : R_COUNT S_PARIZQ O_PRODUCTO S_PARDER
"""
prod = expression.C3D("", "*", t.slice[3].lineno, t.slice[3].lexpos)
t[0] = code.FunctionCall(
"count", [prod], isBlock, newTemp(), t.slice[1].lineno, t.slice[1].lexpos
)
repGrammar.append(t.slice)
def p_extract_1(t):
"""
extract : R_EXTRACT S_PARIZQ optsExtract R_FROM timeStamp S_PARDER
"""
t[0] = code.FunctionCall(
"extract",
[t[3], t[5][0], t[5][1]],
isBlock,
newTemp(),
t.slice[1].lineno,
t.slice[1].lexpos,
)
repGrammar.append(t.slice)
# TODO: Extract column
def p_extract_2(t):
    """
    extract : R_EXTRACT S_PARIZQ optsExtract R_FROM columnName S_PARDER
    """
    # EXTRACT(field FROM column).  An empty C3D is inserted where the
    # literal form (p_extract_1) carries the timestamp kind — presumably
    # so FunctionCall can tell the two shapes apart; TODO confirm.
    temp = expression.C3D("", "", t.slice[1].lineno, t.slice[1].lexpos)
    t[0] = code.FunctionCall(
        "extract",
        [t[3], temp, t[5]],
        isBlock,
        newTemp(),
        t.slice[1].lineno,
        t.slice[1].lexpos,
    )
    repGrammar.append(t.slice)
def p_timeStamp(t):
"""
timeStamp : R_TIMESTAMP STRING
| R_INTERVAL STRING
"""
t[0] = [
expression.C3D("", "'" + t[1] + "'", t.slice[1].lineno, t.slice[1].lexpos),
expression.C3D("", t[2], t.slice[1].lineno, t.slice[1].lexpos),
]
repGrammar.append(t.slice)
def p_optsExtract(t):
"""
optsExtract : R_YEAR
| R_MONTH
| R_DAY
| R_HOUR
| R_MINUTE
| R_SECOND
"""
t[0] = expression.C3D("", "'" + t[1] + "'", t.slice[1].lineno, t.slice[1].lexpos)
repGrammar.append(t.slice)
def p_datePart(t):
"""
datePart : R_DATE_PART S_PARIZQ STRING S_COMA dateSource S_PARDER
"""
temp = expression.C3D("", t[3], t.slice[1].lineno, t.slice[1].lexpos)
t[0] = code.FunctionCall(
"date_part",
[temp, t[5][0], t[5][1]],
isBlock,
newTemp(),
t.slice[1].lineno,
t.slice[1].lexpos,
)
repGrammar.append(t.slice)
def p_dateSource(t):
    """
    dateSource : R_TIMESTAMP STRING
        | T_DATE STRING
        | T_TIME STRING
        | R_INTERVAL STRING
        | R_NOW S_PARIZQ S_PARDER
    """
    # Produce a [kind, value] pair of C3D nodes for date_part().
    v = t[1]
    if t[1] == "NOW":
        v = "now"
    # NOTE(review): in the NOW() alternative t[2] is the "(" token, not a
    # literal string — confirm downstream code tolerates that value.
    t[0] = [
        expression.C3D("", "'" + v + "'", t.slice[1].lineno, t.slice[1].lexpos),
        expression.C3D("", t[2], t.slice[1].lineno, t.slice[1].lexpos),
    ]
    repGrammar.append(t.slice)
def p_current(t):
"""
current : R_CURRENT_DATE
| R_CURRENT_TIME
"""
repGrammar.append(t.slice)
def p_current_1(t):
"""
current : timeStamp
"""
repGrammar.append(t.slice)
def p_literal_list(t):
"""literalList : literalList S_COMA literal"""
t[1] += ", " + t[3].temp
t[0] = t[1]
repGrammar.append(t.slice)
def p_literal_u(t):
"""literalList : literal"""
t[0] = t[1].temp
repGrammar.append(t.slice)
def p_literal(t):
"""
literal : INTEGER
| STRING
| DECIMAL
| CHARACTER
| R_NULL
"""
val = t.slice[1].value
t[0] = expression.C3D("", val, t.slice[1].lineno, t.slice[1].lexpos)
repGrammar.append(t.slice)
def p_literal_bool(t):
    """
    literal : R_TRUE
        | R_FALSE
    """
    # Map the SQL boolean keyword onto Python's literal spelling.
    val = "True" if t[1] == "TRUE" else "False"
    t[0] = expression.C3D("", val, t.slice[1].lineno, t.slice[1].lexpos)
    repGrammar.append(t.slice)
def p_params_list(t):
"""paramsList : paramsList S_COMA datatype"""
t[1].append(t[3])
t[0] = t[1]
repGrammar.append(t.slice)
def p_params_u(t):
"""paramsList : datatype"""
t[0] = [t[1]]
repGrammar.append(t.slice)
def p_datatype_operadores_binarios1(t):
"""
datatype : datatype O_SUMA datatype
| datatype O_RESTA datatype
| datatype O_PRODUCTO datatype
| datatype O_DIVISION datatype
| datatype O_EXPONENTE datatype
| datatype O_MODULAR datatype
"""
t[0] = code.BinaryExpression(
newTemp(), t[1], t[3], t[2], isBlock, t[1].row, t[1].column
)
repGrammar.append(t.slice)
def p_datatype_operadores_binarios2(t):
"""
datatype : datatype OC_CONCATENAR datatype
"""
t[0] = code.BinaryExpression(
newTemp(), t[1], t[3], t[2], isBlock, t[1].row, t[1].column
)
repGrammar.append(t.slice)
def p_datatype_case_when(t):
"""
datatype : R_CASE caseList optElse R_END
"""
repGrammar.append(t.slice)
def p_case_list(t):
"""
caseList : caseList caseWhen
| caseWhen
"""
repGrammar.append(t.slice)
def p_caseWhen(t):
"""caseWhen : R_WHEN expBool R_THEN literal"""
repGrammar.append(t.slice)
def p_caseWhen_2(t):
"""optElse : R_ELSE literal
|
"""
repGrammar.append(t.slice)
def p_datatype_operadores_unarios(t):
"""
datatype : O_RESTA datatype %prec UO_RESTA
| O_SUMA datatype %prec UO_SUMA
"""
t[0] = code.UnaryExpression(newTemp(), t[2], t[1], isBlock, t[2].row, t[2].column)
repGrammar.append(t.slice)
def p_datatype_operandos(t):
"""
datatype : columnName
| literal
| funcCall
| extract
| datePart
| current
"""
t[0] = t[1]
repGrammar.append(t.slice)
def p_datatype_agrupacion(t):
"""
datatype : S_PARIZQ datatype S_PARDER
"""
global isBlock
t[0] = code.Aggrupation(t[2], isBlock, t.slice[1].lineno, t.slice[1].lexpos)
repGrammar.append(t.slice)
def p_expCompBinario_1(t):
"""
expComp : datatype OL_MENORQUE datatype
| datatype OL_MAYORQUE datatype
| datatype OL_MAYORIGUALQUE datatype
| datatype OL_MENORIGUALQUE datatype
| datatype S_IGUAL datatype
| datatype OL_DISTINTODE datatype
"""
t[0] = code.BinaryExpression(
newTemp(), t[1], t[3], t[2], isBlock, t[1].row, t[1].column
)
repGrammar.append(t.slice)
def p_expCompBinario_2(t):
"""
expComp : datatype R_IS R_DISTINCT R_FROM datatype
"""
t[0] = code.BinaryExpression(
newTemp(), t[1], t[5], "!=", isBlock, t[1].row, t[1].column
)
repGrammar.append(t.slice)
def p_expCompBinario_3(t):
"""
expComp : datatype R_IS R_NOT R_DISTINCT R_FROM datatype
"""
t[0] = code.BinaryExpression(
newTemp(), t[1], t[6], "=", isBlock, t[1].row, t[1].column
)
repGrammar.append(t.slice)
def p_expComp_ternario_1(t):
"""
expComp : datatype R_BETWEEN datatype R_AND datatype
"""
t[0] = code.TernaryExpression(
newTemp(), t[1], t[3], t[5], t[2], isBlock, t[1].row, t[1].column
)
incTemp(2)
repGrammar.append(t.slice)
def p_expComp_ternario_2(t):
    """
    expComp : datatype R_NOT R_BETWEEN datatype R_AND datatype
    """
    # Operator string is "NOT" + "BETWEEN" concatenated without a space —
    # presumably TernaryExpression keys on that exact spelling (the
    # sibling SYMMETRIC rule does the same); TODO confirm.
    t[0] = code.TernaryExpression(
        newTemp(), t[1], t[4], t[6], t[2] + t[3], isBlock, t[1].row, t[1].column
    )
    incTemp(3)
    repGrammar.append(t.slice)
def p_expComp_ternario_3(t):
"""
expComp : datatype R_BETWEEN R_SYMMETRIC datatype R_AND datatype
"""
t[0] = code.TernaryExpression(
newTemp(), t[1], t[4], t[6], t[2] + t[3], isBlock, t[1].row, t[1].column
)
incTemp(6)
repGrammar.append(t.slice)
def p_expComp_unario_1(t):
"""
expComp : datatype R_ISNULL
| datatype R_NOTNULL
"""
t[0] = code.UnaryExpression(newTemp(), t[1], t[2], isBlock, t[1].row, t[1].column)
repGrammar.append(t.slice)
def p_expComp_unario_2(t):
"""
expComp : datatype R_IS R_NULL
| datatype R_IS R_TRUE
| datatype R_IS R_FALSE
| datatype R_IS R_UNKNOWN
"""
t[0] = code.UnaryExpression(
newTemp(), t[1], t[2] + t[3], isBlock, t[1].row, t[1].column
)
repGrammar.append(t.slice)
def p_expComp_unario_3(t):
"""
expComp : datatype R_IS R_NOT R_NULL
| datatype R_IS R_NOT R_TRUE
| datatype R_IS R_NOT R_FALSE
| datatype R_IS R_NOT R_UNKNOWN
"""
t[0] = code.UnaryExpression(
newTemp(), t[1], t[2] + t[3] + t[4], isBlock, t[1].row, t[1].column
)
repGrammar.append(t.slice)
def p_boolean_1(t):
"""
boolean : R_EXISTS S_PARIZQ selectStmt S_PARDER
"""
t[0] = code.ExistsRelationalOperation(newTemp(), t[3])
repGrammar.append(t.slice)
def p_boolean_2(t):
"""
boolean : datatype R_IN S_PARIZQ selectStmt S_PARDER
"""
# temp, colData, optNot , select
t[0] = code.inRelationalOperation(newTemp(), t[1], "", t[4])
repGrammar.append(t.slice)
def p_boolean_3(t):
"""
boolean : datatype R_NOT R_IN S_PARIZQ selectStmt S_PARDER
"""
t[0] = code.inRelationalOperation(newTemp(), t[1], t[2] + " ", t[5])
repGrammar.append(t.slice)
def p_boolean_4(t):
"""
boolean : expComp
"""
t[0] = t[1]
repGrammar.append(t.slice)
def p_expBool_1(t):
"""
expBool : expBool R_AND expBool
| expBool R_OR expBool
"""
t[0] = code.BinaryExpression(
newTemp(), t[1], t[3], t[2], isBlock, t[1].row, t[1].column
)
repGrammar.append(t.slice)
def p_expBool_2(t):
"""
expBool : R_NOT expBool
"""
t[0] = code.UnaryExpression(
newTemp(), t[2], t[1], isBlock, t.slice[1].lineno, t.slice[1].lexpos
)
repGrammar.append(t.slice)
def p_expBool_3(t):
"""
expBool : S_PARIZQ expBool S_PARDER
"""
t[0] = code.Aggrupation(t[2], isBlock, t.slice[1].lineno, t.slice[1].lexpos)
repGrammar.append(t.slice)
def p_expBool_5(t):
"""
expBool : expBool optBoolPredicate
"""
t[0] = code.UnaryExpression(newTemp(), t[1], t[2], isBlock, t[1].row, t[1].column)
repGrammar.append(t.slice)
def p_expBool_4(t):
"""
expBool : boolean
"""
t[0] = t[1]
repGrammar.append(t.slice)
def p_optBoolPredicate_1(t):
"""
optBoolPredicate : R_IS R_TRUE
| R_IS R_FALSE
| R_IS R_UNKNOWN
"""
t[0] = t[1] + t[2]
repGrammar.append(t.slice)
def p_optBoolPredicate_2(t):
"""
optBoolPredicate : R_IS R_NOT R_TRUE
| R_IS R_NOT R_FALSE
| R_IS R_NOT R_UNKNOWN
"""
t[0] = t[1] + t[2] + t[3]
repGrammar.append(t.slice)
def p_columnName_id(t):
"""
columnName : ID
"""
global isBlock
t[0] = code.Identifier(t[1], isBlock, t.slice[1].lineno, t.slice[1].lexpos)
repGrammar.append(t.slice)
def p_columnName_table_id(t):
"""
columnName : ID S_PUNTO ID
"""
global isBlock
t[0] = code.Identifier(
t[1] + "." + t[3], isBlock, t.slice[1].lineno, t.slice[1].lexpos
)
repGrammar.append(t.slice)
# If errors occur, uncomment this method
'''
def p_columnName_table_idAll(t):
"""
columnName : ID S_PUNTO O_PRODUCTO
"""
t[0] = expression.TableAll(t[1], t.slice[1].lineno, t.slice[1].lexpos)
'''
def p_booleanCheck_1(t):
"""
booleanCheck : idOrLiteral OL_MENORQUE idOrLiteral
| idOrLiteral OL_MAYORQUE idOrLiteral
| idOrLiteral OL_MAYORIGUALQUE idOrLiteral
| idOrLiteral OL_MENORIGUALQUE idOrLiteral
| idOrLiteral S_IGUAL idOrLiteral
| idOrLiteral OL_DISTINTODE idOrLiteral
"""
t[0] = t[1] + t.slice[2].value + t[3]
repGrammar.append(t.slice)
def p_booleanCheck_2(t):
"""
booleanCheck : idOrLiteral R_IS R_DISTINCT R_FROM idOrLiteral
"""
t[0] = t[1] + " IS DISTINCT FROM " + t[5]
repGrammar.append(t.slice)
def p_booleanCheck_3(t):
"""
booleanCheck : idOrLiteral R_IS R_NOT R_DISTINCT R_FROM idOrLiteral
"""
t[0] = t[1] + " IS NOT DISTINCT FROM " + t[6]
repGrammar.append(t.slice)
def p_idOrLiteral(t):
"""
idOrLiteral : ID
| STRING
| CHARACTER
| R_TRUE
| R_FALSE
"""
t[0] = t.slice[1].value
repGrammar.append(t.slice)
def p_idOrLiteral_1(t):
"""
idOrLiteral : INTEGER
| DECIMAL
"""
t[0] = str(t.slice[1].value)
repGrammar.append(t.slice)
# endregion
# Statement para el ALTER
# region ALTER
def p_alterStmt(t):
    """alterStmt : R_ALTER R_DATABASE idOrString alterDb
    | R_ALTER R_TABLE idOrString alterTableList
    | R_ALTER R_INDEX ifExists idOrString R_RENAME R_TO idOrString
    | R_ALTER R_INDEX ifExists idOrString R_ALTER column idOrString idOrNumber
    """
    # Dispatch on the keyword following ALTER.
    if t[2] == "DATABASE":
        # alterDb yields [action, operand]: RENAME target or new owner.
        t[0] = code.AlterDataBase(
            t[4][0], t[3], t[4][1], t.slice[1].lineno, t.slice[1].lexpos
        )
    elif t[2] == "TABLE":
        t[0] = code.AlterTable(t[3], t.slice[1].lineno, t.slice[1].lexpos, t[4])
    else:
        # ALTER INDEX: t[5] distinguishes RENAME TO from the
        # ALTER [COLUMN] form, which carries an extra idOrNumber (t[8]).
        if t[5] == "RENAME":
            t[0] = code.AlterIndex(
                t[3], t[4], t[7], t.slice[1].lineno, t.slice[1].lexpos
            )
        else:
            t[0] = code.AlterIndex(
                t[3], t[4], t[7], t.slice[1].lineno, t.slice[1].lexpos, t[8]
            )
    repGrammar.append(t.slice)
def p_column(t):
"""column : R_COLUMN
|
"""
repGrammar.append(t.slice)
def p_idOrNumber(t):
"""idOrNumber : ID
| INTEGER
"""
t[0] = t.slice[1].value
repGrammar.append(t.slice)
def p_alterDb(t):
"""alterDb : R_RENAME R_TO idOrString
| R_OWNER R_TO ownerOPts
"""
t[0] = [t[1], t[3]]
repGrammar.append(t.slice)
def p_ownerOpts(t):
"""
ownerOPts : idOrString
| R_CURRENT_USER
| R_SESSION_USER
"""
t[0] = t[1]
repGrammar.append(t.slice)
def p_alterTableList(t):
"""
alterTableList : alterTableList S_COMA alterTable
"""
t[1] += ", " + t[3]
t[0] = t[1]
repGrammar.append(t.slice)
def p_alterTableList_u(t):
"""
alterTableList : alterTable
"""
t[0] = t[1]
repGrammar.append(t.slice)
def p_alterTable(t):
"""
alterTable : R_ADD alterAdd
| R_ALTER alterAlter
| R_DROP alterDrop
| R_RENAME alterRename
"""
t[0] = t[1] + " " + t[2]
repGrammar.append(t.slice)
def p_alterAdd_column(t):
"""
alterAdd : R_COLUMN ID types
"""
t[0] = t[1] + " " + t[2] + " " + t[3]
repGrammar.append(t.slice)
def p_alterAdd_constraint(t):
"""
alterAdd : createConstraint
| createPrimary
| createForeign
"""
t[0] = t[1]
repGrammar.append(t.slice)
def p_alterAdd_unique(t):
"""
alterAdd : constrName R_UNIQUE S_PARIZQ ID S_PARDER
"""
t[0] = t[1] + " UNIQUE(" + t[4] + ")"
repGrammar.append(t.slice)
def p_alterAlter(t):
"""
alterAlter : R_COLUMN ID R_SET nullOpt
| R_COLUMN ID R_SET defaultVal
| R_COLUMN ID R_TYPE types
"""
t[0] = t[1] + " " + t[2] + " " + t[3] + " " + t[4]
repGrammar.append(t.slice)
def p_alterDrop(t):
"""
alterDrop : R_CONSTRAINT ID
| R_COLUMN ID
"""
t[0] = t[1] + " " + t[2]
repGrammar.append(t.slice)
def p_alterRename(t):
"""
alterRename : R_COLUMN ID R_TO ID
"""
t[0] = t[1] + " " + t[2] + " " + t[3] + " " + t[4]
repGrammar.append(t.slice)
# endregion
"""
Statement para el DROP
"""
# region DROP
def p_dropStmt_table(t):
"""
dropStmt : R_DROP R_TABLE ifExists idOrString
"""
t[0] = code.DropTable(t[4], t[3], t.slice[1].lineno, t.slice[1].lexpos)
repGrammar.append(t.slice)
def p_dropStmt_database(t):
"""
dropStmt : R_DROP R_DATABASE ifExists idOrString
"""
t[0] = code.DropDatabase(t[4], t[3], t.slice[1].lineno, t.slice[1].lexpos)
repGrammar.append(t.slice)
def p_dropStmt_index(t):
"""
dropStmt : R_DROP R_INDEX ifExists idList
"""
t[0] = code.DropIndex(t[3], t[4], t.slice[1].lineno, t.slice[1].lexpos)
repGrammar.append(t.slice)
def p_ifExists(t):
"""ifExists : R_IF R_EXISTS"""
t[0] = t[1] + " " + t[2] + " "
repGrammar.append(t.slice)
def p_ifExists_n(t):
"""ifExists :"""
t[0] = ""
repGrammar.append(t.slice)
# endregion
# Statement para el SELECT
# region SELECT
def p_selectStmt_1(t):
    """selectStmt : R_SELECT isblock_f R_DISTINCT selectParams fromCl whereCl groupByCl limitCl orderByCl"""
    global isBlock
    isBlock = True
    # distinct, params, fromcl, wherecl, groupbyCl, limitCl, row, column
    # NOTE(review): the comment above omits orderByCl (t[9]), which IS
    # passed — confirm code.Select's actual parameter order.
    t[0] = code.Select(
        t[3], t[4], t[5], t[6], t[7], t[8], t[9], t.slice[1].lineno, t.slice[1].lexpos
    )
    repGrammar.append(t.slice)
def p_selectStmt_2(t):
"""selectStmt : R_SELECT isblock_f selectParams fromCl whereCl groupByCl limitCl orderByCl"""
global isBlock
isBlock = True
# distinct, params, fromcl, wherecl, groupbyCl, limitCl, row, column
t[0] = code.Select(
"", t[3], t[4], t[5], t[6], t[7], t[8], t.slice[1].lineno, t.slice[1].lexpos
)
repGrammar.append(t.slice)
def p_selectStmt_union(t):
"""selectStmt : selectStmt R_UNION allOpt selectStmt"""
global isBlock
isBlock = True
t[0] = code.Union(t[2], t[1], t[4], t[3], t.slice[2].lineno, t.slice[2].lexpos)
repGrammar.append(t.slice)
def p_selectStmt_intersect(t):
"""selectStmt : selectStmt R_INTERSECT allOpt selectStmt"""
global isBlock
isBlock = True
t[0] = code.Union(t[2], t[1], t[4], t[3], t.slice[2].lineno, t.slice[2].lexpos)
repGrammar.append(t.slice)
def p_selectStmt_except(t):
"""selectStmt : selectStmt R_EXCEPT allOpt selectStmt"""
global isBlock
isBlock = True
t[0] = code.Union(t[2], t[1], t[4], t[3], t.slice[2].lineno, t.slice[2].lexpos)
repGrammar.append(t.slice)
def p_selectStmt_agrupacion(t):
"""selectStmt : S_PARIZQ selectStmt S_PARDER"""
global isBlock
isBlock = True
t[0] = code.Aggrupation(t[2], isBlock, t.slice[1].lineno, t.slice[1].lexpos)
repGrammar.append(t.slice)
def p_fromClause(t):
"""
fromCl : R_FROM tableExp
"""
t[0] = t[1] + " " + t[2]
repGrammar.append(t.slice)
def p_selectstmt_only_params(t):
"""selectStmt : R_SELECT isblock_f selectParams"""
global isBlock
isBlock = True
t[0] = code.SelectOnlyParams(t[3], t.slice[1].lineno, t.slice[1].lexpos)
repGrammar.append(t.slice)
def p_allOpt(t):
"""allOpt : R_ALL"""
t[0] = " " + t[1]
repGrammar.append(t.slice)
def p_allOpt_none(t):
"""allOpt :"""
t[0] = ""
repGrammar.append(t.slice)
# TODO : agregar selectParams
def p_selectparams_all(t):
"""selectParams : O_PRODUCTO"""
t[0] = [expression.C3D("", "*", t.slice[1].lineno, t.slice[1].lexpos)]
repGrammar.append(t.slice)
def p_selectparams_params(t):
"""selectParams : selectList"""
t[0] = t[1]
repGrammar.append(t.slice)
# En caso de errores cambiar selectListParams -> expresion
def p_selectList_list(t):
"""selectList : selectList S_COMA selectListParams optAlias"""
param = code.SelectParam(t[3], t[4], t[3].row, t[3].column)
t[1].append(param)
t[0] = t[1]
repGrammar.append(t.slice)
# En caso de errores cambiar selectListParams -> expresion
def p_selectList_u(t):
"""selectList : selectListParams optAlias"""
param = code.SelectParam(t[1], t[2], t[1].row, t[1].column)
t[0] = [param]
repGrammar.append(t.slice)
def p_selectListParams_1(t):
"""selectListParams : expresion"""
t[0] = t[1]
repGrammar.append(t.slice)
def p_selectListParams_2(t):
"""selectListParams : ID S_PUNTO O_PRODUCTO"""
t[0] = expression.C3D("", t[1] + ".* ", t.slice[1].lineno, t.slice[1].lexpos)
repGrammar.append(t.slice)
def p_optalias_as(t):
"""
optAlias : R_AS idOrString
"""
t[0] = " AS " + t[2]
repGrammar.append(t.slice)
def p_optalias_id(t):
"""
optAlias : idOrString
"""
t[0] = " " + t[1]
repGrammar.append(t.slice)
def p_optalias_none(t):
"""optAlias : """
t[0] = ""
repGrammar.append(t.slice)
def p_tableexp_list(t):
"""tableExp : tableExp S_COMA fromBody """
t[0] = t[1] + ", " + t[3]
repGrammar.append(t.slice)
def p_tableexp_u(t):
"""tableExp : fromBody """
t[0] = t[1]
repGrammar.append(t.slice)
def p_fromBody(t):
"""fromBody : ID optAlias"""
t[0] = t[1] + t[2]
repGrammar.append(t.slice)
def p_tableexp_subq(t):
"""fromBody : S_PARIZQ selectStmt S_PARDER R_AS idOrString"""
t[0] = "(" + t[2].execute(None).value + ") AS " + t[5]
repGrammar.append(t.slice)
def p_whereCl(t):
"""whereCl : R_WHERE expBool"""
t[0] = t[2]
repGrammar.append(t.slice)
def p_whereCl_none(t):
"""whereCl : """
t[0] = expression.C3D("", "", 0, 0)
repGrammar.append(t.slice)
def p_groupByCl_1(t):
"""
groupByCl : R_GROUP R_BY groupList havingCl
"""
t[0] = [t[3], t[4]]
repGrammar.append(t.slice)
def p_groupByCl_2(t):
"""
groupByCl :
"""
t[0] = None
repGrammar.append(t.slice)
def p_groupList_1(t):
"""
groupList : groupList S_COMA columnName
| groupList S_COMA INTEGER
"""
t[1].append(t[3])
t[0] = t[1]
repGrammar.append(t.slice)
def p_groupList_2(t):
"""
groupList : columnName
| INTEGER
"""
t[0] = [t[1]]
repGrammar.append(t.slice)
def p_havingCl_1(t):
"""havingCl : R_HAVING expBool"""
t[0] = "" # TODO: agregar el havingcl
repGrammar.append(t.slice)
def p_havingCl_2(t):
"""havingCl :"""
t[0] = ""
repGrammar.append(t.slice)
def p_orderByCl(t):
"""orderByCl : R_ORDER R_BY orderList"""
t[0] = t[3]
repGrammar.append(t.slice)
def p_orderByCl_n(t):
"""orderByCl : """
t[0] = None
repGrammar.append(t.slice)
def p_orderList(t):
"""orderList : orderList S_COMA orderByElem"""
t[1].append(t[3])
t[0] = t[1]
repGrammar.append(t.slice)
def p_orderList_1(t):
"""orderList : orderByElem"""
t[0] = [t[1]]
repGrammar.append(t.slice)
def p_orderByElem(t):
"""
orderByElem : columnName orderOpts orderNull
| INTEGER orderOpts orderNull
"""
t[0] = [t[1], t[2], t[3]]
repGrammar.append(t.slice)
def p_orderOpts(t):
"""orderOpts : R_ASC
| R_DESC
"""
t[0] = " " + t[1]
repGrammar.append(t.slice)
def p_orderOpts_n(t):
"""orderOpts :"""
t[0] = ""
repGrammar.append(t.slice)
def p_orderNull(t):
"""orderNull : R_NULLS R_FIRST
| R_NULLS R_LAST
"""
t[0] = " " + t[1] + " " + t[2]
repGrammar.append(t.slice)
def p_orderNull_n(t):
"""orderNull :"""
t[0] = ""
repGrammar.append(t.slice)
def p_limitCl(t):
"""limitCl : R_LIMIT INTEGER offsetLimit
| R_LIMIT R_ALL offsetLimit
"""
t[0] = t[1] + " " + str(t[2]) + t[3]
repGrammar.append(t.slice)
def p_limitCl_n(t):
"""limitCl :"""
t[0] = ""
repGrammar.append(t.slice)
def p_offsetLimit(t):
"""offsetLimit : R_OFFSET INTEGER"""
t[0] = " " + t[1] + " " + str(t[2])
repGrammar.append(t.slice)
def p_offsetLimit_n(t):
"""offsetLimit :"""
t[0] = ""
repGrammar.append(t.slice)
# endregion
# Statement para el INSERT
# region INSERT
def p_insertStmt(t):
"""insertStmt : R_INSERT isblock_f R_INTO ID paramsColumn R_VALUES S_PARIZQ paramsList S_PARDER"""
t[0] = code.Insert(t[4], t[5], t[8], t.slice[1].lineno, t.slice[1].lexpos)
repGrammar.append(t.slice)
def p_paramsColumn(t):
"""paramsColumn : S_PARIZQ idList S_PARDER"""
t[0] = "(" + t[2] + ")"
repGrammar.append(t.slice)
def p_paramsColumn_none(t):
"""paramsColumn :"""
t[0] = ""
repGrammar.append(t.slice)
# endregion
# Statement para el UPDATE
# region UPDATE
def p_updateStmt(t):
"""updateStmt : R_UPDATE isblock_f fromBody R_SET updateCols whereCl"""
t[0] = code.Update(t[3], t[5], t[6], t.slice[1].lineno, t.slice[1].lexpos)
repGrammar.append(t.slice)
def p_updateCols_list(t):
"""updateCols : updateCols S_COMA updateVals"""
t[1].append(t[3])
t[0] = t[1]
repGrammar.append(t.slice)
def p_updateCols_u(t):
"""updateCols : updateVals """
t[0] = [t[1]]
repGrammar.append(t.slice)
def p_updateVals(t):
"""updateVals : ID S_IGUAL updateExp"""
t[0] = [t[1], t[3]]
repGrammar.append(t.slice)
def p_updateExp(t):
"""updateExp : datatype"""
t[0] = t[1]
repGrammar.append(t.slice)
def p_updateExp_Default(t):
"""updateExp : R_DEFAULT"""
t[0] = expression.C3D("", t[1], t.slice[1].lineno, t.slice[1].lexpos)
repGrammar.append(t.slice)
# endregion
# Statement para el DELETE y OTROS
# region DELETE, ETC
def p_deleteStmt(t):
"""deleteStmt : R_DELETE isblock_f fromCl whereCl"""
t[0] = code.Delete(t[3], t[4], t.slice[1].lineno, t.slice[1].lexpos)
repGrammar.append(t.slice)
def p_truncateStmt(t):
"""truncateStmt : R_TRUNCATE tableOpt ID"""
t[0] = code.Truncate(t[3], t.slice[1].lineno, t.slice[1].lexpos)
repGrammar.append(t.slice)
def p_tableOpt(t):
"""tableOpt : R_TABLE
|
"""
repGrammar.append(t.slice)
def p_showStmt(t):
"""showStmt : R_SHOW R_DATABASES likeOpt"""
t[0] = code.ShowDataBase(t[3], t.slice[1].lineno, t.slice[1].lexpos)
repGrammar.append(t.slice)
def p_likeOpt(t):
"""likeOpt : R_LIKE STRING"""
t[0] = " LIKE " + t[2] + " "
repGrammar.append(t.slice)
def p_likeOpt_n(t):
"""likeOpt :"""
t[0] = ""
repGrammar.append(t.slice)
def p_useStmt(t):
"""useStmt : R_USE ID"""
t[0] = code.UseDataBase(t[2], t.slice[1].lineno, t.slice[1].lexpos)
repGrammar.append(t.slice)
# endregion
# endregion
syntax_errors = list()
parser = yacc.yacc()
| [
6738,
25064,
1330,
3108,
198,
6738,
28686,
13,
6978,
1330,
26672,
3672,
355,
26672,
198,
198,
6978,
13,
33295,
7,
15908,
7,
6978,
58,
15,
60,
4008,
198,
6738,
2037,
7509,
62,
489,
13,
18170,
1330,
2438,
198,
11748,
2037,
7509,
62,
4... | 1.998927 | 27,954 |
ice_cream = "ice cream"
print("cream" in ice_cream) # print boolean result directly
contains = type here
print(contains)
| [
501,
62,
36277,
796,
366,
501,
8566,
1,
198,
4798,
7203,
36277,
1,
287,
4771,
62,
36277,
8,
220,
220,
220,
1303,
3601,
25131,
1255,
3264,
198,
198,
3642,
1299,
796,
2099,
994,
198,
4798,
7,
3642,
1299,
8,
198
] | 3.125 | 40 |
from testtools import TestCase, ExpectedException
from moto import mock_s3
from mock import Mock, patch
import monoprocessing
| [
198,
6738,
1332,
31391,
1330,
6208,
20448,
11,
1475,
7254,
16922,
198,
6738,
285,
2069,
1330,
15290,
62,
82,
18,
198,
6738,
15290,
1330,
44123,
11,
8529,
198,
11748,
15848,
305,
919,
278,
628
] | 3.764706 | 34 |
from unittest import TestCase
from pyinflux.parser import LineTokenizer, LineParser, parse_lines
from pyinflux.client import Line
from funcparserlib.lexer import Token
from funcparserlib.parser import NoParseError
| [
6738,
555,
715,
395,
1330,
6208,
20448,
198,
6738,
12972,
10745,
22564,
13,
48610,
1330,
6910,
30642,
7509,
11,
6910,
46677,
11,
21136,
62,
6615,
198,
6738,
12972,
10745,
22564,
13,
16366,
1330,
6910,
198,
6738,
1257,
13155,
28198,
8019,
... | 3.616667 | 60 |
import uuid, filecmp, os, sys, requests, time
from datetime import datetime as Datetime
from nose.tools import assert_raises
import synapseclient.client as client
import synapseclient.utils as utils
from synapseclient import Activity, Entity, Project, Folder, File, Data
from synapseclient.exceptions import *
import integration
from integration import schedule_for_cleanup
| [
11748,
334,
27112,
11,
2393,
48991,
11,
28686,
11,
25064,
11,
7007,
11,
640,
198,
6738,
4818,
8079,
1330,
4818,
8079,
355,
16092,
8079,
198,
6738,
9686,
13,
31391,
1330,
6818,
62,
430,
2696,
198,
198,
11748,
6171,
7512,
16366,
13,
163... | 3.84 | 100 |
"""
Generate a tradeoff slip/opp curve for a stock
"""
import string
BASE_DIR="/spare/local/apratap/costanalysis"
import sys
# Candidate range values, kept as strings because they are forwarded verbatim
# to `process` (defined elsewhere); presumably cost/slippage thresholds —
# TODO confirm against process().
crange=["-100","-3","-2.5","-2","-1.5","-1","-0.5","0","0.5","1","1.5","2"]
# Trading dates (YYYYMMDD) to sweep; also split into two halves for
# separate sub-period runs.
date_range=["20100104","20100105","20100106","20100107","20100108","20100111","20100112","20100113","20100114","20100115","20100119","20100120","20100121","20100122","20100125","20100126"]
date_range1=date_range[0:7]
date_range2=date_range[7:]
# NOTE: this is a Python 2 script (the `print d, " ",` statement below).
if __name__=="__main__":
    # Full sweep, then each half, then one run per individual day.
    process(sys.argv[1],date_range,crange)
    process(sys.argv[1],date_range1,crange)
    process(sys.argv[1],date_range2,crange)
    for d in date_range:
        print d, " ",
        process(sys.argv[1],[d],crange)
| [
37811,
198,
8645,
378,
257,
3292,
2364,
13819,
14,
10365,
12133,
329,
257,
4283,
198,
37811,
198,
198,
11748,
4731,
198,
33,
11159,
62,
34720,
35922,
2777,
533,
14,
12001,
14,
499,
10366,
499,
14,
15805,
20930,
1,
198,
11748,
25064,
6... | 2.216301 | 319 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by Hao Luo at 4/22/19
"""Step_refine_pipeline_part03_amino_acids.py
:description : script to refine amino acids
:param :
:returns:
:rtype:
"""
import os
import cobra
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
os.chdir('../../ComplementaryData/Step_03_Compare_Refine/')
print('----- loading model -----')
iHL622 = cobra.io.load_json_model('../../ModelFiles/iHL622.json')
iNF517 = cobra.io.read_sbml_model('../Initial_data/template_models/iNF517.xml')
LbReueri = cobra.io.read_sbml_model('../Initial_data/template_models/Lreuteri_530.xml')
iBT721 = cobra.io.load_json_model('../Step_02_DraftModels/Template/template_models/iBT721_standlized.json')
iML1515 = cobra.io.read_sbml_model('../Initial_data/template_models/iML1515.xml')
iNF517.id = 'iNF517'
LbReueri.id = 'LbReueri'
iBT721.id = 'iBT721'
iML1515.id = 'iML1515'
# %% <amino acids>
unessential = [
'EX_ala__L_e',
'EX_asp__L_e',
'EX_cys__L_e',
'EX_gly_e',
'EX_ile__L_e',
'EX_lys__L_e',
'EX_pro__L_e',
'EX_ser__L_e', ]
essential = [
'EX_arg__L_e',
# 'EX_asn__L_e',
# 'EX_gln__L_e',
'EX_glu__L_e',
'EX_his__L_e',
'EX_leu__L_e',
'EX_met__L_e',
'EX_phe__L_e',
'EX_thr__L_e',
'EX_trp__L_e',
'EX_tyr__L_e',
'EX_val__L_e']
aarealist = unessential + essential
aadic = {'None': []}
for aa in aarealist:
aadic[aa] = []
# For each model: measure growth on the base medium, then re-run with each
# amino-acid exchange closed to test whether that amino acid is essential.
for model_i in [iHL622, iNF517, LbReueri, iBT721, iML1515]:
    model = model_i.copy()
    model.solver = 'cplex'
    model.reactions.get_by_id('EX_glc__D_e').bounds = (-5, 1000)
    model.reactions.get_by_id('EX_glyc_e').bounds = (0, 1000)
    # Make sure every amino-acid exchange allows some uptake in the base case.
    # BUG FIX: the original wrote `lower_bound == -1` — a no-op comparison —
    # where an assignment was clearly intended, so closed exchanges were
    # never opened.
    for aa in aarealist:
        if model.reactions.get_by_id(aa).lower_bound == 0:
            model.reactions.get_by_id(aa).lower_bound = -1
    # model.objective = 'BIOMASS'
    solution = model.optimize()
    aadic['None'].append(round(solution.objective_value, 3))
    # Knock out one amino-acid uptake at a time and record the growth rate.
    for aa in aarealist:
        model_ = model.copy()
        rea = model_.reactions.get_by_id(aa)
        rea.bounds = (0, 1000)  # close uptake of this amino acid
        if aa == 'EX_asp__L_e':
            model_.reactions.get_by_id('EX_asn__L_e').bounds = (0, 1000)  # asn ~ asp
        elif aa == 'EX_glu__L_e':
            model_.reactions.get_by_id('EX_gln__L_e').bounds = (0, 1000)  # gln ~ glu
        solution = model_.optimize()
        aadic[aa].append(round(solution.objective_value, 3))
# %% <productions>
prod_list = ['lac__L_c', 'ac_c', 'etoh_c', 'hista_c', 'fol_c', 'adeadocbl_c', 'ppal_c', '13ppd_c']
Experiment_prod = [1] * len(prod_list)
prod_dic = {'Experiment' : [1]*len(prod_list),
'iHL622': [],
'iNF517': [],
'LbReueri': [],
'iBT721': [],
'iML1515': []}
model_list = [iHL622, iNF517, LbReueri, iBT721, iML1515]
for mo_i in range(0,len(model_list)):
model = model_list[mo_i].copy()
model.reactions.get_by_id('EX_glc__D_e').bounds = (-20, 1000)
model.reactions.get_by_id('EX_glyc_e').bounds = (-10, 1000)
for prod in prod_list:
model_ = model.copy()
try:
try:
model_.metabolites.get_by_id(prod)
except :
prod_dic[model_.id].append(0)
continue
rea = cobra.Reaction('Obj')
model_.add_reaction(rea)
model_.reactions.get_by_id('Obj').reaction =prod+ ' --> '
model_.objective = 'Obj'
solution = model_.optimize()
if solution.objective_value>0.001:
prod_dic[model_.id].append(1)
else:
prod_dic[model_.id].append(0)
except :
prod_dic[model_.id].append(0)
print(prod_dic)
# %% plt
import brewer2mpl
from matplotlib.colors import LinearSegmentedColormap
import seaborn as sns
experiment_data = [1]+[1] * len(unessential) + [0] * len(essential)
iHL622_data = []
iNF517_data = []
LbReueri_data = []
iBT721_data = []
iML1515_data = []
# Binarize the per-model growth values: positive objective -> 1, zero -> 0.
# Each aadic entry holds one value per model, in the same order as the
# column lists below (negative values, if any ever occurred, are skipped —
# identical to the original per-model if-pairs).
_model_columns = [iHL622_data, iNF517_data, LbReueri_data, iBT721_data, iML1515_data]
for condition in aadic:
    for growth, column in zip(aadic[condition], _model_columns):
        if growth > 0:
            column.append(1)
        elif growth == 0:
            column.append(0)
bmap = brewer2mpl.get_map('Set2', 'qualitative', 7)
colors = bmap.mpl_colors
cm = LinearSegmentedColormap.from_list(
'cp',[colors[1], colors[0]], N=2)
# data = np.array([experiment_data,iHL622_data,iHL622_data]).T
aalist = ['None'] + [i.split('_')[1] for i in aarealist]
aalist[2] = aalist[2] + '&asn'
aalist[10] = aalist[10] + '&gln'
df = pd.DataFrame({'Experiment': experiment_data,
'$i$HL622': iHL622_data,
'$i$NF517': iNF517_data,
'LbReueri': LbReueri_data,
'$i$BT721': iBT721_data,
'$i$ML1515': iML1515_data,
}, index=aalist)
# dft = df.pivot_table(index='experiment_data', columns='iHL622', values= 'LbReuteri',np.median)
df.to_csv('amino_acid.tsv', sep='\t')
# a = pd.read_csv('amino_acid.tsv',sep = '\t',index_col=0)
sns.set()
fig, ax = plt.subplots(figsize=(8, 3))
im = sns.heatmap(df.T, linewidths=.1, ax=ax, cmap=cm,
cbar=False)
cbar = plt.colorbar(im.collections[0], # orientation='horizontal',
fraction=0.046, pad=0.014, shrink=0.5, aspect=10, )
cbar.set_ticks(np.array([0.25, 0.75]))
cbar.set_ticklabels(('no growth', 'growth'))
ax.tick_params(length=0)
x = plt.xticks()[0]
plt.xticks(x, aalist, rotation=45)
# ax.xaxis.tick_top()
# ax.xaxis.set_label_position('top')
# plt.xlabel('Amino acid omitted in medium')
# plt.tick_params(axis='both', which='major', labelsize=10, labelbottom = False, bottom=False, top = True, labeltop=True)
# plt.yticks(rotation=0)
plt.show()
fig.savefig('Growth rate simulation case4a.png')
# %% plt2
prd_list = ['lactate', 'acteate', 'ethanol ', 'histamine', 'folate', 'Vb12', '1-propanol', '1,3-propanediol'] #
prd_df = pd.DataFrame(prod_dic, index=prd_list)
prd_df.columns = ['Experiment', '$i$HL622', '$i$NF517', 'LbReueri', '$i$BT721', '$i$ML1515']
prd_df.to_csv('products_df.tsv', sep='\t')
sns.set()
fig, ax = plt.subplots(figsize=(8, 3))
im = sns.heatmap(prd_df.T, linewidths=.1, ax=ax, cmap=cm,
cbar=False)
cbar = plt.colorbar(im.collections[0], # orientation='horizontal',
fraction=0.046, pad=0.014, shrink=0.5, aspect=10, )
cbar.set_ticks(np.array([0.25, 0.75]))
cbar.set_ticklabels(('not produce', 'produce'))
ax.tick_params(length=0)
x = plt.xticks()[0]
plt.xticks(x, prd_list, rotation=60)
# ax.xaxis.tick_top()
# ax.xaxis.set_label_position('top')
# plt.title('Product ability of important metabolites')
# plt.tick_params(axis='both', which='major', labelsize=10, labelbottom = False, bottom=False, top = True, labeltop=True)
# plt.yticks(rotation=0)
plt.show()
fig.savefig('Growth rate simulation case4b.png')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15622,
416,
367,
5488,
25956,
379,
604,
14,
1828,
14,
1129,
198,
198,
37811,
8600,
62,
5420,
500,
62,
79,
541,
4... | 1.980754 | 3,741 |
import logzero
import os
os.makedirs('outputs', exist_ok=True)
log_file = 'outputs/logs.txt'
logger = logzero.logger
logzero.loglevel('DEBUG')
logzero.logfile(log_file, maxBytes=1e6, backupCount=3)
| [
11748,
2604,
22570,
198,
11748,
28686,
628,
198,
418,
13,
76,
4335,
17062,
10786,
22915,
82,
3256,
2152,
62,
482,
28,
17821,
8,
198,
6404,
62,
7753,
796,
705,
22915,
82,
14,
6404,
82,
13,
14116,
6,
198,
6404,
1362,
796,
2604,
22570,... | 2.5 | 80 |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Data Commons Python Client API examples.
Example on how to use the Client API SPARQL query wrapper.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datacommons as dc
if __name__ == '__main__':
main()
| [
2,
15069,
2177,
3012,
3457,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
... | 3.767544 | 228 |
#Analisando assinantes da newsletter
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
from pandas.plotting import autocorrelation_plot
from analise_de_vendas import plot_comparacao
assinantes = pd.read_csv('arquivos/newsletter_alucar.csv')
assinantes.head()
assinantes.dtypes
print('Quantidade de linhas e colunas', assinantes.shape)
print('Quantidade de dados nulos', assinantes.isna().sum().sum())
assinantes['mes'] = pd.to_datetime(assinantes['mes'])
assinantes.dtypes
assinantes['aumento'] = assinantes ['assinantes'].diff()
assinantes['aceleracao'] = assinantes ['aumento'].diff()
assinantes.head()
plot_comparacao('mes', 'assinantes', 'aumento', 'aceleracao',
assinantes, 'Análise de assinantes da newsletter')
| [
2,
2025,
27315,
25440,
840,
259,
39781,
12379,
13129,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
384,
397,
1211,
355,
3013,
82,
198,
6738,
2603,
29487,
8019,
1330,
12972,
29487,
355,
458,
83,
198,
6738,
19798,
292,
13,
294... | 2.697917 | 288 |
import os, json, tempfile, filecmp
from nose.tools import assert_raises
from mock import MagicMock, patch
import unit
import synapseclient
from synapseclient.retry import _with_retry
from synapseclient.exceptions import *
| [
11748,
28686,
11,
33918,
11,
20218,
7753,
11,
2393,
48991,
198,
6738,
9686,
13,
31391,
1330,
6818,
62,
430,
2696,
198,
6738,
15290,
1330,
6139,
44,
735,
11,
8529,
198,
11748,
4326,
198,
11748,
6171,
7512,
16366,
198,
6738,
6171,
7512,
... | 3.571429 | 63 |
# coding: utf-8
from __future__ import unicode_literals, absolute_import
from .table import Table, ParentLookupException
from dbfread import DBF, FieldParser
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
11,
4112,
62,
11748,
198,
198,
6738,
764,
11487,
1330,
8655,
11,
16774,
8567,
929,
16922,
198,
198,
6738,
288,
19881,
961,
1330,
20137,
... | 3.346939 | 49 |
import networkx as nx
from emmaa.filter_functions import filter_chem_mesh_go, \
filter_to_internal_edges
from indra.statements import Agent
from indra.explanation.pathfinding import get_subgraph
edges = [
(1, 2, {'statements': [{'internal': True}]}),
(2, 3, {'statements': [{'internal': False}]}),
(3, 4, {'statements': [{'internal': True}, {'internal': True}]}),
(4, 5, {'statements': [{'internal': True}, {'internal': False}]}),
(5, 6, {'statements': [{'internal': False}, {'internal': False}]}),
]
g = nx.DiGraph()
g.add_edges_from(edges)
| [
11748,
3127,
87,
355,
299,
87,
198,
6738,
795,
2611,
64,
13,
24455,
62,
12543,
2733,
1330,
8106,
62,
15245,
62,
76,
5069,
62,
2188,
11,
3467,
198,
220,
220,
220,
8106,
62,
1462,
62,
32538,
62,
276,
3212,
198,
6738,
773,
430,
13,
... | 2.489083 | 229 |
from flask import Flask
import pika
import time
from flask_mail import Mail
from flask_mail import Message
import sys
from datetime import datetime
import json
canal_con_error=""
uuid_peticion=""
arrayValidar = {}
app = Flask(__name__)
sleepTime = 20
print(' [*] Inicia en ', sleepTime, ' segundos.')
time.sleep(sleepTime)
app.config['MAIL_SERVER']='smtp.gmail.com'
app.config['MAIL_PORT'] = 587
app.config['MAIL_USERNAME'] = 'gehdevtests@gmail.com '
app.config['MAIL_PASSWORD'] = 'zkadjaefpsxbbldd'
app.config['MAIL_USE_TLS'] = True
app.config['MAIL_USE_SSL'] = False
mail = Mail(app)
mail.init_app(app)
original_stdout = sys.stdout
connection = pika.BlockingConnection(pika.ConnectionParameters(host='rabbitmq'))
channel = connection.channel()
channel.queue_declare(queue='canal_validador', durable=True)
channel.basic_qos(prefetch_count=1)
channel.basic_consume(queue='canal_validador', on_message_callback=callback)
channel.start_consuming()
@app.route('/')
| [
6738,
42903,
1330,
46947,
198,
11748,
279,
9232,
198,
11748,
640,
198,
6738,
42903,
62,
4529,
1330,
11099,
198,
6738,
42903,
62,
4529,
1330,
16000,
198,
11748,
25064,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
11748,
33918,
198,
198,
... | 2.743017 | 358 |
"""
Tests are based on the torch7 bindings for warp-ctc. Reference numbers are also obtained from the tests.
"""
import unittest
import torch
from torch.autograd import Variable
from warpctc_pytorch import CTCLoss
ctc_loss = CTCLoss()
places = 5
use_cuda = torch.cuda.is_available()
if __name__ == '__main__':
unittest.main()
| [
37811,
198,
51,
3558,
389,
1912,
319,
262,
28034,
22,
34111,
329,
25825,
12,
310,
66,
13,
20984,
3146,
389,
635,
6492,
422,
262,
5254,
13,
198,
37811,
198,
11748,
555,
715,
395,
198,
198,
11748,
28034,
198,
6738,
28034,
13,
2306,
51... | 2.896552 | 116 |
print(s(int(input('Enter number: ')))) | [
198,
4798,
7,
82,
7,
600,
7,
15414,
10786,
17469,
1271,
25,
705,
35514
] | 2.785714 | 14 |
from django.http import HttpResponse
from django.shortcuts import render
# Create your views here.
from django.views import View
from .models import User
class User_manage(View):
    """User management view: update a user's profile information.

    (The original docstring also mentioned avatar and password changes,
    which are not implemented in this block.)
    """

    def post(self, request):
        """Update a user's name and telephone number from POST data.

        Fixes two defects in the original:
        * it filtered on the *builtin* ``id`` function
          (``User.objects.filter(id=id)``) instead of an id taken from the
          request — assumes the client posts the target user id as 'id';
          TODO confirm against the caller.
        * it assigned attributes on a ``QuerySet`` and called ``save()`` on
          it, which never persists anything (QuerySet has no ``save``).
        """
        name = request.POST.get('name', '')
        telephone = request.POST.get('telephone', '')
        user_id = request.POST.get('id', '')
        user = User.objects.filter(id=user_id).first()
        if user is None:
            return HttpResponse('user not found')
        user.telephone = telephone
        user.name = name
        user.save()
        return HttpResponse('ok')
| [
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
198,
198,
2,
13610,
534,
5009,
994,
13,
198,
6738,
42625,
14208,
13,
33571,
1330,
3582,
198,
198,
6738,
764,
27530,
1330,
1178... | 1.72973 | 333 |
# Copyright 2017 - 2018 Modoolar <info@modoolar.com>
# Copyright 2018 Brainbean Apps
# Copyright 2020 Manuel Calero
# Copyright 2020 CorporateHub (https://corporatehub.eu)
# License LGPLv3.0 or later (https://www.gnu.org/licenses/lgpl-3.0.en.html).
{
    "name": "Web Actions Multi",
    "summary": "Enables triggering of more than one action on ActionManager",
    "category": "Web",
    # Odoo 13.0 series; the trailing digits are this module's own version.
    "version": "13.0.1.0.0",
    "license": "LGPL-3",
    "author": "Modoolar, " "CorporateHub, " "Odoo Community Association (OCA)",
    "website": "https://github.com/OCA/web/",
    # Only the core web client is required.
    "depends": ["web"],
    "data": ["views/web_ir_actions_act_multi.xml"],
    "installable": True,
}
| [
2,
15069,
2177,
532,
2864,
3401,
970,
283,
1279,
10951,
31,
4666,
970,
283,
13,
785,
29,
198,
2,
15069,
2864,
14842,
14289,
27710,
198,
2,
15069,
12131,
25995,
2199,
3529,
198,
2,
15069,
12131,
26040,
16066,
357,
5450,
1378,
10215,
38... | 2.644269 | 253 |
##################################################
## Author: Tiago Prata (https://github.com/TiagoPrata)
## Date: 22-Mar-2021
##################################################
# References:
# https://www.tensorflow.org/datasets/catalog/coco
# https://github.com/IntelAI/models/blob/master/docs/object_detection/tensorflow_serving/Tutorial.md
# https://github.com/IntelAI/models/blob/master/benchmarks/object_detection/tensorflow/rfcn/README.md
# https://github.com/tensorflow/models/tree/master/research/object_detection
# https://github.com/tensorflow/serving/
# https://github.com/IntelAI/models/blob/4d114dcdad34706f4c66c494c96a796f125ed07d/benchmarks/object_detection/tensorflow_serving/ssd-mobilenet/inference/fp32/object_detection_benchmark.py#L95
from six import BytesIO
import requests
import numpy as np
import cv2
from PIL import Image
from PIL import ImageColor
from PIL import ImageDraw
from PIL import ImageFont
import tempfile
import time
import os
import io
def make_request(image, server_url):
    """Send *image* to the TensorFlow Serving REST endpoint at *server_url*
    and return the decoded JSON response.
    """
    img_array = get_image_as_array(image)
    # Add a batch dimension and serialize the whole tensor as JSON text
    # (workaround discussed in github.com/tensorflow/models/issues/2503).
    # NOTE(review): despite the original comment, this uses numpy, not cv2.
    np_image = np.expand_dims(img_array, 0).tolist()
    request_data = '{"instances" : %s}' % np_image
    r = requests.post(server_url, data=request_data)
    return r.json()
def get_predictions(image, server_url):
    """Query the model server for *image* and keep only the first
    num_detections entries of each prediction field.

    Returns a dict with keys: num_detections (int), detection_boxes,
    detection_classes (human-readable names) and detection_scores.
    """
    raw = make_request(image, server_url)["predictions"][0]
    label_by_index = get_classnames_dict()
    count = int(raw["num_detections"])
    # Map 1-based class ids onto the 0-based label dictionary.
    class_names = [label_by_index[cls - 1]
                   for cls in raw["detection_classes"][:count]]
    return {"num_detections": count,
            "detection_boxes": raw["detection_boxes"][:count],
            "detection_classes": class_names,
            "detection_scores": raw["detection_scores"][:count]}
def draw_bounding_box_on_image(image,
                               ymin,
                               xmin,
                               ymax,
                               xmax,
                               color,
                               font,
                               thickness=4,
                               display_str_list=()):
    """Adds a bounding box to an image.

    The (ymin, xmin, ymax, xmax) coordinates are fractional — they are
    scaled by the image's pixel size below. Label strings are stacked above
    the box, or below its top edge if they would run off the image. Mutates
    *image* in place via ImageDraw.
    """
    draw = ImageDraw.Draw(image)
    im_width, im_height = image.size
    # Convert fractional coordinates to pixel coordinates.
    (left, right, top, bottom) = (xmin * im_width, xmax * im_width,
                                  ymin * im_height, ymax * im_height)
    draw.line([(left, top), (left, bottom), (right, bottom), (right, top),
               (left, top)],
              width=thickness,
              fill=color)
    # If the total height of the display strings added to the top of the bounding
    # box exceeds the top of the image, stack the strings below the bounding box
    # instead of above.
    display_str_heights = [font.getsize(ds)[1] for ds in display_str_list]
    # Each display_str has a top and bottom margin of 0.05x.
    total_display_str_height = (1 + 2 * 0.05) * sum(display_str_heights)
    if top > total_display_str_height:
        text_bottom = top
    else:
        text_bottom = top + total_display_str_height
    # Reverse list and print from bottom to top.
    for display_str in display_str_list[::-1]:
        text_width, text_height = font.getsize(display_str)
        margin = np.ceil(0.05 * text_height)
        # Filled background rectangle, then the label text on top of it.
        draw.rectangle([(left, text_bottom - text_height - 2 * margin),
                        (left + text_width, text_bottom)],
                       fill=color)
        draw.text((left + margin, text_bottom - text_height - margin),
                  display_str,
                  fill="black",
                  font=font)
        # NOTE(review): subtracting `text_height - 2 * margin` matches the
        # widely-copied TF example; verify label spacing if multiple strings
        # ever overlap here.
        text_bottom -= text_height - 2 * margin
def draw_boxes(image, boxes, class_names, scores, max_boxes=10, min_score=0.1):
    """Overlay labeled boxes on an image with formatted scores and label names.

    Draws at most *max_boxes* boxes whose score is >= *min_score*; the box
    color is chosen deterministically by hashing the class name. The numpy
    array *image* is updated in place (via np.copyto) and also returned.
    """
    colors = list(ImageColor.colormap.values())
    try:
        font = ImageFont.truetype("/usr/share/fonts/truetype/liberation/LiberationSansNarrow-Regular.ttf", 25)
    except IOError:
        print("Font not found, using default font.")
        font = ImageFont.load_default()
    for i in range(min(len(boxes), max_boxes)):
        if scores[i] >= min_score:
            ymin, xmin, ymax, xmax = tuple(boxes[i])
            display_str = "{}: {}%".format(class_names[i], int(100 * scores[i]))
            color = colors[hash(class_names[i]) % len(colors)]
            # Round-trip through PIL for drawing, then copy the pixels back
            # into the original numpy array so edits accumulate across boxes.
            image_pil = Image.fromarray(np.uint8(image)).convert("RGB")
            draw_bounding_box_on_image( image_pil,
                                        ymin,
                                        xmin,
                                        ymax,
                                        xmax,
                                        color,
                                        font,
                                        display_str_list=[display_str])
            np.copyto(image, np.array(image_pil))
    return image
def save_img(image):
    """Write *image* (a numpy uint8 array) to a temporary .jpg file and
    return that file's path.

    Fix: tempfile.mkstemp() returns an *open* OS-level file descriptor; the
    original discarded it without closing, leaking one descriptor per call.
    """
    fd, filename = tempfile.mkstemp(suffix=".jpg")
    os.close(fd)  # PIL reopens the path itself; release the descriptor now
    im = Image.fromarray(image)
    im.save(filename)
    return filename
def get_classnames_dict():
    """Load the COCO label file that sits next to this module.

    Returns:
        dict mapping 0-based class index -> class name (one name per line
        of coco-labels-paper.txt, trailing newline stripped).
    """
    dir_path = os.path.dirname(os.path.realpath(__file__))
    classes = {}
    # Fix: use a context manager so the file handle is always closed
    # (the original opened the file and never closed it).
    with open(dir_path + "/coco-labels-paper.txt") as classes_file:
        for i, line in enumerate(classes_file):
            classes[i] = line.split("\n")[0]
    return classes
def get_predicted_image(image, server_url, detections_limit):
    """Run object detection on *image* via the serving endpoint, draw boxes
    around the detected objects and return the annotated image as JPEG bytes.

    Fixes relative to the original:
    * the timer printed as "Inference time" wrapped the local image decoding
      (get_image_as_array) instead of the model call — it now times
      get_predictions();
    * removed a dead `io.BytesIO()` assignment that was immediately
      overwritten, and read the result file with a context manager so the
      handle is always closed.
    """
    img = get_image_as_array(image)
    start_time = time.time()
    result = get_predictions(image, server_url)
    end_time = time.time()
    print("Found %d objects." % result["num_detections"])
    print("Inference time: ", end_time - start_time)
    image_with_boxes = draw_boxes(img, result["detection_boxes"],
                                  result["detection_classes"],
                                  result["detection_scores"],
                                  detections_limit)
    filename = save_img(image_with_boxes)
    with open(filename, 'rb') as f:
        content = f.read()
    return content
29113,
14468,
2235,
198,
2235,
220,
6434,
25,
16953,
3839,
1736,
1045,
357,
5450,
1378,
12567,
13,
785,
14,
51,
29601,
6836,
1045,
8,
198,
2235,
220,
7536,
25,
2534,
12,
7676,
12,
1238,
2481,
198,
29113,
14468,
2235,
198,
198,
2,
31... | 2.20979 | 3,146 |
"""
1. Pick 10 random numbers
2. Sort the numbers in decreasing order
3. Print those numbers
4. Repeat 2 more times
"""
import random
if __name__ == "__main__":
main() | [
37811,
198,
16,
13,
12346,
838,
4738,
3146,
198,
17,
13,
33947,
262,
3146,
287,
24030,
1502,
198,
18,
13,
12578,
883,
3146,
198,
19,
13,
30021,
362,
517,
1661,
198,
37811,
628,
198,
11748,
4738,
198,
198,
361,
11593,
3672,
834,
6624... | 3.222222 | 54 |
sigma = 4.65 # standard deviation for observation noise
num_sample = 200 # number of simulations to run for each stopping time
stop_time_list = np.arange(1, 150, 10) # Stopping times to play with
def simulate_accuracy_vs_stoptime(sigma, stop_time_list, num_sample):
    """Estimate decision accuracy as a function of SPRT stopping time.

    For every stopping time, runs `num_sample` fixed-time SPRT simulations
    and records the fraction of trials whose decision was 1 (the true
    decision in this setup).

    Args:
        sigma (float): standard deviation of the observation model
        stop_time_list (list-like): stopping times to evaluate
        num_sample (int): number of simulations per stopping time

    Returns:
        accuracy_list: average accuracy for each entry of `stop_time_list`
        decisions_list: the raw decisions of every trial, one list per
            stopping time
    """
    accuracy_list = []
    decisions_list = []
    for stop_time in stop_time_list:
        # Element [1] of the simulator's return tuple is the decision.
        trial_decisions = [
            simulate_SPRT_fixedtime(sigma, stop_time)[1]
            for _ in range(num_sample)
        ]
        accuracy_list.append(sum(trial_decisions) / len(trial_decisions))
        decisions_list.append(trial_decisions)
    return accuracy_list, decisions_list
np.random.seed(100)
with plt.xkcd():
simulate_and_plot_accuracy_vs_stoptime(sigma, stop_time_list, num_sample) | [
82,
13495,
796,
604,
13,
2996,
220,
1303,
3210,
28833,
329,
13432,
7838,
198,
22510,
62,
39873,
796,
939,
220,
1303,
1271,
286,
27785,
284,
1057,
329,
1123,
12225,
640,
198,
11338,
62,
2435,
62,
4868,
796,
45941,
13,
283,
858,
7,
16... | 2.997821 | 459 |
"""
Faster R-CNN baseline
Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = -1.000
Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=3000 ] = 0.594
Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=3000 ] = 0.373
Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=3000 ] = 0.218
Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=3000 ] = 0.390
Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=3000 ] = 0.443
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=300 ] = 0.446
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=1000 ] = 0.448
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=3000 ] = 0.448
Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=3000 ] = 0.315
Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=3000 ] = 0.479
Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=3000 ] = 0.564
"""
_base_ = [
'../_base_/models/faster_rcnn_r50_fpn_dota.py',
'../_base_/datasets/dota_detection.py',
'../_base_/schedules/schedule_1x.py',
'../_base_/default_runtime.py'
]
fp16 = dict(loss_scale=512.)
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
checkpoint_config = dict(interval=1)
evaluation = dict(interval=12, metric='bbox', proposal_nums=(300, 1000, 3000)) | [
37811,
198,
37,
1603,
371,
12,
18474,
14805,
198,
198,
26287,
39281,
220,
357,
2969,
8,
2488,
58,
27853,
52,
28,
15,
13,
1120,
25,
15,
13,
3865,
930,
1989,
28,
220,
220,
477,
930,
3509,
35,
1039,
28,
3064,
2361,
796,
532,
16,
13... | 2.186228 | 639 |
# Deployment configuration constants for the CloudFormation/ECS stack.
stack_base_name = "skyer9-test"
region_name = "ap-northeast-2"
# NOTE: the us-east-1 certificate is presumably for CloudFront, the
# ap-northeast-2 one for the regional load balancer — TODO confirm.
acm_certificate_arn = "arn:aws:acm:us-east-1:061175447448:certificate/d6c212a8-8871-XXXXXXXXXXXXXXX"
acm_cluster_certificate_arn = "arn:aws:acm:ap-northeast-2:061175447448:certificate/45a14cb8-3da4-4XXXXXXXXXXXXXXX"
# for assets
root_domain_name = "10x10.io"
api_domain_name = "api." + root_domain_name
# for network
vpc_cidr = "10.0.0.0/16"
public_subnet_cidr = "10.0.1.0/24"
loadbalancer_a_subnet_cidr = "10.0.2.0/24"
loadbalancer_b_subnet_cidr = "10.0.3.0/24"
container_a_subnet_cidr = "10.0.10.0/24"
container_b_subnet_cidr = "10.0.11.0/24"
# for database
db_allocated_storage = 5 # 5 giga byte
db_name = "db_app"
db_class = "db.t2.small"
db_user = "appuser"
# SECURITY: hard-coded database password committed to source control —
# move to a secrets manager / environment variable and rotate it.
db_password = "wYIr9zpQIzuVTCqPOpOQV6lQX1MlgM1E"
# for cluster
container_instance_type = "t2.micro"
max_container_instances = 3
desired_container_instances = 3
autoscaling_group_name = "AutoScalingGroup"
# for service
application_revision = ""
# SECURITY: hard-coded application secret key committed to source control —
# move to a secrets manager / environment variable and rotate it.
secret_key = "LXeKzcTCAr8kkjKsyARmzX5fUD1BQwi8"
web_worker_cpu = 256
web_worker_memory = 500
web_worker_desired_count = 3
deploy_condition = "Deploy"
web_worker_port = "8000"
| [
25558,
62,
8692,
62,
3672,
796,
366,
15688,
263,
24,
12,
9288,
1,
198,
36996,
62,
3672,
796,
366,
499,
12,
77,
419,
9522,
12,
17,
1,
198,
330,
76,
62,
22583,
22460,
62,
1501,
796,
366,
1501,
25,
8356,
25,
330,
76,
25,
385,
12,... | 2.138122 | 543 |
"""Module for testing Coding DNA DelIns Validator."""
import unittest
from variation.validators import CodingDNADelIns
from variation.classifiers import CodingDNADelInsClassifier
from .validator_base import ValidatorBase
class TestCodingDNADelInsValidator(ValidatorBase, unittest.TestCase):
    """A class to test the Coding DNA DelIns Validator.

    ValidatorBase supplies the actual test methods; this subclass only
    provides the validator/classifier factories and the fixture name.
    """
    def validator_instance(self):
        """Return coding DNA delins instance."""
        # self.params is presumably set up by ValidatorBase — TODO confirm.
        return CodingDNADelIns(*self.params)
    def classifier_instance(self):
        """Return the coding DNA delins classifier instance."""
        return CodingDNADelInsClassifier()
    def fixture_name(self):
        """Return the fixture name for coding DNA delins."""
        return "coding_dna_delins"
| [
37811,
26796,
329,
4856,
327,
7656,
7446,
4216,
20376,
48951,
1352,
526,
15931,
198,
11748,
555,
715,
395,
198,
198,
6738,
12291,
13,
12102,
2024,
1330,
327,
7656,
35504,
2885,
417,
20376,
198,
6738,
12291,
13,
4871,
13350,
1330,
327,
7... | 2.988048 | 251 |
# An attempt to replicate http://www.nature.com/nature/journal/v518/n7540/full/nature14236.html
# (You can download it from Scihub)
from __future__ import print_function, division
import tensorflow as tf
import numpy as np
import gym
from collections import namedtuple
import random
import os
import h5py
from itertools import cycle
FRAMES_PER_STATE = 4
FRAME_WIDTH = 84
FRAME_HEIGHT = 110
RAW_FRAME_WIDTH = 160
RAW_FRAME_HEIGHT = 210
QNetwork = namedtuple('QNetwork', 'frames qvals vlist')
Transition = namedtuple('Transition', 'begin action reward terminal end')
blank_frames = [np.empty([RAW_FRAME_HEIGHT, RAW_FRAME_WIDTH, 3], dtype=np.uint8)
for i in range(FRAMES_PER_STATE - 1)]
for b in blank_frames:
b.fill(0)
| [
2,
1052,
2230,
284,
24340,
2638,
1378,
2503,
13,
21353,
13,
785,
14,
21353,
14,
24891,
14,
85,
44085,
14,
77,
2425,
1821,
14,
12853,
14,
21353,
1415,
24940,
13,
6494,
198,
2,
357,
1639,
460,
4321,
340,
422,
10286,
40140,
8,
198,
1... | 2.833977 | 259 |
"""
面试题 17.16. 按摩师
一个有名的按摩师会收到源源不断的预约请求,每个预约都可以选择接或不接。
在每次预约服务之间要有休息时间,因此她不能接受相邻的预约。
给定一个预约请求序列,替按摩师找到最优的预约集合(总预约时间最长),
返回总的分钟数。
注意:本题相对原题稍作改动
输入: [1,2,3,1]
输出: 4
解释: 选择 1 号预约和 3 号预约,总时长 = 1 + 3 = 4。
输入: [2,7,9,3,1]
输出: 12
解释: 选择 1 号预约、 3 号预约和 5 号预约,总时长 = 2 + 9 + 1 = 12。
输入: [2,1,4,5,3,1,1,3]
输出: 12
解释: 选择 1 号预约、 3 号预约、 5 号预约和 8 号预约,
总时长 = 2 + 4 + 3 + 3 = 12。
"""
from typing import List
if __name__ == "__main__":
assert Solution().massage([1, 2, 3, 1]) == 4
assert Solution().massage([2, 7, 9, 3, 1]) == 12
assert Solution().massage([2, 1, 4, 5, 3, 1, 1, 3]) == 12
| [
37811,
198,
165,
251,
95,
46237,
243,
165,
95,
246,
1596,
13,
1433,
13,
10545,
234,
231,
162,
239,
102,
30585,
230,
198,
31660,
10310,
103,
17312,
231,
28938,
235,
21410,
162,
234,
231,
162,
239,
102,
30585,
230,
27670,
248,
162,
24... | 0.850941 | 691 |
#!/usr/bin/env python3
# Povolene knihovny: copy, math
# Import jakekoli jine knihovny neprojde vyhodnocovaci sluzbou.
# IB002 Domaci uloha 4.
#
# Hammingovu vzdalenost dvou stejne dlouhych binarnich retezcu
# definujeme jako pocet bitu, ve kterych se retezce lisi.
#
# Vasim ukolem je implementovat funkci hamming_distance,
# ktera pro binarni retezec 'b' a nezaporne cele cislo 'k' vrati vsechny
# binarni retezce, jejichz Hammingova vzdalenost od 'b' bude prave 'k'.
#
# Priklady chovani:
# hamming_distance('100', 0) vrati vystup: ['100']
# hamming_distance('0001', 2) vrati vystup:
# ['1101', '1011', '1000', '0111', '0100', '0010']
def hamming_distance(b, k):
"""
vstup: 'b' binarni retezec, 'k' nezaporne cele cislo
vystup: pole vsech binarnich retezcu se vzdalenosti 'k' od 'b'
casova slozitost: polynomialni vzhledem k delce binarniho retezce 'b'
( To znamena, ze pocet operaci je v O(n^j), kde 'n' je delka binarniho
retezce 'b' a 'j' je nejake fixni cislo. Tedy pro slozitostni odhad
'k' povazujeme za fixni. Vsimnete si, ze pokud budete generovat
vsechny binarni retezce stejne delky jako 'b' a nasledne merit
Hammingovu vzdalenost, tak se nevejdete do pozadovane slozitosti.
Doporucejeme se zamyslet nad rekurzivnim pristupem. )
"""
if k == 0:
return [b]
if k > 1:
last, result = b[-1], []
str1 = [i + last for i in hamming_distance(b[:-1], k)] if len(b) > k else []
str2 = []
for i in hamming_distance(b[:-1], k - 1):
str2.append(i + flip(last))
result += str1 + str2
return result
else:
result = []
for i in range(len(b)):
result.append(flip_s(b, i))
return result
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
2,
350,
709,
349,
1734,
638,
4449,
709,
3281,
25,
4866,
11,
10688,
198,
2,
17267,
474,
539,
74,
11106,
474,
500,
638,
4449,
709,
3281,
497,
1676,
73,
2934,
410,
88,
2065,
... | 2.020339 | 885 |
# coding=utf-8
#
import numpy as np
from numpy import *
import matplotlib.pyplot as plt
#
# Calcul du log du temps equivalent Agarwal - 1980
#
# Ezekwe 12.16
#
#
# Algorithme de dérivation d'après Bourdet et al - 1989
#
# Différences centrées à pas variable
#
# Voir Ezekwe - 2010 - 12C.1 pour le calcul
#
#
# Dans cette section on veut pouvoir tracer automatiquement une droite de régression
# de pente nulle des valeurs de dp durant l IARF.
#
# Elle devra donner automatiquement la valeur de dp à l'IARF
#
# On chercher à définir les valeurs de temps Agarwal qui correspondent au IARF
#
#
# Calcul de la pente de l'IARF sur un Horner plot
#
#
# Calcul manuel de la pente de l'IARF sur le Horner plot
#
#
# Représentation du graphe log-log de la pression et la dérivée
#
#
# Représentation du graphe log-log de la pression et la dérivée
#
# avec la droite de pente nulle à l'IARF
#
#
# Représentation du Horner plot avec la droite de même pente que l'IARF
#
#
# Horner plot avec k et p0
#
#
# Bourdet plot avec k_bourd et p0
#
#
# end pour le moment
#
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
299,
32152,
1330,
1635,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
2,
198,
2,
27131,
7043,
2604,
7043,
2169,
862,
... | 2.486047 | 430 |
from abc import ABC, abstractmethod
| [
6738,
450,
66,
1330,
9738,
11,
12531,
24396,
628
] | 4.111111 | 9 |
from . import support
import numpy as np
import tensorflow as tf
| [
6738,
764,
1330,
1104,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
11192,
273,
11125,
355,
48700,
628,
628
] | 3.578947 | 19 |
# Generated by Django 3.2 on 2021-04-30 03:47
import courses.models
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
17,
319,
33448,
12,
3023,
12,
1270,
7643,
25,
2857,
198,
198,
11748,
10902,
13,
27530,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 3.171429 | 35 |
import logging
import os
import math
from plato.config import Config
from plato.datasources import base
from examples.ms_nnrt.ms_nnrt_datasource_yolo_utils import COCOYoloDataset
class DataSource(base.DataSource):
"""The YOLO dataset."""
def classes(self):
"""Obtains a list of class names in the dataset."""
return Config().data.classes
| [
11748,
18931,
201,
198,
11748,
28686,
201,
198,
11748,
10688,
201,
198,
6738,
458,
5549,
13,
11250,
1330,
17056,
201,
198,
6738,
458,
5549,
13,
19608,
292,
2203,
1330,
2779,
201,
198,
6738,
6096,
13,
907,
62,
20471,
17034,
13,
907,
62... | 2.671329 | 143 |
'''
Created by: Craig Fouts
Created on: 11/13/2020
'''
import inspect
import json
import logging
import os
import pydre
import time
from PySide2.QtWidgets import QFileDialog, QInputDialog
from gui.config import Config, CONFIG_PATH, GUI_PATH, PROJECT_PATH
from gui.customs import ProjectTree
from gui.handlers import Pydre
from gui.popups import ErrorPopup, OutputPopup, ProgressPopup, SavePopup
from gui.templates import Window
config = Config()
config.read(CONFIG_PATH)
logger = logging.getLogger('PydreLogger')
class MainWindow(Window):
'''Primary window class that handles all tasks related to the main window
configurations and functionality.
'''
def _configure_window(self):
'''Configures initial window settings.
'''
self._configure_callbacks()
self._configure_splitters()
self._configure_recent()
def _configure_callbacks(self):
'''Configures callback functionality for actions and widgets.
'''
self._configure_action_callbacks()
self._configure_widget_callbacks()
self._configure_button_callbacks()
def _configure_action_callbacks(self):
'''TODO
'''
self.ui.open_act.triggered.connect(self._handle_open_pfile)
self.ui.new_act.triggered.connect(self._handle_new_pfile)
self.ui.save_act.triggered.connect(self._handle_save)
self.ui.run_act.triggered.connect(self._handle_run_act)
def _configure_widget_callbacks(self):
'''TODO
'''
self.ui.pfile_tab.currentChanged.connect(self._handle_tab_change)
self.ui.pfile_tab.tabCloseRequested.connect(self._handle_tab_close)
self.ui.recent_lst.itemDoubleClicked.connect(self._handle_select_pfile)
self.ui.data_lst.itemSelectionChanged.connect(self._toggle_remove_btn)
self.ui.data_lst.model().rowsInserted.connect(self._toggle_run_btn)
self.ui.data_lst.model().rowsRemoved.connect(self._toggle_run_btn)
def _configure_button_callbacks(self):
'''TODO
'''
self.ui.new_pfile_btn.clicked.connect(self._handle_new_pfile)
self.ui.open_pfile_btn.clicked.connect(self._handle_open_pfile)
self.ui.new_roi_btn.clicked.connect(self._handle_new_roi)
self.ui.new_filter_btn.clicked.connect(self._handle_new_filter)
self.ui.new_metric_btn.clicked.connect(self._handle_new_metric)
self.ui.remove_item_btn.clicked.connect(self._handle_remove_item)
self.ui.move_up_btn.clicked.connect(self._handle_move_up)
self.ui.move_down_btn.clicked.connect(self._handle_move_down)
self.ui.add_btn.clicked.connect(self._handle_add_dfile)
self.ui.remove_btn.clicked.connect(self._handle_remove_dfile)
self.ui.cancel_btn.clicked.connect(self._handle_cancel)
self.ui.run_btn.clicked.connect(self._handle_run)
def _configure_splitters(self):
'''Configures the initial stretch factors for splitter widgets.
'''
self.ui.start_hsplitter.setStretchFactor(0, 5)
self.ui.start_hsplitter.setStretchFactor(1, 7)
self.ui.main_hsplitter.setStretchFactor(0, 2)
self.ui.main_hsplitter.setStretchFactor(1, 7)
self.ui.main_vsplitter.setStretchFactor(0, 7)
self.ui.main_vsplitter.setStretchFactor(1, 2)
self.ui.run_vsplitter.setStretchFactor(0, 4)
self.ui.run_vsplitter.setStretchFactor(1, 1)
def _configure_recent(self):
'''Configures the recent files list displayed on the start page.
'''
self.ui.recent_lst.clear()
recent_pfiles = config.get('Recent Files', 'paths').split(',')
for path_ in filter(lambda f: f != '', recent_pfiles):
_, name = os.path.split(path_)
self.ui.recent_lst.addItem(name)
def _handle_add_to_log(self):
'''TODO
'''
level = self.ui.level_box.currentText()
entry = self.ui.log_inp.text()
if entry:
now = time.localtime()
now_time = time.strftime('%H:%M:%S', now)
msg = f'{now_time} - {level}: {entry}'
self.ui.log_lst.addItem(msg)
self.ui.log_lst.scrollToBottom()
self.ui.log_inp.clear()
def _handle_select_pfile(self):
'''Handles selecting a file from the recent files list.
'''
directory = os.path.dirname(GUI_PATH)
recent_paths = config.get('Recent Files', 'paths').split(',')
index = self.ui.recent_lst.currentRow()
file_path = os.path.join(directory, recent_paths[index])
self._launch_editor(file_path) if file_path else None
def _handle_open_pfile(self):
'''Handles opening a project file in a new tab.
'''
pfile_type = config.get('File Types', 'project')
pfile_path = self._open_file(pfile_type)
self._launch_editor(pfile_path) if pfile_path else None
def _open_file(self, filter=None): # TODO: MOVE TO UTILITY CLASS
'''Launches a file selection dialog based on the given file type and
returns a file path if one is selected.
'''
title = "Open File"
directory = os.path.dirname(os.path.dirname(inspect.getfile(pydre)))
path_, _ = QFileDialog.getOpenFileName(self, title, directory, filter)
return os.path.abspath(path_) if path_ else None
def _open_files(self, filter=None): # TODO: MOVE TO UTILITY CLASS
'''Launches a file selection dialog based on the given file type and
returns a list of file paths if one or more is selected.
'''
title = "Open File"
directory = os.path.dirname(os.path.dirname(inspect.getfile(pydre)))
paths, _ = QFileDialog.getOpenFileNames(self, title, directory, filter)
return [os.path.abspath(path_) for path_ in paths]
def _launch_editor(self, pfile_path):
'''Configures and shows a file editor in a new tab.
'''
pfile_name = pfile_path.split(os.sep)[-1]
self._add_to_recent(pfile_path)
if pfile_name not in self.project_files:
project_tree = self._create_project_tree(pfile_name, pfile_path)
self.project_files[pfile_name] = [pfile_path, project_tree]
else:
index = self.ui.file_tab.indexOf(pfile_name)
self.ui.file_tab.setCurrentIndex(index)
self.switch_to_editor()
def _add_to_recent(self, pfile_path):
'''Adds the given project file name and path to the recent files lists
in the configuration file.
'''
relative_path = os.path.join(*pfile_path.split(os.sep)[-2:])
recent = config.get('Recent Files', 'paths').split(',')
recent.remove(relative_path) if relative_path in recent else None
recent.insert(0, relative_path)
config.set('Recent Files', 'paths', ','.join(recent))
config.update()
def _create_project_tree(self, pfile_name, pfile_path):
'''Creates and displays a FileTree widget for the given file.
'''
project_tree = ProjectTree(pfile_path)
index = self.ui.pfile_tab.count()
self.ui.pfile_tab.insertTab(index, project_tree, pfile_name)
self.ui.pfile_tab.setCurrentIndex(index)
return project_tree
def _handle_new_roi(self):
'''TODO
'''
index = self.ui.pfile_tab.currentIndex()
pfile_name = self.ui.pfile_tab.tabText(index)
project_tree = self.project_files[pfile_name][1]
project_tree.add_roi()
def _handle_new_filter(self):
'''TODO
'''
index = self.ui.pfile_tab.currentIndex()
pfile_name = self.ui.pfile_tab.tabText(index)
project_tree = self.project_files[pfile_name][1]
project_tree.add_filter()
def _handle_new_metric(self):
'''TODO
'''
index = self.ui.pfile_tab.currentIndex()
pfile_name = self.ui.pfile_tab.tabText(index)
project_tree = self.project_files[pfile_name][1]
project_tree.add_metric()
def _handle_remove_item(self):
'''TODO
'''
index = self.ui.pfile_tab.currentIndex()
pfile_name = self.ui.pfile_tab.tabText(index)
project_tree = self.project_files[pfile_name][1]
project_tree.remove_selected()
def _handle_move_up(self):
'''TODO
'''
index = self.ui.pfile_tab.currentIndex()
pfile_name = self.ui.pfile_tab.tabText(index)
project_tree = self.project_files[pfile_name][1]
project_tree.move_selected_up()
def _handle_move_down(self):
'''TODO
'''
index = self.ui.pfile_tab.currentIndex()
pfile_name = self.ui.pfile_tab.tabText(index)
project_tree = self.project_files[pfile_name][1]
project_tree.move_selected_down()
def _handle_add_dfile(self):
'''TODO
'''
dfile_type = config.get('File Types', 'data')
dfile_paths = self._open_files(dfile_type)
for path_ in dfile_paths:
self.ui.data_lst.addItem(path_)
def _handle_remove_dfile(self):
'''TODO
'''
row = self.ui.data_lst.currentRow()
self.ui.data_lst.takeItem(row)
def _handle_new_pfile(self):
'''TODO
'''
# QInputDialog.setWindowIcon(self.icon)
pfile_name, ok = QInputDialog.getText(self, 'Pydre', 'File name')
if ok:
pfile_dir = 'project_files'
pfile_name = pfile_name + '.json'
pfile_path = os.path.join(PROJECT_PATH, pfile_dir, pfile_name)
with open(pfile_path, 'w') as pfile:
pfile.write('{}')
self._launch_editor(pfile_path)
def _handle_save(self, index):
'''TODO
'''
pfile_name = self.ui.pfile_tab.tabText(index)
with open(self.project_files[pfile_name][0], 'w') as pfile:
contents = self.ui.pfile_tab.currentWidget().get_contents()
json.dump(contents, pfile, indent=4)
def _handle_run_act(self):
'''TODO
'''
index = self.ui.pfile_tab.currentIndex()
pfile_name = self.ui.pfile_tab.tabText(index)
pfile_path = self.project_files[pfile_name][0]
self.ui.pfile_lbl.setText(pfile_path)
self.switch_to_run()
def _handle_tab_change(self, index):
'''Handles functionality that occurs when a tab is opened, closed, or
selected.
'''
if self.ui.pfile_tab.count() > 0:
pfile_name = self.ui.pfile_tab.tabText(index)
self.ui.run_act.setText(f"Run '{pfile_name}'")
else:
self.switch_to_start()
def _handle_tab_close(self, index):
'''TODO
'''
if self.ui.pfile_tab.widget(index).changed():
pfile_name = self.ui.pfile_tab.tabText(index)
text = f"{pfile_name} " + config.get('Popup Text', 'save')
SavePopup(parent=self).show_(text, cb)
else:
self._handle_close(index, False)
def _handle_close(self, index, save):
'''TODO
'''
self._handle_save(index) if save else None
self.project_files.pop(self.ui.pfile_tab.tabText(index))
self.ui.pfile_tab.removeTab(index)
def _handle_cancel(self):
'''TODO
'''
self.ui.data_lst.clear()
self._toggle_run_btn()
self.switch_to_editor()
def _handle_run(self):
'''TODO
'''
if self.ui.ofile_inp.text().strip():
self._run_pydre()
else:
text = config.get('Popup Text', 'output')
OutputPopup(parent=self).show_(text, cb)
def _run_pydre(self):
'''TODO
'''
text = config.get('Popup Text', 'progress')
progress = ProgressPopup(self.app, parent=self).show_(text)
project_file = self.ui.pfile_lbl.text()
count = self.ui.data_lst.count()
data_files = [self.ui.data_lst.item(i).text() for i in range(count)]
output_file = self.ui.ofile_inp.displayText()
Pydre.run(self.app, project_file, data_files, output_file, progress)
def _toggle_remove_btn(self):
'''TODO
'''
count = len(self.ui.data_lst.selectedItems())
self.ui.remove_btn.setEnabled(True if count > 0 else False)
def _toggle_run_btn(self):
'''TODO
'''
count = self.ui.data_lst.count()
self.ui.run_btn.setEnabled(True if count > 0 else False)
def _toggle_move_btns(self):
'''TODO
'''
pass
def add_to_log(self, entry):
'''TODO
'''
self.ui.log_lst.addItem(entry)
self.ui.log_lst.scrollToBottom()
def show_error(self, error):
'''TODO
'''
ErrorPopup(parent=self).show_(error)
def switch_to_start(self):
'''Swithes to the start page (page 1 / 3).
'''
width = self.screen_width / 2
height = self.screen_height / 2.5
self.resize_and_center(width, height)
self.ui.menu_bar.setVisible(False)
self._configure_recent()
self.ui.page_stack.setCurrentIndex(0)
def switch_to_editor(self):
'''Switches to the editor page (page 2 / 3).
'''
width = self.screen_width / 2
height = self.screen_height / 1.5
self.resize_and_center(width, height)
self.ui.menu_bar.setVisible(True)
self.ui.page_stack.setCurrentIndex(1)
def switch_to_run(self):
'''Switches to the run page (page 3 / 3).
'''
width = self.screen_width / 2
height = self.screen_height / 1.5
self.resize_and_center(width, height)
self.ui.menu_bar.setVisible(True)
self.ui.page_stack.setCurrentIndex(2)
| [
7061,
6,
198,
41972,
416,
25,
13854,
376,
5269,
198,
41972,
319,
25,
1367,
14,
1485,
14,
42334,
198,
7061,
6,
198,
198,
11748,
10104,
198,
11748,
33918,
198,
11748,
18931,
198,
11748,
28686,
198,
11748,
279,
5173,
260,
198,
11748,
640... | 2.150917 | 6,434 |
# WZN: Note here we unify all LIDAR points to camera frame!!!
__author__ = 'yuxiang' # derived from honda.py by fyang
import datasets
import datasets.kitti_mv3d
import os
import time
import PIL
import datasets.imdb
import numpy as np
from matplotlib import pyplot as plt
import scipy.sparse
from utils.cython_bbox import bbox_overlaps
from utils.boxes_grid import get_boxes_grid
import subprocess
import pickle
from fast_rcnn.config import cfg
import math
from rpn_msr.generate_anchors import generate_anchors_bv
from utils.transform import camera_to_lidar_cnr, lidar_to_corners_single, computeCorners3D, lidar_3d_to_bv, lidar_cnr_to_3d,bv_anchor_to_lidar,lidar_cnr_to_camera_bv,lidar_cnr_to_bv_cnr
if __name__ == '__main__':
d = datasets.kitti_mv3d('train')
res = d.roidb
from IPython import embed; embed()
| [
2,
370,
57,
45,
25,
5740,
994,
356,
555,
1958,
477,
406,
2389,
1503,
2173,
284,
4676,
5739,
10185,
201,
198,
201,
198,
834,
9800,
834,
796,
705,
88,
2821,
15483,
6,
1303,
10944,
422,
289,
13533,
13,
9078,
416,
277,
17859,
201,
198... | 2.447977 | 346 |
from preprocessing.calculus import *
x = combination(10, 10)
y = (lambda x: x ** 2)(10)
z = (lambda x, y: x * y)(2, 3)
func = lambda x, y: x * y if (x > y) else x ** 3
print(x)
print(y)
print(z)
print(func(10, 12))
print((lambda x, y, z: x * y * z)(3, 4, 5)) | [
6738,
662,
36948,
13,
9948,
17576,
1330,
1635,
198,
198,
87,
796,
6087,
7,
940,
11,
838,
8,
198,
198,
88,
796,
357,
50033,
2124,
25,
2124,
12429,
362,
5769,
940,
8,
198,
198,
89,
796,
357,
50033,
2124,
11,
331,
25,
2124,
1635,
3... | 2.180328 | 122 |
# ----------------------------------------------------------------------------
# Copyright (c) 2020 Legorooj <legorooj@protonmail.com>
# Copyright (c) 2020 FluffyKoalas <github.com/fluffykoalas>
# This file and all others in this project are licensed under the MIT license.
# Please see the LICENSE file in the root of this repository for more details.
# ----------------------------------------------------------------------------
import pytest
# noinspection PyProtectedMember
from sloth import _utils
| [
2,
16529,
10541,
198,
2,
15069,
357,
66,
8,
12131,
3564,
273,
2238,
73,
1279,
1455,
273,
2238,
73,
31,
1676,
1122,
4529,
13,
785,
29,
198,
2,
15069,
357,
66,
8,
12131,
1610,
15352,
48735,
282,
292,
1279,
12567,
13,
785,
14,
2704,
... | 4.609091 | 110 |
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from jinja2 import Environment
from ..models import CodeModel
| [
2,
16529,
45537,
198,
2,
15069,
357,
66,
8,
5413,
10501,
13,
1439,
2489,
10395,
13,
198,
2,
49962,
739,
262,
17168,
13789,
13,
4091,
13789,
13,
14116,
287,
262,
1628,
6808,
329,
198,
2,
5964,
1321,
13,
198,
2,
16529,
35937,
198,
6... | 6.431034 | 58 |
#!/usr/bin/env python
import unittest
import os
if __name__ == "__main__":
unittest.main() | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
555,
715,
395,
198,
11748,
28686,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
555,
715,
395,
13,
12417,
3419
] | 2.5 | 38 |
from django.test import TestCase
# Create your tests here.
# from django.contrib.auth.views import LoginView,PasswordChangeDoneView,PasswordResetView
#LoginView,处理登录表单填写和登录功能
#LogoutView,退出登录
#PasswordChangeView,处理修改密码的表单,然后修改密码
#PasswordChangeDoneView,成功修改密码之后执行的视图
#PasswordResetView,用户选择重置密码功能的视图,生成一个一次性重置密码链接和对应的验证token,然后发送邮件给用户
| [
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
198,
198,
2,
13610,
534,
5254,
994,
13,
198,
198,
2,
422,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
33571,
1330,
23093,
7680,
11,
35215,
19400,
45677,
7680,
11,
35215,
4965,
316,
7680... | 1.233813 | 278 |
import joblib
import subprocess
def run_cmd(args_list):
"""
run linux commands
"""
# import subprocess
print('Running system command: {0}'.format(' '.join(args_list)))
proc = subprocess.Popen(args_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
s_output, s_err = proc.communicate()
s_return = proc.returncode
return s_return, s_output, s_err
# Dump file to disk (within application container)
model_variable = {
"key": 123,
}
model_name = "model"
joblib.dump(model_variable, model_name)
# Load file
model_variable = joblib.load(model_name)
# Put model to hdfs
model_hdfs_path = f"hdfs://nameservice1/user/duc.nguyenv3/projects/01_lgbm_modeling/data/output"
run_cmd(["hdfs", "dfs", "-put", "-f", model_name, model_hdfs_path])
# Get and overwrite to local file
run_cmd(["rm", "-rf", model_name])
run_cmd(["hdfs", "dfs", "-get", f"{model_hdfs_path}/{model_name}", "."])
| [
11748,
1693,
8019,
198,
11748,
850,
14681,
198,
198,
4299,
1057,
62,
28758,
7,
22046,
62,
4868,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
1057,
32639,
9729,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
1303,
1330,
850,
... | 2.490566 | 371 |
from source.rabbit import (
connections,
consumer,
router
)
| [
6738,
2723,
13,
81,
14229,
1330,
357,
198,
220,
220,
220,
8787,
11,
198,
220,
220,
220,
7172,
11,
198,
220,
220,
220,
20264,
198,
8,
198
] | 2.666667 | 27 |
from itertools import chain
from arekit.common.experiment.data_type import DataType
from arekit.common.folding.nofold import NoFolding
from arekit.contrib.experiment_rusentrel.exp_ds.utils import read_ruattitudes_in_memory
from arekit.contrib.source.rusentrel.io_utils import RuSentRelIOUtils
from arelight.network.nn.embedding import RusvectoresEmbedding
| [
6738,
340,
861,
10141,
1330,
6333,
198,
198,
6738,
389,
15813,
13,
11321,
13,
23100,
3681,
13,
7890,
62,
4906,
1330,
6060,
6030,
198,
6738,
389,
15813,
13,
11321,
13,
11379,
278,
13,
77,
1659,
727,
1330,
1400,
37,
33266,
198,
6738,
... | 3.076923 | 117 |
"""Platform Models."""
from marshmallow import fields, Schema
from marshmallow.validate import OneOf
from ..enums import *
from ..models.BaseSchema import BaseSchema
from .StoreAddressJson import StoreAddressJson
| [
37811,
37148,
32329,
526,
15931,
198,
198,
6738,
22397,
42725,
1330,
7032,
11,
10011,
2611,
198,
6738,
22397,
42725,
13,
12102,
378,
1330,
1881,
5189,
198,
6738,
11485,
268,
5700,
1330,
1635,
198,
6738,
11485,
27530,
13,
14881,
27054,
261... | 3.246753 | 77 |
"""`gen` tests under PEP 563."""
from __future__ import annotations
from dataclasses import dataclass
from attr import define
from cattrs import GenConverter
from cattrs.gen import make_dict_structure_fn, make_dict_unstructure_fn
# These need to be at the top level for `attr.resolve_types` to work.
@define
@define
@define
@define
@dataclass
@dataclass
| [
37811,
63,
5235,
63,
5254,
739,
350,
8905,
642,
5066,
526,
15931,
198,
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
198,
198,
6738,
708,
81,
1330,
8160,
198,
198,
6738,
269,
1078,
380... | 2.968 | 125 |
import requests
from flask import render_template, request
from app import app
@app.route('/pokemon', methods=['GET', 'POST'])
| [
11748,
7007,
198,
6738,
42903,
1330,
8543,
62,
28243,
11,
2581,
198,
6738,
598,
1330,
598,
628,
198,
31,
1324,
13,
38629,
10786,
14,
79,
12717,
3256,
5050,
28,
17816,
18851,
3256,
705,
32782,
6,
12962,
628
] | 3.513514 | 37 |
# Generated by Django 3.1.4 on 2020-12-23 07:55
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
19,
319,
12131,
12,
1065,
12,
1954,
8753,
25,
2816,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
# Sample Input
# 5
# 12 9 61 5 14
# Sample Output
# True
# Explanation
# Condition 1: All the integers in the list are positive.
# Condition 2: 5 is a palindromic integer.
# Hence, the output is True.
if __name__ == '__main__':
count, numbers = input(), input().split()
msg = "True" if any(map(is_palindromic, numbers)) and all(
map(is_positive_int, numbers)) else "False"
print(msg)
| [
2,
27565,
23412,
198,
2,
642,
198,
2,
1105,
860,
8454,
642,
1478,
198,
198,
2,
27565,
25235,
198,
2,
6407,
198,
198,
2,
50125,
341,
198,
2,
24295,
352,
25,
1439,
262,
37014,
287,
262,
1351,
389,
3967,
13,
198,
2,
24295,
362,
25,... | 2.787671 | 146 |
from django.urls import path, include
from rest_framework import routers
from user import views
router = routers.DefaultRouter()
router.register('', views.UserViewSet, base_name='users')
app_name = 'user'
urlpatterns = [
path('', include(router.urls)),
]
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
11,
2291,
198,
198,
6738,
1334,
62,
30604,
1330,
41144,
198,
198,
6738,
2836,
1330,
5009,
628,
198,
472,
353,
796,
41144,
13,
19463,
49,
39605,
3419,
198,
472,
353,
13,
30238,
10786,
3256,... | 3.011364 | 88 |
##################################################
## EC2 ImageBuilder AMI distribution setting targetAccountIds
## is not supported by CloudFormation (as of September 2021).
## https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-distributionconfiguration.html
##
## This lambda function uses Boto3 for EC2 ImageBuilder in order
## to set the AMI distribution settings which are currently missing from
## CloudFormation - specifically the targetAccountIds attribute
## https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/imagebuilder.html
##################################################
import os
import boto3
import botocore
import json
import logging | [
29113,
14468,
2235,
198,
2235,
13182,
17,
7412,
32875,
3001,
40,
6082,
4634,
2496,
30116,
7390,
82,
198,
2235,
318,
407,
4855,
416,
10130,
8479,
341,
357,
292,
286,
2693,
33448,
737,
198,
2235,
3740,
1378,
31628,
13,
8356,
13,
33103,
... | 4.223529 | 170 |
"""
The MidiAnimationController gets MIDI messages from the MidiServer and
sends animation messages to the LED strip clients
"""
# TODO
| [
37811,
198,
464,
7215,
72,
39520,
22130,
3011,
33439,
6218,
422,
262,
7215,
72,
10697,
290,
198,
82,
2412,
11034,
6218,
284,
262,
12365,
10283,
7534,
198,
37811,
198,
2,
16926,
46,
198
] | 4.121212 | 33 |
import numpy as np
import os
import pydensecrf.densecrf as dcrf
| [
11748,
299,
32152,
355,
45941,
198,
11748,
28686,
198,
11748,
279,
5173,
1072,
6098,
69,
13,
67,
1072,
6098,
69,
355,
288,
6098,
69,
628,
628,
628,
628
] | 2.535714 | 28 |
from typing import List, Optional, Iterable
from pluggy import HookspecMarker
hookspec = HookspecMarker("autofile")
@hookspec(firstresult=True)
def get_template_value(
filepath: str, field: str, subfield: str, default: List[str]
) -> Optional[List[Optional[str]]]:
"""Called by template.py to get template value for custom template
Return: None if field is not handled by this plugin otherwise list of str values"""
# return value of None means that field is not handled by this plugin
# return value of [None] means that field is handled by this plugin but value resolved to None (no value)
# return value of [value] means that field is handled by this plugin and value resolved to value
@hookspec
def get_template_help() -> Iterable:
"""Return iterable of one or more help elements. Each element may be a str, a dict, or a list of lists"""
| [
6738,
19720,
1330,
7343,
11,
32233,
11,
40806,
540,
198,
198,
6738,
6107,
1360,
1330,
18531,
16684,
9704,
263,
198,
198,
25480,
16684,
796,
18531,
16684,
9704,
263,
7203,
2306,
1659,
576,
4943,
628,
198,
31,
25480,
16684,
7,
11085,
2027... | 3.569106 | 246 |
# MNIST
# import mnist.fc
# import mnist.lenet5
# import mnist.lenet5_loader
# import mnist.alex
# CIFAR10
# import cifar.lenet5
# import cifar.alex
# import cifar.vgg16
# Data Preparation
# import data.mnist
# import data.cifar
# import data.fashion_mnist
# Data Augmentation
# import data_augmentation.ds_builder_1normalizer
# import data_augmentation.ds_builder_2builder
# import data_augmentation.ds_builder_3visualization
# import data_augmentation.augmentation_with_albumentation
# import data_augmentation.augmentation_with_torchvision
# Image Processing
# import image_processing.conv_pytorch
# import image_processing.conv_opencv
# import image_processing.semantic_segmentation
# Architecture
# import architectures.LeNet5.net
# import architectures.AlexNet.net
# import architectures.VGG16.net
# import architectures.UNet.net
| [
2,
29060,
8808,
198,
2,
1330,
285,
77,
396,
13,
16072,
198,
2,
1330,
285,
77,
396,
13,
11925,
316,
20,
198,
2,
1330,
285,
77,
396,
13,
11925,
316,
20,
62,
29356,
198,
2,
1330,
285,
77,
396,
13,
1000,
87,
628,
198,
2,
327,
50... | 2.992908 | 282 |
'''
Created on 30.03.2020
@author: JM
'''
| [
7061,
6,
198,
41972,
319,
1542,
13,
3070,
13,
42334,
198,
198,
31,
9800,
25,
47726,
198,
7061,
6,
198
] | 2.15 | 20 |
'''
Copyright (C) 2021 CG Cookie
http://cgcookie.com
hello@cgcookie.com
Created by Jonathan Denning, Jonathan Williamson
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import os
import re
import sys
import math
import time
import random
import asyncio
import inspect
import traceback
import contextlib
from math import floor, ceil
from inspect import signature
from itertools import dropwhile, zip_longest
from concurrent.futures import ThreadPoolExecutor
from .ui_utilities import UI_Element_Utils
from .ui_settings import DEBUG_COLOR_CLEAN, DEBUG_PROPERTY, DEBUG_COLOR, DEBUG_DIRTY, DEBUG_LIST, CACHE_METHOD, ASYNC_IMAGE_LOADING
import bpy
import bgl
import blf
import gpu
from .blender import tag_redraw_all
from .ui_linefitter import LineFitter
from .ui_styling import UI_Styling, ui_defaultstylings
from .ui_utilities import helper_wraptext, convert_token_to_cursor
from .drawing import ScissorStack, FrameBuffer
from .fsm import FSM
from .useractions import ActionHandler
from .boundvar import BoundVar
from .debug import debugger, dprint, tprint
from .decorators import debug_test_call, blender_version_wrapper, add_cache
from .drawing import Drawing
from .fontmanager import FontManager
from .globals import Globals
from .hasher import Hasher
from .maths import Vec2D, Color, mid, Box2D, Size1D, Size2D, Point2D, RelPoint2D, Index2D, clamp, NumberUnit
from .maths import floor_if_finite, ceil_if_finite
from .profiler import profiler, time_it
from .utils import iter_head, any_args, join, abspath
class UI_Layout:
'''
layout each block into lines. if a content box of child element is too wide to fit in line and the child
is not the only element on the current line, then end current line, start a new line, relayout the child.
NOTE: this function does not set the final position and size for element.
through this function, we are calculating and committing to a certain width and height
although the parent element might give us something different. if we end up with a
different width and height in self.position() below, we will need to improvise by
adjusting margin (if bigger) or using scrolling (if smaller)
TODO: allow for horizontal growth rather than biasing for vertical
TODO: handle flex layouts
TODO: allow for different line alignments other than top (bottom, baseline)
TODO: percent_of (style width, height, etc.) could be of last non-static element or document
TODO: position based on bottom-right,etc.
NOTE: parent ultimately controls layout and viewing area of child, but uses this layout function to "ask"
child how much space it would like
given size might by inf. given can be ignored due to style. constraints applied at end.
positioning (with definitive size) should happen
IMPORTANT: as currently written, this function needs to be able to be run multiple times!
DO NOT PREVENT THIS, otherwise layout bugs will occur!
'''
# @profiler.function
# @profiler.function
# @profiler.function
# @UI_Element_Utils.add_option_callback('layout:flexbox')
# def layout_flexbox(self):
# style = self._computed_styles
# direction = style.get('flex-direction', 'row')
# wrap = style.get('flex-wrap', 'nowrap')
# justify = style.get('justify-content', 'flex-start')
# align_items = style.get('align-items', 'flex-start')
# align_content = style.get('align-content', 'flex-start')
# @UI_Element_Utils.add_option_callback('layout:block')
# def layout_block(self):
# pass
# @UI_Element_Utils.add_option_callback('layout:inline')
# def layout_inline(self):
# pass
# @UI_Element_Utils.add_option_callback('layout:none')
# def layout_none(self):
# pass
| [
7061,
6,
198,
15269,
357,
34,
8,
33448,
29925,
39606,
198,
4023,
1378,
66,
36484,
18055,
13,
785,
198,
31373,
31,
66,
36484,
18055,
13,
785,
198,
198,
41972,
416,
11232,
5601,
768,
11,
11232,
34974,
628,
220,
220,
220,
770,
1430,
31... | 3.217771 | 1,373 |
# Code generated by lark_sdk_gen. DO NOT EDIT.
from pylark.lark_request import RawRequestReq, _new_method_option
from pylark import lark_type, lark_type_sheet, lark_type_approval
import attr
import typing
import io
@attr.s
@attr.s
@attr.s
| [
2,
6127,
7560,
416,
300,
668,
62,
21282,
74,
62,
5235,
13,
8410,
5626,
48483,
13,
198,
198,
6738,
279,
2645,
668,
13,
75,
668,
62,
25927,
1330,
16089,
18453,
3041,
80,
11,
4808,
3605,
62,
24396,
62,
18076,
198,
6738,
279,
2645,
66... | 2.655914 | 93 |
import argparse
import matplotlib.pyplot as plt
import numpy as np
import json
import collections
import os
import pickle
import seaborn as sns
if __name__ == "__main__":
paritions = ["train_2020.json", "test_seen_2020.json", "test_unseen_2020.json"]
parser = argparse.ArgumentParser()
parser.add_argument("--partitions", type=list, default=[0])#, 1, 2])
parser.add_argument("--force_rerun", type=bool, default=True)
args = parser.parse_args()
for p in args.partitions:
summary_dict_file_name = paritions[p].split(".")[0] + "_summary.pkl"
summary_dicts = None
if os.path.exists(summary_dict_file_name) and not args.force_rerun:
with open(summary_dict_file_name, 'rb') as f:
summary_dicts = pickle.load(f)
else:
data = read_partition(paritions[p])
summary_dicts = summarize_partition(paritions[p], data)
with open(summary_dict_file_name, 'wb') as f:
pickle.dump(summary_dicts, f)
caption_lengths, speaker_counts, word_counts = summary_dicts
count_hist(caption_lengths, "Captions Length in Words")
frequency_hist(caption_lengths, "Captions Length in Words", log_scale=False)
frequency_hist(speaker_counts.values(), "Captions Per Speaker")
frequency_hist(word_counts.values(), "Word Counts")
| [
11748,
1822,
29572,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
33918,
198,
11748,
17268,
198,
11748,
28686,
198,
11748,
2298,
293,
198,
11748,
384,
397,
1211,
355,
3013,
... | 2.698482 | 461 |
from ._version import __version__ # noqa
# All created browser instances are listed here to keep a reference and
# avoid having them garbage-collected prematurely.
# Registry of live browser instances; a list literal is the idiomatic and
# slightly faster equivalent of list().
_browser_instances = []
| [
6738,
47540,
9641,
1330,
11593,
9641,
834,
220,
1303,
645,
20402,
198,
198,
2,
1439,
2727,
4772,
263,
12,
8625,
1817,
389,
5610,
994,
329,
257,
4941,
284,
3368,
1719,
198,
2,
606,
15413,
12,
4033,
12609,
41370,
13,
198,
62,
40259,
6... | 3.836735 | 49 |
import pandas as pd  
import numpy as np

# Numeric ISO 3166-1 country codes for the countries of interest.
ISO_COUNTRY_CODES = {
    'Brazil': 76,
    'China': 156,
    'India': 356,
    'Nigeria': 566
}

# FAOSTAT-internal country codes (distinct from the ISO numbering above).
FAOSTAT_COUNTRY_CODES = {
    'Brazil': 21,
    'China': 351,
    'India': 100,
    'Nigeria': 159
}

# Mapping from a coarse food-type label to the FAOSTAT commodity names it covers.
CLASSES = {'SEAFOOD':
           [
               'Aquatic Animals, Others',
               'Cephalopods',
               'Crustaceans',
               'Demersal Fish',
               'Fish, Body Oil',
               'Fish, Liver Oil',
               'Freshwater Fish',
               'Marine Fish, Other',
               'Molluscs, Other',
               'Pelagic Fish'
           ],
           'AQUA_PLANTS': ['Aquatic Plants'],
           'MEAT': ['Bovine Meat',
                    'Fats, Animals, Raw',
                    'Meat, Other',
                    'Mutton & Goat Meat',
                    'Offals, Edible'
                    ],
           'PORK': ['Pigmeat'],
           'POULTRY': ['Poultry Meat'],
           'DAIRY': ['Cheese',
                     'Whey',
                     'Milk - Excluding Butter',
                     'Milk, Whole',
                     'Butter, Ghee',
                     'Cream',
                     'Eggs'],
           'HONEY': ['Honey'],
           }

# Load the FAOSTAT livestock extract and keep only the four countries above.
livestock = pd.read_csv('livestock.csv',header=0,names=['year','commodity','comm_code','country','country_code','food_qty','fqflg','food_qty_yr','fqyflg','food_qty_day','fqd_flg','kcal_day','cdflg','fat_qty_day','fqdflg','pro_qty_day','pqd_flg'])
livestock = livestock[livestock.country_code.isin(FAOSTAT_COUNTRY_CODES.values())]

# NOTE(review): pd.Series() creates an empty (all-NaN) column here; rows whose
# commodity matches no CLASSES entry keep NaN as their type -- confirm intended.
livestock['type'] = pd.Series()
for typecode,typelist in CLASSES.items():
    livestock['type'] = np.where(livestock['commodity'].isin(typelist),typecode,livestock['type'])

# Aggregate daily calories per (country, year, type).
simpler = livestock[['country','year','type','kcal_day']]
simpler = simpler.groupby(['country','year','type']).sum()

# find top ten crops for nigeria
# NOTE(review): top_crops is not visible in this excerpt -- presumably defined
# elsewhere in this module.
for name,code in FAOSTAT_COUNTRY_CODES.items():
    top_crops(code,name)
11748,
19798,
292,
355,
279,
67,
220,
198,
11748,
299,
32152,
355,
45941,
198,
198,
40734,
62,
34,
19385,
40405,
62,
34,
3727,
1546,
796,
1391,
198,
220,
220,
220,
705,
39190,
10354,
8684,
11,
198,
220,
220,
220,
705,
14581,
10354,
... | 2.270677 | 665 |
from math import pow, sqrt

# Process t test cases: each case gives a transceiver range r followed by the
# 2D positions of the chef, head server and sous-chef.
# NOTE(review): getDist is not visible in this excerpt -- presumably a
# Euclidean-distance helper defined elsewhere in this module.
t=int(input())
while t > 0:
  t-=1
  r = float(input())
  chef = [int(x) for x in input().split(' ')]
  head = [int(x) for x in input().split(' ')]
  sous = [int(x) for x in input().split(' ')]
  dists = [getDist(chef, head), getDist(chef, sous), getDist(sous, head)]
  # Communication works when at most one of the three pairwise distances
  # exceeds the range r (the third person can relay).
  if len([x for x in dists if (x > r)]) > 1:
    print("no")
  else:
    print("yes")
6738,
10688,
1330,
7182,
11,
19862,
17034,
198,
198,
83,
28,
600,
7,
15414,
28955,
198,
4514,
256,
1875,
657,
25,
198,
220,
256,
12,
28,
16,
198,
220,
374,
796,
12178,
7,
15414,
28955,
198,
220,
21221,
796,
685,
600,
7,
87,
8,
3... | 2.351852 | 162 |
from str_utils import normalize

if __name__ == '__main__': 
    # Smoke test for normalize; its exact behavior (presumably whitespace
    # normalization) is defined in str_utils -- confirm against that module.
    print(normalize(' main '))
| [
6738,
965,
62,
26791,
1330,
3487,
1096,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
220,
198,
220,
220,
220,
3601,
7,
11265,
1096,
10786,
1388,
705,
4008,
198
] | 2.787879 | 33 |
"""
Given an int array length 2, return True if it contains a 2 or a 3.
has23([2, 5]) → True
has23([4, 3]) → True
has23([4, 5]) → False
@author unobatbayar
"""
| [
37811,
198,
198,
15056,
281,
493,
7177,
4129,
362,
11,
1441,
6407,
611,
340,
4909,
257,
362,
393,
257,
513,
13,
628,
198,
10134,
1954,
26933,
17,
11,
642,
12962,
15168,
6407,
198,
10134,
1954,
26933,
19,
11,
513,
12962,
15168,
6407,
... | 2.578125 | 64 |
"""
This package is responsible for encoding/decoding RTMP amf messages.
This package contains classes/methods to establish a connection to a RTMP server
and to read/write amf messages on a connected stream.
It also contains the PySocks (https://github.com/Anorov/PySocks) module to enable
a connection to a RTMP server using a proxy.
"""
__author__ = 'nortxort'
__authors__ = ['prekageo', 'Anorov', 'hydralabs']
__credits__ = __authors__
| [
37811,
198,
1212,
5301,
318,
4497,
329,
21004,
14,
12501,
7656,
11923,
7378,
716,
69,
6218,
13,
198,
198,
1212,
5301,
4909,
6097,
14,
24396,
82,
284,
4474,
257,
4637,
284,
257,
11923,
7378,
4382,
198,
392,
284,
1100,
14,
13564,
716,
... | 3.340909 | 132 |
#!/usr/bin/env python
import os
import distutils.core

# Vagrant shared folders commonly reject hard links; deleting os.link makes
# distutils fall back to copying files instead.
if os.environ.get('USER', '') == 'vagrant':
    del os.link

distutils.core.setup(
    name='arago-hiro-actionhandler-stonebranch',
    version='1.0.0',
    author='Johannes Harth',
    author_email='jharth@arago.co',
    description='Arago HIRO ActionHandler plugin for Stonebranch',
    license='MIT',
    url='https://github.com/arago/python-hiro-stonebranch-actionhandler',
    python_requires='>=3.4',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Topic :: Utilities',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Environment :: Plugins',
        'Natural Language :: English',
        'Operating System :: OS Independent',
    ],
    # Console entry point that launches the ActionHandler daemon.
    entry_points={
        'console_scripts': [
            'stonebranch-actionhandler ='
            ' arago.hiro.actionhandler.plugin.stonebranch.StonebranchActionHandlerDaemon:StonebranchActionHandlerDaemon.main',
        ],
    },
    packages=[
        'arago.hiro.actionhandler.plugin',
        'arago.hiro.actionhandler.plugin.stonebranch',
        'arago.hiro.actionhandler.plugin.stonebranch.action',
    ],
    install_requires=[
        'arago-common-base',
        'arago-pyactionhandler',
        'requests',
    ],
    # Pinned git revisions for the two arago dependencies above.
    dependency_links=[
        'git+https://github.com/arago/python-arago-common.git@96c1618fc8ab861951930f768d02cb25e2adf9dd#egg=arago-common-base-2.1',
        'git+https://github.com/166MMX/python-arago-pyactionhandler.git@abd1f97975e64269f88940d9ecfebc07e4e76d20#egg=arago-pyactionhandler-2.5',
    ],
    scripts=[
        'bin/hiro-stonebranch-actionhandler.py'
    ],
    # Configuration and init scripts installed outside the package tree.
    data_files=[
        (
            '/opt/autopilot/conf/external_actionhandlers/',
            [
                'config/external_actionhandlers/stonebranch-actionhandler.conf',
                'config/external_actionhandlers/stonebranch-instances.conf',
                'config/external_actionhandlers/stonebranch-actionhandler-log.conf',
            ],
        ),
        (
            '/opt/autopilot/conf/external_actionhandlers/capabilities/',
            [
                'config/external_actionhandlers/capabilities/stonebranch-actionhandler.yaml'
            ],
        ),
        (
            '/etc/init.d/', [
                'etc/init.d/hiro-stonebranch-actionhandler'
            ],
        )
    ],
)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
28686,
198,
11748,
1233,
26791,
13,
7295,
198,
198,
361,
28686,
13,
268,
2268,
13,
1136,
10786,
29904,
3256,
10148,
8,
6624,
705,
29821,
5250,
10354,
198,
220,
220,
220,
1619,
2... | 2.161261 | 1,110 |
from ofx2xlsmbr.writer.IWriterController import IWriterController
| [
6738,
286,
87,
17,
87,
75,
5796,
1671,
13,
16002,
13,
40,
34379,
22130,
1330,
314,
34379,
22130,
628
] | 3.526316 | 19 |
import mne
from .utils import add_annotations_from_events, map_aux
def read_raw_fif(fname):
    """Read a raw FIF file saved with the BSL StreamRecorder.

    Parameters
    ----------
    fname : file-like
        Path to the -raw.fif file to load.

    Returns
    -------
    raw : Raw
        MNE raw instance.
    """
    raw = mne.io.read_raw_fif(fname, preload=True)
    raw = map_aux(raw)  # AUX channels
    # The old eego LSL plugin wrote channel names in upper case; map each one
    # back to the conventional mixed-case spelling.
    renames = (
        ("FP1", "Fp1"),
        ("FPZ", "Fpz"),
        ("FP2", "Fp2"),
        ("FZ", "Fz"),
        ("CZ", "Cz"),
        ("PZ", "Pz"),
        ("POZ", "POz"),
        ("FCZ", "FCz"),
        ("OZ", "Oz"),
        ("FPz", "Fpz"),
    )
    for old_name, new_name in renames:
        try:
            mne.rename_channels(raw.info, {old_name: new_name})
        except Exception:
            # Channel absent from this recording -- nothing to rename.
            pass
    # Set annotations
    return add_annotations_from_events(raw)
| [
11748,
285,
710,
198,
198,
6738,
764,
26791,
1330,
751,
62,
34574,
602,
62,
6738,
62,
31534,
11,
3975,
62,
14644,
628,
198,
4299,
1100,
62,
1831,
62,
32041,
7,
69,
3672,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
4149,
8... | 1.981092 | 476 |
#!/usr/bin/env python3
#python3 analyze_video.py {path/video_filename.mp4} {ID Folder Name}
# initalize
import sys
import argparse
import tensorflow as tf
import cv2
import dlib
import numpy as np
import detect_and_align
import os
from model import OpenNsfwModel
from image_utils import create_yahoo_image_loader
from wide_resnet import WideResNet
from sklearn.metrics.pairwise import pairwise_distances
from tensorflow.python.platform import gfile
class IdData:
    """Keeps track of known identities and calculates id matches"""

    def print_distance_table(self, id_image_paths):
        """Print the pairwise distance matrix between the stored id embeddings,
        labelled by the basename of each id image path."""
        distances = pairwise_distances(self.embeddings, self.embeddings)
        names = [p.split("/")[-1] for p in id_image_paths]
        # Header row: empty 20-char cell followed by one padded cell per name.
        print("Distance matrix:\n{:20}".format(""), end="")
        for label in names:
            print("{:20}".format(label), end="")
        # One row per identity: row label, then each distance to 3 decimals.
        for label, row in zip(names, distances):
            print("\n{:20}".format(label), end="")
            for value in row:
                print("{:20}".format("%0.3f" % value), end="")
        print()
# draw labels on video for Age and Sex detection engine
if __name__ == "__main__":
main(sys.argv)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
29412,
18,
16602,
62,
15588,
13,
9078,
1391,
6978,
14,
15588,
62,
34345,
13,
3149,
19,
92,
1391,
2389,
48107,
6530,
92,
198,
198,
2,
287,
1287,
1096,
198,
11748,
25064,
198,
... | 2.667368 | 475 |
import copy
from collections import defaultdict
from python_util.geometry.polygon import Polygon, list_to_polygon_object
from python_util.geometry.rectangle import Rectangle
from python_util.geometry.util import ortho_connect, smooth_surrounding_polygon, polygon_clip, convex_hull, \
bounding_box, merge_rectangles
from python_util.image_processing.white_space_detection import get_binarization, is_whitespace
from python_util.parser.xml.page.page import Page
from python_util.parser.xml.page.page_objects import Points
from article_separation.article_rectangle import ArticleRectangle
def get_article_surrounding_polygons(ar_dict):
    """
    Create surrounding polygons over sets of rectangles, belonging to different article_ids.

    :param ar_dict: dict (keys = article_id, values = corresponding rectangles)
    :return: dict (keys = article_id, values = corresponding surrounding polygons)
    """
    # Fix: the original loop variable shadowed the builtin `id()`; a dict
    # comprehension with a descriptive name avoids that and is more direct.
    return {article_id: ortho_connect(rectangles)
            for article_id, rectangles in ar_dict.items()}
def smooth_article_surrounding_polygons(asp_dict, poly_norm_dist=10, orientation_dims=(600, 300, 600, 300), offset=0):
    """
    Create smoothed polygons over "crooked" polygons, belonging to different article_ids.

    1.) The polygon gets normalized, where the resulting vertices are at most ``poly_norm_dist`` pixels apart.
    2.) For each vertex of the original polygon an orientation is determined:
    2.1) Four rectangles (North, East, South, West) are generated, with the dimensions given by ``or_dims``
    (width_vertical, height_vertical, width_horizontal, height_horizontal), i.e. North and South rectangles
    have dimensions width_v x height_v, whereas East and West rectangles have dimensions width_h x height_h.
    2.2) The offset controls how far the cones overlap (e.g. how far the north cone gets translated south)
    2.3) Each rectangle counts the number of contained points from the normalized polygon
    2.4) The top two rectangle counts determine the orientation of the vertex: vertical, horizontal or one
    of the four possible corner types.
    3.) Vertices with a differing orientation to its agreeing neighbours are assumed to be mislabeled and
    get its orientation converted to its neighbours.
    4.) Corner clusters of the same type need to be shrunken down to one corner, with the rest being converted
    to verticals. (TODO or horizontals)
    5.) Clusters between corners (corner-V/H-...-V/H-corner) get smoothed if they contain at least five points,
    by taking the average over the y-coordinates for horizontal edges and the average over the x-coordinates for
    vertical edges.

    :param asp_dict: dict (keys = article_id, values = list of "crooked" polygons)
    :param poly_norm_dist: int, distance between pixels in normalized polygon
    :param orientation_dims: tuple (width_v, height_v, width_h, height_h), the dimensions of the orientation rectangles
    :param offset: int, number of pixel that the orientation cones overlap
    :return: dict (keys = article_id, values = smoothed polygons)
    """
    # Fix: the original loop variable shadowed the builtin `id()`; build the
    # result with a comprehension and a descriptive name instead.
    return {
        article_id: [smooth_surrounding_polygon(poly, poly_norm_dist, orientation_dims, offset)
                     for poly in polygons]
        for article_id, polygons in asp_dict.items()
    }
def stretch_rectangle_until_whitespace(binarized_image, rectangle, whitespace_height=1, stretch_limit=250):
    """Stretch ``rectangle`` upwards (decreasing y) until a horizontal strip of
    whitespace is found in ``binarized_image`` or ``stretch_limit`` pixels were
    tried; return a (deep) copy of the rectangle with the adjusted bounds.

    :param binarized_image: binarized page image probed by ``is_whitespace``
    :param rectangle: rectangle to stretch (left unmodified)
    :type rectangle: Rectangle
    :param whitespace_height: height (px) of the probe strip above the rectangle
    :param stretch_limit: maximum number of 1-px upward steps to try
    :return: a new Rectangle, stretched upward if whitespace was found
    """
    new_rectangle = copy.deepcopy(rectangle)
    # whitespace_rectangle = Rectangle(x=rectangle.x, y=rectangle.y - whitespace_height, width=rectangle.width,
    #                                  height=whitespace_height)
    # Probe strip covering the middle 3/5 of the rectangle's width, placed
    # directly above its top edge.
    whitespace_rectangle = Rectangle(x=rectangle.x + rectangle.width // 5, y=rectangle.y - whitespace_height,
                                     width=3 * rectangle.width // 5,
                                     height=whitespace_height)
    # NOTE(review): the vertical extent is checked against shape[1]; for a
    # (rows, cols) numpy image the vertical axis would be shape[0] -- confirm.
    if whitespace_rectangle.y < 0 or whitespace_rectangle.y + whitespace_rectangle.height > binarized_image.shape[1]:
        return new_rectangle
    for i in range(stretch_limit):
        if is_whitespace(binarized_image, whitespace_rectangle, threshold=0.04) or whitespace_rectangle.y == 0:
            # Whitespace (or the image top) reached: grow the copy so its top
            # edge sits at the probe strip's current position.
            new_rectangle.set_bounds(rectangle.x, whitespace_rectangle.y, rectangle.width,
                                     rectangle.height + i + 1)
            break
        else:
            # Move the probe strip one pixel further up and try again.
            whitespace_rectangle.translate(0, -1)
    return new_rectangle
# TODO: Optimize code
def merge_article_rectangles_vertically(article_rectangles_dict, min_width_intersect=20, max_vertical_distance=50, use_convex_hull=False):
    """Merge, per article id, rectangles that overlap or are vertically close,
    and return a surrounding polygon for each merged group.

    :param article_rectangles_dict: rectangles grouped by article id
    :type article_rectangles_dict: dict[str,list[ArticleRectangle]]
    :param min_width_intersect: minimum horizontal overlap (px) for two
        rectangles to be considered part of the same vertical run
    :param max_vertical_distance: maximum vertical gap (px) that may be bridged
        between two horizontally overlapping rectangles
    :param use_convex_hull: if True, return each group's convex hull; otherwise
        an orthogonal (rectilinear) outline via ortho_connect
    :return: dict (keys = article id, values = list of surrounding polygons)
    """
    surr_polygon_dict = defaultdict(list)
    for aid, article_rectangles_list in article_rectangles_dict.items():
        redundant_article_rectangles = []
        merged_articles_list = []
        for i, article_rectangle in enumerate(article_rectangles_list):
            if article_rectangle in redundant_article_rectangles:
                continue
            # Start a fresh group, or continue the existing group containing
            # this rectangle (the group is removed here and re-appended below).
            merged_articles = [article_rectangle]
            for l in merged_articles_list:
                if article_rectangle in l:
                    merged_articles_list.remove(l)
                    merged_articles = l
                    break
            if i + 1 == len(article_rectangles_list):
                # Last rectangle: nothing left to compare against.
                merged_articles_list.append(merged_articles)
                break
            for article_rectangle_compare in article_rectangles_list[i + 1:]:
                if article_rectangle_compare in redundant_article_rectangles:
                    continue
                skip = False
                if article_rectangle.contains_rectangle(article_rectangle_compare):
                    # no need to add article rectangle, since it gives no new information
                    redundant_article_rectangles.append(article_rectangle_compare)
                    continue
                intersection = article_rectangle.intersection(article_rectangle_compare)
                # Positive height: genuine overlap -> merge both plus overlap.
                if intersection.width > min_width_intersect and intersection.height > 0:
                    # TODO: Check intersection with other rectangle of same aid?
                    merged_articles.append(article_rectangle_compare)
                    merged_articles.append(intersection)
                # Negative height: horizontally aligned but vertically apart by
                # abs(height) pixels -- possibly bridge the gap.
                if intersection.width > min_width_intersect and intersection.height < 0:
                    if abs(intersection.height) < max_vertical_distance:
                        gap = article_rectangle.get_gap_to(article_rectangle_compare)
                        # check if there is an intersection with another article rectangle in this area
                        for ar in [_ar for _ars in article_rectangles_dict.values() for _ar in _ars if
                                   _ar != article_rectangle]:
                            intersection_gap_with_rectangle = gap.intersection(ar)
                            if intersection_gap_with_rectangle.height > 0 and intersection_gap_with_rectangle.width > 0:
                                skip = True
                                break
                        if skip:
                            continue
                        merged_articles.append(article_rectangle_compare)
                        merged_articles.append(gap)
            merged_articles_list.append(merged_articles)
        if use_convex_hull:
            for _ars in merged_articles_list:
                article_convex_hull = convex_hull(
                    [vertex for vertices in [_ar.get_vertices() for _ar in _ars] for vertex in vertices])
                article_convex_hull_polygon = list_to_polygon_object(article_convex_hull)
                surr_polygon_dict[aid].append(article_convex_hull_polygon)
        else:
            for _ars in merged_articles_list:
                article_ortho_connect_polygon = ortho_connect(_ars)
                for ortho_connect_polygon in article_ortho_connect_polygon:
                    surr_polygon_dict[aid].append(ortho_connect_polygon)
    return surr_polygon_dict
def get_article_rectangles_from_surr_polygons(page, use_max_rect_size=True, max_d=0, max_rect_size_scale=1 / 50,
                                              max_d_scale=1 / 20):
    """Given the PageXml file ``page`` return the corresponding article subregions as a list of ArticleRectangle objects.
    Also returns the width and height of the image (NOT of the PrintSpace).

    :param page: Either the path to the PageXml file or a Page object.
    :type page: Union[str, Page]
    :param use_max_rect_size: whether to use a max rectangle size for the article rectangles or not
    :type use_max_rect_size: bool
    :return: the article subregion list, the height and the width of the image
    """
    if type(page) == str:
        page = Page(page)
    assert type(page) == Page, f"Type must be Page, got {type(page)} instead."

    # Bounding box of the print space, promoted to an ArticleRectangle that
    # also carries the page's textlines.
    print_space_poly = Points(page.get_print_space_coords()).to_polygon()
    bbox = print_space_poly.get_bounding_box()
    ps_rect = ArticleRectangle(bbox.x, bbox.y, bbox.width, bbox.height,
                               page.get_textlines())

    # Scale the subdivision parameters relative to the print-space height.
    max_rect_size = int(max_rect_size_scale * ps_rect.height) if use_max_rect_size else 0
    if not max_d:
        max_d = int(max_d_scale * ps_rect.height)

    subregions = ps_rect.create_subregions_from_surrounding_polygon(max_d=max_d, max_rect_size=max_rect_size)
    # ars = ps_rectangle.create_subregions_from_surrounding_polygon(max_d=int(1 / 20 * ps_rectangle.height))
    img_width, img_height = page.get_image_resolution()
    return subregions, img_height, img_width
if __name__ == '__main__':
    # xml_path = "/home/max/data/as/NewsEye_ONB_data_corrected/aze/ONB_aze_18950706_corrected/page/ONB_aze_18950706_4.xml"
    # img_path = "/home/max/data/as/NewsEye_ONB_data_corrected/aze/ONB_aze_18950706_corrected/ONB_aze_18950706_4.jpg"
    # xml_path = "/home/max/data/as/NewsEye_ONB_data_corrected/krz/ONB_krz_19110701_corrected/page/ONB_krz_19110701_016.xml"
    # img_path = "/home/max/data/as/NewsEye_ONB_data_corrected/krz/ONB_krz_19110701_corrected/ONB_krz_19110701_016" \
    #            ".jpg"
    # #
    img_path = "/home/max/devel/projects/article_separation/data/newseye_onb/ibn/ONB_ibn_18640702_corrected/ONB_ibn_18640702_003.tif"
    xml_path = "/home/max/devel/projects/article_separation/data/newseye_onb/ibn/ONB_ibn_18640702_corrected/page/ONB_ibn_18640702_003.xml"
    # xml_path = "/home/max/data/as/NewsEye_ONB_data_corrected/ibn/ONB_ibn_19330701_corrected/page/ONB_ibn_19330701_001.xml"
    # img_path = "/home/max/data/as/NewsEye_ONB_data_corrected/ibn/ONB_ibn_19330701_corrected/ONB_ibn_19330701_001.jpg"
    # # #
    # xml_path = "/home/max/data/as/NewsEye_ONB_data_corrected/nfp/ONB_nfp_18730705_corrected/page/ONB_nfp_18730705_016.xml"
    # img_path = "/home/max/data/as/NewsEye_ONB_data_corrected/nfp/ONB_nfp_18730705_corrected/ONB_nfp_18730705_016.tif"
    #
    # xml_path = '/home/max/data/as/NewsEye_ONB_data_corrected/nfp/ONB_nfp_18950706_corrected/page/ONB_nfp_18950706_015.xml'
    # img_path = '/home/max/data/as/NewsEye_ONB_data_corrected/nfp/ONB_nfp_18950706_corrected/ONB_nfp_18950706_015.tif'
    # NOTE(review): get_article_rectangles_from_baselines is not visible in
    # this excerpt -- presumably defined elsewhere in this module.
    article_rectangles_dict = get_article_rectangles_from_baselines(Page(xml_path), img_path, use_surr_polygons=True,
                                                                    stretch=False)
    surr_polys_dict = merge_article_rectangles_vertically(article_rectangles_dict)

    import matplotlib.pyplot as plt
    from python_util.parser.xml.page import plot as page_plot
    from matplotlib.collections import PolyCollection
    from python_util.plot import colors

    # page_plot.plot_pagexml(xml_path, img_path)
    # Figure 1: the merged surrounding polygons, one color per article id.
    fig, ax = plt.subplots()
    page_plot.add_image(ax, img_path)
    for i, a_id in enumerate(surr_polys_dict):
        surr_polygons = surr_polys_dict[a_id]
        if a_id is None:
            surr_poly_collection = PolyCollection([surr_poly.as_list() for surr_poly in surr_polygons], closed=True,
                                                  edgecolors=colors.DEFAULT_COLOR, facecolors=colors.DEFAULT_COLOR)
        else:
            surr_poly_collection = PolyCollection([surr_poly.as_list() for surr_poly in surr_polygons], closed=True,
                                                  edgecolors=colors.COLORS[i], facecolors=colors.COLORS[i])
        surr_poly_collection.set_alpha(0.5)
        ax.add_collection(surr_poly_collection)
    # plt.show()

    # Figure 2: the raw article rectangles, one color per article id.
    fig, ax = plt.subplots()
    page_plot.add_image(ax, img_path)
    for i, a_id in enumerate(article_rectangles_dict):
        # fig, ax = plt.subplots()
        # page_plot.add_image(ax, img_path)
        # add facecolors="None" if rectangles should not be filled
        ars = article_rectangles_dict[a_id]
        if a_id is None:
            ar_poly_collection = PolyCollection([ar.get_vertices() for ar in ars], closed=True,
                                                edgecolors=colors.DEFAULT_COLOR, facecolors=colors.DEFAULT_COLOR)
        else:
            ar_poly_collection = PolyCollection([ar.get_vertices() for ar in ars], closed=True,
                                                edgecolors=colors.COLORS[i], facecolors=colors.COLORS[i])
        ar_poly_collection.set_alpha(0.5)
        ax.add_collection(ar_poly_collection)
        # Debug output for degenerate (zero-height) rectangles.
        for ar in ars:
            if ar.height == 0:
                print(ar.width, ar.height, len(ar.textlines))
                for textline in ar.textlines:
                    print("\t", textline.baseline.points_list)
    # plt.show()
    plt.show()
    # for aid, ars in article_rectangles_dict.items():
    #     print(aid)
    #     print(len(ars))
    #     for ar in ars:
    #         print('\t', ar.get_vertices())
    # print(article_rectangles_dict)
| [
11748,
4866,
198,
6738,
17268,
1330,
4277,
11600,
198,
198,
6738,
21015,
62,
22602,
13,
469,
15748,
13,
35428,
14520,
1330,
12280,
14520,
11,
1351,
62,
1462,
62,
35428,
14520,
62,
15252,
198,
6738,
21015,
62,
22602,
13,
469,
15748,
13,
... | 2.301213 | 6,185 |
"""A simple log mechanism styled after PEP 282."""
# The class here is styled after PEP 282 so that it could later be
# replaced with a standard Python logging implementation.
import sys
# Severity levels styled after PEP 282, ordered DEBUG < INFO < WARN < ERROR < FATAL.
DEBUG = 1
INFO = 2
WARN = 3
ERROR = 4
FATAL = 5

# Module-level singleton logger and convenience aliases bound to its methods.
# NOTE(review): the Log class is not visible in this excerpt -- presumably
# defined elsewhere in this module.
_global_log = Log()
log = _global_log.log
debug = _global_log.debug
info = _global_log.info
warn = _global_log.warn
error = _global_log.error
fatal = _global_log.fatal
| [
37811,
32,
2829,
2604,
9030,
45552,
706,
350,
8905,
41810,
526,
15931,
198,
198,
2,
383,
1398,
994,
318,
45552,
706,
350,
8905,
41810,
523,
326,
340,
714,
1568,
307,
198,
2,
6928,
351,
257,
3210,
11361,
18931,
7822,
13,
198,
198,
11... | 3.044444 | 135 |
from typing import Any, Dict, Tuple
from flask import Request
from pymongo.cursor import Cursor
| [
6738,
19720,
1330,
4377,
11,
360,
713,
11,
309,
29291,
198,
198,
6738,
42903,
1330,
19390,
198,
6738,
279,
4948,
25162,
13,
66,
21471,
1330,
327,
21471,
628,
628,
198
] | 3.366667 | 30 |
# Copyright (c) 2019 Horizon Robotics. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
setup(
    name='alf',
    version='0.0.1',
    # Pinned runtime dependencies.
    install_requires=[
        'atari_py == 0.1.7',
        'fasteners',
        'gym == 0.10.11',
        'matplotlib',
        'numpy',
        'opencv-python >= 3.4.1.15',
        'pathos == 0.2.4',
        'pillow',
        'psutil',
        'pybullet == 2.5.0',
        'tensorflow-gpu == 2.0.0',
    ],
    packages=find_packages(),
)
| [
2,
15069,
357,
66,
8,
13130,
22776,
47061,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
35... | 2.668317 | 404 |
# Redo Challenge 51 (arithmetic progression) using "while":
# read the first term and the common difference, then print the first
# 10 terms of the progression.
# (Fix: removed the stray C-style semicolons -- they are redundant in Python.)
primeirotermo = int(input('Digite o primeiro termo da PA: '))
razao = int(input('Agora digite a razão da PA: '))
vezes = 10
cont = 0
pa = primeirotermo
# Print the first term, then the remaining vezes-1 terms on the same line.
print(f'Os {vezes} termos da PA são: {pa}', end=' ')
cont += 1
while cont < vezes:
    pa = pa + razao
    cont += 1
    print(pa, end=' ')
print('\nFim do programa')
| [
2,
8134,
19178,
267,
2935,
1878,
952,
6885,
12379,
8147,
514,
25440,
267,
366,
4514,
1,
201,
198,
201,
198,
35505,
7058,
4354,
78,
796,
493,
7,
15414,
10786,
19511,
578,
267,
6994,
7058,
3381,
78,
12379,
8147,
25,
705,
18125,
201,
1... | 2.028571 | 210 |
# Playing with Digits
# 6 kyu
# https://www.codewars.com/kata/5552101f47fc5178b1000050
#
# w r i t t e n b y
# oooo https://github.com/lcsm29 .oooo. .ooooo.
# `888 .dP""Y88b 888' `Y88.
# 888 .ooooo. .oooo.o ooo. .oo. .oo. ]8P' 888 888
# 888 d88' `"Y8 d88( "8 `888P"Y88bP"Y88b .d8P' `Vbood888
# 888 888 `"Y88b. 888 888 888 .dP' 888'
# 888 888 .o8 o. )88b 888 888 888 .oP .o .88P'
# o888o `Y8bod8P' 8""888P' o888o o888o o888o 8888888888 .oP'
if __name__ == '__main__':
    # Each entry: [function name, n, p, expected result].
    basic_tests = [
        ['dig_pow', 89, 1, 1],
        ['dig_pow', 92, 1, -1],
        ['dig_pow', 46_288, 3, 51]
    ]
    for test in basic_tests:
        fn_name, n, p, expected = test
        # Look the solution up by name; dig_pow must be defined at module
        # level (not visible in this excerpt -- presumably defined above).
        result = globals()[fn_name](n, p)
        print(f'{fn_name}({n}, {p}) returns {result}'
              f'{f", expected: {expected}" if result != expected else ""}')
# _ _ _ _
# | | | | | | (_)
# | |__ ___ ___| |_ _ __ _ __ __ _ ___| |_ _ ___ ___
# | '_ \ / _ \/ __| __| | '_ \| '__/ _` |/ __| __| |/ __/ _ \
# | |_) | __/\__ \ |_ | |_) | | | (_| | (__| |_| | (_| __/
# |_.__/ \___||___/\__| | .__/|_| \__,_|\___|\__|_|\___\___|
# | | written by
# |_| https://codewars.com/users/tpatja
'''tpatja
def dig_pow(n, p):
s = 0
for i,c in enumerate(str(n)):
s += pow(int(c),p+i)
return s/n if s%n==0 else -1
'''
| [
2,
220,
23911,
351,
7367,
896,
198,
2,
220,
718,
479,
24767,
198,
2,
220,
3740,
1378,
2503,
13,
19815,
413,
945,
13,
785,
14,
74,
1045,
14,
31046,
2481,
486,
69,
2857,
16072,
20,
23188,
65,
49388,
1120,
198,
2,
198,
2,
220,
220,... | 1.588294 | 1,008 |
# -*- coding: utf-8; -*-
# flake8: noqa
from .agent import Agent
from .agency import Agency
from .const import VERSION
from .rules import Rules
# Package identity metadata; VERSION comes from the .const module above.
__title__ = 'customs'
__version__ = VERSION
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
26,
532,
9,
12,
198,
2,
781,
539,
23,
25,
645,
20402,
198,
6738,
764,
25781,
220,
1330,
15906,
198,
6738,
764,
40955,
1330,
7732,
198,
6738,
764,
9979,
220,
1330,
44156,
2849,
198,
673... | 2.771429 | 70 |
"""
This Project is a game , name - Egg Catcher.
"""
#Importing libraries
from itertools import cycle
from random import randrange
from tkinter import Tk , Canvas , messagebox , font
#Creating variable for canvas size
canvas_width = 800
canvas_height = 400
win = Tk() #Win starts
#Giving title
win.title("The Egg Catcher")
#creating canvas
c = Canvas(win , width = canvas_width , height = canvas_height , background = 'deep sky blue')
#creating ground
c.create_rectangle(-5, canvas_height - 100 , canvas_width + 5 , canvas_height + 5 , fill='sea green', width=0)
#creating sun
c.create_oval(-80,-80,120,120,fill='orange' , width=0)
c.pack() #canvas packed
#creating list of colors which which will be repeated
color_cycle = cycle(['light blue' , 'light pink' , 'light yellow','light green' , 'red', 'blue' , 'green','black'])
#Creating variables for egg size,shape and movement
egg_width = 45
egg_height = 55
egg_score = 10
egg_speed = 500
egg_interval = 4000
difficulty_factor = 0.95
#Creating variables for basket to catch eggs
catcher_color = 'blue'
catcher_width = 100
catcher_height = 100
catcher_start_x = canvas_width / 2 - catcher_width / 2
catcher_start_y = canvas_height -catcher_height - 20
catcher_start_x2 = catcher_start_x + catcher_width
catcher_start_y2 = catcher_start_y + catcher_height
#Creating catcher (Arc)
catcher = c.create_arc(catcher_start_x ,catcher_start_y ,catcher_start_x2,catcher_start_y2 , start=200 , extent = 140 , style='arc' , outline=catcher_color , width=3)
#Intializing score and creating it's Text
score = 0
score_text = c.create_text(10,10,anchor='nw' , font=('Arial',18,'bold'),fill='darkblue',text='Score : ' + str(score))
#Initializing Life and creating it's Text
lives_remaning = 3
lives_text = c.create_text(canvas_width-10,10,anchor='ne' , font=('Arial',18,'bold'),fill='darkblue',text='Lives Left: ' + str(lives_remaning))
#Creating empty
eggs = []
""" Defining functions for game play"""
#Creating Egg, setting their height and range of falling
#Defining egg movement(i.e changing their positions)
#Removing the egg from list to over the fame
#Decreasing a life score by 1
#Function to check if catcher catches eggs
#Function for increasing the score
#to make catcher move left
#to make catcher move right
#binding keys with function
c.bind('<Left>' , move_left)
c.bind('<Right>' , move_right)
c.focus_set()
#starting the game function to start dropping eggs
win.after(1000,create_eggs)
win.after(1000,move_eggs)
win.after(1000,catch_check)
win.mainloop() #Win ends | [
37811,
198,
1212,
4935,
318,
257,
983,
837,
1438,
532,
14562,
5181,
2044,
13,
198,
37811,
198,
198,
2,
20939,
278,
12782,
198,
6738,
340,
861,
10141,
1330,
6772,
198,
6738,
4738,
1330,
43720,
9521,
198,
6738,
256,
74,
3849,
1330,
309,... | 3.038323 | 835 |
# -*- coding: utf-8 -*-
"""
config
"""
import os
import re
import pwd
import sys
import time
import logging
import sh
from decouple import UndefinedValueError, AutoConfig, config
LOG_LEVELS = ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]
LOG_LEVEL = config("LOG_LEVEL", logging.WARNING, cast=int)
logging.basicConfig(
stream=sys.stdout, level=LOG_LEVEL, format="%(asctime)s %(name)s %(message)s"
)
logging.Formatter.converter = time.gmtime
log = logging.getLogger(__name__)
class NotGitRepoError(Exception):
"""
NotGitRepoError
"""
def __init__(self):
"""
init
"""
msg = "not a git repository error"
super(NotGitRepoError, self).__init__(msg)
class NoGitRepoOrEnvError(Exception):
"""
NoGitRepoOrEnvError
"""
def __init__(self):
"""
init
"""
msg = "no git repo or env found error"
super(NoGitRepoOrEnvError, self).__init__(msg)
def git(*args, strip=True, **kwargs):
"""
git
"""
try:
result = str(sh.contrib.git(*args, **kwargs)) # pylint: disable=no-member
if strip:
result = result.strip()
return result
except sh.ErrorReturnCode as e:
stderr = e.stderr.decode("utf-8")
if "not a git repository" in stderr.lower():
raise NotGitRepoError
log.error(e)
class AutoConfigPlus(AutoConfig): # pylint: disable=too-many-public-methods
"""
thin wrapper around AutoConfig adding some extra features
"""
@property
def APP_UID(self):
"""
uid
"""
return os.getuid()
@property
def APP_GID(self):
"""
gid
"""
return pwd.getpwuid(self.APP_UID).pw_gid
@property
def APP_USER(self):
"""
user
"""
return pwd.getpwuid(self.APP_UID).pw_name
@property
def APP_PORT(self):
"""
port
"""
return self("APP_PORT", 5000, cast=int)
@property
def APP_JOBS(self):
"""
jobs
"""
try:
return call("nproc")[1].strip()
except: # pylint: disable=bare-except
return 1
@property
def APP_TIMEOUT(self):
"""
timeout
"""
return self("APP_TIMEOUT", 120, cast=int)
@property
def APP_WORKERS(self):
"""
workers
"""
return self("APP_WORKERS", 2, cast=int)
@property
def APP_MODULE(self):
"""
module
"""
return self("APP_MODULE", "main:app")
@property
def APP_REPOROOT(self):
"""
reporoot
"""
return git("rev-parse", "--show-toplevel")
@property
def APP_INSTALLPATH(self):
"""
install path
"""
return self("APP_INSTALLPATH", "/usr/src/app")
@property
def APP_VERSION(self):
"""
version
"""
try:
return git("describe", "--abbrev=7", "--always")
except NotGitRepoError:
return self("APP_VERSION")
@property
def APP_BRANCH(self):
"""
branch
"""
try:
return git("rev-parse", "--abbrev-ref", "HEAD")
except NotGitRepoError:
return self("APP_BRANCH")
@property
def APP_DEPENV(self):
"""
deployment environment
"""
branch = self.APP_BRANCH
if branch == "master":
return "prod"
elif branch.startswith("stage/"):
return "stage"
return "dev"
@property
def APP_SRCTAR(self):
"""
srctar
"""
try:
return self("APP_SRCTAR")
except UndefinedValueError:
return ".src.tar.gz"
@property
def APP_REVISION(self):
"""
revision
"""
try:
return git("rev-parse", "HEAD")
except NotGitRepoError:
return self("APP_REVISION")
@property
def APP_REMOTE_ORIGIN_URL(self):
"""
remote origin url
"""
try:
return git("config", "--get", "remote.origin.url")
except NotGitRepoError:
return self("APP_REMOTE_ORIGIN_URL")
@property
def APP_REPONAME(self):
"""
reponame
"""
pattern = r"((ssh|https)://)?(git@)?github.com[:/](?P<reponame>[A-Za-z0-9\/\-_]+)(.git)?"
match = re.search(pattern, self.APP_REMOTE_ORIGIN_URL)
return match.group("reponame")
@property
def APP_PROJNAME(self):
"""
projname
"""
return os.path.basename(self.APP_REPONAME)
@property
def APP_PROJPATH(self):
"""
projpath
"""
return os.path.join(self.APP_REPOROOT, self.APP_PROJNAME)
@property
def APP_BOTPATH(self):
"""
botpath
"""
return os.path.join(self.APP_PROJPATH, "bot")
@property
def APP_DBPATH(self):
"""
dbpath
"""
return os.path.join(self.APP_PROJPATH, "db")
@property
def APP_TESTPATH(self):
"""
testpath
"""
return os.path.join(self.APP_PROJPATH, "tests")
@property
def APP_LS_REMOTE(self):
"""
ls-remote
"""
try:
result = git("ls-remote", f"https://github.com/{self.APP_REPONAME}")
except NotGitRepoError:
result = self("APP_LS_REMOTE")
return {
refname: revision
for revision, refname in [line.split() for line in result.split("\n")]
}
@property
def APP_GSM_STATUS(self):
"""
gsm status
"""
try:
result = git("submodule", "status", strip=False)
except NotGitRepoError:
result = self("APP_GSM_STATUS")
pattern = r"([ +-])([a-f0-9]{40}) ([A-Za-z0-9\/\-_.]+)( .*)?"
matches = re.findall(pattern, result)
states = {
" ": True, # submodule is checked out the correct revision
"+": False, # submodule is checked out to a different revision
"-": None, # submodule is not checked out
}
return {
repopath: [revision, states[state]]
for state, revision, repopath, _ in matches
}
def __getattr__(self, attr):
"""
getattr
"""
log.info(f"attr = {attr}")
if attr == "create_doit_tasks": # note: to keep pydoit's hands off
return lambda: None
result = self(attr)
try:
return int(result)
except ValueError:
return result
CFG = AutoConfigPlus()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
11250,
198,
37811,
198,
198,
11748,
28686,
198,
11748,
302,
198,
11748,
279,
16993,
198,
11748,
25064,
198,
11748,
640,
198,
11748,
18931,
198,
11748,
427,
198... | 1.977386 | 3,405 |
# -*- coding: utf-8 -*-
import pandas as pd
from .common import _getJson
def latestFX(symbols=None, token='', version='', filter=''):
'''This endpoint returns real-time foreign currency exchange rates data updated every 250 milliseconds.
https://iexcloud.io/docs/api/#latest-currency-rates
5pm Sun-4pm Fri UTC
Args:
symbols (string): comma seperated list of symbols
token (string); Access token
version (string); API version
filter (string); filters: https://iexcloud.io/docs/api/#filter-results
Returns:
dict: result
'''
if symbols:
if isinstance(symbols, str):
return _getJson('/fx/latest?symbols={symbols}'.format(symbols=symbols), token, version, filter)
return _getJson('/fx/latest?symbols={symbols}'.format(symbols=','.join(symbols)), token, version, filter)
return _getJson('/fx/latest', token, version, filter)
def latestFXDF(symbols=None, token='', version='', filter=''):
'''This endpoint returns real-time foreign currency exchange rates data updated every 250 milliseconds.
https://iexcloud.io/docs/api/#latest-currency-rates
5pm Sun-4pm Fri UTC
Args:
symbols (string): comma seperated list of symbols
token (string); Access token
version (string); API version
filter (string); filters: https://iexcloud.io/docs/api/#filter-results
Returns:
DataFrame: result
'''
return pd.DataFrame(latestFX(symbols, token, version, filter))
def convertFX(symbols=None, amount=None, token='', version='', filter=''):
'''This endpoint performs a conversion from one currency to another for a supplied amount of the base currency. If an amount isn’t provided, the latest exchange rate will be provided and the amount will be null.
https://iexcloud.io/docs/api/#currency-conversion
5pm Sun-4pm Fri UTC
Args:
symbols (string): comma seperated list of symbols
amount (float): amount to convert
token (string); Access token
version (string); API version
filter (string); filters: https://iexcloud.io/docs/api/#filter-results
Returns:
dict: result
'''
amount = amount or ''
if symbols:
if isinstance(symbols, str):
return _getJson('/fx/convert?symbols={symbols}&amount='.format(symbols=symbols, amount=amount), token, version, filter)
return _getJson('/fx/convert?symbols={symbols}&amount='.format(symbols=','.join(symbols), amount=amount), token, version, filter)
return _getJson('/fx/convert?amount={amount}'.format(amount=amount), token, version, filter)
def convertFXDF(symbols=None, amount=None, token='', version='', filter=''):
'''This endpoint performs a conversion from one currency to another for a supplied amount of the base currency. If an amount isn’t provided, the latest exchange rate will be provided and the amount will be null.
https://iexcloud.io/docs/api/#currency-conversion
5pm Sun-4pm Fri UTC
Args:
symbols (string): comma seperated list of symbols
amount (float): amount to convert
token (string); Access token
version (string); API version
filter (string); filters: https://iexcloud.io/docs/api/#filter-results
Returns:
DataFrame: result
'''
return pd.DataFrame(convertFX(symbols, token, version, filter))
def historicalFX(symbols=None, token='', version='', filter=''):
'''This endpoint returns a daily value for the desired currency pair.
https://iexcloud.io/docs/api/#historical-daily
1am Mon-Sat UTC
Args:
symbols (string): comma seperated list of symbols
token (string); Access token
version (string); API version
filter (string); filters: https://iexcloud.io/docs/api/#filter-results
Returns:
dict: result
'''
if symbols:
if isinstance(symbols, str):
return _getJson('/fx/historical?symbols={symbols}'.format(symbols=symbols), token, version, filter)
return _getJson('/fx/historical?symbols={symbols}'.format(symbols=','.join(symbols)), token, version, filter)
return _getJson('/fx/historical', token, version, filter)
def historicalFXDF(symbols=None, amount=None, token='', version='', filter=''):
'''This endpoint returns a daily value for the desired currency pair.
https://iexcloud.io/docs/api/#historical-daily
1am Mon-Sat UTC
Args:
symbols (string): comma seperated list of symbols
token (string); Access token
version (string); API version
filter (string); filters: https://iexcloud.io/docs/api/#filter-results
Returns:
DataFrame: result
'''
return pd.DataFrame(historicalFX(symbols, token, version, filter))
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
764,
11321,
1330,
4808,
1136,
41,
1559,
628,
198,
4299,
3452,
17213,
7,
1837,
2022,
10220,
28,
14202,
11,
11241,
11639,
3256,... | 2.736301 | 1,752 |
#If there is an existing db, it should be deleted before loading otherwise
#the frequency information will be invalid.
import sys
sys.path.append(r'../../Lib')
from ac_oanc_lemma_frequency import *
ac_load_oanc_shelve(r'BCCWJ_frequencylist-FrqGE3.csv', r'BCCWJ_frequencylist-FrqGE3.db', 'Jp')
| [
2,
1532,
612,
318,
281,
4683,
20613,
11,
340,
815,
307,
13140,
878,
11046,
4306,
198,
2,
1169,
8373,
1321,
481,
307,
12515,
13,
198,
198,
11748,
25064,
198,
198,
17597,
13,
6978,
13,
33295,
7,
81,
6,
40720,
40720,
25835,
11537,
198,... | 2.801887 | 106 |
# -*- coding: utf-8 -*-
import os
import logging
from bs4 import BeautifulSoup
import re
'def'
import codecs
import functools
from pymongo import MongoClient
import datetime
import time
if __name__ == '__main__':
os.chdir(os.getcwd())
os.chdir('..')
os.chdir('..')
os.chdir('data')
os.chdir('raw')
os.chdir('html')
html_dir = os.getcwd()
os.chdir('..')
os.chdir('..')
os.chdir('processed')
processed_dir = os.getcwd()
main(html_dir,processed_dir)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
28686,
198,
11748,
18931,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
198,
11748,
302,
198,
198,
1549,
891,
6,
198,
11748,
40481,
82,
198,
11748,
1257,
310,
... | 2.212121 | 231 |
# Generated by Django 3.2.8 on 2021-11-05 09:20
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
513,
13,
17,
13,
23,
319,
33448,
12,
1157,
12,
2713,
7769,
25,
1238,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 2.766667 | 30 |
# Copyright (c) 2018 UFCG-LSD.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import redis
import requests
import time
import subprocess
from subprocess import PIPE
from datetime import datetime
from monitor.utils.monasca.connector import MonascaConnector
from monitor.utils.influxdb.connector import InfluxConnector
from monitor.plugins.base import Plugin
from influxdb import InfluxDBClient
import kubernetes
LOG_FILE = "progress.log"
TIME_PROGRESS_FILE = "time_progress.log"
MONITORING_INTERVAL = 2
| [
2,
15069,
357,
66,
8,
2864,
11448,
38,
12,
6561,
35,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
137... | 3.438538 | 301 |
from logging import Logger
from pathlib import Path
from leaf_focus.support.location import Location
from leaf_focus.pdf.info.component import Component
class Operation:
"""A pipeline building block that creates the pdf info file."""
def run(self, pdf_path: Path, file_hash: str):
"""Run the operation."""
# create the output directory
output_file = self._location.info_file(self._base_path, file_hash)
self._location.create_directory(output_file.parent)
# create the pdf info file
self._component.create(pdf_path, output_file)
# result
return output_file
| [
6738,
18931,
1330,
5972,
1362,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
6738,
12835,
62,
37635,
13,
11284,
13,
24886,
1330,
13397,
198,
6738,
12835,
62,
37635,
13,
12315,
13,
10951,
13,
42895,
1330,
35100,
628,
198,
4871,
14680,
25... | 2.926267 | 217 |
from ._block import AbstractBlock
__all__ = ["AbstractBlock"]
| [
6738,
47540,
9967,
1330,
27741,
12235,
198,
198,
834,
439,
834,
796,
14631,
23839,
12235,
8973,
198
] | 3.705882 | 17 |
from fastlane.cli.core import main # NOQA
| [
6738,
3049,
33533,
13,
44506,
13,
7295,
1330,
1388,
220,
1303,
8005,
48,
32,
198
] | 2.866667 | 15 |
# register blue print
| [
2,
7881,
220,
4171,
3601,
628
] | 4 | 6 |
"""Repository of common GLSL functions."""
| [
37811,
6207,
13264,
286,
2219,
402,
6561,
43,
5499,
526,
15931,
198
] | 3.583333 | 12 |
import datetime
from os import path
from urllib.request import urlopen
from xml.dom import minidom
from utils import save_image, set_wallpaper_permanent
# get today's date
date = str(datetime.date.today())
| [
11748,
4818,
8079,
198,
6738,
28686,
1330,
3108,
198,
6738,
2956,
297,
571,
13,
25927,
1330,
19016,
9654,
198,
6738,
35555,
13,
3438,
1330,
949,
312,
296,
198,
198,
6738,
3384,
4487,
1330,
3613,
62,
9060,
11,
900,
62,
11930,
20189,
62... | 3.246154 | 65 |
import requests
from linebot.models import *
# text_get_search_bitcoin_address('1CVyyJ6C8z3t5g25BJ8sBSqpwXTdz3HKiy') | [
11748,
7007,
198,
198,
6738,
1627,
13645,
13,
27530,
1330,
1635,
198,
198,
2,
2420,
62,
1136,
62,
12947,
62,
35395,
62,
21975,
10786,
16,
33538,
22556,
41,
21,
34,
23,
89,
18,
83,
20,
70,
1495,
33,
41,
23,
82,
4462,
80,
79,
86,
... | 2.269231 | 52 |
from hc.accounts.models import Profile
| [
6738,
289,
66,
13,
23317,
82,
13,
27530,
1330,
13118,
628
] | 3.636364 | 11 |
# Copyright (c) 2014-2017, iocage
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted providing that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""snapremove module for the cli."""
import subprocess as su
import click
import iocage.lib.ioc_common as ioc_common
import iocage.lib.ioc_json as ioc_json
import iocage.lib.ioc_list as ioc_list
@click.command(name="snapremove", help="Remove specified snapshot of a jail.")
@click.argument("jail")
@click.option("--name", "-n", help="The snapshot name. This will be what comes"
" after @", required=True)
def cli(jail, name):
"""Removes a snapshot from a user supplied jail."""
# TODO: Move to API
jails = ioc_list.IOCList("uuid").list_datasets()
pool = ioc_json.IOCJson().json_get_value("pool")
_jail = {uuid: path for (uuid, path) in jails.items() if
uuid.startswith(jail)}
if len(_jail) == 1:
uuid, path = next(iter(_jail.items()))
elif len(_jail) > 1:
ioc_common.logit({
"level" : "ERROR",
"message": f"Multiple jails found for {jail}:"
})
for u, p in sorted(_jail.items()):
ioc_common.logit({
"level" : "ERROR",
"message": f" {u} ({p})"
})
exit(1)
else:
ioc_common.logit({
"level" : "EXCEPTION",
"message": f"{jail} not found!"
}, exit_on_error=True)
# Looks like foo/iocage/jails/df0ef69a-57b6-4480-b1f8-88f7b6febbdf@BAR
conf = ioc_json.IOCJson(path).json_load()
if conf["template"] == "yes":
target = f"{pool}/iocage/templates/{uuid}@{name}"
else:
target = f"{pool}/iocage/jails/{uuid}@{name}"
try:
su.check_call(["zfs", "destroy", "-r", "-f", target])
ioc_common.logit({
"level" : "INFO",
"message": f"Snapshot: {target} destroyed."
})
except su.CalledProcessError as err:
ioc_common.logit({
"level" : "EXCEPTION",
"message": f"{err}"
}, exit_on_error=True)
exit(1)
| [
2,
15069,
357,
66,
8,
1946,
12,
5539,
11,
1312,
420,
496,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
2297,
396,
3890,
290,
779,
287,
2723,
290,
13934,
5107,
11,
351,
393,
1231,
198,
2,
17613,
11,
389,
10431,
4955,
326,
262,
... | 2.405765 | 1,353 |