content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
"""Code for finding content."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import abc
import collections
import os
from ... import types as t
from ...util import (
ANSIBLE_SOURCE_ROOT,
)
from .. import (
PathProvider,
)
class Layout:
"""Description of content locations and helper methods to access content."""
def all_files(self, include_symlinked_directories=False): # type: (bool) -> t.List[str]
"""Return a list of all file paths."""
if include_symlinked_directories:
return self.__paths
return self.__files
def walk_files(self, directory, include_symlinked_directories=False): # type: (str, bool) -> t.List[str]
"""Return a list of file paths found recursively under the given directory."""
if include_symlinked_directories:
tree = self.__paths_tree
else:
tree = self.__files_tree
parts = directory.rstrip(os.sep).split(os.sep)
item = get_tree_item(tree, parts)
if not item:
return []
directories = collections.deque(item[0].values())
files = list(item[1])
while directories:
item = directories.pop()
directories.extend(item[0].values())
files.extend(item[1])
return files
def get_dirs(self, directory): # type: (str) -> t.List[str]
"""Return a list directory paths found directly under the given directory."""
parts = directory.rstrip(os.sep).split(os.sep)
item = get_tree_item(self.__files_tree, parts)
return [os.path.join(directory, key) for key in item[0].keys()] if item else []
def get_files(self, directory): # type: (str) -> t.List[str]
"""Return a list of file paths found directly under the given directory."""
parts = directory.rstrip(os.sep).split(os.sep)
item = get_tree_item(self.__files_tree, parts)
return item[1] if item else []
class ContentLayout(Layout):
"""Information about the current Ansible content being tested."""
@property
def prefix(self): # type: () -> str
"""Return the collection prefix or an empty string if not a collection."""
if self.collection:
return self.collection.prefix
return ''
@property
def module_path(self): # type: () -> t.Optional[str]
"""Return the path where modules are found, if any."""
return self.plugin_paths.get('modules')
@property
def module_utils_path(self): # type: () -> t.Optional[str]
"""Return the path where module_utils are found, if any."""
return self.plugin_paths.get('module_utils')
@property
def module_utils_powershell_path(self): # type: () -> t.Optional[str]
"""Return the path where powershell module_utils are found, if any."""
if self.is_ansible:
return os.path.join(self.plugin_paths['module_utils'], 'powershell')
return self.plugin_paths.get('module_utils')
@property
def module_utils_csharp_path(self): # type: () -> t.Optional[str]
"""Return the path where csharp module_utils are found, if any."""
if self.is_ansible:
return os.path.join(self.plugin_paths['module_utils'], 'csharp')
return self.plugin_paths.get('module_utils')
class LayoutMessages:
"""Messages generated during layout creation that should be deferred for later display."""
class CollectionDetail:
"""Details about the layout of the current collection."""
class LayoutProvider(PathProvider):
"""Base class for layout providers."""
PLUGIN_TYPES = (
'action',
'become',
'cache',
'callback',
'cliconf',
'connection',
'doc_fragments',
'filter',
'httpapi',
'inventory',
'lookup',
'module_utils',
'modules',
'netconf',
'shell',
'strategy',
'terminal',
'test',
'vars',
)
@abc.abstractmethod
def create(self, root, paths): # type: (str, t.List[str]) -> ContentLayout
"""Create a layout using the given root and paths."""
def paths_to_tree(paths): # type: (t.List[str]) -> t.Tuple(t.Dict[str, t.Any], t.List[str])
"""Return a filesystem tree from the given list of paths."""
tree = {}, []
for path in paths:
parts = path.split(os.sep)
root = tree
for part in parts[:-1]:
if part not in root[0]:
root[0][part] = {}, []
root = root[0][part]
root[1].append(path)
return tree
def get_tree_item(tree, parts): # type: (t.Tuple(t.Dict[str, t.Any], t.List[str]), t.List[str]) -> t.Optional[t.Tuple(t.Dict[str, t.Any], t.List[str])]
"""Return the portion of the tree found under the path given by parts, or None if it does not exist."""
root = tree
for part in parts:
root = root[0].get(part)
if not root:
return None
return root
| [
37811,
10669,
329,
4917,
2695,
526,
15931,
198,
6738,
11593,
37443,
834,
1330,
357,
48546,
62,
11748,
11,
7297,
11,
3601,
62,
8818,
8,
198,
834,
4164,
330,
31172,
834,
796,
2099,
198,
198,
11748,
450,
66,
198,
11748,
17268,
198,
11748... | 2.437801 | 2,074 |
import torch
import os
from datetime import datetime
TODAY = datetime.today().strftime('%Y%m%d')
class Saver(object):
"""
Log, Tensorboard, Checkpoint ์ ์ฅ์ ์ํ Code
1๋ฒ ์คํ ํ ๋๋ง๋ค ์คํ๋ ๋ ์ง๋ฅผ ๊ธฐ์ค์ผ๋ก ํด๋๊ฐ ์์ฑ๋๋ฉฐ
ํด๋น ํด๋ ๋ด๋ถ์๋, Logํ์ผ, Checkpointํ์ผ, Tensorboardํ์ผ์ด ์์ฑ๋๊ฒ ๋๋ค.
"""
| [
11748,
28034,
198,
11748,
28686,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
198,
51,
3727,
4792,
796,
4818,
8079,
13,
40838,
22446,
2536,
31387,
10786,
4,
56,
4,
76,
4,
67,
11537,
628,
198,
4871,
311,
8770,
7,
15252,
2599,
198,
2... | 1.220833 | 240 |
#!/usr/bin/python
from configparser import ConfigParser
from osgeo import ogr
import psycopg2
import psycopg2.extensions
from psycopg2.extras import LoggingConnection, LoggingCursor
import logging
import time
import pandas as pd
import os
import linecache
import sys
import csv
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
import re
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
time_total = 0
cmpStats = {}
# MyLoggingCursor simply sets self.timestamp at start of each query
# MyLogging Connection:
# a) calls MyLoggingCursor rather than the default
# b) adds resulting execution (+ transport) time via filter()
if __name__ == "__main__":
queryCompare()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
6738,
4566,
48610,
1330,
17056,
46677,
198,
6738,
28686,
469,
78,
1330,
267,
2164,
198,
11748,
17331,
22163,
70,
17,
198,
11748,
17331,
22163,
70,
17,
13,
2302,
5736,
198,
6738,
17331,
22163,
... | 3.193133 | 233 |
# Generated by Django 2.0.4 on 2018-04-28 15:54
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
362,
13,
15,
13,
19,
319,
2864,
12,
3023,
12,
2078,
1315,
25,
4051,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
... | 2.818182 | 44 |
import sys, os, argparse
from io import BytesIO
import torch
import numpy as np
from scipy.io.wavfile import write
from flask import Flask, render_template, request, make_response
# insert python path to allow imports from parent dirs
#sys.path.append(os.path.dirname(__file__))
sys.path.append(os.getcwd())
# SpeedySpeech imports
from hparam import HPStft, HPText
from utils.text import TextProcessor
from functional import mask
from speedyspeech import SpeedySpeech
from melgan.model.generator import Generator
from melgan.utils.hparams import HParam
from functional import mask
app = Flask(__name__)
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
args = get_args()
speedyspeech = SpeedySpeechInference(
args.speedyspeech_checkpoint,
args.melgan_checkpoint,
args.device
)
@app.route('/')
@app.route('/synt/<text>',methods=['GET'])
app.run(debug=True)
| [
11748,
25064,
11,
28686,
11,
1822,
29572,
198,
6738,
33245,
1330,
2750,
4879,
9399,
198,
198,
11748,
28034,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
629,
541,
88,
13,
952,
13,
45137,
7753,
1330,
3551,
198,
6738,
42903,
1330,
46947... | 2.936242 | 298 |
from collections import OrderedDict
from django.conf import settings
from django.utils.translation import gettext_lazy as _
from rest_framework import serializers
from rest_framework.exceptions import ValidationError
# copy-pasted from https://github.com/City-of-Helsinki/kerrokantasi/blob/2c26bf3ee9ac4fdc88aefabd7d0c4e73f4d3707d/democracy/views/utils.py#L257 # noqa
class TranslatableSerializer(serializers.Serializer):
"""
A serializer for translated fields.
translated_fields must be declared in the Meta class.
By default, translation languages obtained from settings, but can be overriden
by defining translation_lang in the Meta class.
"""
def validate(self, data):
"""
Add a custom validation for translated fields.
"""
validated_data = super().validate(data)
errors = OrderedDict()
for field in self.Meta.translated_fields:
try:
self._validate_translated_field(field, data.get(field, None))
except ValidationError as e:
errors[field] = e.detail
if errors:
raise ValidationError(errors)
return validated_data
def save(self, **kwargs):
"""
Extract the translations and save them after main object save.
"""
translated_data = self._pop_translated_data()
if not self.instance:
# forces the translation to be created, since the object cannot be saved without
self.validated_data[self.Meta.translated_fields[0]] = ''
instance = super(TranslatableSerializer, self).save(**kwargs)
self.save_translations(instance, translated_data)
instance.save()
return instance
def _pop_translated_data(self):
"""
Separate data of translated fields from other data.
"""
translated_data = {}
for meta in self.Meta.translated_fields:
translations = self.validated_data.pop(meta, {})
if translations:
translated_data[meta] = translations
return translated_data
def save_translations(self, instance, translated_data):
"""
Save translation data into translation objects.
"""
for field in self.Meta.translated_fields:
translations = {}
if not self.partial:
translations = {lang_code: '' for lang_code in self.Meta.translation_lang}
translations.update(translated_data.get(field, {}))
for lang_code, value in translations.items():
translation = instance._get_translated_model(lang_code, auto_create=True)
setattr(translation, field, value)
instance.save_translations()
| [
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
26791,
13,
41519,
1330,
651,
5239,
62,
75,
12582,
355,
4808,
198,
6738,
1334,
62,
30604,
1330,
11389,
11341,
198,... | 2.53407 | 1,086 |
"""
Module: xor.py
Created by alvif@usagi
on 20/04/21
"""
| [
37811,
198,
26796,
25,
2124,
273,
13,
9078,
198,
41972,
416,
435,
85,
361,
31,
385,
18013,
220,
198,
261,
1160,
14,
3023,
14,
2481,
198,
37811,
198
] | 2.107143 | 28 |
import sqlite3 as sql
import os
import sys
import logging
# bmVerify(['final_r7', 'final_r8'], filepath="/home/ysun/disambig/newcode/all/", outdir = "/home/ayu/results_v2/")
# Text Files
txt_file = 'patentlist.txt'
opened_file = open(txt_file, 'U')
log_file = 'benchmark_results.log'
# Logging
logging.basicConfig(filename=log_file, level=logging.DEBUG)
open(log_file, "w")
# Set Up SQL Connections
con = sql.connect('/test/goldstandard/invnum_N_zardoz_with_invpat.sqlite3')
with con:
con_cur = con.cursor()
logging.info("Beginning to query database")
con_cur.execute("CREATE INDEX IF NOT EXISTS index_invnum ON invpat (Invnum)");
con_cur.execute("CREATE INDEX IF NOT EXISTS index_lastname ON invpat (Lastname)");
con_cur.execute("CREATE INDEX IF NOT EXISTS index_firstname ON invpat (Firstname)");
count = 0
errors = 0
success = 0
while True:
line_read = opened_file.readline()
# print line_read
if not line_read:
print "EXITING"
break
count = count + 1
if count%100 == 0:
print "starting patent", count
split_lines = line_read.split(', ')
# Strip out weird characters/formatting
# Need to add leading "0" to Patent if not Design/Util/etc..
patent_to_match = split_lines[0].strip(' \t\n\r')
if len(patent_to_match) == 7:
patent_to_match = "0" + patent_to_match
last_name = split_lines[1].strip(' \t\n\r')
first_name = split_lines[2].strip(' \t\n\r')
# print patent_to_match, last_name, first_name
con_cur.execute("SELECT Patent FROM invpat WHERE (Lastname = \"%s\" and Firstname = \"%s\");" % (last_name, first_name))
patents_matched_from_SQL = con_cur.fetchall()
match_found = False
for patent_match in patents_matched_from_SQL:
# print patent_match[0]
# print patent_to_match
if patent_match[0] == patent_to_match:
match_found = True
success = success + 1
if not match_found:
logging.error("Did not find a match for %s, %s, %s" % (first_name, last_name, patent_to_match))
errors = errors + 1
logging.info("Total Patents: %d" % count)
logging.info("Patents ran successfully: %d" % success)
logging.info("Patents FAILED: %d" % errors)
| [
11748,
44161,
578,
18,
355,
44161,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
18931,
198,
198,
2,
220,
275,
76,
13414,
1958,
7,
17816,
20311,
62,
81,
22,
3256,
705,
20311,
62,
81,
23,
6,
4357,
2393,
6978,
35922,
11195,
14,
89... | 2.268727 | 1,068 |
from ..core import Blob, Structure, Comment, Package
Package.diff = l10npackage_diff
Package.apply_diff = l10npackage_apply_diff
| [
6738,
11485,
7295,
1330,
1086,
672,
11,
32522,
11,
18957,
11,
15717,
198,
198,
27813,
13,
26069,
796,
300,
940,
77,
26495,
62,
26069,
198,
198,
27813,
13,
39014,
62,
26069,
796,
300,
940,
77,
26495,
62,
39014,
62,
26069,
198
] | 3.195122 | 41 |
"""
Read file into texts and calls.
It's ok if you don't understand how to read files.
"""
import csv
with open('texts.csv', 'r') as f:
reader = csv.reader(f)
texts = list(reader)
with open('calls.csv', 'r') as f:
reader = csv.reader(f)
calls = list(reader)
"""
TASK 3:
(080) is the area code for fixed line telephones in Bangalore.
Fixed line numbers include parentheses, so Bangalore numbers
have the form (080)xxxxxxx.)
Part A: Find all of the area codes and mobile prefixes called by people
in Bangalore.
- Fixed lines start with an area code enclosed in brackets. The area
codes vary in length but always begin with 0.
- Mobile numbers have no parentheses, but have a space in the middle
of the number to help readability. The prefix of a mobile number
is its first four digits, and they always start with 7, 8 or 9.
- Telemarketers' numbers have no parentheses or space, but they start
with the area code 140.
Print the answer as part of a message:
"The numbers called by people in Bangalore have codes:"
<list of codes>
The list of codes should be print out one per line in lexicographic order with no duplicates.
Part B: What percentage of calls from fixed lines in Bangalore are made
to fixed lines also in Bangalore? In other words, of all the calls made
from a number starting with "(080)", what percentage of these calls
were made to a number also starting with "(080)"?
Print the answer as a part of a message::
"<percentage> percent of calls from fixed lines in Bangalore are calls
to other fixed lines in Bangalore."
The percentage should have 2 decimal digits
"""
def GetAllAreaCodes(calls):
'''
Returns all the Area Codes in the form of list of Strings
Returns the total number of calls from land line to land line
Returns the total numner of calls from from landline
'''
tele = set()
num = 0
tot = 0
for records in calls:
if records[0].startswith('(080)'):
if records[1].startswith('('):
tele.add((records[1][1:].split(")")[0]))
elif records[1][0] in ['9','8','7']:
tele.add((records[1].split(" ")[0][:4]))
if records[1].startswith('(080)'):
num += 1
tot +=1
tele.add('140')
return tele,num,tot
if __name__ == "__main__":
tele , TotalLandlineCalls , TotalLineCalls = GetAllAreaCodes(calls)
print("The numbers called by people in Bangalore have codes:")
print(*sorted(set(tele)),sep='\n')
tot =0
num=0
for records in calls:
if records[0].startswith('(080)'):
if records[1].startswith('(080)'):
num += 1
tot +=1
print('{:.2f} percent of calls from fixed lines in Bangalore are calls to other fixed lines in Bangalore.'.format(TotalLandlineCalls/TotalLineCalls*100))
| [
37811,
198,
5569,
2393,
656,
13399,
290,
3848,
13,
198,
1026,
338,
12876,
611,
345,
836,
470,
1833,
703,
284,
1100,
3696,
13,
198,
37811,
198,
11748,
269,
21370,
198,
198,
4480,
1280,
10786,
5239,
82,
13,
40664,
3256,
705,
81,
11537,
... | 3.127854 | 876 |
from sympy.liealgebras.cartan_type import CartanType, Standard_Cartan
| [
6738,
10558,
88,
13,
14485,
282,
469,
1671,
292,
13,
26674,
272,
62,
4906,
1330,
13690,
272,
6030,
11,
8997,
62,
43476,
272,
201,
198
] | 2.84 | 25 |
# -*- coding: utf-8 -*-
"""
F5 BIG-IQ auth plugin for HTTPie.
"""
import requests
from requests_f5auth import XF5Auth
from httpie.plugins import AuthPlugin
__version__ = '0.0.6'
__author__ = 'ivan mecimore'
__license__ = 'MIT'
class F5AuthPlugin(AuthPlugin):
"""Plugin registration"""
name = 'X-F5-Auth-Token auth'
auth_type = 'xf5'
description = 'Authenticate using an X-F5-Auth-Token'
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
37,
20,
26746,
12,
33866,
6284,
13877,
329,
14626,
494,
13,
198,
37811,
198,
11748,
7007,
198,
6738,
7007,
62,
69,
20,
18439,
1330,
1395,
37,
20,
3051... | 2.625806 | 155 |
#
# A simple Evernote API demo script that lists all notebooks in the user's
# account and creates a simple test note in the default notebook.
#
# Before running this sample, you must fill in your Evernote developer token.
#
# To run (Unix):
# export PYTHONPATH=../../lib; python EDAMTest.py
#
import os
os.environ['PYTHONPATH'] = '/Users/zhanghao/workspace/git/FuckYinxiang/lib'
import sys
sys.path.append('/Users/zhanghao/workspace/git/FuckYinxiang/lib')
#from PIL import Image
import io
import hashlib
import binascii
import evernote.edam.userstore.constants as UserStoreConstants
import evernote.edam.type.ttypes as Types
import evernote.edam.notestore.NoteStore as NoteStore
from evernote.api.client import EvernoteClient
import xml.etree.ElementTree as ET
from lxml import etree
from io import StringIO, BytesIO
from convert import *
import html2text
if __name__ == '__main__':
import sys
auth_token = sys.argv[1]
yx = FuckYinxiang(auth_token)
#note_guid = yx.get_note_guid_bytitle("caller type recall precision data runtime")
note_guid = yx.get_note_guid_bytitle("Deep convolutional neural networks for accurate somatic mutation detection")
print("note guid: ", note_guid)
if note_guid:
yx.process_note(note_guid)
print("Done")
| [
2,
198,
2,
317,
2829,
412,
933,
1258,
7824,
13605,
4226,
326,
8341,
477,
43935,
287,
262,
2836,
338,
198,
2,
1848,
290,
8075,
257,
2829,
1332,
3465,
287,
262,
4277,
20922,
13,
198,
2,
198,
2,
7413,
2491,
428,
6291,
11,
345,
1276,
... | 2.944056 | 429 |
self.description = "usbutils case study: force stays, epoch now in local db"
sp = pmpkg("usbutils", "1:002-1")
self.addpkg2db("sync", sp)
lp = pmpkg("usbutils", "1:001-1")
self.addpkg2db("local", lp)
self.args = "-Su"
self.addrule("PACMAN_RETCODE=0")
self.addrule("PKG_VERSION=usbutils|1:002-1")
| [
944,
13,
11213,
796,
366,
385,
4360,
4487,
1339,
2050,
25,
2700,
14768,
11,
36835,
783,
287,
1957,
20613,
1,
198,
198,
2777,
796,
279,
3149,
10025,
7203,
385,
4360,
4487,
1600,
366,
16,
25,
21601,
12,
16,
4943,
198,
944,
13,
2860,
... | 2.307692 | 130 |
'''
@author: oluiscabral
'''
from builders.simple_builder import SimpleBuilder
from scrapers.interfaces.scraper_component import ScraperComponent
from data_structure.data_ref import DataRef
from helpers.config import Config
from typing import Set
from data_structure.data import Data
if __name__ == '__main__':
builder = SimpleBuilder()
main(builder)
| [
7061,
6,
198,
31,
9800,
25,
267,
2290,
2304,
397,
1373,
198,
7061,
6,
198,
6738,
31606,
13,
36439,
62,
38272,
1330,
17427,
32875,
198,
6738,
15881,
364,
13,
3849,
32186,
13,
1416,
38545,
62,
42895,
1330,
1446,
38545,
21950,
198,
6738,... | 3.306306 | 111 |
import json
import os
import pytest
import scan_websites_constants
from factories import (
A11yReportFactory,
OrganisationFactory,
ScanFactory,
ScanIgnoreFactory,
ScanTypeFactory,
SecurityReportFactory,
TemplateFactory,
TemplateScanFactory,
)
from models.A11yReport import A11yReport
from models.A11yViolation import A11yViolation
from models.SecurityReport import SecurityReport
from models.SecurityViolation import SecurityViolation
from pub_sub.pub_sub import AvailableScans
from storage import storage
from unittest.mock import MagicMock, patch, call
@patch("storage.storage.log")
@patch("storage.storage.get_session")
@patch("storage.storage.log")
@patch("storage.storage.get_session")
@patch("storage.storage.log")
@patch("storage.storage.get_object")
@patch("storage.storage.log")
@patch("storage.storage.get_object")
@patch("storage.storage.log")
@patch("storage.storage.get_object")
@patch("storage.storage.get_object")
@patch("storage.storage.store_axe_core_record")
@patch.dict(os.environ, {"AXE_CORE_REPORT_DATA_BUCKET": "axe_core"}, clear=True)
@patch("storage.storage.db_session")
@patch.dict(os.environ, {"AXE_CORE_REPORT_DATA_BUCKET": "axe_core"}, clear=True)
@patch("storage.storage.get_object")
@patch("storage.storage.store_owasp_zap_record")
@patch.dict(os.environ, {"OWASP_ZAP_REPORT_DATA_BUCKET": "owasp_zap"}, clear=True)
@patch("storage.storage.db_session")
@patch.dict(os.environ, {"OWASP_ZAP_REPORT_DATA_BUCKET": "owasp_zap"}, clear=True)
@patch("storage.storage.get_object")
@patch("storage.storage.store_nuclei_record")
@patch.dict(os.environ, {"NUCLEI_REPORT_DATA_BUCKET": "nuclei"}, clear=True)
@patch("storage.storage.db_session")
@patch.dict(os.environ, {"NUCLEI_REPORT_DATA_BUCKET": "nuclei"}, clear=True)
| [
11748,
33918,
198,
11748,
28686,
198,
11748,
12972,
9288,
198,
11748,
9367,
62,
732,
1443,
2737,
62,
9979,
1187,
198,
198,
6738,
17590,
1330,
357,
198,
220,
220,
220,
317,
1157,
88,
19100,
22810,
11,
198,
220,
220,
220,
30801,
22810,
... | 2.798151 | 649 |
import helper
| [
11748,
31904,
198
] | 4.666667 | 3 |
"""
ๆๅบๅๆฐ๏ผ
id ๆ นๆฎ id ๆฅๅๅบ
-id ๆ นๆฎ id ๆฅ้ๅบ
"""
from drf_yasg import openapi
order_param = openapi.Parameter(
name='order', in_=openapi.IN_QUERY,
description='order by', type=openapi.TYPE_STRING
)
order_params = [
order_param
]
| [
37811,
198,
162,
236,
240,
41753,
237,
20998,
224,
46763,
108,
171,
120,
234,
198,
312,
10545,
254,
117,
162,
235,
106,
4686,
10545,
251,
98,
39355,
229,
41753,
237,
198,
12,
312,
10545,
254,
117,
162,
235,
106,
4686,
10545,
251,
98... | 1.829457 | 129 |
from django.views.generic import ListView, DetailView
from django.views.generic.edit import UpdateView, DeleteView, CreateView
from django.urls import reverse_lazy
from django.contrib.auth.mixins import LoginRequiredMixin
from django.core.exceptions import PermissionDenied
from .models import CustomUser
from rosters.models import Role
| [
6738,
42625,
14208,
13,
33571,
13,
41357,
1330,
7343,
7680,
11,
42585,
7680,
198,
6738,
42625,
14208,
13,
33571,
13,
41357,
13,
19312,
1330,
10133,
7680,
11,
23520,
7680,
11,
13610,
7680,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
957... | 3.688172 | 93 |
"""
ๆไปถๅค็ๅฝๆฐ
"""
import os
print("ๆไปถๅคงๅฐ", os.path.getsize("../day03 data/my.log"))
print("ๆไปถๅคงๅฐ", os.path.getsize("../.."))
print("ๆไปถๅ่กจ", os.listdir(".."))
print("ๆไปถๆฏๅฆๅญๅจ", os.path.exists("../day03 data/my.log"))
print("ๆไปถ็ฑปๅ", os.path.isfile("../day03 data/my.log"))
# ๆไปถๅคงๅฐ 299
# ๆไปถๅคงๅฐ 448
# ๆไปถๅ่กจ ['.DS_Store', 'day03 data', 'day02_Linux', 'day01 Linux', '็ฌฌไธๆฌกๅจๆต', 'day04 osๆจกๅ ๆญฃๅ่กจ่พพๅผ']
# ๆไปถๆฏๅฆๅญๅจ True
# ๆไปถ็ฑปๅ True
| [
37811,
198,
220,
220,
220,
10545,
244,
229,
20015,
114,
13783,
226,
49426,
228,
49035,
121,
46763,
108,
198,
37811,
198,
11748,
28686,
198,
198,
4798,
7203,
23877,
229,
20015,
114,
32014,
22887,
237,
1600,
28686,
13,
6978,
13,
11407,
10... | 1.49635 | 274 |
# Copyright (c) 2010-2021 openpyxl
from openpyxl.descriptors.serialisable import Serialisable
from openpyxl.descriptors import (
Integer,
Bool,
Sequence,
)
PageBreak = RowBreak
| [
2,
15069,
357,
66,
8,
3050,
12,
1238,
2481,
1280,
9078,
87,
75,
198,
198,
6738,
1280,
9078,
87,
75,
13,
20147,
1968,
669,
13,
46911,
43942,
1330,
23283,
43942,
198,
6738,
1280,
9078,
87,
75,
13,
20147,
1968,
669,
1330,
357,
198,
2... | 2.708333 | 72 |
# Thresholding Test
import cv2
import numpy as np
from matplotlib import pyplot as plt
img = cv2.imread('gradient.png',0)
| [
2,
536,
10126,
278,
6208,
201,
198,
201,
198,
11748,
269,
85,
17,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
6738,
2603,
29487,
8019,
1330,
12972,
29487,
355,
458,
83,
201,
198,
201,
198,
9600,
796,
269,
85,
17,
13,
320,
9... | 2.5 | 52 |
from owm_api_tests.common import api_test
| [
6738,
12334,
76,
62,
15042,
62,
41989,
13,
11321,
1330,
40391,
62,
9288,
628
] | 3.071429 | 14 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from Foundation import objc
from Foundation import NSBundle
from AppKit import NSImage
haskellBundleIdentifier = 'org.purl.net.mkhl.haskell'
def iconForName(name):
"""Return the NSImage instance representing a `name` item."""
bundle = NSBundle.bundleWithIdentifier_(haskellBundleIdentifier)
imgpath = bundle.pathForResource_ofType_(name, 'png')
img = NSImage.alloc().initWithContentsOfFile_(imgpath)
# Autoreleasing the image seems to randomly crash Espresso.
# img.autorelease()
return img
class HaskellModuleItem(objc.lookUpClass('ESBaseItem')):
"""Itemizer for modules"""
class HaskellTypeItem(objc.lookUpClass('ESBaseItem')):
"""Itemizer for datatypes"""
class HaskellFunctionItem(objc.lookUpClass('ESBaseItem')):
"""Itemizer for functions"""
pass
class HaskellCodeBlockItem(objc.lookUpClass('ESCodeBlockItem')):
"""Itemizer for code blocks"""
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
5693,
1330,
26181,
66,
198,
6738,
5693,
1330,
10896,
33,
31249,
198,
6738,
2034,
20827,
1330,
10896,
5159,
1... | 2.886228 | 334 |
from pprint import pprint
alphabet = {"a":0,"b":0,"c":0,"d":0,"e":0,"f":0,"g":0,"h":0,"i":0,"j":0,\
"k":0,"l":0,"m":0,"n":0,"o":0,"p":0,"q":0,"r":0,"s":0,"t":0,"u":0,"v":0,\
"w":0,"x":0,"y":0,"z":0}
alphaFrecuency = {"a":0,"b":0,"c":0,"d":0,"e":0,"f":0,"g":0,"h":0,"i":0,"j":0,\
"k":0,"l":0,"m":0,"n":0,"o":0,"p":0,"q":0,"r":0,"s":0,"t":0,"u":0,"v":0,\
"w":0,"x":0,"y":0,"z":0}
key = {'a':'n', 'b':'o', 'c':'p', 'd':'q', 'e':'r', 'f':'s', 'g':'t', 'h':'u',
'i':'v', 'j':'w', 'k':'x', 'l':'y', 'm':'z', 'n':'a', 'o':'b', 'p':'c',
'q':'d', 'r':'e', 's':'f', 't':'g', 'u':'h', 'v':'i', 'w':'j', 'x':'k',
'y':'l', 'z':'m', 'A':'N', 'B':'O', 'C':'P', 'D':'Q', 'E':'R', 'F':'S',
'G':'T', 'H':'U', 'I':'V', 'J':'W', 'K':'X', 'L':'Y', 'M':'Z', 'N':'A',
'O':'B', 'P':'C', 'Q':'D', 'R':'E', 'S':'F', 'T':'G', 'U':'H', 'V':'I',
'W':'J', 'X':'K', 'Y':'L', 'Z':'M'}
print (alphaFrecuency)
# 16 filter_long_words
# 17 is_palindrome()
# 18 is pangram
### 19
# 20 Swedish Translator
#21 Frecuency
# 22 Decoder/Encoder
# 23 correct
# 24 Third person
# 25 Ign form
########################################
print ("16.")
print (filter_long_words(["Apple","Pen","Pinneapple","Uh"],4),"\n")
print ("17.")
print (is_palindrome("Ana"),"\n")
print ("18.")
print (is_pangram("The quick brown fox jumps over the lazy dog.\n"))
print ("19.")
for i in range(99,0,-1):
print ("{0} bottles of beer on the wall, {0} bottles of beer.\
\nTake one down, pass it around, {1} bottles of beer on the wall.".format\
(i,i-1),"\n")
print ("20.")
print (translate("Have a merry christmas and a happy new year"),"\n")
print ("21.")
pprint (char_freq("abcabbbababwuidksakjsjksjkskajakjscabcabcabcbacbabcabcabaabaabcbbabcbaccbab"))
print ("22.")
print ("Pnrfne pvcure? V zhpu cersre Pnrfne fnynq!\n",decoder\
("Pnrfne pvcure? V zhpu cersre Pnrfne fnynq!"),"\n")
print ("23.")
print (correct("This is very funny and cool.Indeed!"),"\n")
print ("24.")
print (make_3sg_form("Try"))
print (make_3sg_form("Bush"))
print (make_3sg_form("Run"))
print (make_3sg_form("Fix"),"\n")
print ("25.")
print (make_ing_form("lie"))
print (make_ing_form("see"))
print (make_ing_form("move"))
print (make_ing_form("hug"))
| [
6738,
279,
4798,
1330,
279,
4798,
198,
198,
17307,
8380,
796,
19779,
64,
1298,
15,
553,
65,
1298,
15,
553,
66,
1298,
15,
553,
67,
1298,
15,
553,
68,
1298,
15,
553,
69,
1298,
15,
553,
70,
1298,
15,
553,
71,
1298,
15,
553,
72,
1... | 1.987544 | 1,124 |
from ply.yacc import yacc
from ast.parser_ast import (
AssignNode,
AttrDeclarationNode,
BlocksNode,
BooleanNode,
CaseNode,
CaseOptionNode,
ClassDeclarationNode,
ComplementNode,
ConditionalNode,
DivNode,
EqualsNode,
InstantiateNode,
IntNode,
IsVoidNode,
LessNode,
LessOrEqualNode,
LetNode,
LoopNode,
MethodCallNode,
MethodDeclarationNode,
MinusNode,
NotNode,
PlusNode,
ProgramNode,
StarNode,
StringNode,
VarDeclarationNode,
VariableNode,
)
from parsing.errors import SyntacticError
| [
6738,
35960,
13,
88,
4134,
1330,
331,
4134,
198,
6738,
6468,
13,
48610,
62,
459,
1330,
357,
198,
220,
220,
220,
2195,
570,
19667,
11,
198,
220,
220,
220,
3460,
81,
37835,
10186,
19667,
11,
198,
220,
220,
220,
35111,
19667,
11,
198,
... | 2.404858 | 247 |
from PyQt5.QtWidgets import QWidget, QHBoxLayout, QVBoxLayout, \
QLabel, QSizePolicy, QSpinBox
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtGui import QColor
from app.utilities import utils
from app.extensions.custom_gui import PropertyBox
from app.extensions.color_icon import ColorIcon
from app.extensions.color_slider import RGBSlider, HSVSlider
| [
6738,
9485,
48,
83,
20,
13,
48,
83,
54,
312,
11407,
1330,
1195,
38300,
11,
1195,
39,
14253,
32517,
11,
1195,
53,
14253,
32517,
11,
3467,
201,
198,
220,
220,
220,
1195,
33986,
11,
1195,
10699,
36727,
11,
1195,
4561,
259,
14253,
201,
... | 2.678832 | 137 |
import pytest
from commands.turn_time import TurnTime
from subsystems.drivetrain import Drivetrain
from util.stopwatch import Stopwatch
"""
hal_data['pwm'] looks like this:
[{
'zero_latch': False,
'initialized': False,
'raw_value': 0,
'value': 0,
'period_scale': None,
'type': None
}, {
'zero_latch': True,
'initialized': True,
'raw_value': 1011,
'value': 0.0,
'period_scale': 0,
'type': 'talon'
},...]
"""
@pytest.fixture(scope="function")
@pytest.fixture(scope="function")
@pytest.mark.parametrize("speed,left_ex_speed,right_ex_speed", [
(0.0, 0.0, 0.0),
(0.5, -0.5306122448979592, -0.5306122448979592),
(1.0, -1.0, -1.0),
(-0.5, 0.5306122448979592, 0.5306122448979592),
(-1.0, 1.0, 1.0),
])
@pytest.mark.parametrize("duration,timeout, speed,left_ex_speed,right_ex_speed", [
(0.5, 5.0, 0.5, -0.5306122448979592, -0.5306122448979592),
(0.5, 5.0, -0.5, 0.5306122448979592, 0.5306122448979592),
(2.0, 15.0, 1.0, -1.0, -1.0),
# (5.0, 1.0, 1.0, 1.0, -1.0), # Timeouts don't seem to work in testing
])
| [
11748,
12972,
9288,
198,
6738,
9729,
13,
15344,
62,
2435,
1330,
6756,
7575,
198,
6738,
39335,
82,
13,
19472,
27432,
1330,
9974,
27432,
198,
6738,
7736,
13,
11338,
8340,
1330,
13707,
8340,
628,
198,
37811,
198,
14201,
62,
7890,
17816,
79... | 2.061914 | 533 |
from interfaces.telegram.usibot import UsiBot
try:
UsiBot.run()
except KeyboardInterrupt:
print('Quitting')
| [
6738,
20314,
13,
660,
30536,
13,
385,
571,
313,
1330,
4021,
72,
20630,
198,
198,
28311,
25,
198,
220,
220,
220,
4021,
72,
20630,
13,
5143,
3419,
198,
16341,
31973,
9492,
3622,
25,
198,
220,
220,
220,
3601,
10786,
4507,
2535,
11537,
... | 2.72093 | 43 |
import os
import typing
import warnings
import glob
from . import tags as tags
from ..data_processing import integration as integration
def make_destination_folders(save_location: typing.Union[str, bytes, os.PathLike], optional_settings: dict = {}) -> typing.Tuple[bool,bool,bool]:
    """
    Creates destination folders for binary files (crop and bg_sub optional)

    Creates the folder save_location if it does not yet exist, then within
    save_location makes the folder 'bin' (additionally 'crop' and 'bg_sub' if
    those optional settings are True). Returns True for folders that exist or
    do not need to be created because of optional_settings.

    Parameters
    ----------
    save_location: path-like
        Path to folder in which to save the sub folders.
        If it does not exist, function will create it (as long as rest of path
        already exists).

    Optional Settings and Defaults
    ------------------------------
    save_crop: bool
        True to save intermediate cropped images (i.e. experimental video
        images cropped but not background-subtracted or binarized).
        Default is False.
    save_bg_sub: bool
        True to save background-subtracted images (i.e. experimental video
        images cropped and background-subtracted but not binarized).
        Default is False.

    Returns
    -------
    bin_exists: bool
        True if binary folder already exists, False if does not exist
    crop_exists: bool
        True if save_crop is True or crop folder already exists, False
        otherwise
    bg_sub_exists: bool
        True if save_bg_sub is True or bg_sub folder already exists, False
        otherwise
    """
    # optional_settings is only read (never mutated), so the shared mutable
    # default dict is safe here.
    settings = integration.set_defaults(optional_settings)
    save_crop = settings["save_crop"]
    save_bg_sub = settings["save_bg_sub"]
    if not os.path.isdir(save_location):
        # Makes outer save_location folder if it does not exist (parent of
        # save_location must already exist, per the docstring contract).
        os.mkdir(save_location)
    # make_folder returns False when the folder already exists, so each
    # *_exists flag is simply the negation of its return value.
    bin_exists = not make_folder(save_location, "bin")
    if save_crop:
        crop_exists = not make_folder(save_location, "crop")
    else:
        # If not save_crop, no folder is needed; report True.
        crop_exists = True
    if save_bg_sub:
        bg_sub_exists = not make_folder(save_location, "bg_sub")
    else:
        # If not save_bg_sub, no folder is needed; report True.
        bg_sub_exists = True
    # Bug fix: previously returned a list, contradicting the Tuple annotation
    # and the documented three-bool return.
    return bin_exists, crop_exists, bg_sub_exists
def make_folder(save_location: typing.Union[str, bytes, os.PathLike],folder_tag: str) -> bool:
    """
    Creates directory in save_location, returns False if already exists

    Parameters
    ----------
    save_location: path-like
        path to folder in which to save the sub folders
    folder_tag: str
        sub folder name

    Returns
    -------
    make_folder: bool
        returns True if makes directory, False if it already exists
    """
    target = os.path.join(save_location, folder_tag)
    # Guard clause: an existing directory means nothing to do.
    if os.path.isdir(target):
        return False
    os.mkdir(target)
    return True
def identify_experimental_video_folder(folder: str, fname_format: str, optional_settings: dict = {}) -> typing.Tuple[str,bool]:
    """
    Identifies if a given folder is an experimental video folder.

    Using the given fname_format, identifies if the given folder is an
    experimental video folder.

    Parameters
    ----------
    folder: str
        Folder to check if it matches the format for the experimental video.
    fname_format: str
        The format of the fname with parameter names separated
        by the deliminator specified by fname_split. Must contain the "vtype"
        tag.
        ex. "date_sampleinfo_fps_run_vtype"
    optional_settings: dict
        A dictionary of optional settings.

    Optional Settings and Defaults
    ------------------------------
    fname_split: string
        The deliminator for splitting folder/file names, used in fname_format.
        Default is "_".
    experiment_tag: string
        The tag for identifying experimental videos. May be empty ("").
        Default is "exp".

    Returns
    -------
    fname: string
        Base name of the folder if the given folder matches the pattern for an
        experimental folder, '' otherwise.
    exp_video: bool
        True if the folder matches the pattern for an experimental folder,
        False otherwise.

    Raises
    ------
    ValueError: If the given fname_format does not contain the tag "vtype."
    """
    settings = integration.set_defaults(optional_settings)
    fname_split = settings["fname_split"]
    experiment_tag = settings["experiment_tag"]
    # Checks for "vtype" tag since it is needed for further processing.
    if not tags.check_fname_format_for_tag(fname_format,"vtype",fname_split):
        # fname_format must have vtype to be able to match videos.
        raise ValueError("fname_format must contain the tag 'vtype' (video type) to identify background vs. experimental videos.")
    # Tag counts are derived from the number of deliminators plus one.
    fname_tag_count = fname_format.count(fname_split) + 1
    if experiment_tag == '':
        # If there's no experimental tag, then the fname_format has one
        # additional tag corresponding to video type (vtype).
        tag_count_expected = fname_tag_count - 1
    else:
        # If there's an experimental tag, then fname_format has the correct
        # number of tags.
        tag_count_expected = fname_tag_count
    if (folder.count(fname_split) +1) == tag_count_expected:
        # Only look at folders that have the expected number of tags
        # based on user provided filename format.
        if experiment_tag == '':
            # If there's no tag for experimental videos, then every folder
            # that has the correct number of tags is assumed to be an
            # experimental video at first.
            experiment_video = True
            # Construct fname by removing tags labeled "remove" and
            # vtype.
            # First, create a format string without vtype for the case where
            # experimental videos lack an experimental tag.
            exp_video_format = tags.remove_tag_from_fname(fname_format,fname_format,"vtype",fname_split)
            # Then remove all "remove" tags from the fname
            if tags.check_fname_format_for_tag(exp_video_format,"remove",fname_split):
                fname = tags.remove_tag_from_fname(folder,exp_video_format,"remove",fname_split)
            else:
                # If no "remove" tags, then the folder name is the fname
                fname = folder
        else:
            # If there is an experimental tag, checks the vtype matches
            # the given experimental tag. Note: only checks the first
            # time vtype appears in the fname_format.
            # If it does match, then this is an experiment_video.
            vtype = tags.get_tag_from_fname(folder,fname_format,"vtype")[0]
            if vtype == experiment_tag:
                experiment_video = True
                # Remove vtype from fname
                new_fname = tags.remove_tag_from_fname(folder,fname_format,"vtype",fname_split)
                new_format = tags.remove_tag_from_fname(fname_format,fname_format,"vtype",fname_split)
                # Remove all "remove" tags from the fname
                if tags.check_fname_format_for_tag(new_format,"remove",fname_split):
                    fname = tags.remove_tag_from_fname(new_fname,new_format,"remove",fname_split)
                else:
                    # If no "remove" tags, then the folder name without the
                    # experiment tag is the fname
                    fname = new_fname
            else:
                # If doesn't have the tag, likely a background video.
                experiment_video = False
                fname = ''
    else:
        # If doesn't have the right number of tags, not an experimental video.
        experiment_video = False
        fname = ''
    return fname, experiment_video
def identify_background_video_folder(parent_folder: typing.Union[str, bytes, os.PathLike], fname: str, fname_format: str, optional_settings: dict = {}) -> typing.Tuple[bool,str]:
    """
    Identifies a background folder that matches a given experimental fname.

    Identifies a background folder tagged with appropriate parameters such that
    it matches the given base folder name for an experimental video.

    Parameters
    ----------
    parent_folder: path-like
        Path in which to look for background video folders.
    fname: str
        The base name of the experimental video folder.
        ex. "20210929_6M-PEO_fps-25k_1"
    fname_format: str
        The format of the fname with parameter names separated
        by the deliminator specified by fname_split. Must contain the "vtype"
        tag.
        ex. "date_sampleinfo_fps_run_vtype"
    optional_settings: dict
        A dictionary of optional settings.

    Optional Settings and Defaults
    ------------------------------
    fname_split: string
        The deliminator for splitting folder/file names, used in fname_format.
        Default is "_".
    background_tag: string
        The tag for identifying background videos. May not be empty.
        Default is "bg".
    one_background: bool
        True to use one background for a group of experiments only differing by
        run number. False to pair backgrounds and experiments 1:1.
        Default is True.

    Returns
    -------
    matched_bg: bool
        True if a matching background is found, False otherwise.
    bg_folder: string
        Name of background folder if a matching one is found, '' otherwise.

    Raises
    ------
    ValueError
        If the given fname_format does not contain the tag "vtype."

    Warns
    -----
    UserWarning
        If multiple matched backgrounds are found for a given fname
        (1:1 matching mode only).
    """
    settings = integration.set_defaults(optional_settings)
    fname_split = settings["fname_split"]
    background_tag = settings["background_tag"]
    one_background = settings["one_background"]
    # Checks for "vtype" tag since it marks where background_tag is inserted.
    if not tags.check_fname_format_for_tag(fname_format,"vtype",fname_split):
        # fname_format must have vtype to be able to match videos.
        raise ValueError("fname_format must contain the tag 'vtype' (video type) to identify background vs. experimental videos.")
    # Builds the background name pattern: insert background_tag at the vtype
    # position, then wildcard any positions tagged "remove".
    if tags.check_fname_format_for_tag(fname_format,"remove",fname_split):
        no_remove_format = tags.remove_tag_from_fname(fname_format,fname_format,"remove",fname_split)
    else:
        no_remove_format = fname_format
    bg_fname = tags.insert_tag_in_fname(fname,no_remove_format,"vtype",background_tag,fname_split)
    bg_fname = tags.insert_tag_in_fname(bg_fname,fname_format,"remove","*",fname_split)
    if one_background:
        # One background per experiment group: handles two cases, a
        # background with no run number at all, and a background that still
        # carries a run number (any run is accepted).
        bg_norun_fname = tags.remove_tag_from_fname(bg_fname,fname_format,"run",fname_split)
        bg_norun_folders = glob.glob(os.path.join(parent_folder,bg_norun_fname))
        bg_run_fname = tags.replace_tag_in_fname(bg_fname,fname_format,"run","*",fname_split)
        bg_run_folders = glob.glob(os.path.join(parent_folder,bg_run_fname))
        # Combines, sorts, and de-duplicates while preserving sorted order;
        # the first entry is used.
        bg_folders = list(dict.fromkeys(sorted(bg_run_folders + bg_norun_folders)))
    else:
        # 1:1 matching: run numbers must agree exactly.
        bg_folders = sorted(glob.glob(os.path.join(parent_folder,bg_fname)))
        # Warns if there are multiple matching backgrounds.
        if len(bg_folders) > 1:
            warnings.warn("Multiple folders matched background for " + str(fname) + ". First used.", UserWarning)
    # Shared tail (previously duplicated in both branches): report the first
    # match, or no match.
    if bg_folders:
        matched_bg = True
        bg_folder = os.path.basename(bg_folders[0])
    else:
        matched_bg = False
        bg_folder = ''
    return matched_bg, bg_folder
def select_video_folders(parent_folder: typing.Union[str, bytes, os.PathLike], fname_format: str, optional_settings: dict = {}) -> typing.Tuple[list,list,list]:
    """
    Pairs experimental and background videos in a given folder.

    Iterates through every folder in a given folder, checks if the folder
    matches the pattern for an experimental video folder, looks for a matching
    background video folder if it is, and returns three matched lists, a list of
    base folder names, a list of paths to experimental video folders, and a
    list of paths to background video folders.

    Parameters
    ----------
    parent_folder: path-like
        Path in which to look for experimental and background video folder pairs.
    fname_format: str
        The format of the fname with parameter names separated
        by the deliminator specified by fname_split. Must contain the "vtype"
        tag.
        ex. "date_sampleinfo_fps_run_vtype"
    optional_settings: dict
        A dictionary of optional settings.

    Optional Settings and Defaults
    ------------------------------
    fname_split: string
        The deliminator for splitting folder/file names, used in fname_format.
        Default is "_".
    experiment_tag: string
        The tag for identifying experimental videos. May be empty ("").
        Default is "exp".
    background_tag: string
        The tag for identifying background videos. May not be empty.
        Default is "bg".
    one_background: bool
        True to use one background for a group of experiments only differing by
        run number. False to pair backgrounds and experiments 1:1.
        Default is True.

    Returns
    -------
    fnames: list of strings
        List of base folder names for each matched pair of experimental and
        background folders.
    exp_videos: list of paths
        List of paths to experimental video folders that were matched with
        backgrounds.
    bg_videos: list of paths
        List of paths to background video folders matched with exp_videos.

    Examples
    --------
    """
    ## TODO: examples in docstring
    # Checks for "vtype" before trying to identify folders.
    settings = integration.set_defaults(optional_settings)
    fname_split = settings["fname_split"]
    if not tags.check_fname_format_for_tag(fname_format,"vtype",fname_split):
        # fname_format must have vtype to be able to match videos.
        raise ValueError("fname_format must contain the tag 'vtype' (video type) to identify background vs. experimental videos.")
    fnames = []
    exp_video_folders = []
    bg_video_folders = []
    subfolders = [ f.name for f in os.scandir(parent_folder) if f.is_dir()]
    for subfolder in subfolders:
        fname, experiment_video = identify_experimental_video_folder(subfolder, fname_format, optional_settings)
        if experiment_video:
            # Tries to find a matching background video if the folder
            # appears to be an experimental video.
            matched_bg, bg_folder = identify_background_video_folder(parent_folder, fname, fname_format, optional_settings)
        else:
            # If not an experiment, then there's no background.
            matched_bg = False
            bg_folder = ''
        # If we identify an experimental video and a matched background,
        # adds the entries to the output.  Fix: logical `and` instead of
        # bitwise `&` -- clearer intent and short-circuits.
        if experiment_video and matched_bg:
            fnames.append(fname)
            exp_video_folders.append(os.path.join(parent_folder,subfolder))
            bg_video_folders.append(os.path.join(parent_folder,bg_folder))
    return fnames, exp_video_folders, bg_video_folders
| [
11748,
28686,
198,
11748,
19720,
198,
11748,
14601,
198,
11748,
15095,
198,
198,
6738,
764,
1330,
15940,
355,
15940,
198,
6738,
11485,
7890,
62,
36948,
1330,
11812,
355,
11812,
198,
198,
4299,
787,
62,
16520,
1883,
62,
11379,
364,
7,
21... | 2.676405 | 6,264 |
'''
Creates a cache with timekeeping in order to prevent vote spam
Released under MIT license, copyright 2018 Tyler Ramer
'''
import logging
import time
logger = logging.getLogger(__name__)
class TimedCache:
"""
Timed cache to reduce upvote/downvote spam. [user] cannot vote
[target] before VOTE_DELAY seconds
This functionality is easily provided by Gemfire and may be moved to that
service at some point...
"""
VOTE_DELAY = 300
    def __contains__(self, key):
        """
        True or false depending on if key is in cache

        Expired entries are purged first, so membership reflects only keys
        added within the last VOTE_DELAY seconds.
        """
        self.clean()
        return key in self.cache
def update(self, key):
"""
Updates the cache with key after cleaning it of old values
"""
self.clean()
if key not in self.cache and len(self.cache) < self.max_cache_size:
self.cache[key] = {'time_added': time.time()}
logger.debug('added to cache {} at time {}'.format(
key, time.time()))
elif key not in self.cache and len(self.cache) >= self.max_cache_size:
logger.warning('cache is full - dropping oldest entry')
self.remove_old()
self.cache[key] = {'time_added': time.time()}
def clean(self):
"""
Removes any item older than VOTE_DELAY from the cache
"""
logger.debug("cleaning cache")
drop_keys = []
for key in self.cache:
if time.time() - self.cache[key]['time_added'] > self.VOTE_DELAY:
drop_keys.append(key)
logger.debug('Marked {} ready to be dropped'.format(key))
for key in drop_keys:
self.cache.pop(key)
logger.debug('Dropped {} from cache'.format(key))
def remove_old(self):
"""
This should not generally be used - only occurs if we're actually
reaching the cache size AFTER clearing old values. Ideally, cache
and clean should be large/often enough that this function is never used
"""
oldest = None
for key in self.cache:
if oldest is None:
oldest = key
elif (self.cache[key]['time_added'] <
self.cache[oldest]['time_added']):
oldest = key
self.cache.pop(oldest)
@property
| [
7061,
6,
198,
16719,
274,
257,
12940,
351,
640,
19934,
287,
1502,
284,
2948,
3015,
18084,
198,
198,
45037,
739,
17168,
5964,
11,
6634,
2864,
14886,
371,
2382,
198,
7061,
6,
198,
198,
11748,
18931,
198,
11748,
640,
198,
198,
6404,
1362... | 2.325 | 1,000 |
from __future__ import absolute_import
from cli.utils.services import *
from cli.utils.models.user import User | [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
537,
72,
13,
26791,
13,
30416,
1330,
1635,
198,
6738,
537,
72,
13,
26791,
13,
27530,
13,
7220,
1330,
11787
] | 3.666667 | 30 |
# -*- coding: utf-8 -*-
"""
create model and predict label of origine wine
using Linear SVC(SVM Classification) of Scikit Learn
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.svm import LinearSVC
from sklearn.cross_validation import train_test_split
from sklearn import preprocessing, metrics
from mlxtend.plotting import plot_decision_regions
def normalize_split_data(data_frame, normalize_mode):
    '''Normalize and split sample data

    Input1: Original data frame
    Input2: Normalize mode number
            1: Original (any unrecognized mode also falls back to this)
            2: z score normalization
            3: Min-Max normalization
    Output1: Attributes data for training
    Output2: Attributes data for test
    Output3: Label data for training
    Output4: Label data for test
    '''
    data_label = data_frame['Class'].values
    data_attrib = data_frame[['Color intensity', 'Proline']].values
    # Apply the requested scaling; mode 1 (or any unknown mode) keeps the raw
    # attribute values, matching the previous fall-through behavior.
    if normalize_mode == 2:  # z score normalization
        sc = preprocessing.StandardScaler()
        data_attrib = sc.fit_transform(data_attrib)
    elif normalize_mode == 3:  # Min-Max normalization
        ms = preprocessing.MinMaxScaler()
        data_attrib = ms.fit_transform(data_attrib)
    # Single split call replaces four duplicated ones; parameters unchanged.
    attrib_train, attrib_test, label_train, label_test = train_test_split(
        data_attrib, data_label, test_size=0.4, random_state=3)
    return attrib_train, attrib_test, label_train, label_test
if __name__ == '__main__':
    # NOTE(review): main() is not defined anywhere in this module as shown;
    # running the script directly will raise NameError.  Confirm whether a
    # main() definition was lost or still needs to be written.
    main()
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
17953,
2746,
290,
4331,
6167,
286,
1796,
500,
8237,
198,
198,
3500,
44800,
311,
15922,
7,
50,
15996,
40984,
8,
286,
10286,
15813,
14365,
198,
37811,
198,
198... | 2.512563 | 796 |
from django.core.management.base import NoArgsCommand
from django.core.management.commands.test import Command as TestCommand
from django.core.management import call_command
| [
6738,
42625,
14208,
13,
7295,
13,
27604,
13,
8692,
1330,
1400,
42035,
21575,
198,
6738,
42625,
14208,
13,
7295,
13,
27604,
13,
9503,
1746,
13,
9288,
1330,
9455,
355,
6208,
21575,
198,
6738,
42625,
14208,
13,
7295,
13,
27604,
1330,
869,
... | 3.977273 | 44 |
import collections
import itertools
import os.path
import numpy as np
import scipy as sp
import scipy.io as sio
from patran import *
# Print a progress message every `wo` packets while scanning a patran
# neutral file (passed to the iterators below as their `writeout` argument).
wo = 1000
# Primary reader methods are:
# node_iterator: returns (in file order) the coordinates of each node
# elem_iterator: returns (in file order) the connectivities and type of each
# element
class PatranResultsReader(Reader):
    """
    Superclass for everything that reads patran-type results files.
    """
    def __init__(self, noderesults=None, elemresults=None, timestepfile=None):
        """
        noderesults -- list of nodal results structures
        elemresults -- list of element results structures
        timestepfile -- file giving the times corresponding to each step
        """
        # Bug fix: the old `=[]` mutable defaults were stored directly on
        # self, so appending to one instance's list would leak into every
        # other instance created with the default.  None sentinels avoid that.
        self.noderesults = [] if noderesults is None else noderesults
        self.elemresults = [] if elemresults is None else elemresults
        # with_results: True when at least one result structure was supplied.
        self.with_results = len(self.noderesults) + len(self.elemresults) > 0
        if timestepfile:
            self.with_times = True
            self.tf = timestepfile
        else:
            # Note: self.tf is intentionally left unset in this case,
            # matching the original behavior.
            self.with_times = False
    def node_field_iterator(self):
        """
        For each field (result column) , assemble a big numpy array.
        We're going to slurp the
        whole thing in anyway, so we may as well make it a (nfields,nnodes,nsteps)
        massive array.
        """
        return itertools.chain.from_iterable(
            FNodeTypeIt(self, f) for f in self.noderesults)
    def element_field_iterator(self):
        """
        For each field, assemble a big numpy array.  We're going to slurp the
        whole thing in anyway, so we may as well make it a (nfields,nelems,nsteps)
        massive array.
        """
        return itertools.chain.from_iterable(
            FElemTypeIt(self, f) for f in self.elemresults)
class SimpleReader(PatranResultsReader):
    """
    Read from new, simple model file and patran-type results files.
    """
    def node_iterator(self):
        """
        Return the coordinates of each node, in order of ID.
        """
        # self.nodes is the (num_nodes, 3) array built by read_text_data.
        return self.nodes
    def elem_iterator(self):
        """
        Return connectivity, etype tuples for each element, in ID order.
        """
        # itertools.izip: this module is Python 2 code (see also iteritems
        # usage elsewhere in this file).
        return itertools.izip(self.conn, self.etype)
    def eblk_iterator(self):
        """
        Return tuples consisting of the name of the block and a list of
        associated elements.
        """
        # Sort element indices by group ID so each group's elements are
        # contiguous, then walk the sorted list splitting at group changes.
        sgroups = np.array(self.group)
        inorder = np.argsort(sgroups)
        sgroups = sgroups[inorder]
        groups = []
        names = []
        # g = -1 is the "no group yet" sentinel; assumes real group IDs are
        # never -1 -- TODO confirm.
        g = -1
        ngroup = []
        for i,e in enumerate(inorder):
            if sgroups[i] != g:
                # Group boundary: flush the previous group (if any) and
                # start a new one named after its group ID.
                if len(ngroup) > 0:
                    groups.append(ngroup)
                g = sgroups[i]
                ngroup = [e]
                names.append(str(g))
            else:
                ngroup.append(e)
        # Flush the final group.
        groups.append(ngroup)
        return itertools.izip(names, groups)
    def read_text_data(self):
        """
        Read data in from the text-type files.

        The file layout this state machine expects:
          state 1: one "num_nodes num_elems" header line,
          state 2: num_nodes lines of "x y z" coordinates,
          state 3: element lines of "type group n1 n2 ... [0-padding]".
        Lines starting with '#' are comments.  self.filename must be set
        before calling -- presumably by code outside this view; confirm.
        """
        self.elements = []
        with open(self.filename, 'r') as f:
            reading = 1
            for line in f:
                line = line.strip()
                if line[0] == '#':
                    continue
                sline = line.split()
                if len(sline) == 2 and reading == 1:
                    self.num_nodes = int(sline[0])
                    self.num_elems = int(sline[1])
                    self.nodes = np.zeros((self.num_nodes,3))
                    reading = 2
                    ncount = 0
                elif len(sline) == 3 and reading == 2:
                    self.nodes[ncount] = [ float(i) for i in sline ]
                    ncount += 1
                    if ncount == self.num_nodes:
                        reading = 3
                        ecount = 0
                        self.conn = []
                        self.group = []
                        self.etype = []
                elif reading == 3:
                    # patran_types comes from the `patran` star import.
                    self.etype.append(patran_types[int(sline[0])])
                    self.group.append(int(sline[1]))
                    # Figure out where the connectivity line actually ends
                    # (connectivity is zero-padded on the right).
                    intline = [ int(i) for i in sline[:] ]
                    for i, e in reversed(list(enumerate(intline))):
                        if e != 0:
                            n = i+1
                            break
                    # Convert 1-based node IDs to 0-based indices.
                    self.conn.append([ int(i)-1 for i in sline[2:n]])
                    # Special rules for some weird elements types
                    if self.etype[-1] == "WEDGE" and n == 17:
                        # Actually a trint12, collapse nodes
                        self.conn[-1][9] = self.conn[-1][0]
                        self.conn[-1][10] = self.conn[-1][1]
                        self.conn[-1][11] = self.conn[-1][2]
                else:
                    raise ValueError("Found unknown state while reading file")
class PatranReader(PatranResultsReader):
    """
    Reads a patran neutral file + patran-type results files
    IMPORTANT NOTE: This assumes that the node and element packets of the
    neutral file are in order. We do raise an exception if this isn't
    the case.
    """
    def __init__(self, neutralfile, *args, **kwargs):
        """
        neutralfile -- filename of patran neutral file

        Remaining args/kwargs are forwarded to PatranResultsReader.__init__.
        """
        super(PatranReader, self).__init__(*args, **kwargs)
        self.neut_file = neutralfile
        # self.open() is presumably provided by a base class outside this
        # view -- confirm.
        self.open()
        self.read_opening()
    def read_opening(self):
        """
        Read the number of nodes, elements, and timesteps.
        """
        print("Reading basic data...")
        # Dimensions, nsets, and ivars are fixed
        self.dim = 3
        self.num_nsets = 0
        self.num_ivars = 0
        # Read remaining header data: packet 25 carries the title, packet 26
        # carries the node/element counts.  Stop as soon as both are seen.
        total = 0
        for packet,data in PatranNeutralIt(self.neut_file, writeout=wo):
            if packet[0] == 25:
                self.title = ''.join(data[0]).strip()
                total += 1
            elif packet[0] == 26:
                self.num_nodes = packet[4]
                self.num_elems = packet[5]
                total += 1
            if total == 2:
                break
        if total != 2:
            raise ValueError("Could not read header packets 25 and 26!")
        # Unfortunately, we don't write consistently the number of element configs
        # Read through the file, looking at element packets, data card 1, slot 2
        # to total up the configurations
        configs = set()
        total = 0
        for packet,data in PatranNeutralIt(self.neut_file, writeout=wo):
            if packet[0] == 2:
                configs.add(data[0][1])
                total += 1
                if total == self.num_elems:
                    break
        self.num_eblocks = len(configs)
        # Setup for the patran results
        self.setup_results()
    def node_iterator(self):
        """
        Return the coordinates of each node, in order of ID.
        The iterator assumes that the neutral file has sorted the node packets
        into order by ID. If not, it will raise an exception.
        """
        return PatranNodeIt(self.neut_file, self.num_nodes, writeout=wo)
    def elem_iterator(self):
        """
        Return connectivity, etype tuples for each element, in ID order.
        If the elements in the file aren't in ID order, this will raise
        an exception.
        """
        return PatranElemIt(self.neut_file, self.num_elems, writeout=wo)
    def eblk_iterator(self):
        """
        Assume elements are blocked by the **config** ID (not property ID)
        Unfortunately, all we can do is run through and add elements to
        the appropriate list.
        """
        blocks = collections.defaultdict(list)
        nelem = 0
        for packet,data in PatranNeutralIt(self.neut_file, writeout=wo):
            if packet[0] == 2:
                nelem += 1
                # packet[1] is the 1-based element ID; store 0-based.
                blocks[str(data[0][1])].append(packet[1]-1)
                if nelem == self.num_elems:
                    break
        # iteritems: Python 2 idiom, consistent with izip elsewhere here.
        return blocks.iteritems()
class ExodusIIReader(Reader):
"""
Reads from an ExodusII file.
"""
    def node_iterator(self):
        """
        Return the coordinates of each node, in the file order.
        """
        # ExodusNodeIt is presumably provided by the `patran` star import or
        # defined elsewhere in this module -- confirm.
        return ExodusNodeIt(self)
    def elem_iterator(self):
        """
        Return an iterator which will pass through each element in order
        (regardless of block) and return the connectivity
        (in terms of zero-start node numbers) and the element type (for now, the raw exodus type)
        Unfortunately, the Exodus files don't store the elements sequentially
        anywhere. Instead, it essentially stores each element block in order,
        and then the true element number must be dereferenced from a map.
        This means the iterator is a bit more complicated. It's also cheaper
        to sort the map once and get a reverse map.
        """
        return ExodusElemIt(self)
    def nset_iterator(self):
        """
        Return an iterator which returns a tuple of (name, list of nodes) for
        each node set
        """
        return ExodusNSetIt(self)
    def eblk_iterator(self):
        """
        Return an iterator which returns a tuple of (name, list of elements) for
        each element block
        """
        return ExodusEBlkIt(self)
    def node_field_iterator(self):
        """
        This one is annoying. First we need to iterate over all fields, then
        over all nodes/elements (in stored order), then over time steps. Or
        some other combination of the three.
        """
        # NOTE(review): ExodusNodeFieldsIt has no body beyond its docstring,
        # so ExodusNodeFieldsIt(self) will raise TypeError (object takes no
        # constructor arguments).  This looks like a truncated/stub
        # definition -- confirm against the original source.
        class ExodusNodeFieldsIt(object):
            """
            Returns field name, field shape, and field iterator
            """
        return ExodusNodeFieldsIt(self)
    def element_field_iterator(self):
        """
        This one is annoying. First we need to iterate over all fields, then
        over all nodes/elements (in stored order), then over time steps. Or
        some other combination of the three.
        Additionally, we have the problem of the data possibly being "masked" --
        not present for a particular element block.
        """
        # NOTE(review): ExodusElementFieldsIt has no body beyond its
        # docstring, so ExodusElementFieldsIt(self) will raise TypeError.
        # Looks like a truncated/stub definition -- confirm.
        class ExodusElementFieldsIt(object):
            """
            Returns field name, field shape, and field iterator
            """
        return ExodusElementFieldsIt(self)
    # NOTE(review): ten stacked @property decorators wrap num_ivars in nested
    # property objects, which breaks attribute access at runtime.  This looks
    # like truncation residue -- the bodies of several other properties appear
    # to be missing from this copy.  num_ivars itself needs exactly one
    # @property; confirm against the original source.
    @property
    @property
    @property
    @property
    @property
    @property
    @property
    @property
    @property
    @property
    def num_ivars(self):
        """
        Exodus cannot store integration point variables.
        """
        return 0
    @property
    def times(self):
        """
        Return the actual time step times.
        """
        # self.ncdf is presumably an open netCDF dataset backing the ExodusII
        # file; its initialization is outside this view -- confirm.
        return self.ncdf.variables['time_whole'][:]
def enum_to_blk(self, el):
"""
Take a zero-indexed *stored* element number and return its block number
(one-indexed) and offset into the block (zero-indexed)
"""
blkszs = self.eblk_sizes
blkszs.insert(0,0)
blkszs = np.cumsum(blkszs)
for i in range(len(blkszs)-1):
s = blkszs[i]
e = blkszs[i+1]
if s <= el < e:
return (i+1,el-blkszs[i])
raise ValueError("Element #%i does not seem to be in a block." % e)
@property
| [
11748,
17268,
198,
11748,
340,
861,
10141,
198,
11748,
28686,
13,
6978,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
629,
541,
88,
355,
599,
198,
11748,
629,
541,
88,
13,
952,
355,
264,
952,
198,
198,
6738,
1458,
2596,
1330,
... | 2.629006 | 3,620 |
import vugrad as vg
class MLP(vg.Module):
    """
    A simple MLP with one hidden layer, and a sigmoid non-linearity on the
    hidden layer and a softmax on the output.
    """
    def __init__(self, input_size, output_size, hidden_mult=4):
        """
        :param input_size:
        :param output_size:
        :param hidden_mult: Multiplier that indicates how many times bigger the hidden layer is than the input layer.
        """
        super().__init__()
        # No common wisdom exists on the hidden width beyond making it
        # strictly bigger than the input where possible; a fixed multiple of
        # the input width is used here.
        n_hidden = input_size * hidden_mult
        # Weight inits: "he" for the hidden layer, "glorot" for the output.
        # vg.Linear is a plain linear layer (no activation); instantiating it
        # here registers it on this module.
        self.layer1 = vg.Linear(input_size, n_hidden, init="he")
        self.layer2 = vg.Linear(n_hidden, output_size, init="glorot")
class MLP_3layers(vg.Module):
    """
    A simple MLP with two hidden layers (three linear layers), a sigmoid
    non-linearity on the hidden layers and a softmax on the output.
    """
    def __init__(self, input_size, output_size, hidden_mult=4):
        """
        :param input_size:
        :param output_size:
        :param hidden_mult: Multiplier that indicates how many times bigger the hidden layer is than the input layer.
        """
        super().__init__()
        hidden_size = hidden_mult * input_size
        # Bug fix: floor division, because a layer width must be an int --
        # the old `/` yields a float under Python 3 (this file uses py3
        # `super().__init__()`).
        hidden_size2 = hidden_size // 2
        # -- There is no common wisdom on how big the hidden size should be, apart from the idea
        # that it should be strictly _bigger_ than the input if at all possible.
        # Inits: glorot (default), he
        self.layer1 = vg.Linear(input_size, hidden_size, init="he")
        self.layer2 = vg.Linear(hidden_size, hidden_size2, init="he")
        self.layer3 = vg.Linear(hidden_size2, output_size, init="glorot")
        # -- The linear layer (without activation) is implemented in vugrad. We simply instantiate these modules, and
        # add them to our network.
# NOTE(review): this re-defines `MLP`, shadowing the earlier identical
# definition in this module; only the last definition wins at import time.
# Confirm whether the duplicates are intentional or a paste error.
class MLP(vg.Module):
    """
    A simple MLP with one hidden layer, and a sigmoid non-linearity on the hidden layer and a softmax on the
    output.
    """
    def __init__(self, input_size, output_size, hidden_mult=4):
        """
        :param input_size:
        :param output_size:
        :param hidden_mult: Multiplier that indicates how many times bigger the hidden layer is than the input layer.
        """
        super().__init__()
        hidden_size = hidden_mult * input_size
        # -- There is no common wisdom on how big the hidden size should be, apart from the idea
        # that it should be strictly _bigger_ than the input if at all possible.
        # Inits: glorot (default), he
        self.layer1 = vg.Linear(input_size, hidden_size, init="he")
        self.layer2 = vg.Linear(hidden_size, output_size, init="glorot")
        # -- The linear layer (without activation) is implemented in vugrad. We simply instantiate these modules, and
        # add them to our network.
# NOTE(review): a further duplicate definition of `MLP` -- identical to the
# ones above; only this final definition is visible to importers.  Confirm
# whether the duplicates are intentional or a paste error.
class MLP(vg.Module):
    """
    A simple MLP with one hidden layer, and a sigmoid non-linearity on the hidden layer and a softmax on the
    output.
    """
    def __init__(self, input_size, output_size, hidden_mult=4):
        """
        :param input_size:
        :param output_size:
        :param hidden_mult: Multiplier that indicates how many times bigger the hidden layer is than the input layer.
        """
        super().__init__()
        hidden_size = hidden_mult * input_size
        # -- There is no common wisdom on how big the hidden size should be, apart from the idea
        # that it should be strictly _bigger_ than the input if at all possible.
        # Inits: glorot (default), he
        self.layer1 = vg.Linear(input_size, hidden_size, init="he")
        self.layer2 = vg.Linear(hidden_size, output_size, init="glorot")
        # -- The linear layer (without activation) is implemented in vugrad. We simply instantiate these modules, and
        # add them to our network.
11748,
410,
1018,
6335,
355,
410,
70,
198,
198,
4871,
10373,
47,
7,
45119,
13,
26796,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
317,
2829,
10373,
47,
351,
530,
7104,
7679,
11,
290,
257,
264,
17225,
1868,
1729,
12,
29127,
... | 2.631117 | 1,594 |
# Generated by Django 3.0.4 on 2020-06-04 06:51
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
513,
13,
15,
13,
19,
319,
12131,
12,
3312,
12,
3023,
9130,
25,
4349,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14... | 3.019231 | 52 |
import ast
import datetime
import json
import operator
import settings
true = True
##################################################################################################
##################################################################################################
file_write = open(settings.PWD + 'count_of_attach.txt', 'w')
sum_files = 1000000
descArray = []
for f in range(1, sum_files):
if f < 10:
f = '000' + str(f) + '.idea'
elif f < 100:
f = '00' + str(f) + '.idea'
elif f < 1000:
f = '0' + str(f) + '.idea'
else:
f = str(f) + '.idea'
filik = open(settings.PWD + f, 'r')
ideafile = (filik.read())
idea = eval(ideafile)
if 'Attach' in idea:
file_write.write(str(idea['Attach'][0]) + '\n')
# 84676
# {'name': '13012: SIP: SipVicious Brute Force SIP Tool', 'count': 56886}
# {'name': '32391: UDP: Netcore/Netis Router Backdoor Communication Attempt', 'count': 21773}
# {'name': 'ET SCAN Potential SSH Scan OUTBOUND', 'count': 2671}
# {'name': 'Comm. with host known as malware source', 'count': 1537}
# {'name': '0560: DNS: Version Request (UDP)', 'count': 355}
# {'name': 'ET DOS Possible NTP DDoS Inbound Frequent Un-Authed MON_LIST Requests IMPL 0x03', 'count': 334}
# {'name': 'Drop RPF', 'count': 254}
# {'name': 'Port scanning Security issues', 'count': 215}
# {'name': '12607: Backdoor: Zero Access Trojan Communication Attempt', 'count': 57}
# {'name': 'GPL VOIP SIP INVITE message flooding', 'count': 45}
# {'name': 'ET DOS Possible Memcached DDoS Amplification Query (set)', 'count': 43}
# {'name': 'Communication w. host having reputation score 80+', 'count': 29}
# {'name': 'GPL ATTACK_RESPONSE id check returned root', 'count': 28}
# {'name': 'Resolving name of host having reputation score 80+', 'count': 16}
# {'name': 'Comm. with host known as botnet member or worm src', 'count': 15}
# {'name': 'GPL SNMP public access udp', 'count': 14}
# {'name': '27429: UDP: Ransom_CERBER.ENC Checkin', 'count': 10}
# {'name': 'ET EXPLOIT ETERNALBLUE Exploit M2 MS17-010', 'count': 9}
# {'name': '16304: UDP: MIT Kerberos KDC Server TGS-REQ Denial-of-Service Vulnerability', 'count': 6}
# {'name': '12961: DNS: Large UDP Packet DDoS (ONLY enable when under DoS attack)', 'count': 2}
# {'name': 'Comm. with server hosting phishing page', 'count': 2}
# {'name': '30565: DNS: Possible Kelihos .eu CnC Domain Generation Algorithm (DGA) Lookup NXDOMAIN Response', 'count': 2}
# {'name': 'ET CNC Feodo Tracker Reported CnC Server group 4', 'count': 1}
# {'name': '5300: DNS: Suspicious Localhost PTR Record Response', 'count': 1}
# {'name': 'ET DROP Dshield Block Listed Source group 1', 'count': 1}
# {'name': '0???}?j?x???\x07i\x0c?\x13??????9",2852,"CZ",,"NOVA HOSPODA",0,0,"Information Technology"', 'count': 1}
# {'name': '29739: SIP: Digium Asterisk app_minivm Caller-ID Command Execution Vulnerability', 'count': 1}
file_write = open(settings.PWD + 'attach_types.txt', 'w')
count = 0
attach_types = []
attach_types_set = set()
result = []
filik = settings.PWD + 'count_of_attach.txt'
with open(filik) as f:
lines = f.readlines()
for line in lines:
print(str(count))
count += 1
line = eval(line)
if 'Content' in line:
if len(line['Content'].split('|')) > 1:
try:
sign = line['Content'].split('|')[4]
attach_types.append(sign)
attach_types_set.add(sign)
except:
file_write.write(str(line['Content']) + '\n')
elif line['Content'].startswith('Drop RPF'):
try:
attach_types.append('Drop RPF')
attach_types_set.add('Drop RPF')
except:
file_write.write(str(line['Content']) + '\n')
elif len(line['Content'].split('\t')) > 1:
try:
sign = line['Content'].split('\t')[4] + ' ' + line['Content'].split('\t')[5]
attach_types.append(sign)
attach_types_set.add(sign)
except:
file_write.write(str(line['Content']) + '\n')
else:
try:
sign = json.dumps(line['Content'].replace("u'", '"').replace("'", '"'))
sign = json.loads(sign)
sign = (ast.literal_eval(sign))
attach_types.append(sign['alert']['signature'])
attach_types_set.add(sign['alert']['signature'])
except:
file_write.write(str(line['Content']) + '\n')
counter = 0
for item in attach_types_set:
for i in attach_types:
if item == i:
counter += 1
result.append({'name': item, 'count': counter})
counter = 0
result.sort(key=operator.itemgetter('count'), reverse=True)
for i in result:
print(i)
print(str(count))
| [
11748,
6468,
198,
11748,
4818,
8079,
198,
11748,
33918,
198,
11748,
10088,
198,
11748,
6460,
628,
198,
7942,
796,
6407,
198,
198,
29113,
29113,
29113,
2235,
220,
198,
198,
29113,
29113,
29113,
2235,
198,
198,
7753,
62,
13564,
796,
1280,
... | 2.189053 | 2,375 |
from __future__ import absolute_import, division, print_function
from libtbx import easy_pickle
from libtbx import group_args
from libtbx.utils import Sorry
from collections import defaultdict
import os.path
import math
import sys
from six.moves import range
def export_ramachandran_distribution(n_dim_table, scale_factor=0.25):
"""
Convert a MolProbity Ramachandran distribution to a format suitable for
display using matplotlib (see wxtbx.plots).
"""
import numpy
z = n_dim_table.lookupTable
z_grid = [ [ z[i + (j * 180)] for j in range(180) ]
for i in range(180) ]
npz = numpy.array(z_grid)
return npz ** scale_factor
def export_rotamer_distribution(n_dim_table, scale_factor=0.5):
"""
Convert a MolProbity rotamer distribution to a format suitable for
display using matplotlib (see wxtbx.plots). Will reduce dimensionality to
2 if necessary.
"""
import numpy
z = n_dim_table.lookupTable
n_dim = n_dim_table.nDim
assert n_dim >= 2
x_offset = 1
for nbins in n_dim_table.nBins[1:] :
x_offset *= nbins
y_width = 1
if n_dim > 2 :
for nbins in n_dim_table.nBins[2:] :
y_width *= nbins
z_grid = [ [] for i in range(n_dim_table.nBins[1]) ]
for i in range(n_dim_table.nBins[0]):
for j in range(n_dim_table.nBins[1]):
z_total = 0
for k in range(y_width):
z_total += z[(i * x_offset) + (j * y_width) + k]
z_grid[j].append(z_total)
npz = numpy.array(z_grid)
return npz ** scale_factor
def molprobity_score(clashscore, rota_out, rama_fav):
"""
Calculate the overall Molprobity score, as described here:
http://www.ncbi.nlm.nih.gov/pmc/articles/PMC2877634/?tool=pubmed
http://kinemage.biochem.duke.edu/suppinfo/CASP8/methods.html
"""
if (clashscore >= 0) and (rota_out >= 0) and (rama_fav >= 0):
rama_iffy = 100. - rama_fav
mpscore = (( 0.426 * math.log(1 + clashscore) ) +
( 0.33 * math.log(1 + max(0, rota_out - 1)) ) +
( 0.25 * math.log(1 + max(0, rama_iffy - 2)) )) + 0.5
else :
return -1 # FIXME prevents crashing on RNA
return mpscore
#this function assumes that use_segids_in_place_of_chainids() is True
if __name__ == "__main__" :
exercise()
print("OK")
| [
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
7297,
11,
3601,
62,
8818,
198,
6738,
9195,
83,
65,
87,
1330,
2562,
62,
27729,
293,
198,
6738,
9195,
83,
65,
87,
1330,
1448,
62,
22046,
198,
6738,
9195,
83,
65,
87,
13,
26791... | 2.378579 | 943 |
from frappe import _
def new_column(label, fieldname, fieldtype, width, options=None):
"""
Create a report column
:param label:
:param fieldname:
:param fieldtype:
:param width:
:param options:
:return:
"""
column = {"label": _(label), "fieldname": fieldname, "fieldtype": fieldtype, "width": width}
print(column)
if options:
column.update({'options': options})
return column
| [
6738,
5306,
27768,
1330,
4808,
628,
198,
4299,
649,
62,
28665,
7,
18242,
11,
2214,
3672,
11,
2214,
4906,
11,
9647,
11,
3689,
28,
14202,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
13610,
257,
989,
5721,
198,
220,
220,
220,
... | 2.689024 | 164 |
import pickle
import os
import argparse
import pandas as pd
import seaborn as sns
import matplotlib as mpl
import matplotlib.pyplot as plt
import torch
from torch.serialization import default_restore_location
from seq2seq import models, utils
from seq2seq.data.dictionary import Dictionary
from seq2seq.data.dataset import Seq2SeqDataset, BatchSampler
from matplotlib.font_manager import FontProperties
from matplotlib import rcParams
def get_args():
""" Defines training-specific hyper-parameters. """
parser = argparse.ArgumentParser('Sequence to Sequence Model')
parser.add_argument('--cuda', default=False, help='Use a GPU')
# Add data arguments
parser.add_argument('--data', default='prepared_data', help='path to data directory')
parser.add_argument('--source-lang', default='jp', help='source language')
return parser.parse_args()
if __name__ == '__main__':
args = get_args()
main(args) | [
11748,
2298,
293,
198,
11748,
28686,
198,
11748,
1822,
29572,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
384,
397,
1211,
355,
3013,
82,
198,
11748,
2603,
29487,
8019,
355,
285,
489,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,... | 3.223404 | 282 |
print(riemann(sqrd, 1, 9, 1000))
print(riemann(tripsqrd, 3, 6, 10000))
print(riemann(weirdf, 2, 7, 100000)) | [
628,
628,
198,
198,
4798,
7,
380,
368,
1236,
7,
31166,
4372,
11,
352,
11,
860,
11,
8576,
4008,
198,
198,
4798,
7,
380,
368,
1236,
7,
28461,
862,
80,
4372,
11,
513,
11,
718,
11,
33028,
4008,
198,
198,
4798,
7,
380,
368,
1236,
7... | 2.017544 | 57 |
# -*- coding: utf-8 -*-
from collections import OrderedDict
import time
import h5py
import pandas as pd
import numpy as np
import yaml
from .settings import *
from .core.get_gadgets import *
from .core.metagene import *
from ._version import __format_version__, __version__
##########################################################
#####################################################################
#### C O M P A T I B I L I T Y F U N C T I O N S
#####################################################################
def check_referenence_compatibility( first_pair, second_pair ):
"""
?? Incomplete documentation
"""
reference_error = "Reference Error\n"
first_reference_names = get_reference_names( first_pair[0] )
second_reference_names = get_reference_names( second_pair[0] )
if len(first_reference_names) != len(second_reference_names):
reference_error += "They have different number of references"
reference_error += "{} != {}".format( len(first_reference_names),
len(second_reference_names))
return reference_error
ref_name_comparison = first_reference_names == second_reference_names
if not ref_name_comparison.all():
return "Different reference (transcript) names."
first_reference_lengths = get_reference_lengths( first_pair[0] )
second_reference_lengths = get_reference_lengths( second_pair[0] )
ref_len_comparison = first_reference_lengths == second_reference_lengths
if not ref_len_comparison.all():
return "Different reference (transcript) lengths."
return ""
def check_attribute_compatibility( first_pair, second_pair ):
"""
?? Incomplete documentation
"""
attribute_error = "Attribute Error:\n"
for attribute in ATTRS_ESSENTIAL_COMPATIBILITY:
first_attr = first_pair[0].attrs[attribute]
second_attr = second_pair[0].attrs[attribute]
if first_attr != second_attr:
attribute_error += \
" The {} atrribute is different:\n".format(attribute)
attribute_error += "{first} != {second}".format(
first = first_attr,
second = second_attr )
return attribute_error
return ""
############################################################################
def check_ribo_compatibility_pair( first_pair, second_pair ):
"""
?? Incomplete documentation
"""
first_handle = first_pair[0]
first_identifier = first_pair[1]
second_handle = second_pair[0]
second_identifier = second_pair[1]
error_message = "The ribo files {}, {} are not compatible.\n".format(
first_identifier, second_identifier)
# check attribute compatibility first
attribute_error = \
check_attribute_compatibility( first_pair, second_pair )
if attribute_error:
raise ValueError( error_message + attribute_error )
ref_error = check_referenence_compatibility( first_pair, second_pair )
if ref_error:
raise ValueError( error_message + ref_error )
def check_if_common_libs_exist( ribo_handle_list ):
"""
?? Incomplete documentation
"""
for i, ribo_1 in enumerate(ribo_handle_list):
for ribo_2 in ribo_handle_list[i+1:]:
experiments_1 = set(get_experiment_names(ribo_1[0]) )
experiments_2 = set(get_experiment_names(ribo_2[0]) )
common_experiments = experiments_1.intersection(experiments_2)
if common_experiments :
identifier_1 = ribo_1[1]
identifier_2 = ribo_2[1]
error_message = "The ribos {first} and {second} ".format(\
first = identifier_1,
second = identifier_2)
error_message += " have common experiments:\n"
error_message += str(common_experiments)
raise ValueError(error_message)
#####################################################################
#### M A I N F U N C T I O N S
#####################################################################
def initialize( h5_destination_handle, h5_source_handle ):
"""
?? Incomplete documentation
"""
h5_source_handle.copy( REFERENCE_name,
h5_destination_handle )
h5_destination_handle.create_group(EXPERIMENTS_name)
###########################################################
def _copy_attributes(h5_destination_handle, h5_source_handle):
"""
?? Incomplete documentation
"""
for key in ATTRIBUTES_TO_BE_COPIED_FOR_MERGE:
h5_destination_handle.attrs[key] = h5_source_handle.attrs[key]
def _copy_ribo_metadata(destination_handle, source_list):
"""
Tries to merge the metadata of the source_likst into one dictionary
and write it to destination.
If source has no metadata, destrination metadata attribute will be set
but it will be empty.
"""
merged_metadata_dict = {}
source_with_metadata = []
for h in source_list:
if h.attrs.get(USER_METADATA, None):
metadata_dict = yaml.safe_load(h.attrs[USER_METADATA])
# Metadata might exists as an emty string
# So let's make sure it translates to a non-emptry dict.
if metadata_dict:
merged_metadata_dict.update( metadata_dict )
destination_handle.attrs[USER_METADATA] = \
yaml.safe_dump( merged_metadata_dict )
def merge_ribos(destination_handle, source_list):
"""
?? Incomplete documentation
"""
# If the sdource list is not coming from pairs,
# then make it into pairs
# this way, identifying incompatible ribo files or
# handles is going to be easier
if type(source_list[0]) not in (tuple, list):
source_list = [ ( source_list[i], str(i) ) \
for i in range(len(source_list)) ]
check_ribo_compatibility(source_list)
source_handle_list = list( map(lambda x: x[0], source_list) )
initialize(destination_handle, source_handle_list[0])
_copy_attributes(destination_handle, source_handle_list[0])
_copy_ribo_metadata(destination_handle, source_handle_list)
for ribo_handle in source_handle_list:
for lib in get_experiment_names(ribo_handle):
exp_path = EXPERIMENTS_name + "/" + lib
#destination_handle.create_group(exp_path)
ribo_handle.copy( exp_path, destination_handle[EXPERIMENTS_name] )
destination_handle.attrs[ATTRS_TIME] = time.time()
def merge_ribo_files(destination_file, source_file_list):
"""
Merges the experiments in the given source files and writes
the result in the destination file.
The ribo files need to be compatible
(same left / right span, same metagene radius, same reference)
Because of the compatibility, parameters (attributes), refrence
etc. of the new file is the same as the merged file,
The source files are not allowed to have experiments of the same name
as this creates ambiguity.
Parameters
----------
destination_file : Destination ribo file path
source_file_list : List of ribo file paths to be merged
"""
if len(source_file_list) < 2:
print("Please provide at least two input ribo files")
exit(1)
source_handle_list = [ (h5py.File(f , "r"), f )
for f in source_file_list ]
destination_ribo_handle = h5py.File( destination_file, "w" )
merge_ribos( destination_ribo_handle, source_handle_list )
[s[0].close() for s in source_handle_list]
destination_ribo_handle.close()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
11748,
640,
198,
198,
11748,
289,
20,
9078,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
... | 2.578756 | 3,022 |
#! /usr/bin/env python3
"""ะกะพััะธัะพะฒะบะฐ ะะ
"""
import json
import sys
import requests
import threading
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
37811,
140,
94,
15166,
21169,
20375,
18849,
21169,
25443,
110,
31583,
16142,
12466,
239,
140,
242,
198,
37811,
198,
198,
11748,
33918,
198,
11748,
25064,
198,
11748,
7007,
198,
117... | 2.318182 | 44 |
# coding=utf-8
from .{{cookiecutter.project_repo}} import * | [
2,
19617,
28,
40477,
12,
23,
198,
6738,
764,
27007,
44453,
8968,
353,
13,
16302,
62,
260,
7501,
11709,
1330,
1635
] | 2.809524 | 21 |
from bson import ObjectId
from marshmallow import Schema, fields, validate, validates, ValidationError
from app import app
GameServer = app.config["LAZY_UMONGO"].GameServer
| [
6738,
275,
1559,
1330,
9515,
7390,
198,
6738,
22397,
42725,
1330,
10011,
2611,
11,
7032,
11,
26571,
11,
4938,
689,
11,
3254,
24765,
12331,
198,
198,
6738,
598,
1330,
598,
628,
198,
8777,
10697,
796,
598,
13,
11250,
14692,
13534,
57,
5... | 3.415094 | 53 |
# Copyright 2021 JD.com, Inc., JD AI
"""
@author: Yehao Li
@contact: yehaoli.sysu@gmail.com
"""
import torch
import torch.nn as nn
__all__ = ["BaseAttention"] | [
2,
15069,
33448,
28591,
13,
785,
11,
3457,
1539,
28591,
9552,
198,
37811,
198,
31,
9800,
25,
11609,
23778,
7455,
198,
31,
32057,
25,
9838,
3099,
11106,
13,
17597,
84,
31,
14816,
13,
785,
198,
37811,
198,
11748,
28034,
198,
11748,
2803... | 2.741379 | 58 |
import random
import numpy as np
import json
import configparser
if __name__ == '__main__':
create_and_save_data_ids()
# create_and_save_incremental_learning_data_set_ids()
# with open('./data/label_temp.txt') as f:
# strs = f.read()[1:-1].split(',')
# print(len(strs))
# for i in range(len(strs)-1, 0, -1):
# if strs[i] != ' 2.0':
# flag = True
# for j in range(30):
# if strs[i-j] == ' 2.0':
# flag = False
# if flag:
# break
# print(i)
# i = 740452
pass
| [
11748,
4738,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
33918,
198,
11748,
4566,
48610,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
2251,
62,
392,
62,
21928,
62,
7890,
62,
2340,
3419,
1... | 1.79096 | 354 |
import pickle
import csv
csv_file=open('IN.txt','r')
location_dict={}
count=0
# low_longi= 80.1846
# low_lati= 12.9673
# up_longi= 80.3067
# up_lati= 13.1515
low_longi= 76.2448
low_lati= 8.0742
up_longi= 80.3502
up_lati= 13.5757
for line in csv_file:
line=line.strip().split('\t')
name1=line[1]
name2=line[2]
names=line[3].split(',')
lat=line[4]
longi=line[5]
if float(lat) <= low_lati or float(lat) >= up_lati or float(longi)<= low_longi or float(longi) >= up_longi:
continue
check_location_dict(name1.lower(),lat,longi)
check_location_dict(name2.lower(),lat,longi)
for name in names:
check_location_dict(name.lower(),lat,longi)
count+=1
if count%10000==0:
print(count/100)
new_location_dict={}
lat_len=[]
long_len=[]
for name in location_dict:
lat_len=[float(i[0]) for i in location_dict[name]]
long_len=[float(i[1]) for i in location_dict[name]]
lat_val=round(sum(lat_len)/len(lat_len),6)
long_val=round(sum(long_len)/len(long_len),6)
new_location_dict[name]=(lat_val,long_val)
with open('TN_loc.p','wb') as handle:
pickle.dump(new_location_dict,handle)
| [
11748,
2298,
293,
198,
11748,
269,
21370,
198,
198,
40664,
62,
7753,
28,
9654,
10786,
1268,
13,
14116,
41707,
81,
11537,
198,
198,
24886,
62,
11600,
34758,
92,
198,
198,
9127,
28,
15,
198,
198,
2,
1877,
62,
6511,
72,
28,
4019,
13,
... | 2.203156 | 507 |
import os
import time
import sys
from datetime import datetime
# This script should be invoked in parent dir of scripts
if len(sys.argv) < 1:
print('ERROR: unknown function invoke time')
sys.exit(1)
activationLog = open('./scripts/activation.log','a')
activationLog.write("terminated\n")
result = open('./scripts/result-single.log','w')
starttime = sys.argv[1]
activationLog = open('./scripts/activation.log','r')
terminateline = activationLog.readline().strip()
while(terminateline.find("terminated") == -1):
if terminateline.find("wage analysis result: ") != -1:
break
terminateline = activationLog.readline().strip()
if terminateline.find("wage analysis result: ") != -1:
starttime = string2timestamp(starttime)
endtime = string2timestamp(terminateline[1:].split()[0])
resline = str(endtime-starttime)
print("%s" % resline)
result.write("%s\n" % resline)
sys.exit(0)
# result not found: invocation vailed
sys.exit(1)
| [
11748,
28686,
198,
11748,
640,
198,
11748,
25064,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
198,
2,
770,
4226,
815,
307,
24399,
287,
2560,
26672,
286,
14750,
198,
198,
361,
18896,
7,
17597,
13,
853,
85,
8,
1279,
352,
25,
198,
22... | 2.829971 | 347 |
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2018 ZhicongYan
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ==============================================================================
import os
import numpy as np
import pickle
import xml.etree.ElementTree as ET
from skimage import io
import cv2
from .base_dataset import BaseDataset
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
17168,
13789,
198,
2,
220,
198,
2,
15069,
357,
66,
8,
2864,
10511,
291,
506,
49664,
198,
2,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
... | 3.789474 | 361 |
def product_except_self(A):
'''
238. Product of Array Except Self
=================================
Given an array A of n integers (n > 1), return an array where the i-th
element is the product of all but the i-th element of A.
Restrictions:
-------------
1. Don't use division
2. Use constant extra space.
Example:
--------
>>> product_except_self([1, 2, 3, 4])
[24, 12, 8, 6]
'''
out = [1] * len(A)
# Backward pass: out[-(i + 1)] = prod(A[-1]..A[-i])
for i in range(1, len(A)):
out[-(i + 1)] = out[-i] * A[-i]
# Forward pass
lprod = 1
for i in range(1, len(A)):
lprod *= A[i - 1]
out[i] *= lprod
return out
| [
4299,
1720,
62,
16341,
62,
944,
7,
32,
2599,
198,
220,
220,
220,
705,
7061,
198,
220,
220,
220,
32544,
13,
8721,
286,
15690,
18181,
12189,
198,
220,
220,
220,
46111,
198,
220,
220,
220,
11259,
281,
7177,
317,
286,
299,
37014,
357,
... | 2.260188 | 319 |
# Future
from __future__ import annotations
# My stuff
from utilities.paginators.base import BasePaginator
from utilities.paginators.embed import EmbedPaginator
from utilities.paginators.embeds import EmbedsPaginator
from utilities.paginators.fields import FieldsPaginator
from utilities.paginators.file import FilePaginator
from utilities.paginators.text import TextPaginator
| [
2,
10898,
198,
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
2,
2011,
3404,
198,
6738,
20081,
13,
79,
23183,
2024,
13,
8692,
1330,
7308,
47,
363,
20900,
198,
6738,
20081,
13,
79,
23183,
2024,
13,
20521,
1330,
13302,
276,
47,
363,
... | 3.705882 | 102 |
import cv2 as cv
import numpy as np
| [
11748,
269,
85,
17,
355,
269,
85,
198,
11748,
299,
32152,
355,
45941,
628
] | 2.642857 | 14 |
from jinja2 import Template
from .transformer import BaseTransformer
UNIT_TEMPLATE = '''\
# {{ name }}.service #######################################################################
[Unit]
Description={{ name | title }}
After=docker.service {% for link in link_keys %}{{ link }}.service {% endfor %}
Requires=docker.service {% for link in link_keys %}{{ link }}.service {% endfor %}
[Service]
{% if essential == False %}
Type=oneshot {% endif -%}
ExecStartPre=-/usr/bin/docker kill {{ name }}
ExecStartPre=-/usr/bin/docker rm {{ name }}
ExecStartPre=/usr/bin/docker pull {{ image or "<image>" }}
ExecStart=/usr/bin/docker run \\
--name {{ name }} \\
{%- if cpu_shares %}
--cpu {{ cpu_shares }} \\{% endif -%}
{% if memory %}
--memory {{ memory }} \\{% endif -%}
{% if hostname %}
--hostname {{ hostname }} \\{% endif -%}
{% if pid %}
--pid {{ pid }} \\{% endif -%}
{% if entrypoint %}
--entrypoint {{ entrypoint }} \\{% endif -%}
{% for port in ports %}
-p {{ port }} \\{% endfor -%}
{% for ep in expose %}
--expose {{ ep }} \\{% endfor -%}
{% if net %}
--net {{ net }} \\{% endif -%}
{% for volume in volumes %}
-v {{ volume }} \\{% endfor -%}
{%- if logging %}
{% if logging.driver -%}
--log-driver={{ logging.driver }} \\{% endif -%}
{% if logging.options %}{% for opt in logging.options|dictsort %}
--log-opt {{ opt[0] }}={{ opt[1] }} \\{% endfor -%}{% endif %}{% endif -%}
{% if environment %}{% for env in environment|dictsort %}
-e "{{ env[0] }}={{ env[1] }}" \\{% endfor -%}{% endif -%}
{% if labels %}{% for label in labels|dictsort %}
--label {{ label[0] }}="{{ label[1] }}" \\{% endfor -%}{% endif -%}
{% for link in links %}
--link {{ link }} \\{% endfor -%}
{% for ef in env_file %}
--env-file {{ ef }} \\{% endfor -%}
{% for vf in volumes_from %}
--volumes-from {{ vf }} \\{% endfor -%}
{% for ns in dns %}
--dns {{ ns }} \\{% endfor -%}
{% if work_dir %}
--workdir {{ work_dir}} \\{% endif -%}
{% if user %}
--user {{ user }} \\{% endif -%}
{% if privileged %}
--privileged {{ privileged}} \\{%- endif %}
{{ image or "<image>" }} {% if command %}\\
{{ command }}{% endif %}
ExecStop=/usr/bin/docker stop {{ name }}
'''
class SystemdTransformer(BaseTransformer):
"""
A transformer for docker-compose
To use this class:
.. code-block:: python
transformer = SystemdTransformer()
"""
@staticmethod
@staticmethod
def emit_port_mappings(self, port_mappings):
"""
:param port_mappings: the base schema port_mappings
:type port_mappings: list of dict
:return:
:rtype: list of str
"""
return [str(self._emit_mapping(mapping)) for mapping in port_mappings]
@staticmethod
| [
6738,
474,
259,
6592,
17,
1330,
37350,
198,
198,
6738,
764,
7645,
16354,
1330,
7308,
8291,
16354,
628,
198,
4944,
2043,
62,
51,
3620,
6489,
6158,
796,
705,
7061,
59,
198,
2,
22935,
1438,
1782,
27422,
15271,
1303,
29113,
29113,
4242,
2... | 2.483117 | 1,155 |
from django.test import TestCase
from jackal.settings import JackalSettings
| [
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
198,
198,
6738,
14509,
282,
13,
33692,
1330,
3619,
282,
26232,
628
] | 3.9 | 20 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
2829,
17752,
355,
33918,
198,
198,
6738,
435,
541,
323,
13,
64,
404,
13,
15042,
13,
9979,
415,
13,
22973,
3418... | 2.58 | 50 |
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^announce/$', views.make_announcement, name='announce'),
url(r'^get-viewed/$', views.get_viewed, name='get-viewed'),
url(r'^view-post/$', views.view_post, name='view-post'),
url(r'^submit-post/$', views.submit_post, name='submit-post'),
url(r'^refresh-feed/$', views.get_recent_posts_ajax, name='refresh-feed'),
url(r'^get-announcements/$', views.get_announcements, name='get-announcements'),
]
| [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
198,
6738,
764,
1330,
5009,
628,
198,
6371,
33279,
82,
796,
685,
198,
197,
6371,
7,
81,
6,
61,
1236,
8652,
32624,
3256,
5009,
13,
15883,
62,
1236,
8652,
434,
11,
1438,
11639,... | 2.589189 | 185 |
import pytest
from dependency_injector import Scope
from dependency_injector.errors import (
FactoryMissingReturnTypeError,
MissingDependentContextError,
ServiceAlreadyRegisteredError,
)
from ..utils import Context
from . import ioc
| [
11748,
12972,
9288,
198,
198,
6738,
20203,
62,
259,
752,
273,
1330,
41063,
198,
6738,
20203,
62,
259,
752,
273,
13,
48277,
1330,
357,
198,
220,
220,
220,
19239,
43730,
13615,
6030,
12331,
11,
198,
220,
220,
220,
25639,
35,
8682,
21947... | 3.392405 | 79 |
#!/usr/bin/env python
import rospy
from std_msgs.msg import String,Bool
import threading;
robots=[];
permission_lock=threading.Lock();
if __name__ == '__main__':
main();
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
686,
2777,
88,
198,
6738,
14367,
62,
907,
14542,
13,
19662,
1330,
10903,
11,
33,
970,
198,
11748,
4704,
278,
26,
198,
22609,
1747,
41888,
11208,
198,
525,
3411,
62,
5354,
28,
... | 2.651515 | 66 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
#
# pycairo/cairocffi-based glyph-mono/alpha example - Copyright 2017 Hin-Tak Leung
# Distributed under the terms of the new BSD license.
#
# rewrite of the numply,matplotlib-based example from Nicolas P. Rougier
# - Not immitating the upside-downness of glyph-monochrome/glyph-alpha
# This script default to normal(8-bit) rendering, but render to mono
# if any argument is specified.
#
# Mono rendering requires libtiff on small-endian platforms. See
# comments in bitmap_to_surface.py.
#
# -----------------------------------------------------------------------------
'''
Glyph bitmap monochrome/alpha rendring
'''
from freetype import *
# use Matrix() from Cairo instead of from Freetype
from cairo import Context, ImageSurface, FORMAT_ARGB32, SurfacePattern, FILTER_BEST, Matrix
from bitmap_to_surface import make_image_surface
if __name__ == '__main__':
from PIL import Image
import sys
face = Face('./Vera.ttf')
face.set_char_size( 48*64 )
if len(sys.argv) < 2:
# Normal(8-bit) Rendering
face.load_char('S', FT_LOAD_RENDER |
FT_LOAD_TARGET_NORMAL )
else:
# Mono(1-bit) Rendering
face.load_char('S', FT_LOAD_RENDER |
FT_LOAD_TARGET_MONO )
bitmap = face.glyph.bitmap
width = face.glyph.bitmap.width
rows = face.glyph.bitmap.rows
pitch = face.glyph.bitmap.pitch
glyph_surface = make_image_surface(face.glyph.bitmap)
surface = ImageSurface(FORMAT_ARGB32, 800, 600)
ctx = Context(surface)
ctx.rectangle(0,0,800,600)
ctx.set_line_width(0)
ctx.set_source_rgb (0.5 , 0.5, 0.5)
ctx.fill()
#
scale = 480.0 / rows
ctx.set_source_surface(glyph_surface, 0, 0)
pattern = ctx.get_source()
SurfacePattern.set_filter(pattern, FILTER_BEST)
scalematrix = Matrix()
scalematrix.scale(1.0/scale,1.0/scale)
scalematrix.translate(-(400.0 - width *scale /2.0 ), -60)
pattern.set_matrix(scalematrix)
ctx.set_source_rgb (0 , 0, 1)
ctx.mask(pattern)
ctx.fill()
surface.flush()
surface.write_to_png("glyph-mono+alpha-cairo.png")
Image.open("glyph-mono+alpha-cairo.png").show()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
16529,
32501,
198,
2,
198,
2,
220,
12972,
66,
18131,
14,
66,
1872,
12204,
487,
72,
12,
3106,
25874,
12,
2144,
78... | 2.475322 | 932 |
# Generated by Django 2.1.11 on 2020-05-24 03:03
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
362,
13,
16,
13,
1157,
319,
12131,
12,
2713,
12,
1731,
7643,
25,
3070,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,... | 2.840909 | 44 |
# Generated by Django 2.2.14 on 2021-01-12 21:41
import wagtail.core.blocks
import wagtail.core.fields
from django.db import migrations
import core.blocks as core_blocks
import domestic.models
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
13,
1415,
319,
33448,
12,
486,
12,
1065,
2310,
25,
3901,
198,
198,
11748,
266,
363,
13199,
13,
7295,
13,
27372,
198,
11748,
266,
363,
13199,
13,
7295,
13,
25747,
198,
6738,
42625,
14208,
13,
... | 3.111111 | 63 |
"""
WAP that prompts the user to input a positive integer. It should then output a message
indicating whether the number is a prime number.
"""
number = int(input())
is_prime = True
for i in range(2, number):
if number % i == 0:
print('composite number')
is_prime = False
break
if is_prime and number is not 1:
print('prime number')
| [
37811,
198,
54,
2969,
326,
36454,
262,
2836,
284,
5128,
257,
3967,
18253,
13,
632,
815,
788,
5072,
257,
3275,
198,
521,
12364,
1771,
262,
1271,
318,
257,
6994,
1271,
13,
198,
37811,
198,
198,
17618,
796,
493,
7,
15414,
28955,
198,
2... | 2.875 | 128 |
import discord.ext.commands
import pytz.exceptions
from stalkbroker import date_utils, errors, messages
from ._bot import STALKBROKER
from ._commands_utils import confirm_execution, user_change_bulletin_subscription
# Module-level placeholder; NOTE(review): nothing in this chunk reads or
# assigns it -- presumably kept so this module is imported for its command
# registrations. Confirm before removing.
_IMPORT_HELPER = None
@STALKBROKER.command(
    name="timezone",
    case_insensitive=True,
    help="<zone> Sets the timezone for your user (ie pst)",
)
async def set_user_timezone(ctx: discord.ext.commands.Context, zone_arg: str) -> None:
    """Store the invoking user's local timezone in the database.

    :param ctx: message context passed in by discord.py
    :param zone_arg: the timezone argument passed by the user

    :raises BadTimezoneError: if ``zone_arg`` is not a recognized timezone.
    """
    try:
        parsed_tz = date_utils.parse_timezone_arg(zone_arg)
    except pytz.exceptions.UnknownTimeZoneError:
        # pytz did not recognize the value; surface it as a stalkbroker error
        # so the bot reports a friendly message instead of a stack trace.
        raise errors.BadTimezoneError(ctx, zone_arg)

    # Persist the parsed timezone, then react with the four-o'clock emoji.
    await STALKBROKER.db.update_user_timezone(ctx.author, ctx.guild, parsed_tz)
    await confirm_execution(ctx, [messages.REACTIONS.CONFIRM_TIMEZONE])
# TODO: put this behind some sort of role check
@STALKBROKER.group(case_insensitive=True, pass_context=True)
async def bulletins(ctx: discord.ext.commands.Context) -> None:
    """Parent command group for the ``bulletins`` sub-commands.

    BUG FIX(review): the group callback was missing from this file -- the
    ``@STALKBROKER.group`` decorator had nothing to decorate and the
    ``@bulletins.command`` decorators below referenced an undefined name,
    which raised at import time.  The group body is intentionally empty;
    all behavior lives in the sub-commands registered on it.
    """


@bulletins.command(
    name="here", pass_context=True, help="send bulletins to this channel",
)
async def set_bulletins_channel(ctx: discord.ext.commands.Context) -> None:
    """
    Sets the channel a server wishes bulletins to be sent to.

    :param ctx: message context passed in by discord.py. The channel on this
        context is used as the bulletin channel.
    """
    await STALKBROKER.db.server_set_bulletin_channel(ctx.guild, ctx.channel)
    await confirm_execution(ctx, [messages.REACTIONS.CONFIRM_BULLETIN_CHANNEL])
@bulletins.command(
    name="minimum",
    pass_context=True,
    help="set the minimum bell price for a bulletin to be sent to the bulletin channel",
)
async def set_bulletins_minimum(
    ctx: discord.ext.commands.Context, price_minimum: int,
) -> None:
    """Store the minimum bell price an offer must reach to trigger a bulletin.

    :param ctx: message context passed in by discord.py; its guild is the
        server whose setting is updated.
    :param price_minimum: the minimum price to set for sending a bulletin about it.
    """
    broker_db = STALKBROKER.db
    await broker_db.server_set_bulletin_minimum(ctx.guild, price_minimum)
    await confirm_execution(ctx, [messages.REACTIONS.CONFIRM_BULLETIN_MINIMUM])
@bulletins.command(
    name="heat",
    pass_context=True,
    help=(
        "set the minimum heat value for a forecast bulletin to be sent to the bulletin"
        " channel"
    ),
)
async def set_bulletins_minimum_heat(
    ctx: discord.ext.commands.Context, heat_minimum: int,
) -> None:
    """Store the minimum heat score required before a forecast bulletin is sent.

    :param ctx: message context passed in by discord.py; its guild is the
        server whose setting is updated.
    :param heat_minimum: the minimum heat score to set for sending a forecast bulletin.
    """
    broker_db = STALKBROKER.db
    await broker_db.server_set_heat_minimum(ctx.guild, heat_minimum)
    await confirm_execution(ctx, [messages.REACTIONS.CONFIRM_HEAT_MINIMUM])
@bulletins.command(
    name="subscribe",
    pass_context=True,
    help="Get notified when a high-price turnip offer occurs on another island. Signs"
    "you up for the 'stalk investor role'. This is a discord-wide subscription and"
    " will assign you to the role on every server you are a part of.",
)
async def bulletins_user_subscribe(ctx: discord.ext.commands.Context) -> None:
    """Add the invoking user to the 'stalk investor' role so bulletin posts
    notify them.
    """
    author: discord.User = ctx.author
    await user_change_bulletin_subscription(author, subscribe=True)
    await confirm_execution(ctx, [messages.REACTIONS.CONFIRM_BULLETINS_SUBSCRIBED])
@bulletins.command(
    name="unsubscribe",
    pass_context=True,
    help="stop being notified when a turnip price bulletin occurs. This change is "
    "applied to every server you are a part of.",
)
async def bulletins_user_unsubscribe(ctx: discord.ext.commands.Context) -> None:
    """Remove the invoking user from the 'stalk investor' role so bulletin
    posts no longer notify them.
    """
    author: discord.User = ctx.author
    await user_change_bulletin_subscription(author, subscribe=False)
    await confirm_execution(ctx, [messages.REACTIONS.CONFIRM_BULLETINS_UNSUBSCRIBED])
| [
11748,
36446,
13,
2302,
13,
9503,
1746,
198,
11748,
12972,
22877,
13,
1069,
11755,
628,
198,
6738,
31297,
7957,
6122,
1330,
3128,
62,
26791,
11,
8563,
11,
6218,
198,
6738,
47540,
13645,
1330,
3563,
28082,
11473,
11380,
1137,
198,
6738,
... | 2.839039 | 1,665 |
def ao_quadrado(x):
    """Return x squared (Portuguese: "ao quadrado").

    BUG FIX: the original definition was commented out
    (``#ao_quadrado = lambda x: x*x``) while the calls below remained,
    raising NameError.  Restored as a ``def`` (PEP 8 discourages binding
    a lambda to a name).
    """
    return x * x


print(ao_quadrado(2))
print(ao_quadrado(3))
| [
198,
2,
5488,
62,
421,
41909,
4533,
796,
37456,
2124,
25,
2124,
9,
87,
198,
198,
4798,
7,
5488,
62,
421,
41909,
4533,
7,
17,
4008,
198,
4798,
7,
5488,
62,
421,
41909,
4533,
7,
18,
4008,
198
] | 1.973684 | 38 |
# -*- coding: utf-8 -*-
import itertools
from typing import Union
import pandas as pd
from zvt.api import get_kdata_schema
from zvt.contract import Mixin, AdjustType
if __name__ == '__main__':
    from pprint import pprint
    # NOTE(review): get_top_performance_entities / get_top_entities are
    # defined elsewhere in this module -- not visible in this chunk.
    # Demo run: entities ranked by performance since 2020-01-01.
    tops1, tops2 = get_top_performance_entities(start_timestamp='2020-01-01')
    pprint(tops1)
    pprint(tops2)
# the __all__ is generated
__all__ = ['get_top_performance_entities', 'get_top_entities']
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
340,
861,
10141,
198,
6738,
19720,
1330,
4479,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
6738,
1976,
36540,
13,
15042,
1330,
651,
62,
74,
7890,
62,
1... | 2.668712 | 163 |
# placeholder for recording and encoding to flac
| [
2,
46076,
329,
8296,
290,
21004,
284,
781,
330,
198
] | 4.9 | 10 |
# Wire arithmetic dunder methods onto MathDict so instances support
# element-wise maths via +, -, *, / and //.
# NOTE(review): MathDict, _mathdict_binary_op, _mathdict_map_op,
# _mathdict_binary_in_place_op and the _iadd/_isub/_imul/_itruediv/_ifloordiv
# helpers are defined elsewhere in this module (not visible in this chunk).
MathDict.__add__ = _mathdict_binary_op(lambda a, b: a + b)
MathDict.__sub__ = _mathdict_binary_op(lambda a, b: a - b)
# Reflected subtraction: evaluates b - a when the left operand is foreign.
MathDict.__rsub__ = _mathdict_binary_op(lambda a, b: b - a)
MathDict.__mul__ = _mathdict_binary_op(lambda a, b: a * b)
# Multiplication commutes, so the reflected form reuses a * b.
MathDict.__rmul__ = _mathdict_binary_op(lambda a, b: a * b)
MathDict.__truediv__ = _mathdict_binary_op(lambda a, b: a / b)
MathDict.__floordiv__ = _mathdict_binary_op(lambda a, b: a // b)
# __getitem__ is mapped over contained values rather than combined pairwise.
MathDict.__getitem__ = _mathdict_map_op(
    lambda x, args, kwargs: x.__getitem__(*args, **kwargs))
# In-place variants (+=, -=, *=, /=, //=) delegate to the dedicated helpers.
MathDict.__iadd__ = _mathdict_binary_in_place_op(_iadd)
MathDict.__isub__ = _mathdict_binary_in_place_op(_isub)
MathDict.__imul__ = _mathdict_binary_in_place_op(_imul)
MathDict.__itruediv__ = _mathdict_binary_in_place_op(_itruediv)
MathDict.__ifloordiv__ = _mathdict_binary_in_place_op(_ifloordiv)
| [
628,
628,
628,
628,
198,
198,
37372,
35,
713,
13,
834,
2860,
834,
796,
4808,
11018,
11600,
62,
39491,
62,
404,
7,
50033,
257,
11,
275,
25,
257,
1343,
275,
8,
198,
37372,
35,
713,
13,
834,
7266,
834,
796,
4808,
11018,
11600,
62,
... | 2.254054 | 370 |
#: -*- coding: utf-8 -*-
"""
Application default settings file
"""
#: package name
PACKAGE_NAME = 'ipredictor'
#: data datetime format (strftime/strptime pattern)
DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S"
#: resample period identifier ('H' is the hourly pandas offset alias)
RESAMPLE_PERIOD = 'H'
#: default season period is 24 hours for hourly resampled data
SEASON_PERIOD = 24
#: start coefs for optimization routines
INITIAL_COEF = 0.2
#: default ANN train epochs
TRAIN_EPOCHS = 1
#: default batch size
BATCH_SIZE = 100
2,
25,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
23416,
4277,
6460,
2393,
198,
37811,
198,
2,
25,
5301,
1438,
198,
47,
8120,
11879,
62,
20608,
796,
705,
541,
17407,
273,
6,
198,
198,
2,
25,
1366,
48... | 2.605714 | 175 |
from django.shortcuts import render
from django import forms
from django.http import HttpResponse
import MySQLdb
from django.contrib.auth import login, authenticate
from personal.forms import LoginForm, Form
import base64
import cv2
import re
import face_recognition
import pyttsx3 #text to speech library
# Text-to-speech engine, initialized once at import time.
engine = pyttsx3.init()
# Module-level mutable state shared by the face-recognition view code.
face_locations = []
face_encodings = []
face_names = []
# NOTE(review): presumably toggled to skip alternate camera frames for
# speed -- confirm against the view that consumes it.
process_this_frame = True
# Create your views here.
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
198,
6738,
42625,
14208,
1330,
5107,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,
198,
11748,
33476,
9945,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
17594,
11... | 3.258993 | 139 |
import time
from functools import partial
from pathlib import Path
import librosa
import numpy as np
import soundfile as sf
import toml
import torch
from torch.nn import functional
from torch.utils.data import DataLoader
from tqdm import tqdm
from ..acoustics.feature import stft, istft, mc_stft
from ..utils import initialize_module, prepare_device, prepare_empty_dir
| [
11748,
640,
198,
6738,
1257,
310,
10141,
1330,
13027,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
11748,
9195,
4951,
64,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2128,
7753,
355,
264,
69,
198,
11748,
284,
4029,
198,
11748,
2803... | 3.46729 | 107 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Functions that save the model's config into different formats.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras.saving.saved_model import json_utils
from tensorflow.python.util.tf_export import keras_export
# pylint: disable=g-import-not-at-top
try:
import yaml
except ImportError:
yaml = None
# pylint: enable=g-import-not-at-top
@keras_export('keras.models.model_from_config')
def model_from_config(config, custom_objects=None):
  """Instantiates a Keras model from its config.

  Usage:
  ```
  # for a Functional API model
  tf.keras.Model().from_config(model.get_config())

  # for a Sequential model
  tf.keras.Sequential().from_config(model.get_config())
  ```

  Args:
      config: Configuration dictionary.
      custom_objects: Optional dictionary mapping names
          (strings) to custom classes or functions to be
          considered during deserialization.

  Returns:
      A Keras model instance (uncompiled).

  Raises:
      TypeError: if `config` is not a dictionary.
  """
  if not isinstance(config, list):
    from tensorflow.python.keras.layers import deserialize  # pylint: disable=g-import-not-at-top
    return deserialize(config, custom_objects=custom_objects)
  # A list almost certainly means the caller passed a Sequential layer list.
  raise TypeError('`model_from_config` expects a dictionary, not a list. '
                  'Maybe you meant to use '
                  '`Sequential.from_config(config)`?')
@keras_export('keras.models.model_from_yaml')
def model_from_yaml(yaml_string, custom_objects=None):
  """Parses a yaml model configuration file and returns a model instance.

  Usage:

  >>> model = tf.keras.Sequential([
  ...     tf.keras.layers.Dense(5, input_shape=(3,)),
  ...     tf.keras.layers.Softmax()])
  >>> try:
  ...   import yaml
  ...   config = model.to_yaml()
  ...   loaded_model = tf.keras.models.model_from_yaml(config)
  ... except ImportError:
  ...   pass

  Args:
      yaml_string: YAML string or open file encoding a model configuration.
      custom_objects: Optional dictionary mapping names
          (strings) to custom classes or functions to be
          considered during deserialization.

  Returns:
      A Keras model instance (uncompiled).

  Raises:
      ImportError: if yaml module is not found.
  """
  # `yaml` is None when PyYAML could not be imported at module load time.
  if yaml is None:
    raise ImportError('Requires yaml module installed (`pip install pyyaml`).')
  parsed_config = yaml.safe_load(yaml_string)
  from tensorflow.python.keras.layers import deserialize  # pylint: disable=g-import-not-at-top
  return deserialize(parsed_config, custom_objects=custom_objects)
@keras_export('keras.models.model_from_json')
def model_from_json(json_string, custom_objects=None):
  """Parses a JSON model configuration string and returns a model instance.

  Usage:

  >>> model = tf.keras.Sequential([
  ...     tf.keras.layers.Dense(5, input_shape=(3,)),
  ...     tf.keras.layers.Softmax()])
  >>> config = model.to_json()
  >>> loaded_model = tf.keras.models.model_from_json(config)

  Args:
      json_string: JSON string encoding a model configuration.
      custom_objects: Optional dictionary mapping names
          (strings) to custom classes or functions to be
          considered during deserialization.

  Returns:
      A Keras model instance (uncompiled).
  """
  model_config = json_utils.decode(json_string)
  from tensorflow.python.keras.layers import deserialize  # pylint: disable=g-import-not-at-top
  return deserialize(model_config, custom_objects=custom_objects)
| [
2,
15069,
2864,
383,
309,
22854,
37535,
46665,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
... | 2.993382 | 1,511 |
#!/usr/bin/env python3
# Copyright 2021 Gabor Meszaros
# See LICENSE file for licensing details.
#
# Learn more at: https://juju.is/docs/sdk
"""Charm the service.
Refer to the following post for a quick-start guide that will help you
develop a new k8s charm using the Operator Framework:
https://discourse.charmhub.io/t/4208
"""
import logging
from typing import Any, Dict
from charms.nginx_ingress_integrator.v0.ingress import IngressRequires
from ops.charm import ActionEvent, CharmBase
from ops.framework import StoredState
from ops.main import main
from ops.model import (
ActiveStatus,
BlockedStatus,
Container,
MaintenanceStatus,
WaitingStatus,
)
from ops.pebble import ServiceStatus, PathError
logger = logging.getLogger(__name__)
class KamailioCharm(CharmBase):
    """Kamailio Charm Operator."""
    # StoredState is used to store data the charm needs persisted across invocations.
    _stored = StoredState()
    # NOTE(review): no __init__ is visible in this file.  Handlers reference
    # self.ingress and self._stored.sip_domain, so an initializer elsewhere
    # presumably creates IngressRequires(self, self._ingress_config), calls
    # self._stored.set_default(sip_domain=...) and observes the
    # config-changed / update-status / action events -- confirm before use.
    # ---------------------------------------------------------------------------
    # Properties
    # ---------------------------------------------------------------------------
    @property
    def _ingress_config(self) -> Dict[str, Any]:
        """Ingress configuration property.

        Builds the dict consumed by nginx-ingress-integrator; hostname falls
        back to the app name when "external-url" is unset.
        """
        ingress_config = {
            "service-hostname": self.config.get("external-url", self.app.name),
            "service-name": self.app.name,
            "service-port": 5060,
        }
        # tls-secret-name is only forwarded when configured (walrus, py3.8+).
        if tls_secret_name := self.config.get("tls-secret-name"):
            ingress_config["tls-secret-name"] = tls_secret_name
        return ingress_config
    # ---------------------------------------------------------------------------
    # Handlers for Charm Events
    # ---------------------------------------------------------------------------
    def _on_config_changed(self, _) -> None:
        """Handler for the config-changed event."""
        # Validate charm configuration
        try:
            self._validate_config()
        except Exception as e:
            self.unit.status = BlockedStatus(f"{e}")
            return
        # Check Pebble has started in the container
        container: Container = self.unit.get_container("kamailio")
        if not container.can_connect():
            logger.debug("waiting for pebble to start")
            self.unit.status = MaintenanceStatus("waiting for pebble to start")
            return
        # Update ingress config
        self.ingress.update_config(self._ingress_config)
        # Add Pebble layer with the Kamailio service
        container.add_layer(
            "kamailio",
            {
                "summary": "kamailio layer",
                "description": "pebble config layer for kamailio",
                "services": {
                    "kamailio": {
                        "override": "replace",
                        "summary": "kamailio",
                        "command": "kamailio -DD -E",
                        "startup": "enabled",
                    }
                },
            },
            combine=True,
        )
        container.replan()
        # Configure kamailio and restart service if needed
        configuration_has_changed = self._configure_kamailio()
        if configuration_has_changed:
            container.restart("kamailio")
        self._on_update_status()
    def _on_update_status(self, _=None) -> None:
        """Handler for the update-status event.

        Also called directly (with no event) by the other handlers to
        refresh the unit status after an operation.
        """
        # Check if the kamailio service is configured
        container: Container = self.unit.get_container("kamailio")
        if "kamailio" not in container.get_plan().services:
            self.unit.status = WaitingStatus("kamailio service not configured yet")
            return
        # Check if the kamailio service is running
        if container.get_service("kamailio").current == ServiceStatus.ACTIVE:
            self.unit.status = ActiveStatus("kamailio service is running")
        else:
            self.unit.status = BlockedStatus("kamailio service is not running")
    def _on_restart_action(self, event: ActionEvent) -> None:
        """Handler for the restart-action event."""
        try:
            self._restart_kamailio()
            event.set_results({"output": "service restarted"})
        except Exception as e:
            event.fail(f"Failed restarting kamailio: {e}")
    def _on_start_action(self, event: ActionEvent) -> None:
        """Handler for the start-action event."""
        try:
            self._start_kamailio()
            event.set_results({"output": "service started"})
        except Exception as e:
            event.fail(f"Failed starting kamailio: {e}")
    def _on_stop_action(self, event: ActionEvent) -> None:
        """Handler for the stop-action event."""
        try:
            self._stop_kamailio()
            event.set_results({"output": "service stopped"})
        except Exception as e:
            event.fail(f"Failed stopping kamailio: {e}")
    # ---------------------------------------------------------------------------
    # Validation and configuration
    # ---------------------------------------------------------------------------
    def _validate_config(self) -> None:
        """Validate charm configuration.
        Raises:
            Exception: if charm configuration is invalid.
        """
        # Check if sip-domain config is missing
        if "sip-domain" not in self.config:
            raise Exception('missing charm config: "sip-domain"')
        # Check if sip-domain config value is valid
        if len(self.config.get("sip-domain", "")) < 1:
            raise Exception('"sip-domain" config must be a non-empty string')
    def _configure_kamailio(self) -> bool:
        """Configure kamailio service.
        This function is in charge of pushing configuration files to the container.
        Returns:
            bool: True if the configuration has changed, else False.
        """
        configuration_has_changed = False
        container = self.unit.get_container("kamailio")
        # Configure /etc/kamailio/kamailio-local.cfg
        # Only pushed once: an existing file is left untouched.
        if not self._file_exists(container, "/etc/kamailio/kamailio-local.cfg"):
            container.push(
                "/etc/kamailio/kamailio-local.cfg",
                "listen=udp:0.0.0.0:5060",
            )
            configuration_has_changed = True
        # Configure /etc/kamailio/kamctlrc
        # Re-pushed whenever the configured sip-domain differs from the one
        # we last wrote (tracked in StoredState).
        if self.config["sip-domain"] != self._stored.sip_domain:
            # Backup original configuration file
            if not self._file_exists(container, "/etc/kamailio/kamctlrc.backup"):
                container.push(
                    "/etc/kamailio/kamctlrc.backup",
                    container.pull("/etc/kamailio/kamctlrc").read(),
                )
            container.push(
                "/etc/kamailio/kamctlrc",
                f'SIP_DOMAIN={self.config["sip-domain"]}',
            )
            self._stored.sip_domain = self.config["sip-domain"]
            configuration_has_changed = True
        return configuration_has_changed
    def _file_exists(self, container: Container, path: str) -> bool:
        """Check if a file exists in the container.
        Args:
            container (Container): workload container to inspect.
            path (str): Path of the file to be checked.
        Returns:
            bool: True if the file exists, else False.
        """
        file_exists = None
        try:
            _ = container.pull(path)
            file_exists = True
        except PathError:
            file_exists = False
        except FileNotFoundError:
            file_exists = False
        # NOTE(review): the literal below was probably meant to read
        # "doesn't exist"; kept as-is (log text only).
        exist_str = "exists" if file_exists else 'doesn"t exist'
        logger.debug(f"File {path} {exist_str}.")
        return file_exists
    # ---------------------------------------------------------------------------
    # Kamailio service functions (restart, start, stop)
    # ---------------------------------------------------------------------------
    def _restart_kamailio(self) -> None:
        """Restart kamailio service.
        Raises:
            Exception: if the kamailio service is not configured.
        """
        # Check if kamailio service doesn't exists
        container = self.unit.get_container("kamailio")
        if "kamailio" not in container.get_plan().services:
            raise Exception("kamailio service not configured yet.")
        # Restart kamailio service and update unit status
        container.restart("kamailio")
        self._on_update_status()
    def _start_kamailio(self) -> None:
        """Start kamailio service.
        Raises:
            Exception: if the kamailio service is already running.
        """
        # Check if kamailio service is active
        container = self.unit.get_container("kamailio")
        if container.get_service("kamailio").current == ServiceStatus.ACTIVE:
            raise Exception("kamailio service is already active")
        # Start kamailio service and update unit status
        container.start("kamailio")
        self._on_update_status()
    def _stop_kamailio(self) -> None:
        """Stop kamailio service.
        Raises:
            Exception: if the kamailio service is already stopped.
        """
        # Check if kamailio service isn't active
        container = self.unit.get_container("kamailio")
        if container.get_service("kamailio").current != ServiceStatus.ACTIVE:
            raise Exception("kamailio service is already stopped")
        # Stop kamailio service and update unit status
        container.stop("kamailio")
        self._on_update_status()
# Entrypoint: hand control to the Operator Framework event loop.
if __name__ == "__main__":
    main(KamailioCharm)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
15069,
33448,
402,
4820,
14937,
41046,
418,
198,
2,
4091,
38559,
24290,
2393,
329,
15665,
3307,
13,
198,
2,
198,
2,
14365,
517,
379,
25,
3740,
1378,
14396,
14396,
13,
271,
14,... | 2.400449 | 4,008 |
import dfparser
from libp2python import *
import sys
import os
import getopt
import random
# NOTE(review): this is Python 2 code (print statements); parse_cmdline,
# print_usage, gen_stub, eventLoopInitialize, eventLoop, Plumber and
# LookupGenerator are provided by the imports above / libp2python.
DATAFLOW_NAME = "LookupGenerator"
if __name__ == "__main__":
    # Parse command line; any failure prints usage and exits with code 3.
    try:
        flags, args = parse_cmdline(sys.argv)
    except:
        print "EXCEPTION"
        print_usage()
        sys.exit(3)
    if len(args) < 2:
        print_usage()
        sys.exit(3)
    eventLoopInitialize()
    # Positional args: listen address, port, then optional frequency and
    # a list of target node addresses.
    address = args[0]
    port = int(args[1])
    freq = 0
    nodes = []
    if len(args) >= 3:
        freq = int(args[2])
        for n in args[3:]:
            nodes.append(n)
    plumber = Plumber()
    stub = gen_stub(plumber, port)
    if plumber.install(stub) != 0:
        print "** Stub Failed to initialize correct spec\n"
    # Build the dataflow: input -> lookupGenerator -> output.
    edit = plumber.new_dataflow_edit(DATAFLOW_NAME);
    input = edit.find("input");
    output = edit.find("output");
    lookupGen = edit.addElement(LookupGenerator("lookupGenerator", address+":"+str(port), nodes, freq))
    edit.hookUp(input, 0, lookupGen, 0)
    edit.hookUp(lookupGen, 0, output, 0)
    # NOTE(review): message printed on a NON-zero install return, yet it
    # claims success -- presumably the condition or text is inverted; the
    # stub branch above treats != 0 as failure.  Confirm against Plumber API.
    if plumber.install(edit) != 0:
        print "Edit Correctly initialized.\n"
    # plumber.toDot("lookupGen.dot")
    # os.system("dot -Tps lookupGen.dot -o lookupGen.ps")
    # os.remove("lookupGen.dot")
    # Run the plumber
    eventLoop()
| [
11748,
288,
46428,
28198,
198,
6738,
9195,
79,
17,
29412,
1330,
1635,
198,
11748,
25064,
198,
11748,
28686,
198,
11748,
651,
8738,
198,
11748,
4738,
198,
198,
26947,
3697,
3913,
62,
20608,
796,
366,
8567,
929,
8645,
1352,
1,
198,
220,
... | 2.204429 | 587 |
'''get_color()
Retrieves the detected color of a surface.
Returns
Name of the color.
Type:String (text)
Values:'black','violet','blue','cyan','green','yellow','red','white',None
Errors
RuntimeError
The sensor has been disconnected from the Port.
Example
'''
from spike import ColorSensor
import time
# Initialize the Color Sensor
# NOTE(review): ColorSensor comes from the LEGO SPIKE `spike` module and
# time.sleep_ms is MicroPython-only -- this script targets the hub, not CPython.
paper_scanner = ColorSensor('E')
# Measure the color
# Poll once per second forever, reporting the detected surface color.
while True:
    color = paper_scanner.get_color()
    # Print the color name to the console
    print('Detected:', color)
    time.sleep_ms(1000)
    # Check if it's a specific color
    if color == 'red':
        print('It is red!')
| [
7061,
6,
1136,
62,
8043,
3419,
198,
9781,
5034,
1158,
262,
12326,
3124,
286,
257,
4417,
13,
198,
35561,
198,
5376,
286,
262,
3124,
13,
198,
6030,
25,
10100,
357,
5239,
8,
198,
40161,
32105,
13424,
41707,
85,
19194,
41707,
17585,
41707... | 3.04902 | 204 |
'''hmMHC command line interface
Copyright (c) 2019 Maxim Artyomov, Ilya Kizhvatov
'''
from __future__ import print_function
import argparse
import pandas as pd
import sys
from .hmmhc import hmMHC
def parseArgs(args):
    '''Define and parse command line arguments'''
    arg_parser = argparse.ArgumentParser(
        description='hmMHC - a hidden Markov model-based MHC binding predictor')
    arg_parser.add_argument(
        '--allele',
        help='allele (currently, only H2-IAb is supported)',
        type=str,
        required=True,
    )
    arg_parser.add_argument(
        '--output', help='output CSV file name', type=str, metavar='FILENAME')
    # Exactly one input source may be given: a CSV file or inline peptides.
    source_group = arg_parser.add_mutually_exclusive_group()
    source_group.add_argument(
        '--input',
        help='input CSV file name (exclusive with --peptides)',
        type=str,
        metavar='FILENAME',
    )
    source_group.add_argument(
        '--peptides',
        help='peptide sequences delimited by whitespaces (exclusive with --in)',
        nargs='+', type=str, metavar='PEPTIDE')
    return arg_parser.parse_args(args)
def main(arguments=None):
    '''hmMHC command line entrypoint.

    :param arguments: optional argv list. Defaults to ``sys.argv[1:]`` read
        at call time -- the original evaluated ``sys.argv[1:]`` once at
        import time (mutable-default pitfall), freezing whatever argv held
        when the module was first loaded.
    '''
    if arguments is None:
        arguments = sys.argv[1:]
    # get command line arguments
    args = parseArgs(arguments)
    # get peptides from input
    if args.input:
        peptidesDf = pd.read_csv(args.input, header=None)
        peptides = peptidesDf[0].to_list()
    elif args.peptides:
        peptides = args.peptides
    else:
        # BUG FIX: the original referenced `parser`, a local variable of
        # parseArgs(), which raised NameError here instead of reporting
        # the missing input.
        print('Error: no input provided', file=sys.stderr)
        sys.exit(1)
    # predict
    predictor = hmMHC('H2-IAb')
    predictions = predictor.predict(peptides)
    # output predictions: CSV file when --output was given, stdout otherwise
    if args.output:
        predictions.to_csv(args.output, index=False)
    else:
        predictions.to_csv(sys.stdout, index=False)
    sys.exit(0)
| [
7061,
6,
23940,
44,
16045,
3141,
1627,
7071,
198,
198,
15269,
357,
66,
8,
13130,
38962,
943,
774,
296,
709,
11,
49804,
64,
509,
528,
71,
85,
265,
709,
198,
7061,
6,
198,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
117... | 2.530758 | 699 |
from django.urls import path
from drones import views
from drones.v2 import views as views_v2
# URL routes: the named views for v1 plus the v2 API root.  Route names are
# taken from each view class's `name` attribute so reverse() lookups stay in
# sync with the views themselves.
urlpatterns = [
    path('vehicle-categories/',
         views.DroneCategoryList.as_view(),
         name=views.DroneCategoryList.name),
    path('vehicle-categories/<int:pk>',
         views.DroneCategoryDetail.as_view(),
         name=views.DroneCategoryDetail.name),
    path('vehicles/',
         views.DroneList.as_view(),
         name=views.DroneList.name),
    path('vehicles/<int:pk>',
         views.DroneDetail.as_view(),
         name=views.DroneDetail.name),
    path('pilots/',
         views.PilotList.as_view(),
         name=views.PilotList.name),
    path('pilots/<int:pk>',
         views.PilotDetail.as_view(),
         name=views.PilotDetail.name),
    path('competitions/',
         views.CompetitionList.as_view(),
         name=views.CompetitionList.name),
    path('competitions/<int:pk>',
         views.CompetitionDetail.as_view(),
         name=views.CompetitionDetail.name),
    path('',
         views_v2.ApiRootVersion2.as_view(),
         name=views_v2.ApiRootVersion2.name),
    ]
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
6738,
15382,
1330,
5009,
198,
6738,
15382,
13,
85,
17,
1330,
5009,
355,
5009,
62,
85,
17,
628,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
3108,
10786,
33892,
1548,
12,
66,
... | 2.122841 | 521 |
from .rank_one_tensor import Tensor
from .rank_one_matrix import Matrix
| [
198,
6738,
764,
43027,
62,
505,
62,
83,
22854,
1330,
309,
22854,
198,
6738,
764,
43027,
62,
505,
62,
6759,
8609,
1330,
24936,
198
] | 3.041667 | 24 |
# Generated by Django 2.0.1 on 2018-04-02 05:08
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
362,
13,
15,
13,
16,
319,
2864,
12,
3023,
12,
2999,
8870,
25,
2919,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 2.766667 | 30 |
import os
import pickle
import warnings
import hydra
import pandas as pd
import torch
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning.callbacks import EarlyStopping, StochasticWeightAveraging, LearningRateMonitor
from torch.utils.data import DataLoader
from torchvision import transforms
import pytorch_lightning as pl
import albumentations as A
from pytorch_lightning import loggers as pl_loggers
from pytorch_lightning.loggers import WandbLogger
import wandb
from tqdm import tqdm_notebook as tqdm
from our_datasets import PlatCLEFSimCLR, PlantCLEF2022Supr, ObservationsDataset
from engines import SimCLREngine, SuprEngine
from models.factory import create_model
from summary import *
# Silence all warnings globally, including pandas/sklearn FutureWarnings.
warnings.filterwarnings("ignore")
warnings.simplefilter(action='ignore', category=FutureWarning)
# NOTE(review): hard-coded machine-specific path; breaks on any other host.
CODE_ROOT = f'C:/Users/maeot/Documents/code/biomachina'
import sys
# Make the project importable when running outside the repo root.
sys.path.insert(0, CODE_ROOT)
import os
from hydra import initialize, initialize_config_module, initialize_config_dir, compose
from omegaconf import OmegaConf
# initialize_config_dir(config_dir=os.path.join(CODE_ROOT, "config"))
def get_full_path(base_path, path):
    r"""Expand env vars and the ``~`` user alias in *path*; resolve relative
    results against *base_path*.

    Args:
        base_path: prefix used when the expanded path is still relative.
        path: directory to be expanded. i.e data, ./web, ~/data, $HOME, %USER%, /data
    """
    expanded = os.path.expandvars(path)
    expanded = os.path.expanduser(expanded)
    if os.path.isabs(expanded):
        return expanded
    return os.path.join(base_path, expanded)
# NOTE(review): the function the active decorator below applies to
# (`execute_training`) is missing from this file.  A bare decorator followed
# by an `if` statement is a SyntaxError, so this module cannot currently be
# imported or run; restore the decorated entry point (or comment out the
# active @hydra.main line) before use.
# @hydra.main(config_path="config", config_name="simclr_vit.yaml")
@hydra.main(config_path="config", config_name="supr_hresnet50.yaml")
# @hydra.main(config_path="config", config_name="supr_hresnet101.yaml")
# @hydra.main(config_path="config", config_name="supr_vitae.yaml")
# @hydra.main(config_path="config", config_name="supr_hefficientnet_b4.yaml")
# @hydra.main(config_path="config", config_name="supr_hcct_14_7x2_224.yaml")
# @hydra.main(config_path="config", config_name="supr_hdensenet.yaml")
# @hydra.main(config_path="config", config_name="supr_hefficientnet_b4.yaml")
# @hydra.main(config_path="config", config_name="supr_obs_hefficientnet_b4.yaml")
# @hydra.main(config_path="config", config_name="supr_obs_hresnet50.yaml")
if __name__ == "__main__":
    execute_training()
11748,
28686,
198,
11748,
2298,
293,
198,
11748,
14601,
198,
198,
11748,
25039,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
28034,
198,
6738,
267,
28917,
7807,
69,
1330,
360,
713,
16934,
11,
19839,
18546,
198,
6738,
12972,
13165,
... | 2.763514 | 888 |
from django.db import models
from .settings import settings as nopeek_settings
from .utils import import_callable
class EncrpytedModelMixin(models.Model):
    """Abstract Django model mixin that attaches the configured cipher.

    NOTE(review): the class name is misspelled ("Encrpyted" -> "Encrypted");
    kept as-is because renaming would break existing imports and migrations.
    """
    # Cipher implementation resolved from settings once, at class-definition
    # (import) time -- later settings changes will not be picked up.
    cipher_module = import_callable(nopeek_settings["CIPHER_CLASS"])
    class Meta:
        """Model options: abstract, so no table is created for the mixin."""
        abstract = True
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
198,
6738,
764,
33692,
1330,
6460,
355,
299,
3008,
988,
62,
33692,
198,
6738,
764,
26791,
1330,
1330,
62,
13345,
540,
628,
198,
4871,
2039,
6098,
9078,
1513,
17633,
35608,
259,
7,
27530,
... | 2.682432 | 148 |
import battlecode as bc
import random
import util
| [
11748,
3344,
8189,
355,
47125,
198,
11748,
4738,
198,
11748,
7736,
628
] | 4.25 | 12 |
import asyncio
import logging
import random
from dataclasses import dataclass
from typing import Any, Dict, Optional, Tuple
import ujson
from hummingbot.client.config.config_methods import using_exchange
from hummingbot.client.config.config_var import ConfigVar
from hummingbot.connector.exchange.gate_io import gate_io_constants as CONSTANTS
from hummingbot.connector.exchange.gate_io.gate_io_auth import GateIoAuth
from hummingbot.core.web_assistant.web_assistants_factory import WebAssistantsFactory
from hummingbot.core.web_assistant.connections.data_types import RESTMethod, RESTRequest, RESTResponse
from hummingbot.core.web_assistant.rest_assistant import RESTAssistant
from hummingbot.core.api_throttler.async_throttler import AsyncThrottler
from hummingbot.core.utils.tracking_nonce import get_tracking_nonce
# Gate.io is a centralized exchange.
CENTRALIZED = True
# Example trading pair shown in client prompts/documentation.
EXAMPLE_PAIR = "BTC-USDT"
# Default fee percentages; presumably [maker, taker] -- confirm against the
# connector's fee lookup before relying on the order.
DEFAULT_FEES = [0.2, 0.2]
@dataclass
async def _sleep(delay):
"""
Function added only to facilitate patching the sleep in unit tests without affecting the asyncio module
"""
await asyncio.sleep(delay)
KEYS = {
"gate_io_api_key":
ConfigVar(key="gate_io_api_key",
prompt=f"Enter your {CONSTANTS.EXCHANGE_NAME} API key >>> ",
required_if=using_exchange("gate_io"),
is_secure=True,
is_connect_key=True),
"gate_io_secret_key":
ConfigVar(key="gate_io_secret_key",
prompt=f"Enter your {CONSTANTS.EXCHANGE_NAME} secret key >>> ",
required_if=using_exchange("gate_io"),
is_secure=True,
is_connect_key=True),
}
| [
11748,
30351,
952,
198,
11748,
18931,
198,
11748,
4738,
198,
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
198,
6738,
19720,
1330,
4377,
11,
360,
713,
11,
32233,
11,
309,
29291,
198,
198,
11748,
334,
17752,
198,
6738,
41465,
13645,
13... | 2.510542 | 664 |
import tkinter
import clock
| [
11748,
256,
74,
3849,
198,
11748,
8801,
198
] | 3.5 | 8 |
# -*- coding: utf-8 -*-
__author__ = 'Adward'
# Python utils imports
import math
import os
import sys
from time import time
import sqlite3
# Standard scientific Python imports
import matplotlib.pyplot as plt
import numpy as np
# Import classifiers and performance metrics
from sklearn.preprocessing import *
from sklearn.feature_extraction import DictVectorizer
from sklearn.cross_validation import StratifiedKFold, ShuffleSplit
from sklearn.metrics import *
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.decomposition import PCA
# Constant values
DATA_PATH = '/Users/Adward/OneDrive/YelpData/'
DB_PATH = os.path.join(DATA_PATH, 'yelp.sqlite')
n_sample = 2225213 # 1992542
review_class = [260492, 190048, 282115, 591618, 900940] # 2.6:1.9:2.8:5.9:9.0
earliest = {'day': 20041018, 'month': 200410, 'year': 2004}
latest = {'day': 20151224, 'month': 201512, 'year': 2015}
valid_states = ['AZ', 'NV', 'ON', 'WI', 'QC', 'SC', 'EDH', 'PA', 'MLN', 'BW', 'NC', "IL"]
applied_categories = {'Debt Relief Services', 'Armenian', 'Spine Surgeons', 'House Sitters', 'Taxidermy', 'Iberian', 'Pita', 'Beer Hall', 'Childproofing', 'Assisted Living Facilities', 'Rhinelandian', 'Oriental', 'Palatine', 'Carpenters', 'Choirs', 'Wok', 'Nursing Schools', 'Surf Shop', 'Perfume', 'Kitchen Incubators', 'Flowers', 'Swiss Food', 'Castles', 'Parenting Classes', 'Ferries', 'Donairs', 'Rest Stops', 'Gerontologists', 'Bike Sharing', 'Piano Stores', 'Trinidadian', 'Translation Services', 'Eastern European', 'College Counseling', 'Community Gardens', 'Wine Tasting Classes', 'Art Restoration', 'Slovakian', 'Backshop', 'Supper Clubs', 'Editorial Services', 'Dialysis Clinics', 'Childbirth Education', 'IP & Internet Law', 'Tax Law', 'Farming Equipment', 'Art Tours', 'Concept Shops', 'Mosques', 'Australian'}
# Loading samples from the database & pre-scale
def load_samples(attr_list, prescale=False, oversampling=(0, 0), elite_expand=False, state_all=False):
'''
:param attr_list: List[Str], containing the list of features to be selected and encoded
:param prescale: Bool, (when True) pre-scale features with too large range of values to expedite converging
:param oversampling: Tuple(Int), double review samples with star classes in range
:param elite_expand: Bool, (when True) encode 12 features related to user.elite as [elite20**] & elite-year-sum;
(when False) only 1 feature stands for elite-year-sum
:param state_all: Bool, (when True) occupies 39 features; (when False) using only 12 prime states PLUS OTHERS
:return: List[Dict], List[Int]
'''
t = time()
with sqlite3.connect(DB_PATH) as conn:
# conn.execute('CREATE TEMP TABLE tmp_b1 (business_id TEXT, avg_star_elite REAL)')
# conn.execute('CREATE TEMP TABLE tmp_b2 (business_id TEXT, avg_star_nonelite REAL)')
# conn.execute('INSERT INTO tmp_b1 (business_id, avg_star_elite) '
# 'SELECT business_id, AVG(average_stars) AS avg_star_elite FROM '
# '(review JOIN user USING (user_id)) WHERE elite!="" GROUP BY business_id')
# conn.execute('INSERT INTO tmp_b2 (business_id, avg_star_nonelite) '
# 'SELECT business_id, AVG(average_stars) AS avg_star_nonelite FROM '
# '(review JOIN user USING (user_id)) WHERE elite="" GROUP BY business_id')
# conn.execute('DROP TABLE IF EXISTS bstat_by_elite')
# conn.execute('CREATE TABLE bstat_by_elite (business_id TEXT, avg_star_elite REAL, avg_star_nonelite REAL)')
# conn.execute('INSERT INTO tmp_b SELECT * FROM '
# '((business LEFT OUTER JOIN tmp_b1 USING (business_id)) '
# 'LEFT OUTER JOIN tmp_b2 USING (business_id))')
# conn.row_factory = sqlite3.Row
cur = conn.execute('SELECT ' + ','.join(attr_list) +
' FROM ('
'(review JOIN (business JOIN b_category_pca USING (business_id)) USING (business_id)) '
'JOIN user '
'USING (user_id) )')
sample_matrix = [] # feature matrix to return
targets = [] # class vector
row_num = 0
for row in cur:
targets.append(row[0]) # review.stars
# construct temp feature dict
sample = {}
for j in range(1, len(attr_list)):
sample[attr_list[j]] = row[j]
# encode features for business.state
if ('business.state' in attr_list) and (not state_all) and (sample['business.state'] not in valid_states):
sample['business.state'] = 'OTH' # other 17 states with few business recorded
if ('user_state' in attr_list) and (not state_all) and (sample['user_state'] not in valid_states):
sample['user_state'] = 'OTH'
# Create elite-related features || encode elite-year-number
# if elite_expand:
# for year in range(earliest['year']+1, latest['year']+1):
# sample['elite'+str(year)] = 0
# if len(sample['elite']):
# elite_years = [int(y) for y in sample['elite'].split('&')]
# sample['elite'] = len(elite_years)
# for year in elite_years:
# sample['elite'+str(year)] = 1
# else:
# sample['elite'] = 0
# else:
# if len(sample['elite']):
# sample['elite'] = len(sample['elite'].split('&'))
# else:
# sample['elite'] = 0
# encode features of friends_stat
# encode features of business_avg_stars_by_elite
nan_list = ['avg_review_count', 'avg_votes', 'avg_star_elite', 'avg_star_nonelite']
for feat in nan_list:
if feat in attr_list and not sample[feat]:
sample[feat] = 0
# encode business.categories features
if 'cas' in attr_list:
cas = sample['cas'].split(';')
del sample['cas']
for i in range(3):
sample['ca_'+str(i)] = float(cas[i])
# for ca in applied_categories:
# sample['ca_'+ca] = 0
# if len(sample['categories']):
# categories = sample['categories'].split('&')
# for j in range(len(categories)):
# if categories[j] in applied_categories:
# sample['ca_' + categories[j]] = 1
# del sample['categories']
# process control & display
row_num += 1
# print(sample)
if row_num % 100000 == 0:
print("%.1f %%" % (row_num * 100 / n_sample))
sample_matrix.append(sample)
# oversampling some review star classes
if oversampling[0] <= targets[-1] <= oversampling[1]:
sample_matrix.append(sample)
targets.append(targets[-1])
# if row_num == 10000:
# break
print('Done with joining & collecting data from database, using ', time()-t, 's')
return sample_matrix, targets
if __name__ == '__main__':
test_flag = 0
for arg in sys.argv:
if arg.split('=')[0] == 'test':
test_flag = arg.split('=')[1]
attr_list = [
'review.stars', # target value, must be placed at this first place
'average_stars',
# 'avg_friends_star',
# 'avg_review_count',
# 'avg_star_elite',
# 'avg_star_nonelite',
# 'avg_votes',
# 'business.city', # occupies 380 features
'business.review_count',
'business.stars',
# 'business.state', # occupies 29 -> 13 features
# 'categories', # occupies 890 features
'cas',
'checkins',
'compliments',
# 'elite', # occupies 12 -> 1 feature(s)
'fans',
'review.votes',
'review_date',
'user.review_count',
'user.votes',
# 'user_state',
# 'weekends_open', # binary
'yelping_since',
]
samples, targets = load_samples(attr_list, prescale=False, oversampling=(1, 4))
samples, n_features = reform_features(samples, scaling=False)
n_samples = len(samples) # may be different from original n_sample in db !
print('n_samples:', n_samples)
# div = StratifiedKFold(targets, n_folds=5) # 5-Fold Cross Validation
div = ShuffleSplit(n_samples, n_iter=5, test_size=0.2, random_state=0)
if test_flag:
div = ShuffleSplit(n_samples, n_iter=1, test_size=0.2, random_state=0)
model = RandomForestClassifier(n_estimators=5, max_features='auto') # int(math.sqrt(n_features)))
# model = GradientBoostingClassifier(n_estimators=5, learning_rate=1, max_depth=2, random_state=0)
train_and_predict(samples, targets, div, model, n_features)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
834,
9800,
834,
796,
705,
2782,
904,
6,
198,
198,
2,
11361,
3384,
4487,
17944,
198,
11748,
10688,
198,
11748,
28686,
198,
11748,
25064,
198,
6738,
640,
1330,
640,
198,
... | 2.232353 | 4,080 |
#!/usr/bin/env python2.4
"""
Tools for working with oceanic data
"""
from numpy import *
def rho_stp(s,t,p=0):
"""
returns density as a function of:
s = Salinity in psu,
t = Temperature in deg C,
p = Pressure in dbar (default = 0)
"""
p1 = 999.842594
p2 = 6.793952E-2
p3 = -9.09529E-3
p4 = 1.001685E-4
p5 = -1.120083E-6
p6 = 6.536332E-9
p7 = 8.24493E-1
p8 = -4.0899E-3
p9 = 7.6438E-5
p10 = -8.2467E-7
p11 = 5.3875E-9
p12 = -5.72466E-3
p13 = 1.0227E-4
p14 = -1.6546E-6
p15 = 4.8314E-4
k1 = 19652.21
k2 = 148.4206
k3 = -2.327105
k4 = 1.360477E-2
k5 = -5.155288E-5
k6 = 3.239908
k7 = 1.43713E-3
k8 = 1.16092E-4
k9 = -5.77905E-7
k10 = 8.50935E-5
k11 = -6.12293E-6
k12 = 5.2787E-8
k13 = 54.6746
k14 = -0.603459
k15 = 1.09987E-2
k16 = -6.1670E-5
k17 = 7.944E-2
k18 = 1.6483E-2
k19 = -5.3009E-4
k20 = 2.2838E-3
k21 = -1.0981E-5
k22 = -1.6078E-6
k23 = 1.91075E-4
k24 = -9.9348E-7
k25 = 2.0816E-8
k26 = 9.1697E-10
ro_st0 = p1 + p2*t + p3*t**2 + p4*t**3 + p5*t**4 + p6*t**5\
+ p7*s + p8*s*t + p9*t**2*s + p10*t**3*s + p11*t**4*s\
+ p12*s**1.5 + p13*t*s**1.5 + p14*t**2*s**1.5 + p15*s**2
k_stp = k1 + k2*t + k3*t**2 + k4*t**3 + k5*t**4\
+ k6*p + k7*t*p + k8*t**2*p + k9*t**3*p\
+ k10*p**2 + k11*t*p**2 + k12*t**2*p**2\
+ k13*s + k14*t*s + k15*t**2*s + k16*t**3*s\
+ k17*s**1.5 + k18*t*s**1.5 + k19*t**2*s**1.5\
+ k20*p*s + k21*t*p*s + k22*t**2*p*s + k23*p*s**1.5\
+ k24*p**2*s + k25*t*p**2*s + k26*t**2*p**2*s
return ro_st0/(1.0 - (p/k_stp))
def o2_sat(T,S):
"""returns saturation concentrations of o2 [ millimole O2 / m3 ] for
a given temperature and salinity (at STP)"""
A1 = -173.4292
A2 = 249.6339
A3 = 143.3483
A4 = -21.8492
B1 = -0.033096
B2 = 0.014259
B3 = -0.0017000
# Convert T to deg. C to deg. K
T = T + 273.15
# O2 Concentration in mg/l
# [from Millero and Sohn, Chemical Oceanography, CRC Press, 1992]
O = exp(A1 + A2*(100.0/T) + A3*log(T/100.0) + A4*(T/100.0) + \
S*(B1 + B2*(T/100.0) + B3*((T/100.0)**2)) )
# Convert to mmol/m3
# mmol/m3 = 44.66 ml/l
# mg/l = ml/l * 1.42903 mg/ml
return O*(44.66*1.42903)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
17,
13,
19,
198,
37811,
198,
33637,
329,
1762,
351,
9151,
291,
1366,
198,
37811,
198,
6738,
299,
32152,
1330,
1635,
198,
198,
4299,
374,
8873,
62,
301,
79,
7,
82,
11,
83,
11,
79,
28,
... | 1.551122 | 1,604 |
from django.contrib import admin
from django.urls import path, include
from rest_framework.documentation import include_docs_urls
urlpatterns = [
path('', include('categories.urls', namespace='categories')),
path('', include('follows.urls', namespace='follows')),
path('', include('notifications.urls', namespace='notifications')),
path('', include('privates.urls', namespace='privates')),
path('', include('saves.urls', namespace='saves')),
path('', include('threads.urls', namespace='threads')),
path('', include('rewards.urls', namespace='rewards')),
path('', include('reports.urls', namespace='reports')),
path('', include('impressions.urls', namespace='impressions')),
path('', include('accounts.urls', namespace='accounts')),
path('', include('posts.urls', namespace='posts')),
path('dashboard/', admin.site.urls),
path('docs/', include_docs_urls(title='DJ Forum API'))
]
# from rest_framework.schemas import get_schema_view
# from django.views.generic import TemplateView
# schema_view = get_schema_view(title="DJ Forum API", patterns=urlpatterns)
# urlpatterns += [
# path('openapi/', schema_view, name='openapi-schema'),
# path('docs/', TemplateView.as_view(
# template_name='documentation.html',
# extra_context={'schema_url':'openapi-schema'}
# ), name='swagger-ui'),
# ]
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
11,
2291,
198,
198,
6738,
1334,
62,
30604,
13,
22897,
341,
1330,
2291,
62,
31628,
62,
6371,
82,
628,
198,
6371,
33279,
82,
796,
685,
... | 3 | 444 |
from django.db import models
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django import forms
from django.db.models.signals import post_save
from django.conf import settings
from django.dispatch import receiver
from django.db.models.fields import URLField
from PIL import Image
from django.contrib.contenttypes.fields import GenericRelation
from hitcount.models import HitCountMixin, HitCount
# Create your models here.
category_choices= (
('Motion_icon', 'Motion Icon'),
('Important_icon','Important'),
)
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
# @receiver(post_save, sender=User,dispatch_uid='save_new_user_profile')
# def create_or_save_profile(sender,created,instance,*args,**kwargs):
# super(Profile).save(*args, **kwargs)
# print("HELLO")
# if created:
# Profile.objects.create(user=instance)
# profile = Profile(user=user)
# Profile.save()
# class otherUserDetails(models.Model):
# user = models.OneToOneField(User, on_delete=models.CASCADE)
# story = models.CharField(max_length=50, default="Hii i am using InstaBio",blank=True)
# Proffesion = models.CharField(max_length=100,default="",blank=True)
# adress = models.CharField(max_length=100,default="",blank=True)
# adress = models.CharField()
# class UserDetail(models.Model):
# user = models.OneToOneField(User, null=True, on_delete=models.CASCADE, blank=True)
# Name = models.CharField(max_length=25)
# Description = models.TextField(null=True, blank=True)
# profile_image = models.ImageField(null=True, blank=True)
# email = models.EmailField( null=True, blank=True)
# dateAndTime = models.DateTimeField(auto_now_add=True, null=True)
# # user = models.ForeignKey(User, on_delete=models.CASCADE,blank=True, null=True, default="")
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
23914,
1330,
11787,
12443,
341,
8479,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
198,
6738,
42625,
14208,
1... | 2.78635 | 674 |
# time: 2021/4/28 22:50
# File: utils.py
# Author: zhangshubo
# Mail: supozhang@126.com
import json
import os
import random
import torch
_bert_token_dict = json.loads(open("data/bert/bert-base-chinese/tokenizer.json", encoding="utf-8").read())["model"][
"vocab"]
# extra_tencent_embedding(r"E:\tencent_embedding\Tencent_AILab_ChineseEmbedding.txt")
| [
2,
640,
25,
33448,
14,
19,
14,
2078,
2534,
25,
1120,
198,
2,
9220,
25,
3384,
4487,
13,
9078,
198,
2,
6434,
25,
1976,
33255,
1477,
549,
78,
198,
2,
11099,
25,
7418,
8590,
33255,
31,
19420,
13,
785,
198,
11748,
33918,
198,
11748,
... | 2.552448 | 143 |
#
# @lc app=leetcode id=7 lang=python3
#
# [7] Reverse Integer
#
# Given a 32-bit signed integer, reverse digits of an integer.
# Example 1:
# Input: 123
# Output: 321
# Example 2:
# Input: -123
# Output: -321
# Example 3:
# Input: 120
# Output: 21
# Note:
# Assume we are dealing with an environment which could only store integers within the 32-bit signed integer range: [โ231, 231 โ 1]. For the purpose of this problem, assume that your function returns 0 when the reversed integer overflows.
| [
2,
198,
2,
2488,
44601,
598,
28,
293,
316,
8189,
4686,
28,
22,
42392,
28,
29412,
18,
198,
2,
198,
2,
685,
22,
60,
31849,
34142,
198,
2,
198,
2,
220,
11259,
257,
3933,
12,
2545,
4488,
18253,
11,
9575,
19561,
286,
281,
18253,
13,
... | 3.266234 | 154 |
from hash_table import LinearProbeHashTable
from typing import Tuple
import timeit
def process_option(dictionary : Dictionary, method_name: str) -> None:
""" Helper code for processing menu options."""
if method_name == 'read_file':
filename = input('Enter filename: ')
try:
dictionary.load_dictionary(filename)
print('Successfully read file')
except FileNotFoundError as e:
print(e)
else:
word = input('Enter word: ')
if method_name == 'add_word':
dictionary.add_word(word)
try:
dictionary.add_word(word)
print('[{}] {}'.format(word, 'Successfully added'))
except IndexError as e:
print('[{}] {}'.format(word, e))
elif method_name == 'find_word':
if dictionary.find_word(word):
print('[{}] {}'.format(word, 'Found in dictionary'))
else:
print('[{}] {}'.format(word, 'Not found in dictionary'))
elif method_name == 'delete_word':
try:
dictionary.delete_word(word)
print('[{}] {}'.format(word, 'Deleted from dictionary'))
except KeyError:
print('[{}] {}'.format(word, 'Not found in dictionary'))
def menu(dictionary : Dictionary):
""" Wrapper for using the dictionary. """
option = None
menu_options = {'read_file': 'Read File',
'add_word': 'Add Word',
'find_word': 'Find Word',
'delete_word': 'Delete Word',
'exit': 'Exit'}
exit_option = list(menu_options.keys()).index('exit') + 1
while option != exit_option:
print('---------------------')
opt = 1
for menu_option in menu_options.values():
print('{}. {}'.format(opt, menu_option))
opt += 1
print('---------------------')
try:
option = int(input("Enter option: "))
if option < 1 or option > exit_option:
raise ValueError('Option must be between 1 and ' + str(exit_option))
except ValueError as e:
print('[{}] {}'.format('menu', e))
else:
if option != exit_option:
process_option(dictionary, list(menu_options.keys())[option - 1])
print("---------------------")
if __name__ == '__main__':
dictionary = Dictionary(31, 250727)
menu(dictionary)
| [
6738,
12234,
62,
11487,
1330,
44800,
2964,
1350,
26257,
10962,
198,
6738,
19720,
1330,
309,
29291,
198,
11748,
640,
270,
198,
198,
4299,
1429,
62,
18076,
7,
67,
14188,
1058,
28261,
11,
2446,
62,
3672,
25,
965,
8,
4613,
6045,
25,
198,
... | 2.167393 | 1,147 |
#-*- coding: utf-8 -*-
"""
Mixing matrices and assortativity coefficients.
"""
__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
__all__ = ['degree_assortativity',
'attribute_assortativity',
'numeric_assortativity',
'attribute_mixing_matrix',
'degree_mixing_matrix',
'degree_pearsonr',
'degree_mixing_dict',
'attribute_mixing_dict',
]
import networkx as nx
def degree_assortativity(G,nodes=None):
"""Compute degree assortativity of graph.
Assortativity measures the similarity of connections
in the graph with respect to the node degree.
Parameters
----------
G : NetworkX graph
nodes: list or iterable (optional)
Compute degree assortativity only for nodes in container.
The default is all nodes.
Returns
-------
r : float
Assortativity of graph by degree.
Examples
--------
>>> G=nx.path_graph(4)
>>> r=nx.degree_assortativity(G)
>>> print("%3.1f"%r)
-0.5
See Also
--------
attribute_assortativity
numeric_assortativity
neighbor_connectivity
degree_mixing_dict
degree_mixing_matrix
Notes
-----
This computes Eq. (21) in Ref. [1]_ , where e is the joint
probability distribution (mixing matrix) of the degrees. If G is
directed than the matrix e is the joint probability of out-degree
and in-degree.
References
----------
.. [1] M. E. J. Newman, Mixing patterns in networks,
Physical Review E, 67 026126, 2003
"""
if nodes is None:
node_iter = G
else:
node_iter = G.nbunch_iter(nodes)
return numeric_assortativity_coefficient(degree_mixing_matrix(G, node_iter))
def degree_pearsonr(G,nodes=None):
"""Compute degree assortativity of graph.
Assortativity measures the similarity of connections
in the graph with respect to the node degree.
Parameters
----------
G : NetworkX graph
nodes: list or iterable (optional)
Compute pearson correlation of degrees only for nodes in container.
The default is all nodes.
Returns
-------
r : float
Assortativity of graph by degree.
Examples
--------
>>> G=nx.path_graph(4)
>>> r=nx.degree_pearsonr(G)
>>> r
-0.5
Notes
-----
This calls scipy.stats.pearsonr().
References
----------
.. [1] M. E. J. Newman, Mixing patterns in networks
Physical Review E, 67 026126, 2003
"""
try:
import scipy.stats as stats
except ImportError:
raise ImportError(
"Assortativity requires SciPy: http://scipy.org/ ")
if nodes is None:
node_iter = G
else:
node_iter = G.nbunch_iter(nodes)
xy=node_degree_xy(G,node_iter)
x,y=zip(*xy)
return stats.pearsonr(x,y)[0]
def attribute_mixing_dict(G,attribute,nodes=None,normalized=False):
"""Return dictionary representation of mixing matrix for attribute.
Parameters
----------
G : graph
NetworkX graph object.
attribute : string
Node attribute key.
nodes: list or iterable (optional)
Unse nodes in container to build the dict. The default is all nodes.
normalized : bool (default=False)
Return counts if False or probabilities if True.
Examples
--------
>>> G=nx.Graph()
>>> G.add_nodes_from([0,1],color='red')
>>> G.add_nodes_from([2,3],color='blue')
>>> G.add_edge(1,3)
>>> d=nx.attribute_mixing_dict(G,'color')
>>> print(d['red']['blue'])
1
>>> print(d['blue']['red']) # d symmetric for undirected graphs
1
Returns
-------
d : dictionary
Counts or joint probability of occurrence of attribute pairs.
"""
if nodes is None:
node_iter = G
else:
node_iter = G.nbunch_iter(nodes)
xy_iter=node_attribute_xy(G,attribute,node_iter)
return mixing_dict(xy_iter,normalized=normalized)
def attribute_mixing_matrix(G,attribute,nodes=None,mapping=None,normalized=True):
"""Return mixing matrix for attribute.
Parameters
----------
G : graph
NetworkX graph object.
attribute : string
Node attribute key.
nodes: list or iterable (optional)
Use only nodes in container to build the matrix. The default is
all nodes.
mapping : dictionary, optional
Mapping from node attribute to integer index in matrix.
If not specified, an arbitrary ordering will be used.
normalized : bool (default=False)
Return counts if False or probabilities if True.
Returns
-------
m: numpy array
Counts or joint probability of occurrence of attribute pairs.
"""
if nodes is None:
node_iter = G
else:
node_iter = G.nbunch_iter(nodes)
d=attribute_mixing_dict(G,attribute,node_iter)
a=dict_to_numpy_array(d,mapping=mapping)
if normalized:
a=a/a.sum()
return a
def attribute_assortativity(G,attribute,nodes=None):
"""Compute assortativity for node attributes.
Assortativity measures the similarity of connections
in the graph with respect to the given attribute.
Parameters
----------
G : NetworkX graph
attribute : string
Node attribute key
nodes: list or iterable (optional)
Compute attribute assortativity for nodes in container.
The default is all nodes.
Returns
-------
a: float
Assortativity of given attribute
Examples
--------
>>> G=nx.Graph()
>>> G.add_nodes_from([0,1],color='red')
>>> G.add_nodes_from([2,3],color='blue')
>>> G.add_edges_from([(0,1),(2,3)])
>>> print(nx.attribute_assortativity(G,'color'))
1.0
Notes
-----
This computes Eq. (2) in Ref. [1]_ , (trace(e)-sum(e))/(1-sum(e)),
where e is the joint probability distribution (mixing matrix)
of the specified attribute.
References
----------
.. [1] M. E. J. Newman, Mixing patterns in networks,
Physical Review E, 67 026126, 2003
"""
if nodes is None:
node_iter = G
else:
node_iter = G.nbunch_iter(nodes)
a=attribute_mixing_matrix(G,attribute,node_iter)
return attribute_assortativity_coefficient(a)
def numeric_assortativity(G,attribute,nodes=None):
"""Compute assortativity for numerical node attributes.
Assortativity measures the similarity of connections
in the graph with respect to the given numeric attribute.
Parameters
----------
G : NetworkX graph
attribute : string
Node attribute key
nodes: list or iterable (optional)
Compute numeric assortativity only for attributes of nodes in
container. The default is all nodes.
Returns
-------
a: float
Assortativity of given attribute
Examples
--------
>>> G=nx.Graph()
>>> G.add_nodes_from([0,1],size=2)
>>> G.add_nodes_from([2,3],size=3)
>>> G.add_edges_from([(0,1),(2,3)])
>>> print(nx.numeric_assortativity(G,'size'))
1.0
Notes
-----
This computes Eq. (21) in Ref. [1]_ ,
where e is the joint probability distribution (mixing matrix)
of the specified attribute.
References
----------
.. [1] M. E. J. Newman, Mixing patterns in networks
Physical Review E, 67 026126, 2003
"""
if nodes is None:
node_iter = G
else:
node_iter = G.nbunch_iter(nodes)
a=numeric_mixing_matrix(G,attribute,node_iter)
return numeric_assortativity_coefficient(a)
def attribute_assortativity_coefficient(e):
"""Compute assortativity for attribute matrix e.
Parameters
----------
e : numpy array or matrix
Attribute mixing matrix.
Notes
-----
This computes Eq. (2) in Ref. [1]_ , (trace(e)-sum(e))/(1-sum(e)),
where e is the joint probability distribution (mixing matrix)
of the specified attribute.
References
----------
.. [1] M. E. J. Newman, Mixing patterns in networks,
Physical Review E, 67 026126, 2003
"""
try:
import numpy
except ImportError:
raise ImportError(
"attribute_assortativity requires NumPy: http://scipy.org/ ")
if e.sum() != 1.0:
e=e/float(e.sum())
e=numpy.asmatrix(e)
s=(e*e).sum()
t=e.trace()
r=(t-s)/(1-s)
return float(r)
def degree_mixing_dict(G,nodes=None,normalized=False):
"""Return dictionary representation of mixing matrix for degree.
Parameters
----------
G : graph
NetworkX graph object.
normalized : bool (default=False)
Return counts if False or probabilities if True.
Returns
-------
d: dictionary
Counts or joint probability of occurrence of degree pairs.
"""
if nodes is None:
node_iter = G
else:
node_iter = G.nbunch_iter(nodes)
xy_iter=node_degree_xy(G,node_iter)
return mixing_dict(xy_iter,normalized=normalized)
def numeric_mixing_matrix(G,attribute,nodes=None,normalized=True):
"""Return numeric mixing matrix for attribute.
Parameters
----------
G : graph
NetworkX graph object.
attribute : string
Node attribute key.
nodes: list or iterable (optional)
Build the matrix only with nodes in container. The default is all nodes.
normalized : bool (default=False)
Return counts if False or probabilities if True.
Returns
-------
m: numpy array
Counts, or joint, probability of occurrence of node attribute pairs.
"""
if nodes is None:
node_iter = G
else:
node_iter = G.nbunch_iter(nodes)
d=attribute_mixing_dict(G,attribute,node_iter)
s=set(d.keys())
for k,v in d.items():
s.update(v.keys())
m=max(s)
mapping=dict(zip(range(m+1),range(m+1)))
a=dict_to_numpy_array(d,mapping=mapping)
if normalized:
a=a/a.sum()
return a
def degree_mixing_matrix(G,nodes=None,normalized=True):
"""Return mixing matrix for attribute.
Parameters
----------
G : graph
NetworkX graph object.
nodes: list or iterable (optional)
Build the matrix using only nodes in container.
The default is all nodes.
normalized : bool (default=False)
Return counts if False or probabilities if True.
Returns
-------
m: numpy array
Counts, or joint probability, of occurrence of node degree.
"""
if nodes is None:
node_iter = G
else:
node_iter = G.nbunch_iter(nodes)
d=degree_mixing_dict(G,node_iter)
s=set(d.keys())
for k,v in d.items():
s.update(v.keys())
m=max(s)
mapping=dict(zip(range(m+1),range(m+1)))
a=dict_to_numpy_array(d,mapping=mapping)
if normalized:
a=a/a.sum()
return a
def mixing_dict(xy,normalized=False):
"""Return a dictionary representation of mixing matrix.
Parameters
----------
xy : list or container of two-tuples
Pairs of (x,y) items.
attribute : string
Node attribute key
normalized : bool (default=False)
Return counts if False or probabilities if True.
Returns
-------
d: dictionary
Counts or Joint probability of occurrence of values in xy.
"""
d={}
psum=0.0
for x,y in xy:
if x not in d:
d[x]={}
if y not in d:
d[y]={}
v=d[x].setdefault(y,0)
d[x][y]=v+1
psum+=1
if normalized:
for k,jdict in d.items():
for j in jdict:
jdict[j]/=psum
return d
def dict_to_numpy_array(d,mapping=None):
"""Convert a dictionary to numpy array with optional mapping."""
try:
import numpy
except ImportError:
raise ImportError(
"dict_to_numpy_array requires numpy : http://scipy.org/ ")
if mapping is None:
s=set(d.keys())
for k,v in d.items():
s.update(v.keys())
mapping=dict(zip(s,range(len(s))))
n=len(mapping)
a = numpy.zeros((n, n))
for k1, row in d.items():
for k2, value in row.items():
i=mapping[k1]
j=mapping[k2]
a[i,j] = value
return a
def node_attribute_xy(G,attribute,nodes=None):
"""Return iterator of node attribute pairs for all edges in G.
For undirected graphs each edge is produced twice, once for each
representation u-v and v-u, with the exception of self loop edges
that only appear once.
"""
if nodes is None:
node_set = G
else:
node_set = G.subgraph(nodes)
node=G.node
for u,nbrsdict in G.adjacency_iter():
if u not in node_set:
continue
uattr=node[u].get(attribute,None)
if G.is_multigraph():
for v,keys in nbrsdict.items():
vattr=node[v].get(attribute,None)
for k,d in keys.items():
yield (uattr,vattr)
else:
for v,eattr in nbrsdict.items():
vattr=node[v].get(attribute,None)
yield (uattr,vattr)
def node_degree_xy(G,nodes=None):
"""Return iterator of degree-degree pairs for edges in G.
Parameters
----------
G : NetworkX graph
nodes: list or iterable (optional)
Use only edges that start or end in nodes in this container.
The default is all nodes.
Notes
-----
For undirected graphs each edge is produced twice, once for each
representation u-v and v-u, with the exception of self loop edges
that only appear once.
For directed graphs this produces out-degree,in-degree pairs
"""
if nodes is None:
node_set = G
else:
node_set = G.subgraph(nodes)
if G.is_directed():
in_degree=G.in_degree
out_degree=G.out_degree
else:
in_degree=G.degree
out_degree=G.degree
for u,nbrsdict in G.adjacency_iter():
if u not in node_set:
continue
degu=out_degree(u)
if G.is_multigraph():
for v,keys in nbrsdict.items():
degv=in_degree(v)
for k,d in keys.items():
yield degu,degv
else:
for v,eattr in nbrsdict.items():
degv=in_degree(v)
yield degu,degv
# fixture for nose tests
| [
2,
12,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
35608,
278,
2603,
45977,
290,
840,
419,
22055,
44036,
13,
198,
37811,
198,
834,
9800,
834,
796,
37227,
32,
1173,
21375,
3900,
357,
71,
363,
3900,
31,
9620,
... | 2.361491 | 6,144 |
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='Auctus A6 dumper')
parser.add_argument('--split', default=False, action="store_true",
help='split the memory locations')
parser.add_argument('--begin', type=lambda x: int(x,0),
help='extract the bin beginning at address',
default=0x82000000)
parser.add_argument('-i', '--input', default='/dev/stdin',
type=str, help='input lod')
parser.add_argument('-o', '--out', default='/dev/stdout',
type=str, help='output bin')
parser.add_argument('-v','--verbosity', default=0, action='count',
help='print sent and received frames to stderr for debugging')
parser.add_argument('-V', '--version', action='version',
version='%(prog)s 0.0.1',
help='display version information and exit')
args = parser.parse_args()
inlod = open(args.input, "r")
outbin = open(args.out, "wb")
curaddress = None
for line in inlod.readlines():
if line[0] == "#":
continue
if line[0] == "@":
address = int(line[1:], 16)
if curaddress is None:
curaddress = address
elif address != curaddress:
print("address out of order {} to {}".format(curaddress, address))
continue
curaddress += 4
data = int(line,16)
outbin.write(data.to_bytes(4, 'little'))
| [
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
1330,
1822,
29572,
198,
220,
220,
220,
30751,
796,
1822,
29572,
13,
28100,
1713,
46677,
7,
11213,
11639,
32,
4782,
385,
317,
21,
288,
15829,
11537,
198,
220,... | 2.11126 | 746 |
from PySide2 import QtCore, QtGui, QtWidgets
from PySide2.QtCore import (QCoreApplication, QPropertyAnimation, QDate, QDateTime, QMetaObject, QObject, QPoint, QRect, QSize, QTime, QUrl, Qt, QEvent)
from PySide2.QtGui import (QBrush, QColor, QConicalGradient, QCursor, QFont, QFontDatabase, QIcon, QKeySequence, QLinearGradient, QPalette, QPainter, QPixmap, QRadialGradient)
from PySide2.QtWidgets import *
from ui_PyMessanger import Ui_MainWindow
import sys
# Application entry point: create the Qt application object, build the main
# window, and hand control to the Qt event loop until the window closes.
if __name__ == "__main__":
    app = QApplication(sys.argv)
    # NOTE(review): `server` is not defined or imported anywhere in this
    # module — as written this raises NameError at startup. Presumably the
    # main-window class (the one wrapping Ui_MainWindow) is meant here;
    # confirm the class name and import it. Also note the window is never
    # .show()n before entering the event loop — verify intended behaviour.
    window = server()
    sys.exit(app.exec_())
| [
6738,
9485,
24819,
17,
1330,
33734,
14055,
11,
33734,
8205,
72,
11,
33734,
54,
312,
11407,
198,
6738,
9485,
24819,
17,
13,
48,
83,
14055,
1330,
357,
48,
14055,
23416,
11,
1195,
21746,
39520,
11,
1195,
10430,
11,
1195,
10430,
7575,
11,... | 2.637209 | 215 |
import discord
from discord.ext import commands
# Names of the bot extension modules to load (one entry per extension).
EXT = ("test",)
| [
11748,
36446,
198,
6738,
36446,
13,
2302,
1330,
9729,
198,
198,
13918,
796,
357,
198,
220,
220,
220,
366,
9288,
1600,
198,
8,
198
] | 2.958333 | 24 |
from savu.plugins.plugin_tools import PluginTools
class CameraRotCorrectionTools(PluginTools):
    """A plugin to apply a rotation to projection images, for example to
    correct for missing camera alignment.
    """
    # NOTE(review): the parameter names below mix spellings ('center_x' vs
    # 'centre_y'). They are parsed from this docstring by the savu framework,
    # so renaming them would break existing process lists — left untouched.
    def define_parameters(self):
        """
        angle:
            visibility: basic
            dtype: float
            description: The rotation angle for the output image in degrees.
            default: 0.0
        crop_edges:
            visibility: intermediate
            dtype: int
            description: When a rotation is applied to any image,
              the result will contain unused values around the edges, which
              can be removed by cropping the edges by a specified number of
              pixels.
            default: 0
        auto_crop:
            visibility: basic
            dtype: bool
            description: If activated, this feature will automatically
              crop the image to eliminate any regions without data
              (because of the rotation).
            default: False
        use_auto_centre:
            visibility: intermediate
            dtype: bool
            description: This parameter automatically sets the centre
              of rotation to the centre of the image. If set to False, the
              values from centre_x and centre_y are used. Note - The centre
              needs to be within the image dimensions.
            default: True
        center_x:
            visibility: intermediate
            dtype: float
            description: If not use_auto_centre, this value determines the
              detector x coordinate for the centre of rotation.
            default: 1279.5
        centre_y:
            visibility: intermediate
            dtype: float
            description: If not use_auto_centre, this value determines the
              detector y coordinate for the centre of rotation.
            default: 1079.5
        """
6738,
6799,
84,
13,
37390,
13,
33803,
62,
31391,
1330,
42636,
33637,
198,
198,
4871,
20432,
24864,
43267,
33637,
7,
37233,
33637,
2599,
198,
220,
220,
220,
37227,
32,
13877,
284,
4174,
257,
13179,
284,
20128,
4263,
11,
329,
1672,
284,
... | 2.263158 | 893 |
array = [7, 5, 9, 0, 3, 1, 6, 2, 4, 8]

# Insertion sort: grow a sorted prefix one element at a time, sinking each
# new element leftwards until it meets a smaller (or equal) neighbour.
for end in range(1, len(array)):
    pos = end
    while pos > 0 and array[pos] < array[pos - 1]:
        array[pos], array[pos - 1] = array[pos - 1], array[pos]
        pos -= 1

print(array)
| [
18747,
796,
685,
22,
11,
642,
11,
860,
11,
657,
11,
513,
11,
352,
11,
718,
11,
362,
11,
604,
11,
807,
60,
198,
198,
1640,
1312,
287,
2837,
7,
16,
11,
18896,
7,
18747,
8,
2599,
198,
220,
220,
220,
329,
474,
287,
2837,
7,
72,
... | 1.786765 | 136 |
"""
Author: Caiya Zhang, Yuchen Zheng
"""
import numpy as np
import scipy as sp
#trans = function(x) matrix(c(x[bpop_index],exp(x[d_index])),ncol=1,byrow=T
# transform_back = function(par,lower=-Inf,upper=Inf){
# # FastImputation::BoundNormalizedVariable(
# # par,
# # constraints =
# # list(lower=lower,
# # upper=upper))
# bound_par(par,lower=lower,upper=upper)
# }
"""
##' Catch *and* save both errors and warnings, and in the case of
##' a warning, also keep the computed result.
##'
##' @title tryCatch both warnings (with value) and errors
##' @param expr an \R expression to evaluate
##' @return a list with 'value' and 'warning', where
##' 'value' may be an error caught.
##' @author Martin Maechler, The R Core Team
##' @keywords internal
def tryCatch_W_E(expr):
W = None
w_handler = function(w){ # warning handler
W = w
invokeRestart("muffleWarning")
}
return {"value": withCallingHandlers(tryCatch(expr, error = function(e) e),warning = w.handler),
"warning": W}
"""
| [
37811,
201,
198,
201,
198,
13838,
25,
327,
1872,
3972,
19439,
11,
575,
1229,
831,
44583,
201,
198,
37811,
201,
198,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
11748,
629,
541,
88,
355,
599,
201,
198,
2,
7645,
220,
796,
2163,... | 2.478161 | 435 |
import numpy as np
import os
import time
import random

import forecast_lib as fl


# ---------------------------------------------------------------------------
# Attack-impact experiment: for a grid of defender model sizes (m_d) and
# attacker sizes (m_a), measure forecast impact (MAE between clean and
# attacked predictions) and baseline prediction error over several models.
# ---------------------------------------------------------------------------

dropout = False
if dropout:
    type_exp = '_dropout'
else:
    type_exp = ''

# experiment parameters
directory = './experiments/models_diff_size' + type_exp + '/'
m = fl.num_meters
max_num_models = 20
m_d_frac = np.linspace(0.5, 1, 5)    # fraction of meters each defender model uses
m_a_frac = np.linspace(0.1, 0.5, 5)  # fraction of meters the attacker controls
reps = 20
unique_bias = True

strategic_attack = False
if strategic_attack:
    # strategic attacker: compromise meters drawn from a trained model's set
    type_exp = 'strategic_' + type_exp

# NOTE(review): results are indexed by k in range(max_num_models) but the
# first axis is sized by reps; the two happen to be equal (20) — confirm
# they are meant to stay in sync.
impact = np.zeros((reps, len(m_d_frac), len(m_a_frac)))
pred_error = np.zeros((reps, len(m_d_frac), len(m_a_frac)))

for i in range(len(m_d_frac)):
    m_d = int(m * m_d_frac[i])
    print('m_d: ' + str(m_d))
    dir_models = directory + 'm_d_' + str(m_d) + '/'
    # BUG FIX: the original ran os.makedirs(dir_rep) — an undefined name —
    # inside a bare try/except, so the NameError was silently swallowed and
    # the directory was never actually created.
    os.makedirs(dir_models, exist_ok=True)
    for j in range(len(m_a_frac)):
        m_a = int(m_a_frac[j] * m)
        print('\tm_a=' + str(m_a))
        t0 = time.perf_counter()
        for k in range(max_num_models):
            print('\t\tk=' + str(k))
            if strategic_attack:
                # attack meters that model k actually trains on
                meters_model = np.load(dir_models + 'meters_' + str(k) + '.npy', allow_pickle=True)
                meters_a = random.sample(set(meters_model[0]), m_a)
            else:
                # attack a uniformly random subset of all meters
                meters_a = random.sample(set(range(m)), m_a)
            y_test, hat_y, hat_y_a, bias_opt = fl.find_attack(dir_models, max_num_models, 1, meters_a, unique_bias)
            impact[k, i, j] = fl.MAE(hat_y, hat_y_a)      # clean vs attacked forecast
            pred_error[k, i, j] = fl.MAE(hat_y, y_test)   # clean forecast vs ground truth
        t_f = time.perf_counter()
        print('\t***Train time: ' + str((t_f - t0) / 60.0))

dir_results = './'
np.save(dir_results + 'impact' + type_exp + '.npy', impact)
np.save(dir_results + 'pred_error' + type_exp + '.npy', pred_error)
| [
11748,
299,
32152,
355,
45941,
198,
11748,
28686,
220,
198,
11748,
640,
198,
11748,
4738,
198,
198,
11748,
11092,
62,
8019,
355,
781,
628,
198,
14781,
448,
28,
25101,
198,
198,
361,
4268,
448,
25,
198,
197,
4906,
62,
11201,
796,
705,
... | 2.115136 | 773 |