id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
import gensim

if __name__ == '__main__':
    print('loading pretrained model')
    # Load word vectors in word2vec text (non-binary) format; undecodable
    # bytes in the vocabulary are replaced rather than raising.
    # FIX: the original loaded the same model twice in a row, doubling the
    # (slow) startup time for no effect.
    model = gensim.models.KeyedVectors.load_word2vec_format(
        '43/model.txt', binary=False, unicode_errors='replace')
    # Interactive loop: print the 20 nearest neighbours of each entered word.
    while True:
        try:
            word = input('Word: ')
            print(model.most_similar(word, topn=20))
        except KeyError:
            # Word not in the vocabulary -- just prompt again.
            continue
        except KeyboardInterrupt:
            quit(0)
| StarcoderdataPython |
1988818 | <reponame>shin5ok/spanner-orm-app<gh_stars>0
#!/usr/bin/env python
from sqlalchemy import *
import click
import os
import json
import logging
from typing import *
# Spanner connection string (e.g. "projects/p/instances/i/databases/d"),
# taken from the CONN environment variable; empty string if unset.
CONN_STRING: str = os.environ.get("CONN", "")
print(CONN_STRING)
# SQLAlchemy engine backed by the Cloud Spanner dialect.
engine = create_engine("spanner:///"+CONN_STRING)
# Setting the DEBUG environment variable (any value) turns on SQL logging.
debug_flag: bool = "DEBUG" in os.environ
logging.basicConfig()
loggingConfig = logging.getLogger('sqlalchemy.engine')
if debug_flag:
    loggingConfig.setLevel(logging.DEBUG)


@click.group()
def cli() -> None:
    """Top-level click group holding the dbinit/put/get subcommands."""
    pass
@cli.command()
def dbinit() -> None:
    """Create the Singers/Albums/Tracks schema in the Spanner database."""
    metadata = MetaData(bind=engine)
    singers = Table(
        "Singers",
        metadata,
        Column("SingerId", String(36), primary_key=True, nullable=False),
        Column("FirstName", String(200)),
        Column("LastName", String(200), nullable=False),
        # Generated column: "FirstName LastName", or just "LastName" when
        # FirstName is NULL.
        Column("FullName", String(400), Computed("COALESCE(FirstName || ' ', '') || LastName")),
    )
    albums = Table(
        "Albums",
        metadata,
        Column("AlbumId", String(36), primary_key=True, nullable=False),
        Column("Title", String(100), nullable=False),
        Column("SingerId", String(36), ForeignKey("Singers.SingerId", name="FK_Albums_Singers"), nullable=False),
    )
    tracks = Table(
        "Tracks",
        metadata,
        Column("AlbumId", String(36), primary_key=True, nullable=False),
        Column("TrackId", Integer, primary_key=True, nullable=False),
        Column("Title", String(200), nullable=False),
        # Spanner-specific: physically interleave Tracks rows inside their
        # parent Albums row, with deletes cascading from the parent.
        spanner_interleave_in="Albums",
        spanner_interleave_on_delete_cascade=True,
    )
    # Ensure Albums is created before the interleaved Tracks table.
    tracks.add_is_dependent_on(albums)
    metadata.create_all(engine)
    print("DB initialized")
@cli.command()
@click.option("--first_name", "-f")
@click.option("--last_name", "-l")
@click.option("--album_title", "-a")
@click.option("--track_title", "-t")
def put(first_name: str, last_name: str, album_title: str, track_title:str) -> None:
    """CLI command: insert one singer with one album and one track."""
    writing(first_name, last_name, album_title, track_title)
def writing(first_name: str, last_name: str, album_title: str, track_title:str) -> None:
    """Insert a new singer, one album for them, and one track on that album.

    All three rows are written in a single transaction. Errors are printed
    and swallowed (best-effort semantics, matching the CLI's needs).
    """
    import uuid

    # Reflect all three tables from one shared MetaData instead of creating
    # a separate MetaData per table (the original reflected three times).
    metadata = MetaData(bind=engine)
    singers = Table("Singers", metadata, autoload=True)
    albums = Table("Albums", metadata, autoload=True)
    tracks = Table("Tracks", metadata, autoload=True)
    try:
        # engine.begin() commits on success and rolls back on exception.
        with engine.begin() as connection:
            singer_id = str(uuid.uuid4())
            connection.execute(singers.insert(), {"SingerId": singer_id, "FirstName": first_name, "LastName": last_name})
            album_id = str(uuid.uuid4())
            connection.execute(albums.insert(), {"AlbumId": album_id, "Title": album_title, "SingerId": singer_id})
            # The sample data always stores a single track per album.
            connection.execute(tracks.insert(), {"AlbumId": album_id, "TrackId": 1, "Title": track_title})
    except Exception as e:
        print(str(e))
@cli.command()
@click.option("--singer_name", "-s")
@click.option("--show", is_flag=True)
def get(singer_name: str, show: bool) -> Any:
    """CLI command: list singers, optionally filtered by first name.

    FIX: the function is annotated to return Any but previously discarded
    reading()'s result; now the list of singer dicts is returned (click
    ignores return values, so CLI behavior is unchanged).
    """
    return reading(singer_name, show)
def reading(singer_name: str, show: bool) -> Any:
    """Fetch singers (all of them, or those whose FirstName == singer_name).

    Returns a list of {"name", "singer_id"} dicts; when show is True the
    list is also pretty-printed as JSON. On error the exception is printed
    and an empty list is returned.
    """
    singers = Table("Singers", MetaData(bind=engine), autoload=True)
    results = []
    try:
        with engine.begin() as connection:
            if singer_name:
                s = connection.execute(select([singers]).where(singers.c.FirstName == singer_name))
            else:
                s = connection.execute(select([singers]))
            results = [{"name": f"{v.FirstName} {v.LastName}", "singer_id": v.SingerId} for v in s]
        if show:
            print(json.dumps(results, indent=2))
    except Exception as e:
        print(str(e))
    return results
if __name__ == '__main__':
cli() | StarcoderdataPython |
3248288 | <reponame>carium-inc/moto<gh_stars>0
from __future__ import unicode_literals
import time
import json
import boto3
from moto.core import BaseBackend, BaseModel
class SecretsManager(BaseModel):
    """Model object capturing the identifiers of a single secret lookup."""

    def __init__(self, region_name, **kwargs):
        # Every identifier is optional and defaults to the empty string.
        for attr in ('secret_id', 'version_id', 'version_stage'):
            setattr(self, attr, kwargs.get(attr, ''))
class SecretsManagerBackend(BaseBackend):
    """In-memory moto backend that fakes the AWS SecretsManager service."""

    def __init__(self, region_name=None, **kwargs):
        super(SecretsManagerBackend, self).__init__()
        self.region = region_name
        self.secret_id = kwargs.get('secret_id', '')
        # Creation timestamp (epoch seconds) recorded at backend construction.
        self.createdate = int(time.time())

    def get_secret_value(self, secret_id, version_id, version_stage):
        """Return a canned GetSecretValue response as a JSON string."""
        payload = {
            "ARN": self.secret_arn(),
            "Name": self.secret_id,
            "VersionId": "A435958A-D821-4193-B719-B7769357AER4",
            "SecretString": "mysecretstring",
            "VersionStages": [
                "AWSCURRENT",
            ],
            "CreatedDate": "2018-05-23 13:16:57.198000"
        }
        return json.dumps(payload)

    def secret_arn(self):
        """Build a deterministic fake ARN for the stored secret id."""
        return "arn:aws:secretsmanager:{0}:1234567890:secret:{1}-rIjad".format(
            self.region, self.secret_id)
# One backend instance per region in which SecretsManager is available.
available_regions = boto3.session.Session().get_available_regions("secretsmanager")
secretsmanager_backends = {region: SecretsManagerBackend(region_name=region) for region in available_regions}
| StarcoderdataPython |
1978428 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from django import http
from django.shortcuts import render
from django.conf import settings
def home(request):
    """Render the static documentation landing page."""
    template_name = 'docs/home.html'
    return render(request, template_name)
| StarcoderdataPython |
6616225 | # apps/contact/models.py
# Django modules
from django.db import models
from django.utils.timezone import datetime
from django.contrib.auth.models import User
# Django locals
# Create your models here.
class Contact(models.Model):
    """A person in a manager's contact book."""

    # Staff user who owns this contact; RESTRICT blocks deleting a user
    # who still has contacts.
    manager = models.ForeignKey(User,
        on_delete=models.RESTRICT, default=None)
    name = models.CharField(max_length=50)
    email = models.CharField(max_length=100)
    phone = models.CharField(max_length=15)
    info = models.CharField(max_length=50)
    gender = models.CharField(max_length=50,
        choices=(
            ('male', 'Male'),
            ('female', 'Female')))
    # Optional portrait uploaded under MEDIA_ROOT/images/.
    image = models.ImageField(upload_to='images/', blank=True)
    # NOTE(review): datetime.now yields a naive local timestamp; with
    # USE_TZ=True Django normally expects timezone.now -- confirm intent.
    date_added = models.DateTimeField(default=datetime.now)

    class Meta:
        # Newest contacts first.
        ordering = ['-id']

    def __str__(self):
        return self.name
| StarcoderdataPython |
6475010 | """ Creates a tree of digital twins.
Creates digital twin documents to a new timestamped folder named "twintree-<timestamp>".
Each twin document is created in its own folder as "index.yaml" file.
Arguments:
1: The depth of the tree, i.e. the number of relationships from highest to lowest.
Must be at least one (1).
2: The width of the tree, i.e. one twin will have this many children
Must be at least one (1).
Usage example:
python3 create-twins-tree.py 3 3
"""
import sys, uuid, os, yaml, lorem
# import pprint
from datetime import datetime, timezone
from coolname import generate_slug
# Receive user input argument for dimensions of the twin tree
depth = int(sys.argv[1])   # levels of relationships below the root (>= 1)
width = int(sys.argv[2])   # children per twin (>= 1)

# Constants
REGISTRY = 'https://dtid.org/' # Base URL of DTID registry
# Every DTID in this run shares a timestamped prefix, making ids from
# different runs distinguishable.
DTID_BASE = REGISTRY \
    + datetime.now().strftime('%Y-%m-%d_%H-%M-%S_') # Base URL of DTIDs

# Create a tree of DTs in advance so that adding parents and children is a bit easier
tree = {}
totalcount = 0
print('Creating tree with depth ' + str(depth) + ' and width ' + str(width))
def create_tree(current, depth: int, width: int, totalcount: int):
    """Recursively populate `current` with a tree of random DTIDs.

    Each node maps its DTID to a list of child sub-trees (empty at the
    leaves). Returns the (mutated) `current` mapping together with the
    running count of nodes created so far.
    """
    # Node id: the run-wide DTID base plus the first segment of a fresh UUID.
    node_id = DTID_BASE + str(uuid.uuid4()).split('-')[0]
    totalcount += 1
    children = []
    current[node_id] = children
    if depth != 0:
        for _ in range(width):
            subtree = {}
            children.append(subtree)
            _, totalcount = create_tree(subtree, depth - 1, width, totalcount)
    return current, totalcount
# Build the in-memory tree first; twin documents are written afterwards.
twintree, totalcount = create_tree(tree, depth, width, totalcount)

# Create folder for the twins
foldername = 'twintree-' + datetime.now(timezone.utc).isoformat()
print('Twins are added to folder: ' + foldername + '/')
os.mkdir(foldername)

# Create the twin documents
print('---- Creating ' + str(totalcount) + ' twin docs ----:')
creator_dtid = 'http://d-t.fi/juuso' # Parent for the first twin
def create_twins(current, parent):
    """Write one YAML twin document per node in `current`, recursively.

    `parent` is recorded as the 'parent' relation of every node at this
    level; each node's own children are recorded as 'child' relations.
    Documents are written to <foldername>/<id-suffix>/index.yaml.
    Returns the DTID of the last node processed at this level.
    """
    for dtid in current:
        print('Creating DT doc for: ' + dtid)
        doc = {}
        doc['dt-id'] = dtid
        doc['hosting-iri'] = 'autoassign'
        # Human-readable random name, e.g. "Brave Purple Panda".
        doc['name'] = generate_slug().replace('-', ' ').title()
        if parent == 'http://d-t.fi/juuso':
            # The very first twin (direct child of the creator) gets a
            # special, timestamped name.
            doc['name'] = 'The Origin at ' + datetime.now().strftime('%Y-%m-%d %H-%M-%S')
            print('The name of the first DT: ' + doc['name'])
        doc['description'] = lorem.sentence()
        # Add parent
        doc['relations'] = []
        doc['relations'].append({})
        doc['relations'][0]['dt-id'] = parent
        doc['relations'][0]['relationType'] = 'parent'
        # Add children (each sub-tree dict holds exactly one key: the
        # child's DTID, so the inner loop runs once per sub-tree).
        if len(current[dtid]) > 0:
            for i in range(len(current[dtid])):
                for child in current[dtid][i]:
                    doc['relations'].append({})
                    doc['relations'][i+1]['dt-id'] = child
                    doc['relations'][i+1]['relationType'] = 'child'
        # Create folder for the new twin; index 3 of the '/'-split is the
        # unique id segment after "https://dtid.org/".
        dtfolder = foldername + '/' + dtid.split('/')[3]
        os.mkdir(dtfolder)
        # Write the twin doc to a YAML file
        filename = dtfolder + '/index.yaml'
        with open (filename, 'w') as yamlfile:
            yaml.dump(doc, yamlfile, default_flow_style=False, sort_keys=False, allow_unicode=True)
        # Create more twins in recursive loop
        for i in range(len(current[dtid])):
            create_twins(current[dtid][i], dtid)
    return dtid
# Write out all twin documents; the creator is the root's 'parent'.
origin_dtid = create_twins(twintree, creator_dtid)
print('Created ' + str(totalcount) + ' twins.')
print('Origin DT: ' + origin_dtid)
# Print the DT tree:
# pprint.pprint(twintree)
| StarcoderdataPython |
4865405 | # Generated by Django 2.0.7 on 2018-07-11 19:42
from django.db import migrations, models
class Migration(migrations.Migration):
    """Move meeting_id from AgendaItem onto Agenda and relax Message.content."""

    dependencies = [
        ('ingest', '0007_update-django'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='agendaitem',
            name='meeting_id',
        ),
        # meeting_id now lives on the Agenda itself; nullable so existing
        # rows remain valid.
        migrations.AddField(
            model_name='agenda',
            name='meeting_id',
            field=models.CharField(max_length=20, null=True),
        ),
        migrations.AlterField(
            model_name='message',
            name='content',
            field=models.CharField(blank=True, max_length=254, null=True),
        ),
    ]
| StarcoderdataPython |
11200366 | <reponame>zx273983653/vulscan
from __future__ import unicode_literals
from django.apps import AppConfig
class AppscanConfig(AppConfig):
    """Django application configuration for the 'appscan' app."""

    name = 'appscan'
| StarcoderdataPython |
233423 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from iceberg.api import Schema
from iceberg.api.types import (BinaryType,
BooleanType,
DateType,
DoubleType,
FloatType,
IntegerType,
ListType,
LongType,
MapType,
NestedField,
StringType,
StructType,
TimestampType,
TimeType,
TypeID)
class AvroToIceberg(object):
    """Static helpers converting Avro JSON schemas and rows to Iceberg.

    Schema conversion handles the three JSON forms an Avro "type" can take:
    a string naming a primitive/complex type, a dict describing a nested
    schema, and a 2-element ["null", X] union (treated as an optional X).
    The get_field_* helpers read values out of Avro records following an
    Iceberg schema.
    """

    # Avro schema property names used throughout.
    FIELD_ID_PROP = "field-id"
    FIELD_TYPE_PROP = "type"
    FIELD_NAME_PROP = "name"
    FIELD_LOGICAL_TYPE_PROP = "logicalType"
    FIELD_FIELDS_PROP = "fields"
    FIELD_ITEMS_PROP = "items"
    FIELD_ELEMENT_ID_PROP = "element-id"

    AVRO_JSON_PRIMITIVE_TYPES = ("boolean", "int", "long", "float", "double", "bytes", "string")
    AVRO_JSON_COMPLEX_TYPES = ("record", "array", "enum", "fixed")

    # Dispatch on the Python type of the "type" property: str names a
    # primitive/complex type, dict is a nested schema, list is a union.
    TYPE_PROCESSING_MAP = {str: lambda x, y: AvroToIceberg.convert_str_type(x, y),
                           dict: lambda x, y: AvroToIceberg.convert_complex_type(x, y),
                           list: lambda x, y: AvroToIceberg.convert_union_type(x, y)}

    # Dispatch by the complex type name (or its logicalType, e.g. "map").
    COMPLEX_TYPE_PROCESSING_MAP = {"record": lambda x, y: AvroToIceberg.convert_record_type(x, y),
                                   "array": lambda x, y: AvroToIceberg.convert_array_type(x, y),
                                   "map": lambda x, y: AvroToIceberg.convert_map_type(x, y)}

    # Avro primitive / logical type name -> Iceberg type instance.
    PRIMITIVE_FIELD_TYPE_MAP = {"boolean": BooleanType.get(),
                                "bytes": BinaryType.get(),
                                "date": DateType.get(),
                                "double": DoubleType.get(),
                                "float": FloatType.get(),
                                "int": IntegerType.get(),
                                "long": LongType.get(),
                                "string": StringType.get(),
                                "time-millis": TimeType.get(),
                                "timestamp-millis": TimestampType.without_timezone()
                                }

    @staticmethod
    def convert_avro_schema_to_iceberg(avro_schema):
        """Convert a top-level Avro record schema into an Iceberg Schema."""
        if avro_schema.get(AvroToIceberg.FIELD_TYPE_PROP) != "record":
            raise RuntimeError("Cannot convert avro schema to iceberg %s" % avro_schema)
        # convert_type returns (type, is_optional, next_id); only the
        # resulting struct's fields are needed to build the Schema.
        struct = AvroToIceberg.convert_type(avro_schema, None)
        return Schema(struct[0].fields)

    @staticmethod
    def convert_record_type(avro_field, next_id=None):
        """Convert an Avro "record" schema into (StructType, next_id)."""
        avro_field_type = avro_field.get(AvroToIceberg.FIELD_TYPE_PROP)
        if avro_field_type != "record":
            raise RuntimeError("Field type muse be 'record': %s" % avro_field_type)
        fields = avro_field.get(AvroToIceberg.FIELD_FIELDS_PROP)
        iceberg_fields = []
        if next_id is None:
            # Seed the running id counter with the number of top-level fields.
            next_id = len(fields)
        for field in fields:
            iceberg_field, next_id = AvroToIceberg.convert_avro_field_to_iceberg(field, next_id=next_id)
            iceberg_fields.append(iceberg_field)
        return StructType.of(iceberg_fields), next_id

    @staticmethod
    def convert_avro_field_to_iceberg(field, next_id):
        """Convert one Avro field; wrap it in a NestedField when it carries
        a "field-id" property, otherwise return the bare Iceberg type."""
        field_type, is_optional, next_id = AvroToIceberg.convert_type(field, next_id)
        if field.get(AvroToIceberg.FIELD_ID_PROP) is None:
            return field_type, next_id
        if is_optional:
            return NestedField.optional(field.get(AvroToIceberg.FIELD_ID_PROP),
                                        field.get(AvroToIceberg.FIELD_NAME_PROP),
                                        field_type), next_id
        else:
            return NestedField.required(field.get(AvroToIceberg.FIELD_ID_PROP),
                                        field.get(AvroToIceberg.FIELD_NAME_PROP),
                                        field_type), next_id

    @staticmethod
    def convert_type(field, next_id=None):
        """Dispatch on the JSON shape of the field's "type" property.

        Returns (iceberg_type, is_optional, next_id)."""
        avro_field_type = field.get(AvroToIceberg.FIELD_TYPE_PROP)
        # ["null", X] unions are optional; everything else is required.
        optional = AvroToIceberg.is_option_schema(avro_field_type)
        processing_func = AvroToIceberg.TYPE_PROCESSING_MAP.get(type(avro_field_type))
        if processing_func is None:
            raise RuntimeError("No function found to process %s" % avro_field_type)
        iceberg_type, next_id = processing_func(field, next_id)
        return iceberg_type, optional, next_id

    @staticmethod
    def convert_str_type(avro_field, next_id=None):
        """Convert a field whose "type" is a plain string name.

        A logicalType, when present, overrides the physical type for the
        lookup (e.g. int + "date", array + "map")."""
        avro_field_type = avro_field.get(AvroToIceberg.FIELD_TYPE_PROP)
        logical_type = avro_field.get(AvroToIceberg.FIELD_LOGICAL_TYPE_PROP)
        if not isinstance(avro_field_type, str):
            raise RuntimeError("Field type must be of type str: %s" % avro_field_type)
        if avro_field_type in AvroToIceberg.AVRO_JSON_PRIMITIVE_TYPES:
            if logical_type is not None:
                return AvroToIceberg.PRIMITIVE_FIELD_TYPE_MAP.get(logical_type), next_id
            else:
                return AvroToIceberg.PRIMITIVE_FIELD_TYPE_MAP.get(avro_field_type), next_id
        elif avro_field_type in AvroToIceberg.AVRO_JSON_COMPLEX_TYPES:
            if logical_type is not None:
                processing_func = AvroToIceberg.COMPLEX_TYPE_PROCESSING_MAP.get(logical_type)
            else:
                processing_func = AvroToIceberg.COMPLEX_TYPE_PROCESSING_MAP.get(avro_field_type)
            if processing_func is None:
                raise RuntimeError("No function found to process %s" % avro_field_type)
            return processing_func(avro_field, next_id)
        else:
            raise RuntimeError("Unknown type %s" % avro_field_type)

    @staticmethod
    def convert_complex_type(avro_field, next_id=None):
        """Convert a field whose "type" is itself a nested schema dict."""
        avro_field_type = avro_field.get(AvroToIceberg.FIELD_TYPE_PROP)
        if not isinstance(avro_field_type, dict):
            raise RuntimeError("Complex field type must be of type dict: %s" % avro_field_type)
        return AvroToIceberg.convert_avro_field_to_iceberg(avro_field_type, next_id)

    @staticmethod
    def convert_union_type(avro_field, next_id=None):
        """Convert a 2-element union by unwrapping its non-null branch."""
        avro_field_type = avro_field.get(AvroToIceberg.FIELD_TYPE_PROP)
        if not isinstance(avro_field_type, list):
            raise RuntimeError("Union field type must be of type list: %s" % avro_field_type)
        if len(avro_field_type) > 2:
            raise RuntimeError("Cannot process unions larger than 2 items: %s" % avro_field_type)
        for item in avro_field_type:
            if isinstance(item, str) and item == "null":
                continue
            avro_field_type = item
        # NOTE: mutates the incoming schema dict so nested converters see
        # the unwrapped (non-null) branch.
        avro_field[AvroToIceberg.FIELD_TYPE_PROP] = avro_field_type
        # Drop the is_optional element; optionality was decided by the caller.
        items = AvroToIceberg.convert_type(avro_field, next_id)
        return items[0], items[2]

    @staticmethod
    def convert_array_type(avro_field, next_id=None):
        """Convert an Avro "array" schema into (ListType, next_id).

        Only primitive element types are supported."""
        avro_field_type = avro_field.get(AvroToIceberg.FIELD_TYPE_PROP)
        if avro_field_type != "array":
            raise RuntimeError("Avro type must be array: %s" % avro_field_type)
        element_id = avro_field.get(AvroToIceberg.FIELD_ELEMENT_ID_PROP)
        items = avro_field.get(AvroToIceberg.FIELD_ITEMS_PROP)
        is_optional = AvroToIceberg.is_option_schema(items)
        if isinstance(items, str) and items in AvroToIceberg.PRIMITIVE_FIELD_TYPE_MAP:
            item_type = AvroToIceberg.PRIMITIVE_FIELD_TYPE_MAP.get(items)
            if item_type is None:
                raise RuntimeError("No mapping found for type %s" % items)
        else:
            raise RuntimeError("Complex list types not yet implemented")
        if is_optional:
            return ListType.of_optional(element_id, item_type), next_id
        else:
            return ListType.of_required(element_id, item_type), next_id

    @staticmethod
    def convert_map_type(avro_field, next_id=None):
        """Convert Avro's array-of-key/value-records map encoding into
        (MapType, next_id). Only primitive keys and values are supported.

        NOTE(review): if the items record lacks a "key" or "value" field,
        key_id/key_type or value_id/value_type are referenced unbound and
        this raises UnboundLocalError; also is_optional is hard-coded
        False, so of_optional is unreachable -- confirm intent.
        """
        avro_field_type = avro_field.get(AvroToIceberg.FIELD_TYPE_PROP)
        avro_logical_type = avro_field.get(AvroToIceberg.FIELD_LOGICAL_TYPE_PROP)
        if avro_field_type != "array" or avro_logical_type != "map":
            raise RuntimeError("Avro type must be array and logical type must be map: %s" % (avro_field_type,
                                                                                             avro_logical_type))
        is_optional = False
        items = avro_field.get(AvroToIceberg.FIELD_ITEMS_PROP)
        for field in items.get(AvroToIceberg.FIELD_FIELDS_PROP, list()):
            if field.get(AvroToIceberg.FIELD_NAME_PROP) == "key":
                key_id = field.get(AvroToIceberg.FIELD_ID_PROP)
                if not isinstance(field.get(AvroToIceberg.FIELD_TYPE_PROP), str):
                    raise RuntimeError("Support for complex map keys not yet implemented")
                key_type = AvroToIceberg.PRIMITIVE_FIELD_TYPE_MAP.get(field.get(AvroToIceberg.FIELD_TYPE_PROP))
            elif field.get(AvroToIceberg.FIELD_NAME_PROP) == "value":
                value_id = field.get(AvroToIceberg.FIELD_ID_PROP)
                if not isinstance(field.get(AvroToIceberg.FIELD_TYPE_PROP), str):
                    raise RuntimeError("Support for complex map values not yet imeplemented")
                value_type = AvroToIceberg.PRIMITIVE_FIELD_TYPE_MAP.get(field.get(AvroToIceberg.FIELD_TYPE_PROP))
        if is_optional:
            return MapType.of_optional(key_id, value_id, key_type, value_type), next_id
        else:
            return MapType.of_required(key_id, value_id, key_type, value_type), next_id

    @staticmethod
    def is_option_schema(field_type):
        """True when field_type is a 2-element union containing "null"."""
        if isinstance(field_type, list) and len(field_type) == 2 and "null" in field_type:
            return True
        return False

    @staticmethod
    def read_avro_row(iceberg_schema, avro_reader):
        """Generator yielding the next record of avro_reader re-keyed by the
        Iceberg schema's field names; ends silently when the reader is
        exhausted. Note each call builds a fresh generator that yields at
        most one row per advance of avro_reader.
        """
        try:
            avro_row = avro_reader.__next__()
            iceberg_row = dict()
            for field in iceberg_schema.as_struct().fields:
                iceberg_row[field.name] = AvroToIceberg.get_field_from_avro(avro_row, field)
            yield iceberg_row
        except StopIteration:
            return

    @staticmethod
    def get_field_from_avro(avro_row, field):
        """Extract one Iceberg field's value from an Avro record, dispatching
        on the field's Iceberg type id."""
        process_funcs = {TypeID.STRUCT: lambda avro_row, field: AvroToIceberg.get_field_from_struct(avro_row, field),
                         TypeID.LIST: lambda avro_row, field: AvroToIceberg.get_field_from_list(avro_row, field),
                         TypeID.MAP: lambda avro_row, field: AvroToIceberg.get_field_from_map(avro_row, field)}
        if field.type.is_primitive_type():
            processing_func = AvroToIceberg.get_field_from_primitive
        else:
            processing_func = process_funcs.get(field.type.type_id)
        if processing_func is None:
            raise RuntimeError("Don't know how to get field of type: %s" % field.type.type_id)
        return processing_func(avro_row, field)

    @staticmethod
    def get_field_from_primitive(avro_row, field):
        """Return the raw primitive value for the field's name.

        NOTE(review): the guard tests `avro_row is None` rather than
        `avro_value is None`, so a missing required primitive is returned
        as None instead of raising -- confirm whether avro_value was meant.
        """
        avro_value = avro_row.get(field.name)
        if avro_row is None and field.is_required:
            raise RuntimeError("Field is required but missing in source %s\n%s:" % (field, avro_row))
        return avro_value

    @staticmethod
    def get_field_from_struct(avro_row, field):
        """Recursively extract a nested struct as a plain dict."""
        field_obj = {}
        for nested_field in field.type.fields:
            field_obj[nested_field.name] = AvroToIceberg.get_field_from_avro(avro_row[field.name], nested_field)
        return field_obj

    @staticmethod
    def get_field_from_list(avro_row, field):
        """Return the list value as-is; raise for a missing required field."""
        avro_value = avro_row.get(field.name)
        if avro_value is None:
            if field.is_required:
                raise RuntimeError("Field is required but missing in source %s\n%s:" % (field, avro_row))
            return None
        return avro_value

    @staticmethod
    def get_field_from_map(avro_row, field):
        """Rebuild a dict from Avro's array-of-{key, value} map encoding."""
        val_map = dict()
        avro_value = avro_row.get(field.name)
        if avro_value is None and field.is_required:
            raise RuntimeError("Field is required but missing in source %s\n%s:" % (field, avro_row))
        for val in avro_value:
            val_map[val['key']] = val['value']
        return val_map
| StarcoderdataPython |
6594903 | from pymongo import MongoClient
# Connect to the local MongoDB instance (non-default port 27020) and expose
# the 'posts' collection of the 'machine' database at module level.
client = MongoClient('localhost', 27020)
db = client['machine']
collection = db['posts']
9642872 | import argparse
import os
import numpy as np
import logging
from collections import defaultdict
import pickle
from helpful_functions import readEmbeddings, normalize
from mylib import semantic_neighbors
def readArgs ():
    """Parse command-line arguments for the near-neighbors computation.

    Returns
    -------
    argparse.Namespace
        With dir_path, embeddings_file, near_neighbors_file, plus
        facet_name (default "MAIN") and nearest (default 25).
    """
    # FIX: corrected user-facing typo "negihbors" -> "neighbors".
    parser = argparse.ArgumentParser (description="Near neighbors for words")
    parser.add_argument ("--dir-path", required=True, type=str, help="directory path")
    parser.add_argument ("--embeddings-file", required=True, type=str, help="embeddings file")
    parser.add_argument ("--near-neighbors-file", required=True, type=str, help="near neighbors file")
    parser.add_argument ("--facet-name", required=False, type=str, default="MAIN", help="name of the facet (default: MAIN)")
    parser.add_argument ("--nearest", required=False, type=int, default=25, help="number of near neighbors (default: 25)")
    args = parser.parse_args ()
    return args
def getNeighbors (all_embeddings, w2i, i2w, k=25, log_every=1000):
    """Collect, for every vocabulary word, its k nearest neighbors in each
    embedding space of `all_embeddings`.

    Returns a dict mapping word -> list with one entry per embedding space,
    each entry being whatever mylib.semantic_neighbors returns.
    """
    neighbors = defaultdict (list)
    for index, w in enumerate (w2i):
        for i in range (len (all_embeddings)):
            neighbors[w].append (semantic_neighbors (w, all_embeddings[i], (w2i, i2w), k=k))
        # Periodic progress logging for long vocabularies.
        if (index+1) % log_every == 0:
            logging.info (f"Words processed: {index+1}, Percentage: {(index+1)/len(w2i)}")
    return neighbors
def main (args):
    """Compute near neighbors for either the static ("MAIN") embeddings or
    one temporal facet, and pickle the word -> neighbors dict to disk."""
    embeddings = readEmbeddings (os.path.join (args.dir_path, args.embeddings_file))
    # Separate the main embeddings and the facet embeddings
    static_embeddings = embeddings["MAIN"]
    # vocabulary (word <-> row index maps, in dict insertion order)
    w2i = {w:i for i, w in enumerate (static_embeddings)}
    i2w = {i:w for i, w in enumerate (static_embeddings)}
    # the atemporal embeddings, stacked in index order and L2-normalized
    main_embeddings = np.array([static_embeddings[i2w[i]] for i in range (len(i2w))])
    main_embeddings = normalize (main_embeddings)
    if not args.facet_name == "MAIN":
        residual_embeddings = embeddings[args.facet_name]
        # temporal embeddings (just add the static embeddings to the facets)
        temporal_embeddings = normalize(np.array([static_embeddings[i2w[i]] + residual_embeddings[i2w[i]] \
                                                  for i in range (len(i2w))]))
    # Exactly one embedding space is processed per run: the atemporal one
    # for "MAIN", otherwise the requested facet's temporal one.
    all_embeddings = list ()
    if args.facet_name == "MAIN":
        all_embeddings.append (main_embeddings)
    else:
        all_embeddings.append (temporal_embeddings)
    neighbors = getNeighbors (all_embeddings, w2i, i2w, k=args.nearest)
    # write the neighbors to file
    with open (os.path.join (args.dir_path, args.near_neighbors_file), "wb") as fout:
        pickle.dump (neighbors, fout)
if __name__ == "__main__":
    # Script entry point: parse CLI args, then compute and persist neighbors.
    main (readArgs ())
| StarcoderdataPython |
9731339 | <gh_stars>1-10
# Append the Metrika tracking snippet just before the closing </html> tag
# of the generated docs page.
# FIX: the original leaked three file handles (open() without close) and
# re-opened the file for writing while the read handle was still open;
# all I/O now uses context managers.
docs_file_path = 'docs/index.html'
with open(docs_file_path, 'r') as f:
    # Strip the trailing "</html>" so the snippet can be inserted before it.
    docs_file_content = f.read()[:-len('</html>')]
with open('docs/metrika_code.txt', 'r') as f:
    metrika_file_content = f.read()
with open(docs_file_path, 'w') as docs_file:
    docs_file.write(docs_file_content + metrika_file_content + '</html>')
1865559 | """Generate a time space diagram for some networks.
This method accepts as input a csv file containing the sumo-formatted emission
file, and then uses this data to generate a time-space diagram, with the x-axis
being the time (in seconds), the y-axis being the position of a vehicle, and
color representing the speed of te vehicles.
If the number of simulation steps is too dense, you can plot every nth step in
the plot by setting the input `--steps=n`.
Note: This script assumes that the provided network has only one lane on the
each edge, or one lane on the main highway in the case of MergeScenario.
Usage
-----
::
python time_space_diagram.py </path/to/emission>.csv </path/to/params>.json
"""
from flow.utils.rllib import get_flow_params
import csv
from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection
import matplotlib.colors as colors
import numpy as np
import argparse
# scenarios that can be plotted by this method
ACCEPTABLE_SCENARIOS = [
'LoopScenario',
'Figure8Scenario',
'MergeScenario',
]
def import_data_from_emission(fp):
    r"""Import relevant data from the predefined emission (.csv) file.

    Parameters
    ----------
    fp : str
        file path (for the .csv formatted file)

    Returns
    -------
    dict of dict
        Key = "veh_id": name of the vehicle \n Elements:

        * "time": time step at every sample
        * "edge": edge ID at every sample
        * "pos": relative position at every sample
        * "vel": speed at every sample
    """
    # FIX: the original opened the file without ever closing it; the context
    # manager closes it deterministically. Grouping by vehicle is also done
    # in a single pass instead of reading everything and re-grouping.
    ret = {}
    with open(fp) as f:
        for record in csv.DictReader(f):
            veh = ret.setdefault(record['id'],
                                 {'time': [], 'edge': [], 'pos': [], 'vel': []})
            veh['time'].append(float(record['time']))
            veh['edge'].append(record['edge_id'])
            veh['pos'].append(float(record['relative_position']))
            veh['vel'].append(float(record['speed']))
    return ret
def get_time_space_data(data, params):
    r"""Compute the positions and speeds of all vehicles at all time steps.

    Parameters
    ----------
    data : dict of dict
        Key = "veh_id": name of the vehicle \n Elements:

        * "time": time step at every sample
        * "edge": edge ID at every sample
        * "pos": relative position at every sample
        * "vel": speed at every sample
    params : dict
        flow-specific parameters, including:

        * "scenario" (str): name of the scenario that was used when generating
          the emission file. Must be one of the scenario names mentioned in
          ACCEPTABLE_SCENARIOS,
        * "net_params" (flow.core.params.NetParams): network-specific
          parameters. This is used to collect the lengths of various network
          links.

    Returns
    -------
    as_array
        n_steps x n_veh matrix specifying the absolute position of every
        vehicle at every time step. Set to zero if the vehicle is not present
        in the network at that time step.
    as_array
        n_steps x n_veh matrix specifying the speed of every vehicle at every
        time step. Set to zero if the vehicle is not present in the network at
        that time step.
    as_array
        a (n_steps,) vector representing the unique time steps in the
        simulation

    Raises
    ------
    AssertionError
        if the specified scenario is not supported by this method
    """
    # check that the scenario is appropriate
    assert params['scenario'] in ACCEPTABLE_SCENARIOS, \
        'Scenario must be one of: ' + ', '.join(ACCEPTABLE_SCENARIOS)

    # switcher used to compute the positions based on the type of scenario
    switcher = {
        'LoopScenario': _ring_road,
        'MergeScenario': _merge,
        'Figure8Scenario': _figure_eight
    }

    # Collect a list of all the unique times.
    all_time = []
    for veh_id in data.keys():
        all_time.extend(data[veh_id]['time'])
    all_time = np.sort(np.unique(all_time))

    # Get the scenario-specific handler from the switcher dictionary.
    func = switcher[params['scenario']]

    # Execute the function
    pos, speed = func(data, params, all_time)

    return pos, speed, all_time
def _merge(data, params, all_time):
    r"""Generate position and speed data for the merge.

    This only include vehicles on the main highway, and not on the adjacent
    on-ramp.

    Parameters
    ----------
    data : dict of dict
        Key = "veh_id": name of the vehicle \n Elements:

        * "time": time step at every sample
        * "edge": edge ID at every sample
        * "pos": relative position at every sample
        * "vel": speed at every sample
    params : dict
        flow-specific parameters
    all_time : array_like
        a (n_steps,) vector representing the unique time steps in the
        simulation

    Returns
    -------
    as_array
        n_steps x n_veh matrix specifying the absolute position of every
        vehicle at every time step. Set to zero if the vehicle is not present
        in the network at that time step.
    as_array
        n_steps x n_veh matrix specifying the speed of every vehicle at every
        time step. Set to zero if the vehicle is not present in the network at
        that time step.
    """
    # import network data from flow params
    inflow_edge_len = 100
    premerge = params['net'].additional_params['pre_merge_length']
    postmerge = params['net'].additional_params['post_merge_length']

    # generate edge starts: cumulative start offset of each edge (and each
    # ':'-prefixed internal junction edge) along the main highway
    edgestarts = {
        'inflow_highway': 0,
        'left': inflow_edge_len + 0.1,
        'center': inflow_edge_len + premerge + 22.6,
        'inflow_merge': inflow_edge_len + premerge + postmerge + 22.6,
        'bottom': 2 * inflow_edge_len + premerge + postmerge + 22.7,
        ':left_0': inflow_edge_len,
        ':center_0': inflow_edge_len + premerge + 0.1,
        ':center_1': inflow_edge_len + premerge + 0.1,
        ':bottom_0': 2 * inflow_edge_len + premerge + postmerge + 22.6
    }

    # compute the absolute position (edge start + relative position)
    for veh_id in data.keys():
        data[veh_id]['abs_pos'] = _get_abs_pos(data[veh_id]['edge'],
                                               data[veh_id]['pos'], edgestarts)

    # prepare the speed and absolute position in a way that is compatible with
    # the space-time diagram, and compute the number of vehicles at each step
    pos = np.zeros((all_time.shape[0], len(data.keys())))
    speed = np.zeros((all_time.shape[0], len(data.keys())))
    for i, veh_id in enumerate(sorted(data.keys())):
        for spd, abs_pos, ti, edge in zip(data[veh_id]['vel'],
                                          data[veh_id]['abs_pos'],
                                          data[veh_id]['time'],
                                          data[veh_id]['edge']):
            # avoid vehicles outside the main highway (on-ramp edges)
            if edge in ['inflow_merge', 'bottom', ':bottom_0']:
                continue
            ind = np.where(ti == all_time)[0]
            pos[ind, i] = abs_pos
            speed[ind, i] = spd

    return pos, speed
def _ring_road(data, params, all_time):
    r"""Generate position and speed data for the ring road.

    Vehicles that reach the top of the plot simply return to the bottom and
    continue.

    Parameters
    ----------
    data : dict of dict
        Key = "veh_id": name of the vehicle \n Elements:

        * "time": time step at every sample
        * "edge": edge ID at every sample
        * "pos": relative position at every sample
        * "vel": speed at every sample
    params : dict
        flow-specific parameters
    all_time : array_like
        a (n_steps,) vector representing the unique time steps in the
        simulation

    Returns
    -------
    as_array
        n_steps x n_veh matrix specifying the absolute position of every
        vehicle at every time step. Set to zero if the vehicle is not present
        in the network at that time step.
    as_array
        n_steps x n_veh matrix specifying the speed of every vehicle at every
        time step. Set to zero if the vehicle is not present in the network at
        that time step.
    """
    # import network data from flow params
    total_len = params['net'].additional_params['length']

    # generate edge starts: the ring is split into four equal-length edges
    edgestarts = {
        'bottom': 0,
        'right': total_len / 4,
        'top': total_len / 2,
        'left': 3 * total_len / 4
    }

    # compute the absolute position (edge start + relative position)
    for veh_id in data.keys():
        data[veh_id]['abs_pos'] = _get_abs_pos(data[veh_id]['edge'],
                                               data[veh_id]['pos'], edgestarts)

    # create the output variables
    pos = np.zeros((all_time.shape[0], len(data.keys())))
    speed = np.zeros((all_time.shape[0], len(data.keys())))
    for i, veh_id in enumerate(sorted(data.keys())):
        for spd, abs_pos, ti in zip(data[veh_id]['vel'],
                                    data[veh_id]['abs_pos'],
                                    data[veh_id]['time']):
            ind = np.where(ti == all_time)[0]
            pos[ind, i] = abs_pos
            speed[ind, i] = spd

    return pos, speed
def _figure_eight(data, params, all_time):
    r"""Generate position and speed data for the figure eight.

    The vehicles traveling towards the intersection from one side will be
    plotted from the top downward, while the vehicles from the other side will
    be plotted from the bottom upward.

    Parameters
    ----------
    data : dict of dict
        Key = "veh_id": name of the vehicle \n Elements:

        * "time": time step at every sample
        * "edge": edge ID at every sample
        * "pos": relative position at every sample
        * "vel": speed at every sample
    params : dict
        flow-specific parameters
    all_time : array_like
        a (n_steps,) vector representing the unique time steps in the
        simulation

    Returns
    -------
    as_array
        n_steps x n_veh matrix specifying the absolute position of every
        vehicle at every time step. Set to zero if the vehicle is not present
        in the network at that time step.
    as_array
        n_steps x n_veh matrix specifying the speed of every vehicle at every
        time step. Set to zero if the vehicle is not present in the network at
        that time step.
    """
    # import network data from flow params
    net_params = params['net']
    ring_radius = net_params.additional_params['radius_ring']
    # each half of the eight is built from quarter-circle edges
    ring_edgelen = ring_radius * np.pi / 2.
    intersection = 2 * ring_radius
    # junction length scales with the number of lanes (empirical constants)
    junction = 2.9 + 3.3 * net_params.additional_params['lanes']
    inner = 0.28

    # generate edge starts: absolute offset of every edge (and internal
    # junction edge, prefixed with ':') along the linearized figure eight
    edgestarts = {
        'bottom': inner,
        'top': intersection / 2 + junction + inner,
        'upper_ring': intersection + junction + 2 * inner,
        'right': intersection + 3 * ring_edgelen + junction + 3 * inner,
        'left': 1.5*intersection + 3*ring_edgelen + 2*junction + 3*inner,
        'lower_ring': 2*intersection + 3*ring_edgelen + 2*junction + 4*inner,
        ':bottom_0': 0,
        ':center_1': intersection / 2 + inner,
        ':top_0': intersection + junction + inner,
        ':right_0': intersection + 3 * ring_edgelen + junction + 2 * inner,
        ':center_0': 1.5*intersection + 3*ring_edgelen + junction + 3*inner,
        ':left_0': 2 * intersection + 3*ring_edgelen + 2*junction + 3*inner,
        # for aimsun
        'bottom_to_top': intersection / 2 + inner,
        'right_to_left': junction + 3 * inner,
    }

    # compute the absolute position
    for veh_id in data.keys():
        data[veh_id]['abs_pos'] = _get_abs_pos(data[veh_id]['edge'],
                                               data[veh_id]['pos'], edgestarts)

    # create the output variables
    pos = np.zeros((all_time.shape[0], len(data.keys())))
    speed = np.zeros((all_time.shape[0], len(data.keys())))
    for i, veh_id in enumerate(sorted(data.keys())):
        for spd, abs_pos, ti in zip(data[veh_id]['vel'],
                                    data[veh_id]['abs_pos'],
                                    data[veh_id]['time']):
            # scatter this sample into the row matching its time step
            ind = np.where(ti == all_time)[0]
            pos[ind, i] = abs_pos
            speed[ind, i] = spd

    # reorganize data for space-time plot: remap the linearized positions so
    # the two intersection crossings line up on the plot. NOTE: the order of
    # these three masked updates matters -- each mask is computed on the
    # already-modified `pos` array.
    figure8_len = 6*ring_edgelen + 2*intersection + 2*junction + 10*inner
    intersection_loc = [edgestarts[':center_1'] + intersection / 2,
                        edgestarts[':center_0'] + intersection / 2]
    pos[pos < intersection_loc[0]] += figure8_len
    pos[np.logical_and(pos > intersection_loc[0], pos < intersection_loc[1])] \
        += - intersection_loc[1]
    pos[pos > intersection_loc[1]] = \
        - pos[pos > intersection_loc[1]] + figure8_len + intersection_loc[0]

    return pos, speed
def _get_abs_pos(edge, rel_pos, edgestarts):
"""Compute the absolute positions from edges and relative positions.
This is the variable we will ultimately use to plot individual vehicles.
Parameters
----------
edge : list of str
list of edges at every time step
rel_pos : list of float
list of relative positions at every time step
edgestarts : dict
the absolute starting position of every edge
Returns
-------
list of float
the absolute positive for every sample
"""
ret = []
for edge_i, pos_i in zip(edge, rel_pos):
ret.append(pos_i + edgestarts[edge_i])
return ret
if __name__ == '__main__':
    # create the parser
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description='[Flow] Generates time space diagrams for flow networks.',
        epilog='python time_space_diagram.py </path/to/emission>.csv '
               '</path/to/flow_params>.json')

    # required arguments
    parser.add_argument('emission_path', type=str,
                        help='path to the csv file.')
    parser.add_argument('flow_params', type=str,
                        help='path to the flow_params json file.')

    # optional arguments
    # NOTE(review): --steps is parsed but never used below -- confirm intent.
    parser.add_argument('--steps', type=int, default=1,
                        help='rate at which steps are plotted.')
    # BUG FIX: the --title help text was copy-pasted from --steps
    # ("rate at which steps are plotted.").
    parser.add_argument('--title', type=str, default='Time Space Diagram',
                        help='title of the generated plot.')
    parser.add_argument('--max_speed', type=int, default=8,
                        help='The maximum speed in the color range.')
    parser.add_argument('--start', type=float, default=0,
                        help='initial time (in sec) in the plot.')
    parser.add_argument('--stop', type=float, default=float('inf'),
                        help='final time (in sec) in the plot.')

    args = parser.parse_args()

    # flow_params is imported as a dictionary
    flow_params = get_flow_params(args.flow_params)

    # import data from the emission.csv file
    emission_data = import_data_from_emission(args.emission_path)

    # compute the position and speed for all vehicles at all times
    pos, speed, time = get_time_space_data(emission_data, flow_params)

    # color map anchors: dark -> red -> yellow -> green with speed
    cdict = {
        'red': ((0, 0, 0), (0.2, 1, 1), (0.6, 1, 1), (1, 0, 0)),
        'green': ((0, 0, 0), (0.2, 0, 0), (0.6, 1, 1), (1, 1, 1)),
        'blue': ((0, 0, 0), (0.2, 0, 0), (0.6, 0, 0), (1, 0, 0))
    }
    my_cmap = colors.LinearSegmentedColormap('my_colormap', cdict, 1024)

    # perform plotting operation
    fig = plt.figure(figsize=(16, 9))
    ax = plt.axes()
    norm = plt.Normalize(0, args.max_speed)
    cols = []

    # clip the displayed time window to [--start, --stop], with a 2.5% margin
    xmin = max(time[0], args.start)
    xmax = min(time[-1], args.stop)
    xbuffer = (xmax - xmin) * 0.025  # 2.5% of range
    ymin, ymax = np.amin(pos), np.amax(pos)
    ybuffer = (ymax - ymin) * 0.025  # 2.5% of range

    ax.set_xlim(xmin - xbuffer, xmax + xbuffer)
    ax.set_ylim(ymin - ybuffer, ymax + ybuffer)

    # build one speed-colored line collection per vehicle
    for indx_car in range(pos.shape[1]):
        unique_car_pos = pos[:, indx_car]

        # insert NaNs at wraparound discontinuities (jumps >= 10 m) so the
        # plot does not draw a line across the whole figure
        disc = np.where(np.abs(np.diff(unique_car_pos)) >= 10)[0] + 1
        unique_car_time = np.insert(time, disc, np.nan)
        unique_car_pos = np.insert(unique_car_pos, disc, np.nan)
        unique_car_speed = np.insert(speed[:, indx_car], disc, np.nan)

        points = np.array(
            [unique_car_time, unique_car_pos]).T.reshape(-1, 1, 2)
        segments = np.concatenate([points[:-1], points[1:]], axis=1)
        lc = LineCollection(segments, cmap=my_cmap, norm=norm)

        # Set the values used for color mapping
        lc.set_array(unique_car_speed)
        lc.set_linewidth(1.75)
        cols.append(lc)

    plt.title(args.title, fontsize=25)
    plt.ylabel('Position (m)', fontsize=20)
    plt.xlabel('Time (s)', fontsize=20)

    for col in cols:
        line = ax.add_collection(col)
    # the colorbar is attached to the last collection added above
    cbar = plt.colorbar(line, ax=ax)
    cbar.set_label('Velocity (m/s)', fontsize=20)
    cbar.ax.tick_params(labelsize=18)

    plt.xticks(fontsize=18)
    plt.yticks(fontsize=18)

    ###########################################################################
    #                       Note: For MergeScenario only                      #
    if flow_params['scenario'] == 'MergeScenario':                            #
        plt.plot(time, [0] * pos.shape[0], linewidth=3, color="white")        #
        plt.plot(time, [-0.1] * pos.shape[0], linewidth=3, color="white")     #
    ###########################################################################

    plt.show()
| StarcoderdataPython |
8195943 | <filename>AdelaiDet/detectron2/projects/DensePose/densepose/data/samplers/densepose_uniform.py<gh_stars>0
# Copyright (c) Facebook, Inc. and its affiliates.
import random
import torch
from .densepose_base import DensePoseBaseSampler
class DensePoseUniformSampler(DensePoseBaseSampler):
    """
    Samples DensePose data from DensePose predictions.

    For each class, sample indices are drawn uniformly at random over all
    pixels estimated to belong to that class.
    """

    def __init__(self, count_per_class: int = 8):
        """
        Constructor

        Args:
            count_per_class (int): the sampler produces at most
                `count_per_class` samples for each category
        """
        super().__init__(count_per_class)

    def _produce_index_sample(self, values: torch.Tensor, count: int):
        """
        Produce a uniform random sample of indices to select data.

        Args:
            values (torch.Tensor): an array of size [n, k] that contains
                estimated values (U, V, confidences);
                n: number of channels (U, V, confidences)
                k: number of points labeled with part_id
            count (int): number of samples to produce, should be positive
                and <= k

        Return:
            list(int): indices of values (along axis 1) selected as a sample
        """
        num_points = values.shape[1]
        return random.sample(range(num_points), count)
| StarcoderdataPython |
11385399 | # -*- coding: utf-8 -*-
"""
bromelia.exceptions
~~~~~~~~~~~~~~~~~~~
    This module defines the exception classes raised across the library's
    Diameter implementation (application, association, message, header,
    AVP and configuration handling).
:copyright: (c) 2020-present <NAME>.
:license: MIT, see LICENSE for more details.
"""
# NOTE(review): these exceptions subclass BaseException rather than
# Exception, so a plain ``except Exception`` will NOT catch them --
# confirm this is intentional.
class DiameterApplicationError(BaseException):
    """A Diameter Application error occurred."""
    pass

class DiameterAssociationError(BaseException):
    """A Diameter Association error occurred."""
    pass

class DiameterMessageError(BaseException):
    """A Diameter Message error occurred."""
    pass

class DiameterHeaderError(BaseException):
    """A Diameter Header error occurred."""
    pass

class DiameterAvpError(BaseException):
    """A Diameter AVP error occurred."""
    pass

class DiameterHeaderAttributeValueError(BaseException):
    """A valid attribute value is required."""
    pass

class AVPAttributeValueError(BaseException):
    """A valid attribute value is required."""
    pass

#class AVPKeyValueError(BaseException):
#    """A valid key value is required."""
#    pass

class AVPOperationError(BaseException):
    """Invalid operation between two DiameterAVP objects"""

class AVPParsingError(BaseException):
    """An invalid AVP byte stream has been found."""
    pass

class ProcessRequestException(BaseException):
    """Raised when processing an incoming Diameter request fails."""
    pass

class DataTypeError(BaseException):
    """A Diameter data type error occurred."""
    pass

class ParsingDataTypeError(BaseException):
    """Raised when parsing a value into a Diameter data type fails."""
    pass

class InvalidConfigKey(BaseException):
    """Invalid config key found"""

class InvalidConfigValue(BaseException):
    """Invalid config value found"""

class MissingAttributes(KeyError):
    """ Not found attributes for a given class """

class BromeliaException(BaseException):
    """ Something went wrong in Bromelia class """
| StarcoderdataPython |
11296209 | <filename>orchestra/contrib/contacts/filters.py
from django.contrib.admin import SimpleListFilter
from django.utils.translation import ugettext_lazy as _
from .models import Contact
class EmailUsageListFilter(SimpleListFilter):
    """Admin sidebar filter narrowing contacts by their email usages."""
    title = _("email usages")
    parameter_name = 'email_usages'

    def lookups(self, request, model_admin):
        # Choices shown in the admin sidebar.
        return Contact.EMAIL_USAGES

    def queryset(self, request, queryset):
        selected = self.value()
        if selected is None:
            # No filter chosen: leave the queryset untouched.
            return queryset
        # NOTE(review): assumes `email_usages` accepts a list-valued exact
        # lookup (e.g. a MultiSelectField) -- confirm against the model.
        return queryset.filter(email_usages=selected.split(','))
| StarcoderdataPython |
8140351 | import pytest
from hypothesis import given, strategies as st
from antidote import Tag, Tagged
from antidote.core import DependencyContainer, DependencyInstance
from antidote.exceptions import DependencyNotFoundError, DuplicateTagError
from antidote.providers.tag import TaggedDependencies, TagProvider
class Service:
    """Dummy service class used as a sample dependency in the tests."""
    pass

@pytest.fixture()
def provider():
    """Yield a TagProvider registered on a fresh DependencyContainer."""
    container = DependencyContainer()
    provider = TagProvider(container=container)
    container.register_provider(provider)
    return provider

def test_tag():
    # a tag exposes its name and any custom keyword attributes
    t = Tag(name='test', val='x')

    assert 'test' == t.name
    assert 'x' == t.val
    # unknown attributes resolve to None instead of raising
    assert t.anything is None

    # both repr() and str() mention the name and the custom attributes
    assert "val='x'" in repr(t)
    assert "'test'" in repr(t)
    assert "val='x'" in str(t)
    assert "'test'" in str(t)

    t2 = Tag(name='test')
    assert "'test'" in str(t2)

@pytest.mark.parametrize('name,error', [('', ValueError),
                                        (object(), TypeError)])
def test_invalid_tag(name, error):
    # empty and non-string names are rejected at construction time
    with pytest.raises(error):
        Tag(name)

@given(st.builds(Tagged, name=st.sampled_from(['test', '987 jkh@è'])))
def test_tagged_eq_hash(tagged):
    # does not fail
    hash(tagged)

    # two Tagged instances with the same name are still distinct
    # (different equality and different hash)
    for f in (lambda e: e, hash):
        assert f(Tagged(tagged.name)) != f(tagged)

    assert repr(tagged.name) in repr(tagged)

@pytest.mark.parametrize('name,error', [('', ValueError),
                                        (object(), TypeError)])
def test_invalid_tagged(name, error):
    with pytest.raises(error):
        Tagged(name)
def test_tagged_dependencies():
    # two dependencies, each carrying one tag
    tag1 = Tag('tag1')
    tag2 = Tag('tag2', dummy=True)
    c = DependencyContainer()
    t = TaggedDependencies(
        container=c,
        dependencies=['d', 'd2'],
        tags=[tag1, tag2]
    )

    assert {tag1, tag2} == set(t.tags())
    assert {'d', 'd2'} == set(t.dependencies())
    assert 2 == len(t)

    # instantiation from container
    c.update_singletons({'d': 'test', 'd2': 'test2'})
    assert {'test', 'test2'} == set(t.instances())

    # from cache: instances are memoized, so later container changes
    # are not reflected
    c.update_singletons({'d': 'different', 'd2': 'different2'})
    assert {'test', 'test2'} == set(t.instances())

def test_tagged_dependencies_invalid_dependency():
    tag = Tag('tag1')
    c = DependencyContainer()
    t = TaggedDependencies(
        container=c,
        dependencies=['d'],
        tags=[tag]
    )
    assert ['d'] == list(t.dependencies())
    assert [tag] == list(t.tags())

    # 'd' was never registered in the container, so resolving it fails
    with pytest.raises(DependencyNotFoundError):
        list(t.instances())
def test_repr():
    """The provider's repr mentions the dependencies registered on it."""
    # BUG FIX: the function previously requested the `provider` fixture
    # and immediately shadowed it with a fresh instance; the fixture
    # parameter was dead weight and has been removed.
    provider = TagProvider(DependencyContainer())
    x = object()
    provider.register(x, [Tag(name='tag')])
    assert str(x) in repr(provider)
def test_provide_tags(provider: TagProvider):
    """Tagged queries resolve to every dependency carrying that tag."""
    container = provider._container
    container.update_singletons(dict(test=object(), test2=object()))
    custom_tag = Tag('tag2', error=True)
    provider.register('test', ['tag1', custom_tag])
    provider.register('test2', ['tag2'])

    # unknown tag name: an empty, non-singleton result is returned
    result = provider.provide(Tagged('xxxxx'))
    assert isinstance(result, DependencyInstance)
    assert result.singleton is False
    assert 0 == len(result.instance)

    result = provider.provide(Tagged('tag1'))
    assert isinstance(result, DependencyInstance)
    assert result.singleton is False

    tagged_dependencies = result.instance  # type: TaggedDependencies
    assert 1 == len(tagged_dependencies)
    assert ['test'] == list(tagged_dependencies.dependencies())
    assert ['tag1'] == [tag.name for tag in tagged_dependencies.tags()]
    # BUG FIX: the brackets previously wrapped the whole comparison
    # (``assert [a == b]``), which is always true for a one-element list;
    # the intended assertion compares the singleton instance list.
    assert [container.get('test')] == list(tagged_dependencies.instances())

    result = provider.provide(Tagged('tag2'))
    assert isinstance(result, DependencyInstance)
    assert result.singleton is False

    tagged_dependencies = result.instance  # type: TaggedDependencies
    assert 2 == len(tagged_dependencies)
    assert {'test', 'test2'} == set(tagged_dependencies.dependencies())
    tags = list(tagged_dependencies.tags())
    assert {'tag2', 'tag2'} == {tag.name for tag in tags}
    assert any(tag is custom_tag for tag in tags)
    instances = {container.get('test'), container.get('test2')}
    assert instances == set(tagged_dependencies.instances())
@pytest.mark.parametrize('tag', ['tag', Tag(name='tag')])
def test_duplicate_tag_error(provider: TagProvider, tag):
    # registering the same tag name twice (as str or Tag) must fail
    provider.register('test', [Tag(name='tag')])
    with pytest.raises(DuplicateTagError):
        provider.register('test', tags=[tag])

def test_duplicate_tag_error_in_same_register(provider: TagProvider):
    # duplicates within a single register() call are also rejected
    with pytest.raises(DuplicateTagError):
        provider.register('test', tags=[Tag(name='tag'), 'tag'])

@pytest.mark.parametrize(
    'tags',
    [
        [object],
        [lambda _: False],
        ['test', object]
    ]
)
def test_invalid_register(provider: TagProvider, tags):
    # tags must be strings or Tag instances
    with pytest.raises(ValueError):
        provider.register('test', tags)

@pytest.mark.parametrize('dependency', ['test', Service, object()])
def test_unknown_dependency(provider: TagProvider, dependency):
    # dependencies with no registered tags resolve to None
    assert provider.provide(dependency) is None
| StarcoderdataPython |
1913668 | <gh_stars>0
"""_app URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path, include
from django.contrib.auth.models import User, Group
from django.contrib import admin
admin.autodiscover()
from rest_framework import generics, permissions, serializers
from oauth2_provider.contrib.rest_framework import TokenHasReadWriteScope, TokenHasScope
# first we define the serializers
class UserSerializer(serializers.ModelSerializer):
    """Serializes basic identity fields of the Django auth User."""
    class Meta:
        model = User
        fields = ('username', 'email', "first_name", "last_name")

class GroupSerializer(serializers.ModelSerializer):
    """Serializes the name of a Django auth Group."""
    class Meta:
        model = Group
        fields = ("name", )

# Create the API views
class UserList(generics.ListCreateAPIView):
    """List users or create one; requires an OAuth2 read/write token."""
    permission_classes = [permissions.IsAuthenticated, TokenHasReadWriteScope]
    queryset = User.objects.all()
    serializer_class = UserSerializer

class UserDetails(generics.RetrieveAPIView):
    """Retrieve one user by pk; requires an OAuth2 read/write token."""
    permission_classes = [permissions.IsAuthenticated, TokenHasReadWriteScope]
    queryset = User.objects.all()
    serializer_class = UserSerializer

class GroupList(generics.ListAPIView):
    """List groups; requires an OAuth2 token with the 'groups' scope."""
    permission_classes = [permissions.IsAuthenticated, TokenHasScope]
    required_scopes = ['groups']
    queryset = Group.objects.all()
    serializer_class = GroupSerializer

# Setup the URLs and include login URLs for the browsable API.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('o/', include('oauth2_provider.urls', namespace='oauth2_provider')),
    path('users/', UserList.as_view()),
    path('users/<pk>/', UserDetails.as_view()),
    path('groups/', GroupList.as_view()),
    # ...
]
| StarcoderdataPython |
8017325 | <filename>2020/Day 02/part1.py<gh_stars>1-10
def is_valid_password(line):
    """Return True if the line's password satisfies its count policy.

    A line looks like ``1-3 a: abcde``: the password is valid when the
    number of occurrences of the letter falls within the inclusive range.
    """
    bounds, letter, password = line.split()
    lowest, highest = map(int, bounds.split('-'))
    # the letter token carries a trailing ':' in the puzzle input
    return lowest <= password.count(letter.rstrip(':')) <= highest


def main():
    """Count valid passwords in ``input.txt`` and print the total."""
    with open("input.txt", 'r', encoding="utf-8") as file:
        answer = sum(1 for line in file if is_valid_password(line))
    print(answer)


if __name__ == "__main__":
    main()
| StarcoderdataPython |
4918585 | <reponame>AlexGolovaschenko/PoultryCam<gh_stars>0
from django.contrib import admin
from .models import Photo, PhotoMetaData
class PhotoMetaDataInline (admin.StackedInline):
    """Inline editor for a photo's metadata on the Photo admin page."""
    model = PhotoMetaData

class PhotoAdmin (admin.ModelAdmin):
    """Admin configuration for Photo: list columns plus inline metadata."""
    list_display = ['__str__', 'upload_date', 'marker']
    inlines = [PhotoMetaDataInline]

admin.site.register(Photo, PhotoAdmin)
| StarcoderdataPython |
6452880 | from flask import Blueprint, send_from_directory
from os.path import dirname, join
blueprint = Blueprint('controller', __name__)
# static assets live in ../web, stored files in ../db (relative to this file)
rootDir = join(dirname(dirname(__file__)), 'web')
dbDir = join(dirname(dirname(__file__)), 'db')

@blueprint.route('/')
def root():
    """Serve the single-page app entry point."""
    return send_from_directory(rootDir, 'index.html')

@blueprint.route('/<path:filename>')
def serve(filename):
    """Serve any static asset from the web directory."""
    return send_from_directory(rootDir, filename)

@blueprint.route('/db/<path:filename>')
def serve_image(filename):
    """Serve stored files (e.g. images) from the db directory."""
    return send_from_directory(dbDir, filename)
| StarcoderdataPython |
8063543 | import os
from django.contrib.sites.models import Site
# One-off bootstrap: point the first django.contrib.sites record at the
# domain/name configured in the environment.
one = Site.objects.all()[0]
# NOTE(review): os.environ.get() returns None when DOMAIN/NAME are unset,
# which would blank out the site record -- confirm this is intended.
one.domain = os.environ.get('DOMAIN')
one.name = os.environ.get('NAME')
one.save() | StarcoderdataPython |
3348725 | <gh_stars>10-100
from rest_framework import serializers
from .models import Invoice, Item
class ItemSerializer(serializers.ModelSerializer):
    """Serializes a single invoice line item."""
    class Meta:
        model = Item
        # the owning invoice is set server-side, never by the client
        read_only_fields = (
            "invoice",
        )
        fields = (
            "id",
            "title",
            "quantity",
            "unit_price",
            "net_amount",
            "vat_rate",
            "discount"
        )
class InvoiceSerializer(serializers.ModelSerializer):
    """Serializes an invoice together with its nested line items."""
    items = ItemSerializer(many=True)
    bankaccount = serializers.CharField(required=False)

    class Meta:
        model = Invoice
        # BUG FIX: a trailing comma after the closing parenthesis used to
        # turn this into a 1-tuple containing a tuple, so DRF never saw
        # these field names and none of them were actually read-only.
        read_only_fields = (
            "team",
            "invoice_number",
            "created_at",
            "created_by",
            "modified_at",
            "modified_by",
        )
        fields = (
            "id",
            "invoice_number",
            "client",
            "client_name",
            "client_email",
            "client_org_number",
            "client_address1",
            "client_address2",
            "client_zipcode",
            "client_place",
            "client_country",
            "client_contact_person",
            "client_contact_reference",
            "sender_reference",
            "invoice_type",
            "due_days",
            "is_sent",
            "is_paid",
            "gross_amount",
            "vat_amount",
            "net_amount",
            "discount_amount",
            "items",
            "bankaccount",
            "get_due_date_formatted",
            "is_credit_for",
            "is_credited",
        )

    def create(self, validated_data):
        """Create the invoice and all of its nested line items."""
        items_data = validated_data.pop('items')
        invoice = Invoice.objects.create(**validated_data)
        for item in items_data:
            Item.objects.create(invoice=invoice, **item)
return invoice | StarcoderdataPython |
246715 | """ Stat objects make life a little easier """
class CoreStat():
    """A character's core stat.

    Upgrading adds points; the effective ("base") value is always
    points + 1. Each core stat owns a DerivedStat that is recomputed
    whenever the core value changes.
    """

    def __init__(self, points=0):
        # negative inputs are clamped to zero
        points = int(points)
        if points < 0:
            points = 0
        self._points = points
        self._current = self.base
        self._derived = DerivedStat(self.base)

    @property
    def base(self):
        """Undamaged value of the stat (points + 1)."""
        return self._points + 1

    @property
    def current(self):
        """Current (possibly damaged) value of the stat."""
        return self._current

    @current.setter
    def current(self, value):
        # clamp at zero; the value is otherwise stored as given
        self._current = value
        if self._current < 0:
            self._current = 0

    @property
    def derived(self):
        """The DerivedStat computed from this core stat."""
        return self._derived

    def set_derived(self, factor, offset):
        """Set the derived stat's factor/offset and refresh it.

        Returns the (factor, offset) arguments as passed in -- before any
        clamping done by the DerivedStat setters -- matching the original
        chained-assignment behaviour.
        """
        self._derived._base = self.base
        self._derived.factor = factor
        self._derived.offset = offset
        self._derived.restore()
        return factor, offset

    def upgrade(self, points=1):
        """Increase the core stat by spending upgrade points (min 1)."""
        points = int(points)
        if points < 1:
            points = 1

        # Update the core stat
        self._points += points
        self.restore()

        # Update the derived stat
        self._derived._base = self.base
        self._derived.restore()
        return True

    def restart(self):
        """Remove all upgrades; returns the number of points refunded."""
        old_points = self._points

        # Update the core stat
        self._points = 0
        self.restore()

        # Update the derived stat
        self._derived._base = self.base
        self._derived.restore()

        # Return what was removed for keeping track of stat restarts
        return old_points

    def restore(self):
        """Reset the current value back to the undamaged baseline."""
        self.current = self._points + 1

    def __getstate__(self):
        """Serialize the core stat for pickling."""
        return {
            "points": self._points,
            "derived_factor": self._derived.factor,
            "derived_offset": self._derived.offset
        }

    def __setstate__(self, state):
        """Rebuild the stat from a __getstate__ dict.

        BUG FIX: unpickling bypasses __init__, so ``self._derived`` did
        not exist and the old implementation raised AttributeError inside
        set_derived(); recreate the DerivedStat before configuring it.
        """
        self._points = state["points"]
        self.restore()
        self._derived = DerivedStat(self.base)
        self.set_derived(state["derived_factor"], state["derived_offset"])
class DerivedStat():
    """A stat computed from a core stat as ``base * factor + offset``.

    Instances are refreshed by their owning core stat whenever the core
    value changes.
    """

    def __init__(self, base, factor=1.0, offset=0):
        self._factor = factor
        self._offset = offset
        self._base = base
        # start out at the full (undamaged) value
        self._current = self.base

    @property
    def base(self):
        """Baseline value derived from the core stat."""
        return (self._base * self._factor) + self._offset

    @property
    def current(self):
        """Current (possibly damaged) value."""
        return self._current

    @current.setter
    def current(self, value):
        # truncate to int and clamp at zero
        self._current = max(0, int(value))

    @property
    def factor(self):
        """Multiplication factor applied to the core value."""
        return self._factor

    @factor.setter
    def factor(self, value):
        # factors below one are not allowed
        self._factor = max(1, int(value))
        return self._factor

    @property
    def offset(self):
        """Additive offset applied after the factor."""
        return self._offset

    @offset.setter
    def offset(self, value):
        # negative offsets are not allowed
        self._offset = max(0, int(value))
        return self._offset

    def restore(self):
        """Reset the current value to the baseline (without int clamping)."""
        self._current = self.base
| StarcoderdataPython |
4842704 | import ipyparallel
class remote_iterator:
    """Iterate over an object living on a remote IPython parallel engine.

    Wraps a remote name so that iterating locally pulls one item at a time
    from the engine via ``apply_sync``.
    """

    def __init__(self, view, name):
        self.view = view
        self.name = name

    def __iter__(self):
        # BUG FIX: the generator body used to live in __next__, which
        # broke the iterator protocol -- each next(obj) call returned a
        # fresh generator object instead of the next remote item. Making
        # __iter__ itself the generator yields the remote items directly.
        it_name = '_%s_iter' % self.name
        self.view.execute('%s = iter(%s)' % (it_name, self.name), block=True)
        next_ref = ipyparallel.Reference(it_name + '.next')
        while True:
            try:
                yield self.view.apply_sync(next_ref)
            except ipyparallel.RemoteError as e:
                if e.ename == 'StopIteration':
                    # BUG FIX: raising StopIteration inside a generator is
                    # a RuntimeError under PEP 479; return ends iteration.
                    return
                raise
| StarcoderdataPython |
5045278 | <gh_stars>10-100
# Derived from: https://gist.githubusercontent.com/stantonk/b0a937ca9c035a83b14c/raw/bff09b6977579057cec7812e41d2e486a07a14b2/get_valid_tlds.py
# <NAME>, Jask Labs Inc.
# Jan 2017
# requirements.txt
# pip install requests
# pip install BeautifulSoup4
import codecs
import requests
from bs4 import BeautifulSoup
PER_LINE = 12


def tld_from_href(href):
    """Extract the TLD label from an IANA root-db href.

    e.g. ``/domains/root/db/com.html`` -> ``com``.
    """
    return href.split("/")[-1].split(".")[0]


def fetch_tlds():
    """Scrape the IANA root zone database and return the list of TLDs."""
    text = requests.get('http://www.iana.org/domains/root/db').text
    soup = BeautifulSoup(text, "html.parser")
    table = soup.find('table', {'id': 'tld-table'})
    hrefs = [anchor.attrs['href'] for anchor in table.find_all('a')]
    return [tld_from_href(href) for href in hrefs]


if __name__ == '__main__':
    # BUG FIX: the original ended with a Python-2 ``print tlds`` statement,
    # a syntax error on Python 3; print() works on both. The network call
    # also ran at import time -- it is now behind a main guard.
    print(fetch_tlds())
| StarcoderdataPython |
1830822 | from ._auto_fight import AutoFight
from ._normal_fight import NormalFight
from kf_lib.ui import cls, pak, yn
def get_prefight_info(side_a, side_b=None, hide_enemy_stats=False, basic_info_only=False):
    """Build the multi-line roster string shown before a fight.

    Lists name, level and style for every fighter (side B, if given, is
    separated by a '-vs-' line); unless basic_info_only, also shows the
    attribute columns, weapons and style descriptions.
    """
    # flatten both sides into one list, side A first
    fs = side_a[:]
    if side_b:
        fs.extend(side_b)
    s = ''
    first_fighter = fs[0]
    # column widths sized to the longest name / level / style string
    size1 = max([len(s) for s in ['NAME '] + [f.name + ' ' for f in fs]])
    size2 = max([len(s) for s in ['LEV '] + [str(f.level) + ' ' for f in fs]])
    size3 = max([len(s) for s in ['STYLE '] + [f.style.name + ' ' for f in fs]])
    att_names = ' '.join(first_fighter.att_names_short) if not basic_info_only else ''
    s += 'NAME'.ljust(size1) + 'LEV'.ljust(size2) + 'STYLE'.ljust(size3) + att_names
    if any([f.weapon for f in fs]) and not basic_info_only:
        s += ' WEAPON'
    for f in fs:
        # separator line before the first fighter of side B
        if side_b and f == side_b[0]:
            s += '\n-vs-'
        s += '\n{:<{}}{:<{}}{:<{}}'.format(
            f.name,
            size1,
            f.level,
            size2,
            f.style.name,
            size3,
        )
        if basic_info_only:
            continue
        # show real attribute values for humans and their teammates;
        # hide them for AI enemies when hide_enemy_stats is set
        if (
            (not hide_enemy_stats)
            or f.is_human
            or (f in side_a and any([ff.is_human for ff in side_a]))
            or (side_b and f in side_b and any([ff.is_human for ff in side_b]))
        ):
            atts_wb = (f.get_att_str_prefight(att) for att in first_fighter.att_names)
        else:
            atts_wb = (f.get_att_str_prefight(att, hide=True) for att in first_fighter.att_names)
        s += '{:<4}{:<4}{:<4}{:<4}'.format(*atts_wb)
        if f.weapon:
            s += f'{f.weapon.name} {f.weapon.descr_short}'
        s += f"\n{' ' * (size1 + size2)}{f.style.descr_short}"
    return s
def fight(
        f1,
        f2,
        f1_allies=None,
        f2_allies=None,
        auto_fight=False,
        af_option=True,
        hide_stats=True,
        environment_allowed=True,
        items_allowed=True,
        win_messages=None,
        school_display=False,
        return_fight_obj=False,
):
    """Return True if f1 wins, False otherwise (including draw)."""
    side_a, side_b = get_sides(f1, f2, f1_allies, f2_allies)
    all_fighters = side_a + side_b
    # interactive path: at least one human fighter takes part
    if any((f.is_human for f in all_fighters)):
        if not any((f.is_human for f in side_a)):
            side_a, side_b = (
                side_b,
                side_a,
            )  # swap sides for human player's convenience (e.g. in tournaments)
            if win_messages:
                temp = win_messages[:]
                win_messages = [temp[1], temp[0]]  # swap win messages also
        cls()
        print(get_prefight_info(side_a, side_b, hide_stats))
        if af_option:
            auto_fight = yn('\nAuto fight?')
        else:
            pak()
        cls()
    else:
        # AI-only fights are always resolved automatically
        auto_fight = True
    if auto_fight:
        f = AutoFight(
            side_a, side_b, environment_allowed, items_allowed, win_messages, school_display
        )
    else:
        f = NormalFight(
            side_a, side_b, environment_allowed, items_allowed, win_messages, school_display
        )
    if return_fight_obj:
        return f
    return f.win
def get_sides(f1, f2, f1_allies, f2_allies):
    """Build the two fighting sides as lists headed by f1 and f2."""
    side_a = [f1] + list(f1_allies or [])
    side_b = [f2] + list(f2_allies or [])
    return side_a, side_b
| StarcoderdataPython |
8067331 | <gh_stars>0
from xmlrpc.server import SimpleXMLRPCServer
import argparse
import threading
import shelve
from typing import *
class Server:
    """XML-RPC server for the 'ten castles' strategy game.

    Each strategy is a 10-element list of soldier counts. Strategy A takes
    castle i (worth i+1 points) from B when A commits strictly more than
    twice B's soldiers there.
    """

    # methods exposed over XML-RPC
    _rpc_methods_ = ['add', 'delete', 'run', 'check', 'duels']

    def __init__(self, address):
        # persistent strategy store: name -> list of 10 ints
        self._strategies = shelve.open('ten_castles')
        self._srv = SimpleXMLRPCServer(address,
                                       allow_none=True, logRequests=False)
        # serializes access to the shelve across request handlers
        self._lock = threading.Lock()
        for name in self._rpc_methods_:
            self._srv.register_function(getattr(self, name))

    def start(self):
        """Serve requests forever (blocking)."""
        self._srv.serve_forever()

    def add(self, name: str, strategy: List[int]):
        """Store (or overwrite) a strategy, then score it against the rest."""
        print('add:', name)
        with self._lock:
            self._strategies[name] = list(strategy)
            return self._run(name)

    def delete(self, name: str):
        """Remove a strategy; silently ignores unknown names."""
        print('delete:', name)
        with self._lock:
            if name not in self._strategies:
                return
            del self._strategies[name]

    def run(self, name: str):
        """Score an existing strategy against all others."""
        print('run:', name)
        with self._lock:
            return self._run(name)

    def check(self, name: str):
        """Return a printable representation of a stored strategy."""
        with self._lock:
            if name not in self._strategies:
                return 'No such strategy'
            return name + ": " + str(self._strategies[name])

    def duels(self):
        """Return every strategy's average score over all pairwise duels."""
        # NOTE(review): this length check happens outside the lock, unlike
        # the other methods -- confirm whether that is intended.
        if len(self._strategies) == 1:
            return "Only one strategy exists"
        with self._lock:
            result = ""
            for n1, s1 in self._strategies.items():
                score = 0
                for n2, s2 in self._strategies.items():
                    if n1 == n2:
                        continue
                    cur_score = 0
                    for i in range(0, 10):
                        # castle i is taken with more than double the soldiers
                        if s1[i] > 2 * s2[i]:
                            cur_score += i + 1
                    score += cur_score
                score = score / (len(self._strategies) - 1)
                result += n1 + ': ' + str(score) + '\n'
            return result

    def _run(self, name: str):
        """Score `name` against every other strategy (caller holds lock)."""
        if name not in self._strategies:
            return 'Invalid strategy'
        result = ""
        score = 0
        strategy = self._strategies[name]
        for k, v in self._strategies.items():
            if k == name:
                continue
            cur_score = 0
            for i in range(0, 10):
                if strategy[i] > 2 * v[i]:
                    cur_score += i + 1
            result += "from " + k + ": " + str(cur_score) + '\n'
            score += cur_score
        if len(self._strategies) == 1:
            return 'No other strategies'
        return 'average: ' + str(score / (len(self._strategies) - 1)) + '\n' \
               + result

if __name__ == '__main__':
    # command line: listen on the given port on all interfaces
    parser = argparse.ArgumentParser()
    parser.add_argument('port', type=int)
    args = parser.parse_args()

    srv = Server(('', args.port))
    srv.start()
| StarcoderdataPython |
1869340 | <filename>Ejercicios/Escritura de archivos.py
# File-reading exercise: open new.txt, print its lines, then print them
# again with a "=>" prefix.
try:
    f = open("new.txt", "r")
    print("Archivo")
except OSError:
    # BUG FIX: the error message used to come AFTER exit(), making it
    # unreachable; the bare except has also been narrowed to OSError.
    print("Error")
    exit()

# BUG FIX: the original loop called readline() twice per iteration (once
# to print, once to test for EOF), silently skipping every other line.
for linea in f:
    print(linea)

# BUG FIX: rewind before re-reading -- the file position is at EOF here,
# so readlines() would otherwise return an empty list.
f.seek(0)
lineas = f.readlines()
for l in lineas:
    print("=>", l, "\n\n")

f.close()
| StarcoderdataPython |
1840266 | <reponame>mail2nsrajesh/python-monascaclient<filename>examples/check_monasca.py<gh_stars>0
#!/usr/bin/env python
#
# (C) Copyright 2016 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import getopt
import os
import sys
from monascaclient import client
def usage():
    """Print command-line usage help for this checker."""
    usage = """
    Requires services.osrc variables in bash environment (OS_USERNAME etc).
    check_monasca -d <dimension> -v <value>
    -h --help Prints this
    -d dimension Dimension to filter on
    -v value Value of dimension
    Examples
    check_monasca -d hostname -v test-c0-m1-mgmt # Retrieve all alarms for a host
    check_monasca -d service -v nova # Retrieve all nova alarms
    """
    print(usage)
def get_keystone_creds():
    """Collect Keystone/Monasca credentials from the OS_* environment."""
    env = os.environ
    return {
        'username': env['OS_USERNAME'],
        'password': env['OS_PASSWORD'],
        'auth_url': env['OS_AUTH_URL'],
        'project_name': env['OS_PROJECT_NAME'],
        'endpoint': env['OS_MONASCA_URL'],
        'os_cacert': env['OS_CACERT'],
    }
def format_alarm(alarm):
    """Render one Monasca alarm as a short 'host definition ( Metric = name)' string."""
    metric = alarm['metrics'][0]
    metric_name = metric['name']
    dims = metric['dimensions']
    parts = ["%s %s ( Metric = %s)" % (dims['hostname'],
                                       alarm['alarm_definition']['name'],
                                       metric_name)]
    # process and disk metrics get an extra identifying dimension appended
    if "process." in metric_name:
        parts.append("-%s," % dims['process_name'])
    if "disk." in metric_name:
        parts.append("-%s," % dims['mount_point'])
    parts.append(",")
    return "".join(parts)
def main(argv):
    """Nagios-style check: query Monasca alarms filtered on one dimension.

    Exit status follows the Nagios convention: 0 OK, 1 WARNING,
    2 CRITICAL (also 2 for bad/missing arguments).
    """
    # Initialise Variables
    warns = 0
    crits = 0
    warns_output = ""
    crits_output = ""
    dimension = ""
    dim_value = ""
    # Test parameters
    try:
        opts, args = getopt.getopt(argv, "h::d:v:", ["dimension=", "value="])
    except getopt.GetoptError:
        usage()
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            usage()
            sys.exit()
        elif opt in ("-d", "--dimension"):
            dimension = arg
        elif opt in ("-v", "--value"):
            dim_value = arg
    # Both the dimension and its value are mandatory.
    if dimension == "" or dim_value == "":
        usage()
        sys.exit(2)
    # Set the api version of monasca-api
    api_version = '2_0'
    creds = get_keystone_creds()
    # Build request: filter alarms on the single requested dimension.
    dimensions = {}
    dimensions[dimension] = dim_value
    fields = {}
    fields['metric_dimensions'] = dimensions
    monasca_client = client.Client(api_version, **creds)
    body = monasca_client.alarms.list(**fields)
    # Process retrieved alarms.
    # Monasca has state and severity; these are mapped to Nagios values as:
    #   State ALARM and severity LOW/MEDIUM  -> Nagios Warning
    #   State UNDETERMINED                   -> Nagios Warning
    #   State ALARM and severity HIGH        -> Nagios Critical
    for alarm in body:
        # RESOLVED alarms are ignored regardless of state/severity.
        if alarm['lifecycle_state'] != "RESOLVED":
            if (alarm['state'] == "ALARM" and
                    (alarm['alarm_definition']['severity'] == "LOW" or
                     alarm['alarm_definition']['severity'] == "MEDIUM")):
                warns += 1
                warns_output += format_alarm(alarm)
            if alarm['state'] == "UNDETERMINED":
                warns += 1
                warns_output += format_alarm(alarm)
            if alarm['state'] == "ALARM" and alarm['alarm_definition']['severity'] == "HIGH":
                crits += 1
                crits_output += format_alarm(alarm)
    if warns == 0 and crits == 0:
        print("OK")
        return
    elif warns > 0 and crits == 0:
        print(str(warns) + " WARNING - " + warns_output)
        sys.exit(1)
    elif crits > 0:
        print(str(crits) + " CRITICAL - " + crits_output + str(warns) + " WARNING - " + warns_output)
        sys.exit(2)
if __name__ == "__main__":
    main(sys.argv[1:])
| StarcoderdataPython |
1823170 | <reponame>jshower/models
#Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import sys
import time
import argparse
import unittest
import contextlib
import numpy as np
import paddle.fluid as fluid
import utils, metric, configs
import models
from pretrained_word2vec import Glove840B_300D
# Command line: which model class to instantiate and which config preset to use.
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--model_name', type=str, default='cdssmNet', help="Which model to train")
parser.add_argument('--config', type=str, default='cdssm_base', help="The global config setting")
# Datasets / pretrained embeddings are cached under ~/.cache/paddle/dataset
DATA_DIR = os.path.join(os.path.expanduser('~'), '.cache/paddle/dataset')
def evaluate(epoch_id, exe, inference_program, dev_reader, test_reader, fetch_list, feeder, metric_type):
    """
    evaluate on test/dev dataset

    Runs ``inference_program`` over both readers and prints the average
    cost plus every metric named in ``metric_type``
    ('accuracy' / 'accuracy_with_threshold').
    """
    def infer(test_reader):
        """
        do inference function

        Returns (average cost, [(metric_name, value), ...]) over one reader.
        """
        total_cost = 0.0
        total_count = 0
        preds, labels = [], []
        for data in test_reader():
            avg_cost, avg_acc, batch_prediction = exe.run(inference_program,
                                                          feed=feeder.feed(data),
                                                          fetch_list=fetch_list,
                                                          return_numpy=True)
            # Costs are batch averages, so weight by the batch size.
            total_cost += avg_cost * len(data)
            total_count += len(data)
            preds.append(batch_prediction)
            # The label is the last element of each sample tuple.
            labels.append(np.asarray([x[-1] for x in data], dtype=np.int64))
        y_pred = np.concatenate(preds)
        y_label = np.concatenate(labels)
        metric_res = []
        for metric_name in metric_type:
            if metric_name == 'accuracy_with_threshold':
                metric_res.append((metric_name, metric.accuracy_with_threshold(y_pred, y_label, threshold=0.3)))
            elif metric_name == 'accuracy':
                metric_res.append((metric_name, metric.accuracy(y_pred, y_label)))
            else:
                print("Unknown metric type: ", metric_name)
                exit()
        return total_cost / (total_count * 1.0), metric_res
    dev_cost, dev_metric_res = infer(dev_reader)
    print("[%s] epoch_id: %d, dev_cost: %f, " % (
        time.asctime( time.localtime(time.time()) ),
        epoch_id,
        dev_cost)
        + ', '.join([str(x[0]) + ": " + str(x[1]) for x in dev_metric_res]))
    test_cost, test_metric_res = infer(test_reader)
    print("[%s] epoch_id: %d, test_cost: %f, " % (
        time.asctime( time.localtime(time.time()) ),
        epoch_id,
        test_cost)
        + ', '.join([str(x[0]) + ": " + str(x[1]) for x in test_metric_res]))
    print("")
def train_and_evaluate(train_reader,
                       dev_reader,
                       test_reader,
                       network,
                       optimizer,
                       global_config,
                       pretrained_word_embedding,
                       use_cuda,
                       parallel):
    """
    train network

    Builds the fluid program for ``network``, optionally loads a
    pretrained word embedding into the 'emb.w' parameter, then trains for
    ``global_config.epoch_num`` epochs, saving an inference model and
    evaluating on dev/test after each epoch.
    """
    # define the net
    if global_config.use_lod_tensor:
        # automatic add batch dim
        q1 = fluid.layers.data(
            name="question1", shape=[1], dtype="int64", lod_level=1)
        q2 = fluid.layers.data(
            name="question2", shape=[1], dtype="int64", lod_level=1)
        label = fluid.layers.data(name="label", shape=[1], dtype="int64")
        cost, acc, prediction = network(q1, q2, label)
    else:
        # Padded-tensor variant needs explicit masks.
        # shape: [batch_size, max_seq_len_in_batch, 1]
        q1 = fluid.layers.data(
            name="question1", shape=[-1, -1, 1], dtype="int64")
        q2 = fluid.layers.data(
            name="question2", shape=[-1, -1, 1], dtype="int64")
        # shape: [batch_size, max_seq_len_in_batch]
        mask1 = fluid.layers.data(name="mask1", shape=[-1, -1], dtype="float32")
        mask2 = fluid.layers.data(name="mask2", shape=[-1, -1], dtype="float32")
        label = fluid.layers.data(name="label", shape=[1], dtype="int64")
        cost, acc, prediction = network(q1, q2, mask1, mask2, label)
    if parallel:
        # TODO: Parallel Training
        print("Parallel Training is not supported for now.")
        sys.exit(1)
    #optimizer.minimize(cost)
    if use_cuda:
        print("Using GPU")
        place = fluid.CUDAPlace(0)
    else:
        print("Using CPU")
        place = fluid.CPUPlace()
    exe = fluid.Executor(place)
    if global_config.use_lod_tensor:
        feeder = fluid.DataFeeder(feed_list=[q1, q2, label], place=place)
    else:
        feeder = fluid.DataFeeder(feed_list=[q1, q2, mask1, mask2, label], place=place)
    # logging param info
    for param in fluid.default_main_program().global_block().all_parameters():
        print("param name: %s; param shape: %s" % (param.name, param.shape))
    # define inference_program: cloned *before* the optimizer ops are added,
    # so the saved model contains only forward computation.
    inference_program = fluid.default_main_program().clone(for_test=True)
    optimizer.minimize(cost)
    exe.run(fluid.default_startup_program())
    # load emb from a numpy array
    if pretrained_word_embedding is not None:
        print("loading pretrained word embedding to param")
        embedding_name = "emb.w"
        embedding_param = fluid.global_scope().find_var(embedding_name).get_tensor()
        embedding_param.set(pretrained_word_embedding, place)
    # Baseline evaluation before any training (epoch -1).
    evaluate(-1,
             exe,
             inference_program,
             dev_reader,
             test_reader,
             fetch_list=[cost, acc, prediction],
             feeder=feeder,
             metric_type=global_config.metric_type)
    # start training
    print("[%s] Start Training" % time.asctime(time.localtime(time.time())))
    for epoch_id in range(global_config.epoch_num):
        data_size, data_count, total_acc, total_cost = 0, 0, 0.0, 0.0
        batch_id = 0
        epoch_begin_time = time.time()
        for data in train_reader():
            avg_cost_np, avg_acc_np = exe.run(fluid.default_main_program(),
                                              feed=feeder.feed(data),
                                              fetch_list=[cost, acc])
            data_size = len(data)
            # Weight the batch averages by batch size for epoch statistics.
            total_acc += data_size * avg_acc_np
            total_cost += data_size * avg_cost_np
            data_count += data_size
            if batch_id % 100 == 0:
                print("[%s] epoch_id: %d, batch_id: %d, cost: %f, acc: %f" % (
                    time.asctime(time.localtime(time.time())),
                    epoch_id,
                    batch_id,
                    avg_cost_np,
                    avg_acc_np))
            batch_id += 1
        avg_cost = total_cost / data_count
        avg_acc = total_acc / data_count
        print("")
        print("[%s] epoch_id: %d, train_avg_cost: %f, train_avg_acc: %f, epoch_time_cost: %f" % (
            time.asctime( time.localtime(time.time())),
            epoch_id, avg_cost, avg_acc,
            time.time() - epoch_begin_time))
        # Persist an inference snapshot for this epoch.
        epoch_model = global_config.save_dirname + "/" + "epoch" + str(epoch_id)
        fluid.io.save_inference_model(epoch_model, ["question1", "question2", "label"], acc, exe)
        evaluate(epoch_id,
                 exe,
                 inference_program,
                 dev_reader,
                 test_reader,
                 fetch_list=[cost, acc, prediction],
                 feeder=feeder,
                 metric_type=global_config.metric_type)
def main():
    """
    This function will parse argments, prepare data and prepare pretrained embedding

    Entry point: builds the model/config chosen on the command line, loads
    the Quora question-pairs readers (and optionally GloVe vectors), then
    delegates to train_and_evaluate().
    """
    args = parser.parse_args()
    # Config/model names index into the module dicts, e.g. configs.cdssm_base.
    global_config = configs.__dict__[args.config]()
    print("net_name: ", args.model_name)
    net = models.__dict__[args.model_name](global_config)
    # get word_dict
    word_dict = utils.getDict(data_type="quora_question_pairs")
    # get reader
    train_reader, dev_reader, test_reader = utils.prepare_data(
        "quora_question_pairs",
        word_dict=word_dict,
        batch_size = global_config.batch_size,
        buf_size=800000,
        duplicate_data=global_config.duplicate_data,
        use_pad=(not global_config.use_lod_tensor))
    # load pretrained_word_embedding
    if global_config.use_pretrained_word_embedding:
        word2vec = Glove840B_300D(filepath=os.path.join(DATA_DIR, "glove.840B.300d.txt"),
                                  keys=set(word_dict.keys()))
        pretrained_word_embedding = utils.get_pretrained_word_embedding(
            word2vec=word2vec,
            word2id=word_dict,
            config=global_config)
        print("pretrained_word_embedding to be load:", pretrained_word_embedding)
    else:
        pretrained_word_embedding = None
    # define optimizer
    optimizer = utils.getOptimizer(global_config)
    # use cuda or not: fall back to the CUDA_VISIBLE_DEVICES env var when
    # the config does not pin it explicitly.
    if not global_config.has_member('use_cuda'):
        if 'CUDA_VISIBLE_DEVICES' in os.environ and os.environ['CUDA_VISIBLE_DEVICES'] != '':
            global_config.use_cuda = True
        else:
            global_config.use_cuda = False
    global_config.list_config()
    train_and_evaluate(
        train_reader,
        dev_reader,
        test_reader,
        net,
        optimizer,
        global_config,
        pretrained_word_embedding,
        use_cuda=global_config.use_cuda,
        parallel=False)
if __name__ == "__main__":
    main()
| StarcoderdataPython |
1619933 | <gh_stars>0
from RemoveWindowsLockScreenAds.RemoveWindowsLockScreenAds import GetAdSettingsDirectory, AdRemover
| StarcoderdataPython |
# This script will track two lists through a 3-D printing process
# Source code/inspiration/software
# Python Crash Course by <NAME>, Chapter 8, example 8+
# Made with Mu 1.0.3 in October 2021

# Queue of designs waiting to be 3-D printed.
unprinted_designs = ['iphone case', 'robot pendant', 'dodecahedron']
for design in unprinted_designs:
    print("The following model will be printed: " + design)
completed_models = []
print("\n ")
# Move each design from the queue to the completed list as it "prints"
# (pop() empties the queue from the end).
while unprinted_designs:
    current_design = unprinted_designs.pop()
    print("Printing model: " + current_design)
    completed_models.append(current_design)
# Report everything that was printed.
print("\nThe following models have been printed:")
for model in completed_models:
    print(model)
| StarcoderdataPython |
9687999 | <reponame>goubertbrent/oca-backend
# -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
from mcfw.properties import unicode_property, long_property, typed_property
from rogerthat.to import ReturnStatusTO
class GrantTO(object):
    """Transfer object: one role grant of a user on a service identity."""
    service_email = unicode_property('0')
    identity = unicode_property('1')
    user_email = unicode_property('2')
    user_name = unicode_property('3')
    user_avatar_id = long_property('4')
    role_type = unicode_property('5')
    role_id = long_property('6')
    role = unicode_property('7')
    app_id = unicode_property('8')
class RoleTO(object):
    """Transfer object for a service role."""
    id = long_property('1')
    name = unicode_property('2')
    creation_time = long_property('3')
    type = unicode_property('4')

    @staticmethod
    def fromServiceRole(service_role):
        # Map a ServiceRole model instance onto a RoleTO.
        r = RoleTO()
        r.id = service_role.role_id
        r.name = service_role.name
        r.creation_time = service_role.creationTime
        r.type = service_role.type
        return r
class RolesReturnStatusTO(ReturnStatusTO):
    """ReturnStatusTO extended with the roles of one service identity."""
    roles = typed_property('51', RoleTO, True)
    service_identity_email = unicode_property('52')

    @classmethod
    def create(cls, success=True, errormsg=None, roles=None, service_identity_email=None):
        r = super(RolesReturnStatusTO, cls).create(success, errormsg)
        # Build a fresh list when omitted (avoids a shared mutable default).
        r.roles = list() if roles is None else roles
        r.service_identity_email = service_identity_email
        return r
| StarcoderdataPython |
4800896 | # Generated by Django 3.1.2 on 2021-07-29 18:25
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Relax Menu.pid and Menu.role: both fields become optional."""

    dependencies = [
        ('permission', '0004_auto_20210729_1815'),
    ]

    operations = [
        migrations.AlterField(
            model_name='menu',
            name='pid',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='permission.menu', verbose_name='父节点'),
        ),
        migrations.AlterField(
            model_name='menu',
            name='role',
            field=models.ManyToManyField(blank=True, to='permission.Role', verbose_name='权限角色'),
        ),
    ]
| StarcoderdataPython |
11252280 | from robot import logging,Conversation
import numpy as np
logger = logging.getLogger(__name__)
def active():
    """Count wake-word activations per day.

    Parses the activation log (one "...: YYYY-MM-DD HH:MM:SS" line per
    activation, as written by robot.logging) and returns a dict mapping
    the date string to its activation count, e.g.
    {'2020-03-23': 33, '2020-03-25': 1, ...}.

    Fixes over the original: drops the needless ``global times, cuple``
    (cuple was never defined), the pile of unused scratch lists, and the
    O(n^2) ``list.count`` over ``np.unique`` in favour of a single pass.
    """
    logs = logging.read_active_log().splitlines()
    counts = {}
    for line in logs:
        # Everything after ': ' is the timestamp; keep only the date part.
        timestamp = line.split(': ')[1]
        day = timestamp.split(' ')[0]
        counts[day] = counts.get(day, 0) + 1
    return counts
1888405 | <reponame>MihailMarkovski/Python-Advanced-2020<filename>Exams/Exam _27_June_2020/03_list_manipulator.py
from collections import deque
def list_manipulator(numbers, command, side, *args):
    """Add or remove elements at either end of *numbers*.

    command: 'add'    -> concatenate *args* at the 'beginning' or 'end'
             'remove' -> drop args[0] elements (default 1) from that side
    Returns a new list; *numbers* itself is never mutated.
    Raises IndexError when asked to remove more elements than present
    (same as the original popleft/pop loops).
    """
    if command == 'add':
        extras = list(args)
        return extras + numbers if side == 'beginning' else numbers + extras
    result = deque(numbers)
    if command == 'remove':
        count = args[0] if args else 1
        # Bind the side-specific O(1) deque operation once.
        remove_one = result.popleft if side == 'beginning' else result.pop
        for _ in range(count):
            remove_one()
    return list(result)
# Demo / smoke test of every command-side combination.
print(list_manipulator([1, 2, 3], "remove", "end"))
print(list_manipulator([1, 2, 3], "remove", "beginning"))
print(list_manipulator([1, 2, 3], "add", "beginning", 20))
print(list_manipulator([1, 2, 3], "add", "end", 30))
print(list_manipulator([1, 2, 3], "remove", "end", 2))
print(list_manipulator([1, 2, 3], "remove", "beginning", 2))
print(list_manipulator([1, 2, 3], "add", "beginning", 20, 30, 40))
print(list_manipulator([1, 2, 3], "add", "end", 30, 40, 50))
| StarcoderdataPython |
1734527 | # package org.apache.helix.store
#from org.apache.helix.store import *
class PropertyStat:
    """Modification metadata for a property-store entry.

    Port of org.apache.helix.store.PropertyStat: tracks the last-modified
    time and the version number of a stored property.

    Fixes over the original (a literal Java translation): the duplicate
    ``__init__`` definitions (the first called the non-existent
    ``this(0, 0)`` and was silently shadowed anyway) are merged into one
    constructor with defaults, and all attribute accesses go through
    ``self`` instead of assigning unrelated locals.
    """

    def __init__(self, lastModifiedTime=0, version=0):
        # Default arguments cover both Java overloads:
        # PropertyStat() and PropertyStat(time, version).
        self._lastModifiedTime = lastModifiedTime
        self._version = version

    def getLastModifiedTime(self):
        """Return the last-modified time (long)."""
        return self._lastModifiedTime

    def getVersion(self):
        """Return the version number (int)."""
        return self._version

    def setLastModifiedTime(self, lastModifiedTime):
        """Set the last-modified time (long)."""
        self._lastModifiedTime = lastModifiedTime

    def setVersion(self, version):
        """Set the version number (int)."""
        self._version = version
| StarcoderdataPython |
157395 | from pysal.common import simport, requires
from pysal.cg import asShape
from pysal.contrib import pdutilities as pdio
from pysal.core import FileIO
import pandas as pd
class Namespace(object):
    # Bare attribute bag; reader/writer callables are attached to an
    # instance's __dict__ at the bottom of this module.
    pass
@requires('geopandas')
def geopandas(filename, **kw):
    """Read *filename* via geopandas.read_file (function deliberately
    shadows the module name; the real module is imported locally)."""
    import geopandas
    return geopandas.read_file(filename, **kw)
@requires('fiona')
def fiona(filename, **kw):
    """Read *filename* with fiona into a DataFrame, one row per feature.

    Each row carries the feature's properties plus a 'geometry' column
    holding a PySAL shape; the row index is the feature id (coerced to
    int when possible).
    """
    import fiona
    props = {}
    with fiona.open(filename, **kw) as f:
        for i,feat in enumerate(f):
            # Fall back to the enumeration index when the feature has no id.
            idx = feat.get('id', i)
            try:
                idx = int(idx)
            except ValueError:
                # Non-numeric feature ids are kept as strings.
                pass
            props.update({idx:feat.get('properties', dict())})
            props[idx].update({'geometry':asShape(feat['geometry'])})
    return pd.DataFrame().from_dict(props).T
# Registry of reader/writer callables exposed through the Namespace below.
_readers = {'read_shapefile':pdio.read_files,
            'read_fiona':fiona}
_writers = {'to_shapefile':pdio.write_files}
# Pull in every pandas read_* entry point (read_csv, read_json, ...).
_pandas_readers = {k:v for k,v in pd.io.api.__dict__.items() if k.startswith('read_')}
# Public access point: readers.read_shapefile(...), readers.read_csv(...), etc.
readers = Namespace()
readers.__dict__.update(_readers)
readers.__dict__.update(_pandas_readers)
| StarcoderdataPython |
3295187 | <filename>sensor.community/real-time-plotting.py
#setting up packages
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
import plotly.express as px
import datetime
import wget
import os
# Today's date as ISO 'YYYY-MM-DD'; used in both the URL and the local filename.
date = datetime.date.today()
date = str(date)
filename = 'data-esp8266-12776407-'+ date + '.csv'
#Empty arrays for plotting in real-time
x_vals = []
y1_vals = []
y2_vals = []
files = os.listdir()  # NOTE(review): result is never used
# Initial download of today's sensor CSV from the madavi.de archive.
wget.download('https://api-rrd.madavi.de/data_csv/csv-files/'+ date + '/data-esp8266-12776407-'+ date + '.csv')
def animate(i):
    """FuncAnimation callback: re-download today's CSV and redraw both series.

    NOTE(review): deletes and re-fetches the whole file on every frame
    (once per second with interval=1000) — heavy on the madavi.de API.
    """
    os.remove(filename)
    wget.download('https://api-rrd.madavi.de/data_csv/csv-files/'+ date + '/data-esp8266-12776407-'+ date + '.csv')
    feinstaub = pd.read_csv("data-esp8266-12776407-" + date + ".csv", sep=";")
    x_vals=feinstaub['Time']
    y1_vals=feinstaub['SDS_P1']
    y2_vals=feinstaub['SDS_P2']
    plt.cla()
    plt.plot(x_vals, y1_vals, label='PM2.5')
    plt.plot(x_vals, y2_vals, label='PM10.0')
    plt.xticks([])  # timestamps are far too dense to label
    plt.legend(loc='upper right')
    plt.tight_layout()
if __name__ == "__main__":
    # Redraw every second; keep the FuncAnimation reference alive until show().
    ani = FuncAnimation(plt.gcf(), animate, interval=1000)
    plt.show()
# try using plotly and dash
6471916 | import glob
import os
import re
import yaml
from oonib import errors as e
from oonib.handlers import OONIBHandler
from oonib import log
from oonib.config import config
class DeckDescHandler(OONIBHandler):
    """Serve the metadata (name, description, ...) of a single deck."""
    def get(self, deckID):
        # note:
        # we don't have to sanitize deckID, because it's already checked
        # against matching a certain pattern in the handler.
        bn = os.path.basename(deckID + '.desc')
        try:
            f = open(os.path.join(config.main.deck_dir, bn))
        except IOError:
            log.err("Deck %s missing" % deckID)
            raise e.MissingDeck
        with f:
            deckDesc = yaml.safe_load(f)
        # Only whitelisted keys are echoed back to the client.
        response = {}
        for k in ['name', 'description', 'version', 'author', 'date']:
            try:
                response[k] = deckDesc[k]
            except KeyError:
                log.err("Deck %s missing required keys!" % deckID)
                raise e.MissingDeckKeys
        self.write(response)
class DeckListHandler(OONIBHandler):
    """Serve the list of all decks available on this collector."""
    def get(self):
        if not config.main.deck_dir:
            self.set_status(501)
            raise e.NoDecksConfigured
        path = os.path.abspath(config.main.deck_dir) + "/*"
        decknames = map(os.path.basename, glob.iglob(path))
        # Deck ids are 64-char sha256 hex digests with a .desc extension.
        decknames = filter(lambda y: re.match("[a-z0-9]{64}.desc", y),
                           decknames)
        deckList = []
        for deckname in decknames:
            with open(os.path.join(config.main.deck_dir, deckname)) as f:
                d = yaml.safe_load(f)
                deckList.append({
                    'id': deckname,
                    'name': d['name'],
                    'description': d['description']
                })
        self.write(deckList)
| StarcoderdataPython |
6586447 | # Image processing library functions
import numpy as np
from img_lib import list_images, rotate_image, translate_image, shear_image
from img_lib import change_brightness_image, motion_blur_image, scale_image
def random_transform_image(image):
    """With probability 1/2, apply one randomly chosen img_lib transform.

    The candidate transforms (rotation, translation, shear, brightness,
    motion blur, scale) are chosen uniformly; otherwise the image is
    returned untouched.
    """
    # Coin flip: half of the images pass through unchanged.
    if np.random.randint(2) == 0:
        return image
    # Dispatch table replaces the original string-keyed if-chain; the
    # order matches the original transformation_library list, so the RNG
    # call sequence and the uniform choice over six transforms are
    # unchanged.
    transforms = (
        rotate_image,
        translate_image,
        shear_image,
        change_brightness_image,
        motion_blur_image,
        scale_image,
    )
    return transforms[np.random.randint(len(transforms))](image)
def data_augmentation(X_data):
    """Randomly transform every image, reshaped to (N, 32, 32, channels)."""
    augmented = [random_transform_image(img) for img in X_data]
    return np.array(augmented).reshape(X_data.shape[0], 32, 32, -1)
| StarcoderdataPython |
9633248 | <reponame>butaihuiwan/meiduo_demo<gh_stars>0
from django.conf import settings
from django.contrib.auth.models import AbstractUser
from django.db import models
from itsdangerous import TimedJSONWebSignatureSerializer, BadData
# Create your models here.
class User(AbstractUser):
    """Custom user model.

    Extends Django's AbstractUser with a unique mobile number and an
    email-activation flag, plus helpers to build and verify the signed
    email-verification link.
    """
    # Unique 11-digit mobile number.
    mobile = models.CharField(max_length=11, unique=True,verbose_name='手机号')
    # Whether the email address has been verified via the signed link.
    email_active = models.BooleanField(default=False,verbose_name='邮箱是否激活')

    class Meta:
        db_table = 'tb_users'
        verbose_name = '用户'
        verbose_name_plural = verbose_name

    def __str__(self):
        # Display the username in admin/shell output.
        return self.username

    def generate_verify_email_url(self):
        """
        Build the email-verification link for this user.

        :return: verify_url -- EMAIL_VERIFY_URL plus a signed token that
            encodes this user's id and email (valid for one day)
        """
        # itsdangerous serializer signed with SECRET_KEY; 1-day expiry.
        serializer = TimedJSONWebSignatureSerializer(settings.SECRET_KEY,
                                                     expires_in=60 * 60 * 24)
        # Payload to sign.
        data = {'user_id': self.id, 'email': self.email}
        # dumps() returns bytes; decode to str for URL building.
        token = serializer.dumps(data).decode()
        # Final clickable link.
        verify_url = settings.EMAIL_VERIFY_URL + '?token=' + token
        return verify_url

    @staticmethod
    def check_cerify_emile_token(token):
        """
        Verify a token from the verification link and return its user.

        NOTE(review): method name contains typos (cerify/emile); kept
        as-is because existing callers reference it.

        :param token: the signed payload from the verification link
        :return: the matching User on success, otherwise None
        """
        # Must mirror the generation parameters: SECRET_KEY, 1-day expiry.
        serializer = TimedJSONWebSignatureSerializer(settings.SECRET_KEY, 60 * 60 * 24)
        try:
            # Decode and validate the signature and expiry.
            data = serializer.loads(token)
        except BadData:
            return None
        else:
            user_id = data.get('user_id')
            email = data.get('email')
            # Both id and email must match a stored user.
            try:
                user = User.objects.get(id=user_id,email=email)
            except User.DoesNotExist:
                return None
            else:
                return user
9781618 | <filename>CPAC/randomise/pipeline.py
from CPAC.pipeline import nipype_pipeline_engine as pe
import nipype.interfaces.utility as util
from nipype.interfaces.base import BaseInterface, BaseInterfaceInputSpec, traits, File, TraitedSpec
from nipype.interfaces import fsl
from nilearn import input_data, masking, image, datasets
from nilearn.image import resample_to_img, concat_imgs
from nilearn.input_data import NiftiMasker, NiftiLabelsMasker
from CPAC.utils.interfaces.function import Function
import os
import copy
import numpy as np
import nibabel as nb
def create_randomise(name='randomise',working_dir=None,crash_dir=None):
    """
    Build a group-level FSL ``randomise`` workflow with TFCE and
    cluster-based reporting.

    Parameters
    ----------
    name : str
        Name of the workflow.
    working_dir : str, optional
        Nipype working directory (default: ./Randomise_work_dir).
    crash_dir : str, optional
        Crashdump directory (default: ./Randomise_crash_dir).

    Returns
    -------
    workflow : nipype.pipeline.engine.Workflow
        Randomise workflow.

    Notes
    -----
    Workflow Inputs::

        inputspec.subjects : merged 4D file of subject-level maps
        inputspec.design_matrix_file / inputspec.constrast_file
        inputspec.permutations : number of permutations

    Workflow Outputs::

        outputspec.tstat_files / outputspec.t_corrected_p_files plus the
        selected/thresholded maps and the fsl.Cluster report files.
    """
    if not working_dir:
        working_dir = os.path.join(os.getcwd(), 'Randomise_work_dir')
    if not crash_dir:
        crash_dir = os.path.join(os.getcwd(), 'Randomise_crash_dir')

    wf = pe.Workflow(name=name)
    wf.base_dir = working_dir
    wf.config['execution'] = {'hash_method': 'timestamp',
                              'crashdump_dir': os.path.abspath(crash_dir)}

    # BUG FIX: the fields actually connected below ('subjects',
    # 'design_matrix_file', 'constrast_file') were missing from the
    # IdentityInterface, which would make wf.connect() fail at build time.
    # The originally declared (currently unused) fields are kept.
    inputspec = pe.Node(util.IdentityInterface(
        fields=['subjects', 'design_matrix_file', 'constrast_file',
                'subjects_list', 'pipeline_output_folder', 'permutations',
                'mask_boolean', 'demean', 'c_thresh']),
        name='inputspec')
    # BUG FIX: added the previously undeclared destination fields
    # ('out_tcorr_corrected', 'out_tstat_corrected', 'thresh_out',
    # 'thresh_bin_out') and replaced the invalid field name
    # 'rando_pipe_thresh_tstat.nii.gz' with 'thresh_out'.
    outputspec = pe.Node(util.IdentityInterface(
        fields=['tstat_files', 't_corrected_p_files',
                'out_tcorr_corrected', 'out_tstat_corrected',
                'thresh_out', 'thresh_bin_out',
                'index_file', 'threshold_file', 'localmax_txt_file',
                'localmax_vol_file', 'max_file', 'mean_file', 'pval_file',
                'size_file']),
        name='outputspec')

    # Non-parametric permutation testing with TFCE.
    randomise = pe.Node(interface=fsl.Randomise(), name='randomise')
    randomise.inputs.base_name = "randomise"
    randomise.inputs.demean = True
    randomise.inputs.tfce = True
    wf.connect([(inputspec, randomise, [('subjects', 'in_file'),
                                        ('design_matrix_file', 'design_mat'),
                                        ('constrast_file', 'tcon'),
                                        ('permutations', 'num_perm'),
                                        ])])
    wf.connect(randomise, 'tstat_files', outputspec, 'tstat_files')
    wf.connect(randomise, 't_corrected_p_files', outputspec, 't_corrected_p_files')

    # Pick a single file out of each list-valued randomise output.
    select_tcorrp_files = pe.Node(Function(input_names=['input_list'],
                                           output_names=['out_file'],
                                           function=select),
                                  name='select_t_corrp')
    wf.connect(randomise, 't_corrected_p_files', select_tcorrp_files, 'input_list')
    wf.connect(select_tcorrp_files, 'out_file', outputspec, 'out_tcorr_corrected')

    select_tstat_files = pe.Node(Function(input_names=['input_list'],
                                          output_names=['out_file'],
                                          function=select),
                                 name='select_t_stat')
    wf.connect(randomise, 'tstat_files', select_tstat_files, 'input_list')
    wf.connect(select_tstat_files, 'out_file', outputspec, 'out_tstat_corrected')

    # Keep voxels surviving the 0.95 threshold on the selected stat image.
    thresh = pe.Node(interface=fsl.Threshold(), name='fsl_threshold_contrast')
    thresh.inputs.thresh = 0.95
    thresh.inputs.out_file = 'rando_pipe_thresh_tstat.nii.gz'
    wf.connect(select_tstat_files, 'out_file', thresh, 'in_file')
    wf.connect(thresh, 'out_file', outputspec, 'thresh_out')

    # Binarise the thresholded map to use it as a mask.
    thresh_bin = pe.Node(interface=fsl.UnaryMaths(), name='fsl_threshold_bin_contrast')
    thresh_bin.inputs.operation = 'bin'
    wf.connect(thresh, 'out_file', thresh_bin, 'in_file')
    wf.connect(thresh_bin, 'out_file', outputspec, 'thresh_bin_out')

    apply_mask = pe.Node(interface=fsl.ApplyMask(), name='fsl_applymask_contrast')
    wf.connect(select_tstat_files, 'out_file', apply_mask, 'in_file')
    wf.connect(thresh_bin, 'out_file', apply_mask, 'mask_file')

    # Cluster report on the masked stat image.
    cluster = pe.Node(interface=fsl.Cluster(), name='cluster_contrast')
    cluster.inputs.threshold = 0.0001
    cluster.inputs.out_index_file = "index_file"
    cluster.inputs.out_localmax_txt_file = "lmax_contrast.txt"
    cluster.inputs.out_threshold_file = True
    cluster.inputs.out_max_file = True
    cluster.inputs.out_mean_file = True
    cluster.inputs.out_pval_file = True
    # BUG FIX: out_size_file was assigned twice (a filename, then True);
    # only the boolean survived, so the dead first assignment was dropped.
    cluster.inputs.out_size_file = True
    wf.connect(apply_mask, 'out_file', cluster, 'in_file')
    wf.connect(cluster, 'index_file', outputspec, 'index_file')
    wf.connect(cluster, 'threshold_file', outputspec, 'threshold_file')
    wf.connect(cluster, 'localmax_txt_file', outputspec, 'localmax_txt_file')
    wf.connect(cluster, 'localmax_vol_file', outputspec, 'localmax_vol_file')
    wf.connect(cluster, 'max_file', outputspec, 'max_file')
    # BUG FIX: destination field was misspelled 'meal_file'.
    wf.connect(cluster, 'mean_file', outputspec, 'mean_file')
    wf.connect(cluster, 'pval_file', outputspec, 'pval_file')
    wf.connect(cluster, 'size_file', outputspec, 'size_file')
    return wf
| StarcoderdataPython |
#!/usr/bin/env python3
"""ply lexer skeleton for Python-style string/bytes literals."""
from ply import lex

_SINGLE_QUOTE = "'"
_DOUBLE_QUOTE = '"'

tokens = (
    'IMPORT_EXPRESSION_START',
    'IMPORT_EXPRESSION_END',
    'STRING_OR_BYTES_LITERAL',
    'STRING_LITERAL',
    'BYTES_LITERAL',
    'STRING_PREFIX',
    'SHORT_STRING',
    'LONG_STRING',
    'SHORT_STRING_ITEM',
    'LONG_STRING_ITEM',
    'SHORT_STRING_CHAR',
    'LONG_STRING_CHAR',
    'STRING_ESCAPE_SEQ',
    'BYTES_PREFIX',
    'SHORT_BYTES',
    # BUG FIX: a missing comma here made implicit string concatenation
    # merge 'LONG_BYTES' and 'SHORT_BYTES_ITEM' into one bogus token name.
    'LONG_BYTES',
    'SHORT_BYTES_ITEM',
    'LONG_BYTES_ITEM',
    'SHORT_BYTES_CHAR',
    'LONG_BYTES_CHAR',
    'BYTES_ESCAPE_SEQ',
)

states = (
    ('in_short_string', 'exclusive'),
    ('in_long_string', 'exclusive'),
    ('in_short_bytes', 'exclusive'),
    # NOTE(review): doubled suffix looks like a typo for 'in_long_bytes';
    # kept as-is because state names are referenced by rule-function names
    # defined later in this file.
    ('in_long_bytes_bytes', 'exclusive'),
)

# BUG FIX: t_SHORT_STRING_ITEM must be defined before it is used below
# (the original order raised NameError at import time).
t_SHORT_STRING_ITEM = r'[^\n\\]'
t_SHORT_STRING = (
    _SINGLE_QUOTE + t_SHORT_STRING_ITEM + _SINGLE_QUOTE
    + '|' + _DOUBLE_QUOTE + t_SHORT_STRING_ITEM + _DOUBLE_QUOTE
)
| StarcoderdataPython |
57176 | <gh_stars>1-10
from django.apps import AppConfig
class SupplementaryContentConfig(AppConfig):
    """Django app config for the supplementary_content app."""
    name = "supplementary_content"
    verbose_name = "Supplementary content for regulations"
| StarcoderdataPython |
91942 | # -*- coding: utf-8 -*-
from tests.utils import fix_bs4_parsing_spaces
from tests.data.dummy import LINKS
def test_anchor_format():
    """Test annotate elements with default and manipulated config.

    Monkeypatches ``elements.format_element`` so every applied link is
    rendered as the literal string 'RUMBLE', then checks the link budget
    (replaces_at_all) and the resulting markup; restores the original
    formatter at the end.
    """
    # 15 candidate links, three per "letter" type.
    RLINKS = [
        {"A": {"type": "letterA", "score": 42}},
        {"AA": {"type": "letterA", "score": 42}},
        {"AAA": {"type": "letterA", "score": 42}},
        {"B": {"type": "letterB", "score": 42}},
        {"BB": {"type": "letterB", "score": 42}},
        {"BBB": {"type": "letterB", "score": 42}},
        {"C": {"type": "letterC", "score": 42}},
        {"CC": {"type": "letterC", "score": 42}},
        {"CCC": {"type": "letterC", "score": 42}},
        {"D": {"type": "letterD", "score": 42}},
        {"DD": {"type": "letterD", "score": 42}},
        {"DDD": {"type": "letterD", "score": 42}},
        {"E": {"type": "letterE", "score": 42}},
        {"EE": {"type": "letterE", "score": 42}},
        {"EEE": {"type": "letterE", "score": 42}}]
    RTEXT = """<div>
<p id="1">lala A la lala AA BB B la C lalala DDD D E</p>
<p id="2">la E EE AA lal CC C la la BB la DD D lala EE la</p>
<p id="3">B la BB EEE A la CCC B la DDD C lala AAA D la BBB E</p>
</div>"""
    from anchorman import elements

    def my_format_element(a, b, c):
        # Replacement formatter: every match renders to a fixed marker.
        return "RUMBLE"

    # Keep a reference to the original formatter so it can be restored.
    import copy
    newobj = copy.copy(elements.format_element)
    elements.format_element = my_format_element
    from anchorman import annotate, clean, get_config
    cfg = get_config()
    cfg['settings']['return_applied_links'] = True
    number_of_links_to_apply = 5
    cfg['rules']['replaces_at_all'] = number_of_links_to_apply
    cfg['markup']['decorate'] = {
        'tag': 'span'
    }
    annotated, applied, rest = annotate(RTEXT, RLINKS, config=cfg)
    assert len(applied) == number_of_links_to_apply
    expected = """<div>
<p id="1">lala RUMBLE la lala RUMBLE RUMBLE RUMBLE la RUMBLE lalala RUMBLE RUMBLE RUMBLE</p>
<p id="2">la RUMBLE RUMBLE RUMBLE lal RUMBLE RUMBLE la la RUMBLE la RUMBLE RUMBLE lala RUMBLE la</p>
<p id="3">RUMBLE la RUMBLE RUMBLE RUMBLE la RUMBLE RUMBLE la RUMBLE RUMBLE lala RUMBLE RUMBLE la RUMBLE RUMBLE</p>
</div>"""
    # Normalise bs4 whitespace quirks on both sides before comparing.
    from tests.utils import fix_bs4_parsing_spaces, compare_results
    a = fix_bs4_parsing_spaces(annotated)
    b = fix_bs4_parsing_spaces(expected)
    # compare_results(a, b)
    assert a == b
    # Restore the original formatter for subsequent tests.
    elements.format_element = newobj
| StarcoderdataPython |
9765759 | <gh_stars>10-100
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import with_statement
import unittest
from webkitpy.common.system.directoryfileset import DirectoryFileSet
from webkitpy.common.system.filesystem_mock import MockFileSystem
class DirectoryFileSetTest(unittest.TestCase):
    """Tests for DirectoryFileSet backed by a MockFileSystem rooted at /test."""

    def setUp(self):
        # Seed the mock filesystem with two top-level files and one nested file.
        files = {}
        files['/test/some-file'] = 'contents'
        files['/test/some-other-file'] = 'other contents'
        files['/test/b/c'] = 'c'
        self._filesystem = MockFileSystem(files)
        self._fileset = DirectoryFileSet('/test', self._filesystem)

    def test_files_in_namelist(self):
        # namelist() entries are relative to the fileset root ('/test').
        self.assertTrue('some-file' in self._fileset.namelist())
        self.assertTrue('some-other-file' in self._fileset.namelist())
        self.assertTrue('b/c' in self._fileset.namelist())

    def test_read(self):
        # assertEqual replaces the deprecated assertEquals alias
        # (removed in Python 3.12).
        self.assertEqual('contents', self._fileset.read('some-file'))

    def test_open(self):
        # Renamed from 'file' to avoid shadowing the Python 2 builtin.
        fileset_file = self._fileset.open('some-file')
        self.assertEqual('some-file', fileset_file.name())
        self.assertEqual('contents', fileset_file.contents())

    def test_extract(self):
        self._fileset.extract('some-file', '/test-directory')
        contents = self._filesystem.read_text_file('/test-directory/some-file')
        self.assertEqual('contents', contents)

    def test_extract_deep_file(self):
        # extract() must recreate intermediate directories under the target.
        self._fileset.extract('b/c', '/test-directory')
        self.assertTrue(self._filesystem.exists('/test-directory/b/c'))

    def test_delete(self):
        self.assertTrue(self._filesystem.exists('/test/some-file'))
        self._fileset.delete('some-file')
        self.assertFalse(self._filesystem.exists('/test/some-file'))
# Allow running this test file directly, in addition to via the test runner.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
32246 | <filename>multi_parser/shared/__init__.py
from .request import *
from .response import *
| StarcoderdataPython |
66913 | <gh_stars>1-10
"""
Curricula for M6 Milestone
In this curriculum, we cover the following:
* Object vocabulary:
“mommy, daddy, baby, book, house, car, water, ball, juice, cup, box, chair, head, milk,
hand, dog, truck, door, hat, table, cookie, bird”
* Modifier vocabulary:
basic color terms (red, blue, green, white, black…), one, two, my, your)
"""
import random as r
from itertools import chain
from typing import Sequence, List, Optional
from more_itertools import flatten
from adam.situation.high_level_semantics_situation import HighLevelSemanticsSituation
from adam.language.language_generator import LanguageGenerator
from adam.language.dependency import LinearizedDependencyTree
from adam.curriculum import ExplicitWithSituationInstanceGroup
from adam.curriculum.curriculum_utils import (
CHOOSER_FACTORY,
Phase1InstanceGroup,
phase1_instances,
standard_object,
make_noise_objects,
)
from adam.curriculum.phase1_curriculum import (
_make_each_object_by_itself_curriculum,
_make_object_on_ground_curriculum,
_make_objects_with_colors_curriculum,
)
from adam.curriculum.preposition_curriculum import (
_behind_template,
_beside_template,
_on_template,
_over_template,
_under_template,
)
from adam.ontology import IS_ADDRESSEE, IS_SPEAKER
from adam.ontology.phase1_ontology import (
BALL,
BIRD,
BOOK,
BOX,
CAR,
CHAIR,
COOKIE,
CUP,
GAILA_PHASE_1_ONTOLOGY,
HAT,
HOUSE,
LEARNER,
MOM,
TABLE,
TRUCK,
DAD,
BABY,
WATER,
HAND,
DOG,
MILK,
HEAD,
JUICE,
DOOR,
)
from adam.perception.high_level_semantics_situation_to_developmental_primitive_perception import (
GAILA_M6_PERCEPTION_GENERATOR,
)
from adam.situation.templates.phase1_templates import sampled
# Ontology nodes used by the preposition sub-curricula below: "small" objects
# serve as figures (object_1) and "larger" objects as grounds (object_2).
M6_PREPOSITION_CURRICULUM_SMALL_OBJECTS = [BALL, CUP, BOX, HAT, BOOK, COOKIE, BIRD]
M6_PREPOSITION_CURRICULUM_LARGER_OBJECTS = [TABLE, HOUSE, CAR, CHAIR, TRUCK]
M6_PREPOSITION_CURRICULUM_OBJECTS = list(
    chain(
        M6_PREPOSITION_CURRICULUM_SMALL_OBJECTS, M6_PREPOSITION_CURRICULUM_LARGER_OBJECTS
    )
)
# Full M6 object vocabulary (see the module docstring).
M6_CURRICULUM_ALL_OBJECTS = [
    MOM,
    DAD,
    BABY,
    BOOK,
    HOUSE,
    CAR,
    WATER,
    BALL,
    JUICE,
    CUP,
    BOX,
    # TODO: https://github.com/isi-vista/adam/issues/946
    # CHAIR,
    HEAD,
    MILK,
    HAND,
    DOG,
    TRUCK,
    DOOR,
    HAT,
    TABLE,
    COOKIE,
    BIRD,
]
# Create object variables for objects to use in prepositions
SMALL_OBJECT_VARS = [
    standard_object("small_" + str(i), obj)
    for i, obj in enumerate(M6_PREPOSITION_CURRICULUM_SMALL_OBJECTS)
]
# NOTE(review): this literal list duplicates
# M6_PREPOSITION_CURRICULUM_LARGER_OBJECTS above; reusing that constant would
# keep the two in sync.
LARGE_OBJECT_VARS = [
    standard_object("large_" + str(i), obj)
    for i, obj in enumerate([TABLE, HOUSE, CAR, CHAIR, TRUCK])
]
def _make_m6_on_curriculum(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[
        HighLevelSemanticsSituation, LinearizedDependencyTree
    ],
) -> Phase1InstanceGroup:
    """Build the M6 "on" preposition curriculum from sampled figure/ground pairs."""
    # 3 random small figures, each paired with 3 random large grounds.
    situation_batches = [
        sampled(
            _on_template(
                figure,
                ground,
                make_noise_objects(noise_objects),
                is_training=True,
            ),
            chooser=CHOOSER_FACTORY(),
            ontology=GAILA_PHASE_1_ONTOLOGY,
            max_to_sample=num_samples or 1,
            block_multiple_of_the_same_type=True,
        )
        for figure in r.sample(SMALL_OBJECT_VARS, 3)
        for ground in r.sample(LARGE_OBJECT_VARS, 3)
    ]
    return phase1_instances(
        "Preposition on",
        situations=chain(*situation_batches),
        perception_generator=GAILA_M6_PERCEPTION_GENERATOR,
        language_generator=language_generator,
    )
def _make_m6_beside_curriculum(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[
        HighLevelSemanticsSituation, LinearizedDependencyTree
    ],
) -> Phase1InstanceGroup:
    """Build the M6 "beside" preposition curriculum.

    Samples 3 small figure objects and, for each, 3 large ground objects, and
    generates training situations where the figure is beside (to the right of)
    the ground.
    """
    return phase1_instances(
        # Fixed copy-paste bug: this group was previously labeled
        # "Preposition on", matching the sibling on-curriculum.
        "Preposition beside",
        situations=chain(
            *[
                sampled(
                    _beside_template(
                        object_1,
                        object_2,
                        make_noise_objects(noise_objects),
                        is_training=True,
                        is_right=True,
                    ),
                    chooser=CHOOSER_FACTORY(),
                    ontology=GAILA_PHASE_1_ONTOLOGY,
                    max_to_sample=num_samples if num_samples else 1,
                    block_multiple_of_the_same_type=True,
                )
                for object_1 in r.sample(SMALL_OBJECT_VARS, 3)
                for object_2 in r.sample(LARGE_OBJECT_VARS, 3)
            ]
        ),
        perception_generator=GAILA_M6_PERCEPTION_GENERATOR,
        language_generator=language_generator,
    )
def _make_m6_under_curriculum(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[
        HighLevelSemanticsSituation, LinearizedDependencyTree
    ],
) -> Phase1InstanceGroup:
    """Build the M6 "under" preposition curriculum from sampled figure/ground pairs."""
    # 3 random small figures, each paired with 3 random large grounds.
    situation_batches = [
        sampled(
            _under_template(
                figure,
                ground,
                make_noise_objects(noise_objects),
                is_training=True,
                is_distal=True,
            ),
            chooser=CHOOSER_FACTORY(),
            ontology=GAILA_PHASE_1_ONTOLOGY,
            max_to_sample=num_samples or 1,
            block_multiple_of_the_same_type=True,
        )
        for figure in r.sample(SMALL_OBJECT_VARS, 3)
        for ground in r.sample(LARGE_OBJECT_VARS, 3)
    ]
    return phase1_instances(
        "Preposition under",
        situations=chain(*situation_batches),
        perception_generator=GAILA_M6_PERCEPTION_GENERATOR,
        language_generator=language_generator,
    )
def _make_m6_over_curriculum(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[
        HighLevelSemanticsSituation, LinearizedDependencyTree
    ],
) -> Phase1InstanceGroup:
    """Build the M6 "over" preposition curriculum from sampled figure/ground pairs."""
    # 3 random small figures, each paired with 3 random large grounds.
    situation_batches = [
        sampled(
            _over_template(
                figure,
                ground,
                make_noise_objects(noise_objects),
                is_training=True,
                is_distal=True,
            ),
            chooser=CHOOSER_FACTORY(),
            ontology=GAILA_PHASE_1_ONTOLOGY,
            max_to_sample=num_samples or 1,
            block_multiple_of_the_same_type=True,
        )
        for figure in r.sample(SMALL_OBJECT_VARS, 3)
        for ground in r.sample(LARGE_OBJECT_VARS, 3)
    ]
    return phase1_instances(
        "Preposition over",
        situations=chain(*situation_batches),
        perception_generator=GAILA_M6_PERCEPTION_GENERATOR,
        language_generator=language_generator,
    )
def _make_m6_behind_curriculum(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[
        HighLevelSemanticsSituation, LinearizedDependencyTree
    ],
) -> Phase1InstanceGroup:
    """Build the M6 "behind" preposition curriculum with speaker/addressee context."""
    # The learner (addressee) and mom (speaker) are always present as background.
    background = [
        standard_object("learner", LEARNER, added_properties=[IS_ADDRESSEE]),
        standard_object("mom", MOM, added_properties=[IS_SPEAKER]),
    ]
    background.extend(make_noise_objects(noise_objects))
    situation_batches = [
        sampled(
            _behind_template(
                figure, ground, background, is_training=True, is_near=True
            ),
            chooser=CHOOSER_FACTORY(),
            ontology=GAILA_PHASE_1_ONTOLOGY,
            max_to_sample=num_samples or 1,
            block_multiple_of_the_same_type=True,
        )
        for figure in r.sample(SMALL_OBJECT_VARS, 3)
        for ground in r.sample(LARGE_OBJECT_VARS, 3)
    ]
    return phase1_instances(
        "Preposition behind",
        situations=chain(*situation_batches),
        perception_generator=GAILA_M6_PERCEPTION_GENERATOR,
        language_generator=language_generator,
    )
def _make_m6_in_front_curriculum(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[
        HighLevelSemanticsSituation, LinearizedDependencyTree
    ],
) -> Phase1InstanceGroup:
    """Intended to build the M6 "in front" preposition curriculum.

    NOTE(review): this function is a byte-for-byte copy of
    _make_m6_behind_curriculum -- it uses _behind_template and labels the
    instance group "Preposition behind" even though it is registered as the
    "in front" sub-curriculum. No _in_front_template is imported in this
    module, so this looks like an unfinished copy-paste; confirm intent
    before relying on this curriculum for "in front".
    """
    learner_object = standard_object("learner", LEARNER, added_properties=[IS_ADDRESSEE])
    mom = standard_object("mom", MOM, added_properties=[IS_SPEAKER])
    background = [learner_object, mom]
    background.extend(make_noise_objects(noise_objects))
    return phase1_instances(
        "Preposition behind",
        situations=chain(
            *[
                sampled(
                    _behind_template(
                        object_1, object_2, background, is_training=True, is_near=True
                    ),
                    chooser=CHOOSER_FACTORY(),
                    ontology=GAILA_PHASE_1_ONTOLOGY,
                    max_to_sample=num_samples if num_samples else 1,
                    block_multiple_of_the_same_type=True,
                )
                for object_1 in r.sample(SMALL_OBJECT_VARS, 3)
                for object_2 in r.sample(LARGE_OBJECT_VARS, 3)
            ]
        ),
        perception_generator=GAILA_M6_PERCEPTION_GENERATOR,
        language_generator=language_generator,
    )
# Factory functions for the six preposition sub-curricula.
M6_PREPOSITION_SUBCURRICULUM_GENERATORS = [
    _make_m6_on_curriculum,
    _make_m6_beside_curriculum,
    _make_m6_under_curriculum,
    _make_m6_over_curriculum,
    _make_m6_behind_curriculum,
    _make_m6_in_front_curriculum,
]
# All M6 sub-curriculum factories: single objects, object modifiers (colors),
# objects on the ground, then the preposition curricula above.
M6_SUBCURRICULUM_GENERATORS = list(
    chain(
        [
            [  # Single objects
                _make_each_object_by_itself_curriculum,
                # Objects with modifiers
                # Colors
                _make_objects_with_colors_curriculum,
                _make_object_on_ground_curriculum,
            ],
            M6_PREPOSITION_SUBCURRICULUM_GENERATORS,
        ]
    )
)
def _make_m6_mixed_curriculum(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[
        HighLevelSemanticsSituation, LinearizedDependencyTree
    ],
) -> Phase1InstanceGroup:
    """Build a single instance group mixing every M6 sub-curriculum.

    The order is shuffled with a fixed seed so the mix is deterministic.

    Bug fixes relative to the previous version:
    1. It called make_m6_curriculum(), which itself calls this function,
       causing unbounded mutual recursion; the sub-curricula are now
       instantiated directly.
    2. flatten() returns an iterator, but random.shuffle() requires a
       mutable sequence, so the result is materialized with list() first
       (the old ``# type: ignore`` comments were masking this).
    """
    r.seed(0)
    # Assumes each Phase1InstanceGroup iterates its instances, as the
    # original flatten() usage intended -- confirm against the group API.
    all_instances = list(
        flatten(
            instantiate_subcurricula(
                M6_SUBCURRICULUM_GENERATORS,
                num_samples,
                noise_objects,
                language_generator,
            )
        )
    )
    r.shuffle(all_instances)
    return ExplicitWithSituationInstanceGroup("m6_mixed", tuple(all_instances))
def instantiate_subcurricula(
    subcurricula,
    num_samples: Optional[int],
    num_noise_objects: Optional[int],
    language_generator: LanguageGenerator[
        HighLevelSemanticsSituation, LinearizedDependencyTree
    ],
) -> List[Phase1InstanceGroup]:
    """Call every sub-curriculum factory with the shared generation arguments."""
    instantiated = []
    for factory in subcurricula:
        instantiated.append(factory(num_samples, num_noise_objects, language_generator))
    return instantiated
def make_m6_curriculum(
    num_samples: Optional[int],
    num_noise_objects: Optional[int],
    language_generator: LanguageGenerator[
        HighLevelSemanticsSituation, LinearizedDependencyTree
    ],
) -> Sequence[Phase1InstanceGroup]:
    """Return every M6 sub-curriculum followed by the shuffled mixed group."""
    curriculum = instantiate_subcurricula(
        M6_SUBCURRICULUM_GENERATORS, num_samples, num_noise_objects, language_generator
    )
    curriculum.append(
        _make_m6_mixed_curriculum(num_samples, num_noise_objects, language_generator)
    )
    return curriculum
| StarcoderdataPython |
219816 | """
BSD 3-Clause License
Copyright (c) 2017, <NAME>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import torch
import torch.nn.functional as F
import cv2
import numpy as np
try:
from .net_s3fd import s3fd
except ImportError:
from net_s3fd import s3fd
class SFDDetector(object):
    """Wrapper around the S3FD face-detection network.

    Loads pretrained weights and exposes detect_from_image(), which returns
    bounding boxes as [x1, y1, x2, y2, score] lists with score > 0.5 after
    non-maximum suppression.
    """

    # Per-channel mean subtracted from images before inference
    # (presumably BGR channel order, Caffe-style -- confirm against training).
    __WHITENING = np.array([104, 117, 123])

    def __init__(self, device, path_to_detector=None):
        """Load the s3fd weights onto *device* (e.g. 'cpu' or 'cuda:0').

        If *path_to_detector* is None, the weights are located via rospkg
        relative to the rt_gene ROS package.
        """
        self.device = device
        if path_to_detector is None:
            # Removed a leftover debug print ("lllll...") that fired on this branch.
            import rospkg
            path_to_detector = rospkg.RosPack().get_path('DRNXGENE/rt_gene') + '/model_nets/SFD/s3fd_facedetector.pth'
        if 'cuda' in device:
            # Let cuDNN benchmark convolution algorithms for fixed input shapes.
            torch.backends.cudnn.benchmark = True
        self.face_detector = s3fd()
        self.face_detector.load_state_dict(torch.load(path_to_detector))
        self.face_detector.to(device)
        self.face_detector.eval()

    def detect_from_image(self, tensor_or_path):
        """Detect faces; return [x1, y1, x2, y2, score] boxes with score > 0.5."""
        image = self.tensor_or_path_to_ndarray(tensor_or_path)
        bboxlist = self.detect(self.face_detector, image, device=self.device)
        keep = self.nms(bboxlist, 0.3)
        bboxlist = bboxlist[keep, :]
        bboxlist = [x for x in bboxlist if x[-1] > 0.5]
        return bboxlist

    @staticmethod
    def tensor_or_path_to_ndarray(tensor_or_path, rgb=True):
        """Convert path (represented as a string) or torch.tensor to a numpy.ndarray

        Arguments:
            tensor_or_path {numpy.ndarray, torch.tensor or string} -- path to the image, or the image itself
        """
        if isinstance(tensor_or_path, str):
            from skimage import io
            return cv2.imread(tensor_or_path) if not rgb else io.imread(tensor_or_path)
        elif torch.is_tensor(tensor_or_path):
            # Call cpu in case its coming from cuda
            return tensor_or_path.cpu().numpy()[..., ::-1].copy() if not rgb else tensor_or_path.cpu().numpy()
        elif isinstance(tensor_or_path, np.ndarray):
            # [..., ::-1] flips the channel axis (RGB <-> BGR) when rgb=False.
            return tensor_or_path[..., ::-1].copy() if not rgb else tensor_or_path
        else:
            raise TypeError

    @staticmethod
    def nms(dets, thresh):
        """Greedy non-maximum suppression.

        *dets* is an (N, 5) array of [x1, y1, x2, y2, score] rows. Returns the
        indices of the kept boxes, highest score first; boxes whose IoU with a
        kept box exceeds *thresh* are suppressed.
        """
        if 0 == len(dets):
            return []
        x1, y1, x2, y2, scores = dets[:, 0], dets[:, 1], dets[:, 2], dets[:, 3], dets[:, 4]
        # +1 treats coordinates as inclusive pixel indices.
        areas = (x2 - x1 + 1) * (y2 - y1 + 1)
        order = scores.argsort()[::-1]
        keep = []
        while order.size > 0:
            i = order[0]
            keep.append(i)
            # Intersection of the current best box with all remaining boxes.
            xx1, yy1 = np.maximum(x1[i], x1[order[1:]]), np.maximum(y1[i], y1[order[1:]])
            xx2, yy2 = np.minimum(x2[i], x2[order[1:]]), np.minimum(y2[i], y2[order[1:]])
            w, h = np.maximum(0.0, xx2 - xx1 + 1), np.maximum(0.0, yy2 - yy1 + 1)
            ovr = w * h / (areas[i] + areas[order[1:]] - w * h)
            # Keep only boxes that do not overlap the chosen box too much.
            inds = np.where(ovr <= thresh)[0]
            order = order[inds + 1]
        return keep

    @staticmethod
    def decode(loc, priors, variances):
        """Decode locations from predictions using priors to undo
        the encoding we did for offset regression at train time.
        Args:
            loc (tensor): location predictions for loc layers,
                Shape: [num_priors,4]
            priors (tensor): Prior boxes in center-offset form.
                Shape: [num_priors,4].
            variances: (list[float]) Variances of priorboxes
        Return:
            decoded bounding box predictions
        """
        boxes = torch.cat((
            priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:],
            priors[:, 2:] * torch.exp(loc[:, 2:] * variances[1])), 1)
        # Convert from (center, size) form to (x1, y1, x2, y2) corners.
        boxes[:, :2] -= boxes[:, 2:] / 2
        boxes[:, 2:] += boxes[:, :2]
        return boxes

    def detect(self, net, img, device):
        """Run a raw forward pass over *img* (H x W x 3 ndarray) and decode all
        candidate boxes above a 0.05 class score. Returns an (N, 5) array of
        [x1, y1, x2, y2, score]; a single zero row if nothing was found."""
        img = img - self.__WHITENING
        # HWC -> CHW, then add the batch dimension expected by the network.
        img = img.transpose(2, 0, 1)
        img = img.reshape((1,) + img.shape)
        img = torch.from_numpy(img).float().to(device)
        with torch.no_grad():
            olist = net(img)
        bboxlist = []
        # Outputs alternate (classification, regression) per detection scale.
        for i in range(len(olist) // 2):
            olist[i * 2] = F.softmax(olist[i * 2], dim=1)
        olist = [oelem.data.cpu() for oelem in olist]
        for i in range(len(olist) // 2):
            ocls, oreg = olist[i * 2], olist[i * 2 + 1]
            stride = 2 ** (i + 2)  # 4,8,16,32,64,128
            poss = zip(*np.where(ocls[:, 1, :, :] > 0.05))
            for Iindex, hindex, windex in poss:
                # Anchor center for this feature-map cell.
                axc, ayc = stride / 2 + windex * stride, stride / 2 + hindex * stride
                score = ocls[0, 1, hindex, windex]
                loc = oreg[0, :, hindex, windex].contiguous().view(1, 4)
                priors = torch.Tensor([[axc / 1.0, ayc / 1.0, stride * 4 / 1.0, stride * 4 / 1.0]])
                variances = [0.1, 0.2]
                box = self.decode(loc, priors, variances)
                x1, y1, x2, y2 = box[0] * 1.0
                bboxlist.append([x1, y1, x2, y2, score])
        bboxlist = np.array(bboxlist)
        if 0 == len(bboxlist):
            bboxlist = np.zeros((1, 5))
        return bboxlist
| StarcoderdataPython |
1675695 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack Foundation.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.api import extensions
from neutron.api.v2 import attributes
# The service will return the vif type for the specific port.
VIF_TYPE = 'binding:vif_type'
# In some cases different implementations may be run on different hosts.
# The host on which the port will be allocated.
HOST_ID = 'binding:host_id'
# The profile will be a dictionary that enables the application running
# on the specific host to pass and receive vif port specific information to
# the plugin.
PROFILE = 'binding:profile'
# The capabilities will be a dictionary that enables pass information about
# functionalies neutron provides. The following value should be provided.
#  - port_filter : Boolean value indicating Neutron provides port filtering
#                  features such as security group and anti MAC/IP spoofing
CAPABILITIES = 'binding:capabilities'
CAP_PORT_FILTER = 'port_filter'
# Known VIF type values exposed through the API.
VIF_TYPE_UNBOUND = 'unbound'
VIF_TYPE_BINDING_FAILED = 'binding_failed'
VIF_TYPE_IOVISOR = 'iovisor'
VIF_TYPE_OVS = 'ovs'
VIF_TYPE_IVS = 'ivs'
VIF_TYPE_BRIDGE = 'bridge'
VIF_TYPE_802_QBG = '802.1qbg'
VIF_TYPE_802_QBH = '802.1qbh'
VIF_TYPE_HYPERV = 'hyperv'
VIF_TYPE_MIDONET = 'midonet'
VIF_TYPE_OTHER = 'other'
# All VIF types this extension recognizes.
# NOTE: VIF_TYPE_IOVISOR is defined above but absent from this list --
# presumably an oversight; confirm before relying on membership checks.
VIF_TYPES = [VIF_TYPE_UNBOUND, VIF_TYPE_BINDING_FAILED, VIF_TYPE_OVS,
             VIF_TYPE_IVS, VIF_TYPE_BRIDGE, VIF_TYPE_802_QBG,
             VIF_TYPE_802_QBH, VIF_TYPE_HYPERV, VIF_TYPE_MIDONET,
             VIF_TYPE_OTHER]
# Attributes added to the 'ports' resource for API v2.0 by this extension.
EXTENDED_ATTRIBUTES_2_0 = {
    'ports': {
        VIF_TYPE: {'allow_post': False, 'allow_put': False,
                   'default': attributes.ATTR_NOT_SPECIFIED,
                   'enforce_policy': True,
                   'is_visible': True},
        HOST_ID: {'allow_post': True, 'allow_put': True,
                  'default': attributes.ATTR_NOT_SPECIFIED,
                  'is_visible': True,
                  'enforce_policy': True},
        PROFILE: {'allow_post': True, 'allow_put': True,
                  'default': attributes.ATTR_NOT_SPECIFIED,
                  'enforce_policy': True,
                  'validate': {'type:dict_or_none': None},
                  'is_visible': True},
        CAPABILITIES: {'allow_post': False, 'allow_put': False,
                       'default': attributes.ATTR_NOT_SPECIFIED,
                       'enforce_policy': True,
                       'is_visible': True},
    }
}
class Portbindings(extensions.ExtensionDescriptor):
    """Extension class supporting port bindings.

    Neutron's extension framework uses this class to expose port-binding
    metadata (vif type, host id, profile, capabilities) to external
    applications. With admin rights the values can be read and updated.
    """

    @classmethod
    def get_name(cls):
        return "Port Binding"

    @classmethod
    def get_alias(cls):
        return "binding"

    @classmethod
    def get_description(cls):
        return "Expose port bindings of a virtual port to external application"

    @classmethod
    def get_namespace(cls):
        return "http://docs.openstack.org/ext/binding/api/v1.0"

    @classmethod
    def get_updated(cls):
        return "2012-11-14T10:00:00-00:00"

    def get_extended_resources(self, version):
        # Only API version 2.0 carries the binding attributes.
        return EXTENDED_ATTRIBUTES_2_0 if version == "2.0" else {}
| StarcoderdataPython |
11346354 | <reponame>tlambert-forks/pyclesperanto_prototype<filename>tests/test_sum_z_projection.py<gh_stars>10-100
import pyclesperanto_prototype as cle
import numpy as np
def test_sum_z_projection():
    """sum_z_projection should collapse a 5x5x5 stack into a 5x5 image by
    summing along z.

    The .T on both arrays transposes the numpy (z, y, x) literal layout into
    the axis order pyclesperanto expects -- presumably (x, y, z); confirm
    against the library's conventions.
    """
    # Input: five 5x5 slices containing permutations of the same values,
    # so every z-column sums to the values in `reference`.
    test1 = cle.push(np.asarray([
        [
            [1, 0, 0, 0, 9],
            [0, 2, 0, 8, 0],
            [3, 0, 1, 0, 10],
            [0, 4, 0, 7, 0],
            [5, 0, 6, 0, 10]
        ], [
            [0, 2, 0, 8, 0],
            [1, 0, 0, 0, 9],
            [3, 0, 1, 0, 10],
            [0, 4, 0, 7, 0],
            [5, 0, 6, 0, 10]
        ], [
            [0, 2, 0, 8, 0],
            [3, 0, 1, 0, 10],
            [0, 4, 0, 7, 0],
            [1, 0, 0, 0, 9],
            [5, 0, 6, 0, 10]
        ], [
            [0, 2, 0, 8, 0],
            [1, 0, 0, 0, 9],
            [0, 4, 0, 7, 0],
            [3, 0, 1, 0, 10],
            [5, 0, 6, 0, 10]
        ], [
            [1, 0, 0, 0, 9],
            [0, 4, 0, 7, 0],
            [3, 0, 1, 0, 10],
            [0, 2, 0, 8, 0],
            [5, 0, 6, 0, 10]
        ]
    ]).T)
    # Expected per-pixel sums over the five slices.
    reference = cle.push(np.asarray([
        [10, 10, 14, 11, 21],
        [10, 10, 14, 11, 21],
        [10, 14, 11, 10, 21],
        [10, 10, 11, 14, 21],
        [10, 11, 14, 10, 21]
    ]).T)
    result = cle.create(reference)
    cle.sum_z_projection(test1, result)
    a = cle.pull(result)
    b = cle.pull(reference)
    # NOTE(review): leftover debug print; harmless but noisy in test output.
    print(a)
    assert (np.allclose(a, b, 0.01))
| StarcoderdataPython |
1682777 | <filename>tests/m2m_through/tests.py
from datetime import datetime
from operator import attrgetter
from django.db import IntegrityError
from django.test import TestCase
from .models import (
CustomMembership, Employee, Event, Friendship, Group, Ingredient,
Invitation, Membership, Person, PersonSelfRefM2M, Recipe, RecipeIngredient,
Relationship,
)
class M2mThroughTests(TestCase):
    """Tests for many-to-many relations declared with an explicit ``through``
    model (Membership/CustomMembership between Person and Group)."""

    @classmethod
    def setUpTestData(cls):
        # Shared fixture: three people and two groups; memberships are
        # created per-test so each test controls its own relations.
        cls.bob = Person.objects.create(name='Bob')
        cls.jim = Person.objects.create(name='Jim')
        cls.jane = Person.objects.create(name='Jane')
        cls.rock = Group.objects.create(name='Rock')
        cls.roll = Group.objects.create(name='Roll')
    def test_retrieve_intermediate_items(self):
        Membership.objects.create(person=self.jim, group=self.rock)
        Membership.objects.create(person=self.jane, group=self.rock)
        expected = ['Jane', 'Jim']
        self.assertQuerysetEqual(
            self.rock.members.all(),
            expected,
            attrgetter("name")
        )
    def test_get_on_intermediate_model(self):
        Membership.objects.create(person=self.jane, group=self.rock)
        queryset = Membership.objects.get(person=self.jane, group=self.rock)
        self.assertEqual(
            repr(queryset),
            '<Membership: Jane is a member of Rock>'
        )
    def test_filter_on_intermediate_model(self):
        Membership.objects.create(person=self.jim, group=self.rock)
        Membership.objects.create(person=self.jane, group=self.rock)
        queryset = Membership.objects.filter(group=self.rock)
        expected = [
            '<Membership: Jim is a member of Rock>',
            '<Membership: Jane is a member of Rock>',
        ]
        self.assertQuerysetEqual(
            queryset,
            expected
        )
    def test_add_on_m2m_with_intermediate_model(self):
        # through_defaults supplies values for the intermediate model's
        # extra fields when adding via the m2m manager.
        self.rock.members.add(self.bob, through_defaults={'invite_reason': 'He is good.'})
        self.assertSequenceEqual(self.rock.members.all(), [self.bob])
        self.assertEqual(self.rock.membership_set.get().invite_reason, 'He is good.')
    def test_add_on_m2m_with_intermediate_model_value_required(self):
        self.rock.nodefaultsnonulls.add(self.jim, through_defaults={'nodefaultnonull': 1})
        self.assertEqual(self.rock.testnodefaultsornulls_set.get().nodefaultnonull, 1)
    def test_add_on_m2m_with_intermediate_model_value_required_fails(self):
        # Without through_defaults, a NOT NULL intermediate field raises.
        with self.assertRaises(IntegrityError):
            self.rock.nodefaultsnonulls.add(self.jim)
    def test_create_on_m2m_with_intermediate_model(self):
        annie = self.rock.members.create(name='Annie', through_defaults={'invite_reason': 'She was just awesome.'})
        self.assertSequenceEqual(self.rock.members.all(), [annie])
        self.assertEqual(self.rock.membership_set.get().invite_reason, 'She was just awesome.')
    def test_create_on_m2m_with_intermediate_model_value_required(self):
        self.rock.nodefaultsnonulls.create(name='Test', through_defaults={'nodefaultnonull': 1})
        self.assertEqual(self.rock.testnodefaultsornulls_set.get().nodefaultnonull, 1)
    def test_create_on_m2m_with_intermediate_model_value_required_fails(self):
        with self.assertRaises(IntegrityError):
            self.rock.nodefaultsnonulls.create(name='Test')
    def test_get_or_create_on_m2m_with_intermediate_model_value_required(self):
        self.rock.nodefaultsnonulls.get_or_create(name='Test', through_defaults={'nodefaultnonull': 1})
        self.assertEqual(self.rock.testnodefaultsornulls_set.get().nodefaultnonull, 1)
    def test_get_or_create_on_m2m_with_intermediate_model_value_required_fails(self):
        with self.assertRaises(IntegrityError):
            self.rock.nodefaultsnonulls.get_or_create(name='Test')
    def test_update_or_create_on_m2m_with_intermediate_model_value_required(self):
        self.rock.nodefaultsnonulls.update_or_create(name='Test', through_defaults={'nodefaultnonull': 1})
        self.assertEqual(self.rock.testnodefaultsornulls_set.get().nodefaultnonull, 1)
    def test_update_or_create_on_m2m_with_intermediate_model_value_required_fails(self):
        with self.assertRaises(IntegrityError):
            self.rock.nodefaultsnonulls.update_or_create(name='Test')
    def test_remove_on_m2m_with_intermediate_model(self):
        Membership.objects.create(person=self.jim, group=self.rock)
        self.rock.members.remove(self.jim)
        self.assertSequenceEqual(self.rock.members.all(), [])
    def test_remove_on_m2m_with_intermediate_model_multiple(self):
        # remove() deletes ALL intermediate rows for the given related object.
        Membership.objects.create(person=self.jim, group=self.rock, invite_reason='1')
        Membership.objects.create(person=self.jim, group=self.rock, invite_reason='2')
        self.assertSequenceEqual(self.rock.members.all(), [self.jim, self.jim])
        self.rock.members.remove(self.jim)
        self.assertSequenceEqual(self.rock.members.all(), [])
    def test_set_on_m2m_with_intermediate_model(self):
        members = list(Person.objects.filter(name__in=['Bob', 'Jim']))
        self.rock.members.set(members)
        self.assertSequenceEqual(self.rock.members.all(), [self.bob, self.jim])
    def test_set_on_m2m_with_intermediate_model_value_required(self):
        # set() leaves existing intermediate rows untouched unless clear=True,
        # so the second call does not overwrite the stored value.
        self.rock.nodefaultsnonulls.set([self.jim], through_defaults={'nodefaultnonull': 1})
        self.assertEqual(self.rock.testnodefaultsornulls_set.get().nodefaultnonull, 1)
        self.rock.nodefaultsnonulls.set([self.jim], through_defaults={'nodefaultnonull': 2})
        self.assertEqual(self.rock.testnodefaultsornulls_set.get().nodefaultnonull, 1)
        self.rock.nodefaultsnonulls.set([self.jim], through_defaults={'nodefaultnonull': 2}, clear=True)
        self.assertEqual(self.rock.testnodefaultsornulls_set.get().nodefaultnonull, 2)
    def test_set_on_m2m_with_intermediate_model_value_required_fails(self):
        with self.assertRaises(IntegrityError):
            self.rock.nodefaultsnonulls.set([self.jim])
    def test_clear_removes_all_the_m2m_relationships(self):
        Membership.objects.create(person=self.jim, group=self.rock)
        Membership.objects.create(person=self.jane, group=self.rock)
        self.rock.members.clear()
        self.assertQuerysetEqual(
            self.rock.members.all(),
            []
        )
    def test_retrieve_reverse_intermediate_items(self):
        Membership.objects.create(person=self.jim, group=self.rock)
        Membership.objects.create(person=self.jim, group=self.roll)
        expected = ['Rock', 'Roll']
        self.assertQuerysetEqual(
            self.jim.group_set.all(),
            expected,
            attrgetter("name")
        )
    def test_add_on_reverse_m2m_with_intermediate_model(self):
        self.bob.group_set.add(self.rock)
        self.assertSequenceEqual(self.bob.group_set.all(), [self.rock])
    def test_create_on_reverse_m2m_with_intermediate_model(self):
        funk = self.bob.group_set.create(name='Funk')
        self.assertSequenceEqual(self.bob.group_set.all(), [funk])
    def test_remove_on_reverse_m2m_with_intermediate_model(self):
        Membership.objects.create(person=self.bob, group=self.rock)
        self.bob.group_set.remove(self.rock)
        self.assertSequenceEqual(self.bob.group_set.all(), [])
    def test_set_on_reverse_m2m_with_intermediate_model(self):
        members = list(Group.objects.filter(name__in=['Rock', 'Roll']))
        self.bob.group_set.set(members)
        self.assertSequenceEqual(self.bob.group_set.all(), [self.rock, self.roll])
    def test_clear_on_reverse_removes_all_the_m2m_relationships(self):
        Membership.objects.create(person=self.jim, group=self.rock)
        Membership.objects.create(person=self.jim, group=self.roll)
        self.jim.group_set.clear()
        self.assertQuerysetEqual(
            self.jim.group_set.all(),
            []
        )
    def test_query_model_by_attribute_name_of_related_model(self):
        Membership.objects.create(person=self.jim, group=self.rock)
        Membership.objects.create(person=self.jane, group=self.rock)
        Membership.objects.create(person=self.bob, group=self.roll)
        Membership.objects.create(person=self.jim, group=self.roll)
        Membership.objects.create(person=self.jane, group=self.roll)
        self.assertQuerysetEqual(
            Group.objects.filter(members__name='Bob'),
            ['Roll'],
            attrgetter("name")
        )
    def test_order_by_relational_field_through_model(self):
        # Ordering can traverse the related_name declared on the through
        # model's foreign key.
        CustomMembership.objects.create(person=self.jim, group=self.rock)
        CustomMembership.objects.create(person=self.bob, group=self.rock)
        CustomMembership.objects.create(person=self.jane, group=self.roll)
        CustomMembership.objects.create(person=self.jim, group=self.roll)
        self.assertSequenceEqual(
            self.rock.custom_members.order_by('custom_person_related_name'),
            [self.jim, self.bob]
        )
        self.assertSequenceEqual(
            self.roll.custom_members.order_by('custom_person_related_name'),
            [self.jane, self.jim]
        )
    def test_query_first_model_by_intermediate_model_attribute(self):
        Membership.objects.create(
            person=self.jane, group=self.roll,
            invite_reason="She was just awesome."
        )
        Membership.objects.create(
            person=self.jim, group=self.roll,
            invite_reason="He is good."
        )
        Membership.objects.create(person=self.bob, group=self.roll)
        qs = Group.objects.filter(
            membership__invite_reason="She was just awesome."
        )
        self.assertQuerysetEqual(
            qs,
            ['Roll'],
            attrgetter("name")
        )
    def test_query_second_model_by_intermediate_model_attribute(self):
        Membership.objects.create(
            person=self.jane, group=self.roll,
            invite_reason="She was just awesome."
        )
        Membership.objects.create(
            person=self.jim, group=self.roll,
            invite_reason="He is good."
        )
        Membership.objects.create(person=self.bob, group=self.roll)
        qs = Person.objects.filter(
            membership__invite_reason="She was just awesome."
        )
        self.assertQuerysetEqual(
            qs,
            ['Jane'],
            attrgetter("name")
        )
    def test_query_model_by_related_model_name(self):
        Membership.objects.create(person=self.jim, group=self.rock)
        Membership.objects.create(person=self.jane, group=self.rock)
        Membership.objects.create(person=self.bob, group=self.roll)
        Membership.objects.create(person=self.jim, group=self.roll)
        Membership.objects.create(person=self.jane, group=self.roll)
        self.assertQuerysetEqual(
            Person.objects.filter(group__name="Rock"),
            ['Jane', 'Jim'],
            attrgetter("name")
        )
    def test_query_model_by_custom_related_name(self):
        CustomMembership.objects.create(person=self.bob, group=self.rock)
        CustomMembership.objects.create(person=self.jim, group=self.rock)
        self.assertQuerysetEqual(
            Person.objects.filter(custom__name="Rock"),
            ['Bob', 'Jim'],
            attrgetter("name")
        )
    def test_query_model_by_intermediate_can_return_non_unique_queryset(self):
        # Jim matches through two memberships, so he appears twice:
        # traversing a through model does not deduplicate results.
        Membership.objects.create(person=self.jim, group=self.rock)
        Membership.objects.create(
            person=self.jane, group=self.rock,
            date_joined=datetime(2006, 1, 1)
        )
        Membership.objects.create(
            person=self.bob, group=self.roll,
            date_joined=datetime(2004, 1, 1))
        Membership.objects.create(person=self.jim, group=self.roll)
        Membership.objects.create(
            person=self.jane, group=self.roll,
            date_joined=datetime(2004, 1, 1))
        qs = Person.objects.filter(
            membership__date_joined__gt=datetime(2004, 1, 1)
        )
        self.assertQuerysetEqual(
            qs,
            ['Jane', 'Jim', 'Jim'],
            attrgetter("name")
        )
    def test_custom_related_name_forward_empty_qs(self):
        self.assertQuerysetEqual(
            self.rock.custom_members.all(),
            []
        )
    def test_custom_related_name_reverse_empty_qs(self):
        self.assertQuerysetEqual(
            self.bob.custom.all(),
            []
        )
    def test_custom_related_name_forward_non_empty_qs(self):
        CustomMembership.objects.create(person=self.bob, group=self.rock)
        CustomMembership.objects.create(person=self.jim, group=self.rock)
        self.assertQuerysetEqual(
            self.rock.custom_members.all(),
            ['Bob', 'Jim'],
            attrgetter("name")
        )
    def test_custom_related_name_reverse_non_empty_qs(self):
        CustomMembership.objects.create(person=self.bob, group=self.rock)
        CustomMembership.objects.create(person=self.jim, group=self.rock)
        self.assertQuerysetEqual(
            self.bob.custom.all(),
            ['Rock'],
            attrgetter("name")
        )
    def test_custom_related_name_doesnt_conflict_with_fky_related_name(self):
        CustomMembership.objects.create(person=self.bob, group=self.rock)
        self.assertQuerysetEqual(
            self.bob.custom_person_related_name.all(),
            ['<CustomMembership: Bob is a member of Rock>']
        )
    def test_through_fields(self):
        """
        Relations with intermediary tables with multiple FKs
        to the M2M's ``to`` model are possible.
        """
        event = Event.objects.create(title='Rockwhale 2014')
        Invitation.objects.create(event=event, inviter=self.bob, invitee=self.jim)
        Invitation.objects.create(event=event, inviter=self.bob, invitee=self.jane)
        self.assertQuerysetEqual(
            event.invitees.all(),
            ['Jane', 'Jim'],
            attrgetter('name')
        )
class M2mThroughReferentialTests(TestCase):
    def test_self_referential_empty_qs(self):
        """A person with no Friendship rows has an empty friends queryset."""
        tony = PersonSelfRefM2M.objects.create(name="Tony")
        self.assertQuerysetEqual(
            tony.friends.all(),
            []
        )
def test_self_referential_non_symmetrical_first_side(self):
tony = PersonSelfRefM2M.objects.create(name="Tony")
chris = PersonSelfRefM2M.objects.create(name="Chris")
Friendship.objects.create(
first=tony, second=chris, date_friended=datetime.now()
)
self.assertQuerysetEqual(
tony.friends.all(),
['Chris'],
attrgetter("name")
)
def test_self_referential_non_symmetrical_second_side(self):
tony = PersonSelfRefM2M.objects.create(name="Tony")
chris = PersonSelfRefM2M.objects.create(name="Chris")
Friendship.objects.create(
first=tony, second=chris, date_friended=datetime.now()
)
self.assertQuerysetEqual(
chris.friends.all(),
[]
)
def test_self_referential_non_symmetrical_clear_first_side(self):
tony = PersonSelfRefM2M.objects.create(name="Tony")
chris = PersonSelfRefM2M.objects.create(name="Chris")
Friendship.objects.create(
first=tony, second=chris, date_friended=datetime.now()
)
chris.friends.clear()
self.assertQuerysetEqual(
chris.friends.all(),
[]
)
# Since this isn't a symmetrical relation, Tony's friend link still exists.
self.assertQuerysetEqual(
tony.friends.all(),
['Chris'],
attrgetter("name")
)
def test_self_referential_symmetrical(self):
tony = PersonSelfRefM2M.objects.create(name="Tony")
chris = PersonSelfRefM2M.objects.create(name="Chris")
Friendship.objects.create(
first=tony, second=chris, date_friended=datetime.now()
)
Friendship.objects.create(
first=chris, second=tony, date_friended=datetime.now()
)
self.assertQuerysetEqual(
tony.friends.all(),
['Chris'],
attrgetter("name")
)
self.assertQuerysetEqual(
chris.friends.all(),
['Tony'],
attrgetter("name")
)
def test_through_fields_self_referential(self):
john = Employee.objects.create(name='john')
peter = Employee.objects.create(name='peter')
mary = Employee.objects.create(name='mary')
harry = Employee.objects.create(name='harry')
Relationship.objects.create(source=john, target=peter, another=None)
Relationship.objects.create(source=john, target=mary, another=None)
Relationship.objects.create(source=john, target=harry, another=peter)
self.assertQuerysetEqual(
john.subordinates.all(),
['peter', 'mary', 'harry'],
attrgetter('name')
)
class M2mThroughToFieldsTests(TestCase):
    """M2M-through tests where the intermediary FKs use ``to_field``."""
    @classmethod
    def setUpTestData(cls):
        cls.pea = Ingredient.objects.create(iname='pea')
        cls.potato = Ingredient.objects.create(iname='potato')
        cls.tomato = Ingredient.objects.create(iname='tomato')
        cls.curry = Recipe.objects.create(rname='curry')
        # Insertion order (potato, pea, tomato) deliberately differs from
        # the alphabetical ordering asserted by the tests below.
        for ingredient in (cls.potato, cls.pea, cls.tomato):
            RecipeIngredient.objects.create(recipe=cls.curry, ingredient=ingredient)
    def test_retrieval(self):
        """Forward and backward traversal across the through model."""
        forward = self.curry.ingredients.all()
        self.assertSequenceEqual(forward, [self.pea, self.potato, self.tomato])
        self.assertEqual(self.tomato.recipes.get(), self.curry)
    def test_choices(self):
        """Field choices expose the related ``to_field`` values."""
        field = Recipe._meta.get_field('ingredients')
        labels = [value for value, _ in field.get_choices(include_blank=False)]
        self.assertEqual(labels, ['pea', 'potato', 'tomato'])
| StarcoderdataPython |
11281999 | <filename>tensornet/models/__init__.py
from .base_model import BaseModel
from .resnet import (
ResNet, resnet18, resnet34, resnet50, resnet101, resnet152,
resnext50_32x4d, resnext101_32x8d, wide_resnet50_2,
wide_resnet101_2,
)
from .dsresnet import DSResNet
from .mobilenetv2 import MobileNetV2, mobilenet_v2
from .squeezenet import SqueezeNet, squeezenet1_0, squeezenet1_1
from .shufflenetv2 import ShuffleNetV2, shufflenet_v2_x0_5, shufflenet_v2_x1_0
# Names re-exported as the public API of the tensornet.models package.
__all__ = [
    'BaseModel', 'ResNet', 'resnet18', 'resnet34', 'resnet50',
    'resnet101', 'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
    'wide_resnet50_2', 'wide_resnet101_2', 'MobileNetV2',
    'mobilenet_v2', 'DSResNet', 'SqueezeNet', 'squeezenet1_0', 'squeezenet1_1',
    'ShuffleNetV2', 'shufflenet_v2_x0_5', 'shufflenet_v2_x1_0',
]
| StarcoderdataPython |
4876957 | import torch
from torch import nn
class ConvBNLayer(nn.Module):
    """Conv2d -> BatchNorm2d -> ReLU block that preserves spatial size.

    Args:
        cin: number of input channels.
        cout: number of output channels.
        ksize: convolution kernel size (expected odd for exact "same" padding).
    """
    def __init__(self, cin, cout, ksize):
        super(ConvBNLayer, self).__init__()
        # "same" padding derived from the kernel size. The original hard-coded
        # padding=1, which only preserves spatial dims for ksize == 3; for
        # ksize == 3 this is identical (3 // 2 == 1), so callers are unaffected.
        self.conv = nn.Conv2d(cin, cout, ksize, padding=ksize // 2)
        self.bn = nn.BatchNorm2d(cout)
        self.relu = nn.ReLU()
    def forward(self, x):
        """Apply conv, batch-norm and ReLU; returns an (N, cout, H, W) tensor."""
        x = self.conv(x)
        x = self.bn(x)
        x = self.relu(x)
        return x
class MaxPooledConvBNLayer(nn.Module):
    """Conv2d -> BatchNorm2d -> ReLU -> 2x2 MaxPool block (halves H and W).

    Args:
        cin: number of input channels.
        cout: number of output channels.
        ksize: convolution kernel size (expected odd for exact "same" padding).
    """
    def __init__(self, cin, cout, ksize):
        super(MaxPooledConvBNLayer, self).__init__()
        # "same" padding derived from the kernel size; identical to the
        # original padding=1 for the ksize == 3 used throughout this file,
        # but also correct for other odd kernel sizes.
        self.conv = nn.Conv2d(cin, cout, ksize, padding=ksize // 2)
        self.bn = nn.BatchNorm2d(cout)
        self.relu = nn.ReLU()
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
    def forward(self, x):
        """Return an (N, cout, H/2, W/2) tensor."""
        x = self.conv(x)
        x = self.bn(x)
        x = self.relu(x)
        x = self.pool(x)
        return x
class ResidualLayer(nn.Module):
    """Downsampling prep block followed by a two-conv residual branch."""
    def __init__(self, cin, cout, ksize):
        super(ResidualLayer, self).__init__()
        self.prep = MaxPooledConvBNLayer(cin, cout, ksize)
        self.res1 = ConvBNLayer(cout, cout, ksize)
        self.res2 = ConvBNLayer(cout, cout, ksize)
    def forward(self, x):
        """Return prep(x) + res2(res1(prep(x))) — an identity skip connection."""
        shortcut = self.prep(x)
        branch = self.res2(self.res1(shortcut))
        return shortcut + branch
class Classifier(nn.Module):
    """Global max-pool over spatial dims followed by a 512 -> 10 linear head."""
    def __init__(self):
        super(Classifier, self).__init__()
        self.pool = nn.AdaptiveMaxPool2d(1)
        self.linear = nn.Linear(512, 10)
        # NOTE(review): sigmoid is instantiated but never applied in forward();
        # kept so the module/state-dict layout stays identical to checkpoints.
        self.sigmoid = nn.Sigmoid()
    def forward(self, x):
        """Return raw class logits of shape (N, 10) for input (N, 512, H, W)."""
        pooled = self.pool(x)
        flat = pooled.view(-1, 512)
        return self.linear(flat)
class ResNet9(nn.Module):
    """Compact 9-layer ResNet for 10-class classification of 3-channel images."""
    def __init__(self):
        super(ResNet9, self).__init__()
        self.prep = ConvBNLayer(cin=3, cout=64, ksize=3)
        self.layer1 = ResidualLayer(cin=64, cout=128, ksize=3)
        self.layer2 = MaxPooledConvBNLayer(cin=128, cout=256, ksize=3)
        self.layer3 = ResidualLayer(cin=256, cout=512, ksize=3)
        self.classifier = Classifier()
    def forward(self, x):
        """Run the stages in sequence and return (N, 10) logits."""
        for stage in (self.prep, self.layer1, self.layer2,
                      self.layer3, self.classifier):
            x = stage(x)
        return x
| StarcoderdataPython |
365834 | <gh_stars>0
# !/usr/bin/env python3
# -*- config: utf-8 -*-
from tkinter import *
# Напишите программу, в которой на главном окне находятся холст и кнопка
# "Добавить фигуру". Кнопка открывает второе окно, включающее четыре поля для ввода
# координат и две радиокнопки для выбора, рисовать ли на холсте прямоугольник или овал.
# Здесь же находится кнопка "Нарисовать", при клике на которую соответствующая фигура
# добавляется на холст, а второе окно закрывается. Проверку корректности ввода в поля
# можно опустить.
class Main_prog:
    """Main window: a 500x500 white canvas plus a button that opens the
    shape-creation dialog (see the exercise statement above)."""
    def __init__(self, master, function):
        """Build the UI on *master*; *function* is the name of the method
        of this instance to bind to the button (e.g. 'new_window')."""
        self.main_canv = Canvas(master, width=500, height=500, bg='white')
        self.btn_1 = Button(master, text='Добавить фигуру')
        # getattr() replaces the original eval('self.' + function): identical
        # behavior for method names, without arbitrary-code-execution risk.
        self.btn_1['command'] = getattr(self, function)
        self.main_canv.pack()
        self.btn_1.pack()
    def new_window(self):
        """Open the dialog: four coordinate entries, oval/rectangle radio
        buttons, and a draw button that adds the figure and closes the dialog."""
        opt_wndw = Toplevel()
        opt_wndw.title("Меню создания")
        opt_wndw.resizable(False, False)
        opt_wndw.geometry('300x100')
        header_1 = LabelFrame(opt_wndw, text="X1 Y1")
        header_2 = LabelFrame(opt_wndw, text="X2 Y2")
        bottom_1 = LabelFrame(opt_wndw)
        ent_x1 = Entry(header_1, width=20)
        ent_y1 = Entry(header_1, width=20)
        ent_x2 = Entry(header_2, width=20)
        ent_y2 = Entry(header_2, width=20)
        def draw_ob(event):
            # Per the exercise, input validation is deliberately omitted.
            x_1 = int(ent_x1.get())
            y_1 = int(ent_y1.get())
            x_2 = int(ent_x2.get())
            y_2 = int(ent_y2.get())
            if temp_var.get():
                self.main_canv.create_oval(x_1, y_1, x_2, y_2, width=2)
            else:
                self.main_canv.create_rectangle(x_1, y_1, x_2, y_2, width=2)
            # Bug fix: the exercise requires the dialog to close after drawing
            # ("второе окно закрывается"); the original never closed it.
            opt_wndw.destroy()
        temp_var = BooleanVar()
        temp_var.set(False)
        rad_oval = Radiobutton(bottom_1, text="Овал", variable=temp_var, value=True)
        rad_rect = Radiobutton(bottom_1, text="Прямоугольник", variable=temp_var, value=False)
        btn_creat = Button(bottom_1, text="Нарисовать")
        btn_creat.bind("<Button-1>", draw_ob)
        header_1.pack(side=TOP)
        header_2.pack(side=TOP)
        bottom_1.pack(side=TOP)
        ent_x1.pack(side=LEFT)
        ent_y1.pack(side=LEFT)
        ent_x2.pack(side=LEFT)
        ent_y2.pack(side=LEFT)
        rad_oval.pack(side=LEFT)
        rad_rect.pack(side=LEFT)
        btn_creat.pack(side=BOTTOM)
if __name__ == '__main__':
    # Build the root window and wire the button to Main_prog.new_window.
    root = Tk()
    root.title("Холст для рисования")
    main_prog = Main_prog(root, 'new_window')
    root.mainloop()
| StarcoderdataPython |
6610285 | from . import FixtureTest
class EarlyFootway(FixtureTest):
    """Tile-generation tests checking at which zoom various highway=footway
    features (national/regional routes, named paths, sidewalks, crossings)
    first appear in the 'roads' layer."""
    def test_footway_unnamed_national(self):
        # highway=footway, no name, route national (Pacific Crest Trail)
        self._run_test(
            ['https://www.openstreetmap.org/way/83076573',
            'https://www.openstreetmap.org/relation/1225378'],
            11, 344, 790)
        # highway=footway, no name, route national (Pacific Crest Trail)
        self._run_test([
            'https://www.openstreetmap.org/way/372066789',
            'https://www.openstreetmap.org/relation/1225378',
        ], 11, 345, 790)
    def test_footway_unnamed_regional(self):
        # highway=footway, with name, and route regional (Rodeo Valley
        # Trail, Marin)
        self._run_test([
            'https://www.openstreetmap.org/way/239141479',
            'https://www.openstreetmap.org/relation/2684235',
        ], 12, 654, 1582)
    def test_footway_with_designation(self):
        # highway=footway, with designation (Ocean Beach north, SF)
        self._run_test([
            'https://www.openstreetmap.org/way/161702316',
        ], 13, 1308, 3166)
    def test_footway_with_name(self):
        # highway=footway, with name (Coastal Trail, Marin)
        self._run_test([
            'https://www.openstreetmap.org/way/24526324',
        ], 13, 1308, 3164)
        # highway=footway, with name (Coastal Trail, SF)
        self._run_test([
            'https://www.openstreetmap.org/way/27553452',
        ], 13, 1308, 3166)
        # highway=footway, with name (Lovers Lane, SF)
        self._run_test([
            'https://www.openstreetmap.org/way/69020102',
        ], 13, 1309, 3165)
    def test_sidewalk(self):
        # SF State
        # Sidewalks must be absent at z14 but present at z15.
        self.load_fixtures(['https://www.openstreetmap.org/way/346093021'])
        self.assert_no_matching_feature(
            14, 2617, 6335, 'roads',
            {'kind': 'path', 'footway': 'sidewalk'})
        self.assert_has_feature(
            15, 5235, 12671, 'roads',
            {'kind': 'path', 'footway': 'sidewalk'})
    def test_crossing(self):
        # SF in the Avenues
        # Crossings must be absent at z14 but present at z15.
        self.load_fixtures(['https://www.openstreetmap.org/way/344205837'])
        self.assert_no_matching_feature(
            14, 2617, 6333, 'roads',
            {'id': 344205837, 'kind': 'path', 'footway': 'sidewalk'})
        self.assert_has_feature(
            15, 5234, 12667, 'roads',
            {'kind': 'path', 'footway': 'crossing'})
    def _run_test(self, urls, z, x, y):
        """Load the given OSM fixtures and assert a footway feature exists
        in the 'roads' layer at tile (z, x, y)."""
        self.load_fixtures(urls)
        self.assert_has_feature(
            z, x, y, 'roads',
            {'kind_detail': 'footway'})
| StarcoderdataPython |
1890274 | import unittest
from metaheuristic_algorithms.firefly_algorithm import FireflyAlgorithm
from metaheuristic_algorithms.function_wrappers.nonsmooth_multipeak_function_wrapper import NonsmoothMultipeakFunctionWrapper
class TestFireflyAlgorithm(unittest.TestCase):
    """Smoke test for FireflyAlgorithm on the non-smooth multipeak function."""
    def test_find_glocal_maximum_for_nonsmooth_multipeak_function(self):
        wrapper = NonsmoothMultipeakFunctionWrapper()
        algorithm = FireflyAlgorithm(wrapper, 2, "maximization")
        search_kwargs = {
            "number_of_fireflies": 10,
            "maximun_generation": 10,  # (sic) keyword name required by the library
            "randomization_parameter_alpha": 0.2,
            "absorption_coefficient_gamma": 1.0,
        }
        result = algorithm.search(**search_kwargs)
        # TODO: Improve accuracy:
        self.assertAlmostEqual(result["best_decision_variable_values"][0], 2.8327, delta=5)
        self.assertAlmostEqual(result["best_decision_variable_values"][1], -0.0038, delta=5)
        self.assertAlmostEqual(result["best_objective_function_value"], 3.4310, delta=5)
if __name__ == '__main__':
unittest.main() | StarcoderdataPython |
4891001 | # Generated by Django 3.0.6 on 2020-06-01 17:08
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    # Auto-generated Django migration; do not edit the operations by hand.
    dependencies = [
        ('lots', '0002_auto_20200601_1708'),
    ]
    operations = [
        migrations.AlterField(
            model_name='lot',
            name='expires_at',
            # NOTE(review): the default was evaluated at makemigrations time,
            # so it is frozen at 2020-06-04 17:08:25 UTC rather than being a
            # callable "now + 3 days" — presumably unintended in the model,
            # but the migration itself must stay as generated.
            field=models.DateTimeField(default=datetime.datetime(2020, 6, 4, 17, 8, 25, tzinfo=utc)),
        ),
    ]
| StarcoderdataPython |
3256020 | from pyCardDeck import *
# noinspection PyPep8Naming
def test_BaseCard():
    """str() shows the card name; repr() shows the constructor-style form."""
    generic = BaseCard("BaseCard")
    assert str(generic) == "BaseCard"
    assert repr(generic) == "BaseCard({'name': 'BaseCard'})"
# noinspection PyPep8Naming
def test_PokerCard():
    """A poker card renders as '<name> of <suit>' and keeps its rank."""
    jack = PokerCard("Hearts", "J", "Jack")
    assert str(jack) == "Jack of Hearts"
    assert jack.rank == "J"
    assert repr(jack).startswith("PokerCard({'")
| StarcoderdataPython |
5086955 | # -*- coding: utf-8 ; test-case-name: bridgedb.test.test_configure -*-
#
# This file is part of BridgeDB, a Tor bridge distribution system.
#
# :authors: please see the AUTHORS file for attributions
# :copyright: (c) 2013-2015, Isis Lovecruft
# (c) 2013-2015, <NAME>
# (c) 2007-2015, <NAME>
# (c) 2007-2015, The Tor Project, Inc.
# :license: see LICENSE for licensing information
"""Utilities for dealing with configuration files for BridgeDB."""
import logging
import os
# Used to set the SUPPORTED_TRANSPORTS:
from bridgedb import strings
def loadConfig(configFile=None, configCls=None):
    """Load configuration settings on top of the current settings.
    All pathnames and filenames within settings in the ``configFile`` will be
    expanded, and their expanded values will be stored in the returned
    :class:`configuration <bridgedb.configure.Conf>` object.
    **Note:**
    On the strange-looking use of::
        exec compile(open(configFile).read(), '<string>', 'exec') in dict()
    in this function…
    The contents of the config file should be compiled first, and then passed
    to ``exec()`` -- not ``execfile()`` ! -- in order to get the contents of
    the config file to exist within the scope of the configuration dictionary.
    Otherwise, Python *will* default_ to executing the config file directly
    within the ``globals()`` scope.
    Additionally, it's roughly 20-30 times faster_ to use the ``compile()``
    builtin on a string (the contents of the file) before passing it to
    ``exec()``, than using ``execfile()`` directly on the file.
    .. _default: http://stackoverflow.com/q/17470193
    .. _faster: http://lucumr.pocoo.org/2011/2/1/exec-in-python/
    :ivar bool itsSafeToUseLogging: This is called in
        :func:`bridgedb.main.run` before
        :func:`bridgedb.safelog.configureLogging`. When called from
        :func:`~bridgedb.main.run`, the **configCls** parameter is not given,
        because that is the first time that a
        :class:`config <bridgedb.configure.Conf>` has been created. If a
        :class:`logging.Logger` is created in this function, then logging will
        not be correctly configured, therefore, if the **configCls** parameter
        is not given, then it's the first time this function has been called
        and it is therefore *not* safe to make calls to the logging module.
    :type configFile: :any:`str` or ``None``
    :param configFile: If given, the filename of the config file to load.
    :type configCls: :class:`bridgedb.configure.Conf` or ``None``
    :param configCls: The current configuration instance, if one already
        exists.
    :returns: A new :class:`configuration <bridgedb.configure.Conf>`, with the
        old settings as defaults, and the settings from the **configFile** (if
        given) overriding those defaults.
    """
    itsSafeToUseLogging = False
    configuration = {}
    if configCls:
        itsSafeToUseLogging = True
        oldConfig = configCls.__dict__
        configuration.update(**oldConfig) # Load current settings
        logging.info("Reloading over in-memory configurations...")
    # Fall back to the previously-recorded CONFIG_FILE if no file was given.
    conffile = configFile
    if (configFile is None) and ('CONFIG_FILE' in configuration):
        conffile = configuration['CONFIG_FILE']
    if conffile is not None:
        if itsSafeToUseLogging:
            logging.info("Loading settings from config file: '%s'" % conffile)
        # Execute the config file inside the `configuration` dict's scope
        # (see the docstring for why compile+exec is used here).
        compiled = compile(open(conffile).read(), '<string>', 'exec')
        exec(compiled, configuration)
    if itsSafeToUseLogging:
        logging.debug("New configuration settings:")
        logging.debug("\n".join(["{0} = {1}".format(key, value)
                                 for key, value in configuration.items()
                                 if not key.startswith('_')]))
    # Create a :class:`Conf` from the settings stored within the local scope
    # of the ``configuration`` dictionary:
    config = Conf(**configuration)
    # We want to set the updated/expanded paths for files on the ``config``,
    # because the copy of this config, `state.config` is used later to compare
    # with a new :class:`Conf` instance, to see if there were any changes.
    #
    # See :meth:`bridgedb.persistent.State.useUpdatedSettings`.
    # List-valued path settings: default to [] and expand each entry.
    for attr in ["PROXY_LIST_FILES"]:
        setting = getattr(config, attr, None)
        if setting is None:  # pragma: no cover
            setattr(config, attr, []) # If they weren't set, make them lists
        else:
            setattr(config, attr, # If they were set, expand the paths:
                    [os.path.abspath(os.path.expanduser(f)) for f in setting])
    # Scalar path settings: default to None and expand the path when set.
    for attr in ["DB_FILE", "DB_LOG_FILE", "MASTER_KEY_FILE", "PIDFILE",
                 "ASSIGNMENTS_FILE", "HTTPS_CERT_FILE", "HTTPS_KEY_FILE",
                 "MOAT_CERT_FILE", "MOAT_KEY_FILE",
                 "LOG_FILE", "COUNTRY_BLOCK_FILE",
                 "GIMP_CAPTCHA_DIR", "GIMP_CAPTCHA_HMAC_KEYFILE",
                 "GIMP_CAPTCHA_RSA_KEYFILE", "EMAIL_GPG_HOMEDIR",
                 "EMAIL_GPG_PASSPHRASE_FILE", "NO_DISTRIBUTION_FILE"]:
        setting = getattr(config, attr, None)
        if setting is None:
            setattr(config, attr, setting)
        else:
            setattr(config, attr, os.path.abspath(os.path.expanduser(setting)))
    # Rotation periods default to None when unset.
    for attr in ["MOAT_ROTATION_PERIOD",
                 "HTTPS_ROTATION_PERIOD",
                 "EMAIL_ROTATION_PERIOD"]:
        setting = getattr(config, attr, None)  # Default to None
        setattr(config, attr, setting)
    # Boolean feature flags default to True when unset.
    for attr in ["IGNORE_NETWORKSTATUS",
                 "MOAT_CSP_ENABLED",
                 "MOAT_CSP_REPORT_ONLY",
                 "MOAT_CSP_INCLUDE_SELF",
                 "CSP_ENABLED",
                 "CSP_REPORT_ONLY",
                 "CSP_INCLUDE_SELF"]:
        setting = getattr(config, attr, True)  # Default to True
        setattr(config, attr, setting)
    for attr in ["FORCE_PORTS", "FORCE_FLAGS", "NO_DISTRIBUTION_COUNTRIES"]:
        setting = getattr(config, attr, [])  # Default to empty lists
        setattr(config, attr, setting)
    for attr in ["SUPPORTED_TRANSPORTS"]:
        setting = getattr(config, attr, {})  # Default to empty dicts
        setattr(config, attr, setting)
    # Set the SUPPORTED_TRANSPORTS to populate the webserver and email options:
    strings._setSupportedTransports(getattr(config, "SUPPORTED_TRANSPORTS", {}))
    strings._setDefaultTransport(getattr(config, "DEFAULT_TRANSPORT", ""))
    logging.info("Currently supported transports: %s" %
                 " ".join(strings._getSupportedTransports()))
    logging.info("Default transport: %s" % strings._getDefaultTransport())
    # Each plain email domain maps to itself in the domain map.
    for domain in config.EMAIL_DOMAINS:
        config.EMAIL_DOMAIN_MAP[domain] = domain
    if conffile: # Store the pathname of the config file, if one was used
        config.CONFIG_FILE = os.path.abspath(os.path.expanduser(conffile))
    return config
class Conf(object):
    """A configuration object. Holds unvalidated attributes.

    Only ALL-UPPERCASE keyword names that do not start with a double
    underscore are stored; every other keyword is silently ignored.
    """
    def __init__(self, **attrs):
        for name, value in attrs.items():
            if name == name.upper() and not name.startswith('__'):
                setattr(self, name, value)
| StarcoderdataPython |
6652975 | import traceback
from copy import deepcopy
from alarm.page.ding_talk import DingTalk
from hupun_operator.page.warehouse.extra_store_query import ExtraStoreQuery
from hupun_operator.page.warehouse.extra_store_setting import ExtraStoreSetting
from hupun_slow_crawl.model.es.store_house import StoreHouse
from mq_handler.base import Base
from pyspider.helper.crawler_utils import CrawlerHelper
from pyspider.helper.date import Date
from pyspider.helper.input_data import InputData
class WarehouseStoreSet(Base):
    """
    Hupun (万里牛): split orders that contain a "pre-sale SKU".
    Updates the shop-to-warehouse mapping in the "warehouse matching"
    exception-shop settings.
    """
    # Shop names and their matching warehouse uids (filled by set_store_config).
    storage_uid_shop = []
    # DingTalk robot token used for failure alerts.
    ROBOT_TOKEN = '58c52b735767dfea3be10898320d4cf11af562b4b6ac6a2ea94be7de722cfbf9'
    def execute(self):
        """Entry point: validate the incoming payload and apply each
        shop/warehouse mapping; alert via DingTalk on any failure."""
        print('更改【仓库匹配】的例外店铺设置中的店铺和仓库对应关系')
        self.print_basic_info()
        # Apply the data.
        try:
            if not isinstance(self._data.get('data'), list):
                raise Exception('传入的数据格式不对')
            # Query the current exception-shop settings.
            store_query = ExtraStoreQuery().set_cookie_position(1)
            store_query_st, store_query_re = CrawlerHelper.get_sync_result(store_query)
            if store_query_st == 1 or not store_query_re or not isinstance(store_query_re, list):
                raise Exception('未查询到例外店铺设置的数据,error:{}'.format(store_query_re))
            # Start applying the shop-to-warehouse mappings.
            for data in self._data.get('data'):
                shop_name = InputData(data).get_str('channel')
                storage_code = InputData(data).get_str('storage_code')
                if not shop_name or not storage_code:
                    raise Exception('传入的shop_name或者storage_code不符合要求')
                print('开始操作店铺:{}的仓库:{}设置'.format(shop_name, storage_code))
                self.set_store_config(shop_name, storage_code, store_query_re)
                print('完成操作店铺:{}的仓库:{}设置'.format(shop_name, storage_code))
            # Change the inventory configuration.
            # Update: per business request, the inventory-config change is disabled.
            # self.change_inventory_setting()
            print('发送更改库存配置完成')
        except Exception as e:
            err_msg = '更改【仓库匹配】的例外店铺设置中的店铺和仓库对应关系error:{};'.format(e)
            print(err_msg)
            print(traceback.format_exc())
            # Send a DingTalk alert.
            print('发送钉钉报警')
            title = '更改【仓库匹配】的例外店铺设置中的店铺和仓库对应关系异常'
            text = err_msg
            DingTalk(self.ROBOT_TOKEN, title, text).enqueue()
    def set_store_config(self, shop_name, storage_code, store_query_re):
        """
        Perform the "add exception-shop setting" operation for one shop.
        :param shop_name: shop name (substring-matched against shopName).
        :param storage_code: warehouse code to bind the shop to.
        :param store_query_re: list of current exception-shop settings.
        :return: None; raises on failure.
        """
        print('开始执行添加例外店铺设置的操作')
        # Input data.
        input_shop_name = shop_name
        store_query_re = deepcopy(store_query_re)
        # Find the shop entry; raise if no entry's name contains the input.
        aim_shop_data = {}
        for index, re in enumerate(store_query_re):
            shop_whole_name = re['shopName']
            if input_shop_name not in shop_whole_name and index == len(store_query_re) - 1:
                raise Exception('查不到传入的店铺:{}'.format(input_shop_name))
            if input_shop_name in shop_whole_name:
                aim_shop_data = deepcopy(re)
                break
        shop_uid = aim_shop_data['shopUid']
        shop_whole_name = aim_shop_data['shopName']
        print('shop_uid', shop_uid)
        print('shop_whole_name', shop_whole_name)
        storage_uid, storage_name = StoreHouse().get_uid_and_name_by_code(storage_code)
        if not storage_uid or not storage_name:
            raise Exception('没有查询到storage_code:{}对应的storage_name或者storage_uid'.format(storage_code))
        print('storage_uid', storage_uid)
        print('storage_name', storage_name)
        # Add/update the exception-shop setting.
        re = ExtraStoreSetting(shop_uid, shop_whole_name, storage_uid, storage_name) \
            .set_cookie_position(1) \
            .get_result()
        # Delete the exception-shop setting (kept for reference, disabled):
        # re = ExtraStoreDelete(shop_uid, shop_whole_name, storage_name) \
        #     .set_cookie_position(1) \
        #     .get_result()
        print('更改例外店铺设置之后的返回结果:{}'.format(re))
        if isinstance(re, dict):
            print('设置例外店铺:{}成功'.format(input_shop_name))
            # Record the warehouse uid for the inventory configuration.
            self.storage_uid_shop.append({
                'shop_name': shop_name,
                'storage_uid': storage_uid,
            })
        else:
            err_msg = '设置例外店铺:{}失败, 返回的结果:{}'.format(input_shop_name, re)
            raise Exception(err_msg)
    def change_inventory_setting(self):
        """
        Change the inventory configuration (currently not called; see execute).
        :return: None
        """
        # General configuration change for inventory synchronization.
        # Assemble the warehouse uid corresponding to each shop.
        data = {'data': [
            {
                "storage_uid": "D1E338D6015630E3AFF2440F3CBBAFAD",  # defaults to the main warehouse; replaced below if another applies
                "shop_name": "icy旗舰店",  # Tmall flagship store
                'open_auto': True,  # whether auto-upload is enabled
                "quantity_type": "可用库存+在途库存",  # which stock figure to sync
                "upload_ratio": 100,  # stock sync ratio
                "upload_beyond": 0  # extra stock quantity added on sync
            },
            {
                "storage_uid": "D1E338D6015630E3AFF2440F3CBBAFAD",
                "shop_name": "穿衣助手旗舰店",  # JD.com
                'open_auto': True,  # whether auto-upload is enabled
                "quantity_type": "可用库存+在途库存",  # which stock figure to sync
                "upload_ratio": 100,  # stock sync ratio
                "upload_beyond": 0  # extra stock quantity added on sync
            },
            {
                "storage_uid": "D1E338D6015630E3AFF2440F3CBBAFAD",
                "shop_name": "iCY设计师集合店",  # APP
                'open_auto': True,  # whether auto-upload is enabled
                "quantity_type": "可用库存+在途库存",  # which stock figure to sync
                "upload_ratio": 100,  # stock sync ratio
                "upload_beyond": 0  # extra stock quantity added on sync
            },
            {
                "storage_uid": "D1E338D6015630E3AFF2440F3CBBAFAD",
                "shop_name": "ICY小红书",  # Xiaohongshu (RED)
                'open_auto': True,  # whether auto-upload is enabled
                "quantity_type": "可用库存+在途库存",  # which stock figure to sync
                "upload_ratio": 100,  # stock sync ratio
                "upload_beyond": 0  # extra stock quantity added on sync
            },
            {
                "storage_uid": "D1E338D6015630E3AFF2440F3CBBAFAD",
                "shop_name": "ICY奥莱",  # outlet store
                'open_auto': True,  # whether auto-upload is enabled
                "quantity_type": "可用库存",  # which stock figure to sync
                "upload_ratio": 100,  # stock sync ratio
                "upload_beyond": 0  # extra stock quantity added on sync
            },
            {
                "storage_uid": "D1E338D6015630E3AFF2440F3CBBAFAD",
                "shop_name": "ICY唯品会",  # Vipshop
                'open_auto': True,  # whether auto-upload is enabled
                "quantity_type": "可用库存",  # which stock figure to sync
                "upload_ratio": 100,  # stock sync ratio
                "upload_beyond": 0  # extra stock quantity added on sync
            },
            {
                "storage_uid": "D1E338D6015630E3AFF2440F3CBBAFAD",
                "shop_name": "ICY设计师平台",  # Taobao
                'open_auto': True,  # whether auto-upload is enabled
                "quantity_type": "可用库存",  # which stock figure to sync
                "upload_ratio": 100,  # stock sync ratio
                "upload_beyond": 0  # extra stock quantity added on sync
            }
        ]}
        # Substitute the recorded warehouse uid per shop; raise if missing.
        for inner_data in data['data']:
            for s_index, storage_item in enumerate(self.storage_uid_shop):
                if storage_item['shop_name'] == inner_data['shop_name']:
                    inner_data['storage_uid'] = storage_item['storage_uid']
                    break
                elif s_index == len(self.storage_uid_shop) - 1:
                    raise Exception('店铺:{}找不到对应的仓库uid'.format(inner_data['shop_name']))
        data_id = '2340349'
        from pyspider.libs.mq import MQ
        from mq_handler import CONST_MESSAGE_TAG_SYNC_COMMON_INV, CONST_ACTION_UPDATE
        MQ().publish_message(CONST_MESSAGE_TAG_SYNC_COMMON_INV, data, data_id, Date.now().timestamp(),
                             CONST_ACTION_UPDATE)
| StarcoderdataPython |
3395407 | #!/usr/bin/env python3
""
z = ''
x = "m"
"""
Documentation
# a comment inside of a documentation comment
"""
def test():
print('##nope#####')
'''
documentation
#another comment inside of a documentation comment
\"
\'''
\''\'
\'\''
'''
x = "#not a comment"
y = "# and another non-comment"
z = '#not a comment'
y = '# and another non-comment'
z = " # yup\" # NOOOOOO \" \" #are you sure about that? \' #yup \' \' # you sure 100%?"
z = ' #yup \" # NOOOOOO \" \" #are you sure about that? \' #yup \' \' # you sure 100%?'
if x == '####### really?':
print('####yup')
z = ""
"""
a
sdasd
"""
| StarcoderdataPython |
6654455 | """
## SCRIPT HEADER ##
Created By : <NAME>
Email : <EMAIL>
Start Date : 02 May 2021
Info :
"""
import FrMaya.core as fmc
import pymel.core as pm
def select_joint_from_skincluster():
    """Select every joint driving the skinClusters of the current
    (ordered) Maya selection."""
    sel = pm.ls(os=True)
    skin_nodes = []
    for node in sel:
        skin_nodes.extend(fmc.get_skincluster_nodes(node))
    # print() works on both Python 2 and 3; the original bare
    # `print skin_nodes` statement is a SyntaxError under Python 3.
    print(skin_nodes)
    joint_list = []
    for skin_node in skin_nodes:
        joint_list.extend(fmc.get_skincluster_info(skin_node)['joint_list'])
    pm.select(joint_list)
# Run the selection helper immediately when this script is executed in Maya.
select_joint_from_skincluster()
| StarcoderdataPython |
1929081 | <reponame>Leaflowave/PrivCQ<gh_stars>0
import group_frequency_oracle as freq
import linecache
import random
def query_on_adult_dim2(oraclePath,oracleInterval,queryPath,trueOraclePath,aggregation="count"):
    """Answer 500 range queries against noisy frequency oracles and compare
    with the true oracle.

    Each query is answered 10 times using randomly chosen noisy oracles and
    averaged. Returns (answer, TrueAnswer, mean signed relative error,
    mean signed average error).

    NOTE(review): queryPath/trueOraclePath are assumed to hold a single line
    of a Python literal (they are read with linecache and eval'd) — confirm
    the file format against the data-generation code; eval on untrusted
    files is unsafe.
    """
    # adult_2 equal 5 and 7
    queriesStr=linecache.getline(queryPath,1)
    queries=eval(queriesStr)
    answer=[0]*500
    trueOracleStr=linecache.getline(trueOraclePath,1)
    trueOracle= eval(trueOracleStr)
    # n: total population size, used to floor the relative-error denominator.
    n=sum([sum(trueOracle[k].values()) for k in trueOracle.keys()])
    TrueAnswer=[0]*500
    relativeError = 0
    averageError=0
    # NOTE(review): the two "abs" accumulators below are initialized but
    # never updated or returned.
    absrelativeError=0
    absaverageError=0
    for i in range(1,501):
        # Repeat each query 10 times with a random noisy oracle, then average.
        for _ in range(10):
            kthoracle=random.randint(1,500)
            # kthoracle=_+1
            oracle=freq.group_frequency_oracle(oraclePath, oracleInterval,k_th_oracle=kthoracle)
            if aggregation=="count":
                count_value=0
                true_count_value=0
                # print(i)
                # print(queries[i-1])
                # for k1 in range(queries[i - 1][0][0], queries[i - 1][0][1] + 1):
                # for k2 in range(queries[i - 1][1][0], queries[i - 1][1][1] + 1):
                for j in oracle.keys():
                    count_value+=oracle[j][queries[i-1]]
                    true_count_value += trueOracle[j][queries[i - 1]]
                answer[i-1]+=count_value
                TrueAnswer[i-1]+=true_count_value
                # averageError += count_value - true_count_value
                # relativeError+= (abs(count_value - true_count_value))/max(0.001*n,float(true_count_value))
            elif aggregation=="sum":
                sum_value = 0
                true_sum_value = 0
                # for k1 in range(queries[i - 1][0][0], queries[i - 1][0][1] + 1):
                # for k2 in range(queries[i - 1][1][0], queries[i - 1][1][1] + 1):
                for j in oracle.keys():
                    # Sum aggregation weights each group's count by its key j.
                    sum_value += j*oracle[j][queries[i-1]]
                    true_sum_value += j*trueOracle[j][queries[i - 1]]
                answer[i-1]+=sum_value
                TrueAnswer[i-1]+=true_sum_value
                # averageError += sum_value - true_sum_value
                # relativeError += (abs(sum_value - true_sum_value)) /max(0.001*n,float(true_sum_value))
        answer[i - 1] /= 10.0
        TrueAnswer[i - 1] /= 10.0
        # absrelativeError += (abs(answer[i - 1] - TrueAnswer[i - 1])) / max(0.001 * n, float(TrueAnswer[i - 1]))
        # Signed errors (positive and negative deviations can cancel).
        relativeError += (answer[i - 1] - TrueAnswer[i - 1]) / max(0.001 * n, float(TrueAnswer[i - 1]))
        averageError += answer[i - 1] - TrueAnswer[i - 1]
        # absaverageError+= abs(answer[i - 1] - TrueAnswer[i - 1])
    return answer,TrueAnswer,relativeError/500,averageError/500
if __name__ == '__main__':
    # Run the count and sum experiments and write results to text files.
    oraclePath = "experiments//adult_2_gr_results.txt"
    oracleInterval = 33
    queryPath = "experiments//adult_query_5_7_9.txt"
    trueOraclePath = "adult//adult5.txt"
    ans, Trueans,relativeError, averageError = query_on_adult_dim2(oraclePath, oracleInterval,
                                                                   queryPath,
                                                                   trueOraclePath,
                                                                   aggregation="count")
    print(relativeError)
    with open("experiments//final_adult_2_count_gr.txt", "w+") as f:
        f.write(str(ans) + "\n")
        f.write("true ans"+str(Trueans)+"\n")
        f.write("relativeError:" + str(relativeError) + "\n")
        f.write("averageError:" + str(averageError) + "\n")
    ans, Trueans,relativeError, averageError = query_on_adult_dim2(oraclePath, oracleInterval,
                                                                   queryPath,
                                                                   trueOraclePath,
                                                                   aggregation="sum")
    print(relativeError)
    with open("experiments//final_adult_2_sum_gr.txt", "w+") as f:
        f.write(str(ans) + "\n")
        f.write("true ans" + str(Trueans) + "\n")
        f.write("relativeError:" + str(relativeError) + "\n")
        f.write("averageError:" + str(averageError) + "\n")
| StarcoderdataPython |
6447421 | <reponame>Darlingcris/Desafios-Python<filename>desafio086b.py<gh_stars>0
# Exercise (translated): declare a 3x3 matrix, fill it with values read from
# the keyboard, and print the matrix with proper formatting.
# The three copy-pasted per-row loops were collapsed into one nested loop;
# prompts and output are byte-identical to the original.
matriz = [[], [], []]
for lin in range(0, 3):
    for col in range(0, 3):
        valor = int(input(f"Digite um valor para [{lin},{col}]: "))
        matriz[lin].append(valor)
# Print one row per line (f"{row}" renders the list exactly as before).
for linha in matriz:
    print(f"{linha}")
| StarcoderdataPython |
4876363 | <reponame>Chenct-jonathan/LokiHub
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
from pprint import pprint
import os
import json
from latent_search_engine import se
from nlu.IOHbot import LokiResult, runLoki
from linebot.models import (
MessageEvent, TextMessage, TextSendMessage,
)
from linebot.exceptions import (
InvalidSignatureError
)
from linebot import (
LineBotApi, WebhookHandler
)
from flask import Flask, request, abort
with open("./linebot.json", encoding="utf-8") as f:
account_dict = json.loads(f.read())
LINE_ACCESS_TOKEN = account_dict["LINE_ACCESS_TOKEN"]
LINE_CHANNEL_SECRET = account_dict["LINE_CHANNEL_SECRET"]
app = Flask(__name__)
line_bot_api = LineBotApi(LINE_ACCESS_TOKEN)
handler = WebhookHandler(LINE_CHANNEL_SECRET)
@app.route("/callback", methods=['POST'])
def callback():
    """LINE webhook endpoint: verify the request signature and dispatch
    the events to the registered handlers; 400 on a bad signature."""
    # get X-Line-Signature header value
    signature = request.headers['X-Line-Signature']
    # get request body as text
    body = request.get_data(as_text=True)
    # (removed: an unused `msg = request.get_json()` that re-parsed the body)
    app.logger.info("Request body: " + body)
    # handle webhook body
    try:
        handler.handle(body, signature)
    except InvalidSignatureError:
        print("Invalid signature. Please check your channel access token/channel secret.")
        abort(400)
    return 'OK'
from collections import defaultdict
# Per-user accumulated Loki keywords across messages, keyed by LINE user id.
session = defaultdict(list)
@handler.add(MessageEvent, message=TextMessage)
def handle_message(event):
    """Handle a text message: extract Loki keywords, intersect candidate
    articles across the user's accumulated keywords, and reply with results,
    a "no match" message, or a "narrow it down" prompt."""
    text = event.message.text
    inputLIST = [text]
    filterLIST = []
    resultDICT = runLoki(inputLIST, filterLIST)
    # Default fallback reply when nothing below overrides `content`.
    content = "不好意思,我看不懂,要不要試著像個人類一樣說話呢?"
    loki_keys = resultDICT.values()
    flat_loki_keys = [item for sublist in loki_keys for item in sublist]
    user_id = event.source.user_id
    # Accumulate this user's keywords across turns.
    session[user_id].extend(flat_loki_keys)
    query_machine = se.HippoChamber()
    query_machine.vectorize()
    # num == -1 marks "first keyword": seed the set instead of intersecting.
    num = -1
    keys = set()
    keywords = session[user_id]
    for key in keywords:
        sim_sorted = query_machine.get_similar_articles(query=key)
        key_list = [k for k, v in sim_sorted if v > 0.0]
        print(key, len(key_list))
        if num == -1:
            keys = set(key_list)
        else:
            print(type(keys))
            keys = set(key_list) & keys
        num = len(keys)
        print(num)
    # With no keywords at all, num stays -1; clamp so it falls into a branch
    # below. NOTE(review): that makes "no keywords" reply the same as
    # "no matches" — presumably intended, but worth confirming.
    num = max(num, 0)
    if 0 < num <= 10:
        # Few enough matches: reply with the matching articles and reset.
        result = list(keys)
        result_content = []
        for r in result:
            res = query_machine.source_doc[r]
            result_content.append(res[0])
            result_content.append(res[1])
        content = "\n".join(result_content)
        print(content)
        session[user_id] = []
    elif num == 0:
        content = "不好意思,目前IOH還沒有相關科系的分享"
        session[user_id] = []
    else:
        # Too many matches: keep the session and ask for more hints.
        content = f"哇!你有興趣的內容在IOH上面有{num}篇相關的分享,請再多給我一點提示"
    line_bot_api.reply_message(
        event.reply_token,
        TextSendMessage(text=content))
if __name__ == "__main__":
    # Bind to the platform-assigned port (e.g. Heroku's $PORT) or default 5000.
    port = int(os.environ.get('PORT', 5000))
    app.run(host='0.0.0.0', port=port)
| StarcoderdataPython |
317089 | <reponame>stevemk14ebr/swiffas
from . import swfparse
from . import avm2
# make top level parsers also module top level
from .swfparse import SWFParser
from .avm2 import ABCFile | StarcoderdataPython |
9605318 | <gh_stars>0
from .np_blockwise import blockwise_expand, blockwise_contract
from .np_rand3drot import random_rotation_matrix
from .scipy_hungarian import linear_sum_assignment
from .gph_uno_bipartite import uno
#from .mpl import plot_coord
from .misc import (distance_matrix, update_with_error, standardize_efp_angles_units, filter_comments, unnp,
compute_distance, compute_angle, compute_dihedral, measure_coordinates)
from .internal import provenance_stamp
from .itertools import unique_everseen
from .importing import parse_version, safe_version, which, which_import
| StarcoderdataPython |
11249974 | # Copyright 2021 <NAME>, alvarobartt @ GitHub
# See LICENSE for details.
import streamlit as st
import requests
from utils import image2tensor, prediction2label
from constants import REST_URL, MAPPING
# General information about the UI
st.title("TensorFlow Serving + Streamlit! ✨🖼️")
st.header("UI to use a TensorFlow image classification model of The Simpsons characters (named SimpsonsNet) served with TensorFlow Serving.")
# Show which are the classes that the SimpsonsNet model can predict
if st.checkbox("Show classes"):
    st.write("The SimpsonsNet can predict the following characters:")
    st.write(MAPPING)
# Create a FileUploader so that the user can upload an image to the UI
uploaded_file = st.file_uploader(label="Upload an image of any of the available The Simpsons characters (please see Classes).",
                                 type=["png", "jpeg", "jpg"])
# Display the predict button just when an image is being uploaded
if not uploaded_file:
    st.warning("Please upload an image before proceeding!")
    st.stop()
else:
    image_as_bytes = uploaded_file.read()
    st.image(image_as_bytes, use_column_width=True)
    pred_button = st.button("Predict")
    if pred_button:
        # Converts the input image into a Tensor
        image_tensor = image2tensor(image_as_bytes=image_as_bytes)
        # Prepare the data that is going to be sent in the POST request
        # (TensorFlow Serving REST API expects {"instances": [...]}).
        json_data = {
            "instances": image_tensor
        }
        # Send the request to the Prediction API
        # NOTE(review): no status-code/error handling here — a serving outage
        # will surface as a raw exception in the UI.
        response = requests.post(REST_URL, json=json_data)
        # Retrieve the highest probablity index of the Tensor (actual prediction)
        prediction = response.json()['predictions'][0]
        label = prediction2label(prediction=prediction)
        # Write the predicted label for the input image
        st.write(f"Predicted The Simpsons character is: {label}")
1906601 | import tensorflow as tf
import _pickle as cPickle
from .format import CocoMeta,PoseInfo
def generate_train_data(train_imgs_path,train_anns_path,dataset_filter=None,input_kpt_cvter=lambda x: x):
    """Load COCO training images that contain valid people and build targets.

    Returns a pair ``(image_paths, targets)`` where each target dict carries
    the keypoints, mask, bounding boxes and a ``labeled`` flag for one image.
    (``input_kpt_cvter`` is accepted for interface compatibility but unused.)
    """
    data = PoseInfo(train_imgs_path, train_anns_path, with_mask=True, dataset_filter=dataset_filter)
    img_paths_list = data.get_image_list()
    target_list = [
        {"kpt": keypoints, "mask": seg_mask, "bbx": boxes, "labeled": 1}
        for keypoints, seg_mask, boxes in zip(data.get_kpt_list(), data.get_mask_list(), data.get_bbx_list())
    ]
    return img_paths_list, target_list
def generate_eval_data(val_imgs_path,val_anns_path,dataset_filter=None):
    """Load COCO evaluation images and return (file paths, image ids)."""
    coco_data = PoseInfo(val_imgs_path, val_anns_path, with_mask=False, dataset_filter=dataset_filter, eval=True)
    return coco_data.get_image_list(), coco_data.get_image_id_list()
9612954 | """Tests for editorial app."""
| StarcoderdataPython |
304981 | <reponame>w-cheng/docker-nlp
# Copyright (c) Jupyter Development Team.
from jupyter_core.paths import jupyter_data_dir
import subprocess
import os
import errno
import stat
# get_config() is provided by Jupyter when this file is loaded as a
# notebook server configuration file.
c = get_config()
# Listen on all interfaces so the notebook is reachable from outside the container.
c.NotebookApp.ip = '*'
c.NotebookApp.port = 8888
c.NotebookApp.open_browser = False
# Empty token disables token authentication (a password may be set below instead).
c.NotebookApp.token = ''
# Set a password if PASSWORD is set
if 'PASSWORD' in os.environ:
    from IPython.lib import passwd
    c.NotebookApp.password = passwd(os.environ['PASSWORD'])
    # Drop the plaintext password from the environment once it has been hashed.
    del os.environ['PASSWORD']
| StarcoderdataPython |
3572458 | import numpy as np
import pandas as pd
import random
import time
from sklearn.utils import shuffle
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import random
from torch.utils.data import DataLoader
from torch.nn.functional import relu,leaky_relu
from torch.nn import Linear
from torch.nn import BatchNorm1d
import networkx as nx
from rdkit import Chem
from torch_geometric.nn import global_max_pool as gmp
from torch_geometric import data as DATA
from torch_geometric.data import Data, DataLoader
from math import sqrt
from rdkit.Chem import AllChem
from torch_geometric.nn import GATConv
from torch_geometric.nn import global_add_pool, global_mean_pool
import matplotlib.pyplot as plt
import pickle
#Convert SMILES to graph representation
def smile_to_graph(smile):
    """Convert a SMILES string into a graph representation.

    Returns ``(atom_count, node_features, edge_index)`` where node features
    are normalized atom feature vectors and ``edge_index`` lists both
    directions of every bond.  Returns ``None`` if RDKit cannot parse the
    SMILES.
    """
    mol = Chem.MolFromSmiles(smile)
    if mol is None:
        return None
    c_size = mol.GetNumAtoms()
    # Normalize each atom's one-hot feature vector by its sum.
    features = []
    for atom in mol.GetAtoms():
        feat = atom_features(atom)
        features.append(feat / sum(feat))
    # Undirected bonds, expanded to a directed edge list via networkx.
    bonds = [[bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()] for bond in mol.GetBonds()]
    directed = nx.Graph(bonds).to_directed()
    edge_index = [[src, dst] for src, dst in directed.edges]
    return c_size, features, edge_index
# +
#Get compound features
def one_of_k_encoding_unk(x, allowable_set):
    """One-hot encode *x* over *allowable_set*; unknown values map to the last element."""
    target = x if x in allowable_set else allowable_set[-1]
    return [element == target for element in allowable_set]
def one_of_k_encoding(x, allowable_set):
    """Strict one-hot encoding: raise if *x* is not in *allowable_set*."""
    if x not in allowable_set:
        raise Exception("input {0} not in allowable set{1}:".format(x, allowable_set))
    return [x == element for element in allowable_set]
def atom_features(atom):
    """Build the per-atom feature vector for the graph branch.

    Concatenates: element symbol one-hot (44, unknown -> 'Unknown'),
    degree one-hot (11), total-H-count one-hot (11), implicit-valence
    one-hot (11) and an aromaticity flag (1) — 78 features in total,
    matching the GAT input width used elsewhere in this file.
    """
    return np.array(one_of_k_encoding_unk(atom.GetSymbol(),['C', 'N', 'O', 'S', 'F', 'Si', 'P', 'Cl', 'Br', 'Mg', 'Na','Ca', 'Fe', 'As', 'Al', 'I', 'B', 'V', 'K', 'Tl', 'Yb','Sb', 'Sn', 'Ag', 'Pd', 'Co', 'Se', 'Ti', 'Zn', 'H','Li', 'Ge', 'Cu', 'Au', 'Ni', 'Cd', 'In', 'Mn', 'Zr','Cr', 'Pt', 'Hg', 'Pb', 'Unknown']) +
                    one_of_k_encoding(atom.GetDegree(), [0, 1, 2, 3, 4, 5, 6,7,8,9,10]) +
                    one_of_k_encoding_unk(atom.GetTotalNumHs(), [0, 1, 2, 3, 4, 5, 6,7,8,9,10]) +
                    one_of_k_encoding_unk(atom.GetImplicitValence(), [0, 1, 2, 3, 4, 5, 6,7,8,9,10]) +
                    [atom.GetIsAromatic()])
# -
#Model architecture
class Net(torch.nn.Module):
    """GAT (compound graph) + 1D-CNN (protein sequence) affinity predictor.

    The graph branch consumes 78-dim atom features (see atom_features);
    the sequence branch consumes integer-encoded proteins of fixed length
    2000 (see seq_dict_fun).  Both branches are projected to 64 dims,
    concatenated, and passed through dense layers to a single output.
    """
    def __init__(self):
        super(Net, self).__init__()
        # SMILES graph branch
        self.conv1 = GATConv(78, 78, heads=2, dropout=0.1)
        self.conv2 = GATConv(78*2, 78*3, dropout=0.1)
        self.conv3 = GATConv(78*3, 78 * 4, dropout=0.1)
        self.fc_g1 = torch.nn.Linear(78*4, 256)
        self.bn2 = BatchNorm1d(256)
        self.fc_g2 = Linear(256, 64)
        ## Protein Sequences
        # NOTE(review): the conv treats the 2000 sequence positions as input
        # channels (in_channels=2000), convolving over the embedding axis —
        # this assumes a fixed sequence length of 2000.
        n_filters = 128
        self.embedding_xt = nn.Embedding(21 + 1, 128)
        self.conv_xt_1 = nn.Conv1d(in_channels=2000, out_channels=n_filters, kernel_size=3)
        self.conv_xt_2 = nn.Conv1d(in_channels= 128, out_channels= 128, kernel_size=5)
        self.conv_xt_3 = nn.Conv1d(in_channels=128, out_channels=32, kernel_size=8)
        self.fc1_xt1 = nn.Linear(32*11, 256)
        self.bn3 = BatchNorm1d(256)
        self.fc1_xt2 = nn.Linear(256,64)
        # Combined head
        self.fc12 = nn.Linear(2*64, 128)
        self.fc22 = nn.Linear(128, 64)
        self.out3 = nn.Linear(64, 1)
    def forward(self, data):
        """Run a forward pass on a torch_geometric batch with a .target attribute."""
        # get graph input
        x, edge_index, batch = data.x, data.edge_index, data.batch
        # get protein input
        target = data.target
        # Three GAT layers over the molecular graph.
        x = self.conv1(x, edge_index)
        x = F.relu(x)
        x = self.conv2(x, edge_index)
        x = F.relu(x)
        x = self.conv3(x, edge_index)
        x = F.relu(x)
        x = gmp(x, batch)          # global max pooling per molecule
        # Project the graph embedding to 64 dims.
        # BUGFIX: F.dropout defaults to training=True, so dropout previously
        # stayed active during model.eval(), making inference nondeterministic.
        # All dropout calls now pass training=self.training.
        x = F.relu(self.fc_g1(x))
        x = F.dropout(x, p=0.1, training=self.training)
        x = F.relu(self.fc_g2(x))
        # Protein branch: embed, three conv+max-pool stages, then two dense layers.
        embedded_xt = self.embedding_xt(target)
        conv_xt = F.relu(F.max_pool1d(self.conv_xt_1(embedded_xt),2))
        conv_xt = F.relu(F.max_pool1d(self.conv_xt_2(conv_xt),2))
        conv_xt = F.relu(F.max_pool1d(self.conv_xt_3(conv_xt), 2))
        xt = conv_xt.view(-1, 32*11)
        xt = F.relu(self.fc1_xt1(xt))
        xt = F.dropout(xt, p=0.1, training=self.training)
        xt = F.relu(self.fc1_xt2(xt))
        xt = F.dropout(xt, p=0.1, training=self.training)
        # Concatenate the two branch embeddings and regress the affinity.
        xc = torch.cat((x, xt), 1)
        xc = F.relu(self.fc12(xc))
        xc = F.dropout(xc, p=0.1, training=self.training)
        xc = F.relu(self.fc22(xc))
        xc = F.dropout(xc, p=0.1, training=self.training)
        out = self.out3(xc)
        return out
# +
#Calculate loss function
loss_fn = nn.MSELoss()
best_mse = 1000
calculated_mse = 1000
def mse(y,f):
    """Mean squared error between targets ``y`` and predictions ``f`` (numpy arrays)."""
    residual = y - f
    return (residual * residual).mean(axis=0)
# -
# ################################## Test Mode #####################################
# +
#Option 0 then use test set else use the sars_cov_2 test set
option=1
if (option==0):
    df = pd.read_csv("../data/Test_Compound_Viral_interactions_for_Supervised_Learning.csv")
else:
    df = pd.read_csv("../data/sars_cov_2_Compound_Viral_interactions_for_Supervised_Learning.csv")
protein_seqs = df['Sequence'].values.tolist()
# 21-letter amino-acid vocabulary; 'X' stands for unknown residues.
seq_voc_dic = "ACDEFGHIKLMNPQRSTVWXY"
# Map each residue to its integer index (0..20); index 21 is reserved for padding.
seq_dict = {voc:idx for idx,voc in enumerate(seq_voc_dic)}
seq_dict_len = len(seq_dict)
# Fixed encoded protein length, matching Net's conv_xt_1 in_channels.
max_seq_len = 2000
# +
#Process the protein sequence
def seq_dict_fun(prot):
    """Integer-encode a protein string into a fixed-length vector.

    Positions beyond the protein length keep the padding value 21;
    residues are mapped through the module-level ``seq_dict``.
    """
    encoded = np.full(max_seq_len, 21.0)
    for position, residue in enumerate(prot[:max_seq_len]):
        encoded[position] = seq_dict[residue]
    return encoded
# Replace any residue outside the vocabulary with the unknown token 'X'.
# BUGFIX: the original assigned into the string (protein_seqs[i][j] = 'X'),
# which raises TypeError because Python strings are immutable, and the same
# broken loop was duplicated a second time below.  Rebuild each sequence
# instead, so seq_dict_fun never hits a KeyError on an unexpected residue.
protein_seqs = [
    "".join(ch if ch in seq_voc_dic else "X" for ch in seq)
    for seq in protein_seqs
]
# Integer-encode every protein and stack into a single array
# (the intermediate element-by-element copy 'pt' was redundant).
PS = [seq_dict_fun(k) for k in protein_seqs]
protein_inputs = np.array(PS)
# -
smiles = df['canonical_smiles'].values.tolist()
y = df['pchembl_value'].values.tolist()
uniprot = df['uniprot_accession']
inchi = df['standard_inchi_key']
# +
#Get the features from graph to be used in the GAT model
smile_graph = {}
none_smiles = []
got_g = []
for smile in smiles:
    g = smile_to_graph(smile)
    if(g is None):
        # RDKit failed to parse this SMILES; record and skip it.
        print(smile)
        none_smiles.append(smile)
    else:
        got_g.append(smile)
        smile_graph[smile] = g
# -
#Get the features from graph model
data_features = []
data_edges = []
data_c_size = []
labels = []
data_list = []
for i in range(len(smiles)):
    # NOTE(review): this skips one specific truncated/invalid SMILES by exact
    # string match; any *other* unparseable SMILES would raise KeyError on the
    # smile_graph lookup below — confirm the input data is otherwise clean.
    if(smiles[i] == 'Nc1ccc([S+]2(=O)Nc3nccc[n+]3['):
        print(i)
    else:
        c_size, features, edge_index = smile_graph[smiles[i]]
        data_features.append(features)
        data_edges.append(edge_index)
        data_c_size.append(c_size)
        labels = y[i]
        target = protein_inputs[i]
        # Pack compound graph + affinity label into a torch_geometric Data object,
        # attaching the encoded protein as .target.
        GCNData = DATA.Data(x=torch.Tensor(features),
                            edge_index=torch.LongTensor(edge_index).transpose(1, 0),
                            y=torch.FloatTensor([labels]))
        GCNData.target = torch.LongTensor([target])
        GCNData.__setitem__('c_size', torch.LongTensor([c_size]))
        data_list.append(GCNData)
# +
#Load the test set and model
test_X = data_list
# batch_size=1 keeps prediction order aligned with the dataframe rows.
test_loader = DataLoader(test_X, batch_size=1, shuffle=False, drop_last=False)
device = torch.device('cpu')
model = Net().to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)
model.load_state_dict(torch.load('../models/gat_cnn_models/GAT_CNN_2000_3_pooling_checkpoint.pt',map_location=device))#['state_dict'])
# +
#Make the predictions on the test set
model.eval()
# NOTE(review): total_preds/total_labels Tensors are immediately shadowed by
# the Python lists below — the Tensor initializations look like dead code.
total_preds = torch.Tensor()
total_labels = torch.Tensor()
print("Predicting...")
total_pred = []
total_labels = []
with torch.no_grad():
    for data in test_loader:
        data = data.to(device)
        output = model(data)
        total_labels.append(data.y.cpu().data.numpy().tolist()[0])
        total_pred.append(output.cpu().data.numpy()[0].tolist()[0])
t = np.array(total_labels)
p = np.array(total_pred)
# Overall test MSE (computed but only kept in pred1, not saved).
pred1 = mse(t,p)
print("Saving results...")
# One row per compound: identifiers, predicted and true pChEMBL values.
scores = []
for i in range(len(p)):
    tk = []
    tk.append(uniprot[i])
    tk.append(inchi[i])
    tk.append(p[i])
    tk.append(t[i])
    scores.append(tk)
f1 = pd.DataFrame(scores)
f1.columns =['uniprot_accession', 'standard_inchi_key', 'predictions', 'labels']
if (option==0):
    f1.to_csv("../results/gat_cnn_supervised_test_predictions.csv",index=False)
else:
    f1.to_csv("../results/gat_cnn_supervised_sars_cov_2_predictions.csv", index=False)
print("Results saved...")
| StarcoderdataPython |
347087 | <gh_stars>1-10
#!/usr/bin/env python3
import gym
import numpy as np
import tensorflow as tf
import tensorflow.contrib.summary # Needed to allow importing summary operations
class Network:
    """Thin wrapper around a TF1 session holding a saved CartPole policy graph."""
    def __init__(self, threads, seed=42):
        """Create an empty graph and a session limited to `threads` threads."""
        # Create an empty graph and a session
        graph = tf.Graph()
        graph.seed = seed
        self.session = tf.Session(graph = graph, config=tf.ConfigProto(inter_op_parallelism_threads=threads,
                                                                       intra_op_parallelism_threads=threads))
    def load(self, path):
        """Restore the metagraph and weights saved under `path` (expects path + '.meta')."""
        # Load the metagraph
        with self.session.graph.as_default():
            self.saver = tf.train.import_meta_graph(path + ".meta")
            # Attach the end points (exported via tf.add_to_collection at save time)
            self.observations = tf.get_collection("end_points/observations")[0]
            self.actions = tf.get_collection("end_points/actions")[0]
            # Load the graph weights
            self.saver.restore(self.session, path)
    def predict(self, observations):
        """Return the action for a single observation (wraps it as a batch of one)."""
        return self.session.run(self.actions, {self.observations: [observations]})[0]
if __name__ == "__main__":
    # Parse arguments
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("model", default="gym_cartpole/model", nargs='?', type=str, help="Name of tensorflow model.")
    parser.add_argument("--episodes", default=100, type=int, help="Number of episodes.")
    parser.add_argument("--render", default=False, action="store_true", help="Render the environment.")
    parser.add_argument("--threads", default=1, type=int, help="Maximum number of threads to use.")
    args = parser.parse_args()
    # Create the environment
    env = gym.make('CartPole-v1')
    # Construct and load the network
    network = Network(threads=args.threads)
    network.load(args.model)
    # Evaluate the episodes, accumulating the per-episode reward.
    total_score = 0
    for episode in range(args.episodes):
        observation = env.reset()
        score = 0
        # Roll out one episode, capped at the environment's step limit.
        for i in range(env.spec.timestep_limit):
            if args.render:
                env.render()
            observation, reward, done, info = env.step(network.predict(observation))
            score += reward
            if done:
                break
        total_score += score
        print("The episode {} finished with score {}.".format(episode + 1, score))
    print("The average reward per episode was {:.2f}.".format(total_score / args.episodes))
| StarcoderdataPython |
8005888 | <gh_stars>1-10
from distutils.core import setup
# Package metadata for the 'again' decorator library.
# NOTE(review): distutils is deprecated (removed in Python 3.12) — consider
# migrating to setuptools.
setup(name='again',
      version='1.2.20',
      author='<NAME>',
      author_email='<EMAIL>',
      url='https://github.com/kashifrazzaqui/again',
      description='Python decorators for type and value checking at runtime. Also, some boilerplate code for making classes that support event registration and firing.',
      packages=['again']
      )
| StarcoderdataPython |
6539112 | # Natural Language Toolkit: Dependency Trees
#
# Author: <NAME> <<EMAIL>>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
| StarcoderdataPython |
3210011 | """
We will use this script to perform measurements on the library.
Also maybe we can use it to implement certain managmenet tasks.
"""
from rfeature import types
from rfeature import util
import argparse
def init_args():
    """Parse command-line arguments and store logging settings in util's config."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        "-v", "--verbose",
        required=False,
        help="level of verbosity",
        default="debug",
        choices=["debug", "warning", "warn", "info", "error"],
    )
    parser.add_argument("--log-dir", required=False, help="log directory to write")
    parsed = parser.parse_args()
    util.put(util.LOG_LEVEL, parsed.verbose)
    util.put(util.LOG_DIR, parsed.log_dir)
if __name__=='__main__':
    # Initialize util (logging etc.) with the CLI argument parser above.
    util.init(argparser=init_args)
    path=util.data_path("watch.jpg")
    util.log_info(f"file:{path}")
    print(types.capital_case("car"))
    pass
1939726 | <gh_stars>10-100
#!/usr/bin/env python3
import datetime
import os
import subprocess
import sys
import tempfile
import warnings
from Bio import SeqIO
from Bio.Seq import Seq
#======================================================================================================================
GFFREAD = 'gffread -C -g %s -y %s %s'
IPRSCAN = 'interproscan.sh -i %s -cpu %s'
IPRSCAN_HELP = 'interproscan.sh -version'
#======================================================================================================================
def check_iprscan():
    """Run ``interproscan.sh -version`` and return its output (installation check)."""
    com = IPRSCAN_HELP
    call = subprocess.Popen(com, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
    # NOTE(review): Popen.communicate() returns (stdout, stderr), so the names
    # here are swapped — 'log' is actually stderr.  Confirm the version banner
    # really goes to stderr before relying on the returned value.
    err, log = call.communicate()
    return log
def iprscan(ref, gff_file, wd, threads):
    """Run InterProScan on the proteins encoded by ``gff_file`` against ``ref``.

    Steps: extract CDS translations with gffread, strip '.' characters from
    protein sequences (recording the affected genes in bad_gene.txt), run
    interproscan.sh, and rename the resulting .tsv next to the input GFF.

    :param ref: path to the reference FASTA
    :param gff_file: path to the GFF annotation
    :param wd: working directory for temporary and output files
    :param threads: CPU count passed to interproscan.sh
    :return: (path of the final .tsv annotation, path of the bad-gene list)
    """
    warnings.filterwarnings("ignore")
    fmtdate = '%H:%M:%S %d-%m'
    now = datetime.datetime.now().strftime(fmtdate)
    fasta_file_outfile = tempfile.NamedTemporaryFile(delete=False, mode='w', dir=wd, prefix="prot_gffread.", suffix=".log")
    errorFilefile = tempfile.NamedTemporaryFile(delete=False, mode='w', dir=wd, prefix="prot_gffread.", suffix=".err")
    prot_file_out = tempfile.NamedTemporaryFile(delete=False, mode='w', dir=wd, prefix="prot_gffread.", suffix=".fasta")
    prot_file_mod = tempfile.NamedTemporaryFile(delete=False, mode='w', dir=wd, prefix="prot_gffread.mod.", suffix=".fasta")
    # Extract protein translations of coding sequences (gffread -y).
    com = GFFREAD % (os.path.abspath(ref), prot_file_out.name, os.path.abspath(gff_file))
    call = subprocess.Popen(com, stdout=fasta_file_outfile, cwd=wd, stderr=errorFilefile, shell=True)
    call.communicate()
    with open(prot_file_out.name) as input_file:
        fasta_dict = SeqIO.to_dict(SeqIO.parse(input_file, "fasta"))
    # BUGFIX: 'count' was additionally incremented for every protein containing
    # '.', inflating the reported number of mRNAs; the total is len(fasta_dict).
    count = len(fasta_dict)
    bad_prot = []
    for id in fasta_dict:
        if "." in str(fasta_dict[id].seq):
            bad_prot.append(fasta_dict[id].description)
            prot = str(fasta_dict[id].seq)
            prot_mod = prot.replace(".", "")
            fasta_dict[id].seq = Seq(prot_mod)
        SeqIO.write(fasta_dict[id], prot_file_mod, "fasta")
    # BUGFIX: flush/close the modified FASTA before interproscan reads it;
    # otherwise buffered sequences may not yet be on disk.
    prot_file_mod.close()
    bad_gene = wd + "/bad_gene.txt"
    with open(bad_gene, "w") as fh:
        for line in bad_prot:
            fh.write(line + "\n")
    sys.stdout.write(("###INTERPROSCAN ANALYSIS STARTED AT:\t" + now + "\t###\n###RUNNING ANALYSIS FOR \t\033[32m" + str(count) + "\033[0m\t mRNA\t###\n"))
    cmd = IPRSCAN % (prot_file_mod.name, threads)
    err = tempfile.NamedTemporaryFile(delete=False, mode='w', dir=wd, prefix=prot_file_mod.name, suffix=".err")
    log = tempfile.NamedTemporaryFile(delete=False, mode='w', dir=wd, prefix=prot_file_mod.name, suffix=".log")
    iprscan = subprocess.Popen(cmd, cwd=wd, stderr=err, stdout=log, shell=True)
    iprscan.communicate()
    # Count the distinct mRNAs for which InterProScan reported domains.
    done_prot = {}
    tsv_file = prot_file_mod.name + ".tsv"
    with open(tsv_file, "r") as fh:
        for line in fh:
            mRNA = line.split("\t")[0]
            done_prot[mRNA] = mRNA
    # BUGFIX: refresh the timestamp — 'now' previously still held the start
    # time, so the FINISHED message reported the wrong time.
    now = datetime.datetime.now().strftime(fmtdate)
    sys.stdout.write(("###FINISHED TO RUN INTERPROSCAN ANALYSIS AT:\t" + now + "\t###\n###PROTEINS DOMAINS WERE FOUND FOR \t\033[32m" + str(len(done_prot)) + "\033[0m\t PROTEINS\t###\n"))
    final_annot = gff_file + ".tsv"
    os.rename(prot_file_mod.name + ".tsv", final_annot)
    return final_annot, bad_gene
if __name__ == '__main__':
    # CLI usage: script.py <ref> <gff_file> <wd> <threads>
    iprscan(*sys.argv[1:])
| StarcoderdataPython |
9617215 | print('Importing libs...')
import pandas as pd
# Import local libraries
import sys
sys.path.append('.')
from TFIDF import TFIDF
from W2V import W2V
from KDTREE import KDTREE
if __name__ == '__main__':
    print('-' * 80)
    print('Reading files...')
    # FAQ corpus and hand-written test questions.
    faq = pd.read_csv('../data/interim/faq-text-separated.csv', keep_default_na=False)
    test_questions = pd.read_csv('../data/test/test-questions.csv')
    # FAQ columns used as matching features by the models.
    features = ['Topic', 'Category', 'Department', 'question', 'answer']
    # Real Inquire Boulder requests, reshaped to (test_question, match_topic).
    test_topics = pd.read_excel('../../../Inquire Boulder request data- detailed open and closed - for research purposes.xlsx')
    test_topics = test_topics[['Description', 'Topic']]
    test_topics = test_topics.rename(index=str, columns={"Description": "test_question", "Topic": "match_topic"})
    # # Evaluate KDTree on questions
    # kdtree = KDTREE(faq, features, 'KDTREE')
    # kdtree.evaluate(test_questions, 'questions')
    # # Evaluate Word2Vec on questions
    # w2v = W2V(faq, features, 'W2V')
    # w2v.evaluate(test_questions, 'questions')
    # w2v.evaluate(test_topics, 'topics')
    # Evaluate TFIDF on questions and Topics
    tfidf = TFIDF(faq, features, 'TFIDF')
    tfidf.evaluate(test_questions, 'questions')
    # tfidf.evaluate(test_topics, 'topics')
9659502 | # pylint: disable=unused-argument,redefined-outer-name
import os
from pathlib import Path
import pytest
from aiida.manage.tests import TestManager
@pytest.fixture(scope="session")
def top_dir() -> Path:
    """Return Path instance for the repository's top (root) directory"""
    # conftest.py lives one level below the repository root.
    return Path(__file__).parent.parent.resolve()
@pytest.fixture(scope="session", autouse=True)
def setup_config(top_dir):
    """Point OPTIMADE_CONFIG_FILE at the test config for the whole session.

    Runs before pytest collects tests so no modules are imported with the
    wrong config; the original value is restored (or removed) on teardown.
    """
    filename = top_dir.joinpath("tests/static/test_config.json")
    original_env_var = os.getenv("OPTIMADE_CONFIG_FILE")
    try:
        os.environ["OPTIMADE_CONFIG_FILE"] = str(filename)
        yield
    finally:
        # Restore the pre-existing value, or unset it if there was none.
        if original_env_var is not None:
            os.environ["OPTIMADE_CONFIG_FILE"] = original_env_var
        elif "OPTIMADE_CONFIG_FILE" in os.environ:
            del os.environ["OPTIMADE_CONFIG_FILE"]
@pytest.fixture(scope="session", autouse=True)
def aiida_profile(top_dir) -> TestManager:
    """Load test data for AiiDA test profile

    It is necessary to remove `AIIDA_PROFILE`, since it clashes with the test profile
    """
    from aiida import load_profile
    from aiida.manage.tests import (
        get_test_backend_name,
        get_test_profile_name,
        test_manager,
    )
    from aiida.tools.importexport import import_data
    # Remember the caller's profile so it can be restored on teardown.
    org_env_var = os.getenv("AIIDA_PROFILE")
    try:
        # Setup profile
        with test_manager(
            backend=get_test_backend_name(), profile_name=get_test_profile_name()
        ) as manager:
            # Start from a clean database, then load the static test structures.
            manager.reset_db()
            profile = load_profile().name
            assert profile in ["test_profile", "test_django", "test_sqlalchemy"]
            os.environ["AIIDA_PROFILE"] = profile
            filename = top_dir.joinpath("tests/static/test_structuredata.aiida")
            import_data(filename, silent=True)
            yield manager
    finally:
        # Restore the pre-existing AIIDA_PROFILE, or unset it if there was none.
        if org_env_var is not None:
            os.environ["AIIDA_PROFILE"] = org_env_var
        elif "AIIDA_PROFILE" in os.environ:
            del os.environ["AIIDA_PROFILE"]
@pytest.fixture
def get_valid_id() -> str:
    """Get a currently valid ID/PK from a StructureData Node"""
    from aiida.orm import QueryBuilder, StructureData
    # NOTE(review): project="id" yields the node PK; the declared return type
    # of str may not match the actual value's type — confirm.
    builder = QueryBuilder().append(StructureData, project="id")
    return builder.first()[0]
| StarcoderdataPython |
1685216 | import difflib
import json
import logging
import os
import re
from typing import Dict, Pattern
from . import Wrapper
from .Events import *
RegexMatches = List[Match[str]]
class TextProcessor(object):
    """Parses Minecraft server console output against JSON-defined regexes
    and dispatches the matches to handler methods named after each regex's
    "type" field (see process_line)."""
    def __init__(self, wrapper: "Wrapper.Wrapper"):
        """
        Initializes a new text processor
        :param wrapper: The wrapper that this text processor belongs to
        """
        self._wrapper = wrapper
        self.regexes = [] # type: List[Dict[str, Pattern[str]]]
        self.loaded_files = []
        self._logger = logging.getLogger("TextProcessor")
        self.server_log = logging.getLogger("MinecraftServer")
        # Regex definitions live in a "regex" directory next to this module.
        self._regex_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "regex")
        # Version-independent regexes are always loaded first.
        self.load_version("generic")
    @staticmethod
    def get_json_files(path: str):
        """
        Returns a generator giving all json files for a given path
        :param path: The path
        """
        for file in os.listdir(path):
            if file.endswith(".json"):
                yield os.path.join(path, file)
    def load_version(self, version: str) -> None:
        """
        Loads the regex json files for a version

        Falls back to the closest-named version directory (difflib) when the
        exact version directory does not exist.
        :param version: The version to load the files for
        """
        self._logger.debug(f"Loading regexes for version {version}...")
        directory = os.path.join(self._regex_path, version)
        if os.path.isdir(directory):
            for file in self.get_json_files(directory):
                self.process_file(file)
        else:
            self._logger.warning(f"Version {version} not found.")
            close = difflib.get_close_matches(version, os.listdir(self._regex_path))
            if len(close) != 0:
                self._logger.warning(f"Using closest variation: {close[0]}. This may cause compatibility issues.")
                self.load_version(close[0])
            else:
                self._logger.error("No close variation found. Not attempting to load.")
                return
        self._logger.debug("Regexes loaded.")
    def process_file(self, path: str) -> None:
        """Load one regex JSON file, following its "import" entries recursively.

        Files already in self.loaded_files are skipped so shared imports are
        only compiled once.
        """
        if path in self.loaded_files:
            return
        with open(path) as f:
            data = json.load(f)
        for item in data:
            if "import" in item:
                # An import entry pulls in another regex file (path relative
                # to the regex root) before the remaining entries.
                self.process_file(os.path.join(self._regex_path, item["import"]))
            else:
                self.regexes.append({
                    "type": item["type"],
                    "regex": re.compile(item["regex"])
                })
                self._logger.debug(f"Loaded new regex for {item['type']}")
        self.loaded_files.append(path)
    def unspecified_handler(self, event_type: str, matches: RegexMatches):
        """
        Fallback for regex types with no matching handler method
        :param event_type: The type of event
        :param matches: The regex matches
        """
        self._logger.warning(f"No handler specified for {event_type} (Matches: {', '.join(matches)}")
    def console_output(self, event_type: str, matches: RegexMatches):
        """
        Re-logs a generic server console line and fires CONSOLE_OUTPUT
        :param event_type: The type of event
        :param matches: The regex matches (level name, message)
        """
        self.server_log.log(getattr(logging, matches[0][0]), matches[0][1])
        self._wrapper.EventManager.dispatch_event(Events.CONSOLE_OUTPUT, ConsoleOutputEvent(matches[0][0],
                                                                                            matches[0][1]))
    def version_discovered(self, event_type: str, matches: RegexMatches):
        """
        Records the server version and loads its regex set
        :param event_type: The type of event
        :param matches: The regex matches (version string)
        """
        self._logger.debug(f"Version detected: {matches[0]}")
        self._wrapper.version = matches[0]
        self.load_version(matches[0])
        self._wrapper.EventManager.dispatch_event(Events.VERSION_DISCOVERED, VersionDiscoveredEvent(matches[0]))
    def server_ready(self, event_type: str, matches: RegexMatches):
        """
        Fires SERVER_READY once the server reports it has started
        :param event_type: The type of event
        :param matches: The regex matches
        """
        self._wrapper.EventManager.dispatch_event(Events.SERVER_READY, ServerReadyEvent())
    def uuid_found(self, event_type: str, matches: RegexMatches):
        """
        Associates a player name with its UUID
        :param event_type: The type of event
        :param matches: The regex matches (player name, uuid)
        """
        self._wrapper.PlayerManager.set_uuid(matches[0][0], matches[0][1])
        self._wrapper.EventManager.dispatch_event(Events.UUID_DISCOVERED, UUIDDiscoveredEvent(matches[0][0], matches[0][1]))
    def player_connected(self, event_type: str, matches: RegexMatches):
        """
        Registers a newly-connected player and fires PLAYER_CONNECTED
        :param event_type: The type of event
        :param matches: The regex matches (player name)
        """
        self._wrapper.PlayerManager.add_player(matches[0])
        self._wrapper.EventManager.dispatch_event(Events.PLAYER_CONNECTED, PlayerConnectedEvent(matches[0], self._wrapper.PlayerManager.get_player(matches[0])))
    def player_disconnected(self, event_type: str, matches: RegexMatches):
        """
        Marks a player as disconnected and fires PLAYER_DISCONNECTED
        :param event_type: The type of event
        :param matches: The regex matches (player name)
        """
        self._wrapper.PlayerManager.set_player_disconnected(matches[0])
        self._wrapper.EventManager.dispatch_event(Events.PLAYER_DISCONNECTED, PlayerDisconnectedEvent(matches[0], self._wrapper.PlayerManager.get_player(matches[0])))
    def message_sent(self, event_type: str, matches: RegexMatches):
        """
        Fires MESSAGE_SENT for a chat line and forwards it to command processing
        :param event_type: The type of event
        :param matches: The regex matches (player name, message)
        """
        event = MessageSentEvent(matches[0][0], matches[0][1], self._wrapper.PlayerManager.get_player(matches[0][0]))
        self._wrapper.EventManager.dispatch_event(Events.MESSAGE_SENT, event)
        self._wrapper.CommandRegistry.process_command(event)
    def user_opped(self, event_type: str, matches: RegexMatches):
        """
        Flags a player as op and records their UUID in the op list
        :param event_type: The type of event
        :param matches: The regex matches (player name)
        """
        self._wrapper.PlayerManager.get_player(matches[0]).is_op = True
        self._wrapper.ops.append(self._wrapper.PlayerManager.get_uuid(matches[0]))
        self._wrapper.EventManager.dispatch_event(Events.USER_OPPED, UserOppedEvent(matches[0], self._wrapper.PlayerManager.get_player(matches[0])))
    def user_deopped(self, event_type: str, matches: RegexMatches):
        """
        Clears a player's op flag and removes their UUID from the op list
        :param event_type: The type of event
        :param matches: The regex matches (player name)
        """
        self._wrapper.PlayerManager.get_player(matches[0]).is_op = False
        self._wrapper.ops.remove(self._wrapper.PlayerManager.get_uuid(matches[0]))
        self._wrapper.EventManager.dispatch_event(Events.USER_DEOPPED, UserDeoppedEvent(matches[0], self._wrapper.PlayerManager.get_player(matches[0])))
    def process_line(self, line: str):
        """
        Processes a line of server output: every regex that matches dispatches
        to the handler method named after its "type" (unspecified_handler if
        no such method exists)
        :param line: The server output
        """
        line = line.replace("\r\n", "\n").rstrip("\n")
        # NOTE(review): this print looks like a debugging leftover — confirm
        # whether raw echoing of every server line is intended.
        print(line)
        for regex in self.regexes:
            if regex["regex"].match(line):
                getattr(self, regex["type"], self.unspecified_handler)(regex["type"], regex["regex"].findall(line))
| StarcoderdataPython |
95333 | import csv
import os
import sys
import time
import numpy as np
import matplotlib.pyplot as plt
from path import Path
from vector_math import *
from find_matches import *
from file_paths import *
#********************
#**** this function reads a CSV file and returns a header, and a list that has been converted to float
#********************
def read_csv_float_with_header(file1,list1):
    """Read a CSV file and return (header, list1) with every data row
    converted to floats and appended to ``list1`` (which is mutated).

    :param file1: path of the CSV file to read
    :param list1: list that parsed rows are appended to
    :return: (header row as a list of strings, list1)
    """
    # BUGFIX: the original opened the file in 'rb' and called
    # fileobject.next() — both Python-2-only; on Python 3 the csv module
    # needs a text-mode file opened with newline='' and the next() builtin.
    # The context manager also guarantees the file is closed on error.
    with open(file1, 'r', newline='') as fileopen:
        fileobject = csv.reader(fileopen)
        # get the header
        header = next(fileobject)
        # convert each remaining row to floats before appending to list1
        for row in fileobject:
            list1.append([float(value) for value in row])
    return header, list1
#********************
#**** end function reads a CSV file and returns a header, and a list that has been converted to float
#********************
#********************
#**** main code
#********************
#@profile
def mainloop(driver_id, rdp_tolerance,Input_Path):
Input_Path = DATA
start_time = time.time()
list_of_paths = []
list_of_lengths = []
# read all the routes for this driver
for cnt in range(1,201):
# initialize this path
path = Path(1,cnt) # start with driver 1, route 1
input_coords=[]
# file_name = "Test_Set\\driver_1\\" + str(cnt) + ".csv"
file_name = os.path.join(Input_Path,str(driver_id),str(cnt) + ".csv")
header, input_coords = read_csv_float_with_header(file_name,input_coords)
path_array = np.array(input_coords)
path.route = path_array
path.time = len(path.route) # 1 second per data file
# only analyze this path if it is not within a 50 meter bound of the starting point
max_value = np.amax(path.route)
min_value = np.amin(path.route)
if ( max_value < 50 and min_value > -50):
path.is_zero = 1 # this is a zero length route
#if ( path.is_zero == 0) :
# x_coord = path.route[ path.time-1, 0]
# y_coord = path.route[ path.time-1, 1]
# angle_off_horizontal = np.arctan( y_coord / x_coord )
# path.rotate_path(angle_off_horizontal)
# x_coord = path.route[ path.time-1, 0]
# y_coord = path.route[ path.time-1, 1]
# if (x_coord < 0) : # for quadrant 2 & 3 rotate back to quadrant 1
# angle_off_horizontal = np.pi
# path.rotate_path(angle_off_horizontal)
# # make our new 0, 0 point be the highest point in the path, bisecting the angle that makes the tallest
# path.center_on_highest_point()
# find the total distance along the route
path.distance = path.get_route_distance(0, path.time)
list_of_lengths.append(path.distance)
if ( path.is_zero == 0) :
# get features on this path
path.generate_features(rdp_tolerance)
#plt.figure()
#plt.plot(path.route[:,0],path.route[:,1],markersize=2.0)
#feature_list = []
#for cnt, feature in enumerate(path.feature_loc):
# x1 = path.route[ path.feature_loc[cnt,2] ,0]
# y1 = path.route[ path.feature_loc[cnt,2] ,1]
# feature_list.append( [x1, y1] )
#feature_list = np.array(feature_list)
#
#plt.scatter(feature_list[:,0],feature_list[:,1])
#plt.figure()
#plt.plot(path.route[:,0],path.route[:,1],markersize=2.0)
#feature_list = []
#for cnt, feature in enumerate(path.feature_loc):
# x1 = path.route[ path.feature_loc[cnt,2] ,0]
# y1 = path.route[ path.feature_loc[cnt,2] ,1]
# feature_list.append( [x1, y1] )
#feature_list = np.array(feature_list)
#
#plt.scatter(feature_list[:,0],feature_list[:,1])
#plt.show()
# get angles between each of the consective features
path.generate_angles()
list_of_paths.append(path)
list_to_run = []
#list_to_run.append( [11,19] )
#list_to_run.append( [17,9,15] )
#list_to_run.append( [63, 83, 120, 148] )
#list_to_run.append( [102, 167, 183, 197, 200] )
#list_to_run.append( [5,96] )
for cnt, path in enumerate(list_of_paths):
if ( path.is_zero == 0) :
for cnt2, run_list in enumerate(list_to_run):
if ( path.routeid in run_list):
plt.figure(cnt2+1)
plt.plot(path.route[:,0],path.route[:,1],markersize=2.0)
feature_list = []
for cnt, feature in enumerate(path.feature_loc):
x1 = path.route[ path.feature_loc[cnt,2] ,0]
y1 = path.route[ path.feature_loc[cnt,2] ,1]
feature_list.append( [x1, y1] )
feature_list = np.array(feature_list)
plt.scatter(feature_list[:,0],feature_list[:,1])
# make CSV files of our angles
#file_name = open("Angle_Info_" + str(path.routeid) + ".csv",'wb')
#file_object = csv.writer(file_name)
#
#for angle in path.angles:
# file_object.writerow(angle)
#
#file_name.close()
for cnt1, path1 in enumerate(list_of_paths):
for cnt2, path2 in enumerate(list_of_paths[cnt1+1:]):
if (path1.matched < 3 or path2.matched < 3): # if one of the two paths aren't matched, check it
if ( path1.is_zero == 0 and path2.is_zero == 0) : # make sure we don't run a zero length path
already_matched = 0
path2.print_flag = 0 # default to not making a picture
path1.print_flag = 0
if (path1.routeid != path2.routeid): # don't compare a path against itself
compare_two_sets_of_angles(path1, path2) # Compare these two paths and record the score in path 1
#if (path1.matched ==1 and path2.matched ==1): # if we matched this time, see if it is a new match
#
#
#
#
# if (previous_value1 ==0 and rdp_tolerance==15):
# path1.print_flag = 1
# else:
# path1.print_flag = 0
# if (previous_value2 ==0 and rdp_tolerance==15):
# path2.print_flag = 1
# else:
# path2.print_flag = 0
# #
# if (path1.print_flag==1 or path2.print_flag==1): # if the new values are a match that wasn't a previous match, print it
# print(path1.routeid, path2.routeid)
# align_two_paths(path1, path2,driver_id,rdp_tolerance)
#
# path2.print_flag = 0
# path1.print_flag = 0
#
#
list_of_lengths.sort()
for cnt1, path1 in enumerate(list_of_paths):
if (path1.distance > list_of_lengths[190] and path1.matched ==0): # if it is a long path and not matched, move it down the ranking
path1.matched = -2
elif (path1.distance > list_of_lengths[180] and path1.matched ==0): # if it is a long path and not matched, move it down the ranking
path1.matched = -1
num_matched = 0
final_out = open("Results_"+str(rdp_tolerance)+"m//Driver_" + str(driver_id)+".csv",'wb')
final_out_csv = csv.writer(final_out)
for cnt1, path1 in enumerate(list_of_paths):
final_out_csv.writerow([driver_id, path1.routeid, path1.matched])
if (path1.matched >=1):
num_matched+=1
fout = open("intial_match_list.txt",'a')
fout.write("Driver " + str(driver_id) +" num matched " + str(num_matched) + "\n")
fout.close()
end_time = time.time()
print("minutes elapsed ",(end_time-start_time) / 60. )
#plt.show()
#sys.exit(0)
#time_vs_distance = []
#color_list=[]
#for cnt, path in enumerate(list_of_paths):
# if (path.matched==1):
# time_vs_distance.append( [ path.time, path.distance] )
# color_list.append("red")
# elif(path.is_zero ==1):
# time_vs_distance.append( [ path.time, path.distance] )
# color_list.append("blue")
# else:
# time_vs_distance.append( [ path.time, path.distance] )
# color_list.append("green")
#
#time_vs_distance = np.array(time_vs_distance)
#plt.figure(2)
#plt.scatter(time_vs_distance[:,0],time_vs_distance[:,1],c=color_list,s=100)
#plt.show()
#plt.close()
#
#
#********************
#**** end main code
#********************
# drivers start at 1 and the last one is 3612 however there are gaps in between
# there are a total of 2736 drivers
rdp_tolerance_list = [13, 15, 17]
# create one output folder per RDP tolerance before processing any drivers
for rdp_tolerance in rdp_tolerance_list:
    if not os.path.isdir("Results_" + str(rdp_tolerance) + "m"):
        os.mkdir("Results_" + str(rdp_tolerance) + "m")
Input_Path = DATA
for driver_id in range(1, 3613):
    try:
        # probe for the driver's first trip file; driver ids with gaps in the
        # numbering have no folder, so the open() raises and we skip them below
        file_name = os.path.join(Input_Path, str(driver_id), str(1) + ".csv")
        fileopen = open(file_name, 'rb')
        fileopen.close()  # close the probe file again
        for rdp_tolerance in rdp_tolerance_list:
            print("doing driver ", driver_id, " rdp ", rdp_tolerance)
            mainloop(driver_id, rdp_tolerance, Input_Path)
    except Exception:
        # BUG FIX: a bare `except:` also swallowed KeyboardInterrupt/SystemExit,
        # making the batch impossible to stop with Ctrl-C; `Exception` keeps the
        # skip-missing-driver behaviour while letting system exits propagate.
        # The former `x = 1` placeholder is replaced by an explicit pass.
        pass
11257239 | <filename>kaishi_chushibanben.py
import logging
from logging.handlers import RotatingFileHandler
from flask import Flask
from flask.ext.wtf import CSRFProtect
from flask_sqlalchemy import SQLAlchemy
from redis import StrictRedis
from flask_session import Session
from flask_migrate import Migrate,MigrateCommand
from flask_script import Manager
# Flask application instance plus the Flask-Script manager that provides CLI commands
app = Flask(__name__)
manager=Manager(app=app)
class PeiZhiLei(object):
    """Flask configuration object ("PeiZhiLei" is pinyin, roughly "config class")."""
    # debug mode
    DEBUG=True
    # database connection string (SQLAlchemy)
    SQLALCHEMY_DATABASE_URI='mysql://root:mysql@192.168.47.142:3306/f_db'
    SQLALCHEMY_TRACK_MODIFICATIONS=False
    # redis connection parameters
    REDIS_HOST='192.168.47.142'
    REDIS_PORT=6379
    # session configuration (server-side sessions stored in redis db 1)
    SECRET_KEY='dyh'
    SESSION_TYPE = 'redis'
    SESSION_REDIS = StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=1)
    SESSION_USE_SIGNER = True # once set to True a SECRET_KEY must be configured
    PERMANENT_SESSION_LIFETIME = 3600
    # logging configuration
    # default log level
    LOG_LEVEL = logging.DEBUG
# wire the configuration and extensions into the Flask app
app.config.from_object(PeiZhiLei)
db=SQLAlchemy(app=app)
# module-level redis client (separate from the session store above)
rs=StrictRedis(host=PeiZhiLei.REDIS_HOST,port=PeiZhiLei.REDIS_PORT)
Session(app=app)
Migrate(app=app,db=db)
manager.add_command('db',MigrateCommand)
# enable CSRF protection
CSRFProtect(app)
# logging helper
def setup_log(config_name):
    """Configure application logging.

    Installs a size-rotating file handler on the root logger so log records
    are persisted under ``logs/log``.

    NOTE(review): *config_name* is currently unused -- the level always comes
    from ``PeiZhiLei.LOG_LEVEL``; confirm whether per-config levels were intended.
    """
    # global level for the root logger
    logging.basicConfig(level=PeiZhiLei.LOG_LEVEL)
    # rotate at 100 MB per file, keeping at most 10 backup files
    handler = RotatingFileHandler("logs/log", maxBytes=1024 * 1024 * 100, backupCount=10)
    # record format: level, source file name, line number, message
    handler.setFormatter(logging.Formatter('%(levelname)s %(filename)s:%(lineno)d %(message)s'))
    # attach the file handler to the (global) root logger used by the Flask app
    logging.getLogger().addHandler(handler)
@app.route('/')
def hello_world():
    # debug output: effective permanent-session lifetime (seconds)
    print(app.permanent_session_lifetime)
    return 'Hello World!'
if __name__ == '__main__':
    # db.create_all()
    # run via Flask-Script so `python app.py db ...` migration commands work too
    manager.run()
8096042 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: <NAME>
# @Date: 2015-10-16 10:06:16
# @Last Modified by: codykochmann
# @Last Modified time: 2015-10-16 11:53:14
# target files and the remote source of the alias definitions
# NOTE(review): plain open() does not expand '~' -- these paths only work if
# expanded before use; confirm against append_to_file
bashrc_file="~/.bashrc"
bash_aliases_file="~/.bash_aliases"
remote_alias_link="http://bit.ly/codys-aliases"
def append_to_file(filename, input_string):
    """Append *input_string* to *filename*, creating the file if needed.

    BUG FIX: the module passes paths like "~/.bashrc", but plain open() does
    not expand '~', which raises FileNotFoundError. Expand the user home
    first; absolute/relative paths without '~' are unaffected.
    """
    import os
    with open(os.path.expanduser(filename), 'a') as f:
        f.write(input_string)
def grep(url):
    """Download *url* with wget (quiet, to stdout) and return the body as a string."""
    import os
    pipe = os.popen("wget -qO- %s" % (url))
    return str(pipe.read())
def bash(text_to_run):
    """Run *text_to_run* in the system shell (blocking); returns None."""
    import os
    os.system(text_to_run)
# fetch the remote alias definitions and append them to ~/.bash_aliases
remote_bash_aliases_text=grep(remote_alias_link)
append_to_file(bash_aliases_file, remote_bash_aliases_text)
# snippet added to ~/.bashrc so new shells pick up the aliases
# NOTE(review): `bash file` runs in a subshell, so aliases defined there do
# not persist in the parent shell -- `source` was probably intended; confirm
to_append_to_bashrc="""
# automatically loads the bash aliases from ~/.bash_aliases
bash %s
""" % (bash_aliases_file)
append_to_file(bashrc_file, to_append_to_bashrc)
# load the aliases now and show what is defined
bash("bash %s"%(bash_aliases_file))
bash("alias -p")
print("""
All aliases have been loaded.
""")
| StarcoderdataPython |
624 | import numpy as np
import argparse
import composition
import os
import json
import torch
from spinup.algos.pytorch.ppo.core import MLPActorCritic
from spinup.algos.pytorch.ppo.ppo import ppo
from spinup.utils.run_utils import setup_logger_kwargs
from spinup.utils.mpi_tools import proc_id, num_procs
def parse_args():
    """Parse CLI flags, then derive this process's task assignment.

    After parsing, every process seeds numpy identically and draws the same
    shuffled list of 256 task ids; each rank (proc_id/num_procs) takes its own
    entry. The flat task id is then unraveled into one choice per axis
    (robot, object, objective, obstacle), so the --task-id/--robot/--object/
    --task/--obstacle flags are overwritten here regardless of what was
    passed on the command line.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--data-dir', default='spinningup_training/logs')
    parser.add_argument('--load-dir', default=None)
    parser.add_argument('--gridsearch-id', type=int, default=-1)
    parser.add_argument('--task-id', type=int, default=-1)
    parser.add_argument('--hid', type=int, default=256)
    parser.add_argument('--l', type=int, default=2)
    parser.add_argument('--gamma', type=float, default=0.99)
    parser.add_argument('--seed', '-s', type=int, default=4)
    parser.add_argument('--cpu', type=int, default=4)
    parser.add_argument('--steps', type=int, default=16000)
    parser.add_argument('--epochs', type=int, default=625)
    parser.add_argument('--exp-name', type=str, default='ppo')
    parser.add_argument('--clip', type=float, default=0.2)
    parser.add_argument('--pi-lr', type=float, default=1e-4)
    parser.add_argument('--vf-lr', type=float, default=1e-4)
    parser.add_argument('--pi-iters', type=int, default=128)
    parser.add_argument('--vf-iters', type=int, default=128)
    parser.add_argument('--target-kl', type=float, default=0.02)
    parser.add_argument('--ent-coef', type=float, default=0.02)
    parser.add_argument('--log-std-init', type=float, default=0.)
    parser.add_argument('--controller', type=str, default="joint")
    parser.add_argument('--robot', type=str, default="IIWA")
    parser.add_argument('--object', type=str, default="Hollowbox")
    parser.add_argument('--obstacle', type=str, default=None)
    parser.add_argument('--task', type=str, default="PickPlace")
    parser.add_argument('--horizon', type=int, default=500)
    args = parser.parse_args()
    # same seed on every process => identical task_list on all ranks
    np.random.seed(args.seed)
    task_list = np.random.choice(256, num_procs(), replace=False)
    args.task_id = int(task_list[proc_id()])
    _robots = ["IIWA", "Jaco", "Kinova3", "Panda"]
    _objects = ["Box", "Dumbbell", "Plate", "Hollowbox"]
    _objectives = ["PickPlace", "Push", "Shelf", "Trashcan"]
    _obstacles = ["None", "GoalWall", "ObjectDoor", "ObjectWall"]
    # decompose the flat task id into one index per axis (4*4*4*4 = 256)
    idx = np.unravel_index(args.task_id, (len(_robots), len(_objects), len(_objectives), len(_obstacles)))
    args.robot = _robots[idx[0]]
    args.object = _objects[idx[1]]
    args.task = _objectives[idx[2]]
    args.obstacle = _obstacles[idx[3]]
    # args.exp_name = "t:" + str(args.task_id) + "_name:" + args.exp_name + "_robot:" + str(args.robot) + "_task:" + str(args.task) + "_object:" + str(args.object) + "_obstacle:" + str(args.obstacle)
    args.exp_name = 'MTL_{}'.format(len(task_list))
    return args
def main():
    """Entry point: configure determinism, dump resolved args, launch PPO."""
    # determinism / single-thread settings for reproducible multi-process runs
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    torch.set_num_threads(1)
    args = parse_args()
    os.makedirs(os.path.join(args.data_dir, args.exp_name), exist_ok=True)
    # record this process's resolved arguments (task assignment differs per rank)
    with open(os.path.join(args.data_dir, args.exp_name, 'args_{}.json'.format(proc_id())), 'w') as f:
        json.dump(args.__dict__, f, indent=2)
    logger_kwargs = setup_logger_kwargs(
        args.exp_name, data_dir=args.data_dir)
    checkpoint = None
    if args.load_dir is not None:
        # resume training from a previously saved state dict
        checkpoint = torch.load(os.path.join(args.load_dir, 'pyt_save', 'state_dicts.pt'))
    ppo(lambda: composition.make(
        args.robot, args.object, args.obstacle, args.task, args.controller, args.horizon, use_task_id_obs=True), actor_critic=MLPActorCritic,
        ac_kwargs=dict(hidden_sizes=[args.hid]*args.l, log_std_init=args.log_std_init), seed=args.seed, gamma=args.gamma, steps_per_epoch=args.steps, epochs=args.epochs, clip_ratio=args.clip,
        pi_lr=args.pi_lr, vf_lr=args.vf_lr, train_pi_iters=args.pi_iters, train_v_iters=args.vf_iters, target_kl=args.target_kl,
        logger_kwargs=logger_kwargs, max_ep_len=args.horizon, ent_coef=args.ent_coef, log_per_proc=True, checkpoint=checkpoint)


if __name__ == '__main__':
    main()
| StarcoderdataPython |
393410 | <gh_stars>1-10
"""
Test similarities
-----------------
Collection of tests for the similarities module.
"""
import numpy as np
from ..Similarities.aux_functions import KL_divergence, average_prob,\
Jensen_Shannon_divergence
from ..Similarities.dtw import dtw
from ..Similarities.magnitude_similarities import general_dtw
from ..Similarities.correlation_similarities import lagged_PearsonCorrelation
from ..Similarities.informationth_similarities import mutualInformation,\
mutualInformation_1to1, conditional_entropy, information_GCI_ind
from ..Similarities.similarities import general_lag_distance,\
general_distance_M, general_comparison
def test():
    """Smoke-test the similarities module end to end on random data."""
    ## Artificial data
    ##################
    dist_p = np.random.random(10)
    dist_q = np.random.random(10)
    dist_p /= np.sum(dist_p)
    dist_q /= np.sum(dist_q)
    joint_p = np.random.random((10, 10))
    joint_q = np.random.random((10, 10))
    joint_p /= np.sum(joint_p)
    joint_q /= np.sum(joint_q)

    ## Auxiliary functions
    ##############
    KL_divergence(dist_p, dist_q)
    Jensen_Shannon_divergence(dist_p, dist_q)
    average_prob(dist_p, dist_q)
    average_prob(joint_p, joint_q)

    ## Magnitude similarity (dist)
    ###############################
    walk_a = np.random.randn(200).cumsum()
    walk_b = np.random.randn(200).cumsum()
    dtw(walk_a, walk_b, dist=None)
    ### Try to import and apply rpy2 dependencies
    try:
        import rpy2
        general_dtw(walk_a, walk_b, 'rpy2')
    except:  # NOTE(review): deliberately broad -- skip when rpy2 is unusable
        pass

    ## Correlation / information-theoretic similarities
    ###############################
    series = np.random.randn(1000, 4).cumsum(0)
    lagged_PearsonCorrelation(series, timelag=0)
    lagged_PearsonCorrelation(series, timelag=2)
    discrete = np.random.randint(0, 10, (1000, 2))
    mutualInformation_1to1(discrete[:, 0], discrete[:, 1], bins=10)
    mutualInformation(discrete, bins=10)
    conditional_entropy(discrete[:, 0], discrete[:, 1])
    information_GCI_ind(series, bins=None)

    ## Generic comparison drivers
    diff_max = lambda a, b: np.max(a - b)
    lags = [0, 2, 3]
    general_lag_distance(series, diff_max, lags, simmetrical=False, kwargs={})
    general_lag_distance(series, diff_max, lags, simmetrical=True, kwargs={})
    pars_lag = {'method_f': diff_max, 'tlags': lags, 'simmetrical': False}
    general_comparison(series, 'lag_based', pars_lag)
    general_distance_M(series, diff_max, simmetrical=True, kwargs={})
    general_distance_M(series, diff_max, simmetrical=False, kwargs={})
    pars_dist = {'method_f': diff_max, 'simmetrical': False}
    general_comparison(series, 'static_based', pars_dist)
| StarcoderdataPython |
1647924 | """
Manually subscribing each :mod:`Source <snsary.sources.source>` to each :mod:`Output <snsary.outputs.output>` is repetitive, especially when there are multiple Outputs. MultiSource combines multiple Sources as one. Just like a :mod:`Sensor <snsary.sources.sensor>`, a MultiSource also exposes a stream to make it easier to work with: ::
MultiSource(MockSensor(), MockSensor()).stream.into(MockOutput())
"""
from .source import Source
class MultiSource(Source):
    """A Source that funnels several Sources into one shared stream."""

    def __init__(self, *sources):
        # local import -- presumably avoids a circular import between the
        # sources and streams packages; confirm before moving to module level
        from snsary.streams import AsyncStream
        self.__stream = AsyncStream()

        for source in sources:
            source.subscribe(self.__stream)

    def subscribe(self, output):
        # subscribers attach to the single combined stream, not to each
        # underlying source individually
        self.stream.subscribe(output)

    @property
    def stream(self):
        # the combined stream that every wrapped source feeds into
        return self.__stream
| StarcoderdataPython |
8103991 | <reponame>ndrplz/computer_vision_utils<filename>tensor_manipulation.py
import cv2
import numpy as np
def resize_tensor(tensor, new_shape):
    """Resize each channel of a 3D tensor independently with OpenCV.

    Parameters
    ----------
    tensor: ndarray
        Numeric 3D tensor of shape (channels, h, w)
    new_shape: tuple
        Target spatial size as (new_h, new_w)

    Returns
    -------
    ndarray
        Resized tensor of shape (channels, new_h, new_w)
    """
    new_h, new_w = new_shape
    # NOTE: output dtype is float64 regardless of input dtype (np.zeros default)
    resized = np.zeros(shape=(tensor.shape[0], new_h, new_w))
    for idx, channel in enumerate(tensor):
        # cv2.resize expects dsize as (width, height)
        resized[idx] = cv2.resize(channel, dsize=(new_w, new_h))
    return resized
def crop_tensor(tensor, indexes):
    """Crop a 3D tensor along its two spatial axes.

    Parameters
    ----------
    tensor: ndarray
        Numeric 3D tensor of shape (channels, h, w)
    indexes: tuple
        Crop bounds following the convention (h1, h2, w1, w2)

    Returns
    -------
    ndarray
        Independent copy of shape (channels, h2-h1, w2-w1)
    """
    top, bottom, left, right = indexes
    # .copy() decouples the result from the input tensor's memory
    return tensor[:, top:bottom, left:right].copy()
| StarcoderdataPython |
6405074 |
"""### Data Pre-processing"""
# Commented out IPython magic to ensure Python compatibility.
# Importing Modules
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
from tqdm import tqdm
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.linear_model import BayesianRidge
from sklearn.tree import DecisionTreeRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.preprocessing import OrdinalEncoder
from sklearn import preprocessing
from sklearn.model_selection import cross_val_score
from sklearn.impute import SimpleImputer
from sklearn.pipeline import make_pipeline
import warnings
warnings.filterwarnings('ignore')
from matplotlib import style
style.use('ggplot')
# load the raw auction data from the spreadsheet
df=pd.read_excel('data/vehicles_Manheim.xlsx', sheet_name='Sheet1')
df=pd.DataFrame(df)
# Getting Rid of Irrelevant Rows
df2=df.copy()
#print('Shape of dataframe before: ', df2.shape)
# NOTE(review): this KEEPS rows with more than 4 missing values, which looks
# inverted for "getting rid of irrelevant rows" -- confirm against the sheet layout
df2 = df2[df2.isnull().sum(axis=1) > 4]
#print('Shape of dataframe after: ', df2.shape)
# Drop Columns and Check Missing Values
df2=df2.drop(columns=['Subseries', 'Drs','Cyl','Fuel','EW','Radio','Int'])
#df2.head()
#return series of columns with respective of number of null values
#df2.isnull().sum()
#heatmap to identify nulll values using graph
#sns.heatmap(df2.isnull(),yticklabels=False,cbar=True,cmap='Accent')
# Getting Desired Data: keep only rows with all key attributes present
df3 = df2[pd.notnull(df2['Price'])]
df3 = df3[pd.notnull(df3['Top'])]
df3 = df3[pd.notnull(df3['4x4'])]
df3 = df3[pd.notnull(df3['Trans'])]
df3 = df3[pd.notnull(df3['Color'])]
df3 = df3[pd.notnull(df3['Model'])]
#df3.isnull().sum()
# Defining the variable categories
#define numeric variable and categorical variable to work separatly on them
num_col=['Year', 'Odometer']
cat_cols=['Make','Model','Color','Trans','4x4','Top']
# Making strings and Integers
# cast categoricals to str while preserving NaN (via the -1 sentinel round-trip)
for cat in cat_cols:
    df3[cat] = df3[cat].fillna(-1)
    df3[cat] = df3[cat].astype(str)
    df3[cat] = df3[cat].replace('-1', np.nan)
#df3.isnull().sum()
# Making strings and Integers
for num in num_col:
    df3[num] = df3[num].astype(int)
# Saving the processed data, then reload to normalise dtypes
df3.to_csv(r'data/vehicles_Manheim_cleaned.csv',index=False)
df3=pd.read_csv('data/vehicles_Manheim_cleaned.csv')
"""### Outliers"""
#outliers_condi=Latex(r" $\textbf{W𝑒 𝑐𝑎𝑛 𝑠𝑎𝑦 $𝑥_1$ or $x_2$ 𝑖𝑠 𝑜𝑢𝑡𝑙𝑖𝑒𝑟𝑠 if }\\ x_1 < Q1 - 1.5*IQR \\ or\\ x_2 > Q3+1.5*IQR $")
#outliers_info=Latex(r"$L_{p} = \frac{p}{100}(n+1) = i_p.f_p \\ where \,\, i_p \,\, is \,\, integer \,\, part \,\, of \,\, L_p \,\, and \,\, f_p \,\, is \,\, fractional \,\, part \,\, of \,\, L_p \\ Q1 = Y_{25} = x_{i_p} + f_p*(x_{i_{p+1}}-x_{i_p}) \\ Q3 = Y_{75} = x_{i_p} + f_p*(x_{i_{p+1}}-x_{i_p}) \\ IQR = Q3-Q1 \\ x_1 = Q1 - 1.5*IQR \,\,and\,\, x_2 = Q3+1.5*IQR $")
# It will return the range of the variables and the values outside this range will be outliers
def outliers(arr, col):
    """Return the (lower, upper) Tukey fence for column *col* of DataFrame *arr*.

    Values outside [Q1 - 1.5*IQR, Q3 + 1.5*IQR] are considered outliers.
    Quartiles follow the L_p = p/100 * (n + 1) convention with linear
    interpolation between the two neighbouring order statistics.
    """
    x = sorted(arr[col].values.ravel())

    def _quantile(p):
        # L_p = i_p.f_p, with i_p the integer part and f_p the fractional part
        L_p = p / 100 * (len(x) + 1)
        i_p = int(L_p)
        # BUG FIX: the fractional part must be L_p - i_p (e.g. 0.75), not the
        # decimal digits parsed as an integer via str(L_p).split(".")[1]
        # (which yielded e.g. 75 and produced wildly wrong quartiles).
        f_p = L_p - i_p
        # NOTE(review): keeps the original 0-based indexing into the sorted
        # data; a strictly 1-based L_p convention would use x[i_p - 1].
        return x[i_p] + f_p * (x[i_p + 1] - x[i_p])

    q1 = _quantile(25)
    q3 = _quantile(75)
    IQR = q3 - q1
    return (q1 - 1.5 * IQR, q3 + 1.5 * IQR)
# Price
def min_max_price(df):
    """Return [lower, upper] Tukey fences for the 'logprice' column of *df*."""
    q1, q3 = df['logprice'].quantile([0.25, 0.75])
    iqr = q3 - q1
    return [q1 - 1.5 * iqr, q3 + 1.5 * iqr]
# work in log space so the heavily skewed price distribution is usable
df3['logprice'] = np.log(df3['Price'])
x=df3['logprice']
price_range=list(range(0,int(max(df3['logprice']))+1))
red_square = dict(markerfacecolor='g', marker='s')
plt.boxplot(x, vert=False)
plt.xticks(price_range)
# annotate the two Tukey fences on the box plot
plt.text(min_max_price(df3)[0]-0.3,1.05,str(round(min_max_price(df3)[0],2)))
plt.text(min_max_price(df3)[1]-0.5,1.05,str(round(min_max_price(df3)[1],2)))
plt.title("Figure 1: Box Plot of Price")
plt.savefig('plots/graph-boxplot-price.jpg')
#plt.show()
# Odometer
fig, ax1 = plt.subplots()
ax1.set_title('Figure 2: Box Plot of Odometer')
ax1.boxplot(df3['Odometer'], vert=False, flierprops=red_square)
plt.savefig('plots/graph-boxplot-odometer.jpg')
#plt.show()
# Year
fig,(ax1,ax2)=plt.subplots(ncols=2,figsize=(12,5))
#ploting boxplot
o1,o2=outliers(df3,'Year')
ax1.boxplot(sorted(df3['Year']), vert=False, flierprops=red_square)
ax1.set_xlabel("Years")
ax1.set_title("Figure 3: Box Plot of Year")
ax1.text(o1-8,1.05,str(round(o1,2)))
#ploting histogram
hist,bins=np.histogram(df3['Year'])
n, bins, patches = ax2.hist(x=df3['Year'], bins=bins)
ax2.set_xlabel("Years")
ax2.set_title("Figure 4: Histogram of Year")
# label only the dominant histogram bars
for i in range(len(n)):
    if(n[i]>2000):
        ax2.text(bins[i],n[i]+3000,str(n[i]))
plt.tight_layout()
plt.savefig('plots/graph-barplot-histogram-year.jpg',dpi=1200)
#plt.show()
# Removing outliers: keep only rows inside the Tukey fences for each variable
df_new=df3.copy()
out=np.array(['logprice','Odometer','Year'])
for col in out:
    o1,o2=outliers(df_new,col)
    df_new=df_new[(df_new[col]>=o1) & (df_new[col]<=o2)]
    print('IQR of',col,'=',o1,o2)
df_new=df_new[df_new['Price']!=0]
df_new.drop('logprice',axis=1,inplace=True)
#df_new.head()
# Saving the final dataframe
print("Shape before process=",df.shape)
print("Shape After process=",df_new.shape)
diff=df.shape[0]-df_new.shape[0]
print("Total {} rows and {} cols removed".format(diff,df.shape[1]-df_new.shape[1]))
df_new.to_csv("data/vehicles_Manheim_Final.csv",index=False)
1885480 | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-11-12 14:11
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema (auto-generated by Django 1.9 makemigrations).

    Creates the per-role account tables (Account, asstreg, Caretaker, CC,
    Faculty, Gymkhana, HOD, Lab, Library, Warden), the Student table with its
    per-role approval flags, and the Student<->Faculty / Student<->Lab
    many-to-many through tables.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Account',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(default=b'Account', max_length=250)),
                ('webmail', models.CharField(max_length=100, unique=True)),
                ('password', models.CharField(max_length=250)),
            ],
        ),
        migrations.CreateModel(
            name='asstreg',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=250)),
                ('webmail', models.CharField(max_length=100, unique=True)),
                ('password', models.CharField(max_length=250)),
            ],
        ),
        migrations.CreateModel(
            name='Caretaker',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=250)),
                ('webmail', models.CharField(max_length=100, unique=True)),
                ('password', models.CharField(max_length=250)),
                ('hostel', models.CharField(max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='CC',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(default=b'CC', max_length=250)),
                ('webmail', models.CharField(max_length=100, unique=True)),
                ('password', models.CharField(max_length=250)),
            ],
        ),
        migrations.CreateModel(
            name='Faculty',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=250)),
                ('webmail', models.CharField(max_length=100, unique=True)),
                ('password', models.CharField(max_length=250)),
                ('dept', models.CharField(max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='Gymkhana',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(default=b'Gymkhana', max_length=250)),
                ('webmail', models.CharField(max_length=100, unique=True)),
                ('password', models.CharField(max_length=250)),
            ],
        ),
        migrations.CreateModel(
            name='HOD',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=250)),
                ('webmail', models.CharField(max_length=100, unique=True)),
                ('password', models.CharField(max_length=250)),
                ('dept', models.CharField(max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='Lab',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(default=b'Lab', max_length=250)),
                ('webmail', models.CharField(max_length=100, unique=True)),
                ('password', models.CharField(max_length=250)),
            ],
        ),
        migrations.CreateModel(
            name='Library',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(default=b'Library', max_length=250)),
                ('webmail', models.CharField(max_length=100, unique=True)),
                ('password', models.CharField(max_length=250)),
            ],
        ),
        migrations.CreateModel(
            name='Student',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=250)),
                ('roll', models.IntegerField(default=0)),
                ('webmail', models.CharField(max_length=100, unique=True)),
                ('password', models.CharField(max_length=250)),
                ('dept', models.CharField(max_length=100)),
                ('hostel', models.CharField(max_length=100)),
                ('caretaker_approval', models.BooleanField(default=False)),
                ('warden_approval', models.BooleanField(default=False)),
                ('gymkhana_approval', models.BooleanField(default=False)),
                ('library_approval', models.BooleanField(default=False)),
                ('CC_approval', models.BooleanField(default=False)),
                ('asstreg_approval', models.BooleanField(default=False)),
                ('HOD_approval', models.BooleanField(default=False)),
                ('account_approval', models.BooleanField(default=False)),
            ],
        ),
        # through tables carrying an extra 'approved' flag per relation
        migrations.CreateModel(
            name='StudFacStatus',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('approved', models.BooleanField(default=False)),
                ('faculty', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.Faculty')),
                ('student', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.Student')),
            ],
        ),
        migrations.CreateModel(
            name='StudLabStatus',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('approved', models.BooleanField(default=False)),
                ('lab', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.Lab')),
                ('student', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.Student')),
            ],
        ),
        migrations.CreateModel(
            name='Warden',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=250)),
                ('webmail', models.CharField(max_length=100, unique=True)),
                ('password', models.CharField(max_length=250)),
                ('hostel', models.CharField(max_length=100)),
            ],
        ),
        # expose the through tables as ManyToMany fields on Student
        migrations.AddField(
            model_name='student',
            name='faculty_approval',
            field=models.ManyToManyField(through='main.StudFacStatus', to='main.Faculty'),
        ),
        migrations.AddField(
            model_name='student',
            name='lab_approval',
            field=models.ManyToManyField(through='main.StudLabStatus', to='main.Lab'),
        ),
    ]
| StarcoderdataPython |
3333988 | import os
import sys
import glob
import csv
import pandas as pd
def mkdir_fol(path):
    """Create directory *path* if it does not already exist.

    Uses os.makedirs(..., exist_ok=True), which is atomic with respect to the
    existence check (no exists()/mkdir() race) and also creates intermediate
    directories for nested paths. Existing directories are left untouched.
    """
    os.makedirs(path, exist_ok=True)
# resolve the output root from the environment, falling back to the repo default
out_dir = os.environ.get('OUT_DIR')
if out_dir is None:
    out_dir = "../data/output"
in_fol = str(out_dir)+'/heatmap_txt'
out_fol = str(out_dir)+'/heatmap_txt_3classes_separate_class'
# one destination folder per class channel in the prediction files
dest_fols = ['heatmap_txt_grade3', 'heatmap_txt_grade45', 'heatmap_txt_benign']
mkdir_fol(out_fol)
for fol in dest_fols:
    mkdir_fol(os.path.join(out_fol, fol))
for file in glob.glob(in_fol + '/prediction-*'):
    # NOTE(review): splitting on '/' assumes POSIX paths; os.path.basename
    # would be portable
    slide_id = file.split('/')[-1]
    print(file)
    pred = pd.read_csv(file, delimiter=' ')
    # constant fourth column expected by the downstream heatmap format
    zeros = [0]*len(pred)
    pred['zeros'] = zeros
    columns = pred.columns
    for i in range(len(dest_fols)):
        # keep the coordinate columns (0-1) plus the i-th class score column
        data = pred[[columns[0], columns[1], columns[i + 2], 'zeros']]
        data.to_csv(os.path.join(out_fol, dest_fols[i], slide_id), sep=' ', header=False, index=False)
        # slide_id[11:] strips the 'prediction-' prefix (11 characters)
        os.system('cp ' + os.path.join(in_fol, 'color-' + slide_id[11:]) + ' ' + os.path.join(out_fol, dest_fols[i], 'color-' + slide_id[11:]))
11214757 | <reponame>manailin/facebookresearch-ParlAI
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
"""Base script for running official ConvAI2 validation eval for perplexity.
This uses a the version of the dataset which does not contain candidates.
Leaderboard scores will be run in the same form but on a hidden test set.
The official vocabulary for the competition is based on using the
"split_tokenize" method on in the ParlAI core dictionary (parlai/core/dict.py)
and is built on the training and validation sets of the "convai2" task.
This dictionary contains a total of 19304 tokens. The test set contains some
tokens which are not in this dictionary--this tokens will not be provided, but
we will also *SKIP* calculating perplexity on these tokens. The model should
still produce a good guess for the remaining tokens in the sentence, so
handling unknown words or expanding the vocabulary with pre-trained or
multitasked embeddings are legitimate strategies that may or may not impact the
score of the models.
Note that this tokenizer will also be used during the perplexity evaluation:
the model will be asked to predict one word at a time according to this
tokenizer's parsing of the text.
This requires agents to implement the following function:
def next_word_probability(self, partial_out):
Return probability distribution over next words given a partial true output.
This is used to calculate the per-word perplexity.
Arguments:
partial_out -- list of previous "true" words
Returns a dict, where each key is a word and each value is a probability
score for that word. Unset keys assume a probability of zero.
e.g.
{'text': 'Run test program.'}, ['hello'] => {'world': 1.0}
"""
from parlai.core.agents import Agent, create_agent, create_agents_from_shared
from parlai.core.build_data import download_models
from parlai.core.dict import DictionaryAgent
from parlai.core.params import ParlaiParser
from parlai.core.utils import Timer, round_sigfigs, no_lock
from parlai.core.thread_utils import SharedTable
from parlai.core.worlds import create_task, World
from projects.convai2.build_dict import build_dict
import math
def setup_args(parser=None):
    """Return a ParlaiParser preconfigured for the official ConvAI2 eval.

    Defaults to the no-candidates validation split so perplexity is computed
    on free-form labels rather than on ranking candidates.
    """
    if parser is None:
        parser = ParlaiParser(True, True)
    parser.set_defaults(
        task='convai2:self:no_cands',
        datatype='valid',
        hide_labels=False,  # will be shown to model partially in steps
    )
    return parser
class WordFrequencyEntry(Agent):
    """This is an example entry which tries to use the RepeatLabelAgent.

    Since no labels are given to the model, it will guess something useless.
    It builds the official dictionary first, so that it can provide a minimum
    probablity for each word as well as use the official tokenizer.
    """

    def __init__(self, opt, shared=None):
        super().__init__(opt, shared)
        if not shared:
            # build official eval dictionary
            self.dict = build_dict()
        else:
            # only build dict once, then share it across threads
            self.dict = shared['dict']
        max_freq = self.dict.max_freq()
        # set probability of each word, skipping the invalid words like __NULL__
        # (which have frequency more than max_freq)
        self.freqs = {k: f for k, f in self.dict.freqs().items() if f <= max_freq}

    def share(self):
        shared = super().share()
        # share dict with other threads instead of rebuilding in each
        shared['dict'] = self.dict
        return shared

    def next_word_probability(self, partial_out):
        """Example implementation of next word probability.

        Returns a dict mapping words to (unnormalized) scores: the base
        training-set frequency, boosted for words appearing in the input.
        """
        obs = self.observation
        # initialize probabilities with inverse word frequency
        freqs = self.freqs.copy()
        # increase likelihood of predicting input words
        tokens = self.dict.tokenize(obs.get('text', ''))
        for t in tokens:
            # BUG FIX: input tokens may be out-of-vocabulary (absent from
            # self.freqs); `freqs[t] += 10000` raised KeyError for them.
            freqs[t] = freqs.get(t, 0) + 10000
        return freqs
class PerplexityWorld(World):
    """Instead of just calling act/observe on each agent, this world just calls
    act on the teacher and then calls `next_word_probability` on the agent.

    The label for each example is parsed by the provided tokenizer, and then
    for each word in the parsed label the model is given the input and all of
    the tokens up to the current word and asked to predict the current word.

    The model must return a probability of any words it thinks are likely in
    the form of a dict mapping words to scores. If the scores do not sum to 1,
    they are normalized to do so. If the correct word is not present or has a
    probability of zero, it will be assigned a probability of 1e-8.
    (NOTE(review): the implementation below actually records an infinite loss
    for zero-probability words rather than 1e-8 -- confirm which is intended.)

    The API of the next_word_probability function which agents must implement
    is mentioned in the documentation for this file.
    """
    def __init__(self, opt, agents, shared=None):
        super().__init__(opt)
        if shared:
            # Create agents based on shared data.
            self.task, self.agent, self.dict = create_agents_from_shared(shared['agents'])
            self.metrics = shared['metrics']
        else:
            if len(agents) != 3:
                raise RuntimeError('There must be exactly three agents.')
            if opt.get('batchsize', 1) > 1:
                raise RuntimeError('This world only works with bs=1. Try '
                                   'using multiple threads instead, nt>1.')
            self.task, self.agent, self.dict = agents
            if not hasattr(self.agent, 'next_word_probability'):
                raise RuntimeError('Agent must implement function '
                                   '`next_word_probability`.')
            # shared metric accumulators; wrapped in a SharedTable when
            # multiple threads update them concurrently
            self.metrics = {'total': 0, 'loss': 0.0, 'num_tokens': 0, 'num_unk': 0}
            if opt.get('numthreads', 1) > 1:
                self.metrics = SharedTable(self.metrics)
        self.agents = [self.task, self.agent, self.dict]
        self.acts = [None, None]

    def _lock(self):
        # context manager protecting metric updates
        if hasattr(self.metrics, 'get_lock'):
            # use the shared_table's lock
            return self.metrics.get_lock()
        else:
            # otherwise do nothing
            return no_lock()

    def parley(self):
        action = self.task.act()
        self.acts[0] = action.copy()
        # hide labels from model
        labels = action.get('eval_labels', action.pop('labels', None))
        if 'label_candidates' in action:
            action.pop('label_candidates')
        if labels is None:
            # empty example, move on
            return
        parsed = self.dict.tokenize(labels[0])
        loss = 0
        num_tokens = 0
        num_unk = 0
        self.agent.observe(action)
        # teacher forcing: predict each label token given the true prefix
        for i in range(len(parsed)):
            if parsed[i] in self.dict:
                # only score words which are in the dictionary
                probs = self.agent.next_word_probability(parsed[:i])
                # get probability of correct answer, divide by total prob mass
                prob_true = probs.get(parsed[i], 0)
                if prob_true > 0:
                    prob_true /= sum(probs.values())
                    loss -= math.log(prob_true)
                else:
                    # zero probability on the true word => infinite perplexity
                    loss = float('inf')
                num_tokens += 1
            else:
                # out-of-dictionary tokens are skipped, not scored
                num_unk += 1
        with self._lock():
            self.metrics['total'] += 1
            self.metrics['loss'] += loss
            self.metrics['num_tokens'] += num_tokens
            self.metrics['num_unk'] += num_unk

    def epoch_done(self):
        return self.task.epoch_done()

    def num_examples(self):
        return self.task.num_examples()

    def num_episodes(self):
        return self.task.num_episodes()

    def share(self):
        shared = super().share()
        shared['metrics'] = self.metrics
        return shared

    def reset_metrics(self):
        with self._lock():
            self.metrics['total'] = 0
            self.metrics['loss'] = 0
            self.metrics['num_tokens'] = 0
            self.metrics['num_unk'] = 0

    def report(self, compute_time=None):
        # NOTE(review): compute_time is accepted for interface compatibility
        # but unused here. If total > 0 while num_tokens == 0 (all tokens
        # unknown), the divisions below raise ZeroDivisionError -- confirm
        # whether that case can occur upstream.
        m = {}
        with self._lock():
            m['total'] = self.metrics['total']
            if m['total'] > 0:
                # m['num_unk'] = self.metrics['num_unk']
                # m['num_tokens'] = self.metrics['num_tokens']
                m['loss'] = round_sigfigs(self.metrics['loss'] / self.metrics['num_tokens'], 3)
                m['ppl'] = round_sigfigs(math.exp(self.metrics['loss'] / self.metrics['num_tokens']), 4)
        return m
def eval_ppl(opt):
    """Evaluates the perplexity and f1 of a model (and hits@1 if model has
    ranking enabled.
    """
    dict_agent = build_dict()
    # create agents
    agent = create_agent(opt)
    world = create_task(opt, [agent, dict_agent], default_world=PerplexityWorld)
    world.dict = dict_agent
    # set up logging
    log_time = Timer()
    tot_time = 0
    while not world.epoch_done():
        world.parley()  # process an example
        if log_time.time() > 1:  # log every 1 sec
            tot_time += log_time.time()
            report = world.report()
            # NOTE: '%' (not '%%') -- str.format does not treat '%' specially,
            # so the old '%%' printed a literal double percent sign.
            print('{}s elapsed, {}% complete, {}'.format(
                int(tot_time),
                round_sigfigs(report['total'] / world.num_examples() * 100, 3),
                report))
            log_time.reset()
            if world.epoch_done():
                print('EPOCH DONE')
    tot_time += log_time.time()
    final_report = world.report()
    print('{}s elapsed: {}'.format(int(tot_time), final_report))
    print("============================")
    # Use .get so a report with no scored tokens (no 'ppl' key) cannot crash.
    print("FINAL PPL: " + str(final_report.get('ppl', 'N/A')))
    if final_report.get('ppl', 0) == float('inf'):
        print('Note: you got inf perplexity. Consider adding (or raising) the '
              'minimum probability you assign to each possible word. If you '
              'assign zero probability to the correct token in the evaluation '
              'vocabulary, you get inf probability immediately.')
if __name__ == '__main__':
    parser = setup_args()
    # The example filler model just uses word frequencies.
    example_model = 'projects.convai2.eval_ppl:WordFrequencyEntry'
    parser.set_defaults(model=example_model)
    # Tip: run with --numthreads N to speed evaluation up.
    opt = parser.parse_args()
    eval_ppl(opt)
    if opt['model'] == example_model:
        print('This run just used the example filler model. To get better '
              'results, try implementing your own!')
| StarcoderdataPython |
6490387 | # coding=utf-8
from setuptools import find_packages, setup
import pathlib
import os
# Absolute path of the directory containing this setup.py.
MAIN_DIR = pathlib.Path(__file__).absolute().parent
def get_packages():
    """Read requirements/base.in and return its non-comment requirement lines."""
    base_file = MAIN_DIR / "requirements" / "base.in"
    specs = []
    with base_file.open("r") as handle:
        for raw_line in handle:
            spec = raw_line.strip()
            # Skip comment lines; keep everything else (including blanks).
            if not spec.startswith("#"):
                specs.append(spec)
    return specs
# Discover every package under the project root whose name matches the slug.
packages = find_packages(
    str(MAIN_DIR), include=("{{cookiecutter.project_slug}}*",), exclude=[]
)
# Note: setuptools does not collect data files automatically, so for an
# installed package to work, every template and static file must be
# enumerated explicitly; read_dir() below walks the directories to do that.
def read_dir(package: str, directory: str):
    """Return paths (relative to the package root) of all files under *directory*.

    *package* is a dotted package name resolved relative to the current
    working directory; a missing directory simply yields an empty list.
    """
    package_root = os.path.abspath(package.replace(".", "/"))
    target = os.path.join(package_root, directory)
    collected = []
    for root, _subdirs, filenames in os.walk(target):
        collected.extend(
            os.path.relpath(os.path.join(root, name), package_root)
            for name in filenames
        )
    return collected
if __name__ == "__main__":
    setup(
        name="{{cookiecutter.project_slug}}",
        version="{{cookiecutter.version}}",
        packages=packages,
        license="All Rights reserved",
        author="{{ cookiecutter.full_name }}",
        author_email="{{ cookiecutter.email }}",
        description="{{cookiecutter.project_short_description}}",
        # Runtime dependencies come straight from requirements/base.in.
        install_requires=get_packages(),
        # Explicitly enumerate static/template files per package so they
        # are included in the installed distribution.
        package_data={
            package: [] + read_dir(package, "static") + read_dir(package, "templates")
            for package in packages
        },
        include_package_data=True,
    )
| StarcoderdataPython |
1986005 | <gh_stars>10-100
# Author: <NAME>, TU Darmstadt (<EMAIL>)
# Parts of this code were adapted from https://github.com/NVlabs/pacnet
import copy
import torch
import pac
class NormalizedPacConv2d(pac.PacConv2d):
    """Implements a pixel-adaptive convolution with advanced normalization."""
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 padding=0,
                 bias=True,
                 kernel_type='gaussian',
                 shared_filters=False):
        """Initializes PAC with advanced normalization.
        Args:
            in_channels: Number of input channels.
            out_channels: Number of output channels.
            kernel_size: Filter size of used kernel.
            padding: Number of zero padding elements applied at all borders.
            bias: Usage of bias term.
            kernel_type: Type of kernel function K. See original PAC for
                available options.
            shared_filters: Sharing of filters among input dimensions.
        """
        super(NormalizedPacConv2d, self).__init__(
            in_channels,
            out_channels,
            kernel_size,
            stride=1,
            padding=padding,
            dilation=1,
            bias=bias,
            kernel_type=kernel_type,
            smooth_kernel_type='none',
            normalize_kernel=False,
            shared_filters=shared_filters,
            filler='uniform',
            native_impl=False)
        # Create normalization weight (one log-domain entry per filter tap).
        self.weight_normalization = torch.nn.parameter.Parameter(
            torch.Tensor(out_channels, in_channels, kernel_size, kernel_size))
        # Initialize convolution and normalization weight with same positive
        # values.
        self.weight.data = torch.abs(self.weight.data)
        self.weight_normalization.data = torch.log(
            torch.abs(copy.deepcopy(self.weight.data)))
    def forward(self, input_features, input_for_kernel, kernel=None,
                mask=None):
        """Returns pixel-adaptive convolution with advanced normalization."""
        # Compute pixel-adaptive kernel.
        output_mask = None
        if kernel is None:
            kernel, output_mask = self.compute_kernel(input_for_kernel, mask)
        # Perform pixel-adaptive convolution.
        channels = input_features.shape[1]
        output = pac.pacconv2d(input_features, kernel, self.weight, None,
                               self.stride, self.padding, self.dilation,
                               self.shared_filters, self.native_impl)
        # Determine normalization factor dependent on kernel and weight.
        if self.shared_filters:
            normalization_factor = torch.einsum(
                'ijklmn,zykl->ijmn',
                (kernel, torch.exp(self.weight_normalization)))
        else:
            normalization_factor = torch.einsum(
                'ijklmn,ojkl->iomn', (kernel.repeat(1, channels, 1, 1, 1, 1),
                                      torch.exp(self.weight_normalization)))
        # Clamp the normalization factor for numerical stability.  clamp()
        # stays on the input's device/dtype; the previous implementation
        # hard-coded `.cuda()`, which crashed on CPU tensors and allocated a
        # fresh tensor on every forward pass.
        normalization_factor = normalization_factor.clamp(min=1e-20)
        output = output / normalization_factor
        # Bias term added after normalization.
        if self.bias is not None:
            output += self.bias.view(1, -1, 1, 1)
        return output if output_mask is None else (output, output_mask)
class ProbPacConv2d(pac.PacConv2d):
    """Implements a probabilistic pixel-adaptive convolution."""
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 padding=0,
                 bias=True,
                 kernel_type='gaussian',
                 shared_filters=False):
        """Initializes PPAC.
        Args:
            in_channels: Number of input channels.
            out_channels: Number of output channels.
            kernel_size: Filter size of used kernel.
            padding: Number of zero padding elements applied at all borders.
            bias: Usage of bias term.
            kernel_type: Type of kernel function K. See original PAC for
                available options.
            shared_filters: Sharing of filters among input dimensions.
        """
        super(ProbPacConv2d, self).__init__(
            in_channels,
            out_channels,
            kernel_size,
            stride=1,
            padding=padding,
            dilation=1,
            bias=bias,
            kernel_type=kernel_type,
            smooth_kernel_type='none',
            normalize_kernel=False,
            shared_filters=shared_filters,
            filler='uniform',
            native_impl=False)
        # Create normalization weight (one log-domain entry per filter tap).
        self.weight_normalization = torch.nn.parameter.Parameter(
            torch.Tensor(out_channels, in_channels, kernel_size, kernel_size))
        # Initialize convolution and normalization weight with same positive
        # values.
        self.weight.data = torch.abs(self.weight.data)
        self.weight_normalization.data = torch.log(
            torch.abs(copy.deepcopy(self.weight.data)))
    def forward(self,
                input_features,
                input_for_probabilities,
                input_for_kernel,
                kernel=None,
                mask=None):
        """Returns result of probabilistic pixel-adaptive convolution."""
        # Compute pixel-adaptive kernel.
        output_mask = None
        if kernel is None:
            kernel, output_mask = self.compute_kernel(input_for_kernel, mask)
        # Multiply input with probabilities and perform standard PAC.
        batch_size, channels = input_features.shape[:2]
        input_features = input_features * input_for_probabilities
        output = pac.pacconv2d(input_features, kernel, self.weight, None,
                               self.stride, self.padding, self.dilation,
                               self.shared_filters, self.native_impl)
        # Determine normalization factor dependent on probabilities, kernel and
        # convolution weight.
        neighbor_probabilities = torch.nn.functional.unfold(
            input_for_probabilities, self.kernel_size, self.dilation,
            self.padding, self.stride)
        neighbor_factors = neighbor_probabilities.view(
            batch_size, 1, *kernel.shape[2:]) * kernel
        if self.shared_filters:
            normalization_factor = torch.einsum(
                'ijklmn,zykl->ijmn',
                (neighbor_factors, torch.exp(self.weight_normalization)))
        else:
            normalization_factor = torch.einsum(
                'ijklmn,ojkl->iomn',
                (neighbor_factors.repeat(1, channels, 1, 1, 1, 1),
                 torch.exp(self.weight_normalization)))
        # Clamp the normalization factor for numerical stability.  clamp()
        # stays on the input's device/dtype; the previous implementation
        # hard-coded `.cuda()`, which crashed on CPU tensors and allocated a
        # fresh tensor on every forward pass.
        normalization_factor = normalization_factor.clamp(min=1e-20)
        output = output / normalization_factor
        # Bias term added after normalization.
        if self.bias is not None:
            output += self.bias.view(1, -1, 1, 1)
        return output if output_mask is None else (output, output_mask)
| StarcoderdataPython |
4940297 | <reponame>OpenGeoscience/data_queues<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc. and Epidemico Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from __future__ import absolute_import
import logging
import os
import datetime
import requests
from django.conf import settings
import shutil
from dataqs.processor_base import GeoDataMosaicProcessor
from dataqs.helpers import gdal_translate, style_exists
# Shared logger for all dataqs processors.
logger = logging.getLogger("dataqs.processors")
# Directory of this module; used to locate bundled resource files (SLDs).
script_dir = os.path.dirname(os.path.realpath(__file__))
# GeoServer data directory root (Django setting, with a default fallback).
GS_DATA_DIR = getattr(settings, 'GS_DATA_DIR', '/data/geodata')
class ForecastIOAirTempProcessor(GeoDataMosaicProcessor):
    """
    Class for processing the latest 'QuickSilver' global air temperature
    geotiff from forecast.io (http://forecast.io/quicksilver/)
    """
    # Filename prefix, remote base URL, and GeoServer layer name used below.
    prefix = "forecast_io_airtemp"
    base_url = "http://maps.forecast.io/temperature/"
    layer_name = "forecast_io_airtemp"
    description = """Project Quicksilver is an experimental new data product
that attempts to create the world's highest resolution real-time map of global
(near-surface) air temperature.\n\n
It is generated using the same source data models that power Forecast.io,
combined with a sophisticated microclimate model that adjusts the temperatures
based on the effects of elevation, terrain, proximity to water, foliage cover,
and other factors.\n\nSource: http://blog.forecast.io/project-quicksilver/"""
    def parse_name(self, img_date):
        """Build the human-readable layer title for the given UTC datetime."""
        imgstrtime = img_date.strftime("%Y-%m-%d %H:00")
        layer_title = "Global (near-surface) Air Temperature - {} UTC".format(
            imgstrtime)
        return layer_title
    def convert(self, dl_file, imgtime):
        """
        Set the correct projection on the image and save as a GeoTIFF.
        """
        # Output name encodes the timestamp so the mosaic can index it.
        tif_file = "{prefix}_{year}{month}{day}T{hour}0000000Z.tif".format(
            prefix=self.prefix,
            year=str(imgtime.year),
            month='{0:02d}'.format(imgtime.month),
            day='{0:02d}'.format(imgtime.day), hour='{0:02d}'.format(
                imgtime.hour))
        gdal_translate(os.path.join(self.tmp_dir, dl_file),
                       os.path.join(self.tmp_dir, tif_file),
                       projection='EPSG:4326',
                       options=['COMPRESS=DEFLATE'])
        return tif_file
    def run(self, now=None):
        """
        Retrieve and process the latest global air temperature image
        from forecast.io
        """
        if not now:
            now = datetime.datetime.utcnow()
        raw_name = "{prefix}_{hour}.tif".format(
            prefix=self.prefix,
            hour='{0:02d}'.format(now.hour))
        try:
            raw_file = self.download(
                "{url}{year}/{month}/{day}/{hour}.tif".format(
                    url=self.base_url,
                    year=str(now.year),
                    month='{0:02d}'.format(now.month),
                    day='{0:02d}'.format(now.day),
                    hour='{0:02d}'.format(now.hour)), filename=raw_name)
        except requests.HTTPError:
            # Try the previous hour:
            now = now - datetime.timedelta(hours=1)
            raw_file = self.download(
                "{url}{year}/{month}/{day}/{hour}.tif".format(
                    url=self.base_url,
                    year=str(now.year),
                    month='{0:02d}'.format(now.month),
                    day='{0:02d}'.format(now.day),
                    hour='{0:02d}'.format(now.hour)), filename=raw_name)
        tif_file = self.convert(raw_file, now)
        # Move the converted GeoTIFF into the GeoServer mosaic directory.
        dst_file = self.data_dir.format(gsd=GS_DATA_DIR, ws=self.workspace,
                                        layer=self.layer_name, file=tif_file)
        dst_dir = os.path.dirname(dst_file)
        if not os.path.exists(dst_dir):
            os.makedirs(dst_dir)
        if dst_file.endswith('.tif'):
            shutil.move(os.path.join(self.tmp_dir, tif_file), dst_file)
            self.post_geoserver(dst_file, self.layer_name)
        # Upload the bundled SLD style once, if the layer has none yet.
        if not style_exists(self.layer_name):
            with open(os.path.join(script_dir,
                                   'resources/forecastio.sld')) as sld:
                self.set_default_style(self.layer_name,
                                       self.layer_name,
                                       sld.read())
        # Prune stale mosaic granules, then refresh the GeoNode metadata.
        self.drop_old_hourly_images(now, self.layer_name)
        self.drop_old_daily_images(now, self.layer_name)
        self.update_geonode(
            self.layer_name, title=self.parse_name(now),
            description=self.description,
            store=self.layer_name,
            bounds=('-180.0', '180.0',
                    '-90.0', '90.0', 'EPSG:4326'),
            extra_keywords=['category:Climatology Meteorology'])
        self.truncate_gs_cache(self.layer_name)
        self.cleanup()
if __name__ == '__main__':
    # Run a single processing pass when invoked as a script.
    processor = ForecastIOAirTempProcessor()
    processor.run()
| StarcoderdataPython |
6598249 | <filename>chargebee/compat.py<gh_stars>10-100
import sys
# Prefer simplejson when installed (drop-in replacement, usually faster);
# fall back to the standard-library json module otherwise.
try:
    import simplejson as json
except ImportError:
    import json
# Interpreter version components used to choose the right urllib imports.
py_major_v = sys.version_info[0]
py_minor_v = sys.version_info[1]
if py_major_v < 3:
    # Python 2: urlencode/urlparse live in separate modules.
    from urllib import urlencode
    from urlparse import urlparse
elif py_major_v >= 3:
    # Python 3: both were consolidated into urllib.parse.
    from urllib.parse import urlencode, urlparse
| StarcoderdataPython |
4824730 | <reponame>JBlaschke/lcls2<gh_stars>0
"""
Smalldata (v2)
Parallel data analysis with MPI send/recv
Analysis consists of two different process types:
1. clients
> these perform per-event analysis
       > are associated with one specific server
> after processing `batch_size` events, send a
dict of data over to their server
2. servers (srv)
> recv a batch of events from one of many clients
> add these batches to a `cache`
> when the cache is full, write to disk
> each server produces its OWN hdf5 file
>> at the end of execution, rank 0 "joins" all the
individual hdf5 files together using HDF virtual
datasets -- this provides a "virtual", unified
view of all processed data
CLIENT SRV
[ --------- ] | [ --------- ]
[ -{event}- ] send | [ --------- ]
batch [ --------- ] ~~~> | [ --------- ]
[ --------- ] | [ --------- ]
[ --------- ] | [ --------- ]
|
| [ --------- ]
| [ --------- ]
| [ --------- ]
| [ --------- ]
| [ --------- ]
| -- cache
(I apologize for indulging in some ASCII art)
Some Notes:
* number of servers to use is set by PS_SRV_NODES
environment variable
* if running in psana parallel mode, clients ARE
BD nodes (they are the same processes)
* eventual time-stamp sorting would be doable with
code conceptually similar to this (but would need
to be optimized for performance):
import numpy as np
import h5py
f = h5py.File('smalldata_test.h5')
ts = f['timestamp'][:]
tsneg = f['tsneg']
for i in np.argsort(ts):
print(tsneg[i])
"""
import os
import numpy as np
import h5py
from collections.abc import MutableMapping
# -----------------------------------------------------------------------------
from psana.psexp.tools import mode
# Determine the execution mode: with MPI we run in parallel across ranks,
# otherwise everything happens inside a single (serial) process.
if mode == 'mpi':
    from mpi4py import MPI
    COMM = MPI.COMM_WORLD
    RANK = COMM.Get_rank()
    SIZE = COMM.Get_size()
else:
    # No MPI available: behave exactly like a one-rank job.
    SIZE = 1
if SIZE > 1:
    MODE = 'PARALLEL'
else:
    MODE = 'SERIAL'
# -----------------------------------------------------------------------------
# Sentinel values written in place of missing data (see _get_missing_value).
MISSING_INT = -99999
MISSING_FLOAT = np.nan
# Numeric types recognized when choosing a missing-data sentinel.
INT_TYPES = [int, np.int8, np.int16, np.int32, np.int64,
             np.int, np.uint8, np.uint16, np.uint32, np.uint64, np.uint]
FLOAT_TYPES = [float, np.float16, np.float32, np.float64, np.float128, np.float]
# Dataset-name prefixes with special handling by the servers.
RAGGED_PREFIX = 'ragged_'
# NOTE(review): 'UNALIGED' looks like a misspelling of 'UNALIGNED'; the name
# is kept as-is since renaming would change the module's public surface.
UNALIGED_PREFIX = 'unaligned_'
def is_unaligned(dset_name):
    """True if the final path component of *dset_name* carries the unaligned prefix."""
    last_component = dset_name.split('/')[-1]
    return last_component.startswith(UNALIGED_PREFIX)
# -----------------------------------------------------------------------------
def _flatten_dictionary(d, parent_key='', sep='/'):
"""
http://stackoverflow.com/questions/6027558/flatten-nested-python-dictionaries-compressing-keys
"""
items = []
for k, v in d.items():
new_key = parent_key + sep + k if parent_key else k
if isinstance(v, MutableMapping):
items.extend(_flatten_dictionary(v, new_key, sep=sep).items())
else:
items.append((new_key, v))
return dict(items)
def _get_missing_value(dtype):
    """Return the sentinel 'missing data' value appropriate for *dtype*."""
    # Normalize plain Python / numpy scalar types to a numpy dtype first.
    if type(dtype) is not np.dtype:
        dtype = np.dtype(dtype)
    if dtype in INT_TYPES:
        return MISSING_INT
    if dtype in FLOAT_TYPES:
        return MISSING_FLOAT
    raise ValueError('%s :: Invalid num type for missing data' % str(dtype))
def _format_srv_filename(dirname, basename, rank):
srv_basename = '%s_part%d.h5' % (basename.strip('.h5'), rank)
srv_fn = os.path.join(dirname, srv_basename)
return srv_fn
# TODO: consider introducing a FileServer base class so that the HDF5
# handling below lives in base-class methods rather than inside Server.
class CacheArray:
    """In-memory cache holding up to ``cache_size`` event entries for one dataset.

    The buffer is allocated once up front; ``append`` fills consecutive
    slots and ``reset`` simply rewinds the fill counter (stale data past
    ``n_events`` is ignored by consumers).
    """
    def __init__(self, singleton_shape, dtype, cache_size):
        self.singleton_shape = singleton_shape
        self.dtype = dtype
        self.cache_size = cache_size
        # Pre-allocate the full cache buffer: one slot per cached event.
        self.data = np.empty((self.cache_size,) + self.singleton_shape,
                             dtype=self.dtype)
        self.reset()

    def append(self, data):
        """Store *data* in the next free slot and advance the fill counter."""
        self.data[self.n_events, ...] = data
        self.n_events += 1

    def reset(self):
        """Mark the cache as empty without releasing the buffer."""
        self.n_events = 0
class Server: # (hdf5 handling)
    """Receives event batches from clients and writes them to its own HDF5 file.

    Each dataset gets a CacheArray; when a cache fills up it is flushed to
    disk.  Aligned datasets are backfilled with sentinel 'missing' values so
    all of them stay row-aligned with the event stream.
    """
    def __init__(self, filename=None, smdcomm=None, cache_size=10000,
                 callbacks=[]):
        """filename=None disables writing; smdcomm is this server's MPI comm.

        cache_size is the number of events buffered per dataset before a
        disk write; callbacks are applied to every received event dict.
        """
        self.filename = filename
        self.smdcomm = smdcomm
        self.cache_size = cache_size
        self.callbacks = callbacks
        # maps dataset_name --> (dtype, shape)
        self._dsets = {}
        # maps dataset_name --> CacheArray()
        self._cache = {}
        self.num_events_seen = 0
        if (self.filename is not None):
            self.file_handle = h5py.File(self.filename, 'w')
        return
    def recv_loop(self):
        """Receive batches from every client until all have sent 'done'."""
        num_clients_done = 0
        num_clients = self.smdcomm.Get_size() - 1
        while num_clients_done < num_clients:
            msg = self.smdcomm.recv(source=MPI.ANY_SOURCE)
            if type(msg) is list:
                self.handle(msg)
            elif msg == 'done':
                num_clients_done += 1
        return
    def handle(self, batch):
        """Run callbacks on, then cache/write, every event dict in *batch*."""
        for event_data_dict in batch:
            for cb in self.callbacks:
                cb(event_data_dict)
            if self.filename is not None:
                # to_backfill: list of keys we have seen previously
                #              we want to be sure to backfill if we
                #              dont see them
                to_backfill = list(self._dsets.keys())
                for dataset_name, data in event_data_dict.items():
                    if dataset_name not in self._dsets.keys():
                        self.new_dset(dataset_name, data)
                    else:
                        to_backfill.remove(dataset_name)
                    self.append_to_cache(dataset_name, data)
                for dataset_name in to_backfill:
                    # unaligned datasets are allowed to have gaps
                    if not is_unaligned(dataset_name):
                        self.backfill(dataset_name, 1)
            self.num_events_seen += 1
        return
    def new_dset(self, dataset_name, data):
        """Create a resizable HDF5 dataset sized/typed from the first sample."""
        if type(data) == int:
            shape = ()
            maxshape = (None,)
            dtype = 'i8'
        elif type(data) == float:
            shape = ()
            maxshape = (None,)
            dtype = 'f8'
        elif hasattr(data, 'dtype'):
            shape = data.shape
            maxshape = (None,) + data.shape
            dtype = data.dtype
        else:
            raise TypeError('Type: %s not compatible' % type(data))
        self._dsets[dataset_name] = (dtype, shape)
        dset = self.file_handle.create_dataset(dataset_name,
                                               (0,) + shape, # (0,) -> expand dim
                                               maxshape=maxshape,
                                               dtype=dtype,
                                               chunks=(self.cache_size,) + shape)
        # Aligned datasets that appear late must be padded for all the
        # events that happened before they were first seen.
        if not is_unaligned(dataset_name):
            self.backfill(dataset_name, self.num_events_seen)
        return
    def append_to_cache(self, dataset_name, data):
        """Buffer *data*; flush the dataset's cache to disk when it fills."""
        if dataset_name not in self._cache.keys():
            dtype, shape = self._dsets[dataset_name]
            cache = CacheArray(shape, dtype, self.cache_size)
            self._cache[dataset_name] = cache
        else:
            cache = self._cache[dataset_name]
        cache.append(data)
        if cache.n_events == self.cache_size:
            self.write_to_file(dataset_name, cache)
        return
    def write_to_file(self, dataset_name, cache):
        """Grow the HDF5 dataset and append the cache's valid entries."""
        dset = self.file_handle.get(dataset_name)
        new_size = (dset.shape[0] + cache.n_events,) + dset.shape[1:]
        dset.resize(new_size)
        # remember: data beyond n_events in the cache may be OLD
        dset[-cache.n_events:,...] = cache.data[:cache.n_events,...]
        cache.reset()
        return
    def backfill(self, dataset_name, num_to_backfill):
        """Append *num_to_backfill* sentinel 'missing' entries to a dataset."""
        dtype, shape = self._dsets[dataset_name]
        missing_value = _get_missing_value(dtype)
        fill_data = np.empty(shape, dtype=dtype)
        fill_data.fill(missing_value)
        for i in range(num_to_backfill):
            self.append_to_cache(dataset_name, fill_data)
        return
    def done(self):
        """Flush all partially filled caches and close the HDF5 file."""
        if (self.filename is not None):
            # flush the data caches (in case did not hit cache_size yet)
            for dset, cache in self._cache.items():
                if cache.n_events > 0:
                    self.write_to_file(dset, cache)
            self.file_handle.close()
        return
class SmallData: # (client)
    """Client-side interface: accumulates per-event data and ships it to a
    Server in batches (parallel mode) or writes it directly (serial mode).
    At the end of a parallel run, rank-0 client joins the per-server partial
    files into one HDF5 file using virtual datasets (VDS)."""
    def __init__(self, server_group=None, client_group=None,
                 filename=None, batch_size=10000, cache_size=None,
                 callbacks=[]):
        """
        Parameters
        ----------
        server_group : MPI.Group
            The MPI group to allocate to server processes
        client_group : MPI.Group
            The MPI group to allocate to client processes
        filename : str
            The file path of the (new) HDF5 file to write data to,
            will be overwritten if it exits -- if "None", data
            will not be written to disk.
        batch_size : int
            Number of events before send/recv
        cache_size : int
            Number of events before write
        callbacks : list of functions
            Functions that get called on each server's data before
            being written to disk. The functions should take as
            arguments a dictionary, where the keys are the data field
            names and the values are the data themselves. Each event
            processed will have it's own dictionary of this form
            containing the data saved for that event.
        """
        self.batch_size = batch_size
        self._batch = []
        self._previous_timestamp = -1
        if cache_size is None:
            cache_size = batch_size
        if cache_size < batch_size:
            print('Warning: `cache_size` smaller than `batch_size`')
            print('setting cache_size -->', batch_size)
            cache_size = batch_size
        self._full_filename = filename
        if (filename is not None):
            self._basename = os.path.basename(filename)
            self._dirname = os.path.dirname(filename)
            self._first_open = True # filename has not been opened yet
        if MODE == 'PARALLEL':
            self._server_group = server_group
            self._client_group = client_group
            # hide intermediate files -- join later via VDS
            if filename is not None:
                self._srv_filename = _format_srv_filename(self._dirname,
                                                          self._basename,
                                                          self._server_group.Get_rank())
            else:
                self._srv_filename = None
            self._comm_partition()
            if self._type == 'server':
                # Server ranks block here receiving batches until clients finish.
                self._server = Server(filename=self._srv_filename,
                                      smdcomm=self._srvcomm,
                                      cache_size=cache_size,
                                      callbacks=callbacks)
                self._server.recv_loop()
        elif MODE == 'SERIAL':
            self._srv_filename = self._full_filename # dont hide file
            self._type = 'serial'
            self._server = Server(filename=self._srv_filename,
                                  cache_size=cache_size,
                                  callbacks=callbacks)
        return
    def _comm_partition(self):
        """Split COMM into per-server sub-communicators and classify this rank."""
        self._smalldata_group = MPI.Group.Union(self._server_group, self._client_group)
        self._smalldata_comm = COMM.Create(self._smalldata_group)
        self._client_comm = COMM.Create(self._client_group)
        # partition into comms
        n_srv = self._server_group.size
        if n_srv < 1:
            raise Exception('Attempting to run smalldata with no servers'
                            ' set env var PS_SRV_NODES to be 1 or more')
        if self._server_group.rank != MPI.UNDEFINED: # if in server group
            self._type = 'server'
            self._srv_color = self._server_group.rank
            self._srvcomm = self._smalldata_comm.Split(self._srv_color, 0) # rank=0
            if self._srvcomm.Get_size() == 1:
                print('WARNING: server has no associated clients!')
                print('This core is therefore idle... set PS_SRV_NODES')
                print('to be smaller, or increase the number of mpi cores')
        elif self._client_group.rank != MPI.UNDEFINED: # if in client group
            self._type = 'client'
            # Clients are distributed round-robin across the servers.
            self._srv_color = self._client_group.rank % n_srv
            self._srvcomm = self._smalldata_comm.Split(self._srv_color,
                                                       RANK+1) # keep rank order
        else:
            # we are some other node type
            self._type = 'other'
        return
    def _get_full_file_handle(self):
        """
        makes sure we overwrite on first open, but not after that
        """
        if MODE == 'PARALLEL':
            if self._first_open == True and self._full_filename is not None:
                fh = h5py.File(self._full_filename, 'w', libver='latest')
                self._first_open = False
            else:
                fh = h5py.File(self._full_filename, 'r+', libver='latest')
        elif MODE == 'SERIAL':
            # In serial mode the server's file IS the full file.
            fh = self._server.file_handle
        return fh
    def event(self, event, *args, **kwargs):
        """
        event: int, psana.event.Event
        """
        if type(event) is int:
            timestamp = event
        elif hasattr(event, 'timestamp'):
            timestamp = int(event.timestamp)
        else:
            raise ValueError('`event` must have a timestamp attribute')
        # collect all new data to add
        event_data_dict = {}
        event_data_dict.update(kwargs)
        for d in args:
            event_data_dict.update( _flatten_dictionary(d) )
        # check to see if the timestamp indicates a new event...
        # >> multiple calls to self.event(...), same event as before
        if timestamp == self._previous_timestamp:
            self._batch[-1].update(event_data_dict)
        # >> we have a new event
        elif timestamp > self._previous_timestamp:
            # if we have a "batch_size", ship events
            # (this avoids splitting events if we have multiple
            # calls to self.event)
            if len(self._batch) >= self.batch_size:
                if MODE == 'SERIAL':
                    self._server.handle(self._batch)
                elif MODE == 'PARALLEL':
                    self._srvcomm.send(self._batch, dest=0)
                self._batch = []
            event_data_dict['timestamp'] = timestamp
            self._previous_timestamp = timestamp
            self._batch.append(event_data_dict)
        else:
            # FIXME: cpo
            print('event data is "old", event timestamps'
                  ' must increase monotonically'
                  ' previous timestamp: %d, current: %d'
                  '' % (self._previous_timestamp, timestamp))
            """
            raise IndexError('event data is "old", event timestamps'
                             ' must increase monotonically'
                             ' previous timestamp: %d, current: %d'
                             '' % (self._previous_timestamp, timestamp))
            """
        return
    @property
    def summary(self):
        """
        This "flag" is required when you save summary data OR
        do a "reduction" operation (e.g. sum) across MPI procs
            >> if SmallData.summary:
            >>     whole = SmallData.sum(part)
            >>     SmallData.save_summary(mysum=whole)
        """
        r = False
        if MODE == 'PARALLEL':
            if self._type == 'client':
                r = True
        elif MODE == 'SERIAL':
            r = True
        else:
            raise RuntimeError()
        return r
    def sum(self, value):
        # Sum *value* across all client ranks (serial mode: identity).
        return self._reduction(value, MPI.SUM)
    def _reduction(self, value, op):
        """
        perform a reduction across the worker MPI procs
        """
        # because only client nodes may have certain summary
        # variables, we collect the summary data on client
        # rank 0 -- later, we need to remember this client
        # is the one who needs to WRITE the summary data to disk!
        if MODE == 'PARALLEL':
            red_val = None
            if self._type == 'client':
                red_val = self._client_comm.reduce(value, op)
        elif MODE == 'SERIAL':
            red_val = value # just pass it through...
        return red_val
    def save_summary(self, *args, **kwargs):
        """
        Save 'summary data', ie any data that is not per-event (typically
        slower, e.g. at the end of the job).
        Interface is identical to SmallData.event()
        Note: this function should be called in a SmallData.summary: block
        """
        if self._full_filename is None:
            print('Warning: smalldata not saving summary since no h5 filename specified')
            return
        # in parallel mode, only client rank 0 writes to file
        if MODE == 'PARALLEL':
            if self._client_comm.Get_rank() != 0:
                return
        # >> collect summary data
        data_dict = {}
        data_dict.update(kwargs)
        for d in args:
            data_dict.update( _flatten_dictionary(d) )
        # >> write to file
        fh = self._get_full_file_handle()
        for dataset_name, data in data_dict.items():
            if data is None:
                print('Warning: dataset "%s" was passed value: None'
                      '... ignoring that dataset' % dataset_name)
            else:
                fh[dataset_name] = data
        # we don't want to close the file in serial mode
        # this file is the server's main (only) file
        if MODE == 'PARALLEL':
            fh.close()
        return
    def done(self):
        """
        Finish any final communication and join partial files
        (in parallel mode).
        """
        # >> finish communication
        if self._type == 'client':
            # we want to send the finish signal to the server
            if len(self._batch) > 0:
                self._srvcomm.send(self._batch, dest=0)
            self._srvcomm.send('done', dest=0)
        elif self._type == 'server':
            self._server.done()
        elif self._type == 'serial':
            # Flush whatever is left in the local batch, then close the file.
            self._server.handle(self._batch)
            self._server.done()
        # stuff only one process should do in parallel mode
        if MODE == 'PARALLEL':
            if self._type != 'other': # other = not smalldata (Mona)
                self._smalldata_comm.barrier()
                # CLIENT rank 0 does all final file writing
                # this is because this client may write to the file
                # during "save_summary(...)" calls, and we want
                # ONE file owner
                if self._type == 'client' and self._full_filename is not None:
                    if self._client_comm.Get_rank() == 0:
                        self.join_files()
        return
    def join_files(self):
        """Stitch the per-server partial files into one file of virtual datasets."""
        joined_file = self._get_full_file_handle()
        # locate the srv (partial) files we expect
        files = []
        for i in range(self._server_group.Get_size()):
            srv_fn = _format_srv_filename(self._dirname,
                                          self._basename,
                                          i)
            if os.path.exists(srv_fn):
                files.append(srv_fn)
            else:
                print('!!! WARNING: expected partial (srv) file:')
                print(srv_fn)
                print('NOT FOUND. Trying to proceed with remaining data...')
                print('This almost certainly means something went wrong.')
        print('Joining: %d files --> %s' % (len(files), self._basename))
        # discover all the dataset names
        file_dsets = {}
        def assign_dset_info(name, obj):
            # TODO check if name contains unaligned, if so ignore
            if isinstance(obj, h5py.Dataset):
                tmp_dsets[obj.name] = (obj.dtype, obj.shape)
        all_dsets = []
        for fn in files:
            tmp_dsets = {}
            f = h5py.File(fn, 'r')
            f.visititems(assign_dset_info)
            file_dsets[fn] = tmp_dsets
            all_dsets += list(tmp_dsets.keys())
            f.close()
        all_dsets = set(all_dsets)
        # h5py requires you declare the size of the VDS at creation
        # (we have been told by <NAME> that this is not
        # necessary for the C++ version).
        # so: we must first loop over each file to find # events
        #     that come from each file
        # then: do a second loop to join the data together
        for dset_name in all_dsets:
            # part (1) : loop over all files and get the total number
            #            of events for this dataset
            total_events = 0
            for fn in files:
                dsets = file_dsets[fn]
                if dset_name in dsets.keys():
                    dtype, shape = dsets[dset_name]
                    total_events += shape[0]
                # this happens if a dataset is completely missing in a file.
                # to maintain alignment, we need to extend the length by the
                # appropriate number and it will be filled in with the
                # "fillvalue" argument below. if it's unaligned, then
                # we don't need to extend it at all.
                elif not is_unaligned(dset_name):
                    if '/timestamp' in dsets:
                        total_events += dsets['/timestamp'][1][0]
            combined_shape = (total_events,) + shape[1:]
            layout = h5py.VirtualLayout(shape=combined_shape,
                                        dtype=dtype)
            # part (2): now that the number of events is known for this
            # dataset, fill in the "soft link" that points from the
            # master file to all the smaller files.
            index_of_last_fill = 0
            for fn in files:
                dsets = file_dsets[fn]
                if dset_name in dsets.keys():
                    _, shape = dsets[dset_name]
                    vsource = h5py.VirtualSource(fn, dset_name, shape=shape)
                    layout[index_of_last_fill:index_of_last_fill+shape[0], ...] = vsource
                    index_of_last_fill += shape[0]
                else:
                    # only need to pad aligned data with "fillvalue" argument below
                    if is_unaligned(dset_name):
                        pass
                    else:
                        if '/timestamp' in dsets:
                            n_timestamps = dsets['/timestamp'][1][0]
                            index_of_last_fill += n_timestamps
            joined_file.create_virtual_dataset(dset_name,
                                               layout,
                                               fillvalue=_get_missing_value(dtype))
        joined_file.close()
        return
| StarcoderdataPython |
import cv2
import numpy as np

# Read the rust photograph.
img = cv2.imread(r'C:\Users\HP\imageprocessing\resources\rust-1-e1488346306943-1288x724.jpg', 1)

# BGR colour ranges covering three different shades of rust.
RUST_BGR_RANGES = [
    ([58, 57, 101], [76, 95, 162]),
    ([26, 61, 111], [81, 144, 202]),
    ([44, 102, 167], [115, 169, 210]),
]

# Highlight each shade and OR the masked images together.
# NOTE: the previous code called cv2.bitwise_or(output1, output2, output3),
# which passes output3 as the *dst* argument (the result of output1|output2
# is written into it) -- the third shade was silently discarded.  Here the
# three masks are combined pairwise so all shades contribute.
final = None
for lower, upper in RUST_BGR_RANGES:
    lower = np.array(lower, dtype="uint8")
    upper = np.array(upper, dtype="uint8")
    mask = cv2.inRange(img, lower, upper)
    shaded = cv2.bitwise_and(img, img, mask=mask)
    final = shaded if final is None else cv2.bitwise_or(final, shaded)

cv2.imshow("Rusted patterns", final)
cv2.waitKey(0)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.