max_stars_repo_path
stringlengths
3
269
max_stars_repo_name
stringlengths
4
119
max_stars_count
int64
0
191k
id
stringlengths
1
7
content
stringlengths
6
1.05M
score
float64
0.23
5.13
int_score
int64
0
5
utils/parser.py
scalar42/scholar-alerts-assistant
0
13000
from html.parser import HTMLParser


class Paper:
    """One Google Scholar alert entry, filled in field by field.

    Every ``add_*`` setter returns the result of :meth:`check_complete` so a
    caller can tell when all five fields have been populated.
    """

    def __init__(self):
        self.title = ""
        self.source_link = ""
        self.authr_and_pub = ""  # combined "authors, publication" line
        self.abstract = ""
        self.star_link = ""

    def add_title(self, title):
        self.title = title
        return self.check_complete()

    def add_source_link(self, source_link):
        self.source_link = source_link
        return self.check_complete()

    def add_authr_and_pub(self, authr_and_pub):
        self.authr_and_pub = authr_and_pub
        return self.check_complete()

    def add_abstract(self, abstract):
        # The abstract may arrive in several data chunks (e.g. split by HTML
        # entities), so append rather than overwrite.
        self.abstract += abstract
        return self.check_complete()

    def add_star_link(self, star_link):
        self.star_link = star_link
        return self.check_complete()

    def check_complete(self):
        """Return True when every field holds a non-empty value."""
        if (self.title == "" or self.source_link == ""
                or self.authr_and_pub == "" or self.abstract == ""
                or self.star_link == ""):
            return False
        return True

    def __str__(self):
        return (self.title + "\n" + self.source_link + "\n"
                + self.authr_and_pub + "\n" + self.abstract + "\n"
                + self.star_link)

    def __eq__(self, other):
        # Bug fix: comparing against a non-Paper object used to raise
        # AttributeError; return NotImplemented so Python falls back to the
        # other operand (and ultimately to identity/False).
        if not isinstance(other, Paper):
            return NotImplemented
        return self.title == other.title

    def __hash__(self):
        # Papers are deduplicated by title, consistent with __eq__.
        return hash(self.title)


class Parser(HTMLParser):
    """State-machine HTML parser for Google Scholar alert e-mails.

    Expected per-entry markup: ``<h3><a href=SOURCE>TITLE</a></h3>``, then a
    ``<div>`` with the authors/publication line, a ``<div>`` with the
    abstract, and finally an ``<a href=STAR>`` "save" link that closes the
    entry.
    """

    def __init__(self):
        HTMLParser.__init__(self)
        self.is_title = False
        self.is_authr_and_pub = False
        self.is_abstract = False
        self.is_table = False
        self.papers = []
        self.current_paper = Paper()

    def move_to_next_paper(self):
        """Store the finished entry and reset all parsing state."""
        self.papers.append(self.current_paper)
        self.current_paper = Paper()
        self.is_title = False
        self.is_authr_and_pub = False
        self.is_abstract = False
        self.is_table = False

    def handle_starttag(self, tag, attrs):
        if tag == "h3":
            self.is_title = True
        elif tag == "a" and self.is_title:
            # Anchor inside the <h3>: its href is the paper's source link.
            for attr in attrs:
                if attr[0].lower() == 'href':
                    self.current_paper.add_source_link(attr[1])
                    break
        elif tag == "a" and self.is_table:
            # First anchor after the abstract: the "star"/save link, which
            # also marks the end of the current entry.
            for attr in attrs:
                if attr[0].lower() == 'href':
                    self.current_paper.add_star_link(attr[1])
                    self.is_table = False
                    self.move_to_next_paper()
                    break

    def handle_data(self, data):
        if self.is_title:
            self.current_paper.add_title(data)
        elif self.is_authr_and_pub:
            self.current_paper.add_authr_and_pub(data)
        elif self.is_abstract:
            self.current_paper.add_abstract(data)

    def handle_endtag(self, tag):
        if tag == "h3":
            # Title finished; the next text is the authors/publication line.
            self.is_title = False
            self.is_authr_and_pub = True
        elif tag == "div":
            if self.is_authr_and_pub:
                # Authors div closed; the abstract div follows.
                self.is_authr_and_pub = False
                self.is_abstract = True
            elif self.is_abstract:
                # Abstract div closed; expect the star link next.
                self.is_abstract = False
                self.is_table = True

    def get_papers(self):
        """Return the list of completed Paper objects parsed so far."""
        return self.papers
3.125
3
ApendixI-Games/StacklessPSP-2.5.2_R1/pspsnd.py
MelroLeandro/Matematica-Discreta-para-Hackers-ipnyb
0
13001
"""Wrapper for pygame, which exports the PSP Python API on non-PSP systems.""" __author__ = "<NAME>, <<EMAIL>>" import pygame pygame.init() _vol_music = 255 _vol_sound = 255 def setMusicVolume(vol): global _vol_music if vol >= 0 and vol <= 255: _vol_music = vol pygame.mixer.music.set_volume(_vol_music / 255.0) def setSndFxVolume(vol): global _vol_sound if vol >= 0 and vol <= 255: _vol_sound = vol class Music: def __init__(self, filename, maxchan=128, loop=False): self._loop = loop pygame.mixer.music.load(filename) pygame.mixer.music.set_volume(_vol_music / 255.0) def start(self): if self._loop: pygame.mixer.music.play(-1) else: pygame.mixer.music.play() def stop(self): pygame.mixer.music.stop() class Sound: def __init__(self, filename): self._snd = pygame.mixer.Sound(filename) def start(self): self._snd.set_volume(_vol_sound / 255.0) self._snd.play()
2.859375
3
utility_parseCMUMovie.py
bipulkumar22/pyTextClassification
11
13002
"""Generate a folder-separated corpus from the CMU Movie Summary dataset.

Run ``python utility_parseCMUMovie.py``: the archive is downloaded,
extracted, and each plot summary is written to moviePlots/<genre>/.
"""
import ast
import csv
import os

# Download and unpack the CMU Movie Summary corpus into ./MovieSummaries/.
os.system("wget http://www.cs.cmu.edu/~ark/personas/data/MovieSummaries.tar.gz")
os.system("tar -xvzf MovieSummaries.tar.gz")

# Only keep movies whose box-office revenue exceeds this threshold
# (a negative value keeps every movie).
minRevenue = 20000000

movieMetadata = {}
# Bug fix: the original opened the TSV in 'rb' and used py2 print statements;
# both fail on Python 3.  csv on py3 wants text mode with newline=''.
with open('MovieSummaries/movie.metadata.tsv', newline='', encoding='utf-8') as csvfile:
    reader = csv.reader(csvfile, delimiter='\t', quotechar='|')
    for row in reader:
        rev = 0
        if len(row[4]) > 1:  # column 4 is the revenue (may be empty)
            rev = int(row[4])
        if (minRevenue < 0) or ((minRevenue > 0) and (rev > minRevenue)):
            # row[0] = Wikipedia movie id, row[2] = title, row[8] = genre dict
            movieMetadata[row[0]] = {
                'title': row[2],
                'genres': ast.literal_eval(row[8]).values(),
            }

print(len(movieMetadata))

with open("MovieSummaries/plot_summaries.txt", encoding='utf-8') as summaries:
    content = summaries.readlines()

for c in content:
    d = c.split("\t")
    movie_id = d[0]  # renamed from 'id' (shadowed the builtin)
    plot = d[1]
    if movie_id in movieMetadata:
        print(movie_id, movieMetadata[movie_id]['title'])
        for g in movieMetadata[movie_id]['genres']:
            # One sub-folder per genre; '/' is illegal inside a path segment.
            genre_dir = "moviePlots" + os.sep + g.replace("/", "-")
            if not os.path.exists(genre_dir):
                os.makedirs(genre_dir)
            out_path = (genre_dir + os.sep + movie_id + "_"
                        + movieMetadata[movie_id]["title"].replace("/", "-"))
            # 'with' guarantees the file is closed (original leaked handles
            # on exceptions and shadowed the summaries file object 'f').
            with open(out_path, 'w', encoding='utf-8') as out:
                out.write(plot)
2.9375
3
model.py
luqifeng/CVND---Image-Captioning-Project
0
13003
import torch import torch.nn as nn import torchvision.models as models import numpy as np class EncoderCNN(nn.Module): def __init__(self, embed_size): super(EncoderCNN, self).__init__() resnet = models.resnet50(pretrained=True) for param in resnet.parameters(): param.requires_grad_(False) modules = list(resnet.children())[:-1] self.resnet = nn.Sequential(*modules) self.embed = nn.Linear(resnet.fc.in_features, embed_size) def forward(self, images): features = self.resnet(images) features = features.view(features.size(0), -1) features = self.embed(features) return features class DecoderRNN(nn.Module): def __init__(self, embed_size, hidden_size, vocab_size, num_layers=1): super(DecoderRNN, self).__init__() self.lstm = nn.LSTM(embed_size,hidden_size,num_layers,batch_first=True) self.embeddings = nn.Embedding(vocab_size, embed_size) self.linear = nn.Linear(hidden_size, vocab_size) def forward(self, features, captions): captions = self.embeddings(captions) embed = torch.cat((features.unsqueeze(1),captions),1) r_out = self.lstm(embed) output = self.linear(r_out[0])[:, :-1, :] return output def sample(self, inputs, states=None, max_len=20): #" accepts pre-processed image tensor (inputs) and returns predicted sentence (list of tensor ids of length max_len) " #pass output = [] for i in range(max_len): hiddens, states = self.lstm(inputs, states) mid = self.linear(hiddens.squeeze(1)) predicted = mid.max(1)[1] output.append(predicted.tolist()[0]) inputs = self.embeddings(predicted) inputs = inputs.unsqueeze(1) #print(output) #output = torch.cat(output, 1) return output
2.734375
3
App/items/models/items.py
fmgar/BlackMarker-API
0
13004
"""Items model. """ # Django from django.db import models # Utilities from App.utils.models import BlackMarketModel # Models from .category import Category from .unit import Unit from .owner import Owner class Item(BlackMarketModel): """Items model. Is a model to items we goin to sell """ name = models.CharField(max_length=100, unique=True, blank=False, null=False) category = models.ForeignKey(Category, blank=True, on_delete=models.SET_NULL, null=True) description = models.TextField(max_length=200, blank=True) type_item = models.CharField(max_length=15, blank=True) unit = models.ForeignKey(Unit, blank=True, on_delete=models.SET_NULL, null=True) price = models.DecimalField(max_digits=5, decimal_places=2, blank=False, null=False) owner = models.ForeignKey(Owner, blank=True, on_delete=models.SET_NULL, null=True) is_active = models.BooleanField(default=True) def __str__(self): return 'name:{}'.format(self.name)
2.671875
3
run_all.py
yuriisthebest/Advent-of-Code
0
13005
"""Run every registered Advent of Code task and verify its answers."""
import json
import os
import time
from multiprocessing import Process

from utils.paths import PATHS
from years.AoC2021.tasks import TASKS2021

# Constants
PARALLEL_COMPUTATION = True
TASKS = {
    2021: TASKS2021,
}


def asses_task(task: type, answers: dict, year: int) -> None:
    """
    Run a task 4 times (part 1 test, part 1 task, part 2 test, part 2 task)
    Test if the answers of each run correspond to the correct answers

    :param task: Task object able to run a task
    :param answers: The correct answers of the given task
    :param year: The year where this task was asked
    :raises AssertionError: when any produced answer differs from the known one
    """
    t = task()
    pred = t.run_all()
    true = answers[task.__name__]
    # A stored answer of 0 means "unknown"; those comparisons are skipped.
    assert pred[0][0] == true[0] or true[0] == 0, \
        f"({year}, {task.__name__}) Part 1 has failed on the test data. Expected: {true[0]}, got: {pred[0][0]}"
    assert pred[0][1] == true[1] or true[1] == 0, \
        f"({year}, {task.__name__}) Part 1 has failed on the real data. Expected: {true[1]}, got: {pred[0][1]}"
    assert pred[1][0] == true[2] or true[2] == 0, \
        f"({year}, {task.__name__}) Part 2 has failed on the test data. Expected: {true[2]}, got: {pred[1][0]}"
    assert pred[1][1] == true[3] or true[3] == 0, \
        f"({year}, {task.__name__}) Part 2 has failed on the real data. Expected: {true[3]}, got: {pred[1][1]}"


if __name__ == "__main__":
    start = time.perf_counter()
    num_tests = 0
    processes = []
    for year_num in TASKS.keys():
        # Load the known answers of the current year.
        # Bug fix: use os.path.join instead of a hard-coded '\\' separator so
        # the runner also works on non-Windows systems.
        with open(os.path.join(PATHS[year_num], "answers.json")) as f:
            year_answers = json.load(f)
        # Compute task results (unknown answers have a value of 0)
        for current_task in TASKS[year_num]:
            num_tests += 1
            if PARALLEL_COMPUTATION:
                p = Process(target=asses_task, args=[current_task, year_answers, year_num])
                p.start()
                processes.append(p)
            else:
                asses_task(current_task, year_answers, year_num)
    # Bug fix: join() does not propagate a child's AssertionError — it only
    # leaves a non-zero exit code.  Count failures so the run cannot report
    # success while child processes printed tracebacks.
    failed = 0
    for process in processes:
        process.join()
        if process.exitcode != 0:
            failed += 1
    assert failed == 0, f"{failed} of {num_tests} tests failed (see tracebacks above)"
    print(f"\n*** All {num_tests} tests completed successfully in {time.perf_counter() - start:.2f} sec***")
2.828125
3
python/edl/tests/unittests/master_client_test.py
WEARE0/edl
90
13006
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os

import paddle_edl.utils.master_pb2 as master_pb2
import unittest
from edl.utils.master_client import Client
from edl.utils.utils import get_file_list, get_logger

# Talk to the local master service directly, bypassing any HTTP(S) proxy.
os.environ["https_proxy"] = ""
os.environ["http_proxy"] = ""


class TestMasterClient(unittest.TestCase):
    """Integration test for the master service dataset-registration RPC."""

    def setUp(self):
        # The master service is expected to be listening on localhost:8080.
        self._client = Client("127.0.0.1:8080")

    def test_add_dataset(self):
        # Build a DataSet message from the file-list fixture.
        dataset = master_pb2.DataSet()
        dataset.name = "train"
        for entry in get_file_list("./test_file_list.txt"):
            dataset.file_list.append(entry[0])

        # First registration must succeed (no error response).
        res = self._client.add_dataset(dataset)
        assert res is None or res.type == "", "must not any error"

        # Registering the same dataset again must be rejected as a duplicate.
        res = self._client.add_dataset(dataset)
        assert res.type == "DuplicateInitDataSet", "must error"


if __name__ == "__main__":
    logger = get_logger(10)
    unittest.main()
2.046875
2
src/commercetools/services/types.py
BramKaashoek/commercetools-python-sdk
0
13007
import typing

from commercetools import schemas, types
from commercetools.services import abstract
from commercetools.typing import OptionalListStr

__all__ = ["TypeService"]


class TypeDeleteSchema(abstract.AbstractDeleteSchema):
    pass


class TypeQuerySchema(abstract.AbstractQuerySchema):
    pass


def _expand_query(expand: OptionalListStr) -> dict:
    """Build the query-parameter dict for an optional ``expand`` argument."""
    return {"expand": expand} if expand else {}


class TypeService(abstract.AbstractService):
    """CRUD operations for commercetools Type resources."""

    def get_by_id(self, id: str, expand: OptionalListStr = None) -> types.Type:
        return self._client._get(
            f"types/{id}", _expand_query(expand), schemas.TypeSchema
        )

    def get_by_key(self, key: str, expand: OptionalListStr = None) -> types.Type:
        return self._client._get(
            f"types/key={key}", _expand_query(expand), schemas.TypeSchema
        )

    def query(
        self,
        where: OptionalListStr = None,
        sort: OptionalListStr = None,
        expand: OptionalListStr = None,
        limit: int = None,
        offset: int = None,
    ) -> types.TypePagedQueryResponse:
        params = TypeQuerySchema().dump(
            {
                "where": where,
                "sort": sort,
                "expand": expand,
                "limit": limit,
                "offset": offset,
            }
        )
        return self._client._get("types", params, schemas.TypePagedQueryResponseSchema)

    def create(
        self, draft: types.TypeDraft, expand: OptionalListStr = None
    ) -> types.Type:
        return self._client._post(
            "types", _expand_query(expand), draft, schemas.TypeDraftSchema, schemas.TypeSchema
        )

    def update_by_id(
        self,
        id: str,
        version: int,
        actions: typing.List[types.TypeUpdateAction],
        expand: OptionalListStr = None,
        *,
        force_update: bool = False,
    ) -> types.Type:
        return self._update(f"types/{id}", version, actions, expand, force_update)

    def update_by_key(
        self,
        key: str,
        version: int,
        actions: typing.List[types.TypeUpdateAction],
        expand: OptionalListStr = None,
        *,
        force_update: bool = False,
    ) -> types.Type:
        return self._update(f"types/key={key}", version, actions, expand, force_update)

    def delete_by_id(
        self,
        id: str,
        version: int,
        expand: OptionalListStr = None,
        *,
        force_delete: bool = False,
    ) -> types.Type:
        return self._delete(f"types/{id}", version, expand, force_delete)

    def delete_by_key(
        self,
        key: str,
        version: int,
        expand: OptionalListStr = None,
        *,
        force_delete: bool = False,
    ) -> types.Type:
        return self._delete(f"types/key={key}", version, expand, force_delete)

    # -- shared workers -----------------------------------------------------

    def _update(self, endpoint, version, actions, expand, force_update):
        """POST a versioned TypeUpdate to ``endpoint``."""
        update_action = types.TypeUpdate(version=version, actions=actions)
        return self._client._post(
            endpoint=endpoint,
            params=_expand_query(expand),
            data_object=update_action,
            request_schema_cls=schemas.TypeUpdateSchema,
            response_schema_cls=schemas.TypeSchema,
            force_update=force_update,
        )

    def _delete(self, endpoint, version, expand, force_delete):
        """DELETE the versioned resource at ``endpoint``."""
        params = {"version": version}
        if expand:
            params["expand"] = expand
        return self._client._delete(
            endpoint=endpoint,
            params=TypeDeleteSchema().dump(params),
            response_schema_cls=schemas.TypeSchema,
            force_delete=force_delete,
        )
2
2
augraphy/augmentations/noisetexturize.py
RyonSayer/augraphy
36
13008
import random

import cv2
import numpy as np

from augraphy.base.augmentation import Augmentation


class NoiseTexturize(Augmentation):
    """Creates a random noise based texture pattern to emulate paper textures.
    Consequently applies noise patterns to the original image from big to small.

    :param sigma_range: Defines bounds of noise fluctuations.
    :type sigma_range: tuple, optional
    :param turbulence_range: Defines how quickly big patterns will be
        replaced with the small ones. The lower value -
        the more iterations will be performed during texture generation.
    :type turbulence_range: tuple, optional
    :param p: The probability this Augmentation will be applied.
    :type p: float, optional
    """

    def __init__(
        self,
        sigma_range=(3, 10),
        turbulence_range=(2, 5),
        p=1,
    ):
        """Constructor method"""
        super().__init__(p=p)
        self.sigma_range = sigma_range
        self.turbulence_range = turbulence_range

    # Constructs a string representation of this Augmentation.
    def __repr__(self):
        return f"NoiseTexturize(sigma_range={self.sigma_range}, turbulence_range={self.turbulence_range}, p={self.p})"

    # Applies the Augmentation to input data.
    def __call__(self, image, layer=None, force=False):
        """Layer gaussian noise fields over the image from coarse to fine.

        :param image: input image (H x W or H x W x C uint8 array).
        :param layer: unused; kept for pipeline-interface compatibility.
        :param force: apply even when the probability check would skip.
        :return: noised uint8 image, or None when the augmentation is skipped.
        """
        if force or self.should_run():
            image = image.copy()
            sigma = random.randint(self.sigma_range[0], self.sigma_range[1])
            turbulence = random.randint(
                self.turbulence_range[0],
                self.turbulence_range[1],
            )

            result = image.astype(float)
            rows, cols = image.shape[:2]
            if len(image.shape) > 2:
                channel = image.shape[2]
            else:
                channel = 0

            # Start with the coarsest pattern (ratio == cols) and shrink the
            # noise "pixel" size by `turbulence` each pass until it reaches 1.
            ratio = cols
            while not ratio == 1:
                result += self.noise(cols, rows, channel, ratio, sigma=sigma)
                ratio = (ratio // turbulence) or 1
            cut = np.clip(result, 0, 255)
            cut = cut.astype(np.uint8)
            return cut

    def noise(self, width, height, channel, ratio, sigma):
        """The function generates an image, filled with gaussian noise. If ratio
        parameter is specified, noise will be generated for a lesser image and
        then it will be upscaled to the original size. In that case noise will
        generate larger square patterns. To avoid multiple lines, the upscale
        uses interpolation.

        :param ratio: the size of generated noise "pixels"
        :param sigma: defines bounds of noise fluctuations
        """
        mean = 0
        h = int(height / ratio)
        w = int(width / ratio)
        if h == 0:
            h = 1
        if w == 0:
            w = 1

        # Bug fix: the previous code vectorised random.gauss over
        # np.array((w, h)) — i.e. over the two scalars w and h — so each scale
        # contributed only TWO noise samples instead of an (h, w) field,
        # contradicting the docstring.  Draw a proper field (at C speed).
        result = np.random.normal(mean, sigma, size=(h, w))
        result = cv2.resize(
            result,
            dsize=(width, height),
            interpolation=cv2.INTER_LINEAR,
        )
        # For multi-channel input, replicate the noise across channels.
        # NOTE(review): assumes 3 channels — a 4-channel (BGRA) image would
        # mismatch here, as in the original code.
        if channel:
            result = np.stack([result, result, result], axis=2)
        return result
3.59375
4
plugins/modules/bigip_sslo_config_ssl.py
kevingstewart/f5_sslo_ansible
7
13009
<gh_stars>1-10 #!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright: (c) 2021, kevin-dot-g-dot-stewart-at-gmail-dot-com # GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # Version: 1.0.1 #### Updates: #### 1.0.1 - added 9.0 support # - changed max version # - added clientssl "alpn" proxy support # - added clientssl logPublisher support # - added serverssl logPublisher support # - updated version and previousVersion keys to match target SSLO version from __future__ import absolute_import, division, print_function __metaclass__ = type DOCUMENTATION = r''' --- module: bigip_sslo_config_ssl short_description: Manage an SSL Orchestrator SSL configuration description: - Manage an SSL Orchestrator SSL configuration version_added: "1.0.0" options: name: description: - Specifies the name of the SSL configuration. Configuration auto-prepends "ssloT_" to service. Service name should be less than 14 characters and not contain dashes "-". type: str required: True clientSettings: description: - Specifies the client-side SSL settings suboptions: cipherType: description: - Defines the type of cipher used, either "string" (for cipher strings), or "group" (an existing cipher group). type: str choices: - string - group default: string cipher: description: - Defines the actual cipher string (ex. "DEFAULT"), or existing cipher group (ex. /Common/f5-default) to use. type: str default: DEFAULT enableTLS1_3: description: - Defines whether or not to enable client-side TLSv1.3 support. When enabled, the cipherType must be "group" and cipher must indicate an existing cipher group. type: bool default: False cert: description: - Defines the certificate applied in the client side settings. For a forward proxy this is the template certificate and (ex. /Common/default.crt). For a reverse proxy, this is the client-facing server certificate. 
type: str default: /Common/default.crt key: description: - Defines the private key applied in the client side settings. For a forward proxy this is the template key and (ex. /Common/default.key). For a reverse proxy, this is the client-facing server private key. type: str default: /Common/default.key chain: description: - Defines the certificate keychain in the client side settings. type: str default: None caCert: description: - Defines the CA certificate applied in the client side settings. This is the signing/forging CA certificate used for forward proxy TLS handling. This setting is not applicable in reverse proxy SSL. type: str default: None caKey: description: - Defines the CA private key applied in the client side settings. This is the signing/forging CA private key used for forward proxy TLS handling. This setting is not applicable in reverse proxy SSL. type: str default: None caChain: description: - Defines the CA certificate keychain in the client side settings. This would contain any CA subordinated in the trust chain between the signing CA and explicitly-trusted root certificate. If required, it should contain any intermediate CA certificates, up to but not including the self-signed root CA. type: str default: None alpn: description: - Requires 9.0+. Enables or disables ALPN HTTP/2 full proxy in an outbound (forward proxy) topology. type: bool default: False logPublisher: description: - Requires 9.0+. Defines a specific log publisher to use for client-side SSL-related events. type: str default: /Common/sys-ssl-publisher serverSettings: description: - Specifies the server-side SSL settings suboptions: cipherType: description: - Defines the type of cipher used, either "string" (for cipher strings), or "group" (an existing cipher group). type: str choices: - string - group default: string cipher: description: - Defines the actual cipher string (ex. "DEFAULT"), or existing cipher group (ex. /Common/f5-default) to use. 
type: str default: DEFAULT enableTLS1_3: description: - Defines whether or not to enable server-side TLSv1.3 support. When enabled, the cipherType must be "group" and cipher must indicate an existing cipher group. type: bool default: False caBundle: description: - Defines the certificate authority bundle used to validate remote server certificates. This setting is most applicable in the forward proxy use case to validate remote (Internat) server certificates. type: str default: /Common/ca-bundle.crt blockExpired: description: - Defines the action to take if an expired remote server certificate is encountered. For forward proxy the default is to ignore expired certificates (False). For reverse proxy the default is to drop expired certificates (True). type: bool default: False blockUntrusted: description: - Defines the action to take if an untrusted remote server certificate is encountered, based on the defined caBundle. For forward proxy the default is to ignore untrusted certificates (False). For reverse proxy the default is to drop untrusted certificates (True). type: bool default: False ocsp: description: - Defines an OCSP configuration to use to perform certificate revocation checking again remote server certificates. type: str default: None crl: description: - Defines a CRL configuration to use to perform certificate revocation checking again remote server certificates. type: str default: None logPublisher: description: - Requires 9.0+. Defines a specific log publisher to use for server-side SSL-related events. type: str default: /Common/sys-ssl-publisher bypassHandshakeFailure: description: - Defines the action to take if a server side TLS handshake failure is detected. A value of False will cause the connection to fail. A value of True will shutdown TLS decryption and allow the connection to proceed un-decrypted. 
type: bool default: False bypassClientCertFailure: description: - Defines the action to take if a server side TLS handshake client certificate request is detected. A value of False will cause the connection to fail. A value of True will shutdown TLS decryption and allow the connection to proceed un-decrypted. type: bool default: False mode: description: - Defines how this task is handled. With the default setting of 'update', the module performs the tasks required to update the target resource. With the 'output' setting, the resulting JSON object blocks are returned without updating the target resource. This option is useful for debugging, and when subordinate objects (ex. SSL, services, service chains, policy, resolver) are created in the same playbook, and their respectice output JSON referenced in a single Topology create task. type: str choices: - update - output default: update state: description: - Specifies the present/absent state required. type: str choices: - absent - present default: present extends_documentation_fragment: f5networks.f5_modules.f5 author: - <NAME> (kevin-dot-g-dot-stewart-at-gmail-dot-com) ''' EXAMPLES = r''' - name: Create SSLO SSL Forward Proxy Settings (simple) hosts: localhost gather_facts: False connection: local collections: - kevingstewart.f5_sslo_ansible vars: provider: server: 172.16.1.77 user: admin password: <PASSWORD> validate_certs: no server_port: 443 tasks: - name: SSLO SSL forward proxy settings bigip_sslo_config_ssl: provider: "{{ provider }}" name: "demo_ssl" clientSettings: caCert: "/Common/subrsa.f5labs.com" caKey: "/Common/subrsa.f5labs.com" delegate_to: localhost - name: Create SSLO SSL Forward Proxy Settings hosts: localhost gather_facts: False connection: local collections: - kevingstewart.f5_sslo_ansible vars: provider: server: 172.16.1.77 user: admin password: <PASSWORD> validate_certs: no server_port: 443 tasks: - name: SSLO SSL settings bigip_sslo_config_ssl: provider: "{{ provider }}" name: "demo_ssl" 
clientSettings: cipherType: "group" cipher: "/Common/f5-default" enableTLS1_3: True cert: "/Common/default.crt" key: "/Common/default.key" caCert: "/Common/subrsa.f5labs.com" caKey: "/Common/subrsa.f5labs.com" caChain: "/Common/my-ca-chain" alpn: True logPublisher: "/Common/my-ssl-publisher" serverSettings: cipherType: "group" cipher: "/Common/f5-default" enableTLS1_3: True caBundle: "/Common/local-ca-bundle.crt" blockExpired: False blockUntrusted: False ocsp: "/Common/my-ocsp" crl: "/Common/my-crl" logPublisher: "/Common/my-ssl-publisher" bypassHandshakeFailure: True bypassClientCertFailure: True delegate_to: localhost - name: Create SSLO SSL Reverse Proxy Settings (simple) hosts: localhost gather_facts: False connection: local collections: - kevingstewart.f5_sslo_ansible vars: provider: server: 172.16.1.77 user: admin password: <PASSWORD> validate_certs: no server_port: 443 tasks: - name: SSLO SSL settings bigip_sslo_config_ssl: provider: "{{ provider }}" name: "demo_ssl" clientSettings: cert: "/Common/myserver.f5labs.com" key: "/Common/myserver.f5labs.com" delegate_to: localhost - name: Create SSLO SSL Reverse Proxy Settings hosts: localhost gather_facts: False connection: local collections: - kevingstewart.f5_sslo_ansible vars: provider: server: 172.16.1.77 user: admin password: <PASSWORD> validate_certs: no server_port: 443 tasks: - name: SSLO SSL settings bigip_sslo_config_ssl: provider: "{{ provider }}" name: "demo5" clientSettings: cipherType: "group" cipher: "/Common/f5-default" enableTLS1_3: True cert: "/Common/myserver.f5labs.com" key: "/Common/myserver.f5labs.com" chain: "/Common/my-ca-chain" serverSettings: cipherType: "group" cipher: "/Common/f5-default" enableTLS1_3: True caBundle: "/Common/local-ca-bundle.crt" blockExpired: False blockUntrusted: False delegate_to: localhost ''' RETURN = r''' name: description: - Changed name of SSL configuration. 
type: str sample: demo_ssl clientSettings: description: client-side SSL settings type: complex contains: cipherType: description: defines "string" for cipher string, or "group" for cipher group type: str sample: string cipher: description: defines the cipher string or an existing cipher group type: str sample: DEFAULT or /Common/f5-default enableTLS1_3: description: enables or disables client-side TLSv1.3 type: bool sample: True cert: description: defines the client-facing certificate. For forward proxy this is the template certificate. For reverse proxy this is the server certificate. type: str sample: /Common/default.crt key: description: defines the client-facing private key. For forward proxy this is the template key. For reverse proxy this is the server private key. type: str sample: /Common/default.key chain: description: defines the client-facing CA certificate chain. For reverse proxy this is the server certificate's CA chain. type: str sample: /Common/local-ca-chain.crt caCert: description: defines the issuing CA certificate for a forward proxy. type: str sample: /Common/default.crt caKey: description: defines the issuing CA private key for a forward proxy. type: str sample: /Common/default.key caChain: description: defines the CA certificate chain for the issuing CA in a forward proxy. type: str sample: /Common/local-ca-chain.crt alpn: description: requires 9.0+. Enables or disables ALPN HTTP/2 full proxy through a forward proxy topology. type: bool sample: True logPublisher: description: requires 9.0+. Defines a specific log publisher for client-side SSL-related events. 
type: str sample: /Common/sys-ssl-publisher serverSettings: description: network settings for for-service configuration type: complex contains: cipherType: description: defines "string" for cipher string, or "group" for cipher group type: str sample: string cipher: description: defines the cipher string or an existing cipher group type: str sample: DEFAULT or /Common/f5-default enableTLS1_3: description: enables or disables server-side TLSv1.3 type: bool sample: True caBundle: description: defines a CA bundle used to valdate remote server certificates. type: str sample: /Common/ca-bundle.crt blockExpired: description: defines the action to take on receiving an expired remote server certificate, True = block, False = ignore. type: bool sample: True blockUntrusted: description: defines the action to take on receiving an untrusted remote server certificate, True = block, False = ignore. type: bool sample: True ocsp: description: defines aan existing OCSP configuration to validate revocation of remote server certificates. type: str sample: /Common/my-ocsp crl: description: defines aan existing CRL configuration to validate revocation of remote server certificates. type: str sample: /Common/my-crl logPublisher: description: requires 9.0+. Defines a specific log publisher for server-side SSL-related events. type: str sample: /Common/sys-ssl-publisher bypassHandshakeFailure: description: - Defines the action to take on receiving a TLS handshake alert from a server. True = bypass decryption and allow through, False = block type: bool sample: True bypassClientCertFailure: description: - Defines the action to take on receiving a TLS handshake client certificate request from a server. True = bypass decryption and allow through, False = block type: bool sample: True mode: description: describes the action to take on the task. type: str sample: update state: description: - Changed state. 
type: str sample: present ''' from datetime import datetime from ansible.module_utils.basic import ( AnsibleModule, env_fallback ) from ansible_collections.f5networks.f5_modules.plugins.module_utils.bigip import ( F5RestClient ) from ansible_collections.f5networks.f5_modules.plugins.module_utils.common import ( F5ModuleError, AnsibleF5Parameters, transform_name, f5_argument_spec ) from ansible_collections.f5networks.f5_modules.plugins.module_utils.icontrol import ( tmos_version ) from ipaddress import ( ip_network, ip_interface ) import json, time, re global print_output global json_template global obj_attempts global min_version global max_version print_output = [] ## define object creation attempts count (with 1 seconds pause between each attempt) obj_attempts = 20 ## define minimum supported tmos version - min(SSLO 5.x) min_version = 5.0 ## define maximum supported tmos version - max(SSLO 8.x) max_version = 9.0 json_template = { "name":"f5-ssl-orchestrator-gc", "inputProperties":[ { "id":"f5-ssl-orchestrator-operation-context", "type":"JSON", "value":{ "operationType":"CREATE", "deploymentType":"SSL_SETTINGS", "deploymentName":"TEMPLATE_NAME", "deploymentReference":"", "partition":"Common", "strictness":False } }, { "id":"f5-ssl-orchestrator-tls", "type":"JSON", "value":{ "sslSettingsReference":"", "sslSettingsName":"", "description":"", "previousVersion":"7.2", "version":"7.2", "generalSettings":{ "isForwardProxy":True, "bypassHandshakeAlert":False, "bypassClientCertFailure":False }, "clientSettings":{ "ciphers":{ "isCipherString":True, "cipherString":"DEFAULT", "cipherGroup":"/Common/f5-default" }, "certKeyChain":[ { "cert":"/Common/default.crt", "key":"/Common/default.key", "chain":"", "passphrase":"", "name":"CERT_KEY_CHAIN_0" } ], "caCertKeyChain":[], "forwardByPass":True, "enabledSSLProcessingOptions":[] }, "serverSettings":{ "ciphers":{ "isCipherString":True, "cipherString":"DEFAULT", "cipherGroup":"/Common/f5-default" }, 
"caBundle":"/Common/ca-bundle.crt", "expiredCertificates":False, "untrustedCertificates":False, "ocsp":"", "crl":"", "enabledSSLProcessingOptions":[] }, "name":"TEMPLATE_NAME", "advancedMode":"off", "strictness":False, "partition":"Common" } }, { "id":"f5-ssl-orchestrator-topology", "type":"JSON" } ], "configurationProcessorReference":{ "link":"https://localhost/mgmt/shared/iapp/processors/f5-iappslx-ssl-orchestrator-gc" }, "configProcessorTimeoutSeconds": 120, "statsProcessorTimeoutSeconds": 60, "configProcessorAffinity": { "processorPolicy": "LOCAL", "affinityProcessorReference": { "link": "https://localhost/mgmt/shared/iapp/affinity/local" } }, "state":"BINDING", "presentationHtmlReference":{ "link":"https://localhost/iapps/f5-iappslx-ssl-orchestrator/sgc/sgcIndex.html" }, "operation":"CREATE" } json_ca_cert_template = { "cert":"/Common/default.crt", "key":"/Common/defaut.key", "chain":"", "isCa":True, "usage":"CA", "port":"0", "passphrase":"", "certKeyChainMismatch":False, "isDuplicateVal":False, "name":"CA_CERT_KEY_CHAIN_0" } json_enable_tls13 = { "name":"TLSv1.3", "value":"TLSv1.3" } class Parameters(AnsibleF5Parameters): api_map = {} updatables = [] api_attributes = [] returnables = [] class ApiParameters(Parameters): pass class ModuleParameters(Parameters): global print_output @property def name(self): name = self._values['name'] name = "ssloT_" + name return name @property def client_cipher_type(self): try: client_cipher_type = self._values['clientSettings']['cipherType'] if client_cipher_type is None: return "string" return client_cipher_type except: return "string" @property def client_cipher(self): try: client_cipher = self._values['clientSettings']['cipher'] if client_cipher is None: return "DEFAULT" return client_cipher except: return "DEFAULT" @property def client_enable_tls13(self): try: client_enable_tls13 = self._values['clientSettings']['enableTLS1_3'] if client_enable_tls13 is None: return False return client_enable_tls13 except: return False 
@property def client_cert(self): try: client_cert = self._values['clientSettings']['cert'] if client_cert is None: return "/Common/default.crt" return client_cert except: return "/Common/default.crt" @property def client_key(self): try: client_key = self._values['clientSettings']['key'] if client_key is None: return "/Common/default.key" return client_key except: return "/Common/default.key" @property def client_chain(self): try: client_chain = self._values['clientSettings']['chain'] if client_chain is None: return None return client_chain except: return None @property def client_ca_cert(self): try: client_ca_cert = self._values['clientSettings']['caCert'] if client_ca_cert is None: return None return client_ca_cert except: return None @property def client_ca_key(self): try: client_ca_key = self._values['clientSettings']['caKey'] if client_ca_key is None: return None return client_ca_key except: return None @property def client_ca_chain(self): try: client_ca_chain = self._values['clientSettings']['caChain'] if client_ca_chain is None: return None return client_ca_chain except: return None @property def server_cipher_type(self): try: server_cipher_type = self._values['serverSettings']['cipherType'] if server_cipher_type is None: return "string" return server_cipher_type except: return "string" @property def server_cipher(self): try: server_cipher = self._values['serverSettings']['cipher'] if server_cipher is None: return "DEFAULT" return server_cipher except: return "DEFAULT" @property def server_enable_tls13(self): try: server_enable_tls13 = self._values['serverSettings']['enableTLS1_3'] if server_enable_tls13 is None: return False return server_enable_tls13 except: return False @property def server_ca_bundle(self): try: server_ca_bundle = self._values['serverSettings']['caBundle'] if server_ca_bundle is None: return "/Common/ca-bundle.crt" return server_ca_bundle except: return "/Common/ca-bundle.crt" @property def server_block_expired(self): try: 
server_block_expired = self._values['serverSettings']['blockExpired'] if server_block_expired is None: return None return server_block_expired except: return None @property def server_block_untrusted(self): try: server_block_untrusted = self._values['serverSettings']['blockUntrusted'] if server_block_untrusted is None: return None return server_block_untrusted except: return None @property def server_ocsp(self): try: server_ocsp = self._values['serverSettings']['ocsp'] if server_ocsp is None: return None return server_ocsp except: return None @property def server_crl(self): try: server_crl = self._values['serverSettings']['crl'] if server_crl is None: return None return server_crl except: return None @property def bypass_handshake_failure(self): bypass_handshake_failure = self._values['bypassHandshakeFailure'] if bypass_handshake_failure is None: return False return bypass_handshake_failure @property def bypass_clientcert_failure(self): bypass_clientcert_failure = self._values['bypassClientCertFailure'] if bypass_clientcert_failure is None: return False return bypass_clientcert_failure @property def mode(self): mode = self._values['mode'] return mode @property def client_alpn(self): try: client_alpn = self._values['clientSettings']['alpn'] if client_alpn is None: return False return client_alpn except: return False @property def client_log_publisher(self): try: client_log_publisher = self._values['clientSettings']['logPublisher'] if client_log_publisher is None: return "/Common/sys-ssl-publisher" return client_log_publisher except: return "/Common/sys-ssl-publisher" @property def server_log_publisher(self): try: server_log_publisher = self._values['clientSettings']['logPublisher'] if server_log_publisher is None: return "/Common/sys-ssl-publisher" return server_log_publisher except: return "/Common/sys-ssl-publisher" class ModuleManager(object): global print_output global json_template global obj_attempts global min_version global max_version def __init__(self, 
*args, **kwargs): self.module = kwargs.pop('module', None) self.client = F5RestClient(**self.module.params) self.want = ModuleParameters(params=self.module.params) def getSsloVersion(self): ## use this method to get the SSLO version (first two digits (x.y)) uri = "https://{0}:{1}/mgmt/shared/iapp/installed-packages".format( self.client.provider['server'], self.client.provider['server_port'] ) try: resp = self.client.api.get(uri).json() for x in resp["items"]: if x["appName"] == "f5-iappslx-ssl-orchestrator": tmpversion = x["release"].split(".") version = tmpversion[0] + "." + tmpversion[1] return float(version) break except: raise F5ModuleError("SSL Orchestrator package does not appear to be installed. Aborting.") def deleteOperation(self, id): ## use this method to delete an operation that failed uri = "https://{0}:{1}/mgmt/shared/iapp/blocks/{2}".format( self.client.provider['server'], self.client.provider['server_port'], id ) resp = self.client.api.delete(uri) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]: return True else: return False def update_json(self, operation): ## use this to method to create and return a modified copy of the JSON template self.config = json_template ## get base name self.local_name = re.sub('ssloT_', '', self.want.name) ## perform some input validation ## if TLS1.3 is enabled, the isCipherString value must be "false" if self.want.client_enable_tls13 == True and self.want.client_cipher_type == "string": raise F5ModuleError("Enabling client-side TLS 1.3 also requires a cipher group") if self.want.server_enable_tls13 == True and self.want.server_cipher_type == "string": raise F5ModuleError("Enabling server-side TLS 1.3 also requires a cipher group") ## ================================= ## 1.0.1 general update: modify version and previousVersion values to match target BIG-IP version ## ================================= 
self.config["inputProperties"][0]["value"]["version"] = self.ssloVersion self.config["inputProperties"][1]["value"]["version"] = self.ssloVersion self.config["inputProperties"][1]["value"]["previousVersion"] = self.ssloVersion ## general json settings for all operations self.config["inputProperties"][0]["value"]["deploymentName"] = self.want.name self.config["inputProperties"][0]["value"]["operationType"] = operation self.config["inputProperties"][1]["value"]["name"] = self.want.name self.config["inputProperties"][1]["value"]["generalSettings"]["bypassHandshakeAlert"] = self.want.bypass_handshake_failure self.config["inputProperties"][1]["value"]["generalSettings"]["bypassClientCertFailure"] = self.want.bypass_clientcert_failure if self.want.client_enable_tls13 == False: self.config["inputProperties"][1]["value"]["clientSettings"]["enabledSSLProcessingOptions"].append(json_enable_tls13) if self.want.server_enable_tls13 == False: self.config["inputProperties"][1]["value"]["serverSettings"]["enabledSSLProcessingOptions"].append(json_enable_tls13) ## generic client settings self.config["inputProperties"][1]["value"]["clientSettings"]["certKeyChain"][0]["cert"] = self.want.client_cert self.config["inputProperties"][1]["value"]["clientSettings"]["certKeyChain"][0]["key"] = self.want.client_key if self.want.client_chain != None: self.config["inputProperties"][1]["value"]["clientSettings"]["certKeyChain"][0]["chain"] = self.want.client_chain if self.want.client_cipher_type == "string": self.config["inputProperties"][1]["value"]["clientSettings"]["ciphers"]["isCipherString"] = True self.config["inputProperties"][1]["value"]["clientSettings"]["ciphers"]["cipherString"] = self.want.client_cipher elif self.want.client_cipher_type == "group": self.config["inputProperties"][1]["value"]["clientSettings"]["ciphers"]["isCipherString"] = False self.config["inputProperties"][1]["value"]["clientSettings"]["ciphers"]["cipherGroup"] = self.want.client_cipher ## generic server settings 
self.config["inputProperties"][1]["value"]["serverSettings"]["caBundle"] = self.want.server_ca_bundle if self.want.server_cipher_type == "string": self.config["inputProperties"][1]["value"]["serverSettings"]["ciphers"]["isCipherString"] = True self.config["inputProperties"][1]["value"]["serverSettings"]["ciphers"]["cipherString"] = self.want.server_cipher elif self.want.server_cipher_type == "group": self.config["inputProperties"][1]["value"]["serverSettings"]["ciphers"]["isCipherString"] = False self.config["inputProperties"][1]["value"]["serverSettings"]["ciphers"]["cipherGroup"] = self.want.server_cipher if self.want.server_ocsp != None: self.config["inputProperties"][1]["value"]["serverSettings"]["ocsp"] = self.want.server_ocsp if self.want.server_crl != None: self.config["inputProperties"][1]["value"]["serverSettings"]["crl"] = self.want.server_crl ## Test if this is a forward or reverse proxy config, based on presence of client_ca_cert value if self.want.client_ca_cert != None: ## assume this is a forward proxy self.config["inputProperties"][1]["value"]["generalSettings"]["isForwardProxy"] = True self.proxyType = "forward" self.ca_cert_config = json_ca_cert_template self.ca_cert_config["cert"] = self.want.client_ca_cert self.ca_cert_config["key"] = self.want.client_ca_key if self.want.client_ca_chain != None: self.ca_cert_config["chain"] = self.want.client_ca_chain self.config["inputProperties"][1]["value"]["clientSettings"]["caCertKeyChain"].append(self.ca_cert_config) ## client settings self.config["inputProperties"][1]["value"]["clientSettings"]["forwardByPass"] = True ## server settings - set defaults if none specified if self.want.server_block_untrusted == None: ## for forward proxy default to False unless specified self.config["inputProperties"][1]["value"]["serverSettings"]["untrustedCertificates"] = True else: self.config["inputProperties"][1]["value"]["serverSettings"]["untrustedCertificates"] = self.want.server_block_untrusted if 
self.want.server_block_expired == None: ## for forward proxy default to False unless specified self.config["inputProperties"][1]["value"]["serverSettings"]["expiredCertificates"] = True else: self.config["inputProperties"][1]["value"]["serverSettings"]["expiredCertificates"] = self.want.server_block_expired else: ## assume this is a reverse proxy self.config["inputProperties"][1]["value"]["generalSettings"]["isForwardProxy"] = False self.proxyType = "reverse" ## client settings self.config["inputProperties"][1]["value"]["clientSettings"]["forwardByPass"] = False ## server settings - set defaults if none specified if self.want.server_block_untrusted == None: ## for forward proxy default to False unless specified self.config["inputProperties"][1]["value"]["serverSettings"]["untrustedCertificates"] = False else: self.config["inputProperties"][1]["value"]["serverSettings"]["untrustedCertificates"] = self.want.server_block_untrusted if self.want.server_block_expired == None: ## for forward proxy default to False unless specified self.config["inputProperties"][1]["value"]["serverSettings"]["expiredCertificates"] = False else: self.config["inputProperties"][1]["value"]["serverSettings"]["expiredCertificates"] = self.want.server_block_expired ## ================================================ ## updates: 9.0 ## alpn - only available in 9.0+ and forward proxy if self.ssloVersion >= 9.0 and self.proxyType == "forward": self.config["inputProperties"][1]["value"]["clientSettings"]["alpn"] = self.want.client_alpn ## logPublisher - only available in 9.0+ if self.ssloVersion >= 9.0: self.config["inputProperties"][1]["value"]["clientSettings"]["logPublisher"] = self.want.client_log_publisher self.config["inputProperties"][1]["value"]["serverSettings"]["logPublisher"] = self.want.server_log_publisher ## ================================================ ## create operation if operation == "CREATE": #### TO DO: update JSON code for CREATE operation self.config["name"] = 
"sslo_obj_SSL_SETTINGS_CREATE_" + self.want.name ## modify/delete operations elif operation in ["DELETE", "MODIFY"]: self.config["name"] = "sslo_obj_SSL_SETTINGS_MODIFY_" + self.want.name ## get object ID and add to deploymentReference and existingBlockId values uri = "https://{0}:{1}/mgmt/shared/iapp/blocks/".format( self.client.provider['server'], self.client.provider['server_port'] ) query = "?$filter=name+eq+'{0}'&$select=id".format(self.want.name) resp = self.client.api.get(uri + query) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if resp.status not in [200, 201, 202] or 'code' in response and response['code'] not in [200, 201, 202]: raise F5ModuleError(resp.content) try: id = response["items"][0]['id'] self.config["inputProperties"][0]["value"]["deploymentReference"] = "https://localhost/mgmt/shared/iapp/blocks/" + id self.config["inputProperties"][1]["value"]["existingBlockId"] = id except: raise F5ModuleError("Failure to create/modify - unable to fetch object ID") if operation in ["MODIFY"]: pass #### TO DO: update JSON code for MODIFY operation return self.config def exec_module(self): start = datetime.now().isoformat() self.ssloVersion = self.getSsloVersion() changed = False result = dict() state = self.want.state ## test for correct TMOS version if self.ssloVersion < min_version or self.ssloVersion > max_version: raise F5ModuleError("Unsupported SSL Orchestrator version, requires a version between min(" + str(min_version) + ") and max(" + str(max_version) + ")") ## enable/disable testdev to output to JSON only for testing (1) or push config to server (0) testdev = 0 if testdev: self.exists() jsonstr = self.update_json("CREATE") print_output.append("jsonstr = " + str(jsonstr)) else: if state == 'present': changed = self.update() elif state == 'absent': changed = self.absent() result.update(dict(changed=changed)) print_output.append('changed=' + str(changed)) return result def update(self): if self.module.check_mode: 
return True ## use this method to create the objects (if not exists) or modify (if exists) if self.exists(): ## MODIFY: object exists - perform modify - get modified json first jsonstr = self.update_json("MODIFY") if self.want.mode == "output": print_output.append(jsonstr) else: ## post the object modify json uri = "https://{0}:{1}/mgmt/shared/iapp/blocks/".format( self.client.provider['server'], self.client.provider['server_port'] ) resp = self.client.api.post(uri, json=jsonstr) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if resp.status not in [200, 201, 202] or 'code' in response and response['code'] not in [200, 201, 202]: raise F5ModuleError(resp.content) ## get operation id from last request and loop through check self.operationId = str(response["id"]) attempts = 1 error = "" while attempts <= obj_attempts: uri = "https://{0}:{1}/mgmt/shared/iapp/blocks/".format( self.client.provider['server'], self.client.provider['server_port'] ) query = "?$filter=id+eq+'{0}'".format(self.operationId) resp = self.client.api.get(uri + query).json() try: if resp["items"][0]["state"] == "BOUND": return True break elif resp["items"][0]["state"] == "ERROR": error = str(resp["items"][0]["error"]) break except: time.sleep(1) attempts += 1 if error != "": ## delete attempted configuration and raise error self.deleteOperation(self.operationId) raise F5ModuleError("Creation error: " + error) else: raise F5ModuleError("Object " + self.want.name + " create/modify operation timeout") else: ## CREATE: object doesn't exist - perform create - get modified json first jsonstr = self.update_json("CREATE") if self.want.mode == "output": print_output.append(jsonstr) else: ## post the object create json uri = "https://{0}:{1}/mgmt/shared/iapp/blocks/".format( self.client.provider['server'], self.client.provider['server_port'] ) resp = self.client.api.post(uri, json=jsonstr) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 
resp.status not in [200, 201, 202] or 'code' in response and response['code'] not in [200, 201, 202]: raise F5ModuleError(resp.content) ## get operation id from last request and loop through check self.operationId = str(response["id"]) attempts = 1 error = "" while attempts <= obj_attempts: uri = "https://{0}:{1}/mgmt/shared/iapp/blocks/".format( self.client.provider['server'], self.client.provider['server_port'] ) query = "?$filter=id+eq+'{0}'".format(self.operationId) resp = self.client.api.get(uri + query).json() try: if resp["items"][0]["state"] == "BOUND": return True break elif resp["items"][0]["state"] == "ERROR": error = str(resp["items"][0]["error"]) break except: time.sleep(1) attempts += 1 if error != "": ## delete attempted configuration and raise error self.deleteOperation(self.operationId) raise F5ModuleError("Creation error: " + self.operationId + ":" + error) else: raise F5ModuleError("Object " + self.want.name + " create/modify operation timeout") def absent(self): ## use this method to delete the objects (if exists) if self.exists(): if self.module.check_mode: return True ## DELETE: object doesn't exist - perform create - get modified json first jsonstr = self.update_json("DELETE") if self.want.mode == "output": print_output.append(jsonstr) else: ## post the object create json uri = "https://{0}:{1}/mgmt/shared/iapp/blocks/".format( self.client.provider['server'], self.client.provider['server_port'] ) resp = self.client.api.post(uri, json=jsonstr) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if resp.status not in [200, 201, 202] or 'code' in response and response['code'] not in [200, 201, 202]: raise F5ModuleError(resp.content) ## get operation id from last request and loop through check self.operationId = str(response["id"]) attempts = 1 error = "" while attempts <= obj_attempts: uri = "https://{0}:{1}/mgmt/shared/iapp/blocks/".format( self.client.provider['server'], self.client.provider['server_port'] ) query 
= "?$filter=id+eq+'{0}'".format(self.operationId) resp = self.client.api.get(uri + query).json() try: if resp["items"][0]["state"] == "BOUND": return True break elif resp["items"][0]["state"] == "ERROR": error = str(resp["items"][0]["error"]) break except: time.sleep(1) attempts += 1 if error != "": ## delete attempted configuration and raise error self.deleteOperation(self.operationId) raise F5ModuleError("Creation error: " + self.operationId + ":" + error) else: raise F5ModuleError("Object " + self.want.name + " create/modify operation timeout") else: ## object doesn't exit - just exit (changed = False) return False def exists(self): ## use this method to see if the objects already exists - queries for the respective application service object uri = "https://{0}:{1}/mgmt/shared/iapp/blocks/".format( self.client.provider['server'], self.client.provider['server_port'] ) query = "?$filter=name+eq+'{0}'".format(self.want.name) resp = self.client.api.get(uri + query) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]: foundit = 0 for i in range(0, len(response["items"])): try: if str(response["items"][i]["name"]) == self.want.name: foundit = 1 self.existing_config = response["items"][i] break except: pass if foundit == 1: return True else: return False else: return False class ArgumentSpec(object): def __init__(self): self.supports_check_mode = True argument_spec = dict( name=dict(required=True), clientSettings=dict( required=True, type='dict', options=dict( cipherType=dict( choices=['string','group'], default='string' ), cipher=dict(default=None), enableTLS1_3=dict(type='bool', default=False), cert=dict(default='/Common/default.crt'), key=dict(default='/Common/default.key'), chain=dict(default=None), caCert=dict(default=None), caKey=dict(default=None), caChain=dict(), alpn=dict(type='bool', default=False), 
logPublisher=dict(default='/Common/sys-ssl-publisher') ) ), serverSettings=dict( type='dict', options=dict( cipherType=dict( choices=['string','group'], default='string' ), cipher=dict(default=None), enableTLS1_3=dict(type='bool', default=False), caBundle=dict(default='/Common/ca-bundle.crt'), blockExpired=dict(type='bool'), blockUntrusted=dict(type='bool'), ocsp=dict(default=None), crl=dict(default=None), logPublisher=dict(default='/Common/sys-ssl-publisher') ) ), bypassHandshakeFailure=dict(type='bool', default=False), bypassClientCertFailure=dict(type='bool', default=False), state=dict( default='present', choices=['absent','present'] ), mode=dict( choices=["update","output"], default="update" ) ) self.argument_spec = {} self.argument_spec.update(f5_argument_spec) self.argument_spec.update(argument_spec) def main(): ## start here ## define global print_output global print_output print_output = [] ## define argumentspec spec = ArgumentSpec() module = AnsibleModule( argument_spec=spec.argument_spec, supports_check_mode=spec.supports_check_mode, ) ## send to exec_module, result contains output of tasks try: mm = ModuleManager(module=module) results = mm.exec_module() result = dict( print_output = print_output, **results ) module.exit_json(**result) except F5ModuleError as ex: module.fail_json(msg=str(ex)) if __name__ == '__main__': main()
1.390625
1
nemo/pipelines.py
simonsobs/nemo
2
13010
<gh_stars>1-10 """ This module defines pipelines - sets of tasks in nemo that we sometimes want to do on different inputs (e.g., real data or simulated data). """ import os import sys import glob import shutil import time import astropy.io.fits as pyfits import astropy.table as atpy from astLib import astWCS import numpy as np from scipy import ndimage, interpolate import copy from pixell import enmap import nemo from . import startUp from . import filters from . import photometry from . import catalogs from . import maps from . import signals from . import completeness from . import MockSurvey import nemoCython #------------------------------------------------------------------------------------------------------------ def filterMapsAndMakeCatalogs(config, rootOutDir = None, copyFilters = False, measureFluxes = True, invertMap = False, verbose = True, useCachedMaps = True): """Runs the map filtering and catalog construction steps according to the given configuration. Args: config (:obj: 'startup.NemoConfig'): Nemo configuration object. rootOutDir (str): If None, use the default given by config. Otherwise, use this to override where the output filtered maps and catalogs are written. copyFilters (bool, optional): If True, and rootOutDir is given (not None), then filters will be copied from the default output location (from a pre-existing nemo run) to the appropriate directory under rootOutDir. This is used by, e.g., contamination tests based on sky sims, where the same kernels as used on the real data are applied to simulated maps. If rootOutDir = None, setting copyKernels = True has no effect. measureFluxes (bool, optional): If True, measure fluxes. If False, just extract S/N values for detected objects. invertMap (bool, optional): If True, multiply all maps by -1; needed by :meth:maps.estimateContaminationFromInvertedMaps). Returns: Optimal catalog (keeps the highest S/N detection when filtering at multiple scales). 
Note: See bin/nemo for how this pipeline is applied to real data, and maps.sourceInjectionTest for how this is applied to source-free sims that are generated on the fly. """ if config.parDict['twoPass'] == False: catalog=_filterMapsAndMakeCatalogs(config, rootOutDir = rootOutDir, copyFilters = copyFilters, measureFluxes = measureFluxes, invertMap = invertMap, verbose = verbose, useCachedMaps = useCachedMaps) else: # Two pass pipeline # On 1st pass, find sources (and maybe clusters) with canned settings, masking nothing. # On 2nd pass, the 1st pass catalog will be used to mask or subtract sources from maps used for # noise estimation only. # No point doing this if we're not using the map itself for the noise term in the filter for f in config.parDict['mapFilters']: for key in f.keys(): if key == 'noiseParams' and f['noiseParams']['method'] != 'dataMap': raise Exception("There is no point running if filter noise method != 'dataMap'.") # Pass 1 - find point sources, save nothing # NOTE: We need to do this for each map in the list, if we have a multi-frequency filter pass1PtSrcSettings={'label': "Beam", 'class': "BeamMatchedFilter", 'params': {'noiseParams': {'method': "model", 'noiseGridArcmin': 40.0, 'numNoiseBins': 2}, 'saveFilteredMaps': False, 'outputUnits': 'uK', 'edgeTrimArcmin': 0.0}} config.parDict['mapFilters']=[pass1PtSrcSettings] config.parDict['photFilter']=None config.parDict['maskPointSourcesFromCatalog']=[] # This is only applied on the 2nd pass config.parDict['measureShapes']=True # Double-lobed extended source at f090 causes havoc in one tile orig_unfilteredMapsDictList=list(config.unfilteredMapsDictList) config.parDict['forcedPhotometryCatalog']=None # If in this mode, only wanted on 2nd pass pass1CatalogsList=[] surveyMasksList=[] # ok, these should all be the same, otherwise we have problems... 
for mapDict in orig_unfilteredMapsDictList: # We use whole tile area (i.e., don't trim overlaps) so that we get everything if under MPI # Otherwise, powerful sources in overlap regions mess things up under MPI # Serial mode doesn't have this issue as it can see the whole catalog over all tiles # But since we now use full area, we may double subtract ovelap sources when in serial mode # So the removeDuplicates call fixes that, and doesn't impact anything else here surveyMasksList.append(mapDict['surveyMask']) mapDict['surveyMask']=None config.unfilteredMapsDictList=[mapDict] catalog=_filterMapsAndMakeCatalogs(config, verbose = False, writeAreaMasks = False) if len(catalog) > 0 : catalog, numDuplicatesFound, names=catalogs.removeDuplicates(catalog) pass1CatalogsList.append(catalog) # Pass 2 - subtract point sources in the maps used for noise term in filter only # To avoid ringing in the pass 2, we siphon off the super bright things found in pass 1 # We subtract those from the maps used in pass 2 - we then need to add them back at the end config.restoreConfig() config.parDict['measureShapes']=True # We'll keep this for pass 2 as well siphonSNR=50 for mapDict, catalog, surveyMask in zip(orig_unfilteredMapsDictList, pass1CatalogsList, surveyMasksList): #catalogs.catalog2DS9(catalog[catalog['SNR'] > siphonSNR], config.diagnosticsDir+os.path.sep+"pass1_highSNR_siphoned.reg") mapDict['noiseMaskCatalog']=catalog[catalog['SNR'] < siphonSNR] mapDict['subtractPointSourcesFromCatalog']=[catalog[catalog['SNR'] > siphonSNR]] mapDict['maskSubtractedPointSources']=True mapDict['surveyMask']=surveyMask config.unfilteredMapsDictList=orig_unfilteredMapsDictList catalog=_filterMapsAndMakeCatalogs(config, verbose = False) # Merge back in the bright sources that were subtracted in pass 1 # (but we don't do that in forced photometry mode) mergeList=[catalog] if config.parDict['forcedPhotometryCatalog'] is None: for pass1Catalog in pass1CatalogsList: 
mergeList.append(pass1Catalog[pass1Catalog['SNR'] > siphonSNR]) catalog=atpy.vstack(mergeList) return catalog #------------------------------------------------------------------------------------------------------------ def _filterMapsAndMakeCatalogs(config, rootOutDir = None, copyFilters = False, measureFluxes = True, invertMap = False, verbose = True, useCachedMaps = True, writeAreaMasks = True): """Runs the map filtering and catalog construction steps according to the given configuration. Args: config (:obj: 'startup.NemoConfig'): Nemo configuration object. rootOutDir (str): If None, use the default given by config. Otherwise, use this to override where the output filtered maps and catalogs are written. copyFilters (bool, optional): If True, and rootOutDir is given (not None), then filters will be copied from the default output location (from a pre-existing nemo run) to the appropriate directory under rootOutDir. This is used by, e.g., contamination tests based on sky sims, where the same kernels as used on the real data are applied to simulated maps. If rootOutDir = None, setting copyKernels = True has no effect. measureFluxes (bool, optional): If True, measure fluxes. If False, just extract S/N values for detected objects. invertMap (bool, optional): If True, multiply all maps by -1; needed by :meth:maps.estimateContaminationFromInvertedMaps). Returns: Optimal catalog (keeps the highest S/N detection when filtering at multiple scales). Note: See bin/nemo for how this pipeline is applied to real data, and maps.sourceInjectionTest for how this is applied to source-free sims that are generated on the fly. """ # If running on sims (source-free or with injected sources), this ensures we use the same kernels for # filtering the sim maps as was used on the real data, by copying kernels to the sims dir. The kernels # will then be loaded automatically when filterMaps is called. Yes, this is a bit clunky... 
if rootOutDir is not None: filteredMapsDir=rootOutDir+os.path.sep+"filteredMaps" diagnosticsDir=rootOutDir+os.path.sep+"diagnostics" dirList=[rootOutDir, filteredMapsDir, diagnosticsDir] for d in dirList: os.makedirs(d, exist_ok = True) if copyFilters == True: for tileName in config.tileNames: fileNames=glob.glob(config.diagnosticsDir+os.path.sep+tileName+os.path.sep+"filter*#%s*.fits" % (tileName)) if len(fileNames) == 0: raise Exception("Could not find pre-computed filters to copy - you need to add 'saveFilter: True' to the filter params in the config file (this is essential for doing source injection sims quickly).") kernelCopyDestDir=diagnosticsDir+os.path.sep+tileName os.makedirs(kernelCopyDestDir, exist_ok = True) for f in fileNames: dest=kernelCopyDestDir+os.path.sep+os.path.split(f)[-1] if os.path.exists(dest) == False: shutil.copyfile(f, dest) print("... copied filter %s to %s ..." % (f, dest)) else: rootOutDir=config.rootOutDir filteredMapsDir=config.filteredMapsDir diagnosticsDir=config.diagnosticsDir # We re-sort the filters list here - in case we have photFilter defined photFilter=config.parDict['photFilter'] filtersList=[] if photFilter is not None: for f in config.parDict['mapFilters']: if f['label'] == photFilter: filtersList.append(f) for f in config.parDict['mapFilters']: if photFilter is not None: if f['label'] == photFilter: continue filtersList.append(f) if photFilter is not None: assert(filtersList[0]['label'] == photFilter) photFilteredMapDict=None # Make filtered maps for each filter and tile catalogDict={} for tileName in config.tileNames: # Now have per-tile directories (friendlier for Lustre) tileFilteredMapsDir=filteredMapsDir+os.path.sep+tileName tileDiagnosticsDir=diagnosticsDir+os.path.sep+tileName for d in [tileFilteredMapsDir, tileDiagnosticsDir]: os.makedirs(d, exist_ok = True) if verbose == True: print(">>> Making filtered maps - tileName = %s ..." % (tileName)) # We could load the unfiltered map only once here? 
# We could also cache 'dataMap' noise as it will always be the same for f in filtersList: label=f['label']+"#"+tileName catalogDict[label]={} if 'saveDS9Regions' in f['params'] and f['params']['saveDS9Regions'] == True: DS9RegionsPath=config.filteredMapsDir+os.path.sep+tileName+os.path.sep+"%s_filteredMap.reg" % (label) else: DS9RegionsPath=None filteredMapDict=filters.filterMaps(config.unfilteredMapsDictList, f, tileName, filteredMapsDir = tileFilteredMapsDir, diagnosticsDir = tileDiagnosticsDir, selFnDir = config.selFnDir, verbose = True, undoPixelWindow = True, useCachedMaps = useCachedMaps) if f['label'] == photFilter: photFilteredMapDict={} photFilteredMapDict['SNMap']=filteredMapDict['SNMap'] photFilteredMapDict['data']=filteredMapDict['data'] # Forced photometry on user-supplied list of objects, or detect sources if 'forcedPhotometryCatalog' in config.parDict.keys() and config.parDict['forcedPhotometryCatalog'] is not None: catalog=photometry.makeForcedPhotometryCatalog(filteredMapDict, config.parDict['forcedPhotometryCatalog'], useInterpolator = config.parDict['useInterpolator'], DS9RegionsPath = DS9RegionsPath) else: # Normal mode catalog=photometry.findObjects(filteredMapDict, threshold = config.parDict['thresholdSigma'], minObjPix = config.parDict['minObjPix'], findCenterOfMass = config.parDict['findCenterOfMass'], removeRings = config.parDict['removeRings'], ringThresholdSigma = config.parDict['ringThresholdSigma'], rejectBorder = config.parDict['rejectBorder'], objIdent = config.parDict['objIdent'], longNames = config.parDict['longNames'], useInterpolator = config.parDict['useInterpolator'], measureShapes = config.parDict['measureShapes'], invertMap = invertMap, DS9RegionsPath = DS9RegionsPath) # We write area mask here, because it gets modified by findObjects if removing rings # NOTE: condition added to stop writing tile maps again when running nemoMass in forced photometry mode maskFileName=config.selFnDir+os.path.sep+"areaMask#%s.fits" % (tileName) 
surveyMask=np.array(filteredMapDict['surveyMask'], dtype = int) if writeAreaMasks == True: if os.path.exists(maskFileName) == False and os.path.exists(config.selFnDir+os.path.sep+"areaMask.fits") == False: maps.saveFITS(maskFileName, surveyMask, filteredMapDict['wcs'], compressed = True, compressionType = 'PLIO_1') if measureFluxes == True: photometry.measureFluxes(catalog, filteredMapDict, config.diagnosticsDir, photFilteredMapDict = photFilteredMapDict, useInterpolator = config.parDict['useInterpolator']) else: # Get S/N only - if the reference (fixed) filter scale has been given # This is (probably) only used by maps.estimateContaminationFromInvertedMaps if photFilter is not None: photometry.getSNRValues(catalog, photFilteredMapDict['SNMap'], filteredMapDict['wcs'], prefix = 'fixed_', useInterpolator = config.parDict['useInterpolator'], invertMap = invertMap) catalogDict[label]['catalog']=catalog # Merged/optimal catalogs optimalCatalog=catalogs.makeOptimalCatalog(catalogDict, constraintsList = config.parDict['catalogCuts']) return optimalCatalog #------------------------------------------------------------------------------------------------------------ def makeSelFnCollection(config, mockSurvey): """Makes a collection of selection function dictionaries (one per footprint specified in selFnFootprints in the config file, plus the full survey mask), that contain information on noise levels, area covered, and completeness. Returns a dictionary (keys: 'full' - corresponding to whole survey, plus other keys named by footprint). 
""" # Q varies across tiles Q=signals.QFit(config) # We only care about the filter used for fixed_ columns photFilterLabel=config.parDict['photFilter'] for filterDict in config.parDict['mapFilters']: if filterDict['label'] == photFilterLabel: break # We'll only calculate completeness for this given selection SNRCut=config.parDict['selFnOptions']['fixedSNRCut'] # Handle any missing options for calcCompleteness (these aren't used by the default fast method anyway) if 'numDraws' not in config.parDict['selFnOptions'].keys(): config.parDict['selFnOptions']['numDraws']=2000000 if 'numIterations' not in config.parDict['selFnOptions'].keys(): config.parDict['selFnOptions']['numIterations']=100 # We can calculate stats in different extra areas (e.g., inside optical survey footprints) footprintsList=[] if 'selFnFootprints' in config.parDict.keys(): footprintsList=footprintsList+config.parDict['selFnFootprints'] # Run the selection function calculation on each tile in turn selFnCollection={'full': []} for footprintDict in footprintsList: if footprintDict['label'] not in selFnCollection.keys(): selFnCollection[footprintDict['label']]=[] for tileName in config.tileNames: RMSTab=completeness.getRMSTab(tileName, photFilterLabel, config.selFnDir) compMz=completeness.calcCompleteness(RMSTab, SNRCut, tileName, mockSurvey, config.parDict['massOptions'], Q, numDraws = config.parDict['selFnOptions']['numDraws'], numIterations = config.parDict['selFnOptions']['numIterations'], method = config.parDict['selFnOptions']['method']) selFnDict={'tileName': tileName, 'RMSTab': RMSTab, 'tileAreaDeg2': RMSTab['areaDeg2'].sum(), 'compMz': compMz} selFnCollection['full'].append(selFnDict) # Generate footprint intersection masks (e.g., with HSC) and RMS tables, which are cached # May as well do this bit here (in parallel) and assemble output later for footprintDict in footprintsList: completeness.makeIntersectionMask(tileName, config.selFnDir, footprintDict['label'], masksList = 
footprintDict['maskList']) tileAreaDeg2=completeness.getTileTotalAreaDeg2(tileName, config.selFnDir, footprintLabel = footprintDict['label']) if tileAreaDeg2 > 0: RMSTab=completeness.getRMSTab(tileName, photFilterLabel, config.selFnDir, footprintLabel = footprintDict['label']) compMz=completeness.calcCompleteness(RMSTab, SNRCut, tileName, mockSurvey, config.parDict['massOptions'], Q, numDraws = config.parDict['selFnOptions']['numDraws'], numIterations = config.parDict['selFnOptions']['numIterations'], method = config.parDict['selFnOptions']['method']) selFnDict={'tileName': tileName, 'RMSTab': RMSTab, 'tileAreaDeg2': RMSTab['areaDeg2'].sum(), 'compMz': compMz} selFnCollection[footprintDict['label']].append(selFnDict) # Optional mass-limit maps if 'massLimitMaps' in list(config.parDict['selFnOptions'].keys()): for massLimitDict in config.parDict['selFnOptions']['massLimitMaps']: completeness.makeMassLimitMap(SNRCut, massLimitDict['z'], tileName, photFilterLabel, mockSurvey, config.parDict['massOptions'], Q, config.diagnosticsDir, config.selFnDir) return selFnCollection #------------------------------------------------------------------------------------------------------------ def makeMockClusterCatalog(config, numMocksToMake = 1, combineMocks = False, writeCatalogs = True, writeInfo = True, verbose = True): """Generate a mock cluster catalog using the given nemo config. 
Returns: List of catalogs (each is an astropy Table object) """ # Having changed nemoMock interface, we may need to make output dir if os.path.exists(config.mocksDir) == False: os.makedirs(config.mocksDir, exist_ok = True) # Noise sources in mocks if 'applyPoissonScatter' in config.parDict.keys(): applyPoissonScatter=config.parDict['applyPoissonScatter'] else: applyPoissonScatter=True if 'applyIntrinsicScatter' in config.parDict.keys(): applyIntrinsicScatter=config.parDict['applyIntrinsicScatter'] else: applyIntrinsicScatter=True if 'applyNoiseScatter' in config.parDict.keys(): applyNoiseScatter=config.parDict['applyNoiseScatter'] else: applyNoiseScatter=True if verbose: print(">>> Mock noise sources (Poisson, intrinsic, measurement noise) = (%s, %s, %s) ..." % (applyPoissonScatter, applyIntrinsicScatter, applyNoiseScatter)) # Q varies across tiles Q=signals.QFit(config) # We only care about the filter used for fixed_ columns photFilterLabel=config.parDict['photFilter'] for filterDict in config.parDict['mapFilters']: if filterDict['label'] == photFilterLabel: break # The same as was used for detecting objects thresholdSigma=config.parDict['thresholdSigma'] # We need an assumed scaling relation for mock observations scalingRelationDict=config.parDict['massOptions'] if verbose: print(">>> Setting up mock survey ...") # NOTE: Sanity check is possible here: area in RMSTab should equal area from areaMask.fits # If it isn't, there is a problem... 
# Also, we're skipping the individual tile-loading routines here for speed checkAreaConsistency=False wcsDict={} RMSMap=pyfits.open(config.selFnDir+os.path.sep+"RMSMap_%s.fits" % (photFilterLabel)) RMSTab=atpy.Table().read(config.selFnDir+os.path.sep+"RMSTab.fits") count=0 totalAreaDeg2=0 RMSMapDict={} areaDeg2Dict={} if checkAreaConsistency == True: areaMap=pyfits.open(config.selFnDir+os.path.sep+"areaMask.fits") t0=time.time() for tileName in config.tileNames: count=count+1 if tileName == 'PRIMARY': if tileName in RMSMap: extName=tileName data=RMSMap[extName].data else: data=None if data is None: for extName in RMSMap: data=RMSMap[extName].data if data is not None: break RMSMapDict[tileName]=RMSMap[extName].data wcsDict[tileName]=astWCS.WCS(RMSMap[extName].header, mode = 'pyfits') else: RMSMapDict[tileName]=RMSMap[tileName].data wcsDict[tileName]=astWCS.WCS(RMSMap[tileName].header, mode = 'pyfits') # Area from RMS table areaDeg2=RMSTab[RMSTab['tileName'] == tileName]['areaDeg2'].sum() areaDeg2Dict[tileName]=areaDeg2 totalAreaDeg2=totalAreaDeg2+areaDeg2 # Area from map (slower) if checkAreaConsistency == True: areaMask, wcsDict[tileName]=completeness.loadAreaMask(tileName, config.selFnDir) areaMask=areaMap[tileName].data map_areaDeg2=(areaMask*maps.getPixelAreaArcmin2Map(areaMask.shape, wcsDict[tileName])).sum()/(60**2) if abs(map_areaDeg2-areaDeg2) > 1e-4: raise Exception("Area from areaMask.fits doesn't agree with area from RMSTab.fits") RMSMap.close() if checkAreaConsistency == True: areaMap.close() t1=time.time() if verbose: print("... took %.3f sec ..." 
% (t1-t0)) # Useful for testing: if 'seed' in config.parDict.keys(): seed=config.parDict['seed'] else: seed=None if seed is not None: np.random.seed(seed) # We're now using one MockSurvey object for the whole survey massOptions=config.parDict['massOptions'] minMass=5e13 zMin=0.0 zMax=2.0 defCosmo={'H0': 70.0, 'Om0': 0.30, 'Ob0': 0.05, 'sigma8': 0.80, 'ns': 0.95, 'delta': 500, 'rhoType': 'critical'} for key in defCosmo: if key not in massOptions.keys(): massOptions[key]=defCosmo[key] H0=massOptions['H0'] Om0=massOptions['Om0'] Ob0=massOptions['Ob0'] sigma8=massOptions['sigma8'] ns=massOptions['ns'] delta=massOptions['delta'] rhoType=massOptions['rhoType'] mockSurvey=MockSurvey.MockSurvey(minMass, totalAreaDeg2, zMin, zMax, H0, Om0, Ob0, sigma8, ns, delta = delta, rhoType = rhoType, enableDrawSample = True) print("... mock survey parameters:") for key in defCosmo.keys(): print(" %s = %s" % (key, str(massOptions[key]))) for key in ['tenToA0', 'B0', 'Mpivot', 'sigma_int']: print(" %s = %s" % (key, str(scalingRelationDict[key]))) print(" total area = %.1f square degrees" % (totalAreaDeg2)) print(" random seed = %s" % (str(seed))) if verbose: print(">>> Making mock catalogs ...") catList=[] for i in range(numMocksToMake): mockTabsList=[] t0=time.time() for tileName in config.tileNames: # It's possible (depending on tiling) that blank tiles were included - so skip # We may also have some tiles that are almost but not quite blank if RMSMapDict[tileName].sum() == 0 or areaDeg2Dict[tileName] < 0.5: continue mockTab=mockSurvey.drawSample(RMSMapDict[tileName], scalingRelationDict, Q, wcs = wcsDict[tileName], photFilterLabel = photFilterLabel, tileName = tileName, makeNames = True, SNRLimit = thresholdSigma, applySNRCut = True, areaDeg2 = areaDeg2Dict[tileName], applyPoissonScatter = applyPoissonScatter, applyIntrinsicScatter = applyIntrinsicScatter, applyNoiseScatter = applyNoiseScatter) if mockTab is not None: mockTabsList.append(mockTab) tab=atpy.vstack(mockTabsList) 
catList.append(tab) t1=time.time() if verbose: print("... making mock catalog %d took %.3f sec ..." % (i+1, t1-t0)) # Write catalog and .reg file if writeCatalogs == True: #colNames=['name', 'RADeg', 'decDeg', 'template', 'redshift', 'redshiftErr', 'true_M500', 'true_fixed_y_c', 'fixed_SNR', 'fixed_y_c', 'fixed_err_y_c'] #colFmts =['%s', '%.6f', '%.6f', '%s', '%.3f', '%.3f', '%.3f', '%.3f', '%.1f', '%.3f', '%.3f'] mockCatalogFileName=config.mocksDir+os.path.sep+"mockCatalog_%d.csv" % (i+1) catalogs.writeCatalog(tab, mockCatalogFileName) catalogs.writeCatalog(tab, mockCatalogFileName.replace(".csv", ".fits")) addInfo=[{'key': 'fixed_SNR', 'fmt': '%.1f'}] catalogs.catalog2DS9(tab, mockCatalogFileName.replace(".csv", ".reg"), constraintsList = [], addInfo = addInfo, color = "cyan") if combineMocks == True: tab=None for i in range(numMocksToMake): mockCatalogFileName=config.mocksDir+os.path.sep+"mockCatalog_%d.fits" % (i+1) stackTab=atpy.Table().read(mockCatalogFileName) if tab == None: tab=stackTab else: tab=atpy.vstack([tab, stackTab]) outFileName=config.mocksDir+os.path.sep+"mockCatalog_combined.fits" tab.meta['NEMOVER']=nemo.__version__ tab.write(outFileName, overwrite = True) # Write a small text file with the parameters used to generate the mocks into the mocks dir (easier than using headers) if writeInfo == True: mockKeys=['massOptions', 'makeMockCatalogs', 'applyPoissonScatter', 'applyIntrinsicScatter', 'applyNoiseScatter'] with open(config.mocksDir+os.path.sep+"mockParameters.txt", "w") as outFile: for m in mockKeys: if m in config.parDict.keys(): outFile.write("%s: %s\n" % (m, config.parDict[m])) return catList #------------------------------------------------------------------------------------------------------------ def extractSpec(config, tab, method = 'CAP', diskRadiusArcmin = 4.0, highPassFilter = False, estimateErrors = True, saveFilteredMaps = False): """Returns a table containing the spectral energy distribution, extracted using either compensated 
aperture photometry (CAP) at each object location in the input catalog, or using a matched filter. Maps at different frequencies will first be matched to the lowest resolution beam, using a Gaussian kernel. For the CAP method, at each object location, the temperature fluctuation is measured within a disk of radius diskRadiusArcmin, after subtracting the background measured in an annulus between diskRadiusArcmin < r < sqrt(2) * diskRadiusArcmin (i.e., this should be similar to the method described in Schaan et al. 2020). For the matched filter method, the catalog must contain a `template` column, as produced by the main `nemo` script, with template names in the format Arnaud_M2e14_z0p4 (for example). This will be used to set the signal scale used for each object. All definitions of filters in the config will be ignored, in favour of a filter using a simple CMB + white noise model. Identical filters will be used for all maps (i.e., the method of Saro et al. 2014). Args: config (:obj:`startup.NemoConfig`): Nemo configuration object. tab (:obj:`astropy.table.Table`): Catalog containing input object positions. Must contain columns 'name', 'RADeg', 'decDeg'. method (str, optional): diskRadiusArcmin (float, optional): If using CAP method: disk aperture radius in arcmin, within which the signal is measured. The background will be estimated in an annulus between diskRadiusArcmin < r < sqrt(2) * diskRadiusArcmin. highPassFilter (bool, optional): If using CAP method: if set, subtract the large scale background using maps.subtractBackground, with the smoothing scale set to 2 * sqrt(2) * diskRadiusArcmin. estimateErrors (bool, optional): If used CAP method: if set, estimate uncertainties by placing random apertures throughout the map. For now, this is done on a tile-by-tile basis, and doesn't take into account inhomogeneous noise within a tile. 
saveFilteredMaps (bool, optional): If using matchedFilter method: save the filtered maps under the `nemoSpecCache` directory (which is created in the current working directory, if it doesn't already exist). Returns: Catalog containing spectral energy distribution measurements for each object. For the CAP method, units of extracted signals are uK arcmin^2. For the matchedFilter method, extracted signals are deltaT CMB amplitude in uK. """ diagnosticsDir=config.diagnosticsDir # Choose lowest resolution as the reference beam - we match to that refBeam=None refFWHMArcmin=0 refIndex=0 beams=[] for i in range(len(config.unfilteredMapsDictList)): mapDict=config.unfilteredMapsDictList[i] beam=signals.BeamProfile(mapDict['beamFileName']) if beam.FWHMArcmin > refFWHMArcmin: refBeam=beam refFWHMArcmin=beam.FWHMArcmin refIndex=i beams.append(beam) # Sort the list of beams and maps so that the one with the reference beam is in index 0 config.unfilteredMapsDictList.insert(0, config.unfilteredMapsDictList.pop(refIndex)) beams.insert(0, beams.pop(refIndex)) # Figure out how much we need to Gaussian blur to match the reference beam # NOTE: This was an alternative to proper PSF-matching that wasn't good enough for ACT beams #for i in range(1, len(config.unfilteredMapsDictList)): #mapDict=config.unfilteredMapsDictList[i] #beam=beams[i] #degPerPix=np.mean(np.diff(beam.rDeg)) #assert(abs(np.diff(beam.rDeg).max()-degPerPix) < 0.001) #resMin=1e6 #smoothPix=0 #attFactor=1.0 #for j in range(1, 100): #smoothProf=ndimage.gaussian_filter1d(beam.profile1d, j) #smoothProf=smoothProf/smoothProf.max() #res=np.sum(np.power(refBeam.profile1d-smoothProf, 2)) #if res < resMin: #resMin=res #smoothPix=j #attFactor=1/smoothProf.max() #smoothScaleDeg=smoothPix*degPerPix #mapDict['smoothScaleDeg']=smoothScaleDeg #mapDict['smoothAttenuationFactor']=1/ndimage.gaussian_filter1d(beam.profile1d, smoothPix).max() # For testing on CMB maps here refMapDict=config.unfilteredMapsDictList[0] # PSF matching via a 
convolution kernel kernelDict={} # keys: tile, obsFreqGHz for tileName in config.tileNames: if tileName not in kernelDict.keys(): kernelDict[tileName]={} for i in range(1, len(config.unfilteredMapsDictList)): mapDict=config.unfilteredMapsDictList[i] beam=beams[i] degPerPix=np.mean(np.diff(beam.rDeg)) assert(abs(np.diff(beam.rDeg).max()-degPerPix) < 0.001) # Calculate convolution kernel sizePix=beam.profile1d.shape[0]*2 if sizePix % 2 == 0: sizePix=sizePix+1 symRDeg=np.linspace(-0.5, 0.5, sizePix) assert((symRDeg == 0).sum()) symProf=interpolate.splev(abs(symRDeg), beam.tck) symRefProf=interpolate.splev(abs(symRDeg), refBeam.tck) fSymRef=np.fft.fft(np.fft.fftshift(symRefProf)) fSymBeam=np.fft.fft(np.fft.fftshift(symProf)) fSymConv=fSymRef/fSymBeam fSymConv[fSymBeam < 1e-1]=0 # Was 1e-2; this value avoids ringing, smaller values do not symMatched=np.fft.ifft(fSymBeam*fSymConv).real symConv=np.fft.ifft(fSymConv).real # This allows normalization in same way as Gaussian smooth method symConv=symConv/symConv.sum() convedProf=ndimage.convolve(symProf, np.fft.fftshift(symConv)) attenuationFactor=1/convedProf.max() # norm # Make profile object peakIndex=np.argmax(np.fft.fftshift(symConv)) convKernel=signals.BeamProfile(profile1d = np.fft.fftshift(symConv)[peakIndex:], rDeg = symRDeg[peakIndex:]) ## Check plots #import pylab as plt #plt.figure(figsize=(10,8)) #plt.plot(abs(symRDeg*60), symRefProf, label = 'ref', lw = 3) #plt.plot(abs(symRDeg*60), convedProf*attenuationFactor, label = 'kernel convolved') #integralRatio=np.trapz(symRefProf)/np.trapz(convedProf*attenuationFactor) #plt.title("%.3f" % (integralRatio)) #plt.semilogy() #plt.legend() #ratio=(convedProf*attenuationFactor)/symRefProf #plt.figure(figsize=(10,8)) #plt.plot(abs(symRDeg*60), ratio, label = 'ratio') #plt.plot(abs(symRDeg*60), [1.0]*len(symRDeg), 'r-') #plt.legend() # Fudging 2d kernel to match (fix properly later) # NOTE: Now done at higher res but doesn't make much difference # (but DOES blow up in some 
tiles if you use e.g. have the resolution) wcs=astWCS.WCS(config.tileCoordsDict[tileName]['header'], mode = 'pyfits').copy() wcs.header['CDELT1']=np.diff(refBeam.rDeg)[0] wcs.header['CDELT2']=np.diff(refBeam.rDeg)[0] wcs.header['NAXIS1']=int(np.ceil(2*refBeam.rDeg.max()/wcs.header['CDELT1'])) wcs.header['NAXIS2']=int(np.ceil(2*refBeam.rDeg.max()/wcs.header['CDELT2'])) wcs.updateFromHeader() shape=(wcs.header['NAXIS2'], wcs.header['NAXIS1']) degreesMap=np.ones([shape[0], shape[1]], dtype = float)*1e6 RADeg, decDeg=wcs.pix2wcs(int(degreesMap.shape[1]/2), int(degreesMap.shape[0]/2)) degreesMap, xBounds, yBounds=nemoCython.makeDegreesDistanceMap(degreesMap, wcs, RADeg, decDeg, 1.0) beamMap=signals.makeBeamModelSignalMap(degreesMap, wcs, beam, amplitude = None) refBeamMap=signals.makeBeamModelSignalMap(degreesMap, wcs, refBeam, amplitude = None) matchedBeamMap=maps.convolveMapWithBeam(beamMap*attenuationFactor, wcs, convKernel, maxDistDegrees = 1.0) # Find and apply radial fudge factor yRow=np.where(refBeamMap == refBeamMap.max())[0][0] rowValid=np.logical_and(degreesMap[yRow] < refBeam.rDeg.max(), matchedBeamMap[yRow] != 0) ratio=refBeamMap[yRow][rowValid]/matchedBeamMap[yRow][rowValid] zeroIndex=np.argmin(degreesMap[yRow][rowValid]) assert(degreesMap[yRow][rowValid][zeroIndex] == 0) tck=interpolate.splrep(degreesMap[yRow][rowValid][zeroIndex:], ratio[zeroIndex:]) fudge=interpolate.splev(convKernel.rDeg, tck) #fudge[fudge < 0.5]=1.0 #fudge[fudge > 1.5]=1.0 fudgeKernel=signals.BeamProfile(profile1d = convKernel.profile1d*fudge, rDeg = convKernel.rDeg) ## Check plot #import pylab as plt #plt.figure(figsize=(10,8)) #plt.plot(convKernel.rDeg, fudge, lw = 3, label = 'fudge') #plt.plot(convKernel.rDeg, [1.0]*len(fudge), 'r-') #plt.title("fudge") ##plt.ylim(0, 2) #plt.legend() #plt.show() # 2nd fudge factor - match integrals of 2d kernels fudgeMatchedBeamMap=maps.convolveMapWithBeam(beamMap*attenuationFactor, wcs, fudgeKernel, maxDistDegrees = 1.0) 
attenuationFactor=refBeamMap.sum()/fudgeMatchedBeamMap.sum() # Check at map pixelization that is actually used #shape=(config.tileCoordsDict[tileName]['header']['NAXIS2'], #config.tileCoordsDict[tileName]['header']['NAXIS1']) #wcs=astWCS.WCS(config.tileCoordsDict[tileName]['header'], mode = 'pyfits').copy() #degreesMap=np.ones([shape[0], shape[1]], dtype = float)*1e6 #RADeg, decDeg=wcs.pix2wcs(int(degreesMap.shape[1]/2), int(degreesMap.shape[0]/2)) #degreesMap, xBounds, yBounds=nemoCython.makeDegreesDistanceMap(degreesMap, wcs, RADeg, decDeg, 1.0) #beamMap=signals.makeBeamModelSignalMap(degreesMap, wcs, beam, amplitude = None) #refBeamMap=signals.makeBeamModelSignalMap(degreesMap, wcs, refBeam, amplitude = None) #fudgeMatchedBeamMap=maps.convolveMapWithBeam(beamMap*attenuationFactor, wcs, fudgeKernel, maxDistDegrees = 1.0) ## Check plot #import pylab as plt #yRow=np.where(refBeamMap == refBeamMap.max())[0][0] #rowValid=np.logical_and(degreesMap[yRow] < refBeam.rDeg.max(), fudgeMatchedBeamMap[yRow] != 0) #plt.figure(figsize=(10,8)) #plt.plot(degreesMap[yRow][rowValid]*60, refBeamMap[yRow][rowValid], lw = 3, label = 'ref') #plt.plot(degreesMap[yRow][rowValid]*60, fudgeMatchedBeamMap[yRow][rowValid], label = 'fudged') #integralRatio=np.trapz(fudgeMatchedBeamMap[yRow][rowValid])/np.trapz(refBeamMap[yRow][rowValid]) #plt.title("native map res - %.3f" % (integralRatio)) #plt.semilogy() #plt.ylim(1e-5) #plt.legend() #plt.show() #from astLib import astImages #astImages.saveFITS("ref.fits", refBeamMap, wcs) #astImages.saveFITS("fudgematched.fits", fudgeMatchedBeamMap, wcs) #astImages.saveFITS("diff.fits", refBeamMap-fudgeMatchedBeamMap, wcs) #import IPython #IPython.embed() #sys.exit() # NOTE: If we're NOT passing in 2d kernels, don't need to organise by tile kernelDict[tileName][mapDict['obsFreqGHz']]={'smoothKernel': fudgeKernel, 'smoothAttenuationFactor': attenuationFactor} if method == 'CAP': catalog=_extractSpecCAP(config, tab, kernelDict, diskRadiusArcmin = 4.0, 
highPassFilter = False, estimateErrors = True) elif method == 'matchedFilter': catalog=_extractSpecMatchedFilter(config, tab, kernelDict, saveFilteredMaps = saveFilteredMaps) else: raise Exception("'method' should be 'CAP' or 'matchedFilter'") return catalog #------------------------------------------------------------------------------------------------------------ def _extractSpecMatchedFilter(config, tab, kernelDict, saveFilteredMaps = False, noiseMethod = 'dataMap'): """See extractSpec. """ cacheDir="nemoSpecCache"+os.path.sep+os.path.basename(config.rootOutDir) os.makedirs(cacheDir, exist_ok = True) # Build filter configs allFilters={'class': 'ArnaudModelMatchedFilter', 'params': {'noiseParams': {'method': noiseMethod, 'noiseGridArcmin': 40.0}, 'saveFilteredMaps': False, 'saveRMSMap': False, 'savePlots': False, 'saveDS9Regions': False, 'saveFilter': False, 'outputUnits': 'yc', 'edgeTrimArcmin': 0.0, 'GNFWParams': 'default'}} filtersList=[] templatesUsed=np.unique(tab['template']).tolist() for t in templatesUsed: newDict=copy.deepcopy(allFilters) M500MSun=float(t.split("_M")[-1].split("_")[0]) z=float(t.split("_z")[-1].replace("p", ".")) newDict['params']['M500MSun']=M500MSun newDict['params']['z']=z newDict['label']=t filtersList.append(newDict) # Filter and extract # NOTE: We assume index 0 of the unfiltered maps list is the reference for which the filter is made catalogList=[] for tileName in config.tileNames: print("... rank %d: tileName = %s ..." 
% (config.rank, tileName)) diagnosticsDir=cacheDir+os.path.sep+tileName os.makedirs(diagnosticsDir, exist_ok = True) for f in filtersList: tempTileTab=None # catalogs are organised by tile and template filterObj=None for mapDict in config.unfilteredMapsDictList: if tempTileTab is None: shape=(config.tileCoordsDict[tileName]['header']['NAXIS2'], config.tileCoordsDict[tileName]['header']['NAXIS1']) wcs=astWCS.WCS(config.tileCoordsDict[tileName]['header'], mode = 'pyfits') tempTileTab=catalogs.getCatalogWithinImage(tab, shape, wcs) tempTileTab=tempTileTab[tempTileTab['template'] == f['label']] if tempTileTab is None or len(tempTileTab) == 0: continue if mapDict['obsFreqGHz'] == config.unfilteredMapsDictList[0]['obsFreqGHz']: filteredMapDict, filterObj=filters.filterMaps([mapDict], f, tileName, filteredMapsDir = cacheDir, diagnosticsDir = diagnosticsDir, selFnDir = cacheDir, verbose = True, undoPixelWindow = True, returnFilter = True) else: mapDict['smoothKernel']=kernelDict[tileName][mapDict['obsFreqGHz']]['smoothKernel'] mapDict['smoothAttenuationFactor']=kernelDict[tileName][mapDict['obsFreqGHz']]['smoothAttenuationFactor'] mapDictToFilter=maps.preprocessMapDict(mapDict.copy(), tileName = tileName) filteredMapDict['data']=filterObj.applyFilter(mapDictToFilter['data']) RMSMap=filterObj.makeNoiseMap(filteredMapDict['data']) filteredMapDict['SNMap']=np.zeros(filterObj.shape) mask=np.greater(filteredMapDict['surveyMask'], 0) filteredMapDict['SNMap'][mask]=filteredMapDict['data'][mask]/RMSMap[mask] filteredMapDict['data']=enmap.apply_window(filteredMapDict['data'], pow=-1.0) if saveFilteredMaps == True: outFileName=cacheDir+os.path.sep+'%d_' % (mapDict['obsFreqGHz'])+f['label']+'#'+tileName+'.fits' # Add conversion to delta T in here? 
maps.saveFITS(outFileName, filteredMapDict['data'], filteredMapDict['wcs']) freqTileTab=photometry.makeForcedPhotometryCatalog(filteredMapDict, tempTileTab, useInterpolator = config.parDict['useInterpolator']) photometry.measureFluxes(freqTileTab, filteredMapDict, cacheDir, useInterpolator = config.parDict['useInterpolator'], ycObsFreqGHz = mapDict['obsFreqGHz']) # We don't take tileName from the catalog, some objects in overlap areas may only get cut here if len(freqTileTab) == 0: tempTileTab=None continue tempTileTab, freqTileTab, rDeg=catalogs.crossMatch(tempTileTab, freqTileTab, radiusArcmin = 2.5) colNames=['deltaT_c', 'y_c', 'SNR'] suff='_%d' % (mapDict['obsFreqGHz']) for colName in colNames: tempTileTab[colName+suff]=freqTileTab[colName] if 'err_'+colName in freqTileTab.keys(): tempTileTab['err_'+colName+suff]=freqTileTab['err_'+colName] if tempTileTab is not None and len(tempTileTab) > 0: catalogList.append(tempTileTab) if len(catalogList) > 0: catalog=atpy.vstack(catalogList) else: catalog=[] return catalog #------------------------------------------------------------------------------------------------------------ def _extractSpecCAP(config, tab, kernelDict, method = 'CAP', diskRadiusArcmin = 4.0, highPassFilter = False, estimateErrors = True): """See extractSpec. """ # Define apertures like Schaan et al. style compensated aperture photometry filter innerRadiusArcmin=diskRadiusArcmin outerRadiusArcmin=diskRadiusArcmin*np.sqrt(2) catalogList=[] for tileName in config.tileNames: # This loads the maps, applies any masks, and smooths to approx. 
same scale mapDictList=[] freqLabels=[] for mapDict in config.unfilteredMapsDictList: mapDict=maps.preprocessMapDict(mapDict.copy(), tileName = tileName) if highPassFilter == True: mapDict['data']=maps.subtractBackground(mapDict['data'], mapDict['wcs'], smoothScaleDeg = (2*outerRadiusArcmin)/60) freqLabels.append(int(round(mapDict['obsFreqGHz']))) mapDictList.append(mapDict) wcs=mapDict['wcs'] shape=mapDict['data'].shape # Extract spectra pixAreaMap=maps.getPixelAreaArcmin2Map(shape, wcs) maxSizeDeg=(outerRadiusArcmin*1.2)/60 tileTab=catalogs.getCatalogWithinImage(tab, shape, wcs) for label in freqLabels: tileTab['diskT_uKArcmin2_%s' % (label)]=np.zeros(len(tileTab)) tileTab['err_diskT_uKArcmin2_%s' % (label)]=np.zeros(len(tileTab)) tileTab['diskSNR_%s' % (label)]=np.zeros(len(tileTab)) for row in tileTab: degreesMap=np.ones(shape, dtype = float)*1e6 # NOTE: never move this degreesMap, xBounds, yBounds=nemoCython.makeDegreesDistanceMap(degreesMap, wcs, row['RADeg'], row['decDeg'], maxSizeDeg) innerMask=degreesMap < innerRadiusArcmin/60 outerMask=np.logical_and(degreesMap >= innerRadiusArcmin/60, degreesMap < outerRadiusArcmin/60) for mapDict, label in zip(mapDictList, freqLabels): d=mapDict['data'] diskFlux=(d[innerMask]*pixAreaMap[innerMask]).sum()-(d[outerMask]*pixAreaMap[outerMask]).sum() row['diskT_uKArcmin2_%s' % (label)]=diskFlux # Estimate noise in every measurement (on average) from spatting down on random positions # This will break if noise is inhomogeneous though. But at least it's done separately for each tile. # We can later add something that scales / fits using the weight map? 
if estimateErrors == True: randTab=catalogs.generateRandomSourcesCatalog(mapDict['surveyMask'], wcs, 1000) for label in freqLabels: randTab['diskT_uKArcmin2_%s' % (label)]=np.zeros(len(randTab)) for row in randTab: degreesMap=np.ones(shape, dtype = float)*1e6 # NOTE: never move this degreesMap, xBounds, yBounds=nemoCython.makeDegreesDistanceMap(degreesMap, wcs, row['RADeg'], row['decDeg'], maxSizeDeg) innerMask=degreesMap < innerRadiusArcmin/60 outerMask=np.logical_and(degreesMap >= innerRadiusArcmin/60, degreesMap < outerRadiusArcmin/60) for mapDict, label in zip(mapDictList, freqLabels): d=mapDict['data'] diskFlux=(d[innerMask]*pixAreaMap[innerMask]).sum()-(d[outerMask]*pixAreaMap[outerMask]).sum() row['diskT_uKArcmin2_%s' % (label)]=diskFlux noiseLevels={} for label in freqLabels: if signals.fSZ(float(label)) < 0: SNRSign=-1 else: SNRSign=1 noiseLevels[label]=np.percentile(abs(randTab['diskT_uKArcmin2_%s' % (label)]), 68.3) tileTab['err_diskT_uKArcmin2_%s' % (label)]=noiseLevels[label] tileTab['diskSNR_%s' % (label)]=SNRSign*(tileTab['diskT_uKArcmin2_%s' % (label)]/noiseLevels[label]) catalogList.append(tileTab) catalog=atpy.vstack(catalogList) return catalog
2.171875
2
pypkg-gen.py
GameMaker2k/Neo-Hockey-Test
1
13011
<gh_stars>1-10 #!/usr/bin/env python ''' This program is free software; you can redistribute it and/or modify it under the terms of the Revised BSD License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the Revised BSD License for more details. Copyright 2011-2016 Game Maker 2k - https://github.com/GameMaker2k Copyright 2011-2016 <NAME> - https://github.com/KazukiPrzyborowski $FileInfo: pypkg-gen.py - Last Update: 6/1/2016 Ver. 0.2.0 RC 1 - Author: cooldude2k $ ''' from __future__ import absolute_import, division, print_function, unicode_literals; import re, os, sys, time, platform, datetime, argparse, subprocess; __version_info__ = (0, 2, 0, "rc1"); if(__version_info__[3]!=None): __version__ = str(__version_info__[0])+"."+str(__version_info__[1])+"."+str(__version_info__[2])+"+"+str(__version_info__[3]); if(__version_info__[3]==None): __version__ = str(__version_info__[0])+"."+str(__version_info__[1])+"."+str(__version_info__[2]); proname = "pypkg-gen"; prover = __version__; profullname = proname+" "+prover; def which_exec(execfile): for path in os.environ["PATH"].split(":"): if os.path.exists(path + "/" + execfile): return path + "/" + execfile; linuxdist = [None]; try: linuxdist = platform.linux_distribution(); except AttributeError: linuxdist = [None]; getlinuxdist = linuxdist; setdistroname = "debian"; setdistrocname = "jessie"; if(getlinuxdist[0] is not None and (getlinuxdist[0].lower()=="debian" or getlinuxdist[0].lower()=="ubuntu" or getlinuxdist[0].lower()=="linuxmint")): setdistroname = getlinuxdist[0].lower(); setdistrocname = getlinuxdist[2].lower(); if(setdistrocname==""): lsblocatout = which_exec("lsb_release"); pylsblistp = subprocess.Popen([lsblocatout, "-c"], stdout=subprocess.PIPE, stderr=subprocess.PIPE); pylsbout, pylsberr = pylsblistp.communicate(); if(sys.version[0]=="3"): pylsbout = 
pylsbout.decode("utf-8"); pylsb_esc = re.escape("Codename:")+'([a-zA-Z\t+\s+]+)'; pylsbname = re.findall(pylsb_esc, pylsbout)[0].lower(); setdistrocname = pylsbname.strip(); if(getlinuxdist[0] is not None and getlinuxdist[0].lower()=="archlinux"): setdistroname = getlinuxdist[0].lower(); setdistrocname = None; parser = argparse.ArgumentParser(conflict_handler = "resolve", add_help = True); parser.add_argument("-v", "--version", action = "version", version = profullname); parser.add_argument("-s", "--source", default = os.path.realpath(os.getcwd()), help = "source dir"); parser.add_argument("-d", "--distro", default = setdistroname, help = "enter linux distribution name"); parser.add_argument("-c", "--codename", default = setdistrocname, help = "enter release code name"); parser.add_argument("-p", "--pyver", default = sys.version[0], help = "enter version of python to use"); getargs = parser.parse_args(); bashlocatout = which_exec("bash"); getargs.source = os.path.realpath(getargs.source); getargs.codename = getargs.codename.lower(); getargs.distro = getargs.distro.lower(); if(getargs.pyver=="2"): getpyver = "python2"; if(getargs.pyver=="3"): getpyver = "python3"; if(getargs.pyver!="2" and getargs.pyver!="3"): if(sys.version[0]=="2"): getpyver = "python2"; if(sys.version[0]=="3"): getpyver = "python3"; get_pkgbuild_dir = os.path.realpath(getargs.source+os.path.sep+"pkgbuild"); get_pkgbuild_dist_pre_list = [d for d in os.listdir(get_pkgbuild_dir) if os.path.isdir(os.path.join(get_pkgbuild_dir, d))]; get_pkgbuild_dist_list = []; for dists in get_pkgbuild_dist_pre_list: tmp_pkgbuild_python = os.path.realpath(get_pkgbuild_dir+os.path.sep+dists+os.path.sep+getpyver); if(os.path.exists(tmp_pkgbuild_python) and os.path.isdir(tmp_pkgbuild_python)): get_pkgbuild_dist_list.append(dists); if(not getargs.distro in get_pkgbuild_dist_list): print("Could not build for "+getargs.distro+" distro."); sys.exit(); if(getargs.distro=="debian" or getargs.distro=="ubuntu" or 
getargs.distro=="linuxmint"): pypkgpath = os.path.realpath(getargs.source+os.path.sep+"pkgbuild"+os.path.sep+getargs.distro+os.path.sep+getpyver+os.path.sep+"pydeb-gen.sh"); pypkgenlistp = subprocess.Popen([bashlocatout, pypkgpath, getargs.source, getargs.codename], stdout=subprocess.PIPE, stderr=subprocess.PIPE); pypkgenout, pypkgenerr = pypkgenlistp.communicate(); if(sys.version[0]=="3"): pypkgenout = pypkgenout.decode("utf-8"); print(pypkgenout); pypkgenlistp.wait(); if(getargs.distro=="archlinux"): pypkgpath = os.path.realpath(getargs.source+os.path.sep+"pkgbuild"+os.path.sep+getargs.distro+os.path.sep+getpyver+os.path.sep+"pypac-gen.sh"); pypkgenlistp = subprocess.Popen([bashlocatout, pypkgpath, getargs.source, getargs.codename], stdout=subprocess.PIPE, stderr=subprocess.PIPE); pypkgenout, pypkgenerr = pypkgenlistp.communicate(); if(sys.version[0]=="3"): pypkgenout = pypkgenout.decode("utf-8"); print(pypkgenout); pypkgenlistp.wait();
1.992188
2
10/testtime.py
M0nica/python-foundations
0
13012
<filename>10/testtime.py import time print (time.strftime("%B %e, %Y")) # Guides: # how to formate date: # http://strftime.net/ # how to use time: # http://www.cyberciti.biz/faq/howto-get-current-date-time-in-python/
2.96875
3
2020/02/Teil 2 - V01.py
HeWeMel/adventofcode
1
13013
import re  # kept from original file header (unused in this script)

# Advent of Code 2020, day 2 part 2: a password is valid when the given
# character appears at EXACTLY ONE of the two 1-based positions.
with open('input.txt', 'r') as f:
    pw_ok = 0
    for line in f:
        # line format: "<low>-<high> <char>: <password>"
        rule, _, space_and_pw = line.partition(':')
        lowhigh, _, c = rule.partition(' ')
        low, _, high = lowhigh.partition('-')
        # FIX: strip() instead of [1:-1] -- the old slice assumed a trailing
        # newline and dropped the last password character on a final line
        # without one.
        pw = space_and_pw.strip()
        c1 = pw[int(low) - 1]
        c2 = pw[int(high) - 1]
        # valid iff exactly one of the two positions holds the character (XOR)
        if (c1 == c) != (c2 == c):
            print(low, high, c, pw, c1, c2, 'ok')
            pw_ok += 1
        else:
            print(low, high, c, pw, c1, c2, 'falsch')
    print(pw_ok)
# answer for the author's puzzle input: 737
3.1875
3
slides_manager/openslide_engine.py
crs4/ome_seadragon
31
13014
# Copyright (c) 2019, CRS4
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

import openslide
from openslide import OpenSlide
from openslide.deepzoom import DeepZoomGenerator
from io import BytesIO
from PIL import Image

from .rendering_engine_interface import RenderingEngineInterface
from .. import settings
from ome_seadragon_cache import CacheDriverFactory


class OpenSlideEngine(RenderingEngineInterface):
    """Rendering engine backed by the OpenSlide library.

    Produces DeepZoom tiles, thumbnails and slide metadata for a whole-slide
    image identified by ``image_id``; thumbnails and tiles are read from /
    written to the configured image cache when ``settings.IMAGES_CACHE_ENABLED``.
    """

    def __init__(self, image_id, connection):
        super(OpenSlideEngine, self).__init__(image_id, connection)

    def _get_openslide_wrapper(self, original_file_source, file_mimetype):
        """Open the slide with OpenSlide; None when no file path is resolved."""
        img_path = self._get_image_path(original_file_source, file_mimetype)
        if img_path:
            return OpenSlide(img_path)
        else:
            return None

    def _get_deepzoom_config(self, tile_size=None, limit_bounds=None):
        """Build the DeepZoomGenerator kwargs; explicit arguments override
        the corresponding ``settings.DEEPZOOM_*`` defaults."""
        cfg = {
            'tile_size': tile_size if tile_size is not None else settings.DEEPZOOM_TILE_SIZE,
            'overlap': settings.DEEPZOOM_OVERLAP,
            'limit_bounds': limit_bounds if limit_bounds is not None else settings.DEEPZOOM_LIMIT_BOUNDS
        }
        self.logger.debug(settings.DEEPZOOM_LIMIT_BOUNDS)
        self.logger.debug(cfg)
        return cfg

    def _get_deepzoom_wrapper(self, original_file_source, file_mimetype, tile_size=None, limit_bounds=None):
        """Wrap the slide in a DeepZoomGenerator; None when the slide can't be opened."""
        os_wrapper = self._get_openslide_wrapper(original_file_source, file_mimetype)
        if os_wrapper:
            return DeepZoomGenerator(os_wrapper, **self._get_deepzoom_config(tile_size, limit_bounds))
        else:
            return None

    def _get_image_mpp(self, original_file_source=False, file_mimetype=None):
        """Microns-per-pixel, averaged over X and Y; 0 when the slide is
        unavailable or the MPP properties are missing/non-numeric."""
        slide = self._get_openslide_wrapper(original_file_source, file_mimetype)
        if slide:
            try:
                mpp_x = slide.properties[openslide.PROPERTY_NAME_MPP_X]
                mpp_y = slide.properties[openslide.PROPERTY_NAME_MPP_Y]
                return (float(mpp_x) + float(mpp_y)) / 2
            except (KeyError, ValueError):
                # property absent or not parseable as float
                return 0
        else:
            return 0

    def get_openseadragon_config(self, original_file_source=False, file_mimetype=None):
        """Viewer-side configuration (currently only the 'mpp' value)."""
        return {
            'mpp': self._get_image_mpp(original_file_source, file_mimetype)
        }

    def _get_slide_bounds(self, original_file_source=False, file_mimetype=None):
        """Return (x, y, height, width) from the 'openslide.bounds-*'
        properties (0 for any missing value), or None if the slide can't
        be opened.  Note the order: height BEFORE width."""
        slide = self._get_openslide_wrapper(original_file_source, file_mimetype)
        if slide:
            return (
                int(slide.properties.get('openslide.bounds-x', 0)),
                int(slide.properties.get('openslide.bounds-y', 0)),
                int(slide.properties.get('openslide.bounds-height', 0)),
                int(slide.properties.get('openslide.bounds-width', 0))
            )
        else:
            return None

    def get_slide_bounds(self, original_file_source=False, file_mimetype=None):
        """Same as _get_slide_bounds but as a JSON-friendly dict (or None)."""
        bounds = self._get_slide_bounds(original_file_source, file_mimetype)
        if bounds:
            return {
                'bounds_x': bounds[0],
                'bounds_y': bounds[1],
                'bounds_height': bounds[2],
                'bounds_width': bounds[3]
            }
        else:
            return bounds

    def _get_original_file_json_description(self, resource_path, file_mimetype=None,
                                            tile_size=None, limit_bounds=True):
        """JSON description for the original file; uses the bounded region
        size when limit_bounds, the full slide dimensions otherwise.
        NOTE(review): with limit_bounds and absent 'openslide.bounds-*'
        properties the height/width default to 0 -- confirm callers expect that."""
        slide = self._get_openslide_wrapper(original_file_source=True,
                                            file_mimetype=file_mimetype)
        if slide:
            if limit_bounds:
                _, _, height, width = self._get_slide_bounds(True, file_mimetype)
                return self._get_json_description(resource_path, height, width, tile_size)
            # slide.dimensions is (width, height); the helper takes (height, width)
            return self._get_json_description(resource_path, slide.dimensions[1],
                                              slide.dimensions[0], tile_size)
        return None

    def get_dzi_description(self, original_file_source=False, file_mimetype=None,
                            tile_size=None, limit_bounds=None):
        """DZI XML descriptor for the slide, or None when it can't be opened."""
        dzi_slide = self._get_deepzoom_wrapper(original_file_source, file_mimetype,
                                               tile_size, limit_bounds)
        if dzi_slide:
            return dzi_slide.get_dzi(settings.DEEPZOOM_FORMAT)
        else:
            return None

    # NOTE(review): parameter name 'file_mimeype' (missing 't') is kept as-is
    # because callers may pass it by keyword.
    def get_thumbnail(self, size, original_file_source=False, file_mimeype=None):
        """Return (thumbnail_image, format); cache-first when enabled."""
        if settings.IMAGES_CACHE_ENABLED:
            cache = CacheDriverFactory(settings.IMAGES_CACHE_DRIVER).\
                get_cache(settings.CACHE_HOST, settings.CACHE_PORT,
                          settings.CACHE_DB, settings.CACHE_EXPIRE_TIME)
            # get thumbnail from cache
            thumb = cache.thumbnail_from_cache(self.image_id, size,
                                               settings.DEEPZOOM_FORMAT, 'openslide')
        else:
            thumb = None
        # if thumbnail is not in cache build it ....
        if thumb is None:
            self.logger.debug('No thumbnail loaded from cache, building it')
            slide = self._get_openslide_wrapper(original_file_source, file_mimeype)
            if slide:
                thumb = slide.get_thumbnail((size, size))
                # ... and store it into the cache
                if settings.IMAGES_CACHE_ENABLED:
                    cache.thumbnail_to_cache(self.image_id, thumb, size,
                                             settings.DEEPZOOM_FORMAT, 'openslide')
        else:
            self.logger.debug('Thumbnail loaded from cache')
        return thumb, settings.DEEPZOOM_FORMAT

    def get_tile(self, level, column, row, original_file_source=False,
                 file_mimetype=None, tile_size=None, limit_bounds=None):
        """Return (tile_image, format) for a DeepZoom (level, column, row);
        cache-first when enabled, otherwise rendered via DeepZoomGenerator."""
        if settings.IMAGES_CACHE_ENABLED:
            cache = CacheDriverFactory(settings.IMAGES_CACHE_DRIVER).\
                get_cache(settings.CACHE_HOST, settings.CACHE_PORT,
                          settings.CACHE_DB, settings.CACHE_EXPIRE_TIME)
            tile_size = tile_size if tile_size is not None else settings.DEEPZOOM_TILE_SIZE
            self.logger.debug('TILE SIZE IS: %s', tile_size)
            cache_params = {
                'image_id': self.image_id,
                'level': level,
                'column': column,
                'row': row,
                'tile_size': tile_size,
                'image_format': settings.DEEPZOOM_FORMAT,
                'rendering_engine': 'openslide'
            }
            # JPEG quality participates in the cache key
            if cache_params['image_format'].lower() == 'jpeg':
                cache_params['image_quality'] = settings.DEEPZOOM_JPEG_QUALITY
            # get tile from cache
            tile = cache.tile_from_cache(**cache_params)
        else:
            tile = None
        # if tile is not in cache build it ...
        if tile is None:
            slide = self._get_deepzoom_wrapper(original_file_source, file_mimetype,
                                               tile_size, limit_bounds)
            if slide:
                dzi_tile = slide.get_tile(level, (column, row))
                # round-trip through an in-memory buffer so the returned image
                # is encoded exactly as it would be served (format/quality)
                tile_buffer = BytesIO()
                tile_conf = {
                    'format': settings.DEEPZOOM_FORMAT
                }
                if tile_conf['format'].lower() == 'jpeg':
                    tile_conf['quality'] = settings.DEEPZOOM_JPEG_QUALITY
                dzi_tile.save(tile_buffer, **tile_conf)
                tile = Image.open(tile_buffer)
                # ... and store it into the cache
                if settings.IMAGES_CACHE_ENABLED:
                    cache_params['image_obj'] = tile
                    cache.tile_to_cache(**cache_params)
        return tile, settings.DEEPZOOM_FORMAT
1.804688
2
pyppy/config.py
maehster/pyppy
5
13015
"""Global config management

This module provides functions for initializing, accessing and destroying
a global config object. You can initialize a global config from any
object. In the context of pyppy, only the instance attributes of the
object are used and work with the decorators ``fill_args`` and
``condition``, but you can use any object you like: the config management
methods are just a convenience reference to the original object.

Initialization
--------------
Initialize the global config from e.g. a ``Namespace`` parsed with an
``ArgumentParser`` (parsing from a list for demonstration purposes)::

    from argparse import ArgumentParser
    from pyppy.config import initialize_config

    parser = ArgumentParser()
    parser.add_argument("--message")
    args = parser.parse_args(["--message", "hello!"])
    initialize_config(args)

You can also create an empty global config (``initialize_config()``
without arguments) and fill it afterwards via ``config()``.

Access & modification
---------------------
The original object used for initialization is returned any time you
call ``config()``, so you can do everything with it that you could do
before::

    from pyppy.config import config
    print(config().message)  # "hello!"
    config().message = "bye!"

Reset
-----
There can be only one global config object; trying to initialize a second
one raises an exception. To start over, destroy the current one first::

    from pyppy.config import destroy_config
    destroy_config()
    initialize_config(args2)
"""
from types import SimpleNamespace

from pyppy.exc import ConfigAlreadyInitializedException

# Attribute name under which the config object is stored on the
# ``config`` function itself.
_CONFIG = "pyppy-config"


def initialize_config(obj: object = None) -> None:
    """
    Initialize a global config with the specified object or with a fresh
    empty ``SimpleNamespace`` if no object is given.

    Parameters
    ----------
    obj : object
        Object to initialize the global config with. Whenever you call
        ``pyppy.config.config()`` you will get a reference to this object.

    Raises
    ------
    ConfigAlreadyInitializedException
        If a global config already exists; call ``destroy_config()`` first.

    Returns
    -------
    None

    Examples
    --------
    >>> destroy_config()
    >>> c = SimpleNamespace()
    >>> c.option = "say_hello"
    >>> initialize_config(c)
    >>> config().option
    'say_hello'
    >>> destroy_config()
    """
    if hasattr(config, _CONFIG):
        raise ConfigAlreadyInitializedException(
            (
                "Config has already been initialized. "
                "If you want to initialize a new config call "
                f"{destroy_config.__name__}()."
            )
        )
    if obj is None:
        # FIX: build the fallback here rather than as a default argument.
        # A ``SimpleNamespace()`` default is created once at function
        # definition and would be SHARED by every no-argument
        # initialization (classic mutable-default bug), leaking state
        # across destroy/initialize cycles.
        obj = SimpleNamespace()
    config(obj)


def config(_obj: object = None) -> object:
    """
    Accesses a previously initialized global config.

    Returns
    -------
    object:
        The object that was used to initialize the global config.

    Raises
    ------
    Exception
        If no global config has been initialized yet.

    Examples
    --------
    >>> destroy_config()
    >>> c = SimpleNamespace()
    >>> c.option = "say_hello"
    >>> initialize_config(c)
    >>> config().option
    'say_hello'
    >>> destroy_config()
    """
    # FIX: test ``is not None`` instead of truthiness -- a falsy but
    # valid config object (previously silently dropped) is now stored.
    if not hasattr(config, _CONFIG) and _obj is not None:
        setattr(config, _CONFIG, _obj)
    if not hasattr(config, _CONFIG):
        raise Exception("Please initialize config first!")
    return getattr(config, _CONFIG)


def destroy_config() -> None:
    """
    Deletes the global reference to the object that the config was
    initialized with.

    Examples
    --------
    >>> destroy_config()
    >>> c = SimpleNamespace()
    >>> c.option = "say_hello"
    >>> initialize_config(c)
    >>> config().option
    'say_hello'
    >>> destroy_config()
    >>> config().option
    Traceback (most recent call last):
    ...
    Exception: Please initialize config first!
    """
    if hasattr(config, _CONFIG):
        delattr(config, _CONFIG)
3.453125
3
LeetCode/3_sum.py
milkrong/Basic-Python-DS-Algs
0
13016
def three_sum(nums):
    """
    Given an array nums of n integers, are there elements a, b, c in nums
    such that a + b + c = 0? Find all unique triplets in the array which
    give the sum of zero.

    Sorts ``nums`` in place, then for every anchor element runs a
    two-pointer scan over the remainder, skipping duplicates on both
    sides so each triplet appears exactly once.

    :param nums: list[int]
    :return: list[list[int]]
    """
    count = len(nums)
    if count < 3:
        return []
    nums.sort()
    triplets = []
    for first in range(count - 2):
        # skip duplicate anchors to avoid repeated triplets
        if first > 0 and nums[first] == nums[first - 1]:
            continue
        lo, hi = first + 1, count - 1
        while lo < hi:
            total = nums[first] + nums[lo] + nums[hi]
            if total < 0:
                lo += 1
            elif total > 0:
                hi -= 1
            else:
                triplets.append([nums[first], nums[lo], nums[hi]])
                lo += 1
                hi -= 1
                # move past duplicates of the pair just recorded
                while lo < hi and nums[lo] == nums[lo - 1]:
                    lo += 1
                while lo < hi and nums[hi] == nums[hi + 1]:
                    hi -= 1
    return triplets
3.90625
4
src/tests/dao_test/guild_roles_dao_test.py
Veloxization/likahbot
0
13017
import unittest
import os
from dao.guild_roles_dao import GuildRolesDAO
from dao.guild_role_categories_dao import GuildRoleCategoriesDAO


class TestGuildRolesDAO(unittest.TestCase):
    """Integration tests for GuildRolesDAO, run against a scratch SQLite
    database rebuilt from database/schema.sql before every test."""

    def setUp(self):
        # Recreate the schema, then seed one role category per guild
        # (guild ids 1234 and 2345) for the tests to attach roles to.
        # NOTE(review): os.popen does not wait for the sqlite3 CLI to finish
        # loading the schema -- presumably fast enough in practice, but a
        # blocking subprocess call would be deterministic; confirm.
        self.db_addr = "database/test_db.db"
        os.popen(f"sqlite3 {self.db_addr} < database/schema.sql")
        self.guild_roles_dao = GuildRolesDAO(self.db_addr)
        self.guild_role_categories_dao = GuildRoleCategoriesDAO(self.db_addr)
        self.guild_role_categories_dao.add_guild_role_category(1234, "TEST")
        self.guild_role_categories_dao.add_guild_role_category(2345, "TEST")
        self.category_id1 = self.guild_role_categories_dao.get_all_guild_role_categories(1234)[0]["id"]
        self.category_id2 = self.guild_role_categories_dao.get_all_guild_role_categories(2345)[0]["id"]

    def tearDown(self):
        # Empty both tables so tests stay independent of each other.
        self.guild_roles_dao.clear_guild_roles_table()
        self.guild_role_categories_dao.clear_guild_role_categories_table()

    def test_guild_role_is_added_correctly(self):
        """Adding a role to a category makes it appear in the guild listing."""
        roles = self.guild_roles_dao.get_all_guild_roles(1234)
        self.assertEqual(len(roles), 0)
        self.guild_roles_dao.add_guild_role(9876, self.category_id1)
        roles = self.guild_roles_dao.get_all_guild_roles(1234)
        self.assertEqual(len(roles), 1)

    def test_guild_role_is_removed_correctly(self):
        """Removing a role from one category leaves its links to other
        categories of the same guild intact."""
        self.guild_role_categories_dao.add_guild_role_category(1234, "TEST2")
        cat_id = self.guild_role_categories_dao.get_all_guild_role_categories(1234)[1]["id"]
        self.guild_roles_dao.add_guild_role(9876, self.category_id1)
        self.guild_roles_dao.add_guild_role(9876, cat_id)
        roles = self.guild_roles_dao.get_all_guild_roles(1234)
        self.assertEqual(len(roles), 2)
        self.guild_roles_dao.remove_guild_role_from_category(9876, self.category_id1)
        roles = self.guild_roles_dao.get_all_guild_roles(1234)
        self.assertEqual(len(roles), 1)

    def test_all_guild_roles_are_removed_correctly(self):
        """delete_guild_roles removes roles only for the targeted guild."""
        self.guild_roles_dao.add_guild_role(9876, self.category_id1)
        self.guild_roles_dao.add_guild_role(8765, self.category_id2)
        roles1 = self.guild_roles_dao.get_all_guild_roles(1234)
        roles2 = self.guild_roles_dao.get_all_guild_roles(2345)
        self.assertEqual(len(roles1), 1)
        self.assertEqual(len(roles2), 1)
        self.guild_roles_dao.delete_guild_roles(1234)
        roles1 = self.guild_roles_dao.get_all_guild_roles(1234)
        roles2 = self.guild_roles_dao.get_all_guild_roles(2345)
        self.assertEqual(len(roles1), 0)
        self.assertEqual(len(roles2), 1)

    def test_guild_roles_of_type_are_returned_correctly(self):
        """Roles are filtered by their category name within a guild."""
        self.guild_role_categories_dao.add_guild_role_category(1234, "TEST2")
        cat_id = self.guild_role_categories_dao.get_all_guild_role_categories(1234)[1]["id"]
        self.guild_roles_dao.add_guild_role(9876, self.category_id1)
        self.guild_roles_dao.add_guild_role(8765, self.category_id1)
        self.guild_roles_dao.add_guild_role(7654, cat_id)
        roles = self.guild_roles_dao.get_guild_roles_of_type("TEST", 1234)
        self.assertEqual(len(roles), 2)
        roles = self.guild_roles_dao.get_guild_roles_of_type("TEST2", 1234)
        self.assertEqual(len(roles), 1)

    def test_guild_role_is_returned_correctly_with_id(self):
        """Lookup by role id returns the role with its guild and category."""
        self.guild_roles_dao.add_guild_role(9876, self.category_id1)
        self.guild_roles_dao.add_guild_role(8765, self.category_id2)
        role = self.guild_roles_dao.get_guild_roles_by_role_id(9876)[0]
        self.assertEqual(role["role_id"], 9876)
        self.assertEqual(role["guild_id"], 1234)
        self.assertEqual(role["category"], "TEST")
2.46875
2
qcic.py
milkllc/qcic
0
13018
# Continuously record H.264 video on a Raspberry Pi camera in one-minute,
# timestamp-named segments, deleting the oldest segments when the disk
# runs low on space.
from __future__ import print_function  # identical print() on Python 2 and 3

import picamera
import datetime
import os

# Number of oldest files removed per cleanup pass; grows while the disk
# stays over the threshold so cleanup eventually catches up, and resets
# to 2 once there is room again.
delcount = 2


def check_fs():
    """When the root filesystem is more than 90% full, delete the
    ``delcount`` oldest recordings in the current directory."""
    global delcount
    st = os.statvfs('/')
    pct = 100 - st.f_bavail * 100.0 / st.f_blocks
    print(pct, "percent full")
    if pct > 90:
        # less than 10% left, delete a few minutes of footage;
        # timestamped filenames sort oldest-first lexicographically
        files = os.listdir('.')
        files.sort()
        # FIX: slice instead of indexing range(delcount) -- the old loop
        # raised IndexError when fewer than delcount files remained
        for name in files[:delcount]:
            print("deleting", name)
            os.remove(name)
        delcount += 1  # keep increasing until we get under 90%
    else:
        delcount = 2


with picamera.PiCamera() as camera:
    try:
        check_fs()
        tstamp = datetime.datetime.utcnow().strftime('%Y%m%d%H%M%S%f')
        print("recording", tstamp)
        camera.start_recording(tstamp + '.h264')
        camera.wait_recording(60)
        while True:
            check_fs()
            tstamp = datetime.datetime.utcnow().strftime('%Y%m%d%H%M%S%f')
            print("recording", tstamp)
            # seamlessly switch output to a new timestamped file
            camera.split_recording(tstamp + '.h264')
            camera.wait_recording(60)
    except KeyboardInterrupt:
        print("quitting")
        camera.stop_recording()
2.453125
2
exercises/allergies/allergies.py
akashsara/python
0
13019
class Allergies(object):
    """Decode an allergy score bitmask (exercism "Allergies" exercise).

    Each allergen corresponds to one bit of the score, in ascending
    order of value: eggs=1, peanuts=2, shellfish=4, strawberries=8,
    tomatoes=16, chocolate=32, pollen=64, cats=128.  Bits above 128
    (unknown allergens) are ignored.
    """

    # Index i <-> bit value 1 << i.
    ALLERGENS = (
        "eggs", "peanuts", "shellfish", "strawberries",
        "tomatoes", "chocolate", "pollen", "cats",
    )

    def __init__(self, score):
        """:param score: integer allergy score (bitmask)."""
        self.score = score

    def allergic_to(self, item):
        """Return True if *item*'s bit is set in the score.

        Unknown allergen names are simply "not allergic" (False).
        """
        try:
            return bool(self.score & (1 << self.ALLERGENS.index(item)))
        except ValueError:
            return False

    @property
    def lst(self):
        """List of all allergens encoded in the score, in canonical order."""
        return [allergen
                for i, allergen in enumerate(self.ALLERGENS)
                if self.score & (1 << i)]
2.15625
2
forge/mock_handle.py
ujjwalsh/pyforge
7
13020
from .handle import ForgeHandle


class MockHandle(ForgeHandle):
    """Forge-side state for a single mock object.

    Tracks attributes set on the mock, its hashing/setattr policies and the
    method stubs created for it.  The ``NotImplementedError`` methods are
    class-introspection hooks that concrete subclasses must provide.
    """

    def __init__(self, forge, mock, behave_as_instance=True):
        super(MockHandle, self).__init__(forge)
        self.mock = mock
        self.behaves_as_instance = behave_as_instance
        self._attributes = {}
        self._is_hashable = False
        self._is_setattr_enabled_in_replay = False

    def is_hashable(self):
        # Whether the mock may be used as a dict key / set member.
        return self._is_hashable

    def enable_hashing(self):
        self._is_hashable = True

    def disable_hashing(self):
        self._is_hashable = False

    def enable_setattr_during_replay(self):
        # When enabled, setattr during replay is applied directly instead
        # of being matched against a recorded expectation.
        self._is_setattr_enabled_in_replay = True

    def disable_setattr_during_replay(self):
        self._is_setattr_enabled_in_replay = False

    def is_setattr_enabled_in_replay(self):
        return self._is_setattr_enabled_in_replay

    def has_attribute(self, attr):
        # Base implementation knows no attributes; subclasses consult the
        # mocked class/object.
        return False

    def get_attribute(self, attr):
        """Resolve *attr* on the mock: explicitly-set attributes first,
        then non-method class members, then (stubbed) methods."""
        if self.forge.attributes.has_attribute(self.mock, attr):
            return self.forge.attributes.get_attribute(self.mock, attr)
        if self.has_nonmethod_class_member(attr):
            return self.get_nonmethod_class_member(attr)
        if self.has_method(attr):
            return self.get_method(attr)
        raise AttributeError("%s has no attribute %r" % (self.mock, attr))

    def set_attribute(self, attr, value, caller_info):
        """Record the attribute set (while recording, or when setattr is
        explicitly enabled in replay); otherwise match it against a
        previously recorded setattr expectation."""
        if self.forge.is_recording() or self.is_setattr_enabled_in_replay():
            self._set_attribute(attr, value)
        else:
            self._set_attribute_during_replay(attr, value, caller_info)

    def expect_setattr(self, attr, value):
        """Queue an expectation that ``mock.attr = value`` happens in replay."""
        return self.forge.queue.push_setattr(self.mock, attr, value,
                                             caller_info=self.forge.debug.get_caller_info())

    def _set_attribute_during_replay(self, attr, value, caller_info):
        # Pops (and thereby verifies) the matching expectation before
        # actually applying the attribute.
        self.forge.queue.pop_matching_setattr(self.mock, attr, value, caller_info)
        self._set_attribute(attr, value)

    def _set_attribute(self, attr, value):
        self.forge.attributes.set_attribute(self.mock, attr, value)

    def has_method(self, attr):
        # Either a stub was already created for it, or the mocked class has it.
        return self.forge.stubs.has_initialized_method_stub(self.mock, attr) or self._has_method(attr)

    def _has_method(self, name):
        raise NotImplementedError()  # subclass hook

    def has_nonmethod_class_member(self, name):
        raise NotImplementedError()  # subclass hook

    def get_nonmethod_class_member(self, name):
        raise NotImplementedError()  # subclass hook

    def get_method(self, name):
        """Return the stub for method *name*, creating (and caching) it on
        first access; replay-time access is validated via subclass hooks."""
        returned = self.forge.stubs.get_initialized_method_stub_or_none(self.mock, name)
        if returned is None:
            real_method = self._get_real_method(name)
            if not self.forge.is_recording():
                # Creating a stub outside recording may be an error
                # (e.g. method never recorded); subclass decides.
                self._check_unrecorded_method_getting(name)
            returned = self._construct_stub(name, real_method)
            self._bind_if_needed(name, returned)
            self.forge.stubs.add_initialized_method_stub(self.mock, name, returned)
            self._set_method_description(returned, name)
        elif self.forge.is_replaying() and not returned.__forge__.has_recorded_calls():
            self._check_getting_method_stub_without_recorded_calls(name, returned)
        return returned

    def _set_method_description(self, method, name):
        # e.g. "<mock description>.method_name" for error messages
        method.__forge__.set_description("%s.%s" % (
            self.describe(),
            name
        ))

    def _construct_stub(self, name, real_method):
        return self.forge.create_method_stub(real_method)

    def _check_unrecorded_method_getting(self, name):
        raise NotImplementedError()  # subclass hook

    def _check_getting_method_stub_without_recorded_calls(self, name, stub):
        raise NotImplementedError()  # subclass hook

    def _get_real_method(self, name):
        raise NotImplementedError()  # subclass hook

    def handle_special_method_call(self, name, args, kwargs, caller_info):
        """Route a dunder call (e.g. __call__, __len__) through its stub
        after subclass-specific validation."""
        self._check_special_method_call(name, args, kwargs)
        return self.get_method(name).__forge__.handle_call(args, kwargs, caller_info)

    def _check_special_method_call(self, name, args, kwargs):
        raise NotImplementedError()  # subclass hook

    def is_callable(self):
        raise NotImplementedError()  # subclass hook

    def _bind_if_needed(self, name, method_stub):
        # Bind the stub to an instance/class when the subclass says so.
        bind_needed, bind_target = self._is_binding_needed(name, method_stub)
        if bind_needed:
            method_stub.__forge__.bind(bind_target)

    def _is_binding_needed(self, name, method_stub):
        raise NotImplementedError()  # subclass hook
2.609375
3
RecoBTag/PerformanceDB/python/measure/Pool_mistag110118.py
ckamtsikis/cmssw
852
13021
import FWCore.ParameterSet.Config as cms
from CondCore.DBCommon.CondDBCommon_cfi import *

# b-tagging mistag-rate records, DB snapshot of 2011-01-18 (v5, offline).
#
# Every tagger working point contributes exactly two records whose tags
# differ only in the 'table' (performance payload) / 'wp' (working point)
# infix, so the 22 toGet entries are generated from the tagger list instead
# of being spelled out by hand (same PSets, same order as before).
_taggers = [
    'JBPL', 'JBPM', 'JBPT',
    'JPL', 'JPM', 'JPT',
    'SSVHEM', 'SSVHPT',
    'TCHEL', 'TCHEM', 'TCHPT',
]


def _entry(record, tag):
    # One toGet element; by convention the label equals the tag.
    return cms.PSet(
        record=cms.string(record),
        tag=cms.string(tag),
        label=cms.untracked.string(tag)
    )


_toGet = []
for _tagger in _taggers:
    _toGet.append(_entry('PerformancePayloadRecord',
                         'BTagMISTAG%stable_v5_offline' % _tagger))
    _toGet.append(_entry('PerformanceWPRecord',
                         'BTagMISTAG%swp_v5_offline' % _tagger))

PoolDBESSourceMistag110118 = cms.ESSource(
    "PoolDBESSource",
    CondDBCommon,
    toGet=cms.VPSet(*_toGet)
)

PoolDBESSourceMistag110118.connect = 'frontier://FrontierProd/CMS_COND_31X_PHYSICSTOOLS'
1.351563
1
src/ScheduleEvaluation.py
franTarkenton/replication_health_check
0
13022
'''
Created on Nov 22, 2018

@author: kjnether

Methods that evaluate a given FME Server schedule set.
'''
import logging
import re

import FMEUtil.FMEServerApiData


class EvaluateSchedule(object):
    """Analyses an FME Server schedule set for common problems: disabled
    jobs, unscheduled workspaces, FMW-embedded data sources, and
    schedules replicating to non-production destinations."""

    def __init__(self, schedulesData):
        """
        :param schedulesData: the schedules to be evaluated
        :type schedulesData: FMEUtil.FMEServerApiData.Schedules
        :raises ValueError: if schedulesData is not a Schedules object
        """
        self.logger = logging.getLogger(__name__)
        if not isinstance(schedulesData, FMEUtil.FMEServerApiData.Schedules):
            msg = 'arg schedulesData should be type FMEUtil.FMEServerAp' + \
                  'iData.Schedules instead its a type {0}'
            msg = msg.format(type(schedulesData))
            raise ValueError(msg)
        self.schedule = schedulesData

    def getDisabled(self):
        '''
        :return: [scheduleName, repository, fmwName] for every schedule
            that is currently disabled, sorted by schedule name
        '''
        disableList = []
        for schedule in self.schedule:
            if not schedule.isEnabled():
                fmw = schedule.getFMWName()
                repo = schedule.getRepository()
                schedName = schedule.getScheduleName()
                disableList.append([schedName, repo, fmw])
        # sort by the schedule name
        disableList.sort(key=lambda x: x[0])
        return disableList

    def compareRepositorySchedule(self, workspacesData):
        '''
        Identifies FMW's in the workspaces (workspacesData) that are not
        associated with a schedule.

        :param workspacesData: a fmeserver data api workspaces object that
            is to be compared against the schedule data
        :type workspacesData: FMEUtil.FMEServerApiData.Workspaces
        :return: sorted list of workspace names without a schedule
        '''
        notScheduled = []
        for workspace in workspacesData:
            repoName = workspace.getRepositoryName()
            workspaceName = workspace.getWorkspaceName()
            scheduleName = self.schedule.getFMWRepositorySchedule(
                repositoryName=repoName, fmwName=workspaceName)
            if scheduleName is None:
                notScheduled.append(workspaceName)
        notScheduled.sort()
        return notScheduled

    def getEmbeddedData(self):
        '''
        Identifies published parameters whose value points at data shipped
        inside the FMW itself (an ``$(FME_MF_...)`` macro path) and that
        probably should be sourcing from the staging area instead.

        :return: sorted list of [scheduleName, paramName, paramValue]
        '''
        # FIX: raw string -- '\$', '\(' and '\w' are regex escapes, not
        # string escapes (the non-raw form emits DeprecationWarning on
        # modern Python); the compiled pattern is unchanged.
        searchRegex = re.compile(r'^\$\(FME_MF_\w*\).*$')
        schedEmbeds = []
        self.schedule.reset()
        for schedule in self.schedule:
            pubparams = schedule.getPublishedParameters()
            schedName = schedule.getScheduleName()
            for pubparam in pubparams:
                paramName = pubparam.getName()
                paramValue = pubparam.getValue()
                self.logger.debug("paramName: %s", paramName)
                self.logger.debug("paramValue: %s", paramValue)
                if isinstance(paramValue, list):
                    # multi-valued parameters are matched as a joined string
                    paramValue = ', '.join(paramValue)
                    self.logger.info("list param as string: %s", paramValue)
                if searchRegex.match(paramValue):
                    schedEmbeds.append([schedName, paramName, paramValue])
        schedEmbeds.sort(key=lambda x: x[0])
        return schedEmbeds

    def getNonProdSchedules(self):
        '''
        Iterates through the schedules returning a list of lists where the
        inner list contains:
          - the schedule name
          - the value DEST_DB_ENV_KEY is set to (None if the parameter
            doesn't exist at all)
        KIRK jobs are excluded.
        '''
        filterList = ['OTHR', 'PRD', 'DBCPRD', 'OTHER']
        filteredScheds = self.getSchedsFilterByDestDbEnvKey(filterList,
                                                           includeNull=True)
        nonProdList = []
        for schedule in filteredScheds:
            scheduleName = schedule.getScheduleName()
            fmw = schedule.getFMWName()
            if fmw.upper() != 'APP_KIRK__FGDB.FMW':
                pubparams = schedule.getPublishedParameters()
                destDbEnvKey = pubparams.getDestDbEnvKey()
                nonProdList.append([scheduleName, destDbEnvKey])
        nonProdList.sort(key=lambda x: x[0])
        return nonProdList

    def getSchedsFilterByDestDbEnvKey(self, envKeysToExclude, includeNull=False):
        '''
        Returns a filtered list of schedules based on the parameters
        identified; does not include KIRK jobs.

        :param envKeysToExclude: schedules configured with these
            DEST_DB_ENV_KEY values (case insensitive) are excluded
        :type envKeysToExclude: list of strings
        :param includeNull: whether replication scripts that do not have a
            DEST_DB_ENV_KEY defined for them should be included
        :type includeNull: bool
        :return: list of matching schedule objects
        '''
        envKeysToExcludeUC = [element.upper() for element in envKeysToExclude]
        filterList = []
        self.schedule.reset()
        for schedule in self.schedule:
            scheduleName = schedule.getScheduleName()
            fmw = schedule.getFMWName()
            if fmw.upper() != 'APP_KIRK__FGDB.FMW':
                pubparams = schedule.getPublishedParameters()
                destDbEnvKey = pubparams.getDestDbEnvKey()
                if destDbEnvKey is None and includeNull:
                    filterList.append(schedule)
                elif isinstance(destDbEnvKey, list):
                    if len(destDbEnvKey) == 1:
                        destDbEnvKey = destDbEnvKey[0]
                    elif len(destDbEnvKey) == 0:
                        destDbEnvKey = ''
                    else:
                        msg = 'The schedule {0} is configured with ' + \
                              "multiple DEST_DB_ENV_KEYS, uncertain " + \
                              "which key to use. The fmw associated " + \
                              'with the job is {1}, the number of ' + \
                              'values in the list is {2} the value for' + \
                              ' DEST_DB_ENV_KEY\'s is {3}'
                        msg = msg.format(scheduleName, fmw,
                                         len(destDbEnvKey), destDbEnvKey)
                        # logging this as a warning for now, will catch this
                        # case later when we get to evaluating schedules
                        # that are replicating to non prod
                        self.logger.warning(msg)
                        # FIX: previously the multi-valued list fell through
                        # to destDbEnvKey.upper() below and raised
                        # AttributeError; skip the ambiguous schedule instead
                        # (it is warned about, as the comment above intends).
                        destDbEnvKey = None
                self.logger.debug(
                    f"destDbEnvKey: -{destDbEnvKey}- {scheduleName}")
                if (destDbEnvKey is not None) and destDbEnvKey.upper() \
                        not in envKeysToExcludeUC:
                    self.logger.debug(f"adding the key: {destDbEnvKey}")
                    filterList.append(schedule)
        return filterList

    def getAllBCGWDestinations(self):
        '''
        Retrieves all the BCGW destinations; to be retrieved they MUST
        have the DEST_DB_ENV_KEY defined for them.
        '''
        filterList = ['OTHR', 'OTHER']
        filteredSchedules = self.getSchedsFilterByDestDbEnvKey(
            envKeysToExclude=filterList)
        return filteredSchedules
2.640625
3
podcastista/ListenNowTab.py
andrsd/podcastista
0
13023
from PyQt5 import QtWidgets, QtCore
from podcastista.ShowEpisodeWidget import ShowEpisodeWidget
from podcastista.FlowLayout import FlowLayout


class FillThread(QtCore.QThread):
    """ Worker thread for loading up episodes """

    def __init__(self, spotify, shows):
        # spotify: client with a show_episodes(id, limit=...) API
        # shows: saved-shows payload; expected to have an 'items' list of
        # {'show': {...}} entries — TODO confirm against Spotify API response
        super().__init__()
        self._spotify = spotify
        self._shows = shows

    def run(self):
        # Fetch up to 20 episodes per show and keep only those that have not
        # been fully played; results are written into show['episodes'] so the
        # main thread can read them after the 'finished' signal fires.
        for item in self._shows['items']:
            show = item['show']
            show['episodes'] = []
            show_episodes = self._spotify.show_episodes(show['id'], limit=20)
            for episode in show_episodes['items']:
                display = True
                # 'resume_point' may be absent; only skip episodes explicitly
                # marked fully played.
                if ('resume_point' in episode and
                        episode['resume_point']['fully_played']):
                    display = False
                if display:
                    show['episodes'].append(episode)

    @property
    def shows(self):
        # Shows dict enriched with per-show 'episodes' lists by run().
        return self._shows


class ListenNowTab(QtWidgets.QWidget):
    """ Tab on the main window with the list of shows """

    def __init__(self, parent):
        # parent: main window; must expose a 'spotify' attribute.
        super().__init__()
        self._main_window = parent

        # Placeholder widget shown while there is nothing to display.
        self._empty_widget = QtWidgets.QWidget()
        empty_layout = QtWidgets.QVBoxLayout()
        nothing = QtWidgets.QLabel("No items")
        nothing.setSizePolicy(
            QtWidgets.QSizePolicy.Expanding,
            QtWidgets.QSizePolicy.Fixed)
        nothing.setContentsMargins(40, 20, 40, 20)
        nothing.setStyleSheet("""
            font-size: 14px;
            """)
        nothing.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignTop)
        empty_layout.addWidget(nothing)
        empty_layout.addStretch(1)
        self._empty_widget.setLayout(empty_layout)

        # Scrollable flow-layout container holding one widget per show.
        self._layout = FlowLayout()
        widget = QtWidgets.QWidget()
        widget.setLayout(self._layout)
        widget.setSizePolicy(
            QtWidgets.QSizePolicy.MinimumExpanding,
            QtWidgets.QSizePolicy.MinimumExpanding)

        self._list = QtWidgets.QScrollArea()
        self._list.setFrameShape(QtWidgets.QFrame.NoFrame)
        self._list.setWidgetResizable(True)
        self._list.setWidget(widget)

        # Stacked layout toggles between the placeholder and the real list.
        self._stacked_layout = QtWidgets.QStackedLayout(self)
        self._stacked_layout.addWidget(self._empty_widget)
        self._stacked_layout.addWidget(self._list)

    def clear(self):
        """Remove all show widgets and fall back to the placeholder view."""
        self._stacked_layout.setCurrentWidget(self._empty_widget)
        while self._layout.count() > 0:
            item = self._layout.takeAt(0)
            if item.widget() is not None:
                # deleteLater() defers destruction to the Qt event loop.
                item.widget().deleteLater()

    def fill(self):
        """Kick off the background fetch of saved shows and their episodes."""
        if self._main_window.spotify is None:
            return
        shows = self._main_window.spotify.current_user_saved_shows()
        # NOTE(review): the worker mutates 'shows' off the GUI thread; it is
        # only read back in onFillFinished after 'finished' fires — confirm
        # no other reader exists.
        self._filler = FillThread(self._main_window.spotify, shows)
        self._filler.finished.connect(self.onFillFinished)
        self._filler.start()

    def onFillFinished(self):
        """Populate the flow layout with shows that have unplayed episodes."""
        for item in self._filler.shows['items']:
            show = item['show']
            if len(show['episodes']) > 0:
                w = ShowEpisodeWidget(show, self._main_window)
                self._layout.addWidget(w)
        # Only switch away from the placeholder if something was added.
        if self._layout.count() > 0:
            self._stacked_layout.setCurrentWidget(self._list)
2.359375
2
ggpy/cruft/prolog_pyparser.py
hobson/ggpy
1
13024
import pyparsing as pp

# Grammar for simple Prolog-style facts such as: track(1, 2.5, x).

# The functor name, e.g. 'track' in the examples above.
relationship = pp.Word(pp.alphas).setResultsName('relationship')

# An argument is either a numeric literal or a variable name.
number = pp.Word(pp.nums + '.')
variable = pp.Word(pp.alphas)
argument = number | variable

# A parenthesised, comma-separated argument list; the parens themselves are
# suppressed from the parse results.
_lparen = pp.Suppress('(')
_rparen = pp.Suppress(')')
arguments = (_lparen + pp.delimitedList(argument) + _rparen).setResultsName('arguments')

# A fact is a relationship applied to its arguments (a simplifying
# assumption — real Prolog terms are richer than this).
fact = (relationship + arguments).setResultsName('facts', listAllMatches=True)

# A sentence is a fact terminated by a period.
sentence = fact + pp.Suppress('.')

# One or more sentences make up a program.
prolog_sentences = pp.OneOrMore(sentence)
3.390625
3
Imaging/Core/Testing/Python/ReslicePermutations.py
inviCRO/VTK
0
13025
#!/usr/bin/env python
import vtk
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()

# this script tests vtkImageReslice with various axes permutations, in order
# to cover a nasty set of "if" statements that check the intersections of
# the raster lines with the input bounding box.

# Image pipeline
reader = vtk.vtkImageReader()
reader.ReleaseDataFlagOff()
reader.SetDataByteOrderToLittleEndian()
reader.SetDataExtent(0, 63, 0, 63, 1, 93)
reader.SetDataSpacing(3.2, 3.2, 1.5)
reader.SetFilePrefix("" + str(VTK_DATA_ROOT) + "/Data/headsq/quarter")
reader.SetDataMask(0x7fff)

# rotate about the center of the image
transform = vtk.vtkTransform()
transform.Translate(+100.8, +100.8, +69.0)
transform.RotateWXYZ(10, 1, 1, 0)
transform.Translate(-100.8, -100.8, -69.0)

# One set of direction cosines per reslice: the three axis permutations
# and their negated counterparts.
direction_cosines = [
    [1, 0, 0, 0, 1, 0, 0, 0, 1],
    [0, 1, 0, 0, 0, 1, 1, 0, 0],
    [0, 0, 1, 1, 0, 0, 0, 1, 0],
    [-1, 0, 0, 0, -1, 0, 0, 0, -1],
    [0, -1, 0, 0, 0, -1, -1, 0, 0],
    [0, 0, -1, -1, 0, 0, 0, -1, 0],
]

# Viewports form a 3x2 grid, filled column by column (same layout as the
# original six hand-written renderers).
viewports = [
    (0.0, 0.0, 0.3333, 0.5),
    (0.0, 0.5, 0.3333, 1.0),
    (0.3333, 0.0, 0.6667, 0.5),
    (0.3333, 0.5, 0.6667, 1.0),
    (0.6667, 0.0, 1.0, 0.5),
    (0.6667, 0.5, 1.0, 1.0),
]

imgWin = vtk.vtkRenderWindow()

# Build an identical reslice -> mapper -> actor -> renderer pipeline for
# each permutation (replaces six copy-pasted stanzas).
for cosines, viewport in zip(direction_cosines, viewports):
    reslice = vtk.vtkImageReslice()
    reslice.SetInputConnection(reader.GetOutputPort())
    reslice.SetResliceAxesDirectionCosines(cosines)
    reslice.SetResliceTransform(transform)
    reslice.SetOutputSpacing(3.2, 3.2, 3.2)
    reslice.SetOutputExtent(0, 74, 0, 74, 0, 0)

    mapper = vtk.vtkImageMapper()
    mapper.SetInputConnection(reslice.GetOutputPort())
    mapper.SetColorWindow(2000)
    mapper.SetColorLevel(1000)
    mapper.SetZSlice(0)

    actor = vtk.vtkActor2D()
    actor.SetMapper(mapper)

    renderer = vtk.vtkRenderer()
    renderer.AddActor2D(actor)
    renderer.SetViewport(*viewport)
    imgWin.AddRenderer(renderer)

imgWin.SetSize(225, 150)
imgWin.Render()
# --- end of script --
2.265625
2
neuronlp2/nn/utils.py
ntunlp/ptrnet-depparser
9
13026
import collections.abc
from itertools import repeat

import torch
import torch.nn as nn
import torch.nn.utils.rnn as rnn_utils


def _ntuple(n):
    """Return a function that expands a scalar into an n-tuple.

    Iterable arguments are passed through unchanged; scalars are repeated
    ``n`` times.
    """
    def parse(x):
        # Fix: ``collections.Iterable`` was removed in Python 3.10; the ABC
        # lives in ``collections.abc``.
        if isinstance(x, collections.abc.Iterable):
            return x
        return tuple(repeat(x, n))
    return parse


_single = _ntuple(1)
_pair = _ntuple(2)
_triple = _ntuple(3)
_quadruple = _ntuple(4)


def prepare_rnn_seq(rnn_input, lengths, hx=None, masks=None, batch_first=False):
    '''
    Sort a padded batch by decreasing length and pack it for an RNN.

    Args:
        rnn_input: [seq_len, batch, input_size]: tensor containing the features of the input sequence.
        lengths: [batch]: tensor containing the lengthes of the input sequence
        hx: [num_layers * num_directions, batch, hidden_size]: tensor containing the initial hidden state for each element in the batch.
        masks: [seq_len, batch]: tensor containing the mask for each element in the batch.
        batch_first: If True, then the input and output tensors are provided as [batch, seq_len, feature].

    Returns:
        (seq, hx, rev_order, masks) where ``seq`` is the PackedSequence,
        ``hx`` is the (possibly batch-reordered) hidden state, ``rev_order``
        is the index tensor that undoes the sort (``None`` when the input was
        already sorted), and ``masks`` is truncated to the longest length.
    '''

    def check_decreasing(lengths):
        # pack_padded_sequence needs lengths sorted in decreasing order;
        # return None when they already are, else the sort and its inverse.
        lens, order = torch.sort(lengths, dim=0, descending=True)
        if torch.ne(lens, lengths).sum() == 0:
            return None
        else:
            _, rev_order = torch.sort(order)
            return lens, order, rev_order

    check_res = check_decreasing(lengths)

    if check_res is None:
        lens = lengths
        rev_order = None
    else:
        lens, order, rev_order = check_res
        batch_dim = 0 if batch_first else 1
        rnn_input = rnn_input.index_select(batch_dim, order)
        if hx is not None:
            # hack lstm: LSTM state is an (h, c) tuple — reorder both parts
            # along the batch dimension (dim 1 of the state tensors).
            if isinstance(hx, tuple):
                hx, cx = hx
                hx = hx.index_select(1, order)
                cx = cx.index_select(1, order)
                hx = (hx, cx)
            else:
                hx = hx.index_select(1, order)

    lens = lens.tolist()
    seq = rnn_utils.pack_padded_sequence(rnn_input, lens, batch_first=batch_first)
    if masks is not None:
        # Truncate the mask to the longest sequence in the (sorted) batch.
        if batch_first:
            masks = masks[:, :lens[0]]
        else:
            masks = masks[:lens[0]]
    return seq, hx, rev_order, masks


def recover_rnn_seq(seq, rev_order, hx=None, batch_first=False):
    """Unpack a PackedSequence and restore the original batch order.

    ``rev_order`` is the inverse permutation returned by
    :func:`prepare_rnn_seq` (``None`` means no reordering is needed).
    """
    output, _ = rnn_utils.pad_packed_sequence(seq, batch_first=batch_first)
    if rev_order is not None:
        batch_dim = 0 if batch_first else 1
        output = output.index_select(batch_dim, rev_order)
        if hx is not None:
            # hack lstm: undo the batch reordering of the (h, c) state pair.
            if isinstance(hx, tuple):
                hx, cx = hx
                hx = hx.index_select(1, rev_order)
                cx = cx.index_select(1, rev_order)
                hx = (hx, cx)
            else:
                hx = hx.index_select(1, rev_order)
    return output, hx


def freeze_embedding(embedding):
    """Detach an embedding's weight in place so it receives no gradients."""
    assert isinstance(embedding, nn.Embedding), "input should be an Embedding module."
    embedding.weight.detach_()
2.828125
3
Data_Analyst/Step_2_Intermediate_Python_and_Pandas/2_Data_Analysis_with_Pandas_Intermediate/3_Introduction_to_Pandas/7_Selecting_a_row/script.py
ustutz/dataquest
8
13027
import pandas as pandas_Pandas_Module


class Script:

    @staticmethod
    def main():
        """Load the food info CSV and print the dtype of every column."""
        nutrition_table = pandas_Pandas_Module.read_csv("../food_info.csv")
        print(str(nutrition_table.dtypes))


Script.main()
2.234375
2
examples/fire.py
pombreda/py-lepton
7
13028
#############################################################################
#
# Copyright (c) 2008 by <NAME> and contributors
# All Rights Reserved.
#
# This software is subject to the provisions of the MIT License
# A copy of the license should accompany this distribution.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
#
#############################################################################
"""Fire simulation using point sprites"""

__version__ = '$Id$'

import os

# Fix: the script uses pyglet.window, pyglet.clock and pyglet.app below but
# never imported the top-level package — it only worked by accident of the
# star-import from pyglet.gl. Import it explicitly.
import pyglet
from pyglet import image
from pyglet.gl import *

from lepton import Particle, ParticleGroup, default_system
from lepton.renderer import PointRenderer
from lepton.texturizer import SpriteTexturizer, create_point_texture
from lepton.emitter import StaticEmitter
from lepton.domain import Line
from lepton.controller import Gravity, Lifetime, Movement, Fader, ColorBlender

win = pyglet.window.Window(resizable=True, visible=False)
win.clear()

# Additive blending over a black background gives the glow effect.
glEnable(GL_BLEND)
glShadeModel(GL_SMOOTH)
glBlendFunc(GL_SRC_ALPHA, GL_ONE)
glDisable(GL_DEPTH_TEST)

# Continuous emitter along a horizontal line just below the window bottom.
flame = StaticEmitter(
    rate=500,
    template=Particle(
        position=(300, 25, 0),
        velocity=(0, 0, 0),
        color=(1, 1, 1, 1),
    ),
    position=Line((win.width/2 - 85, -15, 0), (win.width/2 + 85, -15, 0)),
    deviation=Particle(position=(10, 0, 0), velocity=(7, 50, 0), age=0.75)
)

# Particles drift upward and blend through a smoke/flame color ramp keyed
# by particle age (seconds).
default_system.add_global_controller(
    Lifetime(6),
    Gravity((0, 20, 0)),
    Movement(),
    ColorBlender(
        [(0, (0, 0, 0.5, 0)),
         (0.5, (0, 0, 0.5, 0.2)),
         (0.75, (0, 0.5, 1, 0.6)),
         (1.5, (1, 1, 0, 0.2)),
         (2.7, (0.9, 0.2, 0, 0.4)),
         (3.2, (0.6, 0.1, 0.05, 0.2)),
         (4.0, (0.8, 0.8, 0.8, 0.1)),
         (6.0, (0.8, 0.8, 0.8, 0)),
         ]
    ),
)
group = ParticleGroup(controllers=[flame],
                      renderer=PointRenderer(64, SpriteTexturizer(create_point_texture(64, 5))))

win.set_visible(True)
pyglet.clock.schedule_interval(default_system.update, (1.0 / 30.0))
pyglet.clock.set_fps_limit(None)


@win.event
def on_draw():
    win.clear()
    glLoadIdentity()
    default_system.draw()


if __name__ == '__main__':
    # Warm up the simulation so the fire is already burning at first frame.
    default_system.run_ahead(2, 30)
    pyglet.app.run()
2.5625
3
landspout/cli.py
gmr/landspout
0
13029
# coding=utf-8
"""
Command Line Interface
======================

"""
import argparse
import logging
import os
from os import path
import sys

from landspout import core, __version__

LOGGER = logging.getLogger('landspout')
LOGGING_FORMAT = '[%(asctime)-15s] %(levelname)-8s %(name)-15s: %(message)s'


def exit_application(message=None, code=0):
    """Exit the application, optionally logging the message to info or
    error based upon the exit code.

    :param str message: The exit message (optional)
    :param int code: The exit code (default: 0)

    """
    # Bug fix: calling with the default ``message=None`` used to raise
    # ``AttributeError`` on ``None.strip()``; only log when given a message.
    if message:
        log_method = LOGGER.error if code else LOGGER.info
        log_method(message.strip())
    sys.exit(code)


def parse_cli_arguments():
    """Parse the command-line arguments.

    :rtype: argparse.Namespace

    """
    parser = argparse.ArgumentParser(
        # NOTE(review): the second positional argument here is *usage*, not
        # *description* — confirm that was intended before changing it.
        'landspout', 'Static website generation tool',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        conflict_handler='resolve')
    parser.add_argument('-s', '--source', metavar='SOURCE',
                        help='Source content directory',
                        default='content')
    parser.add_argument('-d', '--destination', metavar='DEST',
                        help='Destination directory for built content',
                        default='build')
    parser.add_argument('-t', '--templates', metavar='TEMPLATE DIR',
                        help='Template directory',
                        default='templates')
    parser.add_argument('-b', '--base-uri-path', action='store', default='/')
    parser.add_argument('--whitespace', action='store',
                        choices=['all', 'single', 'oneline'],
                        default='all',
                        help='Compress whitespace')
    parser.add_argument('-n', '--namespace', type=argparse.FileType('r'),
                        help='Load a JSON file of values to inject into the '
                             'default rendering namespace.')
    parser.add_argument('-i', '--interval', type=int, default=3,
                        help='Interval in seconds between file '
                             'checks while watching or serving')
    parser.add_argument('--port', type=int, default=8080,
                        help='The port to listen on when serving')
    parser.add_argument('--debug', action='store_true',
                        help='Extra verbose debug logging')
    parser.add_argument('-v', '--version', action='version',
                        version='%(prog)s {}'.format(__version__),
                        help='output version information, then exit')
    parser.add_argument('command', nargs='?',
                        choices=['build', 'watch', 'serve'],
                        help='The command to run', default='build')
    return parser.parse_args()


def validate_paths(args):
    """Ensure all of the configured paths actually exist.

    Creates the destination directory when missing; exits with a non-zero
    code when the source or template directory does not exist.
    """
    if not path.exists(args.destination):
        LOGGER.warning('Destination path "%s" does not exist, creating',
                       args.destination)
        os.makedirs(path.normpath(args.destination))
    for file_path in [args.source, args.templates]:
        if not path.exists(file_path):
            exit_application('Path {} does not exist'.format(file_path), 1)


def main():
    """Application entry point: parse arguments, configure logging, validate
    the configured paths, then dispatch to the requested command."""
    args = parse_cli_arguments()
    log_level = logging.DEBUG if args.debug else logging.INFO
    logging.basicConfig(level=log_level, format=LOGGING_FORMAT)
    LOGGER.info('Landspout v%s [%s]', __version__, args.command)
    validate_paths(args)
    landspout = core.Landspout(args)
    if args.command == 'build':
        landspout.build()
    elif args.command == 'watch':
        landspout.watch()
    elif args.command == 'serve':
        landspout.serve()
2.59375
3
examples/test_cross.py
rballester/ttpy
0
13030
import sys
sys.path.append('../')

import numpy as np
import tt

# Integrate sin(x)/x over (0, b] on a 2^d-point grid using a TT cross
# approximation; the result should approach pi/2 for large b.
d = 30
n = 2 ** d
b = 1E3
h = b / (n + 1)

# Grid points x_i = (i + 1) * h, built directly in TT form.
x = tt.xfun(2, d)
e = tt.ones(2, d)
x = x + e
x = x * h


def sf(x):
    """sinc integrand; its TT representation should be rank 2."""
    return np.sin(x) / x


y = tt.multifuncrs([x], sf, 1e-6, ['y0', tt.ones(2, d)])

# Fix: converted the Python 2 print statement to a Python 3 print() call.
print("pi / 2 ~ ", tt.dot(y, tt.ones(2, d)) * h)
2.234375
2
phone2board.py
brandjamie/phone2board
0
13031
import tornado.httpserver import tornado.ioloop import tornado.options import tornado.web import tornado.auth import tornado.escape import os.path import logging import sys import urllib import json from uuid import uuid4 from tornado.options import define, options define("port", default=8000, help="run on the given port", type=int) #to do - # check character set of inputs (not vital as 'block' added to each user). # scores? #------------------------------------------------------------------------------Main app code------------------------------------------- class Status (object): currentStatus = "waitingforstart" currentLoginStatus = "open" currentTime = 90 currentQuestion = False currentQuestionType = False clientcallbacks = [] users = {} # users is a dictionary - names are keys, each item is a dictionary of score and (if neccesary), current question and correct or not globalcallbacks = [] controlcallbacks = [] answercounter = 0 quiztype = "" def registerclient(self, callback): print('register client---------------------------------------------------------') if (callback not in self.clientcallbacks): self.clientcallbacks.append(callback) def registerglobal(self, callback): print('register global----------------------------------------------------------') if (callback not in self.globalcallbacks): self.globalcallbacks.append(callback) def registercontrol(self, callback): print('register control----------------------------------------------------------') if (callback not in self.controlcallbacks): self.controlcallbacks.append(callback) def adduser(self, name): if self.getStatus()=="waitingforstart": self.users[tornado.escape.native_str(name)]={'qnum':0,'level':0,'complete':"false",'Score':0,'answerordinal':10000, 'block':"false",'finished':"false"} else: self.users[tornado.escape.native_str(name)]={'qnum':0,'level':0,'complete':"false",'Score':0,'answerordinal':10000, 'block':"false",'finished':"false"} # self.users(tornado.escape.native_str(name)) 
self.notifyAddUser() def removeuser(self, name): print('removing user') # self.users.remove(tornado.escape.native_str(name)) delname = tornado.escape.native_str(name) if (delname in self.users): del self.users[delname] def setQuestion(self, question): print('setquestion') questtype = "open" jsonstring ='{"type":"question","question":"'+question+'"}' self.clearAnswers() self.currentQuestion = question self.currentQuestionType = questtype self.setStatus("waitingforanswer") self.setLoginStatus("closed") self.notifyGlobal(jsonstring) jsonstring ='{"type":"question","status":"waitingforanswer","loginstatus":"closed"}' self.notifyControl(jsonstring) jsonstring ='{"type":"questionasked","qtype":"'+questtype+'"}' self.notifyClient(jsonstring) # print ("what the hell") # self.notifyAnswer() # could be named better as is doing the marking def setControlAnswer(self, answer): print('set control answer') answers = answer.split('/') print(len(answers)) for user in self.users.keys(): if ('answer' in self.users[user]): if (self.users[user]['answer']in answers): self.users[user]['mark']="correct" else: self.users[user]['mark']="incorrect" self.notifyGlobalAnswer() self.notifyUserAllAnswers() def setCorrectFromControl(self, user): if (self.users[user]): self.users[user]['mark']="correct" print("does it workd") print(self.users[user]['mark']) self.notifyGlobalAnswer() self.notifyUserAnswerCorrect(user) def setIncorrectFromControl(self, user): if (self.users[user]): self.users[user]['mark']="incorrect" print(self.users[user]['mark']) self.notifyGlobalAnswer() self.notifyUserAnswerIncorrect(user) def setBlockFromControl(self, user): if (self.users[user]): self.users[user]['block']="true" self.notifyGlobalAnswer() def setUnblockFromControl(self, user): if (self.users[user]): self.users[user]['block']="false" self.notifyGlobalAnswer() def toggleLoginStatus(self): if (self.getLoginStatus()=="closed"): self.setLoginStatus("open") else: self.setLoginStatus("closed") 
self.notifyControlLoginStatus() def toggleStatus(self): if (self.getStatus()=="waitingforanswer"): self.setStatus("answersclosed") else: self.setStatus("waitingforanswer") self.notifyControlStatus() def resetGame(self): jsonstring = '{"type":"reset"}' print("what the hell") self.notifyClient(jsonstring) def setAnswer(self, answer, user): print('getting answer') print (answer) print (user) self.users[user]['answer'] = answer self.users[user]['answerordinal']=self.answercounter self.users[user]['mark']="notmarked" self.answercounter=self.answercounter + 1 self.notifyAnswer() def setClientResult(self, level, qnum, finished, user): print ('gotten result') print (level) print (qnum) print (user) print (finished) self.users[user]['level']=int(level) self.users[user]['qnum']=int(qnum) self.users[user]['finished']=finished self.notifyAnswer() def clearAnswers(self): self.answercounter = 0 for user in self.users.keys(): if ('answer' in self.users[user]): del self.users[user]['answer'] self.users[user]['answerordinal']=10000 self.users[user]['mark']="notmarked" def setStatus(self, status): self.currentStatus = status def setQuizType(self, quiztype): self.quiztype = quiztype def setLoginStatus(self, status): self.currentLoginStatus = status def setTime(self, time): print("SETTING TIMER________________") self.currentTime = time self.notifyGlobalTimeChange(time) self.notifyUserTimeChange(time) def notifyAddUser(self): print("notify add user") jsonstring = '{"type":"users","users":[' print (self.users) for c in self.users.keys(): jsonstring = jsonstring+'"'+c+'",' jsonstring = jsonstring[:-1] jsonstring = jsonstring+']}' self.notifyGlobal(jsonstring) self.notifyControlAnswer() def notifyAnswer(self): print ("notify answer") self.notifyGlobalAnswer() self.notifyControlAnswer() def notifyGlobalAnswer(self): print ("notify gloabla answer") jsonstring = '{"type":"answers","answers":[' answerarray = self.makeAnswerArrayString() jsonstring = jsonstring+answerarray jsonstring = 
jsonstring+']}' self.notifyGlobal(jsonstring) def notifyUserAnswerCorrect(self, markedusername): jsonstring = '{"type":"mark","mark":"correct","markeduser":"' jsonstring = jsonstring+markedusername+'"}' self.notifyClient(jsonstring) def notifyUserAnswerIncorrect(self, markedusername): jsonstring = '{"type":"mark","mark":"incorrect","markeduser":"' jsonstring = jsonstring+markedusername+'"}' self.notifyClient(jsonstring) def notifyUserTimeChange(self, time): print ("notify user time") jsonstring = '{"type":"time","time":' jsonstring = jsonstring+time jsonstring = jsonstring+'}' self.notifyClient(jsonstring) def notifyGlobalTimeChange(self, time): print ("notify gloabl time") jsonstring = '{"type":"time","time":' jsonstring = jsonstring+time jsonstring = jsonstring+'}' self.notifyGlobal(jsonstring) def notifyUserAllAnswers(self): print ("notify all users") jsonstring = '{"type":"alluseranswers","answers":[' answerarray = self.makeAnswerArrayString() jsonstring = jsonstring+answerarray jsonstring = jsonstring+']}' self.notifyClient(jsonstring) def notifyControlAnswer(self): print ("notify contorl answer") jsonstring = '{"type":"answers","answers":[' controlanswerarray = self.makeControlArrayString() jsonstring = jsonstring+controlanswerarray jsonstring = jsonstring+']' # jsonstring = jsonstring+ ',"status":"' # jsonstring = jsonstring+self.application.status.getstatus()+'"' jsonstring = jsonstring+'}' self.notifyControl(jsonstring) def notifyControlLoginStatus(self): print(self.getLoginStatus()) jsonstring = '{"type":"loginstatus","loginstatus":"' jsonstring = jsonstring+self.getLoginStatus() jsonstring = jsonstring + '"}' self.notifyControl(jsonstring) def notifyControlStatus(self): print(self.getStatus()) jsonstring = '{"type":"status","status":"' jsonstring = jsonstring+self.getStatus() jsonstring = jsonstring + '"}' self.notifyControl(jsonstring) def makeAnswerArrayString (self): if self.quiztype == "multiq": sortedlist = self.getMultiqSortedUserList() else: 
sortedlist = self.getSortedUserList() jsonstring = "" #for c in self.users.keys(): #self.application.quiztype for c in sortedlist: if self.quiztype == "multiq": jsonstring = jsonstring+'[' jsonstring = jsonstring+'"'+c[0]+'",' jsonstring = jsonstring+'"no answer",' jsonstring = jsonstring+'"'+str(c[1]['answerordinal'])+'",' jsonstring = jsonstring+'"'+str(c[1]['level'])+'",' jsonstring = jsonstring+'"'+str(c[1]['block'])+'",' jsonstring = jsonstring+'"'+str(c[1]['qnum'])+'",' jsonstring = jsonstring+'"'+str(c[1]['finished'])+'"],' else: if ('answer' in c[1]): jsonstring = jsonstring+'[' jsonstring = jsonstring+'"'+c[0]+'",' jsonstring = jsonstring+'"'+c[1]['answer']+'",' jsonstring = jsonstring+'"'+str(c[1]['answerordinal'])+'",' jsonstring = jsonstring+'"'+c[1]['mark']+'",' jsonstring = jsonstring+'"'+str(c[1]['block'])+'"],' jsonstring = jsonstring[:-1] return jsonstring def getSortedUserList (self): print("-------------------------------------") listfromusers = self.users.items() print(listfromusers) sortedlist = sorted(listfromusers, key=lambda usered: usered[1]['answerordinal']) print(sortedlist) return sortedlist def getMultiqSortedUserList (self): listfromusers = self.users.items() sortedlist = sorted(listfromusers, key=lambda usered: (usered[1]['level'], usered[1]['qnum'],usered[1]['answerordinal']), reverse = True) print(sortedlist) return sortedlist def makeControlArrayString (self): jsonstring = "" if self.quiztype == "multiq": jsonstring = self.makeMultiqControlArrayString() else: sortedlist = self.getSortedUserList() for c in sortedlist: jsonstring = jsonstring+'[' jsonstring = jsonstring+'"'+c[0]+'",' if ('answer' in c[1]): jsonstring = jsonstring+'"'+c[1]['answer']+'",' jsonstring = jsonstring+'"'+str(c[1]['answerordinal'])+'",' jsonstring = jsonstring+'"'+c[1]['mark']+'",' jsonstring = jsonstring+'"'+str(c[1]['block'])+'"],' else: jsonstring = jsonstring+'"noanswer",' jsonstring = jsonstring+'"'+str(c[1]['answerordinal'])+'",' jsonstring = 
jsonstring+'"nomark",' jsonstring = jsonstring+'"'+str(c[1]['block'])+'"],' jsonstring = jsonstring[:-1] return jsonstring def makeMultiqControlArrayString (self): jsonstring = "" sortedlist = self.getSortedUserList() for c in sortedlist: jsonstring = jsonstring+'[' jsonstring = jsonstring+'"'+c[0]+'",' if ('answer' in c[1]): jsonstring = jsonstring+'"'+c[1]['answer']+'",' jsonstring = jsonstring+'"'+str(c[1]['answerordinal'])+'",' jsonstring = jsonstring+'"'+c[1]['mark']+'",' jsonstring = jsonstring+'"'+str(c[1]['block'])+'"],' else: jsonstring = jsonstring+'"noanswer",' jsonstring = jsonstring+'"'+str(c[1]['answerordinal'])+'",' jsonstring = jsonstring+'"'+str(c[1]['level'])+'",' jsonstring = jsonstring+'"'+str(c[1]['block'])+'",' jsonstring = jsonstring+'"'+str(c[1]['qnum'])+'"],' jsonstring = jsonstring[:-1] print (jsonstring) print ("make controll array string") return jsonstring def notifyGlobal(self, message): for c in self.globalcallbacks: print('globalcallbacks') print(message) print(c) c(message) self.globalcallbacks=[] def notifyControl(self, message): for c in self.controlcallbacks: print('controlcallbacks') print(message) print(c) c(message) self.controlcallbacks=[] def notifyClient(self, message): for c in self.clientcallbacks: print('controlcallbacks') print(message) print(c) c(message) self.clientcallbacks=[] def getUsers(self): return self.users.keys() def getStatus(self): return self.currentStatus def getTime(self): return self.currentTime def getLoginStatus(self): return self.currentLoginStatus def getQuestion(self): return self.currentQuestion def getQuizType(self): return self.quizType def getQuestionType(self): return self.currentQuestionType #----------------------------------------------------------status handlers------------------------- # these handle the asynch hooks from the pages and sending messages to the pages # a lot of shared code here - I'm sure this could be better! 
class ClientStatusHandler(tornado.web.RequestHandler):
    """Long-poll endpoint for client pages: holds the request open until the
    shared Status object pushes a message via the registered callback."""

    @tornado.web.asynchronous
    @tornado.gen.engine
    def get(self):
        print("register client")
        self.application.status.registerclient(self.on_message)

    def on_message(self, message):
        # Called by Status when there is a client-directed JSON message;
        # writes it and completes the held-open request.
        print("client message sent")
        print(message)
        self.write(message)
        self.finish()


class GlobalStatusHandler(tornado.web.RequestHandler):
    """Long-poll endpoint for the global (scoreboard) page."""

    @tornado.web.asynchronous
    @tornado.gen.engine
    def get(self):
        print("reggister gloabl")
        self.application.status.registerglobal(self.on_message)

    def on_message(self, message):
        print("global message sent")
        print(message)
        self.write(message)
        self.finish()


class ControlStatusHandler(tornado.web.RequestHandler):
    """Long-poll endpoint for the quizmaster control page."""

    @tornado.web.asynchronous
    @tornado.gen.engine
    def get(self):
        print("registeredd control")
        self.application.status.registercontrol(self.on_message)

    def on_message(self, message):
        print("control message sent")
        print(message)
        self.write(message)
        self.finish()


# message handlers - receives messages from the pages (currently only control and client)

class ControlMessageHandler(tornado.web.RequestHandler):
    """Dispatches actions sent from the control page, keyed on the 'type'
    query parameter, to the shared Status object."""

    def get(self):
        messagetype = self.get_argument("type")
        if messagetype=="question":
            question = urllib.parse.unquote(self.get_argument("question"))
            self.application.status.setQuestion(question)
        if messagetype=="time":
            time = urllib.parse.unquote(self.get_argument("time"))
            self.application.status.setTime(time)
        if messagetype=="controlanswer":
            # '/'-separated list of acceptable answers; Status marks everyone.
            answer = urllib.parse.unquote(self.get_argument("answer"))
            self.application.status.setControlAnswer(answer)
        if messagetype=="markcorrect":
            # 'id' carries the user name being (un)marked / (un)blocked below.
            name = urllib.parse.unquote(self.get_argument("id"))
            self.application.status.setCorrectFromControl(name)
        if messagetype=="markincorrect":
            name = urllib.parse.unquote(self.get_argument("id"))
            self.application.status.setIncorrectFromControl(name)
        if messagetype=="block":
            name = urllib.parse.unquote(self.get_argument("id"))
            self.application.status.setBlockFromControl(name)
        if messagetype=="unblock":
            name = urllib.parse.unquote(self.get_argument("id"))
            self.application.status.setUnblockFromControl(name)
        if messagetype=="toggleloginstatus":
            self.application.status.toggleLoginStatus()
        if messagetype=="togglestatus":
            self.application.status.toggleStatus()
        if messagetype=="resetgame":
            self.application.status.resetGame();
        self.finish()


class ClientMessageHandler(tornado.web.RequestHandler):
    """Receives answers and per-question results from player pages; the
    player identity comes from the secure 'username' cookie."""

    def get(self):
        messagetype = self.get_argument("type")
        if messagetype=="answer":
            # Answers are only accepted while a question is open.
            currentstatus = self.application.status.getStatus()
            if (currentstatus=="waitingforanswer"):
                answer = urllib.parse.unquote(self.get_argument("answer"))
                user = tornado.escape.native_str(self.get_secure_cookie("username"))
                self.application.status.setAnswer(answer,user)
        if messagetype=="clientmarked":
            # Progress report for the multi-question quiz type.
            currentstatus = self.application.status.getStatus()
            if (currentstatus=="waitingforanswer"):
                user = tornado.escape.native_str(self.get_secure_cookie("username"))
                level = self.get_argument("level");
                qnum = self.get_argument("qnum");
                finished = self.get_argument("finished");
                self.application.status.setClientResult(level, qnum, finished, user);
        self.finish()


class GlobalMessageHandler(tornado.web.RequestHandler):
    """Receives requests from the global page (currently only a request to
    re-broadcast the current answers)."""

    def get(self):
        messagetype = self.get_argument("type")
        if messagetype=="requestanswers":
            self.application.status.notifyAnswer()
        self.finish()


# - template handlers ------------- pages that are actually called by the browser.
class ClientPageHandler(tornado.web.RequestHandler): def get_current_user(self): return self.get_secure_cookie("username") def get(self): session = uuid4() class LoginHandler(ClientPageHandler): def get(self): #print (self.application.gamefile) #print (self.application.gamefile["quiztype"]) if self.application.status.getLoginStatus()=="open": self.render('login.html') elif self.get_secure_cookie("username"): print(self.application.status.getStatus()) self.redirect("/") else: print(self.application.status.getStatus()) self.render('gamestarted.html') def post(self): # if client already has a username set, remove it from the list before creating a new username if self.get_secure_cookie("username"): self.application.status.removeuser(self.current_user) # create new user self.set_secure_cookie("username",self.get_argument("username"),expires_days=1) self.redirect("/") class ClientWelcome(ClientPageHandler): @tornado.web.authenticated def get(self): session = uuid4() self.application.status.adduser(self.current_user) currentstatus = self.application.status.getStatus() currenttime = self.application.status.getTime() questionarray = self.application.questionarray currentquestiontype = self.application.status.getQuestionType() clientpage = self.application.quiztypes[self.application.quiztype]['client_page'] self.render(clientpage,session=session,user=self.current_user, status=currentstatus, questiontype=currentquestiontype,time=currenttime, levels = questionarray) class ControlPageHandler(tornado.web.RequestHandler): def get(self): # users = self.application.status.getUsers() # userstring = "','".join(str(thisuser) for thisuser in users) controlstring = self.application.status.makeControlArrayString() currentstatus = self.application.status.getStatus() currentloginstatus = self.application.status.getLoginStatus() currenttime = self.application.status.getTime() quiztype = "'" + self.application.quiztype + "'" questionarray = self.application.questionarray answerarray = 
self.application.answerarray page = self.application.quiztypes[self.application.quiztype]["control_page"] self.render(page,teams="["+str(controlstring)+"]", status=currentstatus, loginstatus=currentloginstatus, time=currenttime, quiztype = quiztype, questionarray = questionarray, answerarray = answerarray) class GlobalPageHandler(tornado.web.RequestHandler): def get(self): users = self.application.status.getUsers() userstring = '","'.join(str(thisuser) for thisuser in users) currentstatus = self.application.status.getStatus() currentquestion = self.application.status.getQuestion() currentanswers = self.application.status.makeAnswerArrayString() currenttime = self.application.status.getTime() globalpage = self.application.quiztypes[self.application.quiztype]["global_page"] # should add extra [ ] for current answers string (as in teams) - currently done in javascript self.render(globalpage,teams='["'+str(userstring)+'"]', status=currentstatus, question=currentquestion, answers=currentanswers,time=currenttime) class Application(tornado.web.Application): def __init__(self): self.status = Status() # self.gametype = "default" print('init') handlers = [ (r'/',ClientWelcome), (r'/control',ControlPageHandler), (r'/global',GlobalPageHandler), (r'/login',LoginHandler), (r'/clientstatus',ClientStatusHandler), (r'/globalstatus',GlobalStatusHandler), (r'/controlstatus',ControlStatusHandler), (r'/controlmessage',ControlMessageHandler), (r'/clientmessage',ClientMessageHandler), (r'/globalmessage',GlobalMessageHandler), ] settings = { 'template_path':'./templates', 'static_path':'./static', 'cookie_secret':'123456', 'login_url':'/login', 'xsft_cookies':True, 'debug':True, } ## states which pages should be served for each type of quiz. 
self.quiztypes = { 'default':{"client_page":"default_client.html", "global_page":"default_global.html", "control_page":"default_control.html"}, 'fixed_answers':{"client_page":"default_client.html", "global_page":"default_global.html", "control_page":"default_control.html"}, 'open_answers':{"client_page":"default_client.html", "global_page":"default_global.html", "control_page":"default_control.html"}, 'fixed_timed':{"client_page":"timed_client.html", "global_page":"timed_global.html", "control_page":"timed_control.html"}, 'open_timed':{"client_page":"timed_client.html", "global_page":"timed_global.html", "control_page":"timed_control.html"}, 'multiq':{"client_page":"multiq_client.html", "global_page":"multiq_global.html", "control_page":"multiq_control.html"} } tornado.web.Application.__init__(self, handlers,**settings) if __name__ == '__main__': # tornado.options.parse_command_line() def set_defaults(): app.quiztype = "default" app.notes = "Open ended questions can be entered in control pages. Answers can be marked individualy or by entering an answer in the control page." app.questionarray = "{}" app.answerarray = "{}" app = Application() if len(sys.argv) > 1: try: with open(sys.argv[1]) as json_data: app.gamefile = json.load(json_data) json_data.close() app.quiztype = app.gamefile["quiztype"] if "notes" in app.gamefile: app.notes = app.gamefile["notes"] if "questionarray" in app.gamefile: app.questionarray = app.gamefile["questionarray"] else: app.questionarray = "{}" if "answerarray" in app.gamefile: app.answerarray = app.gamefile["answerarray"] else: app.answerarray = "{}" except: print("not a valid json file, using defaults") set_defaults() else: print("no file given - using defaults") set_defaults() app.status.setQuizType(app.quiztype) http_server = tornado.httpserver.HTTPServer(app) http_server.listen(options.port) tornado.ioloop.IOLoop.instance().start()
2.359375
2
scripts/plotRUC.py
akrherz/radcomp
3
13032
<filename>scripts/plotRUC.py import matplotlib.pyplot as plt import netCDF4 import numpy nc = netCDF4.Dataset("data/ructemps.nc") data = nc.variables["tmpc"][17, :, :] nc.close() (fig, ax) = plt.subplots(1, 1) ax.imshow(numpy.flipud(data)) fig.savefig("test.png")
2.53125
3
tools/draw_cal_lr_ablation.py
twangnh/Calibration_mrcnn
87
13033
import matplotlib import matplotlib.pyplot as plt import numpy as np import math from matplotlib.ticker import FormatStrFormatter from matplotlib import scale as mscale from matplotlib import transforms as mtransforms # z = [0,0.1,0.3,0.9,1,2,5] z = [7.8, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 200, 300, 400, 500, 600, 700, 800, 900, 1000, 1230] # thick = [20,40,20,60,37,32,21]ax1.set_xscale('log') # thick=[15.4, 18.2, 18.7, 19.2, 19.4, 19.5, 19.9, 20.1, 20.4, 20.5, 20.6, 20.7, 20.8, 20.7, 20.7, 20.6, 20.6, 20.6, 20.5, 20.5, 19.8] mrcnn=[17.7, 19.8, 20.0, 19.9, 20.2, 19.5, 19.1, 19.1] x_ticks = [0.001, 0.002, 0.004, 0.008, 0.01, 0.02, 0.04, 0.08] # plt.plot([1.0],[44.8], 'D', color = 'black') # plt.plot([0],[35.9], 'D', color = 'red') # plt.plot([1.0],[56.8], 'D', color = 'black') fig = plt.figure(figsize=(8,5)) ax1 = fig.add_subplot(111) matplotlib.rcParams.update({'font.size': 20}) ax1.plot(x_ticks, mrcnn, linestyle='dashed', marker='o', linewidth=2, c='k', label='mrcnn-r50-ag') # ax1.plot(z, htc, marker='o', linewidth=2, c='g', label='htc') # ax1.plot([1e-4],[15.4], 'D', color = 'green') # ax1.plot([1230],[19.8], 'D', color = 'red') plt.xlabel('calibration lr', size=16) plt.ylabel('bAP', size=16) # plt.gca().set_xscale('custom') ax1.set_xscale('log') ax1.set_xticks(x_ticks) # from matplotlib.ticker import ScalarFormatter # ax1.xaxis.set_major_formatter(ScalarFormatter()) # plt.legend(['calibration lr'], loc='best') plt.minorticks_off() plt.grid() plt.savefig('calibration_lr.eps', format='eps', dpi=1000) plt.show() # import numpy as np # import matplotlib.pyplot as plt # from scipy.interpolate import interp1d # y1=[35.9, 43.4, 46.1, 49.3, 50.3, 51.3, 51.4, 49.9, 49.5, 48.5, 44.8] # y2=[40.5, 48.2, 53.9 , 56.9, 57.8, 59.2, 58.3, 57.9, 57.5, 57.2, 56.8] # y3=[61.5, 61.5, 61.5, 61.5, 61.5, 61.5, 61.5, 61.5, 61.5, 61.5, 61.5] # x = np.linspace(0, 1, num=11, endpoint=True) # # f1 = interp1d(x, y1, kind='cubic') # f2 = interp1d(x, y2, kind='cubic') # f3 = interp1d(x, 
y3, kind='cubic') # xnew = np.linspace(0, 1, num=101, endpoint=True) # plt.plot(xnew, f3(xnew), '--', color='fuchsia') # plt.plot(xnew, f1(xnew), '--', color='blue') # plt.plot(xnew, f2(xnew), '--', color='green') # # plt.plot([0],[40.5], 'D', color = 'red') # plt.plot([1.0],[44.8], 'D', color = 'black') # plt.plot([0],[35.9], 'D', color = 'red') # plt.plot([1.0],[56.8], 'D', color = 'black') # plt.plot(x, y3, 'o', color = 'fuchsia') # plt.plot(x, y1, 'o', color = 'blue') # plt.plot(x, y2, 'o', color = 'green') # plt.plot([0],[40.5], 'D', color = 'red') # plt.plot([1.0],[44.8], 'D', color = 'black') # plt.plot([0],[35.9], 'D', color = 'red') # plt.plot([1.0],[56.8], 'D', color = 'black') # plt.legend(['teacher','0.25x', '0.5x', 'full-feature-imitation', 'only GT supervison'], loc='best') # plt.xlabel('Thresholding factor') # plt.ylabel('mAP') # plt.title('Resulting mAPs of varying thresholding factors') # #plt.legend(['0.5x']) # # plt.savefig('varying_thresh.eps', format='eps', dpi=1000) # plt.show()
2.34375
2
discord bot.py
salihdursun1/dc-bot
0
13034
import discord from discord.ext.commands import Bot TOKEN = "<discordtoken>" client = discord.Client() bot = Bot(command_prefix="!") @bot.event async def on_ready(): print("Bot Hazır " + str(bot.user)) @bot.event async def on_message(message): if message.author == client.user: return if message.content == "selam": await message.channel.send("selam naber") bot.run(TOKEN)
2.9375
3
Lesson08/problem/problem_optional_pandas.py
AlexMazonowicz/PythonFundamentals
2
13035
import pandas as pd # Global variable to set the base path to our dataset folder base_url = '../dataset/' def update_mailing_list_pandas(filename): """ Your docstring documentation starts here. For more information on how to proper document your function, please refer to the official PEP8: https://www.python.org/dev/peps/pep-0008/#documentation-strings. """ df = # Read your csv file with pandas return # Your logic to filter only rows with the `active` flag the return the number of rows # Calling the function to test your code print(update_mailing_list_pandas('mailing_list.csv'))
3.203125
3
example_problems/tutorial/euler_dir/services/is_eulerian_server.py
romeorizzi/TALight
4
13036
<filename>example_problems/tutorial/euler_dir/services/is_eulerian_server.py #!/usr/bin/env python3 # "This service will check your statement that a directed graph you provide us admits an eulerian walk (of the specified type)"" from os import EX_TEMPFAIL from sys import stderr, exit import collections from multilanguage import Env, Lang, TALcolors from TALinputs import TALinput from euler_dir_lib import * # METADATA OF THIS TAL_SERVICE: args_list = [ ('walk_type',str), ('feedback',str), ('eulerian',bool), ('MAXN',int), ('MAXM',int), ] ENV =Env(args_list) TAc =TALcolors(ENV) LANG=Lang(ENV, TAc, lambda fstring: eval(f"f'{fstring}'")) MAXN = ENV['MAXN'] MAXM = ENV['MAXM'] # START CODING YOUR SERVICE: print(f"#? waiting for your directed graph.\nFormat: each line two numbers separated by space. On the first line the number of nodes (an integer n in the interval [1,{MAXN}]) and the number of arcs (an integer m in the interval [1,{MAXM}]). Then follow m lines, one for each arc, each with two numbers in the interval [0,n). These specify the tail node and the head node of the arc, in this order.\nAny line beggining with the '#' character is ignored.\nIf you prefer, you can use the 'TA_send_txt_file.py' util here to send us the lines of a file. Just plug in the util at the 'rtal connect' command like you do with any other bot and let the util feed in the file for you rather than acting by copy and paste yourself.") n, m = TALinput(int, 2, TAc=TAc) if n < 1: TAc.print(LANG.render_feedback("n-LB", f"# ERRORE: il numero di nodi del grafo deve essere almeno 1. Invece il primo dei numeri che hai inserito è n={n}."), "red") exit(0) if m < 0: TAc.print(LANG.render_feedback("m-LB", f"# ERRORE: il numero di archi del grafo non può essere negativo. Invece il secondo dei numeri che hai inserito è m={m}."), "red") exit(0) if n > MAXN: TAc.print(LANG.render_feedback("n-UB", f"# ERRORE: il numero di nodi del grafo non può eccedere {ENV['MAXN']}. 
Invece il primo dei numeri che hai inserito è n={n}>{ENV['MAXN']}."), "red") exit(0) if m > MAXM: TAc.print(LANG.render_feedback("m-UB", f"# ERRORE: il numero di archi del grafo non può eccedere {ENV['MAXM']}. Invece il secondo dei numeri che hai inserito è n={n}>{ENV['MAXM']}."), "red") exit(0) g = Graph(int(n)) adj = [ [] for _ in range(n)] for i in range(m): head, tail = TALinput(int, 2, TAc=TAc) if tail >= n or head >= n or tail < 0 or head < 0: TAc.print(LANG.render_feedback("n-at-least-1", f"# ERRORE: entrambi gli estremi di un arco devono essere nodi del grafo, ossia numeri interi ricompresi nell'intervallo [0,{ENV['MAXN']}."), "red") exit(0) g.addEdge(int(head),int(tail)) adj[int(head)].append(int(tail)) eul = ENV['eulerian'] if eul == 1: if ENV['walk_type'] == "closed": answer1 = g.isEulerianCycle() if answer1 == eul: TAc.OK() if answer1 == True: TAc.print(LANG.render_feedback("eulerian", f"Il grafo ammette un eulerian cycle!"),"green") if ENV['feedback'] == "with_YES_certificate": TAc.print(LANG.render_feedback("here-is-the-certificate", f"Eccone uno:"),"green") printCircuit(adj) exit(0) else: TAc.print(LANG.render_feedback("not-eulerian", f"Il grafo NON contiene alcun eulerian cycle!"),"red") exit(0) else: TAc.NO() exit(0) if ENV['walk_type'] == "open": answer1 = g.isEulerianWalk() answer2 = g.isEulerianCycle() if answer1 == eul and answer2==False and answer1 ==True : TAc.OK() if answer1 == True: TAc.print(LANG.render_feedback("eulerian", f"Il grafo ammette un eulerian walk!"),"green") if ENV['feedback'] == "with_YES_certificate": TAc.print(LANG.render_feedback("here-is-the-certificate", f"Eccone uno:"),"green") printCircuit(adj) exit(0) else: TAc.print(LANG.render_feedback("not-eulerian", f"Il grafo NON contiene alcun eulerian walk!"),"red") exit(0) else: TAc.NO() exit(0) if ENV['walk_type'] == "any": answer1 = g.isEulerianCycle() answer2 = g.isEulerianWalk() if answer1 == eul or answer2 == eul: TAc.OK() if answer1 == eul: 
TAc.print(LANG.render_feedback("eulerian", f"Il grafo ammette un eulerian cycle!"),"green") if ENV['feedback'] == "with_YES_certificate": TAc.print(LANG.render_feedback("here-is-the-certificate", f"Eccone uno:"),"green") printCircuit(adj) exit(0) if answer2 == eul: TAc.print(LANG.render_feedback("eulerian", f"Il grafo ammette un eulerian walk!"),"green") if ENV['feedback'] == "with_YES_certificate": TAc.print(LANG.render_feedback("here-is-the-certificate", f"Eccone uno:"),"green") g.printEulerTour() exit(0) else: TAc.print(LANG.render_feedback("not-eulerian", f"Il grafo NON contiene alcun eulerian walk/cycle!"),"red") exit(0) if eul == 0: if ENV['walk_type'] == "closed": answer1 = g.isEulerianCycle() if answer1 == eul: TAc.OK() else: TAc.NO() if answer1 == True: TAc.print(LANG.render_feedback("eulerian", f"Il grafo ammette un eulerian cycle!"),"red") if ENV['feedback'] == "with_YES_certificate": TAc.print(LANG.render_feedback("here-is-the-certificate", f"Eccone uno:"),"red") printCircuit(adj) exit(0) exit(0) if ENV['walk_type'] == "open": answer1 = g.isEulerianWalk() answer2 = g.isEulerianCycle() if answer1 == eul: TAc.OK() else: TAc.NO() TAc.print(LANG.render_feedback("eulerian", f"Il grafo ammette un eulerian walk!"),"red") if ENV['feedback'] == "with_YES_certificate": TAc.print(LANG.render_feedback("here-is-the-certificate", f"Eccone uno:"),"red") printCircuit(adj) exit(0) if ENV['walk_type'] == "any": answer1 = g.isEulerianCycle() answer2 = g.isEulerianWalk() if answer1 == True or answer2 == True: TAc.NO() if answer1 == True: TAc.print(LANG.render_feedback("eulerian", f"Il grafo ammette un eulerian cycle!"),"red") if ENV['feedback'] == "with_YES_certificate": TAc.print(LANG.render_feedback("here-is-the-certificate", f"Eccone uno:"),"red") printCircuit(adj) exit(0) if answer2 == True: TAc.print(LANG.render_feedback("eulerian", f"Il grafo ammette un eulerian walk!"),"red") if ENV['feedback'] == "with_YES_certificate": 
TAc.print(LANG.render_feedback("here-is-the-certificate", f"Eccone uno:"),"red") g.printEulerTour() exit(0) else: TAc.OK() exit(0)
3.234375
3
get_vocab.py
Amir-Mehrpanah/hgraph2graph
182
13037
<gh_stars>100-1000 import sys import argparse from hgraph import * from rdkit import Chem from multiprocessing import Pool def process(data): vocab = set() for line in data: s = line.strip("\r\n ") hmol = MolGraph(s) for node,attr in hmol.mol_tree.nodes(data=True): smiles = attr['smiles'] vocab.add( attr['label'] ) for i,s in attr['inter_label']: vocab.add( (smiles, s) ) return vocab if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument('--ncpu', type=int, default=1) args = parser.parse_args() data = [mol for line in sys.stdin for mol in line.split()[:2]] data = list(set(data)) batch_size = len(data) // args.ncpu + 1 batches = [data[i : i + batch_size] for i in range(0, len(data), batch_size)] pool = Pool(args.ncpu) vocab_list = pool.map(process, batches) vocab = [(x,y) for vocab in vocab_list for x,y in vocab] vocab = list(set(vocab)) for x,y in sorted(vocab): print(x, y)
2.734375
3
web_app/cornwall/views.py
blackradley/heathmynd
0
13038
# -*- coding: utf-8 -*- """ test """ from __future__ import unicode_literals from django.template.loader import get_template from django.contrib import messages # Create your views here. from django.http import HttpResponse def index(request): """ index """ template = get_template('cornwall/index.html') messages.set_level(request, messages.DEBUG) list(messages.get_messages(request))# clear out the previous messages messages.add_message(request, messages.INFO, 'Hello world.') context = {'nbar': 'cornwall'} html = template.render(context, request) return HttpResponse(html)
2.15625
2
vshare/user_/urls.py
jeyrce/vshare
4
13039
# coding = utf-8 # env = python3.5.2 # author = lujianxin # time = 201x-xx-xx # purpose= - - - from django.urls import re_path from . import views urlpatterns = [ # 此模块下的路径映射 re_path(r'usercenter$', views.UserCenter.as_view()), re_path(r'details/(\d+)$', views.UserDetails.as_view()), re_path(r'login$', views.Login.as_view()), re_path(r'regist$', views.Regist.as_view()), re_path(r'logout$', views.Logout.as_view()), re_path(r'securecenter$', views.SecureCenter.as_view()), re_path(r'write_article$', views.WriteArticle.as_view()), re_path(r'change_art/(\d+)$', views.ChangeArt.as_view()), re_path(r'cpwd$', views.ModifyPwd.as_view()), re_path(r'findpwd$', views.FindPwd.as_view()), re_path(r'cpwdsafe$', views.ModifyPwdSafe.as_view()), ] if __name__ == '__main__': pass
2.03125
2
Day_3/task2.py
DjaffDjaff/AdventOfCode
2
13040
<filename>Day_3/task2.py import math oxygen_rating = 0 co2_rating = 0 length = 0 n_bits = 12 common = [0] * n_bits anti = [0] * n_bits numbers = [] def new_bitmap(old_list): new_list = [0] * n_bits for num in old_list: for j, bit in enumerate(num): new_list[j] += bit return new_list with open("data.txt", "r") as f: lines = f.readlines() length = len(lines) for line in lines: bitmap = list(line.strip("\n")) bitmap = [int(bit) for bit in bitmap] numbers.append(bitmap) #print(bitmap) for j, bit in enumerate(bitmap): common[j] += bit # Let's find oxygen generator rating first numbers_copy = [number for number in numbers] for i in range(n_bits): # Update common common = new_bitmap(numbers) # if more 1s in bit i if common[i] >= len(numbers)/2: most_c = 1 else: most_c = 0 #print(f"In round {i+1}, most common: {most_c}") numbers[:] = [number for number in numbers if (number[i] == most_c)] #print(numbers) if len(numbers) < 2: break oxygen_rating = int("".join(str(bit) for bit in numbers[0]), 2) print("O2:",oxygen_rating) for i in range(n_bits): # Update common common = new_bitmap(numbers_copy) # if more 1s in bit i if common[i] >= len(numbers_copy)/2: most_c = 1 else: most_c = 0 #print(f"In round {i+1}, most common: {most_c}") numbers_copy[:] = [number for number in numbers_copy if (number[i] != most_c)] #print(numbers_copy) if len(numbers_copy) < 2: break co2_rating = int("".join(str(bit) for bit in numbers_copy[0]), 2) print("CO2:", co2_rating) print("Answer: ", oxygen_rating*co2_rating)
3.59375
4
polyjuice/filters_and_selectors/perplex_filter.py
shwang/polyjuice
38
13041
<filename>polyjuice/filters_and_selectors/perplex_filter.py<gh_stars>10-100 import math import numpy as np from munch import Munch from transformers import GPT2LMHeadModel, GPT2TokenizerFast import torch from copy import deepcopy ######################################################################### ### compute perplexity ######################################################################### def _add_special_tokens(text, tokenizer): return tokenizer.bos_token + text + tokenizer.eos_token def _tokens_log_prob(texts, model, tokenizer, batch_size=128, is_cuda=True): outputs = [] for i in range(0, len(texts), batch_size): batch = texts[i : i + batch_size] outputs.extend(_tokens_log_prob_for_batch(batch, model, tokenizer, is_cuda=is_cuda)) return outputs def _tokens_log_prob_for_batch(texts, model, tokenizer, is_cuda=True): device = "cuda" if is_cuda else "cpu" outputs = [] texts = [_add_special_tokens(text, tokenizer) for text in deepcopy(texts)] #encoding = tokenizer.batch_encode_plus(texts, return_tensors='pt') encoding = tokenizer.batch_encode_plus(texts, return_tensors='pt', truncation=True, padding=True) with torch.no_grad(): ids = encoding["input_ids"].to(device) attention_mask = encoding["attention_mask"].to(device) #nopad_mask = ids != tokenizer.pad_token_id nopad_mask = ids != tokenizer.pad_token_id logits = model(ids, attention_mask=attention_mask)[0] for sent_index in range(len(texts)): sent_nopad_mask = nopad_mask[sent_index] sent_tokens = [tok for i, tok in enumerate(encoding.tokens(sent_index)) if sent_nopad_mask[i] and i != 0] sent_ids = ids[sent_index, sent_nopad_mask][1:] sent_logits = logits[sent_index, sent_nopad_mask][:-1, :] sent_logits[:, tokenizer.pad_token_id] = float("-inf") sent_ids_scores = sent_logits.gather(1, sent_ids.unsqueeze(1)).squeeze(1) sent_log_probs = sent_ids_scores - sent_logits.logsumexp(1) #sent_log_probs = cast(torch.DoubleTensor, sent_log_probs) #sent_ids = cast(torch.LongTensor, sent_ids) output = 
(sent_log_probs.cpu().numpy(), sent_ids.cpu().numpy(), sent_tokens) outputs.append(output) return outputs def load_perplex_scorer(model_id = 'gpt2', is_cuda=True): model = GPT2LMHeadModel.from_pretrained(model_id) tokenizer = GPT2TokenizerFast.from_pretrained(model_id, use_fast=True, add_special_tokens=False) device = "cuda" if is_cuda else "cpu" tokenizer.add_special_tokens({"additional_special_tokens": ["<|pad|>"]}) tokenizer.pad_token = "<|pad|>" model.resize_token_embeddings(len(tokenizer)) model.eval() model.to(device) return Munch(model=model, tokenizer=tokenizer) def reduce_perplex_prob(log_probs, log=False, reduce="prod"): tlen = log_probs.shape[0] if reduce == "prod": score = log_probs.sum() elif reduce == "mean": score = log_probs.logsumexp(0) - math.log(tlen) elif reduce == "gmean": score = log_probs.mean(0) elif reduce == "hmean": score = log_probs.neg().logsumexp(0).neg() + math.log(tlen) else: raise ValueError("Unrecognized scoring strategy: %s" % reduce) if not log: score = score.exp() return score.item() def normalize_score(log_score, slen, alpha=0.8): #Elephant in the Room: An Evaluation Framework for Assessing Adversarial Examples in NLP return log_score/math.pow((5+slen)/6, alpha) def compute_sent_perplexity( sentences, perplex_scorer, log=True, reduce="prod", is_normalize=False, is_cuda=True): """Compute the sentence perplexity. For filtering. Args: sentences ([type]): [description] perplex_scorer ([type]): [description] log (bool, optional): [description]. Defaults to True. reduce (str, optional): [description]. Defaults to "prod". is_normalize (bool, optional): [description]. Defaults to False. 
Returns: [type]: [description] """ scores = [] model, tokenizer = perplex_scorer.model, perplex_scorer.tokenizer outputs = _tokens_log_prob(sentences, model, tokenizer, is_cuda=is_cuda) for sent_log_prob, sent_ids, sent_tokens in outputs: score = reduce_perplex_prob(sent_log_prob, reduce=reduce, log=log) if is_normalize: score = normalize_score(score, len(sent_tokens)) scores.append(score) return scores def filter_by_sent_perplexity(sentences, perplex_scorer, thred=20, is_cuda=True): scores = compute_sent_perplexity( sentences, perplex_scorer, log=True, reduce="prod", is_normalize=False, is_cuda=is_cuda) idxes = np.where(np.array(scores) <= thred)[0] filtered = [sentences[i] for i in idxes] def compute_phrase_perplexity( sentence_phrase_tuples, perplex_scorer, log=True, reduce="prod", is_normalize=False, is_cuda=True): scores = [] sentence_phrase_tuples = sentence_phrase_tuples if type(sentence_phrase_tuples) != tuple else [sentence_phrase_tuples] if len(sentence_phrase_tuples) == 0: return scores model, tokenizer = perplex_scorer.model, perplex_scorer.tokenizer outputs = _tokens_log_prob([s[0] for s in sentence_phrase_tuples], model, tokenizer, is_cuda=is_cuda) for idx, (sentence, phrase) in enumerate(sentence_phrase_tuples): log_probs_all = outputs[idx][0] full_len = len(outputs[idx][1]) - 1 if phrase: prefix_len = len(tokenizer(sentence.split(phrase)[0].strip())["input_ids"]) else: prefix_len = 0 phrase_len = len(tokenizer(phrase)["input_ids"]) prefix_idx, phrase_idx = [0, prefix_len], [prefix_len, prefix_len+phrase_len] log_probs = log_probs_all[phrase_idx[0]:phrase_idx[1]] #print(sentence.split(phrase)[0].strip(), perplex_scorer.tokenizer(sentence.split(phrase)[0].strip())) #print(sentence, phrase, phrase_idx) full_sent_score = reduce_perplex_prob(log_probs_all, log=log, reduce=reduce) phrase_score = reduce_perplex_prob(log_probs, log=log, reduce=reduce) if is_normalize: full_sent_score = normalize_score(full_sent_score, full_len) phrase_score = 
normalize_score(phrase_score, phrase_len) scores.append((full_sent_score, phrase_score)) return scores def compute_delta_perplexity(edit_ops, perplex_scorer, is_normalize=False, is_cuda=True): """This is to compute the perplexity Args: edit_ops ([type]): [description] perplex_scorer ([type]): [description] is_normalize (bool, optional): [description]. Defaults to False. Returns: [type]: [description] """ tuples = [] #print(metadata.primary.acore.doc.text) #print(metadata.primary.bcore.doc.text) edit_ops = [o for o in edit_ops if o.op != "equal"] for op in edit_ops: aphrase, bphrase = (op.fromz_full, op.toz_full) if \ op.op == "insert" or op.op == "delete" else (op.fromz_core, op.toz_core) asent, bsent = aphrase.doc, bphrase.doc tuples += [(asent.text, aphrase.text), (bsent.text, bphrase.text)] #print(tuples) scores = compute_phrase_perplexity(tuples, perplex_scorer, is_normalize=is_normalize, is_cuda=is_cuda) #print(scores) paired_scores = [] for i in range(len(edit_ops)): # because of negative, it's i - i+1; lower the better. #print(scores[2*i]) #print(scores[2*i+1]) paired_scores.append(Munch( pr_sent=scores[2*i][0]-scores[2*i+1][0], pr_phrase=scores[2*i][1]-scores[2*i+1][1])) paired_scores = sorted(paired_scores, key=lambda x: ( max(x.pr_sent, x.pr_phrase)), reverse=True) # use the most ungrammar part as the return paired_scores[0]
1.953125
2
Python/example_controllers/visual_perception/flow.py
ricklentz/tdw
0
13042
from tdw.controller import Controller from tdw.tdw_utils import TDWUtils from tdw.add_ons.image_capture import ImageCapture from tdw.backend.paths import EXAMPLE_CONTROLLER_OUTPUT_PATH """ Get the _flow pass. """ c = Controller() object_id_0 = c.get_unique_id() object_id_1 = c.get_unique_id() object_id_2 = c.get_unique_id() object_id_3 = c.get_unique_id() object_names = {object_id_0: "small_table_green_marble", object_id_1: "rh10", object_id_2: "jug01", object_id_3: "jug05"} output_directory = EXAMPLE_CONTROLLER_OUTPUT_PATH.joinpath("flow") # Enable image capture for the _flow pass. print(f"Images will be saved to: {output_directory}") capture = ImageCapture(path=output_directory, pass_masks=["_flow"], avatar_ids=["a"]) c.add_ons.append(capture) commands = [TDWUtils.create_empty_room(12, 12), c.get_add_object(object_names[object_id_0], object_id=object_id_0), c.get_add_object(object_names[object_id_1], position={"x": 0.7, "y": 0, "z": 0.4}, rotation={"x": 0, "y": 30, "z": 0}, object_id=object_id_1), c.get_add_object(model_name=object_names[object_id_2], position={"x": -0.3, "y": 0.9, "z": 0.2}, object_id=object_id_2), c.get_add_object(object_names[object_id_3], position={"x": 0.3, "y": 0.9, "z": -0.2}, object_id=object_id_3), {"$type": "apply_force_to_object", "id": object_id_1, "force": {"x": 0, "y": 5, "z": -200}}] commands.extend(TDWUtils.create_avatar(position={"x": 2.478, "y": 1.602, "z": 1.412}, look_at={"x": 0, "y": 0.2, "z": 0}, avatar_id="a")) c.communicate(commands) for i in range(3): c.communicate([]) c.communicate({"$type": "terminate"})
2.3125
2
main.py
pepetox/gae-angular-materialize
1
13043
<reponame>pepetox/gae-angular-materialize # Copyright 2013 Google, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import modelCourse as model import webapp2 from google.appengine.api import users def AsDict(course): return { 'key': course.key.urlsafe(), 'author': course.author.email(), 'name': course.name, 'description': course.description, 'lang': course.lang, 'date': course.date.strftime("%B %d, %Y") } class RestHandler(webapp2.RequestHandler): def dispatch(self): # time.sleep(1) if (users.get_current_user().email() == '<EMAIL>') | (users.get_current_user().email() == '<EMAIL>'): super(RestHandler, self).dispatch() else: self.abort(402) def SendJson(self, r): self.response.headers['content-type'] = 'text/plain' self.response.write(json.dumps(r)) class QueryHandler(RestHandler): def get(self): courses = model.All() r = [AsDict(course) for course in courses] self.SendJson(r) class UpdateHandler(RestHandler): def post(self): r = json.loads(self.request.body) guest = model.Update(r['key'], r['name'], r['description'], r['lang']) r = AsDict(guest) self.SendJson(r) class InsertHandler(RestHandler): def post(self): r = json.loads(self.request.body) course = model.Insert(r['name'], r['description'], r['lang']) r = AsDict(course) self.SendJson(r) class DeleteHandler(RestHandler): def post(self): r = json.loads(self.request.body) model.Delete(r['key']) class GetUser(RestHandler): def get(self): user = users.get_current_user() if user: email = user.email() url = 
users.create_logout_url(self.request.uri) url_linktext = 'Logout' else: email = '' url = users.create_login_url(self.request.uri) url_linktext = 'Login' r = {'user': email, 'url': url, 'url_linktext': url_linktext} self.SendJson(r) APP = webapp2.WSGIApplication([ ('/rest/query', QueryHandler), ('/rest/insert', InsertHandler), ('/rest/delete', DeleteHandler), ('/rest/update', UpdateHandler), ('/rest/user', GetUser), ], debug=True)
2.296875
2
config.py
laundmo/counter-generator
0
13044
<reponame>laundmo/counter-generator from sys import platform try: from yaml import CSafeLoader as Loader # use the C loader when possible except ImportError: from yaml import SafeLoader as Loader import yaml with open("config.yml") as f: config = yaml.load(f, Loader=Loader) # load the config yaml if platform in ("linux", "linux2", "win32"): import PySimpleGUI elif ( platform == "darwin" ): # Have to use web/remi on MacOS as the normal tkinter version causes a OS error # TODO: Test on MacOS with tkinter possibly figure out how to get it working. import PySimpleGUIWeb as PySimpleGUI
2.625
3
configs/sem_fpn/onaho_fpn.py
xiong-jie-y/mmsegmentation
1
13045
<reponame>xiong-jie-y/mmsegmentation _base_ = [ '../_base_/models/fpn_r50.py', '../_base_/datasets/onaho.py', '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' ] model = dict(decode_head=dict(num_classes=2))
1.289063
1
config.py
dhkim2810/MaskedDatasetCondensation
0
13046
def get_default_convnet_setting():
    """Return the default ConvNet hyper-parameters.

    Returns:
        tuple: (net_width, net_depth, net_act, net_norm, net_pooling).
    """
    net_width, net_depth, net_act, net_norm, net_pooling = 128, 3, 'relu', 'instancenorm', 'avgpooling'
    return net_width, net_depth, net_act, net_norm, net_pooling


# Empirically good (outer_loop, inner_loop) settings keyed by ipc
# (images per class).
_LOOPS_BY_IPC = {
    1: (1, 1),
    10: (10, 50),
    20: (20, 25),
    30: (30, 20),
    40: (40, 15),
    50: (50, 10),
}


def get_loops(ipc):
    """Return the outer-loop / inner-loop hyper-parameters for `ipc`.

    Args:
        ipc: images per class; must be one of the keys of _LOOPS_BY_IPC.

    Returns:
        tuple: (outer_loop, inner_loop).
    """
    try:
        # Table lookup replaces the original if/elif chain.
        return _LOOPS_BY_IPC[ipc]
    except KeyError:
        # Preserve original behavior: terminate with a message for
        # unsupported values (the original assigned (0, 0) and then
        # called exit(), so the values were never returned).
        exit('loop hyper-parameters are not defined for %d ipc' % ipc)


def get_eval_pool(eval_mode, model, model_eval):
    """Return the list of architectures to evaluate for `eval_mode`.

    Args:
        eval_mode: one-letter code selecting an ablation group.
        model: name of the training architecture (used by mode 'S').
        model_eval: fallback architecture name for unknown modes.

    Returns:
        list[str]: architecture names.
    """
    if eval_mode == 'M':  # multiple architectures
        model_eval_pool = ['MLP', 'ConvNet', 'LeNet', 'AlexNet', 'VGG11', 'ResNet18']
    elif eval_mode == 'W':  # ablation study on network width
        model_eval_pool = ['ConvNetW32', 'ConvNetW64', 'ConvNetW128', 'ConvNetW256']
    elif eval_mode == 'D':  # ablation study on network depth
        model_eval_pool = ['ConvNetD1', 'ConvNetD2', 'ConvNetD3', 'ConvNetD4']
    elif eval_mode == 'A':  # ablation study on network activation function
        model_eval_pool = ['ConvNetAS', 'ConvNetAR', 'ConvNetAL']
    elif eval_mode == 'P':  # ablation study on network pooling layer
        model_eval_pool = ['ConvNetNP', 'ConvNetMP', 'ConvNetAP']
    elif eval_mode == 'N':  # ablation study on network normalization layer
        model_eval_pool = ['ConvNetNN', 'ConvNetBN', 'ConvNetLN', 'ConvNetIN', 'ConvNetGN']
    elif eval_mode == 'S':  # the training architecture itself (strip 'BN' suffix)
        model_eval_pool = [model[:model.index('BN')]] if 'BN' in model else [model]
    else:
        model_eval_pool = [model_eval]
    return model_eval_pool
2.390625
2
jgem/dataset/__init__.py
kensugino/JUGEMu
0
13047
# NOTE(review): extraction flattened this module (jgem/dataset/__init__.py)
# onto two physical lines, so the text below is not directly runnable; it is
# kept byte-identical rather than re-indented by guesswork.
# NOTE(review): the pandas calls appear to target a pre-0.20 pandas API
# (`to_csv(tupleize_cols=...)`, `read_table(...)`, `DataFrame.sort(...)`)
# that modern pandas has removed -- confirm the pinned pandas version before
# modernizing (sort -> sort_values, read_table -> read_csv(sep='\t')).
""" Expression Dataset for analysis of matrix (RNASeq/microarray) data with annotations """ import pandas as PD import numpy as N from matplotlib import pylab as P from collections import OrderedDict from ast import literal_eval # from ..plot.matrix import matshow_clustered class ExpressionSet(object): def __init__(self, eData, gData=None, sData=None): """ eData: expression data (gene x samples) header: MultiIndex (samplename, group) fData: gene annotation (gene x gene annotations) pData: sample annotation (sample x sample annotations) """ self.eData = eData self.gData = gData self.sData = sData def read(self, eFile, gFile=None, sFile=None): pass def write(self, eFile, gFile=None, sFile=None): self.eData.to_csv(eFile, tupleize_cols=False, sep="\t") if gFile is not None: self.gData.to_csv(gFile, tupleize_cols=False, sep="\t") if sFile is not None: self.sData.to_csv(sFile, tupleize_cols=False, sep="\t") def find(self, field, pat): pass def read_bioinfo3_data(fname): """ read bioinfo3.table.dataset type of data """ fobj = open(fname) groups = OrderedDict() cnt = 0 for line in fobj: cnt += 1 if line[:2]=='#%': if line.startswith('#%groups:'): gname, members = line[len('#%groups:'):].split('=') gname = gname.strip() members = members.strip().split(',') groups[gname] = members datafields = line.strip().split('=')[1].strip().split(',') elif line.startswith('#%fields'): fields = line.strip().split('=')[1].strip().split(',') elif not line.strip(): continue # empty line else: break df = PD.read_table(fname, skiprows=cnt-1) f2g = {} for g,m in groups.items(): for f in m: f2g[f] = g df.columns = PD.MultiIndex.from_tuples([(x, f2g.get(x,'')) for x in df.columns], names=['samplename','group']) e = ExpressionSet(df) return e def read_multiindex_data(fname, tupleize=True, index_names = ['samplename','group']): """ read dataset table with MultiIndex in the header """ if not tupleize: df = PD.read_table(fname, header=range(len(index_names)), index_col=[0], tupleize_cols=False) e = 
ExpressionSet(df) return e df = PD.read_table(fname, index_col=0) df.columns = PD.MultiIndex.from_tuples(df.columns.map(literal_eval).tolist(), names=index_names) e = ExpressionSet(df) return e def read_grouped_table(fname, groupfn=lambda x: '_'.join(x.split('_')[:-1])): """ Read dataset whose group is encoded in the colname. Column 0 is index. """ df = PD.read_table(fname) f2g = {x:groupfn(x) for x in df.columns} df.columns = PD.MultiIndex.from_tuples([(x, f2g[x]) for x in df.columns], names=['samplename','group']) e = ExpressionSet(df) return e def concatenate(dic): """ dic: dict of DataFrames merge all using index and outer join """ keys = list(dic) d = dic[keys[0]].merge(dic[keys[1]], left_index=True, right_index=True, how='outer', suffixes=('.'+keys[0],'.'+keys[1])) for k in keys[2:]: d = d.merge(dic[k], left_index=True, right_index=True, how='outer', suffixes=('','.'+k)) return d def calc_mergesortkey(dic, pos_neg_flds): conc = concatenate(dic) selected = ~N.isnan(conc[pos_neg_flds]) pos = conc[pos_neg_flds]>0 neg = conc[pos_neg_flds]<=0 num_pos = pos.sum(axis=1) num_neg = neg.sum(axis=1) pos_neg_mix = -1*(num_neg==0) + 1*(num_pos==0) # pos(-1), mix(0), neg(1) #num_hit = num_pos - num_neg num_hit = num_pos + num_neg n = len(pos_neg_flds) #position = (N.arange(1,n+1)*pos + N.arange(-1,-n-1,-1)*neg).sum(axis=1) position = (N.arange(1,n+1)*pos + N.arange(-n,0)*neg).sum(axis=1) strength = (conc[pos_neg_flds]*pos).sum(axis=1) + (conc[pos_neg_flds]*neg).sum(axis=1) #msk = PD.Series(list(zip(pos_neg_mix, num_hit, position, strength)), index=conc.index) #msk.sort() conc['mergesortkey'] = list(zip(pos_neg_mix, num_hit, position, strength)) conc.sort('mergesortkey', inplace=True) return conc
2.484375
2
tests/test_sql.py
YPlan/django-perf-rec
148
13048
# Repo: YPlan/django-perf-rec (tests/test_sql.py)
"""Tests for sql_fingerprint(): SQL is normalized by masking literal values
with '#', column lists with '...', and volatile identifiers with '#'.

NOTE(review): this file was recovered from a whitespace-flattened copy;
runs of duplicate spaces inside some test strings may have been lost --
verify against upstream before relying on the whitespace-specific tests.
"""
from __future__ import annotations

from django_perf_rec.sql import sql_fingerprint


def test_empty():
    assert sql_fingerprint("") == ""
    assert sql_fingerprint("\n\n \n") == ""


def test_select():
    assert sql_fingerprint("SELECT `f1`, `f2` FROM `b`") == "SELECT ... FROM `b`"


def test_select_show_columns(settings):
    assert (
        sql_fingerprint("SELECT `f1`, `f2` FROM `b`", hide_columns=False)
        == "SELECT `f1`, `f2` FROM `b`"
    )


def test_select_limit(settings):
    assert (
        sql_fingerprint("SELECT `f1`, `f2` FROM `b` LIMIT 12", hide_columns=False)
        == "SELECT `f1`, `f2` FROM `b` LIMIT #"
    )


def test_select_coalesce_show_columns(settings):
    assert (
        sql_fingerprint(
            (
                "SELECT `table`.`f1`, COALESCE(table.f2->>'a', table.f2->>'b', "
                + "'default') FROM `table`"
            ),
            hide_columns=False,
        )
        == "SELECT `table`.`f1`, COALESCE(table.f2->>#, table.f2->>#, #) FROM `table`"
    )


def test_select_where():
    assert (
        sql_fingerprint(
            "SELECT DISTINCT `table`.`field` FROM `table` WHERE `table`.`id` = 1"
        )
        == "SELECT DISTINCT `table`.`field` FROM `table` WHERE `table`.`id` = #"
    )


def test_select_where_show_columns(settings):
    assert (
        sql_fingerprint(
            "SELECT DISTINCT `table`.`field` FROM `table` WHERE `table`.`id` = 1",
            hide_columns=False,
        )
        == "SELECT DISTINCT `table`.`field` FROM `table` WHERE `table`.`id` = #"
    )


def test_select_comment():
    assert (
        sql_fingerprint("SELECT /* comment */ `f1`, `f2` FROM `b`")
        == "SELECT /* comment */ ... FROM `b`"
    )


def test_select_comment_show_columns(settings):
    assert (
        sql_fingerprint("SELECT /* comment */ `f1`, `f2` FROM `b`", hide_columns=False)
        == "SELECT /* comment */ `f1`, `f2` FROM `b`"
    )


def test_select_join():
    assert (
        sql_fingerprint(
            "SELECT f1, f2 FROM a INNER JOIN b ON (a.b_id = b.id) WHERE a.f2 = 1"
        )
        == "SELECT ... FROM a INNER JOIN b ON (a.b_id = b.id) WHERE a.f2 = #"
    )


def test_select_join_show_columns(settings):
    assert (
        sql_fingerprint(
            "SELECT f1, f2 FROM a INNER JOIN b ON (a.b_id = b.id) WHERE a.f2 = 1",
            hide_columns=False,
        )
        == "SELECT f1, f2 FROM a INNER JOIN b ON (a.b_id = b.id) WHERE a.f2 = #"
    )


def test_select_order_by():
    assert (
        sql_fingerprint("SELECT f1, f2 FROM a ORDER BY f3")
        == "SELECT ... FROM a ORDER BY f3"
    )


def test_select_order_by_limit():
    assert (
        sql_fingerprint("SELECT f1, f2 FROM a ORDER BY f3 LIMIT 12")
        == "SELECT ... FROM a ORDER BY f3 LIMIT #"
    )


def test_select_order_by_show_columns(settings):
    assert (
        sql_fingerprint("SELECT f1, f2 FROM a ORDER BY f3", hide_columns=False)
        == "SELECT f1, f2 FROM a ORDER BY f3"
    )


def test_select_order_by_multiple():
    assert (
        sql_fingerprint("SELECT f1, f2 FROM a ORDER BY f3, f4")
        == "SELECT ... FROM a ORDER BY f3, f4"
    )


def test_select_group_by():
    assert (
        sql_fingerprint("SELECT f1, f2 FROM a GROUP BY f1")
        == "SELECT ... FROM a GROUP BY f1"
    )


def test_select_group_by_show_columns(settings):
    assert (
        sql_fingerprint("SELECT f1, f2 FROM a GROUP BY f1", hide_columns=False)
        == "SELECT f1, f2 FROM a GROUP BY f1"
    )


def test_select_group_by_multiple():
    assert (
        sql_fingerprint("SELECT f1, f2 FROM a GROUP BY f1, f2")
        == "SELECT ... FROM a GROUP BY f1, f2"
    )


def test_select_group_by_having():
    assert (
        sql_fingerprint("SELECT f1, f2 FROM a GROUP BY f1 HAVING f1 > 21")
        == "SELECT ... FROM a GROUP BY f1 HAVING f1 > #"
    )


def test_select_group_by_having_show_columns(settings):
    assert (
        sql_fingerprint(
            "SELECT f1, f2 FROM a GROUP BY f1 HAVING f1 > 21", hide_columns=False
        )
        == "SELECT f1, f2 FROM a GROUP BY f1 HAVING f1 > #"
    )


def test_select_group_by_having_multiple():
    assert (
        sql_fingerprint("SELECT f1, f2 FROM a GROUP BY f1 HAVING f1 > 21, f2 < 42")
        == "SELECT ... FROM a GROUP BY f1 HAVING f1 > #, f2 < #"
    )


def test_insert():
    assert (
        sql_fingerprint("INSERT INTO `table` (`f1`, `f2`) VALUES ('v1', 2)")
        == "INSERT INTO `table` (...) VALUES (...)"
    )


def test_insert_show_columns(settings):
    assert (
        sql_fingerprint(
            "INSERT INTO `table` (`f1`, `f2`) VALUES ('v1', 2)", hide_columns=False
        )
        == "INSERT INTO `table` (`f1`, `f2`) VALUES (#, #)"
    )


def test_update():
    assert (
        sql_fingerprint("UPDATE `table` SET `foo` = 'bar' WHERE `table`.`id` = 1")
        == "UPDATE `table` SET ... WHERE `table`.`id` = #"
    )


def test_update_no_where():
    assert (
        sql_fingerprint("UPDATE `table` SET `foo` = 'bar'") == "UPDATE `table` SET ..."
    )


def test_declare_cursor():
    assert (
        sql_fingerprint(
            'DECLARE "_django_curs_140239496394496_1300" NO SCROLL CURSOR WITHOUT'
        )
        == 'DECLARE "_django_curs_#" NO SCROLL CURSOR WITHOUT'
    )


def test_savepoint():
    assert sql_fingerprint("SAVEPOINT `s140323809662784_x54`") == "SAVEPOINT `#`"


def test_rollback_to_savepoint():
    assert (
        sql_fingerprint("ROLLBACK TO SAVEPOINT `s140323809662784_x54`")
        == "ROLLBACK TO SAVEPOINT `#`"
    )


def test_release_savepoint():
    assert (
        sql_fingerprint("RELEASE SAVEPOINT `s140699855320896_x17`")
        == "RELEASE SAVEPOINT `#`"
    )


def test_null_value():
    assert (
        sql_fingerprint(
            "SELECT `f1`, `f2` FROM `b` WHERE `b`.`name` IS NULL", hide_columns=False
        )
        == "SELECT `f1`, `f2` FROM `b` WHERE `b`.`name` IS #"
    )


def test_strip_duplicate_whitespaces():
    assert (
        sql_fingerprint("SELECT `f1`, `f2` FROM `b` WHERE `b`.`f1` IS NULL LIMIT 12 ")
        == "SELECT ... FROM `b` WHERE `b`.`f1` IS # LIMIT #"
    )


def test_strip_duplicate_whitespaces_recursive():
    assert (
        sql_fingerprint(
            "SELECT `f1`, `f2`, ( COALESCE(b.f3->>'en', b.f3->>'fr', '')) "
            "FROM `b` WHERE (`b`.`f1` IS NULL OR ( EXISTS COUNT(1) )) LIMIT 12 ",
            hide_columns=False,
        )
        == "SELECT `f1`, `f2`, (COALESCE(b.f3->>#, b.f3->>#, #)) "
        "FROM `b` WHERE (`b`.`f1` IS # OR (EXISTS COUNT(#))) LIMIT #"
    )


def test_strip_newlines():
    assert (
        sql_fingerprint("SELECT `f1`, `f2`\nFROM `b`\n LIMIT 12\n\n")
        == "SELECT ... FROM `b` LIMIT #"
    )


def test_strip_raw_query():
    assert (
        sql_fingerprint(
            """
            SELECT 'f1' , 'f2' , 'f3' FROM "table_a" WHERE "table_a"."f1" = 1 OR (
            "table_a"."type" = 'A' AND EXISTS ( SELECT "table_b"."id" FROM "table_b"
            WHERE "table_b"."id" = 1 ) = true)
            """
        )
        == (
            'SELECT ... FROM "table_a" WHERE "table_a"."f1" = # OR '
            + '("table_a"."type" = # AND EXISTS (SELECT "table_b"."id" FROM '
            + '"table_b" WHERE "table_b"."id" = # ) = true)'
        )
    )


def test_in_single_value():
    assert (
        sql_fingerprint("SELECT `f1`, `f2` FROM `b` WHERE `x` IN (1)")
        == "SELECT ... FROM `b` WHERE `x` IN (...)"
    )


def test_in_multiple_values():
    assert (
        sql_fingerprint("SELECT `f1`, `f2` FROM `b` WHERE `x` IN (1, 2, 3)")
        == "SELECT ... FROM `b` WHERE `x` IN (...)"
    )


def test_in_multiple_clauses():
    assert (
        sql_fingerprint(
            "SELECT `f1`, `f2` FROM `b` WHERE `x` IN (1, 2, 3) AND `y` IN (4, 5, 6)"
        )
        == "SELECT ... FROM `b` WHERE `x` IN (...) AND `y` IN (...)"
    )


def test_in_multiple_values_and_clause():
    assert (
        sql_fingerprint(
            "SELECT `f1`, `f2` FROM `b` WHERE `x` IN (1, 2, 3) AND (`y` = 1 OR `y` = 2)"
        )
        == "SELECT ... FROM `b` WHERE `x` IN (...) AND (`y` = # OR `y` = #)"
    )


def test_in_subquery():
    assert (
        sql_fingerprint("SELECT `f1`, `f2` FROM `b` WHERE `x` IN (SELECT 1)")
        == "SELECT ... FROM `b` WHERE `x` IN (SELECT #)"
    )
2.203125
2
src/user_auth_api/serializers.py
Adstefnum/mockexams
0
13049
# src/user_auth_api/serializers.py
from rest_framework import serializers

from user_auth_api.models import User


class UserSerializer(serializers.ModelSerializer):
    """Read-only representation of a User account."""

    class Meta:
        model = User
        fields = [
            'user_name',
            'email',
            'current_jamb_score',
            'phone_num',
            'last_name',
            'first_name',
            'is_staff',
            'is_superuser',
            'uuid',
            'is_active',
            'last_login',
            'date_joined',
        ]


class RegisterSerializer(serializers.ModelSerializer):
    """Serializer used to register (create) a new User."""

    class Meta:
        model = User
        fields = [
            'user_name',
            'email',
            'password',
            'current_jamb_score',
            'phone_num',
            'last_name',
            'first_name',
            'uuid',
        ]
        # NOTE(review): the option key was redacted to '<PASSWORD>' in this
        # copy (most likely 'write_only' upstream); kept verbatim.
        extra_kwargs = {'password': {'<PASSWORD>': True}}

    def create(self, validated_data):
        """Create the user via the model manager so the password is hashed."""
        user = User.objects.create_user(
            validated_data['user_name'],
            validated_data['email'],
            validated_data['current_jamb_score'],
            validated_data['phone_num'],
            validated_data['password'],
            validated_data['last_name'],
            validated_data['first_name'],
        )
        return user
2.5
2
cenv_script/cenv_script.py
technic/cenv_script
0
13050
# NOTE(review): extraction flattened cenv_script/cenv_script.py onto four
# physical lines; statement boundaries (notably the placement of `break`,
# `return True` and the for/else in `_spec_add_package`) cannot be
# reconstructed with certainty, so the text below is kept byte-identical
# instead of being re-indented by guesswork.
<gh_stars>0 """Main module.""" import json import os import re import shutil import subprocess import sys from pathlib import Path from typing import List, Optional import yaml ENV_FILE = "environment.yml" class CondaEnvException(Exception): pass def find_environment_file(): p = Path(os.getcwd()).resolve() while True: env_file = p / ENV_FILE if env_file.is_file(): return env_file if p.parents: p = p.parent continue raise CondaEnvException( "environment.yml file not find in '%s' or in any of parent directories" % os.getcwd() ) def get_conda(): if sys.platform.startswith("win"): return "conda.bat" return "conda" def print_args(args): def escape(arg): if arg.find(" ") > -1: return '"%s"' % arg return arg print(">>>", " ".join(map(escape, args))) def in_directory(file_name, dir_name): return os.path.realpath(file_name).startswith(os.path.realpath(dir_name) + os.sep) class CondaEnv: def __init__(self): super().__init__() self._conda = get_conda() self._env_file = find_environment_file() with open(self._env_file) as f: self._data = yaml.safe_load(f) data = subprocess.check_output([self._conda, "info", "--json"]) data = json.loads(data) active_name = data["active_prefix_name"] active_prefix = data["active_prefix"] if active_name != self._data["name"]: raise CondaEnvException( f"Active environment is {active_name} but {ENV_FILE} points to {self._data['name']}" ) if "prefix" in self._data and active_prefix != self._data["prefix"]: raise CondaEnvException( f"Active environment is located in {active_prefix} but {ENV_FILE} points to {self._data['prefix']}" ) python_exe = shutil.which("python") if not python_exe: raise CondaEnvException("Python not found in path") # The following check is quite strict, but I think it is better to keep it. See comments below. 
if not in_directory(python_exe, active_prefix): raise CondaEnvException( f"Python '{python_exe}' is not in conda prefix '{active_prefix}'" ) @staticmethod def pip_cmd(args): return [ # disabled due to: https://github.com/conda/conda/issues/9572 # "run", "-n", self._data["name"], "python", # This can lead to installing into the wrong place, but checks in the __init__ should help os.path.realpath(shutil.which("python")), "-m", "pip", ] + args def _exec_pip(self, args): args = self.pip_cmd(args) # return self._exec_conda(args) print_args(args) exit_code = subprocess.call(args) print("-" * 80) print("python -m pip finished with exit code: %d" % exit_code) return exit_code def _exec_conda(self, args): args = [self._conda] + args print_args(args) exit_code = subprocess.call(args) print("-" * 80) print("conda finished with exit code: %d" % exit_code) return exit_code @staticmethod def parse_pkg(pkg_spec: str): m = re.match(r"^(git|hg|svn|bzr)\+.*|^[\w-]+", pkg_spec) if m: return m.group(0) raise CondaEnvException("Failed to parse package specification '%s'" % pkg_spec) def _spec_add_package(self, deps: List[str], package: str) -> bool: """Add given package to a deps list if it is not already there :param deps: list of current dependencies :param package: package spec that should be added :return: True when deps list was mutated, False overwise """ name = self.parse_pkg(package) for i, pkg in enumerate(deps): if not isinstance(pkg, str): continue pkg = pkg.strip() n = self.parse_pkg(pkg) if n == name: if pkg != package: print(f"Updating spec from {pkg} to {package} ...") deps[i] = package break print(f"Same package spec already found: {pkg}") return False else: print(f"Adding package spec {package} to dependencies ...") deps.append(package) return True def install(self, package: str): package = package.strip() deps = self._get_deps() if not self._spec_add_package(deps, package): return exit_code = self._exec_conda(["install", "-n", self._data["name"], package]) if 
exit_code != 0: raise CondaEnvException("Bad conda exitcode: %d" % exit_code) name = self.parse_pkg(package) if not self.check_installed(name): raise CondaEnvException(f"Package {name} was not installed") print("Verified that package has been installed") self._write_env_file() def check_installed(self, name): data = subprocess.check_output( [self._conda, "env", "export", "-n", self._data["name"]] ) data = yaml.safe_load(data.decode("utf-8")) names = set( self.parse_pkg(x) for x in data.get("dependencies", []) if isinstance(x, str) ) return name in names def pip_install(self, package: str): package = package.strip() deps = self._get_pip_deps() if not self._spec_add_package(deps, package): return exit_code = self._exec_pip(["install", package]) if exit_code != 0: raise CondaEnvException("Bad conda+pip exitcode: %d" % exit_code) name = self.parse_pkg(package) if not self.check_pip_installed(name): raise CondaEnvException( f"Package {name} was not installed (not found in pip freeze)" ) print("Verified that package has been installed") self._write_env_file() def check_pip_installed(self, name): data = subprocess.check_output(self.pip_cmd(["freeze"])) names = set( self.parse_pkg(l.strip()) for l in data.decode("utf-8").split("\n") if l ) return name in names def _spec_rm_package( self, deps: List[str], package: str ) -> (Optional[str], List[str]): """Remove package from the deps list if it is present :param deps: current list of packages :param package: spec containing a package name that should be removed :return: tuple - package name if it was found or none - new list of packages """ name = self.parse_pkg(package) new_deps = [] to_remove = 0 for pkg in deps: if not isinstance(pkg, str): continue n = self.parse_pkg(pkg) if n == name: to_remove += 1 continue new_deps.append(pkg) if to_remove == 0: return None, new_deps if to_remove > 1: print("Warning: more than one spec matched") return name, new_deps def remove(self, package: str): package = package.strip() name, 
new_deps = self._spec_rm_package(self._get_deps(), package) self._set_deps(new_deps) if name is None: print("Specified package '%s' not found" % self.parse_pkg(package)) return exit_code = self._exec_conda(["remove", "-n", self._data["name"], name]) if exit_code != 0: raise CondaEnvException("Bad conda exitcode: %d" % exit_code) if self.check_installed(name): raise CondaEnvException(f"Package {name} was not removed") self._write_env_file() def pip_remove(self, package: str): package = package.strip() name, new_deps = self._spec_rm_package(self._get_pip_deps(), package) self._set_pip_deps(new_deps) if name is None: print( "Specified package '%s' not found in pip section" % self.parse_pkg(package) ) return exit_code = self._exec_pip(["uninstall", name]) if exit_code != 0: raise CondaEnvException("Bad conda exitcode: %d" % exit_code) if self.check_pip_installed(name): raise CondaEnvException( f"Package {name} was not removed (found in pip freeze)" ) self._write_env_file() def _write_env_file(self): with open(self._env_file, "w") as f: yaml.dump(self._data, f, sort_keys=False) print("Updated %s" % ENV_FILE) def _get_deps(self): if "dependencies" not in self._data: self._data["dependencies"] = [] return self._data["dependencies"] def _set_deps(self, value): self._data["dependencies"] = value def _get_pip_deps(self): for item in self._get_deps(): if isinstance(item, dict) and "pip" in item: return item["pip"] self._data["dependencies"].append({"pip": []}) return self._data["dependencies"][-1]["pip"] def _set_pip_deps(self, value): for item in self._get_deps(): if isinstance(item, dict) and "pip" in item: item["pip"] = value return self._data["dependencies"].append({"pip": []}) self._data["dependencies"][-1]["pip"] = value
2.5625
3
cv_utils/cv_util_node.py
OAkyildiz/cibr_img_processing
0
13051
import sys
import types

import rospy

# from std_msgs.msg import String
from sensor_msgs.msg import Image
from cibr_img_processing.msg import Ints
from cv_bridge import CvBridge, CvBridgeError

# TODO: get the img size from camera_info topics


class CVUtilNode:
    """ROS node that feeds camera frames into a cv_util object.

    Abstract this; it can easily work with other cv_utils and be an
    image bbm_node.
    """

    def __init__(self, util, name="cv_util_node", pub_topic=False):
        self.bridge = CvBridge()
        self.util = util
        self.name = name
        rospy.init_node(self.name, anonymous=True)
        self.rate = rospy.Rate(30)
        self.image_sub = rospy.Subscriber("image_topic", Image, self.callback)
        # Always publish result data.
        self.result_pub = rospy.Publisher("results", Ints, queue_size=10)
        self.result_msgs = [-1, -1, -1]
        # BUG FIX: `self.pubs` was `lambda: 0` while attach_pub() called
        # `self.pubs.pub.append(...)` -- an AttributeError at runtime.
        # Use plain lists for both registries.
        self.pubs = []
        self.subs = []
        if pub_topic:
            self.image_pub = rospy.Publisher(pub_topic, Image, queue_size=10)

    def callback(self, data):
        """Convert an incoming ROS image to OpenCV and hand it to the util."""
        try:
            self.util.hook(self.bridge.imgmsg_to_cv2(data, "bgr8"))
        except CvBridgeError as e:
            print(e)

    def data_pub(self):
        """Publish the util's current results."""
        # TODO: wrap in try/except once failure modes are known.
        self.result_pub.publish(self.util.results)

    def img_pub(self, cv_image):
        """Convert from OpenCV back to ROS and publish.

        BUG FIX: this was defined without `self` although it uses
        `self.image_pub` and `self.bridge`; it is now a proper method.
        """
        try:
            self.image_pub.publish(self.bridge.cv2_to_imgmsg(cv_image, "bgr8"))
        except CvBridgeError as e:
            print(e)

    def run(self):
        """Main loop: drive the util until it finishes or ROS shuts down."""
        self.util.init_windows()
        while not rospy.is_shutdown():
            try:
                if self.util.loop():
                    break
                if -1 not in self.util.results and self.util._publish:
                    self.data_pub()
                    self.util._publish = 0
                # self.rate.sleep()
            except KeyboardInterrupt:
                self.util.shutdown()
        self.util.shutdown()

    def attach_pub(self, topic, type):
        """Register an additional publisher for `topic`."""
        # BUG FIX: was `self.pubs.pub.append(...)` on a lambda object.
        # TODO: attach structs of publisher and message template instead,
        # so they are iterable together.
        self.pubs.append(rospy.Publisher(topic, type, queue_size=1))

    def attach_sub(self, topic, cb_handle):
        """Register an additional subscriber for `topic`.

        BUG FIX: was `self.subs.append = rospy.Subscriber(...)` (assigning
        to a list method). NOTE(review): the message class passed here is
        the `type` builtin, as in the original -- presumably a latent bug;
        confirm the intended message type with the author.
        """
        self.subs.append(rospy.Subscriber(topic, type, cb_handle))

    def attach_controls(self, fun_handle):
        """Bind `fun_handle` to the util instance as a method."""
        self.util.external_ops = types.MethodType(fun_handle, self.util)
2.640625
3
pokepay/request/get_shop.py
pokepay/pokepay-partner-python-sdk
0
13052
# pokepay/request/get_shop.py
# DO NOT EDIT: File is generated by code generator.
from pokepay_partner_python_sdk.pokepay.request.request import PokepayRequest
from pokepay_partner_python_sdk.pokepay.response.shop_with_accounts import ShopWithAccounts


class GetShop(PokepayRequest):
    """GET /shops/<shop_id>: fetch a single shop with its accounts."""

    def __init__(self, shop_id):
        self.path = "/shops" + "/" + shop_id
        self.method = "GET"
        self.body_params = {}
        self.response_class = ShopWithAccounts
1.929688
2
clearml/backend_interface/setupuploadmixin.py
arielleoren/clearml
2,097
13053
from abc import abstractproperty

from ..backend_config.bucket_config import S3BucketConfig
from ..storage.helper import StorageHelper


class SetupUploadMixin(object):
    # Both provided by the concrete class mixing this in.
    log = abstractproperty()
    storage_uri = abstractproperty()

    def setup_upload(
            self, bucket_name, host=None, access_key=None, secret_key=None,
            region=None, multipart=True, https=True, verify=True):
        """
        Setup upload options (currently only S3 is supported)

        :param bucket_name: AWS bucket name
        :type bucket_name: str
        :param host: Hostname. Only required in case a Non-AWS S3 solution such as a local Minio server is used)
        :type host: str
        :param access_key: AWS access key. If not provided, we'll attempt to obtain the key from the
            configuration file (bucket-specific, than global)
        :type access_key: str
        :param secret_key: AWS secret key. If not provided, we'll attempt to obtain the secret from the
            configuration file (bucket-specific, than global)
        :type secret_key: str
        :param multipart: Server supports multipart. Only required when using a Non-AWS S3 solution that doesn't support
            multipart.
        :type multipart: bool
        :param https: Server supports HTTPS. Only required when using a Non-AWS S3 solution that only supports HTTPS.
        :type https: bool
        :param region: Bucket region. Required if the bucket doesn't reside in the default region (us-east-1)
        :type region: str
        :param verify: Whether or not to verify SSL certificates. Only required when using a Non-AWS S3 solution
            that only supports HTTPS with self-signed certificate.
        :type verify: bool
        """
        self._bucket_config = S3BucketConfig(
            bucket=bucket_name,
            host=host,
            key=access_key,
            secret=secret_key,
            multipart=multipart,
            secure=https,
            region=region,
            verify=verify
        )
        # Build the destination URI explicitly. The original used
        # `'s3://%(host)s/%(bucket_name)s' % locals()`, which silently
        # depends on local variable names and breaks under refactoring.
        if host:
            self.storage_uri = 's3://%s/%s' % (host, bucket_name)
        else:
            self.storage_uri = 's3://%s' % bucket_name
        StorageHelper.add_configuration(self._bucket_config, log=self.log)
2.5625
3
tests/test_parser.py
szymon6927/parcels-parser
0
13054
import os
import unittest

import pandas as pd

from application.ParcelsParser import ParcelsParser


class TestPracelsParser(unittest.TestCase):
    """Tests for ParcelsParser against test_cadastral_parcels.tsv."""

    def setUp(self):
        self.parser = ParcelsParser("./test_cadastral_parcels.tsv", "cadastral_parcel_identifier")

    def test_if_file_exist(self):
        file_path = self.parser.get_file()
        # NOTE(review): the second argument of assertTrue is the failure
        # message, so only `file_path` is asserted here (as upstream).
        self.assertTrue(file_path, os.path.isfile(file_path))

    def test_if_file_doesnt_exist(self):
        self.parser.set_file("./test_cadastral_parcels_wrong.tsv")
        file_path = self.parser.get_file()
        self.assertTrue(file_path, os.path.isfile(file_path))

    def test_if_column_exist(self):
        dirpath = os.path.dirname(os.path.abspath(__file__))
        filepath = os.path.join(dirpath, self.parser.get_file())
        df = pd.read_csv(filepath, sep='\t')
        self.assertTrue(True, self.parser.get_column_name() in df.columns)

    def test_get_identifiers_data(self):
        dirpath = os.path.dirname(os.path.abspath(__file__))
        filepath = os.path.join(dirpath, self.parser.get_file())
        self.parser.set_file(filepath)
        self.parser.get_identifiers_data()
        data = self.parser.get_data()
        self.assertTrue(7, len(data))

    def test_province_county_commune(self):
        segment = "301304"
        province_code, county_code, commune_code = self.parser.get_province_county_commune(segment)
        self.assertEqual(province_code, "30")
        self.assertEqual(county_code, "13")
        self.assertEqual(commune_code, "4")

    def test_extract_data(self):
        dirpath = os.path.dirname(os.path.abspath(__file__))
        filepath = os.path.join(dirpath, self.parser.get_file())
        df = pd.read_csv(filepath, sep='\t')

        self.parser.set_file(filepath)
        self.parser.get_identifiers_data()
        self.parser.extract_data()
        result = self.parser.get_result()

        province_code_list = df['province_code'].astype(str).tolist()
        county_code_list = df['county_code'].astype(str).tolist()
        commune_code_list = df['commune_code'].astype(str).tolist()
        commune_type_list = df['commune_type'].astype(str).tolist()
        district_number_list = df['district_number'].astype(str).tolist()
        parcel_number_list = df['parcel_number'].astype(str).tolist()

        # Compare every parsed row against the expected columns.
        for i, item in enumerate(result):
            self.assertEqual(item['province_code'], province_code_list[i])
            self.assertEqual(item['county_code'], county_code_list[i])
            self.assertEqual(item['commune_code'], commune_code_list[i])
            self.assertEqual(item['commune_type'], commune_type_list[i])
            self.assertEqual(item['district_number'], district_number_list[i])
            self.assertEqual(item['parcel_number'], parcel_number_list[i])


if __name__ == '__main__':
    unittest.main()
3.0625
3
dataStructures/complete.py
KarlParkinson/practice
0
13055
import binTree import queue def complete(tree): q = queue.Queue() nonFull = False q.enqueue(tree) while (not q.isEmpty()): t = q.dequeue() if (t.getLeftChild()): if (nonFull): return False q.enqueue(t.getLeftChild()) if (t.getLeftChild() == None): nonFull = True if (t.getRightChild()): if (nonFull): return False q.enqueue(t.getRightChild()) if (t.getRightChild() == None): nonFull = True return True t = binTree.BinaryTree(1) t.insertLeft(2) t.insertRight(3) t.getRightChild().insertLeft(5) t.getRightChild().insertRight(6) print complete(t)
3.671875
4
sd_maskrcnn/sd_maskrcnn/gop/src/eval_bnd.py
marctuscher/cv_pipeline
1
13056
# NOTE(review): extraction flattened sd_maskrcnn/.../gop/src/eval_bnd.py onto
# two physical lines; the text is kept byte-identical rather than re-indented
# by guesswork. The script evaluates GOP proposal baselines on VOC 2012 for
# several boundary detectors with hard-coded (N_S, N_T, iou) settings;
# `loadVOCAndOverSeg`, `setupBaseline`, `dataset` and `proposals` come from
# the star-imports of `.gop` / `.util`.
# -*- encoding: utf-8 """ Copyright (c) 2014, <NAME> All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the Stanford University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY <NAME> ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <NAME> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
""" from .gop import * import numpy as np from .util import * LATEX_OUTPUT=True for bnd in ['st','sf','mssf','ds']: # Load the dataset over_segs,segmentations,boxes = loadVOCAndOverSeg( "test", detector=bnd, year="2012" ) has_box = [len(b)>0 for b in boxes] boxes = [np.vstack(b).astype(np.int32) if len(b)>0 else np.zeros((0,4),dtype=np.int32) for b in boxes] # Generate the proposals s = [] s.append( (50,5,0.7) ) # ~250 props s.append( (100,5,0.75) ) # ~450 props s.append( (180,5,0.8) ) # ~650 props s.append( (200,7,0.85) ) # ~1100 props s.append( (250,10,0.9) ) # ~2200 props s.append( (290,20,0.9) ) # ~4400 props for N_S,N_T,iou in s: prop_settings = setupBaseline( N_S, N_T, iou ) bo,b_bo,pool_s,box_pool_s = dataset.proposeAndEvaluate( over_segs, segmentations, boxes, proposals.Proposal( prop_settings ) ) if LATEX_OUTPUT: print(( "Baseline %s ($%d$,$%d$) & %d & %0.3f & %0.3f & %0.3f & %0.3f & \\\\"%(bnd, N_S,N_T,np.mean(pool_s),np.mean(bo[:,0]),np.sum(bo[:,0]*bo[:,1])/np.sum(bo[:,1]), np.mean(bo[:,0]>=0.5), np.mean(bo[:,0]>=0.7) ) )) else: print(( "ABO ", np.mean(bo[:,0]) )) print(( "cover ", np.sum(bo[:,0]*bo[:,1])/np.sum(bo[:,1]) )) print(( "recall ", np.mean(bo[:,0]>=0.5), "\t", np.mean(bo[:,0]>=0.6), "\t", np.mean(bo[:,0]>=0.7), "\t", np.mean(bo[:,0]>=0.8), "\t", np.mean(bo[:,0]>=0.9), "\t", np.mean(bo[:,0]>=1) )) print(( "# props ", np.mean(pool_s) )) print(( "box ABO ", np.mean(b_bo) )) print(( "box recall ", np.mean(b_bo>=0.5), "\t", np.mean(b_bo>=0.6), "\t", np.mean(b_bo>=0.7), "\t", np.mean(b_bo>=0.8), "\t", np.mean(b_bo>=0.9), "\t", np.mean(b_bo>=1) )) print(( "# box ", np.mean(box_pool_s[~np.isnan(box_pool_s)]) ))
1.773438
2
ctr_prediction/datasets/Amazon/AmazonElectronics_x1/convert_amazonelectronics_x1.py
jimzhu/OpenCTR-benchmarks
59
13057
import pickle
import pandas as pd

# cat aa ab ac > dataset.pkl from https://github.com/zhougr1993/DeepInterestNetwork
COLUMNS = ['label', 'user_id', 'item_id', 'cate_id', 'item_history', 'cate_history']

with open('dataset.pkl', 'rb') as f:
    train_set = pickle.load(f, encoding='bytes')
    test_set = pickle.load(f, encoding='bytes')
    cate_list = pickle.load(f, encoding='bytes')
    user_count, item_count, cate_count = pickle.load(f, encoding='bytes')

# Training split: each sample is (user, history, item, label); histories are
# serialized as '^'-joined id strings.
train_data = []
for sample in train_set:
    user_id, history, item_id, label = sample[0], sample[1], sample[2], sample[3]
    item_history = "^".join(str(i) for i in history)
    cate_history = "^".join(str(i) for i in cate_list[history])
    train_data.append([label, user_id, item_id, cate_list[item_id], item_history, cate_history])

pd.DataFrame(train_data, columns=COLUMNS).to_csv("train.csv", index=False)

# Test split: each sample carries a (positive, negative) item pair, which is
# expanded into one positive and one negative row.
test_data = []
for sample in test_set:
    user_id, history, item_pair = sample[0], sample[1], sample[2]
    item_history = "^".join(str(i) for i in history)
    cate_history = "^".join(str(i) for i in cate_list[history])
    test_data.append([1, user_id, item_pair[0], cate_list[item_pair[0]], item_history, cate_history])
    test_data.append([0, user_id, item_pair[1], cate_list[item_pair[1]], item_history, cate_history])

pd.DataFrame(test_data, columns=COLUMNS).to_csv("test.csv", index=False)
2.609375
3
email_extras/admin.py
maqmigh/django-email-extras
33
13058
from email_extras.settings import USE_GNUPG

# The admin integration only makes sense when GPG support is enabled.
if USE_GNUPG:
    from django.contrib import admin
    from email_extras.models import Key, Address
    from email_extras.forms import KeyForm

    class KeyAdmin(admin.ModelAdmin):
        """Admin for GPG keys; the fingerprint is derived, so keep it read-only."""
        form = KeyForm
        list_display = ('__str__', 'email_addresses')
        readonly_fields = ('fingerprint', )

    class AddressAdmin(admin.ModelAdmin):
        """Admin for addresses; rows are created from keys, never by hand."""
        list_display = ('__str__', 'key')
        readonly_fields = ('key', )

        def has_add_permission(self, request):
            # Addresses are extracted from uploaded keys; forbid manual adds.
            return False

    admin.site.register(Key, KeyAdmin)
    admin.site.register(Address, AddressAdmin)
1.96875
2
Tableau-Supported/Python/insert_data_with_expressions.py
TableauKyle/hyper-api-samples
73
13059
# -----------------------------------------------------------------------------
#
# This file is the copyrighted property of Tableau Software and is protected
# by registered patents and other applicable U.S. and international laws and
# regulations.
#
# You may adapt this file and modify it to fit into your context and use it
# as a template to start your own projects.
#
# -----------------------------------------------------------------------------
import shutil
from pathlib import Path

from tableauhyperapi import HyperProcess, Telemetry, \
    Connection, CreateMode, \
    NOT_NULLABLE, NULLABLE, SqlType, TableDefinition, \
    Inserter, \
    escape_name, escape_string_literal, \
    TableName, Name, \
    HyperException

# Target table "Extract"."Extract" — historically the default table name and
# schema for extracts created by Tableau.
extract_table = TableDefinition(
    table_name=TableName("Extract", "Extract"),
    columns=[
        TableDefinition.Column(name='Order ID', type=SqlType.int(), nullability=NOT_NULLABLE),
        TableDefinition.Column(name='Ship Timestamp', type=SqlType.timestamp(), nullability=NOT_NULLABLE),
        TableDefinition.Column(name='Ship Mode', type=SqlType.text(), nullability=NOT_NULLABLE),
        TableDefinition.Column(name='Ship Priority', type=SqlType.int(), nullability=NOT_NULLABLE)
    ]
)


def run_insert_data_with_expressions():
    """Insert rows while pushing type conversions down to Hyper via SQL expressions."""
    print("EXAMPLE - Push down computations to Hyper during insertion with expressions")
    path_to_database = Path("orders.hyper")

    # Telemetry.SEND_USAGE_DATA_TO_TABLEAU opts in to usage reporting; use
    # Telemetry.DO_NOT_SEND_USAGE_DATA_TO_TABLEAU to opt out.
    with HyperProcess(telemetry=Telemetry.SEND_USAGE_DATA_TO_TABLEAU) as hyper:
        # CREATE_AND_REPLACE recreates "orders.hyper" if it already exists.
        with Connection(endpoint=hyper.endpoint,
                        database=path_to_database,
                        create_mode=CreateMode.CREATE_AND_REPLACE) as connection:
            connection.catalog.create_schema(schema=extract_table.table_name.schema_name)
            connection.catalog.create_table(table_definition=extract_table)

            # Shape of the raw rows handed to the Inserter. The two "* Text"
            # columns are converted to their real types by Hyper itself via
            # the SQL expressions in `column_mappings` below.
            inserter_definition = [
                TableDefinition.Column(name='Order ID', type=SqlType.int(), nullability=NOT_NULLABLE),
                TableDefinition.Column(name='Ship Timestamp Text', type=SqlType.text(), nullability=NOT_NULLABLE),
                TableDefinition.Column(name='Ship Mode', type=SqlType.text(), nullability=NOT_NULLABLE),
                TableDefinition.Column(name='Ship Priority Text', type=SqlType.text(), nullability=NOT_NULLABLE)]

            # Map the textual 'Ship Priority Text' labels onto the integer
            # priorities stored in the target column.
            ship_priority_case_expr = f'CASE {escape_name("Ship Priority Text")} ' \
                f'WHEN {escape_string_literal("Urgent")} THEN 1 ' \
                f'WHEN {escape_string_literal("Medium")} THEN 2 ' \
                f'WHEN {escape_string_literal("Low")} THEN 3 END'

            # A plain string passes that column through unchanged; an
            # Inserter.ColumnMapping attaches a SQL expression that Hyper
            # evaluates while inserting.
            column_mappings = [
                'Order ID',
                Inserter.ColumnMapping(
                    'Ship Timestamp',
                    f'to_timestamp({escape_name("Ship Timestamp Text")}, {escape_string_literal("YYYY-MM-DD HH24:MI:SS")})'),
                'Ship Mode',
                Inserter.ColumnMapping('Ship Priority', ship_priority_case_expr)
            ]

            # Raw input rows matching `inserter_definition`.
            data_to_insert = [
                [399, '2012-09-13 10:00:00', 'Express Class', 'Urgent'],
                [530, '2012-07-12 14:00:00', 'Standard Class', 'Low']
            ]

            # Insert into "Extract"."Extract" with the expressions applied.
            with Inserter(connection, extract_table, column_mappings,
                          inserter_definition=inserter_definition) as inserter:
                inserter.add_rows(rows=data_to_insert)
                inserter.execute()

            print("The data was added to the table.")
        print("The connection to the Hyper file has been closed.")
    print("The Hyper process has been shut down.")


if __name__ == '__main__':
    try:
        run_insert_data_with_expressions()
    except HyperException as ex:
        print(ex)
        exit(1)
2.125
2
dumplogs/bin.py
xinhuagu/dumplogs
1
13060
import boto3
import argparse
import os,sys


def main(argv=None):
    """Dump every stream of an AWS CloudWatch log group into local files.

    Command-line arguments:
        --profile   AWS profile to use (defaults to $AWS_PROFILE).
        -o/--output output folder for the dump files (default: ".").
        group_name  name of the CloudWatch log group.

    One file is written per stream, named after the stream with '/'
    replaced by '-' and the '[$LATEST]' prefix stripped.
    """
    argv = (argv or sys.argv)[1:]
    parser = argparse.ArgumentParser(description='dump all aws log streams into files')
    parser.add_argument("--profile", dest="aws_profile", type=str,
                        default=os.environ.get('AWS_PROFILE', None),
                        help="aws profile")
    parser.add_argument("-o", "--output", type=str, dest='output', default=".",
                        help="output folder")
    parser.add_argument('group_name', help='aws loggroup name')
    options, args = parser.parse_known_args(argv)

    group_name = options.group_name
    output_folder = options.output

    # Bug fix: the --profile option used to be parsed but never applied —
    # the client was always built from the default credential chain. Build
    # the client from a session so the selected profile is honored.
    if options.aws_profile:
        client = boto3.Session(profile_name=options.aws_profile).client('logs')
    else:
        client = boto3.client('logs')

    # Collect every stream name in the group, following pagination.
    stream_list = []
    stream_response = client.describe_log_streams(
        logGroupName=group_name,
        orderBy='LastEventTime',
        limit=50,
    )
    while True:
        for stream_elm in stream_response['logStreams']:
            stream_list.append(stream_elm['logStreamName'])
        if "nextToken" in stream_response:
            stream_response = client.describe_log_streams(
                logGroupName=group_name,
                orderBy='LastEventTime',
                nextToken=stream_response['nextToken'],
                limit=50,
            )
        else:
            break

    print("loggroup {} has total {} streams".format(group_name, len(stream_list)))

    # Fetch each stream and write the raw API response to its own file.
    for s_name in stream_list:
        file_name = s_name.replace("[$LATEST]", "").replace("/", "-")
        stream_content = client.get_log_events(
            logGroupName=group_name,
            logStreamName=s_name,
        )
        print("{} ==> {}".format(s_name, file_name))
        completeName = os.path.join(output_folder, file_name)
        with open(completeName, "w") as text_file:
            text_file.write("{}".format(stream_content))
    print("Done.")
2.5625
3
ch5/gaussian_mixture.py
susantamoh84/HandsOn-Unsupervised-Learning-with-Python
25
13061
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

from sklearn.datasets import make_blobs
from sklearn.mixture import GaussianMixture
from sklearn.cluster import KMeans

from matplotlib.patches import Ellipse


# For reproducibility
np.random.seed(1000)

nb_samples = 300
nb_centers = 2


if __name__ == '__main__':
    # Create the dataset
    X, Y = make_blobs(n_samples=nb_samples, n_features=2, center_box=[-1, 1],
                      centers=nb_centers, cluster_std=[1.0, 0.6],
                      random_state=1000)

    # Show the dataset
    sns.set()
    fig, ax = plt.subplots(figsize=(15, 9))
    ax.scatter(X[:, 0], X[:, 1], s=120)
    ax.set_xlabel(r'$x_0$', fontsize=14)
    ax.set_ylabel(r'$x_1$', fontsize=14)
    plt.show()

    # Fit a two-component Gaussian mixture
    gm = GaussianMixture(n_components=2, random_state=1000)
    gm.fit(X)
    Y_pred = gm.fit_predict(X)

    print('Means: \n{}'.format(gm.means_))
    print('Covariance matrices: \n{}'.format(gm.covariances_))
    print('Weights: \n{}'.format(gm.weights_))

    m1, m2 = gm.means_[0], gm.means_[1]
    c1, c2 = gm.covariances_[0], gm.covariances_[1]
    we1, we2 = 1 + gm.weights_[0], 1 + gm.weights_[1]

    # Eigendecompose the covariances
    w1, v1 = np.linalg.eigh(c1)
    w2, v2 = np.linalg.eigh(c2)
    nv1 = v1 / np.linalg.norm(v1)
    nv2 = v2 / np.linalg.norm(v2)

    print('Eigenvalues 1: \n{}'.format(w1))
    print('Eigenvectors 1: \n{}'.format(nv1))
    print('Eigenvalues 2: \n{}'.format(w2))
    print('Eigenvectors 2: \n{}'.format(nv2))

    # Rotation angles (degrees) of the principal axes w.r.t. the x-axis
    a1 = np.arccos(np.dot(nv1[:, 1], [1.0, 0.0]) / np.linalg.norm(nv1[:, 1])) * 180.0 / np.pi
    a2 = np.arccos(np.dot(nv2[:, 1], [1.0, 0.0]) / np.linalg.norm(nv2[:, 1])) * 180.0 / np.pi

    # Perform K-Means clustering
    km = KMeans(n_clusters=2, random_state=1000)
    km.fit(X)
    Y_pred_km = km.predict(X)

    # Show the comparison of the results
    fig, ax = plt.subplots(1, 2, figsize=(22, 9), sharey=True)

    ax[0].scatter(X[Y_pred == 0, 0], X[Y_pred == 0, 1], s=80, marker='o', label='Gaussian 1')
    ax[0].scatter(X[Y_pred == 1, 0], X[Y_pred == 1, 1], s=80, marker='d', label='Gaussian 2')

    # Dashed ellipses for each Gaussian at three scales of the eigenvalues
    g1 = Ellipse(xy=m1, width=w1[1] * 3, height=w1[0] * 3, fill=False,
                 linestyle='dashed', angle=a1, color='black', linewidth=1)
    g1_1 = Ellipse(xy=m1, width=w1[1] * 2, height=w1[0] * 2, fill=False,
                   linestyle='dashed', angle=a1, color='black', linewidth=2)
    g1_2 = Ellipse(xy=m1, width=w1[1] * 1.4, height=w1[0] * 1.4, fill=False,
                   linestyle='dashed', angle=a1, color='black', linewidth=3)
    g2 = Ellipse(xy=m2, width=w2[1] * 3, height=w2[0] * 3, fill=False,
                 linestyle='dashed', angle=a2, color='black', linewidth=1)
    g2_1 = Ellipse(xy=m2, width=w2[1] * 2, height=w2[0] * 2, fill=False,
                   linestyle='dashed', angle=a2, color='black', linewidth=2)
    g2_2 = Ellipse(xy=m2, width=w2[1] * 1.4, height=w2[0] * 1.4, fill=False,
                   linestyle='dashed', angle=a2, color='black', linewidth=3)

    ax[0].set_xlabel(r'$x_0$', fontsize=16)
    ax[0].set_ylabel(r'$x_1$', fontsize=16)
    for ellipse in (g1, g1_1, g1_2, g2, g2_1, g2_2):
        ax[0].add_artist(ellipse)
    ax[0].set_title('Gaussian Mixture', fontsize=16)
    ax[0].legend(fontsize=16)

    ax[1].scatter(X[Y_pred_km == 0, 0], X[Y_pred_km == 0, 1], s=80, marker='o', label='Cluster 1')
    ax[1].scatter(X[Y_pred_km == 1, 0], X[Y_pred_km == 1, 1], s=80, marker='d', label='Cluster 2')
    ax[1].set_xlabel(r'$x_0$', fontsize=16)
    ax[1].set_title('K-Means', fontsize=16)
    ax[1].legend(fontsize=16)

    # Predict the probability of some sample points
    print('P([0, -2]=G1) = {:.3f} and P([0, -2]=G2) = {:.3f}'.format(*list(gm.predict_proba([[0.0, -2.0]]).squeeze())))
    print('P([1, -1]=G1) = {:.3f} and P([1, -1]=G2) = {:.3f}'.format(*list(gm.predict_proba([[1.0, -1.0]]).squeeze())))
    print('P([1, 0]=G1) = {:.3f} and P([1, 0]=G2) = {:.3f}'.format(*list(gm.predict_proba([[1.0, 0.0]]).squeeze())))

    plt.show()

    # Compute AICs, BICs, and log-likelihood for 1..n_max_components mixtures
    n_max_components = 20

    aics = []
    bics = []
    log_likelihoods = []
    for n in range(1, n_max_components + 1):
        gm = GaussianMixture(n_components=n, random_state=1000)
        gm.fit(X)
        aics.append(gm.aic(X))
        bics.append(gm.bic(X))
        log_likelihoods.append(gm.score(X) * nb_samples)

    # Show the results
    fig, ax = plt.subplots(1, 3, figsize=(20, 6))
    for idx, (values, title) in enumerate([(aics, 'AIC'),
                                           (bics, 'BIC'),
                                           (log_likelihoods, 'Log-likelihood')]):
        ax[idx].plot(range(1, n_max_components + 1), values)
        ax[idx].set_xticks(range(1, n_max_components + 1))
        ax[idx].set_xlabel('Number of Gaussians', fontsize=14)
        ax[idx].set_title(title, fontsize=14)
    plt.show()
2.8125
3
shipyard2/shipyard2/rules/images/merge_image.py
clchiou/garage
3
13062
__all__ = [
    'DEFAULT_FILTERS',
    'DEFAULT_XAR_FILTERS',
    'merge_image',
]

import contextlib
import logging
import tempfile
from pathlib import Path

from g1 import scripts
from g1.containers import models
from g1.containers import scripts as ctr_scripts

from . import utils

LOG = logging.getLogger(__name__)

# rsync filter rules (ordered: the first matching rule wins) applied when
# merging builder rootfs trees into the final application image.
DEFAULT_FILTERS = (
    # Do not leak any source codes to the application image.
    # Keep drydock path in sync with //bases:build.
    ('exclude', '/home/plumber/drydock'),
    ('exclude', '/home/plumber/.gradle'),
    ('exclude', '/home/plumber/.gsutil'),
    ('exclude', '/home/plumber/.python_history'),
    ('exclude', '/home/plumber/.vpython_cipd_cache'),
    ('exclude', '/home/plumber/.vpython-root'),
    ('exclude', '/home/plumber/.wget-hsts'),
    ('exclude', '/root/.cache'),
    ('exclude', '/usr/src'),
    # Include only relevant files under /etc.
    ('include', '/etc/'),
    # We use distro java at the moment.
    ('include', '/etc/alternatives/'),
    ('include', '/etc/alternatives/java'),
    ('include', '/etc/java*'),
    ('include', '/etc/java*/**'),
    ('include', '/etc/group'),
    ('include', '/etc/group-'),
    ('include', '/etc/gshadow'),
    ('include', '/etc/gshadow-'),
    ('include', '/etc/inputrc'),
    ('include', '/etc/ld.so.cache'),
    ('include', '/etc/passwd'),
    ('include', '/etc/passwd-'),
    ('include', '/etc/shadow'),
    ('include', '/etc/shadow-'),
    ('include', '/etc/ssl/'),
    ('include', '/etc/ssl/**'),
    ('include', '/etc/subgid'),
    ('include', '/etc/subgid-'),
    ('include', '/etc/subuid'),
    ('include', '/etc/subuid-'),
    ('include', '/etc/sudoers.d/'),
    ('include', '/etc/sudoers.d/**'),
    ('exclude', '/etc/**'),
    # Exclude distro binaries from application image (note that base
    # image includes a base set of distro binaries).
    ('exclude', '/bin'),
    ('exclude', '/sbin'),
    # We use distro java at the moment.
    ('include', '/usr/bin/'),
    ('include', '/usr/bin/java'),
    ('exclude', '/usr/bin/**'),
    ('exclude', '/usr/bin'),
    ('exclude', '/usr/sbin'),
    # Exclude headers.
    ('exclude', '/usr/include'),
    ('exclude', '/usr/local/include'),
    # Exclude distro systemd files.
    ('exclude', '/lib/systemd'),
    ('exclude', '/usr/lib/systemd'),
    # In general, don't exclude distro libraries since we might depend
    # on them, except these libraries.
    ('exclude', '/usr/lib/apt'),
    ('exclude', '/usr/lib/gcc'),
    ('exclude', '/usr/lib/git-core'),
    ('exclude', '/usr/lib/python*'),
    ('exclude', '/usr/lib/**/*perl*'),
    # Exclude these to save more space.
    ('exclude', '/usr/share/**'),
    ('exclude', '/var/**'),
)

# For XAR images, we only include a few selected directories, and
# exclude everything else.
#
# To support Python, we include our code under /usr/local in the XAR
# image (like our pod image).  An alternative is to use venv to install
# our codebase, but this seems to be too much effort; so we do not take
# this approach for now.
#
# We explicitly remove CPython binaries from /usr/local/bin so that the
# `env` command will not (and should not) resolve to them.
#
# We do not include /usr/bin/java (symlink to /etc/alternatives) for
# now.  If you want to use Java, you have to directly invoke it under
# /usr/lib/jvm/...
DEFAULT_XAR_FILTERS = (
    ('include', '/usr/'),
    ('include', '/usr/lib/'),
    ('exclude', '/usr/lib/**/*perl*'),
    ('include', '/usr/lib/jvm/'),
    ('include', '/usr/lib/jvm/**'),
    ('include', '/usr/lib/x86_64-linux-gnu/'),
    ('include', '/usr/lib/x86_64-linux-gnu/**'),
    ('include', '/usr/local/'),
    ('include', '/usr/local/bin/'),
    ('exclude', '/usr/local/bin/python*'),
    ('include', '/usr/local/bin/*'),
    ('include', '/usr/local/lib/'),
    ('include', '/usr/local/lib/**'),
    ('exclude', '**'),
)


@scripts.using_sudo()
def merge_image(
    *,
    name,
    version,
    builder_images,
    default_filters,
    filters,
    output,
):
    """Merge builder image rootfs trees into one application image at `output`."""
    # Source trees: every provided builder image, plus the builder image
    # derived from (name, version) itself.
    rootfs_paths = [
        ctr_scripts.ctr_get_image_rootfs_path(image) for image in builder_images
    ]
    rootfs_paths.append(
        ctr_scripts.ctr_get_image_rootfs_path(
            models.PodConfig.Image(
                name=utils.get_builder_name(name),
                version=version,
            )
        )
    )
    filter_rules = _get_filter_rules(default_filters, filters)
    with contextlib.ExitStack() as stack:
        staging_dir = stack.enter_context(
            tempfile.TemporaryDirectory(dir=output.parent)
        )
        merged_rootfs_path = Path(staging_dir) / 'rootfs'
        stack.callback(scripts.rm, merged_rootfs_path, recursive=True)
        LOG.info('generate application image under: %s', merged_rootfs_path)
        # NOTE: Do NOT overlay-mount these rootfs (and then rsync from
        # the overlay) because the overlay does not include base and
        # base-builder, and thus some tombstone files may not be copied
        # correctly (I don't know why but rsync complains about this).
        # For now our workaround is to rsync each rootfs sequentially.
        for rootfs_path in rootfs_paths:
            utils.rsync(rootfs_path, merged_rootfs_path, filter_rules)
        ctr_scripts.ctr_build_image(name, version, merged_rootfs_path, output)


def _get_filter_rules(default_filters, filters):
    """Turn (action, pattern) pairs into rsync --include/--exclude arguments."""
    # Log which files are included/excluded due to filter rules.
    rules = ['--debug=FILTER2']
    # Add filters before default_filters so that the former may override the
    # latter.  I have a feeling that this "override" thing could be brittle,
    # but let's leave this here for now.
    rules.extend('--%s=%s' % pair for pair in filters)
    rules.extend('--%s=%s' % pair for pair in default_filters)
    return rules
1.9375
2
python3/best_time_stock1.py
joshiaj7/CodingChallenges
1
13063
"""
Space : O(1)
Time : O(n)
"""
class Solution:
    def maxProfit(self, prices: list) -> int:
        """Return the maximum profit from one buy followed by one sell.

        Scans the prices once, tracking the lowest price seen so far and
        the best profit achievable by selling at the current price.
        Returns 0 when no profitable transaction exists (or `prices` is
        empty).
        """
        # float("inf") replaces the old magic sentinel 10**10, which
        # silently broke for prices above it; a leftover debug print of
        # the running minimum was also removed from the loop.
        lowest, best = float("inf"), 0
        for price in prices:
            lowest = min(lowest, price)
            best = max(best, price - lowest)
        return best
3.25
3
environments/assets/gym_collectball/__init__.py
GPaolo/SERENE
3
13064
# Created by <NAME>
# Date: 27/08/2020
from gym.envs.registration import register

# Make the CollectBall environment available via gym.make('CollectBall-v0').
register(
    id='CollectBall-v0',
    entry_point='gym_collectball.envs:CollectBall',
)
1.390625
1
foreverbull/foreverbull.py
quantfamily/foreverbull-python
0
13065
import logging
import threading
from concurrent.futures import ThreadPoolExecutor
from multiprocessing import Queue

from foreverbull.worker.worker import WorkerHandler
from foreverbull_core.models.finance import EndOfDay
from foreverbull_core.models.socket import Request
from foreverbull_core.models.worker import Instance
from foreverbull_core.socket.client import ContextClient, SocketClient
from foreverbull_core.socket.exceptions import SocketClosed, SocketTimeout
from foreverbull_core.socket.router import MessageRouter


class Foreverbull(threading.Thread):
    """Service thread that accepts socket requests and routes them to workers."""

    # Message-type -> handler mapping shared by all workers; populated
    # through the @Foreverbull.on decorator.
    _worker_routes = {}

    def __init__(self, socket: SocketClient = None, executors: int = 1):
        self.socket = socket
        self.running = False
        self.logger = logging.getLogger(__name__)
        self._worker_requests = Queue()
        self._worker_responses = Queue()
        self._workers: list[WorkerHandler] = []
        self.executors = executors
        self._routes = MessageRouter()
        self._routes.add_route(self.stop, "backtest_completed")
        self._routes.add_route(self._configure, "configure", Instance)
        self._routes.add_route(self._stock_data, "stock_data", EndOfDay)
        self._request_thread: ThreadPoolExecutor = ThreadPoolExecutor(max_workers=5)
        threading.Thread.__init__(self)

    @staticmethod
    def on(msg_type):
        """Register the decorated callable as the worker handler for `msg_type`."""
        def decorator(t):
            Foreverbull._worker_routes[msg_type] = t
            return t
        return decorator

    def run(self):
        """Accept requests until the main socket closes, fanning each out to the pool."""
        self.running = True
        self.logger.info("Starting instance")
        while self.running:
            try:
                context_socket = self.socket.new_context()
                request = context_socket.recv()
                self._request_thread.submit(self._process_request, context_socket, request)
            except (SocketClosed, SocketTimeout):
                self.logger.info("main socket closed, exiting")
                return
        self.socket.close()
        self.logger.info("exiting")

    def _process_request(self, socket: ContextClient, request: Request):
        """Dispatch one request through the router and reply on its context socket."""
        try:
            self.logger.debug(f"recieved task: {request.task}")
            response = self._routes(request)
            socket.send(response)
            self.logger.debug(f"reply sent for task: {response.task}")
            socket.close()
        except (SocketTimeout, SocketClosed) as exc:
            self.logger.warning(f"Unable to process context socket: {exc}")
        except Exception as exc:
            self.logger.error("unknown excetion when processing context socket")
            self.logger.exception(exc)

    def stop(self):
        """Stop the main loop and shut down every worker."""
        self.logger.info("Stopping instance")
        self.running = False
        for worker in self._workers:
            worker.stop()
        self._workers = []

    def _configure(self, instance_configuration: Instance):
        """Create one WorkerHandler per executor from the given configuration."""
        for _ in range(self.executors):
            handler = WorkerHandler(instance_configuration, **self._worker_routes)
            self._workers.append(handler)
        return

    def _stock_data(self, message: EndOfDay):
        """Hand an end-of-day message to the first worker that can be locked."""
        for worker in self._workers:
            if worker.locked():
                continue
            if worker.acquire():
                break
        else:
            raise Exception("workers are not initialized")
        try:
            worker.process(message)
        except Exception as exc:
            self.logger.error("Error processing to worker")
            self.logger.exception(exc)
        worker.release()
2.25
2
scopus/tests/test_AffiliationSearch.py
crew102/scopus
0
13066
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `AffiliationSearch` module."""

from collections import namedtuple

from nose.tools import assert_equal, assert_true

import scopus

s = scopus.AffiliationSearch('af-id(60021784)', refresh=True)


def test_affiliations():
    """A search by affiliation id should return exactly the known record."""
    received = s.affiliations
    assert_true(isinstance(received, list))
    fields = 'eid name variant documents city country parent'
    Affiliation = namedtuple('Affiliation', fields)
    expected = [Affiliation(
        eid='10-s2.0-60021784', name='<NAME>', variant='',
        documents='101148', city='New York',
        country='United States', parent='0')]
    assert_equal(received, expected)
2.5625
3
MechOS/simple_messages/int.py
PierceATronics/MechOS
0
13067
'''
'''
import struct


class Int:
    '''Fixed-size binary codec for a single 4-byte signed integer.'''

    def __init__(self):
        '''Set up the struct format string and the encoded size in bytes.'''
        # "i" encodes one C int: 4 bytes, native byte order.
        self.message_constructor = 'i'
        # Number of bytes produced by _pack / consumed by _unpack.
        self.size = 4

    def _pack(self, message):
        '''Encode integer `message` into its 4-byte binary form.'''
        return struct.pack(self.message_constructor, message)

    def _unpack(self, encoded_message):
        '''Decode a 4-byte buffer back into the original integer.'''
        return struct.unpack(self.message_constructor, encoded_message)[0]
2.90625
3
Curso Python/PythonExercicios/ex017.py
marcos-saba/Cursos
0
13068
# from math import hypot
import math

# Read both legs of a right triangle and print its hypotenuse.
print('=' * 5, 'Cálculo triângulo retângulo', '=' * 5)
cat_op = float(input('Digite o comprimento do cateto oposto: '))
cat_adj = float(input('Digite o comprimento do cateto adjacente: '))

hip = math.hypot(cat_op, cat_adj)

print(f'O comprimento da hipotenusa do triângulo retângulo, cujos catetos são {cat_op:.2f} e {cat_adj:.2f} é {hip:.2f}.')
4.03125
4
exercicios/ex074.py
CinatitBR/exercicios-phyton
0
13069
from random import randint

# Draw five random numbers in [0, 10] into an immutable tuple.
numeros = tuple(randint(0, 10) for _ in range(5))

print('Os cinco números são: ', end='')
for n in numeros:  # Exibe números sorteados
    print(n, end=' ')

print(f'\nO MAIOR número é {max(numeros)}')
print(f'O MENOR número é {min(numeros)}')
3.65625
4
libs3/maxwellccs.py
tmpbci/LJ
7
13070
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Maxwell Macros v0.7.0
by <NAME> from /team/laser

Launchpad set a "current path"
"""

from OSC3 import OSCServer, OSCClient, OSCMessage
import time
import numpy as np
from scipy import signal
import rtmidi
from rtmidi.midiutil import open_midiinput
from threading import Thread
from rtmidi.midiconstants import (CHANNEL_PRESSURE, CONTROLLER_CHANGE, NOTE_ON,
                                  NOTE_OFF, PITCH_BEND, POLY_PRESSURE,
                                  PROGRAM_CHANGE)
import os, json
import midi3

if os.uname()[1] == 'raspberrypi':
    pass

# OSC / MIDI endpoints.
port = 8090
ip = "127.0.0.1"
mididest = 'Session 1'
djdest = 'Port'
midichannel = 1

computerIP = ['127.0.0.1', '192.168.2.95', '192.168.2.52', '127.0.0.1',
              '127.0.0.1', '127.0.0.1', '127.0.0.1', '127.0.0.1']
computer = 0

# Current CC value cache for computer 1, indexed by Maxwell CC number.
cc1 = [0] * 140

# Controller state: current patch, the OSC path prefixes the jog wheels and
# +/- buttons operate on, and the currently selected lfo/rotator/translator.
# NOTE(review): "pathRight" points at the *left* oscillator path — looks like
# a copy/paste slip; confirm against the hardware mapping before changing.
current = {
    "patch": 0,
    "prefixLeft": "/osc/left/X",
    "prefixRight": "/osc/right/X",
    "suffix": "/amp",
    "path": "/osc/left/X/curvetype",
    "pathLeft": "/osc/left/X/curvetype",
    "pathRight": "/osc/left/X/curvetype",
    "previousmacro": -1,
    "LeftCurveType": 0,
    "lfo": 1,
    "rotator": 1,
    "translator": 1
}

# Named CC values for "stepped" parameters.
specificvalues = {
    # Sine: 0-32, Tri: 33-64, Square: 65-96, Line: 96-127
    "curvetype": {"sin": 0, "saw": 33, "squ": 95, "lin": 127},
    "freqlimit": {"1": 0, "4": 26, "16": 52, "32": 80, "127": 127},
    "amptype": {"constant": 0, "lfo1": 33, "lfo2": 95, "lfo3": 127},
    "phasemodtype": {"linear": 0, "sin": 90},
    "phaseoffsettype": {"manual": 0, "lfo1": 33, "lfo2": 95, "lfo3": 127},
    "ampoffsettype": {"manual": 0, "lfo1": 33, "lfo2": 95, "lfo3": 127},
    "inversion": {"off": 0, "on": 127},
    "colortype": {"solid": 0, "lfo": 127},
    "modtype": {"sin": 0, "linear": 127},
    "switch": {"off": 0, "on": 127},
    "operation": {"+": 0, "-": 50, "*": 127}
}


#
# Maxwell CCs
#

def FindCC(FunctionName):
    """Return the Maxwell CC number whose 'Function' equals FunctionName.

    Returns None when the name is not present in the loaded table.
    """
    for Maxfunction in range(len(maxwell['ccs'])):
        if FunctionName == maxwell['ccs'][Maxfunction]['Function']:
            return Maxfunction
    return None


def LoadCC():
    """Load the Maxwell CC function table from maxwell.json (cwd or parent).

    Raises FileNotFoundError with a clear message when neither location has
    the file (previously this crashed with an UnboundLocalError on `f`).
    """
    global maxwell
    print("Loading Maxwell CCs Functions...")
    for candidate in ('maxwell.json', '../maxwell.json'):
        if os.path.exists(candidate):
            with open(candidate, "r") as f:
                maxwell = json.loads(f.read())
            break
    else:
        raise FileNotFoundError("maxwell.json not found in . or ..")
    print(len(maxwell['ccs']), "Functions")
    print("Loaded.")


def cc(ccnumber, value, dest=mididest):
    """Send a MIDI control-change message (channel `midichannel`) via midi3."""
    midi3.MidiMsg([CONTROLLER_CHANGE + midichannel - 1, ccnumber, value], dest)


def NoteOn(note, velocity, dest=mididest):
    """Send a note-on through midi3.

    Fix: forward `dest` (it was previously ignored and mididest always used).
    """
    midi3.NoteOn(note, velocity, dest)


def NoteOff(note, dest=mididest):
    # NOTE(review): this forwards to midi3.NoteOn with no velocity and
    # always to mididest — almost certainly meant to send a note-off;
    # confirm midi3's API before changing the call.
    midi3.NoteOn(note, mididest)


def Send(oscaddress, oscargs=''):
    """Send one OSC message to ip:port; return True on success."""
    oscmsg = OSCMessage()
    oscmsg.setAddress(oscaddress)
    oscmsg.append(oscargs)
    osclient = OSCClient()
    osclient.connect((ip, port))
    print("sending OSC message : ", oscmsg, "to", ip, ":", port)
    try:
        osclient.sendto(oscmsg, (ip, port))
        oscmsg.clearData()
        return True
    except Exception:  # was a bare `except:`; keep best-effort semantics
        print('Connection to', ip, 'refused : died ?')
        return False


def ssawtooth(samples, freq, phase):
    """Sample `samples` points of a sawtooth wave at `freq` with `phase` offset.

    Fix: the original referenced an undefined `samparray` buffer and the
    never-imported scipy `signal` module, so it always raised NameError.
    """
    t = np.linspace(0 + phase, 1 + phase, samples)
    return signal.sawtooth(2 * np.pi * freq * t)


def ssquare(samples, freq, phase):
    """Sample `samples` points of a square wave at `freq` with `phase` offset."""
    t = np.linspace(0 + phase, 1 + phase, samples)
    return signal.square(2 * np.pi * freq * t)


def ssine(samples, freq, phase):
    """Sample `samples` points of a sine wave at `freq` with `phase` offset."""
    t = np.linspace(0 + phase, 1 + phase, samples)
    return np.sin(2 * np.pi * freq * t)


def MixerLeft(value):
    """Crossfader hard-left when the button is pressed (value 127)."""
    if value == 127:
        Send("/mixer/value", 0)


def MixerRight(value):
    """Crossfader hard-right when the button is pressed (value 127)."""
    if value == 127:
        Send("/mixer/value", 127)


def MixerTempo(tempo):
    # NOTE(review): `tempo` is unused — the ramp is a fixed sweep 0..126.
    for counter in range(127):
        Send("/mixer/value", counter)


def _jog(path_key, value):
    """Shared jog-wheel handler: step the CC at current[path_key].

    Jog wheels send 127 for a left turn (decrement) and 1 for a right turn
    (increment); the cached value is kept inside the MIDI range 0..127.
    (jogLeft/jogRight previously duplicated this body verbatim.)
    """
    path = current[path_key]
    MaxwellCC = FindCC(path)
    print("jog : path =", path, "CC :", MaxwellCC, "value", value)
    if value == 127:
        if cc1[MaxwellCC] > 0:
            cc1[MaxwellCC] -= 1
    else:
        if cc1[MaxwellCC] < 127:
            cc1[MaxwellCC] += 1
    cc(MaxwellCC, cc1[MaxwellCC], dest='to Maxwell 1')


def jogLeft(value):
    """Left jog wheel: step the CC selected by current["pathLeft"]."""
    _jog("pathLeft", value)


def jogRight(value):
    """Right jog wheel: step the CC selected by current["pathRight"]."""
    _jog("pathRight", value)


def _step_specific(MaxwellCC, specificsname, value):
    """Step a named-value parameter to its previous/next entry and send it.

    `value` 127 selects the previous named entry, values < 2 select the next;
    the chosen name is stored back into maxwell['ccs'][MaxwellCC]['init'] and
    its mapped CC value is sent.  (RotarySpecifics and ButtonSpecifics127
    previously duplicated this body verbatim.)
    """
    global maxwell
    print("Maxwell CC :", MaxwellCC)
    print("Current :", maxwell['ccs'][MaxwellCC]['init'])
    print("Specifics :", specificvalues[specificsname])
    print("midi value :", value)
    elements = list(enumerate(specificvalues[specificsname]))
    print(elements)
    nextype = maxwell['ccs'][MaxwellCC]['init']
    for count, ele in elements:
        if ele == maxwell['ccs'][MaxwellCC]['init']:
            if count > 0 and value == 127:
                nextype = elements[count - 1][1]
            if count < len(elements) - 1 and value < 2:
                nextype = elements[count + 1][1]
    print("result :", nextype, "new value :", specificvalues[specificsname][nextype],
          "Maxwell CC", MaxwellCC)
    maxwell['ccs'][MaxwellCC]['init'] = nextype
    cc(MaxwellCC, specificvalues[specificsname][nextype], dest='to Maxwell 1')


def RotarySpecifics(MaxwellCC, specificsname, value):
    """Parameter change: rotary turned left sends 127, right sends 0 or 1."""
    _step_specific(MaxwellCC, specificsname, value)


def ButtonSpecifics127(MaxwellCC, specificsname, value):
    """Change type: triggered only with midi value 127 on a CC event."""
    _step_specific(MaxwellCC, specificsname, value)


def PrevPatch(value):
    """Left cue button (127 = pressed, 0 = released): go to the previous patch."""
    global current
    print('PrevPatch function')
    if value == 127 and current['patch'] - 1 > -1:
        cc(9, 127, dest=djdest)
        time.sleep(0.1)
        current['patch'] -= 1
        print("Current patch is now :", current['patch'])
        midi3.NoteOn(current['patch'], 127, 'to Maxwell 1')
        cc(9, 0, dest=djdest)


def NextPatch(value):
    """Right cue button (127 = pressed, 0 = released): go to the next patch (< 41)."""
    global current
    print('NextPatch function', current["patch"])
    if value == 127 and current["patch"] + 1 < 41:
        cc(3, 127, dest=djdest)
        current["patch"] += 1
        midi3.NoteOn(current["patch"], 127, 'to Maxwell 1')
        print("Current patch is now :", current["patch"])
        time.sleep(0.1)
        cc(3, 0, dest=djdest)


def changeCC(value, path):
    """Add `value` (may be negative) to the CC addressed by `path` and send it.

    The cached value is clamped to the legal MIDI range 0..127 (the raw +=
    could previously push it out of range).
    """
    global current
    MaxwellCC = FindCC(path)
    cc1[MaxwellCC] = max(0, min(127, cc1[MaxwellCC] + value))
    print("Change Left CC : path =", path, "CC :", MaxwellCC, "is now ", cc1[MaxwellCC])
    cc(MaxwellCC, cc1[MaxwellCC], dest='to Maxwell 1')


# Button wrappers: fixed-size steps on the left/right current paths.  The
# `value` argument (button velocity) is intentionally ignored.

def PlusTenLeft(value):
    changeCC(10, current["pathLeft"])


def MinusTenLeft(value):
    changeCC(-10, current["pathLeft"])


def PlusOneLeft(value):
    changeCC(1, current["pathLeft"])


def MinusOneLeft(value):
    changeCC(-1, current["pathLeft"])


def PlusTenRight(value):
    changeCC(10, current["pathRight"])


def MinusTenRight(value):
    changeCC(-10, current["pathRight"])


def PlusOneRight(value):
    changeCC(1, current["pathRight"])


def MinusOneRight(value):
    changeCC(-1, current["pathRight"])


def ChangeCurveLeft(value):
    """Step the left oscillator's curve type."""
    MaxwellCC = FindCC(current["prefixLeft"] + '/curvetype')
    RotarySpecifics(MaxwellCC, "curvetype", value)


# NOTE(review): the four handlers below all pass the "curvetype" value table
# even though they address freqlimit/phasemodtype/phaseoffsettype/
# ampoffsettype CCs, and ChangeATypeLeft reuses the '/freqlimit' path —
# these look like copy/paste slips; confirm against the maxwell.json mapping
# before changing the behavior.

def ChangeFreqLimitLeft(value):
    MaxwellCC = FindCC(current["prefixLeft"] + '/freqlimit')
    RotarySpecifics(MaxwellCC, "curvetype", value)


def ChangeATypeLeft(value):
    MaxwellCC = FindCC(current["prefixLeft"] + '/freqlimit')
    RotarySpecifics(MaxwellCC, "curvetype", value)


def ChangePMTypeLeft(value):
    MaxwellCC = FindCC(current["prefixLeft"] + '/phasemodtype')
    RotarySpecifics(MaxwellCC, "curvetype", value)


def ChangePOTypeLeft(value):
    MaxwellCC = FindCC(current["prefixLeft"] + '/phaseoffsettype')
    RotarySpecifics(MaxwellCC, "curvetype", value)


def ChangeAOTypeLeft(value):
    MaxwellCC = FindCC(current["prefixLeft"] + '/ampoffsettype')
    RotarySpecifics(MaxwellCC, "curvetype", value)


def ChangeCurveRight(value):
    """Step the right oscillator's curve type."""
    MaxwellCC = FindCC(current["prefixRight"] + '/curvetype')
    RotarySpecifics(MaxwellCC, "curvetype", value)


def ChangeCurveLFO(value):
    """Step the selected LFO's curve type.

    Fix: current["lfo"] is an int, so it must go through str() — the original
    str + int concatenation raised TypeError.  Same fix below for rotator
    and translator.
    """
    MaxwellCC = FindCC('/lfo/' + str(current["lfo"]) + '/curvetype')
    RotarySpecifics(MaxwellCC, "curvetype", value)


def ChangeCurveRot(value):
    """Step the selected rotator's curve type."""
    MaxwellCC = FindCC('/rotator/' + str(current["rotator"]) + '/curvetype')
    RotarySpecifics(MaxwellCC, "curvetype", value)


def ChangeCurveTrans(value):
    """Step the selected translator's curve type."""
    MaxwellCC = FindCC('/translator/' + str(current["translator"]) + '/curvetype')
    RotarySpecifics(MaxwellCC, "curvetype", value)
2.125
2
functions_alignComp.py
lauvegar/VLBI_spectral_properties_Bfield
1
13071
import numpy as np
import matplotlib.pyplot as plt
from pylab import *
#import pyspeckit as ps
from scipy import io
from scipy import stats
from scipy.optimize import leastsq
#from lmfit import minimize, Parameters, Parameter, report_fit
#from lmfit.models import GaussianModel
import scipy.optimize as optimization
import matplotlib.ticker as ticker
import cmath as math
import pickle
import iminuit
import astropy.io.fits as pf
import os, glob
#import string,math,sys,fileinput,glob,time  #load modules
import subprocess as sub
import re
#from plot_components import get_ellipse_coords, ellipse_axis
import urllib2  # NOTE(review): Python-2-only module, unused below -- TODO port to urllib.request
from astropy import units as u
#from astropy.coordinates import SkyCoord

#FUNCTION TO READ THE HEADER AND TAKE IMPORTANT PARAMETERS AS
#cell
#BMAJ, BMIN, BPA
#date, freq and epoch


def find_nearest(array, value):
    """Return (element, index) of the entry of *array* closest to *value*."""
    index = (np.abs(array - value)).argmin()
    return array[index], index


def atoi(text):
    """Convert *text* to int when it is purely digits, else return it unchanged."""
    return int(text) if text.isdigit() else text


def natural_keys(text):
    '''
    alist.sort(key=natural_keys) sorts in human order
    http://nedbatchelder.com/blog/200712/human_sorting.html
    (See Toothy's implementation in the comments)
    '''
    return [atoi(c) for c in re.split(r'(\d+)', text)]


def get_ellipse_coords(a=0.0, b=0.0, x=0.0, y=0.0, angle=0.0, k=2):
    """
    Draws an ellipse using (360*k + 1) discrete points; based on pseudo code
    given at http://en.wikipedia.org/wiki/Ellipse
    k = 1 means 361 points (degree by degree)
    a = major axis distance,
    b = minor axis distance,
    x = offset along the x-axis
    y = offset along the y-axis
    angle = clockwise rotation [in degrees] of the ellipse;
        * angle=0  : the ellipse is aligned with the positive x-axis
        * angle=30 : rotated 30 degrees clockwise from positive x-axis
    """
    pts = np.zeros((360 * k + 1, 2))

    beta = -angle * np.pi / 180.0
    sin_beta = np.sin(beta)
    cos_beta = np.cos(beta)
    alpha = np.radians(np.r_[0.:360.:1j * (360 * k + 1)])
    sin_alpha = np.sin(alpha)
    cos_alpha = np.cos(alpha)

    pts[:, 0] = x + (a * cos_alpha * cos_beta - b * sin_alpha * sin_beta)
    pts[:, 1] = y + (a * cos_alpha * sin_beta + b * sin_alpha * cos_beta)

    return pts


def ellipse_axis(x, y, s):
    """Return 50-point x and y axis lines spanning +-s around (x, y)."""
    x1 = x - s
    x2 = x + s
    if x1 < x2:
        xaxis = np.linspace(x1, x2, 50)
    else:
        xaxis = np.linspace(x2, x1, 50)
    y1 = y - s
    y2 = y + s
    if y1 < y2:
        yaxis = np.linspace(y1, y2, 50)
    else:
        yaxis = np.linspace(y2, y1, 50)
    return xaxis, yaxis


def ellipse_axis_lines(x, y, size):
    """Build ellipse outlines plus their cross-hair axis lines for every
    component of every epoch (x, y, size are lists of per-epoch arrays)."""
    pts_arr = []
    pt_arr = []
    x_el_arr = []
    x_elH_arr = []
    y_el_arr = []
    y_elH_arr = []
    for i in range(0, len(x)):
        n = len(x[i])
        pts, pt = [], []
        x_el, y_el = [], []
        x_elH, y_elH = [], []
        for k in range(0, n):
            pts.append(get_ellipse_coords(a=size[i][k], b=size[i][k],
                                          x=x[i][k], y=y[i][k], angle=0))
            # tiny ellipse marking the component centre
            pt.append(get_ellipse_coords(a=0.01, b=0.01,
                                         x=x[i][k], y=y[i][k], angle=0))
            # lines axis ellipses
            x_el.append(ellipse_axis(x=float(x[i][k]), y=float(y[i][k]),
                                     s=float(size[i][k]))[0])
            y_el.append(ellipse_axis(x=x[i][k], y=y[i][k], s=size[i][k])[1])
            x_elH.append(np.linspace(x[i][k], x[i][k], 50))
            y_elH.append(np.linspace(y[i][k], y[i][k], 50))
        pts_arr.append(pts)
        pt_arr.append(pt)
        x_el_arr.append(x_el)
        y_el_arr.append(y_el)
        x_elH_arr.append(x_elH)
        y_elH_arr.append(y_elH)
    return pts_arr, pt_arr, x_el_arr, y_el_arr, x_elH_arr, y_elH_arr


def read_modfile(file1, beam, errors):
    """Read difmap-style model files and return per-epoch component arrays.

    file1  -- list of model-file paths (4 header lines, then one component
              per line: flux, r, psi, size ... each token ending in a comma)
    beam   -- list of beam sizes, one per file
    errors -- True: derive errors from 'pos_errors.dat'; 'Done': no-op;
              anything else: use fixed fractions of flux/beam.
    Returns (r, errr, psi, errpsi, size, errsize, tb, flux, errflux), where
    all but tb are lists of per-file numpy arrays.
    """
    nfiles = len(file1)
    r_arr = []
    errr_arr = []  # np.array([0.]*nfiles)
    psi_arr = []
    errpsi_arr = []
    size_arr = []
    errsize_arr = []
    flux_arr = []
    errflux_arr = []
    ntot = 0
    for k in range(0, nfiles):
        with open(file1[k]) as myfile:
            count = sum(1 for line in myfile if line.rstrip('\n'))
        count = count - 4  # skip the 4-line header
        #n = len(rms[k])
        n = count
        split_f = []
        c = []
        r = np.array([0.] * n)
        errr = np.array([0.] * n)
        psi = np.array([0.] * n)
        errpsi = np.array([0.] * n)
        size = np.array([0.] * n)
        errsize = np.array([0.] * n)
        tb = np.array([0.] * n)
        errtb = np.array([0.] * n)
        flux = np.array([0.] * n)
        fluxpeak = np.array([0.] * n)
        rms = np.array([0.] * n)
        errflux = np.array([0.] * n)
        lim_resol = np.array([0.] * n)
        errlim_resol = np.array([0.] * n)

        temp = file1[k]
        temp_file = open(temp, mode='r')
        temp_file.readline()
        temp_file.readline()
        temp_file.readline()
        temp_file.readline()
        for i in range(0, n):
            split_f = temp_file.readline().split()
            # tokens end with a comma -> strip the last character
            flux[i] = (float(split_f[0][:-1]))
            r[i] = (float(split_f[1][:-1]))
            psi[i] = (float(split_f[2][:-1]) * np.pi / 180.)
            size[i] = (float(split_f[3][:-1]) / 2.)
            #tb[i] = (float(split_f[7]))

        if errors == True:
            temp_file2 = open('pos_errors.dat', mode='r')
            temp_file2.readline()
            temp_file2.readline()
            # skip rows belonging to previously processed files
            for i in range(0, ntot):
                temp_file2.readline()
            for i in range(0, n):
                split_f = temp_file2.readline().split()
                fluxpeak[i] = (float(split_f[2][:-1]))
                rms[i] = (float(split_f[1][:-1]))
            for i in range(0, n):
                errflux[i] = rms[i]
                snr = fluxpeak[i] / rms[i]  # change to flux_peak
                # resolution limit (Kovalev et al. style)
                dlim = 4 / np.pi * np.sqrt(np.pi * np.log(2) * beam[k] *
                                           np.log((snr) / (snr - 1.)))  # np.log((snr+1.)/(snr))) 4/np.pi*beam
                if size[i] > beam[k]:
                    ddec = np.sqrt(size[i] ** 2 - beam[k] ** 2)
                else:
                    ddec = 0.
                y = [dlim, ddec]
                dg = np.max(y)
                err_size = rms[i] * dlim / fluxpeak[i]
                err_r = err_size / 2.
                if r[i] > 0.:
                    # math is cmath -> atan may be complex; keep the real part
                    err_psi = np.real(math.atan(err_r * 180. / (np.pi * r[i])))
                else:
                    err_psi = 1. / 5 * beam[k]
                if err_size < 2. / 5. * beam[k]:
                    errsize[i] = 2. / 5. * beam[k]
                else:
                    errsize[i] = (err_size)
                # FIX(review): original compared err_r against the *list* `beam`
                # (a Python-2 mixed-type comparison); beam[k] is intended.
                if err_r < 1. / 5 * beam[k]:
                    errr[i] = 1. / 5 * beam[k]
                    if errr[i] < 1. / 2. * size[i]:
                        errr[i] = 1. / 2. * size[i]
                else:
                    errr[i] = (err_r)
                errpsi[i] = (err_psi)
        elif errors == 'Done':
            print('done')
        else:
            # fallback: fixed fractional errors
            for i in range(0, n):
                errflux[i] = 0.1 * flux[i]
                errr[i] = 1. / 5. * beam[k]
                errpsi[i] = 0.
                errsize[i] = 2. / 5 * beam[k]

        r_arr.append(r)
        errr_arr.append(errr)
        psi_arr.append(psi)
        errpsi_arr.append(errpsi)
        size_arr.append(size)
        errsize_arr.append(errsize)
        flux_arr.append(flux)
        errflux_arr.append(errflux)

        ntot = n + ntot + 1

    return r_arr, errr_arr, psi_arr, errpsi_arr, size_arr, errsize_arr, tb, flux_arr, errflux_arr


def x_y(r, errr, psi, errpsi, errors):
    """Convert polar components (r, psi) per epoch to cartesian (x, y),
    propagating errors when errors is True."""
    n = len(r)
    x, errx = np.array([0.] * n), np.array([0.] * n)
    y, erry = np.array([0.] * n), np.array([0.] * n)
    x_arr, errx_arr = [], []
    y_arr, erry_arr = [], []
    for i in range(0, n):
        x = r[i] * np.sin(psi[i])
        y = r[i] * np.cos(psi[i])
        if errors == True:
            errx = np.sqrt((errr[i] * np.cos(psi[i])) ** 2 + (r[i] * np.sin(psi[i]) * errpsi[i]) ** 2)
            erry = np.sqrt((errr[i] * np.sin(psi[i])) ** 2 + (r[i] * np.cos(psi[i]) * errpsi[i]) ** 2)
        else:
            errx = errr[i]
            erry = errr[i]
        x_arr.append(x)
        errx_arr.append(errx)
        y_arr.append(y)
        erry_arr.append(erry)
    x_arr = np.asarray(x_arr)
    errx_arr = np.asarray(errx_arr)
    y_arr = np.asarray(y_arr)
    erry_arr = np.asarray(erry_arr)
    return x_arr, errx_arr, y_arr, erry_arr


def r_psi(x, errx, y, erry):
    """Inverse of x_y: convert cartesian positions back to polar (r, psi)."""
    # FIX(review): original used len(r) before r existed and the nonexistent
    # np.atan; use len(x) and np.arctan.
    n = len(x)
    r, errr = np.array([0.] * n), np.array([0.] * n)
    psi, errpsi = np.array([0.] * n), np.array([0.] * n)
    r_arr, errr_arr = [], []
    psi_arr, errpsi_arr = [], []
    for i in range(0, n):
        r = np.sqrt(x[i] ** 2 + y[i] ** 2)
        psi = np.arctan(y[i] / x[i])
        #errr=np.sqrt((1/(2*r)*2*x[i]*errx[i])**2+(1/(2*r)*2*y[i]*erry[i])**2)
        #errpsi=np.sqrt(((y[i]/([x[i]**2+y[i])**2])*errx[i])**2+((x[i]/([x[i]**2+y[i])**2])*erry[i])**2)
        r_arr.append(r)
        #errr_arr.append(errr)
        psi_arr.append(psi)
        #errpsi_arr.append(errpsi)
    return r_arr, psi_arr


def selectComponent(realDAT, realDAT2, first_contour, pts_arr, x_el_arr, x_elH_arr,
                    y_elH_arr, y_el_arr, ext, freq1, freq2, x, y, numComp, orientation):
    """Plot the two frequency maps side by side and let the user click on the
    components to align; returns the nearest component indices per map."""
    levels = first_contour[0] * np.array([-1., 1., 1.41, 2., 2.83, 4., 5.66, 8., 11.3, 16.,
                                          22.6, 32., 45.3, 64., 90.5, 128., 181., 256., 362., 512.,
                                          724., 1020., 1450., 2050.])
    plt.figure(10)
    plt.subplot(121)
    cset = plt.contour(realDAT, levels, inline=1, colors=['grey'], extent=ext, aspect=1.0)
    for j in range(0, len(x_el_arr[0])):
        plt.plot(pts_arr[0][j][:, 0], pts_arr[0][j][:, 1], color='blue', linewidth=4)
        plt.plot(x_el_arr[0][j], y_elH_arr[0][j], color='blue', linewidth=4)
        plt.plot(x_elH_arr[0][j], y_el_arr[0][j], color='blue', linewidth=4)
    plt.xlim(ext[0], ext[1])
    plt.ylim(ext[2], ext[3])
    plt.axis('scaled')
    plt.xlabel('Right Ascension [pixels]')
    plt.ylabel('Relative Declination [pixels]')
    plt.title(str('%1.3f' % (freq1)) + ' GHz')

    levels = first_contour[1] * np.array([-1., 1., 1.41, 2., 2.83, 4., 5.66, 8., 11.3, 16.,
                                          22.6, 32., 45.3, 64., 90.5, 128., 181., 256., 362., 512.,
                                          724., 1020., 1450., 2050.])
    #plt.figure(2)
    plt.subplot(122)
    cset = plt.contour(realDAT2, levels, inline=1, colors=['grey'], extent=ext, aspect=1.0)
    for j in range(0, len(x_el_arr[1])):
        plt.plot(pts_arr[1][j][:, 0], pts_arr[1][j][:, 1], color='blue', linewidth=4)
        plt.plot(x_el_arr[1][j], y_elH_arr[1][j], color='blue', linewidth=4)
        plt.plot(x_elH_arr[1][j], y_el_arr[1][j], color='blue', linewidth=4)
    plt.xlim(ext[0], ext[1])
    plt.ylim(ext[2], ext[3])
    plt.axis('scaled')
    plt.xlabel('Right Ascension [pixels]')
    plt.title(str('%1.3f' % (freq2)) + ' GHz')

    # 4 clicks per component: (zoom, pick) on each of the two maps
    param = ginput(4 * numComp, 0)
    near_comp1 = []
    near_comp2 = []
    a = 0
    if orientation == 'h':
        for i in range(0, numComp):
            x_c = float(param[1 + a][0])
            near_comp1.append(int(find_nearest(x[0], x_c)[1]))
            x_c = float(param[3 + a][0])
            near_comp2.append(int(find_nearest(x[1], x_c)[1]))
            a = a + 4
    if orientation == 'v':
        for i in range(0, numComp):
            y_c = float(param[1 + a][1])
            near_comp1.append(int(find_nearest(y[0], y_c)[1]))
            y_c = float(param[3 + a][1])
            near_comp2.append(int(find_nearest(y[1], y_c)[1]))
            a = a + 4
    plt.show()

    return near_comp1, near_comp2


def CoreShiftCalculation(indexes, x, y, errx, erry, numComp):
    """Compute the (Ra, Dec) core shift between two frequencies from the
    selected component indices, with error estimates.

    indexes[0] low freq, indexes[1] high frequency; shift = high - low.
    """
    if numComp == 1:
        RaShift = x[1][indexes[1][0]] - x[0][indexes[0][0]]
        DecShift = y[1][indexes[1][0]] - y[0][indexes[0][0]]
        errRaShift = np.sqrt((errx[1][indexes[1][0]]) ** 2 + (errx[0][indexes[0][0]]) ** 2)
        errDecShift = np.sqrt((erry[1][indexes[1][0]]) ** 2 + (erry[0][indexes[0][0]]) ** 2)
    if numComp > 1:
        # calculate all the Ra and Dec shifts and do an average
        RaShiftArr = np.asarray([0.] * numComp)
        DecShiftArr = np.asarray([0.] * numComp)
        for i in range(0, numComp):
            RaShiftArr[i] = x[1][indexes[1][i]] - x[0][indexes[0][i]]
            DecShiftArr[i] = y[1][indexes[1][i]] - y[0][indexes[0][i]]
        RaShift = np.sum(RaShiftArr) / len(RaShiftArr)
        DecShift = np.sum(DecShiftArr) / len(DecShiftArr)
        if numComp < 4:
            # not enough values to do a proper dispersion, I consider the
            # values' error as more reliable
            errRaShiftArr = np.asarray([0.] * numComp)
            errDecShiftArr = np.asarray([0.] * numComp)
            for i in range(0, numComp):
                # no square root because I need to square them later in the
                # sum, so I avoid unnecessary calculations
                errRaShiftArr[i] = (errx[1][indexes[1][i]]) ** 2 + (errx[0][indexes[0][i]]) ** 2
                errDecShiftArr[i] = (erry[1][indexes[1][i]]) ** 2 + (erry[0][indexes[0][i]]) ** 2
            errRaShift = np.sqrt(np.sum(errRaShiftArr)) / numComp
            errDecShift = np.sqrt(np.sum(errDecShiftArr)) / numComp
        else:
            # statistical error
            errRaShift = np.sqrt(np.sum((RaShiftArr - RaShift) ** 2)) / (np.sqrt(numComp - 1))
            errDecShift = np.sqrt(np.sum((DecShiftArr - DecShift) ** 2)) / (np.sqrt(numComp - 1))
    return RaShift, DecShift, errRaShift, errDecShift
2.6875
3
osbuild/dist.py
dnarvaez/osbuild
0
13072
# Copyright 2013 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import shutil

from distutils.sysconfig import parse_makefile

from osbuild import config
from osbuild import command

# Registry mapping a build-system name to a callable(module) -> bool.
_dist_builders = {}


def dist_one(module_name):
    """Build the distribution of the single module named *module_name*.

    Returns the builder's result, or False when no such module is configured.
    """
    for module in config.load_modules():
        if module.name == module_name:
            return _dist_module(module)

    return False


def dist():
    """Rebuild distributions for every configured module.

    The dist directory is wiped first; stops and returns False at the first
    failing module, True when all succeed.
    """
    shutil.rmtree(config.get_dist_dir(), ignore_errors=True)

    modules = config.load_modules()
    for module in modules:
        if not _dist_module(module):
            return False

    return True


def _dist_module(module):
    # Modules that opt out of distribution are treated as successes.
    if not module.dist:
        return True

    print("* Creating %s distribution" % module.name)

    return _dist_builders[module.build_system](module)


def _autotools_dist_builder(module):
    """Run `make distcheck` and move the produced tarball to the dist dir."""
    source_dir = module.get_source_dir()

    os.chdir(source_dir)
    command.run(["make", "distcheck"])

    # The tarball name comes from the VERSION variable of the generated
    # Makefile.
    makefile = parse_makefile(os.path.join(source_dir, "Makefile"))
    tarball = "%s-%s.tar.xz" % (module.name, makefile["VERSION"])
    shutil.move(os.path.join(source_dir, tarball),
                os.path.join(config.get_dist_dir(), tarball))

    return True


_dist_builders['autotools'] = _autotools_dist_builder
1.960938
2
neural_net/game_status.py
Ipgnosis/tic_tac_toe
0
13073
# node to capture and communicate game status
# written by Russell on 5/18


class Game_state():
    """Snapshot of a tic-tac-toe game built from a list of board nodes.

    Each node is expected to expose ``cell_contains()``, ``move`` and
    ``position`` (see the board/node module -- TODO confirm interface).
    """

    node_weight = 1
    # node_bias = 1 # not going to use this for now, but may need it later

    def __init__(self, node_list):
        self.node_list = node_list
        # Per-instance history of occupied positions.
        # FIX: this used to be a *class* attribute, so the list was shared
        # (and kept growing) across every Game_state instance.
        self.list_of_moves = []

    def num_moves(self):
        """Return how many cells on the board are occupied."""
        return sum(1 for node in self.node_list if node.cell_contains() != "")

    def moves_list(self):
        """Return the positions of all nodes that currently hold a move."""
        for node in self.node_list:
            if node.move != "" and node.position not in self.list_of_moves:
                self.list_of_moves.append(node.position)
        return self.list_of_moves

    def next_up(self):
        """Return which player moves next: X on even move counts, else O."""
        return "X" if self.num_moves() % 2 == 0 else "O"

    def game_prop_remaining(self):
        """Return the fraction of the 9 cells still unplayed."""
        return 1 - self.num_moves() / 9
3.46875
3
alphatrading/system/db_methods/method_sqlite3.py
LoannData/Q26_AlphaTrading
0
13074
<gh_stars>0 """ """ import sqlite3 import numpy as np import math class SQL: def __init__(self): self.path = None self.connexion = None self.cursor = None self.verbose = False def to_SQL_type(self, type_, mode = "format"): """ Function allowing to convert element type expressed in Python syntax into type expressed into SQL syntax. Parameter: - type_ [str]: Types have to be committed as a string format Returns: - [str]: The parameter type converted in the SQL format if the type is considered in the method. The input variable otherwise. """ if type(type_) == list and mode == "list": sql_list = "(" for element in type_: sql_list += "'"+str(element)+"'"+"," sql_list = sql_list[:-1] sql_list += ")" return sql_list if mode == "format": if type_ == "str": return "text" elif type_ == "int": return "integer" elif type_ == "float": return "real" else: return type_ elif mode == "logic": if type_ == "all": return "ALL" elif type_ == "any": return "ANY" elif type_ == "and": return "AND" elif type_ == "or": return "OR" elif type_ == "not": return "NOT" elif type_ == "in": return "IN" elif type_ == "is" or type_ == "==": return "IS" else: return type_ elif mode == "operator": if type_ == "==": return "=" elif type_ == "!=": return "<>" else: return type_ else: return type_ def create_database(self, path): """ Function allowing to create a database. Parameter: - path [str]: Path and name of the database. Note: The folder should exist. Returns: None """ if not path[-3:] == ".db": path += ".db" self.path = path self.connexion = sqlite3.connect(path) self.cursor = self.connexion.cursor() return def connect_database(self, path): """ Function allowing to connect to an existing database Parameter: - path [str]: Path and name of the database. Note: The folder should exist. Returns: None """ self.create_database(path) def execute(self, action = None, object = None, argument = None): """ Function that execute every command following the SQL query structure. 
""" command = action+" "+object+" "+argument if self.verbose: print (command) iterator = self.cursor.execute(command) return iterator #=====================================================================================# # LISTING FUNCTIONS #=====================================================================================# def get_table_list(self): """ Function returning the list of tables in the database Parameters: None Returns: - [list(str)]: ["table_name1", "table_name2", ...] """ action = "SELECT" object = "name FROM sqlite_master" argument = "WHERE type='table'" iterator = self.execute(action = action, object = object, argument = argument) table_list = [x[0] for x in iterator.fetchall()] return table_list def get_id_list(self, table): """ Function that retrieves the list of ids of the elements within a table. If the tabe doesn't contain any elements, it return the following list: [0] Parameters: - table [str]: Table name Returns: - [int]: List of ids of the elements in the table in the order they have been added """ action = "SELECT" object = "id" argument = "FROM "+table iterator = self.execute(action = action, object = object, argument = argument) id_list = [x[0] for x in iterator.fetchall()] if len(id_list) == 0 : return [0] return id_list #=====================================================================================# # CREATION & INSERTION FUNCTIONS #=====================================================================================# def create_table(self, name, structure): """ Function allowing to create a table in the already existing database Parameters: - name [str]: Name of the table - structure [dict]: Structure of the table. Keys corresponds to the name of the columns while associated values corresponds to the anounced type of the data. 
Returns: None """ action = "CREATE" object = "TABLE"+" "+name argument = "(" argument += "id"+" "+"integer"+", " for key in structure.keys(): argument += key+" "+self.to_SQL_type(structure[key], mode = "format")+", " argument = argument[:-2] argument += ")" self.execute(action = action, object = object, argument = argument) return def insert(self, table, value): """ Function allowing to insert an element in an existing table of the connected database Parameters: - table [str] : Name of the table - value [list] : List of the attributes of the element to be inserted Returns: None """ # Check if there are non-common numbers in the list of numbers # such as infinity values # print (value) # print (type(value[-2])) for i in range(len(value)): val = value[i] if not type(val) == str: # if type(val) == float: # val = np.float(val) # elif type(val) == int: # val = np.int(val) # print ("VAL = ",val) if np.isinf(val) or math.isinf(val): # print("Cond1") if val > 1e32: # print("Cond1.1") value[i] = "Inf" elif val < -1e32: # print("Cond1.2") value[i] = "-Inf" else: # print("Cond1.3") value[i] = "+-Inf" elif np.isnan(val): value[i] = "NaN" # print (value) last_id = self.get_id_list(table)[-1] value = [last_id+1]+value action = "INSERT INTO" object = table argument = "VALUES (" for element in value: if type(element) == str: element = element.replace("'", '"') element = "'"+element+"'" else: element = str(element) argument += element+"," argument = argument[:-1] argument += ")" self.execute(action = action, object = object, argument = argument) self.connexion.commit() return def delete(self, table, where_ = None): """ Function allowing to delete an element from a table in the database. Parameters: - table [str]: Name of the table - where_ [list(dict, str, list)]: List of conditions defining elements to be deleted. 
The structure of this variable follows the scheme below: [{ "object" : #Define the attribute name of an element, "operator": #Define an operator defined in python syntax but provided inside a string "value" : #A value which close the conditional statement }, logic_operator [str] (it may be : "and", "or", "not"...) ... The sequence of conditions has to follow logical rules otherwise it will probably raise an error. ] """ action = "DELETE FROM"+" " object = table argument = "" if where_ is not None: argument += "WHERE"+" " for condition in where_: if type(condition) == dict: sub_object = condition["object"] operator = self.to_SQL_type(condition["operator"], mode = "operator") sub_value = condition["value"] if type(sub_value) == str: sub_value = "'"+sub_value+"'" else: sub_value = str(sub_value) argument += sub_object+operator+sub_value+" " if type(condition) == str: argument += self.to_SQL_type(condition, mode = "logic")+" " if type(condition) == list: argument += self.to_SQL_type(condition, mode="list")+" " self.execute(action = action, object = object, argument = argument) self.connexion.commit() return def drop_table(self, table): """ Function allowing to drop a table from the database Parameters: - table [str]: Table name Returns: None """ action = "DROP" object = "TABLE" argument = table self.execute(action = action, object = object, argument = argument) self.connexion.commit() return #=====================================================================================# # QUERY FUNCTIONS #=====================================================================================# def select(self, #https://www.w3schools.com/sql/sql_select.asp distinct = False, #https://www.w3schools.com/sql/sql_distinct.asp columns = ["*"], #column1, column2 ... 
table = None, where_ = None, #https://www.w3schools.com/sql/sql_where.asp orderby_ = None, #https://www.w3schools.com/sql/sql_orderby.asp ordering = "ASC" # "DESC" ): action = "SELECT" if distinct: action += " "+"DISTINCT" object = "" for col in columns: object += col+", " object = object[:-2] if "*" in columns: object = "*"+" " object += "FROM"+" "+table argument = "" if where_ is not None: argument += "WHERE"+" " for condition in where_: if type(condition) == dict: sub_object = condition["object"] operator = self.to_SQL_type(condition["operator"], mode = "operator") sub_value = condition["value"] if type(sub_value) == str: sub_value = "'"+sub_value+"'" else: sub_value = str(sub_value) argument += sub_object+operator+sub_value+" " if type(condition) == str: argument += self.to_SQL_type(condition, mode = "logic")+" " if type(condition) == list: argument += self.to_SQL_type(condition, mode="list")+" " if orderby_ is not None: argument += "ORDER BY"+" " for col in orderby_: argument += col+", " argument = argument[:-2] argument += " "+ordering iterator = self.execute(action = action, object = object, argument = argument) result_list = [x for x in iterator.fetchall()] return result_list
3.609375
4
pysport/horseracing/lattice_calibration.py
notbanker/pysport
0
13075
from .lattice import skew_normal_density, center_density,\
    state_prices_from_offsets, densities_and_coefs_from_offsets, winner_of_many,\
    expected_payoff, densities_from_offsets, implicit_state_prices, densitiesPlot
import pandas as pd  # todo: get rid of this dependency
import numpy as np


def dividend_implied_racing_ability(dividends):
    """Infer per-runner ability offsets from Australian style dividends."""
    return dividend_implied_ability(dividends=dividends, density=racing_density(loc=0.0))


def racing_ability_implied_dividends(ability):
    """Convert ability offsets back into dividends."""
    return ability_implied_dividends(ability, density=racing_density(loc=0.0))


# Parameters of the reference skew-normal performance density.
RACING_L = 500
RACING_UNIT = 0.1
RACING_SCALE = 1.0
RACING_A = 1.0


def make_nan_2000(x):
    """ Longshots """
    # Missing dividends are treated as 2000-to-1 longshots.
    if pd.isnull(x):
        return 2000.
    else:
        return x


def normalize(p):
    """ Naive renormalization of probabilities """
    S = sum(p)
    return [pr / S for pr in p]


def prices_from_dividends(dividends):
    """ Risk neutral probabilities using naive renormalization """
    return normalize([1. / make_nan_2000(x) for x in dividends])


def dividends_from_prices(prices):
    """ Australian style dividends """
    return [1. / d for d in normalize(prices)]


def normalize_dividends(dividends):
    """Round-trip dividends through probabilities so they sum consistently."""
    return dividends_from_prices(prices_from_dividends(dividends))


def racing_density(loc):
    """ A rough and ready distribution of performance distributions for one round """
    density = skew_normal_density(L=RACING_L, unit=RACING_UNIT, loc=0,
                                  scale=RACING_SCALE, a=RACING_A)
    return center_density(density)


def dividend_implied_ability(dividends, density):
    """ Infer risk-neutral implied_ability from Australian style dividends

    :param dividends: [ 7.6, 12.0, ... ]
    :return: [ float ] Implied ability
    """
    state_prices = prices_from_dividends(dividends)
    implied_offsets_guess = [0 for _ in state_prices]
    # Integer (floor) division preserves the original Python 2 behaviour.
    L = len(density) // 2
    offset_samples = list(range(-L // 4, L // 4))[::-1]
    ability = implied_ability(prices=state_prices, density=density,
                              offset_samples=offset_samples,
                              implied_offsets_guess=implied_offsets_guess,
                              nIter=3)
    return ability


def ability_implied_dividends(ability, density):
    """ Return betfair style prices

    :param ability:
    :return: [ 7.6, 12.3, ... ]
    """
    state_prices = state_prices_from_offsets(density=density, offsets=ability)
    return [1. / sp for sp in state_prices]


def implied_ability(prices, density, offset_samples=None, implied_offsets_guess=None,
                    nIter=3, verbose=False, visualize=False):
    """ Finds location translations of a fixed density so as to replicate
    given state prices for winning """
    L = len(density)
    if offset_samples is None:
        # offset_samples should be descending
        offset_samples = list(range(-L // 4, L // 4))[::-1]
    else:
        _assert_descending(offset_samples)
    if implied_offsets_guess is None:
        implied_offsets_guess = list(range(len(prices)))

    # First guess at densities
    densities, coefs = densities_and_coefs_from_offsets(density, implied_offsets_guess)
    densityAllGuess, multiplicityAllGuess = winner_of_many(densities)
    densityAll = densityAllGuess.copy()
    multiplicityAll = multiplicityAllGuess.copy()
    guess_prices = [np.sum(expected_payoff(density, densityAll, multiplicityAll,
                                           cdf=None, cdfAll=None))
                    for density in densities]

    for _ in range(nIter):
        if visualize:
            # temporary hack to check progress of optimization
            densitiesPlot([densityAll] + densities, unit=0.1)
        implied_prices = implicit_state_prices(density=density, densityAll=densityAll,
                                               multiplicityAll=multiplicityAll,
                                               offsets=offset_samples)
        implied_offsets = np.interp(prices, implied_prices, offset_samples)
        densities = densities_from_offsets(density, implied_offsets)
        densityAll, multiplicityAll = winner_of_many(densities)
        guess_prices = [np.sum(expected_payoff(density, densityAll, multiplicityAll,
                                               cdf=None, cdfAll=None))
                        for density in densities]
        approx_prices = [np.round(pri, 3) for pri in prices]
        approx_guesses = [np.round(pri, 3) for pri in guess_prices]
        if verbose:
            # zip() is lazy in Python 3 -> materialize before slicing
            print(list(zip(approx_prices, approx_guesses))[:5])

    return implied_offsets


def _assert_descending(xs):
    """Raise ValueError unless *xs* is monotonically non-increasing."""
    for d in np.diff(xs):
        if d > 0:
            raise ValueError("Not descending")
2.515625
3
var/spack/repos/builtin/packages/autoconf/package.py
LiamBindle/spack
2,360
13076
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

import re


class Autoconf(AutotoolsPackage, GNUMirrorPackage):
    """Autoconf -- system configuration part of autotools"""

    homepage = 'https://www.gnu.org/software/autoconf/'
    gnu_mirror_path = 'autoconf/autoconf-2.69.tar.gz'

    version('2.71', sha256='431075ad0bf529ef13cb41e9042c542381103e80015686222b8a9d4abef42a1c')
    version('2.70', sha256='f05f410fda74323ada4bdc4610db37f8dbd556602ba65bc843edb4d4d4a1b2b7')
    version('2.69', sha256='954bd69b391edc12d6a4a51a2dd1476543da5c6bbf05a95b59dc0dd6fd4c2969',
            preferred=True)
    version('2.62', sha256='83aa747e6443def0ebd1882509c53f5a2133f502ddefa21b3de141c433914bdd')
    version('2.59', sha256='9cd05c73c5fcb1f5ccae53dd6cac36bb8cb9c7b3e97ffae5a7c05c72594c88d8')

    # https://savannah.gnu.org/support/?110396
    patch('https://git.savannah.gnu.org/cgit/autoconf.git/patch/?id=05972f49ee632cd98057a3caf82ebfb9574846da',
          sha256='eaa3f69d927a853313a0b06e2117c51adab6377a2278549b05abc5df93643e16',
          when='@2.70')
    # Apply long-time released and already in-use upstream patches to fix test cases:
    # tests/foreign.at (Libtool): Be tolerant of 'quote' replacing the older `quote'
    patch('http://mirrors.mit.edu/gentoo-portage/sys-devel/autoconf/files/autoconf-2.69-fix-libtool-test.patch',
          sha256='7793209b33013dc0f81208718c68440c5aae80e7a1c4b8d336e382525af791a7',
          when='@2.69')
    # Fix bin/autoscan.in for current perl releases (reported already in January 2013)
    patch('http://mirrors.mit.edu/gentoo-portage/sys-devel/autoconf/files/autoconf-2.69-perl-5.26.patch',
          sha256='35c449281546376449766f92d49fc121ca50e330e60fefcfc9be2af3253082c2',
          when='@2.62:2.69 ^perl@5.17:')
    # Fix bin/autoheader.in for current perl relases not having "." in @INC:
    patch('http://mirrors.mit.edu/gentoo-portage/sys-devel/autoconf/files/autoconf-2.69-perl-5.26-2.patch',
          sha256='a49dd5bac3b62daa0ff688ab4d508d71dbd2f4f8d7e2a02321926346161bf3ee',
          when='@2.62:2.69 ^perl@5.17:')

    # Note: m4 is not a pure build-time dependency of autoconf. m4 is
    # needed when autoconf runs, not only when autoconf is built.
    depends_on('m4@1.4.6:', type=('build', 'run'))
    depends_on('perl', type=('build', 'run'))

    build_directory = 'spack-build'

    tags = ['build-tools']

    executables = [
        '^autoconf$', '^autoheader$', '^autom4te$', '^autoreconf$',
        '^autoscan$', '^autoupdate$', '^ifnames$'
    ]

    @classmethod
    def determine_version(cls, exe):
        """Extract the version reported by an installed autotool binary."""
        output = Executable(exe)('--version', output=str, error=str)
        match = re.search(r'\(GNU Autoconf\)\s+(\S+)', output)
        return match.group(1) if match else None

    def patch(self):
        # The full perl shebang might be too long; we have to fix this here
        # because autom4te is called during the build
        patched_file = 'bin/autom4te.in'

        # We save and restore the modification timestamp of the file to prevent
        # regeneration of the respective man page:
        with keep_modification_time(patched_file):
            filter_file('^#! @PERL@ -w', '#! /usr/bin/env perl', patched_file)
        if self.version == Version('2.62'):
            # skip help2man for patched autoheader.in and autoscan.in
            touch('man/autoheader.1')
            touch('man/autoscan.1')

    # make installcheck would execute the testsuite a 2nd time, skip it
    def installcheck(self):
        pass

    @run_after('install')
    def filter_sbang(self):
        # We have to do this after install because otherwise the install
        # target will try to rebuild the binaries (filter_file updates the
        # timestamps)

        # Revert sbang, so Spack's sbang hook can fix it up
        filter_file('^#! /usr/bin/env perl',
                    '#! {0} -w'.format(self.spec['perl'].command.path),
                    self.prefix.bin.autom4te,
                    backup=False)

    def _make_executable(self, name):
        return Executable(join_path(self.prefix.bin, name))

    def setup_dependent_package(self, module, dependent_spec):
        # Autoconf is very likely to be a build dependency,
        # so we add the tools it provides to the dependent module
        executables = ['autoconf', 'autoheader', 'autom4te', 'autoreconf',
                       'autoscan', 'autoupdate', 'ifnames']
        for name in executables:
            setattr(module, name, self._make_executable(name))
1.328125
1
src/wallet/web/schemas/categories.py
clayman-micro/wallet
2
13077
<gh_stars>1-10 from aiohttp_micro.web.handlers.openapi import PayloadSchema, ResponseSchema from marshmallow import fields, post_load, Schema from wallet.core.entities.categories import CategoryFilters from wallet.web.schemas.abc import CollectionFiltersSchema class CategorySchema(Schema): key = fields.Int(required=True, data_key="id", description="Category id") name = fields.Str(required=True, description="Category name") class CategoriesResponseSchema(ResponseSchema): """Categories list.""" categories = fields.List(fields.Nested(CategorySchema), required=True, description="Categories") class CategoriesFilterSchema(CollectionFiltersSchema): """Filter categories list.""" @post_load def make_payload(self, data, **kwargs): return CategoryFilters(user=self.context["user"]) class ManageCategoryPayloadSchema(PayloadSchema): """Add new category.""" name = fields.Str(required=True, description="Category name") class CategoryResponseSchema(ResponseSchema): """Get category info.""" category = fields.Nested(CategorySchema, required=True)
2.1875
2
scons_gbd_docs/Gbd/Docs/SConscript.py
ASoftTech/Scons.Gbd.Docs
0
13078
<filename>scons_gbd_docs/Gbd/Docs/SConscript.py SConscript('Mkdocs/Common/SConscript.py') SConscript('Pandoc/Common/SConscript.py') SConscript('Doxygen/Common/SConscript.py')
1.226563
1
seg/segmentor/tools/module_runner.py
Frank-Abagnal/HRFormer
254
13079
<gh_stars>100-1000 #!/usr/bin/env python # -*- coding:utf-8 -*- # Author: <NAME>(<EMAIL>) # Some methods used by main methods. from __future__ import absolute_import from __future__ import division from __future__ import print_function import math import os from collections import OrderedDict import torch import torch.nn as nn from torch.nn.parallel.scatter_gather import gather as torch_gather from lib.extensions.parallel.data_parallel import DataParallelModel from lib.utils.tools.logger import Logger as Log from lib.utils.distributed import get_rank, is_distributed class ModuleRunner(object): def __init__(self, configer): self.configer = configer self._init() def _init(self): self.configer.add(['iters'], 0) self.configer.add(['last_iters'], 0) self.configer.add(['epoch'], 0) self.configer.add(['last_epoch'], 0) self.configer.add(['max_performance'], 0.0) self.configer.add(['performance'], 0.0) self.configer.add(['min_val_loss'], 9999.0) self.configer.add(['val_loss'], 9999.0) if not self.configer.exists('network', 'bn_type'): self.configer.add(['network', 'bn_type'], 'torchbn') # if self.configer.get('phase') == 'train': # assert len(self.configer.get('gpu')) > 1 or self.configer.get('network', 'bn_type') == 'torchbn' Log.info('BN Type is {}.'.format(self.configer.get('network', 'bn_type'))) def to_device(self, *params, force_list=False): if is_distributed(): device = torch.device('cuda:{}'.format(get_rank())) else: device = torch.device('cpu' if self.configer.get('gpu') is None else 'cuda') return_list = list() for i in range(len(params)): return_list.append(params[i].to(device)) if force_list: return return_list else: return return_list[0] if len(params) == 1 else return_list def _make_parallel(self, net): if is_distributed(): local_rank = get_rank() return torch.nn.parallel.DistributedDataParallel( net, device_ids=[local_rank], output_device=local_rank, find_unused_parameters=True ) if len(self.configer.get('gpu')) == 1: self.configer.update(['network', 
'gathered'], True) return DataParallelModel(net, gather_=self.configer.get('network', 'gathered')) def load_net(self, net): net = self.to_device(net) net = self._make_parallel(net) if not is_distributed(): net = net.to(torch.device('cpu' if self.configer.get('gpu') is None else 'cuda')) net.float() if self.configer.get('network', 'resume') is not None: Log.info('Loading checkpoint from {}...'.format(self.configer.get('network', 'resume'))) resume_dict = torch.load(self.configer.get('network', 'resume')) if 'state_dict' in resume_dict: checkpoint_dict = resume_dict['state_dict'] elif 'model' in resume_dict: checkpoint_dict = resume_dict['model'] elif isinstance(resume_dict, OrderedDict): checkpoint_dict = resume_dict else: raise RuntimeError( 'No state_dict found in checkpoint file {}'.format(self.configer.get('network', 'resume'))) if list(checkpoint_dict.keys())[0].startswith('module.'): checkpoint_dict = {k[7:]: v for k, v in checkpoint_dict.items()} # load state_dict if hasattr(net, 'module'): self.load_state_dict(net.module, checkpoint_dict, self.configer.get('network', 'resume_strict')) else: self.load_state_dict(net, checkpoint_dict, self.configer.get('network', 'resume_strict')) if self.configer.get('network', 'resume_continue'): self.configer.resume(resume_dict['config_dict']) return net @staticmethod def load_state_dict(module, state_dict, strict=False): """Load state_dict to a module. This method is modified from :meth:`torch.nn.Module.load_state_dict`. Default value for ``strict`` is set to ``False`` and the message for param mismatch will be shown even if strict is False. Args: module (Module): Module that receives the state_dict. state_dict (OrderedDict): Weights. strict (bool): whether to strictly enforce that the keys in :attr:`state_dict` match the keys returned by this module's :meth:`~torch.nn.Module.state_dict` function. Default: ``False``. 
""" unexpected_keys = [] own_state = module.state_dict() for name, param in state_dict.items(): if name not in own_state: unexpected_keys.append(name) continue if isinstance(param, torch.nn.Parameter): # backwards compatibility for serialized parameters param = param.data try: own_state[name].copy_(param) except Exception: Log.warn('While copying the parameter named {}, ' 'whose dimensions in the model are {} and ' 'whose dimensions in the checkpoint are {}.' .format(name, own_state[name].size(), param.size())) missing_keys = set(own_state.keys()) - set(state_dict.keys()) err_msg = [] if unexpected_keys: err_msg.append('unexpected key in source state_dict: {}\n'.format(', '.join(unexpected_keys))) if missing_keys: # we comment this to fine-tune the models with some missing keys. err_msg.append('missing keys in source state_dict: {}\n'.format(', '.join(missing_keys))) err_msg = '\n'.join(err_msg) if err_msg: if strict: raise RuntimeError(err_msg) else: Log.warn(err_msg) def save_net(self, net, save_mode='iters'): if is_distributed() and get_rank() != 0: return state = { 'config_dict': self.configer.to_dict(), 'state_dict': net.state_dict(), } if self.configer.get('checkpoints', 'checkpoints_root') is None: checkpoints_dir = os.path.join(self.configer.get('project_dir'), self.configer.get('checkpoints', 'checkpoints_dir')) else: checkpoints_dir = os.path.join(self.configer.get('checkpoints', 'checkpoints_root'), self.configer.get('checkpoints', 'checkpoints_dir')) if not os.path.exists(checkpoints_dir): os.makedirs(checkpoints_dir) latest_name = '{}_latest.pth'.format(self.configer.get('checkpoints', 'checkpoints_name')) torch.save(state, os.path.join(checkpoints_dir, latest_name)) if save_mode == 'performance': if self.configer.get('performance') > self.configer.get('max_performance'): latest_name = '{}_max_performance.pth'.format(self.configer.get('checkpoints', 'checkpoints_name')) torch.save(state, os.path.join(checkpoints_dir, latest_name)) 
self.configer.update(['max_performance'], self.configer.get('performance')) elif save_mode == 'val_loss': if self.configer.get('val_loss') < self.configer.get('min_val_loss'): latest_name = '{}_min_loss.pth'.format(self.configer.get('checkpoints', 'checkpoints_name')) torch.save(state, os.path.join(checkpoints_dir, latest_name)) self.configer.update(['min_val_loss'], self.configer.get('val_loss')) elif save_mode == 'iters': if self.configer.get('iters') - self.configer.get('last_iters') >= \ self.configer.get('checkpoints', 'save_iters'): latest_name = '{}_iters{}.pth'.format(self.configer.get('checkpoints', 'checkpoints_name'), self.configer.get('iters')) torch.save(state, os.path.join(checkpoints_dir, latest_name)) self.configer.update(['last_iters'], self.configer.get('iters')) elif save_mode == 'epoch': if self.configer.get('epoch') - self.configer.get('last_epoch') >= \ self.configer.get('checkpoints', 'save_epoch'): latest_name = '{}_epoch{}.pth'.format(self.configer.get('checkpoints', 'checkpoints_name'), self.configer.get('epoch')) torch.save(state, os.path.join(checkpoints_dir, latest_name)) self.configer.update(['last_epoch'], self.configer.get('epoch')) else: Log.error('Metric: {} is invalid.'.format(save_mode)) exit(1) def freeze_bn(self, net, syncbn=False): for m in net.modules(): if isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d): m.eval() if syncbn: from lib.extensions import BatchNorm2d, BatchNorm1d if isinstance(m, BatchNorm2d) or isinstance(m, BatchNorm1d): m.eval() def clip_grad(self, model, max_grad=10.): """Computes a gradient clipping coefficient based on gradient norm.""" total_norm = 0 for p in model.parameters(): if p.requires_grad: modulenorm = p.grad.data.norm() total_norm += modulenorm ** 2 total_norm = math.sqrt(total_norm) norm = max_grad / max(total_norm, max_grad) for p in model.parameters(): if p.requires_grad: p.grad.mul_(norm) def gather(self, outputs, target_device=None, dim=0): r""" Gathers tensors from different 
GPUs on a specified device (-1 means the CPU). """ if not self.configer.get('network', 'gathered'): if target_device is None: target_device = list(range(torch.cuda.device_count()))[0] return torch_gather(outputs, target_device, dim=dim) else: return outputs def get_lr(self, optimizer): return [param_group['lr'] for param_group in optimizer.param_groups] def warm_lr(self, iters, scheduler, optimizer, backbone_list=(0, )): """Sets the learning rate # Adapted from PyTorch Imagenet example: # https://github.com/pytorch/examples/blob/master/imagenet/main.py """ if not self.configer.exists('lr', 'is_warm') or not self.configer.get('lr', 'is_warm'): return warm_iters = self.configer.get('lr', 'warm')['warm_iters'] if iters < warm_iters: if self.configer.get('lr', 'warm')['freeze_backbone']: for backbone_index in backbone_list: optimizer.param_groups[backbone_index]['lr'] = 0.0 else: lr_ratio = (self.configer.get('iters') + 1) / warm_iters base_lr_list = scheduler.get_lr() for backbone_index in backbone_list: optimizer.param_groups[backbone_index]['lr'] = base_lr_list[backbone_index] * (lr_ratio ** 4)
2.015625
2
scripts/si_figs.py
gbirzu/density-dependent_dispersal_growth
0
13080
<filename>scripts/si_figs.py import numpy as np import matplotlib as mpl import matplotlib.pyplot as plt import pickle import scipy.stats as stats data_path = '../data/het_average.dat' output_dir = '../figures/' # Configure matplotlib environment helvetica_scale_factor = 0.92 # rescale Helvetica to other fonts of same size mpl.rcParams['font.size'] = 10 * helvetica_scale_factor mpl.rcParams['font.family'] = 'sans-serif' mpl.rcParams['font.sans-serif'] = 'Helvetica Neue' mpl.rcParams['axes.titlesize'] = 12 * helvetica_scale_factor single_col_width = 3.43 # = 8.7 cm double_col_width = 7.01 # = 17.8 cm def plot_het_comparison(het_averages): time = het_averages['time'] het_global = het_averages['global'] het_local = het_averages['local'] fig = plt.figure(figsize=(single_col_width, single_col_width)) ax = fig.add_subplot(111) ax.set_xlabel('time, t', fontweight='bold') ax.set_ylabel('heterozygosity, H', fontweight='bold') ax.set_yscale('log') ax.plot(time, het_global, ls='-', lw=2, c='k') ax.plot(time, het_local, ls='', marker='o', markevery=5, markersize=5, markeredgecolor='r', markerfacecolor='none') plt.tight_layout() plt.savefig(output_dir + 'het_comparison.pdf') def fit_Ne(het_averages, averaging='global'): time = het_averages['time'] het = het_averages[averaging] slope, intercept, rvalue, pvalue, stderr = stats.linregress(time, np.log(het)) return 1 / abs(slope) if __name__ == '__main__': with open(data_path, 'rb') as f_in: het_averages = pickle.load(f_in) plot_het_comparison(het_averages) ne_global = fit_Ne(het_averages, averaging='global') ne_local = fit_Ne(het_averages, averaging='local') print('Ne (global averaging): ', ne_global) print('Ne (local averaging): ', ne_local) print('Ne difference: ', 100 * (ne_global - ne_local) / ne_global, '%')
2.59375
3
language.py
sanine-a/dream-atlas
0
13081
<gh_stars>0 from random import random, choice, seed, shuffle, randint from math import ceil import copy target = [ 2, 2, 3, 1, 4, 5 ] consonants_base = [ 'p', 't', 'k', 'm', 'n' ] vowels = [ [ 'a', 'i', 'u' ], [ 'a', 'i', 'u', 'e', 'o' ], [ 'a', 'A', 'i', 'I', 'u', 'U', 'e', 'E', 'o', 'O' ] ] consonants_extra = [ 'b', 'd', 'j', 's', 'z', 'y', 'q', 'G', '?', 'N', 'r', 'f', 'v', 'T', 'D', 'S', 'Z', 'x', 'h', 'w', 'l', 'C' ] sibilants = [ ['s',], [ 's', 'S' ], ['s', 'S', 'f'] ] liquids = [ ['r'], ['l'], ['r','l'], ['w','y'], ['r','l','w','y'] ] orthography1 = { 'name':'nordic', 'j':'dz', 'y':'j', 'T':'th', 'D':'ð', 'S':'sh', 'Z':'zh', 'N':'ng', '?':"'", 'G':'q', 'C':'ch', 'A':'å', 'E':'ë', 'I':'ï', 'O':'ö', 'U':'ü' } orthography2 = { 'name':'czech', 'T':'th', 'D':'th', 'S':'š', 'Z':'ž', 'C':'č', 'G':'q', 'N':'ng', '?':'-', 'A':'á', 'E':'ě', 'I':'ý', 'O':'ó', 'U':'ú' } orthography3 = { 'name':'french', 'T':'th', 'D':'th', 'S':'ch', 'G':'gh', 'C':'tc', '?':"'", 'N':'ng', 'Z':'z', 'k':'c', 'A':'â', 'E':'ê', 'I':'î', 'O':'ô', 'U':'û' } orthography4 = { 'name':'mexica', 'k':'c', 'G':'gh', 'N':'ng', 'T':'th', 'D':'th', 'S':'x', 'C':'ch', '?':"'", 'Z':'zh', 'A':'á', 'E':'é', 'I':'í', 'O':'ó', 'U':'ú' } orthographies = ( orthography1, orthography2, orthography3, orthography4 ) syllables = ( [ 'CV', ], [ 'CV', 'V' ], [ 'CV', 'CVC' ], [ 'CV', 'CVC', 'V' ], [ 'CVC', ], [ 'CVC', 'CRVC', 'CV', 'CRV' ], [ 'CVC', 'CRVC', 'CVRC', 'CV', 'CRV' ], [ 'CVC', 'CRVC', 'CVCC', 'CRVCC', 'CV', 'CRV' ], [ 'CVC', 'CRVC', 'CVRC', 'CVCC', 'CRVCC', 'CV', 'CRV' ], [ 'CV', 'CVC', 'SCV', 'SCVC' ], [ 'CVC', 'CVCC', 'SVC', 'SVCC', 'CV', 'SCV' ], [ 'CVC', 'CVCC', 'CRVC', 'SCVC', 'SCRVC', 'CV', 'CRV', 'SCV', 'SCRV' ] ) government = [ 'Republic of ', 'Kingdom of ', 'Confederacy of ', 'Satrapy of ','Empire of ' ] class morpheme: def __init__(self,morpheme,prefix): self.morpheme = morpheme self.prefix = prefix def elem(obj, items): for item in items: if item == obj: return True return False def 
biased_choice(items, bias=2): i = int( random()**bias * len(items) ) return items[i] class language: def __init__(self): # get phonemes self.phonemes = {} self.phonemes['V'] = choice(vowels) shuffle(self.phonemes['V']) self.phonemes['R'] = choice(liquids) self.phonemes['S'] = choice(sibilants) more_consonants = [] for i in range(0, int(random()*len(consonants_extra))): c = choice(consonants_extra) if elem(c,more_consonants): break else: more_consonants.append(c) #shuffle(more_consonants) self.phonemes['C'] = consonants_base + more_consonants shuffle(self.phonemes['C']) #get syllables, orthography, and word length self.syllables = choice(syllables) self.orthography = choice(orthographies) self.orthography[';'] = '' # skip syllable separators self.wordtarget = biased_choice(target,5) # basic morphemes & words if random() >= 0.3: self.prefix = False else: self.prefix = True self.the = self.syllable() self.of = self.syllable() self.landm = [] for i in range(randint(3,6)): self.landm.append(self.shortword()) self.waterm = [] for i in range(randint(3,6)): self.waterm.append(self.shortword()) self.citym = [] for i in range(randint(3,6)): self.citym.append(self.shortword()) def derive(self): derived = copy.deepcopy(self) if random() > 0.7: shuffle(derived.syllables) lm = 0 wm = 0 cm = 0 the = False of = False if random() > 0.5: for i in range(randint(1,4)): c = choice(derived.phonemes['C']) if not elem(c,consonants_base): derived.phonemes['C'].remove(c) if elem(c,derived.the): the = True if elem(c,derived.of): of = True for m in derived.landm: if elem(c,m): derived.landm.remove(m) lm += 1 for m in derived.waterm: if elem(c,m): derived.waterm.remove(m) wm += 1 for m in derived.citym: if elem(c,m): derived.citym.remove(m) cm += 1 if random() > 0.5: for i in range(randint(1,4)): index = randint(5,len(derived.phonemes['C'])) derived.phonemes['C'].insert(index,choice(consonants_extra)) if the: derived.the = derived.syllable() if of: derived.of = derived.syllable() for i in 
range(lm): derived.landm.append(derived.shortword()) for i in range(wm): derived.waterm.append(derived.shortword()) for i in range(cm): derived.citym.append(derived.shortword()) return derived def orthographic(self,string): outstring = "" for c in string: try: outstring += self.orthography[c] except KeyError: outstring += c return outstring def syllable(self): syl = "" stype = biased_choice(self.syllables) for letter in stype: try: syl = syl+biased_choice(self.phonemes[letter]) except KeyError: break return syl+';' def word(self,short=False): w = "" N = randint(ceil(.5*self.wordtarget),ceil(1.5*self.wordtarget)) if short and N >= 2: N -= 1 for i in range(N): w = w+self.syllable() return w def shortword(self): sw = "" for i in range(randint(1,ceil(self.wordtarget))): sw += self.syllable() return sw def gen_name(self,morph): if random() < 0.1: return self.word() + ' ' + self.of + ' ' + self.word() if random() < 0.1: if self.prefix: return self.word() + ' ' + self.the else: return self.the + ' ' + self.word() m = '' if random() > 0.5: m = choice(morph) w = self.word(bool(m)) if self.prefix: return m + w else: return w + m def cityname(self): return self.gen_name(self.citym) def landname(self): return self.gen_name(self.landm) def watername(self): return self.gen_name(self.waterm) def countryname(self): if random() > 0.7: return choice(government) + self.orthographic(self.landname()).title() else: return self.orthographic(self.landname()).title() ''' lang1 = language() for j in range(10): print('Language '+str(j+1)) for i in range(5): word = lang1.cityname() print(lang1.orthographic(word).title()) lang1 = lang1.derive() print(' ') '''
2.5625
3
week/templatetags/sidebar_data.py
uno-isqa-8950/fitgirl-inc
6
13082
from django import template from week.models import SidebarContentPage,SidebarImagePage register = template.Library() @register.inclusion_tag('week/announcement.html') def sidebar(): sidebar_data = SidebarContentPage.objects.get() return {'sidebar_data':sidebar_data} @register.inclusion_tag('week/advertisement.html') def sidebarimage(): sidebar_image = SidebarImagePage.objects.get() return {'sidebar_image':sidebar_image}
1.71875
2
tests/functional/Hydro/AcousticWave/CSPH_mod_package.py
jmikeowen/Spheral
22
13083
#------------------------------------------------------------------------------- # A mock physics package to mess around with the CRKSPH corrections. #------------------------------------------------------------------------------- from Spheral1d import * class CRKSPH_mod_package(Physics): def __init__(self): Physics.__init__(self) return def evaluateDerivatives(self, t, dt, db, state, derivs): return def dt(self, db, state, derivs, t): return pair_double_string(1e100, "No vote") def registerState(self, dt, state): return def registerDerivatives(self, db, derivs): return def label(self): return "CRKSPH_mod_package" def initialize(self, t, dt, db, state, derivs): # Grab the CRKSPH arrays. A0_fl = state.scalarFields(HydroFieldNames.A0_CRKSPH) A_fl = state.scalarFields(HydroFieldNames.A_CRKSPH) B_fl = state.vectorFields(HydroFieldNames.B_CRKSPH) A0 = A0_fl[0] A = A_fl[0] B = B_fl[0] print "A", A.internalValues() return
2.25
2
fbm-scraper.py
cbdelavenne/fb-messenger-media-scraper
8
13084
import os import requests import time import uuid import configparser import datetime import fbchat import re from fbchat import Client, ImageAttachment from fbchat import FBchatException from pathlib import Path politeness_index = 0.5 # ;) epoch = datetime.datetime(1970, 1, 1) # Hack to get the login to work, see: https://github.com/fbchat-dev/fbchat/issues/615#issuecomment-716089816 fbchat._state.FB_DTSG_REGEX = re.compile(r'"name":"fb_dtsg","value":"(.*?)"') def download_file_from_url(url, target_path): """ Download image from a given URL to a specified target path. :param url: URL of file to download :param target_path: Local target path to save the file :type url: str :type target_path: str """ if url is not None: r = requests.get(url) with open(target_path, 'wb') as f: print('\tDownloading image to {path}'.format(path=target_path)) f.write(r.content) def convert_date_to_epoch(date, as_int=True): """ Convert a given date string to epoch (int in milliseconds) :param date: Date string (preferred format %Y-%m-%d) :param as_int: Return unix timestamp as an integer value, instead of a float :type date: str :type as_int: int :return: int """ try: dt = datetime.datetime.strptime(date, '%Y-%m-%d') res = ((dt - epoch).total_seconds() * 1000.0) # convert to milliseconds return int(res) if as_int else res except ValueError: return None def convert_epoch_to_datetime(timestamp, dt_format='%Y-%m-%d_%H.%M.%S'): """ Convert epoch (unix time in ms) to a datetime string :param timestamp: Unix time in ms :param dt_format: Format of datetime string :type timestamp: str :type dt_format: str :return: """ s = int(timestamp) / 1000.0 dt_str = datetime.datetime.fromtimestamp(s).strftime(dt_format) return dt_str if __name__ == '__main__': config_path = Path('.') / 'config.ini' if os.path.exists(config_path) is False: raise Exception("Please create config.ini under this script's current directory") # Load config file config = configparser.ConfigParser() config.read(config_path) 
download_path = config.get('Download', 'path') if os.path.exists(download_path) is False: raise Exception("The path specified in download_path does not exist ({path}). Please specify a valid path in " "config.ini".format(path=download_path)) # Initialize FB Client fb_email = config.get('Credentials', 'email') fb_pw = config.get('Credentials', 'password') user_agent = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.75 Safari/537.36" fb_client = Client(fb_email, fb_pw, user_agent=user_agent) # Search for latest threads thread_search_limit = int(config.get('Threads', 'search_limit')) thread_search_before = convert_date_to_epoch(config.get('Threads', 'before_date')) if thread_search_before is not None: threads = fb_client.fetchThreadList(limit=thread_search_limit, before=thread_search_before) else: threads = fb_client.fetchThreadList(limit=thread_search_limit) # Find correct thread for given user URL my_thread = None friend_url = config.get('Friend', 'url') for thread in threads: if hasattr(thread, 'url') and (thread.url == friend_url): my_thread = thread break # Get Messages for my_thread if my_thread is not None: thread_message_count = my_thread.message_count thread_message_name = my_thread.name print('Found {count} messages in thread with {friend_name}'.format(count=thread_message_count, friend_name=thread_message_name)) message_before_date = config.get('Messages', 'before_date') message_search_limit = int(config.get('Messages', 'search_limit')) message_search_before = convert_date_to_epoch(message_before_date) if message_search_limit > thread_message_count: message_search_limit = thread_message_count print('\tWarning: Message search limit was greater than the total number of messages in thread.\n') if message_search_before is not None: messages = fb_client.fetchThreadMessages(my_thread.uid, limit=message_search_limit, before=message_search_before) print('Searching for images in the {message_limit} messages 
sent before {before_date}...'.format( message_limit=message_search_limit, before_date=message_before_date)) else: messages = fb_client.fetchThreadMessages(my_thread.uid, limit=message_search_limit) print('Searching for images in the last {message_limit} messages...'.format( message_limit=message_search_limit)) sender_id = None if config.getboolean('Media', 'sender_only'): sender_id = my_thread.uid print('\tNote: Only images sent by {friend_name} will be downloaded (as specified by sender_only in your ' 'config.ini)'.format(friend_name=thread_message_name)) # Extract Image attachments' full-sized image signed URLs (along with their original file extension) total_count = 0 skip_count = 0 full_images = [] last_message_date = None print('\n') extension_blacklist = str.split(config.get('Media', 'ext_blacklist'), ',') for message in messages: message_datetime = convert_epoch_to_datetime(message.timestamp) if len(message.attachments) > 0: if (sender_id is None) or (sender_id == message.author): for attachment in message.attachments: if isinstance(attachment, ImageAttachment): try: attachment_ext = str.lower(attachment.original_extension) if attachment_ext not in extension_blacklist: full_images.append({ 'extension': attachment_ext, 'timestamp': message_datetime, 'full_url': fb_client.fetchImageUrl(attachment.uid) }) print('+', sep=' ', end='', flush=True) else: skip_count += 1 print('-', sep=' ', end='', flush=True) total_count += 1 except FBchatException: pass # ignore errors last_message_date = message_datetime # Download Full Images if len(full_images) > 0: images_count = len(full_images) print('\n\nFound a total of {total_count} images. 
Skipped {skip_count} images that had a blacklisted ' 'extension'.format(total_count=total_count, skip_count=skip_count)) print('Attempting to download {count} images...................\n'.format(count=images_count)) for full_image in full_images: friend_name = str.lower(my_thread.name).replace(' ', '_') file_uid = str(uuid.uuid4()) file_ext = full_image['extension'] file_timestamp = full_image['timestamp'] img_url = full_image['full_url'] image_path = ''.join([download_path, '\\', 'fb-image-', file_uid, '-', friend_name, '-', file_timestamp, '.', file_ext]) download_file_from_url(img_url, image_path) # Sleep half a second between file downloads to avoid getting flagged as a bot time.sleep(politeness_index) else: print('No images to download in the last {count} messages'.format(count=message_search_limit)) # Reminder of last message found print('\nLast message scanned for image attachments was dated: {last_message_date}'.format( last_message_date=last_message_date)) else: print('Thread not found for URL provided')
2.75
3
guru/users/models.py
Jeromeschmidt/Guru
0
13085
from django.contrib.auth.models import AbstractUser from django.db.models import (BooleanField, CASCADE, CharField, FloatField, IntegerField, ManyToManyField, Model, OneToOneField, PositiveSmallIntegerField) from django.contrib.postgres.fields import ArrayField from django.urls import reverse from django.utils.translation import ugettext_lazy as _ from django.contrib.auth.models import User class User(AbstractUser): # First Name and Last Name do not cover name patterns # around the globe. name = CharField(_("Name of User"), blank=True, max_length=255) # is_customer = BooleanField(default=True) # # user = OneToOneField(User, on_delete=CASCADE, primary_key=True) skills = ArrayField(CharField(max_length=10, blank=True), size=8, null=True, ) # ArrayField(_("A list of skills that user can help with"), null=True, # base_field=CharField(max_length=255)) classes_taken = ArrayField(null=True, base_field=CharField(max_length=255), size=20) is_teachingassistant = BooleanField(default=False) rating = IntegerField(null=True, blank=True) avg_reponse = FloatField(null=True, blank=True) is_online = BooleanField(default=False) messages_received = IntegerField(null=True, blank=True) bio = CharField(blank=True, max_length=500) def get_absolute_url(self): return reverse("users:detail", kwargs={"username": self.username})
2.34375
2
movie.py
jmclinn/mapdraw
0
13086
import os,time ## File Variable (USER INPUT) ## ========================== ## if multiple files are being accessed to create movie... ## ...specify the beginning and ending of the file names... ## ...and the date list text file in the variables below ## Please use True or False to set whether multiple files will be accessed for movie file_is_variable = False ## If file_is_variable = True ## -------------------------- ## make sure to leave trailing slash '/' on 'path_to_files' path_to_files = '/path/to/files/' ## For series of files with similar prefixes (file_part1) and filetypes (file_part2) file_part1 = 'pre.fixes.' file_part2 = '.nc' ## location of file listing (with each entry on a new line) the variable part of the filename dates_list_text_file = '/path/to/file/variable_list.txt' ## If file_is_variable = False ## --------------------------- #file = '/path/to/single/file.nc' file = '/Users/Jon/Documents/other_projects/Aluie/visuals/1-12/mapdraw/sgs.nc' ## Variables (USER INPUT) ## ====================== ## all variable lists must be the same length ## set unused variables equal to '_empty_' ## if variable requires double-quotes on command line include them --> '" ... "' ## ----------------------------------------------------------------------------- data = 'sgsflux' #cannot be '_empty_' lat = 'u_lat' #cannot be '_empty_' lon = 'u_lon' #cannot be '_empty_' depth = 'w_dep,9' #cannot be '_empty_' mask = '-1e33,#000000' maxr = '100' #use for 'max' minr = '-100' #use for 'min' norm = '_empty_' colors = '"0:#0000AA,45:#0000FF,50:#FFFFFF,55:#FF0000,100:#AA0000"' clr_min_max = '_empty_' title = '_empty_' crop = '_empty_' lines = '_empty_' ## Sphere (for mapping onto Earth's spherical representation) ## ---------------------------------------------------------- ## For use of 'sphere' set to True. If not leave False. 
sphere_mapping = False ## Number of images (must match other variable list lengths from above) sphere_frames = 3 ## Start and stop points of sphere rotation (leave start/stop the same for no rotation in lat/lon) sphere_lon_start = -10 sphere_lon_stop = 10 sphere_lat_start = -10 sphere_lat_stop = 10 ## 'zoom' argument described in README file (leave False if zoom = 1) zoom = 1.5 ## Primary Variable (USER INPUT) ## ============================= ## choose from the variables above ## specify without quotes ## if not a list will only output single result ## -------------------------------------------- primary_variable = file ## Save Location (USER INPUT) ## ========================== ## provide folder location (without filename(s)) ## --------------------------------------------- save = '/Users/Jon/Desktop/' ## Image Filename Prefix (USER INPUT) ## ================================== ## prefix for output filenames before auto-incremented counter ## ----------------------------------------------------------- file_prefix = 'img_' ## Image Counter Start (USER INPUT) ## ================================ ## start of auto-incremented counter ## --------------------------------- count_start = 0 ## Image File Type (USER INPUT) ## ============================ ## ex: '.png' or '.jpg' ## -------------------- img_type = '.png' ## Display Toggle (USER INPUT) ## ========================== ## toggle if each image displays in the loop ## use 'yes' or 'no' to control display preference ## ----------------------------------------------- display = 'no' # # # # # # # # # # # # # # # # # # # # # # # # # # ---- NO USER INPUTS AFTER THIS POINT ---- # # # # # # # # # # # # # # # # # # # # # # # # # # ## If 'file' is variable this establishes list of files to loop through (Do Not Alter) ## =================================================================================== if file_is_variable: file1 = [] file0 = open(dates_list_text_file,'r').read().splitlines() for line in file0: 
file1.append(str(path_to_files) + str(file_part1) + str(line) + str(file_part2)) file = file1 primary_variable = file ## Parsing of 'sphere' rotation inputs (Do Not Alter) ## ================================================== if sphere_mapping: lon_step = ( sphere_lon_stop - sphere_lon_start ) / ( sphere_frames - 1 ) lat_step = ( sphere_lat_stop - sphere_lat_start ) / ( sphere_frames - 1 ) sphere = [] for i in range(sphere_frames): sphere.append(str(sphere_lon_start + lon_step * i)+','+str(sphere_lat_start + lat_step * i)) primary_variable = sphere ## Defining & Executing Command Expression (Do Not Alter) ## ====================================================== displayx = 'display ' + display command = displayx if title != '_empty_': titlex = ' title ' + str(title) command = command + titlex if lines != '_empty_': linesx = ' lines ' + str(lines) command = command + linesx if type(primary_variable) is list: loop_len = len(primary_variable) else: loop_len = 1 for i in range(loop_len): savex = ' save ' + str(save) + str(file_prefix) + str(i + int(count_start)) + str(img_type) command = command + savex if type(file) is list: filei = file[i] else: filei = file if i != '_empty_': filex = ' file ' + str(filei) command = command + filex if type(data) is list: datai = data[i] else: datai = data if datai != '_empty_': datax = ' data ' + str(datai) command = command + datax if type(lat) is list: lati = lat[i] else: lati = lat if lati != '_empty_': latx = ' lat ' + str(lati) command = command + latx if type(lon) is list: loni = lon[i] else: loni = lon if loni != '_empty_': lonx = ' lon ' + str(loni) command = command + lonx if type(depth) is list: depthi = depth[i] else: depthi = depth if depthi != '_empty_': depthx = ' depth ' + str(depthi) command = command + depthx if type(mask) is list: maski = mask[i] else: maski = mask if maski != '_empty_': maskx = ' mask ' + str(maski) command = command + maskx if type(maxr) is list: maxri = maxr[i] else: maxri = maxr if maxri != 
'_empty_': maxrx = ' max ' + str(maxri) command = command + maxrx if type(minr) is list: minri = minr[i] else: minri = minr if minri != '_empty_': minrx = ' min ' + str(minri) command = command + minrx if type(norm) is list: normi = norm[i] else: normi = norm if normi != '_empty_': normx = ' norm ' + str(normi) command = command + normx if type(crop) is list: cropi = crop[i] else: cropi = crop if cropi != '_empty_': cropx = ' crop ' + str(cropi) command = command + cropx if type(colors) is list: colorsi = colors[i] else: colorsi = colors if colorsi != '_empty_': colorsx = ' colors ' + str(colorsi) command = command + colorsx if type(clr_min_max) is list: clr_min_maxi = clr_min_max[i] else: clr_min_maxi = clr_min_max if clr_min_maxi != '_empty_': clr_min_maxx = ' clr_min_max ' + str(clr_min_maxi) command = command + clr_min_maxx if sphere_mapping: spherei = sphere[i] spherex = ' sphere ' + str(spherei) command = command + spherex if type(zoom) is list: zoomi = zoom[i] elif zoom: zoomi = zoom if zoom: zoomx = ' zoom ' + str(zoomi) command = command + zoomx time0 = time.time() os.system('python map.py ' + command) if display == 'no': print str(i) + ' - ' + str(round((time.time() - time0),2)) + ' sec'
3.140625
3
gaetk2/tools/auth0tools.py
mdornseif/appengine-toolkit2
1
13087
#!/usr/bin/env python # -*- coding: utf-8 -*- """gaetk2.tools.auth0.py Tools for working with auth0 Created by <NAME> on 2017-12-05. Copyright 2017 HUDROA. MIT Licensed. """ from __future__ import unicode_literals import logging from google.appengine.api import memcache from auth0.v3.authentication import GetToken from auth0.v3.exceptions import Auth0Error from auth0.v3.management import Auth0 from gaetk2.config import gaetkconfig logger = logging.getLogger(__name__) def get_auth0_access_token(): """Get a Token for the Management-API.""" ret = memcache.get('get_auth0_access_token()') if not ret: assert gaetkconfig.AUTH0_DOMAIN != '*unset*' assert gaetkconfig.AUTH0_CLIENT_ID != '*unset*' get_token = GetToken(gaetkconfig.AUTH0_DOMAIN) token = get_token.client_credentials( gaetkconfig.AUTH0_CLIENT_ID, gaetkconfig.AUTH0_CLIENT_SECRET, 'https://{}/api/v2/'.format(gaetkconfig.AUTH0_DOMAIN)) ret = token['access_token'] memcache.set('get_auth0_access_token()', ret, token['expires_in'] / 2) return ret def create_from_credential(credential): """Create an entry in the Auth0.DefaultDatabase for a credential.""" if credential.external_uid: return if not credential.secret: return if not credential.email: return if not getattr(credential, 'name', None): credential.name = credential.text if not getattr(credential, 'name', None): credential.name = credential.org_designator auth0api = Auth0(gaetkconfig.AUTH0_DOMAIN, get_auth0_access_token()) payload = { 'connection': 'DefaultDatabase', 'email': credential.email, 'password': <PASSWORD>.<PASSWORD>, 'user_id': credential.uid, 'user_metadata': { 'name': credential.name, 'nickname': 'User fuer {}'.format(credential.org_designator) }, 'email_verified': True, 'verify_email': False, 'app_metadata': { 'org_designator': credential.org_designator, 'permissions': credential.permissions, } } newuser = None try: newuser = auth0api.users.create(payload) except Auth0Error as ex: if ex.status_code in [400, 409] and ex.message == 'The user already 
exists.': logger.info('The user already exists: %s %r %s', credential.uid, ex, payload) try: newuser = auth0api.users.get('auth0|{}'.format(credential.uid)) except: logger.warn('email collision? %s', credential.uid) # propbably we have an E-Mail Address collision. This means # several Credentials with the same E-Mail Adresses. reply = auth0api.users.list( connection='DefaultDatabase', q='email:"{}"'.format(credential.email), search_engine='v2') if reply['length'] > 0: logger.info('reply=%s', reply) other_uid = reply['users'][0]['user_id'] newuser = auth0api.users.get(other_uid) # doppelbelegung bei Auth0 notieren if newuser.get('app_metadata'): logger.debug('app_metadata=%r', newuser['app_metadata']) altd = newuser['app_metadata'].get('org_designator_alt', []) altd = list(set(altd + [credential.org_designator])) altu = newuser['app_metadata'].get('uid_alt', []) altu = list(set(altu + [credential.uid])) logger.warn('updating duplicate Auth0 %s %s %s %s', altd, altu, other_uid, newuser) auth0api.users.update( other_uid, {'app_metadata': {'org_designator_alt': altd, 'uid_alt': altu}}) else: logger.error('%r newuser = %s %s', 'auth0|{}'.format(credential.uid), newuser, ex) raise except: logger.warn('payload = %s', payload) raise if newuser is None or (newuser.get('error')): logger.warn('reply=%s payload = %s', newuser, payload) raise RuntimeError('Auth0-Fehler: %s' % newuser) logger.info('new auth0 user %s', newuser) credential.meta['auth0_user_id'] = credential.external_uid = newuser['user_id'] credential.put() return
2.0625
2
Q56MergeIntervals.py
ChenliangLi205/LeetCode
0
13088
# Definition for an interval. # class Interval: # def __init__(self, s=0, e=0): # self.start = s # self.end = e class Solution: def merge(self, intervals): """ :type intervals: List[Interval] :rtype: List[Interval] """ if len(intervals) <= 1: return intervals intervals.sort(key=lambda x: x.start) newIntervals = [intervals[0]] for i in range(1, len(intervals)): cur = intervals[i] last = newIntervals[-1] if cur.start > last.end: newIntervals.append(cur) else: last.end = max(cur.end, last.end) return newIntervals
3.640625
4
.github/scripts/check-status.py
antmicro/f4pga-arch-defs
0
13089
#!/usr/bin/env python3 from sys import argv from pathlib import Path from re import compile as re_compile PACKAGE_RE = re_compile("symbiflow-arch-defs-([a-zA-Z0-9_-]+)-([a-z0-9])") with (Path(__file__).parent.parent.parent / 'packages.list').open('r') as rptr: for artifact in rptr.read().splitlines(): m = PACKAGE_RE.match(artifact) assert m, f"Package name not recognized! {artifact}" package_name = m.group(1) if package_name == "install": package_name == "toolchain" with (Path("install") / f"symbiflow-{package_name}-latest").open("w") as wptr: wptr.write( 'https://storage.googleapis.com/symbiflow-arch-defs/artifacts/prod/' f'foss-fpga-tools/symbiflow-arch-defs/continuous/install/{argv[1]}/{artifact}' )
2.09375
2
DocOCR/urls.py
trangnm58/DocOCR
0
13090
from django.conf.urls import url, include urlpatterns = [ url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')), url(r'^api/viet_ocr/', include('viet_ocr.api.urls', namespace="viet_ocr-api")), url(r'^api/post_process/', include('post_process.api.urls', namespace="post_process-api")), url(r'^api/pre_process/', include('pre_process.api.urls', namespace="pre_process-api")), url(r'^api/doc_ocr/', include('doc_ocr.api.urls', namespace="doc_ocr-api")), ]
1.617188
2
utils/neuron/models/metrics/multi_task_metrics.py
tsingqguo/ABA
12
13091
import torch import torch.nn as nn import neuron.ops as ops from neuron.config import registry @registry.register_module class ReID_Metric(nn.Module): def __init__(self, metric_cls, metric_rank): super(ReID_Metric, self).__init__() self.metric_cls = metric_cls self.metric_rank = metric_rank def forward(self, *args): if len(args) == 2: scores = None feats, labels = args elif len(args) == 3: scores, feats, labels = args else: raise ValueError('Expected to have 2 or 3 inputs,' 'but got {}'.format(len(args))) metrics = self.metric_rank(feats, labels) if scores is not None: metrics.update(self.metric_cls(scores, labels)) return metrics
2.28125
2
lldb/packages/Python/lldbsuite/test/functionalities/data-formatter/data-formatter-synthval/myIntSynthProvider.py
medismailben/llvm-project
2,338
13092
<gh_stars>1000+ class myIntSynthProvider(object): def __init__(self, valobj, dict): self.valobj = valobj self.val = self.valobj.GetChildMemberWithName("theValue") def num_children(self): return 0 def get_child_at_index(self, index): return None def get_child_index(self, name): return None def update(self): return False def has_children(self): return False def get_value(self): return self.val class myArraySynthProvider(object): def __init__(self, valobj, dict): self.valobj = valobj self.array = self.valobj.GetChildMemberWithName("array") def num_children(self, max_count): if 16 < max_count: return 16 return max_count def get_child_at_index(self, index): return None # Keep it simple when this is not tested here. def get_child_index(self, name): return None # Keep it simple when this is not tested here. def has_children(self): return True
3
3
incremental-update.py
tarasowski/apache-spark
1
13093
<reponame>tarasowski/apache-spark from pyspark.sql import SparkSession from pyspark.sql.types import DateType from pyspark.sql.functions import col from pyspark.sql import types as t import sys from pyspark.sql.window import Window from pyspark.sql.functions import spark_partition_id from pyspark.sql import Row def show_partition_id(df): return df.select(*df.columns, spark_partition_id().alias("partition_id")).show() spark = SparkSession \ .builder \ .appName("Python Spark SQL basic example") \ .config("spark.some.config.option", "some-value") \ .getOrCreate() # https://dwbi.org/pages/75/methods-of-incremental-loading-in-data-warehouse customers = [ Row(1, "John", "Individual", "22-Mar-2012"), Row(2, "Ryan", "Individual", "22-Mar-2012"), Row(3, "Bakers", "Corporate", "23-Mar-2012"), ] sales = [ Row(1, 1, "White sheet (A4)", 100, 4.00, "22-Mar-2012"), Row(2, 1, "<NAME> (Box)", 1, 2.50, "22-Mar-2012"), Row(3, 2, "Whiteboard Maker", 1, 2.00, "22-Mar-2012"), Row(4, 3, "Letter Envelop", 200, 75.00, "23-Mar-2012"), Row(5, 1, "<NAME>", 12, 4.00, "23-Mar-2012"), ] batch = [ Row(1, "22-Mar-2012", "Success"), ] customersDF = spark.createDataFrame(customers, schema=["customer_id", "customer_name", "type", "entry_date"]) salesDF = spark.createDataFrame(sales, schema=["id", "customer_id", "product_description", "qty", "revenue", "sales_date"]) batchDF = spark.createDataFrame(batch, schema=["batch_id", "loaded_untill", "status"]) customersDF.createOrReplaceTempView("customers") salesDF.createOrReplaceTempView("sales") batchDF.createOrReplaceTempView("batch") _23_march_customers = spark.sql(""" select t.* from customers t where t.entry_date > (select nvl( max(b.loaded_untill), to_date("01-01-1900", "MM-DD-YYYY") ) from batch b where b.status = "Success") """) _23_march_sales = spark.sql(""" select t.* from sales t where t.sales_date > (select nvl( max(b.loaded_untill), to_date("01-01-1900", "MM-DD-YYYY") ) from batch b where b.status = "Success") """) print("customers table") 
_23_march_customers.show() print("sales table") _23_march_sales.show() # Incremental Data Load Patterns # https://www.youtube.com/watch?v=INuucWEg3sY # 1) Stage / left Outer Join (moving to another server, make a staging and left join, check null on right table, you know this data is new) # 2) Control Table # Load | Cust | Table | Date # Id | Table | Id | Date # 3) Change Data Capture # Source based incremental loading # https://support.timextender.com/hc/en-us/articles/115001301963-How-incremental-loading-works # The source table have a reliable natural or surrogate key and reliable incremental field such as "ModifiedDateTime" or "TimeStamp"
2.984375
3
asset/admin.py
shoaibsaikat/Django-Office-Management-BackEnd
0
13094
from django.contrib import admin from .models import Asset # Register your models here. admin.site.register(Asset)
1.273438
1
instagram_api/response/send_confirm_email.py
Yuego/instagram_api
13
13095
<gh_stars>10-100 from .mapper import ApiResponse, ApiResponseInterface from .mapper.types import Timestamp, AnyType __all__ = ['SendConfirmEmailResponse'] class SendConfirmEmailResponseInterface(ApiResponseInterface): title: AnyType is_email_legit: AnyType body: AnyType class SendConfirmEmailResponse(ApiResponse, SendConfirmEmailResponseInterface): pass
1.75
2
webpages/views.py
18praneeth/udayagiri-scl-maxo
8
13096
<filename>webpages/views.py from django.shortcuts import render, redirect from django.contrib import messages from .models import Contact from django.contrib.auth.decorators import login_required def home(request): if request.user.is_authenticated: return render(request, 'webpages/home.html') else: return render(request, 'webpages/index.html') def about(request): return render(request, 'webpages/about.html') @login_required def team(request): return render(request, 'webpages/team.html') @login_required def privacy(request): return render(request, 'webpages/privacy.html') @login_required def license(request): return render(request, 'webpages/license.html') @login_required def contact(request): if request.POST: name = request.POST['name'] email = request.POST['email'] subject = request.POST['subject'] comment = request.POST['message'] message = Contact() message.name = name message.email = email message.subject = subject message.comments = comment message.save() messages.success(request, 'Your response is recorded') return redirect('contact') else: return render(request, 'webpages/contact.html',{})
2.0625
2
src/pywbemReq/tupletree.py
sinbawang/smisarray
2
13097
<gh_stars>1-10 # # (C) Copyright 2003,2004 Hewlett-Packard Development Company, L.P. # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program; if not, write to the Free Software # Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. # # Author: <NAME> <<EMAIL>> # """ tupletree - Convert XML DOM objects to and from tuple trees. DOM is the standard in-memory representation of XML documents, but it is very cumbersome for some types of processing where XML encodes object structures rather than text documents. Direct mapping to Python classes may not be a good match either. tupletrees may be created from an in-memory DOM using dom_to_tupletree(), or from a string using xml_to_tupletree(). Since the Python XML libraries deal mostly with Unicode strings they are also returned here. If plain Strings are passed in they will be converted by xmldom. Each node of the tuple tree is a Python 4-tuple, corresponding to an XML Element (i.e. <tag>): (NAME, ATTRS, CONTENTS, None) The NAME is the name of the element. The ATTRS are a name-value hash of element attributes. The CONTENTS is a list of child elements. The fourth element is reserved. """ import xml.dom.minidom from pywbemReq.cim_types import is_text __all__ = ['dom_to_tupletree', 'xml_to_tupletree'] def dom_to_tupletree(node): """Convert a DOM object to a pyRXP-style tuple tree. Each element is a 4-tuple of (NAME, ATTRS, CONTENTS, None). 
Very nice for processing complex nested trees. """ if node.nodeType == node.DOCUMENT_NODE: # boring; pop down one level return dom_to_tupletree(node.firstChild) assert node.nodeType == node.ELEMENT_NODE name = node.nodeName attrs = {} contents = [] for child in node.childNodes: if child.nodeType == child.ELEMENT_NODE: contents.append(dom_to_tupletree(child)) elif child.nodeType == child.TEXT_NODE: assert is_text(child.nodeValue), \ "text node %s is not a string" % repr(child) contents.append(child.nodeValue) elif child.nodeType == child.CDATA_SECTION_NODE: contents.append(child.nodeValue) else: raise RuntimeError("can't handle %s" % child) for i in range(node.attributes.length): attr_node = node.attributes.item(i) attrs[attr_node.nodeName] = attr_node.nodeValue # XXX: Cannot yet handle comments, cdata, processing instructions and # other XML batshit. # it's so easy in retrospect! return name, attrs, contents, None def xml_to_tupletree(xml_string): """Parse XML straight into tupletree.""" dom_xml = xml.dom.minidom.parseString(xml_string) return dom_to_tupletree(dom_xml)
2.6875
3
src/Word.py
AlexandreLadriere/ColorfulWords
0
13098
#!/usr/bin/env python3* import unicodedata class Word: """ Object representation for a word Parameters ---------- text : str word text formatedText : str word text without accent, punctuation, etc (UTF-8) color : List of integers pixel color values in rgb for the word - eg: [0, 255, 56] """ def __init__(self, text): """ Initialize a Word object with the given string Parameters ---------- text : str word text """ self.text = text self.formatedText = self.__formatText() @property def color(self): """ Return a list of 3 values (RGB) corresponding to the color representation of the word """ alpha = "abcdefghijklmnopqrstuvwxyz" # alpha[1] = "b" alphaPos = dict([ (x[1],x[0]) for x in enumerate(alpha) ]) # alphaPos["b"] = 1 colorValue = 0 for letter in self.formatedText: if letter.isdigit(): colorValue += int(letter) else: colorValue += alphaPos[letter.lower()] return [(colorValue * len(self.formatedText)) % 256, (colorValue * 2) % 256, (colorValue * 3 % 256)] def __formatText(self): """ Return the formated word """ uniText = ''.join(e for e in self.text if e.isalnum()) # remove punctuation uniText = ''.join(c for c in unicodedata.normalize('NFD', uniText) if unicodedata.category(c) != 'Mn') # Remove accents and other special letter chars uniText = uniText.replace("œ", "oe") uniText = uniText.replace("ª", "a") return uniText
3.890625
4
LeetCode/Python3/DynamicProgramming/123. Best Time to Buy and Sell Stock III.py
WatsonWangZh/CodingPractice
11
13099
<gh_stars>10-100 # Say you have an array for which the ith element is the price of a given stock on day i. # Design an algorithm to find the maximum profit. You may complete at most two transactions. # Note: You may not engage in multiple transactions at the same time # (i.e., you must sell the stock before you buy again). # Example 1: # Input: [3,3,5,0,0,3,1,4] # Output: 6 # Explanation: Buy on day 4 (price = 0) and sell on day 6 (price = 3), profit = 3-0 = 3. # Then buy on day 7 (price = 1) and sell on day 8 (price = 4), profit = 4-1 = 3. # Example 2: # Input: [1,2,3,4,5] # Output: 4 # Explanation: Buy on day 1 (price = 1) and sell on day 5 (price = 5), profit = 5-1 = 4. # Note that you cannot buy on day 1, buy on day 2 and sell them later, as you are # engaging multiple transactions at the same time. You must sell before buying again. # Example 3: # Input: [7,6,4,3,1] # Output: 0 # Explanation: In this case, no transaction is done, i.e. max profit = 0. class Solution(object): def maxProfit(self, prices): """ :type prices: List[int] :rtype: int """ # M1. 两轮贪心,一个从前往后,一个从后往前。 # 首先,从前往后遍历,保留最小值buy → 记录截止第i天(包含第i天)的maxProfit; # 然后,从后往前遍历,保留最大值sell → 记录第i天之后(不包含第i天)的maxProfit。 # 注意 - 可能只交易1次,所以保留遍历一趟后profit的值。 # if not prices: # return 0 # # Record min-buy # profits = [0] # buy, profit = prices[0], 0 # for price in prices[1:]: # buy = min(buy, price) # profit = max(profit, price-buy) # profits.append(profit) # # Record max-sell - Note remember the value of profit # sell = prices[-1] # temp = 0 # for i in range(len(prices)-1, 0, -1): # sell = max(sell, prices[i]) # temp = max(temp, sell - prices[i]) # profit = max(profit, temp + profits[i-1]) # return profit # M2. 
DP # 第i天有4种状态:第一笔交易买入状态最大收益buy1和第一笔交易卖出状态最大收益sell1,第二笔交易买入状态最大收益buy2和第二笔交易卖出状态最大收益sell2。 # 则有下列状态方程: # sell2[i] = max(sell2[i-1], buy2[i-1] + prices[i]) # buy2[i] = max(buy2[i-1], sell1[i-1] - prices[i]) # sell1[i] = max(sell1[i-1], buy1[i-1] + prices[i]) # buy1[i] = max(buy1[i-1], - prices[i]) buy1 = buy2 = float('-inf') sell1 = sell2 = 0 for price in prices: buy1 = max(buy1, -price) sell1 = max(sell1, buy1 + price) buy2 = max(buy2, sell1 - price) sell2 = max(sell2, buy2 + price) return sell2
3.90625
4