Dataset columns (name, dtype, observed range):

  content           string   length 1 to 1.05M characters
  input_ids         list     length 1 to 883k items
  ratio_char_token  float64  1 to 22.9
  token_count       int64    1 to 883k

Each record below lists its fields in that order: the raw source text (content), the token ids (input_ids, shown truncated), the characters-per-token ratio, and the token count.
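A minimal sketch of how the two derived columns could be computed from content. It assumes a GPT-2 style BPE tokenizer loaded through the Hugging Face transformers library; the ids visible below (e.g. 198 for a newline, 11748 for "import") are consistent with that vocabulary, but the exact tokenizer and the make_row helper are assumptions made for illustration, not part of the dataset's own tooling.

# Sketch only: the tokenizer choice (GPT-2 BPE via Hugging Face `transformers`)
# and the `make_row` helper are assumptions, not the dataset's documented pipeline.
from transformers import GPT2TokenizerFast

tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")

def make_row(content: str) -> dict:
    # Tokenize the raw source text into BPE token ids.
    input_ids = tokenizer(content)["input_ids"]
    token_count = len(input_ids)
    # Characters of source text per token; larger values mean the text
    # compresses into fewer tokens.
    ratio_char_token = len(content) / token_count if token_count else 0.0
    return {
        "content": content,
        "input_ids": input_ids,
        "ratio_char_token": ratio_char_token,
        "token_count": token_count,
    }

# Usage: a short import line tokenizes to a handful of ids.
row = make_row("from django.db import models\n")
print(row["token_count"], round(row["ratio_char_token"], 2))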
# -*- coding: utf-8 -*-
from django.db import models
from apps.registro.models.TipoDomicilio import TipoDomicilio
from apps.registro.models.Localidad import Localidad
from apps.registro.models.Establecimiento import Establecimiento
from django.core.exceptions import ValidationError
from apps.seguridad.audit import audit
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 6738, 42625, 14208, 13, 9945, 1330, 4981, 198, 6738, 6725, 13, 2301, 396, 305, 13, 27530, 13, 28434, 78, 35, 10179, 346, 952, 1330, 23095, 78, 35, 10179, 346, 952, 198...
2.990741
108
# 6.5 Write code using find() and string slicing (see section 6.10) to extract
# the number at the end of the line below.
# Convert the extracted value to a floating point number and print it out.
text = "X-DSPAM-Confidence: 0.8475";
pos = text.find(':')
text = float(text[pos+1:])
print text
[ 2, 718, 13, 20, 19430, 2438, 1262, 1064, 3419, 290, 4731, 49289, 357, 3826, 2665, 718, 13, 940, 8, 284, 7925, 220, 198, 2, 262, 1271, 379, 262, 886, 286, 262, 1627, 2174, 13, 220, 198, 2, 38240, 262, 21242, 1988, 284, 257, 12462, ...
3.040404
99
import logging
logger = logging.getLogger(__name__)
print(f"!!!!!!!!!! getEffectiveLevel: {logger.getEffectiveLevel()} !!!!!!!!!!!!!")

from dltb.base.observer import Observable, change

from network import Network, loader
from network.lucid import Network as LucidNetwork

# lucid.modelzoo.vision_models:
#     A module providinge the pretrained networks by name, e.g.
#     models.AlexNet
import lucid.modelzoo.vision_models as models
import lucid.modelzoo.nets_factory as nets
from lucid.modelzoo.vision_base import Model as LucidModel

import lucid.optvis.objectives as objectives
import lucid.optvis.param as param
import lucid.optvis.render as render
import lucid.optvis.transform as transform

# FIXME[old]: this is too make old code happy. New code should use
# Engine.Change and Engine.Observer directly.
EngineChange = Engine.Change
EngineObserver = Engine.Observer
[ 11748, 18931, 198, 6404, 1362, 796, 18931, 13, 1136, 11187, 1362, 7, 834, 3672, 834, 8, 198, 4798, 7, 69, 1, 34635, 3228, 651, 44831, 4971, 25, 1391, 6404, 1362, 13, 1136, 44831, 4971, 3419, 92, 220, 34635, 13896, 2474, 8, 198, 198,...
3.421875
256
# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018-2019 New Vector Ltd # Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import itertools import logging from collections import Counter as c_counter, OrderedDict, deque, namedtuple from functools import wraps from six import iteritems, text_type from six.moves import range from canonicaljson import json from prometheus_client import Counter, Histogram from twisted.internet import defer import synapse.metrics from synapse.api.constants import EventTypes from synapse.api.errors import SynapseError from synapse.events import EventBase # noqa: F401 from synapse.events.snapshot import EventContext # noqa: F401 from synapse.metrics import BucketCollector from synapse.metrics.background_process_metrics import run_as_background_process from synapse.state import StateResolutionStore from synapse.storage.background_updates import BackgroundUpdateStore from synapse.storage.event_federation import EventFederationStore from synapse.storage.events_worker import EventsWorkerStore from synapse.storage.state import StateGroupWorkerStore from synapse.types import RoomStreamToken, get_domain_from_id from synapse.util import batch_iter from synapse.util.async_helpers import ObservableDeferred from synapse.util.caches.descriptors import cached, cachedInlineCallbacks from synapse.util.frozenutils import frozendict_json_encoder from synapse.util.logcontext import PreserveLoggingContext, make_deferred_yieldable from synapse.util.logutils import log_function from synapse.util.metrics import Measure logger = logging.getLogger(__name__) persist_event_counter = Counter("synapse_storage_events_persisted_events", "") event_counter = Counter( "synapse_storage_events_persisted_events_sep", "", ["type", "origin_type", "origin_entity"], ) # The number of times we are recalculating the current state state_delta_counter = Counter("synapse_storage_events_state_delta", "") # The number of times we are recalculating state when there is only a # single forward extremity state_delta_single_event_counter = Counter( "synapse_storage_events_state_delta_single_event", "" ) # The number of times we are reculating state when we could have resonably # calculated the delta when we calculated the state for an event we were # persisting. state_delta_reuse_delta_counter = Counter( "synapse_storage_events_state_delta_reuse_delta", "" ) # The number of forward extremities for each new event. forward_extremities_counter = Histogram( "synapse_storage_events_forward_extremities_persisted", "Number of forward extremities for each new event", buckets=(1, 2, 3, 5, 7, 10, 15, 20, 50, 100, 200, 500, "+Inf"), ) # The number of stale forward extremities for each new event. Stale extremities # are those that were in the previous set of extremities as well as the new. 
stale_forward_extremities_counter = Histogram( "synapse_storage_events_stale_forward_extremities_persisted", "Number of unchanged forward extremities for each new event", buckets=(0, 1, 2, 3, 5, 7, 10, 15, 20, 50, 100, 200, 500, "+Inf"), ) def encode_json(json_object): """ Encode a Python object as JSON and return it in a Unicode string. """ out = frozendict_json_encoder.encode(json_object) if isinstance(out, bytes): out = out.decode("utf8") return out _EventCacheEntry = namedtuple("_EventCacheEntry", ("event", "redacted_event")) def _retry_on_integrity_error(func): """Wraps a database function so that it gets retried on IntegrityError, with `delete_existing=True` passed in. Args: func: function that returns a Deferred and accepts a `delete_existing` arg """ return f # inherits from EventFederationStore so that we can call _update_backward_extremities # and _handle_mult_prev_events (though arguably those could both be moved in here) def _update_current_state_txn(self, txn, state_delta_by_room, stream_id): for room_id, current_state_tuple in iteritems(state_delta_by_room): to_delete, to_insert = current_state_tuple # First we add entries to the current_state_delta_stream. We # do this before updating the current_state_events table so # that we can use it to calculate the `prev_event_id`. (This # allows us to not have to pull out the existing state # unnecessarily). # # The stream_id for the update is chosen to be the minimum of the stream_ids # for the batch of the events that we are persisting; that means we do not # end up in a situation where workers see events before the # current_state_delta updates. # sql = """ INSERT INTO current_state_delta_stream (stream_id, room_id, type, state_key, event_id, prev_event_id) SELECT ?, ?, ?, ?, ?, ( SELECT event_id FROM current_state_events WHERE room_id = ? AND type = ? AND state_key = ? ) """ txn.executemany( sql, ( ( stream_id, room_id, etype, state_key, None, room_id, etype, state_key, ) for etype, state_key in to_delete # We sanity check that we're deleting rather than updating if (etype, state_key) not in to_insert ), ) txn.executemany( sql, ( ( stream_id, room_id, etype, state_key, ev_id, room_id, etype, state_key, ) for (etype, state_key), ev_id in iteritems(to_insert) ), ) # Now we actually update the current_state_events table txn.executemany( "DELETE FROM current_state_events" " WHERE room_id = ? AND type = ? AND state_key = ?", ( (room_id, etype, state_key) for etype, state_key in itertools.chain(to_delete, to_insert) ), ) self._simple_insert_many_txn( txn, table="current_state_events", values=[ { "event_id": ev_id, "room_id": room_id, "type": key[0], "state_key": key[1], } for key, ev_id in iteritems(to_insert) ], ) txn.call_after( self._curr_state_delta_stream_cache.entity_has_changed, room_id, stream_id, ) # Invalidate the various caches # Figure out the changes of membership to invalidate the # `get_rooms_for_user` cache. # We find out which membership events we may have deleted # and which we have added, then we invlidate the caches for all # those users. 
members_changed = set( state_key for ev_type, state_key in itertools.chain(to_delete, to_insert) if ev_type == EventTypes.Member ) for member in members_changed: txn.call_after( self.get_rooms_for_user_with_stream_ordering.invalidate, (member,) ) self._invalidate_state_caches_and_stream(txn, room_id, members_changed) def _update_room_depths_txn(self, txn, events_and_contexts, backfilled): """Update min_depth for each room Args: txn (twisted.enterprise.adbapi.Connection): db connection events_and_contexts (list[(EventBase, EventContext)]): events we are persisting backfilled (bool): True if the events were backfilled """ depth_updates = {} for event, context in events_and_contexts: # Remove the any existing cache entries for the event_ids txn.call_after(self._invalidate_get_event_cache, event.event_id) if not backfilled: txn.call_after( self._events_stream_cache.entity_has_changed, event.room_id, event.internal_metadata.stream_ordering, ) if not event.internal_metadata.is_outlier() and not context.rejected: depth_updates[event.room_id] = max( event.depth, depth_updates.get(event.room_id, event.depth) ) for room_id, depth in iteritems(depth_updates): self._update_min_depth_for_room_txn(txn, room_id, depth) def _update_outliers_txn(self, txn, events_and_contexts): """Update any outliers with new event info. This turns outliers into ex-outliers (unless the new event was rejected). Args: txn (twisted.enterprise.adbapi.Connection): db connection events_and_contexts (list[(EventBase, EventContext)]): events we are persisting Returns: list[(EventBase, EventContext)] new list, without events which are already in the events table. """ txn.execute( "SELECT event_id, outlier FROM events WHERE event_id in (%s)" % (",".join(["?"] * len(events_and_contexts)),), [event.event_id for event, _ in events_and_contexts], ) have_persisted = {event_id: outlier for event_id, outlier in txn} to_remove = set() for event, context in events_and_contexts: if event.event_id not in have_persisted: continue to_remove.add(event) if context.rejected: # If the event is rejected then we don't care if the event # was an outlier or not. continue outlier_persisted = have_persisted[event.event_id] if not event.internal_metadata.is_outlier() and outlier_persisted: # We received a copy of an event that we had already stored as # an outlier in the database. We now have some state at that # so we need to update the state_groups table with that state. # insert into event_to_state_groups. try: self._store_event_state_mappings_txn(txn, ((event, context),)) except Exception: logger.exception("") raise metadata_json = encode_json(event.internal_metadata.get_dict()) sql = ( "UPDATE event_json SET internal_metadata = ?" " WHERE event_id = ?" ) txn.execute(sql, (metadata_json, event.event_id)) # Add an entry to the ex_outlier_stream table to replicate the # change in outlier status to our workers. stream_order = event.internal_metadata.stream_ordering state_group_id = context.state_group self._simple_insert_txn( txn, table="ex_outlier_stream", values={ "event_stream_ordering": stream_order, "event_id": event.event_id, "state_group": state_group_id, }, ) sql = "UPDATE events SET outlier = ?" " WHERE event_id = ?" txn.execute(sql, (False, event.event_id)) # Update the event_backward_extremities table now that this # event isn't an outlier any more. 
self._update_backward_extremeties(txn, [event]) return [ec for ec in events_and_contexts if ec[0] not in to_remove] def _store_event_txn(self, txn, events_and_contexts): """Insert new events into the event and event_json tables Args: txn (twisted.enterprise.adbapi.Connection): db connection events_and_contexts (list[(EventBase, EventContext)]): events we are persisting """ if not events_and_contexts: # nothing to do here return self._simple_insert_many_txn( txn, table="event_json", values=[ { "event_id": event.event_id, "room_id": event.room_id, "internal_metadata": encode_json( event.internal_metadata.get_dict() ), "json": encode_json(event_dict(event)), "format_version": event.format_version, } for event, _ in events_and_contexts ], ) self._simple_insert_many_txn( txn, table="events", values=[ { "stream_ordering": event.internal_metadata.stream_ordering, "topological_ordering": event.depth, "depth": event.depth, "event_id": event.event_id, "room_id": event.room_id, "type": event.type, "processed": True, "outlier": event.internal_metadata.is_outlier(), "origin_server_ts": int(event.origin_server_ts), "received_ts": self._clock.time_msec(), "sender": event.sender, "contains_url": ( "url" in event.content and isinstance(event.content["url"], text_type) ), } for event, _ in events_and_contexts ], ) def _store_rejected_events_txn(self, txn, events_and_contexts): """Add rows to the 'rejections' table for received events which were rejected Args: txn (twisted.enterprise.adbapi.Connection): db connection events_and_contexts (list[(EventBase, EventContext)]): events we are persisting Returns: list[(EventBase, EventContext)] new list, without the rejected events. """ # Remove the rejected events from the list now that we've added them # to the events table and the events_json table. to_remove = set() for event, context in events_and_contexts: if context.rejected: # Insert the event_id into the rejections table self._store_rejections_txn(txn, event.event_id, context.rejected) to_remove.add(event) return [ec for ec in events_and_contexts if ec[0] not in to_remove] def _update_metadata_tables_txn( self, txn, events_and_contexts, all_events_and_contexts, backfilled ): """Update all the miscellaneous tables for new events Args: txn (twisted.enterprise.adbapi.Connection): db connection events_and_contexts (list[(EventBase, EventContext)]): events we are persisting all_events_and_contexts (list[(EventBase, EventContext)]): all events that we were going to persist. This includes events we've already persisted, etc, that wouldn't appear in events_and_context. backfilled (bool): True if the events were backfilled """ # Insert all the push actions into the event_push_actions table. self._set_push_actions_for_event_and_users_txn( txn, events_and_contexts=events_and_contexts, all_events_and_contexts=all_events_and_contexts, ) if not events_and_contexts: # nothing to do here return for event, context in events_and_contexts: if event.type == EventTypes.Redaction and event.redacts is not None: # Remove the entries in the event_push_actions table for the # redacted event. self._remove_push_actions_for_event_id_txn( txn, event.room_id, event.redacts ) # Remove from relations table. self._handle_redaction(txn, event.redacts) # Update the event_forward_extremities, event_backward_extremities and # event_edges tables. 
self._handle_mult_prev_events( txn, events=[event for event, _ in events_and_contexts] ) for event, _ in events_and_contexts: if event.type == EventTypes.Name: # Insert into the room_names and event_search tables. self._store_room_name_txn(txn, event) elif event.type == EventTypes.Topic: # Insert into the topics table and event_search table. self._store_room_topic_txn(txn, event) elif event.type == EventTypes.Message: # Insert into the event_search table. self._store_room_message_txn(txn, event) elif event.type == EventTypes.Redaction: # Insert into the redactions table. self._store_redaction(txn, event) elif event.type == EventTypes.RoomHistoryVisibility: # Insert into the event_search table. self._store_history_visibility_txn(txn, event) elif event.type == EventTypes.GuestAccess: # Insert into the event_search table. self._store_guest_access_txn(txn, event) self._handle_event_relations(txn, event) # Insert into the room_memberships table. self._store_room_members_txn( txn, [ event for event, _ in events_and_contexts if event.type == EventTypes.Member ], backfilled=backfilled, ) # Insert event_reference_hashes table. self._store_event_reference_hashes_txn( txn, [event for event, _ in events_and_contexts] ) state_events_and_contexts = [ ec for ec in events_and_contexts if ec[0].is_state() ] state_values = [] for event, context in state_events_and_contexts: vals = { "event_id": event.event_id, "room_id": event.room_id, "type": event.type, "state_key": event.state_key, } # TODO: How does this work with backfilling? if hasattr(event, "replaces_state"): vals["prev_state"] = event.replaces_state state_values.append(vals) self._simple_insert_many_txn(txn, table="state_events", values=state_values) # Prefill the event cache self._add_to_cache(txn, events_and_contexts) def get_current_backfill_token(self): """The current minimum token that backfilled events have reached""" return -self._backfill_id_gen.get_current_token() def get_current_events_token(self): """The current maximum token that events have reached""" return self._stream_id_gen.get_current_token() def get_all_new_forward_event_rows(self, last_id, current_id, limit): if last_id == current_id: return defer.succeed([]) return self.runInteraction( "get_all_new_forward_event_rows", get_all_new_forward_event_rows ) def get_all_new_backfill_event_rows(self, last_id, current_id, limit): if last_id == current_id: return defer.succeed([]) return self.runInteraction( "get_all_new_backfill_event_rows", get_all_new_backfill_event_rows ) def purge_history(self, room_id, token, delete_local_events): """Deletes room history before a certain point Args: room_id (str): token (str): A topological token to delete events before delete_local_events (bool): if True, we will delete local events as well as remote ones (instead of just marking them as outliers and deleting their state groups). """ return self.runInteraction( "purge_history", self._purge_history_txn, room_id, token, delete_local_events, ) def _find_unreferenced_groups_during_purge(self, txn, state_groups): """Used when purging history to figure out which state groups can be deleted and which need to be de-delta'ed (due to one of its prev groups being scheduled for deletion). Args: txn state_groups (set[int]): Set of state groups referenced by events that are going to be deleted. 
Returns: tuple[set[int], set[int]]: The set of state groups that can be deleted and the set of state groups that need to be de-delta'ed """ # Graph of state group -> previous group graph = {} # Set of events that we have found to be referenced by events referenced_groups = set() # Set of state groups we've already seen state_groups_seen = set(state_groups) # Set of state groups to handle next. next_to_search = set(state_groups) while next_to_search: # We bound size of groups we're looking up at once, to stop the # SQL query getting too big if len(next_to_search) < 100: current_search = next_to_search next_to_search = set() else: current_search = set(itertools.islice(next_to_search, 100)) next_to_search -= current_search # Check if state groups are referenced sql = """ SELECT DISTINCT state_group FROM event_to_state_groups LEFT JOIN events_to_purge AS ep USING (event_id) WHERE state_group IN (%s) AND ep.event_id IS NULL """ % ( ",".join("?" for _ in current_search), ) txn.execute(sql, list(current_search)) referenced = set(sg for sg, in txn) referenced_groups |= referenced # We don't continue iterating up the state group graphs for state # groups that are referenced. current_search -= referenced rows = self._simple_select_many_txn( txn, table="state_group_edges", column="prev_state_group", iterable=current_search, keyvalues={}, retcols=("prev_state_group", "state_group"), ) prevs = set(row["state_group"] for row in rows) # We don't bother re-handling groups we've already seen prevs -= state_groups_seen next_to_search |= prevs state_groups_seen |= prevs for row in rows: # Note: Each state group can have at most one prev group graph[row["state_group"]] = row["prev_state_group"] to_delete = state_groups_seen - referenced_groups to_dedelta = set() for sg in referenced_groups: prev_sg = graph.get(sg) if prev_sg and prev_sg in to_delete: to_dedelta.add(sg) return to_delete, to_dedelta def get_all_updated_current_state_deltas(self, from_token, to_token, limit): return self.runInteraction( "get_all_updated_current_state_deltas", get_all_updated_current_state_deltas_txn, ) AllNewEventsResult = namedtuple( "AllNewEventsResult", [ "new_forward_events", "new_backfill_events", "forward_ex_outliers", "backward_ex_outliers", ], )
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 15069, 1946, 12, 5304, 4946, 27470, 12052, 198, 2, 15069, 2864, 12, 23344, 968, 20650, 12052, 198, 2, 15069, 13130, 383, 24936, 13, 2398, 5693, 327, 13, 40, 13, 34,...
2.118965
11,827
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Metrics support manager."""

import logging

from buildtool import in_memory_metrics
from buildtool import prometheus_metrics
from buildtool import stackdriver_metrics
from buildtool.util import add_parser_argument
[ 2, 15069, 2177, 3012, 3457, 13, 1439, 6923, 33876, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13789, ...
3.899522
209
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).

from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
                        print_function, unicode_literals)

import os
import subprocess

from twitter.common import log

from pants.backend.android.targets.android_binary import AndroidBinary
from pants.backend.android.targets.android_resources import AndroidResources
from pants.backend.android.tasks.aapt_task import AaptTask
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.base.workunit import WorkUnit
from pants.util.dirutil import safe_mkdir
[ 2, 19617, 28, 40477, 12, 23, 198, 2, 15069, 1946, 41689, 1628, 20420, 357, 3826, 27342, 9865, 3843, 20673, 13, 9132, 737, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 3826, 38559, 24290, 737, 198, 198, 6738, 1...
3.327434
226
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""This file contains a class which acts as a wrapper around the PPR algorithm.

This class has the following functionality:
1. Load the KB graph,
2. Given list of seed entities, get topk entities from PPR.
3. Get unique facts between all extracted entities.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import tensorflow as tf

from fat.fat_bert_nq.ppr.apr_algo import csr_personalized_pagerank
from fat.fat_bert_nq.ppr.apr_algo import csr_topk_fact_extractor
from fat.fat_bert_nq.ppr.kb_csr_io import CsrData

flags = tf.flags
FLAGS = flags.FLAGS

flags.DEFINE_bool(
    'verbose_logging', False,
    'If true, all of the warnings related to data processing will be printed. '
    'A number of warnings are expected for a normal NQ evaluation.')
[ 2, 19617, 28, 40477, 12, 23, 198, 2, 15069, 12131, 383, 3012, 4992, 46665, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, ...
3.408019
424
import os import sys import time from IPython.display import Image import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sb sb.set_style("dark") #### Initial Setup #### #plant info plant_info = pd.read_csv('../data/plant_data.csv') plant_info.index.name = 'plant_index' plants = plant_info.name.to_numpy() plant_index = plant_info.index.to_numpy() num_plants = len(plants) plant_sun_req = plant_info.sun.to_numpy() perennials = plant_info[plant_info.perennial==1].index.to_list() problem_plants = plant_info[plant_info.problem_plant==1].index.to_list() #calculate weighted average preference for each plant family = ['evan','gina','liesse','lizzie','jack'] plant_info['avg_pref'] = np.average(plant_info[family],axis=1,weights=[.5,.5,0,0,0]) plant_info.drop(family,axis=1,inplace=True) preferences = plant_info.avg_pref.to_numpy() #bed info bed_info = pd.read_csv('../data/bed_data.csv') bed_info.index.name = 'bed_index' beds = bed_info.bed.to_numpy() bed_index = bed_info.index.to_numpy() bed_sun_req = bed_info.sun.to_numpy() num_beds = len(beds) #time dimension num_years = 3 years = np.array(range(1,num_years+1)) year_index = np.array(range(num_years)) #for keeping track of what axis is which plant_axis = 0 bed_axis = 1 year_axis = 2 ##### Constraints ##### #initialize sun constraint. 1 where plant can feasibly be planted in bed. 0 where sun requirements do not match. sun_constraint = np.ones(shape=(num_plants,num_beds,num_years)) for p in plant_index: for b in bed_index: p_sun = plant_sun_req[p] b_sun = bed_sun_req[b] if p_sun != b_sun: sun_constraint[p,b,:] = 0 def enforce_sun_constraint(plan,sun_constraint): """ Force plan to be 0 where sun requirements for plant and bed do not match. """ return plan*sun_constraint def enforce_perennial_constraint(plan,plant,bed,year,perennials): """Forward fill plan for perennial plants. If 1 in a given bed/year, it will be 1 in same bed thereafter.""" perennial_plan = plan.copy() #what was planted the year before plant_last_year = perennial_plan[:,bed,year-1].argmax() #if the plant is a perennial, plant it this year and every year thereafter if plant in perennials: perennial_plan[:,bed,year:] = 0 # zeros out anything else that may have been planted in bed in current and subsequent years during a previous make_neighbor call perennial_plan[plant,bed,year:] = 1 #sets plant to 1 in bed every year after the current year #if what was planted already in this bed was a perennial, remove it from previous years elif plant_last_year in perennials: perennial_plan[plant_last_year,bed,:year] = 0 return perennial_plan def enforce_disease_constraint(plan,problem_plants): """Creates a mask to determine if the same veg was planted in the same bed over multiple years. 
Multiplies the plan for problem plants by 0 in subsequent years where we planned to put them in the same bed """ disease_plan = plan.copy() #mask to determine cases where same thing was planted in the same bed yoy same_veg_in_bed_yoy = disease_plan.cumsum(axis=year_axis)>1 #multiply plan for specific problem plants by 0 disease_plan[problem_plants] = disease_plan[problem_plants]*(abs(1-same_veg_in_bed_yoy)[problem_plants]) return disease_plan ##### Objectives ##### #the most satisfied you could be (planting fruit or vegetable with highest preference in all beds every year) max_yums = num_beds*num_years*np.max(preferences) def compute_yummy_score(plan,preferences,max_yums): """Takes the weighted average of the preferences of each plant, weighted by the total qty of plants in the current plan for each plant. Maximization encourages plants with higher preferences to be planted in higher quantities.""" plan_yummy = plan.copy() plan_by_plant = plan_yummy.sum(axis=(bed_axis,year_axis)) yums = round(np.dot(preferences,plan_by_plant)/max_yums*100,1) return yums def compute_variety_score(plan,num_plants): """Sums the number of unique plants that are actually planted in the garden. Counts the number of plants that are being planted across all beds. Then counts the number of plants with non-zero planting plan. Maximization encourages more unique plants to be planted.""" plan_variety = plan.copy() num_plants_in_plan = (plan_variety.sum(axis=(bed_axis,year_axis)) > 0).sum() variety_score = round(num_plants_in_plan/num_plants*100,1) return variety_score #### Analysis & Visualization ####
[ 11748, 28686, 198, 11748, 25064, 198, 11748, 640, 198, 198, 6738, 6101, 7535, 13, 13812, 1330, 7412, 198, 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 19798, 292, 355, 279, 67,...
2.832108
1,632
import warnings from collections import OrderedDict from distutils.version import LooseVersion from functools import partial from inspect import isclass from typing import Callable, Optional, Dict, Union import numpy as np import torch import tqdm from torch import Tensor, nn from torch.nn import functional as F from adv_lib.distances.lp_norms import l0_distances, l1_distances, l2_distances, linf_distances from adv_lib.utils import ForwardCounter, BackwardCounter, predict_inputs def generate_random_targets(labels: Tensor, num_classes: int) -> Tensor: """ Generates one random target in (num_classes - 1) possibilities for each label that is different from the original label. Parameters ---------- labels: Tensor Original labels. Generated targets will be different from labels. num_classes: int Number of classes to generate the random targets from. Returns ------- targets: Tensor Random target for each label. Has the same shape as labels. """ random = torch.rand(len(labels), num_classes, device=labels.device, dtype=torch.float) random.scatter_(1, labels.unsqueeze(-1), 0) return random.argmax(1) def get_all_targets(labels: Tensor, num_classes: int): """ Generates all possible targets that are different from the original labels. Parameters ---------- labels: Tensor Original labels. Generated targets will be different from labels. num_classes: int Number of classes to generate the random targets from. Returns ------- targets: Tensor Random targets for each label. shape: (len(labels), num_classes - 1). """ all_possible_targets = torch.zeros(len(labels), num_classes - 1, dtype=torch.long) all_classes = set(range(num_classes)) for i in range(len(labels)): this_label = labels[i].item() other_labels = list(all_classes.difference({this_label})) all_possible_targets[i] = torch.tensor(other_labels) return all_possible_targets _default_metrics = OrderedDict([ ('linf', linf_distances), ('l0', l0_distances), ('l1', l1_distances), ('l2', l2_distances), ])
[ 11748, 14601, 198, 6738, 17268, 1330, 14230, 1068, 35, 713, 198, 6738, 1233, 26791, 13, 9641, 1330, 6706, 577, 14815, 198, 6738, 1257, 310, 10141, 1330, 13027, 198, 6738, 10104, 1330, 318, 4871, 198, 6738, 19720, 1330, 4889, 540, 11, 32...
2.83917
771
#!/usr/bin/env python
# Filename: polygons_cd
"""
introduction: compare two polygons in to shape file

authors: Huang Lingcao
email:huanglingcao@gmail.com
add time: 26 February, 2020
"""

import sys,os
from optparse import OptionParser

# added path of DeeplabforRS
sys.path.insert(0, os.path.expanduser('~/codes/PycharmProjects/DeeplabforRS'))

import basic_src.io_function as io_function
import basic_src.basic as basic
import basic_src.map_projection as map_projection
import parameters

import polygons_cd_multi
import polygons_cd

if __name__ == "__main__":
    usage = "usage: %prog [options] old_shape_file new_shape_file "
    parser = OptionParser(usage=usage, version="1.0 2020-02-26")
    parser.description = 'Introduction: compare two groups of polygons '

    parser.add_option("-p", "--para",
                      action="store", dest="para_file",
                      help="the parameters file")

    parser.add_option('-o', '--output',
                      action="store", dest = 'output',
                      help='the path to save the change detection results')

    (options, args) = parser.parse_args()
    if len(sys.argv) < 2:
        parser.print_help()
        sys.exit(2)

    # # set parameters files
    # if options.para_file is None:
    #     print('error, no parameters file')
    #     parser.print_help()
    #     sys.exit(2)
    # else:
    #     parameters.set_saved_parafile_path(options.para_file)

    basic.setlogfile('polygons_changeDetection.log')

    main(options, args)
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 7066, 12453, 25, 25052, 684, 62, 10210, 220, 198, 37811, 198, 27427, 596, 25, 8996, 734, 25052, 684, 287, 284, 5485, 2393, 198, 198, 41617, 25, 31663, 25116, 66, 5488, 198, 12888, ...
2.485342
614
import tensorflow as tf

# Convert the model.
converter = tf.lite.TFLiteConverter.from_saved_model('model.py')
tflite_model = converter.convert()
open("trash_ai.tflite", "wb").write(tflite_model)
[ 11748, 11192, 273, 11125, 355, 48700, 198, 198, 2, 38240, 262, 2746, 13, 198, 1102, 332, 353, 796, 48700, 13, 36890, 13, 51, 3697, 578, 3103, 332, 353, 13, 6738, 62, 82, 9586, 62, 19849, 10786, 19849, 13, 9078, 11537, 198, 83, 2704,...
2.6
75
from django_cron import CronJobBase, Schedule
from .models import Link
from django.utils import timezone
[ 6738, 42625, 14208, 62, 66, 1313, 1330, 31683, 33308, 14881, 11, 19281, 198, 6738, 764, 27530, 1330, 7502, 198, 6738, 42625, 14208, 13, 26791, 1330, 640, 11340, 628 ]
3.785714
28
# coding: utf8
"""
    weasyprint.tests.stacking
    -------------------------

    :copyright: Copyright 2011-2012 Simon Sapin and contributors, see AUTHORS.
    :license: BSD, see LICENSE for details.

"""

from __future__ import division, unicode_literals

from ..stacking import StackingContext
from .test_boxes import serialize
from .test_layout import parse
from .testing_utils import assert_no_logs
[ 2, 19617, 25, 3384, 69, 23, 198, 37811, 198, 220, 220, 220, 356, 4107, 4798, 13, 41989, 13, 301, 5430, 198, 220, 220, 220, 220, 22369, 12, 628, 220, 220, 220, 1058, 22163, 4766, 25, 15069, 2813, 12, 6999, 11288, 35980, 259, 290, 2...
3.288
125
from django.shortcuts import render
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from sesame import utils
from django.core.mail import send_mail
[ 6738, 42625, 14208, 13, 19509, 23779, 1330, 8543, 198, 6738, 42625, 14208, 13, 3642, 822, 13, 18439, 13, 27530, 1330, 11787, 198, 6738, 42625, 14208, 13, 3642, 822, 13, 18439, 13, 12501, 273, 2024, 1330, 17594, 62, 35827, 198, 6738, 264...
3.482759
58
from .custom_check import CustomCheck, CustomCheckError
from typing import Any, List

import logging

logger = logging.getLogger(__name__)
[ 6738, 764, 23144, 62, 9122, 1330, 8562, 9787, 11, 8562, 9787, 12331, 198, 6738, 19720, 1330, 4377, 11, 7343, 198, 198, 11748, 18931, 198, 198, 6404, 1362, 796, 18931, 13, 1136, 11187, 1362, 7, 834, 3672, 834, 8, 628, 198 ]
3.525
40
#!/usr/bin/python2.4 # Copyright (C) 2008 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """imports Zurich timetables, converting them from DIVA export format to Google Transit format.""" from __future__ import print_function # This was written before transitfeed.py and we haven't yet found the # motivation to port it. Please see the examples directory for better # examples. try: from io import StringIO as cStringIO except ImportError: import cStringIO import csv import datetime import optparse import sys import urllib import zipfile # Zurich tram lines TRAM_LINES = {'2': ['FF3300', 'FFFFFF'], '3': ['009933', 'FFFFFF'], '4': ['333399', 'FFFFFF'], '5': ['996600', 'FFFFFF'], '6': ['CC9933', 'FFFFFF'], '7': ['000000', 'FFFFFF'], '8': ['99CC00', '000000'], '9': ['333399', 'FFFFFF'], '10': ['FF6699', 'FFFFFF'], '11': ['009933', 'FFFFFF'], '12': ['FFFFFF', '000000'], '13': ['FFCC33', '000000'], '14': ['3399CC', 'FFFFFF'], '15': ['FF3300', 'FFFFFF']} # Terms that indicate points of interest. Used to split station names # to (name, city). POI_TERMS = {'Bahnhof': 1, 'Dorfzentrum': 1, 'Schiffstation': 1, 'Station': 1, u'Zentrum': 1, 'Dorfplatz': 1, 'Zentrum/Bahnhof': 1, 'Dorf': 1} # Maps station names to (name, city). Used as exception list where our # simple heuristcs doesn't work. SPECIAL_NAMES = { 'Freienbach SOB, Bahnhof': ('Freienbach SOB', 'Freienbach'), 'Herrliberg-Feldmeilen,Bhf West': ('Bahnhof West', 'Herrliberg-Feldmeilen'), 'Neue Forch': ('Neue Forch', u'Z\u00fcrich'), 'Oberrieden Dorf Bahnhof': ('Oberrieden Dorf', 'Oberrieden'), 'Spital Zollikerberg': ('Spital', 'Zollikerberg'), 'Triemli': ('Triemli', u'Z\u00fcrich'), 'Zentrum Glatt': ('Zentrum Glatt', 'Wallisellen'), } # Cities whose names we want to prettify/correct at import time. SPECIAL_CITIES = { 'Affoltern a. A.': 'Affoltern am Albis', 'Wangen b. D.': 'Wangen' } def convert_c_h1903(x, y): "Converts coordinates from the 1903 Swiss national grid system to WGS-84." yb = (x - 600000.0) / 1e6; xb = (y - 200000.0) / 1e6; lam = 2.6779094 \ + 4.728982 * yb \ + 0.791484 * yb * xb \ + 0.1306 * yb * xb * xb \ - 0.0436 * yb * yb * yb phi = 16.9023892 \ + 3.238372 * xb \ - 0.270978 * yb * yb \ - 0.002582 * xb * xb \ - 0.0447 * yb * yb * xb \ - 0.0140 * xb * xb * xb return phi * 100.0 / 36.0, lam * 100.0 / 36.0 def encode_for_csv(x): "Encodes one value for CSV." k = x.encode('utf-8') if ',' in k or '"' in k: return '"%s"' % k.replace('"', '""') else: return k def write_row(stream, values): "writes one row of comma-separated values to stream." stream.write(','.join([encode_for_csv(val) for val in values])) stream.write('\n') # https://developers.google.com/transit/gtfs/ TYPE_TRAM = 0 TYPE_BUS = 3 if __name__ == '__main__': main(sys.argv)
[ 2, 48443, 14629, 14, 8800, 14, 29412, 17, 13, 19, 198, 198, 2, 15069, 357, 34, 8, 3648, 3012, 3457, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, ...
2.268473
1,624
# -*- coding: utf-8 -*- """This module contains classes for documents, and lists of documents. Documents are defined by the document rules in settings.py A file can contain one or more document. However, a document can not be constructed from more than one file. This is a limitation, obvious in cases like Gotlands kommun, where meeting minutes are split up in a large number of files. """ import settings from modules.utils import make_unicode, last_index from modules.extractors.documentBase import ExtractionNotAllowed document_headers = { "Content-Type": "text/plain", "Content-Disposition": "attachment", "Cache-Control": "public" } if __name__ == "__main__": print "This module is only intended to be called from other scripts." import sys sys.exit()
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 1212, 8265, 4909, 6097, 329, 4963, 11, 290, 8341, 286, 4963, 13, 198, 220, 220, 33267, 389, 5447, 416, 262, 3188, 3173, 287, 6460, 13, 9078, 628, 220, 220, 317,...
3.417021
235
#!/usr/bin/env python3

import os
import os.path
import shutil
import subprocess
import sys
import tempfile
import uuid

import mgm_utils

if __name__ == "__main__":
    main()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 198, 11748, 28686, 198, 11748, 28686, 13, 6978, 198, 11748, 4423, 346, 198, 11748, 850, 14681, 198, 11748, 25064, 198, 11748, 20218, 7753, 198, 11748, 334, 27112, 198, 198, 11748, 10...
2.793651
63
""" Testing 3D tracer advection-diffusion equation with method of manufactured solution (MMS). """ from thetis import * import numpy from scipy import stats import pytest def run(setup, refinement, order, do_export=True, **options): """Run single test and return L2 error""" print_output('--- running {:} refinement {:}'.format(setup.__name__, refinement)) # domain dimensions lx = 15e3 ly = 10e3 area = lx*ly t_end = 200.0 setup_obj = setup() # mesh n_layers = 4*refinement nx = 4*refinement ny = 4*refinement mesh2d = RectangleMesh(nx, ny, lx, ly) # outputs outputdir = 'outputs' if do_export: out_t = File(os.path.join(outputdir, 'T.pvd')) # bathymetry x_2d, y_2d = SpatialCoordinate(mesh2d) p1_2d = get_functionspace(mesh2d, 'CG', 1) bathymetry_2d = Function(p1_2d, name='Bathymetry') bathymetry_2d.project(setup_obj.bath(x_2d, y_2d, lx, ly)) solver_obj = solver.FlowSolver(mesh2d, bathymetry_2d, n_layers) solver_obj.options.element_family = 'dg-dg' solver_obj.options.polynomial_degree = order solver_obj.options.horizontal_velocity_scale = Constant(1.0) solver_obj.options.use_bottom_friction = False solver_obj.options.no_exports = not do_export solver_obj.options.output_directory = outputdir solver_obj.options.simulation_end_time = t_end solver_obj.options.fields_to_export = ['salt_3d', 'uv_3d', 'w_3d'] solver_obj.options.horizontal_viscosity_scale = Constant(50.0) solver_obj.options.update(options) solver_obj.create_function_spaces() # functions for source terms x, y, z = SpatialCoordinate(solver_obj.mesh) solver_obj.options.salinity_source_3d = setup_obj.residual(x, y, z, lx, ly) # diffusivuty solver_obj.options.horizontal_diffusivity = setup_obj.kappa(x, y, z, lx, ly) # analytical solution trac_ana = setup_obj.tracer(x, y, z, lx, ly) bnd_salt = {'value': trac_ana} solver_obj.bnd_functions['salt'] = {1: bnd_salt, 2: bnd_salt, 3: bnd_salt, 4: bnd_salt} # NOTE use symmetic uv condition to get correct w bnd_mom = {'symm': None} solver_obj.bnd_functions['momentum'] = {1: bnd_mom, 2: bnd_mom, 3: bnd_mom, 4: bnd_mom} solver_obj.create_equations() dt = solver_obj.dt # elevation field solver_obj.fields.elev_2d.project(setup_obj.elev(x_2d, y_2d, lx, ly)) # update mesh and fields solver_obj.mesh_updater.update_mesh_coordinates() # salinity field solver_obj.fields.salt_3d.project(setup_obj.tracer(x, y, z, lx, ly)) # velocity field solver_obj.fields.uv_3d.project(setup_obj.uv(x, y, z, lx, ly)) solver_obj.w_solver.solve() if do_export: out_t.write(trac_ana) solver_obj.export() # solve salinity advection-diffusion equation with residual source term ti = solver_obj.timestepper ti.timesteppers.salt_expl.initialize(ti.fields.salt_3d) t = 0 while t < t_end - 1e-5: ti.timesteppers.salt_expl.advance(t) if ti.options.use_limiter_for_tracers: ti.solver.tracer_limiter.apply(ti.fields.salt_3d) t += dt if do_export: out_t.write(trac_ana) solver_obj.export() l2_err = errornorm(trac_ana, solver_obj.fields.salt_3d)/numpy.sqrt(area) print_output('L2 error {:.12f}'.format(l2_err)) return l2_err def run_convergence(setup, ref_list, order, do_export=False, save_plot=False, **options): """Runs test for a list of refinements and computes error convergence rate""" l2_err = [] for r in ref_list: l2_err.append(run(setup, r, order, do_export=do_export, **options)) x_log = numpy.log10(numpy.array(ref_list, dtype=float)**-1) y_log = numpy.log10(numpy.array(l2_err)) check_convergence(x_log, y_log, order+1, 'tracer', save_plot) # --------------------------- # standard tests for pytest # --------------------------- def 
test_convergence(setup, timestepper_type): run_convergence(setup, [1, 2, 3], 1, save_plot=False, timestepper_type=timestepper_type) if __name__ == '__main__': run_convergence(Setup4, [1, 2, 3], 1, save_plot=True, timestepper_type='SSPRK22')
[ 37811, 198, 44154, 513, 35, 491, 11736, 512, 303, 596, 12, 26069, 4241, 16022, 351, 2446, 286, 15943, 4610, 357, 44, 5653, 737, 198, 37811, 198, 6738, 262, 48010, 1330, 1635, 198, 11748, 299, 32152, 198, 6738, 629, 541, 88, 1330, 9756...
2.207198
1,945
from lxml import etree

from django import forms
from django.db import models
[ 6738, 300, 19875, 1330, 2123, 631, 198, 198, 6738, 42625, 14208, 1330, 5107, 198, 6738, 42625, 14208, 13, 9945, 1330, 4981, 628 ]
3.590909
22
import tensorflow as tf
from tensorflow import keras
from keras_cv_attention_models.attention_layers import (
    activation_by_name,
    batchnorm_with_activation,
    conv2d_no_bias,
    depthwise_conv2d_no_bias,
    add_pre_post_process,
)
from keras_cv_attention_models import model_surgery
from keras_cv_attention_models.download_and_load import reload_model_weights
from keras_cv_attention_models.coco.eval_func import DecodePredictions

PRETRAINED_DICT = {
    "yolox_nano": {"coco": "7c97d60d4cc9d54321176f844acee627"},
    "yolox_tiny": {"coco": "f9b51ff24290090c86a10a45f811140b"},
    "yolox_s": {"coco": "a989f5a808ddc4a8242157a6a3e64977"},
    "yolox_m": {"coco": "5c2333d2f12b2f48e3ec8555b29d242f"},
    "yolox_l": {"coco": "a07c48994b7a67dba421025ef39b858b"},
    "yolox_x": {"coco": "de9741d3f67f50c54856bcae0f07b7ef"},
}

""" CSPDarknet backbone """

BATCH_NORM_EPSILON = 1e-3
BATCH_NORM_MOMENTUM = 0.03

""" path aggregation fpn """

""" YOLOXHead """

""" YOLOX models """
[ 11748, 11192, 273, 11125, 355, 48700, 198, 6738, 11192, 273, 11125, 1330, 41927, 292, 198, 6738, 41927, 292, 62, 33967, 62, 1078, 1463, 62, 27530, 13, 1078, 1463, 62, 75, 6962, 1330, 357, 198, 220, 220, 220, 14916, 62, 1525, 62, 3672,...
2.146186
472
from pytest import raises

from pydantic import ValidationError

from robot_server.service.json_api.response import (
    ResponseDataModel,
    ResponseModel,
    MultiResponseModel,
)
from tests.service.helpers import ItemResponseModel
[ 6738, 12972, 9288, 1330, 12073, 198, 6738, 279, 5173, 5109, 1330, 3254, 24765, 12331, 198, 198, 6738, 9379, 62, 15388, 13, 15271, 13, 17752, 62, 15042, 13, 26209, 1330, 357, 198, 220, 220, 220, 18261, 6601, 17633, 11, 198, 220, 220, 2...
3.422535
71
from game.game_view import GameView
from game.menu_view import menu_view
from game import constants
import arcade

SCREEN_WIDTH = constants.SCREEN_WIDTH
SCREEN_HEIGHT = constants.SCREEN_HEIGHT
SCREEN_TITLE = constants.SCREEN_TITLE

window = arcade.Window(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
start_view = menu_view()
window.show_view(start_view)
arcade.run()
[ 198, 6738, 983, 13, 6057, 62, 1177, 1330, 3776, 7680, 198, 6738, 983, 13, 26272, 62, 1177, 1330, 6859, 62, 1177, 198, 6738, 983, 1330, 38491, 198, 11748, 27210, 628, 198, 6173, 2200, 1677, 62, 54, 2389, 4221, 796, 38491, 13, 6173, 2...
2.678832
137
# Copyright 2015 OpenStack Foundation
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.
#

from alembic import op
from oslo_utils import uuidutils
import sqlalchemy as sa

from neutron.db import rbac_db_models

"""rbac_qos_policy

Revision ID: c6c112992c9
Revises: 8a6d8bdae39
Create Date: 2015-11-25 18:45:03.831359

"""

# revision identifiers, used by Alembic.
revision = 'c6c112992c9'
down_revision = 'e3278ee65050'
depends_on = ('15e43b934f81',)

qos_rbacs = sa.Table(
    'qospolicyrbacs', sa.MetaData(),
    sa.Column('id', sa.String(length=36), nullable=False),
    sa.Column('tenant_id', sa.String(length=255), nullable=True),
    sa.Column('target_tenant', sa.String(length=255), nullable=False),
    sa.Column('action', sa.String(length=255), nullable=False),
    sa.Column('object_id', sa.String(length=36), nullable=False))

# A simple model of the qos_policies table with only the fields needed for
# the migration.
qos_policy = sa.Table('qos_policies', sa.MetaData(),
                      sa.Column('id', sa.String(length=36), nullable=False),
                      sa.Column('tenant_id', sa.String(length=255)),
                      sa.Column('shared', sa.Boolean(), nullable=False))
[ 2, 15069, 1853, 4946, 25896, 5693, 198, 2, 198, 2, 220, 220, 220, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 345, 743, 198, 2, 220, 220, 220, 407, 779, 428, 2393, 2845, 287, 11846, 351, 26...
2.519718
710
from concurrent.futures import TimeoutError
from google.cloud import pubsub_v1

project_id = "pubsub-testing-331300"
subscription_id = "test-sub"
# Number of seconds the subscriber should listen for messages
timeout = 5.0

subscriber = pubsub_v1.SubscriberClient()
# The `subscription_path` method creates a fully qualified identifier
# in the form `projects/{project_id}/subscriptions/{subscription_id}`
subscription_path = subscriber.subscription_path(project_id, subscription_id)

streaming_pull_future = subscriber.subscribe(subscription_path, callback=callback)
print(f"Listening for messages on {subscription_path}..\n")

# Wrap subscriber in a 'with' block to automatically call close() when done.
with subscriber:
    try:
        # When `timeout` is not set, result() will block indefinitely,
        # unless an exception is encountered first.
        streaming_pull_future.result(timeout=timeout)
    except TimeoutError:
        streaming_pull_future.cancel()  # Trigger the shutdown.
        streaming_pull_future.result()  # Block until the shutdown is complete.
[ 6738, 24580, 13, 69, 315, 942, 1330, 3862, 448, 12331, 198, 6738, 23645, 13, 17721, 1330, 2240, 7266, 62, 85, 16, 198, 198, 16302, 62, 312, 796, 366, 12984, 7266, 12, 33407, 12, 2091, 1485, 405, 1, 198, 7266, 33584, 62, 312, 796, ...
3.246988
332
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.

from odoo import api, fields, models, _
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 2142, 286, 10529, 2238, 13, 4091, 38559, 24290, 2393, 329, 1336, 6634, 290, 15665, 3307, 13, 198, 198, 6738, 16298, 2238, 1330, 40391, 11, 7032, 11, 4981, 11, 4808, ...
3.133333
45
""" Data Loader for Generating Tasks Author: Zhao Na, 2020 """ import os import random import math import glob import numpy as np import h5py as h5 import transforms3d from itertools import combinations import torch from torch.utils.data import Dataset def sample_K_pointclouds(data_path, num_point, pc_attribs, pc_augm, pc_augm_config, scan_names, sampled_class, sampled_classes, is_support=False): '''sample K pointclouds and the corresponding labels for one class (one_way)''' ptclouds = [] labels = [] for scan_name in scan_names: ptcloud, label = sample_pointcloud(data_path, num_point, pc_attribs, pc_augm, pc_augm_config, scan_name, sampled_classes, sampled_class, support=is_support) ptclouds.append(ptcloud) labels.append(label) ptclouds = np.stack(ptclouds, axis=0) labels = np.stack(labels, axis=0) return ptclouds, labels def augment_pointcloud(P, pc_augm_config): """" Augmentation on XYZ and jittering of everything """ M = transforms3d.zooms.zfdir2mat(1) if pc_augm_config['scale'] > 1: s = random.uniform(1 / pc_augm_config['scale'], pc_augm_config['scale']) M = np.dot(transforms3d.zooms.zfdir2mat(s), M) if pc_augm_config['rot'] == 1: angle = random.uniform(0, 2 * math.pi) M = np.dot(transforms3d.axangles.axangle2mat([0, 0, 1], angle), M) # z=upright assumption if pc_augm_config['mirror_prob'] > 0: # mirroring x&y, not z if random.random() < pc_augm_config['mirror_prob'] / 2: M = np.dot(transforms3d.zooms.zfdir2mat(-1, [1, 0, 0]), M) if random.random() < pc_augm_config['mirror_prob'] / 2: M = np.dot(transforms3d.zooms.zfdir2mat(-1, [0, 1, 0]), M) P[:, :3] = np.dot(P[:, :3], M.T) if pc_augm_config['jitter']: sigma, clip = 0.01, 0.05 # https://github.com/charlesq34/pointnet/blob/master/provider.py#L74 P = P + np.clip(sigma * np.random.randn(*P.shape), -1 * clip, clip).astype(np.float32) return P ################################################ Static Testing Dataset ################################################ ################################################ Pre-train Dataset ################################################
[ 37811, 6060, 8778, 263, 329, 2980, 803, 309, 6791, 198, 198, 13838, 25, 29436, 11013, 11, 12131, 198, 37811, 198, 11748, 28686, 198, 11748, 4738, 198, 11748, 10688, 198, 11748, 15095, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 289, ...
2.403926
968
from dataclasses import dataclass
from typing import List

from greendoge.types.condition_opcodes import ConditionOpcode
from greendoge.util.streamable import Streamable, streamable
[ 6738, 4818, 330, 28958, 1330, 4818, 330, 31172, 198, 6738, 19720, 1330, 7343, 198, 198, 6738, 10536, 31110, 469, 13, 19199, 13, 31448, 62, 404, 40148, 1330, 24295, 18257, 8189, 198, 6738, 10536, 31110, 469, 13, 22602, 13, 5532, 540, 133...
3.893617
47
"""The nexia integration base entity.""" from aiopvapi.resources.shade import ATTR_TYPE from homeassistant.const import ATTR_MODEL, ATTR_SW_VERSION import homeassistant.helpers.device_registry as dr from homeassistant.helpers.entity import DeviceInfo from homeassistant.helpers.update_coordinator import CoordinatorEntity from .const import ( DEVICE_FIRMWARE, DEVICE_MAC_ADDRESS, DEVICE_MODEL, DEVICE_NAME, DEVICE_SERIAL_NUMBER, DOMAIN, FIRMWARE, FIRMWARE_BUILD, FIRMWARE_REVISION, FIRMWARE_SUB_REVISION, MANUFACTURER, )
[ 37811, 464, 497, 36072, 11812, 2779, 9312, 526, 15931, 198, 198, 6738, 257, 14922, 85, 15042, 13, 37540, 13, 1477, 671, 1330, 5161, 5446, 62, 25216, 198, 198, 6738, 1363, 562, 10167, 13, 9979, 1330, 5161, 5446, 62, 33365, 3698, 11, 51...
2.579186
221
# Xlib.ext.res -- X-Resource extension module # # Copyright (C) 2021 Aleksei Bavshin <alebastr89@gmail.com> # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public License # as published by the Free Software Foundation; either version 2.1 # of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the # Free Software Foundation, Inc., # 51 Franklin Street, # Fifth Floor, # Boston, MA 02110-1301 USA """X-Resource extension allows a client to query the X server about its usage of various resources. For detailed description see any of the following documents. Protocol specification: https://www.x.org/releases/current/doc/resourceproto/resproto.txt XCB Protocol specification: https://cgit.freedesktop.org/xcb/proto/tree/src/res.xml """ from Xlib.protocol import rq RES_MAJOR_VERSION = 1 RES_MINOR_VERSION = 2 extname = "X-Resource" # v1.0 ResQueryVersion = 0 ResQueryClients = 1 ResQueryClientResources = 2 ResQueryClientPixmapBytes = 3 # v1.2 ResQueryClientIds = 4 ResQueryResourceBytes = 5 def query_version(self, client_major=RES_MAJOR_VERSION, client_minor=RES_MINOR_VERSION): """ Query the protocol version supported by the X server. The client sends the highest supported version to the server and the server sends the highest version it supports, but no higher than the requested version.""" return QueryVersion( display=self.display, opcode=self.display.get_extension_major(extname), client_major=client_major, client_minor=client_minor) Client = rq.Struct( rq.Card32("resource_base"), rq.Card32("resource_mask")) def query_clients(self): """Request the list of all currently connected clients.""" return QueryClients( display=self.display, opcode=self.display.get_extension_major(extname)) Type = rq.Struct( rq.Card32("resource_type"), rq.Card32("count")) def query_client_resources(self, client): """Request the number of resources owned by a client. The server will return the counts of each type of resource. """ return QueryClientResources( display=self.display, opcode=self.display.get_extension_major(extname), client=client) def query_client_pixmap_bytes(self, client): """Query the pixmap usage of some client. The returned number is a sum of memory usage of each pixmap that can be attributed to the given client. """ return QueryClientPixmapBytes( display=self.display, opcode=self.display.get_extension_major(extname), client=client) ClientXIDMask = 1 << 0 LocalClientPIDMask = 1 << 1 ClientIdSpec = rq.Struct( rq.Card32("client"), rq.Card32("mask")) ClientIdValue = rq.Struct( rq.Object("spec", ClientIdSpec), SizeOf("value", 4, 4), rq.List("value", rq.Card32Obj)) def query_client_ids(self, specs): """Request to identify a given set of clients with some identification method. The request sends a list of specifiers that select clients and identification methods to server. The server then tries to identify the chosen clients using the identification methods specified for each client. The server returns IDs for those clients that were successfully identified. 
""" return QueryClientIds( display=self.display, opcode=self.display.get_extension_major(extname), specs=specs) ResourceIdSpec = rq.Struct( rq.Card32("resource"), rq.Card32("type")) ResourceSizeSpec = rq.Struct( # inline struct ResourceIdSpec to work around # a parser bug with nested objects rq.Card32("resource"), rq.Card32("type"), rq.Card32("bytes"), rq.Card32("ref_count"), rq.Card32("use_count")) ResourceSizeValue = rq.Struct( rq.Object("size", ResourceSizeSpec), rq.LengthOf("cross_references", 4), rq.List("cross_references", ResourceSizeSpec)) def query_resource_bytes(self, client, specs): """Query the sizes of resources from X server. The request sends a list of specifiers that selects resources for size calculation. The server tries to calculate the sizes of chosen resources and returns an estimate for a resource only if the size could be determined """ return QueryResourceBytes( display=self.display, opcode=self.display.get_extension_major(extname), client=client, specs=specs)
[ 2, 1395, 8019, 13, 2302, 13, 411, 1377, 1395, 12, 26198, 7552, 8265, 201, 198, 2, 201, 198, 2, 220, 220, 220, 15069, 357, 34, 8, 33448, 9300, 74, 36455, 37313, 1477, 259, 1279, 32100, 459, 81, 4531, 31, 14816, 13, 785, 29, 201, ...
2.595607
2,003
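The row above defines the X-Resource request wrappers but stops before showing how they are driven. A minimal sketch, assuming python-xlib registers these requests on the Display object under res_*-prefixed names and that the reply fields follow the rq.Struct definitions shown above (both are assumptions, not confirmed by the excerpt):

from Xlib import display

d = display.Display()
if d.has_extension("X-Resource"):
    ver = d.res_query_version()                   # assumed registered method name
    print(ver.server_major, ver.server_minor)     # assumed reply field names
    for cl in d.res_query_clients().clients:      # assumed reply field name
        print(hex(cl.resource_base), hex(cl.resource_mask))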
# Process the unix command line of the pipeline.

import argparse
from version import rubra_version

parser = argparse.ArgumentParser(
    description='A bioinformatics pipeline system.')
parser.add_argument(
    'pipeline', metavar='PIPELINE_FILE', type=str,
    help='Your Ruffus pipeline stages (a Python module)')
parser.add_argument(
    '--config', metavar='CONFIG_FILE', type=str, nargs='+', required=True,
    help='One or more configuration files (Python modules)')
parser.add_argument(
    '--verbose', type=int, choices=(0, 1, 2), required=False, default=1,
    help='Output verbosity level: 0 = quiet; 1 = normal; \
2 = chatty (default is 1)')
parser.add_argument(
    '--style', type=str, choices=('print', 'run', 'flowchart', 'touchfiles'),
    required=False, default='print',
    help='Pipeline behaviour: print; run; touchfiles; flowchart (default is print)')
parser.add_argument(
    '--force', metavar='TASKNAME', type=str, required=False, default=[], nargs='+',
    help='tasks which are forced to be out of date regardless of timestamps')
parser.add_argument(
    '--end', metavar='TASKNAME', type=str, required=False,
    help='end points (tasks) for the pipeline')
parser.add_argument(
    '--rebuild', type=str, choices=('fromstart', 'fromend'),
    required=False, default='fromstart',
    help='rebuild outputs by working back from end tasks or forwards \
from start tasks (default is fromstart)')
parser.add_argument(
    '--version', action='version', version='%(prog)s ' + rubra_version)

[ 2, 10854, 262, 555, 844, 3141, 1627, 286, 262, 11523, 13, 198, 198, 11748, 1822, 29572, 198, 6738, 2196, 1330, 6437, 430, 62, 9641, 198, 198, 48610, 796, 1822, 29572, 13, 28100, 1713, 46677, 7, 198, 220, 220, 220, 6764, 11639, 32, 1...
2.622793
623
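For illustration only, the parser defined in the row above resolves a command line like the following; the file names are placeholders, not part of the project:

args = parser.parse_args([
    'my_pipeline.py',                  # positional PIPELINE_FILE (hypothetical name)
    '--config', 'pipeline_config.py',  # hypothetical config module
    '--style', 'run',
    '--verbose', '2',
])
print(args.pipeline, args.config, args.style, args.verbose)
# -> my_pipeline.py ['pipeline_config.py'] run 2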
import webbrowser
import config
from Generator import Generator


if __name__ == '__main__':
    main()  # NOTE: main() is not defined in this excerpt; it is expected elsewhere in the module
[ 11748, 3992, 40259, 198, 11748, 4566, 198, 6738, 35986, 1330, 35986, 628, 198, 198, 361, 11593, 3672, 834, 6624, 705, 834, 12417, 834, 10354, 198, 220, 220, 220, 1388, 3419, 198 ]
3.387097
31
#!/usr/bin/env python
import getopt
import socket
import sys

import cbor
# from cbor2 import dumps, loads
import json
import time
import traceback

from coapthon.client.helperclient import HelperClient
from coapthon.utils import parse_uri
from coapthon import defines

client = None
paths = {}
paths_extend = {}
my_base = ""

# no json tags as strings
# id    ==> 0
# href  ==> 11
# ga    ==> 7
# cflag ==> 8

# id    ==> 0
# ia    ==> 12
# path  ==> 112
# url   ==> 10
# ga    ==> 7

# id    ==> 0
# ia    ==> 12
# path  ==> 112
# url   ==> 10
# ga    ==> 7
# cmd   ==> 2
# ./knx resource

# sia   ==> 4
# ga    ==> 7
# st    ==> 6
# ./knx resource

if __name__ == '__main__':  # pragma: no cover
    main()  # NOTE: main() is not defined in this excerpt; the full script provides it
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 11748, 651, 8738, 198, 11748, 17802, 198, 11748, 25064, 198, 11748, 269, 2865, 198, 2, 6738, 269, 2865, 17, 1330, 45514, 11, 15989, 198, 11748, 33918, 198, 11748, 640, 198, 11748, 12854,...
2.162465
357
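The script above only sets up imports and tag tables before deferring to an unshown main(). A minimal sketch of how the imported HelperClient is typically driven against a CoAP endpoint; the host, port, and path are placeholders:

host, port, path = "127.0.0.1", 5683, "/.well-known/core"
client = HelperClient(server=(host, port))
try:
    response = client.get(path)   # blocking GET request
    print(response.payload)
finally:
    client.stop()                 # release the client's socket and worker threads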
hiddenimports = ['sip', 'PyQt4.QtGui', 'PyQt4._qt']

from PyInstaller.hooks.hookutils import qt4_plugins_binaries
[ 30342, 320, 3742, 796, 37250, 82, 541, 3256, 705, 20519, 48, 83, 19, 13, 48, 83, 8205, 72, 3256, 705, 20519, 48, 83, 19, 13557, 39568, 20520, 198, 198, 6738, 9485, 15798, 263, 13, 25480, 82, 13, 25480, 26791, 1330, 10662, 83, 19, ...
2.346939
49
from PyTradier.base import BasePyTradier
from typing import Union
from datetime import datetime


if __name__ == "__main__":
    from utils import printer

    data = MarketData()  # NOTE: MarketData is not imported in this excerpt
    symbol = "AAPL"
    response = data.option_lookup(symbol)
    # response = data.option_strike(symbol, dates[0])
    printer(response)
[ 6738, 9485, 2898, 38868, 13, 8692, 1330, 7308, 20519, 2898, 38868, 198, 6738, 19720, 1330, 4479, 198, 6738, 4818, 8079, 1330, 4818, 8079, 628, 198, 198, 361, 11593, 3672, 834, 6624, 366, 834, 12417, 834, 1298, 198, 220, 220, 220, 422, ...
2.9
110
# Copyright 2018 Jetperch LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from PySide2 import QtCore, QtWidgets
from . import joulescope_rc
from .meter_value_widget import MeterValueWidget
import logging

log = logging.getLogger(__name__)

FIELDS = [
    ('current', 'A', 'Amps'),
    ('voltage', 'V', 'Volts'),
    ('power', 'W', 'Watts'),
    ('energy', 'J', 'Joules'),
]
[ 2, 15069, 2864, 19013, 525, 354, 11419, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13789, 13, 198, 2, 921...
3.251852
270
""" Services are the heart of RPyC: each side of the connection exposes a *service*, which define the capabilities available to the other side. Note that the services by both parties need not be symmetric, e.g., one side may exposed *service A*, while the other may expose *service B*. As long as the two can interoperate, you're good to go. """ from functools import partial from rpyc.lib import hybridmethod from rpyc.lib.compat import execute, is_py3k from rpyc.core.protocol import Connection
[ 37811, 198, 31007, 389, 262, 2612, 286, 371, 20519, 34, 25, 1123, 1735, 286, 262, 4637, 32142, 257, 1635, 15271, 25666, 198, 4758, 8160, 262, 9889, 1695, 284, 262, 584, 1735, 13, 198, 198, 6425, 326, 262, 2594, 416, 1111, 4671, 761, ...
3.618705
139
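The docstring above describes services in the abstract. A short, self-contained sketch of the pattern (not taken from this module; the service name and port are illustrative):

import rpyc
from rpyc.utils.server import ThreadedServer


class CalculatorService(rpyc.Service):
    """Anything prefixed with exposed_ becomes callable by the peer."""

    def exposed_add(self, a, b):
        return a + b


if __name__ == "__main__":
    # A client would then use: rpyc.connect("localhost", 18861).root.add(1, 2)
    ThreadedServer(CalculatorService, port=18861).start()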
import unittest

from testbase import TaskmatorTestBase
from taskmator.task import core, util
from taskmator import context


if __name__ == '__main__':
    unittest.main()
[ 11748, 555, 715, 395, 198, 6738, 1332, 8692, 1330, 15941, 76, 1352, 14402, 14881, 198, 6738, 4876, 76, 1352, 13, 35943, 1330, 4755, 11, 7736, 198, 6738, 4876, 76, 1352, 1330, 4732, 628, 198, 361, 11593, 3672, 834, 6624, 705, 834, 1241...
3.127273
55
"""Test discovery of entities for device-specific schemas for the Z-Wave JS integration."""
[ 37811, 14402, 9412, 286, 12066, 329, 3335, 12, 11423, 3897, 5356, 329, 262, 1168, 12, 39709, 26755, 11812, 526, 15931, 628, 628 ]
4.318182
22
from typing import Optional
from botocore.client import BaseClient
from typing import Dict
from typing import Union
from botocore.paginate import Paginator
from botocore.waiter import Waiter
from typing import List
[ 6738, 19720, 1330, 32233, 198, 6738, 10214, 420, 382, 13, 16366, 1330, 7308, 11792, 198, 6738, 19720, 1330, 360, 713, 198, 6738, 19720, 1330, 4479, 198, 6738, 10214, 420, 382, 13, 79, 363, 4559, 1330, 31525, 20900, 198, 6738, 10214, 420...
4
54
# Copyright 2021 Raven Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from queue import Queue
from threading import Thread

from benchmark.workload.tpch import TpchLoopWorkload


if __name__ == '__main__':
    workload = TpchLoopWorkload()
    print(workload)

    queue = Queue()
    generate_thread = Thread(
        target=workload.generate_one_loop_queries,
        args=(queue,),
        name='QueryGenerator'
    )
    generate_thread.start()

    # NOTE: print_queries is not defined in this excerpt; a sketch follows this row.
    print_thread = Thread(
        target=print_queries,
        args=(queue,),
        name='QueryPrinter'
    )
    print_thread.start()
[ 2, 15069, 33448, 12552, 46665, 13, 1439, 2489, 10395, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13789,...
3.049046
367
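The benchmark driver above starts a QueryPrinter thread whose target, print_queries, is not part of the excerpt. A plausible consumer compatible with the Queue hand-off might look like this; the None shutdown sentinel is an assumption, not the project's actual protocol:

def print_queries(queue: Queue) -> None:
    """Drain generated queries from the shared queue and print them."""
    while True:
        query = queue.get()
        if query is None:      # assumed shutdown sentinel
            break
        print(query)
        queue.task_done()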
from rest_framework import serializers

from .models import *
[ 6738, 1334, 62, 30604, 1330, 11389, 11341, 201, 198, 6738, 764, 27530, 1330, 1635, 201 ]
4.133333
15
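The serializers module above contains only imports. A typical serializer built on them would look like the sketch below, where Article stands in for whichever model .models actually exports:

class ArticleSerializer(serializers.ModelSerializer):
    class Meta:
        model = Article          # placeholder model name from .models
        fields = '__all__'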
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# DO NOT EDIT -- GENERATED BY CMake -- Change the CMakeLists.txt file if needed

"""Automatically generated unit tests list - DO NOT EDIT."""

google_cloud_cpp_common_unit_tests = [
    "common_options_test.cc",
    "future_generic_test.cc",
    "future_generic_then_test.cc",
    "future_void_test.cc",
    "future_void_then_test.cc",
    "iam_bindings_test.cc",
    "internal/algorithm_test.cc",
    "internal/api_client_header_test.cc",
    "internal/backoff_policy_test.cc",
    "internal/base64_transforms_test.cc",
    "internal/big_endian_test.cc",
    "internal/compiler_info_test.cc",
    "internal/credentials_impl_test.cc",
    "internal/env_test.cc",
    "internal/filesystem_test.cc",
    "internal/format_time_point_test.cc",
    "internal/future_impl_test.cc",
    "internal/invoke_result_test.cc",
    "internal/log_impl_test.cc",
    "internal/pagination_range_test.cc",
    "internal/parse_rfc3339_test.cc",
    "internal/random_test.cc",
    "internal/retry_policy_test.cc",
    "internal/status_payload_keys_test.cc",
    "internal/strerror_test.cc",
    "internal/throw_delegate_test.cc",
    "internal/tuple_test.cc",
    "internal/type_list_test.cc",
    "internal/user_agent_prefix_test.cc",
    "internal/utility_test.cc",
    "kms_key_name_test.cc",
    "log_test.cc",
    "options_test.cc",
    "polling_policy_test.cc",
    "project_test.cc",
    "status_or_test.cc",
    "status_test.cc",
    "stream_range_test.cc",
    "terminate_handler_test.cc",
    "tracing_options_test.cc",
]
[ 2, 15069, 2864, 3012, 11419, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13789, 13, 198, 2, 921, 743, 7330...
2.637626
792
import unittest

from api import create_app
[ 11748, 555, 715, 395, 198, 6738, 40391, 1330, 2251, 62, 1324, 198 ]
3.583333
12
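The test module above stops after its imports. A minimal test case consistent with an application-factory pattern could be the following; the create_app signature is an assumption, since the api package is not shown:

class CreateAppTestCase(unittest.TestCase):
    def setUp(self):
        # create_app often accepts a config name, e.g. create_app('testing')
        self.app = create_app()

    def test_app_exists(self):
        self.assertIsNotNone(self.app)


if __name__ == '__main__':
    unittest.main()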
from django import template
from django.template.defaultfilters import stringfilter
from django.utils.safestring import SafeString
import markdown
import urllib

register = template.Library()
[ 6738, 42625, 14208, 1330, 11055, 198, 6738, 42625, 14208, 13, 28243, 13, 12286, 10379, 1010, 1330, 4731, 24455, 198, 6738, 42625, 14208, 13, 26791, 13, 49585, 395, 1806, 1330, 19978, 10100, 198, 11748, 1317, 2902, 198, 11748, 2956, 297, 5...
3.979167
48
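The template-tag module above registers a Library but defines no tags in the excerpt. A filter of the kind these imports suggest might be (the filter name is illustrative, not necessarily what the project defines):

@register.filter(name='markdownify')
@stringfilter
def markdownify(value):
    """Render a Markdown string to HTML and mark it safe for templates."""
    return SafeString(markdown.markdown(value))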
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json

from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.CloudbusUserInfo import CloudbusUserInfo
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 11748, 33918, 198, 198, 6738, 435, 541, 323, 13, 64, 404, 13, 15042, 13, 9979, 415, 13, 22973, 34184, 1187, 1330, 163...
2.652174
69
# Generated by Django 2.2.4 on 2019-08-10 08:09

from django.db import migrations, models
[ 2, 2980, 515, 416, 37770, 362, 13, 17, 13, 19, 319, 13130, 12, 2919, 12, 940, 8487, 25, 2931, 198, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 11, 4981, 628 ]
2.84375
32
#! /usr/bin/env python import sys import json import urllib import urllib2 import time import argparse import re # Category ID for Discrete Semiconductors > Transistors > BJTs TRANSISTOR_ID = b814751e89ff63d3 def find_total_hits(search_query): """ Function: find_total_hits -------------------- Returns the number of hits that correspond to the search query. """ url = "http://octopart.com/api/v3/categories/" # NOTE: Use your API key here (https://octopart.com/api/register) url += "?apikey=09b32c6c" args = [ ('q', search_query), ('start', 0), ('limit', 1), #change to increase number of datasheets ('include[]','datasheets') ] url += '&' + urllib.urlencode(args) data = urllib.urlopen(url).read() # perform a SearchRequest search_response = json.loads(data) # Grab the SearchResponse # return number of hits return search_response['hits'] def download_datasheets(search_query): """ Function: download_datasheets -------------------- Uses the OctoPart API to download all datasheets associated with a given set of search keywords. """ MAX_RESULTS = 100 counter = 0 total_hits = find_total_hits(search_query) # print number of hits print "[info] Search Response Hits: %s" % (total_hits) # Calculate how many multiples of 100s of hits there are num_hundreds = total_hits / MAX_RESULTS print "[info] Performing %s iterations of %s results." % (num_hundreds, MAX_RESULTS) for i in range(num_hundreds+1): url = "http://octopart.com/api/v3/parts/search" # NOTE: Use your API key here (https://octopart.com/api/register) url += "?apikey=09b32c6c" args = [ ('q', search_query), ('start', (i * MAX_RESULTS)), ('limit', MAX_RESULTS), # change to edit number of datasheets ('include[]','datasheets') # ('include[]','specs'), # ('include[]','descriptions') ] url += '&' + urllib.urlencode(args) data = urllib.urlopen(url).read() # perform a SearchRequest search_response = json.loads(data) # Grab the SearchResponse # Iterate through the SearchResults in the SearchResponse if not search_response.get('results'): print "[error] no results returned in outer loop: " + str(i) continue for result in search_response['results']: part = result['item'] # Grab the Part in the SearchResult print ("[info] %s_%s..." % (part['brand']['name'].replace(" ", ""), part['mpn'])), sys.stdout.flush() # Iterate through list of datasheets for the given part for datasheet in part['datasheets']: # Grab the Datasheet URL pdflink = datasheet['url'] if pdflink is not None: # Download the PDF try: response = urllib2.urlopen(pdflink) except urllib2.HTTPError, err: if err.code == 404: print "[error] Page not found!...", elif err.code == 403: print "[error] Access Denied!...", else: print "[error] HTTP Error code ", err.code, continue; # advance to next datasheet rather than crashing try: filename = re.search('([^/]*)\.[^.]*$', datasheet['url']).group(1) except AttributeError: continue; # skip to next datasheet rather than crashing file = open("../datasheets/%s.pdf" % filename, 'w') file.write(response.read()) file.close() counter += 1 # Increment the counter of files downloaded # NOTE: Not sure if this is necessary. Just a precaution. time.sleep(0.4) # Limit ourselves to 3 HTTP Requests/second print("DONE") print("[info] %s Parts Completed." % MAX_RESULTS) print("[info] COMPLETED: %s datasheets for the query were downloaded." 
% counter) def parse_args(): """ Function: parse_args -------------------- Parse the arguments for the Octopart Datasheet Scraper """ # Define what commandline arguments can be accepted parser = argparse.ArgumentParser() parser.add_argument('query',metavar="\"SEARCH_KEYWORDS\"", help="keywords to query in quotes (required)") parser.add_argument('--version', action='version', version='%(prog)s 0.1.0') args = parser.parse_args() return args.query # Main Function if __name__ == "__main__": reload(sys) sys.setdefaultencoding('utf-8') search_query = parse_args() # Parse commandline arguments start_time = time.time() print "[info] Download datasheets for %s" % search_query download_datasheets(search_query) finish_time = time.time() print '[info] Took', finish_time - start_time, 'sec total.'
[ 2, 0, 1220, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 11748, 25064, 198, 11748, 33918, 198, 11748, 2956, 297, 571, 198, 11748, 2956, 297, 571, 17, 198, 11748, 640, 198, 11748, 1822, 29572, 198, 11748, 302, 198, 198, 2, 21743, 4522,...
2.257938
2,299
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # FOGLAMP_BEGIN # See: http://foglamp.readthedocs.io/ # FOGLAMP_END """ fogbench -- a Python script used to test FogLAMP. The objective is to simulate payloads for input, REST and other requests against one or more FogLAMP instances. This version of fogbench is meant to test the CoAP and HTTP plugins interface of FogLAMP southbound services. fogbench [IN] -h --help Print this help -i --interval The interval in seconds between each iteration (default: 0) [IN] -k --keep Do not delete (keep) the running sample (default: no) [IN] -o --output Set the output file for statistics [IN] -p --payload Type of payload and protocol (default: coap) [IN] -t --template Set the template to use [IN] -v --version Display the version and exit [IN] -H --host The FogLAMP host (default: localhost) -I --iterations The number of iterations of the test (default: 1) [IN] -O --occurrences The number of occurrences of the template (default: 1) [IN] -P --port The FogLAMP port. Default depends on payload and protocol [IN] -S --statistic The type of statistics to collect Example: $ cd $FOGLAMP_ROOT/bin $ ./fogbench Help: $ ./fogbench -h * Create reading objects from given template, as per the json file name specified with -t * Save those objects to the file, as per the file name specified with -o * Read those objects * Send those to CoAP or HTTP south plugin server, on specific host and port .. todo:: * Try generators """ import sys import os import random import json from datetime import datetime, timezone import argparse import collections import asyncio import aiohttp from .exceptions import * __author__ = "Praveen Garg" __copyright__ = "Copyright (c) 2017 OSIsoft, LLC" __license__ = "Apache 2.0" __version__ = "${VERSION}" _FOGBENCH_VERSION = u"0.1.1" _start_time = [] _end_time = [] _tot_msgs_transferred = [] _tot_byte_transferred = [] _num_iterated = 0 """Statistics to be collected""" # _logger = logger.setup(__name__) def local_timestamp(): """ :return: str - current time stamp with microseconds and machine timezone info :example '2018-05-08 14:06:40.517313+05:30' """ return str(datetime.now(timezone.utc).astimezone()) def get_statistics(_stats_type=None, _out_file=None): stat = '' global _start_time global _end_time global _tot_msgs_transferred global _tot_byte_transferred global _num_iterated if _stats_type == 'total': stat += u"Total Statistics:\n" stat += (u"\nStart Time: {}".format(datetime.strftime(_start_time[0], "%Y-%m-%d %H:%M:%S.%f"))) stat += (u"\nEnd Time: {}\n".format(datetime.strftime(_end_time[-1], "%Y-%m-%d %H:%M:%S.%f"))) stat += (u"\nTotal Messages Transferred: {}".format(sum(_tot_msgs_transferred))) stat += (u"\nTotal Bytes Transferred: {}\n".format(sum(_tot_byte_transferred))) stat += (u"\nTotal Iterations: {}".format(_num_iterated)) stat += (u"\nTotal Messages per Iteration: {}".format(sum(_tot_msgs_transferred)/_num_iterated)) stat += (u"\nTotal Bytes per Iteration: {}\n".format(sum(_tot_byte_transferred)/_num_iterated)) _msg_rate = [] _byte_rate = [] for itr in range(_num_iterated): time_taken = _end_time[itr] - _start_time[itr] _msg_rate.append(_tot_msgs_transferred[itr]/(time_taken.seconds+time_taken.microseconds/1E6)) _byte_rate.append(_tot_byte_transferred[itr] / (time_taken.seconds+time_taken.microseconds/1E6)) stat += (u"\nMin messages/second: {}".format(min(_msg_rate))) stat += (u"\nMax messages/second: {}".format(max(_msg_rate))) stat += (u"\nAvg messages/second: {}\n".format(sum(_msg_rate)/_num_iterated)) stat += (u"\nMin Bytes/second: 
{}".format(min(_byte_rate))) stat += (u"\nMax Bytes/second: {}".format(max(_byte_rate))) stat += (u"\nAvg Bytes/second: {}".format(sum(_byte_rate)/_num_iterated)) if _out_file: with open(_out_file, 'w') as f: f.write(stat) else: print(stat) # should we also show total time diff? end_time - start_time def check_server(payload_type='coap'): template_str = ">>> Make sure south {} plugin service is running \n & listening on specified host and port \n" if payload_type == 'coap': print(template_str.format("CoAP")) elif payload_type == 'http': print(template_str.format("HTTP")) parser = argparse.ArgumentParser(prog='fogbench') parser.description = '%(prog)s -- a Python script used to test FogLAMP (simulate payloads)' parser.epilog = 'The initial version of %(prog)s is meant to test the south plugin interface of ' \ 'FogLAMP using CoAP or HTTP' parser.add_argument('-v', '--version', action='version', version='%(prog)s {0!s}'.format(_FOGBENCH_VERSION)) parser.add_argument('-k', '--keep', default=False, choices=['y', 'yes', 'n', 'no'], help='Do not delete the running sample (default: no)') parser.add_argument('-t', '--template', required=True, help='Set the template file, json extension') parser.add_argument('-o', '--output', default=None, help='Set the statistics output file') parser.add_argument('-p', '--payload', default='coap', choices=['coap', 'http'], help='Type of payload ' 'and protocol (default: coap)') parser.add_argument('-I', '--iterations', help='The number of iterations of the test (default: 1)') parser.add_argument('-O', '--occurrences', help='The number of occurrences of the template (default: 1)') parser.add_argument('-H', '--host', help='Server host address (default: localhost)') parser.add_argument('-P', '--port', help='The FogLAMP port. (default: 5683)') parser.add_argument('-i', '--interval', default=0, help='The interval in seconds for each iteration (default: 0)') parser.add_argument('-S', '--statistics', default='total', choices=['total'], help='The type of statistics to collect ' '(default: total)') namespace = parser.parse_args(sys.argv[1:]) infile = '{0}'.format(namespace.template if namespace.template else '') statistics_file = os.path.join(os.path.dirname(__file__), "out/{}".format(namespace.output)) if namespace.output else None keep_the_file = True if namespace.keep in ['y', 'yes'] else False # iterations and occurrences arg_iterations = int(namespace.iterations) if namespace.iterations else 1 arg_occurrences = int(namespace.occurrences) if namespace.occurrences else 1 # interval between each iteration arg_interval = int(namespace.interval) if namespace.interval else 0 arg_stats_type = '{0}'.format(namespace.statistics) if namespace.statistics else 'total' if namespace.payload: arg_payload_protocol = namespace.payload arg_host = '{0}'.format(namespace.host) if namespace.host else 'localhost' default_port = 6683 if arg_payload_protocol == 'http' else 5683 arg_port = int(namespace.port) if namespace.port else default_port check_server(arg_payload_protocol) sample_file = os.path.join("/tmp", "foglamp_running_sample.{}".format(os.getpid())) parse_template_and_prepare_json(_template_file=infile, _write_to_file=sample_file, _occurrences=arg_occurrences) read_out_file(_file=sample_file, _keep=keep_the_file, _iterations=arg_iterations, _interval=arg_interval, send_to=arg_payload_protocol) get_statistics(_stats_type=arg_stats_type, _out_file=statistics_file) # TODO: Change below per local_timestamp() values """ Expected output from given template { "timestamp" : 
"2017-08-04T06:59:57.503Z", "asset" : "TI sensorTag/luxometer", "sensor_values" : { "lux" : 49 } } { "timestamp" : "2017-08-04T06:59:57.863Z", "asset" : "TI sensorTag/pressure", "sensor_values" : { "pressure" : 1021.2 } } { "timestamp" : "2017-08-04T06:59:58.863Z", "asset" : "TI sensorTag/humidity", "sensor_values" : { "humidity" : 71.2, "temperature" : 18.6 } } { "timestamp" : "2017-08-04T06:59:59.863Z", "asset" : "TI sensorTag/temperature", "sensor_values" : { "object" : 18.2, "ambient" : 21.6 } } { "timestamp" : "2017-08-04T07:00:00.863Z", "asset" : "TI sensorTag/accelerometer", "sensor_values" : { "x" : 1.2, "y" : 0.0, "z" : -0.6 } } { "timestamp" : "2017-08-04T07:00:01.863Z", "asset" : "TI sensorTag/gyroscope", "sensor_values" : { "x" : 101.2, "y" : 46.2, "z" : -12.6 } } { "timestamp" : "2017-08-04T07:00:02.863Z", "asset" : "TI sensorTag/magnetometer", "sensor_values" : { "x" : 101.2, "y" : 46.2, "z" : -12.6 } } { "timestamp" : "2017-08-04T07:00:03.863Z", "asset" : "mouse", "sensor_values" : { "button" : "down" } } { "timestamp" : "2017-08-04T07:00:04.863Z", "asset" : "wall clock", "sensor_values" : { "tick" : "tock" } } """
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 2, 376, 7730, 43, 23518, 62, 33, 43312, 198, 2, 4091, 25, 2638, 1378, 69, 28678, 696, 13, 961, 83, ...
2.43847
3,738
# -*- coding: utf-8 -*- # This code is part of Qiskit. # # (C) Copyright IBM 2019. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. # pylint: disable=cell-var-from-loop,invalid-name """ Measurement correction filters. """ from typing import List, Union from copy import deepcopy from scipy.optimize import minimize import scipy.linalg as la import numpy as np import qiskit from qiskit import QiskitError from qiskit.tools import parallel_map from qiskit.ignis.verification.tomography import count_keys def _apply_correction(self, resultidx, raw_data, method): """Wrapper to call apply with a counts dictionary.""" new_counts = self.apply( raw_data.get_counts(resultidx), method=method) return resultidx, new_counts class TensoredFilter(): """ Tensored measurement error mitigation filter. Produced from a tensored measurement calibration fitter and can be applied to data. """ def __init__(self, cal_matrices: np.matrix, substate_labels_list: list, mit_pattern: list): """ Initialize a tensored measurement error mitigation filter using the cal_matrices from a tensored measurement calibration fitter. A simple usage this class is explained [here] (https://qiskit.org/documentation/tutorials/noise/3_measurement_error_mitigation.html). Args: cal_matrices: the calibration matrices for applying the correction. substate_labels_list: for each calibration matrix a list of the states (as strings, states in the subspace) mit_pattern: for each calibration matrix a list of the logical qubit indices (as int, states in the subspace) """ self._cal_matrices = cal_matrices self._qubit_list_sizes = [] self._indices_list = [] self._substate_labels_list = [] self.substate_labels_list = substate_labels_list self._mit_pattern = mit_pattern def apply(self, raw_data: Union[qiskit.result.result.Result, dict], method: str = 'least_squares', meas_layout: List[int] = None): """ Apply the calibration matrices to results. Args: raw_data (dict or Result): The data to be corrected. Can be in one of two forms: * A counts dictionary from results.get_counts * A Qiskit Result method (str): fitting method. The following methods are supported: * 'pseudo_inverse': direct inversion of the cal matrices. Mitigated counts can contain negative values and the sum of counts would not equal to the shots. Mitigation is conducted qubit wise: For each qubit, mitigate the whole counts using the calibration matrices which affect the corresponding qubit. For example, assume we are mitigating the 3rd bit of the 4-bit counts using '2\times 2' calibration matrix `A_3`. When mitigating the count of '0110' in this step, the following formula is applied: `count['0110'] = A_3^{-1}[1, 0]*count['0100'] + A_3^{-1}[1, 1]*count['0110']`. The total time complexity of this method is `O(m2^{n + t})`, where `n` is the size of calibrated qubits, `m` is the number of sets in `mit_pattern`, and `t` is the size of largest set of mit_pattern. If the `mit_pattern` is shaped like `[[0], [1], [2], ..., [n-1]]`, which corresponds to the tensor product noise model without cross-talk, then the time complexity would be `O(n2^n)`. 
If the `mit_pattern` is shaped like `[[0, 1, 2, ..., n-1]]`, which exactly corresponds to the complete error mitigation, then the time complexity would be `O(2^(n+n)) = O(4^n)`. * 'least_squares': constrained to have physical probabilities. Instead of directly applying inverse calibration matrices, this method solve a constrained optimization problem to find the closest probability vector to the result from 'pseudo_inverse' method. Sequential least square quadratic programming (SLSQP) is used in the internal process. Every updating step in SLSQP takes `O(m2^{n+t})` time. Since this method is using the SLSQP optimization over the vector with lenght `2^n`, the mitigation for 8 bit counts with the `mit_pattern = [[0], [1], [2], ..., [n-1]]` would take 10 seconds or more. * If `None`, 'least_squares' is used. meas_layout (list of int): the mapping from classical registers to qubits * If you measure qubit `2` to clbit `0`, `0` to `1`, and `1` to `2`, the list becomes `[2, 0, 1]` * If `None`, flatten(mit_pattern) is used. Returns: dict or Result: The corrected data in the same form as raw_data Raises: QiskitError: if raw_data is not in a one of the defined forms. """ all_states = count_keys(self.nqubits) num_of_states = 2**self.nqubits if meas_layout is None: meas_layout = [] for qubits in self._mit_pattern: meas_layout += qubits # check forms of raw_data if isinstance(raw_data, dict): # counts dictionary # convert to list raw_data2 = [np.zeros(num_of_states, dtype=float)] for state, count in raw_data.items(): stateidx = int(state, 2) raw_data2[0][stateidx] = count elif isinstance(raw_data, qiskit.result.result.Result): # extract out all the counts, re-call the function with the # counts and push back into the new result new_result = deepcopy(raw_data) new_counts_list = parallel_map( self._apply_correction, [resultidx for resultidx, _ in enumerate(raw_data.results)], task_args=(raw_data, method, meas_layout)) for resultidx, new_counts in new_counts_list: new_result.results[resultidx].data.counts = new_counts return new_result else: raise QiskitError("Unrecognized type for raw_data.") if method == 'pseudo_inverse': pinv_cal_matrices = [] for cal_mat in self._cal_matrices: pinv_cal_matrices.append(la.pinv(cal_mat)) meas_layout = meas_layout[::-1] # reverse endian qubits_to_clbits = [-1 for _ in range(max(meas_layout) + 1)] for i, qubit in enumerate(meas_layout): qubits_to_clbits[qubit] = i # Apply the correction for data_idx, _ in enumerate(raw_data2): if method == 'pseudo_inverse': for pinv_cal_mat, pos_qubits, indices in zip(pinv_cal_matrices, self._mit_pattern, self._indices_list): inv_mat_dot_x = np.zeros([num_of_states], dtype=float) pos_clbits = [qubits_to_clbits[qubit] for qubit in pos_qubits] for state_idx, state in enumerate(all_states): first_index = self.compute_index_of_cal_mat(state, pos_clbits, indices) for i in range(len(pinv_cal_mat)): # i is index of pinv_cal_mat source_state = self.flip_state(state, i, pos_clbits) second_index = self.compute_index_of_cal_mat(source_state, pos_clbits, indices) inv_mat_dot_x[state_idx] += pinv_cal_mat[first_index, second_index]\ * raw_data2[data_idx][int(source_state, 2)] raw_data2[data_idx] = inv_mat_dot_x elif method == 'least_squares': x0 = np.random.rand(num_of_states) x0 = x0 / sum(x0) nshots = sum(raw_data2[data_idx]) cons = ({'type': 'eq', 'fun': lambda x: nshots - sum(x)}) bnds = tuple((0, nshots) for x in x0) res = minimize(fun, x0, method='SLSQP', constraints=cons, bounds=bnds, tol=1e-6) raw_data2[data_idx] = res.x else: raise 
QiskitError("Unrecognized method.") # convert back into a counts dictionary new_count_dict = {} for state_idx, state in enumerate(all_states): if raw_data2[0][state_idx] != 0: new_count_dict[state] = raw_data2[0][state_idx] return new_count_dict def flip_state(self, state: str, mat_index: int, flip_poses: List[int]) -> str: """Flip the state according to the chosen qubit positions""" flip_poses = [pos for i, pos in enumerate(flip_poses) if (mat_index >> i) & 1] flip_poses = sorted(flip_poses) new_state = "" pos = 0 for flip_pos in flip_poses: new_state += state[pos:flip_pos] new_state += str(int(state[flip_pos], 2) ^ 1) # flip the state pos = flip_pos + 1 new_state += state[pos:] return new_state def compute_index_of_cal_mat(self, state: str, pos_qubits: List[int], indices: dict) -> int: """Return the index of (pseudo inverse) calibration matrix for the input quantum state""" sub_state = "" for pos in pos_qubits: sub_state += state[pos] return indices[sub_state] def _apply_correction(self, resultidx: int, raw_data: qiskit.result.result.Result, method: str, meas_layout: List[int]): """Wrapper to call apply with a counts dictionary.""" new_counts = self.apply( raw_data.get_counts(resultidx), method=method, meas_layout=meas_layout) return resultidx, new_counts
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 2, 770, 2438, 318, 636, 286, 1195, 1984, 270, 13, 198, 2, 198, 2, 357, 34, 8, 15069, 19764, 13130, 13, 198, 2, 198, 2, 770, 2438, 318, 11971, 739, 262, 24843,...
2.087204
5,275
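The TensoredFilter above is normally obtained from a tensored calibration fitter rather than constructed by hand. A hedged sketch using the qiskit-ignis measurement-mitigation API (qiskit-terra pre-1.0; the backend choice, shot count, and raw counts are illustrative):

from qiskit import Aer, execute
from qiskit.ignis.mitigation.measurement import tensored_meas_cal, TensoredMeasFitter

mit_pattern = [[0], [1]]                                   # calibrate each qubit separately
cal_circuits, state_labels = tensored_meas_cal(mit_pattern=mit_pattern)
cal_results = execute(cal_circuits, Aer.get_backend('qasm_simulator'), shots=4096).result()

fitter = TensoredMeasFitter(cal_results, mit_pattern=mit_pattern)
meas_filter = fitter.filter                                # a TensoredFilter instance
raw_counts = {'00': 2000, '01': 30, '10': 25, '11': 2041}  # example raw counts
mitigated_counts = meas_filter.apply(raw_counts, method='least_squares')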
#!/usr/bin/python def meh(captcha): """Returns the sum of the digits which match the next one in the captcha input string. >>> meh('1122') 3 >>> meh('1111') 4 >>> meh('1234') 0 >>> meh('91212129') 9 """ result = 0 for n in range(len(captcha)): if captcha[n] == captcha[(n + 1) % len(captcha)]: result += int(captcha[n]) return result def meh2(captcha): """Returns the sum of the digits which match the next one in the captcha input string. >>> meh2('1212') 6 >>> meh2('1221') 0 >>> meh2('123425') 4 >>> meh2('123123') 12 >>> meh2('12131415') 4 """ result = 0 for n in range(len(captcha)): if captcha[n] == captcha[(n + len(captcha) / 2) % len(captcha)]: result += int(captcha[n]) return result if __name__ == '__main__': input = '57276274387944537823652626177853384411146325384494935924454336611953119173638191671326254832624841593421667683474349154668177743437745965461678636631863541462893547616877914914662358836365421198516263335926544716331814125295712581158399321372683742773423626286669759415959391374744214595682795818615532673877868424196926497731144319736445141728123322962547288572434564178492753681842244888368542423832228211172842456231275738182764232265933625119312598161192193214898949267765417468348935134618964683127194391796165368145548814473129857697989322621368744725685183346825333247866734735894493395218781464346951777873929898961358796274889826894529599645442657423438562423853247543621565468819799931598754753467593832328147439341586125262733737128386961596394728159719292787597426898945198788211417854662948358422729471312456437778978749753927251431677533575752312447488337156956217451965643454445329758327129966657189332824969141448538681979632611199385896965946849725421978137753366252459914913637858783146735469758716752765718189175583956476935185985918536318424248425426398158278111751711911227818826766177996223718837428972784328925743869885232266127727865267881592395643836999244218345184474613129823933659422223685422732186536199153988717455568523781673393698356967355875123554797755491181791593156433735591529495984256519631187849654633243225118132152549712643273819314433877592644693826861523243946998615722951182474773173215527598949553185313259992227879964482121769617218685394776778423378182462422788277997523913176326468957342296368178321958626168785578977414537368686438348124283789748775163821457641135163495649331144436157836647912852483177542224864952271874645274572426458614384917923623627532487625396914111582754953944965462576624728896917137599778828769958626788685374749661741223741834844643725486925886933118382649581481351844943368484853956759877215252766294896496444835264357169642341291412768946589781812493421379575569593678354241223363739129813633236996588711791919421574583924743119867622229659211793468744163297478952475933163259769578345894367855534294493613767564497137369969315192443795512585' print meh(input) print meh2(input)
[ 2, 48443, 14629, 14, 8800, 14, 29412, 628, 198, 4299, 502, 71, 7, 27144, 11693, 2599, 198, 220, 220, 220, 37227, 35561, 262, 2160, 286, 262, 19561, 543, 2872, 262, 1306, 530, 287, 262, 48972, 198, 220, 220, 220, 5128, 4731, 13, 628,...
2.27017
1,351
# coding: utf-8 # Copyright (c) Pymatgen Development Team. # Distributed under the terms of the MIT License. """ Module for graph representations of crystals. """ import copy import logging import os.path import subprocess import warnings from collections import defaultdict, namedtuple from itertools import combinations from operator import itemgetter import networkx as nx import networkx.algorithms.isomorphism as iso import numpy as np from monty.json import MSONable from monty.os.path import which from networkx.drawing.nx_agraph import write_dot from networkx.readwrite import json_graph from scipy.spatial import KDTree from scipy.stats import describe from pymatgen.core import Lattice, Molecule, PeriodicSite, Structure from pymatgen.core.structure import FunctionalGroups from pymatgen.util.coord import lattice_points_in_supercell from pymatgen.vis.structure_vtk import EL_COLORS try: import igraph IGRAPH_AVAILABLE = True except ImportError: IGRAPH_AVAILABLE = False logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) __author__ = "Matthew Horton, Evan Spotte-Smith, Samuel Blau" __version__ = "0.1" __maintainer__ = "Matthew Horton" __email__ = "mkhorton@lbl.gov" __status__ = "Production" __date__ = "August 2017" ConnectedSite = namedtuple("ConnectedSite", "site, jimage, index, weight, dist") def _compare(g1, g2, i1, i2): """ Helper function called by isomorphic to ensure comparison of node identities. """ return g1.vs[i1]["species"] == g2.vs[i2]["species"] def _igraph_from_nxgraph(graph): """ Helper function that converts a networkx graph object into an igraph graph object. """ nodes = graph.nodes(data=True) new_igraph = igraph.Graph() for node in nodes: new_igraph.add_vertex(name=str(node[0]), species=node[1]["specie"], coords=node[1]["coords"]) new_igraph.add_edges([(str(edge[0]), str(edge[1])) for edge in graph.edges()]) return new_igraph def _isomorphic(frag1, frag2): """ Internal function to check if two graph objects are isomorphic, using igraph if if is available and networkx if it is not. """ f1_nodes = frag1.nodes(data=True) f2_nodes = frag2.nodes(data=True) if len(f1_nodes) != len(f2_nodes): return False f2_edges = frag2.edges() if len(f2_edges) != len(f2_edges): return False f1_comp_dict = {} f2_comp_dict = {} for node in f1_nodes: if node[1]["specie"] not in f1_comp_dict: f1_comp_dict[node[1]["specie"]] = 1 else: f1_comp_dict[node[1]["specie"]] += 1 for node in f2_nodes: if node[1]["specie"] not in f2_comp_dict: f2_comp_dict[node[1]["specie"]] = 1 else: f2_comp_dict[node[1]["specie"]] += 1 if f1_comp_dict != f2_comp_dict: return False if IGRAPH_AVAILABLE: ifrag1 = _igraph_from_nxgraph(frag1) ifrag2 = _igraph_from_nxgraph(frag2) return ifrag1.isomorphic_vf2(ifrag2, node_compat_fn=_compare) nm = iso.categorical_node_match("specie", "ERROR") return nx.is_isomorphic(frag1.to_undirected(), frag2.to_undirected(), node_match=nm) def insert_node( self, i, species, coords, coords_are_cartesian=False, validate_proximity=False, site_properties=None, edges=None, ): """ A wrapper around Molecule.insert(), which also incorporates the new site into the MoleculeGraph. :param i: Index at which to insert the new site :param species: Species for the new site :param coords: 3x1 array representing coordinates of the new site :param coords_are_cartesian: Whether coordinates are cartesian. Defaults to False. :param validate_proximity: For Molecule.insert(); if True (default False), distance will be checked to ensure that site can be safely added. 
:param site_properties: Site properties for Molecule :param edges: List of dicts representing edges to be added to the MoleculeGraph. These edges must include the index of the new site i, and all indices used for these edges should reflect the MoleculeGraph AFTER the insertion, NOT before. Each dict should at least have a "to_index" and "from_index" key, and can also have a "weight" and a "properties" key. :return: """ self.structure.insert( i, species, coords, coords_are_cartesian=coords_are_cartesian, validate_proximity=validate_proximity, properties=site_properties, ) mapping = {} for j in range(len(self.structure) - 1): if j < i: mapping[j] = j else: mapping[j] = j + 1 nx.relabel_nodes(self.graph, mapping, copy=False) self.graph.add_node(i) self.set_node_attributes() if edges is not None: for edge in edges: try: self.add_edge( edge["from_index"], edge["to_index"], from_jimage=(0, 0, 0), to_jimage=edge["to_jimage"], weight=edge.get("weight", None), edge_properties=edge.get("properties", None), ) except KeyError: raise RuntimeError("Some edges are invalid.") def set_node_attributes(self): """ Gives each node a "specie" and a "coords" attribute, updated with the current species and coordinates. :return: """ species = {} coords = {} properties = {} for node in self.graph.nodes(): species[node] = self.structure[node].specie.symbol coords[node] = self.structure[node].coords properties[node] = self.structure[node].properties nx.set_node_attributes(self.graph, species, "specie") nx.set_node_attributes(self.graph, coords, "coords") nx.set_node_attributes(self.graph, properties, "properties") def alter_edge( self, from_index, to_index, to_jimage=None, new_weight=None, new_edge_properties=None, ): """ Alters either the weight or the edge_properties of an edge in the StructureGraph. :param from_index: int :param to_index: int :param to_jimage: tuple :param new_weight: alter_edge does not require that weight be altered. As such, by default, this is None. If weight is to be changed, it should be a float. :param new_edge_properties: alter_edge does not require that edge_properties be altered. As such, by default, this is None. If any edge properties are to be changed, it should be a dictionary of edge properties to be changed. :return: """ existing_edges = self.graph.get_edge_data(from_index, to_index) # ensure that edge exists before attempting to change it if not existing_edges: raise ValueError( "Edge between {} and {} cannot be altered;\ no edge exists between those sites.".format( from_index, to_index ) ) if to_jimage is None: edge_index = 0 else: for i, properties in existing_edges.items(): if properties["to_jimage"] == to_jimage: edge_index = i if new_weight is not None: self.graph[from_index][to_index][edge_index]["weight"] = new_weight if new_edge_properties is not None: for prop in list(new_edge_properties.keys()): self.graph[from_index][to_index][edge_index][prop] = new_edge_properties[prop] def break_edge(self, from_index, to_index, to_jimage=None, allow_reverse=False): """ Remove an edge from the StructureGraph. If no image is given, this method will fail. :param from_index: int :param to_index: int :param to_jimage: tuple :param allow_reverse: If allow_reverse is True, then break_edge will attempt to break both (from_index, to_index) and, failing that, will attempt to break (to_index, from_index). 
:return: """ # ensure that edge exists before attempting to remove it existing_edges = self.graph.get_edge_data(from_index, to_index) existing_reverse = None if to_jimage is None: raise ValueError("Image must be supplied, to avoid ambiguity.") if existing_edges: for i, properties in existing_edges.items(): if properties["to_jimage"] == to_jimage: edge_index = i self.graph.remove_edge(from_index, to_index, edge_index) else: if allow_reverse: existing_reverse = self.graph.get_edge_data(to_index, from_index) if existing_reverse: for i, properties in existing_reverse.items(): if properties["to_jimage"] == to_jimage: edge_index = i self.graph.remove_edge(to_index, from_index, edge_index) else: raise ValueError( "Edge cannot be broken between {} and {};\ no edge exists between those sites.".format( from_index, to_index ) ) def remove_nodes(self, indices): """ A wrapper for Molecule.remove_sites(). :param indices: list of indices in the current Molecule (and graph) to be removed. :return: """ self.structure.remove_sites(indices) self.graph.remove_nodes_from(indices) mapping = {} for correct, current in enumerate(sorted(self.graph.nodes)): mapping[current] = correct nx.relabel_nodes(self.graph, mapping, copy=False) self.set_node_attributes() def substitute_group( self, index, func_grp, strategy, bond_order=1, graph_dict=None, strategy_params=None, ): """ Builds off of Structure.substitute to replace an atom in self.structure with a functional group. This method also amends self.graph to incorporate the new functional group. NOTE: Care must be taken to ensure that the functional group that is substituted will not place atoms to close to each other, or violate the dimensions of the Lattice. :param index: Index of atom to substitute. :param func_grp: Substituent molecule. There are two options: 1. Providing an actual Molecule as the input. The first atom must be a DummySpecies X, indicating the position of nearest neighbor. The second atom must be the next nearest atom. For example, for a methyl group substitution, func_grp should be X-CH3, where X is the first site and C is the second site. What the code will do is to remove the index site, and connect the nearest neighbor to the C atom in CH3. The X-C bond indicates the directionality to connect the atoms. 2. A string name. The molecule will be obtained from the relevant template in func_groups.json. :param strategy: Class from pymatgen.analysis.local_env. :param bond_order: A specified bond order to calculate the bond length between the attached functional group and the nearest neighbor site. Defaults to 1. :param graph_dict: Dictionary representing the bonds of the functional group (format: {(u, v): props}, where props is a dictionary of properties, including weight. If None, then the algorithm will attempt to automatically determine bonds using one of a list of strategies defined in pymatgen.analysis.local_env. :param strategy_params: dictionary of keyword arguments for strategy. If None, default parameters will be used. :return: """ if isinstance(func_grp, Molecule): func_grp = copy.deepcopy(func_grp) else: try: func_grp = copy.deepcopy(FunctionalGroups[func_grp]) except Exception: raise RuntimeError("Can't find functional group in list. 
" "Provide explicit coordinate instead") self.structure.substitute(index, func_grp, bond_order=bond_order) mapping = map_indices(func_grp) # Remove dummy atom "X" func_grp.remove_species("X") if graph_dict is not None: for (u, v) in graph_dict.keys(): edge_props = graph_dict[(u, v)] if "to_jimage" in edge_props.keys(): to_jimage = edge_props["to_jimage"] del edge_props["to_jimage"] else: # By default, assume that all edges should stay remain # inside the initial image to_jimage = (0, 0, 0) if "weight" in edge_props.keys(): weight = edge_props["weight"] del edge_props["weight"] self.add_edge( mapping[u], mapping[v], to_jimage=to_jimage, weight=weight, edge_properties=edge_props, ) else: if strategy_params is None: strategy_params = {} strat = strategy(**strategy_params) for site in mapping.values(): neighbors = strat.get_nn_info(self.structure, site) for neighbor in neighbors: self.add_edge( from_index=site, from_jimage=(0, 0, 0), to_index=neighbor["site_index"], to_jimage=neighbor["image"], weight=neighbor["weight"], warn_duplicates=False, ) def get_connected_sites(self, n, jimage=(0, 0, 0)): """ Returns a named tuple of neighbors of site n: periodic_site, jimage, index, weight. Index is the index of the corresponding site in the original structure, weight can be None if not defined. :param n: index of Site in Structure :param jimage: lattice vector of site :return: list of ConnectedSite tuples, sorted by closest first """ connected_sites = set() connected_site_images = set() out_edges = [(u, v, d, "out") for u, v, d in self.graph.out_edges(n, data=True)] in_edges = [(u, v, d, "in") for u, v, d in self.graph.in_edges(n, data=True)] for u, v, d, dir in out_edges + in_edges: to_jimage = d["to_jimage"] if dir == "in": u, v = v, u to_jimage = np.multiply(-1, to_jimage) to_jimage = tuple(map(int, np.add(to_jimage, jimage))) site_d = self.structure[v].as_dict() site_d["abc"] = np.add(site_d["abc"], to_jimage).tolist() site = PeriodicSite.from_dict(site_d) # from_site if jimage arg != (0, 0, 0) relative_jimage = np.subtract(to_jimage, jimage) dist = self.structure[u].distance(self.structure[v], jimage=relative_jimage) weight = d.get("weight", None) if (v, to_jimage) not in connected_site_images: connected_site = ConnectedSite(site=site, jimage=to_jimage, index=v, weight=weight, dist=dist) connected_sites.add(connected_site) connected_site_images.add((v, to_jimage)) # return list sorted by closest sites first connected_sites = list(connected_sites) connected_sites.sort(key=lambda x: x.dist) return connected_sites def get_coordination_of_site(self, n): """ Returns the number of neighbors of site n. In graph terms, simply returns degree of node corresponding to site n. :param n: index of site :return (int): """ number_of_self_loops = sum([1 for n, v in self.graph.edges(n) if n == v]) return self.graph.degree(n) - number_of_self_loops def draw_graph_to_file( self, filename="graph", diff=None, hide_unconnected_nodes=False, hide_image_edges=True, edge_colors=False, node_labels=False, weight_labels=False, image_labels=False, color_scheme="VESTA", keep_dot=False, algo="fdp", ): """ Draws graph using GraphViz. The networkx graph object itself can also be drawn with networkx's in-built graph drawing methods, but note that this might give misleading results for multigraphs (edges are super-imposed on each other). If visualization is difficult to interpret, `hide_image_edges` can help, especially in larger graphs. 
:param filename: filename to output, will detect filetype from extension (any graphviz filetype supported, such as pdf or png) :param diff (StructureGraph): an additional graph to compare with, will color edges red that do not exist in diff and edges green that are in diff graph but not in the reference graph :param hide_unconnected_nodes: if True, hide unconnected nodes :param hide_image_edges: if True, do not draw edges that go through periodic boundaries :param edge_colors (bool): if True, use node colors to color edges :param node_labels (bool): if True, label nodes with species and site index :param weight_labels (bool): if True, label edges with weights :param image_labels (bool): if True, label edges with their periodic images (usually only used for debugging, edges to periodic images always appear as dashed lines) :param color_scheme (str): "VESTA" or "JMOL" :param keep_dot (bool): keep GraphViz .dot file for later visualization :param algo: any graphviz algo, "neato" (for simple graphs) or "fdp" (for more crowded graphs) usually give good outputs :return: """ if not which(algo): raise RuntimeError("StructureGraph graph drawing requires " "GraphViz binaries to be in the path.") # Developer note: NetworkX also has methods for drawing # graphs using matplotlib, these also work here. However, # a dedicated tool like GraphViz allows for much easier # control over graph appearance and also correctly displays # mutli-graphs (matplotlib can superimpose multiple edges). g = self.graph.copy() g.graph = {"nodesep": 10.0, "dpi": 300, "overlap": "false"} # add display options for nodes for n in g.nodes(): # get label by species name label = "{}({})".format(str(self.structure[n].specie), n) if node_labels else "" # use standard color scheme for nodes c = EL_COLORS[color_scheme].get(str(self.structure[n].specie.symbol), [0, 0, 0]) # get contrasting font color # magic numbers account for perceived luminescence # https://stackoverflow.com/questions/1855884/determine-font-color-based-on-background-color fontcolor = "#000000" if 1 - (c[0] * 0.299 + c[1] * 0.587 + c[2] * 0.114) / 255 < 0.5 else "#ffffff" # convert color to hex string color = "#{:02x}{:02x}{:02x}".format(c[0], c[1], c[2]) g.add_node( n, fillcolor=color, fontcolor=fontcolor, label=label, fontname="Helvetica-bold", style="filled", shape="circle", ) edges_to_delete = [] # add display options for edges for u, v, k, d in g.edges(keys=True, data=True): # retrieve from/to images, set as origin if not defined to_image = d["to_jimage"] # set edge style d["style"] = "solid" if to_image != (0, 0, 0): d["style"] = "dashed" if hide_image_edges: edges_to_delete.append((u, v, k)) # don't show edge directions d["arrowhead"] = "none" # only add labels for images that are not the origin if image_labels: d["headlabel"] = "" if to_image == (0, 0, 0) else "to {}".format((to_image)) d["arrowhead"] = "normal" if d["headlabel"] else "none" # optionally color edges using node colors color_u = g.nodes[u]["fillcolor"] color_v = g.nodes[v]["fillcolor"] d["color_uv"] = "{};0.5:{};0.5".format(color_u, color_v) if edge_colors else "#000000" # optionally add weights to graph if weight_labels: units = g.graph.get("edge_weight_units", "") if d.get("weight"): d["label"] = "{:.2f} {}".format(d["weight"], units) # update edge with our new style attributes g.edges[u, v, k].update(d) # optionally remove periodic image edges, # these can be confusing due to periodic boundaries if hide_image_edges: for edge_to_delete in edges_to_delete: g.remove_edge(*edge_to_delete) # 
optionally hide unconnected nodes, # these can appear when removing periodic edges if hide_unconnected_nodes: g = g.subgraph([n for n in g.degree() if g.degree()[n] != 0]) # optionally highlight differences with another graph if diff: diff = self.diff(diff, strict=True) green_edges = [] red_edges = [] for u, v, k, d in g.edges(keys=True, data=True): if (u, v, d["to_jimage"]) in diff["self"]: # edge has been deleted red_edges.append((u, v, k)) elif (u, v, d["to_jimage"]) in diff["other"]: # edge has been added green_edges.append((u, v, k)) for u, v, k in green_edges: g.edges[u, v, k].update({"color_uv": "#00ff00"}) for u, v, k in red_edges: g.edges[u, v, k].update({"color_uv": "#ff0000"}) basename, extension = os.path.splitext(filename) extension = extension[1:] write_dot(g, basename + ".dot") with open(filename, "w") as f: args = [algo, "-T", extension, basename + ".dot"] rs = subprocess.Popen(args, stdout=f, stdin=subprocess.PIPE, close_fds=True) rs.communicate() if rs.returncode != 0: raise RuntimeError("{} exited with return code {}.".format(algo, rs.returncode)) if not keep_dot: os.remove(basename + ".dot") def types_of_coordination_environments(self, anonymous=False): """ Extract information on the different co-ordination environments present in the graph. :param anonymous: if anonymous, will replace specie names with A, B, C, etc. :return: a list of co-ordination environments, e.g. ['Mo-S(6)', 'S-Mo(3)'] """ motifs = set() for idx, site in enumerate(self.structure): centre_sp = site.species_string connected_sites = self.get_connected_sites(idx) connected_species = [connected_site.site.species_string for connected_site in connected_sites] labels = [] for sp in set(connected_species): count = connected_species.count(sp) labels.append((count, sp)) labels = sorted(labels, reverse=True) if anonymous: mapping = {centre_sp: "A"} available_letters = [chr(66 + i) for i in range(25)] for label in labels: sp = label[1] if sp not in mapping: mapping[sp] = available_letters.pop(0) centre_sp = "A" labels = [(label[0], mapping[label[1]]) for label in labels] labels = ["{}({})".format(label[1], label[0]) for label in labels] motif = "{}-{}".format(centre_sp, ",".join(labels)) motifs.add(motif) return sorted(list(motifs)) def as_dict(self): """ As in :Class: `pymatgen.core.Structure` except with using `to_dict_of_dicts` from NetworkX to store graph information. """ d = { "@module": self.__class__.__module__, "@class": self.__class__.__name__, "structure": self.structure.as_dict(), "graphs": json_graph.adjacency_data(self.graph), } return d def __mul__(self, scaling_matrix): """ Replicates the graph, creating a supercell, intelligently joining together edges that lie on periodic boundaries. In principle, any operations on the expanded graph could also be done on the original graph, but a larger graph can be easier to visualize and reason about. :param scaling_matrix: same as Structure.__mul__ :return: """ # Developer note: a different approach was also trialed, using # a simple Graph (instead of MultiDiGraph), with node indices # representing both site index and periodic image. Here, the # number of nodes != number of sites in the Structure. This # approach has many benefits, but made it more difficult to # keep the graph in sync with its corresponding Structure. # Broadly, it would be easier to multiply the Structure # *before* generating the StructureGraph, but this isn't # possible when generating the graph using critic2 from # charge density. 
# Multiplication works by looking for the expected position # of an image node, and seeing if that node exists in the # supercell. If it does, the edge is updated. This is more # computationally expensive than just keeping track of the # which new lattice images present, but should hopefully be # easier to extend to a general 3x3 scaling matrix. # code adapted from Structure.__mul__ scale_matrix = np.array(scaling_matrix, np.int16) if scale_matrix.shape != (3, 3): scale_matrix = np.array(scale_matrix * np.eye(3), np.int16) else: # TODO: test __mul__ with full 3x3 scaling matrices raise NotImplementedError("Not tested with 3x3 scaling matrices yet.") new_lattice = Lattice(np.dot(scale_matrix, self.structure.lattice.matrix)) f_lat = lattice_points_in_supercell(scale_matrix) c_lat = new_lattice.get_cartesian_coords(f_lat) new_sites = [] new_graphs = [] for v in c_lat: # create a map of nodes from original graph to its image mapping = {n: n + len(new_sites) for n in range(len(self.structure))} for idx, site in enumerate(self.structure): s = PeriodicSite( site.species, site.coords + v, new_lattice, properties=site.properties, coords_are_cartesian=True, to_unit_cell=False, ) new_sites.append(s) new_graphs.append(nx.relabel_nodes(self.graph, mapping, copy=True)) new_structure = Structure.from_sites(new_sites) # merge all graphs into one big graph new_g = nx.MultiDiGraph() for new_graph in new_graphs: new_g = nx.union(new_g, new_graph) edges_to_remove = [] # tuple of (u, v, k) edges_to_add = [] # tuple of (u, v, attr_dict) # list of new edges inside supercell # for duplicate checking edges_inside_supercell = [{u, v} for u, v, d in new_g.edges(data=True) if d["to_jimage"] == (0, 0, 0)] new_periodic_images = [] orig_lattice = self.structure.lattice # use k-d tree to match given position to an # existing Site in Structure kd_tree = KDTree(new_structure.cart_coords) # tolerance in for sites to be considered equal # this could probably be a lot smaller tol = 0.05 for u, v, k, d in new_g.edges(keys=True, data=True): to_jimage = d["to_jimage"] # for node v # reduce unnecessary checking if to_jimage != (0, 0, 0): # get index in original site n_u = u % len(self.structure) n_v = v % len(self.structure) # get fractional co-ordinates of where atoms defined # by edge are expected to be, relative to original # lattice (keeping original lattice has # significant benefits) v_image_frac = np.add(self.structure[n_v].frac_coords, to_jimage) u_frac = self.structure[n_u].frac_coords # using the position of node u as a reference, # get relative Cartesian co-ordinates of where # atoms defined by edge are expected to be v_image_cart = orig_lattice.get_cartesian_coords(v_image_frac) u_cart = orig_lattice.get_cartesian_coords(u_frac) v_rel = np.subtract(v_image_cart, u_cart) # now retrieve position of node v in # new supercell, and get asgolute Cartesian # co-ordinates of where atoms defined by edge # are expected to be v_expec = new_structure[u].coords + v_rel # now search in new structure for these atoms # query returns (distance, index) v_present = kd_tree.query(v_expec) v_present = v_present[1] if v_present[0] <= tol else None # check if image sites now present in supercell # and if so, delete old edge that went through # periodic boundary if v_present is not None: new_u = u new_v = v_present new_d = d.copy() # node now inside supercell new_d["to_jimage"] = (0, 0, 0) edges_to_remove.append((u, v, k)) # make sure we don't try to add duplicate edges # will remove two edges for everyone one we add if {new_u, new_v} not in 
edges_inside_supercell: # normalize direction if new_v < new_u: new_u, new_v = new_v, new_u edges_inside_supercell.append({new_u, new_v}) edges_to_add.append((new_u, new_v, new_d)) else: # want to find new_v such that we have # full periodic boundary conditions # so that nodes on one side of supercell # are connected to nodes on opposite side v_expec_frac = new_structure.lattice.get_fractional_coords(v_expec) # find new to_jimage # use np.around to fix issues with finite precision leading to incorrect image v_expec_image = np.around(v_expec_frac, decimals=3) v_expec_image = v_expec_image - v_expec_image % 1 v_expec_frac = np.subtract(v_expec_frac, v_expec_image) v_expec = new_structure.lattice.get_cartesian_coords(v_expec_frac) v_present = kd_tree.query(v_expec) v_present = v_present[1] if v_present[0] <= tol else None if v_present is not None: new_u = u new_v = v_present new_d = d.copy() new_to_jimage = tuple(map(int, v_expec_image)) # normalize direction if new_v < new_u: new_u, new_v = new_v, new_u new_to_jimage = tuple(np.multiply(-1, d["to_jimage"]).astype(int)) new_d["to_jimage"] = new_to_jimage edges_to_remove.append((u, v, k)) if (new_u, new_v, new_to_jimage) not in new_periodic_images: edges_to_add.append((new_u, new_v, new_d)) new_periodic_images.append((new_u, new_v, new_to_jimage)) logger.debug("Removing {} edges, adding {} new edges.".format(len(edges_to_remove), len(edges_to_add))) # add/delete marked edges for edges_to_remove in edges_to_remove: new_g.remove_edge(*edges_to_remove) for (u, v, d) in edges_to_add: new_g.add_edge(u, v, **d) # return new instance of StructureGraph with supercell d = { "@module": self.__class__.__module__, "@class": self.__class__.__name__, "structure": new_structure.as_dict(), "graphs": json_graph.adjacency_data(new_g), } sg = StructureGraph.from_dict(d) return sg def __len__(self): """ :return: length of Structure / number of nodes in graph """ return len(self.structure) def sort(self, key=None, reverse=False): """ Same as Structure.sort(), also remaps nodes in graph. :param key: :param reverse: :return: """ old_structure = self.structure.copy() # sort Structure self.structure._sites = sorted(self.structure._sites, key=key, reverse=reverse) # apply Structure ordering to graph mapping = {idx: self.structure.index(site) for idx, site in enumerate(old_structure)} self.graph = nx.relabel_nodes(self.graph, mapping, copy=True) # normalize directions of edges edges_to_remove = [] edges_to_add = [] for u, v, k, d in self.graph.edges(keys=True, data=True): if v < u: new_v, new_u, new_d = u, v, d.copy() new_d["to_jimage"] = tuple(np.multiply(-1, d["to_jimage"]).astype(int)) edges_to_remove.append((u, v, k)) edges_to_add.append((new_u, new_v, new_d)) # add/delete marked edges for edges_to_remove in edges_to_remove: self.graph.remove_edge(*edges_to_remove) for (u, v, d) in edges_to_add: self.graph.add_edge(u, v, **d) def __eq__(self, other): """ Two StructureGraphs are equal if they have equal Structures, and have the same edges between Sites. Edge weights can be different and StructureGraphs can still be considered equal. 
:param other: StructureGraph :return (bool): """ # sort for consistent node indices # PeriodicSite should have a proper __hash__() value, # using its frac_coords as a convenient key mapping = {tuple(site.frac_coords): self.structure.index(site) for site in other.structure} other_sorted = other.__copy__() other_sorted.sort(key=lambda site: mapping[tuple(site.frac_coords)]) edges = {(u, v, d["to_jimage"]) for u, v, d in self.graph.edges(keys=False, data=True)} edges_other = {(u, v, d["to_jimage"]) for u, v, d in other_sorted.graph.edges(keys=False, data=True)} return (edges == edges_other) and (self.structure == other_sorted.structure) def diff(self, other, strict=True): """ Compares two StructureGraphs. Returns dict with keys 'self', 'other', 'both' with edges that are present in only one StructureGraph ('self' and 'other'), and edges that are present in both. The Jaccard distance is a simple measure of the dissimilarity between two StructureGraphs (ignoring edge weights), and is defined by 1 - (size of the intersection / size of the union) of the sets of edges. This is returned with key 'dist'. Important note: all node indices are in terms of the StructureGraph this method is called from, not the 'other' StructureGraph: there is no guarantee the node indices will be the same if the underlying Structures are ordered differently. :param other: StructureGraph :param strict: if False, will compare bonds from different Structures, with node indices replaced by Species strings, will not count number of occurrences of bonds :return: """ if self.structure != other.structure and strict: return ValueError("Meaningless to compare StructureGraphs if " "corresponding Structures are different.") if strict: # sort for consistent node indices # PeriodicSite should have a proper __hash__() value, # using its frac_coords as a convenient key mapping = {tuple(site.frac_coords): self.structure.index(site) for site in other.structure} other_sorted = other.__copy__() other_sorted.sort(key=lambda site: mapping[tuple(site.frac_coords)]) edges = {(u, v, d["to_jimage"]) for u, v, d in self.graph.edges(keys=False, data=True)} edges_other = {(u, v, d["to_jimage"]) for u, v, d in other_sorted.graph.edges(keys=False, data=True)} else: edges = { (str(self.structure[u].specie), str(self.structure[v].specie)) for u, v, d in self.graph.edges(keys=False, data=True) } edges_other = { (str(other.structure[u].specie), str(other.structure[v].specie)) for u, v, d in other.graph.edges(keys=False, data=True) } if len(edges) == 0 and len(edges_other) == 0: jaccard_dist = 0 # by definition else: jaccard_dist = 1 - len(edges.intersection(edges_other)) / len(edges.union(edges_other)) return { "self": edges - edges_other, "other": edges_other - edges, "both": edges.intersection(edges_other), "dist": jaccard_dist, } def get_subgraphs_as_molecules(self, use_weights=False): """ Retrieve subgraphs as molecules, useful for extracting molecules from periodic crystals. Will only return unique molecules, not any duplicates present in the crystal (a duplicate defined as an isomorphic subgraph). :param use_weights (bool): If True, only treat subgraphs as isomorphic if edges have the same weights. Typically, this means molecules will need to have the same bond lengths to be defined as duplicates, otherwise bond lengths can differ. This is a fairly robust approach, but will treat e.g. enantiomers as being duplicates. 
:return: list of unique Molecules in Structure """ # creating a supercell is an easy way to extract # molecules (and not, e.g., layers of a 2D crystal) # without adding extra logic if getattr(self, "_supercell_sg", None) is None: self._supercell_sg = supercell_sg = self * (3, 3, 3) # make undirected to find connected subgraphs supercell_sg.graph = nx.Graph(supercell_sg.graph) # find subgraphs all_subgraphs = [supercell_sg.graph.subgraph(c) for c in nx.connected_components(supercell_sg.graph)] # discount subgraphs that lie across *supercell* boundaries # these will subgraphs representing crystals molecule_subgraphs = [] for subgraph in all_subgraphs: intersects_boundary = any(d["to_jimage"] != (0, 0, 0) for u, v, d in subgraph.edges(data=True)) if not intersects_boundary: molecule_subgraphs.append(nx.MultiDiGraph(subgraph)) # add specie names to graph to be able to test for isomorphism for subgraph in molecule_subgraphs: for n in subgraph: subgraph.add_node(n, specie=str(supercell_sg.structure[n].specie)) # now define how we test for isomorphism # prune duplicate subgraphs unique_subgraphs = [] for subgraph in molecule_subgraphs: already_present = [ nx.is_isomorphic(subgraph, g, node_match=node_match, edge_match=edge_match) for g in unique_subgraphs ] if not any(already_present): unique_subgraphs.append(subgraph) # get Molecule objects for each subgraph molecules = [] for subgraph in unique_subgraphs: coords = [supercell_sg.structure[n].coords for n in subgraph.nodes()] species = [supercell_sg.structure[n].specie for n in subgraph.nodes()] molecule = Molecule(species, coords) # shift so origin is at center of mass molecule = molecule.get_centered_molecule() molecules.append(molecule) return molecules class MolGraphSplitError(Exception): """ Raised when a molecule graph is failed to split into two disconnected subgraphs """ pass class MoleculeGraph(MSONable): """ This is a class for annotating a Molecule with bond information, stored in the form of a graph. A "bond" does not necessarily have to be a chemical bond, but can store any kind of information that connects two Sites. """ def __init__(self, molecule, graph_data=None): """ If constructing this class manually, use the `with_empty_graph` method or `with_local_env_strategy` method (using an algorithm provided by the `local_env` module, such as O'Keeffe). This class that contains connection information: relationships between sites represented by a Graph structure, and an associated structure object. This class uses the NetworkX package to store and operate on the graph itself, but contains a lot of helper methods to make associating a graph with a given molecule easier. Use cases for this include storing bonding information, NMR J-couplings, Heisenberg exchange parameters, etc. 
:param molecule: Molecule object :param graph_data: dict containing graph information in dict format (not intended to be constructed manually, see as_dict method for format) """ if isinstance(molecule, MoleculeGraph): # just make a copy from input graph_data = molecule.as_dict()["graphs"] self.molecule = molecule self.graph = nx.readwrite.json_graph.adjacency_graph(graph_data) # tidy up edge attr dicts, reading to/from json duplicates # information for u, v, k, d in self.graph.edges(keys=True, data=True): if "id" in d: del d["id"] if "key" in d: del d["key"] # ensure images are tuples (conversion to lists happens # when serializing back from json), it's important images # are hashable/immutable if "to_jimage" in d: d["to_jimage"] = tuple(d["to_jimage"]) if "from_jimage" in d: d["from_jimage"] = tuple(d["from_jimage"]) self.set_node_attributes() def add_edge( self, from_index, to_index, weight=None, warn_duplicates=True, edge_properties=None, ): """ Add edge to graph. Since physically a 'bond' (or other connection between sites) doesn't have a direction, from_index, from_jimage can be swapped with to_index, to_jimage. However, images will always always be shifted so that from_index < to_index and from_jimage becomes (0, 0, 0). :param from_index: index of site connecting from :param to_index: index of site connecting to :param weight (float): e.g. bond length :param warn_duplicates (bool): if True, will warn if trying to add duplicate edges (duplicate edges will not be added in either case) :param edge_properties (dict): any other information to store on graph edges, similar to Structure's site_properties :return: """ # this is not necessary for the class to work, but # just makes it neater if to_index < from_index: to_index, from_index = from_index, to_index # sanitize types from_index, to_index = int(from_index), int(to_index) # check we're not trying to add a duplicate edge # there should only ever be at most one edge # between two sites existing_edge_data = self.graph.get_edge_data(from_index, to_index) if existing_edge_data and warn_duplicates: warnings.warn( "Trying to add an edge that already exists from " "site {} to site {}.".format(from_index, to_index) ) return # generic container for additional edge properties, # similar to site properties edge_properties = edge_properties or {} if weight: self.graph.add_edge(from_index, to_index, weight=weight, **edge_properties) else: self.graph.add_edge(from_index, to_index, **edge_properties) def insert_node( self, i, species, coords, validate_proximity=False, site_properties=None, edges=None, ): """ A wrapper around Molecule.insert(), which also incorporates the new site into the MoleculeGraph. :param i: Index at which to insert the new site :param species: Species for the new site :param coords: 3x1 array representing coordinates of the new site :param validate_proximity: For Molecule.insert(); if True (default False), distance will be checked to ensure that site can be safely added. :param site_properties: Site properties for Molecule :param edges: List of dicts representing edges to be added to the MoleculeGraph. These edges must include the index of the new site i, and all indices used for these edges should reflect the MoleculeGraph AFTER the insertion, NOT before. Each dict should at least have a "to_index" and "from_index" key, and can also have a "weight" and a "properties" key. 
:return: """ self.molecule.insert( i, species, coords, validate_proximity=validate_proximity, properties=site_properties, ) mapping = {} for j in range(len(self.molecule) - 1): if j < i: mapping[j] = j else: mapping[j] = j + 1 nx.relabel_nodes(self.graph, mapping, copy=False) self.graph.add_node(i) self.set_node_attributes() if edges is not None: for edge in edges: try: self.add_edge( edge["from_index"], edge["to_index"], weight=edge.get("weight", None), edge_properties=edge.get("properties", None), ) except KeyError: raise RuntimeError("Some edges are invalid.") def set_node_attributes(self): """ Replicates molecule site properties (specie, coords, etc.) in the MoleculeGraph. :return: """ species = {} coords = {} properties = {} for node in self.graph.nodes(): species[node] = self.molecule[node].specie.symbol coords[node] = self.molecule[node].coords properties[node] = self.molecule[node].properties nx.set_node_attributes(self.graph, species, "specie") nx.set_node_attributes(self.graph, coords, "coords") nx.set_node_attributes(self.graph, properties, "properties") def alter_edge(self, from_index, to_index, new_weight=None, new_edge_properties=None): """ Alters either the weight or the edge_properties of an edge in the MoleculeGraph. :param from_index: int :param to_index: int :param new_weight: alter_edge does not require that weight be altered. As such, by default, this is None. If weight is to be changed, it should be a float. :param new_edge_properties: alter_edge does not require that edge_properties be altered. As such, by default, this is None. If any edge properties are to be changed, it should be a dictionary of edge properties to be changed. :return: """ existing_edge = self.graph.get_edge_data(from_index, to_index) # ensure that edge exists before attempting to change it if not existing_edge: raise ValueError( "Edge between {} and {} cannot be altered;\ no edge exists between those sites.".format( from_index, to_index ) ) # Third index should always be 0 because there should only be one edge between any two nodes if new_weight is not None: self.graph[from_index][to_index][0]["weight"] = new_weight if new_edge_properties is not None: for prop in list(new_edge_properties.keys()): self.graph[from_index][to_index][0][prop] = new_edge_properties[prop] def break_edge(self, from_index, to_index, allow_reverse=False): """ Remove an edge from the MoleculeGraph :param from_index: int :param to_index: int :param allow_reverse: If allow_reverse is True, then break_edge will attempt to break both (from_index, to_index) and, failing that, will attempt to break (to_index, from_index). :return: """ # ensure that edge exists before attempting to remove it existing_edge = self.graph.get_edge_data(from_index, to_index) existing_reverse = None if existing_edge: self.graph.remove_edge(from_index, to_index) else: if allow_reverse: existing_reverse = self.graph.get_edge_data(to_index, from_index) if existing_reverse: self.graph.remove_edge(to_index, from_index) else: raise ValueError( "Edge cannot be broken between {} and {};\ no edge exists between those sites.".format( from_index, to_index ) ) def remove_nodes(self, indices): """ A wrapper for Molecule.remove_sites(). :param indices: list of indices in the current Molecule (and graph) to be removed. 
:return: """ self.molecule.remove_sites(indices) self.graph.remove_nodes_from(indices) mapping = {} for correct, current in enumerate(sorted(self.graph.nodes)): mapping[current] = correct nx.relabel_nodes(self.graph, mapping, copy=False) self.set_node_attributes() def get_disconnected_fragments(self): """ Determine if the MoleculeGraph is connected. If it is not, separate the MoleculeGraph into different MoleculeGraphs, where each resulting MoleculeGraph is a disconnected subgraph of the original. Currently, this function naively assigns the charge of the total molecule to a single submolecule. A later effort will be to actually accurately assign charge. NOTE: This function does not modify the original MoleculeGraph. It creates a copy, modifies that, and returns two or more new MoleculeGraph objects. :return: list of MoleculeGraphs """ if nx.is_weakly_connected(self.graph): return [copy.deepcopy(self)] original = copy.deepcopy(self) sub_mols = list() # Had to use nx.weakly_connected_components because of deprecation # of nx.weakly_connected_component_subgraphs subgraphs = [original.graph.subgraph(c) for c in nx.weakly_connected_components(original.graph)] for subg in subgraphs: nodes = sorted(list(subg.nodes)) # Molecule indices are essentially list-based, so node indices # must be remapped, incrementing from 0 mapping = {} for i, n in enumerate(nodes): mapping[n] = i # just give charge to whatever subgraph has node with index 0 # TODO: actually figure out how to distribute charge if 0 in nodes: charge = self.molecule.charge else: charge = 0 # relabel nodes in graph to match mapping new_graph = nx.relabel_nodes(subg, mapping) species = nx.get_node_attributes(new_graph, "specie") coords = nx.get_node_attributes(new_graph, "coords") raw_props = nx.get_node_attributes(new_graph, "properties") properties = {} for prop_set in raw_props.values(): for prop in prop_set.keys(): if prop in properties: properties[prop].append(prop_set[prop]) else: properties[prop] = [prop_set[prop]] # Site properties must be present for all atoms in the molecule # in order to be used for Molecule instantiation for k, v in properties.items(): if len(v) != len(species): del properties[k] new_mol = Molecule(species, coords, charge=charge, site_properties=properties) graph_data = json_graph.adjacency_data(new_graph) # create new MoleculeGraph sub_mols.append(MoleculeGraph(new_mol, graph_data=graph_data)) return sub_mols def split_molecule_subgraphs(self, bonds, allow_reverse=False, alterations=None): """ Split MoleculeGraph into two or more MoleculeGraphs by breaking a set of bonds. This function uses MoleculeGraph.break_edge repeatedly to create disjoint graphs (two or more separate molecules). This function does not only alter the graph information, but also changes the underlying Molecules. If the bonds parameter does not include sufficient bonds to separate two molecule fragments, then this function will fail. Currently, this function naively assigns the charge of the total molecule to a single submolecule. A later effort will be to actually accurately assign charge. NOTE: This function does not modify the original MoleculeGraph. It creates a copy, modifies that, and returns two or more new MoleculeGraph objects. :param bonds: list of tuples (from_index, to_index) representing bonds to be broken to split the MoleculeGraph. :param alterations: a dict {(from_index, to_index): alt}, where alt is a dictionary including weight and/or edge properties to be changed following the split. 
:param allow_reverse: If allow_reverse is True, then break_edge will attempt to break both (from_index, to_index) and, failing that, will attempt to break (to_index, from_index). :return: list of MoleculeGraphs """ self.set_node_attributes() original = copy.deepcopy(self) for bond in bonds: original.break_edge(bond[0], bond[1], allow_reverse=allow_reverse) if nx.is_weakly_connected(original.graph): raise MolGraphSplitError( "Cannot split molecule; \ MoleculeGraph is still connected." ) # alter any bonds before partition, to avoid remapping if alterations is not None: for (u, v) in alterations.keys(): if "weight" in alterations[(u, v)]: weight = alterations[(u, v)]["weight"] del alterations[(u, v)]["weight"] edge_properties = alterations[(u, v)] if len(alterations[(u, v)]) != 0 else None original.alter_edge(u, v, new_weight=weight, new_edge_properties=edge_properties) else: original.alter_edge(u, v, new_edge_properties=alterations[(u, v)]) return original.get_disconnected_fragments() def build_unique_fragments(self): """ Find all possible fragment combinations of the MoleculeGraphs (in other words, all connected induced subgraphs) :return: """ self.set_node_attributes() graph = self.graph.to_undirected() # find all possible fragments, aka connected induced subgraphs frag_dict = {} for ii in range(1, len(self.molecule)): for combination in combinations(graph.nodes, ii): mycomp = [] for idx in combination: mycomp.append(str(self.molecule[idx].specie)) mycomp = "".join(sorted(mycomp)) subgraph = nx.subgraph(graph, combination) if nx.is_connected(subgraph): mykey = mycomp + str(len(subgraph.edges())) if mykey not in frag_dict: frag_dict[mykey] = [copy.deepcopy(subgraph)] else: frag_dict[mykey].append(copy.deepcopy(subgraph)) # narrow to all unique fragments using graph isomorphism unique_frag_dict = {} for key in frag_dict: unique_frags = [] for frag in frag_dict[key]: found = False for f in unique_frags: if _isomorphic(frag, f): found = True break if not found: unique_frags.append(frag) unique_frag_dict[key] = copy.deepcopy(unique_frags) # convert back to molecule graphs unique_mol_graph_dict = {} for key in unique_frag_dict: unique_mol_graph_list = [] for fragment in unique_frag_dict[key]: mapping = {e: i for i, e in enumerate(sorted(fragment.nodes))} remapped = nx.relabel_nodes(fragment, mapping) species = nx.get_node_attributes(remapped, "specie") coords = nx.get_node_attributes(remapped, "coords") edges = {} for from_index, to_index, key in remapped.edges: edge_props = fragment.get_edge_data(from_index, to_index, key=key) edges[(from_index, to_index)] = edge_props unique_mol_graph_list.append( self.with_edges( Molecule(species=species, coords=coords, charge=self.molecule.charge), edges, ) ) frag_key = ( str(unique_mol_graph_list[0].molecule.composition.alphabetical_formula) + " E" + str(len(unique_mol_graph_list[0].graph.edges())) ) unique_mol_graph_dict[frag_key] = copy.deepcopy(unique_mol_graph_list) return unique_mol_graph_dict def substitute_group( self, index, func_grp, strategy, bond_order=1, graph_dict=None, strategy_params=None, ): """ Builds off of Molecule.substitute to replace an atom in self.molecule with a functional group. This method also amends self.graph to incorporate the new functional group. NOTE: using a MoleculeGraph will generally produce a different graph compared with using a Molecule or str (when not using graph_dict). :param index: Index of atom to substitute. :param func_grp: Substituent molecule. There are three options: 1. 
Providing an actual molecule as the input. The first atom must be a DummySpecies X, indicating the position of nearest neighbor. The second atom must be the next nearest atom. For example, for a methyl group substitution, func_grp should be X-CH3, where X is the first site and C is the second site. What the code will do is to remove the index site, and connect the nearest neighbor to the C atom in CH3. The X-C bond indicates the directionality to connect the atoms. 2. A string name. The molecule will be obtained from the relevant template in func_groups.json. 3. A MoleculeGraph object. :param strategy: Class from pymatgen.analysis.local_env. :param bond_order: A specified bond order to calculate the bond length between the attached functional group and the nearest neighbor site. Defaults to 1. :param graph_dict: Dictionary representing the bonds of the functional group (format: {(u, v): props}, where props is a dictionary of properties, including weight. If None, then the algorithm will attempt to automatically determine bonds using one of a list of strategies defined in pymatgen.analysis.local_env. :param strategy_params: dictionary of keyword arguments for strategy. If None, default parameters will be used. :return: """ # Work is simplified if a graph is already in place if isinstance(func_grp, MoleculeGraph): self.molecule.substitute(index, func_grp.molecule, bond_order=bond_order) mapping = map_indices(func_grp.molecule) for (u, v) in list(func_grp.graph.edges()): edge_props = func_grp.graph.get_edge_data(u, v)[0] weight = None if "weight" in edge_props.keys(): weight = edge_props["weight"] del edge_props["weight"] self.add_edge(mapping[u], mapping[v], weight=weight, edge_properties=edge_props) else: if isinstance(func_grp, Molecule): func_grp = copy.deepcopy(func_grp) else: try: func_grp = copy.deepcopy(FunctionalGroups[func_grp]) except Exception: raise RuntimeError("Can't find functional group in list. " "Provide explicit coordinate instead") self.molecule.substitute(index, func_grp, bond_order=bond_order) mapping = map_indices(func_grp) # Remove dummy atom "X" func_grp.remove_species("X") if graph_dict is not None: for (u, v) in graph_dict.keys(): edge_props = graph_dict[(u, v)] if "weight" in edge_props.keys(): weight = edge_props["weight"] del edge_props["weight"] self.add_edge( mapping[u], mapping[v], weight=weight, edge_properties=edge_props, ) else: if strategy_params is None: strategy_params = {} strat = strategy(**strategy_params) graph = self.with_local_env_strategy(func_grp, strat) for (u, v) in list(graph.graph.edges()): edge_props = graph.graph.get_edge_data(u, v)[0] weight = None if "weight" in edge_props.keys(): weight = edge_props["weight"] del edge_props["weight"] if 0 not in list(graph.graph.nodes()): # If graph indices have different indexing u, v = (u - 1), (v - 1) self.add_edge( mapping[u], mapping[v], weight=weight, edge_properties=edge_props, ) def replace_group( self, index, func_grp, strategy, bond_order=1, graph_dict=None, strategy_params=None, ): """ Builds off of Molecule.substitute and MoleculeGraph.substitute_group to replace a functional group in self.molecule with a functional group. This method also amends self.graph to incorporate the new functional group. TODO: Figure out how to replace into a ring structure. :param index: Index of atom to substitute. :param func_grp: Substituent molecule. There are three options: 1. Providing an actual molecule as the input. The first atom must be a DummySpecies X, indicating the position of nearest neighbor. 
The second atom must be the next nearest atom. For example, for a methyl group substitution, func_grp should be X-CH3, where X is the first site and C is the second site. What the code will do is to remove the index site, and connect the nearest neighbor to the C atom in CH3. The X-C bond indicates the directionality to connect the atoms. 2. A string name. The molecule will be obtained from the relevant template in func_groups.json. 3. A MoleculeGraph object. :param strategy: Class from pymatgen.analysis.local_env. :param bond_order: A specified bond order to calculate the bond length between the attached functional group and the nearest neighbor site. Defaults to 1. :param graph_dict: Dictionary representing the bonds of the functional group (format: {(u, v): props}, where props is a dictionary of properties, including weight. If None, then the algorithm will attempt to automatically determine bonds using one of a list of strategies defined in pymatgen.analysis.local_env. :param strategy_params: dictionary of keyword arguments for strategy. If None, default parameters will be used. :return: """ self.set_node_attributes() neighbors = self.get_connected_sites(index) # If the atom at index is terminal if len(neighbors) == 1: self.substitute_group( index, func_grp, strategy, bond_order=bond_order, graph_dict=graph_dict, strategy_params=strategy_params, ) else: rings = self.find_rings(including=[index]) if len(rings) != 0: raise RuntimeError( "Currently functional group replacement" "cannot occur at an atom within a ring" "structure." ) to_remove = set() sizes = dict() disconnected = self.graph.to_undirected() disconnected.remove_node(index) for neighbor in neighbors: sizes[neighbor[2]] = len(nx.descendants(disconnected, neighbor[2])) keep = max(sizes, key=lambda x: sizes[x]) for i in sizes.keys(): if i != keep: to_remove.add(i) self.remove_nodes(list(to_remove)) self.substitute_group( index, func_grp, strategy, bond_order=bond_order, graph_dict=graph_dict, strategy_params=strategy_params, ) def find_rings(self, including=None): """ Find ring structures in the MoleculeGraph. :param including: list of site indices. If including is not None, then find_rings will only return those rings including the specified sites. By default, this parameter is None, and all rings will be returned. :return: dict {index:cycle}. Each entry will be a ring (cycle, in graph theory terms) including the index found in the Molecule. If there is no cycle including an index, the value will be an empty list. """ # Copies self.graph such that all edges (u, v) matched by edges (v, u) undirected = self.graph.to_undirected() directed = undirected.to_directed() cycles_nodes = [] cycles_edges = [] # Remove all two-edge cycles all_cycles = [c for c in nx.simple_cycles(directed) if len(c) > 2] # Using to_directed() will mean that each cycle always appears twice # So, we must also remove duplicates unique_sorted = [] unique_cycles = [] for cycle in all_cycles: if sorted(cycle) not in unique_sorted: unique_sorted.append(sorted(cycle)) unique_cycles.append(cycle) if including is None: cycles_nodes = unique_cycles else: for i in including: for cycle in unique_cycles: if i in cycle and cycle not in cycles_nodes: cycles_nodes.append(cycle) for cycle in cycles_nodes: edges = [] for i, e in enumerate(cycle): edges.append((cycle[i - 1], e)) cycles_edges.append(edges) return cycles_edges def get_connected_sites(self, n): """ Returns a named tuple of neighbors of site n: periodic_site, jimage, index, weight. 
Index is the index of the corresponding site in the original structure, weight can be None if not defined. :param n: index of Site in Molecule :param jimage: lattice vector of site :return: list of ConnectedSite tuples, sorted by closest first """ connected_sites = set() out_edges = list(self.graph.out_edges(n, data=True)) in_edges = list(self.graph.in_edges(n, data=True)) for u, v, d in out_edges + in_edges: weight = d.get("weight", None) if v == n: site = self.molecule[u] dist = self.molecule[v].distance(self.molecule[u]) connected_site = ConnectedSite(site=site, jimage=(0, 0, 0), index=u, weight=weight, dist=dist) else: site = self.molecule[v] dist = self.molecule[u].distance(self.molecule[v]) connected_site = ConnectedSite(site=site, jimage=(0, 0, 0), index=v, weight=weight, dist=dist) connected_sites.add(connected_site) # return list sorted by closest sites first connected_sites = list(connected_sites) connected_sites.sort(key=lambda x: x.dist) return connected_sites def get_coordination_of_site(self, n): """ Returns the number of neighbors of site n. In graph terms, simply returns degree of node corresponding to site n. :param n: index of site :return (int): """ number_of_self_loops = sum([1 for n, v in self.graph.edges(n) if n == v]) return self.graph.degree(n) - number_of_self_loops def draw_graph_to_file( self, filename="graph", diff=None, hide_unconnected_nodes=False, hide_image_edges=True, edge_colors=False, node_labels=False, weight_labels=False, image_labels=False, color_scheme="VESTA", keep_dot=False, algo="fdp", ): """ Draws graph using GraphViz. The networkx graph object itself can also be drawn with networkx's in-built graph drawing methods, but note that this might give misleading results for multigraphs (edges are super-imposed on each other). If visualization is difficult to interpret, `hide_image_edges` can help, especially in larger graphs. :param filename: filename to output, will detect filetype from extension (any graphviz filetype supported, such as pdf or png) :param diff (StructureGraph): an additional graph to compare with, will color edges red that do not exist in diff and edges green that are in diff graph but not in the reference graph :param hide_unconnected_nodes: if True, hide unconnected nodes :param hide_image_edges: if True, do not draw edges that go through periodic boundaries :param edge_colors (bool): if True, use node colors to color edges :param node_labels (bool): if True, label nodes with species and site index :param weight_labels (bool): if True, label edges with weights :param image_labels (bool): if True, label edges with their periodic images (usually only used for debugging, edges to periodic images always appear as dashed lines) :param color_scheme (str): "VESTA" or "JMOL" :param keep_dot (bool): keep GraphViz .dot file for later visualization :param algo: any graphviz algo, "neato" (for simple graphs) or "fdp" (for more crowded graphs) usually give good outputs :return: """ if not which(algo): raise RuntimeError("StructureGraph graph drawing requires " "GraphViz binaries to be in the path.") # Developer note: NetworkX also has methods for drawing # graphs using matplotlib, these also work here. However, # a dedicated tool like GraphViz allows for much easier # control over graph appearance and also correctly displays # mutli-graphs (matplotlib can superimpose multiple edges). 
g = self.graph.copy() g.graph = {"nodesep": 10.0, "dpi": 300, "overlap": "false"} # add display options for nodes for n in g.nodes(): # get label by species name label = "{}({})".format(str(self.molecule[n].specie), n) if node_labels else "" # use standard color scheme for nodes c = EL_COLORS[color_scheme].get(str(self.molecule[n].specie.symbol), [0, 0, 0]) # get contrasting font color # magic numbers account for perceived luminescence # https://stackoverflow.com/questions/1855884/determine-font-color-based-on-background-color fontcolor = "#000000" if 1 - (c[0] * 0.299 + c[1] * 0.587 + c[2] * 0.114) / 255 < 0.5 else "#ffffff" # convert color to hex string color = "#{:02x}{:02x}{:02x}".format(c[0], c[1], c[2]) g.add_node( n, fillcolor=color, fontcolor=fontcolor, label=label, fontname="Helvetica-bold", style="filled", shape="circle", ) edges_to_delete = [] # add display options for edges for u, v, k, d in g.edges(keys=True, data=True): # retrieve from/to images, set as origin if not defined if "to_image" in d: to_image = d["to_jimage"] else: to_image = (0, 0, 0) # set edge style d["style"] = "solid" if to_image != (0, 0, 0): d["style"] = "dashed" if hide_image_edges: edges_to_delete.append((u, v, k)) # don't show edge directions d["arrowhead"] = "none" # only add labels for images that are not the origin if image_labels: d["headlabel"] = "" if to_image == (0, 0, 0) else "to {}".format((to_image)) d["arrowhead"] = "normal" if d["headlabel"] else "none" # optionally color edges using node colors color_u = g.node[u]["fillcolor"] color_v = g.node[v]["fillcolor"] d["color_uv"] = "{};0.5:{};0.5".format(color_u, color_v) if edge_colors else "#000000" # optionally add weights to graph if weight_labels: units = g.graph.get("edge_weight_units", "") if d.get("weight"): d["label"] = "{:.2f} {}".format(d["weight"], units) # update edge with our new style attributes g.edges[u, v, k].update(d) # optionally remove periodic image edges, # these can be confusing due to periodic boundaries if hide_image_edges: for edge_to_delete in edges_to_delete: g.remove_edge(*edge_to_delete) # optionally hide unconnected nodes, # these can appear when removing periodic edges if hide_unconnected_nodes: g = g.subgraph([n for n in g.degree() if g.degree()[n] != 0]) # optionally highlight differences with another graph if diff: diff = self.diff(diff, strict=True) green_edges = [] red_edges = [] for u, v, k, d in g.edges(keys=True, data=True): if (u, v, d["to_jimage"]) in diff["self"]: # edge has been deleted red_edges.append((u, v, k)) elif (u, v, d["to_jimage"]) in diff["other"]: # edge has been added green_edges.append((u, v, k)) for u, v, k in green_edges: g.edges[u, v, k].update({"color_uv": "#00ff00"}) for u, v, k in red_edges: g.edges[u, v, k].update({"color_uv": "#ff0000"}) basename, extension = os.path.splitext(filename) extension = extension[1:] write_dot(g, basename + ".dot") with open(filename, "w") as f: args = [algo, "-T", extension, basename + ".dot"] rs = subprocess.Popen(args, stdout=f, stdin=subprocess.PIPE, close_fds=True) rs.communicate() if rs.returncode != 0: raise RuntimeError("{} exited with return code {}.".format(algo, rs.returncode)) if not keep_dot: os.remove(basename + ".dot") def as_dict(self): """ As in :Class: `pymatgen.core.Molecule` except with using `to_dict_of_dicts` from NetworkX to store graph information. 
""" d = { "@module": self.__class__.__module__, "@class": self.__class__.__name__, "molecule": self.molecule.as_dict(), "graphs": json_graph.adjacency_data(self.graph), } return d def __str__(self): s = "Molecule Graph" s += "\nMolecule: \n{}".format(self.molecule.__str__()) s += "\nGraph: {}\n".format(self.name) s += self._edges_to_string(self.graph) return s def __repr__(self): s = "Molecule Graph" s += "\nMolecule: \n{}".format(self.molecule.__repr__()) s += "\nGraph: {}\n".format(self.name) s += self._edges_to_string(self.graph) return s def __len__(self): """ :return: length of Molecule / number of nodes in graph """ return len(self.molecule) def sort(self, key=None, reverse=False): """ Same as Molecule.sort(), also remaps nodes in graph. :param key: :param reverse: :return: """ old_molecule = self.molecule.copy() # sort Molecule self.molecule._sites = sorted(self.molecule._sites, key=key, reverse=reverse) # apply Molecule ordering to graph mapping = {idx: self.molecule.index(site) for idx, site in enumerate(old_molecule)} self.graph = nx.relabel_nodes(self.graph, mapping, copy=True) # normalize directions of edges edges_to_remove = [] edges_to_add = [] for u, v, k, d in self.graph.edges(keys=True, data=True): if v < u: new_v, new_u, new_d = u, v, d.copy() new_d["to_jimage"] = (0, 0, 0) edges_to_remove.append((u, v, k)) edges_to_add.append((new_u, new_v, new_d)) # add/delete marked edges for edges_to_remove in edges_to_remove: self.graph.remove_edge(*edges_to_remove) for (u, v, d) in edges_to_add: self.graph.add_edge(u, v, **d) def __eq__(self, other): """ Two MoleculeGraphs are equal if they have equal Molecules, and have the same edges between Sites. Edge weights can be different and MoleculeGraphs can still be considered equal. :param other: MoleculeGraph :return (bool): """ # sort for consistent node indices # PeriodicSite should have a proper __hash__() value, # using its frac_coords as a convenient key try: mapping = {tuple(site.coords): self.molecule.index(site) for site in other.molecule} except ValueError: return False other_sorted = other.__copy__() other_sorted.sort(key=lambda site: mapping[tuple(site.coords)]) edges = {(u, v) for u, v, d in self.graph.edges(keys=False, data=True)} edges_other = {(u, v) for u, v, d in other_sorted.graph.edges(keys=False, data=True)} return (edges == edges_other) and (self.molecule == other_sorted.molecule) def isomorphic_to(self, other): """ Checks if the graphs of two MoleculeGraphs are isomorphic to one another. In order to prevent problems with misdirected edges, both graphs are converted into undirected nx.Graph objects. :param other: MoleculeGraph object to be compared. :return: bool """ if len(self.molecule) != len(other.molecule): return False if self.molecule.composition.alphabetical_formula != other.molecule.composition.alphabetical_formula: return False if len(self.graph.edges()) != len(other.graph.edges()): return False return _isomorphic(self.graph, other.graph) def diff(self, other, strict=True): """ Compares two MoleculeGraphs. Returns dict with keys 'self', 'other', 'both' with edges that are present in only one MoleculeGraph ('self' and 'other'), and edges that are present in both. The Jaccard distance is a simple measure of the dissimilarity between two MoleculeGraphs (ignoring edge weights), and is defined by 1 - (size of the intersection / size of the union) of the sets of edges. This is returned with key 'dist'. 
Important note: all node indices are in terms of the MoleculeGraph this method is called from, not the 'other' MoleculeGraph: there is no guarantee the node indices will be the same if the underlying Molecules are ordered differently. :param other: MoleculeGraph :param strict: if False, will compare bonds from different Molecules, with node indices replaced by Species strings, will not count number of occurrences of bonds :return: """ if self.molecule != other.molecule and strict: return ValueError("Meaningless to compare MoleculeGraphs if " "corresponding Molecules are different.") if strict: # sort for consistent node indices # PeriodicSite should have a proper __hash__() value, # using its frac_coords as a convenient key mapping = {tuple(site.frac_coords): self.molecule.index(site) for site in other.molecule} other_sorted = other.__copy__() other_sorted.sort(key=lambda site: mapping[tuple(site.frac_coords)]) edges = {(u, v, d.get("to_jimage", (0, 0, 0))) for u, v, d in self.graph.edges(keys=False, data=True)} edges_other = { (u, v, d.get("to_jimage", (0, 0, 0))) for u, v, d in other_sorted.graph.edges(keys=False, data=True) } else: edges = { (str(self.molecule[u].specie), str(self.molecule[v].specie)) for u, v, d in self.graph.edges(keys=False, data=True) } edges_other = { (str(other.structure[u].specie), str(other.structure[v].specie)) for u, v, d in other.graph.edges(keys=False, data=True) } if len(edges) == 0 and len(edges_other) == 0: jaccard_dist = 0 # by definition else: jaccard_dist = 1 - len(edges.intersection(edges_other)) / len(edges.union(edges_other)) return { "self": edges - edges_other, "other": edges_other - edges, "both": edges.intersection(edges_other), "dist": jaccard_dist, }
[ 2, 19617, 25, 3384, 69, 12, 23, 198, 2, 15069, 357, 66, 8, 350, 4948, 265, 5235, 7712, 4816, 13, 198, 2, 4307, 6169, 739, 262, 2846, 286, 262, 17168, 13789, 13, 198, 198, 37811, 198, 26796, 329, 4823, 24612, 286, 24770, 13, 198, ...
2.162737
40,212
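The two scalar values printed after each content cell appear to be a characters-per-token ratio and a token count for that row; that reading is an assumption from the surrounding rows, not something the dump states. Under that assumption, the row above, with ratio 2.162737 and 40,212 tokens, corresponds to roughly 2.162737 * 40212 ≈ 86,968 characters of source. A minimal sketch of that bookkeeping; the helper name is illustrative only:

def row_stats(content, input_ids):
    # token_count is just the length of the tokenized sequence;
    # the ratio relates raw characters to tokens.
    token_count = len(input_ids)
    ratio = len(content) / token_count if token_count else 0.0
    return {"token_count": token_count, "ratio_char_token": ratio}

# Cross-check against the row above: 2.162737 chars/token * 40212 tokens
print(2.162737 * 40212)  # ~86,968 characters of source text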
from . import image
from . import container
from . import system
[ 6738, 764, 1330, 2939, 198, 6738, 764, 1330, 9290, 198, 6738, 764, 1330, 1080, 198 ]
4.333333
15
from django.shortcuts import render, redirect
from django.http import HttpResponse
from .models import Article
from django.contrib.auth.decorators import login_required
from . import forms
[ 6738, 42625, 14208, 13, 19509, 23779, 1330, 8543, 11, 18941, 198, 6738, 42625, 14208, 13, 4023, 1330, 367, 29281, 31077, 198, 6738, 764, 27530, 1330, 10172, 198, 6738, 42625, 14208, 13, 3642, 822, 13, 18439, 13, 12501, 273, 2024, 1330, ...
3.82
50
# Parser based on RFC 5228, especially the grammar as defined in section 8. All
# references are to sections in RFC 5228 unless stated otherwise.

import ply.yacc

import sifter.grammar
from sifter.grammar.lexer import tokens
import sifter.handler

import logging

__all__ = ('parser',)


def p_commands_list(p):
    """commands : commands command"""
    p[0] = p[1]

    # section 3.2: REQUIRE command must come before any other commands
    if p[2].RULE_IDENTIFIER == 'REQUIRE':
        if any(command.RULE_IDENTIFIER != 'REQUIRE'
               for command in p[0].commands):
            log = logging.getLogger("sifter")
            log.error(("REQUIRE command on line %d must come before any "
                       "other non-REQUIRE commands" % p.lineno(2)))
            raise SyntaxError

    # section 3.1: ELSIF and ELSE must follow IF or another ELSIF
    elif p[2].RULE_IDENTIFIER in ('ELSIF', 'ELSE'):
        if p[0].commands[-1].RULE_IDENTIFIER not in ('IF', 'ELSIF'):
            log = logging.getLogger("sifter")
            log.error(("ELSIF/ELSE command on line %d must follow an IF/ELSIF "
                       "command" % p.lineno(2)))
            raise SyntaxError

    p[0].commands.append(p[2])

def p_commands_empty(p):
    """commands : """
    p[0] = sifter.grammar.CommandList()

def p_command(p):
    """command : IDENTIFIER arguments ';'
               | IDENTIFIER arguments block"""
    #print("COMMAND:", p[1], p[2], p[3])
    tests = p[2].get('tests')
    block = None
    if p[3] != ';':
        block = p[3]

    handler = sifter.handler.get('command', p[1])
    if handler is None:
        log = logging.getLogger("sifter")
        log.error(("No handler registered for command '%s' on line %d"
                   % (p[1], p.lineno(1))))
        raise SyntaxError
    p[0] = handler(arguments=p[2]['args'], tests=tests, block=block)

def p_command_error(p):
    """command : IDENTIFIER error ';'
               | IDENTIFIER error block"""
    log = logging.getLogger("sifter")
    log.error(("Syntax error in command definition after %s on line %d"
               % (p[1], p.lineno(1))))
    raise SyntaxError

def p_block(p):
    """block : '{' commands '}' """
    # section 3.2: REQUIRE command must come before any other commands,
    # which means it can't be in the block of another command
    if any(command.RULE_IDENTIFIER == 'REQUIRE'
           for command in p[2].commands):
        log = logging.getLogger("sifter")
        log.error(("REQUIRE command not allowed inside of a block (line %d)"
                   % (p.lineno(2))))
        raise SyntaxError
    p[0] = p[2]

def p_block_error(p):
    """block : '{' error '}'"""
    log = logging.getLogger("sifter")
    log.error(("Syntax error in command block that starts on line %d"
               % (p.lineno(1),)))
    raise SyntaxError

def p_arguments(p):
    """arguments : argumentlist
                 | argumentlist test
                 | argumentlist '(' testlist ')'"""
    p[0] = { 'args' : p[1], }
    if len(p) > 2:
        if p[2] == '(':
            p[0]['tests'] = p[3]
        else:
            p[0]['tests'] = [ p[2] ]

def p_testlist_error(p):
    """arguments : argumentlist '(' error ')'"""
    log = logging.getLogger("sifter")
    log.error(("Syntax error in test list that starts on line %d"
               % p.lineno(2)))
    raise SyntaxError

def p_argumentlist_list(p):
    """argumentlist : argumentlist argument"""
    p[0] = p[1]
    p[0].append(p[2])

def p_argumentlist_empty(p):
    """argumentlist : """
    p[0] = []

def p_test(p):
    """test : IDENTIFIER arguments"""
    #print("TEST:", p[1], p[2])
    tests = p[2].get('tests')

    handler = sifter.handler.get('test', p[1])
    if handler is None:
        log = logging.getLogger("sifter")
        log.error(("No handler registered for test '%s' on line %d"
                   % (p[1], p.lineno(1))))
        raise SyntaxError
    p[0] = handler(arguments=p[2]['args'], tests=tests)

def p_testlist_list(p):
    """testlist : test ',' testlist"""
    p[0] = p[3]
    p[0].insert(0, p[1])

def p_testlist_single(p):
    """testlist : test"""
    p[0] = [ p[1] ]

def p_argument_stringlist(p):
    """argument : '[' stringlist ']'"""
    p[0] = p[2]

def p_argument_string(p):
    """argument : string"""
    # for simplicity, we treat all single strings as a string list
    p[0] = [ p[1] ]

def p_argument_number(p):
    """argument : NUMBER"""
    p[0] = p[1]

def p_argument_tag(p):
    """argument : TAG"""
    p[0] = sifter.grammar.Tag(p[1])

def p_stringlist_error(p):
    """argument : '[' error ']'"""
    log = logging.getLogger("sifter")
    log.error(("Syntax error in string list that starts on line %d"
               % p.lineno(1)))
    raise SyntaxError

def p_stringlist_list(p):
    """stringlist : string ',' stringlist"""
    p[0] = p[3]
    p[0].insert(0, p[1])

def p_stringlist_single(p):
    """stringlist : string"""
    p[0] = [ p[1] ]

def p_string(p):
    """string : QUOTED_STRING"""
    p[0] = sifter.grammar.String(p[1])
[ 2, 23042, 263, 1912, 319, 30978, 642, 23815, 11, 2592, 262, 23491, 355, 5447, 287, 2665, 807, 13, 1439, 198, 2, 10288, 389, 284, 9004, 287, 30978, 642, 23815, 4556, 5081, 4306, 13, 198, 198, 11748, 35960, 13, 88, 4134, 198, 198, 117...
2.243694
2,220
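The row above is a PLY (ply.yacc) grammar module for the Sieve language of RFC 5228; its p_* functions only become a parser once PLY assembles them. A hedged sketch of how such a module is typically wired up and invoked — the lexer/parser module paths and the sample Sieve script are assumptions for illustration, not taken from the row:

import ply.lex
import ply.yacc

import sifter.grammar.lexer as lexer_module    # module defining `tokens` and t_* rules
import sifter.grammar.parser as parser_module  # module holding p_* rules like the ones above

# Build the lexer and parser objects from the rule modules.
lexer = ply.lex.lex(module=lexer_module)
parser = ply.yacc.yacc(module=parser_module)

# parse() returns whatever the start rule assigned to p[0];
# with the rules above that is a sifter.grammar.CommandList.
commands = parser.parse('require "fileinto"; fileinto "INBOX";', lexer=lexer)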
from sklearn import datasets
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer

from sklearn.cross_validation import train_test_split
from sklearn.cross_validation import cross_val_score
from sklearn.cross_validation import ShuffleSplit
from sklearn.cross_validation import Bootstrap

from sklearn.naive_bayes import MultinomialNB
from sklearn.grid_search import GridSearchCV

from scipy.stats import sem
from pprint import pprint

import numpy as np
import pylab as pl
import string
import matplotlib.pyplot as plt


# Calculates the mean of the scores with the standard deviation
rand_baseline = list()
test_results = list()
sem_results = list()
com_results = list()

#test_docs("problemA")
for i in string.uppercase[:13]:
    test_docs("problem"+i)

#graph(rand_baseline,test_results,com_results,13)

import os
import time as tm

sub_dir = "Results/"
location = "multiDoc" + tm.strftime("%Y%m%d-%H%M%S") + ".txt"
with open(os.path.join(sub_dir, location), 'w') as myFile:
    myFile.write(str(rand_baseline))
    myFile.write("\n")
    myFile.write(str(test_results))
    myFile.write("\n")
    myFile.write(str(sem_results))
    myFile.write("\n")
    myFile.write(str(com_results))

# CV with ShuffleSplit
'''
cv = ShuffleSplit(n_samples, n_iter=100, test_size=0.2, random_state=0)
test_scores = cross_val_score(mnb, X, y, cv=cv)
print np.mean(test_scores)
'''

# Single run through
'''
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

print X_train.shape
print y_train.shape
print X_test.shape
print y_test.shape

mnb = MultinomialNB().fit(X_train, y_train)

print mnb.score(X_test, y_test)
'''
[ 6738, 1341, 35720, 1330, 40522, 198, 6738, 1341, 35720, 13, 30053, 62, 2302, 7861, 13, 5239, 1330, 309, 69, 312, 69, 38469, 7509, 198, 6738, 1341, 35720, 13, 30053, 62, 2302, 7861, 13, 5239, 1330, 2764, 38469, 7509, 198, 198, 6738, 13...
2.718901
619
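The commented-out blocks in the row above outline two evaluation strategies (ShuffleSplit cross-validation and a single train/test split) but refer to X, y and mnb that are never built in the visible text. A self-contained sketch of the same pattern; it uses the modern sklearn.model_selection module (the sklearn.cross_validation imports in the row were removed from scikit-learn long ago) and a toy corpus as an assumed stand-in for that script's documents:

import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import ShuffleSplit, cross_val_score, train_test_split
from sklearn.naive_bayes import MultinomialNB

# Toy corpus standing in for the documents loaded elsewhere in that script.
docs = ["spam spam ham", "ham and eggs", "spam offer now", "eggs and toast"]
y = np.array([1, 0, 1, 0])
X = TfidfVectorizer().fit_transform(docs)

# CV with ShuffleSplit
cv = ShuffleSplit(n_splits=10, test_size=0.25, random_state=0)
print(np.mean(cross_val_score(MultinomialNB(), X, y, cv=cv)))

# Single run through
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
mnb = MultinomialNB().fit(X_train, y_train)
print(mnb.score(X_test, y_test))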
import io
import fast
import spreadsheet
import tab
import utils
import web

from io import *
from fast import *
from spreadsheet import *
from tab import *
from utils import *
from web import *

__all__ = []
__all__.extend(io.__all__)
__all__.extend(fast.__all__)
__all__.extend(spreadsheet.__all__)
__all__.extend(tab.__all__)
__all__.extend(utils.__all__)
__all__.extend(web.__all__)
[ 11748, 33245, 198, 11748, 3049, 198, 11748, 30117, 198, 11748, 7400, 198, 11748, 3384, 4487, 198, 11748, 3992, 198, 198, 6738, 33245, 1330, 1635, 198, 6738, 3049, 1330, 1635, 198, 6738, 30117, 1330, 1635, 198, 6738, 7400, 1330, 1635, 198,...
2.71831
142
# (C) Copyright 2017 Inova Development Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Define the base of targets (i.e. systems to be tested)

    TargetID = Column(Integer(11), primary_key=True)
    IPAddress = Column(String(15), nullable=False)
    CompanyID = Column(Integer(11), ForeignKey("Companies.CompanyID"))
    Namespace = Column(String(30), nullable=False)
    SMIVersion = Column(String(15), nullable=False)
    Product = Column(String(30), nullable=False)
    Principal = Column(String(30), nullable=False)
    Credential = Column(String(30), nullable=False)
    CimomVersion = Column(String(30), nullable=False)
    InteropNamespace = Column(String(30), nullable=False)
    Notify = Column(Enum('Enabled', 'Disabled'), default='Disabled')
    NotifyUsers = Column(String(12), nullable=False)
    ScanEnabled = Column(Enum('Enabled', 'Disabled'), default='Enabled')
    Protocol = Column(String(10), default='http')
    Port = Column(String(10), nullable=False)
"""

# TODO change ip_address to hostname where host name is name : port

from __future__ import print_function, absolute_import

import os
import csv
import re
from collections import OrderedDict
from textwrap import wrap
import six
from mysql.connector import Error as mysqlerror

from ._dbtablebase import DBTableBase
from ._mysqldbmixin import MySQLDBMixin
from ._common import get_url_str
from ._logging import AUDIT_LOGGER_NAME, get_logger
from ._companiestable import CompaniesTable

__all__ = ['TargetsTable']
[ 2, 357, 34, 8, 15069, 2177, 554, 10071, 7712, 3457, 13, 198, 2, 1439, 6923, 33876, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, ...
3.28363
617
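The module docstring in the row above lists the Targets table schema as bare Column(...) lines without the surrounding model. As a sketch only — the project itself talks to MySQL through its own table classes, so this declarative mapping is an assumption for illustration, and the MySQL display widths such as Integer(11) are dropped because plain SQLAlchemy Integer takes no length argument:

from sqlalchemy import Column, Enum, ForeignKey, Integer, String
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class Target(Base):
    # Hypothetical declarative form of the schema in the docstring above.
    __tablename__ = "Targets"

    TargetID = Column(Integer, primary_key=True)
    IPAddress = Column(String(15), nullable=False)
    CompanyID = Column(Integer, ForeignKey("Companies.CompanyID"))
    Namespace = Column(String(30), nullable=False)
    SMIVersion = Column(String(15), nullable=False)
    Product = Column(String(30), nullable=False)
    Principal = Column(String(30), nullable=False)
    Credential = Column(String(30), nullable=False)
    CimomVersion = Column(String(30), nullable=False)
    InteropNamespace = Column(String(30), nullable=False)
    Notify = Column(Enum("Enabled", "Disabled"), default="Disabled")
    NotifyUsers = Column(String(12), nullable=False)
    ScanEnabled = Column(Enum("Enabled", "Disabled"), default="Enabled")
    Protocol = Column(String(10), default="http")
    Port = Column(String(10), nullable=False)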
#
# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
# its licensors.
#
# For complete copyright and license terms please see the LICENSE at the root of this
# distribution (the "License"). All use of this software is governed by the License,
# or, if provided, by the license below or the license accompanying this file. Do not
# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#

import os
from az_code_gen.base import *
from AzReflectionCpp import format_cpp_annotations


# Factory function - called from launcher
def create_drivers(env):
    return [AZEBusInline_Driver(env)]
[ 2, 198, 2, 1439, 393, 16690, 286, 428, 2393, 15069, 357, 66, 8, 6186, 13, 785, 11, 3457, 13, 393, 663, 29116, 393, 198, 2, 663, 8240, 669, 13, 198, 2, 198, 2, 1114, 1844, 6634, 290, 5964, 2846, 3387, 766, 262, 38559, 24290, 379,...
3.746114
193
import os

from QUANTAXIS.QASetting import QALocalize
#from QUANTAXIS_CRAWLY.run_selenium_alone import (read_east_money_page_zjlx_to_sqllite, open_chrome_driver, close_chrome_dirver)
from QUANTAXIS_CRAWLY.run_selenium_alone import *
import urllib
import pandas as pd
import time
from QUANTAXIS.QAUtil import (DATABASE)

'''
reqeust
'''
[ 11748, 28686, 198, 6738, 19604, 1565, 5603, 55, 1797, 13, 48, 1921, 35463, 1330, 1195, 1847, 4374, 1096, 198, 2, 6738, 19604, 1565, 5603, 55, 1797, 62, 34, 20530, 11319, 13, 5143, 62, 741, 47477, 62, 17749, 1330, 357, 961, 62, 23316, ...
2.412587
143
#!/usr/bin/env python import logging import sys from app import app as application # Set default log level for the general logger # each handler can then restrict the messages logged application.logger.setLevel(logging.INFO) setup_flask_logging() if __name__ == '__main__': application.run()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 11748, 18931, 198, 11748, 25064, 198, 6738, 598, 1330, 598, 355, 3586, 628, 198, 198, 2, 5345, 4277, 2604, 1241, 329, 262, 2276, 49706, 198, 2, 1123, 21360, 460, 788, 4239, 262, 6218, ...
3.45977
87
#!/usr/bin/env python from game.base.being import Being
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 6738, 983, 13, 8692, 13, 11873, 1330, 11204, 628 ]
3.052632
19
# -*- coding: utf-8 -*- # Generated by Django 1.11.1 on 2017-06-25 15:10 from __future__ import unicode_literals from django.db import migrations
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 2980, 515, 416, 37770, 352, 13, 1157, 13, 16, 319, 2177, 12, 3312, 12, 1495, 1315, 25, 940, 198, 6738, 11593, 37443, 834, 1330, 28000, 1098, 62, 17201, 874, 198, ...
2.690909
55
# coding: utf-8 """ Quetzal API Quetzal: an API to manage data files and their associated metadata. OpenAPI spec version: 0.5.0 Contact: support@quetz.al Generated by: https://openapi-generator.tech """ from setuptools import setup, find_packages # noqa: H301 NAME = "quetzal-openapi-client" VERSION = "0.5.0" # To install the library, run the following # # python setup.py install # # prerequisite: setuptools # http://pypi.python.org/pypi/setuptools REQUIRES = ["urllib3 >= 1.15", "six >= 1.10", "certifi", "python-dateutil"] setup( name=NAME, version=VERSION, description="Quetzal API auto-generated client", author='David Ojeda', author_email="support@quetz.al", url="https://github.com/quet.zal/quetzal-openapi-client", project_urls={ "Documentation": "https://quetzal-openapi-client.readthedocs.io", "Code": "https://github.com/quetz-al/quetzal-openapi-client", "Issue tracker": "https://github.com/quetz-al/quetzal-openapi-client/issues", }, license="BSD-3-Clause", keywords=["OpenAPI", "OpenAPI-Generator", "Quetzal API"], install_requires=REQUIRES, packages=find_packages(exclude=['test', 'docs']), namespace_packages=['quetzal'], include_package_data=True, long_description="""\ quetzal-openapi-client ====================== This is an auto-generated package using [openapi-generator](https://github.com/OpenAPITools/openapi-generator) from an OpenAPI specification of the Quetzal API. An improvement layer on this client exists in the quetzal-client package. Quetzal is an API to manage data files and their associated metadata. See more at [quetz.al](https://quetz.al) and its [readthedocs documentation](https://quetzal-api.readthedocs.io). """, long_description_content_type='text/markdown', classifiers=[ 'Development Status :: 4 - Beta', 'Intended Audience :: Developers', 'License :: OSI Approved :: BSD License', 'Operating System :: OS Independent', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Topic :: Database :: Front-Ends', 'Topic :: Internet :: WWW/HTTP', 'Topic :: System :: Archiving', ], )
[ 2, 19617, 25, 3384, 69, 12, 23, 198, 198, 37811, 198, 220, 220, 220, 2264, 23773, 282, 7824, 628, 220, 220, 220, 2264, 23773, 282, 25, 281, 7824, 284, 6687, 1366, 3696, 290, 511, 3917, 20150, 13, 628, 220, 220, 220, 4946, 17614, 1...
2.672336
882
# coding: utf-8 from __future__ import unicode_literals import re from .adobepass import AdobePassIE from ..compat import compat_str from ..utils import ( fix_xml_ampersands, xpath_text, int_or_none, determine_ext, float_or_none, parse_duration, xpath_attr, update_url_query, ExtractorError, strip_or_none, url_or_none, )
[ 2, 19617, 25, 3384, 69, 12, 23, 198, 6738, 11593, 37443, 834, 1330, 28000, 1098, 62, 17201, 874, 198, 198, 11748, 302, 198, 198, 6738, 764, 324, 672, 538, 562, 1330, 21771, 14478, 10008, 198, 6738, 11485, 5589, 265, 1330, 8330, 62, ...
2.365385
156
from alpha_vantage.timeseries import TimeSeries from pprint import pprint import json import argparse if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument('symbol', type=str, help="the stock symbol you want to download") parser.add_argument('time_window', type=str, choices=[ 'intraday', 'daily', 'daily_adj'], help="the time period you want to download the stock history for") namespace = parser.parse_args() save_dataset(**vars(namespace))
[ 6738, 17130, 62, 38815, 13, 22355, 10640, 1330, 3862, 27996, 198, 6738, 279, 4798, 1330, 279, 4798, 198, 11748, 33918, 198, 11748, 1822, 29572, 628, 198, 198, 361, 11593, 3672, 834, 6624, 366, 834, 12417, 834, 1298, 198, 220, 220, 220, ...
2.905028
179
# Copyright The Cloud Custodian Authors. # SPDX-License-Identifier: Apache-2.0 import fnmatch from io import StringIO import json import os import shutil import zipfile import re from datetime import datetime, timedelta, tzinfo from distutils.util import strtobool import boto3 import placebo from botocore.response import StreamingBody from placebo import pill from c7n.testing import CustodianTestCore from .constants import ACCOUNT_ID # Custodian Test Account. This is used only for testing. # Access is available for community project maintainers. ########################################################################### # BEGIN PLACEBO MONKEY PATCH # # Placebo is effectively abandoned upstream, since mitch went back to work at AWS, irony... # These monkeypatch patches represent fixes on trunk of that repo that have not been released # into an extant version, we carry them here. We can drop this when this issue is resolved # # https://github.com/garnaat/placebo/issues/63 # # License - Apache 2.0 # Copyright (c) 2015 Mitch Garnaat utc = UTC() def deserialize(obj): """Convert JSON dicts back into objects.""" # Be careful of shallow copy here target = dict(obj) class_name = None if "__class__" in target: class_name = target.pop("__class__") if "__module__" in obj: obj.pop("__module__") # Use getattr(module, class_name) for custom types if needed if class_name == "datetime": return datetime(tzinfo=utc, **target) if class_name == "StreamingBody": return StringIO(target["body"]) # Return unrecognized structures as-is return obj def serialize(obj): """Convert objects into JSON structures.""" # Record class and module information for deserialization result = {"__class__": obj.__class__.__name__} try: result["__module__"] = obj.__module__ except AttributeError: pass # Convert objects to dictionary representation based on type if isinstance(obj, datetime): result["year"] = obj.year result["month"] = obj.month result["day"] = obj.day result["hour"] = obj.hour result["minute"] = obj.minute result["second"] = obj.second result["microsecond"] = obj.microsecond return result if isinstance(obj, StreamingBody): result["body"] = obj.read() obj._raw_stream = StringIO(result["body"]) obj._amount_read = 0 return result if isinstance(obj, bytes): return obj.decode('utf8') # Raise a TypeError if the object isn't recognized raise TypeError("Type not serializable") pill.FakeHttpResponse.raw = None placebo.pill.serialize = serialize placebo.pill.deserialize = deserialize # END PLACEBO MONKEY ########################################################################## def attach(session, data_path, prefix=None, debug=False): pill = ZippedPill(data_path, prefix=prefix, debug=debug) pill.attach(session, prefix) return pill
[ 2, 15069, 383, 10130, 40619, 375, 666, 46665, 13, 198, 2, 30628, 55, 12, 34156, 12, 33234, 7483, 25, 24843, 12, 17, 13, 15, 198, 11748, 24714, 15699, 198, 6738, 33245, 1330, 10903, 9399, 198, 11748, 33918, 198, 11748, 28686, 198, 1174...
3.034205
994
from __future__ import unicode_literals, division, absolute_import from builtins import * # noqa pylint: disable=unused-import, redefined-builtin import pytest from flexget.entry import Entry # TODO Add more standard tests
[ 6738, 11593, 37443, 834, 1330, 28000, 1098, 62, 17201, 874, 11, 7297, 11, 4112, 62, 11748, 198, 6738, 3170, 1040, 1330, 1635, 220, 1303, 645, 20402, 279, 2645, 600, 25, 15560, 28, 403, 1484, 12, 11748, 11, 2266, 18156, 12, 18780, 259,...
3.454545
66
# coding: utf-8 # Copyright (c) Pymatgen Development Team. # Distributed under the terms of the MIT License. """ This module define a WulffShape class to generate the Wulff shape from a lattice, a list of indices and their corresponding surface energies, and the total area and volume of the wulff shape,the weighted surface energy, the anisotropy and shape_factor can also be calculated. In support of plotting from a given view in terms of miller index. The lattice is from the conventional unit cell, and (hkil) for hexagonal lattices. If you use this code extensively, consider citing the following: Tran, R.; Xu, Z.; Radhakrishnan, B.; Winston, D.; Persson, K. A.; Ong, S. P. (2016). Surface energies of elemental crystals. Scientific Data. """ from pymatgen.core.structure import Structure from pymatgen.util.coord import get_angle import numpy as np import scipy as sp from scipy.spatial import ConvexHull import logging import warnings __author__ = 'Zihan Xu, Richard Tran, Shyue Ping Ong' __copyright__ = 'Copyright 2013, The Materials Virtual Lab' __version__ = '0.1' __maintainer__ = 'Zihan Xu' __email__ = 'zix009@eng.ucsd.edu' __date__ = 'May 5 2016' logger = logging.getLogger(__name__) def hkl_tuple_to_str(hkl): """ Prepare for display on plots "(hkl)" for surfaces Agrs: hkl: in the form of [h, k, l] or (h, k, l) """ str_format = '($' for x in hkl: if x < 0: str_format += '\\overline{' + str(-x) + '}' else: str_format += str(x) str_format += '$)' return str_format def get_tri_area(pts): """ Given a list of coords for 3 points, Compute the area of this triangle. Args: pts: [a, b, c] three points """ a, b, c = pts[0], pts[1], pts[2] v1 = np.array(b) - np.array(a) v2 = np.array(c) - np.array(a) area_tri = abs(sp.linalg.norm(sp.cross(v1, v2)) / 2) return area_tri
[ 2, 19617, 25, 3384, 69, 12, 23, 198, 2, 15069, 357, 66, 8, 350, 4948, 265, 5235, 7712, 4816, 13, 198, 2, 4307, 6169, 739, 262, 2846, 286, 262, 17168, 13789, 13, 198, 198, 37811, 198, 1212, 8265, 8160, 257, 370, 377, 487, 33383, ...
2.600269
743
from app import app from flask import Flask, request, jsonify, g import sqlite3 import os import json from random import randint from flask_jwt_extended import jwt_required import datetime from flask_mysqldb import MySQL mysql = MySQL()
[ 6738, 598, 1330, 598, 198, 6738, 42903, 1330, 46947, 11, 2581, 11, 33918, 1958, 11, 308, 198, 11748, 44161, 578, 18, 198, 11748, 28686, 198, 11748, 33918, 198, 6738, 4738, 1330, 43720, 600, 198, 6738, 42903, 62, 73, 46569, 62, 2302, 1...
3.442857
70
from python_on_rails.either import as_either, Failure, Success def test_success_executes_bindings(): result = Success(1).bind(add_one).bind(times_five) assert isinstance(result, Success) assert result.value == 10 def test_a_failure_stops_the_execution_of_later_bindings(): result = Success("NaN").bind(add_one).bind(times_five) assert isinstance(result, Failure) assert type(result.value) == TypeError assert repr(result.value) == "TypeError('can only concatenate str (not \"int\") to str')"
[ 6738, 21015, 62, 261, 62, 430, 4487, 13, 31336, 1330, 355, 62, 31336, 11, 25743, 11, 16282, 628, 628, 198, 4299, 1332, 62, 13138, 62, 18558, 1769, 62, 21653, 654, 33529, 198, 220, 220, 220, 1255, 796, 16282, 7, 16, 737, 21653, 7, ...
2.89011
182
from django.db import models # Create your models here. # Station # Customers # Items # Payments
[ 6738, 42625, 14208, 13, 9945, 1330, 4981, 198, 198, 2, 13610, 534, 4981, 994, 13, 198, 2, 9327, 198, 198, 2, 36707, 198, 198, 2, 17230, 198, 198, 2, 41318, 628, 628, 628 ]
3.212121
33
# Test definitions for Lit, the LLVM test runner. # # This is reusing the LLVM Lit test runner in the interim until the new build # rules are upstreamed. # TODO(b/136126535): remove this custom rule. """Lit runner globbing test """ load("//tensorflow:tensorflow.bzl", "filegroup") load("@bazel_skylib//lib:paths.bzl", "paths") load("//tensorflow:tensorflow.bzl", "tf_cc_test", "tf_native_cc_binary", "tf_copts") # Default values used by the test runner. _default_test_file_exts = ["mlir", ".pbtxt", ".td"] _default_driver = "@llvm-project//mlir:run_lit.sh" _default_size = "small" _default_tags = [] # These are patterns which we should never match, for tests, subdirectories, or # test input data files. _ALWAYS_EXCLUDE = [ "**/LICENSE.txt", "**/README.txt", "**/lit.local.cfg", # Exclude input files that have spaces in their names, since bazel # cannot cope with such "targets" in the srcs list. "**/* *", "**/* */**", ] def _run_lit_test(name, test_file, data, size, tags, driver, features, exec_properties): """Runs lit on all tests it can find in `data` under tensorflow/compiler/mlir. Note that, due to Bazel's hermetic builds, lit only sees the tests that are included in the `data` parameter, regardless of what other tests might exist in the directory searched. Args: name: str, the name of the test, including extension. data: [str], the data input to the test. size: str, the size of the test. tags: [str], tags to attach to the test. driver: str, label of the driver shell script. Note: use of a custom driver is not currently supported and specifying a default driver will abort the tests. features: [str], list of extra features to enable. """ name_without_suffix = test_file[0].split('.')[0] local_test_files = name + ".test_files" filegroup( name = local_test_files, srcs = native.glob([ "data/" + name_without_suffix + "*.mlir", ]), ) tf_cc_test( name = name, srcs = test_file, size = size, deps = [ "//tensorflow/compiler/mlir/disc/tests:mlir_feature_test", "//tensorflow/core:test", "//tensorflow/core:test_main", "//tensorflow/core:testlib", ], data = [":" + local_test_files] + data + [ "//tensorflow/compiler/mlir/disc:disc_compiler_main", "//tensorflow/compiler/mlir:tf-mlir-translate", "//tensorflow/compiler/mlir:tf-opt", ], ) def glob_op_tests( exclude = [], test_file_exts = _default_test_file_exts, default_size = _default_size, size_override = {}, data = [], per_test_extra_data = {}, default_tags = _default_tags, tags_override = {}, driver = _default_driver, features = [], exec_properties = {}): """Creates all plausible Lit tests (and their inputs) under this directory. Args: exclude: [str], paths to exclude (for tests and inputs). test_file_exts: [str], extensions for files that are tests. default_size: str, the test size for targets not in "size_override". size_override: {str: str}, sizes to use for specific tests. data: [str], additional input data to the test. per_test_extra_data: {str: [str]}, extra data to attach to a given file. default_tags: [str], additional tags to attach to the test. tags_override: {str: str}, tags to add to specific tests. driver: str, label of the driver shell script. Note: use of a custom driver is not currently supported and specifying a default driver will abort the tests. features: [str], list of extra features to enable. exec_properties: a dictionary of properties to pass on. """ # Ignore some patterns by default for tests and input data. exclude = _ALWAYS_EXCLUDE + exclude tests = native.glob( ["*." 
+ ext for ext in test_file_exts], exclude = exclude, ) # Run tests individually such that errors can be attributed to a specific # failure. for i in range(len(tests)): curr_test = tests[i] # Instantiate this test with updated parameters. lit_test( name = curr_test, data = data + per_test_extra_data.get(curr_test, []), size = size_override.get(curr_test, default_size), tags = default_tags + tags_override.get(curr_test, []), driver = driver, features = features, exec_properties = exec_properties, ) def lit_test( name, data = [], size = _default_size, tags = _default_tags, driver = _default_driver, features = [], exec_properties = {}): """Runs test files under lit. Args: name: str, the name of the test. data: [str], labels that should be provided as data inputs. size: str, the size of the test. tags: [str], tags to attach to the test. driver: str, label of the driver shell script. Note: use of a custom driver is not currently supported and specifying a default driver will abort the tests. features: [str], list of extra features to enable. """ _run_lit_test(name + ".test", [name], data, size, tags, driver, features, exec_properties)
[ 2, 6208, 17336, 329, 25659, 11, 262, 27140, 15996, 1332, 17490, 13, 198, 2, 198, 2, 770, 318, 302, 3500, 262, 27140, 15996, 25659, 1332, 17490, 287, 262, 19303, 1566, 262, 649, 1382, 198, 2, 3173, 389, 28717, 276, 13, 198, 2, 16926,...
2.538023
2,104
""" These are functions to add to the configure context. """ def __checkCanLink(context, source, source_type, message_libname, real_libs=[]): """ Check that source can be successfully compiled and linked against real_libs. Keyword arguments: source -- source to try to compile source_type -- type of source file, (probably should be ".c") message_libname -- library name to show in the message output from scons real_libs -- list of actual libraries to link against (defaults to a list with one element, the value of messager_libname) """ if not real_libs: real_libs = [message_libname] context.Message("Checking for %s..." % message_libname) libsave = context.env.get('LIBS') context.env.AppendUnique(LIBS=real_libs) ret = context.TryLink(source, source_type) context.Result( ret ) if libsave is None: del(context.env['LIBS']) else: context.env['LIBS'] = libsave return ret libuuid_source = ''' #include <uuid/uuid.h> int main() { uuid_t uu; char uuid_str[37]; uuid_generate(uu); uuid_unparse(uu, uuid_str); return 0; } ''' selinux_source = ''' #include <selinux/selinux.h> int main() { security_context_t ctx; getpeercon(0, &ctx); return 0; } ''' byteswap_source = ''' #include <byteswap.h> #include <stdint.h> int main() { uint16_t b16 = 0x00FF; uint32_t b32 = 0x0011EEFF; uint64_t b64 = 0x00112233CCDDEEFF; bswap_16(b16); bswap_32(b32); bswap_64(b64); return 0; } ''' bdb_source = ''' #include <db.h> #if defined(DB_VERSION_MAJOR) && DB_VERSION_MAJOR >= 4 #if DB_VERSION_MAJOR == 4 #if defined(DB_VERSION_MINOR) && DB_VERSION_MINOR >= 3 #else #error "" #endif #endif #else #error "" #endif '''
[ 37811, 198, 4711, 389, 5499, 284, 751, 284, 262, 17425, 4732, 13, 198, 37811, 198, 198, 4299, 11593, 9122, 6090, 11280, 7, 22866, 11, 2723, 11, 2723, 62, 4906, 11, 3275, 62, 8019, 3672, 11, 1103, 62, 8019, 82, 28, 21737, 2599, 198, ...
2.473997
673
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch - TF 2.0 general utilities.""" import logging import os import re import numpy logger = logging.getLogger(__name__) def convert_tf_weight_name_to_pt_weight_name(tf_name, start_prefix_to_remove=""): """ Convert a TF 2.0 model variable name in a pytorch model weight name. Conventions for TF2.0 scopes -> PyTorch attribute names conversions: - '$1___$2' is replaced by $2 (can be used to duplicate or remove layers in TF2.0 vs PyTorch) - '_._' is replaced by a new level separation (can be used to convert TF2.0 lists in PyTorch nn.ModulesList) return tuple with: - pytorch model weight name - transpose: boolean indicating weither TF2.0 and PyTorch weights matrices are transposed with regards to each other """ tf_name = tf_name.replace(":0", "") # device ids tf_name = re.sub( r"/[^/]*___([^/]*)/", r"/\1/", tf_name ) # '$1___$2' is replaced by $2 (can be used to duplicate or remove layers in TF2.0 vs PyTorch) tf_name = tf_name.replace( "_._", "/" ) # '_._' is replaced by a level separation (can be used to convert TF2.0 lists in PyTorch nn.ModulesList) tf_name = re.sub(r"//+", "/", tf_name) # Remove empty levels at the end tf_name = tf_name.split("/") # Convert from TF2.0 '/' separators to PyTorch '.' separators tf_name = tf_name[1:] # Remove level zero # When should we transpose the weights transpose = bool(tf_name[-1] == "kernel" or "emb_projs" in tf_name or "out_projs" in tf_name) # Convert standard TF2.0 names in PyTorch names if tf_name[-1] == "kernel" or tf_name[-1] == "embeddings" or tf_name[-1] == "gamma": tf_name[-1] = "weight" if tf_name[-1] == "beta": tf_name[-1] = "bias" # Remove prefix if needed tf_name = ".".join(tf_name) if start_prefix_to_remove: tf_name = tf_name.replace(start_prefix_to_remove, "", 1) return tf_name, transpose ##################### # PyTorch => TF 2.0 # ##################### def load_pytorch_checkpoint_in_tf2_model(tf_model, pytorch_checkpoint_path, tf_inputs=None, allow_missing_keys=False): """ Load pytorch checkpoints in a TF 2.0 model """ try: import tensorflow as tf # noqa: F401 import torch # noqa: F401 except ImportError: logger.error( "Loading a PyTorch model in TensorFlow, requires both PyTorch and TensorFlow to be installed. Please see " "https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions." 
) raise pt_path = os.path.abspath(pytorch_checkpoint_path) logger.info("Loading PyTorch weights from {}".format(pt_path)) pt_state_dict = torch.load(pt_path, map_location="cpu") logger.info("PyTorch checkpoint contains {:,} parameters".format(sum(t.numel() for t in pt_state_dict.values()))) return load_pytorch_weights_in_tf2_model( tf_model, pt_state_dict, tf_inputs=tf_inputs, allow_missing_keys=allow_missing_keys ) def load_pytorch_model_in_tf2_model(tf_model, pt_model, tf_inputs=None, allow_missing_keys=False): """ Load pytorch checkpoints in a TF 2.0 model """ pt_state_dict = pt_model.state_dict() return load_pytorch_weights_in_tf2_model( tf_model, pt_state_dict, tf_inputs=tf_inputs, allow_missing_keys=allow_missing_keys ) def load_pytorch_weights_in_tf2_model(tf_model, pt_state_dict, tf_inputs=None, allow_missing_keys=False): """ Load pytorch state_dict in a TF 2.0 model. """ try: import torch # noqa: F401 import tensorflow as tf # noqa: F401 from tensorflow.python.keras import backend as K except ImportError: logger.error( "Loading a PyTorch model in TensorFlow, requires both PyTorch and TensorFlow to be installed. Please see " "https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions." ) raise if tf_inputs is None: tf_inputs = tf_model.dummy_inputs if tf_inputs is not None: tf_model(tf_inputs, training=False) # Make sure model is built # Adapt state dict - TODO remove this and update the AWS weights files instead # Convert old format to new format if needed from a PyTorch state_dict old_keys = [] new_keys = [] for key in pt_state_dict.keys(): new_key = None if "gamma" in key: new_key = key.replace("gamma", "weight") if "beta" in key: new_key = key.replace("beta", "bias") if new_key: old_keys.append(key) new_keys.append(new_key) for old_key, new_key in zip(old_keys, new_keys): pt_state_dict[new_key] = pt_state_dict.pop(old_key) # Make sure we are able to load PyTorch base models as well as derived models (with heads) # TF models always have a prefix, some of PyTorch models (base ones) don't start_prefix_to_remove = "" if not any(s.startswith(tf_model.base_model_prefix) for s in pt_state_dict.keys()): start_prefix_to_remove = tf_model.base_model_prefix + "." 
symbolic_weights = tf_model.trainable_weights + tf_model.non_trainable_weights tf_loaded_numel = 0 weight_value_tuples = [] all_pytorch_weights = set(list(pt_state_dict.keys())) for symbolic_weight in symbolic_weights: sw_name = symbolic_weight.name name, transpose = convert_tf_weight_name_to_pt_weight_name( sw_name, start_prefix_to_remove=start_prefix_to_remove ) # Find associated numpy array in pytorch model state dict if name not in pt_state_dict: if allow_missing_keys: continue raise AttributeError("{} not found in PyTorch model".format(name)) array = pt_state_dict[name].numpy() if transpose: array = numpy.transpose(array) if len(symbolic_weight.shape) < len(array.shape): array = numpy.squeeze(array) elif len(symbolic_weight.shape) > len(array.shape): array = numpy.expand_dims(array, axis=0) try: assert list(symbolic_weight.shape) == list(array.shape) except AssertionError as e: e.args += (symbolic_weight.shape, array.shape) raise e tf_loaded_numel += array.size # logger.warning("Initialize TF weight {}".format(symbolic_weight.name)) weight_value_tuples.append((symbolic_weight, array)) all_pytorch_weights.discard(name) K.batch_set_value(weight_value_tuples) if tf_inputs is not None: tf_model(tf_inputs, training=False) # Make sure restore ops are run logger.info("Loaded {:,} parameters in the TF 2.0 model.".format(tf_loaded_numel)) logger.info("Weights or buffers not loaded from PyTorch model: {}".format(all_pytorch_weights)) return tf_model ##################### # TF 2.0 => PyTorch # ##################### def load_tf2_checkpoint_in_pytorch_model(pt_model, tf_checkpoint_path, tf_inputs=None, allow_missing_keys=False): """ Load TF 2.0 HDF5 checkpoint in a PyTorch model We use HDF5 to easily do transfer learning (see https://github.com/tensorflow/tensorflow/blob/ee16fcac960ae660e0e4496658a366e2f745e1f0/tensorflow/python/keras/engine/network.py#L1352-L1357). """ try: import tensorflow as tf # noqa: F401 import torch # noqa: F401 except ImportError: logger.error( "Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. Please see " "https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions." ) raise import transformers logger.info("Loading TensorFlow weights from {}".format(tf_checkpoint_path)) # Instantiate and load the associated TF 2.0 model tf_model_class_name = "TF" + pt_model.__class__.__name__ # Add "TF" at the beggining tf_model_class = getattr(transformers, tf_model_class_name) tf_model = tf_model_class(pt_model.config) if tf_inputs is None: tf_inputs = tf_model.dummy_inputs if tf_inputs is not None: tf_model(tf_inputs, training=False) # Make sure model is built tf_model.load_weights(tf_checkpoint_path, by_name=True) return load_tf2_model_in_pytorch_model(pt_model, tf_model, allow_missing_keys=allow_missing_keys) def load_tf2_model_in_pytorch_model(pt_model, tf_model, allow_missing_keys=False): """ Load TF 2.0 model in a pytorch model """ weights = tf_model.weights return load_tf2_weights_in_pytorch_model(pt_model, weights, allow_missing_keys=allow_missing_keys) def load_tf2_weights_in_pytorch_model(pt_model, tf_weights, allow_missing_keys=False): """ Load TF2.0 symbolic weights in a PyTorch model """ try: import tensorflow as tf # noqa: F401 import torch # noqa: F401 except ImportError: logger.error( "Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. Please see " "https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions." 
) raise new_pt_params_dict = {} current_pt_params_dict = dict(pt_model.named_parameters()) # Make sure we are able to load PyTorch base models as well as derived models (with heads) # TF models always have a prefix, some of PyTorch models (base ones) don't start_prefix_to_remove = "" if not any(s.startswith(pt_model.base_model_prefix) for s in current_pt_params_dict.keys()): start_prefix_to_remove = pt_model.base_model_prefix + "." # Build a map from potential PyTorch weight names to TF 2.0 Variables tf_weights_map = {} for tf_weight in tf_weights: pt_name, transpose = convert_tf_weight_name_to_pt_weight_name( tf_weight.name, start_prefix_to_remove=start_prefix_to_remove ) tf_weights_map[pt_name] = (tf_weight.numpy(), transpose) all_tf_weights = set(list(tf_weights_map.keys())) loaded_pt_weights_data_ptr = {} missing_keys_pt = [] for pt_weight_name, pt_weight in current_pt_params_dict.items(): # Handle PyTorch shared weight ()not duplicated in TF 2.0 if pt_weight.data_ptr() in loaded_pt_weights_data_ptr: new_pt_params_dict[pt_weight_name] = loaded_pt_weights_data_ptr[pt_weight.data_ptr()] continue # Find associated numpy array in pytorch model state dict if pt_weight_name not in tf_weights_map: if allow_missing_keys: missing_keys_pt.append(pt_weight_name) continue raise AttributeError("{} not found in TF 2.0 model".format(pt_weight_name)) array, transpose = tf_weights_map[pt_weight_name] if transpose: array = numpy.transpose(array) if len(pt_weight.shape) < len(array.shape): array = numpy.squeeze(array) elif len(pt_weight.shape) > len(array.shape): array = numpy.expand_dims(array, axis=0) try: assert list(pt_weight.shape) == list(array.shape) except AssertionError as e: e.args += (pt_weight.shape, array.shape) raise e # logger.warning("Initialize PyTorch weight {}".format(pt_weight_name)) new_pt_params_dict[pt_weight_name] = torch.from_numpy(array) loaded_pt_weights_data_ptr[pt_weight.data_ptr()] = torch.from_numpy(array) all_tf_weights.discard(pt_weight_name) missing_keys, unexpected_keys = pt_model.load_state_dict(new_pt_params_dict, strict=False) missing_keys += missing_keys_pt if len(missing_keys) > 0: logger.info( "Weights of {} not initialized from TF 2.0 model: {}".format(pt_model.__class__.__name__, missing_keys) ) if len(unexpected_keys) > 0: logger.info( "Weights from TF 2.0 model not used in {}: {}".format(pt_model.__class__.__name__, unexpected_keys) ) logger.info("Weights or buffers not loaded from TF 2.0 model: {}".format(all_tf_weights)) return pt_model
[ 2, 19617, 28, 40477, 12, 23, 198, 2, 15069, 2864, 383, 3012, 9552, 15417, 4816, 46665, 290, 383, 12905, 2667, 32388, 3457, 13, 1074, 13, 198, 2, 15069, 357, 66, 8, 2864, 11, 15127, 23929, 44680, 6234, 13, 220, 1439, 2489, 10395, 13,...
2.466197
5,251
import os from timeit import default_timer as timer import unittest import pytest from decorator import decorator from hail.utils.java import Env import hail as hl from hail.backend.local_backend import LocalBackend _initialized = False _test_dir = os.environ.get('HAIL_TEST_RESOURCES_DIR', '../src/test/resources') _doctest_dir = os.environ.get('HAIL_DOCTEST_DATA_DIR', 'hail/docs/data') _dataset = None fails_local_backend = pytest.mark.xfail( os.environ.get('HAIL_QUERY_BACKEND') == 'local', reason="doesn't yet work on local backend", strict=True)
[ 11748, 28686, 198, 6738, 640, 270, 1330, 4277, 62, 45016, 355, 19781, 198, 11748, 555, 715, 395, 198, 11748, 12972, 9288, 198, 6738, 11705, 1352, 1330, 11705, 1352, 198, 198, 6738, 32405, 13, 26791, 13, 12355, 1330, 2039, 85, 198, 11748...
2.723005
213
""" Modifications copyright (C) 2020 Michael Strobl """ import time import tensorflow as tf import numpy as np from entity_linker.models.base import Model
[ 37811, 198, 5841, 6637, 6634, 357, 34, 8, 12131, 3899, 30183, 2436, 198, 37811, 198, 198, 11748, 640, 198, 11748, 11192, 273, 11125, 355, 48700, 198, 11748, 299, 32152, 355, 45941, 198, 198, 6738, 9312, 62, 8726, 263, 13, 27530, 13, 8...
3.488889
45
#!/usr/bin/env python import sys import parmed as pmd import numpy as np from scipy.spatial import distance if len(sys.argv) < 2: print "Usage: molecular_diameter.py <mymolecule.mol2>" exit(1) mol = pmd.load_file(sys.argv[1]) crds = mol.coordinates dist = distance.cdist(crds, crds, 'euclidean') print np.max(dist) exit(0)
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 11748, 25064, 198, 11748, 1582, 1150, 355, 9114, 67, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 629, 541, 88, 13, 2777, 34961, 1330, 5253, 198, 198, 361, 18896, 7, 17597, 13, ...
2.384058
138
import sys import pandas as pd from simpletransformers.classification import ClassificationModel prefix = "data/" train_df = pd.read_csv(prefix + "train.csv", header=None) train_df.head() eval_df = pd.read_csv(prefix + "test.csv", header=None) eval_df.head() train_df[0] = (train_df[0] == 2).astype(int) eval_df[0] = (eval_df[0] == 2).astype(int) train_df = pd.DataFrame( {"text": train_df[1].replace(r"\n", " ", regex=True), "labels": train_df[0]} ) print(train_df.head()) eval_df = pd.DataFrame( {"text": eval_df[1].replace(r"\n", " ", regex=True), "labels": eval_df[0]} ) print(eval_df.head()) model_type = sys.argv[1] if model_type == "bert": model_name = "bert-base-cased" elif model_type == "roberta": model_name = "roberta-base" elif model_type == "distilbert": model_name = "distilbert-base-cased" elif model_type == "distilroberta": model_type = "roberta" model_name = "distilroberta-base" elif model_type == "electra-base": model_type = "electra" model_name = "google/electra-base-discriminator" elif model_type == "electra-small": model_type = "electra" model_name = "google/electra-small-discriminator" elif model_type == "xlnet": model_name = "xlnet-base-cased" train_args = { "reprocess_input_data": True, "overwrite_output_dir": True, "use_cached_eval_features": True, "output_dir": f"outputs/{model_type}", "best_model_dir": f"outputs/{model_type}/best_model", "evaluate_during_training": True, "max_seq_length": 128, "num_train_epochs": 3, "evaluate_during_training_steps": 1000, "wandb_project": "Classification Model Comparison", "wandb_kwargs": {"name": model_name}, "save_model_every_epoch": False, "save_eval_checkpoints": False, # "use_early_stopping": True, # "early_stopping_metric": "mcc", # "n_gpu": 2, # "manual_seed": 4, # "use_multiprocessing": False, "train_batch_size": 128, "eval_batch_size": 64, # "config": { # "output_hidden_states": True # } } if model_type == "xlnet": train_args["train_batch_size"] = 64 train_args["gradient_accumulation_steps"] = 2 # Create a ClassificationModel model = ClassificationModel(model_type, model_name, args=train_args) # Train the model model.train_model(train_df, eval_df=eval_df) # # # Evaluate the model # result, model_outputs, wrong_predictions = model.eval_model(eval_df)
[ 11748, 25064, 198, 198, 11748, 19798, 292, 355, 279, 67, 198, 198, 6738, 2829, 35636, 364, 13, 4871, 2649, 1330, 40984, 17633, 198, 198, 40290, 796, 366, 7890, 30487, 198, 198, 27432, 62, 7568, 796, 279, 67, 13, 961, 62, 40664, 7, 4...
2.40337
1,009
import networkx as nx import os.path
[ 11748, 3127, 87, 355, 299, 87, 198, 11748, 28686, 13, 6978, 628, 198 ]
3
13
import logging import uuid from django.db import models from django.urls import reverse from django.utils.encoding import force_text from django.utils.translation import ugettext_lazy as _ from model_utils.managers import InheritanceManager from mayan.apps.django_gpg.exceptions import VerificationError from mayan.apps.django_gpg.models import Key from mayan.apps.documents.models import DocumentVersion from mayan.apps.storage.classes import DefinedStorageLazy from .literals import STORAGE_NAME_DOCUMENT_SIGNATURES_DETACHED_SIGNATURE from .managers import DetachedSignatureManager, EmbeddedSignatureManager logger = logging.getLogger(name=__name__)
[ 11748, 18931, 198, 11748, 334, 27112, 198, 198, 6738, 42625, 14208, 13, 9945, 1330, 4981, 198, 6738, 42625, 14208, 13, 6371, 82, 1330, 9575, 198, 6738, 42625, 14208, 13, 26791, 13, 12685, 7656, 1330, 2700, 62, 5239, 198, 6738, 42625, 14...
3.372449
196
from ConfigParser import ConfigParser from sys import argv REPLACE_PROPERTIES = ["file_path", "database_connection", "new_file_path"] MAIN_SECTION = "app:main" if __name__ == '__main__': sync()
[ 6738, 17056, 46677, 1330, 17056, 46677, 198, 6738, 25064, 1330, 1822, 85, 198, 198, 2200, 6489, 11598, 62, 4805, 3185, 17395, 11015, 796, 14631, 7753, 62, 6978, 1600, 366, 48806, 62, 38659, 1600, 366, 3605, 62, 7753, 62, 6978, 8973, 198...
2.819444
72
# Copyright (c) 2020 Huawei Technologies Co.,Ltd. # # openGauss is licensed under Mulan PSL v2. # You can use this software according to the terms and conditions of the Mulan PSL v2. # You may obtain a copy of Mulan PSL v2 at: # # http://license.coscl.org.cn/MulanPSL2 # # THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, # EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, # MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. # See the Mulan PSL v2 for more details. # ------------------------------------------------------------------------- # # test_ssh.py # # IDENTIFICATION # src/gausskernel/dbmind/xtuner/test/test_ssh.py # # ------------------------------------------------------------------------- from ssh import ExecutorFactory if __name__ == "__main__": test_remote() test_local()
[ 2, 15069, 357, 66, 8, 12131, 43208, 21852, 1766, 1539, 43, 8671, 13, 198, 2, 198, 2, 1280, 35389, 1046, 318, 11971, 739, 17996, 272, 6599, 43, 410, 17, 13, 198, 2, 921, 460, 779, 428, 3788, 1864, 284, 262, 2846, 290, 3403, 286, ...
3.222222
270
# Original work Copyright (C) 2017 Tiancheng Zhao, Carnegie Mellon University # Modified work Copyright 2018 Weiyan Shi. import tensorflow as tf import numpy as np from nltk.translate.bleu_score import sentence_bleu from nltk.translate.bleu_score import SmoothingFunction def get_bow(embedding, avg=False): """ Assumption, the last dimension is the embedding The second last dimension is the sentence length. The rank must be 3 """ embedding_size = embedding.get_shape()[2].value if avg: return tf.reduce_mean(embedding, reduction_indices=[1]), embedding_size else: return tf.reduce_sum(embedding, reduction_indices=[1]), embedding_size def get_rnn_encode(embedding, cell, length_mask=None, scope=None, reuse=None): """ Assumption, the last dimension is the embedding The second last dimension is the sentence length. The rank must be 3 The padding should have zero """ with tf.variable_scope(scope, 'RnnEncoding', reuse=reuse): if length_mask is None: length_mask = tf.reduce_sum(tf.sign(tf.reduce_max(tf.abs(embedding), reduction_indices=2)),reduction_indices=1) length_mask = tf.to_int32(length_mask) _, encoded_input = tf.nn.dynamic_rnn(cell, embedding, sequence_length=length_mask, dtype=tf.float32) return encoded_input, cell.state_size def get_bi_rnn_encode(embedding, f_cell, b_cell, length_mask=None, scope=None, reuse=None): """ Assumption, the last dimension is the embedding The second last dimension is the sentence length. The rank must be 3 The padding should have zero """ with tf.variable_scope(scope, 'RnnEncoding', reuse=reuse): if length_mask is None: length_mask = tf.reduce_sum(tf.sign(tf.reduce_max(tf.abs(embedding), reduction_indices=2)),reduction_indices=1) length_mask = tf.to_int32(length_mask) _, encoded_input = tf.nn.bidirectional_dynamic_rnn(f_cell, b_cell, embedding, sequence_length=length_mask, dtype=tf.float32) encoded_input = tf.concat(encoded_input, 1) return encoded_input, f_cell.state_size+b_cell.state_size def get_prob_for_one_sent(vocab_prob, sent, length_mask=None): """ :param vocab_prob: :param sent: :param length_mask: :return: """ tf.boolean_mask(tf.reshape(usr_input_sent, [-1, 50]), tf.sequence_mask(length_mask, 50)) def tf_repeat(tensor, repeats): """ :param tensor: :param repeats: :return: """ with tf.variable_scope("repeat"): expanded_tensor = tf.expand_dims(tensor, -1) multiples = [1] + repeats tiled_tensor = tf.tile(expanded_tensor, multiples=multiples) repeated_tensor = tf.reshape(tiled_tensor, tf.shape(tensor) * repeats) return repeated_tensor
[ 2, 13745, 670, 15069, 357, 34, 8, 2177, 20834, 2395, 782, 29436, 11, 33976, 49808, 2059, 198, 2, 40499, 670, 15069, 2864, 29341, 4121, 16380, 13, 198, 198, 11748, 11192, 273, 11125, 355, 48700, 198, 11748, 299, 32152, 355, 45941, 198, ...
2.551317
1,101
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # coding: utf-8 # pylint: disable= arguments-differ # pylint: disable= missing-docstring "Addtional image transforms." import random import math import numpy as np from mxnet import image, nd from mxnet.gluon import Block __all__ = ['RandomCrop', 'RandomErasing']
[ 2, 49962, 284, 262, 24843, 10442, 5693, 357, 1921, 37, 8, 739, 530, 198, 2, 393, 517, 18920, 5964, 11704, 13, 220, 4091, 262, 28536, 2393, 198, 2, 9387, 351, 428, 670, 329, 3224, 1321, 198, 2, 5115, 6634, 9238, 13, 220, 383, 7054,...
3.724382
283
import plotly.graph_objects as go import streamlit as st import pandas as pd from utils import * import glob import wfdb import os ANNOTATIONS_COL_NAME = 'annotations' ''' # MIT-BIH Arrhythmia DB Exploration ''' record_ids = [os.path.basename(file)[:-4] for file in glob.glob('data/*.dat')] if len(record_ids) == 0: st.write('Warning ! No data could be found under the ./data/ directory.', '*\*.dat*, *\*.hea*, *\*.atr* files and such should be placed ', 'immediately under the ./data/ directory') else: record_ids.sort() record_id = st.selectbox('Select a record id', record_ids) record = wfdb.rdrecord(f'data/{record_id}') annotation = wfdb.rdann(f'data/{record_id}', 'atr') st.write('Signals found in this record :') for idx, signal in enumerate(record.sig_name): st.write(f'- `{signal}` : in {record.units[idx]}, with a frequency of ' f'{record.fs * record.samps_per_frame[idx]}hz') st.write(f'Comments for this record : {record.comments}') signals_df = pd.DataFrame(record.p_signal, columns=record.sig_name) annot_serie = pd.Series(annotation.symbol, index=annotation.sample, name=ANNOTATIONS_COL_NAME) full_df = pd.concat([signals_df, annot_serie], axis=1) ''' ## Annotations ''' beat_annot_count = annot_serie.isin(dict(beat_annotations)).sum() non_beat_annot_count = annot_serie.isin(dict(non_beat_annotations)).sum() unique_annot = annot_serie.value_counts().index.values st.write(f'This record contains `{annot_serie.size}` annotations ' f'among which `{beat_annot_count}` beat annotations and ' f'`{non_beat_annot_count}` non beat annotation(s).') st.write('The annotations are the followings :') for annot in unique_annot: st.write(f'- `{annot}` : {annotation_definitions[annot]}') st.write('More explanations on the annotations are available here : ' 'https://archive.physionet.org/physiobank/annotations.shtml') # Plot counts for each annotation annot_counts_df = annot_serie \ .value_counts() \ .rename_axis(ANNOTATIONS_COL_NAME) \ .reset_index(name='counts') bar_fig = go.Figure(data=[go.Bar(x=annot_counts_df[ANNOTATIONS_COL_NAME], y=annot_counts_df['counts'], text=annot_counts_df['counts'], textposition='auto' )]) bar_fig.update_layout(title='Annotations by count', yaxis_title='counts', xaxis_title='annotations') st.write(bar_fig) ''' ## Explore full dataset ''' signal = st.selectbox('Select a signal', record.sig_name) # Plot signals and annotations matching_rows_by_annot = {} for annot in unique_annot: matching_rows_by_annot[annot] = full_df[ANNOTATIONS_COL_NAME] == annot fig = go.Figure(layout=go.Layout(title=go.layout.Title( text='{} signal with annotations'.format(signal)))) fig.add_trace(go.Scatter(x=full_df.index.values, y=full_df[signal], mode='lines', name=signal)) for annot, annot_matching_rows in matching_rows_by_annot.items(): fig.add_trace(go.Scatter(x=full_df.index[annot_matching_rows].values, y=full_df[annot_matching_rows][signal].values, mode='markers', name='{} (annot)'.format(annot))) st.plotly_chart(fig)
[ 11748, 7110, 306, 13, 34960, 62, 48205, 355, 467, 198, 11748, 4269, 18250, 355, 336, 198, 11748, 19798, 292, 355, 279, 67, 198, 6738, 3384, 4487, 1330, 1635, 198, 11748, 15095, 198, 11748, 266, 69, 9945, 198, 11748, 28686, 198, 198, 1...
2.163886
1,678
from .config_store import ConfigStore config = ConfigStore() config.set_mqtt_broker("mqtt", 1883) config.set_redis_config("redis", 6379, 0)
[ 6738, 764, 11250, 62, 8095, 1330, 17056, 22658, 198, 198, 11250, 796, 17056, 22658, 3419, 198, 198, 11250, 13, 2617, 62, 76, 80, 926, 62, 7957, 6122, 7203, 76, 80, 926, 1600, 1248, 5999, 8, 198, 198, 11250, 13, 2617, 62, 445, 271, ...
2.6
55
# -*- coding: utf-8 -*- import socket as csocket from struct import pack,unpack from website.contrib.communication.models import *
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 11748, 17802, 355, 269, 44971, 198, 6738, 2878, 1330, 2353, 11, 403, 8002, 198, 6738, 3052, 13, 3642, 822, 13, 32560, 13, 27530, 1330, 1635, 628, 628 ]
3.268293
41
import pandas as pd import ete2 from ete2 import faces, Tree, AttrFace, TreeStyle import pylab from matplotlib.colors import hex2color, rgb2hex, hsv_to_rgb, rgb_to_hsv kelly_colors_hex = [ 0xFFB300, # Vivid Yellow 0x803E75, # Strong Purple 0xFF6800, # Vivid Orange 0xA6BDD7, # Very Light Blue 0xC10020, # Vivid Red 0xCEA262, # Grayish Yellow 0x817066, # Medium Gray # The following don't work well for people with defective color vision 0x007D34, # Vivid Green 0xF6768E, # Strong Purplish Pink 0x00538A, # Strong Blue 0xFF7A5C, # Strong Yellowish Pink 0x53377A, # Strong Violet 0xFF8E00, # Vivid Orange Yellow 0xB32851, # Strong Purplish Red 0xF4C800, # Vivid Greenish Yellow 0x7F180D, # Strong Reddish Brown 0x93AA00, # Vivid Yellowish Green 0x593315, # Deep Yellowish Brown 0xF13A13, # Vivid Reddish Orange 0x232C16, # Dark Olive Green ] def adjust_kelly_brightness(hex_color, val, recon_min, recon_max): """set brightness according to change in continuous reconstruction value""" h, s, v = rgb_to_hsv(hex2color('#{0:06X}'.format(hex_color))) scale_factor = 1 - (recon_max - val) / (recon_max - recon_min) v_new = v - (v * (scale_factor)) return rgb2hex(hsv_to_rgb(pd.np.array([h, s, v_new]))) if __name__ == "__main__": import argparse parser = argparse.ArgumentParser("""visualize target list of features""") parser.add_argument("node_recon", help = "node ancestral character state reconstruction") parser.add_argument("gain_recon", help = "gain events ancestral character state reconstruction") parser.add_argument("loss_recon", help = "loss events ancestral character state reconstruction") parser.add_argument("tree", help = "tree with internal nodes labeled") parser.add_argument("pfam_mapping", help = "feature mapping/list") parser.add_argument("feat_list", help = "list of features") parser.add_argument("--target_node", default = "N1", help = "list of features") parser.add_argument("phenotype", help = "target phenotype") parser.add_argument("--are_continuous_features_with_discrete_phenotype", action = 'store_true', help = "set if using continuous features with a discrete phenotype") parser.add_argument("threshold", type = float, help = "threshold to call genotype/phenotype events") parser.add_argument("sample_mapping", help = "mapping between sample ids and names") parser.add_argument("out", help = "output file") parser.add_argument("--max_feats", type = int, default = 10, help = "visualize at most max_feats features") parser.add_argument("--miscl", help = "table of misclassified samples") parser.add_argument("--node_annotation", help = "table of binary features for labeling the nodes") a = parser.parse_args() pt_tree, feats, pf2color = get_tree(node_recon = a.node_recon, gain_recon = a.gain_recon, loss_recon = a.loss_recon, pfam_mapping = a.pfam_mapping, tree = a.tree, feat_list = a.feat_list, phenotype = a.phenotype, target_node = a.target_node, threshold = a.threshold, sample_mapping = a.sample_mapping, are_continuous_features_with_discrete_phenotype = a.are_continuous_features_with_discrete_phenotype, max_feats = a.max_feats, miscl = a.miscl, node_annotation = a.node_annotation) plot_tree(pt_tree, a.target_node, a.out) plot_legend(feats, a.out, pf2color)
[ 11748, 19798, 292, 355, 279, 67, 198, 11748, 304, 660, 17, 198, 6738, 304, 660, 17, 1330, 6698, 11, 12200, 11, 3460, 81, 32388, 11, 12200, 21466, 198, 11748, 279, 2645, 397, 198, 6738, 2603, 29487, 8019, 13, 4033, 669, 1330, 17910, ...
2.73756
1,246
import attr from firedrake import * import numpy as np import matplotlib.pyplot as plt import matplotlib from scipy.linalg import svd from scipy.sparse.linalg import svds from scipy.sparse import csr_matrix from slepc4py import SLEPc import pandas as pd from tqdm import tqdm import os matplotlib.use('Agg') def plot_matrix(assembled_form, **kwargs): """Provides a plot of a matrix.""" fig, ax = plt.subplots(1, 1) petsc_mat = assembled_form.M.handle size = petsc_mat.getSize() Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size) Mnp.eliminate_zeros() Mnp = Mnp.toarray() # Eliminate rows and columns filled with zero entries Mnp = Mnp[~(Mnp==0).all(1)] idx = np.argwhere(np.all(Mnp[..., :] == 0, axis=0)) Mnp = np.delete(Mnp, idx, axis=1) Am = np.ma.masked_values(Mnp, 0, rtol=1e-13) # Plot the matrix plot = ax.matshow(Am, **kwargs) # Remove axis ticks and values ax.tick_params(length=0) ax.set_xticklabels([]) ax.set_yticklabels([]) return plot def plot_matrix_mixed(assembled_form, **kwargs): """Provides a plot of a mixed matrix.""" fig, ax = plt.subplots(1, 1) petsc_mat = assembled_form.M.handle f0_size = assembled_form.M[0, 0].handle.getSize() size = petsc_mat.getSize() Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size) Mnp.eliminate_zeros() Mnp = Mnp.toarray() # Eliminate rows and columns filled with zero entries Mnp = Mnp[~(Mnp==0).all(1)] idx = np.argwhere(np.all(Mnp[..., :] == 0, axis=0)) Mnp = np.delete(Mnp, idx, axis=1) Am = np.ma.masked_values(Mnp, 0, rtol=1e-13) # Plot the matrix plot = ax.matshow(Am, **kwargs) # Remove axis ticks and values ax.tick_params(length=0) ax.set_xticklabels([]) ax.set_yticklabels([]) ax.axhline(y=f0_size[0] - 0.5, color="k") ax.axvline(x=f0_size[0] - 0.5, color="k") return plot def plot_matrix_primal_hybrid_full(a_form, bcs=[], **kwargs): """Provides a plot of a full hybrid-mixed matrix.""" fig, ax = plt.subplots(1, 1) assembled_form = assemble(a_form, bcs=bcs, mat_type="aij") petsc_mat = assembled_form.M.handle f0_size = assembled_form.M[0, 0].handle.getSize() size = petsc_mat.getSize() Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size) Mnp.eliminate_zeros() Mnp = Mnp.toarray() # Eliminate rows and columns filled with zero entries Mnp = Mnp[~(Mnp==0).all(1)] idx = np.argwhere(np.all(Mnp[..., :] == 0, axis=0)) Mnp = np.delete(Mnp, idx, axis=1) Am = np.ma.masked_values(Mnp, 0, rtol=1e-13) # Plot the matrix plot = ax.matshow(Am, **kwargs) # Remove axis ticks and values ax.tick_params(length=0) ax.set_xticklabels([]) ax.set_yticklabels([]) ax.axhline(y=f0_size[0] - 0.5, color="k") ax.axvline(x=f0_size[0] - 0.5, color="k") return plot def plot_matrix_mixed_hybrid_full(a_form, bcs=[], **kwargs): """Provides a plot of a full hybrid-mixed matrix.""" fig, ax = plt.subplots(1, 1) assembled_form = assemble(a_form, bcs=bcs, mat_type="aij") petsc_mat = assembled_form.M.handle f0_size = assembled_form.M[0, 0].handle.getSize() f1_size = assembled_form.M[1, 1].handle.getSize() size = petsc_mat.getSize() Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size) Mnp.eliminate_zeros() Mnp = Mnp.toarray() # Eliminate rows and columns filled with zero entries Mnp = Mnp[~(Mnp==0).all(1)] idx = np.argwhere(np.all(Mnp[..., :] == 0, axis=0)) Mnp = np.delete(Mnp, idx, axis=1) Am = np.ma.masked_values(Mnp, 0, rtol=1e-13) # Plot the matrix plot = ax.matshow(Am, **kwargs) # Remove axis ticks and values ax.tick_params(length=0) ax.set_xticklabels([]) ax.set_yticklabels([]) ax.axhline(y=f0_size[0] - 0.5, color="k") ax.axvline(x=f0_size[0] - 0.5, color="k") 
ax.axhline(y=f0_size[0] + f1_size[0] - 0.5, color="k") ax.axvline(x=f0_size[0] + f1_size[0] - 0.5, color="k") return plot def plot_matrix_hybrid_multiplier(a_form, trace_index=2, bcs=[], **kwargs): """Provides a plot of a condensed hybrid-mixed matrix for single scale problems.""" fig, ax = plt.subplots(1, 1) _A = Tensor(a_form) A = _A.blocks idx = trace_index S = A[idx, idx] - A[idx, :idx] * A[:idx, :idx].inv * A[:idx, idx] Smat = assemble(S, bcs=bcs) petsc_mat = Smat.M.handle size = petsc_mat.getSize() Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size) Mnp.eliminate_zeros() Mnp = Mnp.toarray() # Eliminate rows and columns filled with zero entries Mnp = Mnp[~(Mnp==0).all(1)] idx = np.argwhere(np.all(Mnp[..., :] == 0, axis=0)) Mnp = np.delete(Mnp, idx, axis=1) Am = np.ma.masked_values(Mnp, 0, rtol=1e-13) # Plot the matrix plot = ax.matshow(Am, **kwargs) # Below there is the spy alternative # plot = plt.spy(Am, **kwargs) # Remove axis ticks and values ax.tick_params(length=0) ax.set_xticklabels([]) ax.set_yticklabels([]) return plot def filter_real_part_in_array(array: np.ndarray, imag_threshold: float = 1e-5) -> np.ndarray: """Utility function to filter real part in a numpy array. :param array: Array with real and complex numbers. :param imag_threshold: Threshold to cut off imaginary part in complex number. :return: Filtered array with only real numbers. """ real_part_array = array.real[abs(array.imag) < 1e-5] return real_part_array # Solver options solvers_options = { # "cg": solve_poisson_cg, # "cgls": solve_poisson_cgls, # "dgls": solve_poisson_dgls, # "sdhm": solve_poisson_sdhm, # "ls": solve_poisson_ls, # "dls": solve_poisson_dls, "lsh": solve_poisson_lsh, # "vms": solve_poisson_vms, # "dvms": solve_poisson_dvms, # "mixed_RT": solve_poisson_mixed_RT, # "hdg": solve_poisson_hdg, # "cgh": solve_poisson_cgh, # "ldgc": solve_poisson_ldgc, # "sipg": solve_poisson_sipg, } degree = 1 last_degree = 1 for current_solver in solvers_options: # Setting the output file name name = f"{current_solver}" # Selecting the solver and its kwargs solver = solvers_options[current_solver] # Performing the convergence study hp_refinement_cond_number_calculation( solver, min_degree=degree, max_degree=degree + last_degree, quadrilateral=True, name=name ) # N = 5 # mesh = UnitSquareMesh(N, N, quadrilateral=True) # result = solve_poisson_lsh(mesh, degree=1) # print(f'Is symmetric? {result.is_operator_symmetric}') # print(f'nnz: {result.nnz}') # print(f'DoFs: {result.number_of_dofs}') # print(f'Condition Number: {result.condition_number}') # # Plotting the resulting matrix # matplotlib.use('TkAgg') # import copy # my_cmap = copy.copy(plt.cm.get_cmap("winter")) # my_cmap.set_bad(color="lightgray") # # plot_matrix_primal_hybrid_full(result.form, result.bcs, cmap=my_cmap) # # plot_matrix_mixed_hybrid_full(result.form, result.bcs, cmap=my_cmap) # plot_matrix_hybrid_multiplier(result.form, trace_index=2, bcs=result.bcs, cmap=my_cmap) # # plot_matrix(result.assembled_form, cmap=my_cmap) # # plot_matrix_mixed(result.assembled_form, cmap=my_cmap) # plt.tight_layout() # plt.savefig("sparse_pattern.png") # plt.show()
[ 11748, 708, 81, 198, 6738, 6294, 33788, 1330, 1635, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 198, 11748, 2603, 29487, 8019, 198, 6738, 629, 541, 88, 13, 75, 1292, 70, 1330, 264,...
2.200237
3,371
# -*- encoding: utf-8 -*-
'''
@File    :_time_domain_features.py
@Time    :2021/04/16 20:02:55
@Author  :wlgls
@Version :1.0
'''

import numpy as np


def statistics(data, combined=True):
    """Statistical features include Power, Mean, Std, 1st differece, Normalized 1st difference, 2nd difference, Normalized 2nd difference.

    Parameters
    ----------
    data
        array data, for DEAP dataset, It's shape may be (n_trials, n_channels, points)

    Return
    ----------
    f: Solved feature, It's shape is similar to the shape of your input data.
    e.g. for input.shape is (n_trials, n_channels, points), the f.shape is (n_trials, n_channels, n_features)

    Example
    ----------
    In [13]: d.shape, l.shape
    Out[13]: ((40, 32, 8064), (40, 1))

    In [14]: statistics_feature(d).shape
    Out[14]: (40, 32, 7)
    """
    # Power
    power = np.mean(data**2, axis=-1)
    # Mean
    ave = np.mean(data, axis=-1)
    # Standard Deviation
    std = np.std(data, axis=-1)
    # the mean of the absolute values of 1st differece mean
    diff_1st = np.mean(np.abs(np.diff(data, n=1, axis=-1)), axis=-1)
    # the mean of the absolute values of Normalized 1st difference
    normal_diff_1st = diff_1st / std
    # the mean of the absolute values of 2nd difference mean
    diff_2nd = np.mean(np.abs(data[..., 2:] - data[..., :-2]), axis=-1)
    # the mean of the absolute values of Normalized 2nd difference
    normal_diff_2nd = diff_2nd / std

    # Features.append(np.concatenate((Power, Mean, Std, diff_1st, normal_diff_1st, diff_2nd, normal_diff_2nd), axis=2))
    f = np.stack((power, ave, std, diff_1st, normal_diff_1st, diff_2nd, normal_diff_2nd), axis=-1)
    if combined:
        f = f.reshape((*f.shape[:-2]))
    return f


def hjorth(data, combined=True):
    """Solving Hjorth features include activity, mobility, complexity

    Parameters
    ----------
    data
        array data, for DEAP dataset, It's shape may be (n_trials, n_channels, points)

    Return
    ----------
    f: Solved feature, It's shape is similar to the shape of your input data.
    e.g. for input.shape is (n_trials, n_channels, points), the f.shape is (n_trials, n_channels, n_features)

    Example
    ----------
    In [15]: d.shape, l.shape
    Out[15]: ((40, 32, 8064), (40, 1))

    In [16]: hjorth_features(d).shape
    Out[16]: (40, 32, 3)
    """
    data = np.array(data)
    ave = np.mean(data, axis=-1)[..., np.newaxis]
    diff_1st = np.diff(data, n=1, axis=-1)
    # print(diff_1st.shape)
    diff_2nd = data[..., 2:] - data[..., :-2]

    # Activity
    activity = np.mean((data-ave)**2, axis=-1)
    # print(Activity.shape)

    # Mobility
    varfdiff = np.var(diff_1st, axis=-1)
    # print(varfdiff.shape)
    mobility = np.sqrt(varfdiff / activity)

    # Complexity
    varsdiff = np.var(diff_2nd, axis=-1)
    complexity = np.sqrt(varsdiff/varfdiff) / mobility

    f = np.stack((activity, mobility, complexity), axis=-1)
    if combined:
        f = f.reshape((*f.shape[:-2]))
    return f


def higher_order_crossing(data, k=10, combined=True):
    """Solving the feature of hoc. Hoc is a high order zero crossing quantity.

    Parameters
    ----------
    data : array
        data, for DEAP dataset, It's shape may be (n_trials, n_channels, points)
    k : int, optional
        Order, by default 10

    Return
    ----------
    nzc: Solved feature, It's shape is similar to the shape of your input data.
    e.g. for input.shape is (n_trials, n_channels, points), the f.shape is (n_trials, n_channels, n_features)

    Example
    ----------
    In [4]: d, l = load_deap(path, 0)

    In [5]: hoc(d, k=10).shape
    Out[5]: (40, 32, 10)

    In [6]: hoc(d, k=5).shape
    Out[6]: (40, 32, 5)
    """
    nzc = []
    for i in range(k):
        curr_diff = np.diff(data, n=i)
        x_t = curr_diff >= 0
        x_t = np.diff(x_t)
        x_t = np.abs(x_t)
        count = np.count_nonzero(x_t, axis=-1)
        nzc.append(count)
    f = np.stack(nzc, axis=-1)
    if combined:
        f = f.reshape((*f.shape[:-2]))
    return f


def sevcik_fd(data, combined=True):
    """Fractal dimension feature is solved, which is used to describe the shape information of EEG time series data.
    It seems that this feature can be used to judge the electrooculogram and EEG.The calculation methods include
    Sevcik, fractal Brownian motion, box counting, Higuchi and so on.
    Sevcik method: fast calculation and robust analysis of noise
    Higuchi: closer to the theoretical value than box counting
    The Sevick method is used here because it is easier to implement

    Parameters
    ----------
    Parameters
    ----------
    data
        array data, for DEAP dataset, It's shape may be (n_trials, n_channels, points)

    Return
    ----------
    f: Solved feature, It's shape is similar to the shape of your input data.
    e.g. for input.shape is (n_trials, n_channels, points), the f.shape is (n_trials, n_channels, n_features)

    Example
    ----------
    In [7]: d.shape, l.shape
    Out[7]: ((40, 32, 8064), (40, 1))

    In [8]: sevcik_fd(d).shape
    Out[8]: (40, 32, 1)
    """
    points = data.shape[-1]
    x = np.arange(1, points+1)
    x_ = x / np.max(x)
    miny = np.expand_dims(np.min(data, axis=-1), axis=-1)
    maxy = np.expand_dims(np.max(data, axis=-1), axis=-1)
    y_ = (data-miny) / (maxy-miny)

    L = np.expand_dims(np.sum(np.sqrt(np.diff(y_, axis=-1)**2 + np.diff(x_)**2), axis=-1), axis=-1)
    f = 1 + np.log(L) / np.log(2 * (points-1))
    # print(FD.shape)
    if combined:
        f = f.reshape((*f.shape[:-2]))
    return f


def calc_L(X, k, m):
    """
    Return Lm(k) as the length of the curve.
    """
    N = X.shape[-1]
    n = np.floor((N-m)/k).astype(np.int64)
    norm = (N-1) / (n*k)

    ss = np.sum(np.abs(np.diff(X[..., m::k], n=1)), axis=-1)
    Lm = (ss*norm) / k
    return Lm


def calc_L_average(X, k):
    """
    Return <L(k)> as the average value over k sets of Lm(k).
    """
    calc_L_series = np.frompyfunc(lambda m: calc_L(X, k, m), 1, 1)
    L_average = np.average(calc_L_series(np.arange(1, k+1)))
    return L_average


def higuchi_fd(data, k_max, combined=True):
    """Fractal dimension feature is solved, which is used to describe the shape information of EEG time series data.
    It seems that this feature can be used to judge the electrooculogram and EEG.The calculation methods include
    Sevcik, fractal Brownian motion, box counting, Higuchi and so on.
    Sevcik method: fast calculation and robust analysis of noise
    Higuchi: closer to the theoretical value than box counting
    The higuchi method is used here because it is easier to implement

    Parameters
    ----------
    Parameters
    ----------
    data
        array data, for DEAP dataset, It's shape may be (n_trials, n_channels, points)

    Return
    ----------
    f: Solved feature, It's shape is similar to the shape of your input data.
    e.g. for input.shape is (n_trials, n_channels, points), the f.shape is (n_trials, n_channels, n_features)

    Example
    ----------
    In [7]: d.shape, l.shape
    Out[7]: ((40, 32, 8064), (40, 1))

    In [8]: higuchi_fd(d).shape
    Out[8]: (40, 32, 1)
    """
    calc_L_average_series = np.frompyfunc(lambda k: calc_L_average(data, k), 1, 1)

    k = np.arange(1, k_max+1)
    L = calc_L_average_series(k)
    L = np.stack(L, axis=-1)

    fd = np.zeros(data.shape[:-1])
    for ind in np.argwhere(L[..., 0]):
        tmp = L[ind[0], ind[1], ind[2]]
        D, _ = np.polyfit(np.log2(k), np.log2(tmp), 1)
        fd[ind[0], ind[1], ind[2]] = - D
    f = np.expand_dims(fd, axis=-1)
    if combined:
        f = f.reshape((*f.shape[:-2]))
    return f
[ 2, 532, 9, 12, 21004, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 7061, 6, 198, 31, 8979, 220, 220, 220, 220, 220, 220, 220, 1058, 62, 2435, 62, 27830, 62, 40890, 13, 9078, 198, 31, 7575, 220, 220, 220, 220, 220, 220, 220, 1058, ...
2.315511
3,404
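A minimal usage sketch for the feature functions above, assuming they are saved as the module named in the file header (_time_domain_features.py) and using random data in the DEAP-style shape described in the docstrings; combined=False is used so the per-feature axis is kept.

import numpy as np
# Assumed import path, taken from the @File header of the sample
from _time_domain_features import statistics, hjorth, higher_order_crossing, sevcik_fd

# Random stand-in for DEAP-style data: (n_trials, n_channels, points)
d = np.random.randn(40, 32, 8064)

print(statistics(d, combined=False).shape)                    # (40, 32, 7)
print(hjorth(d, combined=False).shape)                        # (40, 32, 3)
print(higher_order_crossing(d, k=10, combined=False).shape)   # (40, 32, 10)
print(sevcik_fd(d, combined=False).shape)                     # (40, 32, 1)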
from .session import Session, MutualAPI
[ 6738, 764, 29891, 1330, 23575, 11, 48807, 17614 ]
4.875
8
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField
from wtforms.validators import InputRequired, Email, ValidationError

from models import User
[ 6738, 42903, 62, 86, 27110, 1330, 46947, 8479, 198, 6738, 266, 83, 23914, 1330, 10903, 15878, 11, 30275, 15878, 11, 39900, 15878, 198, 6738, 266, 83, 23914, 13, 12102, 2024, 1330, 23412, 37374, 11, 9570, 11, 3254, 24765, 12331, 198, 673...
4.065217
46
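The sample only shows the imports; a form class presumably follows in the original file. One plausible sketch built from exactly those imports is given below; the class name, field names, and the uniqueness check are assumptions (the User lookup assumes a Flask-SQLAlchemy style model), not taken from the sample.

class RegisterForm(FlaskForm):
    username = StringField(validators=[InputRequired()])
    email = StringField(validators=[InputRequired(), Email()])
    password = PasswordField(validators=[InputRequired()])
    submit = SubmitField("Register")

    def validate_email(self, field):
        # Reject duplicate accounts; User is the model imported in the sample.
        if User.query.filter_by(email=field.data).first():
            raise ValidationError("An account with this email already exists.")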
# GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
[ 2, 24700, 1137, 11617, 11050, 509, 2662, 6981, 26144, 532, 8410, 5626, 48483, 198, 11748, 479, 296, 392, 198, 11748, 33918, 628, 198, 220, 220, 220, 220, 198, 220, 220, 220, 220, 628 ]
2.393939
33
#!/usr/bin/python3

from pyspark.sql import SparkSession

from haychecker.dhc.metrics import rule

spark = SparkSession.builder.appName("rule_example").getOrCreate()

df = spark.read.format("csv").option("header", "true").load("examples/resources/employees.csv")
df.show()

condition1 = {"column": "salary", "operator": "gt", "value": 2100}
conditions = [condition1]

r1 = rule(conditions, df)[0]

print("Rule salary>2100: {}".format(r1))

condition1 = {"column": "salary", "operator": "lt", "value": 2100}
condition2 = {"column": "title", "operator": "eq", "value": "Sales Representative"}
conditions = [condition1, condition2]

task1 = rule(conditions)

condition1 = {"column": "salary", "operator": "lt", "value": 2100}
condition2 = {"column": "city", "operator": "eq", "value": "London"}
conditions = [condition1, condition2]

task2 = rule(conditions)

task3 = task1.add(task2)

result = task3.run(df)

r1 = result[0]["scores"][0]
r2 = result[1]["scores"][0]

print("Rule salary<2100 and title=\"Sales Representative\": {},"
      " rule salary<2100 and city=\"London\": {}".format(r1, r2))
[ 2, 48443, 14629, 14, 8800, 14, 29412, 18, 198, 6738, 279, 893, 20928, 13, 25410, 1330, 17732, 36044, 198, 198, 6738, 27678, 9122, 263, 13, 34985, 66, 13, 4164, 10466, 1330, 3896, 198, 198, 2777, 668, 796, 17732, 36044, 13, 38272, 13, ...
2.868421
380
import collections
import logging
import urllib.parse

from structlog import wrap_logger

from secure_message.constants import MESSAGE_BY_ID_ENDPOINT, MESSAGE_LIST_ENDPOINT, MESSAGE_QUERY_LIMIT
from secure_message.services.service_toggles import party, internal_user_service

logger = wrap_logger(logging.getLogger(__name__))

MessageArgs = collections.namedtuple(
    'MessageArgs',
    'page limit business_id surveys cc label desc ce is_closed my_conversations new_respondent_conversations all_conversation_types unread_conversations')


def get_options(args):  # NOQA pylint:disable=too-complex
    """extract options from request , allow label to be set by caller

    :param args: contains search arguments. Not all end points support all args
    :returns: MessageArgs named tuple containing the args for the search

    business_id                  If set , restricts search to conversations regarding this specific party id
    surveys                      If set allows the count to be restricted by a list of survey_ids
    cc                           If set , allows the count to be restricted by a particular case
    ce                           If set, alows the count to be restricted by a particular collection exercise
    is_closed                    If set to 'true' only counts closed conversations, else only open conversations
    my_conversations             If set to 'true only counts my conversations.
                                 I.e conversations where the current user id is the to actor id
    new_respondent_conversations If set to 'true'only counts conversations where the to actor is set to 'GROUP'
    all_conversation_types       If set 'true', overrides is_closed, my_conversations and new_respondent_conversations
                                 and returns 4 counts 1 for each of , open , closed, my_conversations and
                                 new_respondent_conversations
    page                         If set requests the specific page of information to return
    limit                        If set it sets the maximum number of results to return
    desc                         If present, requests the information in descending order
    """

    fields = {'page': 1, 'limit': MESSAGE_QUERY_LIMIT, 'business_id': None, 'surveys': None,
              'desc': True, 'cc': None, 'label': None, 'ce': None, 'is_closed': False,
              'my_conversations': False, 'new_respondent_conversations': False,
              'all_conversation_types': False, 'unread_conversations': False}

    for field in ['cc', 'ce', 'business_id', 'label']:
        if args.get(field):
            fields[field] = str(args.get(field))

    fields['surveys'] = args.getlist('survey')

    for field in ['limit', 'page']:
        if args.get(field):
            fields[field] = int(args.get(field))

    if args.get('desc') == 'false':
        fields['desc'] = False

    if args.get('is_closed') == 'true':
        fields['is_closed'] = True

    if args.get('my_conversations') == 'true':
        fields['my_conversations'] = True

    if args.get('new_respondent_conversations') == 'true':
        fields['new_respondent_conversations'] = True

    if args.get('all_conversation_types') == 'true':
        fields['all_conversation_types'] = True

    if args.get('unread_conversations') == 'true':
        fields['unread_conversations'] = True

    return MessageArgs(page=fields['page'], limit=fields['limit'], business_id=fields['business_id'],
                       surveys=fields['surveys'], cc=fields['cc'], label=fields['label'], desc=fields['desc'],
                       ce=fields['ce'], is_closed=fields['is_closed'], my_conversations=fields['my_conversations'],
                       new_respondent_conversations=fields['new_respondent_conversations'],
                       all_conversation_types=fields['all_conversation_types'],
                       unread_conversations=fields['unread_conversations'])


def set_conversation_type_args(existing_args, is_closed=False, my_conversations=False, new_conversations=False,
                               all_types=False, unread_conversations=False):
    """Returns a new set of args based on the existing args which are a named tuple, but allow the conversation
    type only to be changed"""
    return MessageArgs(page=existing_args.page,
                       limit=existing_args.limit,
                       business_id=existing_args.business_id,
                       surveys=existing_args.surveys,
                       cc=existing_args.cc,
                       label=existing_args.label,
                       desc=existing_args.desc,
                       ce=existing_args.ce,
                       is_closed=is_closed,
                       my_conversations=my_conversations,
                       new_respondent_conversations=new_conversations,
                       all_conversation_types=all_types,
                       unread_conversations=unread_conversations)


def process_paginated_list(paginated_list, host_url, user, message_args, endpoint=MESSAGE_LIST_ENDPOINT,
                           body_summary=True):
    """used to change a pagination object to json format with links"""
    messages = []
    string_query_args = generate_string_query_args(message_args)

    for message in paginated_list.items:
        msg = message.serialize(user, body_summary=body_summary)
        msg['_links'] = {"self": {"href": f"{host_url}{MESSAGE_BY_ID_ENDPOINT}/{msg['msg_id']}"}}
        messages.append(msg)

    links = {'first': {"href": f"{host_url}{endpoint}"},
             'self': {"href": f"{host_url}{endpoint}?{string_query_args}&page={message_args.page}"}}

    if paginated_list.has_next:
        links['next'] = {
            "href": f"{host_url}{endpoint}?{string_query_args}&page={message_args.page + 1}"}

    if paginated_list.has_prev:
        links['prev'] = {
            "href": f"{host_url}{endpoint}?{string_query_args}&page={message_args.page - 1}"}

    return messages, links


def add_to_details(messages):
    """Adds a @msg_to key to every message in a list of messages.
    Every msg_to uuid is resolved to include details of the user.

    If the call for the internal user id fails, an exception will be thrown.
    If the external user id cannot be found in the list that we got from the party service. There won't be a
    @msg_to value returned in the payload. The API documentation notes that these elements aren't guaranteed to be
    provided so we're not breaking the contract by doing this.
    Note: Several of these lines of code could be combined into a more succinct view, spreading them out is
    deliberate so that log stack traces are better able to identify the cause of log errors
    """
    external_user_details = {}

    for user in party.get_users_details(get_external_user_uuid_list(messages)):
        external_user_details[user['id']] = user

    for message in messages:
        try:
            msg_to = message["msg_to"][0]
            from_internal = message["from_internal"]

            if not from_internal:
                msg_to_details = internal_user_service.get_user_details(msg_to)
                message.update({"@msg_to": [msg_to_details]})
            else:
                msg_to_details = external_user_details.get(msg_to)
                if msg_to_details:
                    message.update({'@msg_to': [msg_to_details]})
                else:
                    logger.info("No details found for the message recipient", msg_to=msg_to)
        except IndexError:
            logger.exception("Exception adding to details", msg_to=msg_to, from_internal=from_internal)
            raise

    return messages


def add_from_details(messages):
    """Adds a @msg_from key to every message in a list of messages.
    Every msg_to uuid is resolved to include details of the user.

    If the call for the internal user id fails, an exception will be thrown.
    If the external user id cannot be found in the list that we got from the party service. There won't be a
    @msg_from value returned in the payload. The API documentation notes that these elements aren't guaranteed to be
    provided so we're not breaking the contract by doing this.
    """
    external_user_details = {}

    for user in party.get_users_details(get_external_user_uuid_list(messages)):
        external_user_details[user['id']] = user

    for message in messages:
        try:
            msg_from = message["msg_from"]
            from_internal = message["from_internal"]

            if from_internal:
                message.update({"@msg_from": internal_user_service.get_user_details(msg_from)})
            else:
                if external_user_details.get(message['msg_from']):
                    message.update({'@msg_from': external_user_details.get(msg_from)})
        except IndexError:
            logger.exception("Exception adding from details message", msg_from=msg_from, from_internal=from_internal)
            raise

    return messages


def get_external_user_uuid_list(messages):
    """Compiles a list of all unique the external user (respondent) uuids from a list of messages"""
    external_user_uuids = set()

    external_msgs = [message for message in messages if message['from_internal'] is False]
    for message in external_msgs:
        external_user_uuids.add(message["msg_from"])

    internal_messages = [message for message in messages if message['from_internal'] is True]
    for uuid in internal_messages:
        external_user_uuids.add(uuid["msg_to"][0])

    return external_user_uuids


def add_business_details(messages):
    """Adds a @business_details key to every message in a list of messages."""
    business_ids = set()

    for message in messages:
        business_ids.add(message['business_id'])

    business_details = party.get_business_details(business_ids)

    for message in messages:
        message['@business_details'] = next((business for business in business_details
                                             if business["id"] == message['business_id']), None)

    return messages


def add_users_and_business_details(messages):
    """Add both user and business details to messages based on data from party service"""
    if not messages:
        raise ValueError('messages is a required parameter and must not be empty')

    messages = add_to_details(messages)
    messages = add_from_details(messages)

    logger.info("Successfully added to and from details")

    messages = add_business_details(messages)

    logger.info("Successfully added business details")

    return messages
[ 11748, 17268, 198, 11748, 18931, 198, 11748, 2956, 297, 571, 13, 29572, 198, 198, 6738, 2878, 6404, 1330, 14441, 62, 6404, 1362, 198, 198, 6738, 5713, 62, 20500, 13, 9979, 1187, 1330, 337, 1546, 4090, 8264, 62, 17513, 62, 2389, 62, 16...
2.615677
3,942
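A small usage sketch for the options helpers above, assuming the sample module is importable; get_options only needs an args object with .get and .getlist, so a Werkzeug MultiDict stands in for the Flask request args here, and the query values are made up.

from werkzeug.datastructures import MultiDict

# Illustrative query string only; the real service builds `args` from the request.
args = MultiDict([('limit', '10'), ('page', '2'), ('survey', 'survey-a'),
                  ('survey', 'survey-b'), ('is_closed', 'true')])

message_args = get_options(args)
print(message_args.limit, message_args.page, message_args.surveys, message_args.is_closed)
# -> 10 2 ['survey-a', 'survey-b'] True

# Same search, but restricted to new respondent conversations
new_args = set_conversation_type_args(message_args, new_conversations=True)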
# -*- coding: utf-8 -*-
"""
Defines various renderers for the game of nonogram
"""

from abc import ABC
from sys import stdout

from notetool.tool.log import logger
from six import integer_types, itervalues, text_type

from ..utils.iter import max_safe, pad
from ..utils.other import two_powers
from .common import BOX, SPACE, UNKNOWN, BlottedBlock, is_list_like


def draw(self, cells=None):
    """Calculate all the cells and draw an image of the board"""
    self.draw_header()
    self.draw_side()
    self.draw_grid(cells=cells)
    self.render()


def draw_header(self):
    """
    Changes the internal state to be able to draw columns descriptions
    """
    raise NotImplementedError()


def draw_side(self):
    """
    Changes the internal state to be able to draw rows descriptions
    """
    raise NotImplementedError()


def draw_grid(self, cells=None):
    """
    Changes the internal state to be able to draw a main grid
    """
    raise NotImplementedError()


def _register_renderers():
    res = dict()
    for obj in itervalues(globals()):
        if isinstance(obj, type):
            if issubclass(obj, StreamRenderer) and hasattr(obj, '__rend_name__'):
                res[obj.__rend_name__] = obj

    return res


RENDERERS = _register_renderers()
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 198, 7469, 1127, 2972, 9851, 19288, 329, 262, 983, 286, 1729, 21857, 198, 37811, 198, 198, 6738, 450, 66, 1330, 9738, 198, 6738, 25064, 1330, 14367, 448, 198, 198...
2.516605
542
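The _register_renderers function above builds a name-to-class registry by scanning module globals for subclasses carrying a __rend_name__ attribute (StreamRenderer itself is not shown in the excerpt). A self-contained sketch of that idiom follows; the class names here are invented and only the pattern matches the sample.

class BaseRenderer:
    pass

class AsciiRenderer(BaseRenderer):
    __rend_name__ = 'ascii'

class SvgRenderer(BaseRenderer):
    __rend_name__ = 'svg'

def register_renderers():
    res = {}
    for obj in list(globals().values()):
        if isinstance(obj, type) and issubclass(obj, BaseRenderer) and hasattr(obj, '__rend_name__'):
            res[obj.__rend_name__] = obj
    return res

RENDERERS = register_renderers()
print(sorted(RENDERERS))  # ['ascii', 'svg']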
from __future__ import print_function, division

from .str import StrPrinter
from sympy.utilities import default_sort_key


# numexpr works by altering the string passed to numexpr.evaluate
# rather than by populating a namespace.  Thus a special printer...
def lambdarepr(expr, **settings):
    """
    Returns a string usable for lambdifying.
    """
    return LambdaPrinter(settings).doprint(expr)
[ 6738, 11593, 37443, 834, 1330, 3601, 62, 8818, 11, 7297, 198, 198, 6738, 764, 2536, 1330, 4285, 6836, 3849, 198, 6738, 10558, 88, 13, 315, 2410, 1330, 4277, 62, 30619, 62, 2539, 628, 198, 198, 2, 997, 31937, 2499, 416, 29057, 262, 4...
3.366667
120
""" Fill na with most common of the whole column """ import numpy as np import pandas as pd import time import matplotlib.pyplot as plt from datetime import datetime import re from collections import Counter from statistics import median from tqdm import tqdm file = '/home/nicolasbievre/yelp_data.pkl' file_na = '/home/nicolasbievre/yelp_data_no_na.pkl' df = pd.read_pickle(file) categories = list(set(df['categories'].values)) n = len(categories) for i in tqdm(range(len(df.columns))): col = df.columns[i] if not col in {'review_id': 0, 'business_id': 0, 'user_id': 0, 'postal_code': 0}: df_col = df[col].values na = sum(pd.isna(df_col)) if na > 0: most_commom_term = find_most_common_value(df_col) if not pd.isna(most_commom_term): df.loc[(pd.isna(df_col)), col] = most_commom_term if i % 35 == 0 and i > 0: df.to_pickle(file_na) df.to_pickle(file_na)
[ 37811, 198, 33762, 12385, 351, 749, 2219, 286, 262, 2187, 5721, 198, 37811, 198, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 19798, 292, 355, 279, 67, 198, 11748, 640, 198, 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, ...
2.228438
429
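The sample calls find_most_common_value without defining it. One possible implementation is sketched below (the name and behaviour are assumptions, not taken from the sample), reusing the Counter, pandas, and numpy imports already present.

def find_most_common_value(values):
    # Count only the non-missing entries of the column
    counts = Counter(v for v in values if not pd.isna(v))
    if not counts:
        return np.nan
    # Return the single most frequent value
    return counts.most_common(1)[0][0]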
from tkinter import Tk
from tkinter import Entry
from tkinter import Button
from tkinter import StringVar

t=Tk()
t.title("Tarun Jaiswal")
t.geometry("425x300")
t.resizable(0,0)
t.configure(background="black")#back ground color

a=StringVar()

e1=Entry(font=("",30),justify="right",textvariable=a)
e1.place(x=0,y=0,width=425,height=50)

b1=Button(text="7",font=("",25),bg="gray",fg="white",activebackground="yellow",command=show)
b1.place(x=5,y=55,width=100,height=50)
b1.configure(command=lambda:show("7"))

b2=Button(text="8",font=("",25),bg="gray",fg="white",activebackground="yellow")
b2.place(x=110,y=55,width=100,height=50)
b2.configure(command=lambda:show("8"))

b3=Button(text="9",font=("",25),bg="gray",fg="white",activebackground="yellow")
b3.place(x=215,y=55,width=100,height=50)
b3.configure(command=lambda:show("9"))

b4=Button(text="+",font=("",25),bg="gray",fg="white",activebackground="yellow")
b4.place(x=320,y=55,width=100,height=50)
b4.configure(command=lambda:show("+"))

b5=Button(text="4",font=("",25),bg="gray",fg="white",activebackground="yellow")
b5.place(x=5,y=110,width=100,height=50)
b5.configure(command=lambda:show("4"))

b6=Button(text="5",font=("",25),bg="gray",fg="white",activebackground="yellow")
b6.place(x=110,y=110,width=100,height=50)
b6.configure(command=lambda:show("5"))

b7=Button(text="6",font=("",25),bg="gray",fg="white",activebackground="yellow")
b7.place(x=215,y=110,width=100,height=50)
b7.configure(command=lambda:show("6"))

b8=Button(text="-",font=("",25),bg="gray",fg="white",activebackground="yellow")
b8.place(x=320,y=110,width=100,height=50)
b8.configure(command=lambda:show("-"))

b9=Button(text="1",font=("",25),bg="gray",fg="white",activebackground="yellow")
b9.place(x=5,y=165,width=100,height=50)
b9.configure(command=lambda:show("1"))

b10=Button(text="2",font=("",25),bg="gray",fg="white",activebackground="yellow")
b10.place(x=110,y=165,width=100,height=50)
b10.configure(command=lambda:show("2"))

b11=Button(text="3",font=("",25),bg="gray",fg="white",activebackground="yellow")
b11.place(x=215,y=165,width=100,height=50)
b11.configure(command=lambda:show("3"))

b12=Button(text="*",font=("",25),bg="gray",fg="white",activebackground="yellow")
b12.place(x=320,y=165,width=100,height=50)
b12.configure(command=lambda:show("*"))

b13=Button(text="C",font=("",25),bg="gray",fg="white",activebackground="yellow")
b13.place(x=5,y=220,width=100,height=50)
b13.configure(command=clear)

b14=Button(text="0",font=("",25),bg="gray",fg="white",activebackground="yellow")
b14.place(x=110,y=220,width=100,height=50)
b14.configure(command=lambda:show("0"))

b15=Button(text="=",font=("",25),bg="gray",fg="white",activebackground="yellow",command=equal)
b15.place(x=215,y=220,width=100,height=50)
b15.configure(command=equal)

b16=Button(text="/",font=("",25),bg="gray",fg="white",activebackground="yellow")
b16.place(x=320,y=220,width=100,height=50)
b16.configure(command=lambda:show("/"))

t.mainloop()
[ 6738, 256, 74, 3849, 1330, 309, 74, 198, 6738, 256, 74, 3849, 1330, 21617, 198, 6738, 256, 74, 3849, 1330, 20969, 198, 6738, 256, 74, 3849, 1330, 10903, 19852, 198, 198, 83, 28, 51, 74, 3419, 198, 83, 13, 7839, 7203, 47079, 403, 1...
2.433854
1,217
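The calculator sample references show, clear and equal callbacks that are not defined in the excerpt; they would have to be defined before the Button(...) calls. One plausible set of callbacks, operating on the StringVar a from the sample, is sketched below (an assumption, not taken from the sample; eval-based evaluation is only for illustration).

def show(value):
    # Append the pressed key to the current expression
    a.set(a.get() + value)

def clear():
    # Reset the display
    a.set("")

def equal():
    # Evaluate the expression currently shown in the entry
    try:
        a.set(str(eval(a.get())))
    except Exception:
        a.set("Error")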