content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
import cv2
import numpy as np
import sys
if __name__ == '__main__':
title = "pontilhismo"
cv2.namedWindow(title, cv2.WINDOW_AUTOSIZE)
image = cv2.imread(sys.argv[1], cv2.IMREAD_GRAYSCALE)
cols, rows = image.shape
cv2.imshow(title, image)
cv2.waitKey(0)
points = np.copy(image)
points[...] = 220
step, jitter, radius = 5, 3, 3
xrange = np.arange(rows/step) * step + step/2
yrange = np.arange(cols/step) * step + step/2
np.random.shuffle(xrange)
for i in xrange:
np.random.shuffle(yrange)
for j in yrange:
displacement = np.random.randint(0, 2*jitter) - jitter + 1
i = int(i + displacement if i + displacement < rows else rows - 1)
j = int(j + displacement if j + displacement < cols else cols - 1)
cv2.circle(points, (j, i), radius, int(image[i, j]), -1)
cv2.imshow(title, points)
cv2.waitKey(0) | [
11748,
269,
85,
17,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
25064,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
197,
7839,
796,
366,
79,
756,
346,
71,
44126,
1,
198,
197,
33967,
17,
13,
13190,
277... | 2.310249 | 361 |
import threading, sys, time, traceback
| [
11748,
4704,
278,
11,
25064,
11,
640,
11,
12854,
1891,
198
] | 3.545455 | 11 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2022/3/29 5:10 下午
# @Author : xinming
# @File : 518_coin_change.py
from typing import List
if __name__=='__main__':
amount = 5
coins = [5, 2, 1]
out = Solution().change(amount, coins)
print(out) | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2488,
7575,
220,
220,
220,
1058,
33160,
14,
18,
14,
1959,
642,
25,
940,
220,
10310,
233,
39355,
230,
198,
2,
248... | 2.166667 | 126 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from . import __version__ as app_version
app_name = "aptronics"
app_title = "Aptronics Applications"
app_publisher = "Aptronics"
app_description = "All applications and modules used at Aptronics"
app_icon = "octicon octicon-file-directory"
app_color = "grey"
app_email = "hemant@aptronics.co.za"
app_license = "MIT"
# Includes in <head>
# ------------------
# include js, css files in header of desk.html
# app_include_css = "/assets/aptronics/css/aptronics.css"
# app_include_js = "/assets/aptronics/js/aptronics.js"
# include js, css files in header of web template
# web_include_css = "/assets/aptronics/css/aptronics.css"
# web_include_js = "/assets/aptronics/js/aptronics.js"
# include js in page
# page_js = {"page" : "public/js/file.js"}
# include js in doctype views
# doctype_js = {"doctype" : "public/js/doctype.js"}
# doctype_list_js = {"doctype" : "public/js/doctype_list.js"}
# doctype_tree_js = {"doctype" : "public/js/doctype_tree.js"}
# doctype_calendar_js = {"doctype" : "public/js/doctype_calendar.js"}
# Home Pages
# ----------
# application home page (will override Website Settings)
# home_page = "login"
# website user home page (by Role)
# role_home_page = {
# "Role": "home_page"
# }
# Website user home page (by function)
# get_website_user_home_page = "aptronics.utils.get_home_page"
# Generators
# ----------
# automatically create page for each record of this doctype
# website_generators = ["Web Page"]
# Installation
# ------------
# before_install = "aptronics.install.before_install"
# after_install = "aptronics.install.after_install"
# Desk Notifications
# ------------------
# See frappe.core.notifications.get_notification_config
# notification_config = "aptronics.notifications.get_notification_config"
# Permissions
# -----------
# Permissions evaluated in scripted ways
# permission_query_conditions = {
# "Event": "frappe.desk.doctype.event.event.get_permission_query_conditions",
# }
#
# has_permission = {
# "Event": "frappe.desk.doctype.event.event.has_permission",
# }
# Document Events
# ---------------
# Hook on document methods and events
doc_events = {
"Sales Invoice": {
"validate": "aptronics.stock.actual_cost.get_actual_cost_by_batch",
# "on_submit": "aptronics.stock.actual_cost.reversal_shipment_not_invoiced",
},
#"Delivery Note": {
# "on_submit": "aptronics.stock.actual_cost.shipped_not_invoiced",
#},
#"GL Entry": {
# "before_insert":"aptronics.stock.actual_cost.gl_entry_insert"
#},
#"Stock Ledger Entry":{
# "before_insert":"aptronics.stock.actual_cost.update_lot"
#},
#"Batch": {
# "before_insert": "aptronics.stock.actual_cost.update_lot"
#}
"Communication": {
"before_insert" : "aptronics.business_rules.email.check_email_address"
},
"Customer":{
"before_insert":"aptronics.business_rules.naming_series.business_partner_naming_series"
},
"Supplier":{
"before_insert":"aptronics.business_rules.naming_series.business_partner_naming_series"
},
"Sales Order":{
"validate":"aptronics.business_rules.validations.sales_order_unique_by_customer"
},
"Purchase Invoice":{
"validate":"aptronics.business_rules.validations.purchase_invoice_excluding_price_check"
}
}
# Scheduled Tasks
# ---------------
# scheduler_events = {
# "all": [
# "aptronics.tasks.all"
# ],
# "daily": [
# "aptronics.tasks.daily"
# ],
# "hourly": [
# "aptronics.tasks.hourly"
# ],
# "weekly": [
# "aptronics.tasks.weekly"
# ]
# "monthly": [
# "aptronics.tasks.monthly"
# ]
# }
# Testing
# -------
# before_tests = "aptronics.install.before_tests"
# Overriding Whitelisted Methods
# ------------------------------
#
# override_whitelisted_methods = {
# "frappe.desk.doctype.event.event.get_events": "aptronics.event.get_events"
# }
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
6738,
764,
1330,
11593,
9641,
834,
355,
598,
62,
9641,
198,
198,
1324,
62,
3672,
796,
366,
2373,
20844,
... | 2.690409 | 1,418 |
#Function with parameter
add(5,3)
add(a=3,b=5)
#Function with default parameters
add_new(2)
basic_window(500,350,bgc='a')
| [
198,
2,
22203,
351,
11507,
220,
220,
220,
220,
198,
198,
2860,
7,
20,
11,
18,
8,
198,
2860,
7,
64,
28,
18,
11,
65,
28,
20,
8,
198,
198,
2,
22203,
351,
4277,
10007,
198,
198,
2860,
62,
3605,
7,
17,
8,
198,
198,
35487,
62,
1... | 2.237288 | 59 |
import heapq
'''
String[] base = {"Gold Medal","Silver Medal","Bronze Medal"};
String[] re = new String[nums.length];
int[] copy = Arrays.copyOf(nums,nums.length);
Map<Integer,Integer> map = new HashMap<>();
Arrays.sort(copy);
for (int i = 0; i < nums.length; i++) {
map.put(copy[i],nums.length - 1 - i);
}
int rank = 0;
for (int i = 0; i < nums.length; i++) {
rank = map.get(nums[i]);
re[i] = rank < 3 ? base[rank] : String.valueOf(rank + 1);
}
return re;
''' | [
11748,
24575,
80,
198,
220,
220,
220,
220,
220,
220,
220,
220,
198,
7061,
6,
198,
220,
220,
220,
10903,
21737,
2779,
796,
19779,
13306,
9064,
2430,
26766,
9064,
2430,
18760,
2736,
9064,
1,
19629,
198,
220,
220,
220,
10903,
21737,
302,... | 2.173387 | 248 |
from django.apps import AppConfig
from django.contrib.admin.apps import AdminConfig
| [
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
28482,
13,
18211,
1330,
32053,
16934,
628,
198
] | 3.583333 | 24 |
#
# This integration is towards MQTT in Home-Assistant and can easily
# be configured to provide both images streamed unfiltered or diff-filtered.
#
# Author: Joakim Eriksson, joakim.eriksson@ri.se
#
import paho.mqtt.client as mqttClient
import threading, time, yaml
import numpy as np, sys, time
import cv2
import images_pb2
video = 0
if len(sys.argv) > 1:
video = sys.argv[1]
mqCam = MQTTCamera("localhost", video, topic="ha/camera/mqtt")
mqCam.show = False
mqCam.camera_loop()
# When everything done, release the capture
cv2.destroyAllWindows() | [
2,
198,
2,
770,
11812,
318,
3371,
337,
48,
15751,
287,
5995,
12,
48902,
290,
460,
3538,
198,
2,
307,
17839,
284,
2148,
1111,
4263,
35377,
3684,
346,
4400,
393,
814,
12,
10379,
4400,
13,
198,
2,
198,
2,
6434,
25,
5302,
461,
320,
... | 2.724638 | 207 |
# Generated by Django 3.1.6 on 2021-02-10 07:46
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
21,
319,
33448,
12,
2999,
12,
940,
8753,
25,
3510,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
#!/usr/bin/env python
# Created by Bruce yuan on 18-1-25.
if __name__ == '__main__':
main() | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
15622,
416,
11088,
34847,
319,
1248,
12,
16,
12,
1495,
13,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1388,
3419
] | 2.45 | 40 |
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=W0212,W0222,W0221
import typing
import unittest
from opentelemetry import trace
from opentelemetry.trace.span import INVALID_SPAN_CONTEXT, NonRecordingSpan
| [
2,
15069,
383,
4946,
31709,
41935,
46665,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
9... | 3.553488 | 215 |
#!/usr/bin/env python3
from __future__ import print_function
import argparse
import json
import os.path
import pathlib2 as pathlib
import grovepi
import sys
import os
import time
import ast
import socket
# Import MQTT client modules
import paho.mqtt.client as mqtt
sys.path.append('/home/pi/GrovePi/Software/Python/grove_rgb_lcd')
from grovepi import *
from grove_rgb_lcd import *
# Import Plugwise modules for both stick and circle
from plugwise import Stick
from plugwise import Circle
# MAC ID for both the Circles
mac1 = "000D6F0003BD7F1F"
mac2 = "000D6F0002C0DE2E"
# Plugwise Stick port
plugwise_stick = Stick(port="/dev/ttyUSB0")
# Binding each circle to the stick
plugwise_Circle_1 = Circle(mac1, plugwise_stick) # for heater
plugwise_Circle_2 = Circle(mac2, plugwise_stick) # lamp
# turning off the devices conected to circles by default
plugwise_Circle_1.switch_off()
plugwise_Circle_2.switch_off()
# Sensor Pin configuration
# Analog Port Declaration for the Sensors
sound_sensor_port = 0 # sound sensor in analog port 0
# Digital Port Declaration
ultra_sound_sensor_port = 2
# Setting the PinModes
grovepi.pinMode(sound_sensor_port,"INPUT")
# Threshold for light sensor
sound_threshold_level = 70
ultra_sound_threshold_level = 20
light_status = False
heater_status = False
indicator_led_1 = 4
indicator_led_2 = 3
''' Data Packet containing all the sensor information to be transmitted to
MQTT Server via client running on raspberry PI.
'''
# Define event callbacks
client = mqtt.Client()
# Assign event callbacks
client.on_message = on_message
client.on_connect = on_connect
client.on_publish = on_publish
client.on_subscribe = on_subscribe
# Uncomment to enable debug messages
#client.on_log = on_log
# user name has to be called before connect - my notes.
client.username_pw_set("ndcvwock", "yotAE_3zRCsF")
client.connect('m24.cloudmqtt.com', 10480, 60)
client.loop_start()
client.subscribe ("/light",0)
client.subscribe ("/heater",0)
userpresent = "user"
nouserpresent = "nouser"
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
11748,
1822,
29572,
198,
11748,
33918,
198,
11748,
28686,
13,
6978,
198,
11748,
3108,
8019,
17,
355,
3108,
8019,
198,
11748,
... | 2.930299 | 703 |
import os
from os.path import join
import sys
sys.path.append('.')
from cycada.data.adda_datasets import AddaDataLoader
from cycada.data.cyclegta5 import CycleGTA5
from cycada.data import *
from cycada.data.usps import USPS
from cycada.data.mnist import MNIST
from cycada.data.svhn import SVHN
from cycada.data.cyclegan import Svhn2MNIST, Usps2Mnist, Mnist2Usps
from cycada.tools.train_task_net import train as train_source
from cycada.tools.test_task_net import load_and_test_net
from cycada.tools.train_adda_net import train_adda
import torch
import numpy as np
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--s', dest='src', type=str, default='usps2mnist')
parser.add_argument('--t', dest='tgt', type=str, default='mnist')
parser.add_argument('--b', dest='batchSize', type=int, default=128)
parser.add_argument('--wd', dest='weight_decay', type=int, default=0)
parser.add_argument('--dd', dest='datadir', type=str, default='/home/ubuntu/anthro-efs/anthro-backup-virginia/data')
parser.add_argument('--mn', dest='modelName', type=str, default='model_A')
parser.add_argument('--m', dest='model', type=str, default='LeNet')
parser.add_argument('--nc', dest='numClasses', type=int, default=10)
parser.add_argument('--pe', dest='pixLevEpochs', type=int, default=100)
parser.add_argument('--fe', dest='featLevEpochs', type=int, default=200)
parser.add_argument('--plr', dest='pixLR', type=float, default=1e-4)
parser.add_argument('--flr', dest='featLR', type=float, default=1e-5)
parser.add_argument('--iter', dest='iter', type=int, default=None)
parser.add_argument('--ns', dest='numSave', type=int, default=50)
args = parser.parse_args()
# set random seed to 4325
# to reproduce the exact numbers
np.random.seed(4325)
###################################
# Set to your preferred data path #
###################################
datadir = args.datadir
###################################
# Problem Params
src = args.src
tgt = args.tgt
base_src = src.split('2')[0]
model = args.model
num_cls = args.numClasses
# Output directory
outdir = 'results/{}_to_{}/{}'.format(src, tgt, args.modelName)
# Optimization Params
betas = (0.9, 0.999) # Adam default
weight_decay = args.weight_decay # Adam default
batch = args.batchSize
src_lr = args.pixLR
adda_lr = args.featLR
src_num_epoch = args.pixLevEpochs
adda_num_epoch = args.featLevEpochs
src_datadir = join(datadir, src)
args.src_net_file = join(outdir, '{}_net_{}'.format(model, src))
args.adda_net_file = join(outdir, 'adda_{:s}_net_{:s}_{:s}'.format(model, src, tgt))
src_net_file = args.src_net_file + '_final.pth'
adda_net_file = args.adda_net_file + '_final.pth'
#######################
# 1. Train Source Net #
#######################
if os.path.exists(src_net_file):
print('Skipping source net training, exists:', src_net_file)
else:
train_source(src, src_datadir, model, num_cls, args,
outdir=outdir, num_epoch=src_num_epoch, batch=batch,
lr=src_lr, betas=betas, weight_decay=weight_decay)
#####################
# 2. Train Adda Net #
#####################
if os.path.exists(adda_net_file):
print('Skipping adda training, exists:', adda_net_file)
else:
train_adda(src, tgt, model, num_cls, args, num_epoch=adda_num_epoch,
batch=batch, datadir=datadir,
outdir=outdir, src_weights=src_net_file,
lr=adda_lr, betas=betas, weight_decay=weight_decay)
##############################
# 3. Evalute source and adda #
##############################
tgt_datadir = join(datadir, tgt)
print()
if src == base_src:
print('----------------')
print('Test set:', src)
print('----------------')
print('Evaluating {} source model: {}'.format(src, src_net_file))
load_and_test_net(src, src_datadir, src_net_file, model, num_cls,
dset='test', base_model=None)
print('----------------')
print('Test set:', tgt)
print('----------------')
print('Evaluating {} source model: {}'.format(src, src_net_file))
cm = load_and_test_net(tgt, tgt_datadir, src_net_file, model, num_cls,
dset='test', base_model=None)
print(cm)
print('Evaluating {}->{} adda model: {}'.format(src, tgt, adda_net_file))
cm = load_and_test_net(tgt, tgt_datadir, adda_net_file, 'AddaNet', num_cls,
dset='test', base_model=model)
print(cm)
| [
11748,
28686,
198,
6738,
28686,
13,
6978,
1330,
4654,
198,
198,
11748,
25064,
198,
17597,
13,
6978,
13,
33295,
10786,
2637,
8,
198,
198,
6738,
3075,
66,
4763,
13,
7890,
13,
2860,
64,
62,
19608,
292,
1039,
1330,
3060,
64,
6601,
17401,
... | 2.566429 | 1,686 |
# -*- coding: utf-8 -*-
import abc
import six
import numpy as np
###############################################################################
@six.add_metaclass(abc.ABCMeta)
class OUActionNoise(object):
"""Noise generated with an Ornstein-Uhlenbeck."""
class AdaptiveParamNoiseSpec(object):
"""Adaptative Parameter Noise
Technique is introduced in the paper Parameter Space Noise for Exploration
https://arxiv.org/abs/1706.01905
This implementation is taken from OpenAI's baselines.
"""
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
450,
66,
198,
11748,
2237,
198,
198,
11748,
299,
32152,
355,
45941,
628,
198,
29113,
29113,
7804,
4242,
21017,
198,
198,
31,
19412,
13,
2860,
62,
4164,
330,
... | 3.335443 | 158 |
#!/usr/bin/env python3
"""HolbertonBnB Review view."""
from api.v1.views import app_views
from flask import abort, jsonify, request
from flasgger import swag_from
from models import storage
from models.review import Review
@app_views.route("/places/<place_id>/reviews", methods=["GET", "POST"])
@swag_from("../apidocs/places_reviews/get_reviews.yml", methods=["GET"])
@swag_from("../apidocs/places_reviews/post.yml", methods=["POST"])
def reviews(place_id):
"""Defines the GET and POST method for reviews on /places route.
GET - Retrieves a list of all Reviews related to a given place_id.
POST - Creates a Review.
"""
place = storage.get("Place", place_id)
if place is None:
abort(404)
# GET method
if request.method == "GET":
return jsonify([r.to_dict() for r in place.reviews])
# POST method
data = request.get_json(silent=True)
if data is None:
return "Not a JSON", 400
user_id = data.get("user_id")
if user_id is None:
return "Missing user_id", 400
if storage.get("User", user_id) is None:
abort(404)
if data.get("text") is None:
return "Missing text", 400
data["place_id"] = place_id
review = Review(**data)
review.save()
return jsonify(review.to_dict()), 201
@app_views.route("/reviews/<review_id>", methods=["GET", "DELETE", "PUT"])
@swag_from("../apidocs/places_reviews/get_review_id.yml", methods=["GET"])
@swag_from("../apidocs/places_reviews/delete.yml", methods=["DELETE"])
@swag_from("../apidocs/places_reviews/put.yml", methods=["PUT"])
def review_id(review_id):
"""Defines the GET, PUT and DELETE methods for a specific ID on reviews.
GET - Retrieves a Review object with the given id.
PUT - Updates a Review object with the given id using JSON key/values.
DELETE - Deletes a Review object with the given id.
"""
review = storage.get("Review", review_id)
if review is None:
abort(404)
# GET method
if request.method == "GET":
return jsonify(review.to_dict())
# DELETE method
elif request.method == "DELETE":
storage.delete(review)
storage.save()
return jsonify({})
# PUT method
data = request.get_json(silent=True)
if data is None:
return "Not a JSON", 400
avoid = {"id", "user_id", "place_id", "created_at", "updated_at"}
[setattr(review, k, v) for k, v in data.items() if k not in avoid]
review.save()
return jsonify(review.to_dict())
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
37811,
28115,
527,
1122,
33,
77,
33,
6602,
1570,
526,
15931,
198,
6738,
40391,
13,
85,
16,
13,
33571,
1330,
598,
62,
33571,
198,
6738,
42903,
1330,
15614,
11,
33918,
1958,
11,
25... | 2.535895 | 989 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import models
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2142,
286,
10529,
2238,
13,
4091,
38559,
24290,
2393,
329,
1336,
6634,
290,
15665,
3307,
13,
198,
198,
6738,
16298,
2238,
1330,
4981,
628
] | 3.205128 | 39 |
"""Train networks for reproducing multi-cognitive-tasks from
Task representations in neural networks trained to perform many cognitive tasks
https://www.nature.com/articles/s41593-018-0310-2
"""
import os
import time
import numpy as np
import torch
import torch.nn as nn
import gym
import neurogym as ngym
from neurogym.wrappers import ScheduleEnvs
from neurogym.utils.scheduler import RandomSchedule
from models import RNNNet, get_performance
# Environment
kwargs = {'dt': 100}
# kwargs = {'dt': 100, 'sigma': 0, 'dim_ring': 2, 'cohs': [0.1, 0.3, 0.6, 1.0]}
seq_len = 100
# Make supervised dataset
tasks = ngym.get_collection('yang19')
envs = [gym.make(task, **kwargs) for task in tasks]
schedule = RandomSchedule(len(envs))
env = ScheduleEnvs(envs, schedule=schedule, env_input=True)
dataset = ngym.Dataset(env, batch_size=4, seq_len=seq_len)
env = dataset.env
ob_size = env.observation_space.shape[0]
act_size = env.action_space.n
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print(device)
model = RNNNet(input_size=ob_size, hidden_size=256, output_size=act_size,
dt=env.dt).to(device)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
print_step = 200
running_loss = 0.0
running_task_time = 0
running_train_time = 0
for i in range(40000):
task_time_start = time.time()
inputs, labels = dataset()
running_task_time += time.time() - task_time_start
inputs = torch.from_numpy(inputs).type(torch.float).to(device)
labels = torch.from_numpy(labels.flatten()).type(torch.long).to(device)
train_time_start = time.time()
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs, _ = model(inputs)
loss = criterion(outputs.view(-1, act_size), labels)
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
optimizer.step()
running_train_time += time.time() - train_time_start
# print statistics
running_loss += loss.item()
if i % print_step == (print_step - 1):
print('{:d} loss: {:0.5f}'.format(i + 1, running_loss / print_step))
running_loss = 0.0
if True:
print('Task/Train time {:0.1f}/{:0.1f} ms/step'.format(
running_task_time / print_step * 1e3,
running_train_time / print_step * 1e3))
running_task_time, running_train_time = 0, 0
perf = get_performance(model, env, num_trial=200, device=device)
print('{:d} perf: {:0.2f}'.format(i + 1, perf))
fname = os.path.join('files', 'model.pt')
torch.save(model.state_dict(), fname)
print('Finished Training')
| [
37811,
44077,
7686,
329,
8186,
2259,
5021,
12,
66,
46610,
12,
83,
6791,
422,
198,
198,
25714,
24612,
287,
17019,
7686,
8776,
284,
1620,
867,
10870,
8861,
198,
5450,
1378,
2503,
13,
21353,
13,
785,
14,
26845,
14,
82,
35038,
6052,
12,
... | 2.480111 | 1,081 |
# Python3 program to perform basic timSort
MIN_MERGE = 32 #O tamanho da execução pode variar de 32 a 64, dependendo do tamanho da matriz
def calcMinRun(n):
"""Retorna o comprimento mínimo de um
correr de 23-64 para que
o len (array) / minrun é menor que ou
igual a uma potência de 2.
e.g. 1=>1, ..., 63=>63, 64=>32, 65=>33,
..., 127=>64, 128=>32, ...
"""
r = 0
while n >= MIN_MERGE:
r |= n & 1
n >>= 1
return n + r
# Esta função classifica a matriz do índice esquerdo para
# para o índice da direita que é de tamanho no máximo RUN
# A função de mesclagem mescla as execuções classificadas
# Função Timsort iterativa para classificar o
# array[0...n-1] (similar to merge sort)
# Começando o programa!
if __name__ == "__main__":
arr = [-2, 7, 15, -14, 0, 15, 0,
7, -7, -4, -13, 5, 8, -14, 12]
print("Given Array is")
print(arr)
# chamando função TimSort
timSort(arr)
# print("After Sorting Array is")
#print(arr)
# [-14, -14, -13, -7, -4, -2, 0, 0,
# 5, 7, 7, 8, 12, 15, 15] | [
2,
11361,
18,
1430,
284,
1620,
4096,
4628,
42758,
198,
23678,
62,
29296,
8264,
796,
3933,
1303,
46,
256,
10546,
8873,
12379,
2452,
84,
16175,
28749,
279,
1098,
5553,
283,
390,
3933,
257,
5598,
11,
4745,
31110,
466,
256,
10546,
8873,
1... | 2.096408 | 529 |
# ======================================================================
# Author: TrungNT
# ======================================================================
from __future__ import print_function, division
from ..utils import function, frame
from ..model import model
from .. import tensor
from .. import logger
import unittest
import os
import numpy as np
from six.moves import zip, range
# ===========================================================================
# Main Test
# ===========================================================================
# ===========================================================================
# Main
# ===========================================================================
if __name__ == '__main__':
print(' Use nosetests to run these tests ')
| [
2,
38093,
1421,
28,
198,
2,
6434,
25,
833,
2150,
11251,
198,
2,
38093,
1421,
28,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
11,
7297,
198,
198,
6738,
11485,
26791,
1330,
2163,
11,
5739,
198,
6738,
11485,
19849,
1330,
2746,
... | 5.97037 | 135 |
import os
from influxspeedtest.config.configmanager import ConfigManager
if os.getenv('influxspeedtest'):
config = os.getenv('influxspeedtest')
else:
config = 'config.ini'
config = ConfigManager(config) | [
11748,
28686,
198,
198,
6738,
25065,
12287,
9288,
13,
11250,
13,
11250,
37153,
1330,
17056,
13511,
198,
198,
361,
28686,
13,
1136,
24330,
10786,
10745,
22564,
12287,
9288,
6,
2599,
198,
220,
220,
220,
4566,
796,
28686,
13,
1136,
24330,
... | 3.086957 | 69 |
import socketserver, socket, sys
host = "0.0.0.0"
port = 10000
address = (host, port)
if __name__ == "__main__":
command = sys.argv[1]
if command == "serve":
serve()
elif command == "ping":
ping()
else: print("invalid")
| [
11748,
37037,
18497,
11,
17802,
11,
25064,
198,
198,
4774,
796,
366,
15,
13,
15,
13,
15,
13,
15,
1,
198,
634,
796,
33028,
198,
21975,
796,
357,
4774,
11,
2493,
8,
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
... | 2.297297 | 111 |
# -*- coding: utf-8 -*-
"""
This file is part of Ludolph: Ansible plugin
Copyright (C) 2015 Erigones, s. r. o.
See the LICENSE file for copying permission.
"""
from __future__ import absolute_import
from __future__ import print_function
from os import path
from ludolph.command import CommandError, PermissionDenied, command
from ludolph.plugins.plugin import LudolphPlugin
from ansible import utils
from ansible.errors import AnsibleError
from ansible.inventory import Inventory
from ansible.playbook import PlayBook
from ansible.playbook.play import Play
from . import __version__
from .playbook_callbacks import banner, AggregateStats, PlaybookCallbacks, PlaybookRunnerCallbacks
class DisplayCallback(object):
"""
Display task output.
"""
buffer = []
# noinspection PyUnusedLocal,PyMethodMayBeStatic
__call__ = display
class Playbook(LudolphPlugin):
"""
Run ansible playbooks from ludolph.
"""
__version__ = __version__
_available_options = (
('forks', int),
('check', _bool),
('private_key_file', _file),
)
@staticmethod
def _get_playbook(self, msg, pb_name):
"""Get playbook by name"""
if self.admin_required and not self.xmpp.is_jid_admin(self.xmpp.get_jid(msg)):
raise PermissionDenied
try:
pb_name = self.playbooks[pb_name]
except KeyError:
if self.restrict_playbooks:
raise CommandError('Invalid playbook name: **%s**' % pb_name)
if not pb_name.endswith('.yml'):
pb_name += '.yml'
pb_path = path.abspath(path.realpath(path.join(self.basedir, pb_name)))
if not pb_path.startswith(self.basedir + path.sep):
raise CommandError('Invalid playbook name: **%s**' % pb_name)
if not path.isfile(pb_path):
raise CommandError('Playbook **%s** not found' % pb_name)
options = self._get_callbacks(msg)
options.update(self.options)
return PlayBook(playbook=pb_path, **options)
@command
def apb(self, msg, playbook, *args):
"""
Run an ansible playbook and display the results.
Usage: apb <playbook> [options]
Available options:
tags=tag1,tag2,...
check=no
subset=*domain1*
"""
pb = self._get_playbook(msg, playbook)
for arg in args:
try:
key, val = arg.split('=')
except ValueError:
raise CommandError('Invalid option: **%s**' % arg)
else:
key, val = key.strip(), val.strip()
if key == 'tags':
pb.only_tags = map(str.strip, val.split(','))
elif key == 'check':
pb.check = _bool(val)
elif key == 'subset':
pb.inventory.subset(val)
else:
raise CommandError('Invalid option: **%s**' % arg)
res = []
try:
pb.run()
pb.callbacks.display.flush()
hosts = sorted(pb.stats.processed.keys())
res.append(banner(''))
for h in hosts:
t = pb.stats.summarize(h)
res.append('%s : ok=%-4s changed=%-4s unreachable=%-4s failed=%-4s' % (
hostcolor(h, t), t['ok'], t['changed'], t['unreachable'], t['failures']
))
res.append('')
except AnsibleError as exc:
raise CommandError('Ansible error: **%s**' % exc)
return '\n'.join(res)
@command
def apb_tags(self, msg, playbook):
"""
List all tags available in a playbook.
Usage: apb-tags <playbook>
"""
pb = self._get_playbook(msg, playbook)
i = 0
res = ['', 'playbook: %s' % pb.filename, '']
for play in self._get_playbook_data(pb):
i += 1
res.append(' play #%d (%s):\tTAGS: [%s]' % (i, play.name, ','.join(sorted(set(play.tags)))))
tags = set()
for task in pb.tasks_to_run_in_play(play):
tags.update(task.tags)
res.append(' TASK TAGS: [%s]' % (', '.join(sorted(tags.difference(['untagged'])))))
res.append('')
return '\n'.join(res)
@command
def apb_tasks(self, msg, playbook, tag=None):
"""
List all tasks available in a playbook.
Usage: apb-tasks <playbook> [tag]
"""
pb = self._get_playbook(msg, playbook)
i = 0
res = ['', 'playbook: %s' % pb.filename, '']
if tag:
tag = tag.strip()
for play in self._get_playbook_data(pb):
i += 1
res.append(' play #%d (%s):\tTAGS: [%s]' % (i, play.name, ','.join(sorted(set(play.tags)))))
num_tasks = 0
for task in pb.tasks_to_run_in_play(play):
if getattr(task, 'name', None) is not None: # meta tasks have no names
tags = set(task.tags).difference(['untagged'])
if tag and tag not in tags:
continue
res.append(' %s\tTAGS: [%s]' % (task.name, ', '.join(sorted(tags))))
num_tasks += 1
if not num_tasks:
if tag:
res.append(' (no tasks for tag: **%s**)' % tag)
else:
res.append(' (no tasks)')
res.append('')
return '\n'.join(res)
@command
def apb_hosts(self, msg, playbook):
"""
List all hosts available in a playbook.
Usage: apb-hosts <playbook>
"""
pb = self._get_playbook(msg, playbook)
i = 0
res = ['', 'playbook: %s' % pb.filename, '']
for play in self._get_playbook_data(pb):
i += 1
hosts = pb.inventory.list_hosts(play.hosts)
res.append(' play #%d (%s): host count=%d' % (i, play.name, len(hosts)))
for host in hosts:
res.append(' %s' % host)
res.append('')
return '\n'.join(res)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
1212,
2393,
318,
636,
286,
24177,
10196,
25,
28038,
856,
13877,
198,
15269,
357,
34,
8,
1853,
412,
4359,
1952,
11,
264,
13,
374,
13,
267,
13,
198,
198,
6... | 2.015097 | 3,047 |
from __future__ import division
from MenuHUDPanel import MenuHUDPanel
import pygame
# Call constructor
# Set Menu State
# Get Menu State
# Get hud activity
# Set hud activity
# Call when the hud is opened
# Event method (pass pygame.Event to HUD)
# Get / set / do an action within the panel
# @param "self = instance, string = paneltype, action = event(string)"
# When the menu is closed
# Update method
# Draw method
| [
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
21860,
39,
52,
6322,
272,
417,
1330,
21860,
39,
52,
6322,
272,
417,
198,
11748,
12972,
6057,
628,
220,
220,
220,
1303,
4889,
23772,
628,
220,
220,
220,
1303,
5345,
21860,
1812,
628,
220,... | 3.031847 | 157 |
from collections import OrderedDict
import copy
from numbers import Integral
import os
import warnings
import h5py
import numpy as np
import openmc
from openmc.data import REACTION_MT, REACTION_NAME, FISSION_MTS
import openmc.checkvalue as cv
from ..tallies import ESTIMATOR_TYPES
from . import EnergyGroups
# Supported cross section types. These are the valid named values for the
# ``mgxs_type`` argument of the MGXS.get_mgxs() factory method below.
MGXS_TYPES = (
    'total',
    'transport',
    'nu-transport',
    'absorption',
    'capture',
    'fission',
    'nu-fission',
    'kappa-fission',
    'scatter',
    'nu-scatter',
    'scatter matrix',
    'nu-scatter matrix',
    'multiplicity matrix',
    'nu-fission matrix',
    'scatter probability matrix',
    'consistent scatter matrix',
    'consistent nu-scatter matrix',
    'chi',
    'chi-prompt',
    'inverse-velocity',
    'prompt-nu-fission',
    'prompt-nu-fission matrix',
    'current',
    'diffusion-coefficient',
    'nu-diffusion-coefficient'
)
# Some scores from REACTION_MT are not supported, or are simply overkill to
# support and test (like inelastic levels); remove those from consideration
_BAD_SCORES = ["(n,misc)", "(n,absorption)", "(n,total)", "fission"]
_BAD_SCORES += [REACTION_NAME[mt] for mt in FISSION_MTS]
ARBITRARY_VECTOR_TYPES = tuple(
    score for score in REACTION_MT if score not in _BAD_SCORES)

# A reaction can also be tallied as an incoming-to-outgoing group matrix when
# it produces a neutron; preclude the fission channels from matrix treatment.
_FISSION_NAMES = [REACTION_NAME[mt] for mt in FISSION_MTS]
ARBITRARY_MATRIX_TYPES = tuple(
    rxn + " matrix" for rxn in ARBITRARY_VECTOR_TYPES
    if rxn not in _FISSION_NAMES
    and len(rxn.strip("()").split(",")) > 1
    and "n" in rxn.strip("()").split(",")[1])
# Supported domain types for spatial homogenization (valid values for the
# ``domain_type`` argument of MGXS and MGXS.get_mgxs)
DOMAIN_TYPES = (
    'cell',
    'distribcell',
    'universe',
    'material',
    'mesh'
)
# Filter types corresponding to each domain
_DOMAIN_TO_FILTER = {
    'cell': openmc.CellFilter,
    'distribcell': openmc.DistribcellFilter,
    'universe': openmc.UniverseFilter,
    'material': openmc.MaterialFilter,
    'mesh': openmc.MeshFilter
}
# Supported domain classes
_DOMAINS = (
    openmc.Cell,
    openmc.Universe,
    openmc.Material,
    openmc.RegularMesh
)
# Supported ScatterMatrixXS angular distribution types. Note that 'histogram' is
# defined here and used in mgxs_library.py, but it is not used for the current
# module
SCATTER_TABULAR = 'tabular'
SCATTER_LEGENDRE = 'legendre'
SCATTER_HISTOGRAM = 'histogram'
# Scattering angle (mu) treatments available for scatter matrices
MU_TREATMENTS = (
    SCATTER_LEGENDRE,
    SCATTER_HISTOGRAM
)
# Maximum Legendre order supported by OpenMC
_MAX_LEGENDRE = 10
def _df_column_convert_to_bin(df, current_name, new_name, values_to_bin,
                              reverse_order=False):
    """Replace a DataFrame column of bin-edge values with one-based bin
    indices, renaming the column. Operates on the DataFrame, df, in-place.

    Parameters
    ----------
    df : pandas.DataFrame
        A Pandas DataFrame containing the cross section data.
    current_name : str
        Name of the column to replace with bins
    new_name : str
        New name for column after the data is replaced with bins
    values_to_bin : Iterable of Real
        Values of the bin edges to be used for identifying the bins
    reverse_order : bool
        Whether the bin indices should be reversed
    """
    edges = np.asarray(values_to_bin)
    column_data = np.asarray(df[current_name])
    bin_indices = np.zeros_like(column_data, dtype=int)

    # The stored values may have lost precision along the way, so the closest
    # edge is used rather than an exact match: searchsorted locates the
    # insertion point, and we step back one slot whenever the previous edge
    # is essentially equal to the value.
    for pos, edge_value in enumerate(column_data):
        location = np.searchsorted(edges, edge_value)
        if location > 0 and np.isclose(edges[location - 1], edge_value):
            location -= 1
        bin_indices[pos] = location

    # Switch to one-based bin numbering
    bin_indices += 1

    # Flip the numbering if requested (used for energy group ordering)
    if reverse_order:
        bin_indices = len(edges) - bin_indices

    # Store the indices and rename the column
    df[current_name] = bin_indices[:]
    df.rename(columns={current_name: new_name}, inplace=True)
class MGXS:
"""An abstract multi-group cross section for some energy group structure
within some spatial domain.
This class can be used for both OpenMC input generation and tally data
post-processing to compute spatially-homogenized and energy-integrated
multi-group cross sections for multi-group neutronics calculations.
.. note:: Users should instantiate the subclasses of this abstract class.
Parameters
----------
domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
The domain for spatial homogenization
domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
The domain type for spatial homogenization
energy_groups : openmc.mgxs.EnergyGroups
The energy group structure for energy condensation
by_nuclide : bool
If true, computes cross sections for each nuclide in domain
name : str, optional
Name of the multi-group cross section. Used as a label to identify
tallies in OpenMC 'tallies.xml' file.
num_polar : Integral, optional
Number of equi-width polar angle bins for angle discretization;
defaults to one bin
num_azimuthal : Integral, optional
Number of equi-width azimuthal angle bins for angle discretization;
defaults to one bin
Attributes
----------
name : str, optional
Name of the multi-group cross section
rxn_type : str
Reaction type (e.g., 'total', 'nu-fission', etc.)
by_nuclide : bool
If true, computes cross sections for each nuclide in domain
domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
Domain for spatial homogenization
domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
Domain type for spatial homogenization
energy_groups : openmc.mgxs.EnergyGroups
Energy group structure for energy condensation
num_polar : Integral
Number of equi-width polar angle bins for angle discretization
num_azimuthal : Integral
Number of equi-width azimuthal angle bins for angle discretization
tally_trigger : openmc.Trigger
An (optional) tally precision trigger given to each tally used to
compute the cross section
scores : list of str
The scores in each tally used to compute the multi-group cross section
filters : list of openmc.Filter
The filters in each tally used to compute the multi-group cross section
tally_keys : list of str
The keys into the tallies dictionary for each tally used to compute
the multi-group cross section
estimator : {'tracklength', 'collision', 'analog'}
The tally estimator used to compute the multi-group cross section
tallies : collections.OrderedDict
OpenMC tallies needed to compute the multi-group cross section
rxn_rate_tally : openmc.Tally
Derived tally for the reaction rate tally used in the numerator to
compute the multi-group cross section. This attribute is None
unless the multi-group cross section has been computed.
xs_tally : openmc.Tally
Derived tally for the multi-group cross section. This attribute
is None unless the multi-group cross section has been computed.
num_subdomains : int
The number of subdomains is unity for 'material', 'cell' and 'universe'
domain types. This is equal to the number of cell instances
for 'distribcell' domain types (it is equal to unity prior to loading
tally data from a statepoint file) and the number of mesh cells for
'mesh' domain types.
num_nuclides : int
The number of nuclides for which the multi-group cross section is
being tracked. This is unity if the by_nuclide attribute is False.
nuclides : Iterable of str or 'sum'
The optional user-specified nuclides for which to compute cross
sections (e.g., 'U238', 'O16'). If by_nuclide is True but nuclides
are not specified by the user, all nuclides in the spatial domain
are included. This attribute is 'sum' if by_nuclide is false.
sparse : bool
Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format
for compressed data storage
loaded_sp : bool
Whether or not a statepoint file has been loaded with tally data
derived : bool
Whether or not the MGXS is merged from one or more other MGXS
hdf5_key : str
The key used to index multi-group cross sections in an HDF5 data store
"""
# Store whether or not the number density should be removed for microscopic
# values of this data
_divide_by_density = True
def _add_angle_filters(self, filters):
"""Add the azimuthal and polar bins to the MGXS filters if needed.
Filters will be provided as a ragged 2D list of openmc.Filter objects.
Parameters
----------
filters : Iterable of Iterable of openmc.Filter
Ragged 2D list of openmc.Filter objects for the energy and spatial
domains. The angle filters will be added to the list.
Returns
-------
Iterable of Iterable of openmc.Filter
Ragged 2D list of openmc.Filter objects for the energy and spatial
domains with the angle filters added to the list.
"""
if self.num_polar > 1 or self.num_azimuthal > 1:
# Then the user has requested angular data, so create the bins
pol_bins = np.linspace(0., np.pi, num=self.num_polar + 1,
endpoint=True)
azi_bins = np.linspace(-np.pi, np.pi, num=self.num_azimuthal + 1,
endpoint=True)
for filt in filters:
filt.insert(0, openmc.PolarFilter(pol_bins))
filt.insert(1, openmc.AzimuthalFilter(azi_bins))
return filters
def _squeeze_xs(self, xs):
"""Remove dimensions which are not needed from a cross section array
due to user options. This is used by the openmc.Mgxs.get_xs(...) method
Parameters
----------
xs : np.ndarray
Cross sections array with dimensions to be squeezed
Returns
-------
np.ndarray
Squeezed array of cross sections
"""
# numpy.squeeze will return a ValueError if the axis has a size
# greater than 1, to avoid this we will try each axis one at a
# time to preclude the ValueError.
initial_shape = len(xs.shape)
for axis in range(initial_shape - 1, -1, -1):
if axis not in self._dont_squeeze and xs.shape[axis] == 1:
xs = np.squeeze(xs, axis=axis)
return xs
def _df_convert_columns_to_bins(self, df):
"""This method converts all relevant and present DataFrame columns from
their bin boundaries to the index for each bin. This method operates on
the DataFrame, df, in place. The method returns a list of the columns
in which it has operated on.
Parameters
----------
df : pandas.DataFrame
A Pandas DataFrame containing the cross section data.
Returns
-------
columns : Iterable of str
Names of the re-named and re-valued columns
"""
# Override polar and azimuthal bounds with indices
if self.num_polar > 1 or self.num_azimuthal > 1:
# First for polar
bins = np.linspace(0., np.pi, self.num_polar + 1, True)
_df_column_convert_to_bin(df, 'polar low', 'polar bin', bins)
del df['polar high']
# Second for azimuthal
bins = np.linspace(-np.pi, np.pi, self.num_azimuthal + 1, True)
_df_column_convert_to_bin(df, 'azimuthal low', 'azimuthal bin',
bins)
del df['azimuthal high']
columns = ['polar bin', 'azimuthal bin']
else:
columns = []
# Override energy groups bounds with indices
if 'energy low [eV]' in df:
_df_column_convert_to_bin(df, 'energy low [eV]', 'group in',
self.energy_groups.group_edges,
reverse_order=True)
del df['energy high [eV]']
columns += ['group in']
if 'energyout low [eV]' in df:
_df_column_convert_to_bin(df, 'energyout low [eV]', 'group out',
self.energy_groups.group_edges,
reverse_order=True)
del df['energyout high [eV]']
columns += ['group out']
if 'mu low' in df and hasattr(self, 'histogram_bins'):
# Only the ScatterMatrix class has the histogram_bins attribute
bins = np.linspace(-1., 1., self.histogram_bins + 1, True)
_df_column_convert_to_bin(df, 'mu low', 'mu bin', bins)
del df['mu high']
columns += ['mu bin']
return columns
@property
def _dont_squeeze(self):
"""Create a tuple of axes which should not be removed during the get_xs
process
"""
if self.num_polar > 1 or self.num_azimuthal > 1:
return (0, 1, 3)
else:
return (1, )
    # NOTE(review): the run of bare ``@property`` and ``@*.setter`` lines
    # below looks like an extraction artifact -- the bodies of roughly two
    # dozen properties (name, by_nuclide, nuclides, estimator, domain,
    # domain_type, energy_groups, num_polar, num_azimuthal, tally_trigger,
    # sparse, ...) appear to have been stripped, leaving their decorators
    # stacked on the ``sparse`` setter. As written, evaluating e.g.
    # ``@name.setter`` will raise a NameError when the class body executes,
    # because ``name`` is never bound -- restore the original property
    # definitions before using this class.
    @property
    @property
    @property
    @property
    @property
    @property
    @property
    @property
    @property
    @property
    @property
    @property
    @property
    @property
    @property
    @property
    @property
    @property
    @property
    @property
    @property
    @property
    @property
    @property
    @name.setter
    @by_nuclide.setter
    @nuclides.setter
    @estimator.setter
    @domain.setter
    @domain_type.setter
    @energy_groups.setter
    @num_polar.setter
    @num_azimuthal.setter
    @tally_trigger.setter
    @sparse.setter
    def sparse(self, sparse):
        """Convert tally data from NumPy arrays to SciPy list of lists (LIL)
        sparse matrices, and vice versa.
        This property may be used to reduce the amount of data in memory during
        tally data processing. The tally data will be stored as SciPy LIL
        matrices internally within the Tally object. All tally data access
        properties and methods will return data as a dense NumPy array.
        """
        cv.check_type('sparse', sparse, bool)
        # Sparsify or densify the derived MGXS tallies and the base tallies
        if self._xs_tally:
            self.xs_tally.sparse = sparse
        if self._rxn_rate_tally:
            self.rxn_rate_tally.sparse = sparse
        for tally_name in self.tallies:
            self.tallies[tally_name].sparse = sparse
        # Remember the requested storage mode for tallies created later
        self._sparse = sparse
@staticmethod
def get_mgxs(mgxs_type, domain=None, domain_type=None,
energy_groups=None, by_nuclide=False, name='', num_polar=1,
num_azimuthal=1):
"""Return a MGXS subclass object for some energy group structure within
some spatial domain for some reaction type.
This is a factory method which can be used to quickly create MGXS
subclass objects for various reaction types.
Parameters
----------
mgxs_type : str or Integral
The type of multi-group cross section object to return; valid
values are members of MGXS_TYPES, or the reaction types that are
the keys of REACTION_MT. Note that if a reaction type from
REACTION_MT is used, it can be appended with ' matrix' to obtain
a multigroup matrix (from incoming to outgoing energy groups) for
reactions with a neutron in an outgoing channel.
domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
The domain for spatial homogenization
domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
The domain type for spatial homogenization
energy_groups : openmc.mgxs.EnergyGroups
The energy group structure for energy condensation
by_nuclide : bool
If true, computes cross sections for each nuclide in domain.
Defaults to False
name : str, optional
Name of the multi-group cross section. Used as a label to identify
tallies in OpenMC 'tallies.xml' file. Defaults to the empty string.
num_polar : Integral, optional
Number of equi-width polar angles for angle discretization;
defaults to no discretization
num_azimuthal : Integral, optional
Number of equi-width azimuthal angles for angle discretization;
defaults to no discretization
Returns
-------
openmc.mgxs.MGXS
A subclass of the abstract MGXS class for the multi-group cross
section type requested by the user
"""
cv.check_value(
"mgxs_type", mgxs_type,
MGXS_TYPES + ARBITRARY_VECTOR_TYPES + ARBITRARY_MATRIX_TYPES)
if mgxs_type == 'total':
mgxs = TotalXS(domain, domain_type, energy_groups)
elif mgxs_type == 'transport':
mgxs = TransportXS(domain, domain_type, energy_groups)
elif mgxs_type == 'nu-transport':
mgxs = TransportXS(domain, domain_type, energy_groups, nu=True)
elif mgxs_type == 'absorption':
mgxs = AbsorptionXS(domain, domain_type, energy_groups)
elif mgxs_type == 'capture':
mgxs = CaptureXS(domain, domain_type, energy_groups)
elif mgxs_type == 'fission':
mgxs = FissionXS(domain, domain_type, energy_groups)
elif mgxs_type == 'nu-fission':
mgxs = FissionXS(domain, domain_type, energy_groups, nu=True)
elif mgxs_type == 'kappa-fission':
mgxs = KappaFissionXS(domain, domain_type, energy_groups)
elif mgxs_type == 'scatter':
mgxs = ScatterXS(domain, domain_type, energy_groups)
elif mgxs_type == 'nu-scatter':
mgxs = ScatterXS(domain, domain_type, energy_groups, nu=True)
elif mgxs_type == 'scatter matrix':
mgxs = ScatterMatrixXS(domain, domain_type, energy_groups)
elif mgxs_type == 'nu-scatter matrix':
mgxs = ScatterMatrixXS(domain, domain_type, energy_groups, nu=True)
elif mgxs_type == 'multiplicity matrix':
mgxs = MultiplicityMatrixXS(domain, domain_type, energy_groups)
elif mgxs_type == 'scatter probability matrix':
mgxs = ScatterProbabilityMatrix(domain, domain_type, energy_groups)
elif mgxs_type == 'consistent scatter matrix':
mgxs = ScatterMatrixXS(domain, domain_type, energy_groups)
mgxs.formulation = 'consistent'
elif mgxs_type == 'consistent nu-scatter matrix':
mgxs = ScatterMatrixXS(domain, domain_type, energy_groups, nu=True)
mgxs.formulation = 'consistent'
elif mgxs_type == 'nu-fission matrix':
mgxs = NuFissionMatrixXS(domain, domain_type, energy_groups)
elif mgxs_type == 'chi':
mgxs = Chi(domain, domain_type, energy_groups)
elif mgxs_type == 'chi-prompt':
mgxs = Chi(domain, domain_type, energy_groups, prompt=True)
elif mgxs_type == 'inverse-velocity':
mgxs = InverseVelocity(domain, domain_type, energy_groups)
elif mgxs_type == 'prompt-nu-fission':
mgxs = FissionXS(domain, domain_type, energy_groups, prompt=True)
elif mgxs_type == 'prompt-nu-fission matrix':
mgxs = NuFissionMatrixXS(domain, domain_type, energy_groups,
prompt=True)
elif mgxs_type == 'current':
mgxs = Current(domain, domain_type, energy_groups)
elif mgxs_type == 'diffusion-coefficient':
mgxs = DiffusionCoefficient(domain, domain_type, energy_groups)
elif mgxs_type == 'nu-diffusion-coefficient':
mgxs = DiffusionCoefficient(domain, domain_type, energy_groups, nu=True)
elif mgxs_type in ARBITRARY_VECTOR_TYPES:
# Then it is a reaction not covered by the above that is
# supported by the ArbitraryXS Class
mgxs = ArbitraryXS(mgxs_type, domain, domain_type, energy_groups)
elif mgxs_type in ARBITRARY_MATRIX_TYPES:
mgxs = ArbitraryMatrixXS(mgxs_type, domain, domain_type,
energy_groups)
mgxs.by_nuclide = by_nuclide
mgxs.name = name
mgxs.num_polar = num_polar
mgxs.num_azimuthal = num_azimuthal
return mgxs
def get_nuclides(self):
"""Get all nuclides in the cross section's spatial domain.
Returns
-------
list of str
A list of the string names for each nuclide in the spatial domain
(e.g., ['U235', 'U238', 'O16'])
Raises
------
ValueError
When this method is called before the spatial domain has been set.
"""
if self.domain is None:
raise ValueError('Unable to get all nuclides without a domain')
# If the user defined nuclides, return them
if self._nuclides:
return self._nuclides
# Otherwise, return all nuclides in the spatial domain
else:
return self.domain.get_nuclides()
def get_nuclide_density(self, nuclide):
"""Get the atomic number density in units of atoms/b-cm for a nuclide
in the cross section's spatial domain.
Parameters
----------
nuclide : str
A nuclide name string (e.g., 'U235')
Returns
-------
float
The atomic number density (atom/b-cm) for the nuclide of interest
"""
cv.check_type('nuclide', nuclide, str)
# Get list of all nuclides in the spatial domain
nuclides = self.domain.get_nuclide_densities()
return nuclides[nuclide][1] if nuclide in nuclides else 0.0
def get_nuclide_densities(self, nuclides='all'):
"""Get an array of atomic number densities in units of atom/b-cm for all
nuclides in the cross section's spatial domain.
Parameters
----------
nuclides : Iterable of str or 'all' or 'sum'
A list of nuclide name strings (e.g., ['U235', 'U238']). The
special string 'all' will return the atom densities for all nuclides
in the spatial domain. The special string 'sum' will return the atom
density summed across all nuclides in the spatial domain. Defaults
to 'all'.
Returns
-------
numpy.ndarray of float
An array of the atomic number densities (atom/b-cm) for each of the
nuclides in the spatial domain
Raises
------
ValueError
When this method is called before the spatial domain has been set.
"""
if self.domain is None:
raise ValueError('Unable to get nuclide densities without a domain')
# Sum the atomic number densities for all nuclides
if nuclides == 'sum':
nuclides = self.get_nuclides()
densities = np.zeros(1, dtype=np.float)
for nuclide in nuclides:
densities[0] += self.get_nuclide_density(nuclide)
# Tabulate the atomic number densities for all nuclides
elif nuclides == 'all':
nuclides = self.get_nuclides()
densities = np.zeros(self.num_nuclides, dtype=np.float)
for i, nuclide in enumerate(nuclides):
densities[i] += self.get_nuclide_density(nuclide)
# Tabulate the atomic number densities for each specified nuclide
else:
densities = np.zeros(len(nuclides), dtype=np.float)
for i, nuclide in enumerate(nuclides):
densities[i] = self.get_nuclide_density(nuclide)
return densities
def _compute_xs(self):
"""Performs generic cleanup after a subclass' uses tally arithmetic to
compute a multi-group cross section as a derived tally.
This method replaces CrossNuclides generated by tally arithmetic with
the original Nuclide objects in the xs_tally instance attribute. The
simple Nuclides allow for cleaner output through Pandas DataFrames as
well as simpler data access through the get_xs(...) class method.
In addition, this routine resets NaNs in the multi group cross section
array to 0.0. This may be needed occur if no events were scored in
certain tally bins, which will lead to a divide-by-zero situation.
"""
# If computing xs for each nuclide, replace CrossNuclides with originals
if self.by_nuclide:
self.xs_tally._nuclides = []
nuclides = self.get_nuclides()
for nuclide in nuclides:
self.xs_tally.nuclides.append(openmc.Nuclide(nuclide))
# Remove NaNs which may have resulted from divide-by-zero operations
self.xs_tally._mean = np.nan_to_num(self.xs_tally.mean)
self.xs_tally._std_dev = np.nan_to_num(self.xs_tally.std_dev)
self.xs_tally.sparse = self.sparse
def load_from_statepoint(self, statepoint):
"""Extracts tallies in an OpenMC StatePoint with the data needed to
compute multi-group cross sections.
This method is needed to compute cross section data from tallies
in an OpenMC StatePoint object.
.. note:: The statepoint must be linked with an OpenMC Summary object.
Parameters
----------
statepoint : openmc.StatePoint
An OpenMC StatePoint object with tally data
Raises
------
ValueError
When this method is called with a statepoint that has not been
linked with a summary object.
"""
cv.check_type('statepoint', statepoint, openmc.StatePoint)
if statepoint.summary is None:
msg = 'Unable to load data from a statepoint which has not been ' \
'linked with a summary file'
raise ValueError(msg)
# Override the domain object that loaded from an OpenMC summary file
# NOTE: This is necessary for micro cross-sections which require
# the isotopic number densities as computed by OpenMC
su = statepoint.summary
if self.domain_type in ('cell', 'distribcell'):
self.domain = su._fast_cells[self.domain.id]
elif self.domain_type == 'universe':
self.domain = su._fast_universes[self.domain.id]
elif self.domain_type == 'material':
self.domain = su._fast_materials[self.domain.id]
elif self.domain_type == 'mesh':
self.domain = statepoint.meshes[self.domain.id]
else:
msg = 'Unable to load data from a statepoint for domain type {0} ' \
'which is not yet supported'.format(self.domain_type)
raise ValueError(msg)
# Use tally "slicing" to ensure that tallies correspond to our domain
# NOTE: This is important if tally merging was used
if self.domain_type == 'mesh':
filters = [_DOMAIN_TO_FILTER[self.domain_type]]
filter_bins = [tuple(self.domain.indices)]
elif self.domain_type != 'distribcell':
filters = [_DOMAIN_TO_FILTER[self.domain_type]]
filter_bins = [(self.domain.id,)]
# Distribcell filters only accept single cell - neglect it when slicing
else:
filters = []
filter_bins = []
# Clear any tallies previously loaded from a statepoint
if self.loaded_sp:
self._tallies = None
self._xs_tally = None
self._rxn_rate_tally = None
self._loaded_sp = False
# Find, slice and store Tallies from StatePoint
# The tally slicing is needed if tally merging was used
for tally_type, tally in self.tallies.items():
sp_tally = statepoint.get_tally(
tally.scores, tally.filters, tally.nuclides,
estimator=tally.estimator, exact_filters=True)
sp_tally = sp_tally.get_slice(
tally.scores, filters, filter_bins, tally.nuclides)
sp_tally.sparse = self.sparse
self.tallies[tally_type] = sp_tally
self._loaded_sp = True
    def get_xs(self, groups='all', subdomains='all', nuclides='all',
               xs_type='macro', order_groups='increasing',
               value='mean', squeeze=True, **kwargs):
        r"""Returns an array of multi-group cross sections.
        This method constructs a 3D NumPy array for the requested
        multi-group cross section data for one or more subdomains
        (1st dimension), energy groups (2nd dimension), and nuclides
        (3rd dimension).
        Parameters
        ----------
        groups : Iterable of Integral or 'all'
            Energy groups of interest. Defaults to 'all'.
        subdomains : Iterable of Integral or 'all'
            Subdomain IDs of interest. Defaults to 'all'.
        nuclides : Iterable of str or 'all' or 'sum'
            A list of nuclide name strings (e.g., ['U235', 'U238']). The
            special string 'all' will return the cross sections for all nuclides
            in the spatial domain. The special string 'sum' will return the
            cross section summed over all nuclides. Defaults to 'all'.
        xs_type: {'macro', 'micro'}
            Return the macro or micro cross section in units of cm^-1 or barns.
            Defaults to 'macro'.
        order_groups: {'increasing', 'decreasing'}
            Return the cross section indexed according to increasing or
            decreasing energy groups (decreasing or increasing energies).
            Defaults to 'increasing'.
        value : {'mean', 'std_dev', 'rel_err'}
            A string for the type of value to return. Defaults to 'mean'.
        squeeze : bool
            A boolean representing whether to eliminate the extra dimensions
            of the multi-dimensional array to be returned. Defaults to True.
        Returns
        -------
        numpy.ndarray
            A NumPy array of the multi-group cross section indexed in the order
            each group, subdomain and nuclide is listed in the parameters.
        Raises
        ------
        ValueError
            When this method is called before the multi-group cross section is
            computed from tally data.
        """
        cv.check_value('value', value, ['mean', 'std_dev', 'rel_err'])
        cv.check_value('xs_type', xs_type, ['macro', 'micro'])
        # FIXME: Unable to get microscopic xs for mesh domain because the mesh
        # cells do not know the nuclide densities in each mesh cell.
        if self.domain_type == 'mesh' and xs_type == 'micro':
            msg = 'Unable to get micro xs for mesh domain since the mesh ' \
                  'cells do not know the nuclide densities in each mesh cell.'
            raise ValueError(msg)
        filters = []
        filter_bins = []
        # Construct a collection of the domain filter bins
        if not isinstance(subdomains, str):
            cv.check_iterable_type('subdomains', subdomains, Integral,
                                   max_depth=3)
            filters.append(_DOMAIN_TO_FILTER[self.domain_type])
            subdomain_bins = []
            for subdomain in subdomains:
                subdomain_bins.append(subdomain)
            filter_bins.append(tuple(subdomain_bins))
        # Construct list of energy group bounds tuples for all requested groups
        if not isinstance(groups, str):
            cv.check_iterable_type('groups', groups, Integral)
            filters.append(openmc.EnergyFilter)
            energy_bins = []
            for group in groups:
                energy_bins.append(
                    (self.energy_groups.get_group_bounds(group),))
            filter_bins.append(tuple(energy_bins))
        # Construct a collection of the nuclides to retrieve from the xs tally
        if self.by_nuclide:
            if nuclides == 'all' or nuclides == 'sum' or nuclides == ['sum']:
                query_nuclides = self.get_nuclides()
            else:
                query_nuclides = nuclides
        else:
            query_nuclides = ['total']
        # If user requested the sum for all nuclides, use tally summation
        if nuclides == 'sum' or nuclides == ['sum']:
            xs_tally = self.xs_tally.summation(nuclides=query_nuclides)
            xs = xs_tally.get_values(filters=filters,
                                     filter_bins=filter_bins, value=value)
        else:
            xs = self.xs_tally.get_values(filters=filters,
                                          filter_bins=filter_bins,
                                          nuclides=query_nuclides, value=value)
        # Divide by atom number densities for microscopic cross sections
        if xs_type == 'micro' and self._divide_by_density:
            if self.by_nuclide:
                densities = self.get_nuclide_densities(nuclides)
            else:
                densities = self.get_nuclide_densities('sum')
            # NOTE(review): rel_err is deliberately left unscaled -- a
            # relative error is unchanged by a constant density factor.
            if value == 'mean' or value == 'std_dev':
                # Broadcast the density over axis 1; the indexing implies xs
                # is 3-D (bins, nuclides, scores) here -- TODO confirm
                xs /= densities[np.newaxis, :, np.newaxis]
        # Eliminate the trivial score dimension
        xs = np.squeeze(xs, axis=len(xs.shape) - 1)
        xs = np.nan_to_num(xs)
        if groups == 'all':
            num_groups = self.num_groups
        else:
            num_groups = len(groups)
        # Reshape tally data array with separate axes for domain and energy
        # Accommodate the polar and azimuthal bins if needed
        num_subdomains = int(xs.shape[0] / (num_groups * self.num_polar *
                                            self.num_azimuthal))
        if self.num_polar > 1 or self.num_azimuthal > 1:
            new_shape = (self.num_polar, self.num_azimuthal, num_subdomains,
                         num_groups)
        else:
            new_shape = (num_subdomains, num_groups)
        new_shape += xs.shape[1:]
        xs = np.reshape(xs, new_shape)
        # Reverse data if user requested increasing energy groups since
        # tally data is stored in order of increasing energies
        if order_groups == 'increasing':
            xs = xs[..., ::-1, :]
        if squeeze:
            # We want to squeeze out everything but the polar, azimuthal,
            # and energy group data.
            xs = self._squeeze_xs(xs)
        return xs
def get_flux(self, groups='all', subdomains='all',
order_groups='increasing', value='mean',
squeeze=True, **kwargs):
r"""Returns an array of the fluxes used to weight the MGXS.
This method constructs a 2D NumPy array for the requested
weighting flux for one or more subdomains (1st dimension), and
energy groups (2nd dimension).
Parameters
----------
groups : Iterable of Integral or 'all'
Energy groups of interest. Defaults to 'all'.
subdomains : Iterable of Integral or 'all'
Subdomain IDs of interest. Defaults to 'all'.
order_groups: {'increasing', 'decreasing'}
Return the cross section indexed according to increasing or
decreasing energy groups (decreasing or increasing energies).
Defaults to 'increasing'.
value : {'mean', 'std_dev', 'rel_err'}
A string for the type of value to return. Defaults to 'mean'.
squeeze : bool
A boolean representing whether to eliminate the extra dimensions
of the multi-dimensional array to be returned. Defaults to True.
Returns
-------
numpy.ndarray
A NumPy array of the flux indexed in the order
each group and subdomain is listed in the parameters.
Raises
------
ValueError
When this method is called before the data is available from tally
data, or, when this is used on an MGXS type without a flux score.
"""
cv.check_value('value', value, ['mean', 'std_dev', 'rel_err'])
filters = []
filter_bins = []
# Construct a collection of the domain filter bins
if not isinstance(subdomains, str):
cv.check_iterable_type('subdomains', subdomains, Integral,
max_depth=3)
filters.append(_DOMAIN_TO_FILTER[self.domain_type])
subdomain_bins = []
for subdomain in subdomains:
subdomain_bins.append(subdomain)
filter_bins.append(tuple(subdomain_bins))
# Construct list of energy group bounds tuples for all requested groups
if not isinstance(groups, str):
cv.check_iterable_type('groups', groups, Integral)
filters.append(openmc.EnergyFilter)
energy_bins = []
for group in groups:
energy_bins.append(
(self.energy_groups.get_group_bounds(group),))
filter_bins.append(tuple(energy_bins))
# Determine which flux to obtain
# Step through in order of usefulness
for key in ['flux', 'flux (tracklength)', 'flux (analog)']:
if key in self.tally_keys:
tally = self.tallies[key]
break
else:
msg = "MGXS of Type {} do not have an explicit weighting flux!"
raise ValueError(msg.format(self.__name__))
flux = tally.get_values(filters=filters, filter_bins=filter_bins,
nuclides=['total'], value=value)
# Eliminate the trivial score dimension
flux = np.squeeze(flux, axis=len(flux.shape) - 1)
# Eliminate the trivial nuclide dimension
flux = np.squeeze(flux, axis=len(flux.shape) - 1)
flux = np.nan_to_num(flux)
if groups == 'all':
num_groups = self.num_groups
else:
num_groups = len(groups)
# Reshape tally data array with separate axes for domain and energy
# Accomodate the polar and azimuthal bins if needed
num_subdomains = int(flux.shape[0] / (num_groups * self.num_polar *
self.num_azimuthal))
if self.num_polar > 1 or self.num_azimuthal > 1:
new_shape = (self.num_polar, self.num_azimuthal, num_subdomains,
num_groups)
else:
new_shape = (num_subdomains, num_groups)
new_shape += flux.shape[1:]
flux = np.reshape(flux, new_shape)
# Reverse data if user requested increasing energy groups since
# tally data is stored in order of increasing energies
if order_groups == 'increasing':
flux = flux[..., ::-1]
if squeeze:
# We want to squeeze out everything but the polar, azimuthal,
# and energy group data.
flux = self._squeeze_xs(flux)
return flux
    def get_condensed_xs(self, coarse_groups):
        """Construct an energy-condensed version of this cross section.
        Parameters
        ----------
        coarse_groups : openmc.mgxs.EnergyGroups
            The coarse energy group structure of interest
        Returns
        -------
        MGXS
            A new MGXS condensed to the group structure of interest
        """
        # Validate that the coarse structure is a strict condensation of the
        # fine structure: no more groups than currently present, and the
        # outermost energy bounds must match exactly
        cv.check_type('coarse_groups', coarse_groups, EnergyGroups)
        cv.check_less_than('coarse groups', coarse_groups.num_groups,
                           self.num_groups, equality=True)
        cv.check_value('upper coarse energy', coarse_groups.group_edges[-1],
                       [self.energy_groups.group_edges[-1]])
        cv.check_value('lower coarse energy', coarse_groups.group_edges[0],
                       [self.energy_groups.group_edges[0]])
        # Clone this MGXS to initialize the condensed version
        condensed_xs = copy.deepcopy(self)
        # Null cached derived tallies so they are recomputed on next access
        # from the condensed raw tallies
        condensed_xs._rxn_rate_tally = None
        condensed_xs._xs_tally = None
        condensed_xs._sparse = False
        condensed_xs._energy_groups = coarse_groups
        # Build energy indices to sum across
        # Group numbers start at 1 for the highest energies, so iterating the
        # coarse groups in reverse produces indices in ascending-edge order
        # for np.add.reduceat below
        energy_indices = []
        for group in range(coarse_groups.num_groups, 0, -1):
            low, high = coarse_groups.get_group_bounds(group)
            low_index = np.where(self.energy_groups.group_edges == low)[0][0]
            energy_indices.append(low_index)
        fine_edges = self.energy_groups.group_edges
        # Condense each of the tallies to the coarse group structure
        for tally in condensed_xs.tallies.values():
            # Make condensed tally derived and null out sum, sum_sq
            tally._derived = True
            tally._sum = None
            tally._sum_sq = None
            # Get tally data arrays reshaped with one dimension per filter
            mean = tally.get_reshaped_data(value='mean')
            std_dev = tally.get_reshaped_data(value='std_dev')
            # Sum across all applicable fine energy group filters
            for i, tally_filter in enumerate(tally.filters):
                # Only energy(-out) filters that exactly match the fine group
                # structure are condensed; any other filter is left untouched
                if not isinstance(tally_filter, (openmc.EnergyFilter,
                                                 openmc.EnergyoutFilter)):
                    continue
                elif len(tally_filter.bins) != len(fine_edges) - 1:
                    continue
                elif not np.allclose(tally_filter.bins[:, 0], fine_edges[:-1]):
                    continue
                else:
                    # Rewrite the filter's bins to the coarse edge pairs
                    cedge = coarse_groups.group_edges
                    tally_filter.values = cedge
                    tally_filter.bins = np.vstack((cedge[:-1], cedge[1:])).T
                    # Sum the means directly; combine std devs in quadrature
                    # (sum the variances, then take the square root)
                    mean = np.add.reduceat(mean, energy_indices, axis=i)
                    std_dev = np.add.reduceat(std_dev**2, energy_indices,
                                              axis=i)
                    std_dev = np.sqrt(std_dev)
            # Reshape condensed data arrays with one dimension for all filters
            mean = np.reshape(mean, tally.shape)
            std_dev = np.reshape(std_dev, tally.shape)
            # Override tally's data with the new condensed data
            tally._mean = mean
            tally._std_dev = std_dev
        # Compute the energy condensed multi-group cross section
        condensed_xs.sparse = self.sparse
        return condensed_xs
def get_subdomain_avg_xs(self, subdomains='all'):
"""Construct a subdomain-averaged version of this cross section.
This method is useful for averaging cross sections across distribcell
instances. The method performs spatial homogenization to compute the
scalar flux-weighted average cross section across the subdomains.
Parameters
----------
subdomains : Iterable of Integral or 'all'
The subdomain IDs to average across. Defaults to 'all'.
Returns
-------
openmc.mgxs.MGXS
A new MGXS averaged across the subdomains of interest
Raises
------
ValueError
When this method is called before the multi-group cross section is
computed from tally data.
"""
# Construct a collection of the subdomain filter bins to average across
if not isinstance(subdomains, str):
cv.check_iterable_type('subdomains', subdomains, Integral)
subdomains = [(subdomain,) for subdomain in subdomains]
subdomains = [tuple(subdomains)]
elif self.domain_type == 'distribcell':
subdomains = [i for i in range(self.num_subdomains)]
subdomains = [tuple(subdomains)]
else:
subdomains = None
# Clone this MGXS to initialize the subdomain-averaged version
avg_xs = copy.deepcopy(self)
avg_xs._rxn_rate_tally = None
avg_xs._xs_tally = None
# Average each of the tallies across subdomains
for tally_type, tally in avg_xs.tallies.items():
filt_type = _DOMAIN_TO_FILTER[self.domain_type]
tally_avg = tally.summation(filter_type=filt_type,
filter_bins=subdomains)
avg_xs.tallies[tally_type] = tally_avg
avg_xs._domain_type = 'sum({0})'.format(self.domain_type)
avg_xs.sparse = self.sparse
return avg_xs
    def _get_homogenized_mgxs(self, other_mgxs, denom_score='flux'):
        """Construct a homogenized MGXS with other MGXS objects.
        This method constructs a new MGXS object that is the flux-weighted
        combination of two MGXS objects. It is equivalent to what one would
        obtain if the tally spatial domain were designed to encompass the
        individual domains for both MGXS objects. This is accomplished by
        summing the rxn rate (numerator) tally and the denominator tally
        (often a tally of the flux over the spatial domain) that are used to
        compute a multi-group cross-section.
        Parameters
        ----------
        other_mgxs : openmc.mgxs.MGXS or Iterable of openmc.mgxs.MGXS
            The MGXS to homogenize with this one.
        denom_score : str
            The denominator score in the denominator of computing the MGXS.
        Returns
        -------
        openmc.mgxs.MGXS
            A new homogenized MGXS
        Raises
        ------
        ValueError
            If the other_mgxs is of a different type.
        """
        # Check type of denom score
        cv.check_type('denom_score', denom_score, str)
        # Normalize a single MGXS argument into a one-element list
        if isinstance(other_mgxs, openmc.mgxs.MGXS):
            other_mgxs = [other_mgxs]
        cv.check_iterable_type('other_mgxs', other_mgxs, openmc.mgxs.MGXS)
        # Homogenization only makes sense for MGXS of the same reaction type
        for mgxs in other_mgxs:
            if mgxs.rxn_type != self.rxn_type:
                msg = 'Not able to homogenize two MGXS with different rxn types'
                raise ValueError(msg)
        # Clone this MGXS to initialize the homogenized version
        homogenized_mgxs = copy.deepcopy(self)
        homogenized_mgxs._derived = True
        # Build a descriptive name of the form 'hom(domain1, domain2, ...)'
        name = 'hom({}, '.format(self.domain.name)
        # Get the domain filter
        filter_type = _DOMAIN_TO_FILTER[self.domain_type]
        self_filter = self.rxn_rate_tally.find_filter(filter_type)
        # Get the rxn rate and denom tallies
        rxn_rate_tally = self.rxn_rate_tally
        denom_tally = self.tallies[denom_score]
        for mgxs in other_mgxs:
            # Swap the domain filter bins for the other mgxs rxn rate tally
            # so the tallies align on this MGXS' domain bins and can be added
            other_rxn_rate_tally = copy.deepcopy(mgxs.rxn_rate_tally)
            other_filter = other_rxn_rate_tally.find_filter(filter_type)
            other_filter._bins = self_filter._bins
            # Swap the domain filter bins for the denom tally likewise
            other_denom_tally = copy.deepcopy(mgxs.tallies[denom_score])
            other_filter = other_denom_tally.find_filter(filter_type)
            other_filter._bins = self_filter._bins
            # Add the rxn rate and denom tallies
            rxn_rate_tally += other_rxn_rate_tally
            denom_tally += other_denom_tally
            # Update the name for the homogenized MGXS
            name += '{}, '.format(mgxs.domain.name)
        # Set the properties of the homogenized MGXS
        homogenized_mgxs._rxn_rate_tally = rxn_rate_tally
        homogenized_mgxs.tallies[denom_score] = denom_tally
        # Replace the trailing ', ' with a closing parenthesis
        homogenized_mgxs._domain.name = name[:-2] + ')'
        return homogenized_mgxs
def get_homogenized_mgxs(self, other_mgxs):
"""Construct a homogenized mgxs with other MGXS objects.
Parameters
----------
other_mgxs : openmc.mgxs.MGXS or Iterable of openmc.mgxs.MGXS
The MGXS to homogenize with this one.
Returns
-------
openmc.mgxs.MGXS
A new homogenized MGXS
Raises
------
ValueError
If the other_mgxs is of a different type.
"""
return self._get_homogenized_mgxs(other_mgxs, 'flux')
def get_slice(self, nuclides=[], groups=[]):
"""Build a sliced MGXS for the specified nuclides and energy groups.
This method constructs a new MGXS to encapsulate a subset of the data
represented by this MGXS. The subset of data to include in the tally
slice is determined by the nuclides and energy groups specified in
the input parameters.
Parameters
----------
nuclides : list of str
A list of nuclide name strings
(e.g., ['U235', 'U238']; default is [])
groups : list of int
A list of energy group indices starting at 1 for the high energies
(e.g., [1, 2, 3]; default is [])
Returns
-------
openmc.mgxs.MGXS
A new MGXS object which encapsulates the subset of data requested
for the nuclide(s) and/or energy group(s) requested in the
parameters.
"""
cv.check_iterable_type('nuclides', nuclides, str)
cv.check_iterable_type('energy_groups', groups, Integral)
# Build lists of filters and filter bins to slice
filters = []
filter_bins = []
if len(groups) != 0:
energy_bins = []
for group in groups:
group_bounds = self.energy_groups.get_group_bounds(group)
energy_bins.append(group_bounds)
filter_bins.append(tuple(energy_bins))
filters.append(openmc.EnergyFilter)
# Clone this MGXS to initialize the sliced version
slice_xs = copy.deepcopy(self)
slice_xs._rxn_rate_tally = None
slice_xs._xs_tally = None
# Slice each of the tallies across nuclides and energy groups
for tally_type, tally in slice_xs.tallies.items():
slice_nuclides = [nuc for nuc in nuclides if nuc in tally.nuclides]
if len(groups) != 0 and tally.contains_filter(openmc.EnergyFilter):
tally_slice = tally.get_slice(filters=filters,
filter_bins=filter_bins,
nuclides=slice_nuclides)
else:
tally_slice = tally.get_slice(nuclides=slice_nuclides)
slice_xs.tallies[tally_type] = tally_slice
# Assign sliced energy group structure to sliced MGXS
if groups:
new_group_edges = []
for group in groups:
group_edges = self.energy_groups.get_group_bounds(group)
new_group_edges.extend(group_edges)
new_group_edges = np.unique(new_group_edges)
slice_xs.energy_groups.group_edges = sorted(new_group_edges)
# Assign sliced nuclides to sliced MGXS
if nuclides:
slice_xs.nuclides = nuclides
slice_xs.sparse = self.sparse
return slice_xs
def can_merge(self, other):
"""Determine if another MGXS can be merged with this one
If results have been loaded from a statepoint, then MGXS are only
mergeable along one and only one of enegy groups or nuclides.
Parameters
----------
other : openmc.mgxs.MGXS
MGXS to check for merging
"""
if not isinstance(other, type(self)):
return False
# Compare reaction type, energy groups, nuclides, domain type
if self.rxn_type != other.rxn_type:
return False
elif not self.energy_groups.can_merge(other.energy_groups):
return False
elif self.by_nuclide != other.by_nuclide:
return False
elif self.domain_type != other.domain_type:
return False
elif 'distribcell' not in self.domain_type and self.domain != other.domain:
return False
elif not self.xs_tally.can_merge(other.xs_tally):
return False
elif not self.rxn_rate_tally.can_merge(other.rxn_rate_tally):
return False
# If all conditionals pass then MGXS are mergeable
return True
def merge(self, other):
"""Merge another MGXS with this one
MGXS are only mergeable if their energy groups and nuclides are either
identical or mutually exclusive. If results have been loaded from a
statepoint, then MGXS are only mergeable along one and only one of
energy groups or nuclides.
Parameters
----------
other : openmc.mgxs.MGXS
MGXS to merge with this one
Returns
-------
merged_mgxs : openmc.mgxs.MGXS
Merged MGXS
"""
if not self.can_merge(other):
raise ValueError('Unable to merge MGXS')
# Create deep copy of tally to return as merged tally
merged_mgxs = copy.deepcopy(self)
merged_mgxs._derived = True
# Merge energy groups
if self.energy_groups != other.energy_groups:
merged_groups = self.energy_groups.merge(other.energy_groups)
merged_mgxs.energy_groups = merged_groups
# Merge nuclides
if self.nuclides != other.nuclides:
# The nuclides must be mutually exclusive
for nuclide in self.nuclides:
if nuclide in other.nuclides:
msg = 'Unable to merge MGXS with shared nuclides'
raise ValueError(msg)
# Concatenate lists of nuclides for the merged MGXS
merged_mgxs.nuclides = self.nuclides + other.nuclides
# Null base tallies but merge reaction rate and cross section tallies
merged_mgxs._tallies = OrderedDict()
merged_mgxs._rxn_rate_tally = self.rxn_rate_tally.merge(other.rxn_rate_tally)
merged_mgxs._xs_tally = self.xs_tally.merge(other.xs_tally)
return merged_mgxs
def print_xs(self, subdomains='all', nuclides='all', xs_type='macro'):
"""Print a string representation for the multi-group cross section.
Parameters
----------
subdomains : Iterable of Integral or 'all'
The subdomain IDs of the cross sections to include in the report.
Defaults to 'all'.
nuclides : Iterable of str or 'all' or 'sum'
The nuclides of the cross-sections to include in the report. This
may be a list of nuclide name strings (e.g., ['U235', 'U238']).
The special string 'all' will report the cross sections for all
nuclides in the spatial domain. The special string 'sum' will report
the cross sections summed over all nuclides. Defaults to 'all'.
xs_type: {'macro', 'micro'}
Return the macro or micro cross section in units of cm^-1 or barns.
Defaults to 'macro'.
"""
# Construct a collection of the subdomains to report
if not isinstance(subdomains, str):
cv.check_iterable_type('subdomains', subdomains, Integral)
elif self.domain_type == 'distribcell':
subdomains = np.arange(self.num_subdomains, dtype=np.int)
elif self.domain_type == 'mesh':
subdomains = list(self.domain.indices)
else:
subdomains = [self.domain.id]
# Construct a collection of the nuclides to report
if self.by_nuclide:
if nuclides == 'all':
nuclides = self.get_nuclides()
elif nuclides == 'sum':
nuclides = ['sum']
else:
cv.check_iterable_type('nuclides', nuclides, str)
else:
nuclides = ['sum']
cv.check_value('xs_type', xs_type, ['macro', 'micro'])
# Build header for string with type and domain info
string = 'Multi-Group XS\n'
string += '{0: <16}=\t{1}\n'.format('\tReaction Type', self.rxn_type)
string += '{0: <16}=\t{1}\n'.format('\tDomain Type', self.domain_type)
string += '{0: <16}=\t{1}\n'.format('\tDomain ID', self.domain.id)
# Generate the header for an individual XS
xs_header = '\tCross Sections [{0}]:'.format(self.get_units(xs_type))
# If cross section data has not been computed, only print string header
if self.tallies is None:
print(string)
return
# Set polar/azimuthal bins
if self.num_polar > 1 or self.num_azimuthal > 1:
pol_bins = np.linspace(0., np.pi, num=self.num_polar + 1,
endpoint=True)
azi_bins = np.linspace(-np.pi, np.pi, num=self.num_azimuthal + 1,
endpoint=True)
# Loop over all subdomains
for subdomain in subdomains:
if self.domain_type == 'distribcell' or self.domain_type == 'mesh':
string += '{0: <16}=\t{1}\n'.format('\tSubdomain', subdomain)
# Loop over all Nuclides
for nuclide in nuclides:
# Build header for nuclide type
if nuclide != 'sum':
string += '{0: <16}=\t{1}\n'.format('\tNuclide', nuclide)
# Build header for cross section type
string += '{0: <16}\n'.format(xs_header)
template = '{0: <12}Group {1} [{2: <10} - {3: <10}eV]:\t'
average_xs = self.get_xs(nuclides=[nuclide],
subdomains=[subdomain],
xs_type=xs_type, value='mean')
rel_err_xs = self.get_xs(nuclides=[nuclide],
subdomains=[subdomain],
xs_type=xs_type, value='rel_err')
rel_err_xs = rel_err_xs * 100.
if self.num_polar > 1 or self.num_azimuthal > 1:
# Loop over polar, azimuthal, and energy group ranges
for pol in range(len(pol_bins) - 1):
pol_low, pol_high = pol_bins[pol: pol + 2]
for azi in range(len(azi_bins) - 1):
azi_low, azi_high = azi_bins[azi: azi + 2]
string += '\t\tPolar Angle: [{0:5f} - {1:5f}]'.format(
pol_low, pol_high) + \
'\tAzimuthal Angle: [{0:5f} - {1:5f}]'.format(
azi_low, azi_high) + '\n'
for group in range(1, self.num_groups + 1):
bounds = \
self.energy_groups.get_group_bounds(group)
string += '\t' + template.format('', group,
bounds[0],
bounds[1])
string += '{0:.2e} +/- {1:.2e}%'.format(
average_xs[pol, azi, group - 1],
rel_err_xs[pol, azi, group - 1])
string += '\n'
string += '\n'
else:
# Loop over energy groups
for group in range(1, self.num_groups + 1):
bounds = self.energy_groups.get_group_bounds(group)
string += template.format('', group, bounds[0],
bounds[1])
string += '{0:.2e} +/- {1:.2e}%'.format(
average_xs[group - 1], rel_err_xs[group - 1])
string += '\n'
string += '\n'
string += '\n'
print(string)
def build_hdf5_store(self, filename='mgxs.h5', directory='mgxs',
subdomains='all', nuclides='all',
xs_type='macro', row_column='inout', append=True,
libver='earliest'):
"""Export the multi-group cross section data to an HDF5 binary file.
This method constructs an HDF5 file which stores the multi-group
cross section data. The data is stored in a hierarchy of HDF5 groups
from the domain type, domain id, subdomain id (for distribcell domains),
nuclides and cross section type. Two datasets for the mean and standard
deviation are stored for each subdomain entry in the HDF5 file.
.. note:: This requires the h5py Python package.
Parameters
----------
filename : str
Filename for the HDF5 file. Defaults to 'mgxs.h5'.
directory : str
Directory for the HDF5 file. Defaults to 'mgxs'.
subdomains : Iterable of Integral or 'all'
The subdomain IDs of the cross sections to include in the report.
Defaults to 'all'.
nuclides : Iterable of str or 'all' or 'sum'
The nuclides of the cross-sections to include in the report. This
may be a list of nuclide name strings (e.g., ['U235', 'U238']).
The special string 'all' will report the cross sections for all
nuclides in the spatial domain. The special string 'sum' will report
the cross sections summed over all nuclides. Defaults to 'all'.
xs_type: {'macro', 'micro'}
Store the macro or micro cross section in units of cm^-1 or barns.
Defaults to 'macro'.
row_column: {'inout', 'outin'}
Store scattering matrices indexed first by incoming group and
second by outgoing group ('inout'), or vice versa ('outin').
Defaults to 'inout'.
append : bool
If true, appends to an existing HDF5 file with the same filename
directory (if one exists). Defaults to True.
libver : {'earliest', 'latest'}
Compatibility mode for the HDF5 file. 'latest' will produce files
that are less backwards compatible but have performance benefits.
Raises
------
ValueError
When this method is called before the multi-group cross section is
computed from tally data.
"""
# Make directory if it does not exist
if not os.path.exists(directory):
os.makedirs(directory)
filename = os.path.join(directory, filename)
filename = filename.replace(' ', '-')
if append and os.path.isfile(filename):
xs_results = h5py.File(filename, 'a')
else:
xs_results = h5py.File(filename, 'w', libver=libver)
# Construct a collection of the subdomains to report
if not isinstance(subdomains, str):
cv.check_iterable_type('subdomains', subdomains, Integral)
elif self.domain_type == 'distribcell':
subdomains = np.arange(self.num_subdomains, dtype=np.int)
elif self.domain_type == 'sum(distribcell)':
domain_filter = self.xs_tally.find_filter('sum(distribcell)')
subdomains = domain_filter.bins
elif self.domain_type == 'mesh':
subdomains = list(self.domain.indices)
else:
subdomains = [self.domain.id]
# Construct a collection of the nuclides to report
if self.by_nuclide:
if nuclides == 'all':
nuclides = self.get_nuclides()
densities = np.zeros(len(nuclides), dtype=np.float)
elif nuclides == 'sum':
nuclides = ['sum']
else:
cv.check_iterable_type('nuclides', nuclides, str)
else:
nuclides = ['sum']
cv.check_value('xs_type', xs_type, ['macro', 'micro'])
# Create an HDF5 group within the file for the domain
domain_type_group = xs_results.require_group(self.domain_type)
domain_group = domain_type_group.require_group(str(self.domain.id))
# Determine number of digits to pad subdomain group keys
num_digits = len(str(self.num_subdomains))
# Create a separate HDF5 group for each subdomain
for subdomain in subdomains:
# Create an HDF5 group for the subdomain
if self.domain_type == 'distribcell':
group_name = ''.zfill(num_digits)
subdomain_group = domain_group.require_group(group_name)
else:
subdomain_group = domain_group
# Create a separate HDF5 group for this cross section
rxn_group = subdomain_group.require_group(self.hdf5_key)
# Create a separate HDF5 group for each nuclide
for j, nuclide in enumerate(nuclides):
if nuclide != 'sum':
density = densities[j]
nuclide_group = rxn_group.require_group(nuclide)
nuclide_group.require_dataset('density', dtype=np.float64,
data=[density], shape=(1,))
else:
nuclide_group = rxn_group
# Extract the cross section for this subdomain and nuclide
average = self.get_xs(subdomains=[subdomain], nuclides=[nuclide],
xs_type=xs_type, value='mean',
row_column=row_column)
std_dev = self.get_xs(subdomains=[subdomain], nuclides=[nuclide],
xs_type=xs_type, value='std_dev',
row_column=row_column)
# Add MGXS results data to the HDF5 group
nuclide_group.require_dataset('average', dtype=np.float64,
shape=average.shape, data=average)
nuclide_group.require_dataset('std. dev.', dtype=np.float64,
shape=std_dev.shape, data=std_dev)
# Close the results HDF5 file
xs_results.close()
def export_xs_data(self, filename='mgxs', directory='mgxs',
format='csv', groups='all', xs_type='macro'):
"""Export the multi-group cross section data to a file.
This method leverages the functionality in the Pandas library to export
the multi-group cross section data in a variety of output file formats
for storage and/or post-processing.
Parameters
----------
filename : str
Filename for the exported file. Defaults to 'mgxs'.
directory : str
Directory for the exported file. Defaults to 'mgxs'.
format : {'csv', 'excel', 'pickle', 'latex'}
The format for the exported data file. Defaults to 'csv'.
groups : Iterable of Integral or 'all'
Energy groups of interest. Defaults to 'all'.
xs_type: {'macro', 'micro'}
Store the macro or micro cross section in units of cm^-1 or barns.
Defaults to 'macro'.
"""
cv.check_type('filename', filename, str)
cv.check_type('directory', directory, str)
cv.check_value('format', format, ['csv', 'excel', 'pickle', 'latex'])
cv.check_value('xs_type', xs_type, ['macro', 'micro'])
# Make directory if it does not exist
if not os.path.exists(directory):
os.makedirs(directory)
filename = os.path.join(directory, filename)
filename = filename.replace(' ', '-')
# Get a Pandas DataFrame for the data
df = self.get_pandas_dataframe(groups=groups, xs_type=xs_type)
# Export the data using Pandas IO API
if format == 'csv':
df.to_csv(filename + '.csv', index=False)
elif format == 'excel':
if self.domain_type == 'mesh':
df.to_excel(filename + '.xls')
else:
df.to_excel(filename + '.xls', index=False)
elif format == 'pickle':
df.to_pickle(filename + '.pkl')
elif format == 'latex':
if self.domain_type == 'distribcell':
msg = 'Unable to export distribcell multi-group cross section' \
'data to a LaTeX table'
raise NotImplementedError(msg)
df.to_latex(filename + '.tex', bold_rows=True,
longtable=True, index=False)
# Surround LaTeX table with code needed to run pdflatex
with open(filename + '.tex', 'r') as original:
data = original.read()
with open(filename + '.tex', 'w') as modified:
modified.write(
'\\documentclass[preview, 12pt, border=1mm]{standalone}\n')
modified.write('\\usepackage{caption}\n')
modified.write('\\usepackage{longtable}\n')
modified.write('\\usepackage{booktabs}\n')
modified.write('\\begin{document}\n\n')
modified.write(data)
modified.write('\n\\end{document}')
    def get_pandas_dataframe(self, groups='all', nuclides='all',
                             xs_type='macro', paths=True):
        """Build a Pandas DataFrame for the MGXS data.
        This method leverages :meth:`openmc.Tally.get_pandas_dataframe`, but
        renames the columns with terminology appropriate for cross section data.
        Parameters
        ----------
        groups : Iterable of Integral or 'all'
            Energy groups of interest. Defaults to 'all'.
        nuclides : Iterable of str or 'all' or 'sum'
            The nuclides of the cross-sections to include in the dataframe. This
            may be a list of nuclide name strings (e.g., ['U235', 'U238']).
            The special string 'all' will include the cross sections for all
            nuclides in the spatial domain. The special string 'sum' will
            include the cross sections summed over all nuclides. Defaults
            to 'all'.
        xs_type: {'macro', 'micro'}
            Return macro or micro cross section in units of cm^-1 or barns.
            Defaults to 'macro'.
        paths : bool, optional
            Construct columns for distribcell tally filters (default is True).
            The geometric information in the Summary object is embedded into
            a Multi-index column with a geometric "path" to each distribcell
            instance.
        Returns
        -------
        pandas.DataFrame
            A Pandas DataFrame for the cross section data.
        Raises
        ------
        ValueError
            When this method is called before the multi-group cross section is
            computed from tally data.
        """
        # Validate user input before touching any tally data
        if not isinstance(groups, str):
            cv.check_iterable_type('groups', groups, Integral)
        if nuclides != 'all' and nuclides != 'sum':
            cv.check_iterable_type('nuclides', nuclides, str)
        cv.check_value('xs_type', xs_type, ['macro', 'micro'])
        # Get a Pandas DataFrame from the derived xs tally
        if self.by_nuclide and nuclides == 'sum':
            # Use tally summation to sum across all nuclides
            xs_tally = self.xs_tally.summation(nuclides=self.get_nuclides())
            df = xs_tally.get_pandas_dataframe(paths=paths)
            # Remove nuclide column since it is homogeneous and redundant
            # (mesh dataframes use a MultiIndex, hence level=0 there)
            if self.domain_type == 'mesh':
                df.drop('sum(nuclide)', axis=1, level=0, inplace=True)
            else:
                df.drop('sum(nuclide)', axis=1, inplace=True)
        # If the user requested a specific set of nuclides
        elif self.by_nuclide and nuclides != 'all':
            xs_tally = self.xs_tally.get_slice(nuclides=nuclides)
            df = xs_tally.get_pandas_dataframe(paths=paths)
        # If the user requested all nuclides, keep nuclide column in dataframe
        else:
            df = self.xs_tally.get_pandas_dataframe(paths=paths)
        # Remove the score column since it is homogeneous and redundant
        if self.domain_type == 'mesh':
            df = df.drop('score', axis=1, level=0)
        else:
            df = df.drop('score', axis=1)
        # Convert azimuthal, polar, energy in and energy out bin values in to
        # bin indices
        columns = self._df_convert_columns_to_bins(df)
        # Select out those groups the user requested
        if not isinstance(groups, str):
            if 'group in' in df:
                df = df[df['group in'].isin(groups)]
            if 'group out' in df:
                df = df[df['group out'].isin(groups)]
        # If user requested micro cross sections, divide out the atom densities
        if xs_type == 'micro' and self._divide_by_density:
            if self.by_nuclide:
                densities = self.get_nuclide_densities(nuclides)
            else:
                densities = self.get_nuclide_densities('sum')
            # Repeat per score, then tile so the divisor lines up row-by-row
            # with the dataframe's (nuclide, score) ordering
            densities = np.repeat(densities, len(self.rxn_rate_tally.scores))
            tile_factor = int(df.shape[0] / len(densities))
            df['mean'] /= np.tile(densities, tile_factor)
            df['std. dev.'] /= np.tile(densities, tile_factor)
            # Replace NaNs by zeros (happens if nuclide density is zero)
            # NOTE(review): inplace replace on a column selection may stop
            # mutating df under pandas copy-on-write -- confirm and consider
            # df['mean'] = df['mean'].replace(...) instead
            df['mean'].replace(np.nan, 0.0, inplace=True)
            df['std. dev.'].replace(np.nan, 0.0, inplace=True)
        # Sort the dataframe by domain type id (e.g., distribcell id) and
        # energy groups such that data is from fast to thermal
        if self.domain_type == 'mesh':
            mesh_str = 'mesh {0}'.format(self.domain.id)
            df.sort_values(by=[(mesh_str, 'x'), (mesh_str, 'y'),
                               (mesh_str, 'z')] + columns, inplace=True)
        else:
            df.sort_values(by=[self.domain_type] + columns, inplace=True)
        return df
def get_units(self, xs_type='macro'):
"""This method returns the units of a MGXS based on a desired xs_type.
Parameters
----------
xs_type: {'macro', 'micro'}
Return the macro or micro cross section units.
Defaults to 'macro'.
Returns
-------
str
A string representing the units of the MGXS.
"""
cv.check_value('xs_type', xs_type, ['macro', 'micro'])
return 'cm^-1' if xs_type == 'macro' else 'barns'
class MatrixMGXS(MGXS):
"""An abstract multi-group cross section for some energy group structure
within some spatial domain. This class is specifically intended for
cross sections which depend on both the incoming and outgoing energy groups
and are therefore represented by matrices. Examples of this include the
scattering and nu-fission matrices.
This class can be used for both OpenMC input generation and tally data
post-processing to compute spatially-homogenized and energy-integrated
multi-group cross sections for multi-group neutronics calculations.
.. note:: Users should instantiate the subclasses of this abstract class.
Parameters
----------
domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
The domain for spatial homogenization
domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
The domain type for spatial homogenization
energy_groups : openmc.mgxs.EnergyGroups
The energy group structure for energy condensation
by_nuclide : bool
If true, computes cross sections for each nuclide in domain
name : str, optional
Name of the multi-group cross section. Used as a label to identify
tallies in OpenMC 'tallies.xml' file.
num_polar : Integral, optional
Number of equi-width polar angle bins for angle discretization;
defaults to one bin
num_azimuthal : Integral, optional
Number of equi-width azimuthal angle bins for angle discretization;
defaults to one bin
Attributes
----------
name : str, optional
Name of the multi-group cross section
rxn_type : str
Reaction type (e.g., 'total', 'nu-fission', etc.)
by_nuclide : bool
If true, computes cross sections for each nuclide in domain
domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
Domain for spatial homogenization
domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
Domain type for spatial homogenization
energy_groups : openmc.mgxs.EnergyGroups
Energy group structure for energy condensation
num_polar : Integral
Number of equi-width polar angle bins for angle discretization
num_azimuthal : Integral
Number of equi-width azimuthal angle bins for angle discretization
tally_trigger : openmc.Trigger
An (optional) tally precision trigger given to each tally used to
compute the cross section
scores : list of str
The scores in each tally used to compute the multi-group cross section
filters : list of openmc.Filter
The filters in each tally used to compute the multi-group cross section
tally_keys : list of str
The keys into the tallies dictionary for each tally used to compute
the multi-group cross section
estimator : {'tracklength', 'collision', 'analog'}
The tally estimator used to compute the multi-group cross section
tallies : collections.OrderedDict
OpenMC tallies needed to compute the multi-group cross section
rxn_rate_tally : openmc.Tally
Derived tally for the reaction rate tally used in the numerator to
compute the multi-group cross section. This attribute is None
unless the multi-group cross section has been computed.
xs_tally : openmc.Tally
Derived tally for the multi-group cross section. This attribute
is None unless the multi-group cross section has been computed.
num_subdomains : int
The number of subdomains is unity for 'material', 'cell' and 'universe'
domain types. This is equal to the number of cell instances
for 'distribcell' domain types (it is equal to unity prior to loading
tally data from a statepoint file) and the number of mesh cells for
'mesh' domain types.
num_nuclides : int
The number of nuclides for which the multi-group cross section is
being tracked. This is unity if the by_nuclide attribute is False.
nuclides : Iterable of str or 'sum'
The optional user-specified nuclides for which to compute cross
sections (e.g., 'U238', 'O16'). If by_nuclide is True but nuclides
are not specified by the user, all nuclides in the spatial domain
are included. This attribute is 'sum' if by_nuclide is false.
sparse : bool
Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format
for compressed data storage
loaded_sp : bool
Whether or not a statepoint file has been loaded with tally data
derived : bool
Whether or not the MGXS is merged from one or more other MGXS
hdf5_key : str
The key used to index multi-group cross sections in an HDF5 data store
"""
@property
def _dont_squeeze(self):
"""Create a tuple of axes which should not be removed during the get_xs
process
"""
if self.num_polar > 1 or self.num_azimuthal > 1:
return (0, 1, 3, 4)
else:
return (1, 2)
@property
def get_xs(self, in_groups='all', out_groups='all', subdomains='all',
nuclides='all', xs_type='macro', order_groups='increasing',
row_column='inout', value='mean', squeeze=True, **kwargs):
"""Returns an array of multi-group cross sections.
This method constructs a 4D NumPy array for the requested
multi-group cross section data for one or more subdomains
(1st dimension), energy groups in (2nd dimension), energy groups out
(3rd dimension), and nuclides (4th dimension).
Parameters
----------
in_groups : Iterable of Integral or 'all'
Incoming energy groups of interest. Defaults to 'all'.
out_groups : Iterable of Integral or 'all'
Outgoing energy groups of interest. Defaults to 'all'.
subdomains : Iterable of Integral or 'all'
Subdomain IDs of interest. Defaults to 'all'.
nuclides : Iterable of str or 'all' or 'sum'
A list of nuclide name strings (e.g., ['U235', 'U238']). The
special string 'all' will return the cross sections for all
nuclides in the spatial domain. The special string 'sum' will
return the cross section summed over all nuclides. Defaults to
'all'.
xs_type: {'macro', 'micro'}
Return the macro or micro cross section in units of cm^-1 or barns.
Defaults to 'macro'.
order_groups: {'increasing', 'decreasing'}
Return the cross section indexed according to increasing or
decreasing energy groups (decreasing or increasing energies).
Defaults to 'increasing'.
row_column: {'inout', 'outin'}
Return the cross section indexed first by incoming group and
second by outgoing group ('inout'), or vice versa ('outin').
Defaults to 'inout'.
value : {'mean', 'std_dev', 'rel_err'}
A string for the type of value to return. Defaults to 'mean'.
squeeze : bool
A boolean representing whether to eliminate the extra dimensions
of the multi-dimensional array to be returned. Defaults to True.
Returns
-------
numpy.ndarray
A NumPy array of the multi-group cross section indexed in the order
each group and subdomain is listed in the parameters.
Raises
------
ValueError
When this method is called before the multi-group cross section is
computed from tally data.
"""
cv.check_value('value', value, ['mean', 'std_dev', 'rel_err'])
cv.check_value('xs_type', xs_type, ['macro', 'micro'])
# FIXME: Unable to get microscopic xs for mesh domain because the mesh
# cells do not know the nuclide densities in each mesh cell.
if self.domain_type == 'mesh' and xs_type == 'micro':
msg = 'Unable to get micro xs for mesh domain since the mesh ' \
'cells do not know the nuclide densities in each mesh cell.'
raise ValueError(msg)
filters = []
filter_bins = []
# Construct a collection of the domain filter bins
if not isinstance(subdomains, str):
cv.check_iterable_type('subdomains', subdomains, Integral,
max_depth=3)
filters.append(_DOMAIN_TO_FILTER[self.domain_type])
subdomain_bins = []
for subdomain in subdomains:
subdomain_bins.append(subdomain)
filter_bins.append(tuple(subdomain_bins))
# Construct list of energy group bounds tuples for all requested groups
if not isinstance(in_groups, str):
cv.check_iterable_type('groups', in_groups, Integral)
filters.append(openmc.EnergyFilter)
energy_bins = []
for group in in_groups:
energy_bins.append((self.energy_groups.get_group_bounds(group),))
filter_bins.append(tuple(energy_bins))
# Construct list of energy group bounds tuples for all requested groups
if not isinstance(out_groups, str):
cv.check_iterable_type('groups', out_groups, Integral)
for group in out_groups:
filters.append(openmc.EnergyoutFilter)
filter_bins.append((
self.energy_groups.get_group_bounds(group),))
# Construct a collection of the nuclides to retrieve from the xs tally
if self.by_nuclide:
if nuclides == 'all' or nuclides == 'sum' or nuclides == ['sum']:
query_nuclides = self.get_nuclides()
else:
query_nuclides = nuclides
else:
query_nuclides = ['total']
# Use tally summation if user requested the sum for all nuclides
if nuclides == 'sum' or nuclides == ['sum']:
xs_tally = self.xs_tally.summation(nuclides=query_nuclides)
xs = xs_tally.get_values(filters=filters, filter_bins=filter_bins,
value=value)
else:
xs = self.xs_tally.get_values(filters=filters,
filter_bins=filter_bins,
nuclides=query_nuclides, value=value)
# Divide by atom number densities for microscopic cross sections
if xs_type == 'micro' and self._divide_by_density:
if self.by_nuclide:
densities = self.get_nuclide_densities(nuclides)
else:
densities = self.get_nuclide_densities('sum')
if value == 'mean' or value == 'std_dev':
xs /= densities[np.newaxis, :, np.newaxis]
# Eliminate the trivial score dimension
xs = np.squeeze(xs, axis=len(xs.shape) - 1)
xs = np.nan_to_num(xs)
if in_groups == 'all':
num_in_groups = self.num_groups
else:
num_in_groups = len(in_groups)
if out_groups == 'all':
num_out_groups = self.num_groups
else:
num_out_groups = len(out_groups)
# Reshape tally data array with separate axes for domain and energy
# Accomodate the polar and azimuthal bins if needed
num_subdomains = int(xs.shape[0] / (num_in_groups * num_out_groups *
self.num_polar *
self.num_azimuthal))
if self.num_polar > 1 or self.num_azimuthal > 1:
new_shape = (self.num_polar, self.num_azimuthal, num_subdomains,
num_in_groups, num_out_groups)
new_shape += xs.shape[1:]
xs = np.reshape(xs, new_shape)
# Transpose the matrix if requested by user
if row_column == 'outin':
xs = np.swapaxes(xs, 3, 4)
else:
new_shape = (num_subdomains, num_in_groups, num_out_groups)
new_shape += xs.shape[1:]
xs = np.reshape(xs, new_shape)
# Transpose the matrix if requested by user
if row_column == 'outin':
xs = np.swapaxes(xs, 1, 2)
# Reverse data if user requested increasing energy groups since
# tally data is stored in order of increasing energies
if order_groups == 'increasing':
xs = xs[..., ::-1, ::-1, :]
if squeeze:
# We want to squeeze out everything but the polar, azimuthal,
# and in/out energy group data.
xs = self._squeeze_xs(xs)
return xs
def get_slice(self, nuclides=[], in_groups=[], out_groups=[]):
"""Build a sliced MatrixMGXS object for the specified nuclides and
energy groups.
This method constructs a new MGXS to encapsulate a subset of the data
represented by this MGXS. The subset of data to include in the tally
slice is determined by the nuclides and energy groups specified in
the input parameters.
Parameters
----------
nuclides : list of str
A list of nuclide name strings
(e.g., ['U235', 'U238']; default is [])
in_groups : list of int
A list of incoming energy group indices starting at 1 for the high
energies (e.g., [1, 2, 3]; default is [])
out_groups : list of int
A list of outgoing energy group indices starting at 1 for the high
energies (e.g., [1, 2, 3]; default is [])
Returns
-------
openmc.mgxs.MatrixMGXS
A new MatrixMGXS object which encapsulates the subset of data
requested for the nuclide(s) and/or energy group(s) requested in
the parameters.
"""
# Call super class method and null out derived tallies
slice_xs = super().get_slice(nuclides, in_groups)
slice_xs._rxn_rate_tally = None
slice_xs._xs_tally = None
# Slice outgoing energy groups if needed
if len(out_groups) != 0:
filter_bins = []
for group in out_groups:
group_bounds = self.energy_groups.get_group_bounds(group)
filter_bins.append(group_bounds)
filter_bins = [tuple(filter_bins)]
# Slice each of the tallies across energyout groups
for tally_type, tally in slice_xs.tallies.items():
if tally.contains_filter(openmc.EnergyoutFilter):
tally_slice = tally.get_slice(
filters=[openmc.EnergyoutFilter],
filter_bins=filter_bins)
slice_xs.tallies[tally_type] = tally_slice
slice_xs.sparse = self.sparse
return slice_xs
def print_xs(self, subdomains='all', nuclides='all', xs_type='macro'):
"""Prints a string representation for the multi-group cross section.
Parameters
----------
subdomains : Iterable of Integral or 'all'
The subdomain IDs of the cross sections to include in the report.
Defaults to 'all'.
nuclides : Iterable of str or 'all' or 'sum'
The nuclides of the cross-sections to include in the report. This
may be a list of nuclide name strings (e.g., ['U235', 'U238']).
The special string 'all' will report the cross sections for all
nuclides in the spatial domain. The special string 'sum' will
report the cross sections summed over all nuclides. Defaults to
'all'.
xs_type: {'macro', 'micro'}
Return the macro or micro cross section in units of cm^-1 or barns.
Defaults to 'macro'.
"""
# Construct a collection of the subdomains to report
if not isinstance(subdomains, str):
cv.check_iterable_type('subdomains', subdomains, Integral)
elif self.domain_type == 'distribcell':
subdomains = np.arange(self.num_subdomains, dtype=np.int)
elif self.domain_type == 'mesh':
subdomains = list(self.domain.indices)
else:
subdomains = [self.domain.id]
# Construct a collection of the nuclides to report
if self.by_nuclide:
if nuclides == 'all':
nuclides = self.get_nuclides()
if nuclides == 'sum':
nuclides = ['sum']
else:
cv.check_iterable_type('nuclides', nuclides, str)
else:
nuclides = ['sum']
cv.check_value('xs_type', xs_type, ['macro', 'micro'])
# Build header for string with type and domain info
string = 'Multi-Group XS\n'
string += '{0: <16}=\t{1}\n'.format('\tReaction Type', self.rxn_type)
string += '{0: <16}=\t{1}\n'.format('\tDomain Type', self.domain_type)
string += '{0: <16}=\t{1}\n'.format('\tDomain ID', self.domain.id)
# Generate the header for an individual XS
xs_header = '\tCross Sections [{0}]:'.format(self.get_units(xs_type))
# If cross section data has not been computed, only print string header
if self.tallies is None:
print(string)
return
string += '{0: <16}\n'.format('\tEnergy Groups:')
template = '{0: <12}Group {1} [{2: <10} - {3: <10}eV]\n'
# Loop over energy groups ranges
for group in range(1, self.num_groups + 1):
bounds = self.energy_groups.get_group_bounds(group)
string += template.format('', group, bounds[0], bounds[1])
# Set polar and azimuthal bins if necessary
if self.num_polar > 1 or self.num_azimuthal > 1:
pol_bins = np.linspace(0., np.pi, num=self.num_polar + 1,
endpoint=True)
azi_bins = np.linspace(-np.pi, np.pi, num=self.num_azimuthal + 1,
endpoint=True)
# Loop over all subdomains
for subdomain in subdomains:
if self.domain_type == 'distribcell' or self.domain_type == 'mesh':
string += '{0: <16}=\t{1}\n'.format('\tSubdomain', subdomain)
# Loop over all Nuclides
for nuclide in nuclides:
# Build header for nuclide type
if xs_type != 'sum':
string += '{0: <16}=\t{1}\n'.format('\tNuclide', nuclide)
# Build header for cross section type
string += '{0: <16}\n'.format(xs_header)
template = '{0: <12}Group {1} -> Group {2}:\t\t'
average_xs = self.get_xs(nuclides=[nuclide],
subdomains=[subdomain],
xs_type=xs_type, value='mean')
rel_err_xs = self.get_xs(nuclides=[nuclide],
subdomains=[subdomain],
xs_type=xs_type, value='rel_err')
rel_err_xs = rel_err_xs * 100.
if self.num_polar > 1 or self.num_azimuthal > 1:
# Loop over polar, azi, and in/out energy group ranges
for pol in range(len(pol_bins) - 1):
pol_low, pol_high = pol_bins[pol: pol + 2]
for azi in range(len(azi_bins) - 1):
azi_low, azi_high = azi_bins[azi: azi + 2]
string += '\t\tPolar Angle: [{0:5f} - {1:5f}]'.format(
pol_low, pol_high) + \
'\tAzimuthal Angle: [{0:5f} - {1:5f}]'.format(
azi_low, azi_high) + '\n'
for in_group in range(1, self.num_groups + 1):
for out_group in range(1, self.num_groups + 1):
string += '\t' + template.format('',
in_group,
out_group)
string += '{0:.2e} +/- {1:.2e}%'.format(
average_xs[pol, azi, in_group - 1,
out_group - 1],
rel_err_xs[pol, azi, in_group - 1,
out_group - 1])
string += '\n'
string += '\n'
string += '\n'
else:
# Loop over incoming/outgoing energy groups ranges
for in_group in range(1, self.num_groups + 1):
for out_group in range(1, self.num_groups + 1):
string += template.format('', in_group, out_group)
string += '{0:.2e} +/- {1:.2e}%'.format(
average_xs[in_group - 1, out_group - 1],
rel_err_xs[in_group - 1, out_group - 1])
string += '\n'
string += '\n'
string += '\n'
string += '\n'
print(string)
class TotalXS(MGXS):
    r"""A total multi-group cross section.

    This class can be used for both OpenMC input generation and tally data
    post-processing to compute spatially-homogenized and energy-integrated
    multi-group total cross sections for multi-group neutronics calculations. At
    a minimum, one needs to set the :attr:`TotalXS.energy_groups` and
    :attr:`TotalXS.domain` properties. Tallies for the flux and appropriate
    reaction rates over the specified domain are generated automatically via the
    :attr:`TotalXS.tallies` property, which can then be appended to a
    :class:`openmc.Tallies` instance.

    For post-processing, the :meth:`MGXS.load_from_statepoint` will pull in the
    necessary data to compute multi-group cross sections from a
    :class:`openmc.StatePoint` instance. The derived multi-group cross section
    can then be obtained from the :attr:`TotalXS.xs_tally` property.

    For a spatial domain :math:`V` and energy group :math:`[E_g,E_{g-1}]`, the
    total cross section is calculated as:

    .. math::

       \frac{\int_{r \in V} dr \int_{4\pi} d\Omega \int_{E_g}^{E_{g-1}} dE \;
       \sigma_t (r, E) \psi (r, E, \Omega)}{\int_{r \in V} dr \int_{4\pi}
       d\Omega \int_{E_g}^{E_{g-1}} dE \; \psi (r, E, \Omega)}.

    Parameters
    ----------
    domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
        The domain for spatial homogenization
    domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
        The domain type for spatial homogenization
    groups : openmc.mgxs.EnergyGroups
        The energy group structure for energy condensation
    by_nuclide : bool
        If true, computes cross sections for each nuclide in domain
    name : str, optional
        Name of the multi-group cross section. Used as a label to identify
        tallies in OpenMC 'tallies.xml' file.
    num_polar : Integral, optional
        Number of equi-width polar angle bins for angle discretization;
        defaults to one bin
    num_azimuthal : Integral, optional
        Number of equi-width azimuthal angle bins for angle discretization;
        defaults to one bin

    Attributes
    ----------
    name : str, optional
        Name of the multi-group cross section
    rxn_type : str
        Reaction type (e.g., 'total', 'nu-fission', etc.)
    by_nuclide : bool
        If true, computes cross sections for each nuclide in domain
    domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
        Domain for spatial homogenization
    domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
        Domain type for spatial homogenization
    energy_groups : openmc.mgxs.EnergyGroups
        Energy group structure for energy condensation
    num_polar : Integral
        Number of equi-width polar angle bins for angle discretization
    num_azimuthal : Integral
        Number of equi-width azimuthal angle bins for angle discretization
    tally_trigger : openmc.Trigger
        An (optional) tally precision trigger given to each tally used to
        compute the cross section
    scores : list of str
        The scores in each tally used to compute the multi-group cross section
    filters : list of openmc.Filter
        The filters in each tally used to compute the multi-group cross section
    tally_keys : list of str
        The keys into the tallies dictionary for each tally used to compute
        the multi-group cross section
    estimator : {'tracklength', 'collision', 'analog'}
        The tally estimator used to compute the multi-group cross section
    tallies : collections.OrderedDict
        OpenMC tallies needed to compute the multi-group cross section. The keys
        are strings listed in the :attr:`TotalXS.tally_keys` property and values
        are instances of :class:`openmc.Tally`.
    rxn_rate_tally : openmc.Tally
        Derived tally for the reaction rate tally used in the numerator to
        compute the multi-group cross section. This attribute is None
        unless the multi-group cross section has been computed.
    xs_tally : openmc.Tally
        Derived tally for the multi-group cross section. This attribute
        is None unless the multi-group cross section has been computed.
    num_subdomains : int
        The number of subdomains is unity for 'material', 'cell' and 'universe'
        domain types. This is equal to the number of cell instances
        for 'distribcell' domain types (it is equal to unity prior to loading
        tally data from a statepoint file).
    num_nuclides : int
        The number of nuclides for which the multi-group cross section is
        being tracked. This is unity if the by_nuclide attribute is False.
    nuclides : Iterable of str or 'sum'
        The optional user-specified nuclides for which to compute cross
        sections (e.g., 'U238', 'O16'). If by_nuclide is True but nuclides
        are not specified by the user, all nuclides in the spatial domain
        are included. This attribute is 'sum' if by_nuclide is false.
    sparse : bool
        Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format
        for compressed data storage
    loaded_sp : bool
        Whether or not a statepoint file has been loaded with tally data
    derived : bool
        Whether or not the MGXS is merged from one or more other MGXS
    hdf5_key : str
        The key used to index multi-group cross sections in an HDF5 data store
    """
    # NOTE(review): only the class docstring is visible in this chunk; the
    # reaction-type setup (presumably setting rxn_type to 'total', e.g. via
    # __init__) appears to have been stripped — confirm against the full file.
class TransportXS(MGXS):
    r"""A transport-corrected total multi-group cross section.

    This class can be used for both OpenMC input generation and tally data
    post-processing to compute spatially-homogenized and energy-integrated
    multi-group cross sections for multi-group neutronics calculations. At a
    minimum, one needs to set the :attr:`TransportXS.energy_groups` and
    :attr:`TransportXS.domain` properties. Tallies for the flux and appropriate
    reaction rates over the specified domain are generated automatically via the
    :attr:`TransportXS.tallies` property, which can then be appended to a
    :class:`openmc.Tallies` instance.

    For post-processing, the :meth:`MGXS.load_from_statepoint` will pull in the
    necessary data to compute multi-group cross sections from a
    :class:`openmc.StatePoint` instance. The derived multi-group cross section
    can then be obtained from the :attr:`TransportXS.xs_tally` property.

    For a spatial domain :math:`V` and energy group :math:`[E_g,E_{g-1}]`, the
    transport-corrected total cross section is calculated as:

    .. math::

       \begin{aligned}
       \langle \sigma_t \phi \rangle &= \int_{r \in V} dr \int_{4\pi}
       d\Omega \int_{E_g}^{E_{g-1}} dE \sigma_t (r, E) \psi
       (r, E, \Omega) \\
       \langle \sigma_{s1} \phi \rangle &= \int_{r \in V} dr
       \int_{4\pi} d\Omega \int_{E_g}^{E_{g-1}} dE \int_{4\pi}
       d\Omega' \int_0^\infty dE' \int_{-1}^1 d\mu \; \mu \sigma_s
       (r, E' \rightarrow E, \Omega' \cdot \Omega)
       \phi (r, E', \Omega) \\
       \langle \phi \rangle &= \int_{r \in V} dr \int_{4\pi} d\Omega
       \int_{E_g}^{E_{g-1}} dE \; \psi (r, E, \Omega) \\
       \sigma_{tr} &= \frac{\langle \sigma_t \phi \rangle - \langle \sigma_{s1}
       \phi \rangle}{\langle \phi \rangle}
       \end{aligned}

    To incorporate the effect of scattering multiplication in the above
    relation, the `nu` parameter can be set to `True`.

    Parameters
    ----------
    domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
        The domain for spatial homogenization
    domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
        The domain type for spatial homogenization
    groups : openmc.mgxs.EnergyGroups
        The energy group structure for energy condensation
    nu : bool
        If True, the cross section data will include neutron multiplication;
        defaults to False.
    by_nuclide : bool
        If true, computes cross sections for each nuclide in domain
    name : str, optional
        Name of the multi-group cross section. Used as a label to identify
        tallies in OpenMC 'tallies.xml' file.
    num_polar : Integral, optional
        Number of equi-width polar angle bins for angle discretization;
        defaults to one bin
    num_azimuthal : Integral, optional
        Number of equi-width azimuthal angle bins for angle discretization;
        defaults to one bin

    Attributes
    ----------
    name : str, optional
        Name of the multi-group cross section
    rxn_type : str
        Reaction type (e.g., 'total', 'nu-fission', etc.)
    nu : bool
        If True, the cross section data will include neutron multiplication
    by_nuclide : bool
        If true, computes cross sections for each nuclide in domain
    domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
        Domain for spatial homogenization
    domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
        Domain type for spatial homogenization
    energy_groups : openmc.mgxs.EnergyGroups
        Energy group structure for energy condensation
    num_polar : Integral
        Number of equi-width polar angle bins for angle discretization
    num_azimuthal : Integral
        Number of equi-width azimuthal angle bins for angle discretization
    tally_trigger : openmc.Trigger
        An (optional) tally precision trigger given to each tally used to
        compute the cross section
    scores : list of str
        The scores in each tally used to compute the multi-group cross section
    filters : list of openmc.Filter
        The filters in each tally used to compute the multi-group cross section
    tally_keys : list of str
        The keys into the tallies dictionary for each tally used to compute
        the multi-group cross section
    estimator : 'analog'
        The tally estimator used to compute the multi-group cross section
    tallies : collections.OrderedDict
        OpenMC tallies needed to compute the multi-group cross section. The keys
        are strings listed in the :attr:`TransportXS.tally_keys` property and
        values are instances of :class:`openmc.Tally`.
    rxn_rate_tally : openmc.Tally
        Derived tally for the reaction rate tally used in the numerator to
        compute the multi-group cross section. This attribute is None
        unless the multi-group cross section has been computed.
    xs_tally : openmc.Tally
        Derived tally for the multi-group cross section. This attribute
        is None unless the multi-group cross section has been computed.
    num_subdomains : int
        The number of subdomains is unity for 'material', 'cell' and 'universe'
        domain types. This is equal to the number of cell instances
        for 'distribcell' domain types (it is equal to unity prior to loading
        tally data from a statepoint file).
    num_nuclides : int
        The number of nuclides for which the multi-group cross section is
        being tracked. This is unity if the by_nuclide attribute is False.
    nuclides : Iterable of str or 'sum'
        The optional user-specified nuclides for which to compute cross
        sections (e.g., 'U238', 'O16'). If by_nuclide is True but nuclides
        are not specified by the user, all nuclides in the spatial domain
        are included. This attribute is 'sum' if by_nuclide is false.
    sparse : bool
        Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format
        for compressed data storage
    loaded_sp : bool
        Whether or not a statepoint file has been loaded with tally data
    derived : bool
        Whether or not the MGXS is merged from one or more other MGXS
    hdf5_key : str
        The key used to index multi-group cross sections in an HDF5 data store
    """
@property
@property
@property
@property
@property
@property
@nu.setter
class DiffusionCoefficient(TransportXS):
    r"""A diffusion coefficient multi-group cross section.

    This class can be used for both OpenMC input generation and tally data
    post-processing to compute spatially-homogenized and energy-integrated
    multi-group cross sections for multi-group neutronics calculations. At a
    minimum, one needs to set the :attr:`DiffusionCoefficient.energy_groups` and
    :attr:`DiffusionCoefficient.domain` properties. Tallies for the flux and appropriate
    reaction rates over the specified domain are generated automatically via the
    :attr:`DiffusionCoefficient.tallies` property, which can then be appended to a
    :class:`openmc.Tallies` instance.

    For post-processing, the :meth:`MGXS.load_from_statepoint` will pull in the
    necessary data to compute multi-group cross sections from a
    :class:`openmc.StatePoint` instance. The derived multi-group cross section
    can then be obtained from the :attr:`DiffusionCoefficient.xs_tally` property.

    For a spatial domain :math:`V` and energy group :math:`[E_g,E_{g-1}]`, the
    diffusion coefficient is calculated as:

    .. math::

       \begin{aligned}
       \langle \sigma_t \phi \rangle &= \int_{r \in V} dr \int_{4\pi}
       d\Omega \int_{E_g}^{E_{g-1}} dE \sigma_t (r, E) \psi
       (r, E, \Omega) \\
       \langle \sigma_{s1} \phi \rangle &= \int_{r \in V} dr
       \int_{4\pi} d\Omega \int_{E_g}^{E_{g-1}} dE \int_{4\pi}
       d\Omega' \int_0^\infty dE' \int_{-1}^1 d\mu \; \mu \sigma_s
       (r, E' \rightarrow E, \Omega' \cdot \Omega)
       \phi (r, E', \Omega) \\
       \langle \phi \rangle &= \int_{r \in V} dr \int_{4\pi} d\Omega
       \int_{E_g}^{E_{g-1}} dE \; \psi (r, E, \Omega) \\
       \sigma_{tr} &= \frac{\langle \sigma_t \phi \rangle - \langle \sigma_{s1}
       \phi \rangle}{\langle \phi \rangle} \\
       D = \frac{1}{3 \sigma_{tr}}
       \end{aligned}

    To incorporate the effect of scattering multiplication in the above
    relation, the `nu` parameter can be set to `True`.

    .. versionadded:: 0.12.1

    Parameters
    ----------
    domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
        The domain for spatial homogenization
    domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
        The domain type for spatial homogenization
    groups : openmc.mgxs.EnergyGroups
        The energy group structure for energy condensation
    nu : bool
        If True, the cross section data will include neutron multiplication;
        defaults to False.
    by_nuclide : bool
        If true, computes cross sections for each nuclide in domain
    name : str, optional
        Name of the multi-group cross section. Used as a label to identify
        tallies in OpenMC 'tallies.xml' file.
    num_polar : Integral, optional
        Number of equi-width polar angle bins for angle discretization;
        defaults to one bin
    num_azimuthal : Integral, optional
        Number of equi-width azimuthal angle bins for angle discretization;
        defaults to one bin

    Attributes
    ----------
    name : str, optional
        Name of the multi-group cross section
    rxn_type : str
        Reaction type (e.g., 'total', 'nu-fission', etc.)
    nu : bool
        If True, the cross section data will include neutron multiplication
    by_nuclide : bool
        If true, computes cross sections for each nuclide in domain
    domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
        Domain for spatial homogenization
    domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
        Domain type for spatial homogenization
    energy_groups : openmc.mgxs.EnergyGroups
        Energy group structure for energy condensation
    num_polar : Integral
        Number of equi-width polar angle bins for angle discretization
    num_azimuthal : Integral
        Number of equi-width azimuthal angle bins for angle discretization
    tally_trigger : openmc.Trigger
        An (optional) tally precision trigger given to each tally used to
        compute the cross section
    scores : list of str
        The scores in each tally used to compute the multi-group cross section
    filters : list of openmc.Filter
        The filters in each tally used to compute the multi-group cross section
    tally_keys : list of str
        The keys into the tallies dictionary for each tally used to compute
        the multi-group cross section
    estimator : 'analog'
        The tally estimator used to compute the multi-group cross section
    tallies : collections.OrderedDict
        OpenMC tallies needed to compute the multi-group cross section. The keys
        are strings listed in the :attr:`TransportXS.tally_keys` property and
        values are instances of :class:`openmc.Tally`.
    rxn_rate_tally : openmc.Tally
        Derived tally for the reaction rate tally used in the numerator to
        compute the multi-group cross section. This attribute is None
        unless the multi-group cross section has been computed.
    xs_tally : openmc.Tally
        Derived tally for the multi-group cross section. This attribute
        is None unless the multi-group cross section has been computed.
    num_subdomains : int
        The number of subdomains is unity for 'material', 'cell' and 'universe'
        domain types. This is equal to the number of cell instances
        for 'distribcell' domain types (it is equal to unity prior to loading
        tally data from a statepoint file).
    num_nuclides : int
        The number of nuclides for which the multi-group cross section is
        being tracked. This is unity if the by_nuclide attribute is False.
    nuclides : Iterable of str or 'sum'
        The optional user-specified nuclides for which to compute cross
        sections (e.g., 'U238', 'O16'). If by_nuclide is True but nuclides
        are not specified by the user, all nuclides in the spatial domain
        are included. This attribute is 'sum' if by_nuclide is false.
    sparse : bool
        Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format
        for compressed data storage
    loaded_sp : bool
        Whether or not a statepoint file has been loaded with tally data
    derived : bool
        Whether or not the MGXS is merged from one or more other MGXS
    hdf5_key : str
        The key used to index multi-group cross sections in an HDF5 data store
    """
@property
@property
class AbsorptionXS(MGXS):
    r"""An absorption multi-group cross section.

    Absorption is defined as all reactions that do not produce secondary
    neutrons (disappearance) plus fission reactions.

    This class can be used for both OpenMC input generation and tally data
    post-processing to compute spatially-homogenized and energy-integrated
    multi-group absorption cross sections for multi-group neutronics
    calculations. At a minimum, one needs to set the
    :attr:`AbsorptionXS.energy_groups` and :attr:`AbsorptionXS.domain`
    properties. Tallies for the flux and appropriate reaction rates over the
    specified domain are generated automatically via the
    :attr:`AbsorptionXS.tallies` property, which can then be appended to a
    :class:`openmc.Tallies` instance.

    For post-processing, the :meth:`MGXS.load_from_statepoint` will pull in the
    necessary data to compute multi-group cross sections from a
    :class:`openmc.StatePoint` instance. The derived multi-group cross section
    can then be obtained from the :attr:`AbsorptionXS.xs_tally` property.

    For a spatial domain :math:`V` and energy group :math:`[E_g,E_{g-1}]`, the
    absorption cross section is calculated as:

    .. math::

       \frac{\int_{r \in V} dr \int_{4\pi} d\Omega \int_{E_g}^{E_{g-1}} dE \;
       \sigma_a (r, E) \psi (r, E, \Omega)}{\int_{r \in V} dr \int_{4\pi}
       d\Omega \int_{E_g}^{E_{g-1}} dE \; \psi (r, E, \Omega)}.

    Parameters
    ----------
    domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
        The domain for spatial homogenization
    domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
        The domain type for spatial homogenization
    groups : openmc.mgxs.EnergyGroups
        The energy group structure for energy condensation
    by_nuclide : bool
        If true, computes cross sections for each nuclide in domain
    name : str, optional
        Name of the multi-group cross section. Used as a label to identify
        tallies in OpenMC 'tallies.xml' file.
    num_polar : Integral, optional
        Number of equi-width polar angle bins for angle discretization;
        defaults to one bin
    num_azimuthal : Integral, optional
        Number of equi-width azimuthal angle bins for angle discretization;
        defaults to one bin

    Attributes
    ----------
    name : str, optional
        Name of the multi-group cross section
    rxn_type : str
        Reaction type (e.g., 'total', 'nu-fission', etc.)
    by_nuclide : bool
        If true, computes cross sections for each nuclide in domain
    domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
        Domain for spatial homogenization
    domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
        Domain type for spatial homogenization
    energy_groups : openmc.mgxs.EnergyGroups
        Energy group structure for energy condensation
    num_polar : Integral
        Number of equi-width polar angle bins for angle discretization
    num_azimuthal : Integral
        Number of equi-width azimuthal angle bins for angle discretization
    tally_trigger : openmc.Trigger
        An (optional) tally precision trigger given to each tally used to
        compute the cross section
    scores : list of str
        The scores in each tally used to compute the multi-group cross section
    filters : list of openmc.Filter
        The filters in each tally used to compute the multi-group cross section
    tally_keys : list of str
        The keys into the tallies dictionary for each tally used to compute
        the multi-group cross section
    estimator : {'tracklength', 'collision', 'analog'}
        The tally estimator used to compute the multi-group cross section
    tallies : collections.OrderedDict
        OpenMC tallies needed to compute the multi-group cross section. The keys
        are strings listed in the :attr:`AbsorptionXS.tally_keys` property and
        values are instances of :class:`openmc.Tally`.
    rxn_rate_tally : openmc.Tally
        Derived tally for the reaction rate tally used in the numerator to
        compute the multi-group cross section. This attribute is None
        unless the multi-group cross section has been computed.
    xs_tally : openmc.Tally
        Derived tally for the multi-group cross section. This attribute
        is None unless the multi-group cross section has been computed.
    num_subdomains : int
        The number of subdomains is unity for 'material', 'cell' and 'universe'
        domain types. This is equal to the number of cell instances
        for 'distribcell' domain types (it is equal to unity prior to loading
        tally data from a statepoint file) and the number of mesh cells for
        'mesh' domain types.
    num_nuclides : int
        The number of nuclides for which the multi-group cross section is
        being tracked. This is unity if the by_nuclide attribute is False.
    nuclides : Iterable of str or 'sum'
        The optional user-specified nuclides for which to compute cross
        sections (e.g., 'U238', 'O16'). If by_nuclide is True but nuclides
        are not specified by the user, all nuclides in the spatial domain
        are included. This attribute is 'sum' if by_nuclide is false.
    sparse : bool
        Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format
        for compressed data storage
    loaded_sp : bool
        Whether or not a statepoint file has been loaded with tally data
    derived : bool
        Whether or not the MGXS is merged from one or more other MGXS
    hdf5_key : str
        The key used to index multi-group cross sections in an HDF5 data store
    """
    # NOTE(review): only the class docstring is visible in this chunk; the
    # reaction-type setup (presumably setting rxn_type to 'absorption') appears
    # to have been stripped — confirm against the full source file.
class CaptureXS(MGXS):
    r"""A capture multi-group cross section.

    The neutron capture reaction rate is defined as the difference between
    OpenMC's 'absorption' and 'fission' reaction rate score types. This includes
    not only radiative capture, but all forms of neutron disappearance aside
    from fission (i.e., MT > 100).

    This class can be used for both OpenMC input generation and tally data
    post-processing to compute spatially-homogenized and energy-integrated
    multi-group capture cross sections for multi-group neutronics
    calculations. At a minimum, one needs to set the
    :attr:`CaptureXS.energy_groups` and :attr:`CaptureXS.domain`
    properties. Tallies for the flux and appropriate reaction rates over the
    specified domain are generated automatically via the
    :attr:`CaptureXS.tallies` property, which can then be appended to a
    :class:`openmc.Tallies` instance.

    For post-processing, the :meth:`MGXS.load_from_statepoint` will pull in the
    necessary data to compute multi-group cross sections from a
    :class:`openmc.StatePoint` instance. The derived multi-group cross section
    can then be obtained from the :attr:`CaptureXS.xs_tally` property.

    For a spatial domain :math:`V` and energy group :math:`[E_g,E_{g-1}]`, the
    capture cross section is calculated as:

    .. math::

       \frac{\int_{r \in V} dr \int_{4\pi} d\Omega \int_{E_g}^{E_{g-1}} dE \;
       \left [ \sigma_a (r, E) \psi (r, E, \Omega) - \sigma_f (r, E) \psi (r, E,
       \Omega) \right ]}{\int_{r \in V} dr \int_{4\pi} d\Omega
       \int_{E_g}^{E_{g-1}} dE \; \psi (r, E, \Omega)}.

    Parameters
    ----------
    domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
        The domain for spatial homogenization
    domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
        The domain type for spatial homogenization
    groups : openmc.mgxs.EnergyGroups
        The energy group structure for energy condensation
    by_nuclide : bool
        If true, computes cross sections for each nuclide in domain
    name : str, optional
        Name of the multi-group cross section. Used as a label to identify
        tallies in OpenMC 'tallies.xml' file.
    num_polar : Integral, optional
        Number of equi-width polar angle bins for angle discretization;
        defaults to one bin
    num_azimuthal : Integral, optional
        Number of equi-width azimuthal angle bins for angle discretization;
        defaults to one bin

    Attributes
    ----------
    name : str, optional
        Name of the multi-group cross section
    rxn_type : str
        Reaction type (e.g., 'total', 'nu-fission', etc.)
    by_nuclide : bool
        If true, computes cross sections for each nuclide in domain
    domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
        Domain for spatial homogenization
    domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
        Domain type for spatial homogenization
    energy_groups : openmc.mgxs.EnergyGroups
        Energy group structure for energy condensation
    num_polar : Integral
        Number of equi-width polar angle bins for angle discretization
    num_azimuthal : Integral
        Number of equi-width azimuthal angle bins for angle discretization
    tally_trigger : openmc.Trigger
        An (optional) tally precision trigger given to each tally used to
        compute the cross section
    scores : list of str
        The scores in each tally used to compute the multi-group cross section
    filters : list of openmc.Filter
        The filters in each tally used to compute the multi-group cross section
    tally_keys : list of str
        The keys into the tallies dictionary for each tally used to compute
        the multi-group cross section
    estimator : {'tracklength', 'collision', 'analog'}
        The tally estimator used to compute the multi-group cross section
    tallies : collections.OrderedDict
        OpenMC tallies needed to compute the multi-group cross section. The keys
        are strings listed in the :attr:`CaptureXS.tally_keys` property and
        values are instances of :class:`openmc.Tally`.
    rxn_rate_tally : openmc.Tally
        Derived tally for the reaction rate tally used in the numerator to
        compute the multi-group cross section. This attribute is None
        unless the multi-group cross section has been computed.
    xs_tally : openmc.Tally
        Derived tally for the multi-group cross section. This attribute
        is None unless the multi-group cross section has been computed.
    num_subdomains : int
        The number of subdomains is unity for 'material', 'cell' and 'universe'
        domain types. This is equal to the number of cell instances
        for 'distribcell' domain types (it is equal to unity prior to loading
        tally data from a statepoint file).
    num_nuclides : int
        The number of nuclides for which the multi-group cross section is
        being tracked. This is unity if the by_nuclide attribute is False.
    nuclides : Iterable of str or 'sum'
        The optional user-specified nuclides for which to compute cross
        sections (e.g., 'U238', 'O16'). If by_nuclide is True but nuclides
        are not specified by the user, all nuclides in the spatial domain
        are included. This attribute is 'sum' if by_nuclide is false.
    sparse : bool
        Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format
        for compressed data storage
    loaded_sp : bool
        Whether or not a statepoint file has been loaded with tally data
    derived : bool
        Whether or not the MGXS is merged from one or more other MGXS
    hdf5_key : str
        The key used to index multi-group cross sections in an HDF5 data store
    """
    # NOTE(review): the two decorated property definitions below have no
    # bodies in this chunk -- presumably stripped during extraction. Confirm
    # against the full source before relying on this file.
    @property
    @property
class FissionXS(MGXS):
    r"""A fission multi-group cross section.

    This class can be used for both OpenMC input generation and tally data
    post-processing to compute spatially-homogenized and energy-integrated
    multi-group fission cross sections for multi-group neutronics
    calculations. At a minimum, one needs to set the
    :attr:`FissionXS.energy_groups` and :attr:`FissionXS.domain`
    properties. Tallies for the flux and appropriate reaction rates over the
    specified domain are generated automatically via the
    :attr:`FissionXS.tallies` property, which can then be appended to a
    :class:`openmc.Tallies` instance.

    For post-processing, the :meth:`MGXS.load_from_statepoint` will pull in the
    necessary data to compute multi-group cross sections from a
    :class:`openmc.StatePoint` instance. The derived multi-group cross section
    can then be obtained from the :attr:`FissionXS.xs_tally` property.

    For a spatial domain :math:`V` and energy group :math:`[E_g,E_{g-1}]`, the
    fission cross section is calculated as:

    .. math::

       \frac{\int_{r \in V} dr \int_{4\pi} d\Omega \int_{E_g}^{E_{g-1}} dE \;
       \sigma_f (r, E) \psi (r, E, \Omega)}{\int_{r \in V} dr \int_{4\pi}
       d\Omega \int_{E_g}^{E_{g-1}} dE \; \psi (r, E, \Omega)}.

    To incorporate the effect of neutron multiplication in the above
    relation, the `nu` parameter can be set to `True`.

    This class can also be used to gather a prompt-nu-fission cross section
    (which only includes the contributions from prompt neutrons). This is
    accomplished by setting the :attr:`FissionXS.prompt` attribute to `True`.
    Since the prompt-nu-fission cross section requires neutron multiplication,
    the `nu` parameter will automatically be set to `True` if `prompt` is also
    `True`.

    Parameters
    ----------
    domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
        The domain for spatial homogenization
    domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
        The domain type for spatial homogenization
    groups : openmc.mgxs.EnergyGroups
        The energy group structure for energy condensation
    nu : bool
        If True, the cross section data will include neutron multiplication;
        defaults to False
    prompt : bool
        If true, computes cross sections which only includes prompt neutrons;
        defaults to False which includes prompt and delayed in total. Setting
        this to True will also set nu to True
    by_nuclide : bool
        If true, computes cross sections for each nuclide in domain
    name : str, optional
        Name of the multi-group cross section. Used as a label to identify
        tallies in OpenMC 'tallies.xml' file.
    num_polar : Integral, optional
        Number of equi-width polar angle bins for angle discretization;
        defaults to one bin
    num_azimuthal : Integral, optional
        Number of equi-width azimuthal angle bins for angle discretization;
        defaults to one bin

    Attributes
    ----------
    name : str, optional
        Name of the multi-group cross section
    rxn_type : str
        Reaction type (e.g., 'total', 'nu-fission', etc.)
    nu : bool
        If True, the cross section data will include neutron multiplication
    prompt : bool
        If true, computes cross sections which only includes prompt neutrons
    by_nuclide : bool
        If true, computes cross sections for each nuclide in domain
    domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
        Domain for spatial homogenization
    domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
        Domain type for spatial homogenization
    energy_groups : openmc.mgxs.EnergyGroups
        Energy group structure for energy condensation
    num_polar : Integral
        Number of equi-width polar angle bins for angle discretization
    num_azimuthal : Integral
        Number of equi-width azimuthal angle bins for angle discretization
    tally_trigger : openmc.Trigger
        An (optional) tally precision trigger given to each tally used to
        compute the cross section
    scores : list of str
        The scores in each tally used to compute the multi-group cross section
    filters : list of openmc.Filter
        The filters in each tally used to compute the multi-group cross section
    tally_keys : list of str
        The keys into the tallies dictionary for each tally used to compute
        the multi-group cross section
    estimator : {'tracklength', 'collision', 'analog'}
        The tally estimator used to compute the multi-group cross section
    tallies : collections.OrderedDict
        OpenMC tallies needed to compute the multi-group cross section. The keys
        are strings listed in the :attr:`FissionXS.tally_keys` property and
        values are instances of :class:`openmc.Tally`.
    rxn_rate_tally : openmc.Tally
        Derived tally for the reaction rate tally used in the numerator to
        compute the multi-group cross section. This attribute is None
        unless the multi-group cross section has been computed.
    xs_tally : openmc.Tally
        Derived tally for the multi-group cross section. This attribute
        is None unless the multi-group cross section has been computed.
    num_subdomains : int
        The number of subdomains is unity for 'material', 'cell' and 'universe'
        domain types. This is equal to the number of cell instances
        for 'distribcell' domain types (it is equal to unity prior to loading
        tally data from a statepoint file).
    num_nuclides : int
        The number of nuclides for which the multi-group cross section is
        being tracked. This is unity if the by_nuclide attribute is False.
    nuclides : Iterable of str or 'sum'
        The optional user-specified nuclides for which to compute cross
        sections (e.g., 'U238', 'O16'). If by_nuclide is True but nuclides
        are not specified by the user, all nuclides in the spatial domain
        are included. This attribute is 'sum' if by_nuclide is false.
    sparse : bool
        Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format
        for compressed data storage
    loaded_sp : bool
        Whether or not a statepoint file has been loaded with tally data
    derived : bool
        Whether or not the MGXS is merged from one or more other MGXS
    hdf5_key : str
        The key used to index multi-group cross sections in an HDF5 data store
    """
    # NOTE(review): the decorated property/setter definitions below have no
    # bodies in this chunk -- presumably the `nu` and `prompt` accessors were
    # stripped during extraction. Confirm against the full source.
    @property
    @property
    @nu.setter
    @prompt.setter
class KappaFissionXS(MGXS):
    r"""A recoverable fission energy production rate multi-group cross section.

    The recoverable energy per fission, :math:`\kappa`, is defined as the
    fission product kinetic energy, prompt and delayed neutron kinetic energies,
    prompt and delayed :math:`\gamma`-ray total energies, and the total energy
    released by the delayed :math:`\beta` particles. The neutrino energy does
    not contribute to this response. The prompt and delayed :math:`\gamma`-rays
    are assumed to deposit their energy locally.

    This class can be used for both OpenMC input generation and tally data
    post-processing to compute spatially-homogenized and energy-integrated
    multi-group cross sections for multi-group neutronics calculations. At a
    minimum, one needs to set the :attr:`KappaFissionXS.energy_groups` and
    :attr:`KappaFissionXS.domain` properties. Tallies for the flux and appropriate
    reaction rates over the specified domain are generated automatically via the
    :attr:`KappaFissionXS.tallies` property, which can then be appended to a
    :class:`openmc.Tallies` instance.

    For post-processing, the :meth:`MGXS.load_from_statepoint` will pull in the
    necessary data to compute multi-group cross sections from a
    :class:`openmc.StatePoint` instance. The derived multi-group cross section
    can then be obtained from the :attr:`KappaFissionXS.xs_tally` property.

    For a spatial domain :math:`V` and energy group :math:`[E_g,E_{g-1}]`, the
    recoverable fission energy production rate cross section is calculated as:

    .. math::

       \frac{\int_{r \in V} dr \int_{4\pi} d\Omega \int_{E_g}^{E_{g-1}} dE \;
       \kappa\sigma_f (r, E) \psi (r, E, \Omega)}{\int_{r \in V} dr \int_{4\pi}
       d\Omega \int_{E_g}^{E_{g-1}} dE \; \psi (r, E, \Omega)}.

    Parameters
    ----------
    domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
        The domain for spatial homogenization
    domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
        The domain type for spatial homogenization
    groups : openmc.mgxs.EnergyGroups
        The energy group structure for energy condensation
    by_nuclide : bool
        If true, computes cross sections for each nuclide in domain
    name : str, optional
        Name of the multi-group cross section. Used as a label to identify
        tallies in OpenMC 'tallies.xml' file.
    num_polar : Integral, optional
        Number of equi-width polar angle bins for angle discretization;
        defaults to one bin
    num_azimuthal : Integral, optional
        Number of equi-width azimuthal angle bins for angle discretization;
        defaults to one bin

    Attributes
    ----------
    name : str, optional
        Name of the multi-group cross section
    rxn_type : str
        Reaction type (e.g., 'total', 'nu-fission', etc.)
    by_nuclide : bool
        If true, computes cross sections for each nuclide in domain
    domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
        Domain for spatial homogenization
    domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
        Domain type for spatial homogenization
    energy_groups : openmc.mgxs.EnergyGroups
        Energy group structure for energy condensation
    num_polar : Integral
        Number of equi-width polar angle bins for angle discretization
    num_azimuthal : Integral
        Number of equi-width azimuthal angle bins for angle discretization
    tally_trigger : openmc.Trigger
        An (optional) tally precision trigger given to each tally used to
        compute the cross section
    scores : list of str
        The scores in each tally used to compute the multi-group cross section
    filters : list of openmc.Filter
        The filters in each tally used to compute the multi-group cross section
    tally_keys : list of str
        The keys into the tallies dictionary for each tally used to compute
        the multi-group cross section
    estimator : {'tracklength', 'collision', 'analog'}
        The tally estimator used to compute the multi-group cross section
    tallies : collections.OrderedDict
        OpenMC tallies needed to compute the multi-group cross section. The keys
        are strings listed in the :attr:`KappaFissionXS.tally_keys` property and
        values are instances of :class:`openmc.Tally`.
    rxn_rate_tally : openmc.Tally
        Derived tally for the reaction rate tally used in the numerator to
        compute the multi-group cross section. This attribute is None
        unless the multi-group cross section has been computed.
    xs_tally : openmc.Tally
        Derived tally for the multi-group cross section. This attribute
        is None unless the multi-group cross section has been computed.
    num_subdomains : int
        The number of subdomains is unity for 'material', 'cell' and 'universe'
        domain types. This is equal to the number of cell instances
        for 'distribcell' domain types (it is equal to unity prior to loading
        tally data from a statepoint file).
    num_nuclides : int
        The number of nuclides for which the multi-group cross section is
        being tracked. This is unity if the by_nuclide attribute is False.
    nuclides : Iterable of str or 'sum'
        The optional user-specified nuclides for which to compute cross
        sections (e.g., 'U238', 'O16'). If by_nuclide is True but nuclides
        are not specified by the user, all nuclides in the spatial domain
        are included. This attribute is 'sum' if by_nuclide is false.
    sparse : bool
        Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format
        for compressed data storage
    loaded_sp : bool
        Whether or not a statepoint file has been loaded with tally data
    derived : bool
        Whether or not the MGXS is merged from one or more other MGXS
    hdf5_key : str
        The key used to index multi-group cross sections in an HDF5 data store
    """
class ScatterXS(MGXS):
    r"""A scattering multi-group cross section.

    The scattering cross section is defined as the difference between the total
    and absorption cross sections.

    This class can be used for both OpenMC input generation and tally data
    post-processing to compute spatially-homogenized and energy-integrated
    multi-group cross sections for multi-group neutronics calculations. At a
    minimum, one needs to set the :attr:`ScatterXS.energy_groups` and
    :attr:`ScatterXS.domain` properties. Tallies for the flux and
    appropriate reaction rates over the specified domain are generated
    automatically via the :attr:`ScatterXS.tallies` property, which can
    then be appended to a :class:`openmc.Tallies` instance.

    For post-processing, the :meth:`MGXS.load_from_statepoint` will pull in the
    necessary data to compute multi-group cross sections from a
    :class:`openmc.StatePoint` instance. The derived multi-group cross section
    can then be obtained from the :attr:`ScatterXS.xs_tally` property.

    For a spatial domain :math:`V` and energy group :math:`[E_g,E_{g-1}]`, the
    scattering cross section is calculated as:

    .. math::

       \frac{\int_{r \in V} dr \int_{4\pi} d\Omega \int_{E_g}^{E_{g-1}} dE \;
       \left [ \sigma_t (r, E) \psi (r, E, \Omega) - \sigma_a (r, E) \psi (r, E,
       \Omega) \right ]}{\int_{r \in V} dr \int_{4\pi} d\Omega
       \int_{E_g}^{E_{g-1}} dE \; \psi (r, E, \Omega)}.

    To incorporate the effect of scattering multiplication from (n,xn)
    reactions in the above relation, the `nu` parameter can be set to `True`.

    Parameters
    ----------
    domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
        The domain for spatial homogenization
    domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
        The domain type for spatial homogenization
    groups : openmc.mgxs.EnergyGroups
        The energy group structure for energy condensation
    by_nuclide : bool
        If true, computes cross sections for each nuclide in domain
    name : str, optional
        Name of the multi-group cross section. Used as a label to identify
        tallies in OpenMC 'tallies.xml' file.
    num_polar : Integral, optional
        Number of equi-width polar angle bins for angle discretization;
        defaults to one bin
    num_azimuthal : Integral, optional
        Number of equi-width azimuthal angle bins for angle discretization;
        defaults to one bin
    nu : bool
        If True, the cross section data will include neutron multiplication;
        defaults to False

    Attributes
    ----------
    name : str, optional
        Name of the multi-group cross section
    rxn_type : str
        Reaction type (e.g., 'total', 'nu-fission', etc.)
    nu : bool
        If True, the cross section data will include neutron multiplication
    by_nuclide : bool
        If true, computes cross sections for each nuclide in domain
    domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
        Domain for spatial homogenization
    domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
        Domain type for spatial homogenization
    energy_groups : openmc.mgxs.EnergyGroups
        Energy group structure for energy condensation
    num_polar : Integral
        Number of equi-width polar angle bins for angle discretization
    num_azimuthal : Integral
        Number of equi-width azimuthal angle bins for angle discretization
    tally_trigger : openmc.Trigger
        An (optional) tally precision trigger given to each tally used to
        compute the cross section
    scores : list of str
        The scores in each tally used to compute the multi-group cross section
    filters : list of openmc.Filter
        The filters in each tally used to compute the multi-group cross section
    tally_keys : list of str
        The keys into the tallies dictionary for each tally used to compute
        the multi-group cross section
    estimator : {'tracklength', 'collision', 'analog'}
        The tally estimator used to compute the multi-group cross section
    tallies : collections.OrderedDict
        OpenMC tallies needed to compute the multi-group cross section. The keys
        are strings listed in the :attr:`ScatterXS.tally_keys` property and
        values are instances of :class:`openmc.Tally`.
    rxn_rate_tally : openmc.Tally
        Derived tally for the reaction rate tally used in the numerator to
        compute the multi-group cross section. This attribute is None
        unless the multi-group cross section has been computed.
    xs_tally : openmc.Tally
        Derived tally for the multi-group cross section. This attribute
        is None unless the multi-group cross section has been computed.
    num_subdomains : int
        The number of subdomains is unity for 'material', 'cell' and 'universe'
        domain types. This is equal to the number of cell instances
        for 'distribcell' domain types (it is equal to unity prior to loading
        tally data from a statepoint file).
    num_nuclides : int
        The number of nuclides for which the multi-group cross section is
        being tracked. This is unity if the by_nuclide attribute is False.
    nuclides : Iterable of str or 'sum'
        The optional user-specified nuclides for which to compute cross
        sections (e.g., 'U238', 'O16'). If by_nuclide is True but nuclides
        are not specified by the user, all nuclides in the spatial domain
        are included. This attribute is 'sum' if by_nuclide is false.
    sparse : bool
        Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format
        for compressed data storage
    loaded_sp : bool
        Whether or not a statepoint file has been loaded with tally data
    derived : bool
        Whether or not the MGXS is merged from one or more other MGXS
    hdf5_key : str
        The key used to index multi-group cross sections in an HDF5 data store
    """
    # NOTE(review): the decorated property/setter definitions below have no
    # bodies in this chunk -- presumably the `nu` accessor pair was stripped
    # during extraction. Confirm against the full source.
    @property
    @nu.setter
class ArbitraryXS(MGXS):
    r"""A multi-group cross section for an arbitrary reaction type.

    This class can be used for both OpenMC input generation and tally data
    post-processing to compute spatially-homogenized and energy-integrated
    multi-group total cross sections for multi-group neutronics calculations.
    At a minimum, one needs to set the :attr:`ArbitraryXS.energy_groups` and
    :attr:`ArbitraryXS.domain` properties. Tallies for the flux and appropriate
    reaction rates over the specified domain are generated automatically via the
    :attr:`ArbitraryXS.tallies` property, which can then be appended to a
    :class:`openmc.Tallies` instance.

    For post-processing, the :meth:`MGXS.load_from_statepoint` will pull in the
    necessary data to compute multi-group cross sections from a
    :class:`openmc.StatePoint` instance. The derived multi-group cross section
    can then be obtained from the :attr:`ArbitraryXS.xs_tally` property.

    For a spatial domain :math:`V` and energy group :math:`[E_g,E_{g-1}]`, the
    requested cross section is calculated as:

    .. math::

       \frac{\int_{r \in V} dr \int_{4\pi} d\Omega \int_{E_g}^{E_{g-1}} dE \;
       \sigma_X (r, E) \psi (r, E, \Omega)}{\int_{r \in V} dr \int_{4\pi}
       d\Omega \int_{E_g}^{E_{g-1}} dE \; \psi (r, E, \Omega)}

    where :math:`\sigma_X` is the requested reaction type of interest.

    Parameters
    ----------
    rxn_type : str
        Reaction type (e.g., '(n,2n)', '(n,Xt)', etc.)
    domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
        The domain for spatial homogenization
    domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
        The domain type for spatial homogenization
    groups : openmc.mgxs.EnergyGroups
        The energy group structure for energy condensation
    by_nuclide : bool
        If true, computes cross sections for each nuclide in domain
    name : str, optional
        Name of the multi-group cross section. Used as a label to identify
        tallies in OpenMC 'tallies.xml' file.
    num_polar : Integral, optional
        Number of equi-width polar angle bins for angle discretization;
        defaults to one bin
    num_azimuthal : Integral, optional
        Number of equi-width azimuthal angle bins for angle discretization;
        defaults to one bin

    Attributes
    ----------
    name : str, optional
        Name of the multi-group cross section
    rxn_type : str
        Reaction type (e.g., '(n,2n)', '(n,Xt)', etc.)
    by_nuclide : bool
        If true, computes cross sections for each nuclide in domain
    domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
        Domain for spatial homogenization
    domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
        Domain type for spatial homogenization
    energy_groups : openmc.mgxs.EnergyGroups
        Energy group structure for energy condensation
    num_polar : Integral
        Number of equi-width polar angle bins for angle discretization
    num_azimuthal : Integral
        Number of equi-width azimuthal angle bins for angle discretization
    tally_trigger : openmc.Trigger
        An (optional) tally precision trigger given to each tally used to
        compute the cross section
    scores : list of str
        The scores in each tally used to compute the multi-group cross section
    filters : list of openmc.Filter
        The filters in each tally used to compute the multi-group cross section
    tally_keys : list of str
        The keys into the tallies dictionary for each tally used to compute
        the multi-group cross section
    estimator : {'tracklength', 'collision', 'analog'}
        The tally estimator used to compute the multi-group cross section
    tallies : collections.OrderedDict
        OpenMC tallies needed to compute the multi-group cross section. The keys
        are strings listed in the :attr:`ArbitraryXS.tally_keys` property and
        values are instances of :class:`openmc.Tally`.
    rxn_rate_tally : openmc.Tally
        Derived tally for the reaction rate tally used in the numerator to
        compute the multi-group cross section. This attribute is None
        unless the multi-group cross section has been computed.
    xs_tally : openmc.Tally
        Derived tally for the multi-group cross section. This attribute
        is None unless the multi-group cross section has been computed.
    num_subdomains : int
        The number of subdomains is unity for 'material', 'cell' and 'universe'
        domain types. This is equal to the number of cell instances
        for 'distribcell' domain types (it is equal to unity prior to loading
        tally data from a statepoint file).
    num_nuclides : int
        The number of nuclides for which the multi-group cross section is
        being tracked. This is unity if the by_nuclide attribute is False.
    nuclides : Iterable of str or 'sum'
        The optional user-specified nuclides for which to compute cross
        sections (e.g., 'U238', 'O16'). If by_nuclide is True but nuclides
        are not specified by the user, all nuclides in the spatial domain
        are included. This attribute is 'sum' if by_nuclide is false.
    sparse : bool
        Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format
        for compressed data storage
    loaded_sp : bool
        Whether or not a statepoint file has been loaded with tally data
    derived : bool
        Whether or not the MGXS is merged from one or more other MGXS
    hdf5_key : str
        The key used to index multi-group cross sections in an HDF5 data store
    """
class ArbitraryMatrixXS(MatrixMGXS):
    r"""A multi-group matrix cross section for an arbitrary reaction type.

    This class can be used for both OpenMC input generation and tally data
    post-processing to compute spatially-homogenized and energy-integrated
    multi-group cross sections for multi-group neutronics calculations. At a
    minimum, one needs to set the :attr:`ArbitraryMatrixXS.energy_groups` and
    :attr:`ArbitraryMatrixXS.domain` properties. Tallies for the flux and
    appropriate reaction rates over the specified domain are generated
    automatically via the :attr:`ArbitraryMatrixXS.tallies` property, which can
    then be appended to a :class:`openmc.Tallies` instance.

    For post-processing, the :meth:`MGXS.load_from_statepoint` will pull in the
    necessary data to compute multi-group cross sections from a
    :class:`openmc.StatePoint` instance. The derived multi-group cross section
    can then be obtained from the :attr:`ArbitraryMatrixXS.xs_tally` property.

    For a spatial domain :math:`V`, incoming energy group
    :math:`[E_{g'},E_{g'-1}]`, and outgoing energy group :math:`[E_g,E_{g-1}]`,
    the group-to-group reaction rate is calculated as:

    .. math::

       \begin{aligned}
       \langle \sigma_{X,g'\rightarrow g} \phi \rangle &= \int_{r \in V} dr
       \int_{4\pi} d\Omega' \int_{E_{g'}}^{E_{g'-1}} dE' \int_{E_g}^{E_{g-1}} dE
       \; \chi(E) \sigma_X (r, E') \psi(r, E', \Omega')\\
       \langle \phi \rangle &= \int_{r \in V} dr \int_{4\pi} d\Omega
       \int_{E_g}^{E_{g-1}} dE \; \psi (r, E, \Omega) \\
       \sigma_{X,g'\rightarrow g} &= \frac{\langle \sigma_{X,g'\rightarrow
       g} \phi \rangle}{\langle \phi \rangle}
       \end{aligned}

    where :math:`\sigma_X` is the requested reaction type of interest.

    Parameters
    ----------
    rxn_type : str
        Reaction type (e.g., '(n,2n)', '(n,nta)', etc.). Valid names have
        neutrons as a product.
    domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
        The domain for spatial homogenization
    domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
        The domain type for spatial homogenization
    groups : openmc.mgxs.EnergyGroups
        The energy group structure for energy condensation
    by_nuclide : bool
        If true, computes cross sections for each nuclide in domain
    name : str, optional
        Name of the multi-group cross section. Used as a label to identify
        tallies in OpenMC 'tallies.xml' file.
    num_polar : Integral, optional
        Number of equi-width polar angle bins for angle discretization;
        defaults to one bin
    num_azimuthal : Integral, optional
        Number of equi-width azimuthal angle bins for angle discretization;
        defaults to one bin

    Attributes
    ----------
    name : str, optional
        Name of the multi-group cross section
    rxn_type : str
        Reaction type (e.g., 'total', 'nu-fission', etc.)
    by_nuclide : bool
        If true, computes cross sections for each nuclide in domain
    domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
        Domain for spatial homogenization
    domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
        Domain type for spatial homogenization
    energy_groups : openmc.mgxs.EnergyGroups
        Energy group structure for energy condensation
    num_polar : Integral
        Number of equi-width polar angle bins for angle discretization
    num_azimuthal : Integral
        Number of equi-width azimuthal angle bins for angle discretization
    tally_trigger : openmc.Trigger
        An (optional) tally precision trigger given to each tally used to
        compute the cross section
    scores : list of str
        The scores in each tally used to compute the multi-group cross section
    filters : list of openmc.Filter
        The filters in each tally used to compute the multi-group cross section
    tally_keys : list of str
        The keys into the tallies dictionary for each tally used to compute
        the multi-group cross section
    estimator : 'analog'
        The tally estimator used to compute the multi-group cross section
    tallies : collections.OrderedDict
        OpenMC tallies needed to compute the multi-group cross section. The keys
        are strings listed in the :attr:`ArbitraryMatrixXS.tally_keys`
        property and values are instances of :class:`openmc.Tally`.
    rxn_rate_tally : openmc.Tally
        Derived tally for the reaction rate tally used in the numerator to
        compute the multi-group cross section. This attribute is None
        unless the multi-group cross section has been computed.
    xs_tally : openmc.Tally
        Derived tally for the multi-group cross section. This attribute
        is None unless the multi-group cross section has been computed.
    num_subdomains : int
        The number of subdomains is unity for 'material', 'cell' and 'universe'
        domain types. This is equal to the number of cell instances
        for 'distribcell' domain types (it is equal to unity prior to loading
        tally data from a statepoint file).
    num_nuclides : int
        The number of nuclides for which the multi-group cross section is
        being tracked. This is unity if the by_nuclide attribute is False.
    nuclides : Iterable of str or 'sum'
        The optional user-specified nuclides for which to compute cross
        sections (e.g., 'U238', 'O16'). If by_nuclide is True but nuclides
        are not specified by the user, all nuclides in the spatial domain
        are included. This attribute is 'sum' if by_nuclide is false.
    sparse : bool
        Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format
        for compressed data storage
    loaded_sp : bool
        Whether or not a statepoint file has been loaded with tally data
    derived : bool
        Whether or not the MGXS is merged from one or more other MGXS
    hdf5_key : str
        The key used to index multi-group cross sections in an HDF5 data store
    """
class ScatterMatrixXS(MatrixMGXS):
r"""A scattering matrix multi-group cross section with the cosine of the
change-in-angle represented as one or more Legendre moments or a histogram.
This class can be used for both OpenMC input generation and tally data
post-processing to compute spatially-homogenized and energy-integrated
multi-group cross sections for multi-group neutronics calculations. At a
minimum, one needs to set the :attr:`ScatterMatrixXS.energy_groups` and
:attr:`ScatterMatrixXS.domain` properties. Tallies for the flux and
appropriate reaction rates over the specified domain are generated
automatically via the :attr:`ScatterMatrixXS.tallies` property, which can
then be appended to a :class:`openmc.Tallies` instance.
For post-processing, the :meth:`MGXS.load_from_statepoint` will pull in the
necessary data to compute multi-group cross sections from a
:class:`openmc.StatePoint` instance. The derived multi-group cross section
can then be obtained from the :attr:`ScatterMatrixXS.xs_tally` property.
For a spatial domain :math:`V`, incoming energy group
:math:`[E_{g'},E_{g'-1}]`, and outgoing energy group :math:`[E_g,E_{g-1}]`,
the Legendre scattering moments are calculated as:
.. math::
\begin{aligned}
\langle \sigma_{s,\ell,g'\rightarrow g} \phi \rangle &= \int_{r \in V} dr
\int_{4\pi} d\Omega' \int_{E_{g'}}^{E_{g'-1}} dE' \int_{4\pi} d\Omega
\int_{E_g}^{E_{g-1}} dE \; P_\ell (\Omega \cdot \Omega') \sigma_s (r, E'
\rightarrow E, \Omega' \cdot \Omega) \psi(r, E', \Omega')\\
\langle \phi \rangle &= \int_{r \in V} dr \int_{4\pi} d\Omega
\int_{E_g}^{E_{g-1}} dE \; \psi (r, E, \Omega) \\
\sigma_{s,\ell,g'\rightarrow g} &= \frac{\langle
\sigma_{s,\ell,g'\rightarrow g} \phi \rangle}{\langle \phi \rangle}
\end{aligned}
If the order is zero and a :math:`P_0` transport-correction is applied
(default), the scattering matrix elements are:
.. math::
\sigma_{s,g'\rightarrow g} = \frac{\langle \sigma_{s,0,g'\rightarrow g}
\phi \rangle - \delta_{gg'} \sum_{g''} \langle \sigma_{s,1,g''\rightarrow
g} \phi \rangle}{\langle \phi \rangle}
To incorporate the effect of neutron multiplication from (n,xn) reactions
in the above relation, the `nu` parameter can be set to `True`.
An alternative form of the scattering matrix is computed when the
`formulation` property is set to 'consistent' rather than the default
of 'simple'. This formulation computes the scattering matrix multi-group
cross section as the product of the scatter cross section and
group-to-group scattering probabilities.
Unlike the default 'simple' formulation, the 'consistent' formulation
is computed from the groupwise scattering cross section which uses a
tracklength estimator. This ensures that reaction rate balance is exactly
preserved with a :class:`TotalXS` computed using a tracklength estimator.
For a scattering probability matrix :math:`P_{s,\ell,g'\rightarrow g}` and
scattering cross section :math:`\sigma_s (r, E)` for incoming energy group
:math:`[E_{g'},E_{g'-1}]` and outgoing energy group :math:`[E_g,E_{g-1}]`,
the Legendre scattering moments are calculated as:
.. math::
\sigma_{s,\ell,g'\rightarrow g} = \sigma_s (r, E) \times
P_{s,\ell,g'\rightarrow g}
To incorporate the effect of neutron multiplication from (n,xn) reactions
in the 'consistent' scattering matrix, the `nu` parameter can be set to `True`
such that the Legendre scattering moments are calculated as:
.. math::
\sigma_{s,\ell,g'\rightarrow g} = \upsilon_{g'\rightarrow g} \times
\sigma_s (r, E) \times P_{s,\ell,g'\rightarrow g}
Parameters
----------
domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
The domain for spatial homogenization
domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
The domain type for spatial homogenization
groups : openmc.mgxs.EnergyGroups
The energy group structure for energy condensation
by_nuclide : bool
If true, computes cross sections for each nuclide in domain
name : str, optional
Name of the multi-group cross section. Used as a label to identify
tallies in OpenMC 'tallies.xml' file.
num_polar : int, optional
Number of equi-width polar angle bins for angle discretization;
defaults to one bin
num_azimuthal : int, optional
Number of equi-width azimuthal angle bins for angle discretization;
defaults to one bin
nu : bool
If True, the cross section data will include neutron multiplication;
defaults to False
Attributes
----------
formulation : 'simple' or 'consistent'
The calculation approach to use ('simple' by default). The 'simple'
formulation simply divides the group-to-group scattering rates by
the groupwise flux, each computed from analog tally estimators. The
'consistent' formulation multiplies the groupwise scattering rates
by the group-to-group scatter probability matrix, the former computed
from tracklength tallies and the latter computed from analog tallies.
The 'consistent' formulation is designed to better conserve reaction
rate balance with the total and absorption cross sections computed
using tracklength tally estimators.
correction : 'P0' or None
Apply the P0 correction to scattering matrices if set to 'P0'; this is
used only if :attr:`ScatterMatrixXS.scatter_format` is 'legendre'
scatter_format : {'legendre', or 'histogram'}
Representation of the angular scattering distribution (default is
'legendre')
legendre_order : int
The highest Legendre moment in the scattering matrix; this is used if
:attr:`ScatterMatrixXS.scatter_format` is 'legendre'. (default is 0)
histogram_bins : int
The number of equally-spaced bins for the histogram representation of
the angular scattering distribution; this is used if
:attr:`ScatterMatrixXS.scatter_format` is 'histogram'. (default is 16)
name : str, optional
Name of the multi-group cross section
rxn_type : str
Reaction type (e.g., 'total', 'nu-fission', etc.)
nu : bool
If True, the cross section data will include neutron multiplication
by_nuclide : bool
If true, computes cross sections for each nuclide in domain
domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
Domain for spatial homogenization
domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
Domain type for spatial homogenization
energy_groups : openmc.mgxs.EnergyGroups
Energy group structure for energy condensation
num_polar : int
Number of equi-width polar angle bins for angle discretization
num_azimuthal : int
Number of equi-width azimuthal angle bins for angle discretization
tally_trigger : openmc.Trigger
An (optional) tally precision trigger given to each tally used to
compute the cross section
scores : list of str
The scores in each tally used to compute the multi-group cross section
filters : list of openmc.Filter
The filters in each tally used to compute the multi-group cross section
tally_keys : list of str
The keys into the tallies dictionary for each tally used to compute
the multi-group cross section
estimator : 'analog'
The tally estimator used to compute the multi-group cross section
tallies : collections.OrderedDict
OpenMC tallies needed to compute the multi-group cross section. The keys
are strings listed in the :attr:`ScatterMatrixXS.tally_keys` property
and values are instances of :class:`openmc.Tally`.
rxn_rate_tally : openmc.Tally
Derived tally for the reaction rate tally used in the numerator to
compute the multi-group cross section. This attribute is None
unless the multi-group cross section has been computed.
xs_tally : openmc.Tally
Derived tally for the multi-group cross section. This attribute
is None unless the multi-group cross section has been computed.
num_subdomains : int
The number of subdomains is unity for 'material', 'cell' and 'universe'
domain types. This is equal to the number of cell instances
for 'distribcell' domain types (it is equal to unity prior to loading
tally data from a statepoint file).
num_nuclides : int
The number of nuclides for which the multi-group cross section is
being tracked. This is unity if the by_nuclide attribute is False.
nuclides : Iterable of str or 'sum'
The optional user-specified nuclides for which to compute cross
sections (e.g., 'U238', 'O16'). If by_nuclide is True but nuclides
are not specified by the user, all nuclides in the spatial domain
are included. This attribute is 'sum' if by_nuclide is false.
sparse : bool
Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format
for compressed data storage
loaded_sp : bool
Whether or not a statepoint file has been loaded with tally data
derived : bool
Whether or not the MGXS is merged from one or more other MGXS
hdf5_key : str
The key used to index multi-group cross sections in an HDF5 data store
"""
@property
def _dont_squeeze(self):
"""Create a tuple of axes which should not be removed during the get_xs
process
"""
if self.num_polar > 1 or self.num_azimuthal > 1:
if self.scatter_format == SCATTER_HISTOGRAM:
return (0, 1, 3, 4, 5)
else:
return (0, 1, 3, 4)
else:
if self.scatter_format == SCATTER_HISTOGRAM:
return (1, 2, 3)
else:
return (1, 2)
@property
@property
@property
@property
@property
@property
@property
@property
@property
@property
@property
@property
@nu.setter
@formulation.setter
@correction.setter
@scatter_format.setter
@legendre_order.setter
@histogram_bins.setter
def load_from_statepoint(self, statepoint):
"""Extracts tallies in an OpenMC StatePoint with the data needed to
compute multi-group cross sections.
This method is needed to compute cross section data from tallies
in an OpenMC StatePoint object.
.. note:: The statepoint must be linked with an OpenMC Summary object.
Parameters
----------
statepoint : openmc.StatePoint
An OpenMC StatePoint object with tally data
Raises
------
ValueError
When this method is called with a statepoint that has not been
linked with a summary object.
"""
# Clear any tallies previously loaded from a statepoint
if self.loaded_sp:
self._tallies = None
self._xs_tally = None
self._rxn_rate_tally = None
self._loaded_sp = False
super().load_from_statepoint(statepoint)
def get_slice(self, nuclides=[], in_groups=[], out_groups=[],
legendre_order='same'):
"""Build a sliced ScatterMatrix for the specified nuclides and
energy groups.
This method constructs a new MGXS to encapsulate a subset of the data
represented by this MGXS. The subset of data to include in the tally
slice is determined by the nuclides and energy groups specified in
the input parameters.
Parameters
----------
nuclides : list of str
A list of nuclide name strings
(e.g., ['U235', 'U238']; default is [])
in_groups : list of int
A list of incoming energy group indices starting at 1 for the high
energies (e.g., [1, 2, 3]; default is [])
out_groups : list of int
A list of outgoing energy group indices starting at 1 for the high
energies (e.g., [1, 2, 3]; default is [])
legendre_order : int or 'same'
The highest Legendre moment in the sliced MGXS. If order is 'same'
then the sliced MGXS will have the same Legendre moments as the
original MGXS (default). If order is an integer less than the
original MGXS' order, then only those Legendre moments up to that
order will be included in the sliced MGXS.
Returns
-------
openmc.mgxs.MatrixMGXS
A new MatrixMGXS which encapsulates the subset of data requested
for the nuclide(s) and/or energy group(s) requested in the
parameters.
"""
# Call super class method and null out derived tallies
slice_xs = super().get_slice(nuclides, in_groups)
slice_xs._rxn_rate_tally = None
slice_xs._xs_tally = None
# Slice the Legendre order if needed
if legendre_order != 'same' and self.scatter_format == SCATTER_LEGENDRE:
cv.check_type('legendre_order', legendre_order, Integral)
cv.check_less_than('legendre_order', legendre_order,
self.legendre_order, equality=True)
slice_xs.legendre_order = legendre_order
# Slice the scattering tally
filter_bins = [tuple(['P{}'.format(i)
for i in range(self.legendre_order + 1)])]
slice_xs.tallies[self.rxn_type] = \
slice_xs.tallies[self.rxn_type].get_slice(
filters=[openmc.LegendreFilter], filter_bins=filter_bins)
# Slice outgoing energy groups if needed
if len(out_groups) != 0:
filter_bins = []
for group in out_groups:
group_bounds = self.energy_groups.get_group_bounds(group)
filter_bins.append(group_bounds)
filter_bins = [tuple(filter_bins)]
# Slice each of the tallies across energyout groups
for tally_type, tally in slice_xs.tallies.items():
if tally.contains_filter(openmc.EnergyoutFilter):
tally_slice = tally.get_slice(
filters=[openmc.EnergyoutFilter],
filter_bins=filter_bins)
slice_xs.tallies[tally_type] = tally_slice
slice_xs.sparse = self.sparse
return slice_xs
    def get_xs(self, in_groups='all', out_groups='all',
               subdomains='all', nuclides='all', moment='all',
               xs_type='macro', order_groups='increasing',
               row_column='inout', value='mean', squeeze=True):
        r"""Returns an array of multi-group cross sections.

        This method constructs a 5D NumPy array for the requested
        multi-group cross section data for one or more subdomains
        (1st dimension), energy groups in (2nd dimension), energy groups out
        (3rd dimension), nuclides (4th dimension), and moments/histograms
        (5th dimension).

        .. note:: The scattering moments are not multiplied by the
                  :math:`(2\ell+1)/2` prefactor in the expansion of the
                  scattering source into Legendre moments in the neutron
                  transport equation.

        Parameters
        ----------
        in_groups : Iterable of Integral or 'all'
            Incoming energy groups of interest. Defaults to 'all'.
        out_groups : Iterable of Integral or 'all'
            Outgoing energy groups of interest. Defaults to 'all'.
        subdomains : Iterable of Integral or 'all'
            Subdomain IDs of interest. Defaults to 'all'.
        nuclides : Iterable of str or 'all' or 'sum'
            A list of nuclide name strings (e.g., ['U235', 'U238']). The
            special string 'all' will return the cross sections for all nuclides
            in the spatial domain. The special string 'sum' will return the
            cross section summed over all nuclides. Defaults to 'all'.
        moment : int or 'all'
            The scattering matrix moment to return. All moments will be
            returned if the moment is 'all' (default); otherwise, a specific
            moment will be returned.
        xs_type: {'macro', 'micro'}
            Return the macro or micro cross section in units of cm^-1 or barns.
            Defaults to 'macro'.
        order_groups: {'increasing', 'decreasing'}
            Return the cross section indexed according to increasing or
            decreasing energy groups (decreasing or increasing energies).
            Defaults to 'increasing'.
        row_column: {'inout', 'outin'}
            Return the cross section indexed first by incoming group and
            second by outgoing group ('inout'), or vice versa ('outin').
            Defaults to 'inout'.
        value : {'mean', 'std_dev', 'rel_err'}
            A string for the type of value to return. Defaults to 'mean'.
        squeeze : bool
            A boolean representing whether to eliminate the extra dimensions
            of the multi-dimensional array to be returned. Defaults to True.

        Returns
        -------
        numpy.ndarray
            A NumPy array of the multi-group cross section indexed in the order
            each group and subdomain is listed in the parameters.

        Raises
        ------
        ValueError
            When this method is called before the multi-group cross section is
            computed from tally data.

        """
        # Validate the requested statistic and cross section type up front
        cv.check_value('value', value, ['mean', 'std_dev', 'rel_err'])
        cv.check_value('xs_type', xs_type, ['macro', 'micro'])

        # FIXME: Unable to get microscopic xs for mesh domain because the mesh
        # cells do not know the nuclide densities in each mesh cell.
        if self.domain_type == 'mesh' and xs_type == 'micro':
            msg = 'Unable to get micro xs for mesh domain since the mesh ' \
                  'cells do not know the nuclide densities in each mesh cell.'
            raise ValueError(msg)

        filters = []
        filter_bins = []

        # Construct a collection of the domain filter bins
        if not isinstance(subdomains, str):
            cv.check_iterable_type('subdomains', subdomains, Integral, max_depth=3)
            filters.append(_DOMAIN_TO_FILTER[self.domain_type])
            subdomain_bins = []
            for subdomain in subdomains:
                subdomain_bins.append(subdomain)
            filter_bins.append(tuple(subdomain_bins))

        # Construct list of energy group bounds tuples for all requested groups
        if not isinstance(in_groups, str):
            cv.check_iterable_type('groups', in_groups, Integral)
            filters.append(openmc.EnergyFilter)
            energy_bins = []
            for group in in_groups:
                energy_bins.append(
                    (self.energy_groups.get_group_bounds(group),))
            filter_bins.append(tuple(energy_bins))

        # Construct list of energy group bounds tuples for all requested groups
        # (one EnergyoutFilter entry per outgoing group)
        if not isinstance(out_groups, str):
            cv.check_iterable_type('groups', out_groups, Integral)
            for group in out_groups:
                filters.append(openmc.EnergyoutFilter)
                filter_bins.append((self.energy_groups.get_group_bounds(group),))

        # Construct CrossScore for requested scattering moment
        if self.scatter_format == SCATTER_LEGENDRE:
            if moment != 'all':
                # A single Legendre moment collapses the moment axis to one bin
                cv.check_type('moment', moment, Integral)
                cv.check_greater_than('moment', moment, 0, equality=True)
                cv.check_less_than(
                    'moment', moment, self.legendre_order, equality=True)
                filters.append(openmc.LegendreFilter)
                filter_bins.append(('P{}'.format(moment),))
                num_angle_bins = 1
            else:
                num_angle_bins = self.legendre_order + 1
        else:
            num_angle_bins = self.histogram_bins

        # Construct a collection of the nuclides to retrieve from the xs tally
        if self.by_nuclide:
            if nuclides == 'all' or nuclides == 'sum' or nuclides == ['sum']:
                query_nuclides = self.get_nuclides()
            else:
                query_nuclides = nuclides
        else:
            query_nuclides = ['total']

        # Use tally summation if user requested the sum for all nuclides
        scores = self.xs_tally.scores
        if nuclides == 'sum' or nuclides == ['sum']:
            xs_tally = self.xs_tally.summation(nuclides=query_nuclides)
            xs = xs_tally.get_values(scores=scores, filters=filters,
                                     filter_bins=filter_bins, value=value)
        else:
            xs = self.xs_tally.get_values(scores=scores, filters=filters,
                                          filter_bins=filter_bins,
                                          nuclides=query_nuclides, value=value)

        # Divide by atom number densities for microscopic cross sections.
        # Relative errors are scale-invariant, so only 'mean' and 'std_dev'
        # need the density normalization.
        if xs_type == 'micro' and self._divide_by_density:
            if self.by_nuclide:
                densities = self.get_nuclide_densities(nuclides)
            else:
                densities = self.get_nuclide_densities('sum')
            if value == 'mean' or value == 'std_dev':
                xs /= densities[np.newaxis, :, np.newaxis]

        # Convert any NaNs (e.g., from zero-flux regions) to zero
        xs = np.nan_to_num(xs)

        if in_groups == 'all':
            num_in_groups = self.num_groups
        else:
            num_in_groups = len(in_groups)

        if out_groups == 'all':
            num_out_groups = self.num_groups
        else:
            num_out_groups = len(out_groups)

        # Reshape tally data array with separate axes for domain and energy.
        # Accommodate the polar and azimuthal bins if needed.
        num_subdomains = int(xs.shape[0] / (num_angle_bins * num_in_groups *
                                            num_out_groups * self.num_polar *
                                            self.num_azimuthal))
        if self.num_polar > 1 or self.num_azimuthal > 1:
            # Shape: (polar, azimuthal, subdomain, in-group, out-group, moment/bin)
            new_shape = (self.num_polar, self.num_azimuthal,
                         num_subdomains, num_in_groups, num_out_groups,
                         num_angle_bins)
            new_shape += xs.shape[1:]
            xs = np.reshape(xs, new_shape)

            # Transpose the scattering matrix if requested by user
            if row_column == 'outin':
                xs = np.swapaxes(xs, 3, 4)

            # Reverse data if user requested increasing energy groups since
            # tally data is stored in order of increasing energies
            if order_groups == 'increasing':
                xs = xs[:, :, :, ::-1, ::-1, ...]
        else:
            # Shape: (subdomain, in-group, out-group, moment/bin)
            new_shape = (num_subdomains, num_in_groups, num_out_groups,
                         num_angle_bins)
            new_shape += xs.shape[1:]
            xs = np.reshape(xs, new_shape)

            # Transpose the scattering matrix if requested by user
            if row_column == 'outin':
                xs = np.swapaxes(xs, 1, 2)

            # Reverse data if user requested increasing energy groups since
            # tally data is stored in order of increasing energies
            if order_groups == 'increasing':
                xs = xs[:, ::-1, ::-1, ...]

        if squeeze:
            # We want to squeeze out everything but the angles, in_groups,
            # out_groups, and, if needed, num_angle_bins dimension. These must
            # not be squeezed so 1-group, 1-angle problems have the correct
            # shape.
            xs = self._squeeze_xs(xs)
        return xs
def get_pandas_dataframe(self, groups='all', nuclides='all',
xs_type='macro', paths=False):
"""Build a Pandas DataFrame for the MGXS data.
This method leverages :meth:`openmc.Tally.get_pandas_dataframe`, but
renames the columns with terminology appropriate for cross section data.
Parameters
----------
groups : Iterable of Integral or 'all'
Energy groups of interest. Defaults to 'all'.
nuclides : Iterable of str or 'all' or 'sum'
The nuclides of the cross-sections to include in the dataframe. This
may be a list of nuclide name strings (e.g., ['U235', 'U238']).
The special string 'all' will include the cross sections for all
nuclides in the spatial domain. The special string 'sum' will
include the cross sections summed over all nuclides. Defaults to
'all'.
xs_type: {'macro', 'micro'}
Return macro or micro cross section in units of cm^-1 or barns.
Defaults to 'macro'.
paths : bool, optional
Construct columns for distribcell tally filters (default is True).
The geometric information in the Summary object is embedded into
a Multi-index column with a geometric "path" to each distribcell
instance.
Returns
-------
pandas.DataFrame
A Pandas DataFrame for the cross section data.
Raises
------
ValueError
When this method is called before the multi-group cross section is
computed from tally data.
"""
# Build the dataframe using the parent class method
df = super().get_pandas_dataframe(groups, nuclides, xs_type,
paths=paths)
# If the matrix is P0, remove the legendre column
if self.scatter_format == SCATTER_LEGENDRE and self.legendre_order == 0:
df = df.drop(axis=1, labels=['legendre'])
return df
def print_xs(self, subdomains='all', nuclides='all',
xs_type='macro', moment=0):
"""Prints a string representation for the multi-group cross section.
Parameters
----------
subdomains : Iterable of Integral or 'all'
The subdomain IDs of the cross sections to include in the report.
Defaults to 'all'.
nuclides : Iterable of str or 'all' or 'sum'
The nuclides of the cross-sections to include in the report. This
may be a list of nuclide name strings (e.g., ['U235', 'U238']).
The special string 'all' will report the cross sections for all
nuclides in the spatial domain. The special string 'sum' will
report the cross sections summed over all nuclides. Defaults to
'all'.
xs_type: {'macro', 'micro'}
Return the macro or micro cross section in units of cm^-1 or barns.
Defaults to 'macro'.
moment : int
The scattering moment to print (default is 0)
"""
# Construct a collection of the subdomains to report
if not isinstance(subdomains, str):
cv.check_iterable_type('subdomains', subdomains, Integral)
elif self.domain_type == 'distribcell':
subdomains = np.arange(self.num_subdomains, dtype=np.int)
elif self.domain_type == 'mesh':
subdomains = list(self.domain.indices)
else:
subdomains = [self.domain.id]
# Construct a collection of the nuclides to report
if self.by_nuclide:
if nuclides == 'all':
nuclides = self.get_nuclides()
if nuclides == 'sum':
nuclides = ['sum']
else:
cv.check_iterable_type('nuclides', nuclides, str)
else:
nuclides = ['sum']
cv.check_value('xs_type', xs_type, ['macro', 'micro'])
if self.correction != 'P0' and self.scatter_format == SCATTER_LEGENDRE:
rxn_type = '{0} (P{1})'.format(self.rxn_type, moment)
else:
rxn_type = self.rxn_type
# Build header for string with type and domain info
string = 'Multi-Group XS\n'
string += '{0: <16}=\t{1}\n'.format('\tReaction Type', rxn_type)
string += '{0: <16}=\t{1}\n'.format('\tDomain Type', self.domain_type)
string += '{0: <16}=\t{1}\n'.format('\tDomain ID', self.domain.id)
# Generate the header for an individual XS
xs_header = '\tCross Sections [{0}]:'.format(self.get_units(xs_type))
# If cross section data has not been computed, only print string header
if self.tallies is None:
print(string)
return
string += '{0: <16}\n'.format('\tEnergy Groups:')
template = '{0: <12}Group {1} [{2: <10} - {3: <10}eV]\n'
# Loop over energy groups ranges
for group in range(1, self.num_groups + 1):
bounds = self.energy_groups.get_group_bounds(group)
string += template.format('', group, bounds[0], bounds[1])
# Set polar and azimuthal bins if necessary
if self.num_polar > 1 or self.num_azimuthal > 1:
pol_bins = np.linspace(0., np.pi, num=self.num_polar + 1,
endpoint=True)
azi_bins = np.linspace(-np.pi, np.pi, num=self.num_azimuthal + 1,
endpoint=True)
# Loop over all subdomains
for subdomain in subdomains:
if self.domain_type == 'distribcell' or self.domain_type == 'mesh':
string += '{0: <16}=\t{1}\n'.format('\tSubdomain', subdomain)
# Loop over all Nuclides
for nuclide in nuclides:
# Build header for nuclide type
if xs_type != 'sum':
string += '{0: <16}=\t{1}\n'.format('\tNuclide', nuclide)
# Build header for cross section type
string += '{0: <16}\n'.format(xs_header)
average_xs = self.get_xs(nuclides=[nuclide],
subdomains=[subdomain],
xs_type=xs_type, value='mean',
moment=moment)
rel_err_xs = self.get_xs(nuclides=[nuclide],
subdomains=[subdomain],
xs_type=xs_type, value='rel_err',
moment=moment)
rel_err_xs = rel_err_xs * 100.
# Create a function for printing group and histogram data
# Set the number of histogram bins
if self.scatter_format == SCATTER_HISTOGRAM:
num_mu_bins = self.histogram_bins
else:
num_mu_bins = 0
if self.num_polar > 1 or self.num_azimuthal > 1:
# Loop over polar, azi, and in/out energy group ranges
for pol in range(len(pol_bins) - 1):
pol_low, pol_high = pol_bins[pol: pol + 2]
for azi in range(len(azi_bins) - 1):
azi_low, azi_high = azi_bins[azi: azi + 2]
string += \
'\t\tPolar Angle: [{0:5f} - {1:5f}]'.format(
pol_low, pol_high) + \
'\tAzimuthal Angle: [{0:5f} - {1:5f}]'.format(
azi_low, azi_high) + '\n'
string += print_groups_and_histogram(
average_xs[pol, azi, ...],
rel_err_xs[pol, azi, ...], self.num_groups,
num_mu_bins)
string += '\n'
else:
string += print_groups_and_histogram(
average_xs, rel_err_xs, self.num_groups, num_mu_bins)
string += '\n'
string += '\n'
string += '\n'
print(string)
class MultiplicityMatrixXS(MatrixMGXS):
r"""The scattering multiplicity matrix.
This class can be used for both OpenMC input generation and tally data
post-processing to compute spatially-homogenized and energy-integrated
multi-group cross sections for multi-group neutronics calculations. At a
minimum, one needs to set the :attr:`MultiplicityMatrixXS.energy_groups` and
:attr:`MultiplicityMatrixXS.domain` properties. Tallies for the flux and
appropriate reaction rates over the specified domain are generated
automatically via the :attr:`MultiplicityMatrixXS.tallies` property, which
can then be appended to a :class:`openmc.Tallies` instance.
For post-processing, the :meth:`MGXS.load_from_statepoint` will pull in the
necessary data to compute multi-group cross sections from a
:class:`openmc.StatePoint` instance. The derived multi-group cross section
can then be obtained from the :attr:`MultiplicityMatrixXS.xs_tally`
property.
For a spatial domain :math:`V`, incoming energy group
:math:`[E_{g'},E_{g'-1}]`, and outgoing energy group :math:`[E_g,E_{g-1}]`,
the multiplicity is calculated as:
.. math::
\begin{aligned}
\langle \upsilon \sigma_{s,g'\rightarrow g} \phi \rangle &= \int_{r \in
D} dr \int_{4\pi} d\Omega' \int_{E_{g'}}^{E_{g'-1}} dE' \int_{4\pi}
d\Omega \int_{E_g}^{E_{g-1}} dE \; \sum_i \upsilon_i \sigma_i (r, E' \rightarrow
E, \Omega' \cdot \Omega) \psi(r, E', \Omega') \\
\langle \sigma_{s,g'\rightarrow g} \phi \rangle &= \int_{r \in
D} dr \int_{4\pi} d\Omega' \int_{E_{g'}}^{E_{g'-1}} dE' \int_{4\pi}
d\Omega \int_{E_g}^{E_{g-1}} dE \; \sum_i \upsilon_i \sigma_i (r, E' \rightarrow
E, \Omega' \cdot \Omega) \psi(r, E', \Omega') \\
\upsilon_{g'\rightarrow g} &= \frac{\langle \upsilon
\sigma_{s,g'\rightarrow g} \rangle}{\langle \sigma_{s,g'\rightarrow g}
\rangle}
\end{aligned}
where :math:`\upsilon_i` is the multiplicity for the :math:`i`-th reaction.
Parameters
----------
domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
The domain for spatial homogenization
domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
The domain type for spatial homogenization
groups : openmc.mgxs.EnergyGroups
The energy group structure for energy condensation
by_nuclide : bool
If true, computes cross sections for each nuclide in domain
name : str, optional
Name of the multi-group cross section. Used as a label to identify
tallies in OpenMC 'tallies.xml' file.
num_polar : Integral, optional
Number of equi-width polar angle bins for angle discretization;
defaults to one bin
num_azimuthal : Integral, optional
Number of equi-width azimuthal angle bins for angle discretization;
defaults to one bin
Attributes
----------
name : str, optional
Name of the multi-group cross section
rxn_type : str
Reaction type (e.g., 'total', 'nu-fission', etc.)
by_nuclide : bool
If true, computes cross sections for each nuclide in domain
domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
Domain for spatial homogenization
domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
Domain type for spatial homogenization
energy_groups : openmc.mgxs.EnergyGroups
Energy group structure for energy condensation
num_polar : Integral
Number of equi-width polar angle bins for angle discretization
num_azimuthal : Integral
Number of equi-width azimuthal angle bins for angle discretization
tally_trigger : openmc.Trigger
An (optional) tally precision trigger given to each tally used to
compute the cross section
scores : list of str
The scores in each tally used to compute the multi-group cross section
filters : list of openmc.Filter
The filters in each tally used to compute the multi-group cross section
tally_keys : list of str
The keys into the tallies dictionary for each tally used to compute
the multi-group cross section
estimator : 'analog'
The tally estimator used to compute the multi-group cross section
tallies : collections.OrderedDict
OpenMC tallies needed to compute the multi-group cross section. The keys
are strings listed in the :attr:`MultiplicityMatrixXS.tally_keys`
property and values are instances of :class:`openmc.Tally`.
rxn_rate_tally : openmc.Tally
Derived tally for the reaction rate tally used in the numerator to
compute the multi-group cross section. This attribute is None
unless the multi-group cross section has been computed.
xs_tally : openmc.Tally
Derived tally for the multi-group cross section. This attribute
is None unless the multi-group cross section has been computed.
num_subdomains : int
The number of subdomains is unity for 'material', 'cell' and 'universe'
domain types. This is equal to the number of cell instances
for 'distribcell' domain types (it is equal to unity prior to loading
tally data from a statepoint file).
num_nuclides : int
The number of nuclides for which the multi-group cross section is
being tracked. This is unity if the by_nuclide attribute is False.
nuclides : Iterable of str or 'sum'
The optional user-specified nuclides for which to compute cross
sections (e.g., 'U238', 'O16'). If by_nuclide is True but nuclides
are not specified by the user, all nuclides in the spatial domain
are included. This attribute is 'sum' if by_nuclide is false.
sparse : bool
Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format
for compressed data storage
loaded_sp : bool
Whether or not a statepoint file has been loaded with tally data
derived : bool
Whether or not the MGXS is merged from one or more other MGXS
hdf5_key : str
The key used to index multi-group cross sections in an HDF5 data store
"""
# Store whether or not the number density should be removed for microscopic
# values of this data; since a multiplicity matrix should reflect the
# multiplication relative to 1, this class will not divide by density
# for microscopic data
_divide_by_density = False
@property
@property
@property
@property
class ScatterProbabilityMatrix(MatrixMGXS):
    r"""The group-to-group scattering probability matrix.

    This class can be used for both OpenMC input generation and tally data
    post-processing to compute spatially-homogenized and energy-integrated
    multi-group cross sections for multi-group neutronics calculations. At a
    minimum, one needs to set the :attr:`ScatterProbabilityMatrix.energy_groups`
    and :attr:`ScatterProbabilityMatrix.domain` properties. Tallies for the
    appropriate reaction rates over the specified domain are generated
    automatically via the :attr:`ScatterProbabilityMatrix.tallies` property,
    which can then be appended to a :class:`openmc.Tallies` instance.

    For post-processing, the :meth:`MGXS.load_from_statepoint` will pull in the
    necessary data to compute multi-group cross sections from a
    :class:`openmc.StatePoint` instance. The derived multi-group cross section
    can then be obtained from the :attr:`ScatterProbabilityMatrix.xs_tally`
    property.

    For a spatial domain :math:`V`, incoming energy group
    :math:`[E_{g'},E_{g'-1}]`, and outgoing energy group :math:`[E_g,E_{g-1}]`,
    the group-to-group scattering probabilities are calculated as:

    .. math::

       \begin{aligned}
       \langle \sigma_{s,g'\rightarrow g} \phi \rangle &= \int_{r \in V} dr
       \int_{4\pi} d\Omega' \int_{E_{g'}}^{E_{g'-1}} dE' \int_{4\pi} d\Omega
       \int_{E_g}^{E_{g-1}} dE \; \sigma_{s} (r, E' \rightarrow E, \Omega'
       \cdot \Omega) \psi(r, E', \Omega')\\
       \langle \sigma_{s,0,g'} \phi \rangle &= \int_{r \in V} dr
       \int_{4\pi} d\Omega' \int_{E_{g'}}^{E_{g'-1}} dE' \int_{4\pi} d\Omega
       \int_{0}^{\infty} dE \; \sigma_s (r, E'
       \rightarrow E, \Omega' \cdot \Omega) \psi(r, E', \Omega')\\
       P_{s,g'\rightarrow g} &= \frac{\langle
       \sigma_{s,g'\rightarrow g} \phi \rangle}{\langle
       \sigma_{s,g'} \phi \rangle}
       \end{aligned}

    Parameters
    ----------
    domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
        The domain for spatial homogenization
    domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
        The domain type for spatial homogenization
    groups : openmc.mgxs.EnergyGroups
        The energy group structure for energy condensation
    by_nuclide : bool
        If true, computes cross sections for each nuclide in domain
    name : str, optional
        Name of the multi-group cross section. Used as a label to identify
        tallies in OpenMC 'tallies.xml' file.
    num_polar : Integral, optional
        Number of equi-width polar angle bins for angle discretization;
        defaults to one bin
    num_azimuthal : Integral, optional
        Number of equi-width azimuthal angle bins for angle discretization;
        defaults to one bin

    Attributes
    ----------
    name : str, optional
        Name of the multi-group cross section
    rxn_type : str
        Reaction type (e.g., 'total', 'nu-fission', etc.)
    by_nuclide : bool
        If true, computes cross sections for each nuclide in domain
    domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
        Domain for spatial homogenization
    domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
        Domain type for spatial homogenization
    energy_groups : openmc.mgxs.EnergyGroups
        Energy group structure for energy condensation
    num_polar : Integral
        Number of equi-width polar angle bins for angle discretization
    num_azimuthal : Integral
        Number of equi-width azimuthal angle bins for angle discretization
    tally_trigger : openmc.Trigger
        An (optional) tally precision trigger given to each tally used to
        compute the cross section
    scores : list of str
        The scores in each tally used to compute the multi-group cross section
    filters : list of openmc.Filter
        The filters in each tally used to compute the multi-group cross section
    tally_keys : list of str
        The keys into the tallies dictionary for each tally used to compute
        the multi-group cross section
    estimator : 'analog'
        The tally estimator used to compute the multi-group cross section
    tallies : collections.OrderedDict
        OpenMC tallies needed to compute the multi-group cross section. The keys
        are strings listed in the :attr:`ScatterProbabilityMatrix.tally_keys`
        property and values are instances of :class:`openmc.Tally`.
    rxn_rate_tally : openmc.Tally
        Derived tally for the reaction rate tally used in the numerator to
        compute the multi-group cross section. This attribute is None
        unless the multi-group cross section has been computed.
    xs_tally : openmc.Tally
        Derived tally for the multi-group cross section. This attribute
        is None unless the multi-group cross section has been computed.
    num_subdomains : int
        The number of subdomains is unity for 'material', 'cell' and 'universe'
        domain types. This is equal to the number of cell instances
        for 'distribcell' domain types (it is equal to unity prior to loading
        tally data from a statepoint file).
    num_nuclides : int
        The number of nuclides for which the multi-group cross section is
        being tracked. This is unity if the by_nuclide attribute is False.
    nuclides : Iterable of str or 'sum'
        The optional user-specified nuclides for which to compute cross
        sections (e.g., 'U238', 'O16'). If by_nuclide is True but nuclides
        are not specified by the user, all nuclides in the spatial domain
        are included. This attribute is 'sum' if by_nuclide is false.
    sparse : bool
        Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format
        for compressed data storage
    loaded_sp : bool
        Whether or not a statepoint file has been loaded with tally data
    derived : bool
        Whether or not the MGXS is merged from one or more other MGXS
    hdf5_key : str
        The key used to index multi-group cross sections in an HDF5 data store
    """

    # Store whether or not the number density should be removed for microscopic
    # values of this data; since this probability matrix is always normalized
    # to 1.0, this density division is not necessary
    _divide_by_density = False

    # NOTE(review): the four bare @property decorators below have no decorated
    # functions beneath them in this excerpt -- the property bodies appear to
    # have been stripped. Confirm against the upstream openmc.mgxs module.
    @property
    @property
    @property
    @property
class NuFissionMatrixXS(MatrixMGXS):
    r"""A fission production matrix multi-group cross section.

    This class can be used for both OpenMC input generation and tally data
    post-processing to compute spatially-homogenized and energy-integrated
    multi-group cross sections for multi-group neutronics calculations. At a
    minimum, one needs to set the :attr:`NuFissionMatrixXS.energy_groups` and
    :attr:`NuFissionMatrixXS.domain` properties. Tallies for the flux and
    appropriate reaction rates over the specified domain are generated
    automatically via the :attr:`NuFissionMatrixXS.tallies` property, which can
    then be appended to a :class:`openmc.Tallies` instance.

    For post-processing, the :meth:`MGXS.load_from_statepoint` will pull in the
    necessary data to compute multi-group cross sections from a
    :class:`openmc.StatePoint` instance. The derived multi-group cross section
    can then be obtained from the :attr:`NuFissionMatrixXS.xs_tally` property.

    For a spatial domain :math:`V`, incoming energy group
    :math:`[E_{g'},E_{g'-1}]`, and outgoing energy group :math:`[E_g,E_{g-1}]`,
    the fission production is calculated as:

    .. math::

       \begin{aligned}
       \langle \nu\sigma_{f,g'\rightarrow g} \phi \rangle &= \int_{r \in V} dr
       \int_{4\pi} d\Omega' \int_{E_{g'}}^{E_{g'-1}} dE' \int_{E_g}^{E_{g-1}} dE
       \; \chi(E) \nu\sigma_f (r, E') \psi(r, E', \Omega')\\
       \langle \phi \rangle &= \int_{r \in V} dr \int_{4\pi} d\Omega
       \int_{E_g}^{E_{g-1}} dE \; \psi (r, E, \Omega) \\
       \nu\sigma_{f,g'\rightarrow g} &= \frac{\langle \nu\sigma_{f,g'\rightarrow
       g} \phi \rangle}{\langle \phi \rangle}
       \end{aligned}

    Parameters
    ----------
    domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
        The domain for spatial homogenization
    domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
        The domain type for spatial homogenization
    groups : openmc.mgxs.EnergyGroups
        The energy group structure for energy condensation
    by_nuclide : bool
        If true, computes cross sections for each nuclide in domain
    name : str, optional
        Name of the multi-group cross section. Used as a label to identify
        tallies in OpenMC 'tallies.xml' file.
    num_polar : Integral, optional
        Number of equi-width polar angle bins for angle discretization;
        defaults to one bin
    num_azimuthal : Integral, optional
        Number of equi-width azimuthal angle bins for angle discretization;
        defaults to one bin
    prompt : bool
        If true, computes cross sections which only includes prompt neutrons;
        defaults to False which includes prompt and delayed in total

    Attributes
    ----------
    name : str, optional
        Name of the multi-group cross section
    rxn_type : str
        Reaction type (e.g., 'total', 'nu-fission', etc.)
    prompt : bool
        If true, computes cross sections which only includes prompt neutrons
    by_nuclide : bool
        If true, computes cross sections for each nuclide in domain
    domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
        Domain for spatial homogenization
    domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
        Domain type for spatial homogenization
    energy_groups : openmc.mgxs.EnergyGroups
        Energy group structure for energy condensation
    num_polar : Integral
        Number of equi-width polar angle bins for angle discretization
    num_azimuthal : Integral
        Number of equi-width azimuthal angle bins for angle discretization
    tally_trigger : openmc.Trigger
        An (optional) tally precision trigger given to each tally used to
        compute the cross section
    scores : list of str
        The scores in each tally used to compute the multi-group cross section
    filters : list of openmc.Filter
        The filters in each tally used to compute the multi-group cross section
    tally_keys : list of str
        The keys into the tallies dictionary for each tally used to compute
        the multi-group cross section
    estimator : 'analog'
        The tally estimator used to compute the multi-group cross section
    tallies : collections.OrderedDict
        OpenMC tallies needed to compute the multi-group cross section. The keys
        are strings listed in the :attr:`NuFissionMatrixXS.tally_keys`
        property and values are instances of :class:`openmc.Tally`.
    rxn_rate_tally : openmc.Tally
        Derived tally for the reaction rate tally used in the numerator to
        compute the multi-group cross section. This attribute is None
        unless the multi-group cross section has been computed.
    xs_tally : openmc.Tally
        Derived tally for the multi-group cross section. This attribute
        is None unless the multi-group cross section has been computed.
    num_subdomains : int
        The number of subdomains is unity for 'material', 'cell' and 'universe'
        domain types. This is equal to the number of cell instances
        for 'distribcell' domain types (it is equal to unity prior to loading
        tally data from a statepoint file).
    num_nuclides : int
        The number of nuclides for which the multi-group cross section is
        being tracked. This is unity if the by_nuclide attribute is False.
    nuclides : Iterable of str or 'sum'
        The optional user-specified nuclides for which to compute cross
        sections (e.g., 'U238', 'O16'). If by_nuclide is True but nuclides
        are not specified by the user, all nuclides in the spatial domain
        are included. This attribute is 'sum' if by_nuclide is false.
    sparse : bool
        Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format
        for compressed data storage
    loaded_sp : bool
        Whether or not a statepoint file has been loaded with tally data
    derived : bool
        Whether or not the MGXS is merged from one or more other MGXS
    hdf5_key : str
        The key used to index multi-group cross sections in an HDF5 data store
    """

    # NOTE(review): the @property and @prompt.setter decorators below have no
    # decorated functions beneath them in this excerpt -- the bodies of the
    # `prompt` accessors appear to have been stripped. Confirm against the
    # upstream openmc.mgxs module.
    @property
    @prompt.setter
class Chi(MGXS):
    r"""The fission spectrum.

    This class can be used for both OpenMC input generation and tally data
    post-processing to compute spatially-homogenized and energy-integrated
    multi-group cross sections for multi-group neutronics calculations. At a
    minimum, one needs to set the :attr:`Chi.energy_groups` and
    :attr:`Chi.domain` properties. Tallies for the flux and appropriate reaction
    rates over the specified domain are generated automatically via the
    :attr:`Chi.tallies` property, which can then be appended to a
    :class:`openmc.Tallies` instance.

    For post-processing, the :meth:`MGXS.load_from_statepoint` will pull in the
    necessary data to compute multi-group cross sections from a
    :class:`openmc.StatePoint` instance. The derived multi-group cross section
    can then be obtained from the :attr:`Chi.xs_tally` property.

    For a spatial domain :math:`V` and energy group :math:`[E_g,E_{g-1}]`, the
    fission spectrum is calculated as:

    .. math::

       \begin{aligned}
       \langle \nu\sigma_{f,g' \rightarrow g} \phi \rangle &= \int_{r \in V} dr
       \int_{4\pi} d\Omega' \int_0^\infty dE' \int_{E_g}^{E_{g-1}} dE \; \chi(E)
       \nu\sigma_f (r, E') \psi(r, E', \Omega')\\
       \langle \nu\sigma_f \phi \rangle &= \int_{r \in V} dr \int_{4\pi}
       d\Omega' \int_0^\infty dE' \int_0^\infty dE \; \chi(E) \nu\sigma_f (r,
       E') \psi(r, E', \Omega') \\
       \chi_g &= \frac{\langle \nu\sigma_{f,g' \rightarrow g} \phi \rangle}
       {\langle \nu\sigma_f \phi \rangle}
       \end{aligned}

    This class can also be used to gather a prompt-chi (which only includes the
    outgoing energy spectrum of prompt neutrons). This is accomplished by
    setting the :attr:`Chi.prompt` attribute to `True`.

    Parameters
    ----------
    domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
        The domain for spatial homogenization
    domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
        The domain type for spatial homogenization
    groups : openmc.mgxs.EnergyGroups
        The energy group structure for energy condensation
    prompt : bool
        If true, computes cross sections which only includes prompt neutrons;
        defaults to False which includes prompt and delayed in total
    by_nuclide : bool
        If true, computes cross sections for each nuclide in domain
    name : str, optional
        Name of the multi-group cross section. Used as a label to identify
        tallies in OpenMC 'tallies.xml' file.
    num_polar : Integral, optional
        Number of equi-width polar angle bins for angle discretization;
        defaults to one bin
    num_azimuthal : Integral, optional
        Number of equi-width azimuthal angle bins for angle discretization;
        defaults to one bin

    Attributes
    ----------
    name : str, optional
        Name of the multi-group cross section
    rxn_type : str
        Reaction type (e.g., 'total', 'nu-fission', etc.)
    prompt : bool
        If true, computes cross sections which only includes prompt neutrons
    by_nuclide : bool
        If true, computes cross sections for each nuclide in domain
    domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
        Domain for spatial homogenization
    domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
        Domain type for spatial homogenization
    energy_groups : openmc.mgxs.EnergyGroups
        Energy group structure for energy condensation
    num_polar : Integral
        Number of equi-width polar angle bins for angle discretization
    num_azimuthal : Integral
        Number of equi-width azimuthal angle bins for angle discretization
    tally_trigger : openmc.Trigger
        An (optional) tally precision trigger given to each tally used to
        compute the cross section
    scores : list of str
        The scores in each tally used to compute the multi-group cross section
    filters : list of openmc.Filter
        The filters in each tally used to compute the multi-group cross section
    tally_keys : list of str
        The keys into the tallies dictionary for each tally used to compute
        the multi-group cross section
    estimator : 'analog'
        The tally estimator used to compute the multi-group cross section
    tallies : collections.OrderedDict
        OpenMC tallies needed to compute the multi-group cross section. The keys
        are strings listed in the :attr:`Chi.tally_keys` property and values are
        instances of :class:`openmc.Tally`.
    rxn_rate_tally : openmc.Tally
        Derived tally for the reaction rate tally used in the numerator to
        compute the multi-group cross section. This attribute is None
        unless the multi-group cross section has been computed.
    xs_tally : openmc.Tally
        Derived tally for the multi-group cross section. This attribute
        is None unless the multi-group cross section has been computed.
    num_subdomains : int
        The number of subdomains is unity for 'material', 'cell' and 'universe'
        domain types. This is equal to the number of cell instances
        for 'distribcell' domain types (it is equal to unity prior to loading
        tally data from a statepoint file).
    num_nuclides : int
        The number of nuclides for which the multi-group cross section is
        being tracked. This is unity if the by_nuclide attribute is False.
    nuclides : Iterable of str or 'sum'
        The optional user-specified nuclides for which to compute cross
        sections (e.g., 'U238', 'O16'). If by_nuclide is True but nuclides
        are not specified by the user, all nuclides in the spatial domain
        are included. This attribute is 'sum' if by_nuclide is false.
    sparse : bool
        Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format
        for compressed data storage
    loaded_sp : bool
        Whether or not a statepoint file has been loaded with tally data
    derived : bool
        Whether or not the MGXS is merged from one or more other MGXS
    hdf5_key : str
        The key used to index multi-group cross sections in an HDF5 data store
    """

    # Store whether or not the number density should be removed for microscopic
    # values of this data; since this chi data is normalized to 1.0, the
    # data should not be divided by the number density
    _divide_by_density = False

    # NOTE(review): the bare @property decorator below has no decorated
    # function beneath it in this excerpt -- the property body appears to have
    # been stripped. Confirm against the upstream openmc.mgxs module.
    @property
@property
def _dont_squeeze(self):
    """Axes that :meth:`get_xs` must preserve when squeezing its result.

    Returns a tuple of axis indices: the energy-group axis alone for
    angle-independent data, or the polar, azimuthal and energy-group axes
    when angular discretization is in use.
    """
    # More than one polar or azimuthal bin means the leading axes carry
    # angular data and must survive the squeeze.
    angular = self.num_polar > 1 or self.num_azimuthal > 1
    return (0, 1, 3) if angular else (1,)
@property
@property
@property
@property
@property
@prompt.setter
def get_homogenized_mgxs(self, other_mgxs):
    """Construct a homogenized MGXS with other MGXS objects.

    Parameters
    ----------
    other_mgxs : openmc.mgxs.MGXS or Iterable of openmc.mgxs.MGXS
        The MGXS to homogenize with this one.

    Returns
    -------
    openmc.mgxs.MGXS
        A new homogenized MGXS

    Raises
    ------
    ValueError
        If the other_mgxs is of a different type.

    """
    # Delegate to the shared helper, weighting by the 'nu-fission-in' tally.
    homogenized = self._get_homogenized_mgxs(other_mgxs, 'nu-fission-in')
    return homogenized
def get_slice(self, nuclides=[], groups=[]):
    """Build a sliced Chi for the specified nuclides and energy groups.

    This method constructs a new MGXS to encapsulate a subset of the data
    represented by this MGXS. The subset of data to include in the tally
    slice is determined by the nuclides and energy groups specified in
    the input parameters.

    Parameters
    ----------
    nuclides : list of str
        A list of nuclide name strings
        (e.g., ['U235', 'U238']; default is [])
    groups : list of Integral
        A list of energy group indices starting at 1 for the high energies
        (e.g., [1, 2, 3]; default is [])

    Returns
    -------
    openmc.mgxs.MGXS
        A new MGXS which encapsulates the subset of data requested
        for the nuclide(s) and/or energy group(s) requested in the
        parameters.

    """
    # NOTE(review): the mutable default arguments are never mutated here, so
    # they are harmless, but None-defaults would be the safer idiom.

    # Temporarily remove energy filter from nu-fission-in since its
    # group structure will work in super MGXS.get_slice(...) method
    nu_fission_in = self.tallies['nu-fission-in']
    energy_filter = nu_fission_in.find_filter(openmc.EnergyFilter)
    nu_fission_in.remove_filter(energy_filter)

    # Call super class method and null out derived tallies so they are
    # lazily recomputed from the sliced raw tallies
    slice_xs = super().get_slice(nuclides, groups)
    slice_xs._rxn_rate_tally = None
    slice_xs._xs_tally = None

    # Slice energy groups if needed
    if len(groups) != 0:
        filter_bins = []
        for group in groups:
            group_bounds = self.energy_groups.get_group_bounds(group)
            filter_bins.append(group_bounds)
        filter_bins = [tuple(filter_bins)]

        # Slice nu-fission-out tally along energyout filter
        nu_fission_out = slice_xs.tallies['nu-fission-out']
        tally_slice = nu_fission_out.get_slice(
            filters=[openmc.EnergyoutFilter], filter_bins=filter_bins)
        slice_xs._tallies['nu-fission-out'] = tally_slice

    # Add energy filter back to nu-fission-in tallies (both this MGXS's
    # original tally and the slice's copy, which share the filter object)
    self.tallies['nu-fission-in'].add_filter(energy_filter)
    slice_xs._tallies['nu-fission-in'].add_filter(energy_filter)

    slice_xs.sparse = self.sparse
    return slice_xs
def merge(self, other):
    """Merge another Chi with this one.

    If results have been loaded from a statepoint, then Chi are only
    mergeable along one and only one of energy groups or nuclides.

    Parameters
    ----------
    other : openmc.mgxs.MGXS
        MGXS to merge with this one

    Returns
    -------
    merged_mgxs : openmc.mgxs.MGXS
        Merged MGXS

    """
    if not self.can_merge(other):
        raise ValueError('Unable to merge a Chi MGXS')

    # Work on a deep copy so neither operand is modified; derived tallies
    # are invalidated and will be recomputed on demand.
    result = copy.deepcopy(self)
    result._derived = True
    result._rxn_rate_tally = None
    result._xs_tally = None

    # Condense onto a common group structure when the operands differ
    if self.energy_groups != other.energy_groups:
        result.energy_groups = self.energy_groups.merge(other.energy_groups)

    # Concatenate nuclides, which must be mutually exclusive
    if self.nuclides != other.nuclides:
        overlap = [nuc for nuc in self.nuclides if nuc in other.nuclides]
        if overlap:
            raise ValueError(
                'Unable to merge a Chi MGXS with shared nuclides')
        result.nuclides = self.nuclides + other.nuclides

    # Merge the underlying tallies key by key
    for key in self.tallies:
        result.tallies[key] = self.tallies[key].merge(other.tallies[key])

    return result
def get_xs(self, groups='all', subdomains='all', nuclides='all',
           xs_type='macro', order_groups='increasing',
           value='mean', squeeze=True, **kwargs):
    """Returns an array of the fission spectrum.

    This method constructs a 3D NumPy array for the requested
    multi-group cross section data for one or more subdomains
    (1st dimension), energy groups (2nd dimension), and nuclides
    (3rd dimension).

    Parameters
    ----------
    groups : Iterable of Integral or 'all'
        Energy groups of interest. Defaults to 'all'.
    subdomains : Iterable of Integral or 'all'
        Subdomain IDs of interest. Defaults to 'all'.
    nuclides : Iterable of str or 'all' or 'sum'
        A list of nuclide name strings (e.g., ['U235', 'U238']). The
        special string 'all' will return the cross sections for all nuclides
        in the spatial domain. The special string 'sum' will return the
        cross section summed over all nuclides. Defaults to 'all'.
    xs_type: {'macro', 'micro'}
        This parameter is not relevant for chi but is included here to
        mirror the parent MGXS.get_xs(...) class method
    order_groups: {'increasing', 'decreasing'}
        Return the cross section indexed according to increasing or
        decreasing energy groups (decreasing or increasing energies).
        Defaults to 'increasing'.
    value : {'mean', 'std_dev', 'rel_err'}
        A string for the type of value to return. Defaults to 'mean'.
    squeeze : bool
        A boolean representing whether to eliminate the extra dimensions
        of the multi-dimensional array to be returned. Defaults to True.

    Returns
    -------
    numpy.ndarray
        A NumPy array of the multi-group cross section indexed in the order
        each group, subdomain and nuclide is listed in the parameters.

    Raises
    ------
    ValueError
        When this method is called before the multi-group cross section is
        computed from tally data.

    """
    cv.check_value('value', value, ['mean', 'std_dev', 'rel_err'])
    cv.check_value('xs_type', xs_type, ['macro', 'micro'])

    # FIXME: Unable to get microscopic xs for mesh domain because the mesh
    # cells do not know the nuclide densities in each mesh cell.
    if self.domain_type == 'mesh' and xs_type == 'micro':
        msg = 'Unable to get micro xs for mesh domain since the mesh ' \
              'cells do not know the nuclide densities in each mesh cell.'
        raise ValueError(msg)

    filters = []
    filter_bins = []

    # Construct a collection of the domain filter bins
    if not isinstance(subdomains, str):
        cv.check_iterable_type('subdomains', subdomains, Integral,
                               max_depth=3)

        filters.append(_DOMAIN_TO_FILTER[self.domain_type])
        subdomain_bins = []
        for subdomain in subdomains:
            subdomain_bins.append(subdomain)
        filter_bins.append(tuple(subdomain_bins))

    # Construct list of energy group bounds tuples for all requested groups
    if not isinstance(groups, str):
        cv.check_iterable_type('groups', groups, Integral)

        # Chi is binned on outgoing energy, hence the EnergyoutFilter
        filters.append(openmc.EnergyoutFilter)
        energy_bins = []
        for group in groups:
            energy_bins.append(
                (self.energy_groups.get_group_bounds(group),))
        filter_bins.append(tuple(energy_bins))

    # If chi was computed for each nuclide in the domain
    if self.by_nuclide:

        # Get the sum as the fission source weighted average chi for all
        # nuclides in the domain
        if nuclides == 'sum' or nuclides == ['sum']:

            # Retrieve the fission production tallies
            nu_fission_in = self.tallies['nu-fission-in']
            nu_fission_out = self.tallies['nu-fission-out']

            # Sum out all nuclides
            nuclides = self.get_nuclides()
            nu_fission_in = nu_fission_in.summation(nuclides=nuclides)
            nu_fission_out = nu_fission_out.summation(nuclides=nuclides)

            # Remove coarse energy filter to keep it out of tally arithmetic
            energy_filter = nu_fission_in.find_filter(openmc.EnergyFilter)
            nu_fission_in.remove_filter(energy_filter)

            # Compute chi and store it as the xs_tally attribute so we can
            # use the generic get_xs(...) method
            xs_tally = nu_fission_out / nu_fission_in

            # Add the coarse energy filter back to the nu-fission tally
            nu_fission_in.filters.append(energy_filter)

            xs = xs_tally.get_values(filters=filters,
                                     filter_bins=filter_bins, value=value)

        # Get chi for all nuclides in the domain
        elif nuclides == 'all':
            nuclides = self.get_nuclides()
            xs = self.xs_tally.get_values(filters=filters,
                                          filter_bins=filter_bins,
                                          nuclides=nuclides, value=value)

        # Get chi for user-specified nuclides in the domain
        else:
            cv.check_iterable_type('nuclides', nuclides, str)
            xs = self.xs_tally.get_values(filters=filters,
                                          filter_bins=filter_bins,
                                          nuclides=nuclides, value=value)

    # If chi was computed as an average of nuclides in the domain
    else:
        xs = self.xs_tally.get_values(filters=filters,
                                      filter_bins=filter_bins, value=value)

    # Eliminate the trivial score dimension
    xs = np.squeeze(xs, axis=len(xs.shape) - 1)
    # Divisions by a zero fission source produce NaNs; zero them out
    xs = np.nan_to_num(xs)

    if groups == 'all':
        num_groups = self.num_groups
    else:
        num_groups = len(groups)

    # Reshape tally data array with separate axes for domain and energy
    # Accommodate the polar and azimuthal bins if needed
    num_subdomains = int(xs.shape[0] / (num_groups * self.num_polar *
                                        self.num_azimuthal))
    if self.num_polar > 1 or self.num_azimuthal > 1:
        new_shape = (self.num_polar, self.num_azimuthal, num_subdomains,
                     num_groups) + xs.shape[1:]
    else:
        new_shape = (num_subdomains, num_groups) + xs.shape[1:]
    xs = np.reshape(xs, new_shape)

    # Reverse data if user requested increasing energy groups since
    # tally data is stored in order of increasing energies
    if order_groups == 'increasing':
        xs = xs[..., ::-1, :]

    if squeeze:
        # We want to squeeze out everything but the polar, azimuthal,
        # and energy group data.
        xs = self._squeeze_xs(xs)

    return xs
def get_units(self, xs_type='macro'):
    """Returns the units of Chi.

    This method returns the units of Chi, which is "%" for both macro
    and micro xs types.

    Parameters
    ----------
    xs_type: {'macro', 'micro'}
        Return the macro or micro cross section units.
        Defaults to 'macro'.

    Returns
    -------
    str
        A string representing the units of Chi.

    """
    valid_types = ['macro', 'micro']
    cv.check_value('xs_type', xs_type, valid_types)

    # Chi is a normalized spectrum, so macro and micro share the same units
    return '%'
class InverseVelocity(MGXS):
    r"""An inverse velocity multi-group cross section.

    This class can be used for both OpenMC input generation and tally data
    post-processing to compute spatially-homogenized and energy-integrated
    multi-group neutron inverse velocities for multi-group neutronics
    calculations. The units of inverse velocity are seconds per centimeter. At a
    minimum, one needs to set the :attr:`InverseVelocity.energy_groups` and
    :attr:`InverseVelocity.domain` properties. Tallies for the flux and
    appropriate reaction rates over the specified domain are generated
    automatically via the :attr:`InverseVelocity.tallies` property, which can
    then be appended to a :class:`openmc.Tallies` instance.

    For post-processing, the :meth:`MGXS.load_from_statepoint` will pull in the
    necessary data to compute multi-group cross sections from a
    :class:`openmc.StatePoint` instance. The derived multi-group cross section
    can then be obtained from the :attr:`InverseVelocity.xs_tally` property.

    For a spatial domain :math:`V` and energy group :math:`[E_g,E_{g-1}]`, the
    neutron inverse velocities are calculated by tallying the flux-weighted
    inverse velocity and the flux. The inverse velocity is then the
    flux-weighted inverse velocity divided by the flux:

    .. math::

       \frac{\int_{r \in V} dr \int_{4\pi} d\Omega \int_{E_g}^{E_{g-1}} dE \;
       \frac{\psi (r, E, \Omega)}{v (r, E)}}{\int_{r \in V} dr \int_{4\pi}
       d\Omega \int_{E_g}^{E_{g-1}} dE \; \psi (r, E, \Omega)}

    Parameters
    ----------
    domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
        The domain for spatial homogenization
    domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
        The domain type for spatial homogenization
    groups : openmc.mgxs.EnergyGroups
        The energy group structure for energy condensation
    by_nuclide : bool
        If true, computes cross sections for each nuclide in domain
    name : str, optional
        Name of the multi-group cross section. Used as a label to identify
        tallies in OpenMC 'tallies.xml' file.
    num_polar : Integral, optional
        Number of equi-width polar angle bins for angle discretization;
        defaults to one bin
    num_azimuthal : Integral, optional
        Number of equi-width azimuthal angle bins for angle discretization;
        defaults to one bin

    Attributes
    ----------
    name : str, optional
        Name of the multi-group cross section
    rxn_type : str
        Reaction type (e.g., 'total', 'nu-fission', etc.)
    by_nuclide : bool
        If true, computes cross sections for each nuclide in domain
    domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh
        Domain for spatial homogenization
    domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
        Domain type for spatial homogenization
    energy_groups : openmc.mgxs.EnergyGroups
        Energy group structure for energy condensation
    num_polar : Integral
        Number of equi-width polar angle bins for angle discretization
    num_azimuthal : Integral
        Number of equi-width azimuthal angle bins for angle discretization
    tally_trigger : openmc.Trigger
        An (optional) tally precision trigger given to each tally used to
        compute the cross section
    scores : list of str
        The scores in each tally used to compute the multi-group cross section
    filters : list of openmc.Filter
        The filters in each tally used to compute the multi-group cross section
    tally_keys : list of str
        The keys into the tallies dictionary for each tally used to compute
        the multi-group cross section
    estimator : {'tracklength', 'collision', 'analog'}
        The tally estimator used to compute the multi-group cross section
    tallies : collections.OrderedDict
        OpenMC tallies needed to compute the multi-group cross section. The keys
        are strings listed in the :attr:`InverseVelocity.tally_keys` property
        and values are instances of :class:`openmc.Tally`.
    rxn_rate_tally : openmc.Tally
        Derived tally for the reaction rate tally used in the numerator to
        compute the multi-group cross section. This attribute is None
        unless the multi-group cross section has been computed.
    xs_tally : openmc.Tally
        Derived tally for the multi-group cross section. This attribute
        is None unless the multi-group cross section has been computed.
    num_subdomains : int
        The number of subdomains is unity for 'material', 'cell' and 'universe'
        domain types. This is equal to the number of cell instances
        for 'distribcell' domain types (it is equal to unity prior to loading
        tally data from a statepoint file) and the number of mesh cells for
        'mesh' domain types.
    num_nuclides : int
        The number of nuclides for which the multi-group cross section is
        being tracked. This is unity if the by_nuclide attribute is False.
    nuclides : Iterable of str or 'sum'
        The optional user-specified nuclides for which to compute cross
        sections (e.g., 'U238', 'O16'). If by_nuclide is True but nuclides
        are not specified by the user, all nuclides in the spatial domain
        are included. This attribute is 'sum' if by_nuclide is false.
    sparse : bool
        Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format
        for compressed data storage
    loaded_sp : bool
        Whether or not a statepoint file has been loaded with tally data
    derived : bool
        Whether or not the MGXS is merged from one or more other MGXS
    hdf5_key : str
        The key used to index multi-group cross sections in an HDF5 data store
    """

    # Store whether or not the number density should be removed for microscopic
    # values of this data; since the inverse velocity does not contain number
    # density scaling, we should not remove the number density from microscopic
    # values
    _divide_by_density = False
def get_units(self, xs_type='macro'):
"""Returns the units of InverseVelocity.
This method returns the units of an InverseVelocity based on a desired
xs_type.
Parameters
----------
xs_type: {'macro', 'micro'}
Return the macro or micro cross section units.
Defaults to 'macro'.
Returns
-------
str
A string representing the units of the InverseVelocity.
"""
if xs_type == 'macro':
return 'second/cm'
else:
raise ValueError('Unable to return the units of InverseVelocity'
' for xs_type other than "macro"')
class MeshSurfaceMGXS(MGXS):
"""An abstract multi-group cross section for some energy group structure
on the surfaces of a mesh domain.
This class can be used for both OpenMC input generation and tally data
post-processing to compute surface- and energy-integrated multi-group cross
sections for multi-group neutronics calculations.
.. note:: Users should instantiate the subclasses of this abstract class.
.. versionadded:: 0.12.1
Parameters
----------
domain : openmc.RegularMesh
The domain for spatial homogenization
domain_type : {'mesh'}
The domain type for spatial homogenization
energy_groups : openmc.mgxs.EnergyGroups
The energy group structure for energy condensation
by_nuclide : bool
Unused in MeshSurfaceMGXS
name : str, optional
Name of the multi-group cross section. Used as a label to identify
tallies in OpenMC 'tallies.xml' file.
Attributes
----------
name : str, optional
Name of the multi-group cross section
rxn_type : str
Reaction type (e.g., 'total', 'nu-fission', etc.)
by_nuclide : bool
Unused in MeshSurfaceMGXS
domain : Mesh
Domain for spatial homogenization
domain_type : {'mesh'}
Domain type for spatial homogenization
energy_groups : openmc.mgxs.EnergyGroups
Energy group structure for energy condensation
tally_trigger : openmc.Trigger
An (optional) tally precision trigger given to each tally used to
compute the cross section
scores : list of str
The scores in each tally used to compute the multi-group cross section
filters : list of openmc.Filter
The filters in each tally used to compute the multi-group cross section
tally_keys : list of str
The keys into the tallies dictionary for each tally used to compute
the multi-group cross section
estimator : {'analog'}
The tally estimator used to compute the multi-group cross section
tallies : collections.OrderedDict
OpenMC tallies needed to compute the multi-group cross section
rxn_rate_tally : openmc.Tally
Derived tally for the reaction rate tally used in the numerator to
compute the multi-group cross section. This attribute is None
unless the multi-group cross section has been computed.
xs_tally : openmc.Tally
Derived tally for the multi-group cross section. This attribute
is None unless the multi-group cross section has been computed.
num_subdomains : int
The number of subdomains is equal to the number of mesh surfaces times
two to account for both the incoming and outgoing current from the
mesh cell surfaces.
num_nuclides : int
Unused in MeshSurfaceMGXS
nuclides : Iterable of str or 'sum'
Unused in MeshSurfaceMGXS
sparse : bool
Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format
for compressed data storage
loaded_sp : bool
Whether or not a statepoint file has been loaded with tally data
derived : bool
Whether or not the MGXS is merged from one or more other MGXS
hdf5_key : str
The key used to index multi-group cross sections in an HDF5 data store
"""
@property
@property
@property
@domain.setter
@domain_type.setter
@property
@property
def load_from_statepoint(self, statepoint):
"""Extracts tallies in an OpenMC StatePoint with the data needed to
compute multi-group cross sections.
This method is needed to compute cross section data from tallies
in an OpenMC StatePoint object.
.. note:: The statepoint must first be linked with a :class:`openmc.Summary`
object.
Parameters
----------
statepoint : openmc.StatePoint
An OpenMC StatePoint object with tally data
Raises
------
ValueError
When this method is called with a statepoint that has not been
linked with a summary object.
"""
cv.check_type('statepoint', statepoint, openmc.statepoint.StatePoint)
if statepoint.summary is None:
msg = 'Unable to load data from a statepoint which has not been ' \
'linked with a summary file'
raise ValueError(msg)
filters= []
filter_bins = []
# Clear any tallies previously loaded from a statepoint
if self.loaded_sp:
self._tallies = None
self._xs_tally = None
self._rxn_rate_tally = None
self._loaded_sp = False
# Find, slice and store Tallies from StatePoint
# The tally slicing is needed if tally merging was used
for tally_type, tally in self.tallies.items():
sp_tally = statepoint.get_tally(
tally.scores, tally.filters, tally.nuclides,
estimator=tally.estimator, exact_filters=True)
sp_tally = sp_tally.get_slice(
tally.scores, filters, filter_bins, tally.nuclides)
sp_tally.sparse = self.sparse
self.tallies[tally_type] = sp_tally
self._loaded_sp = True
    def get_xs(self, groups='all', subdomains='all', nuclides='all',
               xs_type='macro', order_groups='increasing',
               value='mean', squeeze=True, **kwargs):
        r"""Returns an array of multi-group cross sections.

        This method constructs a 3D NumPy array for the requested
        multi-group cross section data for one or more subdomains
        (1st dimension), energy groups (2nd dimension), and nuclides
        (3rd dimension).

        Parameters
        ----------
        groups : Iterable of Integral or 'all'
            Energy groups of interest. Defaults to 'all'.
        subdomains : Iterable of Integral or 'all'
            Subdomain IDs of interest. Defaults to 'all'.
        nuclides : Iterable of str or 'all' or 'sum'
            Unused in MeshSurfaceMGXS, its value will be ignored. The nuclides
            dimension of the resultant array will always have a length of 1.
        xs_type: {'macro'}
            The 'macro'/'micro' distinction does not apply to MeshSurfaceMGXS.
            The calculation of a 'micro' xs_type is omited in this class.
        order_groups: {'increasing', 'decreasing'}
            Return the cross section indexed according to increasing or
            decreasing energy groups (decreasing or increasing energies).
            Defaults to 'increasing'.
        value : {'mean', 'std_dev', 'rel_err'}
            A string for the type of value to return. Defaults to 'mean'.
        squeeze : bool
            A boolean representing whether to eliminate the extra dimensions
            of the multi-dimensional array to be returned. Defaults to True.

        Returns
        -------
        numpy.ndarray
            A NumPy array of the multi-group cross section indexed in the order
            each group, subdomain and nuclide is listed in the parameters.

        Raises
        ------
        ValueError
            When this method is called before the multi-group cross section is
            computed from tally data.
        """
        cv.check_value('value', value, ['mean', 'std_dev', 'rel_err'])
        cv.check_value('xs_type', xs_type, ['macro'])

        filters = []
        filter_bins = []

        # Construct a collection of the domain filter bins
        if not isinstance(subdomains, str):
            cv.check_iterable_type('subdomains', subdomains, Integral,
                                   max_depth=3)
            filters.append(_DOMAIN_TO_FILTER[self.domain_type])
            subdomain_bins = []
            for subdomain in subdomains:
                subdomain_bins.append(subdomain)
            filter_bins.append(tuple(subdomain_bins))

        # Fetch the (possibly subdomain-filtered) values of the derived tally.
        xs = self.xs_tally.get_values(filters=filters,
                                      filter_bins=filter_bins, value=value)

        # Construct list of energy group bounds tuples for all requested groups
        # NOTE(review): these energy filter bins are appended *after*
        # get_values() has already run, so they do not restrict the values
        # fetched above -- confirm whether group filtering was intended here.
        if not isinstance(groups, str):
            cv.check_iterable_type('groups', groups, Integral)
            filters.append(openmc.EnergyFilter)
            energy_bins = []
            for group in groups:
                energy_bins.append(
                    (self.energy_groups.get_group_bounds(group),))
            filter_bins.append(tuple(energy_bins))

        # Eliminate the trivial score dimension
        xs = np.squeeze(xs, axis=len(xs.shape) - 1)
        xs = np.nan_to_num(xs)

        if groups == 'all':
            num_groups = self.num_groups
        else:
            num_groups = len(groups)

        # Reshape tally data array with separate axes for domain and energy
        # Accomodate the polar and azimuthal bins if needed
        # Each mesh cell contributes 4 * n_dimension surface bins (incoming
        # and outgoing current across each face).
        num_surfaces = 4 * self.domain.n_dimension
        num_subdomains = int(xs.shape[0] / (num_groups * self.num_polar *
                                            self.num_azimuthal * num_surfaces))
        if self.num_polar > 1 or self.num_azimuthal > 1:
            new_shape = (self.num_polar, self.num_azimuthal, num_subdomains,
                         num_groups, num_surfaces)
        else:
            new_shape = (num_subdomains, num_groups, num_surfaces)
        new_shape += xs.shape[1:]
        new_xs = np.zeros(new_shape)
        # De-interleave the flat tally ordering (cell-major, then surface,
        # then group -- see the index expression) into (cell, group, surface).
        for cell in range(num_subdomains):
            for g in range(num_groups):
                for s in range(num_surfaces):
                    new_xs[cell,g,s] = \
                        xs[cell*num_surfaces*num_groups+s*num_groups+g]
        xs = new_xs

        # Reverse data if user requested increasing energy groups since
        # tally data is stored in order of increasing energies
        if order_groups == 'increasing':
            xs = xs[..., ::-1, :, :]

        if squeeze:
            # We want to squeeze out everything but the polar, azimuthal,
            # and energy group data.
            xs = self._squeeze_xs(xs)

        return xs
    def get_pandas_dataframe(self, groups='all', nuclides='all',
                             xs_type='macro', paths=True):
        """Build a Pandas DataFrame for the MGXS data.

        This method leverages :meth:`openmc.Tally.get_pandas_dataframe`, but
        renames the columns with terminology appropriate for cross section data.

        Parameters
        ----------
        groups : Iterable of Integral or 'all'
            Energy groups of interest. Defaults to 'all'.
        nuclides : Iterable of str or 'all' or 'sum'
            Unused in MeshSurfaceMGXS, its value will be ignored. The nuclides
            dimension of the resultant array will always have a length of 1.
        xs_type: {'macro'}
            'micro' unused in MeshSurfaceMGXS.
        paths : bool, optional
            Construct columns for distribcell tally filters (default is True).
            The geometric information in the Summary object is embedded into
            a Multi-index column with a geometric "path" to each distribcell
            instance.

        Returns
        -------
        pandas.DataFrame
            A Pandas DataFrame for the cross section data.

        Raises
        ------
        ValueError
            When this method is called before the multi-group cross section is
            computed from tally data.
        """
        if not isinstance(groups, str):
            cv.check_iterable_type('groups', groups, Integral)
        cv.check_value('xs_type', xs_type, ['macro'])

        df = self.xs_tally.get_pandas_dataframe(paths=paths)

        # Remove the score column since it is homogeneous and redundant
        df = df.drop('score', axis=1, level=0)

        # Convert azimuthal, polar, energy in and energy out bin values in to
        # bin indices
        columns = self._df_convert_columns_to_bins(df)

        # Select out those groups the user requested
        if not isinstance(groups, str):
            if 'group in' in df:
                df = df[df['group in'].isin(groups)]
            if 'group out' in df:
                df = df[df['group out'].isin(groups)]

        # Relocate the 'surf' column so it sits immediately after the mesh
        # coordinate columns (x[, y[, z]]) in the multi-index.
        mesh_str = 'mesh {0}'.format(self.domain.id)
        col_key = (mesh_str, 'surf')
        surfaces = df.pop(col_key)
        df.insert(len(self.domain.dimension), col_key, surfaces)

        # Sort rows by mesh coordinates, then surface, then the remaining bin
        # columns; the branches simply match the mesh dimensionality (1D/2D/3D).
        if len(self.domain.dimension) == 1:
            df.sort_values(by=[(mesh_str, 'x'), (mesh_str, 'surf')]
                           + columns, inplace=True)
        elif len(self.domain.dimension) == 2:
            df.sort_values(by=[(mesh_str, 'x'), (mesh_str, 'y'),
                               (mesh_str, 'surf')] + columns, inplace=True)
        elif len(self.domain.dimension) == 3:
            df.sort_values(by=[(mesh_str, 'x'), (mesh_str, 'y'),
                               (mesh_str, 'z'), (mesh_str, 'surf')] + columns, inplace=True)
        return df
class Current(MeshSurfaceMGXS):
r"""A current multi-group cross section.
This class can be used for both OpenMC input generation and tally data
post-processing to compute surface- and energy-integrated
multi-group current cross sections for multi-group neutronics calculations. At
a minimum, one needs to set the :attr:`Current.energy_groups` and
:attr:`Current.domain` properties. Tallies for the appropriate
reaction rates over the specified domain are generated automatically via the
:attr:`Current.tallies` property, which can then be appended to a
:class:`openmc.Tallies` instance.
For post-processing, the :meth:`MGXS.load_from_statepoint` will pull in the
necessary data to compute multi-group cross sections from a
:class:`openmc.StatePoint` instance. The derived multi-group cross section
can then be obtained from the :attr:`Current.xs_tally` property.
For a spatial domain :math:`S` and energy group :math:`[E_g,E_{g-1}]`, the
total cross section is calculated as:
.. math::
\frac{\int_{r \in S} dS \int_{E_g}^{E_{g-1}} dE \;
J(r, E)}{\int_{r \in S} dS \int_{E_g}^{E_{g-1}} dE}.
.. versionadded:: 0.12.1
Parameters
----------
domain : openmc.RegularMesh
The domain for spatial homogenization
    domain_type : {'mesh'}
The domain type for spatial homogenization
groups : openmc.mgxs.EnergyGroups
The energy group structure for energy condensation
by_nuclide : bool
Unused in MeshSurfaceMGXS
name : str, optional
Name of the multi-group cross section. Used as a label to identify
tallies in OpenMC 'tallies.xml' file.
Attributes
----------
name : str, optional
Name of the multi-group cross section
rxn_type : str
Reaction type (e.g., 'total', 'nu-fission', etc.)
by_nuclide : bool
Unused in MeshSurfaceMGXS
domain : openmc.RegularMesh
Domain for spatial homogenization
domain_type : {'mesh'}
Domain type for spatial homogenization
energy_groups : openmc.mgxs.EnergyGroups
Energy group structure for energy condensation
tally_trigger : openmc.Trigger
An (optional) tally precision trigger given to each tally used to
compute the cross section
scores : list of str
The scores in each tally used to compute the multi-group cross section
filters : list of openmc.Filter
The filters in each tally used to compute the multi-group cross section
tally_keys : list of str
The keys into the tallies dictionary for each tally used to compute
the multi-group cross section
estimator : {'analog'}
The tally estimator used to compute the multi-group cross section
tallies : collections.OrderedDict
OpenMC tallies needed to compute the multi-group cross section. The keys
        are strings listed in the :attr:`Current.tally_keys` property and values
are instances of :class:`openmc.Tally`.
rxn_rate_tally : openmc.Tally
Derived tally for the reaction rate tally used in the numerator to
compute the multi-group cross section. This attribute is None
unless the multi-group cross section has been computed.
xs_tally : openmc.Tally
Derived tally for the multi-group cross section. This attribute
is None unless the multi-group cross section has been computed.
num_subdomains : int
The number of subdomains is equal to the number of mesh surfaces times
two to account for both the incoming and outgoing current from the
mesh cell surfaces.
num_nuclides : int
Unused in MeshSurfaceMGXS
nuclides : Iterable of str or 'sum'
Unused in MeshSurfaceMGXS
sparse : bool
Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format
for compressed data storage
loaded_sp : bool
Whether or not a statepoint file has been loaded with tally data
derived : bool
Whether or not the MGXS is merged from one or more other MGXS
hdf5_key : str
The key used to index multi-group cross sections in an HDF5 data store
"""
| [
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
11748,
4866,
198,
6738,
3146,
1330,
15995,
1373,
198,
11748,
28686,
198,
11748,
14601,
198,
198,
11748,
289,
20,
9078,
198,
11748,
299,
32152,
355,
45941,
198,
198,
11748,
1280,
23209,
198,
... | 2.419062 | 104,419 |
import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile
import json
from io import StringIO
from PIL import Image
from watson_developer_cloud import VisualRecognitionV3
import matplotlib.pyplot as plt
import matplotlib.patches as patches
# Replace with your api key
# NOTE(review): '2016-05-20' is the Watson Visual Recognition API version
# date; 'API-KEY' is a placeholder that must be replaced before running.
visual_recognition = VisualRecognitionV3('2016-05-20', api_key='API-KEY')

# Draw/classify at most this many detections per image.
MAX_NUMBER_OF_BOXES = 10
# Detections scoring below this confidence are skipped.
MINIMUM_CONFIDENCE = 0.6
# Matplotlib single-letter color codes, cycled per detection box.
# NOTE(review): 'b' appears twice (1st and 7th entries) -- possibly a typo.
COLORS = ['b', 'g', 'r', 'c', 'm', 'y', 'b', 'w']

# What model to download.
MODEL_NAME = 'ssd_mobilenet_v1_coco_11_06_2017'
MODEL_FILE = MODEL_NAME + '.tar.gz'
DOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/'
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'
print('Downloading model... (This may take over 5 minutes)')
# Download and unpack the model only if the frozen graph is not already present.
if not os.path.exists(PATH_TO_CKPT):
    # urlretrieve replaces the deprecated URLopener().retrieve() pair;
    # URLopener was deprecated since Python 3.3 and removed in 3.9.
    urllib.request.urlretrieve(DOWNLOAD_BASE + MODEL_FILE, MODEL_FILE)
    print('Extracting...')
    # Use a context manager so the archive is closed deterministically,
    # and avoid shadowing the builtin name 'file'.
    with tarfile.open(MODEL_FILE) as tar_file:
        for member in tar_file.getmembers():
            member_name = os.path.basename(member.name)
            # Only the frozen inference graph is needed for detection.
            if 'frozen_inference_graph.pb' in member_name:
                tar_file.extract(member, os.getcwd())
else:
    print('Model already downloaded')
# Load model into memory
print('Loading model...')
# TensorFlow 1.x graph-mode API: deserialize the frozen GraphDef protobuf
# from disk and import it into a fresh Graph. name='' keeps the imported
# tensor names unprefixed so lookups like 'image_tensor:0' work below.
detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')
def load_image_into_numpy_array(image):
    """Convert a PIL image into an (height, width, 3) uint8 NumPy array.

    This helper was referenced below but never defined, so the script
    failed with a NameError before any detection could run.
    """
    (im_width, im_height) = image.size
    return np.array(image.getdata()).reshape(
        (im_height, im_width, 3)).astype(np.uint8)


# Path to test image, "test_image/image1.jpg"
TEST_IMAGE_PATH = 'test_image/image1.jpg'

print('detecting...')
with detection_graph.as_default():
    with tf.Session(graph=detection_graph) as sess:
        image = Image.open(TEST_IMAGE_PATH)
        image_np = load_image_into_numpy_array(image)
        # The model expects a batch dimension: (1, height, width, 3).
        image_np_expanded = np.expand_dims(image_np, axis=0)
        image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
        boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
        scores = detection_graph.get_tensor_by_name('detection_scores:0')
        num_detections = detection_graph.get_tensor_by_name('num_detections:0')
        # Actual detection.
        (boxes, scores, num_detections) = sess.run(
            [boxes, scores, num_detections],
            feed_dict={image_tensor: image_np_expanded})
        # Create figure and axes and display the image
        fig, ax = plt.subplots(1)
        ax.imshow(image_np)
        (height, width, x) = image_np.shape
        # The output directory only needs to be created once, not per box.
        path = 'cropped/image1'
        os.makedirs(path, exist_ok=True)
        for i in range(0, int(min(num_detections, MAX_NUMBER_OF_BOXES))):
            # Detections come sorted by score, so stop at the first weak one.
            score = np.squeeze(scores)[i]
            if score < MINIMUM_CONFIDENCE:
                break
            # Boxes are normalized [ymin, xmin, ymax, xmax]; scale to pixels.
            box = np.squeeze(boxes)[i]
            box_x = box[1] * width
            box_y = box[0] * height
            box_width = (box[3] - box[1]) * width
            box_height = (box[2] - box[0]) * height
            box_x2 = box[3] * width
            box_y2 = box[2] * height
            img2 = image.crop((box_x, box_y, box_x2, box_y2))
            full_path = os.path.join(path, 'img{}.jpg'.format(i))
            img2.save(full_path)
            # Classify images with watson visual recognition
            with open(full_path, 'rb') as images_file:
                results = visual_recognition.classify(
                    images_file=images_file, threshold=0.7,
                    classifier_ids=['default'])
            print(json.dumps(results, indent=2))
            label = results['images'][0]['classifiers'][0]['classes'][0]['class']
            ax.text(box_x + 5, box_y - 5, label, fontsize=10, color='white',
                    bbox={'facecolor': COLORS[i % 8], 'edgecolor': 'none'})
            # Create a Rectangle patch
            rect = patches.Rectangle((box_x, box_y), box_width, box_height,
                                     linewidth=2, edgecolor=COLORS[i % 8],
                                     facecolor='none')
            ax.add_patch(rect)
plt.show()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
28686,
198,
11748,
2237,
13,
76,
5241,
13,
333,
297,
571,
355,
2956,
297,
571,
198,
11748,
25064,
198,
11748,
13422,
7753,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
19974,
7753,
19... | 2.269168 | 1,839 |
"""
Tests module for ThreatExchangeV2 integration logic
"""
from typing import Tuple, List, Dict, Optional
from ThreatExchangeV2 import ThreatExchangeV2Status, calculate_dbot_score, calculate_engines,\
get_reputation_data_statuses, convert_string_to_epoch_time, flatten_outputs_paging
import pytest
from CommonServerPython import Common
# Score boundaries used when mapping ThreatExchange reputation data onto
# DBot scores in the tests below.
# NOTE(review): MALICIOUS and NON_MALICIOUS share the same boundary (50);
# verify the comparison directions in calculate_dbot_score.
MALICIOUS_THRESHOLD = 50
SUSPICIOUS_THRESHOLD = 1
NON_MALICIOUS_THRESHOLD = 50
# Key under which reputation entries expose their status field.
STATUS = 'status'
| [
37811,
198,
51,
3558,
8265,
329,
25238,
3109,
3803,
53,
17,
11812,
9156,
198,
37811,
198,
6738,
19720,
1330,
309,
29291,
11,
7343,
11,
360,
713,
11,
32233,
198,
198,
6738,
25238,
3109,
3803,
53,
17,
1330,
25238,
3109,
3803,
53,
17,
... | 2.901316 | 152 |
import pandas as pd
import numpy as np | [
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941
] | 3.166667 | 12 |
# ******************************************************************************
# Copyright 2020. NAVER Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import random
import pinpointPy
from pinpoint.common.Defines import *
from pinpoint.settings import * | [
171,
119,
123,
2,
41906,
17174,
46068,
1174,
198,
2,
220,
220,
15069,
220,
12131,
13,
11746,
5959,
11421,
13,
198,
2,
198,
2,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
2... | 4.085308 | 211 |
import re
import checksum
import ch.systemsx.cisd.etlserver.registrator.api.v2
from ch.systemsx.cisd.openbis.generic.shared.api.v1.dto import SearchCriteria
from ch.systemsx.cisd.openbis.generic.shared.api.v1.dto import SearchSubCriteria
import csv
from subprocess import Popen, PIPE
# QBiC sample barcode: 'Q', four alphanumerics, three digits, an upper-case
# letter, then one final alphanumeric character.
# NOTE(review): structure inferred from the regex itself -- confirm against
# the QBiC barcode specification (last character is presumably a checksum).
barcode_pattern = re.compile('Q[a-zA-Z0-9]{4}[0-9]{3}[A-Z][a-zA-Z0-9]')
# Absolute installation paths on the openBIS ETL host.
conda_home_path = "/home/qeana10/miniconda2/"
# OMERO Python bindings bundled inside this dropbox plugin.
omero_lib_path = "/home/qeana10/openbis/servers/core-plugins/QBIC/1/dss/drop-boxes/register-omero-metadata/OMERO.py-5.4.10-ice36-b105"
etl_home_path = "/home/qeana10/openbis/servers/core-plugins/QBIC/1/dss/drop-boxes/register-omero-metadata/"
| [
11748,
302,
198,
11748,
8794,
388,
198,
11748,
442,
13,
10057,
82,
87,
13,
66,
9409,
13,
316,
75,
15388,
13,
2301,
396,
12392,
13,
15042,
13,
85,
17,
198,
6738,
442,
13,
10057,
82,
87,
13,
66,
9409,
13,
9654,
41907,
13,
41357,
1... | 2.273684 | 285 |
from streamsvg import Drawing
s = Drawing()
s.addNode("a")
s.addNode("b")
s.addNode("c")
s.addNode("d")
s.addLink("a", "b", 0, 4,color="#BBBBBB",width=2)
s.addLink("a", "b", 6, 9,color="#BBBBBB",width=2)
s.addLink("a", "c", 2, 5, height=0.4,width=3)
s.addLink("b", "c", 1, 8,width=3)
s.addLink("b", "d", 7, 10, height=0.4,color="#BBBBBB",width=2)
s.addLink("c", "d", 6, 9,width=3)
s.addNodeCluster("a",[(2,5)],color="blue",width=3)
s.addNodeCluster("b",[(1,8)],color="blue",width=3)
s.addNodeCluster("d",[(6,9)],color="blue",width=3)
s.addTimeLine(ticks=2)
| [
6738,
15190,
45119,
1330,
40027,
198,
198,
82,
796,
40027,
3419,
198,
198,
82,
13,
2860,
19667,
7203,
64,
4943,
198,
82,
13,
2860,
19667,
7203,
65,
4943,
198,
82,
13,
2860,
19667,
7203,
66,
4943,
198,
82,
13,
2860,
19667,
7203,
67,
... | 2.017921 | 279 |
# Generated by Django 3.2.9 on 2022-02-23 18:19
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
17,
13,
24,
319,
33160,
12,
2999,
12,
1954,
1248,
25,
1129,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
# Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import sys
import numpy
from numpy import fft
from numpy.testing import (assert_, assert_equal, assert_array_equal,
assert_array_almost_equal, assert_almost_equal,
suppress_warnings)
import pytest
from pytest import raises as assert_raises
import scipy.ndimage as ndimage
# Absolute tolerance used by approximate comparisons in these ndimage tests.
eps = 1e-12
| [
2,
15069,
357,
34,
8,
5816,
12,
14315,
5613,
449,
13,
569,
3760,
263,
198,
2,
198,
2,
2297,
396,
3890,
290,
779,
287,
2723,
290,
13934,
5107,
11,
351,
393,
1231,
198,
2,
17613,
11,
389,
10431,
2810,
326,
262,
1708,
3403,
198,
2,... | 3.199288 | 562 |
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2012-2020 iSolver Software Solutions (C) 2021 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
from __future__ import division, absolute_import
import struct
from weakref import proxy
from gevent import sleep, Greenlet
import msgpack
# msgpack_numpy teaches msgpack how to (de)serialize NumPy arrays; patch()
# installs its encoders/decoders globally into msgpack.
try:
    import msgpack_numpy
    msgpack_numpy.patch()
except ImportError:
    # Degrade gracefully: warn instead of failing, since not every iohub
    # configuration needs numpy-aware packing.
    from .errors import print2err
    print2err("Warning: msgpack_numpy could not be imported. ",
              "This may cause issues for iohub.")
from .devices import Computer
from .errors import print2err, printExceptionDetailsToStdErr
from .util import NumPyRingBuffer as RingBuffer
# Maximum UDP payload size used when exchanging messages with the ioHub
# server. NOTE(review): the larger 64 KB limit appears to be Windows-specific
# tuning; confirm whether the 16 KB cap is still required on other platforms.
if Computer.platform == 'win32':
    MAX_PACKET_SIZE = 64 * 1024
else:
    MAX_PACKET_SIZE = 16 * 1024
##### TIME SYNC CLASS ######
class ioHubTimeSyncConnection(UDPClientConnection):
    """Special-purpose UDPClientConnection used only for time synchronization.

    Its sole job is sending and receiving time-sync message requests and
    responses with a remote ioHub Server instance.
    """
class ioHubTimeGreenSyncManager(Greenlet):
    """Greenlet-based time synchronization manager run inside an ioHub Server
    while an ioHubRemoteEventSubscriber device is active.

    It continuously monitors and computes the clock offset and drift between
    the local ioHub instance and a remote ioHub instance running on another
    computer, so that events published by the remote instance and received by
    the local ioHubRemoteEventSubscriber can be mapped onto the local
    time base.
    """
class TimeSyncState(object):
    """Holds the measurements an ioHubSyncManager needs in order to convert
    times between an ioHub Server and an ioHubRemoteEventSubscriber client:
    recent round-trip times, paired local/remote timestamps, and the drift
    and offset estimates derived from them.
    """
    # Ring buffers of recent measurements; averaging over them smooths
    # out per-exchange network jitter.
    RTTs = RingBuffer(10)
    L_times = RingBuffer(10)
    R_times = RingBuffer(10)
    drifts = RingBuffer(20)
    offsets = RingBuffer(20)

    def getDrift(self):
        """Return the current drift estimate between the two time bases."""
        return self.drifts.mean()

    def getOffset(self):
        """Return the current offset estimate between the two time bases."""
        return self.offsets.mean()

    def getAccuracy(self):
        """Return the current sync accuracy: half the average of the last
        10 round-trip request/response delays."""
        mean_rtt = self.RTTs.mean()
        return mean_rtt / 2.0

    def local2RemoteTime(self, local_time=None):
        """Map a local time (sec.msec) onto the remote computer's clock
        using the current drift and offset estimates. The current local
        time is used when *local_time* is None."""
        moment = Computer.getTime() if local_time is None else local_time
        return self.getDrift() * moment + self.getOffset()

    def remote2LocalTime(self, remote_time):
        """Map a remote computer time (sec.msec) back onto the local clock;
        the exact inverse of :meth:`local2RemoteTime`."""
        offset = self.getOffset()
        drift = self.getDrift()
        return (remote_time - offset) / drift
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2142,
286,
262,
38955,
20519,
5888,
198,
2,
15069,
357,
34,
8,
2321,
12,
42334,
1312,
50,
14375,
10442,
23555,
357,
34,
8,
33448,
4946,
5800,
20003,
12052,
13,
198,... | 3.138889 | 972 |
# -*- coding: utf-8 -*-
from io import open
from setuptools import setup

# The theme's package directory must be a valid Python identifier. The
# original ``from sphinx-bootstrap-basic import __version__`` was a
# SyntaxError: hyphens are not legal in module names.
from sphinx_bootstrap_basic import __version__

setup(
    name='sphinx-bootstrap-basic',
    version=__version__,
    url='https://github.com/Blendify/sphinx-bootstrap-basic/',
    license='Apache License 2.0',
    author='Aaron Carlisle',
    author_email='carlisle.aaron00@gmail.com',
    description='Simple bootstrap theme',
    long_description=open('README.rst', encoding='utf-8').read(),
    zip_safe=False,
    # Importable package name uses underscores; the distribution name above
    # keeps the hyphenated form.
    packages=['sphinx_bootstrap_basic'],
    package_data={'sphinx_bootstrap_basic': [
        'theme.conf',
        '*.html',
        'static/css/*.css',
        'static/js/*.js',
    ]},
    include_package_data=True,
    # See http://www.sphinx-doc.org/en/stable/theming.html#distribute-your-theme-as-a-python-package
    entry_points={
        'sphinx.html_themes': [
            # Theme name (left) may keep the hyphen; the right-hand side
            # must be the importable module.
            'sphinx-bootstrap-basic = sphinx_bootstrap_basic',
        ]
    },
    classifiers=[
        'Framework :: Sphinx',
        'Framework :: Sphinx :: Theme',
        'Development Status :: 5 - Production/Stable',
        # 'Apache License 2.0 License' is not a valid Trove classifier.
        'License :: OSI Approved :: Apache Software License',
        'Environment :: Console',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Operating System :: OS Independent',
        'Topic :: Documentation',
        'Topic :: Software Development :: Documentation',
    ],
)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
33245,
1330,
1280,
198,
6738,
900,
37623,
10141,
1330,
9058,
198,
6738,
599,
20079,
87,
12,
18769,
26418,
12,
35487,
1330,
11593,
9641,
834,
198,
198,
40406,
7,
19... | 2.456897 | 696 |
"""
USING "DEF" FUNCTION TO APPLY PARAMETERS
TO CREATE..
A CALCULATOR
"""
print(SMCALC())
| [
37811,
201,
198,
2937,
2751,
366,
32988,
1,
29397,
4177,
2849,
5390,
3486,
6489,
56,
29463,
2390,
2767,
4877,
201,
198,
10468,
29244,
6158,
492,
201,
198,
32,
33290,
34,
6239,
25633,
201,
198,
37811,
201,
198,
201,
198,
4798,
7,
50,
... | 1.833333 | 60 |
"""Functions for creating features to the cleaned data
"""
| [
37811,
24629,
2733,
329,
4441,
3033,
284,
262,
20750,
1366,
198,
37811,
198
] | 4.538462 | 13 |
import logging
import os
from typing import List, Optional
import altair
from ps2_census.enums import PlayerState
from ps2_analysis.enums import DamageLocation
from ps2_analysis.fire_groups.cone_of_fire import ConeOfFire
from ps2_analysis.fire_groups.data_files import (
update_data_files as update_fire_groups_data_files,
)
from ps2_analysis.fire_groups.fire_mode import FireMode
from ps2_analysis.utils import CodeTimer
from ps2_analysis.weapons.infantry.data_files import (
update_data_files as update_infantry_weapons_data_files,
)
from ps2_analysis.weapons.infantry.generate import generate_all_infantry_weapons
from ps2_analysis.weapons.infantry.infantry_weapon import InfantryWeapon
logging.basicConfig(level=logging.INFO)
SERVICE_ID: Optional[str] = os.environ.get("CENSUS_SERVICE_ID")
DATAFILES_DIRECTORY: str = "../datafiles"
if not SERVICE_ID:
raise ValueError("CENSUS_SERVICE_ID envvar not found")
update_fire_groups_data_files(
directory=DATAFILES_DIRECTORY, service_id=SERVICE_ID,
)
update_infantry_weapons_data_files(
directory=DATAFILES_DIRECTORY, service_id=SERVICE_ID,
)
infantry_weapons: List[InfantryWeapon] = list(
generate_all_infantry_weapons(data_files_directory=DATAFILES_DIRECTORY)
)
print(f"Generated {len(infantry_weapons)} infantry weapons")
wp: InfantryWeapon = next(x for x in infantry_weapons if x.item_id == 43)
fm: FireMode = wp.fire_groups[0].fire_modes[1]
cof: ConeOfFire = fm.player_state_cone_of_fire[PlayerState.STANDING]
rttks: List[dict] = []
distance: int = 30
burst_length: int
for burst_length in range(0, int(round(fm.max_consecutive_shots / 4)) + 1, 1):
control_time: int
for control_time in range(
0, cof.recover_time(cof.min_cof_angle() + cof.bloom * burst_length * 2) + 10, 10
):
with CodeTimer(
f"{burst_length} length and {control_time}ms control time simulation"
):
ttk: int
timed_out_ratio: float
ttk, timed_out_ratio = fm.real_time_to_kill(
distance=distance,
runs=500,
control_time=control_time,
auto_burst_length=burst_length,
aim_location=DamageLocation.TORSO,
recoil_compensation=True,
)
rttks.append(
{
"distance": distance,
"control_time": control_time + fm.fire_timing.refire_time,
"burst_length": burst_length,
"ttk": ttk if timed_out_ratio < 0.20 else -1,
"timed_out_ratio": timed_out_ratio,
}
)
dataset = altair.Data(values=rttks)
chart = (
altair.Chart(dataset)
.mark_rect()
.encode(
x="burst_length:O",
y=altair.Y(
"control_time:O",
sort=altair.EncodingSortField("control_time", order="descending"),
),
color=altair.Color(
"ttk:Q", scale=altair.Scale(scheme="plasma"), sort="descending"
),
tooltip=["ttk:Q", "timed_out_ratio:Q"],
)
.properties(
title=f"{wp.name} TTK by burst length and control time at {distance}m",
height=900,
width=900,
)
.interactive()
)
chart.save("bursts_ttk_simulation.html")
| [
11748,
18931,
198,
11748,
28686,
198,
6738,
19720,
1330,
7343,
11,
32233,
198,
198,
11748,
5988,
958,
198,
6738,
26692,
17,
62,
66,
7314,
13,
268,
5700,
1330,
7853,
9012,
198,
198,
6738,
26692,
17,
62,
20930,
13,
268,
5700,
1330,
8995... | 2.209786 | 1,492 |
import drawSVG
import math
def draw_chains_for_4x2():
""" Markov chain for four sets of two card types, e.g. AAAABBBB """
r = 25
dx = 100
width = 2 * r + 20 + dx * 4
height = 200
y = 100
svg = drawSVG.SVG({ 'width': width, 'height': height })
chain = Chain()
chain.add_node(r + 10, y, '4A4B')
chain.add_edge(0, 0)
chain.add_node(r + 10 + dx, y, '2A4B')
chain.add_edge(0, 1)
chain.add_edge(1, 1)
chain.add_node(r + 10 + dx * 2, y - dx / 2, '4B')
chain.add_node(r + 10 + dx * 2, y + dx / 2, '2A2B')
chain.add_edge(1, 2)
chain.add_edge(1, 3)
chain.add_edge(3, 3)
chain.add_node(r + 10 + dx * 3, y, '2B')
chain.add_edge(2, 4)
chain.add_edge(3, 4)
chain.add_node(r + 10 + dx * 4, y, 'End')
chain.add_edge(4, 5)
draw_chain(svg, chain, r)
svg.outputToFile('chain_1.svg')
# draw_chains_for_4x2()
# draw_chains_for_4A4B()
draw_chains_for_4A2B() | [
11748,
3197,
50,
43490,
198,
11748,
10688,
628,
628,
628,
628,
628,
198,
4299,
3197,
62,
38861,
62,
1640,
62,
19,
87,
17,
33529,
198,
220,
220,
220,
37227,
2940,
709,
6333,
329,
1440,
5621,
286,
734,
2657,
3858,
11,
304,
13,
70,
1... | 2.016913 | 473 |
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 16 00:30:53 2015
@author: jensv
Module for examining stability spaces.
"""
from __future__ import print_function, unicode_literals, division
from __future__ import absolute_import
from future import standard_library, utils
from future.builtins import (ascii, bytes, chr, dict, filter, hex, input,
int, map, next, oct, open, pow, range, round,
str, super, zip)
import numpy as np
from scipy.special import kv, kvp
import analytic_condition as ac
from scipy.interpolate import griddata
import matplotlib.pyplot as plt
from matplotlib.colors import SymLogNorm, BoundaryNorm
from matplotlib.ticker import FormatStrFormatter, FixedFormatter
import matplotlib.patches as patches
import matplotlib.ticker as ticker
import seaborn as sns
sns.set_style('white')
sns.set_context('poster')
def plot_lambda_k_space_dw(filename, epsilon, name, mode_to_plot='m_neg_1',
show_points=False, lim=None, levels=None, log=True,
linthresh=1E-7, bounds=(1.5, 3.0), norm=True,
analytic_compare=False,
label_pos=((0.5, 0.4), (2.1, 0.4), (2.8, 0.2)),
delta_values=[-1,0,1],
interpolate=False,
cmap=None, hatch=False,
figsize=None,
save_as=None,
return_ax=False,
axes=None):
r"""
Plot the delta_w of external instabilities in the lambda-k space.
"""
if figsize:
fig = plt.figure(figsize=figsize)
epsilon_case = np.load(filename)
lambda_a_mesh = epsilon_case['lambda_a_mesh']
k_a_mesh = epsilon_case['k_a_mesh']
external_m_neg_1 = epsilon_case['d_w_m_neg_1']
external_sausage = epsilon_case['d_w_m_0']
epsilon_case.close()
instability_map = {'m_0': external_sausage,
'm_neg_1': external_m_neg_1}
kink_pal = sns.blend_palette([sns.xkcd_rgb["dandelion"],
sns.xkcd_rgb["white"]], 7, as_cmap=True)
kink_pal = sns.diverging_palette(73, 182, s=72, l=85, sep=1, n=9, as_cmap=True)
sausage_pal = sns.blend_palette(['orange', 'white'], 7, as_cmap=True)
sausage_pal = sns.diverging_palette(49, 181, s=99, l=78, sep=1, n=9, as_cmap=True)
if cmap:
instability_palette = {'m_0': cmap,
'm_neg_1': cmap}
else:
instability_palette = {'m_0': sausage_pal,
'm_neg_1': kink_pal}
if interpolate:
instability_map['m_neg_1'] = interpolate_nans(lambda_a_mesh,
k_a_mesh,
instability_map['m_neg_1']
)
values = instability_map[mode_to_plot]
if norm:
values = values / np.nanmax(np.abs(values))
else:
values = values
if levels:
if log:
plot = plt.contourf(lambda_a_mesh, k_a_mesh, values,
cmap=instability_palette[mode_to_plot],
levels=levels, norm=SymLogNorm(linthresh))
cbar = plt.colorbar(label=r'$\delta W$')
cbar.set_label(label=r'$\delta W$', size=45, rotation=0, labelpad=30)
contourlines = plt.contour(lambda_a_mesh, k_a_mesh,
values, levels=levels,
colors='grey',
norm=SymLogNorm(linthresh))
else:
norm = BoundaryNorm(levels, 256)
plot = plt.contourf(lambda_a_mesh, k_a_mesh, values,
cmap=instability_palette[mode_to_plot],
levels=levels, norm=norm)
cbar = plt.colorbar(label=r'$\delta W$')
cbar.set_label(label=r'$\delta W$', size=45, rotation=0, labelpad=30)
contourlines = plt.contour(lambda_a_mesh, k_a_mesh,
values, levels=levels,
colors='grey')
else:
if log:
plot = plt.contourf(lambda_a_mesh, k_a_mesh, values,
cmap=instability_palette[mode_to_plot],
norm=SymLogNorm(linthresh))
cbar = plt.colorbar(label=r'$\delta W$')
cbar.set_label(label=r'$\delta W$', size=45, rotation=0, labelpad=30)
contourlines = plt.contour(lambda_a_mesh, k_a_mesh,
values, colors='grey',
norm=SymLogNorm(linthresh))
else:
plot = plt.contourf(lambda_a_mesh, k_a_mesh, values,
cmap=instability_palette[mode_to_plot])
cbar = plt.colorbar(label=r'$\delta W$')
cbar.set_label(label=r'$\delta W$', size=45, rotation=0, labelpad=30)
contourlines = plt.contour(lambda_a_mesh, k_a_mesh,
values, colors='grey')
if lim:
plot.set_clim(lim)
cbar.add_lines(contourlines)
plt.plot([0.01, 0.1, 1.0, 2.0, 3.0],
[0.005, 0.05, 0.5, 1.0, 1.5], color='black')
axes = plt.gca()
axes.set_axis_bgcolor(sns.xkcd_rgb['white'])
lambda_bar_analytic = np.linspace(0.01, 3., 750)
k_bar_analytic = np.linspace(0.01, 1.5, 750)
(lambda_bar_mesh_analytic,
k_bar_mesh_analytic) = np.meshgrid(lambda_bar_analytic, k_bar_analytic)
if analytic_compare:
analytic_comparison(mode_to_plot, k_bar_mesh_analytic,
lambda_bar_mesh_analytic, epsilon, label_pos)
if show_points:
plt.scatter(lambda_a_mesh, k_a_mesh, marker='o', c='b', s=5)
plt.ylim(0.01, bounds[0])
plt.xlim(0.01, bounds[1])
axes = plt.gca()
axes.set_xticks(np.arange(0., 4.5, 1.))
axes.set_yticks(np.arange(0., 2.0, 0.5))
plt.setp(axes.get_xticklabels(), fontsize=30)
plt.setp(axes.get_yticklabels(), fontsize=30)
plt.ylabel(r'$\bar{k}$', fontsize=40, rotation='horizontal', labelpad=30)
plt.xlabel(r'$\bar{\lambda}$', fontsize=40)
cbar.ax.tick_params(labelsize=30)
labels = [my_formatter_fun(level) for level in levels]
cbar.ax.set_yticklabels(labels)
sns.despine(ax=axes)
if hatch:
xmin, xmax = axes.get_xlim()
ymin, ymax = axes.get_ylim()
xy = (xmin,ymin)
width = xmax - xmin
height = ymax - ymin
p = patches.Rectangle(xy, width, height, hatch='+', fill=None, zorder=-10)
axes.add_patch(p)
plt.tight_layout()
if return_ax:
return axes, cbar
else:
plt.savefig('../../output/plots/' + name + '.png')
if save_as:
plt.savefig(save_as)
plt.show()
def interpolate_nans(lambda_a, k_a, quantity):
r"""
Return mesh with nans interpolated from neighboring values.
"""
index_to_keep = np.isnan(quantity.ravel())
interp_values = quantity.ravel()[~index_to_keep]
interp_k = k_a.ravel()[~index_to_keep]
interp_lambda = lambda_a.ravel()[~index_to_keep]
return griddata((interp_lambda, interp_k),
interp_values,
(lambda_a, k_a),
method='linear')
def plot_dW_given_delta(filename, epsilon, name, mode_to_plot='m_neg_1',
show_points=False, lim=None, levels=None, log=False,
linthresh=1E-7, bounds=(1.5, 3.0), floor_norm=False,
analytic_compare=False,
label_pos=((0.5, 0.4), (2.1, 0.4), (2.8, 0.2)),
delta_values=[-1,0,1],
interpolate=False, with_interface=False):
r"""
Plot the delta_w of external instabilities in the lambda-k space.
"""
epsilon_case = np.load(filename)
lambda_a_mesh = epsilon_case['lambda_a_mesh']
k_a_mesh = epsilon_case['k_a_mesh']
delta_mesh_sausage = epsilon_case['delta_m_0']
delta_mesh_kink = epsilon_case['delta_m_neg_1']
epsilon_case.close()
if with_interface:
external_sausage_norm = ac.conditions(k_a_mesh, lambda_a_mesh, epsilon,
0, delta_mesh_sausage)
external_m_neg_1_norm = ac.conditions(k_a_mesh, lambda_a_mesh, epsilon,
1, delta_mesh_kink)
else:
external_sausage_norm = ac.conditions_without_interface(k_a_mesh,
lambda_a_mesh,
epsilon,
0,
delta_mesh_sausage)
external_m_neg_1_norm = ac.conditions_without_interface(k_a_mesh,
lambda_a_mesh,
epsilon,
1,
delta_mesh_kink)
instability_map = {'m_0': external_sausage_norm,
'm_neg_1': external_m_neg_1_norm}
kink_pal = sns.blend_palette([sns.xkcd_rgb["dandelion"],
sns.xkcd_rgb["white"]], 7, as_cmap=True)
kink_pal = sns.diverging_palette(73, 182, s=72, l=85, sep=1, n=9, as_cmap=True)
sausage_pal = sns.blend_palette(['orange', 'white'], 7, as_cmap=True)
sausage_pal = sns.diverging_palette(49, 181, s=99, l=78, sep=1, n=9, as_cmap=True)
instability_palette = {'m_0': sausage_pal,
'm_neg_1': kink_pal}
if interpolate:
instability_map['m_neg_1'] = interpolate_nans(lambda_a_mesh,
k_a_mesh,
instability_map['m_neg_1']
)
values = instability_map[mode_to_plot]
if floor_norm:
values = np.clip(values, -100., 100.)
values = values / -np.nanmin(values)
values = np.clip(values, -1., 1.)
else:
values = values / -np.nanmin(values)
if levels:
if log:
plot = plt.contourf(lambda_a_mesh, k_a_mesh, values,
cmap=instability_palette[mode_to_plot],
levels=levels, norm=SymLogNorm(linthresh))
else:
norm = BoundaryNorm(levels, 256)
plot = plt.contourf(lambda_a_mesh, k_a_mesh, values,
cmap=instability_palette[mode_to_plot],
levels=levels, norm=norm)
cbar = plt.colorbar(label=r'$\delta W$',
format=FormatStrFormatter('%.0e'))
cbar.set_label(label=r'$\delta W$', size=45, rotation=0, labelpad=30)
contourlines = plt.contour(lambda_a_mesh, k_a_mesh, values,
levels=levels[:-1], colors='grey')
cbar.add_lines(contourlines)
else:
if log:
plot = plt.contourf(lambda_a_mesh, k_a_mesh, values,
cmap=instability_palette[mode_to_plot],
norm=SymLogNorm(linthresh))
else:
plot = plt.contourf(lambda_a_mesh, k_a_mesh, values,
cmap=instability_palette[mode_to_plot])
if lim:
plot.set_clim(lim)
plt.plot([0.01, 0.1, 1.0, 2.0, 3.0],
[0.005, 0.05, 0.5, 1.0, 1.5], color='black')
axes = plt.gca()
axes.set_axis_bgcolor(sns.xkcd_rgb['grey'])
lambda_bar = np.linspace(0.01, 3., 750)
k_bar = np.linspace(0.01, 1.5, 750)
lambda_bar_mesh, k_bar_mesh = np.meshgrid(lambda_bar, k_bar)
if analytic_compare:
analytic_comparison(mode_to_plot, k_bar_mesh, lambda_bar_mesh, epsilon,
label_pos)
if show_points:
plt.scatter(lambda_a_mesh, k_a_mesh, marker='o', c='b', s=5)
plt.ylim(0.01, bounds[0])
plt.xlim(0.01, bounds[1])
axes = plt.gca()
plt.setp(axes.get_xticklabels(), fontsize=40)
plt.setp(axes.get_yticklabels(), fontsize=40)
plt.ylabel(r'$\bar{k}$', fontsize=45, rotation='horizontal', labelpad=30)
plt.xlabel(r'$\bar{\lambda}$', fontsize=45)
cbar.ax.tick_params(labelsize=40)
sns.despine(ax=axes)
plt.tight_layout()
plt.savefig('../../output/plots/' + name + '.png')
plt.show()
def analytic_comparison_flex(mode_to_plot, k_bar_mesh, lambda_bar_mesh, epsilon,
delta_values, label_pos):
r"""
Add red lines indicating stability boundaries from analytical model.
"""
line_labels = FixedFormatter(delta_values)
assert (mode_to_plot == 'm_neg_1' or
mode_to_plot == 'm_0'), ("Please specify mode_to_plot as either" +
"m_neg_1 or m_0")
if mode_to_plot == 'm_neg_1':
m = 1
color = 'red'
if mode_to_plot == 'm_0':
m = 0
color = 'red'
stability_kink_given_delta = []
for delta in delta_values:
stability_kink_given_delta.append(ac.conditions(k_bar_mesh,
lambda_bar_mesh,
epsilon,
m,
delta))
stability_kink = stability_kink_given_delta[0] < 0
stability_kink = stability_kink.astype(float)
stability_kink[stability_kink_given_delta[0] >= 0] = -1.5
stability_kink[stability_kink_given_delta[0] < 0] = -0.5
value = 0.5
for i in range(len(delta_values[1:])):
stability_kink[stability_kink_given_delta[i] < 0] = value
value += 1.
levels = np.array(range(len(delta_values))) - 1
cs = plt.contour(lambda_bar_mesh, k_bar_mesh, stability_kink,
levels=levels, colors=color, linewidths=5,
linestyles='dotted')
line_labels = {}
for i, level in enumerate(levels):
line_labels.update({level: r'$\delta = $ %2.1f' % (delta_values[i])})
print(levels, value, line_labels)
plt.clabel(cs, fmt=line_labels, fontsize=40, manual=label_pos)
return cs
def single_analytic_comparison(mode_to_plot,
k_bar_mesh,
lambda_bar_mesh,
epsilon,
delta_value,
label_pos):
"""
Add contour of analytic stability condition.
"""
line_labels = FixedFormatter(delta_value)
assert (mode_to_plot == 'm_neg_1' or
mode_to_plot == 'm_0'), ("Please specify mode_to_plot as either" +
"m_neg_1 or m_0")
if mode_to_plot == 'm_neg_1':
m = 1
color = 'red'
if mode_to_plot == 'm_0':
m = 0
color = 'red'
stability_kink_given_delta = []
stability_kink_given_delta.append(ac.conditions(k_bar_mesh,
lambda_bar_mesh,
epsilon,
m,
delta_value))
stability_kink = stability_kink_given_delta[0] < 0
stability_kink = stability_kink.astype(float)
stability_kink[stability_kink_given_delta[0] >= 0] = -1.5
stability_kink[stability_kink_given_delta[0] < 0] = -0.5
levels = [-1]
cs = plt.contour(lambda_bar_mesh, k_bar_mesh, stability_kink,
levels=levels, colors=color, linewidths=5,
linestyles='dotted')
line_labels = {}
#plt.clabel(cs, fmt={-1: '%2.1f' % delta_value}, fontsize=40, manual=label_pos)
return cs
def analytic_comparison(mode_to_plot, k_bar_mesh, lambda_bar_mesh, epsilon,
label_pos, lines=None, colors=None):
r"""
Add red lines indicating stability boundaries from analytical model.
"""
if not lines:
line_labels = FixedFormatter(['-1', '0', '1'])
else:
line_labels = FixedFormatter([str(line) for line in lines])
assert (mode_to_plot == 'm_neg_1' or
mode_to_plot == 'm_0'), ("Please specify mode_to_plot as either" +
"m_neg_1 or m_0")
if mode_to_plot == 'm_neg_1':
m = 1
if not colors:
color = 'red'
else:
color = colors
if mode_to_plot == 'm_0':
m = 0
if not colors:
color = 'red'
else:
color = colors
if not lines:
stability_kink_m_neg_1 = ac.conditions(k_bar_mesh, lambda_bar_mesh,
epsilon, m, -1.)
stability_kink_m_0 = ac.conditions(k_bar_mesh, lambda_bar_mesh,
epsilon, m, 0.)
stability_kink_m_1 = ac.conditions(k_bar_mesh, lambda_bar_mesh,
epsilon, m, 1)
else:
stability_kink_m_neg_1 = ac.conditions(k_bar_mesh, lambda_bar_mesh,
epsilon, m, lines[0])
stability_kink_m_0 = ac.conditions(k_bar_mesh, lambda_bar_mesh,
epsilon, m, lines[1])
stability_kink_m_1 = ac.conditions(k_bar_mesh, lambda_bar_mesh,
epsilon, m, lines[2])
stability_kink = stability_kink_m_neg_1 < 0
stability_kink = stability_kink.astype(float)
stability_kink[stability_kink_m_neg_1 >= 0] = -1.5
stability_kink[stability_kink_m_neg_1 < 0] = -0.5
stability_kink[stability_kink_m_0 < 0] = 0.5
stability_kink[stability_kink_m_1 < 0] = 1.5
cs = plt.contour(lambda_bar_mesh, k_bar_mesh, stability_kink,
levels=[-1, 0, 1], colors=color, linewidths=10,
linestyles='dotted')
if not lines:
plt.clabel(cs, fmt={-1: r'$\delta = -1$', 0: r'$\delta = 0$',
1: r'$\delta = 1$'}, fontsize=40, manual=label_pos)
else:
plt.clabel(cs, fmt={-1: r'$\delta =$ %' % lines[0], 0: r'$\delta =$ %' % lines[1],
1: r'$\delta =$ %' % lines[2]}, fontsize=40, manual=label_pos)
return cs
def plot_lambda_k_space_delta(filename, mode_to_plot,
clip=False, delta_min=-1.5,
delta_max=1., levels=None,
interpolate=True, compare_analytic=False,
epsilon=None, analytic_label_pos=None, lines=None,
plot_numeric_boundary=False, cmap=None, analytic_color=None):
r"""
Plot values of delta in lambda k space.
"""
data_meshes = np.load(filename)
lambda_mesh = data_meshes['lambda_a_mesh']
k_mesh = data_meshes['k_a_mesh']
if mode_to_plot == 0:
color = 'green'
delta_mesh = data_meshes['delta_m_0']
external_sausage = data_meshes['d_w_m_0']
else:
#color = sns.xkcd_rgb["dandelion"]
color = 'green'
delta_mesh = data_meshes['delta_m_neg_1']
external_kink = data_meshes['d_w_m_neg_1']
if interpolate:
delta_mesh = interpolate_nans(lambda_mesh,
k_mesh,
delta_mesh)
if clip:
delta_mesh = np.clip(delta_mesh, delta_min, delta_max)
if cmap:
colors = cmap
else:
colors = sns.light_palette(color, n_colors=6, reverse=True,
as_cmap=True)
if levels:
plt.contourf(lambda_mesh, k_mesh, delta_mesh, cmap=colors,
levels=levels)
else:
plt.contourf(lambda_mesh, k_mesh, delta_mesh, cmap=colors)
cbar = plt.colorbar(label=r'$\delta$')
cbar.set_label(label=r'$\delta(\bar{\lambda},\bar{k})$', size=45, rotation=0, labelpad=30)
if levels:
contourlines = plt.contour(lambda_mesh, k_mesh, delta_mesh,
colors='grey', levels=levels)
else:
contourlines = plt.contour(lambda_mesh, k_mesh, delta_mesh,
colors='grey')
cbar.add_lines(contourlines)
if mode_to_plot == 0:
mode_to_plot = 'm_0'
else:
mode_to_plot = 'm_neg_1'
if compare_analytic:
if analytic_color:
analytic_comparison(mode_to_plot, k_mesh, lambda_mesh, epsilon,
analytic_label_pos, lines=lines, colors=analytic_color)
else:
analytic_comparison(mode_to_plot, k_mesh, lambda_mesh, epsilon,
analytic_label_pos, lines=lines)
if plot_numeric_boundary:
contour = plt.contour(lambda_mesh,
k_mesh,
external_sausage,
levels=[0],
colors='grey',
linestyles='-.')
axes = plt.gca()
axes.set_axis_bgcolor(sns.xkcd_rgb['grey'])
plt.setp(axes.get_xticklabels(), fontsize=40)
plt.setp(axes.get_yticklabels(), fontsize=40)
plt.ylabel(r'$\bar{k}$', fontsize=45, rotation='horizontal', labelpad=30)
plt.xlabel(r'$\bar{\lambda}$', fontsize=45)
cbar.ax.tick_params(labelsize=40)
axes.set_xticks(np.arange(0., 5, 1.))
axes.set_yticks(np.arange(0., 2.0, 0.5))
plt.ylim(0.01, 1.5)
plt.xlim(0.01, 3.0)
sns.despine(ax=axes)
plt.tight_layout()
def sausage_kink_ratio(filename, xy_limits=None, cmap=None, save_as=None,
levels=None, return_ax=False):
r"""
Plot ratio of sausage and kink potential energies.
"""
meshes = np.load(filename)
lambda_bar_mesh = meshes['lambda_a_mesh']
k_bar_mesh = meshes['k_a_mesh']
external_m_neg_1 = meshes['d_w_m_neg_1']
external_sausage = meshes['d_w_m_0']
meshes.close()
sausage_stable_region = np.invert((external_sausage < 0))
ratio = np.abs(external_sausage / external_m_neg_1)
ratio[sausage_stable_region] = np.nan
ratio_log = np.log10(ratio)
if not cmap:
cmap = sns.light_palette(sns.xkcd_rgb['red orange'],
as_cmap=True)
if levels:
contours = plt.contourf(lambda_bar_mesh, k_bar_mesh,
ratio_log, cmap=cmap, levels=levels)
else:
contours = plt.contourf(lambda_bar_mesh, k_bar_mesh,
ratio_log, cmap=cmap)
colorbar = plt.colorbar(format=FormatStrFormatter(r'$10^{%i}$'))
colorbar.set_label(r'$\frac{\delta W_{m=0}}{\delta W_{m=-1}}$',
size=35, rotation=0, labelpad=50)
if levels:
lines = plt.contour(lambda_bar_mesh, k_bar_mesh,
ratio_log, colors='grey', levels=levels)
else:
lines = plt.contour(lambda_bar_mesh, k_bar_mesh,
ratio_log, colors='grey')
colorbar.add_lines(lines)
axes = plt.gca()
axes.plot([0, 3.], [0., 1.5], '--', c='black', lw=5)
axes.set_xlabel(r'$\bar{\lambda}$', fontsize=40)
plt.setp(axes.get_xticklabels(), fontsize=30)
axes.set_xticks(np.arange(0., 4.5, 0.5))
axes.set_ylabel(r'$\bar{k}$', fontsize=40)
plt.setp(axes.get_yticklabels(), fontsize=30)
axes.set_yticks(np.arange(0., 2.0, 0.5))
if xy_limits:
axes.set_ylim((xy_limits[0], xy_limits[1]))
axes.set_xlim((xy_limits[2], xy_limits[3]))
sns.despine()
colorbar.ax.yaxis.set_ticks_position('right')
colorbar.ax.tick_params(labelsize=30)
plt.tight_layout()
if return_ax:
return axes, colobar
else:
if save_as:
plt.savefig(save_as)
plt.show()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
2892,
3158,
1467,
3571,
25,
1270,
25,
4310,
1853,
198,
198,
31,
9800,
25,
474,
641,
85,
198,
198,
26796,
329,
17247,
10159,
9029,
13,
198,
3781... | 1.747215 | 13,913 |
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 11 09:33:07 2020
@author: HI
"""
import xlwings as xw
import math
from scipy.stats import norm
@xw.func
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
26223,
7653,
1367,
7769,
25,
2091,
25,
2998,
12131,
198,
198,
31,
9800,
25,
36210,
198,
37811,
198,
198,
11748,
2124,
75,
48819,
355,
2124,
86,
... | 2.362319 | 69 |
import sys, os
import stat
import httplib
import urlparse
import json
import logging
__all__ = [
"WebHDFS",
"WebHDFSError"
]
logging.basicConfig(level=logging.DEBUG, datefmt='%m/%d/%Y %I:%M:%S %p',
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(name='webhdfs')
WEBHDFS_CONTEXT_ROOT="/webhdfs/v1"
######################################################################
######################################################################
######################################################################
class WebHDFS(object):
""" Class for accessing HDFS via WebHDFS
To enable WebHDFS in your Hadoop Installation add the following configuration
to your hdfs_site.xml (requires Hadoop >0.20.205.0):
<property>
<name>dfs.webhdfs.enabled</name>
<value>true</value>
</property>
see: https://issues.apache.org/jira/secure/attachment/12500090/WebHdfsAPI20111020.pdf
"""
######################################################################
######################################################################
######################################################################
######################################################################
######################################################################
######################################################################
######################################################################
######################################################################
######################################################################
######################################################################
######################################################################
######################################################################
######################################################################
######################################################################
######################################################################
######################################################################
######################################################################
######################################################################
######################################################################
if __name__ == '__main__':
try:
webhdfs = WebHDFS('storm0', 50070, 'azhigimont')
webhdfs.mkDir('/user/azhigimont/tmp')
resp = webhdfs.copyToHDFS('c:/temp/test.json', '/user/azhigimont/tmp/test.json', overwrite = True)
webhdfs.copyFromHDFS('/user/azhigimont/tmp/test.json', 'c:/temp/test1.json', overwrite = True)
webhdfs.listDir('/user/azhigimont/tmp')
webhdfs.delete('/user/azhigimont/tmp', recursive = True)
except WebHDFSError as whe:
print whe
except:
print "Unexpected error:" + str(sys.exc_info())
else:
print '__main__ test completed without errors' | [
11748,
25064,
11,
28686,
198,
11748,
1185,
198,
11748,
1841,
489,
571,
198,
11748,
19016,
29572,
198,
11748,
33918,
198,
11748,
18931,
198,
198,
834,
439,
834,
796,
685,
198,
220,
220,
220,
366,
13908,
39,
8068,
50,
1600,
198,
220,
22... | 3.393583 | 935 |
# pylint: disable=C0103,C0301
"""
GUI for app
"""
import os
from datetime import datetime
import dash_core_components as dcc
import dash_html_components as html
import dash_dangerously_set_inner_html as ddsih
import luts
import data
# For hosting
path_prefix = os.getenv("REQUESTS_PATHNAME_PREFIX") or "/"
# Used to make the chart exports nice
fig_download_configs = dict(
filename="Daily_Precipitation", width="1000", height="650", scale=2
)
fig_configs = dict(
displayModeBar=True,
showSendToCloud=False,
toImageButtonOptions=fig_download_configs,
modeBarButtonsToRemove=[
"zoom2d",
"pan2d",
"select2d",
"lasso2d",
"zoomIn2d",
"zoomOut2d",
"autoScale2d",
"resetScale2d",
"hoverClosestCartesian",
"hoverCompareCartesian",
"hoverClosestPie",
"hoverClosest3d",
"hoverClosestGl2d",
"hoverClosestGeo",
"toggleHover",
"toggleSpikelines",
],
displaylogo=False,
)
# Helper functions
def wrap_in_section(content, section_classes="", container_classes="", div_classes=""):
"""
Helper function to wrap sections.
Accepts an array of children which will be assigned within
this structure:
<section class="section">
<div class="container">
<div>[children]...
"""
return html.Section(
className="section " + section_classes,
children=[
html.Div(
className="container " + container_classes,
children=[html.Div(className=div_classes, children=content)],
)
],
)
header = ddsih.DangerouslySetInnerHTML(
f"""
<div class="container">
<nav class="navbar" role="navigation" aria-label="main navigation">
<div class="navbar-brand">
<a class="navbar-item" href="https://www.snap.uaf.edu">
<img src="{path_prefix}assets/SNAP_acronym_color_square.svg">
</a>
<a role="button" class="navbar-burger burger" aria-label="menu" aria-expanded="false" data-target="navbarBasicExample">
<span aria-hidden="true"></span>
<span aria-hidden="true"></span>
<span aria-hidden="true"></span>
</a>
</div>
<div class="navbar-menu">
<div class="navbar-end">
<div class="navbar-item">
<div class="buttons">
<a class="button is-primary">
<strong>Feedback</strong>
</a>
</div>
</div>
</div>
</div>
</nav>
</div>
"""
)
communities_dropdown_field = html.Div(
className="field dropdown-selector",
children=[
html.Label("Choose a location", className="label"),
html.Div(
className="control",
children=[
dcc.Dropdown(
id="communities-dropdown",
options=[
{"label": row["name"], "value": row["stid"]}
for index, row in luts.communities.iterrows()
],
value="26411", # Fairbanks
)
],
),
],
)
about = wrap_in_section(
[
ddsih.DangerouslySetInnerHTML(
f"""
<h1 class="title is-3">{luts.title}</h1>
<p>These charts show daily precipitation records for Alaska weather stations.</p>
<p class="camera-icon">Click the <span>
<svg viewBox="0 0 1000 1000" class="icon" height="1em" width="1em"><path d="m500 450c-83 0-150-67-150-150 0-83 67-150 150-150 83 0 150 67 150 150 0 83-67 150-150 150z m400 150h-120c-16 0-34 13-39 29l-31 93c-6 15-23 28-40 28h-340c-16 0-34-13-39-28l-31-94c-6-15-23-28-40-28h-120c-55 0-100-45-100-100v-450c0-55 45-100 100-100h800c55 0 100 45 100 100v450c0 55-45 100-100 100z m-400-550c-138 0-250 112-250 250 0 138 112 250 250 250 138 0 250-112 250-250 0-138-112-250-250-250z m365 380c-19 0-35 16-35 35 0 19 16 35 35 35 19 0 35-16 35-35 0-19-16-35-35-35z" transform="matrix(1 0 0 -1 0 850)"></path></svg>
</span> icon in the upper–right of each chart to download it.</p>
<p>Data provided by the the <a href="http://www.rcc-acis.org">Applied Climate Information System (ACIS)</a>.</p>
<p>Get started by choosing a community.</p>
"""
),
communities_dropdown_field
],
div_classes="content is-size-5",
)
# Daily Precip as a scatterplot + mean line
scatter_graph = wrap_in_section(
[
html.H3("Daily precipitation", className="title is-4"),
html.P(
"""
Placeholder
""",
className="content is-size-5",
),
dcc.Graph(id="precip-scatter", config=fig_configs),
],
section_classes="graph",
)
# Daily precip as a daily bubble chart
bubble_graph = wrap_in_section(
[
html.H3("Daily precipitation TBD Title What", className="title is-4"),
html.P(
"""
TBD Placeholder
""",
className="content is-size-5",
),
html.Div(
className="graph", children=[dcc.Graph(id="precip-bubble", config=fig_configs)]
),
],
section_classes="graph",
)
footer = html.Footer(
className="footer has-text-centered",
children=[
html.Div(
children=[
html.A(
href="https://snap.uaf.edu",
className="snap",
children=[html.Img(src=path_prefix + "assets/SNAP_color_all.svg")],
),
html.A(
href="https://uaf.edu/uaf/",
children=[html.Img(src=path_prefix + "assets/UAF.svg")],
),
]
),
ddsih.DangerouslySetInnerHTML(
"""
<p>UA is an AA/EO employer and educational institution and prohibits illegal discrimination against any individual.
<br><a href="https://www.alaska.edu/nondiscrimination/">Statement of Nondiscrimination</a></p>
"""
),
],
)
layout = html.Div(children=[header, about, scatter_graph, bubble_graph, footer])
| [
2,
279,
2645,
600,
25,
15560,
28,
34,
486,
3070,
11,
34,
3070,
486,
198,
37811,
198,
40156,
329,
598,
198,
37811,
198,
198,
11748,
28686,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
11748,
14470,
62,
7295,
62,
5589,
3906,
355,
288... | 2.142294 | 2,790 |
import inspect
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))))
from pm4py.algo import alignments as ali
from pm4py.models import petri as petri
from pm4py import log as log_lib
from pm4py import util
if __name__ == '__main__':
log = log_lib.importer.xes.import_from_file_xes('C:/Users/bas/Documents/tue/svn/private/logs/a32_logs/a32f0n05.xes')
net, marking = petri.importer.pnml.import_petri_from_pnml(
'C:/Users/bas/Documents/tue/svn/private/logs/a32_logs/a32.pnml')
fmarking = petri.petrinet.Marking()
for p in net.places:
if len(p.out_arcs) == 0:
fmarking[p] = 1
model_cost_function = dict()
sync_cost_function = dict()
for t in net.transitions:
if t.label is not None:
model_cost_function[t] = 1000
sync_cost_function[t] = 0
else:
model_cost_function[t] = 1
print(list(map(lambda trace: align(trace, net, marking, fmarking, model_cost_function, sync_cost_function), log)))
| [
11748,
10104,
198,
11748,
28686,
198,
11748,
25064,
198,
17597,
13,
6978,
13,
28463,
7,
15,
11,
28686,
13,
6978,
13,
15908,
3672,
7,
418,
13,
6978,
13,
15908,
3672,
7,
418,
13,
6978,
13,
397,
2777,
776,
7,
1040,
806,
13,
1136,
775... | 2.286017 | 472 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
from .test_core import *
from .test_commands import *
if (__name__ == u'__main__'):
unittest.main()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
6738,
11593,
37443,... | 3.030928 | 97 |
import os
import unittest
from unittest import mock
import json
from io import StringIO
import pkg_resources
import numpy as np
from data_profiler.labelers.regex_model import RegexModel
_file_dir = os.path.dirname(os.path.abspath(__file__))
_resource_labeler_dir = pkg_resources.resource_filename('resources', 'labelers')
mock_model_parameters = {
'regex_patterns': {
'PAD': [r'\W'],
'BACKGROUND': ['.*']
},
'encapsulators': {
'start': r'(?<![\w.\$\%\-])',
'end': r'(?:(?=(\b|[ ]))|(?=[^\w\%\$]([^\w]|$))|$)',
},
'ignore_case': True,
'default_label': 'BACKGROUND'
}
mock_label_mapping = {
"PAD": 0,
"CITY": 1, # ensure that overlapping labels get removed.
"BACKGROUND": 1,
"ADDRESS": 2,
}
if __name__ == '__main__':
unittest.main()
| [
11748,
28686,
198,
11748,
555,
715,
395,
198,
6738,
555,
715,
395,
1330,
15290,
198,
11748,
33918,
198,
6738,
33245,
1330,
10903,
9399,
198,
11748,
279,
10025,
62,
37540,
198,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
1366,
62... | 2.203753 | 373 |
import os
import numpy as np
import textwrap
import h5py
from keras.optimizers import SGD
import copy
from chess_types import GameState, Move, Player, Board, Point
from encoder import SimpleEncoder
import encoder
from typing import List, Dict, Tuple
from model_ac import create_model
| [
11748,
28686,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
11748,
2420,
37150,
201,
198,
11748,
289,
20,
9078,
201,
198,
201,
198,
6738,
41927,
292,
13,
40085,
11341,
1330,
26147,
35,
201,
198,
11748,
4866,
201,
198,
201,
198,
6... | 3.009804 | 102 |
import json
from abc import ABCMeta
from collections import OrderedDict
from ArubaCloud.base import JsonInterfaceBase
| [
11748,
33918,
198,
6738,
450,
66,
1330,
9738,
48526,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
6738,
943,
22013,
18839,
13,
8692,
1330,
449,
1559,
39317,
14881,
628,
628
] | 3.903226 | 31 |
from enum import Enum
ANNUAL = "annual"
months = {
Seasons.SUMMER: [12, 1, 2],
Seasons.AUTUMN: [3, 4, 5],
Seasons.WINTER: [6, 7, 8],
Seasons.SPRING: [9, 10, 11],
}
| [
6738,
33829,
1330,
2039,
388,
198,
198,
22846,
25620,
796,
366,
1236,
723,
1,
628,
198,
198,
41537,
796,
1391,
198,
220,
220,
220,
33159,
13,
50,
5883,
29296,
25,
685,
1065,
11,
352,
11,
362,
4357,
198,
220,
220,
220,
33159,
13,
3... | 2.044444 | 90 |
import getopt
import gc
import sys
import os
import time
import traceback
import psutil
from digiroad.carRoutingExceptions import ImpedanceAttributeNotDefinedException, NotParameterGivenException, \
TransportModeNotDefinedException
from digiroad.connection.PostgisServiceProvider import PostgisServiceProvider
from digiroad.connection.WFSServiceProvider import WFSServiceProvider
from digiroad.logic.MetropAccessDigiroad import MetropAccessDigiroadApplication
from digiroad.transportMode.BicycleTransportMode import BicycleTransportMode
from digiroad.transportMode.PrivateCarTransportMode import PrivateCarTransportMode
from digiroad.util import CostAttributes, getConfigurationProperties, TransportModes, Logger, FileActions, \
getFormattedDatetime, GeneralLogger, timeDifference
def main():
"""
Read the arguments written in the command line to read the input coordinates from a
Geojson file (a set of pair points) and the location (URL) to store the Shortest Path geojson features for each
pair of points.
Call the ``calculateTotalTimeTravel`` from the WFSServiceProvider configured
with the parameters in './resources/configuration.properties' and calculate the shortest path for each
pair of points and store a Geojson file per each of them.
After that, call the function ``createSummary`` to summarize the total time expend to go from one point to another
for each of the different impedance attribute (cost).
:return: None. All the information is stored in the ``shortestPathOutput`` URL.
"""
argv = sys.argv[1:]
opts, args = getopt.getopt(
argv, "s:e:o:c:t:",
["start_point=", "end_point=", "outputFolder=", "costAttributes=",
"transportMode", "is_entry_list", "routes", "summary", "all", "help"]
)
startPointsGeojsonFilename = None
outputFolder = None
# impedance = CostAttributes.DISTANCE
# impedance = None
impedanceList = []
car_impedances = {
"DISTANCE": CostAttributes.DISTANCE,
"SPEED_LIMIT_TIME": CostAttributes.SPEED_LIMIT_TIME,
"DAY_AVG_DELAY_TIME": CostAttributes.DAY_AVG_DELAY_TIME,
"MIDDAY_DELAY_TIME": CostAttributes.MIDDAY_DELAY_TIME,
"RUSH_HOUR_DELAY": CostAttributes.RUSH_HOUR_DELAY
}
bicycle_impedances = {
"DISTANCE": CostAttributes.DISTANCE,
"BICYCLE_FAST_TIME": CostAttributes.BICYCLE_FAST_TIME,
"BICYCLE_SLOW_TIME": CostAttributes.BICYCLE_SLOW_TIME
}
allImpedanceAttribute = False
summaryOnly = False
routesOnly = False
isEntryList = False
impedanceErrorMessage = "Use the paramenter -c or --cost.\nValues allowed: DISTANCE, SPEED_LIMIT_TIME, DAY_AVG_DELAY_TIME, MIDDAY_DELAY_TIME, RUSH_HOUR_DELAY.\nThe parameter --all enable the analysis for all the impedance attributes."
transportModeErrorMessage = "Use the paramenter -t or --transportMode.\nValues allowed: PRIVATE_CAR, BICYCLE."
for opt, arg in opts:
if opt in "--help":
printHelp()
return
# print("options: %s, arg: %s" % (opt, arg))
if opt in ("-s", "--start_point"):
startPointsGeojsonFilename = arg
if opt in ("-e", "--end_point"):
endPointsGeojsonFilename = arg
if opt in ("-o", "--outputFolder"):
outputFolder = arg
if opt in ("-t", "--transportMode"):
transportModeSelected = arg
if opt in "--summary":
summaryOnly = True
if opt in "--routes":
routesOnly = True
if opt in "--is_entry_list":
isEntryList = True
if opt in "--all":
allImpedanceAttribute = True
else:
if opt in ("-c", "--costAttributes"):
impedanceListTemp = arg.split(",")
for impedanceArg in impedanceListTemp:
if (impedanceArg not in car_impedances) and (impedanceArg not in bicycle_impedances):
raise ImpedanceAttributeNotDefinedException(
impedanceErrorMessage)
if impedanceArg in car_impedances:
impedance = car_impedances[impedanceArg]
elif impedanceArg in bicycle_impedances:
impedance = bicycle_impedances[impedanceArg]
impedanceList.append(impedance)
if not startPointsGeojsonFilename or not endPointsGeojsonFilename or not outputFolder:
raise NotParameterGivenException("Type --help for more information.")
if not transportModeSelected:
raise TransportModeNotDefinedException(
transportModeErrorMessage)
if not allImpedanceAttribute and not impedance:
raise ImpedanceAttributeNotDefinedException(
impedanceErrorMessage)
generalLogger = GeneralLogger(loggerName="GENERAL", outputFolder=outputFolder, prefix="General")
MAX_TRIES = 2
RECOVERY_WAIT_TIME = 10
RECOVERY_WAIT_TIME_8_MIN = 480
postgisServiceProvider = PostgisServiceProvider()
transportMode = None
impedances = None
if transportModeSelected == TransportModes.BICYCLE:
transportMode = BicycleTransportMode(postgisServiceProvider)
impedances = bicycle_impedances
elif transportModeSelected == TransportModes.PRIVATE_CAR:
transportMode = PrivateCarTransportMode(postgisServiceProvider)
impedances = car_impedances
starter = MetropAccessDigiroadApplication(
transportMode=transportMode
)
startTime = time.time()
functionName = "Routing Data Analysis"
generalLogger.getLogger().info("%s Start Time: %s" % (functionName, getFormattedDatetime(timemilis=startTime)))
if not isEntryList:
prefix = os.path.basename(startPointsGeojsonFilename) + "_" + os.path.basename(endPointsGeojsonFilename)
error_counts = 0
executed = False
while not executed:
try:
generalLogger.getLogger().info("Analyzing %s" % prefix)
executeSpatialDataAnalysis(outputFolder, startPointsGeojsonFilename, endPointsGeojsonFilename,
starter,
impedanceList, impedances, allImpedanceAttribute,
summaryOnly,
routesOnly,
prefix)
error_counts = 0
executed = True
gc.collect()
except Exception as err:
error_counts += 1
exc_type, exc_value, exc_traceback = sys.exc_info()
lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
generalLogger.getLogger().exception(''.join('>> ' + line for line in lines))
memory = psutil.virtual_memory()
generalLogger.getLogger().warning(
"MEMORY USAGE: total=%s, available=%s, percent=%s, used=%s, free=%s" % (
memory.total, memory.available, memory.percent, memory.used,
memory.free)
)
Logger.getInstance().exception(''.join('>> ' + line for line in lines))
time.sleep(RECOVERY_WAIT_TIME)
generalLogger.getLogger().warning("Calling garbage collector...")
gc.collect()
time.sleep(RECOVERY_WAIT_TIME_8_MIN)
memory = psutil.virtual_memory()
generalLogger.getLogger().warning(
"MEMORY USAGE: total=%s, available=%s, percent=%s, used=%s, free=%s" % (
memory.total, memory.available, memory.percent, memory.used,
memory.free)
)
if error_counts < (MAX_TRIES + 1):
message = "Error recovery for the %s time%s" % (
error_counts, ("" if error_counts < 2 else "s"))
generalLogger.getLogger().warning(message)
Logger.getInstance().warning(message)
else:
message = "Recurrent error, skipping analysis for: %s" % prefix
generalLogger.getLogger().warning(message)
Logger.getInstance().warning(message)
executed = True
else:
for startRoot, startDirs, startFiles in os.walk(startPointsGeojsonFilename):
for startPointsFilename in startFiles:
if startPointsFilename.endswith("geojson"):
for endRoot, endDirs, endFiles in os.walk(endPointsGeojsonFilename):
for endPointsFilename in endFiles:
if endPointsFilename.endswith("geojson"):
prefix = startPointsFilename + "_" + endPointsFilename
error_counts = 0
executed = False
while not executed:
try:
generalLogger.getLogger().info("Analyzing %s" % prefix)
executeSpatialDataAnalysis(outputFolder,
os.path.join(startRoot, startPointsFilename),
os.path.join(endRoot, endPointsFilename),
starter,
impedanceList, impedances, allImpedanceAttribute,
summaryOnly,
routesOnly,
prefix + "-")
error_counts = 0
executed = True
gc.collect()
except Exception as err:
error_counts += 1
exc_type, exc_value, exc_traceback = sys.exc_info()
lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
generalLogger.getLogger().exception(''.join('>> ' + line for line in lines))
memory = psutil.virtual_memory()
generalLogger.getLogger().warning(
"MEMORY USAGE: total=%s, available=%s, percent=%s, used=%s, free=%s" % (
memory.total, memory.available, memory.percent, memory.used,
memory.free)
)
Logger.getInstance().exception(''.join('>> ' + line for line in lines))
time.sleep(RECOVERY_WAIT_TIME)
generalLogger.getLogger().warning("Calling garbage collector...")
gc.collect()
time.sleep(RECOVERY_WAIT_TIME_8_MIN)
memory = psutil.virtual_memory()
generalLogger.getLogger().warning(
"MEMORY USAGE: total=%s, available=%s, percent=%s, used=%s, free=%s" % (
memory.total, memory.available, memory.percent, memory.used,
memory.free)
)
if error_counts < (MAX_TRIES + 1):
message = "Error recovery for the %s time%s" % (
error_counts, ("" if error_counts < 2 else "s"))
generalLogger.getLogger().warning(message)
Logger.getInstance().warning(message)
else:
message = "Recurrent error, skipping analysis for: %s" % prefix
generalLogger.getLogger().warning(message)
Logger.getInstance().warning(message)
executed = True
endTime = time.time()
generalLogger.getLogger().info("%s End Time: %s" % (functionName, getFormattedDatetime(timemilis=endTime)))
totalTime = timeDifference(startTime, endTime)
generalLogger.getLogger().info("%s Total Time: %s m" % (functionName, totalTime))
| [
11748,
651,
8738,
198,
11748,
308,
66,
198,
11748,
25064,
198,
198,
11748,
28686,
198,
198,
11748,
640,
198,
11748,
12854,
1891,
198,
198,
11748,
26692,
22602,
198,
198,
6738,
3100,
72,
6344,
13,
7718,
49,
13660,
3109,
11755,
1330,
1846... | 1.946047 | 6,691 |
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.BlogIndexView.as_view(), name='blog_index'),
] | [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
198,
6738,
764,
1330,
5009,
198,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
19016,
7,
81,
6,
61,
3,
3256,
5009,
13,
42383,
15732,
7680,
13,
292,
62,
1177,
22784,
1... | 2.686275 | 51 |
import os
import task_7_1
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
try:
with open(os.path.join(BASE_DIR, 'config.yaml'), 'r', encoding='utf-8') as config:
for name in config:
path = os.path.join(BASE_DIR, name.strip())
if not os.path.exists(path):
mk_dir, mk_file = os.path.split(path)
task_7_1.create_path(mk_dir)
if mk_file:
open(path, 'a').close()
except FileNotFoundError as err:
print(f'Не удалось найти файл config.yaml:\n', rf'{err}')
| [
11748,
28686,
198,
198,
11748,
4876,
62,
22,
62,
16,
628,
198,
33,
11159,
62,
34720,
796,
28686,
13,
6978,
13,
15908,
3672,
7,
418,
13,
6978,
13,
397,
2777,
776,
7,
834,
7753,
834,
4008,
198,
198,
28311,
25,
198,
220,
220,
220,
... | 1.829032 | 310 |
# @file setup.py
# This contains setup info for mu_build pip module
#
##
# Copyright (c) 2018, Microsoft Corporation
#
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
##
import setuptools
with open("README.rst", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="mu_build",
author="Project Mu Team",
author_email="maknutse@microsoft.com",
description="Supports CI build operations for Project Mu based firmware",
long_description=long_description,
url="https://github.com/microsoft/mu_pip_build",
license='BSD2',
packages=setuptools.find_packages(),
use_scm_version=True,
setup_requires=['setuptools_scm'],
entry_points={
'console_scripts': ['mu_build=MuBuild.MuBuild:main']
},
install_requires=[
'pyyaml',
'mu_environment>=0.3.3',
'mu_python_library>=0.4.1'
],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Development Status :: 4 - Beta"
]
)
| [
2,
2488,
7753,
9058,
13,
9078,
198,
2,
770,
4909,
9058,
7508,
329,
38779,
62,
11249,
7347,
8265,
198,
2,
198,
2235,
198,
2,
15069,
357,
66,
8,
2864,
11,
5413,
10501,
198,
2,
198,
2,
1439,
2489,
10395,
13,
198,
2,
2297,
396,
3890... | 3.015564 | 771 |
from .source import SourceRecurly
__all__ = ["SourceRecurly"]
| [
6738,
764,
10459,
1330,
8090,
6690,
333,
306,
198,
198,
834,
439,
834,
796,
14631,
7416,
6690,
333,
306,
8973,
198
] | 3 | 21 |
import sys
if __name__ == '__main__':
main() | [
11748,
25064,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
197,
12417,
3419
] | 2.555556 | 18 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:mod:`test_monkey_patching`
=======================
.. moduleauthor:: hbldh <henrik.blidh@swedwise.com>
Created on 2016-08-31, 11:23
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import os
from itertools import chain
from PIL import Image
import imdirect
this_dir = os.path.dirname(os.path.abspath(__file__))
def test_autorotate_1():
"""Test rotation of real image with orientation value = 6"""
# TODO: Bad test, should be removed or improved.
image_path = os.path.join(this_dir, 'testfile_6.jpg')
img = Image.open(image_path)
assert img.width == 300
assert img.height == 225
assert img._getexif().get(274) == 6
img_rot = imdirect.autorotate(img)
assert img_rot.width == 225
assert img_rot.height == 300
assert not hasattr(img_rot, '_getexif')
def test_rotate_1(base_img, image_with_rotation_value_1):
"""Test rotation of image with orientation = 1"""
rotated_img = imdirect.autorotate(image_with_rotation_value_1)
x = list(rotated_img.getdata())
assert x == list(chain(*base_img))
def test_rotate_2(base_img, image_with_rotation_value_2):
"""Test rotation of image with orientation = 2"""
rotated_img = imdirect.autorotate(image_with_rotation_value_2)
x = list(rotated_img.getdata())
assert x == list(chain(*base_img))
def test_rotate_3(base_img, image_with_rotation_value_3):
"""Test rotation of image with orientation = 3"""
rotated_img = imdirect.autorotate(image_with_rotation_value_3)
x = list(rotated_img.getdata())
assert x == list(chain(*base_img))
def test_rotate_4(base_img, image_with_rotation_value_4):
"""Test rotation of image with orientation = 4"""
rotated_img = imdirect.autorotate(image_with_rotation_value_4)
x = list(rotated_img.getdata())
assert x == list(chain(*base_img))
def test_rotate_5(base_img, image_with_rotation_value_5):
"""Test rotation of image with orientation = 5"""
rotated_img = imdirect.autorotate(image_with_rotation_value_5)
x = list(rotated_img.getdata())
assert x == list(chain(*base_img))
def test_rotate_6(base_img, image_with_rotation_value_6):
"""Test rotation of image with orientation = 6"""
rotated_img = imdirect.autorotate(image_with_rotation_value_6)
x = list(rotated_img.getdata())
assert x == list(chain(*base_img))
def test_rotate_7(base_img, image_with_rotation_value_7):
"""Test rotation of image with orientation = 7"""
rotated_img = imdirect.autorotate(image_with_rotation_value_7)
x = list(rotated_img.getdata())
assert x == list(chain(*base_img))
def test_rotate_8(base_img, image_with_rotation_value_8):
"""Test rotation of image with orientation = 8"""
rotated_img = imdirect.autorotate(image_with_rotation_value_8)
x = list(rotated_img.getdata())
assert x == list(chain(*base_img))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
25,
4666,
25,
63,
9288,
62,
49572,
62,
17147,
278,
63,
198,
4770,
1421,
18604,
198,
198,
492,
8265,
9800,... | 2.672889 | 1,125 |
#Number Checker:
#In this exercise I had to write a Python program to check if a user input number is positive, negative, or zero.
#Instantiated an input variable that takes in a float.
inputNumber = float(input("Please enter your number here: "))
#I created conditional statements to determine if the input variable is negative, zero or positive
#by creating a condition if the input is greater than, less than or equal to zero.
if(inputNumber < 0):
print("The input number is a negative number.")
elif(inputNumber == 0):
print("The input number is zero.")
elif(inputNumber > 0):
print("The input number is a positive number.")
else:
print("Please enter a valid number!")
| [
2,
15057,
6822,
263,
25,
198,
2,
818,
428,
5517,
314,
550,
284,
3551,
257,
11361,
1430,
284,
2198,
611,
257,
2836,
5128,
1271,
318,
3967,
11,
4633,
11,
393,
6632,
13,
198,
198,
2,
49933,
12931,
281,
5128,
7885,
326,
2753,
287,
257... | 3.745946 | 185 |
from wonderbits import Display, Led, Control, LightBelt, Signal
import random
import time
display1 = Display()
led = Led()
control1 = Control()
# lightBelt = LightBelt()
signal = Signal()
time.sleep(3)
# lightBelt.set_leds_rgb(1, 10, 255, 0, 0)
signal.set_rgb(255, 255, 0)
@control1.event.sw1_pressed()
@control1.event.sw2_pressed()
# content = 1
# while True:
# display1.print(1, 1, content)
# content = content + 1
# led.set_rgb(random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
# 获取类
# sw4 = control1.get_sw4()
# print('我收到sw4状态{}'.format(sw4))
# isAtOne = control1.is_sw3_at_1()
# print('我收到sw在1位置: {}'.format(isAtOne))
'''
def __init__(self, index=1):
Wonderbits.__init__(self)
self.index = index
self.event = Control._Event(self)
class _Event():
def __init__(self, this):
self.this = this
def sw1_pressed(self):
def wrapper(cb):
self.this._register_event(
'control{}'.format(self.this.index), 'sw1', cb)
return wrapper
def sw2_pressed(self):
def wrapper(cb):
self.this._register_event(
'control{}'.format(self.this.index), 'sw2', cb)
return wrapper
''' | [
198,
6738,
4240,
9895,
1330,
16531,
11,
22964,
11,
6779,
11,
4401,
33,
2120,
11,
26484,
198,
11748,
4738,
198,
11748,
640,
198,
198,
13812,
16,
796,
16531,
3419,
198,
992,
796,
22964,
3419,
198,
13716,
16,
796,
6779,
3419,
198,
2,
1... | 2 | 653 |
from rdflib.graph import Graph, ConjunctiveGraph, ReadOnlyGraphAggregate
from rdflib.term import BNode, Node, URIRef, IdentifiedNode
from rdflib import Graph, URIRef, Literal # type: ignore
from rdflib.namespace import OWL, DCTERMS, RDF, RDFS, XSD # type: ignore
from rdflib import Namespace
NS = Namespace("https://w3id.org/ftm#") # type: ignore | [
198,
198,
6738,
374,
67,
2704,
571,
13,
34960,
1330,
29681,
11,
1482,
29741,
14070,
37065,
11,
4149,
10049,
37065,
46384,
49373,
198,
6738,
374,
67,
2704,
571,
13,
4354,
1330,
347,
19667,
11,
19081,
11,
37902,
4663,
891,
11,
24517,
19... | 2.771654 | 127 |
from peewee import *
from flask_restful_swagger import swagger
from flask_restful import fields as flask_fields
from marshmallow import Schema, fields
from genre_api.models.meta import BaseModel
from genre_api.models.genre import Genre
from genre_api.models.song import Song
from genre_api.models.playlist import Playlist
| [
6738,
613,
413,
1453,
1330,
1635,
198,
6738,
42903,
62,
2118,
913,
62,
2032,
7928,
1330,
1509,
7928,
198,
6738,
42903,
62,
2118,
913,
1330,
7032,
355,
42903,
62,
25747,
198,
6738,
22397,
42725,
1330,
10011,
2611,
11,
7032,
198,
6738,
... | 3.670455 | 88 |
#This file is used to display the results of various algorithms
import matplotlib.pyplot as plt
import numpy
import christofedes
import nearestNeighbor
import insertion
import branchAndBound
import karpPartition
#Shows the various paths which each algorithm chose to approximate the optimal
#Works best when mapping one or two algorithms but extensible to any number
#Converts the method's string representation to something readable for display
#Generates the given number of random points
if __name__ == "__main__":
plotTours(generateRandomPoints(20), karpPartition.runKarp) | [
2,
1212,
2393,
318,
973,
284,
3359,
262,
2482,
286,
2972,
16113,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
299,
32152,
198,
11748,
33826,
1659,
37507,
198,
11748,
16936,
46445,
2865,
198,
11748,
36075,
19... | 4.034483 | 145 |
import netCDF4 as cdf
import datetime as dt
from glob import glob
from collections import OrderedDict
import pyproj
ncf_time_format = "%Y-%m-%d %H:%M:%S"
#filename = "/home/mikhailsavelov/pysteps-data/out/probab_ensemble_nwc_201810271000.ncf"
#ncf = open_netcdf(filename, 'r')
#print(ncf.startdate_str)
#a = get_prob(ncf, "201810271030", 51.92, 53.44)
P = get_prob_dict("/home/ubuntu/pysteps-data/out", 50.9203, 31.1968)
print(P)
| [
11748,
2010,
34,
8068,
19,
355,
269,
7568,
198,
11748,
4818,
8079,
355,
288,
83,
198,
6738,
15095,
1330,
15095,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
11748,
12972,
1676,
73,
198,
198,
10782,
69,
62,
2435,
62,
18982,
796,
... | 2.277487 | 191 |
import numpy as np
import pandas as pd
print("(1) タプルのリストによるエッジリスト")
edge_list = [("A", "B"), ("A", "C"), ("C", "D")]
print(edge_list)
print("\n(2) pandasのデータフレームによるエッジリスト(sourceは始点ノード、targetは終点ノード)")
pandas_edge_list = pd.DataFrame({"source": ["A", "B", "C"], "target": ["B", "C", "D"]})
print(pandas_edge_list)
# 隣接行列
print("\n(3) リストのリストによる隣接行列(ノード名は持てない)")
adj_matrix = [[0, 1, 1, 0], [0, 0, 0, 0], [0, 0, 0, 1], [0, 0, 0, 0]]
print(adj_matrix)
print("\n(4) pandasのデータフレームによる隣接行列(ノード名を持てる)")
pandas_adj_matrix = pd.DataFrame(
[[0, 1, 1, 0], [0, 0, 0, 0], [0, 0, 0, 1], [0, 0, 0, 0]],
index=["A", "B", "C", "D"],
columns=["A", "B", "C", "D"],
)
print(pandas_adj_matrix)
| [
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
4798,
7203,
7,
16,
8,
17433,
123,
30965,
9202,
5641,
12675,
43302,
28618,
1792,
230,
25748,
23544,
14777,
21091,
12675,
43302,
4943,
198,
14907,
62,
4868,
79... | 1.562358 | 441 |
import os
import subprocess
from uranium import task_requires
@task_requires("main")
def distribute(build):
""" distribute the uranium package """
build.packages.install("wheel")
build.executables.run([
"python", "setup.py",
"sdist", "bdist_wheel", "--universal", "upload"
])
| [
11748,
28686,
198,
11748,
850,
14681,
198,
6738,
22010,
1330,
4876,
62,
47911,
628,
198,
198,
31,
35943,
62,
47911,
7203,
12417,
4943,
628,
198,
4299,
14983,
7,
11249,
2599,
198,
220,
220,
220,
37227,
14983,
262,
22010,
5301,
37227,
198... | 2.81982 | 111 |
''' Implementation of NeuralSEA
'''
import torch.nn as nn
from neuralsea._se_block import _SEBlock
from neuralsea._time_distributed import _TimeDistributed
class NeuralSEA(nn.Module):
''' NeuralSEA
A Neural DNA SEquence Analyzer
'''
| [
7061,
6,
46333,
286,
47986,
46887,
198,
7061,
6,
198,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
198,
6738,
17019,
8583,
13557,
325,
62,
9967,
1330,
4808,
5188,
12235,
198,
6738,
17019,
8583,
13557,
2435,
62,
17080,
6169,
1330,
... | 3.060976 | 82 |
import requests
| [
11748,
7007,
628,
220,
220,
220,
220,
198,
220,
220,
220,
220,
198
] | 2.076923 | 13 |
#!/usr/bin python3
"""
<Description of the programme>
Author: Pouria Hadjibagheri <pouria.hadjibagheri@phe.gov.uk>
Created: 25 Oct 2020
License: MIT
Contributors: Pouria Hadjibagheri
"""
# Imports
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Python:
from datetime import datetime
from os.path import abspath, split as split_path, join as join_path
from random import randint
# 3rd party:
from pandas import DataFrame
# Internal:
from app.common.data.variables import DestinationMetrics, IsImproving
from app.common.utils import get_release_timestamp
from app.database.postgres import Connection
from app.template_processor import render_template
from app.caching import from_cache_or_func
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
__all__ = [
'home_page',
'get_home_page'
]
curr_dir, _ = split_path(abspath(__file__))
queries_dir = join_path(curr_dir, "queries")
with open(join_path(queries_dir, "overview_data.sql")) as fp:
overview_data_query = fp.read()
metrics = [
'newAdmissions',
'newAdmissionsChange',
'newAdmissionsChangePercentage',
'newAdmissionsRollingSum',
'newAdmissionsDirection',
"newPeopleVaccinatedFirstDoseByPublishDate",
"newPeopleVaccinatedSecondDoseByPublishDate",
"cumPeopleVaccinatedFirstDoseByPublishDate",
"cumPeopleVaccinatedSecondDoseByPublishDate",
"cumVaccinationFirstDoseUptakeByPublishDatePercentage",
"cumVaccinationSecondDoseUptakeByPublishDatePercentage",
'newDeaths28DaysByPublishDate',
'newDeaths28DaysByPublishDateChange',
'newDeaths28DaysByPublishDateChangePercentage',
'newDeaths28DaysByPublishDateRollingSum',
'newDeaths28DaysByPublishDateDirection',
'newDeaths28DaysByDeathDateRollingRate',
'newCasesByPublishDate',
'newCasesByPublishDateChange',
'newCasesByPublishDateChangePercentage',
'newCasesByPublishDateRollingSum',
'newCasesByPublishDateDirection',
'newCasesBySpecimenDateRollingRate',
'newVirusTests',
'newVirusTestsChange',
'newVirusTestsChangePercentage',
'newVirusTestsRollingSum',
'newVirusTestsDirection',
]
| [
2,
48443,
14629,
14,
8800,
21015,
18,
198,
198,
37811,
198,
27,
11828,
286,
262,
11383,
29,
198,
198,
13838,
25,
220,
220,
220,
220,
220,
220,
220,
350,
280,
7496,
11161,
73,
571,
30450,
72,
1279,
79,
280,
7496,
13,
18108,
73,
571... | 2.840358 | 783 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
| [
2,
21004,
25,
3384,
69,
12,
23,
198,
11748,
4818,
8079,
198,
6738,
5366,
13,
9945,
1330,
20613,
198,
6738,
5366,
13,
85,
17,
1330,
10011,
2611,
44,
4254,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198
] | 3.263158 | 38 |
# -*- coding: utf-8 -*-
# shell:
# $ python plusik-0.0.2.py : uhti
# --
# First type:
print 1 + 1
# then open terminal and run the command
# and next iteration.
print 1 + 2
print 1 + 3
print 1 + 4
# The goal is to show to the kid that you can change something in one place
# and you will get change in another place.
# On each step you need to discuss with the child that line
# "print 1 + x". Find the resolution like 1 finger + 2 fingers = 3 fingers.
# Requirements:
# Keys "Backspace" and "Enter" should be known before (you can involve the kid to type that program)
# Side effect:
# Getting known operation Save File, switch to terminal, re-run previous command.
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
7582,
25,
198,
2,
720,
21015,
5556,
1134,
12,
15,
13,
15,
13,
17,
13,
9078,
220,
220,
220,
220,
1058,
334,
4352,
72,
198,
2,
1377,
198,
2,
3274,
2099,
25,
198... | 3.36 | 200 |
import requests
import settings
import logging
import re
from bs4 import BeautifulSoup
from random import randint
import time
logging.basicConfig(level=logging.INFO)
# make auth
# update and return list of subjs and tests
# predict answers for any question
# mark answers into requested question
# commit test
| [
11748,
7007,
198,
11748,
6460,
198,
11748,
18931,
198,
11748,
302,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
198,
6738,
4738,
1330,
43720,
600,
198,
11748,
640,
198,
198,
6404,
2667,
13,
35487,
16934,
7,
5715,
28,
6404,
2667,
13... | 3.585106 | 94 |
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
from ....core import tile
from ....core.operand import OperandStage
from ...indexing.getitem import DataFrameIndex
from ...initializer import DataFrame
from ..sort_index import sort_index, DataFrameSortIndex
from ..sort_values import dataframe_sort_values, DataFrameSortValues
| [
2,
15069,
7358,
12,
1238,
2481,
41992,
4912,
31703,
12052,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
... | 3.80083 | 241 |
# SPDX-License-Identifier: BSD-3-Clause
#
# Copyright (c) 2021 Vít Labuda. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import annotations
from typing import Optional, List
import time
import hashlib
from .AdapterAuxiliaries import AdapterAuxiliaries
from .AdapterBase import AdapterBase
from .AdapterCloseConnectionException import AdapterCloseConnectionException
class ListAdapter(AdapterBase):
"""
This class simply relays messages from a list of strings to the POP3 server.
It has little practical use - it's intended for testing and as an example.
"""
| [
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
347,
10305,
12,
18,
12,
2601,
682,
198,
2,
198,
2,
15069,
357,
66,
8,
33448,
569,
8836,
83,
3498,
15339,
13,
1439,
2489,
10395,
13,
198,
2,
198,
2,
2297,
396,
3890,
290,
779,
287,
... | 3.707804 | 551 |
# Copyright Contributors to the Packit project.
# SPDX-License-Identifier: MIT
import functools
import logging
from collections import defaultdict
from typing import Dict, List, Set
from bodhi.client.bindings import BodhiClient
from packit.copr_helper import CoprHelper
from packit.exceptions import PackitException
from packit.utils.commands import run_command
from packit.utils.decorators import fallback_return_value
# Static fallback alias table, used when the live release list cannot be
# fetched from Bodhi (see get_aliases below); values here go stale over time.
ALIASES: Dict[str, List[str]] = {
    "fedora-development": ["fedora-rawhide"],
    "fedora-latest": ["fedora-35"],
    "fedora-stable": ["fedora-34", "fedora-35"],
    "fedora-all": ["fedora-34", "fedora-35", "fedora-rawhide"],
    "epel-all": ["el-6", "epel-7", "epel-8"],
}
# Architectures recognized in full target names such as "fedora-35-x86_64".
ARCHITECTURE_LIST: List[str] = [
    "aarch64",
    "armhfp",
    "i386",
    "ppc64le",
    "s390x",
    "x86_64",
]
DEPRECATED_TARGET_MAP = {"centos-stream": "centos-stream-8"}
# Expand the bare-name mapping above into one entry per architecture,
# e.g. "centos-stream-x86_64" -> "centos-stream-8-x86_64".
DEPRECATED_TARGET_MAP = {
    f"{k}-{x}": f"{v}-{x}"
    for k, v in DEPRECATED_TARGET_MAP.items()
    for x in ARCHITECTURE_LIST
}
# Alias applied when a caller passes no positional name at all.
DEFAULT_VERSION = "fedora-stable"
logger = logging.getLogger(__name__)
def get_versions(*name: str, default=DEFAULT_VERSION) -> Set[str]:
    """
    Expand the aliases to the name(s).

    :param name: name(s) of the system and version (e.g. "fedora-30" or "fedora-stable")
    :param default: used if no positional argument was given
    :return: set of string containing system name and version
    """
    if not (default or name):
        return set()
    expanded: Set[str] = set()
    # Fall back to the default alias when no positional name was supplied;
    # unknown names pass through unchanged.
    for requested in (name or (default,)):
        expanded.update(get_aliases().get(requested, [requested]))
    return expanded
def get_build_targets(*name: str, default: str = DEFAULT_VERSION) -> Set[str]:
    """
    Expand the aliases to the name(s) and transfer to the build targets.

    :param name: name(s) of the system and version (e.g. "fedora-30" or "fedora-stable")
        or target name (e.g. "fedora-30-x86_64" or "fedora-stable-x86_64")
    :param default: used if no positional argument was given
    :return: set of build targets
    :raises PackitException: for a single-word name other than something
        containing "rawhide"
    """
    if not (default or name):
        return set()
    names = list(name) or [default]
    possible_sys_and_versions: Set[str] = set()
    for one_name in names:
        # A target has at most three parts: <system>-<version>-<architecture>.
        name_split = one_name.rsplit("-", maxsplit=2)
        l_name_split = len(name_split)
        if l_name_split < 2:  # only one part
            # => cannot guess anything other than rawhide
            if "rawhide" in one_name:
                sys_name, version, architecture = "fedora", "rawhide", "x86_64"
            else:
                # The message is fully interpolated here already; the former
                # trailing `.format(one_name=...)` call was a no-op and has
                # been removed.
                raise PackitException(
                    f"Cannot get build target from '{one_name}'"
                    f", packit understands values like these: '{list(get_aliases().keys())}'."
                )
        elif l_name_split == 2:  # "name-version"
            sys_name, version = name_split
            architecture = "x86_64"  # use the x86_64 as a default
        else:  # "name-version-architecture"
            sys_name, version, architecture = name_split
        if architecture not in ARCHITECTURE_LIST:
            # we don't know the architecture => probably wrongly parsed
            # (e.g. "opensuse-leap-15.0")
            sys_name, version, architecture = (
                f"{sys_name}-{version}",
                architecture,
                "x86_64",
            )
        possible_sys_and_versions.update(
            {
                f"{sys_and_version}-{architecture}"
                for sys_and_version in get_versions(f"{sys_name}-{version}")
            }
        )
    # Rewrite deprecated target names (e.g. bare "centos-stream") to their
    # current equivalents.
    possible_sys_and_versions = {
        DEPRECATED_TARGET_MAP.get(target, target)
        for target in possible_sys_and_versions
    }
    return possible_sys_and_versions
def get_valid_build_targets(*name: str, default: str = DEFAULT_VERSION) -> set:
    """
    Generate the set of build targets that are also available as Copr chroots.

    :param name: name(s) of the system and version or target name (passed to
        packit.config.aliases.get_build_targets() function)
        or target name (e.g. "fedora-30-x86_64" or "fedora-stable-x86_64")
    :param default: used if no positional argument was given
    :return: set of build targets available also in copr chroots
    """
    build_targets = get_build_targets(*name, default=default)
    # Lazy %-style logging args avoid formatting cost when INFO is disabled.
    logger.info("Build targets: %s", build_targets)
    copr_chroots = CoprHelper.get_available_chroots()
    logger.info("Copr chroots: %s", copr_chroots)
    # get_build_targets() already returns a set, so only the chroots need
    # converting; compute the intersection once instead of once for the log
    # line and again for the return value.
    valid_targets = build_targets & set(copr_chroots)
    logger.info("Result set: %s", valid_targets)
    return valid_targets
def get_branches(
    *name: str, default=DEFAULT_VERSION, default_dg_branch="main"
) -> Set[str]:
    """
    Expand the aliases to the name(s) and transfer to the dist-git branch name.

    :param name: name(s) of the system and version (e.g. "fedora-stable" or "fedora-30")
        or branch name (e.g. "f30" or "epel8")
    :param default: used if no positional argument was given
    :param default_dg_branch: repo's default branch
    :return: set of dist-git branch names
    """
    if not (default or name):
        return set()
    names = list(name) or [default]
    branches = set()
    for sys_and_version in get_versions(*names):
        if "rawhide" in sys_and_version:
            # rawhide builds from the repository's default branch
            branches.add(default_dg_branch)
        elif sys_and_version.startswith("fedora"):
            # "fedora-NN" -> "fNN"; the distro part is not needed
            _, version = sys_and_version.rsplit("-", maxsplit=1)
            branches.add(f"f{version}")
        elif sys_and_version.startswith("epel"):
            # Split once (previously rsplit was computed twice on this path).
            split = sys_and_version.rsplit("-", maxsplit=1)
            if len(split) < 2:
                # no version part, keep the name as-is (e.g. "epel8")
                branches.add(sys_and_version)
                continue
            version = split[1]
            # EPEL <= 6 used "elN" branch names; newer releases use "epelN".
            if version.isnumeric() and int(version) <= 6:
                branches.add(f"el{version}")
            else:
                branches.add(f"epel{version}")
        else:
            # We don't know, let's leave the original name.
            branches.add(sys_and_version)
    return branches
# lru_cache(maxsize=1) memoizes the single result for the process lifetime;
# fallback_return_value returns the static ALIASES table when the Bodhi call
# raises (order matters: the fallback runs inside the cache).
@functools.lru_cache(maxsize=1)
@fallback_return_value(ALIASES)
def get_aliases() -> Dict[str, List[str]]:
    """
    Function to automatically determine fedora-all, fedora-stable, fedora-development,
    fedora-latest and epel-all aliases.
    Current data are fetched via bodhi client, with default base url
    `https://bodhi.fedoraproject.org/'.
    :return: dictionary containing aliases
    """
    bodhi_client = BodhiClient()
    releases = bodhi_client.get_releases(exclude_archived=True)
    aliases = defaultdict(list)
    for release in releases.releases:
        # Fedora releases (excluding ELN) are bucketed by state into
        # stable ("current") and development ("pending").
        if release.id_prefix == "FEDORA" and release.name != "ELN":
            name = release.long_name.lower().replace(" ", "-")
            if release.state == "current":
                aliases["fedora-stable"].append(name)
            elif release.state == "pending":
                aliases["fedora-development"].append(name)
        elif release.id_prefix == "FEDORA-EPEL":
            name = release.name.lower()
            aliases["epel-all"].append(name)
    if "fedora-development" in aliases:
        # sort numerically by the trailing version number
        aliases["fedora-development"].sort(key=lambda x: int(x.rsplit("-")[-1]))
        # The Fedora with the highest version is "rawhide", but
        # Bodhi always uses release names, and has no concept of "rawhide".
        aliases["fedora-development"][-1] = "fedora-rawhide"
    if "fedora-stable" in aliases:
        aliases["fedora-stable"].sort(key=lambda x: int(x.rsplit("-")[-1]))
    aliases["fedora-all"] = aliases["fedora-stable"] + aliases["fedora-development"]
    # fedora-rawhide is the last release, we want the second latest (the latest
    # non rawhide release)
    if len(aliases["fedora-all"]) >= 2:
        aliases["fedora-latest"] = [aliases["fedora-all"][-2]]
    return aliases
| [
2,
15069,
25767,
669,
284,
262,
6400,
270,
1628,
13,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
17168,
198,
198,
11748,
1257,
310,
10141,
198,
11748,
18931,
198,
6738,
17268,
1330,
4277,
11600,
198,
6738,
19720,
1330,
360,
713... | 2.323487 | 3,453 |
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
from client import wit_client # NOQA
| [
2,
532,
9,
12,
21004,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
5456,
1330,
20868,
62,
16366,
220,
1303,
8005,
48,
32,
198
] | 2.763158 | 38 |
import pandas as pd
import sklearn.utils
from sklearn.utils import shuffle
from tqdm import tqdm
from data_utils import mention_split, normalize_text, MENTION, to_file, SEED
project = 'PATH/TO/PROJECT'
MIN_SENTENCE_LEN = 3
df = pd.read_csv(project + 'data/interim/author-profiling.tsv', sep='\t',
names=['index', 'age', 'gender', 'id', 'text'], skiprows=1, index_col=False, lineterminator='\n',
encoding='utf8')
df = sklearn.utils.shuffle(df, random_state=SEED)
# Gender
males, m_ids = tokenize(df[df['gender'] == 'male'], MIN_SENTENCE_LEN)
_, males = zip(*sorted(zip(m_ids, males)))
females, f_ids = tokenize(df[df['gender'] == 'female'], MIN_SENTENCE_LEN)
_, females = zip(*sorted(zip(f_ids, females)))
train_pos_m, train_neg_m = mention_split(males[:100000], min_len=MIN_SENTENCE_LEN)
test_pos_m, test_neg_m = mention_split(males[102000:], min_len=MIN_SENTENCE_LEN)
train_pos_m = shuffle(train_pos_m, random_state=SEED)
train_neg_m = shuffle(train_neg_m, random_state=SEED)
train_pos_f, train_neg_f = mention_split(females[:92000], min_len=MIN_SENTENCE_LEN)
test_pos_f, test_neg_f = mention_split(females[94000:], min_len=MIN_SENTENCE_LEN)
train_pos_f = shuffle(train_pos_f, random_state=SEED)
train_neg_f = shuffle(train_neg_f, random_state=SEED)
train_size = 40000
sentences = train_pos_m + train_pos_f + train_neg_m + train_neg_f + test_pos_m + test_pos_f + test_neg_m + test_neg_f
vocab = list(set([item for sublist in sentences for item in sublist]))
id2voc = dict(enumerate(vocab))
voc2id = {v: k for k, v in id2voc.iteritems()}
to_file(project + 'data/processed/author_mention_gender/', voc2id, vocab, train_pos_m[:train_size] + test_pos_m,
train_pos_f[:train_size] + test_pos_f, train_neg_m[:train_size] + test_neg_m,
train_neg_f[:train_size] + test_neg_f)
young, y_ids = tokenize(df[(df['age'] == 0) | (df['age'] == 1)], MIN_SENTENCE_LEN)
_, young = zip(*sorted(zip(y_ids, young)))
old, o_ids = tokenize(df[(df['age'] == 2) | (df['age'] == 3) | (df['age'] == 4)], MIN_SENTENCE_LEN)
_, old = zip(*sorted(zip(o_ids, old)))
train_pos_y, train_neg_y = mention_split(young[6500:], min_len=MIN_SENTENCE_LEN)
test_pos_y, test_neg_y = mention_split(young[:6000], min_len=MIN_SENTENCE_LEN)
train_pos_y = shuffle(train_pos_y, random_state=SEED)
train_neg_y = shuffle(train_neg_y, random_state=SEED)
train_pos_o, train_neg_o = mention_split(old[:110000], min_len=MIN_SENTENCE_LEN)
test_pos_o, test_neg_o = mention_split(old[112000:], min_len=MIN_SENTENCE_LEN)
train_pos_o = shuffle(train_pos_o, random_state=SEED)
train_neg_o = shuffle(train_neg_o, random_state=SEED)
sentences = train_pos_y + train_neg_y + train_pos_o + train_neg_o + test_pos_y + test_neg_y + test_pos_o + test_neg_o
vocab = list(set([item for sublist in sentences for item in sublist]))
id2voc = dict(enumerate(vocab))
voc2id = {v: k for k, v in id2voc.iteritems()}
to_file(project + 'data/processed/author_mention_age/', voc2id, vocab, train_pos_y[:train_size] + test_pos_y,
train_pos_o[:train_size] + test_pos_o, train_neg_y[:train_size] + test_neg_y,
train_neg_o[:train_size] + test_neg_o)
| [
11748,
19798,
292,
355,
279,
67,
198,
11748,
1341,
35720,
13,
26791,
198,
6738,
1341,
35720,
13,
26791,
1330,
36273,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
198,
6738,
1366,
62,
26791,
1330,
3068,
62,
35312,
11,
3487,
10... | 2.301901 | 1,368 |
# coding: utf-8
# Copyright 2019 TWO SIGMA OPEN SOURCE, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
a simple class to implement a file like object to provide read only stream
"""
import logging
import os
import random
import numpy
import xxhash
from .config import Config
class IngressObject:
"""
A virtual read only file like object
"""
# class variables so no need to redo this for every instance
SEED_LENGTH = 1048576
DIGEST_BLOCK_SIZE = 1048576
seed = numpy.random.bytes(SEED_LENGTH)
def tell(self):
"""
:return: current offset
"""
return self.offset
def seek(self, offset, from_what=os.SEEK_SET):
"""
adjust offset
:param offset: offset value
:param from_what: seek option. see os module
:return: no return value
"""
if from_what == os.SEEK_SET:
self.offset = offset
elif from_what == os.SEEK_CUR:
self.offset += offset
elif from_what == os.SEEK_END:
self.offset = self.size + offset
else:
raise IOError('Invalid argument')
if self.offset < 0:
raise IOError('Invalid argument')
def read(self, size=-1):
"""
return data up to size
:param size:
:return: data read
"""
if self.offset >= self.size:
# return EOF
return bytearray(0)
# read no more than what is left
if size == -1:
read_size = self.size - self.offset
else:
read_size = min(self.size - self.offset, size)
# read no more than what left in the seed (from the offset)
offset_in_seed = self.offset % self.SEED_LENGTH
read_size = min(read_size, self.SEED_LENGTH - offset_in_seed)
self.offset += read_size
return self.seed[offset_in_seed:offset_in_seed + read_size]
def digest(self):
"""
helper code to return the digest of full content
"""
old_offset = self.tell()
self.seek(0, os.SEEK_SET)
hash_value = xxhash.xxh64()
while True:
data_block = self.read(self.DIGEST_BLOCK_SIZE)
if not data_block:
break
hash_value.update(data_block)
self.seek(old_offset, os.SEEK_SET)
return hash_value.hexdigest()
class EgressObject:
"""
a virtual write only file like object
"""
def write(self, data):
"""
write data
:param data: data to be written
:return: no return value
"""
if self.__digest:
self.__digest.update(data)
self.size = self.size + len(data)
def digest(self):
"""
:return: digest of the written data
"""
return self.__digest.hexdigest()
def generate_weights(size_distribution):
    """
    Flatten a weighted size distribution into a sampling list.

    Each range name appears WEIGHT times, so random.choice over the result
    picks ranges with probability proportional to their weight.

    :param size_distribution: mapping of range name -> {'WEIGHT': int, ...}
    :return: a list that contains ranges with weights.
    """
    return [
        name
        for name, spec in size_distribution.items()
        for _ in range(spec['WEIGHT'])
    ]
class WeightedSizeDistribution:
    """
    a helper class to support weighted size distribution.
    """
    # Class-level slots intended as a shared cache across instances.
    config = None
    # size distribution (weight, low bound, high bound)
    size_distribution = None
    weighted_size_distribution = None
    def __init__(self):
        """
        initialize class variable
        """
        # NOTE(review): `self.config = Config()` binds an *instance*
        # attribute; the class attribute stays None, so every new instance
        # re-creates Config — confirm whether class-level caching was intended.
        if not self.config:
            self.config = Config()
        # size distribution (weight, low bound, high bound)
        self.size_distribution = self.config['data']['weights']
        self.weighted_size_distribution = generate_weights(self.size_distribution)
    def length_range(self, length):
        """
        return the range that represents the length
        :param length: value to identify the range
        :return: range string
        """
        # Buckets are half-open intervals [LOW, HIGH).
        for key, value in self.size_distribution.items():
            if value['LOW'] <= length < value['HIGH']:
                return key
        # No configured bucket matched; treat everything else as LARGE.
        logging.debug('a large object %d', length)
        return 'LARGE'
    def weighted_rand_size(self):
        """
        generate a weighted random size
        :return: a weighted random size
        """
        # Pick a bucket with probability proportional to its WEIGHT.
        slot = random.choice(self.weighted_size_distribution)
        option = self.config['data'].get('sizing_option', 'random')
        if option == 'low_bound':
            size = self.size_distribution[slot]['LOW']
        elif option == 'random':
            # numpy.random.randint: low inclusive, high exclusive
            size = numpy.random.randint(self.size_distribution[slot]['LOW'],
                                        high=self.size_distribution[slot]['HIGH'])
        else:
            raise Exception('unsupported sizing option')
        return size
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
2,
15069,
13130,
35288,
33993,
5673,
38303,
311,
31033,
11,
11419,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
40... | 2.358003 | 2,243 |
import pygame
import random
import time
from pygame.locals import *
from . import ClassicScreen
from .. import * | [
11748,
12972,
6057,
198,
11748,
4738,
198,
11748,
640,
198,
6738,
12972,
6057,
13,
17946,
874,
1330,
1635,
198,
6738,
764,
1330,
13449,
23901,
198,
6738,
11485,
1330,
1635
] | 3.862069 | 29 |
from flask import Blueprint, render_template, url_for, redirect, flash, request
from flask import current_app as app
from flask_login import LoginManager, login_user, logout_user, login_required
from .forms import LoginForm, RegisterForm
from ..models import User, db
auth_bp = Blueprint('auth', __name__,
template_folder='templates', static_folder='static')
# view to redirect to for anonymous users
login_manager = LoginManager()
login_manager.login_view = '.login'
@login_manager.user_loader
def load_user(id):
"""
Get connected user from session on each page request.
"""
return User.query.get(id)
@auth_bp.route('/login', methods=['GET', 'POST'])
@auth_bp.route('/register', methods=['GET', 'POST'])
@auth_bp.route('/success')
@auth_bp.route('/logout')
@login_required
@auth_bp.route('/profile')
@login_required
| [
6738,
42903,
1330,
39932,
11,
8543,
62,
28243,
11,
19016,
62,
1640,
11,
18941,
11,
7644,
11,
2581,
198,
6738,
42903,
1330,
1459,
62,
1324,
355,
598,
198,
6738,
42903,
62,
38235,
1330,
23093,
13511,
11,
17594,
62,
7220,
11,
2604,
448,
... | 3.017361 | 288 |
from michelanglo_app.transplier import PyMolTranspiler
transpiler = PyMolTranspiler(file='michelanglo_app/demo/1gfl.pse')
#print(transpiler.get_view())
#print(transpiler.get_reps())
print(transpiler.get_loadfun_js(tag_wrapped=True, viewport='viewport', funname='loadfun'))
| [
6738,
12314,
2978,
648,
5439,
62,
1324,
13,
7645,
489,
959,
1330,
9485,
44,
349,
8291,
79,
5329,
198,
198,
7645,
79,
5329,
796,
9485,
44,
349,
8291,
79,
5329,
7,
7753,
11639,
9383,
2978,
648,
5439,
62,
1324,
14,
9536,
78,
14,
16,
... | 2.513761 | 109 |
from os import environ
from pprint import pprint
from python_anticaptcha import AnticaptchaClient, ImageToTextTask
from sys import argv
api_key = environ["KEY"]
soft_id = argv[1]
mode = argv[2]
if __name__ == "__main__":
pprint(process(soft_id, mode))
| [
6738,
28686,
1330,
551,
2268,
198,
6738,
279,
4798,
1330,
279,
4798,
198,
6738,
21015,
62,
5109,
2373,
11693,
1330,
3738,
291,
2373,
11693,
11792,
11,
7412,
2514,
8206,
25714,
198,
6738,
25064,
1330,
1822,
85,
198,
198,
15042,
62,
2539,... | 2.747368 | 95 |
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django_summernote.admin import SummernoteModelAdmin
from lxml.html.clean import Cleaner
from django.core.exceptions import ObjectDoesNotExist
from .models import *
admin.site.site_header = 'Notícias - Administração'
admin.site.site_title = 'Admin'
admin.site.index_title = 'Gestor'
@admin.register(Position)
admin.site.unregister(User)
admin.site.register(User, ProfileAdmin)
@admin.register(Author)
@admin.register(Category)
@admin.register(Tag)
@admin.register(Article)
@admin.register(ArticleTag)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
28482,
1330,
11787,
46787,
198,
198,
6738,
42625,
14208,
62,
16345,
647,
11295,
13,
28482,
1330,
10216,
11295,
17633,
46787,
198,
... | 3.019802 | 202 |
# Print a table of (I, J) pairs: I steps through 1, 3, 5, 7, 9 and, for each I,
# the inner loop prints three rows while J counts down; J then advances by 5,
# so each block of three rows starts 2 higher than the previous one.
# Fix: removed the dead variable `conta` (assigned, never read) and the unused
# loop counter name.
x = 1
y = 7
while x <= 9:
    for _ in range(1, 4):
        print("I=%i J=%i" % (x, y))
        y = y - 1
    x += 2
    y += 5
| [
87,
796,
352,
198,
88,
796,
767,
198,
198,
3642,
64,
796,
331,
532,
362,
198,
198,
4514,
2124,
19841,
860,
25,
198,
220,
220,
220,
329,
266,
287,
2837,
7,
16,
11,
19,
2599,
198,
220,
220,
220,
220,
220,
220,
220,
3601,
7203,
4... | 1.591398 | 93 |
import pickle
from cobrame.io.json import load_json_me_model
import cobrame
from sympy import Basic
from os.path import dirname, abspath
currency_met_to_synthesis_rxn = {'coa': 'DPCOAK',
'thf': 'DHFS',
# use this reaction because DHFR is coupled to dUMP synthesis
'nadp': 'NADK',
'nad': 'NADS1',
# need two reactions, one creates intermediate for NADK, other couples
'ribflv': 'RBFSb',
'gthox': 'GTHS',
'q8': 'DMQMT',
'2dmmq8': 'DHNAOT4',
# must make 2dmmql8 to produce mql8
'mqn8': 'AMMQLT8',
'fmn': '',
'fad': ''
}
met_to_name = {'2dmmq8': '2-Demethyl-\nmenaquinone 8',
'2fe2s': '2Fe-2S',
'4fe4s': '4Fe-4S',
'adocbl': 'Adenosyl-\ncobalamin',
'cbl1': 'Cobalamin',
'ala__L': 'L-Alanine',
'arg__L': 'L-Arginine',
'asn__L': 'L-Asparagine',
'asp__L': 'L-Aspartate',
'bmocogdp': 'bis-MGD',
'btn': 'Biotin',
'coa': 'Coenzyme A',
'cys__L': 'L-Cysteine',
'gln__L': 'L-Glutamine',
'glu__L': 'L-Glutamate',
'gly': 'Glycine',
'gthox': 'Glutathione',
'hemeO': 'Heme O',
'his__L': 'L-Histidine',
'ile__L': 'L-Isoleucine',
'leu__L': 'L-Leucine',
'met__L': 'L-Methionine',
'mqn8': 'Menaquinone 8',
'nad': 'NAD',
'nadp': 'NADP',
'phe__L': 'L-Phenylalanine',
'pheme': 'Protoheme',
'pro__L': 'L-Proline',
'pydx5p': "Pyridoxal \n 5'-phosphate",
'q8': 'Ubiquinone-8',
'ribflv': 'Riboflavin',
'ser__L': 'L-Serine',
'sheme': 'Siroheme',
'thf': 'Tetrahydrofolate',
'thmpp': 'Thiamine \n diphosphate',
'thr__L': 'L-Threonine',
'trp__L': 'L-Tryptophan',
'tyr__L': 'L-Tyrosine',
'val__L': 'L-Valine'}
here = dirname(abspath(__file__))
| [
198,
11748,
2298,
293,
198,
6738,
22843,
28073,
13,
952,
13,
17752,
1330,
3440,
62,
17752,
62,
1326,
62,
19849,
198,
11748,
22843,
28073,
198,
6738,
10558,
88,
1330,
14392,
198,
6738,
28686,
13,
6978,
1330,
26672,
3672,
11,
2352,
6978,
... | 1.669428 | 1,207 |
from enum import Enum
from sys import stderr
from typing import List
# region Old code
"""class Circle:
def __init__(self, e_type: Shape.EType = Shape.EType.square, radius: float = 1, center: Point = None):
self.type_ = e_type
self.radius_ = radius
if center is None:
center = Point(0, 0)
self.center_ = center
class Square:
def __init__(self, e_type: Shape.EType = Shape.EType.square, side: float = 1, center: Point = None):
self.type_ = e_type
self.side_ = side
if center is None:
center = Point(0, 0)
self.center_ = center
class Rhomb:
def __init__(self, e_type: Shape.EType = Shape.EType.square,
side: float = 1,
angle: float = 90,
center: Point = None):
self.type_ = e_type
self.side_ = side
self.angle = angle
if center is None:
center = Point(0, 0)
self.center_ = center
def draw_square(square: Square):
print("in draw_square", file=stderr)
def draw_circle(circle: Circle):
print("in draw_circle", file=stderr)
def draw_rhomb(rhomb: Rhomb):
print("in draw_rhomb", file=stderr)
def draw_shapes(shapes: List, n: int):
for i in range(0, min(n, len(shapes))):
s = shapes[i]
if s.type_ is Shape.EType.square:
draw_square(s)
elif s.type_ is Shape.EType.circle:
draw_circle(s)
elif s.type_ is Shape.EType.rhomb:
draw_rhomb(s)
else:
assert False
def move_shapes(shape, diff_x: int = 0, diff_y: int = 0):
shape.center_.x += diff_x
shape.center_.y += diff_y
print("{}'s center is now {} (from {})".format(type(shape).__name__,
shape.center_,
(shape.center_.x - diff_x, shape.center_.y - diff_y)),
file=stderr)
def main():
shapes = list()
shapes.append(Circle(e_type=Shape.EType.circle))
shapes.append(Square(e_type=Shape.EType.square))
shapes.append(Square(e_type=Shape.EType.square))
shapes.append(Circle(e_type=Shape.EType.circle))
draw_shapes(shapes, 4)
for shape in shapes:
move_shapes(shape, *(1, 1))
shapes.append(Rhomb(e_type=Shape.EType.rhomb))
draw_shapes(shapes[4:], 1)"""
# endregion
# region New Code
# endregion
main()
| [
6738,
33829,
1330,
2039,
388,
201,
198,
6738,
25064,
1330,
336,
1082,
81,
201,
198,
6738,
19720,
1330,
7343,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
2,
3814,
5706,
2438,
201,
198,
37811,
4871,
16291,
25,
201,
198,
220,
220... | 1.951108 | 1,309 |
import numpy as np
import cv2, math, os, sys
import classify
import sys
import cv2
import preprocess
import time
import cache
| [
11748,
299,
32152,
355,
45941,
220,
198,
11748,
269,
85,
17,
11,
10688,
11,
28686,
11,
25064,
198,
11748,
36509,
198,
11748,
25064,
198,
11748,
269,
85,
17,
198,
11748,
662,
14681,
198,
11748,
640,
198,
11748,
12940,
198,
220,
220,
22... | 2.6 | 55 |
# MPU-6050 I2C register addresses and bit-field constants.
# Power-management registers
PWR_MGM1 = 0x6b
PWR_MGM2 = 0x6c
# Sensor output registers (address of the high byte of each 16-bit reading)
GYRO_XOUT = 0x43
GYRO_YOUT = 0x45
GYRO_ZOUT = 0x47
ACC_XOUT = 0x3b
ACC_YOUT = 0x3d
ACC_ZOUT = 0x3f
# Configuration registers
SMPRT_DIV = 0x19
CONFIG=0x1a
GYRO_CONFIG = 0x1b
ACCEL_CONFIG = 0x1c
#PWR_MGM1 values
RESET=0b10000000
SLEEP=0b01000000
# clock source select values (per-axis gyroscope reference)
CLK_SEL_X = 1
CLK_SEL_Y = 2
CLK_SEL_Z = 3
#PWR_MGM2 values
# standby bits: A* = accelerometer axis, G* = gyroscope axis
STBY_AX = 0b00100000
STBY_AY = 0b00010000
STBY_AZ = 0b00001000
STBY_GX = 0b00000100
STBY_GY = 0b00000010
STBY_GZ = 0b00000001
# Digital low-pass filter settings for the CONFIG register.
#                 Accelerometer          |        Gyroscope
#                 F-sampling 1kHz        |
#          Bandwidth(Hz) | Delay(ms)     | Bandwidth(Hz) | Delay (ms) | F-sampling (kHz)
# ----------------------------------------------------------------------------
DLPF_CFG_0 = 0  # 260    |   0.0         |   256         |   0.98     |   8
DLPF_CFG_1 = 1  # 184    |   2.0         |   188         |   1.9      |   1
DLPF_CFG_2 = 2  # 94     |   3.0         |   98          |   2.8      |   1
DLPF_CFG_3 = 3  # 44     |   4.9         |   42          |   4.8      |   1
DLPF_CFG_4 = 4  # 21     |   8.5         |   20          |   8.3      |   1
DLPF_CFG_5 = 5  # 10     |   13.8        |   10          |   13.4     |   1
DLPF_CFG_6 = 6  # 5      |   19.0        |   5           |   18.6     |   1
# ----------------------------------------------------------------------------
DLPF_CFG_7 = 7  #     RESERVED           |       RESERVED             |   8
# GYRO_CONFIG full-scale select (the numeric suffix encodes the range)
GFS_250 = 0
GFS_500 = 0b00001000
GFS_1000 = 0b00010000
GFS_2000 = 0b00011000
# ACCEL_CONFIG full-scale select (the numeric suffix encodes the range)
AFS_2 = 0
AFS_4 = 0b00001000
AFS_8 = 0b00010000
AFS_16 = 0b00011000
| [
47,
18564,
62,
44,
15548,
16,
796,
657,
87,
21,
65,
201,
198,
47,
18564,
62,
44,
15548,
17,
796,
657,
87,
21,
66,
201,
198,
201,
198,
31212,
13252,
62,
55,
12425,
796,
657,
87,
3559,
201,
198,
31212,
13252,
62,
56,
12425,
796,
... | 1.579777 | 1,078 |
"""Entry point for histdatacom api
histdatacom(options)
Returns:
data: returns a data frame or a list of data frames and metadata
"""
import sys
from . import histdata_com
__version__ = "0.76.9"
__author__ = 'David Midlo'
class Options(sys.modules[__name__].__class__):
"""Options. A Masquerade class.
A class that extends sys.modules[__name__].__class__ (or the histdatacom class)
extends/overwrites with a __call__ method to allow the module to be callable.
Returns:
data: returns a data frame or a list of data frames and metadata
"""
sys.modules[__name__].__class__ = Options
| [
37811,
30150,
966,
329,
1554,
19608,
330,
296,
40391,
198,
198,
10034,
19608,
330,
296,
7,
25811,
8,
198,
198,
35561,
25,
198,
220,
220,
220,
1366,
25,
5860,
257,
1366,
5739,
393,
257,
1351,
286,
1366,
13431,
290,
20150,
198,
37811,
... | 3.019512 | 205 |
import random
| [
11748,
4738,
628
] | 5 | 3 |
import argparse
from autogluon.tabular import TabularDataset, TabularPredictor
from autogluon.tabular.models import CatBoostModel, KNNModel, LGBModel, XGBoostModel, TabularNeuralNetModel, RFModel
import os
from numpy.core.fromnumeric import trace
import pandas as pd
import traceback
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--dataset_dir', help='path to cpp directory', type=str, default='dataset/cpp')
parser.add_argument('-p', '--problem', help='only run this problem if specified', type=str, default=None)
parser.add_argument('-r', '--result_path', help='file to save test set score to', type=str, default='sanitycheck/cpp/result.csv')
parser.add_argument('-m', '--mode', help='what AutoGluon setting to try', choices=['ag', 'ag-stack'], default='ag-stack')
parser.add_argument('-t', '--time_limit', help='time limit in minutes', type=int, default=60)
args = parser.parse_args()
# DATASETS = [
# "1db99236-0601-4e03-b8bb-96b5eb236d74",
# "20e6e8e8-a4da-4fea-a9de-c784cdf84c1f",
# "2cbd9a22-0da1-404d-a7ba-49911840a622",
# "3cf28e5f-886a-4ace-bebf-299e1dbde654",
# "4dbb8031-56a6-43bf-9e03-40ea2affa163",
# "5729f07d-8d43-463d-894b-7dfa2da63efb",
# "5d1e3461-8b01-463c-a9db-2e4c48db1467",
# "60c60200-2341-427d-b0ec-2fc30c4bfdd8",
# ]
TIME_LIMIT = args.time_limit * 60.
RESULT_PATH = args.result_path
EXCEPTIONS_PATH = os.path.join(os.path.dirname(args.result_path), 'exceptions.csv')
if args.problem is None:
DATASETS = sorted([dataset for dataset in os.listdir(args.dataset_dir) if not dataset.startswith('.')])[1:]
else:
DATASETS = [args.problem]
FEATURE_PRUNE_KWARGS = {}
for dataset in DATASETS:
train_data = pd.read_csv(os.path.join(args.dataset_dir, dataset, 'train.csv'))
test_data = pd.merge(pd.read_csv(os.path.join(args.dataset_dir, dataset, 'testFeaturesNoLabel.csv')),
pd.read_csv(os.path.join(args.dataset_dir, dataset, 'testLabel.csv')), on='ID')
y_test = test_data['label']
presets = ['medium_quality_faster_train'] if args.mode == 'ag' else ['best_quality']
n_sample, n_feature = len(train_data), len(train_data.columns) - 1
result = {'dataset': [], 'mode': [], 'val_score': [], 'test_score': [], 'time_limit': [], 'n_sample': [], 'n_feature': []}
exception = {'dataset': [], 'type': [], 'error_str': [], 'stacktrace': []}
try:
predictor = TabularPredictor(label='label', eval_metric='roc_auc')
predictor = predictor.fit(train_data, presets=presets, time_limit=TIME_LIMIT, ag_args_fit=dict(num_cpu=8))
leaderboard = predictor.leaderboard(test_data)
best_val_row = leaderboard.loc[leaderboard['score_val'].idxmax()]
val_score, test_score = best_val_row['score_val'], best_val_row['score_test']
add_datapoint(result, dataset, presets[0], val_score, test_score, TIME_LIMIT, n_sample, n_feature)
except Exception as e:
add_exception(exception, dataset, presets[0], str(e), traceback.format_exc())
try:
predictor = TabularPredictor(label='label', eval_metric='roc_auc')
predictor = predictor.fit(train_data, presets=presets, time_limit=TIME_LIMIT, ag_args_fit=dict(num_cpu=8), feature_prune_kwargs=FEATURE_PRUNE_KWARGS)
leaderboard = predictor.leaderboard(test_data)
best_val_row = leaderboard.loc[leaderboard['score_val'].idxmax()]
val_score, test_score = best_val_row['score_val'], best_val_row['score_test']
add_datapoint(result, dataset, presets[0] + "_prune", val_score, test_score, TIME_LIMIT, n_sample, n_feature)
except Exception as e:
add_exception(exception, dataset, presets[0] + "_prune", str(e), traceback.format_exc())
result_df = pd.DataFrame(result)
if os.path.exists(RESULT_PATH):
original_result_df = pd.read_csv(RESULT_PATH)
result_df = pd.concat([original_result_df, result_df], axis=0)
result_df.to_csv(RESULT_PATH, index=False)
exception_df = pd.DataFrame(exception)
if os.path.exists(EXCEPTIONS_PATH):
original_exception_df = pd.read_csv(EXCEPTIONS_PATH)
exception_df = pd.concat([original_exception_df, exception_df], axis=0)
exception_df.to_csv(EXCEPTIONS_PATH, index=False)
| [
11748,
1822,
29572,
198,
6738,
1960,
49006,
261,
13,
8658,
934,
1330,
16904,
934,
27354,
292,
316,
11,
16904,
934,
47,
17407,
273,
198,
6738,
1960,
49006,
261,
13,
8658,
934,
13,
27530,
1330,
5181,
45686,
17633,
11,
509,
6144,
17633,
... | 2.367152 | 1,784 |
"""
View this repository on github: https://github.com/Jothin-kumar/Geometry-app
MIT License
Copyright (c) 2021 B.Jothin kumar
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Author: Jothin kumar (https://jothin.tech)
Github repository of this project: https://github.com/Jothin-kumar/Geometry-app
"""
import global_variables
import gui
import shapes
from current_mode import get_current_shape, set_point_modify_mode
from shape_panels import refresh_all as refresh_all_panels
previous_point_property = None
previous_highlighted_point = None
| [
37811,
198,
7680,
428,
16099,
319,
33084,
25,
3740,
1378,
12567,
13,
785,
14,
41,
849,
259,
12,
74,
44844,
14,
10082,
15748,
12,
1324,
198,
198,
36393,
13789,
198,
198,
15269,
357,
66,
8,
33448,
347,
13,
41,
849,
259,
479,
44844,
... | 3.617577 | 421 |
phrase = input('Please enter a sentence to do the analysis: ')
numberOfA = phrase.upper().count('A')
firstA = phrase.upper().find('A') + 1
lastA = phrase.upper().rfind('A') + 1
print('In the phrase "{}" appear {} letters "A".'.format(phrase, numberOfA))
print('The letter "A" appears for the first time in the position: {}.'.format(firstA))
print('The letter "A" appears for the last time in the position: {}.'.format(lastA))
| [
34675,
796,
5128,
10786,
5492,
3802,
257,
6827,
284,
466,
262,
3781,
25,
705,
8,
198,
198,
17618,
5189,
32,
796,
9546,
13,
45828,
22446,
9127,
10786,
32,
11537,
198,
11085,
32,
796,
9546,
13,
45828,
22446,
19796,
10786,
32,
11537,
134... | 3.218045 | 133 |
from Callbacks import *
| [
6738,
4889,
10146,
1330,
1635,
628,
198
] | 3.714286 | 7 |
# coding: utf-8
from .suite import BaseSuite
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
6738,
764,
2385,
578,
1330,
7308,
5606,
578,
628
] | 2.611111 | 18 |
from datetime import datetime
from anti_detection import *
from config import *
# 登录前有可能要更新数据
# 回到主界面
# 返回上一层
# 刚上线的一系列操作
# 戳老婆
# 领金币
# 取体力
# 远征
# 打工
# 商店必买品
# 每日活跃
# 一键减负
# 领凭证奖励
# 领邮件
# 领舰团每周贡献奖励
| [
6738,
4818,
8079,
1330,
4818,
8079,
198,
198,
6738,
3098,
62,
15255,
3213,
1330,
1635,
198,
6738,
4566,
1330,
1635,
628,
198,
2,
13328,
247,
119,
37605,
243,
30298,
235,
17312,
231,
20998,
107,
47797,
121,
17358,
223,
162,
249,
112,
2... | 0.966245 | 237 |
""" Cisco_IOS_XR_ip_sbfd_oper
This module contains a collection of YANG definitions
for Cisco IOS\-XR ip\-sbfd package operational data.
This module contains definitions
for the following management objects\:
sbfd\: Seamless BFD (S\-BFD) operational data
Copyright (c) 2013\-2016 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class BfdAfIdEnum(Enum):
"""
BfdAfIdEnum
Bfd af id
.. data:: bfd_af_id_none = 0
No Address
.. data:: bfd_af_id_ipv4 = 2
IPv4 AFI
.. data:: bfd_af_id_ipv6 = 26
IPv6 AFI
"""
bfd_af_id_none = 0
bfd_af_id_ipv4 = 2
bfd_af_id_ipv6 = 26
@staticmethod
class SbfdAddressFamilyEnum(Enum):
"""
SbfdAddressFamilyEnum
Sbfd address family
.. data:: ipv4 = 1
ipv4
.. data:: ipv6 = 2
ipv6
"""
ipv4 = 1
ipv6 = 2
@staticmethod
class Sbfd(object):
"""
Seamless BFD (S\-BFD) operational data
.. attribute:: target_identifier
Target\-identifier information
**type**\: :py:class:`TargetIdentifier <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_sbfd_oper.Sbfd.TargetIdentifier>`
"""
_prefix = 'ip-sbfd-oper'
_revision = '2015-11-09'
class TargetIdentifier(object):
"""
Target\-identifier information
.. attribute:: local_vrfs
SBFD local discriminator data
**type**\: :py:class:`LocalVrfs <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_sbfd_oper.Sbfd.TargetIdentifier.LocalVrfs>`
.. attribute:: remote_vrfs
SBFD remote discriminator data
**type**\: :py:class:`RemoteVrfs <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_sbfd_oper.Sbfd.TargetIdentifier.RemoteVrfs>`
"""
_prefix = 'ip-sbfd-oper'
_revision = '2015-11-09'
class RemoteVrfs(object):
"""
SBFD remote discriminator data
.. attribute:: remote_vrf
Table of remote discriminator data per VRF
**type**\: list of :py:class:`RemoteVrf <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_sbfd_oper.Sbfd.TargetIdentifier.RemoteVrfs.RemoteVrf>`
"""
_prefix = 'ip-sbfd-oper'
_revision = '2015-11-09'
class RemoteVrf(object):
"""
Table of remote discriminator data per VRF
.. attribute:: vrf_name <key>
VRF name
**type**\: str
**pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+
.. attribute:: remote_discriminator
SBFD remote discriminator
**type**\: list of :py:class:`RemoteDiscriminator <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_sbfd_oper.Sbfd.TargetIdentifier.RemoteVrfs.RemoteVrf.RemoteDiscriminator>`
"""
_prefix = 'ip-sbfd-oper'
_revision = '2015-11-09'
class RemoteDiscriminator(object):
"""
SBFD remote discriminator
.. attribute:: address
Address
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
----
.. attribute:: discr
Remote discriminator
**type**\: int
**range:** 0..4294967295
.. attribute:: discr_src
Discriminator source name
**type**\: str
.. attribute:: ip_address
IP address
**type**\: :py:class:`IpAddress <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_sbfd_oper.Sbfd.TargetIdentifier.RemoteVrfs.RemoteVrf.RemoteDiscriminator.IpAddress>`
.. attribute:: remote_discriminator
Remote Discriminator
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: status
Status
**type**\: str
.. attribute:: tid_type
Target identifier for sbfd
**type**\: :py:class:`SbfdAddressFamilyEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_sbfd_oper.SbfdAddressFamilyEnum>`
.. attribute:: vrf_name
VRF Name
**type**\: str
**pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+
.. attribute:: vrf_name_xr
VRF Name
**type**\: str
"""
_prefix = 'ip-sbfd-oper'
_revision = '2015-11-09'
class IpAddress(object):
"""
IP address
.. attribute:: afi
AFI
**type**\: :py:class:`BfdAfIdEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_sbfd_oper.BfdAfIdEnum>`
.. attribute:: dummy
No Address
**type**\: int
**range:** 0..255
.. attribute:: ipv4
IPv4 address type
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: ipv6
IPv6 address type
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
"""
_prefix = 'ip-sbfd-oper'
_revision = '2015-11-09'
@property
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
@staticmethod
@property
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
@staticmethod
@property
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
@staticmethod
@property
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
@staticmethod
class LocalVrfs(object):
"""
SBFD local discriminator data
.. attribute:: local_vrf
Table of local discriminator data per VRF
**type**\: list of :py:class:`LocalVrf <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_sbfd_oper.Sbfd.TargetIdentifier.LocalVrfs.LocalVrf>`
"""
_prefix = 'ip-sbfd-oper'
_revision = '2015-11-09'
class LocalVrf(object):
"""
Table of local discriminator data per VRF
.. attribute:: vrf_name <key>
VRF name
**type**\: str
**pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+
.. attribute:: local_discriminator
SBFD local discriminator
**type**\: list of :py:class:`LocalDiscriminator <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_sbfd_oper.Sbfd.TargetIdentifier.LocalVrfs.LocalVrf.LocalDiscriminator>`
"""
_prefix = 'ip-sbfd-oper'
_revision = '2015-11-09'
class LocalDiscriminator(object):
"""
SBFD local discriminator
.. attribute:: discr
Local discriminator
**type**\: int
**range:** 0..4294967295
.. attribute:: discr_src
Discriminator source name
**type**\: str
.. attribute:: flags
MODE name
**type**\: str
.. attribute:: local_discriminator
Local discriminator
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: status
Status
**type**\: str
.. attribute:: vrf_name
VRF Name
**type**\: str
**pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+
.. attribute:: vrf_name_xr
VRF Name
**type**\: str
"""
_prefix = 'ip-sbfd-oper'
_revision = '2015-11-09'
@property
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
@staticmethod
@property
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
@staticmethod
@property
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
@staticmethod
@property
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
@staticmethod
@property
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
@staticmethod
| [
37811,
28289,
62,
40,
2640,
62,
55,
49,
62,
541,
62,
36299,
16344,
62,
3575,
220,
198,
198,
1212,
8265,
4909,
257,
4947,
286,
575,
15567,
17336,
198,
1640,
28289,
314,
2640,
41441,
55,
49,
20966,
41441,
36299,
16344,
5301,
13919,
1366... | 1.548614 | 8,259 |
# An example of FuncDesigner eigenvalues/eigenvectors for a linear equations system,
# see http://openopt.org/EIG for more examples and details
from FuncDesigner import *
from numpy import arange
n = 100
# create some variables
a, b, c = oovar('a'), oovar('b', size=n), oovar('c', size=2*n)
# let's construct some linear functions R^i -> R^j
# in Python range(m, k) is [m, m+1, ..., m+k-1]
f1 = a + sum(b*arange(5, n+5)) # R^(n+1) -> R
f2 = a + 2*b + c.sum() # R^(2n+1) -> R^n
# you could use size of oovars
f3 = a + a.size + 2*c.size # R^(2n+1) -> R; a.size and c.size will be resolved into 1 and 2*n
f4 = c + arange(4, 2*n+4) + f1 + 0.5*f2.sum() + 4*f3
# We can use "for" cycle:
for i in range(4):
f4 = 0.5*f4 + a + f1 + 1
# Also, we could use matrix multiplication, eg f5 = dot(someMatrix, f4):
rng = 1.5 + cos(range(2*n)).reshape(-1, 1) # define 2n x 1 vector
R = dot(rng, rng.T) # create a matrix of shape 2n x 2n
f4 = dot(R, f4) # involve matrix multiplication
# Create Python list of linear equations
f = [a+f4+5, 2*a+b*arange(10, n+10)+15, a+4*b.sum()+2*c.sum()-45]
# alternatively, you could pass equations:
f = [a+f4==-5, 2*a+b==-15, a==-4*b.sum()-2*c.sum()+45]
linSys = sle(f)
print('dimension of the SLE: %d' % linSys.n) # dimension of the SLE: 301
# let's search for 4 largest magnitude eigenvalues
r = linSys.eig(goal={'lm':4}, solver='arpack') # requires SciPy installed
# or goal={'largest magnitude':4}, with or without space inside, case-insensitive
# for whole list of available goals see http://openopt.org/EIG
# or use numpy_eig solver instead to search all eigenvalues / eigenvectors:
#r = linSys.eig(solver='numpy_eig') # requires only NumPy installed
print(r.eigenvalues)
#[ -1.35516602e-05 -1.71948079e-05j -6.93570858e-01 +0.00000000e+00j
# 1.73033511e+00 +0.00000000e+00j 4.88614250e+06 +0.00000000e+00j]
# let's print eigenvector for 1st of the obtained eigenvalues with largest magnitude:
print(r.eigenvectors[0])
#{a: (1.5254915493391314e-11-6.5463605815307811e-11j), b: array([ 5.44424793e-07 -7.86615045e-07j, 2.49866501e-07 +1.42239402e-06j,...
# c: array([ -1.41371978e-06 -1.14259649e-06j,1.62417813e-07 -8.00444176e-07j, ..., 5.24756666e-01 -4.13335624e-01j]}
print(r.eigenvectors[-1][a])
#(-0.10673471576669166+0j)
print(type(r.eigenvectors[-1][a]))
#<type 'numpy.complex128'>
| [
2,
1052,
1672,
286,
11138,
66,
23067,
263,
304,
9324,
27160,
14,
68,
9324,
303,
5217,
329,
257,
14174,
27490,
1080,
11,
198,
2,
766,
2638,
1378,
9654,
8738,
13,
2398,
14,
36,
3528,
329,
517,
6096,
290,
3307,
198,
6738,
11138,
66,
... | 2.299507 | 1,015 |
# Generated by Django 2.0.1 on 2018-05-09 19:24
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
362,
13,
15,
13,
16,
319,
2864,
12,
2713,
12,
2931,
678,
25,
1731,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 2.766667 | 30 |
"""Palindrome string
Use incremental development to check if string is palindrome
A palindrome is a word that is spelled the same backward and forward.
Example: noon, redivider
Version 3: test middle
"""
assert is_palindrome("abcba") == True
assert is_palindrome("abca") == False | [
37811,
11531,
521,
5998,
4731,
198,
11041,
29497,
2478,
284,
2198,
611,
4731,
318,
6340,
521,
5998,
198,
32,
6340,
521,
5998,
318,
257,
1573,
326,
318,
32213,
262,
976,
19528,
290,
2651,
13,
198,
16281,
25,
19613,
11,
2266,
452,
1304,... | 3.602564 | 78 |
from .time_series_parameter import TimeSeriesParameter
| [
6738,
764,
2435,
62,
25076,
62,
17143,
2357,
1330,
3862,
27996,
36301,
628
] | 4.307692 | 13 |
import torch
import torch.nn as nn
import torch.nn.functional as F
import warnings
from torch.distributions import MultivariateNormal
import numpy as np
class ActorModel(nn.Module):
""" Actor model used for policy function approximation """
def __init__(self, input_dim, output_dim):
""" Initialize the actor model
Args:
input_dim (int): dimension of input layer of nn (observation space number)
output_dim (int): dimension of output layer of nn (action space number)
"""
super(ActorModel, self).__init__()
self.first_layer = nn.Linear(input_dim, 128)
self.second_layer = nn.Linear(128, 64)
self.third_layer = nn.Linear(64, output_dim)
def forward(self, x):
""" Forward pass through actor neural network
Args:
x (tensor): current state
Returns:
(tensor): action to take in the current state
"""
warnings.filterwarnings("ignore")
x = torch.tensor(x, dtype=torch.float32)
x = F.relu(self.first_layer(x))
x = F.relu(self.second_layer(x))
x = self.third_layer(x)
return x
class Actor():
""" Main class that is used as Actor in TRPO algorithm """
def __init__(self, input_dim, output_dim):
""" Initialize the actor class
Args:
input_dim (int): dimension of input layer of nn (observation space number)
output_dim (int): dimension of output layer of nn (action space number)
"""
self.model = ActorModel(input_dim, output_dim)
self.covariance_matrix = torch.diag(input=torch.full(size=(output_dim,), fill_value=0.5), diagonal=0)
def get_action(self, state):
""" Getting action for current state
Args:
state (tensor): current state
Returns:
(tensor, tensor): action and logaritmic probability of that action
"""
mu = self.model.forward(state)
multivariate_gaussian_distribution = MultivariateNormal(loc=mu, covariance_matrix=self.covariance_matrix)
action = multivariate_gaussian_distribution.sample()
log_probability = multivariate_gaussian_distribution.log_prob(value=action)
return action, log_probability
def get_mean_std(self, states):
""" Based on states returns means and standard deviations
Args:
states (tensor): observed states
Returns:
(tensor, tensor): means and standard deviations
"""
mean = self.model.forward(states)
std = torch.exp(nn.Parameter(torch.zeros(1, 17)).expand_as(mean))
return mean, std
def update_parameters(self, grads):
""" Manually updating parameters of actor model with gradient
Args:
grads (array): gradient to update actor nn's parameters
"""
n = 0
for p in self.model.parameters():
numel = p.numel()
g = grads[n:n + numel].view(p.shape)
p.data += g
n += numel
| [
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
11748,
14601,
198,
6738,
28034,
13,
17080,
2455,
507,
1330,
7854,
42524,
26447,
198,
11748,
299,
32152,
355,
45941,
628,
1... | 2.349507 | 1,319 |
#!/usr/bin/env python3
import argparse
import pymef90.mesh
import sys
import os
parser = argparse.ArgumentParser()
parser.add_argument("gmeshFile", help = "The name of the mesh file to be parsed.", type = str)
parser.add_argument("exoFile", help = "The name of the exodus file to be written.", type = str)
parser.add_argument("--force",action="store_true",default=False,help="Overwrite existing files without prompting")
args = parser.parse_args()
if os.path.exists(args.exoFile):
if args.force:
os.remove(args.exoFile)
else:
if pymef90.confirm("ExodusII file {0} already exists. Overwrite?".format(args.exoFile)):
os.remove(args.exoFile)
else:
print ('\n\t{0} was NOT generated from {1}\n'.format(args.exoFile,args.gmeshFile))
sys.exit()
(coord,vertexSet,cellSet,numDim) = pymef90.mesh.GMSHread(args.gmeshFile)
pymef90.mesh.EXODUSwrite(coord,vertexSet,cellSet,numDim,args.exoFile)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
11748,
1822,
29572,
198,
11748,
279,
4948,
891,
3829,
13,
76,
5069,
198,
11748,
25064,
198,
11748,
28686,
198,
198,
48610,
796,
1822,
29572,
13,
28100,
1713,
46677,
3419,
198,
48610,... | 2.503937 | 381 |